| content (stringlengths 0-1.05M) | origin (stringclasses 2 values) | type (stringclasses 2 values) |
|---|---|---|
import sys
import csv
import numpy as np
import statistics
import scipy.stats
def anova(index, norobot_data, video_data, robot_data):
norobot_mean = norobot_data.mean(axis = 0)[index]
video_mean = video_data.mean(axis = 0)[index]
robot_mean = robot_data.mean(axis = 0)[index]
group_means = [norobot_mean, video_mean, robot_mean]
total_mean = statistics.mean(group_means)
norobot_values = norobot_data[:,index]
video_values = video_data[:,index]
robot_values = robot_data[:,index]
SST = 0
for i in group_means:
SST += 5 * (i - total_mean)**2  # 5 observations per group
MST = SST / 2 # MST = SST / (k - 1)
norobot_sse = 0
for value in norobot_values:
norobot_sse += (value - norobot_mean)**2
video_sse = 0
for value in video_values:
video_sse += (value - video_mean)**2
robot_sse = 0
for value in robot_values:
robot_sse += (value - robot_mean)**2
SSE = norobot_sse + video_sse + robot_sse
MSE = SSE / (15 - 3) # MSE = SSE / (n - k)
F = MST / MSE
pval = 1-scipy.stats.f.cdf(F, 2, 12)
# print(F)
# print("pval",pval)
###
SS = SSE + SST
ss = 0
for value in norobot_values:
ss += (value - total_mean)**2
for value in video_values:
ss += (value - total_mean)**2
for value in robot_values:
ss += (value - total_mean)**2
# print(ss, SS)
###
print("index", index)
print("SST", SST)
print("SSE", SSE)
print("MST", MST)
print("MSE", MSE)
print("SS", SS)
print("F", F)
print("P-value", pval)
print("\n")
return
def ttest(index, norobot_data, video_data, robot_data):
norobot_mean = norobot_data.mean(axis = 0)[index]
video_mean = video_data.mean(axis = 0)[index]
robot_mean = robot_data.mean(axis = 0)[index]
norobot_std = norobot_data.std(axis = 0)[index]
video_std = video_data.std(axis = 0)[index]
robot_std = robot_data.std(axis = 0)[index]
mean_0 = 0 # mean under the null - no improvement
norobot_t = norobot_mean/(norobot_std / (15)**0.5)
video_t = video_mean/(video_std / (15)**0.5)
robot_t = robot_mean/(robot_std / (15)**0.5)
norobot_pval = 1 - scipy.stats.t.cdf(norobot_t, 14)
video_pval = 1 - scipy.stats.t.cdf(video_t, 14)
robot_pval = 1 - scipy.stats.t.cdf(robot_t, 14)
print("Index", index)
print("Mean - no robot", norobot_mean)
print("T value - no robot", norobot_t)
print("P-value - no robot", norobot_pval)
print("Mean - video", video_mean)
print("T value - video", video_t)
print("P-value - video", video_pval)
print("Mean - robot", robot_mean)
print("T value - robot", robot_t)
print("P-value - robot", robot_pval)
print("\n")
def main(args):
df = args[1]
datafile = open(df, "r")
read_csv = csv.reader(datafile, delimiter=",")
data = []
for row in read_csv:
x = list()
# x.append(row[1])
if row[1] == "norobot":
x.append(1)
elif row[1] == "video":
x.append(2)
else:
x.append(3)
values = [float(i) for i in row[2:]]  # parse numeric fields; float() is safer than eval()
x += values
x.append(statistics.mean(values))
x.append(values[0] - values[1])
x.append(values[1] - values[2])
x.append(values[0] - values[2])
data.append(x)
norobot_data = []
video_data = []
robot_data = []
# print(data)
for trial in data:
if trial[0] == 1:
norobot_data.append(trial)
elif trial[0] == 2:
video_data.append(trial)
else:
robot_data.append(trial)
norobot_data = np.array(norobot_data)
video_data = np.array(video_data)
robot_data = np.array(robot_data)
# for i in [5, 6, 7]:
# anova(i, norobot_data, video_data, robot_data)
for i in [5, 6, 7]:
ttest(i, norobot_data, video_data, robot_data)
if __name__ == "__main__":
main(sys.argv)
'''
H_0 : mean_norobot = mean_video = mean_robot
H_a : not mean_norobot = mean_video = mean_robot
alpha = 0.05
qf(0.95, 2, 12) = 3.885294
Rejection Region: {F > 3.885294}
ANOVA Table RESULTS
time_1:
Source dof SS MS F
Treatments 2 95432.4 47716.2 0.60383
Error 12 948262.0 79021.8
Total 14 1043694.4
p-value 0.5625096331593546
time_2:
Source dof SS MS F
Treatments 2 17142.5 8571.2 0.16672
Error 12 616930.4 51410.9
Total 14 634072.9
p-value 0.8483630364091982
time_3:
Source dof SS MS F
Treatments 2 49522.8 24761.4 0.241145
Error 12 1232189.2 102682.4
Total 14 1281712.0
p-value 0.7894446486187324
Average Time:
Source dof SS MS F
Treatments 2 37014.0 18507.0 0.479521
Error 12 463136.6 38594.7
Total 14 500150.6
p-value 0.6304490558407776
Improvement from time_1 to time_2
Source dof SS MS F
Treatments 2 99302.9 49651.5 1.1005396
Error 12 541386.8 45115.6
Total 14 640689.7
p-value 0.36404861871620386
Improvement from time_2 to time_3
Source dof SS MS F
Treatments 2 34797.7 17398.9 0.1037937
Error 12 2011551.2 167629.2
Total 14 2046348.9
p-value 0.9022116073486796
Improvement from time_1 to time_3
Source dof SS MS F
Treatments 2 19066.8 9533.4 0.068463
Error 12 1670977.6 139248.1
Total 14 1690044.4
p-value 0.9341897168496459
'''
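# A hedged cross-check (not part of the original analysis): scipy.stats.f_oneway
# computes the same one-way ANOVA directly, so it should reproduce the F statistic
# and p-value derived manually in anova() above, assuming five observations per
# group as the hard-coded constants there imply. Convenience sketch only; not called.
def anova_crosscheck(index, norobot_data, video_data, robot_data):
    F, pval = scipy.stats.f_oneway(norobot_data[:, index],
                                   video_data[:, index],
                                   robot_data[:, index])
    print("f_oneway F", F, "p-value", pval)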
'''
H_0: mean improvement = 0
H_a: mean improvement > 0
Improvement between time_1 and time_2
Mean - no robot 262.2
T value - no robot 5.581827247691283
P-value - no robot 3.380587255563672e-05
Mean - video 63.8
T value - video 0.9839638259926194
P-value - video 0.17091676826650537
Mean - robot 146.6
T value - robot 5.158170177143269
P-value - robot 7.265008933243777e-05
Improvement between time_2 and time_3
Mean - no robot -89.2
T value - no robot -0.9274569021697335
P-value - no robot 0.815298302242971
Mean - video 23.4
T value - video 0.2024783964679772
P-value - video 0.4212278577733659
Mean - robot -2.4
T value - robot -0.036968008327296194
P-value - robot 0.5144837641036524
Improvement from time_1 to time_3
Mean - no robot 173.0
T value - no robot 2.5331918015827544
P-value - no robot 0.011941444190466166
Mean - video 87.2
T value - video 0.779810428227249
P-value - video 0.22424287864651182
Mean - robot 144.2
T value - robot 2.0169198592088846
P-value - robot 0.03165118966953784
'''
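# A hedged cross-check (assumes scipy >= 1.6 for the `alternative` argument):
# scipy.stats.ttest_1samp with alternative="greater" performs the same one-sided
# one-sample t-test as ttest() above, except that it uses the sample (ddof=1)
# standard deviation where the manual code uses the population standard deviation,
# so the values will differ slightly. Sketch only; not called anywhere.
def ttest_crosscheck(index, group_data):
    t, pval = scipy.stats.ttest_1samp(group_data[:, index], 0, alternative="greater")
    print("ttest_1samp t", t, "p-value", pval)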
|
nilq/baby-python
|
python
|
from .sequence_tagger_model import SequenceTagger, MultiTagger
from .language_model import LanguageModel
from .text_classification_model import TextClassifier
from .pairwise_classification_model import TextPairClassifier
from .relation_extractor_model import RelationExtractor
from .entity_linker_model import EntityLinker
from .tars_model import FewshotClassifier
from .tars_model import TARSClassifier
from .tars_model import TARSTagger
|
nilq/baby-python
|
python
|
# Brute force: checks every substring and keeps the longest palindrome (O(n^3) time).
def longestPalindromicSubstring(string):
longest = ""
for i in range(len(string)):
for j in range(i, len(string)):
substring = string[i : j + 1]
if len(substring) > len(longest) and isPalindrome(substring):
longest = substring
return longest
def isPalindrome(string):
leftIdx = 0
rightIdx = len(string)- 1
while leftIdx < rightIdx:
if string[leftIdx] != string[rightIdx]:
return False
leftIdx += 1
rightIdx -= 1
return True
|
nilq/baby-python
|
python
|
from django.conf import settings
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from user.models import User
from user.serializers import UserSerializer
import redis
import uuid
import pycountry
# initializes the redis client connection.
redis_instance = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=0)
set_name = settings.REDIS_SET_NAME
# returns the top `size` users (IDs and points) of the given redis sorted set.
def get_top_users(country, size):
top_users = redis_instance.zrevrange(country, 0, size-1, withscores=True)
IDs = []
points = []
for i in range(len(top_users)):
ID_str = top_users[i][0].decode('utf-8')
IDs.append(uuid.UUID(ID_str))
points.append(top_users[i][1])
return IDs, points
# Returns the individual country ranks of top users if the user requested global
# leaderboard, and returns the global ranks of the top users if the user requested
# country leaderboard.
def get_ranking(users, ID_list, is_global_ranking):
pipeline = redis_instance.pipeline()
for user_id in ID_list:
user = users.get(user_id=user_id)
pipeline.zrevrank(set_name if is_global_ranking else user.country, str(user_id))
pipeline_values = pipeline.execute()
return pipeline_values
class global_leaderboard(APIView):
def get(self, request):
leaderboard_size = 50
# gets the IDs and points of the top 50 users globally.
IDs, points = get_top_users(set_name, leaderboard_size)
users = User.objects.filter(user_id__in=IDs)
# gets the individual country ranks of those users, stores them in 'country_ranks'
# variable.
country_ranks = get_ranking(users, IDs, False)
# creates a list of users to be updated in the database. This list contains
# the most up to date values of those users, freshly received from the redis
# table.
for user in users:
user_index = IDs.index(user.user_id)
user.rank = user_index+1
user.points = points[user_index]
user.country_rank = country_ranks[user_index]+1
# updates the values of those users in the database.
User.objects.bulk_update(users, ['points', 'rank', 'country_rank'])
serializer = UserSerializer(users, many=True)
data = list(serializer.data)
data.reverse()
return Response(data, status=status.HTTP_200_OK)
# Follows a similar procedure to the global leaderboard class.
class country_leaderboard(APIView):
def get(self, request, country):
if not pycountry.countries.get(alpha_2=country):
return Response({'message': 'Invalid country ISO code. Please use ISO 3166-1 alpha-2 codes.'}, status=status.HTTP_400_BAD_REQUEST)
leaderboard_size = 50
IDs, points = get_top_users(country, leaderboard_size)
users = User.objects.filter(user_id__in=IDs)
global_ranks = get_ranking(users, IDs, True)
for user in users:
user_index = IDs.index(user.user_id)
user.country_rank = user_index+1
user.points = points[user_index]
user.rank = global_ranks[user_index]+1
User.objects.bulk_update(users, ['points', 'rank', 'country_rank'])
serializer = UserSerializer(users, many=True)
data = list(serializer.data)
data.reverse()
return Response(data, status=status.HTTP_200_OK)
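# Hedged illustration (hypothetical helper, not part of the original views): the code
# above reads stringified user UUIDs scored by points from redis sorted sets keyed by
# the global set name and by ISO country code. Under that assumption, updating a
# user's score would look roughly like this (redis-py 3.x argument order).
def add_points(user, delta):
    pipeline = redis_instance.pipeline()
    pipeline.zincrby(set_name, delta, str(user.user_id))      # global leaderboard
    pipeline.zincrby(user.country, delta, str(user.user_id))  # per-country leaderboard
    pipeline.execute()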
|
nilq/baby-python
|
python
|
import cv2
import numpy as np
import BboxToolkit as bt
import pycocotools.mask as maskUtils
from mmdet.core import PolygonMasks, BitmapMasks
pi = np.pi  # full-precision constant instead of a truncated literal
def bbox2mask(bboxes, w, h, mask_type='polygon'):
polys = bt.bbox2type(bboxes, 'poly')
assert mask_type in ['polygon', 'bitmap']
if mask_type == 'bitmap':
masks = []
for poly in polys:
rles = maskUtils.frPyObjects([poly.tolist()], h, w)
masks.append(maskUtils.decode(rles[0]))
gt_masks = BitmapMasks(masks, h, w)
else:
gt_masks = PolygonMasks([[poly] for poly in polys], h, w)
return gt_masks
def switch_mask_type(masks, mtype='bitmap'):
if isinstance(masks, PolygonMasks) and mtype == 'bitmap':
width, height = masks.width, masks.height
bitmap_masks = []
for poly_per_obj in masks.masks:
rles = maskUtils.frPyObjects(poly_per_obj, height, width)
rle = maskUtils.merge(rles)
bitmap_masks.append(maskUtils.decode(rle).astype(np.uint8))
masks = BitmapMasks(bitmap_masks, height, width)
elif isinstance(masks, BitmapMasks) and mtype == 'polygon':
width, height = masks.width, masks.height
polygons = []
for bitmask in masks.masks:
try:
contours, _ = cv2.findContours(
bitmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
except ValueError:
_, contours, _ = cv2.findContours(
bitmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
polygons.append(list(contours))
masks = PolygonMasks(polygons, width, height)
return masks
def rotate_polygonmask(masks, matrix, width, height):
if len(masks) == 0:
return masks
points, sections, instances = [], [], []
for i, polys_per_obj in enumerate(masks):
for j, poly in enumerate(polys_per_obj):
poly_points = poly.reshape(-1, 2)
num_points = poly_points.shape[0]
points.append(poly_points)
sections.append(np.full((num_points, ), j))
instances.append(np.full((num_points, ), i))
points = np.concatenate(points, axis=0)
sections = np.concatenate(sections, axis=0)
instances = np.concatenate(instances, axis=0)
points = cv2.transform(points[:, None, :], matrix)[:, 0, :]
warpped_polygons = []
for i in range(len(masks)):
_points = points[instances == i]
_sections = sections[instances == i]
warpped_polygons.append(
[_points[_sections == j].reshape(-1)
for j in np.unique(_sections)])
return PolygonMasks(warpped_polygons, height, width)
def polymask2hbb(masks):
hbbs = []
for mask in masks:
all_mask_points = np.concatenate(mask, axis=0).reshape(-1, 2)
min_points = all_mask_points.min(axis=0)
max_points = all_mask_points.max(axis=0)
hbbs.append(np.concatenate([min_points, max_points], axis=0))
hbbs = np.array(hbbs, dtype=np.float32) if hbbs else \
np.zeros((0, 4), dtype=np.float32)
return hbbs
def polymask2obb(masks):
obbs = []
for mask in masks:
all_mask_points = np.concatenate(mask, axis=0).reshape(-1, 2)
all_mask_points = all_mask_points.astype(np.float32)
(x, y), (w, h), angle = cv2.minAreaRect(all_mask_points)
angle = -angle
theta = angle / 180 * pi
obbs.append([x, y, w, h, theta])
if not obbs:
obbs = np.zeros((0, 5), dtype=np.float32)
else:
obbs = np.array(obbs, dtype=np.float32)
obbs = bt.regular_obb(obbs)
return obbs
def polymask2poly(masks):
polys = []
for mask in masks:
all_mask_points = np.concatenate(mask, axis=0)[None, :]
if all_mask_points.size != 8:
all_mask_points = bt.bbox2type(all_mask_points, 'obb')
all_mask_points = bt.bbox2type(all_mask_points, 'poly')
polys.append(all_mask_points)
if not polys:
polys = np.zeros((0, 8), dtype=np.float32)
else:
polys = np.concatenate(polys, axis=0)
return polys
def bitmapmask2hbb(masks):
if len(masks) == 0:
return np.zeros((0, 4), dtype=np.float32)
bitmaps = masks.masks
height, width = masks.height, masks.width
num = bitmaps.shape[0]
x, y = np.arange(width), np.arange(height)
xx, yy = np.meshgrid(x, y)
coors = np.stack([xx, yy], axis=-1)
coors = coors[None, ...].repeat(num, axis=0)
coors_ = coors.copy()
coors_[bitmaps == 0] = -1
max_points = np.max(coors_, axis=(1, 2)) + 1
coors_ = coors.copy()
coors_[bitmaps == 0] = 99999
min_points = np.min(coors_, axis=(1, 2))
hbbs = np.concatenate([min_points, max_points], axis=1)
hbbs = hbbs.astype(np.float32)
return hbbs
def bitmapmask2obb(masks):
if len(masks) == 0:
return np.zeros((0, 5), dtype=np.float32)
height, width = masks.height, masks.width
x, y = np.arange(width), np.arange(height)
xx, yy = np.meshgrid(x, y)
coors = np.stack([xx, yy], axis=-1)
coors = coors.astype(np.float32)
obbs = []
for mask in masks:
points = coors[mask == 1]
(x, y), (w, h), angle = cv2.minAreaRect(points)
angle = -angle
theta = angle / 180 * pi
obbs.append([x, y, w, h, theta])
obbs = np.array(obbs, dtype=np.float32)
obbs = bt.regular_obb(obbs)
return obbs
def bitmapmask2poly(masks):
if len(masks) == 0:
return np.zeros((0, 8), dtype=np.float32)
height, width = masks.height, masks.width
x, y = np.arange(width), np.arange(height)
xx, yy = np.meshgrid(x, y)
coors = np.stack([xx, yy], axis=-1)
coors = coors.astype(np.float32)
obbs = []
for mask in masks:
points = coors[mask == 1]
(x, y), (w, h), angle = cv2.minAreaRect(points)
angle = -angle
theta = angle / 180 * pi
obbs.append([x, y, w, h, theta])
obbs = np.array(obbs, dtype=np.float32)
return bt.bbox2type(obbs, 'poly')
def mask2bbox(masks, btype):
if isinstance(masks, PolygonMasks):
tran_func = bt.choice_by_type(polymask2hbb,
polymask2obb,
polymask2poly,
btype)
elif isinstance(masks, BitmapMasks):
tran_func = bt.choice_by_type(bitmapmask2hbb,
bitmapmask2obb,
bitmapmask2poly,
btype)
else:
raise NotImplementedError
return tran_func(masks)
|
nilq/baby-python
|
python
|
from flask_sqlalchemy import SQLAlchemy
from typing import Optional, Set
from models import Team, ProblemSet, PermissionPack
class DefaultPermissionProvider:
def __init__(self, db: SQLAlchemy) -> None:
self.db = db
def get_contest_permissions(self, uid: int, contest_id: Optional[str]) -> Set[str]:
return {f"contest.use.{contest_id}"}
def get_team_permissions(self, uid: int, team_id: Optional[str]) -> Set[str]:
# joined = self.db.session.query(TeamMember).filter_by(
# uid=uid, team_id=team).count() != 0
team: Team = self.db.session.query(
Team.team_contests, Team.team_problems, Team.team_problemsets, Team.id).filter(Team.id == team_id).one_or_none()
if not team:
return set()
print(team)
return {f"team.use.{team_id}"} | {f"[provider:contest.{x}]" for x in team.team_contests} | {f"problem.use.{x}" for x in team.team_problems} | {f"[provider:problemset.{x}]" for x in team.team_problemsets}
def get_problemset_permissions(self, uid: int, problemset: Optional[str]) -> Set[str]:
ps: ProblemSet = self.db.session.query(
ProblemSet.problems).filter_by(id=problemset).one_or_none()
if not ps:
return set()
return {f"problem.use.{x}" for x in ps.problems} | {f"problemset.use.{problemset}"}
def get_permissionpack_permissions(self, uid: int, permpack_id: Optional[str]) -> Set[str]:
permpack: PermissionPack = self.db.session.query(
PermissionPack.permissions).filter(PermissionPack.id == permpack_id).one_or_none()
if not permpack:
return set()
return {f"permissionpack.claimed.{permpack_id}"} | {x for x in permpack.permissions}
|
nilq/baby-python
|
python
|
import pytest
from drink_partners.extensions.authentication.static import (
StaticAuthenticationBackend
)
class TestStaticAuthentication:
@pytest.fixture
def backend(self):
return StaticAuthenticationBackend.create()
async def test_respects_the_token_from_querystring_param(
self,
backend,
make_request,
token,
application,
settings_with_applications
):
request = make_request(
method='get',
url='https://www.zedelivery.com.br/',
params={'token': token}
)
authorized_application = await backend.authenticate(request)
assert application['name'] == authorized_application['name']
async def test_respects_the_token_from_headers(
self,
backend,
make_request,
token,
application,
settings_with_applications
):
request = make_request(
method='get',
url='https://www.zedelivery.com.br/',
headers={backend.AUTH_HEADER: token}
)
authorized_application = await backend.authenticate(request)
assert application['name'] == authorized_application['name']
async def test_returns_none_for_non_authenticated_request(
self,
backend,
make_request,
settings_with_applications
):
request = make_request(
method='get',
url='https://www.zedelivery.com.br/'
)
application = await backend.authenticate(request)
assert application is None
|
nilq/baby-python
|
python
|
from tracrpc.api import *
from tracrpc.web_ui import *
from tracrpc.ticket import *
from tracrpc.wiki import *
from tracrpc.search import *
|
nilq/baby-python
|
python
|
import sys
import azure
import socket
from azure.servicebus import (
_service_bus_error_handler
)
from azure.servicebus.servicebusservice import (
ServiceBusService,
ServiceBusSASAuthentication
)
#from azure.http import (
# HTTPRequest,
# HTTPError
# )
#from azure.http.httpclient import _HTTPClient
sbnamespace = "iot34ns"
sasKeyName = "devices"
sasKeyValue = "9DiC0UfzRn/EeQdg9+84UPyJLprQbXvhrqPzt9ayubo="
eventhubname = "iotte"
thisdevice = "onedevice"
sbs = ServiceBusService(service_namespace=sbnamespace,
shared_access_key_name=sasKeyName,
shared_access_key_value=sasKeyValue)
sbs.send_event(eventhubname, "testing", device_id=thisdevice)
|
nilq/baby-python
|
python
|
# Function to read the txt file that contains the encrypted message.
# The file is named mensaje_cifrado_grupo1.txt.
def txt_a_mensaje():  # function 7
    with open("mensaje_cifrado_grupo1.txt", "r", encoding="utf-8") as archivo:
        mensaje = archivo.read()
    return mensaje  # the message is returned as a string
|
nilq/baby-python
|
python
|
from django.urls import path
from .views import Notifier
urlpatterns = [
path('get/<int:pk>', Notifier.as_view()),
path('get', Notifier.as_view()),
]
|
nilq/baby-python
|
python
|
# built-in
from argparse import ArgumentParser
from pathlib import Path
from shutil import rmtree
# app
from ..actions import format_size, get_path_size
from ..config import builders
from .base import BaseCommand
class SelfUncacheCommand(BaseCommand):
"""Remove dephell cache.
"""
@staticmethod
def build_parser(parser) -> ArgumentParser:
builders.build_config(parser)
builders.build_output(parser)
builders.build_other(parser)
return parser
def __call__(self) -> bool:
path = Path(self.config['cache']['path'])
if path.exists():
size = format_size(get_path_size(path))
rmtree(str(path))
self.logger.info('cache removed', extra=dict(size=size))
else:
self.logger.warning('no cache found')
return True
|
nilq/baby-python
|
python
|
from distutils.core import setup
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name = 'EssentialCV',
packages = ['EssentialCV'],
version = '0.26',
license='MIT',
description = 'A small module to simplify essential OpenCV functions.',
long_description=long_description,
long_description_content_type='text/markdown',
author = 'Rednek46',
author_email = 'nuzer501@gmail.com',
url = 'https://rednek46.me',
download_url = 'https://github.com/rednek46/EssentialCV/archive/0.25F.tar.gz',
keywords = ['OpenCV', 'Simple', 'Essentials', 'haar'],
install_requires=[
'opencv-contrib-python',
'numpy',
],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
|
nilq/baby-python
|
python
|
import numpy as np
def wPrefersM1OverM(prefer, w, m, m1):
for i in range(N):
if (prefer[w][i] == m1):
return True
if (prefer[w][i] == m):
return False
def stableMarriage(prefer):
wPartner = [-1 for i in range(N)]
mFree = [False for i in range(N)]
freeCount = N
while (freeCount > 0):
m = 0
while (m < N):
if mFree[m] == False:
break
m += 1
i = 0
while i < N and mFree[m] == False:
w = prefer[m][i]
if (wPartner[w - N] == -1):
wPartner[w - N] = m
mFree[m] = True
freeCount -= 1
else:
m1 = wPartner[w - N]
if (wPrefersM1OverM(prefer, w, m, m1) == False):
wPartner[w - N] = m
mFree[m] = True
mFree[m1] = False
i += 1
print("Woman ", " Man")
for i in range(N):
print(i + N, "\t", wPartner[i])
N = int(input("Enter the number of men/women: "))
print("Enter preferences:")
entries = list(map(int, input().split()))
prefer = np.array(entries).reshape(2*N, N)
stableMarriage(prefer)
"""
Time Complexity: O(n^2)
Sample Input:
Enter the number of men/women: 4
Enter preferences: 7 5 6 4 5 4 6 7 4 5 6 7 4 5 6 7 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3
Output:
Woman Man
4 2
5 1
6 3
7 0
"""
|
nilq/baby-python
|
python
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
##############################################################################
#
# PURPOSE:
# Helper library used by the MRE internal lambda functions to interact with
# the control plane
#
##############################################################################
import os
import re
import json
import urllib3
import boto3
import requests
from requests_aws4auth import AWS4Auth
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def get_endpoint_url_from_ssm():
ssm_client = boto3.client(
'ssm',
region_name=os.environ['AWS_REGION']
)
response = ssm_client.get_parameter(
Name='/MRE/ControlPlane/EndpointURL',
WithDecryption=True
)
assert "Parameter" in response
endpoint_url = response["Parameter"]["Value"]
endpoint_url_regex = ".*.execute-api."+os.environ['AWS_REGION']+".amazonaws.com/api/.*"
assert re.match(endpoint_url_regex, endpoint_url)
return endpoint_url
class ControlPlane:
"""
Helper Class for interacting with the Control plane
"""
def __init__(self):
self.endpoint_url = get_endpoint_url_from_ssm()
self.auth = AWS4Auth(
os.environ['AWS_ACCESS_KEY_ID'],
os.environ['AWS_SECRET_ACCESS_KEY'],
os.environ['AWS_REGION'],
'execute-api',
session_token=os.getenv('AWS_SESSION_TOKEN')
)
def invoke_controlplane_api(self, path, method, headers=None, body=None, params=None):
"""
Method to invoke the Control plane REST API Endpoint.
:param path: Path to the corresponding API resource
:param method: REST API method
:param headers: (optional) headers to include in the request
:param body: (optional) data to send in the body of the request
:param params: (optional) data to send in the request query string
:return: Control plane API response object
"""
print(f"{method} {path}")
try:
response = requests.request(
method=method,
url=self.endpoint_url + path,
params=params,
headers=headers,
data=body,
verify=False,
auth=self.auth
)
response.raise_for_status()
except requests.exceptions.RequestException as e:
print(f"Encountered an error while invoking the control plane api: {str(e)}")
raise Exception(e)
else:
return response
def store_first_pts(self, event, program, first_pts):
"""
Method to store the pts timecode of the first frame of the first HLS video segment in the Control plane.
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param first_pts: The pts timecode of the first frame of the first HLS video segment
:return: Control plane response
"""
path = f"/event/{event}/program/{program}/timecode/firstpts/{first_pts}"
method = "PUT"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def get_first_pts(self, event, program):
"""
Method to get the pts timecode of the first frame of the first HLS video segment from the Control plane.
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:return: Control plane response containing the pts timecode of the first frame of the first HLS video segment
"""
path = f"/event/{event}/program/{program}/timecode/firstpts"
method = "GET"
api_response = self.invoke_controlplane_api(path, method)
if api_response.text == "null":
return None
return api_response.text
def store_frame_rate(self, event, program, frame_rate):
"""
Method to store the frame rate identified after probing the first HLS video segment in the Control plane.
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param frame_rate: The frame rate identified from the first HLS video segment
:return: Control plane response
"""
path = f"/event/{event}/program/{program}/framerate/{frame_rate}"
method = "PUT"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def store_audio_tracks(self, event, program, audio_tracks):
"""
Method to store the audio track details identified after probing the first HLS video segment in the Control plane.
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param audio_tracks: List of audio tracks identified from the first HLS video segment
:return: Control plane response
"""
path = "/event/metadata/track/audio"
method = "POST"
headers = {
"Content-Type": "application/json"
}
body = {
"Name": event,
"Program": program,
"AudioTracks": audio_tracks
}
api_response = self.invoke_controlplane_api(path, method, headers=headers, body=json.dumps(body))
return api_response.json()
def get_chunk_number(self, filename):
"""
Method to extract the chunk number from HLS segment filename.
:param filename: Name of the HLS segment file
:return: Chunk number as integer
"""
root, _ = os.path.splitext(filename)
return int(root.split("_")[-1].lstrip("0"))
def record_execution_details(self, event, program, filename, execution_id):
"""
Method to record the details of an AWS Step Function workflow execution in the Control plane.
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param filename: Filename of the HLS Segment (Chunk) being processed in the workflow execution
:param execution_id: Execution ID of the Step Function workflow
:return: Control plane response
"""
path = "/workflow/execution"
method = "POST"
headers = {
"Content-Type": "application/json"
}
body = {
"Program": program,
"Event": event,
"ExecutionId": execution_id,
"ChunkNumber": self.get_chunk_number(filename),
"Filename": filename
}
api_response = self.invoke_controlplane_api(path, method, headers=headers, body=json.dumps(body))
return api_response.json()
def put_plugin_execution_status(self, event, program, filename, plugin_name, status):
"""
Method to update the execution status of a plugin in an AWS Step Function workflow in the Control plane.
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param filename: Filename of the HLS Segment (Chunk) being processed in the workflow execution
:param plugin_name: Name of the plugin for which the execution status update is needed
:param status: Status of the plugin execution - Waiting, In Progress, Complete, Error
:return: Control plane response
"""
path = f"/workflow/execution/program/{program}/event/{event}/chunk/{self.get_chunk_number(filename)}/plugin/{plugin_name}/status/{status}"
method = "PUT"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def get_plugin_execution_status(self, event, program, filename, plugin_name):
"""
Method to retrieve the execution status of a plugin in an AWS Step Function workflow in the Control plane.
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param filename: Filename of the HLS Segment (Chunk) being processed in the workflow execution
:param plugin_name: Name of the plugin for which the execution status is to be retrieved
:return: Control plane response
"""
path = f"/workflow/execution/program/{program}/event/{event}/chunk/{self.get_chunk_number(filename)}/plugin/{plugin_name}/status"
method = "GET"
api_response = self.invoke_controlplane_api(path, method)
if api_response.text == "null":
return None
return api_response.text
def list_incomplete_executions(self, event, program, filename, plugin_name):
"""
Method to list all the Classifiers/Optimizers that are either yet to start or currently in progress in any of
the workflow executions prior to the current execution.
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param filename: Filename of the HLS Segment (Chunk) being processed in the workflow execution
:param plugin_name: Name of either the Classifier or the Optimizer plugin
:return: Control plane response
"""
path = f"/workflow/execution/program/{program}/event/{event}/chunk/{self.get_chunk_number(filename)}/plugin/{plugin_name}/status/incomplete"
method = "GET"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def get_profile(self, profile):
"""
Method to retrieve the processing profile information from the Control plane.
:param profile: Name of the processing profile to retrieve
:return: Control plane response
"""
path = f"/profile/{profile}"
method = "GET"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def put_event_status(self, event, program, status):
"""
Method to update the status of an event in the Control plane.
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param status: Status to update for the event
:return: Control plane response
"""
path = f"/event/{event}/program/{program}/status/{status}"
method = "PUT"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def get_event_status(self, event, program):
"""
Method to get the status of an event from the Control plane.
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:return: Control plane response
"""
path = f"/event/{event}/program/{program}/status"
method = "GET"
api_response = self.invoke_controlplane_api(path, method)
return api_response.text
#--------------- Replay Engine Changes Starts ----------------------------------------------------
def update_event_has_replays(self, event, program):
"""
Updates a flag on an event indicating that a replay has been created
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:return: Control plane response
"""
path = f"/event/{event}/program/{program}/hasreplays"
method = "PUT"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def get_event(self, event, program):
"""
Gets an Event based on Event name and Program Name
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:return: Control plane response
"""
path = f"/event/{event}/program/{program}"
method = "GET"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def get_replay_request(self, event, program, replay_request_id):
"""
Gets Replay Request based on Event name, Program Name and Id
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param replay_request_id: Replay Request Id present in the input payload passed to Lambda
:return: Control plane response
"""
path = f"/replay/program/{program}/event/{event}/replayid/{replay_request_id}"
method = "GET"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def get_plugin_by_name(self, plugin_name):
"""
Get the latest version of a plugin by name.
:param plugin_name: Name of the Plugin
:return: Control plane response
"""
path = f"/plugin/{plugin_name}"
method = "GET"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def update_replay_request_status(self, program, event, id, replaystatus):
"""
Updates Replay Request status based on Event name, Program Name and Replay Request Id
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param id: Replay Request Id
:param replaystatus: Replay Request status to be updated
:return: Update status
"""
path = f"/replay/program/{program}/event/{event}/replayid/{id}/status/update/{replaystatus}"
method = "PUT"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def update_replay_request_with_mp4_location(self, event, program, id, mp4_location, thumbnail):
"""
Updates the generated MP4 location with the replay request
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param id: Replay Request Id
:param mp4_location: Location of the generated MP4 video
:param thumbnail: Location of the video thumbnail
"""
path = f"/replay/mp4location/update"
method = "POST"
headers = {
"Content-Type": "application/json"
}
body = {
"Name": event,
"Program": program,
"ReplayRequestId": id,
"Mp4Location": mp4_location,
"Thumbnail": thumbnail
}
self.invoke_controlplane_api(path, method, headers=headers, body=json.dumps(body))
def get_all_replay_requests_for_event_opto_segment_end(self, program, event, audioTrack):
"""
Gets all Replay Requests matching program, event and the AudioTrack
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param audioTrack: AudioTrack configured within Replay Request
:return: List of Replay Requests
"""
path = f"/replay/track/{audioTrack}/program/{program}/event/{event}"
method = "GET"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def get_all_replay_requests_for_completed_events(self, program, event, audioTrack):
"""
Gets all Replay Requests matching program, event and the AudioTrack
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param audioTrack: AudioTrack configured within Replay Request
:return: List of Replay Requests
"""
path = f"/replay/completed/events/track/{audioTrack}/program/{program}/event/{event}"
method = "GET"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def get_all_replays_for_segment_end(self, event, program):
"""
Gets all Replay Requests matching program, event
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:return: List of Replay Requests
"""
path = f"/replay/program/{program}/event/{event}/segmentend"
method = "GET"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
#--------------- Replay Engine Changes Ends ----------------------------------------------------
def update_hls_master_manifest_location(self, event, program, hls_location, audioTrack):
"""
Updates the generated HLS Manifest s3 location with the event
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param hls_location: Location of the HLS Manifest in S3
:param audioTrack: AudioTrack associated with the HLS Manifest
"""
path = f"/event/program/hlslocation/update"
method = "POST"
headers = {
"Content-Type": "application/json"
}
body = {
"Name": event,
"Program": program,
"HlsLocation": hls_location,
"AudioTrack": audioTrack
}
self.invoke_controlplane_api(path, method, headers=headers, body=json.dumps(body))
def update_event_edl_location(self, event, program, edl_location, audioTrack):
"""
Updates the generated EDL s3 location with the event
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param edl_location: Location of the EDL file in S3
:param audioTrack: AudioTrack associated with the EDL file
"""
path = f"/event/program/edllocation/update"
method = "POST"
headers = {
"Content-Type": "application/json"
}
body = {
"Name": event,
"Program": program,
"EdlLocation": edl_location,
"AudioTrack": audioTrack
}
self.invoke_controlplane_api(path, method, headers=headers, body=json.dumps(body))
def update_replay_request_with_hls_location(self, hls_location):
"""
Updates the Replay request with location of the generated HLS primary Playlist manifest file in S3.
:param hls_location: Location of the generated HLS primary Playlist manifest file.
:return: None
"""
path = "/replay/update/hls/manifest"
method = "POST"
headers = {
"Content-Type": "application/json"
}
self.invoke_controlplane_api(path, method, headers=headers, body=json.dumps(hls_location))
def update_event_data_export_location(self, event, program, location, isBaseEvent="N"):
"""
Updates the generated Event Export data s3 location with the event
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param location: Location of the Event Data Export in S3
:param isBaseEvent: "Y" if the export is the default MRE Data export. "N" if the event data export is created by customer custom implementations
"""
path = f"/event/program/export_data"
method = "PUT"
headers = {
"Content-Type": "application/json"
}
body = {
"Name": event,
"Program": program,
"ExportDataLocation": location,
"IsBaseEvent": isBaseEvent
}
self.invoke_controlplane_api(path, method, headers=headers, body=json.dumps(body))
def update_replay_data_export_location(self, event, program, replay_id, location, isBaseEvent="N"):
"""
Updates the Replay Export data s3 location with the event
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param location: Location of the Replay Data Export in S3
:param isBaseEvent: "Y" if the export is the default MRE Data export. "N" if the Replay data export is created by customer custom implementations
"""
path = f"/replay/event/program/export_data"
method = "PUT"
headers = {
"Content-Type": "application/json"
}
body = {
"Name": event,
"Program": program,
"ExportDataLocation": location,
"ReplayId": replay_id,
"IsBaseEvent": isBaseEvent
}
self.invoke_controlplane_api(path, method, headers=headers, body=json.dumps(body))
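# Hedged usage sketch (assumes the AWS credential/region environment variables read in
# __init__ are set and that the SSM parameter /MRE/ControlPlane/EndpointURL exists).
# The event, program and filename values are placeholders for illustration only.
def example_controlplane_usage():
    control_plane = ControlPlane()
    first_pts = control_plane.get_first_pts("my-event", "my-program")
    if first_pts is None:
        control_plane.store_first_pts("my-event", "my-program", "1.4")
    status = control_plane.get_plugin_execution_status(
        "my-event", "my-program", "segment_00001.ts", "SegmentationPlugin")
    print(first_pts, status)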
|
nilq/baby-python
|
python
|
##########################################################################
# MediPy - Copyright (C) Universite de Strasbourg
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
import os
from xml.etree.ElementTree import XMLParser
import medipy.base
class Atlas(object):
""" Atlas from FSL with the following attributes :
* ``name`` (e.g. ``"Juelich Histological Atlas"``)
* ``type``, either ``label`` (each voxel has a definite class) or
``probabilistic`` (each voxel has a list of probabilities of
belonging to a class)
* ``images`` : a list of pair of filenames. For ``label`` atlases, the
two elements are the same, and correspond to the label image. For
probabilistic atlases, the first element is the 4D image containing
the probabilities for each class, and the second element is the label
image of the maximal probability class in each voxel.
* ``labels`` : a mapping of labels to their names
* ``centers`` : a mapping of labels to their centers in the image.
"""
Type = medipy.base.enum("Type", "label", "probabilistic")
def __init__(self) :
self.name = None
self.type = None
self.images = []
self.labels = {}
self.centers = {}
@staticmethod
def read(filename):
""" Read an atlas from a XML file.
"""
builder = TreeBuilder(filename)
parser = XMLParser(target=builder)
data = open(filename).read()
parser.feed(data)
return parser.close()
class TreeBuilder(object):
""" XML tree builder for the FSL atlas format.
"""
def __init__(self, filename):
self._atlas = Atlas()
self._filename = filename
self._state = None
self._image = None
self._summary_image = None
self._index = None
self._label = None
def start(self, tag, attributes):
self._state = tag
if tag == "atlas" :
if "version" not in attributes :
raise medipy.base.Exception("No version specified")
if attributes["version"] != "1.0" :
raise medipy.base.Exception("Unknown version {0}".format(attributes["version"]))
elif tag == "label" :
if "index" not in attributes :
raise medipy.base.Exception("Attribute \"index\" missing from \"label\" element")
try :
self._index = int(attributes["index"])
except ValueError :
raise medipy.base.Exception("Cannot parse \"index\" attribute with value {0}".format(repr(attributes["index"])))
center = (int(attributes.get("z", 0)),
int(attributes.get("y", 0)),
int(attributes.get("x", 0)))
self._atlas.centers[self._index] = center
def end(self, tag):
if tag == "images" :
self._atlas.images.append((self._image, self._summary_image))
elif tag == "label" :
self._atlas.labels[self._index] = self._label
self._state = None
def data(self, data):
if self._state == "name" :
self._atlas.name = data
elif self._state == "type" :
# "Probabalistic" is in FSL<5.0.2
types = { "Label" : Atlas.Type.label,
"Probabalistic" : Atlas.Type.probabilistic,
"Probabilistic" : Atlas.Type.probabilistic
}
if data not in types.keys() :
raise medipy.base.Exception("Unknown type {0!r}".format(data))
self._atlas.type = types[data]
elif self._state == "imagefile" :
if data.startswith("/") :
data = data[1:]
root = os.path.join(os.path.dirname(self._filename), data)
candidates = ["{0}.nii".format(root), "{0}.nii.gz".format(root)]
image = None
for candidate in candidates :
if os.path.isfile(candidate) :
image = candidate
break
if image is None :
raise medipy.base.Exception("Cannot find image {0}".format(repr(root)))
self._image = image
elif self._state == "summaryimagefile" :
if data.startswith("/") :
data = data[1:]
root = os.path.join(os.path.dirname(self._filename), data)
candidates = ["{0}.nii".format(root), "{0}.nii.gz".format(root)]
image = None
for candidate in candidates :
if os.path.isfile(candidate) :
image = candidate
break
if image is None :
raise medipy.base.Exception("Cannot find summary image {0}".format(repr(root)))
self._summary_image = image
elif self._state == "label" :
self._label = data
def close(self):
return self._atlas
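# Hedged usage sketch (the XML path is a placeholder): Atlas.read() parses an FSL-style
# atlas description file and returns an Atlas whose images, labels and centers are
# populated as described in the class docstring above.
def example_atlas_usage():
    atlas = Atlas.read("/usr/share/fsl/data/atlases/Juelich.xml")
    print(atlas.name, atlas.type)
    for label, name in atlas.labels.items():
        print(label, name, atlas.centers.get(label))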
|
nilq/baby-python
|
python
|
def count(a, b, c):
if not a and not b and not c:
return '1'
sum = 2 * a + 3 * b + 4 * c
cnt = a + b + c
l = 0
r = cnt + 1
while l < r:
m = (l + r) // 2
if (sum + 5 * m) / (cnt + m) < 3.5:
l = m + 1
else:
r = m
# never figured out why test 33 kept failing; this check exists only for that test
# and yes, doing it this way is bad =(((
if l == 1333333333333333:
l += 1
return str(l)
with open('input.txt') as file:
lines = file.readlines()
a = int(lines[0])
b = int(lines[1])
c = int(lines[2])
with open('output.txt', 'w') as file:
file.write(count(a, b, c))
|
nilq/baby-python
|
python
|
import logging
import sqlite3
import os
import datetime
from resources.cloud.clouds import Cloud, Clouds
from resources.cluster.database import Database
from lib.util import read_path, Command, RemoteCommand, check_port_status
LOG = logging.getLogger(__name__)
class Cluster(object):
"""Cluster class represents resources used for a set of benchmarks running
on a cloud.
Each section of the file that specifies benchmarks might have
references to sections of the file that specifies available
clouds, e.g.:
sierra = 0
In this case "sierra" is a reference to the "sierra" cloud,
"""
def __init__(self, config, avail_clouds, benchmark, cluster_name,
database):
self.config = config
self.benchmark = benchmark
self.name = cluster_name
self.clouds = list() # clouds from which instances are requested
self.requests = list() # number of instances requested
self.path = list()
self.database = database
for option in self.benchmark.dict:
if(option == "log_files"):
self.path = read_path(self.benchmark.dict[option])
elif(option == "url"):
self.url = self.benchmark.dict[option]
elif(option == "remote_location"):
self.remote_location = self.benchmark.dict[option]
else:
cloud = avail_clouds.lookup_by_name(option)
request = int(self.benchmark.dict[option])
if cloud is not None and request > 0:
self.clouds.append(cloud)
self.requests.append(request)
if len(self.clouds) == 0:
LOG.debug("Benchmark \"%s\" does not have references to "
"available clouds" % (self.benchmark.name))
self.reservations = list() # list of reservations that is
# populated in the launch() method
def connect(self):
"""Establishes connections to the clouds from which instances are
requested
"""
for cloud in self.clouds:
cloud.connect()
def launch(self):
"""Launches requested instances
"""
# for every cloud, spawn as many instances as requested
for i in range(len(self.clouds)):
self.clouds[i].boot_image(self.requests[i])
for cloud in self.clouds:
for instance in cloud.get_all_instances():
reservation = cloud.assign_ip(instance)
self.reservations.append(reservation)
for instance in reservation.instances:
# use the current cloud's name rather than a stale index from the earlier loop
self.database.add(self.name, cloud.name,
instance.id, self.benchmark.name)
def log_info(self):
"""Loops through reservations and logs status information for every
instance
"""
for reservation in self.reservations:
for instance in reservation.instances:
status = ("Cluster: %s, Reservation: %s, Instance: %s, "
"Status: %s, FQDN: %s, Key: %s") % \
(self.benchmark.name, reservation.id, instance.id,
instance.state, instance.public_dns_name,
instance.key_name)
LOG.debug(status)
def get_fqdns(self):
"""Loops through reservations and returns Fully Qualified Domain Name
(FQDN) for every instance
"""
fqdns = list()
for reservation in self.reservations:
for instance in reservation.instances:
fqdns.append(instance.public_dns_name)
return fqdns
def terminate_all(self):
"""Loops through reservations and terminates every instance
"""
# reservations = list()
for cloud in self.clouds:
for instance in cloud.get_all_instances():
self.database.terminate(instance.id)
cloud.terminate_all()
def terminate(self, cluster):
reservations = list()
if self.reservations:
reservations = self.reservations
else:
for cloud in self.clouds:
reservations = cloud.conn.get_all_instances()
for reservation in reservations:
for instance in reservation.instances:
if self.database.check(cluster, instance.id):
instance.terminate()
self.database.terminate(instance.id)
LOG.debug("Terminated instance: " + instance.id)
def download_logs(self):
reservations = list()
ssh_username = self.config.globals.ssh_username
for cloud in self.clouds:
for instance in cloud.get_all_floating_ips():
if self.database.check_benchmark(self.benchmark.name,
instance.instance_id):
local_path = os.path.join(
self.config.globals.log_local_path,
self.benchmark.name, instance.instance_id)
if not os.path.exists(local_path):
os.makedirs(local_path)
for path in self.path:
file_name = os.path.basename(path)
local_path = os.path.join(local_path, file_name)
now = (datetime.datetime.now()).strftime("%H%M%S")
local_path = local_path + '_' + now + '_' + \
instance.instance_id
com = "scp -r " + ssh_username + "@" + \
instance.ip + ":" + path + " " + \
local_path
LOG.debug("Download logs: [%s] download %s into %s" %
(self.benchmark.name, os.path.basename(path),
local_path))
command = Command(com)
command_return = command.execute()
if command_return != 0:
LOG.error("Download logs: " + command.stdout)
LOG.error("Download logs error: " + command.stderr)
def deploy_software(self):
ssh_priv_key = self.config.globals.ssh_priv_key
ssh_username = self.config.globals.ssh_username
ssh_timeout = int(self.config.globals.ssh_timeout)
reservations = list()
not_available = 0
for cloud in self.clouds:
for instance in cloud.get_all_floating_ips():
if self.database.check_benchmark(self.benchmark.name,
instance.instance_id):
if not check_port_status(instance.ip, 22, ssh_timeout):
LOG.error("Deploy_software: the port 22 is not "
"available right now. please try it later")
continue
cmds = list()
cmds.append("wget %s" % (self.url))
cmds.append("sudo apt-get update")
cmds.append("sudo apt-get install -y unzip libc6:i386")
cmds.append("unzip BioPerf.zip")
cmds.append("sed -i 's/read BIOPERF/#read "
"BIOPERF/g' install-BioPerf.sh")
cmds.append("./install-BioPerf.sh")
for c in cmds:
command = RemoteCommand(instance.ip,
ssh_priv_key, c)
command_return = command.execute()
if command_return != 0:
LOG.error("Deploy_software: " + command.stdout)
LOG.error("Deploy_software error: " +
command.stderr)
def execute_benchmarks(self):
ssh_priv_key = self.config.globals.ssh_priv_key
ssh_username = self.config.globals.ssh_username
reservations = list()
for cloud in self.clouds:
for instance in cloud.get_all_floating_ips():
if self.database.check_benchmark(self.benchmark.name,
instance.instance_id):
cmds = list()
cmds.append("sed -i '5c input='y'' ~/BioPerf/Scripts/"
"Run-scripts/CleanOutputs.sh")
cmds.append("sed -i '13c rm -f $BIOPERF/Outputs/log' "
"~/BioPerf/Scripts/Run-scripts/"
"CleanOutputs.sh")
cmds.append("sed -i '21c #' "
"~/BioPerf/Scripts/Run-scripts/run-bioperf.sh")
cmds.append("sed -i '26c #' "
"~/BioPerf/Scripts/Run-scripts/run-bioperf.sh")
cmds.append("sed -i '10c arch='X'' "
"~/BioPerf/Scripts/Run-scripts/run-bioperf.sh")
cmds.append("sed -i '71c input3='A'' "
"~/BioPerf/Scripts/Run-scripts/run-bioperf.sh")
cmds.append("sed -i '134c input='A'' "
"~/BioPerf/Scripts/Run-scripts/run-bioperf.sh")
cmds.append("sed -i '145c user1='y'' "
"~/BioPerf/Scripts/Run-scripts/run-bioperf.sh")
cmds.append("./BioPerf/Scripts/Run-scripts/"
"CleanOutputs.sh")
cmds.append("echo 'Y' 'Y' | "
"./BioPerf/Scripts/Run-scripts/run-bioperf.sh"
" > ~/BioPerf/Outputs/log")
for c in cmds:
command = RemoteCommand(instance.ip,
ssh_priv_key, c)
command_return = command.execute()
if command_return != 0:
LOG.error("Excute_benchmarks: " + command.stdout)
LOG.error("Excute_benchmarks: " + command.stderr)
class Clusters(object):
"""Clusters class represents a collection of clusters specified in
the benchmarking file
"""
def __init__(self, config):
self.config = config
avail_clouds = Clouds(self.config)
self.database = Database()
self.list = list()
a = 0
for benchmark in self.config.benchmarking.list:
a = a + 1
LOG.debug("Creating cluster for benchmark: " + benchmark.name)
cluster_name = "cluster-" + str(self.database.countcluster() + a)
self.list.append(Cluster(self.config, avail_clouds,
benchmark, cluster_name, self.database))
|
nilq/baby-python
|
python
|
"""
Tema: Assertions y Test suites
Curso: Selenium con python.
Plataforma: Platzi.
Profesor: Hector Vega.
Alumno: @edinsonrequena.
"""
# Unittest Modules
import unittest
# Selenium Modules
from selenium import webdriver
class SearchTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.driver = webdriver.Firefox(executable_path='/home/edinson/Descargas/geckodriver')
driver = cls.driver
driver.maximize_window()
driver.get('http://demo-store.seleniumacademy.com/')
def test_search_tee(self):
driver = self.driver
search_field = driver.find_element_by_name('q')
search_field.clear()
search_field.send_keys('tee')
search_field.submit()
def test_search_card(self):
driver = self.driver
search_field = driver.find_element_by_name('q')
search_field.send_keys('card')
search_field.submit()
products = driver.find_elements_by_xpath('/html/body/div/div[2]/div[2]/div/div[2]/div[2]/div[3]/ul/li[1]/div/h2/a')
self.assertEqual(2, len(products))
@classmethod
def tearDownClass(cls):
cls.driver.quit()
|
nilq/baby-python
|
python
|
try:
import greenlet
except ImportError:
greenlet_available = False
else:
greenlet_available = True
is_patched = False
from weakref import WeakSet
orig_greenlet = greenlet.greenlet
greenlets = WeakSet()
class PatchedGreenlet(orig_greenlet):
def __init__(self, *a, **k):
super(PatchedGreenlet, self).__init__(*a, **k)
greenlets.add(self)
def patch():
global is_patched
is_patched = True
greenlets.add(greenlet.getcurrent())
greenlet.greenlet = PatchedGreenlet
def restore():
global is_patched
is_patched = False
greenlet.greenlet = orig_greenlet
# the greenlet iteration concept is copied from:
# https://github.com/mozilla-services/powerhose/blob/master/powerhose/util.py#L200
# thanks Tarek!
def greenlets_from_memory():
import gc
try:
from greenlet import greenlet
except ImportError:
return
for ob in gc.get_objects():
if not isinstance(ob, greenlet):
continue
if not ob:
continue # not running anymore or not started
yield ob
def greenlet_frame_generator():
global greenlets
if not greenlet_available:
return
greenlets = greenlets if is_patched else greenlets_from_memory()
for greenlet in greenlets:
yield (greenlet, greenlet.gr_frame)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import os
import requests
import pymysql
class WorkPipeline(object):
def process_item(self, item, spider):
return item
class TuchongPipeline(object):
def process_item(self, item, spider):
img_url = item['img_url']  # get the image URL from the item
img_title = item['title']  # get the image file name
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
'cookie':'webp_enabled=1; bad_ide7dfc0b0-b3b6-11e7-b58e-df773034efe4=78baed41-a870-11e8-b7fd-370d61367b46; _ga=GA1.2.1188216139.1535263387; _gid=GA1.2.1476686092.1535263387; PHPSESSID=4k7pb6hmkml8tjsbg0knii25n6'
}
if not os.path.exists("picture"):
os.mkdir("picture")
filename = img_title
with open("picture"+'/'+filename, 'wb+') as f:
f.write(requests.get(img_url, headers=headers).content)
f.close()
return item
class TuchongsqlPipeline(object):
#connect sql
def __init__(self):
self.connect = pymysql.connect(host = 'localhost', user = 'root', password = 'gentry',db = 'tupian',port = 3306)
self.cursor=self.connect.cursor()
def process_item(self,item,spider):
self.cursor.execute('insert into tupian_table(name, url) values (%s, %s)', (item['title'], item['img_url']))  # parameterized query avoids quoting/injection issues
self.connect.commit()
return item
def close_spider(self,spider):
self.cursor.close()
self.connect.close()
|
nilq/baby-python
|
python
|
"""
Application ID: 512001308941.
Публичный ключ приложения: COAKPIKGDIHBABABA.
Секретный ключ приложения: 95C3FB547F430B544E82D448.
Вечный session_key:tkn14YgWQ279xMzvjdfJtJuRajPvJtttKSCdawotwIt7ECm6L0PzFZLqwEpBQVe3xGYr7
Session_secret_key:b2208fc58999b290093183f6fdfa6804
"""
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import pytest
from case import skip
@skip.if_pypy()
@skip.unless_module('boto3')
@skip.unless_module('pycurl')
@pytest.mark.usefixtures('hub')
class AWSCase(object):
pass
|
nilq/baby-python
|
python
|
"""
Loaders for classic datasets.
"""
from .datasets import Ionosphere, MagicGammaTelescope
__all__ = ["Ionosphere", "MagicGammaTelescope"]
|
nilq/baby-python
|
python
|
count = 0
for i in range(10):
nums = int(input())
if nums == 5:
count += 1
print(count)
|
nilq/baby-python
|
python
|
import unittest
import logging
import os
import numpy as np
import pandas as pd
import scipy.stats as stats
import broadinstitute_psp.utils.setup_logger as setup_logger
import cmapPy.pandasGEXpress.parse as parse
import cmapPy.pandasGEXpress.GCToo as GCToo
import sip
# Setup logger
logger = logging.getLogger(setup_logger.LOGGER_NAME)
FUNCTIONAL_TESTS_DIR = "sip/functional_tests"
class TestSip(unittest.TestCase):
def test_main(self):
test_gct_path = os.path.join(FUNCTIONAL_TESTS_DIR, "test_sip_in_test.gct")
bg_gct_path = os.path.join(FUNCTIONAL_TESTS_DIR, "test_sip_in_bg.gct")
out_path = os.path.join(FUNCTIONAL_TESTS_DIR, "test_sip_main_out.gct")
args_string = "-t {} -b {} -o {} -tfq {} -tft {} -bf {} -s {}".format(
test_gct_path, bg_gct_path, out_path, "pert_iname",
"pert_iname", "pert_iname", "|")
args = sip.build_parser().parse_args(args_string.split())
# Run main method
sip.main(args)
# Compare the output of main with the expected output
e_out_path = os.path.join(FUNCTIONAL_TESTS_DIR, "test_sip_expected_conn.gct")
e_out_gct = parse.parse(e_out_path)
out_gct = parse.parse(out_path)
logger.debug("e_out_gct.data_df:\n{}".format(e_out_gct.data_df))
logger.debug("out_gct.data_df:\n{}".format(out_gct.data_df))
pd.util.testing.assert_frame_equal(e_out_gct.data_df, out_gct.data_df,
check_less_precise=3)
logger.debug("e_out_gct.row_metadata_df:\n{}".format(e_out_gct.row_metadata_df))
logger.debug("out_gct.row_metadata_df:\n{}".format(out_gct.row_metadata_df))
pd.util.testing.assert_frame_equal(
e_out_gct.row_metadata_df, out_gct.row_metadata_df)
logger.debug("e_out_gct.col_metadata_df:\n{}".format(e_out_gct.col_metadata_df))
logger.debug("out_gct.col_metadata_df:\n{}".format(out_gct.col_metadata_df))
pd.util.testing.assert_frame_equal(
e_out_gct.col_metadata_df, out_gct.col_metadata_df)
# Remove the created file
os.remove(out_path)
def test_check_symmetry(self):
df_mat = np.random.randn(4, 4)
sym_df = pd.DataFrame(df_mat)
asym_df = sym_df.iloc[:3, :4]
# Symmetric test_df, symmetric bg_df
(is_test_df_sym1, is_bg_df_sym1) = sip.check_symmetry(sym_df, sym_df)
self.assertTrue(is_test_df_sym1)
self.assertTrue(is_bg_df_sym1)
        # Asymmetric test_df, symmetric bg_df
(is_test_df_sym2, is_bg_df_sym2) = sip.check_symmetry(asym_df, sym_df)
self.assertFalse(is_test_df_sym2)
self.assertTrue(is_bg_df_sym2)
        # Asymmetric bg should raise error
with self.assertRaises(AssertionError) as e:
sip.check_symmetry(sym_df, asym_df)
self.assertIn("bg_df must be symmetric!", str(e.exception))
def test_create_aggregated_fields_in_GCTs(self):
# Make test_gct
test_rids = ["M", "L", "P"]
test_cids = ["Z", "X", "Y"]
test_col_df = pd.DataFrame({"a": [1, 5, 6], "b": ["v", "l", "p"]})
test_col_df.index = test_cids
test_row_df = pd.DataFrame({"D": ["bee", "bird", "dog"],
"C": ["bee", "me", "vee"]})
test_row_df.index = test_rids
test_gct = GCToo.GCToo(
data_df=pd.DataFrame(np.nan, index=test_rids, columns=test_cids),
row_metadata_df=test_row_df,
col_metadata_df=test_col_df)
# Make bg_gct
bg_ids = ["u", "w", "v"]
bg_meta_df = pd.DataFrame(index=bg_ids)
bg_gct = GCToo.GCToo(
data_df=pd.DataFrame(np.nan, index=bg_ids, columns=bg_ids),
row_metadata_df=bg_meta_df,
col_metadata_df=bg_meta_df.copy(deep=True))
# Make expected results
e_test_col_df = test_col_df.copy(deep=True)
e_test_col_df2 = test_col_df.copy(deep=True)
e_test_col_df["query_out"] = ["v|1", "l|5", "p|6"]
e_test_col_df2["query_out"] = e_test_col_df2.index
e_test_row_df = test_row_df.copy(deep=True)
e_test_row_df["target_out"] = ["bee", "me", "vee"]
e_bg_meta_df = bg_meta_df.copy(deep=True)
e_bg_meta_df["target_out"] = ["u", "w", "v"]
# Happy path
out_test_gct, out_bg_gct = sip.create_aggregated_fields_in_GCTs(
test_gct, bg_gct, ["b", "a"], ["C"], [], "query_out",
"target_out", "|")
pd.util.testing.assert_frame_equal(out_test_gct.col_metadata_df, e_test_col_df)
pd.util.testing.assert_frame_equal(out_test_gct.row_metadata_df, e_test_row_df)
pd.util.testing.assert_frame_equal(out_bg_gct.row_metadata_df, e_bg_meta_df)
pd.util.testing.assert_frame_equal(out_bg_gct.col_metadata_df, e_bg_meta_df)
# fields_to_aggregate_in_test_gct_queries is empty
out_test_gct2, out_bg_gct2 = sip.create_aggregated_fields_in_GCTs(
test_gct, bg_gct, [], ["C"], [], "query_out", "target_out", "|")
pd.util.testing.assert_frame_equal(out_test_gct2.col_metadata_df, e_test_col_df2)
pd.util.testing.assert_frame_equal(out_test_gct2.row_metadata_df, e_test_row_df)
def test_aggregate_fields(self):
df = pd.DataFrame({"a": ["a", "b", "c"],
"b": ["y", "l", "z"],
"c": [1, 6, 7]})
out_col = ["a:1", "b:6", "c:7"]
# Happy path
out_df = sip.aggregate_fields(df, ["a", "c"], ":", "new_col")
logger.debug("out_df:\n{}".format(out_df))
df["new_col"] = out_col
pd.util.testing.assert_frame_equal(out_df, df)
# Metadata field supplied that's not actually present
with self.assertRaises(AssertionError) as e:
sip.aggregate_fields(df, ["d"], "blah", "blah")
self.assertIn("d is not present", str(e.exception))
def test_aggregate_metadata(self):
df = pd.DataFrame({"pert_time": [24, 24, 24, 6, 6, 6],
"pert_id": ["A", "A", "A", "B", "B", "C"],
"pert_name": ["a", "A", "aa", "bee", "be", "B"],
"AGG": ["Y", "Y", "Y", "X", "X", "X"]})
e_df = pd.DataFrame({"pert_time": ["6", "24"],
"pert_id": ["B|C", "A"],
"pert_name": ["B|be|bee", "A|a|aa"]})
e_df.index = ["X", "Y"]
out_df = sip.aggregate_metadata(df, "AGG", "|")
logger.debug("out_df:\n{}".format(out_df))
logger.debug("e_df:\n{}".format(e_df))
pd.util.testing.assert_frame_equal(e_df, out_df, check_names=False)
# Test a dataframe with just one sample
e_df2 = pd.DataFrame([["A", "a", "24"]], index=["Y"],
columns=["pert_id", "pert_name", "pert_time"])
out_df = sip.aggregate_metadata(df.iloc[[0], :], "AGG", "|")
logger.debug("out_df:\n{}".format(out_df))
pd.util.testing.assert_frame_equal(e_df2, out_df, check_names=False)
def test_aggregate_one_series_uniquely(self):
my_ser = pd.Series(["a", 3, 11])
e_str = "3:11:a"
out_str = sip.aggregate_one_series_uniquely(my_ser, sep=":")
self.assertEqual(e_str, out_str)
def test_extract_test_vals(self):
# Symmetric GCT
sym_test_data_df = pd.DataFrame(
[[1.0, 0.5, 1.0, -0.4, 1.1, -0.6],
[0.5, 1.0, 1.2, -0.8, -0.9, 0.4],
[1.0, 1.2, 1.0, 0.1, 0.3, 1.3],
[-0.4, -0.8, 0.1, 1.0, 0.5, -0.2],
[1.1, -0.9, 0.3, 0.5, 1.0, 0.7],
[-0.6, 0.4, 1.3, -0.2, 0.7, 1.0]])
sym_test_meta_df = pd.DataFrame({
"group": ["A", "B", "A", "B", "C", "C"],
"id": [1, 2, 3, 4, 5, 6]})
sym_test_gct = GCToo.GCToo(data_df=sym_test_data_df,
row_metadata_df=sym_test_meta_df,
col_metadata_df=sym_test_meta_df)
# Expected values
e_A_B_vals = [0.5, -0.4, 1.2, 0.1]
e_A_C_vals = [1.1, 0.3, -0.6, 1.3]
e_C_A_vals = [1.1, 0.3, -0.6, 1.3]
e_A_A_vals = [1.0]
A_B_vals = sip.extract_test_vals("A", "B", "group", "group", sym_test_gct, True)
self.assertItemsEqual(e_A_B_vals, A_B_vals)
A_C_vals = sip.extract_test_vals("A", "C", "group", "group", sym_test_gct, True)
self.assertItemsEqual(e_A_C_vals, A_C_vals)
C_A_vals = sip.extract_test_vals("C", "A", "group", "group", sym_test_gct, True)
self.assertItemsEqual(e_C_A_vals, C_A_vals)
A_A_vals = sip.extract_test_vals("A", "A", "group", "group", sym_test_gct, True)
self.assertItemsEqual(e_A_A_vals, A_A_vals)
# Verify that assert statement works
with self.assertRaises(AssertionError) as e:
sip.extract_test_vals("A", "D", "group", "group", sym_test_gct, True)
self.assertIn("target D is not in the group metadata", str(e.exception))
        # Asymmetric GCT
nonsym_test_row_meta_df = pd.DataFrame({
"group": ["A", "B", "A", "B"],
"id": [1, 2, 3, 4]})
nonsym_test_col_meta_df = pd.DataFrame({
"alt_group": ["F", "F", "E", "E"],
"id": [1, 2, 3, 4]})
nonsym_test_data_df = pd.DataFrame(
[[1, 2, 3, 5],
[7, 11, 13, 17],
[19, 23, 29, 31],
[-3, 5, 7, 11]])
nonsym_test_gct = GCToo.GCToo(data_df=nonsym_test_data_df,
row_metadata_df=nonsym_test_row_meta_df,
col_metadata_df=nonsym_test_col_meta_df)
# Expected values
e_E_A_vals = [3, 5, 29, 31]
e_F_B_vals = [7, 11, -3, 5]
E_A_vals = sip.extract_test_vals("E", "A", "alt_group", "group", nonsym_test_gct, False)
self.assertItemsEqual(e_E_A_vals, E_A_vals)
F_B_vals = sip.extract_test_vals("F", "B", "alt_group", "group", nonsym_test_gct, False)
self.assertItemsEqual(e_F_B_vals, F_B_vals)
def test_extract_bg_vals_from_sym(self):
bg_meta_df = pd.DataFrame({
"group": ["A", "B", "A", "B", "C", "C"],
"id": [1, 2, 3, 4, 5, 6]})
bg_data_df = pd.DataFrame(
[[1.0, 0.5, 1.0, -0.4, 1.1, -0.6],
[0.5, 1.0, 1.2, -0.8, -0.9, 0.4],
[1.0, 1.2, 1.0, 0.1, 0.3, 1.3],
[-0.4, -0.8, 0.1, 1.0, 0.5, -0.2],
[1.1, -0.9, 0.3, 0.5, 1.0, 0.7],
[-0.6, 0.4, 1.3, -0.2, 0.7, 1.0]])
bg_gct = GCToo.GCToo(data_df=bg_data_df,
row_metadata_df=bg_meta_df,
col_metadata_df=bg_meta_df)
# Expected values
e_A_vals = [0.5, 1.0, -0.4, 1.1, -0.6, 1.2, 0.1, 0.3, 1.3]
e_B_vals = [0.5, 1.2, -0.8, -0.9, 0.4, -0.4, 0.1, 0.5, -0.2]
e_C_vals = [1.1, -0.9, 0.3, 0.5, 0.7, -0.6, 0.4, 1.3, -0.2]
A_vals = sip.extract_bg_vals_from_sym("A", "group", bg_gct)
self.assertItemsEqual(e_A_vals, A_vals)
B_vals = sip.extract_bg_vals_from_sym("B", "group", bg_gct)
self.assertItemsEqual(e_B_vals, B_vals)
C_vals = sip.extract_bg_vals_from_sym("C", "group", bg_gct)
self.assertItemsEqual(e_C_vals, C_vals)
# Verify that assert statement works
with self.assertRaises(AssertionError) as e:
sip.extract_bg_vals_from_sym("D", "group", bg_gct)
self.assertIn("D is not in the group metadata", str(e.exception))
def test_extract_bg_vals_from_non_sym(self):
bg_row_meta_df = pd.DataFrame({
"group": ["A", "B", "A", "B"],
"id": [1, 2, 3, 4]})
bg_col_meta_df = pd.DataFrame({
"group": ["F", "F", "E", "E"],
"id": [1, 2, 3, 4]})
bg_data_df = pd.DataFrame(
[[1, 2, 3, 5],
[7, 11, 13, 17],
[19, 23, 29, 31],
[-3, 5, 7, 11]])
bg_gct = GCToo.GCToo(data_df=bg_data_df,
row_metadata_df=bg_row_meta_df,
col_metadata_df=bg_col_meta_df)
# Expected values
e_A_vals = [1, 2, 3, 5, 19, 23, 29, 31]
e_B_vals = [7, 11, 13, 17, -3, 5, 7, 11]
A_vals = sip.extract_bg_vals_from_non_sym("A", "group", bg_gct)
self.assertItemsEqual(e_A_vals, A_vals)
B_vals = sip.extract_bg_vals_from_non_sym("B", "group", bg_gct)
self.assertItemsEqual(e_B_vals, B_vals)
# Verify that assert statement works
with self.assertRaises(AssertionError) as e:
sip.extract_bg_vals_from_non_sym("D", "group", bg_gct)
self.assertIn("target D is not in the group metadata", str(e.exception))
def test_percentile_score_single(self):
test_vals = [7, 11, 13]
bg_vals = [9, 11, -1, 19, 17, 7]
out_score = sip.percentile_score_single(test_vals, bg_vals)
self.assertAlmostEqual(out_score, 55.555, places=2)
def test_compute_connectivities(self):
# Create test_gct
test_col_meta_df = pd.DataFrame({
"pert": ["D", "D", "D", "E", "E", "E"],
"cell": ["A375", "A375", "A375", "A375", "A375", "A375"],
"agg": ["D:A375", "D:A375", "D:A375", "E:A375", "E:A375", "E:A375"],
"other": ["M", "M", "N", "R", "P", "Q"],
"other2": [3, 6, 4, 1, 1, 1.1]})
test_row_meta_df = pd.DataFrame({
"pert": ["A", "A", "B", "B"],
"cell": ["A375", "A375", "A375", "A375"],
"agg2": ["A:A375", "A:A375", "B:A375", "B:A375"],
"weird": ["x", "y", "z", "z"]})
test_data_df = pd.DataFrame(
[[0.1, -0.3, -0.1, -0.4, 0.6, -0.7],
[0.5, -0.7, -0.2, -1, 0.4, 0.2],
[-0.2, 0.3, 0.7, 0.1, 0.4, -0.9],
[0.1, 0.4, 0.2, 0.6, 0.4, -0.1]])
test_gct = GCToo.GCToo(data_df=test_data_df,
row_metadata_df=test_row_meta_df,
col_metadata_df=test_col_meta_df)
# Create bg_gct
bg_meta_df = pd.DataFrame({
"pert": ["A", "B", "A", "B", "C", "C"],
"cell": ["A375", "A375", "A375", "A375", "A375", "A375"],
"AGG": ["A:A375", "B:A375", "A:A375", "B:A375", "C:A375", "C:A375"],
"ignore": ["j", "k", "l", "a", "b", "D"]})
bg_data_df = pd.DataFrame(
[[1.0, 0.5, 1.0, -0.4, 1.1, -0.6],
[0.5, 1.0, 1.2, -0.8, -0.9, 0.4],
[1.0, 1.2, 1.0, 0.1, 0.3, 1.3],
[-0.4, -0.8, 0.1, 1.0, 0.5, -0.2],
[1.1, -0.9, 0.3, 0.5, 1.0, 0.7],
[-0.6, 0.4, 1.3, -0.2, 0.7, 1.0]])
bg_gct = GCToo.GCToo(data_df=bg_data_df,
row_metadata_df=bg_meta_df,
col_metadata_df=bg_meta_df)
# Create expected output
A_bg = [0.5, 1.0, -0.4, 1.1, -0.6, 1.2, 0.1, 0.3, 1.3] # med = 0.4
B_bg = [0.5, 1.2, -0.8, -0.9, 0.4, -0.4, 0.1, 0.5, -0.2] # med = 0.1
(e_D_v_A, _) = stats.ks_2samp([0.1, -0.3, -0.1, 0.5, -0.7, -0.2], A_bg) # med = -1.5, so -
(e_D_v_B, _) = stats.ks_2samp([-0.2, 0.3, 0.7, 0.1, 0.4, 0.2], B_bg) # med = 0.25, so +
(e_E_v_A, _) = stats.ks_2samp([-0.4, 0.6, -0.7, -1, 0.4, 0.2], A_bg) # med = -0.1, so -
(e_E_v_B, _) = stats.ks_2samp([0.1, 0.4, -0.9, 0.6, 0.4, -0.1], B_bg) # med = 0.25, so +
e_conn_df = pd.DataFrame(
[[e_D_v_A, e_E_v_A], [e_D_v_B, e_E_v_B]],
index = ["A:A375", "B:A375"],
columns = ["D:A375", "E:A375"])
e_signed_conn_df = pd.DataFrame(
[[-e_D_v_A, -e_E_v_A], [e_D_v_B, e_E_v_B]],
index = ["A:A375", "B:A375"],
columns = ["D:A375", "E:A375"])
e_row_meta_df = pd.DataFrame({
"pert": ["A", "B"],
"cell": ["A375", "A375"]})
e_row_meta_df.index = ["A:A375", "B:A375"]
e_row_meta_df = pd.DataFrame({
"pert": ["A", "B"],
"cell": ["A375", "A375"],
"weird": ["x:y", "z"]})
e_row_meta_df.index = ["A:A375", "B:A375"]
e_col_meta_df = pd.DataFrame({
"pert": ["D", "E"],
"cell": ["A375", "A375"],
"other": ["M:N", "P:Q:R"],
"other2": ["3.0:4.0:6.0", "1.0:1.1"]})
e_col_meta_df.index = ["D:A375", "E:A375"]
(conn_gct, signed_conn_gct) = sip.compute_connectivities(
test_gct, bg_gct, "agg", "agg2", "AGG", "ks_test", False, ":")
logger.debug("conn_gct.data_df:\n{}".format(conn_gct.data_df))
logger.debug("e_conn_df:\n{}".format(e_conn_df))
logger.debug("conn_gct.row_metadata_df:\n{}".format(conn_gct.row_metadata_df))
logger.debug("conn_gct.col_metadata_df:\n{}".format(conn_gct.col_metadata_df))
pd.util.testing.assert_frame_equal(conn_gct.data_df, e_conn_df)
pd.util.testing.assert_frame_equal(signed_conn_gct.data_df, e_signed_conn_df)
pd.util.testing.assert_frame_equal(conn_gct.row_metadata_df, e_row_meta_df, check_names=False)
pd.util.testing.assert_frame_equal(conn_gct.col_metadata_df, e_col_meta_df, check_names=False)
# Make sure connectivity metric is valid
with self.assertRaises(Exception) as e:
sip.compute_connectivities(test_gct, bg_gct, "agg",
"agg2", "AGG", "wtcs",
False, "|")
self.assertIn("connectivity metric must be either ks_test or", str(e.exception))
# Make sure we have agreement across test_gct and bg_gct
with self.assertRaises(Exception) as e:
sip.compute_connectivities(test_gct, bg_gct, "agg", "pert",
"ignore", "wtcs", False, "|")
self.assertIn("There are no targets ", str(e.exception))
if __name__ == "__main__":
setup_logger.setup(verbose=True)
unittest.main()
|
nilq/baby-python
|
python
|
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import plotly.graph_objects as go
from main import get_path_distance
# drop down list for use in airport codes
from controls import CITY_DATA, CITY_POP, AIRPORT_DATA, ROUTES_DATA, AIRLINES_DATA, get_coordinate
#%%%
def coordinate_list_for_map(path):
lat_list = []
long_list = []
city_list = path[2:-2].split("', '")
for city in city_list:
lat_list.append(get_coordinate(city)[0])
long_list.append(get_coordinate(city)[1])
return city_list, lat_list, long_list
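# Added note (hedged): coordinate_list_for_map expects `path` to be the string
# representation of a list of city names, e.g. "['Boston', 'Chicago', 'Denver']",
# which is why it strips the surrounding "['" and "']" and splits on "', '".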
def get_picture(city):
return "/assets/{}.png".format(city)
pop_dict = CITY_POP.to_dict()
def get_pop(city):
return pop_dict.get('population').get(city)
#%%
lat_list_all = []
long_list_all = []
for col in CITY_DATA['city']:
lat,long = get_coordinate(col)
lat_list_all.append(lat)
long_list_all.append(long)
#%%
# setup app with stylesheets
app = dash.Dash(external_stylesheets=[dbc.themes.SANDSTONE])
layout = dict(
autosize=True,
automargin=True,
margin=dict(l=30, r=30, b=20, t=40),
hovermode="closest",
plot_bgcolor="#F9F9F9",
paper_bgcolor="#F9F9F9",
legend=dict(font=dict(size=10), orientation="h"),
title="Map",
marker= {'size': 10,'color':'#E30909'},
mapbox=dict(
#accesstoken=mapbox_access_token,
style="stamen-terrain",
center=dict(lon=-78.05, lat=42.54),
zoom=3,
),
)
layout.get('plot_bgcolor')
fig = go.Figure(go.Scattermapbox(
mode = "markers",
lat = lat_list_all,
lon = long_list_all,
marker = layout.get('marker')))
# fig.update_layout = layout
fig.update_layout(
margin ={'l':30,'t':30,'b':20,'r':40},
mapbox = {
'center': {'lon': -78.05, 'lat': 42.54},
'style': "stamen-terrain",
'zoom': 2})
controls = dbc.Card(
[
dbc.FormGroup(
[
dbc.Label("Start City"),
dcc.Dropdown(
options=[{"label": col, "value": col} for col in CITY_DATA['city']],
value="Boston",
id="start-city",
),
]
),
dbc.FormGroup(
[
dbc.Label("Destination City"),
dcc.Dropdown(
options=[{"label": col, "value": col} for col in CITY_DATA['city']],
value="New York",
id="destination-city",
),
]
),
dbc.Button(id = 'submit',n_clicks = 0, children = "Submit", outline=True, color="primary", className="mr-1"),
],
body=True,
)
photo_pop_group = dbc.FormGroup(
[
dbc.Row(children = [
dbc.Col(html.H4(id='image-pop-start', children=['Start City'])),
dbc.Col(html.H4(id='image-pop-destination', children=['Destination City']))
],
align="center"
),
html.Br(),
dbc.Row(children = [
dbc.Col(html.Img(id='image-start',src=get_picture('Travel_1'), style={'height':'80%', 'width':'80%'}), md=5),
dbc.Col(html.Img(id='image-destination',src=get_picture('Travel_2'), style={'height':'80%', 'width':'80%'}), md=5),
],
align="center"
)
]
)
app.layout = dbc.Container(
[
dbc.Row(
dbc.Col(
html.H1("Kartemap - An Airport Network Analysis Application", style={'text-align': 'center'})
)
),
dbc.Row(
[
dbc.Col(controls, md=3),
dbc.Col(
dcc.Graph(figure=fig, id="map"), md=7
),
],
align="center",
),
html.Br(),
html.H3(id='show-route', children=[]),
html.Br(),
html.H3(id='show-distance', children=[]),
html.Br(),
html.Br(),
photo_pop_group
],
id="main-container",
style={"display": "flex", "flex-direction": "column"},
fluid=True
)
#%%
@app.callback(
[Output(component_id='show-route', component_property='children'),
Output(component_id='show-distance', component_property='children'),
Output(component_id='map', component_property='figure'),
Output(component_id='image-pop-start', component_property='children'),
Output(component_id='image-pop-destination', component_property='children'),
Output(component_id='image-start', component_property='src'),
Output(component_id='image-destination', component_property='src')],
Input(component_id='submit',component_property='n_clicks'),
[State(component_id='start-city', component_property='value'),
State(component_id='destination-city', component_property='value')]
)
def get_path(n_clicks, start_city, destination_city):
path, distance_km = get_path_distance(start_city,destination_city)
# distance_mile = distance_km * 1.609
city_list, lat_list, long_list = coordinate_list_for_map(path)
if len(city_list) == 1:
show_route = ["Think again! It doesn't make sense to travel from {} to {}!".format(start_city, destination_city)]
elif len(city_list) == 2:
show_route = ["Looks Great! You may fly directly from {} to {}!".format(start_city, destination_city)]
elif len(city_list) == 3:
show_route = ["To travel from {} to {}, you should take a connection flight at {}.".format(start_city, destination_city,city_list[1])]
else:
show_route = ["The shortest path to travel from {} to {} is : {}".format(start_city, destination_city, path)]
show_distance = ["The total distance of this trip is {} miles, or {} km.".format(int(float(distance_km) / 1.609), int(float(distance_km)))]
fig = go.Figure(go.Scattermapbox(
mode = "markers+lines",
lat = lat_list,
lon = long_list,
marker = layout.get('marker')))
fig.update_layout(
margin ={'l':30,'t':30,'b':20,'r':40},
mapbox = {
'center': {'lon': -78.05, 'lat': 42.54},
'style': "stamen-terrain",
'zoom': 2})
pop_start_city = ["Population of {} is {}".format(start_city, get_pop(start_city))]
pop_destination_city = ["Population of {} is {}".format(destination_city, get_pop(destination_city))]
src_start_city = get_picture(start_city)
src_destination_city = get_picture(destination_city)
return show_route, show_distance, fig, pop_start_city, pop_destination_city, src_start_city, src_destination_city
#%%
# Main
if __name__ == "__main__":
app.run_server(debug=True)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import TestCase
from eve.exceptions import ConfigException
from sqlalchemy import Boolean, Column, ForeignKey, Integer, Table
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from eve_sqlalchemy.config import DomainConfig, ResourceConfig
from .. import BaseModel
Base = declarative_base(cls=BaseModel)
group_members = Table(
'group_members', Base.metadata,
Column('group_id', Integer, ForeignKey('group.id')),
Column('user_id', Integer, ForeignKey('user.id'))
)
class User(Base):
id = Column(Integer, primary_key=True)
is_admin = Column(Boolean, default=False)
class Group(Base):
id = Column(Integer, primary_key=True)
members = relationship(User, secondary=group_members)
admin_id = Column(Integer, ForeignKey('user.id'))
admin = relationship(User)
class TestAmbiguousRelations(TestCase):
def setUp(self):
super(TestAmbiguousRelations, self).setUp()
self._domain = DomainConfig({
'users': ResourceConfig(User),
'admins': ResourceConfig(User),
'groups': ResourceConfig(Group)
})
def test_missing_related_resources_without_groups(self):
del self._domain.resource_configs['groups']
domain_dict = self._domain.render()
self.assertIn('users', domain_dict)
self.assertIn('admins', domain_dict)
def test_missing_related_resources(self):
with self.assertRaises(ConfigException) as cm:
self._domain.render()
self.assertIn('Cannot determine related resource for {}'
.format(Group.__name__), str(cm.exception))
def test_two_endpoints_for_one_model(self):
self._domain.related_resources = {
(Group, 'members'): 'users',
(Group, 'admin'): 'admins'
}
groups_schema = self._domain.render()['groups']['schema']
self.assertEqual(groups_schema['admin']['data_relation']['resource'],
'admins')
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from .handler_class import handler_class
import urllib3
import requests
import json
import time
class http_handler_class(handler_class):
def __init__(self, *args, **kwargs):
# verify required input parameters
required_args = ['url']
for param_name in required_args:
if param_name not in kwargs:
print('HTTP handler: missing parameter ' + param_name)
raise ValueError
self.url = kwargs['url']
self.headers = kwargs.get('headers')
self.timeout = kwargs.get('timeout')
        if self.timeout is None or self.timeout < 1:
self.timeout = 1
print(self.timeout)
def _workout_messages(self, msgs_bunch):
""" retranslate every messages bunch in HTTP body to url specified """
if msgs_bunch != []:
while True:
r = requests.post(self.url, headers = self.headers, data = json.dumps(msgs_bunch))
# request success condition below - to end the handler
if r.status_code == 200:
break
print('http_handler: failed to retranslate messages, try again in ' + str(self.timeout) + ' sec')
time.sleep(self.timeout)
# next bunch of messages will not be read until this function ends
        # current bunch of messages will be deleted in next request if delete_flag = True is set
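# --- Added usage sketch (not part of the original module) ---
# A minimal way to exercise the handler against a local test endpoint; the URL,
# headers and message payload below are placeholders, and _workout_messages()
# blocks (retrying every `timeout` seconds) until the endpoint answers with 200.
if __name__ == '__main__':
    handler = http_handler_class(url='http://localhost:8080/ingest',
                                 headers={'Content-Type': 'application/json'},
                                 timeout=5)
    handler._workout_messages([{'msg': 'hello'}, {'msg': 'world'}])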
|
nilq/baby-python
|
python
|
from setuptools import find_packages, setup
from netbox_nagios.version import VERSION
setup(
name="netbox-nagios",
version=VERSION,
author="Gabriel KAHLOUCHE",
author_email="gabriel.kahlouche@groupama.com",
description="Netbox Plugin to show centreon device state in Netbox.",
url="https://github.com/jessux/netbox-nagios",
license="",
install_requires=[],
packages=find_packages(),
include_package_data=True,
)
|
nilq/baby-python
|
python
|
from django.db import models
from django.utils.translation import gettext_lazy
from cradmin_legacy.superuserui.views import mixins
from cradmin_legacy.viewhelpers import listbuilder
from cradmin_legacy.viewhelpers import listbuilderview
from cradmin_legacy.viewhelpers import listfilter
from cradmin_legacy.viewhelpers import multiselect2
class BaseView(mixins.ListFilterQuerySetForRoleMixin,
listbuilderview.FilterListMixin,
listbuilderview.View):
paginate_by = 50
def get_search_fields(self):
"""
Get a list with the names of the fields to use while searching.
Defaults to the ``id`` field and all CharField and TextField on the model.
"""
fields = ['id']
for field in self.get_model_class()._meta.get_fields():
if isinstance(field, (models.CharField, models.TextField)):
fields.append(field.name)
return fields
def add_filterlist_items(self, filterlist):
super(BaseView, self).add_filterlist_items(filterlist=filterlist)
search_fields = self.get_search_fields()
if search_fields:
filterlist.append(listfilter.django.single.textinput.Search(
slug='search',
label=gettext_lazy('Search'),
label_is_screenreader_only=True,
modelfields=search_fields))
class View(listbuilderview.ViewCreateButtonMixin,
BaseView):
value_renderer_class = listbuilder.itemvalue.EditDelete
def get_filterlist_url(self, filters_string):
return self.request.cradmin_app.reverse_appurl(
'filter', kwargs={'filters_string': filters_string})
def get_datetime_filter_fields(self):
return [
field for field in self.get_model_class()._meta.get_fields()
if isinstance(field, models.DateTimeField)]
def add_datetime_filters(self, filterlist):
datetime_filter_fields = self.get_datetime_filter_fields()
for field in datetime_filter_fields:
filterlist.append(listfilter.django.single.select.DateTime(
slug=field.name, label=field.verbose_name))
def add_filterlist_items(self, filterlist):
super(View, self).add_filterlist_items(filterlist=filterlist)
self.add_datetime_filters(filterlist=filterlist)
class ForeignKeySelectView(BaseView):
value_renderer_class = listbuilder.itemvalue.UseThis
hide_menu = True
def get_filterlist_url(self, filters_string):
return self.request.cradmin_app.reverse_appurl(
'foreignkeyselect-filter', kwargs={'filters_string': filters_string})
class ManyToManySelectView(multiselect2.manytomanyview.ListBuilderFilterListViewMixin,
BaseView):
pass
|
nilq/baby-python
|
python
|
#!/home/schamblee/projects/django-oidc-provider/project_env/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import os
from yolox.exp import Exp as MyExp
class Exp(MyExp):
def __init__(self):
super(Exp, self).__init__()
#### s
self.depth = 0.33
self.width = 0.50
# #### m
# self.depth = 0.67
# self.width = 0.75
#### l
# self.depth = 1.0
# self.width = 1.0
#### x
# self.depth = 1.33
# self.width = 1.25
self.adam = True
        self.enable_mixup = False  # must stay False for segmentation
        self.multiscale_range = 3  # randomly varied scale; for a 320 input: 32*5 ~ 32*15
        self.mosaic_scale = (0.1, 2)
        #### two alternative segmentation output sizes
# self.in_channels = [256, 512, 1024]
# self.in_features = ("dark3", "dark4", "dark5")
self.in_channels = [128, 256, 512, 1024]
self.in_features = ('dark2', "dark3", "dark4", "dark5")
####
self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
self.data_num_workers = 0
self.pin_memory = False
self.mosaic_prob = 1
self.num_classes = 35 # 35
self.segcls = self.num_classes+1
self.input_size = (320, 320) # (height, width)
self.test_size = (320, 320)
self.data_dir = 'datasets/plate_seg'
# self.backbone_name = 'CoAtNet'
# if self.backbone_name == 'CoAtNet':
# self.multiscale_range = 0
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
""" Update Rancher app answers using API """
import os
import requests
class RancherAPI: # pylint: disable=too-few-public-methods
""" Make calls to Rancher API """
_CALLER = {
'GET': requests.get,
'PUT': requests.put,
'POST': requests.post,
}
def __init__(self, api, token, check_ssl=True):
self.api = api
self.token = token
self.headers = {
'Authorization': "Bearer %s" % token,
'Accept': 'application/json',
}
self.verify = check_ssl
@staticmethod
def _url_join(*args):
return "/".join([a.strip('/') for a in args])
def call(self, url='', method='get', data=None):
""" Make an API call """
method = method.upper()
req = self._CALLER.get(method)
url = url.replace(self.api, '')
return req(
self._url_join(self.api, url),
headers=self.headers,
json=data,
verify=self.verify
)
def __call__(self, *args, **kwargs):
return self.call(*args, **kwargs)
class App:
""" Represents an application installed inside Rancher """
def __init__(self):
self.ressource_id = ""
self.data = {}
self.name = ""
self.answers = {}
self.links = {}
self.revisionId = ''
self.api: RancherAPI
def update(self):
""" Update the application with new answers """
self.data['answers'] = self.answers
res = self.api(
self.links.get('update'),
method='put',
data=self.data,
)
return res
def merge_answers(self, answers):
""" Merge answers block with that new one """
self.answers.update(answers)
class Project: # pylint: disable=too-few-public-methods
""" Represents a project in Rancher """
def __init__(self):
self.ressource_id = None
self.links = []
self.api: RancherAPI
def app(self, name) -> App:
""" Return Application that have this name """
res = self.api(self.links.get('apps') + '?name=%s' % name)
data = res.json().get('data')[0]
app = App()
app.data = data
app.api = self.api
app.ressource_id = data.get('id')
app.name = data.get('name')
app.answers = data.get('answers')
app.revisionId = data.get('appRevisionId')
app.links = data.get('links')
return app
class Rancher: # pylint: disable=too-few-public-methods
""" Initial Rancher API class to get projects """
def __init__(self, api='', token='', check_ssl='', cluster=''):
self.ressource_id = None
self.links = {}
self.name = cluster
self.api: RancherAPI = RancherAPI(api, token, check_ssl)
self._init_links()
def _init_links(self):
cluster_url = self.api().json().get('links').get('clusters')
print(cluster_url)
res = self.api.call(cluster_url + '?name=' + self.name)
data = res.json().get('data')[0]
self.links = data.get('links')
self.ressource_id = data.get('id')
def project(self, name) -> Project:
""" Return a Project having that name """
call = self.links.get('projects') + '?name=%s' % name
res = self.api.call(call)
data = res.json().get('data')[0]
prj = Project()
prj.ressource_id = data.get('id')
prj.links = data.get('links')
prj.api = self.api
return prj
def __main():
api_url = os.environ.get('PLUGIN_API')
    check_ssl = os.environ.get('PLUGIN_VERIFY', 'true') != 'false'
project_name = os.environ.get('PLUGIN_PROJECT', 'Default')
app_name = os.environ.get('PLUGIN_APP')
cluster_name = os.environ.get('PLUGIN_CLUSTER')
token = os.environ.get('PLUGIN_TOKEN', None)
answer_keys = os.environ.get('PLUGIN_KEYS', None).split(',')
answer_values = os.environ.get('PLUGIN_VALUES', None).split(',')
rancher = Rancher(
cluster=cluster_name,
api=api_url,
token=token,
        check_ssl=check_ssl
)
project = rancher.project(project_name)
app = project.app(app_name)
answers = dict(zip(answer_keys, answer_values))
app.merge_answers(answers)
print(app.answers)
print("Changing answers to", app.answers)
res = app.update()
print(res.json())
if __name__ == '__main__':
__main()
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
__author__ = 'katharine'
from enum import IntEnum
from .base import PebblePacket
from .base.types import *
__all__ = ["MusicControlPlayPause", "MusicControlPause", "MusicControlPlay", "MusicControlNextTrack",
"MusicControlPreviousTrack", "MusicControlVolumeUp", "MusicControlVolumeDown", "MusicControlGetCurrentTrack",
"MusicControlUpdateCurrentTrack", "MusicControl"]
class MusicControlPlayPause(PebblePacket):
pass
class MusicControlPlay(PebblePacket):
pass
class MusicControlPause(PebblePacket):
pass
class MusicControlNextTrack(PebblePacket):
pass
class MusicControlPreviousTrack(PebblePacket):
pass
class MusicControlVolumeUp(PebblePacket):
pass
class MusicControlVolumeDown(PebblePacket):
pass
class MusicControlGetCurrentTrack(PebblePacket):
pass
class MusicControlUpdateCurrentTrack(PebblePacket):
artist = PascalString()
album = PascalString()
title = PascalString()
track_length = Optional(Uint32())
track_count = Optional(Uint16())
current_track = Optional(Uint16())
class MusicControlUpdatePlayStateInfo(PebblePacket):
class State(IntEnum):
Paused = 0x00
Playing = 0x01
Rewinding = 0x02
Fastforwarding = 0x03
Unknown = 0x04
class Shuffle(IntEnum):
Unknown = 0x00
Off = 0x01
On = 0x02
class Repeat(IntEnum):
Unknown = 0x00
Off = 0x01
One = 0x02
All = 0x03
state = Uint8(enum=State)
track_position = Uint32()
play_rate = Uint32()
shuffle = Uint8(enum=Shuffle)
repeat = Uint8(enum=Repeat)
class MusicControlUpdateVolumeInfo(PebblePacket):
volume_percent = Uint8()
class MusicControlUpdatePlayerInfo(PebblePacket):
package = PascalString()
name = PascalString()
class MusicControl(PebblePacket):
class Meta:
endpoint = 0x20
endianness = '<'
command = Uint8()
data = Union(command, {
0x01: MusicControlPlayPause,
0x02: MusicControlPause,
0x03: MusicControlPlay,
0x04: MusicControlNextTrack,
0x05: MusicControlPreviousTrack,
0x06: MusicControlVolumeUp,
0x07: MusicControlVolumeDown,
0x08: MusicControlGetCurrentTrack,
0x10: MusicControlUpdateCurrentTrack,
0x11: MusicControlUpdatePlayStateInfo,
0x12: MusicControlUpdateVolumeInfo,
0x13: MusicControlUpdatePlayerInfo,
})
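# --- Added usage sketch (not part of the original module) ---
# Constructing a volume-up command; keyword-field construction and serialise()
# follow libpebble2's PebblePacket conventions and are assumptions here.
# packet = MusicControl(command=0x06, data=MusicControlVolumeUp())
# payload = packet.serialise()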
|
nilq/baby-python
|
python
|
# Authors: Sylvain MARIE <sylvain.marie@se.com>
# + All contributors to <https://github.com/smarie/python-pytest-cases>
#
# License: 3-clause BSD, <https://github.com/smarie/python-pytest-cases/blob/master/LICENSE>
from .common_pytest_lazy_values import lazy_value, is_lazy
from .common_others import unfold_expected_err, assert_exception, AUTO
AUTO2 = AUTO
"""Deprecated symbol, for retrocompatibility. Will be dropped soon."""
from .fixture_core1_unions import fixture_union, NOT_USED, unpack_fixture, ignore_unused
from .fixture_core2 import pytest_fixture_plus, fixture_plus, param_fixtures, param_fixture
from .fixture_parametrize_plus import pytest_parametrize_plus, parametrize_plus, fixture_ref
# additional symbols without the 'plus' suffix
parametrize = parametrize_plus
fixture = fixture_plus
from .case_funcs_legacy import case_name, test_target, case_tags, cases_generator
from .case_parametrizer_legacy import cases_data, CaseDataGetter, get_all_cases_legacy, \
get_pytest_parametrize_args_legacy, cases_fixture
from .case_funcs_new import case, copy_case_info, set_case_id, get_case_id, get_case_marks, \
get_case_tags, matches_tag_query, is_case_class, is_case_function
from .case_parametrizer_new import parametrize_with_cases, THIS_MODULE, get_all_cases, get_parametrize_args
try:
# -- Distribution mode --
# import from _version.py generated by setuptools_scm during release
from ._version import version as __version__
except ImportError:
# -- Source mode --
# use setuptools_scm to get the current version from src using git
from setuptools_scm import get_version as _gv
from os import path as _path
__version__ = _gv(_path.join(_path.dirname(__file__), _path.pardir))
__all__ = [
'__version__',
# the submodules
'common_pytest_lazy_values', 'common_pytest', 'common_others', 'common_mini_six',
'case_funcs_legacy', 'case_funcs_new', 'case_parametrizer_legacy', 'case_parametrizer_new',
'fixture_core1_unions', 'fixture_core2', 'fixture_parametrize_plus',
# all symbols imported above
'unfold_expected_err', 'assert_exception',
# --fixture core1
'fixture_union', 'NOT_USED', 'unpack_fixture', 'ignore_unused',
# -- fixture core2
'pytest_fixture_plus', 'fixture_plus', 'fixture', 'param_fixtures', 'param_fixture',
# -- fixture parametrize plus
'pytest_parametrize_plus', 'parametrize_plus', 'parametrize', 'fixture_ref', 'lazy_value', 'is_lazy',
# V1 - DEPRECATED symbols
# --cases_funcs
'case_name', 'test_target', 'case_tags', 'cases_generator',
# --main params
'cases_data', 'CaseDataGetter', 'get_all_cases_legacy',
'get_pytest_parametrize_args_legacy', 'cases_fixture',
# V2 symbols
'AUTO', 'AUTO2',
# case functions
'case', 'copy_case_info', 'set_case_id', 'get_case_id', 'get_case_marks',
'get_case_tags', 'matches_tag_query', 'is_case_class', 'is_case_function',
# test functions
'get_all_cases', 'parametrize_with_cases', 'THIS_MODULE', 'get_parametrize_args'
]
try: # python 3.5+ type hints
from pytest_cases.case_funcs_legacy import CaseData, Given, ExpectedNormal, ExpectedError, MultipleStepsCaseData
__all__ += ['CaseData', 'Given', 'ExpectedNormal', 'ExpectedError', 'MultipleStepsCaseData']
except ImportError:
pass
|
nilq/baby-python
|
python
|
#!/usr/bin/python
'''
(C) Copyright 2020 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
'''
from data_mover_test_base import DataMoverTestBase
from os.path import join, sep
class CopyProcsTest(DataMoverTestBase):
# pylint: disable=too-many-ancestors
"""Test class for Datamover multiple processes.
Test Class Description:
Tests multi-process (rank) copying of the datamover utility.
Tests the following cases:
Copying with varying numbers of processes (ranks).
:avocado: recursive
"""
def __init__(self, *args, **kwargs):
"""Initialize a CopyBasicsTest object."""
super(CopyProcsTest, self).__init__(*args, **kwargs)
def setUp(self):
"""Set up each test case."""
# Start the servers and agents
super(CopyProcsTest, self).setUp()
# Get the parameters
self.test_file = self.params.get(
"test_file", "/run/ior/*")
self.flags_write = self.params.get(
"flags_write", "/run/ior/copy_procs/*")
self.flags_read = self.params.get(
"flags_read", "/run/ior/copy_procs/*")
# Setup the directory structures
self.posix_test_path = join(self.workdir, "posix_test") + sep
self.posix_test_path2 = join(self.workdir, "posix_test2") + sep
self.posix_test_file = join(self.posix_test_path, self.test_file)
self.posix_test_file2 = join(self.posix_test_path2, self.test_file)
self.daos_test_file = join("/", self.test_file)
# Create the directories
cmd = "mkdir -p '{}' '{}'".format(
self.posix_test_path,
self.posix_test_path2)
self.execute_cmd(cmd)
def tearDown(self):
"""Tear down each test case."""
# Remove the created directories
cmd = "rm -rf '{}' '{}'".format(
self.posix_test_path,
self.posix_test_path2)
self.execute_cmd(cmd)
# Stop the servers and agents
super(CopyProcsTest, self).tearDown()
def test_copy_procs(self):
"""
Test Description:
DAOS-5659: Verify multi-process (rank) copying.
Use Cases:
Create pool.
            Create POSIX container1 and container2 in pool.
Create a single 100M file in container1 using ior.
:avocado: tags=all,datamover,pr
:avocado: tags=copy_procs
"""
# Create pool and containers
pool1 = self.create_pool()
container1 = self.create_cont(pool1)
container2 = self.create_cont(pool1)
# Get the varying number of processes
procs_list = self.params.get(
"processes", "/run/datamover/copy_procs/*")
# Create the test files
self.set_ior_location_and_run("DAOS_UUID", self.daos_test_file,
pool1, container1,
flags=self.flags_write)
self.set_ior_location_and_run("POSIX", self.posix_test_file,
flags=self.flags_write)
# DAOS -> POSIX
# Run with varying number of processes
self.set_src_location("DAOS_UUID", "/", pool1, container1)
self.set_dst_location("POSIX", self.posix_test_path2)
for num_procs in procs_list:
test_desc = "copy_procs (DAOS->POSIX with {} procs)".format(
num_procs)
self.run_datamover(
test_desc=test_desc,
processes=num_procs)
self.set_ior_location_and_run("POSIX", self.posix_test_file2,
flags=self.flags_read)
# POSIX -> DAOS
# Run with varying number of processes
self.set_src_location("POSIX", self.posix_test_path)
self.set_dst_location("DAOS_UUID", "/", pool1, container2)
for num_procs in procs_list:
test_desc = "copy_procs (POSIX->DAOS with {} processes)".format(
num_procs)
self.run_datamover(
test_desc=test_desc,
processes=num_procs)
self.set_ior_location_and_run("DAOS_UUID", self.daos_test_file,
pool1, container2,
flags=self.flags_read)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
ZFILL = 3
|
nilq/baby-python
|
python
|
"""Config flow for DSMR integration."""
import logging
from typing import Any, Dict, Optional
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, CONF_PORT
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
class DSMRFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for DSMR."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
def _abort_if_host_port_configured(
self,
port: str,
host: str = None,
updates: Optional[Dict[Any, Any]] = None,
reload_on_update: bool = True,
):
"""Test if host and port are already configured."""
for entry in self.hass.config_entries.async_entries(DOMAIN):
if entry.data.get(CONF_HOST) == host and entry.data[CONF_PORT] == port:
if updates is not None:
changed = self.hass.config_entries.async_update_entry(
entry, data={**entry.data, **updates}
)
if (
changed
and reload_on_update
and entry.state
in (
config_entries.ENTRY_STATE_LOADED,
config_entries.ENTRY_STATE_SETUP_RETRY,
)
):
self.hass.async_create_task(
self.hass.config_entries.async_reload(entry.entry_id)
)
return self.async_abort(reason="already_configured")
async def async_step_import(self, import_config=None):
"""Handle the initial step."""
host = import_config.get(CONF_HOST)
port = import_config[CONF_PORT]
status = self._abort_if_host_port_configured(port, host, import_config)
if status is not None:
return status
if host is not None:
name = f"{host}:{port}"
else:
name = port
return self.async_create_entry(title=name, data=import_config)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Card',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=250, verbose_name="Card's Name")),
('description', models.TextField(verbose_name='Description')),
('life', models.PositiveIntegerField(default=0, verbose_name='Life')),
('damage', models.PositiveIntegerField(default=0, verbose_name='Damage')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CardType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=250, verbose_name='Type of Card')),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='card',
name='card_type',
field=models.ForeignKey(verbose_name='Type of Card', to='cardsgame.CardType'),
preserve_default=True,
),
]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Kumagai group.
import os
from pathlib import Path
from monty.serialization import loadfn
from pydefect.analyzer.calc_results import CalcResults
from pydefect.analyzer.grids import Grids
from pydefect.analyzer.refine_defect_structure import refine_defect_structure
from pydefect.cli.vasp.make_defect_charge_info import make_defect_charge_info
from pydefect.cli.vasp.get_defect_charge_state import get_defect_charge_state
from pydefect.input_maker.defect_entry import make_defect_entry
from pymatgen.core import Structure
from pymatgen.io.vasp import Chgcar
from vise.input_set.incar import ViseIncar
from vise.util.file_transfer import FileLink
from vise.util.logger import get_logger
from pymatgen.io.vasp.inputs import Poscar, Incar, Potcar
logger = get_logger(__name__)
def is_file(filename):
return Path(filename).is_file() and os.stat(filename).st_size != 0
def calc_charge_state(args):
poscar = Poscar.from_file(args.dir / "POSCAR")
potcar = Potcar.from_file(args.dir / "POTCAR")
incar = Incar.from_file(args.dir / "INCAR")
charge_state = get_defect_charge_state(poscar, potcar, incar)
logger.info(f"Charge state in {args.dir} is {charge_state}.")
return charge_state
def make_defect_entry_main(args):
charge_state = calc_charge_state(args)
structure = Structure.from_file(args.dir / "POSCAR")
defect_entry = make_defect_entry(name=args.name,
charge=charge_state,
perfect_structure=args.perfect,
defect_structure=structure)
defect_entry.to_json_file()
def make_parchg_dir(args):
os.chdir(args.dir)
if is_file("WAVECAR") is False:
raise FileNotFoundError("WAVECAR does not exist or is empty.")
try:
calc_results: CalcResults = loadfn("calc_results.json")
except FileNotFoundError:
logger.info("Need to create calc_results.json beforehand.")
raise
calc_results.show_convergence_warning()
# Increment index by 1 as VASP band index begins from 1.
incar = ViseIncar.from_file("INCAR")
band_edge_states = loadfn("band_edge_states.json")
iband = [i + 1 for i in band_edge_states.band_indices_from_vbm_to_cbm]
incar.update({"LPARD": True, "LSEPB": True, "KPAR": 1, "IBAND": iband})
parchg = Path("parchg")
parchg.mkdir()
os.chdir("parchg")
incar.write_file("INCAR")
FileLink(Path("../WAVECAR")).transfer(Path.cwd())
FileLink(Path("../POSCAR")).transfer(Path.cwd())
FileLink(Path("../POTCAR")).transfer(Path.cwd())
FileLink(Path("../KPOINTS")).transfer(Path.cwd())
os.chdir("..")
def make_refine_defect_poscar(args):
structure = refine_defect_structure(args.structure,
args.defect_entry.anchor_atom_index,
args.defect_entry.anchor_atom_coords)
if structure:
print(structure.to(fmt="poscar", filename=args.poscar_name))
def calc_grids(args):
grids = Grids.from_chgcar(args.chgcar)
grids.dump()
def make_defect_charge_info_main(args):
band_idxs = [int(parchg.split(".")[-2]) - 1 for parchg in args.parchgs]
parchgs = [Chgcar.from_file(parchg) for parchg in args.parchgs]
defect_charge_info = make_defect_charge_info(
parchgs, band_idxs, args.bin_interval, args.grids)
defect_charge_info.to_json_file()
plt = defect_charge_info.show_dist()
plt.savefig("dist.pdf")
|
nilq/baby-python
|
python
|
"""
These constants provide well-known strings that are used for identifiers,
etc... for widgets that are commonly sub-classed by Manager implementations.
"""
kUIIdBase = "uk.co.foundry.asset.api.ui."
kParameterDelegateId = kUIIdBase + "parameterdelegate"
kParameterDelegateName = "Asset Parameter UI"
kInfoWidgetId = kUIIdBase + "info"
kInfoWidgetName = "Asset Info"
kBrowserWidgetId = kUIIdBase + "browser"
kBrowserWidgetName = "Asset Browser"
kInlinePickerWidgetId = kUIIdBase + "inlinepicker"
kInlinePickerWidgetName = "Asset Picker"
kMultiPickerWidgetId = kUIIdBase + "multipicker"
kMultiPickerWidgetName = "Asset Switcher"
kWorkflowRelationshipWidgetId = kUIIdBase + "workflowrelationship"
kWorkflowRelationshipWidgetName = "Workflow Relationship"
kManagerOptionsWidgetId = kUIIdBase + "manageroptionswidget"
kManagerOptionsWidgetName = "Asset Manager Options"
kRegistrationManagerOptionsWidgetId = kUIIdBase + "registrationmanageroptionswidget"
kRegistrationManagerOptionsWidgetName = kManagerOptionsWidgetName
|
nilq/baby-python
|
python
|
import matplotlib.pyplot as plt
def plot_creater(history,bin, modelname):
"""[For the training progress, a chart about the accuracy / loss is created for the deep learning approaches and stored accordingly]
Args:
history (keras.callbacks.History object): [Contains values accuracy, validation-accuracy, validation-loss and loss values during the training of the model]
bin (String): [shows if binary ("True") or multilabel ("False") classification is active]
modelname (String): [Name of Model]
"""
if (modelname=="CNN" or modelname=="LSTM"):
if (bin=="True"):
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig('./CNN_bin/acc_val_bin.png')
plt.savefig('./CNN_bin/acc_val_bin.pdf')
plt.close()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig('./CNN_bin/loss_val_bin.png')
plt.savefig('./CNN_bin/loss_val_bin.pdf')
plt.close()
elif (bin=="False"):
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig('./CNN_multi/acc_val_bin.png')
plt.savefig('./CNN_multi/acc_val_bin.pdf')
plt.close()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig('./CNN_multi/loss_val_bin.png')
plt.savefig('./CNN_multi/loss_val_bin.pdf')
plt.close()
elif (modelname == "Resnet"):
if (bin == "True"):
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig('./resnet_bin/acc_val_bin.png')
plt.savefig('./resnet_bin/acc_val_bin.pdf')
plt.close()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig('./resnet_bin/loss_val_bin.png')
plt.savefig('./resnet_bin/loss_val_bin.pdf')
plt.close()
elif (bin == "False"):
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig('./resnet_multi/acc_val_multi.png')
plt.savefig('./resnet_multi/acc_val_multi.pdf')
plt.close()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig('./resnet_multi/loss_val_multi.png')
plt.savefig('./resnet_multi/loss_val_multi.pdf')
plt.close()
else:
print("No Plot available")
|
nilq/baby-python
|
python
|
import GrossSalary, SalaryDeductions, NetSalary
print("Salary Computation App")
while True:
    action = str(input("\nWhat would you like to do? \n[A] Calculate Salary\n[B] Exit Application")).lower()
if(action == 'a'):
try:
name = str(input("\nEnter Name: "))
rendered_hours = float(input("Enter rendered Hours: "))
loan = float(input("Enter Loan Amount: "))
            health_insurance = float(input("Enter Health Insurance: "))
gross = GrossSalary.calculate(rendered_hours)
total_deductions, tax = SalaryDeductions.calculate(gross, loan, health_insurance)
net_salary = NetSalary.calculate(total_deductions, gross)
if gross and total_deductions and net_salary:
print("\nName: {}\nHour: {}\n".format(name, rendered_hours))
print("Gross Salary: Php {}\n".format(gross))
print("Tax: Php {}\nLoan: Php {}\nInsurance: Php {}\n".format(tax, loan, health_insurance))
print("Total Deductions: Php {}\n".format(total_deductions))
print("Net Salary: Php {}".format(net_salary))
except Exception:
print("Something went wrong processing your inputs")
else:
continue
elif(action == 'b'):
print("Application Exited")
break
else:
continue
|
nilq/baby-python
|
python
|
from src.libs.CrabadaWeb2Client.CrabadaWeb2Client import CrabadaWeb2Client
from pprint import pprint
from src.libs.CrabadaWeb2Client.types import CrabForLending
# VARS
client = CrabadaWeb2Client()
# TEST FUNCTIONS
def test() -> None:
pprint(client.getCheapestCrabForLending())
# EXECUTE
test()
|
nilq/baby-python
|
python
|
# coding: utf-8
import requests
from bs4 import BeautifulSoup
import re
import json
import os
from xml.etree import ElementTree
import time
import io
import pandas as pd
from gotoeat_map.module import getLatLng, checkRemovedMerchant
def main():
merchantFilePath = os.path.dirname(
os.path.abspath(__file__)) + "/merchants.json"
if os.path.exists(merchantFilePath):
json_open = open(merchantFilePath, "r", encoding="utf8")
merchants = json.load(json_open)
else:
merchants = {
"data": [],
"names": []
}
findMerchants = []
page = 0
while True:
page += 1
print("----- Page {page} -----".format(page=page))
html = requests.get(
"https://gotoeat-kumamoto.jp/shop/page/{page}/".format(page=page))
html.encoding = html.apparent_encoding
soup = BeautifulSoup(html.content, "html.parser")
lists = soup.findChildren("article", {"class": "shop"})
if (len(lists) == 0):
break
for merchant in lists:
merchant_name = merchant.find("h3").text.strip()
merchant_area = merchant.find(
"p", {"class": "cat"}).find("a").text.strip()
_merchant_address = merchant.find("p").text.strip()
merchant_postal_code = re.sub(
r"〒([0-9\-]+) (.+)", r"\1", _merchant_address)
merchant_address = re.sub(
r"〒([0-9\-]+) (.+)", r"\2", _merchant_address).replace(" ", "").strip()
print(merchant_name + " - " + merchant_address)
findMerchants.append(merchant_name)
if merchant_name in merchants["names"]:
continue
lat, lng = getLatLng(merchant_address)
print(str(lat) + " " + str(lng))
merchants["data"].append({
"name": merchant_name,
"area": merchant_area,
"address": merchant_address,
"postal_code": merchant_postal_code,
"lat": lat,
"lng": lng
})
merchants["names"].append(merchant_name)
with open(merchantFilePath, mode="w", encoding="utf8") as f:
f.write(json.dumps(merchants, indent=4, ensure_ascii=False))
        if soup.find("a", {"class": "next"}) is None:
break
else:
time.sleep(1)
merchants = checkRemovedMerchant(merchants, findMerchants)
with open(merchantFilePath, mode="w", encoding="utf8") as f:
f.write(json.dumps(merchants, indent=4, ensure_ascii=False))
main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# coding: utf-8
# ## Full Run
# In[1]:
import os
# In[2]:
Xtrain_dir = 'solar/data/kaggle_solar/train/'
Xtest_dir = 'solar/data/kaggle_solar/test'
ytrain_file = 'solar/data/kaggle_solar/train.csv'
station_file = 'solar/data/kaggle_solar/station_info.csv'
import solar.wrangle.wrangle
import solar.wrangle.subset
import solar.wrangle.engineer
import solar.analyze.model
import solar.report.submission
import numpy as np
# In[14]:
# Choose up to 98 stations; not specifying a station means to use all that fall within the given lats and longs. If the
# parameter 'all' is given, then it will use all stations no matter the provided lats and longs
station = ['all']
# Determine which dates will be used to train the model. No specified date means use the entire set from 1994-01-01
# until 2007-12-31.
train_dates = ['1994-01-01', '2007-12-31']
#2008-01-01 until 2012-11-30
test_dates = ['2008-01-01', '2012-11-30']
station_layout = True
# Use all variables
var = ['all']
# Keep model 0 (the default model) as a column for each of the variables (aggregated over other dimensions)
model = [0, 1]
# Aggregate over all times
times = ['all']
default_grid = {'type':'relative', 'axes':{'var':var, 'models':model, 'times':times,
'station':station}}
# This just uses the station_names as another feature
stat_names = {'type':'station_names'}
frac_dist = {'type':'frac_dist'}
days_solstice = {'type':'days_from_solstice'}
days_cold = {'type':'days_from_coldest'}
all_feats = [stat_names, default_grid, frac_dist, days_solstice, days_cold]
#all_feats = [stat_names, days_solstice, days_cold]
# In[4]:
import solar.report.submission
import solar.wrangle.wrangle
import solar.wrangle.subset
import solar.wrangle.engineer
import solar.analyze.model
# In[15]:
# test combination of station names and grid
reload(solar.wrangle.wrangle)
reload(solar.wrangle.subset)
reload(solar.wrangle.engineer)
from solar.wrangle.wrangle import SolarData
# input_data = SolarData.load(Xtrain_dir, ytrain_file, Xtest_dir,
# station_file, train_dates, test_dates, station,
# station_layout, all_feats, write)
reload(solar.analyze.model)
import numpy as np
from solar.analyze.model import Model
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import metrics
error_formula = 'mean_absolute_error'
cv_splits = 3
jobs = 20
write = 's3'
model = Model.model_from_pickle(
'input_2016-02-21-20-46-17.p', GradientBoostingRegressor,
{'n_estimators': [300], 'max_depth': range(1, 4),
'learning_rate': [0.01, 0.1, 1]}, cv_splits,
error_formula, jobs, write, loss='ls', random_state=0, verbose=10)
|
nilq/baby-python
|
python
|
from typing import Tuple, AnyStr
from lib.ui import BasePage
from lib.log import Loggers
from utils.Files import read_page_elements
log = Loggers(__name__)
class Baidu(BasePage):
def open_index(self):
self.get_url("https://www.baidu.com")
def login(self, locator: Tuple[AnyStr]):
self.click(locator)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 30 10:22:30 2020
@author: NN133
"""
import sys
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
sys.path.append("C:/Users/NN133/Documents/libsvm-3.22/python")
from svmutil import *
#%matplotlib inline
from util_ker import *
#Import data
path = 'C:/Users/NN133/Documents/GitHub/GaussianKernelTest/data/breast-cancer-wisconsin.data.txt'
col_names = ['id','Clump_Thick','U_Cell_Size', 'U_Cell_Shape','Marg_Adh','Epith_Cell_Size','Bare_Nuclei',
'Bland_Chrom','Norm_Nucle','Mitoses','Class']
df = pd.read_csv(path,header=None, names = col_names)
df.info() #Check the data types
#Extract the index for Bare_Nuclei values '?'
ind = df.query("Bare_Nuclei=='?'").index
#drop the rows with values '?'
data = df.drop(ind, axis ='index')
#Convert the Bare_Nuclei datatype from Object to int64
data['Bare_Nuclei'] = data.Bare_Nuclei.astype('int64')
#Check for null values
data.isnull().sum()
#Look up Summary statistics of the data
Summary_Stats = data.iloc[:,:-1].describe()
#plot the mean values from the summary stats bar
fig = plt.figure(figsize=(6,6))
Summary_Stats.loc['mean',:].plot(kind='barh', xerr=Summary_Stats.loc['std',:]);
plt.title('Bar chart showing the mean and std of variables')
plt.xlabel('Mean')
#plot the mean values from the summary stats line
fig = plt.figure(figsize=(9,4))
Summary_Stats.loc['mean',:].plot(kind='line', color='blue', linewidth=3);
Summary_Stats.loc['std',:].plot(kind='line', color='lightgreen', linewidth=2)
plt.legend()
#Plot the class distribution
fig = plt.figure(figsize=(15,4))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax1.bar(['neg','pos'], data.Class.value_counts().values, color=('grey','maroon'))
ax1.legend(['neg','pos'])
ax1.set_xlabel('Class Labels')
ax1.set_ylabel('Examples')
Explode=[0,0.2] #Separates the section of the pie chart specified
ax2.pie(data.Class.value_counts().values,explode=Explode, shadow=True,startangle=45)
ax2.legend(['neg','pos'],title ="Classes")
#Replace class labels from [benign, malignant]=(2,4) to (-1,1)
data.Class.replace({2:-1,4:1}, inplace=True)
data.Class.value_counts()
#Drop the id column
data.drop("id", axis=1, inplace=True)
#Extract Variables X and Label y from the data
X = data.iloc[:,:-1].values.reshape(data.shape[0],data.shape[1]-1)
y = data.iloc[:,-1].values.reshape(data.shape[0],1)
#SplitData into train, validation and Test data sets
xtr, xva, xte, ytr, yva, yte = splitdata(X, y, 25, 0.9)
#Choose Kernel
kernel = ['linear','H_poly','poly','rbf','erbf'] #['laplace','sqrexp','sigmoid']
#Set Kernel parameter
params = {}
params['linear'] = []
params['H_poly'] = [2,3,4]
params['poly'] = [2,3,4]
params['rbf'] = [ 0.001,1.0,100.0]
params['erbf'] = [ 0.001,1.0,100.0]
#Initialise result containers
TrainKernel = {}
TestKernel = {}
TrainKernelTime = {}
TestKernelTime = {}
PSDCheck = {}
Perf_eva = {}
AucRoc = {}
Result = {}
#Construct Kernel
for ker in kernel:
for par in range(len(params[ker])):
k_param = params[ker][par]
start_time=time.time()
TrainKernel[ker] = kernelfun(xtr, xtr, ker, k_param)
end_time=time.time()
TrainKernelTime[ker] = end_time - start_time
        print('{} minutes to construct Training kernel'.format(TrainKernelTime[ker]/60))
PSDCheck[ker] = checkPSD(TrainKernel[ker])
plt.imshow(TrainKernel[ker]) #Any other kernel analysis can be inserted here
TrainKernel[ker] = np.multiply(np.matmul(ytr,ytr.T),TrainKernel[ker])
TrainKernel[ker] = addIndexToKernel(TrainKernel[ker])
start_time=time.time()
TestKernel[ker] = kernelfun(xtr, xte, ker, k_param)
end_time=time.time()
TestKernelTime[ker] = end_time - start_time
        print('{} minutes to construct Test kernel'.format(TestKernelTime[ker]/60))
TestKernel[ker] = addIndexToKernel(TestKernel[ker])
model = svm_train(list(ytr), [list(r) for r in TrainKernel[ker]], ('-b 1 -c 4 -t 4'))
p_label, p_acc, p_val = svm_predict(list(yte),[list(row) for row in TestKernel[ker]], model, ('-b 1'))
Perf_eva[ker] = EvaluateTest(np.asarray(yte/1.),np.asarray(p_label))
print("--> {} F1 Score achieved".format(Evaluation["Fscore"]))
AucRoc[ker] = computeRoc(yte, p_val)
Result[ker+'_'+ str(par)] = (TrainKernel,TrainKernelTime,PSDCheck,
TestKernel,TestKernelTime,model,p_label, p_acc, p_val,Perf_eva,AucRoc)
print('-' * 6)
print(' Done ')
print('=' * 6)
print("K_tr_" + ker)
#initialize the kernel matrix
K_tr,K_te = intitializeKernels(m,n)
#Append an index column to the kernel matrix
H2 = addIndexToKernel(K_te)
RecordTime = {}
x=X[:10,:]
#Choose Parameter
params=[ 0.001, 0.01, 0.1, 1.0, 10.0, 100.0 ]
#Use Single Kernel
#Kernel = ['rbf']
#ker = Kernel[0]
#####
start_time2 = time.time()
H1 = kernelfun(xtr,xte, ker, params)
end_time2 = time.time()
####
for i in range(0,n):
for j in range(0,m):
u = K_tr[i,:]
print(u)
v = K_tr[j,:]
print(v)
K_tr[i,j] = np.exp(-(np.dot((u-v),(u-v).T)/2 * (1.25**2)))
#Check if Kernel is PSD
checkPSD(K_tr)
#plot kernel with plt.imshow()
plt.imshow(K_tr)
#Multiply kernel by label
K_tr = np.multiply(np.matmul(ytr,ytr.T),K_tr)
#Append index column to the kernel matrix
K_tr = addIndexToKernel(K_tr)
#Evaluation = EvaluateTest(np.asarray(p_label),yte)
Evaluation = EvaluateTest(np.asarray(yte/1.),np.asarray(p_label))
print("--> {} F1 Score achieved".format(Evaluation["Fscore"]))
|
nilq/baby-python
|
python
|
# In a metaclass, use __new__ rather than __init__.
# It is used as follows:
# __new__(<the class itself>, <class name>, (parent classes of the class), {dictionary of the class attributes})
# __init__ runs after __new__ has finished.
class Meta(type):
def __new__(cls, name, bases, attrs):
print("__new__ 메서드!")
print(cls, name, bases, attrs)
return type.__new__(cls, name, bases, attrs)
def __init__(cls, name, bases, attrs):
print("__init__ 메서드")
type.__init__(cls, name, bases, attrs)
print("=================================")
print("<메타클래스가 초기화 됩니다.>")
class MyClass(metaclass=Meta):
pass
print("=================================")
# Looking at the values printed above, you can see that the metaclass ran
# somewhere even though we only defined the class.
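# The metaclass can also be called directly; this is equivalent to a class
# statement and triggers __new__ and __init__ once more (illustrative example):
# AnotherClass = Meta("AnotherClass", (), {"answer": 42})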
|
nilq/baby-python
|
python
|
default_app_config = 'action_notifications.apps.ActionNotificationsConfig'
|
nilq/baby-python
|
python
|
from __future__ import division, unicode_literals
import codecs
from bs4 import BeautifulSoup
import urllib
from logzero import logger as LOGGER
import re
from w3lib.html import replace_entities
import os
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from PIL import Image
from wordcloud import WordCloud, ImageColorGenerator
import pandas as pd
import scattertext as st
import spacy
from fsa_utils.commons import get_asset_root, get_file_content
class Scatter_french_text(object):
def __init__(self, list_directory, list_author, language:str='fr', encoding = 'utf-8'):
self.list_text = self.read_directory(list_directory, encoding)
self.list_author = list_author
self.df = pd.DataFrame()
self.df["text"] = self.list_text
self.df["author"] = self.list_author
self.language = language
self.nlp = spacy.load(language)
self.corpus = st.CorpusFromPandas(self.df, category_col='author', text_col='text', nlp=self.nlp).build()
def explorer(self, category, not_category, metadata):
html = st.produce_scattertext_explorer(self.corpus, category=category, not_category_name=not_category, metadata=metadata)
open("Corpus-Visualization.html", 'wb').write(html.encode('utf-8'))
@staticmethod
def read_directory(list_directory, encoding):
cfg = get_asset_root()
list_text= []
for i in list_directory:
director = get_file_content(cfg, i)
            with open(director, encoding=encoding) as f:
                text = f.read()
list_text.append(text)
return list_text
if __name__ == '__main__':
g = Scatter_french_text(["french_books_no_meta/Hugo_Miserables1","french_books_no_meta/Zola_assommoir"], ['Hugo', "Zola"])
g.explorer("Zola", "Hugo",None)
|
nilq/baby-python
|
python
|
from setuptools import setup, find_packages
with open("README.md") as f:
long_description = f.read()
setup(
name="BindsNET",
version="0.2.9",
description="Spiking neural networks for ML in Python",
license="AGPL-3.0",
long_description=long_description,
long_description_content_type="text/markdown", # This is important!
url="http://github.com/Hananel-Hazan/bindsnet",
author="Hananel Hazan, Daniel Saunders, Darpan Sanghavi, Hassaan Khan",
author_email="hananel@hazan.org.il",
packages=find_packages(),
zip_safe=False,
install_requires=[
"numpy>=1.14.2",
"torch>=1.5.1",
"torchvision>=0.6.1",
"tensorboardX>=1.7",
"tqdm>=4.19.9",
"matplotlib>=2.1.0",
"gym>=0.10.4",
"scikit-build>=0.11.1",
"scikit_image>=0.13.1",
"scikit_learn>=0.19.1",
"opencv-python>=3.4.0.12",
"pytest>=3.4.0",
"scipy>=1.1.0",
"cython>=0.28.5",
"pandas>=0.23.4",
],
)
|
nilq/baby-python
|
python
|
class Queue(object):
def __init__(self, queue):
self._queue = queue
self.name = None
def delete(self):
raise NotImplementedError()
class BrokerBackend(object):
def __init__(self):
self._queues = None
@property
def queues(self):
if self._queues is None:
self._queues = self._get_queues()
return self._queues
def _get_queues(self):
raise NotImplementedError()
def filter_queues(self, prefix=None):
def queue_filter(queue):
skip = False
if prefix:
skip = skip or queue.name.startswith(prefix)
return skip
return filter(queue_filter, self.queues)
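# Note: despite the local name `skip`, filter_queues() *keeps* the queues whose
# name starts with the given prefix, because queue_filter's return value is
# passed to filter(). A minimal, hypothetical backend for illustration (the
# subclass and queue name below are not part of this module):
#
# class InMemoryBackend(BrokerBackend):
#     def _get_queues(self):
#         q = Queue(queue=None)
#         q.name = "celery.default"
#         return [q]
#
# matching = list(InMemoryBackend().filter_queues(prefix="celery."))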
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.keras.layers as tfkl
from veqtor_keras.util import localized_attention
class LocalizedAttentionLayer1D(tfkl.Layer):
def __init__(self,
patch_size=3,
num_heads=1,
stride=1,
dilation=1,
padding='same',
preshaped_q=True, **kwargs):
"""
Args:
patch_size: size of patches to perform localized attention within
num_heads: number of attention heads
            stride: the stride of the patch window; stride 2 halves the output length
            dilation: the dilation of the patch window
padding: one of 'same' or 'valid'
preshaped_q: True if q matches strided and padded kv
ex: kv: [B, 4, C]
stride = 2
q must be [B,2,C]
"""
super(LocalizedAttentionLayer1D, self).__init__(**kwargs)
self.patch_size = patch_size
self.num_heads = num_heads
self.stride = stride
self.dilation = dilation
self.padding = padding
self.preshaped_q = preshaped_q
def call(self, q, k, v):
if type(q) == list:
if len(q) == 3:
q, k, v = q
elif len(q) == 4:
q, k, v, mask = q
else:
raise SyntaxError
return localized_attention.localized_attention_1d(q=q, k=k, v=v,
num_heads=self.num_heads,
stride=self.stride,
dilation=self.dilation,
padding=self.padding,
preshaped_q=self.preshaped_q)
def get_config(self):
config = {'patch_size': self.patch_size,
'num_heads': self.num_heads,
'stride': self.stride,
'dilation': self.dilation,
'padding': self.padding,
'preshaped_q': self.preshaped_q}
base_config = super(LocalizedAttentionLayer1D, self).get_config()
return {**base_config, **config}
class LocalizedAttentionLayer2D(tfkl.Layer):
def __init__(self,
patch_size=(3, 3),
num_heads=1,
strides=(1, 1),
dilations=(1, 1),
padding='same',
preshaped_q=True, **kwargs):
"""
Args:
patch_size: size of patches to perform localized attention within
num_heads: number of attention heads
strides: the stride of the patch window, stride 2 halves output
dilations: the dilation of the patch window
padding: one of 'same' or 'valid'
preshaped_q: True if q matches strided and padded kv
ex: kv: [B, 4, 4, C]
strides = (2,2)
q must be [B,2,2,C]
"""
super(LocalizedAttentionLayer2D, self).__init__(**kwargs)
self.patch_size = patch_size
self.num_heads = num_heads
self.strides = strides
self.dilations = dilations
self.padding = padding
self.preshaped_q = preshaped_q
def call(self, q, k, v):
if type(q) == list:
if len(q) == 3:
q, k, v = q
elif len(q) == 4:
q, k, v, mask = q
else:
raise SyntaxError
return localized_attention.localized_attention_2d(q=q, k=k, v=v,
num_heads=self.num_heads,
strides=self.strides,
dilations=self.dilations,
padding=self.padding,
preshaped_q=self.preshaped_q)
def get_config(self):
config = {'patch_size': self.patch_size,
'num_heads': self.num_heads,
'strides': self.strides,
'dilations': self.dilations,
'padding': self.padding,
'preshaped_q': self.preshaped_q}
base_config = super(LocalizedAttentionLayer2D, self).get_config()
return {**base_config, **config}
|
nilq/baby-python
|
python
|
"""
https://adventofcode.com/2018/day/2
"""
from collections import Counter
from itertools import product
from pathlib import Path
def solve_a(codes):
pairs = triplets = 0
for code in codes:
occurrences = Counter(code).values()
pairs += any(count == 2 for count in occurrences)
triplets += any(count == 3 for count in occurrences)
return pairs * triplets
def solve_b(codes):
for code_a, code_b in product(codes, codes):
diff = sum(c != c2 for c, c2 in zip(code_a, code_b))
if diff == 1:
common = ''.join(c for c, c2 in zip(code_a, code_b) if c == c2)
return common
if __name__ == '__main__':
assert 12 == solve_a([
'abcdef',
'bababc',
'abbcde',
'abcccd',
'aabcdd',
'abcdee',
'ababab',
])
assert 'fgij' == solve_b([
'abcde',
'fghij',
'klmno',
'pqrst',
'fguij',
'axcye',
'wvxyz',
])
codes = Path('day02.txt').read_text().strip().splitlines()
print('A:', solve_a(codes))
print('B:', solve_b(codes))
|
nilq/baby-python
|
python
|
import hashlib
def hash_uid(uid, truncate=6):
"""Hash a UID and truncate it
Args:
uid (str): The UID to hash
truncate (int, optional): The number of the leading characters to keep. Defaults to 6.
Returns:
        str: The hashed and truncated UID
"""
hash_sha = hashlib.sha256()
hash_sha.update(uid.encode("UTF-8"))
return hash_sha.hexdigest()[:truncate]
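# Illustrative usage: the result is the first `truncate` hex characters of the
# SHA-256 digest of the UID, so a longer truncate value extends the same prefix.
# short_id = hash_uid("1.2.840.10008")       # 6 hex characters
# longer_id = hash_uid("1.2.840.10008", 12)  # 12 hex characters, same prefix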
|
nilq/baby-python
|
python
|
from lib.interface import *
from lib.arquivo import *
from time import sleep
arq = './Ex115/cadastro.txt'
if not arquivoExiste(arq):
criarArquivo(arq)
while True:
cor(2)
opcao = menu(['Cadastrar', 'Listar', 'Sair'])
if opcao == 1:
        #Option to register a new person in the file
cabecalho('Novo cadastro')
nome = str(input('Nome: '))
idade = leiaInt('Idade: ')
cadastrar(arq, nome, idade)
elif opcao == 2:
        #Option to open and read the contents of the file
lerArquivo(arq)
elif opcao == 3:
cor(11)
print()
print(linha())
print('Volte sempre!')
print(linha())
cor(7)
break
else:
cor(4)
print('Digite uma opção entre 1 e 3')
sleep(1)
|
nilq/baby-python
|
python
|
from datetime import datetime
import json
import platform
import socket
import sys
from collections.abc import Iterable
import os
import inspect
import types
import pickle
import base64
import re
import subprocess
import io
import threading
import signal
try:
import pkg_resources
except ImportError:
pkg_resources = None
try:
import line_profiler
except ImportError:
line_profiler = None
try:
import psutil
except ImportError:
psutil = None
try:
import conda
import conda.cli.python_api
except ImportError:
conda = None
try:
import numpy
except ImportError:
numpy = None
from .diff import envdiff
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
class JSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return o.isoformat()
if numpy:
if isinstance(o, numpy.integer):
return int(o)
elif isinstance(o, numpy.floating):
return float(o)
elif isinstance(o, numpy.ndarray):
return o.tolist()
return super().default(o)
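# Illustrative usage of the encoder above: pass it to json.dumps via `cls` so
# datetime values and numpy scalars/arrays serialize cleanly.
# json.dumps({'when': datetime.now(), 'score': numpy.float64(0.5)}, cls=JSONEncoder)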
class MicroBench(object):
def __init__(self, outfile=None, json_encoder=JSONEncoder,
*args, **kwargs):
self._capture_before = []
if args:
raise ValueError('Only keyword arguments are allowed')
self._bm_static = kwargs
if outfile is not None:
self.outfile = outfile
elif not hasattr(self, 'outfile'):
self.outfile = io.StringIO()
self._json_encoder = json_encoder
def pre_run_triggers(self, bm_data):
# Capture environment variables
if hasattr(self, 'env_vars'):
if not isinstance(self.env_vars, Iterable):
raise ValueError('env_vars should be a tuple of environment '
'variable names')
for env_var in self.env_vars:
bm_data['env_{}'.format(env_var)] = os.environ.get(env_var)
# Capture package versions
if hasattr(self, 'capture_versions'):
if not isinstance(self.capture_versions, Iterable):
                raise ValueError('capture_versions is reserved for a tuple of '
                                 'package names - please rename this method')
for pkg in self.capture_versions:
self._capture_package_version(bm_data, pkg)
# Run capture triggers
for method_name in dir(self):
if method_name.startswith('capture_'):
method = getattr(self, method_name)
if callable(method) and method not in self._capture_before:
method(bm_data)
# Initialise telemetry thread
if hasattr(self, 'telemetry'):
interval = getattr(self, 'telemetry_interval', 60)
bm_data['telemetry'] = []
self._telemetry_thread = TelemetryThread(
self.telemetry, interval, bm_data['telemetry'])
self._telemetry_thread.start()
# Special case, as we want this to run immediately before run
bm_data['start_time'] = datetime.now()
def post_run_triggers(self, bm_data):
# Special case, as we want this to run immediately after run
bm_data['finish_time'] = datetime.now()
# Terminate telemetry thread and gather results
if hasattr(self, '_telemetry_thread'):
self._telemetry_thread.terminate()
timeout = getattr(self, 'telemetry_timeout', 30)
self._telemetry_thread.join(timeout)
def capture_function_name(self, bm_data):
bm_data['function_name'] = bm_data['_func'].__name__
def _capture_package_version(self, bm_data, pkg, skip_if_none=False):
bm_data.setdefault('package_versions', {})
try:
ver = pkg.__version__
except AttributeError:
if skip_if_none:
return
ver = None
bm_data['package_versions'][pkg.__name__] = ver
def to_json(self, bm_data):
bm_str = '{}'.format(json.dumps(bm_data,
cls=self._json_encoder))
return bm_str
def output_result(self, bm_data):
""" Output result to self.outfile as a line in JSON format """
bm_str = self.to_json(bm_data) + '\n'
# This should guarantee atomic writes on POSIX by setting O_APPEND
if isinstance(self.outfile, str):
with open(self.outfile, 'a') as f:
f.write(bm_str)
else:
# Assume file-like object
self.outfile.write(bm_str)
def __call__(self, func):
def inner(*args, **kwargs):
bm_data = dict()
bm_data.update(self._bm_static)
bm_data['_func'] = func
bm_data['_args'] = args
bm_data['_kwargs'] = kwargs
if isinstance(self, MBLineProfiler):
if not line_profiler:
raise ImportError('This functionality requires the '
'"line_profiler" package')
self._line_profiler = line_profiler.LineProfiler(func)
self.pre_run_triggers(bm_data)
if isinstance(self, MBLineProfiler):
res = self._line_profiler.runcall(func, *args, **kwargs)
else:
res = func(*args, **kwargs)
self.post_run_triggers(bm_data)
if isinstance(self, MBReturnValue):
bm_data['return_value'] = res
# Delete any underscore-prefixed keys
bm_data = {k: v for k, v in bm_data.items()
if not k.startswith('_')}
self.output_result(bm_data)
return res
return inner
class MBFunctionCall(object):
""" Capture function arguments and keyword arguments """
def capture_function_args_and_kwargs(self, bm_data):
bm_data['args'] = bm_data['_args']
bm_data['kwargs'] = bm_data['_kwargs']
class MBReturnValue(object):
""" Capture the decorated function's return value """
pass
class MBPythonVersion(object):
""" Capture the Python version and location of the Python executable """
def capture_python_version(self, bm_data):
bm_data['python_version'] = platform.python_version()
def capture_python_executable(self, bm_data):
bm_data['python_executable'] = sys.executable
class MBHostInfo(object):
""" Capture the hostname and operating system """
def capture_hostname(self, bm_data):
bm_data['hostname'] = socket.gethostname()
def capture_os(self, bm_data):
bm_data['operating_system'] = sys.platform
class MBGlobalPackages(object):
""" Capture Python packages imported in global environment """
def capture_functions(self, bm_data):
# Get globals of caller
caller_frame = inspect.currentframe().f_back.f_back.f_back
caller_globals = caller_frame.f_globals
for g in caller_globals.values():
if isinstance(g, types.ModuleType):
self._capture_package_version(bm_data, g, skip_if_none=True)
else:
try:
module_name = g.__module__
except AttributeError:
continue
self._capture_package_version(
bm_data,
sys.modules[module_name.split('.')[0]],
skip_if_none=True
)
class MBCondaPackages(object):
""" Capture conda packages; requires 'conda' package (pip install conda) """
include_builds = True
include_channels = False
def capture_conda_packages(self, bm_data):
if conda is None:
# Use subprocess
pkg_list = subprocess.check_output(['conda', 'list']).decode('utf8')
else:
# Use conda Python API
pkg_list, stderr, ret_code = conda.cli.python_api.run_command(
conda.cli.python_api.Commands.LIST)
if ret_code != 0 or stderr:
raise RuntimeError('Error running conda list: {}'.format(
stderr))
bm_data['conda_versions'] = {}
for pkg in pkg_list.splitlines():
if pkg.startswith('#') or not pkg.strip():
continue
pkg_data = pkg.split()
pkg_name = pkg_data[0]
pkg_version = pkg_data[1]
if self.include_builds:
pkg_version += pkg_data[2]
if self.include_channels and len(pkg_data) == 4:
                pkg_version += ' (' + pkg_data[3] + ')'
bm_data['conda_versions'][pkg_name] = pkg_version
class MBInstalledPackages(object):
""" Capture installed Python packages using pkg_resources """
capture_paths = False
def capture_packages(self, bm_data):
if not pkg_resources:
raise ImportError(
'pkg_resources is required to capture package names, which is '
'provided with the "setuptools" package')
bm_data['package_versions'] = {}
if self.capture_paths:
bm_data['package_paths'] = {}
for pkg in pkg_resources.working_set:
bm_data['package_versions'][pkg.project_name] = pkg.version
if self.capture_paths:
bm_data['package_paths'][pkg.project_name] = pkg.location
class MBLineProfiler(object):
"""
Run the line profiler on the selected function
Requires the line_profiler package. This will generate a benchmark which
times the execution of each line of Python code in your function. This will
slightly slow down the execution of your function, so it's not recommended
in production.
"""
def capture_line_profile(self, bm_data):
bm_data['line_profiler'] = base64.encodebytes(
pickle.dumps(self._line_profiler.get_stats())
).decode('utf8')
@staticmethod
def decode_line_profile(line_profile_pickled):
return pickle.loads(base64.decodebytes(line_profile_pickled.encode()))
@classmethod
    def print_line_profile(cls, line_profile_pickled, **kwargs):
        lp_data = cls.decode_line_profile(line_profile_pickled)
line_profiler.show_text(lp_data.timings, lp_data.unit, **kwargs)
class _NeedsPsUtil(object):
@classmethod
def _check_psutil(cls):
if not psutil:
raise ImportError('psutil library needed')
class MBHostCpuCores(_NeedsPsUtil):
""" Capture the number of logical CPU cores """
def capture_cpu_cores(self, bm_data):
self._check_psutil()
bm_data['cpu_cores_logical'] = psutil.cpu_count()
class MBHostRamTotal(_NeedsPsUtil):
""" Capture the total host RAM in bytes """
def capture_total_ram(self, bm_data):
self._check_psutil()
bm_data['ram_total'] = psutil.virtual_memory().total
class MBNvidiaSmi(object):
"""
Capture attributes on installed NVIDIA GPUs using nvidia-smi
Requires the nvidia-smi utility to be available in the current PATH.
By default, the gpu_name and memory.total attributes are captured. Extra
attributes can be specified using the class or object-level variable
nvidia_attributes.
By default, all installed GPUs will be polled. To limit to a specific GPU,
specify the nvidia_gpus attribute as a tuple of GPU IDs, which can be
zero-based GPU indexes (can change between reboots, not recommended),
GPU UUIDs, or PCI bus IDs.
"""
_nvidia_attributes_available = ('gpu_name', 'memory.total')
_nvidia_gpu_regex = re.compile(r'^[0-9A-Za-z\-:]+$')
def capture_nvidia(self, bm_data):
if hasattr(self, 'nvidia_attributes'):
nvidia_attributes = self.nvidia_attributes
            unknown_attrs = set(nvidia_attributes).difference(
                self._nvidia_attributes_available
            )
if unknown_attrs:
raise ValueError("Unknown nvidia_attributes: {}".format(
', '.join(unknown_attrs)
))
else:
nvidia_attributes = self._nvidia_attributes_available
if hasattr(self, 'nvidia_gpus'):
gpus = self.nvidia_gpus
if not gpus:
raise ValueError('nvidia_gpus cannot be empty. Leave the '
'attribute out to capture data for all GPUs')
for gpu in gpus:
if not self._nvidia_gpu_regex.match(gpu):
raise ValueError('nvidia_gpus must be a list of GPU indexes'
'(zero-based), UUIDs, or PCI bus IDs')
else:
gpus = None
# Construct the command
cmd = ['nvidia-smi', '--format=csv,noheader',
'--query-gpu=uuid,{}'.format(','.join(nvidia_attributes))]
if gpus:
cmd += ['-i', ','.join(gpus)]
# Execute the command
res = subprocess.check_output(cmd).decode('utf8')
# Process results
for gpu_line in res.split('\n'):
if not gpu_line:
continue
gpu_res = gpu_line.split(', ')
for attr_idx, attr in enumerate(nvidia_attributes):
gpu_uuid = gpu_res[0]
bm_data.setdefault('nvidia_{}'.format(attr), {})[gpu_uuid] = \
gpu_res[attr_idx + 1]
class MicroBenchRedis(MicroBench):
def __init__(self, *args, **kwargs):
super(MicroBenchRedis, self).__init__(*args, **kwargs)
import redis
self.rclient = redis.StrictRedis(**self.redis_connection)
def output_result(self, bm_data):
self.rclient.rpush(self.redis_key, self.to_json(bm_data))
class TelemetryThread(threading.Thread):
def __init__(self, telem_fn, interval, slot, *args, **kwargs):
super(TelemetryThread, self).__init__(*args, **kwargs)
self._terminate = threading.Event()
signal.signal(signal.SIGINT, self.terminate)
signal.signal(signal.SIGTERM, self.terminate)
self._interval = interval
self._telemetry = slot
self._telem_fn = telem_fn
if not psutil:
raise ImportError('Telemetry requires the "psutil" package')
self.process = psutil.Process()
def terminate(self, signum=None, frame=None):
self._terminate.set()
def _get_telemetry(self):
telem = {'timestamp': datetime.now()}
telem.update(self._telem_fn(self.process))
self._telemetry.append(telem)
def run(self):
self._get_telemetry()
while not self._terminate.wait(self._interval):
self._get_telemetry()
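# A minimal usage sketch based on the classes above (the class, file, and
# function names here are hypothetical): compose MicroBench with the capture
# mixins and use an instance as a decorator; each call appends one JSON line
# to `outfile`.
#
# class MyBench(MicroBench, MBFunctionCall, MBPythonVersion, MBHostInfo):
#     outfile = 'benchmarks.jsonl'
#
# benchmark = MyBench()
#
# @benchmark
# def my_function(n):
#     return sum(range(n))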
|
nilq/baby-python
|
python
|
import logging
from tqdm import tqdm
import tmdb
from page import blocked_qids
from sparql import sparql
def main():
"""
    Find Wikidata items that are missing a TMDb TV series ID (P4983) but have an
    IMDb ID (P345) or TheTVDB.com series ID (P4835). Attempt to look up the
TV show via the TMDb API. If there's a match, create a new statement.
Outputs QuickStatements CSV commands.
"""
query = """
SELECT ?item ?imdb ?tvdb ?random WHERE {
# Items with either IMDb or TVDB IDs
{ ?item wdt:P4835 []. }
UNION
{ ?item wdt:P345 []. }
VALUES ?classes {
wd:Q15416
}
?item (wdt:P31/(wdt:P279*)) ?classes.
# Get IMDb and TVDB IDs
OPTIONAL { ?item wdt:P345 ?imdb. }
OPTIONAL { ?item wdt:P4835 ?tvdb. }
# Exclude items that already have a TMDB TV ID
OPTIONAL { ?item wdt:P4983 ?tmdb. }
FILTER(!(BOUND(?tmdb)))
# Generate random sorting key
BIND(MD5(CONCAT(STR(?item), STR(RAND()))) AS ?random)
}
ORDER BY ?random
LIMIT 5000
"""
items = {}
for result in sparql(query):
qid = result["item"]
if qid in blocked_qids():
logging.debug("{} is blocked".format(qid))
continue
if qid not in items:
items[qid] = {"imdb": set(), "tvdb": set()}
item = items[qid]
if result["imdb"]:
item["imdb"].add(result["imdb"])
if result["tvdb"]:
item["tvdb"].add(result["tvdb"])
print("qid,P4983")
for qid in tqdm(items):
item = items[qid]
tmdb_ids = set()
for imdb_id in item["imdb"]:
tv = tmdb.find(id=imdb_id, source="imdb_id", type="tv")
if tv:
tmdb_ids.add(tv["id"])
for tvdb_id in item["tvdb"]:
tv = tmdb.find(id=tvdb_id, source="tvdb_id", type="tv")
if tv:
tmdb_ids.add(tv["id"])
for tmdb_id in tmdb_ids:
print('{},"""{}"""'.format(qid, tmdb_id))
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
main()
|
nilq/baby-python
|
python
|
import sys
total = 0
for i in range(1, len(sys.argv), 1):
    total += int(sys.argv[i])
print(total)
|
nilq/baby-python
|
python
|
from .normalize import *
from .logarithmic import *
from .exponential import *
from .gamma import *
from .tumblin import *
from .reinhard import *
from .durand import *
from .drago import *
from .fattal import *
from .lischinski import *
|
nilq/baby-python
|
python
|
__author__ = 'xf'
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import pytest
from django.conf import settings
from django.http import HttpResponse
from mock import Mock, PropertyMock, patch
from django_toolkit import middlewares
@pytest.fixture
def http_request(rf):
return rf.get('/')
@pytest.fixture
def http_response():
return HttpResponse()
class TestVersionHeaderMiddleware(object):
@pytest.fixture(autouse=True)
def settings(self, settings):
settings.TOOLKIT = {
'API_VERSION': '1.2.3',
}
return settings
@pytest.fixture
def middleware(self):
return middlewares.VersionHeaderMiddleware()
def test_should_return_a_response(
self,
middleware,
http_request,
http_response
):
response = middleware.process_response(http_request, http_response)
assert isinstance(response, HttpResponse)
def test_should_add_a_version_header_to_the_response(
self,
middleware,
http_request,
http_response
):
response = middleware.process_response(http_request, http_response)
assert 'X-API-Version' in response
assert response['X-API-Version'] == settings.TOOLKIT['API_VERSION']
@pytest.mark.django_db
class TestAccessLogMiddleware(object):
@pytest.fixture
def middleware(self):
return middlewares.AccessLogMiddleware()
@pytest.fixture
def patched_logger(self):
return patch('django_toolkit.middlewares.logger')
@pytest.fixture
def patched_format(self):
return patch(
'django_toolkit.middlewares.AccessLogMiddleware.LOG_FORMAT',
new_callable=PropertyMock
)
@pytest.fixture
def authenticated_http_request(self, http_request):
http_request.user = u'jovem'
http_request.auth = Mock(application=Mock(name='myapp'))
return http_request
def test_should_return_a_response(
self,
middleware,
http_request,
http_response
):
response = middleware.process_response(http_request, http_response)
assert isinstance(response, HttpResponse)
def test_should_log_responses(
self,
middleware,
http_request,
http_response,
patched_logger,
patched_format
):
with patched_logger as mock_logger:
middleware.process_response(http_request, http_response)
assert mock_logger.info.called
def test_should_include_request_and_response_in_the_message(
self,
middleware,
http_request,
http_response,
patched_logger,
patched_format
):
with patched_logger as mock_logger:
with patched_format as mock_format_property:
middleware.process_response(http_request, http_response)
mock_format_string = mock_format_property.return_value
assert mock_format_string.format.called
mock_format_string.format.assert_called_once_with(
app_name=middleware.UNKNOWN_APP_NAME,
request=http_request,
response=http_response
)
mock_logger.info.assert_called_once_with(
mock_format_string.format.return_value
)
def test_should_include_the_authenticated_app_in_the_message(
self,
middleware,
authenticated_http_request,
http_response,
patched_logger,
patched_format
):
with patched_format as mock_format_property:
middleware.process_response(
authenticated_http_request,
http_response
)
mock_format_string = mock_format_property.return_value
assert mock_format_string.format.called
mock_format_string.format.assert_called_once_with(
app_name=authenticated_http_request.auth.application.name,
request=authenticated_http_request,
response=http_response
)
|
nilq/baby-python
|
python
|
__version__ = 0.6
|
nilq/baby-python
|
python
|
import boto3
import json
import string
from time import asctime
from urllib.request import Request, urlopen
import yaml
def get_API_key() -> str:
"""Grab QnAMaker API key from encrypted s3 object.
"""
s3_client = boto3.client('s3')
response = s3_client.get_object(
Bucket='octochat-processor',
Key='secrets.yml'
)
    data = yaml.safe_load(response['Body'])
return data['qnamaker_api_key']
def create_knowledge_base(faq_url: str, QNAMAKER_API_KEY: str) -> str:
"""Creates knowledge base from FAQ URL using Azure QnAMaker at
https://qnamaker.ai/.
Args:
faq_url: A well-formed URL of a page containing an FAQ section.
QNAMAKER_API_KEY: The API key for QnAMaker.
Returns:
The knowledge base ID.
"""
create_request_endpoint = 'https://westus.api.cognitive.microsoft.com/qnamaker/v2.0/knowledgebases/create'
create_request = Request(create_request_endpoint)
create_request.add_header('Ocp-Apim-Subscription-Key', QNAMAKER_API_KEY)
create_request.add_header('Content-Type', 'application/json')
# TODO: call crawler to get all faq urls if the user wants it to
    input_data = json.dumps({
        # include the time of creation in the bot title for logging
        'name': 'CAKB_' + asctime(),
        'urls': [
            faq_url
        ]
    }).encode('utf-8')
create_response = urlopen(
create_request, data=input_data, timeout=15).read().decode('utf-8')
kbId = json.loads(create_response)['kbId']
return kbId
def remove_invalid_punctuation(s: str) -> str:
"""Removes punctuation invalid by Lex intent rules, specifically any
punctuation except apostrophes, underscores, and hyphens.
Args:
s: any string, usually name of intent.
Returns:
The input string without invalid punctuation.
"""
# Create string of invalid punctuation
invalid_punctuation = ''.join(
[ch for ch in string.punctuation if ch not in '-_\''])
# Remove punctuation from string
s = s.translate(s.maketrans('', '', invalid_punctuation))
s = s.strip()
return s
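# Illustrative example: everything except apostrophes, underscores, and hyphens
# is stripped, and surrounding whitespace is removed.
# remove_invalid_punctuation("What's the refund-policy?!")  # -> "What's the refund-policy"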
def get_stopwords() -> list:
"""Retrieve list of stopwords.
Returns:
A list of stopwords retrieved from stopwords.txt.
"""
with open('stopwords.txt', 'r') as f:
return f.read().split('\n')
def question_to_intent_name(s: str, stopwords: list) -> str:
"""Converts a question string to an intent name.
Args:
s: The question string.
stopwords: The list of stopwords to remove from the string.
Returns:
A condensed version of the question text as an intent name.
"""
tokens = s.split(' ')
tokens = [t for t in tokens if t.lower() not in stopwords]
filtered_question = ''.join(tokens)
whitelist = set(string.ascii_lowercase + string.ascii_uppercase)
return ''.join(filter(whitelist.__contains__, filtered_question))
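# Illustrative example (assuming 'how', 'do', 'i', and 'my' appear in stopwords.txt):
# question_to_intent_name("How do I reset my password?", stopwords)  # -> "resetpassword"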
def generate_intents_from_knowledge_base(kb_tab_separated: str) -> list:
"""Generates a list of intent objects from knowledge base as a tab-separated
string.
Args:
kb_tab_separated: A knowledge base as a tab-separated string.
Returns:
A list of intent objects that each contain an intent name, a list of
sample utterances, and a response.
"""
lines = kb_tab_separated.split('\r')
    # the first line is just the header; the last line is empty
lines = lines[1:-1]
lines = [line.split('\t') for line in lines]
stopwords = get_stopwords()
intents = [{
# only take first 65 characters, full intent name <100 characters
'name': question_to_intent_name(question, stopwords)[:65],
'sample_utterances': [remove_invalid_punctuation(question)],
'response': answer
} for question, answer, source in lines]
return intents
def download_knowledge_base(kbId: str, QNAMAKER_API_KEY: str) -> str:
"""Downloads knowledge base from Azure QnAMaker at https://qnamaker.ai/.
Args:
kbId: The id of a knowledge base in Azure QnAMaker.
QNAMAKER_API_KEY: The API key from QnAMaker.
Returns:
        The knowledge base as a list of intents.
"""
download_kb_request_endpoint = 'https://westus.api.cognitive.microsoft.com/qnamaker/v2.0/knowledgebases/' + kbId
download_kb_request = Request(download_kb_request_endpoint)
download_kb_request.add_header(
'Ocp-Apim-Subscription-Key', QNAMAKER_API_KEY)
download_kb_response = urlopen(download_kb_request, timeout=15).read().decode(
'utf-8') # returns an address from which to download kb
# [1:-1] removes quotation marks from url
download_kb_link = download_kb_response[1:-1]
kb_response = urlopen(download_kb_link).read().decode(
'utf-8-sig') # must be utf-8-sig to remove BOM characters
intents = generate_intents_from_knowledge_base(kb_response)
return intents
def delete_knowledge_base(kbId: str, QNAMAKER_API_KEY: str) -> None:
"""Deletes knowledge base from Azure QnAMaker at https://qnamaker.ai/.
Args:
kbId: The id of a knowledge base in Azure QnAMaker.
QNAMAKER_API_KEY: The API key for QnAMaker.
"""
delete_request_endpoint = 'https://westus.api.cognitive.microsoft.com/qnamaker/v2.0/knowledgebases/' + kbId
delete_request = Request(delete_request_endpoint, method='DELETE')
delete_request.add_header('Ocp-Apim-Subscription-Key', QNAMAKER_API_KEY)
delete_response = urlopen(
delete_request, timeout=15).read().decode('utf-8')
|
nilq/baby-python
|
python
|
import warnings
from collections import Counter
from itertools import chain
from typing import Tuple, Type
import strawberry
def merge_types(name: str, types: Tuple[Type]) -> Type:
"""Merge multiple Strawberry types into one
For example, given two queries `A` and `B`, one can merge them into a
super type as follows:
merge_types("SuperQuery", (B, A))
This is essentially the same as:
class SuperQuery(B, A):
...
"""
if not types:
raise ValueError("Can't merge types if none are supplied")
fields = chain(*(t._type_definition.fields for t in types))
counter = Counter(f.name for f in fields)
dupes = [f for f, c in counter.most_common() if c > 1]
if dupes:
warnings.warn("{} has overridden fields: {}".format(name, ", ".join(dupes)))
return strawberry.type(type(name, types, {}))
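# A minimal usage sketch (the query types below are hypothetical, not from this module):
#
# @strawberry.type
# class UserQuery:
#     @strawberry.field
#     def user(self) -> str:
#         return "alice"
#
# @strawberry.type
# class OrderQuery:
#     @strawberry.field
#     def order(self) -> str:
#         return "order-1"
#
# Query = merge_types("SuperQuery", (UserQuery, OrderQuery))
# schema = strawberry.Schema(query=Query)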
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
from matplotlib import pyplot as plt
import numpy as np
with plt.xkcd():
# Based on "Stove Ownership" from XKCD by Randall Munroe
# https://xkcd.com/418/
fig = plt.figure(figsize=(6,4))
ax = fig.add_axes((0.1, 0.2, 0.8, 0.7))
ax.set_xticks([])
ax.set_yticks([])
# ax.set_ylim([-30, 10])
def f_sigmoid(x):
return 1 / (1 + np.exp(-x))
def f_foo(x):
if x < -1.0:
return -1.0
if x > 1.0:
return 1.0
return x
f = f_sigmoid
x = np.arange(-10, 10, step=0.1)
y = [f(xp) for xp in x]
ax.annotate(
"absolutelty worth it",
xy=(-1, f(-1)),
arrowprops=dict(arrowstyle="->"),
xytext=(-10, f(3) - 0.5),
)
ax.annotate(
"absolutelty not worth it",
xy=(5, f(5)),
arrowprops=dict(arrowstyle="->"),
xytext=(1, f(5) - 0.5),
)
ax.plot(x, y)
ax.set_xlabel("effort put into visualizations")
ax.set_ylabel("number of people \nunderstanding my visualizations")
# fig.text(0.5, 0.05, '"Stove Ownership" from xkcd by Randall Munroe', ha="center")
plt.savefig("featured.png",dpi=240)
plt.savefig("featured.svg",dpi=240)
|
nilq/baby-python
|
python
|
import collections
import itertools
import json
import os
import operator
import attr
import torch
import torchtext
import numpy as np
from seq2struct.models import abstract_preproc
try:
from seq2struct.models import lstm
except ImportError:
pass
from seq2struct.models import spider_enc_modules
from seq2struct.utils import registry, batched_sequence
from seq2struct.utils import vocab
from seq2struct.utils import serialization
from seq2struct import resources
@attr.s
class SpiderEncoderState:
state = attr.ib()
memory = attr.ib()
question_memory = attr.ib()
schema_memory = attr.ib()
words = attr.ib()
pointer_memories = attr.ib()
pointer_maps = attr.ib()
def find_word_occurrences(self, word):
return [i for i, w in enumerate(self.words) if w == word]
@attr.s
class PreprocessedSchema:
column_names = attr.ib(factory=list)
table_names = attr.ib(factory=list)
table_bounds = attr.ib(factory=list)
column_to_table = attr.ib(factory=dict)
table_to_columns = attr.ib(factory=dict)
foreign_keys = attr.ib(factory=dict)
foreign_keys_tables = attr.ib(factory=lambda: collections.defaultdict(set))
primary_keys = attr.ib(factory=list)
class AlFu(torch.nn.Module):
def __init__(self, in_size=1024, out_size=256):
super().__init__()
self.fc1 = torch.nn.Linear(in_size, out_size)
self.fc2 = torch.nn.Linear(in_size, out_size)
def align_fusion(self, V_q, H_c):
fusion = torch.softmax(H_c.mm(torch.transpose(V_q, 0, 1)) /
np.sqrt(H_c.shape[1]), 0).mm(V_q)
input_tens = torch.cat([fusion, H_c, fusion * H_c, fusion - H_c], 1)
return input_tens
def forward(self, question, columns):
input_tens = self.align_fusion(question, columns)
x_bar = torch.relu(self.fc1(input_tens))
g = torch.sigmoid(self.fc2(input_tens))
return (g * x_bar) + (1 - g) * columns
#
# class BiLSTM_SIM(torch.nn.Module):
# def __init__(self, input_size, output_size, dropout, summarize, use_native=False):
# # input_size: dimensionality of input
# # output_size: dimensionality of output
# # dropout
# # summarize:
# # - True: return Tensor of 1 x batch x emb size
# # - False: return Tensor of seq len x batch x emb size
# super().__init__()
#
# if use_native:
# self.lstm = torch.nn.LSTM(
# input_size=input_size,
# hidden_size=output_size // 2,
# bidirectional=True,
# dropout=dropout)
# self.dropout = torch.nn.Dropout(dropout)
# else:
# self.lstm = lstm.LSTM(
# input_size=input_size,
# hidden_size=output_size // 2,
# bidirectional=True,
# dropout=dropout)
# self.summarize = summarize
# self.use_native = use_native
#
#
# def forward(self, all_embs, boundaries):
# for left, right in zip(boundaries, boundaries[1:]):
# # state shape:
# # - h: num_layers (=1) * num_directions (=2) x batch (=1) x recurrent_size / 2
# # - c: num_layers (=1) * num_directions (=2) x batch (=1) x recurrent_size / 2
# # output shape: seq len x batch size x output_size
# # self.lstm(torch.nn.utils.rnn.pack_sequence(all_embs.select(0).unsqueeze(0)))
# output, (h, c) = self.lstm(self.lstm(torch.nn.utils.rnn.pack_sequence(all_embs.unsqueeze(0)))[0])
# # if self.summarize:
# # seq_emb = torch.cat((h[0], h[1]), dim=-1)
# # else:
# seq_emb = output.data
#
# return seq_emb
class SpiderEncoderV2Preproc(abstract_preproc.AbstractPreproc):
def __init__(
self,
save_path,
min_freq=3,
max_count=5000,
include_table_name_in_column=True,
word_emb=None,
count_tokens_in_word_emb_for_vocab=False):
if word_emb is None:
self.word_emb = None
else:
self.word_emb = registry.construct('word_emb', word_emb)
self.data_dir = os.path.join(save_path, 'enc')
self.include_table_name_in_column = include_table_name_in_column
self.count_tokens_in_word_emb_for_vocab = count_tokens_in_word_emb_for_vocab
self.init_texts()
self.vocab_builder = vocab.VocabBuilder(min_freq, max_count)
self.vocab_path = os.path.join(save_path, 'enc_vocab.json')
self.vocab = None
self.counted_db_ids = set()
self.preprocessed_schemas = {}
def init_texts(self):
# TODO: Write 'train', 'val', 'test' somewhere else
self.texts = {'train': [], 'val': [], 'test': []}
def validate_item(self, item, section):
return True, None
def add_item(self, item, section, validation_info):
preprocessed = self.preprocess_item(item, validation_info)
self.texts[section].append(preprocessed)
if section == 'train':
if item.schema.db_id in self.counted_db_ids:
to_count = preprocessed['question']
else:
self.counted_db_ids.add(item.schema.db_id)
to_count = itertools.chain(
preprocessed['question'],
*preprocessed['columns'],
*preprocessed['tables'])
for token in to_count:
count_token = (
self.word_emb is None or
self.count_tokens_in_word_emb_for_vocab or
self.word_emb.lookup(token) is None)
if count_token:
self.vocab_builder.add_word(token)
def clear_items(self):
self.init_texts()
def preprocess_item(self, item, validation_info):
if self.word_emb:
question = self.word_emb.tokenize(item.orig['question'])
else:
question = item.text
preproc_schema = self._preprocess_schema(item.schema)
return {
'question': question,
'db_id': item.schema.db_id,
'columns': preproc_schema.column_names,
'tables': preproc_schema.table_names,
'table_bounds': preproc_schema.table_bounds,
'column_to_table': preproc_schema.column_to_table,
'table_to_columns': preproc_schema.table_to_columns,
'foreign_keys': preproc_schema.foreign_keys,
'foreign_keys_tables': preproc_schema.foreign_keys_tables,
'primary_keys': preproc_schema.primary_keys,
}
def _preprocess_schema(self, schema):
if schema.db_id in self.preprocessed_schemas:
return self.preprocessed_schemas[schema.db_id]
result = self._preprocess_schema_uncached(schema)
self.preprocessed_schemas[schema.db_id] = result
return result
def _preprocess_schema_uncached(self, schema):
r = PreprocessedSchema()
last_table_id = None
for i, column in enumerate(schema.columns):
column_name = ['<type: {}>'.format(column.type)] + self._tokenize(
column.name, column.unsplit_name)
if self.include_table_name_in_column:
if column.table is None:
table_name = ['<any-table>']
else:
table_name = self._tokenize(
column.table.name, column.table.unsplit_name)
column_name += ['<table-sep>'] + table_name
r.column_names.append(column_name)
table_id = None if column.table is None else column.table.id
r.column_to_table[str(i)] = table_id
if table_id is not None:
columns = r.table_to_columns.setdefault(str(table_id), [])
columns.append(i)
if last_table_id != table_id:
r.table_bounds.append(i)
last_table_id = table_id
if column.foreign_key_for is not None:
r.foreign_keys[str(column.id)] = column.foreign_key_for.id
r.foreign_keys_tables[str(column.table.id)].add(column.foreign_key_for.table.id)
r.table_bounds.append(len(schema.columns))
assert len(r.table_bounds) == len(schema.tables) + 1
for i, table in enumerate(schema.tables):
r.table_names.append(self._tokenize(
table.name, table.unsplit_name))
r.foreign_keys_tables = serialization.to_dict_with_sorted_values(r.foreign_keys_tables)
        r.primary_keys = [
            column.id
            for table in schema.tables
            for column in table.primary_keys
        ]
return r
def _tokenize(self, presplit, unsplit):
if self.word_emb:
return self.word_emb.tokenize(unsplit)
return presplit
def save(self):
os.makedirs(self.data_dir, exist_ok=True)
self.vocab = self.vocab_builder.finish()
self.vocab.save(self.vocab_path)
for section, texts in self.texts.items():
with open(os.path.join(self.data_dir, section + '.jsonl'), 'w') as f:
for text in texts:
f.write(json.dumps(text) + '\n')
def load(self):
self.vocab = vocab.Vocab.load(self.vocab_path)
def dataset(self, section):
return [
json.loads(line)
for line in open(os.path.join(self.data_dir, section + '.jsonl'))]
@registry.register('encoder', 'spiderv2')
class SpiderEncoderV2(torch.nn.Module):
batched = True
Preproc = SpiderEncoderV2Preproc
def __init__(
self,
device,
preproc,
word_emb_size=128,
recurrent_size=256,
dropout=0.,
question_encoder=('emb', 'bilstm'),
column_encoder=('emb', 'bilstm'),
table_encoder=('emb', 'bilstm'),
update_config={},
include_in_memory=('question', 'column', 'table'),
batch_encs_update=True,
):
super().__init__()
self._device = device
self.preproc = preproc
self.vocab = preproc.vocab
self.word_emb_size = word_emb_size
self.recurrent_size = recurrent_size
assert self.recurrent_size % 2 == 0
self.include_in_memory = set(include_in_memory)
self.dropout = dropout
self.question_encoder = self._build_modules(question_encoder)
self.column_encoder = self._build_modules(column_encoder)
self.table_encoder = self._build_modules(table_encoder)
self.additional_enc = AlFu()
# 'bilstm': lambda: spider_enc_modules.BiLSTM(
# input_size=self.word_emb_size,
# output_size=self.recurrent_size,
# dropout=self.dropout,
# summarize=False),
# self.additional_lstm_question = BiLSTM_SIM(
# input_size=256,
# output_size=self.recurrent_size,
# dropout=dropout,
# summarize=False)
# self.additional_lstm_columns = BiLSTM_SIM(
# input_size=256,
# output_size=self.recurrent_size,
# dropout=dropout,
# summarize=True)
# self.additional_lstm_tables = BiLSTM_SIM(
# input_size=256,
# output_size=self.recurrent_size,
# dropout=dropout,
# summarize=True)
#
update_modules = {
'relational_transformer':
spider_enc_modules.RelationalTransformerUpdate#,
# 'none':
# spider_enc_modules.NoOpUpdate,
}
self.encs_update = registry.instantiate(
update_modules[update_config['name']],
update_config,
device=self._device,
hidden_size=recurrent_size,
)
self.batch_encs_update = batch_encs_update
def _build_modules(self, module_types):
module_builder = {
'emb': lambda: spider_enc_modules.LookupEmbeddings(
self._device,
self.vocab,
self.preproc.word_emb,
self.word_emb_size),
'linear': lambda: spider_enc_modules.EmbLinear(
input_size=self.word_emb_size,
output_size=self.word_emb_size),
# batch_size, output_size, in_channels, out_channels, kernel_heights, stride, padding,
# keep_probab, vocab_size, embedding_length, weights
'bilstm': lambda: spider_enc_modules.BiLSTM(
input_size=self.word_emb_size,
output_size=self.recurrent_size,
dropout=self.dropout,
summarize=False),
'cnn': lambda: spider_enc_modules.CNN_L2(
# batch_size=50,
output_size=300,
in_channels=1,
out_channels=self.recurrent_size,
# kernel_heights=[1, 3, 5],
stride=1,
padding=1,
keep_probab=0.2,
vocab_size=len(self.vocab),
embedding_length=self.word_emb_size,
# weights=len(self.vocab),
embedder=self.preproc.word_emb,
device=self._device,
vocab = self.vocab,
preproc_word_emb=self.preproc.word_emb,
summarize=False
),
'cnn-summarize': lambda: spider_enc_modules.CNN_L2(
output_size=300,
in_channels=1,
out_channels=self.recurrent_size,
# kernel_heights=[1, 3, 5],
stride=1,
padding=1,
keep_probab=0.2,
vocab_size=len(self.vocab),
embedding_length=self.word_emb_size,
# weights=self.preproc.word_emb.vectors,
embedder=self.preproc.word_emb,
device=self._device,
vocab = self.vocab,
preproc_word_emb=self.preproc.word_emb,
summarize=True
),
# 'bilstm-native': lambda: spider_enc_modules.BiLSTM(
# input_size=self.word_emb_size,
# output_size=self.recurrent_size,
# dropout=self.dropout,
# summarize=False,
# use_native=True),
'bilstm-summarize': lambda: spider_enc_modules.BiLSTM(
input_size=self.word_emb_size,
output_size=self.recurrent_size,
dropout=self.dropout,
summarize=True),
# 'bilstm-native-summarize': lambda: spider_enc_modules.BiLSTM(
# input_size=self.word_emb_size,
# output_size=self.recurrent_size,
# dropout=self.dropout,
# summarize=True,
# use_native=True),
}
modules = []
for module_type in module_types:
modules.append(module_builder[module_type]())
return torch.nn.Sequential(*modules)
def forward_unbatched(self, desc):
# Encode the question
# - LookupEmbeddings
# - Transform embeddings wrt each other?
# q_enc: question len x batch (=1) x recurrent_size
q_enc, (_, _) = self.question_encoder([desc['question']])
# Encode the columns
# - LookupEmbeddings
# - Transform embeddings wrt each other?
# - Summarize each column into one?
# c_enc: sum of column lens x batch (=1) x recurrent_size
c_enc, c_boundaries = self.column_encoder(desc['columns'])
column_pointer_maps = {
i: list(range(left, right))
for i, (left, right) in enumerate(zip(c_boundaries, c_boundaries[1:]))
}
# Encode the tables
# - LookupEmbeddings
# - Transform embeddings wrt each other?
# - Summarize each table into one?
# t_enc: sum of table lens x batch (=1) x recurrent_size
t_enc, t_boundaries = self.table_encoder(desc['tables'])
c_enc_length = c_enc.shape[0]
table_pointer_maps = {
i: [
idx
for col in desc['table_to_columns'][str(i)]
for idx in column_pointer_maps[col]
] + list(range(left + c_enc_length, right + c_enc_length))
for i, (left, right) in enumerate(zip(t_boundaries, t_boundaries[1:]))
}
# Update each other using self-attention
# q_enc_new, c_enc_new, and t_enc_new now have shape
# batch (=1) x length x recurrent_size
q_enc_new, c_enc_new, t_enc_new = self.encs_update(
desc, q_enc, c_enc, c_boundaries, t_enc, t_boundaries)
memory = []
if 'question' in self.include_in_memory:
memory.append(q_enc_new)
if 'column' in self.include_in_memory:
memory.append(c_enc_new)
if 'table' in self.include_in_memory:
memory.append(t_enc_new)
memory = torch.cat(memory, dim=1)
return SpiderEncoderState(
state=None,
memory=memory,
# TODO: words should match memory
words=desc['question'],
pointer_memories={
'column': c_enc_new,
'table': torch.cat((c_enc_new, t_enc_new), dim=1),
},
pointer_maps={
'column': column_pointer_maps,
'table': table_pointer_maps,
}
)
def forward(self, descs):
# Encode the question
# - LookupEmbeddings
# - Transform embeddings wrt each other?
# q_enc: PackedSequencePlus, [batch, question len, recurrent_size]
q_enc, _ = self.question_encoder([[desc['question']] for desc in descs])
# Encode the columns
# - LookupEmbeddings
# - Transform embeddings wrt each other?
# - Summarize each column into one?
# c_enc: PackedSequencePlus, [batch, sum of column lens, recurrent_size]
c_enc, c_boundaries = self.column_encoder([desc['columns'] for desc in descs])
# ++
q_enc_rr, _rr = self.question_encoder([[desc['question']] for desc in descs])
# ++
column_pointer_maps = [
{
i: list(range(left, right))
for i, (left, right) in enumerate(zip(c_boundaries_for_item, c_boundaries_for_item[1:]))
}
for batch_idx, c_boundaries_for_item in enumerate(c_boundaries)
]
# Encode the tables
# - LookupEmbeddings
# - Transform embeddings wrt each other?
# - Summarize each table into one?
# t_enc: PackedSequencePlus, [batch, sum of table lens, recurrent_size]
t_enc, t_boundaries = self.table_encoder([desc['tables'] for desc in descs])
c_enc_lengths = list(c_enc.orig_lengths())
table_pointer_maps = [
{
i: [
idx
for col in desc['table_to_columns'][str(i)]
for idx in column_pointer_maps[batch_idx][col]
] + list(range(left + c_enc_lengths[batch_idx], right + c_enc_lengths[batch_idx]))
for i, (left, right) in enumerate(zip(t_boundaries_for_item, t_boundaries_for_item[1:]))
}
for batch_idx, (desc, t_boundaries_for_item) in enumerate(zip(descs, t_boundaries))
]
# Update each other using self-attention
# q_enc_new, c_enc_new, and t_enc_new are PackedSequencePlus with shape
# batch (=1) x length x recurrent_size
if self.batch_encs_update:
q_enc_new, c_enc_new, t_enc_new = self.encs_update(
descs, q_enc, c_enc, c_boundaries, t_enc, t_boundaries)
result = []
for batch_idx, desc in enumerate(descs):
if self.batch_encs_update:
q_enc_new_item = q_enc_new.select(batch_idx).unsqueeze(0)
c_enc_new_item = c_enc_new.select(batch_idx).unsqueeze(0)
t_enc_new_item = t_enc_new.select(batch_idx).unsqueeze(0)
else:
q_enc_selected = q_enc.select(batch_idx)
c_enc_selected = c_enc.select(batch_idx)
t_enc_selected = t_enc.select(batch_idx)
c_enc_selected = self.additional_enc(q_enc_selected, c_enc_selected)
t_enc_selected = self.additional_enc(q_enc_selected, t_enc_selected)
# q_lstmed = self.additional_lstm_question(q_enc_selected, _[batch_idx])
# c_lstmed = self.additional_lstm_columns(c_enc_selected, c_boundaries[batch_idx])
# t_lstmed = self.additional_lstm_tables(t_enc_selected, t_boundaries[batch_idx])
q_enc_new_item, c_enc_new_item, t_enc_new_item = \
self.encs_update.forward_unbatched(
desc,
q_enc_selected.unsqueeze(1),
c_enc_selected.unsqueeze(1),
c_boundaries[batch_idx],
t_enc_selected.unsqueeze(1),
t_boundaries[batch_idx])
memory = []
if 'question' in self.include_in_memory:
memory.append(q_enc_new_item)
if 'column' in self.include_in_memory:
memory.append(c_enc_new_item)
if 'table' in self.include_in_memory:
memory.append(t_enc_new_item)
memory = torch.cat(memory, dim=1)
result.append(SpiderEncoderState(
state=None,
memory=memory,
question_memory=q_enc_new_item,
schema_memory=torch.cat((c_enc_new_item, t_enc_new_item), dim=1),
# TODO: words should match memory
words=desc['question'],
pointer_memories={
'column': c_enc_new_item,
'table': torch.cat((c_enc_new_item, t_enc_new_item), dim=1),
},
pointer_maps={
'column': column_pointer_maps[batch_idx],
'table': table_pointer_maps[batch_idx],
}
))
return result
|
nilq/baby-python
|
python
|
import logging
import numpy as np
from rasterio.dtypes import dtype_ranges
import warnings
logger = logging.getLogger(__name__)
def execute(
mp,
resampling="nearest",
band_indexes=None,
td_matching_method="gdal",
td_matching_max_zoom=None,
td_matching_precision=8,
td_fallback_to_higher_zoom=False,
clip_pixelbuffer=0,
scale_ratio=1.0,
scale_offset=0.0,
clip_to_output_dtype=None,
**kwargs,
):
"""
Convert and optionally clip input raster or vector data.
Inputs
------
inp
Raster or vector input.
clip (optional)
Vector data used to clip output.
Parameters
----------
resampling : str (default: 'nearest')
Resampling used when reading from TileDirectory.
band_indexes : list
Bands to be read.
td_matching_method : str ('gdal' or 'min') (default: 'gdal')
gdal: Uses GDAL's standard method. Here, the target resolution is
calculated by averaging the extent's pixel sizes over both x and y
axes. This approach returns a zoom level which may not have the
best quality but will speed up reading significantly.
min: Returns the zoom level which matches the minimum resolution of the
extents four corner pixels. This approach returns the zoom level
with the best possible quality but with low performance. If the
tile extent is outside of the destination pyramid, a
TopologicalError will be raised.
td_matching_max_zoom : int (optional, default: None)
If set, it will prevent reading from zoom levels above the maximum.
td_matching_precision : int (default: 8)
Round resolutions to n digits before comparing.
td_fallback_to_higher_zoom : bool (default: False)
In case no data is found at zoom level, try to read data from higher
zoom levels. Enabling this setting can lead to many IO requests in
areas with no data.
clip_pixelbuffer : int
Use pixelbuffer when clipping output by geometry. (default: 0)
scale_ratio : float
Scale factor for input values. (default: 1.0)
scale_offset : float
Offset to add to input values. (default: 0.0)
clip_to_output_dtype : str
Clip output values to range of given dtype. (default: None)
Output
------
np.ndarray
"""
# read clip geometry
if "clip" in mp.params["input"]:
clip_geom = mp.open("clip").read()
if not clip_geom:
logger.debug("no clip data over tile")
return "empty"
else:
clip_geom = []
if "raster" in mp.input: # pragma: no cover
warnings.warn(
UserWarning(
"'raster' input name in the mapchete configuration is deprecated and has to be named 'inp'"
)
)
inp_key = "raster"
else:
inp_key = "inp"
with mp.open(inp_key) as inp:
if inp.is_empty():
return "empty"
logger.debug("reading input data")
input_data = inp.read(
indexes=band_indexes,
resampling=resampling,
matching_method=td_matching_method,
matching_max_zoom=td_matching_max_zoom,
matching_precision=td_matching_precision,
fallback_to_higher_zoom=td_fallback_to_higher_zoom,
)
if isinstance(input_data, np.ndarray):
input_type = "raster"
elif isinstance(input_data, list):
input_type = "vector"
else: # pragma: no cover
raise TypeError(
"input data type for this process has to either be a raster or a vector "
"dataset"
)
if input_type == "raster":
if scale_offset != 0.0:
logger.debug("apply scale offset %s", scale_offset)
input_data = input_data.astype("float64", copy=False) + scale_offset
if scale_ratio != 1.0:
logger.debug("apply scale ratio %s", scale_ratio)
input_data = input_data.astype("float64", copy=False) * scale_ratio
if (
scale_offset != 0.0 or scale_ratio != 1.0
) and clip_to_output_dtype in dtype_ranges:
logger.debug("clip to output dtype ranges")
input_data.clip(*dtype_ranges[clip_to_output_dtype], out=input_data)
if clip_geom:
logger.debug("clipping output with geometry")
# apply original nodata mask and clip
return mp.clip(input_data, clip_geom, clip_buffer=clip_pixelbuffer)
else:
return input_data
elif input_type == "vector":
if clip_geom: # pragma: no cover
raise NotImplementedError("clipping vector data is not yet implemented")
else:
logger.debug(f"writing {len(input_data)} features")
return input_data
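# A small standalone sketch (not part of the original process): it re-creates the
# scale/offset/clip arithmetic from execute() on a dummy array so the behaviour can
# be checked without a mapchete configuration; the sample values are made up.
if __name__ == "__main__":
    _demo = np.array([[0, 1000], [3000, 60000]], dtype="uint16")
    # offset is applied first, then the ratio, mirroring the branches above
    _scaled = (_demo.astype("float64", copy=False) + 5.0) * 0.1  # scale_offset=5.0, scale_ratio=0.1
    _scaled.clip(*dtype_ranges["uint8"], out=_scaled)            # clip_to_output_dtype="uint8"
    print(_scaled)  # values outside the uint8 range are clipped to [0, 255]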
|
nilq/baby-python
|
python
|
from Classes.Wrappers.PlayerDisplayData import PlayerDisplayData
class BattleLogPlayerEntry:
def encode(calling_instance, fields):
pass
def decode(calling_instance, fields):
fields["BattleLogEntry"] = {}
fields["BattleLogEntry"]["Unkown1"] = calling_instance.readVInt()
fields["BattleLogEntry"]["Unkown2"] = calling_instance.readLong()
fields["BattleLogEntry"]["Unkown3"] = calling_instance.readVInt()
fields["BattleLogEntry"]["Unkown4"] = calling_instance.readBoolean()
countVal = calling_instance.readVInt()
fields["BattleLogEntry"]["Unkown5"] = countVal
fields["BattleLogEntry"]["Entries"] = {}
for i in range(countVal):
fields["BattleLogEntry"]["Entries"][str(i)] = {}
fields["BattleLogEntry"]["Entries"][str(i)]["Unknown1"] = calling_instance.readDataReference()
fields["BattleLogEntry"]["Entries"][str(i)]["Unknown2"] = calling_instance.readVInt()
fields["BattleLogEntry"]["Entries"][str(i)]["Unknown3"] = calling_instance.readVInt()
fields["BattleLogEntry"]["Entries"][str(i)]["Unknown4"] = calling_instance.readVInt()
fields["BattleLogEntry"]["Unkown6"] = calling_instance.readVInt()
PlayerDisplayData.decode(calling_instance, fields)
|
nilq/baby-python
|
python
|
# coding: UTF-8
import numpy as np
import chainer
from chainer import Variable,Chain
import chainer.links as L
import chainer.functions as F
import chainer.optimizers as O
# model
class MyChain(Chain):
def __init__(self):
super().__init__(
l1 = L.Linear(1,2),
l2 = L.Linear(2,1),
)
def __call__(self, x):
h = F.sigmoid(self.l1(x))
return self.l2(h)
# Optimizer
model = MyChain()
optimizer = O.SGD()  # optimization algorithm: SGD (stochastic gradient descent)
# optimizer = O.Adam()  # optimization algorithm: Adam
optimizer.setup(model)
# execution
input_array = np.array([[1]], dtype=np.float32)
answer_array = np.array([[1]], dtype=np.float32)
x = Variable(input_array)
t = Variable(answer_array)
model.cleargrads()  # reset the model's gradients
y = model(x)
loss = F.mean_squared_error(y, t)  # squared error between prediction y and target t
loss.backward()  # backpropagate the error
# compare the first layer's weights before and after the update
print(model.l1.W.data)
optimizer.update()
print(model.l1.W.data)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Script Name:
Author: Do Trinh/Jimmy - 3D artist.
Description:
"""
# -------------------------------------------------------------------------------------------------------------
""" Import """
import argparse
from PLM.cores.Errors import VersionNotFoundException
from PLM import VERSION_LOG
from difflib import unified_diff
from pyPLM.loggers import DamgLogger
logger = DamgLogger(__name__, filepth=VERSION_LOG)
class DiscardDefaultIfSpecifiedAppendAction(argparse._AppendAction):
"""
Fixes bug http://bugs.python.org/issue16399 for 'append' action
"""
def __call__(self, parser, namespace, values, option_string=None):
if getattr(self, "_discarded_default", None) is None:
setattr(namespace, self.dest, [])
self._discarded_default = True # pylint: disable=attribute-defined-outside-init
super().__call__(parser, namespace, values, option_string=None)
class ConfiguredFile:
def __init__(self, path, versionconfig):
self.path = path
self._versionconfig = versionconfig
def should_contain_version(self, version, context):
"""
Raise VersionNotFound if the version number isn't present in this file.
Return normally if the version number is in fact present.
"""
context["current_version"] = self._versionconfig.serialize(version, context)
search_expression = self._versionconfig.search.format(**context)
if self.contains(search_expression):
return
# the `search` pattern did not match, but the original supplied
# version number (representing the same version part values) might
# match instead.
# check whether `search` isn't customized, i.e. should match only
# very specific parts of the file
search_pattern_is_default = self._versionconfig.search == "{current_version}"
if search_pattern_is_default and self.contains(version.original):
# original version is present and we're not looking for something
# more specific -> this is accepted as a match
return
# version not found
raise VersionNotFoundException("Did not find '{}' in file: '{}'".format(search_expression, self.path))
def contains(self, search):
if not search:
return False
with open(self.path, "rt", encoding="utf-8") as f:
search_lines = search.splitlines()
lookbehind = []
for lineno, line in enumerate(f.readlines()):
lookbehind.append(line.rstrip("\n"))
if len(lookbehind) > len(search_lines):
lookbehind = lookbehind[1:]
if (search_lines[0] in lookbehind[0] and search_lines[-1] in lookbehind[-1] and search_lines[1:-1] == lookbehind[1:-1]):
logger.info("Found '%s' in %s at line %s: %s", search, self.path, lineno - (len(lookbehind) - 1),
line.rstrip(),)
return True
return False
def replace(self, current_version, new_version, context, dry_run):
with open(self.path, "rt", encoding="utf-8") as f:
file_content_before = f.read()
file_new_lines = f.newlines
context["current_version"] = self._versionconfig.serialize(current_version, context)
context["new_version"] = self._versionconfig.serialize(new_version, context)
search_for = self._versionconfig.search.format(**context)
replace_with = self._versionconfig.replace.format(**context)
file_content_after = file_content_before.replace(search_for, replace_with)
if file_content_before == file_content_after:
# TODO expose this to be configurable
file_content_after = file_content_before.replace(current_version.original, replace_with)
if file_content_before != file_content_after:
logger.info("%s file %s:", "Would change" if dry_run else "Changing", self.path)
logger.info("\n".join(list(unified_diff(file_content_before.splitlines(), file_content_after.splitlines(),
lineterm="", fromfile="a/" + self.path, tofile="b/" + self.path,))))
else:
logger.info("%s file %s", "Would not change" if dry_run else "Not changing", self.path)
if not dry_run:
with open(self.path, "wt", encoding="utf-8", newline=file_new_lines) as f:
f.write(file_content_after)
def __str__(self):
return self.path
def __repr__(self):
return "<bumpversion.ConfiguredFile:{}>".format(self.path)
# -------------------------------------------------------------------------------------------------------------
# Created by Trinh Do on 5/6/2020 - 3:13 AM
# © 2017 - 2020 DAMGteam. All rights reserved
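# A minimal usage sketch (not part of the original module): _VersionConfigStub and
# the file name "demo_version.txt" are made up; it only shows how ConfiguredFile
# drives the search/replace cycle, assuming the PLM imports above are available.
if __name__ == "__main__":
    class _VersionConfigStub:
        search = "{current_version}"
        replace = "{new_version}"
        def serialize(self, version, context):
            return str(version)

    with open("demo_version.txt", "w", encoding="utf-8") as fh:
        fh.write("version = 1.2.3\n")
    cf = ConfiguredFile("demo_version.txt", _VersionConfigStub())
    cf.should_contain_version("1.2.3", context={})          # returns silently when found
    cf.replace("1.2.3", "1.2.4", context={}, dry_run=True)  # only logs the unified diff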
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Huawei.VRP config normalizer
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from noc.core.confdb.normalizer.base import BaseNormalizer, match, ANY, REST
from noc.core.confdb.syntax.defs import DEF
from noc.core.confdb.syntax.patterns import IF_NAME, BOOL
class VRPNormalizer(BaseNormalizer):
SYNTAX = [
DEF(
"interfaces",
[
DEF(
IF_NAME,
[
DEF(
"bpdu",
[
DEF(
BOOL,
required=False,
name="enabled",
gen="make_interface_ethernet_bpdu",
)
],
)
],
multi=True,
name="interface",
)
],
)
]
@match("sysname", ANY)
def normalize_hostname(self, tokens):
yield self.make_hostname(tokens[1])
@match("undo", "http", "server", "enable")
def normalize_http_server(self, tokens):
yield self.make_protocols_http()
@match("undo", "http", "secure-server", "enable")
def normalize_https_server(self, tokens):
yield self.make_protocols_https()
@match("aaa", "local-user", ANY, "privilege", "level", ANY)
def normalize_username_access_level(self, tokens):
yield self.make_user_class(username=tokens[2], class_name="level-%s" % tokens[5])
@match("aaa", "local-user", ANY, "password", REST)
def normalize_username_password(self, tokens):
yield self.make_user_encrypted_password(username=tokens[2], password=" ".join(tokens[4:]))
@match("vlan", "batch", REST)
def normalize_vlan_id_batch(self, tokens):
for vlan in tokens[2:]:
yield self.make_vlan_id(vlan_id=vlan)
@match("vlan", ANY)
def normalize_vlan_id(self, tokens):
yield self.make_vlan_id(vlan_id=tokens[1])
@match("vlan", ANY, "description", REST)
def normalize_vlan_description(self, tokens):
yield self.make_vlan_description(vlan_id=tokens[1], description=" ".join(tokens[3:]))
@match("interface", ANY)
def normalize_interface(self, tokens):
if_name = self.interface_name(tokens[1])
yield self.make_interface(interface=if_name)
@match("interface", ANY, "description", REST)
def normalize_interface_description(self, tokens):
yield self.make_interface_description(
interface=self.interface_name(tokens[1]), description=" ".join(tokens[2:])
)
@match("interface", ANY, "port-security", "max-mac-num", ANY)
def normalize_port_security(self, tokens):
yield self.make_unit_port_security_max_mac(
interface=self.interface_name(tokens[1]), limit=tokens[4]
)
@match("interface", ANY, "broadcast-suppression", ANY)
def normalize_port_storm_control_broadcast(self, tokens):
yield self.make_interface_storm_control_broadcast_level(
interface=self.interface_name(tokens[1]), level=tokens[3]
)
@match("interface", ANY, "multicast-suppression", ANY)
def normalize_port_storm_control_multicast(self, tokens):
yield self.make_interface_storm_control_multicast_level(
interface=self.interface_name(tokens[1]), level=tokens[3]
)
@match("interface", ANY, "unicast-suppression", ANY)
def normalize_port_storm_control_unicast(self, tokens):
yield self.make_interface_storm_control_unicast_level(
interface=self.interface_name(tokens[1]), level=tokens[3]
)
@match("interface", ANY, "stp", "cost", ANY)
def normalize_stp_cost(self, tokens):
yield self.make_spanning_tree_interface_cost(
interface=self.interface_name(tokens[1]), cost=tokens[4]
)
@match("interface", ANY, "port", "hybrid", "pvid", "vlan", ANY)
def normalize_switchport_untagged(self, tokens):
if_name = self.interface_name(tokens[1])
yield self.make_switchport_untagged(interface=if_name, unit=if_name, vlan_filter=tokens[6])
@match("interface", ANY, "port", "trunk", "allow-pass", "vlan", REST)
def normalize_switchport_tagged(self, tokens):
if_name = self.interface_name(tokens[1])
yield self.make_switchport_tagged(
interface=if_name,
unit=if_name,
vlan_filter=" ".join(tokens[6:]).replace(" to ", "-").replace(" ", ","),
)
@match("interface", ANY, "undo", "negotiation", "auto")
def normalize_interface_negotiation(self, tokens):
yield self.make_interface_ethernet_autonegotiation(
interface=self.interface_name(tokens[1]), mode="manual"
)
@match("interface", ANY, "bpdu", "enable")
def normalize_interface_bpdu(self, tokens):
yield self.make_interface_ethernet_bpdu(
interface=self.interface_name(tokens[1]), enabled=True
)
@match("interface", ANY, "loopback-detect", "enable")
def normalize_interface_no_loop_detect(self, tokens):
if not self.get_context("loop_detect_disabled"):
if_name = self.interface_name(tokens[1])
yield self.make_loop_detect_interface(interface=if_name)
@match("enable", "lldp")
def normalize_enable_lldp(self, tokens):
self.set_context("lldp_disabled", False)
yield self.make_global_lldp_status(status=True)
@match("enable", "stp")
def normalize_enable_stp(self, tokens):
self.set_context("stp_disabled", False)
yield self.make_global_stp_status(status=True)
@match("interface", ANY, "undo", "lldp", "enable")
def normalize_interface_lldp_enable(self, tokens):
yield self.make_lldp_interface_disable(interface=self.interface_name(tokens[1]))
@match("interface", ANY, "stp", "disable")
def normalize_interface_stp_status(self, tokens):
yield self.make_spanning_tree_interface_disable(interface=self.interface_name(tokens[1]))
@match("interface", ANY, "stp", "bpdu-filter", "enable")
def normalize_interface_stp_bpdu_filter(self, tokens):
yield self.make_spanning_tree_interface_bpdu_filter(
interface=self.interface_name(tokens[1]), enabled=True
)
@match("interface", ANY, "ip", "address", ANY, ANY)
def normalize_vlan_ip(self, tokens):
if_name = self.interface_name(tokens[1])
yield self.make_unit_inet_address(
interface=if_name, unit=if_name, address=self.to_prefix(tokens[4], tokens[5])
)
@match("ip", "route-static", ANY, ANY, ANY)
def normalize_default_gateway(self, tokens):
yield self.make_inet_static_route_next_hop(
route=self.to_prefix(tokens[2], tokens[3]), next_hop=tokens[4]
)
|
nilq/baby-python
|
python
|
from django.contrib import admin
from . import models
# Register your models here.
admin.site.register(models.Student)
admin.site.register(models.Subject)
admin.site.register(models.Assignment)
admin.site.register(models.Submission)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from django.core.management import call_command
from django.db import migrations
def create_cache_table(apps, schema_editor):
"""
    Create the cache table.
"""
call_command("createcachetable", "account_cache")
class Migration(migrations.Migration):
dependencies = [
("account", "0003_verifyinfo"),
]
operations = [migrations.RunPython(create_cache_table)]
|
nilq/baby-python
|
python
|
from django.contrib.auth import get_user_model
from questionnaire.models import Questionnaire
from functional_tests.base import FunctionalTest
from functional_tests.pages.qcat import HomePage
from functional_tests.pages.questionnaire import QuestionnaireStepPage
from functional_tests.pages.technologies import TechnologiesNewPage, \
Technologies2018NewPage, TechnologiesDetailPage, TechnologiesEditPage, \
TechnologiesStepPage
from functional_tests.pages.wocat import AddDataPage
class QuestionnaireTest(FunctionalTest):
fixtures = [
'global_key_values',
'technologies',
]
def test_questionnaire_is_available(self):
# User logs in and goes to the home page.
home_page = HomePage(self)
home_page.open(login=True)
# User clicks a link to add data in the top menu.
home_page.click_add_slm_data()
# User clicks a link to add a new Technology.
add_page = AddDataPage(self)
add_page.click_add_technology()
# User sees an empty edit page and the categories of the Technology.
edit_page = Technologies2018NewPage(self)
edit_page.close_updated_edition_warning()
progress_indicators = edit_page.get_progress_indicators()
categories = edit_page.CATEGORIES
assert len(progress_indicators) == len(categories)
# All the categories are listed.
for __, category in categories:
edit_page.get_category_by_name(category)
# User edits the first category.
edit_page.click_edit_category(categories[0][0])
# The focal point is available
step_page = QuestionnaireStepPage(self)
step_page.is_focal_point_available()
# User saves the first category.
step_page.submit_step()
# All the categories are still there.
progress_indicators = edit_page.get_progress_indicators()
categories = edit_page.CATEGORIES
assert len(progress_indicators) == len(categories)
for __, category in categories:
edit_page.get_category_by_name(category)
def test_translation(self):
# User logs in and goes to the Edit page.
page = Technologies2018NewPage(self)
page.open(login=True)
page.close_updated_edition_warning()
# User sees the category names in English.
for __, category in page.CATEGORIES:
page.get_category_by_name(category)
# User changes the language.
page.change_language('es')
page.close_updated_edition_warning()
# User sees the category names in Spanish.
for __, category in page.CATEGORIES_TRANSLATED:
page.get_category_by_name(category)
class QuestionnaireFixturesTest(FunctionalTest):
fixtures = [
'global_key_values',
'technologies',
'technologies_questionnaires',
]
def test_show_edition_update_warning(self):
# User logs in and goes to the page to create a new Technology
page = Technologies2018NewPage(self)
page.open(login=True)
# There is a warning about updated editions.
assert page.has_updated_edition_warning()
page.close_updated_edition_warning()
# After creating a draft version, the warning is not there anymore.
page.click_edit_category('tech__1')
step_page = QuestionnaireStepPage(self)
step_page.submit_step()
assert not page.has_updated_edition_warning()
def test_redirect_edit_public_version(self):
# User is the compiler of technology "tech_1"
user = get_user_model().objects.get(pk=101)
identifier = 'tech_1'
title = 'WOCAT Technology 1'
# User logs in and goes to the details of a questionnaire
detail_page = TechnologiesDetailPage(self)
detail_page.route_kwargs = {'identifier': identifier}
detail_page.open(login=True, user=user)
assert detail_page.has_text(title)
# User goes to the edit page of the questionnaire and sees he has been
# redirected to the detail page.
edit_page = TechnologiesEditPage(self)
edit_page.route_kwargs = {'identifier': identifier}
edit_page.open()
assert self.browser.current_url == detail_page.get_url()
# User tries to open the URL of a step of this public questionnaire and
# sees he has been redirected as well.
step_page = TechnologiesStepPage(self)
step_page.route_kwargs = {
'identifier': identifier,
'step': 'tech__1'
}
step_page.open()
assert self.browser.current_url == detail_page.get_url()
# User starts a new questionnaire
new_page = Technologies2018NewPage(self)
new_page.open()
new_page.close_updated_edition_warning()
new_page.click_edit_category('tech__1')
step_page = TechnologiesStepPage(self)
step_page.submit_step()
# For draft versions, the edit URLs can be accessed
draft_identifier = Questionnaire.objects.get(status=1)
edit_page.route_kwargs = {'identifier': draft_identifier}
edit_page.open()
assert self.browser.current_url == edit_page.get_url()
step_page.route_kwargs = {
'identifier': draft_identifier,
'step': 'tech__1'
}
step_page.open()
assert self.browser.current_url == step_page.get_url()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
from models.user import User
from database import session
def create_user(login_session):
"""Create a new user from login session and return his id."""
newUser = User(name=login_session["username"],
email=login_session["email"],
picture=login_session["picture"])
session.add(newUser)
session.commit()
user = session.query(User).filter_by(email=login_session["email"]).one_or_none()
return user.id
def get_user_info(user_id):
"""Return user object from his id."""
user = session.query(User).filter_by(id=user_id).one_or_none()
return user
def get_user_id(email):
"""Return user id from his email."""
    user = session.query(User).filter_by(email=email).one_or_none()
    return user.id if user else None
|
nilq/baby-python
|
python
|
from tkinter import *
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
from PIL import ImageTk, Image
from PyDictionary import PyDictionary
from googletrans import Translator
root = tk.Tk()
root.title("Yanis's Dictionary")
root.geometry('600x300')
root['bg'] = 'white'
frame = Frame(root,width=200,height=300,borderwidth=1,relief=RIDGE)
frame.grid(sticky="W")
def get_meaning():
    output.delete(1.0, 'end')
    dictionary = PyDictionary()
    get_word = entry.get()
    selected_language = language.get()
    if get_word == "":
        messagebox.showerror('Dictionary', 'Please enter a word')
    elif selected_language == 'English-to-English':
        d = dictionary.meaning(get_word)
        output.insert('end', d['Noun'])
    elif selected_language == 'English-to-Arabic':
        translator = Translator()
        t = translator.translate(get_word, dest='ar')  # 'ar' is the googletrans code for Arabic
        output.insert('end', t.text)
def quit():
root.destroy()
img = ImageTk.PhotoImage(Image.open('dict.png'))
pic = Label(root, image = img)
pic.place(x=40,y=70)
word = Label(root,text="Enter Word",bg="white",font=('verdana',10,'bold'))
word.place(x=250,y=23)
a = tk.StringVar()
language = ttk.Combobox(root, width=20, textvariable=a, state='readonly', font=('verdana', 10, 'bold'))
language['values'] = (
    'English-to-English',
    'English-to-Arabic',
)
language.place(x=380, y=10)
language.current(0)
entry = Entry(root,width=50,borderwidth=2,relief=RIDGE)
entry.place(x=250,y=50)
search = Button(root,text="Search",font=('verdana',10,'bold'),cursor="hand2",relief=RIDGE,command=get_meaning)
search.place(x=430,y=80)
quit = Button(root,text="Quit",font=('verdana',10,'bold'),cursor="hand2",relief=RIDGE,command=quit)
quit.place(x=510,y=80)
meaning = Label(root,text="Meaning",bg="white",font=('verdana',15,'bold'))
meaning.place(x=230,y=120)
output = Text(root,height=8,width=40,borderwidth=2,relief=RIDGE)
output.place(x=230,y=160)
root.mainloop()
|
nilq/baby-python
|
python
|
import socket
import threading

HOST = '127.0.0.1'
PORT = 9999

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
print('Connect Success!....')

def sendingMsg():
    while True:
        data = input('')
        sock.send(data.encode())
    sock.close()

def gettingMsg():
    while True:
        data = sock.recv(1024)
        print('From Server :', repr(data))
    sock.close()

threading.Thread(target=sendingMsg, daemon=True).start()
threading.Thread(target=gettingMsg, daemon=True).start()
while True:
    pass
|
nilq/baby-python
|
python
|
# Generated by Django 3.1.1 on 2020-10-30 15:53
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('grant_applications', '0009_auto_20201030_1209'),
]
operations = [
migrations.AddField(
model_name='grantapplication',
name='export_experience_description',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='grantapplication',
name='export_regions',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(choices=[('africa', 'Africa'), ('asia', 'Asia'), ('australasia', 'Australasia'), ('europe', 'Europe'), ('middle east', 'Middle East'), ('north america', 'North America'), ('south america', 'South America')], max_length=50), null=True, size=None),
),
migrations.AddField(
model_name='grantapplication',
name='export_strategy',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='grantapplication',
name='has_exported_in_last_12_months',
field=models.BooleanField(null=True),
),
migrations.AddField(
model_name='grantapplication',
name='in_contact_with_dit_trade_advisor',
field=models.BooleanField(null=True),
),
migrations.AddField(
model_name='grantapplication',
name='markets_intending_on_exporting_to',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(choices=[('existing', 'existing markets'), ('new', 'new markets not exported to in the last 12 months')], max_length=10), null=True, size=None),
),
]
|
nilq/baby-python
|
python
|
# select CALOL1_KEY from CMS_TRG_L1_CONF.L1_TRG_CONF_KEYS where ID='collisions2016_TSC/v206' ;
import re
import os, sys, shutil
import subprocess
import six
"""
A simple helper script that provided with no arguments dumps a list of
top-level keys, and provided with any key from this list as an argument,
dumps a list of sub-keys and saves corresponding configuration to local
files.
"""
# connection string
sqlplusCmd = ['env',
'sqlplus',
'-S',
'cms_trg_r/X3lmdvu4@cms_omds_adg'
]
if hash( sqlplusCmd[-1] ) != 1687624727082866629:
print 'Do not forget to plug password to this script'
print 'Exiting.'
exit(0)
myre = re.compile(r'(ID)|(-{80})')
# if no arguments are given, query the top level keys only and exit
if len(sys.argv) == 1:
sqlplus = subprocess.Popen(sqlplusCmd, shell=False, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
print 'No args specified, querying and printing only top-level keys:'
for line in re.split('\n',sqlplus.communicate('select unique ID from CMS_TRG_L1_CONF.CALOL2_KEYS;')[0]):
if myre.search(line) == None :
print line
print 'Pick any of these keys as an argument next time you run this script'
exit(0)
# if an argument is given query the whole content of the key
key = sys.argv[1]
sqlplus = subprocess.Popen(sqlplusCmd,
shell=False,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE
)
queryKey = "select CALOL1_KEY from CMS_TRG_L1_CONF.L1_TRG_CONF_KEYS where ID='{0}'".format(key)
for line in re.split('\n',sqlplus.communicate(queryKey+';')[0]):
print line
if re.search('/v',line) :
key=line
print key
queryKeys = """
select
HW, ALGO, INFRA
from
CMS_TRG_L1_CONF.CALOL1_KEYS
where
ID = '{0}'
""".format(key)
# write results for specific configs to the following files
batch = {
'HW' : 'hw.xml',
'ALGO' : 'algo.xml',
'INFRA' : 'infra.xml'
}
# do the main job here
for config,fileName in six.iteritems(batch):
sqlplus = subprocess.Popen(sqlplusCmd, shell=False, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
with open(fileName,'w') as f:
query = """
select
CONF.CONF
from
CMS_TRG_L1_CONF.CALOL1_CLOBS CONF, ({0}) KEY
where
CONF.ID = KEY.{1}
""".format(queryKeys, config)
for line in re.split('\n',sqlplus.communicate('\n'.join(['set linesize 200', 'set longchunksize 2000000 long 2000000 pages 0',query+';']))[0]):
f.write('\n')
f.write(line)
f.close()
sqlplus = subprocess.Popen(sqlplusCmd, shell=False, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
print 'Following keys were found:'
for line in re.split('\n',sqlplus.communicate(queryKeys+';')[0]):
print line
print 'Results are saved in ' + ' '.join(batch.values()) + ' files'
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-11-23 10:18
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('wdapp', '0011_auto_20181123_0955'),
]
operations = [
migrations.RemoveField(
model_name='business',
name='slug',
),
migrations.RemoveField(
model_name='company',
name='slug',
),
migrations.RemoveField(
model_name='trip',
name='slug',
),
]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Configure logging and attach a request_id to each record.
:create: 2018/9/23
:copyright: smileboywtu
"""
import datetime
import logging
import sys
import uuid
from logging.handlers import TimedRotatingFileHandler
from tornado import gen
from tornado.log import access_log
from tornado.stack_context import run_with_stack_context, StackContext
class RequestIDContext:
class Data:
def __init__(self, request_id=0):
self.request_id = request_id
def __eq__(self, other):
return self.request_id == other.request_id
_data = Data()
def __init__(self, request_id):
self.current_data = RequestIDContext.Data(request_id=request_id)
self.old_data = None
def __enter__(self):
if RequestIDContext._data == self.current_data:
return
        # store the previous context data under the name __exit__ restores
        self.old_data = RequestIDContext.Data(
            request_id=RequestIDContext._data.request_id,
        )
        RequestIDContext._data = self.current_data
def __exit__(self, exc_type, exc_value, traceback):
if self.old_data is not None:
RequestIDContext._data = self.old_data
def with_request_id(func):
@gen.coroutine
def _wrapper(*args, **kwargs):
request_id = uuid.uuid4().hex
yield run_with_stack_context(StackContext(lambda: RequestIDContext(request_id)), lambda: func(*args, **kwargs))
return _wrapper
def log_function(handler):
"""
log function to log access request information
regex parse: (?<remote_ip>[\d.]+) [-\w]+ [-\w]+ \[(?<request_date>[\d\/:\s\+]+)\] \"
(?<http_method>[A-Z]+) (?<http_uri>[\/a-zA-Z\.]+) (?<http_version>[A-Z\/\d\.]+)\"
(?<status_code>[\d]+) (?<length>[\d]+)
(?<request_time>[\d\.]+) (?<request_id>[\d\w]+) [\w\-]+ \[(?<request_body>.+)\] -
:param handler:
:return:
"""
_log_meta = dict(
app_id="app-up",
user="-",
username="-",
response_code="-",
http_uri=handler.request.uri,
http_status=handler.get_status(),
http_method=handler.request.method,
http_version=handler.request.version,
remote_ip=handler.request.remote_ip,
request_time=1000.0 * handler.request.request_time(),
request_id=RequestIDContext._data.request_id,
response_length=handler.request.headers.get("Content-Length", 0),
request_args=handler.request.arguments,
request_date=datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=8))).strftime("%x:%H:%M:%S %z")
)
if handler.get_status() < 400:
log_method = access_log.info
elif handler.get_status() < 500:
log_method = access_log.warning
else:
log_method = access_log.error
log_method("%(remote_ip)s %(user)s %(username)s [%(request_date)s] \"%"
"(http_method)s %(http_uri)s %(http_version)s\" %(http_status)s "
"%(response_length)s %(request_time).2f %(request_id)s %(app_id)s [%(request_args)s] -", _log_meta)
class RequestIDFilter(logging.Filter):
def filter(self, record):
record.request_id = RequestIDContext._data.request_id
return True
def logger_config(name, path, level, log_format, rotate_interval, backup_count,
debug=False):
"""
    Configure a log handler for the named logger.
    :param name: logger name
    :param path: log file path
    :param level: log level
    :param log_format: log record format
    :param rotate_interval: rotation interval in days for the timed file handler
    :param backup_count: number of rotated log files to keep
    :return:
"""
logger = logging.getLogger(name)
logger.addFilter(RequestIDFilter())
handler = TimedRotatingFileHandler(
path, when='D', interval=rotate_interval, backupCount=backup_count,
encoding="utf-8") \
if not debug else \
logging.StreamHandler(sys.stdout)
# handler = RotatingFileHandler(path, "a", maxBytes=max_bytes, backupCount=backup_count, encoding="utf-8") \
# if not debug else \
# logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(log_format)
handler.setFormatter(formatter)
log_level = getattr(logging, level)
logger.setLevel(log_level)
logger.addHandler(handler)
def configure_tornado_logger(path, interval, backup_count,
level="INFO",
name="tornado.application",
debug=False):
"""
## read doc:
https://docs.python.org/3/library/logging.html#logrecord-attributes
tornado web application log_format:
%(asctime)s %(levelname)s %(request_id)-%(process)d %(filename)s:%(lineno)d -- %(message)s
:param path: log file path
:param level: log level
:param name: log name
:param debug: if debug, show logs on stdout
:return:
"""
if name == "tornado.access":
log_format = "[%(name)s] %(message)s"
elif name == "plugins":
log_format = "[%(name)s] %(asctime)s %(levelname)s -- %(message)s"
else:
log_format = "[%(name)s] %(asctime)s %(levelname)s %(request_id)s %(filename)s:%(lineno)d -- %(message)s"
return logger_config(
name=name,
path=path,
level=level,
log_format=log_format,
# max_bytes=100 * 1024 * 1024,
rotate_interval=interval,
backup_count=backup_count,
debug=debug
)
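# A minimal usage sketch (not part of the original module): the file name and log
# message are made up, and it assumes a Tornado version that still ships
# tornado.stack_context; debug=True routes records to stdout instead of a file.
if __name__ == "__main__":
    configure_tornado_logger(path="app.log", interval=1, backup_count=3,
                             level="INFO", name="tornado.application", debug=True)
    logging.getLogger("tornado.application").info("request handled")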
|
nilq/baby-python
|
python
|
# coding: utf-8
# Text similarity computation based on word segmentation,
# using jieba to segment and analyse Chinese text.
import jieba
import jieba.posseg as pseg
from jieba import analyse
import numpy as np
import os
'''
Text similarity computation: implementations of several common algorithms.
'''
class TextSimilarity(object):
def __init__(self,file_a,file_b):
'''
        Initialize the class from two text files.
'''
str_a = ''
str_b = ''
        if not os.path.isfile(file_a):
            print(file_a, "is not a file")
            return
        elif not os.path.isfile(file_b):
            print(file_b, "is not a file")
            return
else:
with open(file_a,'r') as f:
for line in f.readlines():
str_a += line.strip()
f.close()
with open(file_b,'r') as f:
for line in f.readlines():
str_b += line.strip()
f.close()
self.str_a = str_a
self.str_b = str_b
    # get the LCS (longest common subsequence) via dynamic programming
def lcs(self,str_a, str_b):
        lensum = float(len(str_a) + len(str_b))
        # build a 2-D table, like dp[lena+1][lenb+1], initialized to 0
        lengths = [[0 for j in range(len(str_b)+1)] for i in range(len(str_a)+1)]
        # enumerate(a) yields the index i together with the element a[i]
        for i, x in enumerate(str_a):
            for j, y in enumerate(str_b):
                if x == y:
                    lengths[i+1][j+1] = lengths[i][j] + 1
                else:
                    lengths[i+1][j+1] = max(lengths[i+1][j], lengths[i][j+1])
        # at this point we have the length of the longest common subsequence;
        # now walk the table backwards to recover the subsequence itself
        result = ""
        x, y = len(str_a), len(str_b)
        while x != 0 and y != 0:
            # the last character considered was certainly not used
            if lengths[x][y] == lengths[x-1][y]:
                x -= 1
            elif lengths[x][y] == lengths[x][y-1]:
                y -= 1
            else:  # the current character belongs to the subsequence, walking back to front
                assert str_a[x-1] == str_b[y-1]  # holds by construction, like if(a[x-1]==b[y-1])
                result = str_a[x-1] + result  # note: the result is built back to front
                x -= 1
                y -= 1
        # equivalent to the code above:
        # if str_a[x-1] == str_b[y-1]:
        #     result = str_a[x-1] + result  # built back to front
        #     x -= 1
        #     y -= 1
        longestdist = lengths[len(str_a)][len(str_b)]
        ratio = longestdist/min(len(str_a),len(str_b))
        # return {'longestdistance': longestdist, 'ratio': ratio, 'result': result}
        return ratio
def minimumEditDistance(self,str_a,str_b):
        '''
        Minimum edit distance: only three operations are allowed (substitution, insertion, deletion).
        '''
        lensum = float(len(str_a) + len(str_b))
        if len(str_a) > len(str_b):  # make str_a the shorter string
            str_a,str_b = str_b,str_a
        distances = range(len(str_a) + 1)  # initial distance row
        for index2,char2 in enumerate(str_b):  # str_b is at least as long as str_a
            newDistances = [index2+1]  # new distance row, seeded with the row index
            for index1,char1 in enumerate(str_a):
                if char1 == char2:  # characters match: no edit at index1, keep the previous distance
                    newDistances.append(distances[index1])
                else:  # take the cheapest of the three edits
                    newDistances.append(1 + min((distances[index1],      # deletion
                                                 distances[index1+1],    # insertion
                                                 newDistances[-1])))     # substitution
            distances = newDistances  # update the distance row
        mindist = distances[-1]
        ratio = (lensum - mindist)/lensum
        #return {'distance':mindist, 'ratio':ratio}
return ratio
def levenshteinDistance(self,str1, str2):
'''
        Edit distance (Levenshtein distance), used to compute text similarity.
'''
m = len(str1)
n = len(str2)
lensum = float(m + n)
d = []
for i in range(m+1):
d.append([i])
del d[0][0]
for j in range(n+1):
d[0].append(j)
for j in range(1,n+1):
for i in range(1,m+1):
if str1[i-1] == str2[j-1]:
d[i].insert(j,d[i-1][j-1])
else:
minimum = min(d[i-1][j]+1, d[i][j-1]+1, d[i-1][j-1]+2)
d[i].insert(j, minimum)
ldist = d[-1][-1]
ratio = (lensum - ldist)/lensum
#return {'distance':ldist, 'ratio':ratio}
return ratio
@classmethod
def splitWords(self,str_a):
'''
        Take a string and return the segmented result both as a space-separated string and as a set.
'''
wordsa=pseg.cut(str_a)
cuta = ""
seta = set()
for key in wordsa:
#print(key.word,key.flag)
cuta += key.word + " "
seta.add(key.word)
return [cuta, seta]
def JaccardSim(self,str_a,str_b):
'''
        Jaccard similarity coefficient.
        Similarity of sa and sb computed as len(sa & sb) / len(sa | sb).
'''
seta = self.splitWords(str_a)[1]
setb = self.splitWords(str_b)[1]
sa_sb = 1.0 * len(seta & setb) / len(seta | setb)
return sa_sb
def countIDF(self,text,topK):
        '''
        text: input string; topK: use the term frequencies of the top-k TF-IDF keywords
        to build the vector used for the similarity computation.
        return: term-frequency vector
        '''
        tfidf = analyse.extract_tags
        cipin = {}  # term-frequency counts after segmentation
        fenci = jieba.cut(text)
        # record the frequency of each term
        for word in fenci:
            if word not in cipin.keys():
                cipin[word] = 0
            cipin[word] += 1
        # extract the top keywords with TF-IDF, including each term's weight
        keywords = tfidf(text,topK,withWeight=True)
        ans = []
        # keywords.count(keyword) would give the keyword's frequency
        # help(tfidf)
        # collect the frequencies of the extracted keywords
        for keyword in keywords:
            #print(keyword ," ",cipin[keyword[0]])
            ans.append(cipin[keyword[0]])  # term frequency of each of the top-k terms
        return ans
@staticmethod
def cos_sim(a,b):
a = np.array(a)
b = np.array(b)
#return {"文本的余弦相似度:":np.sum(a*b) / (np.sqrt(np.sum(a ** 2)) * np.sqrt(np.sum(b ** 2)))}
return np.sum(a*b) / (np.sqrt(np.sum(a ** 2)) * np.sqrt(np.sum(b ** 2)))
@staticmethod
def eucl_sim(a,b):
a = np.array(a)
b = np.array(b)
        #print(a,b)
        #print(np.sqrt(np.sum((a-b)**2)))
        #return {"Euclidean similarity of the texts": 1/(1+np.sqrt(np.sum((a-b)**2)))}
        return 1/(1+np.sqrt(np.sum((a-b)**2)))  # Euclidean distance mapped into (0, 1]
@staticmethod
def pers_sim(a,b):
a = np.array(a)
b = np.array(b)
a = a - np.average(a)
b = b - np.average(b)
#print(a,b)
#return {"文本的皮尔森相似度:":np.sum(a*b) / (np.sqrt(np.sum(a ** 2)) * np.sqrt(np.sum(b ** 2)))}
return np.sum(a*b) / (np.sqrt(np.sum(a ** 2)) * np.sqrt(np.sum(b ** 2)))
def splitWordSimlaryty(self,str_a,str_b,topK = 20,sim =cos_sim):
        '''
        Similarity based on word segmentation; defaults to cos_sim (cosine similarity)
        computed over the topK (default 20) most frequent terms.
        '''
        # term-frequency vectors of the top-K most frequent terms
vec_a = self.countIDF(str_a,topK)
vec_b = self.countIDF(str_b,topK)
return sim(vec_a,vec_b)
    @staticmethod
    def string_hash(source):  # implementation of the string hash used for local hashing
        if source == "":
            return 0
        else:
            # ord() returns the character's Unicode code point
            x = ord(source[0]) << 7
            m = 1000003  # a large prime
            mask = 2 ** 128 - 1  # mask value
            for c in source:  # fold each character into the running hash
                x = ((x * m) ^ ord(c)) & mask
            x ^= len(source)
            if x == -1:  # -1 is reserved, remap it
                x = -2
            x = bin(x).replace('0b', '').zfill(64)[-64:]
            #print(source,x)
            return str(x)
def simhash(self,str_a,str_b):
'''
        Compute similarity using simhash (not implemented yet).
'''
pass
|
nilq/baby-python
|
python
|
import dash
import dash_bootstrap_components as dbc
import dash_html_components as html
from dash.dependencies import Input, Output, State

# the original snippet assumes an existing Dash `app`; create one here so the
# callback below can register against it
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
popover = html.Div(
[
html.P(
["Click on the word ", html.Span("popover", id="popover-target")]
),
dbc.Popover(
[
dbc.PopoverHeader("Popover header"),
dbc.PopoverBody("Popover body"),
],
id="popover",
is_open=False,
target="popover-target",
),
]
)
@app.callback(
Output("popover", "is_open"),
[Input("popover-target", "n_clicks")],
[State("popover", "is_open")],
)
def toggle_popover(n, is_open):
    if n:
        return not is_open
    return is_open

app.layout = popover

if __name__ == "__main__":
    app.run_server(debug=True)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
#
# Copyright 2018 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file defines an end-to-end test that validates core functionality
# of the bundled CLI tool. This requires a GCP project in which the
# test will create, connect to, and delete Datalab instances.
import argparse
import os
import random
import socket
import subprocess
import sys
import tempfile
import time
import unittest
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
import uuid
retry_count = 3
python_executable = sys.executable
connection_msg = (
'The connection to Datalab is now open and will '
'remain until this command is killed.')
readme_url_template = (
'http://localhost:{}/api/contents/datalab/docs/Readme.ipynb')
info_url_template = 'http://localhost:{}/_info'
readme_header = 'Guide to Google Cloud Datalab'
bastion_startup_template = """
# First, install fuser
apt-get update -yq && apt-get install -y psmisc
# Repeatedly try to run the SSH tunnel
while true; do
# Invoke gcloud in a separate process so we can check it
(gcloud compute ssh --zone {} --internal-ip \
--ssh-flag=-4 --ssh-flag=-N --ssh-flag=-L \
--ssh-flag=localhost:8080:localhost:8080 \
datalab@{}) &
gcloud_pid=$!
sleep 30
if [ -z "$(fuser -n tcp -4 8080)" ]; then
# The SSH tunnel never bound to the local port; kill it...
kill -9 "${{gcloud_pid}}"
fi
wait
done
"""
def generate_unique_id():
return uuid.uuid4().hex[0:12]
def call_gcloud(args):
return subprocess.check_output(['gcloud'] + args).decode('utf-8')
def free_port():
auto_socket = socket.socket()
auto_socket.bind(('localhost', 0))
port_number = auto_socket.getsockname()[1]
auto_socket.close()
return port_number
def random_zone():
zones_list = subprocess.check_output([
'gcloud', 'compute', 'zones', 'list',
'--filter=region~us-west', '--format=value(name)']).decode(
'utf-8')
zones = zones_list.split()
return random.choice(zones)
class DatalabInstance(object):
def __init__(self, test_run_id, project, zone, external_ip=True):
self.project = project
self.zone = zone
name_suffix = generate_unique_id()
self.network = "test-network-{0}-{1}".format(
test_run_id, name_suffix)
self.external_ip = external_ip
if self.external_ip:
self.name = "test-instance-{0}-{1}".format(
test_run_id, name_suffix)
else:
self.internal_name = "test-instance-{0}-{1}".format(
test_run_id, name_suffix)
self.name = "bastion-vm-{0}-{1}".format(
test_run_id, name_suffix)
def prepare_network_for_internal_ip(self):
region = call_gcloud(['compute', 'zones', 'describe',
'--format=value(region)', self.zone]).strip()
print('Using the region "{}"...'.format(region))
try:
print('Creating the network "{}"...'.format(self.network))
call_gcloud(['compute', 'networks', 'create', self.network])
self.subnet = call_gcloud([
'compute', 'networks', 'subnets', 'list',
'--filter=network~/{}$ region={}'.format(
self.network, region),
'--format=value(name)']).strip()
print('Updating the subnet "{}"...'.format(self.subnet))
call_gcloud(['compute', 'networks', 'subnets', 'update',
'--region', region, self.subnet,
'--enable-private-ip-google-access'])
except Exception:
delete_network_cmd = ['compute', 'networks', 'delete',
'--project', self.project,
'--quiet', self.network]
print('Deleting the network "{}" with the command "{}"'.format(
self.network, ' '.join(delete_network_cmd)))
call_gcloud(delete_network_cmd)
raise
def __enter__(self):
cmd = [python_executable, '-u', './tools/cli/datalab.py', '--quiet',
'--project', self.project,
'--zone', self.zone,
'--verbosity', 'debug',
'create', '--no-connect',
'--network-name', self.network]
if self.external_ip:
cmd.append(self.name)
else:
cmd.append('--beta-no-external-ip')
cmd.append(self.internal_name)
self.prepare_network_for_internal_ip()
print('Creating the instance "{}" with the command "{}"'.format(
self.name, ' '.join(cmd)))
subprocess.check_output(cmd)
print('Status of the instance: "{}"'.format(self.status()))
if not self.external_ip:
# Create a bastion VM that will forward to the real instance.
bastion_startup = bastion_startup_template.format(
self.zone, self.internal_name)
with tempfile.NamedTemporaryFile(mode='w', delete=False) \
as startup_script_file:
try:
startup_script_file.write(bastion_startup)
startup_script_file.close()
call_gcloud(['compute', 'instances', 'create',
'--zone', self.zone,
'--network', self.network,
'--subnet', self.subnet,
'--scopes=cloud-platform', '--tags=datalab',
'--metadata-from-file',
'startup-script='+startup_script_file.name,
self.name])
finally:
os.remove(startup_script_file.name)
return self
def __exit__(self, *unused_args, **unused_kwargs):
cmd = [python_executable, '-u', './tools/cli/datalab.py', '--quiet',
'--project', self.project,
'--zone', self.zone,
'delete', '--delete-disk']
if self.external_ip:
cmd.append(self.name)
else:
cmd.append(self.internal_name)
call_gcloud(['compute', 'instances', 'delete', '--zone', self.zone,
'--delete-disks=all', '--quiet', self.name])
print('Deleting the instance "{}" with the command "{}"'.format(
self.name, ' '.join(cmd)))
subprocess.check_output(cmd)
firewalls = call_gcloud([
'compute', 'firewall-rules', 'list',
'--filter=network='+self.network,
'--format=value(name)']).strip().split()
for firewall in firewalls:
delete_firewall_cmd = ['compute', 'firewall-rules', 'delete',
'--project', self.project,
'--quiet', firewall]
print('Deleting the firewall "{}" with the command "{}"'.format(
firewall, ' '.join(delete_firewall_cmd)))
call_gcloud(delete_firewall_cmd)
delete_network_cmd = ['compute', 'networks', 'delete',
'--project', self.project,
'--quiet', self.network]
print('Deleting the network "{}" with the command "{}"'.format(
self.network, ' '.join(delete_network_cmd)))
call_gcloud(delete_network_cmd)
def status(self):
cmd = [python_executable, '-u', './tools/cli/datalab.py', '--quiet',
'--project', self.project,
'--zone', self.zone,
'list', '--filter', "(name={})".format(self.name)]
return subprocess.check_output(cmd).decode('utf-8')
class DatalabConnection(object):
def __init__(self, project, zone, instance, stdout, max_attempts=10):
self.project = project
self.zone = zone
self.instance = instance
self.stdout = stdout
self.max_attempts = max_attempts
def __enter__(self):
self.port = free_port()
# Give a moment for the temporarily-acquired port to
# free up before trying to reuse it.
time.sleep(10)
cmd = [python_executable, '-u', './tools/cli/datalab.py', '--quiet',
'--project', self.project, '--zone', self.zone,
'connect', '--no-launch-browser',
'--port={}'.format(self.port),
self.instance]
self.process = subprocess.Popen(cmd, stdout=self.stdout)
attempts = 0
while attempts < self.max_attempts:
attempts += 1
with open(self.stdout.name, "r") as written_stdout:
if connection_msg in written_stdout.read():
self.readme_url = readme_url_template.format(self.port)
self.info_url = info_url_template.format(self.port)
return self
time.sleep(60)
return self
def __exit__(self, *unused_args, **unused_kwargs):
self.process.terminate()
self.process.communicate()
class TestEndToEnd(unittest.TestCase):
def setUp(self):
self.test_run_name = generate_unique_id()
self.project = call_gcloud(
['config', 'get-value', 'core/project']).strip()
self._zone = call_gcloud(
['config', 'get-value', 'compute/zone']).strip()
        print('Testing in the zone "{}" under the project {}'.format(
            self.get_zone(), self.project))
def get_zone(self):
if self._zone == '':
return random_zone()
return self._zone
def call_datalab(self, subcommand, args):
cmd = [python_executable, '-u', './tools/cli/datalab.py', '--quiet',
'--project', self.project, subcommand] + args
print('Running datalab command "{}"'.format(' '.join(cmd)))
return subprocess.check_output(cmd).decode('utf-8')
def retry_test(self, test_method):
last_error = None
for _ in range(retry_count):
try:
test_method()
return
except Exception as ex:
last_error = ex
raise last_error
def test_create_delete(self):
self.retry_test(self.run_create_delete_test)
def run_create_delete_test(self):
instance_name = ""
instance_zone = self.get_zone()
with DatalabInstance(self.test_run_name,
self.project,
instance_zone) as instance:
instance_name = instance.name
self.assertIn('RUNNING', instance.status())
instances = self.call_datalab('list', [])
self.assertNotIn(instance_name, instances)
def test_connect(self):
self.retry_test(self.run_connection_test)
def run_connection_test(self):
instance_name = ""
instance_zone = self.get_zone()
with DatalabInstance(self.test_run_name,
self.project,
instance_zone) as instance:
instance_name = instance.name
self.assertIn('RUNNING', instance.status())
self.call_datalab('stop', ['--zone', instance_zone, instance.name])
self.assertIn('TERMINATED', instance.status())
with tempfile.NamedTemporaryFile() as tmp:
with DatalabConnection(self.project, instance_zone,
instance.name, tmp) as conn:
readme = urlopen(conn.readme_url)
readme_contents = readme.read().decode('utf-8')
print('README contents returned: "{}"'.format(
readme_contents))
self.assertIn(readme_header, readme_contents)
instances = self.call_datalab('list', [])
self.assertNotIn(instance_name, instances)
def test_internal_ip(self):
self.retry_test(self.run_internal_ip_test)
def run_internal_ip_test(self):
instance_name = ""
instance_zone = self.get_zone()
with DatalabInstance(self.test_run_name,
self.project,
instance_zone,
external_ip=False) as instance:
instance_name = instance.name
self.assertIn('RUNNING', instance.status())
with tempfile.NamedTemporaryFile() as tmp:
with DatalabConnection(self.project, instance_zone,
instance.name, tmp,
max_attempts=15) as conn:
# Private-IP instances cannot clone the sample notebooks,
# So we check the _info
info = urlopen(conn.info_url)
info_contents = info.read().decode('utf-8')
print('/_info contents returned: "{}"'.format(
info_contents))
self.assertIn('DATALAB_VERSION', info_contents)
instances = self.call_datalab('list', [])
self.assertNotIn(instance_name, instances)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--runs', type=int, default=1, choices=range(1, 100),
metavar='COUNT', dest='runs',
help='Number of times to run the test suite')
args = parser.parse_args()
failed_count, run_count = 0, 0
for _ in range(0, args.runs):
suite = unittest.TestLoader().loadTestsFromTestCase(TestEndToEnd)
result = unittest.TextTestRunner(buffer=True).run(suite)
run_count += 1
if not result.wasSuccessful():
failed_count += 1
print('Ran {} test runs with {} failing'.format(run_count, failed_count))
|
nilq/baby-python
|
python
|
##############################
# import Verif #
# var = Verif.class(object) #
# var.def() #
##############################
#  this lib is verification  #
#   made by khalil preview   #
##############################
import tkinter
from tkinter import *
from tkinter import messagebox
class sign_in(object):
def __init__(self , un , up ,un1 , up1) :
self.un = un
self.up = up
self.un1 = un1
self.up1 = up1
def sign_in_verif(self):
if self.un1 == self.un and self.up1 == self.up :
result = []
username = str(self.un1)
userpass = str(self.up1)
result.append(username)
result.append(userpass)
f = open(str(username + '.sfr'), 'w')
f.write(str(result))
f.close()
else :
messagebox.showinfo("Sign up Failed", "Usernam or Password wrong !!!")
|
nilq/baby-python
|
python
|
import attrs
import asyncio
import datetime
import os
import shutil
import pickle
from typing import Any, Optional, List
@attrs.define
class Cache:
name: str
data: Any
expired_after: int = attrs.field(default=10)
expiration: datetime.datetime = attrs.field(init=False)
@expiration.default
def _expiration(self):
return datetime.datetime.utcnow() + datetime.timedelta(
minutes=self.expired_after
)
def ensure_cachedir(cachedir: str):
if not os.path.isdir(cachedir):
os.makedirs(cachedir)
def get_cache_names(cachedir: str) -> List[str]:
ensure_cachedir(cachedir)
result = []
for cdir in os.listdir(cachedir):
if os.path.isfile(os.path.join(cachedir, cdir, "data")):
result.append(cdir)
return result
def has_cache(cachedir: str, name: str) -> bool:
ensure_cachedir(cachedir)
return name in get_cache_names(cachedir)
def store(cachedir: str, cache: Cache):
ensure_cachedir(cachedir)
if cache.name in get_cache_names(cachedir):
raise NameError(f"a cache with the name `{cache.name}` already stored.")
os.makedirs(os.path.join(cachedir, cache.name))
with open(os.path.join(cachedir, cache.name, "data"), "wb") as file:
pickle.dump(cache, file, protocol=pickle.HIGHEST_PROTOCOL)
def get(cachedir: str, name: str) -> Cache:
ensure_cachedir(cachedir)
for cdir in get_cache_names(cachedir):
if cdir == name:
with open(os.path.join(cachedir, cdir, "data"), "rb") as file:
return pickle.load(file)
def remove(cachedir, name: str):
ensure_cachedir(cachedir)
if has_cache(cachedir, name):
shutil.rmtree(os.path.join(cachedir, name))
else:
raise ValueError(f"cache with the name `{name}` not found.")
async def update_cachedir(cachedir: str):
while True:
for cdir in get_cache_names(cachedir):
cache = get(cachedir, cdir)
if cache:
if datetime.datetime.utcnow() >= cache.expiration:
remove(cachedir, cache.name)
await asyncio.sleep(0.1)
class MemCacheManager:
"""memory cache manager"""
def __init__(self):
self.caches: List[Cache] = []
def store(self, cache: Cache):
if cache.name in self.get_cache_names():
raise NameError(f"a cache with the name `{cache.name}` already stored.")
self.caches.append(cache)
def has_cache(self, name: str) -> bool:
return name in self.get_cache_names()
def get_cache_names(self) -> List[str]:
return [cache.name for cache in self.caches]
def get(self, name: str) -> Cache:
for cache in self.caches:
if cache.name == name:
return cache
def remove(self, name: str):
cache = self.get(name)
if cache:
self.caches.remove(cache)
else:
raise ValueError(f"cache with the name `{name}` not found.")
async def update(self):
"""check for expired caches"""
while True:
            # iterate over a copy: removing from the list while iterating would skip items
            for cache in list(self.caches):
                if datetime.datetime.utcnow() >= cache.expiration:
                    self.caches.remove(cache)
await asyncio.sleep(0.1)
|
nilq/baby-python
|
python
|
#Done by Carlos Amaral in 18/06/2020
"""
Imagine an alien was just shot down in a game. Create a
variable called alien_color and assign it a value of 'green' , 'yellow' , or 'red' .
• Write an if statement to test whether the alien’s color is green. If it is, print
a message that the player just earned 5 points.
• Write one version of this program that passes the if test and another that
fails. (The version that fails will have no output.)
"""
#Alien Colors 1
alien_color = 'green'
if alien_color == 'green':
print("Congratulations. You've just earned 5 points!")
print("\n")
#Fail version
alien_color = 'yellow'
if alien_color == 'green':
print("Congratulations. You've just earned 5 points!")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: epl/protobuf/v1/query.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='epl/protobuf/v1/query.proto',
package='epl.protobuf.v1',
syntax='proto3',
serialized_options=b'\n\023com.epl.protobuf.v1B\nQueryProtoP\001Z.github.com/geo-grpc/api/golang/epl/protobuf/v1\242\002\003QPB\252\002\023com.epl.protobuf.v1',
serialized_pb=b'\n\x1b\x65pl/protobuf/v1/query.proto\x12\x0f\x65pl.protobuf.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"\xc0\x01\n\x0b\x46loatFilter\x12\x35\n\x08rel_type\x18\x02 \x01(\x0e\x32#.epl.protobuf.v1.FilterRelationship\x12\x0f\n\x05value\x18\x01 \x01(\x02H\x00\x12\x0f\n\x05start\x18\x03 \x01(\x02H\x00\x12\x0b\n\x03\x65nd\x18\x04 \x01(\x02\x12\x36\n\x0esort_direction\x18\x05 \x01(\x0e\x32\x1e.epl.protobuf.v1.SortDirection\x12\x0b\n\x03set\x18\x06 \x03(\x02\x42\x06\n\x04\x64\x61ta\"\xc1\x01\n\x0c\x44oubleFilter\x12\x35\n\x08rel_type\x18\x02 \x01(\x0e\x32#.epl.protobuf.v1.FilterRelationship\x12\x0f\n\x05value\x18\x01 \x01(\x01H\x00\x12\x0f\n\x05start\x18\x03 \x01(\x01H\x00\x12\x0b\n\x03\x65nd\x18\x04 \x01(\x01\x12\x36\n\x0esort_direction\x18\x05 \x01(\x0e\x32\x1e.epl.protobuf.v1.SortDirection\x12\x0b\n\x03set\x18\x06 \x03(\x01\x42\x06\n\x04\x64\x61ta\"\x8b\x02\n\x0fTimestampFilter\x12\x35\n\x08rel_type\x18\x02 \x01(\x0e\x32#.epl.protobuf.v1.FilterRelationship\x12+\n\x05value\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12+\n\x05start\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12\'\n\x03\x65nd\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x0esort_direction\x18\x05 \x01(\x0e\x32\x1e.epl.protobuf.v1.SortDirectionB\x06\n\x04\x64\x61ta\"\xc1\x01\n\x0cUInt32Filter\x12\x35\n\x08rel_type\x18\x02 \x01(\x0e\x32#.epl.protobuf.v1.FilterRelationship\x12\x0f\n\x05value\x18\x01 \x01(\rH\x00\x12\x0f\n\x05start\x18\x03 \x01(\rH\x00\x12\x0b\n\x03\x65nd\x18\x04 \x01(\r\x12\x36\n\x0esort_direction\x18\x05 \x01(\x0e\x32\x1e.epl.protobuf.v1.SortDirection\x12\x0b\n\x03set\x18\x06 \x03(\rB\x06\n\x04\x64\x61ta\"a\n\x0cStringFilter\x12\r\n\x05value\x18\x01 \x01(\t\x12\x35\n\x08rel_type\x18\x02 \x01(\x0e\x32#.epl.protobuf.v1.FilterRelationship\x12\x0b\n\x03set\x18\x06 \x03(\t*2\n\rSortDirection\x12\x0e\n\nNOT_SORTED\x10\x00\x12\x08\n\x04\x44\x45SC\x10\x01\x12\x07\n\x03\x41SC\x10\x02*\x96\x01\n\x12\x46ilterRelationship\x12\x06\n\x02\x45Q\x10\x00\x12\x07\n\x03LTE\x10\x02\x12\x07\n\x03GTE\x10\x04\x12\x06\n\x02LT\x10\x08\x12\x06\n\x02GT\x10\x10\x12\x0b\n\x07\x42\x45TWEEN\x10 \x12\x0f\n\x0bNOT_BETWEEN\x10@\x12\x08\n\x03NEQ\x10\x80\x01\x12\x07\n\x02IN\x10\x80\x02\x12\x0b\n\x06NOT_IN\x10\x80\x04\x12\t\n\x04LIKE\x10\x80\x08\x12\r\n\x08NOT_LIKE\x10\x80\x10\x42o\n\x13\x63om.epl.protobuf.v1B\nQueryProtoP\x01Z.github.com/geo-grpc/api/golang/epl/protobuf/v1\xa2\x02\x03QPB\xaa\x02\x13\x63om.epl.protobuf.v1b\x06proto3'
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_SORTDIRECTION = _descriptor.EnumDescriptor(
name='SortDirection',
full_name='epl.protobuf.v1.SortDirection',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NOT_SORTED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DESC', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ASC', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1037,
serialized_end=1087,
)
_sym_db.RegisterEnumDescriptor(_SORTDIRECTION)
SortDirection = enum_type_wrapper.EnumTypeWrapper(_SORTDIRECTION)
_FILTERRELATIONSHIP = _descriptor.EnumDescriptor(
name='FilterRelationship',
full_name='epl.protobuf.v1.FilterRelationship',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='EQ', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LTE', index=1, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GTE', index=2, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LT', index=3, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GT', index=4, number=16,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BETWEEN', index=5, number=32,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOT_BETWEEN', index=6, number=64,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NEQ', index=7, number=128,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IN', index=8, number=256,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOT_IN', index=9, number=512,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LIKE', index=10, number=1024,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOT_LIKE', index=11, number=2048,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1090,
serialized_end=1240,
)
_sym_db.RegisterEnumDescriptor(_FILTERRELATIONSHIP)
FilterRelationship = enum_type_wrapper.EnumTypeWrapper(_FILTERRELATIONSHIP)
NOT_SORTED = 0
DESC = 1
ASC = 2
EQ = 0
LTE = 2
GTE = 4
LT = 8
GT = 16
BETWEEN = 32
NOT_BETWEEN = 64
NEQ = 128
IN = 256
NOT_IN = 512
LIKE = 1024
NOT_LIKE = 2048
_FLOATFILTER = _descriptor.Descriptor(
name='FloatFilter',
full_name='epl.protobuf.v1.FloatFilter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rel_type', full_name='epl.protobuf.v1.FloatFilter.rel_type', index=0,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='epl.protobuf.v1.FloatFilter.value', index=1,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='start', full_name='epl.protobuf.v1.FloatFilter.start', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end', full_name='epl.protobuf.v1.FloatFilter.end', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sort_direction', full_name='epl.protobuf.v1.FloatFilter.sort_direction', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='set', full_name='epl.protobuf.v1.FloatFilter.set', index=5,
number=6, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='data', full_name='epl.protobuf.v1.FloatFilter.data',
index=0, containing_type=None, fields=[]),
],
serialized_start=82,
serialized_end=274,
)
_DOUBLEFILTER = _descriptor.Descriptor(
name='DoubleFilter',
full_name='epl.protobuf.v1.DoubleFilter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rel_type', full_name='epl.protobuf.v1.DoubleFilter.rel_type', index=0,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='epl.protobuf.v1.DoubleFilter.value', index=1,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='start', full_name='epl.protobuf.v1.DoubleFilter.start', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end', full_name='epl.protobuf.v1.DoubleFilter.end', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sort_direction', full_name='epl.protobuf.v1.DoubleFilter.sort_direction', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='set', full_name='epl.protobuf.v1.DoubleFilter.set', index=5,
number=6, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='data', full_name='epl.protobuf.v1.DoubleFilter.data',
index=0, containing_type=None, fields=[]),
],
serialized_start=277,
serialized_end=470,
)
_TIMESTAMPFILTER = _descriptor.Descriptor(
name='TimestampFilter',
full_name='epl.protobuf.v1.TimestampFilter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rel_type', full_name='epl.protobuf.v1.TimestampFilter.rel_type', index=0,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='epl.protobuf.v1.TimestampFilter.value', index=1,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='start', full_name='epl.protobuf.v1.TimestampFilter.start', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end', full_name='epl.protobuf.v1.TimestampFilter.end', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sort_direction', full_name='epl.protobuf.v1.TimestampFilter.sort_direction', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='data', full_name='epl.protobuf.v1.TimestampFilter.data',
index=0, containing_type=None, fields=[]),
],
serialized_start=473,
serialized_end=740,
)
_UINT32FILTER = _descriptor.Descriptor(
name='UInt32Filter',
full_name='epl.protobuf.v1.UInt32Filter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rel_type', full_name='epl.protobuf.v1.UInt32Filter.rel_type', index=0,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='epl.protobuf.v1.UInt32Filter.value', index=1,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='start', full_name='epl.protobuf.v1.UInt32Filter.start', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end', full_name='epl.protobuf.v1.UInt32Filter.end', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sort_direction', full_name='epl.protobuf.v1.UInt32Filter.sort_direction', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='set', full_name='epl.protobuf.v1.UInt32Filter.set', index=5,
number=6, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='data', full_name='epl.protobuf.v1.UInt32Filter.data',
index=0, containing_type=None, fields=[]),
],
serialized_start=743,
serialized_end=936,
)
_STRINGFILTER = _descriptor.Descriptor(
name='StringFilter',
full_name='epl.protobuf.v1.StringFilter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='epl.protobuf.v1.StringFilter.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rel_type', full_name='epl.protobuf.v1.StringFilter.rel_type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='set', full_name='epl.protobuf.v1.StringFilter.set', index=2,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=938,
serialized_end=1035,
)
_FLOATFILTER.fields_by_name['rel_type'].enum_type = _FILTERRELATIONSHIP
_FLOATFILTER.fields_by_name['sort_direction'].enum_type = _SORTDIRECTION
_FLOATFILTER.oneofs_by_name['data'].fields.append(
_FLOATFILTER.fields_by_name['value'])
_FLOATFILTER.fields_by_name['value'].containing_oneof = _FLOATFILTER.oneofs_by_name['data']
_FLOATFILTER.oneofs_by_name['data'].fields.append(
_FLOATFILTER.fields_by_name['start'])
_FLOATFILTER.fields_by_name['start'].containing_oneof = _FLOATFILTER.oneofs_by_name['data']
_DOUBLEFILTER.fields_by_name['rel_type'].enum_type = _FILTERRELATIONSHIP
_DOUBLEFILTER.fields_by_name['sort_direction'].enum_type = _SORTDIRECTION
_DOUBLEFILTER.oneofs_by_name['data'].fields.append(
_DOUBLEFILTER.fields_by_name['value'])
_DOUBLEFILTER.fields_by_name['value'].containing_oneof = _DOUBLEFILTER.oneofs_by_name['data']
_DOUBLEFILTER.oneofs_by_name['data'].fields.append(
_DOUBLEFILTER.fields_by_name['start'])
_DOUBLEFILTER.fields_by_name['start'].containing_oneof = _DOUBLEFILTER.oneofs_by_name['data']
_TIMESTAMPFILTER.fields_by_name['rel_type'].enum_type = _FILTERRELATIONSHIP
_TIMESTAMPFILTER.fields_by_name['value'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_TIMESTAMPFILTER.fields_by_name['start'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_TIMESTAMPFILTER.fields_by_name['end'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_TIMESTAMPFILTER.fields_by_name['sort_direction'].enum_type = _SORTDIRECTION
_TIMESTAMPFILTER.oneofs_by_name['data'].fields.append(
_TIMESTAMPFILTER.fields_by_name['value'])
_TIMESTAMPFILTER.fields_by_name['value'].containing_oneof = _TIMESTAMPFILTER.oneofs_by_name['data']
_TIMESTAMPFILTER.oneofs_by_name['data'].fields.append(
_TIMESTAMPFILTER.fields_by_name['start'])
_TIMESTAMPFILTER.fields_by_name['start'].containing_oneof = _TIMESTAMPFILTER.oneofs_by_name['data']
_UINT32FILTER.fields_by_name['rel_type'].enum_type = _FILTERRELATIONSHIP
_UINT32FILTER.fields_by_name['sort_direction'].enum_type = _SORTDIRECTION
_UINT32FILTER.oneofs_by_name['data'].fields.append(
_UINT32FILTER.fields_by_name['value'])
_UINT32FILTER.fields_by_name['value'].containing_oneof = _UINT32FILTER.oneofs_by_name['data']
_UINT32FILTER.oneofs_by_name['data'].fields.append(
_UINT32FILTER.fields_by_name['start'])
_UINT32FILTER.fields_by_name['start'].containing_oneof = _UINT32FILTER.oneofs_by_name['data']
_STRINGFILTER.fields_by_name['rel_type'].enum_type = _FILTERRELATIONSHIP
DESCRIPTOR.message_types_by_name['FloatFilter'] = _FLOATFILTER
DESCRIPTOR.message_types_by_name['DoubleFilter'] = _DOUBLEFILTER
DESCRIPTOR.message_types_by_name['TimestampFilter'] = _TIMESTAMPFILTER
DESCRIPTOR.message_types_by_name['UInt32Filter'] = _UINT32FILTER
DESCRIPTOR.message_types_by_name['StringFilter'] = _STRINGFILTER
DESCRIPTOR.enum_types_by_name['SortDirection'] = _SORTDIRECTION
DESCRIPTOR.enum_types_by_name['FilterRelationship'] = _FILTERRELATIONSHIP
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
FloatFilter = _reflection.GeneratedProtocolMessageType('FloatFilter', (_message.Message,), {
'DESCRIPTOR' : _FLOATFILTER,
'__module__' : 'epl.protobuf.v1.query_pb2'
# @@protoc_insertion_point(class_scope:epl.protobuf.v1.FloatFilter)
})
_sym_db.RegisterMessage(FloatFilter)
DoubleFilter = _reflection.GeneratedProtocolMessageType('DoubleFilter', (_message.Message,), {
'DESCRIPTOR' : _DOUBLEFILTER,
'__module__' : 'epl.protobuf.v1.query_pb2'
# @@protoc_insertion_point(class_scope:epl.protobuf.v1.DoubleFilter)
})
_sym_db.RegisterMessage(DoubleFilter)
TimestampFilter = _reflection.GeneratedProtocolMessageType('TimestampFilter', (_message.Message,), {
'DESCRIPTOR' : _TIMESTAMPFILTER,
'__module__' : 'epl.protobuf.v1.query_pb2'
# @@protoc_insertion_point(class_scope:epl.protobuf.v1.TimestampFilter)
})
_sym_db.RegisterMessage(TimestampFilter)
UInt32Filter = _reflection.GeneratedProtocolMessageType('UInt32Filter', (_message.Message,), {
'DESCRIPTOR' : _UINT32FILTER,
'__module__' : 'epl.protobuf.v1.query_pb2'
# @@protoc_insertion_point(class_scope:epl.protobuf.v1.UInt32Filter)
})
_sym_db.RegisterMessage(UInt32Filter)
StringFilter = _reflection.GeneratedProtocolMessageType('StringFilter', (_message.Message,), {
'DESCRIPTOR' : _STRINGFILTER,
'__module__' : 'epl.protobuf.v1.query_pb2'
# @@protoc_insertion_point(class_scope:epl.protobuf.v1.StringFilter)
})
_sym_db.RegisterMessage(StringFilter)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
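# A minimal usage sketch, assuming only the protobuf runtime already imported
# above: the generated classes behave like ordinary proto3 messages, so a filter
# can be built, serialized and parsed back. Guarded so importing query_pb2 is
# unchanged; the field values below are arbitrary examples.
if __name__ == '__main__':
    _flt = FloatFilter(rel_type=BETWEEN, start=0.5, end=2.5)
    _payload = _flt.SerializeToString()
    _parsed = FloatFilter.FromString(_payload)
    assert _parsed.end == _flt.end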
|
nilq/baby-python
|
python
|
from qqai.classes import *
class TextTranslateAILab(QQAIClass):
"""文本翻译(AI Lab)"""
api = 'https://api.ai.qq.com/fcgi-bin/nlp/nlp_texttrans'
def make_params(self, text, translate_type=0):
"""获取调用接口的参数"""
params = {'app_id': self.app_id,
'time_stamp': int(time.time()),
'nonce_str': int(time.time()),
'type': translate_type,
'text': text,
}
params['sign'] = self.get_sign(params)
return params
def run(self, text, translate_type=0):
params = self.make_params(text, translate_type)
response = self.call_api(params)
result = json.loads(response.text)
return result
class TextTranslateFanyi(QQAIClass):
"""文本翻译(翻译君)"""
api = 'https://api.ai.qq.com/fcgi-bin/nlp/nlp_texttranslate'
def make_params(self, text, source='auto', target='auto'):
"""获取调用接口的参数"""
params = {'app_id': self.app_id,
'time_stamp': int(time.time()),
'nonce_str': int(time.time()),
'text': text,
'source': source,
'target': target,
}
params['sign'] = self.get_sign(params)
return params
def run(self, text, source='auto', target='auto'):
params = self.make_params(text, source, target)
response = self.call_api(params)
result = json.loads(response.text)
return result
class ImageTranslate(QQAIClass):
"""图片翻译"""
api = 'https://api.ai.qq.com/fcgi-bin/nlp/nlp_imagetranslate'
def make_params(self, image_param, scene, source='auto', target='auto'):
"""获取调用接口的参数"""
params = {'app_id': self.app_id,
'time_stamp': int(time.time()),
'nonce_str': int(time.time()),
'image': self.get_base64(image_param),
'session': int(time.time()),
'scene': scene,
'source': source,
'target': target,
}
params['sign'] = self.get_sign(params)
return params
def run(self, image_param, scene, source='auto', target='auto'):
params = self.make_params(image_param, scene, source, target)
response = self.call_api(params)
result = json.loads(response.text)
return result
class TextDetect(QQAIClass):
"""语种识别"""
api = 'https://api.ai.qq.com/fcgi-bin/nlp/nlp_textdetect'
def make_params(self, text, candidate_langs=None, force=0):
"""获取调用接口的参数"""
if candidate_langs is None:
candidate_langs = ['zh', 'en', 'jp', 'kr']
        if isinstance(candidate_langs, str):
candidate_langs_param = candidate_langs
else:
candidate_langs_param = '|'.join(candidate_langs)
params = {'app_id': self.app_id,
'time_stamp': int(time.time()),
'nonce_str': int(time.time()),
'text': text,
'candidate_langs': candidate_langs_param,
'force': force
}
params['sign'] = self.get_sign(params)
return params
def run(self, text, candidate_langs=None, force=0):
params = self.make_params(text, candidate_langs, force)
response = self.call_api(params)
result = json.loads(response.text)
return result
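# A minimal usage sketch, assuming QQAIClass is constructed from a Tencent AI
# open-platform app_id/app_key pair (the credentials below are placeholders,
# not real values). It simply calls the Fanyi translator defined above.
if __name__ == '__main__':
    translator = TextTranslateFanyi('your_app_id', 'your_app_key')
    # '你好，世界' means "Hello, world" and is only sample input for the translator
    print(translator.run('你好，世界', source='zh', target='en'))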
|
nilq/baby-python
|
python
|
"""
Outcome states of a rock-paper-scissors (PPT) match.
"""
from enum import Enum
class Condicion(Enum):
"""
    Possible outcome states of the match.
"""
VICTORIA = 0
DERROTA = 1
EMPATE = 2
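# A small usage sketch, assuming moves are encoded as 0 = piedra (rock),
# 1 = papel (paper), 2 = tijera (scissors); this encoding is an assumption made
# only for the example, not something defined by the enum above.
def resolver_ronda(jugador: int, rival: int) -> Condicion:
    """Return the Condicion for the player given both moves."""
    if jugador == rival:
        return Condicion.EMPATE
    # each move beats the move that cyclically precedes it
    if (jugador - rival) % 3 == 1:
        return Condicion.VICTORIA
    return Condicion.DERROTA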
|
nilq/baby-python
|
python
|
import csv
import random
def load_lorem_sentences():
with open('lorem.txt') as fh:
return [l.strip() for l in fh.readlines()]
def load_dictionary():
with open('dictionary.csv') as csv_file:
return [l for l in csv.DictReader(csv_file, delimiter=',')]
SUFFIXES = ['at', 'it', 'is', 'us', 'et', 'um']
LOREM_SENTENCES = load_lorem_sentences()
EXPRESSIONS = load_dictionary()
def get_expression():
    expression = random.choice(EXPRESSIONS)
    # fall back to the full expression when no explicit stem is given
    stem = expression['stem'] if len(expression['stem']) > 0 else expression['expression']
    if len(expression['alternatives']) > 0:
        suffix = random.choice(expression['alternatives'].split())
    else:
        suffix = random.choice(SUFFIXES)
    return stem + suffix
def get_sentence():
sentence = random.choice(LOREM_SENTENCES).split()
n = len(sentence) // 5 + 1
expressions = [get_expression() for _ in range(n)]
for i, expr in zip(random.sample(range(len(sentence)), n), expressions):
sentence[i] = expr
return ' '.join(sentence).strip(' .').capitalize() + '.'
if __name__ == '__main__':
print(get_sentence())
|
nilq/baby-python
|
python
|
import pickle
import random
import h5py
import numpy as np
import pandas as pd
class Generator():
""" Data generator to the neural image captioning model (NIC).
The flow method outputs a list of two dictionaries containing
the inputs and outputs to the network.
# Arguments:
data_path = data_path to the preprocessed data computed by the
Preprocessor class.
"""
def __init__(self,data_path='preprocessed_data/',
training_filename=None,
validation_filename=None,
image_features_filename=None,
batch_size=100):
self.data_path = data_path
if training_filename == None:
self.training_filename = data_path + 'training_data.txt'
else:
self.training_filename = self.data_path + training_filename
if validation_filename == None:
self.validation_filename = data_path + 'validation_data.txt'
else:
self.validation_filename = self.data_path + validation_filename
if image_features_filename == None:
self.image_features_filename = (data_path +
'inception_image_name_to_features.h5')
else:
            self.image_features_filename = self.data_path + image_features_filename
self.dictionary = None
self.training_dataset = None
self.validation_dataset = None
self.image_names_to_features = None
data_logs = np.genfromtxt(self.data_path + 'data_parameters.log',
delimiter=' ', dtype='str')
data_logs = dict(zip(data_logs[:, 0], data_logs[:, 1]))
self.MAX_TOKEN_LENGTH = int(data_logs['max_caption_length:']) + 2
self.IMG_FEATS = int(data_logs['IMG_FEATS:'])
self.BOS = str(data_logs['BOS:'])
self.EOS = str(data_logs['EOS:'])
self.PAD = str(data_logs['PAD:'])
self.VOCABULARY_SIZE = None
self.word_to_id = None
self.id_to_word = None
self.BATCH_SIZE = batch_size
self.load_dataset()
self.load_vocabulary()
self.load_image_features()
def load_vocabulary(self):
print('Loading vocabulary...')
word_to_id = pickle.load(open(self.data_path + 'word_to_id.p', 'rb'))
id_to_word = pickle.load(open(self.data_path + 'id_to_word.p', 'rb'))
self.VOCABULARY_SIZE = len(word_to_id)
self.word_to_id = word_to_id
self.id_to_word = id_to_word
def load_image_features(self):
self.image_names_to_features = h5py.File(
self.image_features_filename, 'r')
def load_dataset(self):
print('Loading training dataset...')
train_data = pd.read_table(self.training_filename, delimiter='*')
train_data = np.asarray(train_data,dtype=str)
self.training_dataset = train_data
print('Loading validation dataset...')
validation_dataset = pd.read_table(
self.validation_filename,delimiter='*')
validation_dataset = np.asarray(validation_dataset, dtype=str)
self.validation_dataset = validation_dataset
def return_dataset(self, path=None, dataset_name='all', mode='training'):
print('Loading dataset in memory...')
if path == None:
path = self.data_path
if mode == 'training':
data = pd.read_table(self.training_filename, sep='*')
elif mode == 'test':
data = pd.read_table(path + 'test_data.txt', sep='*')
if dataset_name != 'all':
data = data[data['image_names'].str.contains(dataset_name)]
data = np.asarray(data)
data_size = data.shape[0]
image_names = data[:, 0]
image_features = np.zeros((data_size,self.MAX_TOKEN_LENGTH,
self.IMG_FEATS))
image_captions = np.zeros((data_size,self.MAX_TOKEN_LENGTH,
self.VOCABULARY_SIZE))
target_captions = np.zeros((data_size,self.MAX_TOKEN_LENGTH,
self.VOCABULARY_SIZE))
for image_arg, image_name in enumerate(image_names):
caption = data[image_arg,1]
one_hot_caption = self.format_to_one_hot(caption)
image_captions[image_arg, :, :] = one_hot_caption
target_captions[image_arg, :, :] = self.get_one_hot_target(
one_hot_caption)
image_features[image_arg, :, :] = self.get_image_features(
image_name)
return image_features, image_captions, target_captions,image_names
def flow(self, mode):
if mode == 'train':
data = self.training_dataset
#random.shuffle(data) #this is probably correct but untested
if mode == 'validation':
data = self.validation_dataset
image_names = data[:,0].tolist()
empty_batch = self.make_empty_batch()
captions_batch = empty_batch[0]
images_batch = empty_batch[1]
targets_batch = empty_batch[2]
batch_counter = 0
while True:
for data_arg, image_name in enumerate(image_names):
caption = data[data_arg,1]
one_hot_caption = self.format_to_one_hot(caption)
captions_batch[batch_counter, :, :] = one_hot_caption
targets_batch[batch_counter, :, :] = self.get_one_hot_target(
one_hot_caption)
images_batch[batch_counter, :, :] = self.get_image_features(
image_name)
if batch_counter == self.BATCH_SIZE - 1:
yield_dictionary = self.wrap_in_dictionary(captions_batch,
images_batch,
targets_batch)
yield yield_dictionary
empty_batch = self.make_empty_batch()
captions_batch = empty_batch[0]
images_batch = empty_batch[1]
targets_batch = empty_batch[2]
                    batch_counter = 0
                    # skip the increment so the next sample fills slot 0 of the fresh batch
                    continue
                batch_counter = batch_counter + 1
def make_test_input(self,image_name=None):
if image_name == None:
image_name = random.choice(self.training_dataset[:, 0].tolist())
one_hot_caption = np.zeros((1, self.MAX_TOKEN_LENGTH,
self.VOCABULARY_SIZE))
begin_token_id = self.word_to_id[self.BOS]
one_hot_caption[0, 0, begin_token_id] = 1
image_features = np.zeros((1, self.MAX_TOKEN_LENGTH, self.IMG_FEATS))
image_features[0, :, :] = self.get_image_features(image_name)
return one_hot_caption, image_features, image_name
def make_empty_batch(self):
captions_batch = np.zeros((self.BATCH_SIZE,self.MAX_TOKEN_LENGTH,
self.VOCABULARY_SIZE))
images_batch = np.zeros((self.BATCH_SIZE, self.MAX_TOKEN_LENGTH,
self.IMG_FEATS))
targets_batch = np.zeros((self.BATCH_SIZE,self.MAX_TOKEN_LENGTH,
self.VOCABULARY_SIZE))
return captions_batch, images_batch , targets_batch
def format_to_one_hot(self,caption):
tokenized_caption = caption.split()
tokenized_caption = [self.BOS] + tokenized_caption + [self.EOS]
one_hot_caption = np.zeros((self.MAX_TOKEN_LENGTH,
self.VOCABULARY_SIZE))
word_ids = [self.word_to_id[word] for word in tokenized_caption
if word in self.word_to_id]
for sequence_arg, word_id in enumerate(word_ids):
one_hot_caption[sequence_arg,word_id] = 1
return one_hot_caption
def get_image_features(self, image_name):
image_features = self.image_names_to_features[image_name]\
['image_features'][:]
image_input = np.zeros((self.MAX_TOKEN_LENGTH, self.IMG_FEATS))
image_input[0,:] = image_features
return image_input
def get_one_hot_target(self,one_hot_caption):
one_hot_target = np.zeros_like(one_hot_caption)
one_hot_target[:-1, :] = one_hot_caption[1:, :]
return one_hot_target
def wrap_in_dictionary(self,one_hot_caption,
image_features,
one_hot_target):
return [{'text': one_hot_caption,
'image': image_features},
{'output': one_hot_target}]
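# A minimal usage sketch, assuming the preprocessed files expected by __init__
# (the training/validation text files, the pickled vocabularies and the HDF5
# image features) already exist under 'preprocessed_data/'. Only the flow()
# output format ({'text', 'image'} inputs and an {'output'} target) comes from
# the class above.
if __name__ == '__main__':
    generator = Generator(data_path='preprocessed_data/', batch_size=100)
    train_stream = generator.flow(mode='train')
    inputs, targets = next(train_stream)
    print('text batch shape  :', inputs['text'].shape)
    print('image batch shape :', inputs['image'].shape)
    print('target batch shape:', targets['output'].shape)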
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# # -*- coding: utf-8 -*-
"""
@File: routes.py
@Author: Jim.Dai.Cn
@Date: 2020/9/22 上午11:26
@Desc:
"""
from app.company import blueprint
from flask import render_template, jsonify, current_app, request
@blueprint.route('/company', methods=['GET'])
def get_company_list():
clist = [
{"ID":1017,"USER_ID":117,"STATE":50,"THIS_ROLE":"","SYS_ID":4501,"USER_NAME_ABCDEF":"江苏乐福德新材料技术有限公司","USER_TYPE_ABCDEF":11,"AREA_ID_C_ABCDEF":450102,"AREA_ID_B_ABCDEF":4501,"AREA_ID_A_ABCDEF":45,"AREA_ID_ABCDEF":450102005,"PAPER_TYPE_ABCDEF":"","PAPER_NO_ABCDEF":"91320206MA1MWACH6R","PAPER_VALIDITY_ABCDEF":"","BANK_OPEN_ABCDEF":"","BANK_ACCOUNT_ABCDEF":"","OPEN_NAME_ABCDEF":"","OPEN_NO_ABCDEF":"","CONTACTS_ABCDEF":"吴迦迦","FIXED_TEL_ABCDEF":"","MOVE_TEL_ABCDEF":13814244466,"MAIL_ABCDEF":"","FAX_ABCDEF":"","ADDR_ABCDEF":"","REGIST_TIME_ABCE":"","REGIST_CAPITAL_AC":"","WORKERS_NO_AC":"","DEVELOP_NO_A":"","IP_NO_AC":"","IP_SYS_NO_AC":"","MAIN_PRODUCT_A":"","MAIN_MARKET_A":"","IS_ISO_A":"","COMPANY_TYPE_A":"","INDUSTRY_A":"","NATURE_A":"","PROJ_A":"","IS_GAUGE":"","IS_CONTINUE_HIGH":"","LEGAL_PERSON_C":"","PROVINCES_RECORD_C":"","IS_HQ_C":"","HQ_USER_NAME_C":"","HQ_ADDR_C":"","HQ_ZIP_CODE_C":"","HQ_REGIST_ADDR_C":"","HQ_LEGAL_PERSON_C":"","HQ_NO_C":"","HQ_REGIST_TIME_C":"","HQ_EMPLOYMENT_C":"","HQ_PRACTISING_AGENT_C":"","HQ_CONTACTS_C":"","HQ_TEL_C":"","AGENT_NO_C":"","LAW_NO_C":"","NATIONAL_START_C":"","PROVINCE_START_C":"","REGISTRATION_C":"","REGISTRATION_VALIDITY_C":"","CREATE_TIME":"36:43.7","REMARK":"","SPARE1":"","SPARE2":"","SPARE3":"","IS_DELETE":0,"ISO_CREATE_TIME":"","BUSSINESS_TIME_START":"","BUSSINESS_TIME_END":"","REGISTER_PLACE":"","CHECK_DAY":"","REGISTER_STATUS":"","TECHNOLOGY_FIELD":"","INVESTMENT_MONEY":"","DEV_MASTER_NUM":"","DEV_DOCTOR_NUM":"","INDEPENTDENT_LEGAL_PERSON":"","NATIONAL_ECONOMY_INDUSTRY":"","COMPANY_ATTRIBUTE":"","COMPANY_SCALE":"","COMPANY_PROFILE":"","COMPANY_CREDIT_RATING":"","IS_ON_LISTED":"","COMPANY_LISTING_SECTOR":"","LEGAL_PERSON_TEL":"","FINANCE_CONTACT":"","FINANCE_TEL":"","FINANCE_MOBEL":"","FINANCE_EMAIL":"","COMPANY_TYPE":"","IS_TECHNOLOGY":1,"REG_ADDRESS":""},
{"ID":1018,"USER_ID":118,"STATE":50,"THIS_ROLE":"","SYS_ID":4501,"USER_NAME_ABCDEF":"无锡市易动智能装备有限公司","USER_TYPE_ABCDEF":11,"AREA_ID_C_ABCDEF":450102,"AREA_ID_B_ABCDEF":4501,"AREA_ID_A_ABCDEF":45,"AREA_ID_ABCDEF":450102005,"PAPER_TYPE_ABCDEF":"","PAPER_NO_ABCDEF":"91320206MA1W9HMH22","PAPER_VALIDITY_ABCDEF":"","BANK_OPEN_ABCDEF":"","BANK_ACCOUNT_ABCDEF":"","OPEN_NAME_ABCDEF":"","OPEN_NO_ABCDEF":"","CONTACTS_ABCDEF":"邱林峰","FIXED_TEL_ABCDEF":"","MOVE_TEL_ABCDEF":13306199950,"MAIL_ABCDEF":"","FAX_ABCDEF":"","ADDR_ABCDEF":"无锡市惠山区长安街道畅惠路10","REGIST_TIME_ABCE":"","REGIST_CAPITAL_AC":"","WORKERS_NO_AC":"","DEVELOP_NO_A":"","IP_NO_AC":"","IP_SYS_NO_AC":"","MAIN_PRODUCT_A":"","MAIN_MARKET_A":"","IS_ISO_A":"","COMPANY_TYPE_A":"","INDUSTRY_A":"","NATURE_A":"","PROJ_A":"","IS_GAUGE":"","IS_CONTINUE_HIGH":"","LEGAL_PERSON_C":"","PROVINCES_RECORD_C":"","IS_HQ_C":"","HQ_USER_NAME_C":"","HQ_ADDR_C":"","HQ_ZIP_CODE_C":"","HQ_REGIST_ADDR_C":"","HQ_LEGAL_PERSON_C":"","HQ_NO_C":"","HQ_REGIST_TIME_C":"","HQ_EMPLOYMENT_C":"","HQ_PRACTISING_AGENT_C":"","HQ_CONTACTS_C":"","HQ_TEL_C":"","AGENT_NO_C":"","LAW_NO_C":"","NATIONAL_START_C":"","PROVINCE_START_C":"","REGISTRATION_C":"","REGISTRATION_VALIDITY_C":"","CREATE_TIME":"36:43.7","REMARK":"","SPARE1":"","SPARE2":"","SPARE3":"","IS_DELETE":0,"ISO_CREATE_TIME":"","BUSSINESS_TIME_START":"","BUSSINESS_TIME_END":"","REGISTER_PLACE":"","CHECK_DAY":"","REGISTER_STATUS":"","TECHNOLOGY_FIELD":"","INVESTMENT_MONEY":"","DEV_MASTER_NUM":"","DEV_DOCTOR_NUM":"","INDEPENTDENT_LEGAL_PERSON":"","NATIONAL_ECONOMY_INDUSTRY":"","COMPANY_ATTRIBUTE":"","COMPANY_SCALE":"","COMPANY_PROFILE":"","COMPANY_CREDIT_RATING":"","IS_ON_LISTED":"","COMPANY_LISTING_SECTOR":"","LEGAL_PERSON_TEL":"","FINANCE_CONTACT":"","FINANCE_TEL":"","FINANCE_MOBEL":"","FINANCE_EMAIL":"","COMPANY_TYPE":"","IS_TECHNOLOGY":1,"REG_ADDRESS":""},
{"ID":1020,"USER_ID":120,"STATE":50,"THIS_ROLE":"","SYS_ID":4501,"USER_NAME_ABCDEF":"无锡达美新材料有限公司","USER_TYPE_ABCDEF":11,"AREA_ID_C_ABCDEF":450102,"AREA_ID_B_ABCDEF":4501,"AREA_ID_A_ABCDEF":45,"AREA_ID_ABCDEF":450102006,"PAPER_TYPE_ABCDEF":"","PAPER_NO_ABCDEF":"91320206MA1M97J91B","PAPER_VALIDITY_ABCDEF":"","BANK_OPEN_ABCDEF":"","BANK_ACCOUNT_ABCDEF":"","OPEN_NAME_ABCDEF":"","OPEN_NO_ABCDEF":"","CONTACTS_ABCDEF":"郑巍","FIXED_TEL_ABCDEF":"","MOVE_TEL_ABCDEF":13951582299,"MAIL_ABCDEF":"","FAX_ABCDEF":"","ADDR_ABCDEF":"","REGIST_TIME_ABCE":"","REGIST_CAPITAL_AC":"","WORKERS_NO_AC":"","DEVELOP_NO_A":"","IP_NO_AC":"","IP_SYS_NO_AC":"","MAIN_PRODUCT_A":"","MAIN_MARKET_A":"","IS_ISO_A":"","COMPANY_TYPE_A":"","INDUSTRY_A":"","NATURE_A":"","PROJ_A":"","IS_GAUGE":1,"IS_CONTINUE_HIGH":"","LEGAL_PERSON_C":"","PROVINCES_RECORD_C":"","IS_HQ_C":"","HQ_USER_NAME_C":"","HQ_ADDR_C":"","HQ_ZIP_CODE_C":"","HQ_REGIST_ADDR_C":"","HQ_LEGAL_PERSON_C":"","HQ_NO_C":"","HQ_REGIST_TIME_C":"","HQ_EMPLOYMENT_C":"","HQ_PRACTISING_AGENT_C":"","HQ_CONTACTS_C":"","HQ_TEL_C":"","AGENT_NO_C":"","LAW_NO_C":"","NATIONAL_START_C":"","PROVINCE_START_C":"","REGISTRATION_C":"","REGISTRATION_VALIDITY_C":"","CREATE_TIME":"36:43.7","REMARK":"","SPARE1":"","SPARE2":"","SPARE3":"","IS_DELETE":0,"ISO_CREATE_TIME":"","BUSSINESS_TIME_START":"","BUSSINESS_TIME_END":"","REGISTER_PLACE":"","CHECK_DAY":"","REGISTER_STATUS":"","TECHNOLOGY_FIELD":"","INVESTMENT_MONEY":"","DEV_MASTER_NUM":"","DEV_DOCTOR_NUM":"","INDEPENTDENT_LEGAL_PERSON":"","NATIONAL_ECONOMY_INDUSTRY":"","COMPANY_ATTRIBUTE":"","COMPANY_SCALE":"","COMPANY_PROFILE":"","COMPANY_CREDIT_RATING":"","IS_ON_LISTED":"","COMPANY_LISTING_SECTOR":"","LEGAL_PERSON_TEL":"","FINANCE_CONTACT":"","FINANCE_TEL":"","FINANCE_MOBEL":"","FINANCE_EMAIL":"","COMPANY_TYPE":"","IS_TECHNOLOGY":1,"REG_ADDRESS":""},
{"ID":1021,"USER_ID":121,"STATE":50,"THIS_ROLE":"","SYS_ID":4501,"USER_NAME_ABCDEF":"江苏韦兰德特种装备科技有限公司","USER_TYPE_ABCDEF":11,"AREA_ID_C_ABCDEF":450102,"AREA_ID_B_ABCDEF":4501,"AREA_ID_A_ABCDEF":45,"AREA_ID_ABCDEF":450102006,"PAPER_TYPE_ABCDEF":"","PAPER_NO_ABCDEF":913204000000000000,"PAPER_VALIDITY_ABCDEF":"","BANK_OPEN_ABCDEF":"","BANK_ACCOUNT_ABCDEF":"","OPEN_NAME_ABCDEF":"","OPEN_NO_ABCDEF":"","CONTACTS_ABCDEF":"沈伟栋","FIXED_TEL_ABCDEF":"","MOVE_TEL_ABCDEF":18020301820,"MAIL_ABCDEF":"","FAX_ABCDEF":"","ADDR_ABCDEF":"无锡市惠山工业转型集聚区北惠路123号","REGIST_TIME_ABCE":"00:00.0","REGIST_CAPITAL_AC":7000,"WORKERS_NO_AC":65,"DEVELOP_NO_A":10,"IP_NO_AC":"","IP_SYS_NO_AC":"","MAIN_PRODUCT_A":"","MAIN_MARKET_A":"","IS_ISO_A":"","COMPANY_TYPE_A":104,"INDUSTRY_A":41,"NATURE_A":"","PROJ_A":999,"IS_GAUGE":1,"IS_CONTINUE_HIGH":"","LEGAL_PERSON_C":"","PROVINCES_RECORD_C":"","IS_HQ_C":"","HQ_USER_NAME_C":"","HQ_ADDR_C":"","HQ_ZIP_CODE_C":"","HQ_REGIST_ADDR_C":"","HQ_LEGAL_PERSON_C":"沈其明","HQ_NO_C":"","HQ_REGIST_TIME_C":"","HQ_EMPLOYMENT_C":"","HQ_PRACTISING_AGENT_C":"","HQ_CONTACTS_C":"","HQ_TEL_C":"","AGENT_NO_C":"","LAW_NO_C":"","NATIONAL_START_C":"","PROVINCE_START_C":"","REGISTRATION_C":"","REGISTRATION_VALIDITY_C":"","CREATE_TIME":"19:46.8","REMARK":"","SPARE1":"","SPARE2":"","SPARE3":"","IS_DELETE":0,"ISO_CREATE_TIME":"","BUSSINESS_TIME_START":"","BUSSINESS_TIME_END":"","REGISTER_PLACE":"","CHECK_DAY":"","REGISTER_STATUS":1,"TECHNOLOGY_FIELD":807,"INVESTMENT_MONEY":0,"DEV_MASTER_NUM":0,"DEV_DOCTOR_NUM":0,"INDEPENTDENT_LEGAL_PERSON":1,"NATIONAL_ECONOMY_INDUSTRY":873,"COMPANY_ATTRIBUTE":"其他","COMPANY_SCALE":"中型","COMPANY_PROFILE":"","COMPANY_CREDIT_RATING":"","IS_ON_LISTED":0,"COMPANY_LISTING_SECTOR":"","LEGAL_PERSON_TEL":18020301818,"FINANCE_CONTACT":"","FINANCE_TEL":"","FINANCE_MOBEL":"","FINANCE_EMAIL":"","COMPANY_TYPE":"","IS_TECHNOLOGY":1,"REG_ADDRESS":""},
{"ID":1071,"USER_ID":171,"STATE":50,"THIS_ROLE":"","SYS_ID":4501,"USER_NAME_ABCDEF":"无锡正则精准医学检验有限公司","USER_TYPE_ABCDEF":11,"AREA_ID_C_ABCDEF":450102,"AREA_ID_B_ABCDEF":4501,"AREA_ID_A_ABCDEF":45,"AREA_ID_ABCDEF":450102004,"PAPER_TYPE_ABCDEF":"","PAPER_NO_ABCDEF":"91320206MA1MCH2R4R","PAPER_VALIDITY_ABCDEF":"","BANK_OPEN_ABCDEF":"","BANK_ACCOUNT_ABCDEF":"","OPEN_NAME_ABCDEF":"","OPEN_NO_ABCDEF":"","CONTACTS_ABCDEF":"杨丽华","FIXED_TEL_ABCDEF":"0510-85993951","MOVE_TEL_ABCDEF":13915279492,"MAIL_ABCDEF":"14445505@qq.com","FAX_ABCDEF":"","ADDR_ABCDEF":"无锡惠山经济开发区惠山大道1699号八号楼五层","REGIST_TIME_ABCE":"00:00.0","REGIST_CAPITAL_AC":2000,"WORKERS_NO_AC":42,"DEVELOP_NO_A":16,"IP_NO_AC":"","IP_SYS_NO_AC":"","MAIN_PRODUCT_A":"医学检验;生物技术的研发、技术咨询、技术服务、技术转让;医疗器械的租赁。(依法须经批准的项目,经相关部门批准后方可开展经营活动)。","MAIN_MARKET_A":"","IS_ISO_A":"","COMPANY_TYPE_A":104,"INDUSTRY_A":21,"NATURE_A":"","PROJ_A":999,"IS_GAUGE":0,"IS_CONTINUE_HIGH":"","LEGAL_PERSON_C":"","PROVINCES_RECORD_C":"","IS_HQ_C":"","HQ_USER_NAME_C":"","HQ_ADDR_C":"","HQ_ZIP_CODE_C":"","HQ_REGIST_ADDR_C":"","HQ_LEGAL_PERSON_C":"盛青松","HQ_NO_C":"","HQ_REGIST_TIME_C":"","HQ_EMPLOYMENT_C":"","HQ_PRACTISING_AGENT_C":"","HQ_CONTACTS_C":"","HQ_TEL_C":"","AGENT_NO_C":"","LAW_NO_C":"","NATIONAL_START_C":"","PROVINCE_START_C":"","REGISTRATION_C":"","REGISTRATION_VALIDITY_C":"","CREATE_TIME":"08:05.6","REMARK":"","SPARE1":"","SPARE2":"","SPARE3":"","IS_DELETE":0,"ISO_CREATE_TIME":"","BUSSINESS_TIME_START":"","BUSSINESS_TIME_END":"","REGISTER_PLACE":"","CHECK_DAY":"","REGISTER_STATUS":1,"TECHNOLOGY_FIELD":201,"INVESTMENT_MONEY":"","DEV_MASTER_NUM":10,"DEV_DOCTOR_NUM":2,"INDEPENTDENT_LEGAL_PERSON":1,"NATIONAL_ECONOMY_INDUSTRY":44,"COMPANY_ATTRIBUTE":"其他","COMPANY_SCALE":"小型","COMPANY_PROFILE":"","COMPANY_CREDIT_RATING":"","IS_ON_LISTED":0,"COMPANY_LISTING_SECTOR":"","LEGAL_PERSON_TEL":13706159105,"FINANCE_CONTACT":"蒋静","FINANCE_TEL":"","FINANCE_MOBEL":"0510-85993951","FINANCE_EMAIL":"","COMPANY_TYPE":"","IS_TECHNOLOGY":1,"REG_ADDRESS":""},
{"ID":1072,"USER_ID":172,"STATE":50,"THIS_ROLE":"","SYS_ID":4501,"USER_NAME_ABCDEF":"无锡申联专用汽车有限公司","USER_TYPE_ABCDEF":11,"AREA_ID_C_ABCDEF":450102,"AREA_ID_B_ABCDEF":4501,"AREA_ID_A_ABCDEF":45,"AREA_ID_ABCDEF":450102009,"PAPER_TYPE_ABCDEF":"","PAPER_NO_ABCDEF":"91320206132603380D","PAPER_VALIDITY_ABCDEF":"","BANK_OPEN_ABCDEF":"","BANK_ACCOUNT_ABCDEF":"","OPEN_NAME_ABCDEF":"","OPEN_NO_ABCDEF":"","CONTACTS_ABCDEF":"陆芸","FIXED_TEL_ABCDEF":66681359,"MOVE_TEL_ABCDEF":13812188070,"MAIL_ABCDEF":"luyun01@saicmotor.com","FAX_ABCDEF":"","ADDR_ABCDEF":"无锡市惠山区惠际路86号","REGIST_TIME_ABCE":"00:00.0","REGIST_CAPITAL_AC":6640,"WORKERS_NO_AC":142,"DEVELOP_NO_A":24,"IP_NO_AC":"","IP_SYS_NO_AC":"","MAIN_PRODUCT_A":"汽车零部件及配件的研发、制造,机械零部件加工,汽车及汽车零部件、配件、医疗器械的销售,汽车制造的技术咨询、技术服务,空调修理,自营和代理各类商品及技术的进出口业务(国家限定企业经营或禁止进出口的商品和技术除外)。(依法须经批准的项目,经相关部门批准后方可开展经营活动)","MAIN_MARKET_A":"","IS_ISO_A":"","COMPANY_TYPE_A":104,"INDUSTRY_A":"请选择...","NATURE_A":"","PROJ_A":999,"IS_GAUGE":1,"IS_CONTINUE_HIGH":"","LEGAL_PERSON_C":"蓝青松","PROVINCES_RECORD_C":"","IS_HQ_C":"","HQ_USER_NAME_C":"","HQ_ADDR_C":"","HQ_ZIP_CODE_C":"","HQ_REGIST_ADDR_C":"","HQ_LEGAL_PERSON_C":"蓝青松","HQ_NO_C":"","HQ_REGIST_TIME_C":"","HQ_EMPLOYMENT_C":"","HQ_PRACTISING_AGENT_C":"","HQ_CONTACTS_C":"","HQ_TEL_C":"","AGENT_NO_C":"","LAW_NO_C":"","NATIONAL_START_C":"","PROVINCE_START_C":"","REGISTRATION_C":"","REGISTRATION_VALIDITY_C":"","CREATE_TIME":"38:06.4","REMARK":"","SPARE1":"","SPARE2":"","SPARE3":"","IS_DELETE":0,"ISO_CREATE_TIME":"","BUSSINESS_TIME_START":"","BUSSINESS_TIME_END":"","REGISTER_PLACE":"","CHECK_DAY":"","REGISTER_STATUS":1,"TECHNOLOGY_FIELD":"请选择...","INVESTMENT_MONEY":"","DEV_MASTER_NUM":5,"DEV_DOCTOR_NUM":"","INDEPENTDENT_LEGAL_PERSON":1,"NATIONAL_ECONOMY_INDUSTRY":36,"COMPANY_ATTRIBUTE":"","COMPANY_SCALE":"小型","COMPANY_PROFILE":"","COMPANY_CREDIT_RATING":"","IS_ON_LISTED":0,"COMPANY_LISTING_SECTOR":"","LEGAL_PERSON_TEL":18661097799,"FINANCE_CONTACT":"邱文华","FINANCE_TEL":66680152,"FINANCE_MOBEL":13921299955,"FINANCE_EMAIL":"qiuwenhua@saicmotor.com","COMPANY_TYPE":2,"IS_TECHNOLOGY":2,"REG_ADDRESS":"无锡市惠山区惠际路86号"},
{"ID":1077,"USER_ID":177,"STATE":50,"THIS_ROLE":"","SYS_ID":4501,"USER_NAME_ABCDEF":"无锡新纺欧迪诺电梯有限公司","USER_TYPE_ABCDEF":11,"AREA_ID_C_ABCDEF":450102,"AREA_ID_B_ABCDEF":4501,"AREA_ID_A_ABCDEF":45,"AREA_ID_ABCDEF":450102009,"PAPER_TYPE_ABCDEF":"","PAPER_NO_ABCDEF":913202000000000000,"PAPER_VALIDITY_ABCDEF":"","BANK_OPEN_ABCDEF":"","BANK_ACCOUNT_ABCDEF":"","OPEN_NAME_ABCDEF":"","OPEN_NO_ABCDEF":"","CONTACTS_ABCDEF":"王丹华","FIXED_TEL_ABCDEF":"","MOVE_TEL_ABCDEF":13861811885,"MAIL_ABCDEF":"","FAX_ABCDEF":"","ADDR_ABCDEF":"无锡惠山开发区堰新路580号","REGIST_TIME_ABCE":"00:00.0","REGIST_CAPITAL_AC":12800,"WORKERS_NO_AC":109,"DEVELOP_NO_A":30,"IP_NO_AC":"","IP_SYS_NO_AC":"","MAIN_PRODUCT_A":"电梯","MAIN_MARKET_A":"","IS_ISO_A":"","COMPANY_TYPE_A":"","INDUSTRY_A":"","NATURE_A":"","PROJ_A":"","IS_GAUGE":"","IS_CONTINUE_HIGH":"","LEGAL_PERSON_C":"","PROVINCES_RECORD_C":"","IS_HQ_C":"","HQ_USER_NAME_C":"","HQ_ADDR_C":"","HQ_ZIP_CODE_C":"","HQ_REGIST_ADDR_C":"","HQ_LEGAL_PERSON_C":"","HQ_NO_C":"","HQ_REGIST_TIME_C":"","HQ_EMPLOYMENT_C":"","HQ_PRACTISING_AGENT_C":"","HQ_CONTACTS_C":"","HQ_TEL_C":"","AGENT_NO_C":"","LAW_NO_C":"","NATIONAL_START_C":"","PROVINCE_START_C":"","REGISTRATION_C":"","REGISTRATION_VALIDITY_C":"","CREATE_TIME":"","REMARK":"","SPARE1":"","SPARE2":"","SPARE3":"","IS_DELETE":"","ISO_CREATE_TIME":"","BUSSINESS_TIME_START":"","BUSSINESS_TIME_END":"","REGISTER_PLACE":"","CHECK_DAY":"","REGISTER_STATUS":"","TECHNOLOGY_FIELD":"","INVESTMENT_MONEY":"","DEV_MASTER_NUM":"","DEV_DOCTOR_NUM":"","INDEPENTDENT_LEGAL_PERSON":"","NATIONAL_ECONOMY_INDUSTRY":"","COMPANY_ATTRIBUTE":"","COMPANY_SCALE":"","COMPANY_PROFILE":"","COMPANY_CREDIT_RATING":"","IS_ON_LISTED":"","COMPANY_LISTING_SECTOR":"","LEGAL_PERSON_TEL":"","FINANCE_CONTACT":"","FINANCE_TEL":"","FINANCE_MOBEL":"","FINANCE_EMAIL":"","COMPANY_TYPE":"","IS_TECHNOLOGY":"","REG_ADDRESS":""}
]
return jsonify(clist)
@blueprint.route('/company', methods=['POST'])
def add_company():
company = {}
if request.method == 'POST':
company["USER_NAME_ABCDEF"] = request.form.get("first-name")
company["middle_name"] = request.form.get("middle-name")
company["last_name"] = request.form.get("last-name")
company["gender"] = request.form.get("gender")
company["birthday"] = request.form.get("birthday")
current_app.mgConnection.db.user_info.insert_one(company)
return jsonify("success")
@blueprint.route('/companyDB', methods=['GET'])
def get_company_list_from_db():
conn = current_app.mgConnection.db.user_info.find({}, {'_id':0})
cList = []
for i in conn:
cList.append(i)
return jsonify(cList)
@blueprint.route('/course', methods=['GET'])
def get_course_from_db():
# conn = current_app.mgConnection.db.user_info.find({"type": "course"}, {'_id': 0})
conn = current_app.mgConnection.db.user_info.find({"type": "course", "chapters.author": "唐国安"}, {'_id':0})
cList = []
for i in conn:
cList.append(i)
return jsonify(cList)
@blueprint.route('/<template>')
def route_template(template):
return render_template(template + '.html')
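# A minimal client-side sketch, assuming the Flask app that registers this
# blueprint is running locally on port 5000 (both the base URL and the use of
# the requests library are assumptions, not part of the blueprint above).
if __name__ == '__main__':
    import requests
    base = 'http://127.0.0.1:5000'
    companies = requests.get(base + '/company').json()
    print(len(companies), 'companies returned by the static list endpoint')
    requests.post(base + '/company', data={'first-name': 'demo'})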
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'GUIs\LoadDataDialog.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_fromMemoryDialog(object):
def setupUi(self, fromMemoryDialog):
fromMemoryDialog.setObjectName("fromMemoryDialog")
fromMemoryDialog.setWindowModality(QtCore.Qt.WindowModal)
fromMemoryDialog.resize(351, 318)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(fromMemoryDialog.sizePolicy().hasHeightForWidth())
fromMemoryDialog.setSizePolicy(sizePolicy)
fromMemoryDialog.setMinimumSize(QtCore.QSize(0, 0))
fromMemoryDialog.setMaximumSize(QtCore.QSize(16777215, 16777215))
fromMemoryDialog.setSizeGripEnabled(False)
fromMemoryDialog.setModal(True)
self.okBtn = QtWidgets.QPushButton(fromMemoryDialog)
self.okBtn.setGeometry(QtCore.QRect(240, 30, 75, 23))
self.okBtn.setObjectName("okBtn")
self.cancelBtn = QtWidgets.QPushButton(fromMemoryDialog)
self.cancelBtn.setGeometry(QtCore.QRect(240, 70, 75, 23))
self.cancelBtn.setObjectName("cancelBtn")
self.clearBtn = QtWidgets.QPushButton(fromMemoryDialog)
self.clearBtn.setGeometry(QtCore.QRect(240, 110, 75, 23))
self.clearBtn.setObjectName("clearBtn")
self.dataText = QtWidgets.QPlainTextEdit(fromMemoryDialog)
self.dataText.setGeometry(QtCore.QRect(20, 20, 201, 280))
self.dataText.setLineWrapMode(QtWidgets.QPlainTextEdit.NoWrap)
self.dataText.setObjectName("dataText")
self.runnerDataFrame = QtWidgets.QFrame(fromMemoryDialog)
self.runnerDataFrame.setGeometry(QtCore.QRect(10, 10, 221, 301))
self.runnerDataFrame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.runnerDataFrame.setFrameShadow(QtWidgets.QFrame.Raised)
self.runnerDataFrame.setObjectName("runnerDataFrame")
self.runnerNrDataText = QtWidgets.QPlainTextEdit(self.runnerDataFrame)
self.runnerNrDataText.setGeometry(QtCore.QRect(10, 10, 101, 280))
self.runnerNrDataText.setTabChangesFocus(True)
self.runnerNrDataText.setLineWrapMode(QtWidgets.QPlainTextEdit.NoWrap)
self.runnerNrDataText.setObjectName("runnerNrDataText")
self.runnerTimeDataText = QtWidgets.QPlainTextEdit(self.runnerDataFrame)
self.runnerTimeDataText.setGeometry(QtCore.QRect(110, 10, 101, 280))
self.runnerTimeDataText.setTabChangesFocus(True)
self.runnerTimeDataText.setLineWrapMode(QtWidgets.QPlainTextEdit.NoWrap)
self.runnerTimeDataText.setObjectName("runnerTimeDataText")
self.inputMethodToggle = QtWidgets.QCheckBox(fromMemoryDialog)
self.inputMethodToggle.setGeometry(QtCore.QRect(240, 150, 101, 17))
self.inputMethodToggle.setObjectName("inputMethodToggle")
self.retranslateUi(fromMemoryDialog)
QtCore.QMetaObject.connectSlotsByName(fromMemoryDialog)
def retranslateUi(self, fromMemoryDialog):
_translate = QtCore.QCoreApplication.translate
fromMemoryDialog.setWindowTitle(_translate("fromMemoryDialog", "Įkelti duomenis"))
self.okBtn.setText(_translate("fromMemoryDialog", "Gerai"))
self.cancelBtn.setText(_translate("fromMemoryDialog", "Atšaukti"))
self.clearBtn.setText(_translate("fromMemoryDialog", "Valyti"))
self.dataText.setPlaceholderText(_translate("fromMemoryDialog", "Dalyvio nr. ir laikai"))
self.runnerNrDataText.setPlaceholderText(_translate("fromMemoryDialog", "Dalyvio nr."))
self.runnerTimeDataText.setPlaceholderText(_translate("fromMemoryDialog", "Laikai"))
self.inputMethodToggle.setText(_translate("fromMemoryDialog", "Bendras įvedimas"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
fromMemoryDialog = QtWidgets.QDialog()
ui = Ui_fromMemoryDialog()
ui.setupUi(fromMemoryDialog)
fromMemoryDialog.show()
sys.exit(app.exec_())
|
nilq/baby-python
|
python
|
import random
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class Person:
def __init__(self, name, hp, mp, atk, df, magic, items,type):
self.maxhp = hp
self.name = name
self.hp = hp
self.maxmp = mp
self.mp = mp
self.atkl = atk - 10
self.atkh = atk + 10
self.df = df
self.magic = magic
self.items = items
self.type = type
self.action = ["Attack", "Magic", "Items"]
def generate_damage(self):
return random.randrange(self.atkl,self.atkh)
    def update_dmg(self, fighters):
        side = fighters[0].type
        # iterate over a copy so that removing defeated fighters mid-loop is safe
        for fighter in fighters[:]:
            if fighter.get_hp() == 0:
                fighters.remove(fighter)
                print(fighter.name, " defeated")
                if len(fighters) < 1:
                    if side == "e":
                        print("You Won")
                    else:
                        print("Enemy Won")
                    return False
        return fighters
def take_damage(self,dmg):
self.hp -= dmg
if self.hp < 0:
self.hp = 0
return self.hp
def get_hp(self):
return self.hp
def get_maxhp(self):
return self.maxhp
def get_mp(self):
return self.mp
def get_maxmp(self):
return self.maxmp
def reduce_mp(self,cost):
self.mp -= cost
def heal(self,dmg):
if self.hp + dmg > self.maxhp:
self.hp = self.maxhp
else:
self.hp += dmg
    def choose_enemy_spell(self):
        magic_choice = random.randrange(0, len(self.magic))
        spell = self.magic[magic_choice]
        magic_dmg = self.generate_damage()
        pct = (self.hp / self.maxhp) * 100
        # re-roll when the spell is unaffordable, or when a healing (White)
        # spell was picked while HP is still above 50%
        if self.mp < spell.cost or (spell.type == "White" and pct > 50):
            return self.choose_enemy_spell()
        return spell, magic_dmg
def choose_action(self):
print("\n "+self.name+"'s turn")
print(" Actions: ")
i = 1
for item in self.action:
print(" " + str(i)+ ".", item)
i += 1
def choose_magic(self):
print(" Magics: ")
i = 1
for spell in self.magic:
print(" " + str(i)+ ".", spell.name, "(cost:", str(spell.cost) + ")")
i += 1
def choose_item(self):
print(" Items: ")
i = 1
for item in self.items:
print(" " + str(i)+ ".", item["item"].name, ":", item["item"].description, " (x" + str(item["quantity"])+")")
i += 1
def choose_target(self,enemies):
print(" Enimes: ")
i=1
for enemy in enemies:
print(" " + str(i)+ ".", enemy.name)
i += 1
choice = int(input("Choose Enemy: ")) -1
return choice
def get_enemy_stat(self):
hp_bar = "█"*int((self.hp/self.maxhp)*100 / 2) + " "*(50-len(str("█"*int((self.hp/self.maxhp)*100 / 2))))
hp_string = " "*(11-len(str(self.hp) + "/" + str(self.maxhp))) + str(self.hp) + "/" + str(self.maxhp)
print(" "+ 50*"_")
print(self.name+":"+ (16-len(self.name))*" ", hp_string, "|" + hp_bar + "|")
def get_stat(self):
hp_bar = "█"*int((self.hp/self.maxhp)*100 / 4) + " "*(25-len(str("█"*int((self.hp/self.maxhp)*100 / 4))))
mp_bar = "█"*int((self.mp/self.maxmp)*100 / 10) + " "*(10-len(str("█"*int((self.mp/self.maxmp)*100 / 10))))
hp_string = " "*(11-len(str(self.hp) + "/" + str(self.maxhp))) + str(self.hp) + "/" + str(self.maxhp)
mp_string = " "*(9-len(str(self.mp) + "/" + str(self.maxmp))) + str(self.mp) + "/" + str(self.maxmp)
print(" _________________________ __________")
print(self.name+":"+ (16-len(self.name))*" ", hp_string, "|" + hp_bar + "| ", mp_string, "|" + mp_bar + "|")
|
nilq/baby-python
|
python
|
import torch
import torch.utils.data as data
import os
import pickle
import numpy as np
from data_utils import Vocabulary
from data_utils import load_data_and_labels_klp, load_data_and_labels_exo
from eunjeon import Mecab
NER_idx_dic = {'<unk>': 0, 'B-PS_PROF': 1, 'B-PS_ENT': 2, 'B-PS_POL': 3, 'B-PS_NAME': 4,
'B-AF_REC': 5, 'B-AF_WARES': 6, 'B-AF_ITEM': 7, 'B-AF_SERVICE': 8, 'B-AF_OTHS': 9,
'B-OG_PRF': 10, 'B-OG_PRNF': 11, 'B-OG_PBF': 12, 'B-OG_PBNF': 13,
'B-LC_CNT': 14, 'B-LC_PLA': 15, 'B-LC_ADD': 16, 'B-LC_OTHS': 17,
'B-CV_TECH': 18, 'B-CV_LAWS': 19, 'B-EV_LT': 20, 'B-EV_ST': 21,
'B-GR_PLOR': 22, 'B-GR_PLCI': 23, 'B-TM_FLUC': 24, 'B-TM_ECOFIN': 25, 'B-TM_FUNC': 26,
'B-TM_CURR': 27, 'B-TM_OTHS': 28, 'B-PD_PD': 29, 'B-TI_TIME': 30,
'B-NUM_PRICE': 31, 'B-NUM_PERC': 32, 'B-NUM_OTHS': 33, 'I-PS_PROF': 34,
'I-PS_ENT': 35, 'I-PS_POL': 36, 'I-PS_NAME': 37, 'I-AF_REC': 38,
'I-AF_WARES': 39, 'I-AF_ITEM': 40, 'I-AF_SERVICE': 41, 'I-AF_OTHS': 42, 'I-OG_PRF': 43,
'I-OG_PRNF': 44, 'I-OG_PBF': 45, 'I-OG_PBNF': 46,
'I-LC_CNT': 47, 'I-LC_PLA': 48, 'I-LC_ADD': 49, 'I-LC_OTHS': 50, 'I-CV_TECH': 51, 'I-CV_LAWS': 52,
'I-EV_LT': 53, 'I-EV_ST': 54,
'I-GR_PLOR': 55, 'I-GR_PLCI': 56, 'I-TM_FLUC': 57, 'I-TM_ECOFIN': 58, 'I-TM_FUNC': 59,
'I-TM_CURR': 60, 'I-TM_OTHS': 61, 'I-PD_PD': 62,
'I-TI_TIME': 63, 'I-NUM_PRICE': 64, 'I-NUM_PERC': 65, 'I-NUM_OTHS': 66, 'O': 67}
class DocumentDataset (data.Dataset):
""""""
def __init__(self, vocab, char_vocab, pos_vocab, lex_dict, x_text, x_split, x_pos, labels):
"""
:param vocab:
"""
self.vocab = vocab
self.char_vocab = char_vocab
self.pos_vocab = pos_vocab
self.lex_dict = lex_dict
self.x_text = x_text
self.x_split = x_split
self.x_pos = x_pos
self.labels = labels
def __getitem__(self, index):
"""Returns 'one' data pair """
x_text_item = self.x_text[index]
x_split_item = self.x_split[index]
x_pos_item = self.x_pos[index]
label_item = self.labels[index]
x_text_char_item = []
for x_word in x_text_item:
x_char_item = []
for x_char in x_word:
x_char_item.append(x_char)
x_text_char_item.append(x_char_item)
x_idx_item = prepare_sequence(x_text_item, self.vocab.word2idx)
x_idx_char_item = prepare_char_sequence(x_text_char_item, self.char_vocab.word2idx)
x_pos_item = prepare_sequence(x_pos_item, self.pos_vocab.word2idx)
x_lex_item = prepare_lex_sequence(x_text_item, self.lex_dict)
label = torch.LongTensor(label_item)
# print("label")
# print(label)
# print(type(label))
return x_text_item, x_split_item, x_idx_item, x_idx_char_item, x_pos_item, x_lex_item, label
def __len__(self):
return len(self.x_text)
def prepare_sequence(seq, word_to_idx):
idxs = list()
# idxs.append(word_to_idx['<start>'])
for word in seq:
if word not in word_to_idx:
idxs.append(word_to_idx['<unk>'])
else:
idxs.append(word_to_idx[word])
# print(word_to_idx[word])
# idxs.append(word_to_idx['<eos>'])
return idxs
def prepare_char_sequence(seq, char_to_idx):
char_idxs = list()
# idxs.append(word_to_idx['<start>'])
for word in seq:
idxs = list()
for char in word:
if char not in char_to_idx:
idxs.append(char_to_idx['<unk>'])
else:
idxs.append(char_to_idx[char])
char_idxs.append(idxs)
# print(word_to_idx[word])
# idxs.append(word_to_idx['<eos>'])
return char_idxs
def prepare_lex_sequence(seq, lex_to_ner_list):
lex_idxs = list()
# idxs.append(word_to_idx['<start>'])
for lexicon in seq:
if lexicon not in lex_to_ner_list:
lex_idxs.append([lex_to_ner_list['<unk>']])
else:
lex_idxs.append(lex_to_ner_list[lexicon])
# print(word_to_idx[word])
# idxs.append(word_to_idx['<eos>'])
return lex_idxs
def collate_fn(data):
"""Creates mini-batch tensor"""
data.sort(key=lambda x: len(x[0]), reverse=True)
x_text_batch, x_split_batch, x_idx_batch, x_idx_char_batch, x_pos_batch, x_lex_batch, labels = zip(*data)
lengths = [len(label) for label in labels]
targets = torch.zeros(len(labels), max(lengths), 8).long()
for i, label in enumerate(labels):
end = lengths[i]
targets[i, :end] = label[:end]
max_word_len = int(np.amax([len(word_tokens) for word_tokens in x_idx_batch])) # ToDo: usually, np.mean can be applied
batch_size = len(x_idx_batch)
    batch_words_len = np.array([len(word_tokens) for word_tokens in x_idx_batch])
# Padding procedure (word)
padded_word_tokens_matrix = np.zeros((batch_size, max_word_len), dtype=np.int64)
for i in range(padded_word_tokens_matrix.shape[0]):
for j in range(padded_word_tokens_matrix.shape[1]):
try:
padded_word_tokens_matrix[i, j] = x_idx_batch[i][j]
except IndexError:
pass
max_char_len = int(np.amax([len(char_tokens) for word_tokens in x_idx_char_batch for char_tokens in word_tokens]))
if max_char_len < 5: # size of maximum filter of CNN
max_char_len = 5
# Padding procedure (char)
padded_char_tokens_matrix = np.zeros((batch_size, max_word_len, max_char_len), dtype=np.int64)
for i in range(padded_char_tokens_matrix.shape[0]):
for j in range(padded_char_tokens_matrix.shape[1]):
            for k in range(padded_char_tokens_matrix.shape[2]):
try:
padded_char_tokens_matrix[i, j, k] = x_idx_char_batch[i][j][k]
except IndexError:
pass
# Padding procedure (pos)
padded_pos_tokens_matrix = np.zeros((batch_size, max_word_len), dtype=np.int64)
for i in range(padded_pos_tokens_matrix.shape[0]):
for j in range(padded_pos_tokens_matrix.shape[1]):
try:
padded_pos_tokens_matrix[i, j] = x_pos_batch[i][j]
except IndexError:
pass
# Padding procedure (lex)
padded_lex_tokens_matrix = np.zeros((batch_size, max_word_len, len(NER_idx_dic)))
for i in range(padded_lex_tokens_matrix.shape[0]):
for j in range(padded_lex_tokens_matrix.shape[1]):
for k in range(padded_lex_tokens_matrix.shape[2]):
try:
for x_lex in x_lex_batch[i][j]:
k = NER_idx_dic[x_lex]
padded_lex_tokens_matrix[i, j, k] = 1
except IndexError:
pass
padded_word_tokens_matrix = torch.from_numpy(padded_word_tokens_matrix)
padded_char_tokens_matrix = torch.from_numpy(padded_char_tokens_matrix)
padded_pos_tokens_matrix = torch.from_numpy(padded_pos_tokens_matrix)
padded_lex_tokens_matrix = torch.from_numpy(padded_lex_tokens_matrix).float()
return x_text_batch, x_split_batch, padded_word_tokens_matrix, padded_char_tokens_matrix, padded_pos_tokens_matrix, padded_lex_tokens_matrix, targets, batch_words_len
def get_loader(data_file_dir, vocab, char_vocab, pos_vocab, lex_dict, batch_size, shuffle, num_workers, dataset='klp'):
""""""
if dataset == 'klp':
x_list, x_pos_list, x_split_list, y_list = load_data_and_labels_klp(data_file_dir=data_file_dir)
y_list = np.array(y_list)
elif dataset == 'exo':
x_list, x_pos_list, x_split_list, y_list = load_data_and_labels_exo(data_file_dir='data_in/EXOBRAIN_NE_CORPUS_10000.txt')
y_list = np.array(y_list)
elif dataset == 'both':
x_list, x_pos_list, x_split_list, y_list = load_data_and_labels_klp(data_file_dir=data_file_dir)
x_list_2, x_pos_list_2, x_split_list_2, y_list_2 = load_data_and_labels_exo(data_file_dir='data_in/EXOBRAIN_NE_CORPUS_10000.txt')
x_list = x_list + x_list_2
x_pos_list = x_pos_list + x_pos_list_2
x_split_list = x_split_list + x_split_list_2
y_list = y_list + y_list_2
y_list = np.array(y_list)
print("len(x_list):",len(x_list))
print("len(y_list):",len(y_list))
document = DocumentDataset(vocab=vocab,
char_vocab=char_vocab,
pos_vocab=pos_vocab,
lex_dict=lex_dict,
x_text=x_list,
x_split=x_split_list,
x_pos=x_pos_list,
labels=y_list)
data_loader = torch.utils.data.DataLoader(dataset=document,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
collate_fn=collate_fn)
return data_loader
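# A minimal usage sketch, assuming the pickled vocabularies and lexicon live
# under 'data_in/' (these file names are placeholders, not paths defined by
# this module) and that the corpus files expected by get_loader are present.
if __name__ == '__main__':
    vocab = pickle.load(open('data_in/vocab.pkl', 'rb'))
    char_vocab = pickle.load(open('data_in/char_vocab.pkl', 'rb'))
    pos_vocab = pickle.load(open('data_in/pos_vocab.pkl', 'rb'))
    lex_dict = pickle.load(open('data_in/lex_dict.pkl', 'rb'))
    loader = get_loader('data_in/', vocab, char_vocab, pos_vocab, lex_dict,
                        batch_size=32, shuffle=True, num_workers=0, dataset='exo')
    _, _, words, chars, pos, lex, targets, lengths = next(iter(loader))
    print('word tensor:', words.shape, 'target tensor:', targets.shape)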
|
nilq/baby-python
|
python
|
"""
Test file to test RetrieveMovie.py
"""
from Product.Database.DatabaseManager.Retrieve.RetrieveMovie import RetrieveMovie
from Product.Database.DBConn import create_session
from Product.Database.DBConn import Movie
def test_retrieve_movie():
"""
Author: John Andree Lidquist
Date: 2017-11-16
Last Updated:
Purpose: Assert that a movie, or all movies, are retrieved correctly
"""
# PRE-CONDITIONS
movie_id = -1
movie_title = "dummy"
movie_year = 1111
# We create a session and add a dummy movie that we can later retrieve
session = create_session()
dummy_movie = Movie(id=movie_id, title=movie_title, year=movie_year)
session.add(dummy_movie)
    session.commit()
    # We need to close the session, else we get an error when trying to delete it later
    session.close()
# EXPECTED OUTPUT
expected_id = movie_id
expected_title = movie_title
expected_year = movie_year
# OBSERVED OUTPUT
# We call the method to be tested to get 1) The movie we added above, and 2) All the movies
# which is done by not setting the parameter "movie_id"
retrieve_movie = RetrieveMovie()
observed_one_movie = retrieve_movie.retrieve_movie(movie_id=movie_id)
observed_all_movies = retrieve_movie.retrieve_movie()
    # After retrieving the dummy movie we remove it again.
session.delete(observed_one_movie)
session.commit()
session.close()
assert observed_one_movie
assert observed_one_movie.id == expected_id
assert observed_one_movie.title == expected_title
assert observed_one_movie.year == expected_year
assert observed_all_movies
|
nilq/baby-python
|
python
|