text stringlengths 38 1.54M |
|---|
import numpy as np
import pandas as pd
import math
import requests
import xlsxwriter
from scipy import stats
from scipy.stats import percentileofscore as score
# IMPORT OUR LIST OF STOCKS
# FIX: the original passed 'r' as read_csv's second positional argument,
# which pandas interprets as sep='r' (the column separator), silently
# mangling the parse of this ordinary comma-separated file.
stocks = pd.read_csv("C:\\V'S LIFE\\4_Project\\Algro-Trading\\actual_output\\Project 1\\sp_500_stocks.csv")
# ACQUIRING AN API TOKEN
# NOTE(review): this local `secrets` module shadows the stdlib `secrets`
# package -- consider renaming the config module.
from secrets import IEX_CLOUD_API_TOKEN
# MAKING API CALL (sandbox IEX Cloud quote endpoint for a single ticker)
symbol = 'AAPL'
api_url = f'https://sandbox.iexapis.com/stable/stock/{symbol}/quote/?token={IEX_CLOUD_API_TOKEN}'
data = requests.get(api_url).json()
# PARSING API
# data['year1ChangePercent']
#Executing A Batch API Call & Building Our DataFrame
# Function sourced from
# https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
def chunks(lst, n):
    """Yield successive n-sized chunks from lst.

    The final chunk is shorter when len(lst) is not a multiple of n.
    """
    start = 0
    while start < len(lst):
        yield lst[start:start + n]
        start += n
# Executing A Batch API Call & Building Our DataFrame
symbol_groups = list(chunks(stocks['Ticker'], 100))
# Comma-joined groups of up to 100 tickers (the IEX batch limit).
symbol_strings = [','.join(group) for group in symbol_groups]
my_columns = ['Ticker', 'Price', 'One-Year Price Return', 'Number of Shares to Buy']
# FIX: collect rows in a plain list instead of DataFrame.append --
# .append was deprecated in pandas 1.4 and removed in 2.0, and appending
# row-by-row rebuilds the frame each time (quadratic).
rows = []
for symbol_string in symbol_strings:
    batch_api_call_url = f'https://sandbox.iexapis.com/stable/stock/market/batch/?types=stats,quote&symbols={symbol_string}&token={IEX_CLOUD_API_TOKEN}'
    data = requests.get(batch_api_call_url).json()
    for symbol in symbol_string.split(','):
        rows.append([symbol,
                     data[symbol]['quote']['latestPrice'],
                     data[symbol]['stats']['year1ChangePercent'],
                     'N/A'])
final_dataframe = pd.DataFrame(rows, columns=my_columns)
# REMOVING LOW MOMENTUM STOCKS: keep the 50 best one-year returns.
final_dataframe.sort_values('One-Year Price Return', ascending=False, inplace=True)
final_dataframe = final_dataframe[:50]
# FIX: drop=True discards the old index instead of inserting it as a
# spurious 'index' column.
final_dataframe.reset_index(drop=True, inplace=True)
#CALCULATING THE NUMBER OF SHARES TO BUY
def portfolio_input():
    """Prompt for the portfolio size until a numeric value is entered.

    Stores the raw input string in the module-level global
    ``portfolio_size`` (callers convert it with ``float``).

    Fixes: the original retried only once and accepted the second entry
    without validating it, and it used a bare ``except``.
    """
    global portfolio_size
    while True:
        portfolio_size = input('Enter the size of your portfolio')
        try:
            float(portfolio_size)
        except ValueError:
            print('That is not a number')
            print('Please try again:')
        else:
            return
# Equal-weight allocation: split the portfolio evenly across the selected
# stocks and buy whole shares only (floor, never rounding up).
position_size=float(portfolio_size)/len(final_dataframe.index)
for i in range(0,len(final_dataframe)):
    final_dataframe.loc[i,'Number of Shares to Buy']=math.floor(position_size/final_dataframe.loc[i,'Price'])
# BUILDING A BETTER MOMENTUM STRATEGY
# "High-quality momentum": price returns over four look-back windows plus a
# percentile for each and a combined HQM score ('N/A' placeholders are
# filled in later).
hqm_columns = [
    'Ticker',
    'Price',
    'Number of Shares to Buy',
    'One-Year Price Return',
    'One-Year Return Percentile',
    'Six-Month Price Return',
    'Six-Month Return Percentile',
    'Three-Month Price Return',
    'Three-Month Return Percentile',
    'One-Month Price Return',
    'One-Month Return Percentile',
    'HQM Score'
]
# FIX: accumulate rows in a list -- DataFrame.append was deprecated in
# pandas 1.4 and removed in 2.0, and per-row appends are quadratic.
hqm_rows = []
for symbol_string in symbol_strings:
    batch_api_call_url = f'https://sandbox.iexapis.com/stable/stock/market/batch/?types=stats,quote&symbols={symbol_string}&token={IEX_CLOUD_API_TOKEN}'
    data = requests.get(batch_api_call_url).json()
    for symbol in symbol_string.split(','):
        symbol_stats = data[symbol]['stats']
        hqm_rows.append([symbol,
                         data[symbol]['quote']['latestPrice'],
                         'N/A',
                         symbol_stats['year1ChangePercent'],
                         'N/A',
                         symbol_stats['month6ChangePercent'],
                         'N/A',
                         symbol_stats['month3ChangePercent'],
                         'N/A',
                         symbol_stats['month1ChangePercent'],
                         'N/A',
                         'N/A'])
hqm_dataframe = pd.DataFrame(hqm_rows, columns=hqm_columns)
# CALCULATE MOMENTUM PERCENTILE
# Look-back windows; each one has a '<period> Price Return' column and gets
# a matching '<period> Return Percentile' column filled in below.
time_periods = [
    'One-Year',
    'Six-Month',
    'Three-Month',
    'One-Month'
]
# percentileofscore ranks each stock's return against the whole column;
# dividing by 100 stores the percentile as a 0-1 fraction.
for row in hqm_dataframe.index:
    for time_period in time_periods:
        hqm_dataframe.loc[row, f'{time_period} Return Percentile'] = stats.percentileofscore(hqm_dataframe[f'{time_period} Price Return'], hqm_dataframe.loc[row, f'{time_period} Price Return'])/100
# Print each percentile score to make sure it was calculated properly
for time_period in time_periods:
    print(hqm_dataframe[f'{time_period} Return Percentile'])
# CALCULATE HQM SCORE
from statistics import mean
# HQM score = arithmetic mean of the four momentum percentiles.
for row in hqm_dataframe.index:
    momentum_percentiles = []
    for time_period in time_periods:
        momentum_percentiles.append(hqm_dataframe.loc[row, f'{time_period} Return Percentile'])
    hqm_dataframe.loc[row, 'HQM Score'] = mean(momentum_percentiles)
# SELECTING SHARES TO BUY
# FIX: the original called sort_values without inplace=True and discarded
# the return value, so the frame was never actually sorted before slicing.
hqm_dataframe = hqm_dataframe.sort_values(by='HQM Score', ascending=False)
hqm_dataframe = hqm_dataframe[:51]
# Re-number rows 0..N-1 so the positional .loc[i, ...] loop below is valid
# after the sort shuffled the index.
hqm_dataframe.reset_index(drop=True, inplace=True)
# CALCULATE THE NUMBER OF SHARES TO BUY
portfolio_input()
position_size = float(portfolio_size) / len(hqm_dataframe.index)
# FIX: range(len(...)) covers every row; the original range(0, len-1)
# silently skipped the last stock.
for i in range(0, len(hqm_dataframe['Ticker'])):
    hqm_dataframe.loc[i, 'Number of Shares to Buy'] = math.floor(position_size / hqm_dataframe['Price'][i])
# FORMATTING EXCEL OUTPUT
# Write the HQM frame to momentum_strategy.xlsx with one dark-themed
# xlsxwriter format per column type.
writer = pd.ExcelWriter('momentum_strategy.xlsx', engine='xlsxwriter')
hqm_dataframe.to_excel(writer, sheet_name='Momentum Strategy', index = False)
# Shared theme colours.
background_color = '#0a0a23'
font_color = '#ffffff'
# Plain text cells (also reused for the header row).
string_template = writer.book.add_format(
    {
        'font_color': font_color,
        'bg_color': background_color,
        'border': 1
    }
)
# Dollar amounts ($0.00) for prices.
dollar_template = writer.book.add_format(
    {
        'num_format':'$0.00',
        'font_color': font_color,
        'bg_color': background_color,
        'border': 1
    }
)
# Whole numbers for share counts.
integer_template = writer.book.add_format(
    {
        'num_format':'0',
        'font_color': font_color,
        'bg_color': background_color,
        'border': 1
    }
)
# Percentages with one decimal for returns and percentiles.
percent_template = writer.book.add_format(
    {
        'num_format':'0.0%',
        'font_color': font_color,
        'bg_color': background_color,
        'border': 1
    }
)
# Column letter -> [header text, cell format].
column_formats = {
    'A': ['Ticker', string_template],
    'B': ['Price', dollar_template],
    'C': ['Number of Shares to Buy', integer_template],
    'D': ['One-Year Price Return', percent_template],
    'E': ['One-Year Return Percentile', percent_template],
    'F': ['Six-Month Price Return', percent_template],
    'G': ['Six-Month Return Percentile', percent_template],
    'H': ['Three-Month Price Return', percent_template],
    'I': ['Three-Month Return Percentile', percent_template],
    'J': ['One-Month Price Return', percent_template],
    'K': ['One-Month Return Percentile', percent_template],
    'L': ['HQM Score', integer_template]
}
for column in column_formats.keys():
    # Set the 20-wide column with its data format, then rewrite the header
    # cell so it uses the string theme instead of the data format.
    writer.sheets['Momentum Strategy'].set_column(f'{column}:{column}', 20, column_formats[column][1])
    writer.sheets['Momentum Strategy'].write(f'{column}1', column_formats[column][0], string_template)
writer.save()
|
from django.views.generic import View
from django.views.decorators.cache import never_cache
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from gbe.models import Profile
from django.shortcuts import (
get_object_or_404,
render,
)
from gbe.functions import (
conference_slugs,
get_current_conference,
get_conference_by_slug,
validate_perms,
)
from scheduler.idd import get_schedule
from gbe.ticketing_idd_interface import get_checklist_items
class PerformerShowComp(View):
    """Staff report view listing active profiles whose conference schedule
    includes a "Performer" checklist role (performer comps)."""

    # Active profiles with a bio; populated by groundwork().
    profiles = None

    def groundwork(self, request, args, kwargs):
        # Permission gate: validate_perms raises when the requester lacks
        # any privilege (require=True); the returned profile is unused.
        viewer_profile = validate_perms(request, 'any', require=True)
        # Conference comes from ?conf_slug=..., else the current conference.
        if request.GET and request.GET.get('conf_slug'):
            self.conference = get_conference_by_slug(request.GET['conf_slug'])
        else:
            self.conference = get_current_conference()
        self.profiles = Profile.objects.filter(
            user_object__is_active=True, bio__isnull=False
        ).select_related().distinct()

    @never_cache
    def get(self, request, *args, **kwargs):
        """Collect checklist role items per profile for the selected
        conference and render the comp report sorted by badge name."""
        self.groundwork(request, args, kwargs)
        schedules = []
        for person in self.profiles:
            response = get_schedule(
                person.user_object,
                labels=[self.conference.conference_slug])
            if len(response.schedule_items) > 0:
                ticket_items, role_items = get_checklist_items(
                    person,
                    self.conference,
                    response.schedule_items)
                # Only people with a "Performer" role entry make the report.
                if len(role_items) > 0 and "Performer" in role_items.keys():
                    schedules += [{'person': person,
                                   'role_items': role_items}]
        sorted_sched = sorted(
            schedules,
            key=lambda schedule: schedule['person'].get_badge_name())
        return render(request,
                      'gbe/report/comp_report.tmpl',
                      {'schedules': sorted_sched,
                       'conference_slugs': conference_slugs(),
                       'conference': self.conference,
                       'columns': ['Badge Name',
                                   'First',
                                   'Last',
                                   'Email',
                                   'Performer Comps']})

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        # Require login before any HTTP method handler runs.
        return super(PerformerShowComp, self).dispatch(*args, **kwargs)
|
import os
import click
from mgshell.version import __version__
# Root click command group; subcommands attach to it via @cli.command().
# (Comments, not docstrings, so click's --help output is unchanged.)
@click.group()
def cli():
    pass

# `mgshell version`: print the installed package version.
@cli.command()
def version():
    click.echo("mgshell %s" % __version__)
|
# Print the same three-line greeting the original straight-line script
# produced, rebinding `message` on each iteration just as it did.
for message in (
        "Hello world! This is python!",
        "I'm taking a crash course.",
        "Now let's get crazy!",
):
    print(message)
|
from .base.worker import Worker
class Counter(Worker):
    """A worker that counts and manually checks for messages."""

    def __init__(self):
        """
        A worker that counts and manually checks for messages.
        """
        super().__init__()
        self.count = 0  # iterations counted so far
        self.timeout = 2  # We should respond to messages in < 2 seconds.

    def on_start(self):
        """
        Go IDLE on start.
        :return: The next function to run.
        """
        return self.on_run

    def on_run(self):
        # Busy-loop: count as fast as possible until a message arrives.
        # NOTE(review): _no_messages()/_debug() come from the Worker base
        # class, which is not visible here -- semantics assumed, confirm.
        while self._no_messages():
            self.count += 1
            # Log the count in thousands to keep debug output readable.
            self._debug("{}".format(int(self.count / 1000)))
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import logging
import sys
import uuid
import requests
import requests_kerberos
from desktop.conf import AUTH_USERNAME
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.sdxaas.knox_jwt import fetch_jwt
import desktop.lib.raz.signer_protos_pb2 as raz_signer
if sys.version_info[0] > 2:
from urllib.parse import urlparse as lib_urlparse, unquote as lib_urlunquote
else:
from urlparse import urlparse as lib_urlparse
from urllib import unquote as lib_urlunquote
LOG = logging.getLogger()
class RazClient(object):
    """Client for the Ranger Authorization Service (RAZ).

    Asks RAZ whether ``username`` may perform a cloud-storage operation
    and, when access is allowed, returns the material needed to perform
    it: a dict of signed request headers for S3, or a delegation token
    (DSAS) for ADLS.
    """

    def __init__(self, raz_url, auth_type, username, service='s3', service_name='cm_s3', cluster_name='myCluster'):
        # raz_url:  base URL of the RAZ server; trailing '/' stripped.
        # auth_type: 'kerberos' or 'jwt' (see _handle_raz_req).
        # service:  'adls' or 's3'; anything else is treated as S3 below.
        self.raz_url = raz_url.strip('/')
        self.auth_type = auth_type
        self.username = username
        self.service = service
        # Backend-specific identifiers embedded in the RAZ payload.
        if self.service == 'adls':
            self.service_params = {
                'endpoint_prefix': 'adls',
                'service_name': 'adls',
                'serviceType': 'adls'
            }
        else:
            self.service_params = {
                'endpoint_prefix': 's3',
                'service_name': 's3',
                'serviceType': 's3'
            }
        self.service_name = service_name
        self.cluster_name = cluster_name
        # One request id per client instance, reused for all of its checks.
        self.requestid = str(uuid.uuid4())

    def check_access(self, method, url, params=None, headers=None, data=None):
        """Ask RAZ whether ``method`` on ``url`` is allowed for this user.

        Returns {'token': <DSAS>} for ADLS, a dict of signer-generated
        headers for S3, or (implicitly) None when the HTTP call failed or
        no signed response was produced.  Raises PopupException when the
        decision is NOT_DETERMINED or anything other than ALLOWED.
        """
        LOG.debug("Check access: method {%s}, url {%s}, params {%s}, headers {%s}" % (method, url, params, headers))
        path = lib_urlparse(url)
        # Query string -> dict; valueless flags map to ''.  # ?delete, ?prefix=/hue
        url_params = dict([p.split('=') if '=' in p else (p, '') for p in path.query.split('&') if path.query])
        params = params if params is not None else {}
        headers = headers if headers is not None else {}
        endpoint = "%s://%s" % (path.scheme, path.netloc)
        resource_path = path.path.lstrip("/")
        # Skeleton payload; _make_*_request fills in the operation details.
        request_data = {
            "requestId": self.requestid,
            "serviceType": self.service_params['serviceType'],
            "serviceName": self.service_name,
            "user": self.username,
            "userGroups": [],
            "clientIpAddress": "",
            "clientType": "",
            "clusterName": self.cluster_name,
            "clusterType": "",
            "sessionId": "",
            "accessTime": "",
            "context": {}
        }
        request_headers = {"Content-Type": "application/json"}
        raz_url = "%s/api/authz/%s/access?doAs=%s" % (self.raz_url, self.service, self.username)
        # Both helpers mutate request_data (and, for S3, request_headers)
        # in place.
        if self.service == 'adls':
            self._make_adls_request(request_data, method, path, url_params, resource_path)
        elif self.service == 's3':
            self._make_s3_request(request_data, request_headers, method, params, headers, url_params, endpoint, resource_path, data=data)
        LOG.debug('Raz url: %s' % raz_url)
        LOG.debug("Sending access check headers: {%s} request_data: {%s}" % (request_headers, request_data))
        raz_req = self._handle_raz_req(raz_url, request_headers, request_data)
        signed_response_result = None
        signed_response = None
        if raz_req.ok:
            result = raz_req.json().get("operResult", False) and raz_req.json()["operResult"]["result"]
            if result == "NOT_DETERMINED":
                msg = "Failure %s" % raz_req.json()
                LOG.error(msg)
                raise PopupException(msg)
            if result != "ALLOWED":
                msg = "Permission missing %s" % raz_req.json()
                raise PopupException(msg, error_code=401)
            if result == "ALLOWED":
                LOG.debug('Received allowed response %s' % raz_req.json())
                signed_response_data = raz_req.json()["operResult"]["additionalInfo"]
                if self.service == 'adls':
                    LOG.debug("Received SAS %s" % signed_response_data["ADLS_DSAS"])
                    return {'token': signed_response_data["ADLS_DSAS"]}
                else:
                    signed_response_result = signed_response_data["S3_SIGN_RESPONSE"]
        if signed_response_result is not None:
            # The S3 signature comes back as a base64-encoded protobuf.
            raz_response_proto = raz_signer.SignResponseProto()
            signed_response = raz_response_proto.FromString(base64.b64decode(signed_response_result))
            LOG.debug("Received signed Response %s" % signed_response)
        # Signed headers "only"
        if signed_response is not None:
            return dict([(i.key, i.value) for i in signed_response.signer_generated_headers])

    def _handle_raz_req(self, raz_url, request_headers, request_data):
        """POST the access check to RAZ (kerberos or JWT auth) and return
        the raw requests.Response.

        NOTE(review): TLS verification is disabled (verify=False), and an
        auth_type other than 'kerberos'/'jwt' leaves raz_req unbound
        (UnboundLocalError) -- confirm both are intended.
        """
        if self.auth_type == 'kerberos':
            auth_handler = requests_kerberos.HTTPKerberosAuth(mutual_authentication=requests_kerberos.OPTIONAL)
            raz_req = requests.post(raz_url, headers=request_headers, json=request_data, auth=auth_handler, verify=False)
        elif self.auth_type == 'jwt':
            jwt_token = fetch_jwt()
            if jwt_token is None:
                raise PopupException('Knox JWT is not available to send to RAZ.')
            request_headers['Authorization'] = 'Bearer %s' % (jwt_token)
            raz_req = requests.post(raz_url, headers=request_headers, json=request_data, verify=False)
        return raz_req

    def _make_adls_request(self, request_data, method, path, url_params, resource_path):
        """Fill request_data['operation'] for an ADLS access check.

        resource_path is '<container>[/<relative path>]'; the storage
        account is the first label of the URL's host name.
        """
        resource_path = resource_path.split('/', 1)
        storage_account = path.netloc.split('.')[0]
        container = resource_path[0]
        relative_path = "/"
        relative_path = self._handle_relative_path(method, url_params, resource_path, relative_path)
        access_type = self.handle_adls_req_mapping(method, url_params)
        request_data.update({
            "clientType": "adls",
            "operation": {
                "resource": {
                    "storageaccount": storage_account,
                    "container": container,
                    "relativepath": relative_path,
                },
                "action": access_type,
                "accessTypes": [access_type]
            }
        })

    def _handle_relative_path(self, method, params, resource_path, relative_path):
        """Build and unquote the path inside the container for RAZ."""
        if len(resource_path) == 2:
            relative_path += resource_path[1]
        # Filesystem listings pass the path via the 'directory' query param.
        if relative_path == "/" and method == 'GET' and params.get('resource') == 'filesystem' and params.get('directory'):
            relative_path += params['directory']
        # Unquoting the full relative_path to catch edge cases like path having whitespaces or non-ascii chars.
        return lib_urlunquote(relative_path)

    def handle_adls_req_mapping(self, method, params):
        """Map an HTTP method (+ query params) to a Ranger ADLS access type.

        NOTE(review): a method not handled below (e.g. POST, OPTIONS)
        leaves access_type unassigned and raises UnboundLocalError.
        """
        if method == 'HEAD':
            access_type = ''
            if params.get('action') == 'getStatus' or params.get('resource') == 'filesystem':
                access_type = 'get-status'
            if params.get('action') == 'getAccessControl':
                access_type = 'get-acl'
        if method == 'DELETE':
            access_type = 'delete-recursive' if params.get('recursive') == 'true' else 'delete'
        if method == 'GET':
            access_type = 'list' if params.get('resource') == 'filesystem' else 'read'
        if method == 'PATCH':
            if params.get('action') in ('append', 'flush'):
                access_type = 'write'
            elif params.get('action') == 'setAccessControl':
                access_type = 'set-permission'
        if method == 'PUT':
            if params.get('resource') == 'file':
                access_type = 'create-file'
            elif params.get('resource') == 'directory':
                access_type = 'create-directory'
            else:
                access_type = 'rename-source'
        return access_type

    def _make_s3_request(self, request_data, request_headers, method, params, headers, url_params, endpoint, resource_path, data=None):
        """Serialize a SignRequestProto for the S3 call and stash it
        (base64) in request_data['context']['S3_SIGN_REQUEST']."""
        # In GET operations with non-ascii chars, only the non-ascii part is URL encoded.
        # We need to unquote the path fully before making a signed request for RAZ.
        if method == 'GET' and 'prefix' in url_params and '%' in url_params['prefix']:
            if sys.version_info[0] < 3 and isinstance(url_params['prefix'], unicode):
                url_params['prefix'] = url_params['prefix'].encode()
            url_params['prefix'] = lib_urlunquote(url_params['prefix'])
        # URL query params plus caller-supplied params as proto map entries.
        allparams = [raz_signer.StringListStringMapProto(key=key, value=[val]) for key, val in url_params.items()]
        allparams.extend([raz_signer.StringListStringMapProto(key=key, value=[val]) for key, val in params.items()])
        headers = [raz_signer.StringStringMapProto(key=key, value=val) for key, val in headers.items()]
        LOG.debug(
            "Preparing sign request with "
            "http_method: {%s}, headers: {%s}, parameters: {%s}, endpoint: {%s}, resource_path: {%s}, content_to_sign: {%s}" %
            (method, headers, allparams, endpoint, resource_path, data)
        )
        # Raz signed request proto call expects data as bytes instead of str for Py3.
        if sys.version_info[0] > 2 and data is not None and not isinstance(data, bytes):
            data = data.encode()
        raz_req = raz_signer.SignRequestProto(
            endpoint_prefix=self.service_params['endpoint_prefix'],
            service_name=self.service_params['service_name'],
            endpoint=endpoint,
            http_method=method,
            headers=headers,
            parameters=allparams,
            resource_path=resource_path,
            content_to_sign=data,
            time_offset=0
        )
        raz_req_serialized = raz_req.SerializeToString()
        # b64encode returns bytes on Py3; RAZ expects a str.
        if sys.version_info[0] > 2:
            signed_request = base64.b64encode(raz_req_serialized).decode('utf-8')
        else:
            signed_request = base64.b64encode(raz_req_serialized)
        request_headers["Accept-Encoding"] = "gzip,deflate"
        request_data["context"] = {
            "S3_SIGN_REQUEST": signed_request
        }
def get_raz_client(raz_url, username, auth='kerberos', service='s3', service_name='cm_s3', cluster_name='myCluster'):
    """Build a RazClient, resolving ``username`` from the current HTTP
    request (via crequest middleware) when none is supplied.

    Raises PopupException if no username can be determined.
    """
    if not username:
        # Deferred import: crequest is only needed on this fallback path.
        from crequest.middleware import CrequestMiddleware
        request = CrequestMiddleware.get_request()
        username = request.user.username if request and hasattr(request, 'user') and request.user.is_authenticated else None
    if not username:
        raise PopupException('No username set.')
    return RazClient(raz_url, auth, username, service=service, service_name=service_name, cluster_name=cluster_name)
|
# -*- coding: utf-8 -*-
from panda3d.core import TextNode, TransparencyAttrib
from direct.gui.OnscreenText import OnscreenText
from direct.gui.OnscreenImage import OnscreenImage
from base import Manager
class HUDManager(Manager):
    """Manage the game's HUD: on-screen texts and images (Panda3D)."""

    def __init__(self):
        self._hud = []  # every OnscreenText/OnscreenImage currently shown
        self._info = None  # current info line; replaced on each info() call
        self._natans = None  # current "natans" counter text
        self._natans_img = None  # icon next to the counter, loaded once

    # NOTE(review): `debug` is not imported in this file -- it appears to
    # be installed into builtins by the game framework; confirm.
    @debug(['managers'])
    def setup(self):
        self.help()

    @debug(['managers'])
    def clear(self):
        """Remove every text from the screen."""
        # We could remove everything from `aspect2d', however
        # it's cleaner to just destroy what we have created.
        #aspect2d.removeChildren()
        while self._hud:
            self._hud.pop().destroy()

    def clear_one(self, ost):
        """Remove a given OnscreenText from the screen."""
        if ost in self._hud:
            self._hud.remove(ost)
        ost.destroy()

    def show(self, text, **props):
        """Show text on the screen."""
        ost = OnscreenText(text=text, **props)
        self._hud.append(ost)
        return ost

    def show_image(self, path, **props):
        """Show an image on the screen, with alpha transparency enabled."""
        osi = OnscreenImage(image=path, **props)
        osi.setTransparency(TransparencyAttrib.MAlpha)
        self._hud.append(osi)
        return osi

    def show_centered(self, text, **kwargs):
        """Display text centered in the HUD."""
        props = dict(
            pos = (0, +0.5),
            scale = 0.26,
            align = TextNode.ACenter,
            shadow = (0, 0, 0, 1),
        )
        props.update(kwargs)
        return self.show(text, **props)

    def help(self):
        """Display informative text on HUD (player commands, Portuguese)."""
        text = u"""\
Comandos:
W
A S D - Movimentar jogador
<setas> - Movimentar jogador
"""
        # (key, description) pairs rendered as two aligned columns below.
        commands = (
            ("F2", "Novo jogo"),
            ("P", "Pausar/Continuar"),
            ("F5", "Ligar/Desligar IA"),
            ("Esc", "Sair do jogo"),
        )
        props = dict(
            pos = (-1.1, -0.55),
            scale = 0.07,
            align = TextNode.ALeft,
            fg = (0.8, 0.8, 0.8, 0.4),
            shadow = (0, 0, 0, 1),
        )
        self.show(text, **props)
        # Key-name column.
        props.update(
            pos = (0.2, -0.70)
        )
        self.show("\n".join(map(lambda t: t[0], commands)), **props)
        # Description column.
        props.update(
            pos = (0.42, -0.70)
        )
        self.show("\n".join(map(lambda t: "- %s" % t[1], commands)), **props)

    def win(self, extra_msg=""):
        """Show the victory message, plus an optional smaller extra line."""
        text = u"Você venceu!"
        self.show_centered(text, fg=(0.3, 1, 0.2, 1))
        if extra_msg:
            self.show_centered(extra_msg, fg=(0.3, 1, 0.2, 1),
                               pos=(0, +0.1),
                               scale=0.17)

    def lose(self):
        """Show the defeat message."""
        text = u"Você perdeu!"
        self.show_centered(text, fg=(1, 0.3, 0.2, 1))

    def pause(self):
        """Show the paused message."""
        text = u"Pausado"
        self.show_centered(text, fg=(0.8, 0.8, 0.2, 1))

    def info(self, msg):
        """Display information in the HUD (replaces the previous line)."""
        props = dict(
            pos = (0, -0.6),
            scale = 0.05,
            align = TextNode.ACenter,
            fg = (0.9, 0.8, 0.4, 1),
            shadow = (0, 0, 0, 1),
        )
        if self._info:
            self.clear_one(self._info)
        self._info = self.show(msg, **props)
        return self._info

    def natans(self, n):
        """Display the "natans" count with its icon in the HUD."""
        # Load img only once
        if not self._natans_img:
            img_props = dict(
                pos = (-0.15, 0, 0.92),
                scale = 0.06,
            )
            self._natans_img = self.show_image("models/imgs/natan.png", **img_props)
        props = dict(
            pos = (0, 0.9),
            scale = 0.12,
            align = TextNode.ACenter,
            fg = (1, 1, 1, 1),
            shadow = (0, 0, 0, 1),
        )
        if self._natans:
            self.clear_one(self._natans)
        self._natans = self.show(str(n), **props)
        return self._natans
|
# coding=utf-8
# list: 属于序列类型
def test1():
    """Demonstrate list creation and the common sequence/list operations."""
    # Creating lists
    s = [1, 2, "hi", (2, 3)]  # or: s = list([1, 2, "hi", (2, 3)])
    t = [[i + j for j in range(3)] for i in range(2)]
    # Operators shared by all sequence types
    print("hi" in s)  # True
    print("hi" not in s)  # False
    print(s + t)  # [1, 2, 'hi', (2, 3), [0, 1, 2], [0, 1, 2]]
    print(2 * s)  # [1, 2, 'hi', (2, 3), 1, 2, 'hi', (2, 3)]
    print(s[2])  # hi
    print(s[1:3])  # slicing works as for strings -> [2, 'hi']
    # Functions/methods shared by all sequence types
    print("---------------------------------------")
    print(len(s))  # 4
    print(min(t))  # smallest element; elements must be comparable -> [0, 1, 2]
    print(max(t))  # largest element; elements must be comparable -> [1, 2, 3]
    print(s.index(2, 1, 3))  # first index of 2 within s[1:3]; ValueError if absent
    print(s.count(12))  # occurrences of 12 -> 0
    # List-specific functions and methods
    print("---------------------------------------")
    lt = []  # empty list
    lt += [1, 2, 3, 4, 5]  # extend lt with five elements
    lt[2] = 6  # replace the element at index 2 -> [1, 2, 6, 4, 5]
    lt.insert(2, 7)  # insert 7 at index 2 -> [1, 2, 7, 6, 4, 5]
    del lt[1]  # [1, 7, 6, 4, 5]
    del lt[1:4]  # [1, 5]
    print(0 in lt)  # membership test -> False
    lt.append(0)  # append 0 -> [1, 5, 0]
    print(lt.index(0))  # index of 0; ValueError if absent
    print(len(lt))  # length -> 3
    print(max(lt))  # largest element -> 5
    for x in lt:  # iteration
        print(x, end=", ")
    print()
    for i, x in enumerate(lt):  # lt[i] = x
        print(i, x)
    lt.clear()  # empty the list -> []
    print("---------------------------------------")
    t = [i for i in range(5)]  # [0, 1, 2, 3, 4]
    t.pop(0)  # remove the head element -> [1, 2, 3, 4]
    t.append(3)  # append 3 -> [1, 2, 3, 4, 3]
    t.reverse()  # reverse in place -> [3, 4, 3, 2, 1]
    t.remove(3)  # remove the first 3; ValueError if absent -> [4, 3, 2, 1]
    t.extend(["xyz", 'abc', 123])  # append a whole list -> [4, 3, 2, 1, 'xyz', 'abc', 123]
# queue
def test2():
    """Demonstrate FIFO queues: a plain list, PriorityQueue, and a max-heap."""
    # queue: simulate a FIFO queue with a list
    q = []  # the queue
    q.append(4)  # enqueue -> [4]
    q.append(3)  # enqueue -> [4, 3]
    q.append(7)  # enqueue -> [4, 3, 7]
    q.pop(0)  # dequeue from the front -> [3, 7]
    print(q[0])  # peek at the front element -> 3
    q.clear()  # empty the queue
    print(len(q))  # element count -> 0
    print(len(q) == 0)  # queue empty? -> True
    # PriorityQueue: a min-heap by default, built on heapq
    print("---------------------------------------")
    from queue import PriorityQueue
    heap = PriorityQueue()
    heap.put((2, 'hi'))
    heap.put((1, 'hello'))
    heap.put((3, 'world'))
    print(heap.queue)  # all elements currently in the heap
    print(heap.queue[0])  # peek at the top element without removing it
    print(heap.get())  # pop and return the top element
    print(heap.qsize())
    # Python 3's PriorityQueue takes no parameter to become a max-heap;
    # wrap heapq yourself, see:
    # https://stackoverflow.com/questions/14189540/python-topn-max-heap-use-heapq-or-self-implement
    # For algorithm problems that need a max-heap, Python is not ideal.
    import heapq
    class MaxHeap(object):
        # Negate values so heapq's min-heap behaves as a max-heap.
        def __init__(self, x):
            self.heap = [-e for e in x]
            heapq.heapify(self.heap)
        def put(self, value):
            heapq.heappush(self.heap, -value)
        def get(self):
            return -heapq.heappop(self.heap)
# stk
def test3():
    """Simulate a LIFO stack with a plain list."""
    # Python has no built-in stack type; a list works fine.
    stk = []
    stk.append(20)  # push
    stk.append(10)
    stk.append(30)
    stk.pop(len(stk) - 1)  # pop the top (equivalent to stk.pop())
    stk.clear()  # empty the stack
    print(len(stk))  # element count -> 0
    print(len(stk) == 0)  # stack empty? -> True
# deque
def test4():
    """Demonstrate collections.deque as a double-ended queue."""
    from collections import deque
    # Create the deque
    d = deque()
    # Insert / remove elements at both ends
    d.append(20)  # append right -> deque([20])
    d.append(10)  # append right -> deque([20, 10])
    d.pop()  # pop from the right -> deque([20])
    d.appendleft(40)  # append left -> deque([40, 20])
    d.appendleft(30)  # append left -> deque([30, 40, 20])
    d.popleft()  # pop from the left -> deque([40, 20])
    # Iterate
    for x in d:
        print(x, end=", ")
    print()
    # Front and back elements
    print(d[0])  # front -> 40
    print(d[len(d) - 1])  # back -> 20
    # Empty the deque
    d.clear()
    print(len(d))  # 0
    print(len(d) == 0)  # True
# set
def test5():
    """Demonstrate the ordered SortedSet and the built-in unordered set.

    SortedSet docs: http://www.grantjenks.com/docs/sortedcontainers/sortedset.html
    """
    from sortedcontainers import SortedSet
    # Create a SortedSet
    ss = SortedSet([3, 1, 2, 5, 4])
    print(ss)  # SortedSet([1, 2, 3, 4, 5])
    from operator import neg
    ss1 = SortedSet([3, 1, 2, 5, 4], neg)
    print(ss1)  # SortedSet([5, 4, 3, 2, 1], key=<built-in function neg>)
    # Convert a SortedSet to list/tuple/set
    print(list(ss))  # [1, 2, 3, 4, 5]
    print(tuple(ss))  # (1, 2, 3, 4, 5)
    print(set(ss))  # {1, 2, 3, 4, 5}
    # Insert / remove elements
    ss.discard(-1)  # discarding a missing element does not raise
    ss.remove(1)  # removing a missing element raises KeyError
    ss.discard(3)  # SortedSet([2, 4, 5])
    ss.add(-10)  # SortedSet([-10, 2, 4, 5])
    # First and last elements
    print(ss[0])  # -10
    print(ss[-1])  # 5
    # Iterate
    for e in ss:
        print(e, end=", ")  # -10, 2, 4, 5,
    print()
    # Membership test
    print(2 in ss)  # True
    # bisect_left() / bisect_right()
    print(ss.bisect_left(4))  # index of the first element >= 4 -> 2
    print(ss.bisect_right(4))  # index of the first element > 4 -> 3
    # Empty the set
    ss.clear()
    print(len(ss))  # 0
    print(len(ss) == 0)  # True
    # Unordered set: the built-in ``set``.
    """
    无序的集合: set
    """
    # Set elements must be hashable, so a set cannot contain a list.
    A = {"hi", 2, ("we", 24)}
    B = set()  # empty set; B = {} would create a dict instead
    # Set algebra; every operator below also exists in op= form
    print("---------------------------------------")
    S = {1, 2, 3}
    T = {3, 4, 5}
    print(S & T)  # intersection: elements in both S and T
    print(S | T)  # union: all elements of S and T
    print(S - T)  # difference: elements in S but not in T
    print(S ^ T)  # symmetric difference: elements in exactly one of S, T
    # Subset relations
    print("---------------------------------------")
    C = {1, 2}
    D = {1, 2}
    print(C <= D)  # C subset of D? True
    print(C < D)  # C proper subset of D? False
    print(C >= D)  # D subset of C? True
    print(C > D)  # D proper subset of C? False
    # Set methods
    print("---------------------------------------")
    S = {1, 2, 3, 5, 6}
    S.add(4)  # add 4 if not already present
    S.discard(1)  # remove 1; no error if missing
    S.remove(2)  # remove 2; KeyError if missing
    for e in S:  # iterate
        print(e, end=",")
    print()
    print(S.pop())  # pop an arbitrary element; KeyError if S is empty
    print(S.copy())  # shallow copy; mutating it does not affect S
    print(len(S))  # number of elements
    print(5 in S)  # membership -> True
    print(5 not in S)  # non-membership -> False
    S.clear()  # remove all elements
# SortedList: similar to C++'s multiset
def _test5():
    """Demonstrate SortedList.

    Docs: http://www.grantjenks.com/docs/sortedcontainers/sortedlist.html
    """
    from sortedcontainers import SortedList
    # Construction
    sl = SortedList(key=lambda x: -x)  # descending order
    sl = SortedList([3, 1, 2, 1, 5, 4])  # ascending order (rebinds sl)
    print(sl)  # SortedList([1, 1, 2, 3, 4, 5])
    # Insert / remove elements
    sl.add(3)
    sl.add(3)
    sl.discard(2)  # SortedList([1, 1, 3, 3, 3, 4, 5])
    print(sl)
    # Count occurrences of an element
    print(sl.count(3))  # 3
    # First and last elements
    print(sl[0])  # 1
    print(sl[-1])  # 5
    # Iterate
    for e in sl:
        print(e, end=", ")  # 1, 1, 3, 3, 3, 4, 5,
    print()
    # Membership test
    print(2 in sl)  # False
    # bisect_left() / bisect_right()
    print(sl.bisect_left(3))  # index of the first element >= 3 -> 2
    print(sl.bisect_right(3))  # index of the first element > 3 -> 5
    # Empty the list
    sl.clear()
    print(len(sl))  # 0
    print(len(sl) == 0)  # True
# map
def test6():
    """Demonstrate the ordered SortedDict and the built-in dict.

    SortedDict docs: http://www.grantjenks.com/docs/sortedcontainers/sorteddict.html
    """
    from sortedcontainers import SortedDict
    sd = SortedDict()
    # Insert entries
    sd["wxx"] = 21
    sd["hh"] = 18
    sd["other"] = 20
    print(sd)  # SortedDict({'hh': 18, 'other': 20, 'wxx': 21})
    print(sd["wxx"])  # indexing a missing key raises KeyError
    print(sd.get("c"))  # .get returns None for a missing key
    # SortedDict -> dict
    print(dict(sd))  # {'hh': 18, 'other': 20, 'wxx': 21}
    # First and last entries
    print(sd.peekitem(0))  # first item as a tuple -> ('hh', 18)
    print(sd.peekitem())  # last item as a tuple -> ('wxx', 21)
    # Iterate
    for k, v in sd.items():
        print(k, ':', v, sep="", end=", ")  # sep="" removes the separating spaces
    print()
    for k in sd:  # iterate keys; same as for k in sd.keys()
        print(str(k) + ":" + str(sd[k]), end=", ")
    print()
    for v in sd.values():  # iterate values
        print(v, end=", ")
    print()
    # A key from the map
    print(sd.peekitem()[0])
    # A value from the map
    print(sd.peekitem()[1])
    # Membership test on keys
    print("wxx" in sd)  # True
    # bisect_left() / bisect_right()
    sd["a"] = 1
    sd["c1"] = 2
    sd["c2"] = 4
    print(sd)  # SortedDict({'a': 1, 'c1': 2, 'c2': 4, 'hh': 18, 'other': 20, 'wxx': 21})
    print(sd.bisect_left("c1"))  # index of the first key >= "c1" -> 1
    print(sd.bisect_right("c1"))  # index of the first key > "c1" -> 2
    # Empty the map
    sd.clear()
    print(len(sd))  # 0
    print(len(sd) == 0)  # True
    # Unordered map: the built-in ``dict``.
    """
    无序的map: dict
    """
    print("---------------------------------------")
    d = {"c1": 2, "c2": 4, "hh": 18, "wxx": 21, 13: 14, 1: 0}
    print(d["wxx"])  # 21
    print(d[13])  # 14
    d[13] += 1
    print(d[13])  # 15
    d["future"] = "wonderful"  # add a key/value pair
    del d[1]  # remove the entry with key 1
    print("wxx" in d)  # is "wxx" a key of d? True/False
    print(d.keys())  # all keys -> dict_keys(['c1', 'c2', 'hh', 'wxx', 13, 'future'])
    print(d.values())  # all values
    print(d.items())  # all (key, value) pairs
    for k, v in d.items():  # iterate pairs
        print(k, ':', v)
    for k in d:  # iterate keys; same as for k in d.keys()
        print(str(k) + ":" + str(d[k]), end=", ")
    print()
    for v in d.values():  # iterate values
        print(v, end=", ")
    print()
    # dict functions and methods
    print("---------------------------------------")
    d = {"中国": "北京", "美国": "华盛顿", "法国": "巴黎"}
    print(len(d))  # number of entries -> 3
    print(d.get("中国", "不存在"))  # value if the key exists, else the default
    print(d.get("中", "不存在"))  # default returned for a missing key
    print(d.get("中"))  # None when no default is given
    d["美国"] = "Washington"  # replace the value for an existing key
    print(d.pop("美国"))  # return and remove the value for a key
    print(d.popitem())  # remove and return an arbitrary (key, value) tuple
    d.clear()  # remove every entry
if __name__ == "__main__":
test6()
|
from keras.models import Sequential
from scipy.misc import imread
import matplotlib.pyplot as plt
import numpy as np
import keras
from keras.layers import Dense
import pandas as pd
from keras import backend as K
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input
import numpy as np
from keras.applications.resnet50 import decode_predictions
import math
from collections import Counter
import pickle
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from PIL import Image
from tqdm import tqdm
import cv2
def image_array(filename , size):
    """Read *filename* with OpenCV and return it resized to size x size."""
    loaded = cv2.imread(filename)
    return cv2.resize(loaded, (size, size))
def prediction(array):
    """Classify a 224x224 RGB image array as benign or malignant.

    Extracts headless ResNet50 (ImageNet) features from *array*, feeds the
    flattened features to the saved 'breast_cancer.h5' classifier and
    returns ("Benign" or "Malignant", flattened_feature_array).
    """
    #size = 224
    #array = image_array(filename , size)
    #print(array.shape)
    array = np.array(array , dtype = np.float64)
    # ResNet50 expects a batch of shape (1, 224, 224, 3).
    array = np.reshape(array, (1,224,224,3))
    a = preprocess_input(array)
    # NOTE(review): both models are re-loaded on every call, which is
    # slow; consider caching them at module level.
    model = ResNet50(weights='imagenet', include_top=False)
    features = model.predict(a)
    K.clear_session()
    # Flatten per-image features into a (1, n_features) matrix.
    image = features.reshape(features.shape[0] , -1)
    loaded_model = keras.models.load_model('breast_cancer.h5')
    #print(test)
    #a = test[0]
    #a = np.reshape(a , (1,25088))
    #print(image.shape)
    y_predict = loaded_model.predict(image)
    #print(y_predict.shape)
    K.clear_session()
    # Column 0 = benign, column 1 = malignant -- assumes the classifier
    # was trained with that label order; TODO confirm.
    print('Benign Probability: ', y_predict[0][0])
    print('Malignant Probability: ', y_predict[0][1])
    if y_predict[0][0] > y_predict[0][1]:
        return "Benign", image
    else:
        return "Malignant" , image
def features(array):
    """Render *array* as a 32x64 black/white image (white where value > 1).

    NOTE(review): cells exactly equal to 1 match neither Z>1 nor Z<1, so
    they keep G's zero initialisation (black); the Z<1 assignment is
    therefore redundant -- confirm whether == 1 should be white.
    """
    Z = np.reshape(array, (32, 64))
    G = np.zeros((32,64,3))
    G[Z>1] = [1,1,1]
    G[Z<1] = [0,0,0]
    plt.imshow(G,interpolation='nearest')
    plt.show()
import sys
import glob
import math
import re
from ROOT import *
from array import *
gROOT.SetBatch(True)
gStyle.SetOptStat(0)
gStyle.SetPalette(1)
gROOT.LoadMacro("../style/AtlasStyle.C")
gROOT.LoadMacro("../style/AtlasUtils.C")
SetAtlasStyle()
# Significance label used on plot axes.
y_axis_label = "S/#sqrt(B)"
#############################
# b-tag multiplicity selection: only the LAST assignment takes effect;
# the earlier lines are kept as quick toggles between reruns.
btagStrategy = "FourPlusTags"
btagStrategy = "ThreeTags"
btagStrategy = "Incl"
btagStrategy = "TwoTags"
# Analysis region used to select histograms from the input files.
Region = "Resolved_SR"
############################
def SignificanceFunction2(s, b, rel_unc):
    """Return the significance Z of signal s over background b with a
    relative yield uncertainty rel_unc (Python 2).

    Appears to implement the profile-likelihood significance with a
    background uncertainty term -- TODO confirm against the reference
    formula.  Returns -1 when inputs are unusable (s<=0, b<=0, or b<s)
    and 0 when the log/sqrt arguments become invalid.
    """
    n = s+b  # total expected yield
    # NOTE(review): sigma scales with n (total), not b -- confirm intended.
    sigma = n*rel_unc
    print n, b, s, sigma
    #print n*math.log((n*(b+(sigma**2)))/((b**2)+n*(sigma**2)))
    # print(b**2)/(sigma**2)*math.log(1+((sigma**2)*(n-b))/(b*(b+(sigma**2))))
    if s <= 0 or b <= 0:
        print("ERROR signal or background events is Zero!")
        return -1
    else:
        if b >= s:
            try:
                Z = math.sqrt(2*(n*math.log((n*(b+(sigma**2)))/((b**2)+n*(sigma**2)))-(
                    b**2)/(sigma**2)*math.log(1+(((sigma**2)*(n-b))/(b*(b+(sigma**2)))))))
            except:
                # Invalid log/sqrt argument: report zero significance.
                Z = 0
        else:
            Z = -1
            print "this is at n<b"
    return Z
def analyseEvent():
    """Scan lower-bound cuts on each histogram variable and plot the
    resulting s/sqrt(b) plus significance Z for three background
    uncertainty assumptions (2.5%, 5%, 10%); write the optimal cut
    values per variable to ../max_z/max_m400.txt.
    """
    # for HistoName in ["MET","maxMVAResponse","pTH","METSig","nJets","Lepton_Pt","nBtagCategory","ntagsOutside","mH","DeltaPhi_HW","nBTags"]:
    # HistoName="MET"
    for HistoName in ["maxMVAResponse"]:
        # for HistoName in ["DeltaPhi_HW"]:
        # Two overlayed transparent pads on one canvas: pad1 for
        # s/sqrt(b), pad2 for the Z curves with a right-hand axis.
        c1 = TCanvas("c1", "", 1200, 900)
        pad1 = TPad()
        pad2 = TPad()
        pad1.SetCanvas(c1)
        pad2.SetCanvas(c1)
        pad1.SetCanvasSize(1200, 900)
        pad2.SetCanvasSize(1200, 900)
        pad1.SetLeftMargin(0.15)
        pad2.SetLeftMargin(0.15)
        pad2.SetRightMargin(0.13)
        pad1.SetRightMargin(0.13)
        # Transparent fill so pad2 overlays pad1 without hiding it.
        pad2.SetFillStyle(4000)
        pad2.SetFrameFillStyle(0)
        bin_breite = 0
        # Signal samples for three H+ mass hypotheses (only m400 is
        # actually used below) plus the summed SM backgrounds.
        file1 = TFile.Open(
            "../PlotFiles/ForOptimisation/sig_Hplus_Wh_m800-0.root", "READ")
        dir1 = file1.GetDirectory("Nominal").GetDirectory(HistoName)
        h_signalHisto_800 = dir1.Get(
            "sig_Hplus_Wh_m800-0_"+HistoName+"_"+Region+"_"+btagStrategy+"_Nominal")
        file6 = TFile.Open(
            "../PlotFiles/ForOptimisation/sig_Hplus_Wh_m400-0.root", "READ")
        dir6 = file6.GetDirectory("Nominal").GetDirectory(HistoName)
        h_signalHisto_400 = dir6.Get(
            "sig_Hplus_Wh_m400-0_"+HistoName+"_"+Region+"_"+btagStrategy+"_Nominal")
        file7 = TFile.Open(
            "../PlotFiles/ForOptimisation/sig_Hplus_Wh_m1600-0.root", "READ")
        dir7 = file7.GetDirectory("Nominal").GetDirectory(HistoName)
        h_signalHisto_1600 = dir7.Get(
            "sig_Hplus_Wh_m1600-0_"+HistoName+"_"+Region+"_"+btagStrategy+"_Nominal")
        file5 = TFile.Open("../PlotFiles/ForOptimisation/ttbar.root", "READ")
        dir5 = file5.GetDirectory("Nominal").GetDirectory(HistoName)
        h_backgHisto_t = dir5.Get(
            "ttbar_"+HistoName+"_"+Region+"_"+btagStrategy+"_Nominal")
        file2 = TFile.Open("../PlotFiles/ForOptimisation/Wjets.root", "READ")
        dir2 = file2.GetDirectory("Nominal").GetDirectory(HistoName)
        h_backgHisto_W = dir2.Get(
            "Wjets_"+HistoName+"_"+Region+"_"+btagStrategy+"_Nominal")
        file3 = TFile.Open(
            "../PlotFiles/ForOptimisation/singleTop.root", "READ")
        dir3 = file3.GetDirectory("Nominal").GetDirectory(HistoName)
        h_backgHisto_single = dir3.Get(
            "singleTop_"+HistoName+"_"+Region+"_"+btagStrategy+"_Nominal")
        file4 = TFile.Open("../PlotFiles/ForOptimisation/diboson.root", "READ")
        dir4 = file4.GetDirectory("Nominal").GetDirectory(HistoName)
        h_backgHisto_diboson = dir4.Get(
            "diboson_"+HistoName+"_"+Region+"_"+btagStrategy+"_Nominal")
        # Total background = ttbar + W+jets + single top + diboson.
        h_backgHisto = h_backgHisto_t+h_backgHisto_W + \
            h_backgHisto_single+h_backgHisto_diboson
        print h_signalHisto_400.Integral()/math.sqrt(h_backgHisto.Integral())
        x = array('d')
        y_800 = array('d')
        y_400 = array('d')
        y_1600 = array('d')
        y_backg = array('d')
        z = array('d')
        z_800 = array('d')
        z_400 = array('d')
        z_400_5 = array('d')
        z_400_25 = array('d')
        z_400_10 = array('d')
        z_1600 = array('d')
        y = array('d')
        z_5 = array('d')
        z_25 = array('d')
        z_10 = array('d')
        # x_bin_size=h_signalHisto_400.GetXaxis().GetXmax()/h_signalHisto_400.GetNbinsX()
        x_bin_size = h_signalHisto_400.GetBinWidth(2)
        # Scan: for each bin i, take the integral from bin i upward as
        # the yield after a lower-bound cut at that bin.
        for i in range(1, h_signalHisto_400.GetNbinsX()+1):
            h_signalHisto = h_signalHisto_400
            bin_breite = h_signalHisto.GetBinWidth(2)
            print (bin_breite-x_bin_size)
            n_sig = GetNumberOfEvents(i, h_signalHisto)
            n_backg = GetNumberOfEvents(i, h_backgHisto)
            # Skip cut values with no background left (avoid div by zero).
            if n_backg == 0:
                continue
            x.append(h_signalHisto.GetBinCenter(i))
            # n_sig_800=GetNumberOfEvents(i,h_signalHisto_800)
            # n_sig_400=GetNumberOfEvents(i,h_signalHisto_1600)
            # n_sig_1600=GetNumberOfEvents(i,h_signalHisto_1600)
            # y_800.append((GetNumberOfEvents(i,h_signalHisto_1600)/math.sqrt(n_backg)))
            # y_400.append((GetNumberOfEvents(i,h_signalHisto_1600)/math.sqrt(n_backg)))
            # y_1600.append((GetNumberOfEvents(i,h_signalHisto_1600)/math.sqrt(n_backg)))
            # z_400.append(SignificanceFunction2(n_sig_800,n_backg,0.05))
            # z_400_5.append(SignificanceFunction2(n_sig_1600,n_backg,0.05))
            # z_400_25.append(SignificanceFunction2(n_sig_1600,n_backg,0.025))
            # z_400_10.append(SignificanceFunction2(n_sig_1600,n_backg,0.1))
            y.append(((n_sig)/math.sqrt(n_backg)))
            # Z for 5%, 2.5% and 10% relative background uncertainty.
            z_5.append(SignificanceFunction2(n_sig, n_backg, 0.05))
            z_25.append(SignificanceFunction2(n_sig, n_backg, 0.025))
            z_10.append(SignificanceFunction2(n_sig, n_backg, 0.1))
            # z_800.append(SignificanceFunction2(n_sig_800,n_backg,0.05))
            # z_1600.append(SignificanceFunction2(n_sig_1600,n_backg,0.05))
            # z.append(SignificanceFunction2(n_sig,n_backg,0.05))
            # x_single.append(n_sig/math.sqrt(GetNumberOfEvents(i,h_backgHisto_single)))
            # x_di.append(n_sig/math.sqrt(GetNumberOfEvents(i,h_backgHisto_diboson)))
            # x_W.append(n_sig/math.sqrt(GetNumberOfEvents(i,h_backgHisto_W)))
            # z_single.append(SignificanceFunction2(n_sig,GetNumberOfEvents(i,h_backgHisto_single),0.05))
            # z_W.append(SignificanceFunction2(n_sig,GetNumberOfEvents(i,h_backgHisto_W),0.05))
            # z_di.append(SignificanceFunction2(n_sig,GetNumberOfEvents(i,h_backgHisto_diboson),0.05))
        # graph_800_z=TGraph(len(x),x,z_800)
        # graph_400_z=TGraph(len(x),x,z_400)
        # graph_1600_z=TGraph(len(x),x,z_1600)
        # graph_800_sq=TGraph(len(x),x,y_800)
        graph_400_sq = TGraph(len(x), x, y)
        # graph_1600_sq=TGraph(len(x),x,y_1600)
        graph_400_25_z = TGraph(len(x), x, z_25)
        graph_400_5_z = TGraph(len(x), x, z_5)
        graph_400_10_z = TGraph(len(x), x, z_10)
        # graph_W=TGraph(len(x),x,z_W)
        # graph_single=TGraph(len(x),x,z_single)
        # graph_di=TGraph(len(x),x,z_di)
        # print "GetMaximun:"
        # print h_signalHisto.GetXaxis().GetXmax()
        # graph_z=TGraph(len(x),x,z_5)
        # graph_sq=TGraph(len(x),x,y)
        # c1.Divide(1,2,0,0)
        # c1.cd(1)
        # graph_1600_z.GetXaxis().SetTitle(HistoName)
        # graph_1600_z.GetYaxis().SetTitle("Z")
        # graph_1600_z.SetMarkerColor(kBlue)
        # graph_800_z.SetMarkerColor(kRed)
        # graph_400_z.SetMarkerColor(kGreen)
        # graph_1600_sq.SetMarkerColor(kBlue)
        # graph_800_sq.SetMarkerColor(kRed)
        graph_400_sq.SetMarkerColor(kBlack)
        graph_400_sq.GetXaxis().SetTitle("Lowerbound on:"+HistoName)
        graph_400_sq.GetYaxis().SetTitle("s/#sqrt{b}")
        graph_400_sq.GetYaxis().CenterTitle()
        graph_400_25_z.SetMarkerStyle(4)
        graph_400_25_z.GetYaxis().SetTitle("z")
        graph_400_25_z.GetYaxis().CenterTitle()
        graph_400_25_z.GetYaxis().SetLimits(-0.05, 1)
        graph_400_5_z.SetMarkerStyle(25)
        graph_400_10_z.SetMarkerStyle(26)
        graph_400_sq.SetMarkerStyle(3)
        graph_400_5_z.SetMarkerColor(kBlue)
        graph_400_10_z.SetMarkerColor(kRed)
        graph_400_25_z.SetMarkerColor(kGreen)
        # graph_1600_z.Draw("AP")
        # graph_400_z.Draw("P")
        # graph_800_z.Draw("P")
        # c1.cd(2)
        pad1.Draw()
        pad1.cd()
        graph_400_sq.Draw("AP")
        # graph_800_sq.Draw("P")
        # graph_1600_sq.Draw("P")
        pad2.Draw()
        pad2.cd()
        # "Y+" puts the Z axis on the right-hand side of the overlay pad.
        graph_400_25_z.Draw("APY+")
        graph_400_10_z.Draw("P")
        graph_400_5_z.Draw("P")
        leg = TLegend(0.675, 0.7, 0.85, 0.95)
        # leg=TLegend(0.175,0.7,0.35,0.95)
        leg.SetHeader("m=400GeV", "C")
        leg.SetTextAlign(12)
        leg.AddEntry(graph_400_25_z, "z_{ 2.5%}", "P")
        leg.AddEntry(graph_400_5_z, "z_{5%}", "P")
        leg.AddEntry(graph_400_10_z, "z_{10%}", "P")
        leg.AddEntry(graph_400_sq, "s/#sqrt{b}", "P")
        leg.Draw()
        print x
        # graphZ.Draw("ACP")
        c1.Update()
        c1.SaveAs("../Plots/Sig_Plots/m400/Combined_m400_test_%s.pdf" %
                  (HistoName))
        pad1.Close()
        pad2.Close()
        c1.Close()
        # Optimal cut per metric, appended to the summary text file.
        max_sq = GetMaximumX(x, y)
        max_5 = GetMaximumX(x, z_5)
        max_25 = GetMaximumX(x, z_25)
        max_10 = GetMaximumX(x, z_10)
        f = open("../max_z/max_m400.txt", "a")
        f.write(HistoName+";"+str(max_sq)+";"+str(max_25)+";" +
                str(max_5)+";"+str(max_10)+";"+str(bin_breite)+"\n")
        f.close()
def GetNumberOfEvents(lowerBound, histo):
    """Integral of *histo* from bin *lowerBound* through the last bin.

    Returns None when lowerBound is below the first bin.
    """
    if lowerBound < 1:
        return
    last_bin = histo.GetNbinsX()
    return histo.Integral(lowerBound, last_bin)
def GetNumberOfEventsRevers(upperBound, histo):
    """Integral of *histo* from the first bin through bin *upperBound*
    (the upper-bound counterpart of GetNumberOfEvents).

    The original function carried an unreachable guard: it hard-coded
    x_lowerbound = 1 and then tested `x_lowerbound < 1`, which can
    never be true, so the dead branch has been removed.
    """
    return histo.Integral(1, upperBound)
def GetMaximumX(x, y):
    """Return the x value at which y is largest.

    Only strictly positive maxima count: if every y value is <= 0, or
    the inputs are empty, 0 is returned — the scan starts from a
    running maximum of zero, and ties keep the earliest x.
    """
    best_y = 0
    best_x = 0
    for xi, yi in zip(x, y):
        if yi > best_y:
            best_y = yi
            best_x = xi
    return best_x
analyseEvent()
|
from rest_framework import serializers
from .models import Schedule, ScheduleHour
from doctor.serializers import DoctorSerializer
class ScheduleHourSerializer(serializers.ModelSerializer):
    """Serializes a single ScheduleHour, exposing only its hour value."""

    class Meta:
        model = ScheduleHour
        fields = (
            'hour',
        )
class ScheduleSerializer(serializers.ModelSerializer):
    """Serializes a Schedule together with its doctor and hour entries."""

    # Related hours rendered via each object's __str__ (read-only field).
    schedule = serializers.StringRelatedField(many=True)
    # Full nested doctor representation.
    doctor = DoctorSerializer()

    class Meta:
        model = Schedule
        fields = (
            'id',
            'doctor',
            'day',
            'schedule',
        )
        # NOTE(review): depth only affects relations not already covered
        # by the explicit fields above — confirm it is still needed
        # alongside the nested DoctorSerializer.
        depth = 2
|
from collections import defaultdict
import traceback
import zmq
import time
import signal
import threading
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from management.portfolio import Portfolio
from management.order_book import OrderBooks
from agents.trader import Trader
from utils.state import State
from utils.exchange_messages import split_topic, parse_book_str, parse_event_str
from utils.sliding_window import SlidingWindow
# Chart refresh period in milliseconds.
CHART_UPDATE_MS = 1000
# P/L history window in seconds; -1 presumably means "keep everything"
# (see SlidingWindow) — TODO confirm.
HISTORY_LEN_SEC = -1
def signal_handler(signum, frame):
    """SIGINT handler: terminate the process with exit status 1.

    Uses `raise SystemExit(1)` instead of the `exit()` helper, which is
    injected by the `site` module and is not guaranteed to exist (for
    example when running with `python -S`).
    """
    raise SystemExit(1)
class DummyTrader(Trader):
    """Placeholder Trader for users discovered on the exchange feed.

    Created for any user id that appears in exchange events without a
    real trader attached; every hook is a no-op, so only the P/L
    bookkeeping done by the base class / feed loop applies.
    """

    def symbols(self):
        # Passive observer: no symbol subscriptions.
        pass

    def init(self, state):
        # Nothing to initialise.
        pass

    def run(self, state):
        # Never places orders.
        pass
def run(psub_addr, portfolios, traders):
    """Consume the exchange publisher feed and keep per-user state fresh.

    psub_addr: ZMQ SUB endpoint of the exchange publisher.
    portfolios, traders: shared dicts, mutated in place as new users
    are discovered (the plotting thread reads them concurrently).
    """
    states = {}
    order_book = OrderBooks()
    pcontext = zmq.Context()
    psocket = pcontext.socket(zmq.SUB)
    psocket.connect(psub_addr)
    # Empty prefix subscribes to every topic.
    psocket.subscribe("")
    # NOTE(review): the inner loop never breaks, so the outer `while`
    # never advances to a second iteration — effectively one loop.
    while True:
        while True:
            try:
                # Non-blocking receive; ZMQError signals "no message yet".
                message = psocket.recv(zmq.NOBLOCK)
                # print(message, flush=True)
                topic, body = message.split(b" ", 1)
                msg_type, symbol = split_topic(topic)
                if msg_type == "book":
                    books = parse_book_str(body)
                    order_book.process_exchange_books(symbol, books)
                elif msg_type == "event":
                    events = parse_event_str(body)
                    # Lazily register any user id not seen before.
                    for lst in events.values():
                        for e in lst:
                            user = e.get("user")
                            if user is not None and user not in traders:
                                traders[user] = DummyTrader(user=user)
                                portfolios[user] = Portfolio(user=user)
                                states[user] = State(portfolios[user], None, order_book)
                    for p in portfolios.values():
                        p.process_exchange_events(events)
                else:
                    # Unknown topic: skip the P/L refresh below.
                    continue
                # Refresh every trader's P/L after each processed message.
                for u, t in traders.items():
                    t.set_pnls(states[u])
            except zmq.ZMQError:
                # Feed idle — back off briefly instead of busy-spinning.
                time.sleep(0.01)
def updater_func(ax, portfolio, traders, history_len_sec):
    """Build the FuncAnimation callback that redraws every user's P/L.

    Each user's realized + unrealized P/L is appended to a per-user
    sliding window keyed by wall-clock time, then all curves are
    replotted on *ax*.
    """
    pnl_windows = defaultdict(lambda: SlidingWindow(history_len_sec))

    def update_plot(i):
        now = time.time()
        ax.clear()
        for user, trader in traders.items():
            window = pnl_windows[user]
            window.append(trader.realized_pnl + trader.unrealized_pnl, now)
            ax.plot(window.indexes, window.values, label=user)
        ax.set_title("P/L for all users")
        ax.set_ylabel("P/L")
        ax.set_xlabel("Timestamp")
        ax.legend()

    return update_plot
# Install Ctrl-C handler and build the live P/L chart.
signal.signal(signal.SIGINT, signal_handler)
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
# Shared state, populated by the feed-consumer thread below and read by
# the animation callback.
portfolios = {}
traders = {}
# daemon=True replaces the deprecated Thread.setDaemon() call so the
# feed thread still dies with the main process.
th = threading.Thread(
    target=run,
    args=(
        "tcp://localhost:10001",
        portfolios,
        traders,
    ),
    daemon=True,
)
th.start()
ani = animation.FuncAnimation(
    fig,
    updater_func(ax1, portfolios, traders, HISTORY_LEN_SEC),
    interval=CHART_UPDATE_MS,
)
plt.show()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.db import models
# Create your models here.
class Album(models.Model):
    """One music album in the catalogue."""

    # Basic catalogue metadata.
    artist=models.CharField(max_length=250)
    album_title=models.CharField(max_length=250)
    genre=models.CharField(max_length=250)
    # Uploaded cover image / logo file.
    album_logo=models.FileField()

    def __str__(self):
        # Display form used in listings/admin: "<title>-<artist>".
        return self.album_title + '-' + self.artist

    def get_absolute_url(self):
        # Canonical detail page for this album.
        return reverse('music:music_detail',kwargs={'pk':self.pk})
|
from atila import Atila
import confutil
import skitai
import asyncore
import os
from rs4 import jwt as jwt_
import time
def test_cli (app, dbpath):
    """End-to-end test of an Atila app through its test client.

    Registers routes (path params, HTTP methods, backend/DB aliases,
    JWT auth, maintain jobs), then exercises them and asserts the
    expected status codes and payloads.
    """
    @app.route ("/hello")
    @app.route ("/")
    def index (was):
        return "Hello, World"

    # Deliberately misspelled route used to provoke a 502 below.
    @app.route ("/petse/<int:id>")
    def pets_error (was):
        return "Pets"

    @app.route ("/pets/<int:id>", methods = ["GET", "POST"])
    def pets (was, id = None):
        return "Pets{}".format (id)

    @app.route ("/pets2/<int:id>", methods = ["POST"])
    def pets2 (was, id = None):
        return "Pets{}".format (id)

    @app.route ("/pets3/<int:id>")
    def pets3 (was, id = None):
        return "Pets{}".format (id)

    @app.route ("/echo")
    def echo (was, m):
        return m

    @app.route ("/json")
    def json (was, m):
        return was.response.api (data = m)

    # External calls through the @pypi alias and direct URLs.
    @app.route ("/pypi")
    def pypi (was):
        req = was.get ("@pypi/project/skitai/")
        res = req.getwait ()
        return was.response.api (data = res.text)

    @app.route ("/pypi3")
    def pypi3 (was):
        # getjson against an HTML page — expected to fail (502 below).
        req = was.getjson ("https://pypi.org/project/skitai/")
        res = req.getwait ()
        return was.response.api (data = res.text)

    @app.route ("/pypi2")
    def pypi2 (was):
        req = was.get ("https://pypi.org/project/skitai/")
        res = req.getwait ()
        return was.response.api (data = res.text)

    # SQLite access: raw SQL and the query-builder variants.
    @app.route ("/db")
    def db (was):
        stub = was.backend ("@sqlite")
        req = stub.execute ('SELECT * FROM stocks WHERE symbol=?', ('RHAT',))
        res = req.getwait ()
        return was.response.api (data = res.data)

    @app.route ('/jwt')
    @app.authorization_required ("bearer")
    def jwt (was):
        return was.response.api (was.request.JWT)

    @app.route ("/db2")
    def db2 (was):
        stub = was.backend ("@sqlite")
        req = stub.select ("stocks").filter (symbol = 'RHAT').execute ()
        res = req.getwait ()
        return was.response.api (data = res.data)

    # Periodic maintain job: bumps the shared counter each interval.
    @app.maintain
    def increase (was, now, count):
        if "total-user" in app.store:
            app.store.set ("total-user", app.store.get ("total-user") + 100)

    @app.route ("/getval")
    def getval (was):
        ret = str (app.store.get ("total-user"))
        return ret

    app.alias ("@pypi", skitai.PROTO_HTTPS, "pypi.org")
    app.alias ("@sqlite", skitai.DB_SQLITE3, dbpath)
    app.alias ("@postgres", skitai.DB_POSTGRESQL, "postgres:password@192.168.0.80/coin_core")

    with app.test_client ("/", confutil.getroot ()) as cli:
        # Basic routing.
        resp = cli.get ("/")
        assert resp.text == "Hello, World"
        resp = cli.get ("/hello")
        assert resp.text == "Hello, World"
        # Route whose handler signature lacks the declared param -> 502.
        resp = cli.get ("/petse/1")
        assert resp.status_code == 502
        # Path-parameter and method handling.
        resp = cli.get ("/pets/1")
        assert resp.status_code == 200
        assert resp.text == "Pets1"
        resp = cli.post ("/pets", {"a": 1})
        assert resp.status_code == 200
        assert resp.text == "PetsNone"
        resp = cli.get ("/pets")
        assert resp.status_code == 200
        resp = cli.get ("/pets2/1")
        assert resp.status_code == 405
        resp = cli.post ("/pets2/1", {"id": 1})
        assert resp.status_code == 200
        resp = cli.post ("/pets2", {"a": 1})
        assert resp.status_code == 200
        assert resp.text == "PetsNone"
        resp = cli.get ("/pets2")
        assert resp.status_code == 405
        resp = cli.get ("/pets3")
        assert resp.text == "PetsNone"
        resp = cli.get ("/pets3/1")
        print (resp)
        assert resp.text == "Pets1"
        # Query-string and JSON echo.
        resp = cli.get ("/echo?m=GET")
        assert resp.text == "GET"
        resp = cli.post ("/json", {"m": "POST"})
        assert '"data": "POST"' in resp.text
        resp = cli.post ("/json", {"m": "POST"})
        assert '"data": "POST"' in resp.text
        # Database round trips.
        resp = cli.get ("/db2")
        assert resp.data ["data"][0][3] == 'RHAT'
        resp = cli.get ("/db")
        assert resp.data ["data"][0][3] == 'RHAT'
        # External HTTP calls (pypi3 intentionally fails as JSON).
        resp = cli.get ("/pypi3")
        assert resp.status_code == 502
        resp = cli.get ("/pypi2")
        assert "skitai" in resp.text
        resp = cli.get ("/pypi")
        assert "skitai" in resp.text
        # JWT bearer auth: valid token, then an expired one -> 401.
        app.securekey = "securekey"
        resp = cli.get ("/jwt", headers = {"Authorization": "Bearer {}".format (jwt_.gen_token (app.salt, {"exp": 3000000000, "username": "hansroh"}))})
        assert resp.data == {'exp': 3000000000, 'username': 'hansroh'}
        resp = cli.get ("/jwt", headers = {"Authorization": "Bearer {}".format (jwt_.gen_token (app.salt, {"exp": 1, "username": "hansroh"}))})
        assert resp.code == 401
        assert resp.get_header ("WWW-Authenticate") == 'Bearer realm="App", error="token expired"'
        app.securekey = None
        # Maintain job: counter must have been bumped at least once.
        app.config.maintain_interval = 1
        app.store.set ("total-user", 100)
        time.sleep (2)
        resp = cli.get ("/getval")
        assert int (resp.text) >= 200
|
import random
def gamewin(a, b):
    """Print the outcome of one snake-water-gun round.

    a is the player's choice and b the computer's choice, each one of
    "s" (snake), "g" (gun) or "w" (water).
    Rules: gun beats snake, snake beats water, water beats gun.
    Fixes two message defects: "yor are tied" -> "you are tied" and
    "you has win" -> "you win".
    """
    if b == a:
        print("you are tied")
        print("play again")
    elif b == "s":
        # Computer played snake: only gun wins for the player.
        if a == "g":
            print("you win")
        else:
            print("comp has win")
    elif b == "g":
        # Computer played gun: snake loses, water wins.
        if a == "s":
            print("comp has win")
        else:
            print("you win")
    elif b == "w":
        # Computer played water: snake wins, gun loses.
        if a == "s":
            print("you win")
        else:
            print("comp has win")
# Computer picks its move at random, then the player is asked and the
# round is scored. The original code had two real bugs here: it used
# comparisons (b=="s") where assignments were intended, and the
# randnum==2 branch appended "s" instead of "g", so the computer's
# "gun" roll silently became a snake.
print("comp turn:snake(s),gun(g)or Water(w)")
randnum = random.randint(1, 3)
if randnum == 1:
    b = "s"
elif randnum == 2:
    b = "g"
else:
    b = "w"
a = input("you turn:snake(s),gun(g)or Water(w)")
gamewin(a, b)
print(f"you chose {a}")
print(f"comp chose {b}")
# Copyright 2013-2023 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import re
from .exceptions import SyntaxException
from . import commands
from . import constants
from . import operation
from . import util
try:
from ply import lex, yacc
except Exception:
pass
HealthVars = {}
class HealthLexer:
    """ply lexer for the health-check query language.

    Tokenizes identifiers into the most specific category available
    (component, group id, operator, assert level, known variable, ...)
    and falls back to NEW_VAR for unknown names. The docstrings of the
    t_* methods are functional ply regexes and must not be edited as
    documentation.
    """

    # Matches snapshot identifiers like SNAPSHOT12.
    SNAPSHOT_KEY_PATTERN = r"SNAPSHOT(\d+)$"
    assert_levels = {
        "CRITICAL": constants.AssertLevel.CRITICAL,
        "WARNING": constants.AssertLevel.WARNING,
        "INFO": constants.AssertLevel.INFO,
    }
    # Selectable data sections of the collected health snapshot.
    components = {
        "ALL": "ALL",
        "ASD_PROCESS": "ASD_PROCESS",
        "AVG-CPU": "AVG-CPU",
        "BIN": "BIN",
        "BUFFERS/CACHE": "BUFFERS/CACHE",
        "CONFIG": "CONFIG",
        "CPU_UTILIZATION": "CPU_UTILIZATION",
        "DEVICE_INTERRUPTS": "DEVICE_INTERRUPTS",
        "DEVICE_STAT": "DEVICE_STAT",
        "DF": "DF",
        "DMESG": "DMESG",
        "ENDPOINTS": "ENDPOINTS",
        "ENVIRONMENT": "ENVIRONMENT",
        "FREE": "FREE",
        "HDPARM": "HDPARM",
        "HEALTH": "HEALTH",
        "INTERRUPTS": "INTERRUPTS",
        "IOSTAT": "IOSTAT",
        "IPTABLES": "IPTABLES",
        "LIMITS": "LIMITS",
        "LSB": "LSB",
        "LSCPU": "LSCPU",
        "MEM": "MEM",
        "MEMINFO": "MEMINFO",
        "METADATA": "METADATA",
        "NETWORK": "NETWORK",
        "ORIGINAL_CONFIG": "ORIGINAL_CONFIG",
        "RAM": "RAM",
        "ROLES": "ROLES",
        "ROSTER": "ROSTER",
        "SYSTEM": "SYSTEM",
        "SECURITY": "SECURITY",
        "SERVICE": "SERVICE",
        "SERVICES": "SERVICES",
        "SCHEDULER": "SCHEDULER",
        "STATISTICS": "STATISTICS",
        "SWAP": "SWAP",
        "SYSCTLALL": "SYSCTLALL",
        "TASKS": "TASKS",
        "TOP": "TOP",
        "UDF": "UDF",
        "UPTIME": "UPTIME",
        "USERS": "USERS",
        "XDR": "XDR",
        "XDR_PROCESS": "XDR_PROCESS",
    }
    # Keys usable in GROUP BY clauses.
    group_ids = {
        "BUCKET_END": "BUCKET_END",
        "BUCKET_START": "BUCKET_START",
        "CLUSTER": "CLUSTER",
        "DEVICE": "DEVICE",
        "FILENAME": "FILENAME",
        "FILE_SYSTEM": "FILE_SYSTEM",
        "INTERRUPT_DEVICE": "INTERRUPT_DEVICE",
        "INTERRUPT_ID": "INTERRUPT_ID",
        "INTERRUPT_TYPE": "INTERRUPT_TYPE",
        "KEY": "KEY",
        "NODE": "NODE",
        "OUTLIER": "OUTLIER",
        "SNAPSHOT": "SNAPSHOT",
    }
    # Names valid both as a component and as a GROUP BY id.
    component_and_group_id = {
        "DC": "DC",
        "HISTOGRAM": "HISTOGRAM",
        "NAMESPACE": "NAMESPACE",
        "RACKS": "RACKS",
        "SET": "SET",
        "SINDEX": "SINDEX",
    }
    agg_ops = {
        "AND": "AND",
        "AVG": "AVG",
        "COUNT": "COUNT",
        "COUNT_ALL": "COUNT_ALL",
        "EQUAL": "EQUAL",
        "MAX": "MAX",
        "MIN": "MIN",
        "OR": "OR",
        "FIRST": "FIRST",
        "SUM": "SUM",
        "VALUE_UNIFORM": "VALUE_UNIFORM",
    }
    complex_ops = {"DIFF": "DIFF", "SD_ANOMALY": "SD_ANOMALY", "NO_MATCH": "NO_MATCH"}
    apply_ops = {"APPLY_TO_ANY": "APPLY_TO_ANY", "APPLY_TO_ALL": "APPLY_TO_ALL"}
    simple_ops = {"SPLIT": "SPLIT", "UNIQUE": "UNIQUE"}
    complex_params = {
        "MAJORITY": constants.MAJORITY,
    }
    assert_ops = {"ASSERT": "ASSERT"}
    bool_vals = {"true": True, "false": False}
    # Lower-case reserved keywords of the query language.
    reserved = {
        "as": "AS",
        "by": "BY",
        "common": "COMMON",
        "do": "DO",
        "from": "FROM",
        "group": "GROUP",
        "ignore": "IGNORE",
        "like": "LIKE",
        "on": "ON",
        "save": "SAVE",
        "select": "SELECT",
    }
    tokens = [
        "NUMBER",
        "FLOAT",
        "BOOL_VAL",
        "VAR",
        "NEW_VAR",
        "COMPONENT",
        "GROUP_ID",
        "COMPONENT_AND_GROUP_ID",
        "AGG_OP",
        "COMPLEX_OP",
        "APPLY_OP",
        "SIMPLE_OP",
        "COMPLEX_PARAM",
        "ASSERT_OP",
        "ASSERT_LEVEL",
        "STRING",
        "COMMA",
        "DOT",
        "IN",
        "PLUS",
        "MINUS",
        "TIMES",
        "DIVIDE",
        "BINARY_AND",
        "BINARY_OR",
        "LPAREN",
        "RPAREN",
        "GT",
        "GE",
        "LT",
        "LE",
        "EQ",
        "NE",
        "ASSIGN",
        "PCT",
    ] + list(reserved.values())

    # FLOAT must be declared before NUMBER so "1.5"/"1e3" are not split.
    def t_FLOAT(self, t):
        r"\d+(\.(\d+)?([eE][-+]?\d+)?|[eE][-+]?\d+)"
        t.value = float(t.value)
        return t

    def t_NUMBER(self, t):
        r"\d+"
        t.value = int(t.value)
        return t

    def t_VAR(self, t):
        r"[a-zA-Z_][a-zA-Z_0-9]*"
        # Check for reserved words
        t.type = HealthLexer.reserved.get(t.value.lower(), "NEW_VAR")
        if not t.type == "NEW_VAR":
            return t
        # Classify non-reserved identifiers from most specific to least:
        # booleans, snapshots, components, group ids, operators, assert
        # keywords, previously assigned variables — else stay NEW_VAR.
        elif t.value.lower() in HealthLexer.bool_vals.keys():
            t.type = "BOOL_VAL"
            t.value = HealthLexer.bool_vals.get(t.value.lower())
        elif re.match(HealthLexer.SNAPSHOT_KEY_PATTERN, t.value):
            t.value = util.create_snapshot_key(
                int(re.search(HealthLexer.SNAPSHOT_KEY_PATTERN, t.value).group(1))
            )
            t.type = "COMPONENT"
        elif t.value in HealthLexer.components.keys():
            t.type = "COMPONENT"
        elif t.value in HealthLexer.group_ids.keys():
            t.type = "GROUP_ID"
        elif t.value in HealthLexer.component_and_group_id:
            t.type = "COMPONENT_AND_GROUP_ID"
        elif t.value in HealthLexer.agg_ops.keys():
            t.type = "AGG_OP"
        elif t.value in HealthLexer.complex_ops.keys():
            t.type = "COMPLEX_OP"
        elif t.value in HealthLexer.apply_ops.keys():
            t.type = "APPLY_OP"
        elif t.value in HealthLexer.simple_ops.keys():
            t.type = "SIMPLE_OP"
        elif t.value == "IN":
            t.type = "IN"
        elif t.value in HealthLexer.complex_params.keys():
            t.value = HealthLexer.complex_params[t.value]
            t.type = "COMPLEX_PARAM"
        elif t.value in HealthLexer.assert_ops.keys():
            t.type = "ASSERT_OP"
        elif t.value in HealthLexer.assert_levels.keys():
            t.value = HealthLexer.assert_levels[t.value]
            t.type = "ASSERT_LEVEL"
        elif t.value in HealthVars:
            # Known variable: carry a deep copy of its stored value so
            # later statements cannot mutate earlier results in place.
            t.type = "VAR"
            t.value = (
                constants.HEALTH_PARSER_VAR,
                t.value,
                copy.deepcopy(HealthVars[t.value]),
            )
        return t

    def t_STRING(self, t):
        r"\".*?\" "
        # Strip the surrounding double quotes; empty string -> None.
        if len(t.value) < 3:
            t.value = None
        else:
            t.value = t.value[1 : len(t.value) - 1]
        return t

    # Define a rule so we can track line numbers
    def t_newline(self, t):
        r"\n+"
        t.lexer.lineno += len(t.value)

    t_ignore = " \t"

    # Regular expression rules for simple tokens
    t_COMMA = r"\,"
    t_DOT = r"\."
    t_PLUS = r"\+"
    t_MINUS = r"-"
    # NOTE(review): this regex matches a doubled "%%", not a single "%"
    # — presumably intentional for the query syntax; confirm.
    t_PCT = r"%%"
    t_TIMES = r"\*"
    t_DIVIDE = r"/"
    t_BINARY_OR = r"\|\|"
    t_BINARY_AND = r"&&"
    t_LPAREN = r"\("
    t_RPAREN = r"\)"
    t_GT = r">"
    t_GE = r">="
    t_LT = r"<"
    t_LE = r"<="
    t_EQ = r"=="
    t_NE = r"!="
    t_ASSIGN = r"="

    def t_error(self, t):
        # Any unmatched character aborts lexing.
        raise TypeError("Unknown text '%s'" % (t.value,))

    def build(self, **kwargs):
        """Create and return the underlying ply lexer."""
        self.lexer = lex.lex(module=self, **kwargs)
        return self.lexer
class HealthParser:
tokens = HealthLexer.tokens
health_input_data = {}
precedence = (
("left", "ASSIGN"),
("left", "BINARY_OR"),
("left", "BINARY_AND"),
("left", "EQ", "NE", "LT", "GT", "LE", "GE"),
("left", "PLUS", "MINUS"),
("left", "TIMES", "DIVIDE"),
("left", "PCT"),
)
def p_statement(self, p):
"""
statement : VAR opt_assign_statement
| NEW_VAR assign_statement
| assert_statement
"""
if len(p) > 2 and p[2] is not None:
if isinstance(p[2], Exception):
val = None
elif util.is_health_parser_variable(p[2]):
val = p[2][2]
else:
val = p[2]
if util.is_health_parser_variable(p[1]):
HealthVars[p[1][1]] = val
else:
HealthVars[p[1]] = val
p[0] = val
if isinstance(p[2], Exception):
raise p[2]
else:
p[0] = p[1]
def p_binary_operation(self, p):
"""
binary_operation : operand op operand opt_on_clause
"""
p[0] = (p[2], p[1], p[3], None, None, p[4])
def p_opt_on_clause(self, p):
"""
opt_on_clause : ON COMMON
|
"""
if len(p) == 1:
p[0] = False
else:
p[0] = True
def p_agg_operation(self, p):
"""
agg_operation : AGG_OP LPAREN operand RPAREN
"""
p[0] = (p[1], p[3], None, None, None, False)
def p_complex_operation(self, p):
"""
complex_operation : COMPLEX_OP LPAREN operand COMMA comparison_op COMMA complex_comparison_operand RPAREN
"""
p[0] = (p[1], p[3], None, p[5], p[7], False)
def p_apply_operation(self, p):
"""
apply_operation : APPLY_OP LPAREN operand COMMA apply_comparison_op COMMA operand RPAREN
"""
p[0] = (p[1], p[3], p[7], p[5], None, False)
def p_simple_operation(self, p):
"""
simple_operation : SIMPLE_OP LPAREN operand opt_simple_operation_param RPAREN
"""
p[0] = (p[1], p[3], p[4], None, None, False)
def p_opt_simple_operation_param(self, p):
"""
opt_simple_operation_param : COMMA constant
|
"""
if len(p) == 1:
p[0] = None
else:
p[0] = util.create_health_internal_tuple(p[2], [])
def p_apply_comparison_op(self, p):
"""
apply_comparison_op : IN
| comparison_op
"""
p[0] = p[1]
def p_complex_comparison_operand(self, p):
"""
complex_comparison_operand : COMPLEX_PARAM
| operand
"""
if util.is_health_parser_variable(p[1]):
p[0] = p[1][2]
elif not isinstance(p[1], tuple):
p[0] = util.create_health_internal_tuple(p[1], [])
else:
p[0] = p[1]
def p_operand(self, p):
"""
operand : VAR
| constant
"""
if util.is_health_parser_variable(p[1]):
p[0] = p[1][2]
else:
p[0] = util.create_health_internal_tuple(p[1], [])
def p_value(self, p):
"""
value : NUMBER
| FLOAT
"""
p[0] = p[1]
def p_number(self, p):
"""
number : value
| PLUS value
| MINUS value
"""
if len(p) == 2:
p[0] = p[1]
elif p[1] == "-":
p[0] = p[2] * -1
else:
p[0] = p[2]
def p_op(self, p):
"""
op : PLUS
| MINUS
| TIMES
| DIVIDE
| PCT
| comparison_op
| BINARY_AND
| BINARY_OR
| IN
"""
p[0] = p[1]
def p_comparison_op(self, p):
"""
comparison_op : EQ
| NE
| LT
| GT
| LE
| GE
"""
p[0] = p[1]
def p_group_by_clause(self, p):
"""
group_by_clause : GROUP BY group_by_ids
"""
p[0] = p[3]
def p_group_by_ids(self, p):
"""
group_by_ids : group_by_ids COMMA group_by_id
| group_by_id
"""
if len(p) > 2:
p[1].append(p[3])
p[0] = p[1]
else:
p[0] = [p[1]]
def p_group_by_id(self, p):
"""
group_by_id : GROUP_ID
| COMPONENT_AND_GROUP_ID
"""
p[0] = p[1]
def p_opt_group_by_clause(self, p):
"""
opt_group_by_clause : group_by_clause
|
"""
if len(p) == 1:
p[0] = None
else:
p[0] = p[1]
def p_group_by_statement(self, p):
"""
group_by_statement : group_by_clause VAR
"""
try:
p[0] = operation.do_multiple_group_by(p[2][2], p[1])
except Exception as e:
p[0] = e
def p_opt_assign_statement(self, p):
"""
opt_assign_statement : assign_statement
|
"""
if len(p) > 1:
p[0] = p[1]
else:
p[0] = None
def p_assign_statement(self, p):
"""
assign_statement : ASSIGN cmd_statement
"""
p[0] = p[2]
def p_cmd_statement(self, p):
"""
cmd_statement : select_statement
| op_statement
| group_by_statement
"""
p[0] = p[1]
def p_op_statement(self, p):
"""
op_statement : opt_group_by_clause DO binary_operation opt_save_clause
| opt_group_by_clause DO agg_operation opt_save_clause
| opt_group_by_clause DO complex_operation opt_save_clause
| opt_group_by_clause DO apply_operation opt_save_clause
| opt_group_by_clause DO simple_operation opt_save_clause
"""
try:
p[0] = commands.do_operation(
op=p[3][0],
arg1=p[3][1],
arg2=p[3][2],
group_by=p[1],
result_comp_op=p[3][3],
result_comp_val=p[3][4],
on_common_only=p[3][5],
save_param=p[4],
)
except Exception as e:
p[0] = e
def p_opt_save_clause(self, p):
"""
opt_save_clause : SAVE opt_as_clause
|
"""
if len(p) == 3:
if p[2] is None:
# No keyname entered so use same as value keyname
p[0] = ""
else:
# Keyname entered
p[0] = p[2]
else:
# No data saving
p[0] = None
def p_assert_statement(self, p):
"""
assert_statement : ASSERT_OP LPAREN assert_arg COMMA assert_comparison_arg COMMA error_string COMMA assert_category COMMA ASSERT_LEVEL COMMA assert_desc_string COMMA assert_success_msg COMMA assert_if_condition RPAREN
| ASSERT_OP LPAREN assert_arg COMMA assert_comparison_arg COMMA error_string COMMA assert_category COMMA ASSERT_LEVEL COMMA assert_desc_string COMMA assert_success_msg RPAREN
| ASSERT_OP LPAREN assert_arg COMMA assert_comparison_arg COMMA error_string COMMA assert_category COMMA ASSERT_LEVEL COMMA assert_desc_string RPAREN
| ASSERT_OP LPAREN assert_arg COMMA assert_comparison_arg COMMA error_string COMMA assert_category COMMA ASSERT_LEVEL RPAREN
"""
if len(p) < 14:
p[0] = commands.do_assert(
op=p[1],
data=p[3],
check_val=p[5],
error=p[7],
category=p[9],
level=p[11],
)
elif len(p) < 16:
p[0] = commands.do_assert(
op=p[1],
data=p[3],
check_val=p[5],
error=p[7],
category=p[9],
level=p[11],
description=p[13],
)
elif len(p) < 18:
p[0] = commands.do_assert(
op=p[1],
data=p[3],
check_val=p[5],
error=p[7],
category=p[9],
level=p[11],
description=p[13],
success_msg=p[15],
)
else:
skip_assert, assert_filter_arg = p[17]
if skip_assert:
p[0] = None
else:
if assert_filter_arg is not None:
data = commands.do_operation(op="==", arg1=p[3], arg2=p[5])
try:
# If key filtration throws exception (due to non-matching), it just passes that and executes main assert
new_data = commands.do_operation(
op="||",
arg1=data,
arg2=assert_filter_arg,
on_common_only=True,
)
if new_data:
data = new_data
except Exception:
pass
p[0] = commands.do_assert(
op=p[1],
data=data,
check_val=util.create_health_internal_tuple(True, []),
error=p[7],
category=p[9],
level=p[11],
description=p[13],
success_msg=p[15],
)
else:
p[0] = commands.do_assert(
op=p[1],
data=p[3],
check_val=p[5],
error=p[7],
category=p[9],
level=p[11],
description=p[13],
success_msg=p[15],
)
def p_assert_if_condition(self, p):
"""
assert_if_condition : assert_arg opt_assert_if_arg2
"""
skip_assert, assert_filter_arg = commands.do_assert_if_check(
p[2][0], p[1], p[2][1]
)
p[0] = (skip_assert, assert_filter_arg)
def p_opt_assert_if_arg2(self, p):
"""
opt_assert_if_arg2 : comparison_op assert_arg
|
"""
if len(p) > 1:
p[0] = (p[1], p[2])
else:
p[0] = (None, None)
def p_assert_arg(self, p):
"""
assert_arg : operand
"""
p[0] = p[1]
def p_assert_comparison_arg(self, p):
"""
assert_comparison_arg : constant
"""
p[0] = util.create_health_internal_tuple(p[1], [])
def p_constant(self, p):
"""
constant : number
| STRING
| BOOL_VAL
"""
p[0] = util.h_eval(p[1])
def p_assert_category(self, p):
"""
assert_category : STRING
"""
p[0] = p[1]
def p_assert_desc_string(self, p):
"""
assert_desc_string : NUMBER
| STRING
"""
p[0] = p[1]
def p_assert_success_msg(self, p):
"""
assert_success_msg : NUMBER
| STRING
"""
p[0] = p[1]
def p_error_string(self, p):
"""
error_string : NUMBER
| STRING
"""
p[0] = p[1]
def p_select_statement(self, p):
"""
select_statement : SELECT select_keys opt_from_clause opt_ignore_clause opt_save_clause
| operand
"""
if len(p) > 2:
try:
p[0] = commands.select_keys(
data=self.health_input_data,
select_keys=p[2],
select_from_keys=p[3],
ignore_keys=p[4],
save_param=p[5],
)
except Exception as e:
p[0] = e
else:
p[0] = p[1]
def p_opt_from_clause(self, p):
"""
opt_from_clause : FROM opt_snapshot_var select_from_keys
|
"""
if len(p) == 1:
p[0] = None
else:
p[0] = p[2] + p[3]
def p_opt_snapshot_var(self, p):
"""
opt_snapshot_var : VAR opt_dot
|
"""
if len(p) == 1:
p[0] = []
elif re.match(HealthLexer.SNAPSHOT_KEY_PATTERN, p[1][1]):
p[0] = [p[1][1]]
else:
raise SyntaxException("Wrong snapshot component " + p[1][1])
def p_opt_dot(self, p):
"""
opt_dot : DOT
|
"""
if len(p) == 1:
p[0] = None
else:
p[0] = p[1]
def p_select_from_keys(self, p):
"""
select_from_keys : select_from_keys DOT select_from_key
| select_from_key
"""
if len(p) > 2:
p[1].append(p[3])
p[0] = p[1]
else:
p[0] = [p[1]]
def p_select_from_key(self, p):
"""
select_from_key : COMPONENT
| COMPONENT_AND_GROUP_ID
"""
p[0] = p[1]
def p_opt_as_clause(self, p):
"""
opt_as_clause : AS STRING
|
"""
if len(p) == 1:
p[0] = None
else:
p[0] = p[2]
def p_select_keys(self, p):
"""
select_keys : select_keys COMMA select_key
| select_key
"""
if len(p) > 2:
p[1].append(p[3])
p[0] = p[1]
else:
p[0] = [p[1]]
def p_select_key(self, p):
"""
select_key : LIKE LPAREN key RPAREN opt_as_clause
| key opt_as_clause
"""
if len(p) > 3:
pattern = p[3]
if not pattern.startswith("^"):
pattern = "^" + str(pattern)
if not pattern.endswith("$"):
pattern += "$"
p[0] = (True, pattern, p[5])
else:
p[0] = (False, p[1], p[2])
def p_opt_ignore_clause(self, p):
"""
opt_ignore_clause : IGNORE ignore_keys
|
"""
if len(p) == 1:
p[0] = []
else:
p[0] = p[2]
def p_ignore_keys(self, p):
"""
ignore_keys : ignore_keys COMMA ignore_key
| ignore_key
"""
if len(p) > 2:
p[1].append(p[3])
p[0] = p[1]
else:
p[0] = [p[1]]
def p_ignore_key(self, p):
"""
ignore_key : LIKE LPAREN key RPAREN
| key
"""
if len(p) > 2:
pattern = p[3]
if not pattern.startswith("^"):
pattern = "^" + str(pattern)
if not pattern.endswith("$"):
pattern += "$"
p[0] = (True, pattern)
else:
p[0] = (False, p[1])
def p_key(self, p):
"""
key : STRING
| TIMES
"""
p[0] = p[1]
def p_error(self, p):
    """ply error hook: raise SyntaxException for a bad token or for EOF."""
    if not p:
        # Parser ran out of input (EOF reached mid-production).
        raise SyntaxException("Syntax error : Insufficient tokens")
    raise SyntaxException(
        "Syntax error at position %d : %s" % ((p.lexpos), str(p))
    )
def build(self, **kwargs):
    """Create the ply parser (in-memory tables, silenced error log) and
    the matching HealthLexer; returns the parser.

    Extra keyword arguments are forwarded verbatim to yacc.yacc().
    """
    self.parser = yacc.yacc(
        module=self,
        debug=False,
        write_tables=False,
        errorlog=yacc.NullLogger(),
        **kwargs
    )
    self.lexer = HealthLexer().build()
    return self.parser
def set_health_data(self, health_input_data):
    """Store the health snapshot consulted by subsequent queries."""
    self.health_input_data = health_input_data
def clear_health_cache(self):
    """Reset the module-level HealthVars cache to an empty dict."""
    global HealthVars
    HealthVars = {}
def parse(self, text):
    """Parse *text* using the parser/lexer pair built by build()."""
    result = self.parser.parse(text, lexer=self.lexer)
    return result
|
import logging # pylint: disable=C0302
from unittest.mock import patch
from sqlalchemy import desc
from web3.datastructures import AttributeDict
from src.models.indexing.cid_data import CIDData
from src.tasks.backfill_cid_data import backfill_cid_data
from src.utils import redis_connection
from src.utils.db_session import get_db
logger = logging.getLogger(__name__)
# Tab-separated fixture mimicking the backfill dump: cid, record type,
# JSON payload (inner double quotes doubled per CSV quoting rules).
# NOTE(review): the name `csv` shadows the stdlib csv module.
csv = """cid_1\tuser\t"{""user_id"": 1}"
cid_2\tuser\t"{""user_id"": 2}"
cid_3\ttrack\t"{""name"": ""ray""}"
"""
@patch(
    "src.tasks.backfill_cid_data.requests.get",
    # The mocked HTTP response streams the fixture above as one chunk.
    return_value=AttributeDict({"iter_content": lambda _: [bytes(csv, "utf-8")]}),
)
def test_backfill_cid_data(request_get, app, mocker):
    """Happy path test: test that we get all valid listens from prior year"""
    # NOTE(review): the docstring looks copy-pasted from a listens test;
    # what this actually verifies is that backfill_cid_data ingests the
    # mocked CSV into CIDData rows and sets the redis completion flag.
    # setup
    with app.app_context():
        db = get_db()
        mocker.patch(
            "os.getenv",
            return_value="stage",
        )
        backfill_cid_data(db)
        with db.scoped_session() as session:
            # Only the two "user" rows should be returned by the filter;
            # ordering is by cid descending, hence user_id 2 first.
            users = (
                session.query(CIDData)
                .filter(CIDData.type == "user")
                .order_by(desc(CIDData.cid))
                .all()
            )
            assert len(users) == 2
            assert users[0].data == {"user_id": 2}
            assert users[1].data == {"user_id": 1}
            # The task marks completion in redis with the string "true".
            assert (
                redis_connection.get_redis().get("backfilled_cid_data").decode() == "true"
            )
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File : is_a_cat.py
# Author: PengLei
# Date : 2018/10/18
# 利用神经网络判断一张图片是否是一只猫
import numpy as np
import matplotlib.pyplot as plt
import h5py
from lr_utils import load_dataset
# Load the cat / non-cat dataset via the course helper (lr_utils).
train_set_x_orig , train_set_y , test_set_x_orig , test_set_y , classes = load_dataset()
index = 22
plt.imshow(test_set_x_orig[index])
m_train = train_set_y.shape[1]  # number of training images
m_test = test_set_y.shape[1]  # number of test images
num_px = train_set_x_orig.shape[1]  # width/height of each square image (64x64)
# Show what was loaded.
print ("训练集的数量: m_train = " + str(m_train))
print ("测试集的数量 : m_test = " + str(m_test))
print ("每张图片的宽/高 : num_px = " + str(num_px))
print ("每张图片的大小 : (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("训练集_图片的维数 : " + str(train_set_x_orig.shape))
print ("训练集_标签的维数 : " + str(train_set_y.shape))
print ("测试集_图片的维数: " + str(test_set_x_orig.shape))
print ("测试集_标签的维数: " + str(test_set_y.shape))
# Flatten each training image into a column vector and transpose,
# giving shape (num_px * num_px * 3, m_train).
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0],-1).T
# Same flattening for the test set.
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
print ("训练集降维最后的维度: " + str(train_set_x_flatten.shape))
print ("训练集_标签的维数 : " + str(train_set_y.shape))
print ("测试集降维之后的维度: " + str(test_set_x_flatten.shape))
print ("测试集_标签的维数 : " + str(test_set_y.shape))
# Scale pixel values from [0, 255] down to [0, 1].
train_set_x = train_set_x_flatten / 255
test_set_x = test_set_x_flatten / 255
def sigmoid(z):
    """Logistic sigmoid of z (scalar or numpy array of any shape)."""
    return 1 / (1 + np.exp(-z))
def initialize_with_zeros(dim):
    """Return (w, b): a (dim, 1) zero weight vector and a zero bias.

    Args:
        dim: number of parameters (num_px * num_px * 3 for flat images).
    """
    w = np.zeros((dim, 1))
    b = 0
    # Shape/type sanity checks, as in the course exercise.
    assert w.shape == (dim, 1)
    assert isinstance(b, (float, int))
    return (w, b)
def propagate(w, b, X, Y):
    """Forward and backward pass for logistic regression.

    Args:
        w: weights, shape (num_px * num_px * 3, 1).
        b: bias scalar.
        X: data matrix, shape (num_px * num_px * 3, m).
        Y: labels (0 = non-cat, 1 = cat), shape (1, m).

    Returns:
        (grads, cost): grads is {"dw", "db"} with the shapes of w and b;
        cost is the mean negative log-likelihood as a scalar.
    """
    m = X.shape[1]
    # Forward pass: activations via the logistic sigmoid (inlined).
    A = 1 / (1 + np.exp(-(np.dot(w.T, X) + b)))
    # Cross-entropy cost averaged over the m examples, squeezed to a scalar.
    cost = np.squeeze((-1 / m) * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A)))
    # Backward pass.
    residual = A - Y
    dw = np.dot(X, residual.T) / m
    db = np.sum(residual) / m
    # Shape/type sanity checks.
    assert dw.shape == w.shape
    assert db.dtype == float
    assert cost.shape == ()
    grads = {"dw": dw, "db": db}
    return (grads, cost)
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):
    """Run gradient descent to optimize w and b.

    Args:
        w: weights, shape (num_px * num_px * 3, 1).
        b: bias scalar.
        X: data matrix, shape (num_px * num_px * 3, m).
        Y: labels (0 = non-cat, 1 = cat), shape (1, m).
        num_iterations: number of gradient-descent steps.
        learning_rate: step size of the update rule.
        print_cost: when True, print the cost every 100 iterations.

    Returns:
        (params, grads, costs): dict with learned "w"/"b", dict with the
        final "dw"/"db" gradients, and the costs recorded every 100 steps
        (useful for plotting a learning curve).
    """
    costs = []
    for i in range(num_iterations):
        grads, cost = propagate(w, b, X, Y)
        dw = grads["dw"]
        db = grads["db"]
        # Gradient-descent parameter update.
        w = w - learning_rate * dw
        b = b - learning_rate * db
        # Record (and optionally print) the cost every 100 iterations.
        if i % 100 == 0:
            costs.append(cost)
            if print_cost:
                # BUG FIX: the original called
                # print("...%i...%f", (i, cost)) which printed the raw
                # format string followed by the tuple instead of
                # %-formatting the message.
                print("迭代的次数:%i,误差值:%f" % (i, cost))
    params = {
        "w": w,
        "b": b
    }
    grads = {
        "dw": dw,
        "db": db
    }
    return (params, grads, costs)
def predict(w, b, X):
    """Predict 0/1 labels with learned logistic-regression parameters.

    Args:
        w: weights, shape (num_px * num_px * 3, 1).
        b: bias scalar.
        X: data matrix, shape (num_px * num_px * 3, m).

    Returns:
        Y_prediction: numpy array of shape (1, m) holding 0.0/1.0 labels.
    """
    m = X.shape[1]
    w = w.reshape(X.shape[0], 1)
    # Activation probabilities via the logistic sigmoid (inlined).
    activations = 1 / (1 + np.exp(-(np.dot(w.T, X) + b)))
    # Threshold at 0.5 (strictly greater), vectorized instead of a loop.
    Y_prediction = (activations > 0.5).astype(float).reshape(1, m)
    assert Y_prediction.shape == (1, m)
    return Y_prediction
def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
    """Build the logistic-regression model by wiring the helpers together.

    Args:
        X_train: training data, shape (num_px * num_px * 3, m_train).
        Y_train: training labels, shape (1, m_train).
        X_test: test data, shape (num_px * num_px * 3, m_test).
        Y_test: test labels, shape (1, m_test).
        num_iterations: gradient-descent iteration count.
        learning_rate: step size used by optimize().
        print_cost: print the cost every 100 iterations when True.

    Returns:
        d: dict with costs, predictions, learned parameters and the
        hyperparameters used.
    """
    w, b = initialize_with_zeros(X_train.shape[0])
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
    # Retrieve the learned parameters.
    w, b = parameters["w"], parameters["b"]
    # Predict on both splits.
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)
    # Report accuracy (mean absolute prediction error, inverted).
    print("训练集准确性:", format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100), "%")
    print("测试集准确性:", format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100), "%")
    d = {
        "costs": costs,
        "Y_prediction_test": Y_prediction_test,
        # BUG FIX: the original only exposed the misspelled key
        # "Y_prediciton_train". Keep the misspelling for backward
        # compatibility and add the correctly spelled key as well.
        "Y_prediction_train": Y_prediction_train,
        "Y_prediciton_train": Y_prediction_train,
        "w": w,
        "b": b,
        "learning_rate": learning_rate,
        "num_iterations": num_iterations}
    return d
# Train and evaluate the model on the loaded dataset.
print("======================测试model=======================")
# BUG FIX: the original line ended with a stray " |" artifact, which is a
# SyntaxError; it has been removed.
d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations=2000, learning_rate=0.005, print_cost=False)
#!/usr/bin/python
# -*- coding:utf-8 -*-
import urllib.parse
import urllib.request
import lxml
from bs4 import BeautifulSoup
import re
import ssl
import time
import os
"""
1. add comments
2. add daily
"""
class main_arxiv(object):
    """Crawl an arxiv listing page and download the PDFs whose titles or
    comment lines match the configured keywords, writing a summary.txt of
    abstracts for everything downloaded into a per-day directory.
    """
    # NOTE(review): mutable default arguments (key_words,
    # key_words_conference) are shared across instances — a known Python
    # pitfall; harmless here only as long as they are never mutated.
    def __init__(self, query_word: str, domain='cs.CV/', query_mode='all',
                 key_words=['self-supervised', 'contrastive learning', 'anomaly detection',
                            'novelty detection', 'representation learning', 'out-of-distribution'],
                 key_words_conference=['ICLR', 'CVPR', 'ICML', 'ICCV'],
                 download_root_dir='/Users/zhangzilong/Desktop/arxiv/'):
        """query_word: month_year, recent, pastweek"""
        self.original_url = 'https://arxiv.org/'
        self.domain_url = self.original_url + 'list/' + domain + query_word
        # NOTE(review): the substring check ('all' in query_mode) is looser
        # than the exact comparisons used in run_get_pdf; a value such as
        # 'dailyx' passes the assert but matches neither branch there.
        assert 'all' in query_mode or 'daily' in query_mode, 'please input correct query mode(all, daily)'
        self.query_mode = query_mode
        self.headers = {
            'User-Agent':
                'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36'
        }
        self.key_words = key_words
        self.key_words_conference = key_words_conference
        self.root_dir = download_root_dir
        # Downloads go into a directory named after today's date.
        current_time = time.strftime("%Y_%m_%d", time.localtime())
        self.current_time_dir = self.root_dir + current_time
        if not os.path.exists(self.current_time_dir):
            os.mkdir(self.current_time_dir)

    def get_url_context(self, target_url, pdf=False):
        """Fetch target_url; return decoded text, or raw bytes when pdf=True.

        Uses an unverified SSL context, i.e. certificate checks are disabled.
        """
        context = ssl._create_unverified_context()
        request = urllib.request.Request(target_url, headers=self.headers)
        if not pdf:
            response = urllib.request.urlopen(request, context=context).read().decode()
        else:
            response = urllib.request.urlopen(request, context=context).read()
        return response

    def get_pdf(self, title_name_pdf, pdf_temp, element, UL=False):
        """Download one PDF into a per-keyword subdirectory.

        UL=True forces the 'unsupervised learning' bucket regardless of
        the element argument. Sleeps 1s after the download as a simple
        rate limit.
        """
        if UL:
            element = 'unsupervised learning'
        if not os.path.exists(os.path.join(self.current_time_dir, element)):
            os.mkdir(os.path.join(self.current_time_dir, element))
        sub_dir = os.path.join(self.current_time_dir, element)
        pdf_response = self.get_url_context(self.original_url + pdf_temp, pdf=True)
        with open(sub_dir + '/' + title_name_pdf, 'wb') as f:
            f.write(pdf_response)
        time.sleep(1)

    def run_get_pdf(self):
        """Main crawl loop: read the listing, match titles/comments against
        the keywords, download matching PDFs and write summary.txt."""
        response = self.get_url_context(self.domain_url)
        # get the target page range
        soup = BeautifulSoup(response, 'lxml')
        # all mode
        if self.query_mode == 'all':
            html_all_page = soup.find('div', id='dlpage').find('small').text
            pattern = re.compile(r'.*?total of (\d*) entries.*?', re.S)
            target_total_page = pattern.findall(html_all_page)
        # add daily mode
        elif self.query_mode == 'daily':
            html_all_page = soup.find('h3').text
            pattern = re.compile(r'.*? of (\d*) entries.*?', re.S)
            target_total_page = pattern.findall(html_all_page)
        # NOTE(review): if neither branch above matched, target_total_page
        # is unbound and the next line raises NameError (see the loose
        # assert in __init__).
        query_string = {
            "show": target_total_page[0]
        }
        query_string_encode = urllib.parse.urlencode(query_string)
        url_immediate = self.domain_url + '?' + query_string_encode
        # get_final_url, get title and pdf
        target_response = self.get_url_context(url_immediate)
        soup = BeautifulSoup(target_response, 'lxml')
        total_context = soup.find('div', id='dlpage')
        list_title_dd = total_context.find_all('dd')
        list_pdf_dt = total_context.find_all('dt')
        pattern_title = re.compile(r'.Title: (.*).', re.S)
        # Summary file is opened in 'w' mode: a rerun on the same day
        # overwrites the previous summary.
        ab_f = open(self.current_time_dir + '/' + 'summary.txt', 'w')
        start_val_paper = 0
        for i in range(len(list_pdf_dt)):
            query_state = False
            comments_temp_text = ''
            title_temp = list_title_dd[i].find('div', class_='list-title mathjax').text
            comments_temp = list_title_dd[i].find('div', class_='list-comments mathjax')
            if comments_temp:
                comments_temp_text = comments_temp.text.replace('\n', '')
            pdf_temp = list_pdf_dt[i].find('a', title='Download PDF')['href']
            title_name = pattern_title.findall(title_temp)[0]
            # '/' would break the filesystem path, so replace it.
            title_name = title_name.replace('/', ' ')
            title_name_pdf = title_name + '.pdf'
            # 1) direct keyword match in the title
            for element in self.key_words:
                if element in title_name.lower():
                    query_state = True
                    self.get_pdf(title_name_pdf, pdf_temp, element)
                    break
            # 2) 'unsupervised ... learning' pattern match in the title
            if not query_state:
                pattern_word_match = re.compile(r'(.*)unsupervised (.*?)learning(.*)', re.S)
                if pattern_word_match.findall(title_name.lower()):
                    query_state = True
                    self.get_pdf(title_name_pdf, pdf_temp, element='unsupervised learning', UL=True)
            # 3) conference name match in the comments line
            if not query_state:
                for element in self.key_words_conference:
                    if element in comments_temp_text:
                        query_state = True
                        self.get_pdf(title_name_pdf, pdf_temp, element)
                        break
            if query_state:
                start_val_paper += 1
                print("Download {} papers!".format(str(start_val_paper)))
                # Fetch the abstract page and append it to the summary.
                Abstract_temp = list_pdf_dt[i].find('a', title='Abstract')['href']
                abstract_url = self.original_url + Abstract_temp[1:]
                abstract_response = self.get_url_context(abstract_url)
                soup = BeautifulSoup(abstract_response, 'lxml')
                abstract_text = soup.find('blockquote', class_='abstract mathjax').text.replace('\n', ' ')
                ab_f.write("[{}] ".format(str(i)) + title_name + '\n' * 2)
                if comments_temp:
                    ab_f.write(comments_temp_text + '\n')
                ab_f.write(abstract_text + '\n' * 2)
        ab_f.close()
if __name__ == "__main__":
    # Gather query parameters interactively, then fetch matching papers.
    range_arg = input("input your query range:")
    mode_arg = input("input your query mode:")
    main_arxiv(query_word=range_arg, query_mode=mode_arg).run_get_pdf()
|
#lex_auth_0127382206342184961397
def check_anagram(data1, data2):
    """Return True when data1 and data2 are anagrams (case-insensitive).

    BUG FIX: the original (a) compared set(data1) to set(data2), which
    ignores character multiplicity (so "aab" vs "abb" slipped through),
    and (b) returned False whenever ANY position held the same character
    in both strings, which rejects true anagrams such as
    "listen"/"silent". Comparing sorted character sequences checks the
    exact character multiset.
    """
    data1 = data1.lower()
    data2 = data2.lower()
    return sorted(data1) == sorted(data2)
#start writing your code here
# Smoke test: "eat" / "tea" are anagrams.
print(check_anagram("eat", "tea"))
|
# NOTE(review): the triple-quoted string below is commented-out matplotlib
# demo code. It contains typographic quotes (‘TkAgg’) that would be a
# SyntaxError if the string were ever uncommented back into code — replace
# them with plain quotes before reuse, or delete the dead code.
""" import matplotlib
matplotlib.use(‘TkAgg’)
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(0, 20, 100) # Create a list of evenly-spaced numbers over the range
plt.plot(x, np.sin(x)) # Plot the sine of each x point
plt.show() """
import dash
import dash_core_components as dcc
import dash_html_components as html
from datetime import datetime as dt

# Minimal dash app skeleton; `server` is exposed for WSGI deployment.
app = dash.Dash(__name__)
server = app.server
# BUG FIX: `item1` and `item2` were referenced but never defined, raising
# NameError at import time (and the line ended with a stray " |" dump
# artifact, a SyntaxError). Empty placeholder components keep the module
# importable; replace them with the intended layout children.
item1 = html.Div()
item2 = html.Div()
app.layout = html.Div([item1, item2])
from tkinter import *
from tkinter import messagebox
class Gui(Tk):
    """Main window of the currency converter.

    Child widgets are created without an explicit master, so they attach
    to the default root, i.e. this Tk instance (only safe while exactly
    one Tk window exists).
    """
    # Initialise the Gui object and build all widgets.
    def __init__(self):
        super().__init__()
        #Load Resources
        # Requires tick.gif in the working directory; a reference is kept
        # on self so the image is not garbage-collected.
        self.tick_image = PhotoImage(file="tick.gif")
        # Set window attributes
        self.title("Currency Converter")
        self.configure(bg="#ffe8e8",
                       height=400,
                       width=500,
                       pady=20,
                       padx=20)
        # Add components. Note: add_cc_frame() must run before the two
        # button builders because they parent into self.cc_frame.
        self.add_heading_label()
        self.add_amount_label()
        self.add_box1_entry()
        self.add_from_label()
        self.add_box2_entry()
        self.add_to_label()
        self.add_box3_entry()
        self.add_cc_frame()
        self.add_clearbtn_button()
        self.add_convertbtn_button()
        self.add_systeminfo_label()

    #........................FUNCTIONS..........................#
    def add_heading_label(self):
        # Large page title across the two content columns.
        self.heading_label = Label()
        self.heading_label.grid(row=0, column=2, columnspan=2)
        self.heading_label.configure(font="Arial 24", bg="#ffe8e8", text="Currency Converter", padx=10, pady=20)

    def add_amount_label(self):
        self.amount_label = Label()
        self.amount_label.grid(row=1, column=2, columnspan=2, sticky=W)
        self.amount_label.configure(font="Arial 13", bg="#ffe8e8", text="Amount")

    def add_box1_entry(self):
        # Amount input field.
        self.box1_entry = Entry()
        self.box1_entry.grid(row=2, column=2, columnspan=2)
        self.box1_entry.configure(font="Arial 13", bg="white", bd=3, relief="solid", width=50)

    def add_from_label(self):
        # NOTE(review): setting both text and image shows only the image
        # unless a compound option is configured — confirm intent.
        self.from_label = Label()
        self.from_label.grid(row=3, column=2, columnspan=2, sticky=W)
        self.from_label.configure(font="Arial 13", bg="#ffe8e8", text="From", image=self.tick_image)

    def add_box2_entry(self):
        # Source-currency input field.
        self.box2_entry = Entry()
        self.box2_entry.grid(row=4, column=2, columnspan=2)
        self.box2_entry.configure(font="Arial 13", bg="white", bd=3, relief="solid", width=50)

    def add_to_label(self):
        self.to_label = Label()
        self.to_label.grid(row=5, column=2, columnspan=2, sticky=W)
        self.to_label.configure(font="Arial 13", bg="#ffe8e8", text="To")

    def add_box3_entry(self):
        # Target-currency input field.
        self.box3_entry = Entry()
        self.box3_entry.grid(row=6, column=2, columnspan=2)
        self.box3_entry.configure(font="Arial 13", bg="white", bd=3, relief="solid", width=50)

    def add_clearbtn_button(self):
        # Clear button; lives inside cc_frame (built by add_cc_frame).
        self.clearbtn_button = Button(self.cc_frame)
        self.clearbtn_button.grid(row=7, column=2, columnspan=1, padx=20, pady=20)
        self.clearbtn_button.configure(font="Arial 10", bd=3, relief="solid", bg="white", text="Clear", width=10)
        self.clearbtn_button.bind("<ButtonRelease-1>", self.clearbtn_button_clicked)

    def clearbtn_button_clicked(self, event):
        # Reset the status banner to its idle message.
        self.systeminfo_label.configure(text="System Message Displayed Here")

    def add_convertbtn_button(self):
        self.convertbtn_button = Button(self.cc_frame)
        self.convertbtn_button.grid(row=7, column=3, sticky=W, padx=20, pady=20)
        self.convertbtn_button.configure(font="Arial 10", bd=3, relief="solid", bg="white", text="Convert", width=10)
        self.convertbtn_button.bind("<ButtonRelease-1>", self.convertbtn_button_clicked)

    def convertbtn_button_clicked(self, event):
        # NOTE(review): no conversion is implemented yet — the handler
        # only updates the status banner.
        self.systeminfo_label.configure(text="Converting...")

    def add_systeminfo_label(self):
        # Status banner at the bottom of the window.
        self.systeminfo_label = Label()
        self.systeminfo_label.grid(row=8, column=2, columnspan=2)
        self.systeminfo_label.configure(font="Arial 13", bg="#fffbce", bd=3, relief="solid", text="System Message Displayed Here", width=50, height=5)

    #..........................FRAMES BELOW..........................#
    def add_cc_frame(self):
        # Container frame for the Clear/Convert buttons.
        self.cc_frame = Frame()
        self.cc_frame.grid(row=7, column=2, columnspan=2)
        self.cc_frame.configure(bg="#ffe8e8")
# Launch the converter window when this module is executed directly.
if __name__ == "__main__":
    window = Gui()
    window.mainloop()
|
import usb
import array
import copy
import time
import zlib
import enum
import goodweSample
import iGoodwe
# Connection/registration states of the Goodwe inverter link, expressed
# through the enum functional API (same member names and values).
State = enum.Enum(
    "State",
    [
        ("OFFLINE", 1),
        ("CONNECTED", 2),
        ("DISCOVER", 3),
        ("ALLOC", 4),
        ("ALLOC_CONF", 5),
        ("ALLOC_ASK", 6),
        ("RUNNING", 7),
    ],
)
class CC:
    """Control codes: top-level message family of the Goodwe USB protocol."""
    reg = 0x00   # registration / discovery message family
    read = 0x01  # data-read message family
class FC:
    """Function codes within each control-code family.

    Several numeric values appear twice because the register family
    (CC.reg) and the read family (CC.read) interpret the same code
    differently; dispatch happens per control code (see the
    cc_reg_switch / cc_read_switch tables in goodweUsb.__init__).
    """
    # Register function codes
    offline = 0x00
    allocreg = 0x01
    query = 0x01
    remreg = 0x02
    query_id = 0x02
    query_stt = 0x03
    # Read function codes
    regreq = 0x80
    result = 0x81
    addconf = 0x81
    result_id = 0x82
    remconf = 0x82
    result_stt = 0x83
class goodweUsb( iGoodwe.iGoodwe) :
    # NOTE(review): this driver is Python 2 code (print statements,
    # `except Exception, ex`, xrange, str-based byte buffers) and will not
    # run under Python 3 without porting. Several apparent defects are
    # flagged inline below.
    #--------------------------------------------------------------------------
    def __init__(self, url, login_url, station_id):
        '''Initialisation of the goodweUsb class. All data members are set
        to default values. '''
        # NOTE(review): url and login_url are accepted but never used.
        self.m_sample = goodweSample.goodweSample()
        self.m_state = State.OFFLINE
        self.m_serialNumber = ""
        self.m_serialBuffer = ''
        # Bus address assigned to the inverter during registration.
        self.m_inverter_adr = 0x11
        self.m_inverter_adr_confirmed = False
        # NOTE(review): station_id is later passed to usb.core.find as
        # idVendor — confirm the caller really supplies a USB vendor id.
        self.m_deviceId = station_id
        self.m_dev = None
        self.m_epi = None
        self.m_initialized = False
        # Dispatch tables: function code -> handler, one per control-code
        # family (registration vs. read).
        self.cc_reg_switch = {FC.offline: self._skip_message,
                              FC.regreq: self._reg_received_registration,
                              FC.allocreg: self._skip_message,
                              FC.addconf: self._reg_received_confirm_registration,
                              FC.remreg: self._skip_message,
                              FC.remconf: self._reg_received_confirm_removal}
        self.cc_read_switch = {FC.query: self._skip_message,
                               FC.result: self._read_received_message,
                               FC.query_id: self._skip_message,
                               FC.result_id: self._skip_message,
                               FC.query_stt: self._skip_message,
                               FC.result_stt: self._skip_message}
        # State machine: current State -> action taken on the next poll.
        self.state_switch = { State.OFFLINE: self.initialize,
                              State.CONNECTED: self._remove_registration,
                              State.DISCOVER: self._discover_goodwe,
                              State.ALLOC: self._alloc_register,
                              State.ALLOC_CONF: self._no_action,
                              State.ALLOC_ASK: self._read_data_init,
                              State.RUNNING: self._read_data }
        # Initialize message buffer
        # [0xCC,0x99,0x09,0xAA,0x55,0x80,0x7F,0x00,
        #  0x00,0x00,0x01,0xFE,0x00,0x00,0x00,0x00,
        #  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        #  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        #  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        #  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        #  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        #  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        #  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00])
        # Init Ack message buffer
        # [0xCC,0x99,0x1A,0xAA,0x55,0x80,0x7F,0x00,
        #  0x01,0x11,0x00,0x00,0x00,0x00,0x00,0x00,
        #  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        #  0x00,0x00,0x11,0x00,0x00,0x00,0x00,0x00,
        #  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        #  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        #  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        #  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        #  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00])
        # Data message buffer
        # [0xCC,0x99,0x09,0xAA,0x55,0x80,0x11,0x01,
        #  0x01,0x00,0x01,0x92,0x00,0x00,0x00,0x00,
        #  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        #  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        #  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        #  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        #  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        #  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        #  0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00])
    #--------------------------------------------------------------------------
    def initialize( self):
        '''Initialize the USB port'''
        tries = 0
        self.m_initialized = False
        self.m_state = State.OFFLINE
        # Best effort: tear down any previous USB session first.
        try:
            self._terminate_usb()
        except:
            pass
        time.sleep(3)
        try:
            self._usb_init()
        except Exception, ex:
            raise IOError("Cannot initialize USB port: " + str(ex))
        else:
            self.m_initialized = True
            self.m_state = State.CONNECTED
    #--------------------------------------------------------------------------
    def is_online( self):
        #TRUE when the GoodWe inverter returns the correct status
        #
        print "Vpv0: " + str(self.m_sample.get_vpv(0))
        print "Vpv1: " + str(self.m_sample.get_vpv(1))
        # Online = inverter reports 'Normal' AND at least one PV string
        # shows a non-negligible voltage.
        return ((self.m_sample.is_inverter_status('Normal')) and (abs(self.m_sample.get_vpv(0)+self.m_sample.get_vpv(1)) > 0.01))
    #--------------------------------------------------------------------------
    def read_sample_data( self):
        '''Read a data sample.'''
        # Delegate to the action registered for the current state.
        try:
            self.state_switch[self.m_state]()
        except Exception, ex:
            # NOTE(review): "..." % str(...) applies % only to the first
            # argument and passes str(ex) as a second IOError argument —
            # the intended form is % (str(self.m_state), str(ex)).
            raise IOError( "Cannot read from GoodweUSB in state %s: %s" % str(self.m_state), str(ex))
    #--------------------------------------------------------------------------
    def _read_data( self):
        # NOTE(review): this method is defined twice in the class; the
        # second definition further below silently replaces this one.
        try:
            data = self._read_data_goodwe()
            # NOTE(review): _convert_data requires an isDTseries argument;
            # this one-argument call would raise TypeError.
            self._convert_data( data)
            print self.m_sample.to_string()
        except Exception, ex:
            print "Error, set offline"
            # self.m_sample.set_online( 'Offline')
            raise IOError( "Cannot read from GoodweUSB: " + str(ex))
        return self.m_sample
    #--------------------------------------------------------------------------
    # internal functions
    #--------------------------------------------------------------------------
    def _usb_init( self):
        '''This initialises the USB device'''
        self.m_dev = usb.core.find(idVendor = self.m_deviceId)
        if self.m_dev:
            self.m_dev.reset()
        else:
            raise ValueError('Device for vendor GoodWe (vendor ID %s) not found' % str(hex(self.m_deviceId)))
        # Detach the kernel driver so we can claim the interface ourselves.
        if self.m_dev.is_kernel_driver_active(0):
            print "Need to detach kernel driver."
            self.m_dev.detach_kernel_driver(0)
        print "Claiming device."
        try:
            print "Setting default USB configuration."
            self.m_dev.set_configuration()
        except:
            raise ValueError('Error setting USB configuration')
        try:
            print "Claiming USB interface."
            usb.util.claim_interface( self.m_dev, 0)
        except:
            raise ValueError('Error claiming USB interface')
        print "Getting active USB configuration."
        cfg = self.m_dev.get_active_configuration()
        intf = cfg[(0, 0)]
        print intf
        # get the BULK IN descriptor
        self.m_epi = usb.util.find_descriptor(
            intf,
            # match our first out endpoint
            custom_match= \
            lambda e: \
            usb.util.endpoint_direction(e.bEndpointAddress) == \
            usb.util.ENDPOINT_IN)
    #--------------------------------------------------------------------------
    def _terminate_usb( self):
        '''This terminates the USB driver'''
        usb.util.dispose_resources( self.m_dev)
        self.m_dev = None
        self.m_epi = None
        self.m_initialized = False
    #--------------------------------------------------------------------------
    def _read_data_goodwe( self):
        '''Continiuously read messages from the Goodwe inverter until a complete
        message packet has been received. The start of the message is marked
        with 2 bytes: 0xAA, 0x55. The 5th byte represents the message length.'''
        more = True
        startFound = False
        dataLen = 0
        dataPtr = 0
        lastByte = 0x00
        inBuffer = ''
        while more:
            try:
                dataStream = self.m_dev.read( self.m_epi, 8, 1000)
            except Exception, ex:
                raise IOError(" Unable to read 8 bytes from USB: " + str(ex))
            for byte in dataStream:
                # NOTE(review): chr(byte) is a str and 0x55 is an int, so
                # this comparison can never be true — presumably it was
                # meant to be `byte == 0x55 and lastByte == 0xAA`.
                if chr(byte) == 0x55 and lastByte == 0xAA:
                    startFound = True
                    dataPtr = 0
                    dataLen = 0
                    lastByte = 0x00
                    # NOTE(review): lastByte was reset to 0x00 on the line
                    # above, so the header is recorded as \x00 + byte
                    # instead of \xAA + byte.
                    inBuffer = chr(lastByte) + chr(byte)
                if startFound:
                    # NOTE(review): `self.dataPtr` does not exist as an
                    # attribute — this would raise AttributeError; the
                    # local `dataPtr` looks intended.
                    if dataLen > 0 or self.dataPtr < 5:
                        inBuffer += chr(byte)
                        dataPtr += 1
                        if dataPtr == 5:
                            # Byte 5 holds the payload length; +2 for CRC.
                            dataLen = ord(byte) + 2
                        elif dataPtr > 5:
                            dataLen -= 1
                        if dataPtr >= 5 and dataLen == 0:
                            startFound = False
                            self._check_crc_and_update_state( inBuffer)
                            more = False
                lastByte = byte
        return inBuffer
    #--------------------------------------------------------------------------
    def _check_crc_and_update_state( self, inBuffer):
        '''Calculate the CRC from a message and compare to the sent CRC in the
        message from the Goodwe inverter, then interpret the received message
        and call the correct message reply function. The CRC is encoded in the
        last 2 bytes and is not included in the CRC calculation.'''
        # NOTE(review): hB/lB are one-character strings while _calc_crc16
        # returns ints — the equality below compares str to int and would
        # always fail; ord() conversions appear to be missing here and in
        # the header-field comparisons further down.
        hB = inBuffer[len(inBuffer)-2]
        lB = inBuffer[len(inBuffer)-1]
        hC,lC = self._calc_crc16( inBuffer, len(inBuffer)-2)
        if not (hB == hC and lB == lC):
            raise ValueError("Calculated CRC doesn't match message CRC")
        src = inBuffer[0]
        dst = inBuffer[1]
        cc = inBuffer[2]
        fc = inBuffer[3]
        leng = inBuffer[4]
        data = inBuffer[5:]
        # Call the reply function for the received message
        if cc == CC.reg:
            self.cc_reg_switch[fc]( src, leng, inBuffer)
        elif cc == CC.read:
            self.cc_read_switch[fc]( src, leng, inBuffer)
    #--------------------------------------------------------------------------
    def _calc_crc16( self, buffer, length):
        '''Calculate the CRC from the message.'''
        # Simple additive checksum over the first `length` bytes, split
        # into high and low byte (not a true CRC-16 polynomial).
        crc = 0
        for cnt in xrange(length):
            crc += ord(buffer[cnt])
        #write out the high and low
        high = (crc >> 8) & 0xff;
        low = crc & 0xff;
        return high, low
    #--------------------------------------------------------------------------
    def _skip_message( self, src, leng, inBuffer):
        '''Not all possible messages have been implemented/can be received. This
        handles thos messages.'''
        print "An unused state was received: " + str(self.m_state) + "."
    #--------------------------------------------------------------------------
    def _reg_received_confirm_removal( self, src, leng, inBuffer):
        '''When the inverter sends the removal confirm message.'''
        print "Inverter removed."
        self.m_serialBuffer = ''
        self.m_inverter_adr_confirmed = False
    #--------------------------------------------------------------------------
    def _reg_received_registration( self, src, leng, inBuffer):
        '''When the inverter sends the registration message.'''
        print "Inverter registration received."
        # Keep the serial number so _alloc_register can echo it back.
        self.m_serialBuffer = inBuffer[0:16]
        self.m_state = State.ALLOC
    #--------------------------------------------------------------------------
    def _reg_received_confirm_registration( self, src, leng, inBuffer):
        '''When th einverter sends the registration confirmation message.'''
        print "Inverter registration confirmation received."
        if self.m_inverter_adr == src:
            self.m_inverter_adr_confirmed = True
            self.m_state = State.ALLOC_ASK
        else:
            self.m_state = State.OFFLINE
    #--------------------------------------------------------------------------
    def _read_received_message( self, src, leng, inBuffer):
        '''When the inverter sends the sample data.'''
        # A 66-byte payload is treated as a three-phase DT-series message.
        self._convert_data( inBuffer, leng == 66)
    #--------------------------------------------------------------------------
    def _scale_data( self, indata, offset, length, factor):
        '''Function to decode and scale the received sample data.'''
        # Big-endian integer assembled from `length` bytes, divided by
        # `factor` to restore the decimal point.
        res = 0.0
        for i in xrange(length):
            h = int(indata[offset+i].encode('hex'),16)
            res = res * 256.0 + float(h)
        return res / factor
    #--------------------------------------------------------------------------
    def _convert_data( self, indata, isDTseries):
        '''Function to disassemble the incoming data into readable format.'''
        # Payload starts after the 6-byte header.
        base = 6
        self.m_sample.set_vpv(0, self._scale_data( indata, base+ 0, 2, 10.0))
        self.m_sample.set_vpv(1, self._scale_data( indata, base+ 2, 2, 10.0))
        self.m_sample.set_ipv(0, self._scale_data( indata, base+ 4, 2, 10.0))
        self.m_sample.set_ipv(1, self._scale_data( indata, base+ 6, 2, 10.0))
        self.m_sample.set_vac(0, self._scale_data( indata, base+ 8, 2, 10.0))
        if isDTseries:
            # DT series carries all three AC phases.
            self.m_sample.set_vac(1, self._scale_data( indata, base+10, 2, 10.0))
            self.m_sample.set_vac(2, self._scale_data( indata, base+12, 2, 10.0))
        self.m_sample.set_iac(0, self._scale_data( indata, base+14, 2, 10.0))
        if isDTseries:
            self.m_sample.set_iac(1, self._scale_data( indata, base+16, 2, 10.0))
            self.m_sample.set_iac(2, self._scale_data( indata, base+18, 2, 10.0))
        self.m_sample.set_fac(0, self._scale_data( indata, base+20, 2, 100.0))
        if isDTseries:
            self.m_sample.set_fac(1, self._scale_data( indata, base+22, 2, 100.0))
            self.m_sample.set_fac(2, self._scale_data( indata, base+24, 2, 100.0))
        self.m_sample.set_pgrid( self._scale_data( indata, base+26, 2, 1.0))
        if self._scale_data( indata, base+28, 2, 1.0) > 0.0:
            #self.m_sample.set_online( 'Normal')
            print "Online"
        else:
            print "Offline"
            #self.m_sample.set_online( 'Offline')
        self.m_sample.set_temperature( self._scale_data( indata, base+30, 2, 10.0))
        self.m_sample.set_etotal( self._scale_data( indata, base+36, 4, 10.0))
        self.m_sample.set_htotal( self._scale_data( indata, base+40, 4, 1.0))
        self.m_sample.set_eday( self._scale_data( indata, base+64, 2, 10.0))
        self.m_sample.set_error( indata[base+73:base+77])
        try:
            # Efficiency = AC power out / total DC power in.
            self.m_sample.set_eff( self.m_sample.get_pgrid() / ((self.m_sample.get_vpv(0) * self.m_sample.get_ipv(0)) + (self.m_sample.get_vpv(1) * self.m_sample.get_ipv(1))))
        except:
            self.m_sample.set_eff( 0.0)
        #Values that I'm not using (or don't know what they are
        self.m_sample.set_consume_day(0.0)
        self.m_sample.set_consume_total(0.0)
        self.m_sample.set_vbattery(0.0)
        self.m_sample.set_ibattery(0.0)
        self.m_sample.set_soc(0.0)
        self.m_sample.set_load(0.0)
        self.m_sample.set_description('')
    #--------------------------------------------------------------------------
    def _remove_registration( self):
        '''Function to handle the message state machine. This function handles
        the removal of the registration state. No action is needed.'''
        print "Remove registration"
    #--------------------------------------------------------------------------
    def _discover_goodwe( self):
        '''Function to handle the message state machine. This function handles
        the discovery of the inverter. A message is sent.'''
        if not self.m_inverter_adr_confirmed:
            # NOTE(review): _goodwe_send requires a `leng` argument before
            # `data`; this 3-argument call would raise TypeError.
            self._goodwe_send( 0x7F, CC.reg, FC.offline)
    #--------------------------------------------------------------------------
    def _alloc_register( self):
        '''Function to handle the message state machine. This function handles
        the registration of the inverter. A message is sent with the
        previously received serial number.'''
        # NOTE(review): m_serialBuffer is a str slice, so chr(x) over its
        # characters raises TypeError, and appending the int
        # m_inverter_adr to a str also fails — this path looks untested.
        serial=''.join(chr(x) for x in self.m_serialBuffer)
        serial+=self.m_inverter_adr
        self._goodwe_send( 0x7F, CC.reg, FC.allocreg, serial)
        self.m_state = State.ALLOC_CONF
    #--------------------------------------------------------------------------
    def _no_action( self):
        '''Function to skip a certain state.'''
        print "An unused state was received: " + str(self.m_state) + "."
    #--------------------------------------------------------------------------
    def _read_data_init( self):
        '''Function to handle the message state machine. This function handles
        the first request of sample data. A message is sent with the
        previously negotiated inverter address.'''
        self._goodwe_send( self.m_inverter_adr, CC.read, FC.query)
        self.m_state = State.RUNNING
    #--------------------------------------------------------------------------
    def _read_data( self):
        '''Function to handle the message state machine. This function handles
        subsequent requests of sample data. A message is sent with the
        previously negotiated inverter address.'''
        # NOTE(review): second definition of _read_data — it replaces the
        # earlier one near the top of the class, so the earlier
        # read-and-convert variant is dead code.
        if self.m_inverter_adr_confirmed:
            self._goodwe_send( self.m_inverter_adr, CC.read, FC.query)
        else:
            raise IOError("Inverter not online, or address unkown. Cannot read.")
    #--------------------------------------------------------------------------
    def _goodwe_send( self, address, cc, fc, leng, data = None):
        # NOTE(review): several defects in this method: (1) every call
        # site passes 3-4 positional arguments, so `leng` swallows the
        # payload and `data` stays None; (2) ''.join over a list of ints
        # raises TypeError (chr() conversions missing); (3) sendBuffer is
        # rebuilt on the third-to-last assignment as just the 0xCC 0x99
        # length prefix, discarding header, payload and CRC; (4)
        # `senfBuffer` is a typo for `sendBuffer` and raises NameError.
        sendBuffer=''.join([0xAA, 0x55, 0x80, address, cc, fc])
        if data:
            sendBuffer+=data
        h,l=self._calc_crc16(sendBuffer, len(sendBuffer))
        sendBuffer=sendBuffer + ''.join( [chr(h),chr(l)])
        sendBuffer=''.join( chr(x) for x in [0xCC, 0x99, len(sendBuffer)])
        self._hexprint("goodwe send", senfBuffer)
        lenn = self.m_dev.ctrl_transfer( 0x21, 0x09, 0, 0, sendBuffer)
        if lenn != len(sendBuffer):
            print 'received length ' + str(lenn) + ' is not ' + str(len(sendBuffer)) + '.'
        return lenn
    #--------------------------------------------------------------------------
    def _hexprint( self, string, data):
        # Debug helper: print a buffer as colon-separated hex bytes.
        ret=string + ':'
        for character in data:
            ret += '0x' + character.encode('hex') + ':'
        print ret
    #--------------------------------------------------------------------------
    def terminate( self):
        self._terminate_usb()
#---------------- End of file ------------------------------------------------
|
#!/usr/bin/env python
import cv2
from cv2 import aruco
import math
import tempfile
import numpy as np
import logging
import olympe
from olympe.messages.ardrone3.Piloting import TakeOff, Landing, PCMD, moveBy, CancelMoveBy
from olympe.messages.ardrone3.PilotingState import FlyingStateChanged
from olympe.messages.ardrone3.Camera import Orientation
from olympe.messages.ardrone3.CameraState import Orientation as StateOrientation
import olympe_deps as od
# NOTE: from Parrot Olympe repository
class DronePreciseLanding:
    """Stream video from a Parrot Bebop 2 and land it on an ArUco marker.

    The YUV frame callback runs ArUco marker detection whenever ``landing``
    is True; ``land()`` consumes the detected marker position to centre the
    drone over the pad, lower it, and finally trigger Landing().
    """

    def __init__(self):
        # Create the olympe.Drone object from its IP address
        logging.basicConfig(format='%(asctime)s %(message)s')
        self.drone = olympe.Drone(
            "192.168.42.1",
            loglevel=1,
            drone_type=od.ARSDK_DEVICE_TYPE_BEBOP_2,
        )
        self.tempd = tempfile.mkdtemp(prefix="olympe_streaming_test_")
        print("Olympe streaming example output dir: {}".format(self.tempd))
        self.cv2frame = None    # latest decoded BGR frame from the stream
        self.landing = False    # True while the landing loop is active
        self.contour = None
        # 4x4 ArUco marker dictionary used for the landing pad.
        self.aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_250)
        self.aruco_params = aruco.DetectorParameters_create()
        self.corners = None     # marker corners from the latest frame
        self.ids = []           # marker ids from the latest frame
        self.rejectedImgPoints = None

    def start(self):
        """Connect to the drone, level the camera and start streaming."""
        # Connect the the drone
        self.drone.connection()
        self.drone(Orientation(pan=0, tilt=0) & StateOrientation(pan=0, tilt=0)).wait().success()
        # You can record the video stream from the drone if you plan to do some
        # post processing.
        # Here, we don't record the (huge) raw YUV video stream
        # raw_data_file=os.path.join(self.tempd,'raw_data.bin'),
        # raw_meta_file=os.path.join(self.tempd,'raw_metadata.json'),
        # Setup your callback functions to do some live video processing
        self.drone.set_streaming_callbacks(
            raw_cb=self.yuv_frame_cb,
        )
        # Start video streaming
        self.drone.start_video_streaming()

    def stop(self):
        """Stop the stream, close the OpenCV windows and disconnect."""
        # Properly stop the video stream and disconnect
        self.drone.stop_video_streaming()
        cv2.destroyAllWindows()
        self.drone.disconnection()

    def yuv_frame_cb(self, yuv_frame):
        """
        This function will be called by Olympe for each decoded YUV frame.

        :type yuv_frame: olympe.VideoFrame
        """
        # the VideoFrame.info() dictionary contains some useful informations
        # such as the video resolution
        info = yuv_frame.info()
        height, width = info["yuv"]["height"], info["yuv"]["width"]
        # convert pdraw YUV flag to OpenCV YUV flag
        cv2_cvt_color_flag = {
            olympe.PDRAW_YUV_FORMAT_I420: cv2.COLOR_YUV2BGR_I420,
            olympe.PDRAW_YUV_FORMAT_NV12: cv2.COLOR_YUV2BGR_NV12,
        }[info["yuv"]["format"]]
        # yuv_frame.as_ndarray() is a 2D numpy array with the proper "shape"
        # i.e (3 * height / 2, width) because it's a YUV I420 or NV12 frame
        # Use OpenCV to convert the yuv frame to RGB
        self.cv2frame = cv2.cvtColor(yuv_frame.as_ndarray(), cv2_cvt_color_flag)
        img = cv2.cvtColor(self.cv2frame, cv2.COLOR_BGR2GRAY)
        if self.landing:
            # Detect the landing-pad marker; the results are stored on self
            # and consumed asynchronously by land().
            self.corners, self.ids, self.rejectedImgPoints = aruco.detectMarkers(img, self.aruco_dict, parameters=self.aruco_params)
            img = aruco.drawDetectedMarkers(img, self.corners, self.ids)
            if self.ids is None:
                self.ids = []
            # Draw a bounding box around each detected marker.
            for i in range(len(self.ids)):
                c = self.corners[i][0]
                x, y, w, h = cv2.boundingRect(c)
                cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.imshow("Grayscale", img)
            cv2.waitKey(1)
        # Use OpenCV to show this frame
        cv2.imshow("Parrot Bebop 2 FPV", self.cv2frame)
        cv2.waitKey(1)  # please OpenCV for 1 ms...

    def land(self):
        """Centre the drone over the detected marker and land.

        Nudges the drone forward/backward and left/right until the marker is
        roughly centred in the frame, lowers it while centred, and triggers
        Landing() once the marker fills enough of the image.  Rotates in
        place when no marker has been seen for several iterations.

        Returns:
            True when the routine finishes.
        """
        from time import sleep
        self.landing = True
        maxsize = 0
        no_mark_count = 0   # consecutive iterations without a marker
        rotated = 0
        while self.landing:
            if self.corners is not None:
                for i in self.corners:
                    adj_location = False
                    f_b = 0     # forward(+1)/backward(-1) adjustment flag
                    l_r = 0     # right(+1)/left(-1) adjustment flag
                    rotated = 0
                    no_mark_count = 0
                    c = i
                    x, y, w, h = cv2.boundingRect(c)
                    maxsize = cv2.contourArea(c)
                    # Marker covers more than 6.5% of the frame: assume we
                    # are directly above the pad and cut the engines.
                    if maxsize > self.cv2frame.shape[0] * self.cv2frame.shape[1] * 0.065:
                        print('Landing pad below drone -> engines off')
                        self.drone(
                            Landing()
                            >> FlyingStateChanged(state="landed", _timeout=5)
                        ).wait().success()
                        self.landing = False
                        break
                    print(maxsize)
                    # Distances from the marker box to each frame edge; the
                    # -50 biases the vertical target slightly above centre.
                    from_top = y - 50
                    from_left = x
                    from_bottom = self.cv2frame.shape[0] - y - h
                    from_right = self.cv2frame.shape[1] - x - w
                    top_bot_diff = from_top - from_bottom
                    if abs(top_bot_diff) > 100:
                        if top_bot_diff > 0:
                            print('Need to move backward')
                            f_b = -1
                            adj_location = True
                        else:
                            print('Need to move forward')
                            f_b = 1
                            adj_location = True
                    left_right_diff = from_left - from_right
                    if abs(left_right_diff) > 150:
                        if left_right_diff > 0:
                            print('Need to move right')
                            l_r = 1
                            adj_location = True
                        else:
                            print('Need to move left')
                            l_r = -1
                            adj_location = True
                    if adj_location:
                        # Scale the nudge by the apparent marker size, with a
                        # lower bound of 0.5.
                        dist_adj = max([maxsize / (self.cv2frame.shape[0] * self.cv2frame.shape[1] * 0.22), 0.5])
                        self.drone(CancelMoveBy()) >> self.drone(moveBy(dist_adj * f_b * 0.3, dist_adj * l_r * 0.3, 0, 0, _timeout=20)).wait().success()
                    else:
                        #self.drone(Orientation(pan=0, tilt=-82) & StateOrientation(pan=0, tilt=-82)).wait().success()
                        print('On spot - lowering')
                        self.drone(moveBy(0, 0, 0.3, 0, _timeout=20)).wait().success()
            else:
                no_mark_count += 1
                if no_mark_count > 5:
                    print("Nothing detected - rotating")
                    self.drone(moveBy(0, 0, 0, math.pi / 2, _timeout=20)).wait().success()
                    rotated += 1
                    #if rotated > 4:
                    #    self.drone(Orientation(pan=0, tilt=-60) & StateOrientation(pan=0, tilt=-60)).wait().success()
            #else:
            #if StateOrientation(pan=0,tilt=-82):
            #    print('aaaa')
            #    self.drone(Orientation(pan=0, tilt=-82) & StateOrientation(pan=0, tilt=-82)).wait().success()
            sleep(1)
        return True

    def fly(self):
        """Take off, move away from the start point, then run the landing."""
        # Takeoff, fly, land, ...
        print("Takeoff")
        self.drone(
            TakeOff()
            & FlyingStateChanged(
                state="hovering", _timeout=10, _policy="check_wait")
        ).wait()
        #for i in range(2):
        #    print("Moving by ({}/2)...".format(i + 1))
        self.drone(moveBy(1, 0, -0.5, 0, _timeout=20)).wait().success()
        print("Landing...")
        # Tilt the camera down so the landing pad is visible below the drone.
        self.drone(Orientation(pan=0, tilt=-82) & StateOrientation(pan=0, tilt=-82)).wait().success()
        self.land()
        print("Landed\n")
if __name__ == "__main__":
    # BUG FIX: the original instantiated StreamingExample, a class that does
    # not exist in this module (NameError); the class defined above is
    # DronePreciseLanding.
    streaming_example = DronePreciseLanding()
    # Start the video stream
    streaming_example.start()
    # Perform some live video processing while the drone is flying
    streaming_example.fly()
    streaming_example.stop()
|
from compressor.templatetags.compress import CompressorNode
from django.template.base import Template
def seizaki_compress(context, data, name):
    """Manually run a sekizai block through django-compressor.

    Equivalent to wrapping *data* in a ``{% compress name %}`` template tag.

    Args:
        context: the template context used for rendering.
        data: the string captured from the template (the list of js/css tags
            in the sekizai namespace).
        name: the sekizai namespace, either 'js' or 'css'.

    Returns:
        The rendered (compressed) output as a string.
    """
    # BUG FIX: removed a stray Python 2 ``print data`` debug statement, which
    # is a SyntaxError on Python 3 and leaked template content to stdout.
    node = CompressorNode(nodelist=Template(data).nodelist, kind=name, mode='file')
    return node.render(context=context)
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
# login code modified from https://gist.github.com/guillaumevincent/4771570
import tornado.auth
import tornado.escape
import tornado.web
import tornado.websocket
from os.path import dirname, join
from base64 import b64encode
from uuid import uuid4
from qiita_core.qiita_settings import qiita_config
from qiita_core.util import is_test_environment
from qiita_pet.handlers.base_handlers import (
MainHandler, NoPageHandler, IFrame)
from qiita_pet.handlers.auth_handlers import (
AuthCreateHandler, AuthLoginHandler, AuthLogoutHandler, AuthVerifyHandler)
from qiita_pet.handlers.user_handlers import (
ChangeForgotPasswordHandler, ForgotPasswordHandler, UserProfileHandler,
UserMessagesHander, UserJobs)
from qiita_pet.handlers.analysis_handlers import (
ListAnalysesHandler, AnalysisSummaryAJAX, SelectedSamplesHandler,
AnalysisDescriptionHandler, AnalysisGraphHandler, CreateAnalysisHandler,
AnalysisJobsHandler, ShareAnalysisAJAX)
from qiita_pet.handlers.study_handlers import (
StudyIndexHandler, StudyBaseInfoAJAX, SampleTemplateHandler,
SampleTemplateOverviewHandler, SampleTemplateColumnsHandler,
StudyEditHandler, ListStudiesHandler, ListStudiesAJAX, EBISubmitHandler,
CreateStudyAJAX, ShareStudyAJAX, StudyApprovalList, ArtifactGraphAJAX,
VAMPSHandler, Study, StudyTags, StudyGetTags,
ListCommandsHandler, ListOptionsHandler, PrepTemplateSummaryAJAX,
PrepTemplateAJAX, NewArtifactHandler, SampleAJAX, StudyDeleteAjax,
ArtifactAdminAJAX, NewPrepTemplateAjax, DataTypesMenuAJAX, StudyFilesAJAX,
ArtifactGetSamples, ArtifactGetInfo, WorkflowHandler,
WorkflowRunHandler, JobAJAX, AutocompleteHandler)
from qiita_pet.handlers.artifact_handlers import (
ArtifactSummaryAJAX, ArtifactAJAX, ArtifactSummaryHandler)
from qiita_pet.handlers.websocket_handlers import (
MessageHandler, SelectedSocketHandler, SelectSamplesHandler)
from qiita_pet.handlers.logger_handlers import LogEntryViewerHandler
from qiita_pet.handlers.upload import (
UploadFileHandler, StudyUploadFileHandler, StudyUploadViaRemote)
from qiita_pet.handlers.stats import StatsHandler
from qiita_pet.handlers.download import (
DownloadHandler, DownloadStudyBIOMSHandler, DownloadRelease,
DownloadRawData, DownloadEBISampleAccessions, DownloadEBIPrepAccessions,
DownloadUpload, DownloadPublicHandler)
from qiita_pet.handlers.prep_template import (
PrepTemplateHandler, PrepTemplateGraphHandler, PrepTemplateJobHandler)
from qiita_pet.handlers.ontology import OntologyHandler
from qiita_pet.handlers.software import SoftwareHandler
from qiita_db.handlers.processing_job import (
JobHandler, HeartbeatHandler, ActiveStepHandler, CompleteHandler,
ProcessingJobAPItestHandler)
from qiita_db.handlers.artifact import (
ArtifactHandler, ArtifactAPItestHandler, ArtifactTypeHandler)
from qiita_db.handlers.sample_information import SampleInfoDBHandler
from qiita_db.handlers.user import UserInfoDBHandler, UsersListDBHandler
from qiita_db.handlers.prep_template import (
PrepTemplateDataHandler, PrepTemplateAPItestHandler,
PrepTemplateDBHandler)
from qiita_db.handlers.oauth2 import TokenAuthHandler
from qiita_db.handlers.reference import ReferenceHandler
from qiita_db.handlers.core import ResetAPItestHandler
from qiita_db.handlers.plugin import (
PluginHandler, CommandHandler, CommandListHandler, CommandActivateHandler,
ReloadPluginAPItestHandler)
from qiita_db.handlers.analysis import APIAnalysisMetadataHandler
from qiita_db.handlers.archive import APIArchiveObservations
from qiita_db.util import get_mountpoint
from qiita_pet.handlers.rest import ENDPOINTS as REST_ENDPOINTS
from qiita_pet.handlers.qiita_redbiom import RedbiomPublicSearch
if qiita_config.portal == "QIITA":
from qiita_pet.handlers.portal import (
StudyPortalHandler, StudyPortalAJAXHandler)
# Paths derived from the package location plus runtime configuration.
DIRNAME = dirname(__file__)
STATIC_PATH = join(DIRNAME, "static")
TEMPLATE_PATH = join(DIRNAME, "templates")  # base folder for webpages
_, RES_PATH = get_mountpoint('job')[0]
# NOTE(review): this secret is regenerated on every process start, so if it
# is the one actually used, sessions would not survive a restart nor be
# shared between workers -- confirm intent.
COOKIE_SECRET = b64encode(uuid4().bytes + uuid4().bytes)
DEBUG = qiita_config.test_environment
_vendor_js = join(STATIC_PATH, 'vendor', 'js')
class Application(tornado.web.Application):
    """Tornado application wiring every Qiita URL to its handler.

    Route order matters in several sections (noted inline): tornado uses the
    first pattern that matches, so catch-all regexes must come after their
    more specific siblings.
    """

    def __init__(self):
        handlers = [
            (r"/", MainHandler),
            (r"/auth/login/", AuthLoginHandler),
            (r"/auth/logout/", AuthLogoutHandler),
            (r"/auth/create/", AuthCreateHandler),
            (r"/auth/verify/(.*)", AuthVerifyHandler),
            (r"/auth/forgot/", ForgotPasswordHandler),
            (r"/auth/reset/(.*)", ChangeForgotPasswordHandler),
            (r"/profile/", UserProfileHandler),
            (r"/user/messages/", UserMessagesHander),
            (r"/user/jobs/", UserJobs),
            (r"/static/(.*)", tornado.web.StaticFileHandler,
             {"path": STATIC_PATH}),
            # Analysis handlers
            (r"/analysis/list/", ListAnalysesHandler),
            # NOTE(review): "sumary" is misspelled but is the live public
            # URL -- confirm before ever renaming it.
            (r"/analysis/dflt/sumary/", AnalysisSummaryAJAX),
            (r"/analysis/create/", CreateAnalysisHandler),
            (r"/analysis/selected/", SelectedSamplesHandler),
            (r"/analysis/selected/socket/", SelectedSocketHandler),
            (r"/analysis/description/(.*)/graph/", AnalysisGraphHandler),
            (r"/analysis/description/(.*)/jobs/", AnalysisJobsHandler),
            (r"/analysis/description/(.*)/", AnalysisDescriptionHandler),
            (r"/analysis/sharing/", ShareAnalysisAJAX),
            (r"/artifact/samples/", ArtifactGetSamples),
            (r"/artifact/info/", ArtifactGetInfo),
            (r"/consumer/", MessageHandler),
            (r"/admin/error/", LogEntryViewerHandler),
            (r"/admin/approval/", StudyApprovalList),
            (r"/admin/artifact/", ArtifactAdminAJAX),
            (r"/admin/software/", SoftwareHandler),
            (r"/ebi_submission/(.*)", EBISubmitHandler),
            # Study handlers
            (r"/study/create/", StudyEditHandler),
            (r"/study/edit/(.*)", StudyEditHandler),
            (r"/study/list/", ListStudiesHandler),
            (r"/study/process/commands/options/", ListOptionsHandler),
            (r"/study/process/commands/", ListCommandsHandler),
            (r"/study/process/workflow/run/", WorkflowRunHandler),
            (r"/study/process/workflow/", WorkflowHandler),
            (r"/study/process/job/", JobAJAX),
            (r"/study/list/socket/", SelectSamplesHandler),
            (r"/study/list_studies/(.*)", ListStudiesAJAX),
            (r"/study/new_artifact/", NewArtifactHandler),
            (r"/study/files/", StudyFilesAJAX),
            (r"/study/sharing/", ShareStudyAJAX),
            (r"/study/sharing/autocomplete/", AutocompleteHandler),
            (r"/study/new_prep_template/", NewPrepTemplateAjax),
            (r"/study/tags/(.*)", StudyTags),
            (r"/study/get_tags/", StudyGetTags),
            (r"/study/([0-9]+)$", Study),
            # Artifact handlers
            (r"/artifact/graph/", ArtifactGraphAJAX),
            (r"/artifact/(.*)/summary/", ArtifactSummaryAJAX),
            (r"/artifact/html_summary/(.*)", ArtifactSummaryHandler,
             {"path": qiita_config.base_data_dir}),
            (r"/artifact/(.*)/", ArtifactAJAX),
            # Prep template handlers
            (r"/prep_template/", PrepTemplateHandler),
            (r"/prep_template/(.*)/graph/", PrepTemplateGraphHandler),
            (r"/prep_template/(.*)/jobs/", PrepTemplateJobHandler),
            (r"/ontology/", OntologyHandler),
            # ORDER FOR /study/description/ SUBPAGES HERE MATTERS.
            # Same reasoning as below. /study/description/(.*) should be last.
            (r"/study/description/sample_template/overview/",
             SampleTemplateOverviewHandler),
            (r"/study/description/sample_template/columns/",
             SampleTemplateColumnsHandler),
            (r"/study/description/sample_template/", SampleTemplateHandler),
            (r"/study/description/sample_summary/", SampleAJAX),
            (r"/study/description/prep_summary/", PrepTemplateSummaryAJAX),
            (r"/study/description/prep_template/", PrepTemplateAJAX),
            (r"/study/description/baseinfo/", StudyBaseInfoAJAX),
            (r"/study/description/data_type_menu/", DataTypesMenuAJAX),
            (r"/study/description/(.*)", StudyIndexHandler),
            (r"/study/delete/", StudyDeleteAjax),
            (r"/study/upload/remote/(.*)", StudyUploadViaRemote),
            (r"/study/upload/(.*)", StudyUploadFileHandler),
            (r"/upload/", UploadFileHandler),
            (r"/check_study/", CreateStudyAJAX),
            (r"/stats/", StatsHandler),
            (r"/download/(.*)", DownloadHandler),
            (r"/download_study_bioms/(.*)", DownloadStudyBIOMSHandler),
            (r"/download_raw_data/(.*)", DownloadRawData),
            (r"/download_ebi_accessions/samples/(.*)",
             DownloadEBISampleAccessions),
            (r"/download_ebi_accessions/experiments/(.*)",
             DownloadEBIPrepAccessions),
            (r"/download_upload/(.*)", DownloadUpload),
            (r"/release/download/(.*)", DownloadRelease),
            (r"/public_download/", DownloadPublicHandler),
            (r"/vamps/(.*)", VAMPSHandler),
            (r"/redbiom/(.*)", RedbiomPublicSearch),
            (r"/iframe/", IFrame),
            # Plugin handlers - the order matters here so do not change
            # qiita_db/jobs/(.*) should go after any of the
            # qiita_db/jobs/(.*)/XXXX because otherwise it will match the
            # regular expression and the qiita_db/jobs/(.*)/XXXX will never
            # be hit.
            (r"/qiita_db/authenticate/", TokenAuthHandler),
            (r"/qiita_db/jobs/(.*)/heartbeat/", HeartbeatHandler),
            (r"/qiita_db/jobs/(.*)/step/", ActiveStepHandler),
            (r"/qiita_db/jobs/(.*)/complete/", CompleteHandler),
            (r"/qiita_db/jobs/(.*)", JobHandler),
            (r"/qiita_db/artifacts/types/", ArtifactTypeHandler),
            (r"/qiita_db/artifacts/(.*)/", ArtifactHandler),
            (r"/qiita_db/users/", UsersListDBHandler),
            (r"/qiita_db/user/(.*)/data/", UserInfoDBHandler),
            (r"/qiita_db/sample_information/(.*)/data/", SampleInfoDBHandler),
            (r"/qiita_db/prep_template/(.*)/data/", PrepTemplateDataHandler),
            (r"/qiita_db/prep_template/(.*)/", PrepTemplateDBHandler),
            (r"/qiita_db/references/(.*)/", ReferenceHandler),
            (r"/qiita_db/plugins/(.*)/(.*)/commands/(.*)/activate/",
             CommandActivateHandler),
            (r"/qiita_db/plugins/(.*)/(.*)/commands/(.*)/", CommandHandler),
            (r"/qiita_db/plugins/(.*)/(.*)/commands/", CommandListHandler),
            (r"/qiita_db/plugins/(.*)/(.*)/", PluginHandler),
            (r"/qiita_db/analysis/(.*)/metadata/", APIAnalysisMetadataHandler),
            (r"/qiita_db/archive/observations/", APIArchiveObservations)
        ]
        # rest endpoints
        handlers.extend(REST_ENDPOINTS)
        if qiita_config.portal == "QIITA":
            # Add portals editing pages only on main portal
            portals = [
                (r"/admin/portals/studies/", StudyPortalHandler),
                (r"/admin/portals/studiesAJAX/", StudyPortalAJAXHandler)
            ]
            handlers.extend(portals)
        if is_test_environment():
            # We add the endpoints for testing plugins
            test_handlers = [
                (r"/apitest/processing_job/", ProcessingJobAPItestHandler),
                (r"/apitest/reset/", ResetAPItestHandler),
                (r"/apitest/prep_template/", PrepTemplateAPItestHandler),
                (r"/apitest/artifact/", ArtifactAPItestHandler),
                (r"/apitest/reload_plugins/", ReloadPluginAPItestHandler)
            ]
            handlers.extend(test_handlers)
        # 404 PAGE MUST BE LAST IN THIS LIST!
        handlers.append((r".*", NoPageHandler))
        settings = {
            "template_path": TEMPLATE_PATH,
            "debug": DEBUG,
            # NOTE(review): uses the configured secret, not the module-level
            # COOKIE_SECRET generated at import -- confirm which is intended.
            "cookie_secret": qiita_config.cookie_secret,
            "login_url": "%s/auth/login/" % qiita_config.portal_dir,
        }
        tornado.web.Application.__init__(self, handlers, **settings)
|
from tkinter import *
class Menubar():
    """Classic menu bar attached to a Tk window; menus are addressed by index."""

    def __init__(self, window: Tk):
        super().__init__()
        self.menubar = Menu(window)
        self.menu: list = []  # top-level Menu widgets, in creation order
        window.config(menu=self.menubar)

    def __getitem__(self, index) -> Menu:
        return self.menu[index]

    def get_menu(self, index) -> Menu:
        """Return the menu at *index* (same as ``self[index]``)."""
        return self[index]

    def add_menu(self, text: str) -> int:
        "Return the index of the newly added menu."
        new_menu = Menu(self.menubar, tearoff = 0)
        self.menu.append(new_menu)
        self.menubar.add_cascade(label = text, menu = new_menu);  # attach the menu to the bar
        return len(self.menu) - 1

    @staticmethod
    def add_submenu(root_menu: Menu, text: str) -> Menu:
        "Return the submenu that was added under *root_menu*."
        submenu = Menu(root_menu, tearoff = 0)
        root_menu.add_cascade(label = text, menu = submenu)
        return submenu

    def menu_add_menu(self, menu_index: int, text: str) -> Menu:
        "Return the submenu added under the menu at *menu_index*."
        root: Menu = self[menu_index]
        submenu = Menu(root, tearoff = 0)
        root.add_cascade(label = text, menu = submenu)
        return submenu

    def menu_add_item(self, menu_index: int, text: str, func):
        """Add a command item labelled *text* calling *func* to the menu."""
        self.get_menu(menu_index).add_command(label = text, command = func)  # add an item to the menu

    def menu_add_separator(self, menu_index: int):
        """Add a separator line to the menu at *menu_index*."""
        self.get_menu(menu_index).add_separator()
class FlyMenubar():
    """Menu bar whose submenus are tracked in a nested dict keyed by Menu widgets.

    ``self.menu`` maps each top-level Menu to a dict of its child Menus,
    recursively; leaves are empty dicts.
    """

    def __init__(self, window: Tk):
        super().__init__()
        self.menubar = Menu(window, tearoff = 0)
        self.menu = {}  # {Menu: {child Menu: {...}}}
        window.config(menu=self.menubar)

    def __getitem__(self, index) -> Menu:
        "Return the top-level menu at *index*."
        return list(self.menu.keys())[index]

    def get_menu(self, index) -> Menu:
        "Return the top-level menu at *index*."
        return self[index]

    def add_menu(self, text: str) -> int:
        "Add a top-level menu; return its index."
        new_menu = Menu(self.menubar, tearoff = 0)
        self.menu[new_menu] = {}
        self.menubar.add_cascade(label = text, menu = new_menu);  # attach the menu to the bar
        return len(self.menu) - 1

    def add_submenu(self, text: str, menu_index: tuple) -> int:
        """
        Add a submenu at the position described by *menu_index*.

        *menu_index* counts submenus only, not plain items.  Example:

            +-------+
            | A     |
            | B    >|   note: entries marked > are submenus
            | C    >|
            +-------+

        Here index 0 is B (not A) and index 1 is C.

        Returns the index of the added submenu, or -1 on failure.
        """
        if len(menu_index) > 0:
            prev_menus: dict = self.menu
            prev_menu: Menu = None
            for index in menu_index:
                prev_menu = list(prev_menus.keys())[index];  # look up the key by position
                prev_menus = prev_menus[prev_menu];  # descend into its children
            if prev_menu == None or prev_menus == None:
                return -1
            submenu = Menu(prev_menu, tearoff = 0)
            prev_menus[submenu] = {}
            prev_menu.add_cascade(label = text, menu = submenu)
            return len(prev_menus) - 1
        else:
            return -1

    def menu_add_menu(self, menu_index: int, text: str) -> int:
        "Add a second-level menu under the top-level menu; return its index."
        root: Menu = self[menu_index]
        submenu = Menu(root, tearoff = 0)
        self.menu[root][submenu] = {}
        root.add_cascade(label = text, menu = submenu)
        return len(self.menu[root]) - 1

    def menu_add_item(self, menu_index: int, text: str, func):
        "Add a command item to a top-level menu."
        m: Menu = self[menu_index]
        m.add_command(label = text, command = func)  # add an item to the menu

    def menu_add_separator(self, menu_index: int):
        "Add a separator line to a top-level menu."
        self[menu_index].add_separator()
|
#!/usr/bin/python
#func_default.py
#2.7.6
def say(message, times=1):
    """Print *message* repeated *times* times (defaults to once)."""
    # print(...) with a single argument behaves identically on Python 2 and
    # Python 3, unlike the original ``print message * times`` statement,
    # which is a SyntaxError on Python 3.  The dead ``pass`` was removed.
    print(message * times)


say('Hello')
say('World', 5)
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 24 20:59:40 2018
诊断PM板命令脚本
@author: Administrator
"""
import pandas as pd
# =============================================================================
# Environment configuration: input spreadsheets (PmDevice exports) and the
# command files to generate.
# =============================================================================
data_path = r'D:\4G_voltage' + '\\'
ommb1_file = 'PmDevice_ommb1.xls'
ommb2_file = 'PmDevice_ommb2.xls'
pm_command_ommb1 = 'pm_command_ommb1.txt'
pm_command_ommb2 = 'pm_command_ommb2.txt'

df_ommb1 = pd.read_excel(data_path + ommb1_file, dtype = 'str', encoding = 'utf-8', skiprows = 1)
df_ommb2 = pd.read_excel(data_path + ommb2_file, dtype = 'str', encoding = 'utf-8', skiprows = 1)


def _write_pm_commands(df, out_path):
    """Write one 'BOARD DIAGNOSE' command per row of *df* to *out_path*.

    The subnet / NE / rack / shelf / slot fields are taken from the Chinese
    column headers of the PmDevice export.  The file handle is managed by a
    context manager so it is closed even on error.
    """
    with open(out_path, 'w', encoding='utf-8') as f:
        for i in range(len(df)):
            command = ('BOARD DIAGNOSE:SUBNET=' + df.loc[i, '子网'] +
                       ',NE=' + df.loc[i, '管理网元ID'] +
                       ',RACK=' + df.loc[i, 'RACK'] +
                       ',SHELF=' + df.loc[i, 'SHELF'] +
                       ',SLOT=' + df.loc[i, 'SLOT'] +
                       ',FUNCTION_ID=16777224;')
            f.write(command + '\n')


# The original duplicated this loop verbatim for OMMB1 and OMMB2; the shared
# helper removes the duplication without changing the generated files.
_write_pm_commands(df_ommb1, data_path + pm_command_ommb1)
_write_pm_commands(df_ommb2, data_path + pm_command_ommb2)
|
import os
import torch
import argparse
#from utils.train import *
from Decoder.DecoderRNN import *
from Encoder.encoderRNN import *
from Transformer import *
from Decoder.AttnDecoderRNN import *
# Run generation on the GPU.
# NOTE(review): torch.device("cuda") requires a CUDA machine -- confirm no
# CPU fallback is needed here.
device = torch.device("cuda")
print(device)

# Command-line interface: where to write the translations and which model
# checkpoint to load.
parser = argparse.ArgumentParser(description='Transformer Generate')
parser.add_argument('--output', type=str, default='', metavar='P',
                    help="filename to store translated text.")
parser.add_argument('--model', type=str, default='', metavar='P',
                    help="model to generate for.")
args = parser.parse_args()
def greedy_decode(model, src, src_mask, max_len, start_symbol):
    """Greedily decode up to *max_len* tokens from the encoded source.

    Starting from *start_symbol*, repeatedly picks the highest-probability
    next token and appends it to the output sequence.
    """
    memory = model.encode(src, src_mask)
    decoded = torch.ones(1, 1).fill_(start_symbol).type_as(src.data)
    for _ in range(max_len - 1):
        # Mask out future positions of the partial output.
        target_mask = Variable(subsequent_mask(decoded.size(1)).type_as(src.data))
        out = model.decode(memory, src_mask, Variable(decoded), target_mask)
        probs = model.generator(out[:, -1])
        _, best = torch.max(probs, dim=1)
        best = best.data[0]
        next_col = torch.ones(1, 1).type_as(src.data).fill_(best)
        decoded = torch.cat([decoded, next_col], dim=1)
    return decoded
BATCH_SIZE = 1
# Test iterator: one sentence per batch, no shuffling.
test_iter = MyIterator(test, batch_size=BATCH_SIZE, device=0,
                       repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
                       batch_size_fn=batch_size_fn, train=False)
# Build the model, move it to the GPU and load the trained weights.
model = make_model(len(SRC.vocab), len(TGT.vocab), N=2)
model.to(device)
model.load_state_dict(torch.load(args.model))
# BUG FIX: the output file was opened without a context manager and would
# leak if decoding raised; the inner loops also shadowed the outer batch
# index ``i`` -- they now use a distinct name.
with open(args.output, 'w') as f_out:
    for batch in test_iter:
        src = batch.src.to(device).transpose(0, 1)[:1]
        src_mask = (src != SRC.vocab.stoi["<blank>"]).unsqueeze(-2)
        out = greedy_decode(model, src, src_mask,
                            max_len=100, start_symbol=TGT.vocab.stoi["<s>"])
        batch.trg = batch.trg.to(device)
        # Detokenize the model output, stopping at the end-of-sentence tag.
        translated_line = "Translation: "
        for pos in range(1, out.size(1)):
            sym = TGT.vocab.itos[out[0, pos]]
            if sym == "</s>":
                break
            translated_line += sym + " "
        f_out.write(translated_line)
        # Detokenize the reference target the same way.
        actual_line = "\nTarget: "
        for pos in range(1, batch.trg.size(0)):
            sym = TGT.vocab.itos[batch.trg.data[pos, 0]]
            if sym == "</s>":
                break
            actual_line += sym + " "
        f_out.write(actual_line + "\n\n")
|
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient, AWSIoTMQTTClient
from OxhrmMonitor import OxiHRMonitor
import numpy as np
import time
# MQTT client / endpoint configuration.
clientid = 'basicPubSub2'
#HOST = 'arpzdkw2j3op9-ats.iot.ap-northeast-2.amazonaws.com'
HOST = 'iotcore.iot.ap-northeast-2.amazonaws.com'
CA = 'root-CA.crt'       # Amazon root CA certificate
PRI_KEY = 'private.key'  # device private key
CERT_KEY = 'cert.pem'    # device certificate
# Shadow handler (thing) name.  NOTE(review): "HANDELR" is a misspelling of
# HANDLER kept as-is since it is a module-level name.
HANDELR = 'iotcore'
#the print of state result after the method of shadowUpdate.
def shadow_update_Callback_test(payload, response_status, token):
    """Status callback for shadowUpdate: log the payload and response status.

    The token is received but deliberately not printed.
    """
    for line in ("Callback Test Init",
                 "Object Action Named Rule",
                 "$aws/things/crc-hj-rasp/shadow/update/#",
                 "payload = {}".format(payload),
                 "response_status = {}".format(response_status)):
        print(line)
#Device connect to AWS IoT Core applicatioin
myShadowClient = AWSIoTMQTTShadowClient(clientid, useWebsocket=True)
print('set client id')
myShadowClient.configureEndpoint(HOST, 443)  # MQTT over WebSocket on port 443
print('set configuration endpoint')
myShadowClient.configureCredentials(CA, PRI_KEY, CERT_KEY)
print('done certificatation')
# Reconnect backoff: base 1 s, max 32 s, 20 s stable-connection window.
myShadowClient.configureAutoReconnectBackoffTime(1, 32, 20)
myShadowClient.configureConnectDisconnectTimeout(10)
myShadowClient.configureMQTTOperationTimeout(5)
myShadowClient.connect()
print('connected!!')
testDevice_shadow = myShadowClient.createShadowHandlerWithName( HANDELR, True )
# parsing oximetry-heart-rate-log data
clss = OxiHRMonitor()
ctx = clss.serialContext('/dev/tty.KMU_MSPL-DevB')
g = clss.listen(ctx)
t_attr = None
# Forever: read one sample from the monitor and report it to the device
# shadow.  NOTE(review): res[0] appears to be the heart rate and res[1][0]
# the oxygen concentration -- confirm against OxiHRMonitor.listen().
while True:
    res = next(g)
    print('{"state"' + ':{"reported":' + '{"data": {"rate": "' + res[0] + '", "concen": "' + res[1][0] + '"}, "sensor": "oxhrm"}}}')
    testDevice_shadow.shadowUpdate(
        '{"state"' + ':{"reported":' + '{"data": {"rate": "' + res[0] + '", "concen": "' + res[1][0] + '"}, "sensor": "oxhrm"}}}',
        shadow_update_Callback_test, 5)
    print("hrm uploaded!")
    #time.sleep(1)
|
# views.py
#
# Authors:
# - Coumes Quentin <coumes.quentin@gmail.com>
import json
import logging
import os
import threading
import time
from io import SEEK_END
import docker
from django.conf import settings
from django.http import (HttpResponse, HttpResponseBadRequest, HttpResponseNotAllowed,
HttpResponseNotFound, JsonResponse)
from django.views.generic import View
from . import utils
from .containers import Sandbox
from .executor import Command, Executor
logger = logging.getLogger(__name__)
class EnvView(View):
    """Handle environment download."""

    @staticmethod
    def _set_attachment_headers(response, path, env):
        """Fill the length/type/disposition headers for environment *env*."""
        response["Content-Length"] = os.stat(path).st_size
        response['Content-Type'] = "application/gzip"
        response['Content-Disposition'] = f"attachment; filename={env}.tgz"
        return response

    def head(self, _, env):
        """Returns a response with status 200 if the environment <env> exists, 404 otherwise."""
        path = utils.get_env(env)
        if path is None:
            return HttpResponseNotFound(f"No environment with UUID '{env}' found")
        return self._set_attachment_headers(HttpResponse(), path, env)

    def get(self, _, env):
        """Return the environment with the UUID <env>, 404 if it does not exists."""
        path = utils.get_env(env)
        if path is None:
            return HttpResponseNotFound(f"No environment with UUID '{env}' found")
        with open(path, "rb") as f:
            response = HttpResponse(f.read())
        return self._set_attachment_headers(response, path, env)
class FileView(View):
    """Handle environment's file download."""

    def head(self, _, env, path):
        """Returns a response with status 200 if <path> point to a file the environment <env>,
        404 otherwise."""
        # NOTE(review): unlike EnvView, the result of utils.extract() is not
        # checked here -- confirm extract() raises (or is handled elsewhere)
        # for a missing file rather than returning None.
        file = utils.extract(env, path)
        response = HttpResponse()
        # seek(0, SEEK_END) yields the file size without reading its data.
        response["Content-Length"] = file.seek(0, SEEK_END)
        response['Content-Type'] = "application/octet-stream"
        response['Content-Disposition'] = ('attachment; filename=' + os.path.basename(path))
        return response

    def get(self, _, env, path):
        """Returns a response with status 200 if <path> point to a file the environment <env>,
        404 otherwise."""
        file = utils.extract(env, path)
        response = HttpResponse(file.read())
        # After read(), tell() equals the number of bytes served.
        response["Content-Length"] = file.tell()
        response['Content-Type'] = "application/octet-stream"
        response['Content-Disposition'] = ('attachment; filename=' + os.path.basename(path))
        return response
class SpecificationsView(View):
    """Expose the static specifications of this sandbox as JSON."""

    def get(self, _):
        """Returns the specs of the sandbox."""
        return JsonResponse(utils.specifications())
class UsageView(View):
    """Expose the current resource usage of this sandbox as JSON."""

    def get(self, _):
        """Returns the usage of the sandbox."""
        return JsonResponse(utils.usage())
class LibrariesView(View):
    """Expose the libraries available inside the execution containers."""

    def get(self, _):
        """Returns the libraries installed on the containers."""
        # Run the introspection script in a fresh container built from the
        # configured image; it prints a JSON document on stdout.
        response = docker.from_env().containers.run(
            settings.DOCKER_PARAMETERS["image"], "python3 /utils/libraries.py"
        )
        return JsonResponse(json.loads(response))
class ExecuteView(View):
    """Execute bash commands inside a sandbox container."""

    def post(self, request):
        """Allows to execute bash commands within an optional environment."""
        start = time.time()
        config = request.POST.get("config")
        if config is None:
            return HttpResponseBadRequest("Missing argument 'config'")
        try:
            config = json.loads(config)
            if not isinstance(config, dict):
                return HttpResponseBadRequest(f'config must be an object, not {type(config)}')
        except json.JSONDecodeError as e:
            return HttpResponseBadRequest(f"'config' json is invalid - {e}")
        # Extract the pieces of the request: environment, commands to run,
        # path of the result file, and whether the environment must be saved.
        env = utils.executed_env(request, config)
        commands = Command.from_config(config)
        result_path = utils.parse_result_path(config)
        save = utils.parse_save(config)
        logger.debug(f"Parsing config request took : {time.time() - start} seconds")
        sandbox = Sandbox.acquire()
        try:
            response = Executor(commands, sandbox, env, result_path, save).execute()
            logger.debug(f"Total execute request took : {time.time() - start} seconds")
            return JsonResponse(response)
        finally:
            # Release the sandbox in a background thread so the response is
            # not delayed by container cleanup.
            threading.Thread(target=sandbox.release).start()
|
import numpy
import theano
import theano.tensor as T
class Layer(object):
    """Base class for network layers.

    Subclasses set ``self.output`` (a symbolic expression) and override
    ``components()`` to expose their trainable shared variables.
    """

    def components(self):
        """Return the trainable shared variables owned by this layer."""
        return []

    def ancestors(self):
        """Return the flattened ancestors of every element of self.output."""
        # BUG FIX: the original comprehension was written as
        #   [x for x in y.ancestors() for y in self.output]
        # which evaluates ``y.ancestors()`` before ``y`` is bound (NameError).
        # Comprehension loop clauses must be ordered outer-first.
        return [x for y in self.output for x in y.ancestors()]

    # TODO
    def negative_log_loss(self, predictor):
        return - T.log(self.output[predictor.label])

    def get_component_values(self):
        # Memoize by replacing this bound method with the compiled theano
        # function on first call; subsequent calls hit the function directly.
        # NOTE(review): rebinding the *instance* attribute shadows the class
        # method -- confirm this self-replacing pattern is intended.
        self.get_component_values = theano.function(inputs=[], outputs=self.components())
        return self.get_component_values()
class LinearTransformation(Layer):
    """Affine layer: output = input . W + b, with zero-initialised parameters."""

    def __init__(self, input_object, output_size):
        # assert input_object.output_dimension =
        self.input_object = input_object
        self.output_size = output_size
        # Weight matrix (input_size x output_size), zero-initialised.
        self.W = theano.shared(
            value=numpy.zeros((input_object.output_size, output_size),
                              dtype=theano.config.floatX),
            name='W',
            borrow=True)
        # Bias vector, zero-initialised.
        self.b = theano.shared(
            value=numpy.zeros((output_size,),
                              dtype=theano.config.floatX),
            name='b',
            borrow=True)
        # Symbolic affine transformation of the upstream layer's output.
        self.output = T.dot(self.input_object.output, self.W) + self.b
        # Compiled function returning the current L1 penalty of W and b,
        # built eagerly at construction time.
        self.l1_regularization_values = theano.function(
            inputs=[], outputs=self.l1_regularization())

    def components(self):
        """The trainable shared variables of this layer."""
        return [self.W, self.b]

    def l1_regularization(self):
        """Symbolic mean-absolute-value penalty over W and b."""
        return T.mean(abs(self.W)) + T.mean(abs(self.b))
class NeuralLayer(LinearTransformation):
    """Affine transformation followed by the SigmoidLayer nonlinearity."""

    def __init__(self, input_object, output_size):
        LinearTransformation.__init__(self, input_object, output_size)
        # Keep the pre-activation output available separately.
        self.linear_transformation_output = self.output
        # NOTE(review): despite the name, SigmoidLayer applies a softmax.
        self.sigmoid = SigmoidLayer(self)
        self.output = self.sigmoid.output
        self.output_size = self.sigmoid.output_size
class SigmoidLayer(Layer):
    """Nonlinearity layer.

    NOTE(review): the name says sigmoid, but the implementation applies a
    softmax (see below) -- confirm which is intended.
    """

    def __init__(self, input_object):
        self.input_object = input_object
        self.output_size = input_object.output_size
        # TODO: comment this
        # softmax returns a matrix; [0] selects the first (only) row.
        self.output = T.nnet.softmax(self.input_object.output)[0]
class InputVector(object):
    """Source node wrapping a symbolic double vector named "input"."""

    def __init__(self, dimension):
        self.output_size = dimension
        self.output = theano.tensor.dvector("input")

    # TODO
    def reconstruction_cost(self, symbolic_reconstruction):
        pass
# -*- coding: utf-8 -*-
#
# ドラクエ10の冒険者の広場画像の一括ダウンロード
#
import getpass
import os
import re
import sys
import cookielib
import mechanize
from bs4 import BeautifulSoup as bs
def do_login(b, us, pw, ci):
    """Log in to the adventurer's square as *us*/*pw* and select character *ci*.

    sys.argv[2], when present, is used as the one-time password.
    """
    # Login page
    r = b.open('http://hiroba.dqx.jp/sc/login')
    print(b.geturl())
    b.select_form(name='mainForm')
    # Allow writing to read-only form fields.
    for f in b.form.controls:
        f.readonly = False
    b.form['_pr_confData_sqexid'] = us
    b.form['_pr_confData_passwd'] = pw
    b.form['_pr_confData_otppw'] = ''
    if len(sys.argv) > 2:
        b.form['_pr_confData_otppw'] = sys.argv[2]
    b.form['_event'] = 'Submit'
    b.submit()
    # Intermediate confirmation page
    print(b.geturl())
    b.select_form(name='mainForm')
    b.submit()
    # Character select page
    r = b.open('http://hiroba.dqx.jp/sc/login/characterselect/')
    print(b.geturl())
    b.select_form(nr=0)
    for f in b.form.controls:
        f.readonly = False
    b.form['cid'] = ci
    b.submit()
def test_login(b):
    """Verify that the browser *b* is logged in to the adventurer's square.

    Opens the home page and checks for the page-title element that is only
    present when a session is active.

    Raises:
        Exception: when the title element is missing (i.e. not logged in).
        Callers catch this broadly, so the exception type is kept generic;
        the original raised a bare ``Exception()`` with no message.
    """
    r = b.open('http://hiroba.dqx.jp/sc/home')
    s = bs(r)
    p = s.find_all('h1', {'id': 'cttTitle'})
    if len(p) == 0:
        raise Exception('not logged in: page title element "cttTitle" not found')
def get_browser(filename, proxy=None):
    """Build a logged-in mechanize browser from the config file *filename*.

    The file contains three lines: username, character id and download
    directory.  The password is prompted interactively.  A cookie jar is
    stored next to the config file so later runs can skip the login.

    Returns:
        A (browser, character_id, directory) tuple.
    """
    # username, character_id, directory
    # BUG FIX: use a context manager so the config file is always closed.
    with open(filename, 'r') as f:
        us = f.readline().strip()
        ci = f.readline().strip()
        dr = f.readline().strip()
    # ask for password
    pw = getpass.getpass()
    # setup browser
    b = mechanize.Browser()
    cj = cookielib.LWPCookieJar()
    b.set_cookiejar(cj)
    b.set_handle_equiv(True)
    b.set_handle_redirect(True)
    b.set_handle_referer(True)
    b.set_handle_robots(False)
    if proxy:
        b.set_proxies({'http': proxy, 'https': proxy})
    # check cookie
    try:
        cj.load(filename+'cookie', ignore_discard=True, ignore_expires=True)
        test_login(b)
        print('Successfully opened a cookie.')
    except Exception:
        # Cookie missing or stale: perform a fresh login and save the jar.
        # BUG FIX: the original bare ``except:`` would also swallow
        # KeyboardInterrupt and SystemExit.
        do_login(b, us, pw, ci)
        cj.save(filename+'cookie', ignore_discard=True, ignore_expires=True)
    print('Successfully logged in as {0}, Char#{1}'.format(us, ci))
    return (b, ci, dr)
def download_pics(b, ci, dr):
    """Download all character pictures into directory *dr*, skipping files
    that already exist locally."""
    urllist = ['http://hiroba.dqx.jp/sc/character/{0}/picture'.format(ci)]
    r = b.open(urllist[0])
    s = bs(r)
    # Collect pagination links; numbered page links carry text, next/prev do not.
    pages = s.find_all('a', {'href': re.compile(r'picture\/page')})
    for page in pages:
        if page.string:
            urllist.append('http://hiroba.dqx.jp' + page['href'])
    for url in urllist:
        r = b.open(url)
        s = bs(r)
        imgs = s.find_all('img', {'src': re.compile('img.dqx.jp')})
        for img in imgs:
            # Thumbnails point at 'thum2' variants; swap in the full-size image.
            # (Renamed from `url` to avoid shadowing the page-loop variable.)
            img_url = img['src'].replace('thum2', 'original')
            # Use the image id as the local filename.
            name = '{0}.jpg'.format(img_url.split('/')[-1])
            target = os.path.join(dr, name)
            # Fix: the original opened the file just to test existence and
            # never closed it, leaking one handle per already-saved image.
            if os.path.exists(target):
                print('File {0} already exists.'.format(name))
            else:
                b.retrieve(img_url, target)
                print('File {0} saved successfully.'.format(name))
if __name__ == '__main__':
    # Usage: script.py <configfile> [one-time-password]
    proxy = None
    if len(sys.argv) > 1:
        filename = sys.argv[1]
        (b, ci, dr) = get_browser(filename, proxy)
        download_pics(b, ci, dr)
    else:
        print('Please specify a filename.')
|
# Demonstrate isinstance/issubclass semantics: everything derives from
# object, and a tuple of classes means "any of these".
hello = 'Hello'
print(isinstance(hello, str))                   # True
print(isinstance(hello, object))                # True -- str derives from object
print(issubclass(str, object))                  # True
data = (20, 'fkit')
print(isinstance(data, (list, tuple)))          # True: the tuple class matches
print(issubclass(str, (list, tuple)))           # False
print(issubclass(str, (list, tuple, object)))   # True via object
import numpy as np
import zmq
from zmq_utils import send_array_fast, recv_array_fast
import time
import argparse
# CLI that measures the round-trip overhead of sending one fake image over a
# ZeroMQ REQ socket *samples* times and prints total/mean elapsed time.
parser = argparse.ArgumentParser("Overhead test CLI")
parser.add_argument("--height", type=int, default=2000)
parser.add_argument("--width", type=int, default=1000)
parser.add_argument("--channels", type=int, default=3)
parser.add_argument("--samples", type=int, default=100)
parser.add_argument("--addr", default="tcp://127.0.0.1:10000")
args = parser.parse_args()
addr = args.addr
context = zmq.Context()
server = context.socket(zmq.REQ)  # pylint: disable=no-member
server.connect(addr)
img_shape = [args.height, args.width, args.channels]
x = (np.random.random(img_shape) * 255).astype(np.uint8)  # fake image
print(f"{type(x)} {x.shape}")
time_begin = time.time()
for i in range(args.samples):
    # REQ/REP pair: every send must be matched by a receive.
    send_array_fast(server, x)
    recv_array_fast(server)
time_end = time.time()
time_elapsed = time_end - time_begin
print(f"time_elapsed={time_elapsed} mean={time_elapsed / args.samples}")
import os
import sys
import time
import subprocess
# Run ./build/s3-sample over a grid of retry counts and scale factors,
# appending each run's output to <dirname>/summary.txt.
retries = range(0, 15)
scale_factor = [10, 20, 25, 30, 40, 50]
print(sys.argv)
# Fix: argv[0] is the script name, so argument k lives at argv[k] and needs
# len(argv) >= k+1. The original tested >= 1 / >= 2, which always passed
# and then raised IndexError when the arguments were missing.
if len(sys.argv) >= 2:
    n = sys.argv[1]
else:
    # Keep the default as a string: it is concatenated into the command below
    # (the original int default would have raised a TypeError there).
    n = "1"
if len(sys.argv) >= 3:
    flambda = sys.argv[2]
else:
    flambda = "SLEEP5"
dirname = "build/" + flambda + "_retry_expt_" + str(time.time())
print("Creating directory " + dirname + "...")
os.mkdir(dirname)
for i in retries:
    for j in scale_factor:
        filename = dirname + "/" + "retry_" + str(i) + "_" + str(j)
        # NOTE(review): the command is built by string concatenation; fine for
        # trusted local use, but do not feed untrusted argv into it.
        os.system("./build/s3-sample " + n + " " + flambda + " " + filename + " " + str(i) + " " + str(j) + " >> " + dirname + "/summary.txt")
# Removed: fd.close() -- 'fd' was never defined and raised a NameError here.
|
#!/usr/bin/env python
"""
jmyers may 11 2010
march 29 2012: Get rid of MITI format and write things in fullerDiaSource format.
Split up the "fullerDiaSource"-format DIAsources by night and put them in separate
files.
apr. 3 2012: Also, write out a per-obsHist file which holds all dias from a given image.
"""
# note that midnight at LSST is MJD+0.125 (or 0.166) days
# (MJD = integer at midnight UTC, Chile/LSST local time is
# UTC -4 hours in standard,-3 hours daylight savings.
# which translates to midnight @ LSST = MJD + 0.125/0.166)
# which means ~NOON to NOON observations cover
# ~'night'-0.35 to ~'night' + 0.65 (where 'night' = int(MJD) at midnight)
# a gap would be okay because we're don't observe that close to noon
night_start = -0.35
night_end = 0.65
OBSCODE='807'
import sys
import os.path
def getNightNum(mjd):
    """Determine the integer night number for any MJD.

    The +0.5 - 0.12 offset shifts the integer boundary away from midnight UTC
    so that all observations from one local LSST night (which straddle the
    integer-MJD boundary) map to the same night number.
    """
    # Fix: the original read `night = night = int(...)`; the duplicated
    # assignment was redundant and has been dropped.
    return int(mjd + 0.5 - 0.12)
if __name__=="__main__":
    # Python 2 script: uses print statements and the `print>>file` form.
    if len(sys.argv)<2:
        print "Usage: splitByNight.py filename nightlyOutputDir byObsHistOutputDir"
        print " where filename = the input diasource file "
        print " dia sources broken up by night will go in nightlyOutputDir"
        print " dia sources broken up by image will go in byObsHistOutputDir"
        sys.exit(1)
    infile = open(sys.argv[1], 'r')
    outDir1 = sys.argv[2]
    outDir2 = sys.argv[3]
    prev_night = None
    # Read diasources from input file.
    for line in infile:
        # Whitespace-separated columns: ids, sky position, epoch, photometry.
        diaId, obshistId, ssmId, ra, decl, MJD, mag, snr = line.split()
        diaId, obshistId, ssmId = map(int, [diaId, obshistId, ssmId])
        ra, decl, MJD, mag = map(float, [ra, decl, MJD, mag])
        # Determine the night number of this particular diasource and write to that file.
        nightNum = getNightNum(MJD)
        # Open new output file if needed.
        # NOTE(review): mode "aw" is nonstandard (plain append "a" is meant);
        # previous handles are never closed, and rows are assumed to arrive
        # grouped by night -- unsorted input reopens night files repeatedly.
        if nightNum != prev_night:
            outfile = open(os.path.join(outDir1, str(nightNum) + ".dias"), "aw")
            prev_night = nightNum
        # Write output line.
        print>>outfile, line.rstrip()
        # Also append the row to a per-obsHist file (one file per source image).
        outfile2 = open(os.path.join(outDir2, str(obshistId) + ".dias"), "aw")
        print>>outfile2, line.rstrip()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__date__ = '2018/4/3 18:53'
__author__ = 'ooo'
import torch
import torch.utils.model_zoo as model_zoo
from torchvision.models.resnet import BasicBlock, Bottleneck, model_urls
from torch import nn
import math
import visdom
import os
class ResNet(nn.Module):
    """
    Adapted from torchvision.models.resnet.ResNet.

    forward() can return the feature maps of multiple stages
    [C1, C2, C3, C4, C5] so they can be used to build an FPN.
    """
    def __init__(self, block, layers, num_classes=1000):
        # block: BasicBlock or Bottleneck; layers: residual blocks per stage.
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He-style initialization for convolutions; identity for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        # Stack `blocks` residual blocks; give the shortcut a 1x1 projection
        # when the spatial stride or channel count changes.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x, stages=['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6']):
        """Return the feature maps named in *stages*; early-exits when only a
        prefix is requested.

        NOTE(review): the mutable default list is shared across calls; it is
        never mutated here so behavior is unaffected, but keep it that way.
        """
        # stages: subset in ['C0', C1','C2','C3','C4','C5','C6']
        # BACKBONE_CHANNELS = [3, 64, 256, 512, 1024, 2048, 2048]
        # BACKBONE_STRIDES = [1, 2, 4, 8, 16, 32, 39.38]
        C0 = x
        if stages == ['C0']:
            return [C0]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        C1 = x
        if stages == ['C0', 'C1']:
            return [C0, C1]
        x = self.maxpool(x)
        # C1 = x
        x = self.layer1(x)
        C2 = x
        if stages == ['C0', 'C1', 'C2']:
            return [C0, C1, C2]
        x = self.layer2(x)
        C3 = x
        if stages == ['C0', 'C1', 'C2', 'C3']:
            return [C0, C1, C2, C3]
        x = self.layer3(x)
        C4 = x
        if stages == ['C0', 'C1', 'C2', 'C3', 'C4']:
            return [C0, C1, C2, C3, C4]
        x = self.layer4(x)
        C5 = x
        # Detection pipelines do not need the fully connected output x.
        x = self.avgpool(x)
        C6 = x
        # x = x.view(x.size(0), -1)
        # x = self.fc(x)
        # P0, P1, P2, P3, P4, P5, P6 = [C0.data.cpu(), C1.data.cpu(), C2.data.cpu(), C3.data.cpu(), C4.data.cpu(), C5.data.cpu(), C6.data.cpu()]
        # vs = visdom.Visdom()
        # vs.images(C0.data.cpu())
        # vs.images(P1[:, 3:6, :, :])
        # vs.images(P2[:, 3:6, :, :])
        # vs.images(P3[:, 3:6, :, :])
        # vs.images(P4[:, 3:6, :, :])
        # vs.images(P5[:, 3:6, :, :])
        fmaps = [C0, C1, C2, C3, C4, C5, C6]
        # Select stages by digit: 'C3' in *stages* keeps index 3, etc.
        fmaps = [fmaps[i] for i in range(len(fmaps)) if str(i) in ''.join(stages)]
        return fmaps
def resnet18(pretrained=False, model_dir=None, model_name=None, **kwargs):
    """Construct a ResNet-18 model.

    Args:
        pretrained (bool): If True, load pre-trained ImageNet weights.
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if not pretrained:
        return model
    return resnet_load(model, 'resnet18', model_dir, model_name)
def resnet34(pretrained=False, model_dir=None, model_name=None, **kwargs):
    """Construct a ResNet-34 model.

    Args:
        pretrained (bool): If True, load pre-trained ImageNet weights.
    """
    model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if not pretrained:
        return model
    return resnet_load(model, 'resnet34', model_dir, model_name)
def resnet50(pretrained=False, model_dir=None, model_name=None, **kwargs):
    """Construct a ResNet-50 model.

    Args:
        pretrained (bool): If True, load pre-trained ImageNet weights.
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if not pretrained:
        return model
    return resnet_load(model, 'resnet50', model_dir, model_name)
def resnet101(pretrained=False, model_dir=None, model_name=None, **kwargs):
    """Construct a ResNet-101 model.

    Args:
        pretrained (bool): If True, load pre-trained ImageNet weights.
    """
    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if not pretrained:
        return model
    return resnet_load(model, 'resnet101', model_dir, model_name)
def resnet152(pretrained=False, model_dir=None, model_name=None, **kwargs):
    """Construct a ResNet-152 model.

    Args:
        pretrained (bool): If True, load pre-trained ImageNet weights.
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if not pretrained:
        return model
    return resnet_load(model, 'resnet152', model_dir, model_name)
def resnet_load(model, arch_name, model_dir, model_name, dowload=False):
    """Fill *model* with pre-trained weights.

    Loads model_dir/model_name when it is readable; otherwise downloads the
    torchvision checkpoint if *dowload* is True (parameter name kept as-is,
    typo included, for keyword-argument compatibility with existing callers).
    """
    model_path = os.path.join(model_dir, model_name)
    if os.access(model_path, os.R_OK):
        state = torch.load(f=model_path)
        model.load_state_dict(state)
        return model
    if dowload:
        model.load_state_dict(model_zoo.load_url(model_urls[arch_name], model_dir))
        return model
    raise Exception('无法找到resnet预训练文件,请手动下载到指定路径,或开启自动下载.')
def resnet(arch, pretrained=False, model_dir=None, model_name=None, include=None):
    """
    :param arch: one of 'resnet18'/'resnet34'/'resnet50'/'resnet101'/'resnet152'
    :param pretrained: load pre-trained weights when True
    :param include: include layers (currently unused by the builders)
    :return: constructed model
    """
    arch = arch.lower()
    # Resolve the builder first, then call it once -- avoids repeating the
    # argument list in every branch.
    if arch == 'resnet18':
        builder = resnet18
    elif arch == 'resnet34':
        builder = resnet34
    elif arch == 'resnet50':
        builder = resnet50
    elif arch == 'resnet101':
        builder = resnet101
    elif arch == 'resnet152':
        builder = resnet152
    else:
        raise ValueError('错误的arch代码!')
    return builder(pretrained, model_dir, model_name)
def vgg(arch, pretrained=False, model_dir=None, model_name=None, include=None):
    # Placeholder: VGG backbones are not supported yet.
    raise NotImplementedError

def desnet(arch, pretrained=False, model_dir=None, model_name=None, include=None):
    # Placeholder: DenseNet ("desnet" in this codebase) is not supported yet.
    raise NotImplementedError
def backbone(arch, pretrained=False, model_dir=None, model_name=None, include=None):
    """Dispatch to a backbone family (resnet / vgg / desnet) by substring of *arch*."""
    # Guard-clause chain instead of if/elif/else; first matching family wins.
    if 'resnet' in arch:
        return resnet(arch, pretrained, model_dir, model_name, include)
    if 'vgg' in arch:
        return vgg(arch, pretrained, model_dir, model_name, include)
    if 'desnet' in arch:
        return desnet(arch, pretrained, model_dir, model_name, include)
    raise ValueError('Unknown Backbone Model: %s' % arch)
|
# Repaired tutorial snippet. The original had Python 2 print statements,
# iterated a misspelled name ('deserts'), compared the whole list to a
# string, and contained two outright syntax errors (a trailing ':' after a
# print and an unquoted string in the else branch).
count = 0
while count < 9:
    print(count)
    count = count + 1
print("good bye")
for a in range(0, 11, 12):
    # Step 12 over 0..10 yields only 0.
    print(a)
desserts = ["ice cream", "chocolate", "asana"]
special_dessert = "chocolate"
for dessert in desserts:
    # Compare each item (not the list) against the favorite.
    if dessert == special_dessert:
        print(dessert + " is my favorite dessert")
    else:
        print(dessert + " is not my favorite dessert")
|
from fastapi import APIRouter, Depends
from fastapi.security import HTTPBasic, HTTPBasicCredentials
from app.core.routers.auth import get_current_username
from starlette.responses import HTMLResponse, JSONResponse
router = APIRouter()
# Basic-auth helper; not used by the route below (kept for future endpoints).
security = HTTPBasic()

@router.get("/get", description="Hello World!", response_description="Some text")
async def get():
    """Trivial demo endpoint returning a plain HTML greeting."""
    return HTMLResponse("Hello!")
|
import argparse
from naoqi import ALProxy
def main(robot_ip, port=9559):
    """Wake the NAO robot and open its left hand.

    robot_ip -- address of the robot's NAOqi broker
    port     -- NAOqi port (default 9559)
    """
    motion_proxy = ALProxy("ALMotion", robot_ip, port)
    # Fix: the ALMotion API method is wakeUp() (capital U); the original
    # wakeup() call would fail on a real proxy.
    motion_proxy.wakeUp()
    motion_proxy.openHand('LHand')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--ip", type=str, default="127.0.0.1",
                        help="Robot ip address")
    parser.add_argument("--port", type=int, default=9559,
                        help="Robot port number")
    args = parser.parse_args()
    main(args.ip, args.port)
else:
    # Python 2 print statement (the NAOqi SDK is Python 2 only).
    print 'This program is meant to be run as main'
class Equipment():
    """Base class for all gear; subclasses override the stat class attributes."""
    name = ''
    speed = 0
    blocking = 0
    agility = 0
    scoring = 0

    def __init__(self, **kwargs):
        # Mirror every keyword argument onto the instance, e.g.
        # Equipment(adjective='awesome') yields self.adjective == 'awesome'.
        for attr, val in kwargs.items():
            setattr(self, attr, val)
# Player equipment
# Sticks: trade skating speed against scoring ability.
class Good_Stick(Equipment):
    name = 'Good Stick'
    speed = -1
    scoring = 1

class Awesome_Stick(Equipment):
    name = 'Awesome Stick'
    speed = -2
    scoring = 3

class Light_Stick(Equipment):
    name = 'Light Stick'
    speed = 5
    scoring = -2

# Helmets
class Better_Helmet(Equipment):
    name = 'Better Helmet'
    scoring = 0
    speed = 1

# Skates
class Better_Skates(Equipment):
    name = 'Better Skates'
    speed = 3
# Goalie Equipment
# Pads: trade blocking strength against agility.
class Better_Pads(Equipment):
    name = 'Better Pads'
    blocking = 2
    agility = -1

class Great_Pads(Equipment):
    name = 'Great Pads'
    blocking = 3
    agility = -2

class Light_Pads(Equipment):
    name = 'Light Pads'
    blocking = -2
    agility = 4

# Helmets (goalie variant of Better_Helmet; same display name)
class G_Better_Helmet(Equipment):
    name = 'Better Helmet'
    blocking = 0
    agility = 1

# Skates (goalie variant of Better_Skates; same display name)
class G_Better_Skates(Equipment):
    name = 'Better Skates'
    agility = 2
'''
Finding the Largest or Smallest N Items
Problem:
You want to make a list of the largest or smallest N items in a collection.
'''
import heapq

# Example 1: nlargest/nsmallest on a plain list of numbers.
nums = [1, 8, 56, 45, 32, 5, 70, 23, 12, 6, 4, 34, 16, 35, 76, 50]
print(heapq.nlargest(3, nums))   # [76, 70, 56]
print(heapq.nsmallest(3, nums))  # [1, 4, 5]

# Example 2: a key function lets them rank arbitrary records.
portfolio = [
    {'name': 'IBM', 'shares': 100, 'price': 91.1},
    {'name': 'AAPL', 'shares': 50, 'price': 543.22},
    {'name': 'FB', 'shares': 200, 'price': 21.09},
    {'name': 'HPQ', 'shares': 35, 'price': 31.75},
    {'name': 'YHOO', 'shares': 45, 'price': 16.35},
    {'name': 'ACME', 'shares': 75, 'price': 115.65}
]
cheap = heapq.nsmallest(2, portfolio, key=lambda s: s['price'])
expensive = heapq.nlargest(2, portfolio, key=lambda s: s['price'])
print(cheap)
# [{'name': 'YHOO', 'shares': 45, 'price': 16.35}, {'name': 'FB', 'shares': 200, 'price': 21.09}]
print(expensive)
# [{'name': 'AAPL', 'shares': 50, 'price': 543.22}, {'name': 'ACME', 'shares': 75, 'price': 115.65}]

# Discussion: heapify arranges the list in place into heap order, after
# which heappop always yields the current smallest element.
print('nums: ', nums)
heap = list(nums)
heapq.heapify(heap)
print('sort nums:', heap)
# Popping three times therefore yields the 3 smallest numbers.
print(heapq.heappop(heap), heapq.heappop(heap), heapq.heappop(heap))
|
from planner_project import app
from flask import request
from planner_project.data_access import mysql
from planner_project.common import api_response, request_back_helper, custom_error
from planner_project.sql.backweb import config_sql
# Fetch the full list of base configuration entries.
@app.route("/backweb/config/select_base_config_list", methods=['POST'])
def select_base_config_list():
    """Return every row of the base-config table as JSON."""
    ApiResponse = api_response.ApiResponse()
    ApiResponse.data = mysql.get_list(config_sql.select_config_list, ())
    ApiResponse.message = "成功"
    ApiResponse.status = 200
    return api_response.response_return(ApiResponse)
# Insert a new base configuration entry.
@app.route("/backweb/config/insert_base_config", methods=['POST'])
def insert_base_config():
    """Create a config row from the posted Key/Value/Remark/imageUrl fields."""
    ApiResponse = api_response.ApiResponse()
    Key = request.form.get("Key", type=str, default=None)
    if Key is None or Key == "":
        raise custom_error.CustomFlaskErr(status_code=500, message="key不能为空")
    Value = request.form.get("Value", type=str, default=None)
    Remark = request.form.get("Remark", type=str, default=None)
    imageUrl = request.form.get("imageUrl", type=str, default=None)
    # operate_object presumably returns the affected row count; <= 0 means
    # the insert failed -- confirm against the mysql helper.
    if mysql.operate_object(config_sql.insert_config, (Key, Value, Remark, imageUrl)) <= 0:
        raise custom_error.CustomFlaskErr(status_code=500, message="新增失败")
    ApiResponse.message = "成功"
    ApiResponse.status = 200
    return api_response.response_return(ApiResponse)
# Delete a base configuration entry by key.
@app.route("/backweb/config/delete_base_config", methods=['POST'])
def delete_base_config():
    """Delete the config row whose Key matches the posted form field."""
    ApiResponse = api_response.ApiResponse()
    Key = request.form.get("Key", type=str, default=None)
    if Key is None or Key == "":
        raise custom_error.CustomFlaskErr(status_code=500, message="key不能为空")
    # Fix: (Key) is just a parenthesized string, not a tuple. DB-API style
    # parameter sequences need the one-element tuple (Key,) -- the sibling
    # insert endpoint passes a real tuple, so this one should too.
    if mysql.operate_object(config_sql.delete_config, (Key,)) <= 0:
        raise custom_error.CustomFlaskErr(status_code=500, message="删除失败")
    ApiResponse.message = "成功"
    ApiResponse.status = 200
    return api_response.response_return(ApiResponse)
# Fetch a single configuration entry by Id.
@app.route("/backweb/config/get_base_config_by_id", methods=['POST'])
def get_base_config_by_id():
    """Return the config row whose Id matches the posted form field."""
    ApiResponse = api_response.ApiResponse()
    Id = request.form.get("Id", type=int, default=0)
    if Id <= 0:
        raise custom_error.CustomFlaskErr(status_code=500, message="Id不能为空")
    # Fix: (Id) is just the integer, not a tuple. DB-API style parameter
    # sequences need the one-element tuple (Id,) like the other endpoints.
    ApiResponse.data = mysql.get_object(config_sql.get_base_config_by_id, (Id,))
    ApiResponse.message = "成功"
    ApiResponse.status = 200
    return api_response.response_return(ApiResponse)
# Update a single configuration entry by Id.
@app.route("/backweb/config/update_base_config_by_id", methods=['POST'])
def update_base_config_by_id():
    """Update Value/Remark/imageUrl of the config row with the posted Id."""
    ApiResponse = api_response.ApiResponse()
    Id = request.form.get("Id", type=int, default=0)
    if Id <= 0:
        raise custom_error.CustomFlaskErr(status_code=500, message="Id不能为空")
    Value = request.form.get("Value", type=str, default=None)
    Remark = request.form.get("Remark", type=str, default=None)
    imageUrl = request.form.get("imageUrl", type=str, default=None)
    # NOTE(review): the sibling write endpoints use mysql.operate_object for
    # DML; using get_list for an UPDATE looks inconsistent -- confirm intent.
    ApiResponse.data = mysql.get_list(config_sql.update_config, (Value, Remark, imageUrl, Id))
    ApiResponse.message = "成功"
    ApiResponse.status = 200
    return api_response.response_return(ApiResponse)
from django.contrib import admin
from home.models import Contact
from home.models import smoothie
from home.models import receipe
from home.models import frontimage
# Register your models here.
# Expose the site's content models in the Django admin interface.
admin.site.register(Contact)
admin.site.register(smoothie)
admin.site.register(receipe)
admin.site.register(frontimage)
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def getMinimumDifference(self, root):
        """
        Return the minimum absolute difference between the values of any two
        nodes in a binary search tree.

        :type root: TreeNode
        :rtype: int

        In-order traversal of a BST visits values in ascending order, so the
        minimum difference is always between two consecutively visited nodes;
        there is no need to collect all pairwise gaps in a list first.
        """
        self.prev = None           # value of the previously visited node
        self.best = float('inf')   # smallest gap seen so far

        def inorder(node):
            # Guard on None: the original crashed when called with an empty
            # subtree root; also replaced `!= None` with idiomatic `is not None`.
            if node is None:
                return
            inorder(node.left)
            if self.prev is not None:
                # Ascending order => node.val - self.prev is the local gap.
                self.best = min(self.best, node.val - self.prev)
            self.prev = node.val
            inorder(node.right)

        inorder(root)
        return self.best
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Plot the pixel-value histogram of a dark image and save it as PNG.
img = cv2.imread("imori_dark.jpg")
# NOTE(review): cv2.imread returns None when the file is missing, in which
# case ravel() raises -- confirm the image ships next to this script.
plt.hist(img.ravel(), bins=255, rwidth=0.8, range=(0, 255))
plt.savefig("my_answer_20.png")
plt.show()
|
#coding:utf-8
from timeit import Timer
# li1 = [1,2]
# li2 = [23,5]
# li = li1 + li2 #列表迭代
# li = [i for i in range(10000)] #列表生成器
# li = list(range(10000)) #直接转换成列表
# Five list-construction strategies to be timed below. Each is deliberately
# written in its own style -- do not "simplify" them, the point is comparison.
def t1():
    # Append one element at a time to an empty list.
    li = []
    for i in range(10000):
        li.append(i)  # append adds a single value

def t2():
    # Rebuild the list with + on every iteration (copies the whole list).
    li = []
    for i in range(10000):
        li = li + [i]

def t3():
    # List comprehension.
    li = [i for i in range(10000)]

def t4():
    # Direct conversion of the range to a list.
    li = list(range(10000))

def t5():
    # extend() takes a list (or any iterable) rather than a single value.
    li = []
    for i in range(10000):
        li.extend([i])
# Time each strategy over 1000 runs and print the totals for comparison.
timer1 = Timer("t1()", "from __main__ import t1")
print("append:", timer1.timeit(1000))
timer2 = Timer("t2()", "from __main__ import t2")
print("+:", timer2.timeit(1000))
timer3 = Timer("t3()", "from __main__ import t3")
print("[i for i in range]:", timer3.timeit(1000))
timer4 = Timer("t4()", "from __main__ import t4")
print("list(range):", timer4.timeit(1000))
timer5 = Timer("t5()", "from __main__ import t5")
print("extend:", timer5.timeit(1000))
|
# https://colab.research.google.com/notebooks/magenta/onsets_frames_transcription/onsets_frames_transcription.ipynb
import tensorflow as tf
import librosa
import numpy as np
from magenta.common import tf_utils
from magenta.music import audio_io
import magenta.music as mm
from magenta.models.onsets_frames_transcription import configs
from magenta.models.onsets_frames_transcription import constants
from magenta.models.onsets_frames_transcription import data
from magenta.models.onsets_frames_transcription import split_audio_and_label_data
from magenta.models.onsets_frames_transcription import train_util
from magenta.music import midi_io
from magenta.protobuf import music_pb2
from magenta.music import sequences_lib
## Define model and load checkpoint
## Only needs to be run once.
config = configs.CONFIG_MAP['onsets_frames']
hparams = config.hparams
# CPU-friendly inference settings: no cuDNN kernels, single-example batches.
hparams.use_cudnn = False
hparams.batch_size = 1
# Placeholder fed with serialized tf.Example strings at inference time.
examples = tf.placeholder(tf.string, [None])
dataset = data.provide_batch(
    examples=examples,
    preprocess_examples=True,
    hparams=hparams,
    is_training=False)
CHECKPOINT_DIR = '/Users/junhoyeo/Desktop/magenta-school-song/maestro-v1.0.0'
# change to downloaded checkpoint path
estimator = train_util.create_estimator(
    config.model_fn, CHECKPOINT_DIR, hparams)
# Initializable iterator so the same graph can be re-fed with new examples.
iterator = dataset.make_initializable_iterator()
next_record = iterator.get_next()
|
from flask import render_template, request
from app import app
from resources.functions import *
# Public site pages. Each route fetches a content dict from a helper in
# resources.functions and splats it into the template context.
@app.route('/')
def index():
    content = index_content()
    return render_template('user/index.html', **content)

@app.route('/home')
def home():
    # Renders the same template as '/' but with home-specific content.
    content = home_content()
    return render_template('user/index.html', **content)

@app.route('/about')
def about():
    content = about_content()
    return render_template('user/about.html', **content)

@app.route('/products')
def products():
    content = product_content()
    return render_template('user/pricing.html', **content)

@app.route('/gallery')
def gallery():
    content = gallery_content()
    # Gallery additionally lists registered users.
    content['users'] = get_users()
    return render_template('user/gallery.html', **content)

@app.route('/blog')
def blog():
    content = blog_content()
    return render_template('user/blog.html', **content)

@app.route('/contact')
def contact():
    content = contact_content()
    return render_template('user/contact.html', **content)
@app.route('/admin_home', methods=['POST', "GET"])
def admin_home():
    """Admin landing page.

    NOTE(review): the POSTed email/password are read but never checked and
    both branches render the same page -- there is no actual login gate here.
    """
    if request.method == 'POST':
        email = request.form['email']
        password = request.form['password']
        content = admin_home_content()
    else:
        content = admin_home_content()
    return render_template('admin/home.html', **content)
# Admin tool pages: each fetches a content dict from a resources.functions
# helper and renders the matching admin template.
@app.route('/new_blog')
def new_blog():
    content = new_blog_content()
    return render_template('admin/new_blog.html', **content)

@app.route('/blog_tool')
def blog_tool():
    content = blog_tool_content()
    return render_template('admin/blog_tool.html', **content)

@app.route('/about_tool')
def about_tool():
    content = about_tool_content()
    return render_template('admin/about_tool.html', **content)

@app.route('/product_tool')
def product_tool():
    content = product_tool_content()
    return render_template('admin/product_tool.html', **content)

@app.route('/gallery_tool')
def gallery_tool():
    content = gallery_tool_content()
    return render_template('admin/gallery_tool.html', **content)

@app.route('/company_tool')
def company_tool():
    content = company_tool_content()
    return render_template('admin/company_tool.html', **content)

@app.route('/account_tool')
def account_tool():
    content = account_tool_content()
    return render_template('admin/account_tool.html', **content)

@app.route('/user_tool')
def user_tool():
    content = user_tool_content()
    return render_template('admin/user_tool.html', **content)

@app.route('/email')
def email():
    # No content helper exists for this page yet; render an empty context.
    content = {}
    return render_template('admin/email.html', **content)
|
import unittest
from node import Node
class TestNodeClass(unittest.TestCase):
    """Unit tests for the linked-list Node class."""

    def testConstructor(self):
        # A fresh node stores its head value and has no tail.
        testNode = Node(1)
        self.assertEqual(testNode.getHead(), 1)
        self.assertEqual(testNode.getTail(), None)

    def testConcatenation(self):
        # setTail wraps the raw value in a new Node instance.
        testNode = Node(1)
        testNode.setTail(2)
        self.assertIsInstance(testNode.getTail(), Node)
        self.assertEqual(testNode.getTail().getHead(), 2)

    def testHeadDefine(self):
        # setHead replaces the stored head value.
        testNode = Node(1)
        self.assertEqual(testNode.getHead(), 1)
        testNode.setHead(2)
        self.assertEqual(testNode.getHead(), 2)

    def testDeleters(self):
        # amputateTail/decapitate reset the tail/head to None.
        testNode = Node(1)
        testNode.setTail(2)
        print(testNode.toString())
        testNode.amputateTail()
        self.assertIsNone(testNode.getTail())
        testNode.decapitate()
        self.assertIsNone(testNode.getHead())

if __name__ == '__main__':
    unittest.main()
from os import listdir
from os.path import join, exists
from PIL import Image
import numpy as np
import torch
import torch.utils.data as data
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from skimage import io, feature, color, img_as_uint, util
from skimage.transform import resize
from util import is_image_file, load_img
class DatasetFromFolder(data.Dataset):
    """Colorization dataset: grayscale input, color target, plus the adjacent
    frame (color and gray) for temporal context.

    Assumes files in *image_dir* are named like frame<N>.jpg -- TODO confirm.
    """
    def __init__(self, image_dir):
        super(DatasetFromFolder, self).__init__()
        self.photo_path = image_dir
        self.image_filenames = [x for x in listdir(self.photo_path) if is_image_file(x)]
        transform_list = [transforms.ToTensor()]
        self.transform = transforms.Compose(transform_list)

    def __getitem__(self, index):
        # Load Image
        target_path = join(self.photo_path, self.image_filenames[index])
        # Parse the numeric id out of "...frame<N>.jpg"; +1 targets the next
        # frame. NOTE(review): splitting on the letter "e" assumes no other
        # 'e' occurs after the digits -- verify the naming scheme.
        frame_num = target_path.split("e")[-1]
        frame_num = int(frame_num.split(".")[0]) + 1
        frame_1, frame_1_gray = self.get_prev(frame_num)
        target = load_img(target_path)
        # NOTE: `input` shadows the builtin; kept as-is to preserve behavior.
        input = color.rgb2gray(target)
        input = Image.fromarray(input)
        frame_1 = self.transform(frame_1)
        frame_1_gray = self.transform(frame_1_gray)
        target = self.transform(target)
        input = self.transform(input)
        return input, target, frame_1, frame_1_gray

    def __len__(self):
        return len(self.image_filenames)

    def get_prev(self, num):
        """Return (color, gray) for frame *num*, falling back to frame num-1
        when frame<num>.jpg does not exist (e.g. past the final frame)."""
        if not exists(join(self.photo_path, "frame" + str(num) + ".jpg")):
            prev = load_img(join(self.photo_path, "frame" + str(num - 1) + ".jpg"))
            prev_gray = color.rgb2gray(prev)
            prev_gray = Image.fromarray(prev_gray)
            return prev, prev_gray
            # frame_1="nothing!"
        else:
            prev = load_img(join(self.photo_path, "frame" + str(num) + ".jpg"))
            prev_gray = color.rgb2gray(prev)
            prev_gray = Image.fromarray(prev_gray)
            return prev, prev_gray
            # frame_1 = join(self.photo_path,"frame"+str(frame_num)+".jpg")
# Smoke test: build a loader over a local directory, then print a few
# random numbers to confirm the script runs end to end.
train_path = "E:/DBZ_Dataset/Tf_Baseline/Tester"
train_set = DatasetFromFolder(train_path)
training_data_loader = DataLoader(dataset=train_set, num_workers=0, batch_size=5, shuffle=False)
for i in range(10):
    x = np.random.uniform(0, 1)
    print(x)
# Educational wordlist generator: writes every permutation of the supplied
# characters (lengths len1..len2) into myfile.txt next to this script.
from itertools import permutations
import os
# input() already returns str; the str() wrapper is redundant but harmless.
cases = str(input("Give all the letters or numbers separated by space : ")).split(' ')
# De-duplicate, then drop the empty token produced by repeated spaces.
cases = set(cases)
if '' in cases:
    cases.remove('')
len1 = int(input("Give the lowest length of the password : "))
len2 = int(input("Give the highest length of the password : "))
with open('myfile.txt', 'w') as fp:
    for i in range(len1, len2 + 1):
        print("Running Case " + str(i))
        # All orderings of i distinct characters (no repetition within a word).
        values = list(permutations(cases, i))
        values = [''.join(value) for value in values]
        # print(values)
        for x in values:
            fp.write(x + "\n")
    # NOTE(review): redundant -- the with-statement already closes the file.
    fp.close()
|
import json
import logging
from flask import Flask, request, Response
from flask_cors import CORS
import pricewars_merchant
from models import SoldOffer
def json_response(message):
    """Serialize *message* to JSON and wrap it in an HTTP 200 response."""
    payload = json.dumps(message)
    return Response(payload, status=200, mimetype='application/json')
class MerchantServer:
    """HTTP control surface for a Pricewars merchant: exposes settings,
    start/stop state, and a callback invoked when an offer is sold."""

    def __init__(self, merchant: 'pricewars_merchant.PricewarsMerchant', logging_level=logging.WARNING):
        self.merchant = merchant
        self.app = Flask(__name__)
        # Allow cross-origin requests (the management UI runs on another origin).
        CORS(self.app)
        logging.basicConfig()
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging_level)
        self.register_routes()

    def register_routes(self):
        # REST endpoints consumed by the Pricewars platform / management UI.
        self.app.add_url_rule('/settings', 'get_settings', self.get_settings, methods=['GET'])
        self.app.add_url_rule('/settings', 'put_settings', self.update_settings, methods=['PUT', 'POST'])
        self.app.add_url_rule('/settings/execution', 'set_state', self.set_state, methods=['POST'])
        self.app.add_url_rule('/settings/execution', 'get_state', self.get_state, methods=['GET'])
        self.app.add_url_rule('/sold', 'item_sold', self.item_sold, methods=['POST'])

    def get_settings(self):
        """Return the merchant's current settings as JSON."""
        return json_response(self.merchant.settings)

    def update_settings(self):
        """Merge the posted JSON into the settings and echo the result back."""
        self.merchant.update_settings(request.json)
        self.logger.debug('Update settings ' + str(self.merchant.settings))
        return self.get_settings()

    def get_state(self):
        """Report the merchant's execution state."""
        return json_response({'state': self.merchant.state})

    def set_state(self):
        """Start or stop the merchant based on the posted 'nextState' field."""
        next_state = request.json['nextState']
        self.logger.debug('Execution setting - next state: ' + next_state)
        if next_state == 'start':
            self.merchant.start()
        elif next_state == 'stop':
            self.merchant.stop()
        return json_response({})

    def item_sold(self):
        """Forward a sale notification to the merchant.

        Errors are logged rather than raised so a malformed payload cannot
        take the endpoint down (deliberate best-effort handling).
        """
        try:
            offer = SoldOffer.from_dict(request.get_json(force=True))
            self.merchant.sold_offer(offer)
        except Exception as e:
            self.logger.error(e)
        return json_response({})
|
"""
THE LIST CLASS -----------------------------------------------------------
"""
# Since Python treats a list as an object (and each of its elements as
# objects too), it provides methods for common list operations.

# LENGTH -------------------------------------------------------------------
# len() returns how many elements a list contains.
perros = ["San Bernardo", "Alaska Malamute", "Chihuahua", "Golden Retriver"]
print("Elementos en lista perros: ", len(perros))
# Counting nested lists: len() counts only the top-level rows.
gatos = [
    ["calasio", "rendin", "rost"],
    ["Uren", "rayn", "pardo"]
]
print("Elementos en lista gatos: ", len(gatos))
print("Elementos Indice 0 lista gatos: ", len(gatos[0]))
# Index -1 addresses the last element of a list.
print("Ultimo elemento de lista indice -1:", perros[-1])
import os
import sys
import re
# Project root: four directory levels above this file.
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
# Locations of the Suricata rule sets shipped under docs/.
DefaultRulePath = os.path.join(PROJECT_DIR, *['docs', 'suricata_home', 'emerging.rules', 'rules'])
AutoTakeRulePath = os.path.join(PROJECT_DIR, *list("docs/suricata_home/suricata-5.0.0/rules".split('/')))
EmergingRulePath = DefaultRulePath
FedoraRulePath = os.path.join(PROJECT_DIR, *['docs', 'suricata_home', 'suricata.rules'])
# ClassificationMappingPath = os.path.join(PROJECT_DIR, *[DefaultRulePath, 'classification.config'])
ClassificationMappingPath = os.path.join(PROJECT_DIR, *['apps', 'xrule', 'docs', 'classification.config'])
# When True, parse_rule_line keeps the raw rule text in each parsed dict.
Add_RuleTxt = True
from parse_rule import parse_rule_line
def get_filestrs_from_txtfile(filename, file_dir=DefaultRulePath):
    """Return the text lines of *file_dir*/*filename*.

    Script-driven only, so no file locking is used.

    :param filename: file name
    :param file_dir: containing directory
    :return: list of lines, each keeping its trailing newline
    """
    # The with-statement closes the handle; the explicit f.close() that the
    # original placed *inside* the with-block was redundant and was removed.
    with open(os.path.join(file_dir, filename), "r", encoding='utf-8') as f:
        return f.readlines()
def list_all_rulefiles(file_dir=DefaultRulePath):
    """List directory entries of *file_dir* whose names match '.*?\\.rules'."""
    pattern = re.compile(r".*?\.rules")
    return [entry for entry in os.listdir(file_dir) if pattern.match(entry)]
def get_rules_parsed_by_filename(filename, file_dir=DefaultRulePath):
    """Parse one .rules file into a list of rule dicts."""
    # from .parse_rule import parse_rule_line
    rule_lines = get_filestrs_from_txtfile(filename, file_dir)
    res = []
    for line in rule_lines:
        line_parsed = parse_rule_line(line, detail=Add_RuleTxt)
        if line_parsed:
            if "classtype" not in line_parsed.keys():
                # Rules with no classification are filed under this default.
                line_parsed['classtype'] = 'protocol-command-decode'
            res.append(line_parsed)
            # Record the source file. This mutates the dict already appended
            # above -- same object, so the field is visible inside res.
            line_parsed['belong_file'] = filename
    return res
def get_emerging_classes():
    """Parse the Suricata classification.config into a list of dicts.

    Expected line format (no stray spaces around the commas):
        config classification:shortname,short description,priority

    :return: [{'shortname': str, 'short_description': str, 'priority': int}, ...]
    Note: a translation script could be run per entry to add a `cn_name`
    field if localized names are needed.
    """
    # The with-statement closes the file; the original's in-block f.close()
    # was redundant and has been removed.
    with open(ClassificationMappingPath, "r", encoding='utf-8') as f:
        lines = f.readlines()
    classifications = []
    for line in lines:
        # Raw string so \d is an explicit regex escape rather than relying on
        # Python passing the unknown string escape through.
        matched = re.match(r"config classification: (.*?),(.*?),(\d+).*?", line)
        if matched:
            classifications.append(dict(
                shortname=matched.group(1).strip(),
                short_description=matched.group(2).strip(),
                priority=int(matched.group(3).strip()),
            ))
    return classifications
def get_emerging_rules(file_dir=EmergingRulePath, *args, **kwargs):
    """Parse every .rules file directly under *file_dir* into one flat list."""
    parsed = []
    for rule_file in list_all_rulefiles(file_dir=file_dir):
        parsed.extend(get_rules_parsed_by_filename(file_dir=file_dir, filename=rule_file))
    return parsed
def parse_file_path_abs_dir(dirpath='E:\\workspace\\ids_project\docs\\suricata_home'):
    """Recursively collect absolute paths of all *.rules files under dirpath.

    :param dirpath: root directory to scan
    :return: list of full paths to .rules files
    """
    collect_rule_files = []
    # os.walk replaces the original hand-rolled recursion; endswith is both
    # stricter and clearer than the original pattern '.*?.rules$', whose
    # unescaped dot matched any character (e.g. 'foorules').
    for root, _dirs, files in os.walk(dirpath):
        for name in files:
            if name.endswith('.rules'):
                collect_rule_files.append(os.path.join(root, name))
    return collect_rule_files
class RuleManager:
    """Rule-management helpers: collecting, merging and diffing rule sets."""

    @staticmethod
    def get_all_rules_based_dir(file_dir):
        """Parse every *.rules file directly under file_dir."""
        res = []
        for name in list_all_rulefiles(file_dir=file_dir):
            res.extend(get_rules_parsed_by_filename(file_dir=file_dir, filename=name))
        return res

    @staticmethod
    def push__all_in_one_file(file_dir, saved_path='all_in_one_rule.rules'):
        """Concatenate all rules under file_dir into a single rules file."""
        rules = RuleManager.get_all_rules_based_dir(file_dir)
        # with-statement closes the file; the original called close() twice.
        with open(saved_path, "w+", encoding='utf-8') as f:
            for _rule in rules:
                f.write(_rule['rule_line'])
        return

    @staticmethod
    def parse_sigle_rulefile(path=FedoraRulePath):
        """Parse a single rule file given by absolute path."""
        # file_dir='' makes os.path.join pass `path` through unchanged.
        return get_rules_parsed_by_filename(file_dir='', filename=path)

    @staticmethod
    def collected_rules_by_dirpath(dirpath='E:\\workspace\\ids_project\docs\\suricata_home'):
        """Recursively parse every *.rules file found anywhere under dirpath."""
        paths = parse_file_path_abs_dir(dirpath=dirpath)
        res = []
        for x in paths:
            _current_rule_sets = get_rules_parsed_by_filename(x)
            res.extend(_current_rule_sets)
        return res

    @staticmethod
    def get_not_collected_rules(dirpath='E:\\workspace\\ids_project\docs\\suricata_home'):
        """Return the rules on disk whose sid is not yet stored in the DB.

        BUG FIX: the original computed ``current - (current | have_collected)``,
        which is always the empty set, and then kept rules whose sid was
        *not* in it — i.e. it returned every rule unconditionally. The sid
        set difference below implements the intended "not yet collected"
        semantics.
        """
        current_rules = RuleManager.collected_rules_by_dirpath(dirpath=dirpath)
        from xrule.models import IpsRule
        have_collected = set(x.sid for x in IpsRule.objects.all())
        current = set(x['sid'] for x in current_rules)
        _need_ids = current - have_collected
        return [x for x in current_rules if x['sid'] in _need_ids]
if __name__ == '__main__':
    # Export "sid,msg" pairs for every parsed rule so they can be fed to a
    # translation script later.
    rules = get_rules_parsed_by_filename('/root/suricata/suricata.rules')
    _logtxt = ['{sid},{signature}'.format(sid=x['sid'], signature=x['msg']) for x in rules]
    # The with-statement closes the file; the original also called
    # f.close() redundantly inside the block.
    with open('tran.txt', 'w+', encoding='utf-8') as f:
        f.write('\n'.join(_logtxt))
|
import os
import glob
import tensorflow as tf
# train (first 2975), validation (last 500)
train_pattern = os.path.join(os.getcwd(), 'leftImg8bit', 'train', '*', '*.png')
val_pattern = os.path.join(os.getcwd(), 'leftImg8bit', 'val', '*', '*.png')
image_paths = sorted(glob.glob(train_pattern)) + sorted(glob.glob(val_pattern))
total = len(image_paths)
for index, image_path in enumerate(image_paths, start=1):
    # Decode PNG -> bilinear resize to 512x1024 -> cast back to uint8 for JPEG.
    raw = tf.io.read_file(image_path)
    image = tf.image.decode_png(raw, channels=3)
    image = tf.image.resize(image, (512, 1024), method='bilinear')
    image = tf.cast(image, tf.uint8)
    out_name = f'cityscapes_processed/images/{str(index).zfill(5)}.jpg'
    tf.io.write_file(out_name, tf.image.encode_jpeg(image))
    if index % 100 == 0:
        print(f'{index}/{total}')
|
from django.db import models
# Create your models here.
class table(models.Model):
    """An order record: customer name, a number, the items and the price.

    NOTE(review): the class name violates PascalCase (should be e.g.
    ``Table``), but renaming would break existing migrations/references.
    """
    name = models.CharField(max_length=50)    # customer name (used as str())
    number = models.CharField(max_length=15)  # presumably a phone or table number — TODO confirm
    items = models.CharField(max_length=100)  # ordered items as free text
    price = models.DecimalField(decimal_places=2, max_digits=6)  # up to 9999.99
    def __str__(self):
        return self.name
class users(models.Model):
    """Minimal credential record (username/password).

    NOTE(review): the password is stored in plain text with max_length=15;
    consider Django's built-in auth system instead of a hand-rolled model.
    """
    username = models.CharField(max_length=15)  # login name (used as str())
    password = models.CharField(max_length=15)  # plain-text password — see note above
    def __str__(self):
        return self.username
#coding=utf-8
"""
#第一题
name1 = str(raw_input ("Please enter fist name: "))
name2 = str(raw_input ("Please enter second name: "))
name3 = str(raw_input ("Please enter third name: "))
name4 = str(raw_input ("Please enter fourth name: "))
name5 = str(raw_input ("Please enter fifth name: "))
list = [name1, name2, name3, name4, name5]
print "The names are ", list
"""
"""
#第二题
name1 = raw_input ("Please enter fist name: ")
name2 = raw_input ("Please enter second name: ")
name3 = raw_input ("Please enter third name: ")
name4 = raw_input ("Please enter fourth name: ")
name5 = raw_input ("Please enter fifth name: ")
list1 = [name1, name2, name3, name4, name5]
list2 = list1.sort()
print "The names are ", list2
""""
""""
输出结果:The names are None
错误未知,应为list2的定义有关
""""
|
import traceback
from PyQt5 import QtWidgets
from add_auto_cust import Ui_Dialog
from db_tools import autowork_db
from datetime import time, timedelta, datetime
from count_parts_dialog import Count_Parts
from count_orders_dialog import Count_Orders
from extended_qtablewidgetitem import Ext_TableItem
class AddAutoCust(QtWidgets.QDialog):
    """Modal dialog that registers a new car-service order for a client.

    Collects car details, lets the user move services between an
    "available" and a "chosen" table (with spare-part quantities picked in
    a nested dialog), keeps running totals of cost and duration, and
    writes the order to the database on confirmation.
    """
    def __init__(self, connection, cursor, *args, **kwargs):
        super(AddAutoCust, self).__init__()
        self.dial_ui = Ui_Dialog()
        # Reuse the caller's live DB connection/cursor.
        self.db = autowork_db()
        self.db.connection = connection
        self.db.cursor = cursor
        # args = (client id, full name "surname name patronymic")
        self.id_client, self.fio = args
        self.cost = 0       # running total for chosen services
        self.part_cost = 0  # running total for spare parts
        self.duration = time(0,0,0)  # accumulated work duration
        self.duration.strftime("%H:%M:%S")  # NOTE(review): result discarded — looks like a no-op
        self.dial_ui.setupUi(self)
        self.dial_ui.retranslateUi(self)
        self.fill_data()
        self.fill_comp()
        self.fill_mark()
        self.fill_usluga()
        # Refill the model list whenever the car make changes.
        self.dial_ui.markAuto.currentTextChanged.connect(self.fill_mark)
        self.dial_ui.addUsluga.clicked.connect(self.add_uslugi)
        self.dial_ui.delUsluga.clicked.connect(self.del_uslugi)
        self.dial_ui.pushButton.clicked.connect(self.insert_data)
        self.exec_()  # run modally right away
    def fill_usluga(self):
        """Populate the table of available services.

        Each row: service name (with its id tucked into an Ext_TableItem),
        cost, and duration — presumably from (id, name, cost, duration)
        tuples; confirm against autowork_db.get_uslugi().
        """
        self.dial_ui.ableUsluga.setRowCount(len(self.db.get_uslugi()))
        for i, items in enumerate(self.db.get_uslugi()):
            self.dial_ui.ableUsluga.setItem(i, 0, \
                Ext_TableItem(items[1], items[0]))
            self.dial_ui.ableUsluga.setItem(i, 1, \
                QtWidgets.QTableWidgetItem(str(items[2])))
            self.dial_ui.ableUsluga.setItem(i, 2, \
                QtWidgets.QTableWidgetItem(str(items[3])))
    def add_uslugi(self):
        """Move the selected service into the 'chosen' table and recount totals."""
        self.dial_ui.chosedUsluga.setRowCount(self.dial_ui.ableUsluga.rowCount())
        try:
            row = self.dial_ui.ableUsluga.currentRow()
            mark = self.dial_ui.markAuto.currentText()
            model = self.dial_ui.modelAuto.currentText()
            id_serv = self.dial_ui.ableUsluga.item(row, 0).id_item
            serv_cost = int(self.dial_ui.ableUsluga.item(row, 1).text())  # NOTE(review): unused below
            # Ask for quantity / spare parts via a nested dialog.
            dialog = Count_Orders(self.db.connection, self.db.cursor, mark, model, id_serv)
            try:
                data, kol_vo, part_cost = dialog.data, dialog.value, dialog.part_cost
            except:
                # Dialog cancelled: these attributes were never set.
                return
            # Move the three service columns from 'available' to 'chosen'.
            for i in range(3):
                item = self.dial_ui.ableUsluga.takeItem(row, i)
                self.dial_ui.chosedUsluga.setItem(row, i, item)
            self.dial_ui.chosedUsluga.setItem(row, 3, QtWidgets.QTableWidgetItem(str(kol_vo)))
            self.dial_ui.chosedUsluga.setItem(row, 4, Ext_TableItem(str(part_cost), data))
            self.count_cost_and_time(row)
        except Exception as e:
            print(traceback.format_exc())
    def del_uslugi(self):
        """Move the selected service back to 'available' and recount totals."""
        try:
            row = self.dial_ui.chosedUsluga.currentRow()
            for i in range(3):
                item = self.dial_ui.chosedUsluga.takeItem(row, i)
                self.dial_ui.ableUsluga.setItem(row, i, item)
            # Subtract before clearing the quantity cell: count_cost_and_time
            # still reads column 3 of the chosen row.
            self.count_cost_and_time(row, False)
            self.dial_ui.chosedUsluga.setItem(row, 3, QtWidgets.QTableWidgetItem(" "))
        except Exception as e:
            print(e)
    def count_cost_and_time(self, row, add=True):
        """Update running totals of service cost, part cost and duration.

        :param row: row index in the relevant table
        :param add: True when a service was just added, False when removed
        """
        try:
            if add:
                # cost = unit cost (col 1) * quantity (col 3)
                self.cost += int(self.dial_ui.chosedUsluga.item(row, 1).text())*int(self.dial_ui.chosedUsluga.item(row, 3).text())
                self.part_cost += int(self.dial_ui.chosedUsluga.item(row, 4).text())
                # Duration stored as "H:M:S" in column 2; add it quantity times.
                time_ = tuple(map(int, self.dial_ui.chosedUsluga.item(row, 2).text().split(":")))
                timeEdit = [self.duration.hour, self.duration.minute, self.duration.second]
                for x in range(int(self.dial_ui.chosedUsluga.item(row, 3).text())):
                    for i in range(3):
                        timeEdit[i] += time_[i]
                # Manual carry: minutes -> hours, then seconds -> minutes.
                while timeEdit[1] >= 60:
                    timeEdit[0] += 1
                    timeEdit[1] -= 60
                if timeEdit[2] >= 60: timeEdit[1] += 1; timeEdit[2] -= 60
                self.duration = time(*timeEdit)
            if not add:
                # Mirror of the add-branch, subtracting instead.
                self.cost -= int(self.dial_ui.ableUsluga.item(row, 1).text())*int(self.dial_ui.chosedUsluga.item(row, 3).text())
                self.part_cost -= int(self.dial_ui.chosedUsluga.item(row, 4).text())
                time_ = tuple(map(int, self.dial_ui.ableUsluga.item(row, 2).text().split(":")))
                timeEdit = [self.duration.hour, self.duration.minute, self.duration.second]
                try:
                    for x in range(int(self.dial_ui.chosedUsluga.item(row, 3).text())):
                        for i in range(3):
                            timeEdit[i] -= time_[i]
                except:
                    # Quantity cell unreadable (e.g. already cleared): subtract once.
                    for i in range(3):
                        timeEdit[i] -= time_[i]
                # Manual borrow: hours -> minutes, then minutes -> seconds.
                while timeEdit[1] < 0:
                    timeEdit[0] -= 1
                    timeEdit[1] += 60
                if timeEdit[2] < 0: timeEdit[1] -= 1; timeEdit[2] += 60
                self.duration = time(*timeEdit)
            # Refresh the summary widgets.
            self.dial_ui.costEdit.setText(str(self.cost))
            self.dial_ui.durationEdit.setText(str(self.duration))
            self.dial_ui.costPartEdit.setText(str(self.part_cost))
            self.dial_ui.resultEdit.setText(str(self.part_cost+self.cost))
        except Exception as e:
            print(e)
    def insert_data(self):
        """Validate the form, write the order plus its services to the DB, close."""
        id_usluga = []
        try:
            car_number = self.dial_ui.numberEdit.text()
            mark = self.dial_ui.markAuto.currentText()
            model = self.dial_ui.modelAuto.currentText()
            id_auto = self.db.get_car(mark, model)
            vincode = self.dial_ui.vincodeEdit.text()
            enginecode = self.dial_ui.engineEdit.text()
            milleage = self.dial_ui.milliageEdit.text()
            prod_year = self.dial_ui.yearEdit.text()
            cost = self.dial_ui.resultEdit.text()  # NOTE(review): read but never used below
            # Reject implausible production years.
            if int(prod_year) > int(datetime.now().year) \
                    or int(prod_year) < 1950:
                msg = QtWidgets.QMessageBox()
                msg.setIcon(QtWidgets.QMessageBox.Critical)
                msg.setText("Год не может быть больше " +
                    str(datetime.now().year) + " и меньше 1950")
                msg.setWindowTitle("Ошибка")
                msg.exec_()
                return
            # Collect (service id, parts data) for every chosen row; rows
            # without Ext_TableItems (no .id_item) raise and are skipped.
            for i in range(self.dial_ui.chosedUsluga.rowCount()):
                try:
                    id_usluga.append((self.dial_ui.chosedUsluga.item(i, 0).id_item,
                        self.dial_ui.chosedUsluga.item(i, 4).id_item))
                except Exception as e:
                    continue
            id_z = self.db.insert_zakaz(self.id_client, *id_auto, car_number,
                self.duration, vincode, enginecode, milleage, prod_year)
            # Insert one service row per unit; with parts, one row per part unit.
            for id_serv, items in id_usluga:
                if isinstance(items[0], int):
                    for i in range(items[0]):
                        self.db.insert_uslugi_zakaz(*id_z, id_serv)
                else:
                    for id_part, count_p in items:
                        for j in range(count_p):
                            self.db.insert_uslugi_zakaz(*id_z, id_serv, id_part)
        except Exception as e:
            print(traceback.format_exc())
        self.close()
    def fill_data(self):
        """Fill in the client's surname, name, patronymic and phone number."""
        fio = self.fio.split(' ')
        self.dial_ui.famEdit.setText(fio[0])
        self.dial_ui.nameEdit.setText(fio[1])
        self.dial_ui.fathEdit.setText(fio[2])
        self.dial_ui.phoneEdit.setText(str(self.db.get_phone(self.id_client)[0]))
    def fill_comp(self):
        """Populate the car-make combo box."""
        for mark in self.db.get_companies():
            self.dial_ui.markAuto.addItem(str(*mark))
    def fill_mark(self):
        """Repopulate the model combo box for the currently selected make."""
        self.dial_ui.modelAuto.clear()
        value = self.dial_ui.markAuto.currentText()
        for model in self.db.get_models(value):
            self.dial_ui.modelAuto.addItem(str(*model))
|
# Generated by Django 2.2.7 on 2020-10-24 17:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates Question, Type, Novels, Choice."""
    initial = True
    dependencies = [
    ]
    operations = [
        # Poll question with its publication date.
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_text', models.CharField(max_length=200)),
                ('pub_date', models.DateTimeField(verbose_name='date published')),
            ],
        ),
        # Novel category/type lookup table.
        migrations.CreateModel(
            name='Type',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('Type_Name', models.CharField(default='ประเภท', max_length=100)),
            ],
        ),
        # A novel: cover image, title, author, body text, and its Type.
        migrations.CreateModel(
            name='Novels',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Fimg', models.CharField(max_length=200)),
                ('FictionName', models.CharField(max_length=200)),
                ('FWriterName', models.CharField(max_length=200)),
                ('Material', models.CharField(max_length=10000)),
                ('TypeName', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myweb.Type')),
            ],
        ),
        # Poll answer choice linked to a Question, with a vote counter.
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_text', models.CharField(max_length=200)),
                ('votes', models.IntegerField(default=0)),
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myweb.Question')),
            ],
        ),
    ]
|
def isBacktoHome(comands):
    """Return True when the moves in `comands` end back at the origin.

    'U' and 'D' cancel each other out, as do 'L' and 'R'; any other
    character is ignored.
    """
    deltas = {'U': (1, 0), 'D': (-1, 0), 'L': (0, 1), 'R': (0, -1)}
    pos_a = pos_b = 0
    for move in comands:
        da, db = deltas.get(move, (0, 0))
        pos_a += da
        pos_b += db
    return pos_a == 0 and pos_b == 0
# Read one line of moves and report whether they return to the origin.
print("True" if isBacktoHome(input()) else "False")
def solution(keymap, targets):
    """For each target, sum the fewest key presses needed per character.

    best[c] is the minimum 1-based position of character c across all key
    strings in keymap; a target containing an unmapped character scores -1.
    """
    best = {}
    for keys in keymap:
        for pos, ch in enumerate(keys, 1):
            if ch not in best or pos < best[ch]:
                best[ch] = pos
    answers = []
    for target in targets:
        total = 0
        for ch in target:
            if ch not in best:
                total = -1
                break
            total += best[ch]
        answers.append(total)
    return answers
|
from flask_sqlalchemy import SQLAlchemy
from .user import UserRepository
# Shared SQLAlchemy instance; presumably bound to the app elsewhere via
# db.init_app(app) — confirm at the application factory.
db = SQLAlchemy()
# Explicit public API of this package.
__all__ = ["UserRepository"]
|
import scrapy
import re
from googletrans import Translator
from crawler.items import NewItem
# Google Translate client with fallback service hosts. Currently unused:
# the translation call inside parse() is commented out.
translator = Translator(service_urls=[
    'translate.google.com',
    'translate.google.co.kr',
])
class ExampleSpider(scrapy.Spider):
    """Scrapes newly listed startups from startupranking.com."""
    name = "newStartup"
    allowed_domains = ["startupranking.com"]
    start_urls = [
        'https://www.startupranking.com/startup/new',
    ]

    def parse(self, response):
        """Yield one record per table row: company, description, country."""
        companies = response.css("tbody > tr > td > div.name > a::text").extract()
        descriptions = response.css("tbody > tr > td.description::text").extract()
        countries = response.css("td > a::attr('href')").extract()
        for company, description, country in zip(companies, descriptions, countries):
            # Collapse newlines and repeated spaces in the description.
            yield {
                'company': company.strip(),
                'description': re.sub(' +', ' ', description.replace('\n', ' ').strip()),
                'country': country.strip(),
            }
|
import numpy as np
import pandas as pd
# 3x2 labelled frame: columns x/y, rows a/b/c.
f = pd.DataFrame({"x": [1, 3, 5], "y": [2, 4, 6]}, index=["a", "b", "c"])
print(f)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 12 17:36:46 2018
@author: Henri_2
This script trains a recursive neural network with midi data
"""
from __future__ import print_function
import pickle
from make_sequence import make_sequence
from get_notes import get_notes
from create_network import create_network
from train import train
def main():
    """Train the recurrent network on (duration, pitch) note pairs.

    The notes list alternates pitch, duration, pitch, duration, ...; it
    can be extracted from MIDI files via get_notes() or, as here, loaded
    from the pickled 'data/notes' file.
    """
    # notes = get_notes()  # alternative: extract directly from MIDI files
    with open('data/notes', 'rb') as filepath:
        notes = pickle.load(filepath)

    # Even indices hold pitches, odd indices hold durations
    # (replaces the original index-parity loop; the original also created
    # a dead `note_tubles = []` that was immediately reassigned).
    pitches = notes[0::2]
    durations = notes[1::2]

    # Pair them up as (duration, pitch) tuples.
    note_tubles = list(zip(durations, pitches))

    # All unique pairs, plus a lookup table mapping each pair to an
    # integer id for the network.
    notenames = sorted(set(item for item in note_tubles))
    tubles_to_int = dict((tuble, number) for number,
                         tuble in enumerate(notenames))
    n_unique = len(set(note_tubles))

    # Build training sequences, build a matching model, and train it.
    network_input, network_output = make_sequence(note_tubles, n_unique,
                                                  notenames, tubles_to_int)
    model = create_network(network_input, n_unique)
    train(model, network_input, network_output)


if __name__ == '__main__':
    main()
"""
剑指 Offer 25. 合并两个排序的链表
输入两个递增排序的链表,合并这两个链表并使新链表中的节点仍然是递增排序的。
示例1:
输入:1->2->4, 1->3->4
输出:1->1->2->3->4->4
限制:
0 <= 链表长度 <= 1000
注意:本题与主站 21 题相同:https://leetcode-cn.com/problems/merge-two-sorted-lists/
date : 12-16-2020
"""
# Definition for singly-linked list.
from typing import Optional
class ListNode:
    """A node in a singly linked list."""
    def __init__(self, x):
        # Payload value and (initially empty) link to the next node.
        self.val, self.next = x, None
class Solution:
    def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> Optional[ListNode]:
        """Merge two ascending linked lists into one ascending list.

        Uses a dummy head node; stable on ties (the node from l1 goes first).
        """
        dummy = ListNode(0)
        tail = dummy
        a, b = l1, l2
        while a and b:
            if a.val <= b.val:
                tail.next, a = a, a.next
            else:
                tail.next, b = b, b.next
            tail = tail.next
        # Append whatever remains of the non-exhausted list (or None).
        tail.next = a if a else b
        return dummy.next
if __name__ == '__main__':
    # Demo: merge 1->2->4 with 1->3->4 and print 1 1 2 3 4 4, one per line.
    def build(values):
        head = None
        for v in reversed(values):
            node = ListNode(v)
            node.next = head
            head = node
        return head

    merged = Solution().mergeTwoLists(build([1, 2, 4]), build([1, 3, 4]))
    while merged:
        print(merged.val)
        merged = merged.next
|
# step_02_dbpedia_website_links.py
"""
This script interprets the URLs of the municipalities' websites obtained
from DBPedia. At a later stage these URLs are used to find the respective
transparency portals.
Usage:
python step_02_dbpedia_website_links.py
Este script interpreta as URLs dos sites dos municípios a
partir da DBPedia. Em uma etapa posterior essas URLs são usadas para
encontrar os respectivos portais da transparência.
"""
import re
import os
import urllib
import yaml
import logging
import pandas as pd
from frictionless import Package
# Input: geographic reference data (UF abbreviations and municipality codes).
GEO_FOLDER = '../../../data/auxiliary/geographic'
GEO_FILE = 'municipality.csv'
# Output: candidate website links, pending later verification.
OUTPUT_FOLDER = '../../../data/unverified'
OUTPUT_FILE = 'municipality-website-candidate-links.csv'
# Matches everything before the first '(' or ',' in a name.
re_remove_parenthesis = re.compile(r'[^(,]+')
def get_config(file_name: str = 'config.yaml') -> dict:
    """Read configuration from a YAML file and precompute the query URLs.

    For every query of every source, the SPARQL file is read, URL-encoded
    and combined with the source endpoint into a ready-to-fetch 'url' key.

    Returns:
        The configuration dict containing all relevant config data.
    """
    with open(file_name, 'r') as stream:
        config = yaml.safe_load(stream)
    for source in config['sources']:
        endpoint = source['endpoint']
        for query in source['queries']:
            with open(query['sparql_file'], 'r') as sparql:
                encoded = urllib.parse.urlencode({'query': sparql.read()})
            query['url'] = (
                f'{endpoint}?'
                f'default-graph-uri=&{encoded}'
                f'&{query["options"]}'
            )
    return config
def remove_parenthesis(text: str) -> str:
    """Remove a trailing parenthesised part from a string.

    Many city names are followed by the state name in parenthesis; this
    keeps only the (trimmed) part before the first '(' or ','.

    Args:
        text (str): The text to process, usually a name that may or may
            not contain parenthesis.

    Returns:
        str: The text without the part in parenthesis.
    """
    if not text:
        return text
    head = re.match(r'[^(,]+', text)
    return head.group().strip() if head else text.strip()
def get_dbpedia_links_dataframe(query_url: str) -> pd.DataFrame:
    """Get a clean pd.DataFrame containing the desired links from the
    csv file obtained at the given url.

    Args:
        query_url (str): A url that returns a csv file in the desired
            format.

    Returns:
        pd.DataFrame: a cleaned up Pandas dataframe containing the
            discovered links.
    """
    # read data frame from url to csv
    table = pd.read_csv(query_url)
    # do some cleaning:
    # - no need for the city URIs column
    table.drop('city', axis=1, inplace=True)
    # - remove parenthesis (and anything after a comma) in city/state names
    table['name'] = table.name.fillna('').apply(remove_parenthesis)
    table['state'] = table.state.fillna('').apply(remove_parenthesis)
    # get the state (UF) abbreviations as the DBPedia data does not contain them
    geodata = Package(os.path.join(GEO_FOLDER,'datapackage.json'))
    # adjust column names and types of the UF resource
    uf = (
        geodata.get_resource('uf').to_pandas()
        .rename(columns={'name': 'state'})
        .drop('code', axis=1)  # no need to keep the state code
    )
    uf['state'] = uf['state'].astype('category')
    # merge the abbreviations back into the DBPedia data, replacing the
    # full state name by its abbreviation under 'uf'
    table = (
        table
        .merge(uf, on='state')
        .drop('state', axis=1)
        .rename(columns={'abbr': 'uf'})
    )
    # get the municipality codes as the DBPedia data does not contain them
    mun = geodata.get_resource('municipality').to_pandas()
    # merge the data (on the shared columns) and remove duplicate rows
    table = (
        table
        .merge(mun)
        .drop_duplicates()
    )
    # melt the 4 link columns into (link_type, link) pairs, one per row
    table = pd.melt(table, id_vars=['name', 'uf', 'code'], var_name='link_type', value_name='link')
    table.drop_duplicates(inplace=True)
    # remove empty lines and duplicate links (first occurrence wins)
    table.dropna(subset=['link'], inplace=True)
    table.drop_duplicates(subset=['link'], keep='first', inplace=True)
    logging.info('Got %d links from "%s".', len(table), query_url)
    return table
def clean_dbpedia_links(table: pd.DataFrame) -> pd.DataFrame:
    """Clean a DBPedia links dataframe, fixing some links and removing
    unwanted links.

    Args:
        table (pd.DataFrame): A Pandas dataframe containing links,
            structured like the output of get_dbpedia_links_dataframe

    Returns:
        pd.DataFrame: A clean Pandas dataframe.
    """
    # Links we do not want at all: files, generic portals, social networks,
    # search engines, wiki projects and recursive DBPedia references.
    unwanted_patterns = (
        r'\.(?:pdf|png|jpg|gif|bmp)$',                      # links to files
        r'ibge\.gov\.br\/?',                                # generic IBGE links
        r'blogspot\.com\/?',                                # generic Blogspot links
        r'facebook\.com\/?',                                # generic Facebook links
        r'yahoo\.com\/?',                                   # generic Yahoo links
        r'(?:googleusercontent\.com|google\.com\.br)\/?',   # generic Google links
        r'wiki(?:media|pedia|source)\.org\/?',              # Wikimedia projects
        r'dbpedia\.org\/?',                                 # recursive DBPedia links
        r'//www\.\w{2}\.gov\.br\/?$',                       # generic state websites
    )
    for pattern in unwanted_patterns:
        table = table[~table.link.str.contains(pattern, na=False, regex=True)]

    def _fix_links(pattern, fix):
        """Apply `fix` to every link matching `pattern`."""
        mask = table.link.str.contains(pattern, na=False, regex=True)
        table.loc[mask, 'link'] = table.loc[mask].link.apply(fix)

    # unwrap Google tracker redirects to the real destination URL
    _fix_links(
        r'google\.com(?:\.br)?/url',
        lambda outer_url: urllib.parse.parse_qs(
            urllib.parse.urlparse(outer_url).query
        )['url'][0],
    )
    # drop anything after the first whitespace in a URL (WTF?)
    _fix_links(r'^[^\s]+\s', lambda thing: thing.split()[0])
    # strip parenthesis wrapped around a whole URL (WTF?)
    _fix_links(r'^\(.+\)$', lambda thing: thing[1:-1])
    # prepend a schema to URLs that lack one; default to http, which
    # should at least redirect to https
    _fix_links(
        r'^\w+(?:\.\w+)+\.(?:br|com|net)[\w/]*$',
        lambda url: f'http://{url}',
    )

    # remove empty links and duplicates
    table.dropna(subset=['link'], inplace=True)
    table.drop_duplicates(inplace=True)

    # normalize the link-type labels
    for old, new in (('link_camara', 'camara'),
                     ('link_prefeitura', 'prefeitura'),
                     ('external_link', 'external'),
                     ('link_site', 'link'),
                     ('link_site_oficial', 'prefeitura')):
        table.link_type.replace(old, new, inplace=True)
    return table
def store_dbpedia_links(table: pd.DataFrame, output_folder: str,
        output_file: str):
    """Store the links in a CSV file. If the file already exists, merge
    the existing with the obtained data.

    Args:
        table (pd.DataFrame): A Pandas dataframe containing the links
            obtained from DBPedia, in the format output by
            get_dbpedia_links_dataframe.
        output_folder (str): The path where the output file should be
            stored.
        output_file: (str): The file name of the output.
    """
    # check if the output folder already exists and, if not, create it;
    # makedirs also creates missing intermediate directories, which the
    # original os.mkdir could not (it raised FileNotFoundError).
    if not os.path.exists(output_folder):
        print(f'Output folder does not yet exist. Creating "{output_folder}"...')
        os.makedirs(output_folder, exist_ok=True)
    output = os.path.join(output_folder, output_file)
    # merge with previously recorded links, if any
    if os.path.exists(output):
        recorded_df = pd.read_csv(output)
        new_df = pd.concat([recorded_df, table], sort=True)
    else:
        new_df = table.copy()
    # remove duplicate entries
    new_df.drop_duplicates(inplace=True)
    # store the results
    new_df.to_csv(output, index=False)
if __name__ == '__main__':
    config = get_config()
    # combine data: concatenate the results of every configured query
    dbp_links = pd.concat(
        [
            get_dbpedia_links_dataframe(query['url'])
            for source in config['sources']
            for query in source['queries']
        ],
        sort=True
    )
    # remove garbage links
    dbp_links = clean_dbpedia_links(dbp_links)
    # store (or merge) the results into the unverified-links CSV
    store_dbpedia_links(dbp_links, OUTPUT_FOLDER, OUTPUT_FILE)
|
# -*- coding: utf-8 -*-
"""
@author: Chenglong Chen <c.chenglong@gmail.com>
@brief: basic features
"""
import re
from collections import Counter
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelBinarizer
from Code.Chenglong import config
from Code.Chenglong.utils import ngram_utils, nlp_utils, np_utils
from Code.Chenglong.utils import time_utils, logging_utils, pkl_utils
from Code.Chenglong.feature_base import BaseEstimator, StandaloneFeatureWrapper
# tune the token pattern to get a better correlation with y_train
# token_pattern = r"(?u)\b\w\w+\b"
# token_pattern = r"\w{1,}"
# token_pattern = r"\w+"
# token_pattern = r"[\w']+"
token_pattern = " " # just split the text into tokens on single spaces
class DocId(BaseEstimator):
    """Maps each distinct observation to a stable integer id."""
    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)
        # Ids follow the (arbitrary but fixed) iteration order of the set.
        self.encoder = {obs: idx for idx, obs in enumerate(set(obs_corpus))}
    def __name__(self):
        return "DocId"
    def transform_one(self, obs, target, id):
        return self.encoder[obs]
class DocIdEcho(BaseEstimator):
    """For product_uid: echoes the raw observation as the feature value."""
    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)
    def __name__(self):
        return "DocIdEcho"
    def transform_one(self, obs, target, id):
        # Identity feature.
        return obs
class DocIdOneHot(BaseEstimator):
    """For linear model: one-hot encodes the whole observation corpus."""
    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)
    def __name__(self):
        return "DocIdOneHot"
    def transform(self):
        # Whole-corpus transform (no per-row transform_one): returns a
        # sparse one-hot matrix over obs_corpus.
        lb = LabelBinarizer(sparse_output=True)
        return lb.fit_transform(self.obs_corpus)
"""
product_uid int(obs > 164038 and obs <= 206650)
id int(obs > 163700 and obs <= 221473)
In test, we have
#sample = 147406 for product_uid <= 206650
#sample = 19287 for product_uid
The majority will be in 1st and 2nd part.
In specific,
50K points of 147406 in public, and the rest 100K points in private.
"""
class ProductUidDummy1(BaseEstimator):
    """For product_uid: 1 when the uid is below 163800, else 0.

    The threshold presumably comes from the train/test uid split described
    in the module-level note above — confirm before reuse.
    """
    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)
    def __name__(self):
        return "ProductUidDummy1"
    def transform_one(self, obs, target, id):
        return int(obs<163800)
class ProductUidDummy2(BaseEstimator):
    """For product_uid: 1 when the uid is above 206650, else 0.

    Threshold presumably from the train/test uid split noted above — confirm.
    """
    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)
    def __name__(self):
        return "ProductUidDummy2"
    def transform_one(self, obs, target, id):
        return int(obs>206650)
class ProductUidDummy3(BaseEstimator):
    """For product_uid: 1 when 164038 < uid <= 206650, else 0.

    Interval presumably from the train/test uid split noted above — confirm.
    """
    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)
    def __name__(self):
        return "ProductUidDummy3"
    def transform_one(self, obs, target, id):
        return int(obs > 164038 and obs <= 206650)
class DocLen(BaseEstimator):
    """Length of the document, measured in tokens."""
    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)
    def __name__(self):
        return "DocLen"
    def transform_one(self, obs, target, id):
        # Tokenize and count in a single step.
        return len(nlp_utils._tokenize(obs, token_pattern))
class DocFreq(BaseEstimator):
    """Frequency of the document in the corpus."""
    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)
        # Corpus-wide frequencies are precomputed once here.
        self.counter = Counter(obs_corpus)
    def __name__(self):
        return "DocFreq"
    def transform_one(self, obs, target, id):
        return self.counter[obs]
class DocEntropy(BaseEstimator):
    """Entropy of the document's token distribution."""
    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)
    def __name__(self):
        return "DocEntropy"
    def transform_one(self, obs, target, id):
        token_counts = Counter(nlp_utils._tokenize(obs, token_pattern))
        counts = np.asarray(list(token_counts.values()))
        # Normalize counts to probabilities before taking the entropy.
        return np_utils._entropy(counts / np.sum(counts))
class DigitCount(BaseEstimator):
    """Count of digit characters in the document."""
    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)
    def __name__(self):
        return "DigitCount"
    def transform_one(self, obs, target, id):
        # One regex match per individual digit character.
        return len(re.findall(r"\d", obs))
class DigitRatio(BaseEstimator):
    """Ratio of digit characters to the token count of the document."""
    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)
    def __name__(self):
        return "DigitRatio"
    def transform_one(self, obs, target, id):
        digits = re.findall(r"\d", obs)
        tokens = nlp_utils._tokenize(obs, token_pattern)
        # _try_divide guards against empty documents (division by zero).
        return np_utils._try_divide(len(digits), len(tokens))
class UniqueCount_Ngram(BaseEstimator):
    """Number of distinct ngrams in the document."""
    def __init__(self, obs_corpus, target_corpus, ngram, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)
        self.ngram = ngram
        self.ngram_str = ngram_utils._ngram_str_map[self.ngram]
    def __name__(self):
        return "UniqueCount_%s" % self.ngram_str
    def transform_one(self, obs, target, id):
        tokens = nlp_utils._tokenize(obs, token_pattern)
        return len(set(ngram_utils._ngrams(tokens, self.ngram)))
class UniqueRatio_Ngram(BaseEstimator):
    """Ratio of distinct ngrams to total ngrams in the document."""
    def __init__(self, obs_corpus, target_corpus, ngram, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)
        self.ngram = ngram
        self.ngram_str = ngram_utils._ngram_str_map[self.ngram]
    def __name__(self):
        return "UniqueRatio_%s" % self.ngram_str
    def transform_one(self, obs, target, id):
        tokens = nlp_utils._tokenize(obs, token_pattern)
        grams = ngram_utils._ngrams(tokens, self.ngram)
        # _try_divide guards against documents with no ngrams.
        return np_utils._try_divide(len(set(grams)), len(grams))
#--------------------- Attribute based features ----------------------
class AttrCount(BaseEstimator):
    """obs_corpus is a list of list of attributes: counts them per document."""
    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)
    def __name__(self):
        return "AttrCount"
    def transform_one(self, obs, target, id):
        """obs is a list of attributes; return how many there are."""
        return len(obs)
class AttrBulletCount(BaseEstimator):
    """obs_corpus is a list of list of attributes."""
    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)
    def __name__(self):
        return "AttrBulletCount"
    def transform_one(self, obs, target, id):
        """obs is a list of attributes; count those named 'bullet*'."""
        return sum(1 for attr in obs if attr[0].startswith("bullet"))
class AttrBulletRatio(BaseEstimator):
    """obs_corpus is a list of list of attributes."""
    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)
    def __name__(self):
        return "AttrBulletRatio"
    def transform_one(self, obs, target, id):
        """obs is a list of attributes; fraction named 'bullet*'."""
        bullet = sum(1 for attr in obs if attr[0].startswith("bullet"))
        # _try_divide guards against an empty attribute list.
        return np_utils._try_divide(bullet, len(obs))
class AttrNonBulletCount(BaseEstimator):
    """obs_corpus is a list of list of attributes."""
    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)
    def __name__(self):
        return "AttrNonBulletCount"
    def transform_one(self, obs, target, id):
        """obs is a list of attributes; count those NOT named 'bullet*'."""
        return sum(1 for attr in obs if not attr[0].startswith("bullet"))
class AttrNonBulletRatio(BaseEstimator):
    """obs_corpus is a list of list of attributes."""
    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)
    def __name__(self):
        return "AttrNonBulletRatio"
    def transform_one(self, obs, target, id):
        """obs is a list of attributes; fraction NOT named 'bullet*'."""
        non_bullet = sum(1 for attr in obs if not attr[0].startswith("bullet"))
        # _try_divide guards against an empty attribute list.
        return np_utils._try_divide(non_bullet, len(obs))
class AttrHasProductHeight(BaseEstimator):
    """obs_corpus is a list of list of attributes."""
    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)
    def __name__(self):
        return "AttrHasProductHeight"
    def transform_one(self, obs, target, id):
        """obs is a list of attributes; 1 if any name mentions 'product height'."""
        return int(any("product height" in attr[0] for attr in obs))
class AttrHasProductWidth(BaseEstimator):
    """obs_corpus is a list of list of attributes"""

    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)

    def __name__(self):
        return "AttrHasProductWidth"

    def transform_one(self, obs, target, id):
        """1 if any attribute's first field mentions "product width", else 0."""
        return int(any("product width" in attr[0] for attr in obs))
class AttrHasProductLength(BaseEstimator):
    """obs_corpus is a list of list of attributes"""

    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)

    def __name__(self):
        return "AttrHasProductLength"

    def transform_one(self, obs, target, id):
        """1 if any attribute's first field mentions "product length", else 0."""
        return int(any("product length" in attr[0] for attr in obs))
class AttrHasProductDepth(BaseEstimator):
    """obs_corpus is a list of list of attributes"""

    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)

    def __name__(self):
        return "AttrHasProductDepth"

    def transform_one(self, obs, target, id):
        """1 if any attribute's first field mentions "product depth", else 0."""
        return int(any("product depth" in attr[0] for attr in obs))
class AttrHasIndoorOutdoor(BaseEstimator):
    """obs_corpus is a list of list of attributes"""

    def __init__(self, obs_corpus, target_corpus, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode)

    def __name__(self):
        return "AttrHasIndoorOutdoor"

    def transform_one(self, obs, target, id):
        """1 if any attribute's first field mentions "indoor outdoor", else 0."""
        return int(any("indoor outdoor" in attr[0] for attr in obs))
#---------------- Main ---------------------------
def main():
    """Generate the basic (standalone) features for every text field of the dataset.

    For each generator class a StandaloneFeatureWrapper runs it over the listed
    observation fields and writes the resulting features to config.FEAT_DIR.
    """
    logname = "generate_feature_basic_%s.log"%time_utils._timestamp()
    logger = logging_utils._get_logger(config.LOG_DIR, logname)
    # The lemmatized + stemmed version of the full combined dataframe.
    dfAll = pkl_utils._load(config.ALL_DATA_LEMMATIZED_STEMMED)
    ## basic
    generators = [DocId, DocLen, DocFreq, DocEntropy, DigitCount, DigitRatio]
    obs_fields = ["search_term", "product_title", "product_description",
                  "product_attribute", "product_brand", "product_color"]
    for generator in generators:
        param_list = []
        sf = StandaloneFeatureWrapper(generator, dfAll, obs_fields, param_list, config.FEAT_DIR, logger)
        sf.go()
    ## for product_uid
    generators = [DocIdEcho, DocFreq, ProductUidDummy1, ProductUidDummy2, ProductUidDummy3]
    obs_fields = ["product_uid"]
    for generator in generators:
        param_list = []
        sf = StandaloneFeatureWrapper(generator, dfAll, obs_fields, param_list, config.FEAT_DIR, logger)
        sf.go()
    ## unique count — one feature per (generator, ngram) combination
    generators = [UniqueCount_Ngram, UniqueRatio_Ngram]
    obs_fields = ["search_term", "product_title", "product_description",
                  "product_attribute", "product_brand", "product_color"]
    ngrams = [1,2,3]
    for generator in generators:
        for ngram in ngrams:
            param_list = [ngram]
            sf = StandaloneFeatureWrapper(generator, dfAll, obs_fields, param_list, config.FEAT_DIR, logger)
            sf.go()
    ## for product_attribute_list
    generators = [
        AttrCount,
        AttrBulletCount,
        AttrBulletRatio,
        AttrNonBulletCount,
        AttrNonBulletRatio,
        AttrHasProductHeight,
        AttrHasProductWidth,
        AttrHasProductLength,
        AttrHasProductDepth,
        AttrHasIndoorOutdoor,
    ]
    obs_fields = ["product_attribute_list"]
    for generator in generators:
        param_list = []
        sf = StandaloneFeatureWrapper(generator, dfAll, obs_fields, param_list, config.FEAT_DIR, logger)
        sf.go()
if __name__ == "__main__":
    main()
|
'''
Created on Feb 27, 2015
@author: Matthias
'''
import os
import numpy as np
import matplotlib.pyplot as plt
def f(x, y):
    """Return the product of x and y."""
    product = x * y
    return product
def g(x):
    """Return x squared (elementwise for numpy arrays, as used by the caller)."""
    squared = x * x
    return squared
if __name__ == '__main__':
    # NOTE: Python 2 syntax (print statements) — this script needs a Python 2 interpreter.
    print os.listdir(os.getcwd())
    abc = []
    C = np.matrix([[1, 2], [3, 4]])
    print C
    # Reshape to the same 2x2 shape (effectively a no-op, kept from the experiment).
    B = C.reshape([2,2])
    print C.dot(B)
    print f(3,4)
    X = np.arange(1,10,1)
    print X
    Y = g(X)
    print Y
    # plot() with no data shows an empty figure window.
    plt.plot()
    plt.show()
    pass
## Flask authentication for bokeh
from functools import wraps
from flask import request, Response, redirect, Flask,render_template
from bokeh.util import session_id
app = Flask(__name__)
def check_auth(username, password):
    """Validate the (hard-coded placeholder) HTTP basic-auth credentials."""
    expected = ('xxxxx', 'xxxxx')
    return (username, password) == expected
def authenticate():
    """Sends a 401 response that enables basic auth"""
    body = ('Could not verify your access level for that URL.\n'
            'You have to login with proper credentials')
    auth_header = {'WWW-Authenticate': 'Basic realm="Login Required"'}
    return Response(body, 401, auth_header)
# Route for handling the login page logic
@app.route('/', methods=['GET', 'POST'])
def login():
    """Render the login form; on valid credentials redirect into the Bokeh app."""
    error = None
    if request.method == 'POST':
        # NOTE(review): credentials are hard-coded placeholders — replace before deployment.
        if request.form['username'] != 'admin' or request.form['password'] != 'xxxxxx':
            error = 'Invalid Credentials. Please try again.'
        else:
            # Pre-generate a Bokeh session id and hand it to the Bokeh server via the URL.
            s_id = session_id.generate_session_id()
            return redirect("http://192.168.0.99:5006/CRM_bokeh_app?bokeh-session-id={}".format(s_id), code=302)
    return render_template('login.html', error=error)
# def requires_auth(f):
# @wraps(f)
# def decorated(*args, **kwargs):
# auth = request.authorization
# if not auth or not check_auth(auth.username, auth.password):
# return authenticate()
# return f(*args, **kwargs)
# return decorated
# @app.route('/')
# @requires_auth
# def redirect_to_bokeh():
# s_id = session_id.generate_session_id()
# return redirect("http://192.168.0.99:5006/CRM_bokeh_app?bokeh-session-id={}".format(s_id), code=302)
if __name__ == "__main__":
app.run(host='192.168.0.99',port=5000) |
import random
"""
The SimRandom class is a wrapper around random, providing only the
randint(bound) method -- returns a random number between 0 and bound-1. The reason
for this class is that SimRandom is deterministic, it gives the same numbers in any simulation run.
So, for testing, the "random" choice of Orders and Inventory will be predictable,
which is nice for testing and debugging purposes.
"""
class SimRandom:
    """Deterministic random wrapper: every instance replays the same sequence."""

    def __init__(self):
        # Fixed seed so each new SimRandom yields the identical pseudo-random stream.
        random.seed(24)

    def randint(self, bound):
        """Return a pseudo-random integer in the range [0, bound - 1]."""
        return random.randint(0, bound - 1)
#a = SimRandom()
#print(a.randint(5))
|
import struct
DHCP_MESSAGE_PARSE_STRING = '!ccccIHHIIII6s10s192s4s'
DHCP_MAGIC_BYTES = b'\x63\x82\x53\x63'
DHCP_TAG_PAD = 0x00 # 0
DHCP_TAG_END = 0xff # 255
DHCP_TAG_SUBNET_MASK = 0x01 # 1
DHCP_TAG_ROUTER_ADDRESSES = 0x03 # 3
DHCP_TAG_DOMAIN_NAME_SERVERS = 0x06 # 6
DHCP_TAG_HOST_NAME = 0x0C # 12
DHCP_TAG_DOMAIN_NAME = 0x0F # 15
DHCP_TAG_INTERFACE_MTU = 0x1A # 26
DHCP_TAG_BROADCAST_ADDRESS = 0x1C # 28
DHCP_TAG_STATIC_ROUTE = 0x21 # 33
DHCP_TAG_NTP_SERVERS = 0x2A # 42
DHCP_TAG_REQUESTED_IP_ADDRESS = 0x32 # 50
DHCP_TAG_IP_ADDRESS_LEASE_TIME = 0x33 # 51
DHCP_TAG_OPTION_OVERLOAD = 0x34 # 52
DHCP_TAG_OPTION_OVERLOAD_FILE = 0x01 # 1
DHCP_TAG_OPTION_OVERLOAD_SNAME = 0x02 # 2
DHCP_TAG_OPTION_OVERLOAD_BOTH = 0x03 # 3
DHCP_TAG_MESSAGE_TYPE = 0x35 # 53
DHCP_TAG_MESSAGE_TYPE_DICT = {'DHCPDISCOVER': 0x01,
'DHCPOFFER': 0x02,
'DHCPREQUEST': 0x03,
'DHCPDECLINE': 0x04,
'DHCPACK': 0x05,
'DHCPNAK': 0x06,
'DHCPRELEASE': 0x07,
'DHCPINFORM': 0x08}
DHCP_TAG_SERVER_IDENTIFIER = 0x36 # 54
DHCP_TAG_PARAMETER_REQUEST_LIST = 0x37 # 55
DHCP_TAG_MAX_MESSAGE_SIZE = 0x39 # 57
DHCP_TAG_RENEWAL_TIME_VALUE = 0x3A # 58
DHCP_TAG_REBINDING_TIME_VALUE = 0x3B # 59
DHCP_TAG_VENDOR_CLASS_ID = 0x3C # 60
DHCP_TAG_RAPID_COMMIT = 0x50 # 80
DHCP_TAG_AUTO_CONFIGURE = 0x74 # 116
DHCP_TAG_AUTO_CONFIGURE_DO_NOT_AUTO_CONFIGURE = 0x00
DHCP_TAG_AUTO_CONFIGURE_AUTO_CONFIGURE = 0x01
DHCP_TAG_DOMAIN_SEARCH = 0x77 # 119
DHCP_TAG_CLASSLESS_STATIC_ROUTE = 0x79 # 121
DHCP_TAG_FORCERENEW_NONCE_CAPABLE = 0x91 # 145
class DHCPMessage:
    """Container for the fixed-format fields of a parsed DHCP message plus its options."""

    def __init__(self, op_code, h_type, h_len, hops, xid, seconds, flags, c_i_addr, y_i_addr, s_i_addr,
                 g_i_addr, c_h_addr, server_name, boot_file, options):
        # All values are stored raw, exactly as unpacked from the wire; the
        # *_raw suffix signals that no decoding/conversion has happened yet.
        self.operation_code_raw = op_code
        self.hardware_addr_type_raw = h_type
        self.hardware_addr_length_raw = h_len
        self.hops_raw = hops
        self.transaction_id_raw = xid
        self.seconds_raw = seconds
        self.flags_raw = flags
        self.client_ip_address_raw = c_i_addr
        self.your_ip_address_raw = y_i_addr
        self.server_ip_address_raw = s_i_addr
        self.gateway_ip_address_raw = g_i_addr
        self.client_hw_address_raw = c_h_addr
        self.server_name_raw = server_name
        self.boot_file_raw = boot_file
        # options: mapping of option tag -> raw option bytes (see _parse_dhcp_request_options).
        self.options_raw = options
class DHCPException(Exception):
    """Base error type for DHCP handling."""
class ParserError(Exception):
    """Raised when a DHCP packet cannot be parsed."""
def parse_dhcp_request(package):
    """Parse a raw DHCP packet into a DHCPMessage (14 fixed fields + options dict).

    Raises ParserError if the DHCP magic cookie is not present where expected.
    """
    package_parser = struct.Struct(DHCP_MESSAGE_PARSE_STRING)
    message_data = package_parser.unpack_from(package)
    # Unpacked field 14 must hold the 4-byte magic cookie preceding the options.
    if message_data[14] != DHCP_MAGIC_BYTES: # This starts the options block
        raise ParserError("DHCP Magic Bytes not found")
    # Everything after the fixed-size header is the TLV-encoded options block.
    dhcp_options = _parse_dhcp_request_options(package[package_parser.size:])
    dhcp_request = DHCPMessage(*message_data[:14], dhcp_options)
    return dhcp_request
def _parse_dhcp_request_options(message_raw_options):
    """Parse the TLV-encoded DHCP options block into {option_tag: raw_value_bytes}.

    Stops at the END (0xff) tag; PAD (0x00) bytes are skipped.
    Raises ParserError when an option's declared length runs past the buffer.
    """
    dhcp_options = dict()
    offset = 0
    while len(message_raw_options) > offset:
        option_type = struct.unpack_from('!B', message_raw_options, offset=offset)[0]
        if option_type == DHCP_TAG_END:
            break
        elif option_type == DHCP_TAG_PAD:
            offset += 1
            continue
        offset += 1
        option_length = struct.unpack_from('!B', message_raw_options, offset=offset)[0]
        offset += 1
        # Validate with a real exception instead of `assert` (asserts disappear
        # under `python -O`, which would let malformed packets crash unpack_from).
        # An option ending exactly at the buffer end is valid, hence strict `>`
        # (the original `assert ... < len(...)` rejected that boundary case).
        if option_length + offset > len(message_raw_options):
            raise ParserError("DHCP option length exceeds packet size")
        option_data = struct.unpack_from('!{}s'.format(option_length), message_raw_options, offset=offset)[0]
        offset += option_length
        dhcp_options[option_type] = option_data
    return dhcp_options
|
"""
作业提交格式
+ 使用源代码的方式提交,题目用 `注释` 的方式写在源代码里面。
+ 作业文件命名:第几次作业-编号-作业编号-姓名.py(例如01-00-01-正心.py)
自己的编号到这个文档中查找:【腾讯文档】作业提交表https://docs.qq.com/sheet/DU01wRUNRb1B5S1l6?c=H46A0BI0
+ 作业提交格式为 第几次作业-编号-姓名.zip(发送两个作业的压缩包)
例如正心的第一次作业提交文件为 01-00-正心.zip
+ 提交到QQ邮箱:2328074219@qq.com
+ 作业在第二天上课前讲解,不会的提前半个小时到课堂
+ 写作业时自己先思考如何完成,当作业讲解之后还是不会做时,再来问老师。
将上课的案例-爬取小说完善。爬取《剑来》所有章节用章节名分别保存。
"""
import requests
import parsel
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36',
'Host': 'www.shuquge.com'
}
def get_catalog(catalog_url):
    """Fetch the novel's index page and return the list of chapter-page URLs.

    The first 12 links are skipped — presumably navigation / "latest chapter"
    entries rather than the chapter list proper (TODO confirm against the site).
    """
    details_urls = []
    # BUG FIX: headers must be a keyword argument; the second positional
    # parameter of requests.get is `params`, so the original call sent the
    # request WITHOUT the User-Agent/Host headers defined at module level.
    response = requests.get(catalog_url, headers=headers)
    response.encoding = response.apparent_encoding
    s = parsel.Selector(response.text)
    chapter_url_list = s.css('dd a::attr(href)')[12:].getall()
    for chapter_url in chapter_url_list:
        url = 'http://www.shuquge.com/txt/8659/' + chapter_url
        details_urls.append(url)
    return details_urls
def article_parsing(details_urls):
    """Download each chapter page, extract its title and body text.

    Prints the title when content was found; otherwise prints the raw
    title/content/url for debugging. Saving is currently disabled.
    """
    for details_url in details_urls:
        response = requests.get(details_url, headers=headers)
        # Use the detected encoding — the site does not always declare one correctly.
        response.encoding = response.apparent_encoding
        s = parsel.Selector(response.text)
        title = s.css('h1::text').get()
        content_list = [data.strip() for data in s.css('#content::text').getall()]
        content = '\n'.join(content_list)
        if content:
            print(title)
        else:
            # Empty content usually means the page failed to load or changed layout.
            print([title], [content], response.url)
        # save(title, content)
def save(title, content):
    """Write one chapter to .\\剑来\\<title>.txt (the target directory must already exist)."""
    # NOTE(review): raw string + doubled backslashes produces literal '\\'
    # separators; Windows tolerates this, but os.path.join would be cleaner.
    with open(r'.\\剑来\\' + title + '.txt', mode='w', encoding='utf-8') as f:
        print('正在下载:', title)
        f.write(content)
catalog_url = 'http://www.shuquge.com/txt/8659/index.html'
details_urls = get_catalog(catalog_url)
article_parsing(details_urls)
|
from rest_framework import serializers
from api.models.surveys import Survey
class SurveySerializer(serializers.ModelSerializer):
    """DRF serializer exposing the Survey model's fields over the API."""

    class Meta:
        model = Survey
        # Explicit whitelist of serialized fields.
        fields = (
            'id',
            'creator',
            'site_name',
            'coordinates_lat',
            'coordinates_long',
            'surveyor',
            'ack',
            'ack_user',
            'created',
        )
|
#!/usr/bin/env python3
#SBATCH -o eval-gk-svr-%j.out
#SBATCH -e eval-gk-svr-%j.err
#SBATCH -t 12:00:00
import argparse
# Imports for training SVRs
from sklearn.model_selection import train_test_split, KFold, GridSearchCV
from sklearn import svm
from sklearn.metrics import accuracy_score, mean_squared_error
import pprint
import re
import os
import glob
import json
import numpy as np
import pickle as pkl
# Import event graph utilities
import sys
sys.path.append(".")
sys.path.append("..")
from event_graph_analysis.utilities import timer
class model_manager(object):
    """Trains and evaluates precomputed-kernel SVR/SVC models — one per graph
    kernel — using repeated k-fold cross-validation, and pickles the per-fold
    results to the configured output path."""

    def __init__(self, kernel_matrices, graph_labels, target, output_path, n_folds, n_repeats):
        # kernel_matrices: mapping kernel-name -> precomputed kernel (Gram) matrix
        # graph_labels:    per-graph label dicts, indexed like the matrices
        # target:          key into each label dict selecting the value to predict
        self._kernel_matrices = kernel_matrices
        self._graph_labels = graph_labels
        self._target = target
        self._output_path = output_path
        self._n_folds = n_folds
        self._n_repeats = n_repeats
        # Filled by build_models: kernel-name -> {repeat -> {fold -> results}}.
        self._kernel_to_results = {}

    def _get_k_train(self, k_mat, train_indices):
        """
        Get embeddings from kernel matrix for all graphs in training set
        """
        n = len(train_indices)
        k_train = np.zeros((n,n))
        for i in range(n):
            for j in range(n):
                k_train[i][j] = k_mat[train_indices[i]][train_indices[j]]
        return k_train

    def _get_k_test(self, k_mat, train_indices, test_indices):
        """
        Get embeddings from kernel matrix for all graphs in testing set
        """
        n = len(train_indices)
        m = len(test_indices)
        # Rows = test graphs, columns = train graphs — the shape sklearn's
        # precomputed-kernel predict() expects.
        k_test = np.zeros((m,n))
        for i in range(m):
            for j in range(n):
                k_test[i][j] = k_mat[test_indices[i]][train_indices[j]]
        return k_test

    @timer
    def build_models(self, model_type):
        """
        Train and evaluate a kernel SVR or SVC model for each graph kernel.
        model_type: "svr" or "svc" (anything else is silently skipped).
        """
        for kernel,mat in self._kernel_matrices.items():
            if model_type == "svr":
                self._kernel_to_results[kernel] = self._build_model_svr(kernel, mat)
            elif model_type == "svc":
                self._kernel_to_results[kernel] = self._build_model_svc(kernel, mat)
        # Persist all results; protocol 0 keeps the pickle ASCII-compatible.
        with open(self._output_path, "wb") as outfile:
            pkl.dump(self._kernel_to_results, outfile, 0)

    @timer
    def _build_model_svc(self, kernel, k_mat):
        """
        Train and evaluate a kernel SVC model for the current graph kernel.
        Returns {repeat_idx: {fold_idx: accuracy}}.
        """
        print("Build models for kernel: {}".format(kernel))
        print("Predicting: {}".format(self._target))
        target = [ y[self._target] for y in self._graph_labels ]
        n_graphs = len(self._graph_labels)
        repeat_idx_to_results = {}
        for repeat_idx in range(self._n_repeats):
            fold_to_results = {}
            # random_state=repeat_idx gives a different but reproducible split per repeat.
            kf = KFold(n_splits=self._n_folds, random_state=repeat_idx, shuffle=True)
            for split_idx, (train_indices, test_indices) in enumerate(kf.split(range(n_graphs))):
                # Get test-train split
                y_train = [ target[i] for i in train_indices ]
                y_test = [ target[i] for i in test_indices ]
                k_train = self._get_k_train(k_mat, train_indices )
                k_test = self._get_k_test(k_mat, train_indices, test_indices )
                # Initialize SVC model
                curr_svc = svm.SVC(kernel="precomputed")
                # Train SVC model
                curr_svc.fit(k_train, y_train)
                # Predict
                y_pred = curr_svc.predict(k_test)
                # Evaluate accuracy
                accuracy = accuracy_score(y_test, y_pred)
                print("Accuracy:", str(round(accuracy*100, 2)) + "%")
                fold_to_results[split_idx] = accuracy
            repeat_idx_to_results[repeat_idx] = fold_to_results
            print()
        return repeat_idx_to_results

    @timer
    def _build_model_svr(self, kernel, k_mat):
        """
        Train and evaluate a kernel SVR model for the current graph kernel.
        Returns {repeat_idx: {fold_idx: {"true", "pred", "svr_params"}}}.
        """
        print("Build models for kernel: {}".format(kernel))
        print("Predicting: {}".format(self._target))
        print("Round, Fold, Min. Rel. Err, Med. Rel. Err., Max. Rel. Err")
        target = [ y[self._target] for y in self._graph_labels ]
        n_graphs = len(self._graph_labels)
        repeat_idx_to_results = {}
        for repeat_idx in range(self._n_repeats):
            fold_to_results = {}
            kf = KFold(n_splits=self._n_folds, random_state=repeat_idx, shuffle=True)
            for split_idx, (train_indices, test_indices) in enumerate(kf.split(range(n_graphs))):
                # Get test-train split
                y_train = [ target[i] for i in train_indices ]
                y_test = [ target[i] for i in test_indices ]
                k_train = self._get_k_train(k_mat, train_indices )
                k_test = self._get_k_test(k_mat, train_indices, test_indices )
                # Grid search for best SVR model hyperparameters
                # TODO
                # Initialize SVR model
                curr_svr = svm.SVR(kernel="precomputed")
                # Train SVR model
                curr_svr.fit(k_train, y_train)
                # Predict
                y_pred = curr_svr.predict(k_test)
                # Record model params and perf
                # NOTE(review): rel_error divides by the true value — a target
                # of 0 would raise/produce inf; confirm targets are nonzero.
                relative_errors = []
                for tv,pv in zip(y_test, y_pred):
                    rel_error = np.abs(tv - pv)/tv
                    relative_errors.append(rel_error)
                #print("\tMin Rel. Error: {}".format(min(relative_errors)))
                #print("\tMedian Rel. Error: {}".format(np.median(relative_errors)))
                #print("\tMax Rel. Error: {}".format(max(relative_errors)))
                #print()
                min_rel_err = min(relative_errors)
                med_rel_err = np.median(relative_errors)
                max_rel_err = max(relative_errors)
                model_eval = "{}, {}, {}, {}, {}".format(repeat_idx, split_idx, min_rel_err, med_rel_err, max_rel_err)
                print(model_eval)
                results = { "true" : y_test, "pred" : y_pred, "svr_params" : {} }
                fold_to_results[split_idx] = results
            repeat_idx_to_results[repeat_idx] = fold_to_results
            print()
        return repeat_idx_to_results
def main(kernel_matrices_path, graph_labels_path, target, output_path, model_type, n_folds, n_repeats):
    """Load the pickled kernel matrices and graph labels, then train/evaluate models."""
    with open(kernel_matrices_path, "rb") as infile:
        kernel_to_matrices = pkl.load(infile)
    with open(graph_labels_path, "rb") as infile:
        graph_labels = pkl.load(infile)
    mm = model_manager(kernel_to_matrices, graph_labels, target, output_path, n_folds, n_repeats)
    mm.build_models(model_type)
if __name__ == "__main__":
desc = ""
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("--kernel_matrices", required=True, help="Path to pickle file containing graph kernel matrices")
parser.add_argument("--graph_labels", required=True, help="Path to pickle file containing mapping from graph indices to graph class labels")
parser.add_argument("--predict", required=True, help="Run parameter to predict")
parser.add_argument("--output", required=False, default=None, help="Path to write pickled results dict to")
parser.add_argument("--model_type", required=True, help="Type of model to train. Options: svr, svc")
parser.add_argument("--n_folds", required=False, type=int, default=10, help="Number of folds for k-fold cross-validation")
parser.add_argument("--n_repeats", required=False, type=int, default=10, help="Number of times to repeat cross-validation")
args = parser.parse_args()
main(args.kernel_matrices,
args.graph_labels,
args.predict,
args.output,
args.model_type,
args.n_folds,
args.n_repeats)
|
# Interactive prime checker (Turkish prompts): counts the divisors of the
# entered number; exactly two divisors (1 and itself) means prime. 'q' quits.
while 1:
    b = 1                 # candidate divisor
    bolensayisi = int()   # divisor count, starts at 0
    print("Programdan çıkmak için q 'ya basınız.")
    y = (input("Lütfen bir sayı giriniz:"))
    if y == 'q':
        print("Programdan çıkılıyor....")
        break
    else:
        x: int = int(y)
        # Try every b in [1, x] and count the ones that divide x evenly.
        while 1:
            if b <= x:
                if x % b == 0:
                    bolensayisi = bolensayisi + 1
            else:
                break
            b += 1
        if bolensayisi == 2:
            print("Girilen sayı asal sayıdır.")
        else:
            print("Girilen sayı asal sayı değildir.")
|
#!python
"""
It is well known that if the square root of a natural number is not an integer, then it is irrational. The decimal expansion of such square roots is infinite without any repeating pattern at all.
The square root of two is 1.41421356237309504880..., and the digital sum of the first one hundred decimal digits is 475.
For the first one hundred natural numbers, find the total of the digital sums of the first one hundred decimal digits for all the irrational square roots.
"""
from decimal import Decimal, getcontext
from math import sqrt
getcontext().prec = 200 #give us plenty of precision
def sum_of_first_100_digits(d):
    """Sum the first 100 digits of Decimal d, counting the integer part.

    Relies on the caller having set a Decimal context precise enough to
    supply at least 100 significant digits.
    """
    shifted = int(d * Decimal(10 ** 100))  # guarantees at least 100 digits before truncation
    first_hundred = str(shifted)[:100]
    return sum(int(ch) for ch in first_hundred)
# Project Euler 80: accumulate the 100-digit sums over all non-square i <= 100.
s = 0
for i in range(101):
    # Skip perfect squares (their roots are rational); float sqrt is exact
    # enough to detect squares for i <= 100.
    if sqrt(i)!=int(sqrt(i)):
        s+= sum_of_first_100_digits(Decimal(i).sqrt())
# Final answer printed as [last i, total] — placement reconstructed; the
# original extraction lost indentation, so this may have printed per-iteration.
print([i, s])
#TODO: define function guess_the_number
#TODO: use random.randint to get a number between 1 and 20
#TODO: ask user to input their guess
#TODO: loop to keep giving the player three guesses until they've guessed correctly
#TODO: give the player cues if the guess is not correct
#TODO: let the player know if the guess is correct
|
# -*- coding: utf-8 -*-
import os, json
from urllib.parse import quote
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from ejob.items import JobItem
from ejob.item_loaders import LagouJobItemLoader
class LagouJobSpiderSpider(scrapy.spiders.CrawlSpider):
    """Crawl spider for lagou.com job listings: follows category pagination
    pages and parses each job-detail page into a JobItem."""
    name = "lagou_job_spider_test"
    allowed_domains = ["lagou.com"]
    # (category display name, category listing URL, category id)
    urls = [
        ('理财顾问', 'https://www.lagou.com/zhaopin/licaiguwen/', '123'),
    ]
    start_urls = ['https://www.lagou.com/']
    rules = [
        # Follow pagination links found inside the pager container.
        Rule(LinkExtractor(allow=('/zhaopin/[^/]+/\d+/$', ), restrict_xpaths=('//*[@class="pager_container"]', )), process_request='preprocess_request', follow=True),
        # Parse individual job-detail pages listed in the position list.
        Rule(LinkExtractor(allow=('/jobs/\d+\.html$', ), restrict_xpaths=('//*[@id="s_position_list"]')), callback='parse_job')
    ]
    # urls = [('', 'https://www.lagou.com/jobs/2123649.html', ''), ('', 'https://www.lagou.com/jobs/3248331.html', '')]
    site = '拉勾网(https://www.lagou.com/)'
    # Query string appended to every category URL (city=全国, i.e. nationwide).
    query_str = '&'.join(['{}'.format(quote('city=全国'))])

    def start_requests(self):
        """Seed one request per configured category, attaching its metadata."""
        for category_name, category_url, category_id in self.urls:
            category_url = '?'.join([category_url, self.query_str])
            request = scrapy.Request(category_url, dont_filter=False)
            # Redirects would lose the category context, so forbid them.
            request.meta['dont_redirect'] = True
            request.meta['category_name'] = category_name
            request.meta['category_id'] = category_id
            yield request

    def preprocess_request(self, request):
        """Hook for rewriting pagination requests (currently a pass-through)."""
        # request.replace(cookies={'index_location_city': '%E4%B8%8A%E6%B5%B7'})
        # request.replace(url='?'.join(request.url, self.query_str))
        return request

    def parse_job(self, response):
        """Extract a single job posting from a detail page into a JobItem."""
        item = JobItem()
        l = LagouJobItemLoader(item=JobItem(), response=response)
        l.add_value('url', response.url)
        l.add_value('site', self.site)
        l.add_value('requirements', '')
        l.add_value('description', '')
        # Left column of the position header: name, salary, tags, post date.
        xpath = '//*[contains(@class, "position-content")]/*[contains(@class, "position-content-l")]'
        cl = response.xpath(xpath)
        jn = cl.xpath('*[@class="job-name"]')
        l.add_value('position', jn.xpath('*[@class="name"]/text()').extract_first())
        l.add_value('department', jn.xpath('*[@class="company"]/text()').extract_first())
        jr = cl.xpath('*[@class="job_request"]')
        # The request block lists salary/city/experience/education/jobtype in order.
        t = jr.xpath('p/span/text()').extract()
        l.add_value('salary', t[0])
        l.add_value('city', t[1])
        l.add_value('exprience', t[2])
        l.add_value('education', t[3])
        l.add_value('jobtype', t[4])
        l.add_value('tags', jr.xpath('ul[contains(@class, "position-label")]/li/text()').extract())
        l.add_value('postdate', jr.xpath('*[@class="publish_time"]/text()').re_first(r'(\d{4}-\d{2}-\d{2})'))
        # Job detail section: perks, raw description text, work address, geo coords.
        jd = response.xpath('//*[@id="job_detail"]')
        l.add_value('temptation', jd.xpath('*[contains(@class, "job-advantage")]/p/text()').extract())
        l.add_value('rawpost', jd.xpath('*[contains(@class, "job_bt")]//p/text()').extract())
        ja = jd.xpath('*[contains(@class, "job-address")]')
        address = ja.xpath('*[contains(@class, "work_addr")]/a[contains(@href, "https://www.lagou.com/")]/text()').extract()
        address += ja.xpath('*[contains(@class, "work_addr")]/text()').extract()
        l.add_value('address', address)
        longitude = ja.xpath('*[@name="positionLng"]/@value').extract_first(default='')
        latitude = ja.xpath('*[@name="positionLat"]/@value').extract_first(default='')
        l.add_value('location', ','.join([longitude, latitude]))
        # Company sidebar: name plus labelled "feature" rows mapped onto fields.
        xpath = '//*[@id="job_company"]'
        jc = response.xpath(xpath)
        l.add_value('company_name', jc.xpath('.//h2/text()').extract_first())
        for li in jc.xpath('.//ul[contains(@class, "c_feature")]/li'):
            feature = li.xpath('*[contains(@class, "hovertips")]/text()').extract_first()
            value = ''.join([s.strip() for s in li.xpath('text()').extract() if s.strip()])
            if '领域' in feature:
                l.add_value('company_brief', '领域: {}'.format(value))
            elif '发展阶段' in feature:
                l.add_value('company_brief', '发展阶段: {}'.format(value))
            elif '规模' in feature:
                l.add_value('company_brief', '规模: {}'.format(value))
            elif '公司主页' in feature:
                l.add_value('company_url', li.xpath('a/@href').extract_first())
        yield l.load_item()
'''
test_list = ['one','two','three']
#for i in test_list:
for i in range(0,len(test_list)): -> range는 숫자를 가지고 있는 아이로 인덱싱이 필요
print(test_list[i])
'''
'''
a = (1,2,3,4,5,6,7,8,9,10)
for i in a:
if(i %2 !=0):
print(i)
else:
continue
'''
'''
a='안녕하세요. 저는 누구입니다.'
b = a.split(' ')
for i in b:
print(i)
'''
'''
sum = 0
for i in range(1,11,2):
sum = sum + i
print(sum)
'''
# for i in range(2,10):
# for j in range(1,10):
# print(i,'*',j,'=',i*j)
# print("\n")
# Count how often each word occurs in the sentence.
a = '멋쟁이 사자처럼 화이팅 멋쟁이 사자'
b = a.split(' ')
dic = {}
for word in b:
    # dict.get supplies 0 for words not seen yet.
    dic[word] = dic.get(word, 0) + 1
print(dic)
# -*- coding: utf-8 -*-
import shutil
import cv2 as cv
import os
import numpy as np
import random
'''
opencv数据增强
对图片进行色彩增强、高斯噪声、水平镜像、放大、旋转、剪切
并对每张图片保存每一种数据增强的图片
'''
def contrast_brightness_image(src1, a, g, path_out):
    '''
    Colour enhancement by adjusting contrast (weight a) and brightness (offset g),
    writing the result to path_out.
    '''
    h, w, ch = src1.shape  # height, width and channel count of the input
    # Blend partner of the same shape/dtype as the input.
    # NOTE(review): the original (Chinese) comment said "all-zero" but np.ones
    # is used, which adds an extra (1 - a) per pixel — confirm which was intended.
    src2 = np.ones([h, w, ch], src1.dtype)
    # addWeighted computes the weighted sum: src1*a + src2*(1 - a) + g.
    dst = cv.addWeighted(src1, a, src2, 1 - a, g)
    cv.imwrite(path_out, dst)
def gasuss_noise(image, path_out_gasuss, mean=0, var=0.001):
    '''
    Add Gaussian noise to the image and write the result to path_out_gasuss.
    mean : noise mean
    var  : noise variance
    '''
    image = np.array(image / 255, dtype=float)  # scale to [0, 1]
    noise = np.random.normal(mean, var ** 0.5, image.shape)
    out = image + noise
    # NOTE(review): the -1 lower clip only triggers for inputs that already
    # contain negatives; np.uint8 would then wrap them — confirm inputs are uint8.
    if out.min() < 0:
        low_clip = -1.
    else:
        low_clip = 0.
    out = np.clip(out, low_clip, 1.0)
    out = np.uint8(out * 255)  # back to the 8-bit range
    cv.imwrite(path_out_gasuss, out)
def mirror(image, path_out_mirror):
    '''
    Horizontal mirror: flip the image around its vertical axis and save it.
    '''
    flipped = cv.flip(image, 1)  # flipCode=1 → horizontal flip
    cv.imwrite(path_out_mirror, flipped)
def resize(image, path_out_large):
    '''
    Rescale the image by a random factor and save it.
    '''
    height, width = image.shape[:2]
    # NOTE(review): the factor is uniform in [0, 2), not the fixed 2x the
    # original comment claimed; a factor near 0 makes cv.resize fail — confirm intent.
    amp_num = 2 * random.random()
    large = cv.resize(image, (int(amp_num * width), int(amp_num * height)))
    cv.imwrite(path_out_large, large)
def SaltAndPepper(src, percetage, path_out_SaltAndPepper):
    '''
    Salt-and-pepper noise: set `percetage` of the pixel/channel samples to
    pure black (pepper) or pure white (salt), then save the result.
    '''
    SP_NoiseImg = src.copy()
    SP_NoiseNum = int(percetage * src.shape[0] * src.shape[1])
    for i in range(SP_NoiseNum):
        # BUG FIX: np.random.randint's upper bound is EXCLUSIVE, so the original
        # `randint(0, shape - 1)` could never select the last row/column, and
        # `randint(0, 1)` was always 0 — every noisy sample became pepper, never salt.
        randR = np.random.randint(0, src.shape[0])
        randG = np.random.randint(0, src.shape[1])
        randB = np.random.randint(0, 3)
        if np.random.randint(0, 2) == 0:
            SP_NoiseImg[randR, randG, randB] = 0
        else:
            SP_NoiseImg[randR, randG, randB] = 255
    cv.imwrite(path_out_SaltAndPepper, SP_NoiseImg)
def rotate(image, path_out_rotate):
    '''
    Rotate the image by a random angle in [-8, 8] degrees about its centre,
    filling exposed borders with white, and save it.
    '''
    rows, cols = image.shape[:2]
    M = cv.getRotationMatrix2D((cols / 2, rows / 2), random.randint(-8, 8), 1)
    dst = cv.warpAffine(image, M, (cols, rows), borderValue=(255, 255, 255))
    cv.imwrite(path_out_rotate, dst)
def shear(image, path_out_shear):
    '''
    Crop ("shear") 10% off one randomly chosen corner of the image and save it.
    '''
    height, width = image.shape[:2]
    # BUG FIX: random.randint's upper bound is INCLUSIVE, so randint(0, 4)
    # produced five outcomes for four branches, making the bottom-right crop
    # twice as likely as each of the others.
    i = random.randint(0, 3)
    shear_percent = 10
    if i == 0:  # crop away the top-left corner
        cropped = image[int(height / shear_percent):height, int(width / shear_percent):width]
    elif i == 1:  # crop away the top-right corner
        cropped = image[int(height / shear_percent):height, 0:width - int(width / shear_percent)]
    elif i == 2:  # crop away the bottom-left corner
        cropped = image[0:height - int(height / shear_percent), int(width / shear_percent):width]
    else:  # crop away the bottom-right corner
        cropped = image[0:height - int(height / shear_percent), 0:width - int(width / shear_percent)]
    cv.imwrite(path_out_shear, cropped)
# ---- Driver: apply each enabled augmentation to every image in image_path ----
image_path = 'D:/vs_python_opencv_tesseract/pics/validation0527'
image_out_path = 'D:/vs_python_opencv_tesseract/pics/data_augmentation'
if not os.path.exists(image_out_path):
    os.mkdir(image_out_path)
# NOTE(review): `list` shadows the builtin of the same name.
list = os.listdir(image_path)  # every file in the input folder
# print(list)
# Output filename suffix per augmentation, indexed by j below.
imageNameList = [
    '_color.png',
    '_gasuss.png',
    '_mirror.png',
    '_resize.png',
    '_rotate.png',
    '_shear.png',
    '_saltandpepper.png']
for i in range(0, len(list)):
    path = os.path.join(image_path, list[i])
    out_image_name = os.path.splitext(list[i])[0]
    print(out_image_name)
    for j in range(0, len(imageNameList)):
        path_out = os.path.join(image_out_path, out_image_name + imageNameList[j])
        image = cv.imread(path)
        # j indexes into imageNameList; mirror (2) and resize (3) are disabled,
        # so those suffixes are never produced.
        if j == 0:
            contrast_brightness_image(image, 1.2, 10, path_out)
        elif j == 1:
            gasuss_noise(image, path_out)
        # elif j == 2:
        #     mirror(image, path_out)
        # elif j == 3:
        #     resize(image, path_out)
        elif j == 4:
            rotate(image, path_out)
        elif j == 5:
            shear(image, path_out)
        elif j == 6:
            SaltAndPepper(image, random.random(), path_out)
        # else:
        #     shutil.copy(path, path_out)
|
# Convert pipe-separated rows from data_in_genres.txt into a SQL VALUES list.
# Improvements over the original: files are closed via `with` (the originals
# were never closed), and the duplicated last-row logic is factored into helpers.

def _escape_fields(fields):
    """Escape single quotes for use inside SQL string literals."""
    return [field.replace("'", "\\'") for field in fields]

def _write_row(out, fields, terminator):
    """Write one ('a','b',...) tuple followed by the given terminator."""
    out.write("('")
    out.write("','".join(fields))
    out.write("')")
    out.write(terminator)

with open('data_in_genres.txt', 'r') as file_to_read:
    data = file_to_read.read()
print("Result before - file_to_read_albums.read():")
print(type(data))
print(data)
# The trailing element after the final newline is empty — drop it.
data_lines = data.split('\n')[:-1]
print("Result after - data.split(\'\n\')[:-1]:")
print(type(data_lines))
print(data_lines)
with open('data_out_test.sql', 'w') as file_to_write:
    # All rows but the last end with ',' — the last row closes the statement with ';'.
    for line in data_lines[:-1]:
        print(line.split('|'))
        _write_row(file_to_write, _escape_fields(line.split('|')), ",\n")
    _write_row(file_to_write, _escape_fields(data_lines[-1].split('|')), ";\n")
    file_to_write.write('\n')
"""A module for Jython emulating (a small part of) CPython's multiprocessing.
With this, pygrametl can be made to use multiprocessing, but actually use
threads when used from Jython (where there is no GIL).
"""
# Copyright (c) 2011-2020, Aalborg University (pygrametl@cs.aau.dk)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from threading import Thread
from pygrametl.jythonsupport import Value
# Needed for both pip2 and pip3 to be supported
try:
from Queue import Queue
except ImportError:
from queue import Queue
# NOTE: This module is made for Jython.
__all__ = ['JoinableQueue', 'Process', 'Queue', 'Value']
class Process(Thread):
    """Thread-backed stand-in for multiprocessing.Process (for Jython, which has no GIL)."""
    # Real processes have a pid; a thread does not, so expose a placeholder.
    pid = '<n/a>'
    # Map multiprocessing's attribute-style API onto Thread's getter/setter methods.
    daemon = property(Thread.isDaemon, Thread.setDaemon)
    name = property(Thread.getName, Thread.setName)
class JoinableQueue(Queue):
    """Queue with a no-op close(), mimicking multiprocessing.JoinableQueue."""

    def close(self):
        # multiprocessing queues must be closed; thread-backed queues need nothing.
        pass
|
#! /usr/bin/env python
import sys
import os
import loginFile
import gui
root=""
top=""
def init(root, top):
    """Store the GUI root/top widgets in this module's globals.

    BUG FIX: the original rebound the local parameters (`root = root`), which
    never touched the module-level `root`/`top` that login() later reads, so
    login() always saw the initial empty strings. `global` cannot be used here
    because the parameter names shadow the globals, so write through globals().
    """
    globals()['root'] = root
    globals()['top'] = top
    print("Start the engines on something I suppose")
def login(username, password):
    """Validate username/password against credentials.txt; open the main GUI on success.

    Fixes over the original:
    - strips the trailing newline so the password field can actually match
      (readlines() keeps the '\n', so `password == line[1]` never succeeded)
    - returns after a successful login instead of falling through to the
      "Wrong username or password" message
    - skips malformed lines with fewer than two comma-separated fields
    """
    with open(loginFile.homeDir / 'credentials.txt') as file:
        lines = file.readlines()
    for line in lines:
        fields = line.rstrip('\n').split(',')
        if len(fields) >= 2 and username == fields[0] and password == fields[1]:
            gui.create_main(root)
            return
    print("Wrong username or password")
from pathlib import Path
from flask import Flask
from flask import request
from nerds.web.input import InputDocumentFile
from nerds.web.load import ModelLoader
from nerds.web.response import Response
app = Flask(__name__)
response = Response()
# load pre-trained models here
# it's a temporal solution that will be replaced by proper model uploading
root_dir = Path(__file__).parent.parent.parent
model_dir = Path(root_dir.joinpath('models'))
loader = ModelLoader()
ner_annotator = loader.load_model(model_dir.joinpath('spacy_ner'))
rel_annotator = loader.load_model(model_dir.joinpath('svm_re'))
nrm_annotator = loader.load_normalizer()
@app.route('/', methods=['GET', 'POST'])
def index():
    """Describe the service; the extraction endpoints themselves are POST-only."""
    description = (
        'This is a Flask application serving as a RESTful API for extracting Drug Safety information '
        'from text. Given a piece of text, it extracts drug-related entities (use route "/ner") '
        'and/or relations between them (use route "/rel"). It works with POST requests only.'
    )
    return description
@app.errorhandler(400)
def missing_parameter_error(parameter):
    """Build the 400 response for a missing request parameter."""
    return response.missing_parameter_error(parameter)
@app.errorhandler(400)
def unsupported_document_file_error(filename):
return response.unsupported_file_type_error(filename)
@app.errorhandler(400)
def unsupported_model_file_error(filename):
return response.unsupported_file_type_error(filename)
def is_supported_document_file(filename):
    """Return True iff *filename* carries a .txt extension (case-insensitive)."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension.lower() == 'txt'
def is_supported_model_file(filename):
    """Return True iff *filename* is a supported model archive (.zip, .tar or .gz)."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ('zip', 'tar', 'gz')
def _input_file_check(is_supported, unsupported_error):
    """Shared validation of the uploaded 'file' field.

    Returns an error response (missing parameter or unsupported type) or
    None when the upload is acceptable. Factored out of the two public
    checks below, which were byte-for-byte duplicates except for the
    predicate and error builder.
    """
    if 'file' not in request.files:
        return missing_parameter_error('file')
    file = request.files['file']
    if not file or file.filename == '':
        return missing_parameter_error('file')
    if not is_supported(file.filename):
        return unsupported_error(file.filename)
    return None


def input_document_file_check():
    """Validate the uploaded document file; return an error response or None."""
    return _input_file_check(is_supported_document_file, unsupported_document_file_error)


def input_model_file_check():
    """Validate the uploaded model archive; return an error response or None."""
    return _input_file_check(is_supported_model_file, unsupported_model_file_error)
@app.route('/ner', methods=['POST'])
def extract_entities():
    """POST /ner: run named-entity recognition over the uploaded .txt file."""
    err_result = input_document_file_check()
    if err_result:
        return err_result
    # load document
    input_file = InputDocumentFile(request.files['file'])
    # extract entities
    annotated_document = ner_annotator.annotate(input_file.document)
    # build a response
    return response.build(annotated_document)
@app.route('/rel', methods=['POST'])
def extract_relations():
    """POST /rel: run NER, then relation extraction, over the uploaded .txt file."""
    err_result = input_document_file_check()
    if err_result:
        return err_result
    # load document
    input_file = InputDocumentFile(request.files['file'])
    # extract entities (relation extraction needs the entities first)
    annotated_document = ner_annotator.annotate(input_file.document)
    # extract relations
    annotated_document = rel_annotator.annotate(annotated_document)
    # build a response
    return response.build(annotated_document)
@app.route('/nrm', methods=['POST'])
def extract_normalizations():
    """POST /nrm: run NER, then entity normalization, over the uploaded .txt file."""
    err_result = input_document_file_check()
    if err_result:
        return err_result
    # load document
    input_file = InputDocumentFile(request.files['file'])
    # extract entities
    annotated_document = ner_annotator.annotate(input_file.document)
    # normalize entities
    annotated_document = nrm_annotator.annotate(annotated_document)
    # build a response
    return response.build(annotated_document)
@app.route('/all', methods=['POST'])
def extract_all():
    """POST /all: full pipeline — NER, relation extraction and normalization."""
    err_result = input_document_file_check()
    if err_result:
        return err_result
    # load document
    input_file = InputDocumentFile(request.files['file'])
    # extract entities
    annotated_document = ner_annotator.annotate(input_file.document)
    # extract relations
    annotated_document = rel_annotator.annotate(annotated_document)
    # normalize entities
    annotated_document = nrm_annotator.annotate(annotated_document)
    # build a response
    return response.build(annotated_document)
@app.route('/model', methods=['POST'])
def upload_model():
    """POST /model: accept a model archive upload (not implemented yet).

    NOTE(review): when the file passes validation this falls through and
    returns None, which Flask turns into an error — pending the TODO below.
    """
    err_result = input_model_file_check()
    if err_result:
        return err_result
    # TODO: upload_model
if __name__ == "__main__":
    # Listen on all interfaces; 5003 is this service's fixed port.
    app.run(host='0.0.0.0', port=5003)
|
# Copyright (c) 2023. Lena "Teekeks" During <info@teawork.de>
"""
EventSub Webhook
----------------
.. warning:: Rework in progress, docs not accurate
EventSub lets you listen for events that happen on Twitch.
The EventSub client runs in its own thread, calling the given callback function whenever an event happens.
Look at the `Twitch EventSub reference <https://dev.twitch.tv/docs/eventsub/eventsub-reference>`__ to find the topics
you are interested in.
************
Requirements
************
.. note:: Please note that Your Endpoint URL has to be HTTPS, has to run on Port 443 and requires a valid, non self signed certificate
This most likely means, that you need a reverse proxy like nginx. You can also hand in a valid ssl context to be used in the constructor.
In the case that you don't hand in a valid ssl context to the constructor, you can specify any port you want in the constructor and handle the
bridge between this program and your public URL on port 443 via reverse proxy.\n
You can check on whether or not your webhook is publicly reachable by navigating to the URL set in `callback_url`.
You should get a 200 response with the text `pyTwitchAPI eventsub`.
*******************
Listening to topics
*******************
After you started your EventSub client, you can use the :code:`listen_` prefixed functions to listen to the topics you are interested in.
The function you hand in as callback will be called whenever that event happens with the event data as a parameter.
************
Code Example
************
.. code-block:: python
from twitchAPI.twitch import Twitch
from twitchAPI.helper import first
from twitchAPI.eventsub import EventSub
from twitchAPI.oauth import UserAuthenticator
from twitchAPI.types import AuthScope
import asyncio
TARGET_USERNAME = 'target_username_here'
EVENTSUB_URL = 'https://url.to.your.webhook.com'
APP_ID = 'your_app_id'
APP_SECRET = 'your_app_secret'
TARGET_SCOPES = [AuthScope.MODERATOR_READ_FOLLOWERS]
async def on_follow(data: dict):
# our event happened, let's do things with the data we got!
print(data)
async def eventsub_example():
# create the api instance and get the ID of the target user
twitch = await Twitch(APP_ID, APP_SECRET)
user = await first(twitch.get_users(logins=TARGET_USERNAME))
# the user has to authenticate once using the bot with our intended scope.
# since we do not need the resulting token after this authentication, we just discard the result we get from authenticate()
# Please read up the UserAuthenticator documentation to get a full view of how this process works
auth = UserAuthenticator(twitch, TARGET_SCOPES)
await auth.authenticate()
# basic setup, will run on port 8080 and a reverse proxy takes care of the https and certificate
event_sub = EventSub(EVENTSUB_URL, APP_ID, 8080, twitch)
# unsubscribe from all old events that might still be there
# this will ensure we have a clean slate
await event_sub.unsubscribe_all()
# start the eventsub client
event_sub.start()
# subscribing to the desired eventsub hook for our user
# the given function (in this example on_follow) will be called every time this event is triggered
# the broadcaster is a moderator in their own channel by default so specifying both as the same works in this example
await event_sub.listen_channel_follow_v2(user.id, user.id, on_follow)
# eventsub will run in its own process
# so lets just wait for user input before shutting it all down again
try:
input('press Enter to shut down...')
finally:
# stopping both eventsub as well as gracefully closing the connection to the API
await event_sub.stop()
await twitch.close()
print('done')
# lets run our example
asyncio.run(eventsub_example())"""
import asyncio
import hashlib
import hmac
import threading
from functools import partial
from json import JSONDecodeError
from random import choice
from string import ascii_lowercase
from ssl import SSLContext
from time import sleep
from typing import Optional, Union, Callable, Awaitable
import datetime
from aiohttp import web, ClientSession
from twitchAPI.eventsub.base import EventSubBase
from ..twitch import Twitch
from ..helper import done_task_callback
from ..type import TwitchBackendException, EventSubSubscriptionConflict, EventSubSubscriptionError, EventSubSubscriptionTimeout, \
TwitchAuthorizationException
__all__ = ['EventSubWebhook']
class EventSubWebhook(EventSubBase):

    def __init__(self,
                 callback_url: str,
                 port: int,
                 twitch: Twitch,
                 ssl_context: Optional[SSLContext] = None,
                 host_binding: str = '0.0.0.0',
                 subscription_url: Optional[str] = None,
                 callback_loop: Optional[asyncio.AbstractEventLoop] = None,
                 revocation_handler: Optional[Callable[[dict], Awaitable[None]]] = None):
        """
        :param callback_url: The full URL of the webhook.
        :param port: the port on which this webhook should run
        :param twitch: an app authenticated instance of :const:`~twitchAPI.twitch.Twitch`
        :param ssl_context: optional ssl context to be used |default| :code:`None`
        :param host_binding: the host to bind the internal server to |default| :code:`0.0.0.0`
        :param subscription_url: Alternative subscription URL, useful for development with the twitch-cli
        :param callback_loop: The asyncio eventloop to be used for callbacks. \n
            Set this if you or a library you use cares about which asyncio event loop is running the callbacks.
            Defaults to the one used by EventSub Webhook.
        :param revocation_handler: Optional handler for when subscriptions get revoked. |default| :code:`None`
        """
        super().__init__(twitch)
        self.logger.name = 'twitchAPI.eventsub.webhook'
        self.callback_url: str = callback_url
        """The full URL of the webhook."""
        if self.callback_url[-1] == '/':
            self.callback_url = self.callback_url[:-1]
        self.secret: str = ''.join(choice(ascii_lowercase) for _ in range(20))
        """A random secret string. Set this for added security. |default| :code:`A random 20 character long string`"""
        self.wait_for_subscription_confirm: bool = True
        """Set this to false if you don't want to wait for a subscription confirm. |default| :code:`True`"""
        self.wait_for_subscription_confirm_timeout: int = 30
        """Max time in seconds to wait for a subscription confirmation. Only used if ``wait_for_subscription_confirm`` is set to True.
        |default| :code:`30`"""
        self._port: int = port
        self.subscription_url: Optional[str] = subscription_url
        """Alternative subscription URL, useful for development with the twitch-cli"""
        if self.subscription_url is not None and self.subscription_url[-1] != '/':
            self.subscription_url += '/'
        self._callback_loop = callback_loop
        self._host: str = host_binding
        self.__running = False
        self.revokation_handler: Optional[Callable[[dict], Awaitable[None]]] = revocation_handler
        """Optional handler for when subscriptions get revoked."""
        self._startup_complete = False
        self.unsubscribe_on_stop: bool = True
        """Unsubscribe all currently active Webhooks on calling :const:`~twitchAPI.eventsub.EventSub.stop()` |default| :code:`True`"""
        self._closing = False
        self.__ssl_context: Optional[SSLContext] = ssl_context
        self.__active_webhooks = {}
        self.__hook_thread: Union['threading.Thread', None] = None
        self.__hook_loop: Union['asyncio.AbstractEventLoop', None] = None
        self.__hook_runner: Union['web.AppRunner', None] = None
        self._task_callback = partial(done_task_callback, self.logger)
        if not self.callback_url.startswith('https'):
            raise RuntimeError('HTTPS is required for authenticated webhook.\n'
                               + 'Either use non authenticated webhook or use a HTTPS proxy!')

    async def _unsubscribe_hook(self, topic_id: str) -> bool:
        # Webhook transport needs no per-topic local cleanup on unsubscribe.
        return True

    def __build_runner(self):
        hook_app = web.Application()
        hook_app.add_routes([web.post('/callback', self.__handle_callback),
                             web.get('/', self.__handle_default)])
        return web.AppRunner(hook_app)

    def __run_hook(self, runner: 'web.AppRunner'):
        # Runs in the dedicated webhook thread with its own event loop.
        self.__hook_runner = runner
        self.__hook_loop = asyncio.new_event_loop()
        if self._callback_loop is None:
            self._callback_loop = self.__hook_loop
        asyncio.set_event_loop(self.__hook_loop)
        self.__hook_loop.run_until_complete(runner.setup())
        site = web.TCPSite(runner, str(self._host), self._port, ssl_context=self.__ssl_context)
        self.__hook_loop.run_until_complete(site.start())
        self.logger.info('started twitch API event sub on port ' + str(self._port))
        self._startup_complete = True
        self.__hook_loop.run_until_complete(self._keep_loop_alive())

    async def _keep_loop_alive(self):
        # Keep the thread's event loop spinning until stop() flips _closing.
        while not self._closing:
            await asyncio.sleep(0.1)

    def start(self):
        """Starts the EventSub client

        :rtype: None
        :raises RuntimeError: if EventSub is already running
        """
        if self.__running:
            raise RuntimeError('already started')
        self.__hook_thread = threading.Thread(target=self.__run_hook, args=(self.__build_runner(),))
        self.__running = True
        self._startup_complete = False
        self._closing = False
        self.__hook_thread.start()
        # Block the caller until the server thread has bound its socket.
        while not self._startup_complete:
            sleep(0.1)

    async def stop(self):
        """Stops the EventSub client

        This also unsubscribes from all known subscriptions if unsubscribe_on_stop is True

        :rtype: None
        """
        self.logger.debug('shutting down eventsub')
        if self.__hook_runner is not None and self.unsubscribe_on_stop:
            await self.unsubscribe_all_known()
        # ensure all client sessions are closed
        await asyncio.sleep(0.25)
        self._closing = True
        # cleanly shut down the runner
        await self.__hook_runner.shutdown()
        await self.__hook_runner.cleanup()
        self.__hook_runner = None
        self.__running = False
        self.logger.debug('eventsub shut down')

    def _get_transport(self):
        """Transport section of an EventSub subscription request body."""
        return {
            'method': 'webhook',
            'callback': f'{self.callback_url}/callback',
            'secret': self.secret
        }

    def _build_request_header(self):
        token = self._twitch.get_app_token()
        if token is None:
            raise TwitchAuthorizationException('no Authorization set!')
        return {
            'Client-ID': self._twitch.app_id,
            'Content-Type': 'application/json',
            'Authorization': f'Bearer {token}'
        }

    async def _subscribe(self, sub_type: str, sub_version: str, condition: dict, callback, event, is_batching_enabled: Optional[bool] = None) -> str:
        """Subscribe to Twitch Topic"""
        if not asyncio.iscoroutinefunction(callback):
            raise ValueError('callback needs to be a async function which takes one parameter')
        self.logger.debug(f'subscribe to {sub_type} version {sub_version} with condition {condition}')
        data = {
            'type': sub_type,
            'version': sub_version,
            'condition': condition,
            'transport': self._get_transport()
        }
        if is_batching_enabled is not None:
            data['is_batching_enabled'] = is_batching_enabled
        async with ClientSession(timeout=self._twitch.session_timeout) as session:
            sub_base = self.subscription_url if self.subscription_url is not None else self._twitch.base_url
            r_data = await self._api_post_request(session, sub_base + 'eventsub/subscriptions', data=data)
            result = await r_data.json()
        error = result.get('error')
        if r_data.status == 500:
            raise TwitchBackendException(error)
        if error is not None:
            if error.lower() == 'conflict':
                raise EventSubSubscriptionConflict(result.get('message', ''))
            raise EventSubSubscriptionError(result.get('message'))
        sub_id = result['data'][0]['id']
        self.logger.debug(f'subscription for {sub_type} version {sub_version} with condition {condition} has id {sub_id}')
        self._add_callback(sub_id, callback, event)
        if self.wait_for_subscription_confirm:
            # Poll until Twitch confirms the subscription via the challenge
            # callback, or give up after the configured timeout.
            timeout = datetime.datetime.utcnow() + datetime.timedelta(
                seconds=self.wait_for_subscription_confirm_timeout)
            while timeout >= datetime.datetime.utcnow():
                if self._callbacks[sub_id]['active']:
                    return sub_id
                await asyncio.sleep(0.01)
            self._callbacks.pop(sub_id, None)
            raise EventSubSubscriptionTimeout()
        return sub_id

    async def _verify_signature(self, request: 'web.Request') -> bool:
        """Check the Twitch HMAC-SHA256 message signature of *request*."""
        expected = request.headers['Twitch-Eventsub-Message-Signature']
        hmac_message = request.headers['Twitch-Eventsub-Message-Id'] + \
            request.headers['Twitch-Eventsub-Message-Timestamp'] + await request.text()
        sig = 'sha256=' + hmac.new(bytes(self.secret, 'utf-8'),
                                   msg=bytes(hmac_message, 'utf-8'),
                                   digestmod=hashlib.sha256).hexdigest().lower()
        # Constant-time comparison: plain `==` short-circuits on the first
        # mismatching byte and leaks signature information through timing.
        return hmac.compare_digest(sig, expected)

    # noinspection PyUnusedLocal
    @staticmethod
    async def __handle_default(request: 'web.Request'):
        # Health-check endpoint used to verify public reachability.
        return web.Response(text="pyTwitchAPI EventSub")

    async def __handle_challenge(self, request: 'web.Request', data: dict):
        self.logger.debug(f'received challenge for subscription {data.get("subscription").get("id")}')
        if not await self._verify_signature(request):
            self.logger.warning(f'message signature is not matching! Discarding message')
            return web.Response(status=403)
        await self._activate_callback(data.get('subscription').get('id'))
        # Twitch requires the raw challenge echoed back to confirm the hook.
        return web.Response(text=data.get('challenge'))

    async def _handle_revokation(self, data):
        sub_id: str = data.get('subscription', {}).get('id')
        self.logger.debug(f'got revocation of subscription {sub_id} for reason {data.get("subscription").get("status")}')
        if sub_id not in self._callbacks.keys():
            self.logger.warning(f'unknown subscription {sub_id} got revoked. ignore')
            return
        self._callbacks.pop(sub_id)
        if self.revokation_handler is not None:
            t = self._callback_loop.create_task(self.revokation_handler(data))
            t.add_done_callback(self._task_callback)

    async def __handle_callback(self, request: 'web.Request'):
        try:
            data: dict = await request.json()
        except JSONDecodeError:
            self.logger.error('got request with malformed body! Discarding message')
            return web.Response(status=400)
        if data.get('challenge') is not None:
            return await self.__handle_challenge(request, data)
        sub_id = data.get('subscription', {}).get('id')
        callback = self._callbacks.get(sub_id)
        if callback is None:
            self.logger.error(f'received event for unknown subscription with ID {sub_id}')
        else:
            if not await self._verify_signature(request):
                self.logger.warning(f'message signature is not matching! Discarding message')
                return web.Response(status=403)
            msg_type = request.headers['Twitch-Eventsub-Message-Type']
            if msg_type.lower() == 'revocation':
                await self._handle_revokation(data)
            else:
                dat = callback['event'](**data)
                t = self._callback_loop.create_task(callback['callback'](dat))
                t.add_done_callback(self._task_callback)
        return web.Response(status=200)
|
# coding:utf8
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StringType, IntegerType
if __name__ == '__main__':
    # 0. Build the SparkSession entry point (local mode, all cores).
    spark = SparkSession.builder.\
        appName("test").\
        master("local[*]").\
        getOrCreate()
    sc = spark.sparkContext
    # Convert an RDD into a DataFrame: parse "name,age" lines into tuples.
    rdd = sc.textFile("../data/input/sql/people.txt").\
        map(lambda x: x.split(",")).\
        map(lambda x: (x[0], int(x[1])))
    # Describe the table structure with a StructType object.
    schema = StructType().add("name", StringType(), nullable=True).\
        add("age", IntegerType(), nullable=False)
    # Build the RDD -> DataFrame conversion using the StructType.
    df = spark.createDataFrame(rdd, schema=schema)
    df.printSchema()
    df.show()
|
from fastapi import APIRouter
from app.api.api_v1.endpoints import quotes, login, users, utils, tags
# Aggregate every v1 endpoint module under one APIRouter; the tags drive
# the OpenAPI docs grouping.
api_router = APIRouter()
api_router.include_router(login.router, tags=["login"])
api_router.include_router(users.router, prefix="/users", tags=["users"])
api_router.include_router(utils.router, prefix="/utils", tags=["utils"])
api_router.include_router(quotes.router, prefix="/quotes", tags=["quotes"])
api_router.include_router(tags.router, prefix="/tags", tags=["tags"])
|
from __future__ import absolute_import, division, print_function, unicode_literals # isort:skip # noqa
import unittest
from textwrap import dedent
from typing import (
DefaultDict,
Dict,
List,
Mapping,
Optional,
Set,
Tuple,
Union,
)
import six
from ..base import generate_interfaces
LONG_TYPE = long if six.PY2 else int # noqa
class SomeOtherType(object):
    # Stand-in for an arbitrary user-defined type in the tests below.
    pass
class BaseTypeConversionTestCase(unittest.TestCase):
    """Tests for generate_interfaces: Python type specs -> TypeScript interfaces."""
    # NOTE(review): the indentation inside the dedent-ed `expected` literals
    # was reconstructed (whitespace was mangled in this copy) — confirm the
    # two-space field indent against the generator's actual output.

    def test_basic_types(self):
        """Scalar, named, union and nullable specs map to the expected fields."""
        actual = generate_interfaces([
            ('SomeName', {
                'foo': six.binary_type,
                'bar': six.text_type,
                'a_bool': bool,
                'an_int': int,
                'a_long': LONG_TYPE,
                'anything_else': SomeOtherType,
                'named_type': 'SomeOtherName',
                'named_bytes_type': b'SomeOtherName',
                'list_type': list,
                'object_type': dict,
                'string_or_number': (six.text_type, int),
                'null_type': None,
                'list_or_null': (None, list),
                'list_or_null_or_null': (None, list, None),
                'not_a_type': SomeOtherType(),
                'string_set': Set[six.text_type],
                'optional_string': Optional[six.text_type],
            }),
        ])
        expected = """\
        interface SomeName {
          a_bool: boolean
          a_long: number
          an_int: number
          anything_else: any
          bar: string
          foo: string
          list_or_null: any[] | null
          list_or_null_or_null: any[] | null
          list_type: any[]
          named_bytes_type: SomeOtherName
          named_type: SomeOtherName
          not_a_type: any
          null_type: null
          object_type: object
          optional_string: string | null
          string_or_number: number | string
          string_set: string[]
        }
        """
        assert actual == dedent(expected)

    def test_multiple_interfaces(self):
        """Each (name, spec) pair yields its own interface, in order."""
        actual = generate_interfaces([
            ('FirstInterface', {
                'x': six.binary_type,
            }),
            ('SecondInterface', {
                'y': int,
            }),
        ])
        expected = """\
        interface FirstInterface {
          x: string
        }

        interface SecondInterface {
          y: number
        }
        """
        assert actual == dedent(expected)

    def test_generic_types(self):
        """typing generics (List/Mapping/Tuple/Union) map to TS generics; redundant unions collapse."""
        actual = generate_interfaces([
            ('SomeOtherName', {
                'float_list': List[float],
                'number_or_string_list': List[Union[six.text_type, int]],
                'various_redundant_unions': (
                    Union[str, str, int],
                    int,
                    str,
                    Union[int, str],
                ),
                'redundant_union_of_lists': (
                    List[str],
                    Union[List[str], List[int]],
                    List[int],
                ),
                'list_of_lists': List[List[str]],
                'map_to_numbers': Mapping[str, int],
                'map_to_union': Mapping[int, Union[int, List[str]]],
                'dict': Dict[int, List[str]],
                'default_dict': DefaultDict[int, List[str]],
                'simple_tuple': Tuple[int, str, int],
                'complex_tuple': Tuple[
                    Tuple[int, str],
                    Union[List[str], List[int]],
                ],
            }),
        ])
        expected = """\
        interface SomeOtherName {
          complex_tuple: [[number, string], number[] | string[]]
          default_dict: {[key: string]: string[]}
          dict: {[key: string]: string[]}
          float_list: number[]
          list_of_lists: string[][]
          map_to_numbers: {[key: string]: number}
          map_to_union: {[key: string]: number | string[]}
          number_or_string_list: (number | string)[]
          redundant_union_of_lists: number[] | string[]
          simple_tuple: [number, string, number]
          various_redundant_unions: number | string
        }
        """
        assert actual == dedent(expected)
|
import psycopg2
import os
from dotenv import load_dotenv
import json
from psycopg2.extras import execute_values
# Load DB credentials from the environment (.env file).
load_dotenv()
DB_NAME = os.getenv("DB_NAME")
DB_USER = os.getenv("DB_USER")
DB_PASSWORD = os.getenv("DB_PASSWORD")
DB_HOST = os.getenv("DB_HOST")

connection = psycopg2.connect(dbname=DB_NAME, user=DB_USER,
                              password=DB_PASSWORD, host=DB_HOST)
print("CONNECTION", connection)  # fixed typo: message previously read "CONNETION"
cursor = connection.cursor()
print("CURSOR", cursor)

# Create the demo table on first run (idempotent).
query = '''
CREATE TABLE IF NOT EXISTS test_table (
    id SERIAL PRIMARY KEY,
    name varchar(40) NOT NULL,
    data JSONB
);
'''
cursor.execute(query)

cursor.execute('SELECT * from test_table;')
result = cursor.fetchall()
print("RESULT:", len(result))

'''
###Approach 1: hard coded
insertion_query = """
INSERT INTO test_table (name, data) VALUES
(
'A row name',
null
),
(
'Another row, with JSON',
'{ "a": 1, "b": ["dog", "cat", 42], "c": true }'::JSONB
);
"""
cursor.execute(insertion_query)
'''

# APPROACH 3 (multi-row insert!): execute_values expands the VALUES %s
# placeholder into one row per tuple, with proper parameter escaping.
my_dict = { "a": 1, "b": ["dog", "cat", 42], "c": 'true' }
insertion_query = "INSERT INTO test_table (name, data) VALUES %s"
execute_values(cursor, insertion_query, [
    ('A rowwwww', 'null'),
    ('Another row, with JSONNNNN', json.dumps(my_dict)),
    ('Third row', "3")
])

cursor.execute('SELECT * from test_table;')
result = cursor.fetchall()
print("RESULT:", len(result))

## Save the transactions
connection.commit()
|
from app import manager, db
from models import Role
from main import * # noqa: F401, F403
@manager.command
def insert():
    """Seed the database with the application's default roles."""
    db.session.add_all(Role.app_roles())
    db.session.commit()

if __name__ == '__main__':
    # Dispatch Flask-Script CLI commands (including `insert` above).
    manager.run()
|
## 0. Copia un texto largo como variable string texto.
## 1. Normaliza el texto: elimina caracteres extraños y pasa todo a minúsculas.
## 2. Estadísticas de palabras (contar palabras).
## 3. Estadísticas de transición de palabras (modelo 2-gram) [usa sklearn CountVectorizer].
## 4. Usando NLTK, haz un etiquetado gramatical (POS tagging) de una frase.
## 5. Dibuja un árbol léxico-gramatical con la ayuda de NLTK.
|
'''
11. Canvas is the tkinter widget that serves as a drawing/output area.
'''
from tkinter import *

# 1. Create the GUI root window.
root = Tk()
# 2. Customize the GUI.
# I. Canvas widget: 100x50 px with a blue background.
canvas_widget = Canvas(root, bg="blue", width=100, height= 50)
canvas_widget.pack()
# 3. Show the GUI (blocks until the window is closed).
root.mainloop()
from appJar import gui
from Controller.DeviceManager import *
import os, sys
from threading import Timer
import time
import copy
from Controller.LogManager import *
import os
import platform
import subprocess
from Controller.TestManager import *
class MainWindow:
    """appJar main window for the COS USB KEY test tool.

    A threading.Timer periodically refreshes the status bars, log list and
    test list; the timer re-arms itself at the end of every update() tick.
    """
    # Periodic refresh timer (threading.Timer, re-armed on every tick).
    timer = None
    # Single shared appJar gui instance.
    app = gui("COS USB KEY")
    # Snapshot of LogManager().logList used to detect newly added entries.
    logList = []

    def update(self):
        """Refresh status bars and list boxes, then schedule the next tick."""
        if DeviceManager().ifDLLPathExists():
            self.app.setStatusbar("动态库加载于 " + DeviceManager().getDLLPath(), 0)
        else:
            self.app.setStatusbar("动态库不存在于 " + DeviceManager().getDLLPath(), 0)
        self.app.setStatusbar("设备个数 = " + str(DeviceManager().getDeviceCount()), 1)
        # print("currentLogIDs \n")
        currentLogIDs = [o.getID() for o in self.logList]
        updatedLogIDs = [o.getID() for o in LogManager().logList]
        self.logList = copy.copy(LogManager().logList)
        # Only log entries that appeared since the previous tick are appended.
        addedLogIDs = [item for item in updatedLogIDs if item not in currentLogIDs]
        # print(str(len(currentLogIDs)) + " " + str(len(updatedLogIDs)) + " " + str(len(addedLogIDs)))
        addedLogs = []
        for logID in addedLogIDs:
            logInstance = next((item for item in self.logList if item.getID() == logID),None)
            if logInstance != None:
                addedLogs.append(logInstance.stringRepresentation())
        self.app.addListItems("logListBox",addedLogs)
        # Rebuild the test list while preserving the current selection.
        selected = self.app.getListItems("testListBox")
        self.app.clearListBox("testListBox")
        self.app.addListItems("testListBox", TestManager().listOfInfo())
        self.app.selectListItem("testListBox",selected)
        # Re-arm: Timer fires once, so a fresh one is created each tick.
        self.timer = Timer(0.5, self.update)
        self.timer.start()

    def topMenuPress(self,name):
        """Toolbar dispatcher: *name* is the icon/button identifier."""
        if name == "REFRESH":
            self.app.setStatusbar("设备个数 = " + str(DeviceManager().getDeviceCount()), 1)
        elif name == "HELP":
            self.app.infoBox("COS TEST TOOL 帮助", "COS USB KEY 测试软件")
        elif name == "OFF":
            sys.exit()
        elif name == "SETTINGS":
            pass
        elif name == "SAVE":
            LogManager().saveLogs()
        elif name == "OPEN":
            # Open the log directory with the platform's file browser.
            path = LogManager().getLogsPath()
            if platform.system() == "Windows":
                os.startfile(path)
            elif platform.system() == "Darwin":
                subprocess.Popen(["open", path])
            else:
                subprocess.Popen(["xdg-open", path])
        elif name == 'NEW':
            # Prompt for a raw APDU string and send it to the device.
            inputString = self.app.textBox("Input APDU","APDU HERE")
            if inputString != None:
                dict = DeviceManager().sendAPDUStr(inputString)
                print(dict)
            pass

    def runButtonPress(self,btn):
        """Run every test currently selected in the test list box."""
        if DeviceManager().getDeviceCount() <= 0:
            self.app.warningBox("当前无设备链接", "请插入 USB KEY")
            return
        selected = self.app.getListItems("testListBox")
        print(selected)
        try:
            for name in selected:
                TestManager().runTest(name)
        except:
            # NOTE(review): bare except deliberately swallows test errors so
            # one failing test cannot kill the GUI — consider logging here.
            # LogManager().addLogStr("ERROR WHEN RUN TEST",LogType.Error,TestType.COMMON_EVENT)
            pass
        pass

    def __init__(self):
        """Build the window layout, start the refresh timer and enter the GUI loop."""
        DeviceManager().loadDLL()
        self.app.setSticky("news")
        self.app.setExpand("both")
        self.app.createMenu("Connect")
        tools = ["REFRESH","OPEN" ,"SAVE", "SETTINGS", "HELP", "OFF","NEW"]
        self.app.addToolbar(tools, self.topMenuPress, findIcon=True)
        self.app.addStatusbar(fields=2)
        self.app.addLabel("infoText", "COS USB KEY 测试软件")
        self.app.setStatusbarWidth(70, 0)
        self.app.startPanedFrame("p1",)
        self.app.startLabelFrame("Tests")
        self.app.setSticky("nesw")
        self.app.addListBox("testListBox", [])
        self.app.addButton("Run", self.runButtonPress)
        self.app.stopLabelFrame()
        self.app.startPanedFrame("p2")
        self.app.startLabelFrame("Log")
        self.app.setSticky("nesw")
        self.app.addListBox("logListBox", [])
        self.app.stopLabelFrame()
        self.app.stopPanedFrame()
        self.app.stopPanedFrame()
        # First refresh shortly after startup; app.go() blocks until exit.
        self.timer = Timer(0.2, self.update)
        self.timer.start()
        self.app.go()

    def __del__(self):
        pass
|
# Advent of Code 2019 day 3: read the two comma-separated wire paths.
with open('input.txt') as f:
    paths = [ line.strip().split(',') for line in f ]

# Unit step vector for each direction letter.
v = {'L': (-1, 0), 'R': (1, 0), 'U': (0, 1), 'D': (0, -1)}
def add(p0, p1):
    """Component-wise sum of two 2-D integer points."""
    x0, y0 = p0
    x1, y1 = p1
    return (x0 + x1, y0 + y1)
def sample_path(path):
    """Trace one wire path and return (locations, dists).

    *path* is a list of commands like 'R8' or 'U5'. Returns the set of all
    grid cells visited and a dict mapping each cell to the wire length at
    its first visit.

    The original popped commands from the front (O(n) per pop) and thereby
    destructively emptied the caller's list; this version just iterates and
    leaves the input untouched.
    """
    moves = {'L': (-1, 0), 'R': (1, 0), 'U': (0, 1), 'D': (0, -1)}
    x, y = 0, 0
    locations = set()
    dists = {}
    length = 0
    for cmd in path:
        dx, dy = moves[cmd[0]]
        for _ in range(int(cmd[1:])):
            length += 1
            x, y = x + dx, y + dy
            p = (x, y)
            locations.add(p)
            # Record only the first (shortest) arrival distance.
            dists.setdefault(p, length)
    return locations, dists
def manhattan(intersects):
    """Smallest Manhattan (L1) distance from the origin among *intersects*."""
    return min(abs(x) + abs(y) for x, y in intersects)
def wire(intersects, dists_0, dists_1):
    """Smallest combined wire length over both paths among *intersects*."""
    return min(dists_0[p] + dists_1[p] for p in intersects)
# Sample both wires, intersect their visited cells, and print both answers.
locs_0, dists_0 = sample_path(paths[0])
locs_1, dists_1 = sample_path(paths[1])
intersects = locs_0 & locs_1
print("Star1: {}".format(manhattan(intersects)))
print("Star2: {}".format(wire(intersects, dists_0, dists_1)))
|
import unittest
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
from app.models import User
class UserModelTestCase(unittest.TestCase):
    """Placeholder unit tests for the User model."""

    def test_something(self):
        # The original asserted `'something' is not None` — comparing a
        # literal with `is` raises a SyntaxWarning on CPython 3.8+ and is
        # trivially True; state the intent with the dedicated assertion.
        self.assertIsNotNone('something')
if __name__ == '__main__':
    # Discover and run the test cases defined in this module.
    unittest.main()
# !/usr/bin/env python
# coding=utf8
__version__ = 'v1.0' # 先写个网易的测试一下
import time
import requests
from news_Setting import HEADER_NET
class Crawl():
    """requests.Session wrapper with retrying GET helpers for news crawling."""

    # Status codes whose body is accepted as a final answer (403/404 pages
    # are still returned to the caller rather than retried).
    _ACCEPTED_STATUS = (200, 403, 404)

    def __init__(self):
        self.session = requests.Session()
        headers = HEADER_NET
        self.session.headers.update(headers)

    # Fetch a page (private, internal use); retries up to 5 times.
    def __get_content(self, url, params, pagecode):
        """Return the page body decoded with *pagecode*, or 'no_content'
        when all 5 attempts fail.

        The fallback is initialized before the loop: in the original it sat
        after the retry loop, where it would clobber a successful response
        (or be dead code, depending on the lost indentation).
        """
        response = 'no_content'
        for _ in range(5):
            res = self.session.get(url, params=params)
            # print(res.headers)
            if res.status_code in self._ACCEPTED_STATUS:
                response = res.content.decode(pagecode)
                break
        return response

    def __post_content(self, url, data):
        # Placeholder: POST support is not implemented yet.
        pass

    def __get_likejson(self, url, params):
        """Return the response text (JSON-like), or 'no_data' when all
        5 attempts fail."""
        response = 'no_data'
        for _ in range(5):
            res = self.session.get(url, params=params)
            if res.status_code in self._ACCEPTED_STATUS:
                response = res.text
                break
        return response

    # NetEase entry point.
    def page_net(self, url, params=None):
        return self.__get_likejson(url, params)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.