text stringlengths 38 1.54M |
|---|
import urllib.request
import requests
class HtmlUtil:
    """Tiny helper for fetching/posting pages at a base URL plus a suffix."""

    def __init__(self, url):
        self.url = url
        self.suffix = ""

    def changeUrl(self, url):
        """Replace the base URL."""
        self.url = url

    def setSuffix(self, suffix):
        """Set the path suffix appended to the base URL on each request."""
        self.suffix = suffix

    def getPage(self):
        """GET self.url + self.suffix and return the body decoded as UTF-8."""
        req = urllib.request.Request(self.url + self.suffix)
        # Context manager ensures the HTTP response is closed (the original
        # leaked the connection).
        with urllib.request.urlopen(req) as resp:
            return resp.read().decode('utf-8')

    def postPage(self, data):
        """POST form-encoded *data* to self.url + self.suffix; return response text."""
        response = requests.post(
            self.url + self.suffix, data,
            headers={'Content-Type': 'application/x-www-form-urlencoded'})
        return response.text
#!/usr/bin/env python3
import os
import logging
import signal
import traceback
import zulip
import sys
import argparse
import re
import configparser
from collections import OrderedDict
from types import FrameType
from typing import Any, Callable, Dict, Optional
from matrix_client.errors import MatrixRequestError
from matrix_client.client import MatrixClient
from requests.exceptions import MissingSchema
# Matches bridged-network nicks like '@freenode_user:matrix.org'; group(1) is the bare nick.
GENERAL_NETWORK_USERNAME_REGEX = '@_?[a-zA-Z0-9]+_([a-zA-Z0-9-_]+):[a-zA-Z0-9.]+'
# Matches plain matrix.org users like '@user:matrix.org'; group(1) is the username.
MATRIX_USERNAME_REGEX = '@([a-zA-Z0-9-_]+):matrix.org'
# change these templates to change the format of displayed message
ZULIP_MESSAGE_TEMPLATE = "**{username}**: {message}"
MATRIX_MESSAGE_TEMPLATE = "<{username}> {message}"
class Bridge_ConfigException(Exception):
    """Raised when the bridge configuration file is missing or malformed."""
    pass
class Bridge_FatalMatrixException(Exception):
    """Raised for unrecoverable Matrix-side failures (bad auth, bad room)."""
    pass
class Bridge_ZulipFatalException(Exception):
    """Raised for unrecoverable Zulip-side failures (API errors)."""
    pass
def matrix_login(matrix_client: Any, matrix_config: Dict[str, Any]) -> None:
    """Log in to Matrix with the configured username and password.

    Raises Bridge_FatalMatrixException (with the original error chained)
    on authentication failure or a malformed server URL.
    """
    try:
        matrix_client.login_with_password(matrix_config["username"],
                                          matrix_config["password"])
    except MatrixRequestError as exception:
        # 403 is returned for bad credentials; anything else points at the
        # server configuration.
        if exception.code == 403:
            raise Bridge_FatalMatrixException("Bad username or password.") from exception
        else:
            raise Bridge_FatalMatrixException("Check if your server details are correct.") from exception
    except MissingSchema as exception:
        raise Bridge_FatalMatrixException("Bad URL format.") from exception
def matrix_join_room(matrix_client: Any, matrix_config: Dict[str, Any]) -> Any:
    """Join the configured Matrix room and return the room object.

    Raises Bridge_FatalMatrixException (with the original error chained)
    when the room id is malformed or the room cannot be found.
    """
    try:
        room = matrix_client.join_room(matrix_config["room_id"])
        return room
    except MatrixRequestError as exception:
        if exception.code == 403:
            raise Bridge_FatalMatrixException("Room ID/Alias in the wrong format") from exception
        else:
            raise Bridge_FatalMatrixException("Couldn't find room.") from exception
def die(signal: int, frame: FrameType) -> None:
    """SIGINT handler: terminate the process immediately.

    os._exit bypasses SystemExit handling so the backoff/restart loop in
    main() cannot catch the exit and restart the bridge.
    """
    # We actually want to exit, so run os._exit (so as not to be caught and restarted)
    os._exit(1)
def matrix_to_zulip(
    zulip_client: zulip.Client,
    zulip_config: Dict[str, Any],
    matrix_config: Dict[str, Any],
    no_noise: bool,
) -> Callable[[Any, Dict[str, Any]], None]:
    """Build a Matrix room-event listener that forwards messages to Zulip.

    The returned callback renders each event with
    get_message_content_from_event and posts it to the configured Zulip
    stream/topic, skipping events produced by the bridge bot itself.
    """
    def _matrix_to_zulip(room: Any, event: Dict[str, Any]) -> None:
        """Matrix -> Zulip"""
        content = get_message_content_from_event(event, no_noise)
        # NOTE(review): the bot's homeserver is assumed to be matrix.org here;
        # confirm against matrix_config['host'] before reusing elsewhere.
        zulip_bot_user = ('@%s:matrix.org' % matrix_config['username'])
        # We do this to identify the messages generated from Zulip -> Matrix
        # and we make sure we don't forward it again to the Zulip stream.
        not_from_zulip_bot = (
            'body' not in event['content']
            or event['sender'] != zulip_bot_user
        )
        if not_from_zulip_bot and content:
            try:
                result = zulip_client.send_message({
                    "type": "stream",
                    "to": zulip_config["stream"],
                    "subject": zulip_config["topic"],
                    "content": content,
                })
            except Exception as exception:  # XXX This should be more specific
                # Generally raised when user is forbidden; chain the cause.
                raise Bridge_ZulipFatalException(exception) from exception
            if result['result'] != 'success':
                # Generally raised when API key is invalid
                raise Bridge_ZulipFatalException(result['msg'])
    return _matrix_to_zulip
def get_message_content_from_event(event: Dict[str, Any], no_noise: bool) -> Optional[str]:
    """Render a Matrix room event as a Zulip message string.

    Returns None for events that should not be forwarded: join/leave noise
    when no_noise is set, unknown membership changes, and message subtypes
    other than m.text / m.emote. (The original left `content` unbound on
    those paths and raised UnboundLocalError at the return.)
    """
    content = None  # default: event produces no Zulip message
    irc_nick = shorten_irc_nick(event['sender'])
    if event['type'] == "m.room.member":
        if no_noise:
            return None
        # Join and leave events can be noisy. They are ignored by default.
        # To enable these events pass `no_noise` as `False` as the script argument
        if event['membership'] == "join":
            content = ZULIP_MESSAGE_TEMPLATE.format(username=irc_nick,
                                                    message="joined")
        elif event['membership'] == "leave":
            content = ZULIP_MESSAGE_TEMPLATE.format(username=irc_nick,
                                                    message="quit")
    elif event['type'] == "m.room.message":
        if event['content']['msgtype'] in ("m.text", "m.emote"):
            content = ZULIP_MESSAGE_TEMPLATE.format(username=irc_nick,
                                                    message=event['content']['body'])
    else:
        content = event['type']
    return content
def shorten_irc_nick(nick: str) -> str:
    """
    Add nick shortner functions for specific IRC networks
    Eg: For freenode change '@freenode_user:matrix.org' to 'user'
    Check the list of IRC networks here:
    https://github.com/matrix-org/matrix-appservice-irc/wiki/Bridged-IRC-networks
    """
    # Try the bridged-network pattern first, then plain Matrix users;
    # fall back to the raw sender id unchanged.
    for pattern in (GENERAL_NETWORK_USERNAME_REGEX, MATRIX_USERNAME_REGEX):
        matched = re.match(pattern, nick)
        if matched:
            return matched.group(1)
    return nick
def zulip_to_matrix(config: Dict[str, Any], room: Any) -> Callable[[Dict[str, Any]], None]:
    """Build a Zulip message callback that mirrors valid messages into *room*."""
    def _zulip_to_matrix(msg: Dict[str, Any]) -> None:
        """
        Zulip -> Matrix
        """
        # Ignore anything outside the configured stream/topic or sent by the bot.
        if not check_zulip_message_validity(msg, config):
            return
        sender = msg["sender_full_name"].replace(' ', '')
        rendered = MATRIX_MESSAGE_TEMPLATE.format(username=sender,
                                                  message=msg["content"])
        # Forward Zulip message to Matrix
        room.send_text(rendered)
    return _zulip_to_matrix
def check_zulip_message_validity(msg: Dict[str, Any], config: Dict[str, Any]) -> bool:
    """Return True when *msg* should be mirrored to Matrix.

    A message qualifies when it is a stream message in the configured
    stream and topic, and was not sent by the bridge bot itself (which
    would echo Matrix -> Zulip traffic straight back to Matrix).
    """
    is_a_stream = msg["type"] == "stream"
    in_the_specified_stream = msg["display_recipient"] == config["stream"]
    at_the_specified_subject = msg["subject"] == config["topic"]
    # We do this to identify the messages generated from Matrix -> Zulip
    # and we make sure we don't forward it again to the Matrix.
    not_from_zulip_bot = msg["sender_email"] != config["email"]
    # Return the conjunction directly instead of `if ...: return True`.
    return (is_a_stream and not_from_zulip_bot
            and in_the_specified_stream and at_the_specified_subject)
def generate_parser() -> argparse.ArgumentParser:
    """Build the command-line parser for the bridge script."""
    description = """
    Script to bridge between a topic in a Zulip stream, and a Matrix channel.
    Tested connections:
    * Zulip <-> Matrix channel
    * Zulip <-> IRC channel (bridged via Matrix)
    Example matrix 'room_id' options might be, if via matrix.org:
    * #zulip:matrix.org (zulip channel on Matrix)
    * #freenode_#zulip:matrix.org (zulip channel on irc.freenode.net)"""
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter,
        description=description,
    )
    arg_parser.add_argument(
        '-c', '--config', required=False,
        help="Path to the config file for the bridge.")
    arg_parser.add_argument(
        '--write-sample-config', metavar='PATH', dest='sample_config',
        help="Generate a configuration template at the specified location.")
    arg_parser.add_argument(
        '--from-zuliprc', metavar='ZULIPRC', dest='zuliprc',
        help="Optional path to zuliprc file for bot, when using --write-sample-config")
    # Join/leave forwarding is off by default; this flag flips no_noise to False.
    arg_parser.add_argument(
        '--show-join-leave', dest='no_noise',
        default=True, action='store_false',
        help="Enable IRC join/leave events.")
    return arg_parser
def read_configuration(config_file: str) -> Dict[str, Dict[str, str]]:
    """Parse the bridge config file into {'matrix': {...}, 'zulip': {...}}.

    Raises Bridge_ConfigException on parse errors or unexpected sections.
    """
    parser = configparser.ConfigParser()
    try:
        parser.read(config_file)
    except configparser.Error as exception:
        raise Bridge_ConfigException(str(exception))
    if {'matrix', 'zulip'} != set(parser.sections()):
        raise Bridge_ConfigException("Please ensure the configuration has zulip & matrix sections.")
    # TODO Could add more checks for configuration content here
    return {name: dict(parser[name]) for name in parser.sections()}
def write_sample_config(target_path: str, zuliprc: Optional[str]) -> None:
    """Write a template bridge configuration to *target_path*.

    When *zuliprc* is given, the [zulip] section is pre-filled from that
    file's [api] section. Refuses to overwrite an existing file; raises
    Bridge_ConfigException on any problem.
    """
    if os.path.exists(target_path):
        raise Bridge_ConfigException("Path '{}' exists; not overwriting existing file.".format(target_path))
    # Plain dict literals preserve insertion order (Python 3.7+), so the
    # emitted file keeps this section/key ordering.
    template = {
        'matrix': {
            'host': 'https://matrix.org',
            'username': 'username',
            'password': 'password',
            'room_id': '#zulip:matrix.org',
        },
        'zulip': {
            'email': 'glitch-bot@chat.zulip.org',
            'api_key': 'aPiKeY',
            'site': 'https://chat.zulip.org',
            'stream': 'test here',
            'topic': 'matrix',
        },
    }
    if zuliprc is not None:
        if not os.path.exists(zuliprc):
            raise Bridge_ConfigException("Zuliprc file '{}' does not exist.".format(zuliprc))
        zuliprc_config = configparser.ConfigParser()
        try:
            zuliprc_config.read(zuliprc)
        except configparser.Error as exception:
            raise Bridge_ConfigException(str(exception))
        # Can add more checks for validity of zuliprc file here
        template['zulip']['email'] = zuliprc_config['api']['email']
        template['zulip']['site'] = zuliprc_config['api']['site']
        template['zulip']['api_key'] = zuliprc_config['api']['key']
    sample = configparser.ConfigParser()
    sample.read_dict(template)
    with open(target_path, 'w') as target:
        sample.write(target)
def main() -> None:
    """Entry point: parse args, then either write a sample config or run the
    bridge loop, reconnecting with random exponential backoff on failure."""
    # Exit hard on Ctrl-C so the backoff loop below cannot swallow it.
    signal.signal(signal.SIGINT, die)
    logging.basicConfig(level=logging.WARNING)
    parser = generate_parser()
    options = parser.parse_args()
    if options.sample_config:
        # --write-sample-config mode: emit the template and exit.
        try:
            write_sample_config(options.sample_config, options.zuliprc)
        except Bridge_ConfigException as exception:
            print("Could not write sample config: {}".format(exception))
            sys.exit(1)
        if options.zuliprc is None:
            print("Wrote sample configuration to '{}'".format(options.sample_config))
        else:
            print("Wrote sample configuration to '{}' using zuliprc file '{}'"
                  .format(options.sample_config, options.zuliprc))
        sys.exit(0)
    elif not options.config:
        print("Options required: -c or --config to run, OR --write-sample-config.")
        parser.print_usage()
        sys.exit(1)
    try:
        config = read_configuration(options.config)
    except Bridge_ConfigException as exception:
        print("Could not parse config file: {}".format(exception))
        sys.exit(1)
    # Get config for each client
    zulip_config = config["zulip"]
    matrix_config = config["matrix"]
    # Initiate clients
    backoff = zulip.RandomExponentialBackoff(timeout_success_equivalent=300)
    while backoff.keep_going():
        print("Starting matrix mirroring bot")
        try:
            zulip_client = zulip.Client(email=zulip_config["email"],
                                        api_key=zulip_config["api_key"],
                                        site=zulip_config["site"])
            matrix_client = MatrixClient(matrix_config["host"])
            # Login to Matrix
            matrix_login(matrix_client, matrix_config)
            # Join a room in Matrix
            room = matrix_join_room(matrix_client, matrix_config)
            room.add_listener(matrix_to_zulip(zulip_client, zulip_config, matrix_config,
                                              options.no_noise))
            print("Starting listener thread on Matrix client")
            matrix_client.start_listener_thread()
            # call_on_each_message blocks for the lifetime of the connection.
            print("Starting message handler on Zulip client")
            zulip_client.call_on_each_message(zulip_to_matrix(zulip_config, room))
        except Bridge_FatalMatrixException as exception:
            sys.exit("Matrix bridge error: {}".format(exception))
        except Bridge_ZulipFatalException as exception:
            sys.exit("Zulip bridge error: {}".format(exception))
        except zulip.ZulipError as exception:
            sys.exit("Zulip error: {}".format(exception))
        except Exception:
            # Unexpected errors are logged and retried with backoff.
            traceback.print_exc()
        backoff.fail()
if __name__ == '__main__':
    main()
|
#########################################
# Triangle - Terrain
#########################################
scl = 30  # grid cell size in pixels
t = 0  # animation time, advanced at the end of each draw()
DELAY = 5 * 100  # pause length in milliseconds used by pauseFrame()
def setup():
    """Processing setup: fix the canvas size and derive terrain grid extents."""
    size(800, 800)
    global w
    global h
    global cols
    global rows
    global terrain
    w = 1400
    h = 1200
    # Floor division keeps cols/rows integral so draw()'s range() calls also
    # work when the sketch runs under Python 3 (plain / yields 46.67 here);
    # under Processing's Python 2 mode int/int division gave the same values.
    cols = w // scl
    rows = h // scl
def draw():
    """Processing draw loop: plot a quarter-grid of drifting terrain points."""
    global t
    background(242, 242, 242)
    translate(width / 2, height / 2)
    # int(... // 2) keeps the loop bounds integral so range() also works if
    # cols/rows ever arrive as floats (Python 3); identical under Python 2.
    for y in range(int(rows // 2)):
        for x in range(int(cols // 2)):
            # Alpha grows with x, fading points in from the left.
            stroke(51, 51, 51, x * 10)
            strokeWeight(10)
            noFill()
            point(xx(t + x * scl), yy(t + y * scl))
    t = t + 0.05
def xx(t):
    """Horizontal coordinate: sine of t/10, scaled to +/-100."""
    return 100 * sin(t / 10)
def yy(t):
    """Vertical coordinate: cosine of t/10, scaled to +/-200."""
    return 200 * cos(t / 10)
def mousePressed():
    """On click: pause briefly, then save the current frame to disk."""
    pauseFrame()
    # Processing replaces #### in the filename with the current frame number.
    saveFrame('movie_frames/lineDots_SingleFrame/points_####.png')
def pauseFrame():
    """Block the sketch for DELAY milliseconds via Processing's delay()."""
    delay(DELAY)
|
from itertools import product
import numpy as np
def mohapatra(a, b):
    """ Matrix multiplication for nxn-matrix of positive integers
    Inspired by
    https://github.com/ShrohanMohapatra/matrix_multiply_quadratic
    and reference:
    S. Mohapatra, (2018).
    "A new quadratic-time number-theoretic algorithm to solve
    matrix multiplication problem"
    https://arxiv.org/abs/1806.03701
    tips:
    a, b: given matrixes
    n: length of rows/columns
    m: digit of the max value among matrix a and b
    p: digit of sum of n values of mxm digit
    pad1: padding 0s for separating rows/columns values, or
    effective digits in the solution.
    pad2: interesting position.
    """
    # Local stdlib import: the original used np.math.log10, but the np.math
    # alias is deprecated and removed in NumPy >= 1.25.
    import math
    n = min(a.shape)
    dom = range(n)
    m = int(math.log10(max(a.max(), b.max()))) + 1
    p = int(math.log10((10**(2*m) - 1) * n)) + 1
    pad1, pad2 = 10**p, 10**(p*(n-1))
    # Pack each row of a (and reversed column of b) into one big integer;
    # int(...) promotes away from fixed-width numpy ints before multiplying.
    c = [int(sum(a[i][j]*(pad2//(pad1**j)) for j in dom)) for i in dom]
    d = [int(sum(b[-i-1][j]*(pad2//(pad1**i)) for i in dom)) for j in dom]
    e = np.zeros((n, n))
    for i, j in product(dom, repeat=2):
        # The digit block at pad2 of c[i]*d[j] is exactly row_i . col_j.
        e[i][j] = int(c[i]*d[j]//pad2) % pad1
    return e
def _test(a, b):
print(a)
print(b)
n = min(a.shape)
return all(a[i][j] == b[i][j] for i, j in product(range(n), repeat=2))
if __name__ == '__main__':
    # Smoke test: compare the number-theoretic product against numpy's @.
    a = np.arange(91,100).reshape((3,3))
    b = np.arange(91,100)[::-1].reshape((3,3))
    print(a)
    print(b)
    print('-----------------')
    print(_test(mohapatra(a, b), a @ b))
|
from resources.models import Users, Posts, Likes
def menu():
    """Display the main menu and return the user's raw selection string."""
    for line in (
        "MENU",
        "1. Crear usuario",
        "2. Mostrar usuarios",
        "3. Acceder",
        "4. Salir",
    ):
        print(line)
    selection = input("Ingrese su selección: ")
    print("----------------------------------")
    return selection
def menu_log():
    """Display the logged-in menu and return the user's raw selection string."""
    for line in (
        "MENU",
        "1. Crear post",
        "2. Likear post",
        "3. Borrar post",
        "4. Salir",
    ):
        print(line)
    selection = input("Ingrese su selección: ")
    print("----------------------------------")
    return selection
def create_user():
    """Prompt for user details, persist a new Users row, and report the count."""
    name = input("Ingrese el nombre: ")
    last_name = input("Ingrese el apellido: ")
    email = input("Ingrese el email: ")
    user_name = input("Ingrese el user name: ")
    new_user = Users(name = name, last_name = last_name, email = email, user_name = user_name)
    new_user.save()
    print("Cantidad despues: ", Users.objects.all().count())
    print("----------------------------------")
def show_users():
    """Print every registered user as 'pk=N: name last_name - user_name - email'."""
    print("Usuarios:")
    row_format = "pk={}: {} {} - {} - {}"
    for user in Users.objects.all():
        print(row_format.format(user.pk, user.name, user.last_name, user.user_name, user.email))
    print("----------------------------------")
def login(username, email):
    """Look up a user by user_name/email; greet and return it, or None.

    The original fell through to `return current_user` with the variable
    unbound when the lookup failed, raising UnboundLocalError instead of
    signalling "not found".
    """
    current_user = None
    try:
        current_user = Users.objects.get(user_name=username, email=email)
        print("Bienvenido " + current_user.user_name)
    except Users.DoesNotExist:
        print("No hay usuarios con ese ID")
    print("----------------------------------")
    return current_user
def create_post():
    """Prompt for a title and body, persist a new Posts row, report the count."""
    title = input("Ingrese el título del post: ")
    post_body = input("Ingrese el contenido del post: ")
    new_post = Posts(title = title, post_body = post_body)
    new_post.save()
    print("Cantidad despues: ", Posts.objects.all().count())
    print("----------------------------------")
def show_posts():
    """Print every post with its pk, title, body, like count, and date."""
    print("Posts: ")
    row_format = "Numero={}: {} | {} ({}) -- {}"
    for post in Posts.objects.all():
        print(row_format.format(post.pk, post.title, post.post_body,
                                post.number_likes.count(), post.date))
    print("----------------------------------")
def like_post(current_user, liked_id):
    """Record a Likes row linking *current_user* to the post with pk *liked_id*."""
    try:
        liked_post = Posts.objects.get(id=liked_id)
        new_like = Likes(user = current_user, post = liked_post)
        new_like.save()
        print("Post likeado: " + liked_post.title)
    except Posts.DoesNotExist:
        print("No existe el post")
    print("----------------------------------")
def delete_post(deleted_id):
    """Delete the post with pk *deleted_id*, reporting the remaining count."""
    try:
        doomed = Posts.objects.get(id=deleted_id)
    except Posts.DoesNotExist:
        print("No existe el post")
    else:
        doomed.delete()
        print("Cantidad despues: ", Posts.objects.all().count())
    print("----------------------------------")
# Count the number of prime numbers less than a non-negative number, n.
# Example:
# Input: 10
# Output: 4
# Explanation: There are 4 prime numbers less than 10, they are 2, 3, 5, 7.
class Solution:
    def countPrimes(self, n: int) -> int:
        """Count the primes strictly less than n (sieve of Eratosthenes).

        Returns 0 for n <= 2. Marking starts at i*i instead of 2*i because
        every smaller multiple of i already has a smaller prime factor.
        """
        if n < 3:
            return 0
        composite = [False] * n
        count = 0
        for i in range(2, n):
            if not composite[i]:
                count += 1
                for multiple in range(i * i, n, i):
                    composite[multiple] = True
        return count
|
"""
This module includes various AWS-specific functions to stage data in S3 and deal with
messages in SQS queues.
This module relies on the harmony.util.config and its environment variables to be
set for correct operation. See that module and the project README for details.
"""
import boto3
from botocore.config import Config
def is_s3(url: str) -> bool:
    """Predicate to determine if a url is an S3 endpoint."""
    if url is None:
        return False
    return url.lower().startswith('s3')
def _aws_parameters(use_localstack, localstack_host, region):
"""Constructs a configuration dict that can be used to create an aws client.
Parameters
----------
use_localstack : bool
Whether to use the localstack in this environment.
localstack_host : str
The hostname of the localstack services (if use_localstack enabled).
region : str
The AWS region to connect to.
Returns
-------
"""
if use_localstack:
return {
'endpoint_url': f'http://{localstack_host}:4566',
'use_ssl': False,
'aws_access_key_id': 'ACCESS_KEY',
'aws_secret_access_key': 'SECRET_KEY',
'region_name': region
}
else:
return {
'region_name': region
}
def _get_aws_client(config, service, user_agent=None):
    """
    Returns a boto3 client for accessing the provided service. When the
    runtime configuration enables localstack, the client targets the
    LocalStack endpoint instead of AWS.

    Parameters
    ----------
    config : harmony.util.Config
        The configuration for the current runtime environment.
    service : string
        The AWS service name for which to construct a client, e.g. "s3" or "sqs"
    user_agent : string
        The user agent that is requesting the aws service.
        E.g. harmony/0.0.0 (harmony-sit) harmony-service-lib/4.0 (gdal-subsetter)

    Returns
    -------
    boto3.*.Client
        A client appropriate for accessing the provided service
    """
    boto_cfg = Config(user_agent_extra=user_agent)
    client_kwargs = _aws_parameters(config.use_localstack,
                                    config.localstack_host,
                                    config.aws_default_region)
    return boto3.client(service_name=service, config=boto_cfg, **client_kwargs)
def download(config, url, destination_file, user_agent=None):
    """Download an S3 object to the specified destination file.

    Parameters
    ----------
    config : harmony.util.Config
        The configuration for the current runtime environment.
    url : string
        The s3:// URL of the object; bucket and key are parsed from the
        third and subsequent '/'-separated segments.
    destination_file : file-like
        The destination file where the object will be written. Must be
        a file-like object opened for binary write.
    user_agent : string
        The user agent that is requesting the download.
        E.g. harmony/0.0.0 (harmony-sit) harmony-service-lib/4.0 (gdal-subsetter)
    """
    # 's3://bucket/key/parts' -> split('/'): ['s3:', '', 'bucket', 'key', 'parts']
    bucket = url.split('/')[2]
    key = '/'.join(url.split('/')[3:])
    aws_client = _get_aws_client(config, 's3', user_agent)
    aws_client.download_fileobj(bucket, key, destination_file)
def stage(config, local_filename, remote_filename, mime, logger, location=None):
    """
    Stages the given local filename, including directory path, to an S3 location with the given
    filename and mime-type

    Requires the following environment variables:
        AWS_DEFAULT_REGION: The AWS region in which the S3 client is operating

    Parameters
    ----------
    config : harmony.util.Config
        The configuration for the current runtime environment.
    local_filename : string
        A path and filename to the local file that should be staged
    remote_filename : string
        The basename to give to the remote file
    mime : string
        The mime type to apply to the staged file for use when it is served, e.g. "application/x-netcdf4"
    location : string
        The S3 prefix URL under which to place the output file. If not provided, STAGING_BUCKET and
        STAGING_PATH must be set in the environment
    logger : logging
        The logger to use

    Returns
    -------
    url : string
        An s3:// URL to the staged file (or an http://example.com/ URL when
        staging is skipped in dev/test without localstack)
    """
    key = None
    staging_bucket = config.staging_bucket
    if location is None:
        if config.staging_path:
            key = '%s/%s' % (config.staging_path, remote_filename)
        else:
            key = remote_filename
    else:
        # NOTE(review): assumes `location` ends with '/'; otherwise the last
        # prefix component and the filename are fused -- confirm with callers.
        _, _, staging_bucket, staging_path = location.split('/', 3)
        key = staging_path + remote_filename
    if config.env in ['dev', 'test'] and not config.use_localstack:
        # Local development without localstack: report what would happen, but
        # do not touch AWS.
        logger.warning(f"ENV={config.env}"
                       f" and not using localstack, so we will not stage {local_filename} to {key}")
        return "http://example.com/" + key
    s3 = _get_aws_client(config, 's3')
    s3.upload_file(local_filename, staging_bucket, key, ExtraArgs={'ContentType': mime})
    return 's3://%s/%s' % (staging_bucket, key)
def receive_messages(config, queue_url, visibility_timeout_s, logger):
    """
    Generates successive messages from reading the queue. The caller
    is responsible for deleting or returning each message to the queue

    Parameters
    ----------
    config : harmony.util.Config
        The configuration for the current runtime environment.
    queue_url : string
        The URL of the queue to receive messages on
    visibility_timeout_s : int
        The number of seconds to wait for a received message to be deleted
        before it is returned to the queue (defaults to 600 when None)

    Yields
    ------
    receiptHandle, body : string, string
        A tuple of the receipt handle, used to delete or update messages,
        and the contents of the message
    """
    if visibility_timeout_s is None:
        visibility_timeout_s = 600
    sqs = _get_aws_client(config, 'sqs')
    logger.info('Listening on %s' % (queue_url,))
    # Infinite long-poll loop; ends only when the caller stops iterating.
    while True:
        receive_params = dict(
            QueueUrl=queue_url,
            VisibilityTimeout=visibility_timeout_s,
            WaitTimeSeconds=20,  # long polling reduces empty responses
            MaxNumberOfMessages=1
        )
        response = sqs.receive_message(**receive_params)
        messages = response.get('Messages') or []
        if len(messages) == 1:
            yield (messages[0]['ReceiptHandle'], messages[0]['Body'])
        else:
            logger.info('No messages received. Retrying.')
def delete_message(config, queue_url, receipt_handle):
    """
    Deletes the message with the given receipt handle from the provided queue URL,
    indicating successful processing

    Parameters
    ----------
    config : harmony.util.Config
        The configuration for the current runtime environment.
    queue_url : string
        The queue from which the message originated
    receipt_handle : string
        The receipt handle of the message, as yielded by `receive_messages`
    """
    client = _get_aws_client(config, 'sqs')
    client.delete_message(
        QueueUrl=queue_url,
        ReceiptHandle=receipt_handle,
    )
def change_message_visibility(config, queue_url, receipt_handle, visibility_timeout_s):
    """
    Updates the message visibility timeout of the message with the given receipt handle

    Parameters
    ----------
    config : harmony.util.Config
        The configuration for the current runtime environment.
    queue_url : string
        The queue from which the message originated
    receipt_handle : string
        The receipt handle of the message, as yielded by `receive_messages`
    visibility_timeout_s : int
        The number of additional seconds to wait for a received message to be deleted
        before it is returned to the queue
    """
    sqs = _get_aws_client(config, 'sqs')
    sqs.change_message_visibility(
        QueueUrl=queue_url,
        ReceiptHandle=receipt_handle,
        VisibilityTimeout=visibility_timeout_s)
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
class UserProfile(models.Model):
    """Per-user extension of django.contrib.auth's User: the account kind."""
    # Closed set of account kinds; the stored value is the string key '1'..'4'.
    USER_CHOICES = (
        ('1', _('Producer')),
        ('2', _('Agency')),
        ('3', _('Client')),
        ('4', _('Guest')),
    )
    # NOTE(review): ForeignKey(unique=True) is the pre-OneToOneField idiom;
    # modern Django also requires an explicit on_delete argument, and
    # ugettext_lazy was renamed gettext_lazy -- confirm the target Django
    # version before upgrading.
    user = models.ForeignKey(User, unique=True)
    kind = models.CharField(_('kind'), blank=False, max_length=2, choices=USER_CHOICES, default='1')
def user_post_save(sender, instance, **kwargs):
    """post_save signal handler: ensure a UserProfile exists for *instance*.

    The (profile, created) return value was previously unpacked into unused
    variables; it is simply discarded now.
    """
    UserProfile.objects.get_or_create(user=instance)
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIcon
from videostream import Ui_VideoStream
from developer import DialogDeveloper
from info import DialogInfo
class Ui_MainWindow(object):
    """Fixed-size (1000x650) launcher window: a start button that opens the
    video-stream window, plus modal info and developer dialogs."""
    def __init__(self):
        # Widgets are created in setupUi(); initialise the slots to None so
        # the attribute set is visible up front.
        self.ui = Ui_VideoStream()
        self.startBtn = None
        self.cetralwidget = None
        self.label = None
        self.referenceBtn = None
        self.nameIMG = None
        self.developerBtn = None
    def openMainW(self):
        """Open the video-stream window and hide the launcher.

        NOTE(review): MainWindow is the module-level global created in the
        __main__ block below; this method fails if the module is imported
        rather than run as a script.
        """
        try:
            window = QtWidgets.QMainWindow()
            self.ui.setup(window, MainWindow)
            window.show()
            MainWindow.hide()
        except Exception:
            error_dialog = QtWidgets.QErrorMessage()
            error_dialog.showMessage('Oh no!')
    def openDeveloper(self):
        """Show the modal 'developer' dialog, frameless and always on top."""
        Dialog = QtWidgets.QDialog()
        uiDvlpr = DialogDeveloper()
        uiDvlpr.setupUi(Dialog)
        Dialog.show()
        Dialog.setWindowFlags(Qt.CustomizeWindowHint | Qt.WindowCloseButtonHint | Qt.WindowStaysOnTopHint)
        Dialog.exec_()
    def openInfo(self):
        """Show the modal 'info' (reference) dialog, frameless and always on top."""
        Dialog = QtWidgets.QDialog()
        uiInfo = DialogInfo()
        uiInfo.setupUi(Dialog)
        Dialog.show()
        Dialog.setWindowFlags(Qt.CustomizeWindowHint | Qt.WindowCloseButtonHint | Qt.WindowStaysOnTopHint)
        Dialog.exec_()
    def setupUi(self, MainWindowObj):
        """Build the launcher UI on *MainWindowObj* (generated-style Qt code)."""
        MainWindowObj.setObjectName("MainWindow")
        MainWindowObj.setWindowModality(QtCore.Qt.WindowModal)
        MainWindowObj.setEnabled(True)
        # Fixed 1000x650 geometry: resize + min/max sizes all pinned below.
        MainWindowObj.resize(1000, 650)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(MainWindowObj.sizePolicy().hasHeightForWidth())
        MainWindowObj.setSizePolicy(sizePolicy)
        MainWindowObj.setMinimumSize(QtCore.QSize(1000, 650))
        MainWindowObj.setMaximumSize(QtCore.QSize(1000, 650))
        MainWindowObj.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
        MainWindowObj.setAutoFillBackground(False)
        MainWindowObj.setStyleSheet("background-color: #364a65;\n"
                                    "background-image: url(:/newPrefix/main.png);")
        # Central widget with a full-size background image label.
        self.cetralwidget = QtWidgets.QWidget(MainWindowObj)
        self.cetralwidget.setObjectName("cetralwidget")
        self.label = QtWidgets.QLabel(self.cetralwidget)
        self.label.setGeometry(QtCore.QRect(0, 0, 1000, 650))
        self.label.setText("")
        self.label.setPixmap(QtGui.QPixmap("images/main.png"))
        self.label.setObjectName("label")
        # "Reference" button (bottom right) -> openInfo dialog.
        self.referenceBtn = QtWidgets.QPushButton(self.cetralwidget)
        self.referenceBtn.setEnabled(True)
        self.referenceBtn.setGeometry(QtCore.QRect(879, 600, 91, 31))
        font = QtGui.QFont()
        font.setFamily("MS Shell Dlg 2")
        font.setPointSize(10)
        font.setBold(True)
        font.setWeight(75)
        self.referenceBtn.setFont(font)
        self.referenceBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.referenceBtn.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.referenceBtn.setStyleSheet("QPushButton {\n"
                                        "background: none;\n"
                                        "\n"
                                        " background-color: #949494;\n"
                                        " color: #000; \n"
                                        " border-radius: 10px;\n"
                                        " padding: 0px; \n"
                                        "}\n"
                                        "QPushButton:hover {color: #364a65; font-size: 15px;};")
        self.referenceBtn.setObjectName("referenceBtn")
        self.referenceBtn.clicked.connect(self.openInfo)
        # "Start" button -> openMainW (video-stream window).
        self.startBtn = QtWidgets.QPushButton(self.cetralwidget)
        self.startBtn.setGeometry(QtCore.QRect(750, 300, 191, 41))
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.startBtn.sizePolicy().hasHeightForWidth())
        self.startBtn.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setFamily("MS Shell Dlg 2")
        font.setPointSize(15)
        font.setBold(True)
        font.setWeight(75)
        self.startBtn.setFont(font)
        self.startBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.startBtn.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.startBtn.setStyleSheet("QPushButton { \n"
                                    "background: none;\n"
                                    " background-color: #0bb2e8;\n"
                                    " color: #d7e7ec; \n"
                                    " border-radius: 10px;\n"
                                    "}\n"
                                    "QPushButton:hover {color: #364a65;};")
        self.startBtn.setObjectName("startBtn")
        self.startBtn.clicked.connect(self.openMainW)
        # Decorative title image (left side).
        self.nameIMG = QtWidgets.QLabel(self.cetralwidget)
        self.nameIMG.setGeometry(QtCore.QRect(49, 280, 381, 81))
        self.nameIMG.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.nameIMG.setStyleSheet("background: none;")
        self.nameIMG.setText("")
        self.nameIMG.setPixmap(QtGui.QPixmap("images/Name.png"))
        self.nameIMG.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
        self.nameIMG.setObjectName("nameIMG")
        # "Developer" button (bottom right) -> openDeveloper dialog.
        self.developerBtn = QtWidgets.QPushButton(self.cetralwidget)
        self.developerBtn.setGeometry(QtCore.QRect(749, 600, 101, 31))
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.developerBtn.sizePolicy().hasHeightForWidth())
        self.developerBtn.setSizePolicy(sizePolicy)
        self.developerBtn.clicked.connect(self.openDeveloper)
        font = QtGui.QFont()
        font.setFamily("MS Shell Dlg 2")
        font.setPointSize(10)
        font.setBold(True)
        font.setWeight(75)
        self.developerBtn.setFont(font)
        self.developerBtn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.developerBtn.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.developerBtn.setStyleSheet("QPushButton { \n"
                                        "background: none;\n"
                                        " background-color: #949494;\n"
                                        " color: #000; \n"
                                        " border-radius: 10px;\n"
                                        " padding: 0px; \n"
                                        "}\n"
                                        "QPushButton:hover {color: #364a65; font-size: 15px;};")
        self.developerBtn.setObjectName("developerBtn")
        MainWindowObj.setCentralWidget(self.cetralwidget)
        self.retranslateUi(MainWindowObj)
        QtCore.QMetaObject.connectSlotsByName(MainWindowObj)
    def retranslateUi(self, MainWindowObj):
        """Set the window title/icon and the (Ukrainian) button captions."""
        _translate = QtCore.QCoreApplication.translate
        MainWindowObj.setWindowTitle(_translate("MainWindow", "DriverCare"))
        MainWindowObj.setWindowIcon(QIcon('images/icon.png'))
        self.referenceBtn.setText(_translate("MainWindow", "Довідка"))
        self.startBtn.setText(_translate("MainWindow", "Розпочати"))
        self.developerBtn.setText(_translate("MainWindow", "Розробник"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
import sys
import json
def hw():
    """Print a hello-world greeting.

    Uses the parenthesized print form, which is valid under both Python 2
    (a parenthesized expression) and Python 3; the original bare
    `print 'Hello, world!'` statement is a SyntaxError under Python 3.
    """
    print('Hello, world!')
def lines(fp):
    """Print the number of lines remaining in the open file object *fp*.

    Parenthesized print works under both Python 2 and 3 (the original
    `print str(...)` statement is Python-2-only).
    """
    print(str(len(fp.readlines())))
def calculateSentiment(tweet):
    """Tally the whitespace-separated terms of *tweet* into the module-level
    termsFreq dict and bump totalTerms once per term.

    Note: despite the name, no sentiment is computed -- only term counts.
    """
    global totalTerms
    global termsFreq
    for term in tweet.split():
        # dict.get avoids the `term in termsFreq.keys()` scan, which builds
        # a list and is O(n) per lookup under Python 2.
        termsFreq[term] = termsFreq.get(term, 0) + 1
        totalTerms += 1
def main():
    """Read a file of JSON tweets (path in argv[1]) and print each term's
    relative frequency as 'term fraction' lines.

    Changes from the original: the input file is closed via `with` (it was
    leaked), and print uses the parenthesized form so the script also runs
    under Python 3.
    """
    global totalTerms
    global termsFreq
    with open(sys.argv[1]) as tweet_file:
        for l in tweet_file:
            lineJson = json.loads(l)
            if 'text' in lineJson:
                calculateSentiment(lineJson['text'])
    for term in termsFreq:
        print(term + " " + str(termsFreq[term] / float(totalTerms)))
# Module-level accumulators shared by calculateSentiment() and main().
termsFreq = {}
totalTerms = 0
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
#encoding=utf-8
import os,sys,time,md5
from unctrlpy.lib import osaBatchLib
from unctrlpy.lib.osaFileRecv import file_recv_main
from unctrlpy.lib import hostSocket
from unctrlpy.lib import osaSysFunc
from unctrlpy.etc.config import SOCKET,FSOCKET,DIRS
from unctrlpy.lib.osaUtil import save_log
'''
Author: osa open-source team
Description: file clean-up module
Create Date: 2012-05-22
'''
def index(rev):
    """Batch file-clean entry point.

    Returns False for an empty request; otherwise delegates to
    batchFileClean and returns its result string to the agent.
    """
    return batchFileClean(rev) if rev else False
def batchFileClean(rev):
    '''
    File clean-up worker: removes or relocates files per the request's
    config items; returns a status string for the agent.
    '''
    # Directory whose contents are to be cleaned.
    citem = osaBatchLib.getConfigItem(rev)
    dfile = citem['cleaner_sourcefile']
    if not os.path.isdir(dfile):
        return "{'status':'ERROR','result':'x0032'}"
    # Destination the files are moved to; falls back to /dev/null (discard).
    mpath = citem['cleaner_targetpath']
    if not os.path.isdir(mpath):
        mpath = '/dev/null'
    # Advanced option: either 'rm_dir' or a comma-separated extension list.
    advance = citem['cleaner_advance']
    # Execute the clean-up.
    if advance == 'rm_dir':
        # Remove the whole directory tree.
        re = osaSysFunc.removeall(dfile)
        if re == False :
            result = "{'status':'ERROR','result':'x0031'}"
        else:
            result = "{'status':'OK','result':'clear over!'}"
        return result
    # Otherwise move files matching each extension (dots escaped for regex).
    hlist = advance.split(',')
    for ftype in hlist:
        re = osaSysFunc.mvfile(dfile,mpath,(ftype.replace('.','\\.')))
        if re == False:
            return "{'status':'ERROR','result':'x0033'}"
    return "{'status':'OK','result':'clear over!'}"
#rev = "{'command':'BATCH_FILE_CLEANER','iparr':'192.168.2.1|192.168.2.2','config_items':{'cleaner_sourcefile':'/root/shell/','cleaner_targetpath':'/dev/null','cleaner_advance':'.bak,.log,.bak.'},'id':'id'}"
|
import flask
from flask import request, jsonify, Response
import json
import netprog
app = flask.Flask(__name__)
# Debug mode enabled; disable before deploying to production.
app.config["DEBUG"] = True
@app.route('/', methods=['GET', 'POST'])
def root():
    """Reject direct access to the API root with a 403 JSON payload."""
    body = json.dumps({
        "status": True,
        "message": "Forbidden"
    }, indent=4)
    return Response(body, status=403, mimetype='application/json')
@app.route('/config/access-port', methods=['POST'])
def access_port():
    """Apply access-port configuration to each device in the POSTed JSON body.

    Expects a JSON object mapping device IP -> config payload. Returns a
    JSON summary listing IPs configured successfully and those that failed.
    """
    payload = request.json  # renamed from `input`, which shadowed the builtin
    success = []
    failed = []
    for ip in payload:
        commands = netprog.access_port_config(payload[ip])
        print(ip)
        print(json.dumps(commands, indent=4))
        device = netprog.get_devices('access', ip)
        # Truthiness test instead of `== True`.
        if netprog.send_config(device, commands):
            success.append(ip)
        else:
            failed.append(ip)
    result = {
        'status': 'done',
        'success': success,
        'failed': failed
    }
    return Response(json.dumps(result, indent=4), status=200, mimetype='application/json')
app.run() |
#!/usr/bin/env python
import imp
import os
import re
# /etc/passwd line: name:placeholder:uid:gid:gecos:home:shell
# Raw strings: `\d`/`\w` in plain strings are invalid escapes (a warning in
# Python 3.6+, an error in newer releases).
ETC_PASSWD_REGEX = (
    r"(?P<username>[a-z\d_\-.]+):"
    r"(?P<pw_placeholder>[\d\w]*):"
    r"(?P<uid>\d+):"
    r"(?P<gid>\d+):"
    r"(?P<gecos>.*):"
    r"(?P<homedir>[/\w \-]+):"
    r"(?P<shell>[/\w ]+)"
)
# /etc/shadow line: name:hash:lastchange:min:max:warn:inactive:expire:flag
ETC_SHADOW_REGEX = (
    r"(?P<username>[a-z\d_\-.]+):"
    r"(?P<encrypted_pw>.*):"
    r"(?P<pw_change_date>\d+):"
    r"(?P<min_days>\d*):"
    r"(?P<max_days>\d*):"
    r"(?P<warn_days>\d*):"
    r"(?P<disabled_after_days>\d*):"
    r"(?P<invalidate_days_after_expiration>\d*):"
    r"(?P<expiration_date>\d*)"
)
def get_notify_func():
    """Load the user's config module and return its `notify` callable.

    The config path comes from $ETCPASSMONITORED_CONFIG or defaults to a
    config.py next to this script; config.NOTIFY_MODULE must point at a
    Python file exposing a `notify` function.  Raises Exception with a
    descriptive message on any load/shape problem.
    """
    if os.getenv("ETCPASSMONITORED_CONFIG"):
        config_path = os.getenv("ETCPASSMONITORED_CONFIG")
        # imp.load_source below needs a real Python source file.
        if config_path[-3:] != ".py":
            raise Exception("Your config file needs to be a .py file!")
    else:
        config_path = os.path.join(os.path.dirname(__file__), "config.py")
    try:
        config = imp.load_source("config", config_path)
    except (IOError, SyntaxError):
        raise Exception(
            "There was a problem loading your config file < {} >. Please check to "
            "ensure your path is correct".format(config_path)
        )
    # It would be nice to just merge the default config with the new config. Don't
    # really have time to do that at the moment though so just error out if the
    # NOTFIY_MODULE property does not exist
    try:
        config.NOTIFY_MODULE
    except AttributeError:
        raise Exception("Your config module needs to set the NOTIFY_MODULE property")
    # NOTIFY_MODULE is a path to a second module supplying notify(message).
    try:
        notify_module = imp.load_source("notify", config.NOTIFY_MODULE)
    except (IOError, SyntaxError):
        raise Exception(
            "There was a problem loading your notifications module < {} >. Please check "
            "to ensure the path is correct".format(config.NOTIFY_MODULE)
        )
    try:
        return notify_module.notify
    except AttributeError:
        raise Exception(
            "There was a problem loading the generic notification function to use "
            "please name it `notify`."
        )
def validate_etc_passwd(etcpasswd_lines, notify_func):
    """
    Parse /etc/passwd lines and notify about suspicious entries.

    Per-line checks: parseability, non-root accounts with uid 0, missing
    password placeholder.  Afterwards, duplicate uids shared by non-root
    users are reported.  Returns the list of parsed-line dicts.
    """
    parsed_etcpasswd = []
    for line in etcpasswd_lines:
        matched = re.search(ETC_PASSWD_REGEX, line)
        # There is something wrong with the line here. Notify about the failure
        if not matched:
            notify_func(
                "The line < {} > in /etc/passwd is incorrectly formatted"
                .format(line.rstrip("\n"))
            )
            continue
        parsed_etcpasswd.append(matched.groupdict())
        # Check for uids = 0 for logins other than root
        if parsed_etcpasswd[-1]["username"] != "root" and parsed_etcpasswd[-1]["uid"] == "0":
            notify_func(
                "User {} has uid 0. This is a potential security problem"
                .format(parsed_etcpasswd[-1]["username"])
            )
        # Ensure each user has a password placeholder.
        if parsed_etcpasswd[-1]["pw_placeholder"] == "":
            notify_func(
                "User {} has no password placeholder! This is a major security issue"
                .format(parsed_etcpasswd[-1]["username"])
            )
    # If there are users with the same uid then complain
    uid_mapping = {}
    for user_mapping in parsed_etcpasswd:
        # Search for users with overlapping uids besides root
        if user_mapping["uid"] in uid_mapping:
            uid_mapping[user_mapping["uid"]].append(user_mapping["username"])
        else:
            uid_mapping[user_mapping["uid"]] = [user_mapping["username"]]
    # Bug fix: dict.iteritems() is Python 2 only; items() works on 2 and 3.
    for uid, usernames in uid_mapping.items():
        if len(usernames) > 1 and "root" not in usernames:
            notify_func(
                "Users {} have the same uid < {} >. This is a potential security problem"
                .format(",".join(usernames), uid)
            )
    return parsed_etcpasswd
def validate_etc_shadow(etcshadow_lines, notify_func):
    """
    Parse /etc/shadow lines and notify about malformed or weak entries.

    Flags unparseable lines, accounts with an empty password hash, and
    accounts without an expiration date.  Returns the parsed-line dicts.
    """
    parsed_etcshadow = []
    for line in etcshadow_lines:
        matched = re.search(ETC_SHADOW_REGEX, line)
        if not matched:
            notify_func(
                "The line < {} > in /etc/shadow is incorrectly formatted"
                .format(line.rstrip("\n"))
            )
            continue
        entry = matched.groupdict()
        parsed_etcshadow.append(entry)
        # An empty hash field means the account requires no password.
        if not entry["encrypted_pw"]:
            notify_func(
                "The user {} has no password!"
                .format(entry["username"])
            )
        # Accounts without an expiration date never age out.
        if not entry["expiration_date"]:
            notify_func(
                "The user {} has no expiration_date."
                .format(entry["username"])
            )
    return parsed_etcshadow
def main():
    """Run both validators against the live /etc/passwd and /etc/shadow."""
    notify_func = get_notify_func()
    with open("/etc/passwd", "r") as etcpasswd:
        etcpasswd_lines = etcpasswd.readlines()
    validate_etc_passwd(etcpasswd_lines, notify_func)
    # NOTE: reading /etc/shadow normally requires root privileges.
    with open("/etc/shadow", "r") as etcshadow:
        etcshadow_lines = etcshadow.readlines()
    validate_etc_shadow(etcshadow_lines, notify_func)
if __name__ == "__main__":
    main()
|
from django.shortcuts import render, HttpResponse, redirect
from django.contrib import messages
import bcrypt
from.models import *
def index(request):
    """Render the combined login/registration page."""
    return render(request, "loginreg.html")
def register(request):
    """
    Create a user from the registration form and log them in.

    Validation errors are flashed and redirect back to the login page.
    NOTE(review): the password is stored exactly as submitted; bcrypt is
    imported by this module but never used here — confirm whether hashing
    is expected before the password is saved.
    """
    errorsFromValidator = User.objects.registrationValidator(request.POST)
    print("ERRORS FROM VALIDATOR BELOW")
    print(errorsFromValidator)
    if len(errorsFromValidator) > 0:
        for key, value in errorsFromValidator.items():
            messages.error(request, value)
        return redirect("/")
    else:
        newUser = User.objects.create(
            first_name=request.POST['UserFirst'], last_name=request.POST['UserLast'], email=request.POST['UserEmail'], password=request.POST['UserPW'])
        # Log the new user in by storing their id in the session.
        request.session['loggedInID'] = newUser.id
        return redirect("/success")
def success(request):
    """
    Render the dashboard for the logged-in user.

    Redirects to the login page when no session user is present.
    """
    if 'loggedInID' not in request.session:
        messages.error(request, "You Must Be Logged In First")
        return redirect("/")
    # Fetch the user once instead of running three identical queries.
    loggedInUser = User.objects.get(id=request.session['loggedInID'])
    context = {
        'loggedInUser': loggedInUser,
        'allTrips': Trip.objects.all(),
        'favTrips': Trip.objects.filter(favoritors=loggedInUser),
        'nonTrips': Trip.objects.exclude(favoritors=loggedInUser)
    }
    return render(request, "loggedin.html", context)
def login(request):
    """
    Log an existing user in by email.

    NOTE(review): no password comparison is visible here — presumably
    User.objects.loginValidator verifies credentials; confirm.
    """
    errorsFromValidator = User.objects.loginValidator(request.POST)
    if len(errorsFromValidator) > 0:
        for key, value in errorsFromValidator.items():
            messages.error(request, value)
        return redirect("/")
    else:
        userswithmatchingemail = User.objects.filter(email = request.POST['UserEmail'])
        request.session['loggedInID'] = userswithmatchingemail[0].id
        return redirect("/success")
def logout(request):
    """Clear the whole session and return to the login page."""
    request.session.clear()
    return redirect("/")
def createtravel(request):
    """Render the trip-creation form."""
    return render(request, "createtravel.html")
def uploadtravel(request):
    """
    Validate the submitted trip form and create the Trip on success.

    Errors are flashed and redirect back to the creation form; on success
    the logged-in user becomes the trip's creator.
    """
    errorsfromTripValidator = Trip.objects.createTripValidator(request.POST)
    print(errorsfromTripValidator)
    if len(errorsfromTripValidator) >0:
        for key, value in errorsfromTripValidator.items():
            messages.error(request, value)
        return redirect("/trip/create")
    else:
        Trip.objects.create(description= request.POST['desc'], creator = User.objects.get(id=request.session['loggedInID']), start_date=request.POST['startDate'], end_date=request.POST['endDate'], plan=request.POST['plan'])
        return redirect("/success")
def tripinfo(request, tripID):
    """Render the detail page for a single trip."""
    context = {
        'onetrip' : Trip.objects.get(id=tripID)
    }
    return render(request, "tripinfo.html", context)
def addtrip(request,tripID):
    """Add the logged-in user to the trip's favoritors."""
    Trip.objects.get(id=tripID).favoritors.add(User.objects.get(id=request.session['loggedInID']))
    return redirect("/success")
def removetrip(request,tripID):
    """Remove the logged-in user from the trip's favoritors."""
    Trip.objects.get(id=tripID).favoritors.remove(User.objects.get(id=request.session['loggedInID']))
    return redirect("/success")
def deletetrip(request,tripID):
    """Delete a trip.

    NOTE(review): no check that the requester is the trip's creator (or
    even logged in) — any visitor can delete any trip; confirm intent.
    """
    Trip.objects.get(id=tripID).delete()
    return redirect("/success")
|
from collections import Counter

print("Welcome to the Frequency Analysis App")
# List of elements to remove from all text for analysis
non_letters = ['1','2','3','4','5','6','7','8','9','0',' ','.',',','!','?',',','"',"'",':',";",'(',')','%','&','#','$','\n','\t']


def analyze_phrase(phrase, label):
    """Print a per-letter frequency table for *phrase* and return its
    letters ordered from most to least common.

    The original script repeated this ~25-line pass verbatim for each
    phrase; it is factored into one helper called twice.
    """
    # Strip digits/punctuation/whitespace so only letters are counted.
    for non_letter in non_letters:
        phrase = phrase.replace(non_letter, '')
    total_occurances = len(phrase)
    letter_count = Counter(phrase)
    print("\nHere is the frequency analysis from " + label + ": ")
    print("\n\tLetter\t\tOccurance\tPercentage")
    for key, value in sorted(letter_count.items()):
        percentage = round(100 * value / total_occurances, 2)
        print("\t" + key + "\t\t\t" + str(value) + "\t\t\t" + str(percentage) + " %")
    # Letters ordered from highest occurrence to lowest.
    ordered_letters = [pair[0] for pair in letter_count.most_common()]
    print("\nLetters ordered from highest occurance to lowest: ")
    for letter in ordered_letters:
        print(letter, end='')
    return ordered_letters


# Information for key phrase 1
key_phrase_1 = input("\nEnter a word or phrase to count the occurance of each letter: ").lower().strip()
key_phrase_1_ordered_letters = analyze_phrase(key_phrase_1, "key phrase 1")
# Information for key phrase 2
key_phrase_2 = input("\n\nEnter a word or phrase to count the occurance of each letter: ").lower().strip()
key_phrase_2_ordered_letters = analyze_phrase(key_phrase_2, "key phrase 2")
|
from math import tanh
from typing import Tuple
from bot.data import Request
from bot.model_definitions import Mode, MoodCategory, AffectionCategory
from bot.pattern_recognizer import analyze_input
from bot.logger import logger
# factor 0.2 ensures steady adjustment of bots mood and affection
IMPACT_FACTOR = 0.2
def stretch_prob(probability: float) -> float:
    """
    Linearly stretch a classifier probability into a signed impact value.

    Maps 0.75 to 0.0 and 1.0 to 1.0; probabilities below 0.75 yield
    negative values (down to -1.0 at 0.5).  The caller later feeds the
    result through tanh(2*x) for a smoother mood/affection change.

    Args:
        probability: Confidence with which the bot detected mood/affection.

    Returns:
        The stretched value 4 * probability - 3.
    """
    return probability * 4.0 - 3.0
def _apply_result(level: float, result, negative_category) -> float:
    """Fold one recognizer result into the current mood/affection level.

    Applies the stretched probability (signed by the detected category)
    scaled by IMPACT_FACTOR, clamps at 1, and smooths with tanh(2*x).
    """
    sign = -1 if result.category == negative_category else 1
    message_value = stretch_prob(result.probability)
    level = level + sign * message_value * IMPACT_FACTOR
    if level > 1:
        level = 1.0
    return tanh(2 * level)


def analyze(request: Request) -> Tuple[float, float]:
    """
    Args:
        request:
            The request passed by the web server to the bot instance.
            It contains all the necessary information to determine a new bot mood/affection.
    Returns:
        The new mood/affection of the bot calculated on the text of the message and the
        previous mood and affection
    """
    # input message of the user passed by a request
    text = request.text

    # Mood stays unchanged if the message shows no clear positive or
    # negative signal.
    mood_bot = request.mood
    mood_result = analyze_input(text, Mode.MOODS)
    if mood_result:
        # Bug fix: this branch used to log 'Affection:' for mood results.
        logger.debug('Mood: {}'.format(mood_result.category.name))
        mood_bot = _apply_result(mood_bot, mood_result, MoodCategory.M_NEG)

    # Affection stays unchanged under the same condition.
    affection_bot = request.affection
    affection_result = analyze_input(text, Mode.AFFECTIONS)
    if affection_result:
        logger.debug('Affection: {}'.format(affection_result.category.name))
        affection_bot = _apply_result(
            affection_bot, affection_result, AffectionCategory.A_NEG)

    return (mood_bot, affection_bot)
|
# -*- coding: utf-8 -*-
"""
Numba function wrapper.
"""
from __future__ import print_function, division, absolute_import
import types
import ctypes
from functools import partial
from itertools import starmap
from numba2.rules import typeof
from numba2.compiler.overloading import (lookup_previous, overload, Dispatcher,
flatargs)
# TODO: Reuse numba.numbawrapper.pyx for autojit Python entry points
class FunctionWrapper(object):
    """
    Result of @jit for functions.

    Caches compiled specializations per argument-type tuple and marshals
    Python values to/from ctypes when called.
    """
    def __init__(self, dispatcher, py_func, abstract=False, opaque=False):
        self.dispatcher = dispatcher
        self.py_func = py_func
        # self.signature = signature
        self.abstract = abstract
        # Per-argtype-tuple caches, filled lazily by translate().
        self.llvm_funcs = {}
        self.ctypes_funcs = {}
        self.envs = {}
        self.opaque = opaque
        self.implementor = None
    def __call__(self, *args, **kwargs):
        """Compile (if needed) for the argument types, then call via ctypes."""
        from numba2.representation import byref, stack_allocate
        from numba2.conversion import (
            toctypes, fromctypes, toobject, fromobject, ctype)
        from numba2.ctypes_support import CTypesStruct
        from numba2.types import Function
        # Keep this alive for the duration of the call
        keepalive = list(args) + list(kwargs.values())
        # Order arguments
        args = flatargs(self.dispatcher.f, args, kwargs)
        argtypes = [typeof(x) for x in args]
        # Translate
        cfunc, restype = self.translate(argtypes)
        # Construct numba values
        arg_objs = list(starmap(fromobject, zip(args, argtypes)))
        # Map numba values to a ctypes representation
        args = []
        for arg, argtype in zip(arg_objs, argtypes):
            c_arg = toctypes(arg, argtype, keepalive)
            # Stack-allocated aggregates are passed by pointer.
            if byref(argtype) and stack_allocate(argtype):
                c_arg = ctypes.pointer(c_arg)
            args.append(c_arg)
        # We need this cast since the ctypes function constructed from LLVM
        # IR has different structs (which are structurally equivalent)
        c_restype = ctype(restype)
        if byref(restype):
            # By-ref results come back through an out-pointer argument.
            c_result = c_restype() # dummy result value
            args.append(ctypes.pointer(c_result))
            c_restype = None # void
        c_signature = ctypes.PYFUNCTYPE(c_restype, *[type(arg) for arg in args])
        cfunc = ctypes.cast(cfunc, c_signature)
        # Handle calling convention
        if byref(restype):
            cfunc(*args)
        else:
            c_result = cfunc(*args)
        # Map ctypes result back to a python value
        result = fromctypes(c_result, restype)
        result_obj = toobject(result, restype)
        return result_obj
    def translate(self, argtypes):
        """Return (ctypes function, restype) for argtypes, compiling on a cache miss."""
        from . import phase, environment
        key = tuple(argtypes)
        if key in self.ctypes_funcs:
            # Cache hit: reuse the previously compiled specialization.
            env = self.envs[key]
            return self.ctypes_funcs[key], env["numba.typing.restype"]
        # Translate
        env = environment.fresh_env(self, argtypes)
        llvm_func, env = phase.codegen(self, env)
        cfunc = env["codegen.llvm.ctypes"]
        # Cache
        self.llvm_funcs[key] = llvm_func
        self.ctypes_funcs[key] = cfunc
        self.envs[key] = env
        return cfunc, env["numba.typing.restype"]
    @property
    def signatures(self):
        # Signatures of all registered overloads.
        return [signature for func, signature, _ in self.overloads]
    @property
    def overloads(self):
        return self.dispatcher.overloads
    def __str__(self):
        return "<numba function (%s)>" % str(self.dispatcher)
    def __get__(self, instance, owner=None):
        # Descriptor protocol: bind as an instance method on attribute access.
        if instance is not None:
            return partial(self.py_func, instance)
        return self
def wrap(py_func, signature, scope, inline=False, opaque=False, abstract=False, **kwds):
    """
    Wrap a function in a FunctionWrapper. Take care of overloading.

    Any previous overloadable definition of the same name in *scope* has
    this signature added to its dispatcher; otherwise a fresh Dispatcher
    is created.
    """
    previous = lookup_previous(py_func, [scope])
    if isinstance(previous, FunctionWrapper):
        # Extend the existing wrapper's dispatcher.
        target = previous.dispatcher
    elif isinstance(previous, types.FunctionType) and previous != py_func:
        raise TypeError(
            "Function %s in current scope is not overloadable" % (previous,))
    else:
        target = Dispatcher()
    dispatcher = overload(signature, func=target, inline=inline, **kwds)(py_func)
    if isinstance(py_func, types.FunctionType):
        return FunctionWrapper(dispatcher, py_func,
                               opaque=opaque, abstract=abstract)
    assert isinstance(py_func, FunctionWrapper), py_func
    return py_func
def times_table(num):
    """Print the 1-12 times table for *num*, one product per line."""
    n = 1
    while n <= 12:
        print(n, "X", num, "=", n * num)
        n = n + 1


if __name__ == "__main__":
    # Bug fix: the original `times_table=(9)` rebound the function name to
    # the int 9 instead of calling it; also guard the demo call.
    times_table(9)
|
# Swap two variables' values using tuple packing/unpacking.
a, b = 10, 20
print("# 교환 전 값")
print("a:", a)
print("b:", b)
print()

# One-line swap — no temporary variable needed.
a, b = b, a
print("# 교환 한 값")
print("a:", a)
print("b:", b)
print()
from api_tests.admin_backend import AdminBackendTestCase
import entities
class SimpleTariffTests(AdminBackendTestCase):
    """Basic tariff listing and update behaviour."""
    def test_tariff_list(self):
        # The reported total must match the number of returned items.
        tariff_list = self.default_admin_client.tariff.list()
        self.assertEqual(tariff_list['total'], len(tariff_list['items']))
    def test_tariff_update_remove_service(self):
        """Updating with a single service must drop the other one."""
        services = list(self.get_immutable_services())[:2]
        tariff = self.create_tariff(services=[(services[0], 125), (services[1], 125)])
        self.assertEqual(len(tariff['services']), 2)
        tariff = self.default_admin_client.tariff.update(tariff['tariff_id'], services=[{'service_id': services[0], 'price': 124}])
        self.assertEqual(len(tariff['services']), 1)
        self.assertEqual(tariff['services'][0]['service']['service_id'], services[0])
    def test_tariff_mutable_default(self):
        # A mutable tariff cannot be made the default one (HTTP 409).
        tariff = self.create_tariff()
        with self.assertRaisesHTTPError(409):
            self.default_admin_client.tariff.set_default(tariff['tariff_id'])
class DefaultTariffTests(AdminBackendTestCase):
    """Switching the account-wide default tariff between two tariffs."""
    def setUp(self):
        super().setUp()
        self.tariff1 = self.create_tariff(immutable=True)
        self.tariff2 = self.create_tariff(immutable=True)
        # Remember any pre-existing default so it is restored on cleanup.
        existed_default_tariff = self.check_default_tariff()
        if existed_default_tariff:
            self.addCleanupBeforeDelete(self.default_admin_client.tariff.set_default, existed_default_tariff['tariff_id'])
    def test_default_tariff_setting(self):
        """Setting a new default must clear the flag on the previous one."""
        self.assertFalse(self.tariff1['default'])
        self.assertFalse(self.tariff2['default'])
        self.default_admin_client.tariff.set_default(self.tariff1['tariff_id'])
        self.tariff1 = self.default_admin_client.tariff.get(self.tariff1['tariff_id'])
        self.assertTrue(self.tariff1['default'])
        tariff = self.default_admin_client.tariff.get_default()
        self.assertEqual(tariff['tariff_id'], self.tariff1['tariff_id'])
        self.default_admin_client.tariff.set_default(self.tariff2['tariff_id'])
        self.tariff2 = self.default_admin_client.tariff.get(self.tariff2['tariff_id'])
        self.tariff1 = self.default_admin_client.tariff.get(self.tariff1['tariff_id'])
        self.assertTrue(self.tariff2['default'])
        self.assertFalse(self.tariff1['default'])
        tariff = self.default_admin_client.tariff.get_default()
        self.assertEqual(tariff['tariff_id'], self.tariff2['tariff_id'])
    def test_default_tariff_make_default(self):
        # Setting the same tariff as default twice must be idempotent.
        self.tariff1 = self.default_admin_client.tariff.set_default(self.tariff1['tariff_id'])
        self.assertTrue(self.tariff1['default'])
        self.tariff1 = self.default_admin_client.tariff.set_default(self.tariff1['tariff_id'])
        self.assertTrue(self.tariff1['default'])
    def test_default_tariff_deleted(self):
        # A deleted tariff can no longer be made default (HTTP 409).
        self.default_admin_client.tariff.delete(self.tariff1['tariff_id'])
        with self.assertRaisesHTTPError(409):
            self.default_admin_client.tariff.set_default(self.tariff1['tariff_id'])
class OneTariffTests(AdminBackendTestCase):
    """Shared helpers for test cases operating on a single tariff."""

    def search_tariff_in_list(self, tariff, tariff_list):
        """Fail the test unless *tariff* appears in tariff_list['items'].

        Bug fix: the loop variable used to shadow the *tariff* parameter,
        so the comparison was always true and the check never failed.
        """
        for candidate in tariff_list['items']:
            if candidate['tariff_id'] == tariff['tariff_id']:
                break
        else:
            self.fail('Tariff {} not found in tariff list'.format(tariff['tariff_id']))

    def search_event_in_history(self, history:list, event:str) -> list:
        """Return all history entries matching *event*; fail if none exist."""
        resp = [h for h in history if h['event'] == event]
        if len(resp) == 0:
            self.fail('Event {} not found in history list.'.format(event))
        return resp
class TariffOperationsTests(OneTariffTests):
    """Create/read/update operations on a single mutable tariff."""

    def setUp(self):
        super().setUp()
        self.tariff = self.create_tariff()

    def test_tariff_conflict_creation(self):
        """Creating a tariff with a duplicate localized name must 409."""
        self.addCleanupDelete('tariff')
        tariff_info = entities.Tariff(self).generate(localized_name=self.tariff['localized_name'])
        with self.assertRaisesHTTPError(409):
            self.default_admin_client.tariff.create(**tariff_info)

    def test_tariff_in_list(self):
        """The tariff must be findable through every list filter."""
        # Bug fix: the inner helper was named `test_tariff_in_list`,
        # shadowing the test method itself; renamed for clarity.
        def assert_listed(**params):
            tariff_list = self.default_admin_client.tariff.list(**params)
            self.search_tariff_in_list(self.tariff, tariff_list)
        assert_listed()
        assert_listed(name=self.tariff['localized_name']['en'])
        assert_listed(name=self.tariff['localized_name']['ru'])
        assert_listed(description=self.tariff['description'])
        assert_listed(currency=self.tariff['currency'])
        assert_listed(parent=None)

    def test_tariff_update_description(self):
        """Description updates must persist and be returned by get()."""
        new_description = entities.Tariff(self).basic_name('Описание')
        self.tariff = self.default_admin_client.tariff.update(self.tariff['tariff_id'], description=new_description)
        self.assertEqual(self.tariff['description'], new_description)
        self.tariff = self.default_admin_client.tariff.get(self.tariff['tariff_id'])
        self.assertEqual(self.tariff['description'], new_description)

    def test_tariff_update_name(self):
        """Localized-name updates must persist across repeated updates."""
        new_name = entities.Tariff(self).localized_name()
        self.tariff = self.default_admin_client.tariff.update(self.tariff['tariff_id'], localized_name=new_name)
        self.assertDictEqual(self.tariff['localized_name'], new_name)
        self.tariff = self.default_admin_client.tariff.get(self.tariff['tariff_id'])
        self.assertDictEqual(self.tariff['localized_name'], new_name)
        new_name = entities.Tariff(self).localized_name()
        self.tariff = self.default_admin_client.tariff.update(self.tariff['tariff_id'], localized_name=new_name)
        self.assertDictEqual(self.tariff['localized_name'], new_name)
        self.tariff = self.default_admin_client.tariff.get(self.tariff['tariff_id'])
        self.assertDictEqual(self.tariff['localized_name'], new_name)

    def test_trariff_update_currency(self):
        # (method name typo kept: renaming would change test discovery)
        new_currency = 'USD'
        self.tariff = self.default_admin_client.tariff.update(self.tariff['tariff_id'], currency=new_currency)
        self.assertEqual(self.tariff['currency'], new_currency)

    def test_trariff_update_services(self):
        new_service = {'service_id': next(self.get_immutable_services()), 'price': '125'}
        self.tariff = self.default_admin_client.tariff.update(self.tariff['tariff_id'], services=[new_service])
        self.assertIn(new_service['price'], self.tariff['services'][0]['price'])
        self.assertEqual(self.tariff['services'][0]['service']['service_id'], new_service['service_id'])
class ImmutableTariffTests(OneTariffTests):
    """Immutability flag behaviour."""
    def setUp(self):
        super().setUp()
        self.tariff = self.create_tariff()
    def test_tariff_immutable(self):
        """After marking immutable, further updates must fail with 409."""
        # Updating still works while the tariff is mutable.
        self.default_admin_client.tariff.update(self.tariff['tariff_id'], description=self.tariff['description'])
        self.default_admin_client.tariff.immutable(self.tariff['tariff_id'])
        # The 'immutable' event must be recorded in the tariff history.
        history = self.default_admin_client.tariff.get_history(self.tariff['tariff_id'])
        self.search_event_in_history(history, 'immutable')
        with self.assertRaisesHTTPError(409):
            self.default_admin_client.tariff.update(self.tariff['tariff_id'], description=self.tariff['description'])
class TariffDeleteTests(OneTariffTests):
    """Deletion behaviour."""
    def setUp(self):
        super().setUp()
        self.tariff = self.create_tariff()
    def test_tariff_delete(self):
        """Deletion is recorded in history and blocks further updates."""
        self.default_admin_client.tariff.delete(self.tariff['tariff_id'])
        history = self.default_admin_client.tariff.get_history(self.tariff['tariff_id'])
        self.search_event_in_history(history, 'delete')
        with self.assertRaisesHTTPError(409):
            self.default_admin_client.tariff.update(self.tariff['tariff_id'], description=self.tariff['description'])
class TariffParentTests(OneTariffTests):
    """Parent/child tariff relationships."""
    def setUp(self):
        super().setUp()
        self.parent_tariff = self.create_tariff()
    def test_tariff_create_invalid_parent(self):
        # Unknown parent id -> 404.
        with self.assertRaisesHTTPError(404):
            self.create_tariff(parent_id=112985791285)
    def test_tariff_create_invalid_currency(self):
        # Child currency must match the parent's -> 409 otherwise.
        with self.assertRaisesHTTPError(409):
            self.create_tariff(parent_id=self.parent_tariff['tariff_id'], currency='USD')
    def test_tariff_create_with_parent(self):
        tariff = self.create_tariff(parent_id=self.parent_tariff['tariff_id'])
        self.assertEqual(tariff['parent_id'], self.parent_tariff['tariff_id'])
    def test_tariff_list_with_parent(self):
        # Children must be listable via the parent filter.
        tariff = self.create_tariff(parent_id=self.parent_tariff['tariff_id'])
        tariff_list = self.default_admin_client.tariff.list(parent=self.parent_tariff['tariff_id'])
        self.search_tariff_in_list(tariff, tariff_list)
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from eagleEye import create_app
from eagleEye.exts import db
from eagleEye.models import Movie, Cinema, Hall, HallScheduling, Seat, SeatScheduling, Order
app = create_app()
# Wire up Flask-Migrate and expose its commands as `python <this file> db ...`.
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command("db", MigrateCommand)
if __name__ == '__main__':
    manager.run()
|
class Db:
    """Attribute-style view over a plain dict (record object)."""
    def __init__(self, dictionary):
        # Adopt the mapping as the instance namespace: d['k'] becomes obj.k.
        self.__dict__ = dictionary
def record(name, dictionary):
    """Expose *dictionary* as a module-level Db record named *name*.

    NOTE(review): this injects into THIS module's globals; importers will
    not see the new name in their own namespace.
    """
    globals()[name]=Db(dictionary)
|
import urllib.request
import json
import login
import whichweek
from datetime import date
# Fetch final scores for last week's games from the scoreboard API.
rawhtml = urllib.request.urlopen("https://cfb-scoreboard-api.herokuapp.com/v1/date/" + whichweek.getweek()["last"][1])
data = json.loads(rawhtml.read().decode("utf-8"))
# Season records for every user, keyed by username.
users = open('users.json', 'r')
userdata = json.loads(users.read())
def checker(user):
    """Score *user*'s picks for last week against the final game results
    and (except on Saturdays) update their season record in users.json."""
    try:
        userpicks = open('./picks/picks' + whichweek.getweek()["last"][0] + '.json', 'r')
        try:
            userpicksdata = json.loads(userpicks.read())[user]
        except KeyError:
            print("User " + user + " was either just added or has not made any picks for this week")
            exit()
    except FileNotFoundError:
        print("You have no picks for week " + whichweek.getweek()["last"][0])
        exit()
    wins = 0
    losses = 0
    for gameselection in userpicksdata:
        for game in data["games"]:
            #If the games are the same
            if gameselection["id"] == game["id"]:
                # NOTE(review): awayrank/homerank are computed but never used
                # below, and stay unbound for unranked teams — confirm intent.
                if int(game["awayTeam"]["rank"]) < 26:
                    awayrank = str(game["awayTeam"]["rank"]) + " "
                if int(game["homeTeam"]["rank"]) < 26:
                    homerank = str(game["homeTeam"]["rank"]) + " "
                #If the game is actually over
                if game["status"]["type"] == "STATUS_FINAL":
                    #If the spread is tilted towards the home team
                    if gameselection["spread"].startswith(game["homeTeam"]["abbreviation"]):
                        favored = "home"
                        notfavored = "away"
                    else:
                        favored = "away"
                        notfavored = "home"
                    #If the favored team's score minus the spread is still better than the losing team
                    # (the spread substring starts after "<ABBR> " in the pick)
                    if float(game["scores"][favored]) + float(gameselection["spread"][len(game[favored + "Team"]["abbreviation"])+1:]) >= float(game["scores"][notfavored]):
                        #If your pick (home or away) matches the favored team that covered the spread
                        if gameselection["pick"] == favored:
                            wins += 1
                        else:
                            losses += 1
                    else:
                        #If your pick (home or away) matches the favored team that did not cover the spread
                        if gameselection["pick"] == favored:
                            losses += 1
                        else:
                            wins += 1
    print("Your record this week was " + str(wins) + "-" + str(losses))
    # Records are only written outside Saturday (weekday 5), when games
    # may still be in progress.
    if date.today().weekday() != 5:
        try:
            weekrecord = userdata[user]["week" + whichweek.getweek()["last"][0]]
            print("Your record was already updated for this week.")
        except KeyError:
            # First update for this week: persist weekly and season totals.
            userdata[user]["week" + whichweek.getweek()["last"][0]] = str(wins) + "-" + str(losses)
            userdata[user]["wins"] += wins
            userdata[user]["losses"] += losses
    else:
        print("You cannot update your record on Saturday when some games may still be in progress.")
    users = open('users.json','w')
    users.write(json.dumps(userdata))
    print("Your overall record is " + str(userdata[user]["wins"]) + "-" + str(userdata[user]["losses"]))
checker(login.login())
|
# Generated by Django 2.2.13 on 2021-03-24 18:20
import ckeditor_uploader.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
    """Initial migration for the notices app: Notice, NotifyGroup,
    NoticeResponse plus Notice's M2M/FK wiring.

    NOTE(review): 'notfiy_groups' / 'notfied_notices' are misspelled; they
    are frozen into this generated migration — fix via a follow-up rename
    migration, never by editing this file.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Notice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('title', models.CharField(max_length=255)),
                ('notice_type', models.CharField(choices=[('h', 'Holiday'), ('n', 'None')], default='n', max_length=3)),
                ('file', models.FileField(blank=True, null=True, upload_to='files/notices/')),
                ('content', ckeditor_uploader.fields.RichTextUploadingField()),
            ],
            options={
                'ordering': ['-created'],
            },
        ),
        migrations.CreateModel(
            name='NotifyGroup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('group_name', models.CharField(max_length=55)),
                ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
                ('users', models.ManyToManyField(related_name='notify_groups', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['group_name', '-created'],
            },
        ),
        migrations.CreateModel(
            name='NoticeResponse',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('notice', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='responses', to='notices.Notice')),
                ('responder', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notice_responses', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.AddField(
            model_name='notice',
            name='notfiy_groups',
            field=models.ManyToManyField(related_name='notfied_notices', to='notices.NotifyGroup'),
        ),
        migrations.AddField(
            model_name='notice',
            name='uploaded_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='published_notices', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 17 15:00:01 2016
@author: Shane Yu
"""
import json
import sys
import re
import os
import operator
filename1 = sys.argv[1] #filename1 is 'question.json'
#filename2 = sys.argv[2] #result of querying KCM model
# Load the multiple-choice questions: a list of {'A','B','C'} option dicts.
with open(filename1, 'r') as f1:
    JsonStr = f1.read()
JsonOption = json.loads(JsonStr)
#for sorting the filelist numberically
def natural_sort(l):
    """Sort strings so embedded numbers compare numerically (file2 < file10)."""
    def chunk_key(key):
        # Split into alternating digit / non-digit runs; digit runs compare
        # as ints, everything else case-insensitively.
        return [int(part) if part.isdigit() else part.lower()
                for part in re.split('([0-9]+)', key)]
    return sorted(l, key=chunk_key)
# Process per-question KCM query results in numeric filename order.
FileList = os.listdir('./QueryKcmResult')
FileList = natural_sort(FileList)
#print(FileList) in ordered checked!
n=0 #for incrementing the options index
for file in FileList:
    with open('./QueryKcmResult/'+file, 'r') as f2:
        print('./QueryKcmResult/'+file+' has been computed')
        KcmStr = f2.read() #KcmStr is the result of querying KCM Model
        #KcmStr's string processing
        # Strip tuple punctuation and digits so only the raw terms remain.
        KcmStr = KcmStr.replace("(", "")\
                       .replace(")", "")\
                       .replace("'", "")\
                       .replace(",", "")\
                       .replace(" ", "")
        KcmStr = re.sub("\d+", "", KcmStr)
        #KcmList is a list of the results from querying KCM model
        KcmList = KcmStr.split('\n')
        # Count how often each option's term appears among the KCM results;
        # the most frequent option is taken as the answer.
        times = {'A':"", 'B':"", 'C':""}
        times['A'] = KcmList.count(JsonOption[n]['A'])
        times['B'] = KcmList.count(JsonOption[n]['B'])
        times['C'] = KcmList.count(JsonOption[n]['C'])
        #print 'computation result: ', times <== Cancel the comment to see the result dictionary
        Sorted_Dict = sorted(times.items(), key=operator.itemgetter(1), reverse=True)
        print('===> Question', (n+1), "'s answer is : ",Sorted_Dict[0][0] , "<===")
        n = n + 1
|
import commands,sys
from glob import glob
from configLocal import OUTPUTDIR
from math import ceil
# NOTE(review): Python 2 only — `commands` was removed in Python 3 and the
# file uses print statements; port to subprocess/print() for Python 3.
objName = sys.argv[1]
nInRow = 3  # intended tile width; the montage calls below hard-code 3 instead
status, output = commands.getstatusoutput("find %s/ -name %sCHARTS -print" % (OUTPUTDIR, objName))
ntot = 0
cmd = ""
# Collect all chart PNGs under each *CHARTS directory into one arg string.
for chf in output.split('\n'):
    print chf
    lf = glob("%s/*fits.png" % chf)
    ntot+=len(lf)
    cmd+=" ".join(lf)
    cmd += " "
print("montage %s -tile %dx%d MONTAGE%s.png" % (cmd, 3,ceil(ntot/3.0), objName))
# NOTE(review): duplicate `import os` (mid-file) and a tile grid rebuilt from
# the same ceil() expression as the echo above.
import os
os.system("montage %s -tile %dx%d -geometry 640x480+0+0 MONTAGE%s.png" % (cmd, 3,ceil(ntot/3.0),objName))
|
# Generated by Django 2.0.6 on 2018-06-07 11:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration.

    Adds the Cluster, DissertationMatched, DissertationPreference and Role
    models, adds `active` flags to Dissertation and Person, gives
    Dissertation.description a default, and wires up the FK links.
    """
    dependencies = [
        ('dissertation', '0002_dissertation_description'),
    ]
    operations = [
        migrations.CreateModel(
            name='Cluster',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('active', models.BooleanField()),
            ],
        ),
        migrations.CreateModel(
            name='DissertationMatched',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='DissertationPreference',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('priority', models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)])),
            ],
        ),
        migrations.CreateModel(
            name='Role',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('role', models.CharField(max_length=200)),
            ],
        ),
        migrations.AddField(
            model_name='dissertation',
            name='active',
            field=models.BooleanField(default=False),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='person',
            name='active',
            field=models.BooleanField(default=False),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='dissertation',
            name='description',
            field=models.CharField(default='', max_length=400),
        ),
        migrations.AddField(
            model_name='dissertationpreference',
            name='diss',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dissertation.Dissertation'),
        ),
        migrations.AddField(
            model_name='dissertationpreference',
            name='person',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dissertation.Person'),
        ),
        migrations.AddField(
            model_name='dissertationmatched',
            name='diss',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dissertation.Dissertation'),
        ),
        migrations.AddField(
            model_name='dissertationmatched',
            name='person',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dissertation.Person'),
        ),
    ]
|
# -*- coding: utf-8 -*-
__author__ = 'theme'
__date__ = '2017/9/24 下午12:08'
import xadmin
from xadmin import views
from xadmin.plugins.auth import UserAdmin
from .models import UserProfile
class UserProfileAdmin(UserAdmin):
    # Admin class for UserProfile; no customisation yet (registration below
    # is currently commented out).
    pass
class BaseSetting(object):
    # xadmin base view settings: enable the bootswatch theme picker.
    # enable_themes = True
    use_bootswatch = True
class GlobalSettings(object):
    # Global xadmin site chrome: page title, footer text, accordion menu.
    site_title = "Contacts平台"
    site_footer = "theme开发"
    menu_style = "accordion"
# Register the settings classes with the xadmin site.
xadmin.site.register(views.BaseAdminView, BaseSetting)
# xadmin.site.register(UserProfile, UserProfileAdmin)
xadmin.site.register(views.CommAdminView, GlobalSettings)
|
#-*-coding: utf-8-*-
from config import *
import random,time
from googletrans import Translator
def translate_data(html_to_Language, data):
    """Translate a single string via Google Translate.

    Retries up to 8 times on any error. Returns the translated text with
    ", " normalized to ",", or None if every attempt failed (the original
    fell off the loop and returned None implicitly; made explicit here).
    """
    attempts = 0
    while attempts < 8:
        try:
            ua = random.choice(fake_UserAgent)
            translator = Translator(
                service_urls=[
                    'translate.google.cn',
                    'translate.google.com',
                    'translate.google.com.hk',
                ],
                user_agent=ua,
                timeout=30,
                proxies=proxies,
            )
            en = translator.translate(data, dest=html_to_Language, src=html_base_Language).text
            # removed the unreachable `break` that followed this return
            return str(en).replace(", ", ",")
        except Exception as e:
            print(e, "正在重试:", data)
            attempts += 1
    return None  # all retries exhausted
def translate_list_data(html_to_Language, data):
    """Translate a '*|||*'-delimited string of phrases.

    Returns a list of translated phrases (with ", " normalized to ","),
    or None if all 8 attempts failed.
    """
    # decode the delimited string back into a list of phrases
    data = data.replace("*|||*", "", 1).split('*|||*')
    attempts = 0
    while attempts < 8:
        try:
            # reset per attempt: previously a failed partial run left stale
            # entries in the list and the retry appended duplicates
            en_list = []
            ua = random.choice(fake_UserAgent)
            translator = Translator(
                service_urls=[
                    'translate.google.cn',
                    'translate.google.com',
                    'translate.google.com.hk',
                ],
                user_agent=ua, timeout=30, proxies=proxies,
            )
            for en in translator.translate(data, dest=html_to_Language, src=html_base_Language):
                en_list.append(str(en.text).replace(", ", ","))
            # removed the unreachable `break` that followed this return
            return en_list
        except Exception as e:
            print("正在重试:", e)
            attempts += 1
    return None  # all retries exhausted
# 解析list 转为flask能接受的str
def parse_list(data_list):
data = ''
for i in data_list:
data = data+'*|||*'+i
return data
if __name__ == '__main__':
    # demo: list translation
    data_list = ["测试文件","湖南第一师范学院"]
    print(translate_list_data(html_to_Language="en",data=parse_list(data_list)))
    # demo: single-phrase translation
    data_str = "湖南第一师范学院"
    print(translate_data(html_to_Language="en",data=data_str))
|
# encoding=utf-8
from urllib.request import urlopen
from urllib.parse import quote
from urllib.error import HTTPError
from bs4 import BeautifulSoup
import re
import os
import string
import time
import socket
socket.setdefaulttimeout(120)
def getTime():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    now = time.localtime()
    return time.strftime("%Y-%m-%d %H:%M:%S", now)
def log(info):
    """Append a timestamped info line to log.log."""
    with open('log.log', 'a', encoding="utf-8") as myfile:
        # the `with` block closes the file; the explicit close() was redundant
        myfile.write(f'[{getTime()}]:{info}\n')
def errlog(err):
    """Append a timestamped error line to err.log."""
    with open('err.log', 'a', encoding="utf-8") as myfile:
        # the `with` block closes the file; the explicit close() was redundant
        myfile.write(f'[{getTime()}]:{err}\n')
def getDetailPages(page):
    """Crawl one listing page of PPT templates, download every item on it,
    then recurse to the next page (stops on an empty page or page > 250)."""
    listUrl = f'http://www.yomoer.cn/catalog/templateList?catalogCode=PPTmoban&orderBy=4&catalogId=144&pager.offset={12 * page}'
    log(listUrl)
    detailRes = urlopen(listUrl)
    detailSoup = BeautifulSoup(detailRes.read(), "html.parser", from_encoding="utf-8")
    # each item on the listing page carries a data-preview attribute
    tags = detailSoup.select('[data-preview]')
    if len(tags) == 0 or page > 250:
        # no more items (or safety cap reached) -- stop the recursion
        return
    for tag in tags:
        detailId = tag['data-preview']
        nextUrl = f'http://www.yomoer.cn/template/detail/{detailId}.html'
        downloadPPTfromDetailPage(nextUrl)
    # NOTE(review): recursion depth grows with the page count
    getDetailPages(page + 1)
def downloadPPTfromDetailPage(url):
    """Fetch one template detail page, derive the .pptx download URL from
    the cover-image path, and save the file under ./download/ppt/."""
    log(url)
    try:
        detailRes = urlopen(url)
        detailSoup = BeautifulSoup(detailRes.read(), "html.parser", from_encoding="utf-8")
    except Exception:
        errlog(f'err, {url}')
        return
    imgTag = detailSoup.find(class_="oldImg")
    # tag spans used to build a descriptive local file name
    tags = detailSoup.select('.catalog-detailmore .tips span')
    imgDataDsrc = imgTag['data-dsrc']
    try:
        tagStr = ','.join(map(lambda tag: tag.contents[0], tags))
        # the date folder and item id are embedded in the cover image path
        dateFd = re.findall(r"/cover/(\d+)/cover", imgDataDsrc)[0]
        idStr = re.findall(r"\d+/cover(.+)/", imgDataDsrc)[0]
        fileName = imgTag['alt']
        durl = f"http://www.yomoer.cn/storeData/ppt/{dateFd}/ppt{idStr}/{quote(fileName)}.pptx"
        # durl = quote(durl)
        dDir = os.path.abspath(os.path.join(os.getcwd(), f'./download/ppt/{dateFd}/'))
        dist = os.path.abspath(os.path.join(dDir, f'{tagStr}&&{fileName}.pptx'))
    except IndexError as err:
        # page layout didn't match the expected pattern
        errlog(f'indexError: {url}')
        return
    # return
    try:
        f = urlopen(durl)
        data = f.read()
        if not os.path.exists(dDir):
            os.makedirs(dDir)
        with open(dist, 'wb') as outfile:
            outfile.write(data)
            outfile.close()
        log(f'{fileName} over')
    except HTTPError as e:
        errlog(f'error:{e.code}, {durl}')
    except Exception:
        errlog(f'download error: {durl}')
# Kick off the crawl at listing page 116.
getDetailPages(116)
# downloadPPTfromDetailPage('http://www.yomoer.cn/template/detail/5244.html')
# Generated by Django 2.2 on 2020-02-19 05:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Book, User, Review and
    Author models for the `appy` app."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('alias', models.CharField(max_length=255)),
                ('email', models.CharField(max_length=255)),
                ('password', models.CharField(max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Review',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('desc', models.TextField()),
                ('rating', models.IntegerField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reviews_posted', to='appy.User')),
                ('reviewed_book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reviewed', to='appy.Book')),
            ],
        ),
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('notes', models.TextField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('books', models.ManyToManyField(related_name='authors', to='appy.Book')),
            ],
        ),
    ]
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def pathSum(self, root, sum):
        """
        :type root: TreeNode
        :type sum: int
        :rtype: List[List[int]]

        Collect every root-to-leaf path whose node values add up to `sum`.
        """
        paths = []

        def walk(node, remaining, trail):
            # depth-first walk carrying the remaining target and the path so far
            if node is None:
                return
            remaining -= node.val
            extended = trail + [node.val]
            if remaining == 0 and node.left is None and node.right is None:
                paths.append(extended)
                return
            walk(node.left, remaining, extended)
            walk(node.right, remaining, extended)

        walk(root, sum, [])
        return paths
|
#!/usr/bin/env python3
import sys
from pystalkd.Beanstalkd import SocketError
from archiver.connection import Connection
from archiver.backup import Backup
from archiver.job import Job, JobDecoder
from archiver.alert import Email
import settings
import json
import socket
from time import sleep
def send_alert(error):
    """Email an alert about `error` to the configured recipients, if alerts
    are enabled in settings. Removed the dead `alerts` local and included
    the error itself in the message (the parameter was previously unused)."""
    if settings.alerts['enabled']:
        msg = 'An error occurred during "{}" execution: {}\nPlease, connect to host "{}" and check the process logs.\n'.format(__file__, error, socket.getfqdn())
        alert = Email(settings.alerts['email']['server'], settings.alerts['email']['port'], settings.alerts['email']['sender'], ', '.join(settings.alerts['email']['recipients']), msg)
        alert.send()
#
# main
#
setup = settings.beanstalkd
# Connect to beanstalkd and select which tubes to consume/ignore.
try:
    c = Connection(*setup['connection'])
    c.watchMany(setup['tubes']['watch'])
    c.ignoreMany(setup['tubes']['ignore'])
    print("Watching tubes {}".format(c.watching()))
    print("Ignoring tubes {}".format(setup['tubes']['ignore']))
except ConnectionRefusedError as e:
    print(e)
    send_alert(e)
    sys.exit(1)
except SocketError as e:
    print(e)
    send_alert(e)
    sys.exit(1)
b = Backup()
# Consume loop: reserve a job, run the backup, delete on success or
# release the job back to the queue on failure.
while True:
    if c.isBroken():
        # connection dropped: re-establish and re-select the tubes
        try:
            c.reconnect()
            c.watchMany(setup['tubes']['watch'])
            c.ignoreMany(setup['tubes']['ignore'])
        except ConnectionRefusedError as e:
            print(e)
            send_alert(e)
            sys.exit(3)
        except SocketError as e:
            print(e)
            send_alert(e)
            sys.exit(4)
    job = c.reserve(setup['timeout'])
    if job:
        # job body is JSON describing the file to back up
        archiverJob = json.loads(job.body, cls=JobDecoder)
        if b.run(archiverJob):
            print("Success backuping file {} from {}".format(archiverJob.filename, archiverJob.host))
            job.delete()
        else:
            print("Error while backuping file {} from {}".format(archiverJob.filename, archiverJob.host))
            job.release()
    sleep(setup['timeout'])
|
from django.apps import AppConfig
class RetriesConfig(AppConfig):
    """Django application configuration for the `retries` app."""
    name = 'retries'
    def coolfunction(self, parameter, option=1000):
        """
        Placeholder method; currently does nothing.

        Parameters
        ----------
        parameter :
            First positional argument (unused).
        option : int, optional, default = 1000
            Description of the option (unused).
        """
        pass
|
import os
import unittest
from unittest import mock
from alligator.backends.sqlite_backend import Client as SQLiteClient
from alligator.constants import ALL
from alligator.gator import Gator, Options
from alligator.tasks import Task
def add(a, b):
    """Return the sum of ``a`` and ``b``."""
    total = a + b
    return total
class CustomBackendTestCase(unittest.TestCase):
    """Exercises Gator with an explicitly supplied SQLite backend class."""
    def setUp(self):
        super(CustomBackendTestCase, self).setUp()
        self.conn_string = "sqlite:///tmp/alligator_test.db"
        # start from a clean database file for every test
        try:
            os.unlink("/tmp/alligator_test.db")
        except OSError:
            pass
        self.gator = Gator(self.conn_string, backend_class=SQLiteClient)
        self.gator.backend.setup_tables()
    def test_everything(self):
        # queue four tasks, pop two (FIFO), fetch one by id, then drop all
        self.assertEqual(self.gator.backend.len(ALL), 0)
        t1 = self.gator.task(add, 1, 3)
        t2 = self.gator.task(add, 5, 7)
        t3 = self.gator.task(add, 3, 13)
        t4 = self.gator.task(add, 9, 4)
        self.assertEqual(self.gator.backend.len(ALL), 4)
        task_1 = self.gator.pop()
        self.assertEqual(task_1.result, 4)
        task_3 = self.gator.get(t3.task_id)
        self.assertEqual(task_3.result, 16)
        task_2 = self.gator.pop()
        self.assertEqual(task_2.result, 12)
        self.assertEqual(self.gator.backend.len(ALL), 1)
        self.gator.backend.drop_all(ALL)
        self.assertEqual(self.gator.backend.len(ALL), 0)
    @mock.patch("time.time")
    def test_delay_until(self, mock_time):
        # freeze the clock so delay_until scheduling is deterministic
        mock_time.return_value = 12345678
        self.assertEqual(self.gator.backend.len(ALL), 0)
        with self.gator.options(delay_until=12345777):
            t1 = self.gator.task(add, 2, 2)
        with self.gator.options(delay_until=12345999):
            t2 = self.gator.task(add, 3, 8)
        with self.gator.options(delay_until=12345678):
            t3 = self.gator.task(add, 4, 11)
        with self.gator.options():
            t4 = self.gator.task(add, 7, 1)
        self.assertEqual(self.gator.backend.len(ALL), 4)
        task_1 = self.gator.pop()
        self.assertEqual(task_1.result, 4)
        # advance the clock past every delay_until value
        mock_time.return_value = 123499999
        task_2 = self.gator.pop()
        self.assertEqual(task_2.result, 11)
|
#! /usr/bin/env python
'''
Use this script to "install" the TimeClock application.
'''
import os
import sys
import shutil
# Check OS compatibility
# NOTE(review): Python 2 script (bare print statement below); 'linux2' also
# misses the plain 'linux' value reported by Python 3 -- confirm the target
# interpreter before porting.
WINDOWS = sys.platform == 'win32'
LINUX = sys.platform == 'linux2'
if not (WINDOWS or LINUX):
    print "Unknown or unsupported OS. Exiting script."
    exit(1)
# Set location of application data
if LINUX:
    app_data = r"%s/.timeclock" % os.getenv('HOME')
    if not os.path.exists(app_data):
        os.mkdir(app_data)
if WINDOWS:
    app_data = r"%s\AppData\TimeClock" % os.getenv('USERPROFILE')
    if not os.path.exists(app_data):
        os.makedirs(app_data)
# Copy data templates to app_data directory
shutil.copyfile("data/timeclock_test.db", r"%s/timeclock_test.db" % app_data)
shutil.copyfile("data/hoursreport.plt", r"%s/hoursreport.plt" % app_data)
|
#!/usr/bin/env python
import torch
import torch.nn as nn
import torch.utils.data as data
import torchvision
import layers
import argparse
import cnn
import logger
# Command-line options for the CCNN/CNN MNIST experiment below.
parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--gamma', type=float, default=0.2)
parser.add_argument('--approx_m', type=int, default=100, help="m")
parser.add_argument('--R', type=float, default=1)
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--hunt', action='store_true', help="Hyperparameter search mode with orion")
parser.add_argument('--test', action='store_true', help="Evaluate on test set during training")
parser.add_argument('--eval_all', action='store_true', help="Test all the layers of the CCNN at the end")
parser.add_argument('--cnn', action='store_true', help="Switch from CCNN to CNN")
parser.add_argument('--activation', type=str, default="relu", help="Activation for CNN")
parser.add_argument('-v', '--verbose', action='store_true')
@torch.no_grad()
def test(model: nn.Module, dataloader: data.DataLoader) -> float:
    """Return the mean per-batch top-1 accuracy of `model` over `dataloader`."""
    acc = 0.
    for i, (x, y) in enumerate(dataloader):
        pred = model(x)
        # fraction of correct argmax predictions in this batch
        acc += (pred.max(-1)[1] == y).float().mean().item()
    # average over batches; NOTE(review): raises NameError on an empty loader
    acc /= i + 1
    return acc
@torch.no_grad()
def test_all(model: nn.Module, dataloader: data.DataLoader) -> float:
    """Return the mean top-1 accuracy of every layer of the model (one entry
    per layer), averaged over the batches of `dataloader`."""
    print("Beginning testing")
    acc = torch.zeros(len(model.layers))
    for i, (x, y) in enumerate(dataloader):
        # forward_all yields one prediction per layer
        pred = model.forward_all(x)
        acc += (pred.max(-1)[1] == y[None,:]).float().mean(-1)
    acc /= i + 1  # average over batches
    return acc
def mnist_experiment(args):
    """Train a two-layer CCNN (or CNN with --cnn) on a 5k-sample MNIST split
    and return the final accuracy on the held-out split."""
    #60k samples
    dataset_for_test = torchvision.datasets.MNIST('dataset', train=True , transform=torchvision.transforms.ToTensor(), download=True)
    #10k samples
    dataset_for_trainval = torchvision.datasets.MNIST('dataset', train=False, transform=torchvision.transforms.ToTensor(), download=True)
    if args.test:
        # evaluate on a slice of the 60k split, train on the 10k split
        n_train = 5000
        n_out = len(dataset_for_trainval) - n_train
        dataset_train, _ = data.random_split(dataset_for_trainval, [n_train, n_out])
        n_out = len(dataset_for_test) - n_train
        dataset_test, _ = data.random_split(dataset_for_test, [n_train, n_out])
    else:
        # validation mode: split the 10k set into train/val
        n_train = 5000
        n_val = len(dataset_for_trainval) - n_train
        dataset_train, dataset_test = data.random_split(dataset_for_trainval, [n_train, n_val])
    dataloader_test = data.DataLoader(dataset_test, batch_size=64, num_workers=layers.NUM_WORKERS)
    if args.test:
        logger.Logger.dataloader_test = dataloader_test
    print("Split for {}: {}/{} samples".format("test" if args.test else "validation", len(dataset_train), len(dataset_test)))
    print("lr = {:.5f} gamma = {:.5f}".format(args.lr, args.gamma))
    # per-layer configuration dictionaries consumed by CCNN/CNN
    layer1 = {
        'm':args.approx_m, 'd2':10, 'R':args.R, 'patch_dim':5, 'patch_stride':1, 'kernel':'rbf', 'avg_pooling_kernel_size':2, 'r':16, 'gamma':args.gamma,
    }
    layer2 = {
        'm':2*args.approx_m, 'd2':10, 'R':args.R, 'patch_dim':5, 'patch_stride':1, 'kernel':'rbf', 'avg_pooling_kernel_size':2, 'r':32, 'gamma':args.gamma,
    }
    if args.cnn:
        model = cnn.CNN(img_shape=(1, 28, 28), layer_confs=[layer1, layer2], activation_func=args.activation)
    else:
        model = layers.CCNN(img_shape=(1, 28, 28), layer_confs=[layer1, layer2])
    loggers = model.train(dataset_train, nn.CrossEntropyLoss(), 'fro', n_epochs=args.epochs, batch_size=64, lr=args.lr, verbose=args.verbose)
    if args.test:
        for i, log in enumerate(loggers):
            log.save('layer_{}'.format(i))
    elif args.eval_all:
        # evaluate every layer; sanity-check the last layer against test()
        acc = test_all(model, dataloader_test)
        for l, layer_acc in enumerate(acc):
            print("Accuracy: {:.2f}% on {} samples for layer {}".format(layer_acc*100, len(dataset_test), l))
        if layers.SAFETY_CHECK:
            assert torch.norm(acc[-1] - test(model, dataloader_test)) <=1e-4
        return acc[-1].item()
    else:
        acc = test(model, dataloader_test)
        print("Accuracy: {:.2f}% on {} samples".format(acc*100, len(dataset_test)))
        return acc
if __name__ == '__main__':
    args = parser.parse_args()
    acc = mnist_experiment(args)
    if args.hunt:
        #Orion is needed at this point
        import orion
        import orion.client
        # --hunt reports the validation error, so --test must be off
        assert not args.test
        orion.client.report_results([{
            'name': 'test_error_rate',
            'type': 'objective',
            'value': (1 - acc),
        }])
|
# Set
# -------------------------------------
# - It is a collection of elements
# - It is an unordered collection of elements
# - It doesn't allow duplicate elements
# NOTE: the original used `set` as a variable name, shadowing the builtin;
# renamed so `set()` stays callable. Also `{}` creates an empty *dict*,
# not an empty set -- use set() for that.
sample = set()
print(sample)
sample = {1, 2, 3, 4, 5, 6}
print(sample)
sample = {1, 5, 6, 2, 0, 1, 5}  # duplicates are dropped
print(sample)
sample = {1, 5, "hello"}  # mixed element types are allowed
print(sample)
sample.add(0)
print(sample)
sample.remove(5)  # raises KeyError if the element is absent
print(sample)
sample.update([11, 22, 33, 44, 66, 55, 10])
print(sample)
sample.discard(11)  # unlike remove(), never raises
print(sample)
# difference
set1 = {0, 1, 2, 3}
set2 = {1, 3, 5}
sample = set1 - set2
print(sample)  # {0, 2}
# union of the two sets
set1 = {0, 1, 2, 3}
set2 = {2, 3, 4, 5, 6}
sample = set1 | set2
print(sample)  # {0, 1, 2, 3, 4, 5, 6}
sample = set1 & set2  # intersection
print(sample)  # {2, 3}
|
import pymysql.cursors
import get_config
cfg = get_config.cfg['mysql']
# Connection parameters shared by every helper below; DictCursor makes each
# fetched row a dict keyed by column name.
dbConfig = {
    'user': cfg['user'],
    'password': cfg['password'],
    'host': cfg['host'],
    'database': cfg['database'],
    'cursorclass' : pymysql.cursors.DictCursor
}
def insertTransaction(data):
    """Insert bank transactions, skipping rows already in the table.

    data: iterable of dicts with keys 'accountnumber', 'Date',
    'Description', 'Amount', 'Balance'.
    """
    cnx = pymysql.connect(**dbConfig)
    try:
        with cnx.cursor() as cursor:
            select_sql = "SELECT COUNT(id) as tally FROM transaction WHERE accountnumber=%s AND date=%s AND description=%s AND amount=%s AND ballance=%s"
            insert_sql = "INSERT INTO transaction(accountnumber, date, description, amount, ballance) VALUES (%s, %s, %s, %s, %s)"
            # Insert records
            for row in data:
                params = (row['accountnumber'], row['Date'], row['Description'], row['Amount'], row['Balance'])
                # execute() for a single statement; executemany() is for batches
                cursor.execute(select_sql, params)
                check = cursor.fetchone()
                if check['tally'] > 0:
                    print('Oops Duplicate transaction' + ' : ' + row['accountnumber'] + ' : ' + row['Date'] + ' : ' + row['Description'] + ' : ' + row['Amount'] + ' : ' + row['Balance'])
                else:
                    cursor.execute(insert_sql, params)
            cnx.commit()
    finally:
        cnx.close()
def insertTrades(data, symbol, exchange):
    """Insert exchange trades, skipping trades already in the table."""
    cnx = pymysql.connect(**dbConfig)
    try:
        with cnx.cursor() as cursor:
            select_sql = "SELECT COUNT(id) as tally FROM trades WHERE exchange=%s AND symbol=%s AND exchangeId=%s AND orderId=%s"
            insert_sql = "INSERT INTO trades(exchange, symbol, exchangeId, orderId, price, qty, commission, commissionAsset, time, isBuyer, isMaker, isBestMatch) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
            # Insert records
            for row in data:
                # execute() for a single statement; executemany() is for batches
                cursor.execute(select_sql, (exchange, symbol, row['id'], row['orderId']))
                check = cursor.fetchone()
                if check['tally'] > 0:
                    print('Oops Duplicate trade' + ' : ' + exchange + ' : ' + str(row['id']) + ' : ' + str(row['orderId']) + ' : ' + symbol + ' : ' + str(row['price']))
                else:
                    cursor.execute(insert_sql, (exchange, symbol, row['id'], row['orderId'], row['price'], row['qty'], row['commission'], row['commissionAsset'], row['time'], row['isBuyer'], row['isMaker'], row['isBestMatch']))
            cnx.commit()
    finally:
        cnx.close()
def getLastTradeId(exchange, symbol):
    """Return the most recent exchangeId for (exchange, symbol), or the
    string 'no trades' when none exist."""
    cnx = pymysql.connect(**dbConfig)
    try:
        with cnx.cursor() as cursor:
            select_sql = "SELECT MAX(exchangeId) as exchangeId FROM trades WHERE exchange=%s AND symbol=%s"
            # execute() for a single statement; executemany() is for batches
            cursor.execute(select_sql, (exchange, symbol))
            lastTradeId = cursor.fetchone()['exchangeId']
            if lastTradeId:
                result = lastTradeId
            else:
                result = 'no trades'
    finally:
        cnx.close()
    return (result)
def getCategory(isIncome):
    """Return (id, name) rows of categories filtered by the isIncome flag."""
    cnx = pymysql.connect(**dbConfig)
    try:
        with cnx.cursor() as cursor:
            select_sql = "SELECT id, name from category where isIncome=%s"
            # (isIncome) was not a tuple; use (isIncome,) with execute()
            cursor.execute(select_sql, (isIncome,))
            result = cursor.fetchall()
    finally:
        cnx.close()
    return (result)
def getSubCategory(lnk):
    """Return (id, name) rows of sub-categories linked to category `lnk`."""
    cnx = pymysql.connect(**dbConfig)
    try:
        with cnx.cursor() as cursor:
            select_sql = "SELECT id, name from subCategory where category_lnk=%s"
            # (lnk) was not a tuple; use (lnk,) with execute()
            cursor.execute(select_sql, (lnk,))
            result = cursor.fetchall()
    finally:
        cnx.close()
    return (result)
def getSubCategoryById(lnk):
    """Return the name row(s) for the sub-category with the given id."""
    cnx = pymysql.connect(**dbConfig)
    try:
        with cnx.cursor() as cursor:
            select_sql = "SELECT name from subCategory where id=%s"
            # (lnk) was not a tuple; use (lnk,) with execute()
            cursor.execute(select_sql, (lnk,))
            result = cursor.fetchall()
    finally:
        cnx.close()
    return (result)
def getAllSubCategories():
    """Return every (id, name) sub-category row in the database."""
    connection = pymysql.connect(**dbConfig)
    try:
        with connection.cursor() as cur:
            cur.execute("SELECT id, name from subCategory")
            rows = cur.fetchall()
    finally:
        connection.close()
    return (rows)
def getAccounts():
    """Return every row of the accounts table."""
    connection = pymysql.connect(**dbConfig)
    try:
        with connection.cursor() as cur:
            cur.execute("SELECT * from accounts")
            rows = cur.fetchall()
    finally:
        connection.close()
    return (rows)
def getTransactionDataByAccount(accountnumber, startDate, endDate):
    """Return transactions for an account within [startDate, endDate]."""
    cnx = pymysql.connect(**dbConfig)
    try:
        with cnx.cursor() as cursor:
            select_sql = "SELECT * from transaction where accountnumber=%s AND date BETWEEN %s AND %s"
            # execute() for a single statement; executemany() is for batches
            cursor.execute(select_sql, (accountnumber, startDate, endDate))
            result = cursor.fetchall()
    finally:
        cnx.close()
    return (result)
def getTransactionData():
    """Return every row of the transaction table."""
    connection = pymysql.connect(**dbConfig)
    try:
        with connection.cursor() as cur:
            cur.execute("SELECT * from transaction")
            rows = cur.fetchall()
    finally:
        connection.close()
    return (rows)
def getBal(accountnumber, date):
    """Return the transaction row holding the balance closest to (and no
    later than) `date` for the given account."""
    cnx = pymysql.connect(**dbConfig)
    try:
        with cnx.cursor() as cursor:
            select_sql = "SELECT id, date, balance from transaction where accountnumber=%s AND date <= %s ORDER BY DATEDIFF( date, %s ) DESC LIMIT 1"
            # execute() for a single statement; executemany() is for batches
            cursor.execute(select_sql, (accountnumber, date, date))
            result = cursor.fetchall()
    finally:
        cnx.close()
    return (result)
def getAccountNames(AccType):
    """Return (id, accountname) rows for accounts of the given type."""
    cnx = pymysql.connect(**dbConfig)
    try:
        with cnx.cursor() as cursor:
            select_sql = "SELECT id, accountname from accounts where accountType IN (%s)"
            # removed leftover debug print; (AccType) was not a tuple --
            # use (AccType,) with execute()
            cursor.execute(select_sql, (AccType,))
            result = cursor.fetchall()
    finally:
        cnx.close()
    return (result)
def getAssets(date):
    """Return (name, value) of the assets relevant for a given month.

    For each asset name, the most recent value inserted before `date` is
    returned; assets sold before `date` are filtered out.
    """
    cnx = pymysql.connect(**dbConfig)
    try:
        with cnx.cursor() as cursor:
            select_sql = "SELECT name, value FROM assets ast1 WHERE ast1.insertDate = (SELECT max(ast2.insertDate) FROM assets ast2 WHERE ast2.name = ast1.name AND id IN ( SELECT id FROM assets WHERE insertDate < %s)) AND ast1.soldDate IS NULL"
            # (date) was not a tuple; use (date,) with execute()
            cursor.execute(select_sql, (date,))
            result = cursor.fetchall()
    finally:
        cnx.close()
    return (result)
|
MANAGED_SHOPS_NAMES = ('*****', '****')
TARGET_CUSTOMERS = 1620000
REFERENCE_SHOP_ID = 7559926
MIN_MARKET_SHARE = 0.005 # minimum market share
MAX_MARKET_SHARE = 0.4 # maximum market share
MAX_MARKET_SHARE_STOCK = 0.8 # maximum stock relative to the market
MAX_SALES_ADJUSTMENT = 0.1 # maximum sales adjustment step
MAX_PRICE_ADJUSTMENT = 0.02 # maximum price adjustment step
ELASTICITY = 40 # demand elasticity
SALES_PRICE_FACTOR = 2 # multiplier on the sale price for new goods
TARGET_STOCK_RATIO = 0.8 # target stock-to-demand ratio
# Ecological factors tracked for the managed units.
ECO_FACTORS = (
    'Промышленный и бытовой мусор',
    'Загрязнение автотранспортом',
    'Промышленные стоки',
    'Выбросы электростанций'
)
INDUSTRIAL_CITIES = ['Борисполь',] # managed differently from other cities
SUPPORTED_PARTIES = [
    'Украинская партия',
    'Партия Власти',
    #'"Фронт национального освобождения имени Фарабундо Марти"',
]
# Per-unit seasonal crop schedule: unit id -> {month: crop}.
unit_seasons = {
    7429138: {5: 'Зерно',
              6: 'Сахар',
              7: 'Сахар',
              8: 'Сахар',
              9: 'Кукуруза',
              10: 'Кукуруза',
              11: 'Помидоры',
              },
    7549945: {8: 'Апельсины',
              9: 'Апельсины',
              10: 'Оливки',
              11: 'Оливки',
              },
}
# Supplier id -> unit type(s) the supplier provides equipment for.
EQUIPMENT_SUPPLIERS = {
    8415404: 'office',
    6715974: ('workshop', 'mill'),
    3329984: ('farm', 'orchard'),
    8395882: 'educational',
    4974307: 'lab',
    8197411: 'restaurant',
    8535772: 'repair',
    8206859: 'medicine',
}
# Per unit type, the highest technology level to pursue.
MAX_TECHNOLOGIES = {
    'animalfarm': 30,
    'farm': 25,
    'mill': 32,
    'mine': 23,
    'orchard': 25,
    'sawmill': 32,
    'workshop': 32,
    'power': 22,
}
TENDER_ACTIVE_PLAYERS = [5526168, 6451449][:1]
# coding: utf-8
def quick_sort(arr):
    """Return a sorted copy of `arr` using a first-element-pivot quicksort."""
    if len(arr) <= 1:
        return arr
    pivot = arr[0]
    smaller = [x for x in arr if x < pivot]
    equal = [x for x in arr if x == pivot]
    larger = [x for x in arr if x > pivot]
    return quick_sort(smaller) + equal + quick_sort(larger)
if __name__=="__main__":
a = [4, 65, 2, -31, 0, 99, 83, 782, 1]
print quick_sort(a) |
# -*- coding: utf-8 -*-
# ////////////////////////////////////////////////////////////////失败的版本数组变长以后结果就不对
# class Solution(object):
# def __init__(self):
# pass
#
# def maxProfit(self, prices):
# p = 0
# q = len(prices) - 1
# flag = 0
# if not prices:
# return 0
# _min = prices[p]
# _max = prices[q]
# res=0
# while q > p:
# if flag == 0:
# if prices[p + 1]<_min:
# flag = 1
# else:
# if prices[p+1]>_max:
# res=max((prices[p+1]-prices[p]),res)
# _min = min(_min, prices[p+1])
# p += 1
# elif flag == 1:
# if prices[q - 1]>_max:
# flag = 0
# else:
# if prices[q-1]<_min:
# res=max((prices[q]-prices[q-1]),res)
# _max = max(_max, prices[q-1])
# q -= 1
# if _max - _min < 0:
# return 0
#
# return max((_max - _min),res)
#
# a = Solution()
# print(a.maxProfit([1,4,2]))
# print(a.maxProfit([3,2,6,5,0,3]))
# print(a.maxProfit([3,3,5,0,0,3,1,4]))
# print(a.maxProfit([1,2,4,2,5,7,2,4,9,0,9]))
# print(a.maxProfit([5,2,5,6,8,2,3,0,1,8,5,2,1]))
# ////////////////////////////////////////////////////////////////// 重新思考以后的版本
class Solution(object):
    """Best profit from a single buy/sell over a price sequence."""
    def __init__(self):
        pass

    def maxProfit(self, prices):
        """Track the running minimum and record every gain against it;
        return the largest gain (0 if prices never rise)."""
        gains = []
        low = 0  # index of the running minimum price
        for cur in range(1, len(prices)):
            if prices[cur] <= prices[low]:
                low = cur  # new (or equal) minimum: restart from here
            else:
                gains.append(prices[cur] - prices[low])
        return max(gains) if gains else 0
# Demo runs; expected outputs: 3, 4, 4, 9, 8, 0.
a = Solution()
print(a.maxProfit([1, 4, 2]))
print(a.maxProfit([3, 2, 6, 5, 0, 3]))
print(a.maxProfit([3, 3, 5, 0, 0, 3, 1, 4]))
print(a.maxProfit([1, 2, 4, 2, 5, 7, 2, 4, 9, 0, 9]))
print(a.maxProfit([5, 2, 5, 6, 8, 2, 3, 0, 1, 8, 5, 2, 1]))
print(a.maxProfit([2,1]))
|
"""Tests for surface averaging etc."""
import numpy as np
import pytest
from desc.compute.utils import (
_get_grid_surface,
compress,
expand,
line_integrals,
surface_averages,
surface_integrals,
surface_integrals_transform,
surface_max,
surface_min,
surface_variance,
)
from desc.examples import get
from desc.grid import ConcentricGrid, LinearGrid, QuadratureGrid
def benchmark_surface_integrals(grid, q=np.array([1.0]), surface_label="rho"):
    """Compute a surface integral for each surface in the grid.
    Notes
    -----
    It is assumed that the integration surface has area 4π^2 when the
    surface label is rho and area 2π when the surface label is theta or
    zeta. You may want to multiply q by the surface area Jacobian.
    Parameters
    ----------
    grid : Grid
        Collocation grid containing the nodes to evaluate at.
    q : ndarray
        Quantity to integrate.
        The first dimension of the array should have size ``grid.num_nodes``.
        When ``q`` is 1-dimensional, the intention is to integrate,
        over the domain parameterized by rho, theta, and zeta,
        a scalar function over the previously mentioned domain.
        When ``q`` is 2-dimensional, the intention is to integrate,
        over the domain parameterized by rho, theta, and zeta,
        a vector-valued function over the previously mentioned domain.
        When ``q`` is 3-dimensional, the intention is to integrate,
        over the domain parameterized by rho, theta, and zeta,
        a matrix-valued function over the previously mentioned domain.
    surface_label : str
        The surface label of rho, theta, or zeta to compute the integration over.
    Returns
    -------
    integrals : ndarray
        Surface integral of the input over each surface in the grid.
    """
    _, _, spacing, has_endpoint_dupe = _get_grid_surface(grid, surface_label)
    # quadrature weight per node: product of spacings times the integrand
    weights = (spacing.prod(axis=1) * np.nan_to_num(q).T).T
    surfaces = {}
    nodes = grid.nodes[:, {"rho": 0, "theta": 1, "zeta": 2}[surface_label]]
    # collect node indices for each surface_label surface
    for grid_row_idx, surface_label_value in enumerate(nodes):
        surfaces.setdefault(surface_label_value, []).append(grid_row_idx)
    # integration over non-contiguous elements
    integrals = []
    for _, surface_idx in sorted(surfaces.items()):
        integrals.append(weights[surface_idx].sum(axis=0))
    if has_endpoint_dupe:
        # first and last surfaces are the same surface; merge their sums
        integrals[0] += integrals[-1]
        integrals[-1] = integrals[0]
    return np.asarray(integrals)
# arbitrary choice of grid construction parameters, passed to
# LinearGrid / ConcentricGrid in the tests below
L = 6
M = 6
N = 3
NFP = 5
class TestComputeUtils:
"""Tests for grid operations, surface averages etc."""
    @pytest.mark.unit
    def test_compress_expand_inverse_op(self):
        """Test that compress & expand are inverse operations for surface functions.
        Each test should be done on different types of grids
        (e.g. LinearGrid, ConcentricGrid) and grids with duplicate nodes
        (e.g. endpoint=True).
        """
        # nested helper exercises one (surface_label, grid) pair
        def test(surface_label, grid):
            # random per-surface values, one per unique surface
            r = np.random.random_sample(
                size={
                    "rho": grid.num_rho,
                    "theta": grid.num_theta,
                    "zeta": grid.num_zeta,
                }[surface_label]
            )
            expanded = expand(grid, r, surface_label)
            assert expanded.size == grid.num_nodes
            # compress(expand(r)) should recover r exactly
            s = compress(grid, expanded, surface_label)
            np.testing.assert_allclose(r, s, err_msg=surface_label)
        lg_endpoint = LinearGrid(L=L, M=M, N=N, NFP=NFP, sym=True, endpoint=True)
        cg_sym = ConcentricGrid(L=L, M=M, N=N, NFP=NFP, sym=True)
        test("rho", lg_endpoint)
        test("theta", lg_endpoint)
        test("zeta", lg_endpoint)
        test("rho", cg_sym)
        test("theta", cg_sym)
        test("zeta", cg_sym)
    @pytest.mark.unit
    def test_surface_integrals(self):
        """Test surface_integrals against a more intuitive implementation.
        This test should ensure that the algorithm in implementation is correct
        on different types of grids (e.g. LinearGrid, ConcentricGrid). Each test
        should also be done on grids with duplicate nodes (e.g. endpoint=True).
        """
        # compare surface_integrals to benchmark_surface_integrals on B_theta
        def test_b_theta(surface_label, grid, eq):
            q = eq.compute("B_theta", grid=grid)["B_theta"]
            integrals = surface_integrals(grid, q, surface_label, expand_out=False)
            # one integral per unique surface of this label
            assert (
                integrals.size
                == {
                    "rho": grid.num_rho,
                    "theta": grid.num_theta,
                    "zeta": grid.num_zeta,
                }[surface_label]
            )
            desired = benchmark_surface_integrals(grid, q, surface_label)
            np.testing.assert_allclose(
                integrals, desired, atol=1e-16, err_msg=surface_label
            )
        eq = get("W7-X")
        lg = LinearGrid(L=L, M=M, N=N, NFP=eq.NFP, endpoint=False)
        lg_endpoint = LinearGrid(L=L, M=M, N=N, NFP=eq.NFP, endpoint=True)
        cg_sym = ConcentricGrid(L=L, M=M, N=N, NFP=eq.NFP, sym=True)
        for label in ("rho", "theta", "zeta"):
            test_b_theta(label, lg, eq)
            test_b_theta(label, lg_endpoint, eq)
            if label != "theta":
                # theta integrals are poorly defined on concentric grids
                test_b_theta(label, cg_sym, eq)
@pytest.mark.unit
def test_surface_integrals_transform(self):
"""Test surface integral of a kernel function."""
def test(surface_label, grid):
ints = np.arange(grid.num_nodes)
# better to test when all elements have the same sign
q = np.abs(np.outer(np.cos(ints), np.sin(ints)))
# This q represents the kernel function
# K_{u_1} = |cos(x(u_1, u_2, u_3)) * sin(x(u_4, u_5, u_6))|
# The first dimension of q varies the domain u_1, u_2, and u_3
# and the second dimension varies the codomain u_4, u_5, u_6.
integrals = surface_integrals_transform(grid, surface_label)(q)
assert integrals.shape == (
{
"rho": grid.num_rho,
"theta": grid.num_theta,
"zeta": grid.num_zeta,
}[surface_label],
grid.num_nodes,
), surface_label
desired = benchmark_surface_integrals(grid, q, surface_label)
np.testing.assert_allclose(integrals, desired, err_msg=surface_label)
cg = ConcentricGrid(L=L, M=M, N=N, sym=True, NFP=NFP)
lg = LinearGrid(L=L, M=M, N=N, sym=True, NFP=NFP, endpoint=True)
test("rho", cg)
test("theta", lg)
test("zeta", cg)
@pytest.mark.unit
def test_surface_averages_vector_functions(self):
"""Test surface averages of vector-valued, function-valued integrands."""
def test(surface_label, grid):
g_size = grid.num_nodes # not a choice; required
f_size = g_size // 10 + (g_size < 10)
# arbitrary choice, but f_size != v_size != g_size is better to test
v_size = g_size // 20 + (g_size < 20)
g = np.cos(np.arange(g_size))
fv = np.sin(np.arange(f_size * v_size).reshape(f_size, v_size))
# better to test when all elements have the same sign
q = np.abs(np.einsum("g,fv->gfv", g, fv))
sqrt_g = np.arange(g_size).astype(float)
averages = surface_averages(grid, q, sqrt_g, surface_label)
assert averages.shape == q.shape == (g_size, f_size, v_size), surface_label
desired = (
benchmark_surface_integrals(grid, (sqrt_g * q.T).T, surface_label).T
/ benchmark_surface_integrals(grid, sqrt_g, surface_label)
).T
np.testing.assert_allclose(
compress(grid, averages, surface_label), desired, err_msg=surface_label
)
cg = ConcentricGrid(L=L, M=M, N=N, sym=True, NFP=NFP)
lg = LinearGrid(L=L, M=M, N=N, sym=True, NFP=NFP, endpoint=True)
test("rho", cg)
test("theta", lg)
test("zeta", cg)
@pytest.mark.unit
def test_surface_area(self):
"""Test that surface_integrals(ds) is 4pi^2 for rho, 2pi for theta, zeta.
This test should ensure that surfaces have the correct area on grids
constructed by specifying L, M, N and by specifying an array of nodes.
Each test should also be done on grids with duplicate nodes
(e.g. endpoint=True) and grids with symmetry.
"""
def test(surface_label, grid):
areas = surface_integrals(
grid, surface_label=surface_label, expand_out=False
)
correct_area = 4 * np.pi**2 if surface_label == "rho" else 2 * np.pi
np.testing.assert_allclose(areas, correct_area, err_msg=surface_label)
lg = LinearGrid(L=L, M=M, N=N, NFP=NFP, sym=False, endpoint=False)
lg_sym = LinearGrid(L=L, M=M, N=N, NFP=NFP, sym=True, endpoint=False)
lg_endpoint = LinearGrid(L=L, M=M, N=N, NFP=NFP, sym=False, endpoint=True)
lg_sym_endpoint = LinearGrid(L=L, M=M, N=N, NFP=NFP, sym=True, endpoint=True)
rho = np.linspace(1, 0, L)[::-1]
theta = np.linspace(0, 2 * np.pi, M, endpoint=False)
theta_endpoint = np.linspace(0, 2 * np.pi, M, endpoint=True)
zeta = np.linspace(0, 2 * np.pi / NFP, N, endpoint=False)
zeta_endpoint = np.linspace(0, 2 * np.pi / NFP, N, endpoint=True)
lg_2 = LinearGrid(
rho=rho, theta=theta, zeta=zeta, NFP=NFP, sym=False, endpoint=False
)
lg_2_sym = LinearGrid(
rho=rho, theta=theta, zeta=zeta, NFP=NFP, sym=True, endpoint=False
)
lg_2_endpoint = LinearGrid(
rho=rho,
theta=theta_endpoint,
zeta=zeta_endpoint,
NFP=NFP,
sym=False,
endpoint=True,
)
lg_2_sym_endpoint = LinearGrid(
rho=rho,
theta=theta_endpoint,
zeta=zeta_endpoint,
NFP=NFP,
sym=True,
endpoint=True,
)
cg = ConcentricGrid(L=L, M=M, N=N, NFP=NFP, sym=False)
cg_sym = ConcentricGrid(L=L, M=M, N=N, NFP=NFP, sym=True)
for label in ("rho", "theta", "zeta"):
test(label, lg)
test(label, lg_sym)
test(label, lg_endpoint)
test(label, lg_sym_endpoint)
test(label, lg_2)
test(label, lg_2_sym)
test(label, lg_2_endpoint)
test(label, lg_2_sym_endpoint)
if label != "theta":
# theta integrals are poorly defined on concentric grids
test(label, cg)
test(label, cg_sym)
@pytest.mark.unit
def test_line_length(self):
"""Test that line_integrals(dl) is 1 for rho, 2pi for theta, zeta.
This test should ensure that lines have the correct length on grids
constructed by specifying L, M, N and by specifying an array of nodes.
"""
def test(grid):
if not isinstance(grid, ConcentricGrid):
for theta_val in grid.nodes[grid.unique_theta_idx, 1]:
result = line_integrals(
grid,
line_label="rho",
fix_surface=("theta", theta_val),
expand_out=False,
)
np.testing.assert_allclose(result, 1)
for rho_val in grid.nodes[grid.unique_rho_idx, 0]:
result = line_integrals(
grid,
line_label="zeta",
fix_surface=("rho", rho_val),
expand_out=False,
)
np.testing.assert_allclose(result, 2 * np.pi)
for zeta_val in grid.nodes[grid.unique_zeta_idx, 2]:
result = line_integrals(
grid,
line_label="theta",
fix_surface=("zeta", zeta_val),
expand_out=False,
)
np.testing.assert_allclose(result, 2 * np.pi)
lg = LinearGrid(L=L, M=M, N=N, NFP=NFP, sym=False)
lg_sym = LinearGrid(L=L, M=M, N=N, NFP=NFP, sym=True)
rho = np.linspace(1, 0, L)[::-1]
theta = np.linspace(0, 2 * np.pi, M, endpoint=False)
zeta = np.linspace(0, 2 * np.pi / NFP, N, endpoint=False)
lg_2 = LinearGrid(rho=rho, theta=theta, zeta=zeta, NFP=NFP, sym=False)
lg_2_sym = LinearGrid(rho=rho, theta=theta, zeta=zeta, NFP=NFP, sym=True)
cg = ConcentricGrid(L=L, M=M, N=N, NFP=NFP, sym=False)
cg_sym = ConcentricGrid(L=L, M=M, N=N, NFP=NFP, sym=True)
test(lg)
test(lg_sym)
test(lg_2)
test(lg_2_sym)
test(cg)
test(cg_sym)
@pytest.mark.unit
def test_surface_averages_identity_op(self):
"""Test flux surface averages of surface functions are identity operations."""
eq = get("W7-X")
grid = ConcentricGrid(L=L, M=M, N=N, NFP=eq.NFP, sym=eq.sym)
data = eq.compute(["p", "sqrt(g)"], grid=grid)
pressure_average = surface_averages(grid, data["p"], data["sqrt(g)"])
np.testing.assert_allclose(data["p"], pressure_average)
@pytest.mark.unit
def test_surface_averages_homomorphism(self):
"""Test flux surface averages of surface functions are additive homomorphisms.
Meaning average(a + b) = average(a) + average(b).
"""
eq = get("W7-X")
grid = ConcentricGrid(L=L, M=M, N=N, NFP=eq.NFP, sym=eq.sym)
data = eq.compute(["|B|", "|B|_t", "sqrt(g)"], grid=grid)
a = surface_averages(grid, data["|B|"], data["sqrt(g)"])
b = surface_averages(grid, data["|B|_t"], data["sqrt(g)"])
a_plus_b = surface_averages(grid, data["|B|"] + data["|B|_t"], data["sqrt(g)"])
np.testing.assert_allclose(a_plus_b, a + b)
@pytest.mark.unit
def test_surface_integrals_against_shortcut(self):
"""Test integration against less general methods."""
grid = ConcentricGrid(L=L, M=M, N=N, NFP=NFP)
ds = grid.spacing[:, :2].prod(axis=-1)
# something arbitrary that will give different sum across surfaces
q = np.arange(grid.num_nodes) ** 2
# The predefined grids sort nodes in zeta surface chunks.
# To compute a quantity local to a surface, we can reshape it into zeta
# surface chunks and compute across the chunks.
result = grid.expand(
(ds * q).reshape((grid.num_zeta, -1)).sum(axis=-1),
surface_label="zeta",
)
np.testing.assert_allclose(
surface_integrals(grid, q, surface_label="zeta"),
desired=result,
)
@pytest.mark.unit
def test_surface_averages_against_shortcut(self):
"""Test averaging against less general methods."""
# test on zeta surfaces
grid = LinearGrid(L=L, M=M, N=N, NFP=NFP)
# something arbitrary that will give different average across surfaces
q = np.arange(grid.num_nodes) ** 2
# The predefined grids sort nodes in zeta surface chunks.
# To compute a quantity local to a surface, we can reshape it into zeta
# surface chunks and compute across the chunks.
mean = grid.expand(
q.reshape((grid.num_zeta, -1)).mean(axis=-1),
surface_label="zeta",
)
# number of nodes per surface
n = grid.num_rho * grid.num_theta
np.testing.assert_allclose(np.bincount(grid.inverse_zeta_idx), desired=n)
ds = grid.spacing[:, :2].prod(axis=-1)
np.testing.assert_allclose(
surface_integrals(grid, q / ds, surface_label="zeta") / n,
desired=mean,
)
np.testing.assert_allclose(
surface_averages(grid, q, surface_label="zeta"),
desired=mean,
)
# test on grids with a single rho surface
eq = get("W7-X")
rho = np.array((1 - 1e-4) * np.random.default_rng().random() + 1e-4)
grid = LinearGrid(rho=rho, M=eq.M_grid, N=eq.N_grid, NFP=eq.NFP, sym=eq.sym)
data = eq.compute(["|B|", "sqrt(g)"], grid=grid)
np.testing.assert_allclose(
surface_averages(grid, data["|B|"], data["sqrt(g)"]),
np.mean(data["sqrt(g)"] * data["|B|"]) / np.mean(data["sqrt(g)"]),
err_msg="average with sqrt(g) fail",
)
np.testing.assert_allclose(
surface_averages(grid, data["|B|"]),
np.mean(data["|B|"]),
err_msg="average without sqrt(g) fail",
)
@pytest.mark.unit
def test_surface_variance(self):
"""Test correctness of variance against less general methods."""
grid = LinearGrid(L=L, M=M, N=N, NFP=NFP)
# something arbitrary that will give different variance across surfaces
q = np.arange(grid.num_nodes) ** 2
# The predefined grids sort nodes in zeta surface chunks.
# To compute a quantity local to a surface, we can reshape it into zeta
# surface chunks and compute across the chunks.
chunks = q.reshape((grid.num_zeta, -1))
# Test weighted sample variance with different weights.
# positive weights to prevent cancellations that may hide implementation error
weights = np.cos(q) * np.sin(q) + 5
biased = surface_variance(
grid, q, weights, bias=True, surface_label="zeta", expand_out=False
)
unbiased = surface_variance(
grid, q, weights, surface_label="zeta", expand_out=False
)
# The ds weights are built into the surface variance function.
# So weights for np.cov should be ds * weights. Since ds is constant on
# LinearGrid, we need to get the same result if we don't multiply by ds.
weights = weights.reshape((grid.num_zeta, -1))
for i in range(grid.num_zeta):
np.testing.assert_allclose(
biased[i],
desired=np.cov(chunks[i], bias=True, aweights=weights[i]),
)
np.testing.assert_allclose(
unbiased[i],
desired=np.cov(chunks[i], aweights=weights[i]),
)
# Test weighted sample variance converges to unweighted sample variance
# when all weights are equal.
chunks = grid.expand(chunks, surface_label="zeta")
np.testing.assert_allclose(
surface_variance(grid, q, np.e, bias=True, surface_label="zeta"),
desired=chunks.var(axis=-1),
)
np.testing.assert_allclose(
surface_variance(grid, q, np.e, surface_label="zeta"),
desired=chunks.var(axis=-1, ddof=1),
)
@pytest.mark.unit
def test_min_max(self):
"""Test the surface_min and surface_max functions."""
for grid_type in [LinearGrid, QuadratureGrid, ConcentricGrid]:
grid = grid_type(L=3, M=4, N=5, NFP=3)
rho = grid.nodes[:, 0]
theta = grid.nodes[:, 1]
zeta = grid.nodes[:, 2]
# Make up an arbitrary function of the coordinates:
B = (
1.7
+ 0.4 * rho * np.cos(theta)
+ 0.8 * rho * rho * np.cos(2 * theta - 3 * zeta)
)
Bmax_alt = np.zeros(grid.num_rho)
Bmin_alt = np.zeros(grid.num_rho)
for j in range(grid.num_rho):
Bmax_alt[j] = np.max(B[grid.inverse_rho_idx == j])
Bmin_alt[j] = np.min(B[grid.inverse_rho_idx == j])
np.testing.assert_allclose(Bmax_alt, compress(grid, surface_max(grid, B)))
np.testing.assert_allclose(Bmin_alt, compress(grid, surface_min(grid, B)))
|
from uwupy.generics import CustomUwU
import json
class HttpUwU(Exception):
    """Maps HTTP status codes to uwu-flavored JSON message strings.

    Call ``uwuHttpError(status_code)`` to dispatch to the matching
    ``n_<code>`` method. Every handler returns ``json.dumps({code: message})``.
    Raises CustomUwU for a missing or unknown status code.
    """
    def uwuHttpError(self, status_code):
        """Return the JSON message for *status_code*.

        Raises CustomUwU when status_code is None or has no handler.
        """
        # Validate before building the attribute name; the original
        # stringified None first and only then checked it.
        if status_code is None:
            raise CustomUwU("Status_Code param", "Ooops!", "Needs a big strong error_code")
        try:
            method = getattr(self, "n_" + str(status_code))
        except AttributeError:
            raise CustomUwU("Status_Code param", "o̲wo̲", "I missed that one... Sorry Senpai")
        return method()
    # Informational responses (1xx)
    def n_100(self):
        return json.dumps({100: "UwU Keep going.."})
    # Success (2xx)
    def n_200(self):
        return json.dumps({200: "(U ᵕ U❁) Reqwest was a Suckess"})
    def n_201(self):
        return json.dumps({201: "ᕦ( ˘ᴗ˘ )ᕤ Reswource Cweated just for you OwO"})
    def n_202(self):
        return json.dumps({202: "(⁄˘⁄⁄ω⁄⁄˘⁄)♡ Your reqwest was accepted Senpai"})
    def n_204(self):
        return json.dumps({204: "(°﹏°) You saw nothing!"})
    def n_226(self):
        return json.dumps({226: "OuO I am curwently being used Senpai"})
    # Redirection (3xx)
    def n_301(self):
        return json.dumps({301: "owO oh.. sowwy.. The code monkeys have moved me permanently"})
    def n_302(self):
        return json.dumps({302: "uw- Oopsie! You've fwound me!"})
    def n_304(self):
        return json.dumps({304: "UwU Don't worwy I weft this one alone for you."})
    # Client errors (4xx)
    def n_400(self):
        return json.dumps({400: "ღ(O꒳Oღ) RawR! Fucky Wucky Request."})
    def n_401(self):
        return json.dumps({401: "Oooh. You're not suppose to be here... Its not what it looks like uwu"})
    def n_403(self):
        # Bug fix: the key was 402, mismatching the method's status code.
        return json.dumps({403: "(°꒳°) GWET OUT! The door was supposed to be locked!"})
    def n_404(self):
        # Bug fix: the key was 403.
        return json.dumps({404: "UwU Emptwy Fiwwelds. Nothingness"})
    def n_405(self):
        return json.dumps({405: "𝒪𝓌𝒪 -Senpai! Are you sure this is the right way?"})
    def n_409(self):
        # Bug fix: the key was 406.
        return json.dumps({409: "WONG HOLE -W-"})
    def n_418(self):
        # Bug fix: returned a bare 1-tuple instead of a JSON string like
        # every other handler.
        return json.dumps({418: "I am a teapot... (*´∀`)_旦"})
    def n_429(self):
        return json.dumps({429: "End of the line OwO"})
    # Server errors (5xx)
    def n_500(self):
        return json.dumps({500: "UwU My insides are messy"})
    def n_501(self):
        return json.dumps({501: "Not Compweted"})
    def n_502(self):
        return json.dumps({502: "Sowwy we couldn't find what your were looking for"})
    def n_503(self):
        return json.dumps({503: "Sowwy our code monkeys are working vewy hard to fix this"})
|
"""
Project Euler
Problem 39: Integer right triangles
Answer: 840
"""
import math
def integerRightTrianglesWithPerimeterLessThan(maxPerimeter):
    """
    Return a dict mapping perimeter -> number of integer right triangles
    (a <= b < c with a^2 + b^2 = c^2) whose perimeter is strictly less
    than maxPerimeter.
    """
    perimeterToRightTriangleSidesMap = {}
    # Fix side a, then grow b starting at a so each {a, b} pair is counted once.
    for a in range(1, maxPerimeter - 2):  # Python 3: range (xrange was removed)
        b = a
        # math.isqrt is an exact integer floor-sqrt, avoiding the float
        # rounding errors that int(math.sqrt(...)) could hit for large values.
        int_c = math.isqrt(a * a + b * b)
        perimeter = a + b + int_c
        while perimeter < maxPerimeter:
            # The hypotenuse is integral iff a^2 + b^2 is a perfect square.
            if a * a + b * b == int_c * int_c:
                perimeterToRightTriangleSidesMap[perimeter] = (
                    perimeterToRightTriangleSidesMap.get(perimeter, 0) + 1
                )
            b += 1
            int_c = math.isqrt(a * a + b * b)
            perimeter = a + b + int_c
    return perimeterToRightTriangleSidesMap
def getPerimeterWithMaxIntegerRightTriangles(upperBound):
    """
    Return the perimeter < upperBound with the most integer right triangles
    whose sides add up to it.
    """
    counts = integerRightTrianglesWithPerimeterLessThan(upperBound)
    # Python 3: dict.iterkeys() no longer exists; iterate the dict directly
    # and use the count as the comparison key.
    return max(counts, key=counts.get)
# Python 3: print is a function, not a statement.
print(getPerimeterWithMaxIntegerRightTriangles(1000))
|
import datetime
import glob
import os
import logging
from python_lib import parse
from core_jukebox import os_common, templates
from core_jukebox import jukebox
logger = logging.getLogger(__name__)
class Tape(object):
    """Base wrapper for a named workarea; see AssetTape/ShotTape subclasses."""
    @classmethod
    def from_filepath(cls, filepath):
        """Build an AssetTape or ShotTape from a workfile path.

        Returns None (implicitly) when the path does not exist or matches
        neither workarea template.
        """
        if not os.path.exists(filepath):
            # Use the module-level logger (the original called logging.warning
            # directly, bypassing this module's configured logger).
            logger.warning("No Tape found in: {} ".format(filepath))
            return
        asset_parse = parse.search(templates.ASSET_WORKAREA, filepath)
        if asset_parse:
            return AssetTape(asset_parse.named.get("asset"),
                             asset_type=asset_parse.named.get("asset_type", None),
                             task=asset_parse.named.get("task", None),
                             workfile=os.path.splitext(os.path.basename(filepath))[0])
        shot_parse = parse.search(templates.SHOT_WORKAREA, filepath)
        if shot_parse:
            # NOTE(review): ShotTape.from_filepath reads the "shot" field from
            # this same template; "instance" here looks inconsistent — confirm
            # which field name SHOT_WORKAREA actually defines.
            return ShotTape(shot_parse.named.get("instance"), task=shot_parse.named.get("task", None))
    def __init__(self, name):
        # name: the asset/shot identifier this tape represents.
        self.name = name
class AssetTape(Tape):
    """Tape for an asset workarea (asset_type/asset/task)."""
    @classmethod
    def from_filepath(cls, filepath):
        """Parse an asset workfile path into an AssetTape; None on mismatch."""
        parsed = parse.search(templates.ASSET_WORKAREA, filepath)
        if not parsed:
            logger.warning(
                "Invalid filepath: {} Expected: {}".format(filepath, templates.ASSET)
            )
        else:
            return cls(
                parsed.named.get("asset"),
                asset_type=parsed.named.get("asset_type"),
                task=parsed.named.get("task"),
                project_root=jukebox.project.find_project_from_path(os.path.dirname(filepath)),
                workfile=os.path.splitext(os.path.basename(filepath))[0]
            )
    def __init__(
        self,
        name,
        asset_type=None,
        task=None,
        dcc_root=templates.MAYA_PROJECT_ROOT,
        project_root=None,
        workfile=None
    ):
        super(AssetTape, self).__init__(name)
        self.is_shot = False
        self.asset_type = asset_type
        self.dcc_root = dcc_root
        self.task = task
        self.root = templates.ASSET_WORKAREA.format(
            DCC_ROOT=self.dcc_root,
            asset_type=self.asset_type,
            asset=self.name,
            task=self.task,
        )
        # Bug fix: a leftover hard-coded debug path
        # (r"C:/Users/their/Documents/AJ_test") used to overwrite project_root
        # right after this assignment, breaking every other machine.
        self.project_root = project_root or jukebox.project.get_project_root()
        self.absolute_path = os.path.join(self.project_root, self.root) if self.project_root and self.root else None
        self.workfile = workfile
    def get_workfile_archive_path(self):
        """Absolute archive path for this workfile, stamped to the minute (UTC)."""
        timestamp = datetime.datetime.utcnow().strftime('%Y%m%d_%H%M')
        path = templates.ASSET_WORKFILE_ARCHIVE.format(DCC_ROOT=self.dcc_root,
                                                       asset_type=self.asset_type,
                                                       asset=self.name,
                                                       task=self.task,
                                                       name=self.workfile,
                                                       representation="ma",
                                                       timestamp=timestamp)
        return os.path.join(self.project_root, path)
class ShotTape(Tape):
    """Tape for a shot workarea (shot/task)."""
    @classmethod
    def from_filepath(cls, filepath):
        """Parse a shot workfile path into a ShotTape; None on mismatch."""
        parsed = parse.search(templates.SHOT_WORKAREA, filepath)
        if not parsed:
            logger.warning(
                "Invalid filepath: {} Expected: {}".format(filepath, templates.SHOT)
            )
        else:
            return cls(
                parsed.named.get("shot"),
                task=parsed.named.get("task"),
                project_root=jukebox.project.find_project_from_path(os.path.dirname(filepath)),
                workfile=os.path.splitext(os.path.basename(filepath))[0]
            )
    def __init__(
        self, name, task=None, dcc_root=templates.MAYA_PROJECT_ROOT, project_root=None, workfile=None
    ):
        super(ShotTape, self).__init__(name)
        self.is_shot = True
        self.name = name
        self.task = task
        self.dcc_root = dcc_root
        self.root = templates.SHOT.format(
            DCC_ROOT=self.dcc_root, shot=self.name, task=self.task
        )
        self.project_root = project_root or jukebox.project.get_project_root()
        self.absolute_path = os.path.join(self.project_root, self.root)
        self.workfile = workfile
    def get_workfile_archive_path(self):
        """Absolute archive path for this workfile, stamped to the day."""
        timestamp = datetime.date.today().strftime('%Y%m%d')
        path = templates.SHOT_WORKFILE_ARCHIVE.format(DCC_ROOT=self.dcc_root,
                                                      shot=self.name,
                                                      task=self.task,
                                                      name=self.workfile,
                                                      representation="ma",
                                                      timestamp=timestamp)
        return os.path.join(self.project_root, path)
    def get_outputs(self, datatype=None):
        """List output Songs under this shot, optionally filtered by datatype.

        Bug fixes: the datatype branch discarded the base path
        (``os.path.join(datatype)``), and the inner listdir received a bare
        directory name instead of its full path, so it only worked by
        accident from the right cwd.
        """
        # TODO: Use the template
        path = os.path.join(self.root, "outputs/")
        if datatype:
            path = os.path.join(path, datatype)
        # NOTE(review): Song still receives the bare entry name, as before —
        # confirm whether it expects a full path instead.
        return [
            jukebox.song.Song(output)
            for sub_folder in os.listdir(path)
            for output in os.listdir(os.path.join(path, sub_folder))
        ]
class SequenceTape(Tape):
    """Tape for a sequence: a folder containing SHOT_* sub-workareas."""
    @classmethod
    def from_filepath(cls, filepath):
        """Parse a sequence path into a SequenceTape; None on mismatch."""
        parsed = parse.search(templates.SHOT, filepath)
        if not parsed:
            logger.warning(
                "Invalid filepath: {} Expected: {}".format(filepath, templates.SHOT)
            )
        else:
            return cls(
                parsed.named.get("shot"),
                task=parsed.named.get("task"),
                project_root=jukebox.project.find_project_from_path(os.path.dirname(filepath)),
            )
    def __init__(
        self, name, task=None, dcc_root=templates.MAYA_PROJECT_ROOT, project_root=None, filepath=None
    ):
        # Bug fix: super() was called as super(ShotTape, self), which raises
        # TypeError because SequenceTape is not a ShotTape subclass.
        super(SequenceTape, self).__init__(name)
        self.name = name
        self.task = task
        self.filepath = filepath
    def get_shots(self):
        """Return a ShotTape for every SHOT_* directory under this sequence.

        Bug fix: the method was missing ``self``, so calling it on an
        instance raised TypeError and the body's self references failed.
        """
        shot_dirs = [
            entry.path for entry in os.scandir(self.filepath)
            if entry.is_dir() and entry.name.startswith("SHOT_")
        ]
        return [ShotTape.from_filepath(shot) for shot in shot_dirs]
def retrieve_tapes_with_types(project_root, dcc_root=templates.MAYA_PROJECT_ROOT):
    """
    Returns dict of asset type -> list of Tape objects.

    Bug fixes: the original referenced the undefined name ``tape``
    (NameError on every call) and, for the second file of a type, called
    .append() on a Tape instance rather than a list. Every entry is now a
    list, even when a type has a single file.
    """
    assets_path = os.path.join(project_root,
                               templates.ASSETS_ROOT.format(dcc_root)
                               )
    assets = {}
    for asset_dir_name in os.listdir(assets_path):
        asset_dir = os.path.join(assets_path, asset_dir_name)
        for asset_filename in os.listdir(asset_dir):
            filepath = os.path.join(asset_dir, asset_filename)
            if os.path.isfile(filepath):
                assets.setdefault(asset_dir_name, []).append(Tape.from_filepath(filepath))
    return assets
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Tutorial script demonstrating printing, Unicode/bytes encoding, string
# formatting, a simple conditional, and input().
print('I','love', 'China')
print(100+200)
print('100+200=', 100+200)
print(3>2)
print(True)
print(False)
print('中文字符串')
# ord()/chr() convert between a character and its Unicode code point.
print(ord('A'))
print(ord('a'))
print(ord('中'))
print(chr(97))
print(b'ABC'.decode('ascii'))
print(b'\xe4\xb8\xad\xe6\x96\x87'.decode('utf-8'))
# The next line is kept commented out: \xff is invalid UTF-8 and would raise
# UnicodeDecodeError without errors='ignore'.
#print(b'\xe4\xb8\xad\xff'.decode('utf-8'))
print(b'\xe4\xb8\xad\xff'.decode('utf-8', errors='ignore'))
print(r'\\\t\\')
print('''l1
l2
l3''')
# len() on a str counts characters; on bytes it counts bytes.
print(len('中文'))
print(len('ac'))
print(len(b'\xe4\xb8\xad\xe6\x96\x87'))
print(len('中文'.encode('utf-8')))
print("i 'm \"ok\" ! \n thank you !")
# %-style and str.format formatting examples.
print('hello , %s' % 'world')
print('hi, %s, you have $%d' %('mike', 100000))
print('age : %s. gender: %s' % (25, True))
print('growth rate:%d %%' % 7)
print('hello ,{0}, 成绩提升了 {1:.1f}%'.format('晓明', 17.125))
print('hello, {0}, 成绩提升了 {1:.1f}%'.format('小明', (85-72)/72*100))
a = 100
# Print the absolute value of a.
if a>=0 :
    print(a)
else :
    print(-a)
name = input('enter your name:')
print('hello', name)
import sys
sys.path.append("..")
from plugins import *
def analyze_functions(path):
    """Extract metadata for annotated function definitions in a source file.

    Scans every line containing ``def`` and parses out the function name,
    argument name/annotation pairs, and the ``->`` return annotation.

    Returns a list of dicts with keys ``function_name``, ``return_type``
    and ``arguments`` (a dict of argument name -> annotation text,
    annotation text keeps its leading space).

    NOTE(review): lines must have the exact shape
    ``def name(a: T, b: U) -> R:`` with a trailing newline — unannotated
    arguments or a final line without '\n' break the slicing, as before.
    """
    # initialize definition list
    definition_list = []
    meta_list = []
    # 'with' guarantees the handle is closed (the original leaked it)
    with open(path, "r") as source:
        # collect all the definition lines
        for line in source:
            if "def" in line:
                definition_list.append(line)
    for each_def in definition_list:
        idx_1 = each_def.index('f')  # last char of the 'def' keyword
        idx_2 = each_def.index('(')
        idx_3 = each_def.index(')')
        idx_4 = each_def.index('>')  # the '>' of the '->' annotation arrow
        function_dict = dict()
        function_dict["function_name"] = each_def[idx_1 + 1:idx_2].strip()
        # everything after '->', minus the trailing ':' and newline
        function_dict["return_type"] = each_def[idx_4 + 1: -2].strip()
        arguments = list()
        for each_arg in each_def[idx_2 + 1:idx_3].split(","):
            arguments.append(tuple(each_arg.strip().split(":")))
        function_dict["arguments"] = dict(arguments)
        meta_list.append(function_dict)
    return meta_list
def function_builder(data_list, path):
string_build = str("import sys\nsys.path.append('../')\n\n" + "import pytest\nfrom src." + path.split("/")[-1].replace(".py", "") + " import *\n\n\n")
for each in data_list:
string_build += (
'# testcase for ' + each['function_name'] + "\n"
'def test_' + each['function_name'] + "():\n\n\t" +
'# replace with value\n\t' +
'\n\t'.join([arg[0] + ' = ' + arg[1]+"()" for arg in each['arguments'].items()]) + "\n\n\t" +
'# assert happy path' + "\n\t" +
"assert " + each['function_name'] + "("+ ', '.join(each['arguments'].keys()) +") == " + each['return_type'] + "()\n\t" +
"assert type(" + each['function_name'] + "(" + ', '.join(each['arguments'].keys()) + ")) == " + each['return_type'] + "\n\n\t" +
'# assert raise (Change exception as per requirement)' + "\n\t" +
'with pytest.raises(Error):\n\t\t' +
each['function_name'] + "(" + ', '.join('None'.split(" ") * len(each['arguments'].keys())) + ")\n\n\n"
)
return string_build
def process_analyze(path):
    """Analyze *path* and return the generated pytest module as a string."""
    return function_builder(analyze_functions(path), path)
|
from flask import render_template, request, Blueprint
from flaskbook.modelss import Post
# Blueprint grouping the main (home / post-listing) routes of the app.
main = Blueprint('main', __name__)
@main.route("/")
@main.route("/home")
def home():
    """Render the paginated home page of posts, newest first (2 per page).

    Reads the page number from the 'pageee' query parameter
    (NOTE(review): unusual name — confirm templates/links use 'pageee').
    """
    stranica = request.args.get('pageee', 1, type=int)
    posts = Post.query.order_by(Post.date_posted.desc()).paginate(page=stranica, per_page=2)
    return render_template('home.html', posts=posts)
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from gaecookie.decorator import no_csrf
from gaepermission.decorator import login_not_required
@no_csrf
@login_not_required
def index():
    """Placeholder handler: publicly accessible (no login) and CSRF-exempt."""
    pass
def add_to_each_line(path='esdata.json'):
    """Append ',' to every (stripped) line of *path*, rewriting it in place.

    The filename is now a parameter; the default preserves the original
    behavior. Fixes: the original called f.close() inside each ``with``
    block (redundant — ``with`` already closes the file) and built the
    output via quadratic string concatenation.
    """
    with open(path, 'r') as f:
        lines = [line.strip() + ",\n" for line in f]
    with open(path, 'w') as f:
        f.writelines(lines)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Tony <stayblank@gmail.com>
# Time: 2019/5/30 12:31
import time
import unittest
import logging
from onion_decorator.qps import qps
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def dummy():
    """No-op call target used to measure the qps decorator's rate limiting."""
    pass
class TestQps(unittest.TestCase):
    """Verify that the qps decorator caps call throughput at the requested rate."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    @staticmethod
    def gen_f(n):
        # Wrap the no-op dummy with a limiter of at most n calls per second.
        return qps(n)(dummy)
    def test_qps(self):
        # Bug fix: xrange was removed in Python 3 and raised NameError here.
        for max_qps in range(0, 50000, 1000):
            if max_qps == 0:
                max_qps = 1
            seconds = 10
            count = 0
            f = self.gen_f(max_qps)
            start = time.time()
            # Hammer the wrapped function for the whole window, counting calls.
            while time.time() - start <= seconds:
                f()
                count += 1
            # Observed calls per second must not exceed the configured cap.
            real_qps = count / seconds
            logger.info("%s,%s", max_qps, real_qps)
            self.assertTrue(real_qps <= max_qps)
|
from django.contrib.auth.models import Group
from rest_framework import serializers
from users.models import User, Address
class AddressSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer exposing the location fields of users.models.Address."""
    class Meta:
        model = Address
        fields = ('country', 'city', 'address1', 'address2', 'address3')
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for users.models.User with a nested primary address.

    NOTE(review): the nested AddressSerializer makes writes require custom
    create()/update() handling in DRF — confirm this endpoint is read-only.
    """
    url = serializers.HyperlinkedIdentityField(view_name='api:user-detail')
    primary_address = AddressSerializer()
    class Meta:
        model = User
        fields = ('id', 'url', 'first_name', 'last_name', 'email', 'birth_year', 'primary_address')
class GroupSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for django.contrib.auth Group, exposing permission codenames."""
    url = serializers.HyperlinkedIdentityField(view_name='api:group-detail')
    # Bug fix: DRF requires a relational field to be given either `queryset=`
    # (writable) or `read_only=True`; the original had neither, which raises
    # an assertion error as soon as the serializer is instantiated. Marked
    # read-only here — pass queryset=Permission.objects.all() instead if
    # permissions must be writable.
    permissions = serializers.SlugRelatedField(slug_field='codename', many=True, read_only=True)
    class Meta:
        model = Group
        fields = ('url', 'name', 'permissions')
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
print("식수예측 데이콘용 함수와 변수들을 저장한 파일 _ 210717 수정본")
# 밥류 필터용
# a1: 쌀밥류, b1:덮밥국밥류, c1:비빔밥볶음밥류 ,d1: 김초밥류, e1: 국수류
a1 = ["(쌀밥류)", "영양밥", "흑미밥", "오곡밥", "수수밥", "귀리밥", "팥밥", "콩밥", "기장밥", "치자밥", "찹쌀밥"]
b1 = ["(덮밥국밥류)", "카레라이스", "짜장밥", "오므라이스", "잡채밥", "시금치무쌉", "카레소스", "수제비", "커리",
"카레돈까스"]
c1 = ["(비빔밥볶음밥류)", "빠에야", "필라프", "곤드레밥", "콩나물밥", "보리밥", "볶음밥/", "비빔밥/"]
d1 = ["(김초밥류)", "주먹밥", "쌈밥"]
e1 =["냉면", "국수", "짜장면", "채소라면", "스파게티", "파스타", "나가사키면", "알리오올리오",
"우동", "소바", "비빔면", "쫄면", "메밀면", "고추짜장", "컵라면"]
f1 = ["(죽류)" , "해물누룽지"]
exceptions_rice = []
rice_filter = [a1, b1, c1, d1, e1, f1, exceptions_rice]
# 국물류 필터용
a2 = ["(국탕류)", "나베", "백숙", '샤브샤브', "대구지리"]
b2 = ["(찌개류)"]
c2 = ["(스프류)"]
exceptions_soup = []
soup_filter = [a2, b2, c2, exceptions_soup]
# 반찬 및 기타류 필터용
a5 = ["(김치류)", "오이소배기", '오이소박이']
b5 = ["(샐러드류)", "쌈", "스틱", "양배추", "야채", "풋고추", "상추", "채소", "브로컬리", "브로콜리", "영양부추",
"오이&당근", '오이선', "알배기", "배추깻잎", "콘슬로우", "다시마", "콘치즈", "단호박범벅", "물미역", "미역줄기",
"단호박", "카프레제", "호박잎", "탕평채", "봄동숙", "파래김", "연근", "새송이", "호박숙", "범벅", "병아리콩",
"곰피초장", "삼색콜리", '재래김', "매쉬드포테이토"]
c5 = ["(튀김류)", "꿔바로우", "깐풍", "카츠", "요거닭", "유린기", "커틀릿", "교촌", "고로케", "멘보샤", "깐쇼새우",
"웨지감자", "춘권", "후라이드", "크림새우", "칠리새우", "베이비크랩", "맛탕", '해쉬포테이토', "회오리감자",
'오지치즈후라이']
d5 = ['(전류)', "동그랑땡", "부침", "산적", "오꼬노미야끼", '빈대떡', "수수부꾸미", '계란후라이']
e5 = ["(찜류)", "수육", "메쉬드", "찐", "보쌈", "숙회", "오징어", "숙쌈", "오향장육", "프리타타"]
f5 = ["(볶음류)", "두루치기", "주물럭", "마파두부", "유산슬", "류산슬", "치즈불닭", "호박꼬지", '궁보계정', '양장피',
'마늘쫑건새우', "블랙페퍼쉬림프", '김자반']
g5 = ["(조림류)", "국물쪼리닭", "스위트칠리미트볼", "지짐", "동파육", "콩자반", '간장계란장']
h5 = ["(구이류)", "스테이크", "너비아니", "대패", "떡갈비", "함박", "수원왕갈비", "폭립", "훈제오리",
"숯불양념꼬치어묵", "숯불양념꼬지어묵", "모듬소세지", "그라탱", "그라탕", "스크램블", '군고구마', "소떡소떡",
"어떡햄", '양념김', '허니버터옥수수']
i5 = ["(무침류)", "파채", "오복지", "무말랭이", "냉채", "겨자채", "묵"]
j5 = ["(장아찌류)", "깻잎지", "깻잎양념지", "고추지", "간장지", "절이", '오이지']
exceptions_vege = []
vege_filter = [a5, b5, c5, d5, e5, f5, g5, h5, i5, j5, exceptions_vege]
# 디저트류 필터용
a3 = ["(음료)", "코코뱅", "피크닉", "쥬시쿨"]
b3 = ["(유제품)", "프로바이오틱", "짜요짜요", "파르페"]
c3 = ["(빵과자류)", "나쵸", "퀘사디아", "바게트", '바게뜨', "티라미수", "도너츠", "퀘사디야", "버거", "츄러스",
"추러스", "와플", "도넛", "아이스슈", "수제과일잼샌드", "타꼬야끼", '타코야끼', "슈크림", "라면땅", "시리얼"
,'탱크보이', "푸딩"]
d3 = ['(과일류)', "사과", "바나나", "과일", "조각사과", "오렌지", "아오리사과", "귤", "천도복숭아", "참외", "포도",
"배", "열대과일", "단감", "수박", "홍시","토마토" ,"수박화채", "수떡수떡화채", "방울토마토",
"청포도", '모둠과일', "토마토설탕절인"]
exceptions_dessert = ["꿀호떡", '떡밤초', '떡꼬지', '어묵고추장떡',
"찹쌀호떡", "송편", "인절미"]
dessert_filter = [a3, b3, c3, d3, exceptions_dessert]
# 재료 필터용
# 김, 무 는 음식의 이름 속에 포함될 수 있다. ex) 김치 , 나물무침 따라서 해당 한글자 재료들은 세부적으로 메뉴를 넣어줘야한다.
# 메뉴의 모든 원재료가 아니라, 직관적으로 떠오르는 메인 메뉴를 기준으로 분류한다.
# 버섯은 균류에 들어가야 하지만 채소류에 분류해줬다.
# 양파는 뿌리류에 들어가야하지만 채소류에 분류해줬다.
# 메뉴 중에 함박, 스테이크, 함박스테이크 3개가 있다. count를 할 때, 중복으로 되지만 해당 음식들에 가중치가 절대값 1정도는 무관하다고 판단.
견과및두류 = list(set([ '청국장', '흑임자', '부럼','캐슈넛','두부','콩','팥','녹두','들깨','땅콩','두유','유부','아몬드','잣','호두','모밀','견과','밤']))
묵류 = list(set(['탕평채','우무묵','도토리묵','청포묵','모듬묵','곤약','올방개묵','묵볶음','삼색묵','모둠묵','이색묵']))
어패류 = list(set(['류산슬' , '유산슬' ,'알탕', '쭈꾸미', '북엇국' , '우렁' ,'타코야끼', '올갱이' , '조기' , '임연수','쥐어','꼬막','방어','골뱅이','해물파전','해물동그랑땡전','재첩','다슬기','쥐포','조기','홍어','전복','삼치','노가리','조갯살','북어','양장피','미더덕','삼치','홍합','꽁치','맛살','봉골레','골뱅이','문어','쭈삼','날치','굴비','연어','명란','쥐포','대구','굴','조개','북어','오징어','참치','바지락','진미채','삼치','참치','아귀','홍합','코다리','장어','꽁치','동태','갈치','미더덕','해파리','주꾸미','생선','명태','자반고등어','황태','꽁치','가자미','명엽','바지락','어묵','낙지','날치','멸치', "어떡햄", "고갈비"]))
육류 = list(set(['육개장','깐풍기' ,'깐풍육' , '소세지' , '제육', '동파육','류산','유산','소떡소떡','수제맛쵸킹','함박','스테이크','윙','너겟','초계','등심찹쌀','유린기','사골','우육','설렁탕','갈비탕' ,'퀘사디','히레카츠','돼지','차돌','선지','순대','동그랑땡','함박','곰탕','산적','등뼈','햄','쇠고기','돈육','미트','오향장육','삼겹살','쇠불고기','수육','삼계','베이컨','너비아니','보쌈','탕수육','치킨','부대찌개','닭','등갈비','떡갈비', '소갈비',"갈비통통만두", "갈비만두", "LA갈비", "수원왕갈비", "돈갈비" ,'사태','함박','목살','바베큐폭립',"바베큐장각오븐구이" ,'오리훈제','스팸','백숙','소시지','삼겹살','비엔나','돈가스','돈까스','족발','대패']))
채소류 = list(set(['단호박','애호박','호박죽','짜사', '쨔샤','짜샤','돈나물','육개장' , '아욱' , '마늘종', '마늘쫑', '브로컬리', '우엉', '하루나', '고구마줄기','무청' ,'바질','곰취' ,'홍초', '치자','락교','부추','유채겉절이','콩가루배춧국','유채나물','청양','양송이','초나물','비빔밥','쪽파','알배기','해물파전','반달나물','양상추','근대나물','쑥','섬초','오복지','머위','탕평채','비름나물','취나물','보름나물','대파','초나물','퀘사디','꽈리초조','두릅','죽순','방풍','냉이','시래기','노각','느타리','케일','피망','느타리팽이','달래','파채','머위','깻잎','양장피','두릅','우거지','쑥갓','양파','호박잎','취나물','부추','숙주','쌈추','고춧잎','채소탕수','고사리','산채','꼬들배기','생파','쪽파','양송이','단무지','매실','얼갈이','치커리','참나물','피클','돌나물','파프리카','취나물','세발','쌈','미나리','새싹','땡초','쑥갓','콜리','봄동','채소','가지','상추','샐러드','양배추','가지','허브','버섯','청경채','냉이','달래','오이','콩나물','부추','깻잎','고추','배추','브로콜리','봄동','시금치', '명이']))
뿌리류 = list(set(['콘' '비트','옹심이','치킨무','말랭이무침','무생채','열무','구구마','무쌈','쌈무','열무','무나물','무말랭이','홍삼','무김치','당근','옥수수','더덕','토란','고구마','마늘','우엉','감자','도라지','갈릭','연근','포테이토','생강']))
해조류 = list(set(['톳','매생이','해초','김주먹밥','다시마','파래','미역','충무김','김구이','꼬시래기','김자반','김가루','곰피']))
면류 = list(set(['옹심이', '펜네' ,'쫄면', '소면','라면','우동','당면','국수','냉면','마제소바','마카로니','짬뽕','수제비','김말이', "잡채"]))
갑각류 = list(set(['멘보샤 ','새우','꽃게','크랩','크래미','게살', '쉬림프']))
계란류 = list(set(['오므라이스','메추리알','에그','계란','달걀','스크램']))
떡류 = list(set(['빈대떡','떡국','새알','인절미','증편','어떡햄','절편','떡볶이','떡잡채','조랭이','소떡소떡']))
유제품 = list(set([ '프로바이오틱' , '요플레' ,'그라탕' , '그라탱','플레인','요거트','요구르트','치즈','버터','우유','두유']))
만두류 = list(set(['춘권','비빔만두','통만두','맑은만두','완자','물만두','만두찜','쌈만두','야채만두','찜만두','당면계란만두','갈비통통만두','만둣국','만두국','교자만두','물만두탕수','군만두']))
간식류 = list(set(['연유버터베이글 ','바나나와플','아이스슈','씨리얼', '시리얼','애플파이','치즈팡샌드','트위스터버거','붕어빵','카스텔라','호빵','크로아상샌드위치','커피','살라미샌드위치','바나나시나몬토스트','햄야채샌드','조각티라미','롤케익','고구마고로케','고구마치즈빵','삼색샌드위치','꿀호떡','비엘티샌드위치','마약토스트','탱크보이','호떡맥모닝','츄러스채소맛탕','아메리카노','쿠키','버거','베이컨맥모닝','스틱치즈케익','갈릭파이','영양모듬견과','식빵','생크림와플','남친샌드위치','인절미토스트','인절미츄러스맛탕','오렌지케익빵','피자빵','페스츄리','호떡','고구마치즈빵','수제마늘바게트','모닝샌드','주스','식빵피자','살라미샌드위치','파르페','애플파이','복숭아아이스티','팝콘','찐빵','꿀호떡','도너츠','핫도그','맥모닝','도넛','바나나시나몬토스트','치즈베이글','생크림단팥빵','미니햄버거','찹쌀호떡','컵케익','고로케','호두견과','식혜','모닝롤','머핀','홍루이젠','화채','카스테라','프렌치토스트','푸딩','시나몬페스츄리','아이스티','소보루빵','식빵','피자','케익','케이크','호빵']))
과일류 = list(set(['두부카프레제', "카프레제샐러드",'과일','유자','레몬','매실','참외','살구','홍시','오미자','배','복숭아','블루베리','포도','청포도','바나나','망고','오렌지','애플망고','홍시','건포도','코코넛','레몬','단감','크랜베리','적포도','사과','키위','라즈베리','살구','딸기','파인애플','황도','수박','토마토', '화채']))
food_material = [견과및두류,묵류,어패류,육류,채소류,뿌리류,해조류,면류,갑각류,계란류,떡류,유제품,만두류,간식류,과일류]
# 괄호 제거 함수
def bracket_remover(x):
    """Strip '(...)' and '<...>' bracketed spans from *x*, then split on whitespace."""
    import re
    without_parens = re.sub(r'\([^)]*\)', '', x)
    without_angles = re.sub(r'\<[^>]*\>', '', without_parens)
    return without_angles.split()
# * 제거 함수
def star_remove(x):
    """Drop '*'-prefixed entries and truncate trailing '*…'/'&…' annotations.

    Mutates *x* in place and returns it.
    """
    # BUG FIX: iterate a snapshot — removing from the list being iterated
    # made the loop skip the element that slid into the freed slot, so two
    # adjacent '*' entries left the second one untouched.
    for menu in list(x):
        if menu.startswith("*"):
            x.remove(menu)
        elif "*" in menu:
            x[x.index(menu)] = menu.split("*")[0]
        elif "&" in menu:
            x[x.index(menu)] = menu.split("&")[0]
    return x
# 메뉴에서 '쌀밥류' 통일 및 쌀밥/다른메뉴 분리
def rice_sort(x):
    """Normalize rice-based menu names into coarse category tags, in place.

    Plain rice becomes "(쌀밥류)", compound "a/b" entries are split apart,
    "small/extra rice" markers are dropped, and noodle / fried-rice /
    bowl-rice / gimbap variants get their own tags.  Returns the mutated list.

    NOTE(review): x is extended and shrunk while being iterated, so elements
    can be skipped or re-visited — confirm this is tolerated by callers
    before refactoring.
    """
    for menu in x:
        if (menu.startswith('쌀밥')) or (menu.startswith('흑미밥')):
            if ("/" not in menu):
                x[x.index(menu)] = "(쌀밥류)"
            elif ("/" in menu):
                temp = menu.split("/")
                for temps in temp:
                    if "밥" in temps:
                        temp[temp.index(temps)] = "(쌀밥류)"
                    else:
                        pass
                x.extend(temp)
                x.remove(menu)
        elif ("작은밥" in menu) or ("추가밥" in menu):
            # "Small rice" / "extra rice" are portions, not dishes: drop them.
            if ("/" not in menu):
                x.remove(menu)
            elif ("/" in menu):
                temp = menu.split("/")
                for temps in temp:
                    if (temps == "작은밥") or (temps == "추가밥"):
                        # NOTE(review): temp is also mutated mid-iteration here.
                        temp.remove(temps)
                    else:
                        pass
                x.extend(temp)
                x.remove(menu)
        elif menu.endswith("짬뽕"):
            x[x.index(menu)] = "(국수류)"
        elif ("비빔밥" in menu) or ("볶음밥" in menu) or ("필라프" in menu) or ("콩나물밥" in menu):
            x[x.index(menu)] = "(비빔밥볶음밥류)"
        elif (menu.endswith("덮밥")) or (menu.endswith("국밥")):
            x[x.index(menu)] = "(덮밥국밥류)"
        elif (menu.endswith("김밥")) or (menu.endswith("초밥")) or (menu.endswith("주먹밥")):
            x[x.index(menu)] = "(김초밥류)"
    return x
# 국탕찌개류 -> (국탕찌개류)로 변경
def soup_sort(x):
    """Replace soup / stew / porridge entries with their category tags, in place."""
    for position, menu in enumerate(x):
        if menu.endswith(('국', '탕', '개장', '미소시루')):
            x[position] = "(국탕류)"
        elif menu.endswith('찌개'):
            x[position] = "(찌개류)"
        elif "스프" in menu:
            x[position] = "(스프류)"
        elif menu.endswith('죽'):
            x[position] = "(죽류)"
    return x
# 김치류 -> (김치류)로 변경
def kimchi_sort(x):
    """Tag kimchi-type entries as "(김치류)", splitting 'a/b' compound entries.

    Mutates *x* in place and returns it.
    """
    # BUG FIX: iterate a snapshot — the original extended/removed x while
    # iterating it, which skips elements and re-visits freshly appended ones.
    for menu in list(x):
        if ("김치" in menu) or ("깍두기" in menu) or ("석박지" in menu) or ("겉절이" in menu):
            if "/" not in menu:
                x[x.index(menu)] = "(김치류)"
            else:
                temp = menu.split("/")
                for temps in temp:
                    # Suffix-limited so only segments *ending* in a kimchi word match.
                    if ("김치" in temps[-2:]) or ("깍두기" in temps[-3:]) or ("석박지" in temps[-3:]) or ("겉절이" in temps[-3:]):
                        temp[temp.index(temps)] = "(김치류)"
                x.extend(temp)
                x.remove(menu)
    return x
# 샐러드류 -> (샐러드류)로 변경
def salad_sort(x):
    """Tag salad entries (but not salad pasta) as "(샐러드류)", in place."""
    # BUG FIX: snapshot iteration avoids the skip/re-visit bug of mutating
    # x mid-loop via extend()/remove().
    for menu in list(x):
        if ("샐러드" in menu) and ("파스타" not in menu):
            if "/" in menu:
                temp = menu.split("/")
                for temps in temp:
                    if temps.endswith("샐러드"):
                        temp[temp.index(temps)] = "(샐러드류)"
                x.extend(temp)
                x.remove(menu)
            else:
                x[x.index(menu)] = "(샐러드류)"
    return x
# 튀김 -> (튀김류)로 변경
def fry_sort(x):
    """Tag deep-fried entries ("튀김", "까스", …) as "(튀김류)", in place.

    NOTE(review): for "튀김우동" a "(튀김류)" tag is *appended* while the
    original entry is kept, and x is mutated during iteration
    (skip/re-visit risk) — confirm intended before refactoring.
    """
    for menu in x:
        check = 0  # NOTE(review): never read — looks like dead code.
        for fry in ["튀김", "까스", "가스", "김말이", "강정", "커틀렛"]:
            if "튀김우동" in menu:
                x.append("(튀김류)")
                break
            elif (fry in menu) and ("/" in menu):
                temp = menu.split("/")
                for temps in temp:
                    if temps.endswith(fry):
                        temp[temp.index(temps)] = "(튀김류)"
                    else:
                        pass
                x.extend(temp)
                x.remove(menu)
                break
            elif menu.endswith(fry):
                x[x.index(menu)] = "(튀김류)"
                break
    return x
# 전 -> (전류)
def pancake_sort(x):
    """Tag Korean pancake / rolled-egg entries as "(전류)", in place."""
    for position, menu in enumerate(x):
        # Suffix 전/전병, or 전병/계란말이 anywhere, all map to the same tag.
        if menu.endswith(('전', '전병')) or ("전병" in menu) or ("계란말이" in menu):
            x[position] = "(전류)"
    return x
# 찜 -> (찜류)
def zzim_sort(x):
    """Tag steamed/braised entries ("찜", "탕수") as "(찜류)", in place."""
    # BUG FIX: iterate a snapshot — mutating x via extend()/remove() while
    # iterating it skips elements and re-visits appended ones.
    for menu in list(x):
        if ("찜" in menu) or ("탕수" in menu):
            if "/" not in menu:
                x[x.index(menu)] = "(찜류)"
            else:
                temp = menu.split("/")
                for temps in temp:
                    # Only segments that themselves contain "찜" are tagged.
                    if "찜" in temps:
                        temp[temp.index(temps)] = "(찜류)"
                x.extend(temp)
                x.remove(menu)
    return x
# 만두 -> 찜과 튀김 분류
def mandu_sort(x):
    """Classify dumpling entries as steamed "(찜류)" or fried "(튀김류)", in place.

    A dumpling name containing none of the steamed/boiled marker words is
    treated as fried; otherwise as steamed.
    """
    steamed_markers = ["물", "찐", "왕", "감자", "메밀", "부추", "딤섬"]
    for position, menu in enumerate(x):
        if ("만두" in menu) or ("딤섬" in menu):
            absent = sum(1 for word in steamed_markers if word not in menu)
            # BUG FIX: the original compared against the hard-coded count 6,
            # stale after "딤섬" grew the marker list to 7 entries — so fried
            # dumplings (e.g. 군만두) were tagged "(찜류)" and 물만두 "(튀김류)".
            if absent == len(steamed_markers):
                x[position] = "(튀김류)"
            elif absent > 0:
                x[position] = "(찜류)"
    return x
# 볶음 -> (볶음류)
def stirfry_sort(x):
    """Tag stir-fried entries ("볶음" without "밥", or "불고기") as "(볶음류)"."""
    # BUG FIX: iterate a snapshot — mutating x via extend()/remove() while
    # iterating it skips elements and re-visits appended ones.
    for menu in list(x):
        if ("볶음" in menu) and ("밥" not in menu):
            if "/" not in menu:
                x[x.index(menu)] = "(볶음류)"
            else:
                temp = menu.split("/")
                for temps in temp:
                    if "볶음" in temps:
                        temp[temp.index(temps)] = "(볶음류)"
                x.extend(temp)
                x.remove(menu)
        elif "불고기" in menu:
            x[x.index(menu)] = "(볶음류)"
    return x
# 조림 -> (조림류)
def boiled_sort(x):
    """Tag braised ("조림") entries as "(조림류)", in place."""
    for position, menu in enumerate(x):
        if "조림" in menu:
            x[position] = "(조림류)"
    return x
# 구이 -> (구이류)
def goo2_sort(x):
    """Tag grilled entries ("구이" anywhere, or a teriyaki suffix) as "(구이류)"."""
    for position, menu in enumerate(x):
        if ("구이" in menu) or menu.endswith('데리야끼'):
            x[position] = "(구이류)"
    return x
# 무침 -> (무침류)
def mix_sort(x):
    """Tag seasoned-vegetable entries (무침/잡채/나물/생채/오복지) as "(무침류)".

    Mutates *x* in place and returns it.
    """
    keywords = ("무침", "잡채", "나물", "생채", "오복지")
    # BUG FIX: iterate a snapshot — x was mutated mid-iteration.
    for menu in list(x):
        if any(keyword in menu for keyword in keywords):
            if ("/" not in menu) and ("밥" not in menu):
                x[x.index(menu)] = "(무침류)"
            elif "/" in menu:
                temp = menu.split("/")
                for temps in temp:
                    # BUG FIX: the original tested `menu` here (always true),
                    # so every segment of a compound entry was tagged — not
                    # just the segments that actually contain a keyword.
                    if any(keyword in temps for keyword in keywords):
                        temp[temp.index(temps)] = "(무침류)"
                x.extend(temp)
                x.remove(menu)
    return x
# 치킨 포함 단어 분류
def chicken_sort(x):
    """Route chicken entries: 치킨무 -> "(장아찌류)", fried chicken -> "(튀김류)",
    닭갈비 -> "(볶음류)".  Mutates *x* in place and returns it."""
    # BUG FIX: iterate a snapshot — mutating x via extend()/remove() while
    # iterating it skips elements and re-visits appended ones.
    for menu in list(x):
        if "치킨무" in menu:
            if "/" not in menu:
                x[x.index(menu)] = "(장아찌류)"
            else:
                temp = menu.split("/")
                for temps in temp:
                    if temps == "치킨무":
                        temp[temp.index(temps)] = "(장아찌류)"
                x.extend(temp)
                x.remove(menu)
        elif menu.endswith(('치킨', '파닭', '통닭')):
            x[x.index(menu)] = "(튀김류)"
        elif menu.startswith('치킨') and ("밥" not in menu) and ("퀘사디야" not in menu):
            x[x.index(menu)] = "(튀김류)"
        elif ("닭갈비" in menu) and ("밥" not in menu):
            x[x.index(menu)] = "(볶음류)"
    return x
#음료-> (음료)
def drink_sort(x):
    """Tag beverage entries as "(음료)", splitting 'a/b' compound entries.

    NOTE(review): "차" matches anywhere in the name, so any menu containing
    that syllable is tagged; x is also mutated while being iterated
    (skip/re-visit risk) — confirm before relying on output order.
    """
    for menu in x:
        if ("주스" in menu) or ("쥬스" in menu) or ("음료" in menu) or ("식혜" in menu) or ("차" in menu) or ("에이드" in menu) or ("즙" in menu) or ("아이스티" in menu) or ("두유" in menu):
            if ("/" not in menu):
                x[x.index(menu)] = "(음료)"
            elif ("/" in menu):
                temp = menu.split("/")
                for temps in temp:
                    # Suffix-limited checks: only segments *ending* in a
                    # beverage word are tagged.
                    if ("주스" in temps[-2:]) or ("쥬스" in temps[-2:]) or ("음료" in temps[-2:]) or ("식혜" in temps[-2:]) or ("차" in temps[-2:]) or ("에이드" in temps[-3:]) or ("즙" in temps[-1:]) or ("아이스티" in temps[-4:]) or ("두유" in temps[-3:]):
                        temp[temp.index(temps)] = "(음료)"
                    else:
                        pass
                x.extend(temp)
                x.remove(menu)
    return x
#유제품-> (유제품)
def milk_sort(x):
    """Tag yogurt-type entries as "(유제품)".

    NOTE(review): '&' below is the bitwise operator; it only works because
    all three operands are bools.  x is mutated while iterated
    (skip/re-visit risk), and in the '/' branch every segment that is not
    D/푸딩/파르페 gets tagged, not just yogurt segments — confirm intended.
    """
    for menu in x:
        if ("요플레" in menu) or ("요거트" in menu) or ("요구르트" in menu):
            if ("/" not in menu) & ("D" not in menu) & ("파르페" not in menu):
                x[x.index(menu)] = "(유제품)"
            elif ("/" in menu):
                temp = menu.split("/")
                for temps in temp:
                    if ("D" in temps) or ('푸딩' in temps) or ("파르페" in temps) :
                        pass
                    else:
                        temp[temp.index(temps)] = "(유제품)"
                x.extend(temp)
                x.remove(menu)
    return x
# 절임 -> 장아찌류
def zzul_im_sort(x):
    """Tag pickled entries (절임/장아찌/짱아찌/피클/단무지, plus 쌈무/무쌈/락교)
    as "(장아찌류)".

    NOTE(review): x is mutated during iteration (skip/re-visit risk).
    """
    for menu in x:
        if ("절임" in menu) or ("장아찌" in menu) or ("짱아찌" in menu) or ("피클" in menu) or ("단무지" in menu):
            if ("/" not in menu):
                x[x.index(menu)] = "(장아찌류)"
            elif ("/" in menu):
                temp = menu.split("/")
                for temps in temp:
                    if (temps == "쌈무") or (temps == "무쌈") or (temps == "락교"):
                        temp.remove(temps)
                    # NOTE(review): this tests `menu`, not `temps` — always
                    # true on this path, so every remaining segment is tagged.
                    elif ("절임" in menu) or ("장아찌" in menu) or ("짱아찌" in menu) or ("피클" in menu) or ("단무지" in menu):
                        temp[temp.index(temps)] = "(장아찌류)"
                    else:
                        pass
                x.extend(temp)
                x.remove(menu)
        elif ("쌈무" in menu) or ("무쌈" in menu) or ("락교" in menu):
            if ("/" not in menu):
                x[x.index(menu)] = "(장아찌류)"
            elif ("/" in menu):
                temp = menu.split("/")
                for temps in temp:
                    # NOTE(review): `menu` again instead of `temps` — tags
                    # every segment of the compound entry.
                    if ("쌈무" in menu) or ("무쌈" in menu) or ("락교" in menu):
                        temp[temp.index(temps)] = "(장아찌류)"
                    else:
                        pass
                x.extend(temp)
                x.remove(menu)
    return x
# 빵류 -> 빵
def bread_sort(x):
    """Tag bread/snack entries as "(빵과자류)", splitting on ',' or '/'.

    NOTE(review): in both split branches the conditions test `menu` instead
    of `temps` — always true there, so every segment of a compound entry is
    tagged; x is also mutated while iterated (skip/re-visit risk).
    """
    for menu in x:
        if ("빵" in menu) or ("핫도그" in menu) or ("피자" in menu) or ("또띠아" in menu) or ("케익" in menu) or ("칩" in menu):
            if ("," not in menu) and ("/" not in menu):
                x[x.index(menu)] = "(빵과자류)"
            elif ("," in menu):
                temp = menu.split(",")
                for temps in temp:
                    if ("빵" in menu) or ("핫도그" in menu) or ("피자" in menu) or ("또띠아" in menu) or ("케익" in menu) or ("칩" in menu) :
                        temp[temp.index(temps)] = "(빵과자류)"
                    else:
                        pass
                x.extend(temp)
                x.remove(menu)
            elif ("/" in menu):
                temp = menu.split("/")
                for temps in temp:
                    if ("빵" in menu) or ("핫도그" in menu) or ("피자" in menu) or ("또띠아" in menu) or ("케익" in menu) or ("칩" in menu):
                        temp[temp.index(temps)] = "(빵과자류)"
                    else:
                        pass
                x.extend(temp)
                x.remove(menu)
    return x
#떡볶이-> (볶음류)
def dduk_sort(x):
    """Tag tteokbokki-style entries ("볶이") as "(볶음류)".

    NOTE(review): in the '/' branch the conditions test `menu` instead of
    `temps`, so every segment of a compound entry is tagged; x is also
    mutated while iterated (skip/re-visit risk).
    """
    for menu in x:
        if ("볶이" in menu) :
            if ("/" not in menu):
                x[x.index(menu)] = "(볶음류)"
            elif ("/" in menu):
                temp = menu.split("/")
                for temps in temp:
                    if ("볶이" not in menu) :
                        pass
                    elif ("볶이" in menu):
                        temp[temp.index(temps)] = "(볶음류)"
                x.extend(temp)
                x.remove(menu)
    return x
# 모든 메뉴 한 리스트에 모으기
def all_menu_list(a, b):
    """Union of all menu names found in two array-like collections.

    *a* and *b* are pandas/numpy objects whose ``.tolist()`` yields lists of
    menu lists; returns a flat, de-duplicated list (order unspecified).
    """
    from itertools import chain
    # itertools.chain.from_iterable replaces the third-party
    # iteration_utilities.flatten: both flatten exactly one nesting level.
    combined = a.tolist() + b.tolist()
    return list(set(chain.from_iterable(combined)))
# dataframe에 적용하여 메뉴별 벡터 생성하기 위한 함수
# dic에는 분류하고 싶은 사전 넣어줘야함 (ex. dic = dict_rice)
def class_major(menu, dic):
    """Membership vector over the categories of *dic*.

    For each key of *dic* (in insertion order), the vector holds 1 if any
    item of *menu* appears in that category's list, else 0.0.
    """
    import numpy as np
    keys = list(dic.keys())
    vector = list(np.zeros(len(dic)))
    for item in menu:
        for position, key in enumerate(keys):
            if item in dic[key]:
                vector[position] = 1
    return vector
# class_major와 함께 사용, 각 메뉴별 벡터를 데이터프레임에 추가
# df에는 어느 데이터프레임에 concat할 것인지 입력해야함
def to_dataframe(x, df):
    """Flatten a list of per-row vectors and reshape into a DataFrame.

    The result has ``len(df)`` rows; the column count is inferred from the
    total number of values.
    """
    import numpy as np
    import pandas as pd
    flat = []
    for row in x:
        flat.extend(row)
    arr = np.asarray(flat).reshape(len(df), -1)
    return pd.DataFrame(arr)
####################################### 아래는 조대리가 생성한 함수 #######################################
def del_bracket(data):
    """Remove '(...)' / '<...>' spans from each string in *data*, then keep
    only Korean/Latin/'*' tokens, joined by single spaces."""
    import re
    cleaned_items = []
    for item in data:
        cleaned = re.sub(r'\([^)]*\)', "", item)
        # BUG FIX: the angle-bracket pattern used [^)] instead of [^>]
        # (inconsistent with bracket_remover above), so spans containing a
        # ')' such as '<a)b>' were never removed.
        cleaned = re.sub(r'\<[^>]*\>', "", cleaned)
        # NOTE: inside a character class '|' is a literal, so the original
        # class also kept stray '|' characters; dropped here as unintended.
        tokens = re.findall('[가-힣a-zA-Zㄱ-ㅎ*]+', cleaned)
        cleaned_items.append(' '.join(tokens))
    return cleaned_items
def change_column(t, x):
    """Replace column *x* of DataFrame *t* with its bracket-stripped text.

    Mutates *t* in place; the rewritten column moves to the end because it
    is deleted and re-added.
    """
    cleaned = del_bracket(t[x].tolist())
    del t[x]
    t[x] = cleaned
def sauce_punish(data):
    """Strip '*sauce' suffixes from each whitespace-separated menu token.

    Returns ``(sauce, data)``: *sauce* maps row index -> comma-joined sauce
    names that were attached with '*'; *data* is the cleaned text list
    (also modified in place).
    """
    sauce_1 = {}
    for i in range(len(data)):
        x = data[i].split()
        k = ''
        for j in range(len(x)):
            temp = False  # NOTE(review): never read — looks like dead code.
            if '*' in x[j]:
                t = x[j].split('*')
                x[j] = t[0]  # keep the dish name, collect the sauce part(s)
                if k == '':
                    k += ' '.join(t[1:])
                else:
                    k += ','
                    k += ' '.join(t[1:])
        # Every row gets an entry; empty ones are filtered out below.
        sauce_1[i] = k
        data[i] = ' '.join(x)
    sauce = {}
    for i,v in sauce_1.items():
        if v != '':
            sauce[i] = v
    return sauce, data
def make_sauce(data):
    """Split '*sauce' annotations out of the three meal columns of *data*.

    Returns a copy of the DataFrame with cleaned menu text plus, per meal,
    a sauce-present flag column and a sauce-names column.
    """
    temp = data.copy()
    for meal in ['조식', '중식', '석식']:
        menu_texts = temp['{}메뉴'.format(meal)].tolist()
        sauce, cleaned = sauce_punish(menu_texts)
        temp['{}메뉴'.format(meal)] = cleaned
        flag_col = '소스 여부({})'.format(meal)
        kind_col = '소스 종류({})'.format(meal)
        temp[flag_col] = 0
        temp[kind_col] = 0
        # BUG FIX: the original used chained assignment
        # (temp[col].iloc[rows] = ...), which may write to a temporary and
        # silently not update `temp` (pandas SettingWithCopyWarning).
        rows = list(sauce.keys())
        temp.iloc[rows, temp.columns.get_loc(flag_col)] = 1
        temp.iloc[rows, temp.columns.get_loc(kind_col)] = list(sauce.values())
    return temp
def Show10(menu):
    """Interactively page through *menu*, printing 10 entries per keypress."""
    start = 0
    while True:
        # Block until the user presses Enter; the typed text is ignored.
        input("엔터를 입력하세요")
        print("{} ~ {}".format(start, start + 10))
        print(menu[start:start + 10])
        start += 10
        if start >= len(menu):
            break
|
"""Permission test views."""
# Pyramid
from pyramid.response import Response
# Websauna
from websauna.system.core.route import simple_route
@simple_route("test_authenticated", permission="authenticated")
def test_authenticated(request):
    """Render a trivial marker page; reachable only with the 'authenticated' permission."""
    body = "<span id='ok'></span>"
    return Response(body, content_type="text/html")
|
# -*- encoding:UTF-8 -*-
import os
import sys
# Test-suite configuration for the community-platform UAT environment.
suite_list = ['pay_test']  # test suites to execute
reqid = 0  # running request-id counter
# Directory this script was launched from.
work_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
# Platform server and smart-gateway endpoints.
server_IP = '10.101.70.236'
server_port = 81
smartGW_IP = '10.101.70.247'
smartGW_port = 20001
server_IP2 = '10.101.70.236'
server_port2 = 9046
cloud_server_IP = '47.106.21.206'
cloud_server_port = 38080
# Login entry points (password value is base64-encoded "test").
login_url = "/scp-usermgmtcomponent/admin/login?username=test&password=dGVzdA=="
cloud_login_url = "/egc-cloudapicomponent/admin/login?username=test&password=dGVzdA=="
u'''小区平台UAT数据库'''
# Community-platform UAT database — administrative account.
PostgreSQL1 = {
    "host": "10.101.70.238",
    "user": "hdsc_postgres",
    "password": "hdsc_postgres",
    "db": "hdsc_db",
    "port": "5432"
}
# Same database, personal test account (used by default).
PostgreSQL = {
    "host": "10.101.70.238",
    "user": "test_zhouhanbo",
    "password": "GfD1hfVxIev",
    "db": "hdsc_db",
    "port": "5432"
}
# Local tool paths (Windows).
replayPath = r'C:\\replayit\\'
devsimPath = r'C:\\testtoolsx\\'
|
# import bisect
# t = [2, 4, 6, 8]
# print(t)
# print(bisect.bisect_left(t, 7))
# print(t)
# print(bisect.bisect_left(t, 4))
# print(t)
import mmap
mmap_file = None  # module-level handle to the shared memory map
## Read the string content back out of the shared memory map.
def read_mmap_info():
    global mmap_file
    mmap_file.seek(0)
    ## Strip NUL padding bytes, then decode the remaining bytes to str.
    info_str=mmap_file.read().translate(None, b'\x00').decode()
    print("in read_mmap_info")
    print(info_str)
## If the shared map is empty, write the demo payload; otherwise just read it.
def get_mmap_info():
    global mmap_file
    ## 1024 bytes of named shared memory; `tagname` makes this Windows-only.
    mmap_file = mmap.mmap(-1, 1024, access = mmap.ACCESS_WRITE, tagname = 'share_mmap')
    ## First byte == 0 means nothing has been written yet.
    cnt=mmap_file.read_byte()
    if cnt==0:
        print("Load data to memory")
        # NOTE(review): fileno 0 here (vs -1 above) maps a real file
        # descriptor on some platforms — presumably meant to be -1; confirm.
        mmap_file = mmap.mmap(0, 1024, access = mmap.ACCESS_WRITE, tagname = 'share_mmap')
        mmap_file.write(b"This is the test data")
        read_mmap_info()
    else :
        print("The data is in memory")
        read_mmap_info()
## Overwrite the start of the shared map with a fresh marker + message.
def reset_mmp_info():
    global mmap_file
    mmap_file.seek(0)
    # NOTE(review): a NUL is written first, so the next get_mmap_info() sees
    # cnt == 0 and reloads; the text below therefore starts at offset 1.
    mmap_file.write(b'\x00')
    mmap_file.write(b"Load data to memory agine")
# Script entry point.
if __name__=="__main__":
    get_mmap_info()
import os
import torch
import numpy as np
import matplotlib.pyplot as plt
import random
from bindsnet.encoding import BernoulliEncoder
from bindsnet.network import Network
from bindsnet.network.monitors import Monitor
from bindsnet.network.monitors import NetworkMonitor
from bindsnet.analysis.plotting import plot_spikes, plot_voltages, plot_input, plot_weights
from bindsnet.network.nodes import Input, LIFNodes, IFNodes
from bindsnet.network.topology import Connection
from bindsnet.learning import PostPre, Hebbian, WeightDependentPostPre, MSTDP, MSTDPET
from bindsnet.evaluation import all_activity, proportion_weighting, assign_labels
from bindsnet.utils import get_square_weights, get_square_assignments
### Input Data Parameters ###
# number of training samples
training_samples = 10
testing_samples = 10
# set number of classes
n_classes = 2
### Network Configuration Parameters ###
# configure number of input neurons (one per pixel of the 3x3 images below)
input_layer_name = "Input Layer"
input_neurons = 9
# configure the number of output lif neurons
output_layer_name = "Output Layer"
output_neurons = 2
### Simulation Parameters ###
# simulation time and integration step (time units per BindsNET convention)
time = 100
dt = 1
# number of training iterations
epochs = 1
# ratio of neurons to classes
per_class = int(output_neurons / n_classes)
# store unique images in a list
imgs = []
# Class 0 Image: 3x3 ring
img0 = {"Label" : 0, "Image" : torch.FloatTensor([[1,1,1],[1,0,1],[1,1,1]])}
imgs.append(img0)
# Class 1 Image: 3x3 vertical bar
img1 = {"Label" : 1, "Image" : torch.FloatTensor([[0,1,0],[0,1,0],[0,1,0]])}
imgs.append(img1)
# initialize list of inputs for training
training_dataset = []
# for the number of specified training samples
for i in range(training_samples):
    # randomly select a training sample
    # rand_sample = random.randint(0,n_classes-1)
    # provide an even number of training samples by alternating classes
    rand_sample = i % n_classes
    # add the sample to the list of training samples
    training_dataset.append(imgs[rand_sample])
# initialize the Bernoulli encoder (spike probability proportional to pixel value)
encoder = BernoulliEncoder(time=time, dt=dt)
# list of encoded images for random selection during training
encoded_train_inputs = []
# loop through encode each image type and store into a list of encoded images
for sample in training_dataset:
    # encode the image (flattened to a 9-element vector)
    encoded_img = encoder(torch.flatten(sample["Image"]))
    # encoded image input for the network
    encoded_img_input = {input_layer_name: encoded_img}
    # encoded image label
    encoded_img_label = sample["Label"]
    # add to the encoded input list along with the input layer name
    encoded_train_inputs.append({"Label" : encoded_img_label, "Inputs" : encoded_img_input})
# initialize list of inputs for testing
testing_dataset = []
# for the number of specified testing samples
for i in range(testing_samples):
    # randomly select a testing sample
    rand_sample = random.randint(0,n_classes-1)
    # add the sample to the list of testing samples
    testing_dataset.append(imgs[rand_sample])
# list of encoded images used during testing
encoded_test_inputs = []
# loop through encode each image type and store into a list of encoded images
for sample in testing_dataset:
    # encode the image
    encoded_img = encoder(torch.flatten(sample["Image"]))
    # encoded image input for the network
    encoded_img_input = {input_layer_name: encoded_img}
    # encoded image label
    encoded_img_label = sample["Label"]
    # add to the encoded input list along with the input layer name
    encoded_test_inputs.append({"Label" : encoded_img_label, "Inputs" : encoded_img_input})
### NETWORK CONFIGURATION ###
# initialize network
network = Network()
# configure weights for the synapses between the input layer and output layer
#w = torch.round(torch.abs(2 * torch.randn(input_neurons, lif_neurons)))
w = torch.zeros(input_neurons,output_neurons)
# Optimal Weights for this task
# w = torch.FloatTensor([
# [1,-2],
# [1,4],
# [1,-2],
# [1,0],
# [-2,4],
# [1,0],
# [1,-2],
# [1,4],
# [1,-2]])
# w = w / w.norm()
# initialize input and output layers
# spike traces must be recorded (traces=True) for the PostPre update rule
# initialize input layer
input_layer = Input(
    n=input_neurons,
    traces=True
)
# initialize output layer (integrate-and-fire, no leak)
# lif_layer = LIFNodes(n=lif_neurons,traces=True)
output_layer = IFNodes(
    n = output_neurons,
    thresh = 8,
    reset = 0,
    traces=True
)
# initialize connection between the input layer and the output layer
# specify the learning (update) rule and learning rate (nu)
connection = Connection(
    #source=input_layer, target=lif_layer, w=w, update_rule=PostPre, nu=(1e-4, 1e-2)
    source=input_layer,
    target=output_layer,
    w=w,
    update_rule=PostPre,
    nu=(1, 1),
    norm=1
)
# add input layer to the network
network.add_layer(
    layer=input_layer, name=input_layer_name
)
# add output neuron layer to the network
network.add_layer(
    layer=output_layer, name=output_layer_name
)
# add connection to network
network.add_connection(
    connection=connection, source=input_layer_name, target=output_layer_name
)
### SIMULATION VARIABLES ###
# record the spike times of each neuron during the simulation.
spike_record = torch.zeros(1, int(time / dt), output_neurons)
# record the mapping of each neuron to its corresponding label
assignments = -torch.ones_like(torch.Tensor(output_neurons))
# how frequently each neuron fires for each input class
rates = torch.zeros_like(torch.Tensor(output_neurons, n_classes))
# the likelihood of each neuron firing for each input class
proportions = torch.zeros_like(torch.Tensor(output_neurons, n_classes))
# label(s) of the input(s) being processed
labels = torch.empty(1,dtype=torch.int)
# create a spike monitor for each layer in the network
# this allows us to read the spikes in order to assign labels to neurons and determine the predicted class
layer_monitors = {}
for layer in set(network.layers):
    # initialize spike monitor at the layer
    # do not record the voltage if at the input layer
    state_vars = ["s","v"] if (layer != input_layer_name) else ["s"]
    layer_monitors[layer] = Monitor(network.layers[layer], state_vars=state_vars, time=int(time/dt))
    # connect the monitor to the network
    network.add_monitor(layer_monitors[layer], name="%s_spikes" % layer)
weight_history = None
num_correct = 0.0
### DEBUG ###
### can be used to force the network to learn the inputs in a specific way
supervised = True
### used to determine if status messages are printed out at each sample
log_messages = False
### used to show weight changes
graph_weights = False
###############
# show current weights
#print("Current Weights:")
#print(network.connections[("Input Layer", "LIF Layer")].w)
# iterate for epochs
for step in range(epochs):
    # index of the sample in the list of encoded trainining inputs
    sample_num = 0
    for sample in encoded_train_inputs:
        # NOTE(review): weights are printed here AND again below for the
        # first five samples — looks like leftover debugging output.
        print("Current Weights:")
        print(network.connections[(input_layer_name, output_layer_name)].w)
        # print sample number
        print("Training Sample:",str(sample_num)+"/"+str(training_samples))
        if sample_num < 5:
            print("Current Weights:")
            print(network.connections[(input_layer_name, output_layer_name)].w)
        sample_num += 1
        # get the label for the current image
        labels[0] = sample["Label"]
        # randomly decide which output neuron should spike if more than one neuron corresponds to the class
        # choice will always be 0 if there is one neuron per output class
        choice = np.random.choice(per_class, size=1, replace=False)
        # clamp on the output layer forces the node corresponding to the label's class to spike
        # this is necessary in order for the network to learn which neurons correspond to which classes
        # clamp: Mapping of layer names to boolean masks if neurons should be clamped to spiking.
        # The ``Tensor``s have shape ``[n_neurons]`` or ``[time, n_neurons]``.
        clamp = {output_layer_name: per_class * labels[0] + torch.Tensor(choice).long()} if supervised else {}
        #print(sample["Inputs"])
        ### Step 1: Run the network with the provided inputs ###
        network.run(inputs=sample["Inputs"], time=time, clamp=clamp)
        ### Step 2: Get the spikes produced at the output layer ###
        spike_record[0] = layer_monitors[output_layer_name].get("s").view(time, output_neurons)
        ### Step 3: ###
        # Assign labels to the neurons based on highest average spiking activity.
        # Returns a Tuple of class assignments, per-class spike proportions, and per-class firing rates
        # Return Type: Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
        assignments, proportions, rates = assign_labels( spike_record, labels, n_classes, rates )
        ### Step 4: Classify data based on the neuron (label) with the highest average spiking activity ###
        # Classify data with the label with highest average spiking activity over all neurons.
        all_activity_pred = all_activity(spike_record, assignments, n_classes)
        ### Step 5: Classify data based on the neuron (label) with the highest average spiking activity
        ### weighted by class-wise proportion ###
        proportion_pred = proportion_weighting(spike_record, assignments, proportions, n_classes)
        ### Update Accuracy
        num_correct += 1 if (labels.numpy()[0] == all_activity_pred.numpy()[0]) else 0
        ######## Display Information ########
        if log_messages:
            print("Actual Label:",labels.numpy(),"|","Predicted Label:",all_activity_pred.numpy(),"|","Proportionally Predicted Label:",proportion_pred.numpy())
            print("Neuron Label Assignments:")
            for idx in range(assignments.numel()):
                print(
                    "\t Output Neuron[",idx,"]:",assignments[idx],
                    "Proportions:",proportions[idx],
                    "Rates:",rates[idx]
                )
            print("\n")
            print("Input:")
            print(sample["Inputs"])
            print("Output Spikes:")
            print(spike_record)
        #####################################
        ### For Weight Plotting ###
        if graph_weights:
            weights = network.connections[(input_layer_name, output_layer_name)].w[:,0].numpy().reshape((1,input_neurons))
            weight_history = weights.copy() if step == 0 else np.concatenate((weight_history,weights),axis=0)
            print("Neuron 0 Weights:\n",network.connections[(input_layer_name, output_layer_name)].w[:,0])
            print("Neuron 1 Weights:\n",network.connections[(input_layer_name, output_layer_name)].w[:,1])
            print("====================")
        #############################
    if log_messages:
        print("Epoch #",step,"\tAccuracy:", num_correct / ((step + 1) * len(encoded_train_inputs)) )
        print("===========================\n\n")
## Print Final Class Assignments and Proportions ###
print("Neuron Label Assignments:")
for idx in range(assignments.numel()):
    print(
        "\t Output Neuron[",idx,"]:",assignments[idx],
        "Proportions:",proportions[idx],
        "Rates:",rates[idx]
    )
### For Weight Plotting ###
# Plot Weight Changes
# if graph_weights:
#     [plt.plot(weight_history[:,idx]) for idx in range(weight_history.shape[1])]
#     plt.show()
#############################
### Print Final Class Assignments and Proportions ###
# print("Neuron Label Assignments:")
# for idx in range(assignments.numel()):
#     print(
#         "\t Output Neuron[",idx,"]:",assignments[idx],
#         "Proportions:",proportions[idx],
#         "Rates:",rates[idx]
#     )
#### Test Data ####
num_correct = 0
log_messages = False
# disable training mode (freezes the learning rule)
network.train(False)
# loop through each test example and record performance
for sample in encoded_test_inputs:
    # get the label for the current image
    labels[0] = sample["Label"]
    ### Step 1: Run the network with the provided inputs ###
    network.run(inputs=sample["Inputs"], time=time)
    ### Step 2: Get the spikes produced at the output layer ###
    spike_record[0] = layer_monitors[output_layer_name].get("s").view(time, output_neurons)
    ### Step 3: ###
    # NOTE(review): assignments/proportions/rates keep being updated here
    # during *testing*; usually they would be frozen after training — confirm.
    # Assign labels to the neurons based on highest average spiking activity.
    # Returns a Tuple of class assignments, per-class spike proportions, and per-class firing rates
    # Return Type: Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
    assignments, proportions, rates = assign_labels( spike_record, labels, n_classes, rates )
    ### Step 4: Classify data based on the neuron (label) with the highest average spiking activity ###
    # Classify data with the label with highest average spiking activity over all neurons.
    all_activity_pred = all_activity(spike_record, assignments, n_classes)
    ### Step 5: Classify data based on the neuron (label) with the highest average spiking activity
    ### weighted by class-wise proportion ###
    proportion_pred = proportion_weighting(spike_record, assignments, proportions, n_classes)
    ### Update Accuracy
    num_correct += 1 if (labels.numpy()[0] == all_activity_pred.numpy()[0]) else 0
    ######## Display Information ########
    if log_messages:
        print("Actual Label:",labels.numpy(),"|","Predicted Label:",all_activity_pred.numpy(),"|","Proportionally Predicted Label:",proportion_pred.numpy())
        print("Neuron Label Assignments:")
        for idx in range(assignments.numel()):
            print(
                "\t Output Neuron[",idx,"]:",assignments[idx],
                "Proportions:",proportions[idx],
                "Rates:",rates[idx]
            )
        print("\n")
    #####################################
# visualize the last test sample's output spikes and voltages, then report accuracy
plot_spikes({output_layer_name : layer_monitors[output_layer_name].get("s")})
plot_voltages({output_layer_name : layer_monitors[output_layer_name].get("v")}, plot_type="line")
plt.show(block=True)
print("Accuracy:", num_correct / len(encoded_test_inputs) )
# -- coding:utf-8 --
# filter(func, seq) KEEPS the elements for which func returns True (it does
# not delete them).  So to remove the primes from 1-100 we return True for
# every NON-prime (1 and composites) and False for primes.
# This program removes the primes from 1-100.
def is_su(m):
    """Return True if m is NOT prime (1 or a composite), False if m is prime.

    Used with filter() to drop the primes from a range.
    """
    if m == 1:
        return True
    for i in range(2, m):
        if m % i == 0:
            # Found a divisor: m is composite; stop scanning immediately
            # (the original kept looping to the end after deciding).
            return True
    return False
# Python 2 script body: print the full range, then the range with primes removed.
l = range(1,101)
print l
print filter(is_su, l)
|
from odoo import api, fields, models, tools, _
from odoo.exceptions import ValidationError
class fedia_pfe_activite(models.Model):
    """Hierarchical member-activity category with a risk level and a
    derived numeric rating (cotation)."""
    _name = 'fedia_pfe.activite'
    _description = "Adhérents Activité"
    _parent_name = "parent_id"
    _parent_store = True
    _rec_name = 'complete_name'
    _order = 'complete_name'

    name = fields.Char('Activité', index=True, required=True, translate=True)
    complete_name = fields.Char(
        'Complete Name', compute='_compute_complete_name',
        store=True)
    parent_id = fields.Many2one('fedia_pfe.activite', 'Marché', index=True, ondelete='cascade')
    parent_path = fields.Char(index=True)
    child_id = fields.One2many('fedia_pfe.activite', 'parent_id', 'Child Categories')
    cotation = fields.Integer(compute='_clacul_cotation', store=True, string='Cotation')
    risque = fields.Selection([
        ('1', 'Faible'),
        ('2', 'Moyen'),
        ('3', 'Elevé')
    ], string='Risque',)

    @api.depends('name', 'parent_id.complete_name')
    def _compute_complete_name(self):
        """Build "parent / child" display names recursively."""
        for category in self:
            if category.parent_id:
                category.complete_name = '%s / %s' % (category.parent_id.complete_name, category.name)
            else:
                category.complete_name = category.name

    @api.constrains('parent_id')
    def _check_category_recursion(self):
        """Forbid cycles in the category tree."""
        if not self._check_recursion():
            raise ValidationError(_('You cannot create recursive categories.'))
        return True

    @api.model
    def name_create(self, name):
        """Quick-create from a bare name (used by many2one inline creation)."""
        return self.create({'name': name}).name_get()[0]

    @api.depends('risque')
    def _clacul_cotation(self):
        """Map the risk selection to its numeric rating.

        The selection keys are already the rating as strings ('1'..'3'),
        which the original mapped indirectly via the displayed labels.
        BUG FIX: the original left `cotation` unassigned when `risque` was
        empty; Odoo requires a compute method to assign its field on every
        record, so unset records raised a compute/CacheMiss error.
        """
        for record in self:
            record.cotation = int(record.risque) if record.risque else 0
# Generated by Django 2.2.13 on 2020-10-27 00:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make both mentor photo fields optional and store them under dated
    upload paths (photos/.../%Y/%m/%d/)."""
    dependencies = [
        ('mentors', '0002_auto_20201027_0655'),
    ]
    operations = [
        migrations.AlterField(
            model_name='mentor',
            name='cover_photo_1',
            field=models.ImageField(blank=True, upload_to='photos/mentors/cover/%Y/%m/%d/'),
        ),
        migrations.AlterField(
            model_name='mentor',
            name='mentors_picture',
            field=models.ImageField(blank=True, upload_to='photos/dp/mentors/%Y/%m/%d/'),
        ),
    ]
|
import os, re
import subprocess
# Working directory that holds the .chd images to convert.
ROOT_PATH = "e:/download/maxcso_v1.12.0_windows/work"
print( ">> Convert CHD to CSO")
def findFile( root, ext ):
    """Return filenames in *root* whose extension is exactly *ext*."""
    # BUG FIX: re.escape prevents regex metacharacters in *ext* from being
    # interpreted as pattern syntax; compile once instead of per file.
    pattern = re.compile(f'.*\\.{re.escape(ext)}$')
    return [ f for f in os.listdir(root) if pattern.match(f) ]
def toIso( root, path ):
    """Extract a CHD disc image to .cue/.iso beside the source via chdman."""
    source = f"{root}/{path}"
    stem = os.path.splitext(source)[0]
    cmd = f"chdman extractcd -f -i \"{source}\" -o \"{stem}.cue\" -ob \"{stem}.iso\""
    print( cmd )
    subprocess.Popen(cmd).wait()
def toCso( root, path ):
    """Compress <stem>.iso to <stem>.cso; fall back to CisoPlus when maxcso
    produces an empty file.  Marks the source .chd as .done or .fail and
    removes the intermediate .iso/.cue files."""
    base = os.path.splitext(f"{root}/{path}")[0]
    fullpath = f"{base}.iso"  # NOTE(review): assigned but never used.
    cmd = f"maxcso \"{base}.iso\""
    print( cmd )
    subprocess.Popen(cmd).wait()
    # maxcso leaves a zero-byte .cso on failure; retry with CisoPlus.
    if( os.path.getsize(f"{base}.cso") == 0 ):
        os.remove(f"{base}.cso")
        cmd = f"CisoPlus -com -l9 \"{base}.iso\" \"{base}.cso\""
        print( cmd )
        subprocess.Popen(cmd).wait()
        os.rename(f"{base}.chd", f"{base}.chd.fail")
    else:
        os.rename(f"{base}.chd", f"{base}.chd.done")
    # Clean up the intermediate extraction products.
    os.remove(f"{base}.iso")
    os.remove(f"{base}.cue")
# Convert every .chd in ROOT_PATH: extract to iso, then compress to cso.
for i, file in enumerate(findFile(ROOT_PATH, "chd")) :
    print( f"{i}: {file}" )
    toIso(ROOT_PATH,file)
    toCso(ROOT_PATH,file)
    # if( i >= 0 ):
    #     break
|
# SMTP/Text configuration, see https://docs.python.org/3/library/logging.handlers.html
# Example is given for using send grid
# SendGrid SMTP endpoint (STARTTLS submission port).
mailhost=('smtp.sendgrid.net', 587)
fromaddr='glass@rollingblueglass.com'
toaddrs=['where@domain.com']
# SendGrid API-key auth: the username is literally 'apikey'.
credentials=('apikey','GET THIS FROM THEM')
secure=() #This means use TLS
|
from typing import List
class Solution:
    def longestCommonPrefix(self, strs: List[str]) -> str:
        """Longest common prefix of *strs*, found by vertical (column) scan.

        Compare each column of strs[0] against every other string; stop at
        the first mismatch or when a string runs out of characters.
        """
        if not strs:
            return ''
        first = strs[0]
        for col in range(len(first)):
            for other in strs[1:]:
                if col >= len(other) or other[col] != first[col]:
                    return first[:col]
        # All columns matched every string: strs[0] itself is the prefix.
        return first
if __name__=='__main__':
    # BUG FIX: the original passed the raw input string, so each *character*
    # was treated as a word; the solver expects a list of strings.
    strings = input().strip().split()
    solution = Solution()
    print(solution.longestCommonPrefix(strings))
from __future__ import with_statement
import os
import pickle
try:
from redis import Redis
except ImportError:
Redis = None
class Cache(object):
    """Minimal in-memory key/value cache backed by a dict."""
    def __init__(self):
        self._cache = {}
    def get(self, k):
        """Return the cached value for *k*, or None when absent."""
        return self._cache.get(k)
    def set(self, k, v):
        """Store *v* under key *k*, overwriting any previous value."""
        self._cache[k] = v
class PickleCache(Cache):
    """Cache persisted to disk as a single pickle file."""
    def __init__(self, filename='cache.db'):
        self.filename = filename
        self._cache = self.load()
    def load(self):
        """Return the pickled dict from disk, or {} when the file is absent."""
        if not os.path.exists(self.filename):
            return {}
        with open(self.filename, 'rb') as fh:
            return pickle.load(fh)
    def save(self):
        """Write the current cache dict to disk."""
        with open(self.filename, 'wb') as fh:
            pickle.dump(self._cache, fh)
if Redis:
    class RedisCache(Cache):
        """Cache backed by Redis; values are pickled.

        :param str namespace: key prefix.
        :param int timeout: expiration timeout in seconds
        """
        def __init__(self, namespace='micawber', timeout=None, **conn):
            self.namespace = namespace
            self.timeout = timeout
            self.conn = Redis(**conn)
        def key_fn(self, k):
            """Prefix *k* with the namespace to avoid key collisions."""
            return '%s.%s' % (self.namespace, k)
        def get(self, k):
            cached = self.conn.get(self.key_fn(k))
            if cached:
                return pickle.loads(cached)
        def set(self, k, v):
            ck, cv = self.key_fn(k), pickle.dumps(v)
            if self.timeout is not None:
                # BUG FIX: redis-py's setex signature is (name, time, value);
                # the original passed (name, value, time), the pre-3.0 order,
                # which stores the timeout as the value on modern redis-py.
                self.conn.setex(ck, self.timeout, cv)
            else:
                self.conn.set(ck, cv)
|
from elftools.elf.elffile import ELFFile
from collections import defaultdict
from argparse import ArgumentParser
from util import u16, u32, c_str, hexdump
from indent import indent, iprint
from elf import ElfParser
from core import CoreParser
# Human-readable names for thread stop reasons reported in the core dump.
str_stop_reason = defaultdict(str, {
    0: "No reason",
    0x30002: "Undefined instruction exception",
    0x30003: "Prefetch abort exception",
    0x30004: "Data abort exception",
    0x60080: "Division by zero",
})
# Thread scheduler states.
str_status = defaultdict(str, {
    1: "Running",
    8: "Waiting",
    16: "Not started",
})
# Memory-segment permission attributes (matched on the low nibble of segment.attr).
str_attr = defaultdict(str, {
    5: "RX",
    6: "RW",
})
# Conventional ARM register aliases for r13-r15.
reg_names = {
    13: "SP",
    14: "LR",
    15: "PC",
}
core = None   # CoreParser instance, set by main()
isPC = True   # NOTE(review): never read in this chunk; confirm before removing.
def print_module_info(module):
    """Pretty-print one module's segments: start, size, attributes, alignment."""
    iprint(module.name)
    with indent():
        for index, segment in enumerate(module.segments):
            iprint("Segment {}".format(index + 1))
            with indent():
                iprint("Start: 0x{:x}".format(segment.start))
                iprint("Size: 0x{:x} bytes".format(segment.size))
                iprint("Attributes: 0x{:x} ({})".format(segment.attr, str_attr[segment.attr & 0xF]))
                iprint("Alignment: 0x{:x}".format(segment.align))
def print_thread_info(thread, elf=None):
    """Print a thread's name, id, stop reason, status and PC; when the PC
    cannot be located in a module, also print the LR as a fallback."""
    iprint(thread.name)
    with indent():
        iprint("ID: 0x{:x}".format(thread.uid))
        iprint("Stop reason: 0x{:x} ({})".format(thread.stop_reason, str_stop_reason[thread.stop_reason]))
        iprint("Status: 0x{:x} ({})".format(thread.status, str_status[thread.status]))
        pc = core.get_address_notation("PC", thread.pc)
        iprint(pc.to_string(elf))
        if not pc.is_located():
            # PC not inside a known module: the link register is the best hint.
            iprint(core.get_address_notation("LR", thread.regs.gpr[14]).to_string(elf))
def main():
    """Parse a core dump and its matching ELF, print every thread's status,
    then dump registers, disassembly context and stack contents for each
    crashed thread.

    CLI: corefile elffile [-s/--stack-size-to-print SIZE]
    """
    global core
    parser = ArgumentParser()
    parser.add_argument("-s", "--stack-size-to-print", dest="stacksize",
        type=int, help="Number of addresses of the stack to print", metavar="SIZE", default=24)
    parser.add_argument("corefile")
    parser.add_argument("elffile")
    args = parser.parse_args()
    stackSize = args.stacksize
    elf = ElfParser(args.elffile)
    core = CoreParser(args.corefile)
    # iprint("=== MODULES ===")
    # with indent():
    #     for module in core.modules:
    #         print_module_info(module)
    # iprint()
    iprint("=== THREADS ===")
    crashed = []
    with indent():
        for thread in core.threads:
            # A non-zero stop reason marks the thread as crashed.
            if thread.stop_reason != 0:
                crashed.append(thread)
            print_thread_info(thread, elf)
            iprint()
    for thread in crashed:
        iprint('=== THREAD "{}" <0x{:x}> CRASHED ({}) ==='.format(thread.name, thread.uid, str_stop_reason[thread.stop_reason]))
        pc = core.get_address_notation('PC', thread.pc)
        pc.print_disas_if_available(elf)
        lr = core.get_address_notation('LR', thread.regs.gpr[14])
        lr.print_disas_if_available(elf)
        iprint("REGISTERS:")
        with indent():
            # R0..R13, using the SP/LR aliases from reg_names where defined.
            for x in range(14):
                reg = reg_names.get(x, "R{}".format(x))
                iprint("{}: 0x{:x}".format(reg, thread.regs.gpr[x]))
            iprint(pc)
            iprint(lr)
        iprint()
        iprint("STACK CONTENTS AROUND SP:")
        with indent():
            sp = thread.regs.gpr[13]
            # Walk 16 words below SP up through stackSize words above it,
            # annotating the word at SP itself.
            for x in range(-16, stackSize):
                addr = 4 * x + sp
                data = core.read_vaddr(addr, 4)
                if data:
                    data = u32(data, 0)
                    prefix = "     "
                    if addr == sp:
                        prefix = "SP =>"
                    data_notation = core.get_address_notation("{} 0x{:x}".format(prefix, addr), data)
                    iprint(data_notation.to_string(elf))
if __name__ == "__main__":
    main()
|
from mercury.logic.auth import check_token
from typing import Optional
from fastapi.params import Cookie
from mercury.types.survey import Survey
from mercury.logic.surveys import create_survey, delete_survey, get_all_surveys, get_one_survey
from fastapi.responses import JSONResponse
from fastapi import APIRouter
router = APIRouter(prefix='/surveys')
@router.get('/')
async def get_all():
    """Return every survey as JSON with HTTP 200."""
    return JSONResponse(content=await get_all_surveys(), status_code=200)
@router.get('/{survey_id}')
async def get_one(survey_id: str):
    """Return the survey identified by *survey_id* as JSON with HTTP 200."""
    return JSONResponse(content=await get_one_survey(survey_id), status_code=200)
@router.post('/')
async def create(survey: Survey, token: Optional[str] = Cookie(None)):
    """Create a survey titled ``survey.title`` for the user resolved from the
    session cookie; respond 201 with the new survey's id.

    Fix: the local variable was named ``id``, shadowing the builtin.
    """
    new_id = await create_survey(survey.title, await check_token(token))
    return JSONResponse(content={"id": new_id}, status_code=201)
@router.delete('/{survey_id}')
async def delete(survey_id: str, token: Optional[str] = Cookie(None)):
    """Delete the survey *survey_id* on behalf of the user resolved from the
    session cookie; respond 202 with the deleted survey's id.

    Fix: the local variable was named ``id``, shadowing the builtin.
    """
    deleted_id = await delete_survey(survey_id, await check_token(token))
    return JSONResponse(content={"id": deleted_id}, status_code=202)
|
from testing import *
from testing.tests import *
from testing.assertions import *
# Grading script for compress/decompress exercises.
# Grade cumulatively: once a group fails, subsequent groups are skipped.
with cumulative(skip_after_fail=True):
    # 'compress' cases — all_or_nothing: every call must pass for credit.
    with all_or_nothing(), tested_function_name('compress'):
        # reftest() presumably records each call for comparison against a
        # reference implementation — confirm in the `testing` package.
        compress = reftest()
        compress('')
        compress('a')
        compress('aa')
        compress('aaa')
        compress('aaaaaaa')
        compress('aaaaabbbbbbbbbaaaaaaa')
        compress('aabcccc ccddeeeee f g h')
        compress('AAAAaaaa')
        compress('(aa)(bbbbb)')
    # 'decompress' cases — likewise all-or-nothing.
    with all_or_nothing(), tested_function_name('decompress'):
        decompress = reftest()
        decompress('')
        decompress('1a')
        decompress('2a')
        decompress('5a')
        decompress('5a3b8c')
        decompress('1a 1b 1c')
        decompress('7abc')
        decompress('(4b) (3c)')
        decompress('15a 10b')
|
from django.apps import AppConfig
class RedactedArchiverConfig(AppConfig):
    """Django application configuration for the redacted_archiver plugin."""
    # Dotted module path Django uses to register the app.
    name = 'plugins.redacted_archiver'
|
import FWCore.ParameterSet.Config as cms
#-------------------------------------------------
#AlCaReco filtering for HCAL HBHEMuon:
#-------------------------------------------------
import HLTrigger.HLTfilters.hltHighLevel_cfi
# HLT-level selection: accept events that fired any trigger path registered
# under the 'HcalCalHBHEMuonFilter' eventSetupPathsKey.
ALCARECOHcalCalHBHEMuonFilterHLT = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone(
    eventSetupPathsKey = 'HcalCalHBHEMuonFilter',
    throw = False  # don't throw an exception on an unknown path name
)
from Calibration.HcalAlCaRecoProducers.AlcaHBHEMuonFilter_cfi import *
# Full filter sequence: HLT selection followed by the offline muon filter.
seqALCARECOHcalCalHBHEMuonFilter = cms.Sequence(ALCARECOHcalCalHBHEMuonFilterHLT *
                                                AlcaHBHEMuonFilter)
|
from flask import Flask, render_template
app = Flask(__name__)
# Import CRUD operations and database classes
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Restaurant, Base, MenuItem
# Connect to the local SQLite menu database and open one module-wide session.
engine = create_engine('sqlite:///restaurantmenu.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
@app.route('/')
@app.route('/hello')
def hello_world():
    """Render the menu page for the first restaurant in the database.

    Returns a plain 404 message when no restaurants exist — previously this
    raised AttributeError on ``restaurant.id`` against an empty database.
    """
    restaurant = session.query(Restaurant).first()
    if restaurant is None:
        return "No restaurants found", 404
    items = session.query(MenuItem).filter_by(restaurant_id=restaurant.id)
    return render_template('menu.html', restaurant=restaurant, items=items)
if __name__ == '__main__':
    # NOTE(review): debug=True enables Werkzeug's interactive debugger and
    # auto-reload — never enable it on a production deployment.
    app.debug = True
    app.run(host='0.0.0.0', port=5000)
|
class Solution:
    def titleToNumber(self, s: str) -> int:
        """Convert an Excel column title (e.g. 'A', 'AB') to its 1-based
        column number, treating the title as a base-26 numeral."""
        result = 0
        for ch in s:
            result = result * 26 + (ord(ch) - ord('A') + 1)
        return result
|
from django.urls import path
from . import views
urlpatterns = [
    # Landing page.
    path('', views.index, name="index"),
    # Delete ('hapus', presumably Indonesian) the record keyed by `kode`.
    path('hapus/<str:kode>', views.hapus, name="hapus" ),
    # Edit the record keyed by `kode`.
    path('edit/<str:kode>', views.edit, name="edit" ),
]
|
###### Writer : "Atia"
####Importing Libraries
from urllib.request import Request, urlopen
import pandas as pd
import requests
from bs4 import BeautifulSoup
import os
from PIL import Image
import shutil
# Scrape visitsacramento.com two levels deep, download every <img>, filter out
# thumbnails, and write the results to Excel.
# NOTE(review): hard-coded absolute working directory — breaks on any other machine.
os.chdir("/Users/macbook/Documents/pyhton/portfolio/Collecting_Image")
city= "sacramento"
url = "https://www.visitsacramento.com/"
# Prefix used to keep only same-site links.
sec = "https://www.visitsacramento"
folder_name = "city"
#### create the output folder if it does not already exist
if not os.path.exists(folder_name):
    os.makedirs(folder_name)
##### setting up saving the images
image_name = ("%s/%s_image" %(city,city))
if not os.path.exists(image_name ):
    os.makedirs(image_name )
##### opening and saving the links
req = Request(url)
html_page = urlopen(req)
soup = BeautifulSoup(html_page)
links = [url]
for link in soup.findAll('a'):
    links.append(link.get('href'))
# Keep only same-site links; link.get('href') can be None, hence the try.
all_data= []
for i in links:
    try:
        if sec in i:
            all_data.append(i)
    except:
        print("no")
# De-duplicate while preserving order.
data= [url]
for i in all_data:
    if not i in data:
        data.append(i)
        print(i)
# Second crawl level: collect same-site links from every first-level page.
second = []
for i in data:
    req = Request(i)
    try:
        html_page = urlopen(req)
    except:
        # NOTE(review): on failure html_page keeps the PREVIOUS page's handle,
        # so the stale page is re-parsed below — should `continue` instead.
        pass
    soup = BeautifulSoup(html_page)
    temp = []
    for link in soup.findAll('a'):
        temp.append(link.get('href'))
    # NOTE(review): inner loop reuses the outer loop variable `i`.
    for i in temp:
        try:
            if sec in i:
                print(i)
                second.append(i)
        except:
            # NOTE(review): garbled message — presumably meant print("no").
            print("print(no")
for i in second:
    if not i in data:
        data.append(i)
        print(i)
print(len(data))
# Download every image from every collected page.
table = []
count = 0
for site in data:
    r = requests.get(site)
    # Parse HTML Code
    soup = BeautifulSoup(r.text, 'html.parser')
    # find all images in URL
    images = soup.findAll('img')
    # checking if images is not zero
    if len(images) != 0:
        for i, image in enumerate(images):
            print(count)
            print("printing link: ", i, image)
            # From image tag, fetch the image source URL, trying attributes
            # in order (NOTE(review): actual order is data-lazy-src,
            # data-src, src, data-srcset — the list below is stale):
            # 1.data-srcset
            # 2.data-src
            # 3.data-fallback-src
            # 4.src
            # use exception handling
            try:
                # In image tag, searching for "data-lazy-src"
                image_link = image["data-lazy-src"]
                # then we will search for "data-src" in img
                # tag and so on..
            except:
                try:
                    # In image tag, searching for "data-src"
                    image_link = image["data-src"]
                except:
                    try:
                        # In image tag, searching for "src"
                        image_link = image["src"]
                    except:
                        try:
                            # In image tag, searching for "data-srcset"
                            image_link = image["data-srcset"]
                            # if no Source URL found
                        except:
                            pass
            # After getting Image Source URL
            # We will try to get the content of image
            try:
                r = requests.get(image_link).content
                try:
                    # If the body decodes as UTF-8 text it is not image data;
                    # only binary bodies (UnicodeDecodeError) are saved.
                    r = str(r, 'utf-8')
                except UnicodeDecodeError:
                    # After checking above condition, Image Download start
                    name = ("images%s.jpg"%count)
                    fname = os.path.join(image_name, name)
                    if os.path.exists(fname):
                        os.remove(fname)
                    with open(fname, "wb+") as f:
                        f.write(r)
                    # counting number of image downloaded
                    count += 1
                    # NOTE(review): `number` uses the post-increment count, so
                    # it names the NEXT file, not the one just written.
                    number= ("images%s.jpg"%count)
                    temp = [city, site,number, count ]
                    table.append(temp)
                    print(temp)
            except:
                pass
    # There might be possible, that all
    # images not download
    # if all images download
    if count == len(images):
        print("All Images Downloaded!")
    # if all images not download
    else:
        print(f"Total {count} Images Downloaded Out of {len(images)}")
df = pd.DataFrame(table)
name = ("%s_excel_image.xlsx" %city)
df.to_excel(name)
##### filtering out the thumbnails (width <= 30px)
filter_folder = ("%s/%s_image_filter"%(city,city))
if not os.path.exists(filter_folder):
    os.makedirs(filter_folder)
count = 0
# NOTE(review): `dir` and `file` shadow builtins.
for root, dir, files in os.walk(image_name) :
    for file in files:
        dis = os.path.join(image_name, file)
        img = Image.open(dis)
        if img.size[0] > 30:
            shutil.copy(dis, filter_folder)
            count +=1
# fname = "sacramento.xlsx"
##### saving the final files
df = pd.read_excel(name)
# NOTE(review): duplicate column label "number" — confirm intended schema.
df.columns = ["number", "city", "link", "name", "number"]
print(len(df))
names= []
for root, dir, files in os.walk(filter_folder):
    for file in files:
        print(type(file))
        t = file.split(".")
        print(t)
        # Rebuild the file stem as "image_<n>"-style key for the merge below.
        name = t[0][:5]+"_"+t[0][6:]
        print(name)
        names.append(name)
        print(name)
dn = pd.DataFrame(names)
dn.columns = ["name"]
print(len(dn))
data = pd.merge(df, dn, on= "name", how= "outer")
print(len(data))
data = data[["city", "link", "name"]]
name = ("%s_final_ready.xlsx"%city)
nn = os.path.join(folder_name,name )
data.to_excel(nn)
|
"""
This type stub file was generated by pyright.
"""
import vtkmodules.vtkCommonExecutionModel as __vtkmodules_vtkCommonExecutionModel
# NOTE: auto-generated type stub (pyright); method bodies are "..." placeholders.
class vtkHierarchicalBinningFilter(__vtkmodules_vtkCommonExecutionModel.vtkPolyDataAlgorithm):
    """
    vtkHierarchicalBinningFilter - uniform binning of points into a
    hierarchical structure
    Superclass: vtkPolyDataAlgorithm
    vtkHierarchicalBinningFilter creates a spatial, hierarchical ordering
    of input points. This hierarchy is suitable for level-of-detail
    rendering, or multiresolution processing. Each level of the hierarchy
    is based on uniform binning of space, where deeper levels (and its
    bins) are repeatedly subdivided by a given branching factor. Points
    are associated with bins at different levels, with the number of
    points in each level proportional to the number of bins in that
    level. The output points are sorted according to a bin number, where
    the bin number is unique, monotonically increasing number
    representing the breadth first ordering of all of the levels and
    their bins. Thus all points in a bin (or even a level) are segmented
    into contiguous runs.
    Note that points are associated with different bins using a pseudo
    random process. No points are repeated, and no new points are
    created, thus the effect of executing this filter is simply to
    reorder the input points.
    The algorithm proceeds as follows: Given an initial bounding box, the
    space is uniformally subdivided into bins of (M x N x O) dimensions;
    in turn each subsequent level in the tree is further divided into (M
    x N x O) bins (note that level 0 is a single, root bin). Thus the
    number of bins at level L of the hierarchical tree is: Nbins=(M^L x
    N^L x O^L). Once the binning is created to a specified depth, then
    points are placed in the bins using a pseudo-random sampling
    proportional to the number of bins in each level. All input points
    are sorted in the order described above, with no points repeated.
    The output of this filter are sorted points and associated point
    attributes represented by a vtkPolyData. In addition, an offset
    integral array is associated with the field data of the output,
    providing offsets into the points list via a breadth-first traversal
    order. Metadata describing the output is provided in the field data.
    Convenience functions are also provided here to access the data in a
    particular bin or across a level. (Using the offset array directly
    may result in higher performance.)
    While any vtkPointSet type can be provided as input, the output is
    represented by an explicit representation of points via a
    vtkPolyData. This output polydata will populate its instance of
    vtkPoints, but no cells will be defined (i.e., no vtkVertex or
    vtkPolyVertex are contained in the output).
    @warning
    This class has been threaded with vtkSMPTools. Using TBB or other
    non-sequential type (set in the CMake variable
    VTK_SMP_IMPLEMENTATION_TYPE) may improve performance significantly.
    @sa
    vtkPointCloudFilter vtkQuadricClustering vtkStaticPointLocator
    """
    def AutomaticOff(self):
        """
        V.AutomaticOff()
        C++: virtual void AutomaticOff()
        Specify whether to determine the determine the level divisions,
        and the bounding box automatically (by default this is on). If
        off, then the user must specify both the bounding box and bin
        divisions. (Computing the bounding box can be slow for large
        point clouds, manual specification can save time.)
        """
        ...
    def AutomaticOn(self):
        """
        V.AutomaticOn()
        C++: virtual void AutomaticOn()
        Specify whether to determine the determine the level divisions,
        and the bounding box automatically (by default this is on). If
        off, then the user must specify both the bounding box and bin
        divisions. (Computing the bounding box can be slow for large
        point clouds, manual specification can save time.)
        """
        ...
    def GetAutomatic(self):
        """
        V.GetAutomatic() -> bool
        C++: virtual bool GetAutomatic()
        Specify whether to determine the determine the level divisions,
        and the bounding box automatically (by default this is on). If
        off, then the user must specify both the bounding box and bin
        divisions. (Computing the bounding box can be slow for large
        point clouds, manual specification can save time.)
        """
        ...
def GetBinBounds(self, p_int, p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):
"""
V.GetBinBounds(int, [float, float, float, float, float, float])
C++: void GetBinBounds(int globalBin, double bounds[6])
Convenience methods for extracting useful information about a bin
tree. Given a global bin number, return the bounds
(xmin,xmax,ymin,ymax,zmin,zmax) for that bin. Invoke this method
after the bin tree has been built.
"""
...
    # NOTE: generated stub methods; bodies are "..." placeholders.
    def GetBinOffset(self, p_int, p_int_1):
        """
        V.GetBinOffset(int, int) -> int
        C++: vtkIdType GetBinOffset(int globalBin, vtkIdType &npts)
        Convenience methods for extracting useful information about this
        bin tree. Given a global bin number, return the point id and
        number of points for that bin. Invoke this method after the bin
        tree has been built.
        """
        ...
    def GetBounds(self):
        """
        V.GetBounds() -> (float, float, float, float, float, float)
        C++: virtual double *GetBounds()
        Set the bounding box of the point cloud. If Automatic is enabled,
        then this is computed during filter execution. If manually
        specified (Automatic is off) then make sure the bounds is
        represented as (xmin,xmax, ymin,ymax, zmin,zmax). If the bounds
        specified is does not enclose the points, then points are clamped
        to lie in the bounding box.
        """
        ...
    def GetDivisions(self):
        """
        V.GetDivisions() -> (int, int, int)
        C++: virtual int *GetDivisions()
        Set the number of branching divisions in each binning direction.
        Each level of the tree is subdivided by this factor. The
        Divisions[i] must be >= 1. Note: if Automatic subdivision is
        specified, the Divisions are set by the filter.
        """
        ...
    def GetLevelOffset(self, p_int, p_int_1):
        """
        V.GetLevelOffset(int, int) -> int
        C++: vtkIdType GetLevelOffset(int level, vtkIdType &npts)
        Convenience methods for extracting useful information about this
        bin tree. Given a level, return the beginning point id and
        number of points that level. Invoke this method after the bin
        tree has been built.
        """
        ...
def GetLocalBinBounds(self, p_int, p_int_1, p_float=..., p_float=..., p_float=..., p_float=..., p_float=..., p_float=...):
"""
V.GetLocalBinBounds(int, int, [float, float, float, float, float,
float])
C++: void GetLocalBinBounds(int level, int localBin,
double bounds[6])
Convenience methods for extracting useful information about a bin
tree. Given a level, and a local bin number, return the bounds
(xmin,xmax,ymin,ymax,zmin,zmax) for that bin. Invoke this method
after the bin tree has been built.
"""
...
    # NOTE: generated stub methods and attribute placeholders; no executable logic.
    def GetLocalBinOffset(self, p_int, p_int_1, p_int_2):
        """
        V.GetLocalBinOffset(int, int, int) -> int
        C++: vtkIdType GetLocalBinOffset(int level, int localBin,
            vtkIdType &npts)
        Convenience methods for extracting useful information about this
        bin tree. Given a level, and the bin number in that level,
        return the offset point id and number of points for that bin.
        Invoke this method after the bin tree has been built.
        """
        ...
    def GetNumberOfBins(self, p_int):
        """
        V.GetNumberOfBins(int) -> int
        C++: int GetNumberOfBins(int level)
        Convenience methods for extracting useful information about this
        bin tree. Return the number of bins in a particular level of the
        tree. Invoke this method after the bin tree has been built.
        """
        ...
    def GetNumberOfGenerationsFromBase(self, string):
        """
        V.GetNumberOfGenerationsFromBase(string) -> int
        C++: vtkIdType GetNumberOfGenerationsFromBase(const char *type)
        override;
        Standard methods for instantiating, obtaining type information,
        and printing information.
        """
        ...
    def GetNumberOfGenerationsFromBaseType(self, string):
        """
        V.GetNumberOfGenerationsFromBaseType(string) -> int
        C++: static vtkIdType GetNumberOfGenerationsFromBaseType(
            const char *type)
        Standard methods for instantiating, obtaining type information,
        and printing information.
        """
        ...
    def GetNumberOfGlobalBins(self):
        """
        V.GetNumberOfGlobalBins() -> int
        C++: int GetNumberOfGlobalBins()
        Convenience methods for extracting useful information about this
        bin tree. Return the number of total bins across all levels
        (i.e., the total global bins). Invoke this method after the bin
        tree has been built.
        """
        ...
    def GetNumberOfLevels(self):
        """
        V.GetNumberOfLevels() -> int
        C++: virtual int GetNumberOfLevels()
        Specify the number of levels in the spatial hierarchy. By
        default, the number of levels is three.
        """
        ...
    def GetNumberOfLevelsMaxValue(self):
        """
        V.GetNumberOfLevelsMaxValue() -> int
        C++: virtual int GetNumberOfLevelsMaxValue()
        Specify the number of levels in the spatial hierarchy. By
        default, the number of levels is three.
        """
        ...
    def GetNumberOfLevelsMinValue(self):
        """
        V.GetNumberOfLevelsMinValue() -> int
        C++: virtual int GetNumberOfLevelsMinValue()
        Specify the number of levels in the spatial hierarchy. By
        default, the number of levels is three.
        """
        ...
    def IsA(self, string):
        """
        V.IsA(string) -> int
        C++: vtkTypeBool IsA(const char *type) override;
        Standard methods for instantiating, obtaining type information,
        and printing information.
        """
        ...
    def IsTypeOf(self, string):
        """
        V.IsTypeOf(string) -> int
        C++: static vtkTypeBool IsTypeOf(const char *type)
        Standard methods for instantiating, obtaining type information,
        and printing information.
        """
        ...
    def NewInstance(self):
        """
        V.NewInstance() -> vtkHierarchicalBinningFilter
        C++: vtkHierarchicalBinningFilter *NewInstance()
        Standard methods for instantiating, obtaining type information,
        and printing information.
        """
        ...
    def SafeDownCast(self, vtkObjectBase):
        """
        V.SafeDownCast(vtkObjectBase) -> vtkHierarchicalBinningFilter
        C++: static vtkHierarchicalBinningFilter *SafeDownCast(
            vtkObjectBase *o)
        Standard methods for instantiating, obtaining type information,
        and printing information.
        """
        ...
    def SetAutomatic(self, bool):
        """
        V.SetAutomatic(bool)
        C++: virtual void SetAutomatic(bool _arg)
        Specify whether to determine the determine the level divisions,
        and the bounding box automatically (by default this is on). If
        off, then the user must specify both the bounding box and bin
        divisions. (Computing the bounding box can be slow for large
        point clouds, manual specification can save time.)
        """
        ...
    def SetBounds(self, p_float, p_float_1, p_float_2, p_float_3, p_float_4, p_float_5):
        """
        V.SetBounds(float, float, float, float, float, float)
        C++: virtual void SetBounds(double _arg1, double _arg2,
            double _arg3, double _arg4, double _arg5, double _arg6)
        V.SetBounds((float, float, float, float, float, float))
        C++: virtual void SetBounds(const double _arg[6])
        Set the bounding box of the point cloud. If Automatic is enabled,
        then this is computed during filter execution. If manually
        specified (Automatic is off) then make sure the bounds is
        represented as (xmin,xmax, ymin,ymax, zmin,zmax). If the bounds
        specified is does not enclose the points, then points are clamped
        to lie in the bounding box.
        """
        ...
    def SetDivisions(self, p_int, p_int_1, p_int_2):
        """
        V.SetDivisions(int, int, int)
        C++: virtual void SetDivisions(int _arg1, int _arg2, int _arg3)
        V.SetDivisions((int, int, int))
        C++: virtual void SetDivisions(const int _arg[3])
        Set the number of branching divisions in each binning direction.
        Each level of the tree is subdivided by this factor. The
        Divisions[i] must be >= 1. Note: if Automatic subdivision is
        specified, the Divisions are set by the filter.
        """
        ...
    def SetNumberOfLevels(self, p_int):
        """
        V.SetNumberOfLevels(int)
        C++: virtual void SetNumberOfLevels(int _arg)
        Specify the number of levels in the spatial hierarchy. By
        default, the number of levels is three.
        """
        ...
    def __delattr__(self, *args, **kwargs):
        """ Implement delattr(self, name). """
        ...
    def __getattribute__(self, *args, **kwargs):
        """ Return getattr(self, name). """
        ...
    def __init__(self, *args, **kwargs) -> None:
        ...
    @staticmethod
    def __new__(*args, **kwargs):
        """ Create and return a new object. See help(type) for accurate signature. """
        ...
    def __repr__(self, *args, **kwargs):
        """ Return repr(self). """
        ...
    def __setattr__(self, *args, **kwargs):
        """ Implement setattr(self, name, value). """
        ...
    def __str__(self, *args, **kwargs) -> str:
        """ Return str(self). """
        ...
    # Opaque generated attribute placeholders.
    __this__ = ...
    __dict__ = ...
    __vtkname__ = ...
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""fichier contenant la configuration de base des stats des personnages."""
cfg_stats = r"""
# Ce fichier permet de configurer les stats des personnages.
# Dans ce fichier, vous pourrez par exemple définir qu'un personnage a
# une force, une agilité, une constitution, positionner certaines valeurs
# à défaut, définir des marges et des maximums.
# A noter que ce fichier n'est pas fait pour être modifié trop souvent.
# Une fois que des joueurs se sont créés dans votre univers, il n'est pas
# très bon de retirer ou modifier des stats (vous pouvez en revanche en
# ajouter sans problème).
# Si vous souhaitez supprimer des stats, il vous faudra vérifier
# soigneusement qu'elles ne sont pas impliquées dans des calculs dans le
# reste du MUD. Si par exemple vous supprimez la stat 'agilite', il vous
# faudra sûrement modifier le calcul de réussite pour le talent 'pêche'.
# Les calculs impliquant des stats doivent se trouver dans le fichier de
# configuration 'calcul.cfg' du module les utilisant. Vous n'avez donc pas
# besoin de toucher au code pour modifier ces calculs. En outre, cela vous
# permet, même en diffusant le code de votre MUD, de laisser le joueur curieux
# dans le doute quant aux calculs qui sont fait pour réussir un talent
# particulier.
## Tableaux des stats
# Les stats sont donnés sous la forme d'un tuple de lignes. Chaque ligne
# est un tuple contenant plusieurs informations, décrites ici.
# Inspirez-vous du modèle fourni.
# Pour supprimer une stat, supprimez sa ligne.
# Les informations à entrer, dans l'ordre, sont :
# * nom : le nom de la stat, sans espaces ni caractères spéciaux
# C'est ce nom que vous allez utiliser dans le personnage. Vous pourrez
# appeler sa valeur courante par 'personnage.nom_stat' (par exemple
# 'personnage.force')
# * symbole : le symbole donné à la stat. C'est ce symbole, précédé du signe
# '%', que le joueur utilisera dans sa configuration de prompt
# * défaut : la valeur par défaut de la stat pour n'importe quel personnage
# lambda
# * marge : la marge maximum. Cette marge ne peut pas être dépassée par la
# stat (on peut par exemple souhaiter que la force ne puisse jamais
# dépasser 100). Vous pouvez préciser -1 ici pour définir une marge
# infinie.
# Notez que la marge minimum est 0.
# * max : une chaîne représentant la stat à prendre comme stat maximum.
# Ceci est indépendant de la marge. La vitalité (ou vie) est par
# exemple limitée par la vitalité max (ou vie max). Dans le tableau
# représentant cette stat, le nom de la stat maximum doit être donnée
# * flags : la liste des flags de la stat, séparés par le pipe ('|')
# Quand une stat atteint un certain nombre, une exception peut être levée.
# Cette exception peut être interceptée au niveau de la modification pour
# permettre des messages plus personnalisés, en cas de la mort du
# personnage par exemple.
# Chaque stat peut lever une exception quand elle dépasse un certain seuil.
# Si vous laissez la colonne stat vide, l'exception I0 sera appliquée
# par défaut.
# Les flags existants sont :
# * NX : la stat ne lève aucune exception lors de sa modification
# * I0 : la stat lève une exception si elle est strinctement inférieure à 0
# * IE0 : la stat lève une exception si elle est inférieure ou égale à 0
# * SM : la stat lève une exception si elle est supérieure au MAX
# * SEM : la stat lève une exception si elle est supérieure ou égale au MAX
stats = (
# Nom # Symbole | # Défaut # Marge # Max # Flags
( "vitalite_max" , 'vx' , 50 , 10000 , "" , ),
( "mana_max" , 'mx' , 50 , 10000 , "" , ),
( "endurance_max" , 'ex' , 50 , 10000 , "" , ),
( "vitalite" , 'v' , 50 , 10000 , "vitalite_max" , IE0),
( "mana" , 'm' , 50 , 10000 , "mana_max" , ),
( "endurance" , 'e' , 50 , 10000 , "endurance_max" , ),
( "force" , 'f' , 5 , 100 , "" , ),
( "agilite" , 'a' , 5 , 100 , "" , ),
( "robustesse" , 'r' , 5 , 100 , "" , ),
( "intelligence" , 'i' , 5 , 100 , "" , ),
( "charisme" , 'c' , 5 , 100 , "" , ),
( "sensibilite" , 's' , 5 , 100 , "" , ),
)
## Stats pouvant être entraînées
# Ce dictionnaire définit, en clé, le nom des stats pouvant être entraînées
# et en valeur le message de progression.
entrainables = {
"force": "Votre force musculaire est maintenant plus importante.",
"agilite": "Votre agilité est plus importante.",
"robustesse": "Votre robustesse est plus importante.",
"intelligence": "Vous vous sentez plus intelligent",
"charisme": "Vous sentez votre charisme augmenter.",
"sensibilite": "Vous sentez votre sensibilité augmenter",
}
## Stats liées
# Ce dictionnaire définit les stats dont la progression influence une
# autre stat. Indiquez en clé la stat à entraîner pour que la seconde
# progresse.
entrainement_liees = {
"agilite": "endurance_max",
"robustesse": "vitalite_max",
"intelligence": "mana_max",
}
"""
|
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
from System.Collections.Generic import *
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
def TempIsolateElements(view, items):
    """Temporarily isolate *items* in the given Revit *view*.

    Accepts a single element or a list of elements; returns True on success
    and False for empty input or when the Revit API call fails.

    Fix: the bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
    narrowed to ``Exception`` while keeping the best-effort False return.
    """
    if not items: return False
    elif not isinstance(items, list): items = [items]
    ielements = List[ElementId]([x.Id for x in UnwrapElement(items)])
    try:
        UnwrapElement(view).IsolateElementsTemporary(ielements)
        return True
    except Exception: return False
doc = DocumentManager.Instance.CurrentDBDocument
TransactionManager.Instance.EnsureInTransaction(doc)
# Dynamo inputs: IN[0] = element(s) to isolate, IN[1] = view(s).
# Lists are broadcast: view list + element list are zipped pairwise,
# a single element set is applied to every view.
if isinstance(IN[1], list):
    if isinstance(IN[0], list): OUT = [TempIsolateElements(x, y) for x, y in zip(IN[1], IN[0])]
    else: OUT = [TempIsolateElements(x, IN[0]) for x in IN[1]]
else: OUT = TempIsolateElements(IN[1], IN[0])
TransactionManager.Instance.TransactionTaskDone()
import datetime
import csv
import os
import logging
import pathlib
from sql_crawler import cloud_integration
class CrawlerLog(object):
""" Logs the status of the SQL crawler, including websites and queries.
The CrawlerLog keeps track of which websites were explored, how many
queries were found, and creates a CSV with all the queries. It also
logs any errors encountered. The log is saved into Logs subdirectory
with name based on start time. Queries are saved into Queries
subdirectory.
"""
def __init__(self, stream):
""" Initializes crawler log to keep track of crawler progress and
instantiates instance variables.
"""
self.start_time = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M")
folder_path = str(pathlib.Path(__file__).parent)
log_folder_path = folder_path + "/logs"
query_folder_path = folder_path + "/queries"
# Create directory for logs if it does not already exists
if not os.path.exists(log_folder_path):
os.mkdir(log_folder_path)
logName = "{0}/log-{1}.log".format(log_folder_path, self.start_time)
logging.basicConfig(filename=logName, filemode="a", level=logging.INFO)
logging.info("Beginning crawl at time %s.", self.start_time)
if not os.path.exists(query_folder_path):
os.mkdir(query_folder_path)
self.stream = stream
self.query_name = "{0}/queries_{1}.csv".format(query_folder_path, self.start_time)
if not self.stream:
self.csv_file = open(self.query_name, "a")
self.queries = csv.writer(self.csv_file)
self.queries.writerow(["Query", "URL"])
self.save_to_gcs = False
self.save_to_bq = False
self.batch_data = []
self.error_log_count = 0
def log_queries(self, queries, url):
""" Caches queries to be logged into CSV file or BigQuery. Periodically
flushes cache and writes queries once reaching maximum size.
Args:
queries: Queries to be logged
url: URL for page containing queries
"""
self.batch_data += [[query, url] for query in queries]
while (len(self.batch_data) > 1000):
self.flush_data(self.batch_data[:1000])
self.batch_data = self.batch_data[1000:]
def flush_data(self, data):
""" Flushes data directly to CSV file or BigQuery.
Args:
data: Rows to be flushed to CSV file or BigQuery table
"""
if self.save_to_bq:
err = cloud_integration.insert_rows(self.bq_project, self.bq_dataset, self.bq_table, data)
if err:
self.log_error(err)
if not self.stream:
self.queries.writerows(data)
def log_page(self, url, count):
""" Logs results of crawling one page using provided arguments.
Args:
url: URL of page being crawled
count: Number of queries found on the page
"""
logging.info("Crawled %s. Found %s queries.", url, str(count))
def log_error(self, errorMessage):
""" Logs crawler error to logfile.
Args:
str: Error message to be logged.
"""
self.error_log_count += 1
logging.error("ERROR: %s", errorMessage)
def parse_location_arg(self, location):
""" Validates and splits location argument for cloud upload
into two parts. Should be formatted as project_id.dataset.
Args:
location: String with name of project ID and dataset.
Returns
List of separate strings after splitting location.
"""
if location.count(".") != 1:
self.log_error("Argument not formatted correctly: {0}".format(location))
return None, None
return location.split(".")
def set_gcs(self, location):
""" Sets variables for uploading data to Google Cloud Storage.
Args:
location: String with name of project ID and bucket name,
separated by a period.
"""
self.gcs_project, self.gcs_bucket = self.parse_location_arg(location)
if self.gcs_project and self.gcs_bucket:
self.save_to_gcs = True
    def set_bq(self, location):
        """ Sets variables for uploading data to Google BigQuery.
        Args:
            location: String with name of project ID and dataset name,
            separated by a period.
        """
        self.bq_project, self.bq_dataset = self.parse_location_arg(location)
        # Table name is made unique per crawl via the crawler's start time.
        self.bq_table = "queries_{0}".format(self.start_time)
        if self.bq_project and self.bq_dataset:
            # Uploads stay disabled unless the table could actually be created.
            self.save_to_bq = cloud_integration.create_bigquery_table(self.bq_project, self.bq_dataset, self.bq_table)
            if not self.save_to_bq:
                self.log_error("Unable to create bigquery table.")
    def close(self):
        """ Flushes remaining queries and closes the crawler log. Uploads file
        to Google Cloud. Prints message if there are handled errors logged
        during crawling process.
        """
        logging.info("Finished crawling.")
        # Flush remaining queries and close file
        self.flush_data(self.batch_data)
        if not self.stream:
            self.csv_file.close()
        # Save file to GCS, if applicable
        file_name = "queries_{0}".format(self.start_time)
        if self.save_to_gcs:
            status, message = cloud_integration.upload_gcs_file(self.gcs_project,
                self.gcs_bucket, file_name, self.query_name)
            if status:
                logging.info(message)
            else:
                self.log_error(message)
        # Surface handled errors to the operator without failing the run.
        if self.error_log_count > 0:
            print("Logged {0} errors. See log for details.".format(self.error_log_count))
|
#%%
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision as tv
import matplotlib.pyplot as plt
import os
get_ipython().run_line_magic('matplotlib', 'inline')
#%% [markdown]
# 参考:https://pytorch.org/tutorials/intermediate/spatial_transformer_tutorial.html
#
# # 0.开始
#
#
# 
#
# 在本篇中,通过使用基于视觉注意机制(visual attentin mechanism)的空间变换网络(Spactial Transformer Network),来学习如何增强网络.
#
# STN是对任何空间变换可区别注意的一种泛化. 为增强网络的几何不变性,STN允许神经网络学习对输入图像进行空间变换,来达到几何不变性的目的.
#
# 例如,可以裁剪感兴趣的区域,缩放和纠正图像的方向. 由于CNN对于旋转、缩放以及其他更一般的仿射变换是可变的,因此这就是一种有用的机制.
#
# 关于STN最棒的其中一点就是,它可以简单插入现有的神经网络,而不用多做修改.
#%% [markdown]
# # 1.数据加载
#
# 在本篇中,我们使用经典的 MNIST 数据集. 使用具备STN增强的标准神经网络
#%%
# Data setup: MNIST normalized with its standard per-channel mean/std.
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
data_dir='../data/mnist/'
process=['train','test']
mnist_transforms=tv.transforms.Compose([tv.transforms.ToTensor(),tv.transforms.Normalize([0.1307],[0.3081])])
mnist_datasets={x:tv.datasets.MNIST(data_dir,train=(x=='train'),download=False,transform=mnist_transforms) for x in process}
mnist_dataloader={x:torch.utils.data.DataLoader(mnist_datasets[x],shuffle=True,batch_size=64) for x in process}
#%%
# Exploratory cells: dataset sizes and the (image, label) sample structure.
len(mnist_datasets['train'])
#%%
len(mnist_datasets['test'])
#%%
item=mnist_datasets['train'][0]
#%%
len(item)
#%%
type(item[0])
#%%
img=item[0]
img.size
#%%
## The dataset applies normalization, so undo it here before displaying
mean=torch.tensor([0.1307])
std=torch.tensor([0.3081])
for i in range(4):
    ax=plt.subplot(1,4,i+1)
    item=mnist_datasets['train'][i]
    img=item[0]*std+mean
    img=tv.transforms.functional.to_pil_image(img)
    img=tv.transforms.functional.resize(img,size=(224,224))
    ax.set_title(item[1].item())
    plt.imshow(img,cmap='gray');
#%%
## Same digits without de-normalization, for comparison
for i in range(4):
    ax=plt.subplot(1,4,i+1)
    item=mnist_datasets['train'][i]
    img=tv.transforms.functional.to_pil_image(item[0])
    ax.set_title(item[1].item())
    plt.imshow(img,cmap='gray');
#%%
# One batch of images
batch,_=next(iter(mnist_dataloader['train']))
batch.shape
#%%
## make_grid expands the channel dimension to 3
grid=tv.utils.make_grid(batch)
grid=grid.permute(1,2,0)
grid.shape
#%%
## Note: for 3-channel data the channel axis must be moved last before
## de-normalizing so that broadcasting against the 3-element mean/std works.
mean=torch.tensor([0.1307,0.1307,0.1307])
std=torch.tensor([0.3081,0.3081,0.3081])
grid=grid*std+mean
#%%
grid_img=tv.transforms.functional.to_pil_image(grid.permute(2,0,1))
plt.imshow(grid_img);
#%%
# Same grid de-normalized with ImageNet statistics (for visual comparison).
grid=tv.utils.make_grid(batch)
grid=grid.permute(1,2,0)
mean=torch.tensor([0.485, 0.456, 0.406])
std=torch.tensor([0.229, 0.224, 0.225])
grid=grid*std+mean
grid_img=tv.transforms.functional.to_pil_image(grid.permute(2,0,1))
plt.imshow(grid_img);
#%% [markdown]
# # 2. 对STN的描述
#
# STN网络主要归结为3部分:
#
# 
#
# - 局部化网络. 该网络是个常规的CNN网络,用于对变换的参数进行回归. 网络自动的从空间变换中学习能增强整体准确率的空间变换,而不是显式的从数据集中学习.
#
#
# - 网格产生器. 从输入图像中生成与输出图像每个像素都对应的坐标网格.
#
#
# - 采样器. 采样器使用变换的参数,并作用于输入图像.
#%%
class NetWithSTN(nn.Module):
    """MNIST classifier with an optional Spatial Transformer Network front end.

    The STN branch regresses a 2x3 affine matrix from the input image and
    warps the image with it before classification, letting the network learn
    spatial invariance end to end.
    """

    def __init__(self, withSTN=True):
        super(NetWithSTN, self).__init__()
        self.withSTN = withSTN
        # Main feature extractor: two conv/pool stages, 28x28 -> 7x7, 20 channels.
        self.features = nn.Sequential(
            nn.Conv2d(1, 10, kernel_size=3, padding=1),
            nn.ReLU(True),
            nn.MaxPool2d(2, stride=2),
            nn.Conv2d(10, 20, kernel_size=3, padding=1),
            nn.ReLU(True),
            nn.MaxPool2d(2, stride=2)
        )
        # Classification head over the flattened 20*7*7 feature map.
        self.classifier = nn.Sequential(
            nn.Linear(20 * 7 * 7, 100),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(100, 10)
        )
        # Localization network: same conv topology, feeds the affine regressor.
        self.localization = nn.Sequential(
            nn.Conv2d(1, 10, 3, padding=1),
            nn.ReLU(True),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(10, 20, 3, padding=1),
            nn.ReLU(True),
            nn.MaxPool2d(2, 2),
        )
        # Regresses the six entries of the 2x3 affine transform.
        self.localization_fc = nn.Sequential(
            nn.Linear(20 * 7 * 7, 100),
            nn.ReLU(True),
            nn.Linear(100, 6)
        )
        # Initialize to the identity transform: zero weights, identity bias.
        self.localization_fc[-1].weight.data.zero_()
        self.localization_fc[-1].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))

    def STN(self, x):
        """Warp batch x with the affine transform predicted by the localization net."""
        feats = self.localization(x)
        feats = feats.reshape(-1, 20 * 7 * 7)
        theta = self.localization_fc(feats).reshape(-1, 2, 3)
        grid = F.affine_grid(theta, x.size())
        return F.grid_sample(x, grid)

    def forward(self, x):
        """Classify a batch of 1x28x28 images; applies the STN first when enabled."""
        if self.withSTN:
            x = self.STN(x)
        out = self.features(x)
        out = out.reshape(-1, 20 * 7 * 7)
        return self.classifier(out)
#%% [markdown]
# # 3.训练模型
#
# 现在使用SGD对模型进行训练. 网络以监督学习的方式学习分类任务.同时模型也以端到端的方式自动学习STN.
#%%
# Sanity checks: nominal sample count (batches * batch_size) vs actual sizes.
len(mnist_dataloader['train'])*mnist_dataloader['train'].batch_size
#%%
len(mnist_dataloader['train'])
#%%
len(mnist_datasets['train'])
#%%
E=nn.CrossEntropyLoss()
def train(mod,op,epoch,dataloader=None):
    """Run one training epoch of `mod` with optimizer `op`.

    Relies on the notebook-level globals `E` (loss), `device` and
    `mnist_dataloader` (used when no dataloader is passed).
    """
    model=mod
    opt=op
    model.train()
    if dataloader is None:
        dataloader=mnist_dataloader
    # NOTE: `input` shadows the builtin; kept as-is (notebook code).
    for i,(input,target) in enumerate(dataloader['train']):
        input,target=input.to(device),target.to(device)
        opt.zero_grad()
        out=model(input)
        loss=E(out,target)
        loss.backward()
        opt.step()
        if i% 500 == 0:
            # NOTE(review): progress is a 0..1 fraction printed with {:.1f};
            # multiply by 100 if a percentage was intended -- confirm.
            progress=(i+1)*dataloader['train'].batch_size
            progress=progress/len(dataloader['train'].dataset)
            print('Train Epoch:{},Progress:{:.1f},Loss:{:.4f}'.format(epoch,progress,loss.item()))
#%%
def test(mod,op,epoch,dataloader=None):
    """Evaluate `mod` on the test split; prints average loss and accuracy.

    `op` and `epoch` are unused; they keep the signature symmetric with train().
    """
    model=mod
    opt=op
    with torch.no_grad():
        model.eval()
        correct=0
        loss_all=0.
        if dataloader is None:
            dataloader=mnist_dataloader
        for input,target in dataloader['test']:
            input,target=input.to(device),target.to(device)
            out=model(input)
            loss=E(out,target)
            # Weight the batch-mean loss by batch size for a correct dataset average.
            loss_all+=loss.item()*input.shape[0]
            _,pred=torch.max(out,dim=1)
            correct+=torch.sum(pred==target).item()
        data_size=len(dataloader['test'].dataset)
        print('Test Loss:{:.4f},Acc:{:.4f}'.format(loss_all/data_size,correct/data_size))
#%% [markdown]
# # 可视化STN结果
#
# 现在可以检测一下所学的视觉注意机制.
#
# 定义一个帮助函数,来可视化训练中的变换:
#%%
def convert_img_np(img):
    """Convert a CHW tensor to an HWC numpy image for display, undoing
    ImageNet-style normalization and clipping to [0, 1]."""
    arr = img.numpy().transpose((1, 2, 0))
    # Undo channel = (channel - mean) / std with the ImageNet statistics.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    denormalized = arr * std + mean
    return np.clip(denormalized, 0, 1)
#%% [markdown]
# 经过对模型的训练之后,我们打算可视化STN的输出,即:可视化一批输入的图片,以及对应的STN的输出.
#%%
def visualize_stn(mod):
    """Show a test batch next to its STN-transformed version, side by side."""
    model=mod
    with torch.no_grad():
        input_img=next(iter(mnist_dataloader['test']))[0].to(device)
        ## Move tensors back to the CPU so matplotlib can display them
        input_tensor=input_img.cpu()
        tf_input_tensor=model.STN(input_img).cpu()
        in_grid=tv.utils.make_grid(input_tensor)
        out_grid=tv.utils.make_grid(tf_input_tensor)
        f,axes=plt.subplots(1,2)
        axes[0].imshow(convert_img_np(in_grid))
        axes[0].set_title('Dataset Image')
        axes[1].imshow(convert_img_np(out_grid))
        axes[1].set_title('Transformed Image')
#%%
# Train and evaluate WITH the STN branch enabled
model=NetWithSTN().to(device)
opt=optim.SGD(model.parameters(),lr=0.01)
for e in range(1,4):
    train(model,opt,e)
    test(model,opt,e)
#%% [markdown]
# __With the STN, accuracy peaks around 98%__
#%%
visualize_stn(model);
#%%
## Same training WITHOUT the STN, as a baseline
model2=NetWithSTN(withSTN=False).to(device)
opt2=optim.SGD(model2.parameters(),lr=0.01)
for e in range(1,4):
    train(model2,opt2,e)
    test(model2,opt2,e)
#%% [markdown]
# __Without the STN, the best result is 96%__
|
"""Decode a leet-speak message from leet_message.txt into translation.txt.

Fixes over the original script:
- The mapping literal contained duplicate keys ("0", "1", "3", "5", "7"), so
  the earlier uppercase targets were silently discarded by the dict literal;
  the table below lists only the mappings that were actually in effect.
- Files were opened at module level and never closed; they are now handled
  with context managers, and the translation is factored into a testable
  function behind a __main__ guard.
"""

# Effective leet -> plain mapping. Any character absent from this table
# (including plain 'a', 'e', 'l', 'o', 's', 't' and unmapped digits) is
# dropped, exactly as in the original behaviour. Space and newline are
# included here so they pass through unchanged.
LEET_MAP = {
    "4": "A", "8": "B", "C": "C", "D": "D", "F": "F", "G": "G", "H": "H",
    "I": "I", "J": "J", "K": "K", "M": "M", "N": "N", "P": "P", "Q": "Q",
    "R": "R", "U": "U", "V": "V", "W": "W", "X": "X", "Y": "Y", "Z": "Z",
    "@": "a", "b": "b", "c": "c", "d": "d", "3": "e", "f": "f", "g": "g",
    "h": "h", "i": "i", "j": "j", "k": "k", "1": "l", "m": "m", "n": "n",
    "0": "o", "p": "p", "q": "q", "r": "r", "5": "s", "7": "t", "u": "u",
    "v": "v", "w": "w", "x": "x", "y": "y", "z": "z",
    "!": "!", ",": ",", ":": ":", "-": "-", "_": "_", "'": "'", ".": ".",
    " ": " ", "\n": "\n",
}


def translate_leet(text):
    """Return `text` with each mapped character translated; unmapped characters are dropped."""
    return "".join(LEET_MAP[ch] for ch in text if ch in LEET_MAP)


def main():
    """Read the leet message file and write its plain-text translation."""
    with open("leet_message.txt", "r") as src:
        msg = src.read()
    with open("translation.txt", "w") as dst:
        dst.write(translate_leet(msg))


if __name__ == "__main__":
    main()
import os, logging, time
import argparse
from fingerprint import FingerprintDB, AudioFingerprint
logging.basicConfig(level=logging.WARNING)
# paths
# database_path = "../fp_data_dummy/db/"
# query_path = "../fp_data_dummy/query/"
# fingerprint_path = "./fingerprints_dummy/"
# output_file = "./output/output_dummy.txt"
database_path = "../fp_data/database_recordings/"
query_path = "../fp_data/query_recordings/"
fingerprint_path = "./fingerprints/"
output_file = "./output/output.txt"
# params
sr = 8000      # audio sample rate in Hz
n_fft = 2048   # FFT window size
n_hop = 512    # hop between analysis frames
n_freq = 1025  # number of frequency bins (n_fft / 2 + 1)
# constellation map
# NOTE(review): tau/kappa are presumably peak-picking neighbourhood
# thresholds -- confirm their exact meaning in fingerprint.py.
tau = 3
kappa = 20
# target zone
n_target_dist = 50   # target-zone extent in time frames (presumably)
f_target_dist = 100  # target-zone extent in frequency bins (presumably)
params = {
    "sr": sr,
    "n_fft": n_fft,
    "n_hop": n_hop,
    "n_freq": n_freq,
    "tau": tau,
    "kappa": kappa,
    "n_target_dist": n_target_dist,
    "f_target_dist": f_target_dist
}
def fingerprintBuilder(database_path, fingerprint_path, params=params):
    """Build the audio fingerprint database and print the wall-clock build time.

    Args:
        database_path: Directory containing the reference recordings.
        fingerprint_path: Directory where fingerprints are stored.
        params: Fingerprinting parameters (defaults to the module-level dict).
    """
    t = time.time()
    # NOTE(review): the FingerprintDB instance is discarded -- presumably the
    # constructor persists fingerprints to fingerprint_path as a side effect.
    FDB = FingerprintDB(database_path, fingerprint_path, params)
    t = time.time() - t
    print("Building time: {} secs".format(t))
def audioIdentification(query_path, fingerprint_path, output_file, params=params):
    """Identify every query recording against the fingerprint database.

    Writes one tab-separated line per query (query file plus top-3 ranked
    references) to output_file and prints top-1/top-3 accuracy, mean
    reciprocal rank and average per-query search time.

    Returns:
        (acc_top1, acc_top3, avg_prec, avg_t)
    """
    t = time.time()
    # database_path=None: reload previously built fingerprints from disk.
    FDB = FingerprintDB(None, fingerprint_path, params)
    t = time.time() - t
    print("Re-Building time: {} secs".format(t))
    query_list = os.listdir(query_path)
    # query_list = query_list[:10]
    t_cum = 0
    top1 = 0
    top3 = 0
    prec = 0
    with open(output_file, "w") as f:
        for query_file in query_list:
            t = time.time()
            FP_q = AudioFingerprint(os.path.join(query_path, query_file), params)
            best_ref, hit, ranked = FDB.search(FP_q, report=True)
            t_cum += time.time() - t
            # NOTE(review): `hit` is treated as the 0-based rank of the
            # correct reference (0 => top-1 hit) -- confirm in fingerprint.py.
            prec += 1/(hit+1)
            if hit == 0:
                top1 += 1
            if hit <= 2:
                top3 += 1
            f.write("{}\t{}\t{}\t{}\n".format(query_file, ranked[0], ranked[1], ranked[2]))
    acc_top1 = top1 / len(query_list)
    acc_top3 = top3 / len(query_list)
    avg_prec = prec / len(query_list)
    avg_t = t_cum / len(query_list)
    print("Top1 acc: {} Top3 acc: {} MAP: {} Time: {}".format(acc_top1, acc_top3, avg_prec, avg_t))
    return acc_top1, acc_top3, avg_prec, avg_t
if __name__ == "__main__":
    # CLI overrides for the module-level default paths.
    parser = argparse.ArgumentParser()
    parser.add_argument('--database_path', type=str, default=database_path)
    parser.add_argument('--fingerprint_path', type=str, default=fingerprint_path)
    parser.add_argument('--query_path', type=str, default=query_path)
    parser.add_argument('--output_file', type=str, default=output_file)
    args = parser.parse_args()
    print(args)
    database_path = args.database_path
    fingerprint_path = args.fingerprint_path
    query_path = args.query_path
    output_file = args.output_file
    # Build the fingerprint database, then evaluate all query recordings.
    fingerprintBuilder(database_path, fingerprint_path, params)
    audioIdentification(query_path, fingerprint_path, output_file, params)
|
# Packaging script for the 'mr-streams' distribution.
# NOTE(review): distutils is deprecated (PEP 632) and removed in Python 3.12;
# switching to setuptools' setup() is a drop-in change -- confirm before the
# next release.
from distutils.core import setup
setup(
  name = 'mr-streams',
  packages = ['mr_streams'],
  version = '0.03',
  description= "A wrapper that makes chaining list-comprehensions simpler",
  author = "u/caffeine_potent",
  author_email= "caffeine-potent@protonmail.com",
  url = 'https://github.com/caffeine-potent/Mr-Streams',
  download_url= 'https://github.com/caffeine-potent/Mr-Streams/archive/0.03.tar.gz',
  keywords = ['map-reduce', 'list-comprehension', 'map', 'reduce', 'stream'], # arbitrary keywords
  classifiers= []
)
import Nio
import numpy as np
#CONSTANTS
g = 9.81 #m/s**2
EARTH_RADIUS = 6371.0 #km
DEGREES_TO_RADIANS = np.pi/180.0
RADIANS_TO_DEGREES = 180.0/np.pi
#WRF time index
time = 0
#NOTE: in the WRF Users Guide pages 212 and 213 should have all
# WRF perturbation correction equations. (section 5 page 112/113)
def openWRF(main_directory, file_name):
    """Open a WRF NetCDF output file with PyNIO and return the file object.

    Returns None when the file cannot be opened, preserving the original
    best-effort behaviour instead of raising.

    FIX: the bare `except` hid the actual failure reason (and used a
    Python-2 print statement); the exception is now included in the message.
    """
    try:
        return Nio.open_file(main_directory + file_name, format='nc4')
    except Exception as e:
        print('Failure when opening WRF file: %s' % e)
        return None
def findNetCDFLatLonIndex(wrf, lat, lon):
    """Return the (i, j) grid indices whose XLAT/XLONG are closest to (lat, lon).

    Closeness is the sum of absolute latitude and longitude differences
    (degrees), scanned over the whole 2-D grid; ties keep the first match.
    """
    lats = wrf.variables['XLAT'][time,:,:]
    lons = wrf.variables['XLONG'][time,:,:]
    best_i, best_j = 0, 0
    best_error = 9999 + 9999
    for j in range(len(lats)):
        for i in range(len(lats[j])):
            err = abs(lat - lats[j][i]) + abs(lon - lons[j][i])
            if err < best_error:
                best_i, best_j = i, j
                best_error = err
    return best_i, best_j
def findNetCDFAltIndex(wrf, index_i, index_j, alt):
    """Return the vertical index k whose geopotential height is closest to alt (m)."""
    PH = wrf.variables["PH"][time,:,index_j,index_i]
    PHB = wrf.variables["PHB"][time,:,index_j,index_i]
    # De-stagger the geopotential (PH + PHB) to mass levels, convert to height.
    ALT = [(0.5*(PHB[i] + PH[i] + PH[i+1] + PHB[i+1])/g) for i in range(len(PH)-1)]
    best_k = 0
    best_error = 9999
    for k, level in enumerate(ALT):
        diff = abs(alt - level)
        if diff < best_error:
            best_k = k
            best_error = diff
    return best_k
def getWindSpeedAndDirection(wrf, index_i, index_j, index_k):
    """Return (speed, direction_deg, vertical) wind at one grid point.

    U/V/W are de-staggered to the mass point by averaging the two staggered
    faces, then the horizontal components are rotated from grid-relative to
    earth-relative coordinates with COSALPHA/SINALPHA.

    BUG FIX: the original overwrote U with the rotated value before computing
    V, so V was rotated using the already-rotated U. Both earth-relative
    components must be computed from the grid-relative pair.
    """
    #Website for unstaggering
    #http://www.openwfm.org/wiki/How_to_interpret_WRF_variables
    U = (wrf.variables["U"][time, index_k, index_j, index_i] + wrf.variables["U"][time, index_k, index_j, index_i + 1]) * 0.5
    V = (wrf.variables["V"][time, index_k, index_j, index_i] + wrf.variables["V"][time, index_k, index_j + 1, index_i]) * 0.5
    W = (wrf.variables["W"][time, index_k, index_j, index_i] + wrf.variables["W"][time, index_k + 1, index_j, index_i]) * 0.5
    COSALPHA = wrf.variables["COSALPHA"][time, index_j, index_i]
    SINALPHA = wrf.variables["SINALPHA"][time, index_j, index_i]
    U_earth = U*COSALPHA - V*SINALPHA
    V_earth = V*COSALPHA + U*SINALPHA
    windDir = RADIANS_TO_DEGREES * np.arctan2(U_earth, V_earth)
    windSpd = np.sqrt(U_earth**2 + V_earth**2)
    windVertical = W
    return windSpd, windDir, windVertical
def getTerrainHeight(wrf, index_i, index_j):
    """Return the terrain height HGT (m) at a horizontal grid point."""
    return wrf.variables["HGT"][time, index_j, index_i]
def getHydPressure(wrf, index_i, index_j, index_k):
    """Return the hydrostatic pressure P_HYD (Pa) at a grid point."""
    return wrf.variables["P_HYD"][time, index_k, index_j, index_i]
def getPressure(wrf, index_i, index_j, index_k):
    """Return full pressure (Pa): perturbation pressure P plus base-state PB."""
    perturbation = wrf.variables["P"][time,index_k,index_j,index_i]
    base = wrf.variables["PB"][time,index_k,index_j,index_i]
    return perturbation + base
def getTemperature(wrf, index_i, index_j, index_k):
    """Return air temperature (K) at a grid point.

    WRF stores T as perturbation potential temperature:
      1. add the 300 K base state to get potential temperature;
      2. convert potential temperature to temperature via
         T = theta * (p / p0)^(R/cp), with p0 = 1000 hPa and R/cp = 0.286
         for dry air.
    http://mailman.ucar.edu/pipermail/wrf-users/2010/001896.html
    https://en.wikipedia.org/wiki/Potential_temperature
    """
    theta_perturbation = wrf.variables["T"][time, index_k, index_j, index_i]
    theta = theta_perturbation + 300  # K
    p = getPressure(wrf, index_i, index_j, index_k)  # Pa
    return theta * (p/100000.)**(0.286)  # K
|
# Project Euler 22 (names scores): load the comma-separated, quoted name list.
# NOTE(review): `file` shadows a builtin name (Python 2 heritage); and
# readline() takes only the first line -- the whole list is expected on it.
file = open('input.txt')
fileInput = file.readline()
file.close()
namesList = fileInput.replace('\"', '').split(',')
def getScore(name, pos):
    """Alphabetical value of `name` (A=1 .. Z=26) times its 1-based position."""
    alphabetical_value = sum(ord(letter) - 64 for letter in name)
    return alphabetical_value * (pos + 1)
# Sort alphabetically, then sum each name's positional score.
namesList.sort()
totalScore = 0
for pos, name in enumerate(namesList):
    totalScore += getScore(name, pos)
print(totalScore)
import argparse
import fire
import logging
import sys
from datetime import datetime
from neural_nlp import score as score_function
_logger = logging.getLogger(__name__)
# Parse only the flags we know about; everything else is forwarded to fire.
parser = argparse.ArgumentParser()
parser.add_argument('--log_level', type=str, default='INFO')
FLAGS, FIRE_FLAGS = parser.parse_known_args()
logging.basicConfig(stream=sys.stdout, level=logging.getLevelName(FLAGS.log_level))
_logger.info(f"Running with args {FLAGS}, {FIRE_FLAGS}")
# Quiet noisy third-party loggers.
# NOTE(review): INFO only suppresses their DEBUG output; raise to WARNING if
# these loggers should be quieter -- confirm intent.
for ignore_logger in ['transformers.data.processors', 'botocore', 'boto3', 'urllib3', 's3transfer']:
    logging.getLogger(ignore_logger).setLevel(logging.INFO)
def run(benchmark, model, layers=None, subsample=None):
    """Score `model` on `benchmark` and print the result plus wall-clock duration."""
    started = datetime.now()
    result = score_function(model=model, layers=layers, subsample=subsample, benchmark=benchmark)
    finished = datetime.now()
    print(result)
    print(f"Duration: {finished - started}")
if __name__ == '__main__':
    import warnings
    # The scoring stack emits FutureWarnings that drown the output; hide them.
    warnings.simplefilter(action='ignore', category=FutureWarning)
    # Expose module functions (e.g. run) via fire, forwarding unparsed flags.
    fire.Fire(command=FIRE_FLAGS)
|
import cv2
import numpy as np
def AddText(img, text, x, y):
    """Draw red text on img (in place) with its bottom-left corner at (x, y).

    FIX (misleading naming): the original passed a variable named `lineType`
    as the 7th positional argument of cv2.putText, which is actually
    `thickness`. The rendered output was thickness-1 text all along; the
    variable name now says what it really controls.
    """
    font = cv2.FONT_HERSHEY_SIMPLEX
    bottom_left = (int(x), int(y))
    font_scale = 0.5
    color = (0, 0, 255)  # red in BGR
    thickness = 1
    cv2.putText(img, text, bottom_left, font, font_scale, color, thickness)
def reSize(img, scale_percent):
    """Return img scaled by scale_percent (e.g. 50 -> half size) using INTER_AREA."""
    height, width = img.shape[:2]
    new_dim = (int(width * scale_percent / 100), int(height * scale_percent / 100))
    # resize image
    return cv2.resize(img, new_dim, interpolation=cv2.INTER_AREA)
def reSizeImage(img, width=320, height=320):
    """Return img resized to (width, height) with INTER_AREA interpolation.

    Generalized: the target size was hard-coded to 320x320; it is now a pair
    of keyword parameters whose defaults preserve the original behaviour.
    """
    return cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)
def AddPadding(im, bordersize):
    """Return im with a constant white border of `bordersize` pixels on all sides.

    Removed dead code: the original sampled the bottom rows and computed their
    mean colour but never used it -- the border has always been plain white.
    """
    white = [255, 255, 255]
    return cv2.copyMakeBorder(
        im,
        top=bordersize,
        bottom=bordersize,
        left=bordersize,
        right=bordersize,
        borderType=cv2.BORDER_CONSTANT,
        value=white
    )
def Thresholds(imgB):
    """Binarize a BGR image: pad, grayscale, fixed threshold at 160, dilate,
    then morphological close. Returns the cleaned binary image.

    Removed dead code: an equalizeHist() result was computed but never used
    (thresholding has always run on the raw grayscale). If equalization was
    intended, threshold the equalized image instead -- TODO confirm.
    """
    padded = AddPadding(imgB, 10)
    gray = cv2.cvtColor(padded, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray, 160, 255, cv2.THRESH_BINARY)
    #ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    thresh = AddPadding(thresh, 10)
    # NOTE: a 1x1 kernel makes this dilate essentially a no-op; kept for parity.
    kernel = np.ones((1, 1), np.uint8)
    dilated = cv2.dilate(thresh, kernel, iterations=20)
    kernel = np.ones((2, 2), np.uint8)
    return cv2.morphologyEx(dilated, cv2.MORPH_CLOSE, kernel, iterations=4)
#cv2.imshow('opening', opening)
def BlackMouseColorRange(imgBGR):
    """Return a binary mask selecting near-black pixels (the dark mouse) in imgBGR."""
    hsv = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2HSV_FULL)
    # Low value/saturation/hue caps select the dark range.
    lower = np.array([0, 0, 0])
    upper = np.array([70, 70, 70])
    return cv2.inRange(hsv, lower, upper)
def BlueMouseColorRange(imgBGR):
    """Return a binary mask selecting the blue mouse's HSV range in imgBGR."""
    hsv = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2HSV_FULL)
    # Hue 120-179 with saturation >= 120 isolates the blue body.
    lower = np.array([120, 120, 0])
    upper = np.array([179, 255, 255])
    return cv2.inRange(hsv, lower, upper)
#coding: latin-1
# struct sensortype
# {
# double onYaw; // +4
# double onPitch; // +4 = 8
# double onRoll; // +4 = 12
# float T; // +4 = 16
# float P; // +4 = 20
# double light; // +4 = 24
# int yaw; // +2 = 26
# int pitch; // +2 = 28
# int roll; // +2 = 30
# int geoYaw; // +2 = 32
# int geoPitch; // +2 = 34
# int geoRoll; // +2 = 36
# int sound; // +2 = 38
# int freq; // +2 = 40
# int counter; // +2 = 42
# int distance; // +2 = 44
#
# } sensor;
# struct sensortype {
# int counter; // 2
# int encoder; // 2
# float cx; // 4
# float cy; // 4
# float cz; // 4
# float angle; // 4
# int wrist; // 2
# int elbow; // 2
# int fps; // 2
# } sensor; // 26
import serial
import time
import datetime
from struct import *
import os
import socket
import sys
import Proprioceptive as prop
import Configuration
def readsomething(ser, length):
    """Blocking read of exactly `length` characters from `ser`.

    ser.read(1) may return an empty string when no data is available
    (non-blocking port); keep polling until the buffer is full.
    """
    buf = ''
    while len(buf) < length:
        chunk = ser.read(1)
        if len(chunk) > 0:
            buf = buf + chunk
    return buf
def gimmesomething(ser):
    """Poll `ser` until a non-empty line arrives, then return it."""
    while True:
        line = ser.readline()
        if len(line) > 0:
            return line
class Sensorimotor:
    """Reads framed sensor samples from a serial port, logs them to a data
    file and forwards them over UDP telemetry.

    Frames are 'S' + <length payload bytes> + 'E'; the payload is decoded
    with struct.unpack using `mapping` (see the struct layouts above).
    NOTE: Python 2 code -- serial data is handled as byte strings (str).
    """
    def __init__(self, name, length, mapping):
        self.name = name                # tag used in the log file name
        self.keeprunning = True
        self.ip = Configuration.controllerip            # telemetry target host
        self.telemetryport = Configuration.telemetryport
        self.sensors = None             # last successfully unpacked sample
        self.data = None                # raw payload of the last frame
        self.length = length            # payload size in bytes
        self.mapping = mapping          # struct format string for unpack()
    def start(self):
        # Sensor Recording
        # Open a timestamped log file and the UDP telemetry socket.
        ts = time.time()
        st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d-%H-%M-%S')
        self.f = open('../data/sensor.'+self.name+'.'+st+'.dat', 'w')
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.server_address = (self.ip, self.telemetryport)
        self.counter = 0
    def cleanbuffer(self, ser):
        # Cancel sensor information.
        ser.write('X')
        time.sleep(6)
        # Ser should be configured in non-blocking mode.
        # Drain any stale lines still buffered on the port.
        buf = ser.readline()
        print str(buf)
        buf = ser.readline()
        print str(buf)
        buf = ser.readline()
        print str(buf)
        # Reactive sensor information
        ser.write('S')
    def send(self,data):
        # Forward a raw datagram to the telemetry endpoint.
        sent = self.sock.sendto(data, self.server_address)
    def picksensorsample(self, ser):
        # read Embed this in a loop.
        # Every ~100 calls re-issue the ping/stream commands to keep the
        # device emitting samples.
        self.counter=self.counter+1
        if (self.counter>100):
            ser.write('P')
            ser.write('S')
            self.counter=0
        myByte = ser.read(1)
        if myByte == 'S':
            readcount = 0
            #data = readsomething(ser,44)
            self.data = readsomething(ser,self.length)
            myByte = readsomething(ser,1)
            if len(myByte) >= 1 and myByte == 'E':
                # is a valid message struct
                #new_values = unpack('ffffffhhhhhhhhhh', data)
                new_values = unpack(self.mapping, self.data)
                #print new_values
                self.sensors = new_values
                #self.f.write( str(new_values[0]) + ' ' + str(new_values[1]) + ' ' + str(new_values[2]) + ' ' + str(new_values[3]) + ' ' + str(new_values[4]) + ' ' + str(new_values[5]) + ' ' + str(new_values[6]) + ' ' + str(new_values[7]) + ' ' + str(new_values[8]) + ' ' + str(new_values[9]) + ' ' + str(new_values[10]) + ' ' + str(new_values[11]) + ' ' + str(new_values[12]) + ' ' + str(new_values[13]) + ' ' + str(new_values[14]) + '\n')
                self.f.write(' '.join(map(str, new_values)) + '\n')
                return new_values
    def close(self):
        self.f.close()
        self.sock.close()
    def restart(self):
        # Roll over to a fresh log file and socket.
        self.close()
        self.start()
if __name__ == "__main__":
    [smnr, mtrn] = prop.serialcomm()
    # BUG FIX: Sensorimotor requires (name, length, mapping) -- the original
    # called it with no arguments and then invoked a nonexistent method
    # `sendsensorsample`. Use the 26-byte frame layout documented above
    # ('hhffffhhh': counter, encoder, cx, cy, cz, angle, wrist, elbow, fps)
    # and the actual `picksensorsample` method.
    sensorimotor = Sensorimotor('arm', 26, 'hhffffhhh')
    sensorimotor.start()
    sensorimotor.cleanbuffer(smnr)
    while True:
        sensorimotor.picksensorsample(smnr)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from oj_helper import *
class Solution(object):
    """LeetCode 299 'Bulls and Cows'."""

    def getHint(self, secret, guess):
        """Return the hint string 'xAyB' (x bulls, y cows) for one guess.

        Bulls are digits correct in both value and position; cows are digits
        present in the secret but in the wrong position.

        :type secret: str
        :type guess: str
        :rtype: str
        """
        if (not secret) or (len(secret) != len(guess)):
            return '0A0B'
        bulls = 0
        # Per-digit counts of the characters that did NOT match positionally.
        secret_left = {}
        guess_left = {}
        for s_ch, g_ch in zip(secret, guess):
            if int(s_ch) == int(g_ch):
                bulls += 1
            else:
                secret_left[s_ch] = secret_left.get(s_ch, 0) + 1
                guess_left[g_ch] = guess_left.get(g_ch, 0) + 1
        cows = sum(min(count, secret_left.get(digit, 0))
                   for digit, count in guess_left.items())
        return '%dA%dB' % (bulls, cows)
if __name__ == '__main__':
    # Smoke tests (Python 2 print statements).
    s = Solution()
    print s.getHint('1807', '7810')
    # expected: 1A3B
    print s.getHint('1123', '0111')
    # expected: 1A1B
|
from django.db import models
from location.models import Location
# Create your models here.
class Character(models.Model):
    """A character, their lineage, and the bookkeeping of their on-screen deaths."""
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0 (which
    # defaulted to CASCADE); on_delete is mandatory from 2.0 onward -- confirm
    # the project's Django version before upgrading.
    birthplace = models.ForeignKey(Location, related_name='birthplace')
    description = models.CharField(max_length=255)
    times_killed = models.IntegerField(default=0, )
    location_of_death = models.ForeignKey(Location, blank=True, null=True, related_name='location_of_death')
    # Self-referential: the character (if any) who killed this one.
    murderer = models.ForeignKey('self', blank=True, null=True)
    manner_of_death = models.CharField(max_length=255, blank=True, null=True)
    times_won_throne = models.IntegerField(default=0)
    times_survived = models.IntegerField(default=0)
    # NOTE(review): a leading slash in upload_to ('/images/') is unusual --
    # Django joins it to MEDIA_ROOT; confirm the intended upload directory.
    image = models.ImageField(upload_to='/images/', null=True, blank=True)
    def __str__(self):
        return self.first_name + " " + self.last_name
|
from sys import exit
from pygame import quit
from pygame.event import get, post, Event
from pygame.locals import *
from keyboard import Keyboard
from mouse import Mouse
class EventHandler:
    """Polls the pygame event queue and dispatches events to Keyboard/Mouse."""
    @staticmethod
    def pollEvents ():
        # Drain the queue; a QUIT event shuts the whole process down.
        for e in get():
            if e.type == QUIT:
                print ("quitting...")
                quit()
                exit()
            Keyboard.handleEvents (e)
            Mouse.handleEvents(e)
        # Held-key state is processed once per poll, after the queue is drained.
        Keyboard.handlePressed()
    @staticmethod
    def postQuit ():
        # Lets other modules request a clean shutdown via the event loop.
        post (Event(QUIT))
|
# 141, Суптеля Владислав
# 【Дата】:「19.03.20」
# 2. Даний рядок, що містить повне ім'я файлу (наприклад, 'C:\WebServers\home\testsite\www\myfile.txt').
# Виділіть з цього рядка ім'я файлу без розширення.
import os
str = "C:\WebServers\home\\testsite\www\myfile.txt"
print("Метод 1 [OS]: \n", os.path.splitext(os.path.basename(str))[0])
# [0] = 'myfile', [1] = '.txt'
a = str.split("\\")
a = a[5]
print("Метод 2 [.split]: \n", a[:-4])
|
def user_select():
    """Allow user to select their name."""
    # NOTE(review): the query result is never returned or used -- this
    # function appears unfinished/dead; determine_user() below performs the
    # actual selection flow.
    users = Employee.select().order_by(Employee.name.desc())
def determine_user():
    """Determine whether active user is existing or new.

    BUG FIX: `global active_user` was declared separately inside both
    branches; the second declaration appeared textually after an assignment
    to the name, which raises SyntaxError in Python 3 ("name is assigned to
    before global declaration"). A single declaration at the top of the
    function is legal and clearer.
    """
    global active_user
    c_s()
    print(
        """Welcome, wage slave! Keep reaching for that rainbow!\n
        This work log has been provided by your benevolent masters.\n
        Please confirm your identity or select New User to open your account.
        """)
    if input("Please enter N for new user and ENTER for existing user:\n> ").lower() == 'n':
        c_s()
        active_user = input("Please enter your name:\n> ")
        main_menu()
    else:
        print("Please enter the NUMBER corresponding with your name and hit ENTER")
        employees = Employee.select().order_by(Employee.name.desc())
        # employees is an iterable of full Employee records sorted by name.
        # enumerate(..., start=1) pairs each record with a 1-based menu number,
        # so the user's choice must be shifted by one to index `holder`.
        holder = []
        for employee in enumerate(employees, start=1):
            holder.append(employee)
            print("[{}] {}".format(employee[0], employee[1].name))
        choice = int(input("\n> "))
        # NOTE(review): the new-user branch stores a plain string in
        # active_user while this branch stores an Employee record -- confirm
        # downstream code handles both.
        active_user = holder[choice-1][1]
        main_menu()
import os
import falcon
# NOTE(review): 'falcr' is a project-local package; ROOT is the project root
# directory that static file paths are resolved against.
from falcr.config import getLogger, ROOT
log = getLogger(__name__)
class StaticResource(object):
    """Falcon resource serving a single static file with a fixed content type."""

    def __init__(self, filename, content_type):
        # Resolve the file relative to the project root once, up front.
        self.filename = os.path.join(ROOT, filename)
        self.content_type = content_type

    def on_get(self, req, resp):
        """Serve the file's raw bytes on GET."""
        resp.status = falcon.HTTP_200
        resp.content_type = self.content_type
        with open(self.filename, 'rb') as fh:
            resp.body = fh.read()
|
# Generated by Django 3.1.4 on 2020-12-23 09:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds the optional 'uniqueUrlName' CharField to User."""
    dependencies = [
        ('users_endpoint', '0004_auto_20201223_1205'),
    ]
    operations = [
        migrations.AddField(
            model_name='user',
            name='uniqueUrlName',
            field=models.CharField(blank=True, max_length=255),
        ),
    ]
|
import numpy as np
import torch
def PSNR(im, gt, shave_border=0):
    """Peak signal-to-noise ratio between a noisy image and its ground truth.

    Args:
        im: image with noise, values in [0, 255] (2-D grayscale or 3-D CxHxW).
        gt: ground-truth image, same shape as im.
        shave_border: width of the border to exclude on each side.

    Returns:
        PSNR in dB; 100 for identical images, -1 when the shapes differ.
    """
    im_shape = im.shape
    gt_shape = gt.shape
    if gt_shape != im_shape:
        return -1
    im = np.array(im, dtype=np.float32)
    gt = np.array(gt, dtype=np.float32)
    # Optionally crop the border before comparing (common in SR evaluation).
    if len(im_shape) == 3:
        c, h, w = im_shape
        im = im[:, shave_border:h - shave_border, shave_border:w - shave_border]
        gt = gt[:, shave_border:h - shave_border, shave_border:w - shave_border]
    elif len(im_shape) == 2:
        h, w = im_shape
        im = im[shave_border:h - shave_border, shave_border:w - shave_border]
        gt = gt[shave_border:h - shave_border, shave_border:w - shave_border]
    mse = np.mean((gt - im) ** 2)
    if mse == 0:
        return 100
    return 10 * np.log10(255 ** 2 / mse)
def SSIM(im, gt):
    """Structural similarity index between two images (single global window).

    Uses the standard constants K1=0.01, K2=0.03, L=255 and multiplies the
    luminance, contrast and structure terms computed over the whole image.
    Returns -1 when the shapes differ.
    """
    if im.shape != gt.shape:
        return -1
    # C1 = (K1*L)^2, C2 = (K2*L)^2, C3 = C2/2 with K1=0.01, K2=0.03, L=255
    C1 = (0.01 * 255) ** 2
    C2 = (0.03 * 255) ** 2
    C3 = C2 / 2.0
    mu_im = im.mean()
    mu_gt = gt.mean()
    # 2x2 covariance matrix of the flattened images: row 0 = gt, row 1 = im.
    cov = np.cov([gt.flatten(), im.flatten()])
    var_gt, var_im, cov_xy = cov[0, 0], cov[1, 1], cov[0, 1]
    sigma_gt = np.sqrt(var_gt)
    sigma_im = np.sqrt(var_im)
    luminance = (2 * mu_im * mu_gt + C1) / (mu_im ** 2 + mu_gt ** 2 + C1)
    contrast = (2 * sigma_gt * sigma_im + C2) / (var_gt + var_im + C2)
    structure = (cov_xy + C3) / (sigma_gt * sigma_im + C3)
    return luminance * contrast * structure
class Normalize(object):
    """Normalize a tensor image with mean and standard deviation, in place.

    Given mean: (R, G, B) and std: (R, G, B), each channel of the
    torch.*Tensor is transformed as channel = (channel - mean) / std.

    Args:
        mean (sequence): Sequence of means for R, G, B channels respectively.
        std (sequence): Sequence of standard deviations for R, G, B channels
            respectively.
    """
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        """Normalize `tensor` (C, H, W) in place and return it.

        BUG FIX: the original body rebound the loop variable
        (`t = t.mul(s).add(m)`), which discarded the result and returned the
        input unchanged -- and the math it computed was de-normalization,
        contradicting this docstring. The documented (channel - mean) / std
        is now applied in place, mirroring the companion deNormalize class.
        """
        for t, m, s in zip(tensor, self.mean, self.std):
            t.sub_(m).div_(s)
        return tensor
class deNormalize(object):
    """Undo torchvision-style normalization in place:
    channel = channel * std + mean.

    Args:
        mean (sequence): per-channel means used by the original Normalize.
        std (sequence): per-channel standard deviations used by the original
            Normalize.
    """
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        """De-normalize `tensor` (C, H, W) in place and return it."""
        for channel, m, s in zip(tensor, self.mean, self.std):
            channel.mul_(s).add_(m)
        return tensor
import tkinter
from .ctk_canvas import CTkCanvas
from ..theme_manager import ThemeManager
from ..draw_engine import DrawEngine
from .widget_base_class import CTkBaseClass
class CTkEntry(CTkBaseClass):
    def __init__(self, *args,
                 bg_color=None,
                 fg_color="default_theme",
                 text_color="default_theme",
                 placeholder_text_color="default_theme",
                 text_font="default_theme",
                 placeholder_text=None,
                 corner_radius="default_theme",
                 border_width="default_theme",
                 border_color="default_theme",
                 width=140,
                 height=28,
                 state=tkinter.NORMAL,
                 textvariable: tkinter.Variable = None,
                 **kwargs):
        """Themed entry widget: a tkinter.Entry layered over a CTkCanvas that
        draws the rounded background and border.

        "default_theme" sentinels resolve to the active ThemeManager values;
        any other value overrides the theme. Remaining **kwargs are forwarded
        to the underlying tkinter.Entry.
        """
        # transfer basic functionality (bg_color, size, _appearance_mode, scaling) to CTkBaseClass
        if "master" in kwargs:
            super().__init__(*args, bg_color=bg_color, width=width, height=height, master=kwargs.pop("master"))
        else:
            super().__init__(*args, bg_color=bg_color, width=width, height=height)
        # configure grid system (1x1)
        self.grid_rowconfigure(0, weight=1)
        self.grid_columnconfigure(0, weight=1)
        # color
        self.fg_color = ThemeManager.theme["color"]["entry"] if fg_color == "default_theme" else fg_color
        self.text_color = ThemeManager.theme["color"]["text"] if text_color == "default_theme" else text_color
        self.placeholder_text_color = ThemeManager.theme["color"]["entry_placeholder_text"] if placeholder_text_color == "default_theme" else placeholder_text_color
        self.text_font = (ThemeManager.theme["text"]["font"], ThemeManager.theme["text"]["size"]) if text_font == "default_theme" else text_font
        self.border_color = ThemeManager.theme["color"]["entry_border"] if border_color == "default_theme" else border_color
        # shape
        self.corner_radius = ThemeManager.theme["shape"]["button_corner_radius"] if corner_radius == "default_theme" else corner_radius
        self.border_width = ThemeManager.theme["shape"]["entry_border_width"] if border_width == "default_theme" else border_width
        # placeholder text
        self.placeholder_text = placeholder_text
        self.placeholder_text_active = False
        self.pre_placeholder_arguments = {} # some set arguments of the entry will be changed for placeholder and then set back
        # textvariable
        self.textvariable = textvariable
        self.state = state
        # Canvas draws the rounded rect; the real Entry sits on top of it.
        self.canvas = CTkCanvas(master=self,
                                highlightthickness=0,
                                width=self.apply_widget_scaling(self._current_width),
                                height=self.apply_widget_scaling(self._current_height))
        self.canvas.grid(column=0, row=0, sticky="nswe")
        self.draw_engine = DrawEngine(self.canvas)
        self.entry = tkinter.Entry(master=self,
                                   bd=0,
                                   width=1,
                                   highlightthickness=0,
                                   font=self.apply_font_scaling(self.text_font),
                                   state=self.state,
                                   textvariable=self.textvariable,
                                   **kwargs)
        # Inset the Entry so its text clears the rounded corners and border.
        self.entry.grid(column=0, row=0, sticky="nswe",
                        padx=self.apply_widget_scaling(self.corner_radius) if self.corner_radius >= 6 else self.apply_widget_scaling(6),
                        pady=(self.apply_widget_scaling(self.border_width), self.apply_widget_scaling(self.border_width + 1)))
        super().bind('<Configure>', self.update_dimensions_event)
        # Focus events drive showing/hiding the placeholder text.
        self.entry.bind('<FocusOut>', self.entry_focus_out)
        self.entry.bind('<FocusIn>', self.entry_focus_in)
        self.activate_placeholder()
        self.draw()
self.draw()
def set_scaling(self, *args, **kwargs):
    """Re-apply widget and font scaling, then redraw the entry."""
    super().set_scaling(*args, **kwargs)

    self.entry.configure(font=self.apply_font_scaling(self.text_font))
    # Keep the inner tkinter entry clear of the rounded corners
    # (minimum logical padding of 6 before scaling).
    inner_padx = self.apply_widget_scaling(self.corner_radius if self.corner_radius >= 6 else 6)
    self.entry.grid(column=0, row=0, sticky="we", padx=inner_padx)

    self.canvas.configure(width=self.apply_widget_scaling(self._desired_width),
                          height=self.apply_widget_scaling(self._desired_height))
    self.draw()
def set_dimensions(self, width=None, height=None):
    """Resize the widget, sync the canvas to the new size, and redraw."""
    super().set_dimensions(width, height)

    scaled_width = self.apply_widget_scaling(self._desired_width)
    scaled_height = self.apply_widget_scaling(self._desired_height)
    self.canvas.configure(width=scaled_width, height=scaled_height)
    self.draw()
def draw(self, no_color_updates=False):
    """Render the rounded background/border and recolor the inner entry.

    Args:
        no_color_updates: when True, skip recoloring unless the draw engine
            reports that new canvas items were created and must be colored.
    """
    self.canvas.configure(bg=ThemeManager.single_color(self.bg_color, self._appearance_mode))

    requires_recoloring = self.draw_engine.draw_rounded_rect_with_border(self.apply_widget_scaling(self._current_width),
                                                                         self.apply_widget_scaling(self._current_height),
                                                                         self.apply_widget_scaling(self.corner_radius),
                                                                         self.apply_widget_scaling(self.border_width))

    if requires_recoloring or no_color_updates is False:
        # A transparent fg_color (None) falls back to bg_color so the entry
        # blends into its parent. The original if/else branches were
        # identical except for this color, so compute it once (DRY).
        inner_color = ThemeManager.single_color(self.fg_color, self._appearance_mode)
        if inner_color is None:
            inner_color = ThemeManager.single_color(self.bg_color, self._appearance_mode)
        text_color = ThemeManager.single_color(self.text_color, self._appearance_mode)

        self.canvas.itemconfig("inner_parts",
                               fill=inner_color,
                               outline=inner_color)
        self.entry.configure(bg=inner_color,
                             disabledbackground=inner_color,
                             highlightcolor=inner_color,
                             fg=text_color,
                             disabledforeground=text_color,
                             insertbackground=text_color)

        self.canvas.itemconfig("border_parts",
                               fill=ThemeManager.single_color(self.border_color, self._appearance_mode),
                               outline=ThemeManager.single_color(self.border_color, self._appearance_mode))

        # While the placeholder is shown, its color overrides the text color.
        if self.placeholder_text_active:
            self.entry.config(fg=ThemeManager.single_color(self.placeholder_text_color, self._appearance_mode))
def bind(self, *args, **kwargs):
    """Forward an event binding to the inner tkinter entry.

    Returns:
        The tkinter binding identifier (funcid), so callers can later
        pass it to unbind. The original discarded it.
    """
    return self.entry.bind(*args, **kwargs)
def configure(self, require_redraw=False, **kwargs):
    """Update CTkEntry options in place.

    CTk-specific options (state, the color options, corner_radius,
    width/height, placeholder settings, textvariable, text_font, show,
    bg_color) are consumed here; any kwargs remaining afterwards are
    forwarded untouched to the underlying tkinter.Entry.

    Args:
        require_redraw: force a redraw even if no color option changed.
        **kwargs: option=value pairs to apply.
    """
    if "state" in kwargs:
        self.state = kwargs.pop("state")
        self.entry.configure(state=self.state)
    if "fg_color" in kwargs:
        self.fg_color = kwargs.pop("fg_color")
        require_redraw = True
    if "text_color" in kwargs:
        self.text_color = kwargs.pop("text_color")
        require_redraw = True
    if "border_color" in kwargs:
        self.border_color = kwargs.pop("border_color")
        require_redraw = True
    if "corner_radius" in kwargs:
        self.corner_radius = kwargs.pop("corner_radius")
        # Clamp the radius so opposite rounded corners can never overlap.
        if self.corner_radius * 2 > self._current_height:
            self.corner_radius = self._current_height / 2
        elif self.corner_radius * 2 > self._current_width:
            self.corner_radius = self._current_width / 2
        # Re-pad the inner entry so text stays clear of the rounded corners.
        self.entry.grid(column=0, row=0, sticky="we", padx=self.apply_widget_scaling(self.corner_radius) if self.corner_radius >= 6 else self.apply_widget_scaling(6))
        require_redraw = True
    if "width" in kwargs:
        self.set_dimensions(width=kwargs.pop("width"))
    if "height" in kwargs:
        self.set_dimensions(height=kwargs.pop("height"))
    if "placeholder_text" in kwargs:
        self.placeholder_text = kwargs.pop("placeholder_text")
        if self.placeholder_text_active:
            # Placeholder currently visible: swap the displayed text directly.
            self.entry.delete(0, tkinter.END)
            self.entry.insert(0, self.placeholder_text)
        else:
            self.activate_placeholder()
    if "placeholder_text_color" in kwargs:
        self.placeholder_text_color = kwargs.pop("placeholder_text_color")
        require_redraw = True
    if "textvariable" in kwargs:
        self.textvariable = kwargs.pop("textvariable")
        self.entry.configure(textvariable=self.textvariable)
    if "text_font" in kwargs:
        self.text_font = kwargs.pop("text_font")
        self.entry.configure(font=self.apply_font_scaling(self.text_font))
    if "show" in kwargs:
        if self.placeholder_text_active:
            # Defer: the real "show" value (e.g. password masking) is
            # restored when the placeholder is deactivated.
            self.pre_placeholder_arguments["show"] = kwargs.pop("show")
        else:
            self.entry.configure(show=kwargs.pop("show"))
    if "bg_color" in kwargs:
        super().configure(bg_color=kwargs.pop("bg_color"), require_redraw=require_redraw)
    else:
        super().configure(require_redraw=require_redraw)
    self.entry.configure(**kwargs)  # pass remaining kwargs to entry
def activate_placeholder(self):
    """Show the placeholder text if the entry is empty and one is configured.

    NOTE(review): the `self.textvariable == ""` check only matches when the
    textvariable option was passed as an empty string (not as a Variable
    holding "") -- preserved as-is from the original.
    """
    may_show = (self.entry.get() == ""
                and self.placeholder_text is not None
                and (self.textvariable is None or self.textvariable == ""))
    if not may_show:
        return

    self.placeholder_text_active = True
    # Remember the current "show" option (e.g. password masking) so it can
    # be restored when the placeholder is removed.
    self.pre_placeholder_arguments = {"show": self.entry.cget("show")}
    self.entry.config(fg=ThemeManager.single_color(self.placeholder_text_color, self._appearance_mode), show="")
    self.entry.delete(0, tkinter.END)
    self.entry.insert(0, self.placeholder_text)
def deactivate_placeholder(self):
    """Remove the placeholder text and restore the saved entry options."""
    if not self.placeholder_text_active:
        return

    self.placeholder_text_active = False
    self.entry.config(fg=ThemeManager.single_color(self.text_color, self._appearance_mode))
    self.entry.delete(0, tkinter.END)
    # Restore options (e.g. "show") that were overridden while the
    # placeholder was visible.
    for option, value in self.pre_placeholder_arguments.items():
        self.entry[option] = value
def entry_focus_out(self, event=None):
    """<FocusOut> handler: re-show the placeholder if the entry is empty."""
    self.activate_placeholder()
def entry_focus_in(self, event=None):
    """<FocusIn> handler: clear the placeholder so the user can type."""
    self.deactivate_placeholder()
def delete(self, *args, **kwargs):
    """Delete characters from the entry; re-show the placeholder if it empties."""
    self.entry.delete(*args, **kwargs)
    if self.entry.get() == "":
        self.activate_placeholder()
def insert(self, *args, **kwargs):
    """Insert text into the entry, first clearing any active placeholder."""
    self.deactivate_placeholder()
    return self.entry.insert(*args, **kwargs)
def get(self):
    """Return the entry's text, or "" while the placeholder is displayed."""
    return "" if self.placeholder_text_active else self.entry.get()
def focus(self):
    """Give keyboard focus to the inner tkinter entry."""
    self.entry.focus()
def focus_force(self):
    """Force keyboard focus onto the inner tkinter entry."""
    self.entry.focus_force()
|
import os
datas = []
if len(os.popen("tmutil listlocalsnapshotdates").read().split("\n")) == 2:
print("Your system is clean!")
else:
data = os.popen("tmutil listlocalsnapshotdates").read().split("\n")
for d in data:
datas.append(d)
del datas[0]
del datas[-1]
for n in range(len(datas)):
os.system(f'tmutil deletelocalsnapshots { datas[n] }') |
from flask import Flask
from marshmallow import Schema, fields, pre_load, validate
from flask_marshmallow import Marshmallow
from flask_sqlalchemy import SQLAlchemy, BaseQuery
# Shared extension instances; presumably bound to the Flask app elsewhere
# (via ma.init_app(app) / db.init_app(app)) -- TODO confirm against the
# application factory.
ma = Marshmallow()
db = SQLAlchemy()
#PROFILE
class Profile(db.Model):
    """A registered user profile row in the `profiles` table."""

    __tablename__ = 'profiles'

    # Removed a dead commented-out column that referenced the nonexistent
    # db.DataTime API (typo for db.DateTime).
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(20), unique=True, nullable=False)
    mobile = db.Column(db.String(10), unique=True, nullable=False)
    country = db.Column(db.String(15), nullable=False)
    email = db.Column(db.String(50), unique=True, nullable=False)
    username = db.Column(db.String(15), unique=True, nullable=False)
    # NOTE(review): an 80-char string -- presumably a password hash; verify
    # callers never store plaintext here.
    password = db.Column(db.String(80), nullable=False)

    def __init__(self, name, mobile, country, email, username, password):
        self.name = name
        self.mobile = mobile
        self.country = country
        self.email = email
        self.username = username
        self.password = password
class ProfileSchema(ma.Schema):
    """Marshmallow (de)serialization schema for Profile rows."""
    id = fields.Integer()
    name = fields.String(required=True)
    mobile = fields.String(required=True)
    country = fields.String(required=True)
    email = fields.String(required=True)
    username = fields.String(required=True)
    # NOTE(review): password is included in serialized output by this schema
    # -- consider load_only=True; confirm no endpoint dumps it to clients.
    password = fields.String(required=True)
#WALLET
class Wallet(db.Model):
    """A wallet row tied to one profile, holding a balance and a coin list."""
    __tablename__ = 'wallets'
    id = db.Column(db.Integer, primary_key=True)
    # Cascades keep the wallet in sync when its profile row changes or is
    # deleted.
    profile_id = db.Column(db.Integer, db.ForeignKey('profiles.id', onupdate='CASCADE', ondelete='CASCADE'))
    # NOTE(review): the backref name "profiles" collides with the profiles
    # table name, and uselist=False makes this effectively one-to-one --
    # confirm intent.
    profiles = db.relationship("Profile", backref=db.backref("profiles", uselist=False))
    total_balance = db.Column(db.Integer)
    # NOTE(review): presumably a serialized (e.g. JSON/CSV) list of coin
    # names -- schema not visible here; verify against callers.
    list_of_coin = db.Column(db.String())
    def __init__(self, profile_id, total_balance, list_of_coin):
        self.profile_id = profile_id
        self.total_balance = total_balance
        self.list_of_coin = list_of_coin
class WalletSchema(ma.Schema):
    """Marshmallow (de)serialization schema for Wallet rows."""
    id = fields.Integer()
    profile_id = fields.Integer()
    total_balance = fields.Integer()
    list_of_coin = fields.String(required=True)
|
# Generated by Django 3.2.2 on 2021-05-31 07:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the LaboratoryBooking table and alter queryform.phone.

    NOTE(review): if this migration has already been applied to any
    database, change its effects only via a new follow-up migration,
    never by editing this file.
    """

    dependencies = [
        ('user_sys', '0003_alter_queryform_des'),
    ]

    operations = [
        migrations.CreateModel(
            name='LaboratoryBooking',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): IntegerField drops leading zeros and can
                # overflow long numbers -- confirm CharField was not intended.
                ('mobileNumber', models.IntegerField()),
                ('email', models.CharField(max_length=30)),
                ('addressField', models.TextField()),
                ('cityField', models.CharField(max_length=30)),
                ('stateField', models.CharField(max_length=30)),
                ('lastCropSown', models.CharField(max_length=30)),
                ('soilType', models.CharField(max_length=30)),
                ('cropType', models.CharField(max_length=30)),
            ],
        ),
        migrations.AlterField(
            model_name='queryform',
            name='phone',
            field=models.IntegerField(),
        ),
    ]
|
"""WWWairlines URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, re_path
import flights.views
urlpatterns = [
    path('', flights.views.main),
    # Anchored with ^ and $: Django matches re_path patterns with re.search,
    # so the unanchored original also resolved any URL that merely
    # *contained* "flight/<n>/" and accepted trailing garbage.
    re_path(r'^flight/([0-9]+)/$', flights.views.flight, name="flight"),
    path('admin/', admin.site.urls),
    path('ajax/checkLoginStatus/', flights.views.checkLoginStatus, name="checkLoginStatus"),
    path('ajax/getFlightsOnDate/', flights.views.getFlightsOnDate, name="getFlightsOnDate"),
    path('ajax/changeCrewAssignment/', flights.views.changeCrewAssignment, name="changeCrewAssignment"),
    path('ajax/getCrewMembers/', flights.views.getCrewMembers, name="getCrewMembers"),
    path('ajax/changeMembership/', flights.views.changeMembership, name="changeMembership"),
    path('ajax/createNewCrew/', flights.views.createNewCrew, name="createNewCrew"),
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.