text stringlengths 38 1.54M |
|---|
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Auto screenshot base class."""
import unittest
try:
from PIL import ImageGrab
except ImportError:
ImageGrab = None
SCREENSHOTMASK = "scr-{name}.jpg"
def save_screenshot(name):
    """
    Capture the whole screen into a JPEG named by SCREENSHOTMASK.

    Silently does nothing when PIL's ImageGrab could not be imported.
    Prefer this helper over a direct ``ImageGrab.grab()`` call in tests so
    the screenshot file name always matches the CI configuration.
    """
    if ImageGrab is None:
        return
    ImageGrab.grab().save(SCREENSHOTMASK.format(name=name), "JPEG")
class AutoScreenshotTestCase(unittest.TestCase):
    """
    Base class for pywinauto UI tests.

    Make screen shots if a test fails.
    Screenshot names example:
        scr-testEnableDisable.jpg - failed in the main test section.
        scr-testEnableDisable_setUp.jpg - failed in the setUp method.
    """

    def _proxify(self, method_name):
        """
        Proxy call for a regular unittest.TestCase method.

        It is the only solution to intercept an error immediately
        and immediately make a screenshot.
        """
        # save original method to a local variable
        original_method = getattr(self, method_name)

        def proxy(*args, **kwargs):
            """A proxy of the original method."""
            try:
                original_return = original_method(*args, **kwargs)
            except BaseException:
                # BaseException also covers KeyboardInterrupt/SystemExit, so a
                # screenshot is taken for any kind of abort, not just failures.
                if self._testMethodName == method_name:
                    # test's main execution section
                    name = method_name
                else:
                    # setUp or tearDown failed; include both names in the file
                    name = "{test_name}_{method_name}".format(
                        test_name=self._testMethodName,
                        method_name=method_name)
                save_screenshot(name)
                # re-raise the original exception
                raise
            else:
                return original_return

        # replace the original method by own handler
        setattr(self, method_name, proxy)

    def __init__(self, *args, **kwargs):
        """Register methods to check for failures/errors."""
        super(AutoScreenshotTestCase, self).__init__(*args, **kwargs)
        # proxify needed methods: the test itself plus its fixture hooks
        self._proxify(self._testMethodName)
        self._proxify('setUp')
        self._proxify('tearDown')
|
# encoding: utf-8
import os
from pkg_resources import resource_listdir, resource_filename
from .util import Cache
__all__ = ['Resolver']
class Resolver(Cache):
    """Cache mapping template path strings to (engine, absolute path) pairs.

    Template references take the form ``engine:package/path`` where every
    part is optional.  Resolved results are memoized via the Cache base.
    """

    def __init__(self, default=None, capacity=50):
        """
        :param default: engine name used when the reference names none.
        :param capacity: maximum number of cached resolutions.
        """
        super(Resolver, self).__init__(capacity)
        self.default = default

    def parse(self, path):
        """Split ``path`` into an ``(engine, package, path)`` triple.

        Any element of the triple may be None when absent from ``path``.
        """
        # Split the engine and template parts.
        engine, _, template = path.rpartition(':')
        # Handle Windows paths: a one-letter "engine" is really a drive
        # letter, so undo the split.
        if engine and len(engine) == 1:
            template = engine + ':' + template
            engine = ''
        if not engine:
            engine = self.default
        if not template:
            return (engine, None, None)
        # Absolute/relative filesystem paths (including Windows drive paths)
        # need no package lookup.  Guard the second-character test so a
        # single-character template cannot raise IndexError.
        if template[0] in ('/', '.') or (len(template) > 1 and template[1] == ':'):
            return (engine, None, template)
        package, _, path = template.partition('/')
        return (engine, package, path if path else None)

    def __call__(self, template):
        """Resolve ``template`` to ``(engine, absolute_path)`` and cache it."""
        # If we already have this value cached, return the cached copy.
        if template in self:
            return self[template]
        # Parse the passed template.
        engine, package, path = self.parse(template)
        if not package:
            if not path:
                # Handle bare engines, e.g. serializers.
                self[template] = (engine, None)
                return self[template]
            # Handle absolute and relative paths.
            path = path.replace('/', os.path.sep)
            self[template] = (engine, os.path.abspath(path))
            return self[template]
        parts = package.split('.')
        if not path:
            # A bare dotted name: last component is the resource itself.
            parts, path = parts[:-1], parts[-1]
        path = path.split('/')
        # Candidate files whose name starts with the requested stem plus a
        # dot; a trailing '~' (editor backup marker) is stripped.
        possibilities = set([i.rstrip('~')
                             for i in resource_listdir('.'.join(parts), '/'.join(path[:-1]))
                             if i.startswith(path[-1] + '.')])
        if len(possibilities) == 1:
            path[-1] = list(possibilities)[0]
        elif len(possibilities) > 1:
            if path[-1] not in possibilities:
                raise ValueError('Ambiguous template name. Please use the following template path syntax: %s/%s.[%s]' % (
                        '.'.join(parts),
                        '/'.join(path),
                        ','.join([i.split('.')[-1] for i in possibilities])
                    ))
        self[template] = (engine, resource_filename('.'.join(parts), '/'.join(path)))
        return self[template]
|
import sqlite3
class DBManager:
    """Thin wrapper around an sqlite3 database used by the food-ordering bot."""

    def __init__(self, database):
        """Open a connection to ``database`` (a file path or ':memory:')."""
        self.connection = sqlite3.connect(database)
        self.cursor = self.connection.cursor()

    def select_all(self):
        """Return every menu item as a list of row tuples."""
        with self.connection:
            return self.cursor.execute('SELECT * FROM django_app_food').fetchall()

    def select_all_workers(self):
        """Return the telegram user id of every company worker."""
        with self.connection:
            return self.cursor.execute(
                'SELECT telegram_user_id FROM django_app_workers').fetchall()

    def insert_order(self, order_date, telegram_user_id, order_sum, order_numbers):
        """Store one order and the dishes that belong to it.

        :param order_numbers: iterable of food ids ordered by the customer.
        """
        with self.connection:
            self.cursor.execute('INSERT INTO django_app_orders (date_of_creation, customer_id, sum )'
                                'VALUES (?, ?, ?)', (order_date, telegram_user_id, order_sum))
            last_id = self.cursor.lastrowid
            # Link every ordered dish to the freshly created order row in one batch.
            self.cursor.executemany('INSERT INTO django_app_orders_dishes (orders_id, food_id)'
                                    'VALUES (?, ?)',
                                    [(last_id, number) for number in order_numbers])

    def select_month_results(self, beginning_month, end_month):
        """Return (sum, customer_id) pairs for orders created inside the inclusive date range."""
        with self.connection:
            return self.cursor.execute('SELECT sum, customer_id FROM django_app_orders WHERE date_of_creation >= ? '
                                       'AND date_of_creation <= ?', (beginning_month, end_month)).fetchall()

    def insert_month_results(self, month_results):
        """Persist the monthly totals.

        Each entry is a (worker_id, amount) pair, stored as
        (amount_per_month, worker_id) columns.
        """
        with self.connection:
            self.cursor.executemany('INSERT INTO django_app_resultsmonth (amount_per_month, worker_id)'
                                    'VALUES (?, ?)',
                                    [(result[1], result[0]) for result in month_results])

    def delete_month_results(self):
        """Drop the previous monthly totals before writing new ones."""
        with self.connection:
            self.cursor.execute('DELETE FROM django_app_resultsmonth')

    def close(self):
        """Close the current database connection."""
        self.connection.close()
# -*- coding: utf-8 -*-
# @File : removeElement.py
# @Author: ZRN
# @Date : 2018/9/29
"""
给定一个数组 nums 和一个值 val,你需要原地移除所有数值等于 val 的元素,返回移除后数组的新长度。
不要使用额外的数组空间,你必须在原地修改输入数组并在使用 O(1) 额外空间的条件下完成。
元素的顺序可以改变。你不需要考虑数组中超出新长度后面的元素.
"""
class Solution:
    def removeElement(self, nums, val):
        """
        Remove all occurrences of ``val`` from ``nums`` in place.

        :type nums: List[int]
        :type val: int
        :rtype: int -- the new length of ``nums``

        The original version deleted elements one by one with ``del nums[i]``,
        which costs O(n) per deletion (O(n^2) worst case).  This version
        compacts the list with a write pointer in a single O(n) pass and
        O(1) extra space, then truncates, preserving the original behavior
        of shrinking ``nums`` to exactly the surviving elements.
        """
        write = 0
        for value in nums:
            if value != val:
                nums[write] = value
                write += 1
        # Truncate the tail so len(nums) equals the returned length.
        del nums[write:]
        return write
|
# Read a whole number from the user. input() already returns a string that
# int() converts once; the original converted the same value twice.
number = int(input("Enter a number "))
# An integer is even exactly when it is divisible by 2.
if number % 2 == 0:
    print("Great! %d is an even number "%(number))
else:
    print("Nope! %d is an odd number "%(number))
class Solution:
    def sumSubseqWidths(self, A: List[int]) -> int:
        """Return the sum of (max - min) over all non-empty subsequences of A,
        modulo 10**9 + 7.

        After sorting, A[i] is the maximum of 2**i subsequences and the
        minimum of 2**(n-1-i) subsequences, so it contributes
        (2**i - 2**(n-1-i)) * A[i] to the total.
        """
        MOD = 10 ** 9 + 7
        A.sort()
        n = len(A)
        # pow2[k] == 2**k, precomputed so the summation stays branch-free.
        pow2 = [1] * (n + 1)
        for k in range(1, n + 1):
            pow2[k] = pow2[k - 1] * 2
        total = sum((pow2[i] - pow2[n - 1 - i]) * value
                    for i, value in enumerate(A))
        return total % MOD
|
# @Time : 18-3-12 下午9:31
# @Author : DioMryang
# @File : Processor.py
# @Description :
import logging
from DioFramework.Base.Mixin.LoadClassToolMixin import LoadClassToolMixin
class Processor(LoadClassToolMixin):
    """
    Common base class for processors.

    Expected JSON configuration::

        {
            "id": 1,                         # processor id
            "before_middleware": [2, 3, 4],  # ids of middleware run before
            "after_middleware": [5, 6, 7],   # ids of middleware run after
            "params": {...}                  # processor parameters
        }

    MetaClass:
        LoadClassToolMixin

    Attributes:
        `logger`: per-class logger
        `config`: configuration dict (taken from the ``config`` kwarg)
        `pid`: processor id from the config (-1 when absent)
        `params`: processor parameters from the config

    Methods:
        `execute`: built-in entry point driving one run
        `run`: the processor's main processing logic
        `beforeExecute`: hook executed before ``run``
        `afterExecute`: hook executed after ``run``
        `handleError`: hook executed when a run raises
    """

    # Log line format used at processor initialisation.
    processorInitFormat = "load {} {}: {}"

    def __init__(self, *args, **kwargs):
        self.logger = logging.getLogger(self.__class__.__name__)
        self.config = kwargs.get("config", {})
        self.params = self.config.get("params", {})
        self.pid = self.config.get("id", -1)
        # Log the processor initialisation.
        self.logger.info(self.processorInitFormat.format(self.__class__.__name__, self.pid, str(self.config)))

    def execute(self, *args, **kwargs):
        """
        Default entry point: run the before-hook, the main logic, then the
        after-hook.  On any exception the error hook runs and the original
        exception propagates to the caller.

        :param args: forwarded to every hook
        :param kwargs: forwarded to every hook
        :return: the value returned by ``run``
        """
        try:
            self.beforeExecute(*args, **kwargs)
            result = self.run(*args, **kwargs)
            self.afterExecute(result, *args, **kwargs)
        except Exception as e:
            self.handleError(error=e, *args, **kwargs)
            # Bare ``raise`` keeps the original traceback intact;
            # ``raise e`` would truncate it at this frame.
            raise
        return result

    def run(self, *args, **kwargs):
        """
        Main processing logic; subclasses override this.

        :return: the run result, handed to ``afterExecute``
        """
        pass

    def beforeExecute(self, *args, **kwargs):
        """
        Hook executed before ``run``; middleware insertion point.
        """
        pass

    def afterExecute(self, *args, **kwargs):
        """
        Hook executed after ``run``; receives the run result as first argument.
        """
        pass

    def handleError(self, *args, **kwargs):
        """
        Error-handling hook: log the class name of the raised exception.

        NOTE(review): assumes an ``error`` keyword is always supplied, as
        ``execute`` does; a missing key would raise AttributeError here.
        """
        self.logger.error(kwargs.get("error").__class__.__name__)
if __name__ == '__main__':
    # Smoke test: define a trivial subclass and instantiate it so the
    # __init__ logging path runs once.
    class DioProcessor(Processor):
        pass
    dio = DioProcessor("喵喵")
|
#!/usr/bin/env python
import numpy as np
from utils import wrapToPi
# Import message definition
import rospy
from std_msgs.msg import Float32
# command zero velocities once we are this close to the goal
RHO_THRES = 0.05
ALPHA_THRES = 0.1
DELTA_THRES = 0.1
class PoseController:
    """ Pose stabilization controller """

    def __init__(self, k1, k2, k3, V_max=0.5, om_max=1):
        # Controller gains.
        self.k1 = k1
        self.k2 = k2
        self.k3 = k3
        # Saturation limits for linear and angular velocity.
        self.V_max = V_max
        self.om_max = om_max
        # Publishers for the intermediate control quantities.
        self.alpha_pub = rospy.Publisher('/controller/alpha', Float32, queue_size=10)
        self.rho_pub = rospy.Publisher('/controller/rho', Float32, queue_size=10)
        self.delta_pub = rospy.Publisher('/controller/delta', Float32, queue_size=10)
        # Init the node
        #rospy.init_node('PoseControlNode', anonymous=True)

    def load_goal(self, x_g, y_g, th_g):
        """ Loads in a new goal position """
        self.x_g = x_g
        self.y_g = y_g
        self.th_g = th_g

    def compute_control(self, x, y, th, t):
        """
        Compute the pose-stabilizing control.

        Inputs:
            x, y, th: current state
            t: current time (unused)
        Outputs:
            V, om: control actions, clipped to the actuator limits
        """
        ########## Code starts here ##########
        dx = self.x_g - x
        dy = self.y_g - y
        # Distance and bearing to the goal.
        rho = np.sqrt(dx ** 2 + dy ** 2)
        bearing = np.arctan2(dy, dx)
        alpha = wrapToPi(bearing - th)
        delta = wrapToPi(bearing - self.th_g)

        # Publish rho, delta, alpha for debugging/plotting.
        self.alpha_pub.publish(alpha)
        self.rho_pub.publish(rho)
        self.delta_pub.publish(delta)

        near_goal = (rho < RHO_THRES
                     and np.abs(alpha) < ALPHA_THRES
                     and np.abs(delta) < DELTA_THRES)
        if near_goal:
            # Close enough to the goal: command zero velocities.
            V = 0
            om = 0
        else:
            V = self.k1 * rho * np.cos(alpha)
            # np.sinc is the normalized sinc, so sinc(alpha/pi) == sin(alpha)/alpha.
            om = (self.k2 * alpha
                  + self.k1 * (np.cos(alpha) * np.sinc(alpha / np.pi))
                  * (alpha + self.k3 * delta))
        ########## Code ends here ##########

        # apply control limits
        V = np.clip(V, -self.V_max, self.V_max)
        om = np.clip(om, -self.om_max, self.om_max)
        return V, om
|
# -*- coding: utf-8 -*-
"""Runs the ranking of drug targets using the HumanBase data as PPI network."""
import logging
import os
import time
import traceback
import warnings
import matplotlib.pyplot as plt
import pandas as pd
from guiltytargets.pipeline import rank_targets, write_gat2vec_input_files
from guiltytargets.ppi_network_annotation import generate_ppi_network, parse_dge
from guiltytargets.ppi_network_annotation.parsers import parse_association_scores, parse_gene_list
# Suppress warnings
warnings.simplefilter('ignore')

# Log the run: everything to a file, INFO and above to the console.
logger = logging.getLogger()
fh = logging.FileHandler('classify_diseases_hb.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
logger.addHandler(ch)
logger.addHandler(fh)

# Paths (duplicate isdir assert removed; one check with a message suffices)
data_base_dir = r'/home/bit/lacerda/data'
assert os.path.isdir(data_base_dir), "Update your data_basedir folder for this environment."
targets_file = os.path.join(data_base_dir, r'OpenTargets/ad/ot_entrez.txt')
assoc_file = os.path.join(data_base_dir, r'OpenTargets/ad/ot_assoc_entrez.txt')
g2v_path = os.path.join(data_base_dir, r'gat2vec_files/ppi/ad')
phewas_path = None  # Phewas file needs to be converted to Entrez
ppi_base_path = os.path.join(data_base_dir, r'HumanBase')
dge_base_path = os.path.join(data_base_dir, r'DGE/AMP-AD/DifferentialExpression_%s.csv')
graph_paths = ['hippocampus_top', 'cerebral_cortex_top']
lfc_cutoff = 1.5  # no significance when changed
ppi_edge_min_confidence = 0.0

# Column names / parameters for differential expression parsing.
max_padj = 0.05
# The original "'baseMean' or None" always evaluates to 'baseMean'
# (a non-empty string is truthy), so the dead "or None" is dropped.
base_mean_name = 'baseMean'  # it was None
log_fold_change_name = 'log2FoldChange'
adjusted_p_value_name = 'padj'
entrez_id_name = 'entrez'
split_char = '///'
diff_type = 'all'
def main():
    """Rank drug targets for each AMP-AD brain-region DGE dataset.

    For every differential-expression dataset (Brodmann areas BM10/22/36/44)
    and every HumanBase PPI graph, targets are ranked four ways -- with and
    without association-score weights, by the default evaluator ('rr') and
    by a class-balanced SVM -- and a box-plot comparison of the AUC values
    is saved per dataset.
    """
    for dge in ['BM10', 'BM22', 'BM36', 'BM44']:
        dge_path = dge_base_path % dge
        # Parse the differential gene expression table for this brain region.
        gene_list = parse_dge(
            dge_path=dge_path,
            entrez_id_header=entrez_id_name,
            log2_fold_change_header=log_fold_change_name,
            adj_p_header=adjusted_p_value_name,
            entrez_delimiter=split_char,
            base_mean_header=base_mean_name,
            csv_separator=';'
        )
        dim = len(graph_paths)
        fig, axs = plt.subplots(ncols=dim, sharey='all', squeeze=False)
        fig.set_size_inches(10, 5)
        fig.suptitle(f'DGE {dge}')
        df = pd.DataFrame()
        axs_ind = 0
        for ppi_graph_path in graph_paths:
            max_log2_fold_change, min_log2_fold_change = lfc_cutoff, lfc_cutoff * -1
            network = generate_ppi_network(
                ppi_graph_path=os.path.join(ppi_base_path, ppi_graph_path),
                dge_list=gene_list,
                max_adj_p=max_padj,
                max_log2_fold_change=max_log2_fold_change,
                min_log2_fold_change=min_log2_fold_change,
                ppi_edge_min_confidence=ppi_edge_min_confidence,
                current_disease_ids_path='',
                disease_associations_path=phewas_path,
            )
            targets = parse_gene_list(targets_file, network.graph)
            # Only parse association scores when an assoc file is configured.
            assoc_score = assoc_file and parse_association_scores(assoc_file)
            # File with no weights
            write_gat2vec_input_files(
                network=network,
                targets=targets,
                home_dir=g2v_path
            )
            auc_df, _ = rank_targets(
                directory=g2v_path,
                network=network,
            )
            df['rr'] = auc_df['auc']
            auc_df, _ = rank_targets(
                directory=g2v_path,
                network=network,
                evaluation='svm',
                class_weights='balanced'
            )
            df['bsvm'] = auc_df['auc']
            # File with weights
            write_gat2vec_input_files(
                network=network,
                targets=targets,
                home_dir=g2v_path,
                assoc_score=assoc_score
            )
            auc_df, _ = rank_targets(
                directory=g2v_path,
                network=network,
            )
            df['wrr'] = auc_df['auc']
            auc_df, _ = rank_targets(
                directory=g2v_path,
                network=network,
                evaluation='svm',
                class_weights='balanced'
            )
            df['wbsvm'] = auc_df['auc']
            df.boxplot(column=['rr', 'wrr', 'bsvm', 'wbsvm'], ax=axs[0][axs_ind])
            # Fixed: removed a stray '"' that leaked into the subplot title.
            axs[0][axs_ind].set_title(f'PPI {ppi_graph_path}')
            axs_ind += 1
        fig.savefig(f'comparison_humanbase({dge}).png')
if __name__ == '__main__':
    start_time = time.time()
    try:
        main()
    except Exception as err:
        # Record the failure type and the full traceback.
        logger.error(type(err))
        logger.error(traceback.format_exc())
    finally:
        # Timing is reported whether the run succeeded or not.
        elapsed = time.time() - start_time
        logger.info(f"Total time: {elapsed}")
|
from requests import request
from ..models import (Scraper, ArticleSpider, ArticleThread,
Article, CrawlerSet, CrawlerItem, ScraperAnalysis)
from ..serializers import (ScraperSerializer, ArticleSpiderSerializer, ArticleThreadSerializer, ArticleSerializer,
CrawlerSetSerializer, CrawlerItemSerializer, ScraperAnalysisSerializer)
from ..pagination import CrawlerItemPagination, ScraperPagination, ArticleSpiderPagination, ArticleThreadPagination, ArticlePagination
# from rest_framework.generics import ListAPIView
from rest_framework.permissions import IsAuthenticated, IsAdminUser, AllowAny
from rest_framework import viewsets, status, filters
from rest_framework.views import APIView
from rest_framework.response import Response
from django.core.cache.backends.base import DEFAULT_TIMEOUT
from django.utils.decorators import method_decorator
# from django.views.decorators.cache import cache_page
# from django.views.decorators.vary import vary_on_cookie
import datetime, time, json, math, statistics
from django.conf import settings
from django.db.models import Avg
# FOR ARTICLE ERRORS
def get_article_errors(request):
    """Return the CrawlerItem queryset with article_status "Error", or None when empty."""
    articles = CrawlerItem.objects.filter(article_status="Error")
    return articles if articles.exists() else None
def get_article_timeout(request):
    """Return the CrawlerItem queryset with article_error_status "Timeout Error", or None when empty."""
    articles = CrawlerItem.objects.filter(article_error_status="Timeout Error")
    return articles if articles.exists() else None
def get_base_error(request):
    """Return the CrawlerItem queryset with article_error_status "Base Error", or None when empty."""
    articles = CrawlerItem.objects.filter(article_error_status="Base Error")
    return articles if articles.exists() else None
def get_done_articles(request):
    """Return the CrawlerItem queryset with article_status "Done", or None when empty."""
    articles = CrawlerItem.objects.filter(article_status="Done")
    return articles if articles.exists() else None
def get_http_503(request):
    """Return the CrawlerItem queryset with article_error_status "HTTP Error 403", or None when empty.

    NOTE(review): the function name says 503 but the filter value is
    "HTTP Error 403" -- one of the two looks wrong; confirm against callers
    before renaming or changing the filter.
    """
    articles = CrawlerItem.objects.filter(article_error_status="HTTP Error 403")
    return articles if articles.exists() else None
def get_dns_error(request):
    """Return the CrawlerItem queryset with article_error_status "DNS Error", or None when empty."""
    articles = CrawlerItem.objects.filter(article_error_status="DNS Error")
    return articles if articles.exists() else None
def get_no_content(request):
    """Return the CrawlerItem queryset with article_error_status "No content", or None when empty."""
    articles = CrawlerItem.objects.filter(article_error_status="No content")
    return articles if articles.exists() else None
def get_article_fqdn_none(request):
    """Return the CrawlerItem queryset whose fqdn is NULL, or None when empty."""
    articles = CrawlerItem.objects.filter(fqdn=None)
    return articles if articles.exists() else None
# FOR ARTICLE DOWNLOAD LATENCY
def get_download_latency_not_none(request):
    """Return the CrawlerItem queryset with article_status "Done", or None when empty.

    NOTE(review): despite the name, this filters on article_status == "Done",
    not on download latency -- presumably "Done" rows are the ones with a
    latency recorded; verify against callers.
    """
    articles = CrawlerItem.objects.filter(article_status="Done")
    return articles if articles.exists() else None
import tensorflow as tf
from tensorflow import keras
# Fashion-MNIST classification: a small dense network trained with Keras.
fashion_mnist = keras.datasets.fashion_mnist
# 60k training and 10k test 28x28 grayscale images with integer labels 0..9.
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# Human-readable names for the ten label classes.
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# # (60000, 28, 28): 60000 training images of size 28*28
# print(train_images.shape)
#
# # 60000 labels, matching the training set
# print(len(train_labels))
#
# # Labels 0~9: ten classes in total
# print(train_labels)
#
# # 10000 test images
# print(test_images.shape)
#
# # 10000 labels, matching the test set
# print(len(test_labels))
# plt.figure()
# plt.imshow(train_images[0])
# plt.colorbar()
# plt.grid(False)
# plt.show()
# Normalize pixel values from [0, 255] to [0, 1].
train_images = train_images / 255.0
test_images = test_images / 255.0
# plt.figure(figsize=(10, 10))
# for i in range(25):
#     plt.subplot(5, 5, i+1)
#     plt.xticks([])
#     plt.yticks([])
#     plt.grid(False)
#     plt.imshow(train_images[i], cmap=plt.cm.binary)
#     plt.xlabel(class_names[test_labels[i]])
# plt.show()
# Build the network architecture.
model = keras.Sequential([
    # Flatten layer: turns each 28x28 image into a 784-element vector.
    keras.layers.Flatten(input_shape=(28, 28)),
    # Fully connected layer with ReLU activation.
    keras.layers.Dense(128, activation='relu'),
    # Output layer: one raw logit per class.
    keras.layers.Dense(10)
])
# Configure the training procedure.
model.compile(
    # Adam optimizer.
    optimizer='adam',
    # Cross-entropy on integer labels; from_logits=True because the model
    # outputs raw logits, not softmax probabilities.
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    # Track classification accuracy.
    metrics=['accuracy']
)
# Run the training loop for 10 epochs.
model.fit(train_images, train_labels, epochs=10)
# Evaluate on the held-out test set.
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
# Copyright (c) 2010-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import uuid
import hashlib
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.compat import json
import boto
class SNSConnection(AWSQueryConnection):
DefaultRegionName = 'us-east-1'
DefaultRegionEndpoint = 'sns.us-east-1.amazonaws.com'
APIVersion = '2010-03-31'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
security_token=None, validate_certs=True):
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint,
connection_cls=SNSConnection)
self.region = region
AWSQueryConnection.__init__(self, aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
security_token=security_token,
validate_certs=validate_certs)
def _required_auth_capability(self):
return ['sns']
def get_all_topics(self, next_token=None):
"""
:type next_token: string
:param next_token: Token returned by the previous call to
this method.
"""
params = {'ContentType': 'JSON'}
if next_token:
params['NextToken'] = next_token
response = self.make_request('ListTopics', params, '/', 'GET')
body = response.read()
if response.status == 200:
return json.loads(body)
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def get_topic_attributes(self, topic):
"""
Get attributes of a Topic
:type topic: string
:param topic: The ARN of the topic.
"""
params = {'ContentType': 'JSON',
'TopicArn': topic}
response = self.make_request('GetTopicAttributes', params, '/', 'GET')
body = response.read()
if response.status == 200:
return json.loads(body)
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def set_topic_attributes(self, topic, attr_name, attr_value):
"""
Get attributes of a Topic
:type topic: string
:param topic: The ARN of the topic.
:type attr_name: string
:param attr_name: The name of the attribute you want to set.
Only a subset of the topic's attributes are mutable.
Valid values: Policy | DisplayName
:type attr_value: string
:param attr_value: The new value for the attribute.
"""
params = {'ContentType': 'JSON',
'TopicArn': topic,
'AttributeName': attr_name,
'AttributeValue': attr_value}
response = self.make_request('SetTopicAttributes', params, '/', 'GET')
body = response.read()
if response.status == 200:
return json.loads(body)
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def add_permission(self, topic, label, account_ids, actions):
"""
Adds a statement to a topic's access control policy, granting
access for the specified AWS accounts to the specified actions.
:type topic: string
:param topic: The ARN of the topic.
:type label: string
:param label: A unique identifier for the new policy statement.
:type account_ids: list of strings
:param account_ids: The AWS account ids of the users who will be
give access to the specified actions.
:type actions: list of strings
:param actions: The actions you want to allow for each of the
specified principal(s).
"""
params = {'ContentType': 'JSON',
'TopicArn': topic,
'Label': label}
self.build_list_params(params, account_ids, 'AWSAccountId.member')
self.build_list_params(params, actions, 'ActionName.member')
response = self.make_request('AddPermission', params, '/', 'GET')
body = response.read()
if response.status == 200:
return json.loads(body)
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def remove_permission(self, topic, label):
"""
Removes a statement from a topic's access control policy.
:type topic: string
:param topic: The ARN of the topic.
:type label: string
:param label: A unique identifier for the policy statement
to be removed.
"""
params = {'ContentType': 'JSON',
'TopicArn': topic,
'Label': label}
response = self.make_request('RemovePermission', params, '/', 'GET')
body = response.read()
if response.status == 200:
return json.loads(body)
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def create_topic(self, topic):
"""
Create a new Topic.
:type topic: string
:param topic: The name of the new topic.
"""
params = {'ContentType': 'JSON',
'Name': topic}
response = self.make_request('CreateTopic', params, '/', 'GET')
body = response.read()
if response.status == 200:
return json.loads(body)
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def delete_topic(self, topic):
"""
Delete an existing topic
:type topic: string
:param topic: The ARN of the topic
"""
params = {'ContentType': 'JSON',
'TopicArn': topic}
response = self.make_request('DeleteTopic', params, '/', 'GET')
body = response.read()
if response.status == 200:
return json.loads(body)
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def publish(self, topic, message, subject=None):
"""
Get properties of a Topic
:type topic: string
:param topic: The ARN of the new topic.
:type message: string
:param message: The message you want to send to the topic.
Messages must be UTF-8 encoded strings and
be at most 4KB in size.
:type subject: string
:param subject: Optional parameter to be used as the "Subject"
line of the email notifications.
"""
params = {'ContentType': 'JSON',
'TopicArn': topic,
'Message': message}
if subject:
params['Subject'] = subject
response = self.make_request('Publish', params, '/', 'GET')
body = response.read()
if response.status == 200:
return json.loads(body)
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def subscribe(self, topic, protocol, endpoint):
"""
Subscribe to a Topic.
:type topic: string
:param topic: The ARN of the new topic.
:type protocol: string
:param protocol: The protocol used to communicate with
the subscriber. Current choices are:
email|email-json|http|https|sqs
:type endpoint: string
:param endpoint: The location of the endpoint for
the subscriber.
* For email, this would be a valid email address
* For email-json, this would be a valid email address
* For http, this would be a URL beginning with http
* For https, this would be a URL beginning with https
* For sqs, this would be the ARN of an SQS Queue
"""
params = {'ContentType': 'JSON',
'TopicArn': topic,
'Protocol': protocol,
'Endpoint': endpoint}
response = self.make_request('Subscribe', params, '/', 'GET')
body = response.read()
if response.status == 200:
return json.loads(body)
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def subscribe_sqs_queue(self, topic, queue):
"""
Subscribe an SQS queue to a topic.
This is convenience method that handles most of the complexity involved
in using an SQS queue as an endpoint for an SNS topic. To achieve this
the following operations are performed:
* The correct ARN is constructed for the SQS queue and that ARN is
then subscribed to the topic.
* A JSON policy document is contructed that grants permission to
the SNS topic to send messages to the SQS queue.
* This JSON policy is then associated with the SQS queue using
the queue's set_attribute method. If the queue already has
a policy associated with it, this process will add a Statement to
that policy. If no policy exists, a new policy will be created.
:type topic: string
:param topic: The ARN of the new topic.
:type queue: A boto Queue object
:param queue: The queue you wish to subscribe to the SNS Topic.
"""
t = queue.id.split('/')
q_arn = queue.arn
sid = hashlib.md5(topic + q_arn).hexdigest()
sid_exists = False
resp = self.subscribe(topic, 'sqs', q_arn)
attr = queue.get_attributes('Policy')
if 'Policy' in attr:
policy = json.loads(attr['Policy'])
else:
policy = {}
if 'Version' not in policy:
policy['Version'] = '2008-10-17'
if 'Statement' not in policy:
policy['Statement'] = []
# See if a Statement with the Sid exists already.
for s in policy['Statement']:
if s['Sid'] == sid:
sid_exists = True
if not sid_exists:
statement = {'Action': 'SQS:SendMessage',
'Effect': 'Allow',
'Principal': {'AWS': '*'},
'Resource': q_arn,
'Sid': sid,
'Condition': {'StringLike': {'aws:SourceArn': topic}}}
policy['Statement'].append(statement)
queue.set_attribute('Policy', json.dumps(policy))
return resp
def confirm_subscription(self, topic, token,
authenticate_on_unsubscribe=False):
"""
Get properties of a Topic
:type topic: string
:param topic: The ARN of the new topic.
:type token: string
:param token: Short-lived token sent to and endpoint during
the Subscribe operation.
:type authenticate_on_unsubscribe: bool
:param authenticate_on_unsubscribe: Optional parameter indicating
that you wish to disable
unauthenticated unsubscription
of the subscription.
"""
params = {'ContentType': 'JSON',
'TopicArn': topic,
'Token': token}
if authenticate_on_unsubscribe:
params['AuthenticateOnUnsubscribe'] = 'true'
response = self.make_request('ConfirmSubscription', params, '/', 'GET')
body = response.read()
if response.status == 200:
return json.loads(body)
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def unsubscribe(self, subscription):
"""
Allows endpoint owner to delete subscription.
Confirmation message will be delivered.
:type subscription: string
:param subscription: The ARN of the subscription to be deleted.
"""
params = {'ContentType': 'JSON',
'SubscriptionArn': subscription}
response = self.make_request('Unsubscribe', params, '/', 'GET')
body = response.read()
if response.status == 200:
return json.loads(body)
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def get_all_subscriptions(self, next_token=None):
    """
    Get list of all subscriptions.

    :type next_token: string
    :param next_token: Token returned by the previous call to
                       this method.
    """
    params = {'ContentType': 'JSON'}
    if next_token:
        params['NextToken'] = next_token
    response = self.make_request('ListSubscriptions', params, '/', 'GET')
    body = response.read()
    if response.status != 200:
        # Log and surface the service failure as the connection's error type.
        boto.log.error('%s %s' % (response.status, response.reason))
        boto.log.error('%s' % body)
        raise self.ResponseError(response.status, response.reason, body)
    return json.loads(body)
def get_all_subscriptions_by_topic(self, topic, next_token=None):
    """
    Get list of all subscriptions to a specific topic.

    :type topic: string
    :param topic: The ARN of the topic for which you wish to
                  find subscriptions.

    :type next_token: string
    :param next_token: Token returned by the previous call to
                       this method.
    """
    params = {'ContentType': 'JSON', 'TopicArn': topic}
    if next_token:
        params['NextToken'] = next_token
    response = self.make_request('ListSubscriptionsByTopic', params,
                                 '/', 'GET')
    body = response.read()
    if response.status != 200:
        # Log and surface the service failure as the connection's error type.
        boto.log.error('%s %s' % (response.status, response.reason))
        boto.log.error('%s' % body)
        raise self.ResponseError(response.status, response.reason, body)
    return json.loads(body)
|
#
import pandas as pd
import requests as rq
import numpy as np
import re
def download_data(url, option='xls', gid=None):
    """
    Fetch a Google Sheets spreadsheet through its export endpoint.

    Example:
        download_data('https://docs.google.com/spreadsheets/d/KEY/',
                      gid='0', option='csv')

    :param url: spreadsheet URL; must start with the docs.google.com
                spreadsheet base URL and have a '/' after the KEY.
    :param option: file mime; supported options are 'xls' and 'csv'.
    :param gid: sheet id inside the document; required for the csv format.
    :return: the GET request response, or None for a malformed URL.
    """
    flavor_url = '/export?download'
    # Idiom fix: use the boolean operator `and` instead of bitwise `&`.
    if option == 'csv' and gid is not None:
        flavor_url = '/export?format=csv&gid=%s' % gid
    base_url = 'https://docs.google.com/spreadsheets/d/'
    if url.find(base_url) < 0:
        return None  # incorrect url; expected '<base_url>KEY/'
    end_key = url[len(base_url):].find('/')
    if end_key < 0:
        return None  # incorrect url, missing KEY
    download_url = base_url + url[len(base_url):end_key + len(base_url)] + flavor_url
    #
    # TODO: memoize to avoid repeated downloads
    # TODO: add *try* clauses for connection errors
    #
    return rq.get(download_url)
def create_dataframes_from_excel(url=None, xls=None):
    """
    Build a list of pandas DataFrames (one per worksheet) from either a
    valid Google Sheets URL or a local Excel file path.

    :param url: Google spreadsheet URL, e.g.
                https://docs.google.com/spreadsheets/d/KEY/
    :param xls: path to the xls file (ignored when ``url`` is given).
    :return: list of DataFrames, or None when no input could be read.
    """
    if url:
        arquivo = download_data(url=url, option='xls')
        # Robustness fix: download_data returns None for a malformed URL,
        # which previously crashed below on `arquivo.content`.
        if arquivo is None:
            return None
        xls = 'arquivo_url.xls'
        with open(xls, 'wb') as f:
            f.write(arquivo.content)
    # at this point `xls` names an existing workbook (if any was given)
    if xls:
        sheets = pd.ExcelFile(xls).sheet_names  # all worksheet names
        # one DataFrame per worksheet
        return [pd.read_excel(xls, sheet_name=i) for i in sheets]
    return None
def create_dataframes_from_csv(sheets=None, url=None, gids=None, sep=','):
    """
    Build a list of pandas DataFrames from csv sources.

    When ``sheets`` is given, each entry is read directly with
    ``pd.read_csv``.  Otherwise the sheets are downloaded from the Google
    spreadsheet at ``url`` using the sheet ids in ``gids``.

    :param sheets: list of csv file paths (or readable buffers).
    :param url: Google spreadsheet URL.
    :param gids: list with the id of each sheet (not the spreadsheet key).
    :param sep: csv field separator.
    :return: list of DataFrames, or None when neither input is usable.
    """
    # Bug fix: the original used a mutable default argument (sheets=[]),
    # a single list shared across every call; None is the safe sentinel.
    if sheets:
        #
        # TODO: check that each csv file exists before reading it
        #
        return [pd.read_csv(sheet, sep=sep) for sheet in sheets]
    if url is not None and gids is not None:
        from io import StringIO
        df = []
        for gid in gids:
            arquivo = download_data(url=url, gid=gid, option='csv')
            df.append(pd.read_csv(StringIO(arquivo.text), sep=sep))
        return df
    return None
def sanitize_dataframes_nan(*dataframe):
    """
    Replace every missing value (NaN) with an empty string, in place.

    :param dataframe: one or more pandas DataFrames to clean.
    """
    for frame in dataframe:
        frame.fillna('', inplace=True)
## TODO: when the number contains a letter, report an error and clear the phone
def sanitize_phone_brazil(phonenumber):
    """
    Normalize a Brazilian phone number to the +55DDDNUMBER format.

    :param phonenumber: raw phone number (punctuation/spacing allowed).
    :return: '+55' plus the last 10 digits, or '' when the digit count is
             outside the accepted range.
    """
    min_digits = 10   # 3-digit DDD + 3 digits + 4 digits
    max_digits = 12   # 2-digit country code + the 10 digits above
    digits = ''.join(re.findall('([0-9]+)', str(phonenumber)))
    if min_digits <= len(digits) <= max_digits:
        return '+55' + digits[-min_digits:]
    return ''
def sanitize_money(money):
    """
    Normalize a money value to the 'REAIS,CENTS' format.

    :param money: raw money value (string or number).
    :return: formatted string, or '' when no digits are present.
    """
    groups = re.findall('([0-9]+)', str(money))
    if not groups:
        return ''
    cents = '00'
    # The trailing digit group is taken as cents only when it is shorter
    # than three digits; a single digit is right-padded ('5' -> '50').
    if len(groups) > 1 and len(groups[-1]) < 3:
        cents = groups.pop(-1).ljust(2, '0')
    return ''.join(groups) + ',' + cents
def sanitize_discount(desconto):
    """
    Keep only the digits of a discount value; non-numeric input yields '0'.

    :param desconto: raw discount value.
    """
    digits = re.findall('[0-9]+', str(desconto))
    return ''.join(digits) or '0'
def convert_tointeger(var_input):
    """
    Convert a money value to an integer number of cents.

    :param var_input: raw money value accepted by sanitize_money.
    """
    normalized = sanitize_money(var_input)
    return int(normalized.replace(',', ''))
def convert_tostring(var_input):
    """
    Format a numeric value as a string with two decimal places.

    :param var_input: numeric value (e.g. a money amount).
    """
    return '{:.2f}'.format(var_input)
def final_value(value, discount='0'):
    """
    Apply a percentage discount to a money value.

    :param value: money value (any format accepted by sanitize_money).
    :param discount: discount percentage.
    :return: discounted value in 'REAIS,CENTS' format.
    """
    amount = np.float64(convert_tointeger(value)) / 100.0
    fraction = np.float64(discount) / 100.0
    discounted = amount * (1e0 - fraction)
    return sanitize_money(convert_tostring(discounted))
def merge_by_key(df0, df1, key0='id', key1='user_id', how='left'):
    """
    Join two DataFrames on the given key columns.

    :param df0: target DataFrame.
    :param df1: auxiliary DataFrame.
    :param key0: join column in df0.
    :param key1: join column in df1.
    :param how: merge strategy: 'left', 'right' or 'inner'.
    :return: df0 extended with the matching columns of df1.
    """
    return df0.merge(df1, left_on=key0, right_on=key1, how=how)
## manual smoke tests (these hit the network):
if __name__ == "__main__":
    def debug_excel():
        """Download the sample workbook and check dataframes come back."""
        url = 'https://docs.google.com/spreadsheets/d/1N6JFMIQR71HF5u5zkWthqbgpA8WYz_0ufDGadeJnhlo/export?download'
        df = create_dataframes_from_excel(url=url)
        if df:
            return True
        return False

    def debug_csv():
        """Download two sheets as csv and check dataframes come back."""
        url = 'https://docs.google.com/spreadsheets/d/1N6JFMIQR71HF5u5zkWthqbgpA8WYz_0ufDGadeJnhlo/'
        df = create_dataframes_from_csv(url=url, gids=['0', '822929440'])
        if df:
            return True
        return False

    def debug_sanitize_nan():
        url = 'https://docs.google.com/spreadsheets/d/1N6JFMIQR71HF5u5zkWthqbgpA8WYz_0ufDGadeJnhlo/export?download'
        df = create_dataframes_from_excel(url=url)
        sanitize_dataframes_nan(*df)
        #
        # df.isna() returns a frame (always truthy), so check each column
        # of each dataframe individually instead.
        #
        def helper(data):
            return any([any(data[x].isna()) for x in data.columns])
        return not any([helper(data) for data in df])

    def debug_sanitize(tentativa, f):
        # `tentativa` maps raw inputs to their expected sanitized outputs
        return not any([not (f(x) == tentativa[x]) for x in tentativa])

    def debug_sanitize_money():
        tentativa = {'1.1': '1,10', '7': '7,00', '1.230': '1230,00', '1.230.20': '1230,20'}
        return debug_sanitize(tentativa, sanitize_money)

    def debug_sanitize_phone_brazil():
        tentativa = {'+559991234567': '+559991234567', '(999)1234567': '+559991234567',
                     '999123456': '', '+5999123-4567': '+559991234567', '9' * 13: ''}
        return debug_sanitize(tentativa, sanitize_phone_brazil)

    def debug_sanitize_discount():
        tentativa = {'3': '3', '30': '30', '-': '0'}
        return debug_sanitize(tentativa, sanitize_discount)

    def debug_merge_by_key():
        url = 'https://docs.google.com/spreadsheets/d/1N6JFMIQR71HF5u5zkWthqbgpA8WYz_0ufDGadeJnhlo/'
        df = create_dataframes_from_csv(url=url, gids=['0', '822929440'])
        DF = merge_by_key(df[0], df[1], how='inner')
        return (len(DF) == len(df[1]))

    # ------------------------------------------------
    print("Starting tests::")
    print('Reading excel file from internet ...', end=" ")
    print(debug_excel())
    print('Reading csv files from internet ...', end=' ')
    # Bug fix: this line previously called debug_excel() a second time, so
    # the csv reader was never actually exercised.
    print(debug_csv())
    print('Sanitizing dataframes ...', end=' ')
    print(debug_sanitize_nan())
    print('Sanitizing money ...', end=' ')
    print(debug_sanitize_money())
    print('Sanitizing telephone ...', end=' ')
    print(debug_sanitize_phone_brazil())
    print('Sanitizing discount ...', end=' ')
    print(debug_sanitize_discount())
    print('Checking merge ...', end=' ')
    print(debug_merge_by_key())
|
from django.shortcuts import render
from .models import *
from django.http import JsonResponse
import json
# Create your views here.
def store(request):
    """Render the storefront page listing every product."""
    context = {'products': Product.objects.all()}
    return render(request, 'store/store.html', context)
def cart(request):
    """Render the cart page; anonymous users get an empty placeholder cart."""
    if request.user.is_authenticated:
        customer = request.user.customer
        # Reuse the customer's open (incomplete) order or start a new one.
        order, created = Order.objects.get_or_create(customer=customer, complete=False)
        items = order.orderitem_set.all()
    else:
        # Dict mimics the Order template API for guests.
        items = []
        order = {'get_cart_total': 0, 'get_cart_items': 0}
    return render(request, 'store/cart.html', {'items': items, 'order': order})
def checkout(request):
    """Render the checkout page; anonymous users get an empty placeholder cart."""
    if request.user.is_authenticated:
        customer = request.user.customer
        # Reuse the customer's open (incomplete) order or start a new one.
        order, created = Order.objects.get_or_create(customer=customer, complete=False)
        items = order.orderitem_set.all()
    else:
        # Dict mimics the Order template API for guests.
        items = []
        order = {'get_cart_total': 0, 'get_cart_items': 0}
    return render(request, 'store/checkout.html', {'items': items, 'order': order})
def product_info(request, productId):
    """Render the detail page for a single product looked up by id."""
    context = {'product': Product.objects.get(id=productId)}
    return render(request, 'store/product_info.html', context)
def updateItem(request):
    """
    AJAX endpoint: read productId and action from the JSON request body.

    Bug fix: a plain Django ``HttpRequest`` has no ``data`` attribute
    (that is a Django REST framework concept); the raw JSON payload is
    in ``request.body``.
    """
    data = json.loads(request.body)
    productId = data['productId']
    action = data['action']
    print("Action:", action)
    print("ProductId:", productId)
    return JsonResponse('Item was added', safe=False)
"""
Access the main thread
This module allows you to run code on the main thread easely. This can be used for modifiying the UI.
Example:
.. highlight:: python
.. code-block:: python
from UIKit import UIScreen
import mainthread
def set_brightness():
inverted = int(not int(UIScreen.mainScreen.brightness))
UIScreen.mainScreen.setBrightness(inverted)
mainthread.run_async(set_brightness)
"""
import pyto
from time import sleep
from __check_type__ import check
from __check_type__ import func as _func
import threading
__PyMainThread__ = pyto.PyMainThread
def mainthread(func):
    """
    A decorator that makes a function run synchronously on the main thread.

    Example:

    .. code-block:: python

        import mainthread
        from UIKit import UIApplication

        @mainthread.mainthread
        def run_in_background():
            app = UIApplication.sharedApplication
            app.beginBackgroundTaskWithExpirationHandler(None)

        run_in_background()
    """
    check(func, "func", [_func])

    def wrapper(*args, **kwargs):
        import mainthread

        def call():
            return func(*args, **kwargs)

        return mainthread.run_sync(call)

    return wrapper
def run_async(code):
    """
    Runs the given code asynchronously on the main thread.

    :param code: Code to execute in the main thread.
    """
    check(code, "code", [_func])

    # Wrap in a typed thunk as expected by the Objective-C bridge.
    def thunk() -> None:
        code()

    __PyMainThread__.runAsync(thunk)
# Module-level slots used to hand the result / exception of the main-thread
# closure back to the calling thread.
_ret_value = None
_exc = None

def run_sync(code):
    """
    Runs the given code synchronously on the main thread.

    Supports return values as opposed to :func:`~mainthread.run_async`

    :param code: Code to execute in the main thread.
    """
    global _exc

    check(code, "code", [_func])

    # Carry the calling script's path over to the main thread so the code
    # runs in the same script context; not every thread has one set.
    try:
        script_path = threading.current_thread().script_path
    except AttributeError:
        script_path = None

    def code_() -> None:
        global _ret_value
        global _exc

        threading.current_thread().script_path = script_path
        try:
            _ret_value = code()
        except Exception as e:
            # Stash the exception so it can be re-raised on the caller's
            # thread once runSync returns.
            _exc = e
        threading.current_thread().script_path = None

    __PyMainThread__.runSync(code_)
    # NOTE(review): fixed grace period after runSync; the module-level
    # result slots also look racy under concurrent callers — confirm.
    sleep(0.1)
    if _exc is not None:
        __exc = _exc
        _exc = None  # clear the slot so later calls don't re-raise it
        raise __exc
    return _ret_value
|
from django.urls import path
from django.contrib.auth.views import LogoutView
from . import views
# URL namespace for reversing, e.g. reverse("accounts:login").
app_name = "accounts"

urlpatterns = [
    # Custom login view (styled, submittable form).
    path("login/", views.SubmittableLoginView.as_view(), name="login"),
    # Stock Django logout view.
    path("logout/", LogoutView.as_view(), name="logout"),
    path("password-change/", views.SubmittablePasswordChange.as_view(), name="password_change"),
    path("sign-up/", views.SignUpView.as_view(), name="sign_up"),
]
# Pathfinding - Part 1
# Graphs
# KidsCanCode 2017
import pygame as pyg
from collections import deque
from queue import LifoQueue
import heapq
from math import pow
from os import path
import matplotlib.pyplot as plt
vector = pyg.math.Vector2
TILE_SIZE = 25
TILE_WIDTH = 20
TILE_HEIGHT = 20
GRID_WIDTH = TILE_SIZE * TILE_WIDTH
GRID_HEIGHT = TILE_SIZE * TILE_HEIGHT
FPS = 30
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
CYAN = (0, 255, 255)
BLUE=(0,0,255)
MAGENTA = (255, 0, 255)
YELLOW = (255, 255, 0)
DARKGRAY = (40, 40, 40)
LIGHTGRAY = (255, 255, 255)
MEDGRAY = (75, 75, 75)
EXPLORED=(255, 204, 128)
pyg.init()
screen = pyg.display.set_mode((GRID_WIDTH, GRID_HEIGHT))
clock = pyg.time.Clock()
class SquareGrid:
    """Uniform grid graph with walls and an 8-connected neighborhood."""

    def __init__(self, width, height):
        self.width = width
        self.height = height
        self.walls = []
        # Four orthogonal steps followed by the four diagonals.
        self.connections = [vector(1, 0), vector(-1, 0), vector(0, 1), vector(0, -1),
                            vector(-1, -1), vector(1, 1), vector(1, -1), vector(-1, 1)]

    def in_bounds(self, node):
        """True when the node lies inside the grid rectangle."""
        return 0 <= node.x < self.width and 0 <= node.y < self.height

    def passable(self, node):
        """True when the node is not blocked by a wall."""
        return node not in self.walls

    def find_neighbors(self, node):
        """Return the in-bounds, passable neighbors of ``node``."""
        candidates = (node + step for step in self.connections)
        return filter(self.passable, filter(self.in_bounds, candidates))

    def draw(self):
        """Render every wall cell as a filled black tile."""
        for wall in self.walls:
            tile = pyg.Rect(wall * TILE_SIZE, (TILE_SIZE, TILE_SIZE))
            pyg.draw.rect(screen, BLACK, tile)
class CostGrid(SquareGrid):
    """SquareGrid with optional per-node weights; diagonal moves cost more."""

    def __init__(self, width, height):
        super().__init__(width, height)
        self.weights = {}

    def cost(self, from_node, to_node):
        """Movement cost: 10 orthogonal, 14 diagonal, plus any node weight."""
        orthogonal = (vector(to_node) - vector(from_node)).length_squared() == 1
        base = 10 if orthogonal else 14
        return self.weights.get(to_node, 0) + base
class PriorityHeapQueue:
    """Min-priority queue backed by heapq; lowest cost comes out first."""

    def __init__(self):
        self.nodes = []

    def put(self, node, cost):
        """Insert ``node`` with priority ``cost``."""
        heapq.heappush(self.nodes, (cost, node))

    def get(self):
        """Pop and return the node with the smallest cost."""
        cost, node = heapq.heappop(self.nodes)
        return node

    def empty(self):
        """True when no nodes remain."""
        return not self.nodes
def draw_icons():
    """Blit the home icon at the goal cell and the cross icon at the start cell."""
    # Same placement as before (home at ``endnode``, cross at ``startnode``),
    # but with local names that say what they actually hold.
    home_center = (endnode.x * TILE_SIZE + TILE_SIZE / 2, endnode.y * TILE_SIZE + TILE_SIZE / 2)
    screen.blit(home_img, home_img.get_rect(center=home_center))
    cross_center = (startnode.x * TILE_SIZE + TILE_SIZE / 2, startnode.y * TILE_SIZE + TILE_SIZE / 2)
    screen.blit(cross_img, cross_img.get_rect(center=cross_center))
def draw_grid():
    """Draw the tile grid lines across the whole window."""
    for col in range(0, GRID_WIDTH, TILE_SIZE):
        pyg.draw.line(screen, BLACK, (col, 0), (col, GRID_HEIGHT))
    for row in range(0, GRID_HEIGHT, TILE_SIZE):
        pyg.draw.line(screen, BLACK, (0, row), (GRID_WIDTH, row))
# pygame vectors are not hashable, so convert to an int tuple for dict keys.
def vector_to_int(vectorA):
    """Return ``(int(x), int(y))`` for a 2D vector-like object."""
    return int(vectorA.x), int(vectorA.y)
def heuristic(from_node, to_node):
    """Manhattan distance scaled by the orthogonal step cost (10)."""
    dx = abs(from_node.x - to_node.x)
    dy = abs(from_node.y - to_node.y)
    return (dx + dy) * 10
def BFS(graph, startnode, endnode):
    """
    Breadth-first search from startnode towards endnode.

    Returns a dict mapping each reached cell (as an int tuple, see
    vector_to_int) to the step vector used to enter it; the start maps
    to None.  Also prints the visited trace for debugging.
    """
    q = deque()
    q.append(startnode)
    visitednodes = []
    path = {}
    path[vector_to_int(startnode)] = None
    visitednodes.append(startnode)
    while (len(q) > 0):
        currentnode = q.popleft()
        if currentnode == endnode:
            # goal reached; stop expanding the frontier
            break
        for nextnode in graph.find_neighbors(currentnode):
            # if(nextnode not in visitednodes):
            # membership in `path` doubles as the visited check
            if (vector_to_int(nextnode) not in path):
                q.append(nextnode)
                visitednodes.append(nextnode)
                # remember the direction taken into nextnode
                path[vector_to_int(nextnode)] = nextnode - currentnode
    print("********************VISITED***********************")
    print(visitednodes)
    print("************************PATH***********************")
    for key, value in path.items():
        print(key)
    return path
def Dijkstras(graph, startnode, endnode):
    """
    Uniform-cost (Dijkstra) search from startnode towards endnode.

    Returns a dict mapping each reached cell (int tuple) to the step
    vector used to enter it; the start maps to None.
    """
    frontier = PriorityHeapQueue()
    start_key = vector_to_int(startnode)
    frontier.put(start_key, 0)
    path = {start_key: None}
    cost = {start_key: 0}
    while not frontier.empty():
        current = frontier.get()
        if vector(current) == endnode:
            break
        # `neighbor` avoids shadowing the builtin next().
        for neighbor in graph.find_neighbors(vector(current)):
            key = vector_to_int(neighbor)
            new_cost = cost[current] + graph.cost(current, key)
            if key not in cost or new_cost < cost[key]:
                cost[key] = new_cost
                frontier.put(key, new_cost)
                path[key] = vector(neighbor) - vector(current)
    return path
def AStar(graph, startnode, endnode):
    """
    A* search from startnode towards endnode.

    Returns a dict mapping each reached cell (int tuple) to the step
    vector used to enter it; the start maps to None.

    Bug fix: the priority must be f = g + h (cost so far plus heuristic).
    The original enqueued nodes by heuristic alone, which is greedy
    best-first search — not A* — and can return non-optimal paths.
    """
    frontier = PriorityHeapQueue()
    start_key = vector_to_int(startnode)
    frontier.put(start_key, 0)
    path = {start_key: None}
    cost = {start_key: 0}
    while not frontier.empty():
        current = frontier.get()
        if vector(current) == endnode:
            break
        for neighbor in graph.find_neighbors(vector(current)):
            key = vector_to_int(neighbor)
            next_cost = cost[current] + graph.cost(current, key)
            if key not in cost or next_cost < cost[key]:
                cost[key] = next_cost
                # f = g + h: total estimated cost through this node.
                frontier.put(key, next_cost + heuristic(neighbor, endnode))
                path[key] = vector(neighbor) - vector(current)
    return path
def DFS(graph, startnode, endnode):
    """
    Depth-first search from startnode towards endnode.

    Returns a dict mapping each reached cell (int tuple) to the step
    vector used to enter it; the start maps to None.  Also prints the
    visited trace for debugging.
    """
    s = LifoQueue()
    s.put(startnode)
    visitednodes = []
    path = {}
    path[vector_to_int(startnode)] = None
    while not s.empty():
        currentnode = s.get()
        if currentnode == endnode:
            break
        # Bug fix: the original appended *startnode* here on every
        # iteration, so the visited trace never recorded the node
        # actually being expanded.
        visitednodes.append(currentnode)
        for nextnode in graph.find_neighbors(currentnode):
            # membership in `path` doubles as the visited check
            if vector_to_int(nextnode) not in path:
                s.put(nextnode)
                visitednodes.append(nextnode)
                path[vector_to_int(nextnode)] = nextnode - currentnode
    print(visitednodes)
    print("*******************************************")
    print(path)
    return path
# --- asset loading and world setup --------------------------------------
icon_dir = path.join(path.dirname(__file__), '')
home_img = pyg.image.load(path.join(icon_dir, 'd.png')).convert_alpha()
home_img = pyg.transform.scale(home_img, (30, 30))
home_img.fill((255, 255, 255, 255), special_flags=pyg.BLEND_RGBA_MULT)
cross_img = pyg.image.load(path.join(icon_dir, 's.png')).convert_alpha()
cross_img = pyg.transform.scale(cross_img, (30, 30))
cross_img.fill((255, 255, 255, 255), special_flags=pyg.BLEND_RGBA_MULT)
# One arrow per movement direction, rotated from the east-pointing base image.
arrows = {}
arrow_img = pyg.image.load(path.join(icon_dir, 'arrow3.png')).convert_alpha()
arrow_img = pyg.transform.scale(arrow_img, (15, 15))
for dir in [(1, 0), (0, 1), (-1, 0), (0, -1), (1, 1), (1, -1), (-1, 1), (-1, -1)]:
    arrows[dir] = pyg.transform.rotate(arrow_img, vector(dir).angle_to(vector(1, 0)))

graph = CostGrid(TILE_WIDTH, TILE_HEIGHT)
walls = [(2, 1), (3, 1), (4, 1), (1, 1), (4, 2), (4, 3), (4, 4), (4, 5), (3, 5), (2, 5), (2, 3), (1, 3), (0, 3), (0, 4),(0, 6), (0, 5),
(0, 7), (1, 7), (2, 7), (2, 8), (2, 9), (2, 10), (2, 11), (0, 11), (0, 12), (0, 13), (1, 13), (2, 13), (3, 13), (4, 13), (5, 13),
(3, 8), (4, 8), (5, 8), (7, 8), (6, 8), (5, 15), (5, 16), (5, 17), (5, 18), (6, 16), (7, 16), (8, 16), (9, 16),(18, 13),
(10, 16), (11, 16), (7, 10), (8, 10), (9, 10), (10, 10), (11, 10), (11, 9), (11, 8), (11, 7), (11, 6), (11, 5), (12, 5), (13, 5),
(12, 10), (13, 10), (14, 10), (17, 10), (16, 10), (17, 9), (17, 8), (17, 7), (16, 7), (15, 7), (16, 6), (16, 5), (16, 4), (17, 4),
(18, 4), (18, 2), (17, 2), (16, 2), (15, 2), (14, 2), (14, 1), (13, 1), (12, 1), (11, 1), (10, 1), (9, 1), (8, 1), (8, 2), (8, 3),
(8, 4), (8, 5), (7, 4), (13, 11), (13, 13), (13, 14), (12, 13), (14, 13), (15, 13), (16, 13), (16, 14), (16, 15), (16, 16),
(18, 12), (19, 12), (8, 13), (9, 13), (9, 14), (9, 19), (11, 19), (12, 19), (10, 19), (13, 19), (0, 16), (1, 16), (2, 16), (2, 17),
(2, 18), (14, 17), (15, 17), (16, 17), (17, 17), (18, 18)]
for wall in walls:
    graph.walls.append(vector(wall))
startnode = vector(5, 2)
endnode = vector(14, 9)
path = AStar(graph, startnode, endnode)

# --- main loop -----------------------------------------------------------
running = True
while running:
    clock.tick(FPS)
    for event in pyg.event.get():
        if event.type == pyg.QUIT:
            running = False
        if event.type == pyg.KEYDOWN:
            if event.key == pyg.K_ESCAPE:
                running = False
            if event.key == pyg.K_m:
                # dump the wall list for saving
                # Bug fix: the grid object is called ``graph``; ``g`` was
                # an undefined name and pressing 'm' crashed the program.
                print([(int(loc.x), int(loc.y)) for loc in graph.walls])
        if event.type == pyg.MOUSEBUTTONDOWN:
            mpos = vector(pyg.mouse.get_pos()) // TILE_SIZE
            if event.button == 1:
                # Left click toggles a wall, then the path is recomputed.
                if mpos in graph.walls:
                    graph.walls.remove(mpos)
                else:
                    graph.walls.append(mpos)
                startnode = mpos
                # Bug fix: endnode must be a vector — a bare (4, 4) tuple
                # has no .x/.y and crashed the path-drawing loop below.
                endnode = vector(4, 4)
                path = AStar(graph, startnode, endnode)
                print(startnode)
    pyg.display.set_caption("{:.2f}".format(clock.get_fps()))
    screen.fill(WHITE)
    # fill explored area
    for node in path:
        x, y = node
        rect = pyg.Rect(x * TILE_SIZE, y * TILE_SIZE, TILE_SIZE, TILE_SIZE)
        pyg.draw.rect(screen, EXPLORED, rect)
    draw_grid()
    graph.draw()
    # draw path from start to goal by walking backwards along the stored
    # step vectors
    currentnode = endnode
    while currentnode != startnode:
        x = currentnode.x * TILE_SIZE + TILE_SIZE / 2
        y = currentnode.y * TILE_SIZE + TILE_SIZE / 2
        img = arrows[vector_to_int(path[(currentnode.x, currentnode.y)])]
        r = img.get_rect(center=(x, y))
        screen.blit(img, r)
        # find next in path
        currentnode = currentnode - path[vector_to_int(currentnode)]
    draw_icons()
    pyg.display.flip()
import asyncio
from dffml import Model, Features, Feature, train
async def main():
    # Load the model using the entrypoint listed on the model plugins page
    SLRModel = Model.load("slr")

    # Configure the model: train on the "Years" feature to predict "Salary";
    # the trained state is persisted under the given location.
    model = SLRModel(
        features=Features(Feature("Years", int, 1)),
        predict=Feature("Salary", int, 1),
        location="slr-model",
    )

    # Train on two example records.  The extra keys ("Expertise", "Trust")
    # travel with the records; only the configured "Years" feature is used.
    await train(
        model,
        {"Years": 0, "Expertise": 1, "Trust": 0.1, "Salary": 10},
        {"Years": 1, "Expertise": 3, "Trust": 0.2, "Salary": 20},
    )


if __name__ == "__main__":
    asyncio.run(main())
|
from pose_estimation import PoseEstimation
import cv2
from args import get_args, show_args
import numpy as np
import pickle
from collections import deque
import time
import telegram_send
import threading
from concurrent.futures import ThreadPoolExecutor
class FallDetector:
    """A class that sends an alert via Telegram when it detects a fall or
    recovery in the video inputs."""

    def __init__(self, args):
        # Get args
        self.frame_rate = args.frame_rate
        self.telegram_alert = args.telegram_alert
        self.chunk_seconds = args.chunk_seconds
        self.display_video = args.display_video
        self.posenet_model_path = args.posenet_model_path
        self.video_name = "temp/state"
        show_args(args)
        # Load human state classifier model
        self.model = pickle.load(open(args.fall_model_path, 'rb'))
        with open(args.path_model_header) as f:
            self.model_header = f.readline().split(",")
        # Single worker thread so Telegram alerts are sent in order.
        if self.telegram_alert:
            self.threads_pool = ThreadPoolExecutor(max_workers=1)

    def estimate_pose(self, camera_url, camera_name="Unknow"):
        """ Gets the keypoints using a PoseNet model of each frame in `camera_url` video and once the queue is full,
        check the status by calling check_status method, empties the queue and repeat the process.

        :param camera_url: The Url/path to the input video
        :param camera_name: A descriptive camera name (default: "Unknow")
        """
        pe = PoseEstimation(self.posenet_model_path)
        camera = cv2.VideoCapture(camera_url)
        # Subsample the input stream down to self.frame_rate frames/second.
        video_rate = int(np.round(camera.get(cv2.CAP_PROP_FPS) / self.frame_rate))
        total_frames = self.frame_rate * self.chunk_seconds
        print("input frames per seconds", camera.get(cv2.CAP_PROP_FPS))
        state = "Nothing"
        video_keypoints = deque(maxlen=total_frames)
        video_to_send = deque(maxlen=total_frames)
        frame_number = -1
        while True:
            ret, frame = camera.read()
            if not ret:
                print("No image could be obtained from the camera")
                break
            frame_number = (frame_number + 1) % video_rate
            if frame_number == 0:
                # Resize img
                frame = pe.make_square(frame, pe.expected_pixels)
                # Save the frame
                if self.telegram_alert:
                    video_to_send.append(np.copy(frame))
                # Get body parts
                pose = pe.get_pose_estimation(frame)
                # Normalizing scale
                max_val = np.abs(np.max(pose))
                pose[:] = pose[:] / max_val
                video_keypoints.append(np.reshape(pose, -1))
                if len(video_keypoints) == total_frames:
                    state = self.report_state(np.reshape(video_keypoints, (1, -1)), np.copy(video_to_send),
                                              camera_name, pe.expected_pixels)
                    # Clear queue
                    video_keypoints.clear()
                    video_to_send.clear()
                if self.display_video:
                    self.show_results(frame, camera_name, state)
            # Bug fix: the original tested `cv2.waitKey(1) and 0xFF == ord('q')`,
            # which compares the constant 0xFF (255) against ord('q') (113) and
            # is always False, so pressing 'q' never quit.  The key code must
            # be masked with `& 0xFF` instead.
            if self.display_video and (cv2.waitKey(1) & 0xFF) == ord('q'):
                break
        camera.release()
        cv2.destroyAllWindows()

    def report_state(self, video_keypoints, video_to_send, camera_name, video_dim):
        """ Checks if in `video_keypoints` there is a fall or recovery using the HSC model, if so, calls the report
        method and return the state.

        :param video_keypoints: A queue containing the keypoints of a video chunk
        :param video_to_send: A queue containing the frames of a video chunk
        :param camera_name: A descriptive camera name
        :param video_dim: The dimension of the output video
        :return: A string with the state obtained: "Fall" / "Nothing" / "Recover"
        """
        state = str(self.model.predict(video_keypoints)[0])
        if (state == "Fall" or state == "Recover") and self.telegram_alert:
            # Alerts go through the single-worker pool so video writing and
            # Telegram I/O never block the capture loop.
            self.threads_pool.submit(self.report, video_to_send, video_dim, state, camera_name)
        return state

    def report(self, video_to_send, video_dim, state, camera_name):
        """ Sends a message containing the `state`, the `camera_name` that detected it and the `video_to_send`
        with dimensions `video_dim`x`video_dim` via Telegram.

        :param video_to_send: A queue containing the frames of the fall / recover
        :param video_dim: The dimension of the output video
        :param state: A string with the state obtained: "Fall" / "Nothing" / "Recover"
        :param camera_name: A descriptive camera name
        """
        file_video_name = "{}{}.mp4".format(self.video_name, camera_name)
        video = cv2.VideoWriter(file_video_name, cv2.VideoWriter_fourcc(*'mp4v'),
                                self.frame_rate, (video_dim, video_dim))
        for frame in video_to_send:
            video.write(frame)
        video.release()
        alert_icon = "⚠"
        if state == "Recover":
            alert_icon = "👍🏼"
        message = "{}{} detected{}: {}".format(alert_icon, state, camera_name, alert_icon)
        telegram_send.send(messages=[message])
        telegram_send.send(captions=[message], videos=[open(file_video_name, 'rb')])

    def show_results(self, frame, camera, state):
        """ Displays the current `frame` of the live video from the `camera` and the detected `state`

        :param frame: The current frame to show
        :param camera: The name of the camera which the frame is obtained.
        :param state: The state detected by the method `report_state`.
        """
        org = (40, 40)
        color = (255, 255, 0)
        thickness = 1
        font_scale = 1
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.imshow(camera, cv2.putText(frame, state, org, font, font_scale, color, thickness, cv2.LINE_AA))
if __name__ == "__main__":
    # Parse the command-line arguments.
    parser = get_args()
    args = parser.parse_args()
    # Start the program
    fall_detector = FallDetector(args)
    # Read the camera list: one "<url>, <name>" pair per line.
    # Robustness fix: use a context manager so the file is closed.
    with open(args.path_cameras, 'r') as path_cameras:
        for line in path_cameras:
            # Bug fix: lines read from a file keep their trailing newline,
            # so the original `line == ''` never matched blank lines.
            if line.startswith("#") or not line.strip():
                continue
            parts = line.strip().split(", ")
            print("Starting camera {}".format(parts[1]))
            threading.Thread(target=fall_detector.estimate_pose,
                             args=(parts[0], parts[1],)).start()
import numpy as np
from src.data import Case, Matter
def trivial_reducer(c: Case) -> np.ndarray:
    """
    Flatten all visible matters of a case onto one canvas.

    Starts from a canvas filled with the case's background color and pastes
    each visible matter's non-background cells at its (x0, y0) offset.
    Asserts that two visible matters never disagree on the same cell.

    :param c: the Case whose matters are piled up.
    :return: integer array of shape ``c.shape`` with the combined values.
    """
    # paste background
    # Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int selects the same default integer dtype.
    repr_values = np.ones(c.shape, dtype=int) * c.background_color
    # collect values
    m: Matter
    for m in c.matter_list:
        if not m.bool_show:
            continue
        for i in range(m.shape[0]):
            for j in range(m.shape[1]):
                if m.values[i, j] != m.background_color:
                    if m.values[i, j] != repr_values[m.x0 + i, m.y0 + j]:
                        # overlapping matters must not conflict
                        assert repr_values[m.x0 + i, m.y0 + j] == c.background_color
                    repr_values[m.x0 + i, m.y0 + j] = m.values[i, j]
    return repr_values
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import homepilot_utils
import xbmc
class HomePilotBaseObject:
    """
    class which represents a single device
    """

    def __init__(self, device):
        """
        constructor of the class

        Arguments:
        device -- dictionary with the device attributes
        """
        self.device = device
        self._name = device["name"]
        # NOTE(review): attribute keeps the original "descriprion" typo;
        # renaming it could break external users of this module.
        self._descriprion = device["description"]
        self._did = device["did"]
        self._position = device["position"]
        self._deviceGroup = device["deviceGroup"]
        # status mirrors the position value at construction time
        self._status = device["position"]
        self._sync = device["sync"]
        self._icon_set = device["iconsetKey"]
        # "iconSetInverted" may be absent; .get() then yields None
        self._icon_set_inverted = device.get("iconSetInverted")

    def get_name(self):
        """
        returns the device name
        """
        return self._name

    def get_device_id(self):
        """
        return the device id
        """
        return self._did

    def get_position(self):
        """
        gets the current position of the device on a scale from 0 to 100
        """
        return self._position

    def get_devicegroup(self):
        """
        returns the devicegroup the device belongs to
        Schalter:1, Sensoren:3, Rollos:2, Thermostate:5, Dimmer:4, Tore:8
        """
        return self._deviceGroup

    def get_status(self):
        """
        returns the current status
        """
        return self._status

    def get_description(self):
        """returns the free-text device description"""
        return self._descriprion

    def get_sync(self):
        """returns the device sync state as delivered by the API"""
        return self._sync

    def get_iconset_inverted(self):
        """returns the normalized inverted-iconset flag"""
        return homepilot_utils.get_iconset_inverted(self._icon_set_inverted)

    def get_icon(self):
        """returns the icon matching iconset, position and device group"""
        return homepilot_utils.get_icon(self._icon_set, self._icon_set_inverted, self._position, self._deviceGroup)

    def get_display_value(self):
        """returns the human-readable value for position and group"""
        position = self.get_position()
        group = self.get_devicegroup()
        return homepilot_utils.get_display_value(position, group)
class Automation:
    """Read-only view over a device's automation trigger properties."""

    def __init__(self, properties):
        self.properties = properties

    def _prop(self, key):
        # Every getter is a plain dictionary lookup (KeyError if absent).
        return self.properties[key]

    def get_dawn(self):
        return self._prop("dawn")

    def get_dusk(self):
        return self._prop("dusk")

    def get_time(self):
        return self._prop("time")

    def get_wind(self):
        return self._prop("wind")

    def get_temperature(self):
        return self._prop("temperature")

    def get_generic(self):
        return self._prop("generic")

    def get_trigger(self):
        return self._prop("trigger")

    def get_closing_contact(self):
        return self._prop("closingContact")

    def get_smoke(self):
        return self._prop("smoke")

    def get_sun(self):
        return self._prop("sun")

    def get_manual(self):
        return self._prop("manual")

    def get_dust(self):
        return self._prop("dust")

    def get_favored(self):
        return self._prop("favored")

    def get_smartphone(self):
        return self._prop("smartphone")

    def get_motion(self):
        return self._prop("motion")

    def get_temperator(self):
        return self._prop("temperator")

    def get_warning(self):
        return self._prop("warning")

    def get_rain(self):
        return self._prop("rain")
class Device(HomePilotBaseObject):
    """A controllable HomePilot device with availability and error state."""

    def __init__(self, device):
        HomePilotBaseObject.__init__(self, device)
        self._available = device["avail"]
        self._hasErrors = device["hasErrors"] != 0
        self._groups = device["groups"]
        self._favoredId = device["favoredId"]
        self._automated = device["automated"] != 0
        self._properties = device["properties"]

    def has_errors(self):
        """
        returns if the device has errors
        """
        return self._hasErrors

    def is_available(self):
        """
        returns if the device is available
        """
        return self._available

    def get_favoredId(self):
        """returns the favorite slot id (-1 when not favored)"""
        return self._favoredId

    def get_icon(self):
        """returns the device icon; unreachable or faulty devices show a warning"""
        # Idiom fix: `not self.is_available()` instead of `== False`.
        if not self.is_available() or self.has_errors():
            return "warnung_72.png"
        return HomePilotBaseObject.get_icon(self)

    def is_automated(self):
        """returns if any automation is active for this device"""
        return self._automated

    def get_automationen(self):
        """returns the automation triggers of this device"""
        return Automation(self._properties)

    def is_favored(self):
        # -1 marks "no favorite slot assigned"
        return self._favoredId != -1
class Meter(HomePilotBaseObject):
    """A HomePilot sensor device together with its measured data payload."""

    def __init__(self, device, data):
        """Wrap the hub's *device* attributes and attach the sensor *data*."""
        HomePilotBaseObject.__init__(self, device)
        self._data = data

    def get_data(self):
        """Return the raw sensor data payload."""
        return self._data
class Group:
    """A named group of devices as reported by the HomePilot hub."""

    def __init__(self, group):
        # Keep the raw payload around and cache the fields we expose.
        self.group = group
        self._gid = group["gid"]
        self._name = group["name"]
        self._description = group["description"]

    def get_group_id(self):
        """Return the numeric group id."""
        return self._gid

    def get_name(self):
        """Return the group's display name."""
        return self._name

    def get_description(self):
        """Return the group's description text."""
        return self._description
class Action:
    """A single action inside a scene: a device command plus display metadata."""

    def __init__(self, action):
        """Build an action view from the hub's *action* dictionary."""
        self._did = action["did"]
        self._type = action["type"]
        self._name = action["name"]
        self._description = action["description"]
        self._iconset = action["iconset"]
        self._iconsetInverted = action["iconsetInverted"]
        self._cmdId = action["cmdId"]
        # "param" is optional in the payload.
        self._param = action.get("param")

    def get_did(self):
        """Return the id of the device this action targets."""
        return self._did

    def get_name(self):
        """Return the action's display name."""
        return self._name

    def get_description(self):
        """Return the action's description text."""
        return self._description

    def get_icon(self):
        """Return the icon for this action based on command id and parameter."""
        if self._cmdId == 666:  # sensor pseudo-command
            return homepilot_utils.get_action_sensor_icon()
        elif self._param is not None:
            # Bug fix: the device group argument was previously passed as the
            # builtin `type` instead of this action's `self._type`.
            return homepilot_utils.get_icon(self._iconset, self._iconsetInverted, self._param, self._type)
        elif self._cmdId == 10 or self._cmdId == 2:
            # Commands 10/2 render as fully "on" (position 100).
            return homepilot_utils.get_icon(self._iconset, self._iconsetInverted, 100, self._type)
        else:
            return homepilot_utils.get_icon(self._iconset, self._iconsetInverted, 0, self._type)

    def get_cmdId(self):
        """Return the command id executed by this action."""
        return self._cmdId

    def get_device_group(self):
        """Return the device group (the payload's "type" field)."""
        return self._type

    def get_param(self):
        """Return the optional command parameter, or None if absent."""
        return self._param
class Scene:
    """A HomePilot scene: a named, optionally-automated bundle of actions."""

    def __init__(self, scene):
        """Build a scene view from the hub's *scene* dictionary."""
        self._sid = scene["sid"]
        self._name = scene["name"]
        self._description = scene["description"]
        self._is_executable = scene["isExecutable"]
        self._sync = scene["sync"]
        self._groups = scene["groups"]
        # Bug fix: previously `_actions` was only assigned when the "actions"
        # key was present, so get_actions() raised AttributeError otherwise.
        self._actions = scene.get("actions", [])
        self._properties = scene["properties"]
        self._is_active = scene["isActive"]
        self._favored = scene["favoredId"]

    def get_id(self):
        """Return the scene id."""
        return self._sid

    def get_name(self):
        """Return the scene's display name."""
        return self._name

    def get_actions(self):
        """Return the scene's actions wrapped as Action objects."""
        return [Action(x) for x in self._actions]

    def get_automationen(self):
        """Return the scene's automation flags wrapped in an Automation view."""
        return Automation(self._properties)

    def is_executable(self):
        """Return True if the scene can be executed manually."""
        return self._is_executable == 1

    def is_active(self):
        """Return True if the scene is currently active."""
        return self._is_active == 1

    def is_favored(self):
        """Return True if the scene is marked as a favourite."""
        return self._favored > 0

    def get_sync(self):
        """Return the sync flag of this scene."""
        return self._sync

    def get_description(self):
        """Return the scene's description text."""
        return self._description
# Sum each adjacent pair of the list (kept within the 60-char limit).
make_new_list = lambda l: [a + b for a, b in zip(l, l[1:])]
'''
You get a list of integers. Return a new list by adding 2 elements
surrounding each comma in the list.
Examples
If you get [1, 1, 1, 1], return [2, 2, 2] because [1+1, 1+1, 1+1]
If you get [1, 2, 3, 4], return [3, 5, 7] because [1+2, 2+3, 3+4]
If you get [1, 10, 100], return [11, 110] because [1+10, 10+100]
Restriction
Your code length should not be longer than 60 characters.
'''
#!/usr/bin/env python
import logging
import numpy as np
from argparse import ArgumentParser
from theano import tensor
from blocks.algorithms import GradientDescent, Scale, Adasecant, AdaDelta, Momentum
from blocks.bricks import MLP, WEIGHT, Logistic
from blocks.bricks.cost import SquaredError, BinaryCrossEntropy
from blocks.initialization import IsotropicGaussian, Constant, Sparse
from fuel.streams import DataStream
from fuel.transformers import Flatten
from fuel.datasets import MNIST
from fuel.schemes import SequentialScheme, ShuffledScheme
from blocks.filter import VariableFilter
from blocks.graph import ComputationGraph
from blocks.model import Model
from blocks.monitoring import aggregation
from blocks.extensions import FinishAfter, Timing, Printing
from blocks.extensions.saveload import Checkpoint
from blocks.extensions.monitoring import (DataStreamMonitoring,
TrainingDataMonitoring)
from blocks.extensions.plot import Plot
from blocks.main_loop import MainLoop
def create_model():
    """Create the deep autoencoder model with Blocks, and load MNIST.

    Returns the symbolic input variable, the binary cross-entropy training
    cost, and a squared-error channel used only for monitoring.
    """
    # 784-1000-500-250-30-250-500-1000-784 autoencoder; the 30-unit code
    # layer is linear (activation None), the rest are logistic sigmoid.
    # Weights: sparse init with 15 non-zeros per unit, Gaussian values.
    mlp = MLP(activations=[Logistic(), Logistic(), Logistic(), None,
                           Logistic(), Logistic(), Logistic(), Logistic()],
              dims=[784, 1000, 500, 250, 30, 250, 500, 1000, 784],
              weights_init=Sparse(15, IsotropicGaussian()),
              biases_init=Constant(0))
    mlp.initialize()
    x = tensor.matrix('features')
    x_hat = mlp.apply(tensor.flatten(x, outdim=2))
    # Squared error is tracked for monitoring; BCE is the optimised cost.
    squared_err = SquaredError().apply(tensor.flatten(x, outdim=2), x_hat)
    cost = BinaryCrossEntropy().apply(tensor.flatten(x, outdim=2), x_hat)
    return x, cost, squared_err
def main(save_to, num_epochs, bokeh=False, step_rule=False,
         batch_size=200, lambda1=0.0, lambda2=0.0):
    """Train the MNIST autoencoder and checkpoint state to *save_to*.

    :param save_to: path for the Checkpoint extension.
    :param num_epochs: number of training epochs.
    :param bokeh: enable live Bokeh plotting.
    :param step_rule: 'sgd', 'adadelta', or anything else for Adasecant.
    :param batch_size: training minibatch size.
    :param lambda1: L1 weight-penalty coefficient (0 disables).
    :param lambda2: L2 weight-penalty coefficient (0 disables).
    """
    random_seed = 0xeffe
    x, cost, squared_err = create_model()
    cg = ComputationGraph([cost])
    weights = VariableFilter(roles=[WEIGHT])(cg.variables)
    # Optional L1/L2 regularisation on all weight matrices.
    if lambda1 > 0.0:
        cost += lambda1 * sum([w.__abs__().sum() for w in weights])
    if lambda2 > 0.0:
        cost += lambda2 * sum([(w ** 2).sum() for w in weights])
    cost.name = 'final_cost'
    mnist_train = MNIST("train", sources=['features'])
    mnist_test = MNIST("test", sources=['features'])
    # NOTE(review): comparison is case-sensitive -- the argparse default
    # 'Adasecant' always falls through to the else branch; only the exact
    # lowercase strings 'sgd'/'adadelta' select the other optimizers.
    if step_rule == 'sgd':
        step_rule = Momentum(learning_rate=0.01, momentum=.95)
        print "Using vanilla SGD"
    elif step_rule == 'adadelta':
        step_rule = AdaDelta(decay_rate=0.95)
        print "Using Adadelta"
    else:
        step_rule = Adasecant(delta_clip=25, use_adagrad=True)
        print "Using Adasecant"
    algorithm = GradientDescent(
        cost=cost, params=cg.parameters,
        step_rule=step_rule)
    # Monitor test cost on sequential 500-example batches; monitor training
    # cost, squared error and gradient norm per epoch; checkpoint each epoch.
    extensions = [Timing(),
                  FinishAfter(after_n_epochs=num_epochs),
                  DataStreamMonitoring(
                      [cost],
                      Flatten(
                          DataStream.default_stream(
                              mnist_test,
                              iteration_scheme=SequentialScheme(
                                  mnist_test.num_examples, 500)),
                          which_sources=('features',)),
                      prefix="test"),
                  TrainingDataMonitoring(
                      [cost, squared_err,
                       aggregation.mean(algorithm.total_gradient_norm)],
                      prefix="train",
                      after_epoch=True),
                  Checkpoint(save_to, save_separately=['log', '_model']),
                  Printing()]
    if bokeh:
        extensions.append(
            Plot('MNIST Autoencoder',
                 channels=[
                     ['test_final_cost'],
                     ['train_total_gradient_norm']], after_epoch=True))
    # Shuffled training stream with a fixed seed for reproducibility.
    main_loop = MainLoop(
        algorithm,
        Flatten(
            DataStream.default_stream(
                mnist_train,
                iteration_scheme=ShuffledScheme(
                    mnist_train.num_examples, batch_size=batch_size,
                    rng=np.random.RandomState(random_seed))),
            which_sources=('features',)),
        model=Model(cost),
        extensions=extensions)
    main_loop.run()
if __name__ == "__main__":
    # Command-line entry point: parse options and hand off to main().
    logging.basicConfig(level=logging.INFO)
    parser = ArgumentParser("Training an Autoencoder on MNIST.")
    parser.add_argument("--num-epochs", type=int, default=200,
                        help="Number of training epochs to do.")
    parser.add_argument("save_to", default="mnist_ae.pkl", nargs="?",
                        help=("Destination to save the state of the training "
                              "process."))
    parser.add_argument("--bokeh", action='store_true',
                        help="Set if you want to use Bokeh ")
    parser.add_argument("--step-rule", default='Adasecant',
                        help="Optimizer")
    parser.add_argument("--batch-size", type=int, default=200,
                        help="Batch size.")
    args = parser.parse_args()
    # lambda1/lambda2 regularisation keeps its defaults (0.0) here.
    main(args.save_to, args.num_epochs, args.bokeh, args.step_rule,
         args.batch_size)
|
import connexion
import six
import csv
import os, fnmatch
import numpy as np
from PIL import Image
from io import BytesIO
import base64
from swagger_server.models.enrollment import Enrollment # noqa: E501
from swagger_server import util
from swagger_server.face_vector.face_vector import FaceVector
# Shared face-embedding model instance used by all enrollment endpoints.
faceVector = FaceVector()
# CSV of "name v0 ... v127" rows, stored inside storageFolder.
storageFile = "users.csv"
# NOTE(review): hard-coded absolute path -- consider making this configurable.
storageFolder = "/home/inno/enrollment"
def fileParser(folder, filename):
    """Read stored enrollments from a space-delimited CSV.

    Each valid row is ``name v0 ... v127``; returns a dict mapping name to
    a 128-element float numpy array. Returns an empty dict when the file
    does not exist.
    """
    registrations = dict()
    path = os.path.join(folder, filename)  # portable join instead of '+ "/" +'
    if os.path.isfile(path):
        with open(path) as csv_file:
            for row in csv.reader(csv_file, delimiter=' '):
                # Skip malformed rows: exactly 1 name + 128 vector components.
                if len(row) == 129:
                    # Vectorized conversion replaces the element-wise loop.
                    registrations[row[0]] = np.array(row[1:], dtype=float)
    return registrations
def fileWriter(folder, filename, registrations):
    """Write enrollments back to the space-delimited CSV.

    :param registrations: dict mapping name -> 128-element vector; each
        entry becomes one ``name v0 ... v127`` row, overwriting the file.
    """
    path = os.path.join(folder, filename)  # portable join instead of '+ "/" +'
    with open(path, mode='w+', newline='') as csv_file:
        writer = csv.writer(csv_file, delimiter=' ', quotechar='"',
                            quoting=csv.QUOTE_MINIMAL)
        for key, value in registrations.items():
            # Build the row in one expression instead of an append loop.
            writer.writerow([key] + [value[i] for i in range(128)])
def find(pattern, path):
    """Return all paths under *path* whose basename matches glob *pattern*."""
    matches = []
    for root, _dirs, files in os.walk(path):
        matches.extend(
            os.path.join(root, name)
            for name in files
            if fnmatch.fnmatch(name, pattern)
        )
    return matches
def delete_enrollment(name):  # noqa: E501
    """Delete the enrollment stored for *name*.

    :param name: name (of person) to delete enrollment for
    :type name: str
    :rtype: None

    Not implemented yet -- returns the generated-stub placeholder string.
    """
    return 'do some magic!'
def get_enrollment(name):  # noqa: E501
    """Return the first stored enrollment file for *name*.

    :param name: name (of person) for which enrollment was done
    :type name: str
    :rtype: (file, 200) on success, or an (error message, 400) tuple
    """
    filenames = find(name + '.*', storageFolder)
    if len(filenames) > 0:
        # NOTE(review): the handle is returned open and never closed here --
        # presumably the web framework consumes and closes it; confirm.
        file = open(filenames[0])
        return file, 200
    return 'No enrollment found!', 400
def get_enrollment_image(name, image_nr):  # noqa: E501
    """Return one enrolled image for *name* (numbering starts at 0).

    :param name: name (of person) for which enrollment was done
    :type name: str
    :param image_nr: image number to get
    :type image_nr: int
    :rtype: ByteArray

    Not implemented yet -- returns the generated-stub placeholder string.
    """
    return 'do some magic!'
def update_enrollment(name, file):  # noqa: E501
    """Enroll an identification with an image of the person's face.

    If a sufficiently similar face vector already exists it is replaced by
    the new one. To remove a single reference image, use DELETE /enroll and
    then POST /enroll. # noqa: E501

    :param name: name of the holder of the ticket
    :type name: str
    :param file: file to upload
    :type file: werkzeug.datastructures.FileStorage
    :rtype: Enrollment

    NOTE(review): this function and update_enrollment_base64 share most of
    their logic -- a common helper would remove the duplication.
    """
    # check if storage folder exists
    if not os.path.exists(storageFolder):
        os.makedirs(storageFolder)
    extension = file.filename.split(".")[-1]
    # calculate the face vector (also saves the image under storageFolder)
    img = Image.open(file)
    face_vec = faceVector.get_face_vector(img, storageFolder + '/' + name + '.' + extension)
    if face_vec is None:
        return 'No face detected!', 400
    # get all existing entries
    registration_dict = fileParser(storageFolder, storageFile)
    # check if the vector is similar to an existing vector and remove the
    # old entry in this case
    twin = None
    for key, values in registration_dict.items():
        dist = np.linalg.norm(values - face_vec)
        # 0.6 -- presumably the usual face-recognition same-person distance
        # threshold; confirm against the FaceVector model used.
        if dist < 0.6:
            twin = key
            break
    if twin:
        # remove the entry in the dictionary
        del registration_dict[twin]
    # adding the new vector to the list
    registration_dict[name] = face_vec
    fileWriter(storageFolder, storageFile, registration_dict)
    enrollment = Enrollment(None, name, 1, None)
    return enrollment, 200
def update_enrollment_base64(name, image):  # noqa: E501
    """Enroll an identification with a Base64 image of the person's face.

    If a sufficiently similar face vector already exists it is replaced by
    the new one. To remove a single reference image, use DELETE /enroll and
    then POST /enroll. # noqa: E501

    :param name: name of the holder of the ticket
    :type name: str
    :param image: Base64 image to upload
    :type image: str
    :rtype: Enrollment

    NOTE(review): duplicates update_enrollment except for image decoding --
    a shared helper would remove the duplication.
    """
    # Decode the Base64 payload into a PIL image; saved as <name>.jpg.
    img = Image.open(BytesIO(base64.b64decode(image.encode("utf-8"))))
    face_vec = faceVector.get_face_vector(img, storageFolder + '/' + name + '.jpg')
    if face_vec is None:
        return 'No face detected!', 400
    # get all existing entries
    registration_dict = fileParser(storageFolder, storageFile)
    # check if the vector is similar to an existing vector and remove the
    # old entry in this case
    twin = None
    for key, values in registration_dict.items():
        dist = np.linalg.norm(values - face_vec)
        if dist < 0.6:
            twin = key
            break
    if twin:
        # remove the entry in the dictionary
        del registration_dict[twin]
    # adding the new vector to the list
    registration_dict[name] = face_vec
    fileWriter(storageFolder, storageFile, registration_dict)
    enrollment = Enrollment(None, name, 1, None)
    return enrollment, 200
|
import math
import numpy
# Flatten an array of n images to an array of n normalized vectors of pixels
# Input.shape : (n, x, y,)
# Output.shape : (n, x*y,)
def to_normalized_vector_list(images):
    """Flatten an array of n images to n normalized pixel vectors.

    Input shape (n, x, y) -> output shape (n, x*y); an already-flat
    (n, p) input is only normalized. Pixel values are scaled from
    0..255 to 0..1.

    :raises ValueError: for any other rank (was a silent ``return None``;
        the original TODO asked for unexpected shapes to be handled).
    """
    if len(images.shape) == 2:
        return images / 255
    elif len(images.shape) == 3:
        pixels_qt = images.shape[1] * images.shape[2]  # compute x*y
        return images.reshape(images.shape[0], pixels_qt) / 255  # flatten & normalize
    else:
        raise ValueError(
            'expected a 2-D or 3-D image array, got shape %r' % (images.shape,))
# Reconstitute an array of square image from an array of pixels vectors
# Input.shape : (n, x*y,)
# Output.shape : (n, x, y, 1)
def to_normalized_images(pixels_vectors):
    """Reshape n flat pixel vectors into n square single-channel images."""
    # Derive the side length from the vector length; it must be a perfect
    # square for the image to be reconstructed.
    side = math.sqrt(pixels_vectors.shape[1])
    if side != int(side):
        raise ValueError('pixels_vectors do not correspond to a square image')
    side = int(side)
    return numpy.array([vec.reshape(side, side, 1) for vec in pixels_vectors])
# Reconstitute an array of square image from an array of pixels vectors
# Input.shape : (n, x*y,) OR (n, x, y, 1)
# Output.shape : (n, x, y,)
def to_drawable_images(pixels_arrays):
    """Turn flat pixel vectors or (n, x, y, 1) arrays into drawable (n, x, y) images.

    Generalized per the original TODO: the image side is now derived from
    each element's pixel count instead of being hard-coded to 28, so any
    square resolution works (784-pixel MNIST inputs behave as before).

    :raises ValueError: if an element's pixel count is not a perfect square.
    """
    if not isinstance(pixels_arrays, (numpy.ndarray, numpy.generic)):  # not np array
        pixels_arrays = numpy.array(pixels_arrays)  # try to cast to np array
    images = []
    for pixels_array in pixels_arrays:
        side = math.isqrt(int(pixels_array.size))
        if side * side != pixels_array.size:
            raise ValueError(
                'pixel array of size %d is not a square image' % pixels_array.size)
        # Drops the channel dim (4-D input) or adds the y dim (flat vectors).
        images.append(pixels_array.reshape(side, side))
    return numpy.array(images)
|
from py2neo import Graph
# pip install py2neo
# Requires a running Neo4j instance (click start) holding an empty graph,
# with password "test".
graph = Graph(password="test")
graph.run("MATCH(n) DETACH DELETE n")  # Delete every node in case the graph is not empty
# Create Person nodes with a uniqueness constraint on name.
# NOTE(review): the same constraint is re-issued after every CREATE --
# presumably harmless repetition, but one CREATE CONSTRAINT would suffice.
graph.run("CREATE (p:Person {name: 'Giuseppe', age:19})")
graph.run("CREATE CONSTRAINT on (p:Person) ASSERT p.name IS UNIQUE")
graph.run("CREATE (p:Person {name: 'Pasquale', age:17})")
graph.run("CREATE CONSTRAINT on (p:Person) ASSERT p.name IS UNIQUE")
graph.run("CREATE (p:Person {name: 'Sergio', age:18})")
graph.run("CREATE CONSTRAINT on (p:Person) ASSERT p.name IS UNIQUE")
graph.run("CREATE (p:Person {name: 'Roberto', age:15})")
graph.run("CREATE CONSTRAINT on (p:Person) ASSERT p.name IS UNIQUE")
graph.run("CREATE (p:Person {name: 'Luigi', age:11})")
graph.run("CREATE CONSTRAINT on (p:Person) ASSERT p.name IS UNIQUE")
# KNOWS relationships between the people above.
graph.run("MATCH (p:Person {name: 'Luigi'}), (q:Person {name: 'Roberto'}) CREATE (p)-[:KNOWS]->(q)")
graph.run("MATCH (p:Person {name: 'Sergio'}), (q:Person {name: 'Pasquale'}) CREATE (p)-[:KNOWS]->(q)")
graph.run("MATCH (p:Person {name: 'Pasquale'}), (q:Person {name: 'Giuseppe'}) CREATE (p)-[:KNOWS]->(q)")
graph.run("MATCH (p:Person {name: 'Giuseppe'}), (q:Person {name: 'Sergio'}) CREATE (p)-[:KNOWS]->(q)")
graph.run("MATCH (p:Person {name: 'Luigi'}), (q:Person {name: 'Giuseppe'}) CREATE (p)-[:KNOWS]->(q)")
graph.run("CREATE (:Person {name:'Matteo'})-[:KNOWS]->(:Person {name:'Francesco'})")
# Car nodes with a uniqueness constraint on the plate ("targa").
graph.run("CREATE (p:Car {brand: 'Ford', targa: '123'})")
graph.run("CREATE CONSTRAINT on (c:Car) ASSERT c.targa IS UNIQUE")
graph.run("CREATE (p:Car {brand: 'Seat', targa: '124'})")
graph.run("CREATE CONSTRAINT on (c:Car) ASSERT c.targa IS UNIQUE")
graph.run("CREATE (p:Car {brand: 'Suzuki', targa: '143'})")
graph.run("CREATE CONSTRAINT on (c:Car) ASSERT c.targa IS UNIQUE")
graph.run("CREATE (p:Car {brand: 'Fiat', targa: '125'})")
graph.run("CREATE CONSTRAINT on (c:Car) ASSERT c.targa IS UNIQUE")
graph.run("CREATE (p:Car {brand: 'Audi', targa: '127'})")
graph.run("CREATE CONSTRAINT on (c:Car) ASSERT c.targa IS UNIQUE")
# OWNS relationships: person -> car.
graph.run("MATCH (p:Person {name: 'Luigi'}), (q:Car {targa: '123'}) CREATE (p)-[:OWNS]->(q)")
graph.run("MATCH (p:Person {name: 'Sergio'}), (q:Car {targa: '124'}) CREATE (p)-[:OWNS]->(q)")
graph.run("MATCH (p:Person {name: 'Pasquale'}), (q:Car {targa: '127'}) CREATE (p)-[:OWNS]->(q)")
graph.run("MATCH (p:Person {name: 'Giuseppe'}), (q:Car {targa: '143'}) CREATE (p)-[:OWNS]->(q)")
# Query: people Luigi knows who own a car.
result = graph.run("MATCH (b:Person {name: 'Luigi'})-[:KNOWS]->(a:Person)-[:OWNS]->(c:Car) RETURN a.name, a.age")
while result.forward():
    print(result.current)
# Query: every person-to-car relationship and its type.
result = graph.run("MATCH (b:Person)-[r]->(:Car) RETURN b, type(r)")
while result.forward():
    print(result.current)
#**********LIBRARY IMPORTS************
#os for system calls , time for delays so user can read output
import os, time
#**********INSTALLATION AND UPDATES***************************
#This script utilizes ffmpeg, youtube-dl and cdrdao
# NOTE(review): Python 2 print statements throughout -- this script needs a
# Python 2 interpreter.
print "Checking for youtube-dl and FFMpeg..."
time.sleep(3)
# NOTE(review): os.system runs each command in its own child shell, so this
# cd has no effect on subsequent commands.
os.system("cd /usr/local/bin")
if not os.path.exists('/usr/local/bin/youtube-dl'):
    print "youtube-dl is not installed. Installing now."
    time.sleep(3)
    # Pinned 2014 release, then self-updated via -U below.
    os.system("sudo wget https://yt-dl.org/downloads/2014.05.12/youtube-dl -O /usr/local/bin/youtube-dl")
    os.system("sudo chmod a+x /usr/local/bin/youtube-dl")
    os.system("sudo chmod rwx /usr/local/bin/youtube-dl")
    print "youtube-dl has been installed."
    print "Now updating youtube-dl..."
    os.system("sudo /usr/local/bin/youtube-dl -U")
else:
    print "Checking for update to youtube-dl..."
    os.system("sudo /usr/local/bin/youtube-dl -U")
if not os.path.exists('/usr/local/bin/ffmpeg'):
    print("FFMpeg is not installed. Installing now.")
    time.sleep(3)
    os.system("sudo wget http://ffmpeg.gusari.org/static/32bit/ffmpeg.static.32bit.latest.tar.gz -O /usr/local/bin/ffmpeg.tar.gz")
    os.system("sudo tar -zxvf /usr/local/bin/*.tar.gz -C /usr/local/bin")
    os.system("sudo chmod a+x /usr/local/bin/ffmpeg")
    os.system("sudo chmod a+x /usr/local/bin/ffprobe")
    os.system("sudo rm ffmpeg.tar.gz")
    print "FFMpeg has been installed."
else:
    print "FFMpeg is already installed."
print "Installing/Updating cdrdao through apt-get. This is for burning to CD-R. Install \
manually if you do not use apt-get and wish to burn CDs with this program instead of an external one."
time.sleep(5)
os.system("sudo apt-get install cdrdao")
os.system("clear")
#*************DOWNLOADING VIDEOS/CONVERTING TO MP3**********************
urls = []
currenturl = "1"
# Collect URLs until the user submits an empty line.
while currenturl != "":
    currenturl = raw_input('Enter URL (just hit ENTER to stop and begin downloading): ')
    if currenturl == "":
        break
    urls.append(currenturl)
print "Done with queue entry. Downloading videos from YouTube:"
time.sleep(3)
count = 1
# Zero-pad single-digit track numbers so filenames sort in queue order.
for i in urls:
    if count <= 9:
        os.system("/usr/local/bin/youtube-dl -o 'Track_0" + str(count) + "_-_%(title)s.%(ext)s' --restrict-filenames " + i)
    else:
        os.system("/usr/local/bin/youtube-dl -o 'Track_" + str(count) + "_-_%(title)s.%(ext)s' --restrict-filenames " + i)
    count = count + 1
print "Finished downloading queue. Finding downloaded videos: "
downloaded = []
# Strip the .mp4 extension so ffmpeg derives fresh .mp3 names below.
for file in os.listdir('.'):
    if file.endswith(".mp4"):
        os.rename(file, file[:-4])
        print file[:-4]
        downloaded.append(file[:-4])
print "Here are the found files: "
print '[%s]' % ', '.join(map(str, downloaded))
print "Now converting videos: "
time.sleep(3)
downloaded.sort()
# NOTE(review): filenames are interpolated into shell commands unquoted;
# names with spaces or shell metacharacters would break these calls
# (--restrict-filenames above mitigates this for downloaded files).
for x in downloaded:
    os.system('/usr/local/bin/ffmpeg -i ' + x + " " + x + '.mp3')
print "Finished converting. Cleaning up: "
time.sleep(3)
for file in downloaded:
    print "Deleting file " + file + "..."
    os.system("rm " + file)
#*************BURNING TO CD-R*******************
switch = raw_input("Would you like to burn the downloaded MP3 to CD-R? \
'y' for yes or anything else for no: ")
if switch == "y":
    # Convert every MP3 to WAV; cdrdao burns audio tracks from WAV files.
    for file in os.listdir('.'):
        if file.endswith(".mp3"):
            os.system("/usr/local/bin/ffmpeg -i " + file + " " + file + ".wav")
    wave = []
    for file in os.listdir('.'):
        if file.endswith(".wav"):
            wave.append(file)
    wave.sort()
    os.system("touch cd.toc")
    os.system("sudo chmod 777 cd.toc")
    # Build the cdrdao table-of-contents file listing each audio track.
    f = open('cd.toc', 'w')
    f.write('CD_DA\n\n')
    for z in wave:
        f.write('\n\nTRACK AUDIO\n')
        f.write('AUDIOFILE "' + z + '" 0')
    f.close()
    raw_input("Please place a blank CD-R into your CD drive, then hit ENTER:")
    print "Now burning CD..."
    os.system("cdrdao write cd.toc")
    # Remove the intermediate WAVs and the TOC once burned.
    for y in wave:
        print "Deleting file " + y + "..."
        os.system("rm " + y)
    os.system("rm cd.toc")
else:
    print "Skipping CD burning."
#*************POST-OPERATION ORGANIZATION*****************
name = raw_input("Give a name to the compilation you've made: ")
# Spaces would break the unquoted shell commands below.
name = name.replace(" ", "_")
os.system("mkdir " + name)
os.system("mv *.mp3 " + name)
print "Moved MP3 into a folder called " + name + "."
print "All finished. Enjoy! Hit enter to terminate program."
raw_input("")
|
# Simple interactive calculator: reads number, sign, number until the user
# enters something non-numeric (which exits via ValueError).
while True:
    try:
        n1 = float(input('введите первое число\n'))
        sign = input('введите знак\n')
        n2 = float(input('введите второе число\n'))
        if sign == '+':
            s = n1 + n2
        elif sign == '-':
            s = n1 - n2
        elif sign == '*':
            s = n1 * n2
        elif sign == '/':
            s = n1 / n2
        else:
            # Bug fix: an unrecognised sign previously left `s` unbound and
            # the next statement crashed with NameError.
            print('Неизвестный знак, попробуйте ещё раз')
            continue
        # Display whole results without the trailing ".0".
        s = str(s)
        if s[-2:] == '.0':
            s = s[:-2]
        print(f'Ответ: {s}')
    except ValueError:
        # Non-numeric input doubles as the "quit" command.
        print("До скорых встреч!")
        break
    except ZeroDivisionError:
        print('Чувак, делить на ноль нельзя :) !!!')
import boto3
import telemetry.telescope_ec2_age.desc_launch_conf as desc_launch_conf
import sys
from datetime import datetime
from telemetry.telescope_ec2_age.logger import get_app_logger
from botocore.exceptions import ClientError
# Module-wide application logger.
logger = get_app_logger()
# Region is fixed: all describe-images calls go to eu-west-2.
ec2_client = boto3.client('ec2', region_name='eu-west-2')
def dictionary_handler_assign():
    """Entry point: collect launch configurations, then compute image ages."""
    logger.info("Fetching EC2 image details from all Launch Configuration resources...")
    launch_confs = receive_launch_confs_from_launch_conf()
    return launch_dict(launch_confs)
def launch_dict(launch_conf_dict):
    """Map ASG name -> {image_id: age_seconds} for every launch configuration.

    Images whose age cannot be determined are skipped.
    """
    ages_by_asg = {}
    for asg_name, launch_confs in launch_conf_dict.items():
        for conf, image in launch_confs.items():
            logger.debug('asg name: ' + str(asg_name))
            logger.debug('imageid: ' + str(image))
            image_age = ami_time_handler(describe_image_ids(image))
            if image_age is not None:
                # NOTE(review): a later conf overwrites an earlier one for the
                # same ASG -- confirm this last-wins behaviour is intended.
                ages_by_asg[asg_name] = {image: image_age}
    return ages_by_asg
def receive_launch_confs_from_launch_conf():
    # Thin indirection over the describe-launch-configurations module,
    # kept as a seam for testing/mocking.
    return desc_launch_conf.handler_launch_images()
def describe_image_ids(image_id):
    """Return the CreationDate string for EC2 image *image_id*, or None.

    Returns None when the API call fails for any reason (logged), or when
    the response contains no images.
    """
    try:
        logger.debug("Fetching details for EC2 image with ID %s" % image_id)
        response = ec2_client.describe_images(
            ImageIds=[
                image_id
            ]
        )
        creation_date = None
        # If several images come back, the last one's CreationDate wins
        # (matches the original behaviour).
        for image in response["Images"]:
            logger.debug(image)
            creation_date = image["CreationDate"]
        return creation_date
    except ClientError as e:
        logger.error(str(e))
        return None
    except Exception:
        # Bug fix: the old trailing bare `except:` was unreachable for
        # ordinary errors, and its logging call passed a spurious positional
        # argument. logger.exception records the traceback properly.
        logger.exception("Unexpected error while describing image %s", image_id)
        return None
def ami_time_handler(creation_date_string):
    """Return the AMI's age in seconds given its CreationDate string.

    A None input passes through unchanged (upstream lookup failed).
    """
    if creation_date_string is None:
        return None
    created = datetime.strptime(
        creation_date_string, "%Y-%m-%dT%H:%M:%S.%fZ")
    age = datetime.now(created.tzinfo) - created
    logger.debug(('ami Timestamp: creationDate: ' + creation_date_string))
    logger.debug('ami Timestamp: timeobj: ' + str(created))
    logger.debug('ami Timestamp: delta: ' + str(age.total_seconds()))
    return age.total_seconds()
|
# -*- coding: utf-8 -*-
# author: inspurer(月小水长)
# create_time: 2021/8/24 8:25
# 运行环境 Python3.6+
# github https://github.com/inspurer
# 微信公众号 月小水长
# todo: add proxy
import requests
from lxml import etree
from time import sleep
# User-Agent strings rotated at random to make requests look less uniform.
HEADERS_LIST = [
    'Mozilla/5.0 (Windows; U; Windows NT 6.1; x64; fr; rv:1.9.2.13) Gecko/20101203 Firebird/3.6.13',
    'Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko',
    'Mozilla/5.0 (Windows; U; Windows NT 6.1; rv:2.2) Gecko/20110201',
    'Opera/9.80 (X11; Linux i686; Ubuntu/14.10) Presto/2.12.388 Version/12.16',
    'Mozilla/5.0 (Windows NT 5.2; RW; rv:7.0a1) Gecko/20091211 SeaMonkey/9.23a1pre'
]
# HTTP request timeout in seconds.
timeout = 10
import random
import traceback
'''
没把 cookie 配置在文件中的原因是
json 只能双引号包裹,cookie 中就有双引号,懒得转义,容易出错,直接放在 python 的单引号中,
'''
# Session cookie copied from a logged-in browser (the cookie is kept here in
# source rather than in the JSON config because cookies contain double quotes
# that JSON would require escaping -- see the note above).
Cookie = 'bidxxxxx 改成你自己的 cookie, path 是 statues'
def get_user_status(user_url='https://www.douban.com/people/170796758'):
    """Scrape every /statuses page of one Douban user and persist results.

    Resumes from the module-level ``cur_user_cur_page_index`` and saves
    progress/config through ``saveData``/``saveConfig``. Exits the process
    when the anti-bot page (HTTP 403) is hit so the user can solve the
    captcha and restart.
    """
    # TODO: replies, reshares and like counts from the status detail page.
    global config_json
    statuses_url = f'{user_url}/statuses'
    result_json = []
    title = None
    params = {
        'p': cur_user_cur_page_index
    }
    while True:
        try:
            response = requests.get(url=statuses_url, headers={'User-Agent': random.choice(HEADERS_LIST), 'Cookie': Cookie},
                                    timeout=timeout, params=params)
        except:
            # Network failure: stop paging; whatever was collected is saved below.
            print(traceback.format_exc())
            break
        if response.status_code == 403:
            # Anti-bot page: persist progress so the crawl can resume later.
            print('被识别出爬虫了,请打开豆瓣网页填写验证码')
            config_json['cur_user_cur_page_index'] = params['p']
            saveConfig()
            saveData(result_json, title)
            import sys
            sys.exit(0)
        html = etree.HTML(response.text.encode(response.encoding).decode('utf-8'))
        if not title:
            # Page title doubles as the output file name.
            title = html.xpath('//title/text()')[0].strip()
        search_items = html.xpath('//div[@class="stream-items"]/div[starts-with(@class,"new-status status-wrapper")]')
        if len(search_items) == 0:
            # No items: either the crawl is done, or (on page 1) the cookie
            # may have expired.
            if params['p'] <= 1:
                print(f'如果 {statuses_url} 主页有数据的话,请留意 cookie 是否失效')
            print('爬完了')
            break
        for si in search_items:
            if len(si.xpath('.//span[@class="reshared_by"]')) > 0 or len(si.xpath('.//div[contains(@class, "reshared")]')) > 0:
                # TODO: reshared statuses are skipped for now.
                print('转发的暂不处理')
                continue
            created_at = si.xpath('.//span[@class="created_at"]/@title')[0]
            status_url = si.xpath('.//div[@class="hd "]/@data-status-url')[0]
            status_text = si.xpath('.//div[@class="text"]')[0].xpath('string(.)').strip()
            # Classify the status by keywords in its text.
            if '看过' in status_text or '在看' in status_text or '想看' in status_text:
                # movie status
                status_movie = si.xpath('.//div[@class="content"]/div[@class="title"]/a')[0]
                status_movie_title = status_movie.xpath('./text()')[0]
                status_movie_url = status_movie.xpath('./@href')[0]
                result_json.append({
                    'created_at': created_at,
                    'status_type': 'movie',
                    'status_url': status_url,
                    'status_text': status_text,
                    'status_movie_title': status_movie_title,
                    'status_movie_url': status_movie_url,
                })
                print(status_url, status_text, status_movie_title, status_movie_url, '\n')
            elif '说:' in status_text:
                # plain "saying" status, possibly with attached pictures
                try:
                    status_saying = si.xpath('.//div[@class="status-saying"]/blockquote/p/text()')[0]
                except:
                    # picture-only status has no text
                    status_saying = ''
                pic_group = si.xpath(
                    './/div[starts-with(@class,"attachments-saying group-pics")]/span[starts-with(@class,"group-pic")]')
                pic_url_list = []
                if len(pic_group) > 0:
                    print(status_url, len(pic_group))
                    for pic in pic_group:
                        pic_url = pic.xpath('./img/@data-original-url')[0]
                        pic_url_list.append(pic_url)
                result_json.append({
                    'created_at': created_at,
                    'status_type': 'saying',
                    'status_url': status_url,
                    'status_text': status_text,
                    'status_saying': status_saying,
                    'status_pic_list': pic_url_list,
                })
                print(status_saying, pic_url_list, '\n')
            elif '听过' in status_text or '在听' in status_text or '想听' in status_text:
                # music status; parsed exactly like a movie
                status_music = si.xpath('.//div[@class="content"]/div[@class="title"]/a')[0]
                status_music_title = status_music.xpath('./text()')[0]
                status_music_url = status_music.xpath('./@href')[0]
                result_json.append({
                    'created_at': created_at,
                    'status_type': 'music',
                    'status_url': status_url,
                    'status_text': status_text,
                    'status_music_title': status_music_title,
                    'status_music_url': status_music_url,
                })
                print(status_url, status_text, status_music_title, status_music_url, '\n')
            elif '关注了话题' in status_text:
                # followed-a-topic status
                status_topic = si.xpath('.//div[@class="content"]/div[starts-with(@class,"title")]/a')[0]
                status_topic_title = status_topic.xpath('./text()')[0]
                status_topic_url = status_topic.xpath('./@href')[0]
                result_json.append({
                    'created_at': created_at,
                    'status_type': 'topic',
                    'status_url': status_url,
                    'status_text': status_text,
                    'status_topic_title': status_topic_title,
                    'status_topic_url': status_topic_url,
                })
                print(status_url, status_text, status_topic_title, status_topic_url, '\n')
            elif '读过' in status_text or '在读' in status_text or '想读' in status_text:
                # book status; parsed exactly like a movie
                # NOTE(review): status_type is recorded as 'topic' here --
                # presumably a copy-paste slip for 'book'; confirm with
                # downstream consumers before changing.
                status_book = si.xpath('.//div[@class="content"]/div[@class="title"]/a')[0]
                status_book_title = status_book.xpath('./text()')[0]
                status_book_url = status_book.xpath('./@href')[0]
                result_json.append({
                    'created_at': created_at,
                    'status_type': 'topic',
                    'status_url': status_url,
                    'status_text': status_text,
                    'status_book_title': status_book_title,
                    'status_book_url': status_book_url,
                })
                print(status_url, status_text, status_book_title, status_book_url, '\n')
        params['p'] = params['p'] + 1
        # Checkpoint every 5 pages so an interruption loses little work.
        if params['p'] % 5 == 0:
            print(' saving per 5 page ')
            saveData(result_json, title)
        print(f'\n\n\n parsing page {params["p"]}\n\n\n')
        sleep(3)
    saveData(result_json, title)
import os
import json
# Resume/config state lives beside the script.
config_path = 'user_config.json'
def loadConfig():
    """Load the JSON run configuration; raise if the file is missing."""
    if not os.path.exists(config_path):
        raise Exception(f"没有配置文件 {config_path}")
    with open(config_path, 'r', encoding='utf-8-sig') as config_file:
        return json.loads(config_file.read())
def saveConfig():
    """Persist the in-memory configuration (module global) back to disk."""
    serialized = json.dumps(config_json, indent=2, ensure_ascii=False)
    with open(config_path, 'w', encoding='utf-8-sig') as config_file:
        config_file.write(serialized)
# Scraped data is written under ./output (created eagerly at import time).
data_path = 'output'
if not os.path.exists(data_path):
    os.mkdir(data_path)
def saveData(data, title):
    """Write scraped statuses to output/<title>.json (overwrites)."""
    target = os.path.join(data_path, f'{title}.json')
    serialized = json.dumps(data, indent=2, ensure_ascii=False)
    with open(target, 'w+', encoding='utf-8-sig') as data_file:
        data_file.write(serialized)
if __name__ == '__main__':
    global config_json, cur_user_index, cur_user_cur_page_index
    config_json = loadConfig()
    # Users to crawl.
    users = config_json.get('users', None)
    # Index of the user currently being crawled.
    cur_user_index = config_json.get('cur_user_index', None)
    # Page the current user's crawl has reached.
    cur_user_cur_page_index = config_json.get('cur_user_cur_page_index', None)
    # Bug fix: `<=` walked one past the end of `users`, raising IndexError on
    # the final pass (previously masked by the bare except below).
    while cur_user_index < len(users):
        cur_user = users[cur_user_index]
        try:
            get_user_status(cur_user)
            cur_user_index += 1
        except:
            # On any failure, persist how far we got and stop.
            print(traceback.format_exc())
            config_json['cur_user_index'] = cur_user_index
            saveConfig()
            break
|
from forex_python.converter import CurrencyRates
# ISO 4217 code -> human-readable country/currency name, as supported here.
currencies = {"EUR":"Euro-Member-Countries","IDR":"Indonesia-Rupiah","BGN":"Bulgaria-Lev","ILS":"Israel-Shekel","GBP":"United-Kingdom Pound","DKK":"Denmark-Krone","CAD":"Canada-Dollar","JPY":"Japan-Yen","HUF":"Hungary-Forint","RON":"Romania-New-Leu","MYR":"Malaysia-Ringgit","SEK":"Sweden-Krona","SGD":"Singapore-Dollar","HKD":"Hong-Kong-Dollar","AUD":"Australia Dollar","CHF":"Switzerland-Franc","KRW":"Korea-(South)-Won","CNY":"China-Yuan-Renminbi","TRY":"Turkey-Lira","HRK":"Croatia-Kuna","NZD":"New-Zealand-Dollar","THB":"Thailand-Baht","USD":"United-States-Dollar","NOK":"Norway-Krone","RUB":"Russia-Ruble","INR":"India-Rupee","MXN":"Mexico-Peso","CZK":"Czech-Republic-Koruna","BRL":"Brazil-Real","PLN":"Poland-Zloty","PHP":"Philippines-Peso","ZAR":"South-Africa-Rand"}
# Shared live-rate provider instance.
rate = CurrencyRates()
def is_valid_currency(cur):
    """Return True if *cur* (case-insensitive) is a supported currency code."""
    # Test membership against the dict directly: the old version built an
    # intermediate list of keys and branched to return literal booleans.
    return cur.upper() in currencies
def convert_currency(from_cur, to_cur, ammt):
    """Convert *ammt* from *from_cur* to *to_cur* at the live rate.

    Returns an error string for a non-numeric amount or an unknown
    currency code (preserves the original string-return contract).
    """
    try:
        # Parse once and reuse -- the old code converted the amount twice.
        amount = float(ammt)
    except ValueError:
        return "Not Valid Ammount"
    if is_valid_currency(from_cur) and is_valid_currency(to_cur):
        return rate.convert(from_cur.upper(), to_cur.upper(), amount)
    else:
        return "Not Valid Code"
import extract
import transform
import os
# Crawl brewtoad listing pages [start_page, end_page] and download each
# recipe's file into base_path/.
start_page = 1
end_page = 1
current_recipe_page = start_page
# individual recipe urls collected across listing pages
recipe_links = []
while current_recipe_page <= end_page:
    # add current recipe page to url
    all_recipe_pages = 'https://www.brewtoad.com/recipes?page={page}&sort=rank'.format(page=current_recipe_page)
    # at each page use `get request` to bring back raw html
    response = extract.get_html(all_recipe_pages)
    # extract links to each recipe from recipe-container
    # (still a placeholder value -- parsing not implemented yet)
    recipe_link = 'test'
    # Bug fix: list.append returns None, so the old
    # `recipe_links = recipe_links.append(recipe_link)` destroyed the list
    # and crashed on the next iteration.
    recipe_links.append(recipe_link)
    for i in recipe_links:
        # Bug fix: '{i}' is a named field, so the value must be passed by
        # keyword; '.format(i)' raised KeyError.
        output_path = 'base_path/{i}'.format(i=i)
        if not os.path.isfile(output_path):  # if file is not in the given dir
            print('Downloading data at ' + output_path)
            try:
                # Bug fix: get_html needs the recipe url -- it was called
                # with no argument.
                extract.get_html(i)
                # inside the html, get the links for each of the recipes pages
                recipe_links = transform.get_recipe_urls_from_html(response)
                transform.save_output_to_file(response, output_path)
            except:
                print('Recipe file for ' + str(i) + ' not found')
        else:  # if file is in given dir
            print('File already exists')
    # increase page number by one before returning to start of loop
    current_recipe_page = current_recipe_page + 1
print(recipe_links)
# for each recipe page, get the get_html
# in each recipe html, find the xml file download link
# download the xml into output_path
|
from indicnlp.tokenize import indic_tokenize
from collections import Counter

# Sample Hindi sentence to tokenize and analyse.
indic_string = 'सुनो, कुछ आवाज़ आ रही है। फोन?'
x = []
# Tokens treated as stopwords/punctuation and dropped before counting.
stopwords = {"है", "।", "?"}

print('Input String: {}'.format(indic_string))
print('Tokens: ')
for token in indic_tokenize.trivial_tokenize(indic_string):
    x.append(token)
    print(token)
print(x)

# Remove stopwords before frequency analysis.
my_doc = [tok for tok in x if tok not in stopwords]
print("\nToken List after Preprocessing")
print(my_doc)
print("\n")
print('After PreProcessing n_Tokens: ', len(my_doc))
print("\n")

print("Word frequency of Grams")
gram = Counter(my_doc)
print(gram)
# most_common(10) is documented as equivalent to sorting by count, descending.
gram = [g for g, _ in gram.most_common(10)]
print("\nTop 10 grams")
print(gram)
print("\n")

print("Word frequency of Bigrams")
bigram = Counter(zip(my_doc, my_doc[1:]))
print(bigram)
bigram = [b for b, _ in bigram.most_common(10)]
print("\nTop 10 Bigrams")
print(bigram)
print("\n")
|
from common import Moons, total

# Advance the moon simulation 1000 steps, then report the summed total
# (presumably total energy -- confirm against common.total) over all moons.
system = Moons()
for _ in range(1000):
    system.iterate()
print(sum(total(body) for body in system.moons))
|
from fastapi import FastAPI
from algorithm.nl2color import predict
from algorithm.pre_process import pre_process
from fastapi.responses import JSONResponse
from algorithm.util import rgb2hex
from algorithm.classification import classification
# FastAPI application exposing the analysis endpoints below.
app = FastAPI()
# CORS header attached to every response: allow any browser origin.
headers = {"Access-Control-Allow-Origin": "*"}
@app.get("/function_analysis")
def get_layout(data: str):
    """Classify each preprocessed word of `data` and return its layout label."""
    words = pre_process(data)
    layouts = []
    for word in words:
        # classification() yields labels like "__label__xyz"; keep the suffix.
        label = (list(classification(word))[0])[0]
        layouts.append(label.replace("__label__", ''))
    payload = {
        "layouts": layouts,
        "words": words
    }
    return JSONResponse(content=payload, headers=headers)
@app.get("/style_analysis")
def get_style(data: str):
    """Predict a hex colour for each preprocessed word of `data`."""
    words = pre_process(data)
    # Model predicts an RGB triple per word; convert each to "#rrggbb".
    colorList = [rgb2hex(predict(word)) for word in words]
    payload = {
        "colorList": colorList,
        "words": words
    }
    return JSONResponse(content=payload, headers=headers)
|
from model.drawable import Drawable
class HomeHeader(Drawable):
    """Title header drawn at the top of the home screen."""

    def update(self):
        # No header-specific state; defer entirely to the base drawable.
        super().update()

    def draw(self, view):
        """Render the centred title text onto the view's screen."""
        title = view.font_72.render("Ball Sort Puzzle", True, (255, 255, 255))
        x = view.width // 2 - title.get_width() // 2
        y = view.height // 8
        view.screen.blit(title, (x, y))
|
# ------------------------------------------------------------------------------
# Access to the CodeHawk Binary Analyzer Analysis Results
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2020 Kestrel Technology LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
"""Extracts features from the executable and saves them in a json file.
Feature sets extracted include: branch predicates, dll calls, unresolved
calls, and iocs (indicators of compromise). Format: feature set ->
feature -> feature count.
"""
import argparse
import json
import chb.app.AppAccess as AP
import chb.util.fileutil as UF
def parse():
    """Parse the command line: one positional argument, the executable name."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('filename',
                        help='name of executable to be analyzed')
    return parser.parse_args()
def get_ioc_name(ioc, rolename):
    """Return the feature name for an ioc role, or None to skip it.

    Only 'infox' role names map to a feature name; the caller treats a
    None result as "skip this entry".

    :param str ioc: ioc category name (currently unused here)
    :param str rolename: role name within the ioc
    :rtype: str or None
    """
    if rolename.startswith('infox'):
        return rolename
    # Explicit None: the original fell off the end implicitly, which hid
    # the fact that None is a meaningful sentinel for the caller.
    return None
if __name__ == '__main__':
    # Locate and validate the analysis results for the target executable.
    args = parse()
    try:
        (path,filename) = UF.get_path_filename('x86-pe',args.filename)
        UF.check_analysis_results(path,filename)
    except UF.CHBError as e:
        print(str(e.wrap()))
        exit(1)
    # Pull the four feature sources out of the analysis results.
    app = AP.AppAccess(path,filename)
    dllcalls = app.get_dll_calls()
    branchpredicates = app.get_branch_predicates()
    unresolvedcalls = app.get_unresolved_calls()
    try:
        (iocarguments,_) = app.get_ioc_arguments() # ioc -> role-name -> (faddr,iaddr,arg)
    except UF.CHBError as e:
        print(str(e.wrap()))
        exit(1)
    # Count dll calls keyed as 'dll:function'.
    callcounts = {} # dll:name -> count
    for faddr in dllcalls:
        for instr in dllcalls[faddr]:
            tgt = instr.get_call_target().get_stub()
            dll = tgt.get_dll().lower()
            fname = tgt.get_name()
            name = dll + ':' + fname
            callcounts.setdefault(name,0)
            callcounts[name] += 1
    # Count branch predicates, skipping unknown ('?') and symbolic ('val@')
    # expressions that would not generalize across executables.
    predicates = {} # predicate -> count
    for faddr in branchpredicates:
        for instr in branchpredicates[faddr]:
            predicate = str(instr.get_branch_predicate())
            if '?' in predicate: continue
            if 'val@' in predicate: continue
            predicates.setdefault(predicate,0)
            predicates[predicate] += 1
    # Count unresolved call-target expressions.
    unrcallcounts = {} # expression -> count
    for faddr in unresolvedcalls:
        for instr in unresolvedcalls[faddr]:
            tgt = str(instr.get_unresolved_call_target())
            unrcallcounts.setdefault(tgt,0)
            unrcallcounts[tgt] += 1
    # Aggregate ioc arguments: 'infox' roles are counted by role name under
    # a single 'infox' bucket; other roles count constant argument values.
    iocresults = {} # iocname -> iocvalue -> count
    for ioc in iocarguments:
        for rolename in iocarguments[ioc]:
            iocname = get_ioc_name(ioc,rolename)
            if iocname is None: continue
            if iocname.startswith('infox'):
                iocresults.setdefault('infox',{})
                infoxitem = iocname[6:]
                iocresults['infox'].setdefault(infoxitem,0)
                iocresults['infox'][infoxitem] += 1
            else:
                # NOTE(review): with the current get_ioc_name this branch is
                # unreachable (non-infox roles return None) -- confirm intent.
                for (_,_,arg) in iocarguments[ioc][rolename]:
                    if not arg.is_const(): continue
                    iocresults.setdefault(iocname,{})
                    iocvalue = str(arg)
                    iocresults[iocname].setdefault(iocvalue,0)
                    iocresults[iocname][iocvalue] += 1
    # Assemble and write the feature file next to the analysis results.
    result = {}
    result['predicates'] = predicates
    result['dllcalls'] = callcounts
    result['unresolvedcalls'] = unrcallcounts
    result['iocs'] = iocresults
    filename = UF.get_features_filename(path,filename)
    with open(filename,'w') as fp:
        json.dump(result,fp,indent=2)
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py2tmp.testing import *
# tmppy if/else conversion tests: successful compilations.
# NOTE(review): the '# error:'/'# note:' comments inside test bodies appear to
# be pattern-matched by the test framework; they must not be edited.
@assert_compilation_succeeds()
def test_if_else_success():
    from tmppy import Type
    def f(x: bool):
        if x:
            return Type('int')
        else:
            return Type('float')
    assert f(True) == Type('int')
@assert_compilation_succeeds()
def test_if_else_defining_local_var_success():
    from tmppy import Type
    def f(x: bool):
        if x:
            y = x
            return Type('int')
        else:
            return Type('float')
    assert f(True) == Type('int')
@assert_compilation_succeeds()
def test_if_else_only_if_returns_success():
    from tmppy import Type
    def f(x: bool):
        if x:
            return Type('int')
        else:
            y = Type('float')
        return y
    assert f(True) == Type('int')
@assert_compilation_succeeds()
def test_if_else_only_else_returns_success():
    from tmppy import Type
    def f(x: bool):
        if x:
            y = Type('int')
        else:
            return Type('float')
        return y
    assert f(True) == Type('int')
@assert_compilation_succeeds()
def test_if_returns_success():
    from tmppy import Type
    def f(x: bool):
        if x:
            return Type('int')
        return Type('float')
    assert f(True) == Type('int')
@assert_compilation_succeeds()
def test_if_else_neither_returns_success():
    from tmppy import Type
    def f(x: bool):
        if x:
            y = Type('int')
        else:
            y = Type('float')
        return y
    assert f(True) == Type('int')
@assert_compilation_succeeds()
def test_if_else_assert_in_if_branch_never_taken_ok():
    def f(x: bool):
        if False:
            b = False
            assert b
        return True
    assert f(True) == True
@assert_compilation_succeeds()
def test_if_else_assert_in_else_branch_never_taken_ok():
    def f(x: bool):
        if True:
            b = True
            assert b
        else:
            b = False
            assert b
        return True
    assert f(True) == True
@assert_compilation_succeeds()
def test_if_else_assert_in_continuation_never_executed_ok():
    from tmppy import Type
    def f(x: bool):
        if True:
            return Type('int')
        b = False
        assert b
        return Type('void')
    assert f(True) == Type('int')
# Comparison results in branches, variable forwarding, and the first batch of
# expected conversion failures (inline comments are framework-matched).
@assert_compilation_succeeds()
def test_if_else_with_comparisons_success():
    from tmppy import Type
    def f(x: Type):
        if x == Type('int'):
            b = x == Type('int')
        else:
            return x == Type('float')
        return b == True
    assert f(Type('int')) == True
@assert_compilation_succeeds()
def test_if_else_variable_forwarded_to_if_branch_success():
    def f(x: bool):
        if x:
            return x
        else:
            return False
    assert f(True) == True
@assert_compilation_succeeds()
def test_if_else_variable_forwarded_to_else_branch_success():
    def f(x: bool):
        if x:
            return False
        else:
            return x
    assert f(False) == False
@assert_compilation_succeeds()
def test_if_else_variable_forwarded_to_continuation_success():
    def f(x: bool):
        if False:
            return False
        return x
    assert f(True) == True
@assert_compilation_succeeds()
def test_if_else_variable_forwarded_to_both_branches_success():
    def f(x: bool):
        if x:
            return x
        else:
            return x
    assert f(True) == True
@assert_conversion_fails
def test_if_else_condition_not_bool_error():
    from tmppy import Type
    def f(x: Type):
        if x: # error: The condition in an if statement must have type bool, but was: Type
            return Type('int')
        else:
            return Type('float')
@assert_conversion_fails
def test_if_else_defining_same_var_with_different_types():
    from tmppy import Type
    def f(x: Type):
        if True:
            y = Type('int') # note: A previous definition with type Type was here.
        else:
            y = True # error: The variable y is defined with type bool here, but it was previously defined with type Type in another branch.
        return True
@assert_conversion_fails
def test_if_else_returning_different_types_error():
    from tmppy import Type
    def f(x: Type):
        if True:
            return Type('int') # note: A previous return statement returning a Type was here.
        else:
            return True # error: Found return statement with different return type: bool instead of Type.
# Branches defining extra variables, diagnostics for possibly/definitely
# undefined variables, missing-return diagnostics, and sequential ifs.
# (Docstring-bodied tests below are intentional: the framework reads the
# string as the source under test.)
@assert_compilation_succeeds()
def test_if_else_if_branch_defining_additional_var_success():
    from tmppy import Type
    def f(x: bool):
        if x:
            y = Type('int')
            b = True
        else:
            y = Type('float')
        return y
    assert f(True) == Type('int')
@assert_compilation_succeeds()
def test_if_else_else_branch_defining_additional_var_success():
    from tmppy import Type
    def f(x: bool):
        if x:
            y = Type('int')
        else:
            y = Type('float')
            b = True
        return y
    assert f(True) == Type('int')
@assert_conversion_fails
def test_if_else_defining_different_vars_possibly_undefined_var_used_in_continuation_error():
    from tmppy import Type
    def f(x: bool):
        if x:
            y = Type('int')
        else:
            y = Type('float')
            b = True # note: b might have been initialized here
        return b # error: Reference to a variable that may or may not have been initialized \(depending on which branch was taken\)
@assert_conversion_fails
def test_if_else_defining_different_vars_definitely_undefined_var_from_if_branch_used_in_continuation_error():
    '''
    from tmppy import Type
    def f(x: bool):
        if x:
            y = Type('int')
            b = True
            return True
        else:
            y = Type('float')
        return b # error: Reference to undefined variable/function
    '''
@assert_conversion_fails
def test_if_else_defining_different_vars_definitely_undefined_var_from_else_branch_used_in_continuation_error():
    '''
    from tmppy import Type
    def f(x: bool):
        if x:
            y = Type('int')
        else:
            y = Type('float')
            b = True
            return True
        return b # error: Reference to undefined variable/function
    '''
@assert_conversion_fails
def test_if_else_if_branch_does_not_return_error():
    from tmppy import Type
    def f(x: bool):
        if x:
            y = Type('int') # error: Missing return statement.
        else:
            return True
@assert_conversion_fails
def test_if_else_else_branch_does_not_return_error():
    from tmppy import Type
    def f(x: bool):
        if x:
            return True
        else:
            y = Type('int') # error: Missing return statement.
@assert_conversion_fails
def test_if_else_missing_else_branch_no_return_after_error():
    def f(x: bool):
        if x: # error: Missing return statement. You should add an else branch that returns, or a return after the if.
            return True
@assert_compilation_succeeds()
def test_if_else_sequential_success():
    from tmppy import Type
    def f(x: bool):
        if x:
            return False
        else:
            y = Type('int')
        if y == Type('float'):
            return False
        else:
            return True
    assert f(False) == True
# Sequential if-statements where a variable might be re-initialized: every
# branch combination must be rejected by the converter.
@assert_conversion_fails
def test_if_else_sequential_reassigned_var_if_if_error():
    from tmppy import Type
    def f(x: bool, y: bool):
        if x:
            z = Type('int') # note: It might have been initialized here \(depending on which branch is taken\).
        else:
            p1 = True
        if y:
            z = Type('int') # error: z could be already initialized at this point.
        else:
            p2 = True
        return True
@assert_conversion_fails
def test_if_else_sequential_reassigned_var_if_without_else_if_error():
    from tmppy import Type
    def f(x: bool, y: bool):
        if x:
            z = Type('int') # note: It might have been initialized here \(depending on which branch is taken\).
        if y:
            z = Type('int') # error: z could be already initialized at this point.
        else:
            p1 = True
        return True
@assert_conversion_fails
def test_if_else_sequential_reassigned_var_if_else_error():
    from tmppy import Type
    def f(x: bool, y: bool):
        if x:
            z = Type('int') # note: It might have been initialized here \(depending on which branch is taken\).
        else:
            p1 = True
        if y:
            p2 = True
        else:
            z = Type('int') # error: z could be already initialized at this point.
        return True
@assert_conversion_fails
def test_if_else_sequential_reassigned_var_if_without_else_else_error():
    from tmppy import Type
    def f(x: bool, y: bool):
        if x:
            z = Type('int') # note: It might have been initialized here \(depending on which branch is taken\).
        if y:
            p1 = True
        else:
            z = Type('int') # error: z could be already initialized at this point.
        return True
@assert_conversion_fails
def test_if_else_sequential_reassigned_var_else_if_error():
    from tmppy import Type
    def f(x: bool, y: bool):
        if x:
            p1 = True
        else:
            z = Type('int') # note: It might have been initialized here \(depending on which branch is taken\).
        if y:
            z = Type('int') # error: z could be already initialized at this point.
        else:
            p2 = True
        return True
@assert_conversion_fails
def test_if_else_sequential_reassigned_var_else_if_without_else_error():
    from tmppy import Type
    def f(x: bool, y: bool):
        if x:
            p1 = True
        else:
            z = Type('int') # note: It might have been initialized here \(depending on which branch is taken\).
        if y:
            z = Type('int') # error: z could be already initialized at this point.
        return True
@assert_conversion_fails
def test_if_else_sequential_reassigned_var_else_else_error():
    from tmppy import Type
    def f(x: bool, y: bool):
        if x:
            p1 = True
        else:
            z = Type('int') # note: It might have been initialized here \(depending on which branch is taken\).
        if y:
            p2 = True
        else:
            z = Type('int') # error: z could be already initialized at this point.
        return True
|
#!/usr/bin/python
# coding:utf-8
import os.path
import sys
import logging
import shutil
import re
import subprocess

logging.getLogger().setLevel(logging.INFO)

# Directory to scan, given as the first command-line argument.
search_path = str(sys.argv[1])
# e.g. "babes.18.03.24.some-title-1080p.mp4" -> (site, yy.mm.dd, title)
search_string_pattern = r'(.*)\.(\d\d\.\d\d\.\d\d)\.(.*)-.*\.mp4'
# Video containers of interest; str.endswith accepts a tuple of suffixes.
video_extensions = ('rmvb', 'mp4', 'm4v', 'mkv', 'avi', 'wmv')

for parent, dirnames, filenames in os.walk(search_path):
    for filename in filenames:
        if filename.lower().endswith(video_extensions) and not filename.startswith("."):
            full_path = os.path.join(parent, filename)
            # BUG FIX: splitext was applied to full_path, so `name` already
            # contained the parent directory and the joins below produced
            # duplicated paths -- the existence check could never succeed.
            name, ext = os.path.splitext(filename)
            fanart_file = os.path.join(parent, name + '-fanart.jpg')
            poster_file = os.path.join(parent, name + '-poster.jpg')
            nfo_file = os.path.join(parent, name + '.nfo')
            if os.path.exists(fanart_file) and os.path.exists(poster_file) and os.path.exists(nfo_file):
                # Artwork and nfo already scraped for this video.
                pass
            else:
                search_target = filename.lower()
                if search_target.startswith("babes"):
                    target_match = re.match(search_string_pattern, search_target)
                    if target_match:
                        cmd = "./docker-search.sh %s %s %s %s" % ('babes', target_match.group(3), target_match.group(2), 'False')
                        # print(...) with a single argument works under both
                        # python2 and python3 (the original used the py2
                        # print statement, which fails to compile on py3).
                        print("#%s" % cmd)
                        #p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
                        #output, err = p.communicate()
                        #scrapycmd = output.split(b'\n')[0]
                        #if scrapycmd == "Nothing found":
                        #    pass
                        #else:
                        #    targetFileName = scrapycmd.split(' ')[2]
                        #    print("mv %s %s.mp4" % (full_path, os.path.join(parent, targetFileName)))
                else:
                    # Not a recognized site prefix; skip silently.
                    pass
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import MySQLdb
class cMySql:
    """Thin wrapper around a MySQLdb connection/cursor pair."""

    conn = None
    cur = None
    conf = None

    def __init__(self, **kwargs):
        # Fill in defaults for any connection options not supplied.
        self.conf = kwargs
        self.conf["keep_alive"] = kwargs.get("keep_alive", False)
        self.conf["charset"] = kwargs.get("charset", "utf8")
        self.conf["host"] = kwargs.get("host", "localhost")
        self.conf["port"] = kwargs.get("port", 3306)
        self.conf["autocommit"] = kwargs.get("autocommit", False)
        self.connect()

    def connect(self):
        """Connect to the mysql server"""
        try:
            self.conn = MySQLdb.connect(db=self.conf['db'], host=self.conf['host'],
                                        port=self.conf['port'], user=self.conf['user'],
                                        passwd=self.conf['passwd'],
                                        charset=self.conf['charset'])
            self.cur = self.conn.cursor()
            self.conn.autocommit(self.conf["autocommit"])
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt is not caught.
            print ("MySQL connection failed")
            raise

    def is_open(self):
        """Check if the connection is open"""
        return self.conn.open

    def end(self):
        """Kill the connection"""
        self.cur.close()
        self.conn.close()

    def query(self, sql, params = None):
        """Execute `sql`, optionally with DB-API parameters, inside a
        transaction (`with conn` commits on success, rolls back on error).

        :param str sql: query text
        :param tuple params: optional parameters for placeholder substitution
        :return: the cursor, positioned at the result
        """
        try:
            with self.conn:
                # BUG FIX: `params` was accepted but never forwarded, so
                # parameterized queries silently ran without their values.
                # Passing None is equivalent to no parameters in MySQLdb.
                self.cur.execute(sql, params)
        except Exception:
            print("Query failed")
            raise
        return self.cur
# Dead example block: flip the 0 to 1 to exercise the wrapper manually
# against a local MySQL instance.
if (0):
    db = cMySql(
        host="localhost",
        db="mqtt_log",
        user="root",
        passwd="password",
        keep_alive=True # try and reconnect timedout mysql connections?
    )
    db.query("INSERT INTO `test`(`topic`, `message`) VALUES ('abc','123')");
|
# -*- coding: utf-8 -*-
from django.db import models
class Cliente(models.Model):
    """Customer record with contact, address and debt information."""
    codigo_cliente=models.IntegerField(unique=True)   # external client code
    nombre_completo=models.CharField(max_length=200)
    apellido=models.CharField(max_length=100)
    nombre=models.CharField(max_length=100)
    tipo_documento=models.CharField(max_length=6)
    numero_documento=models.IntegerField(db_index=True)
    telefono=models.CharField(max_length=50)
    calle=models.CharField(max_length=50)
    altura=models.CharField(max_length=10)
    barrio=models.CharField(max_length=50)
    codigo_postal=models.IntegerField()
    parcela=models.CharField(max_length=10)
    # BUG FIX: `ss` was assigned twice with identical definitions; the
    # second assignment simply overwrote the first, so one was removed.
    ss=models.CharField(max_length=10)
    ss_gs = models.CharField(max_length=10)
    emi = models.CharField(max_length=10)
    cualidad=models.IntegerField()
    inhumados=models.CharField(max_length=4)
    parcela_inhumados=models.IntegerField()
    meses_deuda=models.IntegerField()
    monto_deuda=models.DecimalField(decimal_places=2,max_digits=20)
    productos=models.CharField(max_length=100)
    def __str__(self):
        return u'%s' % self.codigo_cliente
    def __unicode__(self):
        return u'%s' % self.codigo_cliente
import os
from optparse import OptionParser
import pandas as pd
import numpy as np
import common
import cy_lda
import file_handling as fh
def main():
    """Evaluate a trained LDA model on held-out test data and save the
    resulting document-topic representations next to the model.

    Usage: %prog data_dir model_dir
    """
    usage = "%prog data_dir model_dir"
    parser = OptionParser(usage=usage)
    (options, args) = parser.parse_args()
    if len(args) < 2:
        # Robustness fix: missing arguments previously raised a bare
        # IndexError; report the usage string instead.
        parser.error("both data_dir and model_dir are required")
    data_dir = args[0]
    model_dir = args[1]
    print("Loading training vocabulary")
    vocab = fh.read_json(os.path.join(data_dir, 'train.vocab.json'))
    print("Loading test data")
    test_X, _, test_items = common.load_data(data_dir, 'test', vocab)
    n_test, _ = test_X.shape
    test_X_list = common.convert_data_to_list_of_lists(test_X)
    # Create a model with an arbitrary number of topics; the real K is
    # restored when the saved parameters are loaded below.
    model = cy_lda.LDA(K=1, V=len(vocab))
    model.load_parameters(os.path.join(model_dir, 'train.model.npz'))
    # Fit the model to the test data and compute perplexity.
    print("Evaluating on test data")
    perplexity = model.evaluate(test_X_list)
    print("Perplexity = %0.4f" % perplexity)
    # Save the raw and row-normalized document-topic matrices.
    print("Saving document-topic matrix")
    gammas = model.get_document_representations()
    n_items, n_topics = gammas.shape
    document_topic_matrix = pd.DataFrame(gammas, index=test_items, columns=np.arange(n_topics))
    document_topic_matrix.to_csv(os.path.join(model_dir, 'test.document_topic_gammas.csv'))
    # Normalize each row into a distribution over topics.
    gammas_norm = gammas / np.sum(gammas, axis=1).reshape((len(test_X_list), 1))
    document_topic_matrix = pd.DataFrame(gammas_norm, index=test_items, columns=np.arange(n_topics))
    document_topic_matrix.to_csv(os.path.join(model_dir, 'test.document_topic_means.csv'))
# Script entry point.
if __name__ == '__main__':
    main()
|
"""
Base spec for Forcepoint NGFW Management Center connections. This is a session
that will be re-used for multiple operations against the management
server.
"""
import inspect
import traceback
from ansible.module_utils.basic import AnsibleModule
from distutils.version import StrictVersion
try:
from smc import session, set_file_logger
from smc.base.model import lookup_class
import smc.elements.network as network
import smc.elements.netlink as netlink
import smc.elements.group as group
import smc.elements.service as service
import smc.elements.protocols as protocol
from smc.core.engine import Engine
from smc.base.collection import Search
from smc.elements.other import Category
from smc.api.exceptions import ConfigLoadError, SMCException, \
UserElementNotFound, ElementNotFound, DeleteElementFailed, \
UnsupportedEntryPoint
HAS_LIB = True
except ImportError:
HAS_LIB = False
class Cache(object):
    """
    Convenience cache object to reduce number of queries for a
    given playbook and store unfound elements in `missing`. This
    is not intended to have a `get_or_create` logic, therefore when
    validating the existence of elements, you should check missing
    before continuing the playbook run.
    """
    def __init__(self):
        # Entries that could not be resolved; each is a dict with
        # msg/name/type keys for reporting back to the playbook.
        self.missing = []
        self.cache = {} # typeof: [Element1, Element2, ..]
    def add_many(self, list_of_entries):
        """
        Add many elements into cache. Format should be:
            element = [{'network': [network1, network2]},
                       {'host': [host1, host2]},
                       ...]
        Where the key is a valid 'typeof' (SMC entry point)
        and value is a list of names to search
        """
        for elements in list_of_entries:
            for typeof, values in elements.items():
                for name in values:
                    self._add_entry(typeof, name)
    def add(self, dict_of_entries):
        """
        Add entry as dict of list, format:
            element = {'network': [network1, network2]}
        """
        for typeof, values in dict_of_entries.items():
            for name in values:
                self._add_entry(typeof, name)
    def _add_user_entries(self, typeof, users):
        # User elements are fetched by direct href
        # Expected user format: 'name,domain=DomainName'
        domain_dict = {}
        for user in users:
            _user, _domain = user.split(',domain=')
            domain_dict.setdefault(_domain, []).append(_user)
        for domain, uids in domain_dict.items():
            # Get domain first
            entry_point = 'external_ldap_user_domain' if domain != \
                'InternalDomain' else 'internal_user_domain'
            ldap = Search.objects.entry_point(entry_point)\
                .filter(domain, exact_match=True).first()
            if not ldap:
                self.missing.append(
                    dict(msg='Cannot find specified element',
                        name=domain,
                        type=entry_point))
                continue
            for uid in uids:
                try:
                    # NOTE(review): browse() ignores `uid` and runs once per
                    # uid, so the same results may be cached repeatedly --
                    # confirm intended behavior against the SMC API.
                    result = ldap.browse()
                    self.cache.setdefault('user_element', []).extend(
                        result)
                except UserElementNotFound as e:
                    self.missing.append(
                        dict(msg='Cannot find specified element: %s' % str(e),
                            name=uid,
                            type=typeof))
    def _add_entry(self, typeof, name):
        # Add entry if it doesn't already exist
        if self.get(typeof, name):
            return
        try:
            # Engines live under a context filter, not a plain entry point.
            if typeof == 'engine':
                result = Search.objects.context_filter('engine_clusters')\
                    .filter(name, exact_match=True).first()
            else:
                result = Search.objects.entry_point(typeof)\
                    .filter(name, exact_match=True).first()
            if result:
                self.cache.setdefault(typeof, []).append(
                    result)
            else:
                self.missing.append(
                    dict(msg='Cannot find specified element',
                        name=name,type=typeof))
        except UnsupportedEntryPoint:
            self.missing.append(
                dict(msg='An invalid element type was specified',
                    name=name,type=typeof))
    def get_href(self, typeof, name):
        # Convenience: href of a cached element, or None when not cached.
        result = self.get(typeof, name)
        if result:
            return result.href
    def get(self, typeof, name):
        """
        Get element by type and name
        :param str typeof: typeof element
        :param str name: name of element
        :rtype: element or None
        """
        for value in self.cache.get(typeof, []):
            if value.name == name:
                return value
    def get_type(self, typeof):
        """
        Get all elements of a specific type
        :param str typeof: typeof element
        :rtype: list
        """
        if typeof in self.cache:
            return self.cache[typeof]
        return []
    @property
    def as_string(self):
        # Serializable view of the cache: typeof -> [(name, href), ...]
        out = {}
        for typeof, values in self.cache.items():
            out.setdefault(typeof, []).extend(
                [(value.name, value.href) for value in values])
        return out
def get_method_argspec(clazz, method=None):
    """
    Get method argspec. Return a 2-tuple:
        ([required_args], [valid_args])
    Each tuple holds a list of the relevant args either
    required or valid.
    :param Element clazz: class for lookup
    :param str method: method name, defaults to `create` if None
    :rtype: tuple
    """
    # BUG FIX: inspect.getargspec was removed in Python 3.11;
    # getfullargspec is the drop-in replacement for args/defaults.
    argspec = inspect.getfullargspec(getattr(clazz, method if method else 'create'))
    valid_args = argspec.args[1:]  # drop self/cls
    args = []
    if argspec.defaults:
        # Positional args without defaults, minus self/cls.
        # NOTE(review): when a method has required args but NO defaults
        # this still reports [] -- preserved as-is; confirm intent.
        args = argspec.args[:-len(argspec.defaults)][1:]
    return (args, valid_args)
def required_args(clazz, method=None):
    """
    Return only the required arguments for the given class and method.
    :param Element clazz: class for lookup
    :param str method: method, default to `create` if None
    :rtype: list
    """
    required, _ = get_method_argspec(clazz, method)
    return required
def allowed_args(clazz, method=None):
    """
    Provide a list of allowed args for the specified method, including
    kwargs as well as args. To find only required args, use the
    required_args function instead.
    :param Element clazz: class derived from base class Element
    :param str method: method to check args, or `create` if not provided
    :rtype: list(str)
    """
    _, valid = get_method_argspec(clazz, method)
    return valid
def allowed_args_by_lookup(typeof, method=None):
    """
    Return the allowed arguments and kwargs by name based on the
    class's typeof attribute. Validate that the typeof is a valid
    descendent of `smc.base.model.Element` before calling this.
    :return: list of argument names
    :rtype: list
    """
    return allowed_args(lookup_class(typeof), method)
def element_type_dict(map_only=False):
    """
    Type dict constructed with valid `create` constructor arguments.
    This is used in modules that support update_or_create operations.
    :param bool map_only: skip resolving constructor arguments
    """
    types = dict(
        host=dict(type=network.Host),
        network=dict(type=network.Network),
        address_range=dict(type=network.AddressRange),
        router=dict(type=network.Router),
        ip_list=dict(type=network.IPList),
        group=dict(type=group.Group),
        netlink=dict(type=netlink.StaticNetlink),
        interface_zone=dict(type=network.Zone),
        domain_name=dict(type=network.DomainName))
    if map_only:
        return types
    # Attach the allowed `create` arguments for each element class.
    for spec in types.values():
        spec['attr'] = allowed_args(spec['type'])
    return types
def ro_element_type_dict(map_only=False):
    """
    Type dict of read-only network elements. These elements can be
    fetched but not created.
    :param bool map_only: skip resolving constructor arguments
    """
    types = dict(
        alias=dict(type=network.Alias),
        country=dict(type=network.Country),
        expression=dict(type=network.Expression),
        engine=dict(type=Engine))
    if map_only:
        return types
    # Read-only types expose their arguments on __init__, not create().
    for spec in types.values():
        spec['attr'] = allowed_args(spec['type'], '__init__')
    return types
def service_type_dict(map_only=False):
    """
    Type dict for service elements and groups.
    :param bool map_only: skip resolving constructor arguments
    """
    types = dict(
        tcp_service=dict(type=service.TCPService),
        udp_service=dict(type=service.UDPService),
        ip_service=dict(type=service.IPService),
        ethernet_service=dict(type=service.EthernetService),
        icmp_service=dict(type=service.ICMPService),
        icmp_ipv6_service=dict(type=service.ICMPIPv6Service),
        service_group=dict(type=group.ServiceGroup),
        tcp_service_group=dict(type=group.TCPServiceGroup),
        udp_service_group=dict(type=group.UDPServiceGroup),
        ip_service_group=dict(type=group.IPServiceGroup),
        icmp_service_group=dict(type=group.ICMPServiceGroup))
    if map_only:
        return types
    # Attach the allowed `create` arguments for each service class.
    for spec in types.values():
        spec['attr'] = allowed_args(spec['type'])
    return types
def ro_service_type_dict():
    """
    Type dict of read-only service elements. These elements can be
    fetched but not created.
    """
    types = dict(
        url_category=dict(type=service.URLCategory),
        application_situation=dict(type=service.ApplicationSituation),
        protocol=dict(type=protocol.ProtocolAgent),
        rpc_service=dict(type=service.RPCService))
    # Read-only types expose their arguments on __init__, not create().
    for spec in types.values():
        spec['attr'] = allowed_args(spec['type'], '__init__')
    return types
def update_or_create(element, type_dict, check_mode=False):
    """
    Update or create the element specified. Set check_mode to only
    perform a get against the element versus an actual action.
    :param dict element: element dict, key is typeof element and values
        is a dict of the element's attributes (must include 'name')
    :param dict type_dict: type dict mappings to get class mapping
    :param bool check_mode: fetch-only mode; no create/update is performed
    :raises CreateElementFailed: may fail due to duplicate name or other
    :raises ElementNotFound: if fetch and element doesn't exist
    :return: result dict describing the action taken
    """
    # NOTE(review): only the result of the LAST key in `element` is
    # returned; callers appear to pass single-entry dicts -- confirm.
    for typeof, values in element.items():
        _type_dict = type_dict.get(typeof)
        result = None
        if check_mode:
            # Fetch only; report whether the element currently exists.
            element = _type_dict['type'].get(values.get('name'), raise_exc=False)
            if element is None:
                result = dict(
                    name=values.get('name'),
                    type=typeof,
                    msg='Specified element does not exist')
            else:
                result = element_dict_from_obj(element, type_dict)
        else:
            attr_names = _type_dict.get('attr', []) # Constructor args
            provided_args = set(values)
            # Guard against calling create for elements that may not exist
            # and do not have valid `create` constructor arguments
            if set(attr_names) == set(['name', 'comment']) or \
                any(arg for arg in provided_args if arg not in ('name',)):
                element, modified, created = _type_dict['type'].update_or_create(
                    with_status=True, **values)
                result = dict(
                    name=element.name,
                    type=element.typeof)
                if modified or created:
                    result['action'] = 'created' if created else 'updated'
            else:
                # Only a name was provided; fetch instead of creating.
                element = _type_dict['type'].get(values.get('name'), raise_exc=False)
                result = dict(
                    name=values.get('name'),
                    type=_type_dict['type'].typeof)
                if element is None:
                    result['msg'] = 'Specified element does not exist and parameters did not exist to create'
                else:
                    result['action'] = 'fetched'
    return result
def delete_element(element, ignore_if_not_found=True):
    """
    Delete an element of any type.
    :param Element element: the smc api element
    :param bool ignore_if_not_found: when True, a missing element or a
        failed delete is reported in the returned dict instead of raising.
    :raises ElementNotFound: strict mode only, element did not exist
    :raises DeleteElementFailed: strict mode only; generally thrown when
        another configuration area has a dependency on this element
        (i.e. used in policy, etc).
    :return: dict describing the outcome
    """
    # BUG FIX: the original placed `return` inside a `finally` (silently
    # swallowing ANY exception when ignore_if_not_found was True) and
    # followed it with a bare `raise` outside the handler, which raises
    # RuntimeError in Python 3 instead of re-raising the original error.
    msg = {}
    try:
        element.delete()
        msg['action'] = 'deleted'
    except ElementNotFound:
        if not ignore_if_not_found:
            raise
        msg['msg'] = 'Element not found, skipping delete'
    except DeleteElementFailed as e:
        if not ignore_if_not_found:
            raise
        msg['msg'] = str(e)
    return dict(
        name=element.name,
        type=element.typeof,
        **msg)
def format_element(element):
    """
    Format a raw json element doc, stripping SMC bookkeeping fields
    before returning the underlying data dict.
    """
    internal_fields = ('link', 'key', 'system_key', 'system', 'read_only')
    for field in internal_fields:
        element.data.pop(field, None)
    return element.data.data
def element_dict_from_obj(element, type_dict, expand=None):
    """
    Resolve the element to the type and return a dict
    with the values of defined attributes
    :param Element element: element to serialize
    :param dict type_dict: type mapping used to find documented attributes
    :param list expand: when it contains 'group', group members are
        expanded recursively instead of being left as references
    :return dict representation of the element
    """
    expand = expand if expand else []
    known = type_dict.get(element.typeof)
    if known:
        elem = {'type': element.typeof}
        for attribute in known.get('attr', []):
            if 'group' in element.typeof and 'group' in expand:
                if attribute == 'members':
                    elem[attribute] = []
                    for member in element.obtain_members():
                        # Nested groups keep expanding; leaf members do not.
                        m_expand = ['group'] if 'group' in member.typeof else None
                        elem[attribute].append(
                            element_dict_from_obj(member, type_dict, m_expand))
                else:
                    elem[attribute] = getattr(element, attribute, None)
            else:
                elem[attribute] = getattr(element, attribute, None)
        return elem
    else:
        # Unknown type: degrade to a minimal name/type dict.
        return dict(name=element.name, type=element.typeof)
def is_sixdotsix_compat():
    """
    Switch to validate version of SMC. There were changes in 6.6 that
    break backwards compatibility and require this check to ensure
    idempotence against several areas such as rule actions and virtual
    engines.

    :rtype: bool
    """
    try:
        return StrictVersion(session.api_version) >= StrictVersion("6.6")
    except ValueError:
        # BUGFIX: the original `pass`ed and then returned an unbound `result`
        # (NameError) when the version string did not parse. An unparseable
        # version is treated as "not 6.6 compatible".
        return False
def smc_argument_spec():
    """
    Ansible argument spec shared by every SMC module: connection,
    credential and logging options.

    :rtype: dict
    """
    return {
        'smc_address': {'type': 'str'},
        'smc_api_key': {'type': 'str', 'no_log': True},
        'smc_api_version': {'type': 'str'},
        'smc_timeout': {'default': 30, 'type': 'int'},
        'smc_domain': {'type': 'str'},
        'smc_alt_filepath': {'type': 'str'},
        'smc_extra_args': {'type': 'dict'},
        'smc_logging': {'type': 'dict'},
    }
def fact_argument_spec():
    """
    Extra ansible arguments accepted by every fact (read-only) module:
    search filtering and output formatting.

    :rtype: dict
    """
    return {
        'filter': {'type': 'str'},
        'limit': {'default': 0, 'type': 'int'},
        'exact_match': {'default': False, 'type': 'bool'},
        'case_sensitive': {'default': True, 'type': 'bool'},
        'as_yaml': {'default': False, 'type': 'bool'},
    }
class ForcepointModuleBase(object):
    """
    Base class for Forcepoint SMC ansible modules.

    Builds the AnsibleModule from the shared SMC argument spec (plus the
    fact spec when ``is_fact`` is True), verifies smc-python imported,
    logs in to the SMC, dispatches to the sub-module's ``exec_module``
    and exits the run with its result.
    """
    def __init__(self, module_args, required_if=None, bypass_checks=False,
                 no_log=False, mutually_exclusive=None, required_together=None,
                 required_one_of=None, add_file_common_args=False,
                 supports_check_mode=False, is_fact=False):
        argument_spec = smc_argument_spec()
        if is_fact:
            argument_spec.update(fact_argument_spec())
        argument_spec.update(module_args)
        self.module = AnsibleModule(
            argument_spec=argument_spec,
            required_if=required_if,
            bypass_checks=bypass_checks,
            no_log=no_log,
            #check_invalid_arguments=check_invalid_arguments, # Deprecated in 2.9
            mutually_exclusive=mutually_exclusive,
            required_together=required_together,
            required_one_of=required_one_of,
            add_file_common_args=add_file_common_args,
            supports_check_mode=supports_check_mode)
        if not HAS_LIB:
            self.module.fail_json(msg='Could not import smc-python required by this module')
        self.check_mode = self.module.check_mode
        # Login, run the sub-module, then exit; success()/fail() both logout.
        self.connect(self.module.params)
        result = self.exec_module(**self.module.params)
        self.success(**result)

    def connect(self, params):
        """
        Get the SMC connection. If the credentials are provided in the module,
        then use them. Otherwise credentials gathering falls back to using
        smc-python native methods. Session is maintained for ansible run.

        :param dict params: dict of the SMC credential information
        """
        try:
            if params.get('smc_logging') is not None:
                if 'path' not in params['smc_logging']:
                    self.fail(msg='You must specify a path parameter for SMC logging.')
                set_file_logger(
                    log_level=params['smc_logging'].get('level', 10),
                    path=params['smc_logging']['path'])

            # BUGFIX: the original tested `'smc_address' and 'smc_api_key' in
            # params`, which evaluates the literal 'smc_address' as truthy and
            # only checks the api key -- and membership is always true anyway
            # since AnsibleModule fills every spec key with None. Check that
            # both values were actually supplied before using them.
            if params.get('smc_address') and params.get('smc_api_key'):
                extra_args = params.get('smc_extra_args')
                # When connection parameters are defined, alt_filepath is ignored.
                session.login(
                    url=params.get('smc_address'),
                    api_key=params.get('smc_api_key'),
                    api_version=params.get('smc_api_version'),
                    timeout=params.get('smc_timeout'),
                    domain=params.get('smc_domain'),
                    **(extra_args or {}))
            elif params.get('smc_alt_filepath'):
                # User specified to look in file
                session.login(alt_filepath=params['smc_alt_filepath'])
            else:
                # From user ~.smcrc or environment
                session.login()
        except (ConfigLoadError, SMCException) as err:
            self.fail(msg=str(err), exception=traceback.format_exc())

    def disconnect(self):
        """
        Disconnect session from SMC after ansible run
        """
        try:
            session.logout()
        except SMCException:
            # Best-effort logout; the run result is already determined.
            pass

    def exec_module(self):
        """Sub-modules must override; receives the module params as kwargs."""
        self.fail(msg='Override in sub-module. Called from: {}'.format(self.__class__.__name__))

    def search_by_context(self):
        """
        Only used by fact modules. Fact modules need to implement a single
        attribute `element` that identifies the SMC entry point used for
        the search. See engine_facts for an example.
        This is a generic iterator using SMC context_filters

        :return: list of metadata results
        :rtype: list
        """
        if self.filter:
            # Find specific
            iterator = Search.objects\
                .context_filter(self.element)\
                .filter(self.filter,
                        exact_match=self.exact_match,
                        case_sensitive=self.case_sensitive)
        else:
            # Find all
            iterator = Search.objects.context_filter(self.element)
        if self.limit >= 1:
            iterator = iterator.limit(self.limit)
        return list(iterator)

    def search_by_type(self, typeof):
        """
        Only used by fact modules. Fact modules need to implement a single
        attribute `element` that identifies the SMC entry point used for
        the search. See engine_facts for an example.
        This is an iterator by the specific SMC element type.

        :param str typeof: SMC API entry point
        :return: list of metadata results
        :rtype: list
        """
        if self.filter:
            iterator = typeof.objects\
                .filter(self.filter,
                        exact_match=self.exact_match,
                        case_sensitive=self.case_sensitive)
        else:
            iterator = typeof.objects.all()
        if self.limit >= 1:
            iterator = iterator.limit(self.limit)
        return list(iterator)

    def fetch_element(self, cls):
        """
        Fetch an element by doing an exact match.
        Name should be set on self.

        :param Element cls: class of type Element
        :return: element or None
        """
        return cls.objects.filter(self.name, exact_match=True).first()

    def add_tags(self, element, tags):
        """
        Add tag/s to an element. Only tags not already present are added.

        :param Element element: the element to add a tag.
        :param list tags: list of tags by name
        :return: boolean success or fail
        """
        changed = False
        current_tags = [tag.name for tag in element.categories]
        add_tags = set(tags) - set(current_tags)
        if add_tags:
            element.add_category(list(add_tags))
            changed = True
        return changed

    def remove_tags(self, element, tags):
        """
        Remove tag/s from an element

        :param Element element: the element to remove a tag from.
        :param list tags: list of tags by name
        :return: boolean success or fail
        """
        changed = False
        current_tags = [tag.name for tag in element.categories]
        for tag in tags:
            if tag in current_tags:
                category = Category(tag)
                category.remove_element(element)
                changed = True
        return changed

    def clear_tags(self, element):
        """
        Clear all tags from the element

        :param Element element: the element for which to remove tags
        :return: boolean success or fail
        """
        changed = False
        for category in element.categories:
            category.remove_element(element)
            changed = True
        return changed

    def fail(self, msg, **kwargs):
        """
        Fail the request with message; logs out of the SMC first.
        """
        self.disconnect()
        self.module.fail_json(msg=msg, **kwargs)

    def success(self, **result):
        """
        Success with result messages; logs out of the SMC first.
        """
        self.disconnect()
        self.module.exit_json(**result)

    def is_element_valid(self, element, type_dict, check_required=True):
        """
        Used by modules that want to create an element (network and service).
        This will check that all provided arguments are valid for this element
        type. When creating an element, name and comment are valid for all.
        Key of dict should be the valid typeof element. Value is the data
        for the element.

        :param dict element: dict of element, key is typeof
        :param dict type_dict: provide a type dict to specify which elements
            are supported for the given context of the call. Default type
            dict examples are defined in smc_util.
        :param bool check_required: check required validates that at least
            one of the required arguments are provided. Skip this when
            checking group members that may only provide the 'name' field
            to reference a member to be added to a group versus creating the
            member.
        :return: error message on fail, otherwise None
        """
        if not isinstance(element, dict):
            self.fail(msg='Elements must be defined as a dict with the key identifying '
                'the type of element')
        for key, values in element.items():
            if key not in type_dict:
                self.fail(msg='Unsupported element type: {} provided'.format(key))
            # Verify that all attributes are supported for this element type
            provided_values = values.keys() if isinstance(values, dict) else []
            valid_values = type_dict.get(key).get('attr', [])
            if provided_values:
                # Name is always required
                if 'name' not in provided_values:
                    self.fail(msg='Entry: {}, missing required name field'.format(key))
                if check_required:
                    # Do not enforce extra arguments be provided as kwargs may be supported.
                    # In addition, update_or_create will always take kwargs
                    required_arg = [arg for arg in valid_values if arg not in ('name', 'comment')]
                    if required_arg: #Something other than name and comment fields
                        if not any(arg for arg in required_arg if arg in provided_values):
                            self.fail(msg='Missing a required argument for {} entry: {}, Valid values: {}'\
                                .format(key, values['name'], valid_values))
                if 'group' in key and values.get('members', []):
                    if not isinstance(values['members'], dict):
                        # Consistency: pass the message as msg= like every other call
                        self.fail(msg="Group members should be defined as a dict. Received: %s" %
                            type(values['members']))
                    for member_type, type_value in values['members'].items():
                        if not isinstance(type_value, list):
                            self.fail(msg='Member type: {} must be in list format'.format(member_type))
                        if member_type not in type_dict:
                            self.fail(msg='Group member type is not valid: {}'.format(member_type))
            else:
                self.fail(msg='Entry type: {} has no values. Valid values: {} '\
                    .format(key, valid_values))
|
from django.db import models
# Create your models here.
class Poll(models.Model):
    """A poll, identified by an optional human-readable name."""

    name = models.CharField(max_length=60, null=True, blank=True)

    class Meta:
        verbose_name = "Poll"
        verbose_name_plural = "Polls"

    def __str__(self):
        return '{}'.format(self.name)
|
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "Untitled3.ipynb",
"provenance": [],
"authorship_tag": "ABX9TyP/t+bTW1yYGaPDicgL+K4r",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/dhanasekar9894/workday-jobscraping-automation/blob/main/scraper.py\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "code",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 231
},
"id": "S8apDmidUAOV",
"outputId": "2cf5cd51-bd6b-42c7-9932-9df3c69bd9ed"
},
"source": [
"import requests\n",
"import re\n",
"import json\n",
"ses = requests.Session()\n",
"import pandas as pd\n",
"\n",
"class JOBS:\n",
"\n",
" def __init__(self, url=None, filename=None, output=None):\n",
" if url is None:\n",
" raise TypeError(\"Please enter url from myworkday site!!!\")\n",
" self.url = url\n",
" self.output = output\n",
" self.filename = filename\n",
" self.ID = \"efc49a3159bb428ab71e15425e0f4c13\"\n",
"\n",
" def parse(self):\n",
" url_parse = {\n",
" \"host\": re.compile(\"(\\w+?[-.].+?)[/]\"),\n",
" \"query\": re.compile(\"\\w?/[^\\/\\s]+\\/?(.*)\"),\n",
" \"protocol\": re.compile(\"(\\w+)[:]\")\n",
" }\n",
" return url_parse\n",
"\n",
" def Regexp(self):\n",
" url = JOBS.parse(self)\n",
" List = []\n",
" for N,R in url.items():\n",
" try:\n",
" List.append(R.findall(self.url)[0])\n",
" except IndexError:\n",
" return None\n",
" yt = {\n",
" \"h\": List[0],\n",
" \"q\": List[1]\n",
" }\n",
" return yt\n",
"\n",
" @property\n",
" def Headers(self):\n",
" headers = {\n",
" \"Accept\": \"application/json,application/xml\",\n",
" \"workday-client-manifest-id\": \"mvp\",\n",
" \"X-Workday-Client\": \"2021.20.011\",\n",
" \"User-Agent\": \"Mozilla/5.0 (Linux; Android 10; SM-J400F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.66 Mobile Safari/537.36\",\n",
" \"Content-Type\": \"application/x-www-form-urlencoded\"\n",
" }\n",
" return headers\n",
"\n",
" def req(self):\n",
" title = []\n",
" location = []\n",
" link = []\n",
" date = []\n",
" R = []\n",
" total = []\n",
" dts = self.Regexp()\n",
" fo = ses.get(\n",
" url=f\"https://%s/{dts['q']}?clientRequestID={self.ID}\"%(dts['h']),\n",
" headers=self.Headers\n",
" ).json()\n",
" total.append(fo)\n",
" pagination = fo['body']['children'][0]\n",
" sg = re.findall(\"/\\S+searchPagination/\\w+\", re.findall(\"'uri': '/.*Pagination.+'\", str(pagination))[0])[0]\n",
"\n",
" Known_Pages= [50, 100, 150, 200]\n",
" \n",
" for i in Known_Pages:\n",
" ur = f\"https://{dts['h']}{sg}/%s?clientRequestID={self.ID}\" % (i)\n",
" tot = ses.get(url=ur, headers=self.Headers)\n",
" if not '404' in str(tot.status_code):\n",
" total.append(tot.json())\n",
"\n",
" for pages in total:\n",
" main = pages['body']['children'][0]['children']\n",
" \n",
" for items in main:\n",
" for dat in items['listItems']:\n",
" title.append(dat['title']['instances'][0]['text'])\n",
" link.append(f\"https://{dts['h']}%s?{dts['q']}={self.ID}\"%(dat['title']['commandLink']))\n",
" R.append(dat['subtitles'][0]['instances'][0]['text'])\n",
" location.append(dat['subtitles'][1]['instances'][0]['text'])\n",
" date.append(dat['subtitles'][2]['instances'][0]['text'])\n",
" \n",
"\n",
"# for desp in link:\n",
"# req1 = ses.get(desp, headers=self.Headers).json()\n",
"# for t6 in re.findall(\"t':\\s+'(<p><b><span>.*?>)', \", str(req1)):\n",
"# description.append(t6)\n",
"\n",
"#case 1\n",
"### \"e': '(<p>.*</p>)'}, \" some issues in matching\n",
"\n",
"#case 2\n",
"### \"t':\\s+'(<p><b><span>.*?>)', works & tested on \"https://regex101.com/\"\n",
"\n",
"\n",
" cx = pd.DataFrame({\n",
" \"Job-Tile\": title,\n",
" \"R\": R,\n",
"# \"Job-Description\": description,\n",
" \"Link\": link,\n",
" \"Job-Location\": location,\n",
" \"Job-Posted\": date,\n",
" \n",
" })\n",
" try:\n",
" from time import ctime\n",
" if self.output and self.filename is not None:\n",
" if self.output.lower() == \"csv\":\n",
" cx.to_csv(f\"{self.filename}.{self.output}\", index=False)\n",
" elif self.output.lower() == \"xlsx\":\n",
" cx.to_excel(f\"{self.filename}.{self.output}\", engine='xlsxwriter')\n",
" else:\n",
" return \"setting default output\"\n",
" cx.to_csv(\"Job-Datas-%s.csv\" % (ctime()), index=False)\n",
" except IndexError:\n",
" return \"try-again\"\n",
"\n",
"scraped_contents = JOBS(\n",
" url=\"https://brocku.wd3.myworkdayjobs.com/brocku_careers/\",\n",
" filename=\"preetii\",\n",
" output=\"csv\"\n",
" \n",
").req()\n",
"scraped_contents.to_csv(\"./drive/My Drive/preetiii.csv\")\n",
"\n",
"scraped_contents\n"
],
"execution_count": null,
"outputs": [
{
"output_type": "error",
"ename": "AttributeError",
"evalue": "ignored",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-2-b253fa300eb7>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 124\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 125\u001b[0m ).req()\n\u001b[0;32m--> 126\u001b[0;31m \u001b[0mscraped_contents\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto_csv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"./drive/My Drive/preetiii.csv\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 127\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 128\u001b[0m \u001b[0mscraped_contents\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mAttributeError\u001b[0m: 'NoneType' object has no attribute 'to_csv'"
]
}
]
},
{
"cell_type": "code",
"metadata": {
"id": "krkwjrcCWg0x"
},
"source": [
""
],
"execution_count": null,
"outputs": []
}
]
} |
import pytorch_lightning as pl
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from src.data import MNISTDataModule
from src.model import MLPMixerLightning
def main():
    """Train an MLP-Mixer classifier on MNIST with PyTorch Lightning."""
    data_module = MNISTDataModule(data_dir="./data/", batch_size=128)

    # Define callbacks / logging
    logger = TensorBoardLogger("lightning_logs", name="mlp-mixer-mnist")
    checkpoint_callback = ModelCheckpoint(
        dirpath="./",
        filename="mlp_mixer_mnist" + "-{epoch}-{validation_loss:.2f}",
        monitor="validation_loss",
        every_n_epochs=1,
    )
    lr_monitor = LearningRateMonitor(logging_interval="step")
    params = {
        "max_epochs": 10,
        "logger": logger,
        "log_every_n_steps": 10,
        "accumulate_grad_batches": 8,
        "callbacks": [lr_monitor, checkpoint_callback],
    }
    model = MLPMixerLightning(
        image_size=28,
        patch_size=7,
        input_channels=1,
        num_features=128,
        num_mixer_blocks=6,
        num_classes=10,
    )
    # BUGFIX: `params` is a dict of keyword arguments -- `*params` unpacked
    # only the dict KEYS as positional args; `**params` passes them as kwargs.
    trainer = pl.Trainer(**params)
    trainer.fit(model, data_module)


if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
import serial
import sys
def isBin(x):
    """Return True if *x* parses as a base-2 integer string, else False."""
    try:
        # Value discarded -- only parseability matters (original bound an
        # unused local `a`).
        int(x, base=2)
    except ValueError:
        return False
    return True
# NOTE: Python 2 script (print statements, old-style `except ..., msg`) that
# packs four direction flags into one byte and sends it over a serial port.
if __name__ == '__main__':
    # Parse arguments: expects exactly 4 binary values (UP DOWN LEFT RIGHT).
    argNum = len(sys.argv)
    if (argNum != 5):
        print >> sys.stderr,"Invalid number of arguments ("\
            + str(argNum - 1) + ")."
        print >> sys.stderr,"Should be 4: UP, DOWN, LEFT, RIGHT"
        sys.exit(1)
    # NOTE(review): isBin validates base-2, but the accepted value is then
    # parsed as base-10 below -- fine for single '0'/'1' digits, surprising
    # for multi-digit input like '10'. Confirm intended input format.
    if not isBin(sys.argv[1]):
        print >> sys.stderr, "Invalid value (must be binary)"
        sys.exit(1)
    else:
        up = int(sys.argv[1])
    if not isBin(sys.argv[2]):
        print >> sys.stderr, "Invalid value (must be binary)"
        sys.exit(1)
    else:
        down = int(sys.argv[2])
    if not isBin(sys.argv[3]):
        print >> sys.stderr, "Invalid value (must be binary)"
        sys.exit(1)
    else:
        left = int(sys.argv[3])
    if not isBin(sys.argv[4]):
        print >> sys.stderr, "Invalid value (must be binary)"
        sys.exit(1)
    else:
        right = int(sys.argv[4])
    print "UP:%d DOWN:%d LEFT:%d RIGHT:%d" % (up, down, left, right)
    # Pack the flags into one command byte: bit0=down, bit1=up, bit2=right,
    # bit3=left.
    cmd = 0
    cmd = (cmd | (down << 0))
    cmd = (cmd | (up << 1))
    cmd = (cmd | (right << 2))
    cmd = (cmd | (left << 3))
    # connect to serial port (8N1, 9600 baud, no flow control, blocking)
    ser = serial.Serial()
    ser.port = '/dev/ttyUSB0'
    ser.baudrate = 9600
    ser.bytesize = serial.EIGHTBITS
    ser.stopbits = serial.STOPBITS_ONE
    ser.parity = serial.PARITY_NONE
    ser.rtscts = False
    ser.xonxoff = False
    ser.timeout = None
    try:
        ser.open()
    except serial.SerialException, msg:
        print >> sys.stderr, "Could not open serial port:\n" + msg
        sys.exit(1)
    # Send the single command byte and close the port.
    ser.write(chr(cmd))
    ser.close()
|
from abc import ABC
class QRCodeHandler(ABC):
    """
    Interface for objects that can process the decoded content of a
    detected QR code.
    """

    def handle_code(self, code_content: str):
        """Process *code_content*; the default implementation does nothing."""
        # TODO: Implement a code filter
        pass
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from django.forms import ChoiceField, MultipleChoiceField
from django.http import Http404
from django.contrib import messages
from ..controllers.groupcontroller import groupcontroller, is_in_group, return_group_from_id
from ..controllers.friendcontroller import are_friends
from ..controllers.utilities import get_profile_from_uid, AlreadyExistsException
from ..forms import AddGroupForm, MyGroupSelectForm, ManageGroupMemberForm
"""
Let's try to make this a bit more... restful? Whatever that really means
We will create dicts and pass those to render for get requests
This will make it easier to implement json views later
"""
@login_required
def create_group(request):
    """
    Create a group from POSTed form data, then redirect to manage_groups.

    :param request: POST with an AddGroupForm payload
    :return: redirect to manage_groups
    """
    # only take post for doing anything
    if request.method == 'POST':
        groupcontrol = groupcontroller(request.user.id)
        form = AddGroupForm(request.POST)
        if form.is_valid():
            newgroupname = form.cleaned_data['name']
            # we can't have an empty newgroupname
            # todo: add as not null database constraint
            if len(newgroupname) == 0:
                # Surface the problem instead of silently redirecting
                # (addresses the original todo).
                messages.error(request, "Group name cannot be empty")
                return redirect("manage_groups")
            try:
                groupcontrol.create(newgroupname)
            except AlreadyExistsException:
                # `as e` binding was unused -- exception detail not needed here.
                messages.error(request, "Group name must be unique")
    # always redirect the same regardless of request type
    return redirect("manage_groups")
@login_required
def delete_group(request):
    """
    Delete the group selected on the manage-groups page.

    :param request: POST with field idname holding the group id
    :return: redirect to manage groups page
    """
    # todo: unit test
    if request.method == 'POST':
        controller = groupcontroller(request.user.id)
        form = MyGroupSelectForm(request.user.id, ChoiceField, request.POST)
        if form.is_valid():
            # idname carries the selected group id
            selected_id = form.cleaned_data['idname']
            controller.delete_group(return_group_from_id(selected_id))
    return redirect("manage_groups")
@login_required
def manage_groups(request):
    """
    Render the group-management overview for the logged-in user.
    This needs to only be accessible to the current user for themselves.

    :param request: only get
    :return: rendered manage_groups page
    """
    controller = groupcontroller(request.user.id)
    context = {
        'groups': controller.return_groups(),
        'addform': AddGroupForm(),  # how would I render a django form in a java android app?
        'delform': MyGroupSelectForm(request.user.id, ChoiceField),
    }  # how will this translate to a json view? Test in browser
    return render(request, 'camelot/managegroups.html', context)
@login_required
def manage_group(request, id):
    """
    Render the management page for a single group (add/remove members).

    :param request:
    :param id: id of the group to manage
    :return: rendered editgroupmembers page
    """
    # Controller construction kept for parity with the other views.
    controller = groupcontroller(request.user.id)
    group = return_group_from_id(id)
    context = {
        "group": group,
        "addform": ManageGroupMemberForm(request.user.profile, group),
        "delform": ManageGroupMemberForm(request.user.profile, group, remove=True),
    }
    return render(request, "camelot/editgroupmembers.html", context)
@login_required
def remove_friend_from_group(request, groupid):
    """
    Remove the selected friends from a group via the group management page.

    :param request:
    :param groupid:
    :return: redirect back to the group's management page
    """
    if request.method == 'POST':
        group = return_group_from_id(groupid)
        controller = groupcontroller(request.user.id)
        form = ManageGroupMemberForm(request.user.profile, group, True, request.POST)
        if form.is_valid():
            # idname holds the selected member user ids
            members = [get_profile_from_uid(int(raw)) for raw in form.cleaned_data['idname']]
            for member in members:
                controller.delete_member(group, member)
    return redirect("manage_group", group.id)
@login_required
def add_friend_to_group_mgmt(request, groupid):
    """
    Add the selected friends to a group via the group management page.

    :param request:
    :param groupid:
    :return: redirect back to the group's management page
    """
    if request.method == 'POST':
        group = return_group_from_id(groupid)
        controller = groupcontroller(request.user.id)
        form = ManageGroupMemberForm(request.user.profile, group, False, request.POST)
        if form.is_valid():
            # idname holds the selected member user ids
            members = [get_profile_from_uid(int(raw)) for raw in form.cleaned_data['idname']]
            for member in members:
                controller.add_member(group.id, member)
    return redirect("manage_group", group.id)
@login_required
def add_friend_to_group(request, userid):
    """
    View to add a friend to one or more groups after creating friendship.
    Only accessible when a (possibly pending) friendship exists.

    :param request:
    :param userid: user id of the friend to add to groups
    :return:
    """
    # check that the users are at least pending friends before rendering
    # (profiles fetched once instead of four times)
    my_profile = get_profile_from_uid(request.user.id)
    friend_profile = get_profile_from_uid(userid)
    if not (are_friends(my_profile, friend_profile, confirmed=True)
            or are_friends(my_profile, friend_profile, confirmed=False)):
        raise Http404
    if request.method == 'POST':
        groupcontrol = groupcontroller(request.user.id)
        form = MyGroupSelectForm(request.user.id, MultipleChoiceField, request.POST)
        if form.is_valid():
            profile = get_profile_from_uid(userid)
            # list of group ids
            groups = [int(x) for x in form.cleaned_data['idname']]
            for groupid in groups:
                # Skip groups the friend already belongs to.
                if not is_in_group(return_group_from_id(groupid), profile):
                    # Explicit check instead of the original bare `assert`
                    # (asserts are stripped under -O) wrapped in a no-op
                    # `except Exception as e: raise e`. AssertionError is kept
                    # so any caller handling remains unchanged.
                    if not groupcontrol.add_member(groupid, profile):
                        raise AssertionError(
                            'add_member failed for group %s' % groupid)
        # if we are in pending requests, we want to redirect to the pending page... hmm... :\
        return redirect("show_profile", userid)
    form = MyGroupSelectForm(request.user.id, MultipleChoiceField)
    retdict = {'uid': userid, 'form': form}
    return render(request, 'camelot/addfriendtogroup.html', retdict)
|
# Function practice: basic arithmetic on two sample values.
x = 2
y = 3

def somar(x, y):
    """Return the sum of x and y."""
    return x + y

print('Soma: ' + str(somar(x,y)))

def subtrair(x,y):
    """Return y minus x (note the reversed operand order)."""
    return y - x

print('Subtração: ' + str(subtrair(x,y)))

def multiplicar(x, y):
    """Return the product of x and y."""
    return x * y

print('Multiplicação: ' + str(multiplicar(x,y)))

def dividir(x,y):
    """Return y divided by x (note the reversed operand order)."""
    return y / x

print('Divisão: ' + str(dividir(x,y)))

def x_quadrado(x):
    """Return x squared."""
    return x ** 2

print('x²: ' + str(x_quadrado(x)))

def y_quadrado(y):
    """Return y squared."""
    return y ** 2

print('y²: ' + str(y_quadrado(y)))

def soma_dos_quadrados(x,y):
    """Return x² + y²."""
    return x ** 2 + y ** 2

print('x² + y²: ' + str(soma_dos_quadrados(x,y)))

def x_elevado_a_y(x,y):
    """Return x raised to the power y."""
    return x ** y

print('x^y: ' + str(x_elevado_a_y(x,y)))

def y_elevado_a_x(x,y):
    """Return y raised to the power x."""
    return y ** x

print('y^x: ' + str(y_elevado_a_x(x,y)))

def raiz_x(x):
    """Return the square root of x."""
    return x ** (1/2)

print(raiz_x(x))

def raiz_y(y):
    """Return the square root of y."""
    return y ** (1/2)

print(raiz_y(y))
|
# NOTE(review): this entire module is disabled by being wrapped in a
# triple-quoted string literal, which has no effect at import time.
# Delete the dead code or unwrap the string if the Speaker translations
# are needed again.
'''# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from modeltranslation.translator import TranslationOptions, translator
from pydaygal.speakers.models import Speaker
class SpeakerTranslationOptions(TranslationOptions):
    fallback_languages = {'default': ('gl', 'es', 'ca', 'en')}
translator.register(Speaker, SpeakerTranslationOptions)
'''
|
# https://www.hackerrank.com/challenges/closest-numbers/problem
# Print every adjacent pair (in sorted order) whose difference equals the
# minimum difference over all pairs.
n = int(input())
lst = [int(tok) for tok in input().split()]
sortedlst = sorted(lst)
# Adjacent gaps in the sorted order -- the closest pair is always adjacent.
difference = [b - a for a, b in zip(sortedlst, sortedlst[1:])]
minimum = min(difference)
for idx, gap in enumerate(difference):
    if gap == minimum:
        print(sortedlst[idx], sortedlst[idx + 1], end=" ")
|
import os
from keras.applications.vgg16 import VGG16
from keras.models import Sequential, Model
from keras.layers import Input, Activation, Dropout, Flatten, Dense
from keras.preprocessing.image import ImageDataGenerator
"""
Load the trained weights and measure accuracy on the test data.
"""
# Directory holding the saved weight files.
result_dir = 'results'
# The 17 flower classes; order defines the label indices.
classes = ['Tulip', 'Snowdrop', 'LilyValley', 'Bluebell', 'Crocus',
           'Iris', 'Tigerlily', 'Daffodil', 'Fritillary', 'Sunflower',
           'Daisy', 'ColtsFoot', 'Dandelion', 'Cowslip', 'Buttercup',
           'Windflower', 'Pansy']
batch_size = 32
nb_classes = len(classes)
img_rows, img_cols = 150, 150
channels = 3
# Build an FCNN taking InceptionV3 bottleneck features as input and the
# target classes as output. (translated from Japanese)
# NOTE(review): IMG_ROWS/IMG_COLS/CHANNELS, InceptionV3,
# GlobalAveragePooling2D, N_CLASSES, SGD, train_datagen, train_data_dir and
# BATCH_SIZE are never defined or imported in this file (the lowercase
# img_rows/... above go unused instead), and the VGG16 import is unused.
# As written this script raises NameError -- it appears two scripts were
# pasted together; confirm the intended source file.
input_tensor = Input(shape=(IMG_ROWS, IMG_COLS, CHANNELS))
# Input tensor: rank-3 (height x width x RGB channels). (translated)
base_model = InceptionV3(weights='imagenet', include_top=False,input_tensor=input_tensor)
x = base_model.output
x = GlobalAveragePooling2D()(x)
# Flatten/pool the backbone output. (translated)
x = Dense(1024, activation='relu')(x)
# Fully connected, 1024 units, relu activation. (translated)
predictions = Dense(N_CLASSES, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)
# Freeze the pretrained backbone layers.
for layer in base_model.layers:
    layer.trainable = False
model.load_weights(os.path.join(result_dir, 'vermins.h5'))
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy',metrics=['accuracy'])
model.summary()
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen.flow_from_directory(directory=train_data_dir,
                                                    target_size=(IMG_ROWS, IMG_COLS),
                                                    batch_size=BATCH_SIZE,
                                                    class_mode='categorical',
                                                    shuffle=True
                                                    )
# Load trained weights. (translated)
# NOTE(review): this second load_weights/compile silently overwrites the
# vermins.h5 weights and SGD optimizer configured above -- confirm which
# checkpoint/optimizer is intended.
model.load_weights(os.path.join(result_dir, 'finetuning.h5'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()
# Generator producing the test batches. (translated)
test_data_dir = 'test_images'
nb_test_samples = 170
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_generator = test_datagen.flow_from_directory(
    test_data_dir,
    target_size=(img_rows, img_cols),
    color_mode='rgb',
    classes=classes,
    class_mode='categorical',
    batch_size=batch_size,
    shuffle=True)
# Evaluate accuracy. (translated)
loss, acc = model.evaluate_generator(test_generator, val_samples=nb_test_samples)
print(loss, acc)
|
"""
======================
Make a multiview image
======================
Make one image from multiple views.
"""
print __doc__
from surfer import Brain
sub = 'fsaverage'
hemi = 'lh'
surf = 'inflated'
brain = Brain(sub, hemi, surf)
###############################################################################
# Save a set of images as a montage
brain.save_montage('/tmp/fsaverage_h_montage.png',
['l', 'v', 'm'], orientation='v')
brain.close()
###############################################################################
# View created image
import Image
import pylab as pl
image = Image.open('/tmp/fsaverage_h_montage.png')
fig = pl.figure(figsize=(5, 3))
pl.imshow(image, origin='lower')
pl.xticks(())
pl.yticks(())
|
import pymysql
import gevent
import time
"""
1.使用pymysql多行插入(提高效率)--executemany
2.使用python协程(遇到I/O操作就切换任务,无需等待,提高效率)gevent.spwan + gevent.joinall
30w条数据耗时8s
"""
class MyPyMysql:
    """
    Bulk-insert demo: 300k rows into MySQL using pymysql executemany()
    batches, one gevent greenlet per batch so greenlets switch on DB I/O.
    (Original notes, translated: ~8s for 300k rows.)
    """
    def __init__(self):
        # NOTE(review): connection settings -- including the password -- are
        # hard-coded; move to config/env for anything beyond a local demo.
        self.host = 'localhost'
        self.port = 3306
        self.username = 'root'
        self.password = 'xu13939201399@'
        self.db = 'dbtest'
        self.charset = 'utf8'
        self.pymysql_connect()
    def pymysql_connect(self):
        # Connect to MySQL via pymysql. (translated)
        self.conn = pymysql.connect(
            host=self.host,
            port=self.port,
            user=self.username,
            password=self.password,
            db=self.db,
            charset=self.charset)
        # Once connected, immediately run the batched inserts. (translated)
        self.asynchronous()
    def run(self,nmin,nmax):
        # Create a cursor for this batch. (translated)
        self.cur = self.conn.cursor()
        # SQL template for the multi-row insert. (translated)
        sql = "insert into bigtest(id,value) values (%s,%s)"
        data_list = []
        for i in range(nmin,nmax):
            result = (i,f"The value is {i}.")
            data_list.append(result)
        # executemany(sql, rows) inserts all rows of the batch at once. (translated)
        content = self.cur.executemany(sql,data_list)
        if content:
            print('成功插入第{}条数据'.format(nmax-1))
        # Commit, otherwise the rows are not persisted. (translated)
        self.conn.commit()
    def asynchronous(self):
        # g_l is the task list: one greenlet (gevent.spawn) per batch. (translated)
        max_line = 10000  # maximum rows per insert batch (translated)
        g_l = [gevent.spawn(self.run,i,i+max_line) for i in range(1,300001,max_line)]
        # gevent.joinall waits for every greenlet to finish. (translated)
        gevent.joinall(g_l)
        self.cur.close()  # close cursor (translated)
        self.conn.close()  # close pymysql connection (translated)
if __name__ == '__main__':
    start_time = time.time()  # record program start time (translated)
    # Instantiating the class connects and runs all inserts as a side effect.
    st = MyPyMysql()  # instantiate the class (translated)
    print('程序耗时{:.2f}'.format(time.time()-start_time))  # total elapsed seconds (translated)
from rest_framework import serializers
from .models import *
from drf_role.models import *
from django.db import transaction
from django.contrib.auth.hashers import make_password, check_password
import datetime
from django.http import JsonResponse
class CoordinateSerializer(serializers.ModelSerializer):
    """Serializer for coordinates reported by the requesting user."""

    class Meta:
        model = Coordinate
        depth = 1
        fields = '__all__'

    def create(self, validated_data):
        """
        Create and return a Coordinate owned by the requesting user.

        BUGFIX: a DRF serializer's ``create`` must return the created model
        instance (the view builds the HTTP response), not a JsonResponse,
        and should read from ``validated_data`` rather than ``self.data``.
        """
        user = self.context.get('request').user
        return Coordinate.objects.create(
            longitude=validated_data.get('longitude'),
            latitude=validated_data.get('latitude'),
            time=datetime.datetime.now(),
            user_id=user.id,
        )
class UserSerializer(serializers.ModelSerializer):
    """Read-only representation of an auth User."""

    class Meta:
        model = User
        depth = 1
        fields = ('id', 'date_joined', 'last_login', 'email')
class ProfileSerializer(serializers.ModelSerializer):
    """Serializer that creates an auth User and its Profile atomically."""
    # user = UserSerializer()

    class Meta:
        model = Profile
        depth = 1
        fields = '__all__'

    def create(self, validated_data):
        """
        Create the User and Profile in one transaction and attach the
        default role (type=0).
        """
        with transaction.atomic():
            email = validated_data.pop('email')
            password = validated_data.pop('password')
            hashed_pwd = make_password(password)
            # Use the instance returned by create() directly instead of
            # re-querying by email (avoids an extra query and a race with
            # concurrent registrations).
            found_user = User.objects.create(
                email=email, password=hashed_pwd,
            )
            created_profile = Profile.objects.create(
                password=hashed_pwd,
                user=found_user,
                status=validated_data.pop('status'),
                first_name=validated_data.pop('first_name'),
                last_name=validated_data.pop('last_name'),
                phone=validated_data.pop('phone'),
                email=email,
            )
            # Default role (type=0) for newly registered profiles.
            created_profile.roles.add(Role.objects.filter(type=0).first())
            return created_profile
class EventSerializer(serializers.ModelSerializer):
    """Full representation of an Event with one level of relations."""

    class Meta:
        model = Event
        depth = 1
        fields = '__all__'
class EventTimetableSerializer(serializers.ModelSerializer):
    """Full representation of an EventTimetable with one level of relations."""

    class Meta:
        model = EventTimetable
        depth = 1
        fields = '__all__'
class MapBorderSerializer(serializers.ModelSerializer):
    """Full representation of a MapBorder with one level of relations."""

    class Meta:
        model = MapBorder
        depth = 1
        fields = '__all__'
# using my first module
import mymodule

# Call the module's public helper.
mymodule.say_hi()
# __version__ is defined inside mymodule; __name__ is set by the import system.
print('Version', mymodule.__version__)
print('Module name', mymodule.__name__)
|
from rest_framework import status
from rest_framework.test import APITestCase
from django.contrib.auth.models import User
from src.tasks.models import Task
class UsersTests(APITestCase):
    """API tests for the users and tasks endpoints.

    setUp persists two known users so each test can authenticate as one
    and reference the other as a task author.
    """

    def setUp(self):
        # Create both fixture users with the same known password.
        for username in ('firstUser', 'secondUser'):
            user = User.objects.create(username=username)
            user.set_password('12345')
            user.save()

    def test_unauthorized_accounts(self):
        """Ensure we can't list users without authorization."""
        url = '/api/v1/users/'
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_authorized_accounts(self):
        """Ensure an authenticated user can list users."""
        url = '/api/v1/users/'
        self.client.login(username='firstUser', password='12345')
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_retrive_unauthorize_tasks(self):
        """Ensure we can't list tasks without authorization."""
        url = '/api/v1/tasks/'
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_retrive_authorize_tasks(self):
        """Ensure an authenticated user can list tasks."""
        url = '/api/v1/tasks/'
        self.client.login(username='firstUser', password='12345')
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_create_tasks(self):
        """Ensure an authenticated user can create a task."""
        url = '/api/v1/tasks/'
        self.client.login(username='firstUser', password='12345')
        task = {
            "name": "TestTask",
            "description": "",
        }
        response = self.client.post(url, task, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_create_tasks_with_user(self):
        """Ensure a task can be created with an explicit author."""
        url = '/api/v1/tasks/'
        self.client.login(username='firstUser', password='12345')
        # BUG FIX: the original built an unsaved User() whose `id` is None
        # (and duplicated an existing username); use the persisted fixture.
        secondUser = User.objects.get(username='secondUser')
        task = {
            "name": "TestTask",
            "description": "",
            "author": secondUser.id,
        }
        response = self.client.post(url, task, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_cant_update_not_your_task(self):
        """Ensure we can't update a task owned by another user."""
        url = '/api/v1/tasks/'
        self.client.login(username='firstUser', password='12345')
        secondUser = User.objects.get(username='secondUser')
        task = {
            "name": "TestTask",
            "description": "",
            "author": secondUser.id,
        }
        response = self.client.post(url, task, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # NOTE(review): assumes the task created above gets pk=1 in a fresh
        # per-test database — confirm if the test DB is not reset per test.
        url = '/api/v1/tasks/1/'
        response = self.client.put(url, task, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_cant_delete_not_your_task(self):
        """Ensure we can't delete a task owned by another user."""
        url = '/api/v1/tasks/'
        self.client.login(username='firstUser', password='12345')
        secondUser = User.objects.get(username='secondUser')
        task = {
            "name": "TestTask",
            "description": "",
            "author": secondUser.id,
        }
        response = self.client.post(url, task, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # NOTE(review): same pk=1 assumption as above.
        url = '/api/v1/tasks/1/'
        response = self.client.delete(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
# -*- coding: utf-8 -*-
# Odoo addon manifest for the LC (Letter of Credit) report generator.
{
    'name': 'LC Report Generator',
    'summary': "LC report management",
    # Typo fix: was "Simplly creat your LC report".
    'description': 'Simply create your LC report',
    # Spelling aligned with the website domain (was "Metaporphosis.com.bd").
    'author': "Metamorphosis.com.bd",
    'website': "http://www.metamorphosis.com.bd/",
    'version': '0.1',
    # Addons that must be installed before this one.
    'depends': [
        'base',
        'account',
    ],
    # View and report definitions loaded on install/update.
    'data': [
        'views/customer_invoices_records.xml',
        'views/commercial_invoices.xml',
        'views/lc_informations.xml',
        'views/invoice_name_sequence.xml',
        'views/country_origin.xml',
        'views/delivery_transport.xml',
        # 'views/delivery_address.xml',
        # 'views/shipping_factory_name_address.xml',
        # 'views/proforma_invoice.xml',
        'views/packing_list.xml',
        'views/truck_challan.xml',
        'views/delivery_challan.xml',
        'views/beneficiary_certificate.xml',
        'views/certificate_of_origin.xml',
        'views/forwarding_letter.xml',
        'views/bill_of_exchange.xml',
        'views/terms_conditions.xml',
        'views/supplier_factory_name_addr.xml',
        # 'views/customer_factory_name_addr.xml',
        # 'views/bank_names.xml',
        # 'views/bank_branch.xml',
        'views/bank_names_branch_address.xml',
        # 'views/beneficiary_full_name.xml',
        'views/reimbursement.xml',
        'views/method_of_payment.xml',
        'views/product_type.xml',
        'views/terms_of_delivery.xml',
        # 'views/commodity.xml',
        # 'views/beneficiary_bank_branch.xml',
        'views/summery_reports/proforma_invoice_status.xml',
        'views/signature_upload.xml',
    ],
    'auto_install': False,
    'installable': True,
}
import time
import pyautogui
import keyboard
# Give the user time to focus the drawing app (e.g. Paint) before drawing starts.
time.sleep(5) # execute paint
# Length of the first stroke in pixels; it shrinks by 10 every two strokes,
# so the cursor traces an inward rectangular spiral.
distance = 300
while distance > 0:
    # Pressing 'p' pauses drawing; it resumes when 'r' is pressed.
    # NOTE(review): the pause key is only sampled once per loop iteration,
    # not during the drags — a press mid-stroke may be missed.
    if keyboard.is_pressed('p'):
        while True:
            if keyboard.is_pressed('r'):
                break
    pyautogui.drag(distance, 0, duration=0.2) # move right
    distance -= 10
    pyautogui.drag(0, distance, duration=0.2) # move down
    pyautogui.drag(-distance, 0, duration=0.2) # move left
    distance -= 10
    pyautogui.drag(0, -distance, duration=0.1) # move up
import numpy as np
import experiment as ex
import sys
sys.path.append('../marcos_client')
import matplotlib.pyplot as plt
import pdb
st = pdb.set_trace  # debugging shorthand: call st() to drop into pdb
def trapezoid(plateau_a, total_t, ramp_t, ramp_pts, total_t_end_to_end=True, base_a=0):
    """Return (times, amplitudes) arrays for a trapezoid pulse starting at t=0.

    The waveform rises from ``base_a`` to ``plateau_a`` over ``ramp_t``
    (sampled at ``ramp_pts`` points), holds the plateau, then ramps back
    down. The falling ramp begins at ``total_t - ramp_t`` when
    ``total_t_end_to_end`` is true, otherwise at ``total_t``.
    """
    up_times = np.linspace(0, ramp_t, ramp_pts)
    up_amps = np.linspace(base_a, plateau_a, ramp_pts)
    # Time at which the falling ramp starts.
    fall_start = total_t - ramp_t if total_t_end_to_end else total_t
    # Drop the repeated sample where the descent meets the plateau.
    times = np.concatenate((up_times, up_times[:-1] + fall_start))
    amps = np.concatenate((up_amps, up_amps[::-1][1:]))
    return times, amps
def trap_cent(centre_t, plateau_a, trap_t, ramp_t, ramp_pts, base_a=0):
    """Return a trapezoid centred on ``centre_t``.

    Its area is ``plateau_a * trap_t``, where ``trap_t`` spans from the
    start (equivalently, centre) of the ramp-up to the start (centre) of
    the ramp-down. Remaining parameters match trapezoid().
    """
    times, amps = trapezoid(plateau_a, trap_t, ramp_t, ramp_pts, False, base_a)
    # Shift so the trapezoid's midpoint lands on centre_t.
    shift = centre_t - (trap_t + ramp_t) / 2
    return times + shift, amps
def turbo_spin_echo(self, plotSeq):
    """Assemble and run (or just plot) a turbo/fast spin-echo sequence.

    All sequence parameters come from ``self`` (RF amplitudes/durations,
    gradient amplitudes/durations, echo/TR counts, shims, ...) — see the
    commented defaults below for their meaning and units.

    plotSeq : int
        1 -> plot the assembled sequence and return None;
        0 -> run the experiment and return (real part of rx0 data, messages).
    """
    trap_ramp_pts=5
    rf_pi_duration=None
    grad_board = "ocra1"
    # plot_rx=False, init_gpa=False,
    # dbg_sc=0.5, # set to 0 to avoid RF debugging pulses in each RX window, otherwise amp between 0 or 1
    # lo_freq=0.2, # MHz
    # rf_amp=1, # 1 = full-scale
    #
    # rf_pi2_duration=50, # us, rf pi/2 pulse length
    #
    # # trapezoid properties - shared between all gradients for now
    # trap_ramp_duration=50, # us, ramp-up/down time
    # trap_ramp_pts=5, # how many points to subdivide ramp into
    #
    # # spin-echo properties
    # echos_per_tr=5, # number of spin echoes (180 pulses followed by readouts) to do
    # echo_duration=2000, # us, time from the centre of one echo to centre of the next
    #
    # readout_amp=0.8, # 1 = gradient full-scale
    # readout_duration=500, # us, time in the centre of an echo when the readout occurs
    # rx_period=10/3, # us, 3.333us, 300 kHz rate
    # readout_grad_duration=700, # us, readout trapezoid lengths (mid-ramp-up to mid-ramp-down)
    # # (must at least be longer than readout_duration + trap_ramp_duration)
    #
    # phase_start_amp=0.6, # 1 = gradient full-scale, starting amplitude (by default ramps from +ve to -ve in each echo)
    # phase_grad_duration=150, # us, phase trapezoid lengths (mid-ramp-up to mid-ramp-down)
    # phase_grad_interval=1200, # us, interval between first phase trapezoid and its negative-sign counterpart within a single echo
    #
    # # slice trapezoid timing is the same as phase timing
    # slice_start_amp=0.3, # 1 = gradient full-scale, starting amplitude (by default ramps from +ve to -ve in each TR)
    #
    # tr_pause_duration=3000, # us, length of time to pause from the end of final echo's RX pulse to start of next TR
    # trs=5 # number of TRs
    #
    """
    readout gradient: x
    phase gradient: y
    slice/partition gradient: z
    """
    shim_x: int = self.shim[0]
    shim_y: int = self.shim[1]
    shim_z: int = self.shim[2]
    # A pi (180) pulse defaults to twice the pi/2 pulse duration.
    if rf_pi_duration is None:
        rf_pi_duration = 2 * self.rf_pi2_duration
    # Phase-encode amplitude steps linearly across echoes within a TR;
    # slice-encode amplitude steps linearly across TRs.
    phase_amps = np.linspace(self.phase_start_amp, -self.phase_start_amp, self.echos_per_tr)
    slice_amps = np.linspace(self.slice_start_amp, -self.slice_start_amp, self.trs)
    # create appropriate waveforms for each echo, based on start time, echo index and TR index
    # note: echo index is 0 for the first interval (90 pulse until first 180 pulse) thereafter 1, 2 etc between each 180 pulse
    def rf_wf(tstart, echo_idx):
        # RF pulse envelope; phase of pi/2 and pi pulses differ by 90 degrees.
        pi2_phase = 1 # x
        pi_phase = 1j # y
        if echo_idx == 0:
            # do pi/2 pulse, then start first pi pulse
            return np.array([tstart + (self.echo_duration - self.rf_pi2_duration)/2, tstart + (self.echo_duration + self.rf_pi2_duration)/2,
                             tstart + self.echo_duration - rf_pi_duration/2]), np.array([pi2_phase, 0, pi_phase]) * self.rf_amp
        elif echo_idx == self.echos_per_tr:
            # finish final RF pulse
            return np.array([tstart + rf_pi_duration/2]), np.array([0])
        else:
            # finish last pi pulse, start next pi pulse
            return np.array([tstart + rf_pi_duration/2, tstart + self.echo_duration - rf_pi_duration/2]), np.array([0, pi_phase]) * self.rf_amp
    def tx_gate_wf(tstart, echo_idx):
        # Gate the TX amplifier slightly before/after each RF pulse.
        tx_gate_pre = 2 # us, time to start the TX gate before each RF pulse begins
        tx_gate_post = 1 # us, time to keep the TX gate on after an RF pulse ends
        if echo_idx == 0:
            # do pi/2 pulse, then start first pi pulse
            return np.array([tstart + (self.echo_duration - self.rf_pi2_duration)/2 - tx_gate_pre,
                             tstart + (self.echo_duration + self.rf_pi2_duration)/2 + tx_gate_post,
                             tstart + self.echo_duration - rf_pi_duration/2 - tx_gate_pre]), \
                             np.array([1, 0, 1])
        elif echo_idx == self.echos_per_tr:
            # finish final RF pulse
            return np.array([tstart + rf_pi_duration/2 + tx_gate_post]), np.array([0])
        else:
            # finish last pi pulse, start next pi pulse
            return np.array([tstart + rf_pi_duration/2 + tx_gate_post, tstart + self.echo_duration - rf_pi_duration/2 - tx_gate_pre]), \
                np.array([0, 1])
    def readout_grad_wf(tstart, echo_idx):
        # Readout gradient: half-duration trapezoid after the 90 pulse,
        # then a full-duration trapezoid centred on each echo.
        if echo_idx == 0:
            return trap_cent(tstart + self.echo_duration*3/4, self.readout_amp, self.readout_grad_duration/2,
                             self.trap_ramp_duration, trap_ramp_pts)
        else:
            return trap_cent(tstart + self.echo_duration/2, self.readout_amp, self.readout_grad_duration,
                             self.trap_ramp_duration, trap_ramp_pts)
    def readout_wf(tstart, echo_idx):
        # RX window centred on each echo; no acquisition in the first interval.
        if echo_idx != 0:
            return np.array([tstart + (self.echo_duration - self.readout_duration)/2, tstart + (self.echo_duration + self.readout_duration)/2 ]), np.array([1, 0])
        else:
            return np.array([tstart]), np.array([0]) # keep on zero otherwise
    def phase_grad_wf(tstart, echo_idx):
        # Phase-encode blip before the echo (t1) and its rewinder after it (t2).
        t1, a1 = trap_cent(tstart + (self.echo_duration - self.phase_grad_interval)/2, phase_amps[echo_idx-1], self.phase_grad_duration,
                           self.trap_ramp_duration, trap_ramp_pts)
        t2, a2 = trap_cent(tstart + (self.echo_duration + self.phase_grad_interval)/2, -phase_amps[echo_idx-1], self.phase_grad_duration,
                           self.trap_ramp_duration, trap_ramp_pts)
        if echo_idx == 0:
            return np.array([tstart]), np.array([0]) # keep on zero otherwise
        elif echo_idx == self.echos_per_tr: # last echo, don't need 2nd trapezoids
            return t1, a1
        else: # otherwise do both trapezoids
            return np.hstack([t1, t2]), np.hstack([a1, a2])
    def slice_grad_wf(tstart, echo_idx, tr_idx):
        # Slice-encode: same timing as phase-encode, amplitude chosen per TR.
        t1, a1 = trap_cent(tstart + (self.echo_duration - self.phase_grad_interval)/2, slice_amps[tr_idx], self.phase_grad_duration,
                           self.trap_ramp_duration, trap_ramp_pts)
        t2, a2 = trap_cent(tstart + (self.echo_duration + self.phase_grad_interval)/2, -slice_amps[tr_idx], self.phase_grad_duration,
                           self.trap_ramp_duration, trap_ramp_pts)
        if echo_idx == 0:
            return np.array([tstart]), np.array([0]) # keep on zero otherwise
        elif echo_idx == self.echos_per_tr: # last echo, don't need 2nd trapezoids
            return t1, a1
        else: # otherwise do both trapezoids
            return np.hstack([t1, t2]), np.hstack([a1, a2])
    #tr_total_time = self.echo_duration * (self.echos_per_tr + 1) + self.tr_pause_duration
    expt = ex.Experiment(lo_freq=self.lo_freq, rx_t=self.rx_period, init_gpa=self.init_gpa)
    global_t = 20 # start the first TR at 20us
    for tr in range(self.trs):
        for echo in range(self.echos_per_tr + 1):
            tx_t, tx_a = rf_wf(global_t, echo)
            tx_gate_t, tx_gate_a = tx_gate_wf(global_t, echo)
            readout_t, readout_a = readout_wf(global_t, echo)
            # RX gate mirrors the RX window timing exactly.
            rx_gate_t, rx_gate_a = readout_wf(global_t, echo)
            readout_grad_t, readout_grad_a = readout_grad_wf(global_t, echo)
            phase_grad_t, phase_grad_a = phase_grad_wf(global_t, echo)
            slice_grad_t, slice_grad_a = slice_grad_wf(global_t, echo, tr)
            global_t += self.echo_duration
            # grad_board is hard-coded to "ocra1" above, so this branch is
            # currently inactive; kept for gpa-fhdo hardware support.
            if grad_board == "gpa-fhdo":
                gpa_fhdo_offset = (1 / 0.2 / 3.1) # microseconds; offset between channels to avoid parallel updates (default update rate is 0.2 Msps, so 1/0.2 = 5us, 5 / 3.1 gives the offset between channels; extra 0.1 for a safety margin)
                phase_grad_t = phase_grad_t + gpa_fhdo_offset # can't use += because of casting rules
                slice_grad_t = slice_grad_t + 2*gpa_fhdo_offset
            expt.add_flodict({
                'tx0': (tx_t, tx_a),
                'tx1': (tx_t, tx_a),
                'grad_vx': (readout_grad_t, readout_grad_a+shim_x),
                'grad_vy': (phase_grad_t, phase_grad_a+shim_y),
                'grad_vz': (slice_grad_t, slice_grad_a+shim_z),
                'rx0_en': (readout_t, readout_a),
                'rx1_en': (readout_t, readout_a),
                'tx_gate': (tx_gate_t, tx_gate_a),
                'rx_gate': (rx_gate_t, rx_gate_a),
            })
        global_t += self.tr_pause_duration
    if plotSeq==1:
        expt.plot_sequence()
        plt.show()
        # NOTE(review): __del__ is invoked directly, presumably to release
        # hardware resources immediately — confirm Experiment's intended API
        # (an explicit close() method would be safer if one exists).
        expt.__del__()
    elif plotSeq==0:
        rxd, msgs = expt.run()
        expt.__del__()
        return rxd['rx0'].real, msgs
|
#!/usr/bin/python
import os # to get environment vars
import argparse # to deal with given arguments
import shutil # for copying files over
# function that returns the string of the input file corresponding to the given nodes and ppn
def get_input_file_name(nodes, proc_per_node, default=False):
    """Return the hpcc input-file name for a nodes/ppn combination.

    When ``default`` is true the generic default file name is returned
    and the node/ppn counts are ignored.
    """
    if default:
        return "default_hpccinf.txt"
    return "hpccinf.txt.{}x{}".format(proc_per_node, nodes)
# function that tries to copy a file from the source to the destination
def copy_file_over(source_path, dest_path):
    """Copy ``source_path`` to ``dest_path``, printing hints on failure."""
    try:
        shutil.copy(source_path, dest_path)
    except IOError as e:
        # Show the error in red, then suggest the most common fixes.
        print("\33[91m" + str(e) + "\33[0m")
        print("Possibly you wanted to use the default file? If so, use the -D/--default flag")
        print("(By default this script copies the file into the HOME directory)")
        print("Use the -v flag for more information or -h for help")
    else:
        print("Successfully copied over proper input file to HOME directory")
if __name__ == "__main__":
    # parsing arguments given in
    parser = argparse.ArgumentParser()
    parser.add_argument("--nofile", action="store_true", default=False,
                        help="used when you don't want to copy over any files and just start the shell")
    parser.add_argument("-v", "--verbose", action="store_true", help="increase output verbosity")
    # another argument/way describing how this process works? Description?
    parser.add_argument("-D", "--default", action="store_true", help="use default input file")
    parser.add_argument("-n", "--nodes", type=int, default=1,
                        help="specify the number of nodes hpcc will be running on (default=1)")
    parser.add_argument("-ppn", "--proc_per_node", type=int, default=1,
                        help="specify the number of processes/cores per node (default=1)")
    # another argument to potentially specify what directory to copy it into? - potential future
    args = parser.parse_args()
    # ANSI-coloured markers for better verbose readability
    verbose_start = "\33[33mDEBUG START========================== \33[0m "
    verbose_end = "\33[33m============================DEBUG END \33[0m "
    # checks if need to do anything
    if args.nofile:
        print("Nofile option specified. Exiting to shell.")
        quit()
    # printing warning to ensure intentional usage
    if args.default:
        print("\33[93mWARNING: Default file chosen, -n and -ppn arguments ignored \33[0m")
    # verbose
    if args.verbose:
        print(verbose_start)
        print("Choosing file with following requirements: ")
        print("Nodes:", args.nodes)
        print("Processes Per Node:", args.proc_per_node)
        print("Default:", args.default)
        print("Now setting up the paths to copy")
        print(verbose_end)
    # setting up paths
    # BUG FIX: os.environ.get() returns None for unset variables, which made
    # the original crash with a TypeError on concatenation; fail clearly instead.
    inputs_loc = os.environ.get("inputsLoc")
    if inputs_loc is None:
        print("\33[91mERROR: environment variable 'inputsLoc' is not set\33[0m")
        quit()
    home_dir = os.environ.get("HOME")
    if home_dir is None:
        print("\33[91mERROR: environment variable 'HOME' is not set\33[0m")
        quit()
    hpcc_inputs_dir = inputs_loc + "hpcc/"
    hpcc_input_name = get_input_file_name(args.nodes, args.proc_per_node, args.default)
    input_file_path = hpcc_inputs_dir + hpcc_input_name
    dest_path = home_dir + "/hpccinf.txt" # perhaps want specification option?
    # verbose
    if args.verbose:
        print(verbose_start)
        print("Input file name: " + hpcc_input_name)
        print("Full path: " + input_file_path)
        print("Destination path: " + dest_path)
        print("Attempting to copy input file to destination")
        print(verbose_end)
    # attempting to copy over file
    copy_file_over(input_file_path, dest_path)
|
from pairSum import *
__author__ = 'Mohamed Fawzy'

# Sample array with a duplicate (7) to exercise pair counting.
arr = [2, 3, 4, 5, 6, 7, 8, 7]
# FIX: parenthesized single-argument print works under both Python 2 and 3;
# the original bare `print expr` statements were Python-2-only syntax.
print(pair_sum(arr, 4))
print(pair_sum(arr, 5))
print(pair_sum(arr, 10))
print(pair_sum(arr, 13))
print(pair_sum(arr, 15))
print(pair_sum(arr, 17))
|
# BUG FIX: in the original, .split() bound only to the last literal, so
# `words` was the tuple ('cat', 'dog', 'hamster', ['chicken']) rather than
# the word list the comment below describes. Split one string instead.
words = 'cat dog hamster chicken'.split()
# takes a string in python and converts it to a list variable. each space creates a new element list
def get_name():
    """Prompt the user for their name and return it capitalized."""
    return input("What is your name?").capitalize()
def long_name():
    """Prompt for a (possibly multi-word) name and capitalize every word."""
    raw = input(' What is your name?')
    # Capitalize each whitespace-separated word, rejoined by single spaces.
    return ' '.join(word.capitalize() for word in raw.split())
|
# Generated by Django 3.2.4 on 2021-07-06 15:56
from django.db import migrations, models
import uuid
import zigida.core.utils
class Migration(migrations.Migration):
    # Initial migration: creates the `locations` table, which mixes audit
    # columns, network fingerprint fields and geo/address/device metadata.
    initial = True
    # No dependencies: this app's schema starts from scratch.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Location',
            fields=[
                # Audit/bookkeeping columns.
                ('datetime_created', models.DateTimeField(auto_now_add=True, verbose_name='DATE CREATED')),
                ('datetime_updated', models.DateTimeField(auto_now=True, verbose_name='DATE UPDATED')),
                ('last_updated_by', models.CharField(blank=True, max_length=50, null=True, verbose_name='LAST UPDATED BY')),
                ('bool_deleted', models.BooleanField(default=False, verbose_name='IS DELETED?')),
                # UUID primary key plus a short random code from the project helper.
                ('uuid_code', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(default=zigida.core.utils.randcode_gen, max_length=100, verbose_name='CODE')),
                # Network identity of the client.
                ('ipaddress', models.GenericIPAddressField(blank=True, null=True, verbose_name='IP ADDRESS')),
                ('macaddress', models.CharField(blank=True, max_length=100, null=True, verbose_name='MAC ADDRESS')),
                # Postal address and geographic coordinates.
                ('street', models.CharField(blank=True, max_length=100, null=True, verbose_name='STREET')),
                ('city', models.CharField(blank=True, max_length=100, null=True, verbose_name='CITY')),
                ('zip_code', models.CharField(blank=True, max_length=100, null=True, verbose_name='POST CODE')),
                ('region', models.CharField(blank=True, max_length=100, null=True, verbose_name='REGION')),
                ('country_code', models.CharField(blank=True, max_length=100, null=True, verbose_name='COUNTRY CODE.')),
                ('country_name', models.CharField(blank=True, max_length=100, null=True, verbose_name='COUNTRY NAME')),
                ('lat', models.CharField(blank=True, max_length=100, null=True, verbose_name='LATITUDE')),
                ('lng', models.CharField(blank=True, max_length=100, null=True, verbose_name='LONGITUDE')),
                # Telephony / carrier details.
                ('phone_number', models.CharField(blank=True, max_length=100, null=True, verbose_name='PHONE')),
                ('area_code', models.CharField(blank=True, max_length=100, null=True, verbose_name='AREA CODE')),
                ('mcc', models.CharField(blank=True, max_length=100, null=True, verbose_name='MCC')),
                ('mnc', models.CharField(blank=True, max_length=100, null=True, verbose_name='MNC')),
                ('mobile_brand', models.CharField(blank=True, max_length=100, null=True, verbose_name='MOBILE BRAND')),
                # Device / connection metadata.
                ('device_type', models.CharField(blank=True, max_length=100, null=True, verbose_name='DEVICE TYPE')),
                ('device_model', models.CharField(blank=True, max_length=100, null=True, verbose_name='DEVICE MODEL')),
                ('op_system', models.CharField(blank=True, max_length=100, null=True, verbose_name='OPERAT. SYS')),
                ('isp', models.CharField(blank=True, max_length=100, null=True, verbose_name='ISP')),
                ('domain', models.CharField(blank=True, max_length=100, null=True, verbose_name='DOMAIN')),
                ('timezone', models.CharField(blank=True, max_length=100, null=True, verbose_name='TIMEZONE')),
                ('netspeed', models.CharField(blank=True, max_length=100, null=True, verbose_name='NET SPEED')),
                ('idd_code', models.CharField(blank=True, max_length=100, null=True, verbose_name='IDD CODE')),
                ('usage_type', models.CharField(blank=True, max_length=100, null=True, verbose_name='USAGE TYPE')),
                ('weather_code', models.CharField(blank=True, max_length=100, null=True, verbose_name='WEATHER CODE')),
                ('weather_name', models.CharField(blank=True, max_length=100, null=True, verbose_name='WEATHER NAME')),
                ('bool_active', models.BooleanField(default=True, verbose_name='IS ACTIVE')),
            ],
            options={
                'verbose_name_plural': 'locations',
                'db_table': 'locations',
            },
        ),
    ]
|
from base import ControllerBase
from model.activity import Activity as ActivityModel
class AdminIndex(ControllerBase):
    """Admin landing page."""

    def get(self):
        # No dynamic data yet: render the index template with an empty model.
        self.template('admin/index', {})
class AdminActivity(ControllerBase):
    """List existing activities and accept new ones."""

    def get(self):
        # Render every stored activity.
        self.template('admin/activity', {'activities': ActivityModel().getAll()})

    def post(self):
        # Persist the submitted activity, then redirect back to the listing
        # (post/redirect/get pattern).
        new_activity = ActivityModel()
        new_activity.name = self.request.get('name')
        new_activity.put()
        self.redirect('/admin/activity')
from pynput import keyboard

# Print every keyboard event until Escape is pressed.
with keyboard.Events() as events:
    for event in events:
        # Escape terminates the listener loop.
        if event.key == keyboard.Key.esc:
            break
        print('Received event {}'.format(event))
|
# Ejercicio 3
# Crea un script llamado generador.py que cumpla las siguientes necesidades:
#
# Debe incluir una función llamada leer_numero().
# Esta función tomará 3 valores: ini, fin y mensaje.
# El objetivo es leer por pantalla un número >= que ini y <= que fin.
# Además a la hora de hacer la lectura se mostrará en el input el mensaje enviado a la función.
# Finalmente se devolverá el valor. Esta función tiene que devolver un número,
# y tiene que repetirse hasta que el usuario no lo escriba bien
# (lo que incluye cualquier valor que no sea un número del ini al fin).
# Una vez la tengas creada, deberás crear una nueva función llamada generador,
# no recibirá ningún parámetro.
# Dentro leerás dos números con la función leer_numero():
#
# El primer numero será llamado numeros,
# deberá ser entre 1 y 20, ambos incluidos,
# y se mostrará el mensaje ¿Cuantos números quieres generar? [1-20]:
# El segundo número será llamado modo y requerirá un número entre 1 y 3, ambos incluidos.
# El mensaje que mostrará será: ¿Cómo quieres redondear los números? [1]Al alza [2]A la baja [3]Normal:.
# Una vez sepas los números a generar y cómo redondearlos, tendrás que realizar lo siguiente:
#
# Generarás una lista de números aleatorios decimales entre 0 y 100 con tantos números como el usuario haya indicado.
# A cada uno de esos números deberás redondearlos en función de lo que el usuario ha especificado en el modo.
# Para cada número muestra durante el redondeo, el número normal y después del redondeo.
# Finalmente devolverás la lista de números redondeados.
#
# El objetivo de este ejercicio es practicar la reutilización de código y los módulos random y math.
#
# Recordatorio
#
# El redondeo tradicional round() no requiere importar ningún módulo, es una función por defecto.
|
"""
Todoist API 를 이용해서 대량의 Task 를 자동으로 추가합니다.
기본 사용 방법
=========================
python -m task_script --tasks TASKS
"""
import argparse
import yaml
from .constants import TODOIST_API_KEY
# fmt: off
parser = argparse.ArgumentParser()
parser.add_argument("--tasks", type=str, required=True, help="Task 목록 파일 위치")
# fmt: on


def main(args: argparse.Namespace):
    """Load the task list file for bulk upload to Todoist.

    Raises:
        RuntimeError: when TODOIST_API_KEY is missing (e.g. no `.env` file).
    """
    if TODOIST_API_KEY is None:
        raise RuntimeError("TODOIST_API_KEY is not found. Did you specify `.env` file properly?")
    with open(args.tasks, "r", encoding="utf-8") as f:
        tasks = yaml.load(f, Loader=yaml.BaseLoader)
    # NOTE(review): `tasks` is parsed but never sent to the Todoist API here —
    # confirm the upload step was not accidentally dropped.
    print("Done! Let's go finish some tasks! ✨ 🍰 ✨")


# BUG FIX: the original called main() unconditionally at import time;
# guard it so importing this module (e.g. in tests) has no side effects.
if __name__ == "__main__":
    main(parser.parse_args())
|
import logging
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from brokenapp.lib.base import BaseController, render
log = logging.getLogger(__name__)
class XssController(BaseController):
    """Intentionally vulnerable controller used to demonstrate reflected XSS
    (part of a deliberately broken training application)."""
    def index(self):
        # Return a rendered template
        #return render('/xss.mako')
        # or, return a string
        # Sets a static session cookie so the XSS demo has something to steal.
        response.set_cookie("SESSIONID", "ABCDEF123456789")
        return render('/xss.mako')
    def inject(self):
        """Reflect the `data` request parameter into the rendered page."""
        # Disables the browser's built-in XSS filter so the reflected payload
        # executes — deliberate: this endpoint exists to demo the attack.
        response.headerlist.append( ("X-XSS-Protection", 0) )
        c.data = request.params['data']
        return render('/xssinject.mako')
|
# -*- coding: utf-8 -*-
from contracts import contract
from mcdp_hdb import DiskMap, Schema, SchemaString
from mcdp_hdb_tests.testcases import get_combinations
@contract(returns='dict(str:isinstance(DataTestCase))')
def testcases_arrays():
    """Build DataTestCase combinations for a schema holding one string list."""
    schema = Schema()
    schema.list('alist', SchemaString())
    initial_db = {
        'alist': ['one', 'two']
    }
    schema.validate(initial_db)
    # Only the plain (no-renaming) disk layout is exercised here.
    disk_maps = {'vanilla': DiskMap()}
    return get_combinations(schema, initial_db, 'array1', operation_sequences, disk_maps)
# Registry of operation sequences, populated in definition order by @add_seq.
operation_sequences = []


def add_seq(func):
    """Decorator: register ``func`` as an operation sequence, return it unchanged."""
    operation_sequences.append(func)
    return func


@add_seq
def seq_delete0(view):
    # Remove the first list element.
    view.alist.delete(0)


@add_seq
def seq_delete1(view):
    # Remove the second list element.
    view.alist.delete(1)


@add_seq
def seq_delete_all(view):
    # Deleting index 0 twice empties the two-element list.
    view.alist.delete(0)
    view.alist.delete(0)


@add_seq
def seq_append(view):
    view.alist.append('appended')


@add_seq
def seq_insert(view):
    view.alist.insert(1, 'between')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ***************************************************************************
# Copyright (c) 2019 西安交通大学
# All rights reserved
#
# 文件名称:Main.py
#
# 摘 要:针对G4问题的skco方法
#
# 创 建 者:上官栋栋
#
# 创建日期:2019年1月10号
#
# 修改记录
# 日期 修改者 版本 修改内容
# ------------- ------- ------------------------
# ***************************************************************************
from Kriging import Kriging,writeFile,filterSamples
from DOE import LatinHypercube
import numpy as np
from ADE import ADE
from sklearn import svm as SVM_SKLearn
import matplotlib.pyplot as plt
class TestFunction_G4_Simple(object):
    '''
    Simplified constrained test function G4.
    Variable dimension : 3
    Search space : l=(27,27,29), u=(45,45,40), li<xi<ui
    Global optimum : x* = (29.996, 45, 36.7758), f(x*) = -30665.539
    (Note: the original header said dimension 5; the code uses 3 — confirm.)
    '''
    def __init__(self):
        '''Set up dimension, bounds, the known optimum and reference grid data.'''
        self.dim = 3
        self.l = [27,27,29]
        self.u = [45,45,40]
        self.optimum = [29.9952560256815985,45,36.7758129057882073]
        # Pre-computed grid of sample points with ground-truth feasibility
        # labels, produced by TestData() below.
        self.data = np.loadtxt('./Data/G4简化函数测试1/G4简化函数空间0.txt')
    def aim_Matrix(self,M):
        # Vectorized objective: one value per row of sample matrix M.
        A = 5.3578547*M[:,0]**2+0.8356891*78*M[:,2]+37.293239*78-40792.141
        return A
    def aim(self,x):
        # Objective value of a single sample point x.
        A = 5.3578547*x[0]**2+0.8356891*78*x[2]+37.293239*78-40792.141
        return A
    def isOK_Matrix(self,M):
        '''Vectorized constraint check.
        input :
            M : sample matrix, one point per row
        output :
            mark : int array, -1 = violates a constraint, 1 = feasible
        '''
        if M.shape[1] != self.dim:
            raise ValueError('isOK:参数维度与测试函数维度不匹配')
        if np.sum(M<self.l)>0 or np.sum(M>self.u)>0:
            raise ValueError('TestFunction_G16: 参数已超出搜索空间')
        u = 85.334407+0.0056858*33*M[:,2]+0.0006262*78*M[:,1]-0.0022053*M[:,0]*M[:,2]
        v = 80.51249+0.0071317*33*M[:,2]+0.0029955*78*33+0.0021813*M[:,0]**2
        w = 9.300961+0.0047026*M[:,0]*M[:,2]+0.0012547*78*M[:,0]+0.0019085*M[:,0]*M[:,1]
        # Constraint functions: g <= 0 means the constraint is satisfied.
        g = np.zeros((M.shape[0],6))
        g[:,0] = u-92
        g[:,1] = -u
        g[:,2] = v-110
        g[:,3] = -v+90
        g[:,4] = w-25
        g[:,5] = -w+20
        # Any positive g entry marks the row infeasible (-1); otherwise 1.
        mark = np.sum(g>0,axis=1)
        mark[np.where(mark>0)] = -1
        mark[np.where(mark==0)] = 1
        return mark
    def isOK(self,x):
        # Scalar feasibility check: returns -1 (infeasible) or 1 (feasible).
        if len(x) != self.dim:
            raise ValueError('isOK:参数维度与测试函数维度不匹配')
        u = 85.334407+0.0056858*33*x[2]+0.0006262*78*x[1]-0.0022053*x[0]*x[2]
        v = 80.51249+0.0071317*33*x[2]+0.0029955*78*33+0.0021813*x[0]**2
        w = 9.300961+0.0047026*x[0]*x[2]+0.0012547*78*x[0]+0.0019085*x[0]*x[1]
        # Constraint functions: g <= 0 means the constraint is satisfied.
        g = np.zeros(6)
        g[0] = u-92
        g[1] = -u
        g[2] = v-110
        g[3] = -v+90
        g[4] = w-25
        g[5] = -w+20
        mark = np.sum(g>0)
        if mark>0:
            return -1
        else:
            return 1
    def report(self,svm):
        '''Compare the SVM's predicted classes against the ground-truth labels
        stored in self.data, printing a confusion matrix and P/R/F1 metrics.'''
        pointNum = self.data.shape[0]
        points_mark = self.data[:,self.dim]
        points = self.data[:,0:self.dim]
        svm_mark = svm.predict(points)
        TP = 0
        FN = 0
        TN = 0
        FP = 0
        points_pos = points_mark==1
        points_neg = ~points_pos
        svm_pos = svm_mark==1
        svm_neg = ~svm_pos
        # Confusion-matrix counts (positive = feasible).
        TP = np.sum(points_pos & svm_pos)
        FP = np.sum(svm_pos & points_neg)
        TN = np.sum(points_neg & svm_neg)
        FN = np.sum(svm_neg & points_pos)
        E = (FP + FN)/(pointNum)
        acc = 1-E
        if TP == 0:
            # Avoid division by zero when there are no true positives.
            P = 0
            R = 0
            F1 = 0
        else:
            P = TP/(TP+FP)
            R = TP/(TP+FN)
            F1 = 2*P*R/(P+R)
        print('........................')
        print('样本点总数目:%d'%pointNum)
        print('正例数目:%d'%int(TP+FN))
        print('反例数目:%d'%int(TN+FP))
        print('真正例(将正例判定为正例):%d'%TP)
        print('假正例(将反例判定为正例):%d'%FP)
        print('真反例(将反例判定为反例):%d'%TN)
        print('假反例(将正例判定为反例):%d'%FN)
        # print('错误率:%.4f'%E)
        print('精度:%.8f'%acc)
        print('查准率:%.8f'%P)
        print('查全率:%.8f'%R)
        print('F1:%.8f'%F1)
        x = self.optimum
        # print('实际最优值坐标:',x)
        # print('实际最优值:%.6f'%self.aim(x))
        print('SVM对实际最优值判定:%.8f'%svm.decision_function([x]))
    def TestData(self):
        '''Generate the reference grid files (points + feasibility labels)
        consumed by report(), split across files of at most maxPointNum points.'''
        pointNum = 1
        dimNum = [50,50,50]
        weight = np.zeros(self.dim)
        for i in range(self.dim):
            pointNum *= dimNum[i]
            weight[i] = (self.u[i]-self.l[i])/(dimNum[i]-1)
        maxPointNum = 10000000.0
        iterNum = int(np.ceil(pointNum/maxPointNum))
        for fileIndex in range(iterNum):
            if fileIndex == iterNum-1:
                iterPointNum = int(pointNum%maxPointNum)
            else:
                # NOTE(review): maxPointNum is a float here and np.zeros below
                # requires integer shapes on modern NumPy — this branch likely
                # errors when more than one file is needed; confirm.
                iterPointNum = maxPointNum
            points = np.zeros((iterPointNum,self.dim))
            points_mark = np.zeros(iterPointNum)
            for i in range(self.dim):
                points[:,i] = self.l[i]
            # Decode each flat grid index into per-dimension offsets.
            for i in range(iterPointNum):
                index = i+fileIndex*maxPointNum
                for j in range(self.dim):
                    points[i,j] += index%dimNum[j]*weight[j]
                    index = index // dimNum[j]
                    if index == 0:
                        break
            points_mark = self.isOK_Matrix(points)
            points_mark = points_mark.reshape((-1,1))
            data = np.hstack((points,points_mark))
            np.savetxt('./Data/G4简化函数测试1/G4简化函数空间%d.txt'%fileIndex,data)
class SKCO(object):
'''基于SVM和kriging的含约束优化方法\n
input :\n
func : 求解问题实例。结构可参考本文件中的TestFunction_G4,必须包含目标函数,约束,自变量区间等数据\n
logPath : 日志文件存储位置,用于存储计算中产生的数据,日志'''
def __init__(self,func,logPath):
'''初始化函数'''
self.f = func
self.logPath = logPath
import os
if not os.path.exists(logPath):
os.makedirs(logPath)
#采样点
self.samples = None
self.value = None
self.mark = None
def Step_A(self,initSampleNum = 100,auxiliarySampleNum = 10):
'''初步搜索设计空间\n
input : \n
initSampleNum : 整型,初始采样点数目\n
auxiliarySampleNum : 整型,附加采样点数目\n'''
#生成采样点
mark = np.zeros(initSampleNum)-1
while np.sum(mark == 1)==0:
lh=LatinHypercube(self.f.dim,initSampleNum,self.f.l,self.f.u)
samples=lh.realSamples
mark = self.f.isOK_Matrix(samples)
np.savetxt(self.logPath+'/InitSamples.txt',samples,delimiter=',')
# samples = np.loadtxt(self.logPath+'/InitSamples.txt',delimiter=',')
value = self.f.aim_Matrix(samples)
#建立响应面
theta = [6.39935517, 0.663649334, 14.2249506, 6.65649918, 0.001]
kriging = Kriging()
kriging.fit(samples, value, self.f.l, self.f.u,theta)
# print('正在优化theta参数...')
# theta = kriging.optimize(10000,self.logPath+'/theta优化种群数据.txt')
for k in range(auxiliarySampleNum):
print('第%d次加点...'%(k+1))
nextSample = kriging.nextPoint_Varience()
samples = np.vstack([samples,nextSample])
value = np.append(value,self.f.aim(nextSample))
kriging.fit(samples, value, self.f.l, self.f.u, theta)
# kriging.optimize(100)
#检测样本点中是否有可行解,如果没有继续加点
mark = np.zeros(samples.shape[0])
for i in range(samples.shape[0]):
mark[i] = self.f.isOK(samples[i,:])
if np.sum(mark==1)>0:
value = value.reshape((-1,1))
mark = mark.reshape((-1,1))
storeData = np.hstack((samples,value,mark))
np.savetxt(self.logPath+'/A_Samples.txt',storeData)
return
else:
print('在所有样本中未能发现可行域,继续加点...')
i = 0
while mark[-1]==-1:
i += 1
print('第%d次加点...'%(auxiliarySampleNum+i))
nextSample = kriging.nextPoint_Varience()
samples = np.vstack([samples,nextSample])
value = np.append(value,self.f.aim(nextSample))
mark = np.append(mark,self.f.isOK(nextSample))
kriging.fit(samples, value, self.f.l, self.f.u, theta)
# kriging.optimize(100)
value = value.reshape((-1,1))
mark = mark.reshape((-1,1))
storeData = np.hstack((samples,value,mark))
np.savetxt(self.logPath+'/A_Samples.txt',storeData)
    def Step_B(self,T0_list,T1_list):
        '''
        Split the design space with an SVM and refine the separating
        hyperplane over several infill rounds.

        Round k selects candidates within distance T0_list[k] of the current
        hyperplane and adds at most T1_list[k] of them (passed to
        infillSample2 as sampleCMaximum), then retrains the SVM.  Finally all
        samples plus objective values and feasibility marks are written to
        <logPath>/B_Samples.txt.
        '''
        if len(T0_list) != len(T1_list):
            raise ValueError('T0列表与T1列表数目不相符')
        # Ground-truth feasibility comes from the test function.
        f = self.f
        data = np.loadtxt(self.logPath+'/A_Samples.txt')
        samples = data[:,0:f.dim]
        mark = data[:,f.dim+1]
        # svm=SVM_SKLearn.SVC(C=1000,kernel='linear')
        svm=SVM_SKLearn.SVC(C=1000,kernel='rbf',gamma=0.0005)
        print('训练初始支持向量机...')
        svm.fit(samples,mark)
        f.report(svm)
        # Cumulative sample count after each round (index 0 = initial count).
        pointNum = np.zeros(len(T1_list)+1)
        pointNum[0] = samples.shape[0]
        for k in range(len(T1_list)):
            print('\n第%d轮加点...'%(k+1))
            new_x = self.infillSample2(svm,samples,T0_list[k],T1_list[k])
            if new_x is None:
                print('当T1设置为%.2f时,加点数目为0'%T1_list[k])
                pointNum[k+1] = samples.shape[0]
                continue
            else:
                num = new_x.shape[0]
                # Evaluate true feasibility of the new points and retrain.
                new_mark = f.isOK_Matrix(new_x)
                samples = np.vstack((samples,new_x))
                mark = np.append(mark,new_mark)
                print('训练支持向量机...')
                svm.fit(samples,mark)
                f.report(svm)
                pointNum[k+1] = samples.shape[0]
                print('本轮样本点总数目:%d'%pointNum[k+1])
        # Evaluate the objective at every sample and persist everything.
        value = np.zeros(samples.shape[0])
        for i in range(samples.shape[0]):
            value[i] = f.aim(samples[i,:])
        value = value.reshape((-1,1))
        mark = mark.reshape((-1,1))
        storeData = np.hstack((samples,value,mark))
        np.savetxt(self.logPath+'/B_Samples.txt',storeData)
        print('样本点数目:')
        print(pointNum)
        print('加点结束')
    def Step_C(self):
        '''Constrained surrogate optimisation on the SVM-feasible region.

        Loads the Step_B samples, trains the SVM feasibility classifier and a
        Kriging surrogate, then alternately adds the EI-maximising and
        variance-maximising points (both penalised to stay on the feasible
        side of the SVM hyperplane) until the EI threshold or the
        minimum-distance criterion stops the loop.  All samples are appended
        to '<logPath>/全部样本点.txt'.
        '''
        # Penalty coefficient for constraint violation; it must be large
        # enough to bridge the numeric gap between EI values and y.
        penalty = 10000000000000
        # SVM used as the (cheap) feasibility classifier.
        svm=SVM_SKLearn.SVC(C=1000,kernel='rbf',gamma=0.0005)
        # Load coordinates, objective values and feasibility marks of the
        # samples produced by Step_B.
        testFunc = self.f
        data = np.loadtxt(self.logPath+'/B_Samples.txt')
        samples = data[:,0:testFunc.dim]
        value = data[:,testFunc.dim]
        mark = data[:,testFunc.dim+1]
        print('训练初始支持向量机...')
        svm.fit(samples,mark)
        self.f.report(svm)
        # Build the Kriging response surface.
        # NOTE(review): theta is hard-coded (3 components) — confirm it
        # matches the dimensionality of self.f.
        kriging = Kriging()
        theta = [28.9228845, 0.001, 0.63095945]
        kriging.fit(samples, value, self.f.l, self.f.u, theta)
        # print('正在优化theta参数....')
        # kriging.fit(samples, value, self.f.l, self.f.u)
        # theta = kriging.optimize(10000,self.logPath+'/ADE_theta.txt')
        # Surrogate objective: Kriging prediction penalised when the SVM
        # classifies x as infeasible (decision_function < 0).
        def kriging_optimum(x):
            y = kriging.get_Y(x)
            penaltyItem = penalty*min(0,svm.decision_function([x])[0])
            return y-penaltyItem
        # kriging's own global_optimum only finds the unconstrained optimum,
        # so the constrained optimum is searched with ADE instead.
        print('搜索kriging模型在约束区间的最优值.....')
        ade = ADE(self.f.l, self.f.u, 200, 0.5, kriging_optimum,True)
        opt_ind = ade.evolution(maxGen=5000)
        kriging.optimumLocation = opt_ind.x
        kriging.optimum = kriging.get_Y(opt_ind.x)
        print('最优值的实际判定结果%.4f'%testFunc.isOK(opt_ind.x))
        print('最优值的SVM判定结果%.4f'%svm.decision_function([opt_ind.x]))
        print('最优值实际函数值%.4f'%testFunc.aim(opt_ind.x))
        # Infill objective: EI combined with the SVM feasibility penalty.
        def EI_optimum(x):
            ei = kriging.EI(x)
            penaltyItem = penalty*min(0,svm.decision_function([x])[0])
            return ei + penaltyItem
        # Second infill objective: predictive variance, same penalty.
        def Varience_optimum(x):
            s = kriging.get_S(x)
            penaltyItem = penalty*min(0,svm.decision_function([x])[0])
            return s + penaltyItem
        iterNum = 100 # maximum number of infill iterations
        maxEI_threshold = 0.0001
        smallestDistance = 0.05
        for k in range(iterNum):
            print('\n第%d轮加点.........'%k)
            # Each round adds the EI-maximising and the variance-maximising
            # points.
            print('搜索EI函数在约束区间的最优值.....')
            ade = ADE(self.f.l, self.f.u, 200, 0.5, EI_optimum,False)
            opt_ind = ade.evolution(maxGen=5000)
            nextSample = opt_ind.x
            maxEI = EI_optimum(opt_ind.x)
            # A negative value means the penalty dominated (optimiser ended
            # in infeasible space) — retry the search.
            while maxEI < 0:
                print('EI函数最优值求解失败,重新求解...')
                ade = ADE(self.f.l, self.f.u, 200, 0.5, EI_optimum,False)
                opt_ind = ade.evolution(maxGen=5000)
                nextSample = opt_ind.x
                maxEI = EI_optimum(opt_ind.x)
            print('EI函数最优值实际约束判定:%d'%testFunc.isOK(opt_ind.x))
            print('搜索方差在约束区间的最优值.....')
            ade = ADE(self.f.l, self.f.u,200,0.5,Varience_optimum,False)
            opt_ind = ade.evolution(5000,0.8)
            nextSample = np.vstack((nextSample,opt_ind.x))
            print('方差最优值实际约束判定:%d'%testFunc.isOK(opt_ind.x))
            # If the new points are too close to existing ones, keep only one.
            nextSample = filterSamples(nextSample,samples,smallestDistance)
            # Termination check: max EI below threshold means no further
            # global improvement is expected.
            if maxEI < maxEI_threshold:
                print('EI全局最优值小于%.5f,计算终止'%maxEI_threshold)
                break
            else:
                print('EI全局最优值%.5f'%maxEI)
            # Fewer than 2 accepted points means the new points were too
            # close to existing samples.
            if nextSample.shape[0] < 2:
                print('新加点的数目小于2 ,计算终止')
                break
            else:
                print('本轮加点数目%d'%nextSample.shape[0])
            # Evaluate new samples with the true objective and constraint.
            nextSampleNum = nextSample.shape[0]
            nextValue = np.zeros(nextSampleNum)
            nextFuncMark = np.zeros(nextSampleNum)
            for i in range(nextSampleNum):
                nextValue[i] = testFunc.aim(nextSample[i,:])
                nextFuncMark[i] = testFunc.isOK(nextSample[i,:])
            samples = np.vstack((samples,nextSample))
            value = np.append(value,nextValue)
            mark = np.append(mark,nextFuncMark)
            # Retrain the SVM every round, not only on misclassification:
            # retraining only on errors tends to improve precision but not
            # recall; retraining on correct feasible points enlarges the
            # nearby feasible region.
            print('训练支持向量机...')
            svm.fit(samples,mark)
            self.f.report(svm)
            kriging.fit(samples, value,self.f.l, self.f.u, theta)
            print('搜索kriging模型在约束区间的最优值.....')
            ade = ADE(self.f.l, self.f.u, 200, 0.5, kriging_optimum ,True)
            opt_ind = ade.evolution(maxGen=5000)
            kriging.optimumLocation = opt_ind.x
            kriging.optimum = kriging.get_Y(opt_ind.x)
            print('最优值的实际判定结果%.4f'%testFunc.isOK(kriging.optimumLocation))
            print('最优值实际函数值%.4f'%testFunc.aim(opt_ind.x))
            Data = np.hstack((samples,value.reshape((-1,1)),mark.reshape((-1,1))))
            np.savetxt(self.logPath+'/全部样本点.txt',Data,delimiter='\t')
        # Evaluate the surrogate optimum with the true function and store it.
        nextSample = kriging.optimumLocation
        nextValue = testFunc.aim(nextSample)
        nextFuncMark = testFunc.isOK(nextSample)
        samples = np.vstack((samples,nextSample))
        value = np.append(value,nextValue)
        mark = np.append(mark,nextFuncMark)
        Data = np.hstack((samples,value.reshape((-1,1)),mark.reshape((-1,1))))
        np.savetxt(self.logPath+'/全部样本点.txt',Data,delimiter='\t')
        # While the reported optimum is actually infeasible, retrain the SVM
        # with the corrected sample and search again.
        while testFunc.isOK(kriging.optimumLocation)==-1:
            print('区间错误,训练支持向量机...')
            svm.fit(samples,mark)
            self.f.report(svm)
            print('搜索kriging模型在约束区间的最优值.....')
            kriging.fit(samples, value,self.f.l, self.f.u,theta)
            ade = ADE(self.f.l, self.f.u, 200, 0.5, kriging_optimum ,True)
            opt_ind = ade.evolution(maxGen=5000)
            kriging.optimumLocation = opt_ind.x
            kriging.optimum = kriging.get_Y(opt_ind.x)
            print('最优值的实际判定结果%.4f'%testFunc.isOK(kriging.optimumLocation))
            nextSample = kriging.optimumLocation
            nextValue = testFunc.aim(nextSample)
            nextFuncMark = testFunc.isOK(nextSample)
            samples = np.vstack((samples,nextSample))
            value = np.append(value,nextValue)
            mark = np.append(mark,nextFuncMark)
            Data = np.hstack((samples,value.reshape((-1,1)),mark.reshape((-1,1))))
            np.savetxt(self.logPath+'/全部样本点.txt',Data,delimiter='\t')
        print('全局最优值:',kriging.optimum)
        print('全局最优值坐标:',kriging.optimumLocation)
def infillSpace(self,labelNum):
'''按照指定的参数用采样点密布整个设计空间,返回采样点的坐标\n
in : \n
labelNum : 各维度的划分数目\n
out :\n
samples : 二维数组,每一行是一个样本点\n
'''
#检查各参数维度是否匹配
dim = self.f.dim
if dim != len(labelNum):
raise ValueError('infillSpace:参数维度不匹配')
up = self.f.u
low = self.f.l
coordinates = []
pointNum = 1
for i in range(dim):
coordinate = np.linspace(low[i],up[i],labelNum[i])
coordinates.append(coordinate)
pointNum*=labelNum[i]
samples = np.zeros((pointNum,dim))
for i in range(dim):
samples[:,i] = low[i]
for i in range(pointNum):
ans = i
remainder = 0
for j in range(dim):
remainder = ans%labelNum[j]
ans = ans//labelNum[j]
samples[i,j] = coordinates[j][remainder]
if ans==0:
break
return samples
def AssemblageDistance(self,A,B):
'''计算样本集A中每个样本距离样本集B中最近样本的距离\n
in :\n
A : 样本集A,二维矩阵,每一行代表一个样本\n
B : 样本集B,二维矩阵,每一行代表一个样本\n
out : \n
distances : 一维向量,数目与样本集A的数目相同,表示样本集A中每个样本距离样本集B中最近样本的距离\n
'''
num_A = A.shape[0]
if A.shape[1]!=B.shape[1]:
raise ValueError('AssemblageDistance:样本集A与B的维度不匹配')
distances = np.zeros(num_A)
for i in range(num_A):
vector = B-A[i,:]
dis = np.linalg.norm(vector,axis=1)
distances[i] = np.min(dis)
return distances
    def infillSample4(self,svm,samples,candidateNum,sampleCMaximum,labelNum):
        '''Hyperplane-boundary infill: the candidateNum grid points closest
        to the separating hyperplane become candidates, and the infill
        density is limited by capping the size of sample set C.

        in:
            svm : trained SVM instance
            samples : already evaluated sample points
            candidateNum : initial size of candidate set samples_A
            sampleCMaximum : maximum size of samples_C; infill stops past it
            labelNum : 1-D vector (length == problem dimension) giving the
                grid divisions per dimension for the initial candidate set
        out:
            samples_C : 2-D matrix, one sample per row; None means the point
                density on both sides of the hyperplane is already sufficient
        '''
        # Check parameter dimensions.
        dim = self.f.dim
        if dim!=len(labelNum):
            raise ValueError('infillSample:参数维度不匹配')
        # Build sample sets A (candidates), B (existing), C (selected).
        samples_A = self.infillSpace(labelNum)
        samples_B = samples
        samples_C = None
        # Keep the candidateNum grid points closest to the hyperplane.
        # NOTE(review): dis_A is the signed decision value here (no abs),
        # unlike infillSample1/2/3 which use np.abs — confirm intended.
        num_A = samples_A.shape[0]
        if num_A < candidateNum:
            candidateNum = num_A
        dis_A = svm.decision_function(samples_A)
        dis_A_sorted = np.sort(dis_A)
        samples_A = samples_A[np.where(dis_A<=dis_A_sorted[candidateNum-1])]
        # For set B the distance threshold is fixed at 1.1.
        T0_B = 1.1
        dis_B = svm.decision_function(samples_B)
        samples_B = samples_B[np.where(np.abs(dis_B)<T0_B)]
        if samples_B.shape[0] == 0:
            raise ValueError('infillSample:T0设置过小,区域内没有已采样点')
        # Distance from each candidate in A to its nearest neighbour in B.
        distances = self.AssemblageDistance(samples_A,samples_B)
        L_max = np.max(distances)
        print('............支持向量机加点日志.............')
        print('备选采样点数目:%d'%samples_A.shape[0])
        print('样本集B采样点数目:%d'%samples_B.shape[0])
        print('最大距离:%.4f'%L_max)
        # Greedily move the farthest candidate from A into C (and into B so
        # the next distance computation accounts for it).
        for i in range(sampleCMaximum):
            pos = np.where(distances==L_max)[0]
            if samples_C is None:
                samples_C = samples_A[pos[0],:].reshape((1,-1))
            else:
                samples_C = np.vstack((samples_C,samples_A[pos[0],:]))
            if samples_C.shape[0]>sampleCMaximum:
                break
            samples_B = np.vstack((samples_B,samples_A[pos[0],:]))
            samples_A = np.delete(samples_A,pos,axis=0)
            distances = self.AssemblageDistance(samples_A,samples_B)
            L_max = np.max(distances)
        if samples_C is None:
            print('sample_C集合为空,分割超平面两侧点密度达到要求')
            return samples_C
        else:
            print('加点数目:%d'%samples_C.shape[0])
            print('加点之后最大距离:%.4f'%L_max)
        # For 2-D problems, save and show a scatter plot of the three sets.
        if self.f.dim == 2:
            plt.scatter(samples_A[:,0],samples_A[:,1],c='r',marker='.')
            plt.scatter(samples_B[:,0],samples_B[:,1],c='b',marker='.')
            plt.scatter(samples_C[:,0],samples_C[:,1],c='c',marker='.')
            plt.xlim(self.f.l[0]-0.1,self.f.u[0]+0.1)
            plt.ylim(self.f.l[1]-0.1,self.f.u[1]+0.1)
            import time
            timemark = time.strftime('%Y-%m-%d_%H-%M-%S',time.localtime(time.time()))
            path = self.logPath+'/SVM_Photo_{0}.png'.format(timemark)
            plt.savefig(path)
            plt.show(5)
        return samples_C
    def infillSample1(self,svm,samples,T0,T1,labelNum):
        '''Hyperplane-boundary infill on a regular grid: candidates are the
        grid points within distance T0 of the hyperplane; points keep being
        added while the largest nearest-neighbour distance exceeds T1.

        in:
            svm : trained SVM instance
            samples : already evaluated sample points
            T0 : distance threshold for the initial candidate set
            T1 : stop threshold — infill continues while the largest gap > T1
            labelNum : 1-D vector (length == problem dimension) giving the
                grid divisions per dimension for the initial candidate set
        out:
            samples_C : 2-D matrix, one sample per row; None means the point
                density on both sides of the hyperplane is already sufficient
        '''
        # Check parameter dimensions.
        dim = self.f.dim
        if dim!=len(labelNum):
            raise ValueError('infillSample:参数维度不匹配')
        # Build sample sets A (candidates), B (existing), C (selected).
        samples_A = self.infillSpace(labelNum)
        samples_B = samples
        samples_C = None
        dis_A = svm.decision_function(samples_A)
        samples_A = samples_A[np.where(np.abs(dis_A)<=T0)]
        # For set B the distance threshold is fixed at 1.1.
        T0_B = 1.1
        dis_B = svm.decision_function(samples_B)
        samples_B = samples_B[np.where(np.abs(dis_B)<T0_B)]
        if samples_B.shape[0] == 0:
            raise ValueError('infillSample:T0设置过小,区域内没有已采样点')
        # Distance from each candidate in A to its nearest neighbour in B.
        distances = self.AssemblageDistance(samples_A,samples_B)
        L_max = np.max(distances)
        print('............支持向量机加点日志.............')
        print('备选采样点数目:%d'%samples_A.shape[0])
        print('样本集B采样点数目:%d'%samples_B.shape[0])
        print('最大距离:%.4f'%L_max)
        # Greedily move the farthest candidate from A into C (and into B so
        # the next distance computation accounts for it).
        while L_max>T1:
            pos = np.where(distances==L_max)[0]
            if samples_C is None:
                samples_C = samples_A[pos[0],:].reshape((1,-1))
            else:
                samples_C = np.vstack((samples_C,samples_A[pos[0],:]))
            samples_B = np.vstack((samples_B,samples_A[pos[0],:]))
            samples_A = np.delete(samples_A,pos,axis=0)
            distances = self.AssemblageDistance(samples_A,samples_B)
            L_max = np.max(distances)
        if samples_C is None:
            print('sample_C集合为空,分割超平面两侧点密度达到要求')
            return samples_C
        else:
            print('加点数目:%d'%samples_C.shape[0])
            print('加点之后最大距离:%.4f'%L_max)
        # For 2-D problems, save and show a scatter plot of the three sets.
        if self.f.dim == 2:
            plt.scatter(samples_A[:,0],samples_A[:,1],c='r',marker='.')
            plt.scatter(samples_B[:,0],samples_B[:,1],c='b',marker='.')
            plt.scatter(samples_C[:,0],samples_C[:,1],c='c',marker='.')
            plt.xlim(self.f.l[0]-0.1,self.f.u[0]+0.1)
            plt.ylim(self.f.l[1]-0.1,self.f.u[1]+0.1)
            import time
            timemark = time.strftime('%Y-%m-%d_%H-%M-%S',time.localtime(time.time()))
            path = self.logPath+'/SVM_Photo_{0}.png'.format(timemark)
            plt.savefig(path)
            plt.show(5)
        return samples_C
    def infillSample2(self,svm,samples,T0,sampleCMaximum):
        '''Hyperplane-boundary infill using the test function's stored data
        grid: candidates are stored points within distance T0 of the
        hyperplane; infill density is limited by capping samples_C.

        in:
            svm : trained SVM instance
            samples : already evaluated sample points
            T0 : distance threshold for the candidate set
            sampleCMaximum : maximum size of samples_C; infill stops past it
        out:
            samples_C : 2-D matrix, one sample per row; None means the point
                density on both sides of the hyperplane is already sufficient
        '''
        # Build sample sets A (candidates, taken from the test function's
        # precomputed data grid), B (existing) and C (selected).
        samples_A = self.f.data[:,:self.f.dim]
        samples_B = samples
        samples_C = None
        dis_A = svm.decision_function(samples_A)
        samples_A = samples_A[np.where(np.abs(dis_A)<=T0)]
        # Fewer candidates than the cap: just take them all.
        if samples_A.shape[0] < sampleCMaximum:
            return samples_A
        # For set B the distance threshold is fixed at 1.1.
        T0_B = 1.1
        dis_B = svm.decision_function(samples_B)
        samples_B = samples_B[np.where(np.abs(dis_B)<T0_B)]
        if samples_B.shape[0] == 0:
            raise ValueError('infillSample:T0设置过小,区域内没有已采样点')
        # Distance from each candidate in A to its nearest neighbour in B.
        distances = self.AssemblageDistance(samples_A,samples_B)
        L_max = np.max(distances)
        print('............支持向量机加点日志.............')
        print('备选采样点数目:%d'%samples_A.shape[0])
        print('样本集B采样点数目:%d'%samples_B.shape[0])
        print('最大距离:%.4f'%L_max)
        # Greedily move the farthest candidate from A into C (and into B so
        # the next distance computation accounts for it).
        for i in range(sampleCMaximum):
            pos = np.where(distances==L_max)[0]
            if samples_C is None:
                samples_C = samples_A[pos[0],:].reshape((1,-1))
            else:
                samples_C = np.vstack((samples_C,samples_A[pos[0],:]))
            if samples_C.shape[0]>sampleCMaximum:
                break
            samples_B = np.vstack((samples_B,samples_A[pos[0],:]))
            # NOTE(review): deletes only pos[0] here, while infillSample1/3/4
            # delete every tied index in pos — confirm which is intended.
            samples_A = np.delete(samples_A,pos[0],axis=0)
            distances = self.AssemblageDistance(samples_A,samples_B)
            L_max = np.max(distances)
        if samples_C is None:
            print('sample_C集合为空,分割超平面两侧点密度达到要求')
            return samples_C
        else:
            print('加点数目:%d'%samples_C.shape[0])
            print('加点之后最大距离:%.4f'%L_max)
            return samples_C
    def infillSample3(self,svm,samples,T0,sampleCMaximum):
        '''Hyperplane-boundary infill for high-dimensional problems:
        candidates are pseudo-Monte-Carlo points within distance T0 of the
        hyperplane; infill density is limited by capping samples_C.

        in:
            svm : trained SVM instance
            samples : already evaluated sample points
            T0 : distance threshold for the candidate set
            sampleCMaximum : maximum size of samples_C; infill stops past it
        out:
            samples_C : 2-D matrix, one sample per row; None means the point
                density on both sides of the hyperplane is already sufficient
        '''
        # Build sample sets B (existing) and C (selected); candidate set A
        # is generated by pseudo Monte Carlo until it holds at least
        # 10*sampleCMaximum points within T0 of the hyperplane.
        samples_B = samples
        samples_C = None
        samples_A = np.array([])
        # NOTE(review): bins is hard-coded for a 13-dimensional problem —
        # confirm it matches self.f.dim.
        bins = np.array([2,2,2,2,2,2,2,2,2,2,2,2,2])
        while samples_A.shape[0]<sampleCMaximum*10:
            mc = PseudoMonteCarlo(bins,10,self.f.l,self.f.u)
            points = mc.realSamples
            dis_A = svm.decision_function(points)
            points = points[np.where(np.abs(dis_A)<=T0)]
            if points.shape[0]==0:
                continue
            else:
                if samples_A.shape[0]==0:
                    samples_A = points
                else:
                    samples_A = np.vstack((samples_A, points))
        # For set B the distance threshold is fixed at 1.1.
        T0_B = 1.1
        dis_B = svm.decision_function(samples_B)
        samples_B = samples_B[np.where(np.abs(dis_B)<T0_B)]
        if samples_B.shape[0] == 0:
            raise ValueError('infillSample:T0设置过小,区域内没有已采样点')
        # Distance from each candidate in A to its nearest neighbour in B.
        distances = self.AssemblageDistance(samples_A,samples_B)
        L_max = np.max(distances)
        print('............支持向量机加点日志.............')
        print('备选采样点数目:%d'%samples_A.shape[0])
        print('样本集B采样点数目:%d'%samples_B.shape[0])
        print('最大距离:%.4f'%L_max)
        # Greedily move the farthest candidate from A into C (and into B so
        # the next distance computation accounts for it).
        for i in range(sampleCMaximum):
            pos = np.where(distances==L_max)[0]
            if samples_C is None:
                samples_C = samples_A[pos[0],:].reshape((1,-1))
            else:
                samples_C = np.vstack((samples_C,samples_A[pos[0],:]))
            if samples_C.shape[0]>sampleCMaximum:
                break
            samples_B = np.vstack((samples_B,samples_A[pos[0],:]))
            samples_A = np.delete(samples_A,pos,axis=0)
            distances = self.AssemblageDistance(samples_A,samples_B)
            L_max = np.max(distances)
        if samples_C is None:
            print('sample_C集合为空,分割超平面两侧点密度达到要求')
            return samples_C
        else:
            print('加点数目:%d'%samples_C.shape[0])
            print('加点之后最大距离:%.4f'%L_max)
        # For 2-D problems, save and show a scatter plot of the three sets.
        if self.f.dim == 2:
            plt.scatter(samples_A[:,0],samples_A[:,1],c='r',marker='.')
            plt.scatter(samples_B[:,0],samples_B[:,1],c='b',marker='.')
            plt.scatter(samples_C[:,0],samples_C[:,1],c='c',marker='.')
            plt.xlim(self.f.l[0]-0.1,self.f.u[0]+0.1)
            plt.ylim(self.f.l[1]-0.1,self.f.u[1]+0.1)
            import time
            timemark = time.strftime('%Y-%m-%d_%H-%M-%S',time.localtime(time.time()))
            path = self.logPath+'/SVM_Photo_{0}.png'.format(timemark)
            plt.savefig(path)
            plt.show(5)
        return samples_C
def Test_SVM_Kernal(self):
#理论分割函数
f = self.f
data = np.loadtxt(self.logPath+'/A_Samples.txt')
samples = data[:,0:f.dim]
mark = data[:,f.dim+1]
# for i in range(0,10):
# if i == 0:
# d = 1
# else:
# d = i*0.1
# # print('线性核函数,指数:%.4f'%d)
# print('线性核函数,惩罚系数:%.4f'%d)
# svm=SVM_SKLearn.SVC(C=d,kernel='linear')
# svm.fit(samples,mark)
# # print(svm.decision_function(samples))
# f.report(svm)
# 用同样的方法检测高斯核函数
# for i in range(1,10):
# g = i*0.00001
# print('\n高斯核函数,指数:%.6f'%g)
# svm=SVM_SKLearn.SVC(C=1000,kernel='rbf',gamma=g)
# # print('\n高斯核函数,惩罚系数:%.6f'%g)
# # svm=SVM_SKLearn.SVC(C=g,kernel='rbf',gamma=0.00008)
# svm.fit(samples,mark)
# f.report(svm)
# for i in range(1,10):
# g = i*0.0001
# print('\n高斯核函数,指数:%.6f'%g)
# svm=SVM_SKLearn.SVC(C=1000,kernel='rbf',gamma=g)
# # print('\n高斯核函数,惩罚系数:%.6f'%g)
# # svm=SVM_SKLearn.SVC(C=g,kernel='rbf',gamma=0.00008)
# svm.fit(samples,mark)
# f.report(svm)
for i in range(1,10):
g = i*1000
# print('\n高斯核函数,指数:%.6f'%g)
# svm=SVM_SKLearn.SVC(C=1000,kernel='rbf',gamma=g)
print('\n高斯核函数,惩罚系数:%.6f'%g)
svm=SVM_SKLearn.SVC(C=g,kernel='rbf',gamma=0.0001)
svm.fit(samples,mark)
f.report(svm)
def SKCO_test():
    """Driver: run the SKCO pipeline on the simplified G4 test function.

    The earlier pipeline stages (Step_A / Test_SVM_Kernal / Step_B) are
    commented out, so only Step_C runs — it assumes B_Samples.txt already
    exists in the log directory.
    """
    f = TestFunction_G4_Simple()
    # f.TestData()
    skco = SKCO(f,'./Data/G4简化函数测试1')
    # skco.Step_A(31,10)
    # skco.Test_SVM_Kernal()
    # skco.Step_B([0.1,0.1,0.1,0.1,0.1],[10,10,10,10,10])
    skco.Step_C()
if __name__=='__main__':
    SKCO_test()
|
from django.contrib import admin
from .models import *
# Register your models here.
# Expose every model of this app in the Django admin with the default
# ModelAdmin; registration order matches the original one-per-line form.
for _model in (Agent, Client, CustomUser, Pro, Specialty):
    admin.site.register(_model)
|
import sqlite3

# Absolute path to the demo database (machine-specific; adjust if moved).
conn = sqlite3.connect(r"C:\Users\Wizard\Documents\Python\Udemy\Python3Bootcamp\SQL\friends.db")
# Fix: guarantee the connection is closed even if the query raises,
# instead of leaking it on error.
try:
    # create cursor object
    c = conn.cursor()
    # c.execute("SELECT * FROM friends WHERE first_name IS 'Cara'")
    c.execute("SELECT * FROM friends WHERE closeness > 5 ORDER BY closeness DESC")
    # Fetch all results as list
    print(c.fetchall())
    # commit changes (a no-op for this read-only SELECT, kept for parity
    # with the tutorial flow)
    conn.commit()
finally:
    conn.close()
|
# https://atcoder.jp/contests/arc097/tasks/arc097_b
class UnionFind:
    """Disjoint-set forest with path compression.

    `elements[i]` holds the parent of i, or -(component size) when i is a
    root (so every entry starts at -1).
    """

    def __init__(self, elements):
        self.elements = elements

    def same(self, a, b):
        """Return True when a and b belong to the same component."""
        return self.find(a) == self.find(b)

    def find(self, a):
        """Return the root of a's component, compressing the path."""
        if self.elements[a] < 0:
            return a
        root = self.find(self.elements[a])
        self.elements[a] = root
        return root

    def unite(self, a, b):
        """Merge the components of a and b; no-op if already merged."""
        root_a = self.find(a)
        root_b = self.find(b)
        if root_a == root_b:
            return
        # Accumulate the size into a's root and repoint b's root at it.
        self.elements[root_a] += self.elements[root_b]
        self.elements[root_b] = root_a
# Read N (sequence length) and M (swap pairs), then the permutation P.
N, M = map(int, input().split())
P = list(map(int, input().split()))
# Union the endpoints of each swappable pair (1-based input -> 0-based).
union = UnionFind([-1 for _ in range(N)])
for _ in range(M):
    a, b = map(int, input().split())
    union.unite(a - 1, b - 1)
# Count positions i whose value P[i] lies in the same swap component as i;
# same() returns a bool, which sums as 0/1.
answer = 0
for i in range(N):
    answer += union.same(P[i] - 1, i)
print(answer)
|
from app.main.model.candidate_model import CandidateModel
from app.main.model.recruiter_model import RecruiterModel
from flask_restx.inputs import email
from app.main.util.response import response_object
from functools import wraps
from flask_jwt_extended import jwt_required
from flask_jwt_extended.utils import get_jwt_identity
from flask import jsonify
def HR_only(func):
    """Decorator: require a valid JWT whose identity is an existing
    recruiter (HR) account; otherwise respond 403."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        identity = get_jwt_identity()
        email = identity['email']
        is_HR = identity['is_HR']
        # Guard 1: the token must be flagged as HR.
        if not is_HR:
            return response_object(code=403, message="Bạn không có quyền thực hiện chức năng này!"), 403
        # Guard 2: the recruiter record must actually exist.
        hr = RecruiterModel.query.filter_by(email=email).first()
        if not hr:
            return response_object(code=403, message="Bạn không có quyền thực hiện chức năng này!"), 403
        return func(*args, **kwargs)
    return jwt_required(wrapper)
def Candidate_only(func):
    """Decorator: require a valid JWT whose identity is an existing
    candidate (non-HR) account; otherwise respond 403."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        identity = get_jwt_identity()
        email = identity['email']
        is_HR = identity['is_HR']
        # Fix: test truthiness instead of `== True`, matching HR_only's
        # style and covering any truthy is_HR value.
        if is_HR:
            return response_object(code=403, message="Bạn không có quyền thực hiện chức năng này!"), 403
        # The candidate record must actually exist.
        candidate = CandidateModel.query.filter_by(email=email).first()
        if not candidate:
            return response_object(code=403, message="Bạn không có quyền thực hiện chức năng này!"), 403
        return func(*args, **kwargs)
    return jwt_required(wrapper)
# ex 7 Circle Intersection. Created by SaidakbarP 12/24/2019
from graphics import *
def main():
    """Draw a circle of user-given radius centred at the origin and a
    horizontal line at a user-given y-intercept, then mark and label the
    intersection points.

    NOTE(review): the intersection x-values go through round(), so the
    plotted/labelled points are integer approximations — confirm the
    exercise wants exact values.
    NOTE(review): if |y| > r there is no real intersection; the fractional
    power then yields a complex number and round() raises TypeError.
    """
    #r = int(input("Enter Radius of the circle: "))
    win = GraphWin("Circle Intersection", 640, 640)
    win.setCoords(-10, -10, 10, 10)
    # entry box for radius
    radius_entry = Entry(Point(-3, 9), 10)
    radius_entry.setText("7.0")
    radius_entry.draw(win)
    Text(Point(-7, 9), "Enter radius (x<10): ").draw(win)
    # entry box for y-intercept
    y_int = Entry(Point(-3, 8), 10)
    y_int.setText("-3.0")
    y_int.draw(win)
    Text(Point(-7, 8), "Enter y-intercept (y<10) : ").draw(win)
    # "Calculate!" button — purely visual; any click continues the script
    Rectangle(Point(-1, 7.7), Point(2, 8.8)).draw(win)
    button = Text(Point(0.5, 8.2), "Calculate!")
    button.draw(win)
    # get values (first click acts as the button press)
    win.getMouse()
    r = float(radius_entry.getText())
    y = float(y_int.getText())
    # draw circle
    circle = Circle(Point(0,0), r)
    circle.draw(win)
    # draw line
    y_ints = Line(Point(-10,y), Point(10,y))
    y_ints.draw(win)
    # Intersections x-values: x = ±sqrt(r^2 - y^2), rounded to integers
    x1 = -round((r**2-y**2)**(1/2))
    x2 = round((r**2-y**2)**(1/2))
    # draw intersections, labels offset slightly from each point
    Text(Point(x1+0.5,y-0.5), "x1: {}".format(x1)).draw(win)
    win.plot(x1, y, "red")
    Text(Point(x2+0.5,y-0.5), "x2: {}".format(x2)).draw(win)
    win.plot(x2, y, "red")
    # relabel the button; a second click closes the window
    button.setText("Quit!")
    win.getMouse()
    win.close()
main()
|
from django.urls import include, path
from django.conf.urls import url
from tracking.views import update_view, allupdate_view, sticky_impression, clicks_impression

# URL routes for the tracking app; each endpoint maps to the view of the
# same name.  NOTE(review): `include` and `path` are imported but unused —
# these routes still use the deprecated `url()` helper.
urlpatterns = [
    url(r'^update_view/$', update_view, name='update_view'),
    url(r'^allupdate_view/$', allupdate_view, name='allupdate_view'),
    url(r'^sticky_impression/$', sticky_impression, name='sticky_impression'),
    url(r'^clicks_impression/$', clicks_impression, name='clicks_impression')
]
|
# Verhoeff check-digit tables: d is the multiplication table of the
# dihedral group D5, p the position-dependent permutation (cycle of 8),
# and inv the group inverse of each digit.
verhoeff_table_d = (
    (0,1,2,3,4,5,6,7,8,9),
    (1,2,3,4,0,6,7,8,9,5),
    (2,3,4,0,1,7,8,9,5,6),
    (3,4,0,1,2,8,9,5,6,7),
    (4,0,1,2,3,9,5,6,7,8),
    (5,9,8,7,6,0,4,3,2,1),
    (6,5,9,8,7,1,0,4,3,2),
    (7,6,5,9,8,2,1,0,4,3),
    (8,7,6,5,9,3,2,1,0,4),
    (9,8,7,6,5,4,3,2,1,0))
verhoeff_table_p = (
    (0,1,2,3,4,5,6,7,8,9),
    (1,5,7,6,2,8,3,0,9,4),
    (5,8,0,3,7,9,6,1,4,2),
    (8,9,1,6,0,4,3,5,2,7),
    (9,4,5,3,1,2,6,8,7,0),
    (4,2,8,6,5,7,3,9,0,1),
    (2,7,9,3,8,0,6,4,1,5),
    (7,0,4,6,9,1,3,2,5,8))
verhoeff_table_inv = (0,4,3,2,1,5,6,7,8,9)


def calcsum(number):
    """Return the Verhoeff check digit to append to *number*."""
    c = 0
    # Digits are processed right-to-left; position i uses permutation
    # row (i+1) % 8 because the (future) check digit occupies position 0.
    for i, digit in enumerate(reversed(str(number))):
        c = verhoeff_table_d[c][verhoeff_table_p[(i + 1) % 8][int(digit)]]
    return verhoeff_table_inv[c]


def checksum(number):
    """Run the Verhoeff checksum over *number* (whose last digit is the
    check digit) and return the residue; 0 means the number is valid."""
    c = 0
    for i, digit in enumerate(reversed(str(number))):
        c = verhoeff_table_d[c][verhoeff_table_p[i % 8][int(digit)]]
    return c


def validate(number):
    """True when *number* (check digit last) passes the Verhoeff check."""
    return checksum(number) == 0
from uuid import UUID
from django.conf import settings
from django.contrib.auth import login as auth_login
from tokenauth.models import Token
def login(request, silence=False):
    """Authenticate *request* from a uuid4 token passed as a GET parameter.

    The parameter name defaults to 'token' and can be overridden with
    settings.TOKENAUTH_PARAMETER_NAME.  Superusers are rejected unless
    settings.TOKENAUTH_ALLOW_ADMINS is set.

    Raises ValueError for a missing (unless *silence*), malformed or unknown
    token, and a plain Exception for a disallowed superuser token.
    """
    # getattr with a default replaces the original hasattr/if dance.
    param_name = getattr(settings, 'TOKENAUTH_PARAMETER_NAME', 'token')
    # Fix: the original named this local `hex`, shadowing the builtin.
    token_key = request.GET.get(param_name, None)
    if token_key:
        try:
            # Ensure uuid4 format.
            value = UUID(token_key, version=4)
        except ValueError:
            raise ValueError('Invalid token format.')
        try:
            token = Token.objects.get(uuid=value.hex)
            allow_admins = getattr(settings, 'TOKENAUTH_ALLOW_ADMINS', False)
            if not allow_admins and token.user.is_superuser:
                # Propagates out of this try (not a Token.DoesNotExist).
                raise Exception('Super users cannot login via token.')
            auth_login(request, token.user)
        except Token.DoesNotExist:
            raise ValueError('The token does not exists.')
    elif not silence:
        raise ValueError('You should provide a token.')
|
#!/bin/python3
import sys
# Build the XOR-prefix sequence a[0] = 0, a[x] = x XOR a[x-1] for
# x = 1..199, then print an index/value table.
a = [0]
running = 0
for term in range(1, 200):
    running ^= term
    a.append(running)
for index, value in enumerate(a):
    print(index, value)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-05-01 09:04
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
def set_branch_creator(apps, schema_editor):
    """Data migration: backfill ProblemBranch.creator for existing rows.

    Non-master branches get the author of their working copy when one
    exists, otherwise the author of the branch head; "master" branches are
    left untouched (creator stays NULL).
    """
    ProblemBranch = apps.get_model("problems", "ProblemBranch")
    # Respect the database the migration is being run against.
    db_alias = schema_editor.connection.alias
    for obj in ProblemBranch.objects.using(db_alias).all():
        if obj.name != "master":
            if obj.working_copy:
                obj.creator = obj.working_copy.author
            else:
                obj.creator = obj.head.author
            obj.save()
class Migration(migrations.Migration):
    """Add a nullable `creator` FK to ProblemBranch, backfill it via
    set_branch_creator, then re-declare Problem.creator's FK options."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('problems', '0084_auto_20170427_0939'),
    ]
    operations = [
        migrations.AddField(
            model_name='problembranch',
            name='creator',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='creator'),
        ),
        # Data step: populate creator for existing rows; reverse is a no-op.
        migrations.RunPython(set_branch_creator, migrations.RunPython.noop),
        migrations.AlterField(
            model_name='problem',
            name='creator',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='creator'),
        ),
    ]
|
"""
Test the `m.Bits` type
"""
import operator
import pytest
import magma as m
from magma import Bits
from magma.testing import check_files_equal
from magma.simulator import PythonSimulator
from hwtypes import BitVector
# Plain m.Array types used below as structural-equality references for
# the corresponding m.Bits widths.
ARRAY2 = m.Array[2, m.Bit]
ARRAY4 = m.Array[4, m.Bit]
def test_bits_basic():
    """
    Basic bits tests
    """
    bits_2 = m.Bits[2]
    bits_in_2 = m.In(m.Bits[2])
    bits_out_2 = m.Out(m.Bits[2])
    # Stripping the direction qualifier recovers the undirected type.
    assert bits_in_2.undirected_t == bits_2
    # Qualified and unqualified Bits compare equal (structural equality)...
    assert bits_2 == m.Bits[2]
    assert bits_in_2 == m.In(bits_2)
    assert bits_out_2 == m.Out(bits_2)
    assert bits_2 == bits_in_2
    assert bits_2 == bits_out_2
    assert bits_in_2 == bits_out_2
    # ...but remain distinct type objects.
    assert bits_2 is not bits_in_2
    assert bits_2 is not bits_out_2
    assert bits_in_2 is not bits_out_2
    bits_4 = m.Bits[4]
    # Bits[4] is structurally Array[4, Bit]; different widths differ.
    assert bits_4 == ARRAY4
    assert bits_2 != bits_4
    assert m.Bits[11]().is_oriented(m.Direction.Undirected)
def test_qualify_bits():
    """Qualifying the unsized Bits type: str() round-trips, outer
    qualifiers override inner ones, and qualified types are cached
    (checked with `is`)."""
    assert str(m.In(Bits)) == "In(Bits)"
    assert str(m.Out(m.In(Bits))) == "Out(Bits)"
    assert str(m.In(Bits)[5, m.Bit]) == "In(Bits[5])"
    assert str(m.Out(m.In(Bits))[5, m.Bit]) == "Out(Bits[5])"
    # Bits qualifier overrides child qualifier
    assert str(m.In(Bits)[5, m.Out(m.Bit)]) == "In(Bits[5])"
    assert m.In(Bits) is m.In(Bits)
    assert m.Out(m.In(Bits)) is m.Out(Bits)
    assert m.In(Bits)[5, m.Bit] is m.Bits[5, m.In(m.Bit)]
    assert m.Out(m.In(Bits))[5, m.Bit] is Bits[5, m.Out(m.Bit)]
    # Bits qualifier overrides child qualifier
    assert m.In(Bits)[5, m.Out(m.Bit)] is Bits[5, m.In(m.Bit)]
def test_val():
    """
    Test instances of Bits[4] work correctly
    """
    bits_4_in = m.In(m.Bits[4])
    bits_4_out = m.Out(m.Bits[4])
    # Flip swaps the direction qualifier in both directions.
    assert m.Flip(bits_4_in) == bits_4_out
    assert m.Flip(bits_4_out) == bits_4_in
    a_0 = bits_4_out(name='a0')
    print(a_0, type(a_0))
    a_1 = bits_4_in(name='a1')
    print(a_1, type(a_1))
    a_1.wire(a_0)
    # Indexing the same position twice yields the same bit object.
    b_0 = a_1[0]
    assert b_0 is a_1[0], "getitem failed"
    # Slicing yields the same underlying bit objects as well.
    a_3 = a_1[0:2]
    assert all(a is b for a, b in zip(a_3, a_1[0:2])), "getitem of slice failed"
def test_flip():
    """
    Test flip interface
    """
    bits_2 = m.Bits[2]
    a_in = m.In(bits_2)
    a_out = m.Out(bits_2)
    print(a_in)
    print(a_out)
    # Directed variants are structurally equal to the raw array type and
    # to each other, yet remain distinct type objects.
    assert a_in == ARRAY2
    assert a_out == ARRAY2
    assert a_in == a_out
    assert a_in is not ARRAY2
    assert a_out is not ARRAY2
    assert a_in is not a_out
    # Re-qualifying with In/Out and calling Flip both produce the type of
    # the opposite direction.
    in_a_out = m.In(a_out)
    assert in_a_out == a_in
    print(in_a_out)
    a_out_flipped = m.Flip(a_out)
    assert a_out_flipped == a_in
    out_a_in = m.Out(a_in)
    assert out_a_in == a_out
    a_in_flipped = m.Flip(a_in)
    assert a_in_flipped == a_out
    print(a_in_flipped)
def test_construct():
    """
    Test `m.bits` interface
    """
    class Foo(m.Circuit):
        a_1 = m.bits([1, 1])
        print(type(a_1))
        assert isinstance(a_1, m.BitsType)
        # test promote: [1, 1] is the 2-bit value 0b11 == 3 once widened
        assert isinstance(m.Bits[16](a_1), m.Bits)
        assert repr(m.Bits[16](
            a_1)) == "Bits[16](3)"
def test_const():
    """
    Test constant constructor interface
    """
    def check_equal(x, y):
        # Compare by integer value, ignoring the concrete magma type.
        return int(x) == int(y)
    class Foo(m.Circuit):
        data = m.Bits[16]
        zero = data(0)
        # Calling the type and the make_constant classmethods must agree.
        assert check_equal(zero, m.bits(0, 16))
        assert check_equal(data(16), m.Bits[16].make_constant(16))
        assert check_equal(m.Bits[4](0xe), m.Bits[16].make_constant(0xe, 4))
        assert check_equal(m.Bits[4](0xe), m.Bits.make_constant(0xe, 4))
def test_setitem_bfloat():
    """
    Slice off the top bit of a BFloat[16] and concat a zero bit in its
    place; check the resulting wiring against the golden repr.
    """
    class TestCircuit(m.Circuit):
        io = m.IO(I=m.In(m.BFloat[16]), O=m.Out(m.BFloat[16]))
        a = io.I
        b = a[0:-1].concat(m.bits(0, 1))
        io.O <= b
    # Bits 0-14 pass straight through; bit 15 is tied to GND by the concat.
    assert repr(TestCircuit) == """\
TestCircuit = DefineCircuit("TestCircuit", "I", In(BFloat[16]), "O", Out(BFloat[16]))
wire(TestCircuit.I[0], TestCircuit.O[0])
wire(TestCircuit.I[1], TestCircuit.O[1])
wire(TestCircuit.I[2], TestCircuit.O[2])
wire(TestCircuit.I[3], TestCircuit.O[3])
wire(TestCircuit.I[4], TestCircuit.O[4])
wire(TestCircuit.I[5], TestCircuit.O[5])
wire(TestCircuit.I[6], TestCircuit.O[6])
wire(TestCircuit.I[7], TestCircuit.O[7])
wire(TestCircuit.I[8], TestCircuit.O[8])
wire(TestCircuit.I[9], TestCircuit.O[9])
wire(TestCircuit.I[10], TestCircuit.O[10])
wire(TestCircuit.I[11], TestCircuit.O[11])
wire(TestCircuit.I[12], TestCircuit.O[12])
wire(TestCircuit.I[13], TestCircuit.O[13])
wire(TestCircuit.I[14], TestCircuit.O[14])
wire(GND, TestCircuit.O[15])
EndCircuit()\
"""  # noqa
@pytest.mark.parametrize("n", [1, 3])
def test_invert(n):
    """Golden repr + compiled verilog + simulation check for bitwise NOT
    on Bits[n]."""
    class TestInvert(m.Circuit):
        io = m.IO(I=m.In(m.Bits[n]), O=m.Out(m.Bits[n]))
        io.O <= ~io.I
    assert repr(TestInvert) == f"""\
TestInvert = DefineCircuit("TestInvert", "I", In(Bits[{n}]), "O", Out(Bits[{n}]))
magma_Bits_{n}_not_inst0 = magma_Bits_{n}_not()
wire(TestInvert.I, magma_Bits_{n}_not_inst0.in)
wire(magma_Bits_{n}_not_inst0.out, TestInvert.O)
EndCircuit()\
"""
    m.compile(f"build/TestBits{n}Invert", TestInvert, output="coreir-verilog")
    assert check_files_equal(__file__, f"build/TestBits{n}Invert.v",
                             f"gold/TestBits{n}Invert.v")
    # Randomised simulation: output must equal bitwise NOT of the input.
    sim = PythonSimulator(TestInvert)
    for _ in range(2):
        I = BitVector.random(n)
        sim.set_value(TestInvert.I, I)
        sim.evaluate()
        assert sim.get_value(TestInvert.O) == ~I
@pytest.mark.parametrize("n", [1, 3])
@pytest.mark.parametrize("op", ["and_", "or_", "xor", "lshift", "rshift"])
def test_binary(op, n):
    """Golden repr + compiled verilog + simulation check for the binary
    bitwise operators on Bits[n]."""
    class TestBinary(m.Circuit):
        io = m.IO(I0=m.In(m.Bits[n]), I1=m.In(m.Bits[n]), O=m.Out(m.Bits[n]))
        io.O <= getattr(operator, op)(io.I0, io.I1)
    # Map the Python operator names onto the magma primitive names.
    magma_op = op.replace("_", "")
    magma_op = magma_op.replace("lshift", "shl")
    magma_op = magma_op.replace("rshift", "lshr")
    assert repr(TestBinary) == f"""\
TestBinary = DefineCircuit("TestBinary", "I0", In(Bits[{n}]), "I1", In(Bits[{n}]), \
"O", Out(Bits[{n}]))
magma_Bits_{n}_{magma_op}_inst0 = magma_Bits_{n}_{magma_op}()
wire(TestBinary.I0, magma_Bits_{n}_{magma_op}_inst0.in0)
wire(TestBinary.I1, magma_Bits_{n}_{magma_op}_inst0.in1)
wire(magma_Bits_{n}_{magma_op}_inst0.out, TestBinary.O)
EndCircuit()\
"""
    m.compile(f"build/TestBits{n}{magma_op}",
              TestBinary, output="coreir-verilog")
    assert check_files_equal(__file__, f"build/TestBits{n}{magma_op}.v",
                             f"gold/TestBits{n}{magma_op}.v")
    # Randomised simulation against the Python operator semantics.
    sim = PythonSimulator(TestBinary)
    for _ in range(2):
        I0 = BitVector.random(n)
        I1 = BitVector.random(n)
        sim.set_value(TestBinary.I0, I0)
        sim.set_value(TestBinary.I1, I1)
        sim.evaluate()
        assert sim.get_value(TestBinary.O) == getattr(operator, op)(I0, I1)
@pytest.mark.parametrize("n", [1, 3])
def test_ite(n):
    """Golden repr + compiled verilog + simulation check for Bits.ite
    (select I0 when S is nonzero, else I1)."""
    class TestITE(m.Circuit):
        io = m.IO(I0=m.In(m.Bits[n]), I1=m.In(m.Bits[n]), S=m.In(m.Bits[n]),
                  O=m.Out(m.Bits[n]))
        io.O <= io.S.ite(io.I0, io.I1)
    # The select is implemented as NOT(S == 0) feeding a mux sel input.
    assert repr(TestITE) == f"""\
TestITE = DefineCircuit("TestITE", "I0", In(Bits[{n}]), "I1", In(Bits[{n}]), "S", In(Bits[{n}]), "O", Out(Bits[{n}]))
magma_Bit_not_inst0 = magma_Bit_not()
magma_Bits_{n}_eq_inst0 = magma_Bits_{n}_eq()
magma_Bits_{n}_ite_Out_Bits_{n}_inst0 = magma_Bits_{n}_ite_Out_Bits_{n}()
wire(magma_Bits_{n}_eq_inst0.out, magma_Bit_not_inst0.in)
wire(TestITE.S, magma_Bits_{n}_eq_inst0.in0)
wire(BitVector[{n}](0), magma_Bits_{n}_eq_inst0.in1)
wire(TestITE.I0, magma_Bits_{n}_ite_Out_Bits_{n}_inst0.in0)
wire(TestITE.I1, magma_Bits_{n}_ite_Out_Bits_{n}_inst0.in1)
wire(magma_Bit_not_inst0.out, magma_Bits_{n}_ite_Out_Bits_{n}_inst0.sel)
wire(magma_Bits_{n}_ite_Out_Bits_{n}_inst0.out, TestITE.O)
EndCircuit()\
"""
    m.compile(f"build/TestBits{n}ITE", TestITE, output="coreir-verilog")
    assert check_files_equal(__file__, f"build/TestBits{n}ITE.v",
                             f"gold/TestBits{n}ITE.v")
    # Simulate both select values with random data inputs.
    sim = PythonSimulator(TestITE)
    for S in [0, 1]:
        I0 = BitVector.random(n)
        I1 = BitVector.random(n)
        sim.set_value(TestITE.I0, I0)
        sim.set_value(TestITE.I1, I1)
        sim.set_value(TestITE.S, S)
        sim.evaluate()
        assert sim.get_value(TestITE.O) == (I1 if S else I0)
@pytest.mark.parametrize("n", [1, 3])
def test_eq(n):
    """Bits == Bits lowers to the eq primitive producing a single Bit."""
    class TestBinary(m.Circuit):
        io = m.IO(I0=m.In(m.Bits[n]), I1=m.In(m.Bits[n]), O=m.Out(m.Bit))
        # Nasty precedence issue with <= operator means we need parens here
        io.O <= (io.I0 == io.I1)
    assert repr(TestBinary) == f"""\
TestBinary = DefineCircuit("TestBinary", "I0", In(Bits[{n}]), "I1", In(Bits[{n}]), "O", Out(Bit))
magma_Bits_{n}_eq_inst0 = magma_Bits_{n}_eq()
wire(TestBinary.I0, magma_Bits_{n}_eq_inst0.in0)
wire(TestBinary.I1, magma_Bits_{n}_eq_inst0.in1)
wire(magma_Bits_{n}_eq_inst0.out, TestBinary.O)
EndCircuit()\
"""
    m.compile(f"build/TestBits{n}eq", TestBinary, output="coreir-verilog")
    assert check_files_equal(__file__, f"build/TestBits{n}eq.v",
                             f"gold/TestBits{n}eq.v")
    # Randomized simulation against BitVector equality semantics.
    sim = PythonSimulator(TestBinary)
    for i in range(2):
        I0 = BitVector.random(n)
        I1 = BitVector.random(n)
        sim.set_value(TestBinary.I0, I0)
        sim.set_value(TestBinary.I1, I1)
        sim.evaluate()
        assert sim.get_value(TestBinary.O) == (I0 == I1)
@pytest.mark.parametrize("n", [1, 3])
def test_zext(n):
    """Bits.zext(3): the top 3 output bits are wired to GND."""
    class TestExt(m.Circuit):
        io = m.IO(I=m.In(m.Bits[n]), O=m.Out(m.Bits[n + 3]))
        # Nasty precedence issue with <= operator means we need parens here
        io.O <= io.I.zext(3)
    # The repr wires a multi-bit input as one slice but a single bit
    # individually, hence the n == 1 special case.
    if n > 1:
        i_wire = f"wire(TestExt.I, TestExt.O[slice(0, {n}, None)])"
    else:
        i_wire = 'wire(TestExt.I[0], TestExt.O[0])'
    gnd_wires = '\n'.join(f'wire(GND, TestExt.O[{i + n}])' for i in range(3))
    assert repr(TestExt) == f"""\
TestExt = DefineCircuit("TestExt", "I", In(Bits[{n}]), "O", Out(Bits[{n + 3}]))
{i_wire}
{gnd_wires}
EndCircuit()\
"""
    m.compile(f"build/TestBits{n}ext", TestExt, output="coreir-verilog")
    assert check_files_equal(__file__, f"build/TestBits{n}ext.v",
                             f"gold/TestBits{n}ext.v")
    # Randomized simulation against BitVector.zext.
    sim = PythonSimulator(TestExt)
    for i in range(2):
        I = BitVector.random(n)
        sim.set_value(TestExt.I, I)
        sim.evaluate()
        assert sim.get_value(TestExt.O) == I.zext(3)
@pytest.mark.parametrize("n", [1, 3])
def test_bvcomp(n):
    """Bits.bvcomp: equality comparison whose result is a Bits[1].

    Checks the netlist repr (the eq primitive's output drives O[0]), the
    compiled Verilog against the gold file, and randomized simulation.
    """
    class TestBinary(m.Circuit):
        io = m.IO(I0=m.In(m.Bits[n]), I1=m.In(m.Bits[n]), O=m.Out(m.Bits[1]))
        # Nasty precedence issue with <= operator means we need parens here
        io.O <= io.I0.bvcomp(io.I1)
    # Removed a leftover debug print(repr(TestBinary)) that cluttered test
    # output; the assertion below already pins the repr.
    assert repr(TestBinary) == f"""\
TestBinary = DefineCircuit("TestBinary", "I0", In(Bits[{n}]), "I1", In(Bits[{n}]), "O", Out(Bits[1]))
magma_Bits_{n}_eq_inst0 = magma_Bits_{n}_eq()
wire(TestBinary.I0, magma_Bits_{n}_eq_inst0.in0)
wire(TestBinary.I1, magma_Bits_{n}_eq_inst0.in1)
wire(magma_Bits_{n}_eq_inst0.out, TestBinary.O[0])
EndCircuit()\
"""
    m.compile(f"build/TestBits{n}bvcomp", TestBinary, output="coreir-verilog")
    assert check_files_equal(__file__, f"build/TestBits{n}bvcomp.v",
                             f"gold/TestBits{n}bvcomp.v")
    sim = PythonSimulator(TestBinary)
    for i in range(2):
        I0 = BitVector.random(n)
        I1 = BitVector.random(n)
        sim.set_value(TestBinary.I0, I0)
        sim.set_value(TestBinary.I1, I1)
        sim.evaluate()
        assert sim.get_value(TestBinary.O) == (I0 == I1)
@pytest.mark.parametrize("n", [1, 3])
@pytest.mark.parametrize("x", [4, 7])
def test_repeat(n, x):
    """Bits.repeat(x) tiles the n-bit input x times into an n*x output."""
    class TestRepeat(m.Circuit):
        io = m.IO(I=m.In(m.Bits[n]), O=m.Out(m.Bits[n * x]))
        io.O <= io.I.repeat(x)
    # Single-bit inputs are wired bit-by-bit; wider inputs as whole slices.
    if n == 1:
        wires = "\n".join(f"wire(TestRepeat.I[{i}], TestRepeat.O[{i + j * n}])"
                          for j in range(x) for i in range(n))
    else:
        assert n == 3
        wires = "\n".join(f"wire(TestRepeat.I, TestRepeat.O[slice({i * n}, {(i + 1) * n}, None)])"
                          for i in range(x))
    assert repr(TestRepeat) == f"""\
TestRepeat = DefineCircuit("TestRepeat", "I", In(Bits[{n}]), "O", Out(Bits[{n * x}]))
{wires}
EndCircuit()\
"""
    m.compile(f"build/TestBits{n}x{x}Repeat",
              TestRepeat, output="coreir-verilog")
    assert check_files_equal(__file__, f"build/TestBits{n}x{x}Repeat.v",
                             f"gold/TestBits{n}x{x}Repeat.v")
    # Randomized simulation against BitVector.repeat.
    sim = PythonSimulator(TestRepeat)
    for i in range(2):
        I = BitVector.random(n)
        sim.set_value(TestRepeat.I, I)
        sim.evaluate()
        assert sim.get_value(TestRepeat.O) == I.repeat(x)
@pytest.mark.parametrize("op", [operator.and_, operator.or_, operator.xor,
                                operator.lshift, operator.rshift, operator.add,
                                operator.sub, operator.mul])
def test_rops(op):
    """Reflected operators: a constant BitVector combined with a magma Bits
    value must simulate to the same result as the pure BitVector op."""
    x = BitVector.random(5)
    class Main(m.Circuit):
        io = m.IO(I=m.In(m.Bits[5]), O=m.Out(m.Bits[5]))
        io.O @= op(x, io.I)
    sim = PythonSimulator(Main)
    I = BitVector.random(5)
    sim.set_value(Main.I, I)
    sim.evaluate()
    assert sim.get_value(Main.O) == op(x, I)
@pytest.mark.parametrize("op, op_str", [
    (operator.and_, "&"),
    (operator.or_, "|"),
    (operator.xor, "^"),
    (operator.lshift, "<<"),
    (operator.rshift, ">>"),
    (operator.add, "+"),
    (operator.sub, "-"),
    (operator.mul, "*")
])
def test_rop_type_error(op, op_str):
    """Mixing incompatible operand types/widths raises TypeError with a
    descriptive message."""
    class Main(m.Circuit):
        io = m.IO(I=m.In(m.Bits[2]))
        # A plain BitVector must not combine with a magma Bits value.
        with pytest.raises(TypeError) as e:
            print(op(BitVector[32](0xDEADBEEF), io.I))
        assert str(e.value) == (
            f"unsupported operand type(s) for {op_str}: 'BitVector[32]' and "
            "'Bits[(2, Out(Bit))]'"
        )
    # Mismatched widths must not combine either.
    x = m.Bits[5]()
    y = m.Bits[4]()
    with pytest.raises(TypeError) as e:
        op(x, y)
    assert str(e.value) == (
        f"unsupported operand type(s) for {op_str}: 'Bits[(5, Bit)]' and "
        "'Bits[(4, Bit)]'"
    )
def test_python_bits_from_int_truncation_error():
    """Constructing a Bits from an int that does not fit raises ValueError."""
    class Foo(m.Circuit):
        # 4 needs 3 bits, so Bits[2](4) would require truncation.
        with pytest.raises(ValueError) as e:
            m.Bits[2](4)
        assert str(e.value) == (
            "Cannot construct Bits[2] with integer 4 (requires truncation)")
        # Same check through the m.bits() helper.
        with pytest.raises(ValueError) as e:
            m.bits(4, 2)
        assert str(e.value) == "Cannot convert 4 to a Bits of length 2"
def test_bits_promote():
    """Promoting Bits[3] to Bits[6]: the added high bits trace to 0."""
    class TestBinary(m.Circuit):
        io = m.IO(I=m.In(m.Bits[3]), O=m.Out(m.Bits[6]))
        io.O @= Bits[6](io.I)
        assert int(io.O[4].trace()) == 0
        assert int(io.O[5].trace()) == 0
def test_bits_coerce_typeerror():
    """If Bits.__and__ cannot coerce the operand, Python falls back to the
    operand's __rand__ implementation."""
    class Dummy:
        def __rand__(self, other):
            return other
    class Foo(m.Circuit):
        io = m.IO(I=m.In(m.Bits[8]))
        # Bits.__and__ should get a TypeError in _coerce so we then use
        # Dummy.__rand__
        assert (io.I & Dummy()) is io.I
|
import re

# Read t lines of "name <email>" pairs from stdin and echo the ones whose
# bracketed email matches a simple user@domain.tld pattern.
t = int(input())
for _ in range(t):
    s = input().strip().split(" ")
    # s[1] is "<email>"; strip the surrounding angle brackets with [1:-1]
    # (was the verbose s[1][1:len(s[1])-1]).
    m = re.match(r'[a-z][\w\.-]+@[a-z]+\.[a-z]{1,3}$', s[1][1:-1])
    # Match objects are truthy; the redundant bool(m) wrapper is removed.
    if m:
        print(s[0], s[1])
|
import nose.tools
from angr import SimState, SimHeapPTMalloc
# TODO: Make these tests more architecture-independent (note dependencies of some behavior on chunk metadata size)
def chunk_iterators_are_same(iterator1, iterator2):
    """Return True iff both iterators yield chunk sequences of equal length
    whose corresponding chunks agree on `base` and `is_free()`.

    BUGFIX: the original called `next(iterator2)` bare inside the loop, so a
    shorter second iterator raised StopIteration out of the function instead
    of returning False.  A sentinel default makes exhaustion an ordinary
    (False) result in both directions.
    """
    _end = object()  # sentinel: "iterator exhausted"
    for ck in iterator1:
        ck2 = next(iterator2, _end)
        if ck2 is _end:
            return False  # iterator2 ran out first
        if ck.base != ck2.base:
            return False
        if ck.is_free() != ck2.is_free():
            return False
    # Both must be exhausted at the same time.
    return next(iterator2, _end) is _end
def same_heap_states(state1, state2):
    """True when both states' heaps contain identical chunk sequences."""
    return chunk_iterators_are_same(state1.heap.chunks(), state2.heap.chunks())


def max_sym_var_val(state):
    """Maximum value the libc plugin allows for symbolic size variables."""
    return state.libc.max_variable_size
def run_malloc_maximizes_sym_arg(arch):
    """malloc with a bounded symbolic size leaves the heap in the same state
    as malloc of the maximum concrete size."""
    s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
    sc = s.copy()
    x = s.solver.BVS("x", 32)
    s.solver.add(x.UGE(0))
    s.solver.add(x.ULE(max_sym_var_val(s)))
    s.heap.malloc(x)
    sc.heap.malloc(max_sym_var_val(sc))
    nose.tools.assert_true(same_heap_states(s, sc))


def test_malloc_maximizes_sym_arg():
    # nose-style generator test: one run per architecture
    for arch in ('X86', 'AMD64'):
        yield run_malloc_maximizes_sym_arg, arch


def run_free_maximizes_sym_arg(arch):
    """free with a bounded symbolic pointer behaves like free of the maximum
    candidate pointer (the allocation itself)."""
    s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
    p = s.heap.malloc(50)
    sc = s.copy()
    x = s.solver.BVS("x", 32)
    s.solver.add(x.UGE(0))
    s.solver.add(x.ULE(p))
    s.heap.free(x)
    sc.heap.free(p)
    nose.tools.assert_true(same_heap_states(s, sc))


def test_free_maximizes_sym_arg():
    for arch in ('X86', 'AMD64'):
        yield run_free_maximizes_sym_arg, arch
def run_calloc_maximizes_sym_arg(arch):
    """calloc with bounded symbolic count/size matches calloc of the
    maximum concrete values."""
    s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
    sc = s.copy()
    x = s.solver.BVS("x", 32)
    s.solver.add(x.UGE(0))
    s.solver.add(x.ULE(20))
    y = s.solver.BVS("y", 32)
    s.solver.add(y.UGE(0))
    s.solver.add(y.ULE(6))
    s.heap.calloc(x, y)
    sc.heap.calloc(20, 6)
    nose.tools.assert_true(same_heap_states(s, sc))


def test_calloc_maximizes_sym_arg():
    for arch in ('X86', 'AMD64'):
        yield run_calloc_maximizes_sym_arg, arch


def run_realloc_maximizes_sym_arg(arch):
    """realloc with bounded symbolic pointer/size matches realloc of the
    maximum concrete values."""
    s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
    p = s.heap.malloc(50)
    sc = s.copy()
    x = s.solver.BVS("x", 32)
    s.solver.add(x.UGE(0))
    s.solver.add(x.ULE(p))
    y = s.solver.BVS("y", 32)
    s.solver.add(y.UGE(0))
    s.solver.add(y.ULE(max_sym_var_val(s)))
    s.heap.realloc(x, y)
    sc.heap.realloc(p, max_sym_var_val(sc))
    nose.tools.assert_true(same_heap_states(s, sc))


def test_realloc_maximizes_sym_arg():
    for arch in ('X86', 'AMD64'):
        yield run_realloc_maximizes_sym_arg, arch
def run_malloc_no_space_returns_null(arch):
    """A malloc larger than the whole heap returns NULL (0) and leaves the
    heap state untouched."""
    s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
    sc = s.copy()
    p1 = s.heap.malloc(0x2000)
    nose.tools.assert_equal(p1, 0)
    nose.tools.assert_true(same_heap_states(s, sc))


def test_malloc_no_space_returns_null():
    for arch in ('X86', 'AMD64'):
        yield run_malloc_no_space_returns_null, arch


def run_calloc_no_space_returns_null(arch):
    """A calloc whose product exceeds the heap returns NULL and leaves the
    heap state untouched."""
    s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
    sc = s.copy()
    p1 = s.heap.calloc(0x500, 4)
    nose.tools.assert_equal(p1, 0)
    nose.tools.assert_true(same_heap_states(s, sc))


def test_calloc_no_space_returns_null():
    for arch in ('X86', 'AMD64'):
        yield run_calloc_no_space_returns_null, arch


def run_realloc_no_space_returns_null(arch):
    """A realloc that cannot be satisfied returns NULL and leaves the heap
    (including the original allocation) untouched."""
    s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
    p1 = s.heap.malloc(20)
    sc = s.copy()
    p2 = s.heap.realloc(p1, 0x2000)
    nose.tools.assert_equal(p2, 0)
    nose.tools.assert_true(same_heap_states(s, sc))


def test_realloc_no_space_returns_null():
    for arch in ('X86', 'AMD64'):
        yield run_realloc_no_space_returns_null, arch
def run_first_fit_and_free_malloced_makes_available(arch):
    """A freed malloc'd chunk is reused (first fit) by a later, smaller
    malloc."""
    s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
    s.heap.malloc(20)
    p1 = s.heap.malloc(50)
    s.heap.free(p1)
    p2 = s.heap.malloc(30)
    nose.tools.assert_equal(p1, p2)


def test_first_fit_and_free_malloced_makes_available():
    for arch in ('X86', 'AMD64'):
        yield run_first_fit_and_free_malloced_makes_available, arch


def run_free_calloced_makes_available(arch):
    """A freed calloc'd chunk is reused by a later calloc of equal total
    size (15 * 8 == 30 * 4)."""
    s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
    s.heap.calloc(20, 5)
    p1 = s.heap.calloc(30, 4)
    s.heap.free(p1)
    p2 = s.heap.calloc(15, 8)
    nose.tools.assert_equal(p1, p2)


def test_free_calloced_makes_available():
    for arch in ('X86', 'AMD64'):
        yield run_free_calloced_makes_available, arch


def run_realloc_moves_and_frees(arch):
    """Growing a sandwiched chunk relocates it past the following chunk and
    frees the old location for reuse."""
    s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
    s.heap.malloc(20)
    p1 = s.heap.malloc(60)
    s.heap.malloc(200)
    p2 = s.heap.realloc(p1, 300)
    p3 = s.heap.malloc(30)
    nose.tools.assert_equal(p1, p3)
    nose.tools.assert_less(p1, p2)


def test_realloc_moves_and_frees():
    for arch in ('X86', 'AMD64'):
        yield run_realloc_moves_and_frees, arch
def run_realloc_near_same_size(arch):
    """A realloc to a nearly identical size keeps the chunk in place and
    leaves the heap state unchanged."""
    s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
    s.heap.malloc(20)
    p1 = s.heap.malloc(61)
    s.heap.malloc(80)
    sc = s.copy()
    p2 = s.heap.realloc(p1, 62)
    nose.tools.assert_equal(p1, p2)
    nose.tools.assert_true(same_heap_states(s, sc))


def test_realloc_near_same_size():
    for arch in ('X86', 'AMD64'):
        yield run_realloc_near_same_size, arch


def run_needs_space_for_metadata(arch):
    """A request equal to the full heap size fails: chunk metadata needs
    room too."""
    s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
    sc = s.copy()
    p1 = s.heap.malloc(0x1000)
    nose.tools.assert_equal(p1, 0)
    nose.tools.assert_true(same_heap_states(s, sc))


def test_needs_space_for_metadata():
    for arch in ('X86', 'AMD64'):
        yield run_needs_space_for_metadata, arch


def run_unusable_amount_returns_null(arch):
    """When the only remaining space is too small to be usable, malloc
    returns NULL and the heap is unchanged."""
    s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
    s.heap.malloc(0x1000 - 4 * s.heap._chunk_size_t_size)
    sc = s.copy()
    p = s.heap.malloc(1)
    nose.tools.assert_equal(p, 0)
    nose.tools.assert_true(same_heap_states(s, sc))


def test_unusable_amount_returns_null():
    for arch in ('X86', 'AMD64'):
        yield run_unusable_amount_returns_null, arch
def run_free_null_preserves_state(arch):
    """free(NULL) is a no-op: the heap state is identical afterwards."""
    s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
    s.heap.malloc(30)
    p = s.heap.malloc(40)
    s.heap.malloc(50)
    s.heap.free(p)
    s2 = s.copy()
    s2.heap.free(0)
    nose.tools.assert_true(same_heap_states(s, s2))


def test_free_null_preserves_state():
    for arch in ('X86', 'AMD64'):
        yield run_free_null_preserves_state, arch


def run_skips_chunks_too_small(arch):
    """Allocation skips a free chunk that is too small for the request
    (20 * 5 = 100 bytes does not fit in the freed 50-byte chunk)."""
    s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
    s.heap.malloc(30)
    p = s.heap.malloc(50)
    s.heap.malloc(40)
    s.heap.free(p)
    p2 = s.heap.calloc(20, 5)
    nose.tools.assert_less(p, p2)


def test_skips_chunks_too_small():
    for arch in ('X86', 'AMD64'):
        yield run_skips_chunks_too_small, arch
def run_calloc_multiplies(arch):
    """calloc(4, 25) produces the same heap layout as malloc(100)."""
    s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
    s.heap.malloc(30)
    sc = s.copy()
    s.heap.malloc(100)
    sc.heap.calloc(4, 25)
    nose.tools.assert_true(same_heap_states(s, sc))


def test_calloc_multiplies():
    for arch in ('X86', 'AMD64'):
        yield run_calloc_multiplies, arch


def run_calloc_clears(arch):
    """calloc zero-fills its allocation while malloc leaves whatever bytes
    were already in memory (pre-seeded with -1 here)."""
    s = SimState(arch=arch, plugins={'heap': SimHeapPTMalloc(heap_base=0xd0000000, heap_size=0x1000)})
    # Pre-fill the first usable heap bytes (past the chunk header) with -1.
    s.memory.store(0xd0000000 + 2 * s.heap._chunk_size_t_size, s.solver.BVV(-1, 100 * 8))
    sc = s.copy()
    p1 = s.heap.calloc(6, 5)
    p2 = sc.heap.malloc(30)
    v1 = s.memory.load(p1, 30)
    v2 = sc.memory.load(p2, 30)
    nose.tools.assert_true(s.solver.is_true(v1 == 0))
    nose.tools.assert_true(sc.solver.is_true(v2 == -1))


def test_calloc_clears():
    for arch in ('X86', 'AMD64'):
        yield run_calloc_clears, arch
if __name__ == "__main__":
    # Manually drive the nose-style generator tests when run as a script:
    # each test_* function yields (runner, arch) pairs.
    g = globals().copy()
    for func_name, func in g.items():
        if func_name.startswith("test_") and hasattr(func, '__call__'):
            for r, a in func():
                r(a)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import, unicode_literals
import os
import sys
import subprocess
import platform
from six import PY2, string_types
import logging
logger = logging.getLogger(__name__)
TERM_ENCODING = getattr(sys.stdin, 'encoding', None)
# Shell-quoting helper: on Windows use a minimal rule, elsewhere reuse the
# standard library's quote().
if platform.system() == "Windows":
    def quote(s):
        # Wrap in double quotes only when the string contains a space.
        # NOTE(review): embedded double quotes are not escaped -- confirm
        # inputs never contain them.
        if " " in s:
            return '"' + s + '"'
        return s
else:
    try:
        from shlex import quote  # Python 3
    except ImportError:
        from pipes import quote  # Python 2 fallback


def _encoding():
    """Encoding for terminal text: stdin's encoding, else the filesystem's."""
    return TERM_ENCODING or sys.getfilesystemencoding()
class Commander(object):
    """Build and run shell command lines in a cross-platform way.

    A command may be a string or an argument list; lists are shell-quoted
    and joined into a single string.  Multiple commands are chained with
    the platform's separator (" & " on Windows, " ; " elsewhere) and, on
    Linux, optionally wrapped in an explicit ``/bin/bash -c "..."`` call.
    """

    def __init__(self, system=platform.system(), py2=PY2):
        # Defaults are evaluated once at import time, which is intentional:
        # the host OS and interpreter major version do not change at runtime.
        self.system = system
        self.py2 = py2
        # Commands implicitly prepended to every line built by __call__.
        self.pre_command = [[]]

    def create_pre_commander(self, pre_command=None):
        """Return a new Commander that prefixes every call with *pre_command*."""
        new_commander = Commander(self.system, self.py2)
        new_commander.pre_command = [pre_command or []]
        return new_commander

    def __call__(self, cmd=None, cwd=None, bash=True, cmds=None):
        """Build a single command-line string from *cmd* and/or *cmds*.

        Args:
            cmd: first command, as a string or an argument list.
            cwd: unused here; kept for interface compatibility.
            bash: on Linux, wrap the chain in ``/bin/bash -c "..."``.
            cmds: additional commands (strings or argument lists).

        Returns:
            str: the joined command line.
        """
        new_cmd = []
        _cmds = [cmd] if cmd else []
        _cmds += cmds or []
        for _cmd in self.pre_command + _cmds:
            # BUGFIX: this previously tested ``cmd`` instead of ``_cmd``, so
            # argument lists coming from ``cmds`` or ``pre_command`` were not
            # quoted/joined unless ``cmd`` itself happened to be a list,
            # which broke the joiner below (it expects strings).
            if isinstance(_cmd, (list, tuple)):
                _cmd = [quote(x) for x in _cmd]
                _cmd = " ".join(_cmd)
            new_cmd.append(_cmd)
        new_cmd = [x for x in new_cmd if x]  # drop empty commands
        if self.system == "Windows":
            cmd_joiner = " & "
        else:
            cmd_joiner = " ; "
        cmd_str = cmd_joiner.join(new_cmd)
        if self.system == "Linux" and bash:
            # cmd_str = cmd_str.replace('"', '\\"').replace("'", "\\'")
            return '/bin/bash -c "{}"'.format(cmd_str)
        else:
            return cmd_str

    def check_exist(self, cmds):
        """Probe *cmds* via which/where.

        Returns True as soon as one probe produces output; False on the
        first probe that fails or if none produce output.
        """
        # NOTE(review): uses platform.system() rather than self.system,
        # unlike the other methods -- confirm whether this is intentional.
        which_cmd = "which" if platform.system() != "Windows" else "where"
        for cmd in cmds:
            # noinspection PyBroadException
            try:
                output = subprocess.check_output(self([which_cmd, cmd]), stderr=None, shell=True)
            except FileNotFoundError:
                return False
            except subprocess.CalledProcessError:
                return False
            if output:
                return True
        return False

    def which(self, cmd):
        """Return the first path which/where reports for *cmd*, else None."""
        if self.system == "Windows":
            which_cmd = "where"
        else:
            which_cmd = "which"
        result = self.check_output("{} {}".format(which_cmd, cmd), shell=True)
        if result:
            for line in result.splitlines():
                return line  # first line only
        return None

    def check_output(self, cmd, stderr=None, shell=False):
        """Run *cmd* and return its decoded output, or None on any failure."""
        try:
            output = subprocess.check_output(self(cmd), stderr=stderr, shell=shell)
        except FileNotFoundError:
            logger.error("FileNotFoundError:{}".format(self(cmd)))
            return None
        except subprocess.CalledProcessError:
            logger.error("Call Error:{}".format(self(cmd)))
            return None
        try:
            return output.decode(sys.getfilesystemencoding())
        except UnicodeDecodeError:
            logger.error("UnicodeDecodeError:{}".format(output))
            return None

    def call(self, cmd, stderr=None, shell=False):
        """Run *cmd*; True when it exits with status 0, False otherwise."""
        try:
            code = subprocess.call(self(cmd), stderr=stderr, shell=shell)
        except FileNotFoundError:
            logger.error("FileNotFoundError:{}".format(self(cmd)))
            return False
        except subprocess.CalledProcessError:
            logger.error("Call Error:{}".format(self(cmd)))
            return False
        return code == 0

    def open_terminal(self, path):
        # type: (string_types) -> None
        """Open an interactive terminal in *path* (best effort)."""
        cwd = os.path.normpath(path)
        if self.py2:
            cwd = cwd.encode(sys.getfilesystemencoding())
        # noinspection PyBroadException
        try:
            if platform.system() == "Windows":
                subprocess.Popen("cmd", cwd=cwd)
            elif platform.system() == "Darwin":
                # NOTE(review): "open" without arguments may be a no-op on
                # macOS -- confirm the intended terminal application.
                subprocess.Popen("open", cwd=cwd)
            else:
                subprocess.Popen("gnome-terminal", cwd=cwd)
        except Exception:  # BUGFIX: was a bare except (also caught SystemExit)
            logger.error("Open Terminal Error.")

    def show_directory(self, path):
        # type: (string_types) -> None
        """Reveal *path* in the platform's file manager."""
        path = os.path.normpath(path)
        if platform.system() == "Windows":
            cmd = ["explorer", quote(path)]
        elif platform.system() == "Darwin":
            cmd = ["open", quote(path)]
        else:
            cmd = ["xdg-open", quote(path)]
        # print(" ".join(cmd))
        self.launch(" ".join(cmd), path)

    def open(self, path):
        """Open *path* with the platform's default application."""
        path = os.path.normpath(path)
        if self.system == "Windows":
            cmd = [path]
        elif self.system == "Darwin":
            # NOTE(review): this branch passes a list to launch(), the other
            # branches pass a string -- confirm Popen(shell=True) handles it.
            cmd = ["open", path]
        else:
            cmd = self(["xdg-open", path])
        # print(" ".join(cmd))
        self.launch(cmd)

    @staticmethod
    def launch(cmd, cwd=None):
        # type: (string_types, string_types or None) -> None
        """Fire-and-forget launch of *cmd* through the shell."""
        if platform.system() == "Windows":
            # Detach the child process and hide its window.
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.CREATE_NEW_PROCESS_GROUP | subprocess.STARTF_USESHOWWINDOW
            if PY2:
                cmd = cmd.encode(_encoding())
                cwd = cwd.encode(sys.getfilesystemencoding()) if cwd else cwd
            subprocess.Popen(
                cmd,
                cwd=cwd,
                shell=True,
                startupinfo=startupinfo
            )
        else:
            subprocess.Popen(
                cmd,
                cwd=cwd,
                shell=True,
                env=os.environ.copy(),
            )

    def console(self, cmd, cwd=None):
        # type: (string_types, string_types) -> None or subprocess.Popen
        """Run *cmd* in a new console window; returns the Popen or None."""
        if platform.system() == "Windows":
            cmd = self(cmd)
            if PY2:
                cmd = cmd.encode(_encoding())
                cwd = cwd.encode(sys.getfilesystemencoding()) if cwd else cwd
            return subprocess.Popen(
                cmd,
                cwd=cwd,
                creationflags=subprocess.CREATE_NEW_CONSOLE,
            )
        elif platform.system() == "Linux":
            cmd = 'gnome-terminal -e "{}"'.format(self(cmd, bash=False))
            return subprocess.Popen(cmd, cwd=cwd, shell=True)
        else:
            # cmd = command(cmd)
            # subprocess.Popen(cmd, cwd=cwd, shell=True)
            logger.error("Not implemented")  # BUGFIX: was typo "Non Impliment"
            return None

    def exec_(self, cmd, cwd=None):
        # type: (string_types, string_types) -> int
        """Run *cmd* synchronously and return its exit code."""
        shell = True
        if platform.system() == "Windows":
            cmd = self(('cmd.exe /C "' + cmd + '"'))
            if PY2:
                cmd = cmd.encode(_encoding())
                cwd = cwd.encode(_encoding())
            shell = False
        else:
            cmd = self(cmd)
        p = subprocess.Popen(
            cmd,
            cwd=cwd if cwd else None,
            shell=shell,
        )
        p.wait()
        return p.returncode

    @staticmethod
    def make_command(make_cmd, cwd):
        # type: (string_types, string_types) -> string_types
        """Build the platform-appropriate "make <target>" invocation."""
        if platform.system() == "Windows":
            make_bat = os.path.join(cwd, "make.bat")
            return make_bat + " " + make_cmd
        else:
            return "make " + make_cmd
# Module-level default instance configured for the current host.
commander = Commander()
|
'''
Using sqlalchemy, write the necessary code to:
- Select all the actors with the first name of your choice
- Select all the actors and the films they have been in
- Select all the actors that have appeared in a category of your choice (comedy)
- Select all the comedic films and sort them by rental rate
- Using one of the statements above, add a GROUP BY
- Using one of the statements above, add an ORDER BY
'''
import sqlalchemy
from pprint import pprint

# BUGFIX: the URL was written as '...root:'xxxyyxx'@...' with mismatched
# quotes, which is a SyntaxError.  NOTE(review): credentials should come from
# an environment variable rather than being hard-coded in source.
engine = sqlalchemy.create_engine('mysql+pymysql://root:xxxyyxx@localhost/sakila')
connection = engine.connect()
metadata = sqlalchemy.MetaData()

# Reflect the Sakila tables used below.
actor = sqlalchemy.Table('actor', metadata, autoload=True, autoload_with=engine)
film = sqlalchemy.Table('film', metadata, autoload=True, autoload_with=engine)
film_actor = sqlalchemy.Table('film_actor', metadata, autoload=True, autoload_with=engine)
category = sqlalchemy.Table('category', metadata, autoload=True, autoload_with=engine)
film_category = sqlalchemy.Table('film_category', metadata, autoload=True, autoload_with=engine)

# actor -> film_actor -> film
join_statement1 = actor.join(film_actor, film_actor.columns.actor_id == actor.columns.actor_id).join(film, film.columns.film_id == film_actor.columns.film_id)
# actor -> film_actor -> film_category -> category
join_statement2 = actor.join(film_actor, film_actor.columns.actor_id == actor.columns.actor_id).join(film_category, film_category.columns.film_id == film_actor.columns.film_id).join(category, category.columns.category_id == film_category.columns.category_id)
# film -> film_category -> category
join_statement3 = film.join(film_category, film_category.columns.film_id == film.columns.film_id).join(category, category.columns.category_id == film_category.columns.category_id)

# 1) actors with a chosen first name
query1 = sqlalchemy.select([actor]).where(actor.columns.first_name == 'JOHN')
# 2) actors and the films they appeared in
query2 = sqlalchemy.select([actor.columns.first_name, actor.columns.last_name, film.columns.title]).select_from(join_statement1)
# 3) actors that appeared in a Comedy
query3 = sqlalchemy.select([actor.columns.first_name, actor.columns.last_name, category.columns.name]).select_from(join_statement2).where(category.columns.name == 'Comedy')
# 4) comedies with GROUP BY film and ORDER BY rental rate ascending
query4 = sqlalchemy.select([film.columns.title, film.columns.rental_rate]).select_from(join_statement3).where(category.columns.name == 'Comedy').group_by(film.columns.film_id).order_by(sqlalchemy.asc(film.columns.rental_rate))

result_proxy = connection.execute(query4)
result_final = result_proxy.fetchall()
pprint(result_final)
|
import io
import os
from itertools import chain
from typing import List, Optional, Tuple, Union
import requests
from sending.parsers import Entry, parse_flatfile
from sending.curation_files import NewFiles, PepFiles, SubFiles, TrEMBLFiles
class NewAccessionChecker:
    """Checks secondary accessions in NewFiles.

    The file types checked are NewFiles (*.new, excluding curated TrEMBL
    entries), SubFiles (*.sub) and PepFiles (*.pep).  These entries normally
    receive a brand-new UniProt accession, so when the primary accession is
    new, none of the secondary accessions may already exist in TrEMBL.

    Attributes:
        entries: list of Entry objects.
        accessions: list of all accessions in the entries.
        trembl_accessions: list of accessions in the entries which are also
            in TrEMBL (None until check() has run).
        entries_with_error: list of (primary accession, offending secondary
            accessions) tuples for failing entries.

    Methods:
        check: runs the checking procedure.
    """

    def __init__(self, files: Union[NewFiles, PepFiles, SubFiles]):
        self.entries: List[Entry] = files.get_entries()
        self.accessions: List[str] = files.get_accessions()
        self.trembl_accessions: Optional[List[str]] = None
        self.entries_with_error: List[Tuple[str, List[str]]] = []

    @property
    def ok(self) -> bool:
        """Signals whether errors have been detected in the files.

        Returns:
            bool: True once check() has run and no entries were flagged;
            False otherwise (including before check() has been run).
        """
        return self.trembl_accessions is not None and not self.entries_with_error

    def check(self) -> None:
        """Runs the checking procedure by querying a TrEMBL server for the
        entries' accessions, then validating secondary accessions."""
        found = _query_trembl(self.accessions, format="list")
        self.trembl_accessions = found.split()
        self._check_secondary_accessions()

    def _check_secondary_accessions(self) -> None:
        """Validates secondary accessions of every entry.

        An entry whose primary accession already exists in TrEMBL is fine.
        Otherwise, any secondary accession that exists in TrEMBL is an
        error, recorded in entries_with_error as
        (primary_accession, secondary_trembl_accessions).
        """
        for entry in self.entries:
            primary, *secondary = entry.accessions
            if primary in self.trembl_accessions:
                continue  # primary is a TrEMBL accession, which is fine
            offending = [acc for acc in secondary if acc in self.trembl_accessions]
            if offending:
                self.entries_with_error.append((primary, offending))
class TrEMBLAccessionChecker:
    """Checks accessions and protein ids in curated TrEMBL entries.

    Attributes:
        accessions: sorted accessions from the curated TrEMBL entries.
        pids: sorted protein ids from the curated TrEMBL entries.
        trembl_accessions: sorted accessions from the original TrEMBL entries.
        trembl_pids: sorted EMBL protein ids from the original TrEMBL entries.

    Methods:
        check: runs the checking procedure.
    """

    def __init__(self, trembl_files: TrEMBLFiles) -> None:
        self.accessions: List[str] = sorted(trembl_files.get_accessions())
        self.pids: List[str] = sorted(trembl_files.get_pids())
        self.trembl_accessions: List[str] = []
        self.trembl_pids: List[str] = []

    @property
    def ok(self) -> bool:
        """Signals whether errors have been detected in the files.

        Returns:
            bool: True when both accessions and protein ids match the
            originals exactly; False otherwise (also before check() runs,
            since the trembl_* lists start out empty).
        """
        return (self.accessions == self.trembl_accessions
                and self.pids == self.trembl_pids)

    def check(self) -> None:
        """Runs the checking procedure by fetching the original entries in
        flat-file format and extracting their identifiers."""
        entry_text = _query_trembl(self.accessions, format="txt")
        self._get_trembl_identifiers(entry_text)

    def _get_trembl_identifiers(self, trembl_entries: str) -> None:
        """Extracts accessions and protein ids from UniProt flat-file text.

        Args:
            trembl_entries: one or more UniProt entries in text format.
        """
        parsed = list(parse_flatfile(io.StringIO(trembl_entries)))
        self.trembl_accessions = sorted(
            acc for entry in parsed for acc in entry.accessions)
        self.trembl_pids = sorted(
            pid for entry in parsed for pid in entry.pids)
def _query_trembl(accessions: List[str], format: str) -> str:
    """Searches a TrEMBL server for UniProt entries by accession.

    The server is configured via the 'TREMBL_SERVER' environment variable;
    normally the internal TrEMBL server with the most up-to-date database.

    Args:
        accessions: list of UniProt accessions to be passed as query
            parameter.
        format: format of matched UniProt entries (txt, fasta, xml, list
            are valid formats).  (Parameter name kept for callers even
            though it shadows the builtin.)

    Returns:
        str: the server response body (UniProt entries in the requested
        format).

    Raises:
        KeyError: if TREMBL_SERVER is not set.
        requests.HTTPError: if the server returns an error status.
    """
    server = os.environ["TREMBL_SERVER"]
    url = f"{server}/uniprot/?"
    # Build "id:A OR id:B OR ..."; join the list directly instead of via a
    # redundant pass-through generator.
    query = f"id:{' OR id:'.join(accessions)}"
    params = {"query": query, "format": format}
    uniprot_query = requests.get(url, params=params)
    uniprot_query.raise_for_status()
    return uniprot_query.text
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def booklist(request):
    """Render the book list page from the booklist.html template."""
    return render(request, 'booklist.html')
    #return HttpResponse('<h1>Welcome to our store!</h1>')
# Generated by Django 3.0.7 on 2020-06-28 15:07
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration renaming *_id foreign-key fields to plain
    names (e.g. office_id -> office) across the OFFICE_PIS models."""

    dependencies = [
        ('OFFICE_PIS', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='address',
            old_name='office_id',
            new_name='office',
        ),
        migrations.RenameField(
            model_name='address',
            old_name='staff_id',
            new_name='staff',
        ),
        migrations.RenameField(
            model_name='appointment',
            old_name='office_id',
            new_name='office',
        ),
        migrations.RenameField(
            model_name='appointment',
            old_name='post_id',
            new_name='post',
        ),
        migrations.RenameField(
            model_name='darbandiinfo',
            old_name='office_id',
            new_name='office',
        ),
        migrations.RenameField(
            model_name='darbandiinfo',
            old_name='post_id',
            new_name='post',
        ),
        migrations.RenameField(
            model_name='desiredperson',
            old_name='staff_id',
            new_name='staff',
        ),
        migrations.RenameField(
            model_name='educationalinfo',
            old_name='staff_id',
            new_name='staff',
        ),
        migrations.RenameField(
            model_name='family',
            old_name='staff_id',
            new_name='staff',
        ),
        migrations.RenameField(
            model_name='leaveinfo',
            old_name='staff_id',
            new_name='staff',
        ),
        migrations.RenameField(
            model_name='punishmentinfo',
            old_name='staff_id',
            new_name='staff',
        ),
        migrations.RenameField(
            model_name='service',
            old_name='post_id',
            new_name='post',
        ),
        migrations.RenameField(
            model_name='service',
            old_name='staff_id',
            new_name='staff',
        ),
        migrations.RenameField(
            model_name='staff',
            old_name='post_id',
            new_name='post',
        ),
    ]
|
def addition_of_powers(a, b, c, d):
    """Print and return a**b + c**d.

    The original only printed the value; returning it as well is
    backward-compatible and makes the function usable programmatically.
    """
    result = a ** b + c ** d
    print(result)
    return result


if __name__ == "__main__":
    # Read the four operands, one per line, from stdin.
    a = int(input())
    b = int(input())
    c = int(input())
    d = int(input())
    addition_of_powers(a, b, c, d)
|
# Read two numbers and report which one is bigger.
print("Please enter a number!")
first_number = float(input())
print("Please enter second number!")
second_number = float(input())
if second_number > first_number:
    print("The second number is bigger!")
elif first_number > second_number:
    # BUGFIX: this branch previously tested `first_number < second_number`,
    # duplicating the first condition and making it unreachable; the equal
    # case then wrongly printed "The second number is bigger!".
    print("The first is bigger!")
else:
    print("Both numbers are equal")
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
    version='0.3.0',
    description='Ade, a templated file system manager',
    author='Lorenzo Angeli',
    name='ade',
    author_email='lorenzo.angeli@gmail.com',
    packages=find_packages(exclude=["test"]),
    test_suite="test",
    entry_points={
        'console_scripts': [
            'ade = ade.main:run',
        ],
    },
    install_requires=[
        # 'argparse' removed: it has been in the standard library since
        # Python 2.7 / 3.2, and declaring it pulls in an obsolete PyPI shim.
        'sphinx',
        'sphinx_rtd_theme'
    ],
)
|
from django.contrib.auth.decorators import login_required
from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore.django import modulestore
from course_api.blocks.api import get_blocks
def require_level(level):
    """
    Decorator with argument that requires an access level of the requesting
    user. If the requirement is not satisfied, returns an
    HttpResponseForbidden (403).

    Assumes that request is in args[0].
    Assumes that course_id is in kwargs['course_id'].

    `level` is in ['instructor', 'staff']
    if `level` is 'staff', instructors will also be allowed, even
    if they are not in the staff group.
    """
    from functools import wraps  # local import; file-level imports untouched

    if level not in ['instructor', 'staff']:
        raise ValueError("unrecognized level '{}'".format(level))

    def decorator(func):  # pylint: disable=missing-docstring
        @wraps(func)  # preserve the view's __name__/__doc__ for introspection
        def wrapped(*args, **kwargs):  # pylint: disable=missing-docstring
            request = args[0]
            # NOTE(review): get_course_by_id, has_access and
            # HttpResponseForbidden are not among this module's visible
            # imports -- confirm they are provided elsewhere.
            course = get_course_by_id(CourseKey.from_string(kwargs['course_id']))
            if has_access(request.user, level, course):
                return func(*args, **kwargs)
            else:
                return HttpResponseForbidden()
        return wrapped
    return decorator
#return real values of select fields' input
def return_select_value(key, value, kwarg):
    """Map a submitted 'select' field value back to its display name.

    Args:
        key: name of the field being resolved.
        value: the submitted value (compared as str against each option).
        kwarg: iterable of field-definition dicts with 'name', 'type' and,
            for selects, an 'options' list of {'value', 'name'} dicts.

    Returns:
        The matching option's 'name' when found, otherwise *value*.
    """
    for indice in kwarg:
        # Guard clauses flatten the original four-level nesting; the
        # "options is not None and len(options) > 0" pair collapses into
        # the `or []` fallback below.
        if key != indice.get('name') or indice.get('type') != 'select':
            continue
        for _row in indice.get('options') or []:
            if str(value) == str(_row.get('value')):
                value = _row.get('name')
    return value
|
"""Board URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from django.conf.urls import url
from . import views
# Route table for the board app; URL names are used by templates/reverse().
urlpatterns = [
    path('',views.BoardListView.as_view(), name = 'home'),
    path('boards/<int:board_id>/', views.TopicListView.as_view(), name = 'board_topics'),
    path('boards/<int:board_id>/new', views.new_topic, name = 'new_topic'),
    path('boards/<int:board_id>/topics/<int:topic_id>/',
         views.PostListView.as_view(), name = 'topic_posts'),
    path('boards/<int:board_id>/topics/<int:topic_id>/reply/',
         views.reply_topic, name='reply_topic'),
    path('boards/<int:board_id>/topics/<int:topic_id>/posts/<int:post_id>/edit/',
         views.PostUpdateView.as_view(), name='edit_post'),
]
|
import sys, os, time, random
import serial
sys.path.append('..')
import ts_usb
# dev_tools is Windows-only; it is used to "touch" the USB device node
# so a wedged port re-enumerates before we reopen it (see reconnect_port).
if os.name == 'nt':
    import dev_tools
TIMEOUT = 1  # serial read/write timeout, seconds
MAX_DELAY = 10  # cap for the reconnect backoff delay, seconds
PORT = sys.argv[1]  # serial port name taken from the command line
dev = None  # Windows device handle, assigned after the first open (nt only)
def open_port():
    """Open the command-line serial port with the shared read/write timeouts."""
    return serial.Serial(PORT, timeout=TIMEOUT, writeTimeout=TIMEOUT)
def send_req(com, req):
    """Write a serialized request to the port and validate the response.

    Returns True only when a full-length, matching response arrives with a
    zero err field and no ERR_ANY bits set in status; returns False (after
    logging to stderr) on any write/read failure or protocol mismatch.
    """
    try:
        com.write(ts_usb.cmd_serialize(req))
    # Narrowed from a bare except: so Ctrl-C / SystemExit still interrupt
    # this long-running polling tool instead of being logged as a failure.
    except Exception:
        print >> sys.stderr, '\nwrite failed'
        return False
    try:
        resp = com.read(ts_usb.PKT_LEN)
        # A short read means the device timed out mid-packet.
        if len(resp) != ts_usb.PKT_LEN:
            print >> sys.stderr, '\nread', len(resp), 'bytes'
            return False
        if not ts_usb.chk_cmd_response(resp, req):
            print >> sys.stderr, '\nbad response'
            return False
        r = ts_usb.cmd_deserialize(resp)
        if r.err != 0:
            print >> sys.stderr, '\nerr', r.err
            return False
        # Any error bit in the status word also counts as a failure.
        if (r.status & ts_usb.ERR_ANY) != 0:
            print >> sys.stderr, '\nstatus', r.status
            return False
        return True
    except Exception:  # narrowed from bare except, see above
        print >> sys.stderr, '\nread failed'
        return False
def reset_port(com):
    """Send a RESET command with a random sequence number; True on success."""
    reset_req = ts_usb.cmd_request(
        ts_usb.RESET, False, random.randint(0, 255), '', need_resp=True)
    return send_req(com, reset_req)
def reconnect_port(com):
    """Close the port and reopen it, retrying with exponential backoff.

    On Windows the USB device node is poked first (best effort) so a wedged
    port re-enumerates.  Any bytes already waiting in the input buffer are
    drained so the caller resumes from a clean stream.  Returns the newly
    opened serial object; loops until an open succeeds.
    """
    delay = .1
    while True:
        if com.isOpen():
            com.close()
        time.sleep(delay)
        if dev is not None:
            try:
                dev_tools.dev_touch(dev)
                time.sleep(delay)
            # Deliberately best-effort, but narrowed from a bare except:
            # so KeyboardInterrupt/SystemExit can still stop the tool.
            except Exception:
                pass
        try:
            com = open_port()
            # Flush stale input left over from before the reconnect.
            w = com.inWaiting()
            if w:
                com.read(w)
            return com
        except Exception:  # narrowed from bare except, see above
            # Double the delay up to MAX_DELAY and try again.
            delay = min(MAX_DELAY, delay*2)
            print >> sys.stderr, 'reopen failed'
# --- main loop: open the port and poll the device with INFO requests ---
com = open_port()
if os.name == 'nt':
    # Locate the underlying Windows device so reconnect_port() can touch it.
    dev = dev_tools.dev_find_port(PORT)
    assert dev is not None
while True:
    sn = 0
    if reset_port(com):
        # After a successful reset, cycle the 8-bit sequence number and
        # keep sending INFO requests until one fails; '*' marks each success.
        while True:
            sn = (sn + 1) & 0xff
            req = ts_usb.cmd_request(ts_usb.INFO, True, sn, '', need_resp=True)
            if not send_req(com, req):
                break
            print >> sys.stderr, '*',
    else:
        print >> sys.stderr, 'reset failed'
    # Either the reset or a poll failed: recycle the port and start over.
    com = reconnect_port(com)
|
__author__ = 'kayzhao'
from pymongo import MongoClient
def getDBTypes():
    """Sample ~100 documents from the disease.do collection and inspect their types."""
    from utils.typeutils import compare_types
    print("typing")
    # NOTE(review): credentials are hard-coded in the URI — move to config/env.
    client = MongoClient('mongodb://zkj1234:zkj1234@192.168.1.113:27017/disease')
    collection = client.disease.do
    sampled = []
    for count, document in enumerate(collection.find()):
        sampled.append(document)
        if count > 100:
            break
    compare_types(sampled)
# Run the sampler only when executed directly, not on import.
if __name__ == "__main__":
    getDBTypes()
#! --*--coding:utf-8--*--
# Regular expression demos (comments translated from Chinese).
# NOTE(review): patterns like '\d' should be raw strings (r'\d') to avoid
# invalid-escape DeprecationWarnings on recent Pythons — confirm before changing.
import re
# \d matches a single digit; the backslash escapes the 'd'
string = '007899'
st = re.match('00\d',string).span()
print(string[st[0]:st[1]])
# span() returns the (start, end) extent of the match
print(re.match('www','www.baidu.com').span())
# match() anchors at the start of the string, so this is None
print(re.match("com",'www.baidu.com'))
# group
line = "Cats are smarter than dogs"
print(re.match(".*",line).span())
#print(line[0:26])
# \d --- one digit
# .  --- any single character
# *  --- zero or more of the preceding element
data="98787d867543"
print(re.match('\d*',data).span())
line = "Cats are smarter than dogs"
matchRes = re.match("(.*) are (.*) (.*)",line)
print(matchRes.group())
print(matchRes.group(1))
print(matchRes.group(2))
print(matchRes.group(3))
# greedy mode vs non-greedy mode (.*? stops at the first possible point)
matchRes = re.match("(.*) are (.*?) (.*)",line)
print(matchRes.group())
print(matchRes.group(1))
print(matchRes.group(2))
print(matchRes.group(3))
# re.search
print(re.match("com",'www.baidu.com')) # match() only matches from the start
print(re.search("com",'www.baidu.com')) # search() scans the whole string
# regular
# 00\d* : \d -- one digit
# \w ---- one letter or digit
print(re.match("00\w",'007'))
print(re.match("\w*","hello world").span())
print(re.match("\w*.","hello world").span())
line = "Cats are smarter than dogs"
res = re.match("(\w*) (\w*) ",line)
print(res.group())
print(res.group(1))
print(res.group(2))
# .
# \d* vs \d{3} (exactly three digits)
print(re.match("\d{3}","00011100203").span())
print(re.match("\d{3}","aaa11100203"))
print(re.search("\d{3}","aaa11100203").span())
# \s matches one whitespace character
print(re.search("\s","aaaa aaaa").span())
print(re.search("\s","aaaa aaaa").span())
import functools
import sys
# n comes from the first CLI argument; int() raises ValueError on bad input.
n = int(sys.argv[1])
def factorial(n):
    """Return n! for a non-negative integer n.

    Raises ValueError for negative n.  (The original version fed reduce()
    an empty range(1, n+1) with no initializer in that case, producing an
    opaque TypeError.)
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    # The initializer 1 makes the empty product (n == 0) fall out naturally,
    # removing the explicit n == 0 special case.
    return functools.reduce(lambda x, y: x * y, range(1, n + 1), 1)
# Report the computed factorial for the CLI-supplied n.
print(f"Factorial of ({n}) is {factorial(n)}")
|
import re
import numpy as np
class Label:
# Constants
SPACE_TOKEN = '<space>'
SPACE_INDEX = 0
FIRST_INDEX = ord('a') - 1 # 0 is reserved to space
def __init__(self, transcription: str):
transcription = re.sub('[;:!@#$?.,_\'\"\-]', '', transcription)
self.__text: str = transcription
# Delete blanks at the beginning and the end of the transcription, transform to lowercase,
# delete numbers in the beginning, etc.
self.__targets = (' '.join(transcription.strip().lower().split(' ')[2:]).replace('.', '')).replace(' ', ' ').split(' ')
self.__indices = None
self.__indices = self.toIndex()
if True in (self.__indices < 0):
print('Character not supported')
def getTranscription(self) -> str:
return self.__text
def toIndex(self) -> np.ndarray:
if self.__indices is None:
# Adding blank label
index = np.hstack([self.SPACE_TOKEN if x == '' else list(x) for x in self.__targets])
# Transform char into index
index = np.asarray([self.SPACE_INDEX if x == '<space>' else ord(x) - self.FIRST_INDEX for x in index])
return index
else:
return self.__indices
def __str__(self):
return str(self.__indices)
@staticmethod
def fromFile(file_name: str):
with open(file_name, 'r') as f:
transcription = f.readlines()[0] # This method assumes that the transcription is in the first line
return Label(transcription) # Create Label class from transcription |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.