index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
3,500 | 4f7b689c06383673b510092932b051c644306b84 | # -*- coding:utf-8 -*-
from odoo import api, fields, _, models
Type_employee = [('j', 'Journalier'), ('m', 'Mensuel')]
class HrCnpsSettings(models.Model):
    # Configuration record for CNPS (Caisse Nationale de Prévoyance Sociale)
    # contribution brackets: a labelled min/max amount range, optionally
    # restricted to one worker payment type.
    _name = "hr.cnps.setting"
    _description = "settings of CNPS"
    name = fields.Char("Libellé", required=True)  # display label
    active = fields.Boolean("Actif", default=True)  # Odoo archive flag
    sequence = fields.Integer('Sequence', default=10)  # ordering in list views
    amount_min = fields.Float("Montant Min")  # lower bound of the bracket
    amount_max = fields.Float('Montant Max')  # upper bound of the bracket
    # 'j' = daily-paid, 'm' = monthly-paid (see Type_employee above);
    # False means the bracket applies regardless of type.
    type = fields.Selection(Type_employee, 'Type', required=False, default=False)
class HrCnpsCotisationLineTemplate(models.Model):
    # Template for one CNPS contribution line: a named rate ('taux') scoped
    # per company, categorised as pension ('cnps') or other scheme, and
    # optionally tied to an accounting account of that same company.
    _name = "hr.cnps.cotisation.line.template"
    _description = "hr cnps cotisation line template"
    name = fields.Char("Designation", required=True)
    # Defaults to the current user's company so new templates are scoped correctly.
    company_id = fields.Many2one("res.company", "Société", required=True, default=lambda self: self.env.user.company_id.id)
    taux = fields.Float("Taux")  # contribution rate
    sequence = fields.Integer("Sequence", default=10)
    active = fields.Boolean("Actif", default=True)
    type = fields.Selection([('cnps', 'Régime de retraite'), ('other', 'Autres régimes')], 'Type')
    # Domain restricts the selectable account to the line's own company.
    account_id = fields.Many2one('account.account', 'Compte comptable associé', required=False, domain="[('company_id', '=', company_id)]")
3,501 | f0f4573808253ca4bff808104afa9f350d305a9c | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# This script is a wrapper for JSON primitives, such as validation.
# Using routines of this module permits us to replace the underlying
# implementation with a better one without disrupting client code.
#
# In particular, at the time of this writing, there weren't really great
# json validation packages available for python. We initially settled
# on validictory, but it has a number of shortcomings, such as:
# * format error diagnostic message isn't always helpful for diagnosis
# * doesn't support references
# * doesn't support application of defaults
# * doesn't support dependencies
#
# TODO: offer a combined json parsing/validation function that applies
# defaults from the schema
# TODO: duplicate of 'validate', 'ValidationError', 'loadJSONValueFromFile'
# in swarming.hypersearch.utils -- will want to remove that later
import json
import math
import os
import validictory
class ValidationError(validictory.ValidationError):
    # Module-local alias so callers can catch validation failures without
    # depending on validictory directly.
    pass
class NaNInvalidator(validictory.SchemaValidator):
    """ validictory.SchemaValidator subclass to not accept NaN values as numbers.

    Usage:
        validate(value, schemaDict, validator_cls=NaNInvalidator)
    """
    def validate_type_number(self, val):
        # NaN would otherwise satisfy the numeric type check; reject it first,
        # then defer to the base implementation.
        return not math.isnan(val) \
            and super(NaNInvalidator, self).validate_type_number(val)
def validate(value, **kwds):
    """ Validate a python value against json schema:
    validate(value, schemaPath)
    validate(value, schemaDict)

    value: python object to validate against the schema

    The json schema may be specified either as a path of the file containing
    the json schema or as a python dictionary using one of the
    following keywords as arguments:
        schemaPath: Path of file containing the json schema object.
        schemaDict: Python dictionary containing the json schema object

    Returns: nothing

    Raises:
        ValidationError when value fails json validation
    """
    # One schema source is mandatory; remaining kwds (e.g. validator_cls)
    # are forwarded to validictory.validate unchanged.
    assert len(kwds.keys()) >= 1
    assert 'schemaPath' in kwds or 'schemaDict' in kwds
    schemaDict = None
    if 'schemaPath' in kwds:
        schemaPath = kwds.pop('schemaPath')
        schemaDict = loadJsonValueFromFile(schemaPath)
    elif 'schemaDict' in kwds:
        schemaDict = kwds.pop('schemaDict')
    try:
        validictory.validate(value, schemaDict, **kwds)
    except validictory.ValidationError as e:
        # Re-wrap so callers only need to know this module's ValidationError.
        raise ValidationError(e)
def loadJsonValueFromFile(inputFilePath):
    """ Read the json document stored at *inputFilePath* and return the
    equivalent python value.

    inputFilePath: path of the json file

    Returns: the python object the file's json text represents
    """
    with open(inputFilePath) as fileObj:
        return json.load(fileObj)
def test():
    """Self-test: exercise both the schemaDict and schemaPath code paths,
    each with a value that validates and one that does not.
    Exits with status 1 if a negative test unexpectedly passes.
    """
    import sys
    # Minimal schema: an object that must contain exactly one boolean 'myBool'.
    schemaDict = {
        "description": "JSON schema for jsonhelpers.py test code",
        "type": "object",
        "additionalProperties": False,
        "properties": {
            "myBool": {
                "description": "Some boolean property",
                "required": True,
                "type": "boolean"
            }
        }
    }
    d = {
        'myBool': False
    }
    print "Validating schemaDict method in positive test..."
    validate(d, schemaDict=schemaDict)
    print "ok\n"
    print "Validating schemaDict method in negative test..."
    try:
        validate({}, schemaDict=schemaDict)
    except ValidationError:
        print "ok\n"
    else:
        print "FAILED\n"
        sys.exit(1)
    # The file-based path reads a schema stored next to this module.
    schemaPath = os.path.join(os.path.dirname(__file__), "testSchema.json")
    print "Validating schemaPath method in positive test using %s..." % \
        (os.path.abspath(schemaPath),)
    validate(d, schemaPath=schemaPath)
    print "ok\n"
    print "Validating schemaPath method in negative test using %s..." % \
        (os.path.abspath(schemaPath),)
    try:
        validate({}, schemaPath=schemaPath)
    except ValidationError:
        print "ok\n"
    else:
        print "FAILED\n"
        sys.exit(1)
    return
if __name__ == "__main__":
    # Run the module self-test when executed directly.
    test()
|
3,502 | 610610e7e49fc98927a4894efe62686e26e0cb83 | from pythonforandroid.recipe import CompiledComponentsPythonRecipe
from multiprocessing import cpu_count
from os.path import join
class NumpyRecipe(CompiledComponentsPythonRecipe):
    """python-for-android build recipe for NumPy 1.18.1, with Android-specific
    source patches and a parallelised native build."""
    version = '1.18.1'
    url = 'https://pypi.python.org/packages/source/n/numpy/numpy-{version}.zip'
    site_packages_name = 'numpy'
    depends = ['setuptools', 'cython']
    # Patches: link libm explicitly, avoid host system libraries, and drop a
    # unittest invocation that breaks cross-compilation.
    patches = [
        join('patches', 'add_libm_explicitly_to_build.patch'),
        join('patches', 'do_not_use_system_libs.patch'),
        join('patches', 'remove_unittest_call.patch'),
    ]
    # Build with the host python directly instead of going through targetpython.
    call_hostpython_via_targetpython = False

    def build_compiled_components(self, arch):
        # Parallelise the C build across all host cores, then reset the extra
        # args so subsequent recipes are unaffected.
        self.setup_extra_args = ['-j', str(cpu_count())]
        super().build_compiled_components(arch)
        self.setup_extra_args = []

    def rebuild_compiled_components(self, arch, env):
        # Same parallel/reset dance for the rebuild path.
        self.setup_extra_args = ['-j', str(cpu_count())]
        super().rebuild_compiled_components(arch, env)
        self.setup_extra_args = []
|
3,503 | 600b49c7884f8b6e3960549702a52deb20089f5a | import time
import os
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from app.wechat_subscription.object_page.home_page import HomePage
from conf.decorator import teststep, teststeps
from conf.base_page import BasePage
from selenium.webdriver.common.by import By
from utils.toast_find import Toast
class LoginPage(BasePage):
    """Login page driver for the WeChat app: launch/close helpers, element
    presence checks, and the TBS-browser-kernel clearing flow."""

    @teststeps
    def __init__(self):
        self.home = HomePage()
        self.toast = Toast()

    @teststeps
    def wait_check_wx(self):
        """Check point: the text of the 'WeChat' tab on the WeChat home screen."""
        try:
            main_ele = (By.XPATH, "//android.widget.TextView[contains(@text,'微信')]")
            WebDriverWait(self.driver, 15, 0.5).until(EC.presence_of_element_located(main_ele))
            return True
        except:
            # Broad except: any failure (normally a timeout) means "not found".
            return False

    @teststeps
    def wait_check_test1(self):
        """Check point: chat page of the pinned contact '测试1' is visible."""
        try:
            main_ele = (By.XPATH, "//android.widget.TextView[contains(@text,'测试1')]")
            WebDriverWait(self.driver, 15, 0.5).until(EC.presence_of_element_located(main_ele))
            return True
        except:
            return False

    @teststeps
    def wait_check_tbs(self):
        """Check point: the TBS debug page title is visible."""
        try:
            tbs_title = (By.XPATH, "//android.widget.TextView[contains(@text,'tbs调试页面')]")
            WebDriverWait(self.driver, 15, 0.5).until(EC.presence_of_element_located(tbs_title))
            return True
        except:
            return False

    @teststeps
    def wait_check_delete_x5core(self):
        """Check point: the 'delete kernel' confirmation dialog is shown."""
        try:
            tbs_title = (By.XPATH, "//android.widget.TextView[contains(@text,'删除内核')]")
            WebDriverWait(self.driver, 15, 0.5).until(EC.presence_of_element_located(tbs_title))
            return True
        except:
            return False

    @teststeps
    def wait_check_find_exp(self):
        """Check point: the search page element (by resource id) is present."""
        try:
            tbs_title = (By.ID, "com.tencent.mm:id/ht")
            WebDriverWait(self.driver, 15, 0.5).until(EC.presence_of_element_located(tbs_title))
            return True
        except:
            return False

    @teststep
    def launch_app(self):
        """Start on the device the application specified in the desired capabilities.
        """
        # adb intent launch rather than appium, to avoid resetting app state.
        os.system("adb shell am start -n com.tencent.mm/com.tencent.mm.ui.LauncherUI/")
        time.sleep(5)  # give WeChat time to reach its home screen

    @teststep
    def close_app(self):
        """Close on the device the application specified in the desired capabilities.
        """
        os.system('adb shell am force-stop com.tencent.mm')

    @teststeps
    def app_status(self):
        """Determine the app's current screen and recover to the WeChat home
        screen if it is somewhere unexpected."""
        if self.wait_check_wx():  # on the WeChat home screen
            print('微信主界面:')
            # self.clear_tbs()
        elif self.home.wait_check_parent_title():  # parent-side home screen
            print('家长端 主界面:')
        else:
            # Unknown state: restart the app and re-check.
            print('其他情况:')
            self.close_app()
            self.launch_app()
            if self.wait_check_wx():
                print('微信主界面:')

    @teststep
    def chat_test1_click(self):
        """Tap the pinned friend 'test1' (first chat entry)."""
        self.driver.find_elements_by_id("com.tencent.mm:id/np")[0].click()

    @teststep
    def tbs_link_click(self):
        """Tap the TBS link sent by test1 in the chat."""
        self.driver.find_element_by_id("com.tencent.mm:id/lz").click()

    @teststep
    def click_clear_tbs_btn(self):
        """Tap the 'clear TBS kernel' option on the TBS debug page."""
        self.driver.find_element_by_xpath("//android.widget.TextView[contains(@text,'清除TBS内核')]").click()

    @teststep
    def confirm_delete(self):
        """Confirm the kernel deletion dialog."""
        self.driver.find_element_by_id("android:id/button1").click()
        time.sleep(2)

    @teststep
    def back_to_test1(self):
        """Tap the close button (X) to return to the chat window."""
        self.driver.find_element_by_id("com.tencent.mm:id/j7").click()

    @teststep
    def back_to_wx_home(self):
        # Back arrow from the chat to the WeChat home screen.
        self.driver.find_element_by_id("com.tencent.mm:id/iz").click()

    @teststep
    def clear_tbs(self):
        """Walk into the TBS debug page, clear the X5 kernel, and navigate
        back to the WeChat home screen, verifying each transition."""
        self.chat_test1_click()
        if self.wait_check_test1():
            self.tbs_link_click()  # open the TBS link
            if self.wait_check_tbs():
                self.click_clear_tbs_btn()  # tap clear-TBS
                if self.wait_check_delete_x5core():
                    self.confirm_delete()  # confirm deletion
                    self.back_to_test1()  # leave the TBS page
                    if self.wait_check_test1():
                        self.back_to_wx_home()  # leave the chat
                        if self.wait_check_wx():
                            print("已清除TBS内核\n")

    @teststeps
    def clear_tbs_to_retry(self):
        """If the X5 kernel has been restored, back out of the official
        account, clear the kernel again, and re-enter; otherwise just back out."""
        if "QQ浏览器X5内核提供技术支持" in self.home.driver.page_source:
            print("X5内核已恢复,需重新清除TBS")
            self.home.back_to_club_home()  # go back
            if self.home.wait_check_parent_title():  # parent official-account home check
                self.home.back_to_find()  # leave the official account
                self.home.back_to_wx_home()  # leave the search page
                self.clear_tbs()  # clear the kernel
                self.home.click_sub()  # re-enter the official account
        else:
            print("X5内核未恢复,但依然未发现元素")
            self.home.back_to_club_home()

    @teststeps
    def check_login_error_info(self, toast_info):
        """Check the login toast message; empty string means no toast expected."""
        if toast_info == '':
            pass
        elif self.toast.find_toast_by_xpath(toast_info):
            print(toast_info)
        else:
            print("未发现错误提示信息", toast_info)
|
3,504 | b0bc55ab05d49605e2f42ea036f8405727c468d2 | import pandas
from sklearn.externals import joblib
import TrainTestProcesser
from sklearn.ensemble import RandomForestClassifier
from Select_OF_File import get_subdir
import matplotlib.pyplot as mp
import sklearn.model_selection as ms
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import numpy as np
import itertools
def main():
    """Train a random-forest classifier on dataset.csv, cross-validate it,
    and persist the fitted model to RFC_model.plk."""
    # Load the data set; do not use the first column as the row index.
    data_set = pandas.read_csv("dataset.csv", index_col=False, encoding='gbk')
    print("数据集的shape:", data_set.shape)
    # Split the data set into features x and labels y.
    dnumpy_x, dnumpy_y = TrainTestProcesser.split_dframe_x_y(data_set)
    # Split into train/test folds with StratifiedKFold.
    folds = TrainTestProcesser.split_dnumpy_train_test(dnumpy_x, dnumpy_y)
    # Build the model.
    model = RandomForestClassifier(n_estimators=23)
    # K-fold cross validation.
    TrainTestProcesser.apply_SKfold(model, folds)
    # Fit on the full data.
    TrainTestProcesser.train_model(model, dnumpy_x, dnumpy_y)
    # Save the model for later use.
    joblib.dump(model, "RFC_model.plk")
def getconfusion_matrix():
    """Evaluate the saved random-forest model on a fresh 75/25 split, print a
    classification report, and plot the normalised confusion matrix."""
    # Use a font family that can render the Chinese plot title.
    mp.rcParams['font.family'] = ['sans-serif']
    mp.rcParams['font.sans-serif'] = ['SimHei']
    # Class names are the sub-directories of the audio folder.
    classes = get_subdir("音频文件")
    data_set = pandas.read_csv("dataset.csv", index_col=False, encoding='gbk')
    dnumpy_x, dnumpy_y = TrainTestProcesser.split_dframe_x_y(data_set)
    # Fixed random_state so the evaluation split is reproducible.
    train_x, test_x, train_y, test_y = ms.train_test_split(dnumpy_x, dnumpy_y, test_size=0.25, random_state=7)
    model = joblib.load("RFC_model.plk")
    pred_test_y = model.predict(test_x)
    # Confusion matrix.
    cm = confusion_matrix(test_y, pred_test_y)
    # Classification report.
    r = classification_report(test_y, pred_test_y)
    print('分类报告为:', r, sep='\n')
    mp.figure()
    plot_confusion_matrix(cm, classes=classes, normalize=True,
                          title='随机森林分类器混淆矩阵')
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix',
                          cmap=mp.cm.Blues):
    """Render *cm* as an annotated heatmap and save it as
    'confusion_matrix_RFC.png'.

    cm:        2-D confusion matrix (counts)
    classes:   axis tick labels, one per class
    normalize: when True, convert each row to fractions of its row sum
    """
    if normalize:
        # Per-row normalisation: cells become recall-style fractions.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("混淆矩阵归一化")
    else:
        print('混淆矩阵未归一化')
    print("混淆矩阵为:", cm)
    mp.imshow(cm, interpolation='nearest', cmap=cmap)
    mp.title(title)
    mp.colorbar()
    positions = np.arange(len(classes))
    mp.xticks(positions, classes, rotation=45)
    mp.yticks(positions, classes)
    cell_fmt = '.2f' if normalize else 'd'
    cutoff = cm.max() / 2.
    n_rows, n_cols = cm.shape[0], cm.shape[1]
    for r in range(n_rows):
        for col in range(n_cols):
            # White text on dark cells, black on light ones.
            mp.text(col, r, format(cm[r, col], cell_fmt),
                    horizontalalignment="center",
                    color="white" if cm[r, col] > cutoff else "black")
    mp.tight_layout()
    mp.ylabel('True label')
    mp.xlabel('Predicted label')
    mp.savefig('confusion_matrix_RFC.png', format='png')
    mp.show()
if __name__ == "__main__":
    # Train + persist the model, then evaluate and plot its confusion matrix.
    main()
    getconfusion_matrix()
3,505 | 8f971ee3b98691a887ee0632afd613bbf4f19aa0 | import pytest
from homeworks.homework6.oop_2 import (
DeadLineError,
Homework,
HomeworkResult,
Student,
Teacher,
)
def test_creating_objects():
    """The factory methods return instances of the expected homework classes."""
    teacher = Teacher("Daniil", "Shadrin")
    student = Student("Roman", "Petrov")
    homework = teacher.create_homework("Learn OOP", 1)
    homework_result = student.do_homework(homework, "I have done this hw")
    assert isinstance(teacher, Teacher)
    assert isinstance(student, Student)
    assert isinstance(homework, Homework)
    assert isinstance(homework_result, HomeworkResult)
def test_do_homework_exception():
    """Submitting against a 0-day deadline raises DeadLineError."""
    teacher = Teacher("Daniil", "Shadrin")
    student = Student("Lev", "Sokolov")
    # A homework created with 0 days to complete is already expired.
    homework = teacher.create_homework("Learn OOP", 0)
    with pytest.raises(DeadLineError, match=r"You are late"):
        student.do_homework(homework, "I have done this hw")
def test_creating_and_resetting_homework_results_by_teacher():
    """Accepted results are stored in the class-level homework_done registry;
    rejected ones are not; reset_results removes one or all entries."""
    teacher = Teacher("Daniil", "Shadrin")
    student = Student("Roman", "Petrov")
    # Accepted solution is recorded under its homework.
    homework_1 = teacher.create_homework("Learn OOP", 1)
    homework_1_result = student.do_homework(homework_1, "I have done this hw")
    assert teacher.check_homework(homework_1_result) is True
    assert homework_1_result in teacher.homework_done[homework_1]
    # Too-short solution ("zero") is rejected and leaves no entry.
    homework_2 = teacher.create_homework("homework 2", 1)
    homework_2_result = student.do_homework(homework_2, "zero")
    assert teacher.check_homework(homework_2_result) is False
    assert teacher.homework_done.get(homework_2) is None
    homework_3 = teacher.create_homework("homework 3", 1)
    homework_3_result = student.do_homework(homework_3, "I have done this hw")
    assert teacher.check_homework(homework_3_result) is True
    assert homework_3_result in teacher.homework_done.get(homework_3)
    assert len(teacher.homework_done) == 2
    # reset_results(hw) drops one homework; reset_results() drops everything.
    Teacher.reset_results(homework_3)
    assert len(teacher.homework_done) == 1
    Teacher.reset_results()
    assert len(teacher.homework_done) == 0
|
3,506 | bc32518e5e37d4055f1bf5115953948a2bb24ba6 | import sys
from reportlab.graphics.barcode import code39
from reportlab.lib.pagesizes import letter
from reportlab.lib.units import mm
from reportlab.pdfgen import canvas
from parseAccessionNumbers import parseFile
def main():
    """Read accession numbers from the file named on the command line and lay
    them out as Code39 barcodes, 32 per letter-size page, in
    barcode_example.pdf with a page/count footer on every page.
    (Python 2 script.)"""
    if len(sys.argv) <= 1:
        print "No filepath argument passed."
        return
    c = canvas.Canvas("barcode_example.pdf", pagesize=letter)
    accessionNumberList = parseFile(sys.argv[1])
    # Page specs
    totalHeight = 265 * mm
    xColumnMargin = 70 * mm
    yBarcodeMargin = 20 * mm
    # Specs for lower right status info
    xPageStatus = 165 * mm
    yPageStatus = 17 * mm
    yBarcodeStatus = 12 * mm
    # Initial values
    x = 1 * mm
    y = totalHeight
    x1 = 6.4 * mm
    # Initialize barcode counts and page counts
    currentBarcodeTotalCount = 0
    currentPageCount = 0
    currentPage = 1
    # Ceiling of len/32 gives the page count.
    totalPages = int(len(accessionNumberList) / 32)
    if len(accessionNumberList) % 32 > 0:
        totalPages += 1
    for accessionNumber in accessionNumberList:
        # Every 32 barcodes, stamp the footer and start a new page.
        if currentBarcodeTotalCount % 32 == 0 and currentBarcodeTotalCount != 0:
            c.drawString(xPageStatus, yPageStatus, "Page " + str(currentPage) + " of " + str(totalPages))
            c.drawString(xPageStatus, yBarcodeStatus, str(currentPageCount) + " barcodes")
            c.showPage()
            # Reset values for a new page
            x = 1 * mm
            y = totalHeight
            x1 = 6.4 * mm
            currentPageCount = 0
            # Increase to next page
            currentPage += 1
        currentBarcodeTotalCount += 1
        currentPageCount += 1
        barcode = code39.Extended39(accessionNumber)
        # Draw the barcode on the canvas
        barcode.drawOn(c, x, y)
        x1 = x + 6.4 * mm
        y -= 5 * mm
        # Draw the actual string
        c.drawString(x1, y, accessionNumber)
        x = x  # no-op; presumably left over from an earlier layout tweak
        y -= yBarcodeMargin
        # Column full: shift right one column and restart at the top.
        if int(y) < 20:
            x += xColumnMargin
            y = totalHeight
    # Footer for the final (possibly partial) page.
    c.drawString(xPageStatus, yPageStatus, "Page " + str(currentPage) + " of " + str(totalPages))
    c.drawString(xPageStatus, yBarcodeStatus, str(currentPageCount) + " barcodes")
    c.showPage()
    c.save()
    print "File successfully created"
main()
|
3,507 | c199b2f87b7a4ac820001dab13f24fdd287a1575 | # https://py.checkio.org/blog/design-patterns-part-1/
class ImageOpener(object):
    # Interface: concrete subclasses "open" *filename* with a suitable program.
    @staticmethod
    def open(filename):
        raise NotImplementedError()
class PNGImageOpener(ImageOpener):
    # Handler for .png files.
    @staticmethod
    def open(filename):
        print('PNG: open with Paint')
class JPEGImageOpener(ImageOpener):
    # Handler for .jpg / .jpeg files.
    @staticmethod
    def open(filename):
        print('JPG/JPEG: open with ImageViewer')
class SVGImageOpener(ImageOpener):
    # Handler for .svg files.
    @staticmethod
    def open(filename):
        print('SVG: open with Illustrator')
class UnknownImageOpener(ImageOpener):
    # Fallback handler used when no program is registered for the extension.
    @staticmethod
    def open(filename):
        # Fixed user-facing message: "don't hame program" -> "don't have a program".
        print("You don't have a program for %s extension" % filename.split('.')[-1].upper())
class Image(object):
    """Pairs the bytes produced by an opener with the file's name."""

    # Extension -> opener dispatch table; anything unlisted falls back to
    # UnknownImageOpener.
    _OPENERS = {
        'png': PNGImageOpener,
        'jpg': JPEGImageOpener,
        'jpeg': JPEGImageOpener,
        'svg': SVGImageOpener,
    }

    @classmethod
    def open_file(cls, filename):
        """Pick the opener matching the file extension and build an Image."""
        ext = filename.split('.')[-1]
        opener = cls._OPENERS.get(ext, UnknownImageOpener)
        data = opener.open(filename)
        return cls(data, filename)

    def __init__(self, byterange, filename):
        self._byterange = byterange
        self._filename = filename
# Demo: each call dispatches to the opener that matches the extension;
# the last one hits the unknown-extension fallback.
Image.open_file('picture.png')
Image.open_file('picture.jpg')
Image.open_file('picture.svg')
Image.open_file('picture.raw')
|
3,508 | c54a046ebde1be94ec87061b4fba9e22bf0f4d0a | from e19_pizza import *
print("\n----------导入模块中的所有函数----------")
# Because every function was imported, each one can be called by name
# directly, without dotted module notation.
make_pizza(16, 'pepperoni')
make_pizza(12, 'mushrooms', 'green peppers', 'extra cheese')
# Note:
# When using a large module you did not write yourself, it is best to avoid
# this import style: if the module contains a function whose name matches
# one already used in your project, the results can be unexpected.  Python
# may encounter several identically named functions or variables and
# overwrite them instead of importing everything separately.
# Best practice:
# import only the functions you need, or import the whole module and use
# dotted notation.
|
3,509 | 3f86227afd60be560ac3d4ce2bee1f6cf74a744d | from flask_admin.contrib.sqla import ModelView
from flask_admin import Admin
from flask import abort
import flask_login
import logging
from .models import User, sendUserMail, db as userdb
from .box_models import Box, Image, db as boxdb
from .box_queue import BoxQueue
logger = logging.getLogger('labboxmain')
class AuthModel(ModelView):
    """Flask-Admin model view restricted to authenticated users whose
    groupid is 0 (administrators); all other access aborts with HTTP 400."""

    def is_accessible(self):
        # Reject anonymous users outright.
        if not flask_login.current_user.is_authenticated:
            abort(400, "Permission Denied")
            return False  # unreachable (abort raises); kept as a safeguard
        now_user = flask_login.current_user
        # groupid 0 is the administrator group; everyone else is denied.
        if now_user.groupid != 0:
            abort(400, "Permission Denied")
            return False
        # Audit-log every admin page access.
        logger.warning('[Admin] ' + now_user.name)
        return True
class UserModel(AuthModel):
    """Admin view for User rows: creating a user or blanking the password
    field triggers e-mail flows; plaintext passwords are hashed on save."""
    column_list = ["id", "name", "disable", "groupid", "email", "passtime", "quota", "use_quota", "password"]
    column_descriptions = {
        'password': "Password(Left empty for forgot or newly create, It will send email to whom)",
        'passtime': "The time for manually changing password(0 = never)"
    }

    def on_model_change(self, form, model, is_created):
        """Hook run on create/update from the admin form."""
        if is_created:
            logger.warning("[Admin] Create for " + model.email)
            sendUserMail(model, "register")
            return
        if not model.password:
            # Empty password field => treat as a forgotten-password reset.
            logger.warning("[Admin] Reset Password and sent to " + model.email)
            sendUserMail(model, "forgetpass")
            return
        if not model.password.startswith("$6$"):
            # "$6$" prefixes an already-hashed (SHA-512 crypt) value; anything
            # else was typed in plaintext and must be hashed before storing.
            logger.warning("[Admin] Reset Password " + model.email)
            model.setPassword(model.password)
# Register the admin UI: Box/Image/BoxQueue use the generic auth-guarded view;
# User gets the password-handling view.
admin = Admin()
admin.add_view(AuthModel(Box, boxdb.session))
admin.add_view(AuthModel(Image, boxdb.session))
admin.add_view(UserModel(User, userdb.session))
admin.add_view(AuthModel(BoxQueue, boxdb.session))
|
3,510 | 5f00cd446b219203c401799ba7b6205c7f1f8e9f | # -*- coding: utf-8 -*-
from numpy import *
def loadDataSet(fileName, delim='\t'):
    """Read a delimited text file of numbers into a numpy matrix.

    fileName: path of the data file, one sample per line
    delim:    field separator (tab by default)
    Returns:  numpy matrix with one row per input line
    """
    # 'with' guarantees the file handle is closed (the original leaked it),
    # and list(map(...)) materialises the floats so this also works on
    # Python 3, where map() returns a lazy iterator that mat() cannot use.
    with open(fileName) as fr:
        stringArr = [line.strip().split(delim) for line in fr.readlines()]
    datArr = [list(map(float, line)) for line in stringArr]
    return mat(datArr)
def pca(dataMat, topNfeat=9999999):
    """Principal component analysis of *dataMat*.

    dataMat:  samples in rows, features in columns (numpy matrix)
    topNfeat: number of principal components to keep (default: all)
    Returns:  (lowDDataMat, reconMat) — the data projected onto the top
              components, and its reconstruction in the original space.
    """
    # Centre each feature on its mean.
    avg = mean(dataMat, axis=0)
    centered = dataMat - avg
    # Eigen-decompose the covariance matrix of the centred data.
    eigVals, eigVects = linalg.eig(mat(cov(centered, rowvar=False)))
    # argsort is ascending, so the topNfeat largest eigenvalues are taken
    # from the tail, reversed.
    keep = argsort(eigVals)[:-(topNfeat + 1):-1]
    basis = eigVects[:, keep]
    # Coordinates in the reduced space, then the back-projection.
    lowDDataMat = centered * basis
    reconMat = (lowDDataMat * basis.T) + avg
    return lowDDataMat, reconMat
def replaceNanWithMean(fileName='secom.data.txt', delim=' '):
    """Load a data file and replace every NaN with its column's mean.

    fileName: path of the data file (default keeps the original call
              behaviour, so existing zero-argument calls still work)
    delim:    field separator.  The original passed '' which makes
              str.split raise ValueError unconditionally; the SECOM data
              file is space-separated, so ' ' is the default — TODO confirm
              against the actual file.
    Returns:  numpy matrix with NaNs imputed column-by-column
    """
    dataMat = loadDataSet(fileName, delim)
    numFeat = shape(dataMat)[1]
    for i in range(numFeat):
        # Mean over the non-NaN entries of column i.
        meanVal = mean(dataMat[nonzero(~isnan(dataMat[:, i].A))[0], i])
        # Overwrite the NaN entries of column i with that mean.
        dataMat[nonzero(isnan(dataMat[:, i].A))[0], i] = meanVal
    return dataMat
|
3,511 | 79522db1316e4a25ab5a598ee035cf9b9a9a9411 | import torch
from torch import nn
from torch.nn import functional as F
import torchvision
import math
from torchvision.models.resnet import Bottleneck
from dataset import load_image, load_text, ALPHABET, MAX_LEN
class ResNetFeatures(nn.Module):
    """ResNet-50 backbone truncated after layer3, used as the visual feature
    extractor for (single-channel) handwritten text-line images."""

    def __init__(self, pretrained=True):
        super().__init__()
        # Input images x of handwritten text-lines, which might have
        # arbitrary lengths, are first processed by a Convolutional
        # Neural Network. We obtain an intermediate visual feature
        # representation Fc of size f. We use the ResNet50 [26] as
        # our backbone convolutional architecture.
        # Such visual feature representation has a contextualized global view of the
        # whole input image while remaining compact.
        self.resnet = torchvision.models.resnet50(pretrained=pretrained)
        # self.resnet.inplanes = 512
        # self.layer3 = self.resnet._make_layer(Bottleneck, 256, 6, stride=1, dilate=False)

    def forward(self, x):
        # From https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
        # Grayscale input is repeated to 3 channels for the RGB-trained stem.
        x = self.resnet.conv1(x.repeat(1, 3, 1, 1))
        x = self.resnet.bn1(x)
        x = self.resnet.relu(x)
        x = self.resnet.maxpool(x)
        x = self.resnet.layer1(x)
        x = self.resnet.layer2(x)
        x = self.resnet.layer3(x)
        # layer4 / avgpool / fc are intentionally skipped: the feature map is
        # consumed column-wise by the transformer downstream.
        return x
class PositionalEncoding(nn.Module):
    """Adds the fixed sinusoidal position table of "Attention Is All You
    Need" to a (seq_len, batch, d_model) tensor, then applies dropout."""

    def __init__(self, d_model, max_len, dropout=0.1):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        freqs = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * freqs)  # even dims: sine
        table[:, 1::2] = torch.cos(positions * freqs)  # odd dims: cosine
        # Shape (max_len, 1, d_model) so it broadcasts over the batch axis;
        # a buffer moves with .to(device) but is not a trainable parameter.
        self.register_buffer('pe', table.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        """x: (seq_len, batch, d_model) -> same shape with positions added."""
        return self.dropout(x + self.pe[:x.size(0), :])
class TransformerHTR(nn.Module):
    """Handwritten-text line recognizer: ResNet visual features plus
    positional encodings feed an (optional) Transformer encoder, and a
    Transformer decoder emits the character sequence autoregressively.

    alphabet: dict mapping characters (incl. <S>/<E>/<P> specials) to ids.
    """

    def __init__(self, alphabet, freeze_resnet=False, use_encoder=False, dict_size=83, f=1024, num_layers=4, num_heads=8, dropout=0.1, text_len=100):
        super(TransformerHTR, self).__init__()
        # (Visual Feature) Encoder
        self.resnet = ResNetFeatures()
        if freeze_resnet:
            print('Freezing-resnet')
            for param in self.resnet.resnet.parameters():
                param.requires_grad = False
        # Project flattened (channels*height) column features down to f.
        self.fc = nn.Linear(f*4, f)
        # 140: assumed maximum number of feature-map columns — TODO confirm.
        self.pe_encode = PositionalEncoding(f, 140, dropout)
        self.fc_bar = nn.Linear(f, f)
        if use_encoder:
            print('Transformer Encoder')
            encoder_layers = nn.TransformerEncoderLayer(f, num_heads, f, dropout)
            self.transformer_encoder = nn.TransformerEncoder(encoder_layers, num_layers)
        else:
            print('Identity encoder')
            self.transformer_encoder = nn.Identity()
        self.layer_norm = nn.LayerNorm(f)  # defined but not used in encode()
        print('freeze-resnet', freeze_resnet)
        print('use_encoder', use_encoder)
        # (Text Transcriber) Decoder
        self.ebl = nn.Embedding(dict_size, f)
        self.pe_decode = PositionalEncoding(f, text_len, dropout)
        decoder_layer = nn.TransformerDecoderLayer(d_model=f, nhead=num_heads, dim_feedforward=f, dropout=dropout)
        self.transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=num_layers)
        self.linear = nn.Linear(f, dict_size)
        # General
        self.f = f
        self.text_len = text_len
        self.alphabet = alphabet
        self.inv_alphabet = {j: i for i, j in alphabet.items()}  # id -> char
        self.init_weights()

    def init_weights(self):
        """Small uniform init for the linear/embedding layers; zero biases."""
        initrange = 0.1
        self.fc.bias.data.zero_()
        self.fc.weight.data.uniform_(-initrange, initrange)
        self.fc_bar.bias.data.zero_()
        self.fc_bar.weight.data.uniform_(-initrange, initrange)
        self.ebl.weight.data.uniform_(-initrange, initrange)
        self.linear.bias.data.zero_()
        self.linear.weight.data.uniform_(-initrange, initrange)

    def generate_square_subsequent_mask(self, sz):
        """Causal attention mask (sz, sz): -inf above the diagonal, 0 elsewhere."""
        mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
        return mask

    def encode(self, x):
        """Image batch -> memory sequence for the decoder."""
        x = self.resnet(x)
        b, f, h, w = x.size()
        # Fold height into the channel axis so each image column becomes one
        # sequence element: (b, f*h, w) -> (b, w, f*h).
        x = x.view(b, f*h, w).permute(0, 2, 1)
        # x = F.relu(self.fc(x))
        x = self.fc(x)
        # Positional encoding expects (seq, batch, feat).
        x = self.pe_encode(x.permute(1, 0, 2))
        # x = F.relu(self.fc_bar(x))
        x = self.fc_bar(x)
        x = self.transformer_encoder(x)
        # x = self.layer_norm(x)
        return x

    def decode(self, x, y):
        """Teacher-forced decode of token sequence x against memory y."""
        # Mask padding positions so decoder attention ignores them.
        kpm = (x == self.alphabet['<P>']).transpose(1, 0)
        x = self.ebl(x)*math.sqrt(self.f)
        x = self.pe_decode(x)
        dim = x.size()[0]
        a = self.generate_square_subsequent_mask(dim).to(x.device)
        x = self.transformer_decoder(x, y, a, tgt_key_padding_mask=kpm)
        # (seq, batch, dict) -> (batch, seq, dict)
        return self.linear(x).permute(1, 0, 2)

    def forward(self, x, y):
        """x: target token tensor, y: image batch -> per-token logits."""
        return self.decode(x, self.encode(y))

    @torch.no_grad()
    def to_text_(self, x, bulk=True):
        """Map one id sequence to characters.  When bulk is False, stop at
        <E>, skip <S>/<P>, and return a joined string; when True, keep every
        position and return the raw character list."""
        txt = []
        p = {self.alphabet["<E>"]}
        s = {self.alphabet["<S>"], self.alphabet["<P>"]}
        for idx in x:
            if not bulk:
                if idx in p:
                    break
                if idx in s:
                    continue
            txt.append(self.inv_alphabet[idx])
        return (txt if bulk else "".join(txt))

    @torch.no_grad()
    def to_text(self, x, bulk=False):
        """Decode a 1-D or 2-D id tensor to text (a list of rows when 2-D)."""
        x = x.cpu().numpy()
        if len(x.shape) == 2:
            return [self.to_text_(x[i], bulk=bulk) for i in range(x.shape[0])]
        else:
            return self.to_text_(x, bulk=bulk)

    @torch.no_grad()
    def gen(self, y, bulk=False):
        """Greedy autoregressive generation from an image batch y."""
        y = self.encode(y)
        # Start every row with <S>; remaining slots hold <P> until filled.
        output_tokens = torch.full((y.size()[1], self.text_len), self.alphabet["<P>"]).long()
        output_tokens[:, 0] = self.alphabet["<S>"]
        output_tokens = output_tokens.to(y.device)
        for j in range(1, self.text_len):
            x = output_tokens[:, :j].permute(1, 0)
            x = self.decode(x, y)
            a = torch.argmax(x, dim=-1)
            # Append the most likely next token for each row.
            output_tokens[:, j] = a[:,-1]
        if bulk:
            return self.to_text(output_tokens[:, 1:], bulk=True), output_tokens
        else:
            return self.to_text(output_tokens)
# DEBUG
import os
import torchvision
import numpy as np
from torchvision.transforms.functional import resize, pil_to_tensor
import PIL
def load_batch_image(max_img=2):
    # Stack debug images 1..max_img from debug-data/ into a single batch with
    # an explicit channel dimension: (max_img, 1, H, W).
    return torch.cat([load_image(os.path.join('debug-data', f"{i}.png")) for i in range(1, max_img+1)], dim=0).unsqueeze(1)
# Running character -> index mapping built up by get().
character_dict = dict()


def get(x):
    """Return the index assigned to character *x*, allocating the next free
    index (current dict size) the first time *x* is seen."""
    if x in character_dict:
        return character_dict[x]
    idx = len(character_dict)
    character_dict[x] = idx
    return idx
# Ground-truth transcriptions for the two debug images ('|' separates words).
TXT = ["A|MOVE|to|stop|Mr.|Gaitskell|from", "nominating|any|more|Labour|life|Peers"]


def load_text_tensor(txt):
    # Encode one transcription as a (len, 1) LongTensor of alphabet ids.
    return torch.LongTensor([ALPHABET[t] for t in load_text(txt)]).unsqueeze(1)


def load_batch_text():
    # Batch the two debug transcriptions along dim 1 -> (len, 2).
    return torch.cat([load_text_tensor(TXT[i]) for i in range(2)], dim=1)
if __name__ == "__main__":
    # Smoke test: teacher-forced forward pass + backprop + greedy generation
    # on the two debug images and their transcriptions.
    transformer = TransformerHTR(ALPHABET, text_len=MAX_LEN)
    bt = load_batch_text()
    print(bt.size())
    # Logits for input tokens (all but beyond text_len) against the images.
    b = transformer(bt[0:transformer.text_len, :], load_batch_image())
    criterion = nn.CrossEntropyLoss()
    loss = 0
    # Targets are the inputs shifted by one position.
    trgt = bt[1:, :]
    for i in range(trgt.size()[1]):
        loss += criterion(b[i], trgt[:, i])
    loss.backward()
    out = transformer.gen(load_batch_image())
    print(out)
3,512 | 7301a521586049ebb5e8e49b604cc96e3acc1fe9 | import os, pygame
import sys
from os import path
from random import choice
WIDTH = 1000
HEIGHT = 800
FPS = 60
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
GRAY80 = (204, 204, 204)
GRAY = (26, 26, 26)
screen = pygame.display.set_mode((1000, 800))
game_folder = os.path.dirname(__file__)
img_folder = os.path.join(game_folder, "img")
def draw_text(surf, text, size, x, y):
    """Render *text* in white OCR A Extended at the given size onto *surf*,
    horizontally centred with its top edge at (x, y)."""
    font_name = pygame.font.match_font('OCR A Extended')
    font = pygame.font.Font(font_name, size)
    text_surface = font.render(text, True, WHITE)
    text_rect = text_surface.get_rect()
    # Position by the midpoint of the top edge.
    text_rect.midtop = (x, y)
    surf.blit(text_surface, text_rect)
def button(msg, x, y, w, h, ic, ac, action=None):
    """Draw a rectangular button that highlights on hover and runs *action*
    on left-click.

    msg:        label text (unused here; the caller draws the caption)
    x, y, w, h: button rectangle
    ic, ac:     idle and hover (active) colours
    action:     callable run while clicked; `quit` shuts pygame down and exits
    """
    mouse = pygame.mouse.get_pos()
    click = pygame.mouse.get_pressed()
    # NOTE: the original printed `click` on every frame, flooding the console;
    # that leftover debug trace has been removed.
    if x + w > mouse[0] > x and y + h > mouse[1] > y:
        # Hovered: draw with the active colour.
        pygame.draw.rect(screen, ac, (x, y, w, h))
        if click[0] == 1 and action is not None:
            if action == quit:
                pygame.quit()
                quit()
    else:
        pygame.draw.rect(screen, ic, (x, y, w, h))
def main():
    """Show the credits screen with a hover-highlighted EXIT button until the
    window is closed or EXIT is clicked."""
    # Initialise screen
    pygame.init()
    pygame.mixer.init()
    screen = pygame.display.set_mode((1000, 800))
    pygame.display.set_caption('Credits')
    # Fill background
    background = pygame.image.load(os.path.join(img_folder, "STARS1.jpg")).convert_alpha()
    clock = pygame.time.Clock()
    start_ticks = pygame.time.get_ticks()
    screen.blit(background, (0, 0))
    pygame.display.flip()
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                return
                quit()  # unreachable after return; kept as in the original
        screen.blit(background, (0, 0))
        pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))
        draw_text(screen, "Credits", 60, 500, 100)
        draw_text(screen, "Vincent", 30, 500, 250)
        draw_text(screen, "Chevery", 30, 500, 330)
        draw_text(screen, "Charlie", 30, 500, 410)
        draw_text(screen, "Julian", 30, 500, 490)
        draw_text(screen, "Sheriyar", 30, 500, 570)
        draw_text(screen, "Julian", 30, 500, 650)
        # Redraw the EXIT rectangle lighter while the mouse hovers it.
        mouse = pygame.mouse.get_pos()
        if 400+190 > mouse[0] > 400 and 650+60 > mouse[1] > 650:
            pygame.draw.rect(screen, GRAY80, (400, 650, 190, 60))
        else:
            pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))
        draw_text(screen, "EXIT", 40, 488, 660)
        #screen.blit(arrow, imagerect)
        # button() handles the actual click -> quit behaviour.
        button("EXIT", 400, 650, 190, 60, GRAY, GRAY80, quit)
        pygame.display.flip()
if __name__ == '__main__':
    main()
    # Shut pygame down cleanly once the credits loop returns.
    pygame.quit()
|
3,513 | aed6e1966d9e4ce7250ae3cacaf8854cab4b590c | from nltk.tokenize import RegexpTokenizer
token = RegexpTokenizer(r'\w+')
from nltk.corpus import stopwords
# with open('microsoft.txt','r+',encoding="utf-8") as file:
# text = file.read()
# text = '''
# Huawei Technologies founder and CEO Ren Zhengfei said on Thursday the Chinese company is willing to license its Ren told reporters he was not afraid of creating a rival by making Huawei's technology available to competitors, and the offer could also include chip design know-how.Huawei, the world's largest telecoms gear maker, has been on a US trade blacklist since May over concerns that its equipment could be used by Beijing to spy. Huawei has repeatedly denied such allegations.The sanctions cut off Huawei's access to essential US technologies. The latest version of its Mate 30 flagship phone, unveiled last week in Europe, will not come with Google Mobile Services.Ren's remarks come after he said this month that he is open to selling the firm's 5G technology - including patents, code, blueprints and production know-how - to Western firms for a one-off fee.The offer to license out 5G technology marks the latest attempt by Huawei, also the world's No.2 smartphone vendor, to minimise the impact of the US trade ban. It expects a drop of some $10bn in revenue from its phone business this year.
# '''
def word_freq_improved_summarize(text):
    """Extractive summary of *text* (under 240 chars) by weighted word frequency.

    Sentences are scored by the summed, max-normalised frequency of their
    non-stopword words, with a positional bonus of up to 10% for sentences
    far from the middle of the document.  The highest-scoring sentences are
    concatenated while the running summary stays under 240 characters.
    """
    sen = text.split('.')
    # Normalise case.
    small = [s.lower() for s in sen]
    # Strip punctuation via the module-level \w+ tokenizer.
    punc_free = []
    for p in small:
        punc_free.extend(token.tokenize(p))
    # Drop English stopwords.
    stop_words = set(stopwords.words('english'))
    words = [x for x in punc_free if x not in stop_words]
    # Weighted frequency: count each word once (O(n), instead of the original
    # words.count-in-a-loop which was O(n^2)), then normalise by the max.
    wgt = {}
    for x in words:
        wgt[x] = wgt.get(x, 0) + 1
    max_freq = max(wgt.values())
    for x in wgt.keys():
        wgt[x] = wgt[x] / max_freq
    # Score each sentence ('score' instead of shadowing the builtin sum()).
    order = {}
    avg = len(sen) / 2
    for i in range(len(sen)):
        score = 0
        for w in sen[i].split():
            current = (str(token.tokenize(w))[2:-2]).lower()
            if current in wgt:
                score += wgt[current]
        # Positional bonus grows with distance from the middle sentence.
        order[sen[i]] = score * (1 + 0.1 * abs(avg - i) / avg)
    sorted_sen = dict(sorted(order.items(), key=lambda x: x[1], reverse=True))
    final_summary = ""
    # Greedily take the best remaining sentence while it fits in 240 chars;
    # an over-long sentence is skipped only if nothing has been chosen yet.
    while sorted_sen:
        summ = max(sorted_sen, key=lambda x: sorted_sen[x])
        if (len(final_summary) + len(summ)) < 240:
            final_summary += summ
            del sorted_sen[summ]
        else:
            if len(final_summary) < 1:
                del sorted_sen[summ]
                continue
            else:
                break
    return final_summary
if __name__ == "__main__":
with open('./passages/harmonyos.txt','r+',encoding="utf-8") as file:
text = file.read()
word_freq_improved_summarize(text)
|
3,514 | 6c98be473bf4cd458ea8a801f8b1197c9d8a07b3 | import serial
import time
import struct
# Assign Arduino's serial port address
# Windows example
# usbport = 'COM3'
# Linux example
# usbport = '/dev/ttyUSB0'
# MacOSX example
# usbport = '/dev/tty.usbserial-FTALLOK2'
# basically just see what ports are open - >>> ls /dev/tty*
# Set up serial baud rate
usbport = '/dev/ttyS3'
ser = serial.Serial(usbport,9600,timeout=1)
# time.sleep is necessary - it takes some time to open serial port
time.sleep(2)
def write(i):
    # Send one 3-byte frame over the serial port: 0xFF header, then 0, then i.
    # '>BBB' packs three unsigned bytes, so i must be in 0-255.
    # Presumably (start marker, channel, value) -- TODO confirm against the
    # Arduino sketch on the other end.
    ser.write(struct.pack('>BBB',255,0,i))
write(0)
time.sleep(1)
|
3,515 | 93b12d1e936331c81522790f3f45faa3383d249e | #!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 19:47, 08/04/2020 %
# %
# Email: nguyenthieu2102@gmail.com %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieunguyen5991 %
#-------------------------------------------------------------------------------------------------------%
from mealpy.evolutionary_based.FPA import BaseFPA
from opfunu.cec_basic.cec2014_nobias import *
## Setting parameters
objective_func = F1
problem_size = 30
domain_range = [-15, 15]
log = True
epoch = 100
pop_size = 50
p = 0.8
md1 = BaseFPA(objective_func, problem_size, domain_range, log, epoch, pop_size, p)
best_pos1, best_fit1, list_loss1 = md1._train__()
print(best_fit1) |
3,516 | 01153a695b4744465b706acb4c417217c5e3cefd | from django.db import models
import os
from uuid import uuid4
class Card_profile(models.Model):
def path_and_rename(self, filename):
upload_to = 'uploads'
ext = filename.split('.')[-1]
filename = '{}.{}'.format(uuid4().hex, ext)
return os.path.join(upload_to, filename)
MALE = 'M'
FEMALE = 'F'
CHOICES_GENDER = (
(MALE, 'M'),
(FEMALE, 'F'),
)
username = models.CharField(max_length=255, unique=True)
repository_name = models.CharField(max_length=255, unique=True)
page_title = models.CharField(max_length=255)
description = models.CharField(max_length=255)
baseurl = models.CharField(max_length=255, default="/")
url = models.URLField(max_length=200, unique=True)
avatar = models.ImageField(upload_to=path_and_rename, height_field=None, width_field=None,
max_length=255, blank=True, null=True)
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
gender = models.CharField(max_length=1, choices=CHOICES_GENDER)
title = models.CharField(max_length=255, blank=True, null=True)
company = models.CharField(max_length=255, blank=True, null=True)
email = models.EmailField(max_length=254, unique=True)
phone = models.CharField(max_length=255, blank=True, null=True)
website = models.URLField(max_length=200, blank=True, null=True)
facebook_url = models.URLField(max_length=200, blank=True, null=True)
linkedin_url = models.URLField(max_length=200, blank=True, null=True)
instagram_url = models.URLField(max_length=200, blank=True, null=True)
pinterest_url = models.URLField(max_length=200, blank=True, null=True)
twitter_url = models.URLField(max_length=200, blank=True, null=True)
youtube_url = models.URLField(max_length=200, blank=True, null=True)
snapchat_url = models.URLField(max_length=200, blank=True, null=True)
whatsapp_url = models.URLField(max_length=200, blank=True, null=True)
tiktok_url = models.URLField(max_length=200, blank=True, null=True)
telegram_url = models.URLField(max_length=200, blank=True, null=True)
skype_url = models.URLField(max_length=200, blank=True, null=True)
github_url = models.URLField(max_length=200, blank=True, null=True)
gitlab_url = models.URLField(max_length=200, blank=True, null=True)
markdown = models.CharField(max_length=255, default="kramdown")
def __str__(self):
return self.username
|
3,517 | 56b8b9884b8500ff70f59058484c4a351b709311 | import sys
def ReadFile(array, fileName):
    """Append adjacency-matrix rows from *fileName* into *array* (in place).

    The first line must be the literal header 'MS'; otherwise a warning is
    printed (parsing continues regardless). Every subsequent line with at
    least two whitespace-separated integers becomes one matrix row.
    """
    with open(fileName, 'r') as handle:
        if handle.readline().rstrip() != 'MS':
            print("prosze podac macierz sasiedztwa")
        for line in handle:
            row = [int(tok) for tok in line.rstrip().split()]
            if len(row) > 1:
                array.append(row)
def Prim(matrix, vertex_to_start):
    """Prim's MST over an adjacency matrix; prints the predecessor table.

    A 0 entry in the matrix means "no edge". The start vertex gets the
    marker 'x' instead of a predecessor index in the printed list.
    """
    INF = 100000  # sentinel assumed larger than any edge weight
    best_cost = [INF for _ in range(len(matrix))]
    best_cost[vertex_to_start] = 0
    length = len(matrix)
    # predecessor table (the printed result)
    parent = [0 for _ in range(len(matrix))]
    # vertices already attached to the tree
    done = []
    # vertex currently being expanded
    current = vertex_to_start
    while len(done) != length:
        for v in range(len(matrix[current])):
            # consider only unfinished vertices reachable from `current`
            if v not in done and matrix[current][v] != 0:
                # keep the cheaper attachment edge for v
                if matrix[current][v] < best_cost[v]:
                    best_cost[v] = matrix[current][v]
                    parent[v] = current
        # mark current as finished
        done.append(current)
        # mask it out of the candidate costs
        best_cost[current] = INF
        # next vertex to expand: the cheapest remaining attachment
        current = best_cost.index(min(best_cost))
    # the start vertex has no predecessor
    parent[vertex_to_start] = 'x'
    print(parent)
def main():
    # CLI: expects the adjacency-matrix file path as the only argument.
    if len(sys.argv) < 2:
        print("prosze podac plik")
        sys.exit()
    fileName = sys.argv[1]
    matrix = []
    # ReadFile fills `matrix` in place from the file
    ReadFile(matrix, fileName)
    #print(matrix[1].index(min(matrix[0])))
    # always start the MST from vertex 0
    Prim(matrix, 0)
if __name__ == "__main__":
    main()
3,518 | ae5dfa7fa6a0d7349d6ae29aeac819903facb48f | import sys
import os
import unittest
from wireless.trex_wireless_manager import APMode
from wireless.trex_wireless_manager_private import *
class APInfoTest(unittest.TestCase):
"""Tests methods for the APInfo class."""
def test_init_correct(self):
"""Test the __init__ method when parameters are correct."""
# mocks of files
rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)
ap = APInfo(port_id=1, ip="2.2.2.2", mac="bb:bb:bb:bb:bb:bb", radio_mac="bb:bb:bb:bb:bb:00", udp_port=12345, wlc_ip='1.1.1.1',
gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)
self.assertEqual(ap.ip, '2.2.2.2')
def test_init_no_mac(self):
"""Test the __init__ method when parameter 'mac' is None.
Should raise an AttributeError.
"""
# mocks of files
rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)
with self.assertRaises(ValueError):
ap = APInfo(port_id=1, ip="2.2.2.2", mac=None, radio_mac="bb:bb:bb:bb:bb:00", udp_port=12345, wlc_ip='1.1.1.1',
gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)
def test_init_no_ip(self):
"""Test the __init__ method when parameter 'ip' is None.
Since the field is optional, it should pass.
"""
# mocks of files
rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)
ap = APInfo(port_id=1, ip=None, mac="bb:bb:bb:bb:bb:bb", radio_mac="bb:bb:bb:bb:bb:00", udp_port=12345, wlc_ip='1.1.1.1',
gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)
self.assertEqual(ap.ip, None)
def test_str(self):
"""Test the __str__ method."""
# mocks of files
rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)
ap = APInfo(port_id=1, ip="2.2.2.2", mac="bb:bb:bb:bb:bb:bb", radio_mac="bb:bb:bb:bb:bb:00", udp_port=12345, wlc_ip='1.1.1.1',
gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)
self.assertEqual(str(ap), 'APbbbb.bbbb.bbbb')
self.assertEqual(str(ap), ap.name)
class ClientInfoTest(unittest.TestCase):
"""Tests methods for the ClientInfo class."""
def setUp(self):
# mocks of files
rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)
self.ap = APInfo(port_id=1, ip="2.2.2.2", mac="bb:bb:bb:bb:bb:bb", radio_mac="bb:bb:bb:bb:bb:00", udp_port=12345, wlc_ip='1.1.1.1',
gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)
def test_init_correct(self):
"""Test the __init__ method when parameters are correct."""
client = ClientInfo("cc:cc:cc:cc:cc:cc", ip="3.3.3.3", ap_info=self.ap)
self.assertEqual(client.ip, "3.3.3.3")
self.assertEqual(client.ip_bytes, b'\x03\x03\x03\x03')
def test_init_no_mac(self):
"""Test the __init__ method when mandatory parameter 'mac' is None."""
with self.assertRaises(ValueError):
client = ClientInfo(None, ip="3.3.3.3", ap_info=self.ap)
def test_init_no_ip(self):
"""Test the __init__ method when parameter 'ip' is None.
Since the field is optional, it should pass.
"""
client = ClientInfo("cc:cc:cc:cc:cc:cc", ip=None, ap_info=self.ap)
self.assertEqual(client.ip, None)
self.assertEqual(client.ip_bytes, None)
def test_init_wrong_ap_type(self):
"""Test the __init__ method when mandatory parameter 'ap_info' is of wrnong type."""
ap_wrong = object()
with self.assertRaises(ValueError):
client = ClientInfo("cc:cc:cc:cc:cc:cc",
ip="3.3.3.3", ap_info=ap_wrong)
def test_str(self):
"""Test the __str__ method."""
client = ClientInfo("cc:cc:cc:cc:cc:cc", ip="3.3.3.3", ap_info=self.ap)
self.assertEqual(str(client), "Client cc:cc:cc:cc:cc:cc - 3.3.3.3")
self.assertEqual(str(client), client.name)
|
3,519 | 2060f57cfd910a308d60ad35ebbbf9ffd5678b9c |
# coding: utf-8
import pandas as pd
import os
import numpy as np
import json as json
import mysql.connector as sqlcnt
import datetime as dt
import requests
from mysql.connector.constants import SQLMode
import os
import glob
import re
import warnings
warnings.filterwarnings("ignore")
from pathlib import Path
# In[289]:
import os,sys
lib_path = r"\\"
#lib_path = r"C:\Users\300068241\Documents\Covid_Data\Daily"
os.chdir(lib_path)
covid_pred=pd.read_csv(r'total_cases_data.csv')
data=covid_pred
import scipy
import patsy
import statsmodels.api as sm
X=data.Time
X=sm.add_constant(X)
data['logTotal']=np.log(data.Total)
y=data.logTotal
mod=sm.OLS(y,X)
res=mod.fit()
print(res.summary())
import math
initial_value_exponent=2.2588
X0=math.exp(initial_value_exponent)
X0
growth_factor_exponent=0.1730
# In[304]:
b=math.exp(growth_factor_exponent)
# In[305]:
b
# In[306]:
from datetime import date
start_date = date(2020, 3, 2) #1st case is assumed to be of 2nd Mar'20
# In[307]:
import datetime
today = datetime.date.today()
t = today + datetime.timedelta(days = 1) #+1 in days as 1st case was on 2nd and another +1 days as we're predicting for tomorrow
delta = t - start_date
time=delta.days
Xt = X0 * (math.pow(b,time))
#Xt
predicted = round(Xt)
tomorrow = t - datetime.timedelta(days=1)
covid_actual=pd.read_csv(r'total_cases_data.csv')
covid_actual.loc[:, 'Date':'human_date']
covid_predicted=pd.DataFrame({'Date':["26/3/2020","27/3/2020","28/3/2020"],'Total':["721","857","1022"], 'human_date':["26th Mar","27th Mar","28th Mar"]}) #change here
covid_predicted.to_csv('predicted_data.csv',index=False)
covid_merge = pd.merge(covid_actual,covid_predicted,left_on=['Date'],right_on=['Date'],how = 'left')
covid_accuracy = covid_merge[(covid_merge['Date']=='26/3/2020') | (covid_merge['Date']=='27/3/2020') | (covid_merge['Date']=='28/3/2020')] #change here
#covid_accuracy
covid_accuracy['Total_y']=covid_accuracy['Total_y'].astype(int)
covid_accuracy['Total_x']=covid_accuracy['Total_x'].astype(int)
covid_accuracy.loc[covid_accuracy['Total_x']>=covid_accuracy['Total_y'], 'Accuracy'] = (covid_accuracy['Total_y']/covid_accuracy['Total_x'])*100
covid_accuracy.loc[covid_accuracy['Total_x']<covid_accuracy['Total_y'], 'Accuracy'] = (covid_accuracy['Total_x']/covid_accuracy['Total_y'])*100
accuracy_final=covid_accuracy.mean(axis = 0)
|
3,520 | fb0dcb641dfb379751264dc0b18007f5d058d379 | import numpy as np
import matplotlib.pyplot as plt
def cos_Taylor2(x, n):
    """Maclaurin-series approximation of cos(x) using terms 0..n.

    Returns ``(approximation, first_neglected_term)``; the second element
    is the magnitude of term n+1, an estimate of the truncation error.
    """
    total = 0
    term = 1
    for k in range(n + 1):
        total += term
        # recurrence: term_{k+1} = -term_k * x^2 / ((2k+1)(2k+2))
        term = -term * x**2 / ((2*k + 1) * (2*k + 2))
    return total, abs(term)
vcos = np.vectorize(cos_Taylor2)
def cos_two_terms(x):
    """Reference value: the n=2 Taylor expansion of cos(x), written out.

    Mirrors cos_Taylor2(x, 2) term by term (despite the name, three terms
    are summed). Returns ``(sum, |next_term|)``. The loop below performs
    exactly the same floating-point operations, in the same order, as the
    original unrolled version.
    """
    s = 0
    a = 1
    for i in range(3):
        s = s + a
        a = -a * x**2 / ((2*i + 1) * (2*i + 2))
    return s, abs(a)
def test_cos_Taylor():
    """Regression check: cos_Taylor2(x, 2) must match cos_two_terms(x)."""
    x, tol = 0.63, 1e-14
    expected = cos_two_terms(x)
    computed = cos_Taylor2(x, 2)
    # both the sum and the error estimate must agree within tolerance
    ok = all(abs(c - e) < tol for c, e in zip(computed, expected))
    assert ok, 'Output is different from expected!'
test_cos_Taylor()
x = np.linspace(-5,5,100)
n = [0,2,4,6]
for i in n:
y = vcos(x, i)
plt.plot(x, y[0], label='n = %g' % i)
y = np.cos(x)
plt.plot(x, y, 'b-', label = 'expected')
plt.ylim(-1.1,1.1)
plt.legend()
plt.savefig('cos_Taylor_series_diffeq.png')
plt.show()
'''
Terminal> cos_Taylor_series_diffeq.py"
Process finished with exit code 0
''' |
3,521 | 0e7d4b73cedf961677e6b9ea5303cdb3a5afa788 | #!/usr/bin/env python3
import fileinput
# Advent of Code 2017, day 6: memory-bank redistribution.
# Repeatedly empty the fullest bank and deal its blocks round-robin until a
# configuration repeats. Part 1 = steps until the first repeat; part 2 = the
# cycle length between the two sightings of that repeated configuration.
mem = [int(n.strip()) for n in next(fileinput.input()).split()]
size = len(mem)
states = set()
# configurations are hashed as dot-joined strings
states.add('.'.join(str(n) for n in mem))
part2 = None   # set to the first repeated configuration once part 1 is found
steps = 0
while True:
    # fullest bank; list.index(max(...)) breaks ties toward the lowest index
    i = mem.index(max(mem))
    x = mem[i]
    mem[i] = 0
    # deal the removed blocks one at a time, wrapping around
    while x > 0:
        i += 1
        mem[i % size] += 1
        x -= 1
    steps += 1
    statehash = '.'.join(str(n) for n in mem)
    if statehash in states:
        if not part2:
            print("Part 1:", steps)
            part2 = statehash
            part1_steps = steps
        else:
            # stop only when the *same* configuration comes around again
            if statehash == part2:
                print("Part 2:", steps - part1_steps)
                break
    else:
        states.add(statehash)
|
3,522 | a4f4137b9310ebc68515b9cae841051eda1f0360 | import random
# Consonant pool for pattern letter 'c' ('y' is treated as a consonant here).
consonants = [
    'b', 'c', 'd', 'f', 'g',
    'h', 'j', 'k', 'l', 'm',
    'n', 'p', 'q', 'r', 's',
    't', 'v', 'w', 'x', 'y',
    'z'
]
# Vowel pool for pattern letter 'v'.
# Bug fix: the original list contained ' i' (with a leading space), so a
# generated word could come out with a stray embedded space, e.g. "b i".
vowels = [
    'a', 'e', 'i', 'o', 'u'
]
def make_word(user_input):
    """Build a pseudo-word from a pattern of 'c'/'v' letters.

    Each 'c' becomes a random consonant, each 'v' a random vowel, and
    whitespace is copied through as a single space; any other character
    triggers a warning message and contributes nothing to the word.
    """
    pieces = []
    for ch in user_input:
        ch = ch.lower()
        if ch == 'c':
            pieces.append(random.choice(consonants))
        elif ch == 'v':
            pieces.append(random.choice(vowels))
        elif ch.isspace():
            pieces.append(' ')
        else:
            print('Incorrect character passed. You must supply either a [c]onsonant, or a [vowel]\n')
    return ''.join(pieces)
def main():
    # Interactive entry point: read a c/v pattern from stdin and print one
    # randomly generated word matching it.
    pattern = input('Enter your lexical pattern, c for consonant. v for vowel\n')
    print(make_word(pattern))
# run immediately on import/execution (no __main__ guard in this script)
main()
|
3,523 | 2e2de50a7d366ca1a98d29b33ed157a1e8445ada | # (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""ipyhi
ipyhi is a Jupyter notebook notification system.
It is based on the jupyter-notify package.
"""
import os
from setuptools import find_packages, setup
MAJOR = 0
MINOR = 1
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
FULLVERSION = VERSION
DOCLINES = __doc__.split('\n')
DESCRIPTION = DOCLINES[0]
LONG_DESCRIPTION = "\n".join(DOCLINES[2:])
def git_short_hash():
    """Return '+<short-sha>' of the current git HEAD, or '' if unavailable.

    Used to tag development versions; the empty-string fallback keeps the
    version string valid when git is missing or the tree is not a repo.
    """
    try:
        git_str = "+" + os.popen('git log -1 --format="%h"').read().strip()
    except OSError:  # narrowed from a bare `except:`; only popen/read failures
        git_str = ""
    else:
        if git_str == '+':  # git absent or command failed -> popen returned ''
            git_str = ''
    return git_str
if not ISRELEASED:
FULLVERSION += '.dev'+str(MICRO)+git_short_hash()
def write_version_py(filename='ipyhi/version.py'):
    """Write the generated version module to *filename*.

    The emitted file records short_version/version/release so the installed
    package can report exactly which build it came from.
    """
    cnt = """\
# THIS FILE IS GENERATED FROM IPYHI SETUP.PY
# pylint: disable=missing-module-docstring
short_version = '%(version)s'
version = '%(fullversion)s'
release = %(isrelease)s
"""
    # `with` replaces the original manual open/try/finally/close dance and
    # guarantees the handle is closed even if the write fails
    with open(filename, 'w') as a:
        a.write(cnt % {'version': VERSION, 'fullversion':
                       FULLVERSION, 'isrelease': str(ISRELEASED)})
setup(
name='ipyhi',
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author='Paul Nation',
author_email='nonhermitian@gmail.com',
url='https://github.com/nonhermitian/ipyhi',
license='Apache-2',
packages=find_packages(exclude=('tests', 'docs')),
package_data={'ipyhi': ['js/*.js']},
install_requires=[
'ipython',
'jupyter',
'ipywidgets'
],
classifiers=[
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9'
]
)
|
3,524 | 48311ee17a3f2eca8db32d7672f540fa45a7a900 | #!/usr/bin/env python
from LCClass import LightCurve
import matplotlib.pyplot as plt
import niutils
def main():
lc1821 = LightCurve("PSR_B1821-24/PSR_B1821-24_combined.evt")
lc0218 = LightCurve("PSR_J0218+4232/PSR_J0218+4232_combined.evt")
fig, ax = plt.subplots(2, 1, figsize=(8, 8))
ax[0], _ = lc1821.fit_two('lorentzian', ax=ax[0], label=False, annotate=False)
ax[1], _ = lc0218.fit_two('gaussian', ax=ax[1], label=False, annotate=False)
ax[1].set_xlabel("Pulse Phase", fontsize=25)
ax[0].text(.08, .95, r'PSR B1821$-$24', ha='left', va='top',
fontsize=20, transform=ax[0].transAxes,
bbox=dict(facecolor='white', edgecolor='none', alpha=0.6))
ax[1].text(.08, .95, r'PSR J0218$+$4232', ha='left', va='top',
fontsize=20, transform=ax[1].transAxes,
bbox=dict(facecolor='white', edgecolor='none', alpha=0.6))
ax[0].tick_params(labelbottom=False)
#plt.setp(ax[0].get_yticklabels()[0], visible=False)
fig.text(.04, .5, r'Photon Counts', ha='center', va='center',
rotation='vertical', fontsize=25)
plt.subplots_adjust(hspace=0, bottom=.08, top=.94, right=.98, left=.15)
fig.savefig("poster_plot.svg")
if __name__ == '__main__':
main()
|
3,525 | 426a8fb6d1adf5d4577d299083ce047c919dda67 | '''
EXERCICIO: Faça um programa que leia quantidade de pessoas que serão convidadas para uma festa.
O programa irá perguntar o nome de todas as pessoas e colcar num lista de convidados.
Após isso deve imprimir todos os nomes da lista
'''
'''
qtd = int(input("Quantas pessoas vão ser convidadas?"))
lista_pessoas = []
while qtd > 0:
lista_pessoas.append(input('Nome: '))
qtd -= 1
for pessoa in lista_pessoas:
print(pessoa)
'''
# Resolução do exercício
print('Programinha de controle de festinhas 1.0')
print('#' * 20)
numero_de_convidados = int(input('Coloque o número de convidados: '))
lista_de_convidados =[]
i = 1
while i <= numero_de_convidados:
nome_do_convidado = input('Coloque o nome do convidado #' + str(i) + ': ')
lista_de_convidados.append(nome_do_convidado)
i += 1
print('Serão ', numero_de_convidados, 'convidados')
print('\nLISTA DE CONVIDADOS')
for convidado in lista_de_convidados:
print(convidado)
|
3,526 | e59404149c739a40316ca16ab767cbc48aa9b685 | # -*- coding: utf-8 -*-
import scrapy
from selenium import webdriver
import datetime
class GoldpriceSpider(scrapy.Spider):
    """Scrape the live gold price from g-banker.com via PhantomJS and,
    when the spider closes, e-mail an alert if the price left [270, 278].

    SECURITY NOTE(review): SMTP credentials are hard-coded below -- they
    should be moved into settings or environment variables.
    """
    name = 'goldprice'
    allowed_domains = ['g-banker.com']
    start_urls = ['https://g-banker.com/']
    def __init__(self):
        # PhantomJS renders the page so the JS-populated price element exists
        self.browser = webdriver.PhantomJS()
        self.price = None  # last scraped price; stays None until parse() succeeds
    def parse(self, response):
        # print response.text
        self.browser.get(response.url)
        self.price = float(self.browser.find_element_by_xpath('//*[@id="J_price"]').text)
    def close(self,spider, reason):
        # Runs when the spider finishes: send an alert mail only if a price
        # was scraped, it is before 22:00, and the price is outside 270..278.
        hour = datetime.datetime.now().hour
        if(self.price != None):
            if int(hour) < 22:
                if(self.price > 278 or self.price < 270):
                    from scrapy.mail import MailSender
                    # mailer = MailSender.from_settings(settings)  # errored out; cause not found
                    mailer = MailSender(
                        smtphost = "smtp.163.com", # SMTP server used to send the mail
                        mailfrom = "18607970065@163.com", # sender address
                        smtpuser = "18607970065@163.com", # user name
                        smtppass = "yan18779865344", # NOTE: this is the mailbox's SMTP authorization code, not the login password!
                        smtpport = 25 # port number
                    )
                    body = u"""
                    实时爬取的黄金价格为:
                    """ + str(self.price)
                    subject = u'爬取的黄金实时价格'
                    # If the message body is too bare it may be rejected as spam.
                    mailer.send(to=["363918226@qq.com"], subject = subject.encode("utf-8"), body = body.encode("utf-8"))
    def __del__(self):
        self.browser.close()
|
3,527 | ba78a1e29736c4f109a0efc6f5b9993994661058 | '''
Created on June 24, 2019
@author: Andrew Habib
'''
import json
import jsonref
import sys
from jsonsubschema.api import isSubschema
def main():
    """CLI entry point: check mutual subtyping of two JSON Schema files."""
    # NOTE(review): validating input with `assert` is stripped under
    # `python -O`; an explicit check + sys.exit would be more robust.
    assert len(
        sys.argv) == 3, "jsonsubschema cli takes exactly two arguments lhs_schema and rhs_schema"
    s1_file = sys.argv[1]
    s2_file = sys.argv[2]
    with open(s1_file, 'r') as f1:
        s1 = json.load(f1)
        # s1 = jsonref.load(f1)
    with open(s2_file, 'r') as f2:
        s2 = json.load(f2)
        # s2 = jsonref.load(f2)
    # report subtyping in both directions
    print("LHS <: RHS", isSubschema(s1, s2))
    print("RHS <: LHS", isSubschema(s2, s1))
if __name__ == "__main__":
    main()
|
3,528 | 4f116f3eec9198a56a047ab42ed8e018ebb794bb | def Hello_worlder(x):
a=[]
for i in range(x):
a.append('Hello world')
for i in a:
print(i)
Hello_worlder(10)
|
3,529 | f33190df35a6b0b91c4dd2d6a58291451d06e29a | # -*- coding: utf-8 -*-
import scrapy
import json, time, sys, random, re, pyssdb
from scrapy.utils.project import get_project_settings
from spider.items import GoodsSalesItem
goods_list = []
'''获取店铺内产品信息'''
class PddMallGoodsSpider(scrapy.Spider):
name = 'pdd_mall_goods'
mall_id_hash = 'pdd_mall_id_hash'
hash_num = 0
ssdb_client = ''
process_nums = 1
limit = 100
def __init__(self, hash_num = 0, process_nums = 1):
self.ssdb_client = pyssdb.Client(get_project_settings().get('SSDB_HOST'), 8888)
self.hash_num = int(hash_num) ##当前脚本号
self.process_nums = int(process_nums) ##脚本总数
self.pageSize = 500 ##每次抓取的产品数 最大只返回500
def start_requests(self):
mall_nums = self.limit * int(self.process_nums) ##一次查询的数量
is_end = False
start_mall_id = '' ##起始查询的店铺key
while not is_end:
mall_ids = self.ssdb_client.hkeys(self.mall_id_hash, start_mall_id, '', mall_nums)
if not mall_ids: ##没有数据返回
is_end = True
continue
for mall_id in mall_ids:
mall_id = int( mall_id.decode('utf-8') )
start_mall_id = mall_id
if mall_id % self.process_nums != self.hash_num:
continue
goods_list=[]
page = 1
headers = self.make_headers()
url = 'http://apiv4.yangkeduo.com/api/turing/mall/query_cat_goods?category_id=0&type=0&sort_type=_sales&mall_id='+str(mall_id)+'&page_no='+str(page)+'&page_size=500'
meta = {'page':page, 'mall_id':mall_id, 'goods_list':goods_list}
yield scrapy.Request(url, meta=meta, callback=self.parse, headers=headers)
def parse(self, response):
pass
goods_list=response.meta['goods_list'] ##产品集合
mall_id = response.meta['mall_id'] ##店铺ID
page = response.meta['page'] ##每返回一次页面数据 记录页数
mall_goods = response.body.decode('utf-8') ##bytes转换为str
mall_goods = json.loads(mall_goods)
goods_len = len(mall_goods['goods_list'])
if goods_len > 0:
goods_list = goods_list + mall_goods['goods_list'] ##合并产品列表
if goods_len > self.pageSize - 100:
page += 1
##继续采集下一页面
url = 'http://apiv4.yangkeduo.com/api/turing/mall/query_cat_goods?category_id=0&type=0&sort_type=_sales&mall_id='+str(mall_id)+'&page_no='+str(page)+'&page_size=500'
meta = {'page':page, 'mall_id':mall_id, 'goods_list':goods_list}
headers = self.make_headers()
yield scrapy.Request(url, meta=meta, callback=self.parse, headers=headers)
else:
if goods_list:
item = GoodsSalesItem()
item['goods_list'] = goods_list
item['mall_id'] = mall_id
yield item
'''生成headers头信息'''
def make_headers(self):
chrome_version = str(random.randint(59,63))+'.0.'+str(random.randint(1000,3200))+'.94'
headers = {
"Host":"yangkeduo.com",
"Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Accept-Language":"zh-CN,zh;q=0.9,en;q=0.8",
"Accept-Encoding":"gzip, deflate",
"Host":"yangkeduo.com",
"Referer":"http://yangkeduo.com/goods.html?goods_id=442573047&from_subject_id=935&is_spike=0&refer_page_name=subject&refer_page_id=subject_1515726808272_1M143fWqjQ&refer_page_sn=10026",
"Connection":"keep-alive",
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/'+chrome_version+' Safari/537.36',
}
ip = str(random.randint(100, 200))+'.'+str(random.randint(1, 255))+'.'+str(random.randint(1, 255))+'.'+str(random.randint(1, 255))
headers['CLIENT-IP'] = ip
headers['X-FORWARDED-FOR']= ip
return headers |
3,530 | d84a7e16471c604283c81412653e037ecdb19102 | import os
bind = '0.0.0.0:8000'
workers = os.environ['GET_KEYS_ACCOUNTS_WORKERS']
|
3,531 | 076b852010ddcea69a294f9f2a653bb2fa2f2676 | # -*- coding: utf-8 -*-
"""
Created on Sat Jul 1 10:18:11 2017
@author: Duong
"""
import pandas as pd
import matplotlib.pyplot as plt
import psycopg2
from pandas.core.frame import DataFrame
# DBS verbinden
database = psycopg2.connect(database="TeamYellow_election", user="student", password="password", host="agdbs-edu01.imp.fu-berlin.de", port="5432")
# SQl-Abfrage
cursor = database.cursor()
cursor.execute(
'SELECT tweet_date, COUNT(*) FROM projekt_election.tweet as tweet , projekt_election.hashtag_use as use WHERE tweet.tweet_id = use.tweet_id GROUP BY tweet_date ORDER BY tweet_date ASC')
result = cursor.fetchall()
# Dataframe erstellen
data=DataFrame(result, columns=['tweet_date', 'count'])
#Umwandlung des Datentyp der Spalte tweet_date
data['tweet_date_with_time'] = data['tweet_date'].astype('datetime64[ns]')
data['week_number'] = data['tweet_date_with_time'].dt.week
data['weekday']= data['tweet_date_with_time'].dt.dayofweek
# Gruppierung der Kalendarwochen mit einzelnen Counts
data2=data.copy()
del data2['tweet_date']
del data2['tweet_date_with_time']
del data2['weekday']
print(data2.groupby('week_number')['count'].apply(list))
# Aufbau Dataframe auf Erkenntnisse aus data2-Prints
data3 = pd.DataFrame({'KW01': [0, 0, 1, 0, 3, 0, 0],
'KW02': [3, 1, 7, 1, 0, 1, 0],
'KW03': [0, 2, 6, 1, 11, 3, 2],
'KW04': [13, 5, 1, 3, 6, 2, 1],
'KW05': [0, 1, 2, 0, 4, 3, 4],
'KW06': [2, 6, 1, 2, 1, 5, 0],
'KW07': [1, 3, 5, 2, 5, 2, 1],
'KW08': [2, 7, 1, 3, 5, 1, 3],
'KW09': [3, 10, 9, 3, 3, 6, 2],
'KW10': [0, 1, 2, 0, 2, 4, 0],
'KW11': [2, 3, 8, 0, 3, 10, 5],
'KW12': [0, 11, 4, 1, 0, 0, 0],
'KW13': [1, 0, 3, 2, 1, 6, 5],
'KW14': [4, 5, 0, 0, 1, 1, 2],
'KW15': [2, 4, 1, 2, 0, 4, 2],
'KW16': [0, 11, 4, 2, 3, 4, 1],
'KW17': [2, 6, 0, 1, 1, 0, 0],
'KW18': [4, 8, 0, 1, 1, 0, 0],
'KW19': [2, 8, 3, 0, 0, 0, 0],
'KW20': [1, 1, 1, 0, 5, 0, 1],
'KW21': [0, 0, 2, 1, 1, 0, 0],
'KW22': [0, 0, 1, 4, 2, 3, 0],
'KW23': [0, 0, 1, 0, 1, 2, 0],
'KW24': [0, 0, 3, 0, 1, 4, 1],
'KW25': [0, 0, 1, 10, 0, 0, 0],
'KW26': [1, 1, 0, 0, 2, 3, 0],
'KW27': [1, 0, 0, 2, 0, 0, 0],
'KW28': [1, 2, 2, 1, 0, 1, 0],
'KW29': [0, 1, 2, 7, 2, 1, 0],
'KW30': [1, 3, 3, 4, 0, 1, 1],
'KW31': [3, 2, 2, 0, 1, 4, 1],
'KW32': [1, 6, 0, 0, 0, 1, 0],
'KW33': [0, 0, 4, 0, 1, 1, 0],
'KW34': [1, 0, 1, 2, 1, 2, 1],
'KW35': [2, 0, 1, 3, 1, 0, 0],
'KW36': [1, 1, 2, 2, 2, 0, 0],
'KW37': [0, 1, 1, 2, 4, 0, 0],
'KW38': [0, 3, 0, 2, 1, 1, 0],
'KW39': [3, 18, 0, 0, 0, 0, 0]})
data4= data3.transpose()
data4.columns =['Montag', 'Dienstag', 'Mittwoch', 'Donnerstag', 'Freitag', 'Samstag', 'Sonntag']
data4['Kalendarwoche']=data4.index
############################# Bau eines Stacked Bar Chart ############################################
#Grundgerüst des Balkendiagramms
f, ax1 = plt.subplots(1, figsize=(25,20))
# Balkengröße
bar_width = 0.75
# Balken fangen von links an
bar_l = [i+1 for i in range(len(data4['Montag']))]
# Position der X-Achsen Werte
tick_pos = [i+(bar_width/2) for i in bar_l]
# Beginn der Erstellung der Balken nach Wochentagen
ax1.bar(bar_l,
data4['Montag'],
width=bar_width,
label='Montag',
alpha=0.5,
color='#1858ef')
ax1.bar(bar_l,
data4['Dienstag'],
width=bar_width,
bottom=data4['Montag'],
label='Dienstag',
alpha=0.5,
color='#6618ef')
ax1.bar(bar_l,
data4['Mittwoch'],
width=bar_width,
bottom=[i+j for i,j in zip(data4['Montag'],data4['Dienstag'])],
label='Mittwoch',
alpha=0.5,
color='#ef1829')
ax1.bar(bar_l,
data4['Donnerstag'],
width=bar_width,
bottom=[i+j+k for i,j,k in zip(data4['Montag'],data4['Dienstag'], data4['Mittwoch'])],
label='Donnerstag',
alpha=0.5,
color='#ef7c18')
ax1.bar(bar_l,
data4['Freitag'],
width=bar_width,
bottom=[i+j+k+l for i,j,k,l in zip(data4['Montag'],data4['Dienstag'],
data4['Mittwoch'], data4['Donnerstag'])],
label='Freitag',
alpha=0.5,
color='#efc718')
ax1.bar(bar_l,
data4['Samstag'],
width=bar_width,
bottom=[i+j+k+l+m for i,j,k,l,m in zip(data4['Montag'],data4['Dienstag'],
data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'])],
label='Samstag',
alpha=0.5,
color='#63ef18')
ax1.bar(bar_l,
data4['Sonntag'],
width=bar_width,
bottom=[i+j+k+l+m+n for i,j,k,l,m,n in zip(data4['Montag'],data4['Dienstag'],
data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'],
data4['Samstag'])],
label='Sonntag',
alpha=0.5,
color='#18efa3')
# X-Achse mit Werte versehen
plt.xticks(tick_pos, data4['Kalendarwoche'])
#Legende
ax1.set_ylabel("Häufigkeit")
ax1.set_xlabel("Kalendarwoche")
plt.legend(loc='upper left')
# Zwischen den Diagrammen Platz lassen
plt.xlim([min(tick_pos)-bar_width, max(tick_pos)+bar_width])
############### Balkendiagramm nach Kalendarwoche#########################################
kw = lambda x: x.isocalendar()[1]
grouped = data.groupby([data['tweet_date_with_time'].map(kw)], sort=False).agg({'count': 'sum'})
grouped['calendar week']= ('KW1','KW2','KW3','KW4','KW5','KW6','KW7','KW8','KW9','KW10','KW11','KW12','KW13',
'KW14','KW15','KW16','KW17','KW18','KW19','KW20','KW21','KW22','KW23','KW24','KW25','KW26', 'KW27','KW28','KW29',
'KW30','KW31','KW32','KW33','KW34','KW35','KW36','KW37','KW38','KW39')
#Balkendiagramm für alle Hashtag in Kalendarwoche
grouped.set_index('calendar week').plot.bar(rot=45, title='Nutzung von #makeamericagreatagain in Kalendarwoche', figsize=(15,10), fontsize=10)
############## Balkendiagramm für alle Hashtag pro Tag #####################################
data5=data[['tweet_date','count']].copy()
#Balkendiagramm für alle Hashtag in Tagen
data5.set_index('tweet_date').plot.bar(rot=90, title='Häufigkeit aller Hashtag in Tagen', figsize=(50,25), color ='#ef6618', fontsize=14)
|
3,532 | 005ea8a1e75447b2b1c030a645bde5d0cdc8fb53 | t3 = float(input('Digite um numero: '))
print('o dobro deste numero é', t3 * 2)
print('O triplo deste numero é', t3 * 3)
print('E a raiz quadrada deste numero é', t3**(1/2)) |
3,533 | fc0c8deb3a5a57934c9e707911c352af55100c3c | print(sum([int(d) for d in str(pow(2,1000))]))
|
3,534 | f566c42674728f1874d89b15102627c3b404c9a0 | #!/usr/bin/env python3
import sys
import cksm
from pathlib import Path
VIRTUAL_TO_ROM = 0x800ff000
def patch_rom(rom_path, payload_path, c_code_path, entry_code_path, out_path):
    """Patch a Glover ROM with a custom payload and fix its checksum.

    rom_path        -- path of the original ROM image
    payload_path    -- assembled payload blob, copied to ROM 0x780000
    c_code_path     -- compiled C code blob, copied to ROM 0x780200
    entry_code_path -- entry stub, copied to ROM 0x1000
    out_path        -- where the patched ROM is written
    """
    # bytearray supports in-place slice assignment, so no list round-trip
    # and no per-byte copy loops are needed.
    rom = bytearray(Path(rom_path).read_bytes())
    payload = Path(payload_path).read_bytes()
    c_code = Path(c_code_path).read_bytes()
    entry_code = Path(entry_code_path).read_bytes()
    # Instructions that jump into the payload once it is resident in RAM
    # (the original also defined a ROM-address variant, which was unused).
    jump_ram = bytes([0x3C, 0x1F, 0x80, 0x40, 0x03, 0xE0, 0xF8, 0x09])
    entry_inject = 0x1000
    jump_address = 0x40370
    render_inject = 0x8017FF10 - VIRTUAL_TO_ROM  # RAM address -> ROM offset
    payload_address = 0x780000
    c_code_address = 0x780200
    force_easy_ball = 0x3A6D0  # nop this address to always make ball behave like easy mode
    rom[jump_address:jump_address + len(jump_ram)] = jump_ram
    rom[render_inject:render_inject + len(jump_ram)] = jump_ram
    # need to nop the call right after render_inject
    rom[render_inject + 8:render_inject + 12] = b'\x00' * 4
    rom[payload_address:payload_address + len(payload)] = payload
    rom[c_code_address:c_code_address + len(c_code)] = c_code
    rom[entry_inject:entry_inject + len(entry_code)] = entry_code
    # same as this gs code:
    # 801396D0 0000
    # 801396D1 0000
    # 801396D2 0000
    # 801396D3 0000
    rom[force_easy_ball:force_easy_ball + 4] = b'\x00' * 4
    cksm.update_checksum(rom)
    # Context manager guarantees the handle is closed even if write fails.
    with open(out_path, 'w+b') as f:
        f.write(rom)
if __name__ == '__main__':
    # The script needs 5 arguments (argv[1]..argv[5]), so argv must hold at
    # least 6 entries.  The original `< 5` check accepted 4 arguments and
    # then crashed with IndexError on sys.argv[5] instead of printing usage.
    if len(sys.argv) < 6:
        print("Usage: glovepatch.py <glover_rom> <binary to inject> <c code binary> <entry code> <output>")
        sys.exit(0)
    patch_rom(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
|
3,535 | 82f8bfd95fea3025bed2b4583c20526b0bd5484f | from flask import Flask, abort, url_for, render_template, request
app = Flask(__name__)
@app.route('/')
def index():
    # Landing page: just render the login form.
    return render_template('login.html')
# #redirect demo
# @app.route('/login', methods=['POST', 'GET'])
# def login():
# if request.method == 'POST' and request.form['username'] == 'admin':
# return redirect(url_for('success'))
# return redirect(url_for('index'), 302)
@app.route('/login', methods=['POST', 'GET'])
def login():
    """Validate the login form.

    POST with username 'admin' redirects to the success page; any other
    username is rejected with 401.  GET requests go back to the login form.
    """
    # Bug fix: `redirect` is missing from this module's `from flask import`
    # line, so this view raised NameError at request time.  Import it
    # locally to keep the fix self-contained.
    from flask import redirect
    if request.method == 'POST':
        if request.form['username'] == 'admin':
            return redirect(url_for('success'))
        else:
            abort(401)
    else:
        return redirect(url_for('index'))
@app.route('/success')
def success():
    # Shown after a successful admin login (see login()).
    return'logged in successfully'
# from flask import Flask, render_template, request
# from werkzeug.utils import secure_filename
# import os
# app = Flask(__name__)
# app.config['UPLOAD_FOLDER'] = os.getcwd()+'/media/'
# app.config['MAX_CONTENT_LENGTH'] = 16*1024*1024 #限制大小16MB
# @app.route('/')
# def upload():
# return render_template('upload.html')
# @app.route('/uploader', methods = ['GET', 'POST'])
# def upload_file():
# if request.method == 'POST':
# f = request.files['file']
# f.save(os.path.join(app.config['UPLOAD_FOLDER'], secure_filename(f.filename)))
# return 'flie uploaded successfully' |
3,536 | 028c2193e180ccdbfdcc51e5d061904ea1d6164e | #!/usr/bin/python
import errno
import fuse
import stat
import time
#from multiprocessing import Queue
from functools import wraps
from processfs.svcmanager import Manager
import processfs.svcmanager as svcmanager
fuse.fuse_python_api = (0, 2)
_vfiles = ['stdin', 'stdout', 'stderr', 'cmdline', 'control', 'status']
def has_ent (func):
    """Decorator for fuse callbacks: reject unknown virtual paths.

    On every call it rebuilds the set of valid paths -- the root '/',
    one directory per managed process, and each per-process virtual
    file from _vfiles -- and returns -ENOENT for anything else before
    delegating to the wrapped operation.
    """
    @wraps(func)
    def wrapper(self, path, *args,**kwargs):
        # Debug tracing of every fuse callback.
        print 'called %s %s %s' % (func, path, args)
        print self._svcmanager.procs.keys()
        # Cartesian product: every virtual file under every process dir.
        vpaths = ['%s/%s' % (x,z) for x in self._svcmanager.procs.keys() \
            for z in _vfiles]
        vpaths.append('/')
        vpaths.extend(self._svcmanager.procs.keys())
        if path not in vpaths:
            return -errno.ENOENT
        return func(self, path, *args,**kwargs)
    return wrapper
class processfs(fuse.Fuse):
    """FUSE filesystem exposing managed processes as directories.

    Each process registered with the service manager appears as a
    directory under the mount root, containing the virtual files listed
    in _vfiles (stdin, stdout, ...).  mkdir asks the manager to spawn a
    new managed process.
    """
    def __init__(self, *args, **kw):
        fuse.Fuse.__init__(self, *args, **kw)
        # The Manager owns the worker processes; its queue carries commands.
        self._svcmanager = Manager()
        self._svc_queue = self._svcmanager.queue
        print type(self._svc_queue)
        # start the process manager thread
        print 'starting svc manager'
        self._svcmanager.start()
    ## NEED - returns dir and file stat struct
    @has_ent
    def getattr(self, path):
        print 'getattr(%s)' % path
        st = fuse.Stat()
        if path in self._svcmanager.procs.keys() or path == '/':
            # Directories: the mount root and one dir per process.
            st.st_nlink = 2
            st.st_mode = stat.S_IFDIR | 0777
        else:
            # Everything else is a regular virtual file.
            st.st_mode = stat.S_IFREG | 0600
            st.st_nlink = 1
        # Times are faked as "now"; size is a fixed placeholder.
        st.st_atime = int(time.time())
        st.st_mtime = st.st_atime
        st.st_ctime = st.st_atime
        st.st_size = 100
        return st
    # returns the contents of a directory
    def readdir(self, path, offset):
        ## always return . and ..
        for p in ['.', '..']:
            yield fuse.Direntry(p)
        procs = self._svcmanager.procs.keys()
        if path == '/':
            for p in procs:
                # Process keys carry a leading '/'; strip it for the entry.
                yield fuse.Direntry(p[1:])
        elif path in procs:
            for p in _vfiles:
                yield fuse.Direntry(p)
    # obvious - see the syscall
    # Note, offset is always ignored. There'll be no appending here
    ## if we are not creating a new file, buf should be sent to proc
    ## stdin
    @has_ent
    def write(self, path, buf, offset):
        print 'write(%s, %s)' % (path, buf.strip())
        if path not in ['%s/%s' % (x,z) \
                for x in self._svcmanager.procs.keys() \
                for z in _vfiles]:
            return -errno.EOPNOTSUPP
        else:
            # Implement later
            return -errno.EACCES
    # obvious - see the syscall
    @has_ent
    def open(self, path, flags):
        print 'open(%s)' % path
        return 0
    # called after create to set times
    @has_ent
    def utime(self, path, times):
        print 'utime(%s)' % path
    # called after write to "commit" data to "disk"
    @has_ent
    def flush(self, path):
        print 'flush(%s)' % path
    # should connect to proc ring buffer
    @has_ent
    def read(self, path, len, offset):
        # NOTE(review): self.files is never defined on this class, so any
        # read currently raises AttributeError -- looks like leftover from an
        # earlier dict-backed design; confirm the intended data source.
        return self.files[path]['process'][offset:offset+len]
    @has_ent
    def unlink(self, path):
        print 'unlink(%s)' % path
    # another noop - makes some file writes happy
    @has_ent
    def truncate(self, path, size):
        print 'truncate(%s)' % path
        return 0
    def mkdir(self, path, mode):
        print 'mkdir(%s, %s)' % (path, mode)
        # Ask the manager to spawn a process named after the new directory,
        # then block until the command has been consumed.
        self._svc_queue.put([svcmanager.MKPROC, path])
        self._svc_queue.join()
        return 0
    def fsdestroy(self, *args, **kw):
        # Filesystem unmount: shut the service manager down.
        self._svcmanager.stop()
|
3,537 | 430b5ca7212983743cadc36a2ada987bb721174a | import numpy as np
import sympy as sp
from copy import copy
from typing import Any, get_type_hints, Dict
from inspect import getclosurevars, getsource, getargs
import ast
from ast import parse, get_source_segment
from .numpy import NumPy
from .torch import torch_defs
# Registry mapping known objects/functions to their checking stand-ins,
# seeded with the torch definitions.
defines = {}
defines.update(torch_defs)
def check_type(item, target):
    # Fail (AssertionError) when the inferred value/type does not match the
    # expected target.
    assert item == target
def exec_lines(source: str, body, loc: Dict[str, Any], glob: Dict[str, Any], ret: Any):
    """Abstractly execute a list of AST statements, type-checking returns.

    source -- original source text (for get_source_segment fallbacks)
    body   -- list of ast statement nodes to walk
    loc    -- local name -> value environment
    glob   -- global name -> value environment
    ret    -- expected value of any `return` encountered
    """
    def get_value(v):
        # Abstract evaluation of one expression node against loc/glob.
        if isinstance(v, ast.BinOp):
            a = get_value(v.left)
            b = get_value(v.right)
            # NOTE(review): the right operand is evaluated but discarded and
            # the left value returned unchanged -- presumably a placeholder
            # for real binary-op propagation; confirm intent.
            return a
        elif isinstance(v, ast.Name):
            return loc.get(v.id)
        elif isinstance(v, ast.Call):
            args = [get_value(a) for a in v.args]
            # Callee resolved from locals first, then globals.
            func = loc.get(v.func.id, None) or glob.get(v.func.id, None)
            return func(*args)
        elif isinstance(v, ast.List):
            return [get_value(e) for e in v.elts]
        elif isinstance(v, ast.Constant):
            return v.value
        # Fallback: evaluate the raw source segment in the environments.
        seg = get_source_segment(source, v)
        return eval(seg, glob, loc)
    for line in body:
        if isinstance(line, ast.Return):
            value = get_value(line.value)
            check_type(value, ret)
        elif isinstance(line, ast.If):
            # Walk both branches with independent copies of the locals.
            loc1, loc2 = copy(loc), copy(loc)
            exec_lines(source, line.body, loc1, glob, ret)
            exec_lines(source, line.orelse, loc2, glob, ret)
        elif isinstance(line, ast.Assign):
            value = get_value(line.value)
            # NOTE(review): the targets are fetched but never bound back into
            # loc -- assignments currently have no effect on the environment.
            t = line.targets
        else:
            # Any other statement is executed verbatim.
            exec(get_source_segment(source, line), glob, loc)
def check(func):
    """Decorator: abstractly execute *func* once to validate its types.

    Builds the local environment from the signature's type hints and the
    global environment from the function's closure (swapping the real
    numpy module and known torch globals for their checking stand-ins),
    then walks the body with exec_lines.  Registers the function in
    `defines` and returns it unchanged.
    """
    args = getargs(func.__code__)
    hints = get_type_hints(func)
    cv = getclosurevars(func)
    # Parameters default to Any unless annotated.
    loc_vars = {n: Any for n in args.args}
    ret = hints.pop('return') if 'return' in hints else None
    loc_vars.update(hints)
    glob_vars = {}
    for k, v in cv.globals.items():
        if v is np:
            # Replace the real numpy module with the checking shim.
            glob_vars[k] = NumPy()
        else:
            # Known globals get their stand-ins; others pass through.
            glob_vars[k] = defines.get(v, None) or v
    source = getsource(func)
    f_ast = parse(source).body[0]
    body = f_ast.body
    exec_lines(source, body, loc_vars, glob_vars, ret)
    defines[func] = 1
    return func
|
3,538 | 22b8ecfecc0e76d758f14dea865a426db56c6343 | import json
import unittest
from pathlib import Path
from deepdiff import DeepDiff
from electricitymap.contrib import config
CONFIG_DIR = Path(__file__).parent.parent.joinpath("config").resolve()
class ConfigTestcase(unittest.TestCase):
    """Unit tests for config.generate_zone_neighbours."""

    def test_generate_zone_neighbours_two_countries(self):
        """One exchange between two countries makes them mutual neighbours."""
        exchanges = {
            "DE->FR": {"parsers": {"exchange": "source"}},
        }
        zones = {
            "DE": {},
            "FR": {},
        }
        zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
        self.assertDictEqual(zone_neighbours, {"DE": ["FR"], "FR": ["DE"]})

    def test_generate_zone_neighbours_one_country_one_subzone(self):
        """An exchange may link a country to another country's subzone."""
        exchanges = {
            "DE->SE-SE4": {"parsers": {"exchange": "source"}},
        }
        zones = {
            "DE": {},
            "SE": {
                "subZoneNames": ["SE-SE4"],
            },
            "SE-SE4": {},
        }
        zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
        self.assertDictEqual(zone_neighbours, {"DE": ["SE-SE4"], "SE-SE4": ["DE"]})

    def test_generate_zone_neighbours_two_subzones(self):
        """Subzone-to-subzone exchanges yield per-subzone neighbour lists."""
        exchanges = {
            "NO-NO1->SE-SE3": {"parsers": {"exchange": "source"}},
            "NO-NO3->SE-SE2": {"parsers": {"exchange": "source"}},
            "NO-NO4->SE-SE1": {"parsers": {"exchange": "source"}},
            "NO-NO4->SE-SE2": {"parsers": {"exchange": "source"}},
        }
        zones = {
            "NO": {
                "subZoneNames": ["NO-NO1", "NO-NO2", "NO-NO3", "NO-NO4", "NO-NO5"],
            },
            "NO-NO1": {},
            "NO-NO2": {},
            "NO-NO3": {},
            "NO-NO4": {},
            "NO-NO5": {},
            "SE": {
                "subZoneNames": ["SE-SE1", "SE-SE2", "SE-SE3", "SE-SE4"],
            },
            "SE-SE1": {},
            "SE-SE2": {},
            "SE-SE3": {},
            "SE-SE4": {},
        }
        zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
        self.assertDictEqual(
            zone_neighbours,
            {
                "NO-NO1": ["SE-SE3"],
                "NO-NO3": ["SE-SE2"],
                "NO-NO4": ["SE-SE1", "SE-SE2"],
                "SE-SE1": ["NO-NO4"],
                "SE-SE2": ["NO-NO3", "NO-NO4"],
                "SE-SE3": ["NO-NO1"],
            },
        )

    def test_generate_zone_neighbours_two_subzones_from_same(self):
        """Two subzones of the same country can be each other's neighbours."""
        exchanges = {
            "SE-SE1->SE-SE2": {"parsers": {"exchange": "source"}},
        }
        zones = {
            "SE": {
                "subZoneNames": ["SE-SE1", "SE-SE2", "SE-SE3", "SE-SE4"],
            },
            "SE-SE1": {},
            "SE-SE2": {},
            "SE-SE3": {},
            "SE-SE4": {},
        }
        zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
        self.assertDictEqual(
            zone_neighbours,
            {"SE-SE1": ["SE-SE2"], "SE-SE2": ["SE-SE1"]},
        )

    def test_generate_zone_neighbours_GB(self):
        # That's an interesting case as GB has islands, which are not subzones
        # It means that GB->GB-NIR are valid exchanges and that
        # GB and GB-NIR are neighbours
        exchanges = {
            "GB->GB-NIR": {"parsers": {"exchange": "source"}},
            "GB->GB-ORK": {"parsers": {"exchange": "source"}},
        }
        zones = {
            "GB": {},
            "GB-NIR": {},
            "GB-ORK": {},
        }
        zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
        self.assertDictEqual(
            zone_neighbours,
            {"GB": ["GB-NIR", "GB-ORK"], "GB-NIR": ["GB"], "GB-ORK": ["GB"]},
        )

    def test_generate_zone_neighbours_no_exchange_parser(self):
        """Exchanges with no configured parser are ignored entirely."""
        exchanges = {
            "DE->FR": {"parsers": {}},
        }
        zones = {
            "DE": {},
            "FR": {},
        }
        zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
        self.assertDictEqual(zone_neighbours, {})

    def test_ZONE_NEIGHBOURS(self):
        """Smoke test against the real repository configuration."""
        zone_neighbours = config.generate_zone_neighbours(
            config.ZONES_CONFIG, config.EXCHANGES_CONFIG
        )
        self.assertIn("DK-DK1", zone_neighbours.keys())
        dk_neighbours = zone_neighbours["DK-DK1"]
        self.assertGreater(
            len(dk_neighbours), 1, "expected a few neighbours for DK-DK1"
        )
if __name__ == "__main__":
    # buffer=True captures stdout/stderr and only shows it for failing tests.
    unittest.main(buffer=True)
|
3,539 | ac14e88810b848dbf4ff32ea99fd274cd0285e1c | """ Codewars kata: Evaluate mathematical expression. https://www.codewars.com/kata/52a78825cdfc2cfc87000005/train/python """
#######################################################################################################################
#
# Import
#
#######################################################################################################################
import operator
import re
#######################################################################################################################
#
# Calculator
#
#######################################################################################################################
class Calculator(object):
    """Evaluate arithmetic expressions with + - * / and parentheses.

    Works by repeated textual rewriting of the expression string:
    innermost parentheses are collapsed first, then '*' and '/' are folded
    left-to-right, and the remaining signed numbers are summed.
    """

    # A signed decimal number: optional sign, digits, optional fraction.
    re_num = r"(([-+])?(\d+)(\.\d+)?)"

    def _float_to_string_(self, f, p=40):
        """Format *f* as an explicitly signed decimal literal with trailing
        zeros stripped (but always at least one digit after the point)."""
        # decimal.Decimal would let us avoid these shenanigans, but it's not available.
        result = f"{f:+1.{p}f}"
        if "." in result:
            result = result.rstrip("0")
            if result[-1] == ".":
                result += "0"
        return result

    def _muldiv_(self, m):
        """re.sub callback: fold one 'a*b' or 'a/b' into a signed literal."""
        op = operator.mul if m.group("op") == "*" else operator.truediv
        return self._float_to_string_(op(float(m.group('n1')), float(m.group('n2'))))

    def _subber_(self, search, replace, target):
        """Apply *search* -> *replace* one match at a time until no match
        remains, then normalise sign pairs '--' -> '+' and '-+' -> '-'."""
        subs = -1
        while subs != 0:
            target, subs = re.subn(search, replace, target, count=1)
        target = target.replace("--", "+")
        target = target.replace("-+", "-")
        return target

    def _evaluate_(self, thing):
        """Reduce *thing* (a str, or the re.Match of a parenthesised group
        when used as a _subber_ callback) to a single signed literal."""
        if not isinstance(thing, str):  # isinstance, not type() comparison
            thing = thing[1]            # match group 1: text inside the parens
        # Innermost parentheses first, then multiplication/division.
        thing = self._subber_(r"\(([^\(\)]*?)\)", self._evaluate_, thing)
        thing = self._subber_(rf"(?P<n1>{self.re_num})(?P<op>\*|\/)(?P<n2>{self.re_num})", self._muldiv_, thing)
        # What remains is a chain of signed numbers: add them up.
        return self._float_to_string_(sum(float(val[0]) for val in re.findall(self.re_num, thing)))

    def evaluate(self, thing):
        """Evaluate the expression *thing* and return the result as float."""
        return float(self._evaluate_(thing.replace(" ", "")))
def calc(expression):
    """Evaluate the arithmetic *expression* and return its float value."""
    calculator = Calculator()
    return calculator.evaluate(expression)
#######################################################################################################################
#
# __main__
#
#######################################################################################################################
if __name__ == "__main__":
    # Smoke test from the kata description; the expected value is in the comment.
    print(f"result = {calc('-(-13) - (84 + 51 * (40)) * (5 / ((((83 * -32)))) / -93)')}")  # 12.957005441119316
|
3,540 | fdf6c28e65b50c52550a95c2d991b1eb3ec53a2f | """
@file
@brief One class which visits a syntax tree.
"""
import inspect
import ast
from textwrap import dedent
import numpy
from scipy.spatial.distance import squareform, pdist
from .node_visitor_translator import CodeNodeVisitor
def py_make_float_array(cst, op_version=None):
    """
    Wraps the scalar *cst* into a one-element float32 array.

    @param cst constant
    @param op_version unused
    @return array

    .. runpython::
        :showcode:
        :warningout: DeprecationWarning

        from mlprodict.onnx_tools.onnx_grammar.onnx_translation import py_make_float_array
        print(py_make_float_array(5.5))
    """
    wrapped = [cst]
    return numpy.array(wrapped, dtype=numpy.float32)
def py_pow(x, p, op_version=None):
    """
    Python operator ``**`` as a function.

    @param x float
    @param p power
    @param op_version unused
    @return :math:`x^p`
    """
    return pow(x, p)
def py_mul(*x, op_version=None):
    """
    Python operator ``*`` over any number of operands.

    @param x floats
    @param op_version unused
    @return product of all operands

    Bug fix: the accumulation used ``p *= y``, which multiplies *in place*
    and therefore mutated the caller's first operand whenever it was a
    mutable type (e.g. a numpy array) -- and only on the >2-operand path,
    making it inconsistent with the 2-operand one.
    """
    if len(x) == 2:
        return x[0] * x[1]
    p = x[0]
    for y in x[1:]:
        # Out-of-place multiply: never mutate the caller's first operand.
        p = p * y
    return p
def py_opp(x, op_version=None):
    """
    Function for python unary operator ``-``.

    @param x floats
    @param op_version unused, kept so all py_* helpers share one signature
    @return `-x`
    """
    return -x
def squareform_pdist(X, metric='sqeuclidean', op_version=None):
    """
    Full (n x n) pairwise-distance matrix of the rows of *X*: composes
    `pdist <http://scipy.github.io/devdocs/generated/scipy.spatial.distance.pdist.html>`_
    (condensed distances) with `squareform
    <http://scipy.github.io/devdocs/generated/scipy.spatial.distance.squareform.html>`_
    (condensed vector -> square matrix).
    """
    condensed = pdist(X, metric=metric)
    return squareform(condensed)
def get_default_context():
    """
    Returns a default context useful for most of the conversion
    from a function using :epkg:`numpy` into :epkg:`ONNX`.
    """
    context = {'py_pow': py_pow, 'py_make_float_array': py_make_float_array,
               'py_mul': py_mul, 'py_opp': py_opp,
               'cdist': 'cdist', 'squareform_pdist': 'squareform_pdist'}
    # Bug fix: the separating spaces matter in these implicitly concatenated
    # literals.  Without the trailing spaces, adjacent names fused
    # ('divide' + 'equal' -> 'divideequal', 'mod' + 'multiply' ->
    # 'modmultiply'), silently dropping divide/equal/mod/multiply from the
    # allow set.
    allow = set(('abs add ceil arccos arccosh arcsin arcsinh arctan arctanh ceil cos cosh divide '
                 'equal exp floor greater invert less log matmul maximum minimum mod '
                 'multiply power sign sin sinh sqrt square subtract tan tanh transpose').split())
    for k, v in numpy.__dict__.items():
        if k not in allow:
            continue
        # Expose the allowed numpy functions under both common spellings.
        context[f'numpy.{k}'] = v
        context[f'np.{k}'] = v
    return context
def get_default_context_cpl():
    """
    Returns a default useful context to compile the converter
    returned by @see fn translate_fct2onnx.
    """
    ctx = {'py_make_float_array': py_make_float_array,
           'py_pow': py_pow, 'py_mul': py_mul, 'py_opp': py_opp,
           'numpy': numpy}
    try:
        # Optional complex helpers; absent from old skl2onnx versions.
        from skl2onnx.algebra.complex_functions import onnx_squareform_pdist  # delayed
        from skl2onnx.algebra.complex_functions import onnx_cdist  # delayed
        ctx['onnx_squareform_pdist'] = onnx_squareform_pdist
        ctx['onnx_cdist'] = onnx_cdist
    except ImportError:  # pragma: no cover
        # Too old version for skl2onnx.
        pass
    from skl2onnx.algebra import onnx_ops  # delayed
    from skl2onnx.algebra.onnx_operator import OnnxOperator  # delayed
    d = onnx_ops.__dict__
    # Expose every OnnxXxx operator class defined by skl2onnx.
    for k, v in d.items():
        try:
            if k.startswith("Onnx") and issubclass(v, OnnxOperator):
                ctx[k] = v
        except TypeError as e:
            # issubclass raises TypeError for non-classes; plain functions
            # are expected and simply skipped.
            if inspect.isfunction(v):
                continue
            raise RuntimeError(  # pragma: no cover
                f"Issue with {k}={v} (type={type(v)})") from e
    return ctx
def translate_fct2onnx(fct, context=None, cpl=False,
                       context_cpl=None, output_names=None,
                       dtype=numpy.float32,
                       verbose=0, fLOG=None):
    """
    Translates a function into :epkg:`ONNX`. The code it produces
    is using classes *OnnxAbs*, *OnnxAdd*, ...

    @param fct function to convert
    @param context context of the function to convert
        something like ``{'numpy.transpose': numpy.transpose}``,
        if *context* is None, it receives a default value
        returnd by @see fn get_default_context
    @param cpl compile the function after it was
        created
    @param context_cpl context used at compiling time
        if *context_cpl* is None, it receives a default value
        returnd by @see fn get_default_context_cpl
    @param output_names names of the output in the :epkg:`ONNX` graph
    @param dtype :epkg:`numpy` float type used to produce the model
    @param verbose integer, display more information
    @param fLOG logging function
    @return code or compiled code

    .. exref::
        :title: Convert a function into ONNX code

        The following code parses a python function and returns
        another python function which produces an :epkg:`ONNX`
        graph if executed.

        .. runpython::
            :showcode:
            :warningout: DeprecationWarning
            :process:
            :store_in_file: fct2onnx2.py

            import numpy
            from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx

            def trs(x, y):
                z = x + numpy.transpose(y, axes=[1, 0])
                return x * z

            onnx_code = translate_fct2onnx(
                trs, context={'numpy.transpose': numpy.transpose})
            print(onnx_code)

    Next example goes further and compile the outcome.

    .. exref::
        :title: Convert a function into ONNX code and run

        The following code parses a python function and returns
        another python function which produces an :epkg:`ONNX`
        graph if executed. The example executes the function,
        creates an :epkg:`ONNX` then uses @see cl OnnxInference
        to compute *predictions*. Finally it compares
        them to the original.

        .. runpython::
            :showcode:
            :warningout: DeprecationWarning
            :process:
            :store_in_file: fct2onnx3.py

            import numpy
            from mlprodict.onnx_tools.onnx_grammar import translate_fct2onnx
            from mlprodict.plotting.text_plot import onnx_simple_text_plot
            from mlprodict.onnxrt import OnnxInference
            from mlprodict.npy.xop import loadop

            OnnxAdd, OnnxTranspose, OnnxMul, OnnxIdentity = loadop(
                'Add', 'Transpose', 'Mul', 'Identity')

            ctx = {'OnnxAdd': OnnxAdd,
                   'OnnxTranspose': OnnxTranspose,
                   'OnnxMul': OnnxMul,
                   'OnnxIdentity': OnnxIdentity}

            def trs(x, y):
                z = x + numpy.transpose(y, axes=[1, 0])
                return x * z

            inputs = {'x': numpy.array([[1, 2]], dtype=numpy.float32),
                      'y': numpy.array([[-0.3, 0.4]], dtype=numpy.float32).T}

            original = trs(inputs['x'], inputs['y'])
            print('original output:', original)

            onnx_fct = translate_fct2onnx(
                trs, context={'numpy.transpose': numpy.transpose},
                cpl=True, context_cpl=ctx, output_names=['Z'])

            onnx_code = onnx_fct('x', 'y', op_version=12)
            onnx_g = onnx_code.to_onnx(inputs, target_opset=12)
            print("ONNX model")
            print(onnx_simple_text_plot(onnx_g))

            oinf = OnnxInference(onnx_g)
            res = oinf.run(inputs)
            print('-----------')
            print("ONNX inference:", res['Z'])

    The function to be converted may include python functions
    which must not be converted. In that case, their name
    must be prefixed by ``py_``. The execution of the function
    this one builds produces the following error::

        TypeError: Parameter to MergeFrom() must be instance of same class:
        expected onnx.TensorProto got onnx.AttributeProto.

    It indicates that constants in the code marges multiple types,
    usually floats and tensor of floats. Floats should be converted
    using the following function::

        def py_make_float_array(cst):
            return numpy.array([cst], dtype=numpy.float32)

    The function replaces empty contexts by default values which
    covers many :epkg:`numpy` functions. The tutorial
    :ref:`l-onnx-tutorial` gives an example of how it can be used
    on a more complex function.
    """
    def compile_code(name, code, context=None):
        """
        Compiles a python function with the given
        context.

        @param name function name
        @param code python code
        @param context context used at compilation
        @return compiled function
        """
        if context is None:
            context = {}  # pragma: no cover
        try:
            obj = compile(code, "", "exec")
        except SyntaxError as e:  # pragma: no cover
            raise SyntaxError(f"Unable to compile\n{code}") from e
        context_g = context.copy()
        context_l = context.copy()
        exec(obj, context_g, context_l)  # pylint: disable=W0122
        return context_l[name]

    # Accept either raw source text or a live callable.
    if isinstance(fct, str):
        code = fct
    elif callable(fct):
        code = inspect.getsource(fct)
    else:
        raise TypeError(  # pragma: no cover
            f"Unable to guess code from type {type(fct)}.")
    node = ast.parse(dedent(code))
    v = CodeNodeVisitor()
    v.visit(node)
    if context is None:
        context = get_default_context()
    # Emit the python source that, when executed, builds the Onnx* graph.
    onnx_code = v.export(context=context,
                         output_names=output_names)
    if not cpl:
        return onnx_code
    if verbose > 0 and fLOG is not None:  # pragma: no cover
        fLOG('[translate_fct2onnx] python code')
        fLOG(code)
        fLOG('[translate_fct2onnx] ONNX code')
        fLOG(onnx_code)
    if context_cpl is None:
        context_cpl = get_default_context_cpl()
    if 'numpy' not in context_cpl:
        # The generated code relies on numpy being available at exec time.
        context_cpl = context_cpl.copy()
        context_cpl['numpy'] = numpy
    return compile_code(fct.__name__, onnx_code, context_cpl)
|
3,541 | cef6b5ef2082dc5910806550d9a9c96357752baf | from unittest import TestCase, main as unittest_main, mock
from app import app
from bson.objectid import ObjectId
'''
dummy data to use in testing create, update, and delete routes
(U and D not yet made)
Inspiration taken from Playlister tutorial.
'''
# Dummy ObjectId and offer payload shared by the CRUD route tests below.
sample_offer_id = ObjectId('5349b4ddd2781d08c09890f4')
sample_offer = {
    'name': 'Muhammad Ali',
    'offer': '4500',
    'email': 'bogus@yahoo.com',
    'location': 'Fort Worth, TX'
}
# The same data in the shape the HTML form posts it.
sample_form_data = {
    'name': sample_offer['name'],
    'offer': sample_offer['offer'],
    'email': sample_offer['email'],
    'location': sample_offer['location']
}
class HomelyTests(TestCase):
    """Flask route tests; MongoDB access is mocked at the pymongo level."""

    def setUp(self):
        """Get Flask test client."""
        self.client = app.test_client()
        # Show Flask errors that happen during tests
        app.config['TESTING'] = True

    def test_properties_index(self):
        """Test the properties homepage."""
        result = self.client.get('/')
        self.assertEqual(result.status, '200 OK')
        self.assertIn(b'Welcome', result.data)

    def test_offers_new(self):
        """Test the new offer creation page."""
        result = self.client.get('/offers_new')
        self.assertEqual(result.status, '200 OK')
        self.assertIn(b'Make an Offer', result.data)

    def test_offers_show_every(self):
        """Test showing the page of all offers."""
        result = self.client.get('/offers_show_every')
        self.assertEqual(result.status, '200 OK')
        self.assertIn(b'Offers', result.data)

    @mock.patch('pymongo.collection.Collection.insert_one')
    def test_submit_offer(self, mock_insert):
        """Test submitting a new offer. Entry point for route
        is called offers_show_all.
        """
        result = self.client.post('offers_show', data=sample_form_data)
        # After submitting, should redirect to the offers_show page.
        self.assertEqual(result.status, '302 FOUND')
        mock_insert.assert_called_with(sample_offer)

    @mock.patch('pymongo.collection.Collection.find_one')
    def test_show_offer(self, mock_find):
        """Test showing a single offer."""
        mock_find.return_value = sample_offer
        result = self.client.get(f'/offers/{sample_offer_id}')
        self.assertEqual(result.status, '200 OK')
        self.assertIn(b'Description', result.data)

    @mock.patch('pymongo.collection.Collection.find_one')
    def test_offers_edit(self, mock_find):
        """Test rendering of the edit offer form."""
        mock_find.return_value = sample_offer
        result = self.client.get(f'/offers/{sample_offer_id}/edit')
        self.assertEqual(result.status, '200 OK')
        self.assertIn(b'Edit This Offer', result.data)

    @mock.patch('pymongo.collection.Collection.find_one')
    def test_edit_offer(self, mock_find):
        """Test submitted an edited offer."""
        # NOTE(review): this duplicates test_show_offer (same GET and
        # assertions); it never POSTs edited data, so the edit submission is
        # effectively untested -- confirm intent.
        mock_find.return_value = sample_offer
        result = self.client.get(f'/offers/{sample_offer_id}')
        self.assertEqual(result.status, '200 OK')
        self.assertIn(b'Description', result.data)

    @mock.patch('pymongo.collection.Collection.delete_one')
    def test_offers_delete(self, mock_delete):
        """Test deletion of an offer."""
        # The form tunnels the DELETE verb through a POST field.
        form_data = {'_method': 'DELETE'}
        result = self.client.post(f'/offers/{sample_offer_id}/delete',
                                  data=form_data)
        self.assertEqual(result.status, '302 FOUND')
        mock_delete.assert_called_with({'_id': sample_offer_id})
if __name__ == '__main__':
    # Run the suite when executed directly.
    unittest_main()
|
3,542 | 67904f3a29b0288a24e702f9c3ee001ebc279748 | class ListNode:
    def __init__(self, val: int, next=None):
        # Singly-linked node: payload plus pointer to the next node (or None).
        self.val = val
        self.next = next
def reverseKGroup(head: ListNode, k: int) -> ListNode:
    """Reverse the list in groups of *k* nodes (LeetCode 25 style).

    The list is walked once; each time a full group of k nodes has been
    seen it is reversed in place by the recursive helper.  A trailing
    group shorter than k is left in its original order.
    """
    # prev: tail of the previous (already reversed) group
    # rs: first node of the group currently being collected
    # successor: node immediately after the group being reversed
    prev, cur, rs, successor = None, head, head, None
    def reverseK(node: ListNode, count: int) -> ListNode:
        # Reverse k nodes starting at *node*; returns the group's new head.
        nonlocal successor
        nonlocal prev
        if count + 1 == k:
            # Reached the k-th node: remember what follows the group.
            successor = node.next
            return node
        first = reverseK(node.next, count + 1)
        # Unwinding: flip the link, and point this node at the successor
        # (only the group's final tail keeps that link).
        node.next.next = node
        node.next = successor
        if prev: prev.next = first  # stitch the previous group to this one
        return first
    index = 1
    while cur:
        if index % k == 0:
            # A full group [rs .. cur] has been collected: reverse it.
            sub_head = reverseK(rs, 0)
            prev = rs  # rs is now the tail of the reversed group
            if index == k: head = sub_head  # first group defines the new head
            rs, cur = successor, successor
        else:
            cur = cur.next
        index += 1
    return head
def print_list(head: ListNode):
    """Dump the list to stdout, one 'value-->' line per node, then a footer."""
    cur = head
    while cur is not None:
        print(f"{cur.val}-->")
        cur = cur.next
    print('---end--')
if __name__ == '__main__':
    # Build the list 1->2->3->4->5, reverse it as one group of 5, print it.
    five = ListNode(5)
    four = ListNode(4, five)
    three = ListNode(3, four)
    two = ListNode(2, three)
    one = ListNode(1, two)
    # print_list(one)
    reversed_node = reverseKGroup(one, 5)
    print_list(reversed_node)
|
3,543 | 1810fee40ff8a99871ecc1d024f6794a68ee54e8 | from marshmallow import fields
from server.common.database import Media
from server.common.schema.ref import ma
class MediaSchema(ma.SQLAlchemyAutoSchema):
    """Marshmallow schema for Media rows, with hypermedia _links."""
    class Meta:
        model = Media
        fields = ('id', 'name', 'mimetype', 'extension', 'owner', '_links')
        dump_only = ('id', 'owner', '_links')
        include_fk = True
    # Serialise the primary key as a UUID string.
    id = fields.UUID()
    # Expose the owner_id foreign key under the name 'owner'.
    owner = ma.auto_field('owner_id')
    # Links to the resource, its collection, the binary file, a thumbnail
    # variant (same endpoint with the 'thumb' flag) and the owning user.
    _links = ma.Hyperlinks({
        'self': ma.URLFor('media', values={'media_id': '<id>'}),
        'collection': ma.URLFor('medias'),
        'image': ma.URLFor('media_file', values={'media_id': '<id>'}),
        'thumbnail': ma.URLFor('media_file', values={'media_id': '<id>', 'thumb': ''}),
        'owner': ma.URLFor('user', values={'user_id': '<owner>'})
    })
# Register the schema on the model so generic code can look it up.
Media.__marshmallow__ = MediaSchema
|
3,544 | 49c3c3b8c4b097f520456736e31ac306a9f73ac7 |
class Virus:
    """A virus that ages daily; malignancy grows every third day of age and
    is always kept within [0, 99]."""

    def __init__(self, _name, _age, _malignancy):
        self.name = _name
        self.age = _age
        self.malignancy = _malignancy

    def set_name(self, _name):
        self.name = _name

    def set_age(self, _age):
        self.age = _age

    def set_malignancy(self, _malignancy):
        self.malignancy = _malignancy

    def update(self):
        """Advance one day: age +1, malignancy +1 on every third day,
        then clamp malignancy to the range [0, 99]."""
        self.age = self.age + 1
        if self.age % 3 == 0:
            self.malignancy = self.malignancy + 1
        self.malignancy = min(max(self.malignancy, 0), 99)

    def __str__(self):
        return "Nama: {}; Usia: {}; Tingkat Keganasan: {}".format(self.name, str(self.age), str(self.malignancy))
if __name__ == "__main__":
    # First input line: <number of viruses> <number of days>.
    tmp = input().split()
    number_of_virus = int(tmp[0])
    number_of_day = int(tmp[1])
    viruses = []
    # One line per virus: <name> <age> <malignancy>.
    for index_of_virus in range(0, number_of_virus):
        tmp = input().split()
        virus_name = tmp[0]
        virus_age = int(tmp[1])
        virus_malignancy = int(tmp[2])
        tmp_virus = Virus(virus_name, virus_age, virus_malignancy)
        viruses.append(tmp_virus)
    # Simulate day by day, printing every virus after its daily update.
    for day in range(1, number_of_day + 1):
        print("Hari #{}".format(str(day)))
        for index_of_virus in range(0, len(viruses)):
            viruses[index_of_virus].update()
            print(viruses[index_of_virus])
|
3,545 | aec5280869a780bbd93ef24b659d9959f7b81426 | import imp
from django.shortcuts import render
# ***************** API ****************
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser,FileUploadParser,MultiPartParser,FormParser
from .models import *
from django.http import Http404
from .serializers import *
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status,viewsets,permissions
from rest_framework import generics
from rest_framework.permissions import AllowAny, IsAuthenticated
from django.contrib.auth import get_user_model
from client.models import ClientModel
from adminapp.models import SchoolModel
from adminapp.serializers import SchoolSerializer
from .custompermissions import *
from client.permissions import *
from rest_framework.authentication import SessionAuthentication
from Student.permissions import IsStudent
User = get_user_model()
def get_user_from_token(request):
    """Resolve the Django user that owns the request's auth token."""
    token = request.user.auth_token  # auth key(token) of current user 91391f4c12b94b753d08008150d2315d9d8d7e1e
    # Debug trace left in on purpose? NOTE(review): consider logging instead.
    print("token.user_id", token.user_id)  # gives id of user (pk) 2
    user = User.objects.get(id=token.user_id)  # look the user object up by pk
    return user
# Create your views here.
# class UserListView(generics.ListAPIView):
# parser_classes = (MultiPartParser,FormParser)
# queryset = UserModel.objects.all()
# serializer_class = UserSerializer
# class UserDetailView(generics.RetrieveAPIView):
# parser_classes = (MultiPartParser,FormParser)
# queryset = UserModel.objects.all()
# serializer_class = UserSerializer
class AddArticleView(generics.CreateAPIView):
    """POST: create an article (any authenticated user)."""
    #All authenticated users can add articles
    permission_classes = (IsAuthenticated, )
    serializer_class = ArticleSerializer
    queryset = ArticleModel.objects.all()

    def perform_create(self, serializer):
        # Stamp the new article with the requesting user as its author.
        serializer.save(user=self.request.user)
class ListArticleView(generics.ListAPIView):
    """GET: list articles with status 'P' (published); open to anyone."""
    #Anyone can see the published Articles
    permission_classes = (AllowAny, )
    serializer_class = ArticleSerializer
    queryset = ArticleModel.objects.filter(status__exact="P")
class ArticleDetail(generics.RetrieveAPIView):
    """GET: detail of a single published article, looked up by slug."""
    #anyone can see detail of published article
    lookup_field = 'slug'
    permission_classes = (AllowAny, )
    serializer_class = ArticleSerializer
    queryset = ArticleModel.objects.filter(status__exact="P")
class ArticleDeleteUpdate(generics.RetrieveUpdateDestroyAPIView):
    '''
    Get: superadmin can see all articles (draft, published)
    PATCH : superadmin can mark article as published by changing status = P
    Delete: superadmin can delete article.
    '''
    # Unlike the public views, the queryset is unfiltered (drafts included)
    # and access is restricted to superusers.
    lookup_field = 'slug'
    permission_classes = (IsSuperUser, )
    serializer_class = UpdateArticleSerializer
    queryset = ArticleModel.objects.all()
class AddQuestions(generics.CreateAPIView):
permission_classes = (IsSuperUser, )
serializer_class = QuestionSerializer
queryset = QuestionModel.objects.all()
class ViewQuestion(generics.ListAPIView):
permission_classes = (IsClient, )
serializer_class = QuestionSerializer
queryset = QuestionModel.objects.all()
class QuestionDetailView(generics.RetrieveAPIView):
lookup_field = 'slug'
permission_classes = (IsClient, )
serializer_class = QuestionSerializer
queryset = QuestionModel.objects.all()
class QuestionDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
lookup_field = 'slug'
permission_classes = (IsSuperUser, )
serializer_class = QuestionSerializer
queryset = QuestionModel.objects.all()
# --- Schools: superuser manages; clients may list and view.
class AddSchools(generics.CreateAPIView):
    permission_classes = (IsSuperUser, )
    serializer_class = SchoolSerializer
    queryset = SchoolModel.objects.all()
class ViewSchool(generics.ListAPIView):
    permission_classes = (IsClient, )
    serializer_class = SchoolSerializer
    queryset = SchoolModel.objects.all()
class SchoolDetailView(generics.RetrieveAPIView):
    lookup_field = 'slug'
    permission_classes = (IsClient, )
    serializer_class = SchoolSerializer
    queryset = SchoolModel.objects.all()
class SchoolDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
    lookup_field = 'slug'
    permission_classes = (IsSuperUser, )
    serializer_class = SchoolSerializer
    queryset = SchoolModel.objects.all()
# --- Blogs: superuser manages; clients may list and view.
class AddBlogs(generics.CreateAPIView):
    permission_classes = (IsSuperUser, )
    serializer_class = BlogSerializer
    queryset = BlogModel.objects.all()
class ViewBlog(generics.ListAPIView):
    permission_classes = (IsClient, )
    serializer_class = BlogSerializer
    queryset = BlogModel.objects.all()
class BlogDetailView(generics.RetrieveAPIView):
    lookup_field = 'slug'
    permission_classes = (IsClient, )
    serializer_class = BlogSerializer
    queryset = BlogModel.objects.all()
class BlogDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
    lookup_field = 'slug'
    permission_classes = (IsSuperUser, )
    serializer_class = BlogSerializer
    queryset = BlogModel.objects.all()
# --- Events: superuser manages; listing and detail are public.
class AddEventView(generics.CreateAPIView):
    #only super user can add events
    permission_classes = (IsSuperUser, )
    serializer_class = EventSerializer
    queryset = EventModel.objects.all()
class ListEventView(generics.ListAPIView):
    #Anyone can see the events
    permission_classes = (AllowAny, )
    serializer_class = EventSerializer
    queryset = EventModel.objects.all()
class EventDetailView(generics.RetrieveAPIView):
    #Anyone can see the detail of events
    lookup_field = 'slug'
    permission_classes = (AllowAny, )
    serializer_class = EventSerializer
    queryset = EventModel.objects.all()
class EventDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
    #only superadmin can delete and update events
    lookup_field = 'slug'
    permission_classes = (IsSuperUser, )
    serializer_class = EventSerializer
    queryset = EventModel.objects.all()
# --- Business partners: superuser manages; listing and detail are public.
class AddBusinessPartners(generics.CreateAPIView):
    permission_classes = (IsSuperUser, )
    serializer_class = BusinessPartnersSerializer
    queryset = BusinessPartnersModel.objects.all()
class ViewBusinessPartner(generics.ListAPIView):
    permission_classes = (AllowAny, )
    serializer_class = BusinessPartnersSerializer
    queryset = BusinessPartnersModel.objects.all()
class BusinessPartnerDetailView(generics.RetrieveAPIView):
    lookup_field = 'slug'
    permission_classes = (AllowAny, )
    serializer_class = BusinessPartnersSerializer
    queryset = BusinessPartnersModel.objects.all()
class BusinessPartnerDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
    lookup_field = 'slug'
    permission_classes = (IsSuperUser, )
    serializer_class = BusinessPartnersSerializer
    queryset = BusinessPartnersModel.objects.all()
# --- Kid stories: students submit; the public sees only published ("P")
# --- entries; the superuser moderates drafts via the DeleteUpdate view.
class AddKidStory(generics.CreateAPIView):
    #Students can add kidstory
    permission_classes = (IsStudent, )
    serializer_class = KidStorySerializer
    queryset = KidStoryModel.objects.all()
    def perform_create(self, serializer):
        # Stamp the submitting student as the story's owner.
        serializer.save(user=self.request.user)
class ViewKidStory(generics.ListAPIView):
    # anyone can see published kids story
    permission_classes = (AllowAny, )
    serializer_class = KidStorySerializer
    queryset = KidStoryModel.objects.filter(status__exact="P")
class KidStoryDetailView(generics.RetrieveAPIView):
    #anyone can see detail of published kids story
    lookup_field = 'slug'
    permission_classes = (AllowAny, )
    serializer_class = KidStorySerializer
    queryset = KidStoryModel.objects.filter(status__exact="P")
class KidStoryDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
    '''
    Get: superadmin can see all stories (draft, published)
    PATCH : superadmin can mark stories as published by changing status = P
    Delete: superadmin can delete stories.
    '''
    lookup_field = 'slug'
    permission_classes = (IsSuperUser, )
    serializer_class = UpdateKidsStorySerializer
    queryset = KidStoryModel.objects.all()
# --- Kid talents: same moderation flow as stories, but students OR clients
# --- may submit.
class AddKidTalent(generics.CreateAPIView):
    #Students or client can add KidsTalent
    permission_classes = (IsStudentORClient, )
    serializer_class = KidTalentSerializer
    queryset = KidTalentModel.objects.all()
    def perform_create(self, serializer):
        # Stamp the submitter as the talent entry's owner.
        serializer.save(user=self.request.user)
class ViewKidTalent(generics.ListAPIView):
    # anyone can see published kids talent
    permission_classes = (AllowAny, )
    serializer_class = KidTalentSerializer
    queryset = KidTalentModel.objects.filter(status__exact="P")
class KidTalentDetailView(generics.RetrieveAPIView):
    #anyone can see detail of published kids talent
    lookup_field = 'slug'
    permission_classes = (AllowAny, )
    serializer_class = KidTalentSerializer
    queryset = KidTalentModel.objects.filter(status__exact="P")
class KidTalentDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
    '''
    Get: superadmin can see all kids talent (draft, published)
    PATCH : superadmin can mark kids talent as published by changing status = P
    Delete: superadmin can delete kids talent.
    '''
    lookup_field = 'slug'
    permission_classes = (IsSuperUser, )
    serializer_class = UpdateKidsTalentSerializer
    queryset = KidTalentModel.objects.all()
# --- Courses: superuser manages; listing and detail are public.
class AddCourses(generics.CreateAPIView):
    permission_classes = (IsSuperUser, )
    serializer_class = CourseSerializer
    queryset = CourseModel.objects.all()
class ViewCourse(generics.ListAPIView):
    permission_classes = (AllowAny, )
    serializer_class = CourseSerializer
    queryset = CourseModel.objects.all()
class CourseDetailView(generics.RetrieveAPIView):
    lookup_field = 'slug'
    permission_classes = (AllowAny, )
    serializer_class = CourseSerializer
    queryset = CourseModel.objects.all()
class CourseDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
    lookup_field = 'slug'
    permission_classes = (IsSuperUser, )
    serializer_class = CourseSerializer
    queryset = CourseModel.objects.all()
# --- Quiz contexts: superuser manages; clients may list and view.
class AddQuizContext(generics.CreateAPIView):
    permission_classes = (IsSuperUser, )
    serializer_class = QuizContextSerializer
    queryset = QuizContextModel.objects.all()
class ViewQuizContext(generics.ListAPIView):
    permission_classes = (IsClient, )
    serializer_class = QuizContextSerializer
    queryset = QuizContextModel.objects.all()
class QuizContextDetailView(generics.RetrieveAPIView):
    lookup_field = 'slug'
    permission_classes = (IsClient, )
    serializer_class = QuizContextSerializer
    queryset = QuizContextModel.objects.all()
class QuizContextDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
    lookup_field = 'slug'
    permission_classes = (IsSuperUser, )
    serializer_class = QuizContextSerializer
    queryset = QuizContextModel.objects.all()
# --- Client feedback: superuser manages; clients may list and view.
class AddFeedback(generics.CreateAPIView):
    permission_classes = (IsSuperUser, )
    serializer_class = ClientFeedbackSerializer
    queryset = ClientFeedBackModel.objects.all()
class ViewFeedback(generics.ListAPIView):
    permission_classes = (IsClient, )
    serializer_class = ClientFeedbackSerializer
    queryset = ClientFeedBackModel.objects.all()
class FeedbackDetailView(generics.RetrieveAPIView):
    lookup_field = 'slug'
    permission_classes = (IsClient, )
    serializer_class = ClientFeedbackSerializer
    queryset = ClientFeedBackModel.objects.all()
class FeedbackDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
    lookup_field = 'slug'
    permission_classes = (IsSuperUser, )
    serializer_class = ClientFeedbackSerializer
    queryset = ClientFeedBackModel.objects.all()
# --- Website ads: superuser manages; listing and detail are public.
class AddWebsiteAd(generics.CreateAPIView):
    permission_classes = (IsSuperUser, )
    serializer_class = WebsiteAdSerializer
    queryset = WebsiteAdModel.objects.all()
class ViewWebsiteAd(generics.ListAPIView):
    permission_classes = (AllowAny, )
    serializer_class = WebsiteAdSerializer
    queryset = WebsiteAdModel.objects.all()
class WebsiteAdDetailView(generics.RetrieveAPIView):
    lookup_field = 'slug'
    permission_classes = (AllowAny, )
    serializer_class = WebsiteAdSerializer
    queryset = WebsiteAdModel.objects.all()
class WebsiteAdDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
    lookup_field = 'slug'
    permission_classes = (IsSuperUser, )
    serializer_class = WebsiteAdSerializer
    queryset = WebsiteAdModel.objects.all()
# NOTE(review): disabled Approval endpoints below -- kept as found; delete
# once it is confirmed the Approval feature is gone for good.
# class AddApproval(generics.CreateAPIView):
#     permission_classes = (IsSuperUser, )
#     serializer_class = ApprovalSerializer
#     queryset = ApprovalModel.objects.all()
# class ViewApproval(generics.ListAPIView):
#     permission_classes = (IsClient, )
#     serializer_class = ApprovalSerializer
#     queryset = ApprovalModel.objects.all()
# class ApprovalDetailView(generics.RetrieveAPIView):
#     lookup_field = 'slug'
#     permission_classes = (IsClient, )
#     serializer_class = ApprovalSerializer
#     queryset = ApprovalModel.objects.all()
# class ApprovalDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
#     lookup_field = 'slug'
#     permission_classes = (IsSuperUser, )
#     serializer_class = ApprovalSerializer
#     queryset = ApprovalModel.objects.all()
# --- Business promotions: superuser manages; listing and detail are public.
class AddBusinessPromotion(generics.CreateAPIView):
    permission_classes = (IsSuperUser, )
    serializer_class = BusinessPromotionSerializer
    queryset = BusinessPromotionModel.objects.all()
class ViewBusinessPromotion(generics.ListAPIView):
    permission_classes = (AllowAny, )
    serializer_class = BusinessPromotionSerializer
    queryset = BusinessPromotionModel.objects.all()
class BusinessPromotionDetailView(generics.RetrieveAPIView):
    lookup_field = 'slug'
    permission_classes = (AllowAny, )
    serializer_class = BusinessPromotionSerializer
    queryset = BusinessPromotionModel.objects.all()
class BusinessPromotionDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
    lookup_field = 'slug'
    permission_classes = (IsSuperUser, )
    serializer_class = BusinessPromotionSerializer
    queryset = BusinessPromotionModel.objects.all()
# --- Team members: superuser manages; listing and detail are public.
class AddTeam(generics.CreateAPIView):
    permission_classes = (IsSuperUser, )
    serializer_class = TeamSerializer
    queryset = TeamModel.objects.all()
class ViewTeam(generics.ListAPIView):
    permission_classes = (AllowAny, )
    serializer_class = TeamSerializer
    queryset = TeamModel.objects.all()
class TeamDetailView(generics.RetrieveAPIView):
    lookup_field = 'slug'
    permission_classes = (AllowAny, )
    serializer_class = TeamSerializer
    queryset = TeamModel.objects.all()
class TeamDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
    lookup_field = 'slug'
    permission_classes = (IsSuperUser, )
    serializer_class = TeamSerializer
    queryset = TeamModel.objects.all()
# --- Advisory board: every endpoint (including list/detail) is
# --- superuser-only, unlike the other resources in this module.
class AddAdvisoryBoard(generics.CreateAPIView):
    permission_classes = (IsSuperUser, )
    serializer_class = AdvisoryBoardSerializer
    queryset = AdvisoryBoardModel.objects.all()
class ViewAdvisoryBoard(generics.ListAPIView):
    permission_classes = (IsSuperUser, )
    serializer_class = AdvisoryBoardSerializer
    queryset = AdvisoryBoardModel.objects.all()
class AdvisoryBoardDetailView(generics.RetrieveAPIView):
    lookup_field = 'slug'
    permission_classes = (IsSuperUser, )
    serializer_class = AdvisoryBoardSerializer
    queryset = AdvisoryBoardModel.objects.all()
class AdvisoryBoardDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
    lookup_field = 'slug'
    permission_classes = (IsSuperUser, )
    serializer_class = AdvisoryBoardSerializer
    queryset = AdvisoryBoardModel.objects.all()
# --- Announcements: superuser manages; listing and detail are public.
class AddAnnouncement(generics.CreateAPIView):
    permission_classes = (IsSuperUser, )
    serializer_class = AnnouncementSerializer
    queryset = AnnouncementModel.objects.all()
class ListAnnouncement(generics.ListAPIView):
    permission_classes = (AllowAny, )
    serializer_class = AnnouncementSerializer
    queryset = AnnouncementModel.objects.all()
class AnnouncementDetail(generics.RetrieveAPIView):
    lookup_field = 'slug'
    permission_classes = (AllowAny, )
    serializer_class = AnnouncementSerializer
    queryset = AnnouncementModel.objects.all()
class AnnouncementDeleteUpdate(generics.RetrieveUpdateDestroyAPIView):
    lookup_field = 'slug'
    permission_classes = (IsSuperUser, )
    serializer_class = AnnouncementSerializer
    queryset = AnnouncementModel.objects.all()
class SuperadminProfileView(APIView):
    """Return the calling superuser's profile as ``{'name', 'email'}``."""

    permission_classes = (IsSuperUser, )

    def get(self, request, *args, **kwargs):
        # Resolve the account behind the request's auth token and expose a
        # minimal profile payload.
        account = get_user_from_token(request)
        return Response({'name': account.username, 'email': account.email})
# --- Job classifieds: superuser manages; listing and detail are public.
class AddJobClassified(generics.CreateAPIView):
    permission_classes = (IsSuperUser, )
    serializer_class = JobClassifiedSerializer
    queryset = JobClassifiedModel.objects.all()
class ViewJobClassified(generics.ListAPIView):
    permission_classes = (AllowAny, )
    serializer_class = JobClassifiedSerializer
    queryset = JobClassifiedModel.objects.all()
class JobClassifiedDetailView(generics.RetrieveAPIView):
    lookup_field = 'slug'
    permission_classes = (AllowAny, )
    serializer_class = JobClassifiedSerializer
    queryset = JobClassifiedModel.objects.all()
class JobClassifiedDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
    lookup_field = 'slug'
    permission_classes = (IsSuperUser, )
    serializer_class = JobClassifiedSerializer
    queryset = JobClassifiedModel.objects.all()
# --- Customer reviews: superuser manages; clients may list and view.
class AddCustomerReviews(generics.CreateAPIView):
    permission_classes = (IsSuperUser, )
    serializer_class = CustomerReviewSerializer
    queryset = CustomerReviewModel.objects.all()
class ViewCustomerReview(generics.ListAPIView):
    permission_classes = (IsClient, )
    serializer_class = CustomerReviewSerializer
    queryset = CustomerReviewModel.objects.all()
class CustomerReviewDetailView(generics.RetrieveAPIView):
    lookup_field = 'slug'
    permission_classes = (IsClient, )
    serializer_class = CustomerReviewSerializer
    queryset = CustomerReviewModel.objects.all()
class CustomerReviewDeleteUpdateView(generics.RetrieveUpdateDestroyAPIView):
    lookup_field = 'slug'
    permission_classes = (IsSuperUser, )
    serializer_class = CustomerReviewSerializer
    queryset = CustomerReviewModel.objects.all()
class ClientComplain(APIView):
    # NOTE(review): this view defines no HTTP handler methods, so every
    # request receives 405 Method Not Allowed.  The serializer below is
    # instantiated once at class-definition (import) time and never used.
    # Presumably a ``get`` returning serialized complaints was intended --
    # TODO confirm and finish the implementation.
    permission_classes = (IsSuperUser, )
    serializer = ViewComplainSerializer(many=True)
class clientfeedback(APIView):
    """Superuser endpoint: list feedback for classes this user administers."""

    permission_classes = (IsSuperUser, )

    def get(self, request, format=None):
        # Restrict to feedback whose Class is administered by the caller.
        queryset = ClientFeedBackModel.objects.filter(Class__admin=self.request.user)
        payload = ClientFeedbackSerializer(queryset, many=True).data
        return Response(payload)
class Enroll_Course(APIView):
    """Superuser endpoint: enroll a client into a course.

    POST validates the payload with EnrollCourseSerializer and persists it,
    returning 201 with the saved data, or 400 with validation errors.
    """

    permission_classes = (IsSuperUser, )

    def post(self, request, format=None):
        serializer = EnrollCourseSerializer(data=request.data)
        # Removed: leftover debug ``print(serializer)`` and an unused local
        # that read ``validated_data['course']`` without using it.
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ViewEnroll_Course(APIView):
    """Superuser endpoint: list enrollments for a (course, client) pair."""

    permission_classes = (IsSuperUser, )

    def get(self, request, *args, **kwargs):
        # Both selectors come from URL kwargs.
        enrollments = Enroll_CourseModel.objects.filter(
            course=self.kwargs['course_id'],
            client=self.kwargs['client_id'],
        )
        payload = ViewEnrollCourseSerializer(enrollments, many=True).data
        return Response(payload, status=status.HTTP_200_OK)
class DetailEnroll_CourseView(APIView):
    """Superuser CRUD on a single enrollment, addressed by primary key.

    GET returns the enrollment, PUT replaces it, DELETE removes it.
    A missing pk maps to 404.
    """

    permission_classes = (IsSuperUser, )

    def get_object(self, pk):
        try:
            return Enroll_CourseModel.objects.get(id=pk)
        except Enroll_CourseModel.DoesNotExist:
            # Narrowed from a bare ``except`` (which also swallowed
            # SystemExit/KeyboardInterrupt and masked real errors); this
            # matches the sibling CourseDetail.get_object.
            raise Http404

    def get(self, request, pk, format=None):
        serializer = ViewEnrollCourseSerializer(self.get_object(pk))
        return Response(serializer.data)

    def put(self, request, pk, format=None):
        data = self.get_object(pk)
        serializer = ViewEnrollCourseSerializer(data, data=request.data)
        # raise_exception=True makes DRF answer invalid payloads with a 400
        # itself, so the former ``else`` branch was unreachable and is gone.
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)

    def delete(self, request, pk, format=None):
        self.get_object(pk).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class CourseDetail(APIView):
    """Superuser CRUD on a single course, addressed by slug."""
    permission_classes = (IsSuperUser, )
    def get_object(self, slug):
        # 404 when no course carries this slug.
        try:
            return CourseModel.objects.get(slug=slug)
        except CourseModel.DoesNotExist:
            raise Http404
    def get(self, request, slug, format=None):
        data = self.get_object(slug)
        # Ownership check: the course's school admin must be the caller.
        if data.classes.school.admin == self.request.user:
            serializer = ViewCourseSerializer(data)
            return Response(serializer.data)
        else:
            return Response(
                {'message':'This course does not belong to your school'},
                status=status.HTTP_400_BAD_REQUEST
            )
    def put(self,request,slug,format=None):
        data = self.get_object(slug)
        # NOTE(review): ``get`` checks ownership via data.classes.school.admin
        # while ``put``/``delete`` use data.course.client.admin on the same
        # CourseModel instance -- one of these chains looks wrong; confirm
        # against the model definitions before touching this code.
        if data.course.client.admin == self.request.user:
            serializer = CourseSerializer(data,data = request.data)
            # NOTE(review): with raise_exception=True, is_valid() never
            # returns False, so the ``else`` branch below is unreachable.
            if serializer.is_valid(raise_exception=True):
                course = serializer.validated_data.get('course', '')
                # Second ownership check on the (possibly changed) course.
                if course.client.admin == self.request.user:
                    serializer.save()
                    return Response(serializer.data,status=status.HTTP_201_CREATED)
                return Response(
                    {'message':'This Class does not belong to you'},
                    status=status.HTTP_400_BAD_REQUEST
                )
            else:
                return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        else:
            return Response(
                {'message':'This course does not belong to you'},
                status=status.HTTP_400_BAD_REQUEST
            )
    def delete(self,request,slug,format=None):
        data = self.get_object(slug)
        if data.course.client.admin == self.request.user:
            data.delete()
            return Response(status = status.HTTP_204_NO_CONTENT)
        else:
            return Response(
                {'message':'This course does not belong to you'},
                status=status.HTTP_400_BAD_REQUEST
            )
class SchoolRegistrationView(RegisterView):
    # Superuser-only wrapper around the auth RegisterView: registers a new
    # school account via RegisterSchoolSerializer.
    serializer_class = RegisterSchoolSerializer
    permission_classes = (IsSuperUser,)
class Add_question(generics.CreateAPIView):
    """Superuser endpoint: create a question.

    ``post`` is implemented by hand, so none of CreateAPIView's
    ``serializer_class``/``queryset`` machinery is used here.
    """

    permission_classes = (IsSuperUser, )

    def post(self, request, format=None):
        serializer = QuestionSerializer(data=request.data)
        # Removed: leftover debug ``print(serializer)`` and an unused local
        # that read ``validated_data['course']`` without using it.
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class Viewquestion(generics.ListAPIView):
    """Superuser endpoint: list every question attached to one course."""

    permission_classes = (IsSuperUser, )

    def get(self, request, *args, **kwargs):
        # The course is selected by the ``course_id`` URL kwarg.
        questions = QuestionModel.objects.filter(course_id=self.kwargs['course_id'])
        payload = QuestionSerializer(questions, many=True).data
        return Response(payload, status=status.HTTP_200_OK)
class QuestionDetail(APIView):
    """Superuser CRUD on a single question, addressed by primary key."""

    permission_classes = (IsSuperUser, )

    def get_object(self, pk):
        try:
            return QuestionModel.objects.get(id=pk)
        except QuestionModel.DoesNotExist:
            # Narrowed from a bare ``except``: only a missing row is a 404;
            # any other failure should surface rather than be masked.
            raise Http404

    def get(self, request, pk, format=None):
        serializer = QuestionSerializer(self.get_object(pk))
        return Response(serializer.data)

    def put(self, request, pk, format=None):
        serializer = QuestionSerializer(self.get_object(pk), data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk, format=None):
        self.get_object(pk).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class SubmittedQuestionView(APIView):
    """Superuser endpoint: list a client's question submissions for a course."""

    permission_classes = (IsSuperUser, )

    def get(self, request, *args, **kwargs):
        # Removed an unused ``admin = self.request.user`` local; access
        # control is already enforced by ``permission_classes``.
        submissions = Client_SubmitquestionModel.objects.filter(
            course__course=self.kwargs['course_id'],
            client__client=self.kwargs['client_id'],
        )
        serializer = Client_submittedquestionSerializer(submissions, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
class AddonlineTest(generics.CreateAPIView):
    """Superuser endpoint: create an online test.

    ``post`` is implemented by hand, so none of CreateAPIView's
    ``serializer_class``/``queryset`` machinery is used here.
    """

    permission_classes = (IsSuperUser, )

    def post(self, request, format=None):
        serializer = testSerializer(data=request.data)
        # Removed: leftover debug ``print(serializer)`` and an unused local
        # that read ``validated_data['course']`` without using it.
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ViewOnlinetest(generics.ListAPIView):
    """Superuser endpoint: list every online test attached to one course."""

    permission_classes = (IsSuperUser, )

    def get(self, request, *args, **kwargs):
        # The course is selected by the ``course_id`` URL kwarg.
        tests = Client_testModel.objects.filter(course_id=self.kwargs['course_id'])
        payload = testSerializer(tests, many=True).data
        return Response(payload, status=status.HTTP_200_OK)
class onlinetestDetail(APIView):
    """Superuser CRUD on a single online test, addressed by primary key."""

    permission_classes = (IsSuperUser, )

    def get_object(self, pk):
        try:
            return Client_testModel.objects.get(id=pk)
        except Client_testModel.DoesNotExist:
            # Narrowed from a bare ``except``: only a missing row is a 404;
            # any other failure should surface rather than be masked.
            raise Http404

    def get(self, request, pk, format=None):
        serializer = testSerializer(self.get_object(pk))
        return Response(serializer.data)

    def put(self, request, pk, format=None):
        serializer = testSerializer(self.get_object(pk), data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk, format=None):
        self.get_object(pk).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class SubmittedonlineTestView(APIView):
    """Superuser endpoint: list a client's test submissions for a course."""

    permission_classes = (IsSuperUser, )

    def get(self, request, *args, **kwargs):
        # Removed an unused ``admin = self.request.user`` local; access
        # control is already enforced by ``permission_classes``.
        submissions = Client_SubmittestModel.objects.filter(
            course__course=self.kwargs['course_id'],
            client__client=self.kwargs['client_id'],
        )
        serializer = Client_submittedtestSerializer(submissions, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
3,546 | 7b35a7f28c11be15fe2ac8d6eae4067ac5379f3e | def test(a):
    """
    Return the square of *a*.
    """
    return (a**2)
# Demo: print 2 squared, show the function's help page, and access the
# docstring attribute directly.
print(test(2))
help(test)
test.__doc__
|
3,547 | f66f82c5c2842fc4fcae2251d4a16a9850230041 | # Create your views here.
import logging

from django.http import HttpResponse, HttpResponseRedirect
from django.template import Context, loader
from django.db import transaction
from django.db.models import Q
from maximus.models import Mercenary, Team, TeamMember, Tournament, TournamentTeam, TournamentMatchup, Matchup, MatchupStatistics, MatchResult
def index(request):
    """Render the landing page with an empty template context."""
    template = loader.get_template('index.html')
    return HttpResponse(template.render(Context({})))
def create_team(request):
    """Show the team-builder form (GET) or create a team from it (POST)."""
    def get():
        # Split the mercenary roster into hero and pawn choices for the form.
        heroes = Mercenary.objects.filter(type='HERO')
        pawns = Mercenary.objects.filter(type='PAWN')
        model = Context({ 'heroes': heroes, 'pawns': pawns, 'mercrange': range(1,7), 'teams': get_team_groups() })
        t = loader.get_template('teams.html')
        return HttpResponse(t.render(model))
    def post():
        # Create the team with the chosen hero as leader, zeroed record...
        team = Team()
        class_c = request.POST['hero']
        leader = Mercenary.objects.filter(type='HERO').filter(name=class_c)
        team.leader = leader[0]
        team.wins = 0
        team.losses = 0
        team.notes = ""
        team.save()
        # ...then attach the selected pawns, keyed by location slots 1-9
        # (blank form fields mean the slot is empty).
        for i in range(1,10):
            who = request.POST['pawn%s' % i]
            if who != '':
                merc = Mercenary.objects.filter(type='PAWN').filter(name=who)
                current = TeamMember()
                current.team = team
                current.merc = merc[0]
                current.location = i
                current.save()
        return HttpResponseRedirect('/app/teams')
    if request.method == "POST":
        return post()
    else:
        return get()
def edit_team(request):
    """Show a team's edit form (GET) or save its notes (POST)."""
    if request.method == "POST":
        # Persist the edited notes, then bounce back to the edit page.
        team_id = request.POST["team"]
        team = Team.objects.get(id=team_id)
        team.notes = request.POST["notes"]
        team.save()
        return HttpResponseRedirect('/app/teams/edit?team=%s' % team_id)
    # GET: render the edit form for the requested team.
    team = Team.objects.get(id=request.GET["team"])
    template = loader.get_template('edit_team.html')
    return HttpResponse(template.render(Context({ 'team': team })))
def create_tournament(request):
    """List tournaments and the creation form (GET) or start one (POST)."""
    def get():
        inprogress = Tournament.objects.filter(completed=False);
        finished = Tournament.objects.filter(completed=True);
        model = Context({ 'teams': get_team_groups(), "in_progress": inprogress, "finished": finished })
        t = loader.get_template('tournament/create_tournament.html')
        return HttpResponse(t.render(model))
    @transaction.commit_on_success
    def post():
        # Create the tournament and register every selected participant in
        # one transaction (commit_on_success is the pre-Django-1.6 API --
        # NOTE(review): replaced by transaction.atomic in modern Django).
        tournament = Tournament()
        tournament.completed = False
        tournament.save()
        for team_id in request.POST.getlist('participant'):
            if team_id != "":
                team = Team.objects.get(id=team_id)
                tourney_team = TournamentTeam()
                tourney_team.tournament = tournament
                tourney_team.team = team
                tourney_team.save()
        return HttpResponseRedirect('/app/tournament/matchups?tournament=%s' % str(tournament.id))
    if request.method == "POST":
        return post()
    else:
        return get()
def view_tournament(request):
    """Bracket page: show a tournament (GET) or seed a first-round pair (POST)."""
    def get():
        tourney = Tournament.objects.get(id=request.GET["tournament"])
        # Teams without a matchup_index have not been seeded into the
        # bracket yet.
        pending_teams = []
        teams = []
        for team in tourney.tourney_team_set.all():
            if team.matchup_index == None:
                pending_teams.append(team.team)
            else:
                teams.append(team.team)
        # Bracket skeleton: rounds of 4, 2 and 1 slots (an 8-team bracket);
        # placeholder ints are overwritten by the recorded matchups.
        matches = [[i for i in range(0,4)],[i for i in range(0,2)],[0]]
        for match in tourney.tourney_match_set.all():
            matches[match.round][match.index] = match
        model = Context({ "pending_teams": pending_teams, "teams": teams, "matches": matches, "tourney": tourney})
        t = loader.get_template('tournament/view_tournament.html')
        return HttpResponse(t.render(model))
    @transaction.commit_on_success
    def post():
        # Create a round-0 matchup from the two teams posted as ``versus``
        # and remember each team's bracket slot on its TournamentTeam row.
        tourney_id = request.GET["tournament"]
        tourney = Tournament.objects.get(id=tourney_id)
        versus = request.POST.getlist("versus")
        teams = []
        for team_id in versus:
            if team_id != "":
                teams.append(Team.objects.get(id=team_id))
        existing_matches = TournamentMatchup.objects.filter(tournament=tourney)
        match = Matchup()
        match.team1 = teams[0]
        match.team2 = teams[1]
        match.save()
        tourney_match = TournamentMatchup()
        tourney_match.tournament = tourney
        tourney_match.matchup = match
        tourney_match.round = 0
        # New match takes the next free index in round 0.
        tourney_match.index = existing_matches.count()
        tourney_match.save()
        tourney_teams = []
        tourney_teams.append(TournamentTeam.objects.filter(tournament=tourney).filter(team=teams[0]).get())
        tourney_teams.append(TournamentTeam.objects.filter(tournament=tourney).filter(team=teams[1]).get())
        tourney_teams[0].matchup_index = tourney_match.index * 2
        tourney_teams[1].matchup_index = tourney_match.index * 2 + 1
        tourney_teams[0].save();
        tourney_teams[1].save();
        return HttpResponseRedirect("/app/tournament/matchups?tournament=%s" % tourney_id)
    if request.method == "POST":
        return post()
    else:
        return get()
def result_tournament(request):
    """Record a match result and advance the winner through the bracket."""
    @transaction.commit_on_success
    def post():
        tournament_match_id = request.GET['tournament_match_key']
        match = TournamentMatchup.objects.get(id=tournament_match_id)
        winner_id = int(request.POST['winner'])
        matchup = match.matchup
        result = MatchResult()
        # The posted winner id must be one of the matchup's two teams.
        if winner_id == matchup.team1.id:
            result.winner = matchup.team1
            result.loser = matchup.team2
        elif winner_id == matchup.team2.id:
            result.winner = matchup.team2
            result.loser = matchup.team1
        else:
            raise Exception("could not determine winner key: %s (%s, %s)" % (winner_id, matchup.team1.id, matchup.team2.id))
        update_stats(result.winner, result.loser)
        result.save()
        # Bracket topology: round-N matches 0/1 feed slot 0 of round N+1,
        # matches 2/3 feed slot 1.
        next_round_indices = {0:0, 1:0, 2:1, 3:1}
        next_round_index = next_round_indices[match.index]
        next_round = match.round + 1
        if match.round < 2:
            # look in existing matches for this winner's opponent
            existing = TournamentMatchup.objects.filter(tournament=match.tournament).filter(round=next_round).filter(index=next_round_index)
            if existing.count() == 1:
                # Opponent already advanced: fill the second slot.
                next_match = existing[0]
                next_matchup = next_match.matchup
                next_matchup.team2 = result.winner
                next_matchup.save()
            elif existing.count() == 0:
                # First team to reach this slot: create the next-round match.
                next_match = TournamentMatchup()
                next_matchup = Matchup()
                next_matchup.team1 = result.winner
                next_matchup.save()
                next_match.tournament = match.tournament
                next_match.round = next_round
                next_match.index = next_round_index
                next_match.matchup = next_matchup
                next_match.save()
        else:
            # Round 2 is the final: the tournament is over.
            tourney = match.tournament
            tourney.completed = True
            tourney.winner = result.winner
            tourney.save()
        # Replace the pending matchup with its recorded result.
        match.matchup.delete()
        match.matchup = None
        match.result = result
        match.save()
        return HttpResponseRedirect("/app/tournament/matchups?tournament=%s" % match.tournament.id)
    if request.method == "POST":
        return post()
    else:
        return HttpResponseRedirect("/app/tournament/matchups?tournament=%s" % request.GET["tournament"])
def result_detail(request):
    """Render the detail page for a single match result."""
    match = MatchResult.objects.get(id=request.GET['match'])
    template = loader.get_template('result_detail.html')
    return HttpResponse(template.render(Context({ 'match': match })))
def get_team_groups():
    """Return all teams bucketed by leader, as a list of lists.

    Inner lists are sorted by team id; the outer list is ordered by the
    leaders' names.

    NOTE(review): both ``sorted`` calls pass a *cmp* function positionally
    and rely on the builtin ``cmp`` -- Python-2-only.  Porting to Python 3
    needs ``key=operator.attrgetter(...)`` (or functools.cmp_to_key).
    """
    teams = Team.objects.all()
    team_groups = { }
    for team in teams:
        if not team.leader in team_groups:
            team_groups[team.leader] = []
        team_groups[team.leader].append(team)
    team_groups = [sorted(team_groups[k], lambda x,y: cmp(x.id, y.id)) for k in sorted(team_groups.keys(), lambda x,y: cmp(x.name, y.name))]
    return team_groups
def update_stats(winner, loser):
    """Record a match outcome in the pairwise statistics table.

    Creates a MatchupStatistics row for the pair on first meeting, otherwise
    increments the winner's win column on the existing row; also bumps the
    teams' win/loss counters and saves everything.

    Returns a (created, updated) pair of 0/1 flags; (0, 0) signals an
    inconsistent table (more than one row for the same pair).
    """
    existing = MatchupStatistics.objects.filter(Q(team1__in=[winner.id, loser.id]) & Q(team2__in=[winner.id, loser.id]))
    if existing.count() == 0:
        newStats = MatchupStatistics()
        newStats.team1 = winner
        newStats.team2 = loser
        newStats.team1_wins = 1
        newStats.team2_wins = 0
        winner.wins = winner.wins + 1
        loser.losses = loser.losses + 1
        newStats.save()
        winner.save()
        loser.save()
        return (1, 0)
    elif existing.count() == 1:
        # BUG FIX: Django QuerySets have no .fetch() (that is an App Engine
        # API); plain indexing retrieves the single matching row.
        oldStats = existing[0]
        # The stored row may list either team as team1.
        if oldStats.team1.id == winner.id:
            oldStats.team1_wins = oldStats.team1_wins + 1
        else:
            oldStats.team2_wins = oldStats.team2_wins + 1
        winner.wins = winner.wins + 1
        loser.losses = loser.losses + 1
        oldStats.save()
        winner.save()
        loser.save()
        return (0, 1)
    else:
        logging.error("unexpected state: %s matchup statistics for the same team pair (expected 1)" % existing.count())
        return (0, 0)
|
3,548 | 16a95573c4fccc10bdc5e37b307d0c85714b328c | import PyInstaller.__main__
import os
import shutil
# Resolve the project layout relative to this build script: the repo root is
# the script's parent directory, sources live in src/, artifacts go to out/.
basePath = os.path.realpath(os.path.join(os.path.dirname(__file__), os.path.pardir))
srcPath = os.path.join(basePath, 'src')
outPath = os.path.join(basePath, 'out')
workPath = os.path.join(outPath, 'work')
# Bundle both entry points as single-file executables.  win32timezone is
# declared as a hidden import -- presumably it is loaded dynamically at
# runtime and thus invisible to PyInstaller's static analysis (TODO confirm).
PyInstaller.__main__.run([
    '--clean',
    '--onefile',
    '--workpath', workPath,
    '--distpath', outPath,
    '--hidden-import', 'win32timezone',
    os.path.join(srcPath, 'service.py'),
    os.path.join(srcPath, 'bridge.py'),
])
# Ship the runtime config files next to the executables.
shutil.copy2(os.path.join(srcPath, 'bridge.cfg'), outPath)
shutil.copy2(os.path.join(srcPath, 'groups.cfg'), outPath)
# Remove build artifacts (the PyInstaller work directory).
shutil.rmtree(workPath)
3,549 | f6cebf6ec848a06f81c4e1f584ebb83f4d9ff47c | # -*- coding: utf-8 -*-
'''
Created on Dec 22, 2014
@author: Alan Tai
'''
from handlers.handler_webapp2_extra_auth import BaseHandler
from models.models_porn_info import WebLinkRoot, WebLinkPornTemp, WebLinkPorn,\
Tag
from dictionaries.dict_key_value_pairs import KeyValuePairsGeneral
from bs4 import BeautifulSoup
import webapp2, logging, re, urllib2, urlparse
from datetime import datetime
#
dict_general = KeyValuePairsGeneral()
class TaskCrawlRootLinksDispatcher(BaseHandler):
    """Cron task: crawl the configured root links and stash every anchor.

    NOTE(review): Python 2 syntax below (``except ... , err``); this module
    cannot run on Python 3 as written.
    """
    def get(self):
        self._read_feed()
    def _read_feed(self):
        """ crawling task """
        # Built-in fallback seeds.
        root_list_temp = dict_general.default_urls
        # Crawl the stored root links, falling back to the defaults when the
        # datastore has none yet.
        search_list = []
        query_root_entities = WebLinkRoot.query()
        if query_root_entities.count() > 0:
            for entity in query_root_entities:
                search_list.append({"title" : entity.title , "link" : entity.link})
        else:
            search_list = root_list_temp
        # Fetch each page and collect every anchor, absolutizing relative
        # hrefs against the page's scheme://netloc.
        list_found_link = []
        while len(search_list) > 0:
            link = search_list.pop(0)["link"]
            parsed_str = urlparse.urlsplit(link)
            link_base = "{url_scheme}://{url_netloc}".format(url_scheme = parsed_str.scheme, url_netloc = parsed_str.netloc)
            try:
                req = urllib2.Request(link)
                response = urllib2.urlopen(req) # need to add new mechanism to prevent fetch javascript
                searched_page = response.read()
                soup = BeautifulSoup(searched_page)
                for found_link in soup.find_all('a'):
                    if found_link.get('href'):
                        match_group = re.match("http", found_link.get('href'), re.I)
                        full_href = ""
                        title = "NA"
                        if not match_group:
                            full_href = "{href_link_base}{sub_href}".format(href_link_base = link_base, sub_href = found_link.get('href'))
                        else:
                            full_href = found_link.get('href')
                        if found_link.contents and len(found_link.contents) > 0 and found_link.contents[0].string:
                            title = found_link.contents[0].string
                        list_found_link.append({'title' : title, 'link' : full_href})
            except urllib2.HTTPError, err:
                pass
        # Store each new link in the temp table, skipping duplicates.
        while len(list_found_link) > 0:
            new_link = list_found_link.pop(0)
            query = WebLinkPornTemp.query(WebLinkPornTemp.link == new_link['link'])
            if query.count() == 0:
                new_info = WebLinkPornTemp()
                new_info.link = new_link['link']
                new_info.title = new_link['title']
                new_info.put()
# crawl temp links
class TaskCrawlTempLinksDispatcher(BaseHandler):
    """Cron task: pop a batch of temp links, crawl them, stash new anchors.

    NOTE(review): the crawl/store loops duplicate
    TaskCrawlRootLinksDispatcher._read_feed almost verbatim -- a shared
    helper would remove the drift risk.  Python 2 syntax below.
    """
    def get(self):
        # Take (and delete) up to 15 queued temp links; fall back to the
        # default seed URLs when the queue is empty.
        entities = WebLinkPornTemp.query().fetch(15)
        search_list = []
        if entities:
            for entity in entities:
                search_list.append({'title' : entity.title, 'link' : entity.link})
                entity.key.delete()
        else:
            search_list = dict_general.default_urls
        # Fetch each page and collect every anchor, absolutizing relative
        # hrefs against the page's scheme://netloc.
        list_found_link = []
        while len(search_list) > 0:
            link = search_list.pop(0)['link']
            parsed_str = urlparse.urlsplit(link)
            link_base = "{url_scheme}://{url_netloc}".format(url_scheme = parsed_str.scheme, url_netloc = parsed_str.netloc)
            try:
                req = urllib2.Request(link)
                response = urllib2.urlopen(req) # need to add new mechanism to prevent fetch javascript
                searched_page = response.read()
                soup = BeautifulSoup(searched_page)
                for found_link in soup.find_all('a'):
                    if found_link.get('href'):
                        match_group = re.match("http", found_link.get('href'), re.I)
                        full_href = ""
                        title = "NA"
                        if not match_group:
                            full_href = "{href_link_base}{sub_href}".format(href_link_base = link_base, sub_href = found_link.get('href'))
                        else:
                            full_href = found_link.get('href')
                        if found_link.contents and len(found_link.contents) > 0 and found_link.contents[0].string:
                            title = found_link.contents[0].string
                        list_found_link.append({'title' : title, 'link' : full_href})
            except urllib2.HTTPError, err:
                pass
        # Store each new link in the temp table, skipping duplicates.
        while len(list_found_link) > 0:
            new_link = list_found_link.pop(0)
            query = WebLinkPornTemp.query(WebLinkPornTemp.link == new_link['link'])
            if query.count() == 0:
                new_info = WebLinkPornTemp()
                new_info.link = new_link['link']
                new_info.title = new_link['title']
                new_info.put()
# categorize wine info
class TaskCategorizePornInfoDispatcher(BaseHandler):
    """Cron handler that promotes queued temp links whose URLs match known
    video patterns into the permanent WebLinkPorn table."""

    # URL fragments that identify a direct video page.
    _VIDEO_PATTERN = r"video\d+|redtube\.com\d+|videos\d+|watch\d+|viewkey=\d+"

    def get(self):
        """Cron entry point."""
        self._categorize()

    def _categorize(self):
        """Scan one batch of temp links and store the ones that look like videos."""
        # Fetch in batches of 50 to stay inside the datastore free quota.
        batch = WebLinkPornTemp.query().fetch(50)
        for candidate in batch:
            looks_like_video = re.findall(self._VIDEO_PATTERN, candidate.link, re.I)
            already_stored = WebLinkPorn.query(WebLinkPorn.link == candidate.link).count() > 0
            if looks_like_video and not already_stored:
                record = WebLinkPorn()
                record.link = candidate.link
                record.title = candidate.title
                record.put()
class TaskCrawlTagInfo(BaseHandler):
    """Cron handler: scrapes the site's tag index page and stores one Tag
    entity (name + occurrence count) per ``/tags/...`` anchor found."""

    def get(self):
        base_url = 'http://www.xvideos.com/tags/'
        req = urllib2.Request(base_url)
        response = urllib2.urlopen(req) # need to add new mechanism to prevent fetch javascript
        searched_page = response.read()
        soup = BeautifulSoup(searched_page)
        for found_link in soup.find_all('a'):
            try:
                if found_link.get('href'):
                    match_group = re.match("/tags/.*", found_link.get('href'), re.I)
                    if match_group:
                        # The tag name is the last path component of the href.
                        tag_name = found_link.get('href')[found_link.get('href').rfind('/') + 1:]
                        # The tag count is the text node right after the anchor.
                        tag_number = str(found_link.nextSibling).strip()
                        tag_info = Tag( site = 'Xvideos',
                                        name = tag_name,
                                        number = tag_number,
                                        created_datetime = datetime.now())
                        tag_info.put()
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt. Still deliberately best-effort:
                # one malformed anchor must not abort the whole crawl.
                pass
# configuration
config = dict_general.config_setting
# app: cron endpoints only; each route maps one scheduled task to a handler.
app = webapp2.WSGIApplication([
    webapp2.Route(r'/cron_tasks/crawl_root_links', TaskCrawlRootLinksDispatcher, name = 'crawl_root_links'),
    webapp2.Route(r'/cron_tasks/crawl_temp_links', TaskCrawlTempLinksDispatcher, name = 'crawl_temp_links'),
    # NOTE(review): route name "categorize_wine_info" does not match the URL
    # "categorize_porn_info" — looks like a copy/paste leftover; confirm before renaming.
    webapp2.Route(r'/cron_tasks/categorize_porn_info', TaskCategorizePornInfoDispatcher, name = "categorize_wine_info"),
    webapp2.Route(r'/cron_tasks/crawl_tag_info', TaskCrawlTagInfo, name = 'crawl_tag_info')
], debug=True, config=config)
# log everything at DEBUG for the whole process
logging.getLogger().setLevel(logging.DEBUG)
3,550 | b33af7aff0f3fde6499d5e24fc036d5bd74b6e47 | rom diseas import Disease
from parse import analyzing
from config import FILE_NAME
from random import randint
if __name__ == '__main__':
"""
Main module that runs the program.
"""
def working_with_user(disea):
print('Choose what you want to know about that disease:\naverage_value(will return the average value\
of deaths for the certain period of time)\naverage_changing(will return the average annual changing for the death rate)\n\
graphic(will show you a plot for the death rates)\n\
predicting(will make a prediction for the year, that you type)\n\
min_value and max_value')
new1_command = input()
if new1_command in ['average_value', 'average_changing', 'max_value', 'min_value']:
print(eval(f'Disease(disea).{new1_command}()'))
elif new1_command == 'graphic':
value1 = input("Do you want to have the prediction on your graphic?\
Type 2018 in this case. Otherwise type nothing\n")
Disease(disea).graphic(int(value1))
elif new1_command == 'predicting':
value1 = input("Type the year, which value have to be predicted(int bigger than 2018)")
Disease(disea).graphic(value1)
else:
print('Something went wrong')
while True:
print('Hello, now you are using the program, that can acknowledge you with data about death rates')
print('Here you can use following commands:\nshow - to show the list of the death causes\n\
leave - to go out of the program')
command = input()
if command == 'show':
for index, illness in enumerate(analyzing(FILE_NAME).keys()):
print(index, illness)
new_command = input("Now, choose the number of the disease or type randomly\
if you don't want to read a lot\n")
if new_command == 'randomly':
value = randint(0, 55)
for index1, illness1 in enumerate(analyzing(FILE_NAME).keys()):
if index1 == value:
print(illness1)
working_with_user(illness1)
elif '0' <= new_command <= '55':
for index2, illness2 in enumerate(analyzing(FILE_NAME).keys()):
if index2 == int(new_command):
working_with_user(illness2)
elif command == 'leave':
break
|
3,551 | a19b4928c9423dae6c60f39dbc5af0673b433c8e | from flask_opencv_streamer.streamer import Streamer
import cv2
import numpy as np
# Discrete Laplacian kernel: highlights edges in the frame.
MASK = np.array([
    [0, 1, 0],
    [1, -4, 1],
    [0, 1, 0]
])
# Streaming-server settings.
port = 3030
require_login = False
streamer = Streamer(port, require_login)
# Public MJPEG camera used as the video source.
video_capture = cv2.VideoCapture('http://149.43.156.105/mjpg/video.mjpg')
while True:
    _, frame = video_capture.read()
    # Denoise, run the Laplacian edge filter, then inverse-threshold at 10.
    frame = cv2.medianBlur(frame, 3)
    frame = cv2.filter2D(frame, -1, MASK)
    _, frame = cv2.threshold(frame, 10, 255, cv2.THRESH_BINARY_INV)
    streamer.update_frame(frame)
    if not streamer.is_streaming:
        streamer.start_streaming()
    # was in the original example, but it seems to work without it
    # cv2.waitKey(30)
3,552 | d32496c9bce86f455b24cd9c6dc263aee1bf82af | import requests
from bs4 import BeautifulSoup
import json
import geojson
import re
import time
_apiKey = "SNgeI1tCT-oihjeZDGi6WqcM0a9QAttLhKTecPaaETQ"
def Geocode(address, apiKey):
    """Forward-geocode *address* via the HERE Geocoding API.

    Returns a dict with the resolved 'address' label plus 'lat'/'lng' of the
    first (best) match.  Raises KeyError/IndexError if the API returns no items.
    """
    URL = 'https://geocode.search.hereapi.com/v1/geocode'
    # Request parameters
    params = {
        'q': address,
        'apiKey': apiKey
    }
    # BUG FIX: removed a leftover `import pdb; pdb.set_trace()` that dropped
    # every call into the interactive debugger.
    # Parse the response as JSON
    response = requests.get(URL, params=params).json()
    item = response['items'][0]
    address = item['address']
    position = item['position']
    result = {
        'address': address['label'],
        'lat': position['lat'],
        'lng': position['lng'],
    }
    return result
if __name__ == "__main__":
    # Smoke test: geocode a fixed address with the module-level API key.
    address = "Украина, Александрия, Соборный проспект 98"
    res = Geocode(address, _apiKey)
3,553 | ab27780b19db6854855af51eea063f07d9eb7302 | import datetime
import subprocess
from time import sleep
from flask import render_template, redirect, request, url_for, flash, abort
from dirkules import app, db, scheduler, app_version
import dirkules.manager.serviceManager as servMan
import dirkules.manager.driveManager as driveMan
import dirkules.manager.cleaning as cleaningMan
from dirkules.models import Drive, Cleaning, Pool
import dirkules.manager.viewManager as viewManager
from dirkules.validation.validators import CleaningForm, PoolAddForm
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page with the error text."""
    message = str(e)
    return render_template('404.html', error=message)
@app.errorhandler(500)
def internal_server_error(e):
    """Render the custom 500 page with the error text."""
    message = str(e)
    return render_template('500.html', error=message)
@app.route('/', methods=['GET'])
def index():
    """Landing page, including the current service state."""
    state = servMan.service_state()
    return render_template('index.html', service=state)
@app.route('/drives', methods=['GET'])
def drives():
    """List all drives; with ``?delete=<id>`` remove that drive first."""
    drive_id = request.args.get('delete')
    if drive_id is None:
        return render_template('drives.html', drives=Drive.query.all())
    try:
        driveMan.delete_drive(driveMan.get_drive_by_id(int(drive_id)))
    except ValueError:
        abort(500, description="Expected int, but got {}.".format(drive_id))
    except LookupError:
        abort(500, description="Invalid drive id {}".format(drive_id))
    # Redirect so a refresh does not repeat the deletion.
    return redirect(url_for('drives'))
@app.route('/pools', methods=['GET'])
def pools():
    """Overview of all storage pools."""
    all_pools = Pool.query.all()
    return render_template('pools.html', pools=all_pools)
@app.route('/pool/<pool>', methods=['GET'])
def pool(pool):
    """Detail page for a single pool; 404 if the id is unknown."""
    found = Pool.query.get(pool)
    if found is not None:
        return render_template('pool.html', pool=found)
    abort(404, description="Pool with ID {} could not be found.".format(pool))
@app.route('/pools/add', methods=['GET', 'POST'])
def add_pool():
    """Show the pool-creation form and create a btrfs pool on valid submit."""
    form = PoolAddForm(request.form)
    form.drives.choices = viewManager.get_empty_drives()
    if request.method != 'POST' or not form.validate():
        return render_template('pool_add.html', form=form)
    try:
        viewManager.create_btrfs_pool(form)
    except subprocess.CalledProcessError as e:
        abort(500, description="While creating a pool, the following exception occured: {}".format(e))
    except subprocess.TimeoutExpired as e:
        abort(500, description="Pool creation took too long: {}".format(e))
    # Trigger an immediate disk rescan so the new pool shows up right away.
    scheduler.get_job("refresh_disks").modify(next_run_time=datetime.datetime.now())
    sleep(1)
    return redirect(url_for('pools'))
@app.route('/about', methods=['GET'])
def about():
    """About page showing the running application version."""
    current_version = app_version
    return render_template('about.html', version=current_version)
@app.route('/partitions/<part>', methods=['GET'])
def partitions(part):
    """List the partitions of one drive, addressed by drive id."""
    try:
        target = driveMan.get_drive_by_id(int(part))
    except ValueError:
        abort(500, description="Expected int, but got {}.".format(part))
    except LookupError:
        abort(500, description="Invalid drive id {}".format(part))
    return render_template('partitions.html', parts=target.partitions)
@app.route('/cleaning', methods=['GET'])
def cleaning():
    """Cleaning-jobs page; query-string parameters drive state changes:

    ``remove=<id>``       delete that job,
    ``changestate=<id>``  toggle that job's active flag,
    ``service=start|pause`` start/pause the background cleaning service.
    """
    remove = request.args.get('remove')
    changestate = request.args.get('changestate')
    service = request.args.get('service')
    # NOTE(review): this condition means "NOT both remove and changestate set";
    # when both are passed at once the request is rejected with a flash below.
    if not (remove is not None and changestate is not None):
        if remove is not None:
            try:
                remove = int(remove)
                Cleaning.query.filter(Cleaning.id == remove).delete()
                db.session.commit()
                # Redirect so a refresh does not repeat the deletion.
                return redirect(request.path, code=302)
            except ValueError:
                flash("Value Error: remove")
        elif changestate is not None:
            try:
                changestate = int(changestate)
                job = Cleaning.query.get(changestate)
                # Toggle between 0 (inactive) and 1 (active).
                if job.state == 0:
                    job.state = 1
                else:
                    job.state = 0
                db.session.commit()
                return redirect(request.path, code=302)
            except ValueError:
                flash("Value Error: changestate")
    else:
        flash("Value Error: remove and changestate set")
    if service is not None:
        try:
            service = str(service)
            if service == "start":
                if not cleaningMan.running():
                    cleaningMan.enable()
                    return redirect(request.path, code=302)
                else:
                    flash("Error: Cleaning Service already running.")
            elif service == "pause":
                if cleaningMan.running():
                    cleaningMan.disable()
                    return redirect(request.path, code=302)
                else:
                    flash("Error: Cleaning Service already paused.")
            else:
                # Any other service value is reported via the handler below.
                raise ValueError
        except ValueError:
            flash("Value Error: service")
    # Default: render the job list, sorted case-insensitively by name.
    elements = Cleaning.query.order_by(db.asc(db.collate(Cleaning.name, 'NOCASE'))).all()
    return render_template('cleaning.html', elements=elements, task_running=cleaningMan.running())
@app.route('/add_cleaning', methods=['GET', 'POST'])
def add_cleaning():
    """Form for registering a new cleaning job."""
    form = CleaningForm(request.form)
    if request.method != 'POST' or not form.validate():
        return render_template('add_cleaning.html', form=form)
    viewManager.create_cleaning_obj(form.jobname.data, form.path.data, form.active.data)
    return redirect(url_for('cleaning'))
|
3,554 | 64a590d31be98f7639034662b2a322e5572cc1ae | # coding=utf-8
# flake8:noqa
from .string_helper import (
camelize, uncamelize,
camelize_for_dict_key, camelize_for_dict_key_in_list,
uncamelize_for_dict_key, uncamelize_for_dict_key_in_list
)
from .datetime_helper import datetime_format
from .class_helper import override
from .paginate import paginate2dict
from .json_type import JsonType
from .request import RequestDict
from .response import ResponseJson
from .api_helper import gen_links, gen_pagination, sort_list
from .api_helper import eliminate_key, remain_key
|
3,555 | 30d75aafd9612ac02557b947fc4e3c2f7322a7fd | import math
# Demonstration of Python arithmetic operators and numeric built-ins.
getal1 = 5
getal2 = 7
getal3 = 8
getal4 = -4
getal5 = 2
print(getal1*getal2+getal3)            # 43: * binds tighter than +
print(getal1*(getal2+getal3))          # 75: parentheses first
print(getal2+getal3/getal1)            # 8.6: / binds tighter than +
print((getal2+getal3)/getal1)          # 3.0
print(getal2+getal3%getal1)            # 10: % binds tighter than +
print(abs(getal4*getal1))              # 20
print(pow(getal3,getal5))              # 64
print(round(getal5/getal2,2))          # 0.29
print(max(getal1,getal2,getal3,getal4,getal5))   # 8
print(min(getal1,getal2,getal3,getal4,getal5))   # -4
print(math.sqrt(getal5*getal3))        # 4.0
3,556 | 0aa419b0045914b066fbec457c918d83276f2583 | from matplotlib import pyplot as plt
from read_and_calculate_speed import get_info_from_mongodb
# Use a CJK-capable font (SimHei) so the Chinese labels below render correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['font.family'] = 'sans-serif'
def mat_line(speed_time_info, interface, direction, last_time):
    """Plot the (time, speed) samples for one router interface as a line chart.

    speed_time_info: iterable of (timestamp, speed) pairs.
    interface/direction/last_time only appear in the chart title.
    """
    # Figure size: width, height (inches)
    fig = plt.figure(figsize=(6, 6))
    # One row, one plot per row, first plot
    ax = fig.add_subplot(111)
    # Format the X axis as timestamps
    import matplotlib.dates as mdate
    # Set the time-label display format
    # ax.xaxis.set_major_formatter(mdate.DateFormatter('%Y-%m-%d %H:%M:%S'))
    ax.xaxis.set_major_formatter(mdate.DateFormatter('%H:%M:%S'))
    # Format the Y-axis tick labels as integers
    import matplotlib.ticker as mtick
    ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%d'))
    # ax.set_ylim(0, 100)  # constrain the Y-axis range
    # Split the (time, speed) pairs into X (timestamps) and Y (rates)
    x = []
    y = []
    for time, speed in speed_time_info:
        x.append(time)
        y.append(speed)
    # Title and axis labels (labels are in Chinese by design)
    plt.title('路由器' + interface + '接口,' + direction + '方向,' + str(last_time) + '分钟速率')
    plt.xlabel('采集时间')
    plt.ylabel('速率kbps')
    # Auto-rotate date labels when the X axis gets crowded
    fig.autofmt_xdate()
    # Solid red line
    ax.plot(x, y, linestyle='solid', color='r', label='R1')
    # Dashed black line (alternative style)
    # ax.plot(x, y, linestyle='dashed', color='b', label='R1')
    # With a second data set you could draw two lines in one figure:
    # ax.plot(x2, y2, linestyle='dashed', color='b', label='R1')
    # Legend position
    ax.legend(loc='upper left')
    # Render the figure
    plt.show()
if __name__ == '__main__':
    # interface name, traffic direction, look-back window in minutes
    list_info = ['GigabitEthernet1', 'out', 2]
    # Fetch the last two minutes of samples from MongoDB
    time_recode, speed = get_info_from_mongodb(*list_info)
    speed_time_info = list(zip(time_recode, speed))
    # Draw the chart
    mat_line(speed_time_info, list_info[0], list_info[1], list_info[2])
|
3,557 | 9c277030ef384d60e62c2c48e38a1271a43826d6 | __author__ = 'dongdaqing'
import threading,time
class MyThread(threading.Thread):
    """Worker thread that prints the current timestamp and its own name.

    NOTE: Python 2 code (print statements).
    """

    def __init__(self, name=None):
        threading.Thread.__init__(self)
        self.name = name

    def run(self):
        # Print the launch time, then this thread's name.
        print time.strftime('%Y-%m-%d %H-%M-%S',time.localtime())
        print self.name
def test():
    """Spawn 100 worker threads; each prints its start time and name."""
    for index in range(100):
        worker = MyThread("thread_" + str(index))
        worker.start()

if __name__=='__main__':
    test()
3,558 | f49b80d0b8b42bafc787a36d0a8be98ab7fa53e7 | from turtle import Turtle
class Paddle(Turtle):
    """A pong paddle: a white square stretched to 20x100 px that moves
    vertically in 20-px steps and keeps the player's score."""

    def __init__(self, x_position, y_position):
        super().__init__()
        self.shape('square')
        # Base 20x20 square stretched: 5x tall, 1x wide.
        self.shapesize(stretch_wid=5, stretch_len=1)
        self.penup()
        self.color("white")
        self.goto(x=x_position, y=y_position)
        self.speed("fastest")
        self.score = 0

    def up(self):
        """Move the paddle 20 px up."""
        self.goto(x=self.xcor(), y=self.ycor() + 20)

    def down(self):
        """Move the paddle 20 px down."""
        self.goto(x=self.xcor(), y=self.ycor() - 20)

    def increase_score(self):
        """Award one point to this paddle's player."""
        self.score += 1
3,559 | 7bc2a02d85c3b1a2b7ed61dc7567d1097b63d658 | from setuptools import setup, find_packages
# Packaging metadata for the testspace-python client library.
setup(
    name='testspace-python',
    version='',  # NOTE(review): version is empty — fill in before releasing
    packages=find_packages(include=['testspace', 'testspace.*']),
    url='',
    license="MIT license",
    author="Jeffrey Schultz",
    author_email='jeffs@s2technologies.com',
    description="Module for interacting with Testspace Server",
    install_requires=[
        'requests',
    ]
)
|
3,560 | 3b7839347f24d39904d29d40e688a5dfd63534d7 | import numpy as np
import tensorflow as tf
from tfrecords_handler.moving_window.tfrecord_mean_reader import TFRecordReader
from configs.global_configs import training_data_configs
class StackingModelTester:
    """Builds, trains and evaluates a stacked-RNN forecaster (TensorFlow 1.x
    graph mode), returning one forecast per test-set time series."""

    def __init__(self, **kwargs):
        self.__use_bias = kwargs["use_bias"]
        self.__use_peepholes = kwargs["use_peepholes"]
        self.__input_size = kwargs["input_size"]
        self.__output_size = kwargs["output_size"]
        self.__binary_train_file_path = kwargs["binary_train_file_path"]
        self.__binary_test_file_path = kwargs["binary_test_file_path"]
        self.__seed = kwargs["seed"]
        self.__cell_type = kwargs["cell_type"]

    def __l1_loss(self, z, t):
        """Mean absolute error between predictions *z* and targets *t*."""
        loss = tf.reduce_mean(tf.abs(t - z))
        return loss

    def __l2_loss(self, z, t):
        # BUG FIX: first parameter was misspelled `selfself`; it still received
        # the instance positionally, but the name was wrong and confusing.
        """Mean squared error between predictions *z* and targets *t*."""
        loss = tf.losses.mean_squared_error(labels=t, predictions=z)
        return loss

    # Training the time series
    def test_model(self, **kwargs):
        """Train with the given hyperparameters, then forecast the test set.

        Returns a list with one forecast vector (list) per test series.
        """
        # extract the parameters from the kwargs
        num_hidden_layers = kwargs['num_hidden_layers']
        cell_dimension = kwargs['cell_dimension']
        minibatch_size = kwargs['minibatch_size']
        max_epoch_size = kwargs['max_epoch_size']
        max_num_epochs = kwargs['max_num_epochs']
        l2_regularization = kwargs['l2_regularization']
        gaussian_noise_stdev = kwargs['gaussian_noise_stdev']
        optimizer_fn = kwargs['optimizer_fn']
        random_normal_initializer_stdev = kwargs['random_normal_initializer_stdev']

        # reset the tensorflow graph
        tf.reset_default_graph()

        tf.set_random_seed(self.__seed)

        # declare the input and output placeholders; Gaussian noise is added to
        # the training inputs only, as regularization
        input = tf.placeholder(dtype=tf.float32, shape=[None, None, self.__input_size])
        noise = tf.random_normal(shape=tf.shape(input), mean=0.0, stddev=gaussian_noise_stdev, dtype=tf.float32)
        training_input = input + noise
        testing_input = input

        # output format [batch_size, sequence_length, dimension]
        true_output = tf.placeholder(dtype=tf.float32, shape=[None, None, self.__output_size])
        sequence_lengths = tf.placeholder(dtype=tf.int64, shape=[None])

        weight_initializer = tf.truncated_normal_initializer(stddev=random_normal_initializer_stdev)

        # RNN with the layer of cells
        def cell():
            if self.__cell_type == "LSTM":
                cell = tf.nn.rnn_cell.LSTMCell(num_units=int(cell_dimension), use_peepholes=self.__use_peepholes,
                                               initializer=weight_initializer)
            elif self.__cell_type == "GRU":
                cell = tf.nn.rnn_cell.GRUCell(num_units=int(cell_dimension), kernel_initializer=weight_initializer)
            elif self.__cell_type == "RNN":
                cell = tf.nn.rnn_cell.BasicRNNCell(num_units=int(cell_dimension))
            return cell

        multi_layered_cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell() for _ in range(int(num_hidden_layers))])

        # Training and inference share the same variables (reuse=AUTO_REUSE);
        # only the (noisy vs clean) input differs.
        with tf.variable_scope('train_scope') as train_scope:
            training_rnn_outputs, training_rnn_states = tf.nn.dynamic_rnn(cell=multi_layered_cell,
                                                                          inputs=training_input,
                                                                          sequence_length=sequence_lengths,
                                                                          dtype=tf.float32)

            # connect the dense layer to the RNN
            training_prediction_output = tf.layers.dense(
                inputs=tf.convert_to_tensor(value=training_rnn_outputs, dtype=tf.float32),
                units=self.__output_size,
                use_bias=self.__use_bias, kernel_initializer=weight_initializer, name='dense_layer')

        with tf.variable_scope(train_scope, reuse=tf.AUTO_REUSE) as inference_scope:
            inference_rnn_outputs, inference_rnn_states = tf.nn.dynamic_rnn(cell=multi_layered_cell,
                                                                            inputs=testing_input,
                                                                            sequence_length=sequence_lengths,
                                                                            dtype=tf.float32)
            # connect the dense layer to the RNN
            inference_prediction_output = tf.layers.dense(
                inputs=tf.convert_to_tensor(value=inference_rnn_outputs, dtype=tf.float32),
                units=self.__output_size,
                use_bias=self.__use_bias, kernel_initializer=weight_initializer, name='dense_layer', reuse=True)

        # error that should be minimized in the training process
        error = self.__l1_loss(training_prediction_output, true_output)

        # l2 regularization of the trainable model parameters
        l2_loss = 0.0
        for var in tf.trainable_variables():
            l2_loss += tf.nn.l2_loss(var)

        l2_loss = tf.multiply(tf.cast(l2_regularization, dtype=tf.float64), tf.cast(l2_loss, dtype=tf.float64))

        total_loss = tf.cast(error, dtype=tf.float64) + l2_loss

        # create the optimizer (e.g. adagrad) from the supplied factory
        optimizer = optimizer_fn(total_loss)

        # create the Dataset objects for the training and test data
        training_dataset = tf.data.TFRecordDataset(filenames=[self.__binary_train_file_path], compression_type="ZLIB")
        test_dataset = tf.data.TFRecordDataset([self.__binary_test_file_path], compression_type="ZLIB")

        # parse the records
        tfrecord_reader = TFRecordReader(self.__input_size, self.__output_size)

        # prepare the training data into batches
        # randomly shuffle the time series within the dataset
        shuffle_seed = tf.placeholder(dtype=tf.int64, shape=[])
        # training_dataset = training_dataset.apply(
        #     tf.data.experimental.shuffle_and_repeat(buffer_size=training_data_configs.SHUFFLE_BUFFER_SIZE,
        #                                             count=int(max_epoch_size), seed=shuffle_seed))
        training_dataset = training_dataset.repeat(count=int(max_epoch_size))
        training_dataset = training_dataset.map(tfrecord_reader.validation_data_parser)

        # create the batches by padding the datasets to make the variable sequence lengths fixed within the individual batches
        padded_training_data_batches = training_dataset.padded_batch(batch_size=int(minibatch_size),
                                                                     padded_shapes=(
                                                                         [], [tf.Dimension(None), self.__input_size],
                                                                         [tf.Dimension(None), self.__output_size],
                                                                         [tf.Dimension(None), self.__output_size + 2]))

        # get an iterator to the batches
        training_data_batch_iterator = padded_training_data_batches.make_initializable_iterator()

        # access each batch using the iterator
        next_training_data_batch = training_data_batch_iterator.get_next()

        # preparing the test data
        test_dataset = test_dataset.map(tfrecord_reader.test_data_parser)

        # create a single batch from all the test time series by padding the datasets to make the variable sequence lengths fixed
        padded_test_input_data = test_dataset.padded_batch(batch_size=int(minibatch_size),
                                                           padded_shapes=([], [tf.Dimension(None), self.__input_size],
                                                                          [tf.Dimension(None), self.__output_size + 2]))

        # get an iterator to the test input data batch
        test_input_iterator = padded_test_input_data.make_one_shot_iterator()

        # access the test input batch using the iterator
        test_input_data_batch = test_input_iterator.get_next()

        # setup variable initialization
        init_op = tf.global_variables_initializer()

        with tf.Session() as session:
            session.run(init_op)

            for epoch in range(int(max_num_epochs)):
                print("Epoch->", epoch)
                session.run(training_data_batch_iterator.initializer, feed_dict={shuffle_seed: epoch})
                # iterate the epoch's batches until the dataset is exhausted
                while True:
                    try:
                        training_data_batch_value = session.run(next_training_data_batch,
                                                                feed_dict={shuffle_seed: epoch})
                        session.run(optimizer,
                                    feed_dict={input: training_data_batch_value[1],
                                               true_output: training_data_batch_value[2],
                                               sequence_lengths: training_data_batch_value[0]})
                    except tf.errors.OutOfRangeError:
                        break

            # applying the model to the test data
            list_of_forecasts = []
            while True:
                try:
                    # get the batch of test inputs
                    test_input_batch_value = session.run(test_input_data_batch)

                    # get the output of the network for the test input data batch
                    test_output = session.run(inference_prediction_output,
                                              feed_dict={input: test_input_batch_value[1],
                                                         sequence_lengths: test_input_batch_value[0]})

                    # keep only the prediction at each series' last valid timestep
                    last_output_index = test_input_batch_value[0] - 1
                    array_first_dimension = np.array(range(0, test_input_batch_value[0].shape[0]))
                    forecasts = test_output[array_first_dimension, last_output_index]
                    list_of_forecasts.extend(forecasts.tolist())
                except tf.errors.OutOfRangeError:
                    break

            session.close()

        return list_of_forecasts
|
3,561 | 2d5993489ff3120d980d29edbb53422110a5c039 | '''
Написати програму, що визначає, яка з двох
точок знаходиться ближче до початку координат.
'''
import re
re_number = re.compile("^[-+]?\d+\.?\d*$")
def validator(pattern, promt):
    """Prompt repeatedly until the user's input matches *pattern*; return it."""
    while True:
        answer = input(promt)
        if pattern.match(answer):
            return answer
def number_validator(promt):
    """Prompt for a numeric value (validated by re_number) and return it as float."""
    return float(validator(re_number, promt))
def len_line(x, y):
    """Distance from the origin to point (x, y)."""
    return (x * x + y * y) ** 0.5
# Read both points' coordinates (prompts are in Ukrainian by design).
x_1 = number_validator("Введіть абсцису точки А: ")
y_1 = number_validator("Введіть ординатуі точки А: ")
x_2 = number_validator("Введіть абсцису точки В: ")
y_2 = number_validator("Введіть ординату точки В: ")
# Compare each point's distance to the origin and report the closer one.
if len_line(x_1,y_1) > len_line(x_2,y_2) :
    print("Точка В лежить ближче до початку координат")
elif len_line(x_1,y_1) < len_line(x_2,y_2):
    print("Точка А лежить ближче до початку координат")
else:
    print("Відстань від точок до початку координат рівні")
3,562 | 4942b20a8e4f58c52b82800fb4c59db169cd8048 | #!/usr/bin/env python
# encoding=utf-8
import MySQLdb
import re
# 打开数据库连接
db = MySQLdb.connect(host='wonderfulloffline.mysql.rds.aliyuncs.com',port=3306,user='wonderfull_ai',password='868wxRHrPaTKkjvC', db='wonderfull_ai_online', charset='utf8' )
def load_stop_word():
    """Read the stop-word list from data/stop_word.txt (one word per line)."""
    with open("data/stop_word.txt", "r", encoding="utf-8") as handle:
        return {line.strip() for line in handle}
# 使用cursor()方法获取操作游标
def get_goods_title_dict(stop_word_dict):
    """Build a vocabulary dict {token: id} from goods titles in the database.

    Pass 1: every non-stop-word character of every goods title (char level).
    Pass 2: alphanumeric substrings of on-sale goods titles (token level).
    Ids start at 1; id 0 is reserved for "[UNK]" (see write_dict).
    """
    cursor = db.cursor()
    # Execute the SQL query
    cursor.execute("select goods_name FROM goods")
    # Fetch all result rows
    data = cursor.fetchall()
    goods_name_dict=dict()
    idx=1
    for line in data:
        title = line[0].strip().lower()
        for c in title:
            # Skip whitespace and configured stop words.
            if(c.strip()==''):
                continue
            if(c in stop_word_dict):
                continue
            if(c not in goods_name_dict):
                goods_name_dict[c]=idx
                idx=idx+1
    # Second pass: alphanumeric tokens from titles of goods currently on sale.
    cursor.execute("select goods_name FROM goods where is_onsell=1")
    data = cursor.fetchall()
    regexp = r"[0-9a-z]+"
    pattern = re.compile(regexp)
    for line in data:
        title = line[0].strip().lower()
        match_res = pattern.findall(title)
        print(title,match_res)
        for item in match_res:
            if (item not in goods_name_dict):
                goods_name_dict[item] = idx
                idx = idx + 1
    # Close the database connection
    # db.close()
    return goods_name_dict
def write_dict(word_dict):
    """Write the vocabulary to data/vocab_unigram.txt as "token<TAB>id" lines.

    The reserved "[UNK]" token is always written first with id 0.
    """
    # Use a context manager so the file is closed even if a write fails
    # (the original leaked the handle on any exception).
    with open("data/vocab_unigram.txt", "w", encoding="utf-8") as out:
        out.write("[UNK]" + "\t" + "0" + "\n")
        for token, index in word_dict.items():
            out.write(token + "\t" + str(index) + "\n")
if __name__ == '__main__':
    # Build the character/token vocabulary from goods titles and write it to disk.
    stop_word_dict=load_stop_word()
    goods_name_dict=get_goods_title_dict(stop_word_dict)
    # print(goods_name_dict)
    write_dict(goods_name_dict)
3,563 | d60690892eddda656c11470aacd1fdc9d07a721a | # CIS 117 Python Programming - Lab 10
# Bryce DesBrisay
def middle(string):
    """Return the middle character of *string*, or the middle two for even lengths."""
    half = len(string) // 2
    if len(string) % 2 == 0:
        return string[half - 1] + string[half]
    return string[half]
def countVowels(string):
    """Count lowercase vowel occurrences in *string* ('y' is counted as a vowel)."""
    return sum(string.count(vowel) for vowel in "aeiouy")
def reverse(string):
    """Return *string* with its characters in reverse order."""
    return "".join(reversed(string))
def isPalindrome(string):
    """True if *string* reads the same forwards and backwards."""
    return string == string[::-1]
def main():
    """Read five strings from the user; report middle chars, reversal,
    vowel count and palindrome status for each."""
    count = 5
    while count > 0:
        string = input('Enter a string: ')
        print('The middle character(s) is/are: ' + middle(string))
        print('The string reversed is: ' + reverse(string))
        print('The string contains ' + str(countVowels(string)) + ' vowels.')
        if isPalindrome(string):
            print('That is a palindrome.\n')
        else:
            print('That is not palindrome.\n')
        count -= 1
main()
'''
Enter a string: racecar
The middle character(s) is/are: e
The string reversed is: racecar
The string contains 3 vowels.
That is a palindrome.
Enter a string: apple
The middle character(s) is/are: p
The string reversed is: elppa
The string contains 2 vowels.
That is not palindrome.
Enter a string: civic
The middle character(s) is/are: v
The string reversed is: civic
The string contains 2 vowels.
That is a palindrome.
Enter a string: bottle
The middle character(s) is/are: tt
The string reversed is: elttob
The string contains 2 vowels.
That is not palindrome.
Enter a string: noon
The middle character(s) is/are: oo
The string reversed is: noon
The string contains 2 vowels.
That is a palindrome.
'''
|
3,564 | 4032503bba8a1dd273015d503f52b6ea2d932d1d |
from pprint import pprint
from collections import Counter
from copy import deepcopy
class Sudoku():
    """Constraint-propagation sudoku solver.

    Keeps a 9x9 ``sub_grid`` of candidate lists (singleton list = fixed cell)
    and repeatedly applies two elimination rules until a fixed point.
    """
    def __init__(self, grid):
        '''
        Initializes the grid
        '''
        self.grid = grid
        self.sub_grid = self.create_sub_grid(self.grid)

    def create_sub_grid(self, grid):
        '''
        Creates a Sub grid, containing the possible numbers within a cell
        Returns a Sub grid
        '''
        sub_grid = []
        for i in range(9):
            sub = []
            for j in range(9):
                if grid[i][j] == 0:
                    # Unknown cell: list all candidates.
                    sub.append(self.missing_numbers(i,j))
                else:
                    # Given clue: a singleton candidate list.
                    sub.append([grid[i][j]])
            sub_grid.append(sub)
        del sub
        return sub_grid

    def missing_numbers(self, row, column):
        '''
        Returns the possible set of numbers for a particular row and column
        '''
        rrow, ccolumn = self.row_and_column(self.grid, row, column)
        cell = self.cell_3by3(row, column)
        # Candidates = 1..9 minus everything already used in the row,
        # column and 3x3 box.
        missing_num = list({i for i in range(1, 10)} - set(rrow + ccolumn + cell))
        return missing_num

    def cell_3by3(self, row, column):
        '''
        Returns the values of the 3 x 3 box containing (row, column)
        '''
        cell = []
        a = row // 3
        b = column // 3
        for i in range(9):
            for j in range(9):
                if i // 3 == a and j // 3 == b :
                    # BUG FIX: read from self.grid; the original referenced the
                    # module-level global `grid`, ignoring the instance's data.
                    cell.append(self.grid[i][j])
        return cell

    def row_and_column(self, grid, row, column):
        '''
        Returns the given row and column of *grid* as two lists
        '''
        r = grid[row]
        c = []
        for j in range(9):
            c.append(grid[j][column])
        return r, c

    def step_1(self, sub_grid, num):
        '''
        Reducing a list of clues to a single value based on row and column elimination:
        if a candidate appears exactly once in row/column `num`, it must go there.
        Returns a refined sub grid
        '''
        row,column = self.row_and_column(sub_grid,num,num)
        row_flatten = sum(row,[])
        single_values = [i for i,j in Counter(row_flatten).items() if j == 1 ]
        # For Rows
        for i in range(len(sub_grid)):
            for j in single_values:
                if j in sub_grid[num][i] and len(sub_grid[num][i]) != 1:
                    sub_grid[num][i] = [j]
        # For Columns
        column_flatten = sum(column, [])
        column_single_values = [i for i,j in Counter(column_flatten).items() if j == 1 ]
        for i in range(len(sub_grid)):
            for j in column_single_values:
                if j in sub_grid[i][num] and len(sub_grid[i][num]) != 1:
                    sub_grid[i][num] = [j]
        return sub_grid

    def step_2(self, sub_grid, num):
        '''
        Removes a number 'n' that fits at its correct position from other lists
        corresponding to its row and column.
        Returns refined sub grid
        '''
        row,column = self.row_and_column(sub_grid,num,num)
        # For Rows
        single_value_list = []
        for i in range(len(row)):
            if len(sub_grid[num][i]) == 1:
                single_value_list.append(sub_grid[num][i])
        single_value_list_flatten = sum(single_value_list, [])
        for i in range(len(sub_grid)):
            if len(sub_grid[num][i]) != 1:
                for j in single_value_list_flatten:
                    if j in sub_grid[num][i]:
                        sub_grid[num][i].remove(j)
        # For Columns
        single_value_list = []
        for i in range(len(column)):
            if len(sub_grid[i][num]) == 1:
                single_value_list.append(sub_grid[i][num])
        single_value_list_flatten = sum(single_value_list, [])
        for i in range(len(sub_grid)):
            if len(sub_grid[i][num]) != 1:
                for j in single_value_list_flatten:
                    if j in sub_grid[i][num]:
                        sub_grid[i][num].remove(j)
        return sub_grid

    def step_3(self, sub_grid, num):
        # Placeholder for a future (box-based) elimination rule.
        pass

    def perform(self):
        '''
        Performs step_1 and step_2 until the Sub grid stops changing
        Returns None
        '''
        temp = []
        while self.sub_grid != temp:
            temp = deepcopy(self.sub_grid)
            # BUG FIX: iterate over self.grid; the original used the
            # module-level global `grid`.
            for i in range(len(self.grid)):
                self.sub_grid = self.step_1(self.sub_grid, i)
                self.sub_grid = self.step_2(self.sub_grid, i)

    def solve(self):
        '''
        Solves the Sub grid and prints the sub grid
        Returns None
        '''
        self.perform()
        for i in range(9):
            for j in range(9):
                print(self.sub_grid[i][j], end=' ')
            print()
# grid = [
# [0,3,0,0,1,0,0,6,0],
# [7,5,0,0,3,0,0,4,8],
# [0,0,6,9,8,4,3,0,0],
# [0,0,3,0,0,0,8,0,0],
# [9,1,2,0,0,0,6,7,4],
# [0,0,4,0,0,0,5,0,0],
# [0,0,1,6,7,5,2,0,0],
# [6,8,0,0,9,0,0,1,5],
# [0,9,0,0,4,0,0,3,0]
# ]
# grid = [
# [6,0,0,1,0,8,2,0,3],
# [0,2,0,0,4,0,0,9,0],
# [8,0,3,0,0,5,4,0,0],
# [5,0,4,6,0,7,0,0,9],
# [0,3,0,0,0,0,0,5,0],
# [7,0,0,8,0,3,1,0,2],
# [0,0,1,7,0,0,9,0,6],
# [0,8,0,0,3,0,0,2,0],
# [3,0,2,9,0,4,0,0,5]
# ]
# Sample puzzle: 0 marks an unknown cell.
grid = [
    [8,0,6,0,0,0,4,0,9],
    [0,0,0,0,0,0,0,0,0],
    [0,9,2,0,0,0,5,0,8],
    [0,0,9,0,7,1,3,0,0],
    [5,0,8,0,0,0,0,2,0],
    [0,0,4,0,5,0,0,0,0],
    [0,0,0,0,0,7,9,1,0],
    [0,0,0,9,0,0,0,0,7],
    [0,7,0,0,0,3,0,0,4],
]
# Run the constraint-propagation solver and print the candidate grid.
mat = Sudoku(grid)
mat.solve()
|
3,565 | e828c2792d508ba41c5dca3f4a255eee2611c333 | Max = 100010
# Sieve of Eratosthenes: a[i] == 1 marks i as composite;
# p collects every prime below Max in increasing order.
a = [0 for i in range(Max)]
p = []
for i in range(2,Max):
    if a[i ] == 0:
        p.append(i)
        j = i * i
        while j < Max:
            a[j ] = 1
            j = j + i
# Count twin-prime pairs (p[j-1], p[j]) whose larger member is <= n.
# NOTE(review): no bound check on j — an n close to Max could run past
# the end of p; acceptable for the intended input range.
cnt,j = 0,1
n = int(input())
while p[j] <= n :
    if p[j ] - p[j-1] == 2: cnt = cnt + 1
    j = j + 1
print(cnt)
3,566 | 5c4c893caa19e58491e641420261bb70e7202cf0 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
# Describes where to search for the config file if no location is specified
DEFAULT_CONFIG_LOCATION = "config.json"

# Fallback values for every supported configuration key.
DEFAULT_CONFIG = {
    "project": None,
    "fixed_model_name": None,
    "config": DEFAULT_CONFIG_LOCATION,
    "data": None,
    "emulate": None,
    "language": "en",
    "log_file": None,
    "log_level": 'INFO',
    "mitie_file": os.path.join("data", "total_word_feature_extractor.dat"),
    "spacy_model_name": None,
    "num_threads": 1,
    "max_training_processes": 1,
    "path": "projects",
    "port": 5000,
    "token": None,
    "cors_origins": [],
    "max_number_of_ngrams": 7,
    "pipeline": [],
    "response_log": "logs",
    "aws_endpoint_url": None,
    "duckling_dimensions": None,
    "duckling_http_url": None,
    # CRF entity-extractor defaults: feature windows are [prev, current, next].
    "ner_crf": {
        "BILOU_flag": True,
        "features": [
            ["low", "title", "upper", "pos", "pos2"],
            ["bias", "low", "word3", "word2", "upper", "title", "digit", "pos", "pos2", "pattern"],
            ["low", "title", "upper", "pos", "pos2"]],
        "max_iterations": 50,
        "L1_c": 1,
        "L2_c": 1e-3
    },
    # sklearn intent classifier: grid-search values for C, fixed linear kernel.
    "intent_classifier_sklearn": {
        "C": [1, 2, 5, 10, 20, 100],
        "kernel": "linear"
    }
}
class InvalidConfigError(ValueError):
    """Signals that an invalid configuration was encountered.

    Subclass of ValueError so generic handlers still catch it.
    """

    def __init__(self, message):
        # type: (Text) -> None
        super(InvalidConfigError, self).__init__(message)
class AnnotatorConfig(object):
    """Mapping-style configuration container backed by the instance __dict__.

    Supports indexing, ``get``, ``in``, ``len`` and pickling as a plain dict.
    """

    DEFAULT_PROJECT_NAME = "default"

    def __init__(self, filename=None):
        # NOTE(review): *filename* is currently ignored; loading is not
        # implemented in this class.
        pass

    def __getitem__(self, key):
        return self.__dict__[key]

    def get(self, key, default=None):
        return self.__dict__.get(key, default)

    def __setitem__(self, key, value):
        self.__dict__[key] = value

    def __delitem__(self, key):
        del self.__dict__[key]

    def __contains__(self, key):
        return key in self.__dict__

    def __len__(self):
        return len(self.__dict__)

    def __getstate__(self):
        # Pickle as a plain dict snapshot of the current values.
        return self.as_dict()

    def __setstate__(self, state):
        # NOTE(review): `override` is not defined in this class; it is
        # presumably supplied by a subclass — confirm before unpickling.
        self.override(state)

    def items(self):
        # Return a list (not a view) to preserve the historical contract.
        return list(self.__dict__.items())

    def as_dict(self):
        return dict(self.items())
|
3,567 | d8a09f9952856da69120fae6221636dd5bd8c93e | # python examples/mnist_rnn.py --bsz 128 --bsz-eval 256
import sys
from argparse import ArgumentParser
import pytorch_lightning as pl
import torch.nn as nn
import torch.optim as optim
from loguru import logger
from slp.config.config_parser import make_cli_parser, parse_config
from slp.data.collators import SequenceClassificationCollator
from slp.modules.rnn import RNN
from slp.plbind import (
FromLogits,
PLDataModuleFromDatasets,
RnnPLModule,
make_trainer,
watch_model,
)
from slp.util.log import configure_logging
from torchvision.datasets import MNIST # type: ignore
from torchvision.transforms import Compose, Normalize, ToTensor # type: ignore
# Batch collator shared by all dataloaders; presumably pads variable-length
# sequences and attaches per-sample lengths — TODO confirm in slp.data.collators.
collate_fn = SequenceClassificationCollator()
class Net(nn.Module):
    """RNN sequence classifier: encode the input, classify the encoder state."""

    def __init__(self, input_size, hidden_size=40, num_classes=10, bidirectional=False):
        super().__init__()
        self.encoder = RNN(input_size, hidden_size, bidirectional=bidirectional)
        # A bidirectional encoder concatenates both directions' states.
        feature_dim = 2 * hidden_size if bidirectional else hidden_size
        self.clf = nn.Linear(feature_dim, num_classes)

    def forward(self, x, lengths):
        # The encoder returns (outputs, pooled_state, hidden); only the
        # pooled state feeds the classifier head.
        _, pooled, _ = self.encoder(x, lengths)
        return self.clf(pooled)
def get_parser():
    """Build the command-line parser for the MNIST RNN example."""
    parser = ArgumentParser("MNIST classification example")
    hidden_opts = dict(
        dest="model.hidden_size",
        type=int,
        help="Intermediate hidden layers for linear module",
    )
    parser.add_argument("--hidden", **hidden_opts)
    parser.add_argument(
        "--bi", dest="model.bidirectional",
        action="store_true", help="Use BiLSTM",
    )
    return parser
def get_data():
    """Return the MNIST (train, validation) datasets, downloading if needed."""
    # Fix: https://stackoverflow.com/a/66820249
    # The original MNIST mirrors went down; point torchvision at the
    # OSSCI S3 mirror instead.
    base = "https://ossci-datasets.s3.amazonaws.com/mnist/"
    MNIST.resources = [
        (base + filename, md5)
        for filename, md5 in (
            ("train-images-idx3-ubyte.gz", "f68b3c2dcbeaaa9fbdd348bbdeb94873"),
            ("train-labels-idx1-ubyte.gz", "d53e105ee54ea40749a09fcbcd1e9432"),
            ("t10k-images-idx3-ubyte.gz", "9fb629c4189551a2d022fa330f9573f3"),
            ("t10k-labels-idx1-ubyte.gz", "ec29112dd5afa0611ce80d1b7f02629c"),
        )
    ]

    def drop_channel_dim(t):
        # (1, 28, 28) -> (28, 28) so each image reads as a 28-step sequence.
        return t.squeeze()

    data_transform = Compose(
        [ToTensor(), Normalize((0.1307,), (0.3081,)), drop_channel_dim]
    )
    train = MNIST(download=True, root=".", transform=data_transform, train=True)
    val = MNIST(download=False, root=".", transform=data_transform, train=False)
    return train, val
if __name__ == "__main__":
    # SETUP ##################################################
    parser = get_parser()
    parser = make_cli_parser(parser, PLDataModuleFromDatasets)
    config = parse_config(parser, parser.parse_args().config)
    if config.trainer.experiment_name == "experiment":
        config.trainer.experiment_name = "mnist-rnn-classification"
    configure_logging(f"logs/{config.trainer.experiment_name}")
    if config.seed is not None:
        # BUG FIX: the message was missing the f-prefix, so the literal
        # text "{seed}" was logged instead of the configured seed value.
        logger.info(f"Seeding everything with seed={config.seed}")
        pl.utilities.seed.seed_everything(seed=config.seed)
    train, test = get_data()
    # Get data and make datamodule ##########################
    ldm = PLDataModuleFromDatasets(
        train, test=test, seed=config.seed, collate_fn=collate_fn, **config.data
    )
    # Create model, optimizer, criterion, scheduler ###########
    # 28 = pixels per MNIST row; each image is fed as a 28-step sequence.
    model = Net(28, **config.model)
    optimizer = getattr(optim, config.optimizer)(model.parameters(), **config.optim)
    criterion = nn.CrossEntropyLoss()
    lr_scheduler = None
    if config.lr_scheduler:
        lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, **config.lr_schedule
        )
    # Wrap in PLModule, & configure metrics ####################
    lm = RnnPLModule(
        model,
        optimizer,
        criterion,
        lr_scheduler=lr_scheduler,
        metrics={"acc": FromLogits(pl.metrics.classification.Accuracy())},
        hparams=config,
    )
    # Run debugging session or fit & test the model ############
    if config.debug:
        logger.info("Running in debug mode: Fast run on 5 batches")
        trainer = make_trainer(fast_dev_run=5)
        trainer.fit(lm, datamodule=ldm)
        logger.info("Running in debug mode: Overfitting 5 batches")
        trainer = make_trainer(overfit_batches=5)
        trainer.fit(lm, datamodule=ldm)
    else:
        trainer = make_trainer(**config.trainer)
        watch_model(trainer, model)
        trainer.fit(lm, datamodule=ldm)
        trainer.test(ckpt_path="best", test_dataloaders=ldm.test_dataloader())
        logger.info("Run finished. Uploading files to wandb...")
|
3,568 | 27edc753ebb9d60715a2ffa25d77e69ef363d010 | import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from scipy.stats import chisquare, chi2, binom, poisson
def f_1(x, a):
    """Damped sine: sin(a*x) scaled by 1/(x + 5)."""
    damping = 1 / (x + 5)
    return damping * np.sin(a * x)
def f_2(x, a):
    """Sine wave shifted up by one."""
    wave = np.sin(a * x)
    return wave + 1
def f_3(x, a):
    """Sine of the quadratic phase a * x**2."""
    phase = a * (x ** 2)
    return np.sin(phase)
def f_4(x, a):
    """Squared sine of the shifted phase a*x + 1."""
    wave = np.sin(a * x + 1)
    return wave ** 2
def f_5(x):
    """Return x * tan(x)."""
    tangent = np.tan(x)
    return x * tangent
def f_6(x, a, b):
    """Quadratic polynomial 1 + a*x + b*x^2, normalized by (2/3)*(b + 3)."""
    numerator = 1 + a * x + b * (x ** 2)
    denominator = (2/3) * (b + 3)
    return numerator / denominator
def f_7(x, a, b):
    """Straight line a + b*x."""
    slope_term = b * x
    return a + slope_term
def f_8(x, a, b, c):
    """Sine plus a scaled exponential, shifted up by one."""
    oscillation = np.sin(a * x)
    growth = c * np.exp(b * x)
    return oscillation + growth + 1
def f_9(x, a, b):
    """Unnormalized Gaussian centred at a with width b."""
    exponent = -(x - a) ** 2 / (2 * (b ** 2))
    return np.exp(exponent)
def my_pdf(VAR, x):
    """Negative log-likelihood of the f_1 model with parameter VAR over data x."""
    a = VAR
    log_density = np.log(f_1(x, a))
    # Minimizing the summed negative log-likelihood == maximizing likelihood.
    return np.sum(-log_density)
fname = 'Exam_2018_Prob1.txt'
data = np.loadtxt(fname)
z = data[:, 0]
# Parameter bounds for the various fits (only a_bound is used below).
a_bound = (-10, 0)
b_bound = (-10, 10)
c_bound = (4000, 8000)
n_bound = (0, None)
p_bound = (0, None)
mu_bound = (0, None)
# Maximum-likelihood fit of f_1's parameter `a` over the first data column.
# FIX: `args` must be a tuple — `(z)` is just `z`; `(z,)` is explicit and
# portable across scipy versions.
data_0 = minimize(my_pdf, [1, ], args=(z,), method='SLSQP',
                  bounds=(a_bound, ))
print(data_0)
x = np.arange(20, 27, 0.01)
y = f_1(x, -3)
plt.plot(x, y+0.2)
# FIX: `normed` was removed from Axes.hist in matplotlib 3.x;
# `density=True` is the supported equivalent keyword.
plt.hist(z, bins=200, density=True)
plt.show()
binwidth = 0.1
# NOTE(review): this assumes the data file has at least 3 columns — confirm.
n_bins = np.arange(min(data[:, 2]), max(data[:, 2]) + binwidth, binwidth)
# Chi2 calculator
# observed_values, bins, _ = plt.hist(data[:, 2], bins=n_bins)
# plt.show()
# We normalize by multiplyting the length of the data with the binwidth
# expected_values = poisson.pmf(bins, data_0.x[0]) * len(data)
# print(observed_values[observed_values!=0])
# print(expected_values[expected_values!=0])
# print(chisquare(observed_values[observed_values!=0], f_exp=expected_values[expected_values!=0]))
# print('Threshold value ', chi2.isf(0.05, 18))
# x = np.arange(-1, 1, 0.01)
# y = f_6(x, data_0.x[0], data_0.x[1])
# plt.plot(x,y)
# plt.show()
|
3,569 | ee489c2e313a96671db79398218f8604f7ae1bf3 | # -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2017
# All rights reserved.
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ==========================================================================
# -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2017
# All rights reserved.
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ==========================================================================
# Copyright (c) OpenMMLab. All rights reserved.
"""This file holding some environment constant for sharing by other files."""
import os.path as osp
import subprocess
import sys
from collections import defaultdict
import cv2
import torch
import mmcv
from .parrots_wrapper import get_build_config
def collect_env():
    """Collect the information of the running environments.
    Returns:
        dict: The environment information. The following fields are contained.
            - sys.platform: The variable of ``sys.platform``.
            - Python: Python version.
            - CUDA available: Bool, indicating if CUDA is available.
            - GPU devices: Device type of each GPU.
            - CUDA_HOME (optional): The env var ``CUDA_HOME``.
            - NVCC (optional): NVCC version.
            - GCC: GCC version, "n/a" if GCC is not installed.
            - MSVC: Microsoft Virtual C++ Compiler version, Windows only.
            - PyTorch: PyTorch version.
            - PyTorch compiling details: The output of \
                ``torch.__config__.show()``.
            - TorchVision (optional): TorchVision version.
            - OpenCV: OpenCV version.
            - MMCV: MMCV version.
            - MMCV Compiler: The GCC version for compiling MMCV ops.
            - MMCV CUDA Compiler: The CUDA version for compiling MMCV ops.
    """
    env_info = {}
    env_info['sys.platform'] = sys.platform
    # Collapse the multi-line version string onto one line.
    env_info['Python'] = sys.version.replace('\n', '')
    cuda_available = torch.cuda.is_available()
    env_info['CUDA available'] = cuda_available
    if cuda_available:
        # Group GPU ids by device name so identical cards share one entry.
        devices = defaultdict(list)
        for k in range(torch.cuda.device_count()):
            devices[torch.cuda.get_device_name(k)].append(str(k))
        for name, device_ids in devices.items():
            env_info['GPU ' + ','.join(device_ids)] = name
        from mmcv.utils.parrots_wrapper import _get_cuda_home
        CUDA_HOME = _get_cuda_home()
        env_info['CUDA_HOME'] = CUDA_HOME
        if CUDA_HOME is not None and osp.isdir(CUDA_HOME):
            try:
                # Ask nvcc for its version and keep only the release line.
                nvcc = osp.join(CUDA_HOME, 'bin/nvcc')
                nvcc = subprocess.check_output(f'"{nvcc}" -V', shell=True)
                nvcc = nvcc.decode('utf-8').strip()
                release = nvcc.rfind('Cuda compilation tools')
                build = nvcc.rfind('Build ')
                nvcc = nvcc[release:build].strip()
            except subprocess.SubprocessError:
                nvcc = 'Not Available'
            env_info['NVCC'] = nvcc
    try:
        # Check C++ Compiler.
        # For Unix-like, sysconfig has 'CC' variable like 'gcc -pthread ...',
        # indicating the compiler used, we use this to get the compiler name
        import sysconfig
        cc = sysconfig.get_config_var('CC')
        if cc:
            cc = osp.basename(cc.split()[0])
            cc_info = subprocess.check_output(f'{cc} --version', shell=True)
            env_info['GCC'] = cc_info.decode('utf-8').partition(
                '\n')[0].strip()
        else:
            # on Windows, cl.exe is not in PATH. We need to find the path.
            # distutils.ccompiler.new_compiler() returns a msvccompiler
            # object and after initialization, path to cl.exe is found.
            import locale
            import os
            from distutils.ccompiler import new_compiler
            ccompiler = new_compiler()
            ccompiler.initialize()
            cc = subprocess.check_output(
                f'{ccompiler.cc}', stderr=subprocess.STDOUT, shell=True)
            encoding = os.device_encoding(
                sys.stdout.fileno()) or locale.getpreferredencoding()
            env_info['MSVC'] = cc.decode(encoding).partition('\n')[0].strip()
            env_info['GCC'] = 'n/a'
    except subprocess.CalledProcessError:
        env_info['GCC'] = 'n/a'
    env_info['PyTorch'] = torch.__version__
    env_info['PyTorch compiling details'] = get_build_config()
    try:
        import torchvision
        env_info['TorchVision'] = torchvision.__version__
    except ModuleNotFoundError:
        # torchvision is optional; just omit the field when absent.
        pass
    env_info['OpenCV'] = cv2.__version__
    env_info['MMCV'] = mmcv.__version__
    try:
        from mmcv.ops import get_compiler_version, get_compiling_cuda_version
    except ModuleNotFoundError:
        # Compiled ops are unavailable (CPU-only / source install).
        env_info['MMCV Compiler'] = 'n/a'
        env_info['MMCV CUDA Compiler'] = 'n/a'
    else:
        env_info['MMCV Compiler'] = get_compiler_version()
        env_info['MMCV CUDA Compiler'] = get_compiling_cuda_version()
    return env_info
|
3,570 | 53127de883fb5da3214d13904664566269becba6 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import stevedore
from keystoneauth1 import exceptions
# Entry-point namespace that auth plugin loaders register under.
PLUGIN_NAMESPACE = 'keystoneauth1.plugin'
# Public API of this module.
__all__ = ('get_available_plugin_names',
           'get_available_plugin_loaders',
           'get_plugin_loader',
           'get_plugin_options',
           'BaseLoader',
           'PLUGIN_NAMESPACE')
def _auth_plugin_available(ext):
"""Read the value of available for whether to load this plugin."""
return ext.obj.available
def get_available_plugin_names():
    """Get the names of all the plugins that are available on the system.

    This is particularly useful for help and error text to prompt a user for
    example what plugins they may specify.

    :returns: A list of names.
    :rtype: frozenset
    """
    manager = stevedore.EnabledExtensionManager(
        namespace=PLUGIN_NAMESPACE,
        check_func=_auth_plugin_available,
        invoke_on_load=True,
        propagate_map_exceptions=True,
    )
    return frozenset(manager.names())
def get_available_plugin_loaders():
    """Retrieve all the plugin classes available on the system.

    :returns: A dict with plugin entrypoint name as the key and the plugin
        loader as the value.
    :rtype: dict
    """
    manager = stevedore.EnabledExtensionManager(
        namespace=PLUGIN_NAMESPACE,
        check_func=_auth_plugin_available,
        invoke_on_load=True,
        propagate_map_exceptions=True,
    )

    def _name_loader_pair(ext):
        return ext.entry_point.name, ext.obj

    # Use manager.map (not plain iteration) so exception propagation
    # honours propagate_map_exceptions.
    return dict(manager.map(_name_loader_pair))
def get_plugin_loader(name):
    """Retrieve a plugin class by its entrypoint name.

    :param str name: The name of the object to get.
    :returns: An auth plugin class.
    :rtype: :py:class:`keystoneauth1.loading.BaseLoader`
    :raises keystoneauth1.exceptions.auth_plugins.NoMatchingPlugin:
        if a plugin cannot be created.
    """
    try:
        manager = stevedore.DriverManager(
            namespace=PLUGIN_NAMESPACE,
            name=name,
            invoke_on_load=True)
    except RuntimeError:
        # stevedore raises RuntimeError when no driver matches the name.
        raise exceptions.NoMatchingPlugin(name)
    return manager.driver
def get_plugin_options(name):
    """Get the options for a specific plugin.

    This will be the list of options that is registered and loaded by the
    specified plugin.

    :returns: A list of :py:class:`keystoneauth1.loading.Opt` options.
    :raises keystoneauth1.exceptions.auth_plugins.NoMatchingPlugin:
        if a plugin cannot be created.
    """
    loader = get_plugin_loader(name)
    return loader.get_options()
class BaseLoader(metaclass=abc.ABCMeta):
    """Base class for authentication plugin loaders.

    Subclasses describe their parameters via :meth:`get_options` and either
    provide :attr:`plugin_class` or override :meth:`create_plugin`.
    """

    @property
    def plugin_class(self):
        raise NotImplementedError()

    def create_plugin(self, **kwargs):
        """Create a plugin from the options available for the loader.

        Given the options that were specified by the loader create an
        appropriate plugin. You can override this function in your loader.

        This used to be specified by providing the plugin_class property and
        this is still supported, however specifying a property didn't let you
        choose a plugin type based upon the options that were presented.

        Override this function if you wish to return different plugins based on
        the options presented, otherwise you can simply provide the
        plugin_class property.

        Added 2.9
        """
        return self.plugin_class(**kwargs)

    @abc.abstractmethod
    def get_options(self):
        """Return the list of parameters associated with the auth plugin.

        This list may be used to generate CLI or config arguments.

        :returns: A list of Param objects describing available plugin
                  parameters.
        :rtype: list
        """
        return []

    @property
    def available(self):
        """Return if the plugin is available for loading.

        If a plugin is missing dependencies or for some other reason should not
        be available to the current system it should override this property and
        return False to exclude itself from the plugin list.

        :rtype: bool
        """
        return True

    def load_from_options(self, **kwargs):
        """Create a plugin from the arguments retrieved from get_options.

        A client can override this function to do argument validation or to
        handle differences between the registered options and what is required
        to create the plugin.
        """
        required_opts = (o for o in self.get_options() if o.required)
        missing = [o for o in required_opts if kwargs.get(o.dest) is None]
        if missing:
            raise exceptions.MissingRequiredOptions(missing)
        return self.create_plugin(**kwargs)

    def load_from_options_getter(self, getter, **kwargs):
        """Load a plugin from getter function that returns appropriate values.

        To handle cases other than the provided CONF and CLI loading you can
        specify a custom loader function that will be queried for the option
        value.
        The getter is a function that takes a
        :py:class:`keystoneauth1.loading.Opt` and returns a value to load with.

        :param getter: A function that returns a value for the given opt.
        :type getter: callable

        :returns: An authentication Plugin.
        :rtype: :py:class:`keystoneauth1.plugin.BaseAuthPlugin`
        """
        for opt in self.get_options():
            if opt.dest in kwargs:
                continue
            value = getter(opt)
            if value is not None:
                value = opt.type(value)
            kwargs[opt.dest] = value
        return self.load_from_options(**kwargs)
|
3,571 | dc27781d0c3129d11aa98a5889aea0383b5a49d6 | from django.db import models
from django.contrib.auth.models import User
class CustomUser(models.Model):
    """Profile model extending Django's built-in User one-to-one."""

    user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='custom_user')

    def get_current_score(self):
        """Return the total points across all of this user's scores."""
        # FIX (idiom): replace the manual accumulator loop with sum().
        # NOTE(review): a DB-side aggregate (user_scores.aggregate(Sum(...)))
        # would avoid loading every row — consider if score counts grow.
        return sum(score.points for score in self.user_scores.all())

    def __str__(self):
        return self.user.username
class Team(models.Model):
    """A named group of players."""

    name = models.CharField(max_length=150)
    players = models.ManyToManyField(CustomUser)

    def __str__(self):
        # Same rendering as before: "<name> <id>".
        return '{} {}'.format(self.name, self.id)
class Leaderboard(models.Model):
    """A scoring board owned by a single team."""

    # NOTE(review): auto_now=True refreshes these dates on every save —
    # if fixed creation dates were intended, auto_now_add fits better; confirm.
    end_date = models.DateField(auto_now=True)
    submit_deadline = models.DateField(auto_now=True)
    team = models.ForeignKey(Team, on_delete=models.CASCADE, related_name='leaderboards')
    name = models.CharField(max_length=150, default="Leaderboard")

    def __str__(self):
        # Same rendering as before: "<name> id: <id>".
        return '{} id: {}'.format(self.name, self.id)
class UserScore(models.Model):
    """Points earned by one user on one leaderboard."""

    points = models.IntegerField()
    leaderboard = models.ForeignKey(Leaderboard, on_delete=models.CASCADE, related_name='user_scores')
    custom_user = models.ForeignKey(CustomUser, on_delete=models.CASCADE, related_name='user_scores')

    def __str__(self):
        owner = self.custom_user
        return owner.user.username
|
3,572 | 32869a88bb59d47281249b6ebe2357328beb0359 | #!/usr/bin/env python
def question():
    """Print the statement of exercise 02 (two lines, verbatim)."""
    for line in (
        "02. 「パトカー」+「タクシー」=「パタトクカシーー」",
        "「パトカー」+「タクシー」の文字を先頭から交互に連結して文字列「パタトクカシーー」を得よ.",
    ):
        print(line)
def main():
    """Interleave the characters of the two words and print the result."""
    word_a = "パトカー"
    word_b = "タクシー"
    interleaved = ''.join(a + b for a, b in zip(word_a, word_b))
    print(interleaved)
if __name__ == '__main__':
    # Show the exercise statement, then run the solution.
    question()
    main()
|
3,573 | e75bee4e014aa369131c3e200ce874a8840b5690 | from GRAFICA_BRESENHAMS import Bresenhams
def main():
    """Prompt for the endpoints (x, y) and (x1, y1), then plot the Bresenham line."""
    prompts = ('INGRESA VALOR PARA X: \n',
               'INGRESA VALOR PARA Y: \n',
               'INGRESA VALOR PARA X1: \n',
               'INGRESA VALOR PARA Y1: \n')
    # Read the four coordinates in the same order as the prompts.
    x, y, x1, y1 = (int(input(p)) for p in prompts)
    Bresenhams(x, y, x1, y1)
if __name__=='__main__':
    # Entry point: prompt for the two endpoints and plot the line.
    main()
3,574 | ff99b5fd168d7987e488d7f6d0455619e988f15a | import numpy as np
import math
import activations
class FC_layer():
    """Fully connected layer with manual forward/backward passes.

    Weights have shape (input_size, output_size); bias has shape
    (1, output_size). Gradients accumulate across calls to backward()
    until update_gradients() applies and resets them.
    """

    def __init__(self, input_size, output_size, weight_init_range, activation, debug):
        self.type = "FC"
        self.activation_name = activation
        self.shape = (input_size, output_size)
        # Activation function and its derivative are looked up by name.
        self.activation = activations.get_activation_function(activation)
        self.d_activation = activations.get_activation_derivative(activation)
        # Caches filled during forward(), consumed by backward().
        self.input = None
        self.output = None
        self.weights = np.random.uniform(low=weight_init_range[0], high= weight_init_range[1], size=(input_size, output_size))
        self.bias = np.random.rand(1,output_size)
        # Accumulated gradients (zeroed again by update_gradients()).
        self.weights_grads = np.zeros(self.weights.shape)
        self.bias_grads = np.zeros(self.bias.shape)
        self.debug = debug

    def forward(self, input_activations):
        """Compute the layer output for one input and cache values for backprop."""
        # Dot product of input with W plus bias. Cache, activate and return
        output = np.dot(input_activations, self.weights) + self.bias
        # Cache the weighted outputs and inputs
        #self.output = output
        self.input = input_activations
        # Pass the output throug the activation function
        output = self.activation(self, output)
        self.output = output
        return output

    def backward(self, jacobian_L_Z):
        """Backpropagate the loss jacobian; accumulate W/b grads, return J_L_Y."""
        # Get the jacobian linking the loss with respect of this layer output from the previous layer.
        # PURPOSE: Calculate the weights gradients, the bias gradient and the input_loss
        # that will be passed to the previous activation layer and so on, up to layer previous input
        Y = self.input
        # Create the jacobian J_Z_sum with the layer cached outputs and the derivative of activation function
        jacobian_Z_sum = self.create_jacobian_Z_sum()
        # Find the Weights gradients jacobian_L_W
        # Compute the simple jacobian linking the outputs and the weights
        simp_jacobian_Z_W = np.outer(Y, jacobian_Z_sum.diagonal())
        # Then compute the jacobian linking the loss to the weights
        jacobian_L_W = jacobian_L_Z * simp_jacobian_Z_W
        # Calculate the input layer loss jacobian_L_Y
        # by doing dot product of output layer loss and the weigths matrix transposed (so to invert M N to N M, where M < N, we go the other way around)
        jacobian_Z_Y = np.dot(jacobian_Z_sum ,self.weights.T)
        jacobian_L_Y = np.dot( jacobian_L_Z, jacobian_Z_Y)
        # Bias loss is the as the output loss --> the bias influence on the loss == layer activation output influence on the loss
        jacobian_L_B = jacobian_L_Z
        # Now save the bias loss and weight loss (representing the calculated gradiants).
        # This will be updated at the end of the batch, or SGD
        self.weights_grads =self.weights_grads + jacobian_L_W
        self.bias_grads = self.bias_grads + jacobian_L_B
        #Finally return the calculated input loss --> this will be the output loss of the next layer
        return jacobian_L_Y

    def create_jacobian_Z_sum(self):
        """Diagonal jacobian of activated outputs w.r.t. their weighted sums."""
        return np.identity(self.output[0].size) * self.d_activation(self, self.output)

    def update_gradients(self, learning_rate, gradient_avg_factor = 1):
        """Apply the accumulated gradients with SGD and reset the accumulators."""
        #Update gradients, usefull when doing batch learning
        # Get the avg of the gradients (for SGD divide by 1, else divide by batchsize)
        ## UPDATE: removed the division by batchsize: Implemented this factor in the learning rate
        #self.weights_grads = self.weights_grads / gradient_avg_factor
        #self.bias_grads = self.bias_grads / gradient_avg_factor
        # Update weights and biases
        self.weights -= learning_rate * self.weights_grads
        self.bias -= learning_rate * self.bias_grads
        self.weights_grads = np.zeros(self.weights.shape)
        self.bias_grads = np.zeros(self.bias.shape)

    def __str__(self):
        return "FC Layer type size = " + str(self.weights.shape) + " with activation = " + self.activation_name
class conv2D():
def __init__(self, input_shape, n_kernels, kernel_shape, strides, modes, weight_init_range, activation, debug):
self.type = "conv2D"
self.input_shape = input_shape
self.activation_name = activation
#Kernel stack shape for the layer (N, I, K_x, K_y)
self.kernel_shape = (n_kernels, input_shape[0], kernel_shape[0], kernel_shape[1])
self.activation = activations.get_activation_function(activation)
self.d_activation = activations.get_activation_derivative(activation)
self.strides = strides
self.modes = modes
self.weights = np.random.uniform(low=weight_init_range[0], high= weight_init_range[1], size= self.kernel_shape)
self.weights_grads = np.zeros(self.weights.shape)
self.p_x_start, self.p_x_stop, self.p_y_start, self.p_y_stop = self.calculate_padding()
self.output_shape = self.calculate_output_shape()
self.cached_calculation = {}
self.cache_weights_input_output_triplet_locations()
self.cached_output = None
self.debug = debug
'''print("###########################")
a = np.random.randint(1,4,(6,6))
print(a)
padded_a = self.apply_zero_padding(a)
print(padded_a)
print("kernel shape", (self.kernel_shape[2], self.kernel_shape[3]))
print("input shape", a.shape)
print("padded shape", padded_a.shape)
print("###########################")'''
def cache_weights_input_output_triplet_locations(self):
placeholder_input = np.zeros(self.input_shape)
array = placeholder_input[0]
kernel = self.weights[0][0]
stride_x_pointer = 0
while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1):
stride_y_pointer = 0
#while the kernel does not go over the x-akse of the array
while(stride_y_pointer + kernel.shape[1] -1 <= array.shape[1] - 1):
#while the kernel does not go over the x-akse of the array
#cache all touched weights and input for each kernel (output or Coordinates??)
for row in range(kernel.shape[0]):
for column in range(kernel.shape[1]):
# Cache coordinate only: (weight, input) --> output
#format: key ((weight_x_pos, weight_y_pos), (input_x_pos, input_y_pos)) ---> (output_x_pos, output_y_pos)
conv_output_coordinate = (stride_x_pointer // self.strides[0], stride_y_pointer // self.strides[1])
self.cached_calculation[((row, column), (row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate
#Cache weight coordinate and input/output values
# Update the stride long the y-axis
stride_y_pointer += self.strides[1]
#update the stride long the x-axis
stride_x_pointer += self.strides[0]
#End of convolution
def forward(self, input_feature_maps):
#reset the cached calculations from the previous forward pass
#self.cached_calculation = {}
output = np.zeros(self.output_shape)
#Apply padding
input_feature_maps = self.apply_zero_padding(input_feature_maps)
for i in range(0, self.kernel_shape[0]):
#for each kernel stack
kernel_stack = self.weights[i]
for j in range(0, self.kernel_shape[1]):
#for each kernel in the kernel stack (or input channel)
kernel = kernel_stack[j]
array = input_feature_maps[j]
stride_x_pointer = 0
conv_counter = 1
if self.debug:
print("**** NEW CONVOLUTION ****")
while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1):
stride_y_pointer = 0
#while the kernel does not go over the x-akse of the array
while(stride_y_pointer + kernel.shape[1] -1 <= array.shape[1] - 1):
#while the kernel does not go over the x-akse of the array
#Get the snip of the array to apply convolution on
array_snip = array[stride_x_pointer: stride_x_pointer + kernel.shape[0], stride_y_pointer: stride_y_pointer + kernel.shape[1]]
#apply convolution and get the result
result = np.sum(np.multiply(array_snip, kernel))
#update the output tensor
conv_output_coordinate = (i, stride_x_pointer // self.strides[0], stride_y_pointer // self.strides[1])
output[conv_output_coordinate] += result
'''#cache all the results, touched weights and input for each kernel (output or Coordinates??)
for row in range(kernel.shape[0]):
for column in range(kernel.shape[1]):
# Cache coordinate only: (weight, input) --> output
#format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), (input_channel, input_x_pos, input_y_pos)) ---> (feature_map_number, output_x_pos, output_y_pos)
self.cached_calculation[((i, j, row, column), (j, row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate
#Cache weight coordinate and input/output values
#ALTERNATIVE
# format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), input_val) ---> output_val
#self.cached_calculation[((i, j, row, column), array_snip[row, column])] = result'''
if self.debug:
print("convolution nr ", conv_counter )
print("\narray_snip: \n", array_snip)
print("\nkernel: \n", kernel)
print("\nelementwise multiplication: \n", np.multiply(array_snip, kernel))
print("\nresult: ", result)
# Update the stride long the y-axis
stride_y_pointer += self.strides[1]
conv_counter+=1
#update the stride long the x-axis
stride_x_pointer += self.strides[0]
#End of convolution
if self.debug:
print("\n----REVIEW----\n")
print("Total convolutions: ", conv_counter)
print("\ninput_feature_map:\n ", array)
print("\napplied kernel:\n ", kernel)
print("\nconvolution result:\n ", output[i])
print("***********************************")
#Cache input and output
self.cached_output = output
self.cached_input = input_feature_maps
#Apply activation
output = self.activation(self, output)
return output
def backward(self, jacobian_L_Z):
#Reshape J_LZ from FC to Conv2D and pass through activation layer
jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)
#print("JLZ før relu\n", jacobian_L_Z)
#jacobian_L_Z = self.d_activation(self, jacobian_L_Z)
#print("cached out after activation\n", self.cached_output)
jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.cached_output)
#print("JLZ etter relu\n", jacobian_L_Z)
# J_L_Z * f'(cached_output)
#Calculate J_LW
jacobian_L_W = self.compute_gradients(jacobian_L_Z)
self.weights_grads += jacobian_L_W
#Calculate J_LX
jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)
#Pass Jacobian L Y upstream
return jacobian_L_Y
def update_gradients(self, learning_rate):
self.weights -= learning_rate * self.weights_grads
self.weights_grads = np.zeros(self.weights.shape)
def compute_gradients(self, jacobian_L_Z):
grads = np.zeros(self.weights.shape)
#Iterate through all the weights (4 dimension)
#Iterate through the kernel stacks
for i in range(self.weights.shape[0]):
#Iterate throught each kernel/input channel
for j in range(self.weights.shape[1]):
#iterate through the x-axis of the kernel
for k in range(self.weights.shape[2]):
#iterate through the y-axis of the kernel
for l in range(self.weights.shape[3]):
#cached_data = {k: v for k,v in self.cached_calculation.items() if k[0] == (i,j,k,l)}
for key in self.cached_calculation.keys():
if key[0] == (k,l):
grads[(i,j,k,l)] += self.cached_input[j][key[1]] * jacobian_L_Z[i][self.cached_calculation[key]]
return grads
def compute_J_LY(self, jacobian_L_Z):
jacobian_L_Y = np.zeros(self.input_shape)
#Iterate through all the inputs (3 dimension)
#iterate through all channels/kernel of a kernel stack
for i in range(self.input_shape[0]):
#iterate through x-akses of 2d input
for j in range(self.input_shape[1]):
#iterate through y-axes of 2d input
for k in range(self.input_shape[2]):
#cached_data = {k: v for k,v in self.cached_calculation.items() if k[0] == (i,j,k,l)}
for key in self.cached_calculation.keys():
if key[1] == (j,k):
#for each kernel-stack
for l in range(self.weights.shape[0]):
jacobian_L_Y[(i,j,k)] += self.weights[l][i][key[0]] * jacobian_L_Z[l][self.cached_calculation[key]]
return jacobian_L_Y
def calculate_output_shape(self):
width = math.floor((self.input_shape[1] - self.kernel_shape[2] + self.p_x_start + self.p_x_stop)/self.strides[0] + 1)
height = math.floor((self.input_shape[2] - self.kernel_shape[3] + self.p_y_start + self.p_y_stop)/self.strides[1] + 1 )
return (self.kernel_shape[0], width, height)
def calculate_padding(self):
    """Compute zero-padding (x_start, x_stop, y_start, y_stop) for the
    configured modes: "full" pads by kernel-1 on each side, "same" keeps
    output length at ceil(i / s), anything else means no padding."""
    def pad_for(mode, s, f, i):
        if mode == "full":
            # Every input element must meet every kernel weight.
            return f - 1, f - 1
        if mode == "same":
            # Asymmetric split: floor on the start side, ceil on the stop side.
            total = s * math.ceil(i / s) - i + f - s
            return math.floor(total / 2), math.ceil(total / 2)
        # "valid"/unknown: no padding.
        return 0, 0

    p_x_start, p_x_stop = pad_for(self.modes[0], self.strides[0],
                                  self.kernel_shape[2], self.input_shape[1])
    p_y_start, p_y_stop = pad_for(self.modes[1], self.strides[1],
                                  self.kernel_shape[3], self.input_shape[2])
    return p_x_start, p_x_stop, p_y_start, p_y_stop
def apply_zero_padding(self, input_feature_maps):
    """Return a float64 copy of (channels, h, w) input surrounded by the
    precomputed zero padding; one vectorized slice assignment replaces the
    per-channel copy loop."""
    channels, height, width = input_feature_maps.shape
    padded = np.zeros((channels,
                       height + self.p_x_start + self.p_x_stop,
                       width + self.p_y_start + self.p_y_stop))
    # Drop every channel into the middle of its zero canvas at once.
    padded[:, self.p_x_start:self.p_x_start + height,
           self.p_y_start:self.p_y_start + width] = input_feature_maps
    return padded
def __str__(self):
    """Human-readable summary of the conv2D layer configuration."""
    template = ("Conv 2D Layer type with {} kernels of shape = {}"
                "input/output of shape{}/{} strides= s{} modes= {}"
                " with activation = {}")
    return template.format(self.kernel_shape[0], self.kernel_shape[1:],
                           self.input_shape, self.output_shape,
                           self.strides, self.modes, self.activation_name)
class conv1D():
    """1-D convolution layer.

    Holds a (n_kernels, in_channels, kernel_width) weight tensor, computes
    padding/output shape from the stride and mode, and caches a
    (weight_x, input_x) -> output_x coordinate map used by backprop.
    """
    def __init__(self, input_shape, n_kernels, kernel_shape, stride, mode, weight_init_range, activation, debug):
        # input_shape: (channels, width); kernel_shape argument is the kernel width.
        self.type = "conv1D"
        self.input_shape = input_shape
        self.activation_name = activation
        #Kernel stack shape for the layer (Num_kernel_stacks, Channels, Kernel_x)'
        self.kernel_shape = (n_kernels, input_shape[0], kernel_shape)
        self.activation = activations.get_activation_function(activation)
        self.d_activation = activations.get_activation_derivative(activation)
        self.stride = stride
        self.mode = mode
        # Uniform weight init inside the caller-supplied range.
        self.weights = np.random.uniform(low=weight_init_range[0], high= weight_init_range[1], size= self.kernel_shape)
        self.weights_grads = np.zeros(self.weights.shape)
        self.p_x_start, self.p_x_stop = self.calculate_padding()
        self.output_shape = self.calculate_output_shape()
        self.cached_calculation = {}
        self.cache_weights_input_output_triplet_locations()
        self.cached_output = None
        self.debug = debug
    def cache_weights_input_output_triplet_locations(self):
        """Slide the kernel once over a zero input and record, for every
        (weight position, input position) pair, the output position it feeds."""
        #Performe an empty convolution and cache all the position of the kernel, input and output triplet
        placeholder_input = np.zeros(self.input_shape)
        array = placeholder_input[0]
        kernel = self.weights[0][0]
        stride_x_pointer = 0
        while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1):
            #while the kernel does not go over the x-akse of the array
            #cache all touched weights and input for each kernel
            for column in range(kernel.shape[0]):
                # Cache coordinate only: (weight, input) --> output
                #format: key ((weight_x_pos), (input_x_pos)) ---> (output_x_pos)
                conv_output_coordinate = (stride_x_pointer // self.stride)
                self.cached_calculation[(column, column + stride_x_pointer)] = conv_output_coordinate
            #Cache weight coordinate and input/output values
            #update the stride long the x-axis
            stride_x_pointer += self.stride
        #End of convolution
    def forward(self, input_feature_maps):
        """Convolve (channels, width) input into an activated
        (n_kernels, out_width) output; caches pre-activation output and
        padded input for backward()."""
        output = np.zeros(self.output_shape)
        #Apply padding
        input_feature_maps = self.apply_zero_padding(input_feature_maps)
        for i in range(0, self.kernel_shape[0]):
            #for each kernel stack
            kernel_stack = self.weights[i]
            for j in range(0, self.kernel_shape[1]):
                #for each kernel in the kernel stack (or input channel)
                kernel = kernel_stack[j]
                array = input_feature_maps[j]
                stride_x_pointer = 0
                conv_counter = 1
                if self.debug:
                    print("**** NEW CONVOLUTION ****")
                while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1):
                    #while the kernel does not go over the x-akse of the array
                    #Get the snip of the array to apply convolution on
                    array_snip = array[stride_x_pointer: stride_x_pointer + kernel.shape[0]]
                    #apply convolution and get the result
                    result = np.sum(np.multiply(array_snip, kernel))
                    #update the output tensor (accumulates across input channels)
                    conv_output_coordinate = (i, stride_x_pointer // self.stride)
                    output[conv_output_coordinate] += result
                    if self.debug:
                        print("convolution nr ", conv_counter )
                        print("\narray_snip: \n", array_snip)
                        print("\nkernel: \n", kernel)
                        print("\nelementwise multiplication: \n", np.multiply(array_snip, kernel))
                        print("\nresult: ", result)
                        conv_counter+=1
                    #update the stride long the x-axis
                    stride_x_pointer += self.stride
                #End of convolution
                if self.debug:
                    print("\n----REVIEW----\n")
                    print("Total convolutions: ", conv_counter)
                    print("\ninput_feature_map:\n ", array)
                    print("\napplied kernel:\n ", kernel)
                    print("\nconvolution result:\n ", output[i])
                    print("***********************************")
        #Cache input and output
        self.cached_output = output
        self.cached_input = input_feature_maps
        #Apply activation
        output = self.activation(self, output)
        return output
    def backward(self, jacobian_L_Z):
        """Accumulate weight gradients from dL/dZ and return dL/dY upstream."""
        #Reshape J_LZ from FC to Conv2D and pass through activation layer
        jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)
        #print("JLZ before relu\n", jacobian_L_Z)
        #jacobian_L_Z = self.d_activation(self, jacobian_L_Z)
        #print("cached out after activation\n", self.cached_output)
        jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.cached_output)
        #print("JLZ after relu\n", jacobian_L_Z)
        # J_L_Z * f'(cached_output)
        #Calculate J_LW
        jacobian_L_W = self.compute_gradients(jacobian_L_Z)
        self.weights_grads += jacobian_L_W
        #Calculate J_LX
        jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)
        #Pass Jacobian L Y upstream
        return jacobian_L_Y
    def update_gradients(self, learning_rate):
        """Apply accumulated gradients (SGD step) and reset the accumulator."""
        self.weights -= learning_rate * self.weights_grads
        self.weights_grads = np.zeros(self.weights.shape)
    def compute_gradients(self, jacobian_L_Z):
        """dL/dW: for each weight, sum cached_input * dL/dZ over every
        (input, output) pair that weight touched."""
        grads = np.zeros(self.weights.shape)
        #Iterate through all the weights (3 dimension)
        for i in range(self.weights.shape[0]):
            for j in range(self.weights.shape[1]):
                for k in range(self.weights.shape[2]):
                    for key in self.cached_calculation.keys():
                        if key[0] == k:
                            grads[(i,j,k)] += self.cached_input[j][key[1]] * jacobian_L_Z[i][self.cached_calculation[key]]
        return grads
    def compute_J_LY(self, jacobian_L_Z):
        """dL/dY: for each input position, sum weight * dL/dZ over every
        (weight, output) pair that consumed it."""
        jacobian_L_Y = np.zeros(self.input_shape)
        #Iterate through all the inputs (3 dimension)
        #iterate through all channels/kernel of a kernel stack
        for i in range(self.input_shape[0]):
            #iterate through x-akses of 1d input
            for j in range(self.input_shape[1]):
                for key in self.cached_calculation.keys():
                    if key[1] == j:
                        #for each kernel-stack
                        for l in range(self.weights.shape[0]):
                            jacobian_L_Y[(i,j)] += self.weights[l][i][key[0]] * jacobian_L_Z[l][self.cached_calculation[key]]
        return jacobian_L_Y
    def calculate_output_shape(self):
        """Return (n_kernels, out_width) given padding and stride."""
        width = math.floor((self.input_shape[1] - self.kernel_shape[2] + self.p_x_start + self.p_x_stop)/self.stride + 1)
        return (self.kernel_shape[0], width)
    def calculate_padding(self):
        """Return (p_x_start, p_x_stop) padding amounts for the configured mode."""
        #Calculate padding long the x axis
        s = self.stride
        f = self.kernel_shape[2]
        i = self.input_shape[1]
        if self.mode == "full":
            #Every pixel must experience every weight of the kernel
            p_x_start = f - 1
            p_x_stop = f - 1
        elif self.mode == "same":
            #Every pixel must experience the middle weight of the kernel
            p_x_start = math.floor((s*math.ceil(i/s)-i+f-s)/2)
            p_x_stop = math.ceil((s*math.ceil(i/s)-i+f-s)/2)
        else:
            p_x_start = 0
            p_x_stop = 0
        return p_x_start, p_x_stop
    def apply_zero_padding(self, input_feature_maps):
        """Return a float64 copy of each channel centred in a zero canvas."""
        # Apply zero padding to the input feature maps according to the modes, strides and kernel size
        #if self.p_x_start == 0 and self.p_x_stop == 0:
        #    return input_feature_maps
        padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop))
        for channel in range(input_feature_maps.shape[0]):
            array = input_feature_maps[channel]
            #Create the background zero array
            padded_array = np.zeros((array.shape[0] + self.p_x_start + self.p_x_stop))
            #Copy the array in the middle of the zero background
            padded_array[self.p_x_start:array.shape[0]+ self.p_x_start] = array
            #Save the array
            padded_input_feature_maps[channel] = padded_array
        return padded_input_feature_maps
    def __str__(self):
        """Human-readable summary of the layer configuration."""
        return "Conv 1D Layer type with "+ str(self.kernel_shape[0]) +" kernels of shape = " + str(self.kernel_shape[1:]) +"input/output of shape" + str(self.input_shape)+"/" + str(self.output_shape) + " stride= " + str(self.stride) + " mode= " + str(self.mode) +" with activation = " + self.activation_name
class softmax():
    """Softmax output layer: forward delegates to the shared activation
    module; backward maps dL/dS to dL/dZ through the softmax Jacobian."""

    def __init__(self, size):
        # Number of logits handled by this layer.
        self.size = size
        self.shape = (1, size)
        self.type = "softmax"
        self.activation_function = activations.softmax

    def forward(self, input_data):
        """Apply softmax to the incoming logits."""
        return self.activation_function(self, input_data)

    def backward(self, jacobian_L_S, softmaxed_network_output):
        """Chain dL/dS through the softmax Jacobian to obtain dL/dZ."""
        j_soft = self.compute_j_soft(softmaxed_network_output)
        return np.dot(jacobian_L_S, j_soft)

    def compute_j_soft(self, S):
        """Jacobian of softmax output S: diag(S) - S S^T, computed vectorized."""
        flat = np.squeeze(S)
        n = len(flat)
        j_soft = -np.outer(flat, flat)
        # Diagonal entries are S_i - S_i^2 instead of -S_i * S_i.
        j_soft[np.arange(n), np.arange(n)] = flat - flat ** 2
        return j_soft

    def __str__(self):
        return "Softmax Layer of size = " + str(self.size)
|
3,575 | 17cd6746e58a7f33bc239c1420d51c6810ed02d8 | from turtle import *
import time
import random
# Accept RGB components in the 0-255 range (required by the randint-based colours below).
colormode(255)
class Ball(Turtle):
    """A circle turtle that drifts by (dx, dy) each step and bounces off
    the screen edges; painted in a random RGB colour."""

    def __init__(self, x, y, dx, dy, r):
        super().__init__()
        self.pu()
        self.goto(x, y)
        self.dx = dx
        self.dy = dy
        self.r = r
        self.shape("circle")
        # shapesize is in multiples of the default 20px circle; r/10 scales by radius.
        self.shapesize(r / 10)
        red, green, blue = (random.randint(0, 255) for _ in range(3))
        self.color(red, green, blue)

    def move(self, screen_width, screen_hight):
        """Advance one step and reflect the velocity at screen edges."""
        new_x = self.xcor() + self.dx
        new_y = self.ycor() + self.dy
        self.goto(new_x, new_y)
        # Vertical bounce: bottom or top edge crossed.
        if new_y - self.r < -screen_hight / 2 or new_y + self.r > screen_hight / 2:
            self.dy *= -1
        # Horizontal bounce: left or right edge crossed.
        if new_x - self.r < -screen_width / 2 or new_x + self.r > screen_width / 2:
            self.dx *= -1
tracer(0)
ht()

RUNNING = True
SLEEP = 0.0077
# Canvas dimensions, halved so coordinates stay well inside the window.
SCREEN_WIDTH = getcanvas().winfo_width()/2
SCREEN_HEIGHT = getcanvas().winfo_height()/2

MY_BALL = (0,0,0.5,-0.4,30)  # sample (x, y, dx, dy, r) spec; currently unused
NUMBER_OF_BALLS = 5
MINIMUM_BALL_RADIUS = 10
MAXIMUM_BALL_RADIUS = 100
MINIMUM_BALL_DX = -5
MAXIMUM_BALL_DX = 5
MINIMUM_BALL_DY = -5
MAXIMUM_BALL_DY = 5

BALLS = []
for i in range(NUMBER_OF_BALLS):
    # random.randint requires int bounds; winfo_* / 2 is a float in Python 3.
    # FIX: the y line was missing the int() casts the x line already had and
    # raised TypeError at startup.
    x = random.randint(int(- SCREEN_WIDTH / 2 + MAXIMUM_BALL_RADIUS) , int(SCREEN_WIDTH/2 - MAXIMUM_BALL_RADIUS))
    y = random.randint(int(-SCREEN_HEIGHT/2 + MAXIMUM_BALL_RADIUS) , int(SCREEN_HEIGHT/2 - MAXIMUM_BALL_RADIUS))
    dy = random.randint(MINIMUM_BALL_DY , MAXIMUM_BALL_DY)
    dx = random.randint(MINIMUM_BALL_DX , MAXIMUM_BALL_DX)
    r = random.randint(MINIMUM_BALL_RADIUS , MAXIMUM_BALL_RADIUS)
    # Re-roll zero velocities so every ball actually moves on both axes.
    while dx == 0:
        dx = random.randint(MINIMUM_BALL_DX , MAXIMUM_BALL_DX)
    while dy == 0:
        dy = random.randint(MINIMUM_BALL_DY , MAXIMUM_BALL_DY)
    new_ball = Ball(x,y,dx,dy,r)
    BALLS.append(new_ball)

def move_all_balls(BALLS):
    # Step every ball once, bouncing off the screen edges.
    for index in range(len(BALLS)):
        BALLS[index].move(SCREEN_WIDTH , SCREEN_HEIGHT)

#move_all_balls(BALLS)
mainloop()
|
3,576 | c14673b56cb31efb5d79859dd0f6f3c6806e1056 | import main.Tools
class EnigmaRotor:
    """A single Enigma rotor: two parallel alphabets (entrata/uscita) rotated
    together; wiring lookups delegate to main.Tools.search."""

    def __init__(self, entrata, uscita, rotore_succ=None, flag=True):
        # Copy the alphabets so rotations never mutate the caller's lists.
        self.entrata = entrata.copy()
        self.uscita = uscita.copy()
        self.numeroSpostamenti = 0
        self.flag = flag
        # Next rotor in the chain, advanced after a full revolution.
        self.rotore_succ = rotore_succ

    # Set the rotor so that `elemento` appears at the top of the input alphabet.
    def impostaPosizione(self, elemento):
        self.numeroSpostamenti = 0
        # FIX: use != instead of calling __eq__ directly — a direct dunder call
        # can return NotImplemented (which is truthy), skipping the reflected
        # comparison and terminating the loop incorrectly for mixed types.
        while self.entrata[0] != elemento:
            self.sposta()

    # Given a position in the 'entrata' vector, return the position of the
    # corresponding letter in the 'uscita' vector.
    def posizioneSinistra(self, posizione):
        return main.Tools.search(self.uscita, self.entrata[posizione])

    # Given a position in the 'uscita' vector, return the position of the
    # corresponding letter in the 'entrata' vector.
    def posizioneDestra(self, posizione):
        return main.Tools.search(self.entrata, self.uscita[posizione])

    # Advance this rotor by one step (when stepping is enabled); after a full
    # revolution, advance the next rotor in the chain.
    def muovi(self):
        if self.flag == True:
            self.numeroSpostamenti = self.numeroSpostamenti + 1
            self.sposta()
            if self.numeroSpostamenti > 26 and not (self.rotore_succ is None):
                self.rotore_succ.muovi()
                self.numeroSpostamenti = 0

    def sposta(self):
        # Rotate both alphabets by one: the head element moves to the tail.
        elementoEntrata = self.entrata.pop(0)
        elementoUscita = self.uscita.pop(0)
        self.entrata.append(elementoEntrata)
        self.uscita.append(elementoUscita)

    def getEntrata(self):
        return self.entrata

    def getUscita(self):
        return self.uscita

    def __str__(self):
        return '{'+str(self.entrata)+', '+str(self.uscita)+'}'
|
3,577 | bd5f298027f82edf5451f5297d577005674de4c3 | import time
import random
import math
# Sample travellers: (name, origin airport code); everyone flies into LGA.
people = [('Seymour', 'BOS'),
          ('Franny', 'DAL'),
          ('Zooey', 'CAK'),
          ('Walt', 'MIA'),
          ('Buddy', 'ORD'),
          ('Les', 'OMA')]

destination = 'LGA'

# (origin, dest) -> list of (depart "HH:MM", arrive "HH:MM", price) tuples,
# kept in file order.  NOTE: file() is Python 2 only.
flights = dict()
for line in file('schedule.txt'):
    origin, dest, depart, arrive, price = line.strip().split(',')
    flights.setdefault((origin, dest), [])
    flights[(origin, dest)].append((depart, arrive, int(price)))
def getMinutes(t):
    """Convert an "HH:MM" string into minutes after midnight."""
    hours, minutes = time.strptime(t, '%H:%M')[3:5]
    return hours * 60 + minutes
def printSchedule(r):
    # Pretty-print one person per line: outbound and return flight times and
    # prices.  `r` holds two flight indices per person (outbound, return).
    # NOTE: Python 2 — print statement and true integer division below.
    for d in range(len(r) / 2):
        name = people[d][0]
        origin = people[d][1]
        out = flights[(origin, destination)][r[2 * d]]
        ret = flights[(origin, destination)][r[2 * d + 1]]
        print "%10s%10s %5s-%5s $%3s %5s-%5s $%3s" % (name, origin,
                                                      out[0], out[1], out[2],
                                                      ret[0], ret[1], ret[2])
def scheduleCost(sol):
    """Total cost of a schedule: flight prices + group waiting time
    (+ $50 extra car-rental day when the car is returned on a later clock
    time than it was picked up).

    FIX: use floor division `//` in the range() bounds — identical result in
    Python 2 but also valid in Python 3, where `len(sol) / 2` is a float and
    range() raises TypeError.
    """
    totalPrice = 0
    totalWait = 0
    latestArrival = 0
    earliestDepart = 24 * 60
    # First pass: sum ticket prices and find the group's latest arrival and
    # earliest departure at the destination.
    for d in range(len(sol) // 2):
        origin = people[d][1]
        out = flights[(origin, destination)][int(sol[2 * d])]
        ret = flights[(origin, destination)][int(sol[2 * d + 1])]
        totalPrice += out[2] + ret[2]
        if latestArrival < getMinutes(out[1]): latestArrival = getMinutes(out[1])
        if earliestDepart > getMinutes(ret[0]): earliestDepart = getMinutes(ret[0])
    # Second pass: everyone waits at the airport for the last arrival, and
    # everyone leaves together at the earliest departure.
    for d in range(len(sol) // 2):
        origin = people[d][1]
        out = flights[(origin, destination)][int(sol[2 * d])]
        ret = flights[(origin, destination)][int(sol[2 * d + 1])]
        totalWait += latestArrival - getMinutes(out[1])
        totalWait += getMinutes(ret[0]) - earliestDepart
    if latestArrival > earliestDepart: totalPrice += 50
    return totalWait + totalPrice
def randomOptimize(domain, costf = scheduleCost):
    """Random search: sample 1000 random solutions and keep the cheapest.

    domain: list of (low, high) inclusive bounds, one per solution slot.
    BUG FIX: the original returned `r`, the *last* candidate generated,
    instead of `bestr`, the best one found.
    """
    best = 999999999999
    bestr = None
    for i in range(1000):
        # Draw a random value for every slot within its bounds.
        r = [random.randint(domain[i][0], domain[i][1]) for i in range(len(domain))]
        cost = costf(r)
        if cost < best:
            best = cost
            bestr = r
    return bestr
def hillClimb(domain, costf = scheduleCost):
    """Greedy hill climbing: repeatedly move to the cheapest single-step
    neighbour; stop at a local optimum."""
    sol = [random.randint(domain[i][0], domain[i][1]) for i in range(len(domain))]
    while 1:
        # Enumerate every neighbour reachable by nudging one slot by +/-1.
        neighbors = []
        for j, value in enumerate(sol):
            if value > domain[j][0]:
                neighbors.append(sol[:j] + [value - 1] + sol[j + 1:])
            if value < domain[j][1]:
                neighbors.append(sol[:j] + [value + 1] + sol[j + 1:])
        current = costf(sol)
        best = current
        for candidate in neighbors:
            cost = costf(candidate)
            if cost < best:
                best = cost
                sol = candidate
        # No neighbour improved on the current cost: local optimum reached.
        if best == current:
            break
    return sol
def annealingOptimize(domain, costf = scheduleCost, T = 10000.0, cool = 0.95, step = 1):
    """Simulated annealing: random single-slot moves, always accepting
    improvements and sometimes (probability e^(-dE/T)) accepting worse
    solutions while the temperature T cools by `cool` each round."""
    sol = [random.randint(domain[i][0], domain[i][1]) for i in range(len(domain))]
    while T > 0.1:
        # Nudge one randomly chosen slot by up to `step` in either direction.
        i = random.randint(0, len(domain) - 1)
        dir = random.randint(-step, step)
        vec = sol[:]
        # Clamp the mutated slot back into its legal domain.
        vec[i] = max(domain[i][0], min(domain[i][1], vec[i] + dir))
        ca = costf(sol)
        cb = costf(vec)
        if cb < ca or random.random() < pow(math.e, -(cb - ca) / T):
            sol = vec
        T = T * cool
    return sol
def geneticOptimize(domain, costf = scheduleCost, popSize = 50, step = 1,
mutProb = 0.2, elite = 0.2, maxIter = 100):
def mutate(vec):
i = random.randint(0, len(domain) - 1)
if random.random < 0.5 and vec[i] > domain[i][0]:
return vec[0 : i] + [vec[i] - step] + vec[i + 1 :]
elif vec[i] < domain[i][1]:
return vec[0 : i] + [vec[i] + step] + vec[i + 1 :]
def crossOver(r1, r2):
i = random.randint(1, len(domain) - 2)
return r1[0 : i] + r2[i :]
pop = list()
for i in range(popSize):
vec = [random.randint(domain[i][0], domain[i][1])
for i in range(len(domain))]
pop.append(vec)
topElite = int(elite * popSize)
for i in range(maxIter):
scores = [(costf(v), v) for v in pop if v != None]
scores.sort()
ranked = [v for (s, v) in scores]
pop = ranked[0 : topElite]
while len(pop) < popSize:
if random.random() < mutProb:
pop.append(mutate(ranked[random.randint(0, topElite)]))
else:
c1 = random.randint(0, topElite)
c2 = random.randint(0, topElite)
pop.append(crossOver(ranked[c1], ranked[c2]))
print scores[0][0]
return scores[0][1]
|
3,578 | 6e3aa677985d7bd91bfbbd2078665206839bac63 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import paramiko
import commands
def ip_check():
    """
    Collect user@host[:path] arguments from sys.argv, ping each host once,
    and hand the reachable ones to path_check().
    """
    candidates = [arg for arg in sys.argv if '@' in arg]
    reachable = []
    for candidate in candidates:
        host = candidate.split('@')[1].split(':')[0]
        # A single ping probe; exit status 0 means the host answered.
        if os.system('ping -c 1 ' + host + ' > /dev/null') == 0:
            reachable.append(candidate)
    if reachable:
        path_check(reachable)
def path_check(hosts):
    """
    Parses username, port, host and local and remote path,
    finds all local and remote files, using find_local_files and find_remote_files functions,
    and then opens ssh session using paramiko for each given host.
    """
    local_files = []
    local_path = ''
    # Extract the password from a "–pass='...'" argument (note: en dash).
    for item in sys.argv:
        if '–pass' in item:
            secret = item.split('=')[1].strip("'")
            break
    else:
        secret = ''
    # Heuristic argument classification: anything with '/' is a path,
    # anything with a '.' but no '/' is treated as a file name.
    for item in sys.argv:
        if '/' in item and '@' not in item:
            local_path = item
        if '.' in item and '/' not in item:
            local_files.append(item)
    if local_path:
        # NOTE(review): append() puts the whole returned list in as a single
        # element (extend() was probably intended); local_files is never read
        # afterwards, so this has no visible effect.
        local_files.append(find_local_files(local_path, 'f'))
    for i in hosts:
        # Each host spec looks like user[,.:port]@host[:remote_path].
        user_port, host_remote_path = i.split('@')
        if ':' in i:
            host, remote_path = host_remote_path.split(':')
        else:
            host = host_remote_path
            remote_path = ''
        # Any of ',' '.' ':' may separate user from port.
        for separator in ',.:':
            if separator in user_port:
                user, port = user_port.split(separator)
                break
        else:
            user = user_port
            port = 0
        ssh = open_sshclient(host, user, port, secret)
        if not remote_path:
            remote_path = local_path
        # Ensure the destination directory exists before syncing.
        ssh.exec_command('mkdir -p '+remote_path)
        # NOTE(review): remote_files is computed but never used.
        remote_files = find_remote_files(remote_path, 'f', ssh)
        ssh.close()
    copy_file(hosts)
def open_sshclient(host, user, port, secret):
    """
    Open and return a connected paramiko SSHClient; password and port are
    only passed to connect() when actually supplied (non-empty / non-zero).
    """
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.load_system_host_keys()
    # Build the kwargs instead of enumerating the four truthiness combinations.
    kwargs = {'hostname': host, 'username': user}
    if secret:
        kwargs['password'] = secret
    if port:
        kwargs['port'] = port
    client.connect(**kwargs)
    return client
def copy_file(hosts):
    """
    Run rsync once per host, forwarding every command-line argument that is
    neither a host spec nor the password flag.
    """
    extra = [arg for arg in sys.argv[1:]
             if '@' not in arg and '–pass' not in arg]
    flags = ' '.join(extra)
    for host in hosts:
        # plz use .format for test strings concatenation
        os.system('rsync ' + flags + ' ' + host)
def find_remote_files(remote_path, type, ssh):
    """
    Run `find` over ssh and return the matching remote paths (type 'f' for
    files, 'd' for directories) with trailing whitespace stripped.
    """
    (ssh_in, ssh_out, ssh_err) = ssh.exec_command("find %s -name \"*\" -type %s" % (remote_path, type))
    return [line.rstrip() for line in ssh_out.readlines()]
def find_local_files(local_path, type):
    """
    Run `find` locally and return the matching paths (type 'f' for files,
    'd' for directories), one list entry per output line.
    NOTE: uses the Python 2-only `commands` module.
    """
    local_out = commands.getoutput("find %s -name \"*\" -type %s" % (local_path, type))
    return local_out.split("\n")
# Script entry point: validate hosts from argv and kick off the sync.
ip_check()
|
3,579 | c385fe2af9aebc9c4a42d4db5a341fcedeec3898 | from django.shortcuts import render
# Create your views here.
from django.shortcuts import redirect
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import Http404, HttpResponseForbidden
from django.shortcuts import render
from django.urls import reverse
from django.views.generic.edit import FormMixin
from django.contrib.auth.decorators import login_required
from django.views.generic import DetailView, ListView
# from .forms import ComposeForm
# from .models import Thread, ChatMessage
from django.shortcuts import render
import os
import django
# Point Django at the project settings before any app/ORM access.
# NOTE(review): configuring settings inside views.py is unusual — this
# normally lives in manage.py/wsgi.py; confirm it is needed here.
os.environ["DJANGO_SETTINGS_MODULE"] = 'arizona.settings'
django.setup()
def index(request):
    """Render the landing page."""
    template_name = 'canyon/index.html'
    return render(request, template_name)
def results(request):
    """Render the results page."""
    template_name = 'canyon/results.html'
    return render(request, template_name)
|
3,580 | 0577c274672bac333500535f21f568ade62100c7 |
# *Using Min & Max Exercise
def extremes(nums):
    """Return a (maximum, minimum) tuple for the values in *nums*.

    Raises ValueError (from max/min) when *nums* is empty.
    """
    highest = max(nums)
    lowest = min(nums)
    return (highest, lowest)
3,581 | fe0d6cc03512d54d2d8722551e3f2a7c1bf43997 | #!/opt/Python/2.7.3/bin/python
import sys
from collections import defaultdict
import numpy as np
import re
import os
import argparse
from Bio import SeqIO
def usage():
    # Print command-line usage for this script.
    # NOTE: Python 2 print statement; `test` is assigned but never used.
    test="name"
    message='''
python CircosConf.py --input circos.config --output pipe.conf
    '''
    print message
def fasta_id(fastafile):
    """Return a defaultdict(str) mapping each sequence id in *fastafile*
    (FASTA format, parsed with Bio.SeqIO) to 1."""
    ids = defaultdict(str)
    for record in SeqIO.parse(fastafile, "fasta"):
        ids[record.id] = 1
    return ids
#temperate.mPing.group.id
def readtable(infile):
    """Read a tab-separated file and return {first_column_value: 1, ...}
    as a defaultdict(str); lines shorter than 3 characters are skipped.

    FIX: dict.has_key() was removed in Python 3; the `in` operator is
    equivalent and works in both Python 2 and 3.
    """
    data = defaultdict(str)
    with open (infile, 'r') as filehd:
        for line in filehd:
            line = line.rstrip()
            if len(line) > 2:
                unit = re.split(r'\t',line)
                # Only record the first occurrence of each key.
                if unit[0] not in data:
                    data[unit[0]] = 1
    return data
def main():
    # Split japonica accessions that are NOT in the high-mPing input list
    # into seven ~100-entry download-list files.
    # NOTE: Python 2 — print statements, `print >>`, and dict.has_key below.
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input')
    parser.add_argument('-o', '--output')
    parser.add_argument('-v', dest='verbose', action='store_true')
    args = parser.parse_args()
    # Crude required-argument check: len(None) raises, triggering usage().
    try:
        len(args.input) > 0
    except:
        usage()
        sys.exit(2)
    unique_id = readtable(args.input)
    #1 IRIS313-15896 Colombia Indica ERS467753 anonftp@ftp.ncbi.nlm.nih.gov:/sra/sra-instant/reads/ByRun/sra/ERR/ERR626/ERR626447/ERR626447.sra
    infile = '../GigaScience/rice_line_IRRI_2466.download.list'
    count = 0
    other_id = defaultdict(lambda : int())
    total_id = defaultdict(lambda : int())
    r = re.compile(r'Japonica', re.IGNORECASE)
    ofiles = []
    # Seven output buckets, each holding up to ~100 accessions.
    for i in range(7):
        ofile = open('%s.other%s.download.list' %(args.input, i), 'w')
        ofiles.append(ofile)
    with open (infile, 'r') as filehd:
        for line in filehd:
            line = line.rstrip()
            if len(line) > 2:
                unit = re.split(r'\t',line)
                # Keep japonica rows only (column 3 is the subpopulation).
                if not r.search(unit[3]):
                    continue
                total_id[unit[1]] = 1
                if not unique_id.has_key(unit[1]):
                    other_id[unit[1]] = 1
                    count = len(other_id.keys())
                    # Bucket index advances every 100 accessions.
                    index = int(float(count)/100)
                    #print index, count, unit[1]
                    print >> ofiles[index], line
    for i in range(7):
        ofiles[i].close()
    print 'high mping: %s (2 are not japonica in this group)' %(len(unique_id.keys()))
    print 'other: %s' %(count)
    print 'total: %s' %(len(total_id.keys()))

if __name__ == '__main__':
    main()
|
3,582 | 7a4044acaa191509c96e09dcd48e5b951ef7a711 | # put your python code here
time_one = abs(int(input()))
time_two = abs(int(input()))
time_three = abs(int(input()))
time_four = abs(int(input()))
time_five = abs(int(input()))
time_six = abs(int(input()))
HOUR = 3600 # 3600 seconds in an hour
MINUTE = 60 # 60 seconds in a minute
input_one = time_one * HOUR + time_two * MINUTE + time_three
input_two = time_four * HOUR + time_five * MINUTE + time_six
print(abs(input_one - input_two))
|
3,583 | 91cf1f4cf34ac9723be4863e81149c703adca27a | import sys
sys.path.append("..") # Adds higher directory to python modules path.
from utils import npm_decorator
# num_node = 3
@ npm_decorator(3)
def scenario():
    """
    1. Check each peer's genesis block
    2. Generate new blocks on each peer
    2.1. 2 blocks on peer #1
    2.2. 4 blocks on peer #2
    2.3. 2 blocks on peer #3
    3. Connect peers
    3.1. peer #1 with #2 (1->2)
    3.2. peer #1 with #3 (1->(2 and 3))
    4. Generate new blocks
    4.1. 3 blocks on peer #1
    4.2. 5 blocks on peer #3
    5. Stop all peers
    """
    LOCAL_HOST = "http://127.0.0.1"
    # import functions
    from . import genesis_block
    from . import create_block
    from . import connect_peer
    from . import stop_server
    from . import block_crosscheck
    # NOTE(review): every step uses a bare `except:` so any failure (including
    # connection errors, not just AssertionError) is reported as FAIL and the
    # scenario continues with the next step.
    total_cnt = 0
    pass_cnt = 0
    # 1. Check each peer's genesis block
    try:
        assert genesis_block.check(LOCAL_HOST, 3001)
        assert genesis_block.check(LOCAL_HOST, 3002)
        assert genesis_block.check(LOCAL_HOST, 3003)
        print("pass", end=' ')
        pass_cnt += 1
    except:
        print("FAIL", end=' ')
    finally:
        print("test1/genesis_block")
        total_cnt += 1
    # 2. Generate new blocks
    # 2.1. 2 blocks on peer #1
    # 2.2. 4 blocks on peer #2
    # 2.3. 2 blocks on peer #3
    try:
        assert create_block.addBlocks(LOCAL_HOST, 3001, num=2)
        assert create_block.check(LOCAL_HOST, 3001, num=2)
        assert create_block.addBlocks(LOCAL_HOST, 3002, num=4)
        assert create_block.check(LOCAL_HOST, 3002, num=4)
        assert create_block.addBlocks(LOCAL_HOST, 3003, num=2)
        assert create_block.check(LOCAL_HOST, 3003, num=2)
        print("pass", end=' ')
        pass_cnt += 1
    except:
        print("FAIL", end=' ')
    finally:
        print("test1/create_block")
        total_cnt += 1
    # 3. Connect peers
    # 3.1. peer #1 with #2 (1->2)
    # 3.2. peer #1 with #3 (1->(2 and 3))
    try:
        assert connect_peer.connectPeer(LOCAL_HOST, 3001, "ws://127.0.0.1:6002")
        assert connect_peer.connectPeer(LOCAL_HOST, 3001, "ws://127.0.0.1:6003")
        print("pass", end=' ')
        pass_cnt += 1
    except:
        print("FAIL", end=' ')
    finally:
        print("test1/connect_peer")
        total_cnt += 1
    # 4. Generate new blocks
    # 4.1. 3 blocks on peer #1
    # 4.2. 5 blocks on peer #3
    # New blocks must propagate to the other connected peers.
    try:
        isPass, newBlocks = block_crosscheck.addBlocks(LOCAL_HOST, 3001, num=3)
        assert isPass
        assert block_crosscheck.check(LOCAL_HOST, 3002, newBlocks, num=3)
        assert block_crosscheck.check(LOCAL_HOST, 3003, newBlocks, num=3)
        isPass, newBlocks = block_crosscheck.addBlocks(LOCAL_HOST, 3003, num=5)
        assert isPass
        assert block_crosscheck.check(LOCAL_HOST, 3001, newBlocks, num=5)
        assert block_crosscheck.check(LOCAL_HOST, 3002, newBlocks, num=5)
        print("pass", end=' ')
        pass_cnt += 1
    except:
        print("FAIL", end=' ')
    finally:
        print("test1/block_crosscheck")
        total_cnt += 1
    # 5. Stop all peers
    try:
        assert stop_server.stopServer(LOCAL_HOST, 3001)
        assert stop_server.stopServer(LOCAL_HOST, 3002)
        assert stop_server.stopServer(LOCAL_HOST, 3003)
        print("pass", end=' ')
        pass_cnt += 1
    except:
        print("FAIL", end=' ')
    finally:
        print("test1/stop_server")
        total_cnt += 1
    # return pass_cnt_per_test and total_cnt_per_test
    return pass_cnt, total_cnt
|
3,584 | 9e98a361ef20049cba488b86ad06eb92b3d29d11 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    """LeetCode 110 — determine whether a binary tree is height-balanced."""

    def isBalanced(self, root: "TreeNode") -> bool:
        """Return True iff every node's left/right subtree heights differ by <= 1.

        FIXES vs. the original:
        * `self.mem = dict()` was re-created on every *recursive*
          isBalanced() call, so the height memo never survived and the
          algorithm degraded to O(n^2); this version decides balance in a
          single O(n) post-order pass.
        * The TreeNode annotation is now a forward-reference string, so the
          class can be defined even when TreeNode is not imported.
        """
        self.mem = dict()  # kept so getHeight() still works after this call

        def height_or_fail(node):
            # Height of the subtree, or -1 as soon as an imbalance is found.
            if node is None:
                return 0
            left = height_or_fail(node.left)
            if left < 0:
                return -1
            right = height_or_fail(node.right)
            if right < 0:
                return -1
            if abs(left - right) > 1:
                return -1
            return max(left, right) + 1

        return height_or_fail(root) >= 0

    def getHeight(self, node):
        """Memoized subtree height; requires isBalanced() to have initialised self.mem."""
        if node in self.mem:
            return self.mem[node]
        if node is None:
            return 0
        h = max(self.getHeight(node.left), self.getHeight(node.right)) + 1
        self.mem[node] = h
        return h
|
3,585 | 1073845131afb2446ca68ee10092eeb00feef800 | # uncompyle6 version 3.2.3
# Python bytecode 3.6 (3379)
# Decompiled from: Python 2.7.5 (default, Jul 13 2018, 13:06:57)
# [GCC 4.8.5 20150623 (Red Hat 4.8.5-28)]
# Embedded file name: ./authx/migrations/0001_initial.py
# Compiled at: 2018-08-23 19:33:14
# Size of source mod 2**32: 2715 bytes
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion, uuid
class Migration(migrations.Migration):
    # Auto-generated (then decompiled) initial migration: creates the custom
    # `User` model stored in table `auth_user`, with a UUID primary key and
    # permission/group wiring mirroring django.contrib.auth's AbstractUser.
    # NOTE(review): several verbose_name values are empty strings — likely
    # stripped during decompilation; confirm against the original source.

    initial = True

    dependencies = [
        ('auth', '0008_alter_user_username_max_length')]

    operations = [
        migrations.CreateModel(name='User',
          fields=[
           (
            'password', models.CharField(max_length=128, verbose_name='password')),
           (
            'last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
           (
            'is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
           (
            'id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
           (
            'created_at', models.DateTimeField(auto_now_add=True)),
           (
            'updated_at', models.DateTimeField(auto_now=True)),
           (
            'username', models.CharField(max_length=11, unique=True, verbose_name='')),
           (
            'fullname', models.CharField(blank=True, max_length=80, verbose_name='')),
           (
            'thumbnail', models.ImageField(blank=True, null=True, upload_to='thumbnail', verbose_name='')),
           (
            'is_active', models.BooleanField(default=True)),
           (
            'is_admin', models.BooleanField(default=False)),
           (
            'is_staff', models.BooleanField(default=False)),
           (
            'phone_number', models.CharField(blank=True, max_length=30, null=True)),
           (
            'email', models.CharField(blank=True, max_length=30, null=True)),
           (
            'groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
           (
            'owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
           (
            'user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'))],
          options={'verbose_name':'',
         'verbose_name_plural':'',
         'db_table':'auth_user',
         'permissions':(('view_user', 'Can drive'), )})]
# okay decompiling ./restful/hawkeye/authx/migrations/0001_initial.pyc
|
3,586 | 8d3f8872a3d5c4351551dc2d46839763d28ebd70 | # For better usage on ddp
import torch
from pytorch_lightning.metrics import Metric
import cv2
import numpy as np
import skimage
import torch.tensor as Tensor
class SegMetric(Metric):
def __init__(self, iou_thr, prob_thr, img_size, dist_sync_on_step=False):
super().__init__(dist_sync_on_step=dist_sync_on_step)
# call `self.add_state`for every internal state that is needed for the metrics computations
# dist_reduce_fx indicates the function that should be used to reduce
# state from multiple processes
self.iou_thr = iou_thr
self.prob_thr = prob_thr
self.img_size = img_size
self.use_ddp = dist_sync_on_step
self.add_state("csv_files", default=[], dist_reduce_fx="cat")
def update(self, preds: torch.Tensor, target: torch.Tensor):
logit_seg, _ = preds
_, mask, mask_cls, _, img_path, _ = target
assert logit_seg.shape == mask.shape
pred_seg = torch.sigmoid(logit_seg).detach().cpu().numpy()
gt_seg = mask.detach().cpu().numpy()
gt_cls = mask_cls.detach().cpu().numpy()[:, 0].tolist()
pred_seg = pred_seg.astype("float32")
for idx, file_path in enumerate(img_path):
pred = cv2.resize(pred_seg[idx][0], (self.img_size, self.img_size))
pred = np.expand_dims(pred, 0)
gt = cv2.resize(
gt_seg[idx][0],
(self.img_size, self.img_size),
interpolation=cv2.INTER_NEAREST,
)
gt = np.expand_dims(gt, 0)
gt_c = gt_cls[idx]
is_p = int(gt_c == 1.0)
is_n = 1 - is_p
gt_nums_, pred_nums_, tp_nums_, fp_nums_ = evaluation(
pred, gt, iou_th=self.iou_thr, prob_ths=[self.prob_thr]
)
# csv = file_path.split("/")[5]
csv = file_path.split("png_1024/")[1].split("/")[0]
if not hasattr(self, f"{csv}_gt"):
self.csv_files += [csv]
self.add_state(f"{csv}_gt", default=Tensor(0), dist_reduce_fx="sum")
self.add_state(f"{csv}_pred", default=Tensor(0), dist_reduce_fx="sum")
self.add_state(f"{csv}_tp", default=Tensor(0), dist_reduce_fx="sum")
self.add_state(f"{csv}_fp", default=Tensor(0), dist_reduce_fx="sum")
self.add_state(f"{csv}_pos", default=Tensor(0), dist_reduce_fx="sum")
self.add_state(
f"{csv}_neg", default=torch.tensor(0), dist_reduce_fx="sum"
)
# TODO: Need to be change if num_class > 1
# FIXME: 몬 생긴 포맷..
setattr(self, f"{csv}_gt", getattr(self, f"{csv}_gt") + gt_nums_[0])
setattr(
self, f"{csv}_pred", getattr(self, f"{csv}_pred") + pred_nums_[0, 0]
)
setattr(self, f"{csv}_tp", getattr(self, f"{csv}_tp") + tp_nums_[0, 0])
setattr(self, f"{csv}_fp", getattr(self, f"{csv}_fp") + fp_nums_[0, 0])
setattr(self, f"{csv}_pos", getattr(self, f"{csv}_pos") + is_p)
setattr(self, f"{csv}_neg", getattr(self, f"{csv}_neg") + is_n)
    def update_each(self, preds: torch.Tensor, target: torch.Tensor):
        """Alias entry point used for per-CSV updates; delegates to :meth:`update`."""
        self.update(preds, target)
def compute(self):
gt = 0
tp = 0
fp = 0
pos = 0
neg = 0
for csv in self.csv_files:
gt += getattr(self, f"{csv}_gt").item()
tp += getattr(self, f"{csv}_tp").item()
fp += getattr(self, f"{csv}_fp").item()
pos += getattr(self, f"{csv}_pos").item()
neg += getattr(self, f"{csv}_neg").item()
pre = tp / (tp + fp * (pos / (neg + 1e-5)) + 1e-5)
rec = tp / (gt + 1e-5)
f1 = 2 * (pre * rec) / (pre + rec + 1e-5)
myf1 = (pre + rec) / 2.0
lesion_metric_dict = {
"pre": pre,
"rec": rec,
"f1": f1,
"myf1": myf1,
}
# FIXME: DDP Error: https://github.com/PyTorchLightning/pytorch-lightning/discussions/2529
# Tensors must be CUDA and dense
# if self.use_ddp:
# lesion_metric_dict = torch.FloatTensor([myf1], device=self.device)
return lesion_metric_dict
def compute_each(self):
metric_dict_each_csv = {}
for csv in self.csv_files:
gt = getattr(self, f"{csv}_gt").item()
tp = getattr(self, f"{csv}_tp").item()
fp = getattr(self, f"{csv}_fp").item()
pos = getattr(self, f"{csv}_pos").item()
neg = getattr(self, f"{csv}_neg").item()
pre = tp / (tp + fp * (pos / (neg + 1e-5)) + 1e-5)
rec = tp / (gt + 1e-5)
f1 = 2 * (pre * rec) / (pre + rec + 1e-5)
fppi = fp / (pos + neg + 1e-5)
# myf1 = (pre + rec) / 2.0
lesion_metric_dict = {
"gt": gt,
"pos": pos,
"neg": neg,
"pre": pre,
"rec": rec,
"f1": f1,
"fppi": fppi
# "myf1": myf1,
}
metric_dict_each_csv[csv] = lesion_metric_dict
return metric_dict_each_csv
# Helper functions
def calc_iou(bbox_a, bbox_b):
    """Return the intersection-over-union of two bounding boxes.

    :param bbox_a: bbox list [min_y, min_x, max_y, max_x]
    :param bbox_b: bbox list [min_y, min_x, max_y, max_x]
    :return: IoU in [0, 1]; 0.0 when the union area is zero (both boxes
        degenerate) -- the original raised ZeroDivisionError in that case.
    """
    size_a = (bbox_a[2] - bbox_a[0]) * (bbox_a[3] - bbox_a[1])
    size_b = (bbox_b[2] - bbox_b[0]) * (bbox_b[3] - bbox_b[1])

    # Intersection rectangle, clamped to zero extent when the boxes are disjoint.
    min_ab_y = max(bbox_a[0], bbox_b[0])
    min_ab_x = max(bbox_a[1], bbox_b[1])
    max_ab_y = min(bbox_a[2], bbox_b[2])
    max_ab_x = min(bbox_a[3], bbox_b[3])
    inter_ab = max(0, max_ab_y - min_ab_y) * max(0, max_ab_x - min_ab_x)

    union_ab = size_a + size_b - inter_ab
    if union_ab <= 0:
        return 0.0
    return inter_ab / union_ab
def evaluation(pred, gt, iou_th=0.15, prob_ths=[0.5]):
    """Region-level detection counts for a segmentation prediction.

    :param pred: prediction seg map, shape = (1, num_classes, height, width)
        or (num_classes, height, width)
    :param gt: ground-truth seg map, same layout as ``pred``
    :param iou_th: bbox-IoU threshold for matching a prediction region to a GT region
    :param prob_ths: probability thresholds used to binarize ``pred``
    :return:
        gt_nums:   ground-truth region counts, shape (num_classes,)
        pred_nums: prediction region counts, shape (len(prob_ths), num_classes)
        tp_nums:   true-positive region counts, same shape as pred_nums
        fp_nums:   false-positive region counts, same shape as pred_nums

    Assumes batch_size == 1 (skimage.measure.regionprops only works on 2-D maps).
    GT regions are discarded when their area is below (20 * image_size/2048)**2
    pixels (based on a 2048x2048 image with 0.2 mm pixel spacing); proper pixel
    spacing handling would require reading it from the image metadata.
    """
    if len(pred.shape) > 3:
        pred = pred[0]
        gt = gt[0]
    num_classes = pred.shape[0]
    image_size = gt.shape[2]

    # Connected components of the GT map, per class.
    gt_regions = [
        skimage.measure.regionprops(skimage.measure.label(gt[c, :, :]))
        for c in range(num_classes)
    ]
    # Drop GT regions that are too small to count.
    for c in range(num_classes):
        gt_regions[c] = [
            r for r in gt_regions[c] if r.area > (20 * (image_size / 2048)) ** 2
        ]
    # Connected components of the binarized prediction, per threshold and class.
    pred_regions = [
        [
            skimage.measure.regionprops(skimage.measure.label(pred[c, :, :] > th))
            for c in range(num_classes)
        ]
        for th in prob_ths
    ]  # shape - len(prob_th), num_classes

    # Initialize the counters.
    gt_nums = np.array([len(gt_regions[c]) for c in range(num_classes)])
    pred_nums = np.array(
        [
            [len(pred_regions[thi][c]) for c in range(num_classes)]
            for thi in range(len(prob_ths))
        ]
    )
    tp_nums = np.zeros((len(prob_ths), num_classes))
    fp_nums = pred_nums.copy()  # without .copy() both names share the same array

    # GT-pred bbox IoU matrix per class/threshold.
    for c in range(num_classes):
        for thi in range(len(prob_ths)):
            if (gt_nums[c] == 0) or (pred_nums[thi][c] == 0):
                continue

            iou_matrix = np.zeros((gt_nums[c], pred_nums[thi][c]))
            for gi, gr in enumerate(gt_regions[c]):
                for pi, pr in enumerate(pred_regions[thi][c]):
                    iou_matrix[gi, pi] = calc_iou(gr.bbox, pr.bbox)

            tp_nums[thi][c] = np.sum(np.any((iou_matrix >= iou_th), axis=1))
            # BUGFIX: use >= to mirror the TP criterion above; the original used
            # a strict >, so a match at exactly iou_th was counted both as a TP
            # (for the GT region) and as an FP (for the prediction region).
            fp_nums[thi][c] -= np.sum(np.any((iou_matrix >= iou_th), axis=0))

    return gt_nums, pred_nums, tp_nums, fp_nums
3,587 | fd45657083942dee13f9939ce2a4b71ba3f67397 | # -*- coding: utf-8 -*-
# @Time : 2022-03-09 21:51
# @Author : 袁肖瀚
# @FileName: WDCNN-DANN.py
# @Software: PyCharm
import torch
import numpy as np
import torch.nn as nn
import argparse
from model import WDCNN1
from torch.nn.init import xavier_uniform_
import torch.utils.data as Data
import matplotlib.pylab as plt
import wandb
import os
from matplotlib.ticker import FuncFormatter
# Default wandb hyper-parameters (a wandb sweep may override any of them).
hyperparameter_defaults = dict(
    epochs=70,
    batch_train=40,
    batch_val=50,
    batch_test=40,
    lr=0.0002,
    weight_decay=0.0005,
    r=0.02
)
# Register this run; `config` exposes the (possibly sweep-overridden) values.
wandb.init(config=hyperparameter_defaults, project="WDCNN-DANN")
config = wandb.config
# Use Times New Roman in all matplotlib figures.
plt.rcParams['font.family'] = ['Times New Roman']
def to_percent(temp, position):
    """Matplotlib tick formatter: show the tick value as a whole-number percent."""
    return f"{temp:1.0f}%"
# model initialization 参数初始化
def weight_init(m):
class_name = m.__class__.__name__ #得到网络层的名字
if class_name.find('Conv') != -1: # 使用了find函数,如果不存在返回值为-1,所以让其不等于-1
xavier_uniform_(m.weight.data)
if class_name.find('Linear') != -1:
xavier_uniform_(m.weight.data)
def batch_norm_init(m):
    """Reset the running statistics of every BatchNorm layer (use via model.apply)."""
    if 'BatchNorm' in type(m).__name__:
        m.reset_running_stats()
# split train and split data
def data_split_train(data_set, label_set):
    """Per-row 80/20 split of samples into train/validation sets.

    data_set : array of shape (num_rows, num_samples_per_row, feature_len);
               each row is split independently with a fresh random shuffle.
    label_set: labels arranged row-wise like data_set.
    Returns (data_train, data_val, label_train, label_val) with the sample
    axis flattened across rows.

    NOTE(review): the train samples are selected by the shuffled index `a`,
    but the labels are taken as the FIRST len(...) entries of each row, not
    indexed by `a`.  This is only correct if all labels within one row are
    identical (one class per row) -- confirm against the data layout.
    """
    data_set_train = []
    data_set_val = []
    label_set_train = []
    label_set_val = []
    for i in range(data_set.shape[0]):  # iterate over rows (classes)
        index = np.arange(data_set.shape[1])  # sample indices [0 1 2 ...]
        np.random.shuffle(index)  # reshuffle so each call yields a new split
        a = index[:int((data_set.shape[1]) * 0.8)]  # 80% of the samples
        data = data_set[i]  # row i
        data_train = data[a]
        data_val = np.delete(data, a, 0)  # remaining 20%
        data_set_train.append(data_train)
        data_set_val.append(data_val)
        label_set_train.extend(label_set[i][:len(data_train)])
        label_set_val.extend(label_set[i][:len(data_val)])
    data_set_train = np.array(data_set_train).reshape(-1, data_set.shape[-1])
    data_set_val = np.array(data_set_val).reshape(-1, data_set.shape[-1])
    label_set_train = np.array(label_set_train)
    label_set_val = np.array(label_set_val)
    return data_set_train, data_set_val, label_set_train, label_set_val
# training process
def train(train_dataset, val_dataset_s, val_dataset_t, train_dataset_t):
    """DANN adversarial training loop for the global `model`.

    Trains on the labelled source set while aligning source/target features
    through the domain classifier, validates on both domains after every
    epoch, saves the weights to the wandb run dir and plots the curves.

    train_dataset   : labelled source-domain training set
    val_dataset_s   : labelled source-domain validation set
    val_dataset_t   : labelled target-domain validation set
    train_dataset_t : unlabelled target-domain training set
    """
    global alpha
    length = len(train_dataset.tensors[0])
    optimizer = torch.optim.Adam(model.parameters(), lr=config.lr, weight_decay=config.weight_decay)

    train_dataloader = Data.DataLoader(train_dataset, batch_size=config.batch_train, shuffle=True)
    val_dataloader_s = Data.DataLoader(val_dataset_s, batch_size=config.batch_val, shuffle=False)
    val_dataloader_t = Data.DataLoader(val_dataset_t, batch_size=config.batch_val, shuffle=False)
    # Same batch size for both domains so the two loaders stay in step.
    t_loader = Data.DataLoader(train_dataset_t, batch_size=int(config.batch_train), shuffle=True)

    val_loss_s = []
    val_loss_t = []
    val_acc_s = []
    val_acc_t = []
    cross_loss = []  # currently unused
    Source_Train_Acc = []

    for epoch in range(config.epochs):
        t_loader_iter = iter(t_loader)
        model.train()
        for index, (s_data_train, s_label_train) in enumerate(train_dataloader):
            # Gradient-reversal coefficient, ramped from 0 towards 1 (DANN schedule).
            p = float(index) / 20
            alpha = 2. / (1. + np.exp(-10 * p)) - 1

            # BUGFIX: DataLoader iterators have no `.next()` method on Python 3 /
            # recent PyTorch; use the builtin next() instead.
            t_data_train = next(t_loader_iter)

            s_data_train = s_data_train.float().to(device).unsqueeze(dim=1)
            t_data_train = t_data_train[0].float().to(device).unsqueeze(dim=1)
            s_label_train = s_label_train.long().to(device)

            # Domain labels: 0 = source, 1 = target.
            s_domain_label = torch.zeros(config.batch_train).long().cuda()
            t_domain_label = torch.ones(config.batch_train).long().cuda()

            s_out_train, s_domain_out = model(s_data_train, alpha)
            t_out_train, t_domain_out = model(t_data_train, alpha)

            loss_domain_s = criterion(s_domain_out, s_domain_label)  # source domain-classifier loss
            loss_domain_t = criterion(t_domain_out, t_domain_label)  # target domain-classifier loss
            loss_c = criterion(s_out_train, s_label_train)  # label-classifier loss
            # NOTE(review): the hard-coded 0.02 duplicates config.r -- confirm
            # they are meant to stay in sync.
            loss = loss_c + (loss_domain_s + loss_domain_t) * 0.02

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            pred_s = torch.argmax(s_out_train.data, 1)  # index of the max score along dim=1
            correct_s = pred_s.eq(s_label_train).cpu().sum()  # source-batch accuracy
            acc = 100. * correct_s.item() / len(s_data_train)
            Source_Train_Acc.append(acc)
            wandb.log({"Source Train Acc": acc})

            if index % 2 == 0:
                print('Train Epoch: {}/{} [{}/{} ({:.0f}%)] \t Loss_c: {:.6f} Loss_d: {:.6f} Source Train Acc: {:.2f}%'.format
                      (epoch, config.epochs, (index + 1) * len(s_data_train), length,
                       100. * (config.batch_train * (index + 1) / length), loss_c.item(),
                       loss_domain_s.item() + loss_domain_t.item()
                       , acc))

        # ---- validation ----
        model.eval()

        # Source-domain validation.
        correct_val_s = 0
        sum_loss_s = 0
        length_val_s = len(val_dataset_s)
        for index, (s_data_val, s_label_val) in enumerate(val_dataloader_s):
            with torch.no_grad():
                s_data_val = s_data_val.float().to(device).unsqueeze(dim=1)
                s_label_val = s_label_val.long().to(device)
                output_val_s, _ = model(s_data_val, alpha)
                loss_s = criterion(output_val_s, s_label_val)
                pred_val_s = torch.argmax(output_val_s.data, 1)
                correct_val_s += pred_val_s.eq(s_label_val).cpu().sum()
                sum_loss_s += loss_s
        acc_s = 100. * correct_val_s.item() / length_val_s  # source accuracy
        average_loss_s = sum_loss_s.item() / length_val_s  # source loss

        # Target-domain validation.
        correct_val_t = 0
        sum_loss_t = 0
        length_val_t = len(val_dataset_t)
        for index, (t_data_val, t_label_val) in enumerate(val_dataloader_t):
            with torch.no_grad():
                t_data_val = t_data_val.float().to(device).unsqueeze(dim=1)
                t_label_val = t_label_val.long().to(device)
                output_val_t, _ = model(t_data_val, alpha)
                loss_t = criterion(output_val_t, t_label_val)
                pred_val_t = torch.argmax(output_val_t.data, 1)
                correct_val_t += pred_val_t.eq(t_label_val).cpu().sum()
                sum_loss_t += loss_t
        acc_t = 100. * correct_val_t.item() / length_val_t  # target accuracy
        average_loss_t = sum_loss_t.item() / length_val_t  # target loss

        metrics = {"Acc_val_t": acc_t, 'epoch': epoch}
        wandb.log(metrics)

        print('\n The {}/{} epoch result : Average loss_s: {:.6f}, Acc_val_s: {:.2f}% , Average loss_t: {:.6f}, Acc_val_t: {:.2f}%'.format(
            epoch, config.epochs, average_loss_s, acc_s, average_loss_t, acc_t))

        val_loss_s.append(loss_s.item())
        val_loss_t.append(loss_t.item())
        val_acc_t.append(acc_t)
        val_acc_s.append(acc_s)

    torch.save(model.state_dict(), os.path.join(wandb.run.dir, "model.pth"))

    # Validation-accuracy curves for both domains.
    plt.plot(val_acc_s, 'r-', marker='s')
    plt.plot(val_acc_t, 'g-', marker='*')
    plt.legend(["Source domain validation accuracy", "Target domain validation accuracy"])
    plt.xlabel('Epochs')
    plt.ylabel('validation accuracy')
    plt.title('Source doamin & Target domain Validation Accuracy Rate')
    plt.gca().yaxis.set_major_formatter(FuncFormatter(to_percent))
    plt.savefig("Source doamin & Target domain Validation Accuracy Rate.png")
    plt.show()

    # Validation-loss curves for both domains.
    plt.plot(val_loss_s, 'r-', marker='o')
    plt.plot(val_loss_t, 'g-', marker='x')
    plt.legend(["Source domain validation Loss", "Target domain validation Loss"])
    plt.xlabel('Epochs')
    plt.ylabel('val_loss')
    plt.title('Source domain & Target domain Validation Loss')
    plt.savefig("Source domain & Target domain Validation Loss")
    plt.show()
# testing
def test(test_dataset):
    """Return the classification accuracy (%) of the global `model` on `test_dataset`."""
    model.eval()
    total = len(test_dataset)
    loader = Data.DataLoader(test_dataset, batch_size=config.batch_test, shuffle=False)
    n_correct = 0
    y_test = []
    y_pred = []
    with torch.no_grad():
        for data, label in loader:
            data = data.float().to(device)
            label = label.long().to(device)
            y_test.append(label)
            output, _ = model(data.unsqueeze(dim=1), alpha)
            predicted = torch.argmax(output.data, 1)
            y_pred.append(predicted)
            n_correct += predicted.eq(label).cpu().sum()
    return 100. * n_correct / total
if __name__ == '__main__':
    torch.cuda.empty_cache()
    # use cpu or gpu
    if torch.cuda.is_available():
        device = 'cuda'
    else:
        device = 'cpu'
    device = torch.device(device)

    # CWRU bearing data: 0HP load = source domain, 3HP load = target domain.
    dataset_s_train = np.load(r'bearing numpy data\dataset_train_0HP_100.npz')
    dataset_s_test = np.load(r'bearing numpy data\dataset_val_0HP_80.npz')
    dataset_t_train = np.load(r'bearing numpy data\dataset_train_3HP_100.npz')
    dataset_t_test = np.load(r'bearing numpy data\dataset_val_3HP_80.npz')

    data_s_train_val = dataset_s_train['data']
    data_s_test = dataset_s_test['data'].reshape(-1, 1024)
    data_t_train_val = dataset_t_train['data']
    data_t_test = dataset_t_test['data'].reshape(-1, 1024)

    label_s_train_val = dataset_s_train['label']
    label_s_test = dataset_s_test['label'].reshape(1, -1)
    label_t_train_val = dataset_t_train['label']
    label_t_test = dataset_t_test['label'].reshape(1, -1)

    iteration_acc = []
    test_acc_s = []

    # repeat several times for an average result
    for iteration in range(1):
        # load model
        model = WDCNN1(C_in=1, class_num=10).to(device)
        model.apply(weight_init)
        model.apply(batch_norm_init)

        # train/val split per domain
        data_s_train, data_s_val, label_s_train, label_s_val = data_split_train(data_s_train_val, label_s_train_val)
        data_t_train, data_t_val, _, label_t_val = data_split_train(data_t_train_val, label_t_train_val)

        # transfer ndarray to tensor
        data_s_train = torch.from_numpy(data_s_train)
        data_s_val = torch.from_numpy(data_s_val)
        data_t_val = torch.from_numpy(data_t_val)  # target validation split
        data_s_test = torch.from_numpy(data_s_test)
        data_t_train = torch.from_numpy(data_t_train)
        data_t_test = torch.from_numpy(data_t_test)

        label_s_train = torch.from_numpy(label_s_train)
        label_s_val = torch.from_numpy(label_s_val)
        label_t_val = torch.from_numpy(label_t_val)  # target validation split
        label_s_test = torch.from_numpy(label_s_test)
        #label_t_train = torch.from_numpy(label_t_train)
        label_t_test = torch.from_numpy(label_t_test)

        # seal to data-set; note the target TRAIN set carries no labels (DANN).
        train_dataset_s = Data.TensorDataset(data_s_train, label_s_train)
        train_dataset_t = Data.TensorDataset(data_t_train)
        val_dataset_s = Data.TensorDataset(data_s_val, label_s_val)
        val_dataset_t = Data.TensorDataset(data_t_val, label_t_val)
        test_dataset_s = Data.TensorDataset(data_s_test, label_s_test.squeeze())
        test_dataset_t = Data.TensorDataset(data_t_test, label_t_test.squeeze())
        # print(train_dataset_s, val_dataset_s)

        criterion = nn.NLLLoss()
        train(train_dataset_s, val_dataset_s, val_dataset_t, train_dataset_t)
        s_test_acc = test(test_dataset_s)
        t_test_acc = test(test_dataset_t)
        print('\n source_acc: {:.2f}% target_acc: {:.2f}%'.format(s_test_acc, t_test_acc))

    wandb.finish()
|
3,588 | 97c97f18d1b93dc54538a0df7badafd961fdcb9c | from manimlib.imports import *
class A_Scroller(Scene):
    CONFIG = {
        "camera_config": {"background_color": "#FFFFFF"}
    }

    def construct(self):
        """Lay out two copies of a 3x10 grid of coloured numbers and scroll them."""
        values = [
            "3493", "3646", "4182", "16417", "18209",
            "18569", "22229", "24928", "26827", "29779",
            "30898", "31568", "32075", "32777", "33959",
            "35450", "37680", "38268", "38269", "38849",
            "44204", "44798", "44814", "45084", "45252",
            "46041", "46380", "47891", "51126", "51599",
        ]
        # Colours cycle red / black / blue across the whole sequence.
        palette = ["#DC3832", "#221F20", "#2566AD"]

        # Cell i sits at column i % 10 (2 units apart) and row i // 10 (1 unit down).
        texts = [
            Text(value, color=palette[i % 3]).shift(2 * (i % 10) * RIGHT + (i // 10) * DOWN)
            for i, value in enumerate(values)
        ]
        rows = [VGroup(*texts[r * 10:(r + 1) * 10]) for r in range(3)]
        all_numbers_1 = VGroup(*rows)

        # Second copy trails the first by 20 units so the scroll loops seamlessly.
        all_numbers_2 = all_numbers_1.copy()
        all_numbers_1.move_to(2 * UP).shift(20 * RIGHT)
        all_numbers_2.move_to(2 * UP)
        all_numbers = VGroup(all_numbers_1, all_numbers_2).to_edge(LEFT)

        self.add(all_numbers)
        self.play(ApplyMethod(all_numbers.to_edge, RIGHT), run_time=10, rate_func=linear)
|
3,589 | bb847480e7e4508fbfb5e7873c4ed390943e2fcf | #import os
import queue as q
# Read the input file (newlines become spaces so that the whole input turns
# into a single space-separated token stream).
file = open('input.txt', 'r')
inp = ''
for i in file:
    for j in i:
        if (j != '\n'):
            inp += j
        else:
            inp += ' '
    inp += ' '
#print(inp)
file.close()

# Store all the edges in a two-dimensional array.
# NOTE(review): a line that ends with '\n' contributes two consecutive
# separators here, and int(tmp) below would raise ValueError on an empty
# token -- confirm the expected input format never produces double spaces.
tmp = ''  # number currently being parsed from the stream
svNumber = 0  # number of links between nodes (streets in the city)
counter = 0  # how many streets are still left to read
cityCounter = 0  # index of the current city
flag = True  # True -> next number is a start node id, False -> an end node id
for i in inp:
    if (i != ' '):
        tmp += i
    else:
        tmp = int(tmp)
        #print(tmp)
        if (counter == 0):  # tmp holds the edge count of the next city
            mas = [[0 for id1 in range(2)] for id2 in range(tmp)]
            svNumber = tmp
            counter = tmp
        else:
            if (flag):  # record the start node of the current edge
                mas[svNumber - counter][0] = tmp
                flag = False
            else:  # record the end node of the current edge
                mas[svNumber - counter][1] = tmp
                flag = True
                counter -= 1
            #print(mas)
        tmp = ''
        if (counter == 0):
            #print(mas)
            uzNumber = max(max(i) for i in mas) + 1  # total number of nodes
            #print(uzNumber)
            # Result: rez[a][b] = number of paths from node a to node b.
            rez = [[0 for id1 in range(uzNumber)] for id2 in range(uzNumber)]
            # Pre-fill the result with -1 wherever a two-node cycle (a->b, b->a) exists.
            for i in mas:
                for j in mas:
                    if (i == j[::-1]):
                        rez[i[0]][i[0]] = -1
                        rez[i[1]][i[1]] = -1
                        rez[i[0]][i[1]] = -1
                        rez[i[1]][i[0]] = -1
            '''
            for i in rez:
                for j in i:
                    print(j, end = ' ')
                print()
            print()
            '''
            flag2 = True  # True while the -1 propagation has not converged yet
            while (flag2):
                flag2 = False
                for i in mas:
                    for j in range(uzNumber):
                        for k in range(uzNumber):
                            #print (j, k, i[0], i[1])
                            if (rez[j][k] == -1 and i[1] == j and rez[i[0]][k] != -1):
                                rez[i[0]][k] = -1
                                flag2 =True
                                #print('here')
                            if (rez[i[0]][i[0]] == -1 and i[0] == j and i[1] == k and rez[j][k] != -1):
                                rez[j][k] = -1
                                flag2 =True
                                #print('here2')
            '''
            for i in rez:
                for j in i:
                    print(j, end = ' ')
                print()
            print()
            '''
            # Fill the result row by row with a breadth-first traversal.
            for k in range(uzNumber):
                #if (rez[k][0] != -1):  # check that node k cannot reach a cycle
                frontier = q.Queue()  # FIFO queue of the nodes to visit
                frontier.put(k)  # start from the node matching the current result row
                while (not frontier.empty()):
                    current = frontier.get()
                    #print(current)
                    for i in range (len(mas)):
                        if (mas[i][0] == current):
                            if (rez[k][mas[i][1]] != -1):
                                frontier.put(mas[i][1])  # enqueue the neighbouring nodes
                                rez[k][mas[i][1]] += 1  # count the path
            # Print the result matrix for this city.
            print('matrix for city', cityCounter)
            for i in rez:
                for j in i:
                    print(j, end = ' ')
                print()
            print()
            cityCounter += 1
            mas = []
#os.system('PAUSE')
|
3,590 | daeb11000978d14a05ea62113dcf6e30d6a98b15 | # Enunciado: faça um programa que leia um ano qualquer e mostre se ele é BISEXTO.
def eh_bissexto(ano):
    """Return True when `ano` is a leap year in the Gregorian calendar.

    A year is a leap year when it is divisible by 4 and not by 100, OR when
    it is divisible by 400.  The original code tested only `% 4 == 0 and
    % 100 != 0`, so century leap years such as 2000 were reported as
    non-leap.
    """
    return ano % 4 == 0 and (ano % 100 != 0 or ano % 400 == 0)


if __name__ == '__main__':
    ano = int(input('\nInforme o ano: '))
    if eh_bissexto(ano):
        print('\nO ano de {} é Bissexto !!'.format(ano))
    else:
        print('\nO ano de {} não foi Bissexto !!'.format(ano))
3,591 | 3ecc9ce82d9c902958a4da51ce7ee3c39b064b2b | import datetime
from django.views.generic import DetailView, ListView
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import get_list_or_404, render_to_response, get_object_or_404
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.conf import settings
from farm.models import Animal, Breed, Product, Milking
from notes.forms import BriefNoteForm
class BreedDetailView(DetailView):
    """Detail page for a Breed, restricted to the genus named in the URL."""

    model = Breed

    def get_queryset(self, *args, **kwargs):
        genus_slug = self.kwargs['genus_slug']
        return Breed.objects.filter(genus__slug=genus_slug)
class ProductDetailView(DetailView):
    """Detail page for a Product, restricted to the type named in the URL."""

    model = Product

    def get_queryset(self, *args, **kwargs):
        type_slug = self.kwargs['type_slug']
        return Product.objects.filter(type__slug=type_slug)
class MilkingListView(ListView):
    """List milkings for one animal, looked up by slug or by uuid fragment.

    The animal is addressed by genus slug + breed slug + either its slug or
    a fragment of its uuid.  The original code called
    ``Animal.objects.get(self.kwargs.get('slug', None))`` with a POSITIONAL
    argument, which always raised TypeError and silently fell through the
    bare ``except`` into the uuid path; the lookup is now a proper keyword
    lookup and the exceptions caught are the specific Django ones.
    """

    model = Animal

    def get_queryset(self, *args, **kwargs):
        breed_filter = {
            'animal__primary_breed__genus__slug': self.kwargs['genus_slug'],
            'animal__primary_breed__slug': self.kwargs['breed_slug'],
        }
        slug = self.kwargs.get('slug', None)
        try:
            # Prefer an exact slug match when such an animal exists.
            Animal.objects.get(slug=slug)
            qs = Milking.objects.filter(animal__slug=slug, **breed_filter)
        except Animal.DoesNotExist:
            # Fall back to treating the URL fragment as part of the uuid.
            qs = Milking.objects.filter(animal__uuid__contains=slug, **breed_filter)
        except Animal.MultipleObjectsReturned:
            # Several animals share the slug; list all their milkings by slug.
            qs = Milking.objects.filter(animal__slug=slug, **breed_filter)
        return qs
class AnimalDetailView(DetailView):
    """Animal detail page; the animal may be addressed by pk, uuid fragment or slug."""

    model = Animal

    def get_queryset(self, *args, **kwargs):
        # Restrict to the genus/breed named in the URL.
        return Animal.objects.filter(primary_breed__genus__slug=self.kwargs['genus_slug'], primary_breed__slug=self.kwargs['breed_slug'])

    def get_object(self, queryset=None):
        """Resolve the object from pk, uuid fragment, or slug (in that order).

        NOTE(review): when the pk branch is taken, ``obj`` is never assigned,
        so ``return obj`` raises UnboundLocalError -- confirm and fix.
        NOTE(review): the Http404 branch calls ``_()`` but no translation
        function is imported in this module, so raising it would fail with
        NameError.
        NOTE(review): the bare ``except`` around the uuid lookup also hides
        database errors, not just an empty result.
        """
        if queryset is None:
            queryset = self.get_queryset()
        pk = self.kwargs.get('pk', None)
        slug_or_uuid = self.kwargs.get('slug', None)
        if pk is not None:
            queryset = queryset.filter(pk=pk)
        # Next, try looking up by slug.
        elif slug_or_uuid is not None:
            try:
                # A uuid fragment match wins over a slug match.
                obj = queryset.filter(uuid__contains=slug_or_uuid)[0]
            except:
                try:
                    obj = queryset.get(slug=slug_or_uuid)
                except ObjectDoesNotExist:
                    raise Http404(_(u"No %(verbose_name)s found matching the query") %
                              {'verbose_name': queryset.model._meta.verbose_name})
        # If none of those are defined, it's an error.
        else:
            raise AttributeError(u"Generic detail view %s must be called with "
                                 u"either an object pk or a slug."
                                 % self.__class__.__name__)
        return obj

    def get_context_data(self, **kwargs):
        # Attach the quick-note form used on the animal page.
        context = super(AnimalDetailView, self).get_context_data(**kwargs)
        context['note_form'] = BriefNoteForm()
        return context
|
3,592 | 64b4deaad548a38ba646423d33fc6a985483a042 | ########################################
__author__ = "Abdelrahman Eldesokey"
__license__ = "GNU GPLv3"
__version__ = "0.1"
__maintainer__ = "Abdelrahman Eldesokey"
__email__ = "abdo.eldesokey@gmail.com"
########################################
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules.utils import _pair
from torch.nn.parameter import Parameter
import numpy as np
from scipy.stats import poisson
from scipy import signal
from utils.util import retrieve_elements_from_indices
# The proposed Normalized Convolution Layer
class NConv2d(_ConvNd):
    """2-D normalized convolution over a (data, confidence) pair.

    forward() convolves data*conf and conf with the same kernel and returns
    their ratio together with a propagated confidence map.  The kernel can be
    constrained to stay positive via ``pos_fn`` (see EnforcePos).
    """

    def __init__(self, in_channels, out_channels, kernel_size, pos_fn='softplus',
                 init_method='k', stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'):

        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)

        # Call _ConvNd constructor
        super(NConv2d, self).__init__(in_channels, out_channels, kernel_size,
                                      stride, padding, dilation, False, _pair(0), groups, bias, padding_mode)

        self.eps = 1e-20  # avoids division by zero in the normalization
        self.pos_fn = pos_fn
        self.init_method = init_method

        # Initialize weights and bias
        self.init_parameters()

        if self.pos_fn is not None:
            # Re-parametrise the kernel so it stays positive on every forward.
            EnforcePos.apply(self, 'weight', pos_fn)

    def forward(self, data, conf):
        # Normalized Convolution: conv(data*conf) / conv(conf)
        denom = F.conv2d(conf, self.weight, None, self.stride,
                        self.padding, self.dilation, self.groups)
        nomin = F.conv2d(data*conf, self.weight, None, self.stride,
                        self.padding, self.dilation, self.groups)
        nconv = nomin / (denom+self.eps)

        # Add bias
        b = self.bias
        sz = b.size(0)
        b = b.view(1,sz,1,1)
        b = b.expand_as(nconv)
        nconv += b

        # Propagate confidence: denominator divided by the total kernel mass
        # per output channel.
        cout = denom
        sz = cout.size()
        cout = cout.view(sz[0], sz[1], -1)

        k = self.weight
        k_sz = k.size()
        k = k.view(k_sz[0], -1)
        s = torch.sum(k, dim=-1, keepdim=True)

        cout = cout / s
        cout = cout.view(sz)

        return nconv, cout

    def init_parameters(self):
        """Initialise the kernel (Xavier / Kaiming / Poisson-shaped) and the bias."""
        # Init weights
        if self.init_method == 'x': # Xavier
            torch.nn.init.xavier_uniform_(self.weight)
        elif self.init_method == 'k': # Kaiming
            torch.nn.init.kaiming_uniform_(self.weight)
        elif self.init_method == 'p': # Poisson
            mu=self.kernel_size[0]/2
            dist = poisson(mu)
            x = np.arange(0, self.kernel_size[0])
            y = np.expand_dims(dist.pmf(x),1)
            # Outer product-style 2-D convolution of the pmf with itself
            # yields a 2-D Poisson-shaped bump.
            w = signal.convolve2d(y, y.transpose(), 'full')
            w = torch.tensor(w).type_as(self.weight)
            w = torch.unsqueeze(w,0)
            w = torch.unsqueeze(w,1)
            w = w.repeat(self.out_channels, 1, 1, 1)
            w = w.repeat(1, self.in_channels, 1, 1)
            self.weight.data = w + torch.rand(w.shape)

        # Init bias
        self.bias = torch.nn.Parameter(torch.zeros(self.out_channels)+0.01)
# My modification is in this class
# Non-negativity enforcement class
class EnforcePos(object):
    """Forward-pre-hook that re-parametrises a module weight so it stays positive.

    The raw parameter is stored under ``<name>_pre``; before every forward
    pass the hook sets ``<name>`` to ``pos_fn(<name>_pre)``.
    """

    def __init__(self, pos_fn, name):
        self.name = name
        self.pos_fn = pos_fn

    @staticmethod
    def apply(module, name, pos_fn):
        """Replace parameter `name` on `module` with a positivity-enforced view."""
        fn = EnforcePos(pos_fn, name)

        weight = getattr(module, name)

        # Re-register the raw weight as `<name>_pre` and expose the positive
        # transform under the original attribute name.
        del module._parameters[name]
        module.register_parameter(name + '_pre', Parameter(weight.data))
        setattr(module, name, fn._pos(getattr(module, name + '_pre')))
        module.register_forward_pre_hook(fn)

        return fn

    def __call__(self, module, inputs):
        # Recompute the positive weight from the raw parameter on every forward.
        pos_weight = self._pos(getattr(module, self.name + '_pre'))
        setattr(module, self.name, pos_weight)

    def _pos(self, p):
        """Map the raw tensor `p` through the configured positive function.

        Raises ValueError for an unknown function name (the original printed a
        message and returned None, which only surfaced later as a cryptic
        error at the call site).
        """
        pos_fn = self.pos_fn.lower()
        if pos_fn == 'softmax':
            p_sz = p.size()
            p = p.view(p_sz[0], p_sz[1], -1)
            p = F.softmax(p, -1)
            return p.view(p_sz)
        elif pos_fn == 'exp':
            return torch.exp(p)
        elif pos_fn == 'softplus':
            return F.softplus(p, beta=10)
        elif pos_fn == 'sigmoid':
            # torch.sigmoid: F.sigmoid is deprecated in modern PyTorch.
            return torch.sigmoid(p)
        else:
            raise ValueError(f'Undefined positive function: {self.pos_fn!r}')
class NormCNN(nn.Module):
    """U-Net-like stack of NConv2d layers over (data, confidence) pairs.

    Three confidence-guided max-pool downsamplings (data is gathered at the
    argmax positions of the confidence map), then three nearest-neighbour
    upsamplings with skip connections, ending in a 1x1 NConv2d head.
    """

    def __init__(self, pos_fn=None, num_channels=2):
        super().__init__()

        self.pos_fn = pos_fn

        self.nconv1 = NConv2d(1, num_channels, (5, 5), pos_fn, 'p', padding=2)
        self.nconv2 = NConv2d(num_channels, num_channels, (5, 5), pos_fn, 'p', padding=2)
        self.nconv3 = NConv2d(num_channels, num_channels, (5, 5), pos_fn, 'p', padding=2)

        # Fusion layers for the skip connections (channel-concatenated inputs).
        self.nconv4 = NConv2d(2 * num_channels, num_channels, (3, 3), pos_fn, 'p', padding=1)
        self.nconv5 = NConv2d(2 * num_channels, num_channels, (3, 3), pos_fn, 'p', padding=1)
        self.nconv6 = NConv2d(2 * num_channels, num_channels, (3, 3), pos_fn, 'p', padding=1)

        self.nconv7 = NConv2d(num_channels, 1, (1, 1), pos_fn, 'k')

    def forward(self, x0, c0):
        x1, c1 = self.nconv1(x0, c0)
        x1, c1 = self.nconv2(x1, c1)
        x1, c1 = self.nconv3(x1, c1)

        # Downsample 1: pool confidence, gather data at the same argmax positions.
        ds = 2
        c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True)
        x1_ds = retrieve_elements_from_indices(x1, idx)
        c1_ds /= 4  # compensate for the 2x2 pooling window

        x2_ds, c2_ds = self.nconv2(x1_ds, c1_ds)
        x2_ds, c2_ds = self.nconv3(x2_ds, c2_ds)

        # Downsample 2
        ds = 2
        c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True)
        x2_dss = retrieve_elements_from_indices(x2_ds, idx)
        c2_dss /= 4

        x3_ds, c3_ds = self.nconv2(x2_dss, c2_dss)

        # Downsample 3
        ds = 2
        c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, return_indices=True)
        x3_dss = retrieve_elements_from_indices(x3_ds, idx)
        c3_dss /= 4
        x4_ds, c4_ds = self.nconv2(x3_dss, c3_dss)

        # Upsample 1: back to the level-3 resolution, fuse with skip x3_ds.
        x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest')
        c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest')
        x34_ds, c34_ds = self.nconv4(torch.cat((x3_ds, x4), 1), torch.cat((c3_ds, c4), 1))

        # Upsample 2: fuse with skip x2_ds.
        x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest')
        c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest')
        x23_ds, c23_ds = self.nconv5(torch.cat((x2_ds, x34), 1), torch.cat((c2_ds, c34), 1))

        # Upsample 3: back to input resolution, fuse with skip x1.
        x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest')
        c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest')
        xout, cout = self.nconv6(torch.cat((x23, x1), 1), torch.cat((c23, c1), 1))

        xout, cout = self.nconv7(xout, cout)

        return xout, cout
class PretrainedCNN(nn.Module):
def __init__(self, pos_fn=None, num_channels=2):
super().__init__()
self.pos_fn = pos_fn
self.navg1 = self.navg_layer((5, 5), 3, 1, num_channels, 'p', True)
self.navg2 = self.navg_layer((5, 5), 3, num_channels, num_channels, 'p', True)
self.navg3 = self.navg_layer((5, 5), 3, num_channels, num_channels, 'p', True)
self.navg4 = self.navg_layer((1, 1), 3, num_channels, 1, 'p', True)
self.navg34 = self.navg_layer((3, 3), 3, 2 * num_channels, num_channels, 'p', True)
self.navg23 = self.navg_layer((3, 3), 3, 2 * num_channels, num_channels, 'p', True)
self.navg12 = self.navg_layer((3, 3), 3, 2 * num_channels, num_channels, 'p', True)
self.bias1 = nn.Parameter(torch.zeros(num_channels) + 0.01)
self.bias2 = nn.Parameter(torch.zeros(num_channels) + 0.01)
self.bias3 = nn.Parameter(torch.zeros(num_channels) + 0.01)
self.bias4 = nn.Parameter(torch.zeros(1) + 0.01)
self.bias34 = nn.Parameter(torch.zeros(num_channels) + 0.01)
self.bias23 = nn.Parameter(torch.zeros(num_channels) + 0.01)
self.bias12 = nn.Parameter(torch.zeros(num_channels) + 0.01)
def forward(self, x0, c0):
x1, c1 = self.navg_forward(self.navg1, c0, x0, self.bias1)
x1, c1 = self.navg_forward(self.navg2, c1, x1, self.bias2)
x1, c1 = self.navg_forward(self.navg3, c1, x1, self.bias3)
ds = 2
c1_ds, idx = F.max_pool2d(c1, ds, ds, return_indices=True)
x1_ds = torch.zeros(c1_ds.size()).cuda()
for i in range(x1_ds.size(0)):
for j in range(x1_ds.size(1)):
x1_ds[i, j, :, :] = x1[i, j, :, :].view(-1)[idx[i, j, :, :].view(-1)].view(idx.size()[2:])
c1_ds /= 4
x2_ds, c2_ds = self.navg_forward(self.navg2, c1_ds, x1_ds, self.bias2)
x2_ds, c2_ds = self.navg_forward(self.navg3, c2_ds, x2_ds, self.bias3)
ds = 2
c2_dss, idx = F.max_pool2d(c2_ds, ds, ds, return_indices=True)
x2_dss = torch.zeros(c2_dss.size()).cuda()
for i in range(x2_dss.size(0)):
for j in range(x2_dss.size(1)):
x2_dss[i, j, :, :] = x2_ds[i, j, :, :].view(-1)[idx[i, j, :, :].view(-1)].view(idx.size()[2:])
c2_dss /= 4
x3_ds, c3_ds = self.navg_forward(self.navg2, c2_dss, x2_dss, self.bias2)
# x3_ds, c3_ds = self.navg_forward(self.navg3, c3_ds, x3_ds, self.bias3)
ds = 2
c3_dss, idx = F.max_pool2d(c3_ds, ds, ds, return_indices=True)
x3_dss = torch.zeros(c3_dss.size()).cuda()
for i in range(x3_dss.size(0)):
for j in range(x3_dss.size(1)):
x3_dss[i, j, :, :] = x3_ds[i, j, :, :].view(-1)[idx[i, j, :, :].view(-1)].view(idx.size()[2:])
c3_dss /= 4
x4_ds, c4_ds = self.navg_forward(self.navg2, c3_dss, x3_dss, self.bias2)
x4 = F.interpolate(x4_ds, c3_ds.size()[2:], mode='nearest')
c4 = F.interpolate(c4_ds, c3_ds.size()[2:], mode='nearest')
x34_ds, c34_ds = self.navg_forward(self.navg34, torch.cat((c3_ds, c4), 1), torch.cat((x3_ds, x4), 1),
self.bias34)
x34 = F.interpolate(x34_ds, c2_ds.size()[2:], mode='nearest')
c34 = F.interpolate(c34_ds, c2_ds.size()[2:], mode='nearest')
x23_ds, c23_ds = self.navg_forward(self.navg23, torch.cat((c2_ds, c34), 1), torch.cat((x2_ds, x34), 1),
self.bias23)
x23 = F.interpolate(x23_ds, x0.size()[2:], mode='nearest')
c23 = F.interpolate(c23_ds, c0.size()[2:], mode='nearest')
xout, cout = self.navg_forward(self.navg12, torch.cat((c23, c1), 1), torch.cat((x23, x1), 1), self.bias12)
xout, cout = self.navg_forward(self.navg4, cout, xout, self.bias4)
return xout, cout
def navg_forward(self, navg, c, x, b, eps=1e-20, restore=False):
# Normalized Averaging
ca = navg(c)
xout = torch.div(navg(x * c), ca + eps)
# Add bias
sz = b.size(0)
b = b.view(1, sz, 1, 1)
b = b.expand_as(xout)
xout = xout + b
if restore:
cm = (c == 0).float()
xout = torch.mul(xout, cm) + torch.mul(1 - cm, x)
# Propagate confidence
# cout = torch.ne(ca, 0).float()
cout = ca
sz = cout.size()
cout = cout.view(sz[0], sz[1], -1)
k = navg.weight
k_sz = k.size()
k = k.view(k_sz[0], -1)
s = torch.sum(k, dim=-1, keepdim=True)
cout = cout / s
cout = cout.view(sz)
k = k.view(k_sz)
return xout, cout
    def navg_layer(self, kernel_size, init_stdev=0.5, in_channels=1, out_channels=1, initalizer='x', pos=False,
                   groups=1):
        """Build a bias-free Conv2d used as a normalized-averaging kernel.

        ``initalizer`` (sic) selects the weight init: 'x' Xavier uniform,
        'k' Kaiming uniform, 'p' a 2-D Poisson-shaped kernel plus uniform
        noise.  ``init_stdev`` and ``pos`` are currently unused — TODO
        confirm whether they were meant to influence the initialization.
        Padding keeps the spatial size unchanged for odd kernel sizes.
        """
        navg = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=1,
                         padding=(kernel_size[0] // 2, kernel_size[1] // 2), bias=False, groups=groups)
        weights = navg.weight
        if initalizer == 'x':  # Xavier
            torch.nn.init.xavier_uniform(weights)
        elif initalizer == 'k':
            torch.nn.init.kaiming_uniform(weights)
        elif initalizer == 'p':
            # Outer "product" (full 2-D convolution) of a 1-D Poisson pmf with
            # itself yields a centre-peaked 2-D kernel.
            mu = kernel_size[0] / 2
            dist = poisson(mu)
            x = np.arange(0, kernel_size[0])
            y = np.expand_dims(dist.pmf(x), 1)
            w = signal.convolve2d(y, y.transpose(), 'full')
            w = torch.from_numpy(w).float().cuda()
            w = torch.unsqueeze(w, 0)
            w = torch.unsqueeze(w, 1)
            w = w.repeat(out_channels, 1, 1, 1)
            w = w.repeat(1, in_channels, 1, 1)
            # Uniform noise breaks symmetry between otherwise identical filters.
            weights.data = w + torch.rand(w.shape).cuda()
        return navg
if __name__ == '__main__':
    # Smoke test: construct the network and dump its architecture.
    network = NormCNN(pos_fn='softplus')
    print(str(network))
|
3,593 | 9af2b94c6eef47dad0348a5437593cc8561a7deb | import numpy
numpy.random.seed(1)  # reproducible synthetic data
M = 20  # number of observed rows
N = 100  # number of samples
import numpy as np
# Ground truth: rank-2 linear model f = w x^T, observed with Gaussian noise.
x = np.random.randn(N, 2)
w = np.random.randn(M, 2)
f = np.einsum('ik,jk->ij', w, x)
y = f + 0.1*np.random.randn(M, N)
D = 10  # latent dimensionality of the model (deliberately larger than 2)
from bayespy.nodes import GaussianARD, Gamma, SumMultiply
# Latent states X and loadings C; the ARD prior alpha lets VB prune
# latent dimensions that the data does not support.
X = GaussianARD(0, 1, plates=(1,N), shape=(D,))
alpha = Gamma(1e-5, 1e-5, plates=(D,))
C = GaussianARD(0, alpha, plates=(M,1), shape=(D,))
F = SumMultiply('d,d->', X, C)
tau = Gamma(1e-5, 1e-5)  # observation noise precision
Y = GaussianARD(F, tau)
Y.observe(y)
from bayespy.inference import VB
Q = VB(Y, X, C, alpha, tau)
C.initialize_from_random()
from bayespy.inference.vmp.transformations import RotateGaussianARD
# Rotations remove the rotational ambiguity between X and C after each
# iteration, speeding up VB convergence.
rot_X = RotateGaussianARD(X)
rot_C = RotateGaussianARD(C, alpha)
from bayespy.inference.vmp.transformations import RotationOptimizer
R = RotationOptimizer(rot_X, rot_C, D)
Q.set_callback(R.rotate)
Q.update(repeat=1000)  # iterate VB until convergence or 1000 sweeps
import bayespy.plot as bpplt
bpplt.hinton(C) |
3,594 | 2fbf312e1f8388008bb9ab9ba0ee4ccee1a8beae | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-04-12 12:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Widen ``usertask.ut_id`` to a 64-bit integer primary key."""

    dependencies = [
        ('cstasker', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='usertask',
            name='ut_id',
            # serialize=False: keep the PK out of fixture serialization.
            field=models.BigIntegerField(primary_key=True, serialize=False),
        ),
    ]
|
3,595 | d60810ea0b19cc9163ce526e6a5a54da9c8b3f68 | ###############################################################################
# Copyright (c) 2017-2020 Koren Lev (Cisco Systems), #
# Yaron Yogev (Cisco Systems), Ilia Abashin (Cisco Systems) and others #
# #
# All rights reserved. This program and the accompanying materials #
# are made available under the terms of the Apache License, Version 2.0 #
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
from unittest.mock import MagicMock, patch
from scan.fetchers.kube.kube_fetch_containers import KubeFetchContainers
from scan.test.fetch.kube_fetch.kube_test_base import KubeTestBase
from scan.test.fetch.kube_fetch.test_data.kube_access import KUBE_CONFIG
from scan.test.fetch.kube_fetch.test_data.kube_fetch_containers import \
POD_DOCUMENT, CONTAINERS_FOLDER_ID, PODS_RESPONSE_NO_MATCH, \
EXPECTED_CONTAINER_DOC
from scan.test.fetch.kube_fetch.test_data.kube_fetch_pods import PODS_RESPONSE, \
EMPTY_RESPONSE
class TestKubeFetchContainers(KubeTestBase):
    """Tests for KubeFetchContainers.get() with mocked Kube API and inventory."""

    class DummyConfig(object):
        # Minimal stand-in for the real Configuration object: the fetcher
        # under test only reads the ``environment`` mapping from it.
        def __init__(self, _environment):
            self.environment = _environment

    def setUp(self):
        super().setUp()
        # Keep CLI-access code from instantiating a real Configuration.
        self.conf_patcher = patch(
            'utils.cli_access.Configuration'
        )
        self.conf_class = self.conf_patcher.start()
        self.fetcher = KubeFetchContainers(KUBE_CONFIG)
        self.fetcher.configuration = TestKubeFetchContainers.DummyConfig({
            'environment_type': 'Kubernetes'
        })

    @staticmethod
    def _get_by_id(environment, item_id):
        # Fake inventory lookup: only the canned pod document resolves.
        if environment:
            pass
        if item_id == POD_DOCUMENT['id']:
            return POD_DOCUMENT
        return None

    def test_get_flannel(self):
        # Happy path: one pod matches the folder -> one container document.
        self.fetcher.configuration.environment['mechanism_drivers'] = \
            ['Flannel']
        self.inv.get_by_id.side_effect = self._get_by_id
        # Stub any shell command the fetcher would run on the host.
        self.fetcher.run = MagicMock(return_value="[]")
        response = self._get_response(payload=PODS_RESPONSE,
                                      response_type='V1PodList')
        self.api.list_pod_for_all_namespaces = MagicMock(return_value=response)
        containers = self.fetcher.get(CONTAINERS_FOLDER_ID)
        self.assertEqual(1, len(containers))
        self.assertDictContains(EXPECTED_CONTAINER_DOC, containers[0])

    def test_get_no_db_pod(self):
        # Pod document missing from inventory -> no containers returned.
        self.inv.get_by_id.return_value = None
        containers = self.fetcher.get(CONTAINERS_FOLDER_ID)
        self.assertEqual(0, len(containers))

    def test_get_no_kube_pods(self):
        # Kube API returns an empty pod list -> no containers returned.
        self.inv.get_by_id.side_effect = self._get_by_id
        response = self._get_response(payload=EMPTY_RESPONSE,
                                      response_type='V1PodList')
        self.api.list_pod_for_all_namespaces = MagicMock(return_value=response)
        containers = self.fetcher.get(CONTAINERS_FOLDER_ID)
        self.assertEqual(0, len(containers))

    def test_get_no_matching_pod(self):
        # Pods exist but none belongs to the requested folder -> nothing.
        self.inv.get_by_id.side_effect = self._get_by_id
        response = self._get_response(payload=PODS_RESPONSE_NO_MATCH,
                                      response_type='V1PodList')
        self.api.list_pod_for_all_namespaces = MagicMock(return_value=response)
        containers = self.fetcher.get(CONTAINERS_FOLDER_ID)
        self.assertEqual(0, len(containers))

    def tearDown(self):
        self.conf_patcher.stop()
        super().tearDown()
|
3,596 | 36fce3837e0341d94ff6099a06be8cf757a1cfa9 | from pyspark import SparkContext, SparkConf
# Find the hosts that appear in both the July and the August NASA access logs.
spark_conf = SparkConf().setAppName("same_host").setMaster("local")
spark_ctx = SparkContext(conf=spark_conf)

def host_column(path):
    # The host name is the first tab-separated column of each log line.
    return spark_ctx.textFile(path).map(lambda row: row.split("\t")[0])

july_hosts = host_column("/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950701.tsv")
august_hosts = host_column("/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950801.tsv")

common_hosts = july_hosts.intersection(august_hosts)
# Drop the literal "host" entry coming from the TSV header rows.
without_header = common_hosts.filter(lambda h: h != "host")
without_header.saveAsTextFile("out/nasa_logs_same_hosts.csv")
|
3,597 | 7b01e81c3e31e0a315ee01f36bf1b1f7384a9d10 | from tracking.centroidtracker import CentroidTracker
from tracking.trackableobject import TrackableObject
import tensornets as nets
import cv2
import numpy as np
import time
import dlib
import tensorflow.compat.v1 as tf
import os
# For 'disable_v2_behavior' see https://github.com/theislab/scgen/issues/14
tf.disable_v2_behavior()
# Image size must be '416x416' as YoloV3 network expects that specific image size as input
img_size = 416
inputs = tf.placeholder(tf.float32, [None, img_size, img_size, 3])
model = nets.YOLOv3COCO(inputs, nets.Darknet19)

ct = CentroidTracker(maxDisappeared=5, maxDistance=50) # Look into 'CentroidTracker' for further info about parameters
trackers = [] # List of all dlib trackers
trackableObjects = {} # Dictionary of trackable objects containing object's ID and its' corresponding centroid/s
skip_frames = 10 # Numbers of frames to skip from detecting
confidence_level = 0.40 # The confidence level of a detection
total = 0 # Total number of detected objects from classes of interest
use_original_video_size_as_output_size = True # Shows original video as output and not the 416x416 image that is used as yolov3 input (NOTE: Detection still happens with 416x416 img size but the output is displayed in original video size if this parameter is True)

video_path = os.getcwd() + "/videos/M6 Motorway Traffic - Short version.mp4"
video_name = os.path.basename(video_path)

print("Loading video {video_path}...".format(video_path=video_path))
# Bail out early rather than letting VideoCapture fail on a missing file.
if not os.path.exists(video_path):
    print("File does not exist. Exited.")
    exit()

# From https://github.com/experiencor/keras-yolo3/blob/master/yolo3_one_file_to_detect_them_all.py#L389
# YoloV3 detects 80 classes represented below
all_classes = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", \
               "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", \
               "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", \
               "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", \
               "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", \
               "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", \
               "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", \
               "chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse", \
               "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", \
               "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]

# Classes of interest (with their corresponding indexes for easier looping)
classes = { 1 : 'bicycle', 2 : 'car', 3 : 'motorbike', 5 : 'bus', 7 : 'truck' }
with tf.Session() as sess:
    # Load the pretrained YOLOv3-COCO weights into the graph.
    sess.run(model.pretrained())

    cap = cv2.VideoCapture(video_path)

    # Get video size (just for log purposes)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # Scale used for output window size and net size
    width_scale = 1
    height_scale = 1
    if use_original_video_size_as_output_size:
        # Map 416x416 net coordinates back onto the original frame size.
        width_scale = width / img_size
        height_scale = height / img_size
def drawRectangleCV2(img, pt1, pt2, color, thickness, width_scale=width_scale, height_scale=height_scale):
point1 = (int(pt1[0] * width_scale), int(pt1[1] * height_scale))
point2 = (int(pt2[0] * width_scale), int(pt2[1] * height_scale))
return cv2.rectangle(img, point1, point2, color, thickness)
def drawTextCV2(img, text, pt, font, font_scale, color, lineType, width_scale=width_scale, height_scale=height_scale):
pt = (int(pt[0] * width_scale), int(pt[1] * height_scale))
cv2.putText(img, text, pt, font, font_scale, color, lineType)
def drawCircleCV2(img, center, radius, color, thickness, width_scale=width_scale, height_scale=height_scale):
center = (int(center[0] * width_scale), int(center[1] * height_scale))
cv2.circle(img, center, radius, color, thickness)
# Python 3.5.6 does not support f-strings (next line will generate syntax error)
#print(f"Loaded {video_path}. Width: {width}, Height: {height}")
print("Loaded {video_path}. Width: {width}, Height: {height}".format(video_path=video_path, width=width, height=height))
skipped_frames_counter = 0
while(cap.isOpened()):
ret, frame = cap.read()
if ret == False:
print("Error reading frame. cap.read() returned {ret}".format(ret))
# Frame must be resized to 'img_size' (because that's what YoloV3 accepts as input)
img = cv2.resize(frame, (img_size, img_size))
# Output image is used for drawing annotations (tracking rectangles and detected classes) on the image
output_img = frame if use_original_video_size_as_output_size else img
tracker_rects = []
if skipped_frames_counter == skip_frames:
# Detecting happens after number of frames have passes specified by 'skip_frames' variable value
print("[DETECTING]")
trackers = []
skipped_frames_counter = 0 # reset counter
np_img = np.array(img).reshape(-1, img_size, img_size, 3)
start_time=time.time()
predictions = sess.run(model.preds, {inputs: model.preprocess(np_img)})
print("Detection took %s seconds" % (time.time() - start_time))
# model.get_boxes returns a 80 element array containing information about detected classes
# each element contains a list of detected boxes, confidence level ...
detections = model.get_boxes(predictions, np_img.shape[1:3])
np_detections = np.array(detections)
# Loop only through classes we are interested in
for class_index in classes.keys():
local_count = 0
class_name = classes[class_index]
# Loop through detected infos of a class we are interested in
for i in range(len(np_detections[class_index])):
box = np_detections[class_index][i]
if np_detections[class_index][i][4] >= confidence_level:
print("Detected ", class_name, " with confidence of ", np_detections[class_index][i][4])
local_count += 1
startX, startY, endX, endY = box[0], box[1], box[2], box[3]
drawRectangleCV2(output_img, (startX, startY), (endX, endY), (0, 255, 0), 1)
drawTextCV2(output_img, class_name, (startX, startY), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 0, 255), 1)
# Construct a dlib rectangle object from the bounding box coordinates and then start the dlib correlation
tracker = dlib.correlation_tracker()
rect = dlib.rectangle(int(startX), int(startY), int(endX), int(endY))
tracker.start_track(img, rect)
# Add the tracker to our list of trackers so we can utilize it during skip frames
trackers.append(tracker)
# Write the total number of detected objects for a given class on this frame
print(class_name," : ", local_count)
else:
# If detection is not happening then track previously detected objects (if any)
print("[TRACKING]")
skipped_frames_counter += 1 # Increase the number frames for which we did not use detection
# Loop through tracker, update each of them and display their rectangle
for tracker in trackers:
tracker.update(img)
pos = tracker.get_position()
# Unpack the position object
startX = int(pos.left())
startY = int(pos.top())
endX = int(pos.right())
endY = int(pos.bottom())
# Add the bounding box coordinates to the tracking rectangles list
tracker_rects.append((startX, startY, endX, endY))
# Draw tracking rectangles
drawRectangleCV2(output_img, (startX, startY), (endX, endY), (255, 0, 0), 1)
# Use the centroid tracker to associate the (1) old object centroids with (2) the newly computed object centroids
objects = ct.update(tracker_rects)
# Loop over the tracked objects
for (objectID, centroid) in objects.items():
# Check to see if a trackable object exists for the current object ID
to = trackableObjects.get(objectID, None)
if to is None:
# If there is no existing trackable object, create one
to = TrackableObject(objectID, centroid)
else:
to.centroids.append(centroid)
# If the object has not been counted, count it and mark it as counted
if not to.counted:
total += 1
to.counted = True
# Store the trackable object in our dictionary
trackableObjects[objectID] = to
# Draw both the ID of the object and the centroid of the object on the output frame
object_id = "ID {}".format(objectID)
drawTextCV2(output_img, object_id, (centroid[0] - 10, centroid[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
drawCircleCV2(output_img, (centroid[0], centroid[1]), 2, (0, 255, 0), -1)
# Display the total count so far
total_str = "Total counted: " + str(total)
drawTextCV2(output_img, total_str, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
# Display the current frame (with all annotations drawn up to this point)
cv2.imshow(video_name, output_img)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'): # QUIT (exits)
break
elif key == ord('p'):
cv2.waitKey(0) # PAUSE (Enter any key to continue)
cap.release()
cv2.destroyAllWindows()
print("Exited")
|
3,598 | bafb6c09ecd0017428441e109733ebcb189863ad | operation = input('operation type: ').lower()
num1 = input("First number: ")
num2 = input("Second number: ")  # BUGFIX: prompt previously said "First number" twice

try:
    # Both operands must parse as floats; a bad value raises ValueError below.
    num1, num2 = float(num1), float(num2)
    if operation == 'add':
        result = num1 + num2
        print(result)
    elif operation == 'subtract':
        result = num1 - num2
        print(result)
    elif operation == 'multiply':
        result = num1 * num2
        print(result)
    elif operation == 'divide':
        result = num1 / num2
        print(result)
    else:
        # BUGFIX: message previously read "You didi choose the right operation".
        print('You did not choose a valid operation')
except (ValueError, ZeroDivisionError):
    # Narrowed from a bare except: only bad numbers / divide-by-zero are expected.
    print("Improper numbers or Operation")
3,599 | 628e625be86053988cbaa3ddfe55f0538136e24d | ##################
# Drawing the generic rest of the board
##################
def drawBoard(canvas, data):
    """Draw the static board: felt background, white rails, tile banner, player legend."""
    # Green felt with a 10px margin on every side.
    canvas.create_rectangle(10, 10, data.width - 10, data.height - 10, fill="dark green")
    # Three white rails framing the centre play area.
    for rail in ((187, 160, 200, 550), (187, 160, 561, 173), (561, 160, 574, 550)):
        canvas.create_rectangle(*rail, fill="white")
    banner = "You've selected Tile " + str(data.tileNumber)
    canvas.create_text(data.width // 2, data.height // 4, text=banner, font="courier 20")
    canvas.create_rectangle(50, 50, 350, 75, fill="white", width=4)
    # Player legend down the right-hand side, one row per player colour.
    player_colors = (data.p1color, data.p2color, data.p3color, data.p4color)
    for row, color in enumerate(player_colors):
        canvas.create_text(700, 40 + 20 * row, text="P" + str(row + 1), fill=color)
def drawMiddleTiles(canvas, data):
    """Place the stored image of every already-played tile onto the board."""
    played_tiles = data.played
    if played_tiles == None:
        return
    for tile in played_tiles:
        # tile layout: (x, y, image)
        canvas.create_image(tile[0], tile[1], image=tile[2])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.