text stringlengths 38 1.54M |
|---|
from collections import defaultdict
import pandas as pd
from Bio import SeqIO
files = ["result/alignment_traD.fas", "result/alignment_traN.fas"]


def main(files, result_path):
    """Merge several FASTA alignments into one concatenated alignment.

    Sequences are joined per record id across the input files; ids missing
    from a file are padded with gap characters ('-') so every concatenated
    sequence keeps a uniform length.  The result is written as FASTA to
    *result_path*.
    """
    # file -> {record id -> sequence}
    result = defaultdict(lambda: defaultdict(str))
    for file in files:
        for seq_record in SeqIO.parse(file, "fasta"):
            result[file][seq_record.id] = seq_record.seq
    result = pd.DataFrame(result)
    for col in result:
        # Pad ids absent from this alignment with an all-gap sequence of
        # the alignment's column length.
        filler = "-" * int(result[col].str.len().max())
        result[col] = result[col].fillna(filler)
    # Concatenate the per-file sequences row-wise.
    result["S"] = result.sum(axis=1)
    with open(result_path, "w") as out:
        # BUG FIX: Series.iteritems() was removed in pandas 2.0;
        # Series.items() is the supported, behaviorally identical spelling.
        for id_, seq in result["S"].items():
            out.write(f">{id_}\n{seq}\n")


if __name__ == '__main__':
    main(files, "result/alignment_rare.fas")
|
from django.shortcuts import get_object_or_404
from rest_framework.response import Response # JSON 응답 생성기
from rest_framework.decorators import api_view # require_methods 와 비슷
from .models import Todo
from .serializers import TodoSerializer
# @api_view(['GET'])
# def todo_list(request):
# serializer = TodoSerializer()
@api_view(['POST'])  # only responds to POST requests
def create_todo(request):
    """Create a Todo owned by the requesting user from the request body."""
    # request.POST only captures form-data (requests sent from a <form> tag);
    # request.data is used instead since it also handles JSON bodies.
    serializer = TodoSerializer(data=request.data)
    if serializer.is_valid():
        serializer.save(user=request.user)
        return Response(serializer.data)
    # Validation failed: echo the field errors with a 400.
    return Response(status=400, data=serializer.errors)
@api_view(['PATCH', 'DELETE'])
def update_delete_todo(request, todo_id):
    """Partially update (PATCH) or remove (DELETE) one Todo by id."""
    todo = get_object_or_404(Todo, id=todo_id)
    if request.method == 'DELETE':
        todo.delete()
        return Response(status=204)
    elif request.method == 'PATCH':
        serializer = TodoSerializer(instance=todo, data=request.data)
        if not serializer.is_valid():
            return Response(status=400, data=serializer.errors)
        serializer.save()
        return Response(serializer.data)
|
import pytest
from homework.homework11.hw1 import *
def test_meta_parameters():
    # The enums are expected to compare equal to their member names,
    # i.e. they behave like str-valued enums where value == name.
    assert ColorsEnum.RED == "RED"
    assert SizesEnum.XL == "XL"
|
from fabricasqlcnx import Fabricacnx
import os
__author__ = 'Gabriel Lopes 22/09/15'
class Fabricag(Fabricacnx):
    """Persists and logs service-check results collected via Fabricacnx.

    ``resul_ok`` / ``resul_erro`` hold result rows shaped
    (sistema, host, ip, porta, status, data).  ``printa == 'sim'`` echoes
    each row to stdout while it is being persisted.
    """

    def __init__(self):
        Fabricacnx.__init__(self)
        self.resul_ok = []      # successful check results
        self.resul_erro = []    # failed check results
        self.printa = None      # 'sim' -> echo rows while saving

    def _grava(self, rows, query):
        """Run *query* once per row, binding the row as named parameters.

        Extracted from the formerly duplicated bodies of ``gravaok`` and
        ``gravafalha``.
        """
        for sistema, host, ip, porta, status, data in rows:
            self.param = {'sistema': sistema, 'host': host,
                          'ip': ip, 'porta': porta,
                          'status': status, 'data': data}
            if self.printa == 'sim':
                print(self.param)
            self.query = query
            Fabricacnx.conector(self)
            self.conn.commit()
            self.cursor.close()
            self.conn.close()

    def gravaok(self, grava):
        """Persist every successful result using the *grava* query."""
        self._grava(self.resul_ok, grava)

    def gravafalha(self, falha):
        """Persist every failed result using the *falha* query."""
        self._grava(self.resul_erro, falha)

    @staticmethod
    def _escreve_log(arquivo, rows):
        """Append one formatted line per result row to *arquivo*."""
        for i in rows:
            t = 'Sistema: %s Hostname: %s IP: %s ' \
                'Porta: %s Status: %s Data: %s\n' % \
                (i[0], i[1], i[2], i[3], i[4], i[5])
            print(t)
            # Context manager guarantees the handle is closed even on error
            # (the original re-opened and closed the file for every row).
            with open(arquivo, 'a') as abrir:
                abrir.write(t)

    def logok(self, disco, arquivo):
        """Write all results (ok first, then erro) to ``disco/arquivo``."""
        arquivo = disco + '/' + arquivo
        print(arquivo)
        self._escreve_log(arquivo, self.resul_ok)
        self._escreve_log(arquivo, self.resul_erro)

    @staticmethod
    def valida(disco, arquivo):
        """Return 'achei' when *arquivo* is listed inside *disco*; otherwise
        a short tag describing why the lookup failed."""
        try:
            for dirr in os.listdir((os.path.abspath(disco))):
                if dirr == arquivo:
                    return 'achei'
            return 'nao achei'
        except NotADirectoryError:
            return 'nodir'
        except FileNotFoundError:
            return 'nofile'
        except IsADirectoryError:
            return 'errodir'

    @staticmethod
    def validarq(disco, arquivo):
        """Return 'sim' when ``disco/arquivo`` is a directory, else 'nao'."""
        return 'sim' if os.path.isdir(disco + '/' + arquivo) else 'nao'

    @staticmethod
    def validadir(disco):
        """Return 'sim' when *disco* is a directory, else 'nao'."""
        return 'sim' if os.path.isdir(disco) else 'nao'
|
import pandas as pd
import numpy as np
import time

# Load the raw 2016-2018 dataset and keep only the columns that map onto
# the cashflow table attributes.
data = pd.read_csv('2016-2018.csv')
data = data[['Team', 'League', 'Year']]
data = data.rename(columns={'Team': 'club', 'League': 'league', 'Year': 'year'})

# Coerce the year column to a compact signed-integer dtype.
data['year'] = pd.to_numeric(data['year'], downcast='signed')

# Timestamp prefix used to version the exported CSV files.
timestr = time.strftime("%Y%m%d-%H%M%S")
# data.to_csv(timestr + 'club_league.csv')

# Build the de-duplicated club and league reference tables and export them.
club = data['club'].copy().drop_duplicates().reset_index(drop=True)
print(club)
club.to_csv(timestr + 'club.csv')
league = data['league'].copy().drop_duplicates().reset_index(drop=True)
league.to_csv(timestr + 'league.csv')
print(league)
# Count character classes in one line of input.  Only the count of
# "special" characters (neither alphanumeric nor space) is printed,
# matching the original script's output.
string = input()
alphabets = digits = special = 0
space = 0
for ch in string:  # iterate characters directly instead of via range(len())
    if ch.isalpha():
        alphabets += 1
    elif ch.isdigit():
        digits += 1
    elif ch == ' ':
        space += 1
    else:
        special += 1
print(special)
|
from django.db import models
class Movie(models.Model):
    """A film that can be scheduled for screenings (see ``Sanse``)."""
    name = models.CharField(max_length=100, verbose_name='نام')  # title
    director = models.CharField(max_length=50, verbose_name='کارگردان')  # director
    year = models.IntegerField(verbose_name='سال تولید')  # production year
    length = models.IntegerField(verbose_name='زمان فیلم')  # runtime (presumably minutes — TODO confirm)
    # NOTE(review): field name keeps the original "descreption" typo; renaming
    # would require a migration and updates to any code using the field.
    descreption = models.TextField(verbose_name='توضیحات')
    poster = models.ImageField(verbose_name='پوستر', upload_to='movie_poster/')

    class Meta:
        verbose_name = 'فیلم'
        verbose_name_plural = 'فیلم'

    def __str__(self):
        return self.name
class Cinema(models.Model):
    """A cinema location where screenings (``Sanse``) take place."""
    # Operator-supplied business key used instead of an auto id.
    cinema_code = models.IntegerField(primary_key=True, verbose_name ='کد سینما')
    name = models.CharField(max_length=50, verbose_name ='نام')
    city = models.CharField(max_length=30, default='تهران', verbose_name ='شهر')
    capacity = models.IntegerField(verbose_name ='ظرفیت')  # total seats
    phone = models.CharField(max_length=20, null=True, verbose_name ='تلفن')
    address = models.TextField(verbose_name ='آدرس')
    image = models.ImageField(verbose_name='تصویر', upload_to='cinema_image/')

    class Meta:
        verbose_name = 'سینما'
        verbose_name_plural = 'سینما'

    def __str__(self):
        return self.name
# TODO VALIDATOR >>> Sanse.salable_seat and Sanse.free_seat must be lower than or equal to Cinama.capacity
class Sanse(models.Model):
    """A single screening (showtime) of a Movie at a Cinema."""
    # PROTECT: a movie/cinema with scheduled screenings cannot be deleted.
    movie = models.ForeignKey(Movie, on_delete=models.PROTECT, verbose_name ='فیلم')
    cinema = models.ForeignKey(Cinema, on_delete=models.PROTECT, verbose_name ='سینما')
    start_time = models.DateTimeField(verbose_name ='زمان شروع')
    price = models.IntegerField(verbose_name ='قیمت')
    salable_seats = models.IntegerField(verbose_name ='صندلی های قابل فروش')
    free_seat = models.IntegerField(verbose_name ='صندلی های خالی')

    # Sale-status lifecycle constants (integer values stored in ``status``).
    SALE_NOT_STARTED = 1
    SALE_OPEN = 2
    TICKETS_SOLD = 3
    SALE_CLOSED = 4
    MOVIE_PLAYED = 5
    # NOTE(review): "CALCELED" is a typo for "CANCELED"; renaming the
    # constant would break any external code that references it.
    SHOW_CALCELED = 6
    status_choices = (
        (SALE_NOT_STARTED, "فروش آغاز نشده"),
        (SALE_OPEN, "در حال بلیت فروشی"),
        (TICKETS_SOLD, "بلیت تمام شد"),
        (SALE_CLOSED, "پایان فروش"),
        (MOVIE_PLAYED, "فیلم پخش شد"),
        (SHOW_CALCELED, "نمایش لغو شد")
    )
    status = models.IntegerField(choices=status_choices, verbose_name ='وضعیت')

    class Meta:
        verbose_name = 'سانس'
        verbose_name_plural = 'سانس'

    def __str__(self):
        return '{} - {} - {}'.format(self.movie, self.cinema, self.start_time)
|
# package com.gwittit.client.facebook.entities
import java
from java import *
from com.google.gwt.core.client.JavaScriptObject import JavaScriptObject
class User(JavaScriptObject):
    """
    Facebook User, basic info.

    NOTE(review): this file looks machine-translated from Java (a GWT
    overlay type); the stacked ``__init__`` definitions and ``@java.*``
    decorators follow that bridge's conventions, not ordinary Python.
    @author olamar72
    """
    @java.init
    def __init__(self, *a, **kw):
        pass

    # Protected no-arg constructor, as required for GWT overlay types.
    @java.protected
    @__init__.register
    @java.typed()
    def __init__(self, ):
        self.__init__._super()

    # Native accessor: implementation lives on the JavaScript side.
    @java.final
    @java.native
    def getUidString(self):
        pass

    @java.final
    def getUid(self):
        # Wraps the string uid in a Java Long.
        return Long(self.getUidString())

    @java.final
    @java.native
    def getName(self):
        pass
|
"""
Script needs nine parameters to run:
1. Movie poster search URL
2. Movie poster URL
3. Database connection string
4. Database name
5. AWS S3 access key
6. AWS S3 secret key
7. AWS S3 bucket name
8. AWS S3 directory name
9. AWS S3 region
"""
from pymongo import MongoClient
from datetime import datetime
import boto3
from PIL import Image
from io import BytesIO
import urllib.request
import requests
import sys
import string
def upload_image(origin_url):
    """Download the poster at *origin_url* and upload it to S3.

    Returns the public S3 URL of the uploaded object.  AWS credentials,
    bucket, directory and region come from argv[5..9] (see module docstring).
    """
    access_key = sys.argv[5]
    secret_key = sys.argv[6]
    bucket_name = sys.argv[7]
    directory_name = sys.argv[8]
    region = sys.argv[9]
    s3 = boto3.client('s3', aws_access_key_id=access_key,
                      aws_secret_access_key=secret_key, region_name=region)
    filename = origin_url.split('/')[-1]
    # BUG FIX: the original handed the live HTTP stream to PIL and *then*
    # called .read() on the partially consumed stream, so the re-encoded
    # buffer could be built from incomplete data.  Read the payload once.
    with urllib.request.urlopen(origin_url) as poster:
        payload = poster.read()
    img = Image.open(BytesIO(payload))
    in_memory_file = BytesIO()
    img.save(in_memory_file, format=img.format)
    in_memory_file.seek(0)
    s3.upload_fileobj(in_memory_file, bucket_name,
                      directory_name + '/' + filename,
                      ExtraArgs={'ContentType': 'image/jpg'})
    s3_poster_url = 'https://{}.s3.{}.amazonaws.com/{}/{}'.format(
        bucket_name, region, directory_name, filename)
    return s3_poster_url
movieSearchUrl = sys.argv[1]
moviePosterUrl = sys.argv[2]
client = MongoClient(sys.argv[3])
db = client.get_database(sys.argv[4])

# Only the fields needed for matching are fetched from the collection.
movies = db.movies.find({}, {'title': 1, 'releaseDate': 1, '_id': 1})
for movie in movies:
    print('Updating \'{}\''.format(movie['title']))
    # Strip punctuation from the title so it can be used as a search term.
    formattedMovieTitle = movie['title'].translate(
        movie['title'].maketrans('', '', string.punctuation))
    url = movieSearchUrl + formattedMovieTitle
    response = requests.get(url=url)
    data = response.json()
    if 'results' not in data:
        print('Could not find any results for \'{}\''.format(formattedMovieTitle))
        continue
    for result in data['results']:
        if 'release_date' not in result:
            print('No release date set for \'{}\' result'.format(formattedMovieTitle))
            continue
        # Accept a result only when the release year matches and the cleaned
        # title is a substring of the result title.
        releaseYear = result['release_date'].split('-')[0]
        movieReleaseYear = movie['releaseDate'].strftime("%Y")
        if releaseYear == movieReleaseYear and formattedMovieTitle in result['title']:
            posterPath = result['poster_path']
            if not posterPath:
                print('No poster available for this \'{}\' result'.format(formattedMovieTitle))
                continue
            posterUrl = moviePosterUrl + posterPath
            uploaded_image_url = upload_image(posterUrl)
            # The isoformat/strptime round-trip reproduces the original
            # stored value exactly (a microsecond-precision datetime).
            db.movies.update_one(
                {'_id': movie['_id']},
                {'$set': {'posterUrl': uploaded_image_url,
                          'lastUpdated': datetime.strptime(
                              datetime.today().isoformat(), '%Y-%m-%dT%H:%M:%S.%f')}})
            print('Updated \'{}\' with movie poster'.format(movie['title']))
            break
#!/usr/bin/env python
import time
import math
import numpy as np
from minnow_low_level_control.HeadingControl import *
from minnow_low_level_control.SurgeSpeedControl import *
class control_system_manager:
    # NOTE(review): everything below executes at class-definition time, and
    # the trailing ``while True`` loop never exits, so importing this module
    # blocks forever.  That matches the "standalone troubleshooting" note
    # below, but the logic should move into methods / a __main__ guard
    # before this class is imported elsewhere — TODO confirm intent.

    # This is for standalone troubleshooting of the python code ---------------------
    desired_speed = 0.0
    desired_heading = -160.0  # between -180 and 180 (i.e. 181 = -179)
    desired_pitch = 0.0
    current_speed = 0.0
    current_heading = 170
    current_pitch = -2.0
    # -------------------------------------------------------------------------------

    # Normalize headings from [0, 360) into the controller's (-180, 180] range.
    if (desired_heading >= 0) and (desired_heading <= 180):
        contrl_desired_heading = desired_heading
    else:
        contrl_desired_heading = desired_heading - 360
    if (current_heading >= 0) and (current_heading <= 180):
        contrl_current_heading = current_heading
    else:
        contrl_current_heading = current_heading - 360

    speed_control_system = speed_controller()
    heading_control_system = heading_controller()
    heading_control_system.DesiredHeading(contrl_desired_heading)

    while True:
        # Run speed controller
        (speed_contrl_thrust) = speed_control_system.update(desired_speed)
        print("Speed Control Thrust Output: %f" % (speed_contrl_thrust))
        # Run heading controller
        (hdg_differential_thrust, hdg_port_thrust, hdg_stbd_thrust) = heading_control_system.update(contrl_current_heading, speed_contrl_thrust)
        print("Heading control port thrust: %f" % hdg_port_thrust)
        print("Heading control stbd thrust: %f" % hdg_stbd_thrust)
        # Final motor thrust computation
        time.sleep(.01)
|
# -*- coding: utf-8 -*-
import re
import string
import collections
def cleanSentence(sentence):
    """Split *sentence* on single spaces, trim punctuation/whitespace from
    each token and keep tokens longer than one character (plus 'a'/'i')."""
    strip_chars = string.punctuation + string.whitespace
    trimmed = (token.strip(strip_chars) for token in sentence.split(' '))
    return [token for token in trimmed
            if len(token) > 1 or token.lower() in ('a', 'i')]
def cleanInput(content):
    """Upper-case *content* and split it on single spaces.

    The regex/encoding cleanup steps were disabled in the original; the
    active behavior is exactly upper-case followed by a plain split.
    """
    uppercased = content.upper()
    return uppercased.split(" ")
def getNgramsFromSentence(content, n):
    """Return every n-gram of *content* (a token list) as a string.

    Each n-gram is rendered with a space *before* every token (so results
    start with a leading space), matching the original concatenation.
    """
    return ["".join(" " + token for token in content[start:start + n])
            for start in range(len(content) - n + 1)]
def getNgrams(content, n):
    """Clean *content* into a token list and return its n-grams.

    BUG FIX: the original had a stray trailing comma after
    ``cleanInput(content)`` that wrapped the token list in a one-element
    tuple and then looped over that tuple; the net effect (n-grams over the
    full token list) is preserved here without the accidental tuple.
    """
    tokens = cleanInput(content)
    return getNgramsFromSentence(tokens, n)
#content = "wo shi liuwei 123 wo shi liuwei"
#title = getNgrams(content,2)
#print title
#print collections.Counter(title)
|
import boto3
from rekognition_image import RekognitionImage
class ObjectDetector:
"""
Represents one Amazon rekognition object detection call and the response of it.
Raises botocore.exceptions.ClientError if API call to Rekognition fails.
"""
def __init__(self, payload, img_name):
rekognition_client = boto3.client('rekognition')
self.rekog_obj = RekognitionImage(
{'Bytes': payload}, img_name, rekognition_client)
print("Analyzing objects............")
self.obj_meta_data = self.rekog_obj.detect_labels(5)
# print(f"Found {len(self.obj_meta_data)} object")
# for obj in self.obj_meta_data:
# print("Object Label:: ", obj.to_dict())
if len(self.obj_meta_data) < 1:
raise FileNotFoundError("No Objects present in the given Image.")
def getLabelList(self):
return [item.name for item in self.obj_meta_data] |
from .base import *

# Deployment settings overlay: blog content lives outside the repo in
# CONTENTS_DIR, request logging is enabled, DEBUG is off.

ALLOWED_HOSTS = ['*']

# Log middleware first so it wraps every other middleware.
MIDDLEWARE.insert(0, 'config.log.LogMiddleware')

# Content directory (database, templates, static) sits next to the checkout.
CONTENTS_DIR = os.path.join(BASE_DIR.parent, 'django-blog-contents')
DATABASES['default']['NAME'] = os.path.join(CONTENTS_DIR, 'db.sqlite3')
TEMPLATES[0]['DIRS'] += [os.path.join(CONTENTS_DIR, 'templates')]
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [
    os.path.join(CONTENTS_DIR, 'static'),
]
DEFAULT_AUTHOR = 'sysja'
DEBUG = False

# Rotating file logs: a general app/django log plus a separate "operation"
# log recording client IP and request path (fields injected by the
# log middleware above — the clientIp/requestPath format keys depend on it).
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(asctime)s %(thread)d %(clientIp)-15s %(levelname)-5s %(name)s.%(funcName)s %(message)s'
        },
        'simple': {
            'format': '%(asctime)s %(levelname)-5s %(name)s.%(funcName)s %(message)s'
        },
        'operation': {
            'format': '%(asctime)s %(clientIp)-15s %(requestPath)s %(message)s'
        },
    },
    'handlers': {
        'file': {
            'level': 'INFO',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(BASE_DIR, 'log/debug.log'),
            'maxBytes': 1024 * 1024 * 1,  # 1 MiB per file
            'backupCount': 5,
            'formatter': 'verbose',
        },
        'operation': {
            'level': 'INFO',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(BASE_DIR, 'log/operation.log'),
            'maxBytes': 1024 * 1024 * 1,
            'backupCount': 5,
            'formatter': 'operation',
        },
    },
    'loggers': {
        'django': {
            'handlers': ['file'],
            'level': 'INFO',
        },
        'blog': {
            'handlers': ['file'],
            'level': 'INFO',
        },
        'blog.operation': {
            'handlers': ['operation'],
            'level': 'INFO',
        },
    }
}

# Presumably imported for its side effects (log helpers) — TODO confirm.
from .. import log

# SMTP (Gmail) mail settings; credentials are expected from local settings.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = True

# Machine-local overrides (secrets, hosts); absence is fine.
try:
    from .local import *
except ImportError:
    pass
|
# Given a singly linked list, remove every run of consecutive nodes whose values sum to 0, then print the values of the remaining nodes.
def check(arr):
    """Return True when *arr* contains no negative value."""
    return all(value >= 0 for value in arr)
def solution(linkedList):
    """Repeatedly delete the longest zero-sum contiguous run of values and
    return the remaining list.

    The search restarts from the longest possible window after every
    deletion, and stops as soon as no negative value remains (per ``check``).
    """
    window = len(linkedList) + 1
    if check(linkedList):
        return linkedList
    while True:
        window -= 1
        for start in range(len(linkedList) - window + 1):
            if sum(linkedList[start:start + window]) == 0:
                linkedList = linkedList[:start] + linkedList[start + window:]
                window = len(linkedList) + 1
                break
        if check(linkedList):
            break
    return linkedList
from flask import Flask
from flask_sqlalchemy import SQLAlchemy   # ORM layer over the database
from flask_bcrypt import Bcrypt           # password hashing
from flask_login import LoginManager      # session / login management
from flask_mail import Mail               # outgoing mail support
import os                                 # env vars keep secrets out of the repo

app = Flask(__name__)

# Secrets and connection strings come from the environment.
app.config['SECRET_KEY'] = os.environ['ALPHA_SECRET_KEY']          # secures session cookies
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['ALPHA_URI']    # MySQL via SQLAlchemy (see note below)
app.config['SERVER_NAME'] = '127.0.0.1:5000'  # local development server
app.config['SECURITY_PASSWORD_SALT'] = os.environ['SECURITY_PASSWORD_SALT']  # confirmation-email tokens

db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
login_manager = LoginManager(app)
login_manager.login_view = 'main.login'        # endpoint used by @login_required redirects
login_manager.login_message_category = 'info'  # alert category for flash messages

# Mail goes through Gmail over SSL (port 465), so TLS stays off.
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 465
# BUG FIX: the key was misspelled 'MAIL_USE_TL'; 'MAIL_USE_TLS' is the key
# Flask-Mail actually reads (the intended value, False, is unchanged).
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
app.config['MAIL_USERNAME'] = os.environ['ALPHA_MAIL']
app.config['MAIL_PASSWORD'] = os.environ['ALPHA_MAIL_PASSWORD']
mail = Mail(app)

# Imported after ``app`` exists to avoid circular imports.
from alphaprep import routes
from alphaprep.main.routes import main
from alphaprep.users.routes import users
from alphaprep.tests.routes import tests
from alphaprep.tutorials.routes import tutorials

app.register_blueprint(main)
app.register_blueprint(users)
app.register_blueprint(tests)
app.register_blueprint(tutorials)

'''
We use SQLAlchemy and MySQL at the same time: the SQLAlchemy URI points at
the MySQL database, so both the object interface (SQLAlchemy models) and raw
MySQL are available.  MySQL is mainly used for the questions (see 'dbloader'
for its implementation); everything else is handled through SQLAlchemy.
'''
from collections import deque
import copy
# <-------------------- tricky one -------------------->
# Topological sort with durations: lecture i takes time[i] and can only
# start after all of its prerequisite lectures have finished.

# number of nodes (lectures)
v = int(input())
# in-degree of every node starts at 0
indegree = [0] * (v+1)
# adjacency list for the directed prerequisite graph
graph = [[] for i in range(v+1)]
# per-lecture duration (edge weights live on the nodes here)
time = [0] * (v+1)
# read every edge of the directed graph
for i in range(1, v+1):
    data = list(map(int, input().split()))
    time[i] = data[0]  # first number is this lecture's duration
    for x in data[1:-1]:  # remaining numbers (minus the trailing sentinel) are prerequisites
        indegree[i] += 1  # incoming edge -> bump this node's in-degree
        graph[x].append(i)  # record the outgoing edge x -> i

# topological sort
def topology_sort():
    # Deep-copied so relaxations don't mutate the shared ``time`` list
    # (a plain assignment would alias it and corrupt later reads).
    result = copy.deepcopy(time)
    q = deque()
    # seed the queue with every node that has no prerequisites
    for i in range(1, v+1):
        if indegree[i] == 0:
            q.append(i)
    while q:
        # pop the next ready node
        now = q.popleft()
        # relax every successor of the popped node
        for i in graph[now]:
            # longest-path relaxation: i can only start once its latest
            # prerequisite finishes
            result[i] = max(result[i], result[now] + time[i])
            indegree[i] -= 1  # remove the processed edge
            # successors whose in-degree just hit 0 become ready
            if indegree[i] ==0:
                q.append(i)
    # print the earliest completion time of every lecture
    for i in range(1, v+1):
        print(result[i])

topology_sort()
from django.core.serializers import json
from django.http import HttpResponse
from django.shortcuts import render_to_response, RequestContext
# Create your views here.
from match_service.models import *
def match_page(request):
    """Serve the labeling page for POST requests, the welcome page otherwise.

    NOTE(review): ``render_to_response`` (and its ``context_instance``
    argument) was removed in modern Django; this code targets a legacy
    version — confirm before upgrading.
    """
    context = {}
    if request.method == 'POST':
        return render_to_response(
            'video_labeling.html',
            context,
        )
    return render_to_response(
        'wellcome_page.html',
        context,
        context_instance=RequestContext(request)
    )
    # (the original ended with an unreachable ``pass`` after this return)
def get_next_video(request):
    """Return the next video to label as JSON (placeholder payload)."""
    # BUG FIX: the module imports ``json`` from django.core.serializers,
    # which is a serializer module with no ``dumps`` attribute, so the
    # original call raised AttributeError.  Use the stdlib json module.
    import json as _json
    context = {
        'title': '',
        'youtube_url': ''
    }
    return HttpResponse(
        _json.dumps(context),
        content_type='application/json'
    )
|
# Generated by Django 3.0.8 on 2020-10-11 11:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: introduces the Result model (per-semester
    # SGPA records) and drops the now-redundant semester/sgpa columns from
    # Student.  Generated code — keep edits to a minimum.

    dependencies = [
        ('dashboard', '0004_auto_20201011_0145'),
    ]

    operations = [
        migrations.CreateModel(
            name='Result',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('semester', models.TextField(null=True)),
                ('sgpa', models.FloatField(null=True)),
            ],
        ),
        migrations.RemoveField(
            model_name='student',
            name='semester',
        ),
        migrations.RemoveField(
            model_name='student',
            name='sgpa',
        ),
    ]
|
import sys
# sys.stdin = open("input.txt", "rt")
'''
K번째 큰 수
현수는 1부터 100사이의 자연수가 적힌 N장의 카드를 가지고 있습니다. 같은 숫자의 카드가
여러장 있을 수 있습니다. 현수는 이 중 3장을 뽑아 각 카드에 적힌 수를 합한 값을 기록하려
고 합니다. 3장을 뽑을 수 있는 모든 경우를 기록합니다. 기록한 값 중 K번째로 큰 수를 출력
하는 프로그램을 작성하세요.
만약 큰 수부터 만들어진 수가 25 25 23 23 22 20 19......이고 K값이 3이라면 K번째 큰 값
은 22입니다.
▣ 입력설명
첫 줄에 자연수 N(3<=N<=100)과 K(1<=K<=50) 입력되고, 그 다음 줄에 N개의 카드값이 입력
된다.
▣ 출력설명
첫 줄에 K번째 수를 출력합니다. K번째 수는 반드시 존재합니다.
▣ 입력예제 1
10 3
13 15 34 23 45 65 33 11 26 42
▣ 출력예제 1
143
'''
n, k = map(int, input().split())
arr = list(map(int, input().split()))

# Collect the distinct sums of every 3-card combination; a set removes
# duplicate totals so "K-th largest" is over distinct values, exactly as
# the original triple index loop did.
from itertools import combinations
result = {a + b + c for a, b, c in combinations(arr, 3)}

# K-th largest distinct sum (guaranteed to exist by the problem statement).
print(sorted(result, reverse=True)[k - 1])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-12-28 16:28:50
# @Author : Your Name (you@example.org)
# @Link : http://example.org
# @Version : $Id$
import os,sys
import json
import timetool
#当前对象主要为对k线数据分类的神经网络对象,包括数据分类网络训练,和网络保存使用
# Neural-network helper for classifying k-line (candlestick) data:
# trains the per-timeframe classifier networks and saves/loads them.
class KlineNNTool(object):
    """Skeleton for k-line classification networks (all methods are stubs)."""

    def __init__(self, datpth,nnconfigpth):
        self.datapth = datpth              # directory holding the training data files
        self.nnconfigpth = nnconfigpth     # network-configuration path
        self.nnSaveFilePth = None          # where trained networks are persisted
        self.m5NN = None                   # 5-minute network
        self.h1NN = None                   # 1-hour network
        self.h4NN = None                   # 4-hour network
        self.h12NN = None                  # 12-hour network
        self.h24NN = None                  # 24-hour network
        self.fenleix = 10                  # number of classification rows (x)
        self.fenleiy = 10                  # number of classification columns (y)
        self.fenleiIndex = 0               # classification index
        # Timestamp of the last k-line used for training; on init, newer
        # data (if any) is used to re-train/update the networks.
        self.lastKlineTime = 0
        self.initObj()

    # Initialize the networks; train one if none exists yet.
    def initObj(self):
        pass

    def create5MinNN(self):  # build the 5-min autoencoder classifier; load an existing one from file before training
        pass

    def create1hourNN(self):  # build the 1-hour autoencoder classifier; load an existing one from file before training
        pass

    def create4hourNN(self):  # build the 4-hour autoencoder classifier; load an existing one from file before training
        pass

    def create12hourNN(self):  # build the 12-hour autoencoder classifier; load an existing one from file before training
        pass

    def create24hourNN(self):  # build the 24-hour autoencoder classifier; load an existing one from file before training
        pass

    def training5minNN(self):  # train the 5-min autoencoder classifier and save it to file
        pass

    def training1hourNN(self):  # train the 1-hour autoencoder classifier and save it to file
        pass

    def training4hourNN(self):  # train the 4-hour autoencoder classifier and save it to file
        pass

    def training12hourNN(self):  # train the 12-hour autoencoder classifier and save it to file
        pass

    def training24hourNN(self):  # train the 24-hour autoencoder classifier and save it to file
        pass

    def trainingAllNN(self):  # train the overall classifier (inputs: 5-min, 1h, 4h, 12h, 24h classifications) and save it
        pass

    def getAddNewDataXY(self, data):  # feed one data sample; return the network's classification coordinates
        pass

    def getAddNewDataIndex(self, data):  # feed one data sample; return the current classification state value
        pass
def main():
    # Entry point placeholder — not yet implemented.
    pass

def test():
    # Ad-hoc test placeholder — not yet implemented.
    pass
# print sntimes
# standalone test entry point
if __name__ == '__main__':
    # args = sys.argv
    # fpth = ''
    # if len(args) == 2 :
    #     if os.path.exists(args[1]):
    #         fpth = args[1]
    #     else:
    #         print "please supply the path of the file to transcode"
    # else:
    #     print "please supply the path of the file to transcode"
    # main()
    main()
|
class TripletLoss(nn.Module):
    """Batch triplet margin loss over an (anchor, positive) pair batch.

    Row i of *anchor* matches row i of *positive*; every other positive row
    acts as a negative for anchor i.  ``negative`` selects the mining
    strategy: 'max' keeps only the hardest negative per anchor, 'all'
    keeps every violating pair ('random' is not implemented and currently
    behaves like 'all').
    """

    def __init__(self, margin=.2, negative='max'):
        super(TripletLoss, self).__init__()
        self.margin = margin
        self.negative = negative

    def forward(self, anchor, positive):
        # Pairwise distances between all anchors and all positives.
        dists = torch.cdist(anchor, positive)
        # Matching (anchor_i, positive_i) distances live on the diagonal.
        p_dists = torch.diag(dists)
        p_dist = p_dists.unsqueeze(1).expand_as(dists)
        # Hinge: the matching distance must undercut each negative distance
        # by at least the margin; the diagonal (self-pairs) is zeroed out.
        cost = (p_dist - dists) + self.margin
        cost = cost.clamp(min=0).fill_diagonal_(0)
        if self.negative == 'max':
            # Hardest negative per anchor.
            cost = torch.max(cost, keepdim=True, dim=1)[0]
        elif self.negative == 'random':
            pass
        elif self.negative == 'all':
            pass
        else:
            # BUG FIX: the original raised ValueError("error"), which hides
            # which argument was wrong and what the valid options are.
            raise ValueError(
                "unknown negative mining mode: {!r} "
                "(expected 'max', 'random' or 'all')".format(self.negative))
        # Average only over violating (non-zero) entries.
        return cost[cost > 0].mean()
|
import peewee, peewee_async
from ..config.config import DB_CONFIG
from ..common.exceptions import PageNotFound
database = peewee_async.PostgresqlDatabase(**DB_CONFIG)
class ContentBlock(peewee.Model):
    """A video content block shown on a Page; tracks its own view counter."""
    name = peewee.CharField(max_length=64)
    slug = peewee.CharField(max_length=16)
    video_link = peewee.CharField(max_length=256)
    order_num = peewee.IntegerField()   # display order within a page
    counter = peewee.IntegerField(default=0)  # view counter

    class Meta:
        database = database

    def inc(self):
        # Increment the view counter and persist immediately.
        self.counter += 1
        self.save()

    @classmethod
    def get_list(cls, page_slug: str):
        """Yield the page's blocks in display order.

        Raises PageNotFound when no Page matches *page_slug*.
        NOTE: iterating has the side effect of bumping each block's counter.
        """
        try:
            page = Page.get(Page.slug == page_slug)
        except Page.DoesNotExist:
            raise PageNotFound
        for block in page.blocks.order_by(cls.order_num):
            block.inc()
            yield block
class Page(peewee.Model):
    """A site page composed of ordered ContentBlocks (many-to-many)."""
    name = peewee.CharField(max_length=64)
    slug = peewee.CharField(max_length=16)
    blocks = peewee.ManyToManyField(ContentBlock, backref='pages')
    order_num = peewee.IntegerField()  # ordering of pages themselves

    class Meta:
        database = database

    @classmethod
    def get_list(cls):
        # All pages in display order, yielded lazily.
        yield from cls.select().order_by(cls.order_num)
|
import business_rules
from business_rules import run_all, export_rule_data
from business_rules.variables import *
from business_rules.actions import *
from business_rules.fields import *
from tngsdk.validation.util import read_descriptor_file
from tngsdk.validation import event
from tngsdk.validation.storage import DescriptorStorage
from tngsdk.validation.logger import TangoLogger
import datetime
import json
import os
import sys
import yaml
import logging
LOG = TangoLogger.getLogger(__name__)
evtlog = event.get_logger('validator.events')
class DescriptorVDU(object):
    """Holds one VDU's resource data extracted from a VNFD, plus any
    custom-rule validation errors raised against it."""

    def __init__(self, vnfd_id):
        self._vnfd_id = vnfd_id
        self._vdu_id = ""
        self._errors = []       # error strings appended by DescriptorActions
        self._storage = {}      # resource_requirements/storage mapping
        self._cpu = {}          # resource_requirements/cpu mapping
        self._memory = {}       # resource_requirements/memory mapping
        self._network = {}      # resource_requirements/network mapping
        # NOTE(review): declared as _vm_images_format, but consumers read
        # ``_vdu_images_format`` (set dynamically in process_rules); the two
        # names look like an unintentional mismatch — confirm before use.
        self._vm_images_format = ""

    def display_error(self, error_text):
        # Errors identify both the descriptor and the offending VDU.
        LOG.error("Custom error in descriptor '{}' in vdu_id = '{}'\n{}"
                  .format(self._vnfd_id, self._vdu_id, error_text))

    def display_warning(self, warning_text):
        LOG.warning("Warning detected in custom rules validation: {}"
                    .format(warning_text))
class DescriptorVariablesVDU(BaseVariables):
    """business_rules variable set exposing one VDU's resource figures.

    Missing numeric values are reported as -1 and missing string values as
    "" (after logging a warning), so rules can still evaluate against them.
    """

    def __init__(self, descriptor):
        self._vnfd_id = descriptor._vnfd_id
        self._vdu_id = descriptor._vdu_id
        self._storage = descriptor._storage
        self._cpu = descriptor._cpu
        self._memory = descriptor._memory
        self._network = descriptor._network
        # NOTE(review): reads ``_vdu_images_format``, which DescriptorVDU's
        # __init__ never sets (it declares ``_vm_images_format``); the
        # attribute is attached dynamically in process_rules — confirm.
        self._vm_images_format = descriptor._vdu_images_format

    # virtual_deployment_units/resource_requirements/memory
    @numeric_rule_variable(label='Size of RAM')
    def vdu_resource_requirements_ram_size(self):
        size = self._memory.get("size")
        if size:
            return size
        else:
            return -1

    @string_rule_variable(label='Unit of RAM')
    def vdu_resource_requirements_ram_size_unit(self):
        size_unit = self._memory.get("size_unit")
        if size_unit:
            return size_unit
        else:
            LOG.warning("Custom error in descriptor '{}' in vdu_id = '{}'\n{}"
                        .format(self._vnfd_id, self._vdu_id, "'size_unit' is not present in 'memory'"))
            return ""

    # virtual_deployment_units/resource_requirements/cpu
    @numeric_rule_variable(label='Number of vCPUs')
    def vdu_resource_requirements_cpu_vcpus(self):
        vcpus = self._cpu.get("vcpus")
        if vcpus:
            return vcpus
        else:
            return -1

    # virtual_deployment_units/resource_requirements/storage
    @numeric_rule_variable(label='Size of storage')
    def vdu_resource_requirements_storage_size(self):
        size = self._storage.get("size")
        if size:
            return size
        else:
            return -1

    @string_rule_variable(label='Unit of storage')
    def vdu_resource_requirements_storage_size_unit(self):
        size_unit = self._storage.get("size_unit")
        if size_unit:
            return size_unit
        else:
            LOG.warning("Custom error in descriptor '{}' in vdu_id = '{}'\n{}"
                        .format(self._vnfd_id, self._vdu_id, "'size_unit' is not present in 'storage'"))
            return ""

    # virtual_deployment_units/network (the whole section may be absent)
    @numeric_rule_variable(label='size of BW')
    def vdu_resource_requirements_network_network_interface_bandwidth(self):
        if self._network:
            size = self._network.get("network_interface_bandwidth")
            if size:
                return size
            else:
                return -1
        else:
            return -1

    @string_rule_variable(label='Unit of BW')
    def vdu_resource_requirements_network_network_interface_bandwidth_unit(self):
        if self._network:
            size_unit = self._network.get("network_interface_bandwidth_unit")
            if size_unit:
                return size_unit
            else:
                LOG.warning("Custom error in descriptor '{}' in vdu_id = '{}'\n{}"
                            .format(self._vnfd_id, self._vdu_id, "'network_interface_bandwidth_unit' is not present in 'network'"))
                return ""
        else:
            LOG.warning("Custom error in descriptor '{}' in vdu_id = '{}'\n{}"
                        .format(self._vnfd_id, self._vdu_id, "'network' is not present in 'resource_requirements'"))
            return ""

    # Card capabilities are not parsed yet; rules always see False.
    @boolean_rule_variable(label='SR-IOV')
    def vdu_resource_requirements_network_network_interface_card_capabilities_SRIOV(self):
        return False

    @boolean_rule_variable(label='Mirroring')
    def vdu_resource_requirements_network_network_interface_card_capabilities_mirroring(self):
        return False

    @string_rule_variable(label='Format of VM')
    def vdu_vm_resource_format(self):
        return self._vm_images_format
class DescriptorActions(BaseActions):
    """business_rules action set: records errors/warnings on the descriptor."""

    def __init__(self, descriptor):
        self.descriptor = descriptor

    @rule_action(params={"error_text": FIELD_TEXT})
    def raise_error(self, error_text):
        # Errors are both logged and accumulated for the caller to collect.
        self.descriptor._errors.append(error_text)
        self.descriptor.display_error(error_text)

    @rule_action(params={"error_text": FIELD_TEXT})
    def raise_warning(self, error_text):
        # Warnings are only logged, never accumulated.
        self.descriptor.display_warning(error_text)
def process_rules(custom_rule_file, descriptor_file_name):
    """Validate every VDU of a function descriptor against custom rules.

    Returns the list of error strings accumulated for the last processed
    VDU (mirroring the original control flow).  Exits the process when the
    descriptor cannot be stored.
    """
    rules = load_rules_yaml(custom_rule_file)
    storage = DescriptorStorage()
    func = storage.create_function(descriptor_file_name)
    if not func:
        # BUG FIX: the original referenced the undefined name ``evtLOG``;
        # the module-level event logger is ``evtlog``, so this branch
        # previously raised NameError instead of logging the event.
        evtlog.log("Invalid function descriptor, Couldn't store "
                   "VNF of file '{0}'".format(descriptor_file_name),
                   descriptor_file_name,
                   'evt_function_invalid_descriptor')
        exit(1)
    for vdu in func.content.get("virtual_deployment_units"):
        descriptor = DescriptorVDU(func.id)
        descriptor._vdu_id = vdu.get("id")
        descriptor._storage = vdu.get("resource_requirements").get("storage")
        descriptor._cpu = vdu.get("resource_requirements").get("cpu")
        descriptor._memory = vdu.get("resource_requirements").get("memory")
        descriptor._network = vdu.get("resource_requirements").get("network")
        # NOTE(review): DescriptorVDU declares ``_vm_images_format``, but
        # DescriptorVariablesVDU reads ``_vdu_images_format``; this dynamic
        # attribute is what makes that lookup work — confirm before renaming.
        descriptor._vdu_images_format = vdu.get("vm_image_format")
        triggered = run_all(rule_list=rules,
                            defined_variables=DescriptorVariablesVDU(descriptor),
                            defined_actions=DescriptorActions(descriptor),
                            stop_on_first_trigger=False)
    return descriptor._errors
def load_rules_yaml(custom_rule_file):
    """Load and return the custom rule definitions from a YAML file.

    Exits the process (status 1) when the file is missing, unreadable,
    or not valid YAML.
    """
    if not os.path.isfile(custom_rule_file):
        LOG.error("Invalid custom rule file")
        exit(1)
    try:
        with open(custom_rule_file, "r") as fn_custom_rule:
            # safe_load == load(..., Loader=SafeLoader): never instantiates
            # arbitrary Python objects from the (untrusted) rule file.
            rules = yaml.safe_load(fn_custom_rule)
    except IOError:
        LOG.error("Error opening custom rule file: "
                  "File does not appear to exist.")
        exit(1)
    except (yaml.YAMLError, yaml.MarkedYAMLError) as e:
        # BUG FIX: original message read "seems to have contain invalid".
        LOG.error("The rule file seems to contain invalid YAML syntax."
                  " Please fix and try again. Error: {}".format(str(e)))
        exit(1)
    return rules
if __name__ == "__main__":
if len(sys.argv) != 3:
# if len(sys.argv)!= 2:
LOG.error("This script takes exactly two arguments: "
"example_descriptor <custom rule file> <descriptor file>")
exit(1)
custom_rule_file = sys.argv[1]
descriptor_file_name = sys.argv[2]
if not os.path.isfile(custom_rule_file):
LOG.error("Invalid custom rule file")
exit(1)
if not os.path.isfile(descriptor_file_name):
LOG.info("Invalid descriptor file")
exit(1)
process_rules(custom_rule_file, descriptor_file_name)
|
# Ratio samples to plot (sequential/parallel measurements).
c = [0.8916583583 ,0.9364599092 ,0.9418026692 ,0.9660107754 ,0.9735619037
,0.9752730086 ,0.9795233774 ,0.9736945491 ,0.983412122 ,0.8847568897
,0.937049294 ,0.9556460673 ,0.9521823306 ,0.9457192893 ,0.9755469101
,0.9781225838 ,0.9804915898 ,0.7425709229 ,0.885471973 ,0.8549843111
,0.9540545879 ,0.9638071451 ,0.9549170066 ,0.9591822503 ,0.9771572723
,0.9802537765 ,0.9703582279 ,0.9436619718 ,0.9485614647 ,0.8532666905
,0.9380387931 ,0.9383123181 ,0.9020750758 ,0.8996929376 ,0.9635932203
,0.9663973089 ,0.9712227524 ,0.9697056889 ,0.9709112973 ]
import numpy as np
import matplotlib.pyplot as plt
from pylab import var,mean
cc = range(len(c))
plt.figure()
v = var(c)
# NOTE(review): the variance is passed as *horizontal* error (xerr) on
# every point — verify yerr wasn't intended.
plt.errorbar(cc, c, xerr=v,label=str(var(c)))
plt.plot(cc,[mean(c)]*len(c),'--',label=str(mean(c)))  # mean reference line
plt.plot(cc,[0]*len(c),'.w')  # white (invisible) points pin the y-axis at 0
plt.title("Sequential Parallel Ratio")
plt.legend(loc='lower center')
plt.show()
|
import numpy as np
import os
import matplotlib.pyplot as plt
import warnings
import pickle
from collections import defaultdict
from nltk import pos_tag, word_tokenize
warnings.simplefilter("ignore")
def dd():
    """Factory returning a fresh defaultdict(int).

    Kept as a named module-level function (rather than a lambda), presumably
    so defaultdicts built from it remain picklable -- TODO confirm.
    """
    return defaultdict(int)
def get_actions():
    """Load the vocabulary pickle and return a {token: index} mapping."""
    with open('./Data/vocab.pkl','rb') as f:
        vocab = pickle.load(f)
    return {token: idx for idx, token in enumerate(vocab)}
def getReward(reward_func):
    """Return the reward table selected by *reward_func*.

    1: word-word co-occurrence
    2: pos-pos co-occurrence
    3: product of word-word and pos-pos co-occurrence
    4: average of word-word and pos-pos co-occurrence
    Any other value yields None (mirrors the original fall-through).
    """
    if reward_func == 1:
        return word_cooc_reward()
    elif reward_func == 2:
        return pos_cooc_reward()
    elif reward_func == 3:
        return word_pos_reward('prod')
    elif reward_func == 4:
        return word_pos_reward('avg')
    return None
def word_cooc_reward():
    """Load and return the word-word co-occurrence table from disk."""
    with open('./Data/word_cooccurrence.pkl','rb') as table_fp:
        return pickle.load(table_fp)
def pos_cooc_reward():
    """Load and return the POS-POS co-occurrence table from disk."""
    with open('./Data/pos_cooccurrence.pkl','rb') as table_fp:
        return pickle.load(table_fp)
def word_pos_reward(combine):
    """Return rewards combining word-word and POS-POS co-occurrence scores.

    combine: 'prod' -> product of the two scores, 'avg' -> their mean.
    The combined table is cached on disk and rebuilt from the word and POS
    co-occurrence pickles when the cache is missing.
    """
    # Bug fix: the existence check previously looked for
    # './Data/word_pos_<combine>' while the cache was written to
    # './Data/word_pos_<combine>.pickle', so the cache was never reused.
    cache_path = './Data/word_pos_%s.pickle' % combine
    if os.path.exists(cache_path):
        with open(cache_path, 'rb') as f:
            rewards = pickle.load(f)
    else:
        with open('./Data/pos_cooccurrence.pkl', 'rb') as f:
            pos_cooc = pickle.load(f)
        with open('./Data/word_cooccurrence.pkl', 'rb') as f:
            word_cooc = pickle.load(f)
        rewards = defaultdict(dd)
        for key, val in word_cooc.items():
            for word, score in val.items():
                # POS-tag the bigram once and look up its POS-POS score
                tagged_bigram = pos_tag([key, word])
                pos_score = pos_cooc[tagged_bigram[0][1]][tagged_bigram[1][1]]
                if combine == 'prod':
                    rewards[key][word] = pos_score * score
                if combine == 'avg':
                    rewards[key][word] = (pos_score + score) / 2
        with open(cache_path, 'wb') as f:
            pickle.dump(rewards, f)
    return rewards
#def scale(val, old_min, old_max, new_min, new_max):
# new_val = (val - old_min)/(old_max - old_min)
# return new_val
#def count(number, base, shape):
# c = np.zeros(shape=shape)
# i = c.shape[0] - 1
# while number >= base:
# remainder = number % base
# c[i] = remainder
# i -= 1
# number = number / base
# if number != 0 and number < base:
# c[i] = number
# return c
def plot(data, method, trials, NEPS, eps, alp, g):
    """Plot mean +/- std of returns per episode across trials and save it.

    data: 2-D array-like -- assumed shape (NEPS, trials); TODO confirm.
    Returns the mean return of the final episode.
    """
    mean = np.mean(data, axis=1)
    # variance across trials for each episode
    variance = np.mean(np.square(data.T - mean).T, axis=1)
    std = np.sqrt(variance)
    x = list(np.arange(0, NEPS, 1))
    y = list(mean)
    # Fix: the original used the Python-2-only print statement; the
    # print() call below works on both Python 2 and Python 3.
    print('Length of x: {} length of y: {}'.format(len(x), len(y)))
    err = list(std)
    plt.axis((0, NEPS, 0, 15))
    plt.errorbar(x, y, yerr=err, fmt='-ro')
    plt.xlabel('Episode')
    plt.ylabel('Expected return of reward')
    plt.title('%s for %d trials, epsilon: %.4f, alpha: %.2f, gamma: %.2f' % (method, trials, float(eps), float(alp), float(g)))
    plt.savefig('Expected_Return_%s_%d_unclipped.jpg' % (method, trials))
    plt.show()
    return mean[-1]
def log(method, trials, eps, gamma, alpha, maxima=None, time=0):
    """Append one run-summary line to the local 'log' file.

    The original implementation read the whole file and rewrote it (and,
    when the file was missing, iterated a string character by character);
    appending produces the identical final file content with one write.
    """
    entry = ('method: {0}, trials: {1}, epsilon: {2}, gamma: {3}, '
             'alpha: {4}, maximum value: {5}, time taken: {6}\n').format(
                 method, trials, eps, gamma, alpha, maxima, time)
    with open('log', 'a') as f:
        f.write(entry)
|
from collections import defaultdict
from sugarrush.solver import SugarRush
from garageofcode.common.utils import flatten_simple
N = 3
def get_state(solver):
    """Allocate a one-hot board state: board[i][j][k] <=> tile (i, j) holds value k."""
    board = [[[solver.var() for _ in range(N**2)]
              for _ in range(N)]
             for _ in range(N)]
    # exactly one value per tile
    for tile_vars in flatten_simple(board):
        solver.add(solver.equals(tile_vars, 1))
    return board
def get_transition(solver, X0, X1):
    """Build CNF clauses forcing X1 to be X0 after exactly one legal swap.

    A swap exchanges two horizontally or vertically adjacent tiles; value 0
    appears to encode the empty tile (the 'hot' literal below is channel 0).
    Returns a list of clauses to add to the solver.
    """
    # map each square to the swap variables touching it, and each swap
    # variable to the pair of squares it exchanges
    ij2swaps = defaultdict(list)
    swap2ijs = {}
    for i in range(N):
        for j in range(N):
            if j < N - 1:
                # horizontal swap between (i, j) and (i, j+1)
                swap = solver.var()
                swap2ijs[swap] = [(i, j), (i, j+1)]
                ij2swaps[(i, j)].append(swap)
                ij2swaps[(i, j+1)].append(swap)
            if i < N - 1:
                # vertical swap between (i, j) and (i+1, j)
                swap = solver.var()
                swap2ijs[swap] = [(i, j), (i+1, j)]
                ij2swaps[(i, j)].append(swap)
                ij2swaps[(i+1, j)].append(swap)
    cnf = []
    for i in range(N):
        for j in range(N):
            hot = X0[i][j][0]
            # if the empty square is on (i, j) (is 'hot'),
            # then one of the adjacent swaps must be used
            cnf.append([-hot] + ij2swaps[(i, j)])
            # the non-adjacent swaps must be 0
            for swap in swap2ijs:
                if swap not in ij2swaps[(i, j)]:
                    cnf.append([-hot, -swap])
    for swap, ijs in swap2ijs.items():
        # if swap is true, then the adjacent tiles should swap values
        (il, jl), (ir, jr) = ijs  # left/right
        for x0l, x1r in zip(X0[il][jl], X1[ir][jr]):
            # swap => x0l == x1r
            cnf.extend([[-swap, x0l, -x1r], [-swap, -x0l, x1r]])
        for x0r, x1l in zip(X0[ir][jr], X1[il][jl]):
            # swap => x0r == x1l
            cnf.extend([[-swap, x0r, -x1l], [-swap, -x0r, x1l]])
        for ij in ij2swaps:
            # if tile is not adjacent to swap,
            # then X1 = X0 in that tile
            if ij not in ijs:
                i, j = ij
                for x0, x1 in zip(X0[i][j], X1[i][j]):
                    # swap => x0 == x1
                    cnf.extend([[-swap, x0, -x1], [-swap, -x0, x1]])
    swaps = list(swap2ijs.keys())
    cnf.extend(solver.equals(swaps, 1))  # only one swap per turn
    return cnf
def set_state(X0, ij2k=None):
    """Build unit clauses pinning board X0 to the assignment ij2k.

    ij2k maps (row, col) -> tile value. It is effectively required: the
    None default was previously dereferenced unconditionally and crashed
    with a TypeError, so fail fast with a clear error instead.
    """
    if ij2k is None:
        raise ValueError("set_state requires an ij2k assignment mapping")
    cnf = []
    for i in range(N):
        for j in range(N):
            target = ij2k[(i, j)]
            for k in range(N**2):
                # positive literal for the assigned value, negative otherwise
                if k == target:
                    cnf.append([X0[i][j][k]])
                else:
                    cnf.append([-X0[i][j][k]])
    return cnf
def print_solve(solver, Xr):
    """Print the solved board, one row per line, as lists of tile values."""
    for row in Xr:
        decoded = []
        for tile in row:
            values = [solver.solution_value(v) for v in tile]
            # the index of the single 1 in the one-hot vector is the value
            decoded.append(values.index(1))
        print(decoded)
def main():
    """Search even swap-sequence lengths 4..22 for the shortest plan that
    turns the ordered board into the board with tiles (0,1)/(0,2) exchanged."""
    for r in range(4, 24, 2):
        solver = SugarRush()
        # r is the number of steps, i.e. number of states minus one
        X = [get_state(solver) for _ in range(r+1)]
        # start state: tile (i, j) holds value i*N + j (row-major)
        ij2k = {(i, j): i * N + j for j in range(N) for i in range(N)}
        cnf = set_state(X[0], ij2k)
        solver.add(cnf)
        # chain consecutive states with one-swap transition constraints
        for X0, X1 in zip(X, X[1:]):
            cnf = get_transition(solver, X0, X1)
            solver.add(cnf)
        # goal state: same as start but values 1 and 2 exchanged
        ij2k[(0, 1)] = 2
        ij2k[(0, 2)] = 1
        cnf = set_state(X[-1], ij2k)
        solver.add(cnf)
        satisfiable = solver.solve()
        if satisfiable:
            # print the whole state sequence for the first length that works
            print(r)
            for x in X:
                print_solve(solver, x)
                print()
            return
        else:
            print(r, "not satisfiable")


if __name__ == '__main__':
    main()
from skmultiflow.core import BaseSKMObject, ClassifierMixin
from skmultiflow.utils import get_dimensions
from collections import deque
import numpy as np
class OracleClassifier(BaseSKMObject, ClassifierMixin):
    """Oracle recommender for testing purposes.

    Predicts the stream's current sample label (``current_sample_y``) for
    every input row, so it is always "right" about the current sample.

    Parameters
    ----------
    stream: Stream
        The stream from which to draw the samples.
    """

    def __init__(self, stream):
        super().__init__()
        self.stream = stream

    def partial_fit(self, X, y, classes=None, sample_weight=None):
        # The oracle has nothing to learn.
        return

    def predict(self, X):
        # One copy of the stream's current ground-truth label per row of X.
        n_rows, _ = get_dimensions(X)
        truth = self.stream.current_sample_y
        return np.array([truth for _ in range(n_rows)])

    def predict_proba(self, X):
        """Not implemented for this method."""
        raise NotImplementedError
|
import pickle
import os
def mutex_process(filename):
    """Try to acquire the mutex for *filename*.

    Returns True when this caller created '<filename>.mutex' (and the
    target file does not exist yet), False otherwise.
    """
    if os.path.exists(filename):
        return False
    lock_path = filename + ".mutex"
    try:
        # "x" mode fails atomically if the lock file already exists
        with open(lock_path, "x"):
            pass
    except FileExistsError:
        return False
    return True
def mutex_save(obj, filename):
    """Pickle *obj* to *filename* and release the corresponding mutex file.

    Preconditions: *filename* must not exist yet, and the lock file
    '<filename>.mutex' must already exist (created by mutex_process).
    """
    lock_path = filename + ".mutex"
    if os.path.exists(filename):
        raise Exception("[mutex_save] file '" + filename + "' already exists")
    if not os.path.exists(lock_path):
        raise Exception("[mutex_save] file '" + lock_path + "' does not exist")
    with open(filename, 'wb') as outfile:
        pickle.dump(obj, outfile)
    os.remove(lock_path)
def mutex_update(obj, filename):
    """Overwrite *filename* with a fresh pickle of *obj* (no mutex checks)."""
    with open(filename, 'wb') as handle:
        pickle.dump(obj, handle)
def mutex_load(filename):
    """Load and return the pickled object stored in *filename*."""
    with open(filename, 'rb') as handle:
        return pickle.load(handle)
#mutex_process("/tmp/tt.pkl")
#mutex_save({'tt':[0,2]}, "/tmp/tt.pkl")
#tt = mutex_load("/tmp/tt.pkl")
|
import re
from functools import reduce
import nltk
import spacy
class Preprocessor(object):
    """
    Cleans, removes stopwords and tokenizes lines
    """
    def __init__(self):
        # Stopwords
        nltk.download('stopwords', quiet=True, raise_on_error=True)
        # Sentence Tokenizer
        nltk.download('punkt', quiet=True, raise_on_error=True)
        # stopwords kept both as a tokenized list and as a set for lookups
        self._tokenized_stop_words = nltk.word_tokenize(' '.join(nltk.corpus.stopwords.words('english')))
        self._stop_words = set(nltk.corpus.stopwords.words('english'))
        # Porter stemmer
        self.stemmer = nltk.stem.PorterStemmer()
        # spacy
        # python -m spacy download en_core_web_sm
        self.EN = spacy.load('en_core_web_sm')

    def stem_word(self, word):
        """Return the Porter stem of *word*."""
        return self.stemmer.stem(word)

    def tokenize_string(self, line):
        """NLTK-tokenize *line*, stem every token, keep only alphanumeric ones."""
        tokens = nltk.word_tokenize(line)
        tokens = (self.stem_word(token) for token in tokens)
        tokens = [token for token in tokens if token.isalnum()]
        return list(tokens)

    @staticmethod
    def word_split(text):
        """
        Split a text in words. Returns a list of tuple that contains
        (word, location) location is the starting byte position of the word.
        """
        word_list = []
        w_current = []
        w_index = None
        for i, c in enumerate(text):
            if c.isalnum():
                w_current.append(c)
                w_index = i
            elif w_current:
                # a non-alphanumeric character terminates the current word
                word = u''.join(w_current)
                word_list.append((w_index - len(word) + 1, word))
                w_current = []
        if w_current:
            # flush the trailing word, if any
            word = u''.join(w_current)
            word_list.append((w_index - len(word) + 1, word))
        return word_list

    def words_cleanup(self, words):
        """
        Stems words and removes
        words with length less then a minimum and stopwords.
        """
        cleaned_words = []
        for index, word in words:
            # drop short words, stopwords and non-alphanumeric tokens
            if len(word) < 3 or word in self._stop_words or word in self._tokenized_stop_words or not str(
                    word).isalnum():
                continue
            word = self.stem_word(word)
            cleaned_words.append((index, word))
        return cleaned_words

    def word_index(self, text):
        """
        Just a helper method to process a text.
        It calls word split, normalize and cleanup.
        """
        words = self.word_split(text)
        words = self.words_cleanup(words)
        return words

    def remove_stopwords(self, text) -> str:
        '''this function will remove stopwords from text '''
        # NOTE(review): the result carries a trailing space when any word
        # survives -- confirm callers tolerate that.
        final_text = ''
        for word in text.split():
            if word not in self._stop_words:
                final_text += word + ' '
        return final_text

    def clean_str(self, text, stopwords=True) -> str:
        """Strip non-ASCII characters, punctuation and extra whitespace,
        lowercase the text and optionally remove stopwords."""
        import re
        # drop any non-ASCII characters
        text = (text.encode('ascii', 'ignore')).decode("utf-8")
        # remove HTML entities such as &amp;
        text = re.sub("&.*?;", "", text)
        text = re.sub("[\]\|\[\@\,\$\%\*\&\\\(\)\":]", "", text)
        text = re.sub("-", " ", text)
        text = re.sub("\.+", "", text)
        text = re.sub("^\s+", "", text)
        text = re.sub("\.+", "", text)
        # text = re.sub("[_]{2,}", "", text)
        text = re.sub("[/]", " ", text)
        # text = re.sub("[0-9\n\t?!]", " ", text)
        # collapse runs of spaces (note: removes them rather than joining with one)
        text = re.sub("[ ]{2,}", "", text)
        text = text.lower()
        if stopwords:
            text = self.remove_stopwords(text)
        return text

    # ###########################################################################
    @staticmethod
    def to_lowercase(words):
        """Convert all characters to lowercase from list of tokenized words"""
        new_words = []
        for word in words:
            new_word = word.lower()
            new_words.append(new_word)
        return new_words

    @staticmethod
    def remove_punctuation(words):
        """Remove punctuation from list of tokenized words"""
        new_words = []
        for word in words:
            new_word = re.sub(r'[^\w\s]', '', word)
            if new_word != '':
                new_words.append(new_word)
        return new_words

    def remove_stopwords_2(self, words):
        """Remove stop words from list of tokenized words"""
        new_words = []
        for word in words:
            if word not in self._stop_words:
                new_words.append(word)
        return new_words

    def normalize(self, words, stopwords):
        """Lowercase and de-punctuate *words*; drop stopwords when requested."""
        words = self.to_lowercase(words)
        words = self.remove_punctuation(words)
        if stopwords:
            words = self.remove_stopwords_2(words)
        return words

    def tokenize_text(self, text):
        """Apply tokenization using spacy to docstrings."""
        tokens = self.EN.tokenizer(text)
        return [token.text.lower() for token in tokens if not token.is_space]

    def preprocess_text(self, text, stopwords=True) -> str:
        """Tokenize with spaCy, normalize, and rejoin into one string."""
        return ' '.join(self.normalize(self.tokenize_text(text), stopwords))
class Indexer(object):
    """Builds and queries (multi-document) inverted indexes."""

    def __init__(self, preprocessor):
        self.preprocessor = preprocessor

    def inverted_index(self, text):
        """
        Create an Inverted-Index of the specified text document.
        {word:[locations]}
        """
        index = {}
        for position, token in self.preprocessor.word_index(text):
            index.setdefault(token, []).append(position)
        return index

    @staticmethod
    def inverted_index_add(inverted, doc_id, doc_index):
        """
        Add Inverted-Index doc_index of the document doc_id to the
        Multi-Document Inverted-Index (inverted),
        using doc_id as document identifier.
        {word:{doc_id:[locations]}}
        """
        for token, positions in doc_index.items():
            inverted.setdefault(token, {})[doc_id] = positions
        return inverted

    def search(self, inverted, query):
        """
        Returns a set of documents id that contains all the words in your query.
        """
        doc_sets = [set(inverted[token].keys())
                    for _, token in self.preprocessor.word_index(query)
                    if token in inverted]
        return reduce(lambda a, b: a & b, doc_sets) if doc_sets else []
|
from worker import WorkerThread, Task
from scheduler import SchedulerThread
from time import sleep
class WorkerPool(object):
    """
    maintain asynchronous task distrubution
    over group ow working threads
    """
    # time the scheduler sleeps when all workers are busy; the lower it is,
    # the more CPU the scheduler loop consumes over a single thread
    DEFAULT_TICK = 0.01
    DEFAULT_WORKER_COUNT = 4

    def __init__(self,*args, **kwargs):
        # recognized kwargs: count (worker threads), all_done (callback when
        # the queue drains), tick (poll interval), mp (passed to WorkerThread)
        if kwargs is not None:
            if 'count' in kwargs:
                self._wcnt = kwargs['count']
            else:
                self._wcnt = WorkerPool.DEFAULT_WORKER_COUNT
            if 'all_done' in kwargs:
                self._all_done = kwargs['all_done']
            else:
                # no-op default callback
                def ___pass( *args, **kwargs ):
                    pass
                self._all_done = ___pass
            if 'tick' in kwargs:
                self._tick = kwargs['tick']
        self._wpol = list([]) # all workers
        self._wbsy = dict({}) # busy
        self._wfre = list([]) # free
        self._tque = list([]) # task queue
        # NOTE(review): this unconditionally overwrites a 'tick' value passed
        # via kwargs above -- confirm whether the kwarg was meant to win.
        self._tick = WorkerPool.DEFAULT_TICK # sleep time if all busy
        self._wrkr = SchedulerThread(0) # task schedule is worker
        self._task = Task() # taks for scheduler
        self._task.target = self._task_schedule
        self._task.args = tuple()
        self._task.kwargs = dict()
        self._ball_done = False

        def _on_worker_task_done( worker ):
            """
            default calback for scheduler
            when some worker is done with a task
            it is appended to free workers queue
            also marked free in busy dict
            """
            self._wfre.append(worker)
            self._wbsy[worker._id] = False
        self.____f = _on_worker_task_done
        # pool initialization
        # NOTE(review): xrange is Python 2 only; this raises NameError on
        # Python 3 (use range there).
        for i in xrange(1,self._wcnt+1) :
            if 'mp' in kwargs:
                w = WorkerThread(i,mp=kwargs['mp'])
            else:
                w = WorkerThread(i)
            w._pool = self # back reference
            w._worker_task_done = self.____f
            self._wpol.append(w)
            self._wfre.append(w)
            self._wbsy[i] = False
        self._wrkr._target = self._task_schedule
        self._wrkr._args = tuple()
        self._wrkr._kwargs = dict()

    def _task_schedule(self):
        """
        task scheduler worker target function:
        check for tasks in queue
        schedule to free workers
        """
        # loop until the scheduler thread is told to exit
        while not self._wrkr._bexit:
            if self._tque:
                # there is work queued; try to hand it to a free worker
                task = self._tque.pop()
                if self._wfre:
                    w = self._wfre.pop()
                    success = w._set_task(task)
                    if success:
                        self._wbsy[w._id] = True
                    else:
                        # attaching the task to the worker failed:
                        # mark it free again and re-queue the task
                        self._wbsy[w._id] = False
                        self._tque.append(task)
                        sleep(self._tick)
                else:
                    # no free workers: re-queue the task and poke the event
                    sleep(self._tick)
                    self._tque.append( task )
                    self._wrkr._e.clear()
                    self._wrkr._e.set()
            else:
                # nothing queued
                bsy_cnt = 0  # NOTE(review): unused -- confirm before removing
                if len(self._wfre) == len(self._wpol):
                    # every worker is idle: the pool has drained
                    self._ball_done = True
                    self._all_done( self )
                    break
                else:
                    sleep(self._tick)
                    self._wrkr._e.clear()
                    self._wrkr._e.set()

    def _task_schedule_done(self,task_result):
        # scheduler-finished hook; intentionally does nothing
        pass

    def _append_task(self,task):
        # enqueue a task; _commit_tasks() must be called to wake the scheduler
        self._tque.append(task)

    def _commit_tasks(self):
        # pulse the scheduler's event so it starts draining the queue
        self._wrkr._e.clear()
        self._wrkr._e.set()

    def _exit(self):
        # shut down all workers and the scheduler thread
        for w in self._wpol:
            w._exit()
        self._wrkr._exit()
|
from typing import List
class Solution:
    def canPartition(self, nums: List[int]) -> bool:
        """Return True if *nums* can be split into two subsets of equal sum.

        Classic 0/1-knapsack over a 1-D boolean table: dp[j] is True iff
        some subset of the processed numbers sums to j.
        Time O(len(nums) * sum(nums) / 2), space O(sum(nums) / 2).
        Improvements over the original: the current weight is hoisted out
        of the inner loop, the no-op `else: dp[j] = dp[j]` branch is gone,
        and the inner range stops at `weight` directly.
        """
        total = sum(nums)
        if total & 1:  # an odd total can never split evenly
            return False
        capacity = total // 2
        dp = [False] * (capacity + 1)
        dp[0] = True  # the empty subset sums to 0
        for weight in nums:
            # iterate j downwards so each number is used at most once
            for j in range(capacity, weight - 1, -1):
                dp[j] = dp[j] or dp[j - weight]
        return dp[capacity]
# Quick manual check of Solution.canPartition.
s = Solution()
nums = [1,5,11,5]
# NOTE: the line below overwrites the list above; only [1,2,3,10] is tested.
nums = [1,2,3,10]
print(s.canPartition(nums))
#import matplotlib.pyplot as plt
#import matplotlib.gridspec as gridspec
from scipy.spatial.distance import *
#import scipy.cluster.hierarchy as sch
import numpy as np
#from scipy.cluster.hierarchy import linkage, dendrogram
import scipy.stats
import sys
#from plot_pearsons_heatmap_hclust import *
# generated the change in time plot (containing the line) . if first entry is change 1962/1963 then start year is 1963
def genPlot(group):
    """Build and return a gnuplot script plotting per-country
    trading-partner sparsity scores for one group.

    group: (output_name, [country_codes]); each country's data is read
    from "scores/<country>.scores" and the script emits both a PNG and an
    EPS (converted to PDF via epstopdf).
    """
    out_file = group[0]
    countries = group[1]
    text = r'''
#### plotting group:''' + out_file + r''' ########
########## PNG PLOTS ###############
set output "scores/''' + out_file + '''.png"
set terminal png size 1200,500
set xlabel "Years" font "Times-Roman, 20"
set ylabel "Trading partner sparsity score" font "Times-Roman, 20"
set lmargin 11
set rmargin 15
set key font "Times-Roman, 20"
set key spacing 1.5
set key outside
set key right top
set grid xtics lt 0 lw 1 lc rgb "#bbbbbb"
#set xtics center offset 0,-1
set style line 1 lc rgb '#0060ad' lt 1 lw 3 pt 7 ps 1.5 # --- blue
set style line 2 lc rgb '#dd181f' lt 1 lw 3 pt 7 ps 1.5 # --- red
set style line 3 lc rgb '#FF00FF' lt 1 lw 3 pt 7 ps 1.5 # --- purple
set style line 4 lc rgb '#194719' lt 1 lw 3 pt 7 ps 1.5 # --- green
set style line 5 lc rgb '#993300' lt 1 lw 3 pt 7 ps 1.5 # --- brown
set style line 6 lc rgb '#00F2FF' lt 1 lw 3 pt 7 ps 1 # --- cyan
set style line 7 lc rgb '#000000' lt 1 lw 3 pt 7 ps 1 # --- black
'''
    # the EU accession plot zooms in on 1995-2010; everything else 1962-2010
    if (out_file == "eu_accession"):
        text += r'''set xrange [1995:2010]'''
    else:
        text += r'''set xrange [1962:2010]'''
    text += r'''
plot '''
    # one plot entry per country, with gnuplot line-continuations between them
    for i in range(len(countries)):
        country = countries[i]
        text += '''"scores/''' + country + r'''.scores" using 1:2 w lines ls ''' + str(i+1) + ''' title "''' + country + r'''"'''
        if (i < len(countries) - 1):
            text += r''', \
'''
    text += '''
########## POSTSCRIPT PLOTS ###############
set output "scores/''' + out_file + '''.eps"
set terminal postscript enhanced color
set size 1.0,0.7
plot '''
    # repeat the same plot entries for the postscript terminal
    for i in range(len(countries)):
        country = countries[i]
        text += '''"scores/''' + country + r'''.scores" using 1:2 w lines ls ''' + str(i+1) + ''' title "''' + country + r'''"'''
        if (i < len(countries) - 1):
            text += r''', \
'''
    text += '''
syscall=sprintf("epstopdf --outfile=scores/''' + out_file + '''2.pdf scores/''' + out_file + '''.eps" )
system syscall
'''
    return text
###############################
####### End of functions ######
###############################
# no arguments need to be passed
# I took out BGR and CYP because these countries are too small and had no trading partners at times
g7 = ("g7", ["USA", "CHN", "DEU", "FRA", "GBR"])
eastern_europe = ("eastern_europe", ["RUS", "POL", "DDR", "ROM", "CZE", "HUN", "SUN"])
argentina_related = ("argentina_related", ["ARG", "GRC"])
eu_accession = ("eu_accession", ["CZE", "POL", "SVK", "HUN"])
opec = ("opec", ["IRN", "SAU", "ARE"])
all_groups = [g7, eastern_europe, argentina_related, eu_accession, opec]
for group in all_groups:
    # Fix: print() works on both Python 2 and Python 3; the original used
    # the Python-2-only print statement and breaks under Python 3.
    print(genPlot(group))
|
# Gauss Jordan Elimination
# Algorithm to solve linear equations of the kind A * x = b, where A is the matrix, and b is solutions vector
# This algorithm was computed using partial pivoting - can be created without pivoting (usually not customary for these
# calculations) by deleting the block of code used to swap the rows.
#imports
import numpy as np
import math
import sympy
import pandas as pd
def gauss_jordan(A, b):
    """Solve A @ x = b by Gauss-Jordan elimination with partial pivoting.

    A is an N x N matrix and b a length-N column vector. Works on the
    augmented matrix [A | b | I] and returns [Ainv, x], where Ainv is the
    inverse of A and x is the solution vector.
    """
    n = len(A)
    aug = np.concatenate((A, b, np.eye(n)), axis=1)
    for col in range(n):
        # partial pivoting: bring the largest |entry| of this column
        # (at or below the diagonal) onto the diagonal
        pivot_row = col + int(np.argmax(np.abs(np.asarray(aug[col:, col]))))
        aug[[col, pivot_row]] = aug[[pivot_row, col]]
        # scale the pivot row so the diagonal entry becomes 1
        aug[col] = (1 / aug[col, col]) * aug[col]
        # eliminate this column from every other row
        for row in range(n):
            if row != col:
                aug[row] = aug[row] - aug[col] * aug[row, col]
    # column layout after elimination: [0..n-1] reduced A, [n] x, [n+1..2n] Ainv
    return [aug[:, n + 1:], aug[:, n]]
# Example
# RUN THIS CODE TO SEE EXAMPLE
# initializing an example matrix: 5 rows, columns 0-4 form A, column 5 is b
mat = "12.113 1.067 9.574 8.414 0.098 -0.046; 9.609 5.015 8.814 7.983 7.692 -11.655; 7.402 0.081 5.394 0.417 9.603 0.0623; 1.451 1.517 3.741 4.668 2.601 -1.351; 2.053 1.576 8.046 8.152 2.896 -0.227"
m_np = np.matrix(mat) # create the matrix
solve_gj = gauss_jordan(m_np[:,:5],m_np[:,5])
b_sol = m_np[:,:5] * solve_gj[1]  # A @ x -- should reproduce b
x_sol = solve_gj[0] * m_np[:,5]  # Ainv @ b -- should reproduce x
identity_mat_sol = m_np[:,:5] * solve_gj[0]  # A @ Ainv -- should be identity
# Show Results for the example
print('Gauss Jordan Elimination with partial pivoting:')
print('Ainv: ')
print(pd.DataFrame(solve_gj[0]))
print('x: ')
print(pd.DataFrame(solve_gj[1]))
print('A*x: ')
print(pd.DataFrame(b_sol))
print('Ainv*b: ')
print(pd.DataFrame(x_sol))
print('A*Ainv: ')
print(pd.DataFrame(identity_mat_sol))
|
import json
# parameters
filename = '/home/pi/pibs_client/system.json'
# functions
def getMacAddress():
    """Return this machine's MAC address as 12 hex characters (no colons).

    Interfaces are tried in preference order (wired first); a zero address
    is returned when none of the known interfaces is present.
    """
    import netifaces
    # find the network interfaces available
    available = netifaces.interfaces()
    for iface in ('eth0', 'en0', 'wlan0'):
        if iface in available:
            link_info = netifaces.ifaddresses(iface)[netifaces.AF_LINK]
            # strip the colons from the reported address
            return link_info[0]['addr'].replace(':', '')
    # if there is no connection, return a default and hope for best
    print('**ERROR: Unable to determine mac address....')
    print('-> Continuing and hoping for the best!')
    print('-> Using default mac address: 00:00:00:00:00:00')
    return '000000000000'
def getIPAddress():
    """Return the local host's IP address(es) as reported by `hostname -I`.

    Fix: the original used the `commands` module, which is Python-2-only
    (removed in Python 3). subprocess.check_output works on 2.7 and 3.x.
    """
    import subprocess
    output = subprocess.check_output(["hostname", "-I"])
    # decode bytes (Python 3) and strip the trailing newline, matching the
    # whitespace-stripped string commands.getoutput() used to return
    return output.decode().strip()
def create_system_json():
    """Write the client configuration (broker, MQTT topics, sensors and
    startup tasks) to *filename*, keyed by this device's MAC address."""
    macAddress = getMacAddress()
    myconfig = {
        "mac_address": macAddress,
        "broker_name": "fmnc.cse.nd.edu",
        # device-specific plus broadcast command/status topics
        "initial_topics": [
            "pibs/clients/command/"+macAddress,
            "pibs/clients/command/command_to_all",
            "pibs/clients/status/",
            "pibs/clients/status/"+macAddress
        ],
        "ip_address": getIPAddress(),
        # NOTE(review): the GPS sensor uses hard-coded dummy coordinates
        "sensors": [
            {"type": "sensors.clock.RaspberryClock", "hardware_id": "1"},
            {"type": "sensors.locationing.DummyGPS",
             "hardware_id": "2",
             "latitude": 41.704909999999998,
             "longitude": -86.240613999999994,
             "altitude": 777}
        ],
        "init_tasks": [{"task_name": "tasks.test.echo",
                        "arguments": {"topic": "pibs/clients/command/"+macAddress, "echo_text": "initialization"}}]
    }
    with open(filename,'w') as fp:
        json.dump(myconfig,fp,indent=4,sort_keys=True,separators=(',',':'))
# Script entry point: (re)generate system.json for this device.
if __name__=="__main__":
    create_system_json()
|
import parse
import os
def same_params(params1, params2):
    """Return True when both whitespace-separated parameter strings contain
    the same tokens in the same order (extra whitespace is ignored)."""
    return params1.split() == params2.split()
def compare_two_dicts_and_return_alter(db_dict1, db_dict2):
    """Produce SQL migrating schema db_dict2 towards db_dict1.

    Emits CREATE TABLE for tables missing from db_dict2, ALTER ... ADD for
    missing columns, and ALTER ... MODIFY for columns whose definitions
    differ (as judged by same_params).
    """
    statements = ''
    for table, columns in db_dict1.items():
        if table not in db_dict2:
            # whole table is missing: emit a CREATE TABLE statement
            body = ''
            for column, definition in columns.items():
                body = '%s\n `%s` %s,' % (body, column, definition)
            statements = '%s\n CREATE TABLE `%s` ( %s ) ENGINE=InnoDB DEFAULT CHARSET=utf8;' % (statements, table, body)
        else:
            # table exists: add or modify individual columns
            existing = db_dict2[table]
            chunk = ''
            for column, definition in columns.items():
                if column not in existing:
                    # ALTER TABLE `tablename` ADD `fieldname` [params];
                    chunk = '%s\n ALTER TABLE `%s` ADD `%s` %s;' % (chunk, table, column, definition)
                elif not same_params(definition, existing[column]):
                    # ALTER TABLE `tablename` MODIFY `fieldname` [params];
                    chunk = '%s\n ALTER TABLE `%s` MODIFY `%s` %s;' % (chunk, table, column, definition)
            statements = '%s %s' % (statements, chunk)
    return statements
def parse_db_to_dict(db_string=''):
    """Parse a MySQL dump into {table_name: {column_name: column_definition}}."""
    schema = {}
    for table in parse.findall("CREATE TABLE `{}` ({}) ENGINE=InnoDB", db_string):
        # table[0] = table name, table[1] = raw column definition block
        columns = {}
        for field in parse.findall("`{}` {},\n", table[1]):
            # field[0] = column name, field[1] = column definition
            columns[field[0]] = field[1]
        schema[table[0]] = columns
    return schema
# Command-line driver: diff two MySQL dumps and write the ALTER/CREATE
# statements that migrate the second schema towards the first.
import argparse
parser = argparse.ArgumentParser(description='Find diff in two MySQL dumps and create diff file with ALTER commands(like migration')
parser.add_argument('db_file1', type=str, nargs=1, help='dbdump1')
parser.add_argument('db_file2', type=str, nargs=1, help='dbdump2')
parser.add_argument('output_file', type=str, nargs=1, help='output file')
args = parser.parse_args()
args = vars(args)
# nargs=1 wraps each argument in a one-element list
path1 = args['db_file1'][0]
path2 = args['db_file2'][0]
path3 = args['output_file'][0]
with open(path1, 'r') as myfile:
    db1_string=myfile.read()
with open(path2, 'r') as myfile:
    db2_string=myfile.read()
db1_dict = parse_db_to_dict(db1_string)
db2_dict = parse_db_to_dict(db2_string)
diff_sql_alter = compare_two_dicts_and_return_alter(db1_dict, db2_dict)
with open(path3, 'w') as f:
    print(diff_sql_alter, file=f)
print('Success')
|
import networkx as nx
def make(name):
    """Return a preset undirected topology graph.

    name: 'eight' (small 7-node test network) or 'srg' (21-node Swiss
    network; see node2city below). Any other name returns None.
    Edge attributes: bw (bandwidth / line thickness), lat (latency).
    """
    if name=='eight':
        G=nx.Graph()
        # add nodes and edges to the graph
        G.add_edge(0,1, bw=3, lat=1)
        G.add_edge(0,2, bw=3, lat=1)
        G.add_edge(1,3, bw=3, lat=0.4)
        G.add_edge(2,3, bw=3, lat=1)
        G.add_edge(3,4, bw=3, lat=1)
        G.add_edge(3,5, bw=3, lat=0.2)
        G.add_edge(4,6, bw=3, lat=0.5)
        G.add_edge(5,6, bw=3, lat=1)
        return G
    elif name=='srg':
        G=nx.Graph()
        # add nodes and edges to the graph
        # bw: line thickness
        # latency unit: 10e-4 seconds (geo distance / light)
        G.add_edge(1,2, bw=5, lat=2.515)
        G.add_edge(1,4, bw=5, lat=4.591)
        G.add_edge(1,9, bw=5, lat=1.317)
        G.add_edge(2,11, bw=5, lat=2.076)
        G.add_edge(2,10, bw=5, lat=0.897)
        G.add_edge(2,7, bw=5, lat=3.162)
        G.add_edge(2,15, bw=5, lat=1.333)
        G.add_edge(2,3, bw=15, lat=3.207)
        G.add_edge(2,6, bw=15, lat=5.182)
        G.add_edge(3,12, bw=5, lat=1.033)
        G.add_edge(3,16, bw=15, lat=0.922)
        G.add_edge(4,16, bw=15, lat=1.708)
        G.add_edge(8,14, bw=5, lat=1.69)
        G.add_edge(14,5, bw=5, lat=3.574)
        G.add_edge(13,4, bw=5, lat=2.819)
        G.add_edge(4,5, bw=5, lat=1.685)
        G.add_edge(4,6, bw=15, lat=6.258)
        G.add_edge(5,17, bw=5, lat=4.303)
        G.add_edge(17,2, bw=5, lat=3.236)
        G.add_edge(5,19, bw=5, lat=3.102)
        G.add_edge(5,18, bw=5, lat=2.368)
        G.add_edge(19,20, bw=5, lat=1.634)
        G.add_edge(20,6, bw=5, lat=2.702)
        G.add_edge(6,21, bw=5, lat=0.733)
        G.add_edge(6,7, bw=5, lat=3.496)
        G.add_edge(6,11, bw=5, lat=5.41)
        G.add_edge(6,15, bw=5, lat=3.866)
        return G

# mapping of 'srg' node ids to the Swiss cities they represent
node2city={1: 'Basel', 2: 'Zurich', 3: 'Bern', 4: 'Lausanne', 5: 'Genf', 6: 'Lugano', 7: 'Chur', 8: 'Delemont', 9: 'Aarau', 10: 'Rapperswil', 11: 'St. Gallen', 12: 'Solothurn', 13: 'Biel', 14: 'Neuenburg', 15: 'Luzern', 16: 'Friburg', 17: 'Thun', 18: 'Martigny', 19: 'Sion', 20: 'Brig', 21: 'Locarno'}
|
# -*- coding: utf-8 -*-
"""
DON'T TOUCH THIS CLASS
Base Class for AI logic. Contains functionality to play a game of battleship.
You may modify the two child classes which inherit from this one
Created on Sat Mar 21 11:48:51 2020
@author: Kyle
"""
import numpy as np
class AIPlayer:
    """Base class for battleship AI players (per the file header: do not
    modify this class; override behavior in its child classes)."""

    # initialization of a player, this method is only called once, at the
    # beginning of each match, not between every game.
    def __init__(self):
        # list of ships in ordered pairs i.e. [[row, col], [row, col]],
        # see the placePieces method
        self.pieces = []
        self.amIFirst = False

    # A method to prepare for a new game.
    # This method is called at the beginning of each game, so up to 1000
    # times in a single match.
    def startGame(self):
        # The result of the last move; the game manager updates this after
        # each shot. Miss = 0, Hit = 1, Sink = -1
        self.lastMoveResult = 0
        # Set by the GameManager; if it is still [-1, -1] then you move first.
        self.OtherplayersLastMove = [-1,-1]
        # place your pieces on the board
        self.placePieces()

    # Basic function for making a shot.
    # GameManager will call this method every time it is your turn.
    def makeMove(self):
        # Select a random row and column
        r = np.random.randint(10)
        c = np.random.randint(10)
        # Return the row and column numbers; the grid is 10x10 so 0-9 are valid
        return [r,c]

    # Basic function for placing your pieces on the board at the beginning
    # of the game.
    def placePieces(self):
        Battleship = [[0,1],[0,2],[0,3],[0,4]]
        ptBoat = [[7,7],[8,7]]
        AircraftCarrier = [[0,9],[1,9],[2,9],[3,9],[4,9]]
        Destroyer1 = [[9,0],[9,1],[9,2]]
        Destroyer2 = [[7,5],[7,4],[7,3]]
        # please note that the following code would not be a valid ship placement
        # Destroyer2 = [[7,4],[7,5],[7,3]] # pieces need to be in order either forward or backward
        self.pieces = [Battleship, ptBoat, AircraftCarrier, Destroyer1, Destroyer2]

    # Function for game clean-up and analysis; not necessary for basic play
    # but still available for you to override.
    def gameOver(self, opponentsPieces, opponentsShots):
        pass
from copy import deepcopy
from django import forms
from django.contrib import admin
from mezzanine.pages import admin as pages_admin
from .models import CategoryLink
# Clone Mezzanine's PageAdmin fieldsets and surface the linked blog category
# right after the first field. (The misspelled name "FIEDSETS" is kept: it is
# part of this module's public surface.)
CATEGORY_LINK_FIEDSETS = deepcopy(pages_admin.PageAdmin.fieldsets)
CATEGORY_LINK_FIEDSETS[0][1]['fields'].insert(1, 'blog_category')
class CategoryLinkForm(forms.ModelForm):
    """Admin form for CategoryLink pages.

    Fix: ModelForm reads the model from an inner Meta class; the original
    bare `model = CategoryLink` class attribute is silently ignored.
    """
    class Meta:
        model = CategoryLink
        fields = '__all__'
class CategoryLinkAdmin(pages_admin.PageAdmin):
    # Page admin exposing blog_category via the cloned fieldsets above.
    form = CategoryLinkForm
    fieldsets = CATEGORY_LINK_FIEDSETS

admin.site.register(CategoryLink, CategoryLinkAdmin)
|
# Copyright (C) 2013-2015 Computer Sciences Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import os
import shutil
import hashlib
import uuid
import ezRPConfig as gConfig
def hash_md5(data):
    """Return the hex MD5 digest of *data* (a bytes-like object).

    Was a lambda assignment; a def is the idiomatic, debuggable form (PEP 8).
    """
    return hashlib.md5(data).hexdigest()


def remove_file_if_exists(tfile):
    """Delete *tfile* if it is a regular file.

    Keeps the original short-circuit semantics: the return value is always
    falsy (os.remove returns None; the isfile check yields False).
    """
    return os.path.isfile(tfile) and os.remove(tfile)
def rootDirs(path):
    """Return the names of the immediate subdirectories of *path*.

    Fix: `os.walk(path).next()` is Python-2-only; the next() builtin works
    on both Python 2 and Python 3.
    """
    dirs = next(os.walk(path))[1]
    return list() if len(dirs) == 0 else dirs
# def hashMD5File(tfile):
# with open(tfile, 'rb') as f:
# data = f.read()
# return str(hash_md5(data))
# return None
def deletePath(path):
    """Recursively delete *path* and everything beneath it."""
    shutil.rmtree(path)
def ensurePathExists(path):
    """Create *path* (including intermediate directories) if it is missing.

    Uses EAFP to close the check-then-create race: with the original
    exists()/makedirs() pair, a concurrent creator could make makedirs raise.
    """
    try:
        os.makedirs(path)
    except OSError:
        # the directory already existing is fine; re-raise real failures
        if not os.path.isdir(path):
            raise
def getTempfile():
    """Return a fresh unique file path under <workingDirectory>/tmp,
    creating the tmp directory if necessary (the file itself is not created)."""
    path = os.path.join(gConfig.workingDirectory,'tmp')
    ensurePathExists(path)
    return os.path.join(path, str(uuid.uuid4()))
def ifPathExists(path):
    """Return True when *path* exists (file or directory)."""
    return os.path.exists(path)
def copyPath(src, des):
    '''
    Copy src directory into des directory
    Note: The destination directory must not already exist
    (shutil.copytree raises if it does)
    '''
    shutil.copytree(src, des)
|
# -*- coding: utf-8 -*-
"""
"""
import os, matplotlib.pyplot as plt, numpy as np
class PlotGraph:
    """Renders per-stock train/test accuracy bar charts and saves them."""

    def getBarPlot(self,stockCat, finalList, col, numFeat, folderPath):
        """Draw two bar charts (train and test accuracy, in percent) for the
        stocks in finalList and save them as PNGs under folderPath.

        finalList: list of dicts with keys 'stock', 'train_acc', 'test_acc'
        (accuracies assumed to be fractions in [0, 1] -- TODO confirm).
        """
        my_xticks = [item['stock'] for item in finalList]
        x = np.array([i+1 for i in range(len(my_xticks))])
        # shrink tick-label font when there are many stocks
        if len(my_xticks)>30:
            fSize = 5
        else:
            fSize = 7
        # Train accuracy chart (the original comment said "Test", but the
        # data plotted here is train_acc and it is saved as Train.png)
        y_test = np.array([item['train_acc']*100 for item in finalList])
        plt.bar(x, y_test, align = 'center', color=col)
        plt.xticks(x, my_xticks, fontsize=fSize ,rotation='vertical')
        plt.title(stockCat + ' ('+ str(numFeat) + ' features) ')
        plt.xlabel('Stocks')
        plt.ylabel("Train Accuracy (%)")
        fig = plt.gcf()
        plt.show()
        plt.draw()
        # save the train-accuracy plot under folderPath
        if not os.path.exists(folderPath):
            os.makedirs(folderPath)
        fig.savefig(folderPath+'Train.png', format='png', bbox_inches='tight', dpi=1000)
        # Test accuracy chart (original comment said "Train"; this one
        # plots test_acc and is saved as Test.png)
        y_test = np.array([item['test_acc']*100 for item in finalList])
        plt.bar(x, y_test, align = 'center', color=col)
        plt.xticks(x, my_xticks, fontsize=fSize ,rotation='vertical')
        plt.title(stockCat + ' ('+ str(numFeat) + ' features) ')
        plt.xlabel('Stocks')
        plt.ylabel("Test Accuracy (%)")
        fig = plt.gcf()
        plt.show()
        plt.draw()
        # save the test-accuracy plot under folderPath
        if not os.path.exists(folderPath):
            os.makedirs(folderPath)
        fig.savefig(folderPath+'Test.png', format='png', bbox_inches='tight', dpi=1000)
|
def bubble_sort(alist):
    """Sort *alist* in place (ascending) using bubble sort.

    Each outer sweep floats the largest remaining element to the end.
    If a full sweep performs no swap the list is already sorted, so the
    function returns early.
    """
    length = len(alist)
    for sweep in range(length - 1):
        swapped = False
        # only the unsorted prefix needs to be scanned on each sweep
        for idx in range(length - 1 - sweep):
            if alist[idx] > alist[idx + 1]:
                alist[idx], alist[idx + 1] = alist[idx + 1], alist[idx]
                swapped = True
        if not swapped:
            return
if __name__ == "__main__":
    # Demo: sort a sample list in place and show before/after.
    li = [54,26,93,17,77,31,44,55,20]
    print(li)
    bubble_sort(li)
    print(li)
|
# coding:utf-8
from flask import Flask, render_template
from sqlalchemy import create_engine, MetaData, Table
from sqlalchemy.orm import sessionmaker
app = Flask(__name__)
# Connect to the database (SQLite file in the working directory).
engine = create_engine('sqlite:///smzdm.db')
metadata = MetaData(bind=engine)
Session = sessionmaker(bind=engine)
# Reflect the existing `items` table instead of declaring a model class.
Item = Table('items', metadata, autoload=True)
@app.route('/', methods=['GET'])
def index():
    """Render the home page with the 30 items from get_30_items()."""
    items = get_30_items()
    return render_template('index.html', items=items)
def get_30_items():
    """Return 30 items ordered by update_time (ascending).

    NOTE(review): ascending order yields the *oldest* items first; if the
    page is meant to show the latest entries this probably wants
    .order_by(Item.c.update_time.desc()) — confirm intent.
    """
    session = Session()
    items=session.query(Item).order_by(Item.c.update_time).limit(30).all()
    session.close()
    return items
if __name__ == "__main__":
    # Quick sanity check on stdout, then start the dev server (debug only).
    print(get_30_items()[0])
    app.run(port=8642, debug=True)
# Standard library
import os
# Third-party packages
from flask import (
    Flask, request, render_template, send_from_directory,
    redirect, url_for, abort
)
from werkzeug.datastructures import FileStorage
# Local modules of this project
from helper import random_filename, ensure_folder
# Absolute directory this module lives in; uploads go to ./upload next to it.
ROOT = os.path.dirname(os.path.abspath(__file__))
app = Flask(__name__)
app.upload_folder = os.path.join(ROOT, 'upload')
ensure_folder(app.upload_folder)
@app.route('/')
def view_index():
    """Render the upload form page."""
    return render_template('index.html')
@app.route('/upload', methods=['POST'])
def view_upload():
    """Accept a file from the 'myfile' form field and store it on disk.

    The stored name comes from helper.random_filename (presumably a
    collision-avoiding rename — confirm in helper.py). Responds 400 when
    the field is missing or is not a file upload.
    """
    # request.POST only captures form-data; request.files holds the upload.
    myfile = request.files.get('myfile')
    if isinstance(myfile, FileStorage):
        new_name = random_filename(myfile.filename)
        # TODO: persist new_name to the database
        save_path = os.path.join(app.upload_folder, new_name)
        myfile.save(save_path)
        return redirect(url_for('view_demo'))
    else:
        abort(400)
@app.route('/image/<path:filename>')
def view_image(filename):
    """Serve an uploaded image from the upload folder by filename.

    NOTE: per the Flask docs, send_from_directory itself refuses paths
    outside the directory and 404s on missing files, so the explicit
    exists-check below mainly makes the 404 path visible in this code.
    """
    full_path = os.path.join(app.upload_folder, filename)
    if not os.path.exists(full_path):
        return abort(404)
    return send_from_directory(app.upload_folder, filename)
@app.route('/demo')
def view_demo():
    '''
    Render a gallery of all uploaded files.

    Example of the list as it would come from a database:
    file_list = [
        '52508804ef2a522f9295c59865d00c08@1235464.jpg',
        'eb6a60e57f7c5e5985befc0380168c6c@asd12543.jpg'
    ]
    '''
    # Currently the list is read straight from the filesystem instead.
    file_list = os.listdir(app.upload_folder)
    return render_template('demo.html', image_list=file_list)
if __name__ == '__main__':
    # Development server only (debug mode).
    app.run(debug=True, port=7777)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
""" Unittests for zimsoap.zobjects """
import unittest
from six import text_type, binary_type
import zimsoap.utils
from zimsoap.zobjects import (
Account, Domain, Identity, Mailbox, Signature, ZObject)
from . import samples
class ZObjectsTests(unittest.TestCase):
    """Offline unit tests for the zimsoap.zobjects model classes.

    XML samples from the `samples` module are converted to dicts in
    setUp() and fed to the various from_dict() constructors; no network
    or Zimbra server is involved.
    """

    class NullZObject(ZObject):
        # Minimal concrete subclass used to exercise ZObject base behaviour.
        ATTRNAME_PROPERTY = 'n'
        TAG_NAME = 'TestObject'

    def setUp(self):
        # samples, as dict
        xml2dict = zimsoap.utils.xml_str_to_dict
        self.simple_domain_dict = xml2dict(samples.SIMPLE_DOMAIN)
        self.misnamed_domain_dict = xml2dict(samples.MISNAMED_DOMAIN)
        self.mbox_dict = xml2dict(samples.MBOX)
        self.admin_account_dict = xml2dict(samples.ADMIN_ACCOUNT)
        self.system_account_dict = xml2dict(samples.SYSTEM_ACCOUNT)
        self.normal_account_dict = xml2dict(samples.NORMAL_ACCOUNT)
        self.signature_dict = xml2dict(samples.SIGNATURE)
        self.identity_dict = xml2dict(samples.IDENTITY)

    def testZobjectNeverFailsToPrint(self):
        # str() must work with no attrs, with only id, and with name set.
        zo = self.NullZObject()
        self.assertIn(self.NullZObject.__name__, str(zo))
        zo.id = 'myid'
        self.assertIn('myid', str(zo))
        zo.name = 'myname'
        self.assertIn('myname', str(zo))

    def testZobjectNeverFailsToRepr(self):
        zo = self.NullZObject()
        self.assertIn(self.NullZObject.__name__, repr(zo))
        self.assertIn(hex(id(zo)), repr(zo))
        zo.id = 'myid'
        self.assertIn('myid', repr(zo))
        zo.name = 'myname'
        self.assertIn('myid', repr(zo))

    def testDomainFromDict(self):
        data = self.simple_domain_dict['domain']
        d = Domain.from_dict(data)
        self.assertIsInstance(d, Domain)
        self.assertIsInstance(d.id, text_type)
        self.assertIsInstance(d.name, text_type)
        self.assertIsNotNone(d.id)
        self.assertEqual(d.name, 'client1.unbound.example.com')
        self.assertEqual(d.get_full_data(), data)

    def testDomainSelector(self):
        d = Domain(name='foo')
        s = d.to_selector()
        self.assertEqual(s['by'], 'name')
        self.assertEqual(s['_content'], 'foo')

    def testInvalidDomainSelector(self):
        # A selector needs at least one known attribute.
        with self.assertRaises(ValueError):
            Domain().to_selector()
        # Should not produce a selector with spamattr
        with self.assertRaises(ValueError):
            Domain(spamattr='eggvalue').to_selector()

    def test_ZObjects_import_a_tags(self):
        props = Domain._parse_a_tags(self.simple_domain_dict['domain'])
        self.assertIsInstance(props, dict)
        # 53 is the number of unique "n" keys in the sample domain.
        self.assertEqual(len(props), 53)
        # Just check one of the <a> tags
        self.assertEqual(props['zimbraAuthMech'], 'zimbra')

    def test_ZObjects_get_single_tag_list(self):
        # A single <a> tag (a dict, not a list) must parse too.
        contact_dic = {'a': {'_content': 'test@example.com', 'n': 'email'},
                       'l': '7',
                       'd': '1445446429000',
                       'id': '298',
                       'rev': '24825',
                       'fileAsStr': ''}
        props = self.NullZObject._parse_a_tags(contact_dic)
        self.assertEqual(props['email'], 'test@example.com')

    def test_ZObjects_import_a_tags_multivalue(self):
        # Repeated "n" keys accumulate into a list value.
        props = Domain._parse_a_tags(self.simple_domain_dict['domain'])
        self.assertIsInstance(props['objectClass'], list)
        self.assertEqual(
            props['objectClass'],
            ['dcObject', 'organization', 'zimbraDomain', 'amavisAccount'])

    def test_ZObjects_access_a_tag_as_item(self):
        d = Domain.from_dict(self.simple_domain_dict['domain'])
        self.assertEqual(d['zimbraAuthMech'], 'zimbra')

    def test_ZObjects_comparison_equals(self):
        # Equality is expected to be based on the Zimbra UUID.
        d1 = Domain(id='d78fd9c9-f000-440b-bce6-ea938d40fa2d')
        d2 = Domain(id='d78fd9c9-f000-440b-bce6-ea938d40fa2d')
        self.assertTrue(d1 == d2)
        self.assertFalse(d1 != d2)

    def test_ZObjects_comparison(self):
        d1 = Domain(id='d78fd9c9-f000-440b-bce6-ea938d40fa2d')
        d2 = Domain(id='dddddddd-f000-440b-bce6-dddddddddddd')
        self.assertTrue(d1 != d2)
        self.assertFalse(d1 == d2)

    def test_ZObjects_comparison_invalid_id_first(self):
        # Malformed UUIDs must raise rather than compare silently.
        d1 = Domain(id='123')
        d2 = Domain(id='d78fd9c9-f000-440b-bce6-ea938d40fa2d')
        with self.assertRaises(ValueError):
            d1 == d2

    def test_ZObjects_comparison_invalid_id_second(self):
        d1 = Domain(id='123')
        d2 = Domain(id='d78fd9c9-f000-440b-bce6-ea938d40fa2d')
        with self.assertRaises(ValueError):
            d2 == d1

    def test_ZObjects_comparison_invalid_type(self):
        # Comparing different ZObject subclasses is an error.
        d1 = Domain(id='d78fd9c9-f000-440b-bce6-ea938d40fa2d')
        m1 = Mailbox(id='d78fd9c9-f000-440b-bce6-ea938d40fa2d')
        with self.assertRaises(TypeError):
            d1 == m1

    def test_Signature_to_selector(self):
        # When both id and name are present, the id is expected to win.
        s = Signature(id='1234')
        self.assertEqual(s.to_selector(), {'id': '1234'})
        self.assertIsInstance(s.to_selector(), dict)
        s = Signature(name='jdoe')
        self.assertEqual(s.to_selector(), {'name': 'jdoe'})
        s = Signature(id='1234', name='jdoe')
        self.assertEqual(s.to_selector(), {'id': '1234'})

    def test_Signature_creator_fails_without_content(self):
        s = Signature(name='unittest')
        with self.assertRaises(AttributeError):
            s.to_xml_creator()

    def test_Signature_creator_default_format(self):
        # Content type defaults to text/html when unspecified.
        s = Signature(name='unittest')
        s.set_content('TEST_CONTENT')
        self.assertEqual(s._contenttype, 'text/html')

    def test_Signature_set_content(self):
        s = Signature(name='unittest')
        s.set_content('TEST_CONTENT', contenttype='text/plain')
        self.assertEqual(s._contenttype, 'text/plain')
        self.assertEqual(s._content, 'TEST_CONTENT')

    def test_Signature_creator_success(self):
        s = Signature(name='unittest')
        s.set_content('TEST_CONTENT', contenttype='text/plain')
        d = s.to_creator()
        self.assertTrue(d['content'], 'TEST_CONTENT')

    def test_Signature_dict_import(self):
        s = Signature.from_dict(self.signature_dict['signature'])
        self.assertIsInstance(s, Signature)
        self.assertIsInstance(s.get_content(), (text_type, binary_type))
        self.assertEqual(s.get_content(), 'CONTENT')
        self.assertEqual(s.get_content_type(), 'text/html')

    def test_Identity_to_creator(self):
        # Round-trip: dict -> Identity -> creator dict -> Identity.
        test_attr = 'zimbraPrefForwardReplyPrefixChar'
        i = Identity.from_dict(self.identity_dict['identity'])
        dict_again = Identity.from_dict(i.to_creator())
        self.assertEqual(i[test_attr], dict_again[test_attr])

    def test_Account_system(self):
        sys = Account.from_dict(self.system_account_dict['account'])
        norm = Account.from_dict(self.normal_account_dict['account'])
        adm = Account.from_dict(self.admin_account_dict['account'])
        self.assertEqual(sys.is_system(), True)
        self.assertEqual(adm.is_system(), False)
        self.assertEqual(norm.is_system(), False)

    def test_Account_admin(self):
        # Admin and system flags are expected to be mutually exclusive here.
        sys = Account.from_dict(self.system_account_dict['account'])
        norm = Account.from_dict(self.normal_account_dict['account'])
        adm = Account.from_dict(self.admin_account_dict['account'])
        self.assertEqual(sys.is_admin(), False)
        self.assertEqual(adm.is_admin(), True)
        self.assertEqual(norm.is_admin(), False)

    def test_property(self):
        norm = Account.from_dict(self.normal_account_dict['account'])
        self.assertEqual(norm.property('zimbraFeatureSignaturesEnabled'), True)
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class BossItem(scrapy.Item):
    """Scraped job-posting record (one item per job listing)."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    # data structure definition
    job_name = scrapy.Field()  # job title
    monthly_salary = scrapy.Field()  # monthly salary
    company_name = scrapy.Field()  # company name
    company_addr = scrapy.Field()  # company address
    company_type = scrapy.Field()  # company type
    company_size = scrapy.Field()  # company size
    is_listed = scrapy.Field()  # publicly listed?
    experience = scrapy.Field()  # required work experience
    education = scrapy.Field()  # required education level
|
class Card():
    """A single playing card from a standard deck.

    Class attributes:
        suits  -- (name, ordinal) pairs for the four suits
        values -- (name, point value) pairs for the thirteen ranks
                  (face cards count 10, Ace counts 11)
    """

    suits = [('Heart', 1), ('Diamond', 2), ('Spade', 3), ('Club', 4)]
    values = [('Ace', 11), ('Two', 2), ('Three', 3), ('Four', 4), ('Five', 5),
              ('Six', 6), ('Seven', 7), ('Eight', 8), ('Nine', 9), ('Ten', 10),
              ('Jack', 10), ('Queen', 10), ('King', 10)]

    def __init__(self, card_value=0, suit=0):
        """Build a card from indices into the rank/suit tables (defaults to the Ace of Hearts)."""
        self.value = Card.values[card_value]
        self.suit = Card.suits[suit]
|
# open function is built into python
# for more information on built in functions go to http://docs.python.org/lib/built-in-functions.html
# Use the file name mbox-short.txt as the file name
# NOTE: Python 2 script (raw_input / print statement); will not run under Python 3.
fname = raw_input("Enter file name: ")
fh = open(fname)
qty = 0
val = 0
# Average the numeric value of every X-DSPAM-Confidence header line.
for line in fh:
    if not line.startswith("X-DSPAM-Confidence:") : continue
    # the value starts two characters after the colon; float() tolerates the trailing newline
    val = val + float(line[line.find(':')+2:len(line)])
    qty = qty + 1
# NOTE(review): raises ZeroDivisionError when the file has no matching lines.
avg = str(val/qty)
print 'Average spam confidence: '+avg
### Group the People Given the Group Size They Belong To - Solution
class Solution:
    def groupThePeople(self, groupSizes: List[int]) -> List[List[int]]:
        """Group people so that person i ends up in a group of size groupSizes[i].

        :param groupSizes: groupSizes[i] is the required group size for person i
        :return: a list of groups (each a list of person indices)

        Bug fix: the previous version returned a ``tuple`` of groups even
        though the signature promises ``List[List[int]]``; it now returns
        a list. Grouping logic is unchanged: bucket people by required
        size, then chop each bucket into chunks of exactly that size.
        """
        members_by_size = collections.defaultdict(list)
        for person, size in enumerate(groupSizes):
            members_by_size[size].append(person)
        return [
            members[i:i + size]
            for size, members in members_by_size.items()
            for i in range(0, len(members), size)
        ]
from uuid import uuid4
class Annotation(object):
    """ A named entity that has been annotated on a document.

    Note:
        The attributes assume a plain-text representation of the document,
        after normalization. For example, `text` will be in lower case if
        the `norm` parameter of the `to_plain_text` method call contains
        lowercasing, and `offset` indexes into that (potentially
        normalized/cleaned) text.

    Attributes:
        text (str): The continuous text snippet forming the named entity.
        label (str): The entity type, e.g. "PERSON", "ORGANIZATION".
        offset (2-tuple of int): Positions of the first and the last
            letter of the annotated entity in the plain text.
        identifier (str, optional): A unique identifier for the annotation.
        discontinued (bool, optional): True when the offset is discontinued.
        uuid (str): The unique identifier backing `identifier`.
        score (float): Confidence score, defaults to 1.0.
    """

    def __init__(self, text, label, offset, identifier=None, uuid=None, discontinued=False, score=1.0):
        self.text = text
        self.label = label
        self.offset = offset
        self.discontinued = discontinued
        self.score = score
        # `or` (not an is-None test) on purpose: any falsy uuid/identifier
        # falls back to a generated default.
        self.uuid = uuid or uuid4()
        self.identifier = identifier or "T{}".format(self.uuid)

    def to_inline_string(self):
        """ Render the annotated entity as label[text]. """
        return "%s[%s]" % (self.label, self.text)

    def to_inline_string_color(self):
        """ Render the annotated entity as label[text] with ANSI colors. """
        colored_label = "\x1b[34m " + self.label + "\x1b[0m"
        colored_text = "\x1b[32m " + self.text + "\x1b[0m"
        return "%s[%s]" % (colored_label, colored_text)

    def __str__(self):
        parts = (self.identifier, self.label, self.offset[0], self.offset[1], self.text)
        return "{}\t{} {} {}\t{}".format(*parts)

    # Ordering is by (start, end) offset; equality also requires matching
    # text and label. It only makes sense to compare annotations that occur
    # in the same document.

    def __eq__(self, other):
        return (self.text, self.label, self.offset) == (other.text, other.label, other.offset)

    def __lt__(self, other):
        if self.offset[0] == other.offset[0]:
            return self.offset[1] < other.offset[1]
        return self.offset[0] < other.offset[0]

    def __gt__(self, other):
        if self.offset[0] == other.offset[0]:
            return self.offset[1] > other.offset[1]
        return self.offset[0] > other.offset[0]

    def __le__(self, other):
        return self.__lt__(other) or self.__eq__(other)

    def __ge__(self, other):
        return self.__gt__(other) or self.__eq__(other)

    def __hash__(self):
        key = self.text + self.label + str(self.offset[0]) + str(self.offset[1])
        return hash(key)
class Relation(object):
    """ A relation object that has been annotated on a document.

    Attributes:
        label (str): The relation type, e.g. "Origin", "Effect".
        source_id (str): Identifier of the annotated entity that is the
            source argument for the relation.
        target_id (str): Identifier of the annotated entity that is the
            target argument for the relation.
        identifier (str, optional): A unique identifier for the relation.
        source_role (str, optional): Role of the source argument.
        target_role (str, optional): Role of the target argument.
        score (float): Confidence score, defaults to 1.0.
        uuid (str): The unique identifier of a relation.
    """

    def __init__(self,
                 label,
                 source_id,
                 target_id,
                 identifier=None,
                 source_role="arg1",
                 target_role="arg2",
                 score=1.0,
                 uuid=None):
        self.label = label
        self.source_id = source_id
        self.target_id = target_id
        self.source_role = source_role
        self.target_role = target_role
        self.score = score
        self.uuid = uuid or uuid4()
        self.identifier = identifier or "R{}".format(self.uuid)

    def __str__(self):
        return "{}\t{} {}:{} {}:{}".format(self.identifier, self.label, self.source_role, self.source_id,
                                           self.target_role, self.target_id)

    def __eq__(self, other):
        return ((self.label == other.label) and (self.source_id == other.source_id) and (
            self.target_id == other.target_id))

    def __lt__(self, other):
        # Order by the numeric part of the source id, then the target id.
        sid1 = int(self.source_id[1:])
        sid2 = int(other.source_id[1:])
        if sid1 < sid2:
            return True
        if sid1 == sid2:
            tid1 = int(self.target_id[1:])
            tid2 = int(other.target_id[1:])
            return tid1 < tid2
        return False

    def __gt__(self, other):
        sid1 = int(self.source_id[1:])
        sid2 = int(other.source_id[1:])
        if sid1 > sid2:
            return True
        if sid1 == sid2:
            tid1 = int(self.target_id[1:])
            tid2 = int(other.target_id[1:])
            return tid1 > tid2
        return False

    def __le__(self, other):
        # Bug fix: the old implementation compared only the source ids, so
        # e.g. R(T1,T2) <= R(T1,T1) held even though neither < nor == did.
        # Define <= and >= in terms of < / > / == so the comparison
        # operators stay mutually consistent.
        return self.__lt__(other) or self.__eq__(other)

    def __ge__(self, other):
        return self.__gt__(other) or self.__eq__(other)

    def __hash__(self):
        return hash(self.label + self.source_id + self.target_id)
class Normalization(object):
    """ A normalization object that has been annotated on a document.

    Attributes:
        argument_id (str): Identifier of the annotated entity in the
            plain text to which the normalization is associated.
        resource_id (str): Identifier for the external resource from
            which the normalization is taken.
        external_id (str): Identifier of the entry within the external
            resource from which the normalization is taken.
        identifier (str, optional): A unique identifier for the
            normalization.
        label (str, optional): The normalization type, e.g. "Reference".
        preferred_term (str, optional): The preferred term from the
            external resource for the normalization.
        score (float): Confidence score, defaults to 1.0.
    """

    def __init__(self,
                 argument_id,
                 resource_id,
                 external_id,
                 identifier=None,
                 label="Reference",
                 preferred_term="",
                 score=1.0,
                 uuid=None):
        self.argument_id = argument_id
        self.resource_id = resource_id
        self.external_id = external_id
        self.label = label
        self.preferred_term = preferred_term
        self.score = score
        self.uuid = uuid or uuid4()
        self.identifier = identifier or "N{}".format(self.uuid)

    def _ordinal(self):
        """Numeric part of the argument id (e.g. 7 for 'T7'); the sort key."""
        return int(self.argument_id[1:])

    def __str__(self):
        term = self.preferred_term or "--"
        return "{}\t{} {} {}:{}\t{}".format(self.identifier, self.label, self.argument_id, self.resource_id,
                                            self.external_id, term)

    def __eq__(self, other):
        return ((self.label, self.argument_id, self.resource_id, self.external_id) ==
                (other.label, other.argument_id, other.resource_id, other.external_id))

    def __lt__(self, other):
        return self._ordinal() < other._ordinal()

    def __gt__(self, other):
        return self._ordinal() > other._ordinal()

    def __le__(self, other):
        return self._ordinal() <= other._ordinal()

    def __ge__(self, other):
        return self._ordinal() >= other._ordinal()

    def __hash__(self):
        return hash(self.label + self.argument_id + self.resource_id + self.external_id)
|
#!/usr/bin/env pybricks-micropython
from pybricks.hubs import EV3Brick
from pybricks.ev3devices import (Motor, TouchSensor, ColorSensor,
InfraredSensor, UltrasonicSensor, GyroSensor)
from pybricks.parameters import Port, Stop, Direction, Button, Color
from pybricks.tools import wait, StopWatch, DataLog
from pybricks.robotics import DriveBase
from pybricks.media.ev3dev import SoundFile, ImageFile
# -------------------------------------------------------------------------------------------------
def main():
    """Drive the two EV3 motors from the IR remote control.

    The remote channel (1-4) scales motor speed; on each side, UP/DOWN
    toggles that motor between stopped and forward/backward. Any brick
    button press exits the loop.
    """
    ev3 = EV3Brick()
    ev3.speaker.beep()
    ev3.screen.clear()
    ev3.light.on( Color.YELLOW )
    colors = ColorSensor( Port.S3 )
    ir = InfraredSensor( Port.S4 )
    left_motor = Motor( Port.B, Direction.CLOCKWISE )
    left_speed = 0
    left_pressed = False
    right_motor = Motor( Port.C, Direction.CLOCKWISE )
    right_speed = 0
    right_pressed = False
    # base speed; multiplied by the active remote channel number
    speed_mult = 128
    ev3.light.off()
    while not ev3.buttons.pressed():
        b = []
        ch = 0
        # look for button(s) pressed in channel
        for channel in range( 1, 5 ):
            cb = ir.buttons( channel )
            if ( len(cb) ):
                b = cb
                ch = channel
                break
        # left motor control: a fresh press toggles stop <-> run
        if ( Button.LEFT_UP in b ):
            if ( not left_pressed ):
                left_pressed = True
                if ( left_speed < 0 ):
                    left_speed = 0
                else:
                    left_speed = speed_mult * ch
        elif ( Button.LEFT_DOWN in b ):
            if ( not left_pressed ):
                left_pressed = True
                if ( 0 < left_speed ):
                    left_speed = 0
                else:
                    left_speed = -1 * speed_mult * ch
        else:
            left_pressed = False
        # right motor control
        if ( Button.RIGHT_UP in b ):
            if ( not right_pressed ):
                right_pressed = True
                if ( right_speed < 0 ):
                    right_speed = 0
                else:
                    right_speed = speed_mult * ch
        elif ( Button.RIGHT_DOWN in b ):
            if ( not right_pressed ):
                right_pressed = True
                if ( 0 < right_speed ):
                    right_speed = 0
                else:
                    # bug fix: used `channel`, which after the probe loop is
                    # always its last value (4), instead of the matched
                    # channel `ch` that every other branch uses
                    right_speed = -1 * speed_mult * ch
        else:
            right_pressed = False
        left_motor.run( left_speed )
        right_motor.run( right_speed )
        wait(50)
# -------------------------------------------------------------------------------------------------
# Script entry point: run the remote-control loop.
if __name__ == "__main__":
    main()
|
from setuptools._distutils.errors import CompileError as BaseCompileError
class MissingGXX(Exception):
    """
    This error is raised when we try to generate c code,
    but g++ is not available.
    """
class CompileError(BaseCompileError):
    """This custom `Exception` prints compilation errors with their original
    formatting.
    """
    def __str__(self):
        # Return args[0] verbatim so multi-line compiler output keeps its
        # newlines and indentation.
        # NOTE(review): assumes the exception was constructed with at least
        # one argument; raises IndexError otherwise — confirm call sites.
        return self.args[0]
|
from django.db import models
from tracker.models import Daily, Food, Activity, Medication, Supplement
import datetime
def CreateDaily(energy):
    """Create and persist a Daily record stamped with the current time.

    :param energy: value stored on Daily.energylevel

    NOTE(review): uses naive local datetime.now() for `date` — confirm
    whether the Daily.date field expects a date vs. datetime and whether
    timezone-aware timestamps are required (USE_TZ).
    """
    d = Daily(date = datetime.datetime.now(), energylevel = energy)
    d.save()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class MybankCreditLoantradeGuarletterApplyQueryResponse(AlipayResponse):
    """Response model for the guarletter apply-query API (accept_result / reject_reason)."""

    def __init__(self):
        super(MybankCreditLoantradeGuarletterApplyQueryResponse, self).__init__()
        self._accept_result = None
        self._reject_reason = None

    @property
    def accept_result(self):
        return self._accept_result

    @accept_result.setter
    def accept_result(self, value):
        self._accept_result = value

    @property
    def reject_reason(self):
        return self._reject_reason

    @reject_reason.setter
    def reject_reason(self, value):
        self._reject_reason = value

    def parse_response_content(self, response_content):
        """Populate the known fields from the decoded response dict."""
        response = super(MybankCreditLoantradeGuarletterApplyQueryResponse, self).parse_response_content(response_content)
        for field in ('accept_result', 'reject_reason'):
            if field in response:
                setattr(self, field, response[field])
|
import pandas as pd
import matplotlib.pyplot as plt
from io import BytesIO
import os
# Interactive tool: pivot every sheet of an Excel workbook and write one
# workbook per sheet containing the pivot table plus a bar chart of it.
file=input('FilePath: ')
piv_index=input('Write Your Index: ').split()
piv_data=input('Columns to Graph: ').split()
# NOTE(review): `file` shadows the builtin and is rebound below to a list of
# DataFrames — consider renaming.
extension = os.path.splitext(file)[1]
filename = os.path.splitext(file)[0]
pth=os.path.dirname(file)
# NOTE(review): `newfile` is computed but never used afterwards.
newfile=os.path.join(pth,filename+'_2'+extension)
xl = pd.ExcelFile(file)
sheetnames=xl.sheet_names
file=[pd.read_excel(file, sheet_name=s) for s in sheetnames]
# One pivot table + chart + output workbook per sheet.
for k in range(len(sheetnames)):
    df1=pd.DataFrame(file[k])
    df_pt=pd.pivot_table(df1, index = piv_index,values= piv_data)
    img=df_pt.plot(kind='bar',secondary_y=piv_data[-1], width=0.8, figsize=(12,10), title=sheetnames[k])
    imgdata = BytesIO()
    # NOTE(review): this new empty figure is never used; the chart is saved
    # from `img.figure` on the next line.
    fig = plt.figure()
    img.figure.savefig(imgdata)
    writer = pd.ExcelWriter('{}/{}.xlsx'.format(pth,sheetnames[k]), engine='xlsxwriter')
    df_pt.to_excel(writer, sheet_name=sheetnames[k])
    worksheet = writer.sheets['{}'.format(sheetnames[k])]
    worksheet.insert_image('F1','',{'image_data': imgdata})
    # NOTE(review): ExcelWriter.save() is deprecated in newer pandas in
    # favour of close() — confirm the pinned pandas version.
    writer.save()
|
import time
import numpy as np
from scipy.sparse import linalg
from scipy.sparse.linalg import spsolve
def solve_cgs(k, f, m=None, tol=1e-5):
    """Solves a linear system of equations (Ku = f) using the CGS iterative method.

    :param k: N x N matrix of the linear system
    :type k: :class:`scipy.sparse.csc_matrix`
    :param f: N x 1 right hand side of the linear system
    :type f: :class:`numpy.ndarray`
    :param float tol: Tolerance for the solver to achieve. The algorithm terminates when either
        the relative or the absolute residual is below tol.
    :param m: Preconditioner for the linear matrix approximating the inverse of k
    :type m: :class:`scipy.linalg.LinearOperator`

    :return: The solution vector to the linear system of equations
    :rtype: :class:`numpy.ndarray`

    :raises RuntimeError: If the CGS iterative method does not converge
    """
    try:
        # SciPy >= 1.12 renamed the `tol` keyword to `rtol` (the old name
        # was removed in 1.14); try the new spelling first.
        (u, info) = linalg.cgs(k, f, rtol=tol, M=m)
    except TypeError:
        # older SciPy only accepts `tol`
        (u, info) = linalg.cgs(k, f, tol=tol, M=m)
    if info != 0:
        raise RuntimeError("CGS iterative method did not converge.")
    return u
def solve_cgs_lagrange(k_lg, f, tol=1e-5, m=None):
    """Solves a linear system of equations (Ku = f) using the CGS iterative method and the
    Lagrangian multiplier method.

    :param k_lg: (N+1) x (N+1) Lagrangian multiplier matrix of the linear system
    :type k_lg: :class:`scipy.sparse.csc_matrix`
    :param f: N x 1 right hand side of the linear system
    :type f: :class:`numpy.ndarray`
    :param float tol: Tolerance for the solver to achieve. The algorithm terminates when either
        the relative or the absolute residual is below tol.
    :param m: Preconditioner for the linear matrix approximating the inverse of k
    :type m: :class:`scipy.linalg.LinearOperator`

    :return: The solution vector to the linear system of equations
    :rtype: :class:`numpy.ndarray`

    :raises RuntimeError: If the CGS iterative method does not converge or the error from the
        Lagrangian multiplier method exceeds the tolerance
    """
    rhs = np.append(f, 0)
    try:
        # SciPy >= 1.12 renamed the `tol` keyword to `rtol` (removed in 1.14)
        (u, info) = linalg.cgs(k_lg, rhs, rtol=tol, M=m)
    except TypeError:
        (u, info) = linalg.cgs(k_lg, rhs, tol=tol, M=m)
    if info != 0:
        raise RuntimeError("CGS iterative method did not converge.")
    # relative magnitude of the Lagrange multiplier; abs() so a large
    # *negative* multiplier also fails the check (the old signed test let
    # any negative error pass)
    err = abs(u[-1]) / max(np.absolute(u))
    if err > tol:
        raise RuntimeError("Lagrangian multiplier method error exceeds tolerance.")
    return u[:-1]
def solve_direct(k, f):
    """Solves a linear system of equations (Ku = f) with the direct sparse solver.

    :param k: N x N matrix of the linear system
    :type k: :class:`scipy.sparse.csc_matrix`
    :param f: N x 1 right hand side of the linear system
    :type f: :class:`numpy.ndarray`

    :return: The solution vector to the linear system of equations
    :rtype: :class:`numpy.ndarray`
    """
    solution = spsolve(k, f)
    return solution
def solve_direct_lagrange(k_lg, f):
    """Solves a linear system of equations (Ku = f) using the direct solver method and the
    Lagrangian multiplier method.

    :param k_lg: (N+1) x (N+1) Lagrangian multiplier matrix of the linear system
    :type k_lg: :class:`scipy.sparse.csc_matrix`
    :param f: N x 1 right hand side of the linear system
    :type f: :class:`numpy.ndarray`

    :return: The solution vector to the linear system of equations
    :rtype: :class:`numpy.ndarray`

    :raises RuntimeError: If the Lagrangian multiplier method exceeds a tolerance of 1e-5
    """
    u = spsolve(k_lg, np.append(f, 0))
    # relative magnitude of the Lagrange multiplier; abs() so a large
    # *negative* multiplier also fails the check (the old signed test let
    # any negative error pass)
    err = abs(u[-1]) / max(np.absolute(u))
    if err > 1e-5:
        raise RuntimeError("Lagrangian multiplier method error exceeds tolerance of 1e-5.")
    return u[:-1]
def function_timer(text, function, *args):
    """Time and execute *function(\\*args)*, optionally printing progress.

    :param string text: Message printed before the call; pass "" to suppress
        all output
    :param function: Callable to time and execute
    :type function: function
    :param args: Positional arguments forwarded to *function*

    :return: Whatever *function* returns
    """
    verbose = text != ""
    t0 = time.time()
    if verbose:
        print(text)
    result = function(*args)
    if verbose:
        print("----completed in {0:.6f} seconds---\n".format(time.time() - t0))
    return result
|
# -*- coding: utf-8 -*-
'''
@Author: Lingyu
@Date: 2021-10-19
@Description:
'''
from .logger import logger,getLogger
from .makeresponse import make_response
from . import hook
def init_app(app):
    """Attach this package's hooks to *app* (delegates to hook.init_app)."""
    hook.init_app(app)
#!/usr/bin/env python
# coding=utf-8
# aeneas is a Python/C library and a set of tools
# to automagically synchronize audio and text (aka forced alignment)
#
# Copyright (C) 2012-2013, Alberto Pettarin (www.albertopettarin.it)
# Copyright (C) 2013-2015, ReadBeyond Srl (www.readbeyond.it)
# Copyright (C) 2015-2016, Alberto Pettarin (www.albertopettarin.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This module contains the following classes:
* :class:`~aeneas.ttswrappers.festivalttswrapper.FESTIVALTTSWrapper`,
a wrapper for the ``Festival`` TTS engine.
Please refer to
http://www.cstr.ed.ac.uk/projects/festival/
for further details.
"""
from __future__ import absolute_import
from __future__ import print_function
from aeneas.exacttiming import TimeValue
from aeneas.language import Language
from aeneas.runtimeconfiguration import RuntimeConfiguration
from aeneas.ttswrappers.basettswrapper import BaseTTSWrapper
import aeneas.globalfunctions as gf
class FESTIVALTTSWrapper(BaseTTSWrapper):
"""
A wrapper for the ``Festival`` TTS engine.
This wrapper supports calling the TTS engine
via ``subprocess`` or via Python C++ extension.
.. warning::
The C++ extension call is experimental and
probably works only on Linux at the moment.
In abstract terms, it performs one or more calls like ::
$ echo text | text2wave -eval "(language_italian)" -o output_file.wav
To use this TTS engine, specify ::
"tts=festival"
in the ``RuntimeConfiguration`` object.
To execute from a non-default location: ::
"tts=festival|tts_path=/path/to/wave2text"
See :class:`~aeneas.ttswrappers.basettswrapper.BaseTTSWrapper`
for the available functions.
Below are listed the languages supported by this wrapper.
:param rconf: a runtime configuration
:type rconf: :class:`~aeneas.runtimeconfiguration.RuntimeConfiguration`
:param logger: the logger object
:type logger: :class:`~aeneas.logger.Logger`
"""
CES = Language.CES
""" Czech """
CYM = Language.CYM
""" Welsh """
ENG = Language.ENG
""" English """
FIN = Language.FIN
""" Finnish """
ITA = Language.ITA
""" Italian """
RUS = Language.RUS
""" Russian """
SPA = Language.SPA
""" Spanish """
ENG_GBR = "eng-GBR"
""" English (GB) """
ENG_SCT = "eng-SCT"
""" English (Scotland) """
ENG_USA = "eng-USA"
""" English (USA) """
LANGUAGE_TO_VOICE_CODE = {
CES: CES,
CYM: CYM,
ENG: ENG,
SPA: SPA,
FIN: FIN,
ITA: ITA,
RUS: RUS,
ENG_GBR: ENG_GBR,
ENG_SCT: ENG_SCT,
ENG_USA: ENG_USA,
}
DEFAULT_LANGUAGE = ENG_USA
CODE_TO_HUMAN = {
CES: u"Czech",
CYM: u"Welsh",
ENG: u"English",
FIN: u"Finnish",
ITA: u"Italian",
RUS: u"Russian",
SPA: u"Spanish",
ENG_GBR: u"English (GB)",
ENG_SCT: u"English (Scotland)",
ENG_USA: u"English (USA)",
}
CODE_TO_HUMAN_LIST = sorted([u"%s\t%s" % (k, v) for k, v in CODE_TO_HUMAN.items()])
VOICE_CODE_TO_SUBPROCESS = {
CES: u"(language_czech)",
CYM: u"(language_welsh)",
ENG: u"(language_english)",
ENG_GBR: u"(language_british_english)",
ENG_SCT: u"(language_scots_gaelic)",
ENG_USA: u"(language_american_english)",
SPA: u"(language_castillian_spanish)",
FIN: u"(language_finnish)",
ITA: u"(language_italian)",
RUS: u"(language_russian)",
}
DEFAULT_TTS_PATH = "text2wave"
OUTPUT_AUDIO_FORMAT = ("pcm_s16le", 1, 16000)
HAS_SUBPROCESS_CALL = True
HAS_C_EXTENSION_CALL = True
C_EXTENSION_NAME = "cfw"
TAG = u"FESTIVALTTSWrapper"
    def __init__(self, rconf=None, logger=None):
        """Set up the wrapper and the text2wave subprocess argument template."""
        super(FESTIVALTTSWrapper, self).__init__(rconf=rconf, logger=logger)
        # CLI_PARAMETER_* entries are placeholder tokens understood by
        # BaseTTSWrapper and substituted per call; the voice code is
        # injected via _voice_code_to_subprocess rather than a fixed flag.
        self.set_subprocess_arguments([
            self.tts_path,
            self.CLI_PARAMETER_VOICE_CODE_FUNCTION,
            u"-o",
            self.CLI_PARAMETER_WAVE_PATH,
            self.CLI_PARAMETER_TEXT_STDIN
        ])
    def _voice_code_to_subprocess(self, voice_code):
        """Map *voice_code* to the ``-eval`` arguments passed to text2wave."""
        return [u"-eval", self.VOICE_CODE_TO_SUBPROCESS[voice_code]]
def _synthesize_multiple_c_extension(self, text_file, output_file_path, quit_after=None, backwards=False):
    """
    Synthesize multiple text fragments, using the cfw extension.
    Return a tuple (anchors, total_time, num_chars).

    :param text_file: the text file holding the fragments to synthesize
    :param output_file_path: path of the WAVE file to be written
    :param quit_after: stop synthesizing after this many seconds (None = no limit)
    :param backwards: if True, synthesize fragments in reverse order
    :rtype: (bool, (list, :class:`~aeneas.exacttiming.TimeValue`, int))
    """
    self.log(u"Synthesizing using C extension...")
    # convert parameters from Python values to C values
    # (None quit_after becomes 0.0, i.e. "no limit", via the TypeError path)
    try:
        c_quit_after = float(quit_after)
    except TypeError:
        c_quit_after = 0.0
    c_backwards = 0
    if backwards:
        c_backwards = 1
    self.log([u"output_file_path: %s", output_file_path])
    self.log([u"c_quit_after: %.3f", c_quit_after])
    self.log([u"c_backwards: %d", c_backwards])
    self.log(u"Preparing u_text...")
    # Build (voice_code_expression, fragment_text) pairs for the extension,
    # substituting class-level defaults for missing language/text.
    u_text = []
    fragments = text_file.fragments
    for fragment in fragments:
        f_lang = fragment.language
        f_text = fragment.filtered_text
        if f_lang is None:
            f_lang = self.DEFAULT_LANGUAGE
        f_voice_code = self.VOICE_CODE_TO_SUBPROCESS[self._language_to_voice_code(f_lang)]
        if f_text is None:
            f_text = u""
        u_text.append((f_voice_code, f_text))
    self.log(u"Preparing u_text... done")
    # call C extension
    sr = None
    sf = None
    intervals = None
    self.log(u"Preparing c_text...")
    if gf.PY2:
        # Python 2 => pass byte strings
        c_text = [(gf.safe_bytes(t[0]), gf.safe_bytes(t[1])) for t in u_text]
    else:
        # Python 3 => pass Unicode strings
        c_text = [(gf.safe_unicode(t[0]), gf.safe_unicode(t[1])) for t in u_text]
    self.log(u"Preparing c_text... done")
    self.log(u"Calling aeneas.cfw directly")
    try:
        self.log(u"Importing aeneas.cfw...")
        import aeneas.cfw.cfw
        self.log(u"Importing aeneas.cfw... done")
        self.log(u"Calling aeneas.cfw...")
        # sr: sample rate; sf: number of synthesized fragments;
        # intervals: per-fragment (begin, end) times
        sr, sf, intervals = aeneas.cfw.cfw.synthesize_multiple(
            output_file_path,
            c_quit_after,
            c_backwards,
            c_text
        )
        self.log(u"Calling aeneas.cfw... done")
    except Exception as exc:
        self.log_exc(u"An unexpected error occurred while running cfw", exc, False, None)
        return (False, None)
    self.log([u"sr: %d", sr])
    self.log([u"sf: %d", sf])
    # create output
    anchors = []
    current_time = TimeValue("0.000")
    num_chars = 0
    if backwards:
        # the extension synthesized fragments in reverse order; mirror that here
        fragments = fragments[::-1]
    for i in range(sf):
        # get the correct fragment
        fragment = fragments[i]
        # store for later output
        anchors.append([
            TimeValue(intervals[i][0]),
            fragment.identifier,
            fragment.filtered_text
        ])
        # increase the character counter
        num_chars += fragment.characters
        # update current_time
        current_time = TimeValue(intervals[i][1])
    # return output
    # NOTE anchors do not make sense if backwards == True
    self.log([u"Returning %d time anchors", len(anchors)])
    self.log([u"Current time %.3f", current_time])
    self.log([u"Synthesized %d characters", num_chars])
    self.log(u"Synthesizing using C extension... done")
    return (True, (anchors, current_time, num_chars))
|
from collections import deque
class Solution:
    def wordBreak(self, s: str, wordDict: "list[str]") -> bool:
        """Return True if ``s`` can be segmented into words from ``wordDict``.

        BFS over suffixes of ``s``: each queue entry is the not-yet-matched
        remainder; ``seen`` prevents re-exploring the same suffix.
        NOTE: the annotation is quoted because the original used ``List[str]``
        without importing ``typing``, which raises NameError at class-definition
        time.
        """
        q = deque([s])
        seen = set()
        while q:
            current = q.popleft()  # popleft() = BFS; pop() would be DFS
            for word in wordDict:
                if current.startswith(word):
                    rest = current[len(word):]
                    if rest == "":
                        # every character consumed -> valid segmentation
                        return True
                    if rest not in seen:
                        seen.add(rest)
                        q.append(rest)
        return False
"""
139. Word Break
Medium
3420
184
Add to List
Share
Given a non-empty string s and a dictionary wordDict containing a list of non-empty words, determine if s can be segmented into a space-separated sequence of one or more dictionary words.
Note:
The same word in the dictionary may be reused multiple times in the segmentation.
You may assume the dictionary does not contain duplicate words.
Example 1:
Input: s = "leetcode", wordDict = ["leet", "code"]
Output: true
Explanation: Return true because "leetcode" can be segmented as "leet code".
Example 2:
Input: s = "applepenapple", wordDict = ["apple", "pen"]
Output: true
Explanation: Return true because "applepenapple" can be segmented as "apple pen apple".
Note that you are allowed to reuse a dictionary word.
Example 3:
Input: s = "catsandog", wordDict = ["cats", "dog", "sand", "and", "cat"]
Output: false
""" |
from django.test import TestCase
from users.models import CustomUser
from django.urls import reverse
class UserLoginViewTest(TestCase):
    """Smoke tests for the login view: URL, named route, and template."""

    def test_login_view_url_exists_at_desired_location(self):
        response = self.client.get('/login/')
        self.assertEqual(response.status_code, 200)

    def test_login_view_url_accessible_by_name(self):
        response = self.client.get(reverse('login'))
        self.assertEqual(response.status_code, 200)

    def test_login_view_uses_correct_template(self):
        response = self.client.get(reverse('login'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'registration/login.html')
class UserLogoutViewTest(TestCase):
    """Smoke tests for the logout view: URL, named route, and template."""

    def test_logout_view_url_exists_at_desired_location(self):
        response = self.client.get('/logout/')
        self.assertEqual(response.status_code, 200)

    def test_logout_view_url_accessible_by_name(self):
        response = self.client.get(reverse('logout'))
        self.assertEqual(response.status_code, 200)

    def test_logout_view_uses_correct_template(self):
        response = self.client.get(reverse('logout'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'registration/logged_out.html')
class UserSignupViewTest(TestCase):
    """Smoke tests for the signup view: URL, named route, and template."""

    def test_signup_view_url_exists_at_desired_location(self):
        response = self.client.get('/signup/')
        self.assertEqual(response.status_code, 200)

    def test_signup_view_url_accessible_by_name(self):
        response = self.client.get(reverse('signup'))
        self.assertEqual(response.status_code, 200)

    def test_signup_view_uses_correct_template(self):
        response = self.client.get(reverse('signup'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'registration/signup.html')
class UserResetPasswordViewTest(TestCase):
    """Smoke tests for the password-reset entry view."""

    def test_reset_password_view_url_exists_at_desired_location(self):
        response = self.client.get('/password-reset/')
        self.assertEqual(response.status_code, 200)

    def test_reset_password_view_url_accessible_by_name(self):
        response = self.client.get(reverse('password_reset'))
        self.assertEqual(response.status_code, 200)

    def test_reset_password_view_uses_correct_template(self):
        response = self.client.get(reverse('password_reset'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'registration/password_reset_form.html')
class UserResetPasswordUsernameDoneViewTest(TestCase):
    """Smoke tests for the username-based password-reset confirmation page."""

    def test_reset_password_username_view_url_exists_at_desired_location(self):
        response = self.client.get('/password-reset/username/done/')
        self.assertEqual(response.status_code, 200)

    def test_reset_password_username_view_url_accessible_by_name(self):
        response = self.client.get(reverse('password_reset_user'))
        self.assertEqual(response.status_code, 200)

    def test_reset_password_username_view_uses_correct_template(self):
        response = self.client.get(reverse('password_reset_user'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'registration/password_reset_user_done.html')
class UserResetPasswordEmailDoneViewTest(TestCase):
    """Smoke tests for the email-based password-reset confirmation page."""

    def test_reset_password_email_view_url_exists_at_desired_location(self):
        response = self.client.get('/password-reset/email/done/')
        self.assertEqual(response.status_code, 200)

    def test_reset_password_email_view_url_accessible_by_name(self):
        response = self.client.get(reverse('password_reset_email'))
        self.assertEqual(response.status_code, 200)

    def test_reset_password_email_view_uses_correct_template(self):
        response = self.client.get(reverse('password_reset_email'))
        self.assertEqual(response.status_code, 200)
        # NOTE(review): asserts the *user* done template, not an email-specific
        # one — looks copy-pasted from the username test; confirm against urls.py.
        self.assertTemplateUsed(response, 'registration/password_reset_user_done.html')
class UserResetPasswordPhoneDoneViewTest(TestCase):
    """Smoke tests for the phone-based password-reset confirmation page."""

    def test_reset_password_phone_view_url_exists_at_desired_location(self):
        response = self.client.get('/password-reset/phone/done/')
        self.assertEqual(response.status_code, 200)

    def test_reset_password_phone_view_url_accessible_by_name(self):
        response = self.client.get(reverse('password_reset_phone'))
        self.assertEqual(response.status_code, 200)

    def test_reset_password_phone_view_uses_correct_template(self):
        response = self.client.get(reverse('password_reset_phone'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'registration/password_reset_phone_done.html')
class UserResetPasswordDoneViewTest(TestCase):
    """Smoke tests for the password-reset completion page."""

    def test_reset_done_view_url_exists_at_desired_location(self):
        response = self.client.get('/reset/done/')
        self.assertEqual(response.status_code, 200)

    def test_reset_done_view_url_accessible_by_name(self):
        response = self.client.get(reverse('password_reset_complete'))
        self.assertEqual(response.status_code, 200)

    def test_reset_done_view_uses_correct_template(self):
        response = self.client.get(reverse('password_reset_complete'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'registration/password_reset_complete.html')
class UserAccessBannedViewTest(TestCase):
    """Smoke tests for the banned-user page."""

    def test_access_banned_view_url_exists_at_desired_location(self):
        response = self.client.get('/login/banned/')
        self.assertEqual(response.status_code, 200)

    def test_access_banned_view_url_accessible_by_name(self):
        response = self.client.get(reverse('banned'))
        self.assertEqual(response.status_code, 200)

    def test_access_banned_view_uses_correct_template(self):
        response = self.client.get(reverse('banned'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'registration/banned.html')
class UserActivationSendViewTest(TestCase):
    """Smoke tests for the account-activation-sent page."""

    def test_activation_sent_view_url_exists_at_desired_location(self):
        response = self.client.get('/account-activation-sent/')
        self.assertEqual(response.status_code, 200)

    def test_activation_sent_view_url_accessible_by_name(self):
        response = self.client.get(reverse('account_activation_sent'))
        self.assertEqual(response.status_code, 200)

    def test_activation_sent_view_uses_correct_template(self):
        response = self.client.get(reverse('account_activation_sent'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'registration/account_activation_sent.html')
class UserVerificationTokenViewTest(TestCase):
    """Smoke tests for the verification-token page."""

    def test_ver_token_view_url_exists_at_desired_location(self):
        response = self.client.get('/verification/token/')
        self.assertEqual(response.status_code, 200)

    def test_ver_token_view_url_accessible_by_name(self):
        response = self.client.get(reverse('token_validation'))
        self.assertEqual(response.status_code, 200)

    def test_ver_token_view_uses_correct_template(self):
        response = self.client.get(reverse('token_validation'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'registration/token_validation.html')
class UserQuestionResetViewTest(TestCase):
    """Password reset by username: existing user gets 200, unknown user redirects."""

    def setUp(self):
        user = CustomUser.objects.create_user(username='testuser1', password='12345')
        user.save()

    def test_user_exist_view_url_exists_at_desired_location(self):
        response = self.client.get('/password-reset/testuser1/')
        self.assertEqual(response.status_code, 200)

    def test_user_not_exist_view_url_exists_at_desired_location(self):
        # Unknown usernames are redirected (302) rather than rendered.
        response = self.client.get('/password-reset/testuser100/')
        self.assertEqual(response.status_code, 302)
class UserIndexPageViewTest(TestCase):
    """Smoke tests for the landing/home page.

    NOTE: the last two test methods were previously named ``test_ver_token_*``,
    copy-pasted from UserVerificationTokenViewTest; renamed to describe what
    they actually test (the index view). Test coverage is unchanged.
    """

    def setUp(self):
        user = CustomUser.objects.create_user(username='testuser1', password='12345')
        user.save()

    def test_index_view_url_exists_at_desired_location(self):
        response = self.client.get('')
        self.assertEqual(response.status_code, 200)

    def test_index_view_url_accessible_by_name(self):
        response = self.client.get(reverse('home'))
        self.assertEqual(response.status_code, 200)

    def test_index_view_uses_correct_template(self):
        response = self.client.get(reverse('home'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'landing/home.html')
|
"""
https://edabit.com/challenge/76ibd8jZxvhAwDskb
"""
def tallest_skyscraper(lst):
    """Return the height of the tallest skyscraper in a 0/1 grid.

    Each column of ``lst`` is a building; its height is the number of 1-cells
    in that column. ``zip(*lst)`` transposes rows into columns, replacing the
    original double index loop with a single idiomatic pass.
    """
    return max(sum(column) for column in zip(*lst))
# Smoke check: the tallest column of this grid has three 1-cells, so the
# comparison should print True.
print(tallest_skyscraper([
    [0, 0, 0, 0],
    [0, 1, 0, 0],
    [0, 1, 1, 0],
    [1, 1, 1, 1]
]) == 3)
|
""" This module extends the ra.Relation class to allow row_id based get, put
and delete operations of the tuples.
The tuples are stored as a list of list, where the outer list stores a
single list per unique rowid, and the inner list stores multiple versions
of the corresponding rowid.
The most recent version is appended to the end of the list so the most
recent version of tuple with row_id 0 can be accessed using:
self.tuples[0][-1]
Please note that multiple versions are stored only if the Table class
object is instantiated using the use_multiversion option set to True.
The delete operation on any row_id inserts the row_id into the
deleted_row_ids list, which is checked for a free row_id in the insert
operation. During delete operation, the corresponding list holding the
tuple is made empty leaving hole in the self.tuples list. This empty
row is ingored while printing the table.
"""
from ra.relation import Relation
class Table(Relation):
    """ Implementation of Table class which extends the existing ra.relation.
    Relation class.

    Attributes
    ----------
    schema :list
        Schema of the table as a list of pair
        ('attribute_name', attribute_type)
    tuples :list
        List which maintains multiple tuple versions for every row
    deleted_row_ids :list
        List of :int rowids whose tuples were deleted.
    use_multiversion :bool
        Whether to store multiple versions per tuple or not
    """

    def __init__(self, name, schema, use_multiversion=False):
        """ Instantiates an object of the Table class

        Parameters
        ----------
        name :string
            Name of the relation or expression the relation object is built
            from.
        schema :list
            Schema of the table as a list of pair
            ('attribute_name', attribute_type)
        use_multiversion :bool
            Whether to store multiple versions per tuple or not
        """
        super().__init__(name, schema)
        self.tuples = []
        self.schema = schema
        self.deleted_row_ids = []
        self.use_multiversion = use_multiversion

    def get(self, rowid):
        """ Get the version list of the tuple with the given row_id

        Parameters
        ----------
        rowid :int
            rowid of the tuple

        Returns
        -------
        The list of versions stored for the row (most recent last), or an
        empty list if rowid is out of range or the row was deleted.
        """
        if rowid < 0 or rowid >= len(self.tuples):
            return []
        return self.tuples[rowid]

    def put(self, row):
        """ Insert a row into the table

        The target slot is taken from the row's own 'row_id' attribute, which
        must already have been reserved via get_next_row_id().

        Parameters
        ----------
        row :list
            List of attribute values in a correct order
        """
        rowid_index = self.get_attribute_index('row_id')
        assert row is not None
        assert row[rowid_index] >= 0 and row[rowid_index] < len(self.tuples)
        row = tuple(row)
        self._check_schema(row)
        if not self.use_multiversion:
            # single-version store: the new row replaces any previous version
            self.tuples[row[rowid_index]] = [row]
        else:
            # multi-version store: append, keeping older versions in order
            self.tuples[row[rowid_index]].append(row)

    def get_next_row_id(self):
        """ Get the next free rowid where the row can be inserted

        Returns
        -------
        :int The free rowid where the row can be inserted
        """
        if len(self.deleted_row_ids) > 0:
            # is there an existing deleted row? reuse its slot
            rowid = self.deleted_row_ids.pop()
        else:
            rowid = len(self.tuples)
            # append an empty row
            self.tuples.append([])
        return rowid

    def tuple_to_dict(self, row):
        """ Convert the tuple into a dictionary for single version store

        Parameters
        ----------
        row :list
            list of values in the tuple

        Returns
        -------
        tuple_dictionary :dict
            Dictionary with the tuple mapping attribute-name to value
        """
        assert len(row) == len(self.attributes)
        return dict(zip(self.attributes, row))

    def dict_to_tuple(self, update_dict, old_row):
        """ Convert the given dictionary into a tuple for single version store

        Parameters
        ----------
        update_dict :dict
            Dictionary with the tuple mapping attribute-name to value
        old_row :list
            copy of the most recent original tuple

        Returns
        -------
        row :tuple
            Properly ordered tuple representation of the given dictionary,
            or None if update_dict references an unknown attribute.
        """
        old_row = list(old_row)
        for attribute_name in update_dict:
            if attribute_name in self.attributes:
                index = self.get_attribute_index(attribute_name)
                old_row[index] = update_dict[attribute_name]
            else:
                return None
        return tuple(old_row)

    def delete(self, rowid):
        """ Delete the given tuple from the table

        Parameters
        ----------
        rowid :int
            The rowid which is to be deleted
        """
        assert rowid >= 0 and rowid < len(self.tuples)
        if rowid not in self.deleted_row_ids:
            self.tuples[rowid] = []
            # BUG FIX: record the freed slot so get_next_row_id() can reuse it,
            # as described in the module docstring; previously the id was never
            # appended, so deleted slots leaked forever.
            self.deleted_row_ids.append(rowid)

    def _print(self, limit, tuples, version=False):
        """ Prints the given table and its tuples in a tabular layout

        Parameters
        ----------
        limit :int
            The maximum number of rows to print
        tuples :list
            The tuples (rows) to print
        version :bool
            If the table represents the version table. This adds
            additional text to the table name to make it distin-
            -guisable from the original table.
        """
        assert (limit is None or limit > 0)
        # calculate column width for printing
        col_width = self._get_col_width()
        # relation name bold
        if version:
            target = '-' * len(self.name) + '\n' \
                '\033[1m' + self.name + ' (older_versions)' \
                '\033[0m \n'
        else:
            target = '-' * len(self.name) + '\n' \
                '\033[1m' + self.name + '\033[0m \n'
        # attribute names bold
        target += '-' * max(len(self.name), col_width * len(self.attributes))
        target += '\n' + '\033[1m' + ''.join(attr_name.ljust(col_width)
                                             for attr_name in self.attributes)
        target += '\033[0m \n'
        target += '-' * max(len(self.name), col_width * len(self.attributes))
        target += '\n'
        # tuples
        limitCount = 0
        for tup in tuples:
            if len(tup) > 0:
                target += ''.join(str(attr_val).ljust(col_width)
                                  for attr_val in tup)
                target += '\n'
                limitCount += 1
            if limit is not None and limitCount >= limit:
                target += "\nWARNING: skipping "
                # BUG FIX: count skipped rows against the list actually being
                # printed, not self.tuples (they differ for the version table).
                target += str(len(tuples) - limit)
                target += " out of " + str(len(tuples)) + " tuples..."
                break
        print(target)

    def print_table(self, limit=None):
        """ Print the table, and a secondary version table (if exists)

        Parameters
        ----------
        limit :int
            The maximum number of rows to print
        """
        old_version_tuples = []
        most_recent_tuples = []
        for row in self.tuples:
            if len(row) > 0:
                most_recent_tuples.append(row[-1])
                for version in row[:-1]:
                    old_version_tuples.append(version)
        self._print(limit, most_recent_tuples, False)
        if len(old_version_tuples) > 0:
            print('\n\n')
            self._print(limit, old_version_tuples, True)

    def _get_col_width(self):
        """ Computes the maximum column width required to represent the
        relation in tabular layout.

        Returns
        -------
        :int The maximum column width required by the table
        """
        attr_name_width = max(len(attr_name) for attr_name in self.attributes)
        attr_val_width = max((len(str(attr_val))
                              for tup in self.tuples
                              for version in tup
                              for attr_val in version), default=0)
        return max(attr_name_width, attr_val_width) + 2  # padding
|
import numpy as np
from scipy.io import loadmat,savemat
import os
import matplotlib.pyplot as plt
# One prediction .mat file per evaluated clip (ground truth lives in Allmat/).
Predictions = os.listdir("Allmat/")
def perf_measure(y_actual, y_pred):
    """Return confusion-matrix counts (TP, FP, TN, FN) for binary 0/1 labels.

    Parameters
    ----------
    y_actual : sequence of ground-truth labels (0 or 1)
    y_pred   : sequence of predicted labels (0 or 1)

    Iterates the two sequences in lockstep with zip() instead of indexing by
    range(len(...)); the four original overlapping conditions collapse into
    one nested branch per element.
    """
    TP = FP = TN = FN = 0
    for actual, pred in zip(y_actual, y_pred):
        if pred == 1:
            TP += actual == pred
            FP += actual != pred
        elif pred == 0:
            TN += actual == pred
            FN += actual != pred
    return (TP, FP, TN, FN)
# Sweep a detection threshold and accumulate global TPR/FPR for a ROC curve.
TPR = list()
FPR = list()
for thr in range(1, 9000, 50):
    print(thr)
    # map the integer sweep index onto the actual threshold scale
    thr = thr * 0.00005
    # global confusion counts accumulated over all prediction files
    GTP = 0
    GFP = 0
    GTN = 0
    GFN = 0
    for prediction in Predictions:
        # GT: per-sample ground truth; NT: per-segment model scores
        # (assumes both .mat files share the same base name — TODO confirm)
        GT = loadmat("Allmat/" + prediction)['truth'][0]
        NT = loadmat("../Eval_Res/" + prediction)['prediction']
        # FT: segment scores expanded back to per-sample resolution
        FT = np.zeros((1, len(GT)))
        d_len = int(len(GT) / 32)
        for i in range(len(NT)):
            # NOTE(review): slice d_len*(i-1):d_len*i is empty for i == 0 and
            # shifted by one segment thereafter; d_len*i:d_len*(i+1) looks
            # intended — confirm against the 32-segment layout before changing.
            FT[0, 1 * d_len * (i - 1):d_len * i] = NT[i]
            if i == len(NT) - 1:
                # last segment fills the remaining tail
                FT[0, 1 * d_len * (i - 1):-1] = NT[i]
        # binarize the expanded scores at the current threshold
        for i in range(0, FT.shape[1]):
            if FT[0, i] >= thr:
                FT[0, i] = 1
            else:
                FT[0, i] = 0
        TP, FP, TN, FN = perf_measure(GT, FT[0, :])
        GTP += TP
        GFP += FP
        GTN += TN
        GFN += FN
    # one ROC point per threshold (assumes GTP+GFN and GFP+GTN are non-zero)
    TPR.append(GTP / (GTP + GFN))
    FPR.append(GFP / (GFP + GTN))
print(TPR, FPR)
plt.plot(np.array(FPR), np.array(TPR))
plt.show()
|
import random
def getrandarray(origsize, number):
    """Generate `number` distinct sorted draws of `origsize` numbers in [1, 34].

    Rejection-samples each draw (no repeated number within a draw) and rejects
    whole draws that duplicate, or overlap too heavily with, an earlier draw
    (see listcontain). Uses print(...) instead of the Python-2-only
    `print x` statement; with a single argument the output is identical on
    Python 2 and 3.
    """
    listoflist = []
    while number > 0:
        print(number)  # progress: draws still to generate
        size = origsize
        arrayList = []
        while size > 0:
            cnum = getrand(1, 34)
            if cnum not in arrayList:
                arrayList.append(cnum)
                size = size - 1
        arrayList.sort()
        if not listcontain(listoflist, arrayList):
            listoflist.append(arrayList)
            number = number - 1
    return listoflist
def listcontain(listoflist, candidate):
    """Return True if candidate equals, or shares more than 4 numbers with,
    any list already in listoflist.

    The second parameter was renamed from `list`, which shadowed the builtin.
    Each stored list is sorted in place before the equality check, matching
    the original behavior (draws are stored sorted anyway).
    """
    for existing in listoflist:
        existing.sort()
        if existing == candidate:
            return True
        # count how many of the stored numbers also appear in the candidate
        shared = sum(1 for value in existing if value in candidate)
        if shared > 4:
            return True
    return False
def getrand(a, b):
    """Return a uniformly random integer N with a <= N <= b (both inclusive)."""
    return random.randint(a, b)
def main():
    """Print ten generated draws of five distinct numbers each.

    Uses print(...) instead of the Python-2-only `print x` statement; with a
    single argument the output is identical on Python 2 and 3.
    """
    for draw in getrandarray(5, 10):
        print(draw)


if __name__ == '__main__':
    main()
|
"""
Реализовать формирование списка, используя функцию range() и возможности генератора.
В список должны войти четные числа от 100 до 1000 (включая границы). Необходимо
получить результат вычисления произведения всех элементов списка.
Подсказка: использовать функцию reduce().
"""
from functools import reduce
# Product of all even numbers in [100, 1000]: reduce() folds multiplication
# over the range directly — wrapping it in [x for x in ...] built a needless
# intermediate list.
print(reduce(lambda a, b: a * b, range(100, 1001, 2)))
#Find non repeatative word from a string
from codecs import StreamReader
from collections import Counter
def nonrepat(string):
    """Print the first non-repeating character of `string` (and debug traces).

    The Counter was previously rebuilt on every loop iteration even though the
    frequencies never change; it is now computed once before the loop. The
    printed output is byte-for-byte identical.
    """
    # get the frequency once — identical Counter on every iteration
    freq = Counter(string)
    for i in string:
        print(f" \n Strings is {i} and count is {freq} \n ")
        if freq[i] == 1:
            print(f"The first char is : {i}")
            break
if __name__ == '__main__':
    # Demo run: in "samar", 's' is the first character with count 1.
    string = "samar"
    nonrepat(string)
#!/usr/bin/env python
"""Tests for an ApiCallRouterWithChecks."""
import mock
from grr import config
from grr_api_client import errors as grr_api_errors
from grr.gui import api_auth_manager
from grr.gui import api_call_handler_base
from grr.gui import api_call_router_with_approval_checks as api_router
from grr.gui import http_api_e2e_test
from grr.gui.api_plugins import client as api_client
from grr.gui.api_plugins import cron as api_cron
from grr.gui.api_plugins import flow as api_flow
from grr.gui.api_plugins import hunt as api_hunt
from grr.gui.api_plugins import user as api_user
from grr.gui.api_plugins import vfs as api_vfs
from grr.lib import flags
from grr.lib import utils
from grr.server import access_control
from grr.server import aff4
from grr.server.aff4_objects import security
from grr.server.aff4_objects import user_managers_test
from grr.server.hunts import implementation
from grr.server.hunts import standard
from grr.server.hunts import standard_test
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
class ApiCallRouterWithApprovalChecksTest(test_lib.GRRBaseTest,
standard_test.StandardHuntTestMixin):
"""Tests for an ApiCallRouterWithApprovalChecks."""
# ACCESS_CHECKED_METHODS is used to identify the methods that are tested
# for being checked for necessary access rights. This list is used
# in testAllOtherMethodsAreNotAccessChecked.
ACCESS_CHECKED_METHODS = []
def setUp(self):
super(ApiCallRouterWithApprovalChecksTest, self).setUp()
self.client_id = test_lib.TEST_CLIENT_ID
self.delegate_mock = mock.MagicMock()
self.legacy_manager_mock = mock.MagicMock()
self.router = api_router.ApiCallRouterWithApprovalChecks(
delegate=self.delegate_mock, legacy_manager=self.legacy_manager_mock)
def CheckMethodIsAccessChecked(self,
method,
access_type,
args=None,
token=None):
token = token or self.token
# Check that legacy access control manager is called and that the method
# is then delegated.
method(args, token=token)
self.assertTrue(getattr(self.legacy_manager_mock, access_type).called)
getattr(self.delegate_mock, method.__name__).assert_called_with(
args, token=token)
self.delegate_mock.reset_mock()
self.legacy_manager_mock.reset_mock()
try:
# Check that when exception is raised by legacy manager, the delegate
# method is not called.
getattr(self.legacy_manager_mock,
access_type).side_effect = access_control.UnauthorizedAccess("")
with self.assertRaises(access_control.UnauthorizedAccess):
method(args, token=token)
self.assertTrue(getattr(self.legacy_manager_mock, access_type).called)
self.assertFalse(getattr(self.delegate_mock, method.__name__).called)
finally:
getattr(self.legacy_manager_mock, access_type).side_effect = None
self.delegate_mock.reset_mock()
self.legacy_manager_mock.reset_mock()
def CheckMethodIsNotAccessChecked(self, method, args=None, token=None):
token = token or self.token
method(args, token=token)
self.assertFalse(self.legacy_manager_mock.CheckClientAccess.called)
self.assertFalse(self.legacy_manager_mock.CheckHuntAccess.called)
self.assertFalse(self.legacy_manager_mock.CheckCronJob.called)
self.assertFalse(self.legacy_manager_mock.CheckIfCanStartFlow.called)
self.assertFalse(self.legacy_manager_mock.CheckDataStoreAccess.called)
getattr(self.delegate_mock, method.__name__).assert_called_with(
args, token=token)
self.delegate_mock.reset_mock()
self.legacy_manager_mock.reset_mock()
ACCESS_CHECKED_METHODS.extend([
"InterrogateClient",
"ListClientCrashes",
"ListClientActionRequests",
"GetClientLoadStats"]) # pyformat: disable
def testClientMethodsAreAccessChecked(self):
args = api_client.ApiInterrogateClientArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.InterrogateClient, "CheckClientAccess", args=args)
args = api_client.ApiListClientCrashesArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.ListClientCrashes, "CheckClientAccess", args=args)
args = api_client.ApiGetClientLoadStatsArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.GetClientLoadStats, "CheckClientAccess", args=args)
ACCESS_CHECKED_METHODS.extend([
"ListFiles",
"GetVfsFilesArchive",
"GetFileDetails",
"GetFileText",
"GetFileBlob",
"GetFileVersionTimes",
"GetFileDownloadCommand",
"CreateVfsRefreshOperation",
"GetVfsTimeline",
"GetVfsTimelineAsCsv",
"UpdateVfsFileContent"
]) # pyformat: disable
def testVfsMethodsAreAccessChecked(self):
args = api_vfs.ApiListFilesArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.ListFiles, "CheckClientAccess", args=args)
args = api_vfs.ApiGetVfsFilesArchiveArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.GetVfsFilesArchive, "CheckClientAccess", args=args)
args = api_vfs.ApiGetFileDetailsArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.GetFileDetails, "CheckClientAccess", args=args)
args = api_vfs.ApiGetFileTextArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.GetFileText, "CheckClientAccess", args=args)
args = api_vfs.ApiGetFileBlobArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.GetFileBlob, "CheckClientAccess", args=args)
args = api_vfs.ApiGetFileVersionTimesArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.GetFileVersionTimes, "CheckClientAccess", args=args)
args = api_vfs.ApiGetFileDownloadCommandArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.GetFileDownloadCommand, "CheckClientAccess", args=args)
args = api_vfs.ApiCreateVfsRefreshOperationArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.CreateVfsRefreshOperation, "CheckClientAccess", args=args)
args = api_vfs.ApiGetVfsTimelineArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.GetVfsTimeline, "CheckClientAccess", args=args)
args = api_vfs.ApiGetVfsTimelineAsCsvArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.GetVfsTimelineAsCsv, "CheckClientAccess", args=args)
args = api_vfs.ApiUpdateVfsFileContentArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.UpdateVfsFileContent, "CheckClientAccess", args=args)
ACCESS_CHECKED_METHODS.extend([
"ListFlows",
"GetFlow",
"CreateFlow",
"CancelFlow",
"ListFlowRequests",
"ListFlowResults",
"GetExportedFlowResults",
"GetFlowResultsExportCommand",
"GetFlowFilesArchive",
"ListFlowOutputPlugins",
"ListFlowOutputPluginLogs",
"ListFlowOutputPluginErrors",
"ListFlowLogs"
]) # pyformat: disable
def testAllClientFlowsMethodsAreAccessChecked(self):
args = api_flow.ApiListFlowsArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.ListFlows, "CheckClientAccess", args=args)
args = api_flow.ApiGetFlowArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.GetFlow, "CheckClientAccess", args=args)
args = api_flow.ApiCreateFlowArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.CreateFlow, "CheckClientAccess", args=args)
self.CheckMethodIsAccessChecked(
self.router.CreateFlow, "CheckIfCanStartFlow", args=args)
args = api_flow.ApiCancelFlowArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.CancelFlow, "CheckClientAccess", args=args)
args = api_flow.ApiListFlowRequestsArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.ListFlowRequests, "CheckClientAccess", args=args)
args = api_flow.ApiListFlowResultsArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.ListFlowResults, "CheckClientAccess", args=args)
args = api_flow.ApiGetExportedFlowResultsArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.GetExportedFlowResults, "CheckClientAccess", args=args)
args = api_flow.ApiGetFlowResultsExportCommandArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.GetFlowResultsExportCommand, "CheckClientAccess", args=args)
args = api_flow.ApiGetFlowFilesArchiveArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.GetFlowFilesArchive, "CheckClientAccess", args=args)
args = api_flow.ApiListFlowOutputPluginsArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.ListFlowOutputPlugins, "CheckClientAccess", args=args)
args = api_flow.ApiListFlowOutputPluginLogsArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.ListFlowOutputPluginLogs, "CheckClientAccess", args=args)
args = api_flow.ApiListFlowOutputPluginErrorsArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.ListFlowOutputPluginErrors, "CheckClientAccess", args=args)
args = api_flow.ApiListFlowLogsArgs(client_id=self.client_id)
self.CheckMethodIsAccessChecked(
self.router.ListFlowLogs, "CheckClientAccess", args=args)
ACCESS_CHECKED_METHODS.extend([
"ForceRunCronJob",
"ModifyCronJob",
"DeleteCronJob"]) # pyformat: disable
def testCronJobMethodsAreAccessChecked(self):
args = api_cron.ApiForceRunCronJobArgs(cron_job_id="TestCronJob")
self.CheckMethodIsAccessChecked(
self.router.ForceRunCronJob, "CheckCronJobAccess", args=args)
args = api_cron.ApiModifyCronJobArgs(cron_job_id="TestCronJob")
self.CheckMethodIsAccessChecked(
self.router.ModifyCronJob, "CheckCronJobAccess", args=args)
args = api_cron.ApiDeleteCronJobArgs(cron_job_id="TestCronJob")
self.CheckMethodIsAccessChecked(
self.router.DeleteCronJob, "CheckCronJobAccess", args=args)
ACCESS_CHECKED_METHODS.extend([
"ModifyHunt",
"DeleteHunt",
"GetHuntFilesArchive",
"GetHuntFile"]) # pyformat: disable
def testModifyHuntIsAccessChecked(self):
args = api_hunt.ApiModifyHuntArgs(hunt_id="H:123456")
self.CheckMethodIsAccessChecked(
self.router.ModifyHunt, "CheckHuntAccess", args=args)
def testDeleteHuntRaisesIfHuntNotFound(self):
args = api_hunt.ApiDeleteHuntArgs(hunt_id="H:123456")
with self.assertRaises(api_call_handler_base.ResourceNotFoundError):
self.router.DeleteHunt(args, token=self.token)
def testDeleteHuntIsAccessCheckedIfUserIsNotCreator(self):
hunt = self.CreateHunt()
args = api_hunt.ApiDeleteHuntArgs(hunt_id=hunt.urn.Basename())
self.CheckMethodIsAccessChecked(
self.router.DeleteHunt,
"CheckHuntAccess",
args=args,
token=access_control.ACLToken(username="foo"))
def testDeleteHuntIsNotAccessCheckedIfUserIsCreator(self):
hunt = self.CreateHunt()
args = api_hunt.ApiDeleteHuntArgs(hunt_id=hunt.urn.Basename())
self.CheckMethodIsNotAccessChecked(self.router.DeleteHunt, args=args)
def testGetHuntFilesArchiveIsAccessChecked(self):
args = api_hunt.ApiGetHuntFilesArchiveArgs(hunt_id="H:123456")
self.CheckMethodIsAccessChecked(
self.router.GetHuntFilesArchive, "CheckHuntAccess", args=args)
def testGetHuntFileIsAccessChecked(self):
args = api_hunt.ApiGetHuntFileArgs(hunt_id="H:123456")
self.CheckMethodIsAccessChecked(
self.router.GetHuntFilesArchive, "CheckHuntAccess", args=args)
ACCESS_CHECKED_METHODS.extend([
"ListGrrBinaries",
"GetGrrBinary"]) # pyformat: disable
def testListGrrBinariesIsAccessChecked(self):
with self.assertRaises(access_control.UnauthorizedAccess):
self.router.ListGrrBinaries(None, token=self.token)
self.CreateAdminUser(self.token.username)
self.router.ListGrrBinaries(None, token=self.token)
def testGetGrrBinaryIsAccessChecked(self):
with self.assertRaises(access_control.UnauthorizedAccess):
self.router.GetGrrBinary(None, token=self.token)
self.CreateAdminUser(self.token.username)
self.router.GetGrrBinary(None, token=self.token)
ACCESS_CHECKED_METHODS.extend([
"ListFlowDescriptors",
])
  def testListFlowDescriptorsIsAccessChecked(self):
    """ListFlowDescriptors hands the router's security manager to the handler."""
    handler = self.router.ListFlowDescriptors(None, token=self.token)
    # Check that correct security manager got passed into the handler.
    self.assertEqual(handler.legacy_security_manager,
                     self.router.legacy_manager)
ACCESS_CHECKED_METHODS.extend([
"GetGrrUser"]) # pyformat: disable
  def testGetGrrUserReturnsFullTraitsForAdminUser(self):
    """An admin user gets a handler with all interface traits enabled."""
    self.CreateAdminUser(self.token.username)
    handler = self.router.GetGrrUser(None, token=self.token)
    self.assertEqual(handler.interface_traits,
                     api_user.ApiGrrUserInterfaceTraits().EnableAll())
  def testGetGrrUserReturnsRestrictedTraitsForNonAdminUser(self):
    """A non-admin user must NOT get the full set of interface traits."""
    handler = self.router.GetGrrUser(None, token=self.token)
    self.assertNotEqual(handler.interface_traits,
                        api_user.ApiGrrUserInterfaceTraits().EnableAll())
  def testAllOtherMethodsAreNotAccessChecked(self):
    """Every router method not in ACCESS_CHECKED_METHODS must be unchecked."""
    unchecked_methods = (
        set(self.router.__class__.GetAnnotatedMethods().keys()) -
        set(self.ACCESS_CHECKED_METHODS))
    # Guard against the registry silently covering everything.
    self.assertTrue(unchecked_methods)
    for method_name in unchecked_methods:
      self.CheckMethodIsNotAccessChecked(getattr(self.router, method_name))
class ApiCallRouterWithApprovalChecksE2ETest(http_api_e2e_test.ApiE2ETest):
  """End-to-end tests for ApiCallRouterWithApprovalChecks.

  Installs the approval-checking router as the API default and exercises the
  approval workflow (request, grant, revoke, expiry, caching) through the
  public API client for clients, hunts and flows.
  """

  def setUp(self):
    super(ApiCallRouterWithApprovalChecksE2ETest, self).setUp()
    # Make the approval-checking router the default API router for this test.
    self.config_overrider = test_lib.ConfigOverrider({
        "API.DefaultRouter": api_router.ApiCallRouterWithApprovalChecks.__name__
    })
    self.config_overrider.Start()
    # Force creation of new APIAuthorizationManager, so that configuration
    # changes are picked up.
    api_auth_manager.APIACLInit.InitApiAuthManager()

  def tearDown(self):
    super(ApiCallRouterWithApprovalChecksE2ETest, self).tearDown()
    self.config_overrider.Stop()

  def ClearCache(self):
    """Drop cached approval decisions and rebuild the auth manager."""
    api_router.ApiCallRouterWithApprovalChecks.ClearCache()
    api_auth_manager.APIACLInit.InitApiAuthManager()

  def RevokeClientApproval(self, approval_urn, token, remove_from_cache=True):
    """Invalidate an approval by deleting its APPROVER attribute."""
    with aff4.FACTORY.Open(
        approval_urn, mode="rw", token=self.token.SetUID()) as approval_request:
      approval_request.DeleteAttribute(approval_request.Schema.APPROVER)
    if remove_from_cache:
      self.ClearCache()

  def CreateHuntApproval(self, hunt_urn, token, admin=False):
    """Write a two-approver hunt approval; optionally make one an admin."""
    approval_urn = aff4.ROOT_URN.Add("ACL").Add(hunt_urn.Path()).Add(
        token.username).Add(utils.EncodeReasonString(token.reason))
    with aff4.FACTORY.Create(
        approval_urn,
        security.HuntApproval,
        mode="rw",
        token=self.token.SetUID()) as approval_request:
      approval_request.AddAttribute(
          approval_request.Schema.APPROVER("Approver1"))
      approval_request.AddAttribute(
          approval_request.Schema.APPROVER("Approver2"))
    if admin:
      self.CreateAdminUser("Approver1")

  def CreateSampleHunt(self):
    """Creates SampleHunt, writes it to the data store and returns its id."""
    with implementation.GRRHunt.StartHunt(
        hunt_name=standard.SampleHunt.__name__,
        token=self.token.SetUID()) as hunt:
      return hunt.session_id

  def testSimpleUnauthorizedAccess(self):
    """Tests that simple access requires a token."""
    client_id = "C.%016X" % 0
    self.assertRaises(grr_api_errors.AccessForbiddenError,
                      self.api.Client(client_id).File("fs/os/foo").Get)

  def testApprovalExpiry(self):
    """Tests that approvals expire after the correct time."""
    client_id = "C.%016X" % 0
    self.assertRaises(grr_api_errors.AccessForbiddenError,
                      self.api.Client(client_id).File("fs/os/foo").Get)
    with test_lib.FakeTime(100.0, increment=1e-3):
      self.RequestAndGrantClientApproval(client_id, self.token)
      # This should work now.
      self.api.Client(client_id).File("fs/os/foo").Get()
    token_expiry = config.CONFIG["ACL.token_expiry"]
    # Make sure the caches are reset.
    self.ClearCache()
    # This is close to expiry but should still work.
    # (The +100.0/-100.0 offsets cancel; the fake time is exactly token_expiry.)
    with test_lib.FakeTime(100.0 + token_expiry - 100.0):
      self.api.Client(client_id).File("fs/os/foo").Get()
    # Make sure the caches are reset.
    self.ClearCache()
    # Past expiry, should fail.
    with test_lib.FakeTime(100.0 + token_expiry + 100.0):
      self.assertRaises(grr_api_errors.AccessForbiddenError,
                        self.api.Client(client_id).File("fs/os/foo").Get)

  def testClientApproval(self):
    """Tests that we can create an approval object to access clients."""
    client_id = "C.%016X" % 0
    self.assertRaises(grr_api_errors.AccessForbiddenError,
                      self.api.Client(client_id).File("fs/os/foo").Get)
    approval_urn = self.RequestAndGrantClientApproval(client_id, self.token)
    self.api.Client(client_id).File("fs/os/foo").Get()
    # Revoking the approval must make access fail again.
    self.RevokeClientApproval(approval_urn, self.token)
    self.assertRaises(grr_api_errors.AccessForbiddenError,
                      self.api.Client(client_id).File("fs/os/foo").Get)

  def testHuntApproval(self):
    """Tests that we can create an approval object to run hunts."""
    hunt_urn = self.CreateSampleHunt()
    self.assertRaises(grr_api_errors.AccessForbiddenError,
                      self.api.Hunt(hunt_urn.Basename()).Start)
    # A non-admin approval is not sufficient to start a hunt.
    self.CreateHuntApproval(hunt_urn, self.token, admin=False)
    self.assertRaisesRegexp(
        grr_api_errors.AccessForbiddenError,
        "Need at least 1 additional approver with the 'admin' label for access",
        self.api.Hunt(hunt_urn.Basename()).Start)
    self.CreateHuntApproval(hunt_urn, self.token, admin=True)
    self.api.Hunt(hunt_urn.Basename()).Start()

  def testFlowAccess(self):
    """Tests access to flows."""
    client_id = "C." + "a" * 16
    self.assertRaises(
        grr_api_errors.AccessForbiddenError,
        self.api.Client(client_id).CreateFlow,
        name=flow_test_lib.SendingFlow.__name__)
    approval_urn = self.RequestAndGrantClientApproval(client_id, self.token)
    f = self.api.Client(client_id).CreateFlow(
        name=flow_test_lib.SendingFlow.__name__)
    # Revoking the approval blocks further access to the flow.
    self.RevokeClientApproval(approval_urn, self.token)
    self.assertRaises(grr_api_errors.AccessForbiddenError,
                      self.api.Client(client_id).Flow(f.flow_id).Get)
    self.RequestAndGrantClientApproval(client_id, self.token)
    self.api.Client(client_id).Flow(f.flow_id).Get()

  def testCaches(self):
    """Makes sure that results are cached in the security manager."""
    client_id = "C." + "b" * 16
    approval_urn = self.RequestAndGrantClientApproval(client_id, self.token)
    f = self.api.Client(client_id).CreateFlow(
        name=flow_test_lib.SendingFlow.__name__)
    # Remove the approval from the data store, but it should still exist in the
    # security manager cache.
    self.RevokeClientApproval(approval_urn, self.token, remove_from_cache=False)
    # If this doesn't raise now, all answers were cached.
    self.api.Client(client_id).Flow(f.flow_id).Get()
    self.ClearCache()
    # This must raise now.
    self.assertRaises(grr_api_errors.AccessForbiddenError,
                      self.api.Client(client_id).Flow(f.flow_id).Get)

  def testNonAdminsCanNotStartAdminOnlyFlow(self):
    """A client approval alone is not enough for admin-only flows."""
    client_id = self.SetupClient(0).Basename()
    self.RequestAndGrantClientApproval(client_id, token=self.token)
    with self.assertRaises(grr_api_errors.AccessForbiddenError):
      self.api.Client(client_id).CreateFlow(
          name=user_managers_test.AdminOnlyFlow.__name__)

  def testAdminsCanStartAdminOnlyFlow(self):
    """Admin label plus client approval allows admin-only flows."""
    client_id = self.SetupClient(0).Basename()
    self.CreateAdminUser(self.token.username)
    self.RequestAndGrantClientApproval(client_id, token=self.token)
    self.api.Client(client_id).CreateFlow(
        name=user_managers_test.AdminOnlyFlow.__name__)

  def testClientFlowWithoutCategoryCanNotBeStartedWithClient(self):
    """Flows without a category may not be started on a client."""
    client_id = self.SetupClient(0).Basename()
    self.RequestAndGrantClientApproval(client_id, token=self.token)
    with self.assertRaises(grr_api_errors.AccessForbiddenError):
      self.api.Client(client_id).CreateFlow(
          name=user_managers_test.ClientFlowWithoutCategory.__name__)

  def testClientFlowWithCategoryCanBeStartedWithClient(self):
    """Flows with a category may be started on a client."""
    client_id = self.SetupClient(0).Basename()
    self.RequestAndGrantClientApproval(client_id, token=self.token)
    self.api.Client(client_id).CreateFlow(
        name=user_managers_test.ClientFlowWithCategory.__name__)
def main(argv):
  """Entry point: delegate to the GRR test runner."""
  test_lib.main(argv)
# Parse command-line flags and hand control to main() when run as a script.
if __name__ == "__main__":
  flags.StartMain(main)
|
import os
import pytest
import copy
from microsim.microsim_model import Microsim
from microsim.column_names import ColumnNames
from microsim.population_initialisation import PopulationInitialisation
import multiprocessing
# ********************************************************
# These tests run through a whole dummy model process
# ********************************************************
from quant_api import QuantRampAPI
# Paths to the dummy test data shipped next to this test file.
test_dir = os.path.dirname(os.path.abspath(__file__))
dummy_data_dir = os.path.join(test_dir, "dummy_data")
# arguments used when calling the PopulationInitialisation constructor.
# NOTE(review): QuantRampAPI is constructed at import time here, so merely
# importing this module reads the QUANT_RAMP data - confirm that is intended.
population_init_args = {"data_dir": dummy_data_dir,
                        "testing": True, "debug": True,
                        "quant_object": QuantRampAPI(os.path.join(dummy_data_dir, "QUANT_RAMP"))
                        }
# arguments used when calling the Microsim constructor.
microsim_args = {"data_dir": os.path.join(test_dir, "dummy_data"),
                 "r_script_dir": os.path.normpath(os.path.join(test_dir, "..", "R/py_int")),
                 "disable_disease_status": True}
# This 'fixture' means that other test functions can use the object created here.
# Note: Don't try to run this test, it will be called when running the others that need it,
# like `test_step()`.
@pytest.fixture()
def test_microsim():
    """Yield a Microsim built from the dummy data.

    Other tests take this fixture as an argument; don't run it directly.
    """
    pop_init = PopulationInitialisation(**population_init_args)
    model_kwargs = dict(microsim_args,
                        individuals=pop_init.individuals,
                        activity_locations=pop_init.activity_locations,
                        time_activity_multiplier=None)
    yield Microsim(**model_kwargs)
def test_change_behaviour_with_disease(test_microsim):
    """Check that individuals' behaviour changes correctly with disease status.

    Behaviour change only happens once DISEASE_STATUS_CHANGED is set; a
    symptomatic person shifts time from retail to home, a presymptomatic one
    does not, and a recovered person reverts to the initial durations.
    """
    m = copy.deepcopy(test_microsim)  # less typing and so as not to interfere with other tests
    # Give some people the disease (these two chosen because they both spend a bit of time in retail)
    p1 = 1
    p2 = 6
    m.individuals.loc[p1, ColumnNames.DISEASE_STATUS] = ColumnNames.DiseaseStatuses.SYMPTOMATIC  # Behaviour change
    m.individuals.loc[p2, ColumnNames.DISEASE_STATUS] = ColumnNames.DiseaseStatuses.PRESYMPTOMATIC  # No change
    m.step()
    m.change_behaviour_with_disease()  # (this isn't called by default when testing)
    # Nothing should have happened as we hadn't indicated a change in disease status
    for p, act in zip([p1, p1, p2, p2], [ColumnNames.Activities.HOME, ColumnNames.Activities.RETAIL,
                                         ColumnNames.Activities.HOME, ColumnNames.Activities.RETAIL]):
        assert m.individuals.loc[p, f"{act}{ColumnNames.ACTIVITY_DURATION}"] == \
               m.individuals.loc[p, f"{act}{ColumnNames.ACTIVITY_DURATION_INITIAL}"]
    # Mark behaviour changed then try again
    m.individuals.loc[p1, ColumnNames.DISEASE_STATUS_CHANGED] = True
    m.individuals.loc[p2, ColumnNames.DISEASE_STATUS_CHANGED] = True
    m.step()
    m.change_behaviour_with_disease()  # (this isn't called by default when testing)
    # First person should spend more time at home and less at retail
    assert m.individuals.loc[p1, f"{ColumnNames.Activities.RETAIL}{ColumnNames.ACTIVITY_DURATION}"] < m.individuals.loc[
        p1, f"{ColumnNames.Activities.RETAIL}{ColumnNames.ACTIVITY_DURATION_INITIAL}"]
    assert m.individuals.loc[p1, f"{ColumnNames.Activities.HOME}{ColumnNames.ACTIVITY_DURATION}"] > m.individuals.loc[
        p1, f"{ColumnNames.Activities.HOME}{ColumnNames.ACTIVITY_DURATION_INITIAL}"]
    # Second person (presymptomatic) should be unchanged
    assert m.individuals.loc[p2, f"{ColumnNames.Activities.RETAIL}{ColumnNames.ACTIVITY_DURATION}"] == m.individuals.loc[
        p2, f"{ColumnNames.Activities.RETAIL}{ColumnNames.ACTIVITY_DURATION_INITIAL}"]
    assert m.individuals.loc[p2, f"{ColumnNames.Activities.HOME}{ColumnNames.ACTIVITY_DURATION}"] == m.individuals.loc[
        p2, f"{ColumnNames.Activities.HOME}{ColumnNames.ACTIVITY_DURATION_INITIAL}"]
    # Mark behaviour changed then try again (second change must be idempotent)
    m.individuals.loc[p1, ColumnNames.DISEASE_STATUS_CHANGED] = True
    m.individuals.loc[p2, ColumnNames.DISEASE_STATUS_CHANGED] = True
    m.step()
    m.change_behaviour_with_disease()  # (this isn't called by default when testing)
    # First person should spend more time at home and less at retail
    assert m.individuals.loc[p1, f"{ColumnNames.Activities.RETAIL}{ColumnNames.ACTIVITY_DURATION}"] < m.individuals.loc[
        p1, f"{ColumnNames.Activities.RETAIL}{ColumnNames.ACTIVITY_DURATION_INITIAL}"]
    assert m.individuals.loc[p1, f"{ColumnNames.Activities.HOME}{ColumnNames.ACTIVITY_DURATION}"] > m.individuals.loc[
        p1, f"{ColumnNames.Activities.HOME}{ColumnNames.ACTIVITY_DURATION_INITIAL}"]
    # Second person should be unchanged
    assert m.individuals.loc[p2, f"{ColumnNames.Activities.RETAIL}{ColumnNames.ACTIVITY_DURATION}"] == m.individuals.loc[
        p2, f"{ColumnNames.Activities.RETAIL}{ColumnNames.ACTIVITY_DURATION_INITIAL}"]
    assert m.individuals.loc[p2, f"{ColumnNames.Activities.HOME}{ColumnNames.ACTIVITY_DURATION}"] == m.individuals.loc[
        p2, f"{ColumnNames.Activities.HOME}{ColumnNames.ACTIVITY_DURATION_INITIAL}"]
    # First person no longer infectious, behaviour should go back to normal
    m.individuals.loc[p1, ColumnNames.DISEASE_STATUS] = ColumnNames.DiseaseStatuses.RECOVERED
    m.step()
    m.change_behaviour_with_disease()  # (this isn't called by default when testing)
    assert m.individuals.loc[p1, f"{ColumnNames.Activities.RETAIL}{ColumnNames.ACTIVITY_DURATION}"] == m.individuals.loc[
        p1, f"{ColumnNames.Activities.RETAIL}{ColumnNames.ACTIVITY_DURATION_INITIAL}"]
    assert m.individuals.loc[p1, f"{ColumnNames.Activities.HOME}{ColumnNames.ACTIVITY_DURATION}"] == m.individuals.loc[
        p1, f"{ColumnNames.Activities.HOME}{ColumnNames.ACTIVITY_DURATION_INITIAL}"]
def test_update_venue_danger_and_risks(test_microsim):
"""Check that the current risk is updated properly"""
# This is actually tested as part of test_step
assert True
def test_hazard_multipliers(test_microsim):
    """
    This tests whether hazards for particular disease statuses or locations are multiplied properly.
    The relevant code is in update_venue_danger_and_risks().

    :param test_microsim: This is a pointer to the initialised model. Dummy data will have been read in,
    but no stepping has taken place yet."""
    m = copy.deepcopy(test_microsim)  # For less typing and so as not to interfere with other functions use microsim
    households = m.activity_locations[f"{ColumnNames.Activities.HOME}"]._locations
    # Note: the following is a useful way to get relevant info about the individuals
    # m.individuals.loc[:, ["ID", "PID", "HID", "area", ColumnNames.DISEASE_STATUS, "MSOA_Cases", "HID_Cases"]]
    # Set the hazard-related parameters.
    # As we don't specify them when the tests are set up, they should be empty dictionaries
    assert not m.hazard_location_multipliers
    assert not m.hazard_individual_multipliers
    # Manually create some hazards for individuals and locations as per the parameters file
    m.hazard_individual_multipliers["presymptomatic"] = 1.0
    m.hazard_individual_multipliers["asymptomatic"] = 2.0
    m.hazard_individual_multipliers["symptomatic"] = 3.0
    for act in ColumnNames.Activities.ALL:
        m.hazard_location_multipliers[act] = 1.0
    # Step 0 (initialisation):
    # Everyone should start without the disease (they will have been assigned a status as part of initialisation)
    m.individuals[ColumnNames.DISEASE_STATUS] = ColumnNames.DiseaseStatuses.SUSCEPTIBLE
    #
    # Person 1: lives with one other person (p2). Both people spend all their time at home doing nothing else
    #
    p1 = 0
    p2 = 1
    m.individuals.loc[p1, ColumnNames.DISEASE_STATUS] = ColumnNames.DiseaseStatuses.PRESYMPTOMATIC  # Give p1 the disease
    for p in [p1, p2]:  # Set their activity durations to 0 except for home
        for name, activity in m.activity_locations.items():
            m.individuals.at[p, f"{name}{ColumnNames.ACTIVITY_DURATION}"] = 0.0
        m.individuals.at[p, f"{ColumnNames.Activities.HOME}{ColumnNames.ACTIVITY_DURATION}"] = 1.0
    m.step()
    # Check the disease has spread to the house with a multiplier of 1.0, but nowhere else
    _check_hazard_spread(p1, p2, m.individuals, households, 1.0)
    # If the person is asymptomatic, we said the hazard should be doubled, so the risk should be doubled
    m.individuals.loc[p1, ColumnNames.DISEASE_STATUS] = ColumnNames.DiseaseStatuses.ASYMPTOMATIC  # Give p1 the disease
    m.individuals.loc[p2, ColumnNames.DISEASE_STATUS] = ColumnNames.DiseaseStatuses.SUSCEPTIBLE  # Make sure p2 is clean
    m.step()
    _check_hazard_spread(p1, p2, m.individuals, households, 2.0)
    # And for symptomatic we said 3.0
    m.individuals.loc[p1, ColumnNames.DISEASE_STATUS] = ColumnNames.DiseaseStatuses.SYMPTOMATIC  # Give p1 the disease
    m.individuals.loc[p2, ColumnNames.DISEASE_STATUS] = ColumnNames.DiseaseStatuses.SUSCEPTIBLE  # Make sure p2 is clean
    m.step()
    _check_hazard_spread(p1, p2, m.individuals, households, 3.0)
    # But if they both get sick then double danger and risk)
    m.individuals.loc[p1, ColumnNames.DISEASE_STATUS] = ColumnNames.DiseaseStatuses.SYMPTOMATIC
    m.individuals.loc[p2, ColumnNames.DISEASE_STATUS] = ColumnNames.DiseaseStatuses.SYMPTOMATIC
    m.step()
    _check_hazard_spread(p1, p2, m.individuals, households, 6.0)
    #
    # Now see if the hazards for locations work. Check houses and schools
    #
    # Both people are symptomatic. And double the hazard for home. So in total the new risk should
    # be 3 * 2 * 5 = 30
    m.individuals.loc[p1, ColumnNames.DISEASE_STATUS] = ColumnNames.DiseaseStatuses.SYMPTOMATIC
    m.individuals.loc[p2, ColumnNames.DISEASE_STATUS] = ColumnNames.DiseaseStatuses.SYMPTOMATIC
    m.hazard_location_multipliers[ColumnNames.Activities.HOME] = 5.0
    m.step()
    _check_hazard_spread(p1, p2, m.individuals, households, 30.0)
    # Check for school as well. Now give durations for home and school as 0.5. Make them asymptomatic so the additional
    # hazard is 2.0 (set above). And make the risks for home 5.35 and for school 2.9.
    # Make sure all *other* individuals go to a different school (school 1), then make p1 and p2 go to the same school
    # (school 0) below
    # (annoying apply is because pandas doesn't like a list being assigned to a value in a cell)
    m.individuals[f"{ColumnNames.Activities.PRIMARY}{ColumnNames.ACTIVITY_VENUES}"] = \
        m.individuals.loc[:, f"{ColumnNames.Activities.PRIMARY}{ColumnNames.ACTIVITY_VENUES}"].apply(lambda x: [1])
    m.individuals.loc[[p1, p2], f"{ColumnNames.Activities.PRIMARY}{ColumnNames.ACTIVITY_VENUES}"] = \
        m.individuals.loc[[p1, p2], f"{ColumnNames.Activities.PRIMARY}{ColumnNames.ACTIVITY_VENUES}"].apply(lambda x: [0])
    # All school flows need to be 1 (don't want the people to go to more than 1 school)
    m.individuals[f"{ColumnNames.Activities.PRIMARY}{ColumnNames.ACTIVITY_FLOWS}"] = \
        m.individuals.loc[:, f"{ColumnNames.Activities.PRIMARY}{ColumnNames.ACTIVITY_VENUES}"].apply(lambda x: [1.0])
    for p in [p1, p2]:  # Set their activity durations to 0.5 for home and school
        for name, activity in m.activity_locations.items():
            m.individuals.at[p, f"{name}{ColumnNames.ACTIVITY_DURATION}"] = 0.0
        m.individuals.at[p, f"{ColumnNames.Activities.HOME}{ColumnNames.ACTIVITY_DURATION}"] = 0.5
        m.individuals.at[p, f"{ColumnNames.Activities.PRIMARY}{ColumnNames.ACTIVITY_DURATION}"] = 0.5
    # Make them asymptomatic
    m.individuals.loc[p1, ColumnNames.DISEASE_STATUS] = ColumnNames.DiseaseStatuses.ASYMPTOMATIC
    m.individuals.loc[p2, ColumnNames.DISEASE_STATUS] = ColumnNames.DiseaseStatuses.ASYMPTOMATIC
    # Set hazards for home and school
    m.hazard_location_multipliers[ColumnNames.Activities.HOME] = 5.35
    m.hazard_location_multipliers[ColumnNames.Activities.PRIMARY] = 2.9
    m.step()
    # Can't use _check_hazard_spread because it assumes only one activity (HOME)
    # Current risks are:
    # For home. 2 people * 2.0 asymptomatic hazard * 0.5 duration * 5.35 HOME risk = 10.7
    # For school. 2 people * 2.0 asymptomatic hazard * 0.5 duration * 2.9 PRIMARY risk = 5.8
    # Total risk for individuals: 10.7*0.5 + 5.8*0.5 = 8.25
    # Individuals
    for p in [p1, p2]:
        assert m.individuals.at[p, ColumnNames.CURRENT_RISK] == 8.25
    for p in range(2, len(m.individuals)):
        assert m.individuals.at[p, ColumnNames.CURRENT_RISK] == 0.0
    # Households
    assert households.at[0, ColumnNames.LOCATION_DANGER] == 10.7
    # (the self.households dataframe should be the same as the one stored in the activity_locations)
    assert m.activity_locations[ColumnNames.Activities.HOME]._locations.at[0, ColumnNames.LOCATION_DANGER] == 10.7
    for h in range(1, len(households)):  # all others are 0
        assert households.at[h, ColumnNames.LOCATION_DANGER] == 0.0
    # Schools
    assert m.activity_locations[ColumnNames.Activities.PRIMARY]._locations.at[0, ColumnNames.LOCATION_DANGER] == 5.8
    for h in range(1, len( m.activity_locations[ColumnNames.Activities.PRIMARY]._locations)):  # all others are 0
        assert m.activity_locations[ColumnNames.Activities.PRIMARY]._locations.at[h, ColumnNames.LOCATION_DANGER] == 0.0
    print("End of test hazard multipliers")
def _check_hazard_spread(p1, p2, individuals, households, risk):
    """Assert that only p1/p2 carry `risk` and only household 0 is dangerous.

    Shared helper for test_hazard_multipliers; assumes the two individuals
    live together in household 0 and everyone else is risk-free.
    """
    infected = (p1, p2)
    for person in infected:
        assert individuals.at[person, ColumnNames.CURRENT_RISK] == risk
    for person in range(2, len(individuals)):
        assert individuals.at[person, ColumnNames.CURRENT_RISK] == 0.0
    assert households.at[0, ColumnNames.LOCATION_DANGER] == risk
    for house in range(1, len(households)):  # every other house stays at zero
        assert households.at[house, ColumnNames.LOCATION_DANGER] == 0.0
def test_step(test_microsim):
    """
    Test the step method. This is the main test of the model. Simulate a deterministic run through and
    make sure that the model runs as expected.

    Only thing it doesn't do is check for retail, shopping, etc., that danger and risk increase by the correct
    amount. It just checks they go above 0 (or not). It does do that more precise checks for home activities though.

    :param test_microsim: This is a pointer to the initialised model. Dummy data will have been read in,
    but no stepping has taken place yet."""
    m = copy.deepcopy(test_microsim)  # For less typing and so as not to interfere with other functions use microsim
    households = m.activity_locations[f"{ColumnNames.Activities.HOME}"]._locations
    # Note: the following is a useful way to get relevant info about the individuals
    # m.individuals.loc[:, ["ID", "PID", "HID", "area", ColumnNames.DISEASE_STATUS, "MSOA_Cases", "HID_Cases"]]
    # Step 0 (initialisation):
    # Everyone should start without the disease (they will have been assigned a status as part of initialisation)
    m.individuals[ColumnNames.DISEASE_STATUS] = ColumnNames.DiseaseStatuses.SUSCEPTIBLE
    #
    # Person 1: lives with one other person (p2). Both people spend all their time at home doing nothing else
    #
    p1 = 0
    p2 = 1
    m.individuals.loc[p1, ColumnNames.DISEASE_STATUS] = ColumnNames.DiseaseStatuses.SYMPTOMATIC  # Give them the disease
    for p in [p1, p2]:  # Set their activity durations to 0
        for name, activity in m.activity_locations.items():
            m.individuals.at[p, f"{name}{ColumnNames.ACTIVITY_DURATION}"] = 0.0
        m.individuals.at[p, f"{ColumnNames.Activities.HOME}{ColumnNames.ACTIVITY_DURATION}"] = 1.0  # Spend all their time at home
    m.step()
    # Check the disease has spread to the house but nowhere else
    for p in [p1, p2]:
        assert m.individuals.at[p, ColumnNames.CURRENT_RISK] == 1.0
    for p in range(2, len(m.individuals)):
        assert m.individuals.at[p, ColumnNames.CURRENT_RISK] == 0.0
    assert households.at[0, ColumnNames.LOCATION_DANGER] == 1.0
    for h in range(1, len(households)):  # all others are 0
        assert households.at[h, ColumnNames.LOCATION_DANGER] == 0.0
    m.step()
    # Risk and danger stay the same (it does not cumulate over days)
    for p in [p1, p2]:
        assert m.individuals.at[p, ColumnNames.CURRENT_RISK] == 1.0
    for p in range(2, len(m.individuals)):
        assert m.individuals.at[p, ColumnNames.CURRENT_RISK] == 0.0
    assert households.at[0, ColumnNames.LOCATION_DANGER] == 1.0
    for h in range(1, len(households)):
        assert households.at[h, ColumnNames.LOCATION_DANGER] == 0.0
    # If the infected person doesn't go home (in this test they do absolutely nothing) then danger and risks should go
    # back to 0
    m.individuals.at[p1, f"{ColumnNames.Activities.HOME}{ColumnNames.ACTIVITY_DURATION}"] = 0.0
    m.step()
    for p in range(len(m.individuals)):
        assert m.individuals.at[p, ColumnNames.CURRENT_RISK] == 0.0
    for h in range(0, len(households)):
        assert households.at[h, ColumnNames.LOCATION_DANGER] == 0.0
    # But if they both get sick then they should be 2.0 (double danger and risk)
    m.individuals.loc[p1:p2, ColumnNames.DISEASE_STATUS] = ColumnNames.DiseaseStatuses.SYMPTOMATIC  # Give them the disease
    m.individuals.at[p1, f"{ColumnNames.Activities.HOME}{ColumnNames.ACTIVITY_DURATION}"] = 1.0  # Make the duration normal again
    m.step()
    for p in [p1, p2]:
        assert m.individuals.at[p, ColumnNames.CURRENT_RISK] == 2.0
    assert households.at[0, ColumnNames.LOCATION_DANGER] == 2.0
    for h in range(1, len(households)):  # All other houses are danger free
        assert households.at[h, ColumnNames.LOCATION_DANGER] == 0.0
    #
    # Now see what happens when one person gets the disease and spreads it to schools, shops and work
    #
    del p1, p2
    p1 = 4  # The infected person is index 4
    # Make everyone better except for that one person
    m.individuals[ColumnNames.DISEASE_STATUS] = ColumnNames.DiseaseStatuses.SUSCEPTIBLE
    m.individuals.loc[p1, ColumnNames.DISEASE_STATUS] = ColumnNames.DiseaseStatuses.SYMPTOMATIC
    # Assign everyone equal time doing all activities
    for name, activity in m.activity_locations.items():
        m.individuals[f"{name}{ColumnNames.ACTIVITY_DURATION}"] = 1.0 / len(m.activity_locations)
    m.step()
    # Now check that the danger has propagated to locations and risk to people
    # TODO Also check that the total risks and danger scores sum correctly
    for name, activity in m.activity_locations.items():
        # Indices of the locations where this person visited
        visited_idx = m.individuals.at[p1, f"{name}{ColumnNames.ACTIVITY_VENUES}"]
        not_visited_idx = list(set(range(len(activity._locations))) - set(visited_idx))
        # Dangers should be >0.0 (or not if the person didn't visit there)
        assert False not in list(activity._locations.loc[visited_idx, "Danger"].values > 0)
        assert False not in list(activity._locations.loc[not_visited_idx, "Danger"].values == 0)
        # Individuals should have an associated risk
        for index, row in m.individuals.iterrows():
            for idx in visited_idx:
                if idx in row[f"{name}{ColumnNames.ACTIVITY_VENUES}"]:
                    assert row[ColumnNames.CURRENT_RISK] > 0
        # Note: can't check if risk is equal to 0 because it might come from another activity
    print("End of test step")
def _get_rand(microsim_model, N=100):
"""Get a random number using the PopulationInitialisation object's random number generator"""
for _ in range(N):
microsim_model.random.random()
return microsim_model.random.random()
def test_random():
    """
    Checks that random classes produce different (or the same!) numbers when they should do.
    Same seed => same stream; different/None seeds => distinct streams, even
    when models are evaluated inside multiprocessing pools.
    :return:
    """
    population_init = PopulationInitialisation(**population_init_args)
    p1 = Microsim(individuals=population_init.individuals, activity_locations=population_init.activity_locations,
                  **microsim_args)
    p2 = Microsim(individuals=population_init.individuals, activity_locations=population_init.activity_locations,
                  random_seed=2.0, **microsim_args)
    p3 = Microsim(individuals=population_init.individuals, activity_locations=population_init.activity_locations,
                  random_seed=2.0, **microsim_args)
    # Generate a random number from each model. The second two numbers should be the same
    r1, r2, r3 = [_get_rand(x) for x in [p1, p2, p3]]
    assert r1 != r2
    assert r2 == r3
    # Check that this still happens even if they are executed in pools.
    # Create a large number of microsims and check that all random numbers are unique
    pool = multiprocessing.Pool()
    num_reps = 1000
    m = [Microsim(individuals=population_init.individuals, activity_locations=population_init.activity_locations,
                  **microsim_args) for _ in range(num_reps)]
    r = pool.map(_get_rand, m)
    assert len(r) == len(set(r))
    pool.close()
    # Repeat, this time explicitly passing a None seed
    pool = multiprocessing.Pool()
    num_reps = 50  # (don't do quite as many this time, it takes ages)
    m = [Microsim(individuals=population_init.individuals, activity_locations=population_init.activity_locations,
                  random_seed=None, **microsim_args) for _ in range(num_reps)]
    r = pool.map(_get_rand, m)
    assert len(r) == len(set(r))
    pool.close()
|
'''
Application entry point: assembles the EasyPass main window via the configured
assembler and starts the Tk main loop.
'''
import UsConfig as Config
from Common.Utilities import centerRoot
if __name__ == '__main__':
    # Build the main application window through the DI assembler.
    root = Config.ASSEMBLER.assemble("MainApp")
    root.title("EasyPass")
    # NOTE(review): hard-coded absolute icon path; this breaks on any machine
    # without D:/logo.ico - confirm and consider a path relative to the app.
    root.iconbitmap(default=r'D:/logo.ico')
    centerRoot(root)  # centre the window on the screen
    root.mainloop()
|
#!/usr/bin/env python
from pyb import *
from time import sleep
# Shared state mutated by the button interrupt handlers below.
x = 0  # LED brightness target (adjusted by LEFT/RIGHT buttons)
flag = 0  # 1 = LED on, 0 = LED off (toggled by BTNA/BTNB)
led1 = LED(1)
led2 = LED(2)  # NOTE(review): led2 is initialised but never used below - confirm
def onBtnRIGHTPressed(evt):
    # RIGHT button: raise the brightness target by 10 (no upper bound here).
    global x,flag
    x += 10
# Register the handler: falling-edge interrupt with the internal pull-up.
ExtInt(Pin('RIGHT'), ExtInt.IRQ_FALLING, Pin.PULL_UP, onBtnRIGHTPressed)
def onBtnLEFTPressed(evt):
    # LEFT button: lower the brightness target by 20 (may go negative).
    global x,flag
    x += -20
# Register the handler: falling-edge interrupt with the internal pull-up.
ExtInt(Pin('LEFT'), ExtInt.IRQ_FALLING, Pin.PULL_UP, onBtnLEFTPressed)
def onBtnBTNAPressed(evt):
    global x,flag
    # LED off
    flag = 0
# Register the handler: falling-edge interrupt with the internal pull-up.
ExtInt(Pin('BTNA'), ExtInt.IRQ_FALLING, Pin.PULL_UP, onBtnBTNAPressed)
def onBtnBTNBPressed(evt):
    global x,flag
    # LED on
    flag = 1
# Register the handler: falling-edge interrupt with the internal pull-up.
ExtInt(Pin('BTNB'), ExtInt.IRQ_FALLING, Pin.PULL_UP, onBtnBTNBPressed)
# Initial state: LED on at a mid-range brightness target.
flag = 1
x = 100

# Main loop: apply the state set by the interrupt handlers every 100 ms.
while True:
    if flag == 0:
        led1.off()
    elif flag == 1:
        # ROBUSTNESS FIX: the buttons adjust x without bounds (+10/-20 per
        # press), so clamp to the LED.intensity() valid range of 0..255
        # before applying. Also flattened the nested `else: if` to `elif`.
        led1.intensity(max(0, min(255, x)))
    sleep(0.1)
|
# Demonstrates the string-quoting styles available in Python.
# TYPO FIXES: "qutation" -> "quotation", "quatation" -> "quotation",
# "quate" -> "quote" in the demo output strings.
msg = "string in double quotation"
msg2 = 'string in single quotation'
msg3 = "string \" with escape char"
msg4 = """ using triple double quotation to have single ' and " quote as needed """
print(msg)
print(msg2)
print(msg3)
print(msg4)
import warnings
import numpy as np
import LS_SVM
from sklearn import datasets
import matplotlib.pyplot as plt
warnings.filterwarnings("ignore")
# Synthetic 2-class, 2-feature dataset used by the demo run below.
X, y = datasets.make_classification(n_samples=50, n_features=2, n_informative=2, n_redundant=0, n_classes=2,
                                    random_state=7, class_sep=1.5, flip_y=0.1)

if __name__ == '__main__':
    # Small hand-crafted dataset, kept for reference.
    # NOTE(review): data_dict is never passed to LS_SVM below - confirm it is
    # intentionally retained. (A commented-out alternative was removed.)
    data_dict = {1: np.array([[1, 2],
                              [1, 1],
                              [3, 4],
                              [4, 2],
                              [2, 1]]),
                 -1: np.array([[2, -3],
                               [0, -3],
                               [1, -1],
                               [1.5, -2]])}
    # Kernels to compare: linear, polynomial (degree 2), radial, sigmoid.
    k1 = LS_SVM.Kernel('lin', param=[None, None], hyperparam=[1, 1])
    k2 = LS_SVM.Kernel('pol', param=[1, 2], hyperparam=[1, 1])
    k3 = LS_SVM.Kernel('rad', param=[10, None], hyperparam=[1, 1])
    k4 = LS_SVM.Kernel('sig', param=[0.1, 10], hyperparam=[1, 3])
    # IDIOM FIX: split the generated samples by label with numpy boolean
    # indexing instead of the original per-row append/reshape loop. Produces
    # the same arrays (rows in original order) in O(n) and far less code.
    data_dict_new = {1: X[y == 1], -1: X[y != 1]}
    LS_SVM.gesamt(data_dict_new, [k1, k2, k3, k4])
|
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# file on university type from financial file
# Institution-type lookup taken from the financial dataset; keep only the
# join key and the type column.
df_type = pd.read_csv("./../university_financial/university_financial0116.csv",
                      index_col='Unnamed: 0')
df_type = df_type[['UNITID', 'TYPE']]

# Student financial aid data.
df_sfa = pd.read_csv("./sfa9916.csv", index_col='Unnamed: 0')

# Inner-join on the shared institution id and persist the result.
df_result = pd.merge(df_type, df_sfa, on='UNITID', how='inner')
df_result.to_csv('./sfa9916_with_types.csv')
|
# encoding=utf-8
import matplotlib
import numpy as np
matplotlib.use('agg')
import matplotlib.pyplot as plt
# RESOURCE-LEAK FIX: removed four module-level open() calls (train_Acc,
# train_Loss, val_Acc, val_Loss). The handles were never read or closed -
# ReadTxtName() below opens each file itself - so they only leaked file
# descriptors. No other code referenced these names.
def ReadTxtName(rootdir):
    """Read a metrics file and return its lines as a list of strings.

    For files whose path contains "Acc" each line is parsed as a number and
    divided by 3200 (normalising a raw correct-count to an accuracy) before
    being converted back to a string. Other files keep each raw line,
    including its trailing newline, unchanged.

    :param rootdir: path of the text file to read.
    :return: list of strings, one per line.
    """
    lines = []
    # IDIOM FIX: use a context manager and direct file iteration instead of
    # the manual readline()/break loop; behaviour is identical.
    with open(rootdir, 'r') as file_to_read:
        for line in file_to_read:
            if "Acc" in rootdir:
                line = float(line.strip('\n')) / 3200
            lines.append(str(line))
    return lines
# Load the four curves; "*_Acc" values are normalised by 3200 inside
# ReadTxtName(). NOTE(review): the 'tarin_Acc.txt' filename typo is kept -
# it matches the file actually produced elsewhere; confirm before renaming.
lineslist1 = ReadTxtName('tarin_Acc.txt')
lineslist2 = ReadTxtName('val_Acc.txt')
lineslist3 = ReadTxtName('train_Loss.txt')
lineslist4 = ReadTxtName('val_Loss.txt')

# IDIOM FIX: the original [0:len(...)] slices were no-ops and have been
# dropped; x0 is simply one point per epoch.
x0 = list(range(len(lineslist1)))
y00 = [float(v) for v in lineslist1]
y01 = [float(v) for v in lineslist2]
y02 = [float(v) for v in lineslist3]
y03 = [float(v) for v in lineslist4]

fig = plt.figure(figsize=(12, 8))
plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']
plt.title('Epochs & Train Loss', fontsize=18)
plt.plot(x0, y00, '.-', label='tarin_Acc')
plt.plot(x0, y01, '.-', label='val_Acc')
plt.plot(x0, y02, '.-', label='train_Loss')
plt.plot(x0, y03, '.-', label='val_Loss')
plt.grid(True)
plt.legend(prop={'size': 16})
plt.xticks(np.arange(0.0, 110, step=5))
plt.yticks(np.arange(0.0, 1.1, step=0.05))
plt.xlabel('Epochs', fontsize=16)
plt.tick_params(labelsize=16)
# BUG FIX: save *before* show(). The file uses the non-interactive 'agg'
# backend, so saving must not depend on what show() leaves behind; calling
# savefig after show risks writing an empty image.
plt.savefig('testblueline.jpg')
plt.show()
# -*- encoding: utf-8 -*-
"""
@File : add_love_user.py
@Time : 2020/2/21 15:55
@Author : Tianjin
@Email : tianjincn@163.com
@Software: PyCharm
"""
from lin.core import User
from lin.db import db
from app.app import create_app
from app.models.love import Love_user
def main():
    """Create one hard-coded "super member" row in the Love_user table."""
    app = create_app()
    with app.app_context():
        with db.auto_commit():
            # Create a super administrator with fixed demo data.
            user = Love_user()
            user.Username = "夏枯草"
            user.Address = "贵阳"
            user.Sex = "男"
            user.Province = "贵阳"
            # NOTE(review): this line only *reads* the attribute and discards
            # the value — it looks like an assignment (user.Openid = ...) was
            # intended; confirm.
            user.Openid
            db.session.add(user)
if __name__ == '__main__':
    # The original wrapped main() in `try: ... except Exception as e: raise e`,
    # which merely re-raises and adds nothing — dropped.  Exceptions still
    # propagate; the success message only prints when main() completes.
    main()
    print("新增超级管理员成功")
|
'''
公众号:早起Python
作者:陈熹
请在桌面创建一个文件夹并命名为data,然后将测试Excel文件放入data文件夹中!
'''
from openpyxl import load_workbook
import os
import glob
import random
import pandas as pd
import re
from openpyxl.styles import Alignment
from openpyxl.styles import Side, Border
from openpyxl.styles import Font
def GetDesktopPath():
    """Return the absolute path of the current user's Desktop folder."""
    home_dir = os.path.expanduser("~")
    return os.path.join(home_dir, 'Desktop')
# Pick the first Excel workbook in ~/Desktop/data as the template.
path = glob.glob(f'{GetDesktopPath()}/data/*.xls*')[0]
workbook = load_workbook(filename=path)
sheet_init = workbook.active
# Pools of fake member data used to fill the daily sheets.
name_lst = ['皮卡丘', '小火龙', '杰尼龟', '妙蛙种子', '风速狗', '小拳石', '飞天螳螂']
place_lst = [chr(i).upper() for i in range(97, 123)]
activity_lst = ['椭圆机', '篮球', '足球', '羽毛球', '跳绳']
source_lst = ['朋友介绍', '微信聊天', '网页弹窗', '其他']
# Create 30 daily sheets, each with a random number (10-30) of member rows
# starting at row 3 (rows 1-2 are the template header).
for i in range(30):
    sheet = workbook.copy_worksheet(sheet_init)
    sheet.title = f'{i+1}日'
    for j in range(random.randint(10, 30)):
        for row in sheet.iter_rows(min_row=3+j, max_row=3+j):
            info = [f'{j+1}', f'{i+1}日', f'{random.choice(name_lst)}', f'{random.choice(place_lst)}馆',
                    f'{random.choice(activity_lst)}', f'{random.choice(source_lst)}', f'{random.randint(1, 10)}',
                    '无', f'{random.choice(["Y", "N"])}', f'{random.choice(["Y", "N"])}', f'{random.choice(["Y", "N"])}']
            for index, k in enumerate(info):
                row[index].value = k
workbook.save(filename=f'{GetDesktopPath()}/data/results.xlsx')
path_new = glob.glob(f'{GetDesktopPath()}/data/results.xls*')[0]
# Reopen to get the sheet count for iteration.
workbook = load_workbook(path_new)
sheetnames = workbook.sheetnames
df_lst = []
# Read every daily sheet (skip sheet 0, the template) into a DataFrame.
# NOTE(review): pd.read_excel's `encoding` argument was removed in pandas 1.3 —
# this pins an older pandas; confirm the environment.
for i in range(1, len(sheetnames)):
    df = pd.read_excel(path_new, encoding='utf-8', sheet_name=i, skiprows=1)
    df_lst.append(df)
df_total = pd.concat(df_lst, axis=0, ignore_index=True)
df_total['编号'] = df_total.index + 1
# Append the summary sheet via an openpyxl-backed writer.
# NOTE(review): assigning writer.book is unsupported in recent pandas; works
# only with the older pandas this script targets.
writer = pd.ExcelWriter(path_new, engine='openpyxl')
writer.book = workbook
workbook.remove(workbook['汇总表'])
df_total.to_excel(excel_writer=writer, sheet_name=u'汇总表', index=None)
writer.close()
# Move the freshly appended summary sheet to the front.
# NOTE(review): _sheets is a private openpyxl attribute — fragile across versions.
workbook._sheets.insert(0, workbook._sheets.pop())
sheet = workbook[sheetnames[0]]
# Insert a title row and style it.
sheet.insert_rows(idx=0)
font = Font(name='宋体', size=18, bold=True)
sheet['A1'] = '皮卡丘体育2020年06月新学员信息登记表'
sheet['A1'].font = font
# Extract the last column letter from e.g. 'A1:K200' to merge the title across it.
req = ':(\w)'
weight = re.findall(req, sheet.dimensions)[0]
sheet.merge_cells(f'A1:{weight}1')
# Center and border every used cell.
alignment = Alignment(horizontal='center', vertical='center')
side = Side(style='thin', color='000000')
border = Border(left=side, right=side, top=side, bottom=side)
rows = sheet[f'{sheet.dimensions}']
for row in rows:
    for cell in row:
        cell.alignment = alignment
        cell.border = border
sheet.row_dimensions[1].height = 38
sheet.row_dimensions[2].height = 38
# Column letters B..<last>, widened for readability.
letter_lst = [chr(i+64).upper() for i in range(2, ord(weight)-ord('A')+1+1)]
sheet.column_dimensions['A'].width = 8
for i in letter_lst:
    sheet.column_dimensions[f'{i}'].width = 14
workbook.save(filename=f'{GetDesktopPath()}/data/results.xlsx')
print('文件已生成') |
import sys
from typing import List
class Node:
    """Heap entry pairing an arbitrary payload with its ordering key."""

    def __init__(self, data, key):
        self.data = data
        self.key = key

    def __lt__(self, other):
        # Ordering is by key only; `a > b` also works via the reflected __lt__.
        return self.key < other.key

    def __repr__(self):
        return f"data={self.data};key={self.key}"


class MinPriorityQueue:
    """Array-backed binary min-heap with an index map for O(log n) decrease-key.

    ``heap`` holds Node objects in heap order; ``data2idx`` maps each payload
    to its current index in ``heap`` (payloads must be hashable and unique).

    Bug fix over the original: ``get_parent`` returned ``idx // 2``, which is
    wrong for a 0-based heap whose children sit at ``2*idx+1`` / ``2*idx+2``
    (the parent of index 2 came out as 1 instead of 0), corrupting every
    sift-up.  The correct parent index is ``(idx - 1) // 2``.
    """

    def __init__(self):
        self.heap = []
        self.data2idx = {}

    def is_empty(self):
        return len(self.heap) == 0

    def insert(self, data, key):
        # Insert with an "infinite" key, then lower it so all sift-up logic
        # lives in decrease_key.
        node = Node(data=data, key=sys.maxsize)
        self.heap.append(node)
        self.data2idx[data] = len(self.heap) - 1
        self.decrease_key(data, key)

    def decrease_key(self, data, key):
        idx = self.data2idx[data]
        if key > self.heap[idx].key:
            raise Exception  # decrease-key must not increase the key
        self.heap[idx].key = key
        self._sift_up(idx)

    def _sift_up(self, idx):
        # Bubble heap[idx] toward the root while it is smaller than its parent.
        while idx > 0 and self.heap[idx] < self.heap[self.get_parent(idx)]:
            parent_idx = self.get_parent(idx)
            self.swap_node(src_idx=parent_idx, dst_idx=idx)
            idx = parent_idx

    def get_min(self) -> Node:
        return self.heap[0]

    def extract_min(self) -> Node:
        min_node = self.heap[0]
        self.delete(min_node.data)
        return min_node

    def delete(self, data):
        idx = self.data2idx[data]
        last_idx = len(self.heap) - 1
        # Move the victim to the end, drop it, then restore the invariant.
        self.swap_node(src_idx=idx, dst_idx=last_idx)
        del self.data2idx[self.heap[last_idx].data]
        self.heap.pop()  # O(1), unlike the original `heap[:-1]` copy
        if idx < len(self.heap):
            # When deleting an arbitrary entry, the element moved into `idx`
            # may violate the heap property in either direction.
            self.heapify(idx)
            self._sift_up(idx)

    def swap_node(self, src_idx: int, dst_idx: int):
        # Keep the payload->index map consistent with the array swap.
        self.data2idx[self.heap[src_idx].data] = dst_idx
        self.data2idx[self.heap[dst_idx].data] = src_idx
        self.heap[src_idx], self.heap[dst_idx] = (
            self.heap[dst_idx],
            self.heap[src_idx],
        )

    def get_parent(self, idx: int) -> int:
        # FIX: parent of a 0-based heap node; was `idx // 2`.
        return (idx - 1) // 2

    def get_left_child(self, idx: int) -> int:
        return idx * 2 + 1

    def get_right_child(self, idx: int) -> int:
        return idx * 2 + 2

    def get_heap_size(self) -> int:
        return len(self.heap)

    def heapify(self, idx: int):
        # Sift heap[idx] down until neither child is smaller.
        left_child_idx = self.get_left_child(idx)
        right_child_idx = self.get_right_child(idx)
        heap_size = self.get_heap_size()
        min_idx = idx
        if left_child_idx < heap_size and self.heap[idx] > self.heap[left_child_idx]:
            min_idx = left_child_idx
        if (right_child_idx < heap_size) and (
            self.heap[min_idx] > self.heap[right_child_idx]
        ):
            min_idx = right_child_idx
        if min_idx != idx:
            self.swap_node(src_idx=idx, dst_idx=min_idx)
            self.heapify(min_idx)
# --- Ad-hoc smoke tests for heapify(), run at import time ---
# Root larger than both children; expect the key-1 node to reach the root.
pq = MinPriorityQueue()
pq.heap = [Node(data=3, key=3), Node(data=1, key=1), Node(data=2, key=2)]
pq.data2idx = {3: 0, 1: 1, 2: 2}
pq.heapify(0)
print(pq.heap)
print(pq.data2idx)
# Same scenario with the two children swapped.
pq = MinPriorityQueue()
pq.heap = [Node(data=3, key=3), Node(data=2, key=2), Node(data=1, key=1)]
pq.data2idx = {3: 0, 2: 1, 1: 2}
pq.heapify(0)
print(pq.heap)
print(pq.data2idx)
# NOTE(review): exact duplicate of the previous case — possibly unintentional.
pq = MinPriorityQueue()
pq.heap = [Node(data=3, key=3), Node(data=2, key=2), Node(data=1, key=1)]
pq.data2idx = {3: 0, 2: 1, 1: 2}
pq.heapify(0)
print(pq.heap)
print(pq.data2idx)
# Two-element heap: only a left child exists.
pq = MinPriorityQueue()
pq.heap = [Node(data=2, key=2), Node(data=1, key=1)]
pq.data2idx = {2: 0, 1: 1}
pq.heapify(0)
print(pq.heap)
print(pq.data2idx)
# pq = MinPriorityQueue()
# pq.heap = [Node(data=2, key=2), None, Node(data=1, key=1)]
# pq.data2idx = {Node(data=2, key=2): 0, Node(data=1, key=1): 2}
# pq.heapify(0)
# print(pq.heap)
# print(pq.data2idx)
# class Solution:
# def minCostConnectPoints(self, points: List[List[int]]) -> int:
# return self.prim(points)
# def get_weight(self, point_a: List[int], point_b: List[int]) -> int:
# return abs(point_a[0] - point_b[0]) + abs(point_a[1] - point_b[1])
# def prim(self, points: List[List[int]]):
# pq = MinPriorityQueue()
# for x, y in points:
# pq.insert(data=(x, y), key=self.get_weight(points[0], (x, y)))
# assert len(pq.heap) == len(points)
# # # print(pq.heap)
# # # print(pq.data2idx)
# # first_x, first_y = points[0]
# # pq.decrease_key(data=(first_x, first_y), key=0)
# cost = 0
# while not pq.is_empty():
# current_node = pq.extract_min()
# cost += current_node.key
# current_x, current_y = current_node.data
# print(current_node)
# for node in pq.heap:
# x, y = node.data
# weight = self.get_weight([current_x, current_y], [x, y])
# if weight < node.key:
# pq.decrease_key(data=(x, y), key=weight)
# return cost
# sol = Solution()
# points = [[0, 0], [0, 1], [0, -1], [1, 0], [-1, 0]]
# print(points)
# print(sol.minCostConnectPoints(points))
|
"""
## Questions :
### 7. [Reverse Integer](https://leetcode.com/problems/reverse-integer/)
Given a 32-bit signed integer, reverse digits of an integer.
**Example 1:**
Input: 123
Output: 321
Example 2:
<pre>
Input: -123
Output: -321
</pre>
Example 3:
Input: 120
Output: 21
Note:
Assume we are dealing with an environment which could only store integers within the 32-bit signed integer range: [−2^31, 2^31 − 1]. For the purpose of this problem, assume that your function returns 0 when the reversed integer overflows.
"""
## Solutions
import math
class Solution:
    def reverse(self, x: int) -> int:
        """Reverse the decimal digits of *x*; return 0 when the input or the
        reversed value falls outside the signed 32-bit integer range."""
        int_min = (-1) * (2 ** 31)
        int_max = (2 ** 31) - 1
        if x > int_max or x < int_min:
            return 0
        negative = x < 0
        # Reverse via string slicing of the absolute value.
        reversed_digits = int(str(abs(x))[::-1])
        if reversed_digits > int_max:
            return 0
        result = -reversed_digits if negative else reversed_digits
        if result < int_min:
            return 0
        return result
# Runtime: 28 ms
# Memory Usage: 13.3 MB
# Other Solution
class Solution:
    def reverse(self, x: int) -> int:
        """Reverse the decimal digits of *x* arithmetically.

        Returns 0 when the input or the reversed result overflows the signed
        32-bit range [-2**31, 2**31 - 1].

        Bug fix: the original guard was ``x < min_range and x > max_range``,
        which can never be true, so out-of-range inputs were never rejected
        (e.g. reverse(90000000000) wrongly returned 9).
        """
        min_range = -(2 ** 31)
        max_range = 2 ** 31 - 1
        # FIX: `or`, not `and` — a number cannot be below the minimum AND
        # above the maximum at the same time.
        if x < min_range or x > max_range:
            return 0
        negative = x < 0
        if negative:
            x = -x
        res = 0
        # Peel digits off the low end and push them onto `res`.
        while x > 0:
            res = res * 10 + x % 10
            x //= 10
        if negative:
            res = -res
        # Reject results that overflow 32 bits.
        if min_range <= res <= max_range:
            return res
        return 0
# Runtime: 28 ms, faster than 77.76%
# Memory Usage: 12.8 MB, less than 100.00%
|
# Generated by Django 2.2.1 on 2019-07-08 11:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: add CourseProgress.current_assignment (nullable 1:1).

    Generated migrations should not be edited by hand except to merge/squash.
    """
    dependencies = [
        ('core', '0009_auto_20190708_1101'),
    ]
    operations = [
        # Nullable/blank so existing rows migrate without a default;
        # PROTECT blocks deleting an Assignment that is still referenced.
        migrations.AddField(
            model_name='courseprogress',
            name='current_assignment',
            field=models.OneToOneField(blank=True, help_text='Refers to the last assignment the user had subscribe to', null=True, on_delete=django.db.models.deletion.PROTECT, to='core.Assignment'),
        ),
    ]
|
import numpy as np
import matplotlib.pyplot as plt
# Number of synthetic samples to generate.
number_of_points = 500
x_point = []
y_point = []
# Ground-truth line parameters: y = a*x + b, plus Gaussian noise.
a = 0.22
b = 0.78
for i in range(number_of_points):
    # x ~ N(0, 0.5); y follows the line with N(0, 0.1) noise.
    x = np.random.normal(0.0, 0.5)
    y = a*x + b + np.random.normal(0.0, 0.1)
    # Wrap each value in a single-element list (column-vector style).
    x_point.append([x])
    y_point.append([y])
plt.plot(x_point, y_point, 'o', label='Input Data')
plt.legend()
plt.show()
|
from datetime import datetime
from django.http import HttpResponseRedirect, HttpResponse
from django.template import RequestContext
from django.contrib.auth import authenticate, login, logout
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from mysite.forms import CategoryForm, PageForm
from polls.models import UserProfile, Category, Image
from mysite.forms import UserForm, UserProfileForm
def encode_url(str):
    """Make a name URL-friendly by turning spaces into underscores."""
    return "_".join(str.split(" "))
def decode_url(str):
    """Inverse of encode_url: turn underscores back into spaces."""
    return " ".join(str.split("_"))
def track_url(request, image_id):
    """Record a view on an image, then render its display page."""
    context = RequestContext(request)
    if request.method == 'GET':
        # i = get_object_or_404(Image, id=image_id)
        # NOTE(review): .get() raises Image.DoesNotExist (HTTP 500) for an
        # unknown id — the get_object_or_404 variant above was left commented
        # out.  Non-GET requests fall through and return None; confirm intended.
        img = Image.objects.get(pk=image_id)
        img.views += 1
        img.save()
        caturl = encode_url(img.category.name)
        return render_to_response("polls/display.html", {'image': img, 'caturl': caturl}, context)
@login_required
def profile(request, profile_id):
    """Render a user's profile page with their albums and uploaded images.

    Fix over the original: the bare ``except:`` around the UserProfile lookup
    (which also swallowed KeyboardInterrupt and real bugs) is narrowed to
    ``UserProfile.DoesNotExist``.
    """
    context = RequestContext(request)
    cat_list = get_user_albums(request)
    context_dict = {'cat_list': cat_list}
    image_list = Image.objects.filter(user=User.objects.get(pk=profile_id))
    context_dict['image_list'] = image_list
    u = User.objects.get(pk=profile_id)
    try:
        up = UserProfile.objects.get(user=u)
    except UserProfile.DoesNotExist:
        # The viewed user has no profile record; the template handles None.
        up = None
    context_dict['user'] = u
    context_dict['userprofile'] = up
    return render_to_response('polls/profile.html', context_dict, context)
# def addriend(request)
def albumlist(request):
    """List every album plus all images, both ordered by popularity."""
    context = RequestContext(request)
    context_dict = {
        'albums': get_category_list(),
        'images': Image.objects.all().order_by('-views'),
    }
    return render_to_response('polls/albums.html', context_dict, context)
@login_required()
def upload(request):
    """Render the upload form with all albums (alphabetical) for selection."""
    context = RequestContext(request)
    albums = Category.objects.all().order_by('name')
    # Annotate each album with its URL-safe name for template links.
    for album in albums:
        album.url = encode_url(album.name)
    return render_to_response('polls/upload.html', {'albums': albums}, context)
def category(request, category_name_url):
    """Show one category's images (most viewed first) and count the view."""
    # Request our context
    context = RequestContext(request)
    # Change underscores in the category name to spaces.
    # URL's don't handle spaces well, so we encode them as underscores.
    category_name = decode_url(category_name_url)
    # Build up the dictionary we will use as out template context dictionary.
    context_dict = {'category_name': category_name, 'category_name_url': category_name_url}
    cat_list = get_category_list()
    context_dict['cat_list'] = cat_list
    try:
        # Case-insensitive lookup; bump the category's view counter.
        cat = Category.objects.get(name__iexact=category_name)
        context_dict['category'] = cat
        cat.views += 1
        cat.save()
        pages = Image.objects.filter(category=cat).order_by('-views')
        context_dict['pages'] = pages
    except Category.DoesNotExist:
        # Unknown category: render the page without 'category'/'pages'.
        pass
    return render_to_response('polls/category.html', context_dict, context)
def get_category_list():
    """Return all categories (most viewed first), each annotated with a
    URL-safe ``url`` attribute for templates."""
    categories = Category.objects.all().order_by('-views')
    for item in categories:
        item.url = encode_url(item.name)
    return categories
def get_user_albums(request):
    """Return the logged-in user's categories (most viewed first), each
    annotated with a URL-safe ``url`` attribute."""
    albums = Category.objects.filter(user=request.user).order_by('-views')
    for album in albums:
        album.url = encode_url(album.name)
    return albums
def index(request):
    """Home page: top categories/images plus a once-per-day visit counter.

    Fix over the original: when the daily counter is incremented, the stored
    ``last_visit`` timestamp is refreshed.  The original never updated it, so
    after the first day *every* request bumped the counter.
    """
    context = RequestContext(request)
    top_category_list = Category.objects.order_by('-views')[:5]
    for category in top_category_list:
        category.url = encode_url(category.name)
    context_dict = {'categories': top_category_list}
    cat_list = get_category_list()
    context_dict['cat_list'] = cat_list
    page_list = Image.objects.order_by('-views')[:5]
    context_dict['pages'] = page_list
    if request.session.get('last_visit'):
        # The session has a value for the last visit
        last_visit_time = request.session.get('last_visit')
        visits = request.session.get('visits', 0)
        # str(datetime.now()) has microseconds; [:-7] strips them for strptime.
        if (datetime.now() - datetime.strptime(last_visit_time[:-7], "%Y-%m-%d %H:%M:%S")).days > 0:
            request.session['visits'] = visits + 1
            # FIX: refresh the timestamp so the counter is at most once per day.
            request.session['last_visit'] = str(datetime.now())
    else:
        # The get returns None, and the session does not have a value for the last visit.
        request.session['last_visit'] = str(datetime.now())
        request.session['visits'] = 1
    # Render and return the rendered response back to the user.
    return render_to_response('polls/index.html', context_dict, context)
@login_required
def add_page(request, category_name_url):
    """Handle the 'upload image to category' form (GET shows it, POST saves).

    Fix over the original: removed the unreachable
    ``return category(request, category_name_url)`` that followed the
    HttpResponseRedirect return.
    """
    context = RequestContext(request)
    cat_list = get_category_list()
    context_dict = {}
    context_dict['cat_list'] = cat_list
    category_name = decode_url(category_name_url)
    if request.method == 'POST':
        # NOTE(review): a Django Form's third positional argument is auto_id,
        # so passing request.user here is suspicious — confirm the custom
        # PageForm signature actually expects it.
        form = PageForm(request.POST, request.FILES, request.user)
        if form.is_valid():
            # The form alone cannot populate user/category, so build the
            # model instance by hand and redirect to the new image's page.
            newimg = Image(title=request.POST.get('title'), image=request.FILES['image'], user=request.user,
                           category=Category.objects.get(name=category_name))
            newimg.save()
            url = "/goto/" + str(newimg.id)
            return HttpResponseRedirect(url)
        else:
            print(form.errors)
    else:
        form = PageForm()
    context_dict['category_name_url'] = category_name_url
    context_dict['category_name'] = category_name
    context_dict['form'] = form
    return render_to_response('polls/add_page.html',
                              context_dict,
                              context)
@login_required
def add_category(request):
    """Handle the 'create category' form (GET shows it, POST saves)."""
    # Get the context from the request.
    context = RequestContext(request)
    cat_list = get_category_list()
    context_dict = {}
    context_dict['cat_list'] = cat_list
    # A HTTP POST?
    if request.method == 'POST':
        # NOTE(review): a Django Form's second positional argument is `files`;
        # passing request.user here is suspicious — confirm CategoryForm's
        # custom signature.
        form = CategoryForm(request.POST, request.user)
        # Have we been provided with a valid form?
        if form.is_valid():
            # Save the new category to the database.
            # NOTE(review): views/likes come straight from client POST data,
            # so clients can set arbitrary counters — confirm intended.
            newcat = Category(name=request.POST.get('name'), views=request.POST.get('views'), likes=request.POST.get('likes'), user=request.user)
            newcat.save()
            # Now call the index() view.
            # The user will be shown the homepage.
            return index(request)
        else:
            # The supplied form contained errors - just print them to the terminal.
            print(form.errors)
    else:
        # If the request was not a POST, display the form to enter details.
        form = CategoryForm()
    # Bad form (or form details), no form supplied...
    # Render the form with error messages (if any).
    context_dict['form'] = form
    return render_to_response('polls/add_category.html', context_dict, context)
def about(request):
    """About page; shows how many times this visitor has hit the site."""
    context = RequestContext(request)
    context_dict = {
        'cat_list': get_category_list(),
        # Session visit counter; defaults to 0 for first-time visitors.
        'visit_count': request.session.get('visits', 0),
    }
    return render_to_response('polls/about.html', context_dict, context)
# register.html
def register(request):
    """Handle user sign-up: create a User plus its linked UserProfile."""
    context = RequestContext(request)
    # boolean to tell template whether the registration was successful
    registered = False
    if request.method == 'POST':
        # attempt to grab raw info
        user_form = UserForm(data=request.POST)
        profile_form = UserProfileForm(data=request.POST)
        # if 2 forms valid
        if user_form.is_valid() and profile_form.is_valid():
            # save user form to database
            user = user_form.save()
            # hash user with set password method
            user.set_password(user.password)
            user.save()
            # sort out UserProfile instance (commit=False: user FK not set yet)
            profile = profile_form.save(commit=False)
            profile.user = user
            # Did user provide profile picture?
            if 'picture' in request.FILES:
                profile.picture = request.FILES['picture']
            # save user profile
            profile.save()
            # update register boolean
            registered = True
        else:
            print(user_form.errors, profile_form.errors)
    # Not a HTTP POST, render form using 2 ModelForm instances
    else:
        user_form = UserForm()
        profile_form = UserProfileForm()
    # Render the template depending on the context
    return render_to_response(
        'polls/register.html',
        {'user_form': user_form, 'profile_form': profile_form, 'registered': registered},
        context)
def user_login(request):
    """Authenticate a user from the login form and redirect to their profile.

    Fix over the original: the failure branch called
    ``print("...").format(username, password)`` — i.e. ``.format`` on
    ``print``'s return value (None) — raising AttributeError on every failed
    login attempt.
    """
    context = RequestContext(request)
    if request.method == 'POST':
        # gather username and password
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(username=username, password=password)
        # if user object, details are correct
        if user:
            # is account active?
            if user.is_active:
                # send user to their profile page
                login(request, user)
                url = '/profile/' + str(user.id)
                return HttpResponseRedirect(url)
            else:
                # inactive account used
                return HttpResponse("Your Photoshare account is disabled.")
        else:
            # FIX: format the string, then print it.
            # SECURITY NOTE: logging a raw password is bad practice; kept only
            # to preserve the original message — consider removing it.
            print("Invalid login details: {0}, {1}".format(username, password))
            return HttpResponse("Invalid login details supplied.")
    # else request is not a POST
    else:
        # no context variables to pass to template system
        return render_to_response('polls/login.html', context)
# Only allow logged in users to logout - add the @login_required decorator!
@login_required
def user_logout(request):
    """Log the current user out and send them to the homepage."""
    # As we can assume the user is logged in, we can just log them out.
    logout(request)
    # Take the user back to the homepage.
    return HttpResponseRedirect('/')
|
from django.contrib import admin
from .models import Restaurent
# Register your models here.
# Expose Restaurent in the Django admin with the default ModelAdmin options.
admin.site.register(Restaurent)
|
#coding:utf-8
'''
Created on 2015.12.29
@author: Chunyun
'''
import pika
import time
connection = pika.BlockingConnection(pika.ConnectionParameters('192.168.206.129'))
channel = connection.channel()
# 创建名字为test的queue,然后可以在server上sudo rabbitmqctl list_queues来查看这个queue
channel.queue_declare(queue='queue-1', durable=True)
print ' [*] Waiting for messages. To exit press CTRL+C'
def callback(ch, method, properties, body):
print " [test] Received %r" % (body,)
time.sleep( body.count('.') )
print " [test] Done"
ch.basic_ack(delivery_tag = method.delivery_tag)
# 通过 basic.qos 方法设置prefetch_count=1 ,这样RabbitMQ就会使
# 得每个Consumer在同一个时间点最多处理一个Message,
# 换句话说,在接收到该Consumer的ack前,他它不会将新的Message分发给它
# 注意,这种方法可能会导致queue满。当然,这种情况下你可能需要添加更多
# 的Consumer,或者创建更多的virtualHost来细化设计
channel.basic_qos(prefetch_count=1)
channel.basic_consume(callback,
queue='queue-1')
channel.start_consuming() |
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.sensors import SqlSensor
from airflow.operators.hive_operator import HiveOperator
from airflow.operators.mysql_operator import MySqlOperator
from settings import default_args
# - Student accounts
#   - basic information
#   - devices / environments (test network)
#   - student table (moved under the mobile-SSO user DAG on 2019-03-11)
#   - parent table
# Daily DAG (01:00): stage the source `parents` table into Hive stg/ods layers.
dag = DAG('dw_student_basic_d', default_args=default_args,
          schedule_interval='0 1 * * *')
# Wait until today's source-DB restore has been logged before extracting.
start = SqlSensor(
    task_id='start',
    conn_id='src_main_db',
    sql="SELECT * FROM restore_tracking.restore_log WHERE date(restored_time) = current_date;",
    dag=dag)
# Drop today's staging partition first so the load is idempotent on re-runs.
del_partiton_stg_parents = HiveOperator(
    task_id='del_partiton_stg_parents',
    hive_cli_conn_id='spark_thrift',
    hql="alter table stg.newuuabc_parents drop if exists PARTITION (etl_date='{{ macros.ds(ti) }}');\n ",
    dag=dag)
# Extract the parents table for the [ds, tomorrow_ds) window.
src_stg_parents = BashOperator(
    task_id='src_stg_parents',
    bash_command='dataship extract uuold_newuuabc.parents {{ macros.ds(ti) }} {{ macros.tomorrow_ds(ti) }}',
    pool="embulk_pool",
    dag=dag)
add_partiton_stg_parents = HiveOperator(
    task_id='add_partiton_stg_parents',
    hive_cli_conn_id='spark_thrift',
    hql="alter table stg.newuuabc_parents add PARTITION (etl_date='{{ macros.ds(ti) }}');\n ",
    dag=dag)
# Promote the staged rows into the ODS layer.
stg_ods_parents = HiveOperator(
    task_id='stg_ods_parents',
    hive_cli_conn_id='spark_thrift',
    hql='scripts/stg2ods.newuuabc_parents_insert.sql',
    dag=dag
)
# Record completion in the `signal` table so downstream jobs can poll for it.
end = MySqlOperator(
    task_id='end',
    mysql_conn_id='etl_db',
    sql="INSERT INTO `signal` VALUES('{1}', '{0}') ON DUPLICATE KEY UPDATE `value`='{0}'; ".format("{{ macros.ds(ti) }}", "{{ dag.dag_id }}"),
    database='etl',
    dag=dag
)
start >> del_partiton_stg_parents >> src_stg_parents >> add_partiton_stg_parents >> stg_ods_parents >> end
|
#!/usr/bin/env python
import requests
import json
from bs4 import BeautifulSoup
#Returns a session to use
def login():
    """Create a requests.Session logged in to foreignaffairs.com.

    Credentials are read from the local 'credentials' JSON file
    ({"email": ..., "password": ...}).

    Fix over the original: ``resp.raw.read()`` returns an empty byte string
    unless the request was made with ``stream=True``; use ``resp.text`` for
    the decoded body.  The redundant ``f.close()`` inside the with-block is
    gone, and BeautifulSoup gets an explicit parser.
    """
    #Load login credentials
    LOGIN_FILE_NAME = 'credentials'
    with open(LOGIN_FILE_NAME, 'r') as f:
        personal_info = json.load(f)
    #session
    r = requests.Session()
    #log in (requires foreign affairs account)
    resp = r.get('https://www.foreignaffairs.com/user?op=lo')
    html = resp.text
    soup = BeautifulSoup(html, 'html.parser')
    # Drupal embeds a per-request form_build_id that must be echoed back.
    form_build_id = soup.find(id='content-area').find(type='hidden')['id']
    credentials = {
        'name': personal_info['email'],
        'pass': personal_info['password'],
        'form_build_id': form_build_id,
        'form_id': 'user_login',
        'op': 'Log in'
    }
    login_resp = r.post('https://www.foreignaffairs.com/user?destination=home', data=credentials)
    return r
#Use this function only after logging in.
#r is the session returned from logging on
def get_html(url, r):
    """Fetch *url* with session *r*, cache the body to temp.html, return it.

    Fix over the original: ``resp.raw.read()`` is empty without
    ``stream=True`` — use the decoded ``resp.text`` instead; the redundant
    ``f.close()`` inside the with-block is removed.
    """
    resp = r.get(url)
    html = resp.text
    with open('temp.html', 'w') as f:
        f.write(html)
    return html
def get_article_text(url, r):
    """Return the article at *url* as 'title.\\nsubtitle.author.article' text.

    Fixes over the original: ``resp.raw.read()`` (empty without
    ``stream=True``) replaced with ``resp.text``; the unused ``full``
    variable removed; explicit parser passed to BeautifulSoup.
    """
    resp = r.get(url)
    html = resp.text
    soup = BeautifulSoup(html, 'html.parser')
    title = soup.h1.get_text()
    subtitle = soup.h2.get_text()
    author = soup.find(is_author).get_text()
    article = soup.find(is_article).get_text()
    output = title+'.\n'+subtitle+'.'+author+'.'+article
    return output
def is_article(tag):
    """BeautifulSoup filter: tags whose first CSS class is 'content-resize'."""
    if not tag.has_attr('class'):
        return False
    return tag['class'][0] == 'content-resize'
def is_author(tag):
    """BeautifulSoup filter: tags whose first CSS class is 'article-field-authors'."""
    if not tag.has_attr('class'):
        return False
    return tag['class'][0] == 'article-field-authors'
|
from django.db import models
from django.utils.translation import gettext_lazy as _
class Troop(models.Model):
    """ Scout troop like a single troop (Stamm) or a district (Bezirk/Diözese)
    Columns:
    :number: national scouting ID of the troop
    :name: name of the troop
    """
    # Unique national ID; also part of the display string below.
    number = models.PositiveIntegerField(
        _('number'),
        unique=True,
    )
    name = models.CharField(
        _('name'),
        max_length=128,
        unique=True,
    )
    class Meta:
        verbose_name = _('troop')
        verbose_name_plural = _('troops')
    def __str__(self):
        # e.g. "1234 Stamm Adler"
        return '{} {}'.format(self.number, self.name)
|
# python3
# Create date: 2021-06-06
# Author: Scc_hy
# Func: 基于百度的开源OCR库进行文本识别
# =================================================================================
import os
# Work around the "libiomp5md.dll already initialized" OpenMP crash that can
# occur when multiple native runtimes are loaded (paddle + PyQt on Windows).
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
import numpy as np
from PyQt5.QtGui import QImage
import time
from PIL import Image
import time
from paddleocr import PaddleOCR
# Module-level OCR engine shared by the helpers below (Chinese model, CPU only).
ocr = PaddleOCR(lang='ch', use_gpu=False)
def sample_ocr(img:"np.array/os.path", ocr_model:"paddleocr.PaddleOCR"=ocr, show:bool=True) -> list:
    """
    Run Chinese OCR over *img* using Baidu's open-source PaddleOCR.

    param img: np.array image matrix, or a path to an image file
    param ocr_model: paddleocr.PaddleOCR engine instance
    param show: bool, display the image being recognized
    """
    if show:
        # NOTE(review): Image.fromarray assumes *img* is an array; calling
        # with show=True and a path would crash here — confirm callers.
        Image.fromarray(img).show()
    st = time.perf_counter()
    res = ocr_model.ocr(img, rec=True, det=True)
    cost_ = time.perf_counter() - st
    print(f'Finished detect the pic - {cost_:.5f}s')
    # Each result item is (box, (text, confidence)); keep just the text.
    return [i[1][0] for i in res]
def pyqt5_img2arr(q_img: QImage) -> np.array:
    """
    Convert a PyQt5 QImage into an np.array.

    Note: the direct conversion yields a 4-channel (RGBA) array, so PIL is
    used afterwards to drop the alpha channel and return 3-channel RGB.
    """
    # QImage -> np.array (m, n, 4)
    q_img = q_img.convertToFormat(QImage.Format_RGBA8888)
    width = q_img.width()
    height = q_img.height()
    ptr = q_img.bits()
    # Tell the sip pointer its byte length so numpy can wrap it.
    ptr.setsize(width * height * 4)
    arr = np.frombuffer(ptr, np.uint8).reshape((height, width, 4))
    # np.array (m, n, 4) -> np.array (m, n, 3)
    return np.array(Image.fromarray(arr).convert('RGB'))
def pyqt5img_ocr(q_img: "QImage/os.path") -> list:
    """
    Recognize text in a QImage or an image file path:
    pyqt5_img2arr -> sample_ocr (paths are handed to the OCR engine directly).
    """
    if isinstance(q_img, str) and os.path.exists(q_img):
        return sample_ocr(q_img)
    return sample_ocr(pyqt5_img2arr(q_img))
|
"""This module provides the main functionality of HTTPie.
Invocation flow:
1. Read, validate and process the input (args, `stdin`).
2. Create and send a request.
3. Stream, and possibly process and format, the requested parts
of the request-response exchange.
4. Simultaneously write to `stdout`
5. Exit.
"""
import sys
import errno
import requests
from requests.compat import str, is_py3
from httpie import __version__ as httpie_version
from requests import __version__ as requests_version
from pygments import __version__ as pygments_version
from .cli import parser
from .client import get_response
from .models import Environment
from .output import output_stream, write, write_with_colors_win_p3k
from . import exit
def get_exist_status(code, follow=False):
    """Translate HTTP status code to exit status.

    (The name is kept for API compatibility; it returns an *exit* status.)
    """
    if 300 <= code <= 399:
        # Redirects only count as errors when we were not following them.
        return exit.OK if follow else exit.ERROR_HTTP_3XX
    if 400 <= code <= 499:
        # Client Error
        return exit.ERROR_HTTP_4XX
    if 500 <= code <= 599:
        # Server Error
        return exit.ERROR_HTTP_5XX
    return exit.OK
def print_debug_info(env):
    """Write version/platform diagnostics (used by --debug) to stderr."""
    sys.stderr.writelines([
        'HTTPie %s\n' % httpie_version,
        'HTTPie data: %s\n' % env.config.directory,
        'Requests %s\n' % requests_version,
        'Pygments %s\n' % pygments_version,
        'Python %s %s\n' % (sys.version, sys.platform)
    ])
def main(args=sys.argv[1:], env=Environment()):
    """Run the main program and write the output to ``env.stdout``.
    Return exit status.
    """
    # NOTE(review): the mutable default Environment() is created once at
    # import time and shared across calls — mirrors upstream HTTPie of this
    # era; confirm intended before changing.
    if env.config.default_options:
        args = env.config.default_options + args
    def error(msg, *args):
        # %-format lazily and prefix like other http error output.
        msg = msg % args
        env.stderr.write('\nhttp: error: %s\n' % msg)
    debug = '--debug' in args
    traceback = debug or '--traceback' in args
    status = exit.OK
    if debug:
        print_debug_info(env)
        if args == ['--debug']:
            sys.exit(exit.OK)
    try:
        args = parser.parse_args(args=args, env=env)
        response = get_response(args, config_dir=env.config.directory)
        if args.check_status:
            status = get_exist_status(response.status_code,
                                      args.follow)
            # Warn on stderr only when stdout is redirected, keeping piped
            # output clean.
            if status and not env.stdout_isatty:
                error('%s %s', response.raw.status, response.raw.reason)
        stream = output_stream(args, env, response.request, response)
        write_kwargs = {
            'stream': stream,
            'outfile': env.stdout,
            # Flush eagerly when interactive or explicitly streaming.
            'flush': env.stdout_isatty or args.stream
        }
        try:
            # Windows + Python 3 needs a dedicated writer for colored output.
            if env.is_windows and is_py3 and 'colors' in args.prettify:
                write_with_colors_win_p3k(**write_kwargs)
            else:
                write(**write_kwargs)
        except IOError as e:
            if not traceback and e.errno == errno.EPIPE:
                # Ignore broken pipes unless --traceback.
                env.stderr.write('\n')
            else:
                raise
    except (KeyboardInterrupt, SystemExit):
        if traceback:
            raise
        env.stderr.write('\n')
        status = exit.ERROR
    except requests.Timeout:
        status = exit.ERROR_TIMEOUT
        error('Request timed out (%ss).', args.timeout)
    except Exception as e:
        # TODO: distinguish between expected and unexpected errors.
        # network errors vs. bugs, etc.
        if traceback:
            raise
        error('%s: %s', type(e).__name__, str(e))
        status = exit.ERROR
    return status
|
import os
def dcm2nii():
    # TODO: unimplemented stub — presumably meant to convert DICOM to NIfTI;
    # confirm before relying on it.
    return
def main(working_dir="./"):
    """Move gzipped files from each patient's dicom/<phase>/ folder into a
    parallel nii/<phase>/ folder.

    Expected layout: <working_dir>/<patient>/<stage>/dicom/<phase>/<file>.gz

    Improvements over the original: the root directory is a parameter
    (defaulting to "./" for backward compatibility), and the progress print
    shows the real source path — the original omitted the <phase> component.
    """
    for patient in os.listdir(working_dir):
        if not os.path.isdir(os.path.join(working_dir, patient)):
            continue
        print("Working on", patient)
        for stage in os.listdir(os.path.join(working_dir, patient)):
            dicom_dir = os.path.join(working_dir, patient, stage, "dicom")
            for phase in os.listdir(dicom_dir):
                for file in os.listdir(os.path.join(dicom_dir, phase)):
                    # Only relocate gzip-compressed volumes (*.gz).
                    if os.path.splitext(file)[1] != ".gz":
                        continue
                    source_file = os.path.join(dicom_dir, phase, file)
                    target_dir = os.path.join(working_dir, patient, stage, "nii", phase)
                    target_file = os.path.join(target_dir, file)
                    print(source_file, target_file)
                    os.makedirs(target_dir, exist_ok=True)
                    os.rename(source_file, target_file)
if __name__ == "__main__":
    # Reorganise the current working directory.
    main()
import numpy as np
import os
import pandas as pd
import datetime
from xgboost import XGBRegressor
import matplotlib.pyplot as plt
def train_test_split(data, perc):
    """Split a DataFrame chronologically: the first (1-perc) fraction of rows
    becomes the train ndarray, the remaining perc fraction the test ndarray."""
    values = data.values
    split_at = int(len(values) * (1 - perc))
    return values[:split_at], values[split_at:]
def xgboost_predict(train, val, model):
    """Fit *model* on the training window and predict the single sample *val*.

    The last column of *train* is the target; the last two columns (the Date
    column and Target) are excluded from the features.
    """
    train_arr = np.array(train)
    features, target = train_arr[:, :-2], train_arr[:, -1]
    model.fit(features, target)
    sample = np.array(val).reshape(1, -1)
    return model.predict(sample)[0]
def xgboost_get_value(val, model):
    """Predict a single sample *val* with an already-fitted model."""
    sample = np.array(val).reshape(1, -1)
    return model.predict(sample)[0]
def xgboost_prediction(df, isROC, prefix):
    """Walk-forward XGBoost prediction over the tail of *df*.

    param df: DataFrame with at least 'Close' and 'Date' columns
    param isROC: if True, predict next-day rate-of-change; else next-day close
    param prefix: ticker prefix used in the cached model filename
    returns: (prepared df indexed by Date, prediction DataFrame indexed by Date)

    Fix over the original: it called ``open(model_path, "w")`` right before
    ``save_model`` — truncating the file and leaking the handle.  ``save_model``
    creates the file itself, so the open() is removed and the target directory
    is created instead.
    """
    if isROC:
        # Daily rate of change, predicting tomorrow's ROC.
        df = df[['Close', 'Date']].copy()
        df['Close_Yesterday'] = df.Close.shift(1)
        df['ROC'] = (df['Close'] - df['Close_Yesterday']) / \
            df['Close_Yesterday']
        df['Target'] = df.ROC.shift(-1)
        df = df[['ROC', 'Date', 'Target']].copy()
        model_path = './xgboost/model_ROC_' + prefix + '.json'
        estimators = 2000
    else:
        # Predict tomorrow's closing price directly.
        df = df[['Close', 'Date']].copy()
        df['Target'] = df.Close.shift(-1)
        model_path = './xgboost/model_CLOSE_' + prefix + '.json'
        estimators = 25
    df.dropna(inplace=True)
    prediction = []
    precentTestRecord = 0.015  # fraction of rows used as the walk-forward test window
    train, test = train_test_split(df, precentTestRecord)
    history = [x for x in train]
    isModelStored = False
    model = XGBRegressor(objective="reg:squarederror", n_estimators=estimators)
    if os.path.exists(model_path):
        model.load_model(model_path)
        isModelStored = True
    for i in range(len(test)):
        val = np.array(test[i, :-2])
        # Reuse the cached model when present; otherwise refit on the growing history.
        pred = xgboost_get_value(
            val, model) if isModelStored else xgboost_predict(history, val, model)
        prediction.append(pred)
        history.append(test[i])
        print('%d/%d test done' % (i + 1, len(test)))
    if not isModelStored:
        # save_model creates/overwrites the file itself; just ensure the dir exists.
        os.makedirs(os.path.dirname(model_path), exist_ok=True)
        model.save_model(model_path)
    # Predictions are for the *next* day, so shift the dates forward by one.
    predDate = [x for x in test[:, 1][1:]]
    lastDate = datetime.datetime.strptime(predDate[-1], '%Y-%m-%d')
    nextDate = lastDate + datetime.timedelta(days=1)
    predDate.append(datetime.datetime.strftime(nextDate, '%Y-%m-%d'))
    prediction_df = pd.DataFrame({'Date': predDate, 'Prediction': prediction})
    df["Date"] = pd.to_datetime(df.Date, format="%Y-%m-%d")
    df.index = df['Date']
    prediction_df["Date"] = pd.to_datetime(
        prediction_df.Date, format="%Y-%m-%d")
    prediction_df.index = prediction_df['Date']
    return df, prediction_df
|
# -*- coding:utf-8 -*-
import tensorflow as tf
import numpy as np
import os
import codecs
import json
import pickle
import jieba
ConfDirPath = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "conf")
DataDirPath = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "data")
UNKNOWN = "<OOV>"
def get_data_ids(configs, word_to_id_dict, char_to_id_dict):
    """Convert each line of the input file into a fixed-length list of ids.

    Each line produces a word-id section (jieba tokens) of exactly
    ``configs["word_max_len"]`` entries followed by a char-id section so the
    total length is ``word_max_len + char_max_len``. Unknown words/chars map
    to the <OOV> id, which is also used for padding.

    Args:
        configs: dict with "input_file", "word_max_len" and "char_max_len".
        word_to_id_dict: mapping word -> int id; must contain UNKNOWN.
        char_to_id_dict: mapping char -> int id; must contain UNKNOWN.

    Returns:
        A list of equal-length int-id lists, one per input line.
    """
    # Read and tokenize every line up front; close the file deterministically.
    with codecs.open(configs["input_file"], mode='r', encoding='utf-8') as test_reader:
        data_lists = [(jieba.lcut(line.strip()), [ch for ch in line.strip()])
                      for line in test_reader]
    word_unknown_id = word_to_id_dict[UNKNOWN]
    char_unknown_id = char_to_id_dict[UNKNOWN]
    word_max_len = configs["word_max_len"]
    char_max_len = configs["char_max_len"]
    data_id_lists = []
    for (text_list, char_list) in data_lists:
        # Word-id section.
        data_item = [word_to_id_dict.get(word, word_unknown_id) for word in text_list]
        # Bug fix: the original padded with `while len(data_item) <= word_max_len`,
        # yielding word_max_len + 1 ids for short lines while truncation yielded
        # word_max_len, so rows had inconsistent lengths (breaking the model's
        # fixed input size). Pad/truncate to exactly word_max_len.
        # TODO(review): confirm the trained model expects word_max_len (not +1).
        del data_item[word_max_len:]
        data_item.extend([word_unknown_id] * (word_max_len - len(data_item)))
        # Char-id section, same fix applied: total length is exactly
        # word_max_len + char_max_len.
        for char in char_list:
            data_item.append(char_to_id_dict.get(char, char_unknown_id))
        total_len = word_max_len + char_max_len
        del data_item[total_len:]
        data_item.extend([char_unknown_id] * (total_len - len(data_item)))
        data_id_lists.append(data_item)
    return data_id_lists
if __name__ == "__main__":
    # Load runtime configuration and the word/char -> id vocabularies that
    # were pickled at training time.
    configs = json.load(open(os.path.join(ConfDirPath, "interface.json")))
    serialization_dir = os.path.join(DataDirPath, configs["serialization_dir"])
    word_to_id_dict = pickle.load(open(os.path.join(serialization_dir, configs["word_to_id"]), 'rb'))
    char_to_id_dict = pickle.load(open(os.path.join(serialization_dir, configs["char_to_id"]), 'rb'))
    # get data id
    data_id_lists = get_data_ids(configs, word_to_id_dict, char_to_id_dict)
    print("data nums in test file:", len(data_id_lists))
    # load model: TF1-style inference — rebuild the graph from the
    # checkpoint's .meta file and look placeholders/outputs up by name.
    checkpoint_file = tf.train.latest_checkpoint(configs["check_point_dir"])
    if checkpoint_file is None:
        print("Cannot find a valid checkpoint file!")
        exit(0)
    print("Using checkpoint file : {}".format(checkpoint_file))
    graph = tf.Graph()
    with graph.as_default(), tf.Session() as sess:
        # Load the saved meta graph and restore variables
        saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
        saver.restore(sess, checkpoint_file)
        # Get the placeholders from the graph by name.
        # NOTE(review): these tensor names must match the training graph's
        # scopes exactly — confirm against the training script.
        input_name = "text_classification/model_input:0"
        dropout_name = "text_classification/dropout_keep_prob:0"
        predict_name = "text_classification/output/predictions:0"
        # features_name = "text_classification/dropout/dropout_feature/mul:0"
        input_tensor = graph.get_tensor_by_name(input_name)
        dropout_tensor = graph.get_tensor_by_name(dropout_name)
        predict_tensor = graph.get_tensor_by_name(predict_name)
        # features_tensor = graph.get_tensor_by_name(features_name)
        # model run: one example at a time, batch dimension of 1;
        # dropout keep-prob 1.0 disables dropout at inference.
        data_tags = []
        # data_features = []
        for data_item in data_id_lists:
            data_item_array = np.asarray(data_item, dtype=np.int32).reshape((1, -1))
            item_tag = sess.run(fetches=predict_tensor, feed_dict={input_tensor: data_item_array, dropout_tensor: 1.0})
            data_tags.append(item_tag[0])
            # data_features.append(item_features[0])
        # save results: one predicted tag per line of the input file.
        tag_res_writer = codecs.open(os.path.join(configs["output_dir"], "tags.txt"), mode='w', encoding='utf-8')
        for tag in data_tags:
            tag_res_writer.write(str(tag)+"\n")
        tag_res_writer.close()
        # pickle.dump(data_features, open(os.path.join(configs["output_dir"], "data_features"), 'wb'))
        print("model run over")
#coding=utf-8
#Version:python3.6.0
#Tools:Pycharm 2017.3.2
# Author:LIKUNHONG
__date__ = '2018/7/30 19:14'
__author__ = 'Colby'
# f =open("testWenJian",'r',encoding='utf-8')
# print(f.read())
#测试r+
# f = open("testWenJian",'r+',encoding='utf-8')
# # f.write("\n111")
#
# print(f.readline())
# print(f.readline())
# print(f.readline())
# f.write('新的')
#迭代器读文件行
# for index,line in enumerate(f.readlines()):
# if index == 2:
# print('--------------------------------')
# continue
# print(line.strip())
#读第一个文件,修改写入第二个文件
# Copy testWenJian to NewtestWenJian, replacing every occurrence of
# '新的' with '又来一次新的'; `with` guarantees both files are closed.
with open("testWenJian", 'r', encoding='utf-8') as src, \
        open('NewtestWenJian', 'w', encoding='utf-8') as dst:
    for line in src:
        # replace() is a no-op when the substring is absent.
        dst.write(line.replace('新的', '又来一次新的'))
#水仙花数
# for i in range(100,999):
# a = (int)(i % 10)
# b = (int)(i / 10 % 10)
# c = (int)(i/ 100 % 10)
# if a*a*a+b*b*b+c*c*c == i:
# print(a,b,c,i)
#大小转小写,小写转大写
ch = 'a'
if 'a' <= ch <= 'z':
ch += 32
else:
ch -= 32
|
from kivy.app import App
from kivy.lang import Builder
# The custom App class: no widget-creation code here — the entire UI is
# described declaratively in widget.kv (abstraction at work).
class HelloKv(App):
    """App whose widget tree is loaded from the kv rule file widget.kv."""

    def build(self):
        # Window title shown by the OS.
        self.title = "Hello world!"
        # Build the root widget from the kv file and hand it to Kivy.
        root_widget = Builder.load_file('widget.kv')
        self.root = root_widget
        return root_widget


# Instantiate the app and enter the Kivy main loop.
HelloKv().run()
|
import unittest
class TestFraction(unittest.TestCase):
    """Test methods need to be implemented here."""
# Advent of Code 2017 day 13 (packet scanners), ported from Python 2
# (`print x`) to Python 3.
ranges = {}
# Input lines look like "depth: range".
with open('input/input13.txt') as f:
    for line in f:
        s = line.split(': ')
        ranges[int(s[0])] = int(s[1])

# A scanner with range R is back at position 0 every 2*(R-1) picoseconds;
# the packet reaches layer `depth` at time depth + offset.
IamCaught = lambda depth, offset, sRange: (depth + offset) % (2 * (sRange - 1)) == 0

# Part 1: total severity (depth * range for each layer that catches us)
# when leaving immediately.
severity = 0
for k in ranges.keys():
    if IamCaught(k, 0, ranges[k]):
        severity += k * ranges[k]
print(severity)

# Part 2: smallest delay that crosses without being caught.
# Bug fix: the original incremented the offset before testing, so a delay
# of 0 could never be reported even if it were safe.
offset = 0
while any(IamCaught(k, offset, ranges[k]) for k in ranges):
    offset += 1
print(offset)
from cassandra.cluster import Cluster
# Connection settings for a local, single-node Cassandra cluster.
hostname = '127.0.0.1'
keyspace = 'db1'
column_family = 'postInfo'
nodes = []
nodes.append(hostname)
# Module-level cluster/session shared by the functions below; connecting
# happens at import time.
cluster = Cluster(nodes)
session = cluster.connect(keyspace)
def createPostInfoCF():
    '''
    Create the column family 'postInfo' in the cassandra keyspace if it
    does not already exist.

    :return: None
    '''
    # Bug fix: the original CQL was missing the closing ')' of the column
    # list, which makes the statement invalid.
    createColumnFamily = session.prepare("""
        create columnfamily if not exists postInfo(
        Domain TEXT PRIMARY KEY
        ,TotalQuestions INT
        ,UnansweredQuestions INT
        ,TrendingTags LIST<TEXT>
        ,AverageAnswersCount INT
        ,MostViewedPosts LIST<INT>
        ,MostScoredPosts LIST<INT>
        ,AverageTimeToAnswer INT
        )
    """)
    session.execute(createColumnFamily)
|
import cv2
import imutils
import numpy as np
from pyimagesearch.centroidtracker import CentroidTracker
class Counter:
    """Counts persons in an image using a MobileNet-SSD Caffe detector."""

    # Class labels in the order the MobileNet-SSD model outputs them.
    CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
               "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
               "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
               "sofa", "train", "tvmonitor"]

    def __init__(self, confidence=0.3):
        # Minimum detection score for a detection to be counted.
        self.confidence = confidence
        self.net = cv2.dnn.readNetFromCaffe('mobilenet_ssd/MobileNetSSD_deploy.prototxt',
                                            'mobilenet_ssd/MobileNetSSD_deploy.caffemodel')
        # Frame dimensions; filled in lazily on the first call to count().
        self.W = -1
        self.H = -1
        self.ct = CentroidTracker(maxDisappeared=40, maxDistance=50)

    def count(self, image):
        """Return the number of detected persons in *image* (a BGR frame)."""
        frame = cv2.cvtColor(imutils.resize(image, width=500), cv2.COLOR_BGR2RGB)
        # Cache the dimensions of the first frame for the blob size.
        if self.W < 0:
            self.H, self.W = frame.shape[:2]
        self.net.setInput(cv2.dnn.blobFromImage(frame, 0.007843, (self.W, self.H), 127.5))
        detections = self.net.forward()
        total = 0
        for i in range(detections.shape[2]):
            score = detections[0, 0, i, 2]
            if score > self.confidence:
                label = self.CLASSES[int(detections[0, 0, i, 1])]
                if label == "person":
                    total += 1
        return total
|
# File I/O demo: mode "w" truncates and writes from the start.
with open("score.txt", "w", encoding="utf8") as score_file:
    print("수학 : 0", file=score_file)
    print("영어 : 50", file=score_file)

# Mode "a" appends after the existing contents.
with open("score.txt", "a", encoding="utf8") as score_file:
    score_file.write("과학: 80")
    score_file.write("\ncoding :100")
|
from itertools import combinations_with_replacement
def solver(sections, darts, score):
    """List every multiset of *darts* throws at *sections* summing to *score*.

    Returns each solution as a '-'-joined string of section values, in the
    order combinations_with_replacement generates them.
    """
    hits = []
    for combo in combinations_with_replacement(sections, r=darts):
        if sum(combo) == score:
            hits.append('-'.join(map(str, combo)))
    return hits
if __name__ == '__main__':
    # Demo: all ways three darts on these sections can total 32.
    print(solver([3, 7, 11, 14, 18, 20, 25], 3, 32))
|
"""
module: Notification_DB.py
------------------------------------------------------------------------
Author: David J. Sanders
Student No: H00035340
Last Update: 15 December 2015
Update: Revise documentation
------------------------------------------------------------------------
Overview: The persistence layer for notification objects.
Purpose: Defines the database members, functions, and operations for the
Notification 'class'
Called By: n/a
References
----------
"""
# Import the configuration package
import notifications.resources.Config as Config
# Import the app and api contexts
from notifications import app, api
# Import the notification class
from notifications.resources.Notification import Notification
# Import jsonschema validation and exception functions
from jsonschema import validate, exceptions
# Import SQLite3 for persistence. Could be anything, e.g. redis
import sqlite3
#
# The Notification_DB object.
#
class Notification_DB(object):
    """Persistence layer for Notification objects.

    Backed by SQLite via the Config helpers; the `notifications` table
    stores (key, value) rows where value is the JSON string produced by
    Notification.dump().
    """
    # Kept for compatibility; the actual database handle comes from Config.
    database_name = None

    # Reference: http://flask.pocoo.org/docs/0.10/patterns/sqlite3/
    # The decorator app.teardown_appcontext is fired whenever Flask knows it
    # is done with a request, so any database activity is closed properly.
    # NOTE(review): Flask passes the teardown *exception* as the single
    # argument, so `self` actually receives it here. It is unused, so this
    # works, but confirm the in-class registration is intentional.
    @app.teardown_appcontext
    def teardown_close(self):
        database = Config.db_get()
        Config.db_close(database)

    def query_all(self):
        '''
        query_all()

        Select all notifications in the database.

        Returns a list of the stored JSON value strings.
        '''
        database = Config.db_get()
        db_records = Config.db_execute(
            database=database,
            sql_statement='select key, value from notifications',
            multiple=True
        )
        Config.db_close(database)
        # Column 0 is the key, column 1 the JSON value; only values are returned.
        return [db_row[1] for db_row in db_records]

    def query_one(self, key):
        '''
        query_one(key=the_key)

        Select one notification in the database where key equals the_key.
        Key MUST be an integer.

        Raises KeyError for an invalid key, IndexError when no row matches.
        '''
        # Fixed: compare against None with `is`, not `==` (PEP 8).
        # The strict type() check deliberately rejects bool and other
        # int subclasses, matching the original behavior.
        if key is None \
                or type(key) != int \
                or key < 0:
            raise KeyError('Key must be greater than or equal to zero')
        database = Config.db_get()
        db_records = Config.db_execute(
            database=database,
            sql_statement='select key, value from notifications '
                          'where key = ?',
            args=(key,),
            multiple=False
        )
        Config.db_close(database)
        if db_records is None:
            raise IndexError('Notification does not exist.')
        return db_records[1]

    def delete_one(self, key):
        '''
        delete_one(key=the_key)

        Delete one notification from the database where key equals the_key.
        Key MUST be an integer. Deleting a non-existent key is a no-op.
        '''
        # The original wrapped this in `try/except Exception: raise`, which
        # is a no-op; exceptions still propagate to the caller unchanged.
        database = Config.db_get()
        Config.db_execute(
            database=database,
            sql_statement='delete from notifications '
                          'where key = ?',
            args=(key,)
        )
        database.commit()
        Config.db_close(database)

    def delete_all(self):
        '''
        delete_all()

        Delete all notifications in the database.
        '''
        database = Config.db_get()
        Config.db_execute(
            database=database,
            sql_statement='delete from notifications'
        )
        database.commit()
        Config.db_close(database)

    def update_one(self, key, value):
        '''
        update_one(key=the_key, value=the_value)

        Update one notification in the database where key equals the_key and
        set its value to the_value (e.g. {"notification":"...",...} - IE it
        must be a string of JSON data!). Key MUST be an integer.
        '''
        # Removed: an unused `updated_data` flag and a leftover debug print
        # in the exception path; errors still propagate to the caller.
        database = Config.db_get()
        Config.db_execute(
            database=database,
            sql_statement='update notifications '
                          'set value = ? '
                          'where key = ?',
            args=(value, key)
        )
        database.commit()
        Config.db_close(database)

    def insert(self, note):
        '''
        insert(note=note_object)

        Insert a notification in the database. NOTE an object is being
        passed, NOT a key and string value; note.identifier is assigned
        the next free key as a side effect.
        '''
        database = Config.db_get()
        # Allocate the next key: max existing key + 1 (1 for an empty table).
        db_records = Config.db_execute(
            database=database,
            sql_statement='select ifnull(max(key),0) from notifications',
            multiple=False
        )
        note.identifier = db_records[0] + 1
        Config.db_execute(
            database=database,
            sql_statement='insert into notifications (key, value) '
                          'values (?, ?)',
            args=(note.identifier, note.dump())
        )
        database.commit()
        Config.db_close(database)
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.pipelines.images import ImagesPipeline
import scrapy
class SoPipeline(ImagesPipeline):
    """Image pipeline that downloads each item's linked image."""

    def get_media_requests(self, item, info):
        # One download request per item; ImagesPipeline handles storage.
        img_url = item['img_link']
        yield scrapy.Request(img_url)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.