text stringlengths 38 1.54M |
|---|
def SelectionSort(A):
    """Sort the list A in place (ascending) using selection sort.

    :param A: mutable sequence of comparable items
    :return: None (A is modified in place, matching the original contract)
    """
    n = len(A)
    for j in range(n - 1):
        # index of the smallest remaining element in A[j:]
        # (min returns the first minimal index, matching the original strict-< scan)
        imin = min(range(j, n), key=A.__getitem__)
        if imin != j:  # skip the no-op swap
            A[imin], A[j] = A[j], A[imin]
|
# coding:utf-8
import os
import time
import tempfile
import aircv as ac
import pytesseract
from PIL import Image
PATH = lambda p: os.path.abspath(p)
TEMP_FILE = PATH(tempfile.gettempdir() + "/temp_screen.png")
class Appium_Extend(object):
    """Helpers around an Appium driver: screenshots, cropping, template
    matching (aircv) and digit OCR (pytesseract)."""

    def __init__(self, driver):
        self.driver = driver

    def get_time(self):
        """Return the current Unix timestamp as an int (used for file names)."""
        tamp = int(time.time())
        return tamp

    def screen_shot(self):
        """Save a full-screen screenshot of the emulator under a timestamp name."""
        im_name = self.get_time()
        # bug fix: the path had the drive letter doubled ('D:\D:\...')
        filename = r'D:\appium_test\zmjj\screen_shot\%s.png' % im_name
        self.driver.get_screenshot_as_file(filename)

    def get_screenshot_by_custom_size(self, box):
        """Crop the current screenshot to `box` (left, upper, right, lower)."""
        self.driver.get_screenshot_as_file(TEMP_FILE)
        image = Image.open(TEMP_FILE)
        return image.crop(box)

    def get_screenshot_of_room_id(self):
        """Crop a region that roughly contains the room id."""
        self.driver.get_screenshot_as_file(TEMP_FILE)
        image = Image.open(TEMP_FILE)
        (x, y) = image.size
        # empirically chosen screen fractions; PIL accepts float box coordinates
        box = (x * 0.14, y * 0.03, x * 0.21, y * 0.09) if False else (x * 0.14, x * 0.21, y * 0.03, y * 0.09)
        return image.crop(box)

    def get_element(self, im_path):
        """Locate template image `im_path` on screen.

        :return: the match 'result' position, or None when nothing matched.
        """
        imobj = ac.imread(im_path)
        self.driver.get_screenshot_as_file(TEMP_FILE)
        imsrc = ac.imread(TEMP_FILE)
        # bug fix: find_template returns None on no match; .get on None raised
        found = ac.find_template(imsrc, imobj, 0.7)
        return found.get('result') if found else None

    def get_room_id_position(self, im_path):
        """Locate the room-id template on screen.

        :return: the match 'rectangle' corners, or None when nothing matched.
        """
        imobj = ac.imread(im_path)
        self.driver.get_screenshot_as_file(TEMP_FILE)
        imsrc = ac.imread(TEMP_FILE)
        # bug fix: guard against a None result, as in get_element
        found = ac.find_template(imsrc, imobj)
        return found.get('rectangle') if found else None

    def retry(self, function):
        """Call `function` up to 3 times until it returns a truthy value.

        :return: the last value returned by `function`.
        """
        value = None
        for _ in range(3):
            value = function()
            if value:
                break
            time.sleep(2)  # wait before retrying
        return value

    def get_image_number(self, image):
        """OCR the image and return only its digit characters as a string."""
        code = pytesseract.image_to_string(image)
        # bug fix: on Python 3 image_to_string returns str; encoding it to bytes
        # made filter(str.isdigit, ...) fail, and filter() no longer returns str
        return ''.join(ch for ch in code if ch.isdigit())

    def image_resize(self, image, image_size):
        """Scale `image` by the factor `image_size` (e.g. 2 doubles each side)."""
        (x, y) = image.size
        # bug fix: PIL's resize requires integer pixel dimensions
        xsize = int(x * image_size)
        ysize = int(y * image_size)
        return image.resize((xsize, ysize), resample=3)
|
import numpy as np
import os

# Average/extract variable `v` from each yearly model output file via NCO's ncra.
years = np.arange(501, 525)
v = 'SST'
output = 'ice_month'
for y in years:
    year = str(y)
    src = '/short/e14/erd561/mom/archive/gfdl_nyf_1080_hist_5069/output' + year + '/' + output + '.nc'
    dst = '/g/data/e14/erd561/mom/gfdl_nyf_1080_hist_5069/' + output + year + '_' + v + '.nc'
    os.system('ncra -v ' + v + ' ' + src + ' ' + dst)
    print(year + ' OK')
|
# -*- coding: utf-8 -*-
import glob
import os
import importlib
import sys

# Discover output-comparison ("differ") modules living next to this file:
# every sibling *.py that exposes a `judge` attribute is registered.
modules = {}
diff_dir = os.path.dirname(__file__)
sys.path.append(diff_dir)
try:
    files = glob.glob(os.path.join(diff_dir, "*.py"))
    for path in files:
        try:
            # bug fix: split(".")[0] truncated module names containing dots;
            # splitext only strips the extension
            differ = os.path.splitext(os.path.basename(path))[0]
            if differ == "__init__":
                continue
            mod = importlib.import_module(differ)
            if hasattr(mod, "judge"):
                modules[differ] = mod
        except ImportError:
            continue
finally:
    # bug fix: always restore sys.path, even if discovery raises
    sys.path.remove(diff_dir)
|
from __future__ import division
import numpy as np
import tensorflow.contrib.learn.python.learn as learn
from sklearn import metrics
batch_size = 32

def get_classification_score(train_encodings, train_labels, test_encodings, test_labels, steps):
    """Train a one-hidden-layer (32 unit) DNN on the encodings and return
    the accuracy on the test set (Python 2 / legacy tf.contrib.learn API)."""
    feature_columns = learn.infer_real_valued_columns_from_input(train_encodings)
    classifier = learn.DNNClassifier(hidden_units=[32], n_classes=10, feature_columns=feature_columns)
    classifier.fit(train_encodings, train_labels, steps=steps, batch_size=batch_size)
    # For measuring accuracy
    test_predictions = list(classifier.predict(test_encodings, as_iterable=True))
    return metrics.accuracy_score(test_labels, test_predictions)

# ========== FSDD ===========
train_encodings = np.load(open('../data/fsdd_train_encodings_2.npy', 'r'))
train_labels = np.load(open('../data/fsdd_train_encodings_2_labels.npy', 'r'))
test_encodings = np.load(open('../data/fsdd_test_encodings_2.npy', 'r'))
test_labels = np.load(open('../data/fsdd_test_encodings_2_labels.npy', 'r'))
# convert labels to int32, the dtype the classifier expects
train_labels = np.array([np.int32(label) for label in train_labels])
test_labels = np.array([np.int32(label) for label in test_labels])
steps = len(train_encodings) * 200 / batch_size # ~200 epochs
print 'FSDD accuracy:', get_classification_score(train_encodings, train_labels, test_encodings, test_labels, steps)
# ========== MNIST ===========
train_encodings = np.load(open('../data/mnist_train_encodings_5.npy', 'r'))
train_labels = np.load(open('../data/mnist_train_encodings_5_labels.npy', 'r'))
test_encodings = np.load(open('../data/mnist_test_encodings_5.npy', 'r'))
test_labels = np.load(open('../data/mnist_test_encodings_5_labels.npy', 'r'))
train_labels = np.array([np.int32(label) for label in train_labels])
test_labels = np.array([np.int32(label) for label in test_labels])
steps = len(train_encodings) * 50 / batch_size # ~ 50 epochs, fewer epochs are needed because its a larger dataset
print 'MNIST accuracy:', get_classification_score(train_encodings, train_labels, test_encodings, test_labels, steps)
|
# Copyright 2015 ARM Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import argparse
import os
import sys
import pkg_resources
import shutil
import valinor.logging_setup as logging_setup
import valinor.ide_detection as ide_detection
import valinor.elf as elf
from project_generator.project import Project
from project_generator.generate import Generator
from project_generator.settings import ProjectSettings
def main():
    """CLI entry point: generate IDE debug-project files for a target
    executable and optionally launch the debug session."""
    logging_setup.init()
    logging.getLogger().setLevel(logging.INFO)
    p = argparse.ArgumentParser()
    p.add_argument('--version', dest='show_version', action='version',
        version=pkg_resources.require("valinor")[0].version,
        help='display the version'
    )
    p.add_argument('-t', '--tool', dest='ide_tool', default=None,
        help='Debug tool (IDE) to generate for. If omitted, a debug project '+
        'will be generated for an IDE detected on your system, '+
        'defaulting to opening a GDB debug session, if no known IDEs '+
        'are detected'
    )
    p.add_argument('-d', '--project-dir', dest='project_dir', default=None,
        help='The directory in which to generate any necessary project files. '+
        'Defaults to the directory of the executable argument.'
    )
    p.add_argument('-n', '--no-open', dest='start_session', default=True, action='store_false',
        help='Do not open the debug session, just generate the necessary '+
        'files to enable debugging, and print the command that would be '+
        'necessary to proceed.'
    )
    p.add_argument('--target', dest='target', required=True,
        help='The target board to generate a project file for (e.g. K64F).'
    )
    p.add_argument('executable',
        help='Path to the executable to debug.'
    )
    args = p.parse_args()
    # check that the executable exists before we proceed, so we get a nice
    # error message if it doesn't
    if not os.path.isfile(args.executable):
        logging.error('cannot debug file "%s" that does not exist' % args.executable)
        sys.exit(1)
    # Get settings and generator (it updates targets def prior select)
    projects = {
        'projects' : {}
    }
    generator = Generator(projects)
    project_settings = ProjectSettings()
    # pick the IDE: explicit --tool wins, otherwise detect one that supports
    # the requested target
    available_ides = ide_detection.available()
    ide_tool = args.ide_tool
    if not ide_tool:
        ide_tool = ide_detection.select(available_ides, args.target, project_settings)
        if ide_tool is None:
            if len(available_ides):
                logging.error('None of the detected IDEs supports "%s"', args.target)
            else:
                logging.error('No IDEs were detected on this system!')
                logging.info('Searched for:\n  %s', '\n  '.join(ide_detection.IDE_Preference))
    if ide_tool is None:
        logging.error(
            'No IDE tool available for target "%s". Please see '+
            'https://github.com/project-generator/project_generator for details '+
            'on adding support.', args.target
        )
        sys.exit(1)
    file_name = os.path.split(args.executable)[1]
    file_base_name = os.path.splitext(file_name)[0]
    executable_dir = os.path.dirname(args.executable)
    projectfile_dir = args.project_dir or executable_dir
    files = elf.get_files_from_executable(args.executable)
    # pass empty data to the tool for things we don't care about when just
    # debugging (in the future we could add source files by reading the debug
    # info from the file being debugged)
    project_data = {
        'common': {
            'target': [args.target], # target
            'build_dir': ['.'],
            'linker_file': ['None'],
            'export_dir': ['.' + os.path.sep + projectfile_dir],
            'output_dir': {
                'rel_path' : [''],
                'path' : [os.path.relpath(executable_dir, projectfile_dir) + os.path.sep],
            },
            # sort source files by basename for a stable project layout
            'sources': {'Source_Files':sorted([f for f in files], key=lambda file: os.path.basename(file))},
        }
    }
    project = Project(file_base_name, [project_data], project_settings)
    project.generate(ide_tool)
    # perform any modifications to the executable itself that are necessary to
    # debug it (for example, to debug an ELF with Keil uVision, it must be
    # renamed to have the .axf extension)
    executable = args.executable
    if ide_tool in ('uvision', 'uvision5'):
        new_exe_path = args.executable + '.axf'
        shutil.copy(args.executable, new_exe_path)
        executable = new_exe_path
    projectfiles = project.get_generated_project_files(ide_tool)
    if not projectfiles:
        logging.error("failed to generate project files")
        sys.exit(1)
    # launch the IDE/debug session unless --no-open was given
    if args.start_session:
        launch_fn = ide_detection.get_launcher(ide_tool)
        if launch_fn is not None:
            try:
                launch_fn(projectfiles['files'], executable)
            except Exception as e:
                logging.error('failed to launch debugger: %s', e)
        else:
            logging.warning('failed to open IDE')
    print('project files have been generated in: %s' % os.path.join(os.getcwd(), os.path.normpath(projectfiles['path'])))
|
# coding=utf-8
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django_resized import ResizedImageField
from .choices import *
from star_ratings.models import Rating
class AbsPerm(models.Model):
    """Staff-facing moderation/publication flags.

    NOTE(review): there is no ``class Meta: abstract = True`` here, so despite
    the "Abs" name this creates its own table — confirm whether it was meant
    to be an abstract base class.
    """
    # For Staff
    is_active = models.BooleanField(_('Ativo na plataforma'), default=True)
    is_banned = models.BooleanField(_('Banido da plataforma'), default=False)
    data_create = models.DateTimeField(_('Criado em'), default=timezone.now)
    data_published = models.DateTimeField(_('Publicado em'), default=timezone.now)
class Profile(models.Model):
    """Extra per-user data, linked one-to-one to the auth User."""
    user = models.OneToOneField(User, related_name='profile', on_delete=models.CASCADE, verbose_name='usuario')
    photo = ResizedImageField(size=[160, 160], quality=75, upload_to='user/profile', default='/imagens/img/user.svg',
                              blank=True, verbose_name='imagem',)
    phone = models.CharField(max_length=11, blank=True)
    # NOTE(review): this field name shadows the `sexo` choices sequence that
    # arrives via `from .choices import *` — after this line the choices object
    # is unreachable by that name; confirm this is intentional.
    sexo = models.BooleanField(choices=sexo, default=0)

    def __str__(self):
        return self.user.get_full_name()

    class Meta:
        verbose_name = 'Perfil'
        verbose_name_plural = 'Perfis'
class Address(models.Model):
    """Postal address attached to a user (Google-Places style field names)."""
    # bug fix: on_delete is mandatory since Django 2.0; CASCADE matches the
    # pre-2.0 implicit default
    user = models.ForeignKey(User, related_name='address', blank=True, verbose_name='usuario',
                             on_delete=models.CASCADE)
    route = models.CharField(_('rua'), max_length=200)
    country = models.CharField(_('pais'), max_length=200)
    administrative_area_level_1 = models.CharField(_('estado'), max_length=200)
    administrative_area_level_2 = models.CharField(_('cidade'), max_length=200)
    street_number = models.CharField(_('número'), max_length=30)
    postal_code = models.CharField(_('CEP'), max_length=20)
    complement = models.CharField(_('complemento'), max_length=20)
    sublocality_level_1 = models.CharField(_('bairro'), max_length=200)

    def full_address(self):
        """Return the one-line formatted address."""
        return f'{self.route}, {self.street_number} - {self.sublocality_level_1} -' \
               f' {self.administrative_area_level_2}'

    def __str__(self):
        # bug fix: returning the bound method itself made str(address) raise
        # TypeError (__str__ must return a str); call it instead
        return self.full_address()
class Store(models.Model):
    """A store owned by exactly one user."""
    # bug fix: on_delete is mandatory since Django 2.0; CASCADE matches the
    # pre-2.0 implicit default
    owner = models.OneToOneField(User, related_name='store', blank=True, on_delete=models.CASCADE)
    name = models.CharField(max_length=60, blank=True)
    photo = ResizedImageField(size=[400, 400], default='/imagens/img/user.svg', blank=True)
    # bug fix: `default=owner` passed the OneToOneField object itself, which
    # would be stored as its repr; a CharField default must be a string or a
    # callable. TODO(review): confirm what `adm` should actually default to.
    adm = models.CharField(max_length=200, default='', blank=True)
|
"""Deck."""
from typing import Optional, List
import requests
import random
class Card:
    """Simple dataclass for holding card information."""

    def __init__(self, value: str, suit: str, code: str):
        """Constructor."""
        self.value = value
        self.suit = suit
        self.code = code
        self.top_down = False  # True when the card is face-down
        self.remaining = 0

    def __str__(self):
        """Return "??" for a face-down card, otherwise its code."""
        return "??" if self.top_down else self.code

    def __repr__(self) -> str:
        """Repr."""
        return self.code

    def __eq__(self, o) -> bool:
        """Cards are equal when value and suit match (code/face ignored)."""
        # bug fix: the original returned None (not False) for non-equal cards;
        # NotImplemented lets Python fall back correctly for non-Card operands
        if isinstance(o, Card):
            return o.suit == self.suit and o.value == self.value
        return NotImplemented

    def __hash__(self):
        # defining __eq__ alone made Card unhashable; hash on the same fields
        # __eq__ compares so equal cards hash equally
        return hash((self.value, self.suit))
class Deck:
    """Deck of playing cards backed by deckofcardsapi.com, with a local
    backup pile used when the API is unreachable."""

    DECK_BASE_API = "https://deckofcardsapi.com/api/deck/"

    def __init__(self, deck_count: int = 1, shuffle: bool = False):
        """Constructor."""
        self._backup_deck = self._generate_backup_pile() * deck_count
        self.deck_count = deck_count
        self.is_shuffled = shuffle
        self.request = None
        self.api_check = False  # True once the API answered successfully
        self._request(self.DECK_BASE_API)

    def shuffle(self) -> None:
        """Shuffle the deck via the API."""
        # bug fix: DECK_BASE_API already ends with '/'; the extra '/' produced
        # a '//' in the request URL
        requests.get(f"{self.DECK_BASE_API}{self.deck_id}/shuffle/")

    def draw_card(self, top_down: bool = False) -> Optional[Card]:
        """
        Draw card from the deck.
        :return: card instance, or None when the deck is exhausted.
        """
        if self.api_check and self.deck_id and self.remaining:
            if self.remaining > 0:
                # bug fix: always draw exactly one card; the old
                # count={deck_count} consumed deck_count cards server-side but
                # used only the first, desyncing `remaining`
                request = requests.get(f"{self.DECK_BASE_API}{self.deck_id}/draw/?count=1").json()
                if not request["cards"]:
                    return None
                drawn = request["cards"][0]
                self.remaining -= 1
                card = Card(drawn["value"], drawn["suit"], drawn["code"])
                # keep the offline backup pile in sync with the API deck
                if card in self._backup_deck:
                    self._backup_deck.remove(card)
                return card
        else:
            # offline fallback: draw randomly from the local pile
            if not self._backup_deck:
                return None
            card = random.choice(self._backup_deck)
            self._backup_deck.remove(card)
            self.remaining = len(self._backup_deck)
            return card

    def _request(self, url: str) -> dict:
        """Create a new deck via the API; fall back to the local pile offline."""
        try:
            self.request = requests.get(f"{url}new/")
        except requests.exceptions.ConnectionError as e:
            print(e)
        if self.request is not None and self.request.status_code == requests.codes.ok:
            self.api_check = True
            self.result = requests.get(f"{url}new/?deck_count={self.deck_count}").json()
            if self.is_shuffled:
                self.result = requests.get(f"{url}new/shuffle/?deck_count={self.deck_count}").json()
            self.deck_id = self.result.get("deck_id", None)
            self.remaining = self.result.get("remaining", None)
            return self.result
        else:
            # no API: the backup pile defines how many cards remain
            self.remaining = len(self._backup_deck)

    @staticmethod
    def _generate_backup_pile() -> List[Card]:
        """Generate one full 52-card pile for offline play."""
        values = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'JACK', 'QUEEN', 'KING', 'ACE']
        suits = ['SPADES', 'DIAMONDS', 'HEARTS', 'CLUBS']
        backup_deck = []
        for suit in suits:
            for value in values:
                if value == "10":
                    # the API encodes ten as '0<suit>' rather than '1<suit>'
                    code = f"0{suit[0]}"
                    backup_deck.append(Card(value, suit, code))
                else:
                    backup_deck.append(Card(value, suit, f"{value[0]}{suit[0]}"))
        return backup_deck
if __name__ == '__main__':
    # manual smoke test: hits the live API (or falls back to the offline pile)
    d = Deck(shuffle=True, deck_count=2)
    print(d._backup_deck)
|
'''
Closures (Python allows nested function definitions)
- An inner function that references variables of the enclosing (non-global)
  scope is called a closure.
- Functions are objects in Python.
- A plain closure cannot rebind the enclosing scope's local variables.
'''
def addx(x):
    """Return a function that adds the captured x to its argument."""
    def adder(y):
        return x + y
    return adder

# adder is a closure capturing x
c = addx(8)   # c is now a function
type(c)       # function
c(10)         # equivalent to addx(8)(10)
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
import copy
class Solution:
    """Root-to-leaf path sum (LeetCode 113: Path Sum II)."""

    # bug fix: TreeNode and List are not imported in this module, so the
    # unquoted annotations raised NameError when the method was defined;
    # string annotations are never evaluated
    def pathSum(self, root: 'TreeNode', sum: int) -> 'List[List[int]]':
        """Return every root-to-leaf path whose node values add up to `sum`.

        :param root: root of the binary tree (or None for an empty tree)
        :param sum: target path total (name kept for API compatibility,
                    although it shadows the builtin)
        :return: list of value-paths, in DFS (left-first) order
        """
        if root is None:
            return []
        result = []

        def dfs(node, path, curSum):
            path.append(node.val)
            curSum += node.val
            # idiom fix: `is None` instead of `== None`
            if node.left is None and node.right is None:
                if curSum == sum:
                    # paths hold plain ints, so a shallow copy is enough
                    # (the original used an unnecessary deepcopy)
                    result.append(list(path))
                path.pop()
                return
            if node.left:
                dfs(node.left, path, curSum)
            if node.right:
                dfs(node.right, path, curSum)
            path.pop()

        dfs(root, [], 0)
        return result
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys, getopt
def main(argv):
inputfile = ''
outputfile = ''
try:
opts, args = getopt.getopt(argv,"hf:t:",["ifile=","text="])
except getopt.GetoptError:
print 'Error: SVGtoPDFfromText.py -f <inputfile> -t <text>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'Ayuda: SVGtoPDFfromText.py -f <inputfile> -t <text>'
sys.exit()
elif opt in ("-f", "--ifile"):
inputfile = arg
elif opt in ("-t", "--text"):
inputtext = arg
return [inputfile,inputtext]
# Script body: substitute the given text into the tspan with id "parametro_1"
# inside the SVG, then render the result to PDF with Inkscape. (Python 2.)
inputfile = ''
inputtext = ''
if __name__ == "__main__":
    [inputfile,inputtext]=main(sys.argv[1:])
print "Tu archivo: " + inputfile
print "Tu texto: " + inputtext
from xml.dom import minidom
xml_documento = minidom.parse(inputfile)
lista = xml_documento.getElementsByTagName("tspan")
for nodo in lista:
    if nodo.getAttribute("id")=="parametro_1":
        # decode so the replacement text is unicode before touching the DOM
        inputtext = inputtext.decode('utf8')
        nodo.firstChild.replaceWholeText(inputtext)
# write the modified SVG to a temporary file
out_file="/tmp/output_generarpdf.svg"
file_handle = open(out_file,"w")
texto = xml_documento.toxml()
file_handle.write(texto.encode('utf8'))
file_handle.close()
import os
# build and run the Inkscape SVG -> PDF command
outputfile = inputfile.split(".")[0]+" - "+inputtext+".pdf";
comando = "inkscape -f \"%s\" -A \"%s\"" % (out_file,outputfile)
print comando
os.system(comando.encode('utf8'))
|
import os
import re
from os import walk, getcwd
import csv
"""-------------------------------------------------------------------"""
""" Configure Paths"""
valid_txt_path = 'img_handshake_oznaczone/'
detected_txt_path = 'img_handshake_wyliczone/'
outpath = "CSV_dic/"
""" Get input text file list """
txt_name_list = []
for root, dirnames, filenames in os.walk(detected_txt_path):
for dirname in dirnames:
try:
os.makedirs(outpath)
except OSError:
if not os.path.isdir(outpath):
raise
for root2, dirnames2, filenames2 in os.walk(detected_txt_path + dirname):
txt_name_list.extend(map(lambda filename: dirname + '/' + filename, filenames2))
print "Found " + str(len(txt_name_list)) + " files"
# print(txt_name_list)
""" Process """
for txt_name in txt_name_list:
# txt_file = open("Labels/stop_sign/001.txt", "r")
raw_txt_name = os.path.split(txt_name)[1]
raw_txt_name = re.sub("predicted_", "", raw_txt_name)
""" Open detected text files """
full_detected_txt_path = detected_txt_path + txt_name
print("Detected:" + full_detected_txt_path)
detected_txt_file = open(full_detected_txt_path, "r")
detected_lines = detected_txt_file.read().split('\n') # for ubuntu, use "\r\n" instead of "\n"
# txt_name = re.sub("predicted_", "", txt_name)
""" Open valid text files """
full_valid_txt_path = valid_txt_path + raw_txt_name
print("Valid:" + full_valid_txt_path)
valid_txt_file = open(full_valid_txt_path, "r")
valid_lines = valid_txt_file.read().split('\n') # for ubuntu, use "\r\n" instead of "\n"
total = 0
with open(full_valid_txt_path) as f:
for i, line in enumerate(f):
if line:
total +=1
weight_number = str(re.findall(r'\d+', os.path.split(full_detected_txt_path)[0])[0])
""" Compare data """
total_detected = 0
match = 0
false_positive = 0
with open(full_detected_txt_path) as f_d:
for i_d, line_d in enumerate(f_d):
if line_d:
total_detected +=1
if total == 0 and total_detected != 0:
match = 0
false_positive = total_detected
csv_row = [raw_txt_name, total, match, false_positive]
print weight_number + ": CSV row: "
print csv_row
with open('CSV_dic/' + weight_number + '.csv', 'ab') as csv_File:
csv_FileWriter = csv.writer(csv_File)
if csv_row:
csv_FileWriter.writerow(csv_row)
if total_detected == 0:
match = 0
false_positive = 0
csv_row = [raw_txt_name, total, match, false_positive]
print weight_number + ": CSV row: "
print csv_row
with open('CSV_dic/' + weight_number + '.csv', 'ab') as csv_File:
csv_FileWriter = csv.writer(csv_File)
if csv_row:
csv_FileWriter.writerow(csv_row)
ct = 0
for valid_line in valid_lines:
if (len(valid_line) >= 2):
match = 0
false_positive = 0
ct = ct + 1
# print(valid_line + "\n")
valid_elems = valid_line.split(' ')
print(valid_elems)
xmin_valid = float(valid_elems[1])
xmax_valid = float(valid_elems[3])
ymin_valid = float(valid_elems[2])
ymax_valid = float(valid_elems[4])
xmin_match = False
xmax_match = False
ymin_match = False
ymax_match = False
for detected_line in detected_lines:
if (len(detected_line) >= 2):
detected_elems = detected_line.split(' ')
print(valid_elems)
xmin_detected = float(detected_elems[1])
xmax_detected = float(detected_elems[3])
ymin_detected = float(detected_elems[2])
ymax_detected = float(detected_elems[4])
xmin_match = False
xmax_match = False
ymin_match = False
ymax_match = False
# variance 35%
variance = float(0.45)
if (xmin_detected <= xmin_valid*(1 + variance) and xmin_detected >= xmin_valid*(1 - variance) ):
xmin_match = True
if (xmax_detected <= xmax_valid*(1 + variance) and xmax_detected >= xmax_valid*(1 - variance)):
xmax_match = True
if (ymin_detected <= ymin_valid*(1 + variance) and ymin_detected >= ymin_valid*(1 - variance)):
ymin_match = True
if (ymax_detected <= ymax_valid*(1 + variance) and ymax_detected >= ymax_valid*(1 - variance) ):
ymax_match = True
if (xmax_match and xmin_match and ymin_match and ymax_match):
match += 1
if total >= 1 and total_detected >= 1:
false_positive = total_detected - match
csv_row = [raw_txt_name, total, match, false_positive]
print weight_number + ": CSV row: "
print csv_row
with open('CSV_dic/' + weight_number +'.csv', 'ab') as csv_File:
csv_FileWriter = csv.writer(csv_File)
if csv_row:
csv_FileWriter.writerow(csv_row)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 21 14:59:55 2020
@author: ganesh
"""
import pandas as pd
# SMSSpamCollection is tab-separated: column 1 = label (ham/spam), column 2 =
# message text. The file has no header row, so names are supplied explicitly.
mail = pd.read_csv('/home/ganesh/Desktop/SpamClassifier-master/smsspamcollection/SMSSpamCollection', sep='\t',
                   names=["label", "message"])
# --- text cleaning / preprocessing ---
import nltk
import re
from nltk.stem.porter import PorterStemmer
from nltk.stem import WordNetLemmatizer
#nltk.download('stopwords')
from nltk.corpus import stopwords
ps=PorterStemmer()# stemmer (defined but unused below; the lemmatizer is used instead)
lem=WordNetLemmatizer()
corpus=[]
# Clean each message: keep letters only, lowercase, drop stopwords, lemmatize.
for i in range(0, len(mail)):
    # bug fix: the pattern '[^a-zA-z]' used a lowercase 'z' in the second
    # range, so A-z accidentally also kept the characters [ \ ] ^ _ `
    review = re.sub('[^a-zA-Z]', ' ', mail['message'][i])
    review = review.lower()
    review = review.split()
    review = [lem.lemmatize(word) for word in review if not word in stopwords.words('english')]
    review = ' '.join(review)  # words back into one sentence
    corpus.append(review)
# With TfidfVectorizer the accuracy was 0.9778708133971292:
#from sklearn.feature_extraction.text import TfidfVectorizer
#cv=TfidfVectorizer(max_features=5000)# here the max_feature will take top 5k most important word up to 5000 words
#x=cv.fit_transform(corpus).toarray()# this is the date that we will be training
#print("=============")
# With CountVectorizer (bag of words) the accuracy was 0.9850478468899522.
from sklearn.feature_extraction.text import CountVectorizer
cv=CountVectorizer(max_features=5000)# keep only the 5000 most frequent terms
x=cv.fit_transform(corpus).toarray()# training matrix
print("=============")
# One-hot the target, then keep a single 0/1 column (1 = spam, 0 = ham).
y=pd.get_dummies(mail['label'])
y=y.iloc[:,1].values # now 0 means its ham and 1 means it spam
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.3,random_state=0)
#=================================================================
# Naive Bayes alternative (accuracy 0.9850478468899522):
#from sklearn.naive_bayes import MultinomialNB
#spam_detect=MultinomialNB()
#train=spam_detect.fit(x_train,y_train)
#y_pred=spam_detect.predict(x_test)
#=================================================================
# LogisticRegression (accuracy 0.9820574162679426):
from sklearn.linear_model import LogisticRegression
lrg=LogisticRegression()
lrg.fit(x_train,y_train)
y_pred=lrg.predict(x_test)
#=================================================================
# DecisionTreeClassifier alternative (accuracy 0.9360047846889952):
#from sklearn.tree import DecisionTreeClassifier
#clf_entropy=DecisionTreeClassifier(criterion="entropy",random_state=100,max_depth=3,min_samples_leaf=5)
#clf_entropy.fit(x_train,y_train)
#y_pred=clf_entropy.predict(x_test)
from sklearn.metrics import confusion_matrix
cm=confusion_matrix(y_test,y_pred)
from sklearn.metrics import accuracy_score
acc=accuracy_score(y_test,y_pred)
# NOTE(review): the printed message says Naive Bayes, but the model actually
# fitted above is LogisticRegression -- confirm which result is reported.
print("Using CountVectorizer and with Naive Bayes model",acc)
#------------------------------------------------------------------------------
# Confusion-matrix heatmap.
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize=(5,5))
sns.heatmap(cm,annot=True,fmt=".2f",linewidths=.10,square=True,cmap='Blues_r')
plt.ylabel("ACTUAL VALUE OR LABEL")
plt.xlabel(" PREDICTED VALUE OR LABEL")
all_sample_title="ACCURACY SCORE {0}".format(acc)
plt.title(all_sample_title,size=15)
|
'''
if a image (e.g., segmentation maps) is composed of several components, but there are some noise, which are in form of isolated components
we can then remove the extra noise with following codes
Dong Nie
12/17/2016
'''
import numpy as np
import SimpleITK as sitk
from multiprocessing import Pool
import os
import h5py
import scipy.io as scio
from scipy import ndimage as nd
from imgUtils import dice
def filter_isolated_cells(array, struct):
    """ Return array with completely isolated single cells removed.

    Every connected region whose summed value is below the maximum region sum
    is zeroed out; only the largest region(s) survive.

    :param array: Array with completely isolated single cells
    :param struct: Structure array (connectivity) for generating unique regions
    :return: filtered copy of `array`
    """
    filtered_array = np.copy(array)
    # nd.measurements.* was deprecated and later removed from SciPy; the same
    # functions are available directly on scipy.ndimage
    id_regions, num_ids = nd.label(filtered_array, structure=struct)
    # summed value of `array` inside each labelled region; index 0 is the
    # background (for a binary mask this is the pixel count per region)
    id_sizes = np.array(nd.sum(array, id_regions, range(num_ids + 1)))
    maxV = np.amax(id_sizes)
    # zero out every region below the maximum in a single vectorized step
    # (replaces the original per-size Python loop; same result)
    small_mask = id_sizes < maxV
    filtered_array[small_mask[id_regions]] = 0
    return filtered_array
'''
denoise Images for each unique intensity
'''
def denoiseImg(array, struct):
    """Denoise a label image: for every distinct intensity present in `array`,
    keep only the dominant connected component of that intensity."""
    cleaned = np.zeros(array.shape)
    for intensity in np.unique(array):
        # isolate the voxels carrying this intensity
        single = np.zeros(array.shape)
        single[array == intensity] = intensity
        # drop every component of this intensity except the largest one
        kept = filter_isolated_cells(single, struct)
        cleaned[kept == intensity] = intensity
    return cleaned
def main():
    """Denoise predicted segmentation volumes and report per-class Dice
    against the ground-truth labels. (Python 2 script; uses SimpleITK.)"""
    # hard-coded data locations; adjust to your environment
    path='/home/dongnie/Desktop/Caffes/caffe/python/pelvicSeg/'
    saveto='/home/dongnie/Desktop/Caffes/caffe/python/pelvicSeg/'
    caffeApp=0
    fileIDs=[1,2,3,4,6,7,8,10,11,12,13]
    #fileIDs=[1]
    for ind in fileIDs:
        datafilename='preSub%d_5x168x112.nii'%ind #provide a sample name of your filename of data here
        datafn=os.path.join(path,datafilename)
        labelfilename='gt%d.nii'%ind # provide a sample name of your filename of ground truth here
        labelfn=os.path.join(path,labelfilename)
        prefilename='preSub%d_denoised.nii'%ind #provide a sample name of your filename of data here
        prefn=os.path.join(path,prefilename)
        # load prediction and ground truth as numpy arrays
        imgOrg=sitk.ReadImage(datafn)
        mrimg=sitk.GetArrayFromImage(imgOrg)
        labelOrg=sitk.ReadImage(labelfn)
        labelimg=sitk.GetArrayFromImage(labelOrg)
        # Run function on sample array
        #filtered_array = filter_isolated_cells(mrimg, struct=np.ones((3,3,3)))
        # keep, per intensity, only the largest 3D-connected component
        filtered_array = denoiseImg(mrimg, struct=np.ones((3,3,3)))
        # Plot output, with all isolated single cells removed
        #plt.imshow(filtered_array, cmap=plt.cm.gray, interpolation='nearest')
        # Dice score per class label (0..3)
        pr0=dice(labelimg,filtered_array,0)
        pr1=dice(labelimg,filtered_array,1)
        pr2=dice(labelimg,filtered_array,2)
        pr3=dice(labelimg,filtered_array,3)
        print 'dice for sub%d: '%ind,pr0, ' ',pr1,' ',pr2,' ',pr3
        # save the denoised volume
        preVol=sitk.GetImageFromArray(filtered_array)
        sitk.WriteImage(preVol,prefn)
if __name__ == '__main__':
    main()
|
import random, string
def rndText(letters=15, lines=3, spaces=5):
    """Return `lines` newline-joined lines of `letters` random characters each.

    `spaces` weights how likely a space is relative to any single letter.
    """
    # fix: string.letters and xrange are Python-2-only; string.ascii_letters
    # and range behave the same here and also work on Python 3
    pool = string.ascii_letters + ' ' * spaces
    return "\n".join(
        "".join(random.choice(pool) for _ in range(letters))
        for _ in range(lines)
    )
RESET = '\x1b[0m'
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = ['\x1b[1;{}m'.format(30+i) for i in range(8)]
import serial as realSerial
class Serial(object):
def __init__(self, port=None, baudrate=9600, bytesize=realSerial.EIGHTBITS,
parity=realSerial.PARITY_NONE, stopbits=realSerial.STOPBITS_ONE,
timeout=None, xonxoff=False, rtscts=False, writeTimeout=None,
dsrdtr=False, interCharTimeout=None):
self.port = port
self.baudrate = baudrate
self.bytesize = bytesize
self.parity = parity
self.stopbits = stopbits
self.timeout = timeout
self.xonxoff = xonxoff
self.rtscts = rtscts
self.writeTimeout = writeTimeout
self.dsrdtr = dsrdtr
self.interCharTimeout = interCharTimeout
self.BAUDRATES = realSerial.Serial.BAUDRATES
self.BYTESIZES = realSerial.Serial.BYTESIZES
self.PARITIES = realSerial.Serial.PARITIES
self.STOPBITS = realSerial.Serial.STOPBITS
self._isOpen = True if port is not None else False
self.dataIn = ""
self.dataOut = rndText()
self.debug = True
self.log("Created:\n{}".format(self))
def __str__(self):
"""Return a string representation mimicking that of the serial class"""
data = (hex(id(self)), self._isOpen)
head = "id={}, open={}".format(*data)
data = (self.port, self.baudrate, self.bytesize, self.parity, self.stopbits, self.timeout, self.xonxoff, self.rtscts, self.dsrdtr)
data = map(repr, data)
body = "port={}, baudrate={}, bytesize={}, parity={}, stopbits={}, timeout={}, xonxoff={}, rtscts={}, dsrdtr={}".format(*data)
return "Serial<{}>({})".format(head, body)
__repr__ = __str__ # Return str(self), like the serial class
def open(self):
"""
Open the port
Actually sets the state variable _isOpen to True
"""
self.log("open")
self._isOpen = True
def close(self):
"""
Close the port
Actually sets the state variable _isOpen to False
"""
self.log("close")
self._isOpen = False
def isOpen(self):
"""
Test if the port is open
Actually tests the state variable _isOpen
"""
self.log("port is {}".format(self.state))
return self._isOpen
def write(self, data):
"""
Write data to the port
Actually appends data to dataIn
"""
self.raiseIfClosed()
self.log('write: "{}"'.format(data), color=GREEN)
self.dataIn += data
def writelines(self, sequence):
"""Write a list of strings to the port"""
self.log("writelines")
for s in sequence:
self.write(s)
def read(self, size=1):
"""
Read and return size bytes from the port
Actually, the bytes are read from dataOut
"""
self.raiseIfClosed()
data, self.dataOut = self.dataOut[:size], self.dataOut[size:]
self.log('read: "{}"'.format(data), color=RED)
return data
def readline(self, limit=None, eol='\n'):
"""
Read and return a line from the port
If limit is specified, at most limit bytes will be read
The recognized line terminator(s) can be set via newlines
Actually, the line is read from dataOut
"""
self.raiseIfClosed()
size = min((self.dataOut.index(nl) for nl in eol))
if size == -1:
return ""
size += 1
if limit is not None:
size = min(size, limit)
data, self.dataOut = self.dataOut[:size], self.dataOut[size:]
self.log('readline: "{}"'.format(data.rstrip(eol)), color=RED)
return data
next = readline # Return the next line, like the serial class
__iter__ = lambda self: self
def inWaiting(self):
    """
    Return the number of chars in the receive buffer.

    Actually returns the length of dataOut.
    """
    self.log("inWaiting")
    # Bug fix: the original read the undefined global `dataOut` (NameError);
    # the buffer lives on the instance.
    return len(self.dataOut)
def outWaiting(self):
    """
    Return the number of chars in the output buffer.

    Actually returns the length of dataIn.
    """
    self.log("outWaiting")
    # Bug fix: the original read the undefined global `dataIn` (NameError);
    # the buffer lives on the instance.
    return len(self.dataIn)
def flushInput(self):
    """
    Clear input buffer.

    Actually discards the content of dataIn.
    """
    self.dataIn = ""
    self.log("flushInput")
def flushOutput(self):
    """
    Clear output buffer.

    Actually discards the content of dataOut.
    """
    self.dataOut = ""
    self.log("flushOutput")
def readinto(self, b):
    """
    Read up to len(b) chars from dataOut into the mutable buffer b.

    Returns the number of chars actually copied, which may be less than
    len(b) when the buffer holds less data.
    Raises ValueError when the port is closed (consistent with read()).
    """
    self.raiseIfClosed()
    # Bug fix: the original rebound the *local* name `b` instead of writing
    # into the caller's buffer, and always reported len(b) even when less
    # data was available.
    n = min(len(b), len(self.dataOut))
    data, self.dataOut = self.dataOut[:n], self.dataOut[n:]
    for i, ch in enumerate(data):
        b[i] = ord(ch) if isinstance(ch, str) else ch
    self.log('readinto: "{}"'.format(data), color=RED)
    return n
# the following methods do nothing but log
# (each returns whatever self.log returns, i.e. None)
flush = lambda self: self.log("flush")
nonblocking = lambda self: self.log("nonblocking")
rtsToggle = lambda self: self.log("rtsToggle")
getSettingsDict = lambda self: self.log("getSettingsDict")
applySettingsDict = lambda self, d: self.log("applySettingsDict")
setBreak = lambda self, level=True: self.log("setBreak")
setDTR = lambda self, level=True: self.log("setDTR")
setRTS = lambda self, level=True: self.log("setRTS")
setXON = lambda self, level=True: self.log("setXON")
flowControlOut = lambda self, enable: self.log("flowControlOut")
# should these raise IOError?
isatty = lambda self: self.log("isatty")
tell = lambda self: self.log("tell")
seek = lambda self, pos, whence=0: self.log("seek")
truncate = lambda self, n=None: self.log("truncate")
# should these sleep?
sendBreak = lambda self, duration=0.25: self.log("sendBreak")
readlines = lambda self, sizehint=None, eol='\n': self.log("readlines")
xreadlines = lambda self, sizehint=None: self.log("xreadlines")
# the following methods log and always return True/False
def getCTS(self):
    """CTS modem line: always asserted in this mock."""
    self.log("getCTS")
    return True
def getDSR(self):
    """DSR modem line: always asserted in this mock."""
    self.log("getDSR")
    return True
def getRI(self):
    """RI modem line: always asserted in this mock."""
    self.log("getRI")
    return True
def getCD(self):
    """CD modem line: always asserted in this mock."""
    self.log("getCD")
    return True
def readable(self):
    """io-style capability flag: the mock can always be read."""
    self.log("readable")
    return True
def writable(self):
    """io-style capability flag: the mock can always be written."""
    self.log("writable")
    return True
def seekable(self):
    """io-style capability flag: a serial port is not seekable."""
    self.log("seekable")
    return False
def fileno(self):
    """Return a dummy file descriptor number (always 1)."""
    self.log("fileno")
    return 1
# the following methods are not part of the real serial
def log(self, string, color=BLUE):
    """Print a colored debugging line when self.debug is enabled."""
    if self.debug:
        # print() call form works under both Python 2 and Python 3
        # (the original `print x` statement is a SyntaxError on Python 3).
        print(color + string + RESET)
@property
def state(self):
    """Port state as a human-readable string ("open"/"closed")."""
    return "open" if self._isOpen else "closed"
def raiseIfClosed(self):
    """Raise ValueError if the port is closed.

    A message is attached so callers get a diagnosable error instead of a
    bare ValueError (same exception type, so existing handlers still match).
    """
    if not self._isOpen:
        raise ValueError("port is closed")
|
import numpy as np
# unique class values
def y_Encoder(classList):
    """Map each class label in `classList` to its one-hot numpy vector.

    The i-th label gets a float vector of length len(classList) with a 1.0
    at position i and 0.0 elsewhere.
    """
    eye = np.eye(len(classList))
    return {label: eye[i].copy() for i, label in enumerate(classList)}
# from nltk.corpus import wordnet as wn
# nouns = {x.name().split('.', 1)[0] for x in wn.all_synsets('n')}
# print (nouns)
# checkList = ["multiplex",
# "1984",
# "tea",
# ]
#
# sysn = wn.synsets("undercuts")
#
# print (sysn[0])
|
# Read the vote counts of two candidates, print each candidate's percentage,
# and announce the winner (or a tie).
x = int(input('Quanti voti ha ricevuto il 1° candidato? '))
y = int(input('Quanti voti ha ricevuto il 2° candidato? '))
z = x + y
d = (x / z) * 100
u = (y / z) * 100
print('Percentuale 1° candidato: ', d, '%')
print('percentuale 2° candidato: ', u, '%')
# Bug fix: the original used two separate `if` statements, so whenever the
# first candidate won, the `else` of the second `if` also printed the tie
# message. An if/elif/else chain selects exactly one outcome.
if d > u:
    print('Il primo candidato ha vinto!')
elif d < u:
    print('il secondo candidato ha vinto!')
else:
    print('Sono pari!')
# Load a saved order book from HDF5, aggregate it, and draw diagnostic plots.
import orderbook as ob
o = ob.Orderbook('new_orderbook.h5')
# read the SPY order-book table from the store
o.read_data('SPY','order_book')
# aggregate the raw messages into book snapshots — presumably; confirm in the
# orderbook module
o.rollup_orderbook()
o.plot_bid_ask_prices()
o.plot_bid_ask_spread()
o.plot_interpacket_gap()
# 40: presumably a window/bucket parameter for the traffic plot — TODO confirm
o.plot_order_traffic(40)
|
from os import path

# Project root: two directory levels above this file.
d = path.dirname(__file__)
d = '/'.join(d.split('/')[:-2])

# --- STFT / mel-spectrogram extraction parameters ---
audio_num_mel_bins = 80
audio_sample_rate = 16000
num_freq = 513
symbol_size = 256
n_fft = 1024
rescale = True
rescaling_max = 0.999
hop_size = 256
win_size = 1024
frame_shift_ms = None
preemphasize = True
preemphasis = 0.97
min_level_db = -100
ref_level_db = 20
fmin = 55
fmax = 7600
# --- spectrogram normalization ---
signal_normalization = True
allow_clipping_in_normalization = True
symmetric_mels = True
max_abs_value = 4
power = 1.1
magnitude_power = 1.3
# griffin_lim_iters = 60
griffin_lim_iters = 3
# --- silence trimming ---
trim_fft_size = 1200
trim_hop_size = 300
trim_top_db = 23
use_lws = False
silence_threshold = 2
trim_silence = True
max_mel_frames = 2048
wavenet_pad_sides = 1
predict_linear = True
# --- dataset locations ---
phone_list_file = "data/phone_set.json"
bin_data_dir = "liqiao" # generated feature-set name; must match train.sh
metadata_csv = "meta/liqiao.csv"
#metadata_csv = "linnan.csv"
test_num = 1
text_data_dir = "meta"
#wav_data_dir = "Wave16k/7000000000-7100002500"
wav_data_dir = "wav/train"
#train_csv = "meta/train.csv"
#test_csv = "meta/test.csv"
#phone_set = "res/phone_set.json"
#wav_dir = "wav/train/final"
#test_wav_dir = "wav/test"
#train_feat_dir = "feat/train"
#test_feat_dir = "feat/test"
#data_dir = '%s/1.pub-data'%(d) #"/fast/lxd_room/bjfu-ailab/1.pub-data"
|
from django.test import TestCase
from django.contrib.auth.models import User
from django.test import Client
import json
from assets.constants import SUPERVISOR, ADMIN
from authentication.models import UserRole
from authentication.tests.helper_functions import create_new_user, log_in
class AuthenticationViewTestCase(TestCase):
    """End-to-end tests for JWT token retrieval and token-protected access."""
    client = Client()
    response = None

    def setUp(self):
        # One supervisor account that every test authenticates as.
        user = User.objects.create_user(username="Heffalumps", email="heffalumps@woozles.com", password="Woozles")
        UserRole.objects.create(name=SUPERVISOR, user=user)

    # This test case will also attempt to use LDAP, which takes about 10 seconds, so it is commented out.
    # def test_unauthorised_access(self):
    #     response = self.client.post("/api/auth/get_token/", {"username": "Mango", "password": "Apple"})
    #     self.assertEqual(response.status_code, 400, "There shouldn't be a token received.")
    #
    #     response = self.client.post("/api/auth/authenticated/", {}, HTTP_AUTHORIZATION='JWT {}'.format("bad token"))
    #     self.assertEqual(response.status_code, 401, "There shouldn't be access granted.")

    def test_authorised_access(self):
        """A token from get_token/ grants access to the authenticated/ endpoint."""
        response = self.client.post("/api/auth/get_token/", {"username": "Heffalumps", "password": "Woozles"})
        self.assertEqual(response.status_code, 200, "The token should be successfully returned.")
        response_content = json.loads(response.content.decode('utf-8'))
        token = response_content["token"]
        response = self.client.post("/api/auth/authenticated/", {}, HTTP_AUTHORIZATION='JWT {}'.format(token))
        response_content = json.loads(response.content.decode('utf-8'))
        self.assertTrue(response_content["authenticated"], "The user should be able to access this endpoint.")

    def test_authorised_access_via_login(self):
        """A token from login/ likewise grants access to authenticated/."""
        response = self.client.post("/api/auth/login/", {"username": "Heffalumps", "password": "Woozles"})
        self.assertEqual(response.status_code, 200, "The token should be successfully returned.")
        response_content = json.loads(response.content.decode('utf-8'))
        token = response_content["token"]
        response = self.client.post("/api/auth/authenticated/", {}, HTTP_AUTHORIZATION='JWT {}'.format(token))
        response_content = json.loads(response.content.decode('utf-8'))
        self.assertTrue(response_content["authenticated"], "The user should be able to access this endpoint.")
class UsersTestCase(TestCase):
    """Tests for the staff-management endpoints (admin-only operations)."""
    client = Client()
    response = None

    def setUp(self):
        # One admin account that most tests authenticate as.
        user = User.objects.create_user(username="Heffalumps", email="heffalumps@woozles.com", password="Woozles")
        UserRole.objects.create(name=ADMIN, user=user)

    # Tests if we can get all staff members from our database
    def test_get_staff_members(self):
        """all_staff/ returns every staff member regardless of role."""
        # Register supervisors
        create_new_user("Yeesha", "Woozles", user_type=SUPERVISOR)
        create_new_user("Yeesah2", "Woozles", user_type=SUPERVISOR)
        # Login as the admin
        token = log_in(self.client, "Heffalumps", "Woozles")
        staff_response = self.client.get(path="/api/auth/all_staff/",
                                         HTTP_AUTHORIZATION='JWT {}'.format(token))
        staff_response_content = json.loads(staff_response.content.decode('utf-8'))
        self.assertEqual(len(staff_response_content), 3, "There should be 2 supervisors and 1 admin in the system.")

    # Tests if we can get all supervisor staff members from our database
    def test_get_supervisor_staff_members(self):
        """supervisor_staff/ returns only the supervisors."""
        # Register supervisors
        create_new_user("Yeesha", "Woozles", user_type=SUPERVISOR)
        create_new_user("Yeesah2", "Woozles", user_type=SUPERVISOR)
        # Login as the admin
        token = log_in(self.client, "Heffalumps", "Woozles")
        staff_response = self.client.get(path="/api/auth/supervisor_staff/",
                                         HTTP_AUTHORIZATION='JWT {}'.format(token))
        staff_response_content = json.loads(staff_response.content.decode('utf-8'))
        self.assertEqual(len(staff_response_content), 2, "There should be 2 supervisors in the system.")

    # Tests if we can change the role of specific staff members in our database
    def test_change_staff_roles(self):
        """Only admins may change staff roles: supervisor gets 400, admin gets 200."""
        # Register a supervisor
        create_new_user("Yeesha", "Woozles", user_type=SUPERVISOR)
        # Login as the supervisor
        token = log_in(self.client, "Yeesha", "Woozles")
        staff_response = self.client.post("/api/auth/staff_roles/",
                                          json.dumps({'new_user_roles': {'Heffalumps': SUPERVISOR}}),
                                          HTTP_AUTHORIZATION='JWT {}'.format(token),
                                          content_type="application/json")
        # assertEquals is a deprecated alias — use assertEqual.
        self.assertEqual(staff_response.status_code, 400)
        # Login as the admin
        token = log_in(self.client, "Heffalumps", "Woozles")
        staff_response = self.client.post("/api/auth/staff_roles/",
                                          json.dumps({'new_user_roles': {'Yeesha': ADMIN}}),
                                          HTTP_AUTHORIZATION='JWT {}'.format(token),
                                          content_type="application/json")
        self.assertEqual(staff_response.status_code, 200)

    # Tests if we can get the list of all supervisors' usernames
    def test_get_supervisor_usernames(self):
        """supervisor_usernames/ lists exactly the supervisors' usernames."""
        # Register supervisor
        create_new_user("Yeesha", "Woozles", user_type=SUPERVISOR)
        # Login as the admin
        token = log_in(self.client, "Heffalumps", "Woozles")
        supervisor_response = self.client.get(path="/api/auth/supervisor_usernames/", data={},
                                              HTTP_AUTHORIZATION='JWT {}'.format(token))
        self.assertEqual(supervisor_response.status_code, 200)
        statistics_response_content = json.loads(supervisor_response.content.decode('utf-8'))
        self.assertEqual(len(statistics_response_content["usernames"]), 1)
        self.assertEqual(statistics_response_content["usernames"][0], "Yeesha")
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
class preprocess:
    """Thin wrapper around the TensorFlow MNIST input pipeline."""

    def __init__(self, FLAGS):
        # FLAGS.dataset_path: directory that holds (or will receive) MNIST_data/
        self.dataset_path = FLAGS.dataset_path

    def Mnist2data(self):
        """Download/load MNIST and return (x_train, y_train, x_test, y_test).

        Labels are one-hot encoded (one_hot=True).
        """
        self.mnist = input_data.read_data_sets(self.dataset_path+"/MNIST_data/", one_hot=True)
        x_data, y_data, x_test, y_test = self.mnist.train.images, self.mnist.train.labels, self.mnist.test.images, self.mnist.test.labels
        return x_data, y_data, x_test, y_test
if __name__ == '__main__':
    # Bug fix: preprocess requires a FLAGS-like object with a dataset_path
    # attribute; the original called preprocess() with no argument, which
    # raises a TypeError before anything runs.
    import argparse
    flags = argparse.Namespace(dataset_path='.')
    pp = preprocess(flags)
    print(pp.Mnist2data())
|
# Convert an amount in Brazilian reais into several other currencies
# using fixed exchange rates (multiply for weaker, divide for stronger ones).
valor = float(input('\nDigite um valor em Reais: R$'))

conv_peso = valor * 13.63
conv_iene = valor * 19.80
conv_dolar = valor / 5.33
conv_euro = valor / 6.33
conv_libra = valor / 7.00

print('\nCONVERSOR DE MOEDAS')
print('-' * 25)
print(f'Peso Argentino: $ {conv_peso:.2f}')
print(f'Iene: ¥ {conv_iene:.2f}')
print(f'Dolar: U$ {conv_dolar:.2f}')
print(f'Euro: € {conv_euro:.2f}')
print(f'Libra esterlina: £ {conv_libra:.2f}')
print('-' * 25)
import os
from contextlib import contextmanager
try:
import psycopg2cffi as psycopg2
except ImportError:
import psycopg2
import queries
DB_CONNECT = os.getenv('NHLDB_CONNECT')
@contextmanager
def session(connect=None, pool_size=10, is_tornado=False):
    """
    Context manager yielding a queries session, closed on exit.

    connect: connection string; falls back to the NHLDB_CONNECT env variable.
    pool_size: maximum size of the underlying connection pool.
    is_tornado: yield a TornadoSession instead of a blocking Session.
    Raises ValueError when no connection string can be determined.
    """
    if connect is None:
        connect = DB_CONNECT
    if connect is None:
        # ValueError is more precise than bare Exception and is still caught
        # by any caller handling Exception.
        raise ValueError('Please provide a connection string or set the NHLDB_CONNECT env. variable')
    if is_tornado:
        s = queries.TornadoSession(connect, pool_max_size=pool_size)
    else:
        s = queries.Session(connect, pool_max_size=pool_size)
    try:
        yield s
    finally:
        # Always release the pool, even if the body raised.
        s.close()
def _insert_dict(s, table_name, values):
vals = [values[col] for col in values]
s.execute('INSERT INTO {table_name} ({cols}) VALUES ({vals})'.format(table_name=table_name,
cols=','.join(["%s"] * len(vals)),
vals=','.join(vals)))
class Query(object):
    """Minimal SQL SELECT builder: a table, optional columns, ANDed WHERE parts."""

    def __init__(self, table, select=None):
        # Accept either a table name or an object exposing __table_name.
        # NOTE(review): hasattr(table, '__table_name') looks suspicious — a
        # class-level double-underscore attribute would be name-mangled, so
        # this branch may never trigger; confirm against the model classes.
        if hasattr(table, '__table_name'):
            self.table = table.__table_name
        else:
            self.table = table
        self.select = select   # list of column names, or None for SELECT *
        self._where = []       # raw SQL fragments, joined with AND

    def where(self, clause):
        """Append a WHERE fragment; returns self so calls can be chained."""
        self._where.append(clause)
        return self

    def __str__(self):
        # Render the final SQL text.
        sel = '*'
        if self.select is not None:
            sel = ', '.join(self.select)
        sql = ['''
        SELECT {selects}
        FROM {table}
        '''.format(selects=sel, table=self.table)]
        for i, where in enumerate(self._where):
            if i == 0:
                sql.append('WHERE')
            else :
                sql.append('AND')
            sql.append(where)
        return ' '.join(sql)
|
import config.config as config
# Decoder class for use with a rotary encoder.
class decoder:
    """Class to decode mechanical rotary encoder pulses (and its push switch)."""

    def __init__(self, pi, rot_gpioA, rot_gpioB, switch_gpio, rotation_callback, switch_callback):
        """
        Instantiate the class with the pi and the gpios connected to rotary
        encoder contacts A and B plus the push switch. The common contact
        should be connected to ground. rotation_callback is called when the
        rotary encoder is turned; it takes one parameter which is +1 for
        clockwise and -1 for counterclockwise. switch_callback is called
        with no arguments when the switch is pressed.

        EXAMPLE

        import time
        import pigpio
        import rotary_encoder

        pos = 0

        def callback(way):
            global pos
            pos += way
            print("pos={}".format(pos))

        def sw_callback():
            print("pressed")

        pi = config.pigpio.pi()
        decoder = rotary_encoder.decoder(pi, 7, 8, 9, callback, sw_callback)
        time.sleep(300)
        decoder.cancel()
        pi.stop()
        """
        self.pi = pi
        self.rot_gpioA = rot_gpioA
        self.rot_gpioB = rot_gpioB
        self.rot_callback = rotation_callback
        self.sw_callback = switch_callback
        self.levA = 0
        self.levB = 0
        self.lastGpio = None
        # Rotary contacts: inputs with pull-ups, callback on both edges.
        self.pi.set_mode(rot_gpioA, config.pigpio.INPUT)
        self.pi.set_mode(rot_gpioB, config.pigpio.INPUT)
        self.pi.set_pull_up_down(rot_gpioA, config.pigpio.PUD_UP)
        self.pi.set_pull_up_down(rot_gpioB, config.pigpio.PUD_UP)
        self.cbA = self.pi.callback(rot_gpioA, config.pigpio.EITHER_EDGE, self._pulse)
        self.cbB = self.pi.callback(rot_gpioB, config.pigpio.EITHER_EDGE, self._pulse)
        # Push switch: input with pull-up, callback on the rising edge.
        # (The original issued each set_mode/set_pull_up_down call twice.)
        self.pi.set_mode(switch_gpio, config.pigpio.INPUT)
        self.pi.set_pull_up_down(switch_gpio, config.pigpio.PUD_UP)
        self.switch_cb = self.pi.callback(switch_gpio, config.pigpio.RISING_EDGE, self._switch_toggle)

    # Handles the switch part of the rotary encoder.
    def _switch_toggle(self, gpio, level, tick):
        """Forward a switch edge to the user's switch callback."""
        self.sw_callback()

    def _pulse(self, gpio, level, tick):
        """
        Decode the rotary encoder pulse.

                     +---------+         +---------+            0
                     |         |         |         |
           A         |         |         |         |
                     |         |         |         |
           +---------+         +---------+         +----- 1

               +---------+         +---------+                  0
               |         |         |         |
           B   |         |         |         |
           ----+         +---------+         +---------+        1
        """
        if gpio == self.rot_gpioA:
            self.levA = level
        else:
            self.levB = level
        if gpio != self.lastGpio:  # debounce
            self.lastGpio = gpio
            # A rising while B high -> clockwise; B rising while A high -> CCW.
            if gpio == self.rot_gpioA and level == 1:
                if self.levB == 1:
                    self.rot_callback(1)
            elif gpio == self.rot_gpioB and level == 1:
                if self.levA == 1:
                    self.rot_callback(-1)

    def cancel(self):
        """
        Cancel the rotary encoder decoder.
        """
        self.cbA.cancel()
        self.cbB.cancel()
        # Bug fix: the switch callback was registered but never cancelled.
        self.switch_cb.cancel()
if __name__ == "__main__":
    import time
    import pigpio
    import rotary_encoder

    pos = 0

    def callback(way):
        """Accumulate rotation steps and print the running position."""
        global pos
        pos += way
        print("pos={}".format(pos))

    def switch_callback():
        """Called on each press of the encoder's push switch."""
        print("switch pressed")

    pi = pigpio.pi()
    # Bug fix: decoder requires (pi, rot_gpioA, rot_gpioB, switch_gpio,
    # rotation_callback, switch_callback); the original passed only 4
    # arguments, which raises a TypeError. GPIO 3 for the switch is a
    # placeholder — confirm the actual wiring.
    decoder = rotary_encoder.decoder(pi, 2, 4, 3, callback, switch_callback)
    time.sleep(300)
    decoder.cancel()
    pi.stop()
#!/usr/bin/env python
#==============================================================================
# gtf2juncs.py
#
# Shawn Driscoll
# 20160819
#
# Gene Expression Laboratory, Pfaff
# Salk Institute for Biological Studies
#
# Print out a file of junctions from a GTF annotation. this will include
# gene name, id, strand and transcript id info
#==============================================================================
import sys, argparse, re
import subprocess as sp
from os import unlink
from os.path import isfile, expanduser
# from subprocess import Popen
# from random import gauss, random, sample
# from scipy.stats import norm
# import numpy as np
# import numpy.random as npr
# R support
# import rpy2.robjects as robjects
# r = robjects.r
HOME = expanduser("~")
#==============================================================================
# main
#==============================================================================
def _record_junction(juncdb, laln, aln, tid):
    """Record the junction between two consecutive exons (laln -> aln) of transcript tid."""
    # junction spans from the base after the previous exon's end to the base
    # before the current exon's start (1-based in the id, 0-based in start/end)
    jid = "{}:{}-{}".format(aln[0], str(int(laln[4])+1), str(int(aln[3])-1))
    if jid not in juncdb:
        juncdb[jid] = dict(ref=aln[0], start=int(laln[4])-1, end=int(aln[3])-1,
            strand=aln[6], gnames={}, gids={}, tids={})
    # dicts are used as sets: add the transcript/gene ids and gene name
    juncdb[jid]["tids"][tid] = 0
    rres = re.search("gene_id \"([^\"]+)\"", aln[8])
    if rres:
        juncdb[jid]['gids'][rres.group(1)] = 0
    rres = re.search("gene_name \"([^\"]+)\"", aln[8])
    if rres:
        juncdb[jid]['gnames'][rres.group(1)] = 0

def main(args):
    """Parse the GTF in args.gtf and print one junction record per line.

    Output is BED-like by default, GTF when args.g is set; junctions shorter
    than args.min_length or longer than args.max_length are skipped with a
    warning. Returns 0 on success, 1 on error.
    """

    # variables
    juncdb = {}     # jid -> junction record
    tid = ""        # current transcript id
    ltid = ""       # transcript id of the previous exon line
    laln = []       # previous exon line, split into fields
    iidx = 0        # running intron index for GTF output

    #
    # Start parsing the GTF
    #
    try:
        fin = open(args.gtf, "r")
    except:
        sys.stderr.write("Failed to open GTF ({})\n".format(args.gtf))
        return 1

    sys.stderr.write("Parsing {}\n".format(args.gtf))

    for szl in fin:
        aln = szl.strip().split("\t")
        # skip any non exon lines
        if aln[2] != "exon":
            continue
        # find the transcript id
        rres = re.search("transcript_id \"([^\"]+)\"", aln[8])
        if rres:
            tid = rres.group(1)
        else:
            sys.stderr.write("[main] failed to parse transcript id")
            fin.close()
            return 1
        # consecutive exon lines with the same transcript id imply a junction
        # between the previous exon's end and this exon's start.
        # (The original duplicated this whole recording block inline here and
        # again after the loop; the post-loop copy re-recorded the final
        # junction, a no-op, and has been removed.)
        if tid == ltid:
            _record_junction(juncdb, laln, aln, tid)
        laln = list(aln)
        ltid = tid

    fin.close()
    sys.stderr.write("done.\n")

    #
    # print bed type format
    #
    for jid in sorted(juncdb.keys()):
        # check length
        flen = juncdb[jid]['end'] - juncdb[jid]['start']
        if flen < args.min_length or flen > args.max_length:
            # skip this one
            if flen < args.min_length:
                sys.stderr.write("Warning: skipping short intron ({}): {}\n".format(flen, jid))
            else:
                sys.stderr.write("Warning: skipping long intron ({}): {}\n".format(flen, jid))
            continue

        if args.g:
            iidx += 1
            iid = "INTID_{:08d}".format(iidx)
            lout = [
                juncdb[jid]['ref'],
                "introns", "exon",
                juncdb[jid]['start']+1,
                juncdb[jid]['end'],
                ".",
                juncdb[jid]['strand'],
                ".",
                "gene_id \"{}\"; transcript_id \"{}\"; gene_name \"{}\"; locus \"{}\"; oId \"{}\";".format(
                    jid, iid, ",".join(juncdb[jid]['gnames']), iid, ",".join(juncdb[jid]['tids']))
            ]
        else:
            lout = [
                juncdb[jid]['ref'],
                juncdb[jid]['start'],
                juncdb[jid]['end'],
                jid,
                juncdb[jid]['strand'],
                ",".join(juncdb[jid]['gnames']),
                ",".join(juncdb[jid]['gids']),
                ",".join(juncdb[jid]['tids'])]

        # print() call form works on both Python 2 and 3
        # (the original used the py2-only print statement)
        print("\t".join(map(str, lout)))

    return 0
# --
# runcmd
# run a system level command in subprocess. optionally you can return the process.
# if the process isn't returned then the function waits for the process to finish
def runcmd(cmd, returnProcess=False):
    """Run `cmd` via subprocess; return the Popen if requested, else wait and return 0."""
    sys.stderr.write("CMD: {}\n".format(cmd))
    proc = sp.Popen(cmd.split())
    if returnProcess == True:
        return proc
    proc.wait()
    return 0
#==============================================================================
# entry point
#==============================================================================
# NOTE(review): the parser runs at module import time (outside the __main__
# guard), so importing this file requires a 'gtf' CLI argument — confirm
# this is intentional before guarding it.
parser = argparse.ArgumentParser(description="...")
parser.add_argument('gtf', type=str, help="GTF to extract junctions from")
parser.add_argument('-g', action="store_const", const=True, default=False,
    help="Output in GTF format instead of BED.")
parser.add_argument('--min-length', default=10, type=int, action="store",
    help="Minimum length of intron/junction feature to be exported [10]")
parser.add_argument('--max-length', default=200000, type=int, action="store",
    help="Maximum length of intron/junction feature to be exported [200000]")
args = parser.parse_args()

if __name__ == "__main__":
    try:
        sys.exit(main(args))
    except KeyboardInterrupt:
        # polite message on Ctrl-C instead of a traceback
        sys.stderr.write("\nkilled it\n")
|
"""
Script to easily read in and plot the results of BestTrack output
Example Usage: python plotBestTrack.py 20150408_20150409 -i tracks
"""
import argparse
import datetime
from datetime import timedelta
import time
import numpy as np
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import scipy.stats.mstats as stats
from collections import defaultdict
import json
import os
# Map extent for the CONUS plot, in degrees.
MIN_LAT = 20
MAX_LAT = 51
MIN_LON = -119
MAX_LON = -62
# Track line widths (BEFORE_WIDTH appears unused here — confirm before removing).
BEFORE_WIDTH = 4
AFTER_WIDTH = 2
MIN_MIN_CELLS = 2 # Min min number storm cells per track.
MAX_MIN_CELLS = 12 # Max min number storm cells per track.
def getOptions():
    """
    Retrieve the user-speficified command line arguments

    Returns
    --------
    Namespace
        Namespace of parsed arguments returned by ArgumentParser.parse_args()
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('file_name', type=str, metavar='file_name', help='File to read in exluding _tracks or _cells, etc')
    arg_parser.add_argument('-i', '--input_dir', type=str, metavar='', default='tracks', help='Location of source files')
    arg_parser.add_argument('-s', '--dir_suffix', type=str, metavar='', default='', help='Name of last subdirectory for source files')
    arg_parser.add_argument('-mc', '--min_cells', type=int, metavar='', default=3, help='Minimum number of storm cells per track')
    return arg_parser.parse_args()
def checkArgs(args):
    """
    Check the user-specified command line arguments for errors not handled by argparse.
    Errors will print to console before terminating the script (exit status 2).

    Parameters
    ----------
    args: dict
        Dict of user-specified arguments (vars(Namespace) from getOptions())
    """
    inSuffix = args['dir_suffix']
    minCells = args['min_cells']
    if '\\' in inSuffix or '/' in inSuffix:
        print('\nERROR: Input directory suffix must not contain / or \\. Instead got: ' + inSuffix + '\n')
        # Bug fix: the original called sys.exit(2), but this file never
        # imports sys, so the error path itself raised a NameError.
        # raise SystemExit is equivalent and needs no import.
        raise SystemExit(2)
    else:
        print('Name of last subdirectory for original tracking files: ' + inSuffix)
    if minCells < MIN_MIN_CELLS or minCells > MAX_MIN_CELLS:
        print('\nERROR: Min Cells must be in range [' + str(MIN_MIN_CELLS) + ', ' + str(MAX_MIN_CELLS) + ']. Instead got: ' + str(minCells) + '\n')
        raise SystemExit(2)
    else:
        print('Minimum number of cells per track: ' + str(minCells))
#====================================================================================================================#
# #
# Main Method - Handle user input, read in files, then plot #
# #
#====================================================================================================================#
if __name__ == '__main__':
    """Handle user input, read in files, then plot the tracks"""
    args = vars(getOptions())
    checkArgs(args)

    stormTracks = {}
    stormCells = {}

    # Read in track files
    # Walk the input tree; when a dir_suffix is given, only leaf directories
    # with exactly that name are considered.
    for root, dirs, files in os.walk(args['input_dir']):
        if args['dir_suffix'] != '' and not (files and not dirs and os.path.split(root)[-1] == args['dir_suffix']): continue
        for trackFile in files:
            if trackFile.startswith(args['file_name']) and trackFile.endswith('_tracks.data'):
                # Load tracks
                f = open(root + '/' + trackFile)
                stormTracks = json.load(f)
                f.close()
            elif trackFile.startswith(args['file_name']) and trackFile.endswith('_cells.data'):
                # Load cells
                f = open(root + '/' + trackFile)
                stormCells = json.load(f)
                f.close()

    #print stormCells
    #print stormTracks

    # Load dimensions
    lats = [MIN_LAT, MAX_LAT]
    lons = [MIN_LON, MAX_LON]

    # Generate each map
    print('Plotting figure...')
    fig = plt.figure( 1)
    theseLats = lats
    theseLons = lons

    # NOTE(review): fig, meanLat and meanLon are computed but never used
    # below — confirm before removing.
    meanLat = np.mean(theseLats)
    meanLon = np.mean(theseLons)

    # Lambert conformal map covering the continental US.
    m = Basemap(llcrnrlon=-119, llcrnrlat=22, urcrnrlon=-64,
        urcrnrlat=49, projection='lcc', lat_1=33, lat_2=45,
        lon_0=-95, resolution='i', area_thresh=10000)

    # Read in shapefiles
    m.readshapefile('counties/c_11au16', name = 'counties', drawbounds = True, color = '#C9CFD1')
    m.readshapefile('States_Shapefiles/s_11au16', name = 'states', drawbounds = True)
    m.readshapefile('province/province', name = 'canada', drawbounds = True)

    # Sort cells in each track by time and then get lat lon pairs for each cell
    for track in stormTracks:
        times = []
        finalCellsX = []
        finalCellsY = []
        for cell in stormTracks[track]['cells']:
            times.append(stormCells[str(cell)]['time'])
        times = sorted(times)
        # O(n^2) scan matching each timestamp back to its cell; fine for
        # the small per-track cell counts involved here.
        for cellTime in times:
            for cell in stormTracks[track]['cells']:
                if stormCells[str(cell)]['time'] == cellTime:
                    finalCellsX.append(m(stormCells[str(cell)]['lon'], stormCells[str(cell)]['lat'])[0])
                    finalCellsY.append(m(stormCells[str(cell)]['lon'], stormCells[str(cell)]['lat'])[1])
                    break
        m.plot(finalCellsX, finalCellsY, color = 'r', linewidth = AFTER_WIDTH)

    plt.show()
|
import flask_bootstrap
from flask_wtf import form
from flask import request
import forms
import time
from forms import *
from flask import abort
from werkzeug.exceptions import Unauthorized
from flask import Flask, render_template, session, Response, request
from flask import render_template, flash, redirect, url_for
from config import Config
from flask_bootstrap import Bootstrap
import saicalls
import json
app = Flask(__name__)
# Load settings once (the original called from_object(Config) twice in a row).
app.config.from_object(Config)
bootstrap = flask_bootstrap.Bootstrap(app)

"""TODO:
1. Make the hard coded sai profile upload
2. make a brightness change option
3. pages more user friendly"""
# define your app routes
# define your app routes
@app.route('/')
@app.route('/index')
def index():
    """Render the start page."""
    return render_template('/html/index.html', title='home', page={'pagename': 'Start Page'})
# panel for accessing the main function of the lights
# panel for accessing the main function of the lights
@app.route('/panel')
def panel():
    """
    Create the light control panel.

    Control Panel for the light; shows admins the option to go into the
    user admin panel.
    """
    # session.get avoids a KeyError when the visitor never logged in.
    # The stored rank comes straight from users.txt and keeps its trailing
    # newline, hence the "admin\n" comparison value.
    isAdmin = session.get('rank') == "admin\n"
    return render_template('/html/panel.html', isAdmin=isAdmin)
# being able to add people to login to the web panel
# being able to add people to login to the web panel
@app.route('/userAdmin', methods=['GET', 'POST'])
def userAdmin():
    """
    User administration panel for admins to see existing users and create new ones.

    Non-admins (including anonymous visitors) get a 401.
    """
    # Guard clause: session.get avoids a KeyError for anonymous visitors.
    if session.get('rank') != "admin\n":
        abort(401)
    # Create a list of all existing users to display to admin.
    # 'with' closes the handle (the original leaked it).
    with open('users.txt', 'r') as users:
        lines = users.readlines()
    list_of_users = []
    for line in lines:
        username, password, rank = line.split(".")
        list_of_users.append({"username": username,
                              "password": password,
                              "rank": rank
                              })
    UserForm = NewUser()
    if UserForm.validate_on_submit():
        username = UserForm.username.data
        password = UserForm.password.data
        rank = UserForm.Rank.data
        # NOTE(review): passwords are stored in plain text and a '.' in any
        # field corrupts the format — consider hashing and a real datastore.
        newUserString = f'{username}.{password}.{rank}\n'
        with open('users.txt', 'a') as file:
            file.write(newUserString)
    return render_template('/html/userAdmin.html', users=list_of_users, form=UserForm)
# Custom error codes
# noinspection PyUnusedLocal
@app.errorhandler(401)
def custom_401(error):
    """Render the custom 401 page with the matching status code."""
    # Bug fix: without the explicit status an error handler's response is
    # sent as 200 OK (the 404 handler in this file already returns a tuple).
    return render_template('/html/error401.html'), 401
@app.route('/login', methods=['GET', 'POST'])
def login():
    """
    Compare user inputted data to a text file with all users.

    If found, save username and rank into the session and go to the panel;
    if not found, send the user back to the index page.
    """
    # 'with' closes the handle (the original leaked it on every request).
    with open('users.txt', 'r') as user_file:
        lines = user_file.readlines()
    page = {'pagename': 'login'}
    # noinspection PyShadowingNames
    form = LoginForm()
    if form.validate_on_submit():
        # Save the data as a local variable
        username = form.username.data
        password = form.password.data
        # Loop through every line in the users file and find if the user exists
        for line in lines:
            saved_username, saved_password, saved_rank = line.split(".")
            if username == saved_username and password == saved_password:
                # Save the rank and username of the user into the session
                session['rank'] = saved_rank
                session['username'] = username
                return redirect(url_for('panel'))
        # No matching user: back to the index page.
        # (The original's `else: continue` branch was a no-op.)
        return redirect(url_for('index'))
    return render_template('/html/login.html', title='Login', page=page, form=form)
# noinspection PyUnusedLocal
@app.errorhandler(404)
def not_found_error(error):
    # Render the custom 404 page with the matching status code.
    return render_template('/html/404.html'), 404
# noinspection PyShadowingNames,SpellCheckingInspection
@app.route('/options', methods=['GET', 'POST'])
def options():
    """Profile selection page: on a valid POST, push the chosen profile to the lights."""
    form = chooseProfile()
    if request.method == 'POST':
        if form.validate_on_submit():
            profilename = form.profileName.data
            saicalls.sendprof(profilename)
            # presumably the device needs time to store the profile before it
            # can be triggered — TODO confirm the fixed 10s delay is required
            time.sleep(10)
            saicalls.trigger()
            print('sending profile')
    return render_template('/html/options.html', form = form)
@app.route('/reset', methods=['GET', 'POST'])
def reset():
    """Reset the lights, then bounce back to the options page."""
    saicalls.reset()
    # NOTE(review): redirect('options') uses a relative path; url_for('options')
    # would be more robust — confirm before changing.
    return redirect('options', code=302)
@app.route('/test', methods=['GET', 'POST'])  # temp testing site to ensure redirects and stuff like that
def test():
    """Scratch endpoint used to experiment with form handling."""
    # Bug fix: `form = testform` assigned the class itself, so
    # validate_on_submit() could never run on an instance.
    form = testform()
    if request.method == 'POST':
        if form.validate_on_submit():
            number = form.number.data
            print(number+"This is the string")
            number = int(number)
            print(number)
    return render_template('/html/test.html')
if __name__ == '__main__':
    # app.run blocks until the server stops, so nothing after it executes;
    # the original set FLASK_DEBUG = 1 *after* app.run (dead code).
    FLASK_DEBUG = 1
    app.run(debug=True)
from __future__ import print_function, division
import os, json, random
from PIL import Image
import numpy as np
import torch
import torchvision
from torch.utils.data import Dataset
from envs.config import Config
from torchvision import transforms
import cv2
import utils.video_transforms as video_transforms
#import video_transforms as video_transforms
class URFDFusion(Dataset):
    """
    UR Fall Detection dataset yielding paired RGB frames and stacked optical
    flow for a two-stream fusion model.

    Each item is a dict {'rgb': tensor, 'flow': tensor, 'label': int}; flow
    stacks `stack_size` consecutive (x, y) flow images channel-wise, giving
    2 * stack_size channels.
    """
    NUM_CLASSES = 2

    def __init__(self,
                 dataset_dict,
                 base_dir=Config.get_dataset_path('urfdfusion'),
                 input_size=224,
                 stack_size=10,
                 split='train',
                 ):
        super().__init__()
        self.dataset_dict = dataset_dict
        self._base_dir = base_dir
        self.input_size = input_size
        self.stack_size = stack_size    # number of flow frames per sample
        self.split = split
        self.videos = self.dataset_dict[self.split]['video_name']
        self.labels = self.dataset_dict[self.split]['labels']
        self.num_frames = self.dataset_dict[self.split]['num_frames']
        # each entry is a "dir_name,frame_idx" string — see get_data()
        self.data = dataset_dict[self.split]['data']
        # Display stats
        print('Number of images in {}: {:d}'.format(split, len(self.labels)))

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, index):
        rgb, flow = self.get_data(self.data[index])
        if self.split == 'train':
            rgb = self.transform_tr(rgb, is_flow=False)
            flow = self.transform_tr(flow, is_flow=True)
        elif self.split == 'val':
            # Bug fix: the original ran the *training* transform on the RGB
            # stream in the validation split; both streams now use the
            # validation transform.
            rgb = self.transform_val(rgb, is_flow=False)
            flow = self.transform_val(flow, is_flow=True)
        # labels are stored 1-based; convert to 0-based class index
        target = int(self.labels[index]) - 1
        return {'rgb': rgb, 'flow': flow, 'label': target}

    def transform_tr(self, sample, is_flow=False):
        """Training-time transform (augmentations currently disabled)."""
        if is_flow:
            composed_transforms = video_transforms.Compose([
                #video_transforms.MultiScaleCrop((224, 224), [1.0, 0.875, 0.75]),
                #video_transforms.RandomHorizontalFlip(),
                video_transforms.CenterCrop((224, 224)),
                video_transforms.ToTensor(),
                video_transforms.Normalize([0.5, 0.5] * self.stack_size, [0.226, 0.226] * self.stack_size),
            ])
        else:
            composed_transforms = video_transforms.Compose([
                #video_transforms.MultiScaleCrop((224, 224), [1.0, 0.875, 0.75]),
                #video_transforms.RandomHorizontalFlip(),
                video_transforms.CenterCrop((224, 224)),
                video_transforms.ToTensor(),
                # ImageNet statistics for the RGB stream
                video_transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
            ])
        return composed_transforms(sample)

    def transform_val(self, sample, is_flow=False):
        """Evaluation-time transform: deterministic center crop + normalize."""
        if is_flow:
            composed_transforms = video_transforms.Compose([
                video_transforms.CenterCrop((224, 224)),
                video_transforms.ToTensor(),
                video_transforms.Normalize([0.5, 0.5] * self.stack_size, [0.226, 0.226] * self.stack_size),
            ])
        else:
            composed_transforms = video_transforms.Compose([
                video_transforms.CenterCrop((224, 224)),
                video_transforms.ToTensor(),
                video_transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
            ])
        return composed_transforms(sample)

    def get_data(self, data):
        """Split a "dir_name,frame_idx" entry and load both modalities."""
        dir_name = data.split(',')[0]
        frame_idx = int(data.split(',')[1])
        rgb = self.get_rgb(dir_name, frame_idx)
        flow = self.get_flows(dir_name, frame_idx)
        return rgb, flow

    def get_rgb(self, dir_name, frame_idx):
        """Load one BGR frame (cv2.IMREAD_COLOR) from URFD_images."""
        frame_dir = os.path.join(self._base_dir, 'URFD_images', dir_name)
        rgb_img_path = os.path.join(frame_dir, 'rgb_{0:05d}.jpg'.format(frame_idx))
        assert(os.path.exists(rgb_img_path))
        rgb_img = cv2.imread(rgb_img_path, cv2.IMREAD_COLOR)
        return rgb_img

    def get_flows(self, dir_name, frame_idx):
        """Load stack_size consecutive x/y flow images, concatenated channel-wise."""
        flow = []
        frame_dir = os.path.join(self._base_dir, 'URFD_opticalflow', dir_name)
        for stack_idx in range(frame_idx, frame_idx + self.stack_size):
            x_img_path = os.path.join(frame_dir, 'flow_x_{0:05d}.jpg'.format(stack_idx))
            y_img_path = os.path.join(frame_dir, 'flow_y_{0:05d}.jpg'.format(stack_idx))
            assert(os.path.exists(x_img_path))
            assert(os.path.exists(y_img_path))
            x_img = cv2.imread(x_img_path, cv2.IMREAD_GRAYSCALE)
            y_img = cv2.imread(y_img_path, cv2.IMREAD_GRAYSCALE)
            flow.append(np.expand_dims(x_img, 2))
            flow.append(np.expand_dims(y_img, 2))
        flow = np.concatenate(flow, axis=2)
        return flow

    def __str__(self):
        return 'URFDFusion(split=' + self.split + ')'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# camera.py
# Copyright (c) 2017-2021, Richard Gerum
#
# This file is part of the cameratransform package.
#
# cameratransform is free software: you can redistribute it and/or modify
# it under the terms of the MIT licence.
#
# cameratransform is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
#
# You should have received a copy of the license
# along with cameratransform. If not, see <https://opensource.org/licenses/MIT>
import numpy as np
import os
import json
import itertools
from scipy import stats
from typing import Union, Optional, Tuple, List
from numbers import Number
from .parameter_set import ParameterSet, ClassWithParameterSet, Parameter, TYPE_GPS
from .projection import RectilinearProjection, EquirectangularProjection, CylindricalProjection, CameraProjection
from .spatial import SpatialOrientation
from .lens_distortion import NoDistortion, LensDistortion, ABCDistortion, BrownLensDistortion
from . import gps
from . import ray
try:
from numpy.typing import ArrayLike
except ImportError:
from numpy import ndarray as ArrayLike
# Array-shape aliases used in the annotations below; the accepted/returned
# dimensions are noted in the trailing comments.
Points1D = Union[ArrayLike, List, float]  # (), (N)
Points2D = ArrayLike  # (2), (Nx2)
Points3D = ArrayLike  # (3), (Nx3)
Image = ArrayLike  # (HxW)
Mesh3D = ArrayLike  # (3x3), (Mx3x3)

# integer identifiers for the available projection types
RECTILINEAR = 0
CYLINDRICAL = 1
EQUIRECTANGULAR = 2

# integer identifiers for the available lens distortion models
NODISTORTION = 0
ABCDDISTORTION = 1
BROWNLENSDISTORTION = 2
def _getSensorFromDatabase(model: str) -> Optional[Tuple[float, float]]:
"""
Get the sensor size from the given model from the database at: https://github.com/openMVG/CameraSensorSizeDatabase
Parameters
----------
model: string
the model name as received from the exif data
Returns
-------
sensor_size: tuple
(sensor_width, sensor_height) in mm or None
"""
import requests
url = "https://raw.githubusercontent.com/openMVG/CameraSensorSizeDatabase/master/sensor_database_detailed.csv"
database_filename = "sensor_database_detailed.csv"
# download the database if it is not there
if not os.path.exists(database_filename):
with open(database_filename, "w") as fp:
print("Downloading database from:", url)
r = requests.get(url)
fp.write(r.text)
# load the database
with open(database_filename, "r") as fp:
data = fp.readlines()
# format the name
model = model.replace(" ", ";", 1)
name = model + ";"
# try to find it
for line in data:
if line.startswith(name):
# extract the sensor dimensions
line = line.split(";")
sensor_size = (float(line[3]), float(line[4]))
return sensor_size
# no sensor size found
return None
def getCameraParametersFromExif(filename: str, verbose: bool = False, sensor_from_database: bool = True) -> \
        Tuple[float, Tuple[float, float], Tuple[float, float]]:
    """
    Try to extract the intrinsic camera parameters from the exif information.

    Parameters
    ----------
    filename: basestring
        the filename of the image to load.
    verbose: bool
        whether to print the output.
    sensor_from_database: bool
        whether to try to load the sensor size from a database at https://github.com/openMVG/CameraSensorSizeDatabase

    Returns
    -------
    focal_length: number
        the extracted focal length in mm
    sensor_size: tuple
        (width, height) of the camera sensor in mm
    image_size: tuple
        (width, height) of the image in pixel

    Examples
    --------

    >>> import cameratransform as ct

    Supply the image filename to print the results:

    >>> ct.getCameraParametersFromExif("Image.jpg", verbose=True)
    Intrinsic parameters for 'Canon EOS 50D':
       focal length: 400.0 mm
       sensor size: 22.3 mm × 14.9 mm
       image size: 4752 × 3168 Pixels

    Or use the resulting parameters to initialize a CameraTransform instance:

    >>> focal_length, sensor_size, image_size = ct.getCameraParametersFromExif("Image.jpg")
    >>> cam = ct.Camera(focal_length, sensor=sensor_size, image=image_size)
    """
    from PIL import Image
    from PIL.ExifTags import TAGS

    def get_exif(fn):
        # read the raw exif dictionary and translate numeric tags to names
        ret = {}
        i = Image.open(fn)
        info = i._getexif()
        for tag, value in info.items():
            decoded = TAGS.get(tag, tag)
            ret[decoded] = value
        return ret

    # read the exif information of the file
    exif = get_exif(filename)
    # get the focal length (stored as a rational numerator/denominator pair;
    # NOTE(review): newer Pillow versions return an IFDRational here — confirm
    # indexing with [0]/[1] still works for the Pillow version in use)
    f = exif["FocalLength"][0] / exif["FocalLength"][1]
    # get the sensor size, either from a database
    # BUGFIX: initialize sensor_size so that sensor_from_database=False does
    # not raise a NameError in the check below
    sensor_size = None
    if sensor_from_database:
        sensor_size = _getSensorFromDatabase(exif["Model"])
    # or compute it from the exif focal-plane resolution (dots per inch)
    if sensor_size is None:
        sensor_size = (
            exif["ExifImageWidth"] / (exif["FocalPlaneXResolution"][0] / exif["FocalPlaneXResolution"][1]) * 25.4,
            exif["ExifImageHeight"] / (exif["FocalPlaneYResolution"][0] / exif["FocalPlaneYResolution"][1]) * 25.4)
    # get the image size
    image_size = (exif["ExifImageWidth"], exif["ExifImageHeight"])
    # print the output if desired
    if verbose:
        print("Intrinsic parameters for '%s':" % exif["Model"])
        print(" focal length: %.1f mm" % f)
        print(" sensor size: %.1f mm × %.1f mm" % sensor_size)
        print(" image size: %d × %d Pixels" % image_size)
    return f, sensor_size, image_size
class CameraGroup(ClassWithParameterSet):
    """
    A group of cameras (typically a stereo pair) sharing the fitting machinery.

    Bundles the projections, orientations and lens distortions of N cameras,
    exposes triangulation (:meth:`spaceFromImages`) and provides additional
    log-probability terms (baseline, point correspondences) used for fitting.
    Indexing (``group[i]``), ``len()`` and iteration yield the individual
    :class:`Camera` objects.
    """
    # per-camera component lists, filled in __init__
    projection_list = None
    orientation_list = None
    lens_list = None

    def __init__(self, projection: Union[list, CameraProjection], orientation: Union[list, SpatialOrientation] = None,
                 lens: Union[list, LensDistortion] = None):
        ClassWithParameterSet.__init__(self)
        self.N = 1

        def checkCount(parameter, class_type, parameter_name, default):
            # normalize the input to a list: None -> [default()], a single
            # instance -> [instance], any other iterable -> list(iterable)
            if parameter is None:
                setattr(self, parameter_name, [default()])
            elif isinstance(parameter, class_type):
                setattr(self, parameter_name, [parameter])
            else:
                setattr(self, parameter_name, list(parameter))
                # update the camera count from the explicitly given list
                # (the last explicit list passed determines N)
                self.N = len(getattr(self, parameter_name))

        checkCount(projection, CameraProjection, "projection_list", RectilinearProjection)
        checkCount(orientation, SpatialOrientation, "orientation_list", SpatialOrientation)
        checkCount(lens, LensDistortion, "lens_list", NoDistortion)

        params = {}

        def gatherParameters(parameter_list):
            # a shared (single) component keeps its plain parameter names,
            # per-camera components are prefixed with "C<index>_"
            if len(parameter_list) == 1:
                params.update(parameter_list[0].parameters.parameters)
            else:
                for index, proj in enumerate(parameter_list):
                    for name in proj.parameters.parameters:
                        params["C%d_%s" % (index, name)] = proj.parameters.parameters[name]

        gatherParameters(self.projection_list)
        gatherParameters(self.orientation_list)
        gatherParameters(self.lens_list)

        self.parameters = ParameterSet(**params)

        # build one Camera per index; single-entry component lists are cycled
        # so the same component object is shared between all cameras
        self.cameras = [Camera(projection, orientation, lens) for index, projection, orientation, lens in
                        zip(range(self.N), itertools.cycle(self.projection_list), itertools.cycle(self.orientation_list), itertools.cycle(self.lens_list))]

    def getBaseline(self) -> float:
        """Return the horizontal (x-y plane) distance between the first two cameras."""
        return np.sqrt((self[0].pos_x_m-self[1].pos_x_m)**2 + (self[0].pos_y_m-self[1].pos_y_m)**2)

    def spaceFromImages(self, points1: Points2D, points2: Points2D) -> Points3D:
        """Triangulate: intersect the viewing rays of corresponding image
        points of the first two cameras and return the 3D points."""
        p1, v1 = self.cameras[0].getRay(points1)
        p2, v2 = self.cameras[1].getRay(points2)
        return ray.intersectionOfTwoLines(p1, v1, p2, v2)

    def discanteBetweenRays(self, points1: Points2D, points2: Points2D) -> Points1D:
        """Return the closest distance between the viewing rays of
        corresponding points of the first two cameras.

        NOTE(review): the name looks like a typo of "distanceBetweenRays";
        kept unchanged for backwards compatibility.
        """
        p1, v1 = self.cameras[0].getRay(points1, normed=True)
        p2, v2 = self.cameras[1].getRay(points2, normed=True)
        return ray.distanceOfTwoLines(p1, v1, p2, v2)

    def imagesFromSpace(self, points: Points3D) -> List[Points2D]:
        """Project the given space points into every camera of the group."""
        return [cam.imageFromSpace(points) for cam in self.cameras]

    def __getitem__(self, item) -> "Camera":
        return self.cameras[item]

    def __len__(self) -> int:
        return len(self.cameras)

    def __iter__(self):
        return iter(self.cameras)

    def addBaselineInformation(self, target_baseline, uncertainty=6):
        """Add a log-probability term keeping the baseline of the first two
        cameras close to target_baseline (std. dev. uncertainty)."""
        def baselineInformation(target_baseline=target_baseline, uncertainty=uncertainty):
            # baseline
            return np.sum(stats.norm(loc=target_baseline, scale=uncertainty).logpdf(self.getBaseline()))
        # NOTE(review): self.log_prob is presumably provided by
        # ClassWithParameterSet — confirm
        self.log_prob.append(baselineInformation)

    def addPointCorrespondenceInformation(self, corresponding1, corresponding2, uncertainty=1):
        """Add a log-probability term scoring how well corresponding points of
        the two cameras lie on each other's epipolar lines (pixel std. dev.
        uncertainty)."""
        def pointCorrespondenceInformation(corresponding1=corresponding1, corresponding2=corresponding2):
            sum = 0
            corresponding = [corresponding1, corresponding2]
            # iterate over cam1 -> cam2 and cam2 -> cam1
            for i in [0, 1]:
                # get the ray from the correspondences in the first camera's image
                world_epipole, world_ray = self[i].getRay(corresponding[i])
                # project them to the image of the second camera
                p1 = self[1 - i].imageFromSpace(world_epipole + world_ray * 1)
                p2 = self[1 - i].imageFromSpace(world_epipole + world_ray * 2)
                # find the perpendicular point from the epipolar lines to the correspondes point
                perpendicular_point = ray.getClosestPointFromLine(p1, p2 - p1, corresponding[1 - i])
                # calculate the distances
                distances = np.linalg.norm(perpendicular_point - corresponding[1 - i], axis=-1)
                # sum the logprob of these distances
                sum += np.sum(stats.norm(loc=0, scale=uncertainty).logpdf(distances))
            # return the sum of the logprobs
            return sum
        self.log_prob.append(pointCorrespondenceInformation)

    def pointCorrespondenceError(self, corresponding1, corresponding2):
        """Return per-direction pixel distances of each correspondence to its
        epipolar line, as a list [cam1->cam2 distances, cam2->cam1 distances]."""
        sum = 0
        corresponding = [corresponding1, corresponding2]
        distances_list = []
        # iterate over cam1 -> cam2 and cam2 -> cam1
        for i in [0, 1]:
            # get the ray from the correspondences in the first camera's image
            world_epipole, world_ray = self[i].getRay(corresponding[i])
            # project them to the image of the second camera
            p1 = self[1 - i].imageFromSpace(world_epipole + world_ray * 1, hide_backpoints=False)
            p2 = self[1 - i].imageFromSpace(world_epipole + world_ray * 2, hide_backpoints=False)
            # find the perpendicular point from the epipolar lines to the correspondes point
            perpendicular_point = ray.getClosestPointFromLine(p1, p2 - p1, corresponding[1 - i])
            # calculate the distances
            distances = np.linalg.norm(perpendicular_point - corresponding[1 - i], axis=-1)
            # collect the distances of this direction
            distances_list.append(distances)
        # return the collected distances
        return distances_list

    def getLogProbability(self):
        """
        Gives the sum of all terms of the log probability. This function is used for sampling and fitting.
        """
        prob = np.sum([logProb() for logProb in self.log_prob]) + np.sum([logProb() for cam in self for logProb in cam.log_prob])
        # NaN (e.g. from invalid parameters) is mapped to -inf so samplers reject it
        return prob if not np.isnan(prob) else -np.inf

    def setCameraParametersByPointCorrespondence(self, corresponding1, corresponding2, baseline):
        """Estimate the pose of the second camera relative to the first from
        point correspondences (essential-matrix decomposition via OpenCV) and
        scale the translation by the known baseline length."""
        import cv2
        cam1 = self[0]
        cam2 = self[1]
        f, cx, cy = cam1.focallength_x_px, cam1.center_x_px, cam1.center_y_px
        # intrinsic camera matrix of the first camera
        K = np.array([[f, 0, cx], [0, f, cy], [0, 0, 1]])
        mat, mask = cv2.findEssentialMat(corresponding1, corresponding2, cam1.focallength_x_px,
                                         (cam1.center_x_px, cam1.center_y_px))
        n, rot, t, mask = cv2.recoverPose(mat, corresponding1, corresponding2, K)
        # the first camera defines the reference orientation
        cam1.heading_deg = 0
        cam1.tilt_deg = 0
        cam1.roll_deg = 0

        def rotationToEuler(R):
            # convert a rotation matrix to (roll, tilt, heading) in degrees
            alpha = np.rad2deg(np.arctan2(R[0, 2], -R[1, 2]))
            beta = np.rad2deg(np.arccos(R[2, 2]))
            gamma = np.rad2deg(np.arctan2(R[2, 0], R[2, 1]))
            return np.array([180 + alpha, beta, 180 + gamma])

        roll, tilt, heading = rotationToEuler(rot)
        # position of camera 2: unit translation scaled by the baseline
        data = dict(roll_deg=roll,
                    tilt_deg=tilt,
                    heading_deg=heading,
                    pos_x_m=cam1.pos_x_m + t[0, 0]*baseline,
                    pos_y_m=cam1.pos_y_m + t[1, 0]*baseline,
                    elevation_m=cam1.elevation_m + t[2, 0]*baseline)
        print(data)
        cam2.parameters.set_fit_parameters(data.keys(), data.values())

    def plotEpilines(self, corresponding1, corresponding2, im1, im2):
        """Plot the epipolar lines (from the OpenCV fundamental matrix) of the
        given correspondences over both images, side by side."""
        import cv2
        import matplotlib.pyplot as plt
        cam1 = self[0]
        cam2 = self[1]
        F, mask = cv2.findFundamentalMat(corresponding1, corresponding2)#, method=cv2.FM_8POINT)
        lines1 = cv2.computeCorrespondEpilines(corresponding2, 2, F)[:, 0, :]
        lines2 = cv2.computeCorrespondEpilines(corresponding1, 1, F)[:, 0, :]

        def drawLine(line, x_min, x_max, y_min, y_max):
            # clip the homogeneous line (a, b, c) to the image rectangle by
            # intersecting it with all four borders
            a, b, c = line
            points = []
            y_x_min = -(a * x_min + c) / b
            if y_min < y_x_min < y_max:
                points.append([x_min, y_x_min])
            y_x_max = -(a * x_max + c) / b
            if y_min < y_x_max < y_max:
                points.append([x_max, y_x_max])
            x_y_min = -(b * y_min + c) / a
            if x_min < x_y_min < x_max:
                points.append([x_y_min, y_min])
            x_y_max = -(b * y_max + c) / a
            if x_min < x_y_max < x_max:
                points.append([x_y_max, y_max])
            if len(points) == 0:
                return
            points = np.array(points)
            p, = plt.plot(points[:, 0], points[:, 1], "-")
            return p

        def drawEpilines(camA, camB, lines, points):
            border = camA.getImageBorder()
            plt.plot(border[:, 0], border[:, 1], "r-")
            for point, line in zip(points, lines):
                line = drawLine(line, 0, camA.image_width_px, 0, camA.image_height_px)
                plt.plot(point[0], point[1], "o", color=line.get_color())
            # mark the epipole (projection of the other camera's position)
            p = camA.imageFromSpace(camB.getPos())
            print("p", p)
            plt.plot(p[0], p[1], "r+", ms=5)
            plt.axis("equal")

        plt.subplot(121)
        drawEpilines(cam1, cam2, lines1, corresponding1)
        plt.imshow(im1)
        plt.subplot(122)
        drawEpilines(cam2, cam1, lines2, corresponding2)
        plt.imshow(im2)
        plt.show()

    def plotMyEpiploarLines(self, corresponding1, corresponding2, im1=None, im2=None):
        """Plot epipolar lines derived from the cameras' own ray geometry
        (instead of an OpenCV fundamental matrix), with per-point distances."""
        import matplotlib.pyplot as plt
        cam1 = self[0]
        cam2 = self[1]

        def drawEpilines(camA, camB, pointsA, pointsB):
            for pointA, pointB in zip(pointsA, pointsB):
                # sample the other camera's viewing ray and project it into camA
                origin, world_ray = camB.getRay(pointB, normed=True)
                pixel_points = []
                for i in np.arange(-10000, 10000, 100):
                    pixel_points.append(camA.imageFromSpace(origin + world_ray*i, hide_backpoints=False))
                pixel_points = np.array(pixel_points)
                p, = plt.plot(pixel_points[:, 0], pixel_points[:, 1], "-")
                plt.plot(pointA[0], pointA[1], "o", color=p.get_color())
                # find the perpendicular point from the epipolar lines to the correspondes point
                perpendicular_point = ray.getClosestPointFromLine(pixel_points[0], pixel_points[1] - pixel_points[0], pointA)
                plt.plot(perpendicular_point[0], perpendicular_point[1], "+", color=p.get_color())
                plt.plot([pointA[0], perpendicular_point[0]], [pointA[1], perpendicular_point[1]], "--", color=p.get_color())
                # calculate the distances
                distances = np.linalg.norm(perpendicular_point - pointA, axis=-1)
                plt.text(pointA[0], pointA[1], "%.1f" % distances, color=p.get_color())

        plt.subplot(121)
        drawEpilines(cam1, cam2, corresponding1, corresponding2)
        if im1 is not None:
            plt.imshow(im1)
        plt.subplot(122)
        drawEpilines(cam2, cam1, corresponding2, corresponding1)
        if im2 is not None:
            plt.imshow(im2)

    def scaleSpace(self, scale):
        """Scale the positions (and elevations) of all cameras by a factor."""
        for cam in self:
            cam.pos_x_m, cam.pos_y_m, cam.elevation_m = np.array([cam.pos_x_m, cam.pos_y_m, cam.elevation_m]) * scale
class Camera(ClassWithParameterSet):
"""
This class is the core of the CameraTransform package and represents a camera. Each camera has a projection
(subclass of :py:class:`CameraProjection`), a spatial orientation (:py:class:`SpatialOrientation`) and optionally
a lens distortion (subclass of :py:class:`LensDistortion`).
"""
map = None
last_extent = None
last_scaling = None
map_undistort = None
last_extent_undistort = None
last_scaling_undistort = None
R_earth = 6371e3
def __init__(self, projection: CameraProjection, orientation: SpatialOrientation = None, lens: LensDistortion = None):
ClassWithParameterSet.__init__(self)
self.projection = projection
if orientation is None:
orientation = SpatialOrientation()
self.orientation = orientation
if lens is None:
lens = NoDistortion()
self.lens = lens
self.lens.setProjection(projection)
params = dict(gps_lat=Parameter(0, default=0, type=TYPE_GPS), gps_lon=Parameter(0, default=0, type=TYPE_GPS))
params.update(self.projection.parameters.parameters)
params.update(self.orientation.parameters.parameters)
params.update(self.lens.parameters.parameters)
self.parameters = ParameterSet(**params)
def __str__(self):
string = "CameraTransform(\n"
string += str(self.lens)
string += str(self.projection)
string += str(self.orientation)
string += ")"
return string
    def setGPSpos(self, lat: Union[Number, str, ], lon: Number = None, elevation: Number = None):
        """
        Provide the earth position for the camera.

        Parameters
        ----------
        lat: number, string
            the latitude of the camera or the string representing the gps position.
        lon: number, optional
            the longitude of the camera.
        elevation: number, optional
            the elevation of the camera (this is the same elevation SpatialOrientation is using, setting it here might
            overwrite a previous value from SpatialOrientation).

        Examples
        --------

        >>> import cameratransform as ct
        >>> cam = ct.Camera()

        Supply the gps position of the camera as floats:

        >>> cam.setGPSpos(-66.66, 140.00, 19)

        or as a string:

        >>> cam.setGPSpos("66°39'53.4\"S 140°00'34.8\"")
        """
        # if it is a string
        if isinstance(lat, str):
            try:
                # the string may contain lat, lon and height
                lat, lon, elevation = gps.gpsFromString(lat, height=elevation)
            except ValueError:
                # ... or just lat and lon
                lat, lon = gps.gpsFromString(lat, height=elevation)
        else:
            # if it is a tuple
            try:
                lat, lon, elevation = gps.splitGPS(lat, keep_deg=True)
            # or if it is just a single value
            # NOTE(review): (AttributeError, Exception) is equivalent to
            # catching Exception — deliberate best-effort fallback that keeps
            # the scalar lat/lon arguments unchanged
            except (AttributeError, Exception):
                pass
        self.gps_lat = lat
        self.gps_lon = lon
        if elevation is not None:
            self.elevation_m = elevation
    def addObjectHeightInformation(self, points_feet: Points2D, points_head: Points2D, height: Points1D,
                                   variation: Points1D, only_plot: bool = False, plot_color: bool = None):
        """
        Add a term to the camera probability used for fitting. This term includes the probability to observe the objects
        with the given feet and head positions and a known height and height variation.

        Parameters
        ----------
        points_feet : ndarray
            the position of the objects feet, dimension (2) or (Nx2)
        points_head : ndarray
            the position of the objects head, dimension (2) or (Nx2)
        height : number, ndarray
            the mean height of the objects, dimensions scalar or (N)
        variation : number, ndarray
            the standard deviation of the heights of the objects, dimensions scalar or (N). If the variation is not known
            a pymc2 stochastic variable object can be used.
        only_plot : bool, optional
            when true, the information will be ignored for fitting and only be used to plot.
        plot_color : optional
            the color used for the diagnostic plot of this term.
        """
        if not only_plot:
            # a non-numeric variation is treated as a pymc2 stochastic
            # variable: register it as an additional fit parameter and read
            # its current value (.value) on every evaluation
            if not isinstance(variation, (float, int)):
                self.additional_parameters += [variation]

                def heigthInformation(points_feet=points_feet, points_head=points_head, height=height,
                                      variation=variation):
                    height_distribution = stats.norm(loc=height, scale=variation.value)
                    # get the height of the penguins
                    heights = self.getObjectHeight(points_feet, points_head)
                    # the probability that the objects have this height
                    return np.sum(height_distribution.logpdf(heights))
            else:
                # fixed numeric variation: the distribution can be frozen once
                height_distribution = stats.norm(loc=height, scale=variation)

                def heigthInformation(points_feet=points_feet, points_head=points_head, height_distribution=height_distribution):
                    # get the height of the penguins
                    heights = self.getObjectHeight(points_feet, points_head)
                    # the probability that the objects have this height
                    return np.sum(height_distribution.logpdf(heights))
            self.log_prob.append(heigthInformation)

        def plotHeightPoints(points_feet=points_feet, points_head=points_head, color=plot_color):
            import matplotlib.pyplot as plt
            p, = plt.plot(points_feet[..., 0], points_feet[..., 1], "_", label="feet", color=color)
            # get the feet positions in the world
            point3D_feet = self.spaceFromImage(points_feet, Z=0)
            # move the feet points up by the mean height and re-project to
            # obtain the expected head positions
            point3D_feet[..., 2] += height
            projected_head = self.imageFromSpace(point3D_feet)
            plt.scatter(projected_head[..., 0], projected_head[..., 1], label="heads", facecolors='none',
                        edgecolors=p.get_color())
            plt.plot(points_head[..., 0], points_head[..., 1], "+", label="heads fitted", color=p.get_color())
            # connect measured and projected heads pairwise (nan rows break the line)
            data = np.concatenate(([points_head], [projected_head], [np.ones(points_head.shape)*np.nan]))
            if len(data.shape) == 3:
                data = data.transpose(1, 0, 2).reshape((-1, 2))
            else:
                data = data.reshape((-1, 2))
            plt.plot(data[..., 0], data[..., 1], "-", color=p.get_color())
        self.info_plot_functions.append(plotHeightPoints)
    def addObjectLengthInformation(self, points_front: Points2D, points_back: Points2D, length: Points1D,
                                   variation: Points1D, Z: Number = 0, only_plot: bool = False,
                                   plot_color: bool = None):
        """
        Add a term to the camera probability used for fitting. This term includes the probability to observe the objects
        with a given length lying flat on the surface. The objects are assumed to be like flat rods lying on the z=0 surface.

        Parameters
        ----------
        points_front : ndarray
            the position of the objects front, dimension (2) or (Nx2)
        points_back : ndarray
            the position of the objects back, dimension (2) or (Nx2)
        length : number, ndarray
            the mean length of the objects, dimensions scalar or (N)
        variation : number, ndarray
            the standard deviation of the lengths of the objects, dimensions scalar or (N). If the variation is not known
            a pymc2 stochastic variable object can be used.
        Z : number, optional
            the height of the plane the objects lie on (default 0).
        only_plot : bool, optional
            when true, the information will be ignored for fitting and only be used to plot.
        plot_color : optional
            the color used for the diagnostic plot of this term.
        """
        if not only_plot:
            # a non-numeric variation is treated as a pymc2 stochastic
            # variable: register it as an additional fit parameter and read
            # its current value (.value) on every evaluation
            if not isinstance(variation, (float, int)):
                self.additional_parameters += [variation]

                def lengthInformation(points_front=points_front, points_back=points_back, length=length,
                                      variation=variation, Z=Z):
                    length_distribution = stats.norm(loc=length, scale=variation.value)
                    # get the length of the objects
                    heights = self.getObjectLength(points_front, points_back, Z)
                    # the probability that the objects have this height
                    return np.sum(length_distribution.logpdf(heights))
            else:
                # fixed numeric variation: the distribution can be frozen once
                length_distribution = stats.norm(loc=length, scale=variation)

                def lengthInformation(points_front=points_front, points_back=points_back,
                                      length_distribution=length_distribution, Z=Z):
                    # get the length of the objects
                    heights = self.getObjectLength(points_front, points_back, Z)
                    # the probability that the objects have this height
                    return np.sum(length_distribution.logpdf(heights))
            self.log_prob.append(lengthInformation)

        def plotHeightPoints(points_front=points_front, points_back=points_back, Z=Z, color=plot_color):
            import matplotlib.pyplot as plt
            p, = plt.plot(points_front[..., 0], points_front[..., 1], "_", label="front", color=color)
            # get the back positions in the world
            point3D_front = self.spaceFromImage(points_front, Z=Z)
            point3D_back = self.spaceFromImage(points_back, Z=Z)
            # unit direction from front to back, scaled by the mean length,
            # gives the expected back position on the plane
            difference = point3D_back - point3D_front
            difference /= np.linalg.norm(difference, axis=-1)[..., None]
            predicted_back = point3D_front + difference * length
            projected_back = self.imageFromSpace(predicted_back)
            plt.scatter(projected_back[..., 0], projected_back[..., 1], label="back", facecolors='none',
                        edgecolors=p.get_color())
            plt.plot(points_back[..., 0], points_back[..., 1], "+", label="back fitted", color=p.get_color())
            # connect measured and predicted backs pairwise (nan rows break the line)
            data = np.concatenate(([points_front], [projected_back], [np.ones(points_front.shape) * np.nan]))
            if len(data.shape) == 3:
                data = data.transpose(1, 0, 2).reshape((-1, 2))
            else:
                data = data.reshape((-1, 2))
            plt.plot(data[..., 0], data[..., 1], "-", color=p.get_color())
        self.info_plot_functions.append(plotHeightPoints)
    def addLandmarkInformation(self, lm_points_image: Points2D, lm_points_space: Points3D, uncertainties: Points1D,
                               only_plot: bool = False, plot_color: bool = None):
        """
        Add a term to the camera probability used for fitting. This term includes the probability to observe the given
        landmarks and the specified positions in the image.

        Parameters
        ----------
        lm_points_image : ndarray
            the pixel positions of the landmarks in the image, dimension (2) or (Nx2)
        lm_points_space : ndarray
            the **space** positions of the landmarks, dimension (3) or (Nx3)
        uncertainties : number, ndarray
            the standard deviation uncertainty of the positions in the **space** coordinates. Typically for landmarks
            obtained by gps, it could be e.g. [3, 3, 5], dimensions scalar, (3) or (Nx3)
        only_plot : bool, optional
            when true, the information will be ignored for fitting and only be used to plot.
        plot_color : optional
            the color used for the diagnostic plot of this term.
        """
        uncertainties = np.array(uncertainties)
        # sample points along each viewing ray in a window of +-2 sigma
        # around the landmark's distance from the camera
        offset = np.max(uncertainties)
        sampled_offsets = np.linspace(-2*offset, +2*offset, 1000)
        # promote single points/uncertainties to batched form so the
        # broadcasting below works for both (2)/(3) and (Nx2)/(Nx3) inputs
        if len(lm_points_image.shape) == 1:
            lm_points_image = lm_points_image[None, ...]
        if len(lm_points_space.shape) == 1:
            lm_points_space = lm_points_space[None, ...]
        if len(uncertainties.shape) == 1:
            uncertainties = uncertainties[None, ..., None]
        else:
            uncertainties = uncertainties[..., None]

        def landmarkInformation(lm_points_image=lm_points_image, lm_points_space=lm_points_space, uncertainties=uncertainties):
            # rays through the landmark pixels
            origins, lm_rays = self.getRay(lm_points_image, normed=True)
            # closest point of each ray to its landmark
            nearest_point = ray.getClosestPointFromLine(origins, lm_rays, lm_points_space)
            distance_from_camera = np.linalg.norm(nearest_point-np.array([self.pos_x_m, self.pos_y_m, self.elevation_m]), axis=-1)
            # distances along the ray at which to evaluate the landmark distribution
            factor = distance_from_camera[..., None] + sampled_offsets
            distribution = stats.norm(lm_points_space[..., None], uncertainties)
            points_on_rays = origins[None, :, None] + lm_rays[:, :, None] * factor[:, None, :]
            return np.sum(distribution.logpdf(points_on_rays))
        if not only_plot:
            self.log_prob.append(landmarkInformation)

        def plotLandmarkPoints(lm_points_image=lm_points_image, lm_points_space=lm_points_space, color=plot_color):
            import matplotlib.pyplot as plt
            lm_projected_image = self.imageFromSpace(lm_points_space)
            p, = plt.plot(lm_points_image[..., 0], lm_points_image[..., 1], "+", label="landmarks fitted", color=color)
            plt.scatter(lm_projected_image[..., 0], lm_projected_image[..., 1], label="landmarks", facecolors='none', edgecolors=p.get_color())
            # connect measured and projected landmarks pairwise (nan rows break the line)
            data = np.concatenate(([lm_points_image], [lm_projected_image], [np.ones(lm_points_image.shape) * np.nan]))
            if len(data.shape) == 3:
                data = data.transpose(1, 0, 2).reshape((-1, 2))
            else:
                data = data.reshape((-1, 2))
            plt.plot(data[..., 0], data[..., 1], "-", color=p.get_color())
        self.info_plot_functions.append(plotLandmarkPoints)
def addHorizonInformation(self, horizon: Points2D, uncertainty=Points1D,
only_plot: bool = False, plot_color: bool = None):
"""
Add a term to the camera probability used for fitting. This term includes the probability to observe the horizon
at the given pixel positions.
Parameters
----------
horizon : ndarray
the pixel positions of points on the horizon in the image, dimension (2) or (Nx2)
uncertainty : number, ndarray
the pixels offset, how clear the horizon is visible in the image, dimensions () or (N)
only_plot : bool, optional
when true, the information will be ignored for fitting and only be used to plot.
"""
# ensure that input is an numpy array
horizon = np.array(horizon)
def horizonInformation(horizon=horizon, uncertainty=uncertainty):
# evaluate the horizon at the provided x coordinates
image_horizon = self.getImageHorizon(horizon[..., 0])
# calculate the difference of the provided to the estimated horizon in y pixels
horizon_deviation = horizon[..., 1] - image_horizon[..., 1]
# the distribution for the uncertainties
distribution = stats.norm(loc=0, scale=uncertainty)
# calculated the summed log probability
return np.sum(distribution.logpdf(horizon_deviation))
if not only_plot:
self.log_prob.append(horizonInformation)
def plotHorizonPoints(horizon=horizon, color=plot_color):
import matplotlib.pyplot as plt
image_horizon = self.getImageHorizon(horizon[..., 0])
if 0:
p, = plt.plot(image_horizon[..., 0], image_horizon[..., 1], "+", label="horizon fitted", color=color)
plt.scatter(horizon[..., 0], horizon[..., 1], label="horizon", facecolors='none', edgecolors=p.get_color())
else:
p, = plt.plot(horizon[..., 0], horizon[..., 1], "+", label="horizon", color=color)
plt.scatter(image_horizon[..., 0], image_horizon[..., 1], label="horizon fitted", facecolors='none', edgecolors=p.get_color())
image_horizon_line = self.getImageHorizon(np.arange(self.image_width_px))
plt.plot(image_horizon_line[..., 0], image_horizon_line[..., 1], "--", color=p.get_color())
data = np.concatenate(([horizon], [image_horizon], [np.ones(horizon.shape) * np.nan]))
if len(data.shape) == 3:
data = data.transpose(1, 0, 2).reshape((-1, 2))
else:
data = data.reshape((-1, 2))
plt.plot(data[..., 0], data[..., 1], "-", color=p.get_color())
self.info_plot_functions.append(plotHorizonPoints)
def distanceToHorizon(self) -> float:
"""
Calculates the distance of the camera's position to the horizon of the earth. The horizon depends on the radius
of the earth and the elevation of the camera.
Returns
-------
distance : number
the distance to the horizon.
"""
return np.sqrt(2 * self.R_earth ** 2 * (1 - self.R_earth / (self.R_earth + self.elevation_m)))
    def getImageHorizon(self, pointsX: Points1D = None) -> Points2D:
        """
        This function calculates the position of the horizon in the image sampled at the points x=0, x=im_width/2,
        x=im_width.

        Parameters
        ----------
        pointsX : ndarray, optional
            the x positions of the horizon to determine, default is [0, image_width/2, image_width], dimensions () or (N)

        Returns
        -------
        horizon : ndarray
            the points im camera image coordinates of the horizon, dimensions (2), or (Nx2).
        """
        d = self.distanceToHorizon()
        if pointsX is None:
            pointsX = [0, self.image_width_px/2, self.image_width_px]
        pointsX = np.array(pointsX)
        # candidate y coordinates: every integer pixel row of the image
        pointsY = np.arange(0, self.image_height_px)
        # a scalar x is wrapped in an array and unwrapped again at the end
        if len(pointsX.shape) == 0:
            pointsX = np.array([pointsX])
            single_point = True
        else:
            single_point = False
        points = []
        # for every x-coordinate where we want to determine the horizon
        for x in pointsX:
            # test all y points of the image
            p = np.vstack((np.ones(len(pointsY))*x, pointsY)).T
            # transform them to the space with a fixed distance from the camera (the distance to the horizon)
            # and select the point with the z coordinate closest to 0
            # (the nanargmin *index* equals the pixel row because pointsY is
            # arange(0, height))
            try:
                y = np.nanargmin(np.abs(self.spaceFromImage(p, D=d)[:, 2]))
            except ValueError:
                # all candidates were nan: no horizon visible at this x
                y = np.nan
            # add the found point to the list
            points.append([x, y])
        if single_point:
            return np.array([x, y])
        return np.array(points)
def getPos(self) -> Points3D:
return np.array([self.pos_x_m, self.pos_y_m, self.elevation_m])
def getImageBorder(self, resolution: Number = 1) -> Points3D:
"""
Get the border of the image in a top view. Useful for drawing the field of view of the camera in a map.
Parameters
----------
resolution : number, optional
the pixel distance between neighbouring points.
Returns
-------
border : ndarray
the border of the image in **space** coordinates, dimensions (Nx3)
"""
w, h = self.projection.parameters.image_width_px, self.projection.parameters.image_height_px
border = []
for y in np.arange(0, h, resolution):
border.append([0, y])
for x in np.arange(0, w, resolution):
border.append([x, h])
for y in np.arange(h, 0, -resolution):
border.append([w, y])
for x in np.arange(w, 0, -resolution):
border.append([x, 0])
return np.array(border)
def getCameraCone(self, project_to_ground: bool = False, D: Number = 1) -> Points3D:
    """
    The cone of the camera's field of view. This includes the border of the image and lines to the origin of the
    camera.
    Parameters
    ----------
    project_to_ground : bool, optional
        if True, project the image border onto the Z=0 plane; otherwise place the
        border at distance D from the camera, default False.
    D : number, optional
        the distance from the camera at which to place the image border when
        project_to_ground is False, default 1.
    Returns
    -------
    cone: ndarray
        the cone of the camera in **space** coordinates, dimensions (Nx3)
    """
    w, h = self.projection.parameters.image_width_px, self.projection.parameters.image_height_px
    if project_to_ground:
        # trace the whole image border pixel by pixel and project it to the ground
        border = []
        corner_indices = [0]
        # left edge (x = 0)
        for y in range(h):
            border.append([0, y])
        corner_indices.append(len(border))
        # bottom edge (y = h)
        for x in range(w):
            border.append([x, h])
        corner_indices.append(len(border))
        # right edge (x = w), walked back up
        for y in np.arange(h, 0, -1):
            border.append([w, y])
        corner_indices.append(len(border))
        # top edge (y = 0), walked back towards the first corner
        for x in np.arange(w, 0, -1):
            border.append([x, 0])
        corner_indices.append(len(border))
        border = list(self.spaceFromImage(border, Z=0))
    else:
        # only the four corners are needed when the border sits at a fixed distance D
        border = []
        corner_indices = [0]
        border.append([0, h])
        corner_indices.append(len(border))
        border.append([w, h])
        corner_indices.append(len(border))
        border.append([w, 0])
        corner_indices.append(len(border))
        border.append([0, 0])
        corner_indices.append(len(border))
        border.append([0, h])
        corner_indices.append(len(border))
        border = list(self.spaceFromImage(border, D=D))
    # the apex of the cone: the camera origin in space coordinates
    origin = self.orientation.spaceFromCamera([0, 0, 0])
    # append a line from each recorded corner to the camera origin, separated by
    # nan points so plotting libraries break the polyline between segments.
    # NOTE(review): the last entry of corner_indices equals the pre-append border
    # length, so border[corner_index] for it resolves to the first appended nan
    # point -- looks harmless for plotting, but confirm it is intended.
    for corner_index in corner_indices:
        border.append([np.nan, np.nan, np.nan])
        border.append(origin)
        border.append(border[corner_index])
    return np.array(border)
def imageFromSpace(self, points: Points3D, hide_backpoints: bool = True) -> Points2D:
    """
    Convert points (Nx3) from the **space** coordinate system to the **image** coordinate system.
    The transformation chain is: space -> camera (orientation), camera -> image
    (projection), image -> distorted image (lens).
    Parameters
    ----------
    points : ndarray
        the points in **space** coordinates to transform, dimensions (3), (Nx3)
    hide_backpoints : bool, optional
        whether to hide points behind the camera, default True.
    Returns
    -------
    points : ndarray
        the points in the **image** coordinate system, dimensions (2), (Nx2)
    Examples
    --------
    >>> import cameratransform as ct
    >>> cam = ct.Camera(ct.RectilinearProjection(focallength_px=3729, image=(4608, 2592)),
    >>>                 ct.SpatialOrientation(elevation_m=15.4, tilt_deg=85))
    transform a single point from the space to the image:
    >>> cam.imageFromSpace([-4.17, 45.32, 0.])
    [1969.52 2209.73]
    or multiple points in one go:
    >>> cam.imageFromSpace([[-4.03, 43.96, 0.], [-8.57, 47.91, 0.]])
    [[1971.05 2246.95]
     [1652.73 2144.53]]
    """
    # make sure we operate on an ndarray
    space_points = np.array(points)
    # rotate/translate the points into the camera coordinate system
    camera_points = self.orientation.cameraFromSpace(space_points)
    # project onto the (undistorted) image plane
    image_points = self.projection.imageFromCamera(camera_points, hide_backpoints=hide_backpoints)
    # finally apply the lens distortion
    return self.lens.distortedFromImage(image_points)
def getRay(self, points: Points2D, normed: bool = False) -> Tuple[Points3D, Points3D]:
    """
    As the transformation from the **image** coordinate system to the **space** coordinate system is not unique,
    **image** points can only be uniquely mapped to a ray in **space** coordinates.
    Parameters
    ----------
    points : ndarray
        the points in **image** coordinates for which to get the ray, dimensions (2), (Nx2)
    normed : bool, optional
        whether the returned direction vectors should be normed, default False.
    Returns
    -------
    offset : ndarray
        the origin of the camera (= starting point of the rays) in **space** coordinates, dimensions (3)
    rays : ndarray
        the rays in the **space** coordinate system, dimensions (3), (Nx3)
    Examples
    --------
    >>> import cameratransform as ct
    >>> cam = ct.Camera(ct.RectilinearProjection(focallength_px=3729, image=(4608, 2592)),
    >>>                 ct.SpatialOrientation(elevation_m=15.4, tilt_deg=85))
    get the ray of a point in the image:
    >>> offset, ray = cam.getRay([1968, 2291])
    >>> offset
    [0.00 0.00 15.40]
    >>> ray
    [-0.09 0.97 -0.35]
    """
    image_points = np.array(points)
    # the ray origin is the camera position (the camera-space origin expressed in space coordinates)
    ray_origin = self.orientation.spaceFromCamera([0, 0, 0])
    # undo the lens distortion, obtain camera-space ray directions from the
    # projection, then rotate those directions into space coordinates
    undistorted_points = self.lens.imageFromDistorted(image_points)
    camera_rays = self.projection.getRay(undistorted_points, normed=normed)
    ray_directions = self.orientation.spaceFromCamera(camera_rays, direction=True)
    return ray_origin, ray_directions
def spaceFromImage(self, points: Points2D, X: Points1D = None, Y: Points1D = None, Z: Points1D = 0,
                   D: Points1D = None, mesh: Mesh3D = None) -> Points3D:
    """
    Convert points (Nx2) from the **image** coordinate system to the **space** coordinate system. This is not a unique
    transformation, therefore an additional constraint has to be provided. The X, Y, or Z coordinate(s) of the target
    points can be provided or the distance D from the camera.
    Parameters
    ----------
    points : ndarray
        the points in **image** coordinates to transform, dimensions (2), (Nx2)
    X : number, ndarray, optional
        the X coordinate in **space** coordinates of the target points, dimensions scalar, (N)
    Y : number, ndarray, optional
        the Y coordinate in **space** coordinates of the target points, dimensions scalar, (N)
    Z : number, ndarray, optional
        the Z coordinate in **space** coordinates of the target points, dimensions scalar, (N), default 0
    D : number, ndarray, optional
        the distance in **space** coordinates of the target points from the camera, dimensions scalar, (N)
    mesh : ndarray, optional
        project the image coordinates onto the mesh in **space** coordinates. The mesh is a list of M triangles,
        consisting of three 3D points each. Dimensions, (3x3), (Mx3x3)
    Returns
    -------
    points : ndarray
        the points in the **space** coordinate system, dimensions (3), (Nx3)
    Examples
    --------
    >>> import cameratransform as ct
    >>> cam = ct.Camera(ct.RectilinearProjection(focallength_px=3729, image=(4608, 2592)),
    >>>                 ct.SpatialOrientation(elevation_m=15.4, tilt_deg=85))
    transform a single point (implying the condition Z=0):
    >>> cam.spaceFromImage([1968 , 2291])
    [-3.93 42.45 0.00]
    transform multiple points:
    >>> cam.spaceFromImage([[1968 , 2291], [1650, 2189]])
    [[-3.93 42.45 0.00]
     [-8.29 46.11 -0.00]]
    points that cannot be projected on the image, because they are behind the camera (for the RectilinearProjection)
    are returned with nan entries:
    >>> cam.imageFromSpace([-4.17, -10.1, 0.])
    [nan nan]
    specify a y coordinate as for the back projection.
    >>> cam.spaceFromImage([[1968 , 2291], [1650, 2189]], Y=45)
    [[-4.17 45.00 -0.93]
     [-8.09 45.00 0.37]]
    or different y coordinates for each point:
    >>> cam.spaceFromImage([[1968 , 2291], [1650, 2189]], Y=[43, 45])
    [[-3.98 43.00 -0.20]
     [-8.09 45.00 0.37]]
    """
    # ensure that the points are provided as an array
    points = np.array(points)
    # get the index which coordinate to force to the given value;
    # dtype=object keeps scalars, arrays and None side by side
    given = np.array([X, Y, Z], dtype=object)
    if X is not None:
        index = 0
    elif Y is not None:
        index = 1
    elif Z is not None:
        index = 2
    # NOTE(review): if X, Y and Z are all explicitly passed as None while D and
    # mesh are also None, `index` is never assigned and the factor computation
    # below raises NameError -- confirm whether this should raise ValueError instead.
    # if a mesh is provided, intersect the rays with the mesh
    if mesh is not None:
        # get the rays from the image points
        offset, direction = self.getRay(points)
        return ray.ray_intersect_triangle(offset, direction, mesh)
    # transform to a given distance
    if D is not None:
        # get the rays from the image points (in this case it has to be normed)
        offset, direction = self.getRay(points, normed=True)
        # the factor is than simple the distance
        factor = D
    else:
        # get the rays from the image points
        offset, direction = self.getRay(points)
        # solve the line equation for the factor (how many times the direction vector needs to be added to the origin point)
        factor = (given[index] - offset[..., index]) / direction[..., index]
    if not isinstance(factor, np.ndarray):
        # if factor is not an array, we don't need to specify the broadcasting
        points = direction * factor + offset
    else:
        # apply the factor to the direction vector plus the offset
        points = direction * factor[:, None] + offset[None, :]
    # ignore points that are behind the camera (e.g. trying to project points above the horizon to the ground)
    points[factor < 0] = np.nan
    return points
def gpsFromSpace(self, points: Points3D) -> Points3D:
    """
    Convert points (Nx3) from the **space** coordinate system to the **gps** coordinate system.
    Parameters
    ----------
    points : ndarray
        the points in **space** coordinates to transform, dimensions (3), (Nx3)
    Returns
    -------
    points : ndarray
        the points in the **gps** coordinate system, dimensions (3), (Nx3)
    """
    # the camera's own gps position and elevation act as the reference point
    camera_location = np.array([self.gps_lat, self.gps_lon, self.elevation_m])
    return gps.gpsFromSpace(points, camera_location)
def spaceFromGPS(self, points: Points3D) -> Points3D:
    """
    Convert points (Nx3) from the **gps** coordinate system to the **space** coordinate system.
    Parameters
    ----------
    points : ndarray
        the points in **gps** coordinates to transform, dimensions (3), (Nx3)
    Returns
    -------
    points : ndarray
        the points in the **space** coordinate system, dimensions (3), (Nx3)
    """
    # the camera's gps position acts as the reference point
    reference = np.array([self.gps_lat, self.gps_lon])
    return gps.spaceFromGPS(points, reference)
def gpsFromImage(self, points: Points2D, X: Points3D = None, Y: Points3D = None, Z: Points3D = 0,
                 D: Points3D = None) -> Points3D:
    """
    Convert points (Nx2) from the **image** coordinate system to the **gps** coordinate system.
    Chains :py:meth:`spaceFromImage` (with the same constraint arguments X, Y, Z or D)
    and :py:meth:`gpsFromSpace`.
    Parameters
    ----------
    points : ndarray
        the points in **image** coordinates to transform, dimensions (2), (Nx2)
    Returns
    -------
    points : ndarray
        the points in the **gps** coordinate system, dimensions (3), (Nx3)
    """
    space_points = self.spaceFromImage(points, X=X, Y=Y, Z=Z, D=D)
    return self.gpsFromSpace(space_points)
def imageFromGPS(self, points: Points3D) -> Points2D:
    """
    Convert points (Nx3) from the **gps** coordinate system to the **image** coordinate system.
    Chains :py:meth:`spaceFromGPS` and :py:meth:`imageFromSpace`.
    Parameters
    ----------
    points : ndarray
        the points in **gps** coordinates to transform, dimensions (3), (Nx3)
    Returns
    -------
    points : ndarray
        the points in the **image** coordinate system, dimensions (2), (Nx2)
    """
    space_points = self.spaceFromGPS(points)
    return self.imageFromSpace(space_points)
def getObjectHeight(self, point_feet: Points2D, point_heads: Points2D, Z: Points1D = 0) -> Points1D:
    """
    Calculate the height of objects in the image, assuming the Z position of the objects is known, e.g. they are
    assumed to stand on the Z=0 plane.
    Parameters
    ----------
    point_feet : ndarray
        the positions of the feet, dimensions: (2) or (Nx2)
    point_heads : ndarray
        the positions of the heads, dimensions: (2) or (Nx2)
    Z : number, ndarray, optional
        the Z position of the objects, dimensions: scalar or (N), default 0
    Returns
    -------
    heights: ndarray
        the height of the objects in meters, dimensions: () or (N)
    """
    # feet positions in space, pinned to the given Z plane
    feet_world = self.spaceFromImage(point_feet, Z=Z)
    # project the heads twice, once constrained to the feet's Y and once to the
    # feet's X, and average both estimates
    head_world_y = self.spaceFromImage(point_heads, Y=feet_world[..., 1])
    head_world_x = self.spaceFromImage(point_heads, X=feet_world[..., 0])
    head_world = np.mean([head_world_y, head_world_x], axis=0)
    # the height is the z difference between head and feet
    return head_world[..., 2] - feet_world[..., 2]
def getObjectLength(self, point_front: Points2D, point_back: Points2D, Z: Points1D = 0) -> Points1D:
    """
    Calculate the length of objects in the image, assuming the Z position of the objects is known, e.g. they are
    assumed to lie flat on the Z=0 plane.
    Parameters
    ----------
    point_front : ndarray
        the positions of the front end, dimensions: (2) or (Nx2)
    point_back : ndarray
        the positions of the back end, dimensions: (2) or (Nx2)
    Z : number, ndarray, optional
        the Z position of the objects, dimensions: scalar or (N), default 0
    Returns
    -------
    lengths: ndarray
        the lengths of the objects in meters, dimensions: () or (N)
    """
    # project both endpoints onto the given Z plane and measure the euclidean distance
    front_world = self.spaceFromImage(point_front, Z=Z)
    back_world = self.spaceFromImage(point_back, Z=Z)
    return np.linalg.norm(front_world - back_world, axis=-1)
def _getUndistortMap(self, extent=None, scaling=None):
    """
    Build (and cache) the per-pixel remap used by :py:meth:`undistortImage`.
    For every target pixel of the undistorted image the map stores the
    (distorted) source coordinates, as float32 with shape (2, H, W).
    """
    # if no extent is given, take the maximum extent from the image border
    if extent is None:
        extent = [0, self.image_width_px, 0, self.image_height_px]
    # if no scaling is given, scale so that the resulting image has an equal amount of pixels as the original image
    if scaling is None:
        scaling = 1
    # if we have cached the map, use the cached map
    if self.map_undistort is not None and \
            self.last_extent_undistort == extent and \
            self.last_scaling_undistort == scaling:
        return self.map_undistort
    # get a mesh grid
    mesh = np.array(np.meshgrid(np.arange(extent[0], extent[1], scaling),
                                np.arange(extent[2], extent[3], scaling)))
    # convert it to a list of points Nx2
    mesh_points = mesh.reshape(2, mesh.shape[1] * mesh.shape[2]).T
    # transform the space points to the image
    mesh_points_shape = self.lens.distortedFromImage(mesh_points)
    # reshape the map and cache it
    # NOTE(review): axis 1 is reversed here ([:, ::-1, :]), presumably to flip
    # the y axis for the remapping -- confirm against undistortImage's [::-1].
    self.map_undistort = mesh_points_shape.T.reshape(mesh.shape).astype(np.float32)[:, ::-1, :]
    self.last_extent_undistort = extent
    self.last_scaling_undistort = scaling
    # return the calculated map
    return self.map_undistort
def undistortImage(self, image: Image, extent: List[Number] = None, scaling: Number = None,
                   do_plot: bool = False, alpha: Number = None, skip_size_check: bool = False) -> Image:
    """
    Applies the undistortion of the lens model to the image. The purpose of this function is mainly to check the
    sanity of a lens transformation. As CameraTransform includes the lens transformation in any calculations, it
    is not necessary to undistort images before using them.
    Parameters
    ----------
    image : ndarray
        the image to undistort.
    extent : list, optional
        the extent in pixels of the resulting image. This can be used to crop the resulting undistort image.
    scaling : number, optional
        the number of old pixels that are used to calculate a new pixel. A higher value results in a smaller target
        image.
    do_plot : bool, optional
        whether to plot the resulting image directly in a matplotlib plot.
    alpha : number, optional
        when plotting an alpha value can be specified, useful when comparing multiple images.
    skip_size_check : bool, optional
        if true, the size of the image is not checked to match the size of the cameras image.
    Returns
    -------
    image : ndarray
        the undistorted image
    """
    import cv2
    # check if the size of the image matches the size of the camera
    if not skip_size_check:
        # bug fix: the message previously read "The with of the image"
        assert image.shape[1] == self.image_width_px, "The width of the image (%d) does not match the image width of the camera (%d)." % (image.shape[1], self.image_width_px)
        assert image.shape[0] == self.image_height_px, "The height of the image (%d) does not match the image height of the camera (%d)." % (image.shape[0], self.image_height_px)
    x, y = self._getUndistortMap(extent=extent, scaling=scaling)
    # ensure that the image has an alpha channel (to enable alpha for the points outside the image)
    if len(image.shape) == 2:
        # grayscale images are remapped as-is
        pass
    elif image.shape[2] == 3:
        image = np.dstack((image, np.ones(shape=(image.shape[0], image.shape[1], 1), dtype="uint8") * 255))
    # remap the pixels and flip the rows back (the map is stored y-flipped)
    image = cv2.remap(image, x, y,
                      interpolation=cv2.INTER_NEAREST,
                      borderValue=[0, 1, 0, 0])[::-1]  # , borderMode=cv2.BORDER_TRANSPARENT)
    if do_plot:
        import matplotlib.pyplot as plt
        extent = self.last_extent_undistort.copy()
        # swap the y limits so the plot matches the row-reversed image
        extent[2], extent[3] = extent[3]-1, extent[2]-1
        plt.imshow(image, extent=extent, alpha=alpha)
    return image
def _getMap(self, extent=None, scaling=None, Z=0, hide_backpoints=True):
    """
    Build (and cache) the per-pixel remap used by :py:meth:`getTopViewOfImage`.
    Every point of the top-view grid (defined by extent/scaling on the Z plane)
    is mapped to its position in the camera image, as float32 with shape (2, H, W).
    """
    # if no extent is given, take the maximum extent from the image border
    if extent is None:
        border = self.getImageBorder()
        extent = [np.nanmin(border[:, 0]), np.nanmax(border[:, 0]),
                  np.nanmin(border[:, 1]), np.nanmax(border[:, 1])]
    # if we have cached the map, use the cached map
    if self.map is not None and \
            all(self.last_extent == np.array(extent)) and \
            (self.last_scaling == scaling):
        return self.map
    # if no scaling is given, scale so that the resulting image has an equal amount of pixels as the original image
    if scaling is None:
        scaling = np.sqrt((extent[1] - extent[0]) * (extent[3] - extent[2])) / \
                  np.sqrt((self.projection.parameters.image_width_px * self.projection.parameters.image_height_px))
    # get a mesh grid
    mesh = np.array(np.meshgrid(np.arange(extent[0], extent[1], scaling),
                                np.arange(extent[2], extent[3], scaling)))
    # convert it to a list of points Nx2
    mesh_points = mesh.reshape(2, mesh.shape[1] * mesh.shape[2]).T
    # append the fixed Z coordinate to every grid point
    mesh_points = np.hstack((mesh_points, Z*np.ones((mesh_points.shape[0], 1))))
    # transform the space points to the image
    mesh_points_shape = self.imageFromSpace(mesh_points, hide_backpoints=hide_backpoints)
    # reshape the map and cache it
    # NOTE(review): axis 1 is reversed ([:, ::-1, :]), presumably to flip the
    # y axis of the top view -- confirm against getTopViewOfImage.
    self.map = mesh_points_shape.T.reshape(mesh.shape).astype(np.float32)[:, ::-1, :]
    self.last_extent = extent
    self.last_scaling = scaling
    # return the calculated map
    return self.map
def getTopViewOfImage(self, image: Image, extent: List[Number] = None, scaling: Number = None,
                      do_plot: bool = False, alpha: Number = None, Z: Number = 0., skip_size_check: bool = False,
                      hide_backpoints: bool = True) -> Image:
    """
    Project an image to a top view projection. This will be done using a grid with the dimensions of the extent
    ([x_min, x_max, y_min, y_max]) in meters and the scaling, giving a resolution. For convenience, the image can
    be plotted directly. The projected grid is cached, so if the function is called a second time with the same
    parameters, the second call will be faster.
    Parameters
    ----------
    image : ndarray
        the image as a numpy array.
    extent : list, optional
        the extent of the resulting top view in meters: [x_min, x_max, y_min, y_max]. If no extent is given a suitable
        extent is guessed. If a horizon is visible in the image, the guessed extent will in most cases be too streched.
    scaling : number, optional
        the scaling factor, how many meters is the side length of each pixel in the top view. If no scaling factor is
        given, a good scaling factor is guessed, trying to get about the same number of pixels in the top view as in
        the original image.
    do_plot : bool, optional
        whether to directly plot the resulting image in a matplotlib figure.
    alpha : number, optional
        an alpha value used when plotting the image. Useful if multiple images should be overlaid.
    Z : number, optional
        the "height" of the plane on which to project.
    skip_size_check : bool, optional
        if true, the size of the image is not checked to match the size of the cameras image.
    Returns
    -------
    image : ndarray
        the top view projected image
    """
    import cv2
    # check if the size of the image matches the size of the camera
    if not skip_size_check:
        # bug fix: the message previously read "The with of the image"
        assert image.shape[1] == self.image_width_px, "The width of the image (%d) does not match the image width of the camera (%d)." % (image.shape[1], self.image_width_px)
        assert image.shape[0] == self.image_height_px, "The height of the image (%d) does not match the image height of the camera (%d)." % (image.shape[0], self.image_height_px)
    # get the mapping
    x, y = self._getMap(extent=extent, scaling=scaling, Z=Z, hide_backpoints=hide_backpoints)
    # ensure that the image has an alpha channel (to enable alpha for the points outside the image)
    if len(image.shape) == 2:
        # grayscale images are remapped as-is
        pass
    elif image.shape[2] == 3:
        image = np.dstack((image, np.ones(shape=(image.shape[0], image.shape[1], 1), dtype="uint8") * 255))
    image = cv2.remap(image, x, y,
                      interpolation=cv2.INTER_NEAREST,
                      borderValue=[0, 1, 0, 0])  # , borderMode=cv2.BORDER_TRANSPARENT)
    if do_plot:
        import matplotlib.pyplot as plt
        plt.imshow(image, extent=self.last_extent, alpha=alpha)
    return image
def generateLUT(self, undef_value: Number = 0, whole_image: bool = False) -> ArrayLike:
    """
    Generate LUT to calculate area covered by one pixel in the image dependent on y position in the image
    Parameters
    ----------
    undef_value : number, optional
        what values undefined positions should have, default=0
    whole_image : bool, optional
        whether to generate the look up table for the whole image or just for a y slice
    Returns
    -------
    LUT: ndarray
        same length as image height
    """
    def get_square(points):
        # the four corners of a 1x1 pixel square centred on each point
        p0 = points + np.array([-0.5, -0.5])
        p1 = points + np.array([+0.5, -0.5])
        p2 = points + np.array([+0.5, +0.5])
        p3 = points + np.array([-0.5, +0.5])
        squares = np.array([p0, p1, p2, p3])
        # for arrays of points, move the corner axis behind the point axis -> (N, 4, 2)
        if len(squares.shape) == 3:
            return squares.transpose(1, 0, 2)
        return squares
    if whole_image:
        # one square per pixel of the entire image
        x = np.arange(0, self.image_width_px)
        y = np.arange(0, self.image_height_px)
        xv, yv = np.meshgrid(x, y)
        points = np.array([xv.flatten(), yv.flatten()]).T
    else:
        # one square per image row, sampled at the horizontal centre of the image
        y = np.arange(self.image_height_px)
        x = self.image_width_px / 2 * np.ones(len(y))
        points = np.array([x, y]).T
    # project all pixel corners onto the Z=0 plane
    squares = get_square(points).reshape(-1, 2)
    squares_space = self.spaceFromImage(squares, Z=0).reshape(-1, 4, 3)
    # the area of each projected pixel quadrilateral
    A = ray.areaOfQuadrilateral(squares_space)
    if whole_image:
        A = A.reshape(self.image_height_px, self.image_width_px)
    # pixels that do not project onto the plane (nan) get the undef_value
    A[np.isnan(A)] = undef_value
    return A
def rotateSpace(self, delta_heading: Number):
    """
    Rotates the whole camera setup, this will turn the heading and rotate the camera position
    (pos_x_m, pos_y_m) around the origin.
    Parameters
    ----------
    delta_heading : number
        the number of degrees to rotate the camera clockwise.
    """
    self.heading_deg += delta_heading
    angle = np.deg2rad(delta_heading)
    sin_a, cos_a = np.sin(angle), np.cos(angle)
    x, y = self.pos_x_m, self.pos_y_m
    # apply the clockwise rotation matrix [[c, s], [-s, c]] to the position
    self.pos_x_m = cos_a * x + sin_a * y
    self.pos_y_m = -sin_a * x + cos_a * y
def save(self, filename: str):
    """
    Saves the camera parameters to a json file.
    Parameters
    ----------
    filename : str
        the filename where to store the parameters.
    """
    parameter_names = self.parameters.parameters.keys()
    # collect all parameters; focallength_px is excluded from the export
    export_dict = {name: getattr(self, name) for name in parameter_names if name != "focallength_px"}
    # record which projection class is in use
    if isinstance(self.projection, RectilinearProjection):
        export_dict["projection"] = RECTILINEAR
    elif isinstance(self.projection, CylindricalProjection):
        export_dict["projection"] = CYLINDRICAL
    elif isinstance(self.projection, EquirectangularProjection):
        export_dict["projection"] = EQUIRECTANGULAR
    # record which lens distortion model is in use
    if isinstance(self.lens, NoDistortion):
        export_dict["lens"] = NODISTORTION
    elif isinstance(self.lens, ABCDistortion):
        export_dict["lens"] = ABCDDISTORTION
    elif isinstance(self.lens, BrownLensDistortion):
        export_dict["lens"] = BROWNLENSDISTORTION
    # write the parameters as pretty-printed json
    with open(filename, "w") as fp:
        json.dump(export_dict, fp, indent=4)
def load(self, filename: str):
    """
    Load the camera parameters from a json file.
    Parameters
    ----------
    filename : str
        the filename of the file to load.
    """
    with open(filename, "r") as fp:
        variables = json.loads(fp.read())
    # recreate the stored projection type; image size and focal length are
    # placeholders that are overwritten by the loaded parameters below
    if "projection" in variables.keys():
        if variables["projection"] == RECTILINEAR:
            projection = RectilinearProjection(image=(100, 50), focallength_px=100)
        elif variables["projection"] == CYLINDRICAL:
            projection = CylindricalProjection(image=(100, 50), focallength_px=100)
        elif variables["projection"] == EQUIRECTANGULAR:
            projection = EquirectangularProjection(image=(100, 50), focallength_px=100)
        # NOTE(review): an unrecognized "projection" value leaves the local
        # `projection` unset and raises NameError below -- confirm desired.
        variables.pop("projection")
    else:
        # default to a rectilinear projection if the file does not specify one
        projection = RectilinearProjection(image=(100, 50), focallength_px=100)
    # recreate the stored lens distortion model
    if "lens" in variables.keys():
        if variables["lens"] == NODISTORTION:
            lens = NoDistortion()
        elif variables["lens"] == ABCDDISTORTION:
            lens = ABCDistortion()#variables.get("a", None), variables.get("b", None),variables.get("c", None))
        elif variables["lens"] == BROWNLENSDISTORTION:
            lens = BrownLensDistortion()#variables.get("k1", None), variables.get("k2", None),variables.get("k3", None))
        variables.pop("lens")
    else:
        lens = None
    # re-initialize this camera with the recreated projection and lens,
    # then apply all remaining parameters from the file
    self.__init__(projection=projection, lens=lens, orientation=SpatialOrientation())
    for key in variables:
        setattr(self, key, variables[key])
def load_camera(filename: str) -> Camera:
    """
    Create a :py:class:`Camera` instance with the parameters from the file.
    Parameters
    ----------
    filename : str
        the filename of the file to load.
    Returns
    -------
    camera : :py:class:`Camera`
        the camera with the given parameters.
    """
    # start from a placeholder camera; load() re-initializes it with the
    # projection, lens and parameters stored in the file
    camera = Camera(RectilinearProjection(image=(100, 50), focallength_px=100), SpatialOrientation(), NoDistortion())
    camera.load(filename)
    return camera
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
This module inspects tag keys, especially keys that contain a colon.
'''
import audit
import re
import json
import codecs
OSM_PATH = "sample.osm"
OUTPUT = "inspect_keys.json"
get_element = audit.get_element
LOWER_COLON = audit.LOWER_COLON
'''
The function below parses tag keys into a dictionary.
If a key does not contain a colon, it is counted in the 'non-colon' dictionary.
If it contains a colon, it is split once into two parts: a type and a key.
The counts are then grouped by type.
'''
def sort_keys(filename):
    """
    Count tag keys in an OSM file, grouped by the part before the first colon.

    Keys without a colon are counted under the special 'non-colon' group; keys
    with a colon are split once into (type, key) and counted under their type.

    Parameters
    ----------
    filename : str
        path of the OSM file to scan.

    Returns
    -------
    dict
        mapping of type -> {key: count}, plus the 'non-colon' group.
    """
    result = {'non-colon': {}}
    # bug fix: iterate over the given filename, not the module-level OSM_PATH
    # bug fix: tags must be a tuple -- ('tag') is just the string 'tag'
    for element in get_element(filename, tags=('tag',)):
        k = element.attrib['k']
        if ':' in k:
            key_type, key_name = k.split(':', 1)
            # dict.get/setdefault replace the Python-2-only dict.has_key
            group = result.setdefault(key_type, {})
            group[key_name] = group.get(key_name, 0) + 1
        else:
            group = result['non-colon']
            group[k] = group.get(k, 0) + 1
    return result
if __name__ == '__main__':
    # count the keys of the sample file and dump them as readable JSON
    key_counts = sort_keys(OSM_PATH)
    with codecs.open(OUTPUT, 'w', encoding='utf-8') as out_file:
        json.dump(key_counts, out_file, indent=1, ensure_ascii=False)
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Profile(models.Model):
    """Per-user profile information, attached one-to-one to the auth User."""
    GENDER = (
        ('M', 'Male'),
        ('F', 'Female')
    )
    # on_delete=models.CASCADE was the implicit default before Django 2.0;
    # stating it explicitly keeps the model working on newer Django versions.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    age = models.IntegerField(default=0)
    gender = models.CharField(max_length=1, choices=GENDER)
    phone = models.CharField('Contact #:', max_length=16)
    about_you = models.TextField('About You :')
    # whether this profile is marked as an employee account
    employee = models.BooleanField(default=False)

    def is_employee(self):
        """Return True if this profile is marked as an employee."""
        return self.employee
    # display as a boolean icon in the Django admin list view
    is_employee.boolean = True

    def __str__(self):
        # bug fix: __str__ must return a string; returning `self` raises TypeError
        return str(self.user)
|
# -*- coding: utf-8 -*-
"""
Created on 2018/11/7 10:34
@author: royce.mao
# 构造第2阶段,小图片数字的识别检测网络
"""
from __future__ import print_function
from __future__ import absolute_import
from keras.models import Model
from keras.layers import Conv2D, Reshape, Input, Activation, Convolution2D, MaxPooling2D, ZeroPadding2D, Add, BatchNormalization, concatenate, TimeDistributed, AveragePooling2D, Flatten, Dense
from fixed_batch_normalization import FixedBatchNormalization
from roi_pooling_conv import RoiPoolingConv
from keras import backend as K
def stage_2_net(nb_classes, input_tensor, height=160, width=80):
    """
    Hand-designed, lightweight VGG-like base network with 4x downsampling
    (stage-2 network for detecting/recognizing small digit crops).

    Parameters
    ----------
    nb_classes : int
        number of output classes.
    input_tensor : keras tensor
        the input image tensor; assumed channels-last -- TODO confirm.
    height, width : int, optional
        nominal input size, used to derive the output head filter counts.

    Returns
    -------
    list
        [classification, bboxes_regression] output tensors.
    """
    bn_axis = 3  # channels-last batch-norm axis
    # branch 1: 4x downsampling via a single stride-4 convolution
    conv1 = Convolution2D(filters=16, kernel_size=3, strides=4, padding='same')(input_tensor)
    act1 = Activation('relu')(conv1)
    # NOTE(review): bn1 (and act1) are computed but never used below -- the
    # concatenation takes the raw conv1 output; confirm this is intentional.
    bn1 = BatchNormalization(axis=bn_axis)(act1)
    # branch 2: 4x downsampling via stride-2 convolution plus 2x2 max pooling
    conv2 = Convolution2D(filters=16, kernel_size=3, strides=2, padding='same')(input_tensor)
    act2 = Activation('relu')(conv2)
    bn2 = BatchNormalization(axis=bn_axis)(act2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(bn2)
    # branch 3: 4x downsampling via pooling only
    pool3 = MaxPooling2D(pool_size=(4, 4))(input_tensor)
    # fuse the three branches along the channel axis
    concat = concatenate([conv1, pool2, pool3])
    # classification output head
    classification = Convolution2D(filters=height * width * 12 * nb_classes // 12800, kernel_size=3, padding='same')(concat)
    classification = Reshape(target_shape=(-1, nb_classes))(classification)
    classification = Activation(activation='softmax', name='classification')(classification)
    # box-regression output head
    # NOTE(review): the divisor here is 1280 while the classification head uses
    # 12800 -- verify this tenfold difference is intentional.
    bboxes_regression = Convolution2D(filters=height * width * 12 * 4 // 1280, kernel_size=3, padding='same')(concat)
    bboxes_regression = Reshape(target_shape=(-1, 4*(nb_classes-1)), name='regression')(bboxes_regression)
    '''
    # 接上cls输出层
    classification = Dense(nb_classes, activation='softmax', kernel_initializer='zero')(concat)
    # 接上regr输出层
    bboxes_regression = Dense(4 * (nb_classes - 1), activation='linear', kernel_initializer='zero')(concat)
    '''
    # the model is built only to print a summary; the output tensors are returned
    detect_model = Model(inputs=input_tensor, outputs=[classification, bboxes_regression])
    detect_model.summary()
    return [classification, bboxes_regression]
def stage_2_net_vgg(nb_classes, input_tensor, height = 160, width = 80):
    """
    First 3 blocks of a VGG network (8x downsampling) with classification and
    box-regression output heads.

    Parameters
    ----------
    nb_classes : int
        number of output classes.
    input_tensor : keras tensor
        the input image tensor.
    height, width : int, optional
        nominal input size, used to derive the output head filter counts.

    Returns
    -------
    list
        [classification, bboxes_regression] output tensors.
    """
    bn_axis = 3  # NOTE(review): unused in this function
    # Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(input_tensor)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
    # # classification head (dense variant, kept for reference)
    # classification = Dense(nb_classes, activation='softmax', kernel_initializer='zero')(x)
    # # regression head (dense variant, kept for reference)
    # bboxes_regression = Dense(4 * (nb_classes - 1), activation='linear', kernel_initializer='zero')(x)
    # classification output head
    classification = Convolution2D(filters=height * width * 18 * nb_classes // 12800, kernel_size=3, padding='same')(x)
    classification = Reshape(target_shape=(-1, nb_classes))(classification)
    classification = Activation(activation='softmax', name='classification')(classification)
    # box-regression output head
    # NOTE(review): divisor 1280 vs 12800 above -- verify the tenfold difference.
    bboxes_regression = Convolution2D(filters=height * width * 18 * 4 // 1280, kernel_size=3, padding='same')(x)
    bboxes_regression = Reshape(target_shape=(-1, 4*(nb_classes-1)), name='regression')(bboxes_regression)
    detect_model = Model(inputs=input_tensor, outputs=[classification, bboxes_regression])
    detect_model.summary()
    return [classification, bboxes_regression]
def stage_2_net_res(nb_classes, input_tensor, height = 160, width = 80):
    """
    First 2 stages of a ResNet (8x downsampling) with classification and
    box-regression output heads.

    Parameters
    ----------
    nb_classes : int
        number of output classes.
    input_tensor : keras tensor
        the input image tensor.
    height, width : int, optional
        nominal input size, used to derive the output head filter counts.

    Returns
    -------
    list
        [classification, bboxes_regression] output tensors.
    """
    bn_axis = 3  # channels-last batch-norm axis
    # stem: 7x7 stride-2 convolution + 3x3 stride-2 pooling
    x = ZeroPadding2D((3, 3))(input_tensor)
    x = Convolution2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
    # NOTE: this code only support to keras 2.0.3, newest version this line will got errors.
    x = FixedBatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)
    # ResNet stage 2 (one conv block, two identity blocks)
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
    # ResNet stage 3 (one conv block, three identity blocks)
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
    # classification output head
    classification = Convolution2D(filters=height * width * 18 * nb_classes // 12800, kernel_size=3, padding='same')(x)
    classification = Reshape(target_shape=(-1, nb_classes))(classification)
    classification = Activation(activation='softmax', name='classification')(classification)
    # box-regression output head
    # NOTE(review): divisor 1280 vs 12800 above -- verify the tenfold difference.
    bboxes_regression = Convolution2D(filters=height * width * 18 * 4 // 1280, kernel_size=3, padding='same')(x)
    bboxes_regression = Reshape(target_shape=(-1, 4*(nb_classes-1)), name='regression')(bboxes_regression)
    # detect_model = Model(inputs=input_tensor, outputs=[classification, bboxes_regression])
    # detect_model.summary()
    return [classification, bboxes_regression]
def stage_2_net_res_td(nb_classes, input_tensor, input_rois, height = 160, width = 80):
    """
    ResNet stem with RoI pooling and TimeDistributed classifier/regressor heads
    (Faster-R-CNN-style second stage).

    Parameters
    ----------
    nb_classes : int
        number of output classes.
    input_tensor : keras tensor
        the input image tensor.
    input_rois : keras tensor
        the regions of interest fed to the RoI pooling layer.
    height, width : int, optional
        nominal input size, used to derive the number of RoIs.

    Returns
    -------
    list
        [classification, bboxes_regression] output tensors.
    """
    bn_axis = 3  # channels-last batch-norm axis
    # the shape entering the TimeDistributed part must match the RoiPoolingConv output
    input_shape = (height * width * 18 // 64, 20, 10, 64)
    x = ZeroPadding2D((3, 3))(input_tensor)
    x = Convolution2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
    # NOTE: this code only support to keras 2.0.3, newest version this line will got errors.
    x = FixedBatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((2, 2), name='max_pool')(x)
    # RoI pooling: map each roi (anchors vs. image) onto a fixed-size feature crop
    x = RoiPoolingConv([10, 20], height * width * 18 // 64)([x, input_rois])
    x = conv_block_td(x, 3, [64, 64, 256], stage=1, block='a', input_shape=input_shape, strides=(1, 1))
    x = identity_block_td(x, 3, [64, 64, 256], stage=1, block='b')
    x = identity_block_td(x, 3, [64, 64, 256], stage=1, block='c')
    # optional pooling before the heads, currently disabled
    # x = TimeDistributed(AveragePooling2D((2, 2)), name='avg_pool')(x)
    out = TimeDistributed(Flatten())(x)
    classification = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'),
                                     name='dense_class_{}'.format(nb_classes))(out)
    # note: no regression target for bg class
    bboxes_regression = TimeDistributed(Dense(4 * (nb_classes - 1), activation='linear', kernel_initializer='zero'),
                                        name='dense_regress_{}'.format(nb_classes))(out)
    '''
    # cls and regr 分类回归分支
    classification = TimeDistributed(Convolution2D(filters=height * width * 18 * nb_classes // 12800, kernel_size=3, padding='same'))(x)
    classification = TimeDistributed(Reshape(target_shape=(-1, nb_classes)))(classification)
    classification = Activation(activation='softmax', name='classification')(classification)
    # 接上regr输出层
    bboxes_regression = TimeDistributed(Convolution2D(filters=height * width * 18 * 4 // 1280, kernel_size=3, padding='same'))(x)
    bboxes_regression = TimeDistributed(Reshape(target_shape=(-1, 4*(nb_classes-1))), name='regression')(bboxes_regression)
    '''
    # NOTE(review): the Model is built with input_tensor only although the graph
    # also depends on input_rois -- Keras may raise a disconnected-graph error
    # here; confirm whether [input_tensor, input_rois] was intended.
    detect_model = Model(inputs=input_tensor, outputs=[classification, bboxes_regression])
    detect_model.summary()
    return [classification, bboxes_regression]
def identity_block(input_tensor, kernel_size, filters, stage, block, trainable=True):
    """ResNet identity block: a three-conv main path plus a residual skip.

    The input is added back to the main path before the final activation,
    so input and output shapes must match (no strides, no channel change
    relative to ``filters[2]``).

    :param input_tensor: 4D feature-map tensor.
    :param kernel_size: spatial size of the middle convolution.
    :param filters: 3-tuple of filter counts for the 1x1 / kxk / 1x1 convs.
    :param stage: integer used to build unique layer names.
    :param block: block letter used to build unique layer names.
    :param trainable: whether conv weights are trainable.
    :return: output tensor, same shape as input_tensor.
    """
    filters1, filters2, filters3 = filters
    # Channels-last ('tf') normalizes axis 3; channels-first uses axis 1.
    bn_axis = 3 if K.image_dim_ordering() == 'tf' else 1
    conv_prefix = 'res' + str(stage) + block + '_branch'
    bn_prefix = 'bn' + str(stage) + block + '_branch'

    x = Convolution2D(filters1, (1, 1), name=conv_prefix + '2a', trainable=trainable)(input_tensor)
    x = FixedBatchNormalization(axis=bn_axis, name=bn_prefix + '2a')(x)
    x = Activation('relu')(x)

    x = Convolution2D(filters2, (kernel_size, kernel_size), padding='same',
                      name=conv_prefix + '2b', trainable=trainable)(x)
    x = FixedBatchNormalization(axis=bn_axis, name=bn_prefix + '2b')(x)
    x = Activation('relu')(x)

    x = Convolution2D(filters3, (1, 1), name=conv_prefix + '2c', trainable=trainable)(x)
    x = FixedBatchNormalization(axis=bn_axis, name=bn_prefix + '2c')(x)

    # Residual connection: add the block input back, then activate.
    x = Add()([x, input_tensor])
    return Activation('relu')(x)
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2), trainable=True):
    """ResNet conv block: three-conv main path plus a projection shortcut.

    Unlike the identity block, the first conv (and the 1x1 shortcut conv)
    may stride, so output spatial size / channel count can differ from the
    input's; the shortcut projection keeps the residual shapes compatible.

    :param input_tensor: 4D feature-map tensor.
    :param kernel_size: spatial size of the middle convolution.
    :param filters: 3-tuple of filter counts for the 1x1 / kxk / 1x1 convs.
    :param stage: integer used to build unique layer names.
    :param block: block letter used to build unique layer names.
    :param strides: strides for the first conv and the shortcut conv.
    :param trainable: whether conv weights are trainable.
    :return: output tensor.
    """
    filters1, filters2, filters3 = filters
    # Channels-last ('tf') normalizes axis 3; channels-first uses axis 1.
    bn_axis = 3 if K.image_dim_ordering() == 'tf' else 1
    conv_prefix = 'res' + str(stage) + block + '_branch'
    bn_prefix = 'bn' + str(stage) + block + '_branch'

    x = Convolution2D(filters1, (1, 1), strides=strides,
                      name=conv_prefix + '2a', trainable=trainable)(input_tensor)
    x = FixedBatchNormalization(axis=bn_axis, name=bn_prefix + '2a')(x)
    x = Activation('relu')(x)

    x = Convolution2D(filters2, (kernel_size, kernel_size), padding='same',
                      name=conv_prefix + '2b', trainable=trainable)(x)
    x = FixedBatchNormalization(axis=bn_axis, name=bn_prefix + '2b')(x)
    x = Activation('relu')(x)

    x = Convolution2D(filters3, (1, 1), name=conv_prefix + '2c', trainable=trainable)(x)
    x = FixedBatchNormalization(axis=bn_axis, name=bn_prefix + '2c')(x)

    # Projection shortcut so the residual addition shapes match.
    shortcut = Convolution2D(filters3, (1, 1), strides=strides,
                             name=conv_prefix + '1', trainable=trainable)(input_tensor)
    shortcut = FixedBatchNormalization(axis=bn_axis, name=bn_prefix + '1')(shortcut)

    x = Add()([x, shortcut])
    return Activation('relu')(x)
def identity_block_td(input_tensor, kernel_size, filters, stage, block, trainable=True):
    """Time-distributed ResNet identity block (no shape change).

    Same three-conv residual pattern as identity_block, but every conv and
    batch-norm layer is wrapped in TimeDistributed so it is applied to each
    ROI/time slice of a 5D input independently.

    :param input_tensor: 5D tensor (batch, rois/time, rows, cols, channels).
    :param kernel_size: spatial size of the middle convolution.
    :param filters: 3-tuple of filter counts for the 1x1 / kxk / 1x1 convs.
    :param stage: integer used to build unique layer names.
    :param block: block letter used to build unique layer names.
    :param trainable: whether conv weights are trainable.
    :return: output tensor with the same shape as input_tensor.
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    # Channels-last ('tf') normalizes axis 3; channels-first uses axis 1.
    if K.image_dim_ordering() == 'tf':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    x = TimeDistributed(Convolution2D(nb_filter1, (1, 1), trainable=trainable, kernel_initializer='normal'),
                        name=conv_name_base + '2a')(input_tensor)
    x = TimeDistributed(FixedBatchNormalization(axis=bn_axis), name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)
    x = TimeDistributed(
        Convolution2D(nb_filter2, (kernel_size, kernel_size), trainable=trainable, kernel_initializer='normal',
                      padding='same'), name=conv_name_base + '2b')(x)
    x = TimeDistributed(FixedBatchNormalization(axis=bn_axis), name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)
    x = TimeDistributed(Convolution2D(nb_filter3, (1, 1), trainable=trainable, kernel_initializer='normal'),
                        name=conv_name_base + '2c')(x)
    x = TimeDistributed(FixedBatchNormalization(axis=bn_axis), name=bn_name_base + '2c')(x)
    # Residual connection: add the block input back, then activate.
    x = Add()([x, input_tensor])
    x = Activation('relu')(x)
    return x
def conv_block_td(input_tensor, kernel_size, filters, stage, block, input_shape, strides=(2, 2), trainable=True):
    """Time-distributed ResNet conv block with a projection shortcut.

    Same pattern as conv_block, but every conv/batch-norm is wrapped in
    TimeDistributed so it runs per ROI/time slice; the first wrapper also
    carries ``input_shape`` so the 5D input shape is declared explicitly.

    :param input_tensor: 5D tensor (batch, rois/time, rows, cols, channels).
    :param kernel_size: spatial size of the middle convolution.
    :param filters: 3-tuple of filter counts for the 1x1 / kxk / 1x1 convs.
    :param stage: integer used to build unique layer names.
    :param block: block letter used to build unique layer names.
    :param input_shape: per-sample shape fed to the first TimeDistributed layer.
    :param strides: strides for the first conv and the shortcut conv.
    :param trainable: whether conv weights are trainable.
    :return: output tensor.
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    # Channels-last ('tf') normalizes axis 3; channels-first uses axis 1.
    if K.image_dim_ordering() == 'tf':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    x = TimeDistributed(
        Convolution2D(nb_filter1, (1, 1), strides=strides, trainable=trainable, kernel_initializer='normal'),
        input_shape=input_shape, name=conv_name_base + '2a')(input_tensor)
    x = TimeDistributed(FixedBatchNormalization(axis=bn_axis), name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)
    x = TimeDistributed(Convolution2D(nb_filter2, (kernel_size, kernel_size), padding='same', trainable=trainable,
                                      kernel_initializer='normal'), name=conv_name_base + '2b')(x)
    x = TimeDistributed(FixedBatchNormalization(axis=bn_axis), name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)
    # FIX: 'trainable' was previously passed to the TimeDistributed wrapper
    # instead of the inner Convolution2D — inconsistent with every other conv
    # in this block; moved onto the conv layer itself.
    x = TimeDistributed(Convolution2D(nb_filter3, (1, 1), trainable=trainable, kernel_initializer='normal'),
                        name=conv_name_base + '2c')(x)
    x = TimeDistributed(FixedBatchNormalization(axis=bn_axis), name=bn_name_base + '2c')(x)
    # Projection shortcut so the residual addition shapes match.
    shortcut = TimeDistributed(
        Convolution2D(nb_filter3, (1, 1), strides=strides, trainable=trainable, kernel_initializer='normal'),
        name=conv_name_base + '1')(input_tensor)
    shortcut = TimeDistributed(FixedBatchNormalization(axis=bn_axis), name=bn_name_base + '1')(shortcut)
    x = Add()([x, shortcut])
    x = Activation('relu')(x)
    return x
if __name__ == "__main__":
    # Smoke-build the time-distributed ResNet detection head for a
    # 160x80 RGB input and a variable number of 4-coordinate ROIs.
    # stage_2_net(11, Input(shape=(160, 80, 3)), height=160, width=80)
    # stage_2_net_vgg(11, Input(shape=(160, 80, 3)), height=160, width=80)
    stage_2_net_res_td(11, Input(shape=(160, 80, 3)), Input(shape=(None, 4)), height=160, width=80)
|
# -*- coding: utf-8 -*-
import cv2
import sys
import face_recognition
import os
import MySQLdb
import numpy as np
from datetime import datetime
import databaseScript
# Load every reference photo from ImagesAttendance; the filename (minus
# its extension) is used as the person's class name.
path = "ImagesAttendance"
images = []
classNames = []
myList = os.listdir(path)
# print(myList)
for cl in myList:
    # NOTE(review): cv2.imread returns None for non-image files; assumes the
    # directory contains only readable images.
    curImage = cv2.imread(f'{path}/{cl}')
    images.append(curImage)
    classNames.append(os.path.splitext(cl)[0])
# print(classNames)
def findEncodingImg(images):
    """Compute one face-encoding vector per reference image.

    :param images: list of BGR images as loaded by cv2.imread.
    :return: list of face encodings, one per input image.

    NOTE(review): face_encodings(img)[0] raises IndexError when no face is
    found in an image — assumes every reference photo contains exactly one
    detectable face; confirm before adding new photos.
    """
    encodeList = []
    for img in images:
        # face_recognition expects RGB; cv2 loads BGR.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        encode = face_recognition.face_encodings(img)[0]
        encodeList.append(encode)
    return encodeList
# Legacy CSV-based attendance marker, superseded by the database path below.
# def MarkAttendance(name):#save data name attendance into csv file
#    with open('Attendance.csv','r+')as f:
#        myDataList=f.readlines()
#        nameList=[]
#        for line in myDataList:
#            entry=line.split(',')
#            nameList.append(entry[0])
#        if name not in nameList:
#            now=datetime.now()
#            dtstring=now.strftime("%H:%M:%S")
#            f.writelines(f'\n{name},{dtstring}')
# Ensure the attendance table exists before the recognition loop starts.
databaseScript.create_data()
def check_name_state(name):
    """Insert an attendance row for *name*, at most once per calendar day."""
    now = datetime.now()
    today = now.strftime("%d/%m/%Y")
    # Skip the insert when this name was already recorded today.
    if databaseScript.exist_name(name, today):
        return
    databaseScript.insert_data(name, now.strftime("%d/%m/%Y %H:%M:%S"))
def markattend(name):
    """Record that *name* (with the registration number given on the command
    line) was present, in the ``markme`` table.

    FIX: the original built the INSERT by string concatenation from
    sys.argv[1] and name — a SQL-injection hole; it also had a stray
    ``db=db=`` double assignment and leaked the connection on error.
    """
    db = MySQLdb.connect(host="localhost", user="root", password="", db="attendance")
    try:
        cur = db.cursor()
        # Parameterized query (PEP 249 %s placeholders) — values are escaped
        # by the driver, never interpolated into the SQL text.
        cur.execute(
            "insert into markme(reg, Studentname) values(%s, %s);",
            (sys.argv[1], name),
        )
        db.commit()
    finally:
        db.close()
known_face_encodings = findEncodingImg(images)
print("Encoding complete.....")
# ----------------------------------------------------------------------
# Webcam loop: recognize faces each frame, draw a labelled box, and mark
# attendance once the person named on the command line is recognized.
cap = cv2.VideoCapture(0)
while True:
    success, img = cap.read()
    # Process a quarter-scale RGB copy for speed; drawing happens on the
    # full-size BGR frame.
    imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)
    imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
    faceCurFrame = face_recognition.face_locations(imgS)
    encodeCurFrame = face_recognition.face_encodings(imgS, faceCurFrame)
    recognized = False
    name = "Something"
    for encodeFace, faceLoc in zip(encodeCurFrame, faceCurFrame):
        matches = face_recognition.compare_faces(
            known_face_encodings, encodeFace)
        faceDis = face_recognition.face_distance(
            known_face_encodings, encodeFace)
        # Best candidate = smallest face distance.
        matcheIndexes = np.argmin(faceDis)
        if(matches[matcheIndexes]):
            name = classNames[matcheIndexes].upper()
            print(name)
            y1, x2, y2, x1 = faceLoc
            # Scale coordinates back up: the frame was shrunk by 0.25.
            y1, x2, y2, x1 = y1*4, x2*4, y2*4, x1*4
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.rectangle(img, (x1, y2-35), (x2, y2), (0, 255, 0), cv2.FILLED)
            cv2.putText(img, name, (x1+6, y2-6),
                        cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
            # MarkAttendance(name)
            # check_name_state(name)
            # BUG FIX: only count a frame as recognized on an actual match.
            # The original checked `if (name):` after the loop, which was
            # always true because name starts as the truthy "Something" —
            # any detected (even unknown) face counted as recognized.
            recognized = True
    cv2.putText(img, 'press q to exit', (10, 18),
                cv2.FONT_HERSHEY_COMPLEX, 0.8, (0, 0, 255), 2)
    cv2.imshow("Attendance System", img)
    if(cv2.waitKey(1) & 0xFF == ord('q')):
        break
    if (recognized and name == sys.argv[1]):
        markattend(name)
        break
cap.release()
cv2.destroyAllWindows()
|
"""Add first/last name fields to user table.
Revision ID: 81162fe5d987
Revises: 4e8beae024e9
Create Date: 2018-11-28 22:14:00.933976
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '81162fe5d987'
down_revision = '4e8beae024e9'
branch_labels = None
depends_on = None
def upgrade():
    """Add nullable, indexed ``firstname``/``lastname`` columns to ``user``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('user', sa.Column('firstname', sa.String(length=255), nullable=True))
    op.add_column('user', sa.Column('lastname', sa.String(length=255), nullable=True))
    op.create_index(op.f('ix_user_firstname'), 'user', ['firstname'], unique=False)
    op.create_index(op.f('ix_user_lastname'), 'user', ['lastname'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the name indexes, then the columns."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_user_lastname'), table_name='user')
    op.drop_index(op.f('ix_user_firstname'), table_name='user')
    op.drop_column('user', 'lastname')
    op.drop_column('user', 'firstname')
    # ### end Alembic commands ###
|
# Remove duplicates from List, preserving first-seen order.
List = [1, 2, 4, 7, 22, 3, 5, 6, 3, 1, 22, 1, 4, 5, 2, 3, 4, 5, 6, 7, 5]
# dict.fromkeys keeps insertion order (Python 3.7+) and drops repeats in
# O(n), replacing the original O(n^2) "not in list" membership loop.
NewList = list(dict.fromkeys(List))
print(NewList)
# Sort the de-duplicated list in place and show it again.
NewList.sort()
print(NewList)
|
import pygame
import time
import random
import numpy as np
from PIL import Image
import env
from game.Board import BoardSingleton
from game.Snake import Snake
from game.Food import Food
from game.EventListener import EventListener
class SnakeGameEnv():
    """Gym-style environment wrapper around the pygame snake game.

    Exposes step()/reset()/render() and returns observations as small RGB
    PIL images of the board (one pixel per board cell, colors taken from
    the board's COLOR table).
    """
    def __init__(self):
        self.board = BoardSingleton.getInstance()
        self.food = Food()
        self.snake = Snake()
        self.event = EventListener()
        # Only open a pygame window when previews are enabled in env.
        if(env.SHOW_PREVIEW):
            pygame.init()
            pygame.display.set_caption('Q-Learning Snake')
        self.clock = pygame.time.Clock()
    def step(self, action):
        """Apply *action*, return (observation, reward, done).

        Rewards: -MOVE_PENALTY per step, -INVALID_MOVE_PENALTY for an
        invalid move, FOOD_REWARD when the snake reaches the food.
        NOTE(review): `self.food == self.snake` relies on Food/Snake custom
        equality (presumably same coordinates) — confirm in game modules.
        """
        self.snake.action(action)
        new_observation = np.array(self.get_image_map())
        reward = -env.MOVE_PENALTY
        if(self.snake.snake_invalid_move()):
            reward = -env.INVALID_MOVE_PENALTY
        elif(self.food == self.snake):
            reward = env.FOOD_REWARD
        # Eating the food: respawn it, score a point, grow the snake.
        if(self.food == self.snake):
            self.food.set_new_food_coordinates()
            self.board.add_score_point()
            self.snake.snake_lenght += 1
        done = False
        # Episode ends on window close or on an invalid move.
        if(self.event.exit == True or self.snake.snake_invalid_move()):
            done = True
        return new_observation, reward, done
    def render(self):
        """Pump events and redraw board, food and snake at FRAME_RATE."""
        self.event.listener()
        self.board.refresh_board()
        self.food.food_draw_frame()
        self.snake.snake_draw_frame()
        pygame.display.update()
        self.clock.tick(env.FRAME_RATE)
    def reset(self, show_episode):
        """Start a new episode; returns (observation, done=False)."""
        self.snake = Snake()
        self.food = Food()
        self.board.restart(show_episode)
        observation = np.array(self.get_image_map())
        return observation, False
    def calculate_snake_head(self):
        """Return the head position in board-cell units, clamped to the map."""
        snake_head_temp = self.snake.get_snake_head()
        # Clamp pixel coordinates into [0, MAP_SIZE) before converting.
        if(snake_head_temp[0] < 0):
            snake_head_temp[0] = 0
        elif(snake_head_temp[0] >= env.MAP_SIZE):
            snake_head_temp[0] = env.MAP_SIZE-env.DOT_SIZE
        if(snake_head_temp[1] < 0):
            snake_head_temp[1] = 0
        elif(snake_head_temp[1] >= env.MAP_SIZE):
            snake_head_temp[1] = env.MAP_SIZE-env.DOT_SIZE
        # Pixel coordinates -> cell indices.
        snake_head_temp = [snake_head_temp[0]//env.DOT_SIZE, snake_head_temp[1]//env.DOT_SIZE]
        return snake_head_temp
    def get_image_map(self):
        """Render the board state to a BOARD_TABLE x BOARD_TABLE RGB image."""
        map_env = np.zeros((env.BOARD_TABLE, env.BOARD_TABLE, 3), dtype=np.uint8)
        food_location = self.food.get_food_xy()
        map_env[food_location[0]][food_location[1]] = self.board.COLOR["FD"]
        snake_head = self.calculate_snake_head()
        map_env[snake_head[0]][snake_head[1]] = self.board.COLOR["SN_HD"]
        # Body segments (all but the last entry) use the tail color.
        snake_tail = self.snake.snake
        for x, y in snake_tail[:-1]:
            map_env[x//env.DOT_SIZE][y//env.DOT_SIZE] = self.board.COLOR["SN"]
        img = Image.fromarray(map_env, 'RGB')
        return img
    def get_highest_score(self):
        """Best score seen across all episodes."""
        return self.board.high_score
    def get_episode_score(self):
        """Score of the current episode."""
        return self.board.score
import math
# import matplotlib.pyplot as plt
import numpy as np
from ... environment.simulator.models import Simulation
from ... import db
# To differentiate first and last from movement display
# Matplotlib-style marker presets used by the plot_* helpers below.
MARKER_FIRST = {
    'marker': "o",
    'markerfacecolor': "green",
    'markersize': "14",
}
# Intermediate points in a movement trace.
MARKER = {
    'marker': "x",
    'markerfacecolor': "yellow",
    'markersize': "10",
}
MARKER_LAST = {
    'marker': "d",
    'markerfacecolor': "blue",
    'markersize': "14",
}
class AMM(db.Model):
    """Automated-market-maker pool for a two-token pair (x, y).

    When ``is_dynamic`` is False the pool uses constant-product math
    (k = x * y, Uniswap-style); when True it uses weighted-product math
    (v = x^wx * y^wy, Balancer-style).  Each operation dispatches to a
    ``_c_*`` (constant) or ``_d_*`` (dynamic) helper accordingly.
    """
    __tablename__ = 'amms'
    id = db.Column(
        db.Integer,
        primary_key=True
    )
    # Display name of the pool.
    name = db.Column(
        db.String(100),
        nullable=False,
    )
    # Token X symbol and balance.
    name_x = db.Column(
        db.String(10),
        nullable=False,
    )
    bal_x = db.Column(
        db.Float,
        default=0,
        nullable=False,
    )
    # Token Y symbol and balance.
    name_y = db.Column(
        db.String(10),
        nullable=False,
    )
    bal_y = db.Column(
        db.Float,
        default=0,
        nullable=False,
    )
    # Pool weights; only meaningful when is_dynamic is True.  Kept
    # complementary (weight_x + weight_y == 1) by set_weight_x().
    weight_x = db.Column(
        db.Float,
        default=0.5,
        nullable=False,
    )
    weight_y = db.Column(
        db.Float,
        default=0.5,
        nullable=False,
    )
    is_dynamic = db.Column(
        db.Boolean,
        default=False,
        nullable=False,
    )
    records = db.relationship('AMM_Record',
                              backref='amm_records',
                              lazy=True)
    #
    simulation_id = db.Column(
        db.Integer,
        db.ForeignKey('simulations.id'),
        nullable=False
    )
    #
    @property
    def serialize(self):
        """JSON-serializable snapshot of the pool's state."""
        return {
            'id': self.id,
            'name': self.name,
            'name_x': self.name_x,
            'bal_x': self.bal_x,
            'weight_x': self.weight_x,
            'name_y': self.name_y,
            'bal_y': self.bal_y,
            'weight_y': self.weight_y,
            'is_dynamic': self.is_dynamic,
            'simulation_id': self.simulation_id
        }
    #
    def constant_product(self):
        """Pool invariant: v() for dynamic pools, k() otherwise."""
        return self.v() if self.is_dynamic else self.k()
    def k(self):
        """Constant-product invariant k = x * y."""
        return self.bal_x * self.bal_y
    #
    def v(self):
        """Weighted-product invariant v = x^wx * y^wy."""
        return (self.bal_x ** self.weight_x) * (self.bal_y ** self.weight_y)
    #
    def x_price(self):
        """Spot price of token X, denominated in token Y."""
        return self._d_x_price() if self.is_dynamic else self._c_x_price()
    #
    def _c_x_price(self):
        return self.bal_y / self.bal_x
    #
    def _d_x_price(self):
        # Weighted spot price: (y / wy) / (x / wx).
        return (self.bal_y / self.weight_y) / (self.bal_x / self.weight_x)
    #
    def y_price(self):
        """Spot price of token Y, denominated in token X."""
        return self._d_y_price() if self.is_dynamic else self._c_y_price()
    #
    def _c_y_price(self):
        return self.bal_x / self.bal_y
    #
    def _d_y_price(self):
        return (self.bal_x / self.weight_x) / (self.bal_y / self.weight_y)
    #
    def seed(self, _x, _y):
        """Set the initial pool balances."""
        self.bal_x = _x
        self.bal_y = _y
        # self.clear_records()
    #
    def add_liquidity(self, _x):
        """Deposit _x of token X plus the matching amount of token Y."""
        return self._d_add_liquidity(_x) if self.is_dynamic else self._c_add_liquidity(_x)
    #
    def _c_add_liquidity(self, _x):
        # NOTE(review): by precedence this is (_x * y / x) + 1, i.e. a flat
        # extra 1 unit of Y — confirm whether _x * y / (x + 1) was intended.
        new_y = _x * self.bal_y / self.bal_x + 1
        self.bal_x += _x
        self.bal_y += new_y
        return
    #
    def _d_add_liquidity(self, _x):
        # Identical math to _c_add_liquidity; kept separate only to mirror
        # the _c_/_d_ dispatch pattern used by the other operations.
        new_token = _x * self.bal_y / self.bal_x + 1
        self.bal_x += _x
        self.bal_y += new_token
        return
    #
    def apply_volume(self, _net_y_volume):
        """Apply net trade volume (denominated in token Y) to the pool.

        Returns True on success, False when |volume| >= bal_y (trade
        refused).  The original discarded the helpers' boolean; propagating
        it lets callers detect refused trades.
        """
        if self.is_dynamic:
            return self._d_apply_volume(_net_y_volume)
        return self._c_apply_volume(_net_y_volume)
    #
    def _c_apply_volume(self, _net_y_volume):
        if abs(_net_y_volume) >= self.bal_y:
            return False
        k = self.k()
        y = self.bal_y
        # Hold k constant: y' = y + volume, x' = k / y'.
        self.bal_x = k / (y + _net_y_volume)
        self.bal_y = y + _net_y_volume
        return True
    #
    def _d_apply_volume(self, _net_y_volume):
        if abs(_net_y_volume) >= self.bal_y:
            return False
        tokens_in, tokens_out, is_x_in = self._d_volume_math(_net_y_volume)
        # (Stray debug prints removed from both branches.)
        if is_x_in:
            self.bal_x += tokens_in
            self.bal_y -= tokens_out
        else:
            self.bal_x -= tokens_out
            self.bal_y += tokens_in
        return True
    def _d_volume_math(self, _net_y_volume):
        """Weighted-pool swap math.

        Returns (tokens_in, tokens_out, is_x_in).  NOTE(review): both
        branches use weight_y / weight_x as the exponent, and in the
        negative branch the in/out amounts carry the volume's sign — the
        sign conventions differ from the constant-product path; verify
        against the weighted-pool swap formulas before relying on
        negative volumes.
        """
        if _net_y_volume > 0:
            # Rename
            is_x_in = False
            amount_tokens_swapped_in = _net_y_volume
            bal_outgoing_token = self.bal_x
            bal_incoming_token = self.bal_y
            # Separate Concerns
            weight_factor = self.weight_y / self.weight_x
            incoming_factor = bal_incoming_token / (bal_incoming_token + amount_tokens_swapped_in)
            # Culmination
            amount_tokens_swapped_out = bal_outgoing_token * (1 - (incoming_factor ** weight_factor))
        else:
            # Rename
            is_x_in = True
            amount_tokens_swapped_out = _net_y_volume
            bal_outgoing_token = self.bal_y
            bal_incoming_token = self.bal_x
            # Separate Concerns
            weight_factor = self.weight_y / self.weight_x
            outgoing_factor = amount_tokens_swapped_out / bal_outgoing_token  # = (1 - (incoming_factor ** weight_factor))
            incoming_factor = (1 - outgoing_factor)**(1/weight_factor)
            # Culmination
            amount_tokens_swapped_in = (bal_incoming_token / incoming_factor) - bal_incoming_token
        return amount_tokens_swapped_in, amount_tokens_swapped_out, is_x_in
    def set_weight_x(self, _weight):
        """Set token X's weight (0 < w < 1); Y gets the complement.

        Marks the pool dynamic.  Returns False for out-of-range weights
        (and implicitly None on success, preserved for callers).
        """
        if _weight >= 1 or _weight <= 0:
            return False
        self.weight_x = _weight
        self.weight_y = 1 - _weight
        self.is_dynamic = True
    def plot_curve(self, _plotter):
        """Plot the pool's full price curve onto _plotter and return it."""
        max_x = self.bal_x * 2
        max_y = self.bal_y * 2
        x, y, extras = self._d_plot_curve(_plotter) if self.is_dynamic else self._c_plot_curve(_plotter)
        _plotter.plot(x, y, extras)
        _plotter.set_limits(1, max_x, 1, max_y)
        _plotter.set_axis( "X Balance", "Y Balance")
        _plotter.set_title("{} Price Curve".format(self.name))
        return _plotter
    #
    def _c_plot_curve(self, _plotter):
        """Sample the constant-product curve; returns (x, y, plot styling)."""
        k = self.k()
        max_x = self.bal_x * 2
        interval = self.bal_x / 20
        x = np.arange(1, max_x, interval )
        y = k / x
        extras = {'label': "{} Price Curve".format(self.name),
                  'color': 'black',
                  'linestyle': 'dashed',
                  'linewidth': 3,
                  'marker': None,
                  'markerfacecolor': None ,
                  'markersize': None}
        return x, y, extras
    def _d_plot_curve(self, _plotter):
        """Sample the weighted-product curve; returns (x, y, plot styling)."""
        v = self.v()
        max_x = self.bal_x * 2
        interval = self.bal_x / 20
        x = np.arange(1, max_x, interval )
        # Solve v = x^wx * y^wy for y.  (Unused max_y local removed.)
        y = (v / (x ** self.weight_x)) ** (1/self.weight_y)
        extras = {'label': "{} Price Curve".format(self.name),
                  'color': 'black',
                  'linestyle': 'dashed',
                  'linewidth': '3',
                  'marker': None,
                  'markerfacecolor': None ,
                  'markersize': None}
        return x, y, extras
    def plot_current(self, _plotter, first=False, last=False):
        """Plot the pool's current (bal_x, bal_y) point onto _plotter.

        BUG FIX: the original signature took no arguments, so ``_plotter``,
        ``first`` and ``last`` were unbound names and every call raised
        NameError.  The parameters now mirror AMM_Record.plot_record.
        """
        if first:
            marker = MARKER_FIRST
        elif last:
            marker = MARKER_LAST
        else:
            marker = MARKER
        extras = {'label': "{} Current Balance".format(self.name),
                  'color': 'black',
                  'linestyle': 'dashed',
                  'linewidth': '3',
                  'marker': marker['marker'],
                  'markerfacecolor': marker['markerfacecolor'],
                  'markersize': marker['markersize']}
        _plotter.plot(self.bal_x, self.bal_y, extras)
        return _plotter
    #
    def most_recent_record(self):
        """Return this pool's record with the highest id, or None."""
        records = self.records
        max_id = 0
        most_recent = None
        for record in records:
            if record.id > max_id:
                max_id = record.id
                most_recent = record
        return most_recent
#
class AMM_Record(db.Model):
    """AMM Historic Context model.

    One row per snapshot of an AMM pool's prices, balances, weights and
    invariant, used to replay/plot a simulation's history.
    """
    __tablename__ = 'amm_records'
    id = db.Column(
        db.Integer,
        primary_key=True
    )
    created_on = db.Column(
        db.DateTime,
        index=False,
        unique=False,
        nullable=True
    )
    # Owning pool.
    amm_id = db.Column(
        db.Integer,
        db.ForeignKey('amms.id'),
        nullable=False
    )
    # Token X snapshot: spot price, balance, weight.
    token_price_x = db.Column(
        db.Float,
        nullable=False
    )
    bal_x = db.Column(
        db.Float,
        nullable=False
    )
    weight_x = db.Column(
        db.Float,
        nullable=False
    )
    # Token Y snapshot: spot price, balance, weight.
    token_price_y = db.Column(
        db.Float,
        nullable=False
    )
    bal_y = db.Column(
        db.Float,
        nullable=False
    )
    weight_y = db.Column(
        db.Float,
        nullable=False
    )
    # Pool invariant at snapshot time (k or v, per the pool's mode).
    constant_product = db.Column(
        db.Integer,
        nullable=False
    )
    @property
    def serialize(self):
        """JSON-serializable dict of this snapshot."""
        return {
            'id': self.id,
            'created_on': self.created_on,
            'amm_id': self.amm_id,
            'token_price_x': self.token_price_x,
            'bal_x': self.bal_x,
            'weight_x': self.weight_x,
            'token_price_y': self.token_price_y,
            'bal_y': self.bal_y,
            'weight_y': self.weight_y,
            'constant_product': self.constant_product
        }
    def plot_record(self, _plotter, first=False, last=False):
        """Plot this snapshot's (bal_x, bal_y) point onto _plotter.

        ``first``/``last`` select the MARKER_FIRST/MARKER_LAST presets so
        the endpoints of a trace stand out from intermediate points.
        """
        if first:
            marker = MARKER_FIRST
        elif last:
            marker = MARKER_LAST
        else:
            marker = MARKER
        amm = AMM.query.get(self.amm_id)
        extras = {'label': "{} Current Balance".format(amm.name),
                  'color': 'black',
                  'linestyle': 'dashed',
                  'linewidth': '3',
                  'marker': marker['marker'],
                  'markerfacecolor': marker['markerfacecolor'],
                  'markersize': marker['markersize']}
        _plotter.plot(self.bal_x, self.bal_y, extras)
        return _plotter
|
# coding: utf-8
# # Introducing Pandas
#
# Pandas is a Python library that makes handling tabular data easier. Since we're doing data science - this is something we'll use from time to time!
#
# It's one of three libraries you'll encounter repeatedly in the field of data science:
#
# ## Pandas
# Introduces "Data Frames" and "Series" that allow you to slice and dice rows and columns of information.
#
# ## NumPy
# Usually you'll encounter "NumPy arrays", which are multi-dimensional array objects. It is easy to create a Pandas DataFrame from a NumPy array, and Pandas DataFrames can be cast as NumPy arrays. NumPy arrays are mainly important because of...
#
# ## Scikit_Learn
# The machine learning library we'll use throughout this course is scikit_learn, or sklearn, and it generally takes NumPy arrays as its input.
#
# So, a typical thing to do is to load, clean, and manipulate your input data using Pandas. Then convert your Pandas DataFrame into a NumPy array as it's being passed into some Scikit_Learn function. That conversion can often happen automatically.
#
# Let's start by loading some comma-separated value data using Pandas into a DataFrame:
#
# In[3]:
# Notebook-exported tutorial: requires IPython (get_ipython) and a
# PastHires.csv file next to the script.
get_ipython().magic('matplotlib inline')
import numpy as np
import pandas as pd
df = pd.read_csv("PastHires.csv")
df.head()
# head() is a handy way to visualize what you've loaded. You can pass it an integer to see some specific number of rows at the beginning of your DataFrame:
# In[5]:
df.head(10)
# You can also view the end of your data with tail():
# In[6]:
df.tail(4)
# We often talk about the "shape" of your DataFrame. This is just its dimensions. This particular CSV file has 13 rows with 7 columns per row:
# In[7]:
df.shape
# The total size of the data frame is the rows * columns:
# In[5]:
df.size
# The len() function gives you the number of rows in a DataFrame:
# In[8]:
len(df)
# If your DataFrame has named columns (in our case, extracted automatically from the first row of a .csv file,) you can get an array of them back:
# In[7]:
df.columns
# Extracting a single column from your DataFrame looks like this - this gives you back a "Series" in Pandas:
# In[8]:
df['Hired']
# You can also extract a given range of rows from a named column, like so:
# In[9]:
df['Hired'][:5]
# Or even extract a single value from a specified column / row combination:
# In[10]:
df['Hired'][5]
# To extract more than one column, you pass in an array of column names instead of a single one:
# In[9]:
df[['Years Experience', 'Hired']]
# You can also extract specific ranges of rows from more than one column, in the way you'd expect:
# In[11]:
df[['Years Experience', 'Hired']][:5]
# Sorting your DataFrame by a specific column looks like this:
# In[13]:
df.sort_values(['Years Experience'])
# You can break down the number of unique values in a given column into a Series using value_counts() - this is a good way to understand the distribution of your data:
# In[14]:
degree_counts = df['Level of Education'].value_counts()
degree_counts
# Pandas even makes it easy to plot a Series or DataFrame - just call plot():
# In[15]:
degree_counts.plot(kind='bar')
# ## Exercise
#
# Try extracting rows 5-10 of our DataFrame, preserving only the "Previous Employers" and "Hired" columns. Assign that to a new DataFrame, and create a histogram plotting the distribution of the previous employers in this subset of the data.
# In[26]:
df.head()
# NOTE(review): the exercise asks for rows 5-10 but this keeps ALL rows —
# presumably df[["Previous employers", "Hired"]][5:11] was intended.  Also
# the column name's capitalization must match the CSV header exactly.
newdf = df[["Previous employers", "Hired"]]
newdf
newdf.plot(kind = 'hist')
|
""" Captcha.Visual.Backgrounds
Background layers for visual CAPTCHAs
SimpleCaptcha Package
Forked from PyCAPTCHA Copyright (C) 2004 Micah Dowty <micah@navi.cx>
"""
from simplecaptcha.visual import Layer, pictures
import random
from PIL import Image, ImageDraw
class SolidColor(Layer):
    """A solid color background. Very weak on its own, but good to combine with
    other backgrounds. """
    def __init__(self, color="white"):
        # Any PIL-understood color spec (name, hex string, tuple).
        self.color = color
    def render(self, image):
        # Flood-fill the whole image with the configured color.
        image.paste(self.color)
class Grid(Layer):
    """A grid of lines, with a given foreground color. The size is given in
    pixels. The background is transparent, so another layer (like SolidColor)
    should be put behind it. """
    def __init__(self, size=16, foreground="black"):
        self.size = size
        self.foreground = foreground
        # Random per-instance offset so the grid position varies per CAPTCHA.
        self.offset = (random.uniform(0, self.size),
                       random.uniform(0, self.size))
    def render(self, image):
        """Draw vertical then horizontal grid lines over *image*."""
        draw = ImageDraw.Draw(image)
        # Vertical lines: count derived from the image WIDTH.
        r1 = int(image.size[0] / (self.size + 1))
        for i in range(r1):
            draw.line((i*self.size+self.offset[0],
                       0,
                       i*self.size+self.offset[0],
                       image.size[1]),
                      fill=self.foreground)
        # FIX: horizontal-line count must come from the image HEIGHT
        # (size[1]); the original reused size[0], drawing the wrong number
        # of rows on non-square images.
        r2 = int(image.size[1] / (self.size + 1))
        for i in range(r2):
            draw.line((0,
                       i*self.size+self.offset[1],
                       image.size[0],
                       i*self.size+self.offset[1]),
                      fill=self.foreground)
class TiledImage(Layer):
    """Pick a random image and a random offset, and tile the rendered image
    with it"""
    def __init__(self, image_factory=pictures.abstract):
        # Path of the tile chosen once per instance.
        self.tile_name = image_factory.pick()
        # Fractional (x, y) offset of the tiling, in tile units.
        self.offset = (random.uniform(0, 1),
                       random.uniform(0, 1))
    def render(self, image):
        # Start one tile before the origin so the offset never leaves gaps
        # at the top/left edges.
        tile = Image.open(self.tile_name)
        for j in range(-1, int(image.size[1] / tile.size[1]) + 1):
            for i in range(-1, int(image.size[0] / tile.size[0]) + 1):
                dest = (int((self.offset[0] + i) * tile.size[0]),
                        int((self.offset[1] + j) * tile.size[1]))
                image.paste(tile, dest)
class CroppedImage(Layer):
    """Pick a random image, cropped randomly. Source images should be larger
    than the CAPTCHA."""
    def __init__(self, image_factory=pictures.nature):
        self.image_name = image_factory.pick()
        # Fractional alignment within the source image (0 = top/left edge).
        self.align = (random.uniform(0, 1),
                      random.uniform(0, 1))
    def render(self, image):
        # Paste at a negative offset so a random window of the (larger)
        # source image shows through.
        i = Image.open(self.image_name)
        image.paste(i, (int(self.align[0] * (image.size[0] - i.size[0])),
                        int(self.align[1] * (image.size[1] - i.size[1]))))
class RandomDots(Layer):
    """Draw random colored dots"""
    def __init__(self, colors=("white", "black"), dot_size=4, num_dots=400):
        self.colors = colors
        self.dot_size = dot_size
        self.num_dots = num_dots
        # Per-instance seed so every render() of this layer reproduces the
        # exact same dot pattern.
        self.seed = random.random()
    def render(self, image):
        r = random.Random(self.seed)
        for i in range(self.num_dots):
            # Keep the whole dot inside the image bounds.
            bx = int(r.uniform(0, image.size[0]-self.dot_size))
            by = int(r.uniform(0, image.size[1]-self.dot_size))
            image.paste(r.choice(self.colors), (bx, by,
                                                bx+self.dot_size-1,
                                                by+self.dot_size-1))
|
import os
import imapclient
import pyzmail
# SECURITY FIX: the original hard-coded a Gmail address and password in
# source.  Credentials now come from the environment; set GMAIL_USER and
# GMAIL_PASSWORD before running (an app password, not the account password).
imapObj = imapclient.IMAPClient('imap.gmail.com', ssl=True)
imapObj.login(os.environ['GMAIL_USER'], os.environ['GMAIL_PASSWORD'])
imapObj.select_folder('INBOX', readonly=True)
UIDs = imapObj.search(['SINCE 05-Jul-2014'])
#UIDs have list of message ID
# Fetch one specific message by UID and decode its text part.
rawMessages = imapObj.fetch([40041], ['BODY[]', 'FLAGS'])
message = pyzmail.PyzMessage.factory(rawMessages[40041]['BODY[]'])
message.get_subject()
message.text_part.get_payload().decode(message.text_part.charset)
imapObj.logout()
|
from django.urls import path
from . import views
app_name = 'logtest'
# URL routes for the log-analysis dashboard; every route maps to a
# class-based view in views.py.
urlpatterns = [
    path('', views.IndexView.as_view(), name='index'),
    path('today_access', views.TodayAccessView.as_view(), name='today_access'),
    path('recent_log_table', views.RecentLogView.as_view(), name='recent_log_table'),
    path('err_log_table', views.ErrLogView.as_view(), name='err_log_table'),
    path('http_status_pie', views.HttpStatusPieView.as_view(), name='http_status_pie'),
    path('http_status_line', views.HttpStatusLineView.as_view(), name='http_status_line'),
    path('inner_ip', views.InnerIPView.as_view(), name='inner_ip'),
    path('inner_diff_ip', views.InnerDiffIPView.as_view(), name='inner_diff_ip'),
    path('outer_ip', views.OuterIPView.as_view(), name='outer_ip'),
    path('outer_diff_ip', views.OuterDiffIPView.as_view(), name='outer_diff_ip'),
    path('ua_browser_bot', views.UABrowserBotView.as_view(), name='ua_browser_bot'),
    path('ua_browser_detail', views.UABrowserDetailView.as_view(), name='ua_browser_detail'),
    path('ua_os', views.UAOsView.as_view(), name='ua_os'),
]
|
from os import environ
'''
smtplib is a less secure method than google auth.
To use google auth, see https://github.com/shankarj67/python-gmail-api
'''
import smtplib
def send_email(ads_msgs):
    """Email the given ad messages via Gmail SMTP.

    :param ads_msgs: list of strings; joined with newlines into the body.

    Reads 'gmail_bot', 'gmail_bot_pwd' and 'email_to' from the environment.
    'email_to' is a comma- and/or space-separated list of addresses, e.g.
    'user1@amail.com, user2@bmail.com' (trailing comma tolerated).
    Failures are reported on stdout, never raised.
    """
    gmail_bot = environ['gmail_bot']
    gmail_bot_pwd = environ['gmail_bot_pwd']
    sent_from = gmail_bot
    # Normalize separators, then split into a list of recipient addresses.
    to = environ['email_to']
    to = to.replace(',',' ')
    to = to.split()
    print(to)
    subject = 'Charlottenburg new rental ads'
    body = "\n".join(ads_msgs)
    email_text = """Subject:%s\n\n %s""" % (subject, body)
    print(email_text)
    try:
        # FIX: context manager guarantees the connection is closed even when
        # login/sendmail raises (the original leaked it on failure).
        with smtplib.SMTP_SSL('smtp.gmail.com', 465) as server:
            server.ehlo()
            server.login(gmail_bot, gmail_bot_pwd)
            #https://docs.python.org/2/library/smtplib.html#smtplib.SMTP.sendmail
            server.sendmail(sent_from, to, email_text.encode())
        print('Email sent.')
    except Exception as e:
        print('Something went wrong with emails: ', e)
|
import pytest
import numpy as np
from astropy import units as u
from ..flux import compute_flux
def strip_parentheses(string):
    """Return *string* with every '(' and ')' character removed."""
    # str.translate deletes both characters in a single C-level pass.
    return string.translate(str.maketrans('', '', '()'))
# Parametrized cases for test_compute_flux:
# (input quantity array, target unit, extra keyword args, expected total flux)
COMBINATIONS = \
    [
        (np.array([1, 2, 3]) * u.Jy, u.Jy, {}, 6 * u.Jy),
        (np.array([1, 2, 3]) * u.mJy, u.Jy, {}, 0.006 * u.Jy),
        (np.array([1, 2, 3]) * u.erg / u.cm ** 2 / u.s / u.Hz, u.Jy, {}, 6e23 * u.Jy),
        (np.array([1, 2, 3]) * u.erg / u.cm ** 2 / u.s / u.micron, u.Jy, {'wavelength': 2 * u.micron}, 8005538284.75565 * u.Jy),
        (np.array([1, 2, 3]) * u.Jy / u.arcsec ** 2, u.Jy, {'spatial_scale': 2 * u.arcsec}, 24. * u.Jy),
        (np.array([1, 2, 3]) * u.Jy / u.beam, u.Jy, {'spatial_scale': 2 * u.arcsec, 'beam_major': 1 * u.arcsec, 'beam_minor': 0.5 * u.arcsec}, 42.36166269526079 * u.Jy),
        (np.array([1, 2, 3]) * u.K, u.Jy, {'spatial_scale': 2 * u.arcsec, 'beam_major': 1 * u.arcsec, 'beam_minor': 0.5 * u.arcsec, 'wavelength': 2 * u.mm}, 0.38941636582186634 * u.Jy),
        (np.array([1, 2, 3]) * u.K, u.Jy, {'spatial_scale': 2 * u.arcsec, 'beam_major': 1 * u.arcsec, 'beam_minor': 0.5 * u.arcsec, 'wavelength': 100 * u.GHz}, 0.17331365650395836 * u.Jy),
    ]
@pytest.mark.parametrize(('input_quantities', 'output_unit', 'keywords', 'output'), COMBINATIONS)
def test_compute_flux(input_quantities, output_unit, keywords, output):
    """Each COMBINATIONS case converts to the expected total flux
    (value within float tolerance, unit exactly)."""
    q = compute_flux(input_quantities, output_unit, **keywords)
    np.testing.assert_allclose(q.value, output.value)
    assert q.unit == output.unit
def test_monochromatic_wav_missing():
    """Per-wavelength flux without a 'wavelength' kwarg raises ValueError."""
    with pytest.raises(ValueError) as exc:
        compute_flux(np.array([1, 2, 3]) * u.erg / u.cm ** 2 / u.s / u.micron, u.Jy)
    assert exc.value.args[0] == 'wavelength is needed to convert from erg / (cm2 micron s) to Jy'
def test_monochromatic_wav_invalid_units():
    """A non-length 'wavelength' (liters here) raises ValueError."""
    with pytest.raises(ValueError) as exc:
        compute_flux(np.array([1, 2, 3]) * u.erg / u.cm ** 2 / u.s / u.micron, u.Jy, wavelength=3 * u.L)
    assert exc.value.args[0] == 'wavelength should be a physical length'
def test_surface_brightness_scale_missing():
    """Surface brightness without 'spatial_scale' raises ValueError."""
    with pytest.raises(ValueError) as exc:
        compute_flux(np.array([1, 2, 3]) * u.Jy / u.arcsec ** 2, u.Jy)
    # Parentheses stripped so the check is robust to unit-formatting changes.
    assert strip_parentheses(exc.value.args[0]) == 'spatial_scale is needed to convert from Jy / arcsec2 to Jy'
def test_surface_brightness_invalid_units():
    """A non-angular 'spatial_scale' (meters here) raises ValueError."""
    with pytest.raises(ValueError) as exc:
        compute_flux(np.array([1, 2, 3]) * u.Jy / u.arcsec ** 2, u.Jy, spatial_scale=3 * u.m)
    assert exc.value.args[0] == 'spatial_scale should be an angle'
def test_per_beam_scale_missing():
    """Jy/beam conversion requires spatial_scale, beam_major AND beam_minor;
    omitting any one of them raises ValueError naming the missing kwarg."""
    with pytest.raises(ValueError) as exc:
        compute_flux(np.array([1, 2, 3]) * u.Jy / u.beam, u.Jy, beam_major=3 * u.arcsec, beam_minor=2. * u.arcsec)
    assert strip_parentheses(exc.value.args[0]) == 'spatial_scale is needed to convert from Jy / beam to Jy'
    with pytest.raises(ValueError) as exc:
        compute_flux(np.array([1, 2, 3]) * u.Jy / u.beam, u.Jy, spatial_scale=3 * u.arcsec, beam_minor=2. * u.arcsec)
    assert strip_parentheses(exc.value.args[0]) == 'beam_major is needed to convert from Jy / beam to Jy'
    with pytest.raises(ValueError) as exc:
        compute_flux(np.array([1, 2, 3]) * u.Jy / u.beam, u.Jy, spatial_scale=3 * u.arcsec, beam_major=2. * u.arcsec)
    assert strip_parentheses(exc.value.args[0]) == 'beam_minor is needed to convert from Jy / beam to Jy'
def test_per_beam_invalid_units():
    """Non-angular beam_major/beam_minor (meters here) raise ValueError."""
    with pytest.raises(ValueError) as exc:
        compute_flux(np.array([1, 2, 3]) * u.Jy / u.beam, u.Jy, spatial_scale=3 * u.arcsec, beam_major=3 * u.m, beam_minor=2. * u.arcsec)
    assert exc.value.args[0] == 'beam_major should be an angle'
    with pytest.raises(ValueError) as exc:
        compute_flux(np.array([1, 2, 3]) * u.Jy / u.beam, u.Jy, spatial_scale=3 * u.arcsec, beam_major=3 * u.arcsec, beam_minor=2. * u.m)
    assert exc.value.args[0] == 'beam_minor should be an angle'
|
from rest_framework.serializers import ModelSerializer, ReadOnlyField
from .models import Investment as InvestmentModel
class InvestmentSerializer(ModelSerializer):
    """Serializes Investment rows for the REST API, exposing the primary
    key under the read-only name ``investmentId``."""
    # Read-only alias: clients see ``investmentId`` instead of ``id``.
    investmentId = ReadOnlyField(source='id')
    class Meta:
        model = InvestmentModel
        fields = ['investmentId', 'username', 'amount', 'duration', 'investmentReturns']
|
#!/usr/bin/env python
# get_all_kmers.py
seq = 'GCCGGCCCTCAGACAGGAGTGGTCCTGGATG'
kmer_length = 7
# Slide a fixed-size window across the sequence and print every k-mer.
stop = len(seq) - kmer_length + 1
for offset in range(stop):
    kmer = seq[offset:offset + kmer_length]
    print(kmer)
|
#!/usr/bin/env python3
import os
import sys
import gzip
from Bio import SeqIO
#functions for trimming and merging fastq files, and characterizing fastq files
def cut_primers(file, primer, raw_dir, trimmed_dir):
    """Trim a leading primer off every read that starts with it.

    Parameters
    ----------
    file : str
        Path to a gzipped FASTQ file.
    primer : str
        Primer sequence expected at the 5' end of each read.
    raw_dir, trimmed_dir : str
        Directory components swapped to build the output path.

    Writes a plain ``*_trimmed.fastq`` file containing only the reads that
    began with ``primer``, with the primer removed.
    """
    print(file)
    with gzip.open(file, "rt") as handle:
        fq_record = SeqIO.parse(handle, 'fastq')
        # BUG FIX: the original sliced a hard-coded 8 bases off each read;
        # use the actual primer length so primers of any size are handled.
        primer_len = len(primer)
        trimmed_primer_reads = (rec[primer_len:] for rec in fq_record if rec.seq.startswith(primer))
        trimmed_file_name = file.replace(".fastq.gz", "_trimmed.fastq").replace(raw_dir, trimmed_dir)
        count = SeqIO.write(trimmed_primer_reads, trimmed_file_name, "fastq")
###
def merge_reads(file1, file2, trimmed_dir, merged_dir, orphan_dir):
    """Create an interleaved merged FASTQ plus an orphaned-reads FASTQ.

    Parameters
    ----------
    file1, file2 : str
        Paired-end trimmed FASTQ file names (read 1 / read 2).
    trimmed_dir : str
        Directory holding the input files.
    merged_dir, orphan_dir : str
        Output directories for the merged and orphan files.

    Prints per-file summaries, then writes the two output files.
    """
    # Index both mates by read id so records can be fetched lazily.
    fq_dict_1 = SeqIO.index(trimmed_dir + "/" + file1, "fastq")
    fq_dict_2 = SeqIO.index(trimmed_dir + "/" + file2, "fastq")
    # Partition ids into: present in both files, or orphaned in either one.
    orphan_reads_1 = []
    orphan_reads_2 = []
    both_fq = []
    for key in fq_dict_1.keys():
        if key in fq_dict_2:
            both_fq.append(key)
        else:
            orphan_reads_1.append(key)
    # Set membership is O(1); the original list lookup made this loop O(n^2).
    both_set = set(both_fq)
    for key in fq_dict_2.keys():
        if key not in both_set:
            orphan_reads_2.append(key)
    # Build output names from the read-1 file name.
    output_merge = file1.replace("_1_trimmed.fastq", "_trimmed_merged.fastq")
    output_orphan = file1.replace("_1_trimmed.fastq", "_trimmed_orphans.fastq")
    # Summary output.
    print(output_merge)
    print("Total matched reads: " + str(len(both_fq)))
    print(output_orphan)
    print("Total orphan reads: " + str(len(orphan_reads_1) + len(orphan_reads_2)))
    # Context managers guarantee the output files are closed even on error
    # (the original left them open if an exception occurred mid-loop).
    with open(merged_dir + "/" + output_merge, "w") as fh_merged, \
            open(orphan_dir + "/" + output_orphan, "w") as fh_orphan:
        # Interleave read 1 / read 2 for every matched id.
        for read_id in both_fq:
            fh_merged.write(fq_dict_1.get_raw(read_id).decode())
            fh_merged.write(fq_dict_2.get_raw(read_id).decode())
        # Reads without a mate go to the orphan file.
        for read_id in orphan_reads_1:
            fh_orphan.write(fq_dict_1.get_raw(read_id).decode())
        for read_id in orphan_reads_2:
            # BUG FIX: the original wrote the stale ``read`` variable from the
            # previous loop here, so file-2 orphans were never written.
            fh_orphan.write(fq_dict_2.get_raw(read_id).decode())
###
def characterize_final_fastq(file, merged_dir, orphan_dir):
    """Summarize a merged FASTQ file and its matching orphan file.

    Parameters
    ----------
    file : str
        Name of the (unzipped) merged FASTQ file.
    merged_dir, orphan_dir : str
        Directories holding the merged and orphan FASTQ files.

    Returns
    -------
    str
        Space-separated: record count, low-quality read count, orphan
        count, mean / min / max read length.
    """
    record_count = 0
    read_lengths = []
    bad_qc_reads = 0
    orphan_count = 0
    # Scan the merged file; ``with`` closes the handle (the original leaked it).
    with open(merged_dir + "/" + file) as fh:
        for record in SeqIO.parse(fh, 'fastq'):
            record_count += 1
            read_lengths.append(len(record.seq))
            quals = record.letter_annotations["phred_quality"]
            # Flag reads whose mean Phred quality is below 20.
            if sum(quals) / len(quals) < 20:
                bad_qc_reads += 1
    # The orphan file shares the merged file's name with a suffix swap.
    orphan_file_name = file.replace("_merged", "_orphans")
    with open(orphan_dir + "/" + orphan_file_name) as fh_orphan:
        for _ in SeqIO.parse(fh_orphan, "fastq"):
            orphan_count += 1
    # Read-length summary statistics (raises on an empty file, as before).
    avg = sum(read_lengths) / len(read_lengths)
    minimum = min(read_lengths)
    maximum = max(read_lengths)
    return (str(record_count) + " " + str(bad_qc_reads) + " " + str(orphan_count)
            + " " + str(avg) + " " + str(minimum) + " " + str(maximum))
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""cnnctc train"""
import argparse
import ast
import mindspore
from mindspore import context
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.dataset import GeneratorDataset
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
from mindspore.train.model import Model
from mindspore.communication.management import init
from mindspore.common import set_seed
from src.config import Config_CNNCTC
from src.callback import LossCallBack
from src.dataset import ST_MJ_Generator_batch_fixed_length, ST_MJ_Generator_batch_fixed_length_para
from src.cnn_ctc import CNNCTC_Model, ctc_loss, WithLossCell
# Fix the global seed so dataset shuffling and init are reproducible.
set_seed(1)
# Graph-mode execution on Ascend; auto mixed precision is disabled here
# because precision is set explicitly via the Model's amp_level later.
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False,
                    save_graphs_path=".", enable_auto_mixed_precision=False)
def dataset_creator(run_distribute):
    """Build the ST+MJ training GeneratorDataset.

    :param run_distribute: when True, use the data-parallel generator.
    :return: a GeneratorDataset yielding img/label_indices/text/sequence_length.
    """
    generator_cls = (ST_MJ_Generator_batch_fixed_length_para
                     if run_distribute
                     else ST_MJ_Generator_batch_fixed_length)
    source = generator_cls()
    return GeneratorDataset(source,
                            ['img', 'label_indices', 'text', 'sequence_length'],
                            num_parallel_workers=8)
def train(args_opt, config):
    """Build and train the CNN-CTC model.

    :param args_opt: parsed command-line arguments (device / distribute flags)
    :param config: Config_CNNCTC instance holding model and training settings
    """
    if args_opt.run_distribute:
        # Initialize collective communication for data-parallel training.
        init()
        context.set_auto_parallel_context(parallel_mode="data_parallel")
    ds = dataset_creator(args_opt.run_distribute)
    net = CNNCTC_Model(config.NUM_CLASS, config.HIDDEN_SIZE, config.FINAL_FEATURE_WIDTH)
    net.set_train(True)
    if config.CKPT_PATH != '':
        # Warm-start from a pretrained checkpoint when one is configured.
        param_dict = load_checkpoint(config.CKPT_PATH)
        load_param_into_net(net, param_dict)
        print('parameters loaded!')
    else:
        print('train from scratch...')
    criterion = ctc_loss()
    opt = mindspore.nn.RMSProp(params=net.trainable_params(), centered=True, learning_rate=config.LR_PARA,
                               momentum=config.MOMENTUM, loss_scale=config.LOSS_SCALE)
    net = WithLossCell(net, criterion)
    # Fixed loss scaling; False = do not let the manager adjust the scale.
    loss_scale_manager = mindspore.train.loss_scale_manager.FixedLossScaleManager(config.LOSS_SCALE, False)
    model = Model(net, optimizer=opt, loss_scale_manager=loss_scale_manager, amp_level="O2")
    callback = LossCallBack()
    config_ck = CheckpointConfig(save_checkpoint_steps=config.SAVE_CKPT_PER_N_STEP,
                                 keep_checkpoint_max=config.KEEP_CKPT_MAX_NUM)
    ckpoint_cb = ModelCheckpoint(prefix="CNNCTC", config=config_ck, directory=config.SAVE_PATH)
    if args_opt.run_distribute:
        # Only rank/device 0 saves checkpoints to avoid concurrent writes.
        if args_opt.device_id == 0:
            model.train(config.TRAIN_EPOCHS, ds, callbacks=[callback, ckpoint_cb], dataset_sink_mode=False)
        else:
            model.train(config.TRAIN_EPOCHS, ds, callbacks=[callback], dataset_sink_mode=False)
    else:
        model.train(config.TRAIN_EPOCHS, ds, callbacks=[callback, ckpoint_cb], dataset_sink_mode=False)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='CNNCTC arg')
    parser.add_argument('--device_id', type=int, default=0, help="Device id, default is 0.")
    parser.add_argument("--ckpt_path", type=str, default="", help="Pretrain file path.")
    # ast.literal_eval parses "True"/"False" strings into real booleans.
    parser.add_argument("--run_distribute", type=ast.literal_eval, default=False,
                        help="Run distribute, default is false.")
    args_cfg = parser.parse_args()
    cfg = Config_CNNCTC()
    # A command-line checkpoint path overrides the one from the config file.
    if args_cfg.ckpt_path != "":
        cfg.CKPT_PATH = args_cfg.ckpt_path
    train(args_cfg, cfg)
|
bp = list(input())
ab = "abcdefghijklmnopqrstuvwxyz"
AB = ab.upper()


def _collapsed_length(chars):
    """Length left after repeatedly deleting adjacent pairs of the same
    letter in opposite cases ('aA' or 'Aa').

    Single O(n) stack pass; equivalent to the original repeated-scan
    elimination, which re-sliced the list and was O(n^2).
    """
    stack = []
    for ch in chars:
        if stack and ch != stack[-1] and ch.lower() == stack[-1].lower():
            stack.pop()
        else:
            stack.append(ch)
    return len(stack)


# For every lowercase letter: remove both cases of it from the input, then
# report how short the remainder collapses to.
for a in ab:
    p = [x for x in bp if x != a and x != a.upper()]
    print(a, _collapsed_length(p))
|
import sys
import os
import argparse
import json
import csv
import numpy as np
import matplotlib.pyplot as plt
from sklearn import model_selection
from sklearn.cross_decomposition import PLSRegression
from sklearn.metrics import mean_squared_error, r2_score
import model_io
import pls_analysis
# Plot/layout constants; several appear to be unused leftovers from the
# companion training script — confirm before removing.
fontSize = 8
totalItems = 0
totalSpectra = 0
snapPositions = 4
#feasibleIndex = range(400, 2400, 1)
def main(argv):
    """Load a saved PLS model, predict on a CSV dataset and write results.

    When the target CSV exists, prediction-quality statistics
    (R2, MSE, SEP, RPD, bias) are also printed.

    :param argv: command-line argument vector (``sys.argv``).
    :return: 0 on success.
    """
    parser = argparse.ArgumentParser(description='Generic PLS deploy')
    parser.add_argument('--data_file', action='store', nargs='?', default='data.csv', dest='data_file', type=str, required=False, help='Specify the name of csv file.')
    parser.add_argument('--target_file', action='store', nargs='?', default='target.csv', dest='target_file', type=str, required=False, help='Specify the filename containing targets corresponding to input_file.')
    parser.add_argument('--model_file', action='store', nargs='?', default='pls_model.json', dest='model_file', type=str, required=False, help='Specify the JSON file that will contain meta-data of trained PLS model.')
    parser.add_argument('--config_file', action='store', nargs='?', default='pls_config.json', dest='config_file', type=str, required=False, help='Specify the JSON file that will contain configuration of PLS model.')
    parser.add_argument('--result_file', action='store', nargs='?', default='result.csv', dest='result_file', type=str, required=False, help='Specify the result file containing pls deploying.')
    # Parse the supplied vector instead of implicitly re-reading sys.argv.
    args = parser.parse_args(argv[1:])
    print('data_file:', args.data_file)
    print('target_file:', args.target_file)
    print('model_file:', args.model_file)
    print('config_file:', args.config_file)
    print('result_file:', args.result_file)
    # BUG FIX: the original messages had no %s placeholder, so the '%'
    # operator raised TypeError instead of reporting the missing file.
    if not os.path.isfile(args.data_file):
        raise Exception('The data_file %s does not exist.' % (args.data_file,))
    if not os.path.isfile(args.model_file):
        raise Exception('The model_file %s does not exist.' % (args.model_file,))
    if not os.path.isfile(args.config_file):
        raise Exception('The config_file %s does not exist.' % (args.config_file,))
    ### Loading data (``with`` closes the handles; the original leaked them)
    data_list = []
    with open(args.data_file, 'r', newline='') as data_fh:
        for row in csv.DictReader(data_fh):
            data_list.append(list(row.values()))
    Xs = np.array(data_list, dtype=np.float64)
    print('Total samples:', Xs.shape[0])
    ### Loading PLS model
    plsModel = model_io.loadModelFromJSON(args.model_file)
    predictYs = plsModel.predict(Xs)
    ### Record one prediction per row
    with open(args.result_file, 'w', newline='') as result_fh:
        result_writer = csv.DictWriter(result_fh, fieldnames=['result'])
        result_writer.writeheader()
        for result in predictYs:
            result_writer.writerow({'result': result[0]})
    ### Scoring (only when a target file is available)
    if os.path.isfile(args.target_file):
        target_list = []
        with open(args.target_file, newline='') as target_fh:
            for row in csv.DictReader(target_fh):
                target_list.append(list(row.values()))
        Ys = np.array(target_list, dtype=np.float64)
        assert Xs.shape[0] == Ys.shape[0]
        score = r2_score(Ys, predictYs)
        mse = mean_squared_error(Ys, predictYs)
        # BUG FIX: Ys is (n, 1) while predictYs[:, 0] is (n,); subtracting
        # them broadcast to an (n, n) matrix, corrupting SEP and bias.
        residuals = predictYs[:, 0] - Ys[:, 0]
        sep = np.std(residuals)
        rpd = np.std(Ys) / sep
        bias = np.mean(residuals)
        print('R2: %5.3f' % (score,))
        print('MSE: %5.3f' % (mse,))
        print('SEP: %5.3f' % (sep,))
        print('RPD: %5.3f' % (rpd,))
        print('Bias: %5.3f' % (bias,))
    return 0
if __name__ == '__main__':
    # Forward the real argument vector so the module stays importable.
    main(sys.argv)
|
from django.shortcuts import render
from .models import Price, Site
# Create your views here.
def home (request):
    """Render the static landing page."""
    return render(request, 'home.html')
def prices(request):
    """Render the price list ordered by base price (ascending)."""
    ordered_prices = Price.objects.order_by('price_base')
    return render(request, 'prices.html', {'prices': ordered_prices})
def sites(request):
    """Render the site list ordered by creation date (ascending)."""
    ordered_sites = Site.objects.order_by('date_made')
    return render(request, 'sites.html', {'sites': ordered_sites})
|
# -*- coding: utf-8 -*-
# from subprocess import *
# import threading
import time
import subprocess
import tornado.process
# Build-script defaults: branch/project name, version string, and the
# command template that gets written to the compile tool's stdin.
gProjectName = "PD_103023_ELK"
gVersion="6.6.6.6"
gCmdLine = "start -branch %s -setVersion %s -uploadFtp -signature\n"
# gProcess = subprocess.Popen('ffmpeg.exe', bufsize=10240, shell=True,stdin=subprocess.PIPE , stdout=subprocess.PIPE)
# # print gProcess.communicate()
# time.sleep(1)
def Demo1():
    # Run ffmpeg twice, blocking until each call returns.
    # NOTE(review): passing a plain command string without shell=True only
    # works on Windows — confirm the target platform.
    subprocess.call("ffmpeg -i 1.flv")
    time.sleep(2)
    subprocess.call("ffmpeg -h")
def Demo2():
    # Chain two processes: cmd.exe's stdout is fed into ping's stdin
    # (Windows-only demo); then dump ping's output (Python 2 print).
    iP1 = subprocess.Popen(['cmd.exe'], bufsize=10240, shell=True,stdin=subprocess.PIPE , stdout=subprocess.PIPE)
    iP2 = subprocess.Popen(['ping','baidu.com'], bufsize=10240, shell=True,stdin=iP1.stdout , stdout=subprocess.PIPE)
    print iP2.stdout.read()
def Demo3():
    # Attempt to drive compile.exe by chaining Popen objects.
    # NOTE(review): spawning '3\n' / 'list\n' as *programs* cannot type into
    # iP1's stdin — Demo4 shows the working stdin.write approach.
    iP1 = subprocess.Popen(['compile.exe'], bufsize=1024, shell=True,stdin=subprocess.PIPE , stdout=subprocess.PIPE)
    print iP1.stdout.read()
    time.sleep(2)
    iP2 = subprocess.Popen(['3\n'], bufsize=1024, shell=True,stdin=iP1.stdout , stdout=subprocess.PIPE)
    print iP2.stdout.read()
    iP3 = subprocess.Popen(['list\n'], bufsize=1024, shell=True,stdin=iP2.stdout , stdout=subprocess.PIPE)
def Demo4():
    # Write two commands into compile.exe's stdin, then dump its output.
    # NOTE(review): read() blocks until compile.exe closes its stdout.
    iP1 = subprocess.Popen(['compile.exe'], bufsize=1024, shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE )
    iP1.stdin.write("3\n")
    iP1.stdin.flush()
    time.sleep(1)
    iP1.stdin.write("list\n")
    iP1.stdin.flush()
    print iP1.stdout.read()
if __name__ == "__main__":
    Demo4()
# Historical experiments kept for reference below; the sleeps waited for
# the child process to become ready before writing more input.
# print iResult[1]
# print gProcess.stdout.read()
# time.sleep(2)
# gProcess.stdin.write("ffmpeg -i 1.flv\n")
# gProcess.stdin.flush()
# time.sleep(1)
# gProcess.stdin.write("ffmpeg -i 1.flv\n")
# gProcess.stdin.flush()
# time.sleep(1)
# gProcess.stdin.write("clear\n")
# gProcess.stdin.flush()
# iIndex = 0
# print "waiting for clean ... %d"%iIndex,
# while iIndex < 3:
# print "\rwaiting for clean ... %d"%iIndex,
# iIndex+=1
# time.sleep(1)
# print "\rbegin to compile project"
# gCmdLine = str.format(gCmdLine%(gProjectName,gVersion))
# gProcess.stdin.write(gCmdLine)
# gProcess.stdin.flush()
# Block so the console window stays open (Python 2 input).
iInput = raw_input()
|
#!/usr/bin/python
import random
def lessthan(test, pivot):
    """Default ordering predicate: True when ``test`` sorts before ``pivot``."""
    is_less = test < pivot
    return is_less
def greaterthan(test, pivot):
    """Reverse ordering predicate: True when ``pivot`` sorts before ``test``."""
    is_greater = pivot < test
    return is_greater
def partition(a, left, right, predicate):
    """Partition a[left..right] in place around the pivot a[right].

    Elements satisfying predicate(x, pivot) end up left of the returned
    index; the pivot is swapped into that index, which is returned.
    """
    pivot = a[right]
    il = left   # scans rightward over elements already "in order"
    ir = right  # scans leftward over elements already "out of order"
    while il < ir:
        # Advance il past elements that belong on the left side.
        while il < right and predicate(a[il], pivot):
            il += 1
        # Retreat ir past elements that belong on the right side.
        while ir >= left and not predicate(a[ir], pivot):
            ir -= 1
        if il < ir:
            a[il], a[ir] = a[ir], a[il]
    # Move the pivot into its final position.
    a[il], a[right] = a[right], a[il]
    return il
def _qsort(a, left, right, predicate):
    """Recursive quicksort over a[left..right] (inclusive)."""
    if right <= left:
        return
    split = partition(a, left, right, predicate)
    # Recurse only into sub-ranges that contain at least two elements.
    if left < split - 1:
        _qsort(a, left, split - 1, predicate)
    if split + 1 < right:
        _qsort(a, split + 1, right, predicate)
def qsort(a, predicate=lessthan):
    """Sort list ``a`` in place, ordered by ``predicate``."""
    last = len(a) - 1
    _qsort(a, 0, last, predicate)
def test_qsort():
    """Sort a random list descending; print it if qsort misordered it."""
    nfrom = 0
    nto = 15
    nelements = 19
    a = [random.randint(nfrom, nto) for x in range(nelements)]
    qsort(a, greaterthan)
    if not sorted(a, reverse=True) == a:
        # Parenthesized print behaves identically on Python 2 and 3
        # (the original ``print a`` statement was Python-2-only syntax).
        print(a)
def main():
    """Run the randomized qsort self-check 100 times."""
    for _ in range(100):
        test_qsort()
# Run the randomized self-test when executed directly.
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 17 11:05:50 2018
@author: dell
"""
from tkinter import *
from PIL import Image, ImageTk
from tkinter.messagebox import *
import main3x1
import main3x2
import main3x3
import main3x4
import main3x5
import main3x6
import qjbl
qjbl.bl()  # presumably initializes globals shared by the main3x* modules — confirm in qjbl
def info():
    '''Show the "About" dialog describing the software (blocks until dismissed).'''
    show = showinfo(title="软件介绍",
                    message='''
本软件用于圆柱圆锥滚子的修形设计工作,能方便快捷的计算给
定工况下滚子的接触应力分布情况,为修形设计提供参考,大大
提高工作效率。
''',)
def fun():
    """Replace the start screen with the new-project page (main2)."""
    f1.destroy()
    # Imported lazily so main2's widgets are only built on demand.
    import main2
    main2.main2(root)
def fun1():
    """Open the cylindrical-roller, straight-profile analysis window."""
    tl = Toplevel()
    main3x1.func1(root,tl)
def fun2():
    """Open the cylindrical-roller, circular-arc-profile analysis window."""
    tl = Toplevel()
    main3x2.func2(root,tl)
def fun3():
    """Open the cylindrical-roller, logarithmic-profile analysis window."""
    tl = Toplevel()
    main3x3.func3(root,tl)
def fun4():
    """Open the tapered-roller, straight-profile analysis window."""
    tl = Toplevel()
    main3x4.func4(root,tl)
def fun5():
    """Open the tapered-roller, circular-arc-profile analysis window."""
    tl = Toplevel()
    main3x5.func5(root,tl)
def fun6():
    """Open the tapered-roller, logarithmic-profile analysis window."""
    tl = Toplevel()
    main3x6.func6(root,tl)
root = Tk()
root.title('滚子修形分析系统')
root.resizable(width=False,height=False)
# Menu bar
menu = Menu(root) # acts as the menu bar
root.config(menu=menu)
filemenu = Menu(menu,tearoff=0) # new menu attached to the bar
menu.add_cascade(label="新建", menu=filemenu) # "New" drop-down menu
filemenu.add_command(label="圆柱滚子直线型",command=fun1) # cylindrical roller, straight profile
filemenu.add_command(label="圆柱滚子圆弧型",command=fun2) # cylindrical roller, circular-arc profile
filemenu.add_command(label="圆柱滚子对数型",command=fun3) # cylindrical roller, logarithmic profile
filemenu.add_separator() # separator line
filemenu.add_command(label="圆锥滚子直线型",command=fun4) # tapered roller, straight profile
filemenu.add_command(label="圆锥滚子圆弧型",command=fun5) # tapered roller, circular-arc profile
filemenu.add_command(label="圆锥滚子对数型",command=fun6) # tapered roller, logarithmic profile
# =============================================================================
# filemenu.add_command(label="打开",)
# filemenu.add_separator()
# filemenu.add_command(label="保存",)
# =============================================================================
infomenu = Menu(menu,tearoff=0)
menu.add_cascade(label="关于", menu=infomenu) # "About" menu
infomenu.add_command(label="软件介绍",command=info)
f1=Frame(root)
f1.pack()
c1 = Canvas(f1,width=1000,height=600)
c1.pack()
# Keep a module-level reference to the image or Tk garbage-collects it.
img = ImageTk.PhotoImage(file = 'bjt1.jpg')
c1.create_image(0,0,anchor='nw',image=img)
c1.create_text(500,200,text='圆柱圆锥滚子修形分析系统',font=('黑体',50),
               fill='white')
c1.create_text(500,550,text='大连理工大学 - 机械工程学院',
               font=('黑体',20),fill='white')
b = Button(f1,text='新建项目',font=('黑体',20),fg='blue',command=fun)
b.place(relx=0.5,rely=0.6,anchor='center')
root.mainloop()
|
import os
from setuptools import setup, find_packages
import distutils.cmd
import distutils.log
from version import get_git_version
# Version/label come from git metadata; the rest are static package details.
VERSION, SOURCE_LABEL = get_git_version()
PROJECT = 'dossier.models'
AUTHOR = 'Diffeo, Inc.'
AUTHOR_EMAIL = 'support@diffeo.com'
URL = 'http://github.com/dossier/dossier.models'
DESC = 'Active learning models'
def read_file(file_name):
    """Return the text of *file_name*, resolved relative to this file."""
    file_path = os.path.join(os.path.dirname(__file__), file_name)
    # Context manager guarantees the handle is closed (the original
    # left it open until garbage collection).
    with open(file_path) as fh:
        return fh.read()
class DataInstallCommand(distutils.cmd.Command):
    '''installs nltk data'''
    # No custom options; the hooks below satisfy the Command interface.
    user_options = []
    description = '''installs nltk data'''
    def initialize_options(self):
        # Required by distutils.cmd.Command; nothing to set up.
        pass
    def finalize_options(self):
        # Required by distutils.cmd.Command; nothing to validate.
        pass
    def run(self):
        """Download every NLTK package listed by the feature tests."""
        import nltk
        from dossier.models.tests.test_features import nltk_data_packages
        for data_name in nltk_data_packages:
            print('nltk.download(%r)' % data_name)
            nltk.download(data_name)
setup(
    name=PROJECT,
    version=VERSION,
    description=DESC,
    license='MIT',
    long_description=read_file('README.md'),
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    url=URL,
    packages=find_packages(),
    cmdclass={'install_data': DataInstallCommand},
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Topic :: Utilities',
        'License :: OSI Approved :: MIT License',
    ],
    # NOTE: 'coordinate' was listed twice in the original; the duplicate
    # entry has been removed.
    install_requires=[
        'cbor',
        'coordinate',
        'beautifulsoup4',
        'dossier.fc >= 0.1.4',
        'dossier.label >= 0.1.5',
        'dossier.web >= 0.7.12',
        'gensim',
        'happybase',
        'joblib',
        'many_stop_words',
        'nltk',
        'numpy',
        'nilsimsa',
        'regex',
        'requests',
        'scipy',
        'scikit-learn',
        'streamcorpus-pipeline>=0.7.7',
        'pytest',
        'pytest-diffeo >= 0.1.4',
        'urlnorm >= 1.1.3',
        'xlsxwriter',
        'Pillow',
        'yakonfig',
        'kvlayer',
        'trollius',
    ],
    include_package_data=True,
    zip_safe=False,
    entry_points={
        'console_scripts': [
            'dossier.models = dossier.models.web.run:main',
            'dossier.models.soft_selectors = dossier.models.soft_selectors:main',
            'dossier.models.linker = dossier.models.linker.run:main',
            'dossier.models.dragnet = dossier.models.dragnet:main',
            'dossier.etl = dossier.models.etl:main',
        ],
        'streamcorpus_pipeline.stages': [
            'to_dossier_store = dossier.models.etl.interface:to_dossier_store',
        ],
    },
)
|
#coding:utf-8
from django.shortcuts import render, render_to_response
from django.contrib import auth
# Create your views here.
def login(request, **kwargs):
    """Render the login page (Python 2 / Django view)."""
    print "into login"
    '''
    response = render_to_response('login/login.html')
    # 在客户端Cookie中添加Post表单token,避免用户重复提交表单
    response.set_cookie("postToken",value='allow')
    return response
    '''
    return render(request, 'login/login.html')
'''
token = "allow" # 可以采用随机数
request.session['postToken'] = token
# 将Token值返回给前台模板
return render_to_response('login/login.html',{'postToken':token})
'''
def main(request, **kwargs):
    """Log the current user out, then render the landing page."""
    auth.logout(request)
    return render(request, 'login/index.html')
# Generated by Django 3.1.3 on 2020-11-25 12:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates Initial_Contact and Plot
    (Plot holds a one-to-one link back to Initial_Contact)."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Initial_Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Relative', models.CharField(max_length=30)),
                ('Owner', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='Plot',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('initial_information', models.ImageField(upload_to='initial_information')),
                ('initial_feedback', models.CharField(choices=[('INTERESTED', 'interested'), ('NOT INTERESTED', 'not interested')], max_length=60)),
                ('agreement', models.ImageField(upload_to='agreement')),
                ('first_payment_by_cheque', models.BooleanField(default=False)),
                ('second_payment_by_cheque', models.BooleanField(default=False)),
                ('third_payment_by_cheque', models.BooleanField(default=False)),
                # Deleting the contact cascades to its plot.
                ('initial_contact', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='ploting.initial_contact')),
            ],
        ),
    ]
|
"""
Using for loop to calculate the summary from 1 to 100
"""
sum = 0
for x in range(1,101):
sum += x
print("The summary from 1 to 100 is %f" % sum)
############################################################
"""
Using for-in loop to calculate the even summary from 1 to 100
"""
sum = 0
for y in range(0, 101, 2):
sum += y
#print(y)
print("The even summary from 1 to 100 is %f" % sum)
############################################################
"""
The other way to use for-in loop to calculate the even summary from 1 to 100
"""
sum = 0
for z in range(1, 101):
if z %2 ==0:
sum += z
print("The even summary from 1 to 100 is %f" % sum) |
import os
import SocketServer
from ComputerVision.FaceDetector import FaceDetector
from ComputerVision.CV2Wrapper import CV2Wrapper
from Database.DBWrapper import DBWrapper
class WebInterfaceTCPHandler(SocketServer.BaseRequestHandler):
    """
    The request handler class for our server.
    It is instantiated once per connection to the server, and must
    override the handle() method to implement communication to the
    client.
    Wire protocol (each value newline-terminated): command, then for ADD:
    person name, image count, and per image its byte length and raw bytes.
    """
    def __init__(self, *args, **kwargs):
        # Old-style base class: delegate explicitly; no extra state needed.
        SocketServer.BaseRequestHandler.__init__(self, *args, **kwargs)
    def handle(self):
        """Process one connection; only the ADD command is implemented."""
        # self.request is the TCP socket connected to the client
        faceDetector = FaceDetector()
        openCV = CV2Wrapper()
        db = DBWrapper()
        # receives the command name
        command = self._readCommand()
        if command == 'ADD':
            # receives the person name
            personName = self._readName()
            # receives the number of images to receive
            numberOfImages = self._readLength()
            faces = []
            for x in xrange(0, numberOfImages):
                # receives the length of an image
                length = self._readLength()
                # receives the image (stored in self.data by _readAll)
                self._readAll(length)
                faces = faces + [face for face in faceDetector.detectFromBinary(self.data)]
            print "Detected %d faces" % len(faces)
            db.addPattern(personName, [openCV.toSIFTMatrix(face) for face in faces])
            self.request.sendall('ok')
    def _readCommand(self):
        # Commands arrive as a single newline-terminated line.
        return self._readLine()
    def _readName(self):
        # Names arrive as a single newline-terminated line.
        return self._readLine()
    def _readLine(self):
        """Read one byte at a time until a newline; return the line."""
        rv = ''
        while True:
            readed = self.request.recv(1)
            if readed == '\n':
                return rv
            else:
                rv = rv + readed
    def _readLength(self):
        """Read a newline-terminated decimal integer from the socket."""
        rv = 0
        while True:
            readed = self.request.recv(1)
            if readed == '\n':
                return rv
            else:
                rv = rv * 10 + int(readed)
    def _readAll(self, length):
        """Read exactly ``length`` bytes from the socket into ``self.data``."""
        self.data = self.request.recv(length)
        while len(self.data) < length:
            self.data += self.request.recv(length - len(self.data))
|
import os, sys
# Make the application package importable and point setuptools' egg cache
# at a writable location before loading the WSGI app.
sys.path.append('/var/scalak/scalakweb')
os.environ['PYTHON_EGG_CACHE'] = '/var/scalak/python_egg_cache'
from paste.deploy import loadapp
# WSGI entry point: build the application from the production Paste config.
application = loadapp('config:/var/scalak/scalakweb/production.ini')
|
#!/usr/bin/python
import sys
# Hadoop-streaming mapper (Python 2): emit column 11 as the sort key,
# tab-separated from the whole original CSV line.
flag=0
for input_line in sys.stdin:
    line = input_line.strip().split(",")
    # The first line is the CSV header: remember the column names, emit nothing.
    if (flag ==0):
        columns=line
        flag=1
    else:
        disno = float(line[11])
        print "{0}\t{1}".format(disno,str(input_line.strip()))
from bs4 import BeautifulSoup
# Parse the exported HTML and collect the text of every <li>, grouped by
# the text of its parent <ul>.
with open('/home/fenris/work/Internshit/Kamtech/out/Bike Rental System.html', 'r') as data:
    soup = BeautifulSoup(data, 'lxml')

lists = {}
for ultag in soup.find_all('ul'):
    temp = []
    for litag in ultag.find_all('li'):
        # BUG FIX: the original appended the bound method ``litag.text.split``
        # itself (never called), storing function objects instead of words.
        temp.append(litag.text.split())
    lists[ultag.text] = temp
print(lists)
|
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
# 100 evenly spaced sample points in [0, 2*pi]; linspace is deterministic
# (the original name "random_array" was misleading).
theta = np.linspace(0, 2 * np.pi, 100)
sin_values = np.sin(theta)             # sine of every sample
cos_values = np.cos(theta)             # cosine of every sample
sum_values = sin_values + cos_values   # element-wise sum of both curves

plt.plot(sin_values, 'k--', label='array_sin')      # sine curve (black)
plt.plot(cos_values, 'b--', label='array_cos')      # cosine curve (blue)
plt.plot(sum_values, 'r--', label='array_sin+cos')  # sum curve (red)
plt.xlabel('I am x axis')
plt.ylabel('I am y axis')
plt.title('Parte 3')
plt.legend()
plt.show()
|
# -*- coding: utf-8 -*-
def booleNett(phrase):
    '''
    phrase: byte string holding a boolean expression.
    Parentheses must balance, spaces are tolerated, and every character
    other than the |ET|/|OU| operators is treated as a variable.
    Returns the cleaned phrase re-encoded as UTF-8, or None when a basic
    syntax check fails.  (Python 2: relies on str.decode.)
    '''
    # 1-Strip all spaces
    phrase=phrase.decode(encoding='UTF-8')
    phrase=phrase.replace(" ","")
    # 2-Basic sanity checks before any deeper processing
    # 2.1-Operators must be well formed: no stray '|' characters remain
    #     once the |ET| (AND) and |OU| (OR) operators are removed.
    phrase_sans_op=phrase.replace("|ET|","")
    phrase_sans_op=phrase_sans_op.replace("|OU|","")
    if "|" in phrase_sans_op:
        print("Présence de caractères \'|\' indésirables")
        return None
    # 2.2-Opening and closing parentheses must balance
    parEntr=phrase_sans_op.count('(')
    parFerm=phrase_sans_op.count(')')
    if parEntr!=parFerm:
        print("Votre phrase contient "+str(parEntr)+" ouverture(s) de parenthèse pour "+str(parFerm)+" fermeture(s) !")
        return None
    # 2.3-An operator must not touch the inside of a parenthesis
    if '(|' in phrase or '|)' in phrase:
        print('Un opérateur colle l\'intérieur d\'une parenthèse !')
        return None
    return phrase.encode(encoding='UTF-8')
def ou(liste):
    '''
    liste: list of evaluated propositions (OR-terms).
    Intended to return a simplified linear form of the OR-expression.
    NOTE(review): the subset-elimination step (3) is an unfinished
    placeholder; the function currently only deduplicates the input.
    '''
    # 1-Remove duplicates (note: set() does not preserve order)
    liste=list(set(liste))
    # 2-Expand every term on the |ET| (AND) operator
    dev=[]
    for e in range(len(liste)):
        dev.append(liste[e].split("|ET|"))
    print(dev)
    # 3-Intended: drop any OR-term whose factors are a superset of another
    #   (e.g. a|ET|b|ET|c is subsumed by a|ET|b) — not yet implemented.
    simpl=dev[:]
    for e in range(len(dev)):
        # A COMPLETER (to be completed)
        None
    return liste
import random
from django.core.management.base import BaseCommand
from django.contrib.admin.utils import flatten
from django_seed import Seed
from playlists import models as playlist_models
from users import models as user_models
from songs import models as song_models
class Command(BaseCommand):
    """
    class: Command
    author: haein
    des: PlayList Command Model Definition
    date: 2020-04-30
    """
    help = "This command creates playlists"
    def add_arguments(self, parser):
        # --number controls how many fake playlists are seeded.
        parser.add_argument(
            "--number",
            type=int,
            default=1,
            help="How many playlists do you want to created",
        )
    def handle(self, *args, **options):
        """Seed ``--number`` playlists with a random category, user and songs."""
        number = options.get("number")
        seeder = Seed.seeder()
        # order_by("?") draws a random row per generated playlist.
        seeder.add_entity(
            playlist_models.PlayList,
            number,
            {
                "description": lambda x: seeder.faker.sentence(),
                "category": lambda x: playlist_models.ListCategory.objects.order_by(
                    "?"
                ).first(),
                "user": lambda x: user_models.User.objects.order_by("?").first(),
            },
        )
        created_lists = seeder.execute()
        created_lists = flatten(list(created_lists.values()))
        for li in created_lists:
            created_list = playlist_models.PlayList.objects.get(pk=li)
            # Mark the assigned category as used.
            category = created_list.category
            category.used = True
            category.save()
            song_cnt = song_models.Song.objects.count()
            random_number = random.randint(min(2, song_cnt), min(50, song_cnt))
            # NOTE(review): the slice starts at 2, so the first two shuffled
            # songs are always skipped — confirm [:random_number] wasn't meant.
            songs = song_models.Song.objects.all().order_by("?")[2:random_number]
            for song in songs:
                created_list.songs.add(song)
            created_list.save()
        self.stdout.write(self.style.SUCCESS(f"{number} playlists are created!"))
|
"""
Example explaining the peculiriaties of evaluation
"""
from ml_recsys_tools.datasets.prep_movielense_data import get_and_prep_data
import pandas as pd
from ml_recsys_tools.data_handlers.interaction_handlers_base import ObservationsDF
from ml_recsys_tools.recommenders.lightfm_recommender import LightFMRecommender
# Download/prepare the MovieLens data and load the ratings table.
rating_csv_path, users_csv_path, movies_csv_path = get_and_prep_data()
ratings_df = pd.read_csv(rating_csv_path)
obs = ObservationsDF(ratings_df, uid_col='userid', iid_col='itemid')
# Hold out 20% of the observations for evaluation.
train_obs, test_obs = obs.split_train_test(ratio=0.2)
# train and test LightFM recommender
lfm_rec = LightFMRecommender()
lfm_rec.fit(train_obs, epochs=10)
# print evaluation results:
# for LightFM there is an exact method that on large and sparse
# data might be too slow (for this data it's much faster though)
print(lfm_rec.eval_on_test_by_ranking_exact(test_obs.df_obs, prefix='lfm regular exact '))
# this ranking evaluation is done by sampling top n_rec recommendations
# rather than all ranks for all items (very slow and memory-wise expensive for large data).
# choosing higher values for n_rec makes
# the evaluation more accurate (less pessimmistic)
# this way the evaluation is mostly accurate for the top results,
# and is quite pessimmistic (especially for AUC, which scores for all ranks) and any non @k metric
print(lfm_rec.eval_on_test_by_ranking(test_obs.df_obs, prefix='lfm regular ', n_rec=100))
|
import psycopg2
# BUG FIX: the original DSN had no space between user='...' and host='...',
# which makes the connection string invalid.
connect_str = "dbname='empleado' user='cursoPython' host='localhost' password='1234567890'"
conexion = psycopg2.connect(connect_str)
cur = conexion.cursor()
# cursor.execute() returns None; fetchall() is what yields the rows.
cur.execute("SELECT * FROM empleado")
rows = cur.fetchall()
for row in rows:
    print("ID: ",row[0],"\nNombre: ",row[1],row[2],"\nSueldo: ",row[3])
cur.close()
conexion.close()
|
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from fbprophet import Prophet
# Seaborn styling for the component plots.
sns.set(font_scale=1.3)
df = pd.read_csv('ts_df.csv')
# Fit Prophet on the series and predict over the same frame (in-sample),
# then show the trend/seasonality component plots.
m = Prophet()
m.fit(df)
forecast = m.predict(df)
fig = m.plot_components(forecast)
plt.show()
|
#-*- coding: utf-8 -*-
from ElasticSearchManager import esManager
class EVElasticSearch:
    """Base Class for EV Data Objects"""
    # NOTE(review): both attributes are class-level, so ``_es`` is one
    # esManager instance shared by every EVElasticSearch object.
    _internalCount = 0
    _es = esManager()
    def __init__(self):
        # Rebinding on ``self`` shadows the class attribute, so each
        # instance ends up with its own count (always 1 here) rather than
        # a global instance counter — confirm that is intended.
        self._internalCount = self._internalCount + 1
    def showCount(self):
        # Return the per-instance count set in __init__.
        return self._internalCount
    def insert(self):
        # Delegate to the shared ElasticSearch manager.
        self._es.insert()
# self test
#jp = EVBase()
#print(jp.__doc__)
#print(jp.showCount())
#jp.insert()
|
from transformers import GPT2LMHeadModel, BertTokenizer, GPT2Config, TrainingArguments, Trainer
import torch
import os
import argparse
import random
import numpy as np
import sys
sys.path.append('/home/user/project/text_generation/')
from src.util import read_data, split_data
from src.text_keywords_generation.dataset import GetDataset, get_train_val_dataloader
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def seed_everything(seed):
    '''
    Seed every RNG used in training so runs are reproducible.
    :param seed: integer seed applied to random, numpy and torch
    :return: None
    '''
    random.seed(seed)
    # NOTE(review): setting PYTHONHASHSEED at runtime only affects child
    # processes, not the hash randomization of the current interpreter.
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # cover multi-GPU setups too
    torch.backends.cudnn.deterministic = True
    # BUG FIX: benchmark=True lets cuDNN pick algorithms non-deterministically,
    # defeating deterministic=True above; it must be False for reproducibility.
    torch.backends.cudnn.benchmark = False
def load_tokenizer(tokenizer_path, special_token_path=None):
    '''
    Load a BERT tokenizer and optionally register extra special tokens.
    :param tokenizer_path: name/directory understood by from_pretrained
    :param special_token_path: despite the name, a special-tokens *mapping*
        passed straight through to add_special_tokens (callers pass a dict)
    :return: the tokenizer instance
    '''
    print('tokenizer loadding...')
    bert_tokenizer = BertTokenizer.from_pretrained(tokenizer_path)
    if special_token_path:
        bert_tokenizer.add_special_tokens(special_token_path)
    return bert_tokenizer
def load_pretrained_mode(tokenizer, pretrained_model_path, special_token_path=None):
    '''
    Load a pretrained GPT2 LM-head model and prepare it for fine tuning.
    :param tokenizer: tokenizer whose special-token ids are wired into the config
    :param pretrained_model_path: path/name for from_pretrained
    :param special_token_path: if truthy, resize embeddings to the tokenizer size
    :return: the model moved to DEVICE, with only the LM head trainable
    '''
    print("pretrained model loadding...")
    # Bug fixes vs. the original: GPT2Config expects integer token *ids*
    # (tokenizer.bos_token_id etc.), not the token strings, and the keyword
    # was misspelled `eos__token_id` so the eos id was never set at all.
    gpt2Config = GPT2Config.from_pretrained(pretrained_model_path,
                                            bos_token_id=tokenizer.bos_token_id,
                                            eos_token_id=tokenizer.eos_token_id,
                                            sep_token_id=tokenizer.sep_token_id,
                                            pad_token_id=tokenizer.pad_token_id,
                                            output_hidden_states=False)
    model = GPT2LMHeadModel.from_pretrained(pretrained_model_path, config=gpt2Config)
    if special_token_path:
        # Special tokens were added, so the embedding matrix must grow to
        # cover the enlarged vocabulary.
        model.resize_token_embeddings(len(tokenizer))
    # Freeze every parameter ...
    for param in model.parameters():
        param.requires_grad = False
    # ... then unfreeze only the LM head (the "train just the last layer"
    # option of the original; the "last 6 blocks" variant was dead code).
    for param in model.lm_head.parameters():
        param.requires_grad = True
    return model.to(DEVICE)
def build_mode(tokenizer, model_config, special_token_path=None):
    '''
    Build a GPT2 model from a JSON config only (no pretrained weights).
    :param tokenizer: tokenizer, used solely to size the embedding matrix
    :param model_config: path to a GPT2 JSON config file
    :param special_token_path: if truthy, resize embeddings to the tokenizer
    :return: the freshly initialised model on DEVICE
    '''
    config = GPT2Config.from_json_file(model_config)
    net = GPT2LMHeadModel(config=config)
    if special_token_path:
        net.resize_token_embeddings(len(tokenizer))
    return net.to(DEVICE)
def train_val(model, tokenizer, train_dataset, val_dataset, param_args):
    '''
    Train and evaluate the model with the HuggingFace Trainer.
    :param model: model to train
    :param tokenizer: tokenizer saved alongside the model checkpoint
    :param train_dataset: training dataset
    :param val_dataset: validation dataset (its length fixes the eval batch size)
    :param param_args: argparse namespace carrying the hyper-parameters
    :return: None; the best model is written to param_args.output_dir
    '''
    hf_args = TrainingArguments(
        output_dir=param_args.output_dir,
        num_train_epochs=param_args.epochs,
        per_device_train_batch_size=param_args.batch_size,
        per_device_eval_batch_size=len(val_dataset),
        gradient_accumulation_steps=param_args.gradient_accumulation_steps,
        evaluation_strategy=param_args.evaluation_strategy,
        fp16=param_args.fp16,
        fp16_opt_level=param_args.apex_opt_level,
        warmup_steps=param_args.warmup_steps,
        learning_rate=param_args.lr,
        adam_epsilon=param_args.adam_eps,
        weight_decay=param_args.weight_decay,
        save_total_limit=1,        # keep only the single best checkpoint
        load_best_model_at_end=True,
        logging_dir=param_args.logging_dir,
    )
    trainer = Trainer(
        model=model,
        args=hf_args,
        train_dataset=train_dataset,
        eval_dataset=val_dataset,
        tokenizer=tokenizer,
    )
    trainer.train()
    trainer.save_model()
if __name__ == '__main__':
    # Entry point: parse hyper-parameters, build datasets and model, train.
    path = os.path.abspath(os.path.join(os.getcwd(), ".."))
    print("path : {}".format(path))
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--pretrained_model_path',
        default=os.path.join(path, "model/pretrained_model_401"),
        type=str,
        required=False,
        help='预训练模型路径'
    )
    parser.add_argument(
        "--config_path",
        default=os.path.join(path, "model/pretrained_model_401/config.json"),
        type=str,
        required=False,
        help="模型参数",
    )
    parser.add_argument(
        '--special_token_path',
        default=os.path.join(path, 'model/pretrained_model_401/special_tokens_map.json')
    )
    parser.add_argument(
        "--vocab_path",
        default=os.path.join(path, "model/pretrained_model_401/vocab.txt"),
        type=str,
        required=False,
        help="选择词典",
    )
    parser.add_argument(
        "--data_path",
        default=os.path.join(path, 'data/news.csv'),
        type=str,
        required=False,
        help="训练语料",
    )
    parser.add_argument("--epochs", default=10, type=int, required=False, help="训练epochs")
    parser.add_argument(
        "--batch_size", default=8, type=int, required=False, help="训练batch size"
    )
    parser.add_argument("--lr", default=1.5e-3, type=float, required=False, help="学习率")
    parser.add_argument("--warmup_steps", default=1e2, type=float, required=False, help="lr更新的耐心系数")
    parser.add_argument("--gradient_accumulation_steps", default=16, type=int, required=False, help="多少次更新一次梯度")
    parser.add_argument("--weight_decay", default=1e-2, type=float, required=False, help="衰减系数")
    parser.add_argument(
        "--max_length", default=768, type=int, required=False, help="单条文本最长长度"
    )
    parser.add_argument(
        "--train_ratio", default=0.9, type=float, required=False, help="训练集比例"
    )
    parser.add_argument(
        "--print_loss", default=1, type=int, required=False, help="多少步打印一次loss"
    )
    parser.add_argument(
        "--output_dir", default=os.path.join(path, 'model/text_keywords_generation_model'), type=str, required=False, help="模型输出路径"
    )
    parser.add_argument("--logging_dir", default=os.path.join(path, 'model/text_keywords_generation_model/logs'), type=str, required=False, help="log输入路径")
    parser.add_argument(
        "--seed", default=2021, type=int, required=False, help="python hash seed"
    )
    parser.add_argument(
        "--use_apex", default=True, type=bool, required=False, help="使用apex"
    )
    parser.add_argument("--fp16", default=True, type=bool, required=False, help="使用apex单精度")
    parser.add_argument("--evaluation_strategy", default="epoch", type=str, required=False, help="评估策略")
    parser.add_argument("--adam_eps", default=1e-8, type=float, required=False, help="adam eps,防止除零")
    parser.add_argument("--apex_opt_level", default="o1", type=str, required=False, help="apex训练类型")
    args = parser.parse_args()
    # Unpack the namespace into locals. NOTE(review): `seed` is parsed but
    # seed_everything() is never called, and `use_apex`/`vocab_path`/
    # `print_loss` are unpacked but unused below -- confirm intent.
    pretrained_model_path = args.pretrained_model_path
    config_path = args.config_path
    vocab_path = args.vocab_path
    data_path = args.data_path
    special_token_path = args.special_token_path
    epochs = args.epochs
    batch_size = args.batch_size
    lr = args.lr
    warmup_steps = args.warmup_steps
    max_length = args.max_length
    train_ratio = args.train_ratio
    print_loss = args.print_loss
    output_dir = args.output_dir
    logging_dir = args.logging_dir
    seed = args.seed
    use_apex = args.use_apex
    apex_opt_level = args.apex_opt_level
    warmup_steps = args.warmup_steps
    gradient_accumulation_steps = args.gradient_accumulation_steps
    weight_decay = args.weight_decay
    fp16 = args.fp16
    evaluation_strategy = args.evaluation_strategy
    # Special tokens registered with the tokenizer and the datasets.
    SPECIAL_TOKENS = {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]",
                      "bos_token": "[BOS]", "eos_token": "[EOS]"}
    # train data format
    columns = [
        'title',
        'keywords',
        'content'
    ]
    # read data
    pd_data = read_data(data_path, columns)
    # split train and val
    # NOTE(review): the ratio is hard-coded to 0.9 here, so --train_ratio is
    # silently ignored -- confirm whether `train_ratio` should be passed.
    train_set, val_set = split_data(pd_data, 0.9)
    # load tokenize
    tokenizer = load_tokenizer(pretrained_model_path, SPECIAL_TOKENS)
    # build the datasets
    trainset = GetDataset(train_set, tokenizer, max_length, SPECIAL_TOKENS)
    valset = GetDataset(val_set, tokenizer, max_length, SPECIAL_TOKENS)
    # _, _, train_dataset, val_dataset= get_train_val_dataloader(batch_size, trainset, train_ratio)
    # load pretrained model and fine tune
    #model = load_pretrained_mode(tokenizer, pretrained_model_path, SPECIAL_TOKENS)
    # build model,no pretrained model
    model = build_mode(tokenizer, config_path, SPECIAL_TOKENS)
    # train and val
    train_val(model, tokenizer, trainset, valset, args)
|
import atm.database as db
from utilities import *

# Open the ATM results database.
# Fix: the original bound this to the name `eval`, shadowing the builtin.
database = db.Database('sqlite', '../../atm.db')
################## database and datarun
# print_hp_summary(database, 1)
# print_summary(database, 1)
print_method_summary(database, 1)
from typing import Union, List
import requests
from Summary import Summary
from Provinces import Provinces
from SummaryAll import SummaryAll
from SummaryByProvince import SummaryByProvince
from SummaryByRegion import SummaryByRegion
class CovidSdk:
    """Thin client for the covid19tracker.ca summary API."""
    # Cached /summary/split response; fetched at most once per process.
    __split_data = None

    @staticmethod
    def summary(province: Provinces) -> Union[SummaryAll, SummaryByProvince]:
        """Country-wide summary for Provinces.All, else a per-province one.

        Implicitly returns None when the province is absent from the data.
        """
        if province == Provinces.All:
            response = requests.get('https://api.covid19tracker.ca/summary')
            return SummaryAll(response.json())
        if CovidSdk.__split_data is None:
            CovidSdk.__split_data = requests.get('https://api.covid19tracker.ca/summary/split')
        for entry in CovidSdk.__split_data.json()['data']:
            if entry['province'] == province.value:
                return SummaryByProvince(entry)

    @staticmethod
    def summary_by_region(region_id) -> SummaryByRegion:
        """Summary for a single health region, matched by its hr_uid."""
        payload = requests.get('https://api.covid19tracker.ca/summary/split/hr').json()
        for entry in payload['data']:
            if entry['hr_uid'] == region_id:
                return SummaryByRegion(entry)
|
#!/usr/bin/python3
import os
from formula import formula
# Read the insight parameters from the environment (each may be None when unset).
insightType = os.environ.get("INSIGHT_TYPE")
contribution = os.environ.get("CONTRIBUTION")
# Delegate the actual work to the project's formula runner.
formula.Run(insightType, contribution)
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.db.models.signals import post_save,pre_save
from django.dispatch import receiver
from django.contrib.auth.models import UserManager
# Create your models here.
# (stored_value, human_readable_label) pairs for the models' CharField choices.
USER_CHOICES=(
    ("PATIENT", "PATIENT"),
    ("DOCTOR", "DOCTOR"),
)
Catergory_Choices=(
    # Label fix: the original displayed "Mental Heart" for "Mental Health".
    ( "Mental Health", "Mental Health"),
    ("Heart Disease", "Heart Disease"),
    ("Covid 19", "Covid 19"),
    ("Immunization", "Immunization")
)
class User(AbstractUser):
    # Custom auth user: authentication is by e-mail (see USERNAME_FIELD).
    user_type=models.CharField(max_length=20, choices=USER_CHOICES)
    # Indexed but not unique on its own; uniqueness is enforced on email.
    username = models.CharField(max_length=34, db_index=True)
    profilepicture=models.ImageField(upload_to="media/photo")
    email=models.EmailField(max_length=30, unique=True)
    address=models.CharField(max_length=100)
    country=models.CharField(max_length=100)
    state=models.CharField(max_length=30)
    pincode=models.CharField(max_length=40)
    # Log in with email; username is still required when creating users.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']
class Blog(models.Model):
    # A published article; saving one also snapshots a Draft via the
    # CreateDraft pre_save signal below.
    title=models.CharField(max_length=100)
    user=models.ForeignKey(User, on_delete=models.CASCADE, related_name="blog", null=True)
    image=models.ImageField(upload_to="media/blog")
    category=models.CharField(max_length=30, choices=Catergory_Choices)
    summary=models.TextField(max_length=200)
    # NOTE(review): content (100) is shorter than summary (200) -- confirm intended.
    content=models.TextField(max_length=100)
class Draft(models.Model):
    # Snapshot copy of a Blog, created automatically before every Blog save
    # (mirrors Blog's fields, but user is non-nullable here).
    title=models.CharField(max_length=100)
    user=models.ForeignKey(User, on_delete=models.CASCADE, related_name="draft")
    image=models.ImageField(upload_to="media/blog")
    category=models.CharField(max_length=30, choices=Catergory_Choices)
    summary=models.TextField(max_length=200)
    content=models.TextField(max_length=100)
class Appointment(models.Model):
    # A patient/doctor appointment; both ends are User rows distinguished by
    # the related_name (patient vs doctor).
    start_time = models.DateTimeField("start time")
    end_time = models.DateTimeField('End time')
    # Creation date only (auto_now_add); not the appointment's calendar date.
    date = models.DateField(auto_now_add=True)
    specialization = models.CharField('Specialization', max_length=50)
    customer = models.ForeignKey(User, on_delete=models.CASCADE, related_name="patient")
    doctor = models.ForeignKey(User, on_delete=models.CASCADE, related_name="doctor")
@receiver(pre_save, sender=Blog)
def CreateDraft(sender, instance, **kwargs):
    # Snapshot the blog into a Draft just before it is saved.
    # NOTE(review): pre_save fires on *every* save, so updating a Blog keeps
    # creating additional Draft rows -- confirm this duplication is intended.
    Draft.objects.create(user=instance.user, title=instance.title, image=instance.image,
                         category=instance.category, summary=instance.summary, content=instance.content)
|
from ply import lex
# Single characters ply passes through as literal tokens.
literals = ['[', ']']
tokens = ['PLUS', 'MINUS', 'LSHIFT', 'RSHIFT', 'OUTPUT', 'INPUT']
# Token regexes (raw strings so the backslash escapes reach the regex engine
# intact instead of triggering invalid-escape warnings).
t_PLUS = r'\+'
t_MINUS = r'-'
t_LSHIFT = r'<'
t_RSHIFT = r'>'
t_OUTPUT = r'\.'
t_INPUT = r','
t_ignore = ' \t'
def t_NEWLINE(t):
    r'\n'
    # Newlines are consumed silently (returning None discards the token).
    pass
def t_error(t):
    # Bug fix: the original passed the char as a second print() argument, so
    # "%s" was printed verbatim; format the message properly instead.
    print("Illegal Character %s" % t.value[0])
    t.lexer.skip(1)
lexer = lex.lex(debug=0)
# Sign bit of a 32-bit two's-complement integer. Bug fix: the original used
# 1 << 32, which is *outside* a 32-bit word and never matched the sign bit.
SIGN_MASK = 1 << 31
def max_old(a, b):
    """Return the larger of a and b by testing the sign bit of a - b.

    Only correct when a - b does not overflow 32 bits (i.e. same-sign inputs).
    """
    if (a - b) & SIGN_MASK:
        return b
    return a
def max_with_overflow(a, b):
    """32-bit max that is also safe when a and b have different signs.

    a - b can overflow when the operands differ in sign (e.g. a = INT_MAX,
    b = -15), so that case returns the non-negative operand directly.
    """
    # Bug fix: the original's second clause tested `not b & SIGN_MASK`
    # (matching two non-negative operands, and returning `a` unconditionally);
    # it must detect *different* signs, i.e. `b & SIGN_MASK`.
    if (a & SIGN_MASK and not b & SIGN_MASK) or (not a & SIGN_MASK and b & SIGN_MASK):
        if a & SIGN_MASK:
            return b
        return a
    return max_old(a, b)
|
from sqlalchemy import DateTime, Float, create_engine, Column, String, Integer, ForeignKey
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.exc import IntegrityError
from sqlalchemy.sql import func
from flask import Flask, request, jsonify
app = Flask(__name__)
app.debug = True
# NOTE(review): convert_unicode is deprecated/removed in modern SQLAlchemy,
# and the DSN embeds credentials -- confirm before deployment.
prod_engine = create_engine('postgresql://usr:pass@localhost:5432/prod', convert_unicode=True)
# Session factory bound to the prod engine; handlers open one per request.
Session = sessionmaker()
app.Session = Session
Session.configure(bind=prod_engine)
Base = declarative_base()
class Transfer(Base):
    # A single signed loyalty-point transfer; the user's balance is the sum
    # of their transfer amounts (see create_new_transfer).
    __tablename__ = 'transfers'
    id = Column(Integer, primary_key=True)
    amount = Column(Float, nullable=False)
    user_id = Column(Integer, ForeignKey('users.id', ondelete="CASCADE"))
    # Server-side creation timestamp (DB clock, timezone-aware).
    created = Column(DateTime(timezone=True), server_default=func.now(), nullable=False)
    user = relationship("User", back_populates="transfers")
class User(Base):
    # Loyalty-program account; identified uniquely by email.
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    email = Column(String(32), unique=True, nullable=False)
    first_name = Column(String(32))
    last_name = Column(String(32))
    # Server-side creation timestamp (DB clock, timezone-aware).
    created = Column(DateTime(timezone=True), server_default=func.now(), nullable=False)
    transfers = relationship("Transfer", back_populates="user")
@app.route("/api/loyalty/create_new_user", methods=["POST"])
def create_new_user():
"""
POST loyalty/user
JSON payload with email, firstName and lastName,
creates user
return success=True/False
"""
data = request.get_json()
email = data.get('email')
first_name = data.get('first_name')
last_name = data.get('last_name')
user = User(email=email, first_name=first_name, last_name=last_name)
db_session = app.Session()
try:
db_session.add(user)
db_session.commit()
return jsonify(dict(
email=user.email, first_name=user.first_name, id=user.id
)), 201
except IntegrityError as e:
db_session.rollback()
return jsonify({"error": "could not create user"}), 500
finally:
db_session.close()
@app.route("/api/loyalty/create_new_transfer", methods=["POST"])
def create_new_transfer():
"""
POST loyalty/transfer
JSON payload with user_id, amount,
creates transfer
return success=True/False
"""
data = request.get_json()
user_id = int(data.get('user_id'))
amount = float(data.get('amount'))
transfer = Transfer(user_id=user_id, amount=amount)
db_session = app.Session()
try:
transfers = db_session.query(Transfer).filter(Transfer.user_id == user_id).all()
if sum([t.amount for t in transfers]) + amount < 0:
db_session.rollback()
return jsonify({"error": "not enough funds"}), 418
else:
db_session.add(transfer)
db_session.commit()
return jsonify(dict(
user_id=transfer.user_id, transfer_id=transfer.id,
amount=transfer.amount
)), 201
except IntegrityError:
db_session.rollback()
return jsonify({"error": "could not make transfer"}), 500
finally:
db_session.close()
@app.route("/api/loyalty/user/<int:user_id>/transfers", methods=["GET"])
def get_user_transfers(user_id):
"""
GET loyalty/user/<user_id>/transfers
returns all transfers as JSON
"""
db_session = app.Session()
transfers = db_session.query(Transfer).filter(Transfer.user_id == user_id).all()
transfers = [dict(id=str(t.id),
created=t.created.strftime("%Y-%m-%d %H:%M:%S"),
user_id=str(t.user_id),
amount=str(t.amount)) for t in transfers]
# TODO consider returning user does not exists
return jsonify(dict(transfers=transfers)), 500
if __name__ == "__main__":
app.run()
|
import boto3
import sys
sys.path.append("./packages")
import mysql.connector as mysql_connector
import tornado.escape as tornado
if __name__ == '__main__':
    # Fetch one AWS Health organization event's details.
    # NOTE(review): the account id and event ARN are hard-coded -- confirm
    # these should not be parameters/environment values.
    response = boto3.client('health', region_name='us-east-1').describe_event_details_for_organization(
        organizationEventDetailFilters=[
            {
                'awsAccountId': '596134558493',
                'eventArn': 'arn:aws:health:ap-northeast-1::event/EC2/AWS_EC2_OPERATIONAL_NOTIFICATION/AWS_EC2_OPERATIONAL_NOTIFICATION_a2aab6b0-9c08-4021-9ba5-f8e0b7d14159'
            }
        ],
        locale='en'
    )
    # description is None when the lookup fails (empty successfulSet) -- the
    # writes below would then crash; presumably acceptable for this script.
    description = response.get('successfulSet', [{}])[0].get('eventDescription', {}).get('latestDescription')
    # NOTE(review): credentials are hard-coded; move to config/env.
    cnx = mysql_connector.connect(
        user='admin',
        password='adminpass',
        host='localhost',
        port=3306,
        database='test'
    )
    cursor = cnx.cursor()
    # Parameterized upsert of the description under a fixed id.
    params = (
        """
        REPLACE INTO test_tb
        (id, text)
        VALUES (%s,%s);
        """,
        (
            "1",
            description
        )
    )
    cursor.execute(*params)
    cnx.commit()
    # Dump the raw text and an HTML version with linkified URLs.
    with open('x.text', 'w') as f:
        f.write(description)
    with open('x.html', 'w') as f:
        # f.write(tornado.xhtml_unescape(tornado.linkify(description)))
        f.write(tornado.linkify(description.replace("\n", "\n<br>")))
|
from __future__ import print_function
import platform
import sys
import struct
import socket
import threading
import types
import time
from .config import DEBUG, HOST, PORT, MSG_TIMEOUT, SOCK_TIMEOUT, GET_PORT_ATTEMPT_COUNT
from ._datatypes import *
from .utils import show_error_message
# Event names; a handler's index in Connection.callbacks corresponds to the
# event's position in this tuple.
EVENTS_NAMES = (
    'eviteminfo', 'evitemdeleted', 'evspeech', 'evdrawgameplayer',
    'evmoverejection', 'evdrawcontainer', 'evadditemtocontainer',
    'evaddmultipleitemsincont', 'evrejectmoveitem', 'evupdatechar',
    'evdrawobject', 'evmenu', 'evmapmessage', 'evallowrefuseattack',
    'evclilocspeech', 'evclilocspeechaffix', 'evunicodespeech',
    'evbuffdebuffsystem', 'evclientsendresync', 'evcharanimation',
    'evicqdisconnect', 'evicqconnect', 'evicqincomingtext', 'evicqerror',
    'evincominggump', 'evtimer1', 'evtimer2', 'evwindowsmessage', 'evsound',
    'evdeath', 'evquestarrow', 'evpartyinvite', 'evmappin', 'evgumptextentry',
    'evgraphicaleffect', 'evircincomingtext', 'evmessengerevent',
    'evsetglobalvar', 'evupdateobjstats', 'evglobalchat'
)
# Wire argument decoders, indexed by the type byte found in event packets.
EVENTS_ARGTYPES = _str, _uint, _int, _ushort, _short, _ubyte, _byte, _bool
# Library version sent to Stealth during the handshake.
VERSION = 1, 0, 0, 0
class Connection:
    """Socket connection to the Stealth client.

    Sends serialized method calls and parses the reply stream into returned
    values (self.results), queued event callbacks and pause/terminate
    control packets.
    """
    # Class-level port shared by all connections, discovered via get_port().
    port = None
    def __init__(self):
        self._sock = socket.socket()
        self._id = 0              # last issued method id (wraps at 65535)
        self._buffer = bytes()    # unparsed tail carried over between recv()s
        self.pause = False        # toggled by control packet type 4
        self.results = {}         # method id -> raw result payload
        self.callbacks = {}       # event index -> handler (or None)
        self._handlers = []       # parsed events waiting to be dispatched
        for i in range(len(EVENTS_NAMES)):
            self.callbacks[i] = None
    @property
    def method_id(self):
        """Return the next request id, wrapping back to 0 at 65535."""
        self._id += 1
        if self._id >= 65535:
            self._id = 0
        return self._id
    def connect(self, host=None, port=None):
        """Connect to Stealth, resolving the port once per process if needed."""
        if host is None:
            host = HOST
        if port is None:
            if self.port is None:
                port = get_port()
                self.__class__.port = port
            else:
                port = self.port
        self._sock.settimeout(SOCK_TIMEOUT)
        self._sock.connect((host, port))
        self._sock.setblocking(False)
        # SCLangVersion
        # send language type and protocol version to stealth (data type - 5)
        # python - 1; delphi - 2; c# - 3; other - 255
        data = struct.pack('=HH5B', 5, 0, 1, *VERSION)
        size = struct.pack('!I', len(data))
        self.send(size + data)
    def close(self):
        """Close the underlying socket."""
        self._sock.close()
    def receive(self, size=4096):
        """Read what is available and process every complete packet.

        Incomplete packets are stashed in self._buffer for the next call;
        event handlers collected during parsing run at the end.
        """
        # try to get a new data from socket
        data = b''
        try:
            data += self._sock.recv(size)
            if not data:
                error = 'Connection to Stealth was lost.'
                show_error_message(error)
                exit(1)
        except socket.error:
            return
        if DEBUG:
            print('Data received: {}'.format(data))
        # parse data
        offset = 0
        while 1:
            if self._buffer: # if some data was already stored
                data = self._buffer + data
                self._buffer = bytes()
            # parse packet header
            if len(data) - offset < 4:
                self._buffer += data[offset:]
                break
            size, = struct.unpack_from('!I', data, offset)
            offset += 4
            if size > len(data) - offset:
                self._buffer += data[offset - 4:]
                break
            type_, = struct.unpack_from('H', data, offset)
            offset += 2
            # packet type is 1 (a returned value)
            if type_ == 1:
                id_, = struct.unpack_from('H', data, offset)
                self.results[id_] = data[offset + 2:offset + size]
                offset += size - 2 # - type_
            # packet type is 3 (an event callback)
            elif type_ == 3:
                index, count = struct.unpack_from('=2B', data, offset)
                offset += 2
                # parse args
                args = []
                for i in range(count):
                    argtype = EVENTS_ARGTYPES[struct.unpack_from('B', data,
                                                                 offset)[0]]
                    offset += 1
                    arg = argtype.from_buffer(data, offset)
                    offset += struct.calcsize(arg.fmt)
                    args.append(arg.value)
                # save handler
                handler = {
                    'handler': self.callbacks[index],
                    'args': args
                }
                self._handlers.append(handler)
            # packet type is 4 (a pause script packet)
            elif type_ == 4:
                self.pause = True if not self.pause else False
                offset += size - 2 # - type_
            # packet type is 2 (terminate script)
            elif type_ == 2:
                exit(0)
            if offset >= len(data):
                break
        # run event handlers
        while len(self._handlers):
            handler = self._handlers.pop(0)
            handler['handler'](*handler['args'])
    def send(self, data):
        """Send raw bytes to Stealth (data must already carry its size prefix)."""
        if DEBUG:
            print('Packet sent: {}'.format(data))
        self._sock.send(data)
class ScriptMethod:
    """Callable proxy for one remote Stealth method.

    Subclasses/instances configure argtypes (serializers for the positional
    args) and restype (result decoder, or None for fire-and-forget calls).
    """
    argtypes = []
    restype = None
    def __init__(self, index):
        # Remote method index; 0 is the no-op "wait" method.
        self.index = index
    def __call__(self, *args):
        conn = get_connection()
        conn.receive() # check pause or events
        # Block while the script is paused by Stealth.
        while conn.pause:
            conn.receive()
            time.sleep(.01)
        if not self.index: # wait
            return
        # pack args
        data = bytes()
        for cls, val in zip(self.argtypes, args):
            data += cls(val).serialize()
        # form packet
        # id 0 means no answer is expected; otherwise a fresh request id.
        id_ = conn.method_id if self.restype else 0
        header = struct.pack('=2H', self.index, id_)
        packet = header + data
        size = struct.pack('!I', len(packet))
        # send to the stealth
        conn.send(size + packet)
        # wait for a result if required
        while self.restype is not None:
            conn.receive()
            try:
                result = self.restype.from_buffer(conn.results.pop(id_))
                return result.value
            except KeyError:
                # Result not in yet; poll again shortly.
                time.sleep(.001)
def get_port():
    """Discover the port Stealth listens on for this script.

    Order: argv[2] if launched internally by Stealth; otherwise ask Stealth
    itself -- via window messages on Windows, via a socket probe elsewhere.
    """
    def win():
        # Ask the Stealth window for a port through WM_COPYDATA.
        import os
        from . import py_stealth_winapi as _winapi
        # The `... if b'' == '' else ...` ternaries are py2/py3 compat:
        # b'' == '' is True only on py2, where the .decode() branch runs.
        wnd = 'TStealthForm'.decode() if b'' == '' else 'TStealthForm' # py2
        hwnd = _winapi.FindWindow(wnd, None)
        if not hwnd:
            error = 'Can not find Stealth window.'
            _winapi.MessageBox(0, error.decode() if b'' == '' else error, # py2
                               'Error'.decode() if b'' == '' else 'Error', 0)
            exit(1)
        # form copydata
        pid = '{pid:08X}'.format(pid=os.getpid())
        lp = (pid + os.path.basename(sys.argv[0])).encode() + b'\x00'
        cb = len(lp)
        dw = _winapi.GetCurrentThreadId()
        copydata = _winapi.COPYDATA(dw, cb, lp)
        # send message
        _winapi.SetLastError(0)
        if not _winapi.SendMessage(hwnd, _winapi.WM_COPYDATA, 0, copydata.pointer):
            error = 'Can not send message. ErrNo: {}'.format(_winapi.GetLastError())
            _winapi.MessageBox(0, error.decode() if b'' == '' else error, # py2
                               'Error'.decode() if b'' == '' else 'Error', 0)
            exit(1)
        # wait for an answer
        msg = _winapi.MSG()
        now = time.time()
        while now + MSG_TIMEOUT > time.time():
            if _winapi.PeekMessage(msg, 0, _winapi.FM_GETFOCUS,
                                   _winapi.FM_GETFOCUS, _winapi.PM_REMOVE):
                # Cache the port in argv[2] so later calls take the fast path.
                while len(sys.argv) < 3:
                    sys.argv.append('')
                sys.argv[2] = str(msg.wParam)
                return msg.wParam
            else:
                time.sleep(0.005)
        error = 'PeekMessage timeout'
        _winapi.MessageBox(0, error.decode() if b'' == '' else error, # py2
                           'Error'.decode() if b'' == '' else 'Error', 0) # py2
        exit(1)
    def unix():
        # Probe Stealth over TCP for the per-script port number.
        # attempt to connect to Stealth
        sock = socket.socket()
        sock.settimeout(SOCK_TIMEOUT)
        if DEBUG:
            print('connecting to {0}:{1}'.format(HOST, PORT))
        try:
            sock.connect((HOST, PORT))
        except socket.error:
            show_error_message('Stealth not found. Port: {}'.format(PORT))
            exit(1)
        sock.setblocking(False)
        if DEBUG:
            print('connected')
        # attempts to get a port number
        for i in range(GET_PORT_ATTEMPT_COUNT):
            if DEBUG:
                print('attempt №' + str(i + 1))
            packet = struct.pack('=HI', 4, 0xDEADBEEF)
            sock.send(packet)
            if DEBUG:
                print('packet sent: {}'.format(packet))
            timer = time.time()
            while timer + SOCK_TIMEOUT > time.time():
                try:
                    data = sock.recv(4096)
                except socket.error:
                    continue
                if data:
                    if DEBUG:
                        print('received: {}'.format(data))
                    length = struct.unpack_from('=H', data)[0]
                    if DEBUG:
                        print('length: {}'.format(length))
                    # NOTE(review): ternary precedence makes this
                    # ('=' + 'H') if length == 2 else 'I' -- the 4-byte case
                    # uses native ('I') rather than standard ('=I') layout;
                    # equivalent on common platforms but worth confirming.
                    port = struct.unpack_from('=' + 'H' if length == 2 else 'I', data, 2)[0]
                    if DEBUG:
                        print('port: {}'.format(port))
                    sock.close()
                    if DEBUG:
                        print('socket closed')
                    return port
                else:
                    error = 'Connection to Stealth was lost.'
                    show_error_message(error)
                    exit(1)
    # First way - get port from cmd parameters.
    # If script was launched as internal script from Stealth.
    if len(sys.argv) >= 3 and sys.argv[2].isalnum():
        return int(sys.argv[2])
    # Second way - ask Stealth for a port number via socket connection or windows messages.
    # If script was launched as external script.
    if platform.system() == 'Windows':
        return win()
    else:
        return unix()
def get_connection():
    """Return this thread's Connection, creating and connecting it lazily.

    The thread's join() is monkey-patched so the socket is closed when the
    thread is joined.
    """
    def join(self, timeout=None):
        # Close this thread's socket, then run the real Thread.join.
        self.connection.close()
        self.__class__.join(self, timeout)
    thread = threading.current_thread()
    if hasattr(thread, 'connection'):
        return thread.connection
    thread.connection = Connection()
    thread.connection.connect()
    thread.join = types.MethodType(join, thread) # close socket for each one
    return thread.connection
|
# Adapted from Graham Neubig's Paired Bootstrap script
# https://github.com/neubig/util-scripts/blob/master/paired-bootstrap.py
import numpy as np
from sklearn.metrics import f1_score, precision_score, recall_score
from tqdm import tqdm
EVAL_TYPE_ACC = "acc"
EVAL_TYPE_BLEU = "bleu"
EVAL_TYPE_BLEU_DETOK = "bleu_detok"
EVAL_TYPE_PEARSON = "pearson"
EVAL_TYPE_F1 = "f1"
EVAL_TYPE_MACRO_F1 = "macro-f1"
EVAL_TYPE_PREC = "precision"
EVAL_TYPE_REC = "recall"
EVAL_TYPE_AVG = "avg"
EVAL_TYPES = [EVAL_TYPE_ACC,
EVAL_TYPE_BLEU,
EVAL_TYPE_BLEU_DETOK,
EVAL_TYPE_PEARSON,
EVAL_TYPE_F1,
EVAL_TYPE_AVG,
EVAL_TYPE_PREC,
EVAL_TYPE_REC]
def eval_preproc(data, eval_type='acc'):
    ''' Preprocess one example into the format required by *eval_type*. '''
    # Non-string data passes through untouched, whatever the eval type.
    if type(data) != str:
        return data
    stripped = data.strip()
    if eval_type == EVAL_TYPE_BLEU:
        # BLEU wants a token list rather than a raw sentence string.
        return stripped.split()
    if eval_type in (EVAL_TYPE_PEARSON, EVAL_TYPE_F1, EVAL_TYPE_MACRO_F1,
                     EVAL_TYPE_PREC, EVAL_TYPE_REC, EVAL_TYPE_AVG):
        # Numeric metrics parse the string as a float.
        return float(stripped)
    return stripped
def eval_measure(gold, sys, eval_type='acc'):
    ''' Evaluation measure
    This takes in gold labels and system outputs and evaluates their
    accuracy. It currently supports:
    * Accuracy (acc), percentage of labels that match
    * Pearson's correlation coefficient (pearson)
    * BLEU score (bleu)
    * BLEU_detok, on detokenized references and translations, with internal tokenization
    * f1 / macro-f1 / precision / recall via scikit-learn
    * avg, the plain mean of the system outputs (gold is ignored)
    :param gold: the correct labels
    :param sys: the system outputs (note: the name shadows the stdlib module)
    :param eval_type: The type of evaluation to do (acc, pearson, bleu, bleu_detok)
    '''
    if eval_type == EVAL_TYPE_ACC:
        return sum([1 if g == s else 0 for g, s in zip(gold, sys)]) / float(len(gold))
    elif eval_type == EVAL_TYPE_BLEU:
        # nltk imported lazily so it is only required for BLEU.
        import nltk
        gold_wrap = [[x] for x in gold]
        return nltk.translate.bleu_score.corpus_bleu(gold_wrap, sys)
    elif eval_type == EVAL_TYPE_PEARSON:
        return np.corrcoef([gold, sys])[0,1]
    elif eval_type == EVAL_TYPE_BLEU_DETOK:
        import sacrebleu
        # make sure score is 0-based instead of 100-based
        return sacrebleu.corpus_bleu(sys, [gold]).score / 100.
    elif eval_type == EVAL_TYPE_F1:
        return f1_score(gold, sys)
    elif eval_type == EVAL_TYPE_MACRO_F1:
        return f1_score(gold, sys, average="macro")
    elif eval_type == EVAL_TYPE_PREC:
        return precision_score(gold, sys)
    elif eval_type == EVAL_TYPE_REC:
        return recall_score(gold, sys)
    elif eval_type == EVAL_TYPE_AVG:
        return np.mean(sys)
    else:
        raise NotImplementedError('Unknown eval type in eval_measure: %s' % eval_type)
def eval_with_paired_bootstrap(gold, sys1, sys2,
                               num_samples=10000, sample_ratio=0.5,
                               eval_type='acc',
                               return_results=False):
    ''' Evaluate with paired boostrap
    This compares two systems, performing a significance tests with
    paired bootstrap resampling to compare the accuracy of the two systems.
    :param gold: The correct labels
    :param sys1: The output of system 1
    :param sys2: The output of system 2
    :param num_samples: The number of bootstrap samples to take
    :param sample_ratio: The ratio of samples to take every time
    :param eval_type: The type of evaluation to do (acc, pearson, bleu, bleu_detok)
    :param return_results: if True, also return (mean, 95% CI) per system and
        the two one-sided p-values (results are printed either way)
    '''
    assert(len(gold) == len(sys1))
    assert(len(gold) == len(sys2))
    # Preprocess the data appropriately for they type of eval
    gold = [eval_preproc(x, eval_type) for x in gold]
    sys1 = [eval_preproc(x, eval_type) for x in sys1]
    sys2 = [eval_preproc(x, eval_type) for x in sys2]
    sys1_scores = []
    sys2_scores = []
    # wins = [#sys1 better, #sys2 better, #ties] over the bootstrap samples.
    wins = [0, 0, 0]
    n = len(gold)
    ids = list(range(n))
    for _ in tqdm(range(num_samples)):
        # Subsample the gold and system outputs
        np.random.shuffle(ids)
        reduced_ids = ids[:int(len(ids)*sample_ratio)]
        reduced_gold = [gold[i] for i in reduced_ids]
        reduced_sys1 = [sys1[i] for i in reduced_ids]
        reduced_sys2 = [sys2[i] for i in reduced_ids]
        # Calculate accuracy on the reduced sample and save stats
        sys1_score = eval_measure(reduced_gold, reduced_sys1, eval_type=eval_type)
        sys2_score = eval_measure(reduced_gold, reduced_sys2, eval_type=eval_type)
        if sys1_score > sys2_score:
            wins[0] += 1
        elif sys1_score < sys2_score:
            wins[1] += 1
        else:
            wins[2] += 1
        sys1_scores.append(sys1_score)
        sys2_scores.append(sys2_score)
    # Print win stats
    wins = [x/float(num_samples) for x in wins]
    print('Win ratio: sys1=%.3f, sys2=%.3f, tie=%.3f' % (wins[0], wins[1], wins[2]))
    if wins[0] > wins[1]:
        print('(sys1 is superior with p value p=%.10f)\n' % (1-wins[0]))
    elif wins[1] > wins[0]:
        print('(sys2 is superior with p value p=%.10f)\n' % (1-wins[1]))
    # Print system stats (2.5th/97.5th percentiles of the sorted scores
    # give the 95% bootstrap confidence interval)
    sys1_scores.sort()
    sys2_scores.sort()
    print('sys1 mean=%.3f, median=%.3f, 95%% confidence interval=[%.3f, %.3f]' %
          (np.mean(sys1_scores), np.median(sys1_scores), sys1_scores[int(num_samples * 0.025)], sys1_scores[int(num_samples * 0.975)]))
    print('sys2 mean=%.3f, median=%.3f, 95%% confidence interval=[%.3f, %.3f]' %
          (np.mean(sys2_scores), np.median(sys2_scores), sys2_scores[int(num_samples * 0.025)], sys2_scores[int(num_samples * 0.975)]))
    if return_results:
        sys1_summary = (np.mean(sys1_scores), (sys1_scores[int(num_samples * 0.025)], sys1_scores[int(num_samples * 0.975)]))
        sys2_summary = (np.mean(sys2_scores), (sys2_scores[int(num_samples * 0.025)], sys2_scores[int(num_samples * 0.975)]))
        p_value_lose = 1-wins[0]
        p_value_win = 1-wins[1]
        return sys1_summary, sys2_summary, p_value_lose, p_value_win
def eval_with_hierarchical_paired_bootstrap(gold, sys1_list, sys2_list,
                                            num_samples=10000, sample_ratio=0.5,
                                            eval_type='acc',
                                            return_results=False):
    ''' Evaluate with a hierarchical paired boostrap
    This compares two systems, performing a significance tests with
    paired bootstrap resampling to compare the accuracy of the two systems, with
    two-level sampling: first we sample a model, then we sample data to evaluate
    it on.
    :param gold: The correct labels
    :param sys1: The output of system 1
    :param sys2: The output of system 2
    :param num_samples: The number of bootstrap samples to take
    :param sample_ratio: The ratio of samples to take every time
    :param eval_type: The type of evaluation to do (acc, pearson, bleu, bleu_detok)
    :param return_results: if True, also return (mean, 95% CI) per system and
        the two one-sided p-values (results are printed either way)
    '''
    for sys1 in sys1_list:
        assert(len(gold) == len(sys1))
    for sys2 in sys2_list:
        assert(len(gold) == len(sys2))
    # Preprocess the data appropriately for they type of eval
    gold = [eval_preproc(x, eval_type) for x in gold]
    sys1_list = [[eval_preproc(x, eval_type) for x in sys1] for sys1 in sys1_list]
    sys2_list = [[eval_preproc(x, eval_type) for x in sys2] for sys2 in sys2_list]
    sys1_scores = []
    sys2_scores = []
    # wins = [#sys1 better, #sys2 better, #ties] over the bootstrap samples.
    wins = [0, 0, 0]
    n = len(gold)
    ids = list(range(n))
    for _ in tqdm(range(num_samples)):
        # Subsample the gold and system outputs
        np.random.shuffle(ids)
        reduced_ids = ids[:int(len(ids)*sample_ratio)]
        # Level 1: sample one model (run) per system for this bootstrap draw.
        sys1_idx = np.random.choice(list(range(len(sys1_list))))
        sys1 = sys1_list[sys1_idx]
        sys2_idx = np.random.choice(list(range(len(sys2_list))))
        sys2 = sys2_list[sys2_idx]
        reduced_gold = [gold[i] for i in reduced_ids]
        reduced_sys1 = [sys1[i] for i in reduced_ids]
        reduced_sys2 = [sys2[i] for i in reduced_ids]
        # Calculate accuracy on the reduced sample and save stats
        sys1_score = eval_measure(reduced_gold, reduced_sys1, eval_type=eval_type)
        sys2_score = eval_measure(reduced_gold, reduced_sys2, eval_type=eval_type)
        if sys1_score > sys2_score:
            wins[0] += 1
        elif sys1_score < sys2_score:
            wins[1] += 1
        else:
            wins[2] += 1
        sys1_scores.append(sys1_score)
        sys2_scores.append(sys2_score)
    # Print win stats
    wins = [x/float(num_samples) for x in wins]
    print('Win ratio: sys1=%.3f, sys2=%.3f, tie=%.3f' % (wins[0], wins[1], wins[2]))
    if wins[0] > wins[1]:
        print('(sys1 is superior with p value p=%.10f)\n' % (1-wins[0]))
    elif wins[1] > wins[0]:
        print('(sys2 is superior with p value p=%.10f)\n' % (1-wins[1]))
    # Print system stats (2.5th/97.5th percentiles of the sorted scores
    # give the 95% bootstrap confidence interval)
    sys1_scores.sort()
    sys2_scores.sort()
    print('sys1 mean=%.3f, median=%.3f, 95%% confidence interval=[%.3f, %.3f]' %
          (np.mean(sys1_scores), np.median(sys1_scores), sys1_scores[int(num_samples * 0.025)], sys1_scores[int(num_samples * 0.975)]))
    print('sys2 mean=%.3f, median=%.3f, 95%% confidence interval=[%.3f, %.3f]' %
          (np.mean(sys2_scores), np.median(sys2_scores), sys2_scores[int(num_samples * 0.025)], sys2_scores[int(num_samples * 0.975)]))
    if return_results:
        sys1_summary = (np.mean(sys1_scores), (sys1_scores[int(num_samples * 0.025)], sys1_scores[int(num_samples * 0.975)]))
        sys2_summary = (np.mean(sys2_scores), (sys2_scores[int(num_samples * 0.025)], sys2_scores[int(num_samples * 0.975)]))
        p_value_lose = 1-wins[0]
        p_value_win = 1-wins[1]
        return sys1_summary, sys2_summary, p_value_lose, p_value_win
|
#encoding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
class Swish(nn.Module):
    """Swish activation module: f(x) = x * sigmoid(x)."""
    def __init__(self):
        # Bug fix: the original called super(Act_op, self).__init__(), but no
        # class named Act_op exists, so instantiating Swish raised NameError.
        super(Swish, self).__init__()
    def forward(self, x):
        """Apply the Swish activation element-wise.

        Args:
            x: input tensor of any shape.
        Returns:
            Tensor of the same shape, x * sigmoid(x).
        """
        # torch.sigmoid is the non-deprecated spelling of F.sigmoid.
        return x * torch.sigmoid(x)
'''
## 由于 Function 可能需要暂存 input tensor。
## 因此,建议不复用 Function 对象,以避免遇到内存提前释放的问题。
class Swish_act(torch.autograd.Function):
## save_for_backward can only!!!! save input or output tensors
@staticmethod
def forward(self, input_):
print('swish act op forward')
output = input_ * F.sigmoid(input_)
self.save_for_backward(input_)
return output
@staticmethod
def backward(self, grad_output):
## according to the chain rule(Backpropagation),
## d(loss)/d(x) = d(loss)/d(output) * d(output)/d(x)
## grad_output is the d(loss)/d(output)
## we calculate and save the d(output)/d(x) in forward
input_, = self.saved_tensors
output = input_ * F.sigmoid(input_)
grad_swish = output + F.sigmoid(input_) * (1 - output)
print('swish act op backward')
return grad_output * grad_swish
''' |
# Your script was updated by Hirokazo Nagata
#fb : Hirokazo Nagata
#gmail : ziadabouelfarah2@gmail.com
import os,time
print('''
XX MMMMMMMMMMMMMMMMss''' '''ssMMMMMMMMMMMMMMMM XX
XX MMMMMMMMMMMMyy'' ''yyMMMMMMMMMMMM XX
XX MMMMMMMMyy'' ''yyMMMMMMMM XX
XX MMMMMy'' ''yMMMMM XX
XX MMMy' 'yMMM XX
XX Mh' 'hM XX
XX - - XX
XX XX
XX :: :: XX
XX MMhh. ..hhhhhh.. ..hhhhhh.. .hhMM XX
XX MMMMMh ..hhMMMMMMMMMMhh. .hhMMMMMMMMMMhh.. hMMMMM XX
XX ---MMM .hMMMMdd:::dMMMMMMMhh.. ..hhMMMMMMMd:::ddMMMMh. MMM--- XX
XX MMMMMM MMmm'' 'mmMMMMMMMMyy. .yyMMMMMMMMmm' ''mmMM MMMMMM XX
XX ---mMM '' 'mmMMMMMMMM MMMMMMMMmm' '' MMm--- XX
XX yyyym' . 'mMMMMm' 'mMMMMm' . 'myyyy XX
XX mm'' .y' ..yyyyy.. '''' '''' ..yyyyy.. 'y. ''mm XX
XX MN .sMMMMMMMMMss. . . .ssMMMMMMMMMs. NM XX
XX N` MMMMMMMMMMMMMN M M NMMMMMMMMMMMMM `N XX
XX + .sMNNNNNMMMMMN+ `N N` +NMMMMMNNNNNMs. + XX
XX o+++ ++++Mo M M oM++++ +++o XX
XX oo oo XX
XX oM oo oo Mo XX
XX oMMo M M oMMo XX
XX +MMMM s s MMMM+ XX
XX +MMMMM+ +++NNNN+ +NNNN+++ +MMMMM+ XX
XX +MMMMMMM+ ++NNMMMMMMMMN+ +NMMMMMMMMNN++ +MMMMMMM+ XX
XX MMMMMMMMMNN+++NNMMMMMMMMMMMMMMNNNNMMMMMMMMMMMMMMNN+++NNMMMMMMMMM XX
XX yMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMy XX
XX m yMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMy m XX
XX MMm yMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMy mMM XX
XX MMMm .yyMMMMMMMMMMMMMMMM MMMMMMMMMM MMMMMMMMMMMMMMMMyy. mMMM XX
XX MMMMd ''''hhhhh odddo obbbo hhhh'''' dMMMM XX
XX MMMMMd 'hMMMMMMMMMMddddddMMMMMMMMMMh' dMMMMM XX
XX MMMMMMd 'hMMMMMMMMMMMMMMMMMMMMMMh' dMMMMMM XX
XX MMMMMMM- ''ddMMMMMMMMMMMMMMdd'' -MMMMMMM XX
XX MMMMMMMM '::dddddddd::' MMMMMMMM XX
XX MMMMMMMM- -MMMMMMMM XX
XX MMMMMMMMM MMMMMMMMM XX
XX MMMMMMMMMy yMMMMMMMMM XX
XX MMMMMMMMMMy. .yMMMMMMMMMM XX
XX MMMMMMMMMMMMy. .yMMMMMMMMMMMM XX
XX MMMMMMMMMMMMMMy. .yMMMMMMMMMMMMMM XX
XX MMMMMMMMMMMMMMMMs. .sMMMMMMMMMMMMMMMM XX
XX MMMMMMMMMMMMMMMMMMss. .... .ssMMMMMMMMMMMMMMMMMM XX
XX MMMMMMMMMMMMMMMMMMMMNo oNNNNo oNMMMMMMMMMMMMMMMMMMMM XX
''')
time.sleep(3)
def slowprint(s):
    """Print *s* one character at a time (typewriter effect), ~10 chars/sec."""
    import sys  # bug fix: 'sys' was used but never imported (file imports only os, time)
    for c in s + '\n':
        sys.stdout.write(c)
        sys.stdout.flush()  # flush per character so the effect is visible
        time.sleep(10. / 100)  # 0.1 s pause between characters
def package(package):
    # Install a single apt package non-interactively, then refresh the
    # package index.  NOTE(review): the parameter shadows the function name,
    # and 'sudo apt update' runs AFTER the install on every call - the
    # printed messages do not match the actual order of operations; confirm.
    os.system('sudo apt install ' + package + ' -y')
    os.system('clear')
    print('\033[31m Updating your package : ')
    os.system('sudo apt update')
    print('\033[34m Done !!')
    print('\033[36mInstalling The Package ...\033[37m')
    os.system('clear')
    time.sleep(2)
# Install the desired tool set one package at a time; each package() call
# also re-runs 'sudo apt update' (see the function above).
package('cmatrix')
package('figlet')
package('espeak')
package('open-vm-tools-desktop fuse')
package('bum')
package('rkhunter')
package('network-manager-openvpn-gnome')
package('network-manager-pptp')
package('network-manager-pptp-gnome')
package('network-manager-strongswan')
package('network-manager-vpnc')
package('network-manager-vpnc-gnome')
time.sleep(5)
os.system('clear')
print("""
Done !!!
You can now use Kali-fixer
Enjoy !! ;)
""") |
import docx
import os
''' Function: read_file(file_name, item_type)
Parameters: file_name - the name of the file to be read.
item_type - user-inputted search term to specify what is parsed for in the document.
os.path.dirname(os.path.abspath(__file__))+"\\" is the current working directory.
Returns: parsed_data_list - A list of parsed data (strings) from the parsed text documents.
This list will be used to append the item_type_master_list to be written to a .txt file.
Desc: Function intended to read files and return a list (parsed_data_list) of text lines starting at the search
keyword and ending at the end of the line.
The function creates an object (doc) which is defined by the contents of the document's paragraphs,
then creates an empty list (parsed_data_list). For loop looks through each line inside (doc) for search word (item_type)
specified by the user.
If item_type found, prints a string from the start of the line item_type was found on to the end of the length of the
line that item_type exists on and appends the parsed_data_list with it.
'''
def read_file(file_name, item_type):
    """Collect lines of a Word document that contain *item_type*.

    Parameters:
        file_name - name of the .docx file to read.
        item_type - search term; matched as given, upper-cased and lower-cased.
    Returns:
        List whose first element is the file name (".docx" stripped) followed
        by every matching paragraph line, or None if the file could not be
        read as a Word document.
    """
    try:
        # Paragraph objects for the whole document.
        document_paragraphs = docx.Document(file_name).paragraphs
        # First entry labels which file the following matches came from.
        parsed_data_list = [file_name.replace(".docx", "")]
        for text_lines in document_paragraphs:
            line = text_lines.text
            # Bug fix (simplification): the original appended
            # line[:find] + line[find:], which is always just the whole line;
            # append the full line directly.
            if (item_type in line
                    or item_type.upper() in line
                    or item_type.lower() in line):
                parsed_data_list.append(line)
        return parsed_data_list
    except Exception:
        # Broad on purpose: python-docx raises several exception types for
        # non-Word input; callers treat the implicit None as "unreadable".
        print("[Error:", file_name, "is not a .document_paragraphs or .docx file.]")
'''Function: parse_directory(directory)
Parameter: directory - The current or specified directory which documents are searched from.
Returns: files_in_dir_list - A list of file names that were found within the directory.
Desc: Creates list (files_in_dir_list) of .docx or .doc files in the current directory.'''
def parse_directory(directory):
    """Return the names of Word files (.doc/.docx) in the top level of *directory*."""
    word_files = []
    for dirpath, _dirnames, filenames in os.walk(directory):
        print("\n\nFiles found in directory ", dirpath, ": \n", filenames, "\n")
        word_files.extend(
            name for name in filenames
            if (".docx" in name) or (".doc" in name)
        )
        # Only the first os.walk level (the directory itself) is scanned.
        break
    return word_files
'''Function: collect_motions(directory, item_type, item_list)
Parameters: directory - The current or specified directory which documents are searched from.
item_type - The user-entered search term.
item_list - list of lines in directory Word files that correspond to the searched item_type.
Returns: item_list - list of found data alongside its instance of item_type.
Desc: Used to append a master list (item_list), passed in the program as item_type_master_list, of lines where
item_type was found.'''
def collect_motions(directory, item_type, item_list):
    """Append matching lines from every Word file in *directory* to *item_list*.

    Each file's results are preceded by a long separator line so the output
    file visually groups matches per document.  Returns the updated list, or
    None if the directory listing could not be built.
    """
    try:
        for name in parse_directory(directory):
            found = read_file(name, item_type)
            if found:
                # Separator between the results of different files.
                item_list.append("\n\n" + "-" * 100)
                item_list.extend(found)
        return item_list
    except TypeError:
        print("[Error: Cannot build list of filenames.]")
'''Function: write_text_file(data, user_item)
Parameters: data - The list containing the data to be written to a text file.
user_item - User-entered search term. Used in the output file to specify what was searched for.
counter - Counter to keep track of number of files outputted so far this run. Used to create output_x.txt
file names where x = counter.
Returns: None. Writes data to file, closes file.
Desc: Call to enter found data parameter (in this case a list) into a new text file, write and close the file.'''
def write_text_file(data, user_item, counter):
    """Write *data* (a list of strings) to 'output_<counter>.txt'.

    Parameters:
        data      - list of strings; written one per line after a header.
        user_item - the user's search keyword, echoed in the file header.
        counter   - integer used to build a unique output file name.
    The file is only written when user_item is a string; otherwise an error
    message is printed and nothing is created.
    """
    if isinstance(user_item, str):  # idiomatic check (was: type(user_item) == str)
        new_file_name = "output_" + str(counter) + ".txt"
        header = "Search for '" + user_item + "'. Files are labelled and separated by lines.\n\n"
        # 'with' guarantees the file is closed even if the write raises.
        with open(new_file_name, 'w') as text_file:
            text_file.write(header + '\n'.join(data))
    else:
        print("Error: Output file not created. Invalid user search keyword or bug.")
def main():
    """Interactive menu loop: search Word documents for a keyword and write
    the matching lines to numbered output_*.txt files.

    Option 1 searches every .doc/.docx in the program's directory, option 2
    searches one user-named file, option 3 exits.
    """
    # Define current dir variable to be referred to throughout program. Create empty list for final parsed data.
    current_directory = os.path.dirname(os.path.abspath(__file__))
    item_type_master_list = []
    output_count = 0
    print("RatiFinder v. 0.2. (Terminal) -- Christian Pearson and Darren Berg 2018")
    print("Automate compiling of motions, actions or other search-terms from Word files such as Meeting Minutes.")
    print("\nStep 1: Copy Word documents to search into folder that contains your RatiFinder.exe file.")
    print("Step 2: Run program and follow the prompts on screen.")
    print("Step 3: Check folder for output.txt file containing list of document lines found with search keyword.")
    print("-" * 100, "\n")
    loop_check = 1
    # Begin main program menu loop. User selects one of three options using 1, 2 or 3 as terms.
    while loop_check != -1:
        print("1) Directory Search -- search entire folder the .exe is in. [Recommended]")
        print("2) Single File Search -- specify one file to search.")
        print("3) End program")
        # User chooses one of the three options by input.
        user_choice = input("Please enter 1, 2 or 3, corresponding to above options: ").casefold()
        # Search whole directory.
        if user_choice == "1":
            print("-" * 100 + "\n")
            print("The current directory is ", current_directory + "\\")
            # User enters search term to search current dir documents for.
            item_type = input("Please enter EXACT keyword you are searching for (eg. motion, action): ").casefold()
            try:
                for files in collect_motions(current_directory + "\\", item_type, item_type_master_list):
                    print(files)
            except TypeError:
                # collect_motions returned None (directory listing failed).
                print("[Error finding ", item_type, " No files found in directory.]")
            try:
                # Write data to .txt file.
                write_text_file(item_type_master_list, item_type, output_count)
                output_count += 1
                # Clear content of list for next search.
                item_type_master_list = []
            except Exception:
                print("Error: Unable to write data to text file.")
            # User decides if to break the loop.
            print("\nSearch for '", item_type, "' complete.")
            end_choice = input("\nKeyword instances collected. If nothing visible then no files/keywords were found. "
                               "Would you like to rerun the program? (y/n) ")
            if (end_choice == "y") or (end_choice == "yes") or (end_choice == "Yes"):
                loop_check = 1
            else:
                loop_check = -1
        # Search a single file.
        elif user_choice == "2":
            print("\n" + "-" * 100)
            print("The current directory is ", current_directory + "\\" + "\n")
            print("\n" + "-" * 100)
            # User defines the item to search for.
            item_type = input("Please enter the type of item you are searching for (motion, action): ").casefold()
            # Print files in directory to help user choose.
            try:
                print(".doc and .docx files found in directory: \n\n", parse_directory(current_directory))
            except TypeError:
                print("[Error finding ", item_type, " No files found in directory.]\n")
            # User enters file to search.
            path = input("\nPlease enter the filename: ")
            # Correct potential omission of (.docx) file type.
            if ".docx" not in path:
                path += ".docx"
            try:
                for files in read_file(path, item_type):
                    print(files)
            except TypeError:
                print("[Read File Error: Cannot construct list of ", item_type, " in non-existent file.]\n", "-" * 100)
            try:
                # Write data to .txt file.
                # NOTE(review): unlike option 1, output_count is not incremented
                # here, so a later search can overwrite this file - confirm.
                write_text_file(read_file(path, item_type), item_type, output_count)
            except Exception:
                print("Error: Unable to write data to text file.")
            loop_check = -1
        # User wants to end the program.
        elif user_choice == "3":
            break
        # Catch invalid user selections.
        else:
            print("[Input Error: Input must be 1, 2 or 3.]\n", "-" * 100)
    print("\nProgram complete. Thanks for using RatiFinder v. 0.1! Graphical functionality will be in future versions. "
          "For more information please see README.txt. To report bugs please email christian.pearson@stemist.ca")
    input("\nPress enter to end program. ")
if __name__ == '__main__':
main()
|
"""
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
# Long description for the package metadata comes straight from the README.
with open("README.md", 'r') as f:
    long_description = f.read()
# NOTE(review): py2app builds macOS apps, but this path uses Windows
# separators - confirm the intended build platform.
APP = ['BuildTool\\main.py']
DATA_FILES = []
OPTIONS = {}
setup(
    app=APP,
    data_files=DATA_FILES,
    options={'py2app': OPTIONS},
    setup_requires=['py2app'],
    name="BuildTool",
    version="1.0",
    description="Python Build Tool to support {NS} builds/tests for Android an iOS",
    author="alexZaicev",
    author_email="alex.zaicef@gmail.com",
    # Bug fix: 'packages' must be a list of package names; a bare string is
    # rejected by distutils/setuptools option checking.
    packages=["BuildTool"],
    install_requires=["schedule"],
    long_description=long_description
)
|
from github import Github
# Log in to the GitHub account.
# NOTE(review): the bare 'except:' clauses below also swallow
# KeyboardInterrupt/SystemExit - consider 'except Exception:'.
try:
    git_hesap = Github("K_Adı", "Sifre")
    #print("Hesabınıza başarı ile giriş yapıldı.")
except:
    print("Hesabınıza giriş başarısız!")
# Search GitHub repositories and print the results.
try:
    repos = git_hesap.search_repositories(query="language:python")
    print("Arama yapılıyor..")
    for repo in repos:
        print(repo)
except:
    print("Arama yapılamadı!")
# Get the names of the repositories on the profile.
try:
    for repo in git_hesap.get_user().get_repos():
        print(repo.name)
except:
    print("Kullanıcıya ait repo isimleri alınamadı!")
# Fetch a specific repository.
try:
    repo = git_hesap.get_repo("Vitaee/PythonileSesliAsistanV2")
    # Get the star count of the selected repository.
    print(repo.stargazers_count)
except:
    print("Hata işlem başarısız!")
# See how many people have viewed the selected repository.
try:
    repo = git_hesap.get_repo("Vitaee/PythonileSesliAsistanV2")
    traff = repo.get_views_traffic()
    print(traff)
except:
    print("Proje bilgilerine erişilemedi!")
# List the files contained in the selected repository.
try:
    repo = git_hesap.get_repo("Vitaee/PythonileSesliAsistanV2")
    content = repo.get_contents("")
    for content_fil in content:
        print(content_fil)
except:
    print("Hata!")
# Create a new repository ("deneme" = "test").
try:
    user = git_hesap.get_user()
    repo = user.create_repo("deneme")
except:
    print("Repo oluşturulamadı!")
# Add a file to the newly created repository.
try:
    repo.create_file("test.txt", "commit","deneme yapıyorum")
except:
    print("Dosya ekleme işlemi başarısız oldu!")
# Delete the test file from the created repository.
try:
    repo = git_hesap.get_repo("Vitaee/deneme")
    cont = repo.get_contents("test.txt")
    repo.delete_file(cont.path,"remove deneme", cont.sha, branch="master")
    print("Dosya silme işlemi başarı ile tamamlandı!")
except:
    print("Dosya silme işlemi başarısız!")
|
from Core.Globals import *
from Mapping import Mapping, E
class VehicleMapping(Mapping):
    """Base class for input mappings that control a single vehicle/target (Python 2 code)."""
    def __init__(self,target):
        self.target = target
class Keyboard1Mapping(VehicleMapping):
    """WASD keyboard control scheme for one player."""
    IMAGE = 'Keyboard1Mapping.png'
    def __init__(self,target):
        VehicleMapping.__init__(self,target)
    # Map key-down events to vehicle behavior events; returns the resulting
    # event object, or None for unhandled keys.
    def KeyPressedEvent(self, key):
        if key == KEY.W: return self.target.vehicle.behavior.PlayerAccelerateEvent (+1.0)
        elif key == KEY.S: return self.target.vehicle.behavior.PlayerAccelerateEvent (-1.0)
        elif key == KEY.LSHIFT: return self.target.vehicle.behavior.PlayerBrakeEvent (True)
        elif key == KEY.A: return self.target.vehicle.behavior.PlayerTurnEvent (-1.0)
        elif key == KEY.D: return self.target.vehicle.behavior.PlayerTurnEvent (+1.0)
        elif key == KEY.SPACE: return self.target.vehicle.behavior.PlayerBoostEvent (True)
        elif key == KEY.C: return self.target.CameraChangedEvent ( )
        elif key == KEY.ESCAPE: return E.PauseEvent ( )
        elif key == KEY.T: return self.target.vehicle.behavior.RespawnCarEvent ( )
        elif key == KEY.F5: return self.target.DebugCameraToggle()
    # Key-up events cancel the corresponding continuous action.
    def KeyReleasedEvent(self, key):
        if key == KEY.W: return self.target.vehicle.behavior.PlayerAccelerateEvent ( 0)
        elif key == KEY.S: return self.target.vehicle.behavior.PlayerAccelerateEvent ( 0)
        elif key == KEY.LSHIFT:return self.target.vehicle.behavior.PlayerBrakeEvent (False)
        elif key == KEY.A: return self.target.vehicle.behavior.PlayerTurnEvent ( 0)
        elif key == KEY.D: return self.target.vehicle.behavior.PlayerTurnEvent ( 0)
        elif key == KEY.SPACE: return self.target.vehicle.behavior.PlayerBoostEvent (False)
class Keyboard2Mapping(VehicleMapping):
    """Arrow-key control scheme for a second keyboard player."""
    IMAGE = 'Keyboard2Mapping.png'
    def __init__(self,target):
        VehicleMapping.__init__(self,target)
    # Map key-down events to vehicle behavior events; returns the resulting
    # event object, or None for unhandled keys.
    def KeyPressedEvent(self, key):
        if key == KEY.UP: return self.target.vehicle.behavior.PlayerAccelerateEvent (+1.0)
        elif key == KEY.DOWN: return self.target.vehicle.behavior.PlayerAccelerateEvent (-1.0)
        elif key == KEY.RCONTROL: return self.target.vehicle.behavior.PlayerBrakeEvent (True)
        elif key == KEY.LEFT: return self.target.vehicle.behavior.PlayerTurnEvent (-1.0)
        elif key == KEY.RIGHT: return self.target.vehicle.behavior.PlayerTurnEvent (+1.0)
        elif key == KEY.NUMPAD0: return self.target.vehicle.behavior.PlayerBoostEvent (True)
        elif key == KEY.DECIMAL: return self.target.CameraChangedEvent ( )
        elif key == KEY.PAUSE: return E.PauseEvent ( )
        elif key == KEY.RSHIFT: return self.target.vehicle.behavior.RespawnCarEvent ( )
    # Key-up events cancel the corresponding continuous action.
    def KeyReleasedEvent(self, key):
        if key == KEY.UP: return self.target.vehicle.behavior.PlayerAccelerateEvent ( 0)
        elif key == KEY.DOWN: return self.target.vehicle.behavior.PlayerAccelerateEvent ( 0)
        elif key == KEY.RCONTROL: return self.target.vehicle.behavior.PlayerBrakeEvent (False)
        elif key == KEY.LEFT: return self.target.vehicle.behavior.PlayerTurnEvent ( 0)
        elif key == KEY.RIGHT: return self.target.vehicle.behavior.PlayerTurnEvent ( 0)
        elif key == KEY.NUMPAD0: return self.target.vehicle.behavior.PlayerBoostEvent (False)
class KeyboardDebugMapping(VehicleMapping):
    """Keyboard mapping for the free debug camera."""
    def __init__(self,target):
        VehicleMapping.__init__(self,target)
    def KeyPressedEvent(self, key):
        if key == KEY.UP: return E.CameraAccelerateEvent (+1.0)
        elif key == KEY.DOWN: return E.CameraAccelerateEvent (-1.0)
        elif key == KEY.LEFT: return E.CameraStrafeEvent (-1.0)
        elif key == KEY.RIGHT: return E.CameraStrafeEvent (+1.0)
        elif key == KEY.R: return E.ReloadConstsEvent ( )
    # Release applies the opposite delta to cancel the motion started on press.
    def KeyReleasedEvent(self, key):
        if key == KEY.UP: return E.CameraAccelerateEvent (-1.0)
        elif key == KEY.DOWN: return E.CameraAccelerateEvent (+1.0)
        elif key == KEY.LEFT: return E.CameraStrafeEvent (+1.0)
        elif key == KEY.RIGHT: return E.CameraStrafeEvent (-1.0)
        elif key == KEY.TAB:
            # NOTE(review): "On" is printed when CAR_DEBUG is currently True,
            # i.e. just before it is toggled OFF - message looks inverted; confirm.
            if CONSTS.CAR_DEBUG:
                print "DebugMode is On"
            CONSTS.CAR_DEBUG = not CONSTS.CAR_DEBUG
        else:
            return E.ReloadConstsEvent()
    def MouseMovedEvent(self, relX, relY):
        # Mouse deltas are scaled down by 300 before reaching the camera.
        return E.CameraLookAroundEvent(relX/300.,relY/300.)
class GamepadMapping(VehicleMapping):
    """Maps one physical gamepad (selected by gamepadId) to a vehicle."""
    IMAGE = 'GamepadMapping.png'
    def __init__(self, gamepadId, target):
        VehicleMapping.__init__(self,target)
        self.gamepadId = gamepadId
    def GamepadStick1AbsoluteEvent(self, gamepadId, x, y):
        # Left stick steers; axis values are divided by 1000 (raw range is
        # presumably about +/-1000 - confirm against the input backend).
        if gamepadId == self.gamepadId:
            return self.target.vehicle.behavior.PlayerTurnEvent(x/1000.0)
    def GamepadTriggerAbsoluteEvent(self, gamepadId, z):
        # Trigger accelerates; the sign is flipped by the -1000 divisor.
        if gamepadId == self.gamepadId:
            return self.target.vehicle.behavior.PlayerAccelerateEvent(z/-1000.0)
    def GamepadButtonPressedEvent(self, gamepadId, button):
        if gamepadId == self.gamepadId:
            if button == cpp.BUTTON_START:
                return E.PauseEvent()
            elif button == cpp.BUTTON_A:
                return self.target.vehicle.behavior.PlayerBoostEvent(True)
            elif button == cpp.BUTTON_B:
                return self.target.vehicle.behavior.PlayerBrakeEvent(True)
            elif button == cpp.BUTTON_Y:
                return self.target.CameraChangedEvent()
            elif button == cpp.BUTTON_X:
                return self.target.vehicle.behavior.RespawnCarEvent()
    def GamepadButtonReleasedEvent(self, gamepadId, button):
        # Button release ends the continuous brake/boost actions.
        if gamepadId == self.gamepadId:
            if button == cpp.BUTTON_B:
                return self.target.vehicle.behavior.PlayerBrakeEvent(False)
            elif button == cpp.BUTTON_A:
                return self.target.vehicle.behavior.PlayerBoostEvent(False)
# Thin wrappers binding a fixed gamepad id (0-3) to the shared GamepadMapping.
class Gamepad1Mapping(GamepadMapping):
    def __init__(self, target):
        GamepadMapping.__init__(self, 0, target)
class Gamepad2Mapping(GamepadMapping):
    def __init__(self, target):
        GamepadMapping.__init__(self, 1, target)
class Gamepad3Mapping(GamepadMapping):
    def __init__(self, target):
        GamepadMapping.__init__(self, 2, target)
class Gamepad4Mapping(GamepadMapping):
    def __init__(self, target):
        GamepadMapping.__init__(self, 3, target)
class GamepadDebugMapping(VehicleMapping):
    """Debug camera control via the gamepad's second stick."""
    def __init__(self,target):
        VehicleMapping.__init__(self,target)
    def GamepadStick2AbsoluteEvent(self, gamepadId, relX, relY):
        #needs calibration
        return E.CameraLookAroundEvent(relX/300.,relY/300.)
    def GamepadButtonPressedEvent(self, gamepadId, button):
        # X toggles CAR_DEBUG; any other button reloads the constants.
        if button == cpp.BUTTON_X:
            if CONSTS.CAR_DEBUG:
                print "DebugMode is Off"
            CONSTS.CAR_DEBUG = not CONSTS.CAR_DEBUG
        else:
            # NOTE(review): "DebugMode is On" is printed here even though this
            # branch does not change CAR_DEBUG - confirm intended.
            print "DebugMode is On"
            return E.ReloadConstsEvent()
class GameMapping(Mapping):
    """Composite mapping that broadcasts every *Event call to its children."""
    def __init__(self, mappings):
        self.mappings = mappings
    def __getattribute__(self, attr):
        # Non-event attributes resolve normally; any attribute name ending in
        # 'Event' resolves to a dispatcher that forwards the call to every
        # child mapping defining that event handler.
        if not attr.endswith('Event'): return object.__getattribute__(self, attr)
        def f(*args):
            for mapping in self.mappings:
                m = getattr(mapping, attr, None)
                if m: m(*args)
        # NOTE(review): the children's return values are discarded here, while
        # the individual mappings return event objects - confirm intended.
        return f
|
import psycopg2
import pandas
import numpy
import objects
import config
# Load 2019 population figures from a spreadsheet and push them into the
# municipality/region demographics tables.
db = objects.Database(config.server, config.database, config.user, config.password)
# Read population data.
# NOTE(review): absolute user-specific path - consider making it configurable.
dfPopulation = pandas.read_excel(r"C:\Users\hoged\OneDrive\Skrivebord\Speciale\Data\Population (2019).xlsx", skiprows=2, nrows=106, usecols='D:E')
########################################################################################################################################################### Municipality population
# Get municipalities in database.
dfMunicipalities = db.Read("SELECT * FROM municipality")
# Insert each population number.
for i, row in dfMunicipalities.iterrows():
    # Identify corresponding population data.
    populationResult = "NULL"
    # Match on the municipality name with its ' Kommune' suffix stripped;
    # only an unambiguous (single-row) match is used.
    populationResults = dfPopulation[dfPopulation['Enhed'].str.contains(row['name'].replace('s Kommune','').replace(' Kommune',''))]
    if len(populationResults.index) == 1:
        populationResult = populationResults.iloc[0]['2019K4']
    # NOTE(review): SQL is built with str.format; values come from a local
    # spreadsheet, but parameterized queries would be safer.
    sql = """UPDATE municipalitydemographics SET population = {0} WHERE municipalitycode = {1};""".format(populationResult, row['municipalitycode'])
    # NOTE(review): the municipality update is currently disabled
    # (Insert/Commit commented out) - confirm this is intentional.
    #db.Insert(sql)
#db.Commit()
########################################################################################################################################################### Region population
# Get regions in database.
dfRegions = db.Read("SELECT * FROM region")
# Insert each population number.
for i, row in dfRegions.iterrows():
    # Identify corresponding population data.
    populationResult = "NULL"
    populationResults = dfPopulation[dfPopulation['Enhed'].str.contains(row['name'].replace('Region ',''))]
    if len(populationResults.index) == 1:
        populationResult = populationResults.iloc[0]['2019K4']
    sql = """INSERT INTO regiondemographics(yearofmeasurement, regioncode, population) VALUES({0}, {1}, {2});""".format('2019', row['regioncode'], populationResult)
    db.Insert(sql)
db.Commit()
########################################################################################################################################################### Disconnect from database
db.Disconnect() |
import unittest
from src.core import formi
# [] TODO: add test to combine more than 1 operations
class FormiCoreTestCase(unittest.TestCase):
    """ Test formi.py core functions """
    def test_join_string_function(self):
        # Newlines collapse to a comma-separated line by default.
        result = formi.join_string('the\nquick')
        expected = 'the, quick'
        self.assertEqual(result, expected)
    def test_join_string_function_custom_delimiter(self):
        # A caller-supplied delimiter replaces the default comma.
        result = formi.join_string('the\nquick', ';')
        expected = 'the; quick'
        self.assertEqual(result, expected)
    def test_expand_string_function(self):
        # Spaces expand back into one-item-per-line form.
        result = formi.expand_string('the quick')
        expected = 'the\nquick'
        self.assertEqual(result, expected)
    def test_count_inputTextEdit_content(self):
        text = """the
quick
little
brown
fox
jumps
over
the
lazy
dog."""
        result = formi.count_input(text)
        expected = 10
        self.assertEqual(result, expected)
    def test_count_outputTextEdit_content(self):
        # NOTE(review): this test exercises str.split directly and never calls
        # formi - consider counting via the formi API instead.
        text = 'w3, 12, 3, 13, 54, 3, 32, 3, 3, 3, 3, 3, 3, 3, 2'
        result = len(text.split(','))
        expected = 15
        self.assertEqual(result, expected)
    def test_remove_duplicate_function(self):
        # Duplicates are dropped; the result is an ordered list of strings.
        text = 'apple\napple\nbanana\nkiwi\norange\norange'
        result = formi.remove_duplicate(text)
        expected = ['apple', 'banana', 'kiwi', 'orange']
        self.assertEqual(result, expected)
if __name__ == '__main__':
unittest.main()
|
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtMultimedia import *
from random import randint
from doudizhu import poker_distribute, pattern_spot, cards_validate, strategy, compare, ai_jiaofen, rearrange, print_cards
import json, socket, threading, struct, sys, os, doudizhu, logging, sqlite3
# Module-wide logger writing to the file './log'.
logger_name = 'doudizhu_log'
logger = logging.getLogger(logger_name)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh = logging.FileHandler('log')
# NOTE(review): the handler level is WARNING, so the many logger.info(...)
# calls below never reach the log file - confirm whether this is intended.
fh.setLevel(logging.WARNING)
fh.setFormatter(formatter)
logger.addHandler(fh)
# logging.basicConfig(filename='log', level=logging.DEBUG)
class DouDiZhu(QMainWindow):
    def __init__(self, n=1, sockets=(0, 0, 0), host='jarrod'):
        # 'host' is the local player's display name; n/sockets are accepted
        # for multiplayer setups but unused in this constructor.
        super().__init__()
        self.cards = [[], [], [], []]  # hands of the three players + the 3 hole cards (dipai)
        self.out_cards = [[], [], []]  # cards currently played by each player
        self.scores = [0, 0, 0]        # bidding scores per player
        self.dizhu = 0                 # index of the landlord
        self.player_now = self.dizhu   # whose turn it is
        self.tips_cards = []
        self.finished = False
        self.pass_me = [1, 1, 1]
        self.can_pass = False
        self.person = [1, 0, 0]        # 1 = human-controlled seat, 0 = AI (see play_cycle)
        self.names = [host, '', '']
        self.time_count = 5000         # countdown in ms (timer ticks every 1 ms)
        self.fake_ai_think_time = 200000000
        self.winner = '地主'
        self.user_acted = 0  # 0 waiting to bid, 1 bid placed, 2 waiting to play, 3 cards played
        self.replay = False
        self.gender = ['Man', 'Woman', 'Man']
        self.InitUI()
    def InitUI(self):
        """Build the main window: menus, countdown timer, card areas and buttons."""
        # NOTE(review): 'screen' is never used - confirm before removing.
        screen = QDesktopWidget().screenGeometry()
        self.statusBar().showMessage('Ready to play!')
        menubar = self.menuBar()
        exitAction = QAction('&Exit', self)
        exitAction.setShortcut('Ctrl+Q')
        exitAction.setStatusTip('Exit application')
        exitAction.triggered.connect(self.close)
        recordsAction = QAction('&Records', self)
        recordsAction.setShortcut('Ctrl+R')
        recordsAction.setStatusTip('Show your records')
        recordsAction.triggered.connect(self.show_records)
        fileMenu = menubar.addMenu('&File')
        fileMenu.addAction(recordsAction)
        fileMenu.addAction(exitAction)
        playAction = QAction('Play', self)
        playAction.triggered.connect(self.initiate_game)
        menubar.addAction(playAction)
        tuoguanAction = QAction('Tuoguan', self)
        tuoguanAction.triggered.connect(self.tuoguan)
        menubar.addAction(tuoguanAction)
        hostAction = QAction('Create Room', self)
        hostAction.setStatusTip('Create a room and tell other players the room number!')
        hostAction.triggered.connect(self.creat_room)
        joinAction = QAction('Join Room', self)
        joinAction.setStatusTip('Got a room number and join a game!')
        joinAction.triggered.connect(self.join_room)
        multiplayerMenu = menubar.addMenu('&Multiplayer')
        multiplayerMenu.addAction(hostAction)
        multiplayerMenu.addAction(joinAction)
        # Render the menu bar inside the window rather than as a native OS menu bar.
        menubar.setNativeMenuBar(False)
        self.timer = QTimer()
        self.timer.timeout.connect(self.add_time)
        self.widget = QWidget()
        self.avatars = []
        self.lbl_names = []
        for i in range(3):
            self.lbl_names.append(QLabel(self.names[i]))
            lbl_tmp = QLabel()
            lbl_tmp.setPixmap(self.scaled_pixmap(os.path.join('pics', 'doudizhu.png')))
            self.avatars.append(lbl_tmp)
        self.lbl_top = QLabel('This is for messages!')
        self.lcd_time = QLCDNumber()
        self.lcd_time.setDigitCount(7)
        self.lcd_time.setSegmentStyle(QLCDNumber.Flat)
        self.cards_area_one = PukeOne(self.cards[0], False, 1, self)
        self.cards_area_two = PukeTwo(self.cards[1], False, 2, self)
        self.cards_area_three = PukeTwo(self.cards[2], False, 3, self)
        self.out_cards_area_one = PukeOne([], True, 1, self)
        self.out_cards_area_two = PukeThree([], True, 2, self)
        self.out_cards_area_three = PukeThree([], True, 3, self)
        self.card_area_dipai = PukeOne(self.cards[3], True, 4, self)
        self.cards_areas = [self.cards_area_one, self.cards_area_two, self.cards_area_three, self.card_area_dipai]
        self.out_cards_areas = [self.out_cards_area_one, self.out_cards_area_two, self.out_cards_area_three]
        send_button = QPushButton('出牌')
        send_button.clicked.connect(self.send_cards)
        tips_button = QPushButton('提示')
        tips_button.clicked.connect(self.tips)
        skip_button = QPushButton('跳过')
        skip_button.clicked.connect(self.skip)
        grid = QGridLayout()
        # Top row: names/avatars of players 2 and 3, the message bar and the
        # countdown display; the grid is divided into roughly 15x15 cells.
        grid.addWidget(self.lbl_names[2], 0, 0, 1, 1)
        grid.addWidget(self.avatars[2], 1, 0, 2, 1)
        grid.addWidget(self.lbl_top, 0, 1, 1, 4)
        grid.addWidget(self.lcd_time, 0, 11, 1, 1)
        grid.addWidget(self.lbl_names[1], 0, 12, 1, 1)
        grid.addWidget(self.avatars[1], 1, 12, 2, 1)
        # Middle, left to right: player 3's hand, player 3's played cards, the
        # hole-card area, player 2's played cards, player 2's hand; player 1's
        # played-cards area sits bottom-centre.
        grid.addWidget(self.cards_area_three, 3, 0, 9, 1)
        grid.addWidget(self.out_cards_area_three, 3, 1, 6, 5)
        grid.addWidget(self.card_area_dipai, 0, 5, 2, 3)
        grid.addWidget(self.out_cards_area_two, 3, 7, 6, 5)
        grid.addWidget(self.out_cards_area_one, 9, 1, 3, 11)
        grid.addWidget(self.cards_area_two, 3, 12, 9, 1)
        # Bottom row, left to right: player 1's avatar, name, hand, and the
        # three buttons: play (出牌), hint (提示), skip (跳过).
        grid.addWidget(self.avatars[0], 13, 0, 2, 1)
        grid.addWidget(self.lbl_names[0], 15, 0, 1, 1)
        grid.addWidget(self.cards_area_one, 12, 1, 4, 11)
        grid.addWidget(send_button, 13, 12, 1, 1)
        grid.addWidget(tips_button, 14, 12, 1, 1)
        grid.addWidget(skip_button, 15, 12, 1, 1)
        self.widget.setLayout(grid)
        self.setCentralWidget(self.widget)
        self.setFixedSize(1000, 800)
        self.setWindowFlags(Qt.WindowMinimizeButtonHint)
        self.setWindowTitle('Doudizhu --' + self.names[0])
        self.setWindowIcon(QIcon(os.path.join('pics', 'doudizhu.png')))
        self.show()
        self.play_music()
    def get_default_out_card_height(self):
        # Current height of player 1's played-cards area.
        return self.out_cards_area_one.height()
# 将头像缩小至对应区域的大小,同时保持比例
def scaled_pixmap(self, pic):
avatar = QPixmap(pic)
height = 0
width = 0
max_width = 1 * self.width()/13
max_height = 1 * self.height()/13
avatar_width = avatar.width()
avatar_heigth = avatar.height()
if max_width/max_height > avatar_width/avatar_heigth:
height = int(max_height)
width = int(avatar_width * avatar_heigth/max_height)
else:
width = int(max_width)
height = int(avatar_heigth * avatar_width/max_width)
return avatar.scaled(width, height, aspectRatioMode=Qt.KeepAspectRatio)
    def set_avatar(self):
        """Show landlord/farmer avatars for the three seats based on self.dizhu."""
        for i in range(3):
            # Player 1 (index 1, right side of the window) faces left; the
            # others face right.
            if i == 1:
                direction = 'left'
            else:
                direction = 'right'
            dizhu_avatar = self.scaled_pixmap(os.path.join('pics', os.path.join('pukeimage', 'dizhu-' + direction + '.jpg')))
            nongmin_avatar = self.scaled_pixmap(os.path.join('pics', os.path.join('pukeimage', 'nongmin-' + direction + '.jpg')))
            if i == self.dizhu:
                self.avatars[i].setPixmap(dizhu_avatar)
            else:
                self.avatars[i].setPixmap(nongmin_avatar)
    def initiate_game(self):
        """Reset all round state, deal fresh hands and start the bidding phase."""
        if self.person[0] == 0:
            self.tuoguan()
        index = randint(1,2)
        self.change_music(index)
        ai_names = ['Harry', 'Ron', 'Hermione', 'Albus', 'Severus', 'Minerva', 'Hagrid', 'Lupin', 'Moody', 'Horace',
                    'Filius', 'Dom', 'Brian', 'Mia', 'Letty']
        self.scores = [0, 0, 0]
        self.finished = False
        self.pass_me = [1, 1, 1]
        self.can_pass = False
        self.time_count = 10000
        self.fake_ai_think_time = 100000000  # set far beyond the countdown so it never fires during a human turn
        self.user_acted = 0  # 0 waiting to bid, 1 bid placed, 2 waiting to play, 3 cards played
        self.cards_areas[3].can_display_dipai = False
        for i in range(3):
            if i > 0:
                self.cards_areas[i].diplay_num = False
            self.update_cards_area(self.out_cards_areas[i], [])
            self.avatars[i].setPixmap(self.scaled_pixmap(os.path.join('pics', 'doudizhu.png')))
        # Pick two random, distinct AI names unless this is a replay.
        if not self.replay:
            for i in range(1,3):
                t = randint(0, len(ai_names)-1)
                self.names[i] = ai_names[t]
                self.lbl_names[i].setText(ai_names[t])
                ai_names.remove(ai_names[t])
        self.cards = poker_distribute()
        logger.info("all player cards:" + str(self.cards))
        for i in range(4):
            self.update_cards_area(self.cards_areas[i], self.cards[i])
        self.lbl_top.setText('开始叫分(1-3分)')
        self.jiaofen()
    def jiaofen(self):
        """Run the human player's bidding dialog, then hand over to AI bids."""
        self.timer.start(1)  # 1 ms ticks drive the countdown in add_time()
        self.jiaofen_window = JiaoFenWindow(parent=self)
        self.jiaofen_window.exec_()
        self.scores[0] = self.jiaofen_window.get_score()
        logger.info("1号玩家叫分: " + str(self.scores[0]))
        self.user_acted = 1
        self.reset_timer()
        self.jiaofen_window.destroy()
        self.after_jiaofen()
def add_time(self):
    """Timer tick (1 ms): update the countdown display, play voice reminders, and handle timeouts."""
    self.lcd_time.display(int(self.time_count/1000))
    self.time_count -= 1
    self.fake_ai_think_time -= 1
    # Voice reminder at each of the last few whole seconds.
    if self.time_count in [6000, 5000, 4000, 3000, 2000]:
        self.play_sound('Special_Remind.mp3')
    # Countdown expired: auto-play for the user if they were due to act.
    if self.time_count < 0:
        self.timer.stop()
        self.reset_timer()
        if self.user_acted == 2:
            self.lbl_top.setText('时间到,将自动出牌!')
            self.ai_already_acted()
            self.user_already_acted()
    # Simulated AI "thinking" delay elapsed: let the AI seat act now.
    if self.fake_ai_think_time < 0:
        self.ai_already_acted()
        self.reset_timer()
        self.play_cycle()
def after_jiaofen(self):
    """Collect the AI bids, decide the landlord, hand over the kitty cards, and start play."""
    for i in range(1, 3):
        self.scores[i] = ai_jiaofen(self.cards[i])
    logger.info('all jiaofen: ' + str(self.scores))
    # Highest bidder becomes the landlord (dizhu); ties go to the lowest seat index.
    self.dizhu = self.scores.index(max(self.scores))
    self.set_avatar()
    self.player_now = self.dizhu
    logger.info('dizhu: ' + str(self.player_now+1))
    self.lbl_top.setText('地主是' + str(self.player_now+1) + '号玩家:' + self.names[self.player_now])
    # The landlord takes the three kitty cards (index 3) into their hand.
    self.cards[self.player_now] += self.cards[3]
    self.cards[self.player_now].sort()
    self.update_cards_area(self.cards_areas[self.player_now], self.cards[self.player_now])
    self.cards_areas[3].can_display_dipai = True
    self.cards_areas[3].update()
    self.play_cycle()
def play_cycle(self):
    """Advance the turn loop until the next actor (user or AI) is armed, then start the tick timer."""
    while not self.finished:
        if self.cards[self.player_now] == []:
            self.lbl_top.setText('游戏结束,赢家是' + self.names[self.player_now])
        # If both previous players passed, reset last_result so the current
        # player leads a fresh trick (and is then not allowed to pass).
        if self.pass_me[(self.player_now - 1) % 3] and self.pass_me[(self.player_now - 2) % 3]:
            self.last_result = {'validate': True, 'nums': [0], 'result': 'null'}
            self.can_pass = False
            self.pass_me[self.player_now] = 0
            logger.info(str(self.player_now+1) + '不能跳过')
        else:
            self.can_pass = True
        if self.person[self.player_now]:
            # Human seat: push the fake AI delay out of reach and wait for input.
            self.fake_ai_think_time = 10000000
            self.user_acted = 2
        else:
            # AI seat: pretend to "think" for a short random time before playing.
            out_nums = strategy(self.cards[self.player_now], self.last_result)
            if out_nums:
                self.fake_ai_think_time = randint(1000, 2000)
            else:
                self.fake_ai_think_time = 1000
        self.reset_timer()
        self.timer.start(1)  # add_time() decides whether the user or the AI acts next
        self.update_cards_area(self.out_cards_areas[self.player_now], [])
        break
def reset_timer(self):
    """Reset the on-screen countdown back to 20 seconds for the current player."""
    # Stopping an active timer here somehow prevented the first round of a
    # replay from starting its countdown, so it stays disabled:
    # if self.timer.isActive():
    #     self.timer.stop()
    self.lcd_time.display(0)
    self.time_count = 20000
    logger.info(str(self.player_now+1)+',时间重设为20秒')
def ai_already_acted(self, real_ai=True):
    """Compute (and optionally play) the AI move for the current player.

    With real_ai=True the chosen cards are played immediately; with
    real_ai=False the move is only computed (used by the "tips" button
    to suggest cards for the human player).
    Returns the list of cards the AI would play ([] means pass).
    """
    self.pass_me[self.player_now] = 0
    out_nums = strategy(self.cards[self.player_now], self.last_result)
    logger.info('上家出牌是:' + str(self.last_result) + '\n' + str(self.player_now+1) + '号玩家的牌是:' + str(
        self.cards[self.player_now]) + '\n电脑算出的' + str(self.player_now+1) + '号玩家出牌号码是: ' + str(out_nums))
    if not out_nums:
        self.pass_me[self.player_now] = 1
        logger.info(str(self.player_now+1) + '号玩家没有牌可以大的了,过!')
    if real_ai:
        # Farmer cooperation: the landlord (the seat before us) passed and the
        # play to beat comes from our farmer partner — do not beat our partner.
        if (self.player_now - 1) % 3 == self.dizhu and self.pass_me[self.dizhu] and not self.pass_me[(self.player_now - 2) % 3] and not self.person[self.player_now]:
            out_nums = []
            self.pass_me[self.player_now] = 1
            logger.info(str(self.player_now+1) + '号玩家,电脑算出上家是地主,对家出牌的时候不压')
        # Farmer cooperation: our partner just played a high card (rank 13/14) — do not beat it.
        if self.player_now != self.dizhu and (self.player_now - 1) % 3 != self.dizhu and self.last_result['nums'][0] in [13, 14] and not self.pass_me[
                (self.player_now - 1) % 3] and not self.person[self.player_now]:
            out_nums = []
            self.pass_me[self.player_now] = 1
            logger.info(str(self.player_now+1) + '号玩家,电脑算出上家是对家,且出了大牌,那么不压')
    out_cards = rearrange(self.cards[self.player_now], out_nums)
    logger.info('电脑算出的' + str(self.player_now+1) + '号玩家出牌是: ' + str(out_cards))
    if self.pass_me[self.player_now] == 0:
        logger.info(str(self.player_now+1) + '号玩家出牌: ' + str(out_cards))
    else:
        logger.info(str(self.player_now+1) + '号玩家过!')
    if real_ai:
        self.play_out_cards(out_cards)
    return out_cards
def finished_or_not(self):
    """Check whether the current player has emptied their hand.

    If so: announce the winner, compute the human player's score delta
    (landlord wins/losses count double the bid), persist it, offer a
    replay, and return True. Otherwise return False.
    """
    if len(self.cards[self.player_now]) == 0:
        if self.player_now == self.dizhu:
            self.winner = '地主'
            self.lbl_top.setText('游戏结束,地主胜!')
            # jifen is the HUMAN player's delta, signed by win/loss.
            if self.dizhu == 0:
                jifen = self.scores[0] * 2
            else:
                jifen = -self.scores[0]
        else:
            if self.dizhu == 0:
                jifen = -self.scores[0] * 2
            else:
                jifen = self.scores[0]
            self.winner = '农民'
            self.lbl_top.setText('游戏结束,农民胜!')
        if jifen>0:
            sound_file = 'MusicEx_Win.mp3'
        else:
            sound_file = 'MusicEx_Lose.mp3'
        self.mediaplayer.stop()
        self.play_sound(sound_file)
        # Reveal the hands of players 2 and 3.
        for i in range(1, 3):
            self.cards_areas[i].diplay_num = True
            self.cards_areas[i].update()
        # Only persist when the SQLite file already exists in the cwd.
        if 'data.db' in os.listdir():
            write_db(self.names[0], jifen)
        replay_window = ReplayWindow(self.winner, self)
        replay_window.exec_()
        self.replay = replay_window.get_replay()
        replay_window.destroy()
        if self.replay:
            self.initiate_game()
        else:
            self.close()
        return True
    else:
        return False
# The action of actually playing cards out onto the table.
def play_out_cards(self, outcards):
    """Apply a play (or a pass: [] / the 161 sentinel) for the current player and advance the turn."""
    self.reset_timer()
    # 161 is the sentinel card number meaning "pass".
    if outcards and 161 not in outcards:
        for card in outcards:
            self.cards[self.player_now].remove(card)
        if self.player_now == 0:
            self.cards_areas[self.player_now].chosen_cards = []
        self.last_result = cards_validate(outcards)
        self.update_cards_area(self.cards_areas[self.player_now], self.cards[self.player_now])
        self.update_cards_area(self.out_cards_areas[self.player_now], outcards)
    else:
        self.lbl_top.setText(str(self.player_now + 1) + '号玩家过!')
        self.update_cards_area(self.out_cards_areas[self.player_now], [161])
    self.finished = self.finished_or_not()
    if self.finished:
        return
    self.player_now = (self.player_now + 1) % 3
def show_records(self):
    """Open the modal score-history dialog for the human player."""
    dialog = RecordsWindow(self.names[0], self)
    dialog.exec_()
def creat_room(self):
    """Host a LAN game: announce this machine's IP (encoded as an integer room id) and listen for peers.

    NOTE: the listening socket is created but nothing ever accepts on it;
    the LAN feature is unfinished (see join_room).
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Discover the outbound LAN IP by "connecting" a UDP socket (no traffic
    # is actually sent). FIX: close the probe socket even if connect() fails.
    s2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s2.connect(('114.114.114.114', 80))
        addr, port = s2.getsockname()
    finally:
        s2.close()
    # Encode the dotted-quad address as a single integer to use as the room id.
    num_ip = socket.ntohl(struct.unpack("I", socket.inet_aton(addr))[0])
    text = ('开房成功,您的房间号是:' + str(num_ip) + ', 快把房间号告诉玩伴吧')
    info_window = InfosWindow(text, parent=self)
    info_window.exec_()
    s.bind((addr, 9125))
    s.listen(2)
    self.lbl_top.setText('Waiting for connection...')
def join_room(self):
    """Join a hosted LAN game. Not implemented yet (see creat_room)."""
    pass
# Refresh the cards shown in one widget area.
def update_cards_area(self, cards_area, cards):
    """Refresh one card widget with new cards and play the matching voice/sound effects."""
    cards_area.cards = cards
    cards_area.update()
    if cards_area in self.out_cards_areas and cards:
        file = self.decide_sounds(cards)
        if 'zha' in file:
            # Bombs get an extra effect and switch to the "exciting" track.
            self.play_sound_2('Special_Bomb.mp3')
            self.change_music(3)
        elif 'feiji' in file:
            self.play_sound_2('Special_plane.mp3')
        self.play_sound(file)
    elif cards_area in self.cards_areas and len(cards)<3 and cards:
        # A hand dropping below 3 cards triggers the per-gender "alert" voice line.
        file = self.gender[self.player_now] + '_baojing' + str(len(cards)) + '.mp3'
        self.play_sound_2(file)
def decide_sounds(self, cards):
    """Map a played card group to the voice-clip filename for the current player."""
    gender = self.gender[self.player_now]
    if cards[0] == 161:
        # 161 is the "pass" sentinel: pick one of three "no play" lines.
        i = randint(1, 3)
        file = gender + '_buyao' + str(i) + '.mp3'
    else:
        validate_result = cards_validate(cards)
        result = validate_result['result']
        num = validate_result['nums'][0]
        if not validate_result['validate']:
            file = 'Special_Escape.mp3'
        else:
            # Map the internal rank onto the spoken rank: jokers (>=14) keep
            # their number, 12/13 become 1/2, everything else shifts up by 2.
            if num >= 14:
                pass
            elif num >= 12:
                num -= 11
            else:
                num += 2
            related_files = {'ones': gender + '_' + str(num) + '.mp3',
                             'twos': gender + '_dui' + str(num) + '.mp3',
                             'two_jokers': gender + '_wangzha.mp3',
                             'threes': gender + '_tuple' + str(num) + '.mp3',
                             'fours': gender + '_zhadan.mp3',
                             'three_ones': gender + '_sandaiyi.mp3',
                             'three_twos': gender + '_sandaiyidui.mp3',
                             'straights': gender + '_shunzi.mp3',
                             'straights_double': gender + '_liandui.mp3',
                             'straights_triple': gender + '_feiji.mp3',
                             'four_two_ones': gender + '_sidaier.mp3',
                             'four_two_twos': gender + '_sidailiangdui.mp3',
                             'st_with_ones': gender + '_feiji.mp3',
                             'st_with_twos': gender + '_feiji.mp3',
                             'st3_with_ones': gender + '_feiji.mp3',
                             'st3_with_twos': gender + '_feiji.mp3',
                             }
            file = related_files[result]
            # If passing was allowed but the player beat the play anyway,
            # use a gloating "beat you" line (not for plain singles/pairs/triples or bombs).
            if self.can_pass and result not in ['ones', 'twos', 'threes'] and 'zha' not in file:
                i = randint(1, 3)
                file = gender + '_dani' + str(i) + '.mp3'
    return file
# "Play" button handler.
def send_cards(self):
    """Validate the human player's selection and play it when legal and beating the last play."""
    # Nothing to do once the hand is empty.
    if not self.cards[0]:
        return
    out_cards = self.cards_areas[0].chosen_cards
    out_cards.sort()
    logger.info('上家出牌是:' + str(self.last_result) + "1号玩家选择的牌是:" + str(out_cards))
    out_result = cards_validate(out_cards)
    logger.info('系统校验牌是否符合规则,结果是:' + str(out_result))
    bigger = compare(self.last_result, out_result)
    logger.info('系统校验是否比上家大:' + str(bigger))
    if bigger and out_result['validate']:
        self.pass_me[self.player_now] = 0
        self.play_out_cards(out_cards)
        if not self.finished:
            self.user_already_acted()
    else:
        self.lbl_top.setText('出牌不合法,请检查!')
# "Pass" button handler.
def skip(self):
    """Pass the turn for the human player when passing is allowed."""
    if not self.cards[0] or self.finished:
        return
    if not self.can_pass:
        self.lbl_top.setText('您不能跳过出牌!')
    else:
        self.pass_me[self.player_now] = 1
        logger.info("1号玩家过")
        self.play_out_cards([])
        self.user_already_acted()
        # Drop any cards the user had pre-selected.
        self.cards_areas[0].chosen_cards = []
        self.cards_areas[0].update()
def tips(self):
    """Suggest a play for the human: pre-select the AI's choice, or auto-pass if nothing beats."""
    suggestion = self.ai_already_acted(real_ai=False)
    hand_area = self.cards_areas[0]
    hand_area.chosen_cards = suggestion
    hand_area.update()
    if suggestion:
        return
    self.lbl_top.setText('没有牌能大过对方,自动跳过!')
    self.skip()
def user_already_acted(self):
    """Mark the human player's turn as finished and move the game loop forward."""
    self.reset_timer()
    self.user_acted = 3
    self.play_cycle()
def tuoguan(self):
    """Toggle auto-play ("tuoguan") for the human seat.

    Entering auto-play plays immediately if it was already the user's turn;
    leaving it restores the plain name label.
    """
    if self.person[0] == 1:
        self.lbl_names[0].setText(self.names[0] + '(托管中……)')
        self.person[0] = 0
        if self.player_now == 0 and self.user_acted == 2:
            self.ai_already_acted()
            self.reset_timer()
            self.play_cycle()
    else:
        self.lbl_names[0].setText(self.names[0])
        self.person[0] = 1
def play_music(self):
    """Build the looping background-music playlist and start it on the welcome track."""
    self.mediaplayer = QMediaPlayer()
    self.playlist = QMediaPlaylist()
    track_names = ['Welcome', 'Normal', 'Normal2', 'Exciting']
    for name in track_names:
        path = os.path.abspath(os.path.join('sound', 'MusicEx_' + name + '.mp3'))
        self.playlist.addMedia(QMediaContent(QUrl.fromLocalFile(path)))
    self.playlist.setCurrentIndex(0)
    self.playlist.setPlaybackMode(QMediaPlaylist.CurrentItemInLoop)
    self.mediaplayer.setPlaylist(self.playlist)
    self.mediaplayer.play()
def change_music(self, index):
    """Switch the looping background music to playlist entry `index`."""
    self.mediaplayer.pause()
    self.playlist.setCurrentIndex(index)
    self.mediaplayer.play()
def play_sound(self, file):
    """Play a one-shot sound effect on a dedicated player (kept on self so it is not GC'd mid-play)."""
    full_path = os.path.abspath(os.path.join('sound', file))
    self.tmp_player = QMediaPlayer()
    self.tmp_player.setMedia(QMediaContent(QUrl.fromLocalFile(full_path)))
    self.tmp_player.play()
def play_sound_2(self, file):
    """Second one-shot sound channel, so an effect can overlap play_sound()."""
    url = os.path.join('sound', file)
    self.tmp_player_2 = QMediaPlayer()
    self.tmp_player_2.setMedia(QMediaContent(QUrl.fromLocalFile(os.path.abspath(url))))
    self.tmp_player_2.play()
# Horizontal card strip at the bottom; display_only means it cannot be
# clicked, cards holds the internal card numbers.
class PukeOne(QFrame):
    """Horizontal card strip (player 1's hand, the kitty row, and the out-card rows)."""
    def __init__(self, cards, display_only, player, parent):
        super().__init__(parent)
        self.cards = cards
        self.display_only = display_only
        self.player = player
        self.parent = parent
        self.chosen_cards = []  # cards currently raised/selected by the user
        self.card_infos = []    # per-card draw info ({'index','pix','x','y'}), rebuilt each paint
        self.can_display_dipai = False  # whether the kitty (player 4) is shown face up
        self.InitUI()
    def InitUI(self):
        self.show()
    def paintEvent(self, event):
        painter = QPainter(self)
        # painter.begin(self)
        self.draw_cards(event, painter)
        # painter.end()
    def draw_cards(self, event, painter):
        """Lay out and paint the cards with adaptive overlap; selected cards are drawn raised."""
        self.card_infos = []  # clear the previous paint's layout info
        for i in range(len(self.cards)):
            # The kitty (player 4) is drawn face down until bidding is over.
            if self.player == 4 and not self.can_display_dipai:
                card_file = os.path.join('pics', os.path.join('pukeimage', 'back.jpg'))
            else:
                card_file = find_card_image(self.cards[i])
            pix_card = QPixmap(card_file)
            if self.display_only:  # display-only rows use the full widget height
                # The kitty row (player == 4) is drawn a bit smaller.
                if self.player == 4:
                    card_height = int(self.height()*2/3)
                    card_width = int(pix_card.width() * card_height / pix_card.height())
                else:
                    card_height = int(self.height())
                    card_width = int(pix_card.width() * card_height / pix_card.height())
                cur_y = 0
            else:  # clickable row: leave the top third free so cards can pop up
                card_height = int(self.height() * 2 / 3)
                card_width = int(pix_card.width() * card_height / pix_card.height())
                cur_y = int(self.height() / 3)
            pix_card = pix_card.scaledToHeight(card_height)
            # Neighbouring cards overlap by 1/3, so the total width is (2/3*n + 1/3)*w.
            # Grow the overlap (up to 5/6) until the row fits the widget width.
            stack_ratio = 1/3
            all_cards_width = ((1-stack_ratio) * len(self.cards) + stack_ratio) *card_width
            while all_cards_width > self.width():
                stack_ratio += 0.01
                all_cards_width = ((1 - stack_ratio) * len(self.cards) + stack_ratio) * card_width
                if stack_ratio > 5/6:
                    # Should wrap onto a second row here; not implemented.
                    break
            cur_x = int((self.width() - all_cards_width) / 2 + i * card_width * (1-stack_ratio))
            tmp_card_info = {'index': i, 'pix': pix_card, 'x': cur_x, 'y': cur_y}
            self.card_infos.append(tmp_card_info)
            # Selected cards are painted at y=0 so they appear raised.
            if self.cards[i] in self.chosen_cards:
                painter.drawPixmap(self.card_infos[i]['x'], 0, self.card_infos[i]['pix'])
            else:
                painter.drawPixmap(cur_x, cur_y, pix_card)
    def update_cards(self, cards):
        self.cards = cards
        self.update()
    def mousePressEvent(self, event):
        """Toggle selection of the card under the click (only for the clickable row)."""
        if self.display_only:
            return
        cur_x = event.x()
        cur_y = event.y()
        tmp = -1
        # Ignore clicks left/right of the painted cards.
        if len(self.cards) == 0 or cur_x < self.card_infos[0]['x'] or cur_x > (self.card_infos[len(self.card_infos)-1]['x']+self.card_infos[len(self.card_infos)-1]['pix'].width()):
            return
        # Find which (overlapping) card the x coordinate falls on.
        for i in range(len(self.card_infos)-1):
            if cur_x >= self.card_infos[i]['x'] and cur_x < self.card_infos[i+1]['x']:
                tmp = i
                break
        if tmp == -1:
            tmp = len(self.card_infos)-1
        # Unselected cards sit in the lower 2/3, selected (raised) cards in the
        # upper 2/3 — clicks outside the card's current band are ignored.
        if self.cards[tmp] not in self.chosen_cards:
            if cur_y < self.height()/3:
                return
            self.chosen_cards.append(self.cards[tmp])
        else:
            if cur_y > self.height()*2/3:
                return
            self.chosen_cards.remove(self.cards[tmp])
        self.update()
    def cards_sent(self):
        """Remove the selected cards from the hand after they were played."""
        for card in self.chosen_cards:
            self.cards.remove(card)
        self.chosen_cards = []
        self.update()
class PukeTwo(QFrame):
    """Vertical card column for players 2/3 (rotated card images)."""
    def __init__(self, cards, display_num, player, parent):
        super().__init__(parent)
        self.cards = cards
        self.diplay_num = display_num  # face up (True) or card backs (False)
        self.player = player
        self.parent = parent
        self.InitUI()
    def InitUI(self):
        self.show()
    def paintEvent(self, event):
        painter = QPainter(self)
        # painter.begin(self)
        self.draw_cards(event, painter)
        # painter.end()
    def draw_cards(self, event, painter):
        """Paint the column of rotated cards with adaptive vertical overlap."""
        self.card_infos = []  # clear the previous paint's layout info
        for i in range(len(self.cards)):
            if self.diplay_num:
                card_file = find_card_image(self.cards[i])
            else:
                card_file = os.path.join('pics', os.path.join('pukeimage', 'back.jpg'))
            # Rotate toward the table center: 90 degrees for player 3, 270 for player 2.
            tranform = QTransform()
            if self.player == 3:
                tranform.rotate(90)
            else:
                tranform.rotate(270)
            pix_card = QPixmap(card_file)
            pix_card = pix_card.transformed(tranform)
            # Players 2/3's out-card areas were also shown horizontally — deprecated.
            if self.diplay_num:
                card_width = int(self.width() / 3)
                card_height = int(pix_card.height() * card_width / pix_card.width())
                # Player 3 aligns left, player 2 aligns right.
                if self.player == 3:
                    cur_x = 0
                elif self.player == 2:
                    cur_x = int(self.width() * 2 / 3)
            else:
                # Leave 1/6 of the width as a gap between the hand and out-card areas.
                card_width = int(self.width()*5/6)
                card_height = int(pix_card.height() * card_width / pix_card.width())
                if self.player == 3:
                    cur_x = 0
                elif self.player == 2:
                    cur_x = int(self.width()/6)
            # Neighbouring cards overlap by 1/3, so the total height is (2/3*n + 1/3)*h.
            # Grow the overlap (up to 5/6) until the column fits the widget height.
            stack_ratio = 1 / 3
            all_cards_height = ((1 - stack_ratio) * len(self.cards) + stack_ratio) * card_height
            while all_cards_height > self.height():
                stack_ratio += 0.01
                all_cards_height = ((1 - stack_ratio) * len(self.cards) + stack_ratio) * card_height
                if stack_ratio > 5 / 6:
                    # Should wrap into a second column here; not implemented.
                    break
            cur_y = int((self.height() - all_cards_height) / 2 + i * card_height*(1-stack_ratio))
            pix_card = pix_card.scaledToHeight(card_height)
            tmp_card_info = {'index': i, 'pix': pix_card, 'x': cur_x, 'y': cur_y}
            self.card_infos.append(tmp_card_info)
            painter.drawPixmap(cur_x, cur_y, pix_card)
class PukeThree(QFrame):
    """Grid-like area for players 2/3's played cards: cards wrap into rows."""

    def __init__(self, cards, display_num, player, parent):
        super().__init__(parent)
        self.cards = cards
        self.diplay_num = display_num
        self.player = player
        self.parent = parent
        self.InitUI()

    def InitUI(self):
        self.show()

    def paintEvent(self, event):
        painter = QPainter(self)
        self.draw_cards(event, painter)

    def draw_cards(self, event, painter):
        """Paint the played cards, wrapping into rows; player 2's rows are right-aligned."""
        self.card_infos = []  # clear the previous paint's layout info
        for i in range(len(self.cards)):
            card_file = find_card_image(self.cards[i])
            pix_card = QPixmap(card_file)
            # Horizontal neighbours overlap by 3/4; rows overlap by stack_ratio.
            card_height = self.parent.get_default_out_card_height()  # match player 1's out-card height
            card_width = int(pix_card.width() * card_height / pix_card.height())
            # Cards per row from ((n-1)/4+1)*width = self.width  =>  n = 4*self.width/width - 3
            if card_width == 0:
                # BUG FIX: the original read "cards_per_row == 1" (a no-op
                # comparison), leaving cards_per_row unbound and raising
                # NameError whenever the pixmap failed to load.
                cards_per_row = 1
            else:
                cards_per_row = int(4 * self.width() / card_width) - 3
            # The width cannot even fit one card: force a single column anyway.
            if cards_per_row == 0:
                cards_per_row = 1
            cur_x = (i % cards_per_row) * card_width / 4
            # Player 2 is right-aligned when all cards fit on one row.
            if self.player == 2:
                if len(self.cards) < cards_per_row:
                    cur_x = cur_x + int(self.width() - card_width*(len(self.cards)+3)/4)
            # Rows overlap by 1/3 by default; grow the overlap (up to 5/6) until they fit.
            stack_ratio = 1 / 3
            all_cards_height = ((1 - stack_ratio) * len(self.cards) / cards_per_row + stack_ratio) * card_height
            while all_cards_height > self.height():
                stack_ratio += 0.01
                # FIX: recompute with the same per-row formula used above (the
                # original dropped "/ cards_per_row" here and over-shrank the stack).
                all_cards_height = ((1 - stack_ratio) * len(self.cards) / cards_per_row + stack_ratio) * card_height
                if stack_ratio > 5 / 6:
                    break
            cur_y = (1-stack_ratio)*card_height*int(i/cards_per_row)
            pix_card = pix_card.scaledToHeight(card_height)
            tmp_card_info = {'index': i, 'pix': pix_card, 'x': cur_x, 'y': cur_y}
            self.card_infos.append(tmp_card_info)
            painter.drawPixmap(cur_x, cur_y, pix_card)
class JiaoFenWindow(QDialog):
    """Modal bidding dialog: three buttons (1-3 points); auto-closes after 5 s leaving score 0."""

    def __init__(self, parent=None):
        super().__init__()
        self.score = 0
        self.InitUI()

    def InitUI(self):
        layout = QHBoxLayout()
        for points in (1, 2, 3):
            btn = QPushButton(str(points) + '分')
            btn.clicked.connect(self.button_clicked)
            layout.addWidget(btn)
        self.setLayout(layout)
        self.setWindowTitle('请叫分:')
        self.show()
        self.timer = QTimer()
        self.timer.singleShot(5000, self.close)

    def button_clicked(self):
        clicked = self.sender()
        self.score = int(clicked.text()[:1])
        self.close()

    def get_score(self):
        """Return the chosen bid (0 when the dialog timed out)."""
        return self.score
class ReplayWindow(QDialog):
    """Modal end-of-game dialog asking whether to play another round."""

    def __init__(self, winner, parent=None):
        super().__init__()
        self.winner = winner
        self.replay = 0
        self.InitUI()

    def InitUI(self):
        buttons = QHBoxLayout()
        for label in ('Cancel', 'Replay'):
            btn = QPushButton(label)
            btn.clicked.connect(self.button_clicked)
            buttons.addWidget(btn)
        column = QVBoxLayout()
        column.addWidget(QLabel(self.winner + '胜!'))
        column.addLayout(buttons)
        self.setLayout(column)
        self.setWindowTitle('再玩一局?')
        self.show()

    def button_clicked(self):
        # True only when the "Replay" button was pressed.
        self.replay = self.sender().text() == 'Replay'
        self.close()

    def get_replay(self):
        """True when the user chose to replay."""
        return self.replay
class RecordsWindow(QDialog):
    """Modal dialog showing a player's win/loss record loaded from data.db."""

    def __init__(self, name, parent=None):
        super().__init__()
        self.name = name
        self.InitUI()

    def InitUI(self):
        # Only query when the SQLite file exists in the working directory.
        if 'data.db' in os.listdir():
            records = read_db(self.name)
        else:
            records = {}
        if not records:
            text = 'No records of ' + self.name + ' found!'
        else:
            # BUG FIX: the original string started with '\的', which rendered a
            # stray backslash before "的记录" in the dialog.
            text = self.name + '的记录:\n 游戏总次数: ' + str(records['total']) + '次\n' + '胜局次数:' +\
                str(records['win']) + '次\n' + '获胜率:' + str(100*records['win']/records['total']) + '%\n' + \
                '总积分:' + str(records['jifen']) + '分\n\n\n' + '积分规则:胜了则赢取叫分的分值,输了则减掉叫分的分值\n地主加倍'
        lbl_records = QLabel(text)
        ok_button = QPushButton('OK')
        ok_button.clicked.connect(self.close)
        vbox = QVBoxLayout()
        vbox.addWidget(lbl_records)
        vbox.addWidget(ok_button)
        self.setLayout(vbox)
        self.setWindowTitle('Records')
        self.show()
class InfosWindow(QDialog):
    """Simple modal message box with a single OK button."""

    def __init__(self, text, parent=None):
        super().__init__()
        self.text = text
        self.InitUI()

    def InitUI(self):
        message = QLabel(self.text)
        ok = QPushButton('OK')
        ok.clicked.connect(self.close)
        column = QVBoxLayout()
        column.addWidget(message)
        column.addWidget(ok)
        self.setLayout(column)
        self.setWindowTitle('Records')
        self.show()
def find_card_image(num):
    """Translate an internal card number into its image path.

    Card encoding: num = rank*10 + suit. Rank 16 is the "pass" sentinel
    (a random picture from the pass directory), ranks >= 14 are the
    jokers, ranks 12/13 map to display ranks 1/2, everything else
    shifts up by 2.
    """
    n = int(num / 10)
    color = num % 10
    puke_path = os.path.join('pics', 'pukeimage')
    if n == 16:
        pass_pics_dir = os.path.join(puke_path, 'pass')
        # BUG FIX: the original compared splitext's (root, ext) tuple against
        # bare extension names (always unequal, so the test was always true)
        # and removed entries from the list while iterating it, skipping
        # elements. Filter on the real extension instead.
        pass_pics = [
            pic for pic in os.listdir(pass_pics_dir)
            if os.path.splitext(pic)[1].lstrip('.').lower() in ['jpg', 'png', 'jpeg']
        ]
        pic_path = os.path.join(pass_pics_dir, pass_pics[randint(0, len(pass_pics)-1)])
        return pic_path
    elif n >= 14:
        return os.path.join(puke_path, str(num) + '.jpg')
    elif n >= 12:
        return os.path.join(puke_path, os.path.join(str(color), str(n-11)+'.jpg'))
    else:
        return os.path.join(puke_path, os.path.join(str(color), str(n+2) + '.jpg'))
def read_db(name):
    """Fetch a player's record from data.db; return {} when the player is unknown."""
    conn = sqlite3.connect('data.db')
    cursor = conn.cursor()
    cursor.execute('select * from doudizhu where name=?', (name, ))
    rows = cursor.fetchall()
    cursor.close()
    conn.close()
    if not rows:
        return {}
    # Column layout of the doudizhu table: id, name, total, win, jifen.
    first = rows[0]
    return {'total': first[2], 'win': first[3], 'jifen': first[4]}
def write_db(name, jifen):
    """Record a finished game for `name`: bump total, add `jifen` to the score,
    and count a win only when the score delta is positive.

    BUG FIX: the original used `if jifen:` to detect a win, so a NEGATIVE
    score delta (a loss) was also counted as a win; only jifen > 0 is a win.
    """
    conn = sqlite3.connect('data.db')
    cursor = conn.cursor()
    # Read the current record inline (column order independent).
    cursor.execute('select total, win, jifen from doudizhu where name=?', (name,))
    row = cursor.fetchone()
    win = 1 if jifen > 0 else 0
    if row is None:
        cursor.execute('insert into doudizhu (name, total, win, jifen) VALUES (?, ?, ?, ?)', (name, 1, win, jifen))
    else:
        total = row[0] + 1
        win += row[1]
        jifen += row[2]
        cursor.execute('update doudizhu set total=?, win=?, jifen=? where name=?', (total, win, jifen, name, ))
    conn.commit()
    cursor.close()
    conn.close()
if __name__ == '__main__':
    # Launch the Qt application with a single human player in seat 1.
    app = QApplication(sys.argv)
    # replay = ReplayWindow('地主')
    doudizhu = DouDiZhu(1)
    app.exec_()
from django.conf.urls import url
from . import views
from django.conf import settings
from django.conf.urls.static import static
import django.contrib.staticfiles
# URL routes for the "amadon" demo shop: index, checkout and buy views.
urlpatterns = [
    url(r'^amadon$', views.index),
    url(r'^amadon/checkout$', views.checkout),
    url(r'^amadon/buy$', views.buy),
]
"""
leetcode 108
"""
def convert_sorted_array_to_bst(nums):
    """LeetCode 108: build a height-balanced BST from a sorted array.

    The middle element becomes the root; the sub-arrays left/right of
    it recursively form the subtrees. Returns None for empty input.
    """
    if not nums:  # idiomatic emptiness test (was: if not len(nums))
        return None
    half = len(nums) // 2
    root = TreeNode(nums[half])
    root.left = convert_sorted_array_to_bst(nums[:half])
    root.right = convert_sorted_array_to_bst(nums[half+1:])
    return root
import numpy as np
from core.soft import bezier
from numpy import sin, cos
from core.anim import Animation
from core.obj import Vector, Line, Curve
# Rotation rates (revolutions per unit of animation time): the minute hand is
# 60x slower than the second hand and the hour hand 12x slower still.
sHandSpeed = 12
mHandSpeed = sHandSpeed/60
hHandSpeed = mHandSpeed/12
# Ease-in/out bezier used both as the animation softener and as the drawn curve.
b = bezier([0, 0, 0, 1, 1, 1, 1, 1])
def update_s(s, t, tmax):
    """Move the second hand: rotate the line's endpoint clockwise on a circle of radius 1/2."""
    angle = np.pi/2 - sHandSpeed * 2*np.pi * t
    s.p2 = (cos(angle)/2, sin(angle)/2)
def update_m(v, t, tmax):
    """Move the minute hand: rotate the vector head clockwise on a circle of radius 1/3."""
    angle = np.pi/2 - mHandSpeed * 2*np.pi * t
    v.head = (cos(angle)/3, sin(angle)/3)
def update_h(v, t, tmax):
    """Move the hour hand: rotate the vector head clockwise on a circle of radius 1/5."""
    angle = np.pi/2 - hHandSpeed * 2*np.pi * t
    v.head = (cos(angle)/5, sin(angle)/5)
def update_c(c, t, tmax):
    """Advance the drawn fraction of the softener curve to the normalized time t/tmax."""
    c.tmax = t / tmax
def init(anim):
    """Populate the animation: hour/minute vectors, second-hand line, and the softener curve."""
    # Hour hand: short grey vector (radius 1/5), slowest rotation.
    h = anim.create(
        Vector,
        0, 1/5,
        color='.50',
        update=update_h
    )
    # Minute hand: white vector of radius 1/3.
    anim.create(
        Vector,
        0, 1/3,
        color='w',
        update=update_m
    )
    # Second hand: thin red line of radius 1/2.
    anim.create(
        Line,
        0, 0, 0, 1/2,
        color='r',
        lw=1,
        update=update_s
    )
    # Progress curve: the bezier softener mapped onto [-1, 1] x [-1, 1].
    anim.create(
        Curve,
        lambda t: (2*t-1, 2*b(t)-1),
        color='g',
        update=update_c
    )
if __name__ == '__main__':
    # NOTE(review): "global" at module level has no effect; kept as-is.
    global an
    an = Animation(
        dt=0.0001,
        length=1/mHandSpeed,  # run for one full minute-hand revolution
        speed=10,
        init_func=init,
        repeat=False,
        softener=b
    ).play()
|
import tensorflow as tf
from BNN import readData
import numpy as np
tf.reset_default_graph()
def conv2d(x, W):
    """conv2d returns a 2d convolution layer with full stride (stride 1, SAME padding)."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    """max_pool_2x2 down samples a feature map by 2X in both spatial dimensions."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
def max_pool_1x4(x):
    """max_pool_1x4 downsamples a feature map by 4X along the width only."""
    return tf.nn.max_pool(x, ksize=[1, 1, 4, 1],
                          strides=[1, 1, 4, 1], padding='SAME')
# Create some variables (names/shapes must match those in the checkpoint).
W_conv1 = tf.get_variable("W1", shape=[4, 4, 1, 32])
b_conv1 = tf.get_variable("b1", shape=[32])
W_conv2 = tf.get_variable("W2", shape=[2, 5, 32, 32])
b_conv2 = tf.get_variable("b2", shape=[32])
W_fc1 = tf.get_variable("W3", shape=[2 * 50 * 32, 50])
b_fc1 = tf.get_variable("b3", shape=[50])
# NOTE(review): W_fc2 / b_fc2 are used below but never defined in this file —
# as written the script raises NameError; the output-layer variables are missing.
# NOTE(review): h_pool2 is flattened as 4*50*64 although conv2 has 32 output
# channels and W_fc1 expects 2*50*32 inputs — verify against the training script.
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Later, launch the model, use the saver to restore variables from disk, and
# do some work with the model.
with tf.Session() as sess:
    # Restore variables from disk.
    saver.restore(sess, "../model/regularized_hashnet.ckpt")
    print("Model restored.")
    # Check the values of the variables
    data = readData.read_data_sets("../files/pair_dataset_100000.csv", 0.1, 0.1)
    features, labels = data.test.next_batch(100)
    r1 = tf.placeholder(tf.float32, [None, 3200])
    y_ = tf.placeholder(tf.float32, [None, 1])
    labels = np.reshape(labels, [100, 1])
    features = np.reshape(features, [100, 3200])
    # Each sample is treated as an 8x400 single-channel "image".
    reshaped_data = tf.reshape(r1, [-1, 8, 400, 1])
    h_conv1 = tf.nn.relu(conv2d(reshaped_data, W_conv1) + b_conv1)
    h_pool1 = max_pool_1x4(h_conv1)
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)
    h_pool2_flat = tf.reshape(h_pool2, [-1, 4 * 50 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    y_conv = tf.matmul(h_fc1, W_fc2) + b_fc2
    # Binarize the network output with sign() and compare against the +-1 labels.
    final_pred = tf.cast(tf.sign(y_conv), tf.float32)
    # test
    correct_prediction = tf.equal(final_pred, labels)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(accuracy.eval(feed_dict={r1: features, y_: labels}))
#!/usr/bin/env python
from logging import basicConfig, getLogger, DEBUG
logger = getLogger(__name__)
OBJECT = 'Logical Port'
MODULE = 'Logical Switching'
def get_list(client, logical_switch_id=None):
    """
    Return all logical ports in NSX, optionally filtered by logical switch.

    (Docstring fixed: the original claimed this lists "T0 logical routers".)

    :param client: bravado client for NSX
    :param logical_switch_id: optional switch id to restrict the listing
    :return: list of logical-port dicts
    """
    param = {}
    if logical_switch_id:
        param['logical_switch_id'] = logical_switch_id
    # getattr() is the idiomatic spelling of client.__getattr__(MODULE).
    request = getattr(client, MODULE).ListLogicalPorts(**param)
    response, _ = request.result()
    return response['results']
def get_id(client, data):
    """Resolve a logical port id from `data`: prefer an explicit 'id',
    otherwise look the port up by 'display_name'; return None if not found."""
    # BUG FIX: dict.has_key() was removed in Python 3 — use the `in` operator.
    if 'id' in data:
        return data['id']
    elif 'display_name' in data:
        objects = get_list(client)
        for obj in objects:
            if obj['display_name'] == data['display_name']:
                return obj['id']
    return None
def update(client, data):
    """Update an existing logical port; `data` must identify the port by id or display_name."""
    param = {
        'lport-id': get_id(client, data),
        'LogicalPort': data
    }
    request = client.__getattr__(MODULE).UpdateLogicalPort(**param)
    response, _ = request.result()
    return response
def create(client, data):
    """
    Create a logical port from the given LogicalPort payload and return the API response.
    """
    param = {'LogicalPort': data}
    request = client.__getattr__(MODULE).CreateLogicalPort(**param)
    response, _ = request.result()
    return response
def delete(client, data, force=False):
    """
    Delete the logical port identified by `data`; force=True detaches it first.
    """
    param = {'lport-id': get_id(client, data)}
    if force:
        param['detach'] = True
    request = client.__getattr__(MODULE).DeleteLogicalPort(**param)
    response, _ = request.result()
    return response
|
import numpy as np
import pandas as pd
import faiss
from sentence_transformers import SentenceTransformer
from tqdm import tqdm
class Clusterer:
    """Embed the texts in `data.body` with a SentenceTransformer and k-means them into clusters."""
    def __init__(self, data: pd.DataFrame, model=None):
        self.model = model  # optional pre-built SentenceTransformer
        self.data = data    # expects a 'body' column of texts
    @staticmethod
    def chunks(lst, n):
        # Yield successive n-sized slices of lst.
        for i in range(0, len(lst), n):
            yield lst[i:i + n]
    @staticmethod
    def get_embeddings(inputs: np.array, model: SentenceTransformer,
                       bs: int) -> np.array:
        """Encode `inputs` in batches of size `bs` and stack the embeddings into one array."""
        tot = len(inputs) // bs
        inputs = Clusterer.chunks(inputs, bs)
        embeddings = []
        for _, inp in tqdm(enumerate(inputs), total=tot):
            embeddings.extend(model.encode(inp))
        return np.vstack(embeddings)
    def process(self, embeddings=None, with_gpu=True):
        """Compute (or reuse) embeddings, then assign a 'clusters' column via FaissKMeans."""
        if embeddings is None:
            print('Embeddings not passed, will produce them...')
            if not self.model:
                # NOTE(review): this model id looks misspelled — the published
                # model is 'paraphrase-distilroberta-base-v1'; confirm before use.
                self.model = SentenceTransformer(
                    'distilroberta_base_paraphase-v1')
            self.embeddings = Clusterer.get_embeddings(self.data.body.values,
                                                       self.model, 80)
            # Cache the (expensive) embeddings to disk for reuse.
            np.save('sentence_embeddings.npy',
                    self.embeddings,
                    allow_pickle=True)
        else:
            self.embeddings = embeddings
        print('Clustering...')
        cluster = FaissKMeans(n_clusters=10, n_init=20, max_iter=300)
        cluster.fit(self.embeddings, with_gpu=with_gpu)
        print('Clustering done...')
        self.data['clusters'] = cluster.predict(self.embeddings)
class FaissKMeans:
    """Thin scikit-learn-style wrapper around faiss.Kmeans."""
    def __init__(self, n_clusters=8, n_init=10, max_iter=300):
        self.n_clusters = n_clusters
        self.n_init = n_init
        self.max_iter = max_iter
        self.kmeans = None            # underlying faiss.Kmeans, set by fit()
        self.cluster_centers_ = None  # centroids after fit()
        self.inertia_ = None          # final objective value after fit()
    def fit(self, X, with_gpu=True):
        """Train k-means on X (cast to float32, as faiss requires)."""
        self.kmeans = faiss.Kmeans(d=X.shape[1],
                                   k=self.n_clusters,
                                   niter=self.max_iter,
                                   nredo=self.n_init,
                                   gpu=with_gpu,
                                   verbose=True)
        self.kmeans.train(X.astype(np.float32))
        self.cluster_centers_ = self.kmeans.centroids
        self.inertia_ = self.kmeans.obj[-1]
    def predict(self, X):
        """Return the nearest-centroid index for every row of X."""
        return self.kmeans.index.search(X.astype(np.float32), 1)[1]
|
from operator import itemgetter
import psycopg2
import networkx as nx
import matplotlib.pyplot as plt
from networkx.drawing.nx_agraph import graphviz_layout
from subprocess import call
import pandas as pd
def select(query):
    """Run a read-only query against the local MovieLens Postgres DB.

    Returns all fetched rows, or None when the query/connection failed
    (the error is printed, not raised).
    """
    con = None
    result = None
    try:
        con = psycopg2.connect(database='MovieLens', user='postgres', password='postgres', host='localhost')
        cur = con.cursor()
        cur.execute(query)
        result = cur.fetchall()
    except psycopg2.DatabaseError as e:
        print("Error {}".format(e))
    finally:
        # Closing the connection also releases its cursor.
        if con:
            con.close()
    return result
def select_movies_by_user(user_id, min_rating):
    """Return (movie_id, title, normalized_rating) for movies this user rated >= min_rating.

    NOTE(review): the query is built with str.format — acceptable only for
    trusted numeric inputs; prefer parameterized queries for anything else.
    """
    query = "SELECT movie_id, rating_value FROM rating WHERE user_id = {} AND rating_value >= {}".format(user_id, min_rating)
    #print(query)
    result = select(query)
    # Ratings are 0-5; normalize to [0, 1]. `movies` is the module-level
    # id -> title dict produced by build_movie_dict().
    return [(int(x[0]), movies[int(x[0])], float(x[1])/5) for x in result]
def select_genres_by_movie(movie_id):
    """Return (genre_id, genre_name, 1.0) for every genre of the given movie."""
    query = "SELECT genre_id FROM movie_genre WHERE movie_id = {}".format(movie_id)
    #print(query)
    result = select(query)
    return[(int(x[0]), genres[int(x[0])], 1.0) for x in result]
def select_users_by_movie(movie_id, min_rating):
    """Return (user_id, "USER#id", normalized_rating) for users (id <= 5000) who rated the movie >= min_rating."""
    query = "SELECT user_id, rating_value FROM rating WHERE movie_id = {} AND rating_value >= {} AND user_id <= 5000 ".format(movie_id, min_rating)
    #print(query)
    result = select(query)
    return[(int(x[0]), "USER#{}".format(str(int(x[0]))), float(x[1])/5) for x in result]
def select_gtags_by_movie(movie_id, min_relevance):
    """Return (gtag_id, tag, relevance) for genome tags of the movie with relevance >= min_relevance."""
    query = "SELECT gtag_id, gs_relevance FROM gtag_score WHERE movie_id = {} AND gs_relevance >= {}".format(movie_id, min_relevance)
    #print(query)
    result = select(query)
    return[(int(x[0]), gtags[int(x[0])], float(x[1])) for x in result]
def select_movies_by_genre(genre_id):
    """Return (movie_id, title, 1.0) for every movie in the given genre."""
    query = "SELECT movie_id FROM movie_genre WHERE genre_id = {}".format(genre_id)
    #print(query)
    result = select(query)
    return[(int(x[0]), movies[int(x[0])], 1.0) for x in result]
def select_movies_by_gtag(gtag_id, min_relevance):
    """Return (movie_id, title, relevance) for movies tagged with the genome tag at relevance >= min_relevance."""
    query = "SELECT movie_id, gs_relevance FROM gtag_score WHERE gtag_id = {} AND gs_relevance >= {}".format(gtag_id, min_relevance)
    #print(query)
    result = select(query)
    return[(int(x[0]), movies[int(x[0])], float(x[1])) for x in result]
def buildItemsets(min_rating):
    """Write per-user itemsets of well-rated movies for SPMF.

    Movie ids are re-encoded as consecutive integers (SPMF needs sorted
    item lines). Writes data/encoded_itemsets.txt (encoded, sorted) and
    data/decoded_itemsets.txt (original ids), and returns the
    movie_id -> encoded-int hash table.
    """
    user_ratings = dict()
    user_movies = dict()  # same as user_ratings but NOT encoded
    hash_table = dict()
    count = 0
    query = "SELECT user_id, movie_id, rating_value FROM rating WHERE rating_value >= {} ORDER BY user_id".format(min_rating)
    result = select(query)
    for entry in result:
        user_id = entry[0]
        movie_id = entry[1]
        movie_hash = hash_table.get(movie_id, 0)
        if movie_hash == 0:
            # First time we see this movie: assign the next consecutive code.
            count += 1
            hash_table[movie_id] = count
            movie_hash = count
        ratings = user_ratings.get(user_id, [])
        ratings += [movie_hash]
        # NOTE(review): this local `movies` shadows the module-level movie
        # title dict used by the select_* helpers (harmless here, but confusing).
        movies = user_movies.get(user_id, [])
        movies += [movie_id]
        user_ratings[user_id] = ratings
        user_movies[user_id] = movies
    with open('data/encoded_itemsets.txt', 'w') as f:
        for key in user_ratings.keys():
            f.write(" ".join(list(map(str, sorted(user_ratings[key])))))
            f.write("\n")
    with open('data/decoded_itemsets.txt', 'w') as f:
        for key in user_movies.keys():
            f.write(" ".join(list(map(str, user_movies[key]))))
            f.write("\n")
    return hash_table
def run_spmf():
    """Run the SPMF Eclat frequent-itemset miner over the encoded itemsets (10% min support)."""
    # BUG FIX: the input filename was misspelled "enconded_itemsets.txt",
    # which never matches the file buildItemsets() actually writes.
    args = ["java", "-jar", "../tools/spmf.jar", "run", "Eclat", "data/encoded_itemsets.txt", "data/spmf_output.txt", "10%"]
    call(args)
def build_movie_dict():
    """Return a movie_id -> title dict covering the whole movie table."""
    result = select("SELECT movie_id, movie_title FROM movie")
    # (Removed the unused `prev_size` local from the original loop.)
    return {entry[0]: entry[1] for entry in result}
def build_genre_dict():
    """Return a genre_id -> name dict covering the whole genre table."""
    rows = select("SELECT genre_id, genre_name FROM genre")
    lookup = dict()
    for genre_id, genre_name in rows:
        lookup[genre_id] = genre_name
    return lookup
def build_gtag_dict():
    """Return a gtag_id -> tag dict covering the whole genome-tag table."""
    rows = select("SELECT gtag_id, gtag_tag FROM gtag")
    lookup = dict()
    for gtag_id, gtag_tag in rows:
        lookup[gtag_id] = gtag_tag
    return lookup
def build_gtag_score_table(min_relevance):
    """Return a pandas Series of relevance scores indexed by (gtag_id, movie_id)."""
    query = "SELECT gtag_id, movie_id, gs_relevance FROM gtag_score WHERE gs_relevance >= {}".format(min_relevance)
    result = select(query)
    tuples = [(x[0], x[1]) for x in result]
    values = [x[2] for x in result]
    index = pd.MultiIndex.from_tuples(tuples, names=['gtag_id', 'movie_id'])
    s = pd.Series(values, index=index)
    return s
def decode(movie_map):
    """Translate SPMF's encoded patterns back into original movie ids.

    Reads data/spmf_output.txt, strips each line's " #SUP: n" suffix,
    maps every encoded item through movie_map (unknown items become 0),
    and writes one decoded pattern per line to data/decoded_patterns.txt.
    """
    with open("data/spmf_output.txt", "r") as encoded, \
            open("data/decoded_patterns.txt", 'w') as decoded:
        for line in encoded:
            items = line.split(" #SUP: ")[0].split(" ")
            translated = [str(movie_map.get(int(item), 0)) for item in items]
            decoded.write(" ".join(translated) + "\n")
def run():
    """End-to-end pipeline: build itemsets, mine them with SPMF, decode output."""
    movie_hashtable = buildItemsets(4)  # keep only ratings >= 4
    # Invert the encoding table so SPMF item codes map back to movie ids.
    decoder = {code: movie_id for movie_id, code in movie_hashtable.items()}
    run_spmf()
    decode(decoder)
def build_graphs(distance, min_rating, min_relevance, input_file, output_folder):
    """Build and save one networkx graph per pattern line of input_file.

    Each line of input_file is a space-separated list of movie ids.  Every
    movie seeds a breadth-limited expansion (build_graph_fast) up to
    `distance` hops; the union of the expansions for a line becomes one
    graph written to <output_folder>/g<count>.gml.

    NOTE(review): min_rating and min_relevance are only consumed by the
    commented-out build_graph_lite call; build_graph_fast ignores them.
    NOTE(review): "pr.txt" is opened for writing but never written to.
    """
    count = 0
    with open(input_file, "r") as f, open("pr.txt", "w") as out:
        for line in f:
            node_list = list()
            edge_list = list()
            movies_in_pattern = list()  # Pattern description
            pattern = line.strip().split(" ")
            for element in pattern:
                # Seed node is (movie_id, movie_title); `movies` is the
                # module-level id -> title dict.
                first_node = (int(element), movies[(int(element))])
                movies_in_pattern += [movies[(int(element))]]
                nodes = set()
                nodes.add(first_node)
                # nodes, edges = build_graph(first_node, "movie", nodes, set(), 2)
                # nodes, edges = build_graph_lite(first_node, "movie", nodes, set(), distance, min_rating, min_relevance)
                nodes, edges = build_graph_fast(first_node, "movie", nodes, set(), distance)
                # Keep only the names
                node_list += [x[1] for x in nodes]
                edge_list += [(x[0][1], x[1][1], x[2]) for x in edges]
            # Build the networkx.Graph
            g = nx.Graph()
            g.add_nodes_from(node_list)
            g.add_weighted_edges_from(edge_list)
            g.name = ", ".join(movies_in_pattern)
            print(nx.info(g))
            g_filename = "{}/g{}.gml".format(output_folder, count)
            nx.write_gml(g, g_filename)
            count += 1
            if count % 50 == 0:
                # Progress indicator every 50 graphs.
                print(count)
def build_graph(node, node_type, nodes, edges, distance ):
    """Recursively expand `node` into the (nodes, edges) accumulators.

    `node` is an (id, label[, weight]) tuple; edges are stored as
    (source_node, (id, label), weight) triples.  Movies fan out to their
    genres, users and genome tags; each of those fans back out to movies.
    Recursion stops after `distance` hops.
    """
    if distance == 0:
        return nodes, edges

    def expand(found, child_type):
        # Record each discovered (id, label, weight) row as an edge and a
        # node, then recurse one level deeper from it.  Uses nonlocal so the
        # rebinding done by recursive calls is preserved, exactly as the
        # original sequential assignments did.
        nonlocal nodes, edges
        edges.update((node, (row[0], row[1]), row[2]) for row in found)
        nodes.update((row[0], row[1]) for row in found)
        for row in found:
            nodes, edges = build_graph(row, child_type, nodes, edges, distance - 1)

    if node_type == "movie":
        movie_id = node[0]
        expand(select_genres_by_movie(movie_id), "genre")
        expand(select_users_by_movie(movie_id, 5), "user")
        expand(select_gtags_by_movie(movie_id, 0.9), "gtag")
    elif node_type == "genre":
        expand(select_movies_by_genre(node[0]), "movie")
    elif node_type == "gtag":
        expand(select_movies_by_gtag(node[0], 0.9), "movie")
    elif node_type == "user":
        expand(select_movies_by_user(node[0], 5), "movie")
    return nodes, edges
def build_graph_lite(node, node_type, nodes, edges, distance, min_rating, min_relevance ):
    """Recursively expand `node` via movie <-> genome-tag edges only.

    Like build_graph but restricted to gtag relations and intended to skip
    already-visited nodes.  Recursion stops after `distance` hops.

    NOTE(review): min_rating is accepted but unused (kept for interface
    compatibility with build_graph_lite's callers).

    BUGFIX: the original computed found_nodes.difference(nodes), but
    found_nodes holds (id, label, weight) triples while nodes holds
    (id, label) pairs, so the difference never removed anything and
    already-visited nodes were re-expanded.  We now filter on the
    (id, label) projection.
    """
    if distance == 0:
        return nodes, edges
    if node_type == "movie":
        found_nodes = select_gtags_by_movie(node[0], min_relevance)
        child_type = "gtag"
    elif node_type == "gtag":
        found_nodes = select_movies_by_gtag(node[0], min_relevance)
        child_type = "movie"
    else:
        # Unknown node types expand to nothing (matches original fallthrough).
        return nodes, edges
    # Keep only rows whose (id, label) endpoint is not already in the graph.
    new_nodes = {row for row in set(found_nodes) if (row[0], row[1]) not in nodes}
    edges.update((node, (row[0], row[1]), row[2]) for row in new_nodes)
    nodes.update((row[0], row[1]) for row in new_nodes)
    for row in new_nodes:
        nodes, edges = build_graph_lite(row, child_type, nodes, edges,
                                        distance - 1, min_rating, min_relevance)
    return nodes, edges
def build_graph_fast(node, node_type, nodes, edges, distance):
    """Recursively expand `node` using the in-memory gtag_score Series.

    Same movie <-> gtag traversal as build_graph_lite, but relevance scores
    are looked up in the preloaded pandas Series `gtag_score` (indexed by a
    (gtag_id, movie_id) MultiIndex) instead of querying the database.
    `gtags` and `movies` are the module-level id -> label dicts.

    BUGFIX: the original deduplicated with found_nodes.difference(nodes),
    but found_nodes holds (id, label, weight) triples while nodes holds
    (id, label) pairs, so nothing was ever filtered; we now compare on the
    (id, label) projection.  The bare `except:` clauses were also narrowed
    to `except Exception` so Ctrl+C is not swallowed.
    """
    if distance == 0:
        return nodes, edges
    if node_type == "movie":
        try:
            # Cross-section: every tag scored for this movie.
            found = gtag_score[:, node[0]].to_dict().items()
        except Exception:  # movie absent from the score table
            found = []
        found_nodes = [(gtag_id, gtags[gtag_id], score) for gtag_id, score in found]
        child_type = "gtag"
    elif node_type == "gtag":
        try:
            # Every movie scored for this tag.
            found = gtag_score[node[0], :].to_dict().items()
        except Exception:  # tag absent from the score table
            found = []
        found_nodes = [(movie_id, movies[movie_id], score) for movie_id, score in found]
        child_type = "movie"
    else:
        # Unknown node types expand to nothing (matches original fallthrough).
        return nodes, edges
    # Keep only rows whose (id, label) endpoint is not already in the graph.
    new_nodes = {row for row in set(found_nodes) if (row[0], row[1]) not in nodes}
    edges.update((node, (row[0], row[1]), row[2]) for row in new_nodes)
    nodes.update((row[0], row[1]) for row in new_nodes)
    for row in new_nodes:
        nodes, edges = build_graph_fast(row, child_type, nodes, edges, distance - 1)
    return nodes, edges
def print_labels():
    """Print each decoded pattern's movie titles and write them to labels.txt."""
    with open("data/decoded_patterns.txt", "r") as src, open("labels.txt", "w") as dst:
        for index, line in enumerate(src):
            # `movies` is the module-level id -> title dict.
            labels = [movies[int(movie_id)] for movie_id in line.strip().split(" ")]
            print(labels)
            dst.write("{}: ".format(index) + " ".join(labels) + "\n")
# run()
# Module-level setup: load the lookup tables from the database once.
# `movies`, `genres`, `gtags` and `gtag_score` are read as globals by the
# graph-building functions above.
movies = build_movie_dict()
genres = build_genre_dict()
gtags = build_gtag_dict()
gtag_score = build_gtag_score_table(0.95)  # keep only relevance >= 0.95
# print_labels()
# build_graphs(3, 4, 0.95, "decoded.txt", "graphs")
build_graphs(3, 4, 0.95, "movies.txt", "movie_graphs")
import skimage
from skimage import io
import os
import glob

# Convert every training image to grayscale, mirroring the raw directory
# layout under data/bw/.  Python 2 script (print statements).
IMAGE_PATH = 'data/raw/images_train_rev1/'
IMAGE_BW_PATH = 'data/bw/images_train_rev1/'

# Create the output directories if they do not exist yet.
if not os.path.isdir('data/bw/'):
    os.mkdir('data/bw/')
if not os.path.isdir(IMAGE_BW_PATH):
    os.mkdir(IMAGE_BW_PATH)

images = glob.glob(
    os.path.join(IMAGE_PATH, "*.jpg"))

for i, img in enumerate(images):
    if not i % 1000:
        # Progress indicator every 1000 images.
        print i
    img_ = io.imread(img)
    img_ = skimage.color.rgb2gray(img_)
    img_name = os.path.basename(img)
    # NOTE(review): IMAGE_BW_PATH already ends with '/', so this produces a
    # double slash; harmless on POSIX but os.path.join would be cleaner.
    io.imsave(IMAGE_BW_PATH + '/' + img_name, img_)
print 'Done!'
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np

# Minimal hand-rolled reverse-mode autodiff demo for z = 2*x1 + x1*sin(x2).
# Each "node" dict mirrors an autograd graph node: an accumulated gradient
# plus the list of parent branches with the local partial derivative
# (f_grad) taken with respect to each parent.

# 1. variable definition
x1 = 1.0
x1_node = {"requires_grad": True, "grad": 0, "backward_branches": []}
x2 = np.pi / 4
x2_node = {"requires_grad": True, "grad": 0, "backward_branches": []}

# 2. forward propagation
y1 = 2 * x1
y1_node = {
    "requires_grad": True,
    "grad": 0,
    # d(y1)/d(x1) = 2
    "backward_branches": [{"previous_node": x1_node, "f_grad": 2}],
}
y2 = x1 * np.sin(x2)
y2_node = {
    "requires_grad": True,
    "grad": 0,
    "backward_branches": [
        # d(y2)/d(x1) = sin(x2)
        {"previous_node": x1_node, "f_grad": np.sin(x2)},
        # d(y2)/d(x2) = x1 * cos(x2)
        {"previous_node": x2_node, "f_grad": x1 * np.cos(x2)},
    ],
}
z = y1 + y2
z_node = {
    "requires_grad": True,
    "grad": 0,
    "backward_branches": [
        # d(z)/d(y1) = d(z)/d(y2) = 1
        {"previous_node": y1_node, "f_grad": 1},
        {"previous_node": y2_node, "f_grad": 1},
    ],
}
def backward(node, grad=1):
    """Accumulate `grad` into `node` and propagate it toward the leaves.

    Implements the chain rule: each parent branch receives grad * f_grad,
    where f_grad is the local partial derivative stored on the branch.
    Gradients are only accumulated on nodes with requires_grad set, but
    propagation continues regardless.
    """
    if node["requires_grad"]:
        node["grad"] += grad
    branches = node["backward_branches"]
    if not branches:
        # Leaf node: end of the chain.
        return
    for branch in branches:
        backward(branch["previous_node"], grad * branch["f_grad"])
# 3. backward propagation (seed dz/dz = 1 at the output node)
backward(z_node)

# 4. visualization: expect dz/dx1 = 2 + sin(x2) and dz/dx2 = x1 * cos(x2)
print(x1_node["grad"])
print(x2_node["grad"])
|
# coding: utf-8
import MySQLdb
import datetime, time
import csv

# Export per-symbol stock price history plus news/twitter sentiment
# predictions from the FinanceVis MySQL database into one CSV per symbol.
# Python 2 script (print statements, file(), xrange).
host = 'localhost'
db = MySQLdb.connect(host, 'root', 'vis_2014', 'FinanceVis')
cursor = db.cursor()

headers = ["Date", "Open", "High", "Low", "Close", "Volume", "Adj Close",
"predict_news_word", "predict_twitter_word", "bias_news_word", "bias_twitter_word"]
file_suffix = ""  # NOTE(review): unused

symbols = ['AAPL']  # NOTE(review): immediately overwritten by the next line
symbols = ['AAPL', 'GOOG', 'WMT', 'BA', 'BAC', 'GM', 'T', 'XOM'] #, 'GSPC']
# symbols = ['GSPC']
for symbol in symbols:
    print symbol
    csvfile = file('500Stock_prices/%s.history.price.predict.csv'%symbol, 'wb')
    writer = csv.writer(csvfile)
    writer.writerow(headers)
    # Rows come back newest-first.  NOTE(review): SQL built via string
    # formatting — safe only because `symbols` is hard-coded above.
    sql = 'select Date,Open,High,Low,Close,Volume,Adj_close,predict_news,predict_twitter,bias_news,bias_twitter from stock where symbol="%s" order by date(Date) desc' % symbol
    cursor.execute(sql)
    results = cursor.fetchall()
    for row in results:
        data = []
        # if time.strptime(row[0], "%Y-%m-%d") < time.strptime('2006-10-20', "%Y-%m-%d"):
        # print row[0]
        # break
        # Adj Close; NOTE(review): only used by the commented-out scaling below.
        close = float(row[6])
        for i in xrange(len(row)):
            value = row[i]
            # Write SQL NULL columns as empty strings.
            # NOTE(review): idiomatic form would be `value is None`.
            if value==None:
                value = ""
            # elif i==9:
            # value = float(value)*close
            data.append(value)
        # for i in xrange(2):
        # data.append("")
        # data.append(row[len(row)-1])
        # for i in xrange(2):
        # data.append("")
        writer.writerow(data)
    csvfile.close()
cursor.close()
db.close()
# Peek at the first line of the feature file and report how many
# space-separated features it contains.  Python 2 script.
# NOTE(review): despite the .bin extension the file is read as text; mode
# 'U' (universal newlines) is deprecated in Python 3.
with open('../result/csi.bin', 'U') as inf:
    for line in inf:
        feats = line.strip().split(' ')
        print len(feats)
        break
|
from ximea import xiapi
import cv2
import time
from camera_setting import white_balance_adjustment
def adjust_camera(cam):
    """Live-preview the camera feed so the operator can aim and focus it.

    Shows frames in an OpenCV window until ESC (keycode 27) is pressed.
    """
    img =xiapi.Image()
    while cv2.waitKey(33) != 27:  # ~30 fps poll; 27 == ESC
        cam.get_image(img)
        data = img.get_image_data_numpy()
        cv2.imshow("img", data)
# Open the XIMEA camera, let the operator adjust it, then record frames to
# an AVI until ESC or Ctrl+C.
cam = xiapi.Camera()

# start communication
print('Opening first camera...')
cam.open_device()

# settings
cam.set_imgdataformat('XI_RGB24')
# cam.set_exposure(792)
# cam.set_region_selector(0)
# cam.set_width(1264)
# cam.set_height(1016)
# cam.set_gain(15)

# create instance of Image to store image data and metadata
img = xiapi.Image()

# start data acquisition
print('Starting data acquisition...')
#cam.start_acquisition()
white_balance_adjustment(cam)

print('please adjust camera for pouring measurement')
adjust_camera(cam)

fourcc = cv2.VideoWriter_fourcc('I', '4', '2', '0')
output_video = cv2.VideoWriter("/home/hairui/Videos/experiments/317-11D.avi", fourcc, 24, (640, 600))
try:
    # Record frames until ESC (keycode 27) is pressed.
    while cv2.waitKey(1) != 27:
        cam.get_image(img)
        data = img.get_image_data_numpy()
        output_video.write(data)
except KeyboardInterrupt:
    pass
finally:
    # BUGFIX: the original only destroyed windows on Ctrl+C and never
    # released the VideoWriter or closed the camera, so the AVI could be
    # left unfinalized.  Clean up on every exit path.
    output_video.release()
    cv2.destroyAllWindows()
    cam.close_device()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.