text stringlengths 38 1.54M |
|---|
# coding:utf-8
import os
import uuid
import errno
import flask
from werkzeug.exceptions import abort
from werkzeug.utils import secure_filename
from werkzeug.datastructures import FileStorage
try:
from PIL import Image, ImageOps
except ImportError:
raise RuntimeError('Image module of PIL needs to be installed')
IMAGES = ('.jpg', '.jpeg', '.png', '.gif', '.svg', '.bmp', '.webp')
def addslash(url):
    """Return *url* with exactly one trailing slash appended if missing.

    Falsy input (None, '') is passed through as None.
    """
    if not url:
        return None
    return url if url.endswith('/') else url + '/'
class UploadNotAllowed(Exception):
    """Raised when an uploaded file's extension is not permitted.

    Catch this in view code and show the user an appropriate message.
    """
class PhotoManagerConfiguration(object):
    """
    Holds PhotoManager settings.

    :param destination: directory where photos are stored
    :param base_url: base URL under which photos are served
    :param thumb_destination: directory where thumbnails are stored
    :param thumb_base_url: base URL under which thumbnails are served
    :param allow: allowed file extensions (lower-case, with leading dot)
    """
    def __init__(self, destination, base_url, thumb_destination=None, thumb_base_url=None, allow=IMAGES):
        self.destination = destination
        self.base_url = addslash(base_url)
        self.thumb_destination = thumb_destination
        self.thumb_base_url = addslash(thumb_base_url)
        self.allow = allow

    @property
    def tuple(self):
        # Canonical value tuple used for comparison and hashing.
        return (self.destination, self.base_url,
                self.thumb_destination, self.thumb_base_url,
                self.allow)

    def __eq__(self, other):
        # Bug fix: comparing against an arbitrary object used to raise
        # AttributeError; return NotImplemented so Python falls back correctly.
        if not isinstance(other, PhotoManagerConfiguration):
            return NotImplemented
        return self.tuple == other.tuple

    def __hash__(self):
        # Bug fix: defining __eq__ without __hash__ made instances unhashable
        # under Python 3 (__hash__ is implicitly set to None).
        return hash(self.tuple)
class PhotoManager(object):
    """
    Handles product photo upload, storage and thumbnail generation.

    Configuration keys
    |Key                 | Default             | Description
    |--------------------|---------------------|-------------------
    |MEDIA_PHOTOS_FOLDER | 'media/photos'      | directory where photos are stored
    |MEDIA_THUMBS_FOLDER | 'media/thumbs'      | directory where thumbnails are stored
    |MEDIA_PHOTOS_URL    | '/media/photos/'    | base URL under which photos are served
    |MEDIA_THUMBS_URL    | '/media/thumbs/'    | base URL under which thumbnails are served

    Main methods:
        save(file): store an uploaded file
        url(filename): return the photo's URL
        thumb_url(filename): return the thumbnail URL (generated and cached on demand)
    """
    def __init__(self, app=None, ):
        self.config = None
        self.blueprint = flask.Blueprint('photo_manager', __name__)
        if app is not None:
            self.app = app
            self.init_app(self.app)
        else:
            self.app = None

    def init_app(self, app):
        """Read settings from ``app.config``, register the serving routes and
        expose ``photo_url_for``/``thumb_url_for`` as Jinja globals."""
        self.app = app
        destination = app.config.get('MEDIA_PHOTOS_FOLDER', 'media/photos')
        base_url = app.config.get('MEDIA_PHOTOS_URL', '/media/photos/')
        thumb_destination = app.config.get('MEDIA_THUMBS_FOLDER', 'media/thumbs')
        thumb_base_url = app.config.get('MEDIA_THUMBS_URL', '/media/thumbs/')
        self.config = PhotoManagerConfiguration(destination, base_url, thumb_destination, thumb_base_url)
        self.blueprint.add_url_rule(self.config.base_url + '<filename>',
                                    endpoint='export_photo', view_func=self.export_photo)
        self.blueprint.add_url_rule(self.config.thumb_base_url + '<miniature>',
                                    endpoint='export_thumb', view_func=self.export_thumb)
        self.app.register_blueprint(self.blueprint)
        self.app.jinja_env.globals.update(photo_url_for=self.url)
        self.app.jinja_env.globals.update(thumb_url_for=self.thumb_url)

    def export_photo(self, filename):
        """View: serve an original photo from the configured destination."""
        path = self.config.destination
        return flask.send_from_directory(path, filename)

    def export_thumb(self, miniature):
        """View: serve a cached thumbnail."""
        return flask.send_from_directory(self.config.thumb_destination, miniature)

    def resolve_conflict(self, target_folder, basename):
        """
        If a file with the selected name already exists in the target folder,
        this method is called to resolve the conflict. It should return a new
        basename for the file.

        :param target_folder: The absolute path to the target.
        :param basename: The file's original basename.
        """
        return self.resolve_conflict_by_add_count(target_folder, basename)

    @staticmethod
    def resolve_conflict_by_add_count(target_folder, basename):
        """Append ``_1``, ``_2``, ... before the extension until the name is free."""
        name, ext = os.path.splitext(basename)
        count = 0
        while True:
            count += 1
            newname = '%s_%d%s' % (name, count, ext)
            if not os.path.exists(os.path.join(target_folder, newname)):
                return newname

    @staticmethod
    def url(filename):
        """
        :param filename: The filename to return the URL for.
        """
        return flask.url_for('photo_manager.export_photo', filename=filename)

    def thumb_url(self, filename, **options):
        """Return the URL of a thumbnail for *filename*, generating it on demand.

        Extra keyword options are forwarded to :meth:`make_thumb`.
        Aborts with 404 when the source image is missing or unreadable.
        """
        miniature = self.make_thumb(filename, override=False, **options)
        if not miniature:
            abort(404)
        return flask.url_for('photo_manager.export_thumb', miniature=miniature)

    def make_thumb(self, filename, miniature=None, override=False, size='96x96',
                   width=None, height=None, crop=None, bg=None, quality=85):
        """
        Generate (and cache) a thumbnail.

        :param filename: source image file name
        :param miniature: thumbnail file name; derived from the options if None
        :param override: overwrite an existing thumbnail of the same name
        :param size: 'WxH' string, used when width or height is None
        :param width: thumbnail width
        :param height: thumbnail height
        :param crop: 'fit' to crop-and-fit, otherwise aspect ratio is kept
        :param bg: background colour used to pad the image to a square
        :param quality: image compression quality
        :return: the thumbnail file name, or None when the source is missing/unreadable
        """
        if not width or not height:
            width, height = [int(x) for x in size.split('x')]
        name, fm = os.path.splitext(filename)
        if not miniature:
            # Encode the options into the name so each option combination
            # gets its own cache entry.
            miniature = self._get_name(name, fm, size, crop, bg, quality)
        # NOTE(review): flask.safe_join was removed in Flask 2.4; switch to
        # werkzeug.utils.safe_join when upgrading Flask.
        thumb_filename = flask.safe_join(self.config.thumb_destination, miniature)
        self._ensure_path(thumb_filename)
        if not os.path.exists(thumb_filename) or override:
            original_filename = flask.safe_join(self.config.destination, filename)
            if not os.path.exists(original_filename):
                return None
            thumb_size = (width, height)
            try:
                image = Image.open(original_filename)
            except IOError:
                return None
            resample = self._resample_filter()
            if crop == 'fit':
                img = ImageOps.fit(image, thumb_size, resample)
            else:
                img = image.copy()
                img.thumbnail((width, height), resample)
            if bg:
                img = self._bg_square(img, bg)
            img.save(thumb_filename, image.format, quality=quality)
        return miniature

    def save(self, storage, name=None, random_name=True, process=None, **options):
        """
        Save an uploaded file under the configured destination.

        :param storage: the file to save; must be a werkzeug FileStorage
        :param name: explicit base name; if None a name is generated.
            NOTE(review): the original docs claimed a directory path may be
            included, but secure_filename() flattens path separators, so
            'someguy/photo_123' becomes 'someguy_photo_123'.
        :param random_name: generate a random (uuid) name; only used when name is None
        :param process: optional pre-processing, 'resize' or None
        :param width: resize bound for the image width (default 1024)
        :param height: resize bound for the image height (default 1024)
        :return: the base name the file was stored under
        :raises UploadNotAllowed: if the extension is not in config.allow
        """
        if not isinstance(storage, FileStorage):
            raise TypeError("storage must be a werkzeug.FileStorage")
        basename, ext = os.path.splitext(storage.filename)
        ext = ext.lower()
        if ext not in self.config.allow:
            raise UploadNotAllowed()
        if name:
            basename = name
        elif random_name:
            basename = uuid.uuid4().hex + ext
        else:
            basename = basename + ext
        basename = secure_filename(basename)
        target_folder = self.config.destination
        if not os.path.exists(target_folder):
            os.makedirs(target_folder)
        if os.path.exists(os.path.join(target_folder, basename)):
            basename = self.resolve_conflict(target_folder, basename)
        target = os.path.join(target_folder, basename)
        if process == 'resize':
            width = options.pop('width', 1024)
            height = options.pop('height', 1024)
            image = Image.open(storage)
            image.thumbnail((width, height), self._resample_filter())
            image.save(target, image.format)
        else:
            storage.save(target)
        return basename

    @staticmethod
    def _resample_filter():
        # Compat fix: Pillow 10 removed Image.ANTIALIAS; Image.LANCZOS is the
        # identical filter. Fall back to the raw constant (1) as a last resort.
        return getattr(Image, 'LANCZOS', getattr(Image, 'ANTIALIAS', 1))

    @staticmethod
    def _get_name(name, fm, *args):
        # Append every truthy option to the base name, then the extension.
        for v in args:
            if v:
                name += '_%s' % v
        name += fm
        return name

    @staticmethod
    def _ensure_path(full_path):
        """Create the parent directory of *full_path* if it does not exist."""
        directory = os.path.dirname(full_path)
        try:
            # Bug fix: the original tested os.path.exists(full_path) (the file,
            # not its directory), causing pointless makedirs calls.
            if directory and not os.path.isdir(directory):
                os.makedirs(directory)
        except OSError as e:
            # Tolerate a concurrent creation of the same directory.
            if e.errno != errno.EEXIST:
                raise

    @staticmethod
    def _bg_square(img, color=0xff):
        """Paste *img* centered on a square single-channel (greyscale) background."""
        size = (max(img.size),) * 2
        layer = Image.new('L', size, color)
        # Bug fix: '/' produces floats on Python 3 and Image.paste requires
        # integer coordinates; use floor division.
        offset = tuple((s - i) // 2 for s, i in zip(size, img.size))
        layer.paste(img, offset)
        return layer
"""
Find and Replace Pattern (Medium)
You have a list of words and a pattern, and you want to know which words in words match the pattern.
A word matches the pattern if there exists a permutation of letters p so that after replacing every letter x in the pattern with p(x), we get the desired word.
(Recall that a permutation of letters is a bijection from letters to letters: every letter maps to another letter, and no two letters map to the same letter.)
Return a list of the words in words that match the given pattern.
You may return the answer in any order.
Example 1:
Input: words = ["abc","deq","mee","aqq","dkd","ccc"], pattern = "abb"
Output: ["mee","aqq"]
Explanation: "mee" matches the pattern because there is a permutation {a -> m, b -> e, ...}.
"ccc" does not match the pattern because {a -> c, b -> c, ...} is not a permutation,
since a and b map to the same letter.
Note:
1 <= words.length <= 50
1 <= pattern.length == words[i].length <= 20
"""
"""
TODO:
- solution
- test cases
""" |
from django.contrib import admin
from django.urls import path
import blog.views
urlpatterns = [
    # Detail view: receives each post's id from the URL
    path('<int:blog_id>/', blog.views.detail, name='detail'),
    path('new/', blog.views.new, name='new'),
    path('<int:blog_id>/edit/', blog.views.edit, name='edit'),
    path('<int:blog_id>/delete/', blog.views.delete, name='delete'),
]
'''Print the multiplication table (1 through 9), three tables per row of output.'''
# Rows are grouped in threes: 1-3, 4-6, 7-9 side by side.
for base in range(1, 10, 3):
    for i in range(1, 10):
        print(' %d * %d = %2d' % (base, i, base * i), end=' ')
        print('%d * %d = %2d' % (base + 1, i, (base + 1) * i), end=' ')
        print('%d * %d = %2d' % (base + 2, i, (base + 2) * i))
    print()
# -*- encoding: utf-8 -*-
"""Unit tests for src/evergreen/host.py."""
from __future__ import absolute_import
from evergreen.distro import Distro
class TestDistro(object):
    """Unit tests for evergreen.distro.Distro.

    Each test receives a fixture dict (sample_aws_distro / sample_static_distro)
    and checks that Distro exposes its keys as attributes and nested objects.
    """
    def test_get_attributes(self, sample_aws_distro):
        # Top-level keys become attributes; nested dicts become sub-objects.
        distro = Distro(sample_aws_distro, None)
        assert distro.name == sample_aws_distro["name"]
        assert distro.provider == sample_aws_distro["provider"]
        assert distro.planner_settings.version == sample_aws_distro["planner_settings"]["version"]
        assert distro.finder_settings.version == sample_aws_distro["finder_settings"]["version"]
    def test_settings(self, sample_aws_distro):
        # AWS providers wrap 'settings' in an object with mount_points etc.
        distro = Distro(sample_aws_distro, None)
        settings = distro.settings
        setting_json = sample_aws_distro["settings"]
        assert settings.ami == setting_json["ami"]
        mount_point = settings.mount_points[0]
        assert mount_point.device_name == setting_json["mount_points"][0]["device_name"]
    def test_expansions(self, sample_aws_distro):
        # Expansions are exposed as a key -> value mapping.
        distro = Distro(sample_aws_distro, None)
        for expansion in sample_aws_distro["expansions"]:
            assert distro.expansions[expansion["key"]] == expansion["value"]
    def test_missing_attributes(self, sample_aws_distro):
        # A missing 'settings' key yields a falsy settings attribute.
        del sample_aws_distro["settings"]
        distro = Distro(sample_aws_distro, None)
        assert not distro.settings
    def test_missing_mount_points(self, sample_aws_distro):
        # NOTE(review): this deletes the whole 'settings' dict, not just
        # 'mount_points' — presumably intentional to exercise the fallback,
        # but the name suggests deleting only the nested key; verify.
        del sample_aws_distro["settings"]["mount_points"]
        distro = Distro(sample_aws_distro, None)
        assert not distro.settings.mount_points
    def test_static_distro(self, sample_static_distro):
        # Static distros expose a host list under settings.hosts.
        distro = Distro(sample_static_distro, None)
        assert len(distro.settings.hosts) == len(sample_static_distro["settings"]["hosts"])
        for host in sample_static_distro["settings"]["hosts"]:
            assert host["name"] in distro.settings.hosts
    def test_static_distro_missing_hosts(self, sample_static_distro):
        del sample_static_distro["settings"]["hosts"]
        distro = Distro(sample_static_distro, None)
        assert not distro.settings.hosts
    def test_unknown_provider_distro(self, sample_aws_distro):
        # Unknown providers leave 'settings' as the raw dict.
        sample_aws_distro["provider"] = "some_unknown_provider"
        distro = Distro(sample_aws_distro, None)
        assert distro.settings == sample_aws_distro["settings"]
|
# Project Euler #1:
# If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
# Find the sum of all the multiples of 3 or 5 below 1000.
# Closed form via arithmetic series (inclusion-exclusion on multiples of 15):
#   3 * (1 + ... + 333)  +  5 * (1 + ... + 199)  -  15 * (1 + ... + 66)
# Bug fix: '/' is true division on Python 3 and printed 233168.0;
# use floor division to keep the result an int.
a = 3 * 333 * 334 // 2 + 5 * 199 * 200 // 2 - 15 * 66 * 67 // 2
print(a)
# coding=utf8
import json
import util
import math
import random
class VocabManager:
    """Vocabulary manager: maps tokens to integer ids and back.

    Reserved ids 0-3 are the padding/unknown/go/eos tokens; real vocabulary
    entries start at USEFUL_VOCAB_INIT_ID.
    """
    PADDING_TOKEN_ID = 0
    UNKNOWN_TOKEN_ID = 1
    GO_TOKEN_ID = 2
    EOS_TOKEN_ID = 3
    USEFUL_VOCAB_INIT_ID = 4
    UNKNOWN_TOKEN = "<U>"
    PADDING_TOKEN = "<P>"
    GO_TOKEN = "<GO>"
    EOS_TOKEN = "<EOS>"

    @staticmethod
    def _read(vocab_path):
        """Load a JSON vocabulary file."""
        with open(vocab_path, "r") as f:
            return json.load(f)

    def __init__(self, vocab_path):
        self._vocab = self._read(vocab_path)
        # Reverse index: id -> token.
        self._vocab_id2word = {entry["id"]: token for token, entry in self._vocab.items()}

    @property
    def vocab(self):
        return self._vocab

    @property
    def vocab_len(self):
        return len(self._vocab)

    def word2id(self, word):
        """Return the id for *word*, falling back to the unknown-token id."""
        return util.get_value(self._vocab, word, {"id": self.UNKNOWN_TOKEN_ID})["id"]

    def id2word(self, wid):
        """Return the token for id *wid*, falling back to the unknown token."""
        return util.get_value(self._vocab_id2word, wid, self.UNKNOWN_TOKEN)

    def decode(self, wids, delimiter=" "):
        """Join the tokens for *wids*, skipping padding/unknown/EOS ids."""
        skip = (self.PADDING_TOKEN_ID, self.UNKNOWN_TOKEN_ID, self.EOS_TOKEN_ID)
        return delimiter.join(self.id2word(wid) for wid in wids if wid not in skip)

    @classmethod
    def build_vocab(cls, file, target_file, min_freq=1):
        """Build a vocabulary JSON from a {token: frequency} JSON file.

        Tokens rarer than *min_freq* are dropped; the four special tokens are
        always appended with their fixed reserved ids.
        """
        source_vocab = cls._read(file)
        result = dict()
        next_id = cls.USEFUL_VOCAB_INIT_ID
        for word, frequency in source_vocab.items():
            if frequency < min_freq:
                continue
            result[word] = {"frequency": frequency, "id": next_id}
            next_id += 1
        specials = (
            (cls.UNKNOWN_TOKEN, cls.UNKNOWN_TOKEN_ID),
            (cls.PADDING_TOKEN, cls.PADDING_TOKEN_ID),
            (cls.GO_TOKEN, cls.GO_TOKEN_ID),
            (cls.EOS_TOKEN, cls.EOS_TOKEN_ID),
        )
        for token, token_id in specials:
            result[token] = {"frequency": 0, "id": token_id}
        with open(target_file, "w") as f:
            f.write(json.dumps(result, indent=4))
class Batch:
    """One batch of padded training data plus the derived 0/1 masks.

    Masks are 1 over the real sequence and 0 over the padding; every mask row
    is padded to the length of the first row of its source list.
    """
    def __init__(self, sentences, cases, sentence_length, case_length, regexs, regex_length, regex_targets):
        """
        :param sentences: padded sentence id sequences
        :param cases: padded positive-case id sequences
        :param sentence_length: true (unpadded) sentence lengths
        :param case_length: true (unpadded) case lengths
        :param regexs: padded regex id sequences (with GO token)
        :param regex_length: true (unpadded) regex lengths
        :param regex_targets: regex sequences shifted left (GO token removed)
        """
        self.sentences = sentences
        self.cases = cases
        self.sentence_length = sentence_length
        self.case_length = case_length
        self.regexs = regexs
        self.regex_targets = regex_targets
        self.regex_length = regex_length
        sent_max = len(self.sentences[0])
        self.sentence_masks = [[1] * n + [0] * (sent_max - n) for n in sentence_length]
        case_max = len(cases[0])
        self.case_masks = [[1] * n + [0] * (case_max - n) for n in case_length]
        regex_max = len(self.regex_targets[0])
        self.regex_masks = [[1] * len(t) + [0] * (regex_max - len(t)) for t in regex_targets]
        self._learning_rate = 0.0

    @property
    def learning_rate(self):
        return self._learning_rate

    @learning_rate.setter
    def learning_rate(self, rate):
        self._learning_rate = rate

    @property
    def batch_size(self):
        return len(self.regex_targets)

    def _print(self):
        # Debug helper: dump every field to stdout.
        print(self.sentences)
        print(self.sentence_length)
        print(self.cases)
        print(self.case_length)
        print(self.regexs)
        print(self.regex_targets)
        print(self.regex_length)
        print(self.sentence_masks)
        print(self.case_masks)
class DataIterator:
    """Batch iterator over (sentence, positive cases, regex) training samples.

    Sequences are id-encoded via the supplied vocabularies, EOS-terminated and
    padded to the configured maximum lengths. Samples exceeding any maximum are
    dropped at load time. Each sample is expanded into ``case_num`` rows (one
    per positive case) when batched.
    """
    def __init__(self, data_path, sentence_vocab, case_vocab, regex_vocab, max_sentence_len, max_case_len, max_regex_len, batch_size, case_num=5):
        self._cursor = 0
        self._max_sentence_len = max_sentence_len
        self._max_case_len = max_case_len
        self._max_regex_len = max_regex_len
        self._case_num = case_num
        # Each vocab argument may be an already-built VocabManager or a path
        # to a vocabulary JSON file.
        if isinstance(sentence_vocab, VocabManager):
            self._sentence_vocab = sentence_vocab
        else:
            self._sentence_vocab = VocabManager(sentence_vocab)
        if isinstance(case_vocab, VocabManager):
            self._case_vocab = case_vocab
        else:
            self._case_vocab = VocabManager(case_vocab)
        if isinstance(regex_vocab, VocabManager):
            self._regex_vocab = regex_vocab
        else:
            self._regex_vocab = VocabManager(regex_vocab)
        self._data = self._read_data(data_path)
        # Remove the training examples that are too long
        # (lengths can exceed the max because padding only pads, never trims).
        rm_list = list()
        for value in self._data:
            sentence = value["sentence"]
            regex = value["regex"]
            if len(sentence) > self._max_sentence_len or len(regex) > self._max_regex_len:
                rm_list.append(value)
                continue
            for case in value["case"]:
                if len(case[0]) > self._max_case_len:
                    rm_list.append(value)
                    break
        for r in rm_list:
            self._data.remove(r)
        self._size = len(self._data)
        self._batch_size = batch_size
        # Trailing partial batch is never served (see get_batch).
        self._batch_per_epoch = math.floor(self._size / self._batch_size)
        self.shuffle()
    @property
    def size(self):
        # Number of usable samples after length filtering.
        return self._size
    @property
    def batch_per_epoch(self):
        return self._batch_per_epoch
    def shuffle(self):
        """Reshuffle the samples in place and rewind the cursor."""
        random.shuffle(self._data)
        self._cursor = 0
    def _read_data(self, data_path):
        """Load the JSON dataset and id-encode every sample.

        Expects each sample to carry 'sentence', 'positive_case' (a list) and
        'regex' keys.
        """
        with open(data_path, "r") as f:
            data = json.load(f)
        new_data = list()
        for sample in data:
            sentence, sentence_length = self.process_sentence(sample["sentence"])
            cases = list()
            for positive_case in sample["positive_case"]:
                case, case_length = self.process_case(positive_case)
                cases.append((case, case_length))
            regex, regex_len = self.process_regex(sample["regex"])
            new_data.append({
                "sentence": sentence,
                "case": cases,
                "sentence_length": sentence_length,
                "regex": regex,
                "regex_length": regex_len
            })
        return new_data
    def process_sentence(self, sentence):
        """Encode a sentence: ids + EOS, padded to max_sentence_len.

        Returns (padded id list, true length including EOS).
        """
        words = sentence.strip().split()
        ids = list()
        for word in words:
            ids.append(self._sentence_vocab.word2id(word))
        ids.append(VocabManager.EOS_TOKEN_ID)
        sequence_length = len(ids)
        temp_length = len(ids)
        while temp_length < self._max_sentence_len:
            ids.append(VocabManager.PADDING_TOKEN_ID)
            temp_length += 1
        return ids, sequence_length
    def process_case(self, case):
        """Encode a positive case string, same scheme as process_sentence."""
        words = case.strip().split()
        ids = list()
        for word in words:
            ids.append(self._case_vocab.word2id(word))
        ids.append(VocabManager.EOS_TOKEN_ID)
        sequence_length = len(ids)
        temp_length = len(ids)
        while temp_length < self._max_case_len:
            ids.append(VocabManager.PADDING_TOKEN_ID)
            temp_length += 1
        return ids, sequence_length
    def process_regex(self, regex):
        """Encode a regex: GO + ids + EOS, padded to max_regex_len."""
        words = regex.strip().split()
        ids = [VocabManager.GO_TOKEN_ID]
        for word in words:
            ids.append(self._regex_vocab.word2id(word))
        ids.append(VocabManager.EOS_TOKEN_ID)
        sequence_length = len(ids)
        temp_length = len(ids)
        while temp_length < self._max_regex_len:
            ids.append(VocabManager.PADDING_TOKEN_ID)
            temp_length += 1
        return ids, sequence_length
    def get_batch(self):
        """Return the next Batch; raises IndexError when the epoch is exhausted.

        Each sample contributes case_num rows: the sentence/regex are repeated
        once per positive case so the decoder sees every (sentence, case) pair.
        """
        if self._cursor + self._batch_size > self._size:
            raise IndexError("Index Error")
        samples = self._data[self._cursor:self._cursor+self._batch_size]
        self._cursor += self._batch_size
        sentence_samples = list()
        sentence_length = list()
        case_samples = list()
        case_length = list()
        regex_length = list()
        regex_samples = list()
        # Remove GO TOKEN ID
        regex_targets = list()
        for s in samples:
            # Target is the regex shifted left by one (drops GO, appends PAD).
            regex_targets.append(s["regex"][1:] + [VocabManager.PADDING_TOKEN_ID])
            for i in range(self._case_num):
                sentence_samples.append(s["sentence"])
                sentence_length.append(s["sentence_length"])
                case_samples.append(s["case"][i][0])
                case_length.append(s["case"][i][1])
                regex_samples.append(s["regex"])
                regex_length.append(s["regex_length"])
        return Batch(
            sentences=sentence_samples,
            cases=case_samples,
            sentence_length=sentence_length,
            case_length=case_length,
            regexs=regex_samples,
            regex_length=regex_length,
            regex_targets=regex_targets
        )
|
from django.conf.urls.defaults import *
from settings import ROOT_PATH
import os
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# NOTE(review): django.conf.urls.defaults / patterns() only exist in legacy
# Django (< 1.10); this URLconf is tied to the version the project pins.
urlpatterns = patterns('',
    # Example:
    # (r'^application/', include('application.foo.urls')),
    # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
    # to INSTALLED_APPS to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    (r'^admin/', include(admin.site.urls)),
    (r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),
    (r'^logout/$', 'application.base.views.do_logout'),
    (r'^files/(?P<path>.*)$', 'django.views.static.serve', {'document_root': os.path.join(ROOT_PATH, 'files')}),
    (r'^$', 'application.base.views.home'),
    (r'^load_items/(.+)/$', 'application.base.views.load_items'),
    (r'^get_metadata/$', 'application.base.views.get_metadata'),
    (r'^set_metadata/$', 'application.base.views.set_metadata'),
    (r'^save_rating/$', 'application.base.views.save_rating'),
    (r'^on_view/$', 'application.base.views.on_view'),
    (r'^download_item/$', 'application.base.views.download_item'),
    (r'^get_upload_url/$', 'application.base.views.get_upload_url'),
    (r'^upload_finished/$', 'application.base.views.upload_finished'),
    (r'^get_tags/$', 'application.base.views.get_tags'),
    (r'^get_item_tags/$', 'application.base.views.get_item_tags'),
    (r'^add_tag/$', 'application.base.views.add_tag'),
    (r'^delete_tag/$', 'application.base.views.delete_tag'),
    (r'^edit_tag/$', 'application.base.views.edit_tag'),
    # Bug fix: a second identical '^delete_tag/$' entry was removed — it was
    # unreachable (the first match wins in Django URL resolution).
    (r'^delete_item/$', 'application.base.views.delete_item'),
)
|
from karhu.libs.bcm.utils.decorators import json_view
from django.conf import settings
from karhu.gallery.models import Folder, Image
from karhu.libs.bcm.utils.tools import find_in_list
import json
from time import sleep
@json_view
def folders(request, id=None):
    # REST-style endpoint for gallery folders (Python 2 era view).
    # GET    /folders/       -> list of serialized folders
    # GET    /folders/<id>/  -> single serialized folder
    # POST   /folders/[<id>/]-> update title (with id) or create an empty folder
    # DELETE /folders/<id>/  -> delete (returns no body)
    if request.method == 'GET':
        if id:
            folder = Folder.objects.get(pk=id)
            return flat_folder(folder)
        else:
            #list_type = request.GET.get('type', None)
            folders = Folder.objects.all()
            #if list_type == 'short':
            #    response = [flat_folder(folder, False) for folder in folders]
            response = [flat_folder(folder) for folder in folders]
            return response
    elif request.method == 'POST':
        P = json.loads(request.body)
        # NOTE(review): 'data' is assembled but never applied to the folder —
        # only 'title' is saved on update, and a create ignores the payload
        # entirely. Looks unfinished; confirm intended behavior before fixing.
        data = {
            'title': P.get('title', None),
            'cover': P.get('cover', None),
            'status': P.get('status', None),
            'description': P.get('description', None)
        }
        if id:
            folder = Folder.objects.get(pk=id)
            folder.title = P['title']
        else:
            folder = Folder.objects.create()
        folder.save()
        return flat_folder(folder)
    elif request.method == 'DELETE':
        # NOTE(review): returns None — the json_view decorator presumably
        # tolerates that; verify.
        folder = Folder.objects.get(pk=id)
        folder.delete()
@json_view
def folder_custom_action(request, id=None, action=None):
    # Dispatches named actions on a folder: 'delete_cover' clears the cover
    # image, 'upload_cover' replaces it with an uploaded file.
    # Returns the re-serialized folder either way.
    folder = Folder.objects.get(pk=id)
    #sleep(2)
    #return flat_folder(folder)
    if action == 'delete_cover':
        folder.clear_cover()
    elif action == 'upload_cover':
        # Debug prints left from development (Python 2 print statements).
        print 'got cover'
        file = request.FILES.get('file', None)
        if file:
            print 'really got cover'
            #song.mp3 = file
            #song.save()
            folder.cover = file
            folder.save()
    print 'after custom action:', folder, folder.cover, folder.cover.url
    return flat_folder(folder)
def flat_folder(folder):
    """Serialize a Folder model instance into a plain dict for JSON responses.

    The 'cover' key is only present when the folder has a cover image;
    'size' is the number of images in the folder.
    """
    data = {
        'id': folder.pk,
        'title': folder.title,
        'description': folder.description,
        'order': folder.order,
        'status': folder.status,
    }
    if folder.cover:
        data['cover'] = {'url': folder.cover_url}
    data['size'] = folder.images.count()
    return data
@json_view
def images(request, id=None):
    # REST-style endpoint for gallery images (Python 2 era view).
    # GET returns one or all serialized images; POST with a file creates an
    # image inside the given folder; DELETE removes an image.
    Klass = Image
    if request.method == 'GET':
        if id:
            object = Klass.objects.get(pk=id)
            return flat_image(object)
        else:
            objects = Klass.objects.all()
            response = [flat_image(obj) for obj in objects]
            return response
    elif request.method == 'POST':
        # NOTE(review): sleep() calls look like leftover latency simulation
        # for UI testing — remove before production.
        sleep(3)
        #P = json.loads(request.body)
        #print 'body before file', P
        POST = request.POST
        print 'POST before file', POST
        print '...and QueryDict', POST.get('folder_id')
        file = request.FILES.get('file', None)
        if file:
            folder_id = request.POST.get('folder_id')
            folder = Folder.objects.get(pk=folder_id)
            image = Image.objects.create(image=file, folder=folder)
            image.save()
            return flat_image(image)
        else:
            # NOTE(review): the parsed body 'P' is never used; a create here
            # produces an empty Image with no file — confirm this is intended.
            P = json.loads(request.body)
            if id:
                object = Klass.objects.get(pk=id)
            else:
                object = Klass.objects.create()
            object.save()
            return flat_image(object)
    elif request.method == 'DELETE':
        object = Klass.objects.get(pk=id)
        sleep(1)
        object.delete()
        return 'ok'
def flat_image(object):
    # Serialize an Image model instance to a dict for JSON responses.
    # NOTE(review): the parameter name shadows the builtin 'object';
    # debug prints below are Python 2 statements left from development.
    print 'flating image', object
    print 'from folder', object.folder, object.folder.title
    response = {
        #'folder': {'id': object.folder.pk, 'title': object.folder.title},
        'id': object.pk,
        'urls': {
            'thumbnail': object.image.thumbnail.url}
    }
    return response
|
import sys

# Scheduling DP (Baekjoon 14501 "퇴사"-style): each day i has a task taking
# timeTable[i][0] days and paying timeTable[i][1]; maximize total pay within
# n days. dp[i] = best profit achievable from day i onward.
n = int(input())
timeTable = [list(map(int, sys.stdin.readline().split())) for _ in range(n)]
dp = [0 for _ in range(n + 1)]
# Walk backwards: either skip day i, or take its task if it finishes by day n.
for i in range(n - 1, -1, -1):
    if i + timeTable[i][0] > n:
        dp[i] = dp[i + 1]
    else:
        dp[i] = max(timeTable[i][1] + dp[i + timeTable[i][0]], dp[i + 1])
print(dp[0])
|
import os
from random import randint
class FileFunctions:
    """Filesystem helpers for locating song files under a root directory."""

    def __init__(self, path):
        self.path = path
        # Kept for backward compatibility with any caller that touched .dirs;
        # getSongList() no longer consumes this shared generator.
        self.dirs = os.walk(self.path)

    def findRandomSong(self):
        """Return a uniformly random song path from the tree.

        :raises Exception: if no files are found (propagated from getSongList).
        """
        songs = self.getSongList()
        return songs[randint(0, len(songs) - 1)]

    def getSongList(self):
        """Walk self.path and return the full path of every file found.

        Bug fix: the original iterated the single shared os.walk generator
        stored in __init__ and silenced all errors with a bare except, so any
        second call found the generator exhausted and raised 'No songs'.
        Each call now performs a fresh walk, and real errors propagate.

        :raises Exception: 'No songs' when the tree contains no files.
        """
        songs = [os.path.join(root, name)
                 for root, _dirs, files in os.walk(self.path)
                 for name in files]
        if not songs:
            raise Exception('No songs')
        return songs
|
import numpy as np
import scipy.stats as st
import pymannkendall as mk
class MannKendall(object):
    """Concept-drift detector based on Mann-Kendall trend tests (pymannkendall)."""
    def __init__(self, min_instances = 30, instances_step = 10, alpha = 0.1, slope_threshold = 0.0, test_type = 'hamed_rao_mod', period = 12):
        '''
        min_instances = minimum instances to be considered before MK test is applied
        instances_step = after minimum instances is reached, frequency MK test is applied --> speeds up test significantly if test is not applied every single instance
            >> "1"  = test is applied for every instance
            >> "10" = test is applied every 10th instance
        alpha = significance level of the test
        slope_threshold = minimum absolute Sen's slope required to flag a change
        test_type = type of test used to perform trend detection; six variants available:
            - 'original_mk'                     --> Original MK test: assumes no temporal relation in data
            - 'hamed_rao_mod'                   --> Hamed and Rao modification: autocorrelated data (lag > 1)
            - 'yue_wang_mod'                    --> Yue and Wang modification: autocorrelated data (lag > 1)
            - 'trend_free_pre_whitening_mod'    --> Trend-free pre-whitening modification: autocorrelated data
            - 'pre_whitening_mod'               --> Pre-whitening modification: autocorrelated data
            - 'seasonal' (period needed!)       --> Seasonal MK test: autocorrelation + seasonality
        period = seasonality pattern of the dataset -> "12" = monthly, "52" = weekly
        '''
        #initialize parameters:
        self.min_instances = min_instances
        self.alpha = alpha
        self.test_type = test_type
        self.period = period
        self.instance_memory = []
        self.slope_threshold = slope_threshold
        self.instances_step = instances_step
        self.in_concept_change = False
        self.trend = None
        self.p_value = None
        self.sens_slope = 0.0
        # sample_count == 1 forces one test right when min_instances is reached;
        # instance_count throttles subsequent tests to every instances_step-th value.
        self.sample_count = 0
        self.instance_count = 0
    def reset(self):
        '''
        Reset the detector state (memory, results and counters).
        '''
        self.in_concept_change = False
        self.instance_memory = []
        self.trend = None
        self.p_value = None
        self.sens_slope = 0.0
        self.sample_count = 0
        self.instance_count = 0
        # self.__init__(recent_window = self.recent_window, alpha_w = self.alpha_w, alpha_d = self.alpha_d)
    def add_element(self, value):
        '''
        Add a new observation and (periodically) re-run the configured MK test.
        '''
        #reset parameters if change was detected:
        if self.in_concept_change:
            self.reset()
        #append elements:
        self.instance_memory.append(value)
        if len(self.instance_memory) == self.min_instances:
            self.sample_count = 1
        if len(self.instance_memory) > self.min_instances:
            self.instance_count += 1
        #start drift detection: >> min_instances have to be reached, then always perform test once, after that perform test every i_th instance (instances_step)
        if len(self.instance_memory) >= self.min_instances and ((self.instance_count == self.instances_step) or (self.sample_count == 1)):
            # NOTE(review): if test_type matches none of the options below,
            # results_tuple is unbound and a NameError is raised here — consider
            # validating test_type in __init__.
            if self.test_type == 'original_mk':
                #call corresponding test from package:
                #print('Perform MK test')
                results_tuple = mk.original_test(self.instance_memory, self.alpha)
                #print('MK test ended')
            if self.test_type == 'hamed_rao_mod':
                #call corresponding test from package:
                results_tuple = mk.hamed_rao_modification_test(self.instance_memory, self.alpha)
            if self.test_type == 'yue_wang_mod':
                #call corresponding test from package:
                results_tuple = mk.yue_wang_modification_test(self.instance_memory, self.alpha)
            if self.test_type == 'trend_free_pre_whitening_mod':
                #call corresponding test from package:
                results_tuple = mk.trend_free_pre_whitening_modification_test(self.instance_memory, self.alpha)
            if self.test_type == 'pre_whitening_mod':
                #call corresponding test from package:
                results_tuple = mk.pre_whitening_modification_test(self.instance_memory, self.alpha)
            if self.test_type == 'seasonal':
                #call corresponding test from package:
                results_tuple = mk.seasonal_test(self.instance_memory, period = self.period, alpha = self.alpha)
            #reset counter every time a test was performed:
            self.sample_count = 0
            self.instance_count = 0
            #assign results (pymannkendall tuple: trend, h, p, z, Tau, s, var_s, slope[, intercept]):
            self.p_value = results_tuple[2]
            self.sens_slope = results_tuple[-1]
            self.trend = results_tuple[0]
            # Flag drift only when the trend is both significant and steep enough.
            if self.p_value < self.alpha and np.abs(self.sens_slope) > self.slope_threshold:
                self.in_concept_change = True
            else:
                self.in_concept_change = False
    def detected_change(self):
        # True when the most recent test flagged a significant trend.
        return self.in_concept_change
    def get_test_results(self):
        # Returns (trend label, p-value, Sen's slope) from the last test run.
        test_results = (self.trend, self.p_value, self.sens_slope)
        return test_results
|
from abc import ABCMeta, abstractmethod
from constants import GlobalConstants
class Person(object):
    # Abstract super class for match observers; holds the resultant action
    # taken on each notification. (Python 2 code: __metaclass__, print statement.)
    __metaclass__ = ABCMeta
    MESSAGE = '{0} {1}: {2}'
    # 0 - name of the person
    # 1 - either 'says' or 'reports' or anything based on subclass verb parameter
    # 2 - actual message by the person
    def __init__(self, name):
        self.name = name
        # Subclasses are expected to set their own reporting verb.
        self.verb = ''
    @abstractmethod
    def print_game_over_message(self, game):
        # to be implemented by the subclasses
        pass
    @abstractmethod
    def print_goal_message(self, team_secured_current_goal):
        # to be implemented by the subclasses
        pass
    def print_message(self, message):
        # prints the final formatted message of the observer
        print self.MESSAGE.format(self.name, self.verb, message)
    def notify(self, notification_type, notification_data):
        # dispatches the action matching the given notification type
        if notification_type == GlobalConstants.GOAL_KEYWORD:
            self.notify_goal(notification_data)
        elif notification_type == GlobalConstants.GAME_OVER_KEYWORD:
            self.notify_game_over(notification_data)
    def notify_goal(self, team):
        # delegates to the subclass implementation
        self.print_goal_message(team)
    def notify_game_over(self, game):
        # delegates to the subclass implementation
        self.print_game_over_message(game)
|
from django.contrib.auth import login, logout, authenticate
from django.conf import settings
from django.core.urlresolvers import reverse as url_reverse
from django.core import serializers
from django.db import transaction, IntegrityError
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import ensure_csrf_cookie
import facebook
import json
from coldruins.web.decorators import *
from coldruins.web.models import *
from coldruins.web.fbgraph import *
@process_event_decorator
@ensure_csrf_cookie
def home(request):
    # Serves the static SPA shell, injecting the authenticated user's id,
    # clan, resources and attacking units as inline <script> globals that the
    # frontend reads at load time.
    user_data = ''
    if request.user != None and request.user.is_authenticated():
        user_data = '<script>window.userid={}</script>'.format(request.user.id)
        if request.user.meta.clan != None:
            user_data += '<script>window.userclan={}</script>'.format(request.user.meta.clan.id)
        resources = list(request.user.meta.get_resources())
        user_data += '<script>window.userresources={}</script>'.format(resources)
        user_data += '<script>window.userattackingunits={}</script>'.format(
            request.user.meta.get_attacking_troops())
    # NOTE(review): index.html is re-read from disk on every request and the
    # file handle is never closed — acceptable in dev, revisit for production.
    return HttpResponse(
        open('coldruins/web/static/index.html', 'rt').read() + user_data)
def _verdict_ok(response):
return {
'verdict': 'ok',
'message': response
}
def _verdict_error(message):
return {
'verdict': 'error',
'message': message
}
@ajax_decorator
def near_location(request, center, distance):
    # AJAX endpoint: thin wrapper delegating to _get_locations, which returns
    # Facebook places around *center* within *distance* meters.
    return _get_locations(request, center, distance)
def _get_locations(request, center, distance):
    """Return Facebook places near *center*, cached as Location rows.

    Looks up the caller's stored Facebook token, queries the Graph API for
    places within *distance* of *center*, and lazily creates a Location
    row (with a guessed category) for each place not seen before.
    """
    try:
        user_meta = UserMeta.objects.get(user=request.user)
        token = user_meta.fb_token
    except UserMeta.DoesNotExist:
        return _verdict_error('Invalid/missing token')
    # center appears to be a 'lat,lon' string and distance a radius in
    # meters — TODO confirm against get_places().
    places = get_places(token, center, distance)
    response = []
    for p in places:
        # Fast path: place already cached locally.
        try:
            loc = Location.objects.get(fb_id=p['id'])
            response.append(loc)
            continue
        except Location.DoesNotExist:
            pass
        try:
            # Collect the primary category plus all category-list names.
            categories = [p['category']]
            for c in [cat['name'] for cat in p['category_list']]:
                categories.append(c)
            category = 4  # sensible default :))
            potential_cat = {}
            for c in categories:
                low = c.lower()
                # Bars/restaurants always win: category 1, voting cleared.
                if 'restaurant' in low or \
                   ' bar' in low or \
                   'bar ' in low or \
                   'bar' == low:
                    category = 1
                    potential_cat = {}
                    break
                elif c in static_categories:
                    # Otherwise vote for the statically-mapped category.
                    key = static_categories[c]
                    val = potential_cat.setdefault(key, 0)
                    potential_cat[key] = val + 1
            val_max = 0
            # this gets ignored if we found a bar/restaurant
            for cat, val in potential_cat.iteritems():  # Python 2 dict API
                if val > val_max:
                    category = cat
                    val_max = val
            l = Location(
                fb_id = p['id'],
                name = p['name'],
                lat = p['latitude'],
                lon = p['longitude'],
                category = category
            )
            l.save()
            response.append(l)
        except IntegrityError:
            # Concurrent insert of the same fb_id; the place is dropped.
            # NOTE(review): the existing row is then missing from the
            # response — re-fetching it may be the intended behavior.
            pass
    ret = [l.export() for l in response]
    return _verdict_ok(ret)
@ajax_decorator
def login_view(request, accessToken, userID, **kwargs):
    """Log a Facebook user in, creating the local User/UserMeta on first visit.

    SECURITY NOTE(review): the account password is set to the Facebook
    userID, which is not secret — anyone knowing a user's fb id could
    authenticate through the normal login form. Confirm this is intended.
    """
    try:
        user = User.objects.get(username=userID)
    except User.DoesNotExist:
        # First login: mirror the Graph API profile into a local account.
        graph = facebook.GraphAPI(accessToken)
        profile = graph.get_object('me')
        user = User.objects.create_user(userID, profile['email'], userID)
        try:
            user.first_name = profile['first_name']
            user.last_name = profile['last_name']
        except KeyError:
            # Profile may omit names; keep the bare account.
            pass
        user.save()
    try:
        user_meta = UserMeta.objects.get(user=user)
    except UserMeta.DoesNotExist:
        user_meta = UserMeta(user=user, fb_token=accessToken)
    # Refresh the token on every login (covers the brand-new meta row too).
    user_meta.fb_token = accessToken
    user_meta.save()
    user = authenticate(username=userID, password=userID)
    login(request, user)
    return HttpResponseRedirect(url_reverse('home'))
@ajax_decorator
def get_location_data(request, location_id):
    """AJAX endpoint: troops currently stationed at *location_id*."""
    return Troops.get_troops(location_id)
@ajax_decorator
def make_troops(request, location_db_id, unit_id, count):
    """AJAX endpoint: station *count* units of *unit_id* at a location.

    Returns None; presumably ajax_decorator supplies the response
    envelope — TODO confirm.
    """
    Troops.make_troops(request.user.id, location_db_id, unit_id, count)
@ajax_decorator
def buy_troops(request, unit_id, numbers):
    """AJAX endpoint: purchase *numbers* units of *unit_id*.

    Returns an error verdict for an unknown unit id or insufficient
    resources, otherwise the resources remaining after the purchase.
    """
    if unit_id not in UNITS:
        return _verdict_error('Invalid unit id')
    remaining = request.user.meta.buy_troops(unit_id, numbers)
    # Fixed: identity comparison with None (was `== None`); flattened the
    # nested else branches into guard clauses.
    if remaining is None:
        return _verdict_error('Not enough resources')
    return _verdict_ok({'resources_left': list(remaining)})
@ajax_decorator
def checkin(request, location_id):
    """AJAX endpoint: check the current user in at *location_id*."""
    return _verdict_ok(Checkin.make_checkin(request.user.meta, location_id))
@ajax_decorator
def facepile(request):
    """AJAX endpoint: the current user's Facebook friends."""
    graph = Graph(request.user.meta.fb_token, 'me')
    return _verdict_ok(graph.friends())
@ajax_decorator
def attack(request, location_id):
    """AJAX endpoint: start a fight at *location_id* for the current user."""
    return _verdict_ok(OngoingFight.new_fight(request.user.meta, location_id))
@ajax_decorator
def get_fighting_powers(request, location_id):
    """AJAX endpoint: fighting powers of the ongoing fight at *location_id*.

    NOTE(review): calls OngoingFight.get(...) — for a Django model that is
    usually OngoingFight.objects.get(...); confirm a custom classmethod
    exists.
    """
    powers = OngoingFight.get(location=location_id).fighting_powers
    return _verdict_ok(powers[0])
# Dispatch table: maps the ``action`` string handled by data_provider()
# to the view callable that serves it.
data_providers = {
    'login': login_view,
    'near_location': near_location,
    'buy_troops': buy_troops,
    'make_troops': make_troops,
    'get_location_data': get_location_data,
    'facepile' : facepile,
    'checkin': checkin,
    'attack': attack,
    'get_fighting_powers': get_fighting_powers,
}
@process_event_decorator
def data_provider(request, action):
    """Dispatch an AJAX request to the handler registered for *action*.

    The JSON ``payload`` request parameter is expanded into keyword
    arguments for the handler. Non-AJAX requests get a 404.

    NOTE(review): request.REQUEST was removed in Django 1.9; newer Django
    requires request.GET / request.POST here.
    """
    if not request.is_ajax():
        raise Http404
    try:
        payload = json.loads(request.REQUEST.get('payload', '{}'))
    except ValueError:
        response = {'verdict': 'error', 'message': 'Invalid payload'}
        return HttpResponse(json.dumps(response))
    if action not in data_providers:
        response = {'verdict': 'error', 'message': 'Unrecognized action'}
        return HttpResponse(json.dumps(response))
    # Renamed local (was `data_provider`): no longer shadows this function.
    handler = data_providers[action]
    try:
        return handler(request, **payload)
    except TypeError:
        # Most likely missing or extra payload arguments
        # NOTE(review): this also masks TypeErrors raised *inside* handlers.
        response = {
            'verdict': 'error',
            'message': 'Invalid payload or 500 internal error (TypeError)'
        }
        return HttpResponse(json.dumps(response))
|
#!/usr/bin/env python
# coding=utf-8
# Package metadata.
# NOTE(review): __pkgname__ is bound to __name__, so its value varies with
# how this module is imported — a literal package-name string was probably
# intended; confirm.
__pkgname__ = __name__
__description__ = "A subprocess application based on Tk."
__version__ = "0.1"
|
def triplet_sum_zero(arr):
    """Print every triplet of distinct values in *arr* that sums to zero.

    Side effect: *arr* is deduplicated and sorted in place. Note that
    deduplication drops triplets needing a repeated value (e.g. [-2, 1, 1]).
    """
    arr[:] = list(set(arr))
    arr.sort()
    found = []
    for idx, pivot in enumerate(arr):
        lo, hi = idx + 1, len(arr) - 1
        # Two-pointer scan for a pair summing to -pivot.
        while lo < hi:
            pair = arr[lo] + arr[hi]
            if pair == -pivot:
                found.append([pivot, arr[lo], arr[hi]])
                lo, hi = lo + 1, hi - 1
            elif pair < -pivot:
                lo += 1
            else:
                hi -= 1
    print(found)
def main():
    """Demo driver: print zero-sum triplets for a few sample arrays."""
    for sample in ([-3, 0, 1, 2, -1, 1, -2],
                   [-5, 2, -1, -2, 3],
                   [-1, 0, 1, 2, -1, -4]):
        triplet_sum_zero(sample)

if __name__ == '__main__':
    main()
|
from django.conf.urls import url
from rest_framework_nested import routers
from rest_framework_jwt import views as jwt_views
from .core import views as core_views
from .recipe import views as recipe_views
# Top-level router: /recipes and /users collections.
router = routers.SimpleRouter(trailing_slash=False)
router.register(r'recipes', recipe_views.RecipeViewset)
router.register(r'users', core_views.UserViewset)
# Nested router: /recipes/{recipe_pk}/steps and /recipes/{recipe_pk}/ingredients.
recipe_routers = routers.NestedSimpleRouter(router, 'recipes', lookup='recipe',
                                            trailing_slash=False)
recipe_routers.register(r'steps', recipe_views.StepViewset,
                        base_name='recipe-steps')
recipe_routers.register(r'ingredients', recipe_views.IngredientViewset,
                        base_name='recipe-ingredients')
# JWT auth endpoints: obtain, refresh and verify tokens.
auth_urls = [
    url(r'^auth$', jwt_views.obtain_jwt_token, name='auth'),
    url(r'^auth/refresh$', jwt_views.refresh_jwt_token, name='auth-refresh'),
    url(r'^auth/verify$', jwt_views.verify_jwt_token, name='auth-verify'),
]
urlpatterns = router.urls + recipe_routers.urls + auth_urls
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from data import *
class Column_Type(Data_Type):
    """Data_Type implementation describing Column cells."""

    def __init__(self):
        Data_Type.__init__(self)
        self.name = 'column'

    def copy(self, cell):
        """Return a deep copy of *cell*: flags, null marker and every row."""
        new = Column()
        new.data_type = self
        new.flags = cell.flags.copy()
        new.is_null = cell.is_null
        # items() instead of the Python-2-only iteritems(): identical
        # iteration semantics here, and also valid on Python 3.
        for key, value in cell.rows.items():
            new.rows[key] = value.copy()
        return new

    def key_form(self, cell):
        """Hashable key built from the cell's row values.

        NOTE(review): on Python < 3.7 dict ordering is arbitrary, so two
        equal columns may produce differently-ordered tuples — confirm.
        """
        return tuple(cell.rows.values())

# Shared singleton used as the default data_type of Column cells.
column_type = Column_Type()
class Column(Cell):
    """A Cell whose value is a mapping of row key -> cell."""

    def __init__(self):
        # NOTE(review): Cell.__init__ is not called — confirm the base
        # class needs no initialisation.
        self.rows = {}
        self.value = self.rows
        self.data_type = column_type

    def get_row(self, row):
        """Return the cell stored under *row*, or a no_data marker."""
        try:
            return self.rows[row]
        except KeyError:
            return no_data('row not present')
|
from flask import Blueprint, jsonify, request
from geo_types import GeoSwNe
from geo_hash import GeoHash
from geo_clustering.geo_clustering import GeoClustering
from geo_elasticsearch import GeoElasticSearch
from geo_hash_precision_finder import GeoHashPrecisionFinder
from utils import enter, exit, debug
from range import Range
import sys
import json
import requests
rest_map_blueprint = Blueprint('rest_map', __name__)
es = None
def init_es(elasticsearch):
    """Install *elasticsearch* (a GeoElasticSearch) as the module-level
    ``es`` handle used by the route handlers below."""
    global es
    es = elasticsearch
@rest_map_blueprint.route('/rest/search', methods=['GET'])
def search():
    """Free-text place search (?place=...) proxied to Elasticsearch."""
    query = request.args['place']
    return es.search(query)
@rest_map_blueprint.route('/rest/map', methods=['GET'])
def get_map():
    """Clustered charging-point map for the requested bounding box.

    Query parameters: swLatitude/swLongitude/neLatitude/neLongitude bound
    the box, markerDiameterKM sizes the clusters, and operators / minKw /
    maxKw / connectionTypes filter the results.
    """
    indent = 0
    sw_lat = float(request.args['swLatitude'])
    sw_lon = float(request.args['swLongitude'])
    ne_lat = float(request.args['neLatitude'])
    ne_lon = float(request.args['neLongitude'])
    marker_diameter_km = float(request.args['markerDiameterKM'])
    # Fixed: dict.has_key() was removed in Python 3; the `in` operator is
    # equivalent and works on both Python 2 and 3.
    if 'operators' in request.args:
        operators = request.args['operators'].split(",")
    else:
        operators = []
    min_kw = float(request.args['minKw']) if 'minKw' in request.args else -1
    max_kw = float(request.args['maxKw']) if 'maxKw' in request.args else -1
    # Only build a kW filter when both bounds were supplied.
    if min_kw >= 0 and max_kw >= 0:
        kw_range = Range(min_kw, max_kw)
    else:
        kw_range = None
    if 'connectionTypes' in request.args:
        connection_types = request.args['connectionTypes'].split(",")
    else:
        connection_types = []
    geo_sw_ne = GeoSwNe(sw_lat, sw_lon, ne_lat, ne_lon)
    result = get_map_params(indent + 1, geo_sw_ne, marker_diameter_km,
                            operators, kw_range, connection_types)
    # exit() here is the tracing helper from utils (it shadows the builtin).
    exit(indent, 'get_map', '')
    return result
def get_map_params(indent, geo_sw_ne, marker_diameter_km, operators, kw_range, connection_types):
    """Run the filtered geo-hash aggregation and cluster it into map markers.

    Returns a Flask JSON response with marker points plus operator /
    kW-range / connection-type facet data from the aggregation.
    """
    enter(indent, 'get_map_params', '')
    geo_bounds = geo_sw_ne.to_geo_bounds()
    # Pick a geohash precision appropriate for the viewport width.
    cur_geohash_precision = GeoHashPrecisionFinder.find_geohash_bits_from_width_geo_bounds_kms(
        geo_bounds
    )
    debug(indent, 'get_map_params', 'got precision ' + str(cur_geohash_precision) +
          ' for ' + str(geo_bounds.width))
    es_result = es.aggregate_search_with_filter(cur_geohash_precision, geo_bounds,
                                                operators, kw_range, connection_types)
    # Aggregate all points
    geo_clustering = GeoClustering()
    points = geo_clustering.compute_clusters(
        indent + 1,
        geo_sw_ne,
        marker_diameter_km,
        es_result.geo_hash_to_count)
    debug(indent, 'get_map_params', 'after clustering call')
    result_points = []
    # Fixed: identity comparison with None (was `== None`).
    if points is None:
        debug(indent, 'get_map_params', 'no results for bounds')
    else:
        debug(indent, 'get_map_params', 'found ' + str(len(points)) + ' points')
        for point in points:
            geo_point = point.get_point()
            result_points.append({
                "latitude": geo_point.latitude,
                "longitude": geo_point.longitude,
                "count": point.count
            })
    result = {
        "points": result_points,
        "operators": es_result.operator_to_count,
        "kw_min_max": {
            "min": es_result.kw_min_max.min,
            "max": es_result.kw_min_max.max
        },
        "connection_types": es_result.connection_type_to_count
    }
    exit(indent, 'get_map_params', str(len(result_points)))
    return jsonify(result)
@rest_map_blueprint.route('/rest/reference_data', methods=['GET'])
def get_reference_data():
    """Proxy Open Charge Map's reference-data endpoint.

    A timeout is set because requests has no default timeout — without it
    a stalled upstream hangs the worker indefinitely.
    """
    url = 'https://api.openchargemap.io/v3/referencedata/'
    response = requests.get(url, timeout=30)
    return response.content
if __name__ == '__main__':
    # Smoke test. Fixed: the original call used a non-existent keyword
    # (markerDiameterKM instead of marker_diameter_km) and omitted the
    # required operators/kw_range/connection_types arguments, so it always
    # raised TypeError.
    get_map_params(
        0,
        GeoSwNe(swLatitude=-5.96575367107,
                swLongitude=-60.46875,
                neLatitude=77.4660284769,
                neLongitude=60.0),
        marker_diameter_km=726.277934253574,
        operators=[],
        kw_range=None,
        connection_types=[])
|
from application import db
from application.models import Base
from werkzeug.security import generate_password_hash, check_password_hash
from sqlalchemy.sql import text
class User(Base):
    """Application user account (table ``account``) with password hashing
    and task-list ownership."""

    __tablename__ = "account"

    # Display name and login name; both required.
    name = db.Column(db.String(144), nullable=False)
    username = db.Column(db.String(144), nullable=False)
    # Werkzeug password hash — never the plaintext password.
    password_hash = db.Column(db.String(144), nullable=False)
    # Task lists owned by this account.
    tasklists = db.relationship("Tasklist", backref='account', lazy=True)
    #tasks = db.relationship("Task", backref='account', lazy=True)

    def __init__(self, name, username, password):
        self.name = name
        self.username = username
        self.password_hash = generate_password_hash(password)

    # The four methods below match the Flask-Login user interface —
    # presumably consumed by a LoginManager elsewhere; confirm.
    def get_id(self):
        # `id` is inherited from the Base model.
        return self.id

    def is_active(self):
        return True

    def is_anonymous(self):
        return False

    def is_authenticated(self):
        return True

    def set_password(self, password):
        # Re-hash on every password change.
        self.password_hash = generate_password_hash(password)

    def check_password(self, password):
        return check_password_hash(self.password_hash, password)

    @staticmethod
    def find_users_with_no_tasks(done=False):
        """Usernames of accounts that have no tasks at all, or only tasks
        whose ``done`` flag equals *done* (raw SQL, grouped per account)."""
        stmt = text("SELECT Account.username FROM Account"
                    " LEFT JOIN Tasklist ON Tasklist.account_id = Account.id"
                    " LEFT JOIN Task ON Task.tasklist_id = Tasklist.id"
                    " WHERE (Task.done IS null OR Task.done = :done)"
                    " GROUP BY Account.id").params(done=done)
        res = db.engine.execute(stmt)
        response = []
        for row in res:
            response.append({"username":row[0]})
        return response
|
from django.conf.urls import url, include
from django.urls import path
from . import views
from rest_framework import routers, serializers, viewsets
# DRF router exposing the review data endpoint under getData/.
router = routers.DefaultRouter()
router.register('getDataReviews', views.getDataReviews, basename='review')

urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^home/$', views.index, name='home'),  # same view as the root URL
    url(r'^sentimen/$', views.sentimen, name='sentimen'),
    url(r'^sentimen_manual/$', views.sentimen_manual, name='sentimen_manual'),
    path('getData/', include(router.urls))
]
|
import pygame
import sys
import time
import random
import breakout
from pygame.locals import *
# --- Game state and constants ----------------------------------------------
score1=0  # left player's score
score2=0  # right player's score
# Colours (RGB)
WHITE = (255, 255, 255)
RED=(255,0,0)
BLUE=(0,0,255)
BLACK=(0,0,0)
GREEN=(0,255,0)
# Ball geometry and paddle positions (pixels)
ball_radius=25
paddle1_y=225
paddle2_y=225
paddle1_x=12
paddle2_x=868
# Ball travel direction flags
xdirection="forward"
ydirection="upward"
# Current and initial ball position
x=450
y=300
first_x=450
first_y=300
running=False  # ball moves only while True (toggled with the O/I keys)
screen_width=900
screen_height=600
# Initial speeds in pixels per frame
x_velocity=random.randint(1,2)
y_velocity=random.randint(1,2)
# --- Screen setup ----------------------------------------------------------
pygame.init()
pygame.display.set_caption("paddles game")
screen = pygame.display.set_mode((screen_width,screen_height))
screen.fill(BLACK)
#FUNCTIONS
def draw_rectangle(x, y, width, height, color):
    """Draw a filled rectangle on the global screen surface."""
    rect = (x, y, width, height)
    pygame.draw.rect(screen, color, rect)
def draw_circle(x, y, radius, color):
    """Draw a filled circle on the global screen (coords truncated to int)."""
    center = (int(x), int(y))
    pygame.draw.circle(screen, color, center, radius)
# Draw the ball once at its starting position. NOTE(review): draw_circle
# returns None, so `ball` is always None and is never used afterwards.
ball=draw_circle(x,y,ball_radius,GREEN)
def result():
    """Blit both players' scores at the top corners of the court."""
    font = pygame.font.SysFont("monospace", 100)
    screen.blit(font.render(str(score1), 1, BLUE), (20, 30))
    screen.blit(font.render(str(score2), 1, RED), (830, 30))
def next_round():
    # TODO: start the next round once a player wins; never called yet.
    pass
# NOTE(review): the intended trigger seems to be a score threshold — the
# main loop checks for 5, while this old note said 3; confirm which.
def return_ball(x, y, running):
    """Re-centre the ball and pause play once it fully leaves the court.

    Returns the (possibly reset) x, y and running flag.
    """
    off_right = x >= screen_width + ball_radius
    off_left = x <= 0 - ball_radius
    if off_right or off_left:
        x, y = first_x, first_y
        running = False
        draw_circle(first_x, first_y, ball_radius, GREEN)
    return x, y, running
# --- Main loop -------------------------------------------------------------
while True:
    keys = pygame.key.get_pressed()
    screen.fill(BLACK)
    # Paddles (draw_rectangle returns None; the names are just labels)
    paddle1 = draw_rectangle(paddle1_x, paddle1_y, 20, 150, BLUE)
    paddle2 = draw_rectangle(paddle2_x, paddle2_y, 20, 150, RED)
    # O starts the ball, I pauses it
    if keys[pygame.K_o]:
        running = True
    if keys[pygame.K_i]:
        running = False
    if running == True:
        draw_circle(x, y, ball_radius, GREEN)
        # Vertical movement, bouncing off top/bottom walls
        if (ydirection == "upward"):
            y = y - y_velocity
            if (y <= ball_radius):
                ydirection = "downward"
        if (ydirection == "downward"):
            y = y + y_velocity
            if (y >= screen_height - ball_radius):
                ydirection = "upward"
        # Horizontal movement; paddle hits re-roll both speeds.
        # NOTE(review): the hit test uses x == paddle_x, which the ball can
        # step over when x_velocity is 2 — confirm whether misses are a bug.
        if (xdirection == "forward"):
            x = x + x_velocity
            if y >= paddle2_y and y <= paddle2_y + 150 and x == paddle2_x:
                xdirection = "backward"
                y_velocity = random.randint(1, 2)
                x_velocity = random.randint(1, 2)
        if (xdirection == "backward"):
            x = x - x_velocity
            if y >= paddle1_y and y <= paddle1_y + 150 and x == paddle1_x + 20:
                xdirection = "forward"
                y_velocity = random.randint(1, 2)
                x_velocity = random.randint(1, 2)
        x, y, running = return_ball(x, y, running)
        # Scoring: a point when the ball crosses a side line
        if x == screen_width:
            score1 += 1
        if x == 0:
            score2 += 1
        if score1 == 5:
            screen.fill(BLUE)
        if score2 == 5:
            # Fixed: was screen.fil(RED) — an AttributeError as soon as the
            # right player reached 5 points.
            screen.fill(RED)
        draw_circle(x, y, ball_radius, GREEN)
    # Paddle controls: W/S for the left player, arrow keys for the right
    if keys[pygame.K_w]:
        if paddle1_y >= 10:
            paddle1_y -= 2
    if keys[pygame.K_s]:
        if paddle1_y <= 440:
            paddle1_y += 2
    if keys[pygame.K_UP]:
        if paddle2_y >= 10:
            paddle2_y -= 2
    if keys[pygame.K_DOWN]:
        if paddle2_y <= 440:
            paddle2_y += 2
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
    #### Respond to events / make animations / update display
    result()
    pygame.display.update()
    time.sleep(0.001)
|
import requests
import numpy as np
import scipy as sp
import sys
import platform
import pandas as pd
from time import time
from operator import itemgetter
from sklearn.cross_validation import StratifiedShuffleSplit, KFold, StratifiedKFold
from sklearn.ensemble import RandomForestClassifier ,ExtraTreesClassifier,AdaBoostClassifier, BaggingClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction import DictVectorizer
from sklearn.naive_bayes import *
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
import re
import random
import warnings
from math import sqrt, exp, log
from csv import DictReader
from sklearn.preprocessing import Imputer
from sklearn.metrics import log_loss
from sklearn.grid_search import GridSearchCV , RandomizedSearchCV, ParameterSampler
from sklearn.ensemble import RandomForestRegressor
from scipy.stats import randint as sp_randint
from sklearn import decomposition, pipeline, metrics
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn import preprocessing
from sklearn.utils import shuffle
from sklearn.metrics import roc_auc_score,roc_curve,auc
import collections
import ast
from sklearn.neighbors import KNeighborsRegressor,RadiusNeighborsRegressor
from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet, SGDRegressor, LogisticRegression, \
Perceptron,RidgeCV, TheilSenRegressor
from datetime import date,timedelta as td,datetime as dt
import datetime
from sklearn.feature_selection import SelectKBest,SelectPercentile, f_classif, GenericUnivariateSelect
from sklearn.calibration import CalibratedClassifierCV
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC, SVC
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.lda import LDA
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from collections import defaultdict
from sklearn.preprocessing import OneHotEncoder
sys.path.append('C:\\Python34\\Lib\\site-packages\\xgboost')
import xgboost as xgb
# from lasagne.layers import InputLayer, DropoutLayer, DenseLayer, ReshapeLayer,LSTMLayer,RecurrentLayer
# from lasagne.updates import nesterov_momentum,adagrad
# from lasagne.objectives import binary_crossentropy, categorical_crossentropy
# from nolearn.lasagne import NeuralNet
# import theano
# from theano import tensor as T
# from theano.tensor.nnet import sigmoid
# from lasagne import layers
# from lasagne.nonlinearities import softmax, rectify
# from lasagne.updates import nesterov_momentum,sgd,adagrad,adadelta,rmsprop
# from lasagne import nonlinearities as nl
# from nolearn.lasagne import BatchIterator
# from lasagne.regularization import *
########################################################################################################################
#Walmart Recruiting: Trip Type Classification
########################################################################################################################
#--------------------------------------------Algorithm : Random Forest :------------------------------------------------
#Random Forest :
#--------------------------------------------Algorithm : XGB------------------------------------------------------------
#XGB :
#--------------------------------------------Suggestions, Ideas---------------------------------------------------------
#Suggestions, Ideas
#--------------------------------------------with only 7K records-------------------------------------------------------
# RF : 0.7410 - 7414 (with 7k)
########################################################################################################################
#Class AdjustVariable for NN
########################################################################################################################
class AdjustVariable(object):
    """nolearn on_epoch_finished hook that linearly anneals a NeuralNet
    hyper-parameter (e.g. update_learning_rate) from *start* to *stop*."""

    def __init__(self, name, start=0.03, stop=0.001):
        self.name = name
        self.start = start
        self.stop = stop
        self.ls = None

    def __call__(self, nn, train_history):
        # Build the per-epoch schedule lazily, once nn.max_epochs is known.
        if self.ls is None:
            self.ls = np.linspace(self.start, self.stop, nn.max_epochs)
        epoch = train_history[-1]['epoch']
        getattr(nn, self.name).set_value(float32(self.ls[epoch - 1]))
########################################################################################################################
#Class EarlyStopping for NN
########################################################################################################################
class EarlyStopping(object):
    """nolearn hook: stop training after *patience* epochs without a
    validation-loss improvement, restoring the best weights seen."""

    def __init__(self, patience=100):
        self.patience = patience
        self.best_valid = np.inf
        self.best_valid_epoch = 0
        self.best_weights = None

    def __call__(self, nn, train_history):
        latest = train_history[-1]
        current_valid = latest['valid_loss']
        current_epoch = latest['epoch']
        if current_valid < self.best_valid:
            # New best: remember loss, epoch and a snapshot of the weights.
            self.best_valid = current_valid
            self.best_valid_epoch = current_epoch
            self.best_weights = nn.get_all_params_values()
        elif self.best_valid_epoch + self.patience < current_epoch:
            print("Early stopping.")
            print("Best valid loss was {:.6f} at epoch {}.".format(
                self.best_valid, self.best_valid_epoch))
            nn.load_params_from(self.best_weights)
            raise StopIteration()
#########################################################################################################################
def float32(k):
    """Cast *k* (scalar or array-like) to numpy float32.

    Uses np.float32 directly: the original np.cast['float32'] alias was
    deprecated and removed in NumPy 2.0.
    """
    return np.float32(k)
#########################################################################################################################
#Build Basic Neural Network Model
########################################################################################################################
def build_mlp(input_num_inputs, output_num_units):
    """Build a 3-hidden-layer fully-connected nolearn/lasagne classifier.

    input_num_inputs: width of the input vectors.
    output_num_units: number of target classes (softmax output).

    NOTE(review): the lasagne/nolearn/theano imports at the top of this
    file are commented out, so calling this raises NameError until they
    are restored.
    """
    print("***************Starting NN1 Classifier***************")
    #Define Model parms - 2 hidden layers
    clf = NeuralNet(
        # Layer stack: input -> (dropout -> dense sigmoid) x3 -> softmax.
        # Layers: http://lasagne.readthedocs.org/en/latest/modules/layers.html
        layers=[
            ('input', InputLayer),
            ('dropout0', DropoutLayer),
            ('hidden1', DenseLayer),
            ('dropout1', DropoutLayer),
            ('hidden2', DenseLayer),
            ('dropout2', DropoutLayer),
            ('hidden3', DenseLayer),
            ('dropout3', DropoutLayer),
            ('output', DenseLayer)
        ],
        input_shape=(None, input_num_inputs),
        dropout0_p=0.15,
        hidden1_num_units=500,
        hidden1_nonlinearity=nl.sigmoid,
        dropout1_p=0.20,
        hidden2_num_units=500,
        hidden2_nonlinearity=nl.sigmoid,
        dropout2_p=0.20,
        hidden3_num_units=500,
        hidden3_nonlinearity=nl.sigmoid,
        dropout3_p=0.20,
        # Softmax output for multi-class probabilities; nonlinearities:
        # http://lasagne.readthedocs.org/en/latest/modules/nonlinearities.html
        output_nonlinearity=softmax,
        output_num_units=output_num_units,
        # Optimisation: adagrad with a theano shared learning rate so the
        # AdjustVariable hook below can anneal it per epoch; updates:
        # http://lasagne.readthedocs.org/en/latest/modules/updates.html
        update=adagrad,
        #update=nesterov_momentum,
        update_learning_rate=theano.shared(float32(0.01)),
        #update_momentum=theano.shared(float32(0.9)),
        # Multi-class log loss; objectives:
        # http://lasagne.readthedocs.org/en/latest/modules/objectives.html
        objective_loss_function = categorical_crossentropy,
        max_epochs=500,
        eval_size=0.2,
        #train_split=TrainSplit(eval_size=0.2),
        regression=False,
        verbose=1,
        ## If label encoding is needed while clf.fit() ...label is already encoded in our case
        use_label_encoder=False,
        ## batch_iterator_train default is 128
        batch_iterator_train=BatchIterator(batch_size=128),
        batch_iterator_test=BatchIterator(batch_size=128),
        # Per-epoch hooks: anneal the learning rate; stop after 25 epochs
        # without validation improvement.
        on_epoch_finished=[
            AdjustVariable('update_learning_rate', start=0.01, stop=0.0001),
            #AdjustVariable('update_momentum', start=0.9, stop=0.999),
            EarlyStopping(patience=25)
        ]
    )
    return clf
#########################################################################################################################
#Build Basic Neural Network Model
########################################################################################################################
def build_rnn(input_num_inputs, output_num_units):
    """Build an LSTM-based nolearn/lasagne classifier.

    input_num_inputs: width of the input vectors.
    output_num_units: number of target classes (softmax output).

    NOTE(review): the lasagne/nolearn imports at the top of this file are
    commented out, so calling this raises NameError until restored; the
    print banner still says "NN1" (copied from build_mlp).
    """
    print("***************Starting NN1 Classifier***************")
    #Define Model parms - 2 hidden layers
    clf = NeuralNet(
        # Layer stack: input -> LSTM -> reshape -> dense sigmoid -> softmax.
        # Layers: http://lasagne.readthedocs.org/en/latest/modules/layers.html
        layers=[
            ('input', InputLayer),
            ('lstm1', LSTMLayer),
            ('rshp1', ReshapeLayer),
            ('hidden1', DenseLayer),
            ('output', DenseLayer)
        ],
        #Shape input shape(343) * 512 * 37 * 37
        input_shape=(None, input_num_inputs),
        #batchsize, seqlen, _ = input_input_var.shape,
        lstm1_num_units = 512,
        lstm1_nonlinearity=nl.sigmoid,
        rshp1_shape = (-1, 512),
        hidden1_num_units=output_num_units,
        hidden1_nonlinearity=nl.sigmoid,
        output_nonlinearity=softmax,
        output_num_units=output_num_units,
        # Optimisation: adagrad with a fixed learning rate; updates:
        # http://lasagne.readthedocs.org/en/latest/modules/updates.html
        update=adagrad,
        #update=sgd,
        update_learning_rate=0.01,
        #update_momentum=0.9,
        # Multi-class log loss; objectives:
        # http://lasagne.readthedocs.org/en/latest/modules/objectives.html
        objective_loss_function = categorical_crossentropy,
        max_epochs=50,
        eval_size=0.2,
        #train_split=TrainSplit(eval_size=0.2),
        regression=False,
        verbose=1,
        ## If label encoding is needed while clf.fit() ...label is already encoded in our case
        use_label_encoder=False,
        ## batch_iterator_train default is 128
        batch_iterator_train=BatchIterator(batch_size=128),
        batch_iterator_test=BatchIterator(batch_size=128),
        # on_epoch_finished=[
        #     AdjustVariable('update_learning_rate', start=0.01, stop=0.0001),
        #     AdjustVariable('update_momentum', start=0.9, stop=0.999),
        #     EarlyStopping(patience=10)
        # ]
    )
    return clf
########################################################################################################################
#Utility function to report best scores
########################################################################################################################
def report(grid_scores, n_top=5):
    """Summarize the top hyper-parameter search results.

    Prints the best (at most 5) configurations and returns a DataFrame with one
    row per retained result: one column per sorted parameter name plus a 'CV'
    column holding the mean validation score.

    :param grid_scores: iterable of search results; each item exposes
        ``parameters`` (dict), ``mean_validation_score`` (index 1 when treated
        as a tuple, as in sklearn's legacy ``grid_scores_``) and
        ``cv_validation_scores``.
    :param n_top: number of results to keep. Defaults to 5 so legacy callers
        that pass only ``grid_scores`` keep working.
    :return: pandas DataFrame of the top ``n_top`` parameter sets and scores.
    """
    # Sort descending by mean validation score (index 1 of the score tuple).
    top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
    parms_df = None
    for i, score in enumerate(top_scores):
        # Only echo the first five to keep console output readable.
        if i < 5:
            print("Model with rank: {0}".format(i + 1))
            print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
                score.mean_validation_score,
                np.std(score.cv_validation_scores)))
            print("Parameters: {0}".format(score.parameters))
            print("")
        # Sort parameter names so every row shares the same column order.
        params = collections.OrderedDict(sorted(score.parameters.items()))
        if parms_df is None:
            # Columns are fixed from the first result; 'CV' holds the score.
            parms_df = pd.DataFrame(columns=list(params.keys()) + ['CV'])
        # Record every retained result, not just the printed ones.
        parms_df.loc[i] = list(params.values()) + [score.mean_validation_score]
    if parms_df is None:
        # No scores supplied: return an empty frame instead of raising NameError.
        parms_df = pd.DataFrame()
    return parms_df
########################################################################################################################
#multiclass_log_loss
########################################################################################################################
def multiclass_log_loss(y_true, y_pred):
    """Multi-class logarithmic loss: thin wrapper over sklearn's log_loss.

    Probabilities are clipped at eps=1e-15 and the loss is averaged
    (normalize=True). Used to build the Mlogloss_scorer in main().
    """
    return log_loss(y_true,y_pred, eps=1e-15, normalize=True )
########################################################################################################################
#Cross Validation and model fitting
########################################################################################################################
def Nfold_Cross_Valid(X, y, clf):
    """Run 5-iteration stratified shuffle-split CV and return per-fold log-loss.

    :param X: feature matrix (converted to a numpy array internally)
    :param y: integer-encoded class labels
    :param clf: estimator exposing fit() and predict_proba(); it is refit on
        every fold, so the caller's clf ends up fitted on the last fold.
    :return: numpy array of the per-fold multi-class log-loss values
    """
    print("***************Starting Kfold Cross validation***************")
    X =np.array(X)
    scores=[]
    # lbl = preprocessing.LabelEncoder()
    # lbl.fit(list(y))
    # y = lbl.transform(y)
    # NOTE(review): this is the legacy sklearn (<0.18) StratifiedShuffleSplit API
    # (labels + n_iter in the constructor, iterated directly) -- confirm the
    # installed sklearn version still supports it.
    ss = StratifiedShuffleSplit(y, n_iter=5,test_size=0.2)
    #ss = KFold(len(y), n_folds=5,shuffle=False,indices=None)
    i = 1
    for trainCV, testCV in ss:
        X_train, X_test= X[trainCV], X[testCV]
        y_train, y_test= y[trainCV], y[testCV]
        #clf.fit(X_train, y_train, early_stopping_rounds=25, eval_metric="mlogloss",eval_set=[(X_test, y_test)])
        clf.fit(X_train, y_train)
        y_pred=clf.predict_proba(X_test)
        # Same clipped, normalized log-loss as the competition metric.
        scores.append(log_loss(y_test,y_pred, eps=1e-15, normalize=True ))
        print(" %d-iteration... %s " % (i,scores))
        i = i + 1
    #Average ROC from cross validation
    scores=np.array(scores)
    print ("Normal CV Score:",np.mean(scores))
    print("***************Ending Kfold Cross validation***************")
    return scores
########################################################################################################################
#Data cleansing , feature scalinng , splitting
########################################################################################################################
def Data_Munging(Train_DS,Actual_DS):
    """Clean, encode, merge and scale the train/test frames.

    :param Train_DS: raw training DataFrame (needs TripType, Weekday, VisitNumber)
    :param Actual_DS: raw test DataFrame (needs Weekday, VisitNumber)
    :return: tuple (Train_DS, Actual_DS, y) -- scaled numpy feature arrays and
        integer-encoded labels
    """
    print("***************Starting Data cleansing***************")
    global Train_DS1
    # Encode TripType into contiguous integer class labels, then drop it.
    y = Train_DS.TripType.values
    lbl = preprocessing.LabelEncoder()
    lbl.fit(list(y))
    y = lbl.transform(y)
    Train_DS = Train_DS.drop(['TripType'], axis = 1)
    ##----------------------------------------------------------------------------------------------------------------##
    # Weekend (1) vs weekday (2) flag.
    # NOTE(review): Weektype is only added to Train_DS, not Actual_DS -- confirm intended.
    Train_DS['Weektype'] = np.where(np.logical_or(Train_DS['Weekday']=='Saturday' ,Train_DS['Weekday']=='Sunday' ), 1,2)
    #Label Encode Weekday
    # Fit on train+test together so both share one label mapping.
    lbl = preprocessing.LabelEncoder()
    lbl.fit((list(Train_DS['Weekday'].astype(str)) + list(Actual_DS['Weekday'].astype(str))))
    Train_DS['Weekday'] = lbl.transform(Train_DS['Weekday'].astype(str))
    Actual_DS['Weekday'] = lbl.transform(Actual_DS['Weekday'].astype(str))
    #weekday one hot encoding
    print("weekday one hot encoding")
    # Concatenate so both frames get identical dummy columns, then split back by row count.
    New_DS = pd.concat([Train_DS, Actual_DS])
    dummies = pd.get_dummies(New_DS['Weekday'])
    cols_new = [ 'Weekday'+"_"+str(s) for s in list(dummies.columns)]
    New_DS[cols_new] = dummies
    Train_DS = New_DS.head(len(Train_DS))
    Actual_DS = New_DS.tail(len(Actual_DS))
    ##----------------------------------------------------------------------------------------------------------------##
    #Merge HighLow contrib
    print(np.shape(Train_DS))
    print(np.shape(Actual_DS))
    # NOTE(review): HLContrib_DS is a module global whose read_csv in main() is
    # currently commented out, so this merge would raise NameError -- verify.
    Train_DS = Train_DS.merge(HLContrib_DS,on='VisitNumber',how='left')
    Actual_DS = Actual_DS.merge(HLContrib_DS,on='VisitNumber',how='left')
    # Train_DS = Train_DS.merge(Autoencoder_DS,on='VisitNumber',how='left')
    # Actual_DS = Actual_DS.merge(Autoencoder_DS,on='VisitNumber',how='left')
    # newcols = list(HLContrib_DS_2.ix[:,'DD_buy1_0':'DD_ret1_68'].columns)
    # newcols.extend(['VisitNumber'])
    # Train_DS = Train_DS.merge(HLContrib_DS_2[newcols],on='VisitNumber',how='left')
    # Actual_DS = Actual_DS.merge(HLContrib_DS_2[newcols],on='VisitNumber',how='left')
    ##----------------------------------------------------------------------------------------------------------------##
    #Deleting any features during testing
    # The label slices below use the deprecated .ix API (removed in modern
    # pandas) and assume these column ranges exist -- TODO confirm.
    #ifyou want to delete main Fn
    test = Train_DS.head()
    test = test.ix[:,'FN_0':'FN_9999'].columns
    Train_DS = Train_DS.drop(test, axis = 1)
    Actual_DS = Actual_DS.drop(test, axis = 1)
    #ifyou want to delete 1000 Fn
    test = Train_DS.head()
    test = test.ix[:,'FinelineNumber_1000_1.0':'FinelineNumber_1000_9998.0'].columns
    Train_DS = Train_DS.drop(test, axis = 1)
    Actual_DS = Actual_DS.drop(test, axis = 1)
    #ifyou want to delete 1000 Upc
    test = Train_DS.head()
    test = test.ix[:,'Upc_1000_3082.0':'Upc_1000_775014200016.0'].columns
    Train_DS = Train_DS.drop(test, axis = 1)
    Actual_DS = Actual_DS.drop(test, axis = 1)
    #Delete only if DD with similarity matrix included
    test = Train_DS.head()
    test = test.ix[:,'DD_buy_0':'DD_ret_WIRELESS'].columns
    Train_DS = Train_DS.drop(test, axis = 1)
    Actual_DS = Actual_DS.drop(test, axis = 1)
    print(np.shape(Train_DS))
    print(np.shape(Actual_DS))
    ##----------------------------------------------------------------------------------------------------------------##
    print("Any scaling , log transformations")
    # .sort(columns=...) is the pre-0.20 pandas API (now sort_values).
    Actual_DS = Actual_DS.sort(columns='VisitNumber',ascending=True)
    Train_DS, y = shuffle(Train_DS, y)
    Train_DS = Train_DS.drop(['VisitNumber'], axis = 1)
    Actual_DS = Actual_DS.drop(['VisitNumber'], axis = 1)
    # Neutralize infinities/NaNs before the log transform and scaling.
    Train_DS = Train_DS.replace([np.inf, -np.inf], np.nan)
    Actual_DS = Actual_DS.replace([np.inf, -np.inf], np.nan)
    Train_DS = Train_DS.fillna(0)
    Actual_DS = Actual_DS.fillna(0)
    # Shift by +100 so the log is defined for zero and small negative values.
    Train_DS = np.array(np.log(100+ Train_DS))
    Actual_DS = np.array(np.log(100+ Actual_DS))
    #Setting Standard scaler for data
    # Scaler is fit on train only and applied to both frames.
    stdScaler = StandardScaler(with_mean=True, with_std=True)
    stdScaler.fit(Train_DS,y)
    Train_DS = stdScaler.transform(Train_DS)
    Actual_DS = stdScaler.transform(Actual_DS)
    # Train_DS = np.array(Train_DS)
    # Actual_DS = np.array(Actual_DS)
    #Use PCA for feature extraction
    # pca = PCA(n_components=500)
    # pca.fit(Train_DS,y )
    # Train_DS = pca.transform(Train_DS)
    # Actual_DS = pca.transform(Actual_DS)
    print(np.shape(Train_DS))
    print(np.shape(Actual_DS))
    # pd.DataFrame(Train_DS).to_csv(file_path+'Train_DS_50000.csv')
    # sys.exit(0)
    print("***************Ending Data cleansing***************")
    return Train_DS, Actual_DS, y
########################################################################################################################
#Random Forest Classifier (around 80%)
########################################################################################################################
def RFC_Classifier(Train_DS, y, Actual_DS, Sample_DS, Grid):
    """Fit a random forest (optionally via randomized hyper-parameter search),
    predict class probabilities for Actual_DS and write the submission CSV.

    :param Grid: when True, run RandomizedSearchCV instead of a fixed model.
    :return: numpy array of predicted class probabilities for Actual_DS
    """
    print("***************Starting Random Forest Classifier***************")
    t0 = time()
    if Grid:
        #used for checking the best performance for the model using hyper parameters
        print("Starting model fit with Grid Search")
        # specify parameters and distributions to sample from
        param_dist = {
            "criterion":['gini', 'entropy'],
            "max_depth": [1, 2, 3, 4, 5,6,7,8,9,10,11,12,13,14,15, None],
            "max_features": [1, 2, 3, 4, 5,6,7,8,9,10,11,12,13,14,15, None,'auto','log2'],
            "min_samples_split": sp_randint(1, 50),
            "min_samples_leaf": sp_randint(1, 50),
            "bootstrap": [True],
            "oob_score": [True, False]
        }
        clf = RandomForestClassifier(n_estimators=100,n_jobs=-1)
        # run randomized search
        n_iter_search = 3000
        clf = RandomizedSearchCV(clf, param_distributions=param_dist,
                                 n_iter=n_iter_search, scoring = 'roc_auc',cv=5)
        start = time()
        clf.fit(Train_DS, y)
        print("RandomizedSearchCV took %.2f seconds for %d candidates"
              " parameter settings." % ((time() - start), n_iter_search))
        # NOTE(review): report() is called without its n_top argument here --
        # this raises TypeError unless report() defines a default; confirm.
        report(clf.grid_scores_)
        print("Best estimator found by grid search:")
        print(clf.best_estimator_)
    else:
        #CV: 1.505327 , 20 K , n_estimators =100 , features = 343 (without FN and Upc and using eucledean for DD)
        # NOTE(review): min_samples_split=1 is rejected by modern sklearn (>=0.18
        # requires >=2); this clf is immediately overwritten below anyway.
        clf = RandomForestClassifier(n_jobs=-1, n_estimators=100, min_samples_split=1,max_features='auto',bootstrap=True,
                                     max_depth = 8, min_samples_leaf = 4,oob_score=True,criterion='entropy')
        #CV: 1.995509 , 20 K , n_estimators =100 , features = 343 (without FN and Upc and using eucledean for DD)
        clf = RandomForestClassifier(n_jobs=-1, n_estimators=100)
        Nfold_score = Nfold_Cross_Valid(Train_DS, y, clf)
        # NOTE(review): sys.exit(0) makes everything below unreachable in this
        # branch -- debugging scaffolding left in place.
        sys.exit(0)
    #clf = RandomForestClassifier(n_jobs=-1, n_estimators=2000)
    #clf = CalibratedClassifierCV(base_estimator=clf, method='sigmoid')
    clf.fit(Train_DS, y)
    # #
    # feature = pd.DataFrame()
    # feature['imp'] = clf.feature_importances_
    # feature['col'] = Train_DS1.columns
    # feature = feature.sort(['imp'], ascending=False).reset_index(drop=True)
    # print(feature)
    # pd.DataFrame(feature).to_csv(file_path+'feature_imp.csv')
    #Predict actual model
    pred_Actual = clf.predict_proba(Actual_DS)
    print("Actual Model predicted")
    #Get the predictions for actual data set
    # Submission: one probability column per class, indexed by VisitNumber.
    preds = pd.DataFrame(pred_Actual, index=Sample_DS.VisitNumber.values, columns=Sample_DS.columns[1:])
    preds.to_csv(file_path+'output/Submission_Roshan_rfc_1.csv', index_label='VisitNumber')
    print("***************Ending Random Forest Classifier***************")
    return pred_Actual
########################################################################################################################
#XGB_Classifier
########################################################################################################################
def XGB_Classifier(Train_DS, y, Actual_DS, Sample_DS, Grid):
    """Fit an XGBoost classifier via the sklearn wrapper (optionally via
    randomized search), predict probabilities and write the submission CSV.

    :param Grid: when True, run RandomizedSearchCV over the XGB parameter grid.
    :return: numpy array of predicted class probabilities for Actual_DS
    """
    print("***************Starting XGB Classifier***************")
    t0 = time()
    if Grid:
        #used for checking the best performance for the model using hyper parameters
        print("Starting model fit with Grid Search")
        param_grid = {'n_estimators': [25],
                      'max_depth': [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,17,19,20,40,80,100,200],
                      'min_child_weight': [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,20,40,80,100],
                      'subsample': [0.1,0.2,0.3,0.4,0.5,0.6, 0.7,0.8, 0.9,1],
                      'colsample_bytree': [0.1,0.2,0.3,0.4,0.5,0.6, 0.7,0.8, 0.9,1],
                      'silent':[True],
                      'gamma':[2,1,0.1,0.2,0.3,0.4,0.5,0.6, 0.7,0.8, 0.9]
                      }
        #run randomized search
        n_iter_search = 800
        clf = xgb.XGBClassifier(nthread=8)
        clf = RandomizedSearchCV(clf, param_distributions=param_grid,
                                 n_iter=n_iter_search, scoring = 'log_loss',cv=3)
        start = time()
        clf.fit(np.array(Train_DS), np.array(y))
        print("GridSearchCV completed")
        Parms_DS_Out = report(clf.grid_scores_,n_top=n_iter_search)
        Parms_DS_Out.to_csv(file_path+'Parms_DS_XGB_4.csv')
        print("Best estimator found by grid search:")
        print(clf.best_estimator_)
        sys.exit(0)
    else:
        ##----------------------------------------------------------------------------------------------------------------##
        #best from grid Search, best n_est=175
        #CV:0.936880 , 20 K , n_estimators =100 , features = 343 (without FN and Upc and using eucledean for DD)*** current best
        clf = xgb.XGBClassifier(n_estimators=100,max_depth=100,learning_rate=0.1,nthread=8,min_child_weight=1,
                                subsample=0.6,colsample_bytree=0.9,silent=True, gamma = 2 )
        ##----------------------------------------------------------------------------------------------------------------##
        #CV: 0.955185 , 20 K , n_estimators =100 , features = 343 (without FN and Upc)
        #CV: 0.935217 , 20 K , n_estimators =100 , features = 343 (without FN and Upc and using eucledean for DD)
        #CV: 0.927019 , 20 K , n_estimators =100 , features = 343 (without FN and Upc and using cos_sim for DD) *****not used ovefitting
        #CV: 0.922370 , 20 K , n_estimators =100 , features = 343 (without FN and Upc and using eucl + cos_sim for DD) *****not used ovefitting
        ##................................................................................................................##
        #CV: 0.942477 , 20 K , n_estimators =100 , features = 343 (without FN and Upc and using eucledean for DD)
        #clf = xgb.XGBClassifier(n_estimators=100,nthread=8)
        ##----------------------------------------------------------------------------------------------------------------##
        Nfold_score = Nfold_Cross_Valid(Train_DS, y, clf)
        # NOTE(review): sys.exit(0) makes the fit/predict below unreachable --
        # left-in debugging scaffolding.
        sys.exit(0)
    X_train = np.array(Train_DS)
    Y_train = np.array(y)
    clf.fit(X_train, Y_train)
    X_Actual = np.array(Actual_DS)
    #Predict actual model
    pred_Actual = clf.predict_proba(X_Actual)
    print("Actual Model predicted")
    #Get the predictions for actual data set
    preds = pd.DataFrame(pred_Actual, index=Sample_DS.VisitNumber.values, columns=Sample_DS.columns[1:])
    preds.to_csv(file_path+'output/Submission_Roshan_xgb_6_withFNnumber.csv', index_label='VisitNumber')
    print("***************Ending XGB Classifier***************")
    return pred_Actual
########################################################################################################################
#XGB_Classifier
########################################################################################################################
def XGB_Orig_binlog_Classifier(Train_DS, y, Actual_DS, Sample_DS, Grid):
    """Train with the native xgboost API (multi:softprob / mlogloss), predict
    probabilities for Actual_DS and write the submission CSV.

    :param Grid: when True only a message is printed (no grid search implemented).
    :return: numpy array of predicted class probabilities for Actual_DS
    """
    print("***************Starting XGB binlog Classifier***************")
    t0 = time()
    if Grid:
        #used for checking the best performance for the model using hyper parameters
        print("Starting model fit with Grid Search")
    else:
        #convert all data frames to numpy arrays for xgb use
        dtrain = xgb.DMatrix(Train_DS, label=y)
        dtest = xgb.DMatrix(Actual_DS)
        print(len(np.unique(y)))
        #only for cross validation
        # X_train, X_cv, Y_train, Y_cv = train_test_split(Train_DS, y, test_size=0.5, random_state=42)
        # dtrain = xgb.DMatrix(X_train, label=Y_train)
        # dtest = xgb.DMatrix(X_cv, label=Y_cv)
        # specify parameters
        # param = {'max_depth':14, 'eta':0.01, 'min_child_weight':8,'subsample': 0.9,'colsample_bytree':0.3,
        #          'silent':True, 'gamma': 0.9,'nthread': -1,'objective':'binary:logistic', 'eval_metric':'auc' }
        # num_class is derived from the label set so softprob emits one column per class.
        param = {'nthread': 8,'objective':'multi:softprob','num_class':len(np.unique(y)), 'eval_metric':'mlogloss','silent':True}
        # NOTE(review): under Python 3 .items() returns a view, not a list;
        # confirm the installed xgboost accepts it (older versions wanted a list).
        plst = param.items()
        #best with 115 rounds 0.7522
        num_round = 115
        #print ('running cross validation')
        #xgb.cv(param, dtrain, num_round, nfold=2,metrics={'mlogloss'}, seed = 0, show_stdv = False)
        # specify validations set to watch performance
        # NOTE(review): watchlist is built but unused by the active xgb.train call below.
        watchlist = [(dtest,'eval'), (dtrain,'train')]
        print("Starting training")
        #clf = xgb.train( plst, dtrain, num_round,watchlist,early_stopping_rounds=50)
        clf = xgb.train( plst, dtrain, num_round)
        print("training completed")
    #print "testing"
    pred_Actual = clf.predict(dtest)
    print("Actual Model predicted")
    #Get the predictions for actual data set
    preds = pd.DataFrame(pred_Actual, index=Sample_DS.VisitNumber.values, columns=Sample_DS.columns[1:])
    preds.to_csv(file_path+'output/Submission_Roshan_xgb_orig_6_withFNnumber.csv', index_label='VisitNumber')
    print("***************Ending XGB Classifier***************")
    return pred_Actual
########################################################################################################################
#Misc Classifier
########################################################################################################################
def Misc_Classifier(Train_DS, y, Actual_DS, Sample_DS, Grid):
    """Scratchpad for trying alternative classifiers (currently SVC); runs CV,
    then (unreachably, after sys.exit) would fit/predict and write a CSV.

    :param Grid: when True only a message is printed (no grid search implemented).
    :return: predictions for Actual_DS (never reached in the current state)
    """
    print("***************Starting Misc Classifier***************")
    t0 = time()
    if Grid:
        #used for checking the best performance for the model using hyper parameters
        print("Starting model fit with Grid Search")
    else:
        #CV - 0.666186155556
        #CV - 0.6670 - remove date MM/DD/YY and todays difff
        # clf = LogisticRegression()
        # Nfold_score = Nfold_Cross_Valid(Train_DS, y, clf)
        #sys.exit(0)
        #print("Adaboost")
        #CV: 0.7099
        #clf = AdaBoostClassifier(n_estimators=100)
        # print("BaggingClassifier")
        # #CV:
        # clf = BaggingClassifier(n_estimators=100)
        # Nfold_score = Nfold_Cross_Valid(Train_DS, y, clf)
        #
        # print("ExtraTreesClassifier")
        # #CV:2.22
        # clf = ExtraTreesClassifier(n_estimators=100)
        # Nfold_score = Nfold_Cross_Valid(Train_DS, y, clf)
        # print("MultinomialNB")
        # #CV:
        # clf = MultinomialNB()
        # Nfold_score = Nfold_Cross_Valid(Train_DS, y, clf)
        #
        # print("BernoulliNB")
        # #CV:
        # clf = BernoulliNB()
        # Nfold_score = Nfold_Cross_Valid(Train_DS, y, clf)
        #clf = SVC(kernel='rbf', class_weight='auto',C=1e5, gamma= 0.001,probability=True)
        clf = SVC(probability=True)
        Nfold_score = Nfold_Cross_Valid(Train_DS, y, clf)
        # NOTE(review): sys.exit(0) makes everything below unreachable.
        sys.exit(0)
    clf.fit(Train_DS, y)
    # feature = pd.DataFrame()
    # feature['imp'] = clf.feature_importances_
    # feature['col'] = Train_DS1.columns
    # feature = feature.sort(['imp'], ascending=False).reset_index(drop=True)
    # print(feature)
    #Predict actual model
    # NOTE(review): clf.predict() returns a 1-D label array, so [:,1] would
    # raise IndexError -- this presumably should be predict_proba(); confirm.
    pred_Actual = clf.predict(Actual_DS)[:,1]
    print("Actual Model predicted")
    #Get the predictions for actual data set
    preds = pd.DataFrame(pred_Actual, index=Sample_DS.ID.values, columns=Sample_DS.columns[1:])
    preds.to_csv(file_path+'output/Submission_Roshan_Misc_filter_2.csv', index_label='ID')
    print("***************Ending Random Forest Classifier***************")
    return pred_Actual
#########################################################################################################################
#Neural Network Classifier 1
########################################################################################################################
def NN1_Classifier(Train_DS, y, Actual_DS, Sample_DS, Grid):
    """Train the lasagne/nolearn MLP built by build_mlp(), then (after the
    sys.exit scaffolding is removed) predict probabilities and write a CSV.

    :param Grid: when True only a message is printed (no grid search implemented).
    :return: numpy array of predicted class probabilities for Actual_DS
    """
    print("***************Starting NN1 Classifier***************")
    t0 = time()
    if Grid:
        #used for checking the best performance for the model using hyper parameters
        print("Starting model fit with Grid Search")
    else:
        #y = y.reshape((-1, 1))
        # Shared theano variable so callbacks could anneal the learning rate.
        learning_rate = theano.shared(np.float32(0.1))
        # Theano expects int32 labels and float32 features.
        y = y.astype(np.int32)
        Train_DS = Train_DS.astype('float32')
        Actual_DS = Actual_DS.astype('float32')
        ##----------------------------------------------------------------------------------------------------------------##
        #Best CV's
        #CV:1.011700 ,sigmoid, max_epochs =15 , Dense = 700,700 (without FN and Upc and using eucledean for DD)
        #CV:1.010100 ,sigmoid, max_epochs =15 , Dense = 1000,1000 (without FN and Upc and using eucledean for DD)
        #CV:0.963210 ,sigmoid, max_epochs =265 , Dense = 500,500 (without FN and Upc and using eucledean for DD)
        #CV:0.962284 ,sigmoid, max_epochs =266 , Dense = 500,500 (without FN and Upc and using eucledean for DD & no np.log )*** current best
        #CV:0.965358 ,sigmoid, max_epochs =227 , Dense = 500,500 (without FN and Upc and using eucledean for DD & autoencoder)
        ##----------------------------------------------------------------------------------------------------------------##
        clf = build_mlp(Train_DS.shape[1],len(np.unique(y)))
        #clf = build_rnn(Train_DS.shape[1],len(np.unique(y)))
        #Train_DS, y = shuffle(Train_DS, y, random_state=123)
        clf.fit(Train_DS, y)
        # _, X_valid, _, y_valid = clf.train_test_split(Train_DS, y, clf.eval_size)
        # probas = clf.predict_proba(X_valid)[:,0]
        # print("ROC score", metrics.roc_auc_score(y_valid, probas))
        print("done in %0.3fs" % (time() - t0))
        # NOTE(review): sys.exit(0) makes the prediction/CSV step below unreachable.
        sys.exit(0)
    #Predict actual model
    pred_Actual = clf.predict_proba(Actual_DS)
    print("Actual Model predicted")
    #Get the predictions for actual data set
    preds = pd.DataFrame(pred_Actual, index=Sample_DS.VisitNumber.values, columns=Sample_DS.columns[1:])
    preds.to_csv(file_path+'output/Submission_Roshan_NN5.csv', index_label='VisitNumber')
    print("***************Ending NN1 Classifier***************")
    return pred_Actual
########################################################################################################################
#Main module #
########################################################################################################################
def main(argv):
    """Entry point: configure globals, load the data files, run munging and the
    selected classifier(s).

    :param argv: unused; the guard at the bottom of the file passes the sys
        module itself (``main(sys)``) -- harmless since argv is never read.
    """
    pd.set_option('display.width', 200)
    # NOTE(review): 'display.height' was removed in modern pandas -- this line
    # raises on recent versions; confirm the pinned pandas release.
    pd.set_option('display.height', 500)
    warnings.filterwarnings("ignore")
    global file_path, Train_DS1, Featimp_DS, Mlogloss_scorer, HLContrib_DS, HLContrib_DS_2, Autoencoder_DS
    # Mlogloss
    # Negated scorer (greater_is_better=False) so search minimizes log-loss.
    Mlogloss_scorer = metrics.make_scorer(multiclass_log_loss, greater_is_better = False)
    # Fix both RNG seeds for reproducible runs.
    random.seed(42)
    np.random.seed(42)
    if(platform.system() == "Windows"):
        file_path = 'C:/Python/Others/data/Kaggle/Walmart_Recruiting_TTC/'
    else:
        file_path = '/home/roshan/Desktop/DS/Others/data/Kaggle/Walmart_Recruiting_TTC/'
    ########################################################################################################################
    #Read the input file , munging and splitting the data to train and test
    ########################################################################################################################
    # Train_DS = pd.read_csv(file_path+'train_Grouped5_withFNnumber.csv',sep=',')
    # Actual_DS = pd.read_csv(file_path+'test_Grouped5_withFNnumber.csv',sep=',')
    Sample_DS = pd.read_csv(file_path+'sample_submission.csv',sep=',')
    # NOTE(review): HLContrib_DS is read inside Data_Munging but all read_csv
    # lines that define it are commented out here -- confirm before running.
    #HLContrib_DS = pd.read_csv(file_path+'High_Lowest_Contributors_utilities_new.csv',sep=',',index_col=0)
    #HLContrib_DS = pd.read_csv(file_path+'High_Lowest_Contributors_utilities.csv',sep=',',index_col=0)
    #Autoencoder_DS = pd.read_csv(file_path+'Autoencoder_output.csv',sep=',',index_col=0)
    Create_file = False
    count = 5000
    ifile = 5
    if Create_file:
        Train_DS = pd.read_csv(file_path+'train_Grouped_withFNnumber_'+str(ifile)+'.csv',sep=',')
        Actual_DS = pd.read_csv(file_path+'test_Grouped_withFNnumber_'+str(ifile)+'.csv',sep=',')
        print(np.shape(Train_DS))
        print(np.shape(Actual_DS))
        ##----------------------------------------------------------------------------------------------------------------##
        # Train_DS = (Train_DS.reindex(np.random.permutation(Train_DS.index))).reset_index(drop=True)
        # Train_DS = Train_DS.head(count)
        # pd.DataFrame(Train_DS).to_csv(file_path+'train_Grouped_withFNnumber_temp_'+str(ifile)+'.csv')
        #
        # Actual_DS = (Actual_DS.reindex(np.random.permutation(Actual_DS.index))).reset_index(drop=True)
        # Actual_DS = Actual_DS.head(count)
        # pd.DataFrame(Actual_DS).to_csv(file_path+'test_Grouped_withFNnumber_temp_'+str(ifile)+'.csv')
        #
        # print(np.shape(Train_DS))
        # print(np.shape(Actual_DS))
    else:
        # Sampled 50k files, truncated to `count` rows for fast iteration.
        Train_DS = pd.read_csv(file_path+'train_50000.csv',sep=',',index_col=0,nrows = count)
        Actual_DS = pd.read_csv(file_path+'test_50000.csv',sep=',',index_col=0,nrows = count)
        #Train_DS = (Train_DS.reindex(np.random.permutation(Train_DS.index))).reset_index(drop=True)
        #Train_DS = pd.read_csv(file_path+'train_Grouped_withFNnumber_temp_'+str(ifile)+'.csv',sep=',', index_col=0,nrows = count).reset_index(drop=True)
        #Actual_DS = pd.read_csv(file_path+'test_Grouped_withFNnumber_temp_'+str(ifile)+'.csv',sep=',', index_col=0,nrows = count).reset_index(drop=True)
    ##----------------------------------------------------------------------------------------------------------------##
    Train_DS, Actual_DS, y = Data_Munging(Train_DS,Actual_DS)
    # NOTE(review): the classifiers call sys.exit() internally, so the second
    # call below is normally never reached.
    pred_Actual = XGB_Orig_binlog_Classifier(Train_DS, y, Actual_DS, Sample_DS, Grid=False)
    #pred_Actual = XGB_Classifier(Train_DS, y, Actual_DS, Sample_DS, Grid=False)
    #pred_Actual = XGB_Classifier1(Train_DS, y, Actual_DS, Sample_DS, Grid=False)
    #pred_Actual = RFC_Classifier(Train_DS, y, Actual_DS, Sample_DS, Grid=False)
    pred_Actual = Misc_Classifier(Train_DS, y, Actual_DS, Sample_DS, Grid=False)
    #pred_Actual = NN1_Classifier(Train_DS, y, Actual_DS, Sample_DS, Grid=False)
########################################################################################################################
#Main program starts here #
########################################################################################################################
if __name__ == "__main__":
main(sys) |
"""Implementation of classes for interacting with SQS"""
import logging
from botocore.exceptions import ClientError
class SqsQueueException(Exception):
    """Raised when an SQS operation (get/put/purge) fails with a client error."""
class QueueMessage:
    """Wrapper around a single received SQS message.

    Holds the raw message object plus the queue resource needed to delete it.
    """

    def __init__(self, msg, queue_resource):
        self.logger = logging.getLogger("QueueMessage")
        self.logger.setLevel(logging.DEBUG)
        self.queue_resource = queue_resource
        self.msg = msg

    def read(self):
        """Return the message body."""
        return self.msg.body

    def delete(self):
        """Delete this message from the queue and return its body.

        Re-raises the underlying ClientError when the delete fails.
        """
        receipt = self.msg.receipt_handle
        self.logger.info("Deleting message with receipt handle: %s", receipt)
        body = self.msg.body
        entries = [
            {
                'Id': "test",
                'ReceiptHandle': receipt
            },
        ]
        try:
            self.queue_resource.delete_messages(Entries=entries)
        except ClientError as ex:
            self.logger.error("Failed to delete message from SQS queue")
            self.logger.exception(ex)
            raise ex
        return body
class SqsQueue:
    """Class to represent an SQS queue.

    Wraps a boto3 Queue resource, translating ClientError into
    SqsQueueException (with the original error chained as the cause).
    """

    def __init__(self, queue_resource, queue_name):
        self.queue_resource = queue_resource
        self.queue_name = queue_name
        self.logger = logging.getLogger("SqsQueue")

    def get_message(self):
        """Get a single message off the queue if it exists.

        :return: a QueueMessage, or None when the queue is empty.
        :raises SqsQueueException: when the receive call fails.
        """
        try:
            self.logger.info("Trying to get message from queue %s", self.queue_name)
            # Long-poll (20s) so an empty queue does not busy-loop the caller.
            messages = self.queue_resource.receive_messages(
                AttributeNames=['All'],
                MessageAttributeNames=['All'],
                MaxNumberOfMessages=1,
                WaitTimeSeconds=20,
            )
        except ClientError as e:
            self.logger.error("Failed to get message from SQS queue")
            self.logger.exception(e)
            # Chain the ClientError so the root cause is preserved.
            raise SqsQueueException(f"Failed to get message from SQS queue {self.queue_name}") from e
        if not messages:
            return None
        return QueueMessage(messages[0], self.queue_resource)

    def put_message(self, msg):
        """Put a single message on the queue.

        :raises SqsQueueException: when the send call fails.
        """
        try:
            self.logger.info("Trying to put message onto queue %s", self.queue_name)
            self.queue_resource.send_message(
                MessageBody=msg
            )
        except ClientError as e:
            self.logger.error("Failed to put message on SQS queue")
            self.logger.exception(e)
            raise SqsQueueException(f"Failed to put message on SQS queue {self.queue_name}") from e

    def purge(self):
        """Delete all messages currently in the queue.

        :raises SqsQueueException: when the purge call fails.
        """
        try:
            # Lazy %-style args to match the logging style used elsewhere.
            self.logger.info("Trying to purge queue %s", self.queue_name)
            self.queue_resource.purge()
        except ClientError as e:
            self.logger.error("Failed to purge queue %s", self.queue_name)
            self.logger.exception(e)
            raise SqsQueueException(f"Failed to purge queue {self.queue_name}") from e

    @staticmethod
    def get_sqs_queue(queue_name: str):
        """Build an SqsQueue from the default boto3 session."""
        import boto3
        sqs = boto3.resource('sqs')
        queue = sqs.get_queue_by_name(QueueName=queue_name)
        return SqsQueue(queue, queue_name)

    @staticmethod
    def get_sqs_queue_from_session(session, queue_name: str):
        """Build an SqsQueue from an explicit boto3 session."""
        sqs = session.resource('sqs')
        queue = sqs.get_queue_by_name(QueueName=queue_name)
        return SqsQueue(queue, queue_name)
class VehicleError(ValueError):
    """Domain error for invalid vehicle operations.

    Maps an internal error code (the *err* string) to a user-facing message
    via __str__; unrecognized codes render as 'Unknown error'.
    """

    # Lookup table replaces the if/elif chain; keys are the internal codes.
    _MESSAGES = {
        'Already started': 'Engine already started',
        'Already stopped': 'Engine already stopped',
        'Overflow tank': 'Overflow tank',
        'Engine not running': 'Movement not possible, engine not running',
        'The tank is empty': 'Movement not possible, the tank is empty',
    }

    def __init__(self, err):
        # Call ValueError.__init__ so args/repr/pickling behave like a
        # normal exception (the original skipped this, leaving args empty).
        super().__init__(err)
        self.err = err

    def __str__(self):
        return self._MESSAGES.get(self.err, 'Unknown error')
|
from ai.model import DQN
from ai.memory import ReplayMemory
from game.game_tools import TransitionGenerator
import random
from game.flappybird import FlappyBird
import torch
from math import exp
# --- DQN training hyper-parameters ---
batch_size = 300           # transitions sampled from replay memory per update
gamma = 0.99               # discount factor (declared; usage not visible here)
eps_start = 0.9            # initial epsilon for epsilon-greedy exploration
eps_end = 0.05             # floor epsilon after decay
eps_decay = 500            # decay time-constant (in training loops)
action_set = [0, 1]        # available actions; presumably 0 = glide, 1 = flap -- TODO confirm
training_loop_count = 300  # declared; not referenced in the visible code
target_update = 10         # steps between target-network weight syncs
decision_freq = 5          # frames between agent decisions
# Online network (trained) and target network (periodically synced copy).
policy_net = DQN()
target_net = DQN()
# Replay buffer capped at 2000 transitions.
game_memory = ReplayMemory(2000)
def select_action(state, random_select_prob):
    """Epsilon-greedy action selection.

    With probability *random_select_prob* pick a random action from the
    module-level action_set; otherwise take the argmax of the policy
    network's Q-values for *state*.
    """
    explore = random.random() < random_select_prob
    if explore:
        return random.choice(action_set)
    q_values = policy_net(torch.Tensor([state]))
    return q_values.max(1)[1].tolist()[0]
def calc_reward(memory, model):
    """Bellman target for one stored transition.

    :param memory: transition exposing .reward and .next_state
    :param model: network mapping a state batch to Q-values (only called
        for non-terminal transitions)
    :return: immediate reward plus the model's best next-state Q-value,
        or reward minus the fixed 480 death penalty on terminal transitions
    """
    base = memory.reward
    if memory.next_state is None:
        # Terminal transition: apply the fixed penalty instead of bootstrapping.
        return base - 480
    best_next = model(torch.Tensor([memory.next_state])).max(1)[0].tolist()[0]
    return base + best_next
def train():
    """Run the DQN training loop on FlappyBird until a score of 50 is reached.

    Uses the module-level policy_net/target_net/game_memory and the
    hyper-parameters defined above; saves checkpoints on new high scores and
    the final target-network weights at the end.
    """
    step = 0
    training_loop = 0
    flappy_bird = FlappyBird()
    current_score = 0
    optimizer = torch.optim.SGD(policy_net.parameters(), lr = 0.01)
    # Start the target network as an exact copy of the policy network.
    target_net.load_state_dict(policy_net.state_dict())
    target_net.eval()
    high_score = 0
    current_state = None
    next_state = None
    action = None
    reward = None
    # Stop once the agent reaches a score of 50 in a single run.
    while current_score < 50:
        # Exponentially decayed epsilon, from eps_start toward eps_end.
        random_select_prob = eps_end + (eps_start - eps_end) * exp(-1 * training_loop / eps_decay)
        current_score = flappy_bird.get_score()
        print(step)
        print(training_loop)
        print(game_memory.__len__())
        # Checkpoint the policy network whenever a new high score is set.
        if flappy_bird.get_score() > high_score:
            high_score = flappy_bird.get_score()
            torch.save(policy_net.state_dict(), './ai/train_model_high_score_{}.pkl'.format(high_score))
        if flappy_bird.die:
            # Terminal transition: next_state is None, penalty scales with screen height.
            transition = TransitionGenerator.create_transition(current_state, action, None, -1 * flappy_bird.screen_y)
            if transition.state is not None:
                game_memory.push(transition)
            flappy_bird.run_frame(train=True)
            step = step + 1
        elif step % decision_freq == 0:
            # step 1: make a transition & push into game_memory
            next_state = flappy_bird.get_state()
            reward = TransitionGenerator.calc_reward(next_state)
            transition = TransitionGenerator.create_transition(current_state, action, next_state, reward)
            if transition.state is not None:
                game_memory.push(transition)
            # step 2: store memory frag into game memory then sample if there is enough data for training
            if batch_size <= game_memory.__len__():
                training_memory = game_memory.sample(batch_size)
                # step 3: train the cnn
                # Targets come from the frozen target_net (Double-buffer DQN style).
                target_reward = torch.Tensor([[calc_reward(i, target_net)] for i in training_memory])
                input_states = torch.Tensor([i.state for i in training_memory])
                actions_taken = torch.Tensor([[i.action] for i in training_memory]).long()
                # Q-values of the actions actually taken, via gather on dim 1.
                predicted_reward = policy_net(input_states).gather(1, actions_taken)
                loss = torch.nn.functional.smooth_l1_loss(predicted_reward, target_reward)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                training_loop = training_loop + 1
                # step 4: update the target cnn
                if step % target_update == 0:
                    target_net.load_state_dict(policy_net.state_dict())
                else:
                    pass
            # step 5: move next_stage to current_state, select the action, and perform the action on the game
            current_state = next_state
            action = select_action(current_state, random_select_prob)
            flappy_bird.run_frame(action, train = True)
            step = step + 1
        else:
            # Non-decision frame: keep the game running without acting.
            flappy_bird.run_frame(train=True)
            step = step + 1
    target_net.load_state_dict(policy_net.state_dict())
    torch.save(target_net.state_dict(), './ai/trained_model_state_dict.pkl')
    print('finished training')
train()
|
import tkinter
import sys
from typing import Union, Tuple, Callable
from .ctk_canvas import CTkCanvas
from ..theme_manager import ThemeManager
from ..settings import Settings
from ..draw_engine import DrawEngine
from .widget_base_class import CTkBaseClass
class CTkButton(CTkBaseClass):
    """ button with border, rounded corners, hover effect, image support """

    def __init__(self, *args,
                 bg_color: Union[str, Tuple[str, str], None] = None,
                 fg_color: Union[str, Tuple[str, str], None] = "default_theme",
                 hover_color: Union[str, Tuple[str, str]] = "default_theme",
                 border_color: Union[str, Tuple[str, str]] = "default_theme",
                 text_color: Union[str, Tuple[str, str]] = "default_theme",
                 text_color_disabled: Union[str, Tuple[str, str]] = "default_theme",
                 width: int = 140,
                 height: int = 28,
                 corner_radius: Union[int, str] = "default_theme",
                 border_width: Union[int, str] = "default_theme",
                 text: str = "CTkButton",
                 textvariable: tkinter.Variable = None,
                 text_font: any = "default_theme",
                 image: tkinter.PhotoImage = None,
                 hover: bool = True,
                 compound: str = "left",
                 state: str = "normal",
                 command: Callable = None,
                 **kwargs):
        # transfer basic functionality (bg_color, size, _appearance_mode, scaling) to CTkBaseClass
        super().__init__(*args, bg_color=bg_color, width=width, height=height, **kwargs)

        # color: the "default_theme" sentinel resolves to the active theme value
        self.fg_color = ThemeManager.theme["color"]["button"] if fg_color == "default_theme" else fg_color
        self.hover_color = ThemeManager.theme["color"]["button_hover"] if hover_color == "default_theme" else hover_color
        self.border_color = ThemeManager.theme["color"]["button_border"] if border_color == "default_theme" else border_color
        self.text_color = ThemeManager.theme["color"]["text"] if text_color == "default_theme" else text_color
        self.text_color_disabled = ThemeManager.theme["color"]["text_button_disabled"] if text_color_disabled == "default_theme" else text_color_disabled

        # shape
        self.corner_radius = ThemeManager.theme["shape"]["button_corner_radius"] if corner_radius == "default_theme" else corner_radius
        self.border_width = ThemeManager.theme["shape"]["button_border_width"] if border_width == "default_theme" else border_width

        # text, font, image (the tkinter labels are created lazily in draw())
        self.image = image
        self.image_label = None
        self.text = text
        self.text_label = None
        self.text_font = (ThemeManager.theme["text"]["font"], ThemeManager.theme["text"]["size"]) if text_font == "default_theme" else text_font

        # callback and hover functionality
        self.command = command
        self.textvariable = textvariable
        self.state = state
        self.hover = hover
        self.compound = compound
        self.click_animation_running = False

        # configure grid system (2x2)
        self.grid_rowconfigure(0, weight=1)
        self.grid_columnconfigure(0, weight=1)
        self.grid_rowconfigure(1, weight=1)
        self.grid_columnconfigure(1, weight=1)

        # canvas
        self.canvas = CTkCanvas(master=self,
                                highlightthickness=0,
                                width=self.apply_widget_scaling(self._desired_width),
                                height=self.apply_widget_scaling(self._desired_height))
        self.canvas.grid(row=0, column=0, rowspan=2, columnspan=2, sticky="nsew")
        self.draw_engine = DrawEngine(self.canvas)

        # canvas event bindings
        # fix: "<Button-1>" was bound twice here (and on the labels below);
        # tkinter's bind() without add='+' replaces the previous binding, so
        # the duplicate lines were dead code and have been removed
        self.canvas.bind("<Enter>", self.on_enter)
        self.canvas.bind("<Leave>", self.on_leave)
        self.canvas.bind("<Button-1>", self.clicked)
        self.bind('<Configure>', self.update_dimensions_event)

        # configure cursor and initial draw
        self.set_cursor()
        self.draw()

    def set_scaling(self, *args, **kwargs):
        """Apply a new widget/font scaling; labels are destroyed and recreated
        with the scaled font on the next draw()."""
        super().set_scaling(*args, **kwargs)

        if self.text_label is not None:
            self.text_label.destroy()
            self.text_label = None
        if self.image_label is not None:
            self.image_label.destroy()
            self.image_label = None

        self.canvas.configure(width=self.apply_widget_scaling(self._desired_width),
                              height=self.apply_widget_scaling(self._desired_height))
        self.draw()

    def set_dimensions(self, width: int = None, height: int = None):
        """Resize the button (either dimension optional) and redraw."""
        super().set_dimensions(width, height)

        self.canvas.configure(width=self.apply_widget_scaling(self._desired_width),
                              height=self.apply_widget_scaling(self._desired_height))
        self.draw()

    def draw(self, no_color_updates=False):
        """(Re)draw border and inner parts on the canvas and (re)create the
        text/image labels as needed; colors are skipped when
        no_color_updates is True unless the draw engine recolored."""
        requires_recoloring = self.draw_engine.draw_rounded_rect_with_border(self.apply_widget_scaling(self._current_width),
                                                                             self.apply_widget_scaling(self._current_height),
                                                                             self.apply_widget_scaling(self.corner_radius),
                                                                             self.apply_widget_scaling(self.border_width))

        if no_color_updates is False or requires_recoloring:
            self.canvas.configure(bg=ThemeManager.single_color(self.bg_color, self._appearance_mode))

            # set color for the button border parts (outline)
            self.canvas.itemconfig("border_parts",
                                   outline=ThemeManager.single_color(self.border_color, self._appearance_mode),
                                   fill=ThemeManager.single_color(self.border_color, self._appearance_mode))

            # set color for inner button parts; fg_color=None means transparent
            # (inner parts take the parent bg color)
            if self.fg_color is None:
                self.canvas.itemconfig("inner_parts",
                                       outline=ThemeManager.single_color(self.bg_color, self._appearance_mode),
                                       fill=ThemeManager.single_color(self.bg_color, self._appearance_mode))
            else:
                self.canvas.itemconfig("inner_parts",
                                       outline=ThemeManager.single_color(self.fg_color, self._appearance_mode),
                                       fill=ThemeManager.single_color(self.fg_color, self._appearance_mode))

        # create text label if text given
        if self.text is not None and self.text != "":

            if self.text_label is None:
                self.text_label = tkinter.Label(master=self,
                                                font=self.apply_font_scaling(self.text_font),
                                                text=self.text,
                                                textvariable=self.textvariable)

                # forward pointer events from the label to the button
                # (single bind each — see note in __init__)
                self.text_label.bind("<Enter>", self.on_enter)
                self.text_label.bind("<Leave>", self.on_leave)
                self.text_label.bind("<Button-1>", self.clicked)

            if no_color_updates is False:
                # set text_label fg color (text color)
                self.text_label.configure(fg=ThemeManager.single_color(self.text_color, self._appearance_mode))

                if self.state == tkinter.DISABLED:
                    self.text_label.configure(fg=(ThemeManager.single_color(self.text_color_disabled, self._appearance_mode)))
                else:
                    self.text_label.configure(fg=ThemeManager.single_color(self.text_color, self._appearance_mode))

                if self.fg_color is None:
                    self.text_label.configure(bg=ThemeManager.single_color(self.bg_color, self._appearance_mode))
                else:
                    self.text_label.configure(bg=ThemeManager.single_color(self.fg_color, self._appearance_mode))

        else:
            # delete text_label if no text given
            if self.text_label is not None:
                self.text_label.destroy()
                self.text_label = None

        # create image label if image given
        if self.image is not None:

            if self.image_label is None:
                self.image_label = tkinter.Label(master=self)

                # forward pointer events from the label to the button
                self.image_label.bind("<Enter>", self.on_enter)
                self.image_label.bind("<Leave>", self.on_leave)
                self.image_label.bind("<Button-1>", self.clicked)

            if no_color_updates is False:
                # set image_label bg color (background color of label)
                if self.fg_color is None:
                    self.image_label.configure(bg=ThemeManager.single_color(self.bg_color, self._appearance_mode))
                else:
                    self.image_label.configure(bg=ThemeManager.single_color(self.fg_color, self._appearance_mode))

            self.image_label.configure(image=self.image)  # set image

        else:
            # delete image_label if no image given
            if self.image_label is not None:
                self.image_label.destroy()
                self.image_label = None

        # create grid layout with just an image given
        if self.image_label is not None and self.text_label is None:
            self.image_label.grid(row=0, column=0, rowspan=2, columnspan=2, sticky="",
                                  pady=(self.apply_widget_scaling(self.border_width), self.apply_widget_scaling(self.border_width) + 1))  # bottom pady with +1 for rounding to even

        # create grid layout with just text given
        if self.image_label is None and self.text_label is not None:
            self.text_label.grid(row=0, column=0, rowspan=2, columnspan=2, sticky="",
                                 padx=self.apply_widget_scaling(self.corner_radius),
                                 pady=(self.apply_widget_scaling(self.border_width), self.apply_widget_scaling(self.border_width) + 1))  # bottom pady with +1 for rounding to even

        # create grid layout of image and text label in 2x2 grid system with given compound
        if self.image_label is not None and self.text_label is not None:
            if self.compound == tkinter.LEFT or self.compound == "left":
                self.image_label.grid(row=0, column=0, sticky="e", rowspan=2, columnspan=1,
                                      padx=(max(self.apply_widget_scaling(self.corner_radius), self.apply_widget_scaling(self.border_width)), 2),
                                      pady=(self.apply_widget_scaling(self.border_width), self.apply_widget_scaling(self.border_width) + 1))
                self.text_label.grid(row=0, column=1, sticky="w", rowspan=2, columnspan=1,
                                     padx=(2, max(self.apply_widget_scaling(self.corner_radius), self.apply_widget_scaling(self.border_width))),
                                     pady=(self.apply_widget_scaling(self.border_width), self.apply_widget_scaling(self.border_width) + 1))
            elif self.compound == tkinter.TOP or self.compound == "top":
                self.image_label.grid(row=0, column=0, sticky="s", columnspan=2, rowspan=1,
                                      padx=max(self.apply_widget_scaling(self.corner_radius), self.apply_widget_scaling(self.border_width)),
                                      pady=(self.apply_widget_scaling(self.border_width), 2))
                self.text_label.grid(row=1, column=0, sticky="n", columnspan=2, rowspan=1,
                                     padx=max(self.apply_widget_scaling(self.corner_radius), self.apply_widget_scaling(self.border_width)),
                                     pady=(2, self.apply_widget_scaling(self.border_width)))
            elif self.compound == tkinter.RIGHT or self.compound == "right":
                self.image_label.grid(row=0, column=1, sticky="w", rowspan=2, columnspan=1,
                                      padx=(2, max(self.apply_widget_scaling(self.corner_radius), self.apply_widget_scaling(self.border_width))),
                                      pady=(self.apply_widget_scaling(self.border_width), self.apply_widget_scaling(self.border_width) + 1))
                self.text_label.grid(row=0, column=0, sticky="e", rowspan=2, columnspan=1,
                                     padx=(max(self.apply_widget_scaling(self.corner_radius), self.apply_widget_scaling(self.border_width)), 2),
                                     pady=(self.apply_widget_scaling(self.border_width), self.apply_widget_scaling(self.border_width) + 1))
            elif self.compound == tkinter.BOTTOM or self.compound == "bottom":
                self.image_label.grid(row=1, column=0, sticky="n", columnspan=2, rowspan=1,
                                      padx=max(self.apply_widget_scaling(self.corner_radius), self.apply_widget_scaling(self.border_width)),
                                      pady=(2, self.apply_widget_scaling(self.border_width)))
                self.text_label.grid(row=0, column=0, sticky="s", columnspan=2, rowspan=1,
                                     padx=max(self.apply_widget_scaling(self.corner_radius), self.apply_widget_scaling(self.border_width)),
                                     pady=(self.apply_widget_scaling(self.border_width), 2))

    def configure(self, require_redraw=False, **kwargs):
        """Update widget options; known CTk options are popped from kwargs and
        the rest is passed to the base class. Redraws only when needed."""
        if "text" in kwargs:
            self.text = kwargs.pop("text")
            if self.text_label is None:
                require_redraw = True  # text_label will be created in .draw()
            else:
                self.text_label.configure(text=self.text)

        if "text_font" in kwargs:
            self.text_font = kwargs.pop("text_font")
            if self.text_label is not None:
                self.text_label.configure(font=self.apply_font_scaling(self.text_font))

        if "state" in kwargs:
            self.state = kwargs.pop("state")
            self.set_cursor()
            require_redraw = True

        if "image" in kwargs:
            self.image = kwargs.pop("image")
            require_redraw = True

        if "corner_radius" in kwargs:
            self.corner_radius = kwargs.pop("corner_radius")
            require_redraw = True

        if "compound" in kwargs:
            self.compound = kwargs.pop("compound")
            require_redraw = True

        if "fg_color" in kwargs:
            self.fg_color = kwargs.pop("fg_color")
            require_redraw = True

        if "border_color" in kwargs:
            self.border_color = kwargs.pop("border_color")
            require_redraw = True

        if "hover_color" in kwargs:
            self.hover_color = kwargs.pop("hover_color")
            require_redraw = True

        if "text_color" in kwargs:
            self.text_color = kwargs.pop("text_color")
            require_redraw = True

        if "command" in kwargs:
            self.command = kwargs.pop("command")

        if "textvariable" in kwargs:
            self.textvariable = kwargs.pop("textvariable")
            if self.text_label is not None:
                self.text_label.configure(textvariable=self.textvariable)

        if "width" in kwargs:
            self.set_dimensions(width=kwargs.pop("width"))

        if "height" in kwargs:
            self.set_dimensions(height=kwargs.pop("height"))

        super().configure(require_redraw=require_redraw, **kwargs)

    def set_cursor(self):
        """Set a platform-appropriate mouse cursor for the current state.

        Simplified from the original, which re-checked
        Settings.cursor_manipulation_enabled and self.command inside every
        branch although the outcome was identical."""
        if not Settings.cursor_manipulation_enabled or self.command is None:
            return

        if self.state == tkinter.DISABLED:
            if sys.platform == "darwin" or sys.platform.startswith("win"):
                self.configure(cursor="arrow")
        elif self.state == tkinter.NORMAL:
            if sys.platform == "darwin":
                self.configure(cursor="pointinghand")
            elif sys.platform.startswith("win"):
                self.configure(cursor="hand2")

    def set_image(self, image):
        """ will be removed in next major """
        self.configure(image=image)

    def set_text(self, text):
        """ will be removed in next major """
        self.configure(text=text)

    def on_enter(self, event=None):
        """Hover-in handler: tint inner parts and labels with hover_color."""
        if self.hover is True and self.state == tkinter.NORMAL:
            if self.hover_color is None:
                inner_parts_color = self.fg_color
            else:
                inner_parts_color = self.hover_color

            # set color of inner button parts to hover color
            self.canvas.itemconfig("inner_parts",
                                   outline=ThemeManager.single_color(inner_parts_color, self._appearance_mode),
                                   fill=ThemeManager.single_color(inner_parts_color, self._appearance_mode))

            # set text_label bg color to button hover color
            if self.text_label is not None:
                self.text_label.configure(bg=ThemeManager.single_color(inner_parts_color, self._appearance_mode))

            # set image_label bg color to button hover color
            if self.image_label is not None:
                self.image_label.configure(bg=ThemeManager.single_color(inner_parts_color, self._appearance_mode))

    def on_leave(self, event=None):
        """Hover-out handler: restore normal colors and stop any running
        click animation."""
        self.click_animation_running = False

        if self.hover is True:
            if self.fg_color is None:
                inner_parts_color = self.bg_color
            else:
                inner_parts_color = self.fg_color

            # set color of inner button parts
            self.canvas.itemconfig("inner_parts",
                                   outline=ThemeManager.single_color(inner_parts_color, self._appearance_mode),
                                   fill=ThemeManager.single_color(inner_parts_color, self._appearance_mode))

            # set text_label bg color (label color)
            if self.text_label is not None:
                self.text_label.configure(bg=ThemeManager.single_color(inner_parts_color, self._appearance_mode))

            # set image_label bg color (image bg color)
            if self.image_label is not None:
                self.image_label.configure(bg=ThemeManager.single_color(inner_parts_color, self._appearance_mode))

    def click_animation(self):
        """Deferred half of the click animation: re-apply the hover color
        unless on_leave() cancelled it in the meantime."""
        if self.click_animation_running:
            self.on_enter()

    def clicked(self, event=None):
        """Click handler: run the click animation and invoke the command."""
        if self.command is not None:
            if self.state != tkinter.DISABLED:
                # click animation: change color with .on_leave() and back to normal after 100ms with click_animation()
                self.on_leave()
                self.click_animation_running = True
                self.after(100, self.click_animation)

                self.command()
|
"""empty message
Revision ID: 0101_een_logo
Revises: 0100_notification_created_by
Create Date: 2017-06-26 11:43:30.374723
"""
from alembic import op
revision = "0101_een_logo"
down_revision = "0100_notification_created_by"
ENTERPRISE_EUROPE_NETWORK_ID = "89ce468b-fb29-4d5d-bd3f-d468fb6f7c36"
def upgrade():
    """Seed the Enterprise Europe Network organisation row."""
    statement = """INSERT INTO organisation VALUES (
        '{}',
        '',
        'een_x2.png',
        'een'
    )""".format(ENTERPRISE_EUROPE_NETWORK_ID)
    op.execute(statement)
def downgrade():
    """Remove the seeded Enterprise Europe Network organisation row."""
    statement = """
    DELETE FROM organisation WHERE "id" = '{}'
    """.format(ENTERPRISE_EUROPE_NETWORK_ID)
    op.execute(statement)
|
#!/usr/bin/env python
# coding:utf-8
__author__ = 'XingHua'
"""
闲着没事做,前段时间买了个摄像头,在ubuntu上用。打开cheese这个软件,一片空白,怎么不能用阿!
google一番,装上gspca,还是不能用!
用lsusb命令查看下
lingshangwen@eagle:~$ lsusb
Bus 005 Device 001: ID 0000:0000
Bus 004 Device 001: ID 0000:0000
Bus 003 Device 001: ID 0000:0000
Bus 002 Device 002: ID 0c45:5208 Microdia
Bus 002 Device 001: ID 0000:0000
Bus 001 Device 006: ID 058f:3820 Alcor Micro Corp.
Bus 001 Device 005: ID 0a12:0001 Cambridge Silicon Radio, Ltd Bluetooth Dongle (HCI mode)
Bus 001 Device 004: ID 05e3:0606 Genesys Logic, Inc. D-Link DUB-H4 USB 2.0 Hub
Bus 001 Device 001: ID 0000:0000
摄像头已经被识别出来,怎么就是不能用阿!!!!!!
还是自己动手,用python+opencv写段简单的代码吧,然后就有了下面的代码:
"""
import wx
from cv2.cv import *
from cv2.highgui import *
class MyFrame(wx.Frame):
    # Simple webcam viewer: grabs a frame from the first camera on every
    # wx idle event and blits it straight onto the window.
    def __init__(self):
        wx.Frame.__init__(self, None, -1, 'camera')
        self.SetClientSize((640, 480))
        # camera handle from the legacy OpenCV 1.x API (cv2.cv), device 0
        self.cap = CreateCameraCapture(0)
        self.Bind(wx.EVT_IDLE, self.onIdle)

    def onIdle(self, event):
        # grab and show one frame, then request more idle events so the
        # capture loop keeps running
        img = QueryFrame(self.cap)
        self.displayImage(img)
        event.RequestMore()

    def displayImage(self, img, offset=(0,0)):
        # Wrap the raw IplImage pixel buffer in a wx bitmap and draw it
        # directly with a client DC at the given offset.
        # NOTE(review): assumes img.imageData is 24-bit RGB in wx's expected
        # channel order — confirm against the camera's actual format.
        bitmap = wx.BitmapFromBuffer(img.width, img.height, img.imageData)
        dc = wx.ClientDC(self)
        dc.DrawBitmap(bitmap, offset[0], offset[1], False)
if __name__=="__main__":
    # Start the wx application and block in the GUI main loop.
    app = wx.App()
    frame = MyFrame()
    frame.Show(True)
    app.MainLoop()
@metadata_reactor
def install_git(metadata):
    """Request installation of the `git` apt package via node metadata."""
    packages = {
        'git': {
            'installed': True,
        },
    }
    return {'apt': {'packages': packages}}
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Fix: rcParams only affect artists created AFTER they are set. The font was
# originally configured after plt.xlabel/ylabel, so the Chinese axis labels
# were rendered with the default font (missing glyphs). Set it up front.
plt.rcParams['font.sans-serif'] = ['Arial Unicode MS']

df = pd.read_csv('at1_with_mean.csv')
df = df[df['type'] == 'A']  # keep supplier type A only
print(len(df))

# Histogram of yearly mean supply volume for type-A suppliers.
plt.figure(dpi=300)
plt.hist(df['mean'], bins=10)
plt.xlabel('A供货商年平均供货量(立方米)')
plt.ylabel('频率')
plt.show()
from django import template
register = template.Library()
# @register.inclusion_tag('articles/grid.html')
@register.simple_tag
def show_grid(articles):
    """Return the grid layout name for a list of articles:
    'full' for exactly one, 'double' for an even count, 'triple' otherwise."""
    count = len(articles)
    if count == 1:
        return 'full'
    return 'double' if count % 2 == 0 else 'triple'
|
import sys
def addOne(arr):
    """Increment the binary digit list *arr* in place by one and return it.
    Any carry out of the leftmost digit is dropped."""
    carry = 1
    for pos in reversed(range(len(arr))):
        total = arr[pos] + carry
        arr[pos] = total % 2
        carry = total // 2
    return arr
def nonTrivialDivisors(num):
    """Return the smallest divisor of *num* in [2, num), or None if there
    is none (i.e. *num* is prime or < 4)."""
    return next((cand for cand in range(2, num) if num % cand == 0), None)
def isPrime(num):
    """Trial-division primality test: True iff *num* is prime."""
    if num in (2, 3):
        return True
    if num <= 1 or num % 2 == 0:
        return False
    # only odd candidates up to sqrt(num) need checking
    return all(num % cand for cand in range(3, int(num ** 0.5) + 1, 2))
def numConverter(num, base):  # num given as a string
    """Interpret the digit string *num* positionally in *base* and return
    its decimal value (ints are accepted and stringified first)."""
    digits = str(num)
    total = 0
    for place, digit in enumerate(reversed(digits)):
        total += int(digit) * base ** place
    return total
def coinJam(N, J):
    # Generate up to J "coin jams": length-N 0/1 digit strings (first and
    # last digit forced to 1) whose interpretation in EVERY base 2..10 is
    # composite; each result line is the jam followed by one nontrivial
    # divisor per base. (Google Code Jam 2016 "Coin Jam".)
    results = []
    start = [0]*N
    start[0], start[-1] = 1, 1
    num = start
    while sum(num) != N:  # stop once every digit is 1 (last candidate)
        # advance the middle digits as a binary counter; both ends stay 1
        num = [1] + addOne(num[1:-1]) + [1]
        strNum = ''.join(map(str, num))
        isPrimes = []
        convertedNums = []
        for base in range(2, 11):
            convertedNum = numConverter(strNum, base)
            convertedNums.append(convertedNum)
            isPrimes.append(isPrime(convertedNum))
            if any(isPrimes):
                break  # prime in some base -> not a jam, skip early
        if not any(isPrimes) and len(results) < J:
            divisors = map(nonTrivialDivisors, convertedNums)
            results.append(strNum + ' ' + ' '.join(map(str, divisors)))
    for res in results:
        print res
# Read the Code Jam input file named on the command line: N and J are on the
# second line.
# NOTE(review): the first line is read into `num` but never used — confirm it
# is the case count of the input format.
f = open(sys.argv[1], 'r')
num = f.readline().strip()
N, J = map(int, f.readline().strip().split())
print 'Case #1:'
coinJam(N, J)
|
import json
import time
import requests
import sys
from bs4 import BeautifulSoup
def retriveGitRepoFromPyPI(repo):
    """Scrape the PyPI project page of *repo* and return its "Project links"
    sidebar URLs.

    Returns
    -------
    list of str
        The hrefs of the "Project links" section, when present.
    None
        When the HTTP request fails or no "Project links" section is found.
    """
    req = requests.get("https://pypi.org/project/" + repo)

    # Fix: the original set results=False on HTTP failure and then tested
    # `if not results`, which also matched an EMPTY find_all() result — a 200
    # page with no sidebar printed "Request error with code 200". The dead
    # `elif results == None` branch could never run (find_all returns a list).
    if req.status_code != requests.codes.ok:
        print("Request error with code " + str(req.status_code))
        return None

    soup = BeautifulSoup(req.text, 'html.parser')
    results = soup.find_all('div', {'class': 'sidebar-section'})
    if not results:
        print("No git repo")
        return None

    git_repo = []
    for section in results:
        heading = section.find('h3')
        # guard: a section without an <h3> previously raised AttributeError
        if heading is not None and heading.text == "Project links":
            children = section.find_all('a', {'class': 'vertical-tabs__tab vertical-tabs__tab--with-icon vertical-tabs__tab--condensed'})
            for child in children:
                git_repo.append(child['href'])
            return git_repo
    return None
if __name__ == "__main__":
    # Module is library-only; bail out when executed directly.
    # Fix: use sys.exit() — the bare exit() builtin is injected by the `site`
    # module and is not guaranteed to exist in every interpreter environment.
    sys.exit()
|
#librairies
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import quad, fixed_quad, romberg
# Parameter grid: 100 samples of s0 strictly inside (1, 10); the open lower
# end avoids the s0=1 singularity in beta0 below.
s0 = np.linspace(1.1, 9.9, 100) #parameter s0 for 1<s0<10
print(s0)
beta0 = np.sqrt(1-1/s0) #parameter beta0 (formula 11 of the Gould's article)
w0 = (1+beta0)/(1-beta0) #parameter w0 (formula 11 of the Gould's article)
n = len(s0) #size of s0
#integrande
def integrand(w):
    """Integrand log(w + 1) / w of the L(w0) integral (Gould's article)."""
    return np.log(w + 1) / w
#general integration (quad)
#Lq = np.zeros_like(s0)
#for i in range(0,n) :
# Lq[i] = quad(integrand, 1, w0[i])[0]
#Gaussian quadrature
#Lg = fixed_quad(integrand, 1, w0[1])[0]
#Romberg's method
# Integrate log(w+1)/w from 1 to each w0[i] with Romberg's method.
# NOTE(review): scipy.integrate.romberg is deprecated and removed in recent
# SciPy releases — confirm the pinned SciPy version or switch to quad.
Lr = np.zeros_like(s0)
for i in range(0,n) :
    Lr[i] = romberg(integrand, 1, w0[i], tol=1.48e-08, rtol=1.48e-08)
#function phi (formula 10 of the Gould's article)
def phi(beta0, w0, Lr):
    """Evaluate phi/(s0-1) (formula 10 of Gould's article) from beta0, w0
    and the precomputed integral term Lr (all scalars or numpy arrays)."""
    ln_w0 = np.log(w0)
    one_minus_b2 = 1 - beta0**2
    terms = (
        (1 + beta0**2) / one_minus_b2 * ln_w0,
        -ln_w0 * beta0**2,
        -ln_w0**2,
        -(4 * beta0) / one_minus_b2,
        2 * beta0,
        4 * ln_w0 * np.log(w0 + 1),
        -Lr,
    )
    return sum(terms)
# Evaluate phi on the whole s0 grid and plot it.
phis = phi(beta0, w0, Lr)
#Graph
plt.plot(s0, phis)
plt.xlabel('parameter s0')
plt.ylabel('phi/(s0-1)')
plt.show()
|
from postgres_connect import Postgres_Connect
class Sentiment_Data_Postgres(object):
    """Thin helper around a Postgres connection for creating, loading and
    querying the `reviews` table."""

    def __init__(self):
        # NOTE(review): credentials are hard-coded — move them to config/env.
        self.con = Postgres_Connect().getConnection('cse591prj' , 'postgres', 'localhost', 'ok')
        self.cur = self.con.cursor()

    def load_data(self, file):
        """Create the reviews table and bulk-load it from *file*
        (a '|'-delimited file readable by the Postgres server)."""
        self.cur.execute('''
            CREATE TABLE reviews(
            col1 varchar(10),
            col2 varchar(10),
            col3 varchar(10),
            col4 varchar(10),
            col5 varchar(10),
            col6 varchar(10),
            col7 varchar(10),
            col8 varchar(6000));
        ''')
        self.con.commit()
        # COPY takes a server-side path that cannot be a bound parameter;
        # only call this with trusted, server-local file paths.
        q = "COPY reviews FROM '" + file + "' DELIMITER '|';"
        self.cur.execute(q)
        self.con.commit()

    def get_review(self, key):
        """Return the review text (col8) for the row whose col1 equals *key*."""
        # Fix: *key* was string-concatenated into the SQL, allowing SQL
        # injection; use a DB-API bound parameter instead.
        self.cur.execute("SELECT col8 FROM reviews WHERE col1 = %s;", (key,))
        return self.cur.fetchone()[0]

    def delete_schema(self):
        """Drop the reviews table."""
        self.cur.execute('DROP TABLE reviews;')
        self.con.commit()
#/home/master/Desktop/cse591_adb_project/rowvsdocstore/data/product_reviews_1.dat
#COPY reviews FROM '/home/master/Desktop/cse591_adb_project/rowvsdocstore/data/product_reviews_1.dat' DELIMITER '|'; |
from django.shortcuts import render
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
from .models import *
from .serializers import *
from django.contrib.auth.models import User
# ViewSets define the view behavior.
class ProjectViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for projects, limited to projects the requesting
    user has joined. Requires authentication."""
    permission_classes = (IsAuthenticated, )
    serializer_class = ProjectSerializer

    def get_queryset(self):
        user = self.request.user
        # Fix: return a real QuerySet instead of the bare related manager so
        # DRF filtering/pagination can chain onto it (per DRF's get_queryset
        # contract).
        return user.joined_project_set.all()
class EthogramViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Ethogram records; authenticated users only."""
    serializer_class = EthogramSerializer
    queryset = Ethogram.objects.all()
    permission_classes = (IsAuthenticated, )
class BehaviourViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Behaviour records; authenticated users only."""
    serializer_class = BehaviourSerializer
    queryset = Behaviour.objects.all()
    permission_classes = (IsAuthenticated, )
class SessionViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Session records; authenticated users only."""
    serializer_class = SessionSerializer
    queryset = Session.objects.all()
    permission_classes = (IsAuthenticated, )
class ObservationViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Observation records; authenticated users only."""
    serializer_class = ObservationSerializer
    queryset = Observation.objects.all()
    permission_classes = (IsAuthenticated, )
class IndividualViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Individual records; authenticated users only."""
    serializer_class = IndividualSerializer
    queryset = Individual.objects.all()
    permission_classes = (IsAuthenticated, )
class TagViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Tag records; authenticated users only."""
    serializer_class = TagSerializer
    queryset = Tag.objects.all()
    permission_classes = (IsAuthenticated, )
class LocationViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Location records; authenticated users only."""
    serializer_class = LocationSerializer
    queryset = Location.objects.all()
    permission_classes = (IsAuthenticated, )
class WeatherViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Weather records; authenticated users only."""
    serializer_class = WeatherSerializer
    queryset = Weather.objects.all()
    permission_classes = (IsAuthenticated, )
class PhotoViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Photo records; authenticated users only."""
    serializer_class = PhotoSerializer
    queryset = Photo.objects.all()
    permission_classes = (IsAuthenticated, )
class DummyViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Dummy records; authenticated users only."""
    serializer_class = DummySerializer
    queryset = Dummy.objects.all()
    permission_classes = (IsAuthenticated, )
|
import os
import shutil
import time
from pathlib import Path
def clear_directory(path):
    """
    Removes the content of a directory without removing the directory itself.

    Fix: the original iterated os.walk() recursively although every entry is
    already destroyed at the top level — it only worked because os.walk
    silently ignores the directories removed underneath it, and it would
    crash (rmtree on a symlink) for symlinked subdirectories. A single
    top-level pass is sufficient and handles symlinks.
    """
    for entry in os.scandir(path):
        if entry.is_dir(follow_symlinks=False):
            shutil.rmtree(entry.path)
        else:
            os.unlink(entry.path)
def remove_directory(path):
    """Delete the directory *path* together with everything beneath it."""
    shutil.rmtree(path)
def create_temporary_copy(path):
    """
    Creates a working directory with a copy of the project files, that can be altered by the program
    and used for the compilations.
    """
    # Unique build directory under ./vary/build keyed by wall-clock time.
    # NOTE(review): depends on the current working directory — confirm callers
    # always run from the project root.
    timestamp = str(time.time())
    tmp_path = os.path.join(os.getcwd(), "vary/build", timestamp)
    try:
        shutil.copytree(path, tmp_path)
        # drop the shared macros file next to the copied sources
        macro_path = os.path.join(os.path.split(os.path.realpath(__file__))[0], "../macros.tex")
        macro_copy_path = os.path.join(tmp_path, "macros.tex")
        shutil.copyfile(macro_path, macro_copy_path)
    except shutil.Error:
        # best effort: the path is still returned even when the copy failed
        print("Error creating the temporary copy")
    return tmp_path
def create_dir(path):
    """Create *path* (including missing parents); no-op if it already exists."""
    target = Path(path)
    target.mkdir(parents=True, exist_ok=True)
def get_secret_key(path):
    """Return the secret key stored at *path*; generate and persist a fresh
    16-byte key on first use."""
    if not os.path.isfile(path):
        fresh = os.urandom(16)
        with open(path, 'ab') as fh:
            fh.write(fresh)
        return fresh
    with open(path, 'rb') as fh:
        return fh.read()
def convert_tuple(tuple_input):
    ''' function: takes tuple and converts to list
        parameters: one tuple
        returns: same tuple but as a list'''
    # The manual index/while loop re-implemented the list() constructor;
    # list() is the idiomatic, O(n) equivalent.
    return list(tuple_input)
def main():
    # Demo: convert a sample tuple and print the resulting list.
    tuple_input = ("My", "Name" , "Is", "Katie")
    print(convert_tuple(tuple_input))

# runs on import as well — no __main__ guard in the original
main()
|
import math
import numpy as np
import writer
import forwardmodel
from datetime import datetime, timedelta
def average_spectra(config, oe_inputs):
    # Boxcar-average spectra (Y), surface pressure (p) and the time axes in
    # windows of half-width `td` around centers spaced 2*td apart, rewriting
    # the corresponding entries of oe_inputs in place.
    # NOTE(review): td = tres*30 with 2*td-wide windows suggests `tres` is in
    # minutes and `epoch_times` in seconds — confirm units.
    tres = config['tres']
    td = tres*30
    epoch_times = oe_inputs['epoch_times']
    center = epoch_times[0] + td
    Ys = []
    Ps = []
    Dts = []
    Ep = []
    Rain = []
    while center <= epoch_times[-1]:
        # indices of the samples falling inside the +/- td window
        idx = np.where((epoch_times >= center - td) & (epoch_times <= center + td))[0]
        new_Y = np.mean(oe_inputs['Y'][idx,:], axis=0)
        new_p = np.mean(oe_inputs['p'][idx])
        Ep.append(center)
        Dts.append(datetime.utcfromtimestamp(center))
        Ps.append(new_p)
        Ys.append(new_Y)
        # NOTE(review): this tests the rainflags of the WHOLE record, not just
        # this window, so every window gets the same flag — confirm intent.
        if np.sum(oe_inputs['rainflags']) > 0:
            Rain.append(1)
        else:
            Rain.append(0)
        center = center + 2*td
    print "Spectral averaging reduced the number of spectra from: " + str(len(oe_inputs['Y'])) + ' to ' + str(len(Ys))
    # overwrite the inputs in place with the averaged series
    oe_inputs['dt_times'] = np.asarray(Dts)
    oe_inputs['Y'] = np.asarray(Ys)
    oe_inputs['rainflags'] = np.asarray(Rain)
    oe_inputs['p'] = np.asarray(Ps)
    oe_inputs['epoch_times'] = np.asarray(Ep)
    return oe_inputs
def getProfilePresRH(alt, T_z, Q_z, sfc_pres):
    '''
    getProfilePresRH

    Build pressure and relative-humidity profiles from a surface pressure
    value and profiles of temperature and water vapor mixing ratio, using
    the hypsometric equation (no virtual temperature correction) and q2rh.

    Parameters
    ----------
    alt : array of heights [meters]
    T_z : array, temperature profile [C]
    Q_z : array, water vapor mixing ratio profile [g/kg]
    sfc_pres : surface pressure [mb]

    Returns
    -------
    P_z : array, pressure profile [mb]
    RH_z : array, relative humidity profile [%]
    '''
    K_C = 273.15
    Rd = 287   # dry-air gas constant [J/(kg K)]
    g = 9.81   # gravitational acceleration [m/s^2]

    temp_K = T_z + K_C

    # integrate the hypsometric equation upward from the surface, layer by
    # layer, using each layer's mean temperature
    P_z = np.empty(len(temp_K))
    P_z[0] = sfc_pres
    for lev in range(1, len(temp_K)):
        layer_mean_T = 0.5 * (temp_K[lev] + temp_K[lev - 1])
        P_z[lev] = P_z[lev - 1] * np.exp(-g * (alt[lev] - alt[lev - 1]) / (Rd * layer_mean_T))

    RH_z = q2rh(Q_z, P_z, T_z)
    return P_z, RH_z
def q2rh(Q_z, P_z, T_z):
    '''
    q2rh

    Convert a water vapor mixing ratio profile to relative humidity using
    the Clausius-Clapeyron equation.

    Parameters
    ----------
    Q_z : array of water vapor mixing ratio values [g/kg]
    P_z : array of the pressure profile [mb]
    T_z : array of the temperature profile [C]

    Returns
    -------
    RH_z : array of the relative humidity profile [%]
    '''
    # constants
    epsilon = 622.0   # Rd/Rv ratio in g/kg form, dimensionless
    T0 = 273.15       # reference temperature [K]
    Rv = 461.5        # specific water vapor gas constant [J/(kg K)]
    L = 2.5e6         # latent heat of vaporization [J/kg]
    es0 = 611         # saturation vapor pressure at T0 [Pa]

    # unit conversions: C -> K, mb -> Pa
    temp_K = T_z + 273.15
    pres_pa = P_z * 100.0

    # saturation vapor pressure (Clausius-Clapeyron)
    e_sat = es0 * np.exp((L / Rv) * (1 / T0 - 1 / temp_K))
    # actual vapor pressure from the mixing ratio
    e_act = (Q_z * pres_pa) / (epsilon + Q_z)

    return (e_act / e_sat) * 100.0
def rms(Y, Fx):
    """
    rms

    Root-mean-squared error between observation and forward model.

    Parameters
    ----------
    Y : observed spectra used in the retrieval (K)
    Fx : forward model calculation from the state vector (K)

    Returns
    -------
    rmse : the root mean squared error (K)
    """
    residual = Y - Fx
    return np.sqrt(np.mean(residual * residual))
def sic(Sa, Sop):
    """
    sic
    Calculates the Shannon Information Content.
    Parameters
    ----------
    Sa : the prior covariance matrix
    Sop : the retrieved profile (optimal) covariance matrix
    Returns
    -------
    sic : the Shannon Information Content
    """
    # NOTE(review): for plain ndarrays `*` is ELEMENTWISE multiplication,
    # while the Shannon information content 0.5*ln(det(Sa @ inv(Sop)))
    # requires a matrix product. This is only correct if callers pass
    # np.matrix (where `*` is matmul) — confirm caller types before changing.
    mp = Sa * np.linalg.inv(Sop)
    det_mp = np.linalg.det(mp)
    sic = 0.5 * np.log(det_mp)
    return sic
def dfs(A):
    """
    dfs
    Calculates the degrees of freedom of signal for portions of the retrieved
    profile. The state vector is assumed to be [T(z), Q(z), LWP], with the
    two profiles occupying equal halves of the first A.shape[0]-1 elements.

    Parameters
    ----------
    A : the averaging kernel from the retrieval

    Returns
    -------
    dfs : array with DFS for the entire retrieval, the temperature profile,
          the water vapor mixing ratio profile, and the LWP
    """
    tot_num = A.shape[0] - 1
    # Fix: was `tot_num / 2.` — a float, which is invalid as a slice index on
    # Python 3 / modern NumPy. Integer floor division preserves the intent.
    num_alts = tot_num // 2
    dfs = np.empty(4)
    # DFS contributions are the diagonal elements of A
    diag_A = np.diag(A, 0)
    dfs[0] = np.sum(diag_A)                    # total
    dfs[1] = np.sum(diag_A[0:num_alts])        # T(z)
    dfs[2] = np.sum(diag_A[num_alts:tot_num])  # Q(z)
    dfs[3] = diag_A[tot_num]                   # LWP
    return dfs
def vres(A, alt):
    """
    vres
    Calculates the vertical resolution of the retrieved profile via the
    method used in Hewson (2007): layer thickness divided by the averaging
    kernel diagonal (zeros masked to avoid division by zero).

    Parameters
    ----------
    A : the averaging kernel from the retrieval
    alt : the height grid (km)

    Returns
    -------
    T_vres : the vertical resolution of the temperature profile
    Q_vres : the vertical resolution of the water vapor mixing ratio profile
    """
    tot_num = A.shape[0] - 1
    # Fix: was `tot_num / 2.` — a float, which fails as an array index on
    # Python 3 / modern NumPy. Integer floor division preserves the intent.
    num_alts = tot_num // 2
    # half-layer thicknesses: half spacing at the bottom, centered
    # differences in the interior, doubled spacing at the top
    zres = [(alt[1] - alt[0])/2.]
    for i in np.arange(1, num_alts - 1):
        zres.append((alt[i+1]-alt[i-1])/2.)
    zres.append((alt[num_alts-1]- alt[num_alts-2])*2.)
    A_diag = np.diag(A)
    A_diag = np.ma.masked_where(A_diag == 0, A_diag)
    T_vres = zres/A_diag[:num_alts]
    Q_vres = zres/A_diag[num_alts: num_alts*2]
    return T_vres, Q_vres
def convert_time(time, flag):
    '''
    convert_time

    Convert between decimal-hour and 'hhmm' time representations.

    Parameters
    ----------
    time : float or str
        Decimal hours when flag == 'hhmm'; an 'hhmm' string when
        flag == 'h.hf'.
    flag : str
        'hhmm' -> format *time* (decimal hours) as a zero-padded 'hhmm'
        string; 'h.hf' -> convert *time* ('hhmm' string) to a decimal-hour
        string.

    Returns
    -------
    time_s : str
        The converted time string.
    '''
    if flag == 'hhmm':
        frac, whole = math.modf(time)
        minutes = str(int(frac * 60)).zfill(2)
        hours = str(int(whole)).zfill(2)
        time_s = hours + minutes
    elif flag == 'h.hf':
        hours = time[0:2]
        # '0.5' -> '.5' so it can be appended to the hour digits
        frac = str(float(time[2:4]) / 60).replace('0.', '.')
        time_s = str(float(hours + frac))
    return time_s
|
"""
pygments.lexers.fift
~~~~~~~~~~~~~~~~~~~~
Lexers for fift.
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include
from pygments.token import Literal, Comment, Name, String, Number, Whitespace
__all__ = ['FiftLexer']
class FiftLexer(RegexLexer):
    """
    For Fift source code.
    """

    name = 'Fift'
    aliases = ['fift', 'fif']
    filenames = ['*.fif']
    url = 'https://ton-blockchain.github.io/docs/fiftbase.pdf'

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            include('comments'),

            # string literal, optionally prefixed by '.' or '+'
            (r'[\.+]?\"', String, 'string'),

            # numbers (hex, binary, decimal with optional "/"-fraction)
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'0b[01]+', Number.Bin),
            (r'-?[0-9]+("/"-?[0-9]+)?', Number.Decimal),

            # slices
            (r'b\{[01]+\}', Literal),
            (r'x\{[0-9a-fA-F_]+\}', Literal),

            # byte literal
            (r'B\{[0-9a-fA-F_]+\}', Literal),

            # treat anything as word
            (r'\S+', Name)
        ],

        'string': [
            (r'\\.', String.Escape),
            (r'\"', String, '#pop'),
            (r'[^\"\r\n\\]+', String)
        ],

        # // line comments, plus nestable /* */ block comments via the
        # #push/#pop state below
        'comments': [
            (r'//.*', Comment.Singleline),
            (r'/\*', Comment.Multiline, 'comment'),
        ],
        'comment': [
            (r'[^/*]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline),
        ],
    }
|
import FWCore.ParameterSet.Config as cms

# CMSSW job: run MyMiniAODAnalyzer over 2016 ZeroBias MiniAOD (PromptReco).
process = cms.Process("ANALYSIS")
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(
        '/store/data/Run2016G/ZeroBias/MINIAOD/PromptReco-v1/000/280/385/00000/08080C9F-C078-E611-AEF1-FA163E5647FC.root',
        '/store/data/Run2016G/ZeroBias/MINIAOD/PromptReco-v1/000/280/385/00000/084B4FBE-C078-E611-9740-02163E013993.root',
        '/store/data/Run2016G/ZeroBias/MINIAOD/PromptReco-v1/000/280/385/00000/0A6B1BD0-C078-E611-A332-02163E014113.root'
    ),
    secondaryFileNames = cms.untracked.vstring(),
    # lumisToProcess = cms.untracked.VLuminosityBlockRange('258158:1-258158:1786'),
)

# Conditions and geometry: prompt-reco global tag for 2016 data.
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff")
process.GlobalTag.globaltag = '80X_dataRun2_Prompt_v11'
process.load("Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff")
process.load('Configuration.Geometry.GeometryRecoDB_cff')

# Per-subsystem DQM certification text files, one per entry in subsystemList.
basePath = '/afs/cern.ch/user/d/deguio/public/qualityPerSub_2016/'
subsystemList = ['L1tcalo','L1tmu','Hlt','Pix','Strip','Ecal','Hcal','Dt','Rpc','Es','Csc','Track','Egamma','Muon','Jetmet','Lumi']
fileList = []
for sub in subsystemList:
    fileList.append(basePath+'Cert_13TeV_2016_'+sub+'.txt')

process.MyAnalysis =cms.EDAnalyzer("MyMiniAODAnalyzer",
    #caloJetTag = cms.untracked.InputTag("slimmedJets"),
    PFJetTag = cms.untracked.InputTag("slimmedJets"),
    metTag = cms.untracked.InputTag("slimmedMETs"),
    vtx = cms.untracked.InputTag("offlineSlimmedPrimaryVertices"),
    bits = cms.untracked.InputTag("TriggerResults","","HLT"),
    prescales = cms.untracked.InputTag("patTrigger"),
    maxJetEta = cms.untracked.double(5.0), #is this ok?
    minJetPt = cms.untracked.double(10.0), #is this ok?
    lumiFile = cms.untracked.string(basePath+'run_ls_lumi_2016.txt'),
    subsystems = cms.untracked.vstring(subsystemList),
    qualityFiles = cms.untracked.vstring(fileList),
    quantiles = cms.untracked.vdouble(0.,0.25,0.50,0.75,1.)
)
process.mypath = cms.Path(process.MyAnalysis)

# Output ROOT file for the analyzer's TTrees/histograms.
process.TFileService = cms.Service("TFileService",
    fileName = cms.string("miniAODTree.root"),
    closeFileFast = cms.untracked.bool(False)
)
# -1 = process every event in the input files.
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(-1))
process.options = cms.untracked.PSet(
    #allowUnscheduled = cms.untracked.bool(True),
    wantSummary = cms.untracked.bool(True),
)
# Only ERROR-level messages reach stderr.
process.MessageLogger = cms.Service("MessageLogger",
    destinations = cms.untracked.vstring('cerr'),
    cerr = cms.untracked.PSet(
        threshold = cms.untracked.string('ERROR'),
    ),
    debugModules = cms.untracked.vstring('*')
)
|
def SmallestMultiple():
    """Project Euler 5: smallest positive number evenly divisible by 1..20.

    Computed directly as lcm(1..20) via reduce/gcd instead of brute-force
    stepping in increments of 2520, and without the stray debug print the
    original emitted on every loop pass.

    Returns:
        int: 232792560, the least common multiple of 1 through 20.
    """
    from functools import reduce
    from math import gcd
    # lcm(a, b) == a * b // gcd(a, b); fold it over 1..20.
    return reduce(lambda acc, k: acc * k // gcd(acc, k), range(1, 21), 1)
def checker(num):
    """Return True when num is divisible by every integer from 1 to 20."""
    return all(num % divisor == 0 for divisor in range(1, 21))
print(SmallestMultiple()) |
import numpy as np

# 6x6 matrix with ones in the odd columns of the even rows, zeros elsewhere.
a = np.array([
    [0, 1, 0, 1, 0, 1],
    [0, 0, 0, 0, 0, 0],
    [0, 1, 0, 1, 0, 1],
    [0, 0, 0, 0, 0, 0],
    [0, 1, 0, 1, 0, 1],
    [0, 0, 0, 0, 0, 0],
])
# Column vector of ones.
u = np.ones((6, 1), dtype=int)
at = a.T
# Two rounds of the alternating product (a.T @ ..., a @ ...).
a1 = at @ u
h1 = a @ a1
a2 = at @ h1
h2 = a @ a2
print(a2)
print(h2)
|
#!/usr/bin/env python
import sys, os, shutil
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from local_common import LOCAL_DIR, LocalFile, LocalDownloader, LocalUploader, LocalLinker, FilesAndPaths
sys.path.append(os.path.join(os.path.dirname(__file__), '../../'))
from core.xcor import Xcor
def testXcor(working_dir):
    """Smoke-test the Xcor pipeline end to end using the local file adapters.

    Runs download -> process -> upload -> output against a fixed BAM in the
    hard-coded NFS output folder and prints the resulting output mapping.

    NOTE(review): Python 2 syntax (print statements, dict.iteritems) -- this
    script cannot run under Python 3 as written.
    """
    # NOTE(review): hard-coded, environment-specific path.
    outFolder = "/nfs/0_metadata@bib5/dnanexus_refactor_test/output_test/"
    os.chdir(outFolder)
    #The files need to be checked
    xcr = Xcor("R1.raw.srt.filt.nodup.srt.bam",LocalFile)
    xcr.download(LocalDownloader)
    xcr.process(working_dir)
    xcr.upload(LocalUploader)
    output = xcr.output(LocalLinker)
    print "************************** output"
    for k, v in output.iteritems():
        print k, v
def main():
    """Run the Xcor smoke test with the repo root (one level up) as workdir."""
    working_dir=os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')
    print working_dir
    # testXcor has no return statement, so this prints None after the trace.
    print testXcor(working_dir)

if __name__ == '__main__':
    # main() returns None, so the process exits with status 0.
    sys.exit(main())
|
class Emp:
    """Demo of protected (single underscore) vs name-mangled private
    (double underscore) class attributes."""

    _emp_id = 101        # protected by convention only
    __emp_name = "Ram"   # mangled to _Emp__emp_name outside the class
    __emp_sal = 25000
    __emp_dept = 'IT'

    def show(self):
        # Inside the class body the mangled names resolve normally.
        print(self._emp_id, self.__emp_name, self.__emp_dept, self.__emp_sal)
# Demo: from outside the class, the double-underscore names are only
# reachable through their mangled form (obj._Emp__emp_name).
obj = Emp()
# obj.__emp_name = 'Shyam'
# print(obj._emp_id)
obj.show()
"""
A company is booking flights to send its employees to its two satellite
offices A and B. The cost of sending the I'th employee to office A and
office B is given by prices[i][0] and prices[i][1] respectively.
Given that half the employees must be sent to office A and half the
employees must be sent to office B, return the minimum cost the company
must pay for all their employees’ flights.
Ex: Given the following prices...
prices = [[40, 30],[300, 200],[50, 50],[30, 60]], return 310
Fly the first person to office B.
Fly the second person to office B.
Fly the third person to office A.
Fly the fourth person to office A.
"""
# TODO: incomplete
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-17 19:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: change the default text of article.article_summary.

    NOTE(review): this migration has presumably been applied -- do not edit
    the default string here; create a new migration for any change.
    """
    dependencies = [
        ('newshome', '0009_article_article_summary'),
    ]
    operations = [
        migrations.AlterField(
            model_name='article',
            name='article_summary',
            field=models.TextField(default='I wil put in shit, wait! '),
        ),
    ]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
# Create your models here.
class tipousuario(models.Model):
    """User-type lookup table; displayed by its name.

    NOTE(review): PEP 8 would name this TipoUsuario, but renaming a Django
    model requires a migration, so it is left as-is.
    """
    nombre = models.CharField(max_length=50)
    def __str__(self):
        return self.nombre
class usuario(models.Model):
    """User with an optional type; deleting the type cascades to its users."""
    nombre = models.CharField(max_length=50)
    tipousuario = models.ForeignKey(tipousuario, null=True, blank=True, on_delete=models.CASCADE)
    def __str__(self):
        return self.nombre
import logging
from elasticsearch import TransportError
from tastypie.utils import trailing_slash
from tastypie.resources import ModelResource
from tastypie.paginator import Paginator as TastypiePaginator
from haystack.query import SearchQuerySet
from django.conf import settings
from django.conf.urls import url
from django.core.paginator import Paginator, InvalidPage
from django.http import Http404
log = logging.getLogger('django')
class SearchPaginator(TastypiePaginator):
    """Tastypie paginator whose `objects` is a Django Paginator page:
    the total count and current page number are read from it."""

    def get_count(self):
        # Total result count comes from the underlying Django paginator.
        return self.objects.paginator.count

    def page(self):
        data = super(SearchPaginator, self).page()
        data['meta']['page'] = self.objects.number
        return data
class SimpleSearchResource(object):
    """Mixin adding haystack-backed search to a tastypie resource.

    Subclasses may override `build_search_query` to refine the query, and
    the class attributes below to tune behaviour.
    """
    min_autocomplete_query_length = 0
    force_search_results_limit = 0  # 0 means no cap on results

    def get_search_query(self, request, **kwargs):
        """Return a SearchQuerySet restricted to this resource's model."""
        # Haystack stores the content type as "app_label.modelname".
        cls_meta = self._meta.object_class._meta
        django_ct = '{0}.{1}'.format(cls_meta.app_label, cls_meta.object_name)
        search_query = SearchQuerySet().filter(django_ct=django_ct.lower())
        search_query = self.build_search_query(request, search_query)
        if self.force_search_results_limit:
            search_query = search_query[:self.force_search_results_limit]
        return search_query

    def get_paginated_search_query(self, request, **kwargs):
        """Return one page of search results.

        Raises:
            Http404: for an invalid or out-of-range page.
        """
        search_query = self.get_search_query(request, **kwargs)
        # GET values are strings but Django's Paginator needs an integer
        # per_page; fall back to the configured default on a malformed limit.
        try:
            limit = int(request.GET.get('limit', settings.API_LIMIT_PER_PAGE))
        except (TypeError, ValueError):
            limit = settings.API_LIMIT_PER_PAGE
        paginator = Paginator(search_query, limit)
        try:
            # A non-numeric ?page= previously escaped as an uncaught
            # ValueError (HTTP 500); treat it as "no such page" instead.
            page = int(request.GET.get('page', 1))
            return paginator.page(page)
        except (InvalidPage, ValueError):
            raise Http404("Sorry, no results on that page.")

    def build_search_query(self, request, search_query):
        '''
        method used to build_search query. Should return search_query!

        :param request: django request object
        :param haystack.query.SearchQuerySet search_query: search query set
            limited to resource model class
        '''
        return search_query
class ProtectedModelResource(ModelResource):
    ''' Override @csrf_exempt decorator added by tastypie to enable CSRF protection'''

    def wrap_view(self, view):
        def csrf_protected(request, *args, **kwargs):
            # Build the tastypie-wrapped view per request so the csrf_exempt
            # marker tastypie applies never decorates this outer callable.
            inner = ModelResource.wrap_view(self, view)
            return inner(request, *args, **kwargs)
        return csrf_protected
class SearchResource(SimpleSearchResource, ProtectedModelResource):
    """CSRF-protected tastypie resource exposing a /<resource>/search/ endpoint
    backed by haystack, with self-healing removal of stale index entries."""
    def prepend_urls(self):
        # Register .../<resource_name>/search/ ahead of the default routes.
        return [
            url(
                r"^(?P<resource_name>{0})/search{1}$".format(
                    self._meta.resource_name,
                    trailing_slash()
                ),
                self.wrap_view('get_search'),
                name="api_get_search"),
        ]
    def get_search(self, request, **kwargs):
        """Get search result."""
        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        page = SimpleSearchResource.get_paginated_search_query(self, request, **kwargs)
        objects = []
        for result in page.object_list:
            found_object = result.object
            # this won't allow empty objects to appear (like those that fell outside the reach of index_queryset)
            if found_object is None:
                # self healing mechanism. Remove such object from index
                try:
                    result.searchindex.remove_object(result.id)
                except TransportError:
                    # Elasticsearch failure: log and keep serving the rest.
                    log.error('[SearchResource] failed to remove bogus document {0}'.format(result))
                # do not process it
                continue
            bundle = self.build_bundle(obj=found_object, request=request)
            bundle = self.full_dehydrate(bundle)
            objects.append(bundle)
        object_list = {
            'objects': objects
        }
        self.log_throttled_access(request)
        return self.create_response(request, object_list)
|
"""
Game of squares
Two players
Turn: Subtracting a perfect square from the current state,excluding 0
State: Always a non-negative integer
"""
import math
def getOptimalMove(currentState):
    """Return a winning move for the subtraction-of-squares game, or -1.

    array[s] caches, for every state s <= currentState, a perfect square
    j*j whose removal leaves the opponent in a losing state (array value
    -1); it stays -1 when state s itself is losing.

    The original did not compile: it used `=` inside the condition
    (SyntaxError) and `==` where the assignment belonged (a no-op).

    Args:
        currentState (int): non-negative game state.

    Returns:
        int: a perfect square to subtract, or -1 if currentState is losing.
    """
    array = [-1 for _ in range(currentState + 1)]
    for i in range(1, currentState + 1):
        upperBound = int(math.sqrt(i)) + 1
        for j in range(1, upperBound):
            if array[i - j*j] == -1:
                array[i] = j*j
    return array[currentState]
|
__author__ = 'Jonathan Mulle & Austin Hurst'
"""This module provides an object type for mapping different SDL Keycodes to human-readable names.
At one point in time, it was used for creating global keymaps that mapped different letters/symbols
to their corresponding keycodes across the KLibs experiment runtime, but currently it is only used
for specifying accepted keys and their corresponding data output labels for the
:class:`KeyPressResponse` response listener in :class:`ResponseCollector` objects.
Because keypress response maps can now be specified using a simple dict, the KeyMap class is only
really required for legacy code and for validating dict key maps internally.
"""
class KeyMap(object):
    """An object that maps SDL Keycodes to human-readable names for UI input and data output.

    Primarily for use with the :class:`KeyPressResponse` response listener in a
    :class:`ResponseCollector` object. The 'ui_labels', 'data_labels', and 'keycodes' arguments
    must all be iterables of identical length.

    For example,::

        k = KeyMap('response', ['z', '/'], ['left', 'right'], [sdl2.SDLK_z, sdl2.SDLK_SLASH])

    would create a keymap with the 'SDLK_z' keycode mapped to the UI label 'z' and the data label
    'left', and the 'SDLK_SLASH' keycode mapped to the UI label '/' and the data label 'right'.

    Args:
        name (str): The name of the KeyMap object. Not currently used for anything.
        ui_labels (:obj:`List`): A list of strings corresponding to the list of keycodes provided
            in the 'keycodes' argument. Not currently used for anything.
        data_labels (:obj:`List`): A list of strings corresponding to the list of keycodes provided
            in the 'keycodes' argument. Specifies the output strings to use for different keycodes
            by :class:`ResponseCollector` objects.
        keycodes (:obj:`List`): A list of SDL Keycodes to map to the labels provided by 'ui_labels'
            and 'data_labels'. A complete list of valid keycodes can be found in the following
            table: https://wiki.libsdl.org/SDL_Keycode

    Attributes:
        name (str): The name of the KeyMap object. Not currently used for anything.

    Raises:
        ValueError: If there are any duplicate labels or keycodes in the lists provided.
        TypeError: If the lengths of 'ui_labels', 'data_labels', and 'keycodes' mismatch.
        TypeError: If any of the elements of 'ui_labels', 'data_labels', and 'keycodes' are of
            the wrong data type.
    """

    def __init__(self, name, ui_labels, data_labels, keycodes):
        if type(name) is not str:
            raise TypeError("KeyMap name must be a string.")
        self._ui_label_map = {}
        self._data_label_map = {}
        self.name = name
        self.__register(ui_labels, data_labels, keycodes)

    def __register(self, ui_labels, data_labels, keycodes):
        """Validates and registers keycodes with their corresponding UI and data labels within
        the keymap. For internal use only: once a KeyMap has been created, it cannot be extended
        or modified.
        """
        length = len(ui_labels)
        for l in [ui_labels, data_labels, keycodes]:
            if len(l) != length:
                raise TypeError("All list arguments must contain the same number of elements.")
            if len(set(l)) != length:
                raise ValueError("Label and keycode lists cannot contain duplicate items.")
            if isinstance(l, str) or not hasattr(l, '__iter__'):
                raise TypeError("'ui_labels', 'data_labels', and 'keycodes' must be iterable.")
        if not all(type(code) is int for code in keycodes):
            raise TypeError("All elements of 'keycodes' must be valid SDL2 Keycodes.")
        if not all(type(label) is str for label in ui_labels):
            raise TypeError("All elements of 'ui_labels' must be strings.")
        if not all(type(label) in (int, str) for label in data_labels):
            raise TypeError("All elements of 'data_labels' must be integers or strings.")
        for i in range(0, length):
            self._ui_label_map[keycodes[i]] = ui_labels[i]
            self._data_label_map[keycodes[i]] = data_labels[i]

    def validate(self, keycode):
        """Checks if a keycode has a mapping within the KeyMap.

        Args:
            keycode (int): The SDL Keycode to check for the presence of within the KeyMap.

        Returns:
            bool: True if the keycode exists in the KeyMap, otherwise False.

        Raises:
            TypeError: If the keycode is not an int (i.e. not a valid keycode).
        """
        if type(keycode) is not int:
            raise TypeError("SDL Keycodes must be of type 'int'.")
        return keycode in self._ui_label_map

    def read(self, keycode, label="ui"):
        """Returns the label mapped to a given keycode.

        Args:
            keycode (int): The SDL Keycode to return the mapped label for.
            label (str, optional): The type of label (UI or data) to return. Must be one of
                'ui' or 'data' (case-insensitive). Defaults to 'ui' if not specified.

        Returns:
            str: The label mapped to the given keycode.

        Raises:
            ValueError: If the keycode does not exist within the KeyMap.
        """
        # Normalize once: the original validated label.lower() but then
        # compared the raw label, so read(kc, 'UI') silently returned the
        # *data* label instead of the UI label.
        label = label.lower()
        if label not in ["ui", "data"]:
            raise ValueError("'label' must be either 'ui' or 'data'.")
        if self.validate(keycode):
            return self._ui_label_map[keycode] if label == "ui" else self._data_label_map[keycode]
        else:
            e = "The KeyMap '{0}' has no mapping for the keycode '{1}'"
            raise ValueError(e.format(self.name, keycode))
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# NOTE(review): hard-coded root:root credentials -- move to config/env vars.
app.config['SQLALCHEMY_DATABASE_URI']='mysql+pymysql://root:root@localhost/sample'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS']=False
db = SQLAlchemy(app)
class Employee(db.Model):
    """Employee row; one-to-many with Address through the `adr` relationship
    (backref `emp`); cascade deletes an employee's addresses with it."""
    id = db.Column("id",db.Integer(),primary_key=True)
    ename = db.Column("emp_name",db.String(50))
    adr = db.relationship('Address', backref='emp', uselist=True,
                   lazy=False,cascade='all,delete')
class Address(db.Model):
    """Address row; each address belongs to exactly one employee (eid FK)."""
    id = db.Column("id", db.Integer(), primary_key=True)
    city = db.Column("city", db.String(50))
    eid = db.Column("emp_id",db.ForeignKey("employee.id"),
                    unique=False,nullable=False)
if __name__ == '__main__':
    # One-shot seeding script.
    # NOTE(review): re-running violates the fixed primary keys (101, 102).
    db.create_all()
    e1 = Employee(id=101,ename='AAA')
    e2 = Employee(id=102, ename='BBB')
    db.session.add_all([e1,e2])
    db.session.commit()
    # All ten addresses attach to e1 via the `emp` backref; e2 gets none.
    for item in range(1,11):
        ad1 = Address(id=item,city='Pune'+str(item))
        ad1.emp=e1
        db.session.add(ad1)
    db.session.commit()
from django.contrib import admin
from motorfleet.models import *
# Expose every motorfleet model in the Django admin with default options.
admin.site.register(Fleet)
admin.site.register(MotorfleetProfile)
admin.site.register(Policy)
admin.site.register(Claim)
admin.site.register(Conviction)
admin.site.register(Vehicle)
|
# this provide a playground for easier import while debugging the code.
from ImageUtilities.image_io import open_image, write_image, as_matrix, as_image
from ImageUtilities.color_operations import to_grayscale
import os
import pdb
# Paths resolve against the current working directory -- run from repo root.
test_file = os.path.join(os.getcwd(), "resource", "sample", "test_image1.jpg")
output = os.path.join(os.getcwd(), "test", "test_grayscale.jpg")
if __name__ == '__main__':
    # Round-trip: load -> matrix -> grayscale -> image -> write to disk.
    image = open_image(test_file)
    s = as_matrix(image)
    grey = to_grayscale(s)
    new_image = as_image(grey)
    print(grey.shape)
    write_image(new_image, output)
    pdb.set_trace()  # drop into the debugger after writing, for inspection
# encoding=utf-8
import os
import dataloader
import numpy as np
import tensorflow as tf
from protein_lstm import Model
from sklearn import metrics
import pickle
loader = dataloader.DataMaster()
batch_size = 32
# batch_size = 32
epoch_num = 40
# keep_pro = 0.9
keep_pro = 0.95  # dropout keep probability used during training
init_learning_rate = 0.0005
decay_rate = 0.96
# NOTE(review): `/` yields a float in Python 3; confirm Model accepts a
# float decay_steps, or use // for whole steps per epoch.
decay_steps = loader.training_size / batch_size
model = Model(init_learning_rate, decay_steps, decay_rate)
def validataion():
    """Evaluate the model on the full test set in chunks of `step_size`.

    Uses the module-level `sess`, `model` and `loader`; prints accuracy,
    precision, recall, F1, AUC and a classification report, and returns
    (logits_pred, loader.test_Y).

    NOTE(review): name is a typo for "validation"; kept because the call
    site in the training loop uses this spelling.
    """
    # model.prediction_fused
    print('============= begin to test =============')
    step_size = 300
    outputs = []
    logits_pred = []
    for i in range(0, len(loader.test_Y), step_size):
        batch_X = loader.test_X[i:i + step_size]
        batch_E = loader.test_E[i:i + step_size]
        batch_L = loader.test_L[i:i + step_size]
        batch_Y = loader.test_Y[i:i + step_size]
        # dropout disabled (keep prob 1.0) for evaluation
        output, y_logit = sess.run([model.prediction_cnn, model.logits_pred],
                                   feed_dict={model.x: batch_X, model.e: batch_E, model.l: batch_L, model.y: batch_Y,
                                              model.dropout_keep_prob: 1.0})
        outputs.append(output)
        logits_pred.append(y_logit)
    y_pred = np.concatenate(outputs, axis=0)
    logits_pred = np.concatenate(logits_pred, axis=0)
    print(">>>> accuracy %.6f" % metrics.accuracy_score(loader.test_Y, y_pred))
    print(">>>> Precision %.6f" % metrics.precision_score(loader.test_Y, y_pred))
    print(">>>> Recall %.6f" % metrics.recall_score(loader.test_Y, y_pred))
    print(">>>> f1_score %.6f" % metrics.f1_score(loader.test_Y, y_pred))
    fpr, tpr, threshold = metrics.roc_curve(loader.test_Y, logits_pred)
    print(">>>> auc_socre %.6f" % metrics.auc(fpr, tpr))
    report = metrics.classification_report(loader.test_Y, y_pred,
                                           target_names=['Trivial', 'Essential'])
    print(report)
    return logits_pred, loader.test_Y
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print('pretraining CNN Part')
    # Mini-batch training for `epoch_num` epochs; batch metrics are printed
    # every 20 iterations and the test set is scored after each epoch.
    for epoch in range(epoch_num):
        loader.shuffle()
        for iter, idx in enumerate(range(0, loader.training_size, batch_size)):
            batch_X = loader.train_X[idx:idx + batch_size]
            batch_E = loader.train_E[idx:idx + batch_size]
            batch_L = loader.train_L[idx:idx + batch_size]
            batch_Y = loader.train_Y[idx:idx + batch_size]
            batch_loss, y_pred, y_logits, accuracy, _ = sess.run(
                [model.loss_cnn, model.prediction_cnn, model.logits_pred, model.accuracy, model.optimizer_cnn],
                feed_dict={model.x: batch_X, model.e: batch_E, model.l: batch_L, model.y: batch_Y,
                           model.dropout_keep_prob: keep_pro})
            if iter % 20 == 0:
                print("=====epoch:%d iter:%d=====" % (epoch + 1, iter + 1))
                print('batch_loss %.3f' % batch_loss)
                print("accuracy %.6f" % metrics.accuracy_score(batch_Y, y_pred))
                print("Precision %.6f" % metrics.precision_score(batch_Y, y_pred))
                print("Recall %.6f" % metrics.recall_score(batch_Y, y_pred))
                print("f1_score %.6f" % metrics.f1_score(batch_Y, y_pred))
                fpr, tpr, threshold = metrics.roc_curve(batch_Y, y_logits)
                print("auc_socre %.6f" % metrics.auc(fpr, tpr))
        # validataion() reads the enclosing `sess` as a global.
        c = validataion()
|
"""Utility functions."""
import errno
import os
import re
import sys
import urllib.request
import subprocess as sp
from pyfaidx import Fasta
def generate_gap_bed(fname, outname):
    """ Generate a BED file with gap locations.

    Parameters
    ----------
    fname : str
        Filename of input FASTA file.

    outname : str
        Filename of output BED file.
    """
    f = Fasta(fname)
    with open(outname, "w") as bed:
        for chrom in f.keys():
            # Each run of Ns is one gap interval; re.Match start/end are
            # 0-based end-exclusive, matching BED coordinates.
            for m in re.finditer(r"N+", f[chrom][:].seq):
                bed.write("{}\t{}\t{}\n".format(chrom, m.start(0), m.end(0)))
def filter_fasta(infa, outfa, regex=".*", v=False, force=False):
    """Filter fasta file based on regex.

    Parameters
    ----------
    infa : str
        Filename of input fasta file.

    outfa : str
        Filename of output fasta file. Cannot be the same as infa.

    regex : str, optional
        Regular expression used for selecting sequences.

    v : bool, optional
        If set to True, select all sequence *not* matching regex.

    force : bool, optional
        If set to True, overwrite outfa if it already exists.

    Returns
    -------
        fasta : Fasta instance
            pyfaidx Fasta instance of newly created file
    """
    if infa == outfa:
        raise ValueError("Input and output FASTA are the same file.")
    if os.path.exists(outfa):
        if force:
            os.unlink(outfa)
            # also drop the stale faidx index so it gets rebuilt
            if os.path.exists(outfa + ".fai"):
                os.unlink(outfa + ".fai")
        else:
            raise ValueError(
                "{} already exists, set force to True to overwrite".format(outfa)
            )
    # pyfaidx applies filt_function to sequence names while indexing
    filt_function = re.compile(regex).search
    fa = Fasta(infa, filt_function=filt_function)
    seqs = fa.keys()
    if v:
        # inverted selection: keep the sequences that did NOT match
        original_fa = Fasta(infa)
        seqs = [s for s in original_fa.keys() if s not in seqs]
        fa = original_fa
    if len(seqs) == 0:
        raise ValueError("No sequences left after filtering!")
    with open(outfa, "w") as out:
        for chrom in seqs:
            out.write(">{}\n".format(fa[chrom].name))
            out.write("{}\n".format(fa[chrom][:].seq))
    # reopen so the returned Fasta is indexed over the new file
    return Fasta(outfa)
def mkdir_p(path):
    """ 'mkdir -p' in Python: create *path* and any missing parents.

    Succeeds silently when the directory already exists. Still raises
    OSError when the path exists but is not a directory, or on other
    failures (permissions, etc.) -- the same contract as the manual
    errno.EEXIST check this replaces.
    """
    os.makedirs(path, exist_ok=True)
def cmd_ok(cmd):
    """Return True when *cmd* can be executed, False otherwise."""
    try:
        sp.check_call(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
        return True
    except sp.CalledProcessError:
        # A non-zero exit (bwa does this when run bare) still proves the
        # binary exists and is runnable.
        return True
    except Exception:
        sys.stderr.write("{} not found, skipping\n".format(cmd))
        return False
def run_index_cmd(name, cmd):
    """Run *cmd* in a shell; dump its captured output to stderr on failure."""
    sys.stderr.write("Creating {} index...\n".format(name))
    # Create index
    proc = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
    out, err = proc.communicate()
    if proc.returncode == 0:
        return
    sys.stderr.write("Index for {} failed\n".format(name))
    sys.stderr.write(out.decode("utf8"))
    sys.stderr.write(err.decode("utf8"))
def get_localname(name, localname):
    """Return a sanitized local name.

    If `localname` is given, return it with spaces replaced by underscores.
    Otherwise, if `name` is a retrievable URL, return the filename stem
    parsed from it; if not, return `name` with spaces replaced.
    """
    if localname is None:
        try:
            # NOTE(review): this performs a real network request just to
            # decide whether `name` is a URL -- slow and fragile offline;
            # a urllib.parse-based check would change behavior for valid
            # but unreachable URLs, so it is left as-is here.
            urllib.request.urlopen(name)
        except (IOError, ValueError):
            # not retrievable as a URL: treat as a plain name
            return name.replace(" ", "_")
        else:
            # try to get the name from the url
            name = name[name.rfind("/") + 1 :]
            return name[: name.find(".")]
    else:
        return localname.replace(" ", "_")
|
from basic_app.models import UserProfileInfo
from django import forms
from django.contrib.auth.models import User
class UserForm(forms.ModelForm):
    """Registration form for Django's User with password confirmation."""
    password=forms.CharField(widget=forms.PasswordInput())

    class Meta:
        model=User
        fields=('username','email','password')

    verify_password=forms.CharField(widget=forms.PasswordInput,required=True)

    def clean(self):
        """Cross-field validation: both password entries must match.

        Returns the cleaned data dict (Django convention).
        """
        all_clean_data=super().clean()
        # Use .get(): a field that failed its own validation is absent from
        # cleaned_data, so indexing raised KeyError (HTTP 500) instead of
        # surfacing a normal form error.
        p=all_clean_data.get('password')
        v_p=all_clean_data.get('verify_password')
        if p!=v_p:
            raise forms.ValidationError('PASSWORD IS NOT THE SAME')
        return all_clean_data
class UserProfileInfoForm(forms.ModelForm):
    """Supplementary profile fields collected alongside UserForm."""
    class Meta:
        model=UserProfileInfo
        fields=('portfolio_site','profile_pic')
# Generated by Django 3.2.3 on 2021-06-21 05:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import forum.fields
class Migration(migrations.Migration):
    """Initial schema for the forum app: Reply and Forum models.

    NOTE(review): auto-generated and presumably applied -- do not edit in
    place ('forumss' is a stored verbose_name_plural typo; fixing it needs
    a follow-up migration).
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Reply',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('message', models.TextField(max_length=5000)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateField(null=True)),
                ('owner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL)),
                ('updated_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Forum',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('message', models.TextField(max_length=5000, null=True)),
                ('subject', models.CharField(max_length=255)),
                ('address', models.TextField(max_length=1000)),
                ('mainphoto', forum.fields.ThumbnailImageField(upload_to='')),
                ('cleaned', models.IntegerField()),
                ('taste', models.IntegerField()),
                ('kindness', models.IntegerField()),
                ('last_updated', models.DateField(auto_now_add=True, null=True)),
                ('slug', models.SlugField(allow_unicode=True, help_text='one word for title alias', max_length=100, verbose_name='SLUG')),
                ('owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='OWNER')),
            ],
            options={
                'verbose_name': 'forum',
                'verbose_name_plural': 'forumss',
                'db_table': 'forum_forum',
                'ordering': ('-last_updated',),
            },
        ),
    ]
|
from MapMaker import MapMaker
def main():
    """Load shapefiles, merge them, and plot the resulting map."""
    try:
        shape_files_path = "./shapefiles"
        map_maker = MapMaker(shape_files_path)
        shapefiles = map_maker.get_shape_files()
        merged = map_maker.concat_shape_files(shapefiles)
        map_maker.plot_maps(merged)
    except Exception as exc:
        # Narrowed from a bare `except:` (which also swallowed SystemExit
        # and KeyboardInterrupt) and include the cause instead of hiding it.
        print("Something went wrong: {}".format(exc))


if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
from .correios import Correios, ShippingRateResult, ShippingRateResultService
|
# Largest prime factor
# Problem 3
# The prime factors of 13195 are 5, 7, 13 and 29.
# What is the largest prime factor of the number 600851475143 ?
import time
start_time = time.time()

# Trial division, stripping each prime factor completely before moving on.
n = 600851475143
factor = 2
lastFactor = 1
while n > 1:
    if n % factor == 0:
        lastFactor = factor
        # Floor division keeps n an exact int; the original's `/` turned n
        # into a float in Python 3, risking precision loss for large n.
        while n % factor == 0:
            n //= factor
    factor = factor + 1
print(lastFactor)
seconds = time.time() - start_time
print('Time Taken:', time.strftime("%H:%M:%S", time.gmtime(seconds)))
print("--- %s seconds ---" % (time.time() - start_time))
|
from django.shortcuts import render
from django.views import generic
from django.shortcuts import redirect
# Create your views here.
def TemplateView(request):
    """Render the landing page (index.html).

    NOTE(review): the name shadows the django.views.generic.TemplateView
    concept (generic is imported above); kept for URLconf compatibility.
    """
    # Removed stray debug `print(1)` left over from development.
    return render(request, 'index.html')
|
from rest_framework import serializers
from .models import *
from django.contrib.auth.models import User
from django.db import transaction
class ProfileSerializer(serializers.HyperlinkedModelSerializer):
    """Profile serializer with a hyperlinked `url` identity field."""
    class Meta:
        model = Profile
        fields = ('url','id','username','email','address')
class PostSerializer(serializers.ModelSerializer):
    """Post with its author's profile nested read-only."""
    # The original passed the ModelSerializer *class* as a positional
    # argument (ProfileSerializer(serializers.ModelSerializer)), which DRF
    # treats as `instance`. A read-only nested serializer was intended,
    # matching CommentSerializer.postId below.
    userId = ProfileSerializer(read_only=True)
    class Meta:
        model = Post
        fields = ('id','title','body','userId')
class CommentSerializer(serializers.ModelSerializer):
    """Comment with its parent post nested read-only."""
    postId = PostSerializer(read_only=True)
    class Meta:
        model = Comment
        fields = ('id','name','body','email', 'postId')
|
import matplotlib
import matplotlib.pyplot as plt
import sqlite3
import os
# set up the cursor
# Renamed from `dir`, which shadowed the builtin dir().
base_dir = os.path.dirname(__file__) + os.sep
conn = sqlite3.connect(base_dir + 'full_data.db')
cur = conn.cursor()
#-------------------------------------------------TRANSPORTATION DATA -------------------------------------------------
def _fetch_column(query):
    """Run `query` and return the first column of every row as a flat list."""
    return [row[0] for row in cur.execute(query).fetchall()]

USathome = _fetch_column('SELECT at_home FROM transport WHERE location_id =1 ORDER BY week_id asc')
USnotathome = _fetch_column('SELECT not_at_home FROM transport WHERE location_id =1 ORDER BY week_id asc')
USathomeratio = _fetch_column('SELECT ratio_at_home FROM transport WHERE location_id =1 ORDER BY week_id asc')
USathome_results = None  # the nine copy-pasted fetch loops are replaced by _fetch_column above

MIathome = _fetch_column('SELECT at_home FROM transport WHERE location_id =2 ORDER BY week_id asc')
MInotathome = _fetch_column('SELECT not_at_home FROM transport WHERE location_id =2 ORDER BY week_id asc')
# The original iterated this one over the bare cursor (no .fetchall());
# the rows are the same either way, and it is now consistent with the rest.
MIathomeratio = _fetch_column('SELECT ratio_at_home FROM transport WHERE location_id =2 ORDER BY week_id asc')

#-------------------------------------------------UNEMPLOYMENT DATA-------------------------------------------------
new_unemploy_MI = _fetch_column('SELECT initial_nsa_claims FROM unemployment_rates WHERE loc =2 ORDER BY week asc')
total_unemploy_MI = _fetch_column('SELECT total_claims FROM unemployment_rates WHERE loc =2 ORDER BY week asc')
new_unemploy_US = _fetch_column('SELECT initial_nsa_claims FROM unemployment_rates WHERE loc =1 ORDER BY week asc')
total_unemploy_US = _fetch_column('SELECT total_claims FROM unemployment_rates WHERE loc =1 ORDER BY week asc')

#-------------------------------------------------COVID DEATHS-------------------------------------------------
covid_deaths_MI = _fetch_column('SELECT deaths FROM covid_deaths WHERE loc =2 ORDER BY wk asc')
covid_deaths_US = _fetch_column('SELECT deaths FROM covid_deaths WHERE loc =1 ORDER BY wk asc')
#-------------------------------------------------VISUALIZATIONS-------------------------------------------------
# x-axis: week numbers 1..53
weeks =[]
count =0
for i in range(53):
    count +=1
    weeks.append(count)
#1 - US vs MI athome ratio
fig,ax = plt.subplots()
y1 = MIathomeratio
y2 = USathomeratio
ax.plot(weeks,y1, "g-", label = "Michigan")
ax.plot(weeks, y2, 'b-', label = "National")
ax.set_xlabel('Week')
ax.set_ylabel('Ratio of # people at-home / # not at-home')
ax.set_title('Ratio of People At-Home vs Not: Nationally and in MI')
ax.grid()
ax.legend()
#2 new unemployment rates & covid deaths
fig= plt.figure(figsize = (15,8))
ax1=fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax1.scatter(covid_deaths_MI, new_unemploy_MI, color = "g")
ax1.set_xlabel('MI Weekly Covid Deaths')
ax1.set_ylabel('New MI Unemployment Claims')
ax1.grid()
ax1.set_title('Covid Deaths by New Unemployment Claims: Michigan')
ax2.scatter(covid_deaths_US, new_unemploy_US,color = "b")
ax2.set_xlabel('National Weekly Covid Deaths')
ax2.set_ylabel('New National Unemployment Claims')
ax2.grid()
ax2.set_title('Covid Deaths by New Unemployment Claims: National')
#3 - at home ratio & covid deaths
fig2= plt.figure(figsize = (15,8))
ax1=fig2.add_subplot(121)
ax2 = fig2.add_subplot(122)
# [:-1] trims the mobility series by one week so its length matches the
# deaths series -- presumably one extra trailing week; verify upstream.
ax1.scatter(USnotathome[:-1], covid_deaths_US, color = "purple")
ax1.set_ylabel('National Weekly Covid Deaths')
ax1.set_xlabel('People Not At-Home: National')
ax1.grid()
ax1.set_title('Weekly Covid Deaths by # of People Not at Home: National')
ax2.scatter(MInotathome[:-1],covid_deaths_MI, color = "red")
ax2.set_ylabel('MI Weekly Covid Deaths')
ax2.set_xlabel('People Not At-Home: Michigan')
ax2.grid()
ax2.set_title('Weekly Covid Deaths by # of People Not at Home: Michigan')
# show graphs
plt.show()
DBS written test:-
1)
var1 = True
var2 = False
var3 = False
if var1 or var2 and var3:
print("True")
else:
print("False")
output:- True
2)
var1 = lambda var: var * 2
ret = lambda var: var * 3
result = 2
result = var1(result)
result = ret(result)
result = var1(result)
print(result)
output:- 24
3)
class abc(object):
def __init__(self):
self.x=5
pass
c = abc()
print c.x
print c.x
output :-
5
5
4) Select the Python module that does not come installed by default.
output:- mechanicz
5)__doc__ attribute use ??
6)PyChecker use in python ??
7) One Python indentation-related question
8) One question related to sets
9) One question related to tuples
10) One question related to exception handling
11) One question related to syntax errors
"""
For this kata you will have to forget how to add two numbers.
In simple terms, our method does not like the principle
of carrying over numbers and just writes down every number
it calculates :-)
You may assume both integers are positive integers.
"""
def add(num1, num2):
    """Digit-wise "no carrying" addition.

    Each column's sum is written out in full (e.g. 26 + 39 -> 515, because
    2+3 = 5 and 6+9 = 15). Both arguments are assumed to be non-negative.
    """
    left, right = str(num1), str(num2)
    width = max(len(left), len(right))
    # Zero-pad the shorter operand so the columns line up.
    left, right = left.zfill(width), right.zfill(width)
    columns = [str(int(x) + int(y)) for x, y in zip(left, right)]
    return int(''.join(columns))
# Smoke-test the no-carry addition on a few sample pairs.
print('Tests:')
print(add(2,11))
print(add(0,1))
print(add(0,0))
print(add(16,18))
print(add(26,39))
print(add(122,81))
import torch
from unittest import TestCase
from mock import MagicMock, patch
import torchbearer
from torchbearer.callbacks import UnpackState
class TestUnpackState(TestCase):
    """Tests for the UnpackState callback: on sample, X is packed into a dict
    of the requested state keys; on forward, a dict prediction keyed by
    Y_PRED is unpacked back into state."""

    def reset_state(self):
        """Return a fresh minimal torchbearer state for one test case."""
        return {
            torchbearer.X: 'data',
            torchbearer.Y_TRUE: 'targets'
        }

    def test_sample(self):
        packer = UnpackState(keys=[torchbearer.X, torchbearer.Y_TRUE])
        expected = {torchbearer.X: 'data', torchbearer.Y_TRUE: 'targets'}
        # The train and validation hooks must behave identically.
        for hook in (packer.on_sample, packer.on_sample_validation):
            state = self.reset_state()
            hook(state)
            self.assertEqual(state[torchbearer.X], expected)

    def test_no_key(self):
        # Without keys, X must be left untouched.
        packer = UnpackState()
        for hook in (packer.on_sample, packer.on_sample_validation):
            state = self.reset_state()
            hook(state)
            self.assertEqual(state[torchbearer.X], 'data')

    def test_fill_X(self):
        # X itself is always included in the packed dict, even if not listed.
        packer = UnpackState(keys=[torchbearer.Y_TRUE])
        expected = {torchbearer.X: 'data', torchbearer.Y_TRUE: 'targets'}
        for hook in (packer.on_sample, packer.on_sample_validation):
            state = self.reset_state()
            hook(state)
            self.assertEqual(state[torchbearer.X], expected)

    def test_forward_no_dict(self):
        # Non-dict predictions pass through unchanged.
        packer = UnpackState(keys=[torchbearer.Y_TRUE])
        for hook in (packer.on_forward, packer.on_forward_validation):
            state = self.reset_state()
            state[torchbearer.Y_PRED] = 1
            hook(state)
            self.assertEqual(state[torchbearer.Y_PRED], 1)

    def test_forward_list(self):
        # List predictions also pass through unchanged.
        packer = UnpackState(keys=[torchbearer.Y_TRUE])
        for hook in (packer.on_forward, packer.on_forward_validation):
            state = self.reset_state()
            state[torchbearer.Y_PRED] = [1, 2, 3]
            hook(state)
            self.assertEqual(state[torchbearer.Y_PRED], [1, 2, 3])

    def test_forward_dict_no_y_pred(self):
        # A dict prediction lacking a Y_PRED key is left as-is.
        packer = UnpackState(keys=[torchbearer.Y_TRUE])
        for hook in (packer.on_forward, packer.on_forward_validation):
            state = self.reset_state()
            state[torchbearer.Y_PRED] = {'one': 1, 'two': 2}
            hook(state)
            self.assertEqual(state[torchbearer.Y_PRED], {'one': 1, 'two': 2})

    def test_forward_dict_y_pred(self):
        # A dict prediction containing Y_PRED is unpacked into state.
        packer = UnpackState(keys=[torchbearer.Y_TRUE])
        for hook in (packer.on_forward, packer.on_forward_validation):
            state = self.reset_state()
            state[torchbearer.Y_PRED] = {torchbearer.Y_PRED: 1, 'two': 2}
            hook(state)
            self.assertEqual(state[torchbearer.Y_PRED], 1)
            self.assertEqual(state['two'], 2)
from sr.robot import *
import time

R = Robot()

# Your code goes here
# Drive both wheels forward at 70% power for 5 seconds, then stop.
R.motors[0].m0.power = 70
R.motors[0].m1.power = 70
time.sleep (5)
R.motors[0].m0.power = 0
R.motors[0].m1.power = 0
# Spin in place (wheels in opposite directions) for 5 seconds, then stop.
R.motors[0].m0.power = 70
R.motors[0].m1.power = -70
time.sleep (5)
R.motors[0].m0.power = 0
R.motors[0].m1.power = 0
class Parameter:
    """Hyper-parameter and path configuration for the training pipeline.

    All values are plain attributes set in __init__; construct one instance
    and read/override attributes as needed.
    """

    def __init__(self):
        self.learn_rate = 0.1          # gradient-descent step size
        self.batch_size = 50
        self.iter_num = 512
        self.kthi = 0.001
        self.class_num = 5             # number of target classes
        self.clean_switch = True       # whether to clean the data first
        self.ap_iter_num = 1000
        self.depth = 0
        self.ap_batch_size = 45
        self.lamda = 0.1               # regularization weight ('lambda' is a reserved word)
        self.interval_batch_size = 48
        # FIX: raw strings so Windows backslashes can never be mistaken for
        # escape sequences if the paths are ever edited (e.g. "\t", "\n").
        # Alternative Stanford model: r"F:\jcode\englishPCFG.ser.gz"
        self.model_path = r"F:\jcode\englishFactored.ser.gz"  # Stanford parser model
        self.graph_dir = "dp_graph2"
        self.tree_path = "data/tree.pkl"
        self.tree_height = 2
        self.features_dir = "features_file"
        self.dict_dir = "dict_file"
|
import pandas as pd

# yob1880.txt has no header row: the columns are name, sex, and birth count.
path = "pydata-book-2nd-edition/datasets/babynames/yob1880.txt"
names1880 = pd.read_csv(path,names=['name','sex','birth'])
# Total recorded births in 1880, grouped by sex.
names_sex = names1880.groupby('sex').birth.sum()
print(names_sex)
|
import xadmin
from blog.models import ArticleType, ArticleTag, Article

# xadmin configuration for articles: columns shown, searchable fields,
# inline-editable fields, and sidebar filters.
class ArticleAdmin(object):
    list_display = ["title", "describe","tags", "article_type", "article_image", "author", "create_time", ]
    search_fields = ['title', "tags", "article_type", ]
    list_editable = ['title',"describe", "article_image", "tags", "article_type", "create_time", ]
    list_filter = ["title", "author", "tags", "article_type", "create_time", ]

# Tag admin: single-column listing keyed on the tag name.
class ArticleTagAdmin(object):
    list_display = ("tag_name",)
    search_fields = ["tag_name", ]
    list_filter = ["tag_name", ]

# Type admin: single-column listing keyed on the type name.
class ArticleTypeAdmin(object):
    list_display = ["type_name", ]
    search_fields = ["type_name", ]
    list_filter = ["type_name", ]

xadmin.site.register(Article, ArticleAdmin)
xadmin.site.register(ArticleTag, ArticleTagAdmin)
xadmin.site.register(ArticleType, ArticleTypeAdmin)
|
"""
自动狙击
"""
import ctypes
import random
import time
from shoot import automatic_support
def start(system_context):
    """Main auto-snipe loop: watch the screen capture and fire when a target
    appears under the crosshair.

    Runs forever; polls cheaply while the feature is disabled
    (system_context.automatic_function != 2). Exceptions are printed and the
    loop continues.
    """
    # Input-box library and its 64-bit device handle.
    lib, handle = system_context.box_lib, system_context.box_handle64
    while True:
        try:
            # Feature disabled: idle and re-check.
            if system_context.automatic_function != 2:
                time.sleep(0.5)
                continue
            # Latest screen-capture image.
            capture_image = system_context.capture
            # Is the sniper scope opened?
            has_open_mirror = automatic_support.has_sniper_open_mirror(
                automatic_support.get_sniper_open_mirror_position_image(
                    capture_image, automatic_support.get_sniper_open_mirror_position()))
            # print('has_open_mirror {}'.format(has_open_mirror))
            if not has_open_mirror:
                time.sleep(0.03)
                continue
            # Red text present => ready to engage.
            has_read = automatic_support.has_read_text(
                automatic_support.get_read_position_img(
                    capture_image, automatic_support.get_read_text_position()))
            if not has_read:
                # Sleep briefly before polling again.
                time.sleep(0.03)
                continue
            # Look for a target in the crosshair region.
            res = find_color(capture_image)
            if not res:
                # print('target check')
                time.sleep(0.03)
                continue
            # Fire: press key code 16 on the box device.
            lib.M_KeyPress(handle, 16, 1)
            time.sleep(0.1)
            # Press Q (code 20) twice to switch weapons and back (re-chamber).
            lib.M_KeyPress(handle, ctypes.c_uint64(20), 1)
            time.sleep(0.1)
            lib.M_KeyPress(handle, ctypes.c_uint64(20), 1)
            # Randomized pause (original note: avoid 36-2).
            random_sleep()
        except Exception as e:
            print(e)
def find_color(img):
    """Return True when at least three distinct colour groups appear in the
    crosshair strip of *img* (treated as evidence of a target); False
    otherwise. On error, prints the exception and returns None.
    """
    try:
        win_w, win_h = 1024, 768
        strip_w = 4
        strip_h = 20
        # Crosshair-centred strip, nudged slightly right and down of centre.
        x1 = (win_w / 2 - strip_w / 2) + 2
        x2 = x1 + strip_w - 1
        y1 = (win_h / 2 - strip_h / 2) + 1
        y2 = y1 + strip_h
        strip = img.crop((int(x1), int(y1), int(x2), int(y2)))
        # Candidate colour groups as [r-range, g-range, b-range].
        colors = [
            # red 255, 0, 0
            [[200, 255], [0, 200], [0, 180]],
            # blue 0, 0, 255
            [[10, 50], [10, 50], [0, 255]],
            # black 17, 17, 17
            [[16, 70], [16, 70], [16, 70]],
            # brown 177,114,1 - 203,166,99
            [[177, 203], [114, 166], [0, 99]],
            # white 177,186,188 - 250,251,241
            [[177, 255], [186, 255], [188, 255]],
        ]
        # Require three different groups to match; drop each group once found
        # so the same colour cannot be counted twice.
        found = False
        for _ in range(3):
            found, idx = find_color_tools(strip, colors, 1)
            if not found:
                return False
            colors.pop(idx)
        return found
    except Exception as e:
        print(e)
def find_color_tools(img, colors, max_count):
    """Scan the first pixel column of *img* for pixels inside any colour range.

    Each entry of *colors* is [r-range, g-range, b-range]. Returns
    (True, index-of-matching-range) once *max_count* matches are seen,
    otherwise (False, -1). Only column x == 0 is scanned — the capture is a
    narrow strip and the x/y axes are swapped relative to the numpy view.
    """
    _, height = img.size
    pixels = img.load()
    matches = 0
    for row in range(height):
        rgb = pixels[0, row]
        r, g, b = rgb[0], rgb[1], rgb[2]
        for idx, (r_rng, g_rng, b_rng) in enumerate(colors):
            if (r_rng[0] <= r <= r_rng[1]
                    and g_rng[0] <= g <= g_rng[1]
                    and b_rng[0] <= b <= b_rng[1]):
                matches += 1
                if matches >= max_count:
                    return True, idx
    return False, -1
def get_random_time():
    """Return a short random sleep duration (seconds) used to jitter box
    fire calls (original note: avoid 36-2)."""
    sleeps = [0.01, 0.02, 0.03, 0.008]
    # FIX: random.choice avoids the hard-coded upper index (3), which would
    # silently skip or crash on new entries if the candidate list is edited.
    return random.choice(sleeps)
def random_sleep():
    """Sleep for a short randomized interval to jitter the firing cadence."""
    # Randomized pause after a box shot (original note: avoid 36-2).
    time.sleep(get_random_time())
|
""" Create an Group of participants in environments.
Base Class:
Group
Subclasses:
"""
#: Dependencies
import numpy
import random
class Group(object):
    """ Base class for Group object.
    """
    def __init__(self, unique_id, experiment, members=None, name=''):
        """ Initialize a uniquely identified group of members in an experiment,
        with the option to name it. Note that state is the 'collective state'
        of members, and members in a Group object are given as unique_ids of
        the actual members stored elsewhere.
        Args:
            unique_id (int): unique integer identifier
            experiment (obj): reference to experiment group is in
            members (list): unique_ids of agents to include in group, if any
            name (str): string identifier for convenient reference, if given
        Attrs:
            _unique_id (int)
            _experiment (obj)
            _members (list)
            _name (str)
            _state (dict)
        """
        self._unique_id = unique_id
        self._experiment = experiment
        # FIX: the original default `members=[]` was a mutable default
        # argument shared by every Group constructed without members; each
        # instance now gets its own fresh list.
        self._members = [] if members is None else members
        self._name = name
        self._state = {}

    def get_id(self):
        """ Return unique_id.
        """
        return self._unique_id

    def get_experiment(self):
        """ Return experiment.
        """
        return self._experiment

    def get_members(self):
        """ Return current members.
        """
        return self._members

    def get_state(self):
        """ Return state of collective.
        """
        return self._state

    def get_name(self):
        """ Return name, if given, else the placeholder 'No Name'.
        """
        return self._name if self._name else 'No Name'

    def _set_members(self, members_to_remove, new_members):
        """ Set list of members. First remove old members, then add new members.
        """
        for r in members_to_remove:
            self._members.remove(r)
        self._members = self._members + new_members

    def _set_state(self):
        """ Set a new state.
        """
        pass

    def _set_name(self, new_name):
        """ Set a new name for the group.
        """
        self._name = new_name

    def step(self):
        """ Step method required for all groups. Override to use in
        practice.
        Requirements: Define in subclasses.
        """
        pass
|
"""
Mark classes and their methods as accessible to the remote side.
"""
__all__ = ['Referenceable']
from types import MethodType
from weakref import WeakKeyDictionary
from thinserve.util import Singleton
@Singleton
class Referenceable (object):
    """I am a class and method decorator for referenceable types.

    NOTE(review): this uses Python 2 APIs (dict.itervalues/iteritems and the
    three-argument MethodType) — the module is Python 2 only.
    """
    def __init__(self):
        # Decorated class -> {remote name: unbound function}.
        self._classes = {}
        # Instance -> {remote name: bound method}; weak keys let instances die.
        self._instances = WeakKeyDictionary()
        # Staging area filled by the Method decorators and consumed (then
        # cleared) by the class decorator: function -> remote name.
        self._methodcache = {}

    # Application Interface:
    def __call__(self, cls):
        """Decorate a class; without this, it cannot be remotely referenced."""
        methodinfo = {}
        for v in vars(cls).itervalues():
            try:
                remotename = self._methodcache[v]
            except KeyError:
                # It's not remotely accessible:
                pass
            else:
                methodinfo[remotename] = v
        self._classes[cls] = methodinfo
        self._methodcache.clear()
        return cls

    def Method(self, f):
        """Decorate a method; the class must be decorated."""
        return self._register_method(f, f.__name__)

    def Method_without_prefix(self, prefix):
        """Decorate a method, but drop prefix from the remote name."""
        def decorator(f):
            assert f.__name__.startswith(prefix), (f, prefix)
            return self._register_method(f, f.__name__[len(prefix):])
        return decorator

    # Framework interface (private to apps):
    def _check(self, obj):
        # True when obj's type was decorated as referenceable.
        return type(obj) in self._classes

    def _get_bound_methods(self, obj):
        # Lazily build (and memoize per-instance) the bound-method table.
        cls = type(obj)
        try:
            return self._instances[obj]
        except KeyError:
            boundmethods = dict(
                (name, MethodType(m, obj, cls))
                for (name, m)
                in self._classes[cls].iteritems()
            )
            self._instances[obj] = boundmethods
            return boundmethods

    # Class Private:
    def _register_method(self, f, name):
        # Stage the mapping; the class decorator will pick it up.
        self._methodcache[f] = name
        return f
|
matrix = [[1, 2 ,3], [4, 5], [6, 7, 8]]

# Flatten the ragged 2-D list into a single list, preserving order.
flatten_matrix = []
for sublist in matrix:
    flatten_matrix.extend(sublist)
print(flatten_matrix)
##################################################################
# Common definitions and global variables for the IO client/server system.
#
import socket

# Name of this machine.
LOCALHOST = socket.gethostname()
# server always uses localhost
SERVERHOST = socket.gethostbyname( LOCALHOST )
# clients usually use a standard name
# (ioclient's -t switch redirects to localhost )
# NOTE(review): resolving "igor" raises socket.gaierror at import time if
# that host is unknown on this network — confirm it is expected to resolve.
SERVER = socket.gethostbyname( "igor" )
COMPORT=51001 # communications port
LOGPORT=51003 # log file access
SER0PORT=51010 # serial COMM port access (this and next few)
CMD_PREFIX = '$'  # prefix marking command messages
|
from django.urls import path
from django.shortcuts import render
from django.http import HttpResponse
from . import views

# NOTE(review): render and HttpResponse are imported but unused here.
# Namespace for URL reversing, e.g. 'blog:index'.
app_name = 'blog'

urlpatterns = [
    path('', views.index, name='index'),
    path('login/', views.login_view, name='login_view'),
    path('logout/', views.logout_view, name='logout_view'),
]
|
import sys
import os

# Make the sibling shared/bus/missions packages importable.
sys.path.append('../shared')
sys.path.append('../bus')
sys.path.append('../missions')

import shared_all
import trialbus
import pulluprobot

def execute():
    """Run the pull-up mission: calibrate, drive out, perform, and return."""
    # Must always calibrate the gyro first.
    shared_all.calibrate_gyro(0)
    # Do the sequence
    trialbus.base_to_pullup()
    pulluprobot.align()
    pulluprobot.run()
    trialbus.pullup_to_dance()

## Below lines only for testing
## Comment out when done testing. Do not upload to GitHub without commenting.
execute()
|
import socket
import keyboard
import time

# FIX: the original was a 12-way copy-pasted if-chain (with inconsistent
# print/send ordering between branches); a table-driven loop sends the same
# messages and prints the same labels for every key.
# (keyboard key name, message sent to the server, label used in the log)
KEY_BINDINGS = [
    ('w', 'w', 'W'),
    ('a', 'a', 'A'),
    ('s', 's', 'S'),
    ('d', 'd', 'D'),
    ('z', 'z', 'Z'),
    ('x', 'x', 'X'),
    ('q', 'q', 'Q'),
    ('e', 'e', 'E'),
    ('left', 'la', 'Left Arrow'),
    ('right', 'ra', 'Right Arrow'),
    ('up', 'ua', 'Up Arrow'),
    ('down', 'da', 'Down Arrow'),
]

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ('127.0.0.1', 10000)
sock.connect(server_address)

try:
    # Poll the keyboard ~10x per second and forward pressed keys.
    while True:
        for key, message, label in KEY_BINDINGS:
            if keyboard.is_pressed(key):
                sock.send(message.encode('utf-8'))
                print("Button " + label + " Pressed")
        time.sleep(0.1)
finally:
    # FIX: the original sock.close() after `while True` was unreachable;
    # close the socket on Ctrl-C or error instead of leaking it.
    sock.close()
import cv2
import numpy as np

# Capture from the default camera, show the frame, a colour mask, and the
# masked result; record the raw frames to output.avi until Esc is pressed.
cap = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))

while True:
    _, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # HSV range of the colour being isolated.
    lower_color = np.array([30,150,50])
    upper_color = np.array([255,255,180])
    mask = cv2.inRange(hsv, lower_color, upper_color)
    res = cv2.bitwise_and(frame, frame, mask= mask)
    # FIX: the VideoWriter was created but never fed any frames.
    out.write(frame)
    cv2.imshow('frame', frame)
    cv2.imshow('mask', mask)
    cv2.imshow('res', res)
    k = cv2.waitKey(5) & 0xff
    if k == 27:  # Esc
        break

cv2.destroyAllWindows()
cap.release()
# FIX: release the writer so the AVI container is finalized on disk.
out.release()
|
import shutil
from PIL import Image
from whylogs.app.config import SessionConfig, WriterConfig
from whylogs.app.session import session_from_config
def _run_image_logging_session(tmpdir, image_files, session_name,
                               open_images=False, **logger_kwargs):
    """Shared driver for the image-logging tests (FIX: the two tests were
    near-identical copies).

    Builds a local-protobuf whylogs session rooted at a fresh tmpdir, logs
    every image (optionally opened as a PIL Image first), asserts the
    expected number of profiled columns, and cleans up.
    """
    output_path = tmpdir.mkdir("whylogs")
    shutil.rmtree(output_path, ignore_errors=True)
    writer_config = WriterConfig("local", ["protobuf"], output_path.realpath())
    # Round-trip the writer config through YAML to exercise (de)serialization.
    yaml_data = writer_config.to_yaml()
    WriterConfig.from_yaml(yaml_data)
    session_config = SessionConfig("project", "pipeline", writers=[writer_config])
    session = session_from_config(session_config)
    with session.logger(session_name, **logger_kwargs) as logger:
        for image_file_path in image_files:
            image = Image.open(image_file_path) if open_images else image_file_path
            logger.log_image(image)
        profile = logger.profile
        columns = profile.columns
        # 19 image-derived features are expected per profile.
        assert len(columns) == 19
    shutil.rmtree(output_path, ignore_errors=True)


def test_log_image(tmpdir, image_files):
    """Logging images by file path yields the expected profile columns."""
    _run_image_logging_session(tmpdir, image_files, "image_test")


def test_log_pil_image(tmpdir, image_files):
    """Logging pre-opened PIL images through a rotating, cached logger."""
    _run_image_logging_session(tmpdir, image_files, "image_pil_test",
                               open_images=True, with_rotation_time="s",
                               cache_size=1)
|
# This script reads a .docx report so that similar text found via Baidu
# search can later be highlighted/annotated.
# Only [.docx] files are supported, not legacy [.doc] files.
from docx import Document

# Absolute path of the input document goes in r'xxxx'.
path = r'C:\Users\admin\Desktop\未完整版:美腾科技所处行业分析报告(菁亿投顾-2020-05-25).docx'

def get_docx_paragraph(path):
    """Return the document's non-empty paragraph texts as a list of strings."""
    paragraph = []
    document = Document(path)
    for i in document.paragraphs:
        # Pitfall: append to the list — assigning from append() yields None.
        paragraph.append(i.text)
    # A list comprehension is the simplest way to drop empty paragraphs.
    paragraph = [i for i in paragraph if i != '']
    return paragraph

# Re-emit the text into a fresh document.
paragraph = get_docx_paragraph(path)
new_doc = Document()
for i in paragraph:
    new_doc.add_paragraph(i)
new_doc.save(r'C:\Users\admin\Desktop\test.docx')
from models import Pool
from contracts.commit_processor import CommitProcessor
class PoolCreated(CommitProcessor):
    """Commit processor that materializes a Pool record from a
    pool_created_loanDAO event."""

    # (commit-data key, Pool attribute) pairs, applied in order.
    _FIELD_MAP = (
        ("poolId", "id"),
        ("manager", "manager"),
        ("loanId", "loanId"),
        ("cosigner", "cosigner"),
        ("cosignerLimit", "cosigner_limit"),
        ("cosignerData", "cosigner_data"),
        ("started", "started"),
        ("tracker", "tracker"),
        ("token", "token"),
        ("raised", "raised"),
        ("collected", "collected"),
    )

    def __init__(self):
        self.opcode = "pool_created_loanDAO"

    def process(self, commit, *args, **kwargs):
        """Copy the commit's event payload onto a new Pool, then persist
        both the commit and the pool."""
        payload = commit.data
        pool = Pool()
        for source_key, attr in self._FIELD_MAP:
            setattr(pool, attr, payload.get(source_key))
        commit.save()
        pool.save()
# -*- coding: utf-8 -*-
from django import forms

# Simple upload form: a name plus the image file itself.
class PhotosForm(forms.Form):
    name = forms.CharField(max_length = 100, label='名字:')
    picture = forms.ImageField(label='图片:')
import os
import sys
TABSPACE = " " * 4 # Formatting purposes
ini_path = "resources/memory_address.ini"
target_path = "include/offsets.h"
output_header = [
"////////////////////////////////////",
"////// Auto-generated offsets //////",
"////// by Malik R. Booker ///////",
"////////////////////////////////////\n",
"#pragma once\n",
"#include <vector>\n",
"namespace Offsets",
"{",
]
output = ""
output_footer = "};"
def isVector(item):
    """Return True when the tokenized .ini line holds more than one hex
    literal before any ';' comment token — i.e. it declares a vector."""
    hits = 0
    for token in item:
        if token[0] == ";":
            # Everything after a comment token is ignored.
            break
        if "0x" in token:
            hits += 1
    return hits > 1
def handleRegularItem(item) -> str:
    """Render the first three tokens ('name = value') of a scalar .ini line
    as an `unsigned int` declaration (without the trailing ';')."""
    tokens = item[:3]
    return f"unsigned int {' '.join(tokens)}"
def handleVectorItem(item) -> str:
    """Render a multi-value .ini line ('name = v1 v2 ...') as a
    `std::vector<unsigned int>` initializer (without the trailing ';')."""
    # Count the hex literals before any ';' comment token; the values start
    # at index 2 (after the name and '='), so the slice end is count + 1.
    count = 1
    for token in item:
        if token[0] == ";":
            break
        if "0x" in token:
            count += 1
    values = ", ".join(item[2:count + 1])
    return "std::vector<unsigned int> " + item[0] + " = { " + values + " }"
def parseBuff(buff) -> str:
    """Convert one stripped .ini line into its C++ output fragment.

    Section headers become comments, assignments become declarations, and
    blank/comment lines return '' (i.e. are skipped by the caller).
    """
    first = buff[0]
    if first in ("\n", "", ";"):
        return ""
    if first == "[":
        # Section header -> C++ comment (brackets stripped).
        return f"\n{TABSPACE}// {buff.strip()[1:-1]}"
    if "=" in buff:
        tokens = buff.split(" ")
        rendered = handleVectorItem(tokens) if isVector(tokens) else handleRegularItem(tokens)
        return rendered + ";"
    return ""
def parseIniFile(src, dest):
    """Translate the .ini offset file at *src* into a C++ header written to
    *dest*, echoing the generated text to stdout."""
    output = "\n".join(output_header)
    with open(src, "r") as f:
        for line in f:
            line = line.strip()
            if line:
                buff = parseBuff(line)
                if len(buff) > 0:
                    output += TABSPACE + buff + "\n"
    output += output_footer
    print(output)
    # FIX: report the actual destination argument, not the global default
    # target_path (they differed whenever a caller passed another path).
    print(f"\n\nWritten to {dest}.")
    with open(dest, "w") as f:
        f.write(output)

if __name__ == "__main__":
    parseIniFile(ini_path, target_path)
|
def filter(f, L):
    """Return a list of the elements of L for which predicate f is truthy.

    (Deliberately shadows the builtin `filter` — educational reimplementation
    that returns a list rather than an iterator.)
    """
    return [e for e in L if f(e)]
def map(f, L):
    """Return a list with f applied to every element of L.

    (Deliberately shadows the builtin `map` — educational reimplementation
    that returns a list rather than an iterator.)
    """
    return [f(e) for e in L]
def foldl(g, z, L):
    """Left fold: combine the elements of L into z, applying
    g(element, accumulator) from left to right."""
    acc = z
    for x in L:
        acc = g(x, acc)
    return acc
|
shopping_list = ["eggs", "milk", "fish", "apples", "bread", "chicken"]

def searchList(item_list, item):
    """Report whether *item* appears in *item_list*, as a readable message."""
    found = item in item_list
    if found:
        return f"Yes! You have {item} on your list!"
    return f"No, You don't have {item} on your list."

print(searchList(shopping_list, "milk"))
print(searchList(shopping_list, "bannanas"))
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation

# Figure with an (initially empty) line, limited to x in [0, 2], y in [-2, 2].
fig = plt.figure()
ax = plt.axes(xlim=(0,2),ylim=(-2,2))
line, = ax.plot([],[],lw=2)

def init():
    # Blit base frame: start with an empty line.
    line.set_data([], [])
    return line,

def animate(i):
    # Frame i: a travelling sin*cos wave sampled at 1000 points,
    # shifted by 0.01 per frame.
    x = np.linspace(0,2,1000)
    y = np.sin(2*np.pi*(x-0.01*i))*np.cos(22*np.pi*(x-0.01*i))
    line.set_data(x,y)
    return line,

# 200 frames at 20 ms per frame; blit=True redraws only the line artist.
animator = animation.FuncAnimation(fig, animate, init_func=init,frames=200,
                                   interval=20, blit=True)
plt.show()
#!/usr/bin/python
# -*- coding: utf-8 -*-
# author: Leandro Batista
# e-mail: leandrobatistapereira98@gmail.com
# NOTE: Python 2 script (print statements). Builds a shopping cart of
# product dicts and totals up their prices.
carrinho = []
produto1 = {'nome': 'Tenis', 'valor': 21.70}
produto2 = {'nome': 'Meia', 'valor': 10}
produto3 = {'nome': 'Camiseta', 'valor': 17.30}
produto4 = {'nome': 'Calca', 'valor': 300.00}
carrinho.append(produto1)
carrinho.append(produto2)
carrinho.append(produto3)
carrinho.append(produto4)
print "Seu carrinho possui ", len(carrinho), " itens."
# Sum the price of every product in the cart.
total = 0
for c in carrinho:
    total += c['valor']
print 'O valor total e de ', total
from lib.yaml_config import YamlConfig as Config

# Global configuration instance shared by the application, loaded at import.
# project_dir = "/home/tech/PycharmProjects/mvid"
config = Config('/etc/spyspace/mvid_conf.yaml')
|
import csv
import errno
import os
# For every district listed in districtlist.csv (name, html path, js path),
# generate a static dashboard page plus the JS that loads and sorts its data.
csvfile = open('districtlist.csv', 'r')
reader = csv.reader(csvfile)
for row in reader:
    district = row[0]
    htmlurl = row[1]
    jsurl = row[2]
    # Create the output directory if it does not exist yet.
    if not os.path.exists(os.path.dirname(htmlurl)):
        try:
            os.makedirs(os.path.dirname(htmlurl))
        except OSError as exc: # Guard against race condition
            # NOTE(review): this handler requires `import errno` at the top
            # of the file; without it, this line raises NameError.
            if exc.errno != errno.EEXIST:
                raise
    # NOTE(review): these files (and csvfile above) are never closed —
    # consider `with open(...)` context managers.
    htmlfile = open(htmlurl, 'w')
    jsfile = open(jsurl, 'w')
    # Static HTML page with the district name spliced in.
    htmltxt = '''<!DOCTYPE html>
<html lang="en">
<head>
<!-- Global site tag (gtag.js) - Google Analytics -->
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-164997550-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-164997550-2');
</script>
<title>Coronavirus in '''+district+''' | coronainbihar.github.io</title>
<meta name="title" content="Coronavirus Outbreak in'''+district+''': Blockwise Dashboard"/>
<link rel="icon" type="image/png" href="../../icon.png"/>
<meta charset="utf-8">
<meta name="description" content="An effort to track coronavirus outbreak in Bihar with tables of the number of cases by district and block.">
<meta name="keywords" content="Corona, Bihar, Covid-19, Blockwise, Block, Coronavirus">
<meta name="author" content="Anand Kumar Verma">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" type="text/css" href="../../index.css">
</head>
<body>
<div class="header">
<h1>Covid-19 Dashboard for '''+district+'''</h1>
</div>
<div class="row">
<div class="column side">
</div>
<div class="column middle">
<p><br></p>
<p id="hdemo"></p>
<table id="demo"></table>
<script src="index.js" type="text/javascript"></script>
</div>
<div class="column side">
</div>
</div>
<div class="footer">
<p>Developed by <a target="_blank" href="https://www.linkedin.com/in/anand-kumar-verma/">Anand Kumar Verma.</a></p>
</div>
</body>
</html>
'''
    # Client-side JS: fetch <district>csv.json, render the summary and the
    # sortable per-block table.
    jstxt = '''var xmlhttp = new XMLHttpRequest();
xmlhttp.onreadystatechange = function() {
if (this.readyState == 4 && this.status == 200) {
var myObj = JSON.parse(this.responseText);
var len = myObj.length ;
var htxt = "" ;
htxt += "<c>Confirmed<br>"+myObj[len-1].confirmed+"</c><a >Active<br>"+myObj[len-1].active+"</a><r>Recovered<br>"+myObj[len-1].recovered+"</r><d>Deceased<br>"+myObj[len-1].deceased+"</d>" ;
document.getElementById("hdemo").innerHTML = htxt;
var txt = "" ;
txt += "<table>" ;
txt += "<tr><th onclick=\\"sortTable(0, 0)\\">Block ⇅</th><th onclick=\\"sortTable(1, 1)\\" style=\\"color:orange;\\">Confirmed ⇅</th><th onclick=\\"sortTable(2, 1)\\" style=\\"color:blue;\\">Active ⇅</th><th onclick=\\"sortTable(3, 1)\\" style=\\"color:green;\\">Recovered ⇅</th><th onclick=\\"sortTable(4, 1)\\" style=\\"color:red;\\">Deceased ⇅</th></tr>" ;
var x ;
for (x = 0; x < len-1; x++) {
/* txt += "<tr><td>" + myObj[x].name + "</td></tr>"; */
txt += "<tr><td>"+myObj[x].block+"</td><td>"+myObj[x].confirmed+"</td><td>"+myObj[x].active+"</td><td>"+myObj[x].recovered+"</td><td>"+myObj[x].deceased+"</td></tr>" ;
}
txt += "</table>"
document.getElementById("demo").innerHTML = txt;
}
};
xmlhttp.open("GET", "'''+district+'''csv.json", true);
xmlhttp.send();
function sortTable(n, type) {
var table, rows, switching, i, x, y, shouldSwitch, dir, switchcount = 0;
table = document.getElementById("demo");
switching = true;
// Set the sorting direction to ascending:
dir = "asc";
/* Make a loop that will continue until
no switching has been done: */
while (switching) {
// Start by saying: no switching is done:
switching = false;
rows = table.rows;
/* Loop through all table rows (except the
first, which contains table headers): */
for (i = 1; i < (rows.length - 1); i++) {
// Start by saying there should be no switching:
shouldSwitch = false;
/* Get the two elements you want to compare,
one from current row and one from the next: */
x = rows[i].getElementsByTagName("TD")[n];
y = rows[i + 1].getElementsByTagName("TD")[n];
/* Check if the two rows should switch place,
based on the direction, asc or desc: */
if (type == 0){
if (dir == "asc") {
if (x.innerHTML.toLowerCase() > y.innerHTML.toLowerCase()) {
// If so, mark as a switch and break the loop:
shouldSwitch = true;
break;
}
} else if (dir == "desc") {
if (x.innerHTML.toLowerCase() < y.innerHTML.toLowerCase()) {
// If so, mark as a switch and break the loop:
shouldSwitch = true;
break;
}
}
}
else {
if (dir == "asc") {
if (Number(x.innerHTML) > Number(y.innerHTML)) {
shouldSwitch = true;
break;
}
} else if (dir == "desc") {
if (Number(x.innerHTML) < Number(y.innerHTML)) {
shouldSwitch = true;
break;
}
}
}
}
if (shouldSwitch) {
/* If a switch has been marked, make the switch
and mark that a switch has been done: */
rows[i].parentNode.insertBefore(rows[i + 1], rows[i]);
switching = true;
// Each time a switch is done, increase this count by 1:
switchcount ++;
} else {
/* If no switching has been done AND the direction is "asc",
set the direction to "desc" and run the while loop again. */
if (switchcount == 0 && dir == "asc") {
dir = "desc";
switching = true;
}
}
}
}
'''
    htmlfile.write(htmltxt)
    jsfile.write(jstxt)
def swap_case(s):
    """Return *s* with the case of every cased character inverted.

    Non-alphabetic characters pass through unchanged. For ASCII input this
    matches str.swapcase().
    """
    # FIX: removed the debug prints (the input list and the swapcase
    # comparison) that polluted stdout on every call to this pure function.
    swapped = []
    for ch in s:
        if ch.islower():
            swapped.append(ch.upper())
        elif ch.isupper():
            swapped.append(ch.lower())
        else:
            swapped.append(ch)
    return ''.join(swapped)

if __name__ == '__main__':
    word = 'Www.HackerRank.com 345'
    print('word: ', word)
    result = swap_case(word)
    print(result)
from django import forms
from django.contrib.auth.forms import UserChangeForm
from django.contrib.auth import get_user_model
from . import models
class LoginForm(forms.Form):
    user_id = forms.CharField(label="이메일ID")
    user_pw = forms.CharField(widget=forms.PasswordInput, label="비밀번호")

    # Form-level validation hook: verifies the credentials against the DB
    # and attaches field errors when they do not match.
    def clean(self):
        # Pull the already-cleaned values by key.
        user_id = self.cleaned_data.get("user_id")
        user_pw = self.cleaned_data.get("user_pw")
        # Check the submitted credentials against the stored user.
        try:
            user = models.User.objects.get(username=user_id)
            if user.check_password(user_pw):
                return self.cleaned_data
            else:
                # Attach the error to the password field.
                self.add_error("user_pw", forms.ValidationError("Password is wrong"))
        except models.User.DoesNotExist:
            self.add_error("user_id", forms.ValidationError("User does not exist"))
# Built on Django's ModelForm.
class SignUpForm(forms.ModelForm):
    """Sign-up form: model fields plus credential fields with confirmation."""
    user_id = forms.CharField(label="아이디")
    user_pw = forms.CharField(widget=forms.PasswordInput, label="비밀번호")
    re_user_pw = forms.CharField(widget=forms.PasswordInput, label="비밀번호 확인")

    class Meta:
        model = models.User
        fields = (
            "user_name",
            "birthdate",
            "phone_num",
            "user_addr",
            "post_num",
            "avatar",
        )

    def clean_re_user_pw(self):
        """Field-level validation: both password entries must match."""
        user_pw = self.cleaned_data.get("user_pw")
        re_user_pw = self.cleaned_data.get("re_user_pw")
        if user_pw != re_user_pw:
            raise forms.ValidationError("Password confirmation does not match")
        return user_pw

    def save(self, *args, **kwargs):
        """Create and persist the user with a hashed password.

        Returns the saved user instance (FIX: the original returned None,
        breaking Django's ModelForm.save() contract for callers that use
        the return value).
        """
        # commit=False builds the instance without writing it to the DB yet.
        user = super().save(commit=False)
        user.username = self.cleaned_data.get("user_id")
        user.set_password(self.cleaned_data.get("user_pw"))  # hash the password
        user.user_name = self.cleaned_data.get("user_name")
        # NOTE(review): "user_position" is not a declared field on this form,
        # so this value is always None — confirm whether a field is missing.
        user.user_position = self.cleaned_data.get("user_position")
        user.phone_num = self.cleaned_data.get("phone_num")
        user.birthdate = self.cleaned_data.get("birthdate")
        user.user_addr = self.cleaned_data.get("user_addr")
        user.post_num = self.cleaned_data.get("post_num")
        user.avatar = self.cleaned_data.get("avatar")
        user.save()
        return user
class FindPasswordForm(forms.Form):
    user_id = forms.CharField(label="이메일ID")
    phone_num = forms.CharField(label="전화번호")

    # Form-level validation hook: confirm the account exists before
    # proceeding with password recovery.
    def clean(self):
        # Pull the already-cleaned values by key.
        user_id = self.cleaned_data.get("user_id")
        phone_num = self.cleaned_data.get("phone_num")
        # Check the submitted data against the DB.
        try:
            user = models.User.objects.get(username=user_id)
            # NOTE(review): this looks up the phone number independently of
            # the user found above, so it does not verify that the phone
            # belongs to that account — confirm whether a combined lookup
            # (username AND phone_num) was intended.
            phone_num = models.User.objects.get(phone_num=phone_num)
        except models.User.DoesNotExist:
            self.add_error("user_id", forms.ValidationError("User does not exist"))
# Profile edit form: exposes only the user's editable contact/profile fields.
class UpdateProfileForm(forms.ModelForm):
    class Meta:
        model = models.User
        fields = [
            "phone_num",
            "user_addr",
            "post_num",
            "user_bio",
            "avatar",
        ]
|
import subprocess

'''
The details of each options is explained in doall.py
You can check it by using:
python3 doall.py --help
'''

'''
# example 1 (same as last Version):
# compare the method using frequent kmers and the method using all kmers
# all other settings are the same
cmd = "python3 doall.py --randSource --randRead"
subprocess.call(cmd, shell=True)
# Note: here the "--reuse" option enables you to use the exact same genome
# and reads used in the previous run
cmd = "python3 doall.py --freqKmer --reuse"
subprocess.call(cmd, shell=True)
'''

# example 2 (updated):
# run multiple times with different min-hash threshold (1st filter threshold)
# and the jaccard threshold (2nd filter threshold)
# Note1: you may want to save the final precision and recall. In order to do this,
# you need to use the option "--save" followed by the file you use to save the result
# the save format is:
# ###file start:
# hThreshold1, jThreshold1, precision1, recall1
# hThreshold2, jThreshold2, precision2, recall2
# ........
# Note2:
# here the jThreshold (the 2nd threshold) can be set using --minjThres and --maxjThres.
# These two options provide the lower and upper bounds of the jThreshold.
# The option jGap gives the increase gap. Namely, the program will try jThreshold:
# minjThres, minjThres + jGap, minjThres + 2*jGap, ...
# Note3:
# here the hThreshold is obtained according to jThreshold.
# hThreshold = jThreshold / given_times
# And you can set the range of the times using --minTimes and --maxTimes
# e.g. --minTimes 2 --maxTimes 3 means the program will try both:
# hThreshold = jThreshold/2 and hThreshold = jThreshold/3
save_path = "data/precision_and_recall_all_kmer.txt"
cmd = "python3 doall.py --randSource --randRead --minjThres 0.01 --maxjThres 0.10 --jGap 0.02 --minTimes 2 --maxTimes 3 --save %s" % save_path
subprocess.call(cmd, shell=True)
# sample output in the save_path
# 0.005 0.01 0.8675496688741722 0.9978237214363439
# 0.015 0.03 0.9969861362266426 0.8998911860718172
# 0.025 0.05 0.9983818770226537 0.6713819368879217
# 0.034999999999999996 0.06999999999999999 1.0 0.4542981501632209
# 0.045 0.09 1.0 0.1441784548422198
# 0.0033333333333333335 0.01 0.8629107981220657 1.0
# 0.01 0.03 0.9958992384299942 0.9249183895538629
# 0.016666666666666666 0.05 0.9984313725490196 0.6926006528835691
# 0.02333333333333333 0.06999999999999999 1.0 0.48258977149075083
# 0.03 0.09 1.0 0.2578890097932535
save_path = "data/precision_and_recall_freq_kmer.txt"
cmd = "python3 doall.py --reuse --freqKmer --minjThres 0.01 --maxjThres 0.10 --jGap 0.02 --minTimes 2 --maxTimes 3 --save %s" % save_path
subprocess.call(cmd, shell=True)

# example 3 (same as previous version):
# you may want to use different settings of genome and reads as input data
# let's take the "frequent kmer" mode as an example
cmd = "python3 doall.py --randSource --lenRef 100000 --randRead --nRead 500 --lenRead 1000 --minOvlp 200 --freqKmer"
subprocess.call(cmd, shell=True)
# Also, you can set a different mutation rate and indel rate when generating reads
# Note: when you want to use a different genome or reads (or both) from the previous command, do NOT use the "--reuse" option.
# Because, if you do, the setting won't change, and you will use the same data as the previous one.
cmd = "python3 doall.py --randSource --randRead --mut 0.1 --ins 0.02 --del 0.02 --freqKmer"
subprocess.call(cmd, shell=True)
# of course you can use your own genome and reads instead of randomly generated ones
# Note: to do so, do NOT use the "--randSource" and "--randRead" options
cmd = "python3 doall.py --ref data/refGenome_rand_len1000000.fa --read data/read_rand_n500_len10000_0.05_0.01_0.01.fa --freqKmer"
subprocess.call(cmd, shell=True)
# and you also can use only your own genome and randomly generate reads from it
cmd = "python3 doall.py --ref data/refGenome_rand_len1000000.fa --randRead --freqKmer"
subprocess.call(cmd, shell=True)

# example 4 (same as previous version):
# As we discussed, the "--smartFreq" option enables you to smartly choose the kmer frequency range.
# the program will sort the kmer list and only use the first (genome)-length kmers for use
# the frequency is at least 2
cmd = "python3 doall.py --randSource --randRead --freqKmer --smartFreq"
subprocess.call(cmd, shell=True)
###END##########################################
from rest_framework.test import APITestCase
from django.contrib.auth.models import User
from rest_framework.test import APIRequestFactory, force_authenticate
from rest_framework import status
from accounts.views import UserProfile
class UserProfileTest(APITestCase):
    """Integration tests for the ``UserProfile`` API view."""

    fixtures = ['dump.json']

    def _profile_request(self):
        # Build a GET request against the profile endpoint; authentication,
        # when needed, is applied by the individual test.
        return APIRequestFactory().get('users', format='json')

    def test_get_user_detail_should_return_200(self):
        request = self._profile_request()
        force_authenticate(request, user=User.objects.get(username='test3'))
        response = UserProfile.as_view()(request)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_get_user_detail_should_return_401_if_unauthorized(self):
        # No force_authenticate call: the request is anonymous.
        response = UserProfile.as_view()(self._profile_request())
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_get_user_detail_should_return_profile_of_logged_in_user(self):
        request = self._profile_request()
        force_authenticate(request, user=User.objects.get(username='test2'))
        response = UserProfile.as_view()(request)
        payload = response.data
        for field in ("id", "email", "username"):
            self.assertIn(field, payload)
        self.assertEqual(payload.get("username"), "test2")
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from decimal import Decimal
@python_2_unicode_compatible
class UnidadOrganica(models.Model):
    """An organic (organisational) unit with its own budget ceiling."""
    nombre = models.CharField(max_length=255)
    especial = models.BooleanField(default=False)
    actividades = models.BooleanField(default=False)
    # Budget ceiling assigned to this unit.
    techo = models.DecimalField(max_digits=19, decimal_places=5, default=Decimal('0.000'))

    class Meta:
        verbose_name = u'Unidad orgánica'
        verbose_name_plural = u'Unidades orgánicas'
        ordering = ['nombre']

    def __str__(self):
        return self.nombre
@python_2_unicode_compatible
class Unidad(models.Model):
    """A unit that belongs to an organic unit, with its own budget ceiling."""
    pertenece_a = models.ForeignKey(UnidadOrganica)
    nombre = models.CharField(max_length=255)
    abreviatura = models.CharField(max_length=20, default='')
    # Budget ceiling assigned to this unit.
    techo = models.DecimalField(max_digits=19, decimal_places=5, default=Decimal('0.000'))

    class Meta:
        verbose_name = 'Unidad'
        verbose_name_plural = 'Unidades'
        ordering = ['nombre']

    def __str__(self):
        return self.nombre
@python_2_unicode_compatible
class AsignacionPresupuestal(models.Model):
    """A budget allocation identified by funding source and line item."""
    fuente = models.CharField(max_length=100)
    rubro = models.CharField(max_length=255)

    class Meta:
        verbose_name = u'Asignación presupuestal'
        verbose_name_plural = 'Asignaciones presupuestales'

    def __str__(self):
        return self.rubro
@python_2_unicode_compatible
class Opcion(models.Model):
    """A key/value configuration option."""
    clave = models.CharField(max_length=100)
    valor = models.TextField()

    def __str__(self):
        return '{0}: {1}'.format(self.clave, self.valor)
|
import argparse
from functools import partial
from typing import Callable, Tuple
import numpy as np
from starfish.io import Stack
from starfish.pipeline.filter.gaussian_low_pass import GaussianLowPass
from ._base import FilterAlgorithmBase
class GaussianHighPass(FilterAlgorithmBase):
    """High pass filter: subtracts a gaussian-blurred copy from the image."""

    def __init__(self, sigma, **kwargs) -> None:
        """Gaussian high pass filter

        Parameters
        ----------
        sigma : int (default = 1)
            standard deviation of gaussian kernel
        """
        self.sigma = sigma

    @classmethod
    def add_arguments(cls, group_parser: argparse.ArgumentParser) -> None:
        """Register this filter's command-line options on *group_parser*."""
        group_parser.add_argument(
            "--sigma", default=1, type=int, help="standard deviation of gaussian kernel")

    @staticmethod
    def gaussian_high_pass(img: np.ndarray, sigma) -> np.ndarray:
        """
        Applies a gaussian high pass filter to an image

        Parameters
        ----------
        img : numpy.ndarray
            Image to filter
        sigma : Union[float, int]
            Standard deviation of gaussian kernel

        Returns
        -------
        numpy.ndarray :
            Filtered image, same shape as input
        """
        blurred: np.ndarray = GaussianLowPass.low_pass(img, sigma)
        # Mask where the blur exceeds the original pixel BEFORE subtracting:
        # for unsigned integer dtypes `img - blurred` wraps around instead of
        # going negative, so the mask must be computed from the inputs.
        # (Fix: the previous `np.ndarray[bool]` annotation is not valid
        # syntax -- ndarray is not subscriptable as a generic here.)
        over_flow_ind = img < blurred
        res: np.ndarray = img - blurred
        res[over_flow_ind] = 0
        return res

    def filter(self, stack: Stack) -> None:
        """
        Perform in-place filtering of an image stack and all contained aux images.

        Parameters
        ----------
        stack : starfish.Stack
            Stack to be filtered.
        """
        high_pass: Callable = partial(self.gaussian_high_pass, sigma=self.sigma)
        stack.image.apply(high_pass)
        # apply to aux dict too:
        for auxiliary_image in stack.auxiliary_images.values():
            auxiliary_image.apply(high_pass)
|
#!/usr/bin/env python3
# [START sheets_quickstart]
from __future__ import print_function
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import csv
import re
from datetime import date, datetime
# Main parameters
# Only schedule dates up to (and including) this cutoff are exported.
cutoff = date.fromisoformat('2019-12-17')
# Role abbreviations tracked in the output CSV, one column per role.
roles = ['TM', 'SP', 'TT', 'GE', 'E', 'GR', 'IT', 'CJ', '1M']
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']
# The ID and range of a sample spreadsheet.
# SAMPLE_SPREADSHEET_ID = '1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms'
# RANGE_FORMAT = 'Class Data!A2:E'
SPREADSHEET_ID = '1xD1dnISQGhi2xDriT4kG-ABvcZMO-8vCaRclGn3LjsY'
RANGE_FORMAT = 'Schedule 2019!A1:AZ50'
def main():
    """Export each member's role assignment dates to roledates.csv.

    Reads the schedule spreadsheet via the Google Sheets API (OAuth tokens
    are cached in token.pickle) and writes one CSV row per member with the
    date each tracked role was held (or blank if never held).
    """
    creds = None
    # The file token.pickle stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)

    service = build('sheets', 'v4', credentials=creds)

    # Call the Sheets API. (RANGE_FORMAT contains no placeholders, so the
    # previous `.format(1)` call was a no-op and has been dropped.)
    sheet = service.spreadsheets()
    result = sheet.values().get(spreadsheetId=SPREADSHEET_ID,
                                range=RANGE_FORMAT).execute()
    values = result.get('values', [])

    # First row holds the schedule dates; keep only those up to the cutoff.
    dates = list(filter(lambda d: d <= cutoff,
                        [datetime.strptime(d, '%d-%b-%Y').date() for d in values[0][1:]]))

    # newline='' is required by the csv module when passing a file object.
    with open('roledates.csv', 'w', newline='') as wf:
        writer = csv.DictWriter(wf, ["Name"] + roles)
        writer.writeheader()
        for row in values[1:]:
            if row[0] == "Open Roles":
                break
            # Extract (role, date) pairs -- ignoring empty or N/A cells.
            # NOTE(review): range(0, len(dates)-1) skips the last kept date;
            # TODO confirm whether that is intentional.
            rd = filter(lambda p: p[0] and not re.match(r'(?i)N/A', p[0]),
                        [(row[i + 1], dates[i]) for i in range(0, len(dates) - 1)])
            # Expand cells that list multiple roles separated by punctuation.
            rd = [(list(filter(lambda r: re.match(r'\w+', r), re.split(r'\W+', p[0]))), p[1])
                  for p in rd]
            # Keep only the roles we track; later dates overwrite earlier ones.
            dd = dict(filter(lambda rp: rp[0] in roles,
                             [(r, p[1]) for p in rd for r in p[0]]))
            roledates = {"Name": row[0]}
            for role in roles:
                roledates[role] = dd.get(role)
            writer.writerow(roledates)
if __name__ == '__main__':
    main()
# [END sheets_quickstart]
import unittest
from jack_analyzer import JackAnalyzer
#python -m unittest discover -p "*_tests.py"
class JackAnalyzerTests(unittest.TestCase):
    """End-to-end tests: compile each sample .jack file and diff the XML output."""

    def test_array(self):
        self.compile("ArrayTest", "Main")

    def test_main(self):
        self.compile("Square", "Main")

    def test_squaregame(self):
        self.compile("Square", "SquareGame")

    def test_square(self):
        self.compile("Square", "Square")

    def test_main_expressionless(self):
        self.compile("ExpressionLessSquare", "Main")

    def test_squaregame_expressionless(self):
        self.compile("ExpressionLessSquare", "SquareGame")

    def test_square_expressionless(self):
        self.compile("ExpressionLessSquare", "Square")

    #def test_custom(self):
        #self.compile("Custom", "MyClass")

    def compile(self, dirName, fileName):
        # Run the analyzer, then compare its output (<name>__.xml) against
        # the reference file (<name>.xml), ignoring surrounding whitespace.
        analyzer = JackAnalyzer("{0:}/{1:}.jack".format(dirName, fileName))
        analyzer.compile()
        expected = self._stripped_lines("{0:}/{1:}.xml".format(dirName, fileName))
        actual = self._stripped_lines("{0:}/{1:}__.xml".format(dirName, fileName))
        self.assertListEqual(expected, actual)

    def _stripped_lines(self, path):
        # Read a file as a list of whitespace-stripped lines.
        with open(path) as fh:
            return [line.strip() for line in fh]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.