text stringlengths 38 1.54M |
|---|
import FWCore.ParameterSet.Config as cms
# CMSSW EDProducer: merges candidate secondary vertices coming out of the
# deep-flavour PF track-vertex arbitrator at HLT.
hltDeepInclusiveMergedVerticesPF = cms.EDProducer("CandidateVertexMerger",
    maxFraction = cms.double(0.2),       # maximum shared-track fraction parameter
    minSignificance = cms.double(10.0),  # minimum vertex significance parameter
    secondaryVertices = cms.InputTag("hltDeepTrackVertexArbitratorPF")
)
|
class AgenteVainilla:
    """Vanilla (baseline) agent skeleton holding cash, a portfolio, indicator
    access and the environment. The decision logic is not implemented yet."""

    def __init__(self, cash: float, stock_portfolio: dict, indiccators: object, environment: object) -> None:
        # NOTE(review): 'indiccators' looks like a typo for 'indicators', but the
        # name is part of the public signature (keyword callers), so it is kept.
        # Bug fix: the original constructor discarded all of its arguments.
        self.cash = cash
        self.stock_portfolio = stock_portfolio
        self.indiccators = indiccators
        self.environment = environment

    def policy(self):
        """Decide the next action. Placeholder: no strategy implemented yet."""
        # Bug fix: the original 'def policy():' was missing 'self', so calling
        # instance.policy() raised TypeError.
        pass
|
import pkg_resources
from mako.lookup import TemplateLookup
from bravado_types.config import Config
from bravado_types.data_model import SpecInfo
from bravado_types.metadata import Metadata
def render(metadata: Metadata, spec: SpecInfo, config: Config) -> None:
    """
    Render module and stub files for a given Swagger schema.

    :param metadata: Code generation metadata.
    :param spec: SpecInfo representing the schema.
    :param config: Code generation configuration.
    """
    # Custom templates (if configured) take precedence over the bundled ones.
    directories = []
    if config.custom_templates_dir:
        directories.append(config.custom_templates_dir)
    directories.append(
        pkg_resources.resource_filename(__name__, "templates/"))
    lookup = TemplateLookup(directories=directories)

    # Render the runtime module first, then the type stub, with one pass each.
    for template_name, out_path in (
        ("module.py.mako", config.py_path),
        ("module.pyi.mako", config.pyi_path),
    ):
        template = lookup.get_template(template_name)
        with open(out_path, "w") as handle:
            handle.write(template.render(metadata=metadata, spec=spec,
                                         config=config))

    if config.postprocessor:
        config.postprocessor(config.py_path, config.pyi_path)
|
# -*- coding: utf8 -*-
import unittest
from source.array_n_string import *
class TestTwoSum(unittest.TestCase):
    """Tests for TwoSum.two_sum: 1-based indices of a pair summing to target."""

    def test_two_sum_001(self):
        # 3 + 5 == 8 -> 1-based indices (3, 5).
        numbers = [1, 2, 3, 4, 5]
        target = 8
        index1, index2 = TwoSum().two_sum(numbers, target)
        self.assertEqual(index1, 3, 'first value error:[%d]' % index1)
        # Bug fix: the message previously said "first value" for index2 too.
        self.assertEqual(index2, 5, 'second value error:[%d]' % index2)

    def test_two_sum_002(self):
        # No pair of distinct elements sums to 8. Bug fix: the original
        # 'try/except ValueError: pass' also passed when NO exception was
        # raised; assertRaises actually enforces the failure.
        numbers = [1, 2, 3, 4]
        target = 8
        with self.assertRaises(ValueError):
            TwoSum().two_sum(numbers, target)
class TestSortedTwoSum(unittest.TestCase):
    """Tests for SortedTwoSum.two_sum on an already-sorted input list."""

    def test_sorted_two_sum_001(self):
        numbers = [1, 2, 3, 4, 5]
        target = 8
        index1, index2 = SortedTwoSum().two_sum(numbers, target)
        self.assertEqual(index1, 3, 'first value error:[%d]' % index1)
        # Bug fix: the message previously said "first value" for index2 too.
        self.assertEqual(index2, 5, 'second value error:[%d]' % index2)

    def test_sorted_two_sum_002(self):
        # Bug fixes: the original called TwoSum() instead of SortedTwoSum()
        # (copy-paste error), and its 'try/except ValueError: pass' also
        # passed when no exception was raised at all.
        numbers = [1, 2, 3, 4]
        target = 8
        with self.assertRaises(ValueError):
            SortedTwoSum().two_sum(numbers, target)
class TestTwoSumClass(unittest.TestCase):
    """Tests for TwoSumClass, the streaming add()/find() variant."""

    def test_two_sum_class_001(self):
        calculator = TwoSumClass()
        # Seed the structure with 1, 3, 2.
        calculator.add(1)
        calculator.add(3)
        calculator.add(2)
        self.assertEqual(True, calculator.find(4), '1+3=4')
        self.assertEqual(False, calculator.find(7), 'no find 7')
        # Adding a 4 makes 7 reachable, but 8 still is not.
        calculator.add(4)
        self.assertEqual(True, calculator.find(7), '4+3=7')
        self.assertEqual(False, calculator.find(8), 'no find 8')
        # A second 4 allows the duplicate pair 4+4=8.
        calculator.add(4)
        self.assertEqual(True, calculator.find(8), '4+4=8')
class TestValidPalindrome(unittest.TestCase):
    # Placeholder test case: no palindrome tests implemented yet.
    # def terst_
    pass

if __name__ == "__main__":
    # import sys;sys.argv = ['', 'Test.testTwoSum']
    unittest.main()
|
import pygame

# Tile-map editor setup: a lines x cols grid of integer cell values
# (0 = grass, 1 = wall, 2 = road -- see the image mapping below).
pygame.init()
clock = pygame.time.Clock()
fps = 60
word_data = []   # 2D grid of tile values, word_data[row][col]
lines = 16       # number of grid rows
cols = 31        # number of grid columns
monitor = pygame.display.Info()
screen_width = monitor.current_w
screen_height = monitor.current_h
screen = pygame.display.set_mode((screen_width, screen_height))
object_size = screen_width // 30  # pixel size of one square tile

# Initialize every cell to 0 (grass).
for i in range(lines):
    line = []
    for j in range(cols):
        line.append(0)
    word_data.append(line)

object_list = []
# Source images for the three tile types (scaled per-tile when drawing).
grass_image = pygame.image.load("image/grassland.png")
wall_image = pygame.image.load("image/brick_wall.png")
road_image = pygame.image.load("image/cracks.png")
def __drawWorld():
    """Draw the tile grid onto the module-level ``screen``.

    Reads ``word_data`` (0 = grass, 1 = wall, 2 = road), blits one scaled
    tile image per cell, and outlines each cell with a 1px white rectangle.
    """
    # Improvement: the value -> image mapping replaces three copy-pasted
    # if-blocks, and the loop no longer shadows the builtin 'object'.
    tile_images = (grass_image, wall_image, road_image)
    for row in range(lines):
        for col in range(cols):
            value = word_data[row][col]
            if value not in (0, 1, 2):
                continue  # unknown cell values are skipped, as before
            img = pygame.transform.scale(tile_images[value], (object_size, object_size))
            rect = img.get_rect()
            rect.x = col * object_size
            rect.y = row * object_size
            screen.blit(img, rect)
            pygame.draw.rect(screen, (255, 255, 255), rect, 1)
# Main loop: clicking a tile cycles its value 0 -> 1 -> 2 -> 0.
delay = 0         # frames since the last accepted click (debounce timer)
clicked = False   # True while a click is still being debounced
run = True
while run:
    clock.tick(fps)
    __drawWorld()
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
            break
    # Debounce: re-enable clicking every 10 frames.
    delay += 1
    if delay > 10:
        clicked = False
        delay = 0
    if pygame.mouse.get_pressed()[0] == 1 and clicked == False:
        # get mouse position
        mouse_position = pygame.mouse.get_pos()
        for i in range(lines):
            for j in range(cols):
                # check mouseover and clicked condition
                rectangle = pygame.Rect(j * object_size, i * object_size, object_size, object_size)
                if rectangle.collidepoint(mouse_position):
                    word_data[i][j] += 1
                    clicked = True
                    # Wrap back to 0 (grass) after the last tile type.
                    if word_data[i][j] > 2:
                        word_data[i][j] = 0
                # Stop scanning once the clicked cell has been handled.
                if clicked == True:
                    break
            if clicked == True:
                break
    __drawWorld()
    pygame.display.update()
# Dump the final grid to stdout before quitting.
for i in range(lines):
    for j in range(cols):
        print(word_data[i][j], end=" ")
    print("")
pygame.quit()
|
def recursive_binary_search(arr, target):
    """Return True if target is in arr (assumed sorted ascending), else False.

    Recurses on the half of the list that can still contain the target; an
    empty (sub)list means the target is absent, which also terminates the
    recursion when the value is not present at all.
    """
    if not arr:
        # Nothing left to search.
        return False
    mid = len(arr) // 2
    if arr[mid] == target:
        print("target found")
        return True
    if arr[mid] < target:
        # Target can only be in the right half (midpoint excluded).
        return recursive_binary_search(arr[mid + 1:], target)
    # Target can only be in the left half.
    return recursive_binary_search(arr[:mid], target)
# Demo: search a sorted list for 9 (prints "target found" then True).
numbers = [1,2,3,4,5,6,7,8,9]
print(recursive_binary_search(numbers,9))
'''
Select multiple images of the same size that all need cropping in the same area.
Select a region to crop on one image, and apply it to all selected images.
Saves cropped images with filename prefix 'Multicropped-'
Particularly useful for cropping the same area of many screenshots.
'''
from tkinter import filedialog
import tkinter as tk
from PIL import ImageTk, Image
import os
class StatusBar(tk.Frame):
    '''Instructional status bar along bottom.'''

    def __init__(self, master):
        tk.Frame.__init__(self, master)
        # The label tracks this StringVar, so later text changes only need
        # to update the variable.
        self.variable = tk.StringVar()
        self.variable.set('Click Open.')
        self.label = tk.Label(
            self,
            bd=1,
            relief=tk.SUNKEN,
            anchor=tk.W,
            textvariable=self.variable,
            font=('arial', 14, 'normal'),
        )
        self.label.pack(fill=tk.X)
        self.pack(side=tk.BOTTOM, fill=tk.X)

    def update(self, newtext):
        '''Update text displayed in statusbar.'''
        self.variable.set(newtext)
class ButtonBar(tk.Frame):
    '''Container for all buttons (Open / Crop / Save All).'''

    def __init__(self, master):
        tk.Frame.__init__(self, master)
        button = tk.Button(self, text='Open', command=self.openimage)
        button.pack(side=tk.LEFT)
        button_crop = tk.Button(self, text='Crop', command=self.crop)
        button_crop.pack(side=tk.LEFT)
        button_save = tk.Button(self, text='Save All', command=self.saveimages)
        button_save.pack(side=tk.LEFT)
        self.pack(side=tk.BOTTOM, fill=tk.X)
        self.canvas = self.master.canvas
        self.imgfiles = None  # tuple of selected paths from askopenfilenames()
        self.crops = []       # (left, top, right, bottom) regions, applied in order

    def openimage(self):
        '''Select all images to crop. Display first to screen.'''
        try:
            self.master.statusbar.update('1. Select files to crop. '
                                         'Hold command to select multiple.')
            self.imgfiles = filedialog.askopenfilenames()
            imgfile = self.imgfiles[0]
            if imgfile and allimages(self.imgfiles):
                self.canvas.pil_img = Image.open(imgfile)
                self.setimage(self.canvas.pil_img)
                self.canvas.config(width=self.canvas.pil_img.size[0])
                self.canvas.config(height=self.canvas.pil_img.size[1])
                self.crops = []
                self.master.statusbar.update('2. Draw a region to crop.')
        except IOError:
            print('Files selected must be images')
            self.openimage()
        except IndexError:
            # Dialog cancelled: askopenfilenames() returned an empty tuple.
            # Bug fix: this was a bare 'except: pass', which silently hid
            # every other failure as well.
            pass

    def setimage(self, img):
        '''Set canvas to display only the given image.'''
        self.canvas.img = ImageTk.PhotoImage(img)
        self.canvas.create_image(0, 0, anchor=tk.NW, image=self.canvas.img)
        region = (0, 0, self.canvas.img.width(), self.canvas.img.height())
        self.canvas.configure(scrollregion=region)

    def crop(self):
        '''Crop image on canvas according to created rectangle'''
        # (Docstring moved to the top of the method; it previously followed a
        # statement and therefore was not a real docstring.)
        crop_coords = self.master.crop_coords
        if not (crop_coords and hasattr(self.canvas, 'pil_img')):
            return
        # order the rectangle's coordinates to be (left, top, right, bottom)
        ordered_coords = (
            min(crop_coords[0], crop_coords[2]),
            min(crop_coords[1], crop_coords[3]),
            max(crop_coords[0], crop_coords[2]),
            max(crop_coords[1], crop_coords[3]),
        )
        self.canvas.delete('rect')
        self.canvas.pil_img = self.canvas.pil_img.crop(ordered_coords)
        self.setimage(self.canvas.pil_img)
        self.canvas.config(width=ordered_coords[2]-ordered_coords[0])
        self.canvas.config(height=ordered_coords[3]-ordered_coords[1])
        # Remember the region so applyall() can replay it on every file.
        self.crops.append(ordered_coords)
        self.master.crop_coords = None
        self.master.statusbar.update('All images cropped. Crop again or Save.')

    def applyall(self):
        '''Apply all crops to all selected image files.'''
        croppedimages = []
        newfilenames = []
        for imgfile in self.imgfiles:
            image = Image.open(imgfile)
            # Replay every crop in the order the user made them.
            for cropping in self.crops:
                image = image.crop(cropping)
            croppedimages.append(image)
            # Prefix the basename with 'Multicropped-' in the same directory.
            newfilenames.append('/Multicropped-'.join(os.path.split(imgfile)))
        return croppedimages, newfilenames

    def saveimages(self):
        '''Save each cropped image.'''
        if hasattr(self.canvas, 'pil_img'):
            croppedimages, newfilenames = self.applyall()
            for image, filename in zip(croppedimages, newfilenames):
                image.save(filename)
            # Bug fix: the two string fragments previously concatenated to
            # "...with prefixMulticropped-." (missing space).
            self.master.statusbar.update('Saved all images with prefix '
                                         'Multicropped-.')
        else:
            self.master.statusbar.update('Click Open.')
class MulticropApplication(tk.Frame):
    '''Main application for Multicrop.'''
    def __init__(self, root):
        tk.Frame.__init__(self, root)
        self.root = root
        # Scrollbars flank the canvas so oversized images can be panned.
        yscrollbar = tk.Scrollbar(root)
        yscrollbar.pack(side=tk.RIGHT, fill=tk.Y)
        xscrollbar = tk.Scrollbar(root, orient=tk.HORIZONTAL)
        xscrollbar.pack(side=tk.BOTTOM, fill=tk.X)
        self.canvas = tk.Canvas(self,
                                width=400, height=300,
                                yscrollcommand=yscrollbar.set,
                                xscrollcommand=xscrollbar.set)
        # Mouse-drag state: b1 tracks button-1 up/down, (xold, yold) is the
        # drag anchor, crop_coords is the last drawn rectangle (x0, y0, x1, y1).
        self.b1 = 'up'
        self.xold = None
        self.yold = None
        self.crop_coords = None
        self.statusbar = StatusBar(self)
        self.bottomframe = ButtonBar(self)
        self.canvas.pack(side=tk.TOP, fill=tk.BOTH, expand=tk.YES)
        yscrollbar.config(command=self.canvas.yview)
        xscrollbar.config(command=self.canvas.xview)
        self.canvas.bind('<Motion>', self.motion)
        self.canvas.bind('<ButtonPress-1>', self.b1down)
        self.canvas.bind('<ButtonRelease-1>', self.b1up)
    def b1down(self, event):
        '''Left mouse down event. Initiate rectangle drawing.'''
        # Convert window coordinates to canvas coordinates (handles scrolling).
        x = int(self.canvas.canvasx(event.x))
        y = int(self.canvas.canvasy(event.y))
        self.xold = x
        self.yold = y
        self.b1 = 'down'
    def b1up(self, event):
        '''Left mouse up event. End rectangle drawing.'''
        self.b1 = 'up'
        self.xold = None
        self.yold = None
    def motion(self, event):
        '''Mouse movement event. Create rectangle while mouse held down.'''
        if self.b1 == 'down':
            if self.xold is not None and self.yold is not None:
                # Redraw the rubber-band rectangle from anchor to cursor.
                self.canvas.delete('rect')
                xnew = int(self.canvas.canvasx(event.x))
                ynew = int(self.canvas.canvasy(event.y))
                self.crop_coords = (self.xold, self.yold, xnew, ynew)
                # A solid white outline plus a dashed overlay keeps the
                # rectangle visible on both light and dark image regions.
                self.canvas.create_rectangle(*self.crop_coords, tags='rect',
                                             width=2, outline='white')
                self.canvas.create_rectangle(*self.crop_coords, tags='rect',
                                             width=2, dash=(4, 4))
def allimages(imgfiles):
    '''Determine if all given, selected files are images.

    :param imgfiles: iterable of file paths.
    :return: True if every path has a recognized image extension, else False.
    '''
    # Bug fix: the original tuple was ('.png', 'jpg', 'jpeg') -- the missing
    # dots meant .jpg/.jpeg files were always rejected, because
    # os.path.splitext returns extensions WITH the leading dot.
    # Also compare case-insensitively so e.g. photo.PNG is accepted.
    for imgfile in imgfiles:
        if os.path.splitext(imgfile)[1].lower() not in ('.png', '.jpg', '.jpeg'):
            return False
    return True
if __name__ == '__main__':
    # Build the Tk root, mount the application frame, and start the event loop.
    root = tk.Tk()
    root.config(bg='black')
    MulticropApplication(root).pack(side='top', fill='both', expand=True)
    root.mainloop()
import sys
sys.path.append('/home/test/automation/All_repo/SDNC-FUNC/HAPPIEST_FRAMEWORK')
from Config import ControllerConfig
from Config import variables
from Supporting_Libs import snmp_utils
import re
# OID of the SNMP interface-status column (IF-MIB ifTable, column 7).
oid = '1.3.6.1.2.1.2.2.1.7'
up = []    # interface indexes whose reported status is 1 (up)
down = []  # interface indexes with any other status
up_interfaces = ''
down_interfaces = ''

## Performing GET-BULK operation on interface status
print('Performing GET-BULK operation on interface status')
code, output = snmp_utils.perform_odl_snmp_operation(IP=variables.SNMP_server_ip, OID=oid, COMMUNITY=variables.SNMP_COMMUNITY, GETTYPE='GET-BULK')
print(output)
# Bug fixes vs. the original:
#  * the regex was applied to bytes (output.encode('utf-8')) with a str
#    pattern, which raises TypeError on Python 3 -- match the str directly;
#  * the OID dots are now escaped so '.' cannot match arbitrary characters;
#  * up_interfaces/down_interfaces are pre-initialized so the final prints
#    cannot hit NameError when one list is empty.
matches = re.findall(r"^(1\.3\.6\.1\.2\.1\.2\.2\.1\.7\.[0-9]+) (=)+ ([0-9]+)", output, re.M)
for full_oid, _sep, status in matches:
    # The interface index is the last component of the returned OID.
    if_index = full_oid.split(".")[-1]
    if status == '1':
        up.append(if_index)
    else:
        down.append(if_index)
up_interfaces = " ".join(up)
down_interfaces = " ".join(down)
print("The interfaces with idx {0} are in UP state.".format(up_interfaces))
print("The interfaces with idx {0} are in DOWN state.".format(down_interfaces))
# Bug fix: 'len(up) or len(down) > 0' relied on confusing precedence.
if len(up) > 0 or len(down) > 0:
    print("Success")
|
# https://leetcode.com/problems/string-to-integer-atoi/
class Solution:
    def myAtoi(self, str: str) -> int:
        """LeetCode 8: convert a string to a 32-bit signed integer.

        Skips leading spaces, consumes one optional sign, reads consecutive
        ASCII digits, and clamps the result to [-2**31, 2**31 - 1].

        NOTE: the parameter is named 'str' (shadowing the builtin) to keep
        the published signature intact.
        """
        n = len(str)
        pos = 0
        # 1. Skip leading blanks.
        while pos < n and str[pos] == ' ':
            pos += 1
        # 2. Optional sign.
        sign = 1
        if pos < n and str[pos] in '+-':
            if str[pos] == '-':
                sign = -1
            pos += 1
        # 3. Accumulate digits until the first non-digit.
        value = 0
        while pos < n and str[pos] in '0123456789':
            value = 10 * value + int(str[pos])
            pos += 1
        # 4. Clamp into the signed 32-bit range.
        if sign == 1:
            return min(value, 2 ** 31 - 1)
        return max(-value, -2 ** 31)
if __name__ == '__main__':
    # Self-test: (input string, expected 32-bit-clamped integer) pairs.
    tests = [
        ("42", 42),
        ("", 0),
        ("-", 0),
        ("-42", -42),
        ("4193 with words", 4193),
        ("words and 987", 0),
        ("-91283472332", -2147483648),
        ("91283472332", 2147483647)
    ]
    solution = Solution()
    for num_str, num_int in tests:
        result = solution.myAtoi(num_str)
        if result == num_int:
            print("PASSED")
        else:
            # Bug fix: the failure message previously interpolated num_str
            # twice instead of showing the actual result.
            print(f"FAILED at {num_str}, expected {num_int} but got {result}")
import json
from django.db import models
from django.utils.six import python_2_unicode_compatible
from channels import Group
from .settings import MSG_TYPE_MESSAGE
from django.contrib.auth.models import User
# Create your models here.
class UserProfile(models.Model):
    # One-to-one link to Django's auth User model.
    # NOTE(review): Django >= 2.0 requires an explicit on_delete argument here
    # (e.g. on_delete=models.CASCADE) -- confirm the project's Django version.
    user = models.OneToOneField(User)
    def __str__(self):
        return self.user.username
# Create your models here.
@python_2_unicode_compatible
class Room(models.Model):
    """A chat room. Messages are fanned out over a channels Group and
    persisted as Message rows."""
    title = models.CharField(max_length=255)
    staff_only = models.BooleanField(default=False)

    def __str__(self):
        return self.title

    @property
    def websocket_group(self):
        '''
        returns the channels group that sockets should
        subscribe to get sent messages as they are generated
        '''
        return Group("room-%s" % self.id)

    def send_message(self, message, user, msg_type=MSG_TYPE_MESSAGE):
        """
        called to send a message to the room on behalf of a user.
        """
        final_msg = {'room': str(self.id), 'message': message, 'username': user.username, 'msg_type': msg_type}
        # send out the message to everyone in the room
        self.websocket_group.send(
            {"text": json.dumps(final_msg)}
        )
        if message:
            # Persist non-empty messages. Bug fix: the original re-fetched
            # the row with Room.objects.get(pk=self.id) -- an unnecessary
            # database query, since 'self' IS that row.
            msg = Message()
            msg.room = self
            msg.username = user.username
            msg.message = message
            msg.save()
class Message(models.Model):
    # A single chat message stored for history.
    # NOTE(review): Django >= 2.0 requires on_delete for ForeignKey
    # (e.g. on_delete=models.CASCADE) -- confirm the project's Django version.
    room = models.ForeignKey(Room)
    username = models.CharField(max_length=255)
    message = models.TextField(max_length=1024)
    def __str__(self):
        return self.room.title + self.username
|
"""Interface with git locally and remotely."""
import glob
import json
import logging
import os
import re
import sys
import tarfile
import time
from datetime import datetime
from subprocess import CalledProcessError, PIPE, Popen, STDOUT
IS_WINDOWS = sys.platform == 'win32'

# Parses "git remote -v" output: (name, url, fetch|push) per line.
RE_ALL_REMOTES = re.compile(r'([\w./-]+)\t([A-Za-z0-9@:/\\._-]+) \((fetch|push)\)\n')
# Parses "git ls-remote" output: SHA + refs/(heads|tags)/<name>; an optional
# trailing "^{}" marks a dereferenced annotated tag entry.
RE_REMOTE = re.compile(r'^(?P<sha>[0-9a-f]{5,40})\trefs/(?P<kind>heads|tags)/(?P<name>[\w./-]+(?:\^\{})?)$',
                       re.MULTILINE)
# Matches 10-digit Unix timestamps, one per line (e.g. "--pretty=format:%ct").
RE_UNIX_TIME = re.compile(r'^\d{10}$', re.MULTILINE)

# CI environment variables (AppVeyor, CircleCI, Travis) copied into
# auto-generated commit messages for build traceability.
WHITELIST_ENV_VARS = (
    'APPVEYOR',
    'APPVEYOR_ACCOUNT_NAME',
    'APPVEYOR_BUILD_ID',
    'APPVEYOR_BUILD_NUMBER',
    'APPVEYOR_BUILD_VERSION',
    'APPVEYOR_FORCED_BUILD',
    'APPVEYOR_JOB_ID',
    'APPVEYOR_JOB_NAME',
    'APPVEYOR_PROJECT_ID',
    'APPVEYOR_PROJECT_NAME',
    'APPVEYOR_PROJECT_SLUG',
    'APPVEYOR_PULL_REQUEST_NUMBER',
    'APPVEYOR_PULL_REQUEST_TITLE',
    'APPVEYOR_RE_BUILD',
    'APPVEYOR_REPO_BRANCH',
    'APPVEYOR_REPO_COMMIT',
    'APPVEYOR_REPO_NAME',
    'APPVEYOR_REPO_PROVIDER',
    'APPVEYOR_REPO_TAG',
    'APPVEYOR_REPO_TAG_NAME',
    'APPVEYOR_SCHEDULED_BUILD',
    'CI',
    'CI_PULL_REQUEST',
    'CI_PULL_REQUESTS',
    'CIRCLE_BRANCH',
    'CIRCLE_BUILD_IMAGE',
    'CIRCLE_BUILD_NUM',
    'CIRCLE_BUILD_URL',
    'CIRCLE_COMPARE_URL',
    'CIRCLE_PR_NUMBER',
    'CIRCLE_PR_REPONAME',
    'CIRCLE_PR_USERNAME',
    'CIRCLE_PREVIOUS_BUILD_NUM',
    'CIRCLE_PROJECT_REPONAME',
    'CIRCLE_PROJECT_USERNAME',
    'CIRCLE_REPOSITORY_URL',
    'CIRCLE_SHA1',
    'CIRCLE_TAG',
    'CIRCLE_USERNAME',
    'CIRCLECI',
    'HOSTNAME',
    'LANG',
    'LC_ALL',
    'PLATFORM',
    'TRAVIS',
    'TRAVIS_BRANCH',
    'TRAVIS_BUILD_ID',
    'TRAVIS_BUILD_NUMBER',
    'TRAVIS_COMMIT',
    'TRAVIS_COMMIT_RANGE',
    'TRAVIS_EVENT_TYPE',
    'TRAVIS_JOB_ID',
    'TRAVIS_JOB_NUMBER',
    'TRAVIS_OS_NAME',
    'TRAVIS_PULL_REQUEST',
    'TRAVIS_PYTHON_VERSION',
    'TRAVIS_REPO_SLUG',
    'TRAVIS_SECURE_ENV_VARS',
    'TRAVIS_TAG',
    'TRAVIS_TEST_RESULT',
    'USER',
)
class GitError(Exception):
    """Raised if git exits non-zero."""

    def __init__(self, message, output):
        """Store the human-readable message and the raw git output."""
        super(GitError, self).__init__(message, output)
        self.message = message
        self.output = output
def chunk(iterator, max_size):
    """Chunk a list/set/etc.

    :param iter iterator: The iterable object to chunk.
    :param int max_size: Max size of each chunk. Remainder chunk may be smaller.

    :return: Yield list of items.
    :rtype: iter
    """
    gen = iter(iterator)
    while True:
        # Pull up to max_size items; the shared 'gen' resumes where the
        # previous chunk stopped.
        piece = []
        for item in gen:
            piece.append(item)
            if len(piece) == max_size:
                break
        if not piece:
            # Source exhausted.
            return
        yield piece
def run_command(local_root, command, env_var=True, pipeto=None, retry=0, environ=None):
    """Run a command and return the output.

    :raise CalledProcessError: Command exits non-zero.

    :param str local_root: Local path to git root directory.
    :param iter command: Command to run.
    :param bool env_var: Define GIT_DIR environment variable (on non-Windows).
    :param function pipeto: Pipe `command`'s stdout to this function (only parameter given).
    :param int retry: Retry this many times on CalledProcessError after 0.1 seconds.
    :param dict environ: Environment variables to set/override in the command.

    :return: Command output.
    :rtype: str
    """
    log = logging.getLogger(__name__)

    # Setup env.
    env = os.environ.copy()
    if environ:
        env.update(environ)
    if env_var and not IS_WINDOWS:
        env['GIT_DIR'] = os.path.join(local_root, '.git')
    else:
        env.pop('GIT_DIR', None)

    # Run command.
    with open(os.devnull) as null:
        main = Popen(command, cwd=local_root, env=env, stdout=PIPE, stderr=PIPE if pipeto else STDOUT, stdin=null)
        if pipeto:
            pipeto(main.stdout)
            main_output = main.communicate()[1].decode('utf-8')  # Might deadlock if stderr is written to a lot.
        else:
            main_output = main.communicate()[0].decode('utf-8')
    log.debug(json.dumps(dict(cwd=local_root, command=command, code=main.poll(), output=main_output)))

    # Verify success.
    if main.poll() != 0:
        if retry < 1:
            raise CalledProcessError(main.poll(), command, output=main_output)
        time.sleep(0.1)
        # Bug fix: the retry call previously dropped the 'environ' overrides,
        # so retried commands ran with a different environment.
        return run_command(local_root, command, env_var, pipeto, retry - 1, environ)
    return main_output
def get_root(directory):
    """Get root directory of the local git repo from any subdirectory within it.

    :raise GitError: If git command fails (dir not a git repo?).

    :param str directory: Subdirectory in the local repo.

    :return: Root directory of repository.
    :rtype: str
    """
    try:
        output = run_command(directory, ['git', 'rev-parse', '--show-toplevel'], env_var=False)
    except CalledProcessError as exc:
        raise GitError('Failed to find local git repository root in {}.'.format(repr(directory)), exc.output)
    # Git prints forward slashes; normalize to backslashes on Windows.
    if IS_WINDOWS:
        output = output.replace('/', '\\')
    return output.strip()
def list_remote(local_root):
    """Get remote branch/tag latest SHAs.

    :raise GitError: When git ls-remote fails.

    :param str local_root: Local path to git root directory.

    :return: List of tuples containing strings. Each tuple is sha, name, kind.
    :rtype: list
    """
    command = ['git', 'ls-remote', '--heads', '--tags']
    try:
        output = run_command(local_root, command)
    except CalledProcessError as exc:
        raise GitError('Git failed to list remote refs.', exc.output)

    # Dereference annotated tags if any. No need to fetch annotations.
    if '^{}' in output:
        parsed = list()
        for group in (m.groupdict() for m in RE_REMOTE.finditer(output)):
            # A "name^{}" entry immediately following its tag entry carries
            # the commit SHA the annotated tag points at; prefer that SHA
            # over the tag object's own SHA.
            dereferenced, name, kind = group['name'].endswith('^{}'), group['name'][:-3], group['kind']
            if dereferenced and parsed and kind == parsed[-1]['kind'] == 'tags' and name == parsed[-1]['name']:
                parsed[-1]['sha'] = group['sha']
            else:
                parsed.append(group)
    else:
        parsed = [m.groupdict() for m in RE_REMOTE.finditer(output)]
    return [[i['sha'], i['name'], i['kind']] for i in parsed]
def filter_and_date(local_root, conf_rel_paths, commits):
    """Get commit Unix timestamps and first matching conf.py path. Exclude commits with no conf.py file.

    :raise CalledProcessError: Unhandled git command failure.
    :raise GitError: A commit SHA has not been fetched.

    :param str local_root: Local path to git root directory.
    :param iter conf_rel_paths: List of possible relative paths (to git root) of Sphinx conf.py (e.g. docs/conf.py).
    :param iter commits: List of commit SHAs.

    :return: Commit time (seconds since Unix epoch) for each commit and conf.py path. SHA keys and [int, str] values.
    :rtype: dict
    """
    dates_paths = dict()

    # Filter without docs.
    for commit in commits:
        if commit in dates_paths:
            continue
        # "git ls-tree" prints only paths that exist at that commit; empty
        # output means none of the conf.py candidates exist there.
        command = ['git', 'ls-tree', '--name-only', '-r', commit] + conf_rel_paths
        try:
            output = run_command(local_root, command)
        except CalledProcessError as exc:
            raise GitError('Git ls-tree failed on {0}'.format(commit), exc.output)
        if output:
            # Timestamp (first slot) is filled in below; keep the first
            # matching conf.py path now.
            dates_paths[commit] = [None, output.splitlines()[0].strip()]

    # Get timestamps by groups of 50.
    # Batching avoids one git invocation per commit while keeping the
    # command line short.
    command_prefix = ['git', 'show', '--no-patch', '--pretty=format:%ct']
    for commits_group in chunk(dates_paths, 50):
        command = command_prefix + commits_group
        output = run_command(local_root, command)
        timestamps = [int(i) for i in RE_UNIX_TIME.findall(output)]
        for i, commit in enumerate(commits_group):
            dates_paths[commit][0] = timestamps[i]

    # Done.
    return dates_paths
def fetch_commits(local_root, remotes):
    """Fetch from origin.

    :raise CalledProcessError: Unhandled git command failure.

    :param str local_root: Local path to git root directory.
    :param iter remotes: Output of list_remote().
    """
    fetch_command = ['git', 'fetch', 'origin']
    # Fetch all known branches first.
    run_command(local_root, fetch_command)

    # Then explicitly fetch any ref whose SHA is not yet known locally
    # ("git reflog <sha>" fails for SHAs that have never been fetched).
    for sha, name, kind in remotes:
        try:
            run_command(local_root, ['git', 'reflog', sha])
        except CalledProcessError:
            run_command(local_root, fetch_command + ['refs/{0}/{1}'.format(kind, name)])
            run_command(local_root, ['git', 'reflog', sha])
def export(local_root, commit, target):
    """Export git commit to directory. "Extracts" all files at the commit to the target directory.

    Set mtime of RST files to last commit date.

    :raise CalledProcessError: Unhandled git command failure.

    :param str local_root: Local path to git root directory.
    :param str commit: Git commit SHA to export.
    :param str target: Directory to export to.
    """
    log = logging.getLogger(__name__)
    target = os.path.realpath(target)
    mtimes = list()  # .rst names whose mtime is rewritten after extraction

    # Define extract function.
    def extract(stdout):
        """Extract tar archive from "git archive" stdout.

        :param file stdout: Handle to git's stdout pipe.
        """
        queued_links = list()  # links extracted after all regular files exist
        try:
            with tarfile.open(fileobj=stdout, mode='r|') as tar:
                for info in tar:
                    log.debug('name: %s; mode: %d; size: %s; type: %s', info.name, info.mode, info.size, info.type)
                    # realpath guards against entries escaping the target
                    # directory (path traversal).
                    path = os.path.realpath(os.path.join(target, info.name))
                    if not path.startswith(target):  # Handle bad paths.
                        log.warning('Ignoring tar object path %s outside of target directory.', info.name)
                    elif info.isdir():  # Handle directories.
                        if not os.path.exists(path):
                            os.makedirs(path, mode=info.mode)
                    elif info.issym() or info.islnk():  # Queue links.
                        queued_links.append(info)
                    else:  # Handle files.
                        tar.extract(member=info, path=target)
                        if os.path.splitext(info.name)[1].lower() == '.rst':
                            mtimes.append(info.name)
                for info in queued_links:
                    # There used to be a check for broken symlinks here, but it was buggy
                    tar.extract(member=info, path=target)
        except tarfile.TarError as exc:
            log.debug('Failed to extract output from "git archive" command: %s', str(exc))

    # Run command.
    run_command(local_root, ['git', 'archive', '--format=tar', commit], pipeto=extract)

    # Set mtime.
    # Each .rst file gets the timestamp of the last commit that touched it.
    for file_path in mtimes:
        last_committed = int(run_command(local_root, ['git', 'log', '-n1', '--format=%at', commit, '--', file_path]))
        os.utime(os.path.join(target, file_path), (last_committed, last_committed))
def clone(local_root, new_root, remote, branch, rel_dest, exclude):
    """Clone "local_root" origin into a new directory and check out a specific branch. Optionally run "git rm".

    :raise CalledProcessError: Unhandled git command failure.
    :raise GitError: Handled git failures.

    :param str local_root: Local path to git root directory.
    :param str new_root: Local path empty directory in which branch will be cloned into.
    :param str remote: The git remote to clone from to.
    :param str branch: Checkout this branch.
    :param str rel_dest: Run "git rm" on this directory if exclude is truthy.
    :param iter exclude: List of strings representing relative file paths to exclude from "git rm".
    """
    log = logging.getLogger(__name__)
    output = run_command(local_root, ['git', 'remote', '-v'])
    # Build remote name -> [fetch_url, push_url] from "git remote -v".
    remotes = dict()
    for match in RE_ALL_REMOTES.findall(output):
        remotes.setdefault(match[0], [None, None])
        if match[2] == 'fetch':
            remotes[match[0]][0] = match[1]
        else:
            remotes[match[0]][1] = match[1]
    if not remotes:
        raise GitError('Git repo has no remotes.', output)
    if remote not in remotes:
        raise GitError('Git repo missing remote "{}".'.format(remote), output)

    # Clone.
    try:
        run_command(new_root, ['git', 'clone', remotes[remote][0], '--depth=1', '--branch', branch, '.'])
    except CalledProcessError as exc:
        raise GitError('Failed to clone from remote repo URL.', exc.output)

    # Make sure user didn't select a tag as their DEST_BRANCH.
    # ("git symbolic-ref HEAD" fails on a detached HEAD, i.e. a tag checkout.)
    try:
        run_command(new_root, ['git', 'symbolic-ref', 'HEAD'])
    except CalledProcessError as exc:
        raise GitError('Specified branch is not a real branch.', exc.output)

    # Copy all remotes from original repo.
    for name, (fetch, push) in remotes.items():
        try:
            run_command(new_root, ['git', 'remote', 'set-url' if name == 'origin' else 'add', name, fetch], retry=3)
            run_command(new_root, ['git', 'remote', 'set-url', '--push', name, push], retry=3)
        except CalledProcessError as exc:
            raise GitError('Failed to set git remote URL.', exc.output)

    # Done if no exclude.
    if not exclude:
        return

    # Resolve exclude paths.
    # Glob patterns are expanded relative to rel_dest inside the new clone.
    exclude_joined = [
        os.path.relpath(p, new_root) for e in exclude for p in glob.glob(os.path.join(new_root, rel_dest, e))
    ]
    log.debug('Expanded %s to %s', repr(exclude), repr(exclude_joined))

    # Do "git rm".
    try:
        run_command(new_root, ['git', 'rm', '-rf', rel_dest])
    except CalledProcessError as exc:
        raise GitError('"git rm" failed to remove ' + rel_dest, exc.output)

    # Restore files in exclude.
    run_command(new_root, ['git', 'reset', 'HEAD'] + exclude_joined)
    run_command(new_root, ['git', 'checkout', '--'] + exclude_joined)
def commit_and_push(local_root, remote, versions):
    """Commit changed, new, and deleted files in the repo and attempt to push the branch to the remote repository.

    :raise CalledProcessError: Unhandled git command failure.
    :raise GitError: Conflicting changes made in remote by other client and bad git config for commits.

    :param str local_root: Local path to git root directory.
    :param str remote: The git remote to push to.
    :param sphinxcontrib.versioning.versions.Versions versions: Versions class instance.

    :return: If push succeeded.
    :rtype: bool
    """
    log = logging.getLogger(__name__)
    current_branch = run_command(local_root, ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
    run_command(local_root, ['git', 'add', '.'])

    # Check if there are no changes.
    try:
        run_command(local_root, ['git', 'diff', 'HEAD', '--no-ext-diff', '--quiet', '--exit-code'])
    except CalledProcessError:
        pass  # Repo is dirty, something has changed.
    else:
        log.info('No changes to commit.')
        return True

    # Check if there are changes excluding those files that always change.
    output = run_command(local_root, ['git', 'diff', 'HEAD', '--no-ext-diff', '--name-status'])
    for status, name in (l.split('\t', 1) for l in output.splitlines()):
        if status != 'M':
            break  # Only looking for modified files.
        components = name.split('/')
        if '.doctrees' not in components and components[-1] != 'searchindex.js':
            break  # Something other than those two dirs/files has changed.
    else:
        # for/else: the loop completed without break, so every change was
        # an always-changing build artifact.
        log.info('No significant changes to commit.')
        return True

    # Commit.
    # The newest remote commit's date/short-SHA is embedded in the commit
    # message, plus whitelisted CI environment variables for traceability.
    latest_commit = sorted(versions.remotes, key=lambda v: v['date'])[-1]
    commit_message_file = os.path.join(local_root, '_scv_commit_message.txt')
    with open(commit_message_file, 'w') as handle:
        handle.write('AUTO sphinxcontrib-versioning {} {}\n\n'.format(
            datetime.utcfromtimestamp(latest_commit['date']).strftime('%Y%m%d'),
            latest_commit['sha'][:11],
        ))
        for line in ('{}: {}\n'.format(v, os.environ[v]) for v in WHITELIST_ENV_VARS if v in os.environ):
            handle.write(line)
    try:
        run_command(local_root, ['git', 'commit', '-F', commit_message_file])
    except CalledProcessError as exc:
        raise GitError('Failed to commit locally.', exc.output)
    os.remove(commit_message_file)

    # Push.
    try:
        run_command(local_root, ['git', 'push', remote, current_branch])
    except CalledProcessError as exc:
        # A rejected non-fast-forward push means the remote moved under us;
        # return False so the caller can re-sync and retry.
        if '[rejected]' in exc.output and '(fetch first)' in exc.output:
            log.debug('Remote has changed since cloning the repo. Must retry.')
            return False
        raise GitError('Failed to push to remote.', exc.output)
    log.info('Successfully pushed to remote repository.')
    return True
|
from index_map import indexes
from get_objects import get_objects
from PIL import Image
import numpy as np
class Data:
    """Iterator over the aligned en-es caption dataset.

    Each item pairs an image (numpy array), its detected objects, the original
    Spanish caption, the machine translations (Azure, IBM), and the English
    reference caption. All files are read line-by-line in lockstep.
    """

    # Exclusive upper bound for the 1-based line index (hoisted magic number).
    NUM_LINES = 27367

    def __init__(self) -> None:
        # NOTE(review): these handles are opened for the object's lifetime and
        # never explicitly closed -- acceptable for a one-shot script.
        self.englishFile = open("en-es/source.final.file-found")
        self.spanishFile = open("en-es/reference.final.file-found")
        self.metadataFile = open("en-es/metadata.final.file-found")
        self.azureFile = open("machine-translations/azure.txt")
        self.ibmFile = open("machine-translations/ibm.txt")
        self.indexMap = indexes()
        self.objects = get_objects()

    def __iter__(self):
        # Restart the line counter. NOTE(review): the underlying files are NOT
        # rewound here, so a second full iteration would read past EOF --
        # confirm single-use intent.
        self.index = 1
        return self

    def __next__(self):
        if self.index >= self.NUM_LINES:
            raise StopIteration
        # Machine translations (one caption per line, aligned across files).
        azureLine = self.azureFile.readline()
        ibmLine = self.ibmFile.readline()
        translations = [azureLine, ibmLine]
        # Original Spanish caption.
        spanishLine = self.spanishFile.readline()
        # English reference caption.
        englishLine = self.englishFile.readline()
        # Image: the metadata line (minus its newline) keys into indexMap,
        # which yields the image filename fragment.
        metadataLine = self.metadataFile.readline()
        imageIndex = self.indexMap[metadataLine[:-1]]
        imageFilename = "images-en-es/files/" + imageIndex[:-1]
        try:
            imageFile = Image.open(imageFilename)
            imageArray = np.asarray(imageFile)
        except Exception:
            # Missing/corrupt image: substitute a black 100x100x3 placeholder.
            # (Bug fix: was a bare 'except:', which also swallowed
            # KeyboardInterrupt and SystemExit.)
            imageArray = np.zeros([100, 100, 3])
        # Detected objects: the first 3 chars of the index are a prefix; the
        # remainder is a 1-based number -- TODO confirm against index_map.
        imageNumber = int(imageIndex[3:]) - 1
        imageObjects = self.objects[imageNumber]
        self.index += 1
        return (imageArray, imageObjects, spanishLine, translations, englishLine)
import logging
import pandas as pd
from scrapper.configuration import config
from scrapper.infraestructure.TwitterScrapper import TwitterScrapper
class TwitteService:
    """High-level scraping service built on top of TwitterScrapper."""

    def __init__(self):
        self._twitter_scrapper = TwitterScrapper()

    def scrap_profiles_from_user_ids(self, user_ids: list) -> list:
        """Fetch the profile for every user id, keeping only valid ones (id > 0)."""
        profiles = []
        total = len(user_ids)
        for done, user_id in enumerate(user_ids, start=1):
            profile = self._twitter_scrapper.retrieve_user_profile(user_id)
            if profile['id'] > 0:
                profiles.append(profile)
            # Log a progress fraction every 10 users.
            if done % 10 == 0:
                logging.info(f'Current progress retrieving profiles:'
                             f' {done/total}')
        return profiles

    def scrap_tweets_from_users_timelines(self,
                                          user_ids: list,
                                          tweets_to_retrieve: int = config.tweets_retrieve_per_timeline
                                          ) -> list:
        """Fetch up to tweets_to_retrieve tweets from each user's timeline,
        keeping only valid tweets (id > 0)."""
        tweets = []
        total = len(user_ids)
        # retrieve user timelines
        for done, user_id in enumerate(user_ids, start=1):
            timeline = self._twitter_scrapper.retrieve_user_timeline(user_id, tweets_to_retrieve)
            tweets.extend(tweet for tweet in timeline if tweet['id'] > 0)
            # Log a progress fraction every 10 users.
            if done % 10 == 0:
                logging.info(f'Current progress retrieving user timelines:'
                             f' {done / total}')
        return tweets
from flask import Flask, render_template, request, jsonify
from marshmallow import Schema, fields, ValidationError
import SO2002A
import string
# validation
class BaseSchema(Schema):
    """Request body schema: {"value": [<display line>, ...]} (list of strings, required)."""
    value = fields.List(fields.String, required=True)
# characters allowed through to the display
printable = set(string.printable)

# initialise
app = Flask(__name__)
message = []  # last lines written to the display; shown on the index page
SO2002A.reset()
# App
def write_to_oled(value):
    """Clear the SO2002A OLED, then write the given lines to it."""
    SO2002A.reset()
    SO2002A.write_lines(value)
@app.route('/')
def index():
    """Landing page; renders the last message written to the display."""
    return render_template('index.htm', message=message)
@app.route('/set', methods=['POST'])
def set_display():
    """Validate the posted JSON, strip non-printable characters from each
    line, remember the message, and write it to the OLED.

    Returns 400 with marshmallow's error messages on invalid input,
    otherwise 'OK'/200.
    """
    global message
    data = request.get_json()
    # validate request body against the schema
    schema = BaseSchema()
    try:
        result = schema.load(data)
    except ValidationError as err:
        return (jsonify(err.messages), 400)
    # FIX: use the validated payload (`result`) -- it was previously computed
    # and then ignored in favour of the raw request body
    value = [''.join(filter(lambda x: x in printable, line)) for line in result['value']]
    message = value
    # write to OLED
    write_to_oled(value)
    return ('OK', 200)
# Run the development server when executed directly.
if __name__ == '__main__':
    app.run(host='0.0.0.0')
import sys, re
dico = {
'févriyé':'NOUN'
}
sent_idx = 1
def guess_tag(idx, s, dico, first_alpha):
    """Heuristically assign a UPOS tag to token `s`.

    idx: 1-based token position in the sentence.
    dico: token -> tag lookup for known words.
    first_alpha: True until the first alphabetic token has been seen, so
    sentence-initial capitals are not mistaken for proper nouns.
    Returns a tag string, or '_' when no rule applies.
    """
    tag = '_'
    if s in "!\"'()+,./:?«»–—‘“”•…":
        return 'PUNCT'
    # Capitalized mid-sentence -> proper noun. The original also tested
    # `and s.title()`, which is always truthy for a non-empty string and
    # therefore dead code (perhaps `s == s.title()` was intended); removed.
    elif idx != 1 and s[0].isupper() and not first_alpha:
        return 'PROPN'
    elif re.match('^[0-9]+$', s):
        return 'NUM'
    elif s in dico:
        return dico[s]
    return tag
# Read sentences from stdin and emit CoNLL-U-style rows with guessed tags.
for line in sys.stdin.readlines():
    # !"'()+,./:?«»–—‘“”•… 0123456789
    orig_line = line.strip('\n')
    # pad punctuation with spaces so each mark becomes its own token
    for c in "!\"'()+,./:?«»–—‘“”•…":
        line = line.replace(c, ' ' + c + ' ')
    # FIX: collapse runs of spaces with ' +'. The original pattern ' *' also
    # matches the empty string between every pair of characters, which (on
    # Python 3.7+) inserts a space there and splits every character into its
    # own token.
    line = re.sub(' +', ' ', line)
    line = line.strip()
    print('# sent_id = %d' % sent_idx)
    print('# text = %s' % orig_line)
    idx = 0
    first_alpha = True
    for tok in line.split(' '):
        idx += 1
        tag = guess_tag(idx, tok, dico, first_alpha)
        if tok[0].isalpha() > 0:
            first_alpha = False
        if tok in '?!:':
            first_alpha = True
        lemma = tok
        # columns: 1 ID, 2 FORM, 3 LEMMA, 4 UPOS, 5 XPOS, 6-10 unused
        row = (idx, tok, lemma, tag, tag, '_', '_', '_', '_', '_')
        print('%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % row)
    print('')
    sent_idx = sent_idx + 1
|
def main():
    """Read a number of minutes from stdin and print the distance covered
    in centimetres at a fixed speed."""
    # escribe tu código abajo de esta línea
    speed = 5.7  # units implied by the formula: cm per 10 seconds -- TODO confirm
    # renamed from `min`, which shadowed the builtin
    minutes = float(input("Dame los minutos: "))
    distance = (speed * (minutes * 60)) / 10
    # NOTE(review): "Centímentros" is a typo in the user-facing text, kept
    # byte-identical in case expected output is checked verbatim
    print("Centímentros recorridos: ", distance)


if __name__ == '__main__':
    main()
|
"""
This module takes care of starting the API Server, Loading the DB and Adding the endpoints
"""
import os
import sendgrid
from sendgrid.helpers.mail import *
from flask import Flask, request, jsonify, url_for, Blueprint,current_app
from api.models import db, User, Pregunta
from api.utils import generate_sitemap, APIException
from flask_mail import Message
from flask_jwt_extended import create_access_token, JWTManager, jwt_required, get_jwt_identity
api = Blueprint('api', __name__)
#registrar usuario
@api.route('/usuario', methods=['POST'])
def create_User():
    """Bulk-create users from a JSON array of user dicts; 400 on empty body.

    NOTE(review): commits once per row, so a mid-list failure leaves earlier
    rows persisted; passwords are stored as received -- confirm hashing
    happens elsewhere.
    """
    data = request.get_json()
    if not data:
        return jsonify({"msg":"error"}),400
    for i in data:
        user = User(name=i["name"], password=i["password"], birthday=i["birthday"], gender=i["gender"],
                    email=i["email"], cant_question=i["cant_question"], nota_alta=i["nota_alta"])
        db.session.add(user)
        db.session.commit()
    return jsonify({"user": "ok"}), 200
#login de usuario
@api.route("/login", methods=["POST"])
def login():
    """Issue a JWT access token for a matching email/password pair.

    400 when either credential is missing, 401 when no user matches.
    """
    email = request.json.get("email", None)
    password = request.json.get("password", None)
    if email is None:
        return jsonify({"message": "Bad user or password"}), 400
    if password is None:
        return jsonify({"message": "Bad user or password"}), 400
    account = User.query.filter_by(email=email, password=password).first()
    if account is None:
        return jsonify({"message": "Bad user or password"}), 401
    token = create_access_token(identity=account.id)
    return jsonify({"token": token}), 200
@api.route("/protected", methods=["GET"])
@jwt_required()
def protected():
    """JWT-protected probe: return the caller's id and email."""
    current_user_id = get_jwt_identity()
    user = User.query.get(current_user_id)
    # NOTE(review): raises AttributeError (500) if the token's user was deleted
    return jsonify({"id": user.id, "email": user.email})
#get info de usuario
@api.route('/usuario', methods=['GET'])
@jwt_required()
def consulta_User():
    """Return the authenticated user's serialized profile (404 if deleted)."""
    current_user_id = get_jwt_identity()
    user = User.query.filter_by(id=current_user_id).first()
    # guard: token may reference a user that no longer exists
    if user is None:
        return jsonify({"message": "Usuario no existe"}), 404
    # FIX: named `payload` -- the original rebound flask's imported `request`
    payload = user.serialize()
    return jsonify(payload), 200
#carga de preguntas a bd
@api.route('/pregunta', methods=['POST'])
def addPregunta():
    """Bulk-load questions from a JSON array.

    NOTE(review): no auth and no validation; a missing key raises
    KeyError (500); commits once per row.
    """
    data = request.get_json()
    for i in data:
        preg = Pregunta(test_log=i["test_log"],frase=i["frase"],option_correcta=i["option_correcta"],option_mal1=i["option_mal1"],option_mal2=i["option_mal2"],option_mal3=i["option_mal3"])
        db.session.add(preg)
        db.session.commit()
    return jsonify({"data": "ok"}), 200
#get de preguntas
@api.route('/pregunta', methods=['GET'])
def infoPregunta():
    """Return every stored question, serialized."""
    preguntas = Pregunta.query.all()
    # FIX: the original rebound flask's imported `request` and shadowed the
    # query result inside its own lambda; list comprehension is equivalent
    payload = [p.serialize() for p in preguntas]
    return jsonify(payload), 200
#update nota
@api.route('/usuario', methods=['PUT'])
@jwt_required()
def change_user_data():
    """Update the authenticated user's question count and top score.

    400 when either field is missing/empty; previously a missing key raised
    an unhandled KeyError (500).
    """
    # buscamos el registro a actualizar
    current_user_id = get_jwt_identity()
    user = User.query.get(current_user_id)
    # obtenemos los datos parametros de entrada (.get avoids KeyError -> 500)
    upd_cant_question = request.json.get("cant_question")
    upd_nota_alta = request.json.get("nota_alta")
    if not upd_cant_question or upd_nota_alta is None:
        return jsonify({"error": "Invalid"}), 400
    # actualizamos los nuevos datos
    user.cant_question = upd_cant_question
    user.nota_alta = upd_nota_alta
    db.session.commit()
    return jsonify({"msg": "Informacion actualizada"}), 200
# Eliminar usuario
@api.route('/usuario', methods=["DELETE"])
@jwt_required()
def delete_usuario():
    """Delete the authenticated user's account; 404 if it no longer exists."""
    current_user_id = get_jwt_identity()
    account = User.query.filter_by(id=current_user_id).first()
    if account is None:
        raise APIException("usuario no existe!",status_code=404)
    db.session.delete(account)
    db.session.commit()
    return jsonify({"Usuario eliminado":"ok"}),200
#RECUPERAR CONTRASEÑA
@api.route("/forgot_pass", methods=["POST"])
def forgot_pass():
    """Email the account's password to the requesting address via SendGrid.

    SECURITY(review): the password appears to be stored and mailed in
    plaintext -- this should become a reset-token flow.
    """
    #paso1 recibir email
    #paso2 corroborar que el mail exista en la base de datos
    #paso3 enviar mail con la contraseña
    email = request.json.get("email", None)
    if not email:
        return jsonify({"message": "Email no registrado"}), 400
    email_registrado = User.query.filter_by(email=email).first()
    if email_registrado is None:
        # FIX: previously fell through and died with AttributeError inside the
        # try block, which the bare except converted to the same 400; fail fast
        return jsonify({"msg": "failed"}), 400
    sg = sendgrid.SendGridAPIClient(api_key=os.environ.get('SENDGRID_API_KEY'))
    from_email = Email("Fitmind@mail.com")
    to_email = To(email)
    subject = "Recuperacion de contraseña"
    content = Content("text/plain", "Su contraseña es: " + email_registrado.password)
    mail = Mail(from_email, to_email, subject, content)
    try:
        sg.client.mail.send.post(request_body=mail.get())
        return jsonify({"msg": "Password enviado"}), 200
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
        return jsonify({"msg": "failed"}), 400
|
from bear.install_data import testRun
# coding=utf-8
import time
import unittest
class TestSendMessage(unittest.TestCase):
    """Smoke test: testRun() returning "OK" confirms BearFrame is installed."""

    def test_send(self):
        # The printed message (Chinese) tells the user to `pip install
        # BearFramework` if this case fails.
        print("如果用例失败,代表BearFrame 框架没有正确安装,请用 {pip install BearFramework}命令安装")
        self.assertEqual(testRun(),"OK")
|
import numpy as np
np.random.seed(2016)
import os
from random import shuffle
import sys
import pandas as pd
import warnings
import platform
import time as tm
import json
from collections import OrderedDict
import pandas as pd
import numpy as np
import glob
#######################################################################################################################
#Deep Learning Example with Keras and Lasagne
########################################################################################################################
########################################################################################################################
#Read and Load train data from subfolders
########################################################################################################################
def load_train():
    """Write a JSON annotation list covering every jpg under test/test_stg1.

    Each entry holds the image path plus one dummy 1x1 rect placeholder.
    Output goes to <file_path>/json_eval.json; `file_path` is a module
    global set in main().
    """
    json_list = []
    path = os.path.join(file_path, 'test/test_stg1', '*.jpg')
    files = glob.glob(path)
    for fl in files:
        # dummy bounding box -- real labels are not available for the test set
        xy_dict = OrderedDict()
        xy_dict['x1'] = 1
        xy_dict['y1'] = 1
        xy_dict['x2'] = 1
        xy_dict['y2'] = 1
        line_dict = OrderedDict()
        line_dict["image_path"] = fl
        line_dict["rects"] = [xy_dict]
        json_list.append(line_dict)
    # removed: unused locals (data_dirs, flbase/img_filename, top80/bot20)
    # and a try/except around pure dict construction that could not raise
    outputfile = os.path.join(file_path, "json_eval.json")
    with open(outputfile, 'w') as outfile:
        json.dump(json_list, outfile, indent=1)
########################################################################################################################
#Data cleansing , feature scalinng , splitting
########################################################################################################################
def Data_Munging():
    """Time-stamped wrapper around load_train()."""
    print("Starting Train feature creation....... at Time: %s" % (tm.strftime("%H:%M:%S")))
    load_train()
    print("Ending Train feature creation....... at Time: %s" % (tm.strftime("%H:%M:%S")))
########################################################################################################################
#Main module #
########################################################################################################################
def main(argv):
    """Script entry point: configure module-level globals, then build features."""
    pd.set_option('display.width', 200)
    # NOTE(review): 'display.height' was removed in newer pandas and this
    # call raises OptionError there -- confirm the targeted pandas version
    pd.set_option('display.height', 500)
    warnings.filterwarnings("ignore")
    # configuration shared with load_train() via globals
    global file_path, use_cache, train_folder, test_folder, restore_from_last_checkpoint,\
        img_rows,img_cols,color_type_global,nb_epoch,batch_size,outputfile
    img_rows, img_cols = 64, 64
    color_type_global = 3
    batch_size = 16
    #nb_epoch = 50
    nb_epoch = 5
    use_cache = 0
    restore_from_last_checkpoint = 0
    train_folder = 'train/annotate'
    test_folder = 'train/cropped_imgs'
    # dataset lives in a different root per OS
    if(platform.system() == "Windows"):
        file_path = 'C:\\Python\\Others\\data\\Nature_Conservancy_Fisheries_Monitoring'
    else:
        file_path = '/mnt/hgfs/Python/Others/data/Nature_Conservancy_Fisheries_Monitoring/'
    Data_Munging()
########################################################################################################################
#Main program starts here #
########################################################################################################################
if __name__ == "__main__":
main(sys.argv) |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 20 15:10:22 2020
@author: scro3517
"""
import numpy as np
import pandas as pd
import random
def modify_df():
    """Load MIMIC-III ECG note events, bucket them by rhythm keyword, and
    assign an 80/20 train/val split.

    Returns the filtered DataFrame with two added columns: 'TextCategory'
    (0-4 rhythm group) and 'Phase' ('train' or 'val').
    """
    # Load the CSV lazily in chunks to bound memory use
    iter_csv = pd.read_csv('/home/scro3517/Desktop/mimic-iii-clinical-database-1.4/NOTEEVENTS.csv', iterator=True, chunksize=5000)
    # Keep ECG-category rows not interpreted by the ordering physician
    df = pd.concat([chunk[(chunk['CATEGORY'] == 'ECG').astype(bool) & ~chunk.TEXT.str.contains('ECG interpreted by ordering physician').astype(bool)] for chunk in iter_csv])
    # Reports must mention at least one of these rhythm keywords
    condition = 'Sinus rhythm|sinus rhythm|Sinus bradycardia|sinus bradycardia|bradycardia|Sinus tachycardia|sinus tachycardia|tachycardia|Atrial fibrillation|atrial fibrillation|Atrial flutter|atrial flutter'
    # Keyword group -> category label; later matches overwrite earlier ones
    mapping_dict = {'Sinus rhythm|sinus rhythm': 0, 'Sinus bradycardia|sinus bradycardia|bradycardia': 1,
                    'Sinus tachycardia|sinus tachycardia|tachycardia': 2,
                    'Atrial fibrillation|atrial fibrillation': 3,
                    'Atrial flutter|atrial flutter': 4}
    # (1) Assign each text report to a category
    df_subset = df[df.TEXT.str.contains(condition)].reset_index()
    group_df = pd.Series(np.zeros((df_subset.shape[0])))
    for key, value in mapping_dict.items():
        bool_series = df_subset.TEXT.str.contains(key)
        group_df[bool_series] = value
    df_subset['TextCategory'] = group_df
    # (2) Assign reports to training and validation sets
    random.seed(0)  # seed is important since we load this multiple times
    shuffled_indices = random.sample(list(range(df_subset.shape[0])), df_subset.shape[0])
    training_length = int(len(shuffled_indices)*0.8)
    validation_indices = shuffled_indices[training_length:]
    df_subset['Phase'] = 'train'
    # FIX: .loc replaces the original chained assignment
    # (df['Phase'][mask] = ...), which pandas flags with
    # SettingWithCopyWarning and may silently fail to write
    df_subset.loc[df_subset.index.isin(validation_indices), 'Phase'] = 'val'
    return df_subset
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sklearn.manifold import TSNE
import argparse
from matplotlib.animation import FuncAnimation
from pose_embedder import FullBodyPoseEmbedder
def tsne_trace(train_path, perp, iter, plot_type, embedder):
    """Run t-SNE over train + test pose samples and plot the 2-d embedding.

    train_path: CSV of labelled training samples; perp/iter: TSNE settings;
    plot_type: 'sns' for a static scatter or 'animate' for an animated trace
    of the test points; embedder: if truthy, map raw landmarks through
    FullBodyPoseEmbedder first.
    """
    df, data_cols, data_subset = load_training_data(train_path)
    print(f'training data shape: {df.shape}')
    test_df = pd.DataFrame(load_test_data())
    test_df.columns = data_cols
    print(f'test data shape: {test_df.shape}')
    # stack train and test so both are embedded in the same t-SNE space
    train_and_test_df = pd.concat([df[data_cols], test_df]).values
    if embedder:
        train_and_test_df = embed_points(train_and_test_df)
    print(f'train and test data shape: {train_and_test_df.shape}')
    tsne, tsne_results = run_tsne(train_and_test_df, perp, iter)
    print(tsne_results.shape)
    tsne_df = pd.DataFrame()
    tsne_df['tsne-2d-one'] = tsne_results[:,0]
    tsne_df['tsne-2d-two'] = tsne_results[:,1]
    tsne_df['exercise'] = df['exercise']
    # exercise label is '<name parts>_<up|down>'; split into name and phase.
    # NOTE(review): Series.append was removed in pandas 2.0 -- this code
    # presumably targets an older pandas.
    exercise_name = df['exercise'].apply(lambda x: ' '.join(x.split('_')[:-1]))
    exercise_name = exercise_name.append(pd.Series(['exercise'] * test_df.shape[0]), ignore_index = True)
    tsne_df['exercise_name'] = exercise_name
    up_down = df['exercise'].apply(lambda x: x.split('_')[-1])
    up_down = up_down.append(pd.Series(['up'] * test_df.shape[0]), ignore_index = True)
    tsne_df['up_down'] = up_down
    # source = df['filename'].apply(lambda x: x.split('.')[0])
    source = df['filename'].apply(lambda x: 'Train point')
    source = source.append(pd.Series(['Test point'] * test_df.shape[0]), ignore_index = True)
    tsne_df['source'] = source
    print(set(exercise_name))
    print(set(up_down))
    print(set(source))
    print(f'tsne data shape: {tsne_df.shape}')
    if plot_type == 'sns':
        sns_plot(tsne_df)
    elif plot_type == 'animate':
        animate_plot(tsne_df)
    else:
        print("unknown plot type, must be either 'animate' or 'sns'")
def embed_points(landmarks_coll):
    """Convert raw (N, 99) landmark rows into (N, 69) pose embeddings.

    Each row is reshaped into 33 (x, y, z) landmarks and passed through
    FullBodyPoseEmbedder; the embedder's output is flattened to 23*3 values
    per sample.
    """
    print(landmarks_coll)        # debug output, left in place
    print(landmarks_coll.shape)
    embedder = FullBodyPoseEmbedder()
    result = [landmarks.reshape(33,3) for landmarks in landmarks_coll]
    result = np.array([embedder(landmarks) for landmarks in result])
    result = result.reshape(-1, 23*3)
    print(result)
    print(result.shape)
    return result
def animate_plot(df):
    """Scatter the train points by 'up_down' label, then animate the test
    points as a growing red trace over the same axes.

    Assumes df orders all 'Train point' rows before the 'Test point' rows
    (as built by tsne_trace).
    """
    nrows = len(df.index)
    test_len = np.sum(df['source'] == 'Test point')
    train_len = nrows - test_len
    print(f'[animate_plot] nrows={nrows}; train_len={train_len}; test_len={test_len}')
    fig = plt.figure(figsize=(16,10))
    train_df = df[:train_len]
    one = train_df['tsne-2d-one']
    two = train_df['tsne-2d-two']
    # label -> colour pairs (zipped below); duplicated labels like 'begin'
    # simply re-plot with the later colour
    mask_ids = ['0', '1', 'begin', 'finish', '2']
    mask_ids.extend(['throughshoulders', 'standing', 'begin', 'allfours', 'up', 'crawling', 'finish', 'backonheels', 'plank', 'down'])
    colours = ['g', 'b', 'y', 'm', 'c']
    colours.extend(['y','m','y','m','g','m','y','m','y','b'])
    for mask_id, colour in zip(mask_ids, colours):
        mask = train_df['up_down'] == mask_id
        plt.scatter(one[mask], two[mask], c=colour)
        print(f"{train_df[mask]['tsne-2d-one']}")
        #plt.scatter(train_df[mask]['tsne-2d-one'], train_df[mask]['tsne-2d-two'], 'go')
    graph, = plt.plot([], [], 'r+-')
    def animate(i):
        # frame i shows the first i test points of the trace
        print(f'animate with i={i}')
        print(f"  {df['tsne-2d-one'].iloc[train_len+i]}")
        graph.set_data(df['tsne-2d-one'][train_len:train_len+i], df['tsne-2d-two'][train_len:train_len+i])
        return graph
    ani = FuncAnimation(fig, animate, frames=test_len, interval=200)
    # set plot limits to cover train and test points
    one = df['tsne-2d-one']
    two = df['tsne-2d-two']
    plt.xlim([one.min(), one.max()])
    plt.ylim([two.min(), two.max()])
    plt.show()
def sns_plot(tsne_df):
    """Static seaborn scatter of the embedding, coloured by 'up_down' and
    styled by train/test source."""
    plt.figure(figsize=(16,10))
    sns.scatterplot(
        x="tsne-2d-one", y="tsne-2d-two",
        #hue="exercise_name",
        hue="up_down",
        style="source",
        #palette=sns.color_palette("Paired"),
        #palette=sns.color_palette("hls", n_exercises),
        data=tsne_df,
        legend="full",
        alpha=0.3
    )
    plt.show()
def load_training_data(path):
    """Load the training CSV: filename, exercise label, then 99 landmark values.

    Returns (DataFrame, landmark column names, landmark values as ndarray).
    """
    data_cols = [f'data{ix}' for ix in range(99)]
    columns = ['filename', 'exercise', *data_cols]
    frame = pd.read_csv(path, names=columns)
    print(frame)
    landmark_values = frame[data_cols].values
    return frame, data_cols, landmark_values
def run_tsne(data_subset, perp, iters):
    """Fit a 2-component t-SNE; returns (fitted TSNE object, embedded points)."""
    tsne = TSNE(n_components=2, verbose=1, perplexity=perp, n_iter=iters)
    tsne_results = tsne.fit_transform(data_subset)
    return tsne, tsne_results
def load_test_data(path='temp.csv'):
    """Load test landmark rows from a comma-delimited file.

    path: file to read; defaults to the previously hard-coded 'temp.csv'
    so existing callers are unaffected.
    """
    return np.loadtxt(path, delimiter=',')
def parse_args():
    """Parse CLI arguments: positional train CSV path, plus TSNE and plot options."""
    parser = argparse.ArgumentParser()
    parser.add_argument('train_path', help='Path to CSV of training samples')
    # NOTE(review): perp/iter arrive as strings (no type=int); callers cast them
    parser.add_argument('--perp', default=50, help='TSNE perplexity to use')
    parser.add_argument('--iter', default=500, help='TSNE iterations to use')
    parser.add_argument('--plot_type', default='animate', help='Type of plot [animate|sns]')
    # embedder/no-embedder flag pair, defaulting to embedder on
    parser.add_argument('--embedder', dest='embedder', action='store_true')
    parser.add_argument('--no-embedder', dest='embedder', action='store_false')
    parser.set_defaults(embedder=True)
    return parser.parse_args()
if __name__ == '__main__':
    # Guarded so importing this module for its helpers doesn't launch a TSNE run.
    args = parse_args()
    tsne_trace(args.train_path, int(args.perp), int(args.iter), args.plot_type, args.embedder)
|
__author__ = 'thiagocastroferreira'
import os
# Corpus and output locations for the EACL referring-names experiments.
root_dir = '/roaming/tcastrof/names/eacl'
parsed_dir = '/roaming/tcastrof/names/regnames/parsed'
mentions_dir = '/roaming/tcastrof/names/eacl/mentions'
webpages_dir = '/roaming/tcastrof/names/regnames/webpages'
# JSON resources under root_dir
file_dbpedia = os.path.join(root_dir, 'name_base.json')
file_urls = os.path.join(root_dir, 'furls.json')
file_entities = os.path.join(root_dir, 'entities.json')
file_titles = os.path.join(root_dir, 'all_titles.json')
file_appositives = os.path.join(root_dir, 'appositives.json')
file_vocabulary = os.path.join(root_dir, 'stats/voc.json')
evaluation_dir = os.path.join(root_dir, 'evaluation/intrinsic_domain')
import os, sys
sys.path.append( os.path.dirname(os.path.dirname((os.path.realpath(__file__)))) )
import time
import torch
import numpy as np
import gym
from Games.envs import DiskonnectPlayerEnv
import wandb
from stable_baselines3 import PPO, TD3, SAC, DDPG
from stable_baselines3.common.vec_env import DummyVecEnv, VecCheckNan
from stable_baselines3.common.env_util import make_vec_env
SEED=20200530
PREV_TIMESTEPS=0
def init_model(env, policy_kwargs=None):
    """Build a PPO model for the Diskonnect environment.

    policy_kwargs may supply 'lr' and 'batch_size'; any other keys are
    ignored because the network architecture is derived from the board
    length. (Default changed from a mutable `{}` to None -- same behaviour,
    avoids the shared-mutable-default pitfall.)
    """
    opts = policy_kwargs if policy_kwargs is not None else {}
    lr = opts.get('lr', 3e-4)
    batch_size = opts.get('batch_size', 32)
    # default device
    device = "cpu"
    # default policy
    policy = 'MlpPolicy'
    # using length of board for proportional hidden layer shapes
    length = env.len
    policy_fcn = [int(length*2), int(length*2)]
    value_fcn = [int(length*2), int(length*2)]
    # separate local so the caller's dict is no longer silently clobbered
    # (the original reassigned the `policy_kwargs` parameter itself)
    net_kwargs = dict( net_arch=[ dict( pi=policy_fcn,
                                        vf=value_fcn ) ] )
    # using PPO
    return PPO(policy = policy,
               env = env,
               learning_rate = lr,
               batch_size = batch_size,
               policy_kwargs = net_kwargs,
               verbose = 0,
               seed = SEED,
               device = device)
def test(model, env, board_len):
    """Play an interactive human-vs-AI Diskonnect game until the board is done.

    Even turns (i % 2 == 0) prompt the human on stdin; odd turns let the
    model act. Each successful jump scores a point; higher score wins,
    equal scores tie.
    """
    i = 0
    scores = [0, 0]  # index 0: human, index 1: AI (i % 2)
    obs = env.reset()
    while not env.board._is_done_():
        print("\nCurrent board:")
        env.board._gen_legal_moves_()
        env.board.render(mode='human')
        if i % 2 == 1:  # player 1 (AI)
            print("\nAI's turn!")
            env.board._gen_legal_moves_()
            action = model.predict(obs)
            action = action[0]
            obs, rew, _, _ = env.step(action)
            if rew > 0:
                scores[i % 2] += 1
            else:
                print("\nAI chose bad move: {}".format(action))
        elif i % 2 == 0:  # player 2 (human)
            print("\nHuman's turn")
            move = [-1, -1]
            moving = input("\nChoose a piece to move:\n")
            try:
                moving = int(moving)
                if (moving > board_len - 1) or (moving < 0):
                    raise ValueError("Piece position must be on the board! Within [0,...,{}]".format(board_len - 1))
                else:
                    if env.board.board[moving] == -1:
                        raise ValueError("Can't use the opponent's piece")
                    else:
                        move[0] = moving
                        dir = input("\nWhich direction are you moving? (L or R)\n")
                        if (dir == 'L') or (dir == 'l'):
                            landing = moving - 2
                        elif (dir == 'R') or (dir == 'r'):
                            landing = moving + 2
                        else:
                            raise ValueError("Invalid direction {} chosen".format(dir))
                        if (landing > board_len) or (landing <= 0):
                            raise ValueError("Move is invalid!")
                        else:
                            middle = int((moving + landing) / 2)
                            if env.board.board[middle] != -1:
                                raise ValueError("Invalid move! Must jump over an opponent's piece.")
                            else:
                                if (moving, landing) in env.board.legal_moves[1]:
                                    env.board.board[moving] = 0
                                    env.board.board[middle] = 0
                                    env.board.board[landing] = 1
                                    scores[i % 2] += 1
                                else:
                                    raise ValueError("Not a valid move: {}".format((moving, landing)))
            except ValueError as error:
                print("\nHuman chose bad move\n")
                print(error)
        i += 1
    idx = max(enumerate(scores), key=lambda x: x[1])[0]
    # FIX: the original tested `scores[idx+1%2]`, which parses as
    # scores[idx + (1 % 2)] == scores[idx + 1] and raises IndexError
    # whenever the AI (idx == 1) holds the top score.
    if scores[idx] == scores[(idx + 1) % 2]:
        idx = -1
    if idx == 0:
        win = "Player"
        msg = "Congratulations you beat the machine!"
    elif idx == 1:
        win = "AI"
        msg = "Congratulations you beat the human!"
    else:
        win = "Neither, it is a tie"
        msg = "Good work to you both!"
    print("\nGame is Finished!!!\nWinner is {}! {}".format(win, msg))
    env.board.render(mode='human')
def main():
    """Load a saved PPO policy and run the interactive Diskonnect game."""
    # train method
    test_method = 'fixed' # can be 'fixed' or 'random'  (NOTE(review): unused below)
    # using Wandb to visualize results
    logging = True
    ### default env/hyper params
    board_len = 9
    player_piece = -1
    boards = [ np.array([1, -1, 0, 1, -1, 0, -1, 1, -1]) ]
    # build environment
    env = gym.make("DiskonnectPlayerEnv-v0", player_piece=player_piece, length=board_len, boards=boards, logging=logging)
    # building policy model
    model = init_model(env)
    # restore policy and optimizer weights from the checkpoint
    f = "Saved_models/diskonnect-model-2"
    state_dict = torch.load(f)
    model.policy.load_state_dict( state_dict['model_state_dict'] )
    model.policy.optimizer.load_state_dict( state_dict['optimizer_state_dict'] )
    if logging:
        wandb.init(name = "random-training-1",
                   project = '1D-Diskonnect',
                   monitor_gym = True,
                   reinit = True)
    # NOTE(review): test() returns None, so rebinding `state_dict` here is misleading
    state_dict = test(model,
                      env,
                      board_len
                      )

if __name__=="__main__":
    main()

file_name = "Saved_models/diskonnect-model-2"  # NOTE(review): unused top-level leftover
import unittest
from rabin_karp import RabinKarp
class RabinKarpTests(unittest.TestCase):
    """Tests for RabinKarp substring search; process() returns the match index."""

    def setUp(self):
        self.rk_one = RabinKarp('abcabaaba', 'aba')
        self.rk_two = RabinKarp('abccdhabacfhfhf', 'abac')
        self.rk_three = RabinKarp('fabdgdtdf', 'fabdgdtdf')

    def test_one(self):
        self.assertEqual(self.rk_one.process(), 3)

    def test_two(self):
        self.assertEqual(self.rk_two.process(), 6)

    def test_three(self):
        # pattern equal to the whole text matches at index 0
        self.assertEqual(self.rk_three.process(), 0)

if __name__ == '__main__':
    unittest.main()
|
import cmapPy.pandasGEXpress.parse as parse
import cmapPy.pandasGEXpress.GCToo as GCToo
import numpy as np
import pandas as pd
# TODO add to metadata
invariant_rids = ['c-661', 'c-662', 'c-663', 'c-664', 'c-665', 'c-666', 'c-667', 'c-668', 'c-669', 'c-670']
def normalize(mfi_gctoo, log=True, inv=True, inv_threshold = 600):
    """Level 2-3 normalization of an MFI GCToo based on prism invariant beads.

    Optionally drops wells with outlier invariant signal, floors values at 1,
    divides each well by its median invariant signal, and log2-transforms.
    The invariant control rows are stripped from the returned GCToo.
    """
    # Level 2-3 Normalization based on prism invariant
    if inv is True:
        mfi_gctoo = remove_outlier_invariants(mfi_gctoo, inv_threshold)
    # floor at 1 so log2 is defined and ratios stay positive
    mfi_gctoo.data_df[mfi_gctoo.data_df < 1] = 1
    data_df = mfi_gctoo.data_df
    data_df = data_df.round(2)
    if inv is True:
        # per-well median across the invariant control analytes
        invs = data_df.loc[invariant_rids].median(axis=0)
        data_df = data_df.divide(invs, axis='columns')
    if log is True:
        data_df = np.log2(data_df)
        mfi_gctoo.col_metadata_df['provenance'] = 'assembled | log2 | median normalized'
    else:
        data_df = data_df
        mfi_gctoo.col_metadata_df['provenance'] = 'assembled | median normalized'
    mfi_gctoo.col_metadata_df['data_level'] = 'normalized'
    # drop invariant control rows from data and row metadata
    data_df = data_df.loc[~data_df.index.isin(invariant_rids)]
    row_metadata_df = mfi_gctoo.row_metadata_df.loc[~mfi_gctoo.row_metadata_df.index.isin(invariant_rids)]
    new_gctoo = GCToo.GCToo(data_df=data_df, row_metadata_df=row_metadata_df, col_metadata_df=mfi_gctoo.col_metadata_df)
    return new_gctoo
def remove_low_bead_wells(mfi_gct, count_gct):
    """Drop MFI wells whose median bead count (from count_gct) is below 20."""
    well_medians = count_gct.data_df.median(axis=0)
    low_wells = [well for well in well_medians[well_medians < 20].index
                 if well in mfi_gct.data_df.columns]
    trimmed_data = mfi_gct.data_df.drop(low_wells, axis=1)
    trimmed_cols = mfi_gct.col_metadata_df.drop(low_wells, axis=0)
    return GCToo.GCToo(data_df=trimmed_data, col_metadata_df=trimmed_cols,
                       row_metadata_df=mfi_gct.row_metadata_df)
def remove_outlier_invariants(gctoo, inv_threshold):
    """Drop wells whose median invariant-bead signal is below inv_threshold."""
    invdata = gctoo.data_df.loc[invariant_rids]
    bad_wells = invdata.median()[invdata.median() < inv_threshold].index
    # drop flagged wells, plus any columns left entirely NaN
    data = gctoo.data_df.drop(bad_wells, axis=1).dropna(axis=1, how='all')
    col_data = gctoo.col_metadata_df.loc[data.columns]
    new_gctoo = GCToo.GCToo(data_df=data, col_metadata_df=col_data, row_metadata_df=gctoo.row_metadata_df)
    return new_gctoo
def dp_normalize(filepath, outfile):
    """Median-normalize a DP11/DP12 GCT by per-well invariant medians (deprecated).

    NOTE(review): `outfile` is unused because the final write is commented
    out, so this returns nothing and writes nothing; the per-cell loops use
    chained indexing, which is slow and may warn/fail on newer pandas.
    """
    # For use with DP11/12 protocol (deprecated)
    df = parse.parse(filepath)
    dp11_invariant_rids = ['661', '662', '663', '664', '665', '666', '667', '668', '669', '670']
    dp12_invariant_rids = ['671', '672', '673', '674', '675', '676', '677', '678', '679', '680']
    # split rows by davepool
    dp11_dex = df.row_metadata_df[df.row_metadata_df['davepool_id'] == 'DP7'].index.tolist()
    dp12_dex = df.row_metadata_df[df.row_metadata_df['davepool_id'] == 'DP8.1'].index.tolist()
    dp11_df = df.data_df.loc[dp11_dex]
    # divide every non-invariant value by its well's invariant median
    for column in dp11_df:
        median = dp11_df[column][dp11_invariant_rids].median()
        for index in dp11_df[column].index:
            if index not in dp11_invariant_rids:
                dp11_df[column][index] = dp11_df[column][index] / median
    dp12_df = df.data_df.loc[dp12_dex]
    for column in dp12_df:
        median = dp12_df[column][dp12_invariant_rids].median()
        for index in dp12_df[column].index:
            if index not in dp12_invariant_rids:
                dp12_df[column][index] = dp12_df[column][index] / median
    # NOTE(review): DataFrame.append was removed in pandas 2.0
    recombine = dp11_df.append(dp12_df)
    recombine.sort_index(inplace=True)
    df.row_metadata_df.sort_index(inplace=True)
    df.col_metadata_df['provenance'] = 'assembled | median normalized'
    df.col_metadata_df['data_level'] = 'normalized'
    my_gctoo = GCToo.GCToo(data_df=recombine, row_metadata_df=df.row_metadata_df, col_metadata_df=df.col_metadata_df)
    #write_gct.write(my_gctoo, outfile)
def no_inv_norm(mfi_gctoo, log=True, inv=True):
    """Normalize without invariant beads: per-davepool, per-well median centering.

    Wells whose median log2 signal is below log2(1500) are shifted by a
    fixed 14.2551 offset; brighter wells are centered on their own median.
    NOTE(review): the `log` and `inv` parameters are accepted but never read.
    """
    # floor at 1 so log2 is defined
    mfi_gctoo.data_df[mfi_gctoo.data_df < 1] = 1
    log_data = np.log2(mfi_gctoo.data_df)
    dp_data = []
    for x in mfi_gctoo.row_metadata_df['davepool_id'].unique():
        temp = log_data.loc[mfi_gctoo.row_metadata_df[mfi_gctoo.row_metadata_df['davepool_id'] == x].index]
        medians = temp.median()
        # dim wells get a fixed offset; bright wells get median-centered
        group1_dex = medians[medians < np.log2(1500)].index
        group2_dex = medians[medians >= np.log2(1500)].index
        group1_data = temp.loc[:, group1_dex]
        group2_data = temp.loc[:, group2_dex]
        group1_norm = group1_data - 14.2551
        group2_norm = group2_data.subtract(group2_data.median(), axis='columns')
        norm_data = pd.concat([group1_norm, group2_norm], axis=1)
        dp_data.append(norm_data)
    concat_data = pd.concat(dp_data, axis=0)
    # restore the original row/column order
    concat_data = concat_data.loc[mfi_gctoo.data_df.index,mfi_gctoo.data_df.columns]
    norm_gct = GCToo.GCToo(data_df=concat_data, row_metadata_df=mfi_gctoo.row_metadata_df,
                           col_metadata_df=mfi_gctoo.col_metadata_df)
    return norm_gct
|
__author__ = 'mwas'
__author__ = 'mwas'
from django import forms
from customer_feedback import models
class admin_login_form(forms.Form):
    """Single-password login form for the site admin.

    SECURITY(review): passwords are compared in plaintext against the Admin
    row -- confirm whether hashing was intended.
    """
    password = forms.CharField(required=False,
                               label="Password",
                               max_length=255,
                               widget=forms.PasswordInput,
                               )

    def clean(self):
        """Attach a field error when the password doesn't match the stored one."""
        clean_data = super(admin_login_form, self).clean()
        try:
            current_password = models.Admin.objects.get(admin_name="admin_name").password
        except models.Admin.DoesNotExist:
            # no admin row yet: only an empty password matches
            current_password = ''
        try:
            if clean_data['password'] != current_password:
                self.errors['password'] = ["invalid password try again"]
        except KeyError:
            # FIX: bare `raise` re-raises the original exception with its
            # traceback (the original raised a fresh, contextless KeyError)
            raise
class change_password_form(forms.Form):
    """Change the admin password after verifying the current one."""
    current_password = forms.CharField(required=False,
                                       label="Current password",
                                       max_length=255,
                                       widget=forms.PasswordInput,
                                       )
    new_password = forms.CharField(required=True,
                                   label="New password",
                                   max_length=255,
                                   widget=forms.PasswordInput,
                                   )

    def clean(self):
        """Flag 'current_password' if it doesn't match the stored admin password."""
        clean_data = super(change_password_form, self).clean()
        try:
            current_password = models.Admin.objects.get(admin_name="admin_name").password
        except models.Admin.DoesNotExist:
            current_password = ''
        try:
            if clean_data['current_password'] != current_password:
                self.errors['current_password'] = ["The password is not the same to the current password"]
        except KeyError:
            # FIX: bare `raise` preserves the original traceback
            # (the original raised a fresh, contextless KeyError)
            raise
class AddEmployeeForm(forms.ModelForm):
    """ModelForm over Employee exposing all model fields.

    NOTE(review): Meta lacks `fields`/`exclude`; Django 1.8+ raises
    ImproperlyConfigured without one -- confirm the targeted Django version.
    """
    class Meta:
        model = models.Employee
class AddCompanyForm(forms.ModelForm):
    """ModelForm over Company with a textarea description override.

    NOTE(review): Meta lacks `fields`/`exclude` (required on Django 1.8+).
    """
    #logo = forms.CharField(label="Logo",
    #widget=forms.ImageField)
    description = forms.CharField(label="Description", required=True,
                                  widget=forms.Textarea)
    class Meta:
        model = models.Company
class AssignEmployee(forms.Form):
    """Pick an employee by primary key.

    NOTE(review): the queryset and `choice` list are evaluated once at class
    definition (import) time, so employees added later never appear; the
    commented-out ModelChoiceField would evaluate lazily instead.
    """
    all_employees = models.Employee.objects.all()
    choice = []
    for i in all_employees:
        choice.append([i.pk, i.fname])
    choose_employee = forms.ChoiceField(choices=choice)
    #choose_employee =forms.ModelChoiceField(models.Employee.objects.all())
class AssingForm(forms.Form):
    """Assign an employee to a company; choices exclude staff already
    assigned to that company (queryset built per-instance, so it stays fresh)."""
    def __init__(self,companyId,*args, **kwargs):
        self.company_id = companyId
        super(AssingForm, self).__init__(*args, **kwargs)
        #employees that are not assigned
        self.fields['choose_employee'] = forms.ModelChoiceField(models.Employee.objects.exclude(assigned__companyName_id = self.company_id))
        #choose_employee =forms.ModelChoiceField(models.Employee.objects.exclude(assigned__companyName_id = self.company_id))
class EmployeeLoginForm(forms.Form):
    """Employee login by first name, last name and password."""
    fname = forms.CharField(max_length=255,
                            required=True, label='First name')
    lname = forms.CharField(max_length=255,
                            required=True, label='Last name')
    password = forms.CharField(max_length=255,
                               required=True, label="Password",
                               widget=forms.PasswordInput)

    def clean(self):
        """Validate the credential triple; on failure, flag the first bad field."""
        if any(self.errors):
            return
        clean_data = super(EmployeeLoginForm, self).clean()
        # FIX: dropped the stray double binding `record_exists=a = ...`
        # since fname and lname are unique in Employee, an existing
        # (fname, lname, password) row means a valid login
        record_exists = models.Employee.objects.\
            filter(fname=clean_data['fname'], lname=clean_data['lname'],
                   password=clean_data['password']).exists()
        if not record_exists:
            # NOTE(review): these fallbacks test each field against *any*
            # employee, leaking which part of the credentials matched
            if not models.Employee.objects.\
                    filter(fname=clean_data['fname']).exists():
                self.errors['fname'] = ["Invalid first name"]
            elif not models.Employee.objects.filter(lname=clean_data['lname']).\
                    exists():
                self.errors['lname'] = ["Invalid last name"]
            elif not models.Employee.objects.filter(password=clean_data['password']).\
                    exists():
                self.errors['password'] = ["Invalid password"]
class AddFeedbackForm(forms.Form):
    """Customer feedback form: name, phone number and free-text comment."""
    fname = forms.CharField(label="First name", max_length=255, required=True)
    lname = forms.CharField(label="Last name", max_length=255, required=True)
    phoneNumber = forms.CharField(label="Phone number", max_length=255, required=True)
    comment = forms.CharField(label="Comment", required=True, widget=forms.Textarea)
|
from typing import List
class Solution:
    def findMaxConsecutiveOnes(self, nums: List[int]) -> int:
        """Return the length of the longest run of truthy values in nums."""
        best = 0
        run = 0
        for value in nums:
            if value:
                run += 1
            else:
                # run ended: remember it if it is the longest so far
                best = max(best, run)
                run = 0
        # account for a run that reaches the end of the list
        return max(best, run)
|
"""
SSW533-get_logs by Yuning Sun
11:54 PM 11/30/20
Module documentation:
"""
import json
import requests
# Fetch merged pull requests for an openharmony repo from the Gitee v5 API.
repo_name = 'kernel_liteos_a'
url = 'https://gitee.com/api/v5/repos/openharmony/' + repo_name + '/pulls'
# SECURITY(review): a live-looking access token is hard-coded here -- it
# should be revoked and read from an environment variable instead.
params = {"access_token": "5b304d9b1353007d8891cc3ea2c84841", "state": "merged"}
res = requests.get(url=url, params=params)
res = json.loads(res.text)
# NOTE(review): prints an empty line only; the parsed result is never used
print()
|
# Original Idea that started it all:
def danny_bub(list):
    """Cocktail (bidirectional bubble) sort, in place; prints the result.

    Alternates a forward bubble pass (floats the max right) with a backward
    pass (sinks the min left), shrinking the unsorted window from both ends.
    """
    length = len(list) - 1
    sorted = False
    counter = 0
    while not sorted:
        sorted = True
        # FIX: the forward pass must reach `length` so the final pair
        # (length-1, length) is compared; the original used
        # range(counter, length - 1) and could terminate with the last
        # pair still out of order (e.g. [1, 2, 3, 5, 4]).
        for i in range(counter, length, 1):
            if list[i] > list[i + 1]:
                list[i], list[i+1] = list[i+1], list[i]
                sorted = False
        if(not sorted):
            for j in range(length, counter, -1):
                if list[j] < list[j - 1]:
                    list[j], list[j-1] = list[j-1], list[j]
            counter += 1
            length -= 1
    # print() call syntax works on both Python 2 and 3 for a single argument
    print(list)
# NOTE(review): `my_list` is not defined above this point -- presumably
# created in the "TEST LISTS" section below; as ordered this raises NameError.
danny_bub(my_list)
# Refined and maximized "Ascending" and "Descending" bubble sort for quick implimentation
def danny_bub2(list):  # Only 121 "Steps" to solve this one
    """Fixed-pass cocktail sort: orders *list* in place, ascending, then prints.

    Each of the len(list)//2 iterations bubbles the largest remaining value to
    the end of the window and the smallest to the front, so the unsorted
    window shrinks from both sides without a "did we swap" flag.
    """
    for i in range((len(list))//2):
        for j in range(i, (len(list) - 1) - i):
            if list[j] > list[j+1]:
                list[j], list[j+1] = list[j+1], list[j]
        for k in range((len(list) - 2) - i, i, -1):
            if list[k] < list[k - 1]:
                list[k], list[k-1] = list[k-1], list[k]
    # Fixed: the bare Python 2 `print list` statement is a SyntaxError under
    # Python 3; the call form behaves identically on both.
    print(list)
# My version of a select sort which "may" be better for larger lists as it emphasizes removing index length as quickly as possible
# My version of a select sort which "may" be better for larger lists as it emphasizes removing index length as quickly as possible
def danny_select(list):
    """Attempted dual-ended selection sort: places the max at the back and the
    min at the front of a shrinking window each outer pass, returning *list*.

    NOTE(review): several spots look suspect and deserve verification:
    - reassigning the loop variable `loc`/`loc2` inside the `for` body does
      not affect the iteration in Python;
    - the swaps below run AFTER each inner loop, using the loop variable's
      final value, so they rely on the scan ending exactly at the boundary;
    - if an inner range is empty, `loc`/`loc2` is unbound and this raises.
    """
    maxPos = 0
    beg = 0
    end = len(list)
    for num in range((len(list) - 1)//2):
        for loc in range(beg + 1, end - num):
            if list[loc] > list[maxPos]:
                if loc == end - num - 1:
                    loc -= 1
                    end -= 1
                else:
                    maxPos = loc
        # Swap the found maximum to the back of the current window.
        list[loc], list[maxPos]=list[maxPos], list[loc]
        minPos = loc - 1
        for loc2 in range(end - 2, num - 1, -1):
            if list[loc2] < list[minPos]:
                if loc2 == beg:
                    beg += 1
                    loc2 += 1
                else:
                    minPos = loc2
        # Swap the found minimum to the front of the current window.
        list[loc2], list[minPos]=list[minPos], list[loc2]
        maxPos = loc2 + 1
        if maxPos == minPos:
            return list
    return list
# NOTE(review): `my_list` is defined later in the file; calling here raises
# NameError when the script runs top-to-bottom.
danny_select(my_list)
# My shorter basic straightforward select which is great for quick implementation
# My shorter basic straightforward select which is great for quick implementation
def danny_shortsort_select(list):
    """Dual-ended selection sort: each outer pass scans the unsorted window,
    swaps the max to the back and the min to the front; returns *list*.

    NOTE(review): the swaps rely on the post-loop values of `loc`/`loc2`
    (the last index scanned), and `maxPos` carries over between passes as
    `loc2 + 1` (the start of the next scan) - confirm on odd corner cases.
    If the outer range runs but an inner range is empty, `loc` is unbound.
    """
    maxPos = 0
    for num in range((len(list) - 1)//2):
        for loc in range(num + 1, len(list) - num):
            if list[loc] > list[maxPos]:
                maxPos = loc
        # `loc` is now the last index of the window: park the max there.
        list[loc], list[maxPos]=list[maxPos], list[loc]
        minPos = loc - 1
        for loc2 in range(loc - 2, num - 1, -1):
            if list[loc2] < list[minPos]:
                minPos = loc2
        # `loc2` is now the first index of the window: park the min there.
        list[loc2], list[minPos]=list[minPos], list[loc2]
        maxPos = loc2 + 1
    return list
# NOTE(review): `my_list` is defined later in the file; this call raises
# NameError when the script runs top-to-bottom.
danny_shortsort_select(my_list)
# Basic Sorts for comparison
def bubble_sort(list):
    """Classic bubble sort: orders *list* in place, ascending, then prints it.

    Repeats full forward sweeps until one completes without a swap.
    """
    length = len(list) - 1
    sorted = False
    while not sorted:
        sorted = True
        for i in range(length):
            if list[i] > list[i + 1]:
                sorted = False
                list[i], list[i+1] = list[i+1], list[i]
    # Fixed: the Python 2 `print list` statement is a SyntaxError under
    # Python 3; the call form behaves identically on both.
    print(list)
bubble_sort(my_list)
def select_sort(list):
    """Selection sort: repeatedly moves the largest remaining element to the
    end of the unsorted prefix. Sorts *list* in place and returns it."""
    for last in range(len(list) - 1, 0, -1):
        # Index of the largest element among positions 0..last.
        biggest = max(range(last + 1), key=list.__getitem__)
        list[last], list[biggest] = list[biggest], list[last]
    return list
# NOTE(review): `my_list` is only assigned on the next line, so this call
# raises NameError - the sample data below likely belongs above the calls.
select_sort(my_list)
# TEST LISTS
my_list = [98,4,6,8,1,45,87,77,3] #150 to 114 Bubble to Danny
my_list2 = [98,87,77,1,8,45,6,4,3] #221 to 150 with this one!
test_list = [35, 26, -8, -24, 1, -10, 29, -20, -2, 8, 41, 0, -22, 26, 12, -25, 41, 49, 31, 41, -19, 23, 40, 19, -3, -40, -7, 11, 42, 47, -2, 37, -24, 31, 5, -49, 22, -10, 33, 17, -50, -1, 37, -26, -18, 37, -31, 7, -26, 47, 31, -5, -42, -18, 1, 44, -39, 5, 21, 5, 32, 4, 47, 10, 9, -6, 10, -12, 8, -21, -31, -32, 11, 16, -2, -41, 26, 38, 43, 35, 40, 43, 4, -11, 21, -23, 31, 35, 40, 9, 45, 8, -5, 16, 50, 42, 30, -42, 11, -1]
test_list2 = [77, 64, 13, -11, 27, 11, 68, -5, 21, 38, 87, 26, -8, 64, 43, -13, 87, 98, 72, 86, -4, 60, 85, 53, 21, -35, 15, 42, 87, 95, 22, 80, -11, 71, 32, -48, 58, 10, 74, 50, -50, 24, 81, -14, -1, 81, -22, 35, -14, 96, 71, 17, -38, -2, 26, 90, -34, 33, 56, 32, 72, 31, 95, 41, 38, 17, 40, 8, 36, -7, -22, -22, 42, 49, 21, -37, 64, 82, 89, 77, 85, 89, 31, 9, 56, -9, 72, 78, 85, 39, 93, 37, 18, 49, 100, 88, 69, -38, 42, 23]
test_list3 = [85, 76, 42, 26, 51, 40, 79, 30, 48, 58, 91, 50, 28, 76, 62, 25, 91, 99, 81, 91, 31, 73, 90, 69, 47, 10, 43, 61, 92, 97, 48, 87, 26, 81, 55, 1, 72, 40, 83, 67, 0, 49, 87, 24, 32, 87, 19, 57, 24, 97, 81, 45, 8, 32, 51, 94, 11, 55, 71, 55, 82, 54, 97, 60, 59, 44, 60, 38, 58, 29, 19, 18, 61, 66, 48, 9, 76, 88, 93, 85, 90, 93, 54, 39, 71, 27, 81, 85, 90, 59, 95, 58, 45, 66, 100, 92, 80, 8, 61, 49]
|
from veides.sdk.stream_hub import StreamHubClient, AuthProperties, ConnectionProperties
from time import sleep
import logging
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Basic example of connecting to Veides Stream Hub")
parser.add_argument("-u", "--username", required=True, help="User's name")
parser.add_argument("-t", "--token", required=True, help="User's token")
parser.add_argument("-i", "--id", required=True, help="Agent's client id")
parser.add_argument("-H", "--host", required=True, help="Host to connect to")
args = parser.parse_args()
client = StreamHubClient(
connection_properties=ConnectionProperties(host=args.host),
# If you want to provide connection properties in environment, use below line instead
# connection_properties=ConnectionProperties.from_env()
auth_properties=AuthProperties(
username=args.username,
token=args.token,
),
# If you want to provide auth properties in environment, use below line instead
# auth_properties=AuthProperties.from_env()
# Set DEBUG level to see received and sent data. Level is logging.WARN by default
log_level=logging.DEBUG
)
client.connect()
def on_trail(agent, trail):
print(agent, trail)
def on_event(agent, event):
print(agent, event)
# Set a handler for trail
client.on_trail(args.id, 'uptime', on_trail)
# Set a handler for event
client.on_event(args.id, 'ready_to_rock', on_event)
finish = False
while not finish:
try:
sleep(1)
except KeyboardInterrupt:
finish = True
pass
client.disconnect()
|
'''Code snippets on how to unfreeze and train specific layers'''
### How to import a pretrained model from torchvision
import torchvision.models as models
model = models.resnet152()
### How to train the last layer only (classifier)
for param in model.parameters():
    param.requires_grad = False
### Training all the layers, basically retraining the whole network
### For that you need to unfreeze all the layers, and for all parameters set to True in order to allow backporpagation
for param in model.parameters():
    param.requires_grad = True
### Example: How to freeze all layers except for the last 3
### This is an example used on ResNet152 pre-trained model
for name, child in model.named_children():
    if name in ['conv1', 'bn1', 'relu', 'maxpool','layer1', 'layer2']:
        print(name + ' is frozen')
        for param in child.parameters():
            param.requires_grad = False
    else:
        print(name + ' is unfrozen')
        for param in child.parameters():
            param.requires_grad = True
### In order to make it work during training you'll also have to adjust your optimizer such as below
# NOTE(review): `torch` itself is never imported in this snippet (only
# torchvision.models), so the next line raises NameError as written -
# add `import torch` before using it.
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=0.0006, momentum=0.9, nesterov=True)
### After training, you can find out how many total parameters and training parameters, this can give you a rough size
### of how big your model is
total_params = sum(p.numel() for p in model.parameters())
print(f'{total_params:,} total parameters.')
total_trainable_params = sum(
    p.numel() for p in model.parameters() if p.requires_grad)
print(f'{total_trainable_params:,} training parameters.')
print(f'{total_trainable_params:,} training parameters.') |
"""
The models module encapsulates the parameters data inherent in a large database
of phrases.
"""
import hashlib
from sqlalchemy import Column, Integer, String, Unicode, ForeignKey
from sqlalchemy.orm import relationship
from chkphrase.database import Base
class User(Base):
    """An application user with a SHA-256-hashed password.

    NOTE(review): a plain unsalted SHA-256 digest is weak password storage;
    consider a dedicated KDF (bcrypt/scrypt/argon2) if this guards anything
    sensitive.
    """
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(Unicode(255), unique=True)         # login name
    full_name = Column(Unicode(255), unique=True)    # display name
    password = Column(String(64))                    # hex SHA-256 digest (64 chars)

    def __init__(self, name=None, full_name=None, password=None):
        self.name = name
        self.full_name = full_name
        # hashlib.sha256 requires bytes under Python 3; encode text passwords
        # so construction does not raise TypeError. On Python 2, str IS bytes
        # and is passed through unchanged, preserving the old behaviour.
        if isinstance(password, str) and not isinstance(password, bytes):
            password = password.encode('utf-8')
        self.password = hashlib.sha256(password).hexdigest()

    def __repr__(self):
        return '<User %r>' % (self.name)
class Category(Base):
    """Lookup table of category names a phrase can be filed under."""
    __tablename__ = 'categories'
    id = Column(Integer, primary_key=True)
    # NOTE(review): String(255) here vs Unicode(255) in the sibling lookup
    # tables - confirm the asymmetry is intentional.
    name = Column(String(255), unique=True)
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        return '<Category %r>' % (self.name)
class PreCategory(Base):
    """Lookup table of preliminary category names."""
    __tablename__ = 'precategories'
    id = Column(Integer, primary_key=True)
    name = Column(Unicode(255), unique=True)
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        return '<PreCategory %r>' % (self.name)
class Genre(Base):
    """Lookup table of genre names."""
    __tablename__ = 'genres'
    id = Column(Integer, primary_key=True)
    name = Column(Unicode(255), unique=True)
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        return '<Genre %r>' % (self.name)
class Difficulty(Base):
    """Lookup table of difficulty-level names."""
    __tablename__ = 'difficulties'
    id = Column(Integer, primary_key=True)
    name = Column(Unicode(255), unique=True)
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        return '<Difficulty %r>' % (self.name)
class Pack(Base):
    """Lookup table of pack names phrases are shipped in."""
    __tablename__ = 'packs'
    id = Column(Integer, primary_key=True)
    name = Column(Unicode(255), unique=True)
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        return '<Pack %r>' % (self.name)
class Badword(Base):
    """
    There are five bad words associated with each 'phrase'; for buzzwords a
    phrase is actually just a word.
    """
    __tablename__ = 'badwords'
    id = Column(Integer, primary_key=True)
    phrase_id = Column(Integer, ForeignKey('phrases.id'))  # owning phrase
    word = Column(Unicode(255))                            # the forbidden word itself
    # NOTE(review): user_id is never set in __init__ - confirm it is
    # assigned elsewhere (e.g. by the view that records who entered it).
    user_id = Column(Integer, ForeignKey('users.id'))
    def __init__(self, word=None, phrase_id=None):
        """
        Standard constructor for creating a bad word.
        """
        self.word = word
        self.phrase_id = phrase_id
class Phrase(Base):
    """A phrase record plus its classification foreign keys and workflow state."""
    __tablename__ = 'phrases'
    id = Column(Integer, primary_key=True)
    phrase = Column(Unicode(255), unique=True)   # the phrase text itself
    source = Column(Unicode(255))                # where the phrase came from
    user_id = Column(Integer, ForeignKey('users.id'))
    pre_category_id = Column(Integer, ForeignKey('precategories.id'))
    genre_id = Column(Integer, ForeignKey('genres.id'))
    category_id = Column(Integer, ForeignKey('categories.id'))
    difficulty_id = Column(Integer, ForeignKey('difficulties.id'))
    pack_id = Column(Integer, ForeignKey('packs.id'))
    approved = Column(Integer, default=0)        # used as a boolean flag
    buzzworthy = Column(Integer, default=0)      # used as a boolean flag
    # Workflow stage of the phrase:
    # -1 do not use
    # 0 in the database
    # 1 has been approved (needs bad words)
    # 2 has bad words (need to be verified)
    # 3 needs further verification
    # 4 ready for a pack
    # 5 in a shippable pack
    stage = Column(Integer, default=0)
    # NOTE(review): each backref name below becomes a collection attribute on
    # the *related* class (e.g. User.users lists phrases) - the
    # plural-of-target naming looks odd; confirm it is intended.
    user = relationship('User', backref='users')
    pre_category = relationship('PreCategory', backref='precategories')
    genre = relationship('Genre', backref='genres')
    category = relationship('Category', backref='categories')
    difficulty = relationship('Difficulty', backref='difficulties')
    pack = relationship('Pack', backref='packs')
    badwords = relationship('Badword', backref='badwords')
    def __init__(self, phrase=None, source=None):
        self.phrase = phrase
        self.source = source
    def __repr__(self):
        return '<Phrase %r>' % (self.phrase)
|
"""
Script that trains graph-conv models on Tox21 dataset.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import json
np.random.seed(123)
import tensorflow as tf
tf.set_random_seed(123)
import deepchem as dc
from deepchem.molnet import load_tox21
from deepchem.models.tensorgraph.models.graph_models import GraphConvModel
model_dir = "model"
# Load Tox21 dataset
tox21_tasks, tox21_datasets, transformers = load_tox21(featurizer='GraphConv')
with open('tasks.json', 'w') as fp:
json.dump(tox21_tasks, fp)
train_dataset, valid_dataset, test_dataset = tox21_datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean, mode="classification")
# Batch size of models
batch_size = 50
model = GraphConvModel(len(tox21_tasks), batch_size=batch_size, mode='classification', model_dir=model_dir)
model.fit(train_dataset, nb_epoch=50)
model.save()
|
#!/usr/bin/env python3
import yaml
from math import log, sqrt, floor, ceil
from itertools import product
from operator import itemgetter
import numpy as np
if __name__ == '__main__':
    # Builds parameter sweeps for KDE-estimation experiments and prints the
    # resulting algorithm configuration as a YAML document.
    max_n = 3000000
    iv = 10
    multiplier = sqrt(2)
    # Geometric grid of sample counts: iv * sqrt(2)**i, capped at max_n.
    max_i = int((log(max_n)-log(iv))/log(multiplier))
    mks = [int(round(iv*multiplier**i)) for i in range(max_i+1)]
    n_lists = [1<<i for i in range(5,13)]
    # NOTE(review): n_query appears unused below - verify.
    n_query = 1
    # query_args = list(sorted([[mk,mk,n_list,1] for (mk,n_list) in product(mks,n_lists)], key=itemgetter(2)))
    query_args = list(sorted([[k,m,n_list,1] for (k,m,n_list) in product(mks,mks,n_lists)], key=itemgetter(2)))
    # HBE sweeps: decreasing epsilons crossed with geometrically decaying taus.
    epsilons = np.round(np.arange(1.5,0.05,-0.05),5).tolist()
    taus = np.round([0.01 / sqrt(2)**i for i in range(20)],5).tolist()
    query_args_hbe = list(sorted([[eps,tau] for (eps,tau) in product(epsilons,taus)], key=itemgetter(1), reverse=True))
    # sklearn sweeps: leaf sizes crossed with relative-tolerance thresholds.
    ls = [int(round(10*sqrt(2)**i)) for i in range(10)]
    trs = list(reversed([round(0.05*i,4) for i in range(11)]))
    query_args_sklearn = [[l,0.0,tr] for (l,tr) in product(ls,trs)]
    # One entry per estimator: constructor class, query-parameter grid, the
    # wrapper module that drives it, and the docker image it runs in.
    algos = {
        'naive' : {
            'constructor' : 'Naive',
            'wrapper' : 'deann_wrapper',
            'docker' : 'deann-experiments-deann'
        },
        'ann-faiss' : {
            'constructor' : 'ANNFaiss',
            'query' : query_args,
            'wrapper': 'deann_wrapper',
            'docker' : 'deann-experiments-deann'
        },
        'ann-permuted-faiss' : {
            'constructor' : 'ANNPermutedFaiss',
            'query' : query_args,
            'wrapper': 'deann_wrapper',
            'docker' : 'deann-experiments-deann'
        },
        'random-sampling' : {
            'constructor' : 'RandomSampling',
            'query' : mks,
            'wrapper': 'deann_wrapper',
            'docker' : 'deann-experiments-deann'
        },
        'rsp' : {
            'constructor' : 'RandomSamplingPermuted',
            'query' : mks,
            'wrapper': 'deann_wrapper',
            'docker' : 'deann-experiments-deann'
        },
        'hbe' : {
            'args' : { 'binary' : 'hbe' },
            'constructor' : 'HBEEstimator',
            'query' : query_args_hbe,
            'wrapper' : 'hbe',
            'docker' : 'deann-experiments-hbe',
            'separate-queries' : True
        },
        'rs' : {
            'args' : { 'binary' : 'hbe' },
            'constructor' : 'RSEstimator',
            'query' : query_args_hbe,
            'wrapper' : 'hbe',
            'docker' : 'deann-experiments-hbe',
            'separate-queries' : True
        },
        'sklearn-balltree' : {
            'constructor' : 'SklearnBallTreeEstimator',
            'query' : query_args_sklearn,
            'wrapper' : 'sklearn',
            'docker' : 'deann-experiments-sklearn'
        },
        'sklearn-kdtree' : {
            'constructor' : 'SklearnKDTreeEstimator',
            'query' : query_args_sklearn,
            'wrapper' : 'sklearn',
            'docker' : 'deann-experiments-sklearn'
        }
    }
    print(yaml.dump(algos))
|
# 3. Implement a function my_func() that takes three positional arguments
# and returns the sum of the two largest of them.
# Function implementation:
def my_func(p_1, p_2, p_3):
    """Return the sum of the two largest of the three arguments.

    Parameters:
    p_1 -- first number
    p_2 -- second number
    p_3 -- third number

    (number, number, number) -> number

    >>> my_func(1, 2, 3)
    5
    """
    # Summing all three and dropping the smallest is equivalent to
    # summing the two largest, without building an intermediate list.
    return p_1 + p_2 + p_3 - min(p_1, p_2, p_3)
# Print the result of a sample call.
print(f'Результат вызова функции my_func({1}, {2}, {3}): {my_func(1, 2, 3)}')
|
import json
from django.http import HttpResponse
from user.helper.string import *
def create_json_response(json_dict, error_header, status_code=200, dumps=True):
    """Wrap *json_dict* in an HttpResponse that carries error metadata headers.

    When *dumps* is False, *json_dict* is assumed to already be a JSON
    string and is sent unchanged.
    """
    payload = json.dumps(json_dict) if dumps else json_dict
    response = HttpResponse(payload, content_type=CONTENT_TYPE_JSON, status=status_code)
    response['error_code'] = error_header['error_code']
    response['error_message'] = error_header['error_message']
    return response
|
import random
from matplotlib import pyplot as plt
from matplotlib import animation
from language import Language
from ethnicity import Ethnicity
from utils import *
from math import sqrt, ceil
from threading import *
# Simulation parameters.
N = 1000                  # individuals created per ethnicity (passed to Ethnicity below)
movement_rate = 0.001     # passed to Ethnicity - presumably migration chance; confirm
influence_rate = 0.1      # passed to Ethnicity - presumably linguistic influence; confirm
propagation_rate = 0.01   # passed to Ethnicity.arrival on each movement step
chance_to_change = 0.75   # passed to Language - presumably shift probability; confirm
languages_n = 4           # number of languages / quadrants in the grid
# TODO: study vowel shifts and other shifts to create sides table
# Vowel pairs a sound may drift between.
sides = [
    ('a', 'e'),
    ('e', 'i'),
    ('o', 'u'),
    ('u', 'i'),
    ('i', 'e'),
]
f = open("languages/spanish", 'r')
spanish_voc = [i.replace("\n", "") for i in f.readlines()]
f.close()
f = open("languages/english", 'r')
english_voc = [i.replace("\n", "") for i in f.readlines()]
f.close()
f = open("languages/french", 'r')
french_voc = [i.replace("\n", "") for i in f.readlines()]
f.close()
f = open("languages/italian", 'r')
italian_voc = [i.replace("\n", "") for i in f.readlines()]
f.close()
spanish_lang = Language(spanish_voc, sides, chance_to_change)
english_lang = Language(english_voc, sides, chance_to_change)
french_lang = Language(french_voc, sides, chance_to_change)
italian_lang = Language(italian_voc, sides, chance_to_change)
# We give form to our graph, in quadrants
fig = plt.figure()
quadrant_lenght = ceil(sqrt(N))   # (sic) side length of one quadrant
# NOTE(review): `size` appears unused below - verify before removing.
size = quadrant_lenght * languages_n
# One ethnicity per quadrant, each with its own plot colour/marker.
spaniards = Ethnicity(spanish_lang, N, movement_rate, influence_rate,
                      1, 1, quadrant_lenght, 'ro')
english = Ethnicity(english_lang, N, movement_rate, influence_rate,
                    quadrant_lenght+1, 1, quadrant_lenght, 'bo')
frenchs = Ethnicity(french_lang, N, movement_rate, influence_rate,
                    1, quadrant_lenght+1, quadrant_lenght, 'go')
italians = Ethnicity(italian_lang, N, movement_rate, influence_rate,
                     quadrant_lenght+1, quadrant_lenght+1, quadrant_lenght, 'yo')
ethnicities = []
ethnicities.append(spaniards)
ethnicities.append(english)
ethnicities.append(frenchs)
ethnicities.append(italians)
# Axes span the full 2x2 grid of quadrants; initial scatter of all people.
ax = plt.axes(xlim=(0, quadrant_lenght*2 + 1), ylim=(0, quadrant_lenght*2 + 1))
for ethnicity in ethnicities:
    ethnicity.ax = ax.plot([person.x for person in ethnicity.people],
                           [person.y for person in ethnicity.people], ethnicity.color)
def movement():
    """Run one migration step: gather departures from every ethnicity,
    shuffle them, and redistribute them evenly among the ethnicities."""
    departing = []
    for group in ethnicities:
        departing.extend(group.depart())
    random.shuffle(departing)
    # Pair each ethnicity with one shuffled share of the travelers.
    shares = split(departing, len(ethnicities))
    for group, arrivers in zip(ethnicities, shares):
        group.arrival(arrivers, propagation_rate)
def animate(i):
    """FuncAnimation callback: advance the simulation one step and redraw
    every ethnicity's native population as a scatter plot."""
    movement()
    ax.clear()
    for group in ethnicities:
        xs = [person.x for person in group.natives]
        ys = [person.y for person in group.natives]
        group.ax = ax.plot(xs, ys, group.color)
# Run the animation, then report the dialect each ethnicity converged to.
anim = animation.FuncAnimation(fig, animate, frames=20, interval=200)
plt.show()
print("Spaniards:")
print(spaniards.calc_dialect())
print("English:")
print(english.calc_dialect())
# Fixed user-facing typo: was "Franch:".
print("French:")
print(frenchs.calc_dialect())
print("Italians:")
print(italians.calc_dialect())
|
# Definition for a binary tree node.
class TreeNode:
    """A binary tree node holding a value and optional left/right children."""
    def __init__(self, x):
        self.val = x       # node value
        self.left = None   # left child (TreeNode or None)
        self.right = None  # right child (TreeNode or None)
class Solution:
    def rob(self, root):
        """House Robber III, brute force.

        :type root: TreeNode
        :rtype: int

        Recursively tries both choices (rob / skip) at every node; no
        memoization, so it is exponential ("time limit exceeded" on large
        trees) - see rob2 for the linear version.
        """
        def best(node, may_rob):
            # Max loot from `node`'s subtree; `may_rob=True` means the
            # node itself is robbed (so its children cannot be).
            if node is None:
                return 0
            if may_rob:
                total = node.val
                if node.left:
                    total += best(node.left, False)
                if node.right:
                    total += best(node.right, False)
                return total
            return (max(best(node.left, True), best(node.left, False))
                    + max(best(node.right, True), best(node.right, False)))
        return max(best(root, True), best(root, False))

    def rob2(self, root):
        """Single-pass version: each node reports (robbed, skipped) maxima."""
        def solve(node):
            if node is None:
                return 0, 0
            left_rob, left_skip = solve(node.left)
            right_rob, right_skip = solve(node.right)
            with_node = left_skip + right_skip + node.val
            without_node = max(left_rob, left_skip) + max(right_rob, right_skip)
            return with_node, without_node
        robbed, skipped = solve(root)
        return max(robbed, skipped)
# Sample tree 1: [3, 2, 3, null, 3, null, 1] - expected answer 7.
root = TreeNode(3)
root.left = TreeNode(2)
root.right = TreeNode(3)
root.left.right = TreeNode(3)
root.right.right = TreeNode(1)
# Sample tree 2: [3, 4, 5, 1, 3, null, 1] - expected answer 9.
root2 = TreeNode(3)
root2.left = TreeNode(4)
root2.right = TreeNode(5)
root2.left.left = TreeNode(1)
root2.left.right = TreeNode(3)
root2.right.right = TreeNode(1)
s = Solution()
print(s.rob2(root))
print(s.rob2(root2)) |
import SocketServer
from mixins import ThreadPoolMixIn, OtherPoolMixIn
import re
import threading
import os
class MyRequestHandler(SocketServer.BaseRequestHandler):
    """Handles one chat command per connection: register a user or relay
    a message to a previously registered user."""
    # Class-level registry shared by every handler instance: user name ->
    # the socket that registered it. NOTE(review): access is not
    # synchronized even though handlers may run on multiple threads.
    users = {}

    def handle(self):
        command = self.request.recv(1024).strip()
        # Fixed: `print command` is Python-2-only syntax; the call form
        # behaves identically on Python 2 and compiles on Python 3.
        print(command)
        # "user <name>" registers the caller under <name>.
        # Raw strings for regex patterns avoid escape-sequence surprises.
        match = re.search(r'user (?P<user>\w+)', command)
        if match:
            user = match.group('user')
            if user and user in self.users:
                response = "100 err %s already taken!\r\n" % user
            else:
                self.users[user] = self.request
                # NOTE(review): "registerred" (sic) and the missing trailing
                # \r\n are preserved in case clients match this verbatim.
                response = "200 ok %s successfully registerred" % user
            self.request.send(response)
            return
        # "send_to <name> <message>" relays <message> to a registered user.
        match = re.search(r'send_to (?P<user>\w+) (?P<message>.*)', command)
        if match:
            user, message = match.groups()
            if user in self.users:
                self.users[user].send(message)
                self.request.send("200 ok message to %s sent successfully.\r\n" % user)
            else:
                self.request.send("100 err %s does not exists!\r\n" % user)
            return
class MyThreadedServer(ThreadPoolMixIn, SocketServer.TCPServer):
    """TCP server dispatching requests to a pool of worker threads."""
    numThreads = 300  # worker pool size
class MyForkedServer(SocketServer.ForkingTCPServer):
    """TCP server forking a child process per request."""
    max_children = 40  # cap on concurrent child processes
class MyOtherServer(OtherPoolMixIn, SocketServer.TCPServer):
    """TCP server using OtherPoolMixIn's dispatch strategy."""
    numThreads = 2
# class COMMANDS:
# class CommandParser:
#     def __init__(self, request):
#         command = request.recv(1024).decode('utf8')
if __name__ == '__main__':
    # NOTE(review): the port is hard-coded, not kernel-assigned as the old
    # comment claimed (that would require port 0).
    address = ('localhost', 53617) # let the kernel give us a port
    server = MyOtherServer(address, MyRequestHandler)
    server.serve_forever()
|
import numpy as np
import pdb
import cv2
import logging
from skimage.feature import local_binary_pattern
from modshogun import RealFeatures, MulticlassLabels
from modshogun import LMNN as shogun_LMNN
import matplotlib.pyplot as plt
from metric_learn import ITML_Supervised
logging.basicConfig(filename="logs", level=logging.DEBUG)
# Helper Functions
def nearest_neighbour(projs, test_proj):
    """Return the column index of *projs* whose column is closest (L2 norm)
    to *test_proj*.

    *projs* holds one candidate projection per column; the two diagnostic
    prints show all distances and the winning index.
    """
    distances = np.zeros((projs.shape[1], 1))
    # Alternatively: distances = np.linalg.norm(projs - test_proj, axis=1)
    for col in range(projs.shape[1]):
        distances[col] = np.linalg.norm((projs[:, col] - test_proj))
    # Fixed: bare Python 2 print statements were SyntaxErrors under
    # Python 3; the call form prints the same text on both.
    print("Neighbours at {0}".format(distances))
    print("Closest neighbour: {0}".format(np.argmin(distances)))
    return np.argmin(distances)
class PCA:
    """Eigenface-style PCA: fits a projection basis from column-stacked
    samples and projects new data into that space.

    NOTE(review): this module is Python 2 code (bare print statements);
    run it under Python 2 or port the prints before use.
    """
    def __init__(self):
        """Initialize empty model state; fit() populates everything."""
        self.cov = None                  # inner-product covariance (A^T A)
        self.eigen_vals = None           # sorted eigenvalues of self.cov
        self.eigen_vecs = None           # matching eigenvectors
        self.A_space = None              # training data projected into PCA space
        self.selected_eigen_vecs = None  # basis vectors in the original feature space
        self.test_projection = None      # last test projection from transform()
        self.mean = []                   # mean sample (mean face)
    def fit(self, A, labels):
        """ Fits the PCA with the given feature vector and the number of components """
        # Samples arrive one per row; work with one per column internally.
        A = A.T
        num_people = np.max(labels)
        # NOTE(review): relies on Python 2 integer division.
        num_points = np.shape(labels)[0]/num_people
        self.mean = np.mean(A, axis=1)
        print "The dimensions of the mean face are: {0}".format(self.mean.shape)
        # TODO: Make a way to print / imwrite this average image
        # Center every sample on the mean.
        for col in range(A.shape[1]):
            A[:, col] = A[:, col] - self.mean
        n_components = int(A.shape[1])
        # Compute the inner feature covariance for simplifying computation
        # (the classic "snapshot" trick: eig of A^T A instead of A A^T).
        self.cov = np.matrix(A.T) * np.matrix(A)
        self.cov /= self.cov.shape[0]
        self.eigen_vals, self.eigen_vecs = np.linalg.eig(self.cov)
        self.eigen_vals = np.abs(self.eigen_vals)
        self.eigen_vecs = self.eigen_vecs.T
        # Keep eigenpairs sorted by decreasing eigenvalue.
        self.sort_indices = self.eigen_vals.argsort()[::-1]
        self.eigen_vals = self.eigen_vals[self.sort_indices[0:n_components]]
        self.eigen_vecs = self.eigen_vecs[self.sort_indices[0:n_components]]
        self.eigen_vecs = self.eigen_vecs.T
        logging.debug("PCA: Printing shape of eigenvectors {0} and eigenvalues {1}".format(self.eigen_vecs.shape, self.eigen_vals.shape))
        logging.debug("PCA: Printing the eigenvalues, {0}".format(self.eigen_vals))
        logging.debug("PCA: Printing the eigenvectors, {0}".format(self.eigen_vecs))
        # TODO: Conduct slicing.
        # Map the small-space eigenvectors back to feature space, normalize.
        self.selected_eigen_vecs = np.matrix(A) * np.matrix(self.eigen_vecs)
        norms = np.linalg.norm(self.selected_eigen_vecs, axis=0)
        self.selected_eigen_vecs /= norms
        self.A_space = np.matrix(self.selected_eigen_vecs.T) * np.matrix(A)
        # Need to return values to be used in cascaded classifier systems
        # (_,feats) = self.A_space.shape
        #self.new_space = np.zeros((num_people, num_points, feats))
        #for i in xrange(num_people):
        #    for j in xrange(num_points):
        #        self.new_space[i,j,:] = self.A_space[i*num_points+j,:]
        # TODO: Change the following return to the reshaped vals
        return self.selected_eigen_vecs, self.A_space
    def transform(self, y):
        """ Transforms the given test data with the developed model"""
        y = y.T
        y = y - self.mean
        self.test_projection = np.matrix(self.selected_eigen_vecs.T) * np.matrix(y).T
        return self.test_projection, self.A_space
class LDA:
    """Linear Discriminant Analysis: builds within/between-class scatter
    matrices and projects data onto the discriminant eigenvectors.

    NOTE(review): Python 2 code (bare prints, integer division) - port
    before running on Python 3.
    """
    def __init__(self):
        """ Computes the LDA in the specified subspace provided. """
        self.eigen_vals = None       # eigenvalues of Sw^-1 Sb
        self.eigen_vecs = None       # matching discriminant directions
        self.lda_projection = None   # training data in the LDA space
        self.test_proj = None        # last test projection from transform()
    def fit(self, A, labels):
        """ Check if you really need to specify the n_classes as another argument"""
        n_classes = np.max(labels)
        # NOTE(review): n_components is computed but never used - verify.
        n_components = labels.shape[0]
        num_imgs = A.shape[1]
        # NOTE(review): relies on Python 2 integer division; assumes samples
        # are grouped contiguously by class, equal count per class.
        imgs_per_person = num_imgs / n_classes
        class_means = np.zeros((A.shape[0], n_classes))
        within_class_cov = np.zeros((A.shape[0], A.shape[0]))
        between_class_cov = np.zeros((A.shape[0], A.shape[0]))
        # Per-class mean vectors.
        for i in range(n_classes):
            class_means[:,i] = np.mean(A[:,i*imgs_per_person:i*imgs_per_person+imgs_per_person], axis=1).ravel()
        overall_mean = np.mean(class_means, axis=1).ravel()
        # Accumulate within-class (Sw) and between-class (Sb) scatter.
        for i in range(n_classes):
            class_mean_i = class_means[:, i]
            class_mat = np.zeros((A.shape[0], imgs_per_person))
            #class_mat = np.matrix(A[:, i*imgs_per_person:(i+1)*imgs_per_person]) - class_mean_i
            for j in range(i*imgs_per_person, (i+1)*imgs_per_person):
                class_mat[:, j-i*imgs_per_person] = A[:, j].ravel() - class_mean_i
            within_class_cov += np.matrix(class_mat) * np.matrix(class_mat.T)
            diff_mat = (class_mean_i - overall_mean).reshape((A.shape[0], 1))
            between_class_cov += np.matrix(diff_mat) * np.matrix(diff_mat.T)
        within_class_cov /= 1.0*n_classes
        between_class_cov /= 1.0*n_classes
        logging.debug("Dimensions of within class scatter matrix are {0}".format(within_class_cov.shape))
        logging.debug("Dimensions of between class scatter matrix are {0}".format(between_class_cov.shape))
        # Discriminants are eigenvectors of Sw^-1 Sb.
        self.eigen_vals, self.eigen_vecs = np.linalg.eig(np.matrix(np.linalg.inv(within_class_cov)) * np.matrix(between_class_cov))
        #pdb.set_trace()
        # TODO: Select only some components based on some selection theory
        sort_indices = np.abs(self.eigen_vals).argsort()[::-1]
        self.eigen_vecs = self.eigen_vecs.T
        #pdb.set_trace()
        # TODO: In case you wish to remove certain LDA components, do them here.
        self.eigen_vals = self.eigen_vals[sort_indices]
        self.eigen_vecs = self.eigen_vecs[sort_indices]
        self.eigen_vecs = self.eigen_vecs.T
        print self.eigen_vecs.T.shape, A.shape
        self.lda_projection = np.matrix(self.eigen_vecs.T) * np.matrix(A)
        logging.debug("The dimensions of the LDA projection are {0}".format(self.lda_projection.shape))
        # Need to return the values to be used in cascaded classifier systems
        return self.eigen_vecs, self.lda_projection
    def transform(self, y):
        """ Function to apply given test data on the created LDA model """
        print "Sizes are ", self.eigen_vecs.T.shape, y.shape
        self.test_proj = np.matrix(self.eigen_vecs.T) * np.matrix(y)
        return self.test_proj, self.lda_projection
class LBP:
    """Wrapper around OpenCV's LBPH face recognizer (Local Binary Patterns)."""
    def __init__(self):
        self.radius = 2      # LBP sampling radius
        self.n_points = 16   # number of circular sample points
        self.model = cv2.createLBPHFaceRecognizer(self.radius,
                                                  self.n_points)
    def fit(self, features, labels, kparts=2):
        """Train on *features*/*labels*, feeding the model in *kparts* batches
        (first batch trains, later batches update)."""
        nsamples = features.shape[0]
        # Fixed for Python 3: `/` would yield a float slice bound (TypeError)
        # and `xrange` no longer exists; `//` and `range` behave the same on
        # Python 2.
        subset_size = nsamples // kparts
        for j in range(kparts):
            if j == 0:
                self.model.train(features[0:subset_size][:], labels[0:subset_size])
            else:
                self.model.update(features[j*subset_size:(j+1)*subset_size][:], labels[j*subset_size:(j+1)*subset_size])
    def transform(self, y_test):
        """ Uses a nearest neighbour to find the class label """
        return self.model.predict(y_test)
    def save(self, filename):
        """Persist the trained model to *filename*."""
        return self.model.save(filename)
    def load(self, filename):
        """Load a previously saved model from *filename*."""
        return self.model.load(filename)
    def update(self, features, labels):
        """Incrementally train the model with additional samples."""
        return self.model.update(features, labels)
class LMNN:
    """Large Margin Nearest Neighbour metric learning (shogun backend) on top
    of a PCA projection."""
    def __init__(self, k=3, use_pca=False):
        self.k = k                    # neighbours used by LMNN
        self.eigenvecs = None         # PCA basis from fit()
        self.space = None             # PCA-projected training data
        self.space_model = PCA()      # the PCA stage
        # NOTE(review): use_pca is stored but fit() always applies PCA - verify.
        self.use_pca = use_pca
        self.metric_model = None      # shogun LMNN instance after fit()
    def fit(self, feats, labels):
        """Fit PCA, then train LMNN in the PCA space; returns the PCA basis
        and the metric-transformed, column-normalized training data."""
        self.eigenvecs, self.space = self.space_model.fit(feats, labels)
        feat = RealFeatures(self.space)
        self.metric_model = shogun_LMNN(feat, MulticlassLabels(labels.astype(np.float64)), self.k)
        self.metric_model.set_maxiter(1000)
        self.metric_model.set_regularization(0.50)
        self.metric_model.set_obj_threshold(0.001)
        self.metric_model.set_stepsize(1e-7)
        #pdb.set_trace()
        # Start training from the identity transform.
        L = np.eye(self.space.shape[1])
        self.metric_model.train(L)
        # NOTE(review): stats is collected but only used by the commented
        # plotting code below.
        stats = self.metric_model.get_statistics()
        #plt.plot(stats.obj.get())
        #plt.grid(True)
        #plt.show()
        self.linear_transform = self.metric_model.get_linear_transform()
        self.projected_data = np.dot(self.linear_transform, self.space)
        norms = np.linalg.norm(self.projected_data, axis=0)
        self.projected_data /= norms
        # Fit the data with PCA first.
        # pdb.set_trace()
        return self.eigenvecs, self.projected_data
    def transform(self, y):
        """Project test data through PCA then the learned LMNN transform."""
        # Transform using PCA first.
        test_proj, _ = self.space_model.transform(y)
        # On the projection in the resultant space, apply LMNN.
        lk = np.dot(self.linear_transform, test_proj)
        lk = lk/np.linalg.norm(lk, axis=0)
        return lk, self.projected_data
class ITML:
    """Information-Theoretic Metric Learning on top of a PCA projection."""
    def __init__(self, num_constraints=200):
        self.space_model = PCA()
        self.metric_model = ITML_Supervised(num_constraints)
    def fit(self, feats, labels):
        """Fit PCA on *feats*, then fit ITML in the PCA space.

        Fixed: removed leftover pdb.set_trace() calls that froze execution
        every time this method ran.
        """
        self.eigenvecs, self.space = self.space_model.fit(feats, labels)
        self.metric_model.fit(self.space.T, labels)
    def transform(self, y):
        """Transforms the test data according to the model."""
        test_proj, _ = self.space_model.transform(y)
        # NOTE(review): test_proj is computed but the raw *y* is transformed
        # below - looks suspicious, yet preserved verbatim; confirm intent.
        return self.metric_model.transform(y)
|
# Project: Live Webcam Video Filters
# Contributers: Abraham Medina, Gurjot Sandhu, Valentina Fuchs Facht
# Class: CST 205-02 Spring 2017
# Date: March 16, 2017
# Abstract: This program allows the user to activate the webcam of a computer according to a filter they would like put over their face and to be able to then take a photo with the given filter
# Contribution: Abraham and Valentina worked on getting the primary mustache code working then Valentina split off to do the glasses filter while Abraham continued to refine the primary code. Gurjot worked on the GUI and also worked with Abraham to find most of the resources used in the project. Gurjot also worked on the main implementation of the red nose filter.
# Github: https://github.com/abrahamleyva/Project3Team248
import cv2
import sys
import time
from pygame import mixer
# Loading and playing the timer sound
mixer.init()
mixer.music.load('sounds/sound.mp3')
# building necessary cv2 Cascade files
faceCascade = cv2.CascadeClassifier("hars/face.xml")
noseCascade = cv2.CascadeClassifier("hars/nose.xml")
# Loads our image with only the non-transparent sections visible
# (-1 asks cv2.imread to keep the alpha channel).
imgMustache = cv2.imread('filters/mustache.png', -1)
# Create the mask for the filter based on the visible parts of the image
# (channel index 3 is the alpha plane).
orig_mask = imgMustache[:, :, 3]
# Create the mask for the area surrounding our orig_mask
orig_mask_inv = cv2.bitwise_not(orig_mask)
# Convert mustache image to BGR and save the original image size
imgMustache = imgMustache[:, :, 0:3]
origMustacheHeight, origMustacheWidth = imgMustache.shape[:2]
# get video feed from primary system webcam
video_capture = cv2.VideoCapture(0)
# starts playing timer sound
mixer.music.play()
# starts the 17 second loop
# NOTE(review): time.clock() was removed in Python 3.8, and the slice
# arithmetic below relies on Python 2 integer division (under Python 3 the
# `/` results are floats and slicing fails) - this script targets Python 2.
while time.clock() < 17:
    # Capture video feed
    ret, frame = video_capture.read()
    # Create greyscale image from the video feed so hars can read the feed
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Detect faces in input video stream
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    # Iterate over each face found in the webcam
    for (x, y, w, h) in faces:
        # Creats a gray scale and colored version of the area of the face for the hars
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]
        # Detect all noses found within the face area
        nose = noseCascade.detectMultiScale(roi_gray)
        for (nx, ny, nw, nh) in nose:
            # Resizes the size of the image with respect to the nose found
            mustacheWidth = 3 * nw
            mustacheHeight = mustacheWidth * origMustacheHeight / origMustacheWidth
            # Center the mustache on the bottom of the nose
            x1 = nx - (mustacheWidth / 4)
            x2 = nx + nw + (mustacheWidth / 4)
            y1 = ny + nh - (mustacheHeight / 2)
            y2 = ny + nh + (mustacheHeight / 2)
            # Prevents the mustache from going outside of the face boundry
            if x1 < 0:
                x1 = 0
            if y1 < 0:
                y1 = 0
            if x2 > w:
                x2 = w
            if y2 > h:
                y2 = h
            # Re-re-calculate the width and height of the mustache image
            mustacheWidth = x2 - x1
            mustacheHeight = y2 - y1
            # Re-size the original colored image and the masks to the mustache sizes
            # calcualted above
            mustache = cv2.resize(imgMustache, (mustacheWidth, mustacheHeight), interpolation=cv2.INTER_AREA)
            mask = cv2.resize(orig_mask, (mustacheWidth, mustacheHeight), interpolation=cv2.INTER_AREA)
            mask_inv = cv2.resize(orig_mask_inv, (mustacheWidth, mustacheHeight), interpolation=cv2.INTER_AREA)
            # equals the ROI image size to the resized BGR image
            roi = roi_color[y1:y2, x1:x2]
            # finds which pixles the image should not be in
            roi_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
            # Selects the pixles form the BGR image that should be present
            roi_fg = cv2.bitwise_and(mustache, mustache, mask=mask)
            # joins the last two images without any overlap
            dst = cv2.add(roi_bg, roi_fg)
            # place the final image filter over the feed
            roi_color[y1:y2, x1:x2] = dst
            # breaks out of the loop before multiple noses are found in on one face
            break
    # Takes a frame image of the frame
    cv2.imwrite('image.jpg', frame)
    # Display the resulting frame
    cv2.imshow('Video', frame)
    # Press any key to exit or wait for timer to finish
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
|
import math  # kept for backward compatibility with the original module


def fib(n):
    """Return the n-th Fibonacci number exactly (fib(1) == fib(2) == 1).

    The original used Binet's formula with floats, which is inexact for
    large n and returned 1 for fib(0); this iterative version is exact.
    """
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a


def even_fib_sum(limit):
    """Sum of the even Fibonacci numbers strictly below *limit* (Project Euler 2)."""
    total = 0  # avoid shadowing the builtin `sum`
    a, b = 1, 2
    while a < limit:
        if a % 2 == 0:
            total += a
        a, b = b, a + b
    return total


print(even_fib_sum(4000000))
|
from pyspark import SparkContext
from collections import OrderedDict
import sys
import itertools
inputfile = sys.argv[1]
outputfile = sys.argv[2]
sc = SparkContext(appName = "Girvan Newman")
data = sc.textFile(inputfile)
# Each line holds a textual edge pair such as "('a', 'b')".  Evaluate it
# once per record (the original called eval() twice per line).
# NOTE(review): eval() on file contents is unsafe for untrusted input;
# consider ast.literal_eval if the data is plain literals -- confirm.
data = data.map(lambda x: tuple(eval(x)[:2]))
g = data.collect()
# Unique, sorted vertex ids gathered from every edge endpoint (the original
# comprehension reused `i` for both loop variables).
node_test = sorted({endpoint for edge in g for endpoint in edge})
parallelized_nodes = sc.parallelize(node_test, 7)
def generate_adj_matrix(iterator):
    """Yield both directed orientations (u, v) and (v, u) of an edge pair."""
    pair = list(iterator)
    u, v = pair[0], pair[1]
    yield (u, v)
    yield (v, u)
# Build the undirected adjacency map: vertex -> set of neighbouring vertices.
edges_both_directions = data.map(generate_adj_matrix).flatMap(lambda pair: pair)
adj_matrix = edges_both_directions.groupByKey().map(lambda kv: (kv[0], set(kv[1]))).collect()
dict_adj_matrix = dict(adj_matrix)
def bfs_new(node, dict_adj_matrix):
    """Single-root stage of Girvan-Newman edge-betweenness.

    Runs a BFS from *node* over the undirected graph given as an adjacency
    map (vertex -> set of neighbours), then walks the visit order backwards
    crediting each (predecessor, vertex) edge with the flow it carries.

    :param node: root vertex for the BFS.
    :param dict_adj_matrix: dict mapping vertex -> set of neighbour vertices.
    :return: dict mapping "u v" (endpoints sorted, space-joined) to the
        credit contributed by this root; the driver sums these over all
        roots and halves the totals.

    NOTE(review): credit is split equally among a vertex's BFS predecessors
    rather than weighted by shortest-path counts as in canonical
    Girvan-Newman/Brandes -- confirm this approximation is intended.
    """
    # seen maps vertex -> [list of BFS predecessors, distance from root].
    visited_order_set, visit_order, seen, to_visit, to_visit_set = set(), list(), dict(), [node], set()
    to_visit_set.add(node)
    while(to_visit):
        vertex = to_visit.pop(0)
        to_visit_set.remove(vertex)
        if vertex == node:
            seen[node] = [[], 0]
            visit_order.append(node)
            visited_order_set.add(node)
        else:
            visit_order.append(vertex)
            visited_order_set.add(vertex)
        vertex_distance_from_root = seen[vertex][1]
        ### add predecessors in seen
        ### filtering out edges which have been already visited
        to_be_appended = dict_adj_matrix[vertex] - visited_order_set
        ### to_be_appended - to_visit_set
        ### to eliminate nodes which are already queued to be visited
        for n in to_be_appended - to_visit_set:
            to_visit_set.add(n)
            to_visit.append(n)
        for item in to_be_appended:
            if not seen.get(item):
                seen[item] = [[vertex], seen[vertex][1]+1]
            else:
                # Same BFS depth reached via another parent: record an
                # additional shortest-path predecessor.
                if seen[item][1] == vertex_distance_from_root + 1:
                    seen[item][0].append(vertex)
    # Backward pass over the visit order: temp_nodes holds each vertex's
    # accumulated credit, temp_edges the credit pushed onto each edge.
    temp_nodes = {}
    temp_edges = {}
    for inner_node in reversed(visit_order):
        inner_node_value = temp_nodes.get(inner_node)
        # Every vertex contributes 1 of its own on top of downstream credit.
        temp_nodes[inner_node] = inner_node_value+1 if inner_node_value else 1
        ends = seen[inner_node][0]
        current_inner_node_value = temp_nodes[inner_node]
        number_of_influenced_nodes = len(ends)
        if number_of_influenced_nodes:
            # Split this vertex's credit equally over its predecessors.
            influencing_value = float(current_inner_node_value) / number_of_influenced_nodes
            for end in ends:
                end_node_value = temp_nodes.get(end)
                # Canonical edge name: endpoints sorted, space-separated.
                sorted_edge_list = sorted([inner_node, end])
                edge_name = "{} {}".format(sorted_edge_list[0], sorted_edge_list[1])
                temp_edges[edge_name] = influencing_value
                temp_nodes[end] = end_node_value+influencing_value if end_node_value else influencing_value
    return temp_edges
def girvan_newman(iterator):
    """Spark map task: edge-betweenness credits from one BFS root vertex."""
    global dict_adj_matrix
    root = iterator
    credits = bfs_new(root, dict_adj_matrix)
    return credits.items()
# Sum each edge's credits over all BFS roots, halve (each undirected edge is
# counted from both sides), and sort by the (u, v) endpoint pair.
betweenness = (parallelized_nodes
               .flatMap(girvan_newman)
               .groupByKey()
               .mapValues(lambda credits: sum(credits) / 2)
               .map(lambda kv: (tuple(kv[0].split()), kv[1]))
               .sortByKey())
output = betweenness.collect()
with open(outputfile, 'w') as f:
    for (u, v), score in output:
        f.write("({},{}),{}\n".format(u, v, score))
|
from django.conf.urls import url
from . import views
app_name = 'weixin'

urlpatterns = [
    # Root URL of the weixin app.
    url(r'^$', views.weixin, name="index"),
]
|
# -*- coding: UTF-8 -*-
from django.db import models
from apps.seguridad.models import Usuario, MotivoBloqueo
from apps.seguridad.audit import audit
@audit
class BloqueoLog(models.Model):
    """Audit-logged row recording that a Usuario was blocked, why, and when."""
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0; add
    # an explicit on_delete when upgrading.
    usuario = models.ForeignKey(Usuario)
    motivo = models.ForeignKey(MotivoBloqueo)
    fecha = models.DateField()
    class Meta:
        app_label = 'seguridad'
        db_table = 'seguridad_bloqueo_log'
|
from typing import Union
from types import FunctionType
from functools import wraps
__all__ = ('lru_cache', 'cache')
__version__ = '0.4'
def cache(f):
    """Decorate *f* with an unbounded lru_cache (convenience shorthand)."""
    unbounded = lru_cache()
    return unbounded(f)
def lru_cache(
    maxsize: Union[None, "NonNegativeInt", FunctionType, classmethod, staticmethod]=None,
    generate_key = lambda *args, **kwargs: (args, frozenset(kwargs.items())),
    keep_stat: bool = False # If False cache.misses and cache.hits becomes unavailable - optimisation
):
    """Memoizing decorator factory.

    maxsize semantics:
      * None  -> unbounded plain dict cache;
      * 0     -> caching disabled (identity decorator);
      * int>0 -> bounded cache backed by bmap.BoundSizedDict;
      * a function / classmethod / staticmethod -> treated as bare-decorator
        usage (``@lru_cache`` without parentheses) and cached unbounded.

    generate_key builds the cache key from the call arguments; keep_stat
    adds hit/miss counters on the cache object at a small cost.
    Returns a decorator (or, in bare-decorator usage, the wrapped callable).
    """
    from bmap import BoundSizedDict
    specials = {classmethod, staticmethod, FunctionType}
    if maxsize == 0:
        # Cache disabled: hand back the callable untouched.
        return lambda f: f
    elif any(isinstance(maxsize, t) for t in specials):
        # Bare-decorator usage: `maxsize` is actually the target callable.
        return cache(maxsize)
    def wrap(func):
        nonlocal generate_key
        # Unique sentinel so cached values of None are distinguishable.
        none = object()
        wrapper = type(func)
        if wrapper in specials:
            if wrapper is FunctionType:
                wrapper = None
            else:
                # classmethod/staticmethod: unwrap now, re-wrap on the way out.
                func = func.__func__
        else:
            raise TypeError('unsupported descriptor', wrapper)
        try:
            # Cache base class is chosen from maxsize; an invalid maxsize
            # makes the base expression None and class creation raises.
            class Cache(
                dict if maxsize is None else
                BoundSizedDict if isinstance(maxsize, int) and maxsize >= 0 else
                None):
                def remove(self, key):
                    # Best-effort eviction helper exposed on the cache.
                    try:
                        del self[key]
                    except KeyError:
                        pass
                if keep_stat:
                    # Class-level counters; += below creates per-instance ones.
                    misses = hits = 0
                    def get(self, name, default):
                        try:
                            item = super(self.__class__, self).__getitem__(name)
                        except KeyError:
                            self.misses += 1
                            return default
                        else:
                            self.hits += 1
                            return item
        except TypeError:
            raise TypeError('Invalid argument', maxsize)
        cached = Cache({} if maxsize is None else maxsize)
        @wraps(func)
        def wrap(*args, **kwargs):
            key = generate_key(*args, **kwargs)
            value = cached.get(key, none)
            if value is none:
                calculated = func(*args, **kwargs)
                cached[key] = calculated
                return calculated
            else:
                return value
        # Expose the cache for inspection/eviction by callers.
        wrap.cache = cached
        if wrapper is not None:
            # Restore the original classmethod/staticmethod wrapping.
            wrap = wrapper(wrap)
        return wrap
    return wrap
import functools
import unittest
import six
if six.PY3:
from unittest import mock
else:
import mock
from socketio import base_manager
from socketio import pubsub_manager
class TestBaseManager(unittest.TestCase):
    """Unit tests for socketio.pubsub_manager.PubSubManager.

    _publish is replaced with a MagicMock in setUp, so each test asserts
    the exact message dict the manager would push onto the pub/sub backend
    without requiring a real broker.
    """
    def setUp(self):
        # Fresh manager per test with a mocked server and publish hook.
        mock_server = mock.MagicMock()
        self.pm = pubsub_manager.PubSubManager()
        self.pm._publish = mock.MagicMock()
        self.pm.set_server(mock_server)
        self.pm.initialize()
    def test_default_init(self):
        self.assertEqual(self.pm.channel, 'socketio')
        self.assertEqual(len(self.pm.host_id), 32)
        self.pm.server.start_background_task.assert_called_once_with(
            self.pm._thread)
    def test_custom_init(self):
        pubsub = pubsub_manager.PubSubManager(channel='foo')
        self.assertEqual(pubsub.channel, 'foo')
        self.assertEqual(len(pubsub.host_id), 32)
    def test_write_only_init(self):
        # write_only managers must not start the listener thread.
        mock_server = mock.MagicMock()
        pm = pubsub_manager.PubSubManager(write_only=True)
        pm.set_server(mock_server)
        pm.initialize()
        self.assertEqual(pm.channel, 'socketio')
        self.assertEqual(len(pm.host_id), 32)
        self.assertEqual(pm.server.start_background_task.call_count, 0)
    def test_emit(self):
        self.pm.emit('foo', 'bar')
        self.pm._publish.assert_called_once_with(
            {'method': 'emit', 'event': 'foo', 'data': 'bar',
             'namespace': '/', 'room': None, 'skip_sid': None,
             'callback': None})
    def test_emit_with_namespace(self):
        self.pm.emit('foo', 'bar', namespace='/baz')
        self.pm._publish.assert_called_once_with(
            {'method': 'emit', 'event': 'foo', 'data': 'bar',
             'namespace': '/baz', 'room': None, 'skip_sid': None,
             'callback': None})
    def test_emit_with_room(self):
        self.pm.emit('foo', 'bar', room='baz')
        self.pm._publish.assert_called_once_with(
            {'method': 'emit', 'event': 'foo', 'data': 'bar',
             'namespace': '/', 'room': 'baz', 'skip_sid': None,
             'callback': None})
    def test_emit_with_skip_sid(self):
        self.pm.emit('foo', 'bar', skip_sid='baz')
        self.pm._publish.assert_called_once_with(
            {'method': 'emit', 'event': 'foo', 'data': 'bar',
             'namespace': '/', 'room': None, 'skip_sid': 'baz',
             'callback': None})
    def test_emit_with_callback(self):
        # Callbacks are serialized as a (room, namespace, ack id) triple.
        with mock.patch.object(self.pm, '_generate_ack_id',
                               return_value='123'):
            self.pm.emit('foo', 'bar', room='baz', callback='cb')
            self.pm._publish.assert_called_once_with(
                {'method': 'emit', 'event': 'foo', 'data': 'bar',
                 'namespace': '/', 'room': 'baz', 'skip_sid': None,
                 'callback': ('baz', '/', '123')})
    def test_emit_with_callback_without_server(self):
        standalone_pm = pubsub_manager.PubSubManager()
        self.assertRaises(RuntimeError, standalone_pm.emit, 'foo', 'bar',
                          callback='cb')
    def test_emit_with_callback_missing_room(self):
        with mock.patch.object(self.pm, '_generate_ack_id',
                               return_value='123'):
            self.assertRaises(ValueError, self.pm.emit, 'foo', 'bar',
                              callback='cb')
    def test_emit_with_ignore_queue(self):
        # ignore_queue bypasses the broker and emits locally.
        self.pm.connect('123', '/')
        self.pm.emit('foo', 'bar', room='123', namespace='/',
                     ignore_queue=True)
        self.pm._publish.assert_not_called()
        self.pm.server._emit_internal.assert_called_once_with('123', 'foo',
                                                              'bar', '/', None)
    def test_close_room(self):
        self.pm.close_room('foo')
        self.pm._publish.assert_called_once_with(
            {'method': 'close_room', 'room': 'foo', 'namespace': '/'})
    def test_close_room_with_namespace(self):
        self.pm.close_room('foo', '/bar')
        self.pm._publish.assert_called_once_with(
            {'method': 'close_room', 'room': 'foo', 'namespace': '/bar'})
    def test_handle_emit(self):
        with mock.patch.object(base_manager.BaseManager, 'emit') as super_emit:
            self.pm._handle_emit({'event': 'foo', 'data': 'bar'})
            super_emit.assert_called_once_with('foo', 'bar', namespace=None,
                                               room=None, skip_sid=None,
                                               callback=None)
    def test_handle_emit_with_namespace(self):
        with mock.patch.object(base_manager.BaseManager, 'emit') as super_emit:
            self.pm._handle_emit({'event': 'foo', 'data': 'bar',
                                  'namespace': '/baz'})
            super_emit.assert_called_once_with('foo', 'bar', namespace='/baz',
                                               room=None, skip_sid=None,
                                               callback=None)
    def test_handle_emiti_with_room(self):
        with mock.patch.object(base_manager.BaseManager, 'emit') as super_emit:
            self.pm._handle_emit({'event': 'foo', 'data': 'bar',
                                  'room': 'baz'})
            super_emit.assert_called_once_with('foo', 'bar', namespace=None,
                                               room='baz', skip_sid=None,
                                               callback=None)
    def test_handle_emit_with_skip_sid(self):
        with mock.patch.object(base_manager.BaseManager, 'emit') as super_emit:
            self.pm._handle_emit({'event': 'foo', 'data': 'bar',
                                  'skip_sid': '123'})
            super_emit.assert_called_once_with('foo', 'bar', namespace=None,
                                               room=None, skip_sid='123',
                                               callback=None)
    def test_handle_emit_with_callback(self):
        # A relayed callback becomes a functools.partial that republishes
        # the ack back to the originating host.
        host_id = self.pm.host_id
        with mock.patch.object(base_manager.BaseManager, 'emit') as super_emit:
            self.pm._handle_emit({'event': 'foo', 'data': 'bar',
                                  'namespace': '/baz',
                                  'callback': ('sid', '/baz', 123)})
            self.assertEqual(super_emit.call_count, 1)
            self.assertEqual(super_emit.call_args[0], ('foo', 'bar'))
            self.assertEqual(super_emit.call_args[1]['namespace'], '/baz')
            self.assertIsNone(super_emit.call_args[1]['room'])
            self.assertIsNone(super_emit.call_args[1]['skip_sid'])
            self.assertIsInstance(super_emit.call_args[1]['callback'],
                                  functools.partial)
            super_emit.call_args[1]['callback']('one', 2, 'three')
            self.pm._publish.assert_called_once_with(
                {'method': 'callback', 'host_id': host_id, 'sid': 'sid',
                 'namespace': '/baz', 'id': 123, 'args': ('one', 2, 'three')})
    def test_handle_callback(self):
        host_id = self.pm.host_id
        with mock.patch.object(self.pm, 'trigger_callback') as trigger:
            self.pm._handle_callback({'method': 'callback',
                                      'host_id': host_id, 'sid': 'sid',
                                      'namespace': '/', 'id': 123,
                                      'args': ('one', 2)})
            trigger.assert_called_once_with('sid', '/', 123, ('one', 2))
    def test_handle_callback_bad_host_id(self):
        # Callbacks addressed to a different host must be ignored.
        with mock.patch.object(self.pm, 'trigger_callback') as trigger:
            self.pm._handle_callback({'method': 'callback',
                                      'host_id': 'bad', 'sid': 'sid',
                                      'namespace': '/', 'id': 123,
                                      'args': ('one', 2)})
            self.assertEqual(trigger.call_count, 0)
    def test_handle_callback_missing_args(self):
        # Malformed callback messages (any required field missing) are dropped.
        host_id = self.pm.host_id
        with mock.patch.object(self.pm, 'trigger_callback') as trigger:
            self.pm._handle_callback({'method': 'callback',
                                      'host_id': host_id, 'sid': 'sid',
                                      'namespace': '/', 'id': 123})
            self.pm._handle_callback({'method': 'callback',
                                      'host_id': host_id, 'sid': 'sid',
                                      'namespace': '/'})
            self.pm._handle_callback({'method': 'callback',
                                      'host_id': host_id, 'sid': 'sid'})
            self.pm._handle_callback({'method': 'callback',
                                      'host_id': host_id})
            self.assertEqual(trigger.call_count, 0)
    def test_handle_close_room(self):
        with mock.patch.object(base_manager.BaseManager, 'close_room') \
                as super_close_room:
            self.pm._handle_close_room({'method': 'close_room',
                                        'room': 'foo'})
            super_close_room.assert_called_once_with(room='foo',
                                                     namespace=None)
    def test_handle_close_room_with_namespace(self):
        with mock.patch.object(base_manager.BaseManager, 'close_room') \
                as super_close_room:
            self.pm._handle_close_room({'method': 'close_room',
                                        'room': 'foo', 'namespace': '/bar'})
            super_close_room.assert_called_once_with(room='foo',
                                                     namespace='/bar')
    def test_background_thread(self):
        # The listener thread must dispatch dicts, JSON strings and pickled
        # payloads, and silently skip unknown/undecodable messages.
        self.pm._handle_emit = mock.MagicMock()
        self.pm._handle_callback = mock.MagicMock()
        self.pm._handle_close_room = mock.MagicMock()
        def messages():
            import pickle
            yield {'method': 'emit', 'value': 'foo'}
            yield {'missing': 'method'}
            yield '{"method": "callback", "value": "bar"}'
            yield {'method': 'bogus'}
            yield pickle.dumps({'method': 'close_room', 'value': 'baz'})
            yield 'bad json'
            yield b'bad pickled'
        self.pm._listen = mock.MagicMock(side_effect=messages)
        try:
            self.pm._thread()
        except StopIteration:
            pass
        self.pm._handle_emit.assert_called_once_with(
            {'method': 'emit', 'value': 'foo'})
        self.pm._handle_callback.assert_called_once_with(
            {'method': 'callback', 'value': 'bar'})
        self.pm._handle_close_room.assert_called_once_with(
            {'method': 'close_room', 'value': 'baz'})
|
from PIL import Image
import matplotlib.pyplot as plt
def getRed(redVal):
    """Hex colour string with only the red channel set, e.g. '#ff0000'."""
    return '#%02x0000' % redVal
def getGreen(greenVal):
    """Hex colour string with only the green channel set, e.g. '#00ff00'."""
    return '#00%02x00' % greenVal
def getBlue(blueVal):
    """Hex colour string with only the blue channel set, e.g. '#0000ff'."""
    return '#0000%02x' % blueVal
image = Image.open("C:/Users/DELL/projects/colon/original.png")
image.putpixel((0, 1), (1, 1, 5))
image.putpixel((0, 2), (2, 1, 5))
histogram = image.histogram()
# The PIL histogram is 768 counts: R in [0,256), G in [256,512), B in [512,768).
# One (title, counts, colour function, output file) entry per channel, in the
# same order and with the same file names as before.
channel_plots = [
    ("Red pixels", histogram[0:256], getRed, 'histogram2.png'),
    ("Green pixels", histogram[256:512], getGreen, 'histogram1.png'),
    ("Blue pixels", histogram[512:768], getBlue, 'histogram.png'),
]
for figure_index, (title, counts, colour, outfile) in enumerate(channel_plots):
    plt.figure(figure_index)
    plt.title(title)
    plt.xlabel('x-axis')
    plt.ylabel('y-axis')
    for value in range(0, 256):
        plt.bar(value, counts[value], color=colour(value), edgecolor=colour(value), alpha=0.3)
    plt.savefig(outfile)
|
"""
Format the characters in the stdin by the given number of spaces
Python 3
Assumptions:
============
* if a word is greater than width, it will NOT be split across the
lines. Instead, the entire word will be printed out
* the line to be read is not larger than available main memory
* input is formatted correctly (no blank lines etc)
"""
import sys
import unittest
class TestAlg(unittest.TestCase):
    """Unit tests for the get_lines word-wrapping generator."""
    def setUp(self):
        # Fixture texts: a short ASCII sentence and a long mixed-script
        # (Bengali + Latin) paragraph to exercise non-ASCII handling.
        self.texts = [
            "The quick brown fox jumps over the lazy dog.",
            "Sheikh Mujibur Rahman (Bengali: শেখ মুজিবুর রহমান Shekh "
            "Mujibur Rôhman), (17 March 1920 – 15 August 1975) was a "
            "preeminent Bengali nationalist leader of Bangladesh.[1] He "
            "headed the Awami League and was the first President of "
            "Bangladesh during the Bangladesh Liberation War, and later "
            "became Prime Minister in independent Bangladesh. He is "
            "popularly referred to as Sheikh Mujib (shortened as Mujib "
            "or Mujibur, not Rahman), with the honorary title of "
            "Bangabandhu (বঙ্গবন্ধু Bôngobondhu, 'Friend of Bengal'). "
            "His eldest daughter, Sheikh Hasina, is the present leader "
            "of the Awami League and the current Prime Minister of "
            "Bangladesh. As a student political leader, Mujib rose in "
            "Bengali politics and within ranks of the Awami League. An "
            "advocate of socialism, he became popular for his opposition "
            "to the ethnic and institutional discrimination against "
            "Bengalis, who comprised the majority of Pakistan's "
            "population."
        ]
        # Expected wrapping of self.texts[1] at width 60, line by line.
        self.text60 = [
            "Sheikh Mujibur Rahman (Bengali: শেখ মুজিবুর রহমান Shekh",
            "Mujibur Rôhman), (17 March 1920 – 15 August 1975) was a",
            "preeminent Bengali nationalist leader of Bangladesh.[1] He",
            "headed the Awami League and was the first President of",
            "Bangladesh during the Bangladesh Liberation War, and later",
            "became Prime Minister in independent Bangladesh. He is",
            "popularly referred to as Sheikh Mujib (shortened as Mujib",
            "or Mujibur, not Rahman), with the honorary title of",
            "Bangabandhu (বঙ্গবন্ধু Bôngobondhu, 'Friend of Bengal').",
            "His eldest daughter, Sheikh Hasina, is the present leader",
            "of the Awami League and the current Prime Minister of",
            "Bangladesh. As a student political leader, Mujib rose in",
            "Bengali politics and within ranks of the Awami League. An",
            "advocate of socialism, he became popular for his opposition",
            "to the ethnic and institutional discrimination against",
            "Bengalis, who comprised the majority of Pakistan's",
            "population.",
        ]
    def test_border(self):
        """Extreme widths: a huge width yields the whole text on one line;
        width 1 yields the first (over-long) word on its own line."""
        width = 10000000
        for text in self.texts:
            line = get_lines(text, width).send(None)
            self.assertEqual(line, "{}\n".format(text))
        width = 1
        line = get_lines(self.texts[0], width).send(None)
        self.assertEqual(line, "The\n")
    def test_sanity(self):
        """Lines fit the width at many widths; width-60 output matches text60.

        NOTE(review): len(line) includes the trailing newline, so this
        allows only width-1 visible characters per line -- confirm that is
        the intended contract.
        """
        for width in (i for i in range(50, 1000)):
            for text in self.texts:
                for line in get_lines(text, width):
                    self.assertTrue(len(line) <= width)
        for index, line in enumerate(get_lines(self.texts[1], 60)):
            self.assertEqual(line, "{}\n".format(self.text60[index]))
def main():
    """Read (width, text) line pairs from the file named in argv[1] and
    write the word-wrapped text to stdout."""
    with open(sys.argv[1], 'r') as source:
        while True:
            width = source.readline()
            text = source.readline()
            if width == '':
                break  # reached EOF
            sys.stdout.writelines(get_lines(text, int(width)))
def get_lines(text, width):
    """Generate successive word-wrapped lines of *text*, each ending in a newline.

    Words longer than *width* are emitted whole on their own line rather
    than being split.
    """
    pos = 0
    total = len(text)
    while True:
        window = text[pos:pos + width]
        if pos + width > total:
            # The remainder (possibly empty) fits in one line: emit and stop.
            yield window + "\n"
            return
        if not window:
            # Consumed the text exactly.
            return
        cut = window.rfind(' ')
        if cut >= 0:
            # Break at the last blank inside the window.
            yield window[:cut] + "\n"
            pos += cut + 1  # skip the blank itself
            continue
        # No blank in the window: emit the whole (over-long) word.
        gap = text[pos:].find(' ')
        if gap == -1:
            # Last word of the text.
            word = text[pos:]
            pos += len(word)
        else:
            word = text[pos:pos + gap]
            pos += gap + 1  # skip the blank itself
        yield word + "\n"
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python
import SimpleITK as sitk
import time
import DataPreparation
def main():
    """Affine registration of two MHD volumes with SimpleITK.

    Loads the fixed and moving volumes, smooths both with curvature flow,
    runs a correlation-metric affine registration, then writes the smoothed
    inputs, the resampled moving image, the final transform and an RGB
    overlay into outputDirPath.
    """
    # NOTE(review): no trailing '/' here, so every output below is written
    # next to this directory with a 'Res_Affine_Conv' file-name prefix --
    # confirm that is intended.
    outputDirPath = 'C:/ZCU/Diplomka/Dataset/04/RESULTS/Res_Affine_Conv'
    startTime = time.time()
    # Dataset 04 volumes.  The original re-assigned these path variables
    # several times; only the final values (kept here) took effect.
    fixedRMHD = 'C:/ZCU/Diplomka/Dataset/04/hamper-carpet-earth-jersey_lake-fanta_601_447041836b6cf9ef3b328041cf99cac6c8308e90c46d437db62f6c8689fa6b58_v0.mhd'
    movingRMHD = 'C:/ZCU/Diplomka/Dataset/04/hamper-carpet-earth-jersey_lake-fanta_501_447041836b6cf9ef3b328041cf99cac6c8308e90c46d437db62f6c8689fa6b58_v0.mhd'
    reader = sitk.ImageFileReader()
    reader.SetFileName(fixedRMHD)
    fixed = reader.Execute()
    reader.SetFileName(movingRMHD)
    moving = reader.Execute()

    def observer(method):
        # Per-iteration progress: iteration number, metric value, parameters.
        print("{0:3} = {1:10.5f} : {2}".format(method.GetOptimizerIteration(),
                                               method.GetMetricValue(),
                                               method.GetOptimizerPosition()))

    print("====Image registrion DICOM files====")
    print('Smoothing')
    fixImgSmooth = sitk.CurvatureFlow(image1=fixed,
                                      timeStep=0.35,
                                      numberOfIterations=10)
    # BUG FIX: the original smoothed `fixed` twice, so the registration
    # aligned the fixed image with itself; the moving image is smoothed here.
    movImgSmooth = sitk.CurvatureFlow(image1=moving,
                                      timeStep=0.35,
                                      numberOfIterations=10)
    print('Smoothing ENDED')
    # Initialise the affine transform from the geometric centres of the images.
    initial_transform = sitk.CenteredTransformInitializer(sitk.Cast(fixImgSmooth, movImgSmooth.GetPixelID()),
                                                          movImgSmooth,
                                                          sitk.AffineTransform(3),
                                                          sitk.CenteredTransformInitializerFilter.GEOMETRY)
    registration_method = sitk.ImageRegistrationMethod()
    registration_method.SetMetricAsCorrelation()
    registration_method.SetInterpolator(sitk.sitkGaussian)
    registration_method.SetOptimizerAsGradientDescentLineSearch(learningRate=2.0, numberOfIterations=100)
    registration_method.SetOptimizerScalesFromPhysicalShift()
    registration_method.AddCommand(sitk.sitkIterationEvent, lambda: observer(registration_method))
    registration_method.SetInitialTransform(initial_transform, inPlace=False)
    final_transform_v1 = registration_method.Execute(sitk.Cast(fixImgSmooth, sitk.sitkFloat32),
                                                     sitk.Cast(movImgSmooth, sitk.sitkFloat32))
    print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
    print('Final metric value: {0}'.format(registration_method.GetMetricValue()))
    print(final_transform_v1)
    writer = sitk.ImageFileWriter()
    writer.SetFileName(outputDirPath + 'Fixed_Smoothed.nrrd')
    writer.Execute(fixImgSmooth)
    writer.SetFileName(outputDirPath + 'Moving_Smoothed.nrrd')
    writer.Execute(movImgSmooth)
    # Resample the original (un-smoothed) moving image onto the fixed grid.
    resample = sitk.ResampleImageFilter()
    resample.SetReferenceImage(fixed)
    # SimpleITK supports several interpolation options; linear gives
    # reasonable results at low cost.
    resample.SetInterpolator(sitk.sitkLinear)
    resample.SetTransform(final_transform_v1)
    sitk.WriteImage(resample.Execute(moving), outputDirPath + 'MovingAfterTransform' + '.nrrd')
    sitk.WriteTransform(final_transform_v1, outputDirPath + 'transform' + '.tfm')
    # RGB overlay for visual inspection: R=fixed, G=registered moving,
    # B=average of the two.
    simg1 = sitk.Cast(sitk.RescaleIntensity(fixed), sitk.sitkUInt8)
    simg2 = sitk.Cast(sitk.RescaleIntensity(resample.Execute(moving)), sitk.sitkUInt8)
    cimg = sitk.Compose(simg1, simg2, simg1 // 2. + simg2 // 2.)
    outFileName = 'ResultOfRegistration.nrrd'
    writer.SetFileName(outputDirPath + outFileName)
    writer.Execute(cimg)
    stopTime = time.time()
    print(stopTime - startTime)
    print("====END OF REGISTRATION=====")
    # NOTE(review): main() is never invoked in this module as visible here;
    # confirm an `if __name__ == '__main__': main()` guard exists elsewhere.
import math
def multiplication_table(size):
    """Return the size-by-size multiplication table as aligned text.

    Each cell is right-justified in a field of three characters per digit
    of *size*, so columns line up for any table size >= 1.
    """
    digits = int(math.log10(size)) + 1
    cell_width = digits * 3
    lines = []
    for column in range(1, size + 1):
        lines.append(''.join(f'{row * column:{cell_width}}' for row in range(1, size + 1)))
    return '\n'.join(lines)


# Prompt only when run as a script, so importing this module has no side
# effects (the original read stdin at import time).
if __name__ == "__main__":
    columns_and_rows = int(
        input('How many columns and rows do you want in your multiplication table? '))
    print(multiplication_table(columns_and_rows))
|
from datetime import datetime, timedelta
from config import settings
from TwitterAPI import TwitterAPI
from dateutil.parser import parse
from .data.twitter import lists
import csv
import os
description = """ First-person accounts from regions affected by conflict and diaster. """
definition = {
'internalID': 'b3bf1450-8768-4204-b82e-c14bd2de7bce',
'sourceType': 'twitter',
'language': 'python',
'frequency': 'repeats',
'repeatsEvery': 'hour',
'startDate': datetime.strptime('20140507', "%Y%m%d"),
'endDate': datetime.now() + timedelta(days=365),
'description': description
}
def suck(save_item, handle_error, source):
    """Pull new tweets from each configured Twitter list and persist them.

    :param save_item: callback that stores one transformed item.
    :param handle_error: error callback.  NOTE(review): unused -- non-200
        responses are silently skipped; confirm that is intended.
    :param source: mutable source-state dict; source['lastRetrieved'] maps a
        list owner's screen name to the newest tweet id already ingested.
        NOTE(review): keying by owner_screen_name collides if one owner has
        several lists -- confirm.
    :return: the updated source['lastRetrieved'] mapping.
    """
    api = TwitterAPI(settings.TWITTER['consumer_key'],
                     settings.TWITTER['consumer_secret'],
                     settings.TWITTER['access_token'],
                     settings.TWITTER['access_token_secret'])
    if 'lastRetrieved' not in source:
        source['lastRetrieved'] = {}
    for l in lists.items:
        request_filters = {
            'slug':l['slug'],
            'owner_screen_name':l['owner_screen_name'],
            'per_page': 100
        }
        # Only fetch tweets newer than the last one seen for this owner.
        if l['owner_screen_name'] in source['lastRetrieved']:
            request_filters['since_id'] = source['lastRetrieved'][l['owner_screen_name']]
        r = api.request('lists/statuses', request_filters)
        new_since_id = None
        if r.status_code == 200:
            for record in r.get_iterator():
                # Remember the first record's id as the new high-water mark
                # (assumes the API returns newest-first -- confirm).
                if not new_since_id:
                    new_since_id = record['id_str']
                    source['lastRetrieved'][l['owner_screen_name']] = new_since_id
                item = transform(record)
                save_item(item)
    return source['lastRetrieved']
def transform(record):
    """Map a raw Twitter status dict onto the internal item schema.

    :param record: one status object as returned by the Twitter API.
    :return: dict ready for save_item(), with author, content, geo,
        language and an optional 'image' or 'video' field.
    """
    data = {
        'remoteID': record['id_str'],
        'author': {
            'name': record['user']['name'],
            'username': record['user']['screen_name'],
            'remoteID': str(record['user']['id']),
            'image': record['user']['profile_image_url']
        },
        'content': record['text'],
        'publishedAt': parse(record['created_at']),
        'geo': {
            'locationIdentifiers': {
                'authorLocationName': record['user']['location'],
                'authorTimeZone': record['user']['time_zone']
            }
        },
        'language': {
            'code': record['lang']
        },
        'source': 'twitter',
        'lifespan': 'temporary'
    }
    # Fix: the API supplies point coordinates under 'coordinates'; the old
    # code tested a nonexistent 'coords' key, so coordinates were never kept.
    if 'coordinates' in record and record['coordinates']:
        data['geo']['coords'] = record['coordinates']['coordinates']
    if 'media' in record['entities'] and len(record['entities']['media']) > 0:
        media = record['entities']['media'][0]
        # Fix: compare strings with '==', not 'is' (identity on string
        # literals is an implementation detail and unreliable).
        prop = 'video' if media['type'] == 'video' else 'image'
        data[prop] = media['media_url']
    return data
|
import numpy as np
import pandas as pd
import random
import geneticAlgorithm.fitnessFunctions as functions
import geneticAlgorithm.experiment as exp
from classes.Encoding import Encoding
# Cache of trap -> lethality results.  NOTE(review): never read or written
# in this module as visible here -- populated elsewhere or dead; confirm.
lethalityDict = {}
def generateRandomTraps(encoder: Encoding = None, numTraps=100000):
    """Generate `numTraps` traps uniformly at random; return the decoded traps.

    Fix: fall back to a default Encoding() when none is supplied -- the
    original dereferenced None (and the module-level call below passes no
    encoder), matching getStats' handling of the same default.

    Cells 4, 7 and 10 are fixed (door=1, food=2, floor=0); every other cell
    gets a uniformly random value in [2, 92].
    """
    if not encoder:
        encoder = Encoding()
    traps = []
    for _ in range(numTraps):
        trap = []
        for i in range(12):
            if i == 4:
                trap.append(1)
            elif i == 7:
                trap.append(2)
            elif i == 10:
                trap.append(0)
            else:
                trap.append(random.randrange(2, 93, 1))
        traps.append(trap)
    return encoder.decode(traps)
def getStats(traps, encoder: Encoding = None):
    """Return a pandas DataFrame with the coherence and lethality of each trap.

    Traps are expected decoded; they are re-encoded before simulation.
    """
    encoder = encoder or Encoding()
    rows = []
    for trap in traps:
        coherence = functions.getCoherence(trap)
        lethality, _, _ = exp.runSimulations(encoder.encode(trap), numSimulations=1000, printStatistics=False)
        rows.append((coherence, lethality))
    return pd.DataFrame(rows, columns=['Coherence', 'Lethality'])
# Module-level run: score 100k random traps with the default encoding.
# NOTE(review): with generateRandomTraps' original default (encoder=None)
# this call dereferences None -- confirm a default Encoding is supplied.
stats = getStats(generateRandomTraps())
|
import re
import datetime
# Reference Monday used to compute the weekday offset of any entry date.
a_monday = datetime.datetime.strptime('2019-08-19', '%Y-%m-%d')

class Line:
    """One parsed '- total...' training-journal entry.

    Extracts distance (km), duration (minutes), elevation gain/loss
    (D+/D-) and the shoes in use (from sticky #SET state or per-day
    attributes), and derives weekday/week/month from the entry date.
    """
    def __init__(self, date, l, sticky, attrs):
        self.date = date
        self.full = l
        self.dist = None   # distance in km
        self.dur = None    # duration in minutes
        self.d_pos = None  # elevation gain
        self.d_neg = None  # elevation loss
        self.shoes = None
        # Per-day attributes override the sticky (file-wide) shoe setting.
        if 'shoes' in sticky: self.shoes = sticky['shoes']
        if 'shoes' in attrs: self.shoes = attrs['shoes']
        def derive_date(o):
            # weekday is taken modulo 7 from a known Monday, so it also
            # works for dates before a_monday.
            date = datetime.datetime.strptime(o.date, '%Y-%m-%d')
            delta = date - a_monday
            wd = delta.days % 7
            o.weekday = wd
            o.week = datetime.datetime.strftime(date - datetime.timedelta(wd), '%Y-%m-%d')
            o.month = datetime.datetime.strftime(date, '%Y-%m')
        derive_date(self)
        for e in l.split(' '):
            g = []
            def pat(reg):
                # Full-match the current token against reg; capture groups
                # are stashed in g for the caller.
                groups = re.fullmatch(reg, e)
                g[:] = []
                if groups is None:
                    return False
                else:
                    g[:] = groups.groups()
                    return True
            # Strip a trailing comma and a leading '~' (approximate marker).
            if e.endswith(','):
                e = e[:-1]
            if e.startswith('~'):
                e = e[1:]
            if False: pass
            elif pat(r'([0-9.]+)km?'):
                self.set_dist(float(g[0]))
            elif pat(r'([0-9]+)min'):
                self.set_dur(int(g[0]))
            elif pat(r'([0-9][0-9]?):([0-9][0-9]?)'):
                # mm:ss -> minutes
                self.set_dur(int(g[0]) + int(g[1])/60)
            elif pat(r'([0-9][0-9]?):([0-9][0-9]?):([0-9][0-9]?)'):
                # hh:mm:ss -> minutes
                self.set_dur(int(g[0])*60 + int(g[1]) + int(g[2])/60)
            elif pat(r'([0-9][0-9]?)h([0-9][0-9]?)'):
                # XhYY -> minutes
                self.set_dur(int(g[0])*60 + int(g[1]))
            elif pat(r'([0-9]+)D[+]'):
                self.set_d_pos(int(g[0]))
            elif pat(r'([0-9]+)D[-]'):
                self.set_d_neg(int(g[0]))
        # If only a gain was recorded, assume the loss mirrors it.
        if self.d_neg is None and self.d_pos is not None:
            self.d_neg = self.d_pos
    def set_dist(self, d):
        if self.dist is not None: raise Exception(f'set_dist({d}): dist already set to {self.dist} // {self.full}')
        self.dist = d
    def set_dur(self, d):
        if self.dur is not None: raise Exception(f'set_dur({d}): dur already set to {self.dur} // {self.full}')
        self.dur = d
    def set_d_pos(self, d):
        if self.d_pos is not None: raise Exception(f'set_d_pos({d}): d_pos already set to {self.d_pos} // {self.full}')
        self.d_pos = d
    def set_d_neg(self, d):
        # Fix: the error message referenced nonexistent self.d_eng (raising
        # AttributeError instead of the intended Exception) and said d_pos.
        if self.d_neg is not None: raise Exception(f'set_d_neg({d}): d_neg already set to {self.d_neg} // {self.full}')
        self.d_neg = d
    def __str__(self):
        return f'Line({self.dist}, {self.dur}, {self.d_pos}, {self.d_neg}, {self.full})'
def parse_logdown(fname):
    """Parse the markdown training journal *fname* into a list of Line entries.

    The file is split on '\\n#' and the sections re-joined in reverse, so a
    newest-first journal is processed oldest-first.  '# <date>' headings set
    the current date, '# #SET key: value' lines set sticky (file-wide)
    attributes, '- shoes: X' sets a per-day attribute, and '- total...'
    lines become Line entries (each is also printed as a progress trace).
    """
    with open(fname, 'r') as f:
        content = '\n#'.join(reversed(f.read().split('\n#')))
    entries = []
    sticky = {}
    date = ""
    attrs = {}
    for l in content.split('\n'):
        l = l.rstrip()
        if l.startswith('# '):
            # New day section: reset per-day attributes, extract the date
            # token (':' and ',' are normalised to spaces first).
            attrs = {}
            date = re.sub('[:,]', ' ', l).split(' ')[1]
        g = re.fullmatch(r'# #SET +(.+) *: *(.+)', l)
        if g is not None:
            sticky[g[1]] = g[2]
        elif re.match('- total[0-9]*:.*', l):
            l = Line(date, l, sticky, attrs)
            entries.append(l)
            print(l)
        elif l.startswith('- shoes: '):
            attrs['shoes'] = l.split(' ')[2]
    return entries
def print_shoes_stats(entries):
    """Print per-shoe totals (distance, D+, D-) for every known shoe model."""
    print(set(entry.shoes for entry in entries))
    known_shoes = 'addidas roclite merrell saguaro roclite2 terraultra roclite3 roclite4'.split(' ')
    for shoe in known_shoes:
        print()
        print('#', shoe)
        matching = [entry for entry in entries if entry.shoes == shoe]
        print('Distance:', sum(entry.dist for entry in matching if entry.dist))
        print('D+:', sum(entry.d_pos for entry in matching if entry.d_pos))
        print('D-:', sum(entry.d_neg for entry in matching if entry.d_neg))
def main():
    """Parse the journal named on the command line (default course-journal.md)
    and print per-shoe statistics."""
    import sys
    path = sys.argv[1] if len(sys.argv) > 1 else 'course-journal.md'
    entries = parse_logdown(path)
    print_shoes_stats(entries)
if __name__== "__main__": main()
|
# Generated by Django 2.2 on 2020-06-12 11:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 2.2, 2020-06-12).

    Moves the user<->copy relationship onto Usuario: drops
    Ejemplar.usuario and Libro.paginas, and adds the many-to-many
    Usuario.ejemplares field pointing at Ejemplar.
    """
    dependencies = [
        ('biblioteca', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='ejemplar',
            name='usuario',
        ),
        migrations.RemoveField(
            model_name='libro',
            name='paginas',
        ),
        migrations.AddField(
            model_name='usuario',
            name='ejemplares',
            field=models.ManyToManyField(to='biblioteca.Ejemplar'),
        ),
    ]
|
import numpy as np
import matplotlib.pyplot as plt
import pickle
from scipy.optimize import curve_fit
def corrected_rotation(x_arr, mu):
    """Shift a rotation angle into the 360-degree window centred on *mu*.

    Given an angle in degrees (nominally in [-180, 180)), add or subtract
    one full turn so the result lies within [mu - 180, mu + 180].  The
    original docstring was left unfinished (empty ``:rtype:``).

    :param x_arr: angle in degrees.
    :param mu: centre of the desired window, in degrees.
    :return: the angle, shifted by +/-360 degrees if needed.
    """
    if x_arr < (mu - 180):
        x_arr += 360
    elif x_arr > mu + 180:
        x_arr -= 360
    return x_arr
def single_gaussian(angles, mu, sigma, amp):
    """Gaussian of height amp centered at mu, with each angle wrapped into [mu-180, mu+180]."""
    wrapped = []
    for angle in angles:
        # Same wrapping as corrected_rotation, inlined here.
        if angle < mu - 180:
            angle = angle + 360
        elif angle > mu + 180:
            angle = angle - 360
        wrapped.append(angle)
    x_corrected = np.array(wrapped)
    return amp * np.exp(-(x_corrected - mu) ** 2 / (2.0 * sigma ** 2))
def pseudo_symmetric_gaussian(
        angles,
        mu1, sigma1, amp):
    """Sum of two equal-width, equal-height Gaussians whose peaks sit 180 degrees apart."""
    primary = single_gaussian(angles, mu1, sigma1, amp)
    mirror = single_gaussian(angles, mu1 + 180, sigma1, amp)
    return primary + mirror
def main(angles_org, firing_rates_org, initial_est, fig_title=''):
    """Plot rotation-tuning data and overlay two curve fits on a new figure.

    Fits both a single Gaussian and a pseudo mirror-symmetric Gaussian (two
    equal peaks 180 degrees apart) to the data, draws both fitted curves and
    prints the standard deviation of the fitted parameters for each fit.

    :param angles_org: measured rotation angles in degrees.
    :param firing_rates_org: firing rates corresponding to angles_org.
    :param initial_est: 2-D array; row 0 holds the initial (mu, sigma, amp) guess.
    :param fig_title: optional suffix appended to the figure title.
    """
    # Plot the original data
    plt.figure()
    plt.title('Rotation Tuning ' + fig_title)
    plt.xlabel('Angle(Deg)')
    plt.ylabel('Normalized Firing Rate')
    plt.scatter(angles_org, firing_rates_org, label='Original Data')
    angles_arr = np.arange(-180, 180, step=1)
    # -------------------------------------------------------------------------------------------
    # Single Gaussian Curve Fit
    # -------------------------------------------------------------------------------------------
    params_fit, params_cov_mat = curve_fit(
        single_gaussian,
        angles_org,
        firing_rates_org,
        p0=initial_est[0, :])
    # Standard deviation of fit parameters (sqrt of the covariance diagonal):
    # REF: (1) http://stackoverflow.com/questions/14581358/getting-standard-errors-on-fitted-
    # parameters-using-the-optimize-leastsq-method-i
    # (2) http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html
    params_err_std_dev = np.sqrt(np.diag(params_cov_mat))
    plt.plot(
        angles_arr,
        single_gaussian(angles_arr, params_fit[0], params_fit[1], params_fit[2]),
        label=r'$1\ Gaussian:\ \mu_1=%0.2f,\ \sigma_1=%0.2f,\ Amp_1=%0.2f$'
              % (params_fit[0], params_fit[1], params_fit[2]))
    print ("1 Gaussian Fit - standard deviation of errors in parameters:" +
           "\n\tmu_1=%0.4f, sigma_1=%0.4f, Amp_1=%0.4f"
           % (params_err_std_dev[0], params_err_std_dev[1], params_err_std_dev[2]))
    # -------------------------------------------------------------------------------------------
    # Mirror Symmetric Fit
    # -------------------------------------------------------------------------------------------
    # Same initial estimate is reused for the double-Gaussian model.
    params_fit, params_cov_mat = curve_fit(
        pseudo_symmetric_gaussian,
        angles_org,
        firing_rates_org,
        p0=initial_est[0, :])
    # Standard deviation of fit parameters:
    # REF: (1) http://stackoverflow.com/questions/14581358/getting-standard-errors-on-fitted-
    # parameters-using-the-optimize-leastsq-method-i
    # (2) http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html
    params_err_std_dev = np.sqrt(np.diag(params_cov_mat))
    plt.plot(
        angles_arr,
        pseudo_symmetric_gaussian(angles_arr, params_fit[0], params_fit[1], params_fit[2]),
        label=r'$ Pseudo Sym Gaussian:\ \mu_1=%0.2f,\ \sigma_1=%0.2f,\ Amp_1=%0.2f$'
              % (params_fit[0], params_fit[1], params_fit[2]))
    print ("Pseudo Sym - standard deviation of errors in parameters:" +
           "\n\tmu_1=%0.4f, sigma_1=%0.4f, Amp_1=%0.4f"
           % (params_err_std_dev[0], params_err_std_dev[1], params_err_std_dev[2]))
def single_gaussian_fit(angles_org, firing_rates_org, initial_est, axis=None, font_size=50):
    """Fit a single Gaussian tuning curve and plot it (amplitude-normalized) on `axis`.

    :param angles_org: measured rotation angles in degrees.
    :param firing_rates_org: firing rates corresponding to angles_org.
    :param initial_est: 2-D array; row 0 holds the initial (mu, sigma, amp) guess.
    :param axis: matplotlib axis to draw on; a new figure is created when None.
    :param font_size: font size for labels and ticks.
    """
    if axis is None:
        fig, axis = plt.subplots(1)
    axis.set_xlabel('Angle(Deg)', fontsize=font_size)
    axis.set_ylabel('Normalized Firing Rate (spikes/s)', fontsize=font_size)
    axis.tick_params(axis='x', labelsize=font_size)
    axis.tick_params(axis='y', labelsize=font_size)
    axis.set_xticks([-180, -90, 0, 90, 180])
    axis.set_xlim((-179, 180))
    angles_arr = np.arange(-180, 180, step=1)
    # -------------------------------------------------------------------------------------------
    # Single Gaussian Curve Fit
    # -------------------------------------------------------------------------------------------
    params_fit, params_cov_mat = curve_fit(
        single_gaussian,
        angles_org,
        firing_rates_org,
        p0=initial_est[0, :])
    # Standard deviation of fit parameters (sqrt of the covariance diagonal):
    # REF: (1) http://stackoverflow.com/questions/14581358/getting-standard-errors-on-fitted-
    # parameters-using-the-optimize-leastsq-method-i
    # (2) http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html
    params_err_std_dev = np.sqrt(np.diag(params_cov_mat))
    # Curve is divided by the fitted amplitude so the peak of the plot is ~1.
    axis.plot(
        angles_arr,
        single_gaussian(angles_arr, params_fit[0], params_fit[1], params_fit[2]) / params_fit[2],
        linewidth=2,
        color='green',
        label=r'$1\ Gaussian:\ \mu_1=%0.2f,\ \sigma_1=%0.2f,\ Amp_1=%0.2f$'
              % (params_fit[0], params_fit[1], params_fit[2]))
    # Normalize by the amplitude of fitting gaussian
    axis.scatter(angles_org, firing_rates_org / params_fit[2], label='Original Data', s=60)
    print ("1 Gaussian Fit - standard deviation of errors in parameters:" +
           "\n\tmu_1=%0.4f, sigma_1=%0.4f, Amp_1=%0.4f"
           % (params_err_std_dev[0], params_err_std_dev[1], params_err_std_dev[2]))
def double_gaussian_fit(angles_org, firing_rates_org, initial_est, axis=None, font_size=50):
    """Fit a pseudo mirror-symmetric (double) Gaussian tuning curve and plot it on `axis`.

    The fitted curve is amplitude-normalized before plotting, like
    single_gaussian_fit.

    :param angles_org: measured rotation angles in degrees.
    :param firing_rates_org: firing rates corresponding to angles_org.
    :param initial_est: 2-D array; row 0 holds the initial (mu, sigma, amp) guess.
    :param axis: matplotlib axis to draw on; a new figure is created when None.
    :param font_size: font size for labels and ticks.
    """
    if axis is None:
        fig, axis = plt.subplots(1)
    axis.set_xlabel('Angle(Deg)', fontsize=font_size)
    axis.set_ylabel('Normalized Firing Rate', fontsize=font_size)
    axis.tick_params(axis='x', labelsize=font_size)
    axis.tick_params(axis='y', labelsize=font_size)
    axis.set_xticks([-180, -90, 0, 90, 180])
    axis.set_xlim((-179, 180))
    angles_arr = np.arange(-180, 180, step=1)
    # -------------------------------------------------------------------------------------------
    # Pseudo Mirror-Symmetric (Double) Gaussian Curve Fit
    # -------------------------------------------------------------------------------------------
    params_fit, params_cov_mat = curve_fit(
        pseudo_symmetric_gaussian,
        angles_org,
        firing_rates_org,
        p0=initial_est[0, :])
    # Standard deviation of fit parameters (sqrt of the covariance diagonal):
    # REF: (1) http://stackoverflow.com/questions/14581358/getting-standard-errors-on-fitted-
    # parameters-using-the-optimize-leastsq-method-i
    # (2) http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html
    params_err_std_dev = np.sqrt(np.diag(params_cov_mat))
    # BUG FIX: label and print previously said "1 Gaussian" (copy-paste from
    # single_gaussian_fit) although this plots the pseudo-symmetric fit.
    axis.plot(
        angles_arr,
        pseudo_symmetric_gaussian(angles_arr, params_fit[0], params_fit[1], params_fit[2]) / params_fit[2],
        linewidth=2,
        color='green',
        label=r'$Pseudo\ Sym\ Gaussian:\ \mu_1=%0.2f,\ \sigma_1=%0.2f,\ Amp_1=%0.2f$'
              % (params_fit[0], params_fit[1], params_fit[2]))
    # Normalize by the amplitude of fitting gaussian
    axis.scatter(angles_org, firing_rates_org / params_fit[2], label='Original Data', s=60)
    print ("Pseudo Sym - standard deviation of errors in parameters:" +
           "\n\tmu_1=%0.4f, sigma_1=%0.4f, Amp_1=%0.4f"
           % (params_err_std_dev[0], params_err_std_dev[1], params_err_std_dev[2]))
if __name__ == "__main__":
    plt.ion()
    # Load Extracted data
    with open('rotationalTolerance.pkl', 'rb') as handle:
        data = pickle.load(handle)
    # # -------------------------------------------------------------------------------------------
    # title = 'Rotation Tuning - Fig 5a, Logothesis, Pauls & Poggio -1995'
    #
    # x = data['fig5ax']
    # y = data['fig5ay']
    # y = y/max(y)
    #
    # InitialEst = -255 * np.ones(shape=(4, 3))
    # InitialEst[0, :] = [100, 20, 1.00]
    #
    # print title
    # main(x, y, InitialEst, title)
    # plt.legend()
    # # ------------------------------------------------------------------------------
    # title = 'Fig 5c, logothesis, Pauls & poggio -1995'
    # x = data['fig5cx']
    # y = data['fig5cy']
    # y = y/max(y)
    #
    # InitialEst = -255 * np.ones(shape=(4, 3))
    # InitialEst[0, :] = [120, 30, 1.00]
    #
    # print title
    # main(x, y, InitialEst, title)
    # plt.legend()
    #
    # # -------------------------------------------------------------------------------------------
    # title = 'Fig 5e, logothesis, Pauls & poggio -1995'
    # x = data['fig5ex']
    # y = data['fig5ey']
    # y = y/max(y)
    #
    # InitialEst = -255 * np.ones(shape=(4, 3))
    # InitialEst[0, :] = [-70, 100, 1.00]
    #
    # print title
    # main(x, y, InitialEst, title)
    # plt.legend()
    # ---------------------------------------------------------------------------------------
    # Three-panel figure: Fig 5a (single Gaussian), 5c (double Gaussian),
    # 5e (single Gaussian) of Logothetis, Pauls & Poggio (1995).
    f, ax_arr = plt.subplots(1, 3, sharey=True)
    f_size = 50
    title = 'Rotation Tuning - Fig 5a, Logothesis, Pauls & Poggio -1995'
    x = data['fig5ax']
    y = data['fig5ay']
    InitialEst = -255 * np.ones(shape=(4, 3))
    InitialEst[0, :] = [103.14, 26.55, 1.00]
    # BUG FIX: 'print title' is Python 2 syntax and a SyntaxError on Python 3.
    print(title)
    single_gaussian_fit(x, y, InitialEst, ax_arr[0])
    ax_arr[0].set_xlabel('')
    # ---------------------------------------------------------------------------------------
    title = 'Fig 5c, logothesis, Pauls & poggio -1995'
    x = data['fig5cx']
    y = data['fig5cy']
    InitialEst = -255 * np.ones(shape=(4, 3))
    InitialEst[0, :] = [116.31, 30, 1.00]
    print(title)
    double_gaussian_fit(x, y, InitialEst, ax_arr[1])
    ax_arr[1].set_ylabel('')
    # -------------------------------------------------------------------------------------------
    title = 'Fig 5e, logothesis, Pauls & poggio -1995'
    x = data['fig5ex']
    y = data['fig5ey']
    InitialEst = -255 * np.ones(shape=(4, 3))
    InitialEst[0, :] = [-26.62, 364, 1.00]
    print(title)
    single_gaussian_fit(x, y, InitialEst, ax_arr[2])
    ax_arr[2].set_xlabel('')
    ax_arr[2].set_ylabel('')
    ax_arr[0].set_ylim([0, 1.4])
|
from styn import chore
# NOTE(review): these definitions look like a fixture exercising styn's chore
# resolution — 'html' is intentionally left undecorated, and 'android' lists it
# as a dependency anyway; do not "fix" without checking the consuming tests.
@chore()
def clean():
    pass

# Should be marked as chore (intentionally is not).
def html():
    pass

# References a non-chore dependency ('html').
@chore(clean, html)
def android():
    pass
|
# Simple interactive quiz: asks five questions and reports a percentage mark.
print('Hello, welcome to true talk')
ans = input('Are you ready to play ? (yes/no): ')
score = 0
# BUG FIX: five questions are asked, so the percentage must be out of 5
# (the previous value of 4 allowed marks above 100%).
total_q = 5
if ans.lower() == 'yes':
    ans = input('1. What is the best programming language ? ')
    if ans.lower() == 'python':
        score += 1
        print('Correct')
    else:
        print('Incorrect')
    ans = input('2. What is 1 + 2 + 3 + 4 ? ')
    if ans == '10':
        score += 1
        print('Correct')
    else:
        print('Incorrect')
    ans = input('3. What is my age ? ')
    if ans == '23':
        score += 1
        print('Correct')
    else:
        print('Incorrect')
    ans = input('4. What is the approximate value of PI ')
    if ans == '3.142':
        score += 1
        print('Correct')
    else:
        print('Wrong')
    ans = input('5. Where do I stay ? ')
    # BUG FIX: ans.lower() could never equal 'Sinchu alagi' (capital S made the
    # second alternative dead code); compare against the lowercase form.
    if ans.lower() == 'sinchu' or ans.lower() == 'sinchu alagi':
        score += 1
        print('Correct')
    else:
        print('Wrong')
print('Thank your for playing, you got', score, 'questions correct')
mark = (score/total_q) * 100
print('Mark: ', mark)
print('GoodBye')
|
import os
import string
import tarfile
import collections
import requests
from nltk.corpus import stopwords
import numpy as np
import tensorflow as tf
# Skip-gram word2vec hyper-parameters.
vocabulary_size = 10000  # keep the 10k most frequent words; everything else maps to 'RARE'
embedding_size = 200  # dimensionality of the learned word vectors
batch_size = 100
num_sampled = int(batch_size/2)  # negative samples per batch for the NCE loss
window_size = 2  # context words taken on each side of the target word
# Probe words whose nearest neighbours are printed during training.
valid_words = ['cliche', 'love', 'hate', 'silly', 'sad']
def load_movie_data():
    """Download (if needed) and load the Cornell movie-review polarity dataset.

    Returns:
        texts: list of review strings, positives first then negatives.
        target: parallel list of labels, 1 for positive and 0 for negative.
    """
    save_folder_name = 'temp'
    pos_file = os.path.join(save_folder_name, 'rt-polaritydata', 'rt-polarity.pos')
    neg_file = os.path.join(save_folder_name, 'rt-polaritydata', 'rt-polarity.neg')
    if not os.path.exists(os.path.join(save_folder_name, 'rt-polaritydata')):
        movie_data_url = 'http://www.cs.cornell.edu/people/pabo/movie-review-data/rt-polaritydata.tar.gz'
        req = requests.get(movie_data_url, stream=True)
        with open('temp_movie_review_temp.tar.gz', 'wb') as f:
            for chunk in req.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
                    f.flush()
        # FIX: open the archive via a context manager so it is closed even if
        # extraction fails (previously closed manually).
        with tarfile.open('temp_movie_review_temp.tar.gz', 'r:gz') as tar:
            tar.extractall(path='temp')
    # FIX: removed the redundant f.close() calls that followed each 'with open'
    # block — the context manager already closes the file.
    pos_data = []
    with open(pos_file, 'r', encoding='latin-1') as f:
        for line in f:
            # Drop non-ASCII characters.
            pos_data.append(line.encode('ascii', errors='ignore').decode())
    pos_data = [x.rstrip() for x in pos_data]
    neg_data = []
    with open(neg_file, 'r', encoding='latin-1') as f:
        for line in f:
            neg_data.append(line.encode('ascii', errors='ignore').decode())
    neg_data = [x.rstrip() for x in neg_data]
    texts = pos_data + neg_data
    target = [1] * len(pos_data) + [0] * len(neg_data)
    return texts, target
# length is 10662 (5331 positive + 5331 negative reviews)
texts, target = load_movie_data()
def normalize_text(text):
    """Lowercase, strip digits/punctuation/stopwords and collapse whitespace.

    :param text: list of raw review strings.
    :return: list of cleaned review strings.
    """
    text = [x.lower() for x in text]
    text = [''.join(c for c in x if c not in '0123456789') for x in text]
    text = [''.join(c for c in x if c not in string.punctuation) for x in text]
    # PERF FIX: fetch the stopword list once and use a set for O(1) membership —
    # previously stopwords.words('english') was re-read for every single word.
    stop_words = set(stopwords.words('english'))
    text = [' '.join([word for word in x.split() if word not in stop_words]) for x in text]
    text = [' '.join(x.split()) for x in text]
    return text
texts = normalize_text(texts)
# after this operation the length is 10406
# Keep only reviews longer than 2 words. Targets are filtered first because
# the mask is computed from the not-yet-filtered texts list.
target = [target[ix] for ix, x in enumerate(texts) if len(x.split()) > 2]
texts = [x for x in texts if len(x.split()) > 2]
def build_dictionary(sentences, vocabulary_size):
    """Map the most frequent words to dense integer ids; id 0 is reserved for 'RARE'."""
    tokens = [word for sentence in sentences for word in sentence.split()]
    # 'RARE' heads the list; the counts themselves are discarded below —
    # only the id (position) of each word matters.
    word_count = [['RARE', -1]]
    word_count.extend(collections.Counter(tokens).most_common(vocabulary_size - 1))
    word_dict = {}
    for word, _count in word_count:
        word_dict[word] = len(word_dict)
    return word_dict
def text_to_number(sentences, word_dict):
    """Convert each sentence to a list of word ids; unknown words map to 0 ('RARE')."""
    return [
        [word_dict.get(token, 0) for token in sentence.split(' ')]
        for sentence in sentences
    ]
word_dictionary = build_dictionary(texts, vocabulary_size)
# Reverse map: id -> word, used to print nearest neighbours during training.
word_dictionary_rev = dict(zip(word_dictionary.values(), word_dictionary.keys()))
text_data = text_to_number(texts, word_dictionary)
# Ids of the probe words used for the similarity read-out.
valid_examples = [word_dictionary[x] for x in valid_words]
def generate_batch_data(sentences, batch_size, window_size, method='skip_gram'):
    """Build one (inputs, labels) skip-gram batch from randomly chosen sentences.

    :param sentences: list of sentences, each a list of word ids.
    :param batch_size: number of (center, context) pairs to return.
    :param window_size: context words taken on each side of the center word.
    :param method: only 'skip_gram' is implemented.
    :return: (batch_data, label_data) numpy arrays of shape (batch_size,) and
        (batch_size, 1).
    :raises ValueError: for any method other than 'skip_gram'.
    """
    batch_data = []
    label_data = []
    while len(batch_data) < batch_size:
        # BUG FIX: np.random.choice requires a 1-D array and rejects a ragged
        # list of lists on modern NumPy; pick the sentence by index instead.
        rand_sentence = sentences[np.random.randint(len(sentences))]
        window_sequence = [rand_sentence[max((ix-window_size), 0):(ix+window_size+1)]
                           for ix, x in enumerate(rand_sentence)]
        # Position of the center word within each window (clipped at the edges).
        label_indices = [ix if ix < window_size else window_size for ix, x in enumerate(window_sequence)]
        if method == 'skip_gram':
            # (center word, [context words]) for every window.
            batch_and_labels = [(x[y], x[:y] + x[(y+1):]) for x, y in zip(window_sequence, label_indices)]
            tuple_data = [(x, y_) for x, y in batch_and_labels for y_ in y]
        else:
            raise ValueError('Method {} not implemented yet.'.format(method))
        if not tuple_data:
            # BUG FIX: a one-word sentence yields no pairs and zip(*[]) would raise.
            continue
        batch, label = [list(x) for x in zip(*tuple_data)]
        batch_data.extend(batch[:batch_size])
        label_data.extend(label[:batch_size])
    batch_data = np.array(batch_data[:batch_size])
    label_data = np.transpose(np.array([label_data[:batch_size]]))
    return batch_data, label_data
# ---------------------------------------------------------------------------
# Skip-gram graph trained with NCE loss.
# NOTE(review): this is the TensorFlow 1.x API (placeholders, Session,
# tf.train.*, keep_dims) and will not run unmodified on TensorFlow 2.x.
# ---------------------------------------------------------------------------
# Embedding matrix initialised uniformly in [-1, 1).
embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
# NCE output weights/biases (one row/bias per vocabulary word).
nce_weights = tf.Variable(tf.truncated_normal([vocabulary_size, embedding_size], stddev=1.0/np.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
x_inputs = tf.placeholder(tf.int32, shape=[batch_size])
y_target = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
embed = tf.nn.embedding_lookup(embeddings, x_inputs)
# Noise-contrastive estimation avoids a full softmax over the vocabulary.
loss = tf.reduce_mean(tf.nn.nce_loss(weights=nce_weights, biases=nce_biases,
                                     labels=y_target, inputs=embed,
                                     num_sampled=num_sampled,
                                     num_classes=vocabulary_size))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0).minimize(loss)
# Cosine-similarity machinery for the probe words (L2-normalised embeddings).
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
similarity = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
loss_vec = []
loss_x_vec = []
for i in range(10000):
    batch_inputs, batch_labels = generate_batch_data(text_data,
                                                     batch_size, window_size)
    feed_dict = {x_inputs: batch_inputs, y_target: batch_labels}
    sess.run(optimizer, feed_dict=feed_dict)
    # Every 1000 steps: record the loss and print nearest neighbours of the
    # probe words.
    if (i+1) % 1000 == 0:
        loss_val = sess.run(loss, feed_dict=feed_dict)
        loss_vec.append(loss_val)
        loss_x_vec.append(i+1)
        print('Loss at step {} : {}'.format(i+1, loss_val))
    if (i+1) % 1000 == 0:
        sim = sess.run(similarity, feed_dict=feed_dict)
        for j in range(len(valid_words)):
            valid_word = word_dictionary_rev[valid_examples[j]]
            topk = 5
            # Index 0 is the word itself, so skip it.
            nearest = (-sim[j,:]).argsort()[1:topk+1]
            log_str = "Nearest to {}:".format(valid_word)
            for k in range(topk):
                close_word = word_dictionary_rev[nearest[k]]
                log_str = "%s %s," % (log_str, close_word)
            print(log_str)
# Sample output of one training run:
# Loss at step 10000 : 4.206537246704102
# Nearest to cliche: irony, shop, platitudes, ensure, fiction,
# Nearest to love: shore, visual, RARE, heres, brash,
# Nearest to hate: strangely, boisterous, scams, dynamic, questions,
# Nearest to silly: dazzling, superman, thcentury, smash, gollums,
# Nearest to sad: contrivance, ultraviolent, bed, coherence, phoniness,
|
import unittest
from unittest.mock import patch
from synapses.model import Model
from synapses.activator import ActivatorEnum, StepActivator
from synapses.perceptron import PerceptronInterface
# noinspection PyMethodMayBeStatic
class ModelTests(unittest.TestCase):
    """Tests for Model (de)serialization and prediction.

    The fixture describes an XOR network built from NAND and OR perceptrons in
    the first layer and an AND perceptron in the second, all with step
    activators; weights are [bias, w1, w2].
    """

    def setUp(self) -> None:
        self.xor_model_description = {
            "name": "xor",
            "desc": "Compute Exclusive Or Operation on 2 inputs.",
            "layers": [
                [
                    # nand
                    {"w": [1, -0.5, -0.5], "a": {"e": ActivatorEnum.Step, "p": {"threshold": 0}}},
                    # or
                    {"w": [0, 0.25, 0.25], "a": {"e": ActivatorEnum.Step, "p": {"threshold": 0}}}
                ],
                [
                    # and
                    {"w": [-0.25, 0.25, 0.25], "a": {"e": ActivatorEnum.Step, "p": {"threshold": 0}}}
                ]
            ]
        }
    def test_deserialize_sets_name(self):
        m = Model()
        m.deserialize(self.xor_model_description)
        self.assertEqual(m.name, self.xor_model_description["name"])
    def test_deserialize_sets_description(self):
        m = Model()
        m.deserialize(self.xor_model_description)
        self.assertEqual(m.desc, self.xor_model_description["desc"])
    def test_deserialize_creates_layers(self):
        m = Model()
        m.deserialize(self.xor_model_description)
        self.assertEqual(len(m._layers), 2)
    def test_deserialize_adds_correct_count_of_neurons(self):
        m = Model()
        m.deserialize(self.xor_model_description)
        self.assertEqual(len(m._layers[0]), 2)
        self.assertEqual(len(m._layers[1]), 1)
    def test_deserialize_adds_PerceptronInterface(self):
        m = Model()
        m.deserialize(self.xor_model_description)
        for layer in m._layers:
            for n in layer:
                self.assertIsInstance(n, PerceptronInterface)
    def test_deserialize_sets_perceptron_weights(self):
        m = Model()
        m.deserialize(self.xor_model_description)
        for layer, layer_desc in zip(m._layers, self.xor_model_description["layers"]):
            for n, desc in zip(layer, layer_desc):
                self.assertListEqual(n.weights, desc["w"])
    def test_deserialize_sets_perceptron_activator(self):
        m = Model()
        m.deserialize(self.xor_model_description)
        for layer in m._layers:
            for n in layer:
                self.assertIsInstance(n.activator, StepActivator)
    @patch("synapses.perceptron.Perceptron.attach_to")
    def test_deserialize_attaches_perceptrons(self, attach_method):
        m = Model()
        m.deserialize(self.xor_model_description)
        attach_method.assert_called()
    def test_prediction(self):
        # Full XOR truth table: (inputs, expected output).
        m = Model()
        m.deserialize(self.xor_model_description)
        for i, ans in [([1, 1], [0]), ([1, 0], [1]), ([0, 1], [1]), ([0, 0], [0])]:
            actual = m.predict(i)
            self.assertListEqual(actual, ans)
    def test_serialize_includes_name(self):
        m = Model()
        m.deserialize(self.xor_model_description)
        ans = m.serialize()
        self.assertEqual(ans["name"], self.xor_model_description["name"])
    def test_serialize_includes_desc(self):
        m = Model()
        m.deserialize(self.xor_model_description)
        ans = m.serialize()
        self.assertEqual(ans["desc"], self.xor_model_description["desc"])
    def test_serialize_includes_layers(self):
        m = Model()
        m.deserialize(self.xor_model_description)
        ans = m.serialize()
        self.assertEqual(len(ans["layers"]), len(self.xor_model_description["layers"]))
    def test_serialize_serialized_neurons(self):
        # Round-trip: serialize(deserialize(desc)) must reproduce every neuron dict.
        m = Model()
        m.deserialize(self.xor_model_description)
        ans = m.serialize()
        for layer, expected_layer in zip(ans["layers"], self.xor_model_description["layers"]):
            for neuron, expected_neuron in zip(layer, expected_layer):
                self.assertDictEqual(neuron, expected_neuron)
|
# -*- coding: utf-8 -*-
# @Time : 2021/9/2 15:44
# @Author : CuiShuangqi
# @Email : 1159533975@qq.com
# @File : pytestParaTest.py
import pytest
from selenium import webdriver
from time import sleep
"""
pytest 参数化
当一组测试用例有固定的测试数据时,就可以通过参数化的方式简化测试用例的编写
通过pytest.mark.parametrzie()方法设置参数:
参数名:"search_key,expected"
参数值:通过数组定义参数值时,每一个元组都是一条测试用例的测试数据
ids参数:默认None,用来重新定义测试用例的名称
"""
# Baidu search example, parametrized over (query, expected page title).
@pytest.mark.parametrize(
    "search_key,expected",
    [
        ("哈哈哈", "哈哈哈_百度搜索"),
        ("呵呵呵", "呵呵呵_百度不想搜索"),
    ],
    ids=["case1", "case2"]
)
def test_baidu_search(search_key, expected):
    """Search Baidu for search_key and assert the window title equals expected.

    BUG FIX: driver.quit() used to run only after the assertion, so a failing
    case leaked a live Chrome process; the quit now happens in a finally block.
    NOTE(review): find_element_by_id was removed in Selenium 4 — confirm the
    pinned selenium version, or migrate to driver.find_element(By.ID, ...).
    """
    driver = webdriver.Chrome()
    try:
        driver.get("http://www.baidu.com")
        driver.implicitly_wait(10)
        driver.find_element_by_id("kw").send_keys(search_key)
        driver.find_element_by_id("su").click()
        sleep(3)
        # == assertion: compare against the browser window title.
        web_title = driver.title
        assert expected == web_title
    finally:
        driver.quit()
        print("driver已退出!")
if __name__ == '__main__':
pytest.main(["-s", "pytestParaTest.py"]) |
#-*- coding = utf-8 -*-
#@Time : 2020/11/3 15:12
#@Author : 冯朗
#@File : Fit2.py
#@Software : PyCharm
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import leastsq
# Target function being approximated.
def real_func(x):
    """Return sin(2*pi*x)."""
    return np.sin(2.0 * np.pi * x)
# Polynomial model.
def fit_func(p, x):
    """Evaluate the polynomial with coefficients p (highest degree first) at x."""
    return np.poly1d(p)(x)
# Residuals for least squares.
def residuals_func(p, x, y):
    """Difference between the fitted polynomial at x and the observations y."""
    return fit_func(p, x) - y
# Ten sample points for fitting; 100 dense points for plotting the curves.
x = np.linspace(0,1,10)
x_points = np.linspace(0,1,100)
print(x)
# Target values with added Gaussian noise (std 0.1).
y_ = real_func(x)
y = [np.random.normal(0,0.1) + y1 for y1 in y_]
print(y)
def fitting(M=0):
    """Least-squares fit of a degree-M polynomial to the module-level noisy samples (x, y).

    Plots the true curve, the fitted curve and the noisy points, then returns
    the full leastsq result tuple (fitted coefficients are element 0).
    """
    # Random initial guess for the M+1 polynomial coefficients.
    p_init = np.random.rand(M+1)
    p_lsq = leastsq(residuals_func,p_init,args=(x,y))
    print('Fitting Parameters:',p_lsq[0])
    # Plot the true curve, the fitted curve and the noisy samples.
    plt.plot(x_points,real_func(x_points),label='real')
    plt.plot(x_points,fit_func(p_lsq[0],x_points),label='fitted curve')
    plt.plot(x,y,'bo',label='noise')
    plt.legend()  # draw the legend
    plt.show()
    return p_lsq
p_lsq_0 = fitting(M=3) |
import eventlet
from urllib import request
import random
import datetime
import pandas as pd
import itertools
starttime = datetime.datetime.now()
# Remote identifier-splitting CGI service.
base_url = "http://splitit.cs.loyola.edu/cgi/splitit.cgi"
max_int = 9999  # upper bound for the random cache-busting query parameter
num_of_splitting = 1  # 'n' parameter sent to the service
verbose = False  # print mismatching splits when True
# CSV with one identifier per row: column 0 = raw identifier,
# column 1 = gold-standard split ('-'-separated).
df = pd.read_csv("tmp/cheat_splitting_file.csv", header=None)
# NOTE(review): rows before 34355 are skipped — presumably already processed
# in an earlier run; confirm before rerunning from scratch.
identifiers = list(itertools.chain.from_iterable(df.values[34355:, 0:1]))
splitted_identifiers = list(itertools.chain.from_iterable(df.values[34355:, 1:2]))
lendata = len(identifiers)
def split(identifier):
    """Ask the remote splitit service to split one identifier.

    Returns (identifier-with-dots-replaced, decoded response body).
    """
    # Random value appended to the query string (cache busting).
    rand = random.randint(0, max_int)
    # TODO: handle exceptions raised by the URL request
    identifier = identifier.replace('.', '_')
    url = base_url + "?&id=" + identifier + "&lang=all&n=" + str(num_of_splitting) + "&rand=" + str(rand)
    # print("proceesing ", identifier)
    body = request.urlopen(url).read()
    # print("done with", identifier)
    print(identifier, body)
    return identifier, body.decode("utf-8")
# Use a green-thread pool to speed up the requests.
# Out-of-order completion would also be fine, as long as each result carries
# enough information to match it back to its identifier.
pool = eventlet.GreenPool(200)
count = 0  # number of identifiers the service split exactly like the gold data
index = 0  # position into splitted_identifiers (imap preserves input order)
for identifier, body in pool.imap(split, identifiers):
    # print("got body from", identifier)
    # print(body)
    wrong_split = True
    softwords = body.split('\n')
    gentest_split_result = []
    # Each response line is "<word>\t<score>"; strip the score digits/tabs and
    # break remaining underscore-joined parts apart.
    for i in range(len(softwords) - 1):
        softword = softwords[i].strip('\t1234567890')
        gentest_split_result = gentest_split_result + softword.split('_')
    splitted_identifier = splitted_identifiers[index]
    parts = splitted_identifier.split('-')
    condition = lambda part : part not in ['.', ':', '_', '~']
    parts = [x for x in filter(condition, parts)]
    # NOTE(review): set difference ignores duplicates and ordering, so two
    # splits with the same length and the same distinct parts compare equal
    # even if repeated parts differ — confirm this is intended.
    if len(parts) == len(gentest_split_result):
        difference = list(set(parts).difference(set(gentest_split_result)))
        if len(difference) == 0:
            count = count + 1
            wrong_split = False
    if verbose and wrong_split:
        print(parts)
        print(gentest_split_result)
    index = index+1
# Fraction of identifiers split in agreement with the gold data.
print(count/lendata)
endtime = datetime.datetime.now()
print((endtime - starttime).total_seconds()) |
from collections import defaultdict
import torch
import numpy as np
import pandas as pd
from pytorch_toolbox.callbacks import Callback, LearnerCallback
from pytorch_toolbox.callbacks import hook_output
from pytorch_toolbox.utils import Phase, to_numpy
from pytorch_toolbox.utils.training import flatten_model
class OutputHookRecorder(Callback):
    """Records the output of one module on every batch via a forward hook.

    Defaults to the last module of the flattened model when none is given.
    """

    def __init__(self, learn, module=None):
        super().__init__()
        # One numpy array per batch, concatenated lazily in hook_outputs.
        self._hook_outputs = []
        if module is None:
            module = flatten_model(learn.model)[-1]
        self.hook = hook_output(module)

    @property
    def hook_outputs(self):
        # All recorded batch outputs stacked along the batch axis.
        return np.concatenate(self._hook_outputs, axis=0)

    def on_batch_end(self, phase, **kwargs):
        # hook.stored holds the module output captured during the forward pass.
        self._hook_outputs.append(to_numpy(self.hook.stored))
class ResultRecorder(LearnerCallback):
    """Accumulates sample names, sigmoid prediction probabilities and labels.

    Labels are only collected for TRAIN/VAL phases (test batches carry none).
    """
    _order = -10  # run before other callbacks

    def __init__(self, learn):
        super().__init__(learn)
        self.names = []
        self.prob_preds = []
        self.targets = []

    def on_batch_begin(self, last_input, last_target, phase, **kwargs):
        self.names.extend(last_target['name'])
        if phase == Phase.TRAIN or phase == Phase.VAL:
            label = to_numpy(last_target['label'])
            self.targets.extend(label)

    def on_loss_begin(self, last_output, **kwargs):
        # Raw model outputs are logits; store sigmoid probabilities.
        prob_pred = to_numpy(torch.sigmoid(last_output))
        self.prob_preds.extend(prob_pred)
class OutputRecorder(LearnerCallback):
    """Collects per-sample stats each batch and writes per-epoch CSV logs and checkpoints.

    For every batch it gathers name/label/prediction-probabilities plus each
    unreduced loss term, then at epoch end dumps one CSV per phase under
    save_path/'training_logs' and saves the model under
    save_path/'model_checkpoints'.
    """
    _order = -10  # run before other callbacks

    def __init__(self, learn, save_path, save_img_fn, save_img=False):
        super().__init__(learn)
        self.save_path = save_path
        # (phase name, epoch) -> list of per-sample stat dicts.
        self.history = defaultdict(list)
        self.key = None
        self.current_batch = dict()
        self.save_img_fn = save_img_fn
        self.save_img = save_img

    def on_batch_begin(self, last_input, last_target, epoch, phase, **kwargs):
        self.key = (phase.name, epoch)
        if self.save_img:
            inputs = self.save_img_fn(last_input)
            self.current_batch['input'] = inputs
        self.current_batch['name'] = last_target['name']
        if phase == Phase.TRAIN or phase == Phase.VAL:
            label = to_numpy(last_target['label'])
            self.current_batch['label'] = label

    def on_loss_begin(self, last_output, epoch, **kwargs):
        # Logits -> sigmoid probabilities.
        prediction_probs = to_numpy(torch.sigmoid(last_output))
        self.current_batch['prediction_probs'] = prediction_probs
        # prediction = prediction_probs.copy()
        # prediction[prediction < 0.5] = 0
        # prediction[prediction >= 0.5] = 1
        # self.current_batch['prediction'] = prediction

    def on_batch_end(self, **kwargs):
        # Record each (unreduced) loss component under its class name.
        for loss in self.learn.loss_func.losses:
            name = loss.__class__.__name__
            unreduced_loss = to_numpy(loss.unreduced_loss)
            # reduced_loss = to_numpy(loss.loss.mean())
            self.current_batch[f"{name}"] = unreduced_loss
            # self.current_batch[f"{name}_reduced"] = reduced_loss
        # prediction = self.current_batch['prediction']
        # label = self.current_batch['label']
        # n_classes = label.shape[-1]
        # indices_to_keep = np.where((prediction == label).sum(axis=1) != n_classes)[0]
        """
        self.current_batch is a dictionary with:
        {
            stat1: [stat1_for_sample_1, stat1_for_sample_2, ...]
            stat2: [stat2_for_sample_1, stat2_for_sample_2, ...]
        }
        """
        # Get the keys, and the array of values associated with each key
        stat_names, stat_values = zip(*self.current_batch.items())
        # zip the array of values so each element in the zip has [stat1_for_sample_1, stat2_for_sample_1, ...]
        stat_values_for_samples = zip(*stat_values)
        for stat_values_for_sample in stat_values_for_samples:
            sample_to_save = dict()
            for stat_name, stat_value in zip(stat_names, stat_values_for_sample):
                # Raw input images are too large to log per sample.
                if stat_name == 'input': continue
                sample_to_save[stat_name] = stat_value
            self.history[self.key].append(sample_to_save)

    def on_epoch_end(self, epoch, **kwargs):
        # One CSV per phase, one model checkpoint, then reset for the next epoch.
        history_save_path = self.save_path / 'training_logs' / f"epoch_{epoch}_train.csv"
        history_save_path.parent.mkdir(exist_ok=True, parents=True)
        history = self.history[(Phase.TRAIN.name, epoch)]
        df = pd.DataFrame(history)
        df.to_csv(history_save_path, index=False)
        history_save_path = self.save_path / 'training_logs' / f"epoch_{epoch}_val.csv"
        history_save_path.parent.mkdir(exist_ok=True, parents=True)
        history = self.history[(Phase.VAL.name, epoch)]
        df = pd.DataFrame(history)
        df.to_csv(history_save_path, index=False)
        model_save_path = self.save_path / 'model_checkpoints' / f"epoch_{epoch}.pth"
        model_save_path.parent.mkdir(exist_ok=True, parents=True)
        self.learn.save_model_with_path(model_save_path)
        self.history = defaultdict(list)
|
import unittest
from nose import SkipTest
import numpy as np
from airline_alloc.dataset import Dataset
from airline_alloc.optimization import *
class ObjectiveTestCase(unittest.TestCase):
    """ test the get_objective function
    """

    def test_3routes(self):
        # Expected coefficients come from the original MATLAB implementation
        # for the 3-route dataset (6 integer + 6 continuous variables).
        data = Dataset(suffix='after_3routes')
        obj_int, obj_con = get_objective(data)
        expected_int = np.array([
            30078.1801074742,
            23390.7454818768,
            16779.0566092325,
            35794.3199911030,
            28282.0591370451,
            20794.6131249422
        ])
        expected_con = np.array([
            -295.868219080555,
            -235.334963855215,
            -176.747839203397,
            -308.770102562314,
            -248.293639817771,
            -188.596040811846,
        ])
        self.assertTrue(np.allclose(obj_int, expected_int))
        self.assertTrue(np.allclose(obj_con, expected_con))
class ConstraintsTestCase(unittest.TestCase):
    """ test the get_constraints function
    """

    def test_3routes(self):
        # Expected A and b (for A x <= b) come from the original MATLAB
        # implementation for the 3-route dataset.
        data = Dataset(suffix='after_3routes')
        A, b = get_constraints(data)
        expected_A = np.array([
            [0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1],
            [0, 0, 0, 0, 0, 0, -1, 0, 0, -1, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, -1, 0, 0, -1, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, -1],
            [10.4652511360000, 8.31288377600000, 6.15314412800000, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 10.4460000480000, 8.29977156800000, 6.14597705600000, 0, 0, 0, 0, 0, 0],
            [-107, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            [0, -107, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
            [0, 0, -107, 0, 0, 0, 0, 0, 1, 0, 0, 0],
            [0, 0, 0, -122, 0, 0, 0, 0, 0, 1, 0, 0],
            [0, 0, 0, 0, -122, 0, 0, 0, 0, 0, 1, 0],
            [0, 0, 0, 0, 0, -122, 0, 0, 0, 0, 0, 1]
        ])
        expected_b = np.array([
            300, 700, 220, -60, -140, -44, 72, 48, 0, 0, 0, 0, 0, 0
        ]).reshape(-1, 1)
        self.assertTrue(np.allclose(A, expected_A))
        self.assertTrue(np.allclose(b, expected_b))
class GomoryCutTestCase(unittest.TestCase):
    """ test the gomory_cut function
    """

    def test_problem(self):
        """ test problem from GomoryCut.m
        """
        # input arguments: fractional LP solution x and constraints A x <= b
        # (no equality constraints).
        x = np.array([
            [55./14.],
            [10./7.]
        ])
        A = np.array([
            [2./5., 1.],
            [2./5., -2./5.]
        ])
        b = np.array([
            [3.],
            [1.]
        ])
        Aeq = np.array([])
        beq = np.array([])
        # expected results (from MATLAB): the original constraints plus one
        # appended Gomory cut row.
        expected = {
            'A_up': np.array([
                [0.4000, 1.0000],
                [0.4000, -0.4000],
                [0.6000, 0.4000]
            ]),
            'b_up': np.array([
                [3.0000],
                [1.0000],
                [2.0000]
            ]),
            'eflag': 1
        }
        # call the function
        A_up, b_up, eflag = gomory_cut(x, A, b, Aeq, beq)
        # check answer against expected results
        self.assertTrue(np.allclose(A_up, expected['A_up']),
                        msg='\n' + str(A_up) + '\n' + str(expected['A_up']))
        self.assertTrue(np.allclose(b_up, expected['b_up']),
                        msg='\n' + str(b_up) + '\n' + str(expected['b_up']))
        self.assertTrue(eflag == expected['eflag'])
class CutPlaneTestCase(unittest.TestCase):
    """ test the cut_plane function
    """

    def test_problem(self):
        """ test problem from call_Cutplane.m
        """
        # input arguments: mixed solution x (2 continuous + 2 integer parts),
        # inequality constraints A x <= b, no equality constraints.
        x = np.array([
            [9./4.],
            [15./4.],
            [1200.],
            [500.]
        ])
        A = np.array([
            [1., 1., 3., 3.],
            [4., 1., 6., 5.],
            [1., 1., 0., 0.],
            [5., 9., 0., 0.]
        ])
        b = np.array([
            [12.],
            [1.],
            [6.],
            [45.]
        ])
        Aeq = np.array([])
        beq = np.array([])
        ind_con = np.array([1, 2]) - 1  # indices of con rows
        ind_int = np.array([3, 4]) - 1  # indices of int rows
        indeq_con = np.array([])
        indeq_int = np.array([])
        num_int = 2
        # expected results (from MATLAB): one cut row appended to A and b.
        expected = {
            'A_up': np.array([
                [1., 1., 3., 3.],
                [4., 1., 6., 5.],
                [1., 1., 0., 0.],
                [5., 9., 0., 0.],
                [2., 3., 1., 1.],
            ]),
            'b_up': np.array([
                [12.],
                [1.],
                [6.],
                [45.],
                [1715.]
            ]),
        }
        # call the function
        A_up, b_up = cut_plane(x, A, b, Aeq, beq, ind_con, ind_int, indeq_con, indeq_int, num_int)
        # check answer against expected results
        self.assertTrue(np.allclose(A_up, expected['A_up']),
                        msg='\n' + str(A_up) + '\n' + str(expected['A_up']))
        self.assertTrue(np.allclose(b_up, expected['b_up']),
                        msg='\n' + str(b_up) + '\n' + str(expected['b_up']))

    def test_problem2(self):
        """ another test problem
            (FIXME: this demonstrates a case where cut_plane does not work as expected)
        """
        # Skipped until cut_plane is fixed; the fixture below documents the
        # failing case and the MATLAB-derived expected result.
        raise SkipTest("cut_plane currently broken for this test case")
        # input arguments
        x = np.array([
            [0.],
            [7.],
            [2.0561],
            [2.4590],
            [0.],
            [0.],
            [0.],
            [700.],
            [220.],
            [300.],
            [0.],
            [0.],
        ])
        A = np.array([
            [0, 0, 0, 0, 0, 0, 1.0000, 0, 0, 1.0000, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 1.0000, 0, 0, 1.0000, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 1.0000, 0, 0, 1.0000],
            [0, 0, 0, 0, 0, 0, -1.0000, 0, 0, -1.0000, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, -1.0000, 0, 0, -1.0000, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, -1.0000, 0, 0, -1.0000],
            [10.4653, 8.3129, 6.1531, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 10.4460, 8.2998, 6.1460, 0, 0, 0, 0, 0, 0],
            [-107.0000, 0, 0, 0, 0, 0, 1.0000, 0, 0, 0, 0, 0],
            [0, -107.0000, 0, 0, 0, 0, 0, 1.0000, 0, 0, 0, 0],
            [0, 0, -107.0000, 0, 0, 0, 0, 0, 1.0000, 0, 0, 0],
            [0, 0, 0, -122.0000, 0, 0, 0, 0, 0, 1.0000, 0, 0],
            [0, 0, 0, 0, -122.0000, 0, 0, 0, 0, 0, 1.0000, 0],
            [0, 0, 0, 0, 0, -122.0000, 0, 0, 0, 0, 0, 1.0000],
            [0, -1.0000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        ])
        b = np.array([
            [300.],
            [700.],
            [220.],
            [-60.],
            [-140.],
            [-44.],
            [72.],
            [48.],
            [0.],
            [0.],
            [0.],
            [0.],
            [0.],
            [0.],
            [-7.]
        ])
        Aeq = np.array([])
        beq = np.array([])
        ind_con = np.array([1, 2, 3, 4, 5, 6]) - 1  # indices of con rows
        ind_int = np.array([7, 8, 9, 10, 11, 12, 13, 14]) - 1  # indices of int rows
        indeq_con = np.array([])
        indeq_int = np.array([])
        num_int = 6
        # expected results (from MATLAB)
        expected = {
            'A_up': np.array([
                [0, 0, 0, 0, 0, 0, 1.0000, 0, 0, 1.0000, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 1.0000, 0, 0, 1.0000, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 1.0000, 0, 0, 1.0000],
                [0, 0, 0, 0, 0, 0, -1.0000, 0, 0, -1.0000, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, -1.0000, 0, 0, -1.0000, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, -1.0000, 0, 0, -1.0000],
                [10.4653, 8.3129, 6.1531, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 10.4460, 8.2998, 6.1460, 0, 0, 0, 0, 0, 0],
                [-107.0000, 0, 0, 0, 0, 0, 1.0000, 0, 0, 0, 0, 0],
                [0, -107.0000, 0, 0, 0, 0, 0, 1.0000, 0, 0, 0, 0],
                [0, 0, -107.0000, 0, 0, 0, 0, 0, 1.0000, 0, 0, 0],
                [0, 0, 0, -122.0000, 0, 0, 0, 0, 0, 1.0000, 0, 0],
                [0, 0, 0, 0, -122.0000, 0, 0, 0, 0, 0, 1.0000, 0],
                [0, 0, 0, 0, 0, -122.0000, 0, 0, 0, 0, 0, 1.0000],
                [0, -1.0000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [-97.0000, -1.0000, 0, -122.0000, 0, 0, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000]
            ]),
            'b_up': np.array([
                [300],
                [700],
                [220],
                [-60],
                [-140],
                [-44],
                [72],
                [48],
                [0],
                [0],
                [0],
                [0],
                [0],
                [0],
                [-7],
                [924]
            ]),
        }
        # call the function
        A_up, b_up = cut_plane(x, A, b, Aeq, beq, ind_con, ind_int, indeq_con, indeq_int, num_int)
        # check answer against expected results
        self.assertTrue(np.allclose(A_up, expected['A_up']),
                        msg='\n' + str(A_up) + '\n' + str(expected['A_up']))
        self.assertTrue(np.allclose(b_up, expected['b_up']),
                        msg='\n' + str(b_up) + '\n' + str(expected['b_up']))
class BranchCutTestCase(unittest.TestCase):
    """ test the branch_cut function
    """
    def test_3routes(self):
        # branch_cut requires the lpsolve bindings; skip when they are absent.
        try:
            from lpsolve55 import lpsolve
        except ImportError:
            raise SkipTest('lpsolve is not available')

        data = Dataset(suffix='after_3routes')

        # Linear objective coefficients, split by design-variable kind.
        objective = get_objective(data)
        f_int = objective[0]  # integer type design variables
        f_con = objective[1]  # continuous type design variables

        # Linear inequality constraints, Ax <= b; no equality constraints.
        constraints = get_constraints(data)
        A = constraints[0]
        b = constraints[1]
        Aeq = np.ndarray(shape=(0, 0))
        beq = np.ndarray(shape=(0, 0))

        num_routes = data.inputs.DVector.shape[0]   # number of routes
        num_ac = len(data.inputs.AvailPax)          # number of aircraft types

        # Trip-count variables are bounded above by MaxTrip; the passenger
        # variables are unbounded. Everything is bounded below by zero.
        lb = np.zeros((2 * num_ac * num_routes, 1))
        ub = np.concatenate((
            np.ones((num_ac * num_routes, 1)) * data.inputs.MaxTrip.reshape(-1, 1),
            np.ones((num_ac * num_routes, 1)) * np.inf
        ))

        # Row indices into A for continuous vs. integer/continuous coupling.
        ind_conCon = range(2 * num_routes)
        ind_intCon = range(2 * num_routes, len(constraints[0]))

        # Solve the MILP problem via branch and cut.
        xopt, fopt, can_x, can_F, x_best_relax, f_best_relax, funCall, eflag = \
            branch_cut(f_int, f_con, A, b, Aeq, beq, lb, ub,
                       ind_conCon, ind_intCon, [], [])
        # TODO: check return values against MATLAB results
class OutputTestCase(unittest.TestCase):
    """Test the output-generation function against stored MATLAB results."""

    # Ordered comparison spec: (attribute name, flatten-before-comparing).
    # The order reproduces the original hand-written assertion sequence, so
    # a mismatch is reported on the same field as before the refactor.
    _COMPARE_SPEC = (
        ("DetailTrips", False),
        ("Trips", False),
        ("FleetUsed", False),
        ("Fuel", True),
        ("Doc", True),
        ("BlockTime", True),
        ("Nox", True),
        ("Maxpax", True),
        ("Pax", True),
        ("Miles", True),
        ("CostDetail", False),
        ("RevDetail", False),
        ("PaxDetail", False),
        ("RevArray", False),
        ("CostArray", False),
        ("PaxArray", False),
        ("ProfitArray", False),
        ("Revenue", False),
        ("Cost", False),
        ("PPNM", False),
        ("Profit", False),
    )

    def compare(self, outputs1, outputs2):
        """Assert that every output field of *outputs1* matches *outputs2*.

        Fields are compared with ``np.allclose``; fields marked in
        ``_COMPARE_SPEC`` are flattened first (they may differ in shape
        but must agree element-wise).
        """
        for name, flatten in self._COMPARE_SPEC:
            value1 = getattr(outputs1, name)
            value2 = getattr(outputs2, name)
            if flatten:
                value1 = value1.flatten()
                value2 = value2.flatten()
            self.assertTrue(np.allclose(value1, value2),
                            msg='\n' + str(value1) + '\n' + str(value2))

    def test_3routes(self):
        """Regenerate outputs for a known optimum and compare to the dataset."""
        dataset = Dataset(suffix='after_3routes')
        xopt = np.array([0, 3, 2, 2, 3, 0, 0, 321, 214, 244, 366, 0])
        fopt = -1.9417e+04
        outputs = generate_outputs(xopt, fopt, dataset)
        self.compare(outputs, dataset.outputs)
# Allow running this test module directly from the command line;
# unittest's CLI runner discovers and executes the TestCase classes above.
if __name__ == "__main__":
    unittest.main()
|
from segmentation.segmentation_abstract import Segmentation
class Probabilistic_old(Segmentation):
    """Probabilistic sensor-stream segmentation.

    Builds, from annotated activity events, a table of L+1 candidate window
    durations and per-sensor probabilities, then picks one fixed window size
    per sensor id. ``segment`` walks the sensor buffer and emits one window
    per starting event, sized by that sensor's chosen duration.

    NOTE(review): ``sensor_desc``, ``Buffer`` and ``argmaxdic`` are not
    defined in this file -- presumably module-level imports/globals
    elsewhere; confirm before reuse.
    """
    def precompute(self,datasetdscr,s_events,a_events,acts):
        """Estimate per-sensor window sizes from annotated activity events.

        Assumed inputs (from usage -- confirm against callers):
        ``a_events`` is a DataFrame with Activity/Duration/StartTime/EndTime
        columns; ``s_events`` is the raw sensor event stream; ``acts`` is the
        list of activity labels. ``datasetdscr`` is accepted but unused here.
        """
        # Mean duration per activity; anchors the candidate window range.
        ws= a_events.groupby('Activity')['Duration'].mean(numeric_only=False)
        # L+1 candidate window durations, linearly interpolated between the
        # smallest per-activity mean (w[0]) and the overall mean (w[L]).
        L=10
        w={}
        w[0]=min(ws)
        w[L]=ws.mean()
        for l in range(1,L):
            w[l]=(w[L]-w[0])* l/L+w[0]
        buffer=Buffer(s_events,0,0)
        # a_s[sensor][activity] counts co-occurrences; key -1 is the total.
        a_s={sid:{a:0 for a in [-1]+acts} for sid,desc in sensor_desc.iterrows()}
        # w_a[activity][l] scores how well candidate duration w[l] matches the
        # activity's observed durations; key -1 counts observations.
        w_a={a:{i:0 for i in range(-1,L+1)} for a in acts}
        for i,a in a_events.iterrows():
            ##print(i)
            # Sensor events overlapping this activity's time span.
            six=buffer.searchTime(a.StartTime,-1)
            eix=buffer.searchTime(a.EndTime,+1)
            #window=buffer.data.SID.iloc[six:eix+1];
            for si in range(six,eix+1):
                s=buffer.data.SID.iat[si]
                a_s[s][a.Activity]+=1
                a_s[s][-1]+=1
            # Score each candidate duration: 1 when w[l] equals the observed
            # duration, falling linearly to 0 (clamped) as they diverge.
            for l in range(L+1):
                w_a[a.Activity][l]+=1-min(1,abs(w[l]-a.Duration)/w[l])
                w_a[a.Activity][-1]+=1
        # Normalize the counts into conditional probabilities:
        # Pw_a = P(window=l | activity), Pa_s = P(activity | sensor).
        Pw_a={a:{i:0 for i in range(L+1)} for a in acts}
        Pa_s={sid:{a:0 for a in acts} for sid,desc in sensor_desc.iterrows()}
        for a in acts:
            for l in range(L+1):
                Pw_a[a][l]=0 if w_a[a][-1]==0 else w_a[a][l]/w_a[a][-1]
            for sid in a_s:
                Pa_s[sid][a]=0 if a_s[sid][-1]==0 else a_s[sid][a]/a_s[sid][-1]
        # For each sensor, combine via its most likely activity and pick the
        # best-scoring candidate index: w_s[sid] in 0..L.
        Pw_s={sid:{i:0 for i in range(L+1)} for sid,desc in sensor_desc.iterrows()}
        w_s={sid:{i:0 for i in range(L+1)} for sid,desc in sensor_desc.iterrows()}
        for sid,desc in sensor_desc.iterrows():
            for l in range(L+1):
                # NOTE(review): argmaxdic(Pa_s[sid]) is loop-invariant; it
                # could be hoisted out of the inner loop.
                a=argmaxdic(Pa_s[sid])
                Pw_s[sid][l]=Pa_s[sid][a]*Pw_a[a][l]
            w_s[sid]=argmaxdic(Pw_s[sid])
        # Persist the tables for segment()/debugging.
        self.w_s=w_s
        self.w=w
        self.Pw_a=Pw_a
        self.Pw_s=Pw_s
        self.Pa_s=Pa_s
        self.w_a=w_a
        self.a_s=a_s
    def reset(self):
        # Restart sequential consumption of the buffer from the beginning.
        self.lastindex=-1
    def segment(self,w_history,buffer):
        """Return the next window dict or None when the buffer is exhausted.

        The window starts at the next unconsumed event and spans the duration
        precomputed for that event's sensor id (``w[w_s[SID]]``).
        """
        sindex=self.lastindex+1
        self.lastindex=sindex
        if(sindex >=len(buffer.data)):
            return None
        sensor=buffer.data.iloc[sindex]
        stime=buffer.times[sindex]
        sindex=buffer.searchTime(stime,-1)
        # Window duration chosen for this sensor during precompute().
        size=self.w[self.w_s[sensor.SID]]
        etime=stime+size
        eindex=buffer.searchTime(etime,+1)
        #etime=buffer.times[eindex]
        window=buffer.data.iloc[sindex:eindex+1];
        #buffer.removeTop(sindex)
        # NOTE(review): result discarded -- looks like leftover debug code or
        # an implicit validity check on the first row; confirm intent.
        window.iat[0,1].value
        return {'window':window,'start':stime, 'end':etime}
|
"""
earthpy.spatial
===============
Functions to manipulate spatial raster and vector data.
"""
import os
import sys
import contextlib
import warnings
import numpy as np
from shapely.geometry import mapping, box
import geopandas as gpd
import rasterio as rio
from rasterio.mask import mask
def extent_to_json(ext_obj):
    """Convert bounds to a shapely geojson like spatial object.

    This format is what shapely uses. The output object can be used
    to crop a raster image.

    Parameters
    ----------
    ext_obj: list or geopandas geodataframe
        If provided with a geopandas geodataframe, the extent
        will be generated from that. Otherwise, extent values
        should be in the order: minx, miny, maxx, maxy.

    Return
    ------
    extent_json: A GeoJSON style dictionary of corner coordinates
    for the extent
        A GeoJSON style dictionary of corner coordinates representing
        the spatial extent of the provided spatial object.

    Example
    -------
    >>> import geopandas as gpd
    >>> import earthpy.spatial as es
    >>> from earthpy.io import path_to_example
    >>> rmnp = gpd.read_file(path_to_example('rmnp.shp'))
    >>> es.extent_to_json(rmnp)
    {'type': 'Polygon', 'coordinates': (((-105.4935937, 40.1580827), ...),)}
    """
    # isinstance (rather than an exact type() comparison) also accepts
    # GeoDataFrame/list subclasses, which behave identically here.
    if isinstance(ext_obj, gpd.geodataframe.GeoDataFrame):
        extent_json = mapping(box(*ext_obj.total_bounds))
    elif isinstance(ext_obj, list):
        # assert-based validation is kept (not converted to raises) so that
        # existing callers catching AssertionError keep working; note that
        # asserts vanish under `python -O`.
        assert ext_obj[0] <= ext_obj[2], "xmin must be <= xmax"
        assert ext_obj[1] <= ext_obj[3], "ymin must be <= ymax"
        extent_json = mapping(box(*ext_obj))
    else:
        raise ValueError("Please provide a GeoDataFrame or a list of values.")
    return extent_json
def normalized_diff(b1, b2):
    """Take two n-dimensional numpy arrays and calculate the normalized
    difference.

    Math will be calculated (b1-b2) / (b1 + b2).

    Parameters
    ----------
    b1, b2 : numpy arrays
        Two numpy arrays that will be used to calculate the normalized
        difference. Math will be calculated (b1-b2) / (b1+b2).

    Returns
    ----------
    n_diff : numpy array
        The element-wise result of (b1-b2) / (b1+b2) calculation. Inf values
        are set to nan. Array returned as masked if result includes nan values.

    Examples
    --------
    >>> import numpy as np
    >>> import earthpy.spatial as es
    >>> # Calculate normalized difference vegetation index
    >>> nir_band = np.array([[6, 7, 8, 9, 10], [16, 17, 18, 19, 20]])
    >>> red_band = np.array([[1, 2, 3, 4, 5], [11, 12, 13, 14, 15]])
    >>> ndvi = es.normalized_diff(b1=nir_band, b2=red_band)
    >>> ndvi
    array([[0.71428571, 0.55555556, 0.45454545, 0.38461538, 0.33333333],
           [0.18518519, 0.17241379, 0.16129032, 0.15151515, 0.14285714]])

    >>> # Calculate normalized burn ratio
    >>> nir_band = np.array([[8, 10, 13, 17, 15], [18, 20, 22, 23, 25]])
    >>> swir_band = np.array([[6, 7, 8, 9, 10], [16, 17, 18, 19, 20]])
    >>> nbr = es.normalized_diff(b1=nir_band, b2=swir_band)
    >>> nbr
    array([[0.14285714, 0.17647059, 0.23809524, 0.30769231, 0.2       ],
           [0.05882353, 0.08108108, 0.1       , 0.0952381 , 0.11111111]])
    """
    if not (b1.shape == b2.shape):
        raise ValueError("Both arrays should have the same dimensions")

    # Silence both division warning categories: "divide" covers x/0 for
    # x != 0 (-> inf) and "invalid" covers 0/0 (-> nan). The original only
    # silenced "divide", so 0/0 cells leaked a raw numpy RuntimeWarning.
    with np.errstate(divide="ignore", invalid="ignore"):
        n_diff = (b1 - b2) / (b1 + b2)

    # Replace +/-inf (x/0) with nan and alert the caller once.
    if np.isinf(n_diff).any():
        warnings.warn(
            "Divide by zero produced infinity values that will be replaced "
            "with nan values",
            Warning,
        )
        n_diff[np.isinf(n_diff)] = np.nan

    # Return a masked array whenever any cell could not be computed, so
    # downstream statistics skip those cells.
    if np.isnan(n_diff).any():
        n_diff = np.ma.masked_invalid(n_diff)

    return n_diff
def stack(band_paths, out_path="", nodata=None):
    """Convert a list of raster paths into a raster stack numpy darray.

    Parameters
    ----------
    band_paths : list of file paths
        A list with paths to the bands to be stacked. Bands
        will be stacked in the order given in this list.
    out_path : string (optional)
        A path with a file name for the output stacked raster
        tif file.
    nodata : numeric (optional)
        A value (int or float) that represents invalid or missing values to
        mask in the output.

    Returns
    ----------
    tuple :
        numpy array
            N-dimensional array created by stacking the raster files provided.
        rasterio profile object
            A rasterio profile object containing the updated spatial metadata
            for the stacked numpy array.

    Example
    -------
    >>> import os
    >>> import earthpy.spatial as es
    >>> from earthpy.io import path_to_example
    >>> band_fnames = ["red.tif", "green.tif", "blue.tif"]
    >>> band_paths = [path_to_example(fname) for fname in band_fnames]
    >>> destfile = "./stack_result.tif"
    >>> arr, arr_meta = es.stack(band_paths, destfile)
    >>> arr.shape
    (3, 373, 485)
    >>> os.path.isfile(destfile)
    True
    >>> # optionally, clean up example output
    >>> os.remove(destfile)
    """
    # All sources are opened read-only.
    kwds = {"mode": "r"}

    out_dir = os.path.dirname(out_path)
    writing_to_cwd = out_dir == ""
    if not os.path.exists(out_dir) and not writing_to_cwd:
        raise ValueError(
            "The output directory path that you provided does not exist"
        )

    if len(band_paths) < 2:
        raise ValueError(
            "You need at least 2 file paths to create a stack."
        )

    # Determine whether a valid output file name was supplied. A valid name
    # splits into at least two dot-separated parts; note that a name such as
    # "my.stack.tif" has three parts, which the previous "== 2" comparison
    # wrongly treated as "do not write to disk".
    basename_parts = os.path.basename(out_path).split(".")
    if (len(out_path) > 0) and (len(basename_parts) < 2):
        raise ValueError("Please specify a valid file name for output.")
    write_raster = len(basename_parts) >= 2

    # ExitStack guarantees every opened dataset is closed, even when one of
    # the validation checks below raises.
    with contextlib.ExitStack() as context:
        sources = [
            context.enter_context(rio.open(path, **kwds))
            for path in band_paths
        ]

        # All inputs must share CRS, affine transform and pixel grid.
        dest_crs = [src.meta["crs"].to_string() for src in sources]
        dest_aff = [src.meta["transform"] for src in sources]
        dest_shps = [
            (src.meta["height"], src.meta["width"]) for src in sources
        ]
        if not len(set(dest_crs)) == 1:
            raise ValueError(
                "Please ensure all source rasters have the same CRS."
            )
        if not len(set(dest_aff)) == 1:
            raise ValueError(
                "Please ensure all source rasters have same affine transform."
            )
        if not len(set(dest_shps)) == 1:
            raise ValueError(
                "Please ensure all source rasters have same dimensions "
                "(nrows, ncols)."
            )

        # Output band count is the total across all sources.
        dest_kwargs = sources[0].meta
        dest_count = sum(src.count for src in sources)
        dest_kwargs["count"] = dest_count
        if nodata is not None:
            dest_kwargs["nodata"] = nodata

        if not write_raster:
            # Stack the bands in memory only.
            arr, meta = _stack_bands(sources)
            if nodata is not None:
                # Mask and input data types must be identical for masked_equal
                nodata = np.array([nodata]).astype(arr.dtype)[0]
                arr = np.ma.masked_equal(arr, nodata)
            return arr, meta
        else:
            # Valid output path checked above.
            file_fmt = os.path.basename(out_path).split(".")[-1]
            # The output extension must correspond to the source driver.
            rio_driver = sources[0].profile["driver"]
            if file_fmt not in rio_driver.lower():
                raise ValueError(
                    "Source data is {}. Please specify corresponding output "
                    "extension.".format(rio_driver)
                )
            # Write the stacked file, then read it back for the return value.
            with rio.open(out_path, "w", **dest_kwargs) as dest:
                _stack_bands(sources, write_raster, dest)
            with rio.open(out_path, "r") as src:
                arr = src.read()
                meta = src.profile
            if nodata is not None:
                # Make sure value is same data type before masking.
                nodata = np.array([nodata]).astype(arr.dtype)[0]
                arr = np.ma.masked_equal(arr, nodata)
            return arr, meta
def _stack_bands(sources, write_raster=False, dest=None):
    """Stack a set of bands into a single array or output file.

    Parameters
    ----------
    sources : list of rasterio dataset objects
        A list of rasterio dataset objects you wish to stack. Objects
        will be stacked in the order provided in this list.
    write_raster : bool (default=False)
        When True, write every band to *dest* instead of returning an array.
    dest : rasterio dataset writer (optional)
        Open writable dataset the bands are written to (write mode only).

    Returns
    ----------
    tuple (array mode only; write mode returns None)
        numpy array
            Numpy array generated by stacking all bands of all sources.
        ret_prof : rasterio profile
            Copy of the first source's profile with ``count`` updated to
            the number of stacked layers.
    """
    # Basic duck-type validation: every source must expose a profile.
    # (The original followed the raise with an unreachable sys.exit().)
    try:
        for src in sources:
            src.profile
    except AttributeError:
        raise AttributeError("The sources object should be Dataset Reader")

    if write_raster:
        # Keep a running destination band index across sources; the original
        # wrote every band of source ii to index ii + 1, so multi-band
        # sources overwrote their own bands.
        band_idx = 1
        for src in sources:
            bands = src.read()
            if bands.ndim != 3:
                bands = bands[np.newaxis, ...]
            for band in bands:
                dest.write(band, band_idx)
                band_idx += 1
    else:
        stacked_arr = []
        for src in sources:
            bands = src.read()
            # Collapse single-band (1, rows, cols) reads to (rows, cols) so
            # the final stack has one axis entry per band.
            if bands.shape[0] == 1:
                bands = np.squeeze(bands)
            stacked_arr.append(bands)
        # Update the profile to have count == number of stacked layers.
        ret_prof = sources[0].profile.copy()
        ret_prof["count"] = len(stacked_arr)
        return np.array(stacked_arr), ret_prof
def crop_image(raster, geoms, all_touched=True):
    """Crop a single file using geometry objects.

    Parameters
    ----------
    raster : rasterio.io.DatasetReader object
        The rasterio object to be cropped.
    geoms : geopandas geodataframe or list of polygons
        The spatial polygon boundaries in GeoJSON-like dict format
        to be used to crop the image. All data outside of the polygon
        boundaries will be set to nodata and/or removed from the image.
    all_touched : bool (default=True)
        Include a pixel in the mask if it touches any of the
        shapes. If False, include a pixel only if its center is within one of
        the shapes, or if it is selected by Bresenham's line algorithm.
        (from rasterio)

    Returns
    ----------
    tuple
        out_image: cropped numpy array
            A numpy array that is cropped to the geoms object
            extent with shape (bands, rows, columns)
        out_meta: dict
            A dictionary containing updated metadata for the cropped raster,
            including extent (shape elements) and transform properties.

    Example
    -------
    >>> import geopandas as gpd
    >>> import rasterio as rio
    >>> import earthpy.spatial as es
    >>> from earthpy.io import path_to_example
    >>> # Clip an RGB image to the extent of Rocky Mountain National Park
    >>> rmnp = gpd.read_file(path_to_example("rmnp.shp"))
    >>> with rio.open(path_to_example("rmnp-rgb.tif")) as src_raster:
    ...     cropped_raster, cropped_meta = es.crop_image(src_raster, rmnp)
    >>> src_raster.shape
    (373, 485)
    >>> cropped_raster.shape[1:3]
    (265, 281)
    """
    # A GeoDataFrame is reduced to a single GeoJSON-style extent polygon;
    # anything else is assumed to already be an iterable of geometries.
    clip_extent = (
        [extent_to_json(geoms)]
        if isinstance(geoms, gpd.geodataframe.GeoDataFrame)
        else geoms
    )
    out_image, out_transform = mask(
        raster, clip_extent, crop=True, all_touched=all_touched
    )
    # Propagate the source metadata, then overwrite the fields invalidated
    # by the crop: driver, pixel-grid size and the affine transform.
    out_meta = raster.meta.copy()
    out_meta["driver"] = "GTiff"
    out_meta["height"] = out_image.shape[1]
    out_meta["width"] = out_image.shape[2]
    out_meta["transform"] = out_transform
    return out_image, out_meta
def crop_all(
    raster_paths,
    output_dir,
    geoms,
    overwrite=False,
    all_touched=True,
    verbose=True,
):
    """Takes a list of rasters and a boundary, and crops them efficiently.

    Parameters
    ----------
    raster_paths : list of file paths
        List of paths of rasters that will be cropped.
    output_dir : string
        Directory for the output cropped files; "_crop" is appended to each
        file name.
    geoms : geopandas geodataframe or list of polygons
        The spatial polygon boundaries in GeoJSON-like dict format used to
        crop the images. Data outside the polygon boundaries is set to
        nodata and/or removed.
    overwrite : bool (default=False)
        If False, refuse to overwrite existing output files; if True,
        existing files are overwritten.
    all_touched : bool (default=True)
        Include a pixel in the mask if it touches any of the shapes. If
        False, include a pixel only if its center is within one of the
        shapes, or if it is selected by Bresenham's line algorithm.
        (from rasterio)
    verbose : bool (default=True)
        When True, return the list of full file paths created; otherwise
        return nothing.

    Returns
    ----------
    return files : list
        List of full file paths created by the function.

    Example
    -------
    >>> import os
    >>> import earthpy.spatial as es
    >>> import geopandas as gpd
    >>> from earthpy.io import path_to_example
    >>> band_fnames = ["red.tif", "green.tif", "blue.tif"]
    >>> paths = [path_to_example(fname) for fname in band_fnames]
    >>> rmnp = gpd.read_file(path_to_example("rmnp.shp"))
    >>> out_dir = os.path.commonpath(paths)
    >>> output_files = es.crop_all(paths, out_dir, rmnp, overwrite=True)
    >>> len(output_files)
    3
    >>> os.path.isfile(output_files[0])
    True
    >>> # Clean up example data
    >>> for bands in output_files:
    ...     os.remove(bands)
    """
    if not os.path.exists(output_dir):
        raise ValueError(
            "The output directory that you provided does not exist"
        )
    return_files = []
    for raster_path in raster_paths:
        # Split off the extension, derive "<name>_crop.<ext>" in output_dir.
        stem, extension = raster_path.rsplit(".", 1)
        base_name = os.path.basename(os.path.normpath(stem))
        outpath = os.path.join(output_dir, base_name + "_crop." + extension)
        return_files.append(outpath)
        if os.path.exists(outpath) and not overwrite:
            raise ValueError(
                "The file {0} already exists. If you wish to overwrite this "
                "file, set the overwrite argument to true.".format(outpath)
            )
        with rio.open(raster_path) as src_raster:
            cropped, cropped_meta = crop_image(
                src_raster, geoms, all_touched=all_touched
            )
            with rio.open(outpath, "w", **cropped_meta) as dst_raster:
                dst_raster.write(cropped)
    if verbose:
        return return_files
def bytescale(data, high=255, low=0, cmin=None, cmax=None):
    """Byte scales an array (image).

    Byte scaling converts the input image to uint8 dtype, and rescales
    the data range to ``(low, high)`` (default 0-255).
    If the input image already has dtype uint8, no scaling is done.
    Source code adapted from scipy.misc.bytescale (deprecated in scipy-1.0.0)

    Parameters
    ----------
    data : numpy array
        image data array.
    high : int (default=255)
        Scale max value to `high`.
    low : int (default=0)
        Scale min value to `low`.
    cmin : int (optional)
        Bias scaling of small values. Default is ``data.min()``.
    cmax : int (optional)
        Bias scaling of large values. Default is ``data.max()``.

    Returns
    -------
    img_array : uint8 numpy array
        The byte-scaled array.

    Examples
    --------
    >>> import numpy as np
    >>> from earthpy.spatial import bytescale
    >>> img = np.array([[ 91.06794177,   3.39058326,  84.4221549 ],
    ...                 [ 73.88003259,  80.91433048,   4.88878881],
    ...                 [ 51.53875334,  34.45808177,  27.5873488 ]])
    >>> bytescale(img)
    array([[255,   0, 236],
           [205, 225,   4],
           [140,  90,  70]], dtype=uint8)
    >>> bytescale(img, high=200, low=100)
    array([[200, 100, 192],
           [180, 188, 102],
           [155, 135, 128]], dtype=uint8)
    >>> bytescale(img, cmin=0, cmax=255)
    array([[255,   0, 236],
           [205, 225,   4],
           [140,  90,  70]], dtype=uint8)
    """
    if data.dtype == "uint8":
        return data

    if high > 255:
        raise ValueError("`high` should be less than or equal to 255.")
    if low < 0:
        raise ValueError("`low` should be greater than or equal to 0.")
    if high < low:
        raise ValueError("`high` should be greater than or equal to `low`.")

    # Clamp the requested range to the data's actual range (earthpy
    # behaviour: values cannot be stretched beyond what is present).
    if cmin is None or (cmin < data.min()):
        cmin = float(data.min())
    if (cmax is None) or (cmax > data.max()):
        cmax = float(data.max())

    # Calculate range of values
    crange = cmax - cmin
    if crange < 0:
        raise ValueError("`cmax` should be larger than `cmin`.")
    elif crange == 0:
        raise ValueError(
            "`cmax` and `cmin` should not be the same value. Please specify "
            "`cmax` > `cmin`"
        )

    scale = float(high - low) / crange

    # If cmax is less than the data max, this scale parameter would map
    # values above cmax past `high`; clip them to cmax first. Work on a
    # copy so the caller's array is never modified (the original assigned
    # into `data` in place, silently mutating the input).
    data = data.copy()
    data[data > cmax] = cmax
    bytedata = (data - cmin) * scale + low
    return (bytedata.clip(low, high) + 0.5).astype("uint8")
def hillshade(arr, azimuth=30, altitude=30):
    """Create hillshade from a numpy array containing elevation data.

    Parameters
    ----------
    arr : numpy array of shape (rows, columns)
        Numpy array with elevation values to be used to created hillshade.
    azimuth : float (default=30)
        The desired azimuth for the hillshade.
    altitude : float (default=30)
        The desired sun angle altitude for the hillshade.

    Returns
    -------
    numpy array
        A numpy array containing hillshade values.

    Example
    -------
    .. plot::

        >>> import matplotlib.pyplot as plt
        >>> import rasterio as rio
        >>> import earthpy.spatial as es
        >>> from earthpy.io import path_to_example
        >>> with rio.open(path_to_example('rmnp-dem.tif')) as src:
        ...     dem = src.read()
        >>> print(dem.shape)
        (1, 187, 152)
        >>> squeezed_dem = dem.squeeze() # remove first dimension
        >>> print(squeezed_dem.shape)
        (187, 152)
        >>> shade = es.hillshade(squeezed_dem)
        >>> plt.imshow(shade, cmap="Greys")
        <matplotlib.image.AxesImage object at 0x...>
    """
    # A non-2D array makes the two-way unpack of np.gradient fail.
    try:
        grad_x, grad_y = np.gradient(arr)
    except ValueError:
        raise ValueError("Input array should be two-dimensional")

    if azimuth > 360.0:
        raise ValueError(
            "Azimuth value should be less than or equal to 360 degrees"
        )
    # Convert from compass azimuth to the math convention, in radians.
    azimuth_rad = (360.0 - azimuth) * np.pi / 180.0

    if altitude > 90.0:
        raise ValueError(
            "Altitude value should be less than or equal to 90 degrees"
        )
    altitude_rad = altitude * np.pi / 180.0

    # Terrain slope and aspect from the elevation gradients.
    slope = np.pi / 2.0 - np.arctan(np.sqrt(grad_x * grad_x + grad_y * grad_y))
    aspect = np.arctan2(-grad_x, grad_y)

    # Standard illumination model, rescaled from [-1, 1] to [0, 255].
    shaded = (
        np.sin(altitude_rad) * np.sin(slope)
        + np.cos(altitude_rad)
        * np.cos(slope)
        * np.cos((azimuth_rad - np.pi / 2.0) - aspect)
    )
    return 255 * (shaded + 1) / 2
def crs_check(path):
    """Get the CRS of a raster file from a file path.

    Parameters
    ----------
    path : string
        Path to a raster file in a format that rasterio can read.

    Returns
    -------
    crs : crs object
        The CRS object stored in the raster file.
    """
    try:
        with rio.open(path) as src:
            crs = src.crs
            # Hierarchical formats expose their CRS on the subdatasets;
            # as in the original loop, the last subdataset's CRS wins.
            for subdataset in src.subdatasets:
                with rio.open(subdataset) as sub_src:
                    crs = sub_src.crs
        if crs is None:
            raise ValueError(
                "No CRS found in data. The raster may not have one."
            )
        return crs
    except rio.errors.RasterioIOError:
        raise rio.errors.RasterioIOError(
            "Oops, your data are not in a format "
            "that rasterio can read. Please "
            "check the rasterio documentation "
            "for accepted file formats and make "
            "sure that your data are in raster "
            "format..\n"
        )
# @deprecate
def stack_raster_tifs(band_paths, out_path, arr_out=True):
    """Deprecated earthpy entry point; use :func:`stack` instead.

    Raises
    ------
    Warning
        Always raised to signal the deprecation. The raise is kept (rather
        than ``warnings.warn``) so existing callers that catch ``Warning``
        keep working; the original's trailing ``sys.exit()`` was unreachable
        and has been removed.
    """
    raise Warning("stack_raster_tifs is deprecated. Use stack(). Exiting...")
|
#coding=utf-8
"""
@Author: Freshield
@Contact: yangyufresh@163.com
@File: bagOfWords2Vec.py
@Time: 2019-12-12 17:33
@Last_update: 2019-12-12 17:33
@Desc: None
@==============================================@
@ _____ _ _ _ _ @
@ | __|___ ___ ___| |_|_|___| |_| | @
@ | __| _| -_|_ -| | | -_| | . | @
@ |__| |_| |___|___|_|_|_|___|_|___| @
@ Freshield @
@==============================================@
"""
def bagOfWords2Vec(vocabList, inputSet):
    """Convert a document into a bag-of-words count vector.

    Parameters
    ----------
    vocabList : list
        Vocabulary; position i of the result counts occurrences of
        vocabList[i].
    inputSet : iterable
        Words of the document to vectorize. Words not present in the
        vocabulary are ignored.

    Returns
    -------
    list of int
        Per-vocabulary-word occurrence counts.
    """
    # Build a word -> index map once so each lookup is O(1) instead of the
    # O(len(vocabList)) cost of list.index() on every input word.
    word_index = {}
    for i, word in enumerate(vocabList):
        # setdefault keeps the FIRST position of a duplicated vocabulary
        # word, matching list.index() semantics.
        word_index.setdefault(word, i)
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        idx = word_index.get(word)
        if idx is not None:
            returnVec[idx] += 1
    return returnVec
|
import time
def do_my_sum(xs):
    """Sum the values in *xs* with an explicit Python loop.

    Deliberately hand-rolled (rather than the builtin ``sum``) so the
    script below can compare interpreter-loop speed against the C
    implementation. The accumulator is named ``total`` so the builtin
    ``sum`` is not shadowed.
    """
    total = 0
    for v in xs:
        total += v
    return total
sz = 10000000  # ten million elements (range() is lazy in Python 3)
testdata = range(sz)

# time.clock() was deprecated in Python 3.3 and removed in 3.8;
# time.perf_counter() is the documented replacement for interval timing.
t0 = time.perf_counter()
my_result = do_my_sum(testdata)
t1 = time.perf_counter()
print('my result = {0} (time taken = {1:.4f} seconds)'
      .format(my_result, t1-t0))

t2 = time.perf_counter()
their_result = sum(testdata)
t3 = time.perf_counter()
print('their result = {0} (time taken = {1:.4f} seconds)'
      .format(their_result, t3-t2))
|
# Copyright 2019 Ondrej Skopek.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Any, Dict, List, Optional, Tuple, Union
import warnings
import matplotlib.pyplot
import torch
from torch import Tensor
from tensorboardX import SummaryWriter
import torchvision
# Flat mapping of metric name (e.g. "bce", "kl", "elbo") to its scalar
# value -- the shape returned by _to_print() for console/log output.
EpochStatsType = Dict[str, float]
class Stats:
    """Thin wrapper around a tensorboardX ``SummaryWriter``.

    Tracks two counters -- a batch-level ``global_step`` and an ``epoch``
    counter -- and stamps every summary with whichever one the caller
    selects via the ``epoch`` flag.
    """

    def __init__(self,
                 chkpt_dir: str,
                 img_dims: Optional[Tuple[int, ...]] = None,
                 global_step: int = 0,
                 epoch: int = 0,
                 train_statistics: bool = False,
                 show_embeddings: Optional[int] = None):
        self._summary_writer = SummaryWriter(chkpt_dir)
        self._img_dims = img_dims
        self._global_step = global_step
        self._epoch = epoch
        self.train_statistics = train_statistics
        self.show_embeddings = show_embeddings

    @property
    def img_dims(self) -> Optional[Tuple[int, ...]]:
        """Default image dimensions used by :meth:`add_images`."""
        return self._img_dims

    @property
    def global_step(self) -> int:
        return self._global_step

    @global_step.setter
    def global_step(self, value: int) -> None:
        self._global_step = value

    @property
    def epoch(self) -> int:
        return self._epoch

    @epoch.setter
    def epoch(self, value: int) -> None:
        self._epoch = value

    def _step(self, epoch: bool = False) -> int:
        """Return the counter selected by the ``epoch`` flag."""
        return self.epoch if epoch else self.global_step

    def add_scalar(self, tag: str, scalar_value: Any, epoch: bool = False) -> None:
        self._summary_writer.add_scalar(tag=tag, scalar_value=scalar_value,
                                        global_step=self._step(epoch))

    @staticmethod
    def _show_images(x: Tensor, dims: Tuple[int, ...]) -> Tensor:
        """Reshape *x* to *dims* and arrange it into a single image grid."""
        return torchvision.utils.make_grid(x.view(*dims))

    def add_images(self, tag: str, imgs: Tensor, dims: Optional[Tuple[int, ...]] = None, epoch: bool = False) -> None:
        dims = self._img_dims if dims is None else dims
        if dims is None:
            warnings.warn("No img dims specified, will not show reconstructions in TensorBoard.")
            return
        self._summary_writer.add_image(tag=tag,
                                       img_tensor=Stats._show_images(imgs, dims),
                                       global_step=self._step(epoch))

    def add_histogram(self, tag: str, values: Tensor, epoch: bool = False) -> None:
        self._summary_writer.add_histogram(tag=tag, values=values,
                                           global_step=self._step(epoch))

    def add_embedding(self, tag: str, mat: Tensor, metadata: List[str], epoch: bool = False) -> None:
        self._summary_writer.add_embedding(tag=tag, mat=mat, metadata=metadata,
                                           global_step=self._step(epoch))

    def add_figure(self, tag: str, figure: matplotlib.pyplot.figure, epoch: bool = False) -> None:
        self._summary_writer.add_figure(tag=tag, figure=figure,
                                        global_step=self._step(epoch))
def _to_print(stats: Union["BatchStatsFloat", "EpochStats"]) -> EpochStatsType:
    """Collect the loggable scalar fields of *stats* into a flat dict.

    Optional metrics that were not tracked (None) are reported as 0.0.
    """
    ll = stats.log_likelihood if stats.log_likelihood is not None else 0.0
    mi = stats.mutual_info if stats.mutual_info is not None else 0.0
    cov = stats.cov_norm if stats.cov_norm is not None else 0.0
    # Key order mirrors the original literal so printed output is stable.
    return {
        "bce": stats.bce,
        "kl": stats.kl,
        "elbo": stats.elbo,
        "ll": ll,
        "mi": mi,
        "cov_norm": cov,
        "beta": stats.beta,
    }
class BatchStatsFloat:
    """Plain-float snapshot of one batch's statistics (``.item()``-ed)."""

    def __init__(self, bce: Tensor, kl: Tensor, elbo: Tensor, log_likelihood: Optional[Tensor],
                 mutual_info: Optional[Tensor], cov_norm: Optional[Tensor], component_kl: List[Tensor],
                 beta: float) -> None:
        self.bce = bce.item()
        self.kl = kl.item()
        self.elbo = elbo.item()
        # Optional metrics stay None when they were not tracked.
        self.log_likelihood = log_likelihood.item() if log_likelihood is not None else None
        self.mutual_info = mutual_info.item() if mutual_info is not None else None
        self.cov_norm = cov_norm.item() if cov_norm is not None else None
        self.component_kl = [t.item() for t in component_kl]
        self.beta = beta

    def summaries(self, stats: Stats, prefix: str = "train/batch") -> None:
        """Write the per-batch scalars to TensorBoard under *prefix*."""
        stats.add_scalar(prefix + "/bce", self.bce)
        stats.add_scalar(prefix + "/kl", self.kl)
        stats.add_scalar(prefix + "/elbo", self.elbo)
        # Optional metrics are only logged when they were tracked.
        for name, value in (("log_likelihood", self.log_likelihood),
                            ("mutual_info", self.mutual_info),
                            ("cov_norm", self.cov_norm)):
            if value is not None:
                stats.add_scalar(prefix + "/" + name, value)

    def to_print(self) -> EpochStatsType:
        """Printable dict of this batch's statistics."""
        return _to_print(self)
class BatchStats:
    """Tensor-valued statistics for one batch of a VAE training step.

    Holds the raw per-batch tensors and exposes batch-reduced views via
    properties. ``convert_to_float`` produces a detached, plain-float
    snapshot (``BatchStatsFloat``) for logging.

    NOTE(review): the reductions below assume dim 0 is the batch (sample)
    dimension of each tensor -- confirm against the producing model code.
    """
    def __init__(self,
                 bce: Tensor,
                 component_kl: List[Tensor],
                 beta: float,
                 log_likelihood: Optional[Tensor] = None,
                 mutual_info: Optional[Tensor] = None,
                 cov_norm: Optional[Tensor] = None) -> None:
        self._beta = beta
        self._bce = bce
        self._component_kl = component_kl
        # NOTE(review): despite the "_mean" suffix this stores per-component
        # SUMS over dim 0, not means -- possibly a misnomer; verify.
        self._component_kl_mean = [x.sum(dim=0) for x in component_kl]
        self._log_likelihood = log_likelihood
        self._mutual_info = mutual_info
        self._cov_norm = cov_norm
        # Pre-reduce KL and ELBO once; the properties only re-sum the result.
        self._kl_val = self._kl()
        self._elbo_val = self._elbo(beta)
    @property
    def bce(self) -> Tensor:
        # Batch-summed reconstruction term.
        return self._bce.sum(dim=0)
    @property
    def component_kl(self) -> List[Tensor]:
        return self._component_kl_mean
    @property
    def log_likelihood(self) -> Optional[Tensor]:
        return None if self._log_likelihood is None else self._log_likelihood.sum(dim=0)
    @property
    def mutual_info(self) -> Optional[Tensor]:
        return None if self._mutual_info is None else self._mutual_info.sum(dim=0)
    @property
    def cov_norm(self) -> Optional[Tensor]:
        return None if self._cov_norm is None else self._cov_norm.sum(dim=0)
    @property
    def kl(self) -> Tensor:
        # Total KL: sum of the per-component terms computed in _kl().
        return self._kl_val.sum(dim=-1)
    @property
    def elbo(self) -> Tensor:
        return self._elbo_val.sum(dim=-1)
    @property
    def beta(self) -> float:
        return self._beta
    def _kl(self) -> Tensor:
        # Stack the component KL tensors along a new trailing axis and sum it.
        return torch.sum(torch.cat([x.unsqueeze(dim=-1) for x in self._component_kl], dim=-1), dim=-1)
    def _elbo(self, beta: float) -> Tensor:
        # ELBO = -(reconstruction) - beta * KL, summed over the batch dim.
        assert self._bce.shape == self._kl_val.shape
        return (-self._bce - beta * self._kl_val).sum(dim=0)
    def convert_to_float(self) -> BatchStatsFloat:
        # Detached plain-float snapshot for logging/printing.
        return BatchStatsFloat(self.bce,
                               self.kl,
                               self.elbo,
                               self.log_likelihood,
                               self.mutual_info,
                               self.cov_norm,
                               self.component_kl,
                               beta=self.beta)
class EpochStats:
    """Averages per-batch statistics (BatchStatsFloat) over one epoch."""

    def __init__(self, bs: List[BatchStatsFloat], length: int) -> None:
        assert len(bs) > 0
        self.beta = bs[0].beta
        # Every batch in the epoch must have been produced with the same beta.
        assert sum(self.beta == b.beta for b in bs) == len(bs)
        self.bce = sum(b.bce for b in bs) / length
        self.kl = sum(b.kl for b in bs) / length
        self.elbo = sum(b.elbo for b in bs) / length
        # NOTE: truthiness filter, as in the original — batches reporting None
        # (or a value of exactly 0) are skipped in the accumulation.
        self.log_likelihood = sum(b.log_likelihood for b in bs if b.log_likelihood) / length
        self.mutual_info = sum(b.mutual_info for b in bs if b.mutual_info) / length
        self.cov_norm = sum(b.cov_norm for b in bs if b.cov_norm) / length
        n_components = len(bs[0].component_kl)
        self.component_kl = [
            sum(b.component_kl[i] for b in bs) / length for i in range(n_components)
        ]

    def to_print(self) -> EpochStatsType:
        """Delegate to the module-level _to_print formatter."""
        return _to_print(self)

    def summaries(self, stats: Stats, prefix: str = "train/epoch") -> None:
        """Log epoch-level scalars to *stats* under *prefix*."""
        stats.add_scalar(prefix + "/beta", self.beta, epoch=True)
        stats.add_scalar(prefix + "/bce", self.bce, epoch=True)
        stats.add_scalar(prefix + "/kl", self.kl, epoch=True)
        stats.add_scalar(prefix + "/elbo", self.elbo, epoch=True)
        for i, component in enumerate(self.component_kl):
            stats.add_scalar(prefix + f"/kl_comp_{i}", component, epoch=True)
        # Optional diagnostics: skipped when falsy (an exact 0.0 mean is
        # skipped too, matching the accumulation above).
        for tag, value in (("log_likelihood", self.log_likelihood),
                           ("mutual_info", self.mutual_info),
                           ("cov_norm", self.cov_norm)):
            if value:
                stats.add_scalar(prefix + "/" + tag, value, epoch=True)
|
from django.shortcuts import render, redirect
from .models import Task
from django.views.generic import CreateView
from .forms import Create_task
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .serializers import TaskSerializer
from .models import Task
from rest_framework import viewsets, permissions
@login_required(login_url='login/')
def home(request):
    """List all tasks and handle creation of a new task for the logged-in user."""
    if request.method == 'POST':
        form = Create_task(request.POST)
        if form.is_valid():
            # Attach the task to the authenticated user before saving.
            form.instance.user = request.user
            form.save()
            # Plain string: the original used an f-string with no placeholders.
            messages.success(request, "Task added")
            return redirect('home')
    else:
        form = Create_task()
    context = {
        # NOTE(review): this lists every user's tasks — confirm it should not
        # be filtered to request.user.
        "tasks": Task.objects.all(),
        "form": form,
        "user": request.user,
    }
    return render(request, "tasks/home.html", context)
def task_status(request, pk):
    """Mark the task with primary key *pk* as done (one-way toggle)."""
    # NOTE(review): .get() raises DoesNotExist (HTTP 500) for unknown pk —
    # consider get_object_or_404.
    task = Task.objects.get(id=pk)
    if not task.check_done:  # idiomatic truth test instead of '== False'
        task.check_done = True
        task.save()
    return redirect('/')
def update_task_status(request):
    # Debug stub: dumps the submitted 'check_done' checkbox values.
    # NOTE(review): returns None — Django raises if this view is wired to a
    # URL; it needs an HttpResponse/redirect before use.
    print(request.POST.getlist('check_done'))
class TaskViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for tasks."""
    queryset = Task.objects.all()
    serializer_class = TaskSerializer
    # Fixed typo: DRF reads 'permission_classes'; the misspelled
    # 'permissions_classes' attribute was silently ignored, so the intended
    # IsAuthenticatedOrReadOnly policy was never applied.
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]
import re

strings = (
    "-a-",
    "-b-",
    "-x-",
    "-aa-",
    "-ab-",
    "--",
)

# Demonstrate [abc] with exactly-one, one-or-more and zero-or-more repetition;
# a separator line is printed between the three result groups.
patterns = (r'-[abc]-', r'-[abc]+-', r'-[abc]*-')
for index, pattern in enumerate(patterns):
    if index:
        print('=========================')
    for candidate in strings:
        if re.search(pattern, candidate):
            print(candidate)
|
## Function declarations ##
def exchange(money):
    """Break *money* (won) into 500/100/50/10 coins.

    Returns (c500, c100, c50, c10, remainder)."""
    c500, money = divmod(money, 500)
    c100, money = divmod(money, 100)
    c50, money = divmod(money, 50)
    c10, money = divmod(money, 10)
    return c500, c100, c50, c10, money


## Variable declarations ##
money, c500, c100, c50, c10 = [0] * 5  # amount, 500-won coins, 100-won coins, ...

## Main code ##
if __name__ == '__main__':
    money = int(input('바꿀 돈 -->'))
    c500, c100, c50, c10, money = exchange(money)
    # Bug fix: the '10원' label was missing its colon, unlike the other labels.
    print('500원:', c500, ', 100원:', c100, ', 50원:', c50, ', 10원:', c10, ', 나머지:', money)
|
from django.shortcuts import render, redirect, HttpResponseRedirect
from .models import User, Item
from django.contrib import messages
from django.contrib.auth import logout
def index(request):
    """Landing page with the registration/login forms."""
    return render(request, 'exam/index.html')
def register(request):
    """Register a new user; on success store their details in the session."""
    result = User.objects.add_user(request.POST)
    if not result['isRegistered']:
        for error in result['errors']:
            messages.error(request, error)
        return redirect('exam:index')
    new_user = result['user']
    request.session['user_id'] = new_user.id
    request.session['name'] = new_user.name
    request.session['username'] = new_user.username
    request.session['date_hired'] = new_user.date_hired
    return redirect('exam:dashboard')
def dashboard(request):
    """Show the session user's wishlist items and everyone else's items."""
    user = request.session['user_id']
    context = {
        # Items the user wished for OR created.
        "my_items": Item.objects.filter(user=user) | Item.objects.filter(added_by=user),
        "others_items": Item.objects.exclude(user=user).exclude(added_by=user)
    }
    # Parenthesized single-argument prints behave the same on Python 2 and 3
    # (the originals were Python 2 print statements).
    print(context)
    print(Item.objects.all())
    print(User.objects.all())
    return render(request, 'exam/dashboard.html', context)
def login(request):
    """Authenticate via the custom manager and stash user info in the session."""
    viewsResponse = User.objects.login_user(request.POST)
    print(viewsResponse)  # py2/3-compatible print call (was a py2 statement)
    if viewsResponse['isLoggedIn']:
        request.session['user_id'] = viewsResponse['user'].id
        request.session['username'] = viewsResponse['user'].username
        return redirect('exam:dashboard')
    for error in viewsResponse['errors']:
        messages.error(request, error)
    return redirect('exam:index')
def create(request):
    """Render the item-creation form."""
    # Removed stray 'print id': it printed the id() builtin object, not a value.
    return render(request, 'exam/create.html')
def create_item(request):
    """Create an item owned by the session user, or re-show the errors."""
    # Debug output (py2/3-compatible print calls, originally py2 statements).
    print("*" * 50)
    print(request.POST)
    user = request.session['user_id']
    viewsResponse = Item.objects.add_item(request.POST, user)
    print(viewsResponse['item'].item_name)
    print(viewsResponse['item'].added_by)
    print(viewsResponse['item'].date_added)
    print("&" * 50)
    if viewsResponse['itemExists']:
        print("itemexists")
        return redirect('exam:dashboard')
    print("got errors")
    for error in viewsResponse['errors']:
        messages.error(request, error)
    # NOTE(review): HttpResponseRedirect with a template path redirects to the
    # literal URL 'exam/dashboard.html' — confirm a named-route redirect was meant.
    return HttpResponseRedirect('exam/dashboard.html')
def wish_items(request, id):
    """Show one item, its creator and the users who wished for it.

    NOTE: parameter name 'id' shadows the builtin; kept for URLconf
    keyword-argument compatibility."""
    print(request.POST)
    item = Item.objects.get(id=id)
    print(item)
    print("*" * 50)
    print(item.added_by.username)
    print("*" * 50)
    context = {
        'created_by': item.added_by,
        'item': item,  # reuse the already-fetched row (was a duplicate .get())
        'users': item.user.all()
    }
    print("*" * 50)
    print(context['item'])
    print(context['users'])
    return render(request, 'exam/wish_items.html', context)
def add_my_item(request, id):
    """Add the session user to item *id*'s wishlist (many-to-many)."""
    print(id)
    user = User.objects.get(id=request.session['user_id'])
    item = Item.objects.get(id=id)
    print(item.user.count())
    item.user.add(user)
    print(item.user.count())
    print("&" * 50)
    return redirect('exam:dashboard')
def remove_item(request, id):
    """Delete item *id*.

    NOTE(review): no ownership check — any logged-in user can delete any item."""
    print(id)
    print(request.POST)
    user = User.objects.get(id=request.session['user_id'])  # debug only; unused
    print(user)
    item = Item.objects.get(id=id)
    print(item)
    item.delete()
    return redirect('exam:dashboard')
def logout(request):
    # Shadows django.contrib.auth.logout imported above (that import is
    # effectively unused); only the session dict is cleared here.
    request.session.clear()
    return redirect('exam:index')
|
from flask import Flask, request, render_template
import pandas as pd
import numpy as np
import pickle
app = Flask(__name__)
# Trained classifier deserialized from disk at import time.
model = pickle.load(open('model.pkl', 'rb'))
# NOTE(review): error_bad_lines was deprecated in pandas 1.3 and removed in
# 2.0 (use on_bad_lines='skip') — confirm the pinned pandas version.
test = pd.read_csv("insomniaa test.csv", error_bad_lines=False)
# Feature columns only; 'insomnia' is the label.
x_test = test.drop('insomnia', axis=1)
@app.route('/')
def home():
    """Serve the symptom-input form."""
    return render_template('index.html')
@app.route('/predict', methods=['POST', 'GET'])
def predict():
    """Predict the insomnia level from the submitted symptom checkboxes."""
    if request.method == 'POST':
        col = x_test.columns
        # One-hot encode: 1 where a submitted form value matches a feature column.
        selected = [str(x) for x in request.form.values()]
        features = [0] * 4
        for idx in range(4):
            if col[idx] in selected:
                features[idx] = 1
        features = np.array(features).reshape(1, 4)
        prediction = model.predict(features)[0]
        return render_template('index.html', pred="Your insomnia level could be {}".format(prediction))
    # Bug fix: a GET request previously fell through and returned None,
    # which makes Flask raise a 500 error despite 'GET' being allowed.
    return render_template('index.html')
if __name__ == "__main__":
    # debug=True enables the reloader/debugger — development use only.
    app.run(debug=True)
|
import sys
input = sys.stdin.readline

# Count words of length >= M; print them ordered by frequency (desc),
# then word length (desc), then lexicographically.
N, M = map(int, input().split())
frequency = {}
for _ in range(N):
    word = input().rstrip()
    if len(word) >= M:
        frequency[word] = frequency.get(word, 0) + 1

# One composite-key sort is equivalent to the original's three stable sorts.
ordered = sorted(frequency, key=lambda w: (-frequency[w], -len(w), w))
print("\n".join(ordered))
"""
Zemberek: Turkish Tokenization Example
Java Code Example: https://bit.ly/2PsLOkj
"""
from jpype import JClass, JString
# Resolve the Zemberek Java classes up front via JPype (requires a started JVM).
TurkishTokenizer: JClass = JClass('zemberek.tokenization.TurkishTokenizer')
TokenIterator: JClass = JClass(
    'zemberek.tokenization.TurkishTokenizer.TokenIterator'
)
Token: JClass = JClass('zemberek.tokenization.Token')
def run(sentence: str) -> None:
    """
    Turkish sentence tokenization example.

    Args:
        sentence (str): Sentence to tokenize.
    """
    default_tokenizer: TurkishTokenizer = TurkishTokenizer.DEFAULT

    # 1) Iterate tokens lazily and dump every attribute.
    print('\nToken Iterator Example:\n')
    for token in default_tokenizer.getTokenIterator(JString(sentence)):
        print(
            f'Token = {token}'
            f'\n | Content = {token.content}'
            f'\n | Normalized = {token.normalized}'
            f'\n | Type = {token.type}'
            f'\n | Start = {token.start}'
            f'\n | End = {token.end}\n'
        )

    # 2) Plain string tokenization with the default tokenizer.
    print('Default Tokenization Example:\n')
    for index, token_str in enumerate(default_tokenizer.tokenizeToStrings(JString(sentence))):
        print(f' | Token String {index} = {token_str}')

    # 3) Custom tokenizer that drops punctuation and whitespace tokens.
    print('\nCustom Tokenization With Ignored Types Example:\n')
    filtering_tokenizer = (
        TurkishTokenizer.builder()
        .ignoreTypes(
            Token.Type.Punctuation, Token.Type.NewLine, Token.Type.SpaceTab
        )
        .build()
    )
    for index, token in enumerate(filtering_tokenizer.tokenize(JString(sentence))):
        print(f' | Token {index} = {token}')
|
import re #for get_application_base_url
from google.appengine.ext import db #for does_tweet_exist
from storage import TWEETS
# INPUT :: http://www.sample.co.uk:8080/folder/file.htm
# OUTPUT :: http://www.sample.co.uk:8080
def get_application_base_url(current_url):
    """Return the scheme://host[:port] prefix of *current_url*, or None."""
    # Raw string for the regex; the port was fixed from exactly {4} digits to
    # {1,5} so URLs like ':80' or ':65535' keep their port instead of
    # silently dropping it.
    regex = r'(https?://[-\w.]+(:[0-9]{1,5})?)'
    match = re.search(regex, current_url)
    if match:
        return match.group(1)
    return None
def does_tweet_exist(tweet_id):
    """Key-only datastore lookup: is *tweet_id* already stored in TWEETS?"""
    query = db.GqlQuery(
        "SELECT __key__ FROM TWEETS WHERE tweet_id =:tw_id LIMIT 1",
        tw_id=tweet_id)
    return query.count() >= 1
def number_of_tweets_to_request_per_cycle():
    """Number of tweets fetched from the API on each polling cycle."""
    return 20
# Author: Shubham Waghe
# Roll No: 13MF3IM17
# Description: WSD-II Assignment-1
import numpy as np
import matplotlib.pyplot as plt
from random import gauss
import Tkinter as tk
import math
# Given data
READINGS_EACH_DAY = 400   # sample size n collected per day
desired_limit = 0.05      # target relative accuracy (5% of p)
p = 0.2                   # initial estimate of the defective proportion
RANGE_VALUE = 3           # control limits at +/- 3 sigma
PLOT_RANGE = 2*p          # y-axis upper bound for the chart
#Stopping criteria
NO_OF_DAYS = 100
GLOBAL_OBSERVATIONS = []  # daily means accumulated across all plotting cycles
# Function to calculate sigma
def get_sigma(p):
    """Standard error of a proportion: sqrt(p * (1 - p) / n)."""
    return math.sqrt(p * (1 - p) / READINGS_EACH_DAY)
def get_number_of_days(p):
    """Days of sampling needed to reach the desired accuracy for *p*."""
    n = get_n(p)
    # Floor division preserves the Python 2 integer-division semantics and
    # also yields an int under Python 3 (roundup() returns an exact multiple,
    # so the division is exact either way).
    return roundup(n) // READINGS_EACH_DAY
def roundup(x):
    """Round *x* up to the next multiple of READINGS_EACH_DAY.

    Bug fix: the original computed ceil(x / READINGS_EACH_DAY*1.0); under
    Python 2 the integer division ran first and was only then multiplied by
    1.0, so the ceiling never actually rounded up. Force float division
    before math.ceil.
    """
    return int(math.ceil(float(x) / READINGS_EACH_DAY)) * READINGS_EACH_DAY
def get_l(p):
    """Absolute error limit: the desired_limit fraction of *p*."""
    return p * desired_limit
def get_n(p):
    """Total sample size from n = 3.84 * p * (1 - p) / l**2."""
    limit = get_l(p)
    return (3.84 * p * (1 - p)) / (limit ** 2)
def get_accuracy(p, n):
    """Achievable absolute accuracy for proportion *p* with sample size *n*."""
    return math.sqrt((3.84 * p * (1 - p)) / n)
days_plotted = 0
plotting_done = False
# Final plot. NOTE(review): the axis labels look swapped — x is the day
# index and y is the daily mean — confirm before relabelling.
plt.title("P - Chart :: Shubham Waghe")
plt.xlabel("Mean (p)")
plt.ylabel("Days")
while days_plotted <= NO_OF_DAYS:
    sigma, num_days = get_sigma(p), get_number_of_days(p)
    print "***********************************************************************"
    print "p = ",p, " n = ", get_n(p), " sigma = ", sigma, " Number of days: ", num_days
    # directly taking x<bar> mu and variance/n
    random_numbers = []
    for i in xrange(num_days):
        observations = [gauss(0.2, 0.02) for i in range(NO_OF_DAYS)]
        random_numbers.append(np.mean(observations))
    # print random_numbers
    N = len(GLOBAL_OBSERVATIONS)
    t_observations = N + len(random_numbers)
    axes = plt.axis([0, t_observations + 1, 0, PLOT_RANGE])
    # Control limits at +/- RANGE_VALUE sigma around the current estimate p.
    ucl, lcl = p + RANGE_VALUE*sigma, p - RANGE_VALUE*sigma
    print "UCL: ", ucl, " LCL: ", lcl
    print "***********************************************************************"
    m_line = plt.axhline(y = p, color='b', linestyle = None)
    ucl_line = plt.axhline(y = ucl, color='r', linestyle = '--')
    lcl_line = plt.axhline(y = lcl, color='r', linestyle = '--')
    offset_length = days_plotted
    for i in range(len(random_numbers)):
        c_pt = plt.scatter(offset_length+i+1, random_numbers[i])
        GLOBAL_OBSERVATIONS.append(random_numbers[i])
        days_plotted += 1
        # Out-of-control point: re-estimate p from all data and restart the limits.
        if random_numbers[i] > ucl or random_numbers[i] < lcl:
            plt.pause(0.05)
            print
            print "###################################################"
            print "Obeservation - Out of Control: ", random_numbers[i]
            print "###################################################"
            p = np.mean(GLOBAL_OBSERVATIONS)
            break
        if days_plotted == NO_OF_DAYS:
            plotting_done = True
            break
        plt.pause(0.05)
    if plotting_done: break
    else:
        # Continue plotting with a fresh set of control-limit lines.
        axes = plt.axis([0, t_observations + 1, 0, PLOT_RANGE])
        m_line.remove()
        ucl_line.remove()
        lcl_line.remove()
print
# Tk window showing the final revised p and the achieved accuracy.
root = tk.Tk()
root.geometry("300x200+100+100")
root.title("Final Results")
revised_p = np.mean(GLOBAL_OBSERVATIONS)
accuracy = get_accuracy(revised_p, get_n(revised_p))
p_accuracy = "{:.2f}".format(accuracy*100)
heading_label = tk.Label(root, text="Results", height=2, width=100, font=("Helvetica", 14)).pack()
p_label = tk.Label(root, text="Revised p : {:.6f}".format(revised_p), height=0, width=100).pack()
accuracy_label = tk.Label(root, text="Accuracy : {0}".format(p_accuracy + " %"), height=0, width=100).pack()
b = tk.Button(root, text="Close", command=exit).pack(padx = 10, pady = 20)
root.mainloop()
# Keep the matplotlib window responsive after the Tk dialog closes.
while True:
    plt.pause(0.05)
'''
5125->1259. Handshakes That Don't Cross
Difficulty: Hard
You are given an even number of people num_people that stand around a circle and each person shakes hands with someone else,
so that there are num_people / 2 handshakes total.
Return the number of ways these handshakes could occur such that none of the handshakes cross.
Since this number could be very big, return the answer mod 10^9 + 7
Example 1:
Input: num_people = 2
Output: 1
Example 2:
Input: num_people = 4
Output: 2
Explanation:
There are two ways to do it, the first way is [(1,2),(3,4)] and the second one is [(2,3),(4,1)].
Example 3:
Input: num_people = 6
Output: 5
Example 4:
Input: num_people = 8
Output: 14
Constraints:
2 <= num_people <= 1000
num_people % 2 == 0
''' |
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
"""DOCS HERE.
"""
# Copyright (C) 2010
# $Id: scene_object_bounding_box.py 1916 2006-11-21 17:21:05Z $
__version__ = "$Revision: 1916 $"[11:-2]
# numpy lib
import numpy as np
# FrustumTracer CORE Classes
from scene_element import *
from scene_element_point import SceneElementPoint
from scene_object import SceneObject
# --------------------------------------------------------------------------------------------------
class SceneObjectBoundingBox( SceneObject ):
    """Axis-aligned bounding box built from the vertices of one or more scene objects."""

    object_type = "BoundingBox"

    # Extremes start as None ("not seen yet"); __init__ assigns per-instance values.
    min_x = None
    max_x = None
    min_y = None
    max_y = None
    min_z = None
    max_z = None

    def __init__(self, object_name, source_target_objects):
        SceneObject.__init__(self, object_name)
        # Accept either a single object or a list of objects.
        target_objects = source_target_objects if isinstance(source_target_objects, list) else [source_target_objects]
        for target_object in target_objects:
            for element in target_object:
                for vertex in element:
                    x, y, z = vertex[0], vertex[1], vertex[2]
                    # 'is None' instead of '== None' (PEP 8); min/max track the extremes.
                    self.min_x = x if self.min_x is None else min(self.min_x, x)
                    self.max_x = x if self.max_x is None else max(self.max_x, x)
                    self.min_y = y if self.min_y is None else min(self.min_y, y)
                    self.max_y = y if self.max_y is None else max(self.max_y, y)
                    self.min_z = z if self.min_z is None else min(self.min_z, z)
                    self.max_z = z if self.max_z is None else max(self.max_z, z)
        # Add the eight corner points of the box.
        corners = [
            [self.min_x, self.min_y, self.min_z],
            [self.min_x, self.min_y, self.max_z],
            [self.min_x, self.max_y, self.max_z],
            [self.min_x, self.max_y, self.min_z],
            [self.max_x, self.min_y, self.min_z],
            [self.max_x, self.min_y, self.max_z],
            [self.max_x, self.max_y, self.max_z],
            [self.max_x, self.max_y, self.min_z],
        ]
        for index, corner in enumerate(corners):
            self.addElement( SceneElementPoint( "%s_point_%d" % (object_name, index), corner ) )
        # Render every bounding-box point in green.
        for element in self:
            element.element_color = VBase4(0, 1, 0, 1)
# --------------------------------------------------------------------------------------------------
|
import unittest
from api.sources.cptec.getter.cptec_api_getter import CptecAPIGetter
class CptecAPIGetterTest(unittest.TestCase):
    # Integration test: hits the live CPTEC API, so it requires network access.

    def setUp(self):
        # Double-underscore attribute is name-mangled to _CptecAPIGetterTest__cptec_api_getter.
        self.__cptec_api_getter = CptecAPIGetter(-22.87216997446473, -48.44871995614285) # Botucatu - SP

    def test_retrieving_data_from_api(self):
        # Any non-None payload counts as success; the content is not validated.
        self.assertIsNotNone(self.__cptec_api_getter.make_request())


if __name__ == '__main__':
    unittest.main()
|
import sys
def dfs(graph, vertex, visited):
    """Recursive depth-first search; bumps the module-global visit counter."""
    global result
    visited[vertex] = True
    result += 1
    for neighbour in graph[vertex]:
        if not visited[neighbour]:
            dfs(graph, neighbour, visited)
# Plain recursion overflows the default ~1000-frame limit on large graphs.
sys.setrecursionlimit(10 ** 6)

node = int(input())
graph = [[] for _ in range(node + 1)]
edge = int(input())
for _ in range(edge):
    node1, node2 = map(int, sys.stdin.readline().split())
    graph[node1].append(node2)
    graph[node2].append(node1)

visited = [False] * (node + 1)
result = -1  # start at -1 so vertex 1 itself is not counted in the answer
dfs(graph, 1, visited)
print(result)
#dayNum.py
#program that accepts a date as month/day/year, verifies that it is a valid date, and then calculates the corresponding day number.
def main():
month,day,year = input("Enter date in format (mm/dd/yyyy)").split('/')
month = int(month)
day = int(day)
year = int(year)
dayNum = 31*(month-1) + day
if month > 2:
dayNum -= (4*month + 23)//10
if (year % 4 == 0 or year % 100 == 0) and not(year % 400 == 0) and month > 2:
dayNum += 1
print("The day number is", dayNum)
main()
|
#
# Working with files
#
def main():
    """Demo: read 'textfile.txt' and print it in chunks of at most 20 characters.

    (The commented-out variants in the original showed "w+"/"a" modes and
    read()/readlines(); only the readline(20) variant is kept live.)
    """
    # 'with' closes the file even on error — the original leaked the handle
    # and its `f.mode == 'r'` check was always true for a file opened in 'r'.
    with open("textfile.txt", "r") as f:
        chunk = f.readline(20)  # at most 20 characters of the current line
        while chunk:
            print(chunk)
            chunk = f.readline(20)


if __name__ == "__main__":
    main()
from time import time, localtime, sleep
class Clock(object):
    """Digital clock."""

    def __init__(self, hour=0, minute=0, second=0):
        self._hour = hour
        self._minute = minute
        self._second = second

    @classmethod
    def now(cls):
        """Build a clock initialized from the current system time."""
        current = localtime(time())
        return cls(current.tm_hour, current.tm_min, current.tm_sec)

    def run(self):
        """Advance the clock by one second, cascading the roll-overs."""
        self._second += 1
        if self._second == 60:
            self._second = 0
            self._minute += 1
            if self._minute == 60:
                self._minute = 0
                self._hour += 1
                if self._hour == 24:
                    self._hour = 0

    def show(self):
        """Return the time formatted as HH:MM:SS."""
        return '%02d:%02d:%02d' % (self._hour, self._minute, self._second)
def main():
    """Create a clock from the current system time (via the classmethod)
    and tick it forever, printing once per second."""
    clock = Clock.now()
    while True:
        print(clock.show())
        sleep(1)   # wall-clock tick
        clock.run()


if __name__ == '__main__':
    main()
|
from threading import Thread
from server_socket import ServerSocket
from socket_wrapper import SocketWrapper
from response_protocol import *
from db import DB
from config import *
class Server(object):
    """Chat server: accepts clients, authenticates them and relays messages."""

    def __init__(self):
        # Listening socket.
        self.server_socket = ServerSocket()
        # Connected clients: username -> {'sock': SocketWrapper, 'nickname': str}
        self.clients = dict()
        # Dispatch table: request id -> handler function.
        self.request_handle_functions = dict()
        self.register(REQUEST_LOGIN,
                      lambda sf, data: self.request_login_handle(sf, data))
        self.register(REQUEST_CHAT,
                      lambda sf, data: self.request_chat_handle(sf, data))

    def register(self, request_id, handle_function):
        """Register the handler for a request id."""
        self.request_handle_functions[request_id] = handle_function

    def startup(self):
        """Accept clients forever; each connection is served on its own thread."""
        while True:
            sock, addr = self.server_socket.accept()
            client_sock = SocketWrapper(sock)
            Thread(target=lambda: self.request_handle(client_sock)).start()

    def request_handle(self, client_sock):
        """Per-client loop: read, parse and dispatch requests until disconnect."""
        while True:
            request_text = client_sock.recv_data()
            if not request_text:
                print("客户端下线!")
                self.remove_offline_user(client_sock)
                break
            request_data = self.parse_request_text(request_text)
            handle_function = self.request_handle_functions[request_data['request_id']]
            if handle_function:
                handle_function(client_sock, request_data)

    def remove_offline_user(self, client_sock):
        """Drop the clients-table entry that owns *client_sock*."""
        username = None
        for uname, csock in self.clients.items():
            if csock['sock'].sock == client_sock.sock:
                username = uname
                break
        # Bug fix: 'del self.clients[None]' raised KeyError when the socket
        # belonged to a client that never logged in.
        if username is not None:
            del self.clients[username]

    @staticmethod
    def parse_request_text(request_text):
        """Split a raw request into a dict keyed by field name."""
        request_text_list = request_text.split(DELIMITER)
        request_data = dict()
        request_data['request_id'] = request_text_list[0]
        if request_text_list[0] == REQUEST_LOGIN:
            request_data['username'] = request_text_list[1]
            request_data['password'] = request_text_list[2]
        if request_text_list[0] == REQUEST_CHAT:
            request_data['username'] = request_text_list[1]
            # Bug fix: the chat payload was stored under 'password', but
            # request_chat_handle reads 'messages' — every chat request
            # raised KeyError.
            request_data['messages'] = request_text_list[2]
        return request_data

    def request_login_handle(self, client_sock, request_data):
        """Validate credentials, record the session on success, reply either way."""
        username = request_data['username']
        password = request_data['password']
        ret, nickname, username = self.check_user_login(username, password)
        if ret == '1':
            self.clients[username] = {'sock': client_sock, 'nickname': nickname}
        response_text = ResponseProtocol.response_login_result(ret, nickname, username)
        client_sock.send_data(response_text)

    def check_user_login(self, username, password):
        """Look up the user; return (ok_flag, nickname, username)."""
        # SECURITY: username is concatenated straight into the SQL string and
        # comes from the network — injectable. Switch to the DB wrapper's
        # parameter binding if it offers one.
        sql = "select * from users where user_name='" + username + "'"
        db_conn = DB()
        results = db_conn.get_one(sql)
        # Unknown user.
        if not results:
            return "0", "", username
        # Wrong password.
        if results['user_password'] != password:
            return "0", "", username
        # Bug fix: success previously returned "|" while the caller compares
        # against '1', so no login could ever succeed.
        return "1", results["user_nickname"], username

    def request_chat_handle(self, client_sock, request_data):
        """Broadcast a chat message to every logged-in client except the sender."""
        username = request_data['username']
        messages = request_data['messages']
        nickname = self.clients[username]['nickname']
        response_text = ResponseProtocol.response_chat(nickname, messages)
        for uname, csock in self.clients.items():
            if uname == username:
                continue
            csock['sock'].send_data(response_text)


if __name__ == '__main__':
    server = Server()
    server.startup()
|
import numpy as np
from optimization import kernel_optim
def get_representations(types=()):
    """Return the representation names to evaluate.

    With no/empty *types* all four known representations are returned;
    otherwise only 'STRF' is implemented and anything else raises.

    Raises:
        ValueError: if *types* is non-empty and does not contain 'STRF'.
    """
    # Immutable default replaces the mutable-default anti-pattern; the
    # original also built a local list that it then ignored on this path.
    if not types:
        return ['STRF', 'FFT', 'STFT', 'CQT']
    if 'STRF' not in types:
        raise ValueError('Not implemented')
    # NOTE: any extra entries alongside 'STRF' are silently ignored,
    # matching the original behavior.
    return ['STRF']
def get_features(representation_name='STRF'):
    """Load the 16 precomputed per-sound feature vectors for *representation_name*.

    Returns the stacked features transposed (features x sounds); only 'STRF'
    is available — any other name returns None.
    """
    if representation_name != 'STRF':
        return None
    vectors = [
        np.loadtxt('../tmpdata/data_reduced_sound%02i.txt' % (sound_index + 1)).flatten()
        for sound_index in range(16)
    ]
    return np.array(vectors).T
def test_all():
    """Run the kernel optimization for every selected representation."""
    dissimilarities = np.loadtxt('../tmpdata/dissimilarity_matrix.txt')
    for representation in get_representations(types=['STRF']):
        features = get_features(representation)
        kernel_optim(features, dissimilarities, num_loops=1000)


if __name__ == "__main__":
    test_all()
"""
# Definition for a Node.
class Node:
def __init__(self, x: int, next: 'Node' = None, random: 'Node' = None):
self.val = int(x)
self.next = next
self.random = random
"""
# Time: O(N), Space: O(N)
class Solution:
    def copyRandomList(self, head: 'Node') -> 'Node':
        """Deep-copy a linked list whose nodes carry an extra 'random' pointer.

        Single pass: `nodes` maps each original node to its clone, so a
        random target cloned before its list position is reused (not
        duplicated) when the walk reaches it.
        """
        ans_head = prev = Node(-1, None, None)  # dummy head anchoring the copy
        original = head
        nodes = {}  # original node -> cloned node
        while original:
            # Clone (or fetch the existing clone of) the random target first.
            if original.random is None:
                rand = None
            elif original.random in nodes:
                rand = nodes[original.random]
            else:
                rand = Node(original.random.val, None, None)
                nodes[original.random] = rand
            # The current node may already exist as someone's random clone;
            # in that case only its random pointer still needs wiring.
            if original in nodes:
                prev.next = nodes[original]
                prev.next.random = rand
            else:
                prev.next = Node(original.val, None, rand)
                nodes[original] = prev.next
            prev, original = prev.next, original.next
        return ans_head.next
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @date : 2019/5/10
# @author : Rong Ya-feng
# @desc : Chapter 1 : Programming Exercise1
# Case Study:Display the current time(GMT)
"""
import time
currentTime = time.time()
# obtain the total seconds since midnight, Jan 1, 1970
totalSeconds = int(currentTime)
currentSecond = totalSeconds % 60
totalMinutes = totalSeconds // 60
currentMinute = totalMinutes % 60
totalHours = totalMinutes // 60
currentHour = totalHours % 24
print("Current time is", currentHour, ":", currentMinute, ":", currentSecond, "GMT")
"""
# Case Study:Computing distance
'''
# Prompt the user for inputting two points
x1, y1 = eval(input("Enter x1 and y1 for point 1: "))
x2, y2 = eval(input("Enter x2 and y2 for point 2: "))
distance = ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
print("The distance between the two points is", distance)
'''
# Convert Celsius to Fahrenheit
'''
celsius = eval(input("Input a degree in Celsius:"))
fahrenheit = (9 / 5) * celsius + 32
print(celsius, "Celsius is", fahrenheit, "Fahrenheit")
'''
# Compute the volume of a cylinder
'''
radius, length = eval(input("Enter the radius and length of a cylinder:"))
PI = 3.1415926
area = radius * radius * PI
volume = area * length
print("The area is", area)
print("The volume is", volume)
'''
# Convert feet into meters
# Financial application : calculate tips
'''
feet = eval(input("Enter a value for feet:"))
meters = feet * 0.305
print(feet, "feet is", meters, "meters")
'''
# Convert pounds into kilograms
'''
pounds = eval(input("Enter a value in pounds:"))
kilograms = pounds * 0.454
print(pounds, "pounds is", kilograms, "kilograms")
'''
# Calculate tips
'''
subtotal, gratuityRate = eval(input("Enter the subtotal and a gratuity rate:"))
gratuity = subtotal * gratuityRate
total = subtotal + gratuity
print("The gratuity is",gratuity, "and the total is ", total)
'''
# Sum the digits in an integer
'''
digits = eval(input("Enter a number between 0 and 1000:"))
singleDigits = digits % 10
hundredsDigit = digits // 100
tenDigit = (digits // 10) - (hundredsDigit * 10)
sum = hundredsDigit + tenDigit + singleDigits
print("The sum of the digits is",sum)
'''
# Find the number of years and days
'''
minutes = eval(input("Enter the number of minutes:"))
years = minutes // 525600
resMinutes = minutes % 525600
days = resMinutes // 1440
print(minutes, "minutes is approximately", years, "years and",days, "days" )
'''
# Calculate energy
'''
waterAmount = eval(input("Enter the amount of water in kilograms:"))
initialTemperature = eval(input("Enter the initial temperature:"))
finalTemperature = eval(input("Enter the final temperature:"))
energy = waterAmount * (finalTemperature - initialTemperature) * 4184
print("The energy needed is", energy)
'''
# Wind chill temperature
'''
outsideTemperature = eval(input("Enter the temperature in Fahrenheit between -58 and 41:"))
windSpeed = eval(input("Enter the wind speed in miles per hour:"))
windChill = 35.74 + 0.6215 * outsideTemperature - 35.75 * windSpeed ** 0.16 + \
0.4275 * outsideTemperature * windSpeed ** 0.16
print("The wind chill index is", windChill)
'''
# Find runway length
'''
speed, acceleration = eval(input("Enter speed and acceleration:"))
runwayLength = speed**2 / (2 * acceleration)
print("The minimum runway length for this plane is", runwayLength, "meters")
'''
# Print a table
'''
print("a b a ** b")
x = 1
for i in range(5):
print(x, " ", x+1, " ", x ** (x + 1))
'''
# Split digits
'''
digit = eval(input("Enter an integer:"))
a3 = digit // 1000
a2 = digit % 1000 // 100
a1 = digit % 1000 % 100 // 10
a0 = digit % 1000 % 100 % 10
print(a3, a2, a1, a0)
'''
# Area of a triangle
'''
x1, y1, x2, y2, x3, y3 = eval(input("Enter three points of a triangle:"))
side1 = ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
side2 = ((x2 - x3) ** 2 + (y2 - y3) ** 2) ** 0.5
side3 = ((x1 - x3) ** 2 + (y1 - y3) ** 2) ** 0.5
s = (side1 + side2 + side3) / 2
area = (s * (s - side1) * (s - side2) * (s - side3)) ** 0.5
print("The area of the triangle is", area)
'''
# Area of a hexagon
'''
side = eval(input("Enter the side:"))
area = (3 * 3 ** 0.5 * side ** 2 ) / 2
print("The area of the hexagon is", area)
''' |
# -*- coding: utf-8 -*-
from django.test import (
Client,
TestCase,
)
from service_area_mozio.shapes.models import (
Polygon,
Vertex,
)
class PolygonTestCase(TestCase):
    """Model, manager and view tests for Polygon/Vertex."""

    def setUp(self):
        """Create one invalid (2-vertex) and one valid (3-vertex) polygon."""
        two_vertex = Polygon.objects.create(name='line')
        for coord in (1, 2):
            Vertex.objects.create(lat=coord, lon=coord, polygon=two_vertex)
        three_vertex = Polygon.objects.create(name='triangle')
        for coord in (1, 2, 3):
            Vertex.objects.create(lat=coord, lon=coord, polygon=three_vertex)

    def test_managers(self):
        """'everything' sees all polygons; 'objects' only the valid ones."""
        self.assertEqual(Polygon.everything.all().count(), 2)
        self.assertEqual(Polygon.objects.all().count(), 1)

    def test_sides_property(self):
        """The sides property reflects the vertex count."""
        self.assertLess(Polygon.everything.get(name='line').sides, 3)
        self.assertGreaterEqual(Polygon.everything.get(name='triangle').sides, 3)

    def test_polygon_view_list(self):
        """GET /shapes/ lists exactly the valid polygons."""
        response = Client().get('/shapes/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(list(response.context['object_list']),
                         list(Polygon.objects.all()))

    def test_polygon_view_create(self):
        """POST /shapes/create/ adds a new polygon."""
        before = Polygon.objects.all().count()
        Client().post(
            '/shapes/create/',
            {
                'name': 'test',
                'vertices[]': (
                    '10.10, -100.100',
                    '20.20, -200.200',
                    '30.30, -300.300',
                )
            }
        )
        self.assertGreater(Polygon.objects.all().count(), before)

    def test_polygon_view_delete(self):
        """POST /shapes/delete/<id> removes the polygon."""
        doomed = Polygon.objects.create(name='triangle2')
        for coord in (1, 2, 3):
            Vertex.objects.create(lat=coord, lon=coord, polygon=doomed)
        before = Polygon.objects.all().count()
        Client().post(
            '/shapes/delete/%s' % doomed.id,
            {
                'submit': 'delete',
            }
        )
        self.assertLess(Polygon.objects.all().count(), before)
|
import win32api
import cv2
import time
import os
import enum
from .CBotDebugState import CBotDebugState
from .extractGameMap import extractGameMap
from .CNavigator import CNavigator
from CMinimapRecognizer import CMinimapRecognizer
class BotState(enum.Enum):
    # High-level bot mode; CBot.process dispatches on this.
    IDLE = 0    # scanning the map / deciding what to do
    MOVING = 1  # travelling to a target
    BATTLE = 2  # in combat (no handler implemented yet)
class CBot:
    """Game-bot state machine: recognizes the minimap while idle and dumps data."""

    def __init__(self, logger, navigator=None):
        self.logger = logger
        self._minimapRecognizer = CMinimapRecognizer()
        self.navigator = navigator if navigator else CNavigator()
        self.state = BotState.IDLE
        self._lastDump = 0  # timestamp (s) of the last minimap dump

    def isActive(self):
        """The bot keeps running until the user presses 'Q'."""
        return (win32api.GetAsyncKeyState(ord('Q')) & 1) == 0

    def process(self, screenshot):
        """Handle one frame; returns an (actions, debug state) tuple."""
        debug = CBotDebugState(screenshot, self.logger)
        # 'A' key: save the raw screenshot for offline inspection.
        if (win32api.GetAsyncKeyState(ord('A')) & 1) == 1:
            self.saveScreenshot(screenshot)
        if BotState.IDLE == self.state:
            return self._onIdle(screenshot, debug)
        if BotState.MOVING == self.state:
            return self._onMoving(screenshot, debug)
        # BATTLE (or any unhandled state): no actions.
        return ([], debug)

    def _onIdle(self, screenshot, debug):
        """Recognize the minimap and (temporarily) dump it for data collection."""
        mapImg = extractGameMap(screenshot)
        mapMaskWalls, mapMaskUnknown = self._minimapRecognizer.process(mapImg)
        debug.put('map mask walls', mapMaskWalls)
        debug.put('map mask unknown', mapMaskUnknown)
        # Temporary code for collecting data: dump the minimap every 2 seconds.
        T = int(time.time())
        if 2 < (T - self._lastDump):
            os.makedirs("minimap", exist_ok=True)
            fname = 'minimap/%d_%%s.jpg' % T
            cv2.imwrite(fname % 'input', mapImg)
            cv2.imwrite(fname % 'walls', mapMaskWalls)
            cv2.imwrite(fname % 'unknown', mapMaskUnknown)
            self._lastDump = T
        # self.navigator.update(mapMask)
        # TODO: return action for exploring map
        return ([], debug)

    def _onMoving(self, screenshot, debug):
        # TODO: Find a way to detect when moving is ended
        return ([], debug)

    def saveScreenshot(self, screenshot):
        """Write *screenshot* to screenshots/<nanosecond timestamp>.jpg."""
        os.makedirs("screenshots", exist_ok=True)
        fname = 'screenshots/%d.jpg' % time.time_ns()
        cv2.imwrite(fname, screenshot)
        self.logger.info('Screenshot saved to %s' % fname)
# Set to True once a human "Player" agent joins; silences simulation narration.
has_player = False


def vis_print(*args, end="\n"):
    """Print *args to stdout unless a human player is present.

    Fix: the varargs parameter was misleadingly named ``*kwargs`` although it
    collects positional arguments; renamed to the conventional ``*args``
    (invisible to callers, who pass positionally).
    """
    if not has_player:
        print(*args, end=end)
# ABSTRACT DECISION CLASS
class Decision:
    """DO NOT INSTANTIATE! Abstract base class for all agent decisions."""

    def __init__(self, agent):
        global has_player
        self.agent = agent
        # A human player disables the simulation's console narration.
        if agent.get_name() == "Player":
            has_player = True
# UPGRADE DECISIONS
class UpgradeDecision(Decision):
    """DO NOT INSTANTIATE! Abstract base for building-upgrade decisions."""

    def to_building(self):
        """Return the building instance this decision would upgrade."""
        raise NotImplementedError()

    def execute(self):
        """Apply the upgrade; implemented by concrete subclasses."""
        raise NotImplementedError()
class UpgradeBarracksDecision(UpgradeDecision):
    """Decision to upgrade (or initially build) the Barracks."""

    def to_building(self):
        """Return the barracks building this decision targets."""
        return self.agent.get_barracks()

    def execute(self):
        """Announce the upgrade, record the decision, and apply it."""
        agent = self.agent
        vis_print(f"{agent.get_name()} has upgraded the Barracks.")
        vis_print()
        if agent.ui is not None:
            # Level 1 means the building was just constructed, not upgraded.
            message = "Upgraded Barracks." if agent.village.barracks.get_level() > 1 else "Built Barracks."
            agent.ui.append_message(message)
        agent.add_decision(self)
        return agent.upgrade_barracks()
class UpgradeFarmDecision(UpgradeDecision):
    """Decision to upgrade the Farm."""

    def to_building(self):
        """Return the farm building this decision targets."""
        return self.agent.get_farm()

    def execute(self):
        """Announce the upgrade, record the decision, and apply it."""
        agent = self.agent
        vis_print(f"{agent.get_name()} has upgraded the Farm.")
        vis_print()
        if agent.ui is not None:
            agent.ui.append_message("Upgraded Farm.")
        agent.add_decision(self)
        return agent.upgrade_farm()
class UpgradeMineDecision(UpgradeDecision):
    """Decision to upgrade the Mine."""

    def to_building(self):
        """Return the mine building this decision targets."""
        return self.agent.get_mine()

    def execute(self):
        """Announce the upgrade, record the decision, and apply it."""
        agent = self.agent
        vis_print(f"{agent.get_name()} has upgraded the Mine.")
        vis_print()
        if agent.ui is not None:
            agent.ui.append_message("Upgraded Mine.")
        agent.add_decision(self)
        return agent.upgrade_mine()
class UpgradeQuarryDecision(UpgradeDecision):
    """Decision to upgrade the Quarry."""

    def to_building(self):
        """Return the quarry building this decision targets."""
        return self.agent.get_quarry()

    def execute(self):
        """Announce the upgrade, record the decision, and apply it."""
        agent = self.agent
        vis_print(f"{agent.get_name()} has upgraded the Quarry.")
        vis_print()
        if agent.ui is not None:
            agent.ui.append_message("Upgraded Quarry.")
        agent.add_decision(self)
        return agent.upgrade_quarry()
class UpgradeSawmillDecision(UpgradeDecision):
    """Decision to upgrade the Sawmill."""

    def to_building(self):
        """Return the sawmill building this decision targets."""
        return self.agent.get_sawmill()

    def execute(self):
        """Announce the upgrade, record the decision, and apply it."""
        agent = self.agent
        vis_print(f"{agent.get_name()} has upgraded the Sawmill.")
        vis_print()
        if agent.ui is not None:
            agent.ui.append_message("Upgraded Sawmill.")
        agent.add_decision(self)
        return agent.upgrade_sawmill()
class UpgradeWallDecision(UpgradeDecision):
    """Decision to upgrade (or initially build) the Wall."""

    def to_building(self):
        """Return the wall building this decision targets."""
        return self.agent.get_wall()

    def execute(self):
        """Announce the upgrade, record the decision, and apply it."""
        agent = self.agent
        vis_print(f"{agent.get_name()} has upgraded the Wall.")
        vis_print()
        if agent.ui is not None:
            # Level 1 means the wall was just constructed, not upgraded.
            message = "Upgraded Wall." if agent.village.wall.get_level() > 1 else "Built Wall."
            agent.ui.append_message(message)
        agent.add_decision(self)
        return agent.upgrade_wall()
class UpgradeWarehouseDecision(UpgradeDecision):
    """Decision to upgrade the Warehouse."""

    def to_building(self):
        """Return the warehouse building this decision targets."""
        return self.agent.get_warehouse()

    def execute(self):
        """Announce the upgrade, record the decision, and apply it."""
        agent = self.agent
        vis_print(f"{agent.get_name()} has upgraded the Warehouse.")
        vis_print()
        if agent.ui is not None:
            agent.ui.append_message("Upgraded Warehouse.")
        agent.add_decision(self)
        return agent.upgrade_warehouse()
class UpgradeNothingDecision(UpgradeDecision):
    """Decision to skip upgrading this turn."""

    def to_building(self):
        """No building is targeted by this decision."""
        return None

    def execute(self):
        """Announce the no-op, record the decision, and apply it."""
        agent = self.agent
        vis_print(f"{agent.get_name()} has upgraded nothing.")
        vis_print()
        if agent.ui is not None:
            agent.ui.append_message("Upgraded nothing.")
        agent.add_decision(self)
        return agent.upgrade_nothing()
# RECRUIT DECISIONS
class RecruitDecision(Decision):
    """DO NOT INSTANTIATE! Abstract base for recruit/demote decisions.

    Stores an upper bound ``n`` (fixed at construction) on how many units
    the concrete decision may process.
    """

    def __init__(self, agent, n="1"):
        super().__init__(agent)
        self.n = self.check(n)

    def execute(self, n):
        # Subclasses call this first to reject counts above the stored bound.
        if self.check(n) > self.n:
            raise InvalidDecisionException()

    @staticmethod
    def check(n):
        """Validate n (an int or a digit string, >= 1) and return it as int."""
        if not (isinstance(n, int) or n.isdigit()):
            raise InvalidDecisionException()
        value = int(n)
        if value < 1:
            raise InvalidDecisionException()
        return value
class RecruitSpiesDecision(RecruitDecision):
    """Decision to recruit up to ``self.n`` Spies."""

    def execute(self, n):
        super().execute(n)
        self.n = n
        agent = self.agent
        vis_print(f"{agent.get_name()} has recruited {n} Spies.")
        vis_print()
        if agent.ui is not None:
            agent.ui.append_message(f"Recruited {n} Spies.")
        agent.add_decision(self)
        return agent.recruit_spies(n)
class RecruitWarriorsDecision(RecruitDecision):
    """Decision to recruit up to ``self.n`` Warriors."""

    def execute(self, n):
        super().execute(n)
        self.n = n
        agent = self.agent
        vis_print(f"{agent.get_name()} has recruited {n} Warriors.")
        vis_print()
        if agent.ui is not None:
            agent.ui.append_message(f"Recruited {n} Warriors.")
        agent.add_decision(self)
        return agent.recruit_warriors(n)
class RecruitArchersDecision(RecruitDecision):
    """Decision to recruit up to ``self.n`` Archers."""

    def execute(self, n):
        super().execute(n)
        self.n = n
        agent = self.agent
        vis_print(f"{agent.get_name()} has recruited {n} Archers.")
        vis_print()
        if agent.ui is not None:
            agent.ui.append_message(f"Recruited {n} Archers.")
        agent.add_decision(self)
        return agent.recruit_archers(n)
class RecruitCatapultsDecision(RecruitDecision):
    """Decision to recruit up to ``self.n`` Catapults."""

    def execute(self, n):
        super().execute(n)
        self.n = n
        agent = self.agent
        vis_print(f"{agent.get_name()} has recruited {n} Catapults.")
        vis_print()
        if agent.ui is not None:
            agent.ui.append_message(f"Recruited {n} Catapults.")
        agent.add_decision(self)
        return agent.recruit_catapults(n)
class RecruitCavalrymenDecision(RecruitDecision):
    """Decision to recruit up to ``self.n`` Cavalrymen."""

    def execute(self, n):
        super().execute(n)
        self.n = n
        agent = self.agent
        vis_print(f"{agent.get_name()} has recruited {n} Cavalrymen.")
        vis_print()
        if agent.ui is not None:
            agent.ui.append_message(f"Recruited {n} Cavalrymen.")
        agent.add_decision(self)
        return agent.recruit_cavalrymen(n)
class DemoteSpiesDecision(RecruitDecision):
    """Decision to demote up to ``self.n`` Spies."""

    def execute(self, n):
        super().execute(n)
        self.n = n
        agent = self.agent
        vis_print(f"{agent.get_name()} has demoted {n} Spies.")
        vis_print()
        if agent.ui is not None:
            agent.ui.append_message(f"Demoted {n} Spies.")
        agent.add_decision(self)
        return agent.demote_spies(n)
class DemoteWarriorsDecision(RecruitDecision):
    """Decision to demote up to ``self.n`` Warriors."""

    def execute(self, n):
        super().execute(n)
        self.n = n
        agent = self.agent
        vis_print(f"{agent.get_name()} has demoted {n} Warriors.")
        vis_print()
        if agent.ui is not None:
            agent.ui.append_message(f"Demoted {n} Warriors.")
        agent.add_decision(self)
        return agent.demote_warriors(n)
class DemoteArchersDecision(RecruitDecision):
    """Decision to demote up to ``self.n`` Archers."""

    def execute(self, n):
        super().execute(n)
        self.n = n
        agent = self.agent
        vis_print(f"{agent.get_name()} has demoted {n} Archers.")
        vis_print()
        if agent.ui is not None:
            agent.ui.append_message(f"Demoted {n} Archers.")
        agent.add_decision(self)
        return agent.demote_archers(n)
class DemoteCatapultsDecision(RecruitDecision):
    """Decision to demote up to ``self.n`` Catapults."""

    def execute(self, n):
        super().execute(n)
        self.n = n
        agent = self.agent
        vis_print(f"{agent.get_name()} has demoted {n} Catapults.")
        vis_print()
        if agent.ui is not None:
            agent.ui.append_message(f"Demoted {n} Catapults.")
        agent.add_decision(self)
        return agent.demote_catapults(n)
class DemoteCavalrymenDecision(RecruitDecision):
    """Decision to demote up to ``self.n`` Cavalrymen."""

    def execute(self, n):
        super().execute(n)
        self.n = n
        agent = self.agent
        vis_print(f"{agent.get_name()} has demoted {n} Cavalrymen.")
        vis_print()
        if agent.ui is not None:
            agent.ui.append_message(f"Demoted {n} Cavalrymen.")
        agent.add_decision(self)
        return agent.demote_cavalrymen(n)
class RecruitNothingDecision(RecruitDecision):
    """Decision to recruit no units this turn."""

    def execute(self, n="0"):
        # Intentionally skips the bound check: nothing is recruited.
        agent = self.agent
        vis_print(f"{agent.get_name()} has recruited nothing.")
        vis_print()
        if agent.ui is not None:
            agent.ui.append_message("Recruited no soldiers.")
        agent.add_decision(self)
        return agent.recruit_nothing()
# SPYING DECISIONS
class SpyingDecision(Decision):
    """DO NOT INSTANTIATE! Abstract base for espionage decisions."""

    def __init__(self, agent, enemy_village_name=""):
        super().__init__(agent)
        # Name of the village to spy on; empty for the no-op decision.
        self.enemy_village_name = enemy_village_name

    def execute(self):
        """Carry out the espionage; implemented by concrete subclasses."""
        raise NotImplementedError()
class SpyVillageDecision(SpyingDecision):
    """Decision to spy on the configured enemy village."""

    def execute(self):
        """Announce the espionage, record the decision, and apply it."""
        agent = self.agent
        target = self.enemy_village_name
        vis_print(f"{agent.get_name()} has spied {target}.")
        vis_print()
        if agent.ui is not None:
            agent.ui.append_message(f"Spied {target}.")
        agent.add_decision(self)
        return agent.spy(target)
class SpyNothingDecision(SpyingDecision):
    """Decision to skip espionage this turn."""

    def execute(self):
        """Announce the no-op, record the decision, and apply it."""
        agent = self.agent
        vis_print(f"{agent.get_name()} has spied nothing.")
        vis_print()
        if agent.ui is not None:
            agent.ui.append_message("Spied nothing.")
        agent.add_decision(self)
        return agent.spy_nothing()
# ATTACK DECISIONS
class AttackDecision(Decision):
    """DO NOT INSTANTIATE! Abstract base for attack decisions.

    Stores per-unit-type upper bounds, fixed at construction time.
    """

    def __init__(self, agent, n_warriors="0", n_archers="0", n_catapults="0", n_cavalrymen="0", enemy_village_name=""):
        super().__init__(agent)
        self.n_warriors = self.check(n_warriors)
        self.n_archers = self.check(n_archers)
        self.n_catapults = self.check(n_catapults)
        self.n_cavalrymen = self.check(n_cavalrymen)
        self.enemy_village_name = enemy_village_name

    def execute(self, n_warriors, n_archers, n_catapults, n_cavalrymen):
        """Validate the requested army against the stored bounds."""
        requested = (self.check(n_warriors), self.check(n_archers),
                     self.check(n_catapults), self.check(n_cavalrymen))
        bounds = (self.n_warriors, self.n_archers,
                  self.n_catapults, self.n_cavalrymen)
        if any(req > cap for req, cap in zip(requested, bounds)):
            raise InvalidDecisionException()
        # NOTE(review): this sums the *stored* bounds, not the requested
        # counts — presumably meant to reject an all-zero attack; confirm.
        if sum(bounds) <= 0:
            raise InvalidDecisionException()

    @staticmethod
    def check(n):
        """Validate n (an int or a digit string, >= 0) and return it as int."""
        if not (isinstance(n, int) or n.isdigit()):
            raise InvalidDecisionException()
        value = int(n)
        if value < 0:
            raise InvalidDecisionException()
        return value
class AttackVillageDecision(AttackDecision):
    """Decision to attack a named enemy village with a concrete army."""

    def __init__(self, agent, n_warriors, n_archers, n_catapults, n_cavalrymen, enemy_village_name):
        super().__init__(agent, n_warriors, n_archers, n_catapults, n_cavalrymen, enemy_village_name)
        # An attack with zero total units is meaningless.
        if self.n_warriors + self.n_archers + self.n_catapults + self.n_cavalrymen <= 0:
            raise InvalidDecisionException()

    def execute(self, n_warriors, n_archers, n_catapults, n_cavalrymen):
        """Validate, announce, record, and dispatch the attack."""
        super().execute(n_warriors, n_archers, n_catapults, n_cavalrymen)
        self.n_warriors = n_warriors
        self.n_archers = n_archers
        self.n_catapults = n_catapults
        self.n_cavalrymen = n_cavalrymen
        agent = self.agent
        target = self.enemy_village_name
        vis_print(f"{agent.get_name()} has attacked {target} using ", end="")
        vis_print(f"{n_warriors} warriors, {n_archers} archers, {n_catapults} catapults and {n_cavalrymen} cavalrymen.")
        vis_print()
        if agent.ui is not None:
            agent.ui.append_message(f"Attacked {target} using {n_warriors} warriors, {n_archers} "
                                    f"archers, {n_catapults} catapults and {n_cavalrymen} cavalrymen.")
        agent.add_decision(self)
        return agent.send_attack(n_warriors, n_archers, n_catapults, n_cavalrymen, target)
class AttackNothingDecision(AttackDecision):
    """Decision to launch no attack this turn."""

    def execute(self, n_warriors="0", n_archers="0", n_catapults="0", n_cavalrymen="0"):
        # Intentionally skips the base-class validation: nothing is sent.
        agent = self.agent
        vis_print(f"{agent.get_name()} has attacked nothing.")
        vis_print()
        if agent.ui is not None:
            agent.ui.append_message("Attacked nothing.")
        agent.add_decision(self)
        return agent.attack_nothing()
# EXCEPTION
class InvalidDecisionException(Exception):
    """Raised when an agent submits a malformed or out-of-bounds decision.

    Fix: previously derived from BaseException, which placed it alongside
    SystemExit/KeyboardInterrupt and let it escape generic
    ``except Exception`` handlers; user-defined exceptions should derive
    from Exception.
    """

    def __init__(self):
        super().__init__("Invalid decision made. Double check the agent code.")
|
from django.db import models
class OneGame(models.Model):
    """A single game, addressable by a unique URL slug."""

    # Unique, URL-safe identifier used to look the game up.
    slug = models.SlugField(unique=True)
|
# ---------------------------------------------------------------------------------------------------------------------
# sys
import sys
# ---------------------------------------------------------------------------------------------------------------------
# system
from math import sqrt, ceil
# ---------------------------------------------------------------------------------------------------------------------
# scientific
import numpy as np
# ---------------------------------------------------------------------------------------------------------------------
# PyQuantum.TC_Lindblad
from PyQuantum.TC_Lindblad.Cavity import Cavity
from PyQuantum.TC_Lindblad.Hamiltonian import Hamiltonian
from PyQuantum.TC_Lindblad.WaveFunction import WaveFunction
from PyQuantum.TC_Lindblad.DensityMatrix import DensityMatrix
from PyQuantum.TC_Lindblad.Evolution import run_out_click
import PyQuantum.TC_Lindblad.config as config
# ---------------------------------------------------------------------------------------------------------------------
# PyQuantum.Tools
from PyQuantum.Tools.LoadPackage import load_pkg
from PyQuantum.Tools.Assert import *
from PyQuantum.Tools.Print import hr
from PyQuantum.Tools.MkDir import *
from PyQuantum.Tools.CSV import *
from PyQuantum.Tools.Units import *
from PyQuantum.Tools.Pickle import *
from copy import copy
# ---------------------------------------------------------------------------------------------------------------------
# PyQuantum.Common
from PyQuantum.Common.Quantum.Operators import operator_a, operator_across
# ---------------------------------------------------------------------------------------------------------------------
# --- Model parameters ------------------------------------------------------
config.capacity = 1  # cavity photon capacity
config.n_atoms = 2   # two-atom Tavis-Cummings model

cavity = Cavity(config.wc, config.wa, config.g, config.n_atoms)
# -----------------------------------------------
# Lindblad (photon-out) decay rate: 1% of the atom-cavity coupling g.
l = config.g * 0.01

T = 1 * config.ms   # total evolution time
# dt = 0.01 / l
dt = 1 * config.ns  # integration time step
# dt = 1 * config.ns / 10
nt = int(T/dt)

# dt = (0.001/l)
# Assert(dt <= 0.01/l, 'dt > 0.01/l')

# NOTE(review): duplicate of the `nt` assignment above; harmless but redundant.
nt = int(T/dt)

cavity.info()

# NOTE(review): `cprint` is assumed to be provided by one of the star
# imports above (PyQuantum.Tools) — confirm.
cprint('T:', 'green', end='')
print(time_unit_full(T))

cprint('dt:', 'green', end='')
# print(time_unit_full(dt))

cprint('nt:', 'green', end='')
print(nt)
# -----------------------------------------------

H = Hamiltonian(config.capacity, cavity)

# Singlet-like superposition state (currently unused — see loop below).
s_2 = WaveFunction(states=H.states, init_state=[1, [0, 1]], amplitude=1./sqrt(2)) - \
    WaveFunction(states=H.states, init_state=[1, [1, 0]], amplitude=1./sqrt(2))

# Ground-atoms state with one photon in the cavity.
t_0 = WaveFunction(states=H.states, init_state=[1, [0, 0]])
# ---------------------------------------------------------------------------------------------------------------------
mkdir('sink')
mkdir('sink/1ms_l001g')

# Collect 100 photon-click times for each initial state of interest.
for w_0 in [
    {
        'name': 't0',
        'obj': t_0,
    },
    # {
    #     'name': 's2',
    #     'obj': s_2,
    # },
]:
    w_0['obj'].normalize()

    ro_0 = DensityMatrix(w_0['obj'])

    T_list = []
    sink_list = []
    T_click = []

    # NOTE(review): `nt` is reused here as a loop counter, shadowing the
    # step count computed above — confirm this is intentional.
    for nt in range(0, 100):
        t_click = run_out_click({
            "ro_0": copy(ro_0),
            "H": H,
            "dt": dt,
            # "sink_list": sink_list,
            "T_list": T_list,
            "precision": 1e-3,
            'sink_limit': 1,
            'time_limit': config.ms,
            "thres": 0.001,
            'lindblad': {
                'out': {
                    'L': operator_a(H, H.capacity, H.cavity.n_atoms),
                    'l': l
                },
            },
        })

        print(t_click)
        T_click.append(t_click)

    for t in T_click:
        print(time_unit_full(t))

    # MkDir('sink')
    # pickle_dump(T_list, 'sink/1ms_l001g/T_list_' + w_0['name'] + '.pkl')
    # pickle_dump(sink_list, 'sink/1ms_l001g/sink_list_' + w_0['name'] + '.pkl')
    pickle_dump(T_click, 'T_click_' + w_0['name'] + '.pkl')

    # list_to_csv(T_list, 'MM/' + path + '/' + 'T_' + w_0['name'] + '.csv')
    # list_to_csv(np.array(T_list) * 1e9, 'MM/' + path + '/' + 'T_' + w_0['name'] + '.csv')
    # list_to_csv(sink_list, 'MM/' + path + '/' +
    # 'sink_' + w_0['name'] + '.csv')
|
from instance_recommender.inventory import Inventory
from unittest import TestCase
import json
import pandas
class TestInventory(TestCase):
    """Integration tests for the Inventory loader and pricing filters."""

    def __init__(self, methodName):
        super().__init__(methodName)
        # Local fixture file and a throwaway copy that Inventory writes.
        self.source_path = 'inventory/instances.json'
        self.dest_path = '/tmp/test_inventory.json'

    def create_inventory(self):
        """Build a fresh Inventory that re-loads from the local fixture."""
        source_url = 'file://{}'.format(self.source_path)
        return Inventory(source_url=source_url,
                         inventory_file_path=self.dest_path,
                         refresh=True)

    def test_inventory_object_creates(self):
        """Refreshing must copy the source JSON verbatim to the destination."""
        self.create_inventory()
        with open(self.source_path, 'r') as source_file:
            source = json.loads(source_file.read())
        with open(self.dest_path, 'r') as dest_file:
            dest = json.loads(dest_file.read())
        assert source == dest

    def test_get_inventory_constraints(self):
        """A constrained pricing lookup must keep exactly five columns."""
        inventory = self.create_inventory()
        constraints = {
            'region': 'us-east-1',
            'exclude_burstable': False,
            'arch': 'x86_64',
        }
        pricing = inventory.get_pricing_with_constraints(constraints=constraints)
        assert len(pricing.columns) == 5
|
# NOTE(review): `ordenar` appears to be an unfinished wrapper; with the
# original indentation lost it is unclear which (if any) of the functions
# below were nested inside it. TODO: confirm and either give it a body or
# remove it.
def ordenar ():
def calcularComision(ventatotal, Objetivo, Sueldoanual):
    """Return the commission for ``ventatotal`` sales against ``Objetivo``.

    Below 80% of the objective: no commission. From 80% up to (but not
    reaching) the objective: 3% of the annual salary. At or above the
    objective: 10%. All arithmetic uses integer division, as before.
    """
    umbral = (Objetivo * 80) // 100  # 80% of the sales objective
    if ventatotal < umbral:
        return 0
    if ventatotal < Objetivo:
        return (Sueldoanual * 3) // 100
    return (Sueldoanual * 10) // 100
def liquidar():
    """Compute each seller's commission and write liquidacionComisiones.txt.

    Reads sellers from vendedores.csv (codigo,nombre,objetivo,sueldo) and
    card sales from ventasTarjetas.csv (codigo,mes,cantidad); writes one
    "codigo,comision" line per seller.

    Fixes: the inner loop split the *outer* row variable ``renglon`` instead
    of ``renglon2``, so every sales row was parsed as the seller row and the
    totals were wrong; file handles are now closed via ``with`` (the seller
    and sales files were previously never closed).
    """
    with open("vendedores.csv", "r") as vendedores, \
            open("liquidacionComisiones.txt", "w") as salida:
        for renglon in vendedores:
            campos = renglon[:-1].split(",")
            codigo = int(campos[0])
            objetivo = int(campos[2])
            sueldo_anual = float(campos[3])
            ventatotal = 0
            # Re-scan the sales file for every seller (simple, O(n*m)).
            with open("ventasTarjetas.csv", "r") as ventas:
                for renglon2 in ventas:
                    # BUG FIX: was `renglon[:-1].split(",")`.
                    datos = renglon2[:-1].split(",")
                    if int(datos[0]) == codigo:
                        ventatotal = ventatotal + int(datos[2])
            comision = calcularComision(ventatotal, objetivo, sueldo_anual)
            salida.write(str(codigo) + "," + str(comision) + "\n")
def topCinco():
    """Unfinished: meant to report the five sellers with the highest commissions.

    Fixes applied: the inner loop split the outer ``renglon`` instead of
    ``renglon1``; seller codes (ints) were compared against raw strings and
    could never match; the commissions file name did not match the one
    ``liquidar`` writes ("liquidacionesComisiones.txt" vs
    "liquidacionComisiones.txt"); and the final ``if`` had no body
    (a SyntaxError). The actual top-five selection logic is still TODO.
    """
    with open("vendedores.csv", "r") as arch, \
            open("liquidacionComisiones.txt", "r") as arch1:
        per = {}
        for renglon in arch:
            persona = renglon[:-1].split(",")
            per["Codigo"] = int(persona[0])
            per["Nombre"] = persona[1]
            vent = {}
            for renglon1 in arch1:
                # BUG FIX: was splitting `renglon` (the seller row).
                venta = renglon1[:-1].split(",")
                # BUG FIX: convert so the int/int comparison below can match.
                vent["Codigo"] = int(venta[0])
                vent["Comision"] = venta[1]
                if per["Codigo"] == vent["Codigo"]:
                    pass  # TODO: accumulate this seller's commission for ranking
def main():
    """Entry point: run the commission liquidation step."""
    liquidar()

# NOTE(review): runs unconditionally on import; consider guarding with
# `if __name__ == "__main__":`.
main()
from src.knn.utils import convert_df_to_np
from src.knn.main import classify
def test_knn(test_df):
    """Smoke-test classify(): the predicted label must be a string."""
    features, labels = convert_df_to_np(test_df)
    prediction = classify([1.0, 3.0], features, labels, 2)
    assert isinstance(prediction, str)
|
import sys
def combi(n, m):
    """Return the binomial coefficient C(n, m).

    Fix: the original recursed over a module-level memo table ``dp``
    (coupling the function to global state and risking RecursionError for
    large ``n``); math.comb computes the same value directly. The ``dp``
    table defined below is left untouched for backward compatibility but is
    no longer required.
    """
    from math import comb  # local import keeps the function self-contained
    return comb(n, m)
# Read "n m" from stdin, allocate the (n+1)x(n+1) memo table initialised
# to -1 (the "not computed yet" sentinel), and print C(n, m).
n, m = map(int, sys.stdin.readline().split())
dp = [[-1 for _ in range(n+1)] for _ in range(n+1)]
print(combi(n, m))
# For each test case, score an O/X answer string: each consecutive 'O' is
# worth one more point than the previous one; an 'X' resets the streak.
cases = int(input())
for _ in range(cases):
    answers = input()
    total = 0
    streak = 0
    for mark in answers:
        streak = streak + 1 if mark == 'O' else 0
        total += streak
    print(total)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.