text stringlengths 38 1.54M |
|---|
# testing.py
#
# This file is part of scqubits.
#
# Copyright (c) 2019, Jens Koch and Peter Groszkowski
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
############################################################################
import pytest
from scqubits.tests.conftest import TESTDIR
def run():
    """Run the pytest test suite bundled with scqubits.

    Executes every test found under scqubits.tests (TESTDIR) with
    verbose output.
    """
    pytest_args = ["-v", TESTDIR]
    pytest.main(pytest_args)
|
#!/usr/bin/env python3
# * Write "zenfilter" (working title) for:
# - Displaying a "COUNT\t\d+" message every --count-step=\d+ lines.
# - Displaying the last --last=\d+ lines as "LAST\t.*"
# - Displaying lines matching --filter=.* (regex) as "FOUND\t.*"
# "make | python zenfilter.py [args]"
import argparse
import re
import sys
def getArgs():
    """Parse zenfilter's command-line options.

    Options: --count-step (int), --last (int), --filter (regex string),
    --suppress-last-on (regex string). All default to None.
    """
    parser = argparse.ArgumentParser()
    for flag, kwargs in (
            ("--count-step", {"type": int}),
            ("--last", {"type": int}),
            ("--filter", {}),
            ("--suppress-last-on", {})):
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
def zenfilter():
    """Filter stdin per the command-line options.

    Emits "COUNT\t<n>" every --count-step lines, "FOUND\t<line>" for
    lines matching --filter, and finally the last --last lines as
    "LAST\t<line>" (unless --suppress-last-on matches the tail).
    """
    args = getArgs()
    tail = []
    keep = args.last
    step = args.count_step
    suppress = args.suppress_last_on
    pattern = re.compile(args.filter) if args.filter else None
    for index, line in enumerate(sys.stdin):
        if keep:
            # Maintain a bounded tail buffer of the most recent lines.
            tail.append(line)
            del tail[:-keep]
        if step and index % step == 0:
            print("COUNT\t{}".format(index), flush=True)
        if pattern and pattern.search(line):
            # Matched line already ends with '\n'; don't add another.
            print("FOUND\t{}".format(line), end="", flush=True)
    # Print the tail unless the suppress pattern matches it.
    if (not suppress) or (not re.search(suppress, ''.join(tail))):
        for line in tail:
            print("LAST\t{}".format(line), end="")
        sys.stdout.flush()


if __name__ == "__main__":
    zenfilter()
|
import os
import logging
import yaml
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
class Config:
    """Dict-backed configuration (typically loaded from YAML) with an
    environment-variable fallback for missing keys."""

    @staticmethod
    def from_file(filename, logger=None):
        """Load a Config from a YAML file.

        :param filename: path to the YAML configuration file
        :param logger: optional logger; falls back to the root logger.
            (The original dereferenced a possibly-None ``logger`` here,
            crashing when called without one.)
        """
        log = logger if logger is not None else logging.getLogger()
        log.info(f'Config.from_file({filename})')
        # NOTE(review): yaml.load with a full Loader executes arbitrary
        # YAML tags — switch to yaml.safe_load if the file can come from
        # an untrusted source.
        with open(filename, 'r') as f:
            return Config(yaml.load(f, Loader=Loader), logger)

    def __init__(self, config, logger=None):
        self._config = config  # parsed configuration mapping
        self._logger = logger if logger is not None else logging.getLogger()

    def get_config(self, section, name):
        """Return config[section][name]; fall back to the SECTION_NAME
        environment variable; else None."""
        self._logger.debug(f'Config.get_config {section}.{name}')
        if section in self._config and name in self._config[section]:
            return self._config[section][name]
        env = f'{section.upper()}_{name.upper()}'
        if env in os.environ:
            return os.environ[env]
        return None

    # Convenience accessors for well-known settings.
    def get_aprs_host(self):
        return self.get_config('aprs', 'host')

    def get_aprs_port(self):
        return self.get_config('aprs', 'port')

    def get_aprs_login(self):
        return self.get_config('aprs', 'login')

    def get_mqtt_host(self):
        return self.get_config('mqtt', 'host')

    def get_mqtt_port(self):
        return self.get_config('mqtt', 'port')

    def get_mqtt_user(self):
        return self.get_config('mqtt', 'user')

    def get_mqtt_pass(self):
        return self.get_config('mqtt', 'pass')

    def get_consumers(self):
        """Return the configured consumer list (empty when unset)."""
        self._logger.debug('Config.get_consumers')
        if 'consumers' in self._config:
            return self._config['consumers']
        return []
|
#!/usr/bin/python3
import random
from rpgclasses import Player
from game import Game
"""THIS IS KATHY'S ZOMBIE RPG GAME"""
# dict of possible weapons
items = {
"Knife": {
"desc": "This is a rusty knife. Still looks sharp though.",
"success": "You've lunged at the zombie with your knife! Direct hit to the brain.",
"failure": "You slipped! You missed the zombie by a hair!",
"type": "weapons"
},
"Gun": {
"desc": "A good ol' Smith & Wesson. It has ",
"success": "You've fired a shot! It miraculously hits the heart.",
"failure": "You've fired a shot and MISSED! Hope the sound doesn't attract all of the zombies",
"bullets": 6,
"type": "weapons"
},
"Key": {
"desc": "A key. Maybe it'll open one of these doors?",
"success": "You try the lock and it worked!",
"failure": "Door's still locked. Gotta try a different door.",
"type": "aid"
},
"Sandwich": {
"desc": "It's tuna. Seems edible.",
"success": "You ate it. Stomach seems ok.",
"failure": "YIKES! The food had zombie blood. You turned into a zombie.",
"type": "aid"
}
}
# a dictionary linking a room to other rooms
## A dictionary linking a room to other rooms
rooms = {
'Hall': {
'desc': "Just a regular hallway.",
'paths': {
'South': 'Kitchen',
'East': 'Dining Room',
},
'item': 'Key'
},
'Kitchen': {
'desc': "Dishes are piled in the sink. Seems like rats have taken over.",
'paths': {
'North': 'Hall'
},
'item': 'Sandwich',
},
'Dining Room': {
'desc': "The windows are boarded up. I wonder where the people went.",
'paths': {
'West': 'Hall',
'South': 'Garden',
'North': 'Pantry'
},
'item': 'Gun',
},
'Garden': {
'desc': "It's dark out there. No flowers are left.",
'locked': True,
'paths': {
'North': 'Dining Room'
}
},
'Pantry': {
'desc': "Everything's moldy. Yuck.",
'paths': {
'South': 'Dining Room'
},
'item': 'Knife',
}
}
# create a zombie
zombie = {
"health": 30,
"success": "The zombie bit you!",
"failure": "The zombie aimed for you and missed!"
}
hero = Player(inventory={"weapons": {}, "aid": {}})
zombies = Game(items={**items}, rooms={**rooms})
zombies.startGame(hero, zombie)
|
from django.shortcuts import render, redirect
from django.http import HttpResponse, Http404
import datetime as dt
from django.core.exceptions import ObjectDoesNotExist
from .models import Image, Category, Location
# Create your views here.
def welcome(request):
    """Entry point: redirect visitors to the photo index view."""
    return redirect(index)
def index(request):
    """Render the landing page listing every uploaded image."""
    all_photos = Image.get_all_images()
    context = {'photos': all_photos}
    return render(request, 'index.html', context)
def search_results(request):
    """Render images whose category matches the 'category' GET parameter.

    Falls back to a "nothing searched" message when the parameter is
    absent or blank.
    """
    # Single lookup instead of the original membership test + indexing.
    search_term = request.GET.get("category")
    if search_term:
        searched_images = Image.search_image(search_term)
        return render(request, 'search_results.html',
                      {"message": search_term, "images": searched_images})
    message = "You haven't searched for any term"
    return render(request, 'search_results.html', {"message": message})
def filter_results(request, place):
    """Render the gallery of images taken at *place*."""
    images = Image.filter_by_location(place)
    context = {'images': images, 'place': place}
    return render(request, 'location.html', context)
def userInput():
    """Prompt for the user's name, age and account type.

    Returns the three raw answers as a ``(name, age, choice)`` tuple so
    callers can actually use them — the original collected the input and
    then discarded all three values. Existing callers that ignore the
    return value are unaffected.
    """
    userName = input("What would you like your name to be?")
    userAge = input("How old are you?")
    userChoice = input("Are you Temporary or Regular?")
    return userName, userAge, userChoice
class OrigUser:
    """A registered user with a name and an age."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def printUser(self):
        """Write each of the user's fields to stdout, one per line."""
        for field in (self.name, self.age):
            print(field)
# Interactively collect user details at import/run time.
# NOTE(review): the answers are currently discarded — consider wiring
# them into OrigUser/TempUser below.
userInput()
# Example usage kept for reference:
# user = OrigUser("Carlo", 25)
# user.printUser()
class TempUser:
    """A temporary user identified only by a name."""

    def __init__(self, name):
        self.name = name

    def printTemp(self):
        """Write the user's name to stdout."""
        display_name = self.name
        print(display_name)
|
#!/usr/bin/env python
"""-----------------------------------------------------------------------
Python script for running the custom tsdat pipeline defined by this
project.
---------------------------------------------------------------------------"""
import argparse
import sys
import os
# Add the project directory to the pythonpath
project_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, project_dir)
from pipeline.runner import run_pipeline
def main():
"""-------------------------------------------------------------------
Main function.
-------------------------------------------------------------------"""
# Parse arguments - a file or list of files
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--mode", default='dev', help="Identify the configuration to use. Default is dev.")
parser.add_argument('file', nargs='*', type=str)
args = parser.parse_args()
files = []
for f in args.file:
files.append(f)
# run the pipeline
run_pipeline(mode=args.mode, input_files=files)
if __name__ == "__main__":
main()
|
import math
import yaml
import pygame
import gevent
from gevent import socket
import config
from config import MSG
import utility
from utility import Text
from player import Player
from planet import Planet
from asteroid import AsteroidCloud
from ship import Ship, Projectile
from wormhole import Wormhole
import network
class Game:
def __init__(self, preferences=None, function="", username="", password=""):
self.entities = []
self.planets = []
self.ships = []
self.players = []
self.projectiles = []
self.player = Player(position=(config.SCREEN_WIDTH/2, 100), colour=utility.Colour(config.PLAYER_COLOUR).to_rgba_f(), projectile_pool=self.projectiles)
self.function = function
self.username = username
self.password = password
self.loading = True # is the loading screen showing?
self.player_id = -1
self.system_name = ""
self.system_id = -1
self.system_size = -1
self.system_label = None
self.server_sock = None
self.last_server_frame = None
self.keys = None
self.init_key_config()
self.unclick_hooks = [] # list of callbacks called on MOUSE1_UP
self.init_ui()
def init_key_config(self, preferences=None):
# use the default key configuration...
k_forward = config.KEY_FORWARD
k_left = config.KEY_LEFT
k_right = config.KEY_RIGHT
k_fire = config.KEY_FIRE
k_interact = config.KEY_INTERACT
# ...unless a configuration exists in the preferences
if preferences is not None and preferences.get('keys', None) is not None:
k_forward = preferences['keys'].get('forward', k_forward)
k_left = preferences['keys'].get('left', k_left)
k_right = preferences['keys'].get('right', k_right)
k_fire = preferences['keys'].get('fire', k_fire)
k_interact = preferences['keys'].get('interact', k_fire)
self.keys = {k_forward: Ship.INPUT.FORWARD,
k_left: Ship.INPUT.LEFT,
k_right: Ship.INPUT.RIGHT,
k_fire: Ship.INPUT.FIRE,
k_interact: Ship.INPUT.INTERACT}
def init_ui(self):
self.system_label = Text("", config.FONT, utility.Colour(0xFFFFFFFF), (20, 20))
def update_ui(self, delta):
pass
def render_ui(self):
self.system_label.render()
def update(self, delta):
if self.server_sock is None:
if not self.connect_to_server(self.function, self.username, self.password):
return False
if not self.loading:
#self.update_ui(delta)
for entity in self.entities:
entity.update(delta)
for player in self.players:
player.update(delta)
self.player.update(delta)
for projectile in self.projectiles:
projectile.update(delta)
return True
def render(self):
if not self.loading:
self.render_ui()
for entity in self.entities:
entity.render()
for player in self.players:
player.render()
self.player.render()
for projectile in self.projectiles:
projectile.render()
def logout(self):
self.server_sock.send_message(MSG.CL_GW_LOGOUT)
return True
def connect_to_server(self, function, username, password):
print "Connecting to server"
self.server_sock = network.Socket()
self.server_sock.connect(config.CLIENT_GATEWAY_ADDRESS)
if function == "register":
self.server_sock.send_message(MSG.CL_GW_REGISTER, [username, password])
response = self.server_sock.wait_for_message([MSG.GW_CL_REGISTRATION_SUCCESSFUL,
MSG.GW_CL_REGISTRATION_FAILED])
if response.type == MSG.GW_CL_REGISTRATION_SUCCESSFUL:
print "Registration successful"
else:
print "Registration failed"
return False
print "Logging in"
self.server_sock.send_message(MSG.CL_GW_LOGIN, [username, password])
response = self.server_sock.wait_for_message([MSG.GW_CL_LOGIN_FAILED,
MSG.GW_CL_LOGIN_SUCCESSFUL])
if response is None:
print "Gateway closed connection" # TODO: raise error?
return False # connection closed
if response.type == MSG.GW_CL_LOGIN_SUCCESSFUL:
print "Login successful"
self.player_id = response.data
gevent.spawn(self.receive_server_input)
return True # login succeeded
print "Login failed"
return False # login failed
def receive_server_input(self):
print "Receiving server input"
while True:
msg = self.server_sock.wait_for_message()
if msg.type == config.MSG.GW_CL_SYSTEM_INFO:
self.on_system_info(msg.data)
elif msg.type == config.MSG.GW_CL_MOVING_SYSTEMS:
self.on_moving_systems(msg.data)
elif msg.type == config.MSG.GW_CL_FRAME:
self.on_server_frame(msg.time, msg.data)
def send_input(self, key, state):
self.server_sock.send_message(config.MSG.CL_GW_INPUT, [key, state])
def on_system_info(self, data):
system_data = yaml.load(data)
self.system_id = system_data['id']
self.system_name = system_data['name']
self.system_label.set_text(self.system_name)
self.system_size = system_data['size']
players = system_data['players']
planets = system_data['planets']
wormholes = system_data['wormholes']
self.entities = []
self.planets = []
self.ships = []
self.players = []
self.projectiles = []
# player position
player = system_data['player']
x, y = player['position']
self.player.set_position(x, y)
star = Planet(id=1, size=0, position=(config.SCREEN_WIDTH/2, config.SCREEN_HEIGHT/2), colour=utility.Colour(0xFFFFFFFF).to_rgba_f())
self.planets.append(star)
self.entities.append(star)
for planet in planets:
p = Planet(id=planet['id'], size=planet['size'],
orbiting=star, distance=planet['distance'],
speed=planet['speed'], degree=planet['degree'],
colour=utility.Colour(planet['colour']).to_rgba_f())
self.planets.append(p)
self.entities.append(p)
for wormhole in wormholes:
w = Wormhole(id=wormhole['id'], size=config.WORMHOLE_SIZE,
orbiting=star, distance=wormhole['distance'],
speed=wormhole['speed'], degree=wormhole['degree'],
colour=utility.Colour(config.WORMHOLE_COLOUR).to_rgba_f())
self.entities.append(w)
for player_id, name in players:
self.players.append(Player(projectile_pool=self.projectiles,
colour=utility.Colour(config.ENEMY_COLOUR).to_rgba_f(),
player_id=player_id, name=name))
# turn the loading screen off
self.loading = False
def on_moving_systems(self, name):
self.loading_screen(name)
def loading_screen(self, system_name):
self.loading = True
def on_server_frame(self, time, data):
if self.last_server_frame is None or time > self.last_server_frame:
self.last_server_frame = time
players, projectiles, new_players = data
# new players
if new_players and new_players != [None]:
print "new player"
for player_id, name in new_players:
if player_id != self.player_id:
self.players.append(Player(projectile_pool=self.projectiles,
colour=utility.Colour(config.ENEMY_COLOUR).to_rgba_f(),
player_id=player_id, name=name))
# players
players_by_id = {pl_id: (pl_x, pl_y, pl_direction, pl_inputs, pl_destroyed, pl_respawn_in)
for pl_id, pl_x, pl_y, pl_direction, pl_inputs, pl_destroyed, pl_respawn_in
in players}
for player in self.players:
player.x, player.y, player.direction, player.inputs, player.destroyed, player.respawn_in = players_by_id[player.id]
# projectiles
if projectiles and projectiles != [None]:
projectiles_by_id = {pr_id: (pr_x, pr_y, pr_direction, pr_player)
for pr_id, pr_x, pr_y, pr_direction, pr_player
in projectiles}
for projectile in self.projectiles:
if projectile.id not in projectiles_by_id:
self.projectiles.remove(projectile)
else:
x, y, projectile.direction, player_id = projectiles_by_id[projectile.id]
projectile.set_position(x, y)
del projectiles_by_id[projectile.id]
for pr_id, pr in projectiles_by_id.iteritems():
x, y, direction, player_id = pr
projectile = Projectile(pr_id, (x, y), config.PROJECTILE_SPEED, direction,
utility.Colour(config.ENEMY_COLOUR if player_id != self.player_id else config.PLAYER_COLOUR).to_rgba_f())
projectile.set_position(x, y)
self.projectiles.append(projectile)
# this player
my_x, my_y, my_direction, my_inputs, my_destroyed, my_respawning_in = players_by_id[self.player_id]
if my_destroyed and not self.player.destroyed:
self.player.destroy(my_respawning_in)
for planet in self.planets:
print (planet.x, planet.y)
print (self.player.x, self.player.y)
elif self.player.destroyed and not my_destroyed:
self.player.respawn(my_x, my_y)
self.player.set_position(my_x, my_y)
self.player.direction = my_direction
def on_key(self, key, state):
if key in config.VALID_KEYS: # only act on valid keys
self.player.set_input(self.keys[key], state)
self.send_input(self.keys[key], state)
def on_click(self, position):
pass
def on_unclick(self, position):
for hook in self.unclick_hooks:
hook(position)
self.unclick_hooks = [] |
"""
fs.expose.sftp
==============
Expose an FS object over SFTP (via paramiko).
This module provides the necessary interfaces to expose an FS object over
SFTP, plugging into the infrastructure provided by the 'paramiko' module.
For simple usage, the class 'BaseSFTPServer' provides an all-in-one server
class based on the standard SocketServer module. Use it like so::
server = BaseSFTPServer((hostname,port),fs)
server.serve_forever()
Note that the base class allows UNAUTHENTICATED ACCESS by default. For more
serious work you will probably want to subclass it and override methods such
as check_auth_password() and get_allowed_auths().
To integrate this module into an existing server framework based on paramiko,
the 'SFTPServerInterface' class provides a concrete implementation of the
paramiko.SFTPServerInterface protocol. If you don't understand what this
is, you probably don't want to use it.
"""
import os
import stat as statinfo
import time
import socketserver
import threading
import paramiko
from fs.base import flags_to_mode
from fs.path import *
from fs.errors import *
from fs.local_functools import wraps
from fs.filelike import StringIO
from fs.utils import isdir
# Default host key used by BaseSFTPServer
#
# NOTE(review): this private key is published in the source and therefore
# well known — never rely on it in production; pass your own host_key.
DEFAULT_HOST_KEY = paramiko.RSAKey.from_private_key(StringIO(
    "-----BEGIN RSA PRIVATE KEY-----\n" \
    "MIICXgIBAAKCAIEAl7sAF0x2O/HwLhG68b1uG8KHSOTqe3Cdlj5i/1RhO7E2BJ4B\n" \
    "3jhKYDYtupRnMFbpu7fb21A24w3Y3W5gXzywBxR6dP2HgiSDVecoDg2uSYPjnlDk\n" \
    "HrRuviSBG3XpJ/awn1DObxRIvJP4/sCqcMY8Ro/3qfmid5WmMpdCZ3EBeC0CAwEA\n" \
    "AQKCAIBSGefUs5UOnr190C49/GiGMN6PPP78SFWdJKjgzEHI0P0PxofwPLlSEj7w\n" \
    "RLkJWR4kazpWE7N/bNC6EK2pGueMN9Ag2GxdIRC5r1y8pdYbAkuFFwq9Tqa6j5B0\n" \
    "GkkwEhrcFNBGx8UfzHESXe/uE16F+e8l6xBMcXLMJVo9Xjui6QJBAL9MsJEx93iO\n" \
    "zwjoRpSNzWyZFhiHbcGJ0NahWzc3wASRU6L9M3JZ1VkabRuWwKNuEzEHNK8cLbRl\n" \
    "TyH0mceWXcsCQQDLDEuWcOeoDteEpNhVJFkXJJfwZ4Rlxu42MDsQQ/paJCjt2ONU\n" \
    "WBn/P6iYDTvxrt/8+CtLfYc+QQkrTnKn3cLnAkEAk3ixXR0h46Rj4j/9uSOfyyow\n" \
    "qHQunlZ50hvNz8GAm4TU7v82m96449nFZtFObC69SLx/VsboTPsUh96idgRrBQJA\n" \
    "QBfGeFt1VGAy+YTLYLzTfnGnoFQcv7+2i9ZXnn/Gs9N8M+/lekdBFYgzoKN0y4pG\n" \
    "2+Q+Tlr2aNlAmrHtkT13+wJAJVgZATPI5X3UO0Wdf24f/w9+OY+QxKGl86tTQXzE\n" \
    "4bwvYtUGufMIHiNeWP66i6fYCucXCMYtx6Xgu2hpdZZpFw==\n" \
    "-----END RSA PRIVATE KEY-----\n"
))
def report_sftp_errors(func):
    """Decorator to catch and report FS errors as SFTP error codes.

    Known FSError subclasses become the matching paramiko status code;
    any other exception propagates to the caller untouched.
    """
    @wraps(func)
    def wrapper(*args, **kwds):
        try:
            result = func(*args, **kwds)
        except ResourceNotFoundError:
            return paramiko.SFTP_NO_SUCH_FILE
        except UnsupportedError:
            return paramiko.SFTP_OP_UNSUPPORTED
        except FSError:
            return paramiko.SFTP_FAILURE
        else:
            return result
    return wrapper
class SFTPServerInterface(paramiko.SFTPServerInterface):
    """SFTPServerInterface implementation that exposes an FS object.

    This SFTPServerInterface subclass expects a single additional argument,
    the fs object to be exposed. Use it to set up a transport subsystem
    handler like so::

        t.set_subsystem_handler("sftp",SFTPServer,SFTPServerInterface,fs)

    If this all looks too complicated, you might consider the BaseSFTPServer
    class also provided by this module - it automatically creates the enclosing
    paramiko server infrastructure.
    """

    def __init__(self, server, fs, encoding=None, *args, **kwds):
        # The exposed filesystem and the encoding used to translate byte
        # paths from the wire into unicode FS paths (default utf8).
        self.fs = fs
        if encoding is None:
            encoding = "utf8"
        self.encoding = encoding
        super(SFTPServerInterface, self).__init__(server, *args, **kwds)

    def close(self):
        # Close the pyfs file system and dereference it.
        self.fs.close()
        self.fs = None

    @report_sftp_errors
    def open(self, path, flags, attr):
        """Open *path* and return an SFTPHandle for it (attr is ignored)."""
        return SFTPHandle(self, path, flags)

    @report_sftp_errors
    def list_folder(self, path):
        """Return SFTPAttributes for every entry in directory *path*."""
        if not isinstance(path, str):
            path = path.decode(self.encoding)
        stats = []
        for entry in self.fs.listdir(path, absolute=True):
            stat = self.stat(entry)
            # stat() returns an int error code on failure; skip those entries.
            if not isinstance(stat, int):
                stats.append(stat)
        return stats

    @report_sftp_errors
    def stat(self, path):
        """Build paramiko SFTPAttributes from the FS info dict for *path*."""
        if not isinstance(path, str):
            path = path.decode(self.encoding)
        info = self.fs.getinfo(path)
        stat = paramiko.SFTPAttributes()
        stat.filename = basename(path).encode(self.encoding)
        stat.st_size = info.get("size")
        # Prefer raw st_* values; fall back to datetime fields when present.
        if 'st_atime' in info:
            stat.st_atime = info.get('st_atime')
        elif 'accessed_time' in info:
            stat.st_atime = time.mktime(info.get("accessed_time").timetuple())
        if 'st_mtime' in info:
            stat.st_mtime = info.get('st_mtime')
        else:
            if 'modified_time' in info:
                stat.st_mtime = time.mktime(info.get("modified_time").timetuple())
        # Permissions are not mapped from the FS; everything reports 0777.
        if isdir(self.fs, path, info):
            stat.st_mode = 0o777 | statinfo.S_IFDIR
        else:
            stat.st_mode = 0o777 | statinfo.S_IFREG
        return stat

    def lstat(self, path):
        # Symlinks are not supported, so lstat is identical to stat.
        return self.stat(path)

    @report_sftp_errors
    def remove(self, path):
        """Remove the file at *path*."""
        if not isinstance(path, str):
            path = path.decode(self.encoding)
        self.fs.remove(path)
        return paramiko.SFTP_OK

    @report_sftp_errors
    def rename(self, oldpath, newpath):
        """Rename/move a file or directory from *oldpath* to *newpath*."""
        if not isinstance(oldpath, str):
            oldpath = oldpath.decode(self.encoding)
        if not isinstance(newpath, str):
            newpath = newpath.decode(self.encoding)
        # Files and directories need different FS move operations.
        if self.fs.isfile(oldpath):
            self.fs.move(oldpath, newpath)
        else:
            self.fs.movedir(oldpath, newpath)
        return paramiko.SFTP_OK

    @report_sftp_errors
    def mkdir(self, path, attr):
        """Create directory *path* (attr is ignored)."""
        if not isinstance(path, str):
            path = path.decode(self.encoding)
        self.fs.makedir(path)
        return paramiko.SFTP_OK

    @report_sftp_errors
    def rmdir(self, path):
        """Remove directory *path*."""
        if not isinstance(path, str):
            path = path.decode(self.encoding)
        self.fs.removedir(path)
        return paramiko.SFTP_OK

    def canonicalize(self, path):
        """Return the absolute, normalized form of *path* as bytes."""
        try:
            return abspath(normpath(path)).encode(self.encoding)
        except BackReferenceError:
            # If the client tries to use backrefs to escape root, gently
            # nudge them back to /.
            return '/'

    @report_sftp_errors
    def chattr(self, path, attr):
        # f.truncate() is implemented by setting the size attr.
        # Any other attr requests fail out.
        if attr._flags:
            if attr._flags != attr.FLAG_SIZE:
                raise UnsupportedError
            with self.fs.open(path, "r+") as f:
                f.truncate(attr.st_size)
        return paramiko.SFTP_OK

    def readlink(self, path):
        # Symbolic links are not supported by the FS abstraction.
        return paramiko.SFTP_OP_UNSUPPORTED

    def symlink(self, path):
        return paramiko.SFTP_OP_UNSUPPORTED
class SFTPHandle(paramiko.SFTPHandle):
    """SFTP file handle bridging paramiko I/O onto a file from an FS object.

    Read and write requests are forwarded to the underlying FS file after
    an explicit seek to the requested offset.
    """

    def __init__(self, owner, path, flags):
        super(SFTPHandle, self).__init__(flags)
        self.owner = owner
        if not isinstance(path, str):
            path = path.decode(owner.encoding)
        self.path = path
        self._file = owner.fs.open(path, flags_to_mode(flags))

    @report_sftp_errors
    def close(self):
        self._file.close()
        return paramiko.SFTP_OK

    @report_sftp_errors
    def read(self, offset, length):
        underlying = self._file
        underlying.seek(offset)
        return underlying.read(length)

    @report_sftp_errors
    def write(self, offset, data):
        underlying = self._file
        underlying.seek(offset)
        underlying.write(data)
        return paramiko.SFTP_OK

    def stat(self):
        # Delegate to the owning interface so attributes stay consistent.
        return self.owner.stat(self.path)

    def chattr(self, attr):
        return self.owner.chattr(self.path, attr)
class SFTPServer(paramiko.SFTPServer):
    """
    An SFTPServer class that closes the filesystem when done.
    """

    def finish_subsystem(self):
        # Close the SFTPServerInterface, it will close the pyfs file system.
        self.server.close()
        super(SFTPServer, self).finish_subsystem()
class SFTPRequestHandler(socketserver.BaseRequestHandler):
    """SocketServer RequestHandler subclass for BaseSFTPServer.

    This RequestHandler subclass creates a paramiko Transport, sets up the
    sftp subsystem, and hands off to the transport's own request handling
    thread.
    """
    # Seconds before handle_timeout() tears the transport down.
    timeout = 60
    # Intended auth deadline; currently unused (see TODO in handle()).
    auth_timeout = 60

    def setup(self):
        """
        Creates the SSH transport. Sets security options.
        """
        self.transport = paramiko.Transport(self.request)
        self.transport.load_server_moduli()
        so = self.transport.get_security_options()
        so.digests = ('hmac-sha1', )
        so.compression = ('zlib@openssh.com', 'none')
        self.transport.add_server_key(self.server.host_key)
        # Wire the FS-backed SFTP subsystem into this transport.
        self.transport.set_subsystem_handler("sftp", SFTPServer, SFTPServerInterface, self.server.fs, encoding=self.server.encoding)

    def handle(self):
        """
        Start the paramiko server, this will start a thread to handle the connection.
        """
        self.transport.start_server(server=BaseServerInterface())
        # TODO: I like the code below _in theory_ but it does not work as I expected.
        # Figure out how to actually time out a new client if they fail to auth in a
        # certain amount of time.
        #chan = self.transport.accept(self.auth_timeout)
        #if chan is None:
        #    self.transport.close()

    def handle_timeout(self):
        # Ensure the SSH transport is torn down even if the superclass
        # timeout handling raises.
        try:
            self.transport.close()
        finally:
            super(SFTPRequestHandler, self).handle_timeout()
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """TCP server that services each connection on its own thread."""
    pass
class BaseSFTPServer(ThreadedTCPServer):
    """SocketServer.TCPServer subclass exposing an FS via SFTP.

    Operation is in the standard SocketServer style. The target FS object
    can be passed into the constructor, or set as an attribute on the
    server::

        server = BaseSFTPServer((hostname,port),fs)
        server.serve_forever()

    It is also possible to specify the host key used by the server by setting
    the 'host_key' attribute. If this is not specified, it will default to
    the key found in the DEFAULT_HOST_KEY variable.
    """
    # If the server stops/starts quickly, don't fail because of
    # "port in use" error.
    allow_reuse_address = True

    def __init__(self, address, fs=None, encoding=None, host_key=None, RequestHandlerClass=None):
        # Filesystem to expose and the path encoding handed to the SFTP
        # interface; both are read by SFTPRequestHandler.setup().
        self.fs = fs
        self.encoding = encoding
        if host_key is None:
            host_key = DEFAULT_HOST_KEY
        self.host_key = host_key
        if RequestHandlerClass is None:
            RequestHandlerClass = SFTPRequestHandler
        socketserver.TCPServer.__init__(self, address, RequestHandlerClass)

    def shutdown_request(self, request):
        # Prevent TCPServer from closing the connection prematurely
        return

    def close_request(self, request):
        # Prevent TCPServer from closing the connection prematurely
        return
class BaseServerInterface(paramiko.ServerInterface):
    """
    Paramiko ServerInterface implementation that performs user authentication.

    Note that this base class allows UNAUTHENTICATED ACCESS to the exposed
    FS. This is intentional, since we can't guess what your authentication
    needs are. To protect the exposed FS, override the following methods:

        * get_allowed_auths      Determine the allowed auth modes
        * check_auth_none        Check auth with no credentials
        * check_auth_password    Check auth with a password
        * check_auth_publickey   Check auth with a public key
    """

    def check_channel_request(self, kind, chanid):
        # Only plain session channels are accepted.
        if kind == 'session':
            return paramiko.OPEN_SUCCEEDED
        return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED

    def check_auth_none(self, username):
        """Check whether the user can proceed without authentication."""
        # WARNING: lets anyone in; override for real deployments.
        return paramiko.AUTH_SUCCESSFUL

    def check_auth_publickey(self, username, key):
        """Check whether the given public key is valid for authentication."""
        return paramiko.AUTH_FAILED

    def check_auth_password(self, username, password):
        """Check whether the given password is valid for authentication."""
        return paramiko.AUTH_FAILED

    def get_allowed_auths(self, username):
        """Return string containing a comma separated list of allowed auth modes.

        The available modes are "none", "password" and "publickey".
        """
        return "none"
# When called from the command-line, expose a TempFS for testing purposes
if __name__ == "__main__":
    from fs.tempfs import TempFS
    server = BaseSFTPServer(("localhost", 8022), TempFS())
    try:
        #import rpdb2; rpdb2.start_embedded_debugger('password')
        server.serve_forever()
    except (SystemExit, KeyboardInterrupt):
        server.server_close()
|
# -*- coding: utf-8 -*-
import werkzeug
from urllib import urlencode
from urlparse import urlparse, parse_qs, urlunparse
from openerp.osv import orm
from openerp.http import request, local_redirect
from openerp.addons.auth_partner.fstoken_tools import fstoken_check, log_token_usage, store_token_usage
from openerp.addons.web.controllers.main import login_and_redirect, set_cookie_and_redirect
def clean_url_from_fs_ptoken(url):
    """Return *url* with the 'fs_ptoken' query parameter removed.

    BUGFIX: the original ignored its *url* argument and always re-read
    request.httprequest.url; the function now operates on the URL it is
    given (its only caller passes request.httprequest.url, so behaviour
    for existing callers is unchanged, and the function is now pure).
    """
    url_parsed = urlparse(url)
    query_dict = parse_qs(url_parsed.query, keep_blank_values=True)
    query_dict.pop('fs_ptoken', None)
    url_parsed_clean = url_parsed._replace(query=urlencode(query_dict, True))
    return urlunparse(url_parsed_clean)
class ir_http(orm.AbstractModel):
    _inherit = 'ir.http'

    def _dispatch(self):
        """Handle URLs carrying an 'fs_ptoken' one-time-login parameter.

        Valid token: log the token's user in (replacing any existing
        session) and redirect to the same URL stripped of the token.
        Missing/invalid token: fall through to normal dispatch, or just
        strip the parameter and redirect.
        """
        if not request or not request.httprequest:
            return super(ir_http, self)._dispatch()
        # Get fs_ptoken from url arguments
        fs_ptoken = request.httprequest.args.get('fs_ptoken', None)
        if not fs_ptoken:
            return super(ir_http, self)._dispatch()
        # Clean the url from the fs_ptoken parameter
        url_no_fs_ptoken = clean_url_from_fs_ptoken(request.httprequest.url)
        # Prepare a redirect to the url-without-token
        # redirect_no_fs_ptoken = set_cookie_and_redirect(url_no_fs_ptoken)
        redirect_no_fs_ptoken = werkzeug.utils.redirect(url_no_fs_ptoken, '303')
        # Check the fs_ptoken
        # ATTENTION: This will log the token check and store the token usage for valid tokens!
        token_record, token_user, token_errors = fstoken_check(fs_ptoken)
        # Token error(s)
        if not token_record or token_errors:
            # Remove token and token-fs-origin from context
            if hasattr(request, 'session') and hasattr(request.session, 'context'):
                request.session.context.pop('fs_ptoken', False)
                request.session.context.pop('fs_origin', False)
            return redirect_no_fs_ptoken
        redirect_url_after_token_login = request.httprequest.args.get('redirect_url_after_token_login', '')
        # User already logged in
        if token_user.id == request.session.uid:
            # Store the token usage
            # HINT: Token usage would only be stored if the user also get's logged in - to track also further
            #       attempts we explicitly store the token at this place but with login=False
            store_token_usage(fs_ptoken, token_record, token_user, request.httprequest, login=False)
            if redirect_url_after_token_login:
                # NOTE(review): assert is stripped under python -O; consider
                # raising an explicit exception for this security check.
                assert redirect_url_after_token_login.startswith(
                    request.httprequest.host_url), 'Only local redirects allowed!'
                redirect = werkzeug.utils.redirect(redirect_url_after_token_login, '303')
                return redirect
            return redirect_no_fs_ptoken
        # Logout current user (to destroy the session and clean the cache)
        request.session.logout(keep_db=True)
        # Login token_user and redirect to url without fs_ptoken (to avoid copy and paste of the url with token)
        login = token_user.login
        password = token_record.name
        if redirect_url_after_token_login:
            assert redirect_url_after_token_login.startswith(request.httprequest.host_url), 'Only local redirects allowed!'
            redirect = login_and_redirect(request.db, login, password, redirect_url=redirect_url_after_token_login)
        else:
            redirect = login_and_redirect(request.db, login, password, redirect_url=url_no_fs_ptoken)
        # Add token and token-fs-origin to the context (after login_and_redirect because it may change the env)
        if hasattr(request, 'session') and hasattr(request.session, 'context'):
            request.session.context['fs_ptoken'] = token_record.name
            request.session.context['fs_origin'] = token_record.fs_origin or False
        return redirect
|
from django.conf.urls import *
from . import views
# URL routes for the blog app.
# NOTE(review): patterns() (and later url()) were removed in modern
# Django (1.10 / 4.0); this file targets an older Django version —
# confirm before upgrading the framework.
urlpatterns = patterns(
    '',
    # Blog landing page.
    url(r'^$', views.EntryIndex.as_view(), name='bloghome'),
    # Individual entry addressed by date, primary key and slug.
    url((r'^(?P<year>\d{4})/'
         '(?P<month>\d{1,2})/'
         '(?P<day>\d{1,2})/'
         '(?P<pk>\d+)-(?P<slug>[-\w]*)/$'),
        views.EntryDetail.as_view(),
        name='entry_detail'),
    # Entries filtered by tag slug.
    url(r'^tag/(?P<slug>[-\w+]+)/$',
        views.TagIndexView.as_view(), name='tagged'),
)
|
from clubsandwich.director import DirectorLoop
from clubsandwich.ui import UIScene, WindowView, KeyAssignedListView, ButtonView
class BasicScene(UIScene):
    """Scene showing 100 key-assigned buttons inside a scrolling window."""

    def __init__(self):
        # Build one button per item lazily; the loop index is bound as a
        # default argument so each callback reports its own number.
        buttons = (
            ButtonView(
                text="Item {}".format(i),
                callback=lambda x=i: print("Called Item {}".format(x)))
            for i in range(0, 100)
        )
        self.key_assign = KeyAssignedListView(value_controls=buttons)
        window = WindowView("Scrolling Text", subviews=[self.key_assign])
        super().__init__(window)
class DemoLoop(DirectorLoop):
    """Director loop whose first scene is the scrolling-list demo."""

    def get_initial_scene(self):
        return BasicScene()
if __name__ == '__main__':
    # Start the terminal UI event loop.
    DemoLoop().run()
|
#!/usr/bin/env python
import rospy
from nav_msgs.msg import MapMetaData
from nav_msgs.msg import OccupancyGrid
from nav_msgs.msg import Path
from std_msgs.msg import String
from geometry_msgs.msg import PoseStamped
from scipy.spatial.transform import Rotation as R
import cv2
import numpy as np
import math
from threading import Thread, Lock
import tf
from jetbrain_path import PRM
import argparse
class Planner:
    """ROS node that turns an occupancy grid into an image and runs a PRM planner.

    Subscribes to `map`, `start` and `end`; publishes the planned path on
    `/path` (metric frame) and `/path_vizualization` (RViz map frame).
    Only the 2D x/y case is handled and the robot is treated as spherical,
    so orientation is ignored by the planner itself.
    """

    def __init__(self):
        # Occupancy-grid state, filled in by callback_OGrid.
        self.oGrid = None            # raw grid as a (height, width) numpy array
        self.oGridOrigin = None      # map origin (x, y) in meters
        self.oGridCPM = None         # cells per meter (1 / resolution)
        self.oGridWidth = None
        self.oGridHeight = None
        self.image = None            # binarized OpenCV image of the grid
        self.startPoint = None
        self.startOrientation = None
        self.endPoint = None
        self.endOrientation = None
        self.path = None             # last computed path (metric frame)
        self.path_map = None         # last computed path (RViz frame)
        # Serializes planner runs triggered from concurrent subscriber callbacks.
        self.mutex = Lock()
        rospy.init_node('jetbrain_path_planner', anonymous=True)
        rospy.Subscriber('map', OccupancyGrid, self.callback_OGrid)
        rospy.Subscriber('start', PoseStamped, self.callback_startPos)
        rospy.Subscriber('end', PoseStamped, self.callback_endPos)
        self.pub = rospy.Publisher('/path', Path, latch=True, queue_size=1)
        self.pub_rviz = rospy.Publisher('/path_vizualization', Path, latch=True, queue_size=1)
        # https://stackoverflow.com/questions/40508651/writing-a-ros-node-with-both-a-publisher-and-subscriber
        timer = rospy.Timer(rospy.Duration(0.5), self.callback_timer)
        rospy.spin()
        timer.shutdown()

    def isPlannerExecutable(self):
        """Return True when map, start and end points are all present and in bounds."""
        returnBool = True
        # FIX: check the map first. The bounds tests below read oGridWidth,
        # oGridHeight and oGridCPM, which are None until the first
        # OccupancyGrid message arrives; the original compared against them
        # unconditionally and raised a TypeError when a start/end point was
        # received before the map.
        if self.oGrid is None:
            returnBool = False
            rospy.loginfo("Can't execute planner, map is lacking")
        if self.startPoint is None:
            returnBool = False
            rospy.loginfo("Can't execute planner, start point is lacking")
        elif self.oGrid is not None:
            if self.startPoint[0] < 0 or self.startPoint[0] > (self.oGridWidth / self.oGridCPM):
                returnBool = False
                rospy.loginfo("Can't execute planner, start point x is false")
            if self.startPoint[1] < 0 or self.startPoint[1] > (self.oGridHeight / self.oGridCPM):
                returnBool = False
                rospy.loginfo("Can't execute planner, start point y is false")
        if self.endPoint is None:
            returnBool = False
            rospy.loginfo("Can't execute planner, end point is lacking")
        elif self.oGrid is not None:
            if self.endPoint[0] < 0 or self.endPoint[0] > (self.oGridWidth / self.oGridCPM):
                returnBool = False
                rospy.loginfo("Can't execute planner, end point x is false")
            if self.endPoint[1] < 0 or self.endPoint[1] > (self.oGridHeight / self.oGridCPM):
                returnBool = False
                rospy.loginfo("Can't execute planner, end point y is false")
        return returnBool

    def _buildPathMsg(self, stacked, flipped):
        """Build a nav_msgs/Path from planner waypoints, in reversed order.

        When `flipped` is True the coordinates are mirrored across the map
        extents so they line up with the horizontally flipped occupancy
        image used for RViz visualization.
        """
        msg = Path()
        msg.header.frame_id = "/path"
        msg.header.stamp = rospy.Time.now()
        for i in range(len(stacked) - 1, -1, -1):
            pose = PoseStamped()
            if flipped:
                pose.pose.position.x = (self.oGridWidth / self.oGridCPM) - stacked[i][0]
                pose.pose.position.y = (self.oGridHeight / self.oGridCPM) - stacked[i][1]
            else:
                pose.pose.position.x = stacked[i][0]
                pose.pose.position.y = stacked[i][1]
            pose.pose.position.z = 0
            # Identity orientation: the robot is treated as spherical.
            pose.pose.orientation.x = 0
            pose.pose.orientation.y = 0
            pose.pose.orientation.z = 0
            pose.pose.orientation.w = 1
            msg.poses.append(pose)
        return msg

    def executePlanner(self):
        """Run the PRM planner if all inputs are available; store the results."""
        self.mutex.acquire(1)
        # FIX: try/finally guarantees the mutex is released even when the
        # planner raises; the original leaked the lock on any exception,
        # deadlocking every later callback.
        try:
            if self.isPlannerExecutable():
                sx = self.startPoint[0]  # [m]
                sy = self.startPoint[1]  # [m]
                gx = self.endPoint[0]  # [m]
                gy = self.endPoint[1]  # [m]
                # Planner tuning parameters from the ROS parameter server.
                robotFootprint = rospy.get_param("/planner_PRM/robotFootprint")
                nSample = rospy.get_param("/planner_PRM/NSample")
                maxEdgeFromeOneSamplePoint = rospy.get_param("/planner_PRM/maxEdgeFromeOneSamplePoint")
                maxEdgeLength = rospy.get_param("/planner_PRM/maxEdgeLength")
                precisionFactor = rospy.get_param("/planner_PRM/precisionFactor")
                robotSize = robotFootprint / 2  # [m]
                messageToLog = "calling Prm planner with : "
                messageToLog += "Sp: " + str(sx) + ", " + str(sy)
                messageToLog += ", Gp: " + str(gx) + ", " + str(gy)
                messageToLog += ", Rz: " + str(robotSize)
                messageToLog += ", CPM: " + str(round(self.oGridCPM)) + "\n"
                messageToLog += "nSample: " + str(nSample) + ", "
                messageToLog += "maxNbEdge: " + str(maxEdgeFromeOneSamplePoint) + ", "
                messageToLog += "maxEdgeLength: " + str(maxEdgeLength) + ", "
                messageToLog += "precisionFactor: " + str(precisionFactor)
                rospy.loginfo(messageToLog)
                beforeTime = rospy.Time.now()
                # Renamed local (was `Planner`) so it no longer shadows the class.
                prm = PRM(self.image, round(self.oGridCPM), sx, sy, gx, gy, robotSize, nSample, maxEdgeFromeOneSamplePoint, maxEdgeLength, precisionFactor)
                rx, ry = prm.startPlanner()
                afterTime = rospy.Time.now()
                difference = afterTime.secs - beforeTime.secs
                difference += (afterTime.nsecs - beforeTime.nsecs) * 0.000000001
                if rx is not None and len(rx) > 1:
                    saveImage = rospy.get_param("/planner_PRM/saveImage")
                    if saveImage:
                        prm.saveToVideo(rx, ry, True)
                    stacked = np.stack((np.asarray(rx), np.asarray(ry)), -1)
                    # One message per frame; the duplicated pose-building loops
                    # of the original are factored into _buildPathMsg.
                    self.path_map = self._buildPathMsg(stacked, flipped=True)
                    self.path = self._buildPathMsg(stacked, flipped=False)
                    message = "goal found ! in " + str(difference) + "s"
                    rospy.loginfo(message)
                else:
                    message = "goal not found in " + str(difference) + "s... Try to change points or parameter of the planner."
                    rospy.logwarn(message)
        finally:
            self.mutex.release()

    def brodcastTransform(self, trans, rot, time, tf1, tf2):
        """Broadcast a single TF transform (name kept for compatibility)."""
        br = tf.TransformBroadcaster()
        br.sendTransform(trans, rot, time, tf1, tf2)

    ####################################################
    # Callbacks
    ####################################################
    def callback_timer(self, msg):
        """Re-broadcast the map->path transform and re-publish the last paths."""
        if self.oGrid is not None:
            self.brodcastTransform((-self.oGridOrigin[0], -self.oGridOrigin[1], 0.0), (0.0, 0.0, 0.0, 1.0), rospy.Time.now(), "map", "/path")
        if self.path is not None:
            self.pub.publish(self.path)
            self.pub_rviz.publish(self.path_map)

    def callback_startPos(self, msg):
        """Store the requested start pose and immediately re-plan."""
        self.startPoint = np.array([msg.pose.position.x, msg.pose.position.y])
        log = ": start point updated, point : " + str(self.startPoint)
        # Orientation is stored just in case; the planner never uses it
        # because the robot is treated as spherical.
        try:
            self.startOrientation = R.from_quat([msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w])
            log += ", quaternion : " + str(self.startOrientation.as_quat())
        except ValueError:
            self.startOrientation = None
            # FIX: the original message said "topic end" here (copy-paste).
            rospy.logwarn(rospy.get_caller_id() + ": Invalid quaternion given on topic start, quaternion set to None")
            log += ", quaternion : " + str(self.startOrientation)
        rospy.loginfo(rospy.get_caller_id() + log)
        self.executePlanner()

    def callback_endPos(self, msg):
        """Store the requested goal pose and immediately re-plan."""
        self.endPoint = np.array([msg.pose.position.x, msg.pose.position.y])
        log = ": end point updated, point : " + str(self.endPoint)
        # Orientation is stored just in case; unused by the planner (see above).
        try:
            self.endOrientation = R.from_quat([msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w])
            log += ", quaternion : " + str(self.endOrientation.as_quat())
        except ValueError:
            self.endOrientation = None
            rospy.logwarn(rospy.get_caller_id() + ": Invalid quaternion given on topic end, quaternion set to None")
            log += ", quaternion : " + str(self.endOrientation)
        rospy.loginfo(rospy.get_caller_id() + log)
        self.executePlanner()

    def callback_OGrid(self, msg):
        """Store the new occupancy grid and derive a binary OpenCV image.

        Inspired by https://github.com/jnez71/lqRRT
        """
        self.oGrid = np.array(msg.data).reshape((msg.info.height, msg.info.width))
        self.oGridOrigin = np.array([msg.info.origin.position.x, msg.info.origin.position.y])
        self.oGridCPM = 1 / msg.info.resolution
        self.oGridWidth = msg.info.width
        self.oGridHeight = msg.info.height
        # Cells with a value outside [0, 90) (unknown or occupied) become 255;
        # free cells become 0.
        oGridThreeshold = 90
        elements = range(0, oGridThreeshold)
        occImg = 255 * np.isin(self.oGrid, elements, invert=True).astype(np.uint8)
        # Flip horizontally so the image matches the RViz visualization.
        occImgFlip = cv2.flip(occImg, 1)
        self.image = occImgFlip
        height, width = occImg.shape
        rospy.loginfo(rospy.get_caller_id() + ": Occupency Grid updated, shape : " + str(self.oGrid.shape) + ", origin : " + str(self.oGridOrigin) + ", res : " + str(msg.info.resolution) )
        rospy.loginfo(rospy.get_caller_id() + ": Image shape : " + str(height) + ", " + str(width) )
if __name__ == '__main__':
    try:
        # Constructing the node blocks inside rospy.spin() until shutdown.
        Planner()
    except rospy.ROSInterruptException:
        pass
|
""" Making ej5.26 a module
Module containing implementation of Lagrange's interpolation polynomial as well as a ploting such polynomial for a given function, interval and number of sample points."""
import numpy as np
import matplotlib.pyplot as plt
import sys
_filename = sys.argv[0]
_usage = """%s "np.sin(x)", 5, 0, "np.pi" will plot the Lagrange interpolation polynomial p_L for sin(x) in [0,pi] using 5 sample points.
%s test checks for errors in the generation of p_L
""" % (_filename, _filename)
def p_L(x, xp, yp):
    """Evaluate the Lagrange interpolation polynomial at *x*.

    Parameters
    ----------
    x : float or ndarray
        Evaluation point(s).
    xp, yp : array-like
        Sample abscissae and ordinates; must have the same length.

    Raises
    ------
    ValueError
        If xp and yp differ in size.  (The original printed a message and
        silently returned 0.0, which callers could mistake for a result.)
    """
    xp = np.asarray(xp)
    yp = np.asarray(yp)
    if xp.size != yp.size:
        raise ValueError("Both data arrays must have the same size")
    pL = 0.0
    for k in range(xp.size):
        pL += yp[k] * L_k(x, k, xp, yp)
    return pL


def L_k(x, k, xp, yp):
    """Return the k-th Lagrange basis polynomial evaluated at *x*.

    `yp` is unused but kept for signature compatibility with existing callers.
    """
    Lk = 1.0
    for i in range(xp.size):
        if i != k:
            Lk *= (x - xp[i]) / (xp[k] - xp[i])
    return Lk
def graph(f, n, xmin, xmax, resolution=1001):
    """Plot the Lagrange interpolant of *f* built from *n* uniform samples."""
    sample_x = np.linspace(xmin, xmax, n)          # interpolation points
    sample_y = f(sample_x)
    dense_x = np.linspace(xmin, xmax, resolution)  # plotting points
    dense_y = p_L(dense_x, sample_x, sample_y)
    plt.plot(dense_x, dense_y, "o", markersize=1)
    plt.xlabel("x")
    plt.ylabel("y")
    plt.title("Interpolation of %s using %d uniformly distributed points \n between %g and %g" % (f.__name__, n, xmin, xmax))
def test_p_L():
    """Check the interpolant reproduces its own sin(x) sample points."""
    xq = np.linspace(0, np.pi, 5)
    yq = np.sin(xq)
    for xj, yj in zip(xq, yq):
        assert abs(p_L(xj, xq, yq) - yj) < 1e-10, "The polynomial misses some point(s)"
    x0 = (xq[1] + xq[2]) / 2   # trial point in the middle
    y0 = np.sin(x0)            # exact value
    ylagrange = p_L(x0, xq, yq)
    print("No errors. \nExample: for x=%.3f, sin(x)=%.4f and the interpolated value with 5 equally spaced points in [0, pi] gives %.4f" % (x0, y0, ylagrange))
if __name__ == "__main__":
if (len(sys.argv) == 2)and(sys.argv[1]=="test"):
test_p_L()
elif(len(sys.argv) == 5):
try:
form=sys.argv[1]
n=int(sys.argv[2])
xmin=eval(sys.argv[3])
xmax=eval(sys.argv[4])
except:
print(_usage)
sys.exit(1)
code=""" \ndef f(x):
return %s
""" %form
exec(code)
graph(f, n, xmin, xmax)
plt.show()
else:
print (_usage)
|
import json
from time import sleep
import requests
import os
import tempfile
import shutil
from flask import render_template
# Base URL used to probe whether packages/scopes exist on the public registry.
NPM_ADDRESS = 'https://www.npmjs.com/'
# Optional local intercepting proxy (e.g. Burp) for request debugging.
PROXIES = {
    'http': 'http://127.0.0.1:8080',
    'https': 'http://127.0.0.1:8080',
}
def parse_package(file):
    """Deserialize a JSON package manifest from an open file object."""
    manifest = json.load(file)
    return manifest
def extract_packages(file):
    """Return the `dependencies` mapping of a package.json file object.

    Raises KeyError when the manifest has no `dependencies` entry.
    """
    return json.load(file)['dependencies']
def check_package_exists(package_name):
    """True when the package page resolves (HTTP 200) on npmjs.com."""
    # Debug variant: requests.get(..., proxies=PROXIES, verify=False)
    url = NPM_ADDRESS + "package/" + package_name
    response = requests.get(url, allow_redirects=False)
    return response.status_code == 200
def is_scoped(package_name):
    """True when the package name carries an npm scope prefix (contains '/')."""
    return '/' in package_name
def check_scope_exists(package_name):
    """True when the '@scope' prefix of *package_name* resolves on npmjs.com."""
    scope_name = package_name.split('/')[0][1:]  # drop the leading '@'
    # Debug variant: requests.get(..., proxies=PROXIES, verify=False)
    response = requests.get(NPM_ADDRESS + "~" + scope_name, allow_redirects=False)
    return response.status_code == 200
def is_vulnerable(package_name):
    """True when *package_name* is a dependency-confusion candidate.

    An unregistered unscoped package is claimable outright; a scoped one is
    claimable only when its scope is unregistered too.
    """
    if check_package_exists(package_name):
        return False
    if is_scoped(package_name):
        return not check_scope_exists(package_name)
    return True
def get_vulnerable_packages(packages):
    # Lazily yields the names that are candidates for dependency confusion.
    for package in packages:
        sleep(1)  # prevent npm rate limit ban
        if is_vulnerable(package):
            yield package
def upload_package_by_npm(path):
    """Publish the npm package located at *path* via `npm publish`.

    The working directory is restored even if the publish step raises —
    the original left the process stranded in *path* on any exception.
    """
    oldcwd = os.getcwd()
    os.chdir(path)
    try:
        # NOTE: shells out; *path* must be trusted.
        os.system('npm publish')
    finally:
        os.chdir(oldcwd)
def remove_package_by_npm(path):
    """Unpublish the npm package located at *path* via `npm unpublish -f`.

    The working directory is restored even if the unpublish step raises —
    the original left the process stranded in *path* on any exception.
    """
    oldcwd = os.getcwd()
    os.chdir(path)
    try:
        # NOTE: shells out; *path* must be trusted.
        os.system('npm unpublish -f')
    finally:
        os.chdir(oldcwd)
def generate_package(project_id, package, publish):
    """Build a PoC payload package in a temp dir, then publish or unpublish it.

    NOTE(review): render_template needs an active Flask application/request
    context — confirm this is only called from inside a request handler.
    `package` is expected to expose `.name` and `.version`.
    """
    with tempfile.TemporaryDirectory() as poc_dir:
        shutil.copy('payload_package/index.js', poc_dir)
        shutil.copy('payload_package/extract.js', poc_dir)
        # The package.json template embeds the target name, a version that
        # out-resolves the legitimate one, and the project id for callbacks.
        packagejson_string = render_template("package.json", package_name=package.name, package_version=prepare_version_number(package.version), project_id=project_id)
        with open(poc_dir + "/package.json", "w") as packagejson_file:
            packagejson_file.write(packagejson_string)
        if publish:
            upload_package_by_npm(poc_dir)
        else:
            remove_package_by_npm(poc_dir)
def prepare_version_number(version: str):
    """Return a concrete version that out-resolves the *version* specifier.

    - plain numeric versions are returned unchanged
    - '^x.y.z' bumps the minor component
    - '~x.y.z' bumps the patch component

    Raises:
        ValueError: for an empty or unrecognized specifier.  (The original
            `raise "Broken version number"` raised a TypeError instead —
            strings are not exceptions in Python 3.)
    """
    if not version:
        raise ValueError("Broken version number")
    if version[0].isnumeric():
        return version
    if version[0] == '^':
        split_semver = version[1:].split('.')
        return "{}.{}.{}".format(split_semver[0], int(split_semver[1])+1, split_semver[2])
    if version[0] == '~':
        split_semver = version[1:].split('.')
        return "{}.{}.{}".format(split_semver[0], split_semver[1], int(split_semver[2])+1)
    raise ValueError("Broken version number")
from typing import List
'''
问题:
给定一个包含非负整数的 m x n 网格 grid ,请找出一条从左上角到右下角的路径,使得路径上的数字总和为最小。
说明:每次只能向下或者向右移动一步。
定义函数:
f(i,j)=>到i,j位置的最小路径值
边界:
f(0,0) = grid[0][0]
推导过程 (以 grid=[[1,3,1],[1,5,1],[4,2,1]] 为例):
第一行只能从左边来 => [1,4,5]
第一列只能从上边来 => [1,2,6]
其余位置取 min(上方, 左方) 再加当前格子的值
最终 f(2,2)=7 即最小路径和
最优子结构:
f(i,j) = min(f(i-1,j),f(i,j-1))+grid[i][j]
状态转移方程:
f(i,j)=
if i=0,j=0: grid[0][0]
elif i=0,j>0: f(i,j-1)+grid[i][j]
elif i>0,j=0: f(i-1,j)+grid[i][j]
else: min(f(i-1,j),f(i,j-1))+grid[i][j]
重叠子问题:
无
'''
class Solution:
    def minPathSum(self, grid: List[List[int]]) -> int:
        """Minimum top-left → bottom-right path sum, moving only right/down.

        Runs the DP in place: each cell is overwritten with the minimal path
        sum reaching it, so the input grid is mutated (as in the original).
        """
        rows, cols = len(grid), len(grid[0])
        for r in range(rows):
            for c in range(cols):
                if r == 0 and c == 0:
                    continue  # origin keeps its own value
                if r == 0:
                    grid[r][c] += grid[r][c - 1]
                elif c == 0:
                    grid[r][c] += grid[r - 1][c]
                else:
                    grid[r][c] += min(grid[r - 1][c], grid[r][c - 1])
        return grid[-1][-1]
def test1():
    # 3x3 sample; the minimal path 1→3→1→1→1 sums to 7.
    solver = Solution()
    print(solver.minPathSum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))
def test2():
    # 2x3 sample; the minimal path 1→2→3→6 sums to 12.
    solver = Solution()
    print(solver.minPathSum([[1, 2, 3], [4, 5, 6]]))
if __name__ == '__main__':
    # Exercise both sample grids (expected output: 7 then 12).
    test1()
    test2()
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import multiprocessing
import time
import os
# Module-level list mutated by both the parent and child process to show
# that separate processes do not share memory.
datalist=['+++']
def adddata():
    """Append sample values to the module list and report this process's PID."""
    global datalist
    for value in (1, 2, 3):
        datalist.append(value)
    print("sub process", os.getpid(), datalist)
if __name__=="__main__":
p=multiprocessing.Process(target=adddata,args=())
p.start()
p.join()
datalist.append("a")
datalist.append("b")
datalist.append("c")
print("main process",os.getpid(),datalist)
|
print"hello world"
import sys
sys.path.append('C:\\Python27\\Scripts\\pip.exe')
pip install beautifulsoup4
|
# standard library imports
import logging
import json
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.api import images
import webapp2
from web.lib import utils
import cms_utils
from web.lib.basehandler import BaseHandler
from web.lib.decorators import role_required
from web.handlers.cms import cms_forms as forms
from models import Business, ContactInfo
from web.dao.dao_factory import DaoFactory
logger = logging.getLogger(__name__)
class ManageBusinessHandler(blobstore_handlers.BlobstoreUploadHandler, BaseHandler):
    """CMS handler (Python 2 / GAE webapp2) to create or edit a Business,
    including its logo upload through the Blobstore."""

    businessDao = DaoFactory.create_rw_businessDao()

    @role_required('business')
    def get(self, business_id=None):
        """Render the create/edit form; pre-populate it when editing."""
        params = {}
        params['title'] = 'Create New Business'
        upload_url = self.uri_for('create-business')
        if business_id is not None and len(business_id) > 1:
            upload_url = self.uri_for('edit-business', business_id = business_id)
            business = self.businessDao.get_record(business_id)
            params['title'] = 'Update - ' + str(business.name)
            if business.logo:
                params['current_logo'] = images.get_serving_url(business.logo)
            # NOTE(review): assigning self.form overrides the cached_property
            # below for this request — assumes webapp2.cached_property stores
            # in the instance dict; confirm.
            self.form = cms_utils.dao_to_form_contact_info(business, forms.BusinessForm(self, business))
        logger.debug('upload_url' + upload_url)
        # Blobstore requires a one-shot upload URL that redirects back here.
        params['media_upload_url'] = blobstore.create_upload_url(upload_url)
        return self.render_template('/cms/create_business.html', **params)

    @role_required('business')
    def post(self, business_id=None):
        """Validate the form, persist the Business and attach an uploaded logo."""
        params = {}
        if not self.form.validate():
            # Re-render the form with validation errors.
            if business_id is not None and len(business_id) > 1:
                return self.get(business_id)
            else:
                return self.get()
        business = self.form_to_dao(business_id)
        upload_files = self.get_uploads('logo') # 'logo' is file upload field in the form
        if upload_files is not None and len(upload_files) > 0:
            blob_info = upload_files[0]
            business.logo = blob_info.key()
            logger.info('Link to logo ' + images.get_serving_url(business.logo))
        logger.debug('business populated ' + str(business))
        key = self.businessDao.persist(business, self.user_info)
        logger.debug('key ' + str(key))
        if key is not None:
            logger.info('Business succesfully created/updated')
            message = ('Business succesfully created/updated.')
            self.add_message(message, 'success')
            return self.redirect_to('dashboard', **params)
        else:
            logger.error('business creation failed')
            message = ('Business creation failed.')
            self.add_message(message, 'error')
            self.form = forms.BusinessForm(self, business)
            return self.render_template('/cms/create_business.html', **params)

    @webapp2.cached_property
    def form(self):
        # Default form instance; get()/post() may replace it per request.
        return forms.BusinessForm(self)

    def form_to_dao(self, business_id):
        """Copy validated form fields onto a new or freshly loaded Business."""
        business = None
        if business_id is not None and len(business_id) > 1:
            # `long` — Python 2 only.
            business = self.businessDao.get_record(long(business_id))
            logger.debug('business ' + str(business))
        else:
            business = Business()
            logger.debug('business 2 ' + str(business))
        business.name = self.form.name.data
        #Create an automatic alias for the business
        business.alias = utils.slugify(self.form.name.data)
        business.description = self.form.description.data
        return cms_utils.form_to_dao_contact_info(self.form, business)
# Write a Python program to access and print a URL's content to the console.
from http.client import HTTPConnection
class contentURL:
    """Fetch http://example.com/ over plain HTTP and print the raw body."""

    def consoleURL(self):
        # Prints the response body as bytes (no decoding is attempted).
        conn=HTTPConnection("example.com")
        conn.request("GET","/")
        result=conn.getresponse()
        contents=result.read()
        print(contents)
# Run the fetch immediately when the module is executed.
obj=contentURL()
obj.consoleURL()
from django.shortcuts import get_object_or_404
from rest_framework import generics, status
from rest_framework.response import Response
from django.forms.models import model_to_dict
from rest_framework.views import APIView
from .models import AbilityScore, Skill, Spell, MagicSchool, SpellCastingClass
from .serializers import AbilityScoreListSerializer, AbilityScoreSerializer, SkillListSerializer, SkillSerializer, SpellsListSerializer, SpellsSerializer
class AbilityScoreList(generics.ListCreateAPIView):
    """List all ability scores, or create one via the default serializer."""
    queryset = AbilityScore.objects.all()
    serializer_class = AbilityScoreSerializer
class SkillList(generics.ListCreateAPIView):
    """List skills, or create one with its ability score resolved by name."""
    queryset = Skill.objects.all()
    serializer_class = SkillSerializer

    def post(self, request, *args, **kwargs):
        data = request.data
        # Pops 'ability_score' out of the payload before validation and
        # resolves it to a model instance (404s if it does not exist).
        ability_score = get_attribute_by_name(data, 'ability_score', AbilityScore)
        skill = SkillSerializer(data=data)
        if skill.is_valid():
            # NOTE(review): the FK is attached after the first save and the
            # serializer is saved again; relies on serializer.save() updating
            # the same instance — confirm against SkillSerializer.
            skill_object = skill.save()
            skill_object.ability_score = ability_score
            skill_object = skill.save()
        else:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        return Response(model_to_dict(skill_object), status.HTTP_200_OK)
class GetAbilityScore(APIView):
    """Fetch one AbilityScore by numeric id or case-insensitive name."""

    def get(self, request, name_or_id):
        lookup = {"id": int(name_or_id)} if name_or_id.isdigit() else {"name__iexact": name_or_id}
        record = get_object_or_404(AbilityScore, **lookup)
        return Response(model_to_dict(record), status.HTTP_200_OK)
class GetSkill(APIView):
    """Fetch one Skill by id or name, embedding its ability-score record."""

    def get(self, request, name_or_id):
        lookup = {"id": int(name_or_id)} if name_or_id.isdigit() else {"name__iexact": name_or_id}
        record = get_object_or_404(Skill, **lookup)
        skill = model_to_dict(record)
        # Replace the FK id with the full ability-score dict.
        skill['ability_score'] = model_to_dict(
            get_object_or_404(AbilityScore, id=skill['ability_score']))
        return Response(skill, status.HTTP_200_OK)
class SpellList(generics.ListCreateAPIView):
    """List spells, or create one with its magic school resolved by name."""
    queryset = Spell.objects.all()
    serializer_class = SpellsListSerializer

    def post(self, request, *args, **kwargs):
        data = request.data
        # Pops 'school' out of the payload before validation and resolves it
        # to a MagicSchool instance (404s if it does not exist).
        magic_school = get_attribute_by_name(data, 'school', MagicSchool)
        spell = SpellsSerializer(data=data)
        if spell.is_valid():
            # NOTE(review): same double-save pattern as SkillList.post —
            # relies on serializer.save() updating the same instance.
            spell_object = spell.save()
            spell_object.school = magic_school
            spell_object = spell.save()
        else:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        return Response(model_to_dict(spell_object), status.HTTP_200_OK)
class GetSpell(APIView):
    """Fetch one Spell by id or name, embedding its magic-school record."""

    def get(self, request, name_or_id):
        lookup = {"id": int(name_or_id)} if name_or_id.isdigit() else {"name__iexact": name_or_id}
        record = get_object_or_404(Spell, **lookup)
        spell = model_to_dict(record)
        # Replace the FK id with the full magic-school dict.
        spell['school'] = model_to_dict(
            get_object_or_404(MagicSchool, id=spell['school']))
        return Response(spell, status.HTTP_200_OK)
class SpellCastingList(generics.ListCreateAPIView):
    """List spell-casting classes."""
    # NOTE(review): serializer_class is SpellsListSerializer — this looks like
    # a copy-paste from SpellList; confirm a SpellCasting serializer was not
    # intended here.
    queryset = SpellCastingClass.objects.all()
    serializer_class = SpellsListSerializer
class GetSpellCasting(APIView):
    """Fetch one SpellCastingClass by id or name, embedding its ability score."""

    def get(self, request, name_or_id):
        lookup = {"id": int(name_or_id)} if name_or_id.isdigit() else {"name__iexact": name_or_id}
        record = get_object_or_404(SpellCastingClass, **lookup)
        spell_casting = model_to_dict(record)
        # Replace the FK id with the full ability-score dict.
        spell_casting['ability_score'] = model_to_dict(
            get_object_or_404(AbilityScore, id=spell_casting['ability_score']))
        return Response(spell_casting, status.HTTP_200_OK)
def get_attribute_by_name(data, attribute_name, model_type):
    """Resolve a nested {'name': ...} payload entry to a model instance.

    NOTE: pops *attribute_name* from *data*, mutating the caller's payload —
    deliberate, so the nested dict is gone before serializer validation.
    Raises Http404 (via get_object_or_404) when no matching record exists.
    """
    attribute_value = data.pop(attribute_name)
    attribute_to_get = attribute_value['name']
    attribute_model = get_object_or_404(model_type, name__iexact=attribute_to_get)
    return attribute_model
|
# Generated by Django 3.1.6 on 2021-02-24 10:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds company-detail fields to Contact and relaxes existing fields
    to be optional (blank/null).

    Auto-generated by Django — prefer creating a new migration over
    hand-editing this one.
    """

    dependencies = [
        ('contact', '0013_head'),
    ]

    operations = [
        migrations.AddField(
            model_name='contact',
            name='adress',
            field=models.CharField(blank=True, max_length=150, null=True, verbose_name='Юр. адрес'),
        ),
        migrations.AddField(
            model_name='contact',
            name='ipn',
            field=models.CharField(blank=True, max_length=150, null=True, verbose_name='ІПН'),
        ),
        migrations.AddField(
            model_name='contact',
            name='name',
            field=models.CharField(blank=True, max_length=150, null=True, verbose_name='Назва пiдприємства'),
        ),
        migrations.AddField(
            model_name='contact',
            name='r_r',
            field=models.CharField(blank=True, max_length=150, null=True, verbose_name='р/р'),
        ),
        migrations.AlterField(
            model_name='contact',
            name='date',
            field=models.DateTimeField(auto_now_add=True, null=True),
        ),
        migrations.AlterField(
            model_name='contact',
            name='edrpo',
            field=models.CharField(blank=True, max_length=150, null=True, verbose_name='ЄДРПОУ'),
        ),
        migrations.AlterField(
            model_name='contact',
            name='email',
            field=models.EmailField(blank=True, max_length=150, null=True, verbose_name='e-mail'),
        ),
        migrations.AlterField(
            model_name='contact',
            name='fio',
            field=models.CharField(blank=True, max_length=150, null=True, verbose_name='ПIБ'),
        ),
        migrations.AlterField(
            model_name='contact',
            name='login',
            field=models.CharField(blank=True, max_length=150, null=True, verbose_name='Логин'),
        ),
        migrations.AlterField(
            model_name='contact',
            name='message',
            field=models.TextField(blank=True, null=True, verbose_name='Додаткова iнформацiя про участника'),
        ),
        migrations.AlterField(
            model_name='contact',
            name='password',
            field=models.CharField(blank=True, max_length=150, null=True, verbose_name='Пароль'),
        ),
        migrations.AlterField(
            model_name='contact',
            name='phone',
            field=models.CharField(blank=True, max_length=150, null=True, verbose_name='Телефон'),
        ),
    ]
|
import json
from urllib.parse import urlencode  # stdlib location in Python 3 (was `urllib` in py2)
import pytest
import responses
from dhis2 import exceptions
from dhis2.api import Api
from .common import BASEURL, API_URL
@pytest.fixture  # BASE FIXTURE
def api():
    """Api client with default credentials and no explicit API version."""
    return Api(BASEURL, "admin", "district")
@pytest.fixture  # BASE FIXTURE
def api_with_api_version():
    """Api client pinned to API version 30."""
    return Api(BASEURL, "admin", "district", api_version=30)
# ------------------
# GENERAL API STUFF
# ------------------
@responses.activate
def test_post(api):
    """POST to /metadata hits the expected URL exactly once."""
    url = "{}/metadata".format(API_URL)
    p = {"obj": "some data"}
    responses.add(responses.POST, url, json=p, status=201)
    api.post(endpoint="metadata", data=p)
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == url
@responses.activate
def test_put(api):
    """PUT to a resource endpoint hits the expected URL exactly once."""
    url = "{}/organisationUnits/uid".format(API_URL)
    p = {"obj": "some data"}
    responses.add(responses.PUT, url, json=p, status=200)
    api.put(endpoint="organisationUnits/uid", data=p)
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == url
@responses.activate
def test_patch(api):
    """PATCH to a resource endpoint hits the expected URL exactly once."""
    url = "{}/organisationUnits/uid".format(API_URL)
    p = {"obj": "some data"}
    responses.add(responses.PATCH, url, json=p, status=200)
    api.patch(endpoint="organisationUnits/uid", data=p)
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == url
@responses.activate
def test_delete(api):
    """DELETE forwards both a JSON body and query params to the URL."""
    url = "{}/organisationUnits/uid?a=b".format(API_URL)
    p = {"obj": "some data"}
    responses.add(responses.DELETE, url, json=p, status=200)
    api.delete(endpoint="organisationUnits/uid", json=p, params={"a": "b"})
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == url
@responses.activate
def test_info(api):
    """The `info` property fetches /system/info.json and returns parsed JSON."""
    url = "{}/system/info.json".format(API_URL)
    r = {"contextPath": "https://play.dhis2.org/2.30"}
    responses.add(responses.GET, url, json=r, status=200)
    prop = api.info
    assert prop == r
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == url
    assert responses.calls[0].response.text == json.dumps(r)
@pytest.mark.parametrize(
    "status_code",
    [
        400, 401, 402, 403, 404, 405, 406, 407, 408, 409,
        410, 411, 412, 413, 414, 415, 416, 417, 418, 421,
        422, 423, 424, 426, 428, 429, 431, 451, 444, 494,
        495, 496, 497, 499, 500, 501, 502, 503, 504, 505,
        506, 507, 508, 510, 511,
    ],
)
@responses.activate
def test_client_server_errors(api, status_code):
    """Any 4xx/5xx response raises RequestException carrying code/url/description."""
    url = "{}/dataElements/foo.json".format(API_URL)
    responses.add(responses.GET, url, body="something failed", status=status_code)
    with pytest.raises(exceptions.RequestException) as e:
        api.get(endpoint="dataElements/foo")
    assert e.value.code == status_code
    assert e.value.url == url
    assert e.value.description == "something failed"
    # str()/repr() must be non-empty and not raise.
    assert str(e.value)
    assert repr(e.value)
    assert len(responses.calls) == 1
    assert responses.calls[0].request.url == url
# ------------------
# PRE-REQUEST VALIDATION
# ------------------
@pytest.mark.parametrize(
    "endpoint", ["", " ", None, [], {"endpoint": "organisationUnits"}]
)
def test_requests_invalid_endpoint(api, endpoint):
    """Blank/None/non-string endpoints fail before any request is made."""
    with pytest.raises(exceptions.ClientException):
        api.get(endpoint)
@pytest.mark.parametrize("endpoint", ["organisationUnits", "schemas", u"schemas"])
@responses.activate
def test_requests_valid_endpoint(api, endpoint):
    """Valid (str and unicode) endpoints appear in the request URL."""
    url = "{}/{}.json".format(API_URL, endpoint)
    r = {"version": "unknown"}
    responses.add(responses.GET, url, json=r, status=200)
    api.get(endpoint)
    assert endpoint in responses.calls[0].request.url
@pytest.mark.parametrize("file_type", [".hello", "", " ", u"EXCEL"])
def test_requests_invalid_file_type(api, file_type):
    """Unknown file types are rejected before any request is made."""
    with pytest.raises(exceptions.ClientException):
        api.get("organisationUnits", file_type=file_type)
@pytest.mark.parametrize("file_type", ["csv", "CSV", "JSON", "xml", "pdf"])
@responses.activate
def test_requests_valid_file_type(api, file_type):
    """Accepted file types are lowercased into the URL suffix."""
    endpoint = "dataElements"
    url = "{}/{}.{}".format(API_URL, endpoint, file_type.lower())
    responses.add(responses.GET, url, status=200)
    api.get(endpoint, file_type=file_type)
    assert "{}.{}".format(endpoint, file_type.lower()) in responses.calls[0].request.url
@pytest.mark.parametrize("params", ['{ "hello": "yes" }', (1, 2)])
def test_requests_invalid_params_tuples(api, params):
    """Params must be a dict or list of tuples; strings/tuples are rejected."""
    with pytest.raises(exceptions.ClientException):
        api.get("organisationUnits", params=params)
@pytest.mark.parametrize(
    "params", [dict(), {"data": "something"}, [("data", "something")]]
)
@responses.activate
def test_requests_valid_params(api, params):
    """Valid param containers are URL-encoded into the query string."""
    endpoint = "dataElements"
    url = "{}/{}.json".format(API_URL, endpoint)
    responses.add(responses.GET, url, status=200)
    api.get(endpoint, params=params)
    param_string = urlencode(params)
    assert param_string in responses.calls[0].request.url
@pytest.mark.parametrize(
    "params",
    [
        ("paging", False),  # must be list
        [("paging", False), 3],  # must be list of tuples
    ],
)
def test_requests_invalid_params_list_of_tuples(api, params):
    """Tuple-style params must be a list where every element is a tuple."""
    with pytest.raises(exceptions.ClientException):
        api.get("organisationUnits", params=params)
@pytest.mark.parametrize("data", ['{ "hello": "yes" }', (1, 2)])
def test_requests_invalid_data(api, data):
    """POST payloads must be dicts; strings and tuples are rejected."""
    with pytest.raises(exceptions.ClientException):
        api.post("organisationUnits", data=data)
@pytest.mark.parametrize("data", [{"data": "something"}])
@responses.activate
def test_requests_valid_data(api, data):
    """A dict payload posts without raising."""
    endpoint = "dataElements"
    url = "{}/{}".format(API_URL, endpoint)
    responses.add(responses.POST, url, json=data, status=204)
    api.post(endpoint, data=data)
def test_invalid_http_method(api):
    """Unknown HTTP verbs are rejected by the low-level request builder."""
    with pytest.raises(exceptions.ClientException):
        api._make_request("update", "dataElements")
@responses.activate
def test_json_arg_valid(api):
    """Both `data=` and `json=` keyword forms are accepted for POST bodies."""
    endpoint = "dataElements"
    url = "{}/{}".format(API_URL, endpoint)
    data = {"data": "something"}
    responses.add(responses.POST, url, json=data, status=204)
    api.post(endpoint, data=data)
    api.post(endpoint, json=data)
@responses.activate
def test_kwargs(api):
    """Extra kwargs (e.g. timeout) pass through to the underlying request."""
    endpoint = "dataElements"
    url = "{}/{}.json".format(API_URL, endpoint)
    responses.add(responses.GET, url, status=200)
    api.get(endpoint, timeout=1)
    assert len(responses.calls) == 1
|
def double_char(s):
    """Return *s* with every character repeated twice ("abc" -> "aabbcc")."""
    return "".join(ch * 2 for ch in s)
|
import datetime
from django.test import TestCase, LiveServerTestCase
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.core.exceptions import ValidationError
from .models import ProjectDb
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class ProjectViewsTests(LiveServerTestCase):
    """Browser-level (Selenium) and model-level tests for projects."""

    def setUp(self):
        """ Make some users to be project owners. """
        self.u1 = User.objects.create_user('u1', 'u1@example.com', 'u1')
        self.u2 = User.objects.create_user('u2', 'u2@example.com', 'u2')

    def test_thing(self):
        # NOTE(review): the early return below disables the entire Selenium
        # flow (it requires a local Firefox/geckodriver); remove it to
        # re-enable the browser test.
        return
        driver = webdriver.Firefox()
        # driver = webdriver.Chrome()
        driver.maximize_window()
        driver.get("https://docs.djangoproject.com/en/1.11/")
        search_box = driver.find_element_by_name("q")
        search_box.send_keys("testing")
        search_box.send_keys(Keys.RETURN)
        assert "Search" in driver.title
        # Locate first result in page using css selectors.
        result = driver.find_element_by_css_selector("div#search-results a")
        result.click()
        assert "testing" in driver.title.lower()
        driver.quit()

    def test_something(self):
        """ Slug for only project for user is not changed. """
        p = ProjectDb()
        p.user = self.u1
        p.title = "This is a title"
        p.save()
        # NOTE(review): the slug assertions are disabled — re-enable once
        # slug generation is implemented on ProjectDb.
        # self.assertEqual(p.slug, "this-is-a-title")
        # self.assertFalse(p.slug_changed)

    def test_something_else(self):
        """ Project must have a user. """
        with self.assertRaises(ObjectDoesNotExist):
            p = ProjectDb()
            p.title = "DOG"
            p.save()
|
from collections import Counter
with open("alice_in.txt", 'r') as file:
alice = file.readlines() # list of all of our lines
alice = alice[32:3371]
alice = "".join(alice)
alice = alice.split(" ")
l = []
for word in alice:
if len(word) > 0:
l.append("".join(filter(str.isalpha, word)))
print("Number of words: " + str(len(l)))
print("Number of paragraphs: ?")
print("Twenty most common words: ")
# Counter makes an ordered dictionary
twenty = Counter(l).most_common(20)
for i in range(len(twenty)):
if twenty[i] == twenty[-1]:
print(str(twenty[i][0]))
else:
print(str(twenty[i][0]) + ", ", end="")
|
import itertools

# N cities, target tour length K; T is the N x N travel-time matrix.
N, K = map(int, input().split())
T = [list(map(int, input().split())) for i in range(N)]

# Count closed tours starting (and ending) at city 0 whose total travel
# time equals K.  Fixing city 0 as the start means we permute only the
# remaining N-1 cities — the original generated all N! permutations into
# a list and then discarded every one not starting with 0.
cnt = 0
for rest in itertools.permutations(range(1, N)):
    route = (0,) + rest
    length = sum(T[route[i]][route[i + 1]] for i in range(N - 1))
    length += T[route[-1]][0]  # return leg back to the start city
    if length == K:
        cnt += 1
print(cnt)
import nltk
import sys
import re
'''is used to add the coref label to the dataset'''
# NOTE(review): this is Python 2 code (bare `print sent` statements below).
# Bracket types map to entity ids: []->1, ()->2, {}->3.  Opening brackets
# become "(<id>" and closing brackets "<id>)" in CoNLL coref notation.
entityIdx = {'[':'1',']':'1','(':'2',')':'2','{':'3','}':'3'}
lines = open(sys.argv[1]).readlines()
#lines = ["[The carpenter] is complaining to the cashier becasue [he] was over-charged.","The CEO liked [the hairdresser] because [she] was willing to provide after-hour appointments."]
sents = []
annotations = []
for line in lines:
    # Drop a leading line number, then tokenize.
    line = re.sub('^\s*[0-9]+\s*', '', line)
    tokens = nltk.word_tokenize(line)
    print(tokens)
    annotation = []
    sent = []
    # `buffer` accumulates pending coref labels for the current mention;
    # `flag` == 1 means the last opened mention is single-token, so the
    # closing bracket should append ')' to the existing label.
    buffer = []
    flag = 0
    for idx in range(0,len(tokens)):
        word = tokens[idx]
        if word == '.':
            sent.append('.')
            annotation.append('-')
            break
        if word in ['[', '(', '{']:
            buffer.append('('+entityIdx[word])
            if word == '(' or word == ')': #as we only use "[" and "]" to annotate the coref chain, this is used for our debugging. Change this as needed.
                sys.exit()
        elif word in [']', ')', '}']:
            if flag == 1:
                # Single-token mention: close the label opened one step ago.
                buffer[-1] += ')'
                flag = 0
            else:
                buffer.append(entityIdx[word]+')')
            annotation.append('|'.join(buffer))
            buffer = []
        else:
            sent.append(word)
            # Look ahead/behind to decide whether this token opens,
            # continues, or sits outside a mention.
            if tokens[idx+1] not in [']',')','}']:
                if idx > 0 and (tokens[idx-1] in ['[','(','{']):
                    annotation.append('|'.join(buffer))
                    buffer = []
                else:
                    annotation.append('-')
            else:
                if idx > 0 and (tokens[idx-1] in ['[','(','{']):
                    flag = 1
    # print tokens
    # print line
    print sent
    print annotation
    annotations.append(annotation)
    sents.append(sent)
#sys.exit()
# Second pass: splice the computed coref column into existing CoNLL files
# (one file per sentence, named sys.argv[2] + index).
for i in range(len(annotations)):
    print(len(annotations[i]))
    print(annotations[i])
    with open(sys.argv[2]+str(i), 'r') as f:
        doc = []
        lines = f.readlines()
        print(len(lines)-3)
        for line_id in range(len(lines)):
            words = lines[line_id].strip().split()
            # First and last lines are the #begin/#end document markers.
            if line_id == 0 or line_id == len(lines)-1 :
                doc.append(lines[line_id])
                continue
            if len(words) == 0:
                doc.append('\n')
                continue
            print(words)
            # Column 9 is the speaker; pad three '*' columns before the
            # final coref column, then overwrite it with our annotation.
            words[9] = 'Speaker#1'
            words.insert(-2, '*')
            words.insert(-2, '*')
            words.insert(-2, '*')
            print(line_id)
            words[-1] = annotations[i][line_id-1]
            doc.append('\t'.join(words)+'\n')
    with open(sys.argv[3]+str(i)+".v4_auto_conll", 'w') as f:
        for sen in doc:
            f.write(sen)
|
# Count passwords satisfying their policy.  Each input line has the form
# "<lo>-<hi> <char>: <password>": the password is valid when <char>
# occurs between lo and hi times (inclusive).
total = 0
with open('./input.txt') as f:
    for line in f:  # iterate lazily instead of readlines()
        lo, rest = line.split('-')
        hi, char, password = rest.split(' ')
        lo = int(lo)
        hi = int(hi)
        char = char[:-1]  # drop the trailing ':'
        # str.count replaces the manual sum(1 if ... else 0) generator.
        occurrences = password.count(char)
        if lo <= occurrences <= hi:
            total += 1
print(total)
#Embedded file name: eve/client/script/ui/services/mail\mailingListsSvc.py
import service
import util
class MailingLists(service.Service):
    """Client-side mailing-list service.

    Caches the lists this character has joined (self.myMailingLists,
    keyed by listID) plus display info for foreign lists
    (self.externalLists), and forwards all mutating operations to the
    remote 'mailingListsMgr' service.

    NOTE(review): relies on game-framework globals injected at runtime
    (sm, session, const, UserError) that are not imported in this file.
    """
    __exportedcalls__ = {'GetDisplayName': [],
     'GetMyMailingLists': [],
     'CreateMailingList': [],
     'JoinMailingList': [],
     'LeaveMaillist': [],
     'DeleteMaillist': [],
     'GetMembers': [],
     'KickMembers': [],
     'SetEntityAccess': [],
     'ClearEntityAccess': [],
     'SetMembersMuted': [],
     'SetMembersOperator': [],
     'SetMembersClear': [],
     'SetDefaultAccess': [],
     'GetSettings': [],
     'GetWelcomeMail': [],
     'SaveWelcomeMail': [],
     'SaveAndSendWelcomeMail': [],
     'ClearWelcomeMail': []}
    __guid__ = 'svc.mailinglists'
    __servicename__ = 'mailinglists'
    __displayname__ = 'Mailing Lists'
    __notifyevents__ = ['OnMailingListSetOperator',
     'OnMailingListSetMuted',
     'OnMailingListSetClear',
     'OnMailingListLeave',
     'OnMailingListDeleted']
    def __init__(self):
        service.Service.__init__(self)
        # Populated in Run() from the remote service; None until then.
        self.myMailingLists = None
    def Run(self, memStream = None):
        """Start the service: fetch the lists this character has joined."""
        self.state = service.SERVICE_START_PENDING
        self.LogInfo('Starting Mailing Lists Svc')
        self.objectCaching = sm.services['objectCaching']
        self.mailingListsMgr = sm.RemoteSvc('mailingListsMgr')
        self.myMailingLists = self.mailingListsMgr.GetJoinedLists()
        # Cache of display info for lists we have looked up but not joined.
        self.externalLists = {}
        self.state = service.SERVICE_RUNNING
    def GetMyMailingLists(self):
        """
        Get the mailing lists that we have joined. The result is a dict of keyvals
        with the listID as the dict key and the keyval with the following entries:
        * name
        * displayName
        * isMuted
        * isOperator
        * isOwner
        """
        return self.myMailingLists
    def GetDisplayName(self, listID):
        """
        Get the name of a specified mailing list, this can be done also for lists
        that you don't belong to.
        """
        # Check both caches before hitting the remote service.
        if listID in self.myMailingLists:
            return self.myMailingLists[listID].displayName
        if listID in self.externalLists:
            return self.externalLists[listID].displayName
        info = self.mailingListsMgr.GetInfo(listID)
        if info is None:
            raise UserError('MailingListNoSuchList')
        self.externalLists[listID] = info
        return info.displayName
    def CreateMailingList(self, name, defaultAccess = const.mailingListAllowed, defaultMemberAccess = const.mailingListMemberDefault, cost = 0):
        """
        Create a new mailing list. Raises a user error if creation fails, e.g. if
        the name is taken.
        """
        ret = sm.RemoteSvc('mailingListsMgr').Create(name, defaultAccess, defaultMemberAccess, cost)
        # Mirror the new list into the local cache and notify the UI.
        key, displayName = util.GetKeyAndNormalize(name)
        self.myMailingLists[ret] = util.KeyVal(name=key, displayName=displayName, isMuted=False, isOperator=False, isOwner=True)
        sm.ScatterEvent('OnMyMaillistChanged')
        return ret
    def JoinMailingList(self, name):
        """
        Join the specified mailing list. Raises a user error if the list doesn't exist.
        """
        ret = self.mailingListsMgr.Join(name)
        self.myMailingLists[ret.id] = ret
        sm.ScatterEvent('OnMyMaillistChanged')
        return ret.id
    def LeaveMaillist(self, listID):
        """
        Leave the specified mailing list
        """
        self.mailingListsMgr.Leave(listID)
        try:
            del self.myMailingLists[listID]
        except KeyError:
            pass
        sm.ScatterEvent('OnMyMaillistChanged')
    def DeleteMaillist(self, listID):
        """
        Delete the specified mailing list. Only the owner of the list can do this.
        """
        self.mailingListsMgr.Delete(listID)
        try:
            del self.myMailingLists[listID]
        except KeyError:
            pass
        sm.ScatterEvent('OnMyMaillistChanged')
    def KickMembers(self, listID, memberIDs):
        """
        Kicks the specified members from the list. Only an operator can do this.
        """
        self.mailingListsMgr.KickMembers(listID, memberIDs)
        # Membership changed server-side; drop the stale cached member list.
        self.objectCaching.InvalidateCachedMethodCall('mailingListsMgr', 'GetMembers', listID)
    def GetMembers(self, listID):
        """
        Gets all members of the list. Only an operator can do this.
        """
        members = self.mailingListsMgr.GetMembers(listID)
        # Pre-fetch owner info so names resolve without per-row lookups.
        sm.GetService('mailSvc').PrimeOwners(members.keys())
        return members
    def SetEntityAccess(self, listID, entityID, access):
        """
        Set mailing list access to const.mailingListBlocked or const.mailingListAllowed for
        the specified entity (char, corp or alliance)
        This can only be done by an operator
        """
        self.mailingListsMgr.SetEntityAccess(listID, entityID, access)
    def ClearEntityAccess(self, listID, entityID):
        """
        Clear access setting (blocked/allowed) for the specified entity and the
        given mailing list
        This can only be done by an operator
        """
        self.mailingListsMgr.ClearEntityAccess(listID, entityID)
    def SetMembersMuted(self, listID, memberIDs):
        """
        Set mailing list access to muted for the specified members.
        This can only be done by an operator
        """
        self.mailingListsMgr.SetMembersMuted(listID, memberIDs)
        self.objectCaching.InvalidateCachedMethodCall('mailingListsMgr', 'GetMembers', listID)
    def SetMembersOperator(self, listID, memberIDs):
        """
        Set mailing list access to operator for the specified members.
        This can only be done by an operator
        """
        self.mailingListsMgr.SetMembersOperator(listID, memberIDs)
        self.objectCaching.InvalidateCachedMethodCall('mailingListsMgr', 'GetMembers', listID)
    def SetMembersClear(self, listID, memberIDs):
        """
        Clear mailing list access (operator/muted) for the specified members.
        This can only be done by an operator
        """
        self.mailingListsMgr.SetMembersClear(listID, memberIDs)
        self.objectCaching.InvalidateCachedMethodCall('mailingListsMgr', 'GetMembers', listID)
    def SetDefaultAccess(self, listID, defaultAccess, defaultMemberAccess, mailCost = 0):
        """
        Set default mailing list access to const.mailingListBlocked or const.mailingListAllowed and
        defaultMemberAccess to const.mailingListMemberMuted, const.mailingListMemberDefault or
        const.mailingListMemberOperator. Also set charge if any.
        This can only be done by an operator
        """
        self.mailingListsMgr.SetDefaultAccess(listID, defaultAccess, defaultMemberAccess, mailCost)
    def GetSettings(self, listID):
        """
        Gets cost and access settings for the list
        """
        return self.mailingListsMgr.GetSettings(listID)
    def OnMailingListSetOperator(self, listID):
        """
        You have been set as operator on the list
        """
        # Operator and muted are mutually exclusive in the cached keyval.
        if listID in self.myMailingLists:
            self.myMailingLists[listID].isOperator = True
            self.myMailingLists[listID].isMuted = False
    def OnMailingListSetMuted(self, listID):
        """
        You have been muted on the list
        """
        if listID in self.myMailingLists:
            self.myMailingLists[listID].isMuted = True
            self.myMailingLists[listID].isOperator = False
    def OnMailingListSetClear(self, listID):
        """
        You have been unmuted and unset as operator on the list
        """
        if listID in self.myMailingLists:
            self.myMailingLists[listID].isMuted = False
            self.myMailingLists[listID].isOperator = False
    def OnMailingListLeave(self, listID, characterID):
        """
        Notify when a character leaves a mailing list (is kicked out)
        """
        # Only act when it is this session's character who was removed.
        if characterID == session.charid and listID in self.myMailingLists:
            try:
                del self.myMailingLists[listID]
            except KeyError:
                pass
            sm.ScatterEvent('OnMyMaillistChanged')
    def OnMailingListDeleted(self, listID):
        """
        Notify when a list is deleted
        """
        if listID in self.myMailingLists:
            try:
                del self.myMailingLists[listID]
            except KeyError:
                pass
            sm.ScatterEvent('OnMyMaillistChanged')
    def GetWelcomeMail(self, listID):
        # Straight pass-throughs to the remote service (no local caching).
        return self.mailingListsMgr.GetWelcomeMail(listID)
    def SaveWelcomeMail(self, listID, title, body):
        return self.mailingListsMgr.SaveWelcomeMail(listID, title, body)
    def SaveAndSendWelcomeMail(self, listID, title, body):
        return self.mailingListsMgr.SendWelcomeMail(listID, title, body)
    def ClearWelcomeMail(self, listID):
        self.mailingListsMgr.ClearWelcomeMail(listID)
|
from astropy.io import fits
from decimal import Decimal
def write_txt(smooth_wavelength, smooth_flux, star_name, number_of_variables, wavelength, flux):
    """Write smoothed and raw (wavelength, flux) tables for a star.

    The smoothed spectrum (first `number_of_variables` samples of
    `smooth_wavelength`/`smooth_flux`) goes to a file named interactively
    via input(); the full raw spectrum (`wavelength`/`flux`) always goes
    to 'raw_data.txt'.  Values are written in trimmed scientific notation.
    """
    def form_e(q):
        # Scientific notation with trailing zeros (and a bare '.') trimmed
        # from the mantissa, e.g. 1.230000E+02 -> 1.23E+02.
        # (The original chained a second, redundant .rstrip('0').)
        mantissa, exponent = ('%E' % q).split('E')
        return mantissa.rstrip('0').rstrip('.') + 'E' + exponent

    def _dump(f, header, wl, fl, n):
        # Shared table writer for both output files (was two copy-pasted loops).
        f.write(header)
        f.write(" Wavelength Flux \n")
        for i in range(n):
            f.write("%6s" % (""))
            f.write("%8s" % (form_e(wl[i])))
            f.write("%20s" % (form_e(fl[i])))
            f.write("\n")

    text_file = input("Please name the text file that the data will be written to (for plain text file): ")
    with open(text_file, 'w') as f:
        _dump(f, "#This is wavelength and flux data for the star: " + str(star_name) + "\n",
              smooth_wavelength, smooth_flux, number_of_variables)
    with open('raw_data.txt', 'w') as f:
        _dump(f, "#Raw data of wavelength and flux data for the star: " + str(star_name) + "\n",
              wavelength, flux, len(wavelength))
|
import numpy as np
# NOTE(review): the runs of repeated assignments below are a manual
# "toggle" workflow — only the LAST assignment in each run takes effect
# (field = 346, telescope = 'TN', night = '20171202',
#  cut1 = 47144, cut2 = 119110).  Switch configurations by reordering
# or commenting lines rather than deleting them.
field = 341
field = 340
field = 342
field = 344
field = 345
field = 347
field = 346
telescope = 'TS'
telescope = 'TN'
night = '20171127'
night = '20171202'
cut1 = 0
cut2 = 47145
cut1 = 47144
cut2 = 119110
#------------------------------------------------------------------------------
# Same last-one-wins pattern: the Windows path is immediately overridden
# by the Linux mount point.
if telescope == 'TS':
    folder = "E:/trappist/pho/" # win TS folder
    folder = "/media/marin/TRAPPIST3/trappist/pho/" # TS folder
if telescope == 'TN':
    folder = "E:/troppist/pho/" # win TS folder
    folder = "/media/marin/TRAPPIST3/troppist/pho/" # TN folder
folder += str(field)
fname = '/h.runout1Crop'
# Keep rows cut1 .. cut2-2 inclusive.
# NOTE(review): the -1 drops one extra row at the end — confirm whether
# [cut1:cut2] was intended.
data = np.loadtxt(folder+fname)[cut1:cut2-1]
print(data.shape)
fname += '_' + str(field) + '_' + str(night)
np.savetxt(folder + fname, data, fmt=['%10.0f','%10.4f','%10.4f','%10.4f'])
# NOTE(review): message is missing a space before the filename.
print('File saved with name' + fname )
|
import Level
import random
import Ball
import Player
import Constants
# Stores game variables
class GlobalStore:
    """Holds all mutable game state: lives, score, levels, and mode flags.

    NOTE(review): presumed Level signature from usage is
    Level(balls, player, some_height, score_target, background) and
    Ball(size, velocity, bounce?, color, position[, extra]) — confirm
    against Level.py / Ball.py.
    """
    def __init__(self):
        # Player's lives.
        self.lives = 5
        # Score of the player during game and the player's highscore (read from file).
        self.score = 0
        # NOTE(review): raises if the file is missing or non-numeric —
        # confirm the asset is guaranteed to ship with the game.
        with open("assets/highscore.txt", "r") as file:
            self.highscore = int(file.read())
        # Whether the game is in menu or in a level.
        self.gameState = "Menu"
        # Index of the current level.
        self.currentLevel = 0
        # List containing all levels.
        self.levels = []
        # Is the game paused
        self.paused = False
        self.define_levels()
    def define_levels(self):
        # All the levels.  Rebuilds the list from scratch so the method can
        # also be used to reset level state.
        self.levels = []
        self.levels.append(Level.Level([Ball.Ball(1, [2, 1], None, (255, 200, 25), [100, 400])],
                                       Player.Player(650), 780, 1500, (50, 50, 0)))
        self.levels.append(Level.Level([Ball.Ball(2, [2, 1], None, (0, 255, 255), [100, 300])],
                                       Player.Player(650), 780, 2000, (50, 50, 50)))
        self.levels.append(Level.Level([Ball.Ball(3, [2, 1], None, (255, 55, 35), [100, 250])],
                                       Player.Player(650), 780, 3000, Constants.MENU_IMAGE))
        self.levels.append(Level.Level([Ball.Ball(2, [-2, 1], None, (255, 75, 0), [100, 250]),
                                        Ball.Ball(2, [2, 1], None, (255, 75, 0), [1200, 250])],
                                       Player.Player(650), 780, 4000, (50, 50, 0)))
        # Many small balls entering from both sides.
        self.levels.append(Level.Level([Ball.Ball(0, [2, 1], None, (0, 255, 255), [50, 100]),
                                        Ball.Ball(0, [2, 1], None, (255, 0, 255), [90, 100]),
                                        Ball.Ball(0, [2, 1], None, (255, 255, 0), [130, 100]),
                                        Ball.Ball(0, [-2, 1], None, (255, 255, 0), [1170, 100]),
                                        Ball.Ball(0, [-2, 1], None, (255, 0, 255), [1210, 100]),
                                        Ball.Ball(0, [-2, 1], None, (0, 255, 255), [1250, 100]),
                                        Ball.Ball(0, [2, 1], None, (255, 255, 0), [300, 100]),
                                        Ball.Ball(0, [2, 1], None, (255, 0, 255), [340, 100]),
                                        Ball.Ball(0, [2, 1], None, (0, 255, 255), [380, 100]),
                                        Ball.Ball(0, [-2, 1], None, (255, 255, 0), [1000, 100]),
                                        Ball.Ball(0, [-2, 1], None, (255, 0, 255), [960, 100]),
                                        Ball.Ball(0, [-2, 1], None, (0, 255, 255), [920, 100])],
                                       Player.Player(650), 350, 1000, (50, 50, 0)))
        self.levels.append(Level.Level([Ball.Ball(3, [2, 1], None, (255, 255, 255), [100, 300]),
                                        Ball.Ball(3, [-2, 1], -11, (0, 255, 255), [1200, 300])],
                                       Player.Player(650), 780, 5000, (50, 50, 0)))
        self.levels.append(Level.Level([Ball.Ball(3, [4, 1], None, (255, 255, 0), [200, 300])],
                                       Player.Player(650), 780, 4000, (50, 50, 0)))
        self.levels.append(Level.Level([Ball.Ball(0, [2, 1], None, (0, 255, 255), [50, 700]),
                                        Ball.Ball(0, [2, 1], None, (255, 0, 255), [90, 700]),
                                        Ball.Ball(0, [2, 1], None, (255, 255, 0), [130, 700]),
                                        Ball.Ball(0, [-2, 1], None, (255, 255, 0), [1170, 700]),
                                        Ball.Ball(0, [-2, 1], None, (255, 0, 255), [1210, 700]),
                                        Ball.Ball(0, [-2, 1], None, (0, 255, 255), [1250, 700]),
                                        Ball.Ball(0, [2, 1], None, (255, 255, 0), [300, 700]),
                                        Ball.Ball(0, [2, 1], None, (255, 0, 255), [340, 700]),
                                        Ball.Ball(0, [2, 1], None, (0, 255, 255), [380, 700]),
                                        Ball.Ball(0, [-2, 1], None, (255, 255, 0), [1000, 700]),
                                        Ball.Ball(0, [-2, 1], None, (255, 0, 255), [960, 700]),
                                        Ball.Ball(0, [-2, 1], None, (0, 255, 255), [920, 700])],
                                       Player.Player(650), 400, 1000, (50, 50, 0)))
        self.levels.append(Level.Level([Ball.Ball(2, [2, 1], None, (255, 255, 255), [100, 300]),
                                        Ball.Ball(2, [2, 1], None, (0, 255, 0), [200, 350]),
                                        Ball.Ball(2, [2, 1], None, (255, 255, 0), [300, 400]),
                                        Ball.Ball(2, [2, 1], None, (0, 0, 255), [400, 450]),
                                        Ball.Ball(2, [2, 1], None, (255, 255, 255), [500, 500])],
                                       Player.Player(650), 780, 5000, (50, 50, 0)))
        self.levels.append(Level.Level([Ball.Ball(4, [0, -3], None, (255, 0, 0), [400, 300], [2]),
                                        Ball.Ball(3, [2, 1], None, (50, 255, 50), [80, 150]),
                                        Ball.Ball(4, [0, -3], None, (255, 0, 0), [900, 300], [2])],
                                       Player.Player(650), 780, 5000, (50, 50, 0)))
        self.levels.append(Level.Level([Ball.Ball(4, [4, 1], None, (255, 255, 0), [200, 300]),
                                        Ball.Ball(4, [-4, 1], None, (0, 255, 255), [1200, 300]),
                                        Ball.Ball(4, [4, 1], None, (255, 255, 0), [700, 300]),
                                        Ball.Ball(4, [-4, 1], None, (0, 255, 255), [500, 300])],
                                       Player.Player(650), 780, 7000, (50, 50, 0)))
        # Randomized bonus levels: one and two random balls respectively.
        self.levels.append(Level.Level([Ball.Ball(random.randint(1, 6), [random.randint(-6, 6), random.randint(-3, 0)],
                                        random.randint(-8, -3), (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)),
                                        [random.randint(100, 900), random.randint(100, 200)]) for num in range(1)], Player.Player(200), 700, 6000, (50, 50, 0)))
        self.levels.append(Level.Level([Ball.Ball(random.randint(1, 6), [random.randint(-6, 6), random.randint(-3, 0)],
                                        random.randint(-8, -3), (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)),
                                        [random.randint(100, 900), random.randint(100, 200)]) for num in range(2)], Player.Player(200), 700, 6000, (50, 50, 0)))
|
from itertools import count
from itertools import combinations
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from matplotlib import style
import random
import pandas as pd
import numpy as np
from FuncAni_2 import RST_Parameter_Calc
y_vals = []

# Keep prompting until the user confirms the dataset path with 'Y'.
confirmed = False
while not confirmed:
    file_path = input("Please enter the complete correct file path of the dataset: \n")
    choice = input("Press 'Y' to confirm \nPress 'N' to enter file path again \n").upper()
    if choice == 'Y':
        confirmed = True

style.use('fivethirtyeight')
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
def animate(i):
    """FuncAnimation callback: reload the CSV and recompute RST parameters.

    Re-reads `file_path` on every tick so the analysis tracks a growing
    dataset.  NOTE(review): the frame argument `i` is shadowed by the
    loop variables below — confirm the frame number is intentionally unused.
    """
    # df = pd.read_csv(file_path, index_col=0, delimiter = ' ') # reading csv file data as a data frame variable
    df = pd.read_csv(file_path) # reading csv file data as a data frame variable
    # print(df)
    # print()
    index = df.index
    columns = df.columns
    values = df.values
    num_col = df.shape[1]
    num_row = df.shape[0]
    # Universe of discourse: 1-based serial numbers of all objects.
    obj_elem_set = set() # stores serial number of each object as set elements
    for i in range(1, df.shape[0]+1):
        obj_elem_set.add(i)
    # print(obj_elem_set)
    # print()
    # storage variables used
    dict_col = {} # stores cardinal nos. of element values column-wise as well as unique data-wise
    elemen_list = [] # stores elementary list numbers of conditional attributes
    cris_list = [] # stores crisp set numbers of decision attribute
    list_col = [] # stores names of columns
    list_combi = [] # stores combinations of columns as tuples
    elem_indiscern_2_list = [] # stores List of all indiscernible combinations taking double conditional attributes
    elem_list = [] # stores elementary set numbers of conditional attributes
    dict_low = {} # stores lower bound set
    dict_upp = {} # stores upper bound set
    dict_accu = {} # stores accuracy of parameters
    dict_SI = {} # stores stability index of parameters
    dict_boun = {} # stores boundary region of each combinations
    dict_out = {} # stores outside region of each combinations
    elem_dict = {} # stores elementary list and dict numbers of conditional attributes
    elemen_dict = {} # stores elementary dict numbers of conditional attributes
    dict_indiscern_2 = {} # stores elementary list for double conditional attributes
    for column in columns: # stores names of columns
        list_col.append(column)
    list_col.pop() # don't include decision attribute (last column)
    len_combi = num_col - 1
    col_combi = combinations(list_col, len_combi) # stores combinations of columns taken 'len_combi' at a time
    list_combi = list(col_combi) # stores combinations of columns as tuples
    # print(list_combi)
    # PandasAgeWalkFunc class object created
    obj_item = RST_Parameter_Calc(df)
    # obtain complete serial numbers of all unique conditional and decision attributes as a dictionary
    dict_col = obj_item.col_item_split()
    # print(dict_col)
    # obtain elementary set and crisp set
    elem_dict = obj_item.elem_list(dict_col)
    elemen_list = elem_dict['Elem List']
    cris_list = elemen_list.pop()
    elemen_dict = elem_dict['Elem Dict']
    # Drop the decision attribute's entry, keeping conditional attrs only.
    rem_key = columns[-1]
    elemen_dict.pop(rem_key)
    # print("Elementary List for Single-Conditional Attributes: \n" + str(elemen_list) + "\n")
    # print("Crisp List: " + str(cris_list) + "\n")
    # print("Elementary Dictionary: " + str(elemen_dict) + "\n")
    # Returns elementary list for multiple conditional attributes
    dict_indiscern_2 = obj_item.column_combinations(elemen_dict, list_combi)
    # print("List of all indiscernible combinations taking multiple conditional attributes is as follows: ")
    # print(str(dict_indiscern_2) + "\n")
    for val in dict_indiscern_2.values():
        elem_indiscern_2_list.append(val)
    # print("Elementary List for Multi-Conditional Attributes: \n" + str(elem_indiscern_2_list) + "\n")
    elem_list = elem_indiscern_2_list # for multiple conditional attributes
    dec_items = sorted(list(set(df[columns[-1]].unique())))
    len_dec = len(dec_items)
    # print("Lower and Upper Approximations are given below: ")
    for i in range(0, len_dec): # calculating the RST Parameters
        dec_val = dec_items[i]
        # print("RST LA, UA for Decision Attribute Value: " + str(dec_val))
        dict_low = obj_item.low_approx(dec_val, dict_col, elem_list, list_combi) # obtain lower approximation
        dict_upp = obj_item.upp_approx(dec_val, dict_col, elem_list, list_combi) # obtain upper approximation
        # print("Lower Approximation: " + str(dict_low) + "\n")
        # print("Upper Approximation: " + str(dict_upp) + "\n")
        dict_accu[dec_val] = obj_item.get_accu(dict_low, dict_upp) # obtain accuracy parameter using accuracy = nLa/nUa
        dict_SI[dec_val] = obj_item.get_SI(dict_low, dict_upp, len(obj_elem_set)) # obtain stability index(SI) parameter using SI = (n_la + n_ua + 1)/(n + 1) - 0.5
        dict_boun[dec_val] = obj_item.get_boundary(dict_low, dict_upp) # get boundary region
        dict_out[dec_val] = obj_item.get_outside_region(obj_elem_set, dict_upp) # get outside region
    print("Accuracy of the parameters for each decision attribute is given below: ")
    print(str(dict_accu) + "\n")
    print("Stability Index(SI) of the parameters for each decision attribute is given below: ")
    print(str(dict_SI) + "\n")
    # print("Boundary region is: Upper Approx. - Lower Approx. = ")
    # print(str(dict_boun) + "\n")
    # print("Outside region is: Universal Set - Upper Approx: ")
    # print(str(dict_out) + "\n")
    # y_vals = []
    # x_vals = 0
    # for value in dict_accu.values():
    # y_vals.append(value)
    # print(y_vals)
    # print()
    # print(len(y_vals))
    # x_vals= x_vals + 1
    # ax1.clear()
    # for i in range(0, len_dec):
    # ax1.plot(x_vals, y_vals[i], label = dec_items[i])
    # plt.scatter(ctr, y_vals[i], label='skitscat', color='k', s=25, marker="o")
    # plt.legend(loc='upper right')
# Re-run animate() every 5000 ms so the output tracks changes in the CSV.
ani = FuncAnimation(fig, animate, interval=5000)
plt.show()
|
class Solution(object):
    def orangesRotting(self, grid):
        """
        :type grid: List[List[int]] -- 0 empty, 1 fresh, 2 rotten
        :rtype: int -- minutes until no fresh orange remains, or -1

        Multi-source BFS: seed the queue with every rotten orange, then
        spread level by level; each BFS level is one minute.  Tracking the
        remaining fresh count directly removes the original's `mins - 1`
        compensation hack, and deque gives O(1) pops instead of the
        O(n) `list.pop(0)`.  Mutates `grid` in place (as before).
        """
        from collections import deque

        fresh = 0
        queue = deque()
        for r, row in enumerate(grid):
            for c, val in enumerate(row):
                if val == 1:
                    fresh += 1
                elif val == 2:
                    queue.append((r, c))

        minutes = 0
        # Stop as soon as nothing fresh is left -- avoids counting the
        # final level that only drains already-rotten oranges.
        while queue and fresh:
            minutes += 1
            for _ in range(len(queue)):
                r, c = queue.popleft()
                # up, down, left, right
                for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                    nr, nc = r + dr, c + dc
                    if 0 <= nr < len(grid) and 0 <= nc < len(grid[0]) and grid[nr][nc] == 1:
                        grid[nr][nc] = 2
                        fresh -= 1
                        queue.append((nr, nc))

        # Any fresh orange left is unreachable from every rotten source.
        return -1 if fresh else minutes
import os, sys
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import tensorflow as tf
import cv2
import numpy as np
sys.path.insert(1, os.path.join(sys.path[0], '/mywork/tensorflow-tuts/sd19reader'))
from batches2patches_tensorflow import GetFuncToPatches, GetFuncOverlapAdd
from myutils import describe
from vizutils import bw_grid_vis, color_grid_vis
from mypca import my_PCA_scikitlike as PCA
# load image
path2file = os.path.dirname(os.path.realpath(__file__))
inimg = os.path.join(path2file,'Lenna_noise1.png')
# Normalize to float32 in [0, 1] for the filtering pipeline.
testim = cv2.imread(inimg).astype(np.float32) / 255.0
# will use "valid" conv, so pad 1 wide for 3x3 patches
padtest = np.pad(testim, [(1,1), (1,1), (0,0)], 'edge')
# get patching function for local windows
imshape = [int(ii) for ii in padtest.shape]
batchsize = 1
batchimshapefull = [batchsize,]+imshape
patchsize = 3
bordermode = 'valid'
# Output spatial size of a valid 3x3 patch extraction.
pimshape = (imshape[0]-patchsize+1,imshape[1]-patchsize+1)
reconstrmode = 'full'
# Number of PCA components kept per 3x3x3 patch (27 dims -> 6).
N_PCA_COMPS = 6
batchunpadtest = np.expand_dims(testim, 0)
batchtestims = padtest.reshape(batchimshapefull) # only one in batch, so resize the one
# Guide-feature tensor shape: same as the unpadded image but with the
# channel axis replaced by the PCA component count.
featswrtshape = [int(ii) for ii in batchunpadtest.shape]
featswrtshape[-1] = N_PCA_COMPS
patchtheanofunc = GetFuncToPatches(batchimshapefull, patchsize, border_mode=bordermode, filter_flip=False)
overlapaddfunc = GetFuncOverlapAdd(batchimshapefull, patchsize, pimshape, border_mode=reconstrmode, filter_flip=False)
#########################################
# bilateral filter
#tf_stdv_space = tf.get_variable('tf_stdv_space', initializer=tf.constant(1.0))
#tf_stdv_bilat = tf.get_variable('tf_stdv_bilat', initializer=tf.constant(1.0))
tf_placehold_img = tf.placeholder(tf.float32, batchunpadtest.shape, name="tf_placehold_img")
tf_placehold_wrt = tf.placeholder(tf.float32, featswrtshape, name="tf_placehold_wrt")
from test_utils import *
bilateral_filters = load_func_from_lib()
#########################################
# tensorflow sess init (TF1-style graph/session API)
sess = tf.InteractiveSession()
sess.run(tf.initialize_all_variables())
outfold = 'test_patch_pca'
#########################################
# compute patch PCA: extract all 3x3 patches, flatten, fit PCA, and
# project each patch onto the leading components to build guide features.
patches = patchtheanofunc(batchtestims)
print(" ")
describe("patches",patches)
flatpatches = patches.reshape((patches.shape[0]*patches.shape[1]*patches.shape[2], np.prod(patches.shape[3:])))
describe("flatpatches",flatpatches)
pca = PCA(n_components=N_PCA_COMPS, doplot=False).fit(flatpatches)
transfpatches = pca.transform(flatpatches)
reshtransfpatch = transfpatches.reshape((patches.shape[0], patches.shape[1], patches.shape[2], N_PCA_COMPS))
print(" ")
describe("transfpatches", transfpatches)
describe("reshtransfpatch", reshtransfpatch)
print(" ")
# Reconstruct patches from the truncated PCA basis for visual comparison.
procpatches = pca.inverse_transform(transfpatches).reshape(patches.shape)
tehpidx = -1
for tehpatchs in [patches, procpatches]:
    tehpidx += 1
    FLPTCHS = tehpatchs.reshape((tehpatchs.shape[0], tehpatchs.shape[1]*tehpatchs.shape[2], np.prod(tehpatchs.shape[3:])))
    #describe("FLPTCHS", FLPTCHS)
    for jj in range(batchsize):
        #describe("FLPTCHS[jj,...]", FLPTCHS[jj,...])
        color_grid_vis(FLPTCHS[jj,...], savename=os.path.join(outfold,'pcacnn_FLPTCHS_'+str(tehpidx)+'_'+str(jj)+'.png'), flipbgr=True)
#quit()
#########################################
#define the function that's called every time one of the trackbars is moved
def updateWindow(xxx):
    """Trackbar callback: re-run the bilateral filter with current sliders.

    Reads both trackbar positions (scaled back to real units), runs the
    custom bilateral filter with the PCA patch features as the guide
    signal, clamps to [0, 1], and redraws the window.  The `xxx` argument
    is the new trackbar value required by OpenCV's callback signature but
    unused — both sliders are re-read explicitly.
    """
    stdspace = float(cv2.getTrackbarPos('std_space*10','ImageWindow')) / 10.
    stdcolor = float(cv2.getTrackbarPos('std_color*50','ImageWindow')) / 50.
    # Guard against zero sigmas (slider at 0 would degenerate the kernel).
    stdspace = max(1e-3, stdspace)
    stdcolor = max(1e-3, stdcolor)
    #tf_stdv_space = tf.get_variable('tf_stdv_space', initializer=tf.constant(1.0))
    #tf_stdv_bilat = tf.get_variable('tf_stdv_bilat', initializer=tf.constant(1.0))
    #tf_placehold_img = tf.placeholder(tf.float32, batchimshapefull, name="tf_placehold_img")
    #tf_placehold_wrt = tf.placeholder(tf.float32, featswrtshape, name="tf_placehold_wrt")
    # The custom op expects NCHW layout; convert in and out.
    ret = bilateral_filters(NHWC_to_NCHW(tf_placehold_img),
                            NHWC_to_NCHW(tf_placehold_wrt),
                            stdspace, stdcolor)
    outbilNCHW = ret
    outbilat = NCHW_to_NHWC(outbilNCHW)
    tfret = outbilat.eval({tf_placehold_img: batchunpadtest, tf_placehold_wrt: reshtransfpatch})
    describe("tfret00", tfret)
    # Clamp to displayable range before handing to OpenCV.
    tfret[tfret<0.0] = 0.0
    tfret[tfret>1.0] = 1.0
    describe("tfret11", tfret)
    cv2.imshow("ImageWindow", tfret[0,...])
cv2.namedWindow('ImageWindow')
cv2.createTrackbar('std_space*10','ImageWindow',1,200,updateWindow)
cv2.createTrackbar('std_color*50','ImageWindow',1,200,updateWindow)
updateWindow(0) #Creates the window for the first time
cv2.waitKey(0)
|
def multiplication(x):
    """Return x squared.

    NOTE: the name is historical — this multiplies x by itself.
    """
    return x ** 2
def square(fn, arg):
    """Apply the callable `fn` to `arg` and return the result.

    A minimal higher-order helper; despite the name it performs no
    squaring itself — that depends on the function passed in.
    """
    result = fn(arg)
    return result
print(square(multiplication,5)) |
from gensim.models import Word2Vec
import numpy
import re
import os
import theano
import theano.tensor as tensor
import cPickle as pkl
import numpy
import copy
import nltk
from collections import OrderedDict, defaultdict
from scipy.linalg import norm
from nltk.tokenize import word_tokenize
import skipthoughts
#you need to find a way to iterate for each statement/question pair; look at "for line in f:" in theano.util
def prune_thoughts(dataset, questions, input_dir):
    """Encode the sentences of `input_dir` with skip-thought vectors.

    Python 2 code.  Reads the raw text file, strips digits, splits it into
    sentences with NLTK's punkt model, and encodes each sentence.
    NOTE(review): `dataset` and `questions` are currently unused and the
    computed `vectors` are discarded — the nearest-neighbour pruning is
    commented out, so this returns only the sentence list.
    """
    i = open(input_dir)
    text = i.read()
    clean = re.sub("[0-9]", "", text)
    sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
    sents = sent_detector.tokenize(clean)
    X = sents
    #print sents
    Y = dataset
    model = skipthoughts.load_model()
    vectors = skipthoughts.encode(model, X)
    #nearest_neighbor = skipthoughts.nn(model, X, vectors, Y, k=5)
    #print dataset
    #print questions
    return X
def prune_statements(dataset, questions):
    """Keep only the 5 support statements most similar to each question.

    Python 2 code: the bare `map(...)` results are used as lists below,
    which relies on Python 2 semantics.  Similarity is word2vec n-gram
    similarity between the question tokens and each candidate statement;
    mutates `questions` in place and returns it.
    """
    total_old = 0
    total_new = 0
    # Train a throwaway word2vec model on the dataset's own sentences.
    wvs = Word2Vec(dataset, min_count=0)
    for i in range(len(questions)):
        question = questions[i]
        new_statements = []
        # question[2] holds [stmt_1, ..., stmt_k, question_text].
        old_statements = question[2][:-1]
        # Use word vectors and keep only the top 5
        sims = []
        q = question[2][-1]
        for s in old_statements:
            sims.append(wvs.n_similarity(q,s))
        # n_similarity can return non-float sentinels; zero those out.
        sims2 = map(lambda x: x if type(x) is numpy.float64 else 0.0, sims)
        top = sorted(range(len(sims2)), key=sims2.__getitem__, reverse=True)
        new_statements = map(lambda x: old_statements[x], top[:5])
        questions[i][2] = new_statements
        total_old += len(old_statements)
        total_new += len(new_statements)
        #print("Question: ", questions[i][2][-1], " before %d after %d" % (len(old_statements), len(new_statements)))
    print("Before %d After %d" % (total_old, total_new))
    return questions
import time
import hashlib
import ecdsa
from time import time
import json
import base64
import random
import Crypto.Random
from Crypto.PublicKey import RSA
import binascii
from collections import OrderedDict
class Transaction:
    """A signed transfer of `amount` from `sender` to `receiver`.

    The transaction id is the SHA-256 hex digest of a canonical JSON body
    built from (sender, receiver, amount, time) — the comment and the
    signature are deliberately excluded, so equality (`__eq__`) depends
    only on the core transfer fields.
    """
    @staticmethod
    def _compute_id(sender, receiver, amount, timestamp):
        # Canonical id; shared by new() and deserialize() (previously
        # duplicated in both).  Key order matters: json.dumps preserves
        # dict insertion order, so keep it stable.
        body = json.dumps({
            'sender': sender,
            'receiver': receiver,
            'amount': amount,
            'time': timestamp
        })
        return hashlib.sha256(body.encode()).hexdigest()
    @classmethod
    def new(cls, sender, receiver, amount, comment=''):
        """Build a fresh, unsigned transaction stamped with the current time.

        Instantiates via `cls` (the original hardcoded Transaction(),
        breaking subclasses).
        """
        t = cls()
        t.sender = sender
        t.receiver = receiver
        t.amount = amount
        t.comment = comment
        t.time = time()
        t.sig = ''
        t.id = cls._compute_id(t.sender, t.receiver, t.amount, t.time)
        return t
    def serialize(self):
        """Return the transaction as base64-encoded JSON (signature included)."""
        data = {
            'sender': self.sender,
            'receiver': self.receiver,
            'amount': self.amount,
            'comment': self.comment,
            'time': self.time,
            'sig': self.sig
        }
        json_string = json.dumps(data)
        return base64.b64encode(json_string.encode('utf-8')).decode()
    @classmethod
    def deserialize(cls, base64_string):
        """Rebuild a transaction from serialize() output, recomputing its id."""
        json_string = base64.b64decode(base64_string.encode()).decode()
        json_data = json.loads(json_string)
        t = cls()
        t.sender = json_data['sender']
        t.receiver = json_data['receiver']
        t.amount = json_data['amount']
        t.comment = json_data['comment']
        t.time = json_data['time']
        t.sig = json_data['sig']
        t.id = cls._compute_id(t.sender, t.receiver, t.amount, t.time)
        return t
    def sign(self, private_key):
        """Sign the serialized body with a hex-encoded SECP256k1 key.

        The signature field is blanked first so the signature never
        covers itself; stores and returns the hex signature.
        """
        sk = ecdsa.SigningKey.from_string(bytes.fromhex(private_key), curve=ecdsa.SECP256k1)
        self.sig = ''
        payload = self.serialize()
        self.sig = sk.sign(payload.encode()).hex()
        return self.sig
    def validate(self):
        """Verify the signature against the sender field (hex public key).

        Returns False on any failure: bad hex, malformed key, or an
        invalid signature.  (Narrowed from the original bare `except:`,
        which also swallowed KeyboardInterrupt/SystemExit.)
        """
        try:
            vk = ecdsa.VerifyingKey.from_string(bytes.fromhex(self.sender), curve=ecdsa.SECP256k1)
            sig = self.sig
            sig_bytes = bytes.fromhex(self.sig)
            # Reproduce the exact payload that was signed (sig blanked),
            # then restore it.
            self.sig = ''
            payload = self.serialize()
            self.sig = sig
            return vk.verify(sig_bytes, payload.encode())
        except Exception:
            return False
    def __eq__(self, other):
        """Transactions are equal when their canonical ids match."""
        return self.id == other.id
        # return self.sender == other.sender and self.receiver == other.receiver and self.amount == other.amount and self.comment == other.comment
class TreeNode:
    """Internal node of the Merkle tree.

    Holds two children (TreeNode or LeaveNode), its combined leaf count
    (`weight`), and the SHA-256 of the concatenated child hashes.  `mode`
    records whether this node is its parent's left ("L") or right ("R")
    child.
    """
    def __init__(self,parent,childLeft,childRight):
        self.mode = None
        self.parent = parent
        self.childLeft = childLeft
        self.childRight = childRight
        # Adopt both children and tag their positions.
        self.childLeft.parent = self
        self.childLeft.mode = "L"
        self.childRight.parent = self
        self.childRight.mode = "R"
        # Weight = number of leaves in this subtree.
        self.weight = self.childLeft.weight + self.childRight.weight
        self.hash = None
    def getHash(self):
        """Recompute this node's hash from its children; returns self."""
        concat = self.childLeft.hash + self.childRight.hash
        self.hash = hashlib.sha256(concat.encode()).hexdigest()
        return self
    def addLeave(self, leave):
        """Insert a leaf, keeping the tree left-heavy; returns the subtree root.

        NOTE(review): assumes the invariant childLeft.weight >=
        childRight.weight always holds — if the left child were ever
        lighter, `returnNode` would be unbound.  Confirm the invariant.
        """
        if self.childLeft.weight > self.childRight.weight:
            # Right side has room: descend (or pair the right child with
            # the new leaf under a fresh internal node).
            if isinstance(self.childRight,TreeNode):
                self.childRight = self.childRight.addLeave(leave)
                self.childRight.mode = "R"
            else:
                self.childRight = TreeNode(self,self.childRight,leave).getHash()
                self.childRight.mode = "R"
            self.weight += 1
            returnNode = self
        elif self.childLeft.weight==self.childRight.weight:
            # Subtree is full/balanced: grow upward with a new root whose
            # left child is this subtree and right child is the new leaf.
            returnNode = TreeNode(self.parent,self,leave).getHash()
        self.getHash()
        return returnNode
class LeaveNode:
    """A Merkle-tree leaf wrapping a single transaction-like value.

    ``val`` must expose a ``serialize()`` method returning a string; the
    leaf hash is the SHA-256 hex digest of that serialization.
    """
    def __init__(self,val):
        # Side marker ("L"/"R"), assigned by the parent TreeNode on attach.
        self.mode = None
        self.parent = None
        self.val = val
        # Every leaf contributes exactly one unit of weight to ancestors.
        self.weight = 1
        self.hash = None
    def getHash(self):
        """Compute and cache the leaf hash; return self for call chaining."""
        payload = self.val.serialize().encode()
        self.hash = hashlib.sha256(payload).hexdigest()
        return self
class MerkleTree:
    """Merkle tree over serialized transactions.

    The tree is bootstrapped (see build()) with two fixed leaves — an
    'Origin' transaction and a description transaction — so the root always
    has two children before user transactions are added.
    """
    def add(self,transaction):
        # Add entries to tree
        serialized_transaction = transaction.serialize()
        leave = LeaveNode(transaction).getHash()
        # Index by serialization so get_proof() can find the leaf again.
        self.val2leave[serialized_transaction] = leave
        self.root = self.root.addLeave(leave)
    @classmethod
    def build(self,description='James Chain'):
        # Build tree computing new root
        # NOTE(review): classmethod arg is conventionally ``cls``.
        m = MerkleTree()
        m.description = description
        # Two sentinel transactions anchor the tree; time is pinned to 0 so
        # the bootstrap hashes are deterministic for a given description.
        t_origin = Transaction.new(None,None,0,'Origin')
        t_description = Transaction.new(None,None,0,description)
        t_origin.time = 0
        t_description.time = 0
        origin = LeaveNode(t_origin).getHash()
        description = LeaveNode(t_description).getHash()
        m.root = TreeNode(None,origin,description).getHash()
        # Ordered so serialize()/deserialize() replay transactions in order.
        # Sentinel leaves are deliberately NOT indexed here.
        m.val2leave = OrderedDict()
        return m
    def get_proof(self,transaction):
        # Get membership proof for entry
        proof = []
        serialized_transaction = transaction.serialize()
        node = self.val2leave[serialized_transaction]
        # Walk to the root collecting the sibling at each level.
        while node.parent != None:
            if node.parent.childLeft.hash == node.hash:
                proof.append(node.parent.childRight)
            else:
                proof.append(node.parent.childLeft)
            node = node.parent
        return proof
    def get_root(self):
        # Return the current root
        return self.root.hash
    def get_list(self):
        # Serialized transactions in insertion order (sentinels excluded).
        return list(self.val2leave.keys())
    def serialize(self):
        """Encode description plus ordered serialized transactions as base64 JSON."""
        serialized_transactions = list(self.val2leave.keys())
        data = {
            'description':self.description,
            'transactions':serialized_transactions
        }
        json_string = json.dumps(data)
        return base64.b64encode(json_string.encode('utf-8')).decode()
    @classmethod
    def deserialize(self,base64_string):
        """Rebuild the tree by replaying every transaction through add()."""
        json_string = base64.b64decode(base64_string.encode()).decode()
        json_data = json.loads(json_string)
        # origin = LeaveNode('Origin').getHash()
        # description = LeaveNode(json_data['description']).getHash()
        m = MerkleTree.build(json_data['description'])
        for st in json_data['transactions']:
            transaction = Transaction.deserialize(st)
            m.add(transaction)
        return m
    @staticmethod
    def validate(node):
        """Recursively check every internal node's hash against its children."""
        if not isinstance(node,LeaveNode):
            concat = node.childLeft.hash + node.childRight.hash
            hash = hashlib.sha256(concat.encode()).hexdigest()
            return node.hash == hash and MerkleTree.validate(node.childLeft) and MerkleTree.validate(node.childRight)
        else:
            # Leaf hashes are taken on trust here.
            return True
    def __eq__(self,other):
        # Equal description + equal root hash implies equal content.
        if not isinstance(other,MerkleTree):
            return False
        return self.description == other.description and self.root.hash == other.root.hash
class Block:
    """A block: a JSON header (hashed for proof-of-work) plus a MerkleTree.

    The block hash is SHA-256 of json.dumps(header); it therefore depends on
    the header dict's insertion order, which new() keeps fixed.
    """
    @classmethod
    def new(self, depth, previous_hash, root_hash, bits, transactions=None):
        """Create a block with nonce 0 and the current timestamp.

        NOTE(review): classmethod arg is conventionally ``cls``.
        """
        b = Block()
        b.header = {
            'depth':depth,
            'previous_hash': previous_hash,
            'root_hash':root_hash,
            'timestamp':time(),
            'bits':bits,
            'nonce':0
        }
        b.getHash()
        b.transactions = transactions
        return b
    def proof_of_work(self):
        """Increment the nonce until the hash is numerically below 'bits'."""
        while not int(self.hash,16)<int(self.header['bits'],16):
            self.header['nonce'] += 1
            self.getHash()
        return self.hash
    def getHash(self):
        """SHA-256 of the JSON-serialized header; cached on self.hash."""
        json_string = json.dumps(self.header)
        hash = hashlib.sha256(json_string.encode()).hexdigest()
        self.hash = hash
        return hash
    def serialize(self):
        """Base64-encode header plus the full serialized transaction tree."""
        data_dict = {
            'header': self.header,
            'transactions': self.transactions.serialize()
        }
        json_string = json.dumps(data_dict)
        return base64.b64encode(json_string.encode('utf-8')).decode()
    @classmethod
    def deserialize(self,base64_string, type='full'):
        """Rebuild a Block; type='simplified' skips the transaction tree.

        NOTE(review): parameter ``type`` shadows the builtin; kept for
        caller compatibility.
        """
        b = Block()
        json_string = base64.b64decode(base64_string.encode()).decode()
        json_data = json.loads(json_string)
        b.header = json_data['header']
        b.getHash()
        if type == 'full':
            b.transactions = MerkleTree.deserialize(json_data['transactions'])
        elif type == 'simplified':
            b.transactions = None
        return b
    def validate(self):
        # validate merkle tree hashes
        return MerkleTree.validate(self.transactions.root)
    def __eq__(self,other):
        # Header equality (same JSON) implies the same block.
        return json.dumps(self.header) == json.dumps(other.header)
class Blockchain:
    """In-memory blockchain: blocks keyed by hash plus per-block balances."""
    # NOTE(review): a hard-coded key pair checked into source; ``public``
    # doubles as the coinbase/mint identity in addBlock().  Fine for a toy
    # chain only — move to configuration otherwise.
    private = '3e59b1763f5191b0ab15975c7a6b77f8a55c922f68baddbf1c1c7348884d1736'
    public = '2fba45a1f17dd07e75092fb63b6d7dd79896d05a0c2afc2504706a6ce60e1f9458c47de9651808418fb197209b385cd2b5ba839c865989e187bcad1190704f83'
    # Proof-of-work difficulty target: block hashes must be numerically below it.
    target = '0000281df3c6c88c98e4f6064fb5e8804812de0fadd6a4d47efa38f8db36346c'
    @classmethod
    def new(self):
        # Instantiates object from passed values
        # NOTE(review): classmethod arg is conventionally ``cls``.
        t = Blockchain()
        t.transactions = []
        # Balances are tracked per block hash; the genesis entry is empty.
        t.balance = {'genesis':{}}
        genesis_block = Block.new(0, None, None, Blockchain.target, transactions=None)
        genesis_block.hash = 'genesis'
        t.chain = {'genesis':genesis_block}
        t.longest = t.chain['genesis']
        return t
    @property
    def last_block(self):
        # Tip of the currently-longest chain (see resolve()).
        return self.longest
    def addTransaction(self,transaction):
        self.transactions.append(transaction)
    def addBlock(self, block):
        """Validate and append *block*; returns False on any check failure."""
        block_hash = block.getHash()
        # check hash
        if not int(block_hash,16)<int(block.header['bits'],16):
            return False
        # check previous hash
        if not block.header['previous_hash'] in self.chain:
            return False
        # check transactions validity
        if not block.transactions == None:
            # NOTE(review): balances are taken from the current longest tip,
            # not from block.previous_hash — confirm this is intended when
            # forks are possible.
            balance = self.balance[self.last_block.hash].copy()
            print(balance)
            t_l = block.transactions.get_list()
            for t_s in t_l:
                t = block.transactions.val2leave[t_s].val
                f_b = balance.get(t.sender,0)-t.amount
                # The mint identity (Blockchain.public) may overdraw freely.
                if f_b<0 and not t.sender == Blockchain.public:
                    return False
                else:
                    if not t.sender == Blockchain.public:
                        balance[t.sender] = f_b
                    balance[t.receiver] = balance.get(t.receiver,0)+t.amount
            self.balance[block_hash] = balance
        self.chain[block_hash] = block
        self.resolve(block)
        return True
    # DEPRECATED: moved to Miner
    def mine(self):
        # NOTE(review): dead code — self.validate, self.proof_of_work and
        # the two-argument addBlock() it calls do not exist on this class,
        # and the new block reuses last_block's depth without incrementing.
        if len(self.transactions)==0:
            return False
        last_block = self.last_block
        transactions = self.transactions
        merkleTree = MerkleTree.build()
        for transaction in transactions:
            if self.validate(transaction):
                merkleTree.add(transaction)
        block = Block.new(last_block.header['depth'], last_block.header['previous_hash'], merkleTree.root, Blockchain.target, transactions=merkleTree)
        # block = Block(last_block.getHash(),merkleTree.root.hash,time())
        # print(1,merkleTree.root.hash)
        proof = self.proof_of_work(block)
        self.addBlock(block,proof)
        self.transactions = []
        return True
    def resolve(self,added_block):
        # Longest-chain rule: adopt the new block as tip if it is deeper.
        if added_block.header['depth'] > self.longest.header['depth']:
            self.longest = added_block
    # def add(...):
    #     # Sign object with private key passed
    #     # That can be called within new()
    #     ...
    # def validate(...):
    #     # Validate transaction correctness.
    #     # Can be called within from_json()
    #     ...
    # def __eq__(...):
    #     # Check whether transactions are the same
    #     ...
def verify_proof(entry, proof, root):
    """Check a Merkle membership proof for *entry* against *root*.

    *proof* is an ordered list of sibling nodes (leaf level first), each
    carrying a ``hash`` and a ``mode`` of "L" (sibling is the left child)
    or "R".  Returns True when the recomputed hash equals ``root.hash``.
    """
    running = hashlib.sha256(entry.serialize().encode()).hexdigest()
    for sibling in proof:
        if sibling.mode == "L":
            combined = sibling.hash + running
        elif sibling.mode == "R":
            combined = running + sibling.hash
        else:
            # A node without a side marker contributes nothing (as before).
            continue
        running = hashlib.sha256(combined.encode()).hexdigest()
    return root.hash == running
class Wallet:
    """RSA key-pair generator (requires the third-party ``Crypto`` package)."""
    def new_wallet(self):
        """Create a 1024-bit RSA key pair, DER-encoded and hex-lified.

        NOTE(review): prints the private key to stdout, and 1024-bit RSA is
        weak by modern standards — acceptable for a demo only.
        """
        random_gen = Crypto.Random.new().read
        private_key = RSA.generate(1024, random_gen)
        public_key = private_key.publickey()
        response = {
            'private_key': binascii.hexlify(private_key.exportKey(format='DER')).decode('ascii'),
            'public_key': binascii.hexlify(public_key.exportKey(format='DER')).decode('ascii')
        }
        print(response)
        return response
|
#!/usr/bin/env python
#coding:utf-8
"""
Author: --<>
Purpose:
Created: 2017/8/22
"""
from lpp import *
if __name__ == '__main__':
    # Partition gene-evidence blocks by predictor and emit one single-column
    # list file per source, then write an R script drawing a 5-set Venn
    # diagram of the overlaps.
    # NOTE(review): glob/sys come from ``from lpp import *`` — confirm.
    all_data = glob.glob(sys.argv[1])
    i = 0
    # Each list file starts with a "Gene" header for R's read.delim(header=TRUE).
    # NOTE(review): none of these files is explicitly closed/flushed.
    AUGUSTUS = open("augustus.list", 'w')
    AUGUSTUS.write("Gene\n")
    RNA = open("RNA_SEQ.list", 'w')
    RNA.write("Gene\n")
    PRO = open("Protein.list", 'w')
    PRO.write("Gene\n")
    SNAP = open("SNAP.list", 'w')
    SNAP.write("Gene\n")
    GENEMARK = open("GenMark.list",'w')
    GENEMARK.write("Gene\n")
    for e_f in all_data:
        RAW = open(e_f)
        # Evidence records are blank-line separated blocks.
        all_block = RAW.read().split("\n\n")
        for e_b in all_block:
            i += 1
            # Genes are numbered sequentially; the same number is written to
            # every list whose evidence keyword appears in the block.
            name = str(i)
            if "SNAP" in e_b:
                SNAP.write(name + '\n')
            if "AUGUSTUS" in e_b:
                AUGUSTUS .write(name + '\n')
            if "GeneMark" in e_b:
                GENEMARK.write(name + '\n')
            if "assembler" in e_b:
                RNA.write(name + '\n')
            if "GeneWise" in e_b:
                PRO.write(name + '\n' )
    # Emit the R driver script (it is written, not executed, here).
    VENN_R = open( "Draw.R",'w' )
    VENN_R.write("""
require("VennDiagram")
temp = venn.diagram(
x = list(
""")
    end_list = []
    file_hash = {"EST": RNA.name, "Protein": PRO.name, "SNAP": SNAP.name, "GeneMarks": GENEMARK.name, "AUGUSTUS": AUGUSTUS.name,}
    for category, file_name in file_hash.items():
        # One named read.delim() entry per evidence list file.
        end_list.append( """ %s = read.delim( \"%s\", header=TRUE, stringsAsFactors=TRUE )$Gene"""%(
            category,
            file_name
        )
        )
    VENN_R.write(",".join(end_list))
    VENN_R.write("""),
filename = NULL,
fill = c("dodgerblue", "goldenrod1", "darkorange1", "seagreen3", "orchid3"),
alpha = 0.50,
cex = c(1.5, 1.5, 1.5, 1.5, 1.5, 1, 0.8, 1, 0.8, 1, 0.8, 1, 0.8, 1, 0.8,
1, 0.55, 1, 0.55, 1, 0.55, 1, 0.55, 1, 0.55, 1, 1, 1, 1, 1, 1.5),
margin = 0.05,
cat.col = c("dodgerblue", "goldenrod1", "darkorange1", "seagreen3", "orchid3"),
cat.cex = 1,
)
pdf("%s")
grid.draw(temp)
dev.off()
tiff( "%s" )
grid.draw(temp)
dev.off()
"""%(
        'stat.pdf',
        'stat.tiff'
    )
    )
|
# app/admin/views.py
from flask import flash, redirect, render_template, url_for, abort
from flask_login import login_required, current_user
from app.administrateur.departement import departement
from app import db
from ..forms import DepartementForm
from ...models import Departement
def check_admin():
    """
    Prevent non-admins from accessing the page

    Aborts the request with 403 Forbidden unless the logged-in user has
    the ``is_superadmin`` flag set.
    """
    if not current_user.is_superadmin:
        abort(403)
# Departement Views
# Departement Views
@departement.route('/departements', methods=['GET', 'POST'])
@login_required
def list_departements():
    """
    List all departments

    Superadmin-only view rendering every Departement row.
    """
    check_admin()
    departements = Departement.query.all()
    return render_template('administrateur/departement/departements.html',
                           departements=departements, title="Departements")
@departement.route('/departments/add', methods=['GET', 'POST'])
@login_required
def add_departement():
    """
    Add a department to the database

    Superadmin-only view; on a valid POST it inserts a new Departement and
    redirects to the listing, flashing success or failure.
    """
    check_admin()
    # Flag consumed by the shared template to toggle "add" vs "edit" mode
    # (renamed locally so it no longer shadows this view function's name).
    add_mode = True

    form = DepartementForm()
    if form.validate_on_submit():
        departement = Departement(label_departement=form.label.data,
                                  description=form.description.data)
        try:
            # add department to the database
            db.session.add(departement)
            db.session.commit()
            flash('You have successfully added a new department.')
        except Exception:
            # Roll back the failed transaction so the session stays usable;
            # the expected cause is a unique-constraint violation on the
            # department name (the old bare ``except:`` also hid real bugs).
            db.session.rollback()
            flash('Error: department name already exists.')

        # redirect to departments page
        return redirect(url_for('departement.list_departements'))

    # load department template
    return render_template('administrateur/departement/departement.html', action="Add",
                           add_departement=add_mode, form=form,
                           title="Add Departement")
@departement.route('/departments/edit/<int:id>', methods=['GET', 'POST'])
@login_required
def edit_departement(id):
    """
    Edit a department

    Superadmin-only view; 404s when the id is unknown.  On a valid POST the
    row is updated and the user is redirected to the listing; on GET the
    form is pre-filled from the existing row.
    """
    check_admin()

    add_departement = False

    departement = Departement.query.get_or_404(id)
    form = DepartementForm(obj=departement)
    if form.validate_on_submit():
        departement.label_departement = form.label.data
        departement.description = form.description.data
        db.session.commit()
        flash('You have successfully edited the department.')

        # redirect to the departments page
        return redirect(url_for('departement.list_departements'))

    # Pre-populate the form fields for the GET case.
    form.description.data = departement.description
    form.label.data = departement.label_departement
    return render_template('administrateur/departement/departement.html', action="Edit",
                           add_departement=add_departement, form=form,
                           departement=departement, title="Edit Departement")
@departement.route('/departments/delete/<int:id>', methods=['GET', 'POST'])
@login_required
def delete_departement(id):
    """
    Delete a department from the database

    Superadmin-only view; 404s when the id is unknown, otherwise deletes the
    row and redirects to the listing.
    """
    check_admin()

    departement = Departement.query.get_or_404(id)
    db.session.delete(departement)
    db.session.commit()
    flash('You have successfully deleted the department.')

    # redirect to the departments page
    return redirect(url_for('departement.list_departements'))
    # Removed an unreachable ``return render_template(title=...)`` that
    # followed the redirect (it also lacked a template name, so it could
    # never have rendered anything).
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json, urllib
from urllib import urlencode
import time,random
import MySQLdb
import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
pageid = random.randint(1, 1000)
url = "http://japi.juhe.cn/joke/content/list.from"
params = {
"sort": "desc",
"page": pageid,
"pagesize": "20",
"time": str(time.time())[:10],
"key": "19d8c93cd0d36dbfe8599f359ef1fa74"}
params = urlencode(params)
f = urllib.urlopen("%s?%s" % (url, params))
content = f.read()
res = json.loads(content)
if res:
error_code = res["error_code"]
if error_code == 0:
conn = MySQLdb.connect(host='localhost', user='yczjd', passwd='yczjd', db='yczjd')
cur = conn.cursor()
cur.execute('SET NAMES utf8;')
cur.execute('SET CHARACTER SET utf8;')
cur.execute('SET character_set_connection=utf8;')
csql = 'select count(1) from weixin_main_xiaohua;'
idnum3 = cur.execute(csql) + 1
for i in range(20):
xiaohua = res["result"]["data"][i]["content"]
xiaohuaid = res["result"]["data"][i]["hashId"]
xiaohuaid = str(xiaohuaid)
xiaohua = str(xiaohua)
sql = '''insert into weixin_main_xiaohua(xid,content,contentid) VALUES (\'%s\',\'%s\',\'%s\');''' % (idnum3,xiaohua,xiaohuaid)
sql = sql.encode('utf8')
try:
cur.execute(sql)
idnum3 += 1
except MySQLdb.Error:
pass
conn.commit()
cur.close()
conn.close()
else:
print "状态不是200"
else:
print "没有获取到数据" |
from keras.preprocessing.text import one_hot
from keras.preprocessing.text import text_to_word_sequence
# define the document
text = 'The quick brown fox jumped over the lazy dog.'
# estimate the size of the vocabulary
words = set(text_to_word_sequence(text)) #{'brown', 'over', 'the', 'lazy', 'jumped', 'fox', 'dog', 'quick'}
vocab_size = len(words)
print(vocab_size)
# integer encode the document
# The 1.3 factor over-provisions the hash space to reduce collisions, since
# one_hot() hashes words rather than assigning unique indices.
result = one_hot(text, round(vocab_size*1.3))
print(result)
print()
# NOTE(review): everything below is commented-out Tokenizer experimentation
# kept for reference.
# from keras.preprocessing.text import Tokenizer
# # define 5 documents
docs = ['Well done!',
        'Good work',
        'Great effort',
        'nice work',
        'Excellent!']
# # create the tokenizer
# t = Tokenizer()
# # fit the tokenizer on the documents
# t.fit_on_texts(docs)
# # print(t)
# # summarize what was learned
# print('counts: ', t.word_counts)
# print('document_count: ', t.document_count)
# print('word_index: ', t.word_index)
# print('word_docs: ', t.word_docs)
#
# one_hot(str(docs), 20)
# docs = [[10, 1], [19, 1], [2, 10], [9, 14], [2, 18], [19, 0]]
# resDocs = [[1], [2], [3], [4], [5], [6]]
|
import pandas as pd
import numpy as np
from collections import defaultdict
import re
# Load the shelter-animals training set (Python 2 script: bare prints later).
df = pd.read_csv('train.csv')
# Exploratory summaries; results are only displayed in a REPL/notebook and
# are otherwise discarded.
df['OutcomeType'].value_counts()
df['AnimalType'].value_counts()
df[['OutcomeType', 'AnimalType']].describe()
def age_to_months(x):
    """Convert an 'AgeuponOutcome' string like '2 years' to months.

    Non-string input (pandas NaN / None) yields np.nan.  Weeks count as a
    quarter of a month, days as a thirtieth; any other unit (months) is
    taken at face value.
    """
    try:
        parts = x.split(' ')
    except AttributeError:
        # x was NaN (a float) or None rather than a string.
        return np.nan
    count, unit = int(parts[0]), parts[1]
    if unit in ('year', 'years'):
        return count * 12
    if unit in ('week', 'weeks'):
        return count / 4.
    if unit in ('day', 'days'):
        return count / 30.
    return count
# Numeric age in months, plus age relative to the species' mean age.
df['AgeMonths'] = df['AgeuponOutcome'].apply(lambda x: age_to_months(x))
df[['AgeMonths', 'AnimalType']].groupby('AnimalType').mean()
mean_lifespan = df[['AgeMonths', 'AnimalType']].groupby('AnimalType').mean()
df['RelativeAge'] = np.where(df['AnimalType'] == 'Cat', df['AgeMonths'] / mean_lifespan.loc['Cat'].values, df['AgeMonths'] / mean_lifespan.loc['Dog'].values)
# NOTE(review): pd.isnull(Name) is True precisely when the animal has NO
# name, so 'has_name' is inverted relative to its label — confirm how the
# downstream model interprets it before renaming.
df['has_name'] = pd.isnull(df['Name'])
# figure out color frequencies of various basic colors
temp = df['Color'].apply(lambda x : re.split('/| Tabby| ', x))
temp_flat = np.hstack(temp.values)
un = np.unique(temp_flat)[1:] # get rid of ''
# count different colors
d = defaultdict(int)
for w in temp_flat:
    d[w] += 1
print d
print un # unique colors
# order of colors with counts > 1000
l = ['White', 'Black', 'Brown', 'Tan', 'Blue', 'Brindle']
# Map each raw color string to the first (most frequent) basic color it
# contains, or 'Other' when none match.
res_color = []
for c in df['Color']:
    ctemp = re.split('/| Tabby| ', c)
    loc = []
    for ct in ctemp:
        if ct in l:
            loc.append(l.index(ct))
    if len(loc) > 0:
        res_color.append(l[min(loc)])
    else:
        res_color.append('Other')
df['CleanColor'] = res_color
print df.head()
df[['OutcomeType', 'AnimalType', 'has_name', 'RelativeAge', 'CleanColor']].to_csv('train_clean.csv')
|
from collections import deque
import sys

# 4-neighbour moves: up, down, left, right.
dirs = [(-1, 0), (1, 0), (0, -1), (0, 1)]
q = deque()

def dfs(startV: tuple):
    """Flood-fill the connected region of 1s in the global ``chk`` grid.

    Uses the shared module-level deque as a LIFO stack (append/pop on the
    same end), so despite the queue-like container this is a depth-first
    fill.  Visited cells are zeroed in place.
    """
    x, y = startV
    chk[x][y] = 0
    q.append(startV)
    while q:
        x, y = q.pop()
        for move in dirs:
            moved_x, moved_y = x + move[0], y + move[1]
            if 0 <= moved_x < N and 0 <= moved_y < N:
                if chk[moved_x][moved_y] == 1:
                    q.append((moved_x, moved_y))
                    chk[moved_x][moved_y] = 0

# Read an N x N height map, then for every possible rain level count the
# number of dry connected regions; report the maximum count.
N = int(input())
region = []
for _ in range(N):
    region.append(list(map(int, sys.stdin.readline().split())))
maxPptn = max(map(max, region))
max_cnt = 0
# Rain level maxPptn itself would submerge everything (0 regions), so the
# exclusive range is sufficient.
for pptn in range(0, maxPptn):
    chk = [[0] * N for _ in range(N)]
    for i in range(N):
        for j in range(N):
            if region[i][j] - pptn > 0:
                chk[i][j] = 1
            else:
                chk[i][j] = 0
    cnt = 0
    for i in range(N):
        for j in range(N):
            if chk[i][j] == 1:
                dfs((i, j))
                cnt += 1
    max_cnt = max(max_cnt, cnt)
print(max_cnt)
import collections
# NOTE(review): ``builtins`` is imported but never used below.
import builtins

dic1 = {
    'name': 'Gus',
    'surname': 'Maquez'
}
dic2 = {
    'age': '24'
}

# ChainMap(*dicts)
# Presents all the dictionaries passed as parameters as a single mapping.
merge_dicts = collections.ChainMap(dic1, dic2)
print("merge_dicts => ", merge_dicts)

# .get('key')
# Looks a key up across any of the chained dictionaries.
print("merge_dicts.get('age') => ", merge_dicts.get('name'))

# Counter
# Counts how many times each value occurs in the iterable.
# Returns a dict-like object keyed by the values, mapping to their counts.
food = ['tomato', 'banana', 'banana', 'pineapple', 'apple', 'orange', 'tomato', 'tomato']
print("collections.Counter(food) => ", collections.Counter(food))

# most_common(n)
# Returns the n most frequently occurring values.
counter_food = collections.Counter(food)
print("counter_food.most_common(2) => ", counter_food.most_common(2))

# OrderedDict()
# Preserves the (here: alphabetically sorted) key order.
print("collections.OrderedDict(sorted(counter_food.items())) => ",
      collections.OrderedDict(sorted(counter_food.items())))
# Copyright 2015, Thales Services SAS
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.utils.translation import gettext_lazy as _
from horizon import tabs
from openstack_dashboard.api import neutron as api
from openstack_dashboard.dashboards.project.routers.extensions.extraroutes\
import tables as ertbl
LOG = logging.getLogger(__name__)
class ExtraRoutesTab(tabs.TableTab):
    """Router-detail tab listing the router's static (extra) routes."""
    table_classes = (ertbl.ExtraRoutesTable,)
    name = _("Static Routes")
    slug = "extraroutes"
    template_name = "horizon/common/_detail_table.html"

    def allowed(self, request):
        """Show the tab only when Neutron advertises the 'extraroute' extension."""
        try:
            return api.is_extension_supported(request, 'extraroute')
        except Exception as e:
            # Hide the tab rather than break the page if the check fails.
            LOG.info("Failed to check if Neutron extraroute extension is "
                     "supported: %s", e)
            return False

    def get_extra_routes_data(self):
        """Return the router's routes wrapped for the table (empty if absent)."""
        try:
            extraroutes = getattr(self.tab_group.kwargs['router'], 'routes')
        except AttributeError:
            extraroutes = []
        return [api.RouterStaticRoute(r) for r in extraroutes]
|
from django.urls import path
from . import views
from rest_framework.routers import DefaultRouter
# NOTE(review): DefaultRouter is imported above but never registered; also,
# '' and 'clients' map to the same view and deliberately share the reverse()
# name 'client_list' — confirm which one reverse() should resolve to.
urlpatterns = [
    path('', views.ClientList.as_view(), name='client_list'),
    path('clients', views.ClientList.as_view(), name='client_list'),
    path('clients/<int:pk>', views.ClientDetail.as_view(), name='client_detail'),
]
|
### IMPORTS ###
import OAuth2Util
import praw
import re
import sys
import threading
import time
import traceback
from wordnik import *
from datetime import datetime
### CLASS ###
class Define_It:
    """Reddit bot that answers 'define WORD' requests with Wordnik definitions.

    State is kept on disk via DoneList (processed comment ids) and AvoidList
    (users who opted out).  run() streams /r/all comments and the bot's
    inbox forever.
    """
    def __init__(self, reddit, footer='', sidebar='', username='', subreddit='', api_file='define_it.conf'):
        # praw session plus OAuth wrapper used to refresh tokens in run().
        self._r = reddit
        self._o = OAuth2Util.OAuth2Util(self._r)
        self._done = DoneList()
        self._avoid = AvoidList()
        self.message_footer = footer
        self.sidebar = sidebar
        # Matches "define WORD[, part-of-speech]" or the "-ignore" opt-out;
        # group(2) is the bare word, group(3) the optional part of speech.
        self.match_pattern = re.compile(r'(?:\n|^)define(?:: ?| )(-ignore|(?:["*\']+)?([^\n,.!?#&_:;"*\\(){}<>[\]]+))(, ?((pro)?noun|(ad)?verb(-(in)?transitive)?|adjective|(abbrevia|preposi|conjunc|interjec)tion))?')
        self._api_file = api_file
        self._load_dictionary()
        self._create_api()
        self.username = username
        self.subreddit = subreddit
    # ### WORDNIK ### #
    def _load_dictionary(self):
        # Config file holds the Wordnik API url and key, one per line.
        try:
            with open(self._api_file, 'r') as f:
                lines = [x.strip() for x in f.readlines()]
                self._api_url,self._api_key = lines
        except OSError:
            print('Could not find config file.')
    def _create_api(self):
        # (Re)build the wordnik client; also called per request in _begin().
        self._client = swagger.ApiClient(self._api_key,self._api_url)
        self._wordApi = WordApi.WordApi(self._client)
    # ### REDDIT ### #
    def search(self, body):
        """Return True when *body* contains a define request."""
        found = self.match_pattern.search(body)
        return found is not None
    def _strip_unwanted(self, word):
        # Drop matching surrounding quote/asterisk pairs and anything after
        # a ' - ' separator; non-strings fall through returning None.
        if isinstance(word, str):
            try:
                if (word[0] == word[-1] and
                word[0] in '"*\''):
                    word = word[1:-1]
                if ' - ' in word:
                    word = word.split('-')[0].strip()
                return word
            except IndexError as e:
                Error(e, tb=traceback)
    def _make(self, body):
        """Extract the requested word (and optional part of speech) from *body*.

        Returns a [word, part] list when a part of speech was given, a plain
        string otherwise, or None when nothing matched.
        """
        # Trim trailing chatter such as "... because/but/please ...".
        pat = re.compile(r' ?(because|but|please).*',re.IGNORECASE)
        found = self.match_pattern.search(body)
        if found is None:
            return
        if found.group(3) != None:
            # The [7:] slice drops the leading "define " prefix.
            return re.sub('["*]+','',body[found.start():found.end()].lstrip()[7:].strip()).split(',')
        body = re.sub('["*]+','',body[found.start():found.end()].lstrip()[7:].strip())
        if len(body.split(' ')) > 1:
            return pat.sub('', self._strip_unwanted(body))
        return self._strip_unwanted(body)
    def ignore(self, comment):
        """Add the comment's author to the opt-out list and confirm by reply."""
        self._avoid.add(comment.author.name)
        comment.reply('This message confirms that you have been added to the ignore list.' + self.message_footer)
    def delete(self, comment):
        """Delete the bot's own reply when the original requester asks to."""
        if comment.is_root:
            return
        parent_id = comment.parent_id
        parent = self._r.get_info(thing_id=parent_id)
        # Only delete comments the bot itself wrote...
        if parent.author.name != self.username:
            return
        request_id = parent.parent_id
        request = self._r.get_info(thing_id=request_id)
        # ...and only on request of the user who asked for the definition.
        if comment.author.name != request.author.name:
            return
        parent.delete()
        print('%s requested comment get deleted'%comment.author.name)
    def _begin(self, comment):
        """Process one comment: dedupe, parse, then reply with definitions."""
        id = comment.id
        already_done = self._done.get()
        avoid = self._avoid.get()
        if id not in already_done:
            self._done.add('%s\n'%id)
            author = comment.author.name
            # Username mentions are normalized into the 'define' syntax.
            body = re.sub(r'/u/%s'%self.username,'define',comment.body,flags=re.IGNORECASE)
            formatted = self._make(body)
            if formatted != None and author not in avoid:
                if isinstance(formatted, list):
                    word = formatted[0]
                    if word == '-ignore':
                        self.ignore(comment)
                        return
                    elif word == '-delete':
                        self.delete(comment)
                        return
                    part = formatted[1]
                else:
                    if formatted == '-ignore' and author not in avoid:
                        self.ignore(comment)
                        return
                    elif formatted == '-delete':
                        self.delete(comment)
                        return
                    word = formatted
                    part = ''
                # Fresh API client per request, then look up and reply.
                self._create_api()
                partText = part if part == '' else (' as a ' + part)
                definitions = Definition(self._wordApi, word=word, part=part)
                formatted = definitions.format()
                if len(definitions.definitions) > 0:
                    print('%s requested "%s"%s'%(author,word,partText))
                    comment.reply(formatted + self.message_footer)
                    try:
                        # Optionally mirror the latest definition to the
                        # configured subreddit's sidebar.
                        if self.sidebar != '':
                            self._r.get_subreddit(self.subreddit).update_settings(description=self.sidebar.format(requester=author,definitions=formatted))
                    except Exception as e:
                        Error(e, tb=traceback)
    def run(self):
        """Main loop: stream /r/all comments and the inbox until interrupted."""
        self._o.refresh()
        while True:
            try:
                for x in praw.helpers.comment_stream(self._r, 'all'):
                    self._o.refresh()
                    if not self.search(x.body): continue
                    # NOTE(review): Thread(target=self._begin(x)) CALLS
                    # _begin synchronously and passes its None result as the
                    # target — the work is not actually threaded. Confirm
                    # intent before changing.
                    t2 = threading.Thread(target=self._begin(x))
                    t2.start()
                    time.sleep(5)
                    # Peek at the inbox; StopIteration means it is empty.
                    try:
                        zzz = next(self._r.get_unread())
                        messages = True
                    except StopIteration:
                        messages = False
                    if messages:
                        for y in self._r.get_unread():
                            try:
                                y.mark_as_read()
                                if y.subject == 'comment reply' or (y.subject == 'username mention' and y.was_comment):
                                    t3 = threading.Thread(target=self._begin(y))
                                    t3.start()
                            except AttributeError:
                                pass
            except praw.errors.Forbidden:
                pass
            except KeyboardInterrupt:
                print('Exiting...')
                sys.exit(-1)
class Definition:
    """Fetches and formats up to three Wordnik definitions for a word."""
    def __init__(self, api, **kwargs):
        """
        :param api: a wordnik WordApi instance.
        :param kwargs: must contain 'word' and 'part' for a lookup to run.
        """
        self._api = api
        if 'word' in kwargs and 'part' in kwargs:
            self.word = kwargs['word']
            self.definitions = self.define(kwargs['word'],kwargs['part'])
    def _lookup(self, word, part):
        # Query the API, retrying common capitalizations in the original
        # order: as-given, lower, UPPER, Capitalized.  None if all miss.
        f = self._api.getDefinitions
        for candidate in (word, word.lower(), word.upper(), word.capitalize()):
            d = f(candidate, partOfSpeech=part, sourceDictionaries='all')
            if d is not None:
                return d
        return None
    def define(self, word, part):
        """Return up to three (word, partOfSpeech, text) tuples for *word*.

        Mirrors the original behaviour of re-querying once per slot and
        stopping at the first missing index or API failure.  The original's
        trailing ``definitions.append`` after ``break`` was unreachable and
        has been removed.
        """
        definitions = []
        for i in range(3):
            try:
                d = self._lookup(word, part)
                if d is None:
                    # No spelling variant produced any result.
                    break
                definitions.append((d[i].word, d[i].partOfSpeech, d[i].text))
            except IndexError as e:
                # Fewer than three definitions were available.
                Error(e, tb=traceback)
                break
            except Exception:
                # Defensive: any API/transport failure ends the lookup.
                break
        return definitions
    def format(self):
        """Render the collected definitions as reply-ready text."""
        s = ''
        for word, part, text in self.definitions:
            if part != 'abbreviation':
                # Title-case multi-word entries; leave abbreviations alone.
                word = ' '.join(x.capitalize() for x in word.split(' '))
            s += '%s (%s): %s\n\n' % (word, part, text)
        return s
class DoneList:
    """Persistent list of processed comment ids, backed by done.txt.

    Every processed id is appended; once the file reaches 200000 lines it
    is compacted down to its most recent 50000 entries.
    """
    def __init__(self):
        self.list = self.get()
    def add(self,content,a=True):
        """Append *content* (callers pass it newline-terminated) to the file.

        When *a* is true, first give read() a chance to compact the file.
        """
        if a:
            self.read()
        with open('done.txt', 'a') as f:
            f.write(content)
    def read(self):
        """Compact done.txt to its last 50000 lines once it reaches 200000."""
        # Count lines without loading the file; the original's enumerate()
        # loop crashed with UnboundLocalError when the file was empty.
        with open('done.txt') as f:
            line_count = sum(1 for _ in f)
        if line_count >= 200000:
            with open('done.txt') as f:
                kept = self._tail(f, 50000)[0]
            open('done.txt', 'w').close()
            for x in kept:
                self.add('%s\n'%x,False)
    def _tail(self, f, n, offset=None):
        """Return (last *n* lines of *f*, truncated-flag); closes *f*.

        Seeks backwards from the end with a growing average-line-length
        guess.  NOTE: on text-mode files CPython rejects non-zero
        end-relative seeks with an OSError subclass, so the except branch
        falls back to reading the whole file — correct, just not lazy.
        """
        avg_line_length = 7
        to_read = n + (offset or 0)
        while 1:
            try:
                # int() guard: the 1.3 growth factor below makes the guess a
                # float, and seek() requires an integer offset.
                f.seek(-int(avg_line_length * to_read), 2)
            except IOError:
                f.seek(0)
            pos = f.tell()
            lines = f.read().splitlines()
            if len(lines) >= to_read or pos == 0:
                f.close()
                return lines[-to_read:offset and -offset or None], \
                    len(lines) > to_read or pos > 0
            avg_line_length *= 1.3
    def get(self):
        """Read the whole file back as a list of stripped ids."""
        with open('done.txt') as f:
            return [x.strip() for x in f.readlines()]
class AvoidList:
    """Usernames who asked the bot to ignore them, persisted in avoid.txt."""
    def __init__(self):
        self.list = self.get()
    def add(self, name):
        """Append *name* to the ignore file, one username per line."""
        with open('avoid.txt', 'a') as f:
            f.write('%s\n'%name)
    def get(self):
        """Read the ignore file back as a list of usernames."""
        with open('avoid.txt') as f:
            return [line.strip() for line in f]
class Error:
    """Prints an exception summary; optionally dumps a traceback to disk."""
    def __init__(self, error, message=None, tb=None):
        """
        :param error: the caught exception instance (only its type is shown).
        :param message: optional extra context appended to the console line.
        :param tb: pass the ``traceback`` module to also write a dump file
            under errors/.
        """
        if message is not None:
            print(str(type(error)) + ' ' + message)
        else:
            print(str(type(error)))
        if tb is not None:
            import os
            d = datetime.now()
            # os.path.join instead of the old hard-coded 'errors\\' prefix so
            # dumps land in errors/ on non-Windows hosts too, and create the
            # directory on first use instead of crashing when it is missing.
            os.makedirs('errors', exist_ok=True)
            name = os.path.join('errors', 'error{0}.txt'.format(d.strftime('%Y%m%d%H%M%S')))
            with open(name, 'w') as f:
                tb.print_exc(file=f)
'''
Created on 2017/10/25
@author: CSYSBP01
'''
from rest_framework import routers
# from .views import CountViewSet
from django.conf.urls import url
from django.contrib import admin
from .views import (
CountListAPIView,
# CountDetailAPIView
)
urlpatterns = [
    # url(r'^.*',CountListAPIView.as_view(),name='list'),
    # NOTE(review): the empty pattern r'' matches every path, so this view
    # handles all requests routed to this urlconf — confirm that is intended.
    url(r'',CountListAPIView.as_view()),
    # url(r'^(?P<cnt>[\w-]+)/$', CountDetailAPIView.as_view(), name='detail'),
]
# router = routers.DefaultRouter()
# router.register(r'counts', CountViewSet) |
from typing import List
import copy
from lib.grid import Grid
from lib.update_schemes import UpdateScheme
def simulate(grid: Grid, update_scheme: UpdateScheme, n_steps: int, report_every: int = None,avoid_overlapping: bool = False) -> List[Grid]:
    """
    Run *update_scheme* on *grid* for *n_steps* steps, recording every state.

    :param grid: the Grid to simulate (mutated in place, step by step)
    :param update_scheme: algorithm deciding how the pedestrians move each step
    :param n_steps: number of simulation steps to perform
    :param report_every: if given, print a progress message every this many steps
    :param avoid_overlapping: if True, forbid two pedestrians from sharing a cell
    :return: list of n_steps + 1 deep-copied Grids, including the initial state
    """
    history: List[Grid] = [copy.deepcopy(grid)]
    for step in range(n_steps):
        update_scheme.update(grid, avoid_overlapping)
        # Snapshot after each step so callers get the full trajectory.
        history.append(copy.deepcopy(grid))
        if report_every and step % report_every == 0:
            print("Simulated step {}".format(step))
    return history
|
import fsm
import sys
sys.path.append(r"c:\Python34\Lib\site-packages")
sys.path.append(r"c:\Python34\Lib")
import serial, os, threading
# Manque des imports ?
# global variables
f = fsm.fsm() # defines finite state machine shared by every state function
# FSM functions
def init():
    # Initialisation state handler (Python 2 print syntax).
    # NOTE(review): "init_done" does not match any event registered in the
    # main block ("init faite") — confirm which spelling is intended.
    print "init"
    nextEvent = "init_done"
    return nextEvent
def decollage():
    # Take-off state handler; real behaviour still to be implemented.
    print "decollage"
    #corps de la fonction
    nextEvent = "decollage_done"
    return nextEvent
def exploration():
print "turn until detect"
if () :
nextEvent = "atterrissage d'urgence"
elif () :
nextEvent = "objet detecte"
elif ():
return nextEvent
def objetDetecte():
print "do something ..."
if ():
nextEvent = "confirmation manuelle"
else ():
nextEvent = "infirmation manuelle"
return nextEvent
def myFunct():
    # Placeholder transition callback; shadowed by the identical definition
    # that follows it.
    print "do something ..."
    nextEvent = "???"
    return nextEvent
def myFunct():
    # NOTE(review): duplicate of the definition above — this one silently
    # replaces it at import time; both are placeholders.
    print "do something ..."
    nextEvent = "???"
    return nextEvent
# Main
if __name__== "__main__":
# init FSM
f.add_state ("init")
f.add_state ("decollage")
f.add_state ("exploration")
f.add_state ("objet detecte")
f.add_state ("destruction de nid")
f.add_state ("rtl")
f.add_state ("atterrissage")
f.add_event ("init faite")
f.add_event ("decollage fait")
f.add_event ("zone de patrouille atteinte")
f.add_event ("atterissage d'urgence")
f.add_event ("retour point de depart")
f.add_event ("objet detecte")
f.add_event ("confirmation manuelle")
f.add_event ("infirmation manuelle")
f.add_event ("retour a l'exploration")
f.add_event ("attaque du nid")
f.add_event ("decollage fait")
f.add_event ("nid detruit")
f.add_event ("arrivee au point de depart")
f.add_event ("fin de mission")
f.add_transition ("state1","state2","event",myFunct);
f.add_transition ("state1","state2","event",myFunct);
f.add_transition ("state1","state2","event",myFunct);
f.add_transition ("state1","state2","event",myFunct);
f.add_transition ("state1","state2","event",myFunct);
f.add_transition ("state1","state2","event",myFunct);
f.add_transition ("state1","state2","event",myFunct);
f.add_transition ("state1","state2","event",myFunct);
f.add_transition ("state1","state2","event",myFunct);
f.add_transition ("state1","state2","event",myFunct);
# FSM starting state and event
f.set_state ("init")
f.set_event ("init faite")
# run FSM
#fonctions d'init à appeler ici.
while (f.curState != "??"): # wait for last state to occur
funct = f.run ()
newEvent = funct()
print "New Event : ",newEvent
f.set_event(newEvent)
#fonctions d'arret à appeler là
|
##############################################################################
# Copyright 2016-2019 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import functools
import itertools
from operator import mul
from typing import Dict, List, Iterable, Sequence, Set, Tuple, Union, cast
import networkx as nx
from networkx.algorithms.approximation.clique import clique_removal
from pyquil.experiment._main import Experiment
from pyquil.experiment._result import ExperimentResult
from pyquil.experiment._setting import ExperimentSetting, TensorProductState, _OneQState
from pyquil.experiment._symmetrization import SymmetrizationLevel
from pyquil.paulis import PauliTerm, sI
from pyquil.quil import Program
def get_results_by_qubit_groups(
    results: Iterable[ExperimentResult], qubit_groups: Sequence[Sequence[int]]
) -> Dict[Tuple[int, ...], List[ExperimentResult]]:
    """
    Bucket ExperimentResults by the qubit group their observable acts within.

    A result is filed under a group key whenever the qubits of its
    ``result.setting`` observable form a subset of that group; results that fit
    no group are dropped. With overlapping (non-disjoint) groups a single
    result may appear under several keys.

    :param results: ExperimentResults from running an Experiment
    :param qubit_groups: groups of qubits for which you want the pertinent results.
    :return: dict mapping each group (as a sorted tuple) to the list of results
        whose observables act on a subset of that group, in input order.
    """
    sorted_groups = [tuple(sorted(qs)) for qs in qubit_groups]
    grouped: Dict[Tuple[int, ...], List[ExperimentResult]] = {}
    for key in sorted_groups:
        grouped[key] = []
    for result in results:
        measured = set(result.setting.out_operator.get_qubits())
        for key in sorted_groups:
            # subset test: <= on sets is issubset
            if measured <= set(key):
                grouped[key].append(result)
    return grouped
def merge_disjoint_experiments(experiments: List[Experiment], group_merged_settings: bool = True) -> Experiment:
    """
    Combine experiments over pairwise-disjoint qubit sets into one Experiment.

    The merged experiment runs the concatenation of the individual programs and
    carries every setting from every input, so disjoint settings can be
    estimated from the same QPU runs. A ValueError is raised when any two
    programs touch a common qubit, since composing them would make the result
    order-dependent. Note that 'parallel' grouping does not mean gates execute
    simultaneously -- only causal ordering is specified, and measurement
    happens after all gates; the benefit is fewer total runs once settings are
    grouped (done here by default).

    :param experiments: a group of experiments to combine into a single experiment
    :param group_merged_settings: By default group the settings of the merged experiment.
    :return: a single experiment that runs the summed program and all settings.
    """
    claimed: Set[int] = set()
    for experiment in experiments:
        qubits = cast(Set[int], experiment.program.get_qubits())
        if claimed & qubits:
            raise ValueError(
                "Experiment programs act on some shared set of qubits and cannot be "
                "merged unambiguously."
            )
        claimed = claimed | qubits
    # flatten every setting of every experiment; regrouped below if requested
    all_settings = [s for experiment in experiments for group in experiment for s in group]
    merged_program = sum((experiment.program for experiment in experiments), Program())
    merged_program.wrap_in_numshots_loop(max(experiment.program.num_shots for experiment in experiments))
    levels = [experiment.symmetrization for experiment in experiments]
    if SymmetrizationLevel.EXHAUSTIVE in levels:
        level = SymmetrizationLevel.EXHAUSTIVE
    else:
        level = max(levels)
    merged = Experiment(all_settings, merged_program, symmetrization=level)
    return group_settings(merged) if group_merged_settings else merged
def construct_tpb_graph(experiments: Experiment) -> nx.Graph:
    """
    Construct a graph where an edge signifies two experiments are diagonal in a TPB.

    Each distinct setting becomes a node carrying a ``count`` of how many times
    it occurred; an edge joins two settings whose in-states and out-operators
    both share a natural tensor product basis.
    """
    graph = nx.Graph()
    for expt in experiments:
        assert len(expt) == 1, "already grouped?"
        setting = expt[0]
        if setting in graph:
            graph.nodes[setting]["count"] += 1
        else:
            graph.add_node(setting, count=1)
    for first, second in itertools.combinations(experiments, r=2):
        setting_a, setting_b = first[0], second[0]
        if setting_a == setting_b:
            continue
        shared_in = _max_weight_state([setting_a.in_state, setting_b.in_state])
        shared_out = _max_weight_operator([setting_a.out_operator, setting_b.out_operator])
        if shared_in is not None and shared_out is not None:
            graph.add_edge(setting_a, setting_b)
    return graph
def group_settings_clique_removal(experiments: Experiment) -> Experiment:
    """
    Group experiments that are diagonal in a shared tensor product basis (TPB),
    minimizing QPU runs via networkx's clique-removal approximation.

    :param experiments: a tomography experiment
    :return: a tomography experiment with all the same settings, grouped by
        shared TPBs.
    """
    graph = construct_tpb_graph(experiments)
    _, cliques = clique_removal(graph)
    grouped: List[List[ExperimentSetting]] = []
    for clique in cliques:
        bucket: List[ExperimentSetting] = []
        for setting in clique:
            # restore the multiplicity recorded on each node
            bucket.extend([setting] * graph.nodes[setting]["count"])
        grouped.append(bucket)
    return Experiment(
        grouped,
        program=experiments.program,
        symmetrization=experiments.symmetrization,
    )
def _max_weight_operator(ops: Iterable[PauliTerm]) -> Union[None, PauliTerm]:
    """Combine operators by taking the non-identity single-qubit Pauli at each position.

    Returns ``None`` when the inputs do not share a natural tensor product
    basis, e.g. max weight of ["XI", "IZ"] is "XZ" but ["XI", "ZI"] is None.
    """
    pauli_by_qubit: Dict[int, str] = {}
    for term in ops:
        for qubit, pauli in term:
            assert isinstance(qubit, int)
            # setdefault records the first Pauli seen on this qubit; any later
            # disagreement means there is no common TPB.
            if pauli_by_qubit.setdefault(qubit, pauli) != pauli:
                return None
    return functools.reduce(mul, (PauliTerm(p, q) for q, p in pauli_by_qubit.items()), sI())
def _max_weight_state(states: Iterable[TensorProductState]) -> Union[None, TensorProductState]:
    """Combine states by taking the single-qubit state at each qubit position.

    Returns ``None`` when the inputs are incompatible, e.g. max weight of
    ["(+X, q0)", "(-Z, q1)"] is "(+X, q0; -Z q1)" while ["(+X, q0)", "(+Z, q0)"]
    yields None.
    """
    state_by_qubit: Dict[int, _OneQState] = {}
    for product_state in states:
        for one_q in product_state.states:
            # setdefault keeps the first state seen per qubit; a later
            # conflicting state on the same qubit means incompatibility.
            if state_by_qubit.setdefault(one_q.qubit, one_q) != one_q:
                return None
    return TensorProductState(list(state_by_qubit.values()))
def _max_tpb_overlap(
    tomo_expt: Experiment,
) -> Dict[ExperimentSetting, List[ExperimentSetting]]:
    """
    Given an input Experiment, provide a dictionary indicating which ExperimentSettings
    share a tensor product basis
    :param tomo_expt: Experiment, from which to group ExperimentSettings that share a tpb
        and can be run together
    :return: dictionary keyed with ExperimentSetting (specifying a tpb), and with each value being a
        list of ExperimentSettings (diagonal in that tpb)
    """
    # initialize empty dictionary
    diagonal_sets: Dict[ExperimentSetting, List[ExperimentSetting]] = {}
    # loop through ExperimentSettings of the Experiment
    for expt_setting in tomo_expt:
        # no need to group already grouped Experiment
        assert len(expt_setting) == 1, "already grouped?"
        unpacked_expt_setting = expt_setting[0]
        # calculate max overlap of expt_setting with keys of diagonal_sets
        # keep track of whether a shared tpb was found
        found_tpb = False
        # loop through dict items
        for es, es_list in diagonal_sets.items():
            trial_es_list = es_list + [unpacked_expt_setting]
            diag_in_term = _max_weight_state(expst.in_state for expst in trial_es_list)
            diag_out_term = _max_weight_operator(expst.out_operator for expst in trial_es_list)
            # max_weight_xxx returns None if the set of xxx's don't share a TPB, so the following
            # conditional is True if expt_setting can be inserted into the current es_list.
            if diag_in_term is not None and diag_out_term is not None:
                found_tpb = True
                assert len(diag_in_term) >= len(
                    es.in_state
                ), "Highest weight in-state can't be smaller than the given in-state"
                assert len(diag_out_term) >= len(
                    es.out_operator
                ), "Highest weight out-PauliTerm can't be smaller than the given out-PauliTerm"
                # update the diagonalizing basis (key of dict) if necessary
                if len(diag_in_term) > len(es.in_state) or len(diag_out_term) > len(es.out_operator):
                    # NOTE(review): the dict is mutated (del + insert) while its
                    # .items() view is being iterated; this avoids a RuntimeError
                    # only because the `break` below exits before the iterator's
                    # next step. Keep the break immediately after this branch.
                    del diagonal_sets[es]
                    new_es = ExperimentSetting(diag_in_term, diag_out_term)
                    diagonal_sets[new_es] = trial_es_list
                else:
                    diagonal_sets[es] = trial_es_list
                break
        if not found_tpb:
            # made it through entire dict without finding any ExperimentSetting with shared tpb,
            # so need to make a new item
            diagonal_sets[unpacked_expt_setting] = [unpacked_expt_setting]
    return diagonal_sets
def group_settings_greedy(tomo_expt: Experiment) -> Experiment:
    """
    Greedily bucket the ExperimentSettings of ``tomo_expt`` by shared tensor
    product basis.

    :param tomo_expt: Experiment to group ExperimentSettings within
    :return: Experiment with the same program and symmetrization whose settings
        are grouped by common diagonalizing TPB.
    """
    grouped_settings = list(_max_tpb_overlap(tomo_expt).values())
    return Experiment(
        grouped_settings,
        program=tomo_expt.program,
        symmetrization=tomo_expt.symmetrization,
    )
def group_settings(experiments: Experiment, method: str = "greedy") -> Experiment:
    """
    Group experiments that are diagonal in a shared tensor product basis (TPB)
    to minimize the number of QPU runs.

    .. rubric:: Background

    The 'natural' TPB of a PauliTerm diagonalizes each single-qubit Pauli
    factor term-by-term: X(1) * Z(0) is diagonal in
    ``{(|0> +/- |1>)/Sqrt[2]} * {|0>, |1>}`` while Z(1) * X(0) is diagonal in
    ``{|0>, |1>} * {(|0> +/- |1>)/Sqrt[2]}``. The two commute but are not
    diagonal in each other's natural TPB (they are anti-diagonal there). A
    basis simultaneously diagonalizing both does exist::

        -- |0>' = |0> (|+>) + |1> (|->)
        -- |1>' = |0> (|+>) - |1> (|->)
        -- |2>' = |0> (|->) + |1> (|+>)
        -- |3>' = |0> (-|->) + |1> (|+>)

    where X Z is diag(1, -1, 1, -1) and Z X is diag(1, 1, -1, -1) -- but its
    vectors are entangled, so it cannot be built from single-qubit operations.

    .. rubric:: Methods

    "greedy" keeps running buckets of grouped settings, placing each new
    setting into the first compatible bucket (or a new one). "clique-removal"
    maps grouping onto Max Clique via networkx; it can give marginally better
    groupings but building the graph is slow, so "greedy" is the default.

    :param experiments: a tomography experiment
    :param method: method used for grouping; the allowed methods are one of
        ['greedy', 'clique-removal']
    :return: a tomography experiment with all the same settings, just grouped
        according to shared TPBs.
    """
    allowed = ("greedy", "clique-removal")
    if method not in allowed:
        raise ValueError(f"'method' should be one of {list(allowed)}.")
    grouper = group_settings_greedy if method == "greedy" else group_settings_clique_removal
    return grouper(experiments)
|
from django.db.models import F
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views import generic

from .models import Choice, Question
class IndexView(generic.ListView):
    """List the five most recently published questions."""

    template_name = 'myapp/index.html'
    context_object_name = 'latest_question_list'

    def get_queryset(self):
        """Return the last five published questions."""
        ordered = Question.objects.order_by('-pub_date')
        return ordered[:5]
class DetailView(generic.DetailView):
    """Display the voting form for a single question."""

    queryset = Question.objects.all()
    # Bug fix: this attribute was misspelled `medel`, so Django silently
    # ignored it (the view still worked only because `queryset` is set).
    model = Question
    template_name = 'myapp/detail.html'
class ResultsView(generic.DetailView):
    """Display the vote tallies for a single question."""
    model=Question
    template_name='myapp/results.html'
def vote(request, question_id):
    """Record a vote for one of the question's choices.

    Redisplays the detail form with an error message when no choice was
    submitted; otherwise increments the choice's tally and redirects to the
    results page.
    """
    question = get_object_or_404(Question, pk=question_id)
    try:
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Redisplay the question voting form.
        return render(request, 'myapp/detail.html', {
            'question': question,
            'error_message': "You didn't select a choice.",
        })
    # Bug fix: `votes += 1` read-modify-write races with concurrent requests;
    # F('votes') + 1 makes the database perform the increment atomically.
    selected_choice.votes = F('votes') + 1
    selected_choice.save()
    # Always return an HttpResponseRedirect after successfully dealing
    # with POST data. This prevents data from being posted twice if a
    # user hits the Back button.
    return HttpResponseRedirect(reverse('myapp:results', args=(question.id,)))
# Dead code: the original function-based views, kept inert inside a
# module-level string literal so they never execute. Safe to delete once the
# class-based views above are fully trusted.
'''def index(request):
latest_question_list=Question.objects.order_by('-pub_date')[:5]
#output = ','.join([q.question_text for q in latest_question_list])
#template=loader.get_template('myapp/index.html')
#context={
# 'latest_question_list':latest_question_list,
#}
#return HttpResponse (template.render(context,request))
context = {'latest_question_list': latest_question_list}
return render(request, 'myapp/index.html', context)
def detail(request,question_id):
# try:
# question = Question.objects.get(pk=question_id)
# except Question.DoesNotExist:
# raise Http404("Question does not exist")
# return render(request, 'myapp/detail.html', {'question': question})
#return HttpResponse('you are looking at question %s.'% question_id)
question = get_object_or_404(Question, pk=question_id)
return render(request, 'myapp/detail.html', {'question': question})
def results(request,question_id):
question = get_object_or_404(Question, pk=question_id)
return render(request, 'myapp/results.html', {'question': question})
#response='you are looking at the results of question %s.'
#return HttpResponse(response % question_id)
def vote(request,question_id):
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# Redisplay the question voting form.
return render(request, 'myapp/detail.html', {'question': question,
'error_message': "You didn't select a choice.", })
else:
selected_choice.votes += 1
selected_choice.save()
return HttpResponseRedirect(reverse('myapp:results', args=(question.id,)))
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
#return HttpResponse('you are voting on question %s.'%question_id)'''
|
from scipy.ndimage.filters import convolve1d
import numpy as np
# Smallest positive float increment; used to avoid division by zero below.
EPS = np.finfo(float).eps

def deconv_lucy_richardson(img, psf, max_iter, axis=-1, init_img=None):
    '''1D Lucy-Richardson deconvolution.

    :param img: observed (blurred) array
    :param psf: 1D point-spread function
    :param max_iter: number of Richardson-Lucy update iterations
    :param axis: axis of `img` along which to deconvolve
    :param init_img: optional starting estimate (defaults to `img` itself)
    :return: the deconvolved estimate of the latent image
    '''
    assert psf.ndim == 1  # make sure PSF is 1D
    u = img if init_img is None else init_img
    psf_hat = psf[::-1]  # flipped PSF: convolution with it is correlation
    # Bug fix: `xrange` is Python 2 only and raises NameError on Python 3.
    for _ in range(max_iter):
        blurred = convolve1d(u, psf, axis=axis)
        blurred[blurred == 0.0] = EPS  # guard the division below
        u = u * convolve1d(img / blurred, psf_hat, axis=axis)
    return u
|
def classify(n):
    """Return the 'Weird'/'Not Weird' label for n.

    Odd numbers are Weird; even numbers are Not Weird in [2, 5],
    Weird in [6, 20], and Not Weird above 20.
    """
    if n % 2 != 0:
        return "Weird"
    if 2 <= n <= 5:
        return "Not Weird"
    if 6 <= n <= 20:
        return "Weird"
    return "Not Weird"

if __name__ == "__main__":
    # Bug fix: the original elif chain used strict bounds (n>2, n>6), so
    # n == 2 and n == 6 matched no branch and nothing was printed.
    print(classify(int(input())))
|
import asyncio
import arrow
import discord
async def afk_mention_check(ev, message):
    """Post a temporary embed when the first mentioned user is marked AFK.

    Looks the mentioned user up in the 'AwayUsers' collection; when found,
    shows their AFK reason and how long ago they went away, then deletes the
    notice after 5 seconds.
    """
    if message.guild:
        # Skip command invocations; only plain guild messages trigger this.
        if not message.content.startswith(ev.bot.get_prefix(message)):
            if message.mentions:
                # Only the first mentioned user is checked.
                target = message.mentions[0]
                afk_data = ev.db[ev.db.db_cfg.database]['AwayUsers'].find_one({'UserID': target.id})
                if afk_data:
                    time_then = arrow.get(afk_data['Timestamp'])
                    afk_time = arrow.get(time_then).humanize(arrow.utcnow()).title()
                    afk_reason = afk_data['Reason']
                    url = None
                    # If the reason contains an image URL, remove it from the
                    # text and display it as the embed image instead.
                    for piece in afk_reason.split():
                        if piece.startswith('http'):
                            suffix = piece.split('.')[-1]
                            if suffix in ['gif', 'jpg', 'jpeg', 'png']:
                                afk_reason = afk_reason.replace(piece, '')
                                url = piece
                                break
                    response = discord.Embed(color=0x3B88C3, timestamp=time_then.datetime)
                    response.add_field(name=f'ℹ {target.name} is AFK.',
                                       value=f'Reason: {afk_reason}\nWent AFK: {afk_time}')
                    if url:
                        response.set_image(url=url)
                    afk_notify = await message.channel.send(embed=response)
                    # Keep the notice visible briefly, then clean it up.
                    await asyncio.sleep(5)
                    try:
                        await afk_notify.delete()
                    except discord.NotFound:
                        # Notice already removed by someone else; nothing to do.
                        pass
# Emit the fixed sequence of expression labels, one per line.
for label in (
    "true and true",
    "true & & false",
    "false & & false",
    "------------------",
    "true | | true",
    "true | | false",
    "false | | false",
    "------------------",
    "!true",
    "!false",
):
    print(label)
# B - Edit
# https://atcoder.jp/contests/abc037/tasks/abc037_b

def apply_queries(n, queries):
    """Return the length-n array after applying (L, R, T) range-assignments.

    Each query assigns value T to the 1-indexed inclusive range [L, R].
    """
    a = [0] * n
    for left, right, value in queries:
        # 1-indexed inclusive [left, right] -> 0-indexed range(left-1, right);
        # also fixes the original's reuse of `i` for both loops.
        for idx in range(left - 1, right):
            a[idx] = value
    return a

if __name__ == "__main__":
    N, Q = map(int, input().split())
    queries = [tuple(map(int, input().split())) for _ in range(Q)]
    print(*apply_queries(N, queries), sep='\n')
|
from django import forms
from apps.place.models import Place, FlagPlace
class PlaceForm(forms.ModelForm):
    """
    Place form to create new places
    """
    class Meta:
        model = Place
        # `is_approved` is moderation state, so it is not user-editable here.
        exclude = ["is_approved"]
class FlagPlaceForm(forms.ModelForm):
    """
    Form to flag an existing place for review.
    """
    class Meta:
        model = FlagPlace
        # `place` is set in save(); `is_considered` is moderator-controlled.
        exclude = ["place", "is_considered"]

    def save(self, request, place, commit=True):
        """
        Attach the given place to the flag, then optionally persist it.

        Fixes two documentation defects: the class docstring was copy-pasted
        from PlaceForm, and explanatory text sat in stray no-op string
        literals after the statements they described.

        :param request: current request (unused, kept for call compatibility)
        :param place: Place instance this flag refers to
        :param commit: when True, save the instance to the database
        """
        # Build the instance without saving so the relation can be set first.
        instance = super(FlagPlaceForm, self).save(commit=False)
        instance.place = place
        if commit:
            instance.save()
        return instance
|
from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
class SignupForm(forms.Form):
    """Signup form."""

    email = forms.EmailField(required=True)
    password1 = forms.CharField(widget=forms.PasswordInput(), required=True)
    password2 = forms.CharField(widget=forms.PasswordInput(), required=True)

    def clean(self):
        """Validate that password1 equals password2 and the email is unused."""
        form_data = super(SignupForm, self).clean()
        if form_data["password1"] != form_data["password2"]:
            del form_data['password1']
            del form_data['password2']
            # Bug fix: the ValidationError was instantiated but never raised,
            # so mismatched passwords passed validation silently.
            raise forms.ValidationError("Passwords do not match")
        try:
            User.objects.get(email=form_data["email"])
        except ObjectDoesNotExist:
            # email is unique -- signup may proceed
            pass
        else:
            del form_data["email"]
            # Bug fix: same unraised-error defect as above.
            raise forms.ValidationError("Email already registered")
        return form_data
class LoginForm(forms.Form):
    """Login form."""

    email = forms.EmailField(required=True)
    password = forms.CharField(widget=forms.PasswordInput(), required=True)

    def clean(self):
        """Check that the user exists and the email/password pair is valid."""
        form_data = super(LoginForm, self).clean()
        try:
            user = User.objects.get(email=form_data["email"])
        except ObjectDoesNotExist:
            # Bug fix: the ValidationError was instantiated but never raised,
            # so an unknown email fell through as if valid.
            raise forms.ValidationError("Invalid email or password")
        user = authenticate(username=user.username, password=form_data["password"])
        if not user:
            # Bug fix: same unraised-error defect for a wrong password.
            raise forms.ValidationError("Invalid email or password")
        form_data["user"] = user
        return form_data
|
from setuptools import setup
# Package metadata for the flowmon-connector distribution.
setup(
    name='flowmon-connector',
    version='0.1',
    packages=['flowmon_m'],
    # NOTE(review): paramiko is pinned to 2.4.0; consider whether the pin is
    # still required -- confirm against deployment constraints before changing.
    install_requires=['paramiko==2.4.0', 'structlog']
)
|
def concat_palindrome(i):
    """Return the number made by concatenating 1..i then i-1..1 as digits.

    Reproduces the original hard-coded table (1, 121, 12321, ...), including
    the multi-digit concatenation used from i = 10 upward.
    """
    ascending = ''.join(str(x) for x in range(1, i + 1))
    descending = ''.join(str(x) for x in range(i - 1, 0, -1))
    return int(ascending + descending)

if __name__ == "__main__":
    # Generalized: the original indexed a hard-coded 10-element list and
    # raised IndexError for any input above 10.
    for i in range(1, int(input()) + 1):
        print(concat_palindrome(i))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from pathlib import Path, PurePath
import pandas as pd
import requests
from requests.exceptions import HTTPError, ConnectionError
import nltk
from nltk.corpus import stopwords
# nltk.download("punkt")
# pd.options.display.max_colwidth = 500
class NLProcesser:
    """Placeholder for a natural-language processing pipeline; no behavior yet."""
    pass
|
# -*- coding: utf8 -*-
import pymysql
import time
##############################################
def print_time(start, message):
end = time.time()
elapsed = end - start
print " <time:elapsed>", elapsed, message
def insert_data(conn, v_table, v_data):
    """Insert one row per entry of v_data into v_table.

    The stored name is data["name"] + str(data["id"]).
    NOTE(review): the table name is spliced into the SQL with %-formatting,
    which is only safe for trusted, hard-coded table names; the value itself
    uses a proper placeholder.
    """
    try:
        with conn.cursor() as cursor:
            sql = "insert into %s ( name ) values( %%s )" % ( v_table )
            print "<sql:insert>:", sql
            for data in v_data:
                print "<exec>:", sql, ( data["name"] + str(data["id"]) )
                cursor.execute( sql, ( data["name"] + str(data["id"]) ) )
    finally:
        # Commit unconditionally so rows inserted before any error persist.
        conn.commit()
def select_data(conn, v_table, v_data):
    """For each entry of v_data, fetch and print its newest matching row.

    NOTE(review): as in insert_data, the table name is interpolated with
    %-formatting and must come from trusted code only.
    """
    try:
        with conn.cursor() as cursor:
            sql = "SELECT seq, name, regdate FROM %s WHERE name = %%s ORDER BY seq DESC" % (v_table)
            print "<sql:select>:", sql
            for data in v_data:
                print "<exec>:", sql, ( data["name"] + str(data["id"]) )
                cursor.execute( sql, ( data["name"] + str(data["id"]) ) )
                # Only the newest row (highest seq) is of interest here.
                result = cursor.fetchone()
                print(result)
    finally:
        conn.commit()
def select_count(conn, v_table, v_data):
    """For each entry of v_data, print the per-name row count in v_table.

    NOTE(review): table name interpolation is trusted-input-only, as above.
    """
    try:
        with conn.cursor() as cursor:
            sql = "SELECT seq, name, count(*) FROM %s WHERE name = %%s GROUP BY name " % (v_table)
            print "<sql:select>:", sql
            for data in v_data:
                print "<exec>:", sql, ( data["name"] + str(data["id"]) )
                cursor.execute( sql, ( data["name"] + str(data["id"]) ) )
                result = cursor.fetchone()
                print(result)
    finally:
        conn.commit()
def loop_func(toFunc, cnt=1, params=["","",""]):
print "<exec:loopFunc>",cnt
for i in range(cnt):
toVar = toFunc(params[0], params[1], params[2])
print "\t<loop:toFunc>", i, toVar, params
##############################################
if __name__ == "__main__":
    ## main ---------------------
    # Sample rows; name + id is used as the stored key string.
    user_data = [
        { "id": 0, "name": "Hero" },
        { "id": 1, "name": "Dunn" },
        { "id": 2, "name": "Sue" },
        { "id": 3, "name": "Chi" },
        { "id": 4, "name": "Thor" },
        { "id": 5, "name": "Clive" },
        { "id": 6, "name": "Hicks" },
        { "id": 7, "name": "Devin" },
        { "id": 8, "name": "Kate" },
        { "id": 9, "name": "Klein" },
        { "id": 10, "name": "Jen" }
    ]
    # -----------------------------------
    #
    start = time.time()
    conn = pymysql.connect(host='localhost', user='esdev', password='esdev',
                           db='toku_test', charset='utf8')
    #
    # Pick the storage engine under test by switching the table name.
    tab_name="insert_myisam"
    # tab_name="insert_innodb"
    # tab_name="insert_tokudb"
    #insert_data(conn, tab_name, user_data)
    # Benchmark: repeat the whole insert batch 10000 times, then time reads.
    loop_func(insert_data, 10000, [conn, tab_name, user_data] )
    print_time(start, " -- insert_data -- %s" % (tab_name) )
    #
    select_data(conn, tab_name , user_data)
    print_time(start, " -- select_data -- %s" % (tab_name) )
    select_count(conn, tab_name, user_data)
    print_time(start, " -- select_count -- %s" % (tab_name) )
    #
    conn.commit()
    conn.close()
### Search tool sample
### Use this as the base and add the assignment's required features
# Search source
# source=["ねずこ","たんじろう","きょうじゅろう","ぎゆう","げんや","かなお","ぜんいつ"]
# Load the searchable names, one per line, from a Shift-JIS encoded CSV file.
with open("source.csv", encoding="shift-jis") as f:
    source = f.read().split('\n')
#print(source)
### Search tool
def search():
    """Prompt for a character name, report whether it is in `source`,
    and append unknown names to source.csv for next time."""
    word = input("鬼滅の登場人物の名前を入力してください >>> ")
    if word in source:
        print("{}が見つかりました".format(word))
    else:
        print("{}が見つかりませんでした".format(word))
        # Bug fixes: append with the same encoding used for reading, and add a
        # trailing newline so consecutive additions don't merge into one entry
        # (assumes the file already ends with a newline -- TODO confirm).
        with open("source.csv", mode='a', encoding="shift-jis") as f:
            f.write(word + '\n')
        # Keep the in-memory list in sync so repeated searches in this
        # process see the newly added name.
        source.append(word)

if __name__ == "__main__":
    search()
|
import os
import unittest
import numpy as np
import torch
import torchvision
import wget
from PIL import Image
from test.utils import create_depth_map, read_calib
from DDT.data.cameras_calibration import CamerasCalibration
from DDT.criterion import SpatialPhotometricConsistencyLoss
device = "cpu"  # keep all tensors on CPU for this test

class TestSpatialLoss(unittest.TestCase):
    """Integration test of SpatialPhotometricConsistencyLoss on Middlebury data."""

    def setUp(self) -> None:
        # Download the Adirondack stereo pair, calibration and disparity maps.
        # NOTE(review): this performs network I/O on every test run.
        if not os.path.exists("tmp"):
            os.mkdir("tmp")
        data_link = "http://vision.middlebury.edu/stereo/data/scenes2014/datasets/Adirondack-perfect"
        wget.download(f"{data_link}/im0.png", "tmp/im0.png")
        wget.download(f"{data_link}/im1.png", "tmp/im1.png")
        wget.download(f"{data_link}/calib.txt", "tmp/calib.txt")
        wget.download(f"{data_link}/disp0.pfm", "tmp/disp0.pfm")
        wget.download(f"{data_link}/disp1.pfm", "tmp/disp1.pfm")
        left_current_img = Image.open("tmp/im0.png")
        right_current_img = Image.open("tmp/im1.png")
        calib = read_calib("tmp/calib.txt")
        left_current_depth = create_depth_map("tmp/disp0.pfm", calib)
        # The right-view data is shifted 210 px along the width axis --
        # presumably to align it with the left view; TODO confirm the offset.
        right_current_depth = np.roll(create_depth_map("tmp/disp1.pfm", calib), -210, axis=1)
        transform = torchvision.transforms.ToTensor()
        self.left_current_img = transform(left_current_img)[None].to(device).float()
        self.right_current_img = transform(right_current_img)[None].to(device).float().roll(-210, dims=3)
        self.left_current_depth = transform(left_current_depth.copy())[None].to(device).float()
        self.right_current_depth = transform(right_current_depth.copy())[None].to(device).float()
        # Camera intrinsics taken from the Middlebury calibration file.
        focal = 4161.221
        cx = 1445.577
        cy = 984.686
        camera_matrix = np.array([[focal, 0., cx],
                                  [0., focal, cy],
                                  [0., 0., 1.]])
        camera_baseline = 176
        self.cameras_calibration = CamerasCalibration(camera_baseline, camera_matrix, camera_matrix, device)
        self.lambda_s = 0.85

    def test_spatial_loss(self):
        # The loss should be a finite scalar within a known empirical range.
        loss = SpatialPhotometricConsistencyLoss(self.lambda_s, self.cameras_calibration.left_camera_matrix,
                                                 self.cameras_calibration.right_camera_matrix,
                                                 self.cameras_calibration.transform_from_left_to_right,
                                                 window_size=11, reduction="mean", max_val=1.0)
        output = loss(self.left_current_img, self.right_current_img, self.left_current_depth, self.right_current_depth)
        print(output)
        self.assertEqual(output.shape, torch.Size([]))
        self.assertFalse(torch.isnan(output))
        self.assertGreater(output, 0.05)
        self.assertLess(output, 0.11)
|
'''
Created on Jul 24, 2012
@author: Admin
'''
from view.fullscreenwrapper2 import *
from androidhelper import Android
import image_api as ia
import file_select as fs
import datetime
import os
import view.pathhelpers as pathhelpers
import sys
import time
droid = Android()  # single SL4A Android proxy shared by all screens
# Main Screen Class
class MainScreen(Layout):
    """Full-screen image-search UI built on SL4A's FullScreenWrapper2."""

    # Class-level state: cached result image paths and the current index.
    ims=[]
    ind=0

    def __init__(self):
        #initialize your class data attributes
        #load & set your xml
        super(MainScreen,self).__init__(pathhelpers.read_layout_xml("main.xml"),"Ocr")

    def on_show(self):
        """Wire up initial view visibility and the button click handlers."""
        #self.views.tt.add_event(click_EventHandler(self.views.tt,self.get_options))
        #self.views.lists.set_listitems(["semir","worku","semir","worku","semir","worku"])
        #initialize your layout views on screen_show
        self.views.preview.visibility = "visible"
        self.views.logo.src = pathhelpers.get_drawable_pathname("logo.png")
        self.views.close_app.add_event(click_EventHandler(self.views.close_app,self.cls_app))
        self.views.search.add_event(click_EventHandler(self.views.search,self.search_pic))
        self.views.next.add_event(click_EventHandler(self.views.next,self.next_pic))
        self.views.prev.add_event(click_EventHandler(self.views.prev,self.prev_pic))

    def search_pic(self,view,event ):
        """Run an image search for the text box contents and show the first hit."""
        #self.views.take_pic.visibility = "gone"
        #self.views.upload.visibility = "gone"
        self.views.preview.visibility = "gone"
        self.views.action.visibility = "gone"
        self.ind=0
        self.ims=[]
        # Show a spinner dialog while the (blocking) search runs.
        title = 'Searching'
        message = ''
        droid.dialogCreateSpinnerProgress(title, message)
        droid.dialogShow()
        term=self.views.search_box.text
        # NOTE(review): `res` is never copied into self.ims here, so next/prev
        # have nothing to page through unless ia.getImages populates self.ims
        # through the `self` argument -- confirm.
        res= ia.getImages(term,self)
        self.views.preview.src = res[self.ind]
        self.views.preview.visibility = "visible"
        droid.dialogDismiss()
        self.views.action.visibility = "visible"
        self.views.result.text=str(res)
        print res

    def next_pic(self,view,event ):
        # Advance to the next cached result, clamped at the end of the list.
        if self.ind < len(self.ims)-1:
            self.ind += 1
            self.views.preview.src = self.ims[self.ind]

    def prev_pic(self,view,event ):
        # Step back to the previous cached result, clamped at the start.
        if self.ind > 0:
            self.ind -= 1
            self.views.preview.src = self.ims[self.ind]
        #FullScreenWrapper2App.close_layout()

    def cls_app(self,view,event):
        # Close-button handler: tear down the layout.
        FullScreenWrapper2App.close_layout()

    def get_options(self,view,event):
        """Demo dialog: single-choice list whose selection is shown in views.tt."""
        title = 'GUI Test?'
        choices=['Continue', 'Skip', 'baz']
        droid.dialogCreateAlert(title)
        droid.dialogSetSingleChoiceItems(choices)
        droid.dialogSetPositiveButtonText('Yay!')
        droid.dialogShow()
        response = droid.dialogGetResponse().result
        selected_choices = droid.dialogGetSelectedItems().result
        print selected_choices
        self.views.tt.text=choices[selected_choices[0]]
        return True

    def on_close(self):
        pass
if __name__ == '__main__':
    # Boot the SL4A full-screen UI and hand control to its event loop.
    FullScreenWrapper2App.initialize(droid)
    FullScreenWrapper2App.show_layout(MainScreen())
    FullScreenWrapper2App.eventloop()
|
import re
from Assmbler.Instruction import Instruction
def assemble(input):
    """Two-pass assembler: pass 1 records label/section addresses, pass 2
    translates instructions and writes the machine code to asm_Result.txt.

    :param input: the full assembly source as one newline-separated string
    """
    Instructions = []
    output = []
    dict_address = {}
    address = 0x0
    with_label = False
    pos_label = 0
    label = ''
    lines = input.split('\n')
    lines = [l.strip() for l in lines]
    length = len(lines)
    # Pass 1: strip comments in place and fill the label address dictionary.
    for i in range(length):
        if lines[i] == '':  # a blank line
            continue
        if re.search(r'#', lines[i]):  # if this line has annotation
            pos_annotation = re.search(r'#', lines[i]).span()[0]
            if lines[i][0:pos_annotation].strip() == '':
                # Comment-only line: blank it so pass 2 skips it as well.
                lines[i] = ''
                continue
            lines[i] = lines[i][0:pos_annotation]  # delete the annotation
        if re.search(r':', lines[i]):  # if this line has a label
            with_label = True
            pos_label = re.search(r':', lines[i]).span()[0]
            label = lines[i][0:pos_label].strip()
            dict_address[label] = address  # record the address of this label
            if lines[i][pos_label+1:len(lines[i])].strip() == '':
                continue  # no code after the label on this line
        else:
            with_label = False
        if re.search(r'\.text', lines[i]):  # pseudo-op '.text'
            dict_address[".text"] = address
        elif re.search(r'\.data', lines[i]):  # pseudo-op '.data'
            dict_address[".data"] = address
        if not re.search(r'\.', lines[i]):
            address += 4  # every real instruction occupies 4 bytes
    # Pass 2: convert each instruction to machine code.
    for i in range(length):
        if lines[i] == '':  # a blank line
            continue
        if re.search(r':', lines[i]):  # if this line has a label
            with_label = True
            pos_label = re.search(r':', lines[i]).span()[0]
            label = lines[i][0:pos_label].strip()
            if lines[i][pos_label+1:len(lines[i])].strip() == '':
                continue
        else:
            with_label = False
        if not re.search(r'\.', lines[i]):
            if with_label:
                params = lines[i][pos_label+1:len(lines[i])].split(',')
            else:
                params = lines[i].split(',')
            # Renamed from `i` to `p`: the original comprehension variable
            # shadowed the loop index.
            params = [p.strip() for p in params]
            op, params[0] = params[0].split(' ')  # split mnemonic from operands
            # Bug fix: the original appended and then read Instructions[i],
            # indexing by *line* number. Once any line is skipped (blank,
            # comment-only, label-only), that index drifts past the end of the
            # list and raises IndexError; use the freshly built object instead.
            inst = Instruction(op, params, dict_address)
            Instructions.append(inst)
            output.append(inst.toMachineCode())
    # Write the machine code, one word per line (with-block closes the file
    # even if a write fails).
    with open("asm_Result.txt", 'w') as f:
        for line in output:
            f.write(line + '\n')
    return
if __name__ == '__main__':
    # Commented-out helper for dumping machine code 32 bits per line.
    # bits = open(r'test.txt', 'r+')
    # temp = assemble(bits.read())[0]
    # i = 0
    # print(len(temp)/32)
    # while i < len(temp):
    #     print(hex(int((i+32)/32)) + ' ' + temp[i:i+32])
    #     i = i + 32
    # bits.close()
    # Smoke test: a tiny program exercising labels, branches and a jump.
    assemble('start: add $a0, $s0, $s1\nbeq $s0, $s1, exit\nsll $s0, $s1, 2\nbgez $s0, start\nj start\nexit:')
import random
import pandas as pd
import pyupbit
class BasicTrader:
    """Minimal Upbit trader: a random buy/sell signal plus limit-order helpers."""

    def __init__(
        self,
        upbit,
        ticker: str,
    ):
        # `upbit`: authenticated pyupbit client; `ticker`: market symbol
        # such as "KRW-BTC" (currency code is taken from the part after '-').
        self.upbit = upbit
        self.ticker = ticker

    @property
    def krw_balance(self):
        # Cash (KRW) currently available on the account.
        return self.upbit.get_balance(ticker="KRW")

    @property
    def ticker_balance(self):
        # Quantity of the traded coin currently held.
        return self.upbit.get_balance(ticker=self.ticker)

    def check_market_status_price(
        self,
        df: pd.DataFrame,
    ):
        """Return a (status, price) pair for the given OHLCV frame."""
        # Randomly decide whether to buy, sell, or do nothing.
        status = ["buy", "sell", "none"]
        status = random.sample(status, 1)[0]
        # Trade at the most recent closing price.
        price = df["close"].iloc[-1]
        return status, price

    def buy(
        self,
        price=None,
        volume=None,
    ):
        """Place a limit buy; defaults to about 10,000 KRW at the current price."""
        krw_price = 10000
        # When `price` is not given, use the current market price.
        if price is None:
            price = pyupbit.get_current_price(self.ticker)
        # When `volume` is not given, buy roughly 10,000 KRW worth.
        if volume is None:
            volume = krw_price / price
        volume = round(volume, 5)
        krw_price = price * volume
        # Only order when there is enough cash to cover it.
        if self.krw_balance > krw_price:
            self.upbit.buy_limit_order(
                ticker=self.ticker,
                price=price,
                volume=volume,
            )
            print(f"Buy {self.ticker}, KRW: {krw_price}")

    def sell(
        self,
        price=None,
        volume=None,
    ):
        """Place a limit sell; defaults to the whole position at avg price +1%."""
        # When `price` is not given, ask 1% above the average purchase price.
        if price is None:
            price = self.avg_ticker_price * (1.01)
        # When `volume` is not given, sell the entire holding.
        if volume is None:
            volume = self.ticker_balance
        self.upbit.sell_limit_order(
            ticker=self.ticker,
            price=price,
            volume=volume,
        )
        print(f"Sell {self.ticker}, Ticker: {self.ticker_balance}")

    @property
    def avg_ticker_price(self):
        """Average buy price of the held coin, or 0.0 when not held."""
        balances = self.upbit.get_balances()
        for balance in balances:
            if self.ticker.split('-')[1] == balance['currency']:
                return float(balance['avg_buy_price'])
        return 0.0
|
#!/usr/bin/python
import argparse
import os
import sys
import subprocess
class Error(SystemExit):
    """Fatal CLI error: exits the program with a '<progname>: <msg>' message."""

    def __init__(self, msg):
        # Keep the raw message around for handlers that want it un-prefixed.
        self.msg = msg
        super().__init__("%s: %s" % (sys.argv[0], msg))
# Command-line interface. origin/mountpoint are optional because --list,
# --unmount and --remount reinterpret or omit them.
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--name", help="The container name")
parser.add_argument("origin", nargs="?", help="The source of the mount (block device/bind origin)")
parser.add_argument("mountpoint", nargs="?", help="The name of the mount (becomes /run/lxc/shared/<container>/<name>/)")
parser.add_argument("-t", "--type", help="filesystem type, 'bind' indicates bind mount, 'auto' is automatic block device mount. default=bind", default="bind")
parser.add_argument("-o", "--options", help="Mount options")
parser.add_argument("-u", "--umount", "--unmount", dest="unmount", action="store_true", help="Don't mount, unmount. Origin becomes the mountpoint name, mountpoint shouldn't be specified.")
# NOTE(review): "speicified" below is a typo in user-visible help text.
parser.add_argument("-r", "--remount", action="store_true", help="Remount. Like with unmount, origin becomes the mountpoint name and mountpoint doesn't get speicified.")
parser.add_argument("-l", "--list", action="store_true", help="List current shared mounts")
parser.add_argument("-a", "--all", action="store_true", help="Apply to all containers")
args = parser.parse_args()
# Exactly one of --name / --all must be given (XOR of the two flags).
if not args.name and not args.all or args.name and args.all:
    raise Error("Must specify either --name or --all")
# Root under which per-container shared mount trees live.
SHARED_DIR = "/run/lxc/shared"
# Number of path components in SHARED_DIR ('', 'run', 'lxc', 'shared').
SHARED_DEPTH = len(SHARED_DIR.split("/"))

def get_local_path(path):
    """Return *path* relative to its container's shared directory."""
    components = path.split("/")
    return "/".join(components[SHARED_DEPTH + 1:])

def get_local_path_fancy(path):
    """Return the local path prefixed with a '<shared>/' display marker."""
    return "<shared>/%s" % get_local_path(path)

def get_name(path):
    """Return the container-name component of a shared-mount path."""
    return path.split("/")[SHARED_DEPTH]
def list_mounts():
    """Parse `/sbin/mount` output into 6-field rows (device, 'on', mountpoint, 'type', fstype, options)."""
    raw = subprocess.check_output("/sbin/mount").decode()
    rows = (entry.split() for entry in raw.split("\n"))
    # Keep only well-formed 6-token mount lines.
    return [fields for fields in rows if len(fields) == 6]
# --all --list: print every container's shared mounts, grouped by container,
# then exit successfully.
if args.all and args.list:
    conts={}
    for device, on, mountpoint, type, fstype, options in list_mounts():
        # Only rows under the shared-mount root belong to a container.
        if mountpoint.startswith(SHARED_DIR + "/"):
            cont = get_name(mountpoint)
            text = " ".join((device, on, get_local_path_fancy(mountpoint), type, fstype, options))
            if cont not in conts:
                conts[cont] = [text]
            else:
                conts[cont].append(text)
    for cont, mounts in conts.items():
        print("Shared Mounts for container %s:" % cont, end="\n\t")
        print("\n\t".join(mounts))
    raise SystemExit(0)
# Resolve which container directories we operate on.
if args.all:
    SHARED_PATHS = list(filter(os.path.isdir, (os.path.join(SHARED_DIR, name) for name in os.listdir(SHARED_DIR))))
    if not SHARED_PATHS:
        raise Error("No containers with shared mounts running!")
else:
    SHARED_PATHS = [os.path.join(SHARED_DIR, args.name)]
    if not os.path.isdir(SHARED_PATHS[0]):
        raise Error("%s doesnt exist, aborting!" % SHARED_PATHS[0])
# With a single container we exec-replace this process with mount/umount;
# with several we must keep running, so spawn them as subprocesses instead.
if len(SHARED_PATHS) == 1:
    def mount(*args):
        os.execv("/sbin/mount", ("mount",) + args)
    def umount(*args):
        os.execv("/sbin/umount", ("umount",) +args)
else:
    def mount(*args):
        subprocess.check_call(("/sbin/mount",) +args)
    def umount(*args):
        subprocess.check_call(("/sbin/umount",) +args)
# Apply the requested action (list / unmount / remount / mount) to each
# selected container's shared directory.
for SHARED_PATH in SHARED_PATHS:
    try:
        if args.list:
            print("Shared Mounts for %s:" % get_name(SHARED_PATH), end="\n\t")
            for device, on, mountpoint, type, fstype, options in list_mounts():
                if mountpoint.startswith(SHARED_PATH):
                    print(" ".join((device, on, get_local_path_fancy(mountpoint), type, fstype, options)), end="\n\t")
            print(end="\r")
        elif args.unmount or args.remount:
            if args.mountpoint:
                raise Error("when --unmount'ing or --remount'ing, only origin should be scpecified!")
            if args.unmount and args.remount:
                raise Error("can only specify either --remount or --unmount!")
            # For these modes, "origin" is reused as the mount name.
            mountpoint_name = args.origin
            mountpoint = os.path.join(SHARED_PATH, mountpoint_name)
            if args.unmount:
                umount(mountpoint)
            elif not args.options:
                raise Error("-o is required when --remount'ing!")
            else:
                mount(mountpoint, "-o", "remount," + args.options)
        else:
            # Plain mount: default the mount name to the origin's basename.
            if args.mountpoint:
                mountpoint_name = args.mountpoint
            else:
                mountpoint_name = os.path.basename(args.origin)
            mountpoint = os.path.join(SHARED_PATH, mountpoint_name)
            if not os.path.exists(mountpoint):
                os.makedirs(mountpoint)
            if args.options:
                additional = ("-o", args.options)
            else:
                additional = ()
            if args.type == "bind":
                mount("--bind", args.origin, mountpoint, *additional)
            else:
                mount("-t", args.type, args.origin, mountpoint, *additional)
    except Error as e:
        if len(SHARED_PATHS) == 1:
            raise
        # Bug fix: Error.__init__ passes a single formatted string to
        # SystemExit, so e.args has exactly one element and e.args[1]
        # raised IndexError here. Use the stored raw message instead.
        print("%s: %s: %s" % (sys.argv[0], get_name(SHARED_PATH), e.msg))
|
#!/usr/bin/env python
import json
import os
import requests
from apiclient.discovery import build
# NOTE(review): hard-coded API key committed to source — move to an
# environment variable / secret store and rotate the key.
DEVELOPER_KEY = 'AIzaSyAlrSswmdF0zAPbeU3bSQwPPBaIw3g8PHw'
YOUTUBE_API_SERVICE_NAME = "youtube"  # Google API service name
YOUTUBE_API_VERSION = "v3"  # YouTube Data API version
DEEP_RELEVANCE_MODEL_SERVICE = 'http://0.0.0.0:5000/'  # local relevance-ranking endpoint
class YouTubeSearcher:
    """Thin wrapper around the YouTube Data API search endpoint."""

    def __init__(self):
        # Build the API client once; reused for every search.
        self._searcher = build(
            YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
            developerKey=DEVELOPER_KEY)

    def search_for_videos(self, query, max_results=10):
        """Search for *query* and return a list of video dicts."""
        response = self._searcher.search().list(
            q=query,
            part="id,snippet",
            maxResults=max_results
        ).execute()
        return self.__extract_videos(response)

    def __extract_videos(self, search_response):
        """Keep only 'youtube#video' items as {title, video_id, description} dicts."""
        return [
            {'title': item["snippet"]["title"],
             'video_id': item["id"]["videoId"],
             'description': item["snippet"]["description"]}
            for item in search_response.get("items", [])
            if item["id"]["kind"] == "youtube#video"
        ]
def get_most_relevant_video(query, dl_gk=True):
    """Return the id of the most relevant YouTube video for *query*.

    With dl_gk=True the candidates are re-ranked by the deep relevance
    service; otherwise the first search result wins. Returns None when no
    videos are found or the ranking service does not answer with HTTP 200.
    """
    # TODO: Build GK service to toggle
    videos = YouTubeSearcher().search_for_videos(query)
    if not videos:
        return None
    if not dl_gk:
        return videos[0]['video_id']
    # Column-wise payload expected by the relevance model.
    payload = {
        "query": [query for _ in videos],
        "title": [v['title'] for v in videos],
        "description": [v['description'] for v in videos],
        "ids": [v['video_id'] for v in videos],
    }
    r = requests.post(DEEP_RELEVANCE_MODEL_SERVICE,
                      headers={"Content-Type": "application/json"},
                      data=json.dumps(payload))
    if r.status_code != 200:
        return None
    preds = r.json()['preds']
    # Pick the candidate with the highest predicted probability
    # (index 0 when preds is empty, matching the original fallthrough).
    best_index = 0
    best_prob = 0.0
    for i, item in enumerate(preds):
        if item[0] > best_prob:
            best_prob = item[0]
            best_index = i
    return payload["ids"][best_index]
def handle_video_search(keywords):
    """Search YouTube for *keywords*; return a watch URL or an apology string."""
    best_video_id = get_most_relevant_video(keywords)
    if not best_video_id:
        return 'Sorry! We could not find a video for the topic'
    video_link = 'https://www.youtube.com/watch?v={}'.format(best_video_id)
    print(video_link)
    return video_link
import numpy as np
import math
import matplotlib.pyplot as plt
from scipy.signal import cont2discrete as c2d
from KalmanFilter import Kalman_Filter
from StateSpaceSystem import LinearSystem
# Fixed seed so the noise realizations (and the plot) are reproducible.
np.random.seed(22)

def getU(t):
    """Control input u(t); this example simulates an unforced system."""
    return 0
# --- True system definition (1-D constant-velocity model) ---
# NOTE(review): np.matrix is deprecated in NumPy; np.array would be the
# modern choice — left unchanged here.
x0 = np.matrix([0, 2]).T #pos, vel
currTime = 0.0
dt = 0.01
A_c = np.matrix( #actual, continuous-time A matrix
[[0, 1], [0, 0]])
B_c = np.matrix([0, 1]).T #actual, continuous-time B matrix
C_c = np.matrix([1, 0]) #only observing position
D_c = 0
A_d, B_d, C_d, D_d, dt = c2d((A_c, B_c, C_c, D_c), dt) #convert A and B matrices from continuous to discrete
Q_system = [[0.02**2, 0], [0, 0.02**2]]
R_system = 0.02 #0.015
mySystem = LinearSystem(A_d, B_d, C_d, Q_system, R_system, x0, dt)
# --- Kalman filter setup: initial guess drawn from a wide prior P0 ---
x0_mean = [0, 0]
P0 = np.matrix([[10**2, 0], [0, 10**2]])
x0_kalman_filter_guess = np.random.multivariate_normal(x0_mean, P0).T
x0_kalman_filter_guess = np.reshape(x0_kalman_filter_guess, (2, 1))
Q_filter = [[0.02**2, 0], [0, 0.02**2]]
R_filter = np.matrix([0.03**2])
kf = Kalman_Filter(A_d, B_d, C_d, P0, Q_filter, R_filter, x0_kalman_filter_guess, dt)
#initializing integration loop variables
prev_u = 0
pos_real = [mySystem.x.item(0)]
pos_sensor = [mySystem.getCurrentMeasurement()]
pos_kalman_forecasted = [kf.x_forecasted.item(0)]
pos_kalman_estimated = [kf.x_estimated.item(0)]
time = [currTime]
currTime += dt
maxTime = 1
# --- Simulation loop: propagate system, then forecast/update the filter ---
while (currTime <= maxTime):
    mySystem.update(prev_u) #update the system state with the previous time-step's control input
    pos_real.append(mySystem.x.item(0)) #real state without sensor noise, but with model noise
    kf.forecast(prev_u) #create the forecasted state in the kalman filter using the previous time-step's control input (predict/forecast step)
    # NOTE(review): "measurment" is a typo kept to avoid touching code.
    sensor_measurment = mySystem.getCurrentMeasurement() #get a sensor measurement of the system's updated state
    pos_sensor.append(sensor_measurment)
    kf.estimate(sensor_measurment) #find estimated state based on the sensor measurement that just came in (update step)
    pos_kalman_forecasted.append(kf.x_forecasted.item(0)) #current forecasted state
    pos_kalman_estimated.append(kf.x_estimated.item(0))
    curr_u = getU(currTime) #getting the control input for the current time-step. will be used to find the updates states in the next timestep
    time.append(currTime)
    currTime += dt #next time-step
    prev_u = curr_u
# --- Plot actual vs. sensed vs. filtered trajectories ---
plt.title('Example of Kalman filter for tracking a moving object in 1-D')
plt.plot(time, pos_real, label='Actual Position', color='y')
plt.scatter(time, pos_sensor, label='Sensor Measurement', color='b')
plt.scatter(time, pos_kalman_forecasted, label='Kalman forecasted state', color='r') #based on model
plt.plot(time, pos_kalman_estimated, label='Kalman updated state', color='g') #based on model and forecasted state
plt.grid()
plt.xlabel('Time (s)')
plt.ylabel('Position (m)')
plt.legend()
plt.show()
|
# First-class functions / closures demo:
#   to_power(3) -> cube function
#   to_power(2) -> square function
#
def to_power(x):
    """Return a closure that raises its argument to the power *x*.

    The returned function still prints n**x (as before), and now also
    returns the computed value instead of print()'s None so callers can
    actually use the result.
    """
    def cal_power(n):
        result = n ** x
        print(f"{result}")
        return result
    return cal_power
# Build both closures first, then exercise them (prints 125 then 25).
cube = to_power(3)
squre = to_power(2)
cube(5)
squre(5)
|
import numpy as np
from eventio import SimTelFile
from matplotlib import pyplot as plt
# Number of brightest pixels whose waveforms get plotted.
n_pixels = 10
p = "build/simtel-output.zst"
s = SimTelFile(p)
# Stop at the first calibration event.
# NOTE(review): if the file contains no calibration event, `event` is the
# last event read (or undefined for an empty file) — confirm input guarantees.
for event in s:
    if event['type'] == 'calibration':
        break
adc_samples = event['telescope_events'][1]['adc_samples']
# Rank pixels by total ADC signal in channel 0, brightest first.
brightest_pixels = np.sum(adc_samples[0, :, :], axis=1).argsort()[::-1]
fig, ax0 = plt.subplots(nrows=1)
for i_pix in brightest_pixels[:n_pixels]:
    ax0.plot(adc_samples[0, i_pix, :], "-", label=f"{i_pix}")
for i, ax in enumerate([ax0]):
    ax.set_title("ADC Samples of Laser (Calibration) Events")
    ax.set_xlabel("sample")
    ax.legend(title="pixel id")
fig.tight_layout()
fig.savefig("build/laser_waveforms.png")
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 24 15:13:08 2019
Demo to read shapefile, reverse vertices, write to Hyper
@author: sbattersby
"""
import geopandas as gpd
import fiona
import wkt_vertex_flipper as wktFlip
from tableausdk import *
from tableausdk.HyperExtract import *
def main():
    """Convert the Seattle police beats shapefile into a .hyper extract."""
    write_shp_to_hyper(
        'd:\\_data\\Seattle\\SPD_Beats_WGS84.shp',  # source shapefile
        'SPD Beats4',                               # extract table name
        'd:\\test-seattlePoliceBeats.hyper'         # output .hyper path
    )
    return 0
# Given a shapefile in a geodataframe, grab the schema to write into
# a .hyper file
def get_shp_schema(shpLocation):
    """Return the attribute (dbf) schema of a shapefile: {column: type-string}."""
    with fiona.open(shpLocation) as source:
        schema = source.schema
    return schema['properties']
# Given a shapefile schema, assign the appropriate columns to the hyper schema
# TODO: Check what other data types are caught by fiona in the shapefile dbf
def make_hyper_schema(shpSchema):
    """Translate a fiona property schema into a Tableau TableDefinition.

    TODO: Check what other data types fiona reports for shapefile dbf columns.
    """
    schema = TableDefinition()
    # fiona type strings look like 'str:80', 'int:10', 'float:24.15' —
    # the first three characters identify the type.
    for column_name in shpSchema:
        prefix = shpSchema[column_name][0:3]
        if prefix == 'str':
            schema.addColumn(column_name, Type.CHAR_STRING)
        elif prefix == 'int':
            schema.addColumn(column_name, Type.INTEGER)
        elif prefix == 'flo':
            schema.addColumn(column_name, Type.DOUBLE)
    # Geometry is appended last; it is not part of the dbf attributes.
    schema.addColumn('geom', Type.SPATIAL)
    return schema
def write_shp_to_hyper(shpLocation,tableName, hyperOutputLocation):
    """Read a shapefile and write its attributes + geometry to a .hyper extract.

    Parameters:
        shpLocation: path to the source .shp
        tableName: table to create inside the extract
        hyperOutputLocation: path of the .hyper file to write
    """
    gdf = gpd.GeoDataFrame.from_file(shpLocation)
    # Prep Hyper extract for data dump
    ExtractAPI.initialize()
    extract = Extract(hyperOutputLocation)
    # Build the Hyper schema from the shapefile's dbf schema (shapefile is
    # opened twice — once here via fiona, once above via geopandas).
    shpSchema = get_shp_schema(shpLocation)
    hyperSchema = make_hyper_schema(shpSchema)
    # Make the table and set up the schema
    table = extract.addTable(tableName, hyperSchema)
    # Add data to the table
    currentTable = extract.openTable(tableName)
    tableDefn = currentTable.getTableDefinition()
    row = Row(tableDefn)
    # TODO: Check what other data types are caught. Currently just str, int, float
    for i in range(len(gdf)):
        colCount = 0
        for key in shpSchema:
            colType = shpSchema[key][0:3]
            if colType == 'str':
                try:
                    row.setCharString(colCount, gdf.iloc[i][key])
                except Exception:
                    # Fallback sentinel for unencodable/NULL values.
                    # Bug fix: the original bare `except:` also swallowed
                    # KeyboardInterrupt/SystemExit.
                    row.setCharString(colCount, '-999')
            if colType == 'int':
                row.setInteger(colCount, gdf.iloc[i][key])
            if colType == 'flo':
                row.setDouble(colCount, gdf.iloc[i][key])
            colCount += 1
        # Geometry goes last, matching make_hyper_schema's trailing 'geom'
        # column; vertex order is reversed via wktFlip (see module header).
        geoData = wktFlip.reverse_wkt_string(gdf.iloc[i]['geometry'])
        row.setSpatial(colCount, geoData.wkt.encode('ascii'))
        currentTable.insert(row)
    extract.close()
    return
if __name__ == "__main__":
main() |
from unittest import TestCase
from feito.filters import Filters
class FiltersTestCase(TestCase):
    """Tests for feito.filters.Filters."""

    def test_filter_python_files(self):
        # Only the .py paths should survive the filter.
        candidates = ['test.py', 'test/another-test.py', 'not-python.rb', '.also-not-python']
        result = Filters.filter_python_files(candidates)
        assert result == ['test.py', 'test/another-test.py']
|
import json
import boto3
from boto3.dynamodb.conditions import Key
# Module-level DynamoDB handles, created once per Lambda container.
dynamodb = boto3.resource('dynamodb')
dynamodb_client = boto3.client('dynamodb')
#gets machine id by type in Machine_Types
#gets machine ids by type in Machine_Types
def getMachineIdsByType(type):
    """Return the list of machine ids registered under *type* (may be empty)."""
    #Machine Types Table
    table = dynamodb.Table('Machine_Types')
    #Get Machine Type
    response = table.query(
        KeyConditionExpression=Key('Machine_Type').eq(type)
    )
    items = response['Items']
    # Bug fix: an unknown machine type returns no items at all; the
    # original indexed Items[0] unconditionally and raised IndexError.
    if not items:
        return []
    #Send Ids From Machine Type
    if 'Machines' in items[0]:
        return list(items[0]['Machines'])
    #Return Empty List if No Machine
    return []
#Gets Machine Details By Id
def getMachineById(id):
    """Fetch a single machine record by its Machine_Id.

    NOTE(review): assumes the id exists — for an unknown id,
    response['Items'][0] raises IndexError; confirm callers only pass ids
    obtained from getMachineIdsByType.
    """
    table = dynamodb.Table('Machines')
    result = table.query(
        KeyConditionExpression=Key('Machine_Id').eq(id)
    )
    return result['Items'][0]
#calls getMachineIdByType first to get the machine id and then calls
#getMachineById to get the machine information
#currently it returns list of ids instead of all information
#input: ?machine_type=<Type> (case sensitive)
def viewMachineByTypesHandler(event, context):
    """Lambda handler: list full machine records for ?machine_type=<Type>.

    Looks up the ids via getMachineIdsByType, then expands each with
    getMachineById. machine_type is case sensitive.
    """
    def bad_request(message):
        # Shared 400 response shape.
        return {
            'statusCode': 400,
            'headers': {
                'Content-Type': 'text/plain'
            },
            'body': json.dumps({
                'Message': message
            })
        }

    params = event['queryStringParameters']
    if params is None:
        return bad_request('Failed to provide query string parameters.')
    if 'machine_type' not in params:
        return bad_request('Failed to provide parameter: machine_type')
    # Expand each machine id into its full record.
    machines = []
    for machine_id in getMachineIdsByType(params['machine_type']):
        machine = getMachineById(machine_id)
        # DynamoDB sets are not JSON-serializable; convert to a list.
        if 'Tasks' in machine:
            machine['Tasks'] = list(machine['Tasks'])
        machines.append(machine)
    return {
        'statusCode': 200,
        'headers': {
            'Content-Type': 'text/plain'
        },
        'body': json.dumps(machines)
    }
|
import ast
import astunparse
import os
import tempfile
import nbformat
import pytest
import shutil
from wranglesearch import convert_candidates as cc
def create_dir_with_files(_dir, file_names):
    """Create *_dir* (if needed) containing an empty file for each name."""
    if not os.path.exists(_dir):
        os.mkdir(_dir)
    for name in file_names:
        with open(os.path.join(_dir, name), 'w') as handle:
            handle.write('')
def write_src_to_notebook(src, ipynb_file_name, temp_dir):
    """Wrap *src* in a one-cell notebook under *temp_dir*; return its path."""
    # wrap source in a notebook and write out
    nb = nbformat.v4.new_notebook()
    nb['cells'].append(nbformat.v4.new_code_cell(src))
    ipynb_file_path = os.path.join(temp_dir, ipynb_file_name)
    # Bug fix: the original passed an inline open() result to nbformat.write
    # and leaked the file handle; close it deterministically.
    with open(ipynb_file_path, 'w') as f:
        nbformat.write(nb, f)
    return ipynb_file_path
# (directory, files to create, expected notebooks, expected scripts)
get_files_cases = [
    ('/tmp/_test1', ['f1.ipynb', 'f1.xypnb', 'f2.R', 'f3.py'], ['/tmp/_test1/f1.ipynb'], ['/tmp/_test1/f3.py']),
    ('/tmp/_test2', ['f1.xypnb', 'f2.R', 'f3.py'], [], ['/tmp/_test2/f3.py']),
    ('/tmp/_test3', ['f1.xypnb', 'f2.R'], [], []),
]

@pytest.mark.parametrize('_dir,file_names,expected_ipynb,expected_py', get_files_cases)
def test_get_files(_dir, file_names, expected_ipynb, expected_py):
    """get_ipython_notebooks/get_py_scripts should pick out only their extensions."""
    create_dir_with_files(_dir, file_names)
    found_ipynb = cc.get_ipython_notebooks(_dir)
    found_py = cc.get_py_scripts(_dir)
    assert sorted(found_ipynb) == sorted(expected_ipynb)
    assert sorted(found_py) == sorted(expected_py)
    shutil.rmtree(_dir)
# (source text, whether it is parseable Python)
check_can_parse_cases = [
    ('def f(): return x', True),
    ('blah ble', False)
]

@pytest.mark.parametrize('src,expected', check_can_parse_cases)
def test_check_can_parse(src, expected):
    """check_can_parse should accept valid Python and reject junk."""
    with tempfile.NamedTemporaryFile(mode='w', delete=True) as tmp:
        tmp.write(src)
        tmp.flush()
        assert cc.check_can_parse(tmp.name) == expected
# (Python 2 source, expected Python 3 translation)
convert_2_to_3_cases = [
    ('print 2', 'print(2)'),
    ('xrange(10)', 'range(10)'),
    ('d = {}; d.iteritems()', 'd = {}; iter(d.items())'),
]

@pytest.mark.parametrize('src,expected', convert_2_to_3_cases)
def test_convert_2_to_3(src, expected):
    """convert_2_to_3 should produce the 2to3-translated source."""
    out_dir = tempfile.mkdtemp()
    with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=True) as tmp:
        tmp.write(src)
        tmp.flush()
        converted_path = cc.convert_2_to_3(tmp.name, out_dir)
        with open(converted_path, 'r') as converted_file:
            assert converted_file.read().strip() == expected
    shutil.rmtree(out_dir)
# Source snippets that must round-trip through a notebook unchanged.
convert_notebook_to_script_cases = [
    'print(2)',
    '1 + 2',
    'def f(x): return x',
    'class A(object): pass'
]

@pytest.mark.parametrize('src', convert_notebook_to_script_cases)
def test_convert_notebook_to_script(src):
    """Notebook -> script conversion should preserve the code semantically."""
    temp_dir = tempfile.mkdtemp()
    ipynb_file_path = write_src_to_notebook(src, 'file.ipynb', temp_dir)
    py_file_path = os.path.join(temp_dir, 'file.py')
    cc.convert_notebook_to_script(ipynb_file_path, temp_dir)
    print(py_file_path)
    print(ipynb_file_path)
    # Parse and unparse both sides to strip notebook markers/comments
    # before comparing.
    with open(py_file_path, 'r') as f:
        converted_src = f.read()
    normalized_result = astunparse.unparse(ast.parse(converted_src))
    normalized_src = astunparse.unparse(ast.parse(src))
    assert normalized_result == normalized_src
    shutil.rmtree(temp_dir)
def test_filter_candidates():
    """End-to-end: filter_candidates should convert notebooks/py2 sources and
    collect the parseable results into parsed_dir/converted_dir."""
    # (file name, source) fixtures; extension decides how each is written.
    files_and_contents = [
        # when things just work
        ('plain_python.py', '1'),
        ('plain_notebook.ipynb', '1'),
        # when can't parse
        #('bad_python.py', 'blah bleh'),
        #('bad_notebook.ipynb', 'blah bleh'),
        # when can convert from python2
        ('convertible_2to3_python.py', 'xrange(1)'),
        ('convertible_2to3_notebook.ipynb', 'xrange(1)'),
        # ignored files
        ('ignore.R', '1')
    ]
    # construct directory and files
    temp_dir = tempfile.mkdtemp()
    for file_name, src in files_and_contents:
        file_path = os.path.join(temp_dir, file_name)
        ext = file_name.split('.')[-1]
        if ext == 'ipynb':
            write_src_to_notebook(src, file_name, temp_dir)
        else:
            with open(file_path, 'w') as f:
                f.write(src)
    parsed_dir = os.path.join(temp_dir, 'parsed_dir')
    converted_dir = os.path.join(temp_dir, 'converted_dir')
    cc.filter_candidates(temp_dir, parsed_dir, converted_dir)
    # note that bad_notebook.ipynb is still converted, just not parseable later on
    # expected_converted_dir = sorted(['convertible_2to3_notebook.py', 'bad_notebook.py', 'plain_notebook.py'])
    expected_converted_dir = sorted(['convertible_2to3_notebook.py', 'plain_notebook.py'])
    expected_parsed_dir = sorted(['plain_python.py', 'convertible_2to3_python.py', 'convertible_2to3_notebook.py', 'plain_notebook.py'])
    assert sorted(os.listdir(converted_dir)) == expected_converted_dir
    assert sorted(os.listdir(parsed_dir)) == expected_parsed_dir
    # Each parsed file must match its expected (possibly 2to3-converted) source,
    # compared after a parse/unparse normalization pass.
    filename_to_expected_src = {
        'plain_python.py' : '1',
        'plain_notebook.py' : '1',
        'convertible_2to3_python.py' : 'range(1)',
        'convertible_2to3_notebook.py': 'range(1)',
    }
    for file_name in expected_parsed_dir:
        with open(os.path.join(temp_dir, 'parsed_dir', file_name), 'r') as f:
            result = f.read()
            result = astunparse.unparse(ast.parse(result))
            expected = filename_to_expected_src[file_name]
            expected = astunparse.unparse(ast.parse(expected))
            assert result == expected, 'Failed on %s' % file_name
    shutil.rmtree(temp_dir)
|
#!/usr/bin/env python
# test our rate per second
__author__ = 'Ben Smith'
import sys
from scapy.all import *
def main():
a = 1
conf.iface = "mon1"
data = "A" * 255
f = open("/sys/class/net/mon1/statistics/tx_packets",'r')
oldRX = 0
while(a):
newRX = f.read()
tempRX = int(newRX) - oldRX
print "[*] sending %s packets per second (pps)" % tempRX
oldRX = int(newRX)
SendRates(data,"Test SSID")
time.sleep(1)
def SendRates(rates, ssid):
    """Send one beacon frame advertising *ssid* with *rates* as the Supported Rates IE."""
    # Broadcast destination; random source and BSSID MACs per frame.
    frame=RadioTap()/\
    Dot11(addr1="ff:ff:ff:ff:ff:ff",addr2=RandMAC(),addr3=RandMAC())/\
    Dot11Beacon(cap="ESS")/\
    Dot11Elt(ID="SSID",len=len(ssid),info=ssid)/\
    Dot11Elt(ID="Rates",info=rates)/\
    Dot11Elt(ID="DSset",info="\x03")/\
    Dot11Elt(ID="TIM",info="\x00\x01\x00\x00")
    sendp(frame, verbose=1)
# Script entry point.
if __name__ == "__main__":
    main()
|
# -*- coding:utf-8 -*-
__author__ = 'xuy'
"""
在链表中,从m开始到n进行链表的逆置
"""
class ListNode:
    """A singly linked list node holding a value and a successor pointer."""

    def __init__(self, x):
        self.val = x
        self.next = None
class solution:
    def reverselinklist_mn(self,head,m,n):
        """Reverse the sublist from position m to n (1-indexed); return the new head.

        Returns None for invalid input (empty/one-node list, m >= n, or a
        negative position).
        """
        # head is the first real node of the list
        if head is None or head.next is None or m>=n or m<0 or n<0:
            return None
        # Create a dummy head node so m == 1 needs no special casing
        h=ListNode(-1)
        h.next=head
        pre=h
        cur=head
        i=1
        # Advance until cur points at node m and pre at node m-1
        while i<m and cur:
            pre=cur
            cur=cur.next
            i+=1
        # t1 remembers node m-1, t2 remembers node m; after reversal t1
        # still precedes the reversed span and t2 becomes its tail
        t1=pre
        t2=cur
        # Reverse nodes m..n (stop once cur has passed node n)
        while i<=n and cur:
            # Standard in-place pointer reversal
            lat=cur.next
            cur.next=pre
            pre=cur
            cur=lat
            i+=1
        # pre now heads the reversed span
        t1.next=pre
        # cur is the first node after the span; t2 is the span's new tail
        t2.next=cur
        # Return via the dummy head (handles the m == 1 case)
        return h.next
def init_linklist(data, num_data):
    """Build a singly linked list from the first num_data items of data; return its head."""
    sentinel = ListNode(None)
    tail = sentinel
    for i in range(num_data):
        tail.next = ListNode(data[i])
        tail = tail.next
    # Terminate the list explicitly.
    tail.next = None
    return sentinel.next
# Demo: reverse nodes 2..4 of [1, 2, 3, 4, 5] and print each value.
data=[1,2,3,4,5]
num_data=len(data)
m,n=2,4
solu=solution()
head=init_linklist(data,num_data)
head=solu.reverselinklist_mn(head,m,n)
# Walk the resulting list, one value per line.
while head:
    print(head.val)
    head=head.next
|
#!/user/bin/env python3
# -*- coding: utf-8 -*-
# author shengqiong.wu
class Node:
    """Singly linked node that also carries minimal linked-list bookkeeping.

    The list-style helpers (isEmpty/update/delete) operate on self.head and
    self.length, treating this node object as the list container.
    """

    def __init__(self, data, next=None):
        # Payload and successor pointer.
        self.data = data
        self.next = next
        # Bug fix: isEmpty/update/delete read self.head and self.length,
        # but the original never initialized them, so every call raised
        # AttributeError. Start with an empty chain.
        self.head = None
        self.length = 0

    def __repr__(self):
        return str(self.data)

    def isEmpty(self):
        """True when nothing has been appended via update()."""
        return self.length == 0

    def update(self, dataOrNode):
        """Append a Node (or a value wrapped in a Node) to the chain."""
        if isinstance(dataOrNode, Node):
            item = dataOrNode
        else:
            item = Node(dataOrNode)
        if not self.head:
            self.head = item
            self.length += 1
        else:
            # Walk to the tail and link the new item there.
            node = self.head
            while node.next:
                node = node.next
            node.next = item
            self.length += 1

    def create(self, data):
        # Not implemented in the original; kept as a stub.
        pass

    def delete(self, index):
        """Remove the node at *index*; only index 0 is actually handled."""
        if self.isEmpty():
            print("this chain table is empty")
            return
        if index > self.length:
            print('out of index')
            return
        if index == 0:
            # Deleting the head node.
            self.head = self.head.next
            self.length -= 1
            return
if __name__ == '__main__':
    # Quick smoke test: construct a node and echo its payload.
    res = Node(10)
    print(res.data)
|
#!/usr/bin/env python
from importlib import import_module
import socket
import os
import sys
from flask import Flask, render_template, Response, request
# import camera driver
# if os.environ.get('CAMERA'):
# Camera = import_module('camera_' + os.environ['CAMERA']).Camera
# else:
# from camera import Camera
from camera_opencv import Camera # Hard code using web cam
# Raspberry Pi camera module (requires picamera package)
# from camera_pi import Camera
# Global Flask application used by the route decorators below.
app = Flask(__name__)
def get_ip():
    """Best-effort discovery of this machine's outward-facing IP address.

    Connecting a UDP socket picks a local address without sending any
    packet; falls back to 127.0.0.1 when no route is available.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # doesn't even have to be reachable
        s.connect(('10.255.255.255', 1))
        IP = s.getsockname()[0]
    except Exception:
        # Bug fix: narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        IP = '127.0.0.1'
    finally:
        s.close()
    return IP
@app.route('/pic/<string:address>', methods= ["GET"])
def pic(address):
    """Serve a file from the static folder by name."""
    return app.send_static_file(address)
# background process happening without any refreshing
# background process happening without any refreshing
@app.route('/background_process_test', methods=['POST'])
def background_process_test():
    # Log the requested drive direction from the posted form.
    # NOTE(review): Python 2 print statements — this module targets Python 2.
    if request.method == 'POST':
        direction_action = request.form.get('direction')
        print "Direction"
        print direction_action
        print "End"
    return "nothing"
@app.route('/', methods=['GET', 'POST'])
def index():
    """Video streaming home page."""
    template_name = 'index.html'
    return render_template(template_name)
def gen(camera):
    """Video streaming generator function.

    Endlessly yields multipart chunks; each frame is the raw JPEG bytes
    returned by camera.get_frame().
    """
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@app.route('/video_feed')
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    stream = gen(Camera())
    return Response(stream,
                    mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
    # Bind to the machine's outward-facing IP so other hosts on the LAN
    # can reach the stream.
    my_ip = get_ip()
    sys.stderr.write(app.instance_path)
    app.run(host=my_ip, threaded=True)
|
from typing import Optional, Any, Dict
from bot.config.token import Tokens
async def mongodb_find_one(client, *args, _filter: Dict = None , **kwargs):
    """
    To get a single document from MongoDB asynchronously.
    Parameters:
    --
    - `client`: MotorClient instance.
    - `_filter`(optional): A dict specifying the query; defaults to {} (match any).
    - `*args`(optional): Other positional arguments.
    - `**kwargs`(optional): Other keyword arguments.
    """
    # Bug fix: identity check instead of `== None` (PEP 8; `== None` also
    # invokes arbitrary __eq__ implementations).
    if _filter is None:
        _filter = {}
    return await client.find_one(_filter, *args, **kwargs)
async def mongodb_count_documents(client, _filter: Dict, **kwargs) -> Any:
    """
    To count the number of documents in the collection asynchronously.
    Parameters:
    --
    - `client`: MotorClient instance.
    - `_filter`: A dict specifying the query. Can be an empty document to count all the documents
    - `**kwargs`(optional): Other keyword arguments.
    """
    count = await client.count_documents(_filter, **kwargs)
    return count
async def mongodb_find(client, *args, get_list = False, length = None, **kwargs):
    """
    Return a MotorCursor instance (asynchronous), or a list of documents.
    Parameters:
    --
    - `client`: MotorClient instance.
    - `get_list`: To get a list containing all the documents.
    - `length`: The number of documents to be in the list.
    - `*args`(optional): Other positional arguments.
    - `**kwargs`(optional): Other keyword arguments.
    """
    cursor = client.find(*args, **kwargs)
    if not get_list:
        return cursor
    # Materialize up to `length` documents (None means all).
    return await cursor.to_list(length = length)
async def mongodb_update_one(client, _filter, _update, array_filters = None):
    """
    Update a document asynchronously.
    Parameters:
    --
    - `client`: MotorClient instance.
    - `_filter`: A query to match the document to be updated.
    - `_update`: The modification to made to the document.
    - `array_filters`(optional): currently unused.
    """
    # TODO(review): unimplemented stub — callers currently get a no-op
    # returning None; presumably meant to call client.update_one(...).
    pass
async def mongodb_post_release_update(client, _filter, drama_data):
    """
    Finds and updates a document asynchronously.
    Parameters:
    --
    - `client` - MotorClient instance.
    - `_filter` - A query to match the document to be updated.
    - `drama_data` - The dict retrieved from the API.
    """
    overview = None
    next_date = None
    last_ep = None
    next_ep = None
    # Bug fix: the original assigned last_episode_to_air and then
    # immediately clobbered the variable with None, so the last-episode
    # fields were never populated.
    last_ep_data = drama_data.get("last_episode_to_air")
    next_ep_data = drama_data.get("next_episode_to_air")
    if last_ep_data is not None:
        last_ep = last_ep_data.get("episode_number")
        overview = last_ep_data.get("overview")
    if next_ep_data is not None:
        next_ep = next_ep_data.get("episode_number")
        next_date = next_ep_data.get("air_date")
    _update = {
        "$set":
            {
                "season": drama_data["seasons"][-1].get("season_number"),
                "dates":
                    {
                        "last_episode": drama_data.get("last_air_date"),
                        "next_episode": next_date
                    },
                "episodes":
                    {
                        "last_episode": last_ep,
                        "next_episode": next_ep
                    },
                "poster": drama_data["seasons"][-1].get("poster_path"),
                "overview": overview,
            }
    }
    try:
        await client.find_one_and_update(_filter, _update)
    except Exception as e:
        # Best-effort update: log and continue (matches original behavior).
        print(e)
|
# Read Text.txt and print its whitespace-separated tokens.
f = open("Text.txt", "r")
# Bug fix: readlines() returns a list, which has no .split() — the original
# raised AttributeError. Split the whole file text into words instead.
newlist = f.read().split()
print (newlist)
f.close()
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
app = Flask(__name__)
app.config.from_object("settings")  # load configuration from settings.py
db = SQLAlchemy(app)  # ORM handle bound to the app
ma = Marshmallow(app)  # (de)serialization helper bound to the app
# Initial setup so the app can perform DB operations
def init_db(app):
    db.init_app(app)
from issue_age import group
def test_age():
    """group() should map representative ages to their buckets."""
    expected = {
        12: "Kid",
        19: "Teenager",
        28: "Young Adult",
        64: "Adult",
        65: "Senior",
    }
    for age, bucket in expected.items():
        assert group(age) == bucket
|
#########################################################################################################
# Program Name : jiradataingestion.py #
# Program Description: #
# This program prepares a SQLite table containing Mastersoft Jira issues. #
# #
# Comment Date Author #
# ================================ ========== ================ #
# Initial Version 20/01/2020 Engramar Bollas #
#########################################################################################################
import sqlite3
import sys
import datetime
from datetime import datetime
from jira import JIRA
import arrow
# Jira connection settings.
# NOTE(review): placeholder strings — real credentials/URL go here; prefer
# environment variables or a secret store over hard-coding.
user = 'myemail'
apikey = 'API Key'
server = 'URL'
options = {
    'server': server
}
jira = JIRA(options, basic_auth=(user,apikey) )
#######################################################################
### Create ISSUES Table ###
#######################################################################
conn = sqlite3.connect('ISSUES.sqlite')
cur = conn.cursor()
cur.executescript('''
DROP TABLE IF EXISTS ISSUES;
CREATE TABLE issues (
ID INT,
ISSUE_ID varchar(20) PRIMARY KEY,
PARENT_ID varchar(20),
ETA varchar(20),
BUS_IMPACT varchar(20),
DEV_EFFORT varchar(20),
SUMMARY varchar(100),
CONFLUENCE varchar(100),
STATUS varchar(20),
ASSIGNEE varchar(20),
CREATED varchar(20),
TIME_TO_ETA varchar(20)
);
''')
# cr.txt: pipe-delimited rows of ISSUE_ID|ETA|ASSIGNEE|CONFLUENCE
# (see the field parsing in the ingestion loop below).
fname = 'cr.txt'
fhand = open(fname)
#######################################################################
### Populate issues table ###
#######################################################################
ID = 0
total = 0
for line in fhand:
fields = line.split('|')
ISSUE_ID = fields[0].strip()
ETA = fields[1].strip()
ASSIGNEE = fields[2].strip()
CONFLUENCE = fields[3].strip()
issue = jira.issue(ISSUE_ID)
SUMMARY = str(issue.fields.summary)
BUS_IMPACT = ' '
DEV_EFFORT = ' '
STATUS = ' '
CREATED = str(issue.fields.created[:10])
CREATED_DATE = (datetime.fromisoformat(CREATED))
TODAY = arrow.now().format('YYYY-MM-DD')
TODAYS_DATE = (datetime.fromisoformat(TODAY))
ETA_DATE = (datetime.fromisoformat(ETA))
ELAPSED = str(ETA_DATE - TODAYS_DATE).split(',')
TIME_TO_ETA = ELAPSED[0]
PARENT_ID = ISSUE_ID
ID += 1
print ('Before Insert '+str(ID)+" "+ISSUE_ID)
cur.execute('''INSERT INTO ISSUES
(
ID,
ISSUE_ID,
PARENT_ID,
ETA,
BUS_IMPACT,
DEV_EFFORT,
SUMMARY,
CONFLUENCE,
STATUS,
ASSIGNEE,
CREATED,
TIME_TO_ETA
)
VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''',
(
ID,
ISSUE_ID,
PARENT_ID,
ETA,
BUS_IMPACT,
DEV_EFFORT,
SUMMARY,
CONFLUENCE,
STATUS,
ASSIGNEE,
CREATED,
TIME_TO_ETA
))
conn.commit()
print ('After Insert '+str(ID)+" "+ISSUE_ID)
print (issue.fields.issuelinks)
for link in issue.fields.issuelinks:
if hasattr(link, "outwardIssue"):
outwardIssue = link.outwardIssue
issue = jira.issue(outwardIssue.key)
outwardIssueSummary = issue.fields.summary
outwardIssueCreated = issue.fields.created
outwardIssueStatus = issue.fields.status
outwardIssueAssignee = issue.fields.assignee
print("\t1 Outward: " + outwardIssue.key, outwardIssueSummary, outwardIssueCreated, outwardIssueStatus)
#issueRec = str("1 Outward:"+"|"+str(outwardIssue.key)+"|"+str(outwardIssueSummary)+"|"+str(outwardIssueCreated)+"|"+str(outwardIssueStatus))
#issues.append(issueRec)
ISSUE_ID = str(outwardIssue.key)
SUMMARY = str(outwardIssueSummary)
BUS_IMPACT = ' '
DEV_EFFORT = ' '
STATUS = str(outwardIssueStatus)
ASSIGNEE = str(outwardIssueAssignee)
CONFLUENCE = ' '
CREATED = str(issue.fields.created[:10])
CREATED_DATE = (datetime.fromisoformat(CREATED))
TODAY = arrow.now().format('YYYY-MM-DD')
TODAYS_DATE = (datetime.fromisoformat(TODAY))
ETA_DATE = (datetime.fromisoformat(ETA))
ELAPSED = str(ETA_DATE - TODAYS_DATE).split(',')
TIME_TO_ETA = ELAPSED[0]
ID += 1
try:
cur.execute('''INSERT INTO ISSUES
(
ID,
ISSUE_ID,
PARENT_ID,
ETA,
BUS_IMPACT,
DEV_EFFORT,
SUMMARY,
CONFLUENCE,
STATUS,
ASSIGNEE,
CREATED,
TIME_TO_ETA
)
VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''',
(
ID,
ISSUE_ID,
PARENT_ID,
ETA,
BUS_IMPACT,
DEV_EFFORT,
SUMMARY,
CONFLUENCE,
STATUS,
ASSIGNEE,
CREATED,
TIME_TO_ETA
))
except:
print ('Duplicate found', ISSUE_ID)
conn.commit()
for link in issue.fields.issuelinks:
if hasattr(link, "outwardIssue"):
outwardIssue = link.outwardIssue
issue = jira.issue(outwardIssue.key)
outwardIssueSummary = issue.fields.summary
outwardIssueCreated = issue.fields.created
outwardIssueStatus = issue.fields.status
outwardIssueAssignee = issue.fields.assignee
print("\t1-1 Outward: " + outwardIssue.key, outwardIssueSummary, outwardIssueCreated, outwardIssueStatus)
#issueRec = str("1-1 Outward:"+"|"+str(outwardIssue.key)+"|"+str(outwardIssueSummary)+"|"+str(outwardIssueCreated)+"|"+str(outwardIssueStatus))
#issues.append(issueRec)
ISSUE_ID = str(outwardIssue.key)
SUMMARY = str(outwardIssueSummary)
BUS_IMPACT = ' '
DEV_EFFORT = ' '
STATUS = str(outwardIssueStatus)
ASSIGNEE = str(outwardIssueAssignee)
CONFLUENCE = ' '
CREATED = str(issue.fields.created[:10])
CREATED_DATE = (datetime.fromisoformat(CREATED))
TODAY = arrow.now().format('YYYY-MM-DD')
TODAYS_DATE = (datetime.fromisoformat(TODAY))
ETA_DATE = (datetime.fromisoformat(ETA))
ELAPSED = str(ETA_DATE - TODAYS_DATE).split(',')
TIME_TO_ETA = ELAPSED[0]
ID += 1
try:
cur.execute('''INSERT INTO ISSUES
(
ID,
ISSUE_ID,
PARENT_ID,
ETA,
BUS_IMPACT,
DEV_EFFORT,
SUMMARY,
CONFLUENCE,
STATUS,
ASSIGNEE,
CREATED,
TIME_TO_ETA
)
VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''',
(
ID,
ISSUE_ID,
PARENT_ID,
ETA,
BUS_IMPACT,
DEV_EFFORT,
SUMMARY,
CONFLUENCE,
STATUS,
ASSIGNEE,
CREATED,
TIME_TO_ETA
))
except:
print ('Duplicate found', ISSUE_ID)
conn.commit()
if hasattr(link, "inwardIssue"):
inwardIssue = link.inwardIssue
issue = jira.issue(inwardIssue.key)
inwardIssueSummary = issue.fields.summary
inwardIssueCreated = issue.fields.created
inwardIssueStatus = issue.fields.status
inwardIssueAssignee = issue.fields.assignee
print("\t1-1 Inward: " + inwardIssue.key, inwardIssueSummary, inwardIssueCreated, inwardIssueStatus)
#issueRec = str("1-1 Inward:"+"|"+str(inwardIssue.key)+"|"+str(inwardIssueSummary)+"|"+str(inwardIssueCreated)+"|"+str(inwardIssueStatus))
#issues.append(issueRec)
ISSUE_ID = str(inwardIssue.key)
SUMMARY = str(inwardIssueSummary)
BUS_IMPACT = ' '
DEV_EFFORT = ' '
STATUS = str(inwardIssueStatus)
ASSIGNEE = str(inwardIssueAssignee)
CONFLUENCE = ' '
CREATED = str(issue.fields.created[:10])
CREATED_DATE = (datetime.fromisoformat(CREATED))
TODAY = arrow.now().format('YYYY-MM-DD')
TODAYS_DATE = (datetime.fromisoformat(TODAY))
ETA_DATE = (datetime.fromisoformat(ETA))
ELAPSED = str(ETA_DATE - TODAYS_DATE).split(',')
TIME_TO_ETA = ELAPSED[0]
ID += 1
try:
cur.execute('''INSERT INTO ISSUES
(
ID,
ISSUE_ID,
PARENT_ID,
ETA,
BUS_IMPACT,
DEV_EFFORT,
SUMMARY,
CONFLUENCE,
STATUS,
ASSIGNEE,
CREATED,
TIME_TO_ETA
)
VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''',
(
ID,
ISSUE_ID,
PARENT_ID,
ETA,
BUS_IMPACT,
DEV_EFFORT,
SUMMARY,
CONFLUENCE,
STATUS,
ASSIGNEE,
CREATED,
TIME_TO_ETA
))
except:
print ('Duplicate found', ISSUE_ID)
conn.commit()
if hasattr(link, "inwardIssue"):
inwardIssue = link.inwardIssue
issue = jira.issue(inwardIssue.key)
inwardIssueSummary = issue.fields.summary
inwardIssueCreated = issue.fields.created
inwardIssueStatus = issue.fields.status
inwardIssueAssignee = issue.fields.assignee
print("\t1 Inward: " + inwardIssue.key, inwardIssueSummary, inwardIssueCreated, inwardIssueStatus)
#issueRec = ("1 Inward:"+"|"+str(inwardIssue.key)+"|"+str(inwardIssueSummary)+"|"+str(inwardIssueCreated)+"|"+str(inwardIssueStatus))
#issues.append(issueRec)
ISSUE_ID = str(inwardIssue.key)
SUMMARY = str(inwardIssueSummary)
BUS_IMPACT = ' '
DEV_EFFORT = ' '
STATUS = str(inwardIssueStatus)
ASSIGNEE = str(inwardIssueAssignee)
CONFLUENCE = ' '
CREATED = str(issue.fields.created[:10])
CREATED_DATE = (datetime.fromisoformat(CREATED))
TODAY = arrow.now().format('YYYY-MM-DD')
TODAYS_DATE = (datetime.fromisoformat(TODAY))
ETA_DATE = (datetime.fromisoformat(ETA))
ELAPSED = str(ETA_DATE - TODAYS_DATE).split(',')
TIME_TO_ETA = ELAPSED[0]
ID += 1
try:
cur.execute('''INSERT INTO ISSUES
(
ID,
ISSUE_ID,
PARENT_ID,
ETA,
BUS_IMPACT,
DEV_EFFORT,
SUMMARY,
CONFLUENCE,
STATUS,
ASSIGNEE,
CREATED,
TIME_TO_ETA
)
VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''',
(
ID,
ISSUE_ID,
PARENT_ID,
ETA,
BUS_IMPACT,
DEV_EFFORT,
SUMMARY,
CONFLUENCE,
STATUS,
ASSIGNEE,
CREATED,
TIME_TO_ETA
))
except:
print ('Duplicate found', ISSUE_ID)
conn.commit()
for link in issue.fields.issuelinks:
if hasattr(link, "outwardIssue"):
outwardIssue = link.outwardIssue
issue = jira.issue(outwardIssue.key)
outwardIssueSummary = issue.fields.summary
outwardIssueCreated = issue.fields.created
outwardIssueStatus = issue.fields.status
outwardIssueAssignee = issue.fields.assignee
print("\t1-1 Outward: " + outwardIssue.key, outwardIssueSummary, outwardIssueCreated, outwardIssueStatus)
#issueRec = ("1-1 Outward:"+"|"+str(outwardIssue.key)+"|"+str(outwardIssueSummary)+"|"+str(outwardIssueCreated)+"|"+str(outwardIssueStatus))
#issues.append(issueRec)
ISSUE_ID = str(outwardIssue.key)
SUMMARY = str(outwardIssueSummary)
BUS_IMPACT = ' '
DEV_EFFORT = ' '
STATUS = str(outwardIssueStatus)
ASSIGNEE = str(outwardIssueAssignee)
CONFLUENCE = ' '
CREATED = str(issue.fields.created[:10])
CREATED_DATE = (datetime.fromisoformat(CREATED))
TODAY = arrow.now().format('YYYY-MM-DD')
TODAYS_DATE = (datetime.fromisoformat(TODAY))
ETA_DATE = (datetime.fromisoformat(ETA))
ELAPSED = str(ETA_DATE - TODAYS_DATE).split(',')
TIME_TO_ETA = ELAPSED[0]
ID += 1
try:
cur.execute('''INSERT INTO ISSUES
(
ID,
ISSUE_ID,
PARENT_ID,
ETA,
BUS_IMPACT,
DEV_EFFORT,
SUMMARY,
CONFLUENCE,
STATUS,
ASSIGNEE,
CREATED,
TIME_TO_ETA
)
VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''',
(
ID,
ISSUE_ID,
PARENT_ID,
ETA,
BUS_IMPACT,
DEV_EFFORT,
SUMMARY,
CONFLUENCE,
STATUS,
ASSIGNEE,
CREATED,
TIME_TO_ETA
))
except:
print ('Duplicate found', ISSUE_ID)
conn.commit()
if hasattr(link, "inwardIssue"):
inwardIssue = link.inwardIssue
issue = jira.issue(inwardIssue.key)
inwardIssueSummary = issue.fields.summary
inwardIssueCreated = issue.fields.created
inwardIssueStatus = issue.fields.status
inwardIssueAssignee = issue.fields.assignee
print("\t1-1 Inward: " + inwardIssue.key, inwardIssueSummary, inwardIssueCreated, inwardIssueStatus)
#issueRec = str("1-1 Inward:"+"|"+str(inwardIssue.key)+"|"+str(inwardIssueSummary)+"|"+str(inwardIssueCreated)+"|"+str(inwardIssueStatus))
#issues.append(issueRec)
ISSUE_ID = str(inwardIssue.key)
SUMMARY = str(inwardIssueSummary)
BUS_IMPACT = ' '
DEV_EFFORT = ' '
STATUS = str(inwardIssueStatus)
ASSIGNEE = str(inwardIssueAssignee)
CONFLUENCE = ' '
CREATED = str(issue.fields.created[:10])
CREATED_DATE = (datetime.fromisoformat(CREATED))
TODAY = arrow.now().format('YYYY-MM-DD')
TODAYS_DATE = (datetime.fromisoformat(TODAY))
ETA_DATE = (datetime.fromisoformat(ETA))
ELAPSED = str(ETA_DATE - TODAYS_DATE).split(',')
TIME_TO_ETA = ELAPSED[0]
ID += 1
try:
cur.execute('''INSERT INTO ISSUES
(
ID,
ISSUE_ID,
PARENT_ID,
ETA,
BUS_IMPACT,
DEV_EFFORT,
SUMMARY,
CONFLUENCE,
STATUS,
ASSIGNEE,
CREATED,
TIME_TO_ETA
)
VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''',
(
ID,
ISSUE_ID,
PARENT_ID,
ETA,
BUS_IMPACT,
DEV_EFFORT,
SUMMARY,
CONFLUENCE,
STATUS,
ASSIGNEE,
CREATED,
TIME_TO_ETA
))
except:
print ('Duplicate found', ISSUE_ID)
conn.commit()
total += 1
print ('Total CRs : ', total)
fhand.close()
print ('Done') |
"""
This driver takes cares of storing information about and sending infrared signals to control my
living room lights
First, a dictionnary of the different commands is available, which linked to the data bits to send.
Then, when a button must be simulated, these databits are converted to IR lengths to send, expanded
on with headers and trailing signals, and finally sent.
== Signals ==
Starts with data bits where each 1 is (High, Low) of (1300, 400) and each 0 is (High, Low) of
(400, 1300). **1
Follows the repetition signal (High, Low) of (450, 22700)
Follows the same data bits as before
Finally, the signal concludes with a Trail (High, Low) of (450, 22700)
**1 : data-bit 1 and 0 could be reversed,
"""
########################
# Import local packages
########################
from global_libraries import signal_sender
from global_libraries import general_utils
####################
# Global parameters
####################
__author__ = 'Baland Adrien'
# Signal parameters: pulse lengths as (On, Off) pairs, presumably in
# microseconds -- TODO confirm units against signal_sender.
header_signal = []  # Header of signal, before any data bit (On, Off)
one_bit = [1300, 380]  # Length for 1-data bit (On, Off)
zero_bit = [450, 1220]  # Length for 0-data bit (On, Off)
# NOTE(review): the module docstring quotes (1300, 400)/(400, 1300) for the
# data bits; the values above differ slightly -- confirm which is current.
n_repeat = 3  # Number of time the signal will be repeated. If > 0, repeat must be non-empty
repeat_signal = [460, 22700]  # (On, Off) separating signal repetition.
trail_signal = [460, 22700]  # (On, Off) to notify the end of the signal
# Data bits to transmit for each supported remote button.
all_codes = {
    'Power': '11010000010',
    'Bright+': '11010000100',
    'Bright-': '11010010000',
    'Yellow': '11010010100',
    'White': '11010011000',
    'Minimum': '11010001100',
    'Maximum': '11010001000',
    'A': '11010011100',
    'B': '11010010010',
    'Mute': '11010010001'
}
####################################################################################################
# send_signal
####################################################################################################
# Revision History:
# 2017-01-27 AB - Created function
# 2017-05-19 Adba : Added gpion_pin parameter
####################################################################################################
def send_signal(remote_button, gpio_pin=21):
    """
    Sends the infrared signal associated with the requested remote button.
    INPUT:
        remote_button (str): key into all_codes ('Power', 'Bright+', ...)
        gpio_pin (int): BCM pin driving the IR LED (default: 21)
    OUTPUT:
        status returned by the pigpio sender, or an error status when the
        button name is unknown.
    """
    data_bytes = all_codes.get(remote_button)
    if data_bytes is None:
        # Unknown button: log error -505 with the offending name and bail out.
        details = '%s' % (remote_button,)
        ############################################################
        return general_utils.log_error(-505, error_details=details)
        ############################################################

    # Expand the data bits into the ordered list of pulse lengths to emit,
    # including header, repetitions and the trailing pulse.
    all_wave_lengths, wave_order = signal_sender.convert_bits_to_length(
        data_bytes, one_bit, zero_bit, header_signal,
        repeat_signal, trail_signal, n_repeat)

    # 38 kHz carrier, 50% duty cycle, on the requested GPIO pin.
    ir = signal_sender.PigpioInterface(gpio_pin, 38000, 0.5)
    return ir.send_code(all_wave_lengths, wave_order)
##################
# END send_signal
##################
|
def parse_bow(line):
    """Parse one Vowpal-Wabbit-style line ("<label> ... |<features>") into a dict.

    Returns {"label": <int>, "<feature_id>": <float>, ...} where feature
    tokens look like "f12:0.5".
    """
    import re
    # Allow multi-digit integer parts and negative values.  The original
    # pattern (-?\d[.]\d+) accepted only a single digit before the decimal
    # point, so features such as "f1:12.5" were silently dropped.
    feature_pattern = re.compile(r"(?P<feature_id>f\d+):(?P<feature_value>-?\d+[.]\d+)")
    label_part, bow_part = line.split("|", 1)
    # Label is the first whitespace-delimited token.  Indexing instead of
    # tuple-unpacking tolerates a label part with no trailing space (the
    # original raised ValueError there).
    label = label_part.split(" ", 1)[0]
    features = [(m.group("feature_id"), float(m.group("feature_value")))
                for m in feature_pattern.finditer(bow_part)]
    return {"label": int(label), **dict(features)}
def vw_model(bow_string, rating=None):
    """Format a bag-of-words string as a Vowpal Wabbit example line.

    Line layout: [Label] [Importance] [Base] [Tag]|Namespace Features |Namespace Features
    A missing, falsy, or "?" rating produces an unlabeled example.
    """
    labeled = bool(rating) and rating != "?"
    if not labeled:
        return "|bow {0}".format(bow_string)
    return "{0} 1.0 |bow {1}".format(rating, bow_string)
|
# -*- coding: utf-8 -*-
from webapp2 import Route, RedirectHandler
from webapp2_extras.routes import PathPrefixRoute
def get_rules():
    """Returns the list of URL rules for the application.

    :return:
        A list of :class:`webapp2.Route` instances.
    """
    rules = [
        # ------------------------------- URL hacks go here --------------------------------------
        Route('/tsk/fix_images', name='fiximages', handler='apps.backend.hacks.FixImages'),
        Route('/tsk/fix_re', name='fixre', handler='apps.backend.hacks.FixRealEstates'),
        Route('/tsk/remove_re/<key>', name='removere', handler='apps.backend.hacks.RemoveRealEstate'),
        Route('/tsk/fix_prop', name='fixprop', handler='apps.backend.hacks.FixProperty'),
        # Launch campaign
        Route('/tsk/start_engine_campaign', name='start_engine_campaign', handler='apps.backend.hacks.StartEngineCampaign'),
        # Hack for EMI
        Route('/ver/<archivo>', name='ver_archivo', handler='apps.backend.hacks.VerArchivo'),
        # Hack: unsubscribe for the La Plata campaign
        Route('/unsubscribe/<email>', name='unsubscribe', handler='apps.backend.hacks.Unsubscribe'),
        # ----------------------------------------------------------------------------------------
        Route('/', name='frontend/home', handler='apps.frontend.home.Index'),
        Route('/mapa', name='frontend/map', handler='apps.frontend.map.Index'),
        Route('/inmobiliarias-la-plata', name='frontend/red', handler='apps.frontend.home.Red'),
        Route('/terms', name='frontend/terms', handler='apps.frontend.home.Terms'),
        Route('/mapa/<slug>/<key_name_or_id>', name='frontend/map/slug/key', handler='apps.frontend.map.Index:slugged_link'),
        Route('/mapa/<realestate>', name='frontend/map/realestate', handler='apps.frontend.map.Index:realesate_filtered'),
        #Route('/link/copy', name='frontend/link/copy', handler='apps.frontend.link.ShortenLink'),
        Route('/link/copy', name='frontend/link/copy', handler='apps.frontend.link.ShortenLocalLink'),
        Route('/link/copy/sendmail', name='frontend/link/sendmail', handler='apps.frontend.link.EmailShortenedLink'),
        Route('/link/share/', name='frontend/link/share', handler='apps.frontend.link.SearchShare'),
        Route('/link/map/<bitly_hash>', name='frontend/link/map', handler='apps.frontend.link.LoadSearchLink'),
        Route('/service/search', name='frontend/search', handler='apps.frontend.search.Search'),
        Route('/compare/<keys>/<oper>', name='compare', handler='apps.frontend.property_info.Compare'),
        Route('/<slug>/ficha-<key>/<oper>', name='frontend/ficha', handler='apps.frontend.property_info.Ficha:full_page'),
        Route('/service/popup/<key>/<bubble_css>/<oper>', name='frontend/property_popup', handler='apps.frontend.property_info.PopUp'),
        Route('/service/ficha/<key>/<oper>', name='frontend/property_ficha', handler='apps.frontend.property_info.Ficha'),
        Route('/service/ficha/email/<key>/<oper>', name='frontend/ficha/sendemail', handler='apps.frontend.property_info.SendMail'),
        Route('/landing/<key>', name='landing', handler='apps.frontend.landing.Handler')
    ]
    return rules
|
"""
Implementation of the paper - "Improving Unsupervised Defect Segmentationby Applying Structural Similarity To Autoencoders"
https://arxiv.org/pdf/1807.02011
"""
import tensorflow as tf
from collections import OrderedDict
import numpy as np
import pathlib
import random
from tensorflow.examples.tutorials.mnist import input_data
import cv2
class SSIMAutoEncoder:
    """Convolutional autoencoder trained with an SSIM-based loss (TF1 graph mode).

    Builds an encoder/decoder pair from a shared layer-spec table, wires an
    SSIM + L1 loss on the reconstruction, and trains on images loaded from a
    hard-coded directory.
    """

    def __init__(self, inputshape=(None, 128, 128, 1)):
        # NHWC input placeholder; default is batches of 128x128 grayscale images.
        self.inputShape = inputshape
        self.curr_graph = tf.get_default_graph()
        self.inputImage = tf.placeholder(tf.float32, shape=self.inputShape, name="inp")
        # Encoder layer specs, in order:
        #   {name: [(h, w, in_channels, out_channels), (stride_h, stride_w)]}
        # The same table (reversed) drives the decoder.
        self.kernelSizesEncdoer = OrderedDict({
            "conv1": [(4, 4, self.inputShape[-1], 32), (2, 2)],
            "conv2": [(4, 4, 32, 32), (2, 2)],
            "conv3": [(3, 3, 32, 32), (1, 1)],
            "conv4": [(4, 4, 32, 64), (2, 2)],
            "conv5": [(3, 3, 64, 64), (1, 1)],
            "conv6": [(4, 4, 64, 128), (2, 2)],
            "conv7": [(3, 3, 128, 64), (1, 1)],
            "conv8": [(3, 3, 64, 32), (1, 1)],
            "conv9": [(8, 8, 32, 100), (1, 1)]
        })

    def conv_layer_decoder(self, scopeName, prevLayerOut, kernelSize, output_shape, padding='SAME', activation=True):
        """Transposed-convolution block: conv2d_transpose + bias + ReLU.

        kernelSize is an encoder spec entry; output_shape is the tensor shape
        the transpose should produce.  NOTE(review): the `activation` flag is
        ignored here -- ReLU is always applied (the flag-honoring branch was
        commented out in the original).
        """
        _filter = [kernelSize[0][0], kernelSize[0][1], kernelSize[0][2], kernelSize[0][3]]
        _strides = [1, (kernelSize[1][1]), (kernelSize[1][0]), 1]
        # NOTE(review): conv2d_transpose filters are [h, w, out_ch, in_ch];
        # _filter[2] is reused as the bias width -- confirm this matches the
        # actual number of output channels.
        _biases = [_filter[2]]
        print(_filter)
        print(_biases)
        print("-------------------")
        print(output_shape)
        with tf.name_scope(scopeName + "Decoder") as scope:
            kernel = tf.Variable(tf.truncated_normal(_filter, dtype=tf.float32,
                                                     stddev=1e-1), name='weight')
            conv = tf.nn.conv2d_transpose(prevLayerOut, output_shape=output_shape, filter=kernel, strides=_strides, padding=padding)
            biases = tf.Variable(tf.constant(0.0, shape=_biases, dtype=tf.float32),
                                 trainable=True, name='bias')
            bias = tf.nn.bias_add(conv, biases, name="bias_add")
            conv1 = tf.nn.relu(bias, name="activation")
        return conv1

    def conv_layer_encoder(self, scopeName, prevLayerOut, kernelSize, padding='SAME', activation=True):
        """Convolution block: conv2d + bias, with optional ReLU.

        The bias-add / ReLU output is named "lastOp" so the decoder can look
        the tensor up by name later.
        """
        _filter = list(kernelSize[0])
        _strides = [1, (kernelSize[1][1]), (kernelSize[1][0]), 1]
        _biases = [kernelSize[0][-1]]
        print(_filter)
        print(_strides)
        print(_biases)
        with tf.name_scope(scopeName + "Encoder") as scope:
            kernel = tf.Variable(tf.truncated_normal(_filter, dtype=tf.float32,
                                                     stddev=1e-1), name='weight')
            conv = tf.nn.conv2d(prevLayerOut, filter=kernel, strides=_strides, padding=padding)
            biases = tf.Variable(tf.constant(0.0, shape=_biases, dtype=tf.float32),
                                 trainable=True, name='bias')
            bias = tf.nn.bias_add(conv, biases, name="lastOp")
            if activation:
                conv1 = tf.nn.relu(bias, name="lastOp")
            else:
                conv1 = bias
            return conv1
        # NOTE(review): unreachable duplicate return kept from the original.
        return conv1

    def MultiScaleSSIM(self, img1, img2, max_val=255, filter_size=11, filter_sigma=1.5,
                       k1=0.01, k2=0.03, weights=None):
        """Return the MS-SSIM score between `img1` and `img2`.

        This function implements Multi-Scale Structural Similarity (MS-SSIM)
        Image Quality Assessment according to Zhou Wang's paper, "Multi-scale
        structural similarity for image quality assessment" (2003).
        Link: https://ece.uwaterloo.ca/~z70wang/publications/msssim.pdf
        Author's MATLAB implementation:
        http://www.cns.nyu.edu/~lcv/ssim/msssim.zip

        Arguments:
            img1: Numpy array holding the first RGB image batch.
            img2: Numpy array holding the second RGB image batch.
            max_val: the dynamic range of the images (i.e., the difference
                between the maximum and minimum allowed values).
            filter_size: Size of blur kernel to use (reduced for small images).
            filter_sigma: Standard deviation for the Gaussian blur kernel
                (reduced for small images).
            k1, k2: Stability constants from the original paper (0.01, 0.03).
            weights: List of weights per level; if None, the five weights from
                the original paper are used.

        Returns:
            MS-SSIM score between `img1` and `img2`.

        Raises:
            RuntimeError: If the inputs differ in shape or are not 4-D
                [batch_size, height, width, depth].

        NOTE(review): `_SSIMForMultiScale` and `convolve` are neither defined
        nor imported in this file, so executing this method raises NameError.
        It is also called from loss_ssim with TF tensors, although this code
        expects numpy arrays -- confirm intended usage.
        """
        print(img1.shape)
        if img1.shape != img2.shape:
            raise RuntimeError('Input images must have the same shape (%s vs. %s).',
                               img1.shape, img2.shape)
        if img1.ndim != 4:
            raise RuntimeError('Input images must have four dimensions, not %d',
                               img1.ndim)
        # Note: default weights don't sum to 1.0 but do match the paper / matlab code.
        weights = np.array(weights if weights else
                           [0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
        levels = weights.size
        downsample_filter = np.ones((1, 2, 2, 1)) / 4.0
        im1, im2 = [x.astype(np.float64) for x in [img1, img2]]
        mssim = np.array([])
        mcs = np.array([])
        for _ in range(levels):
            ssim, cs = _SSIMForMultiScale(
                im1, im2, max_val=max_val, filter_size=filter_size,
                filter_sigma=filter_sigma, k1=k1, k2=k2)
            mssim = np.append(mssim, ssim)
            mcs = np.append(mcs, cs)
            # 2x2 mean-pool then stride-2 subsample for the next scale.
            filtered = [convolve(im, downsample_filter, mode='reflect')
                        for im in [im1, im2]]
            im1, im2 = [x[:, ::2, ::2, :] for x in filtered]
        return (np.prod(mcs[0:levels-1] ** weights[0:levels-1]) *
                (mssim[levels-1] ** weights[levels-1]))

    def loss_ssim(self, im1, im2):
        """Build the SSIM-based loss between two image tensors, plus an L1 loss.

        Extracts 11x11 patches from both tensors, scores them with
        MultiScaleSSIM, and reduces the per-patch residuals to a scalar.
        Returns (ssim_loss, l1_loss) as graph tensors.
        """
        ksizes = [1, 11, 11, 1]
        strides = [1, 1, 1, 1]
        rates = [1, 1, 1, 1]
        patches1 = tf.extract_image_patches(
            im1,
            ksizes,
            strides,
            padding='SAME',
            rates=rates,
            name=None
        )
        patches2 = tf.extract_image_patches(
            im2,
            ksizes,
            strides,
            padding='SAME',
            rates=rates,
            name=None
        )
        shape = tf.shape(im1)
        # Reorder patches to [batch, n_patches, 11, 11, 1] before scoring.
        patches1 = tf.reshape(patches1, shape=[shape[0], ksizes[1], ksizes[2], -1])
        patches2 = tf.reshape(patches2, shape=[shape[0], ksizes[1], ksizes[2], -1])
        patches1 = tf.transpose(patches1, perm=[0, 3, 1, 2])
        patches1 = tf.expand_dims(
            patches1,
            axis=4,
            name=None,
            dim=None
        )
        patches2 = tf.transpose(patches2, perm=[0, 3, 1, 2])
        patches2 = tf.expand_dims(
            patches2,
            axis=4,
            name=None,
            dim=None
        )
        ssim_scores = (self.MultiScaleSSIM(patches1, patches2, max_val=1.0))
        ssim_residual = (tf.reshape(ssim_scores, shape=shape, name="ssim_residual"))
        flattened_res = tf.reshape(ssim_residual, [shape[0], -1])
        # NOTE(review): the negated tensor below is never used -- the loss sums
        # the raw residuals, not the dissimilarity.  Confirm which is intended.
        flattened_res_diss = tf.math.negative(
            flattened_res,
            name="str_dissim"
        )
        total_loss = tf.reduce_mean(tf.reduce_sum(flattened_res, name="loss"))
        print("---------9-9-9--90-99-9-9-9-9-9-9-9-", total_loss)
        l1_loss = tf.losses.absolute_difference(
            im1,
            im2,
            weights=1.0,
            scope=None,
            loss_collection=tf.GraphKeys.LOSSES
        )
        return total_loss, l1_loss

    def get_encoder_model(self):
        """Stack the encoder conv layers per the spec table; return the last output."""
        for i, (key) in enumerate(self.kernelSizesEncdoer):
            value = self.kernelSizesEncdoer[key]
            activation = True
            padding = 'SAME'
            if (i == 0):
                # First layer reads the input placeholder.
                output = self.inputImage
            if (i == (len(self.kernelSizesEncdoer)-1)):
                # Bottleneck layer: no ReLU.
                activation = False
                padding = 'SAME'
            output = self.conv_layer_encoder(key, output, value, padding, activation)
        return output

    def get_decoder_model(self):
        """Build decoder (mirrored encoder specs), loss, and optimizer.

        Walks the encoder spec table in reverse, looking up each encoder
        layer's "lastOp" tensor by graph name to size the matching transposed
        convolution.  Returns (ssim_loss_tensor, optimizer_op).
        """
        padding = 'SAME'
        # Building the encoder also registers the named tensors we look up below.
        final_layer_encoder = self.get_encoder_model()
        all_keys = list(reversed(self.kernelSizesEncdoer))
        for i, (key) in enumerate(reversed(self.kernelSizesEncdoer)):
            value = self.kernelSizesEncdoer[key]
            activation = True
            padding = 'SAME'
            if (i == 0):
                # Decoder starts from the bottleneck output.
                output = self.curr_graph.get_tensor_by_name(key+"Encoder"+"/lastOp"+":0")
            if (i == (len(self.kernelSizesEncdoer)-1)):
                activation = False
            print(key+"Encoder"+"lastOp")
            try:
                # Target shape = the next-shallower encoder layer's output.
                output_shape = tf.shape(self.curr_graph.get_tensor_by_name(all_keys[i+1]+"Encoder"+"/lastOp"+":0"))
            except:
                # Last decoder layer: reconstruct at the input's shape.
                output_shape = tf.shape(self.curr_graph.get_tensor_by_name("inp:0"))
            print(output_shape)
            output = self.conv_layer_decoder(key, output, value, output_shape, padding, activation)
        output = tf.math.sigmoid(output, name="final_Activation")
        # Weighted combination: 80% SSIM loss + 20% L1 loss.
        ssim2, l1_loss = self.loss_ssim(output, self.curr_graph.get_tensor_by_name("inp:0"))
        ssim2 = (ssim2*80)/100
        l1_loss = (l1_loss*20)/100
        optimizer = tf.train.AdamOptimizer(learning_rate=0.00002, beta1=0.000005, name='optimizer').minimize(ssim2+l1_loss)
        return ssim2, optimizer

    def _parse_function(self, filename):
        """Dataset map fn: read a file path tensor and decode it as a 1-channel image."""
        image_string = tf.read_file(filename)
        image_decoded = tf.image.decode_image(image_string, channels=1)
        print("..................", image_decoded)
        return image_decoded

    def train(self):
        """Build the graph, load images from the hard-coded directory, and train.

        NOTE(review): the `with tf.Session() as sess` handle is immediately
        shadowed by a second Session built with a GPU-growth config, and a new
        Saver is constructed on every step -- both look accidental; confirm
        before relying on checkpoint behaviour.
        """
        loss, optimizer = self.get_decoder_model()
        init = tf.initialize_all_variables()
        with tf.Session() as sess:
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            sess = tf.Session(config=config)
            sess.run(init)
            # Training data: every file under this directory.
            data_root2 = "/home/jbmai/defects/DefectDetection/aug"
            data_root = pathlib.Path(data_root2)
            all_image_paths = list(data_root.glob('*'))
            all_image_paths = [str(path) for path in all_image_paths]
            print("mmmmmmmmmmmmmmmmmmmmmmm", all_image_paths)
            all_image_paths = tf.constant(all_image_paths)
            dataset = tf.data.Dataset.from_tensor_slices((all_image_paths))
            dataset = dataset.map(self._parse_function)
            dataset = dataset.apply(tf.contrib.data.unbatch())
            dataset = dataset.batch(1)
            print("00000000000000000000000000000000", dataset)
            iterator = dataset.make_one_shot_iterator()
            next_image_batch = iterator.get_next()
            print("===================================================", next_image_batch)
            with tf.Session() as session:
                # Pull one batch, scale to [0, 1], and resize to 128x128x1.
                img_value = session.run(next_image_batch)
                img_value = (img_value)/255
                print("-------------------------", img_value)
                img_n = []
                for img in (img_value):
                    img_n.append((np.resize(cv2.resize(img, (128, 128)), (128, 128, 1))))
                img_n = np.asarray(img_n)
                print(img_n)
                step = 1000
                for i in range(1, step+1):
                    saver = tf.compat.v1.train.Saver()
                    # l is the (None) optimizer result; m is the loss value.
                    l, m = sess.run([optimizer, loss], feed_dict={self.inputImage: img_n})
                    print(m)
                    if (i % 1 == 0 or i == 1):
                        print('Step %i: Minibatch Loss: %f' % (i, m))
                    if (step % 1 == 0):
                        saver.save(sess, 'tr/my-model.ckpt', global_step=step)
                        writer = tf.compat.v1.summary.FileWriter('tr/events', sess.graph)
if __name__ == "__main__":
    # Guard the training run behind the script entry point so importing this
    # module no longer builds the graph and starts a full training session.
    mayank = SSIMAutoEncoder()
    mayank.train()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pywikibot, re, sys, argparse
import blib
from blib import getparam, rmparam, msg, site, tname
import rulib
def process_text_on_page(index, pagetitle, text):
    """Warn about {{diminutive of}} templates inside Russian Etymology sections."""
    global args

    def pagemsg(txt):
        msg("Page %s %s: %s" % (index, pagetitle, txt))

    notes = []
    russian = blib.find_lang_section_from_text(text, "Russian", pagemsg)
    if not russian:
        pagemsg("Couldn't find Russian section for %s" % pagetitle)
        return
    # Split into (header, body) pairs at ===...=== subsection headers; the
    # capturing split yields [preamble, header1, body1, header2, body2, ...].
    pieces = re.split("(^===+[^=\n]+===+\n)", russian, 0, re.M)
    for header, body in zip(pieces[1::2], pieces[2::2]):
        if "==Etymology" not in header:
            continue
        for template in blib.parse_text(body).filter_templates():
            if tname(template) == "diminutive of":
                pagemsg("WARNING: Found diminutive-of in etymology: %s" % str(template))
# Command-line driver: scan the requested pages (page file, categories, or
# stdin) and run process_text_on_page over each.
parser = blib.create_argparser("Find uses of {{diminutive of}} in Russian Etymology sections",
    include_pagefile=True, include_stdin=True)
args = parser.parse_args()
start, end = blib.parse_start_end(args.start, args.end)
blib.do_pagefile_cats_refs(args, start, end, process_text_on_page, edit=True, stdin=True,
    default_cats=["Russian diminutive nouns", "Russian diminutive adjectives"])
|
import discord
from discord.ext import commands
class Jeweler(commands.Cog):
    """Plugin for managing pins and reactions. !pin command requires Manage Roles permission in Discord."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def pin(self, ctx):
        """Pins a message if Urist has appropriate permissions."""
        try:
            await ctx.message.pin()
        except Exception:
            # Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.  Typical failures here are a
            # missing Manage Messages permission or the channel pin limit.
            await ctx.send("Cannot pin message; check permissions.")
def setup(bot):
    """Extension entry point used by discord.py's load_extension to register the cog."""
    bot.add_cog(Jeweler(bot))
__author__ = 'NLP-PC'
import pickle
import numpy as np
def analysis_result(predict, true):
    """Print binary-classification metrics for `predict` vs `true` and plot them.

    Computes accuracy, precision, recall and F-score (all with
    average='binary') and shows the five values as a bar chart.
    """
    from sklearn.metrics import accuracy_score
    from sklearn.metrics import f1_score
    from sklearn.metrics import precision_recall_fscore_support
    f1 = f1_score(true, predict, average='binary')
    precision_binary, recall_binary, fbeta_score_binary, _ = precision_recall_fscore_support(true, predict, average='binary')
    accuracy = accuracy_score(true, predict)
    # NOTE(review): the label below says "Macro-F score" but f1 is computed
    # with average='binary' -- confirm which is intended.
    print('正确率(Accuracy):%.3f\nF值(Macro-F score):%.3f' % (accuracy, f1))
    print('精确度(Precision):%.3f\n召回率:%.3f\nF值: %.3f' % (precision_binary, recall_binary, fbeta_score_binary))
    # Plot the five scores as a bar chart (blocks until the window is closed).
    from matplotlib import pyplot as plt
    n_groups = 5
    values = (accuracy, f1, precision_binary, recall_binary, fbeta_score_binary)
    fig, ax = plt.subplots()
    index = np.arange(n_groups)
    bar_width = 0.35
    rects1 = plt.bar(index + bar_width/2, values, bar_width, alpha=0.6, color='b')
    plt.xlabel('Result')
    plt.ylabel('Scores')
    plt.title('Experiment analysis')
    plt.xticks(index + bar_width, ('Accuracy', 'F', 'Precision', 'Recall', 'F'))
    plt.ylim(0, 1)
    plt.tight_layout()
    plt.show()
if __name__ == "__main__":
# TRUE LABELS
from Utils import load_test_data
_, true = load_test_data()
# Predict labels
predict_label_1 = pickle.load(open("./acc_tmp/predict/predict_label_1.p", "rb"))
predict_label_2 = pickle.load(open("./acc_tmp/predict/predict_label_2.p", "rb"))
predict_label_3 = pickle.load(open("./acc_tmp/predict/predict_label_3.p", "rb"))
predict_label_4 = pickle.load(open("./acc_tmp/predict/predict_label_4.p", "rb"))
analysis_result(predict_label_1, true)
analysis_result(predict_label_2, true)
analysis_result(predict_label_3, true)
analysis_result(predict_label_4, true)
predict_without_clustering = pickle.load(open("./acc_tmp/predict/predict_without_clustering.p", "rb"))
analysis_result(predict_without_clustering, true) |
from pathlib import Path
import pandas as pd
import os
from django.conf import settings
from .models import AllMeterFiles,ValidatedFile
from .supportingFunctions import *
from datetime import datetime,timedelta
def validateFile(path,_meterData) :
    """Structurally validate the date-filtered NPC meter file for *path*.

    Reads ``master.dat`` to learn the known meters, then walks
    ``DateFiltered File/DateFilteredFile.npc`` line by line checking the
    WEEK-header / meter-header / data-row structure.  When no errors are
    found, a ``ValidatedFile`` record is created (or its file refreshed) and
    the ``AllMeterFiles`` row is marked "Verified".  Always returns the
    accumulated list of error strings (empty list means the file is valid).

    Helpers such as ``isMeterIdPattern``, ``isMeterNumberPattern``,
    ``extraCharHandler``, ``checkTimeStamp``, ``getDfInfo``,
    ``weekHeaderCheck``, ``meterHeaderCheck``, ``mainMeterDataCheck``,
    ``isMeterNameUnique``, ``isMeterDateConsecutive``, ``statusCodes`` and
    ``File`` presumably come from the wildcard import of
    ``supportingFunctions`` — TODO confirm.

    :param path: folder name under MEDIA_ROOT/meterFile holding the files.
    :param _meterData: AllMeterFiles model instance being validated.
    :return: list of error message strings (empty when validation passed).
    """
    print("i am in validateFile " + path)
    meterFileMainFolder = os.path.join(settings.MEDIA_ROOT,"meterFile",path) #Later "DateFiltered File" path is added
    # if not os.path.exists(meterFileMainFolder +'/Validated File(Copy)'): # Not required. Because file is taken from "DateFiltered File" Folder.
    # os.makedirs(meterFileMainFolder + '/Validated File(Copy)')
    if not os.path.exists(meterFileMainFolder +'/Validated File'):
        os.makedirs(meterFileMainFolder + '/Validated File')
    # All RealMeters here
    # master.dat lines look like: <Loc_Id> <Meter_No> <ctr> <ptr>
    realMeterInfo = []
    allMeterNumbers = []
    masterData = open(meterFileMainFolder+'/NPC Files/Necessary Files Local Copy/master.dat', "r")
    masterDataList = masterData.readlines()
    masterData.close()
    for elem in masterDataList :
        if(len(elem) > 1 and isMeterIdPattern(elem.split()[0])) :
            # print(elem.split())
            allMeterNumbers.append(elem.split()[1])
            realMeterInfo.append({"Loc_Id" : elem.split()[0] , "Meter_No" : elem.split()[1] , "ctr" : elem.split()[2] , "ptr" : elem.split()[3] })
    # print(realMeterInfo)
    # print(allMeterNumbers)
    # The date-filtered file is read as a single-column frame: one raw text
    # line per row.
    data = pd.read_csv(meterFileMainFolder+'/DateFiltered File/DateFilteredFile.npc', header = None)
    dfSeries = pd.DataFrame(data)
    df = dfSeries[0]
    print(df)
    errorLine = 0
    errorList = []
    # eofCheckFlag = 0
    # The file must terminate with a literal "EOF" line.
    if(df[len(df)-1] != "EOF") :
        errorList.append("Structural error. No EOF.")
        # return(False)
    weekList = []
    meterHeaderList = []
    meterNamesList = []
    try :
        # First pass: index every WEEK header and meter header, flagging any
        # line that is neither a timestamp row, a meter number, WEEK nor EOF.
        for i in range(len(df)-1) : # So EOF should not come at all.
            errorLine = i
            rowList = df[i].split()
            rowListNext = df[i+1].split()
            rowList = extraCharHandler(rowList)
            rowListNext = extraCharHandler(rowListNext)
            if(rowList[0] in checkTimeStamp or isMeterNumberPattern(rowList[0]) or rowList[0] == "WEEK" or rowList[0] == "EOF") :
                if(rowList[0] == "WEEK"):
                    if(isMeterNumberPattern(rowListNext[0])) : weekList.append(i)
                    else : errorList.append("1.Structural Error. Line :"+str(errorLine+1)+","+str(errorLine+2))
                if(isMeterNumberPattern(rowList[0])):
                    if(rowListNext[0] in checkTimeStamp) : meterHeaderList.append(i)
                    else : errorList.append("2.Structural Error. Line :"+str(errorLine+1)+","+str(errorLine+2))
                if(rowList[0] == "EOF") :
                    errorList.append("Structural error. Unexpected EOF. Line number : " + str(errorLine+1))
            else :
                errorList.append("Error at line : "+str(errorLine+1)+" length is "+str(len(rowList[0]))+ ". >> "+str(" ".join(rowList)))
        weekList.append(len(df)-1) #EOF index must be added
        informationDict = getDfInfo(weekList,meterHeaderList)
        # print(informationDict)
        # ###############################################################################################################
        # Second pass: validate each WEEK chunk (header fields, unique meter
        # names, consecutive dates, then every meter header and data row).
        for weekListIndex in weekList[:-1] : # Will not take the EOF's index
            errorLine = weekListIndex
            weekHeaderDataRaw = df[weekListIndex] # "WEEK FROM 0000 HRS OF 07-12-20 TO 1033 HRS OF 14-12-20"
            weekHeaderData = weekHeaderDataRaw.split()
            weekHeaderData = extraCharHandler(weekHeaderData)
            validateWeekHeader = weekHeaderCheck(weekHeaderData)
            if(not validateWeekHeader['status']) : errorList.append(str(validateWeekHeader['message']) + str(errorLine+1))
            weekStartDate_object = datetime.strptime(weekHeaderData[5], "%d-%m-%y")
            weekEndDate_object = datetime.strptime(weekHeaderData[10], "%d-%m-%y")
            print("For weekheader"+str(weekListIndex)+" : "+str(informationDict[weekListIndex]))
            # For weekheader0 : [1, 8, 15, 22, 29, 36, 43, 50, 54]
            meterNames = [df[x].split()[0] for x in informationDict[weekListIndex][:-1]] # Must be unique
            meterDates = [datetime.strptime(df[x].split()[4], "%d-%m-%y") for x in informationDict[weekListIndex][:-1]] # Must be consequtive in the range of weekStartDate_object to weekEndDate_object
            if(not (isMeterNameUnique(meterNames) and isMeterDateConsecutive(meterDates,weekStartDate_object,weekEndDate_object))) :
                print("I am error at "+ str(weekListIndex))
                errorList.append("Different meter names/non-consecutive dates inside same WEEK Header :{{ " + str(df[weekListIndex]) + " }}. Line number : " + str(errorLine+1))
                continue
            # if(meterNames[0] in meterNamesList) :
            # errorList.append("Same meter name already exists. Line number : " + str(errorLine+1))
            # continue
            meterNamesList.append(meterNames[0])
            for k in range(len(informationDict[weekListIndex])-2):
                # Will run for [1,21,31,not 41,not 54] i.e. 2 times less.
                # Coz no need to check the last chuck
                errorLine = informationDict[weekListIndex][k]
                meterHeaderDataRaw = df[informationDict[weekListIndex][k]] # "NP-5851-A 97845.9 35371.6 00136.0 07-12-20"
                meterHeaderDataNextRaw = df[informationDict[weekListIndex][k+1]]
                meterHeaderData = meterHeaderDataRaw.split()
                meterHeaderDataNext = meterHeaderDataNextRaw.split()
                meterHeaderData = extraCharHandler(meterHeaderData)
                meterHeaderDataNext = extraCharHandler(meterHeaderDataNext)
                validateMeterHeader = meterHeaderCheck(meterHeaderData)
                if(not validateMeterHeader['status']) : errorList.append(str(validateMeterHeader['message'])+str(errorLine+1))
                for line in range(informationDict[weekListIndex][k]+1,informationDict[weekListIndex][k+1]):
                    errorLine = line
                    meterMainDataRaw = df[line]
                    meterMainData = meterMainDataRaw.split()
                    meterMainData = extraCharHandler(meterMainData)
                    validateMeterData = mainMeterDataCheck(meterMainData)
                    # This need to be modified. Because now just skip the last chuck
                    # And for each chuck we must ensure full data availability.
                    if(not validateMeterData['status']) : errorList.append(str(validateMeterData['message'])+str(errorLine+1))
                errorLine = informationDict[weekListIndex][k+1]
                validateMeterHeader = meterHeaderCheck(meterHeaderDataNext)
                if(not validateMeterHeader['status']) : errorList.append(str(validateMeterHeader['message'])+str(errorLine+1))
                print("Changing meter header")
            print("Changing Week header")
        print("-----------------------------------------------------------------")
    except Exception as e :
        # Any unexpected failure is recorded with the last line reached.
        errorList.append("Unhandled generic issue : " + str(e) + ". Line no : " + str(errorLine+1))
    finally:
        # print(set(allMeterNumbers) - set(meterNamesList)) # For these meters we do not have any data
        # print(set(meterNamesList) - set(allMeterNumbers)) # These meter names are not defined in master.dat
        # for err in errorList :
        # print(err)
        # if(len(errorList) == 0) :
        # print("Proceed")
        # else :
        # print("Stop")
        if(len(errorList) != 0) :
            return errorList
        else :
            # dateFixedDf.to_csv(meterFileMainFolder+'/DateFiltered File(Copy)/DateFilteredFile.npc', header=False, index=None)
            # dateFixedDf.to_csv(meterFileMainFolder+'/DateFiltered File/DateFilteredFile.npc', header=False, index=None)
            # First validation for this meter file (status index 3 — see
            # statusCodes in supportingFunctions; TODO confirm meaning).
            if(not (_meterData.status is None) and (statusCodes.index(_meterData.status) == 3)) :
                print("New validatedFileId added")
                local_file = open(meterFileMainFolder+'/DateFiltered File/DateFilteredFile.npc',"rb") # Latest DateFiltered File will be picked from this folder.
                validatedFileObject = ValidatedFile.objects.create(fileName = 'ValidatedFile.npc', filePath = os.path.join("meterFile",path,"Validated File/ValidatedFile.npc"), meterFile = _meterData)
                validatedFileObject.validatedFile.save("ValidatedFile.npc", File(local_file))
                validatedFileObject.save()
                local_file.close()
                AllMeterFiles.objects.filter(id = _meterData.id).update(status="Verified")
            else : # That is ValidatedFile Object for this meter already exists.
                validatedFileObjects = list(filter(lambda validatedFile: (validatedFile.validatedFileMeterId() == str(_meterData.id)),ValidatedFile.objects.all()))
                print(validatedFileObjects)
                print(len(validatedFileObjects))
                validatedFileObject = validatedFileObjects[0]
                # NOTE(review): this handle is never closed on this path —
                # consider a with-block.
                local_file = open(meterFileMainFolder+'/DateFiltered File/DateFilteredFile.npc',"rb") # Latest DateFiltered File will be picked from this folder.
                validatedFileObject.validatedFile.save("ValidatedFile.npc", File(local_file))
                validatedFileObject.save()
            return errorList
from django.utils.text import slugify
from restaurant.models import Element
def run(*args):
    """Backfill missing slugs on restaurant Element rows."""
    for element in Element.objects.all():
        # Leave rows that already carry a slug untouched.
        if element.slug:
            continue
        element.slug = slugify(element.name, allow_unicode=True)
        element.save()
|
# -*- coding: utf-8 -*-
"""
Created on Nov 1 14:00:30 2019
Project Title: Text extraction.
Project Objective: Extract text in natural scene images
Team Members: Yuwei Chen, Minhui Chen, Jianyu He
"""
"""
Program description
This code detects text in natural scenes.
It combines EAST, non-maximum suppression and Tesseract:
EAST is a deep-learning text detector based on a novel architecture and training pattern.
Non-maximum suppression removes duplicate text bounding boxes, keeping only the one most likely to be a text region.
Tesseract converts each text region to a string, which is then printed.
Pipeline: read the input image, use EAST plus non-maximum suppression to extract text ROIs,
then run Tesseract OCR on each ROI to extract the text,
and finally print the recognized text on the screen.
"""
from imutils.object_detection import non_max_suppression
from PIL import Image
import numpy as np
import cv2
import pytesseract
import easygui
# Two parameters - scores: the probability of each region containing text;
# geometry: the text bounding box positions.
def text_detector(scores, geometry, min_confidence=0.5):
    """Decode EAST detector outputs into candidate text boxes.

    :param scores: array of shape (1, 1, rows, cols) with per-cell text
        probabilities.
    :param geometry: array of shape (1, 5, rows, cols); channels 0-3 hold the
        distances to the box edges and channel 4 the rotation angle.
    :param min_confidence: minimum probability for a cell to be kept.  New
        optional parameter generalizing the previously hard-coded 0.5;
        existing callers are unaffected.
    :return: tuple ``(rects, confidences)`` where ``rects`` is a list of
        (startX, startY, endX, endY) boxes and ``confidences`` the matching
        scores.
    """
    (numRows, numCols) = scores.shape[2:4]
    rects = []
    confidences = []
    for y in range(0, numRows):
        # Per-row slices of the score map and the five geometry channels.
        scoresData = scores[0, 0, y]
        data0 = geometry[0, 0, y]
        data1 = geometry[0, 1, y]
        data2 = geometry[0, 2, y]
        data3 = geometry[0, 3, y]
        anglesData = geometry[0, 4, y]
        for x in range(0, numCols):
            # Skip cells below the probability threshold.
            if scoresData[x] < min_confidence:
                continue
            # Feature maps are 4x smaller than the input image, so each cell
            # maps back to a 4-pixel offset.
            (offsetX, offsetY) = (x * 4.0, y * 4.0)
            # Rotation angle of this prediction.
            angle = anglesData[x]
            cos = np.cos(angle)
            sin = np.sin(angle)
            # Box height/width from the edge distances.
            h = data0[x] + data2[x]
            w = data1[x] + data3[x]
            # End corner from the rotated offsets; start corner backs off by
            # the box size.
            endX = int(offsetX + (cos * data1[x]) + (sin * data2[x]))
            endY = int(offsetY - (sin * data1[x]) + (cos * data2[x]))
            startX = int(endX - w)
            startY = int(endY - h)
            rects.append((startX, startY, endX, endY))
            confidences.append(scoresData[x])
    # Boxes plus their scores, ready for non-max suppression.
    return (rects, confidences)
def text_recongnition():
    """Run the full detect-and-OCR pipeline on a user-selected image.

    Opens a file dialog (easygui), runs the EAST detector, applies
    non-maximum suppression, OCRs each padded ROI with Tesseract, prints each
    recognized string and finally shows the annotated image.  (The function
    name keeps the original spelling because it is called at module level.)
    """
    # Pre-trained EAST model weights, expected in the working directory.
    eastModel = "frozen_east_text_detection.pb"
    # set the new width and height and then determine the ratio in change for
    # both the width and height, both of them are multiples of 32
    newW = 320
    newH = 320
    # The (optional) amount of padding to add to each ROI border
    # if you find the OCR result is incorrect you can increase the padding to
    # get a bigger bounding box, e.g. 0.03 will increase it by 3%
    padding = 0.03
    # in order to apply Tesseract v4 to OCR text we must supply
    # (1) a language, (2) an OEM flag of 4, indicating that the we
    # wish to use the LSTM neural net model for OCR, and finally
    # (3) an OEM value, in this case, 7 which implies that we are
    # treating the ROI as a single line of text
    config = ("-l eng --oem 1 --psm 7") # chi_sim
    #read image
    f = easygui.fileopenbox()
    image = cv2.imread(f)
    #make a copy for image
    origI = image.copy()
    #get the image height and width
    h,w = image.shape[:2]
    # calculate ratios that will be used to scale bounding box coordinates
    ratioW = w/float(newW)
    ratioH = h/float(newH)
    # resize the image and grab the new image dimensions
    image = cv2.resize(image,(newW,newH))
    (IH,IW) = image.shape[:2]
    # define the two output layer names for the EAST detector model the first is the output probabilities
    # and the second can be used to derive the bounding box coordinates of text
    layerNames = ["feature_fusion/Conv_7/Sigmoid", "feature_fusion/concat_3"]
    # load the pre-trained EAST text detector
    net = cv2.dnn.readNet(eastModel)
    # construct a blob from the image and then perform a forward pass of
    # the model to obtain the two output layer sets
    blob = cv2.dnn.blobFromImage(image, 1.0, (IW, IH),(123.68, 116.78, 103.94), swapRB=True, crop=False)
    net.setInput(blob)
    (scores, geometry) = net.forward(layerNames)
    # decode the predictions, then apply non-maxima suppression to
    # suppress weak, overlapping bounding boxes
    (rects, confidences) = text_detector(scores, geometry)
    # NMS effectively takes the most likely text regions, eliminating other overlapping regions
    boxes = non_max_suppression(np.array(rects), probs=confidences)
    # initialize the list of results to contain our OCR bounding boxes and text
    results = []
    # the bounding boxes represent where the text regions are, then recognize the text.
    # loop over the bounding boxes and process the results, preparing the stage for actual text recognition
    for (startX, startY, endX, endY) in boxes:
        # scale the bounding boxes coordinates based on the respective ratios
        startX = int(startX * ratioW)
        startY = int(startY * ratioH)
        endX = int(endX * ratioW)
        endY = int(endY * ratioH)
        # in order to obtain a better OCR of the text we can potentially
        # add a bit of padding surrounding the bounding box -- here we
        # are computing the deltas in both the x and y directions
        dX = int((endX - startX) * padding)
        dY = int((endY - startY) * padding)
        # apply padding to each side of the bounding box, respectively
        # (the end side gets 2*delta, matching the original implementation)
        startX = max(0, startX - dX)
        startY = max(0, startY - dY)
        endX = min(w, endX + (dX * 2))
        endY = min(h, endY + (dY * 2))
        # extract the actual padded ROI
        roi = origI[startY:endY, startX:endX]
        # use Tesseract v4 to recognize a text ROI in an image
        text = pytesseract.image_to_string(roi, config=config)
        # add the bounding box coordinates and actual text string to the results list
        results.append(((startX, startY, endX, endY), text))
    # sort the bounding boxes coordinates from top to bottom based on the y-coordinate of the bounding box
    results = sorted(results, key=lambda r:r[0][1])
    result = origI.copy()
    # loop over the results
    for ((startX, startY, endX, endY), text) in results:
        # display the text OCR'd by Tesseract
        print("{}\n".format(text))
        # draw the text and a bounding box surrounding the text region of the input image
        cv2.rectangle(result, (startX, startY), (endX, endY), (0, 0, 255), 2)
    # show the result image
    cv2.imshow("Text Recongnition", result)
    cv2.waitKey(0)
# Script entry point: run the pipeline once on import/execution.
text_recongnition()
import logging
from functools import partial
import collections
import matplotlib.pyplot as plt
import numpy as np
from PyQt5 import QtCore, QtWidgets, QtGui
from module.OneDScanProc import OneDScanProc
from module.RawFile import RawFile
class RCurveProc(OneDScanProc):
    """Rocking-curve scan processor.

    Extends OneDScanProc with rocking-curve parameters (sample thickness,
    CHI, HKL, beam intensity), extra toolbar actions (filtering, alignment,
    results) and a result table including a volume-fraction estimate.
    Methods like _filter, _binning_data, _target, _upt_param, _sum and
    _bragg_angle_cal presumably come from the base class — TODO confirm.
    """
    def __init__(self, *args):
        super().__init__(*args)
        # Default processing parameters; values are stored as strings and
        # parsed (int/float) where they are used.
        self.param.update({
            'THICKNESS': "900",
            'CHI': "11.24",
            'H': "1",
            'K': "1",
            'L': "1",
            'BEAM_INT': "100000000"
        })
        self._peak_side_point = []
        self.cfg = {}
    @property
    def name(self):
        # Module name without the package prefix.
        return __name__.split('.')[-1]
    @property
    def supp_type(self):
        # Scan types this processor supports (one-element tuple).
        return "RockingCurve",
    def _build_plot_widget(self):
        """
        Build the drawing widget: base widget plus filter, target and
        result toolbar entries.
        :return:
        """
        super(RCurveProc, self)._build_plot_widget()
        filter_tool_button = QtWidgets.QToolButton()
        # Build butter filter action
        butter_filter_action = QtWidgets.QAction(
            QtGui.QIcon(QtGui.QPixmap('icons/filter.png')),
            "Filter",
        )
        butter_filter_action.triggered.connect(self._filter)
        filter_tool_button.setDefaultAction(butter_filter_action)
        # Build the filter selection menu
        filter_tool_button_menu = QtWidgets.QMenu()
        filter_tool_button_menu.addAction(
            "Butter Filter(Default)", self._filter
        )
        filter_tool_button_menu.addAction(
            "Binning", self._binning_data
        )
        filter_tool_button.setMenu(filter_tool_button_menu)
        self._toolbar.addWidget(filter_tool_button)
        # Target tool bar
        target_tool_button = QtWidgets.QToolButton()
        auto_target_action = QtWidgets.QAction(
            QtGui.QIcon(QtGui.QPixmap('icons/target.png')),
            "Auto align data",
        )
        auto_target_action.triggered.connect(lambda: self._target(mode='auto'))
        target_tool_button.setDefaultAction(auto_target_action)
        target_tool_button_menu = QtWidgets.QMenu()
        target_tool_button_menu.addAction(
            "Auto(Default)", lambda: self._target(mode='auto')
        )
        target_tool_button_menu.addAction(
            "Manual", lambda: self._target(mode='manual')
        )
        target_tool_button.setMenu(target_tool_button_menu)
        self._toolbar.addWidget(target_tool_button)
        # Result action
        self.res_action = QtWidgets.QAction(
            QtGui.QIcon(QtGui.QPixmap('icons/experiment-results.png')),
            "Results..."
        )
        self.res_action.triggered.connect(self._result)
        self._toolbar.addAction(self.res_action)
    def _build_config_widget(self):
        """Create the configuration widget: log-plot toggle plus beam
        intensity, thickness, CHI and HKL inputs bound to self.param."""
        config_widget = QtWidgets.QWidget(self.plot_widget)
        config_layout = QtWidgets.QVBoxLayout()
        config_widget.setLayout(config_layout)
        disable_log_plot_q_ratio_button = QtWidgets.QRadioButton(
            "Disable Y-Log Plot")
        disable_log_plot_q_ratio_button.setChecked(self.param["disable_log_y"])
        disable_log_plot_q_ratio_button.toggled.connect(
            partial(self._upt_param, "disable_log_y"))
        config_layout.addWidget(disable_log_plot_q_ratio_button)
        intensity_input_layout = IntensityInputWidget(self.param)
        config_layout.addLayout(intensity_input_layout)
        thickness_input_layout = QtWidgets.QVBoxLayout()
        thickness_input_layout.addWidget(
            QtWidgets.QLabel('Thickness of Sample(\u212B):'))
        thickness_line_edit = QtWidgets.QLineEdit()
        thickness_line_edit.setText(self.param['THICKNESS'])
        thickness_line_edit.setInputMask("999999999")
        thickness_line_edit.textChanged.connect(
            partial(self._upt_param, "THICKNESS")
        )
        thickness_input_layout.addWidget(thickness_line_edit)
        config_layout.addLayout(thickness_input_layout)
        chi_input_layout = QtWidgets.QVBoxLayout()
        chi_input_layout.addWidget(
            QtWidgets.QLabel('CHI:'))
        chi_line_edit = QtWidgets.QLineEdit()
        chi_line_edit.setText(self.param['CHI'])
        chi_line_edit.setValidator(QtGui.QIntValidator(0, 360, chi_line_edit))
        chi_line_edit.textChanged.connect(partial(self._upt_param, "CHI"))
        chi_input_layout.addWidget(chi_line_edit)
        config_layout.addLayout(chi_input_layout)
        hkl_input_layout = HKLInputComboBox(self.param)
        config_layout.addLayout(hkl_input_layout)
        return config_widget
    def _configuration(self):
        """Show the configuration widget in its own tab window."""
        self._configuration_wd = self._build_config_widget()
        self.q_tab_widget = QtWidgets.QTabWidget()
        self.q_tab_widget.addTab(self._configuration_wd, "Rocking Curve")
        self.q_tab_widget.closeEvent = self._configuration_close
        self.q_tab_widget.show()
    @QtCore.pyqtSlot(bool)
    def repaint(self, message=True):
        """Redraw the curve; linear or semilog-y depending on parameters.

        NOTE(review): `message` only gates clearing the figure — the curve is
        re-plotted either way; confirm this is the intended behavior.
        """
        logging.debug("Re-Paint rocking curve %s" % self)
        if message:
            self.figure.clf()
        plt.figure(self.figure.number)
        if 'disable_log_y' in self.param and self.param['disable_log_y']:
            plt.plot(
                self.data[0, :],
                self.data[1, :],
                linewidth=1,
                color='C2',
            )
        else:
            plt.semilogy(
                self.data[0, :],
                self.data[1, :],
                linewidth=1,
                color='C2',
            )
        plt.xlabel("{0}".format(self.attr['STEPPING_DRIVE1']))
        plt.ylabel("{0}".format("Intensity(CPS)"))
        self.canvas.draw()
    def plot(self):
        """Plot Image."""
        self.figure.clf()
        self.repaint(True)
        self.plot_widget.show()
        return self.plot_widget
    def set_data(self, data, attr, *args, **kwargs):
        """Store scan data/attributes; an optional first positional extra
        argument is taken as the GUI configuration dict."""
        super(RCurveProc, self).set_data(data, attr, *args, **kwargs)
        try:
            self.cfg = args[0]
        except IndexError:
            pass
        return self
    @staticmethod
    def i_theory(i_0, v, theta, omega, th, index):
        """
        Theoretical integrated intensity of the rocking curve.

        :param i_0: The source intensity
        :param v: angular velocity
        :param theta: tth/2(emergence angle)
        :param omega: omega(incident angle)
        :param th: thickness of sample
        :param index: correction coefficient
        :return: theoretical intensity (same scale as i_0)
        """
        RO2 = 7.94E-30  # scattering cross section of electron
        LAMBDA = 1.5418E-10  # X-ray beam length
        F_GAP = 12684.62554  # unit cell structure factor (GaP)
        L = 1/np.sin(2*theta)  # Lorentz factor
        P = (1+np.cos(2*theta)**2)/2  # Polarization factor
        V_A = 5.4506E-10 ** 3  # volume of the crystal
        U = 1000000 / 37.6416  # mu
        c_0 = (
            np.sin(2 * theta - omega) /
            (np.sin(2 * theta - omega) + np.sin(omega))
        )
        c_1 = (
            1 -
            np.exp(
                - U * th / 1E10 *
                (
                    1 / np.sin(omega) + 1 / np.sin(2 * theta - omega)
                )
            )
        )
        c_2 = RO2 * LAMBDA ** 3 * F_GAP * P * L / V_A ** 2
        i_theo = i_0 * c_0 * c_1 * c_2 * index / (v * U)
        return i_theo
    def _fraction_calculation_param_dialog(self):
        """Build the modal dialog asking for intensity, thickness, CHI and
        HKL before the result calculation."""
        config_widget = QtWidgets.QDialog(self.plot_widget)
        config_layout = QtWidgets.QVBoxLayout()
        config_widget.setLayout(config_layout)
        intensity_input_layout = IntensityInputWidget(self.param)
        thickness_input_layout = QtWidgets.QVBoxLayout()
        thickness_input_layout.addWidget(
            QtWidgets.QLabel('Thickness of Sample(\u212B):'))
        thickness_line_edit = QtWidgets.QLineEdit()
        thickness_line_edit.setText(self.param['THICKNESS'])
        thickness_line_edit.setInputMask("999999999")
        thickness_line_edit.textChanged.connect(
            partial(self._upt_param, "THICKNESS")
        )
        thickness_input_layout.addWidget(thickness_line_edit)
        chi_input_layout = QtWidgets.QVBoxLayout()
        chi_input_layout.addWidget(
            QtWidgets.QLabel('CHI(\u00B0):'))
        chi_line_edit = QtWidgets.QLineEdit()
        chi_line_edit.setText(self.param['CHI'])
        chi_line_edit.setValidator(
            QtGui.QDoubleValidator(
                0., 360., 360000
            )
        )
        chi_line_edit.textChanged.connect(partial(self._upt_param, "CHI"))
        chi_input_layout.addWidget(chi_line_edit)
        hkl_input_layout = HKLInputComboBox(self.param)
        config_layout.addLayout(intensity_input_layout)
        config_layout.addLayout(thickness_input_layout)
        config_layout.addLayout(hkl_input_layout)
        config_layout.addLayout(chi_input_layout)
        return config_widget
    def _result(self):
        """Show the result table (intensities, volume fraction, fit values).

        Returns silently when no fit result is available.
        ``_recent_fit_res`` presumably holds (max, FWHM) from a prior fit in
        the base class — TODO confirm.
        """
        if not hasattr(self, '_recent_fit_res'):
            return
        # Let the user confirm/adjust the calculation parameters first.
        self._tmp_dialog = self._fraction_calculation_param_dialog()
        self._tmp_dialog.exec()
        try:
            step_time = self.attr['_STEPTIME']
            step_size = self.attr['_STEP_SIZE']
        except KeyError:
            # Alternate attribute spelling used by some data files.
            step_time = self.attr['STEP_TIME']
            step_size = self.attr['STEP_SIZE']
        v = np.deg2rad(step_size) / step_time
        intensity = (
            self._recent_fit_res[0] * self._recent_fit_res[1] /
            step_size * step_time)
        two_theta = self._bragg_angle_cal(
            0.54505,
            (
                int(self.param['H']),
                int(self.param['K']),
                int(self.param['L'])
            )
        )
        chi = float(self.param['CHI'])
        theta = two_theta / 2
        th = int(self.param['THICKNESS'])
        bm_int = int(float(self.param['BEAM_INT']))
        # NOTE(review): v is recomputed here with the same expression as above.
        v = np.deg2rad(step_size) / step_time
        # NOTE(review): effective theta after the CHI tilt; omega is set equal
        # to theta (symmetric geometry assumed) — confirm.
        theta = np.pi/2 - np.arccos(np.cos(np.deg2rad(chi))*np.sin(np.deg2rad(theta)))
        omega = theta
        print(np.rad2deg(omega))
        i_theo_l = self.i_theory(bm_int, v, theta, omega, th, 1)
        self.res_table_wd = QtWidgets.QTableWidget()
        try:
            # Restore the previously saved table window size, if any.
            self.res_table_wd.resize(
                QtCore.QSize(*self.cfg['res_table_wd_size']))
        except (AttributeError, KeyError):
            pass
        self.res_table_wd.setColumnCount(1)
        self.res_table_wd.setRowCount(12)
        # NOTE(review): the two "" separator keys collide in the dict, so
        # only 12 of the 13 listed entries survive — matching setRowCount(12)
        # but dropping one visual separator.
        res_dic = collections.OrderedDict(
            [
                ("Integrated Int (Fitted curve) (cp)", intensity),
                ("Integrated Int (cp)", self._sum() / step_size * step_time),
                ("Intensity theory (cp)", i_theo_l),
                ("Volume Fraction(%)", intensity / i_theo_l * 100),
                ("", ""),
                ("FWHM", self._recent_fit_res[1]),
                ("Max", self._recent_fit_res[0]),
                ("", ""),
                ("Source Intensity", bm_int),
                ("Angular Vitesse", v),
                ("Theta", np.rad2deg(theta)),
                ("Omega", np.rad2deg(omega)),
                ("Thickness", th)
            ]
        )
        self.res_table_wd.setVerticalHeaderLabels(list(res_dic.keys()))
        for idx, key in enumerate(res_dic):
            self.res_table_wd.setItem(
                idx, 0,
                QtWidgets.QTableWidgetItem(str(res_dic[key]))
            )
        # self.res_table_wd.closeEvent = self._res_table_wd_close
        self.res_table_wd.show()
    # def _res_table_wd_close(self, event):
    # table_size = self.res_table_wd.size()
    # table_size = [table_size.width(), table_size.height()]
    # self.update_gui_cfg.emit(
    # {'MODULE': {self.name: {'res_table_wd_size': table_size}}})
    # event.accept()
class IntensityInputWidget(QtWidgets.QVBoxLayout):
    """Vertical layout holding a line edit bound to linked_param['BEAM_INT']
    plus a trailing button that fills it from measured *.raw files."""

    def __init__(self, linked_param):
        super().__init__()
        self._int_line_edit = QtWidgets.QLineEdit(
            str(linked_param['BEAM_INT']))
        # Keep the parameter dict in sync with whatever the user types.
        self._int_line_edit.textChanged.connect(
            partial(linked_param.__setitem__, 'BEAM_INT'))
        browse_action = self._int_line_edit.addAction(
            QtGui.QIcon(QtGui.QPixmap('icons/more.png')),
            QtWidgets.QLineEdit.TrailingPosition
        )
        browse_action.triggered.connect(self._get_beam_intensity)
        self.addWidget(QtWidgets.QLabel("Beam Intensity"))
        self.addWidget(self._int_line_edit)

    def _get_beam_intensity(self):
        """Ask for raw files, average their peak intensities and fill the
        line edit with the scaled result."""
        def peak_intensity(file_name):
            # Load one raw file and return its direct maximum intensity.
            raw_file = RawFile()
            raw_file.get_file(file_name)
            data, attr = raw_file.get_data()
            del raw_file
            scan = OneDScanProc()
            scan.set_data(data, attr)
            return scan.get_max(mode='direct')

        selection = QtWidgets.QFileDialog.getOpenFileNames(
            caption='Open intensity file...',
            directory="/",
            filter="Raw file (*.raw)"
        )[0]
        if not selection:
            return
        maxima = [peak_intensity(str(name)) for name in selection]
        # 8940: scale factor applied to the averaged maximum
        # (origin not documented here).
        beam_int = np.mean(np.asarray(maxima)) * 8940
        self._int_line_edit.setText(str(beam_int))
class HKLInputComboBox(QtWidgets.QVBoxLayout):
    """Row of three line edits (H, K, L) kept in sync with linked_dict."""

    def __init__(self, linked_dict):
        super().__init__()
        row_layout = QtWidgets.QHBoxLayout()
        # One labelled edit per Miller index; each writes straight back into
        # linked_dict under its own key.
        for axis in ('H', 'K', 'L'):
            axis_edit = QtWidgets.QLineEdit(str(linked_dict[axis]))
            axis_edit.setInputMask("#9")
            axis_edit.textChanged.connect(
                partial(linked_dict.__setitem__, axis))
            row_layout.addWidget(QtWidgets.QLabel(axis + ":"))
            row_layout.addWidget(axis_edit)
        self.addWidget(QtWidgets.QLabel("Choose the HKL:"))
        self.addLayout(row_layout)
|
import requests

# NOTE(review): the API key is hard-coded in the URL — consider reading it
# from an environment variable.  Dark Sky has also been shut down, so this
# endpoint may no longer respond.
url = 'https://api.darksky.net/forecast/8cf0d9b4c83a4030bfaf2c73491334cf/48.1651,17.1457?units=auto'
# url.format() was a no-op and has been dropped.  An explicit timeout keeps a
# dead endpoint from hanging the script, and raise_for_status surfaces HTTP
# errors before JSON decoding fails obscurely.
response = requests.get(url, timeout=10)
response.raise_for_status()
r = response.json()
# Keep only the current-conditions fields we display.
weather = {
    'temperature': r['currently']['temperature'],
    'summary': r['currently']['summary'],
    'icon': r['currently']['icon']
}
print(weather)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.