text stringlengths 38 1.54M |
|---|
import numpy as np
import matplotlib.pyplot as plt
# Load and plot the training-loss curve saved by the trainer.
array = np.load('train_loss_backup.npz')
print('x:\n', array['x'])
print('y:\n', array['y'])
plt.figure()
plt.plot(array['y'], '-', color='#00a0ff')
plt.title('Train Loss')
plt.xlabel('Train Step')
plt.ylabel('Loss')
plt.grid()
# Save BEFORE show(): plt.show() blocks and, when the window is closed,
# the active figure is torn down, so saving afterwards wrote a blank image.
plt.savefig('Train_Loss.png')
plt.show()
import os
import telas.Menu
def main():
    """Show the project's intro/credits screen, then hand off to the menu."""
    os.system("clear")
    intro_lines = (
        "Tema: Locadora de veículos.",
        "Objetivo: Gerenciar frotas, clientes e reservas.",
        "",
        "Desenvolvedores:",
        "Nome: Giovani França Sarchesi",
        "RA: 2840481821011",
        "Nome: Gustavo Ferreira Mota",
        "RA: 2840481821013",
    )
    for intro_line in intro_lines:
        print(intro_line)
    input("Tecle Enter para continuar...")
    telas.Menu.main()
|
from __future__ import division
import sys
#sys.path.append('../Benchmarks/wordpress-LDA')
from collections import defaultdict,Counter
#from corp import stop_words, Files, Corp
from gensim import corpora, models, similarities
import logging
import json
import cPickle
import random
import time
import csv
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
# Python 2 script (uses cPickle). Builds a post-id list and a
# post_id -> row-index mapping for the test set, unless both saved
# artifacts already exist.
DataSet = 'my'
infolder = '../../'+DataSet+'Data/'
testindexdict_savedfolder = DataSet+'Saved/'
testPostsFile = infolder+DataSet+"testPosts.json"
try:
    # Probe for previously saved artifacts; `with` closes the probe handles
    # (the original leaked them) and IOError replaces the bare except that
    # swallowed every exception, including KeyboardInterrupt.
    with open(testindexdict_savedfolder+'TestIndexDict.saved', 'r'):
        pass
    with open(testindexdict_savedfolder+'TestPostList.saved', 'r'):
        pass
except IOError:
    testPostIds = []
    testPostIndices = {}
    with open(testPostsFile) as f:
        # One JSON post per line.
        for i, line in enumerate(f):
            post = json.loads(line)
            blog_id = post["blog"]
            post_id = post["post_id"]
            testPostIds.append(post_id)
            # testPostIndices maps post_id -> row index in the already
            # vectorized LDA/tfidf matrices.
            testPostIndices[post_id] = i
    with open(testindexdict_savedfolder+'TestIndexDict.saved', "w") as out:
        cPickle.dump(testPostIndices, out)
    with open(testindexdict_savedfolder+'TestPostList.saved', "w") as out:
        cPickle.dump(testPostIds, out)
|
from tbay import User, Item, Bid, session
# Seed users. NOTE(review): ids are assigned as strings ("78") — presumably
# the column is Integer and SQLAlchemy coerces; confirm against tbay models.
beyonce = User()
beyonce.username = "bknowles"
beyonce.password = "uhohuhohuhohohnana"
beyonce.id = "78"
session.add(beyonce)
session.commit()
jayz = User()
jayz.username = "jayz"
jayz.password = "99problems"
jayz.id = "56"
session.add(jayz)
session.commit()
solange = User()
solange.username = "solange"
solange.password = "sister"
solange.id = "1234"
session.add(solange)
session.commit()
# Items sold by solange.
crown = Item()
crown.name = "crown"
crown.description = "a beautiful crown"
crown.seller_id = solange.id
session.add(crown)
session.commit()
baseball = Item()
baseball.name = "baseball"
baseball.description = "a beautiful golden baseball"
baseball.seller_id = solange.id
session.add(baseball)
session.commit()
# Two competing bids on the baseball.
jayzbid = Bid()
jayzbid.price = 3
jayzbid.bidder_id = jayz.id
jayzbid.bidded_item = baseball.id
session.add(jayzbid)
session.commit()
beyoncebid = Bid()
beyoncebid.price = 5
beyoncebid.bidder_id = beyonce.id
beyoncebid.bidded_item = baseball.id
session.add(beyoncebid)
session.commit()
# Wire up the relationship collections. The original wrote `x.append = y`,
# which REPLACES the list's append method with the object instead of
# appending — these are the intended method calls.
solange.auctioned_items.append(baseball)
jayz.bids_made.append(jayzbid)
beyonce.bids_made.append(beyoncebid)
baseball.bids_received.append(beyoncebid)
baseball.bids_received.append(jayzbid)
session.commit()
# Determine the highest bid (plain Python comparison on the in-memory
# objects, not an actual DB query).
if beyoncebid.price > jayzbid.price:
    print("Beyonce wins the item!")
elif beyoncebid.price == jayzbid.price:
    print("It's a tie!")
else:
    print("Jayz wins the item!")
session.commit()
# do you have to commit one at a time? http://stackoverflow.com/questions/974596/what-is-a-database-transaction/974611#974611
# Returns a list of all of the user objects
# Note that user objects won't display very prettily by default -
# you'll see their type (User) and their internal identifiers.
#print(session.query(User).all()) # Returns a list of all of the user objects
# Returns the first user
#print(session.query(User).first())
# Returns the first item
#print(session.query(Item).first())
# Finds the user with the primary key equal to 1
#print(session.query(User).get(1))
# Returns a list of all of the usernames in ascending order
#print(session.query(User.username).order_by(User.username).all())
# Returns a list of all of the items
#print(session.query(Item.name).order_by(Item.name).all())
|
# base on scipy or scikit-learn
# import sklearn.datasets
# import sklearn.cluster
# import scipy.cluster.vq
# import matplotlib.pyplot as plot
#
# n = 100
# k = 3
#
# # Generate fake data
# data, labels = sklearn.datasets.make_blobs(n_samples=n, n_features=2, centers=k)
#
# # scipy
# means, _ = scipy.cluster.vq.kmeans(data, k, iter=300)
#
# # scikit-learn
# kmeans = sklearn.cluster.KMeans(k, max_iter=300)
# kmeans.fit(data)
# means = kmeans.cluster_centers_
#
# plot.scatter(data[:, 0], data[:, 1], c=labels)
# plot.scatter(means[:, 0], means[:, 1], linewidths=2)
# plot.show()
# base on python only
# k--numbers of cluster
import sklearn.datasets
import numpy as np
n = 100  # number of sample points to generate
k = 3  # number of clusters
def k_means(data, k, number_of_iterations):
    """Vectorized (loop-free per step) k-means clustering.

    Parameters:
        data: (n, features) array of points.
        k: number of clusters.
        number_of_iterations: fixed iteration count (no convergence check).
    Returns:
        (k, features) array of cluster centroids.
    """
    n = len(data)
    number_of_features = data.shape[1]
    # Pick k DISTINCT random points as initial centroids (the original could
    # draw duplicates, collapsing clusters from the start).
    initial_indices = np.random.choice(range(n), k, replace=False)
    # We keep the centroids as |features| x k matrix.
    means = data[initial_indices].T
    # To avoid loops, we repeat the data k times depthwise and compute the
    # distance from each point to each centroid in one step in a
    # n x |features| x k tensor.
    repeated_data = np.stack([data] * k, axis=-1)
    all_rows = np.arange(n)
    # Comparison baseline. The original hard-coded shape [1, 1, 2], which
    # broke broadcasting for any feature count other than 2.
    zero = np.zeros([1, 1, number_of_features])
    for _ in range(number_of_iterations):
        # Broadcast means across the repeated data matrix, gives us a
        # n x k matrix of distances.
        distances = np.sum(np.square(repeated_data - means), axis=1)
        # Find the index of the smallest distance (closest cluster) for each
        # point.
        assignment = np.argmin(distances, axis=-1)
        # Again to avoid a loop, we'll create a sparse matrix with k slots for
        # each point and fill exactly the one slot that the point was assigned
        # to. Then we reduce across all points to give us the sum of points for
        # each cluster.
        sparse = np.zeros([n, k, number_of_features])
        sparse[all_rows, assignment] = data
        # To compute the correct mean, we need to know how many points are
        # assigned to each cluster (without a loop).
        counts = (sparse != zero).sum(axis=0)
        # Compute new centroids; clip avoids division by zero for clusters
        # that lost all their points.
        means = sparse.sum(axis=0).T / counts.clip(min=1).T
    return means.T
# Generate fake data
# make_blobs returns `data` (n x 2 points) and the true integer `labels`.
data, labels = sklearn.datasets.make_blobs(n_samples=n, n_features=2, centers=k)
print(k_means(data, 3, 100000))
|
import numpy as np
class PointBrowser(object):
    """
    Click on a point to select and highlight it -- the data that
    generated the point will be shown in the lower axes. Use the 'n'
    and 'p' keys to browse through the next and previous points
    """
    def __init__(self):
        pass

    def on_button_release(self, mouse_event):
        """Print the data coordinates of a mouse-button release.

        Ignores releases outside any axes, where matplotlib reports
        xdata/ydata as None (the original crashed formatting None with :f).
        """
        x = mouse_event.xdata
        y = mouse_event.ydata
        if x is None or y is None:
            return
        print("selected: {0:f}, {1:f}".format(x, y))
|
from SQLTable import *
# Clean the string of brackets and commas
def cleanStr(m):
    """Strip all parentheses and commas from *m* in a single pass."""
    return m.translate(str.maketrans('', '', '(),'))
# Return the table the FK is referencing to
def findReferenceTable(tbList, tbName):
    """Return the table in *tbList* whose name is *tbName*, or None."""
    return next((tbl for tbl in tbList if tbl.tableName == tbName), None)
# Parse the list of words from the SQL Code
# Return a List of SQLTable Objects
def parseSQLTable(wordsLst):
    """Parse a tokenized CREATE TABLE word list into SQLTable objects.

    Returns a list of SQLTable, one per CREATE TABLE statement.

    NOTE: the original iterated with ``for i in range(len(wordsLst))`` and
    mutated ``i`` inside the body; Python rebinds the loop variable every
    iteration, so those increments were silently ignored and tokens were
    re-processed. A while loop makes the manual cursor advancement real.
    """
    tableLst = []
    i = 0
    # Walk the token stream, building one table per CREATE TABLE statement.
    while i < len(wordsLst):
        # CREATE TABLE Check
        if wordsLst[i] == "CREATE" and i + 2 < len(wordsLst) and wordsLst[i+1] == "TABLE":
            table = SQLTable(cleanStr(wordsLst[i+2]))
            tableLst.append(table)
            i += 3
            while i < len(wordsLst):
                # End of Creating Table
                if wordsLst[i] == ')':
                    i += 1
                    break
                # Primary Key Check
                if wordsLst[i] == "PRIMARY" and wordsLst[i + 1] == "KEY":
                    i += 2
                    while i < len(wordsLst):
                        pk = cleanStr(wordsLst[i])
                        table.primaryKey.append(pk)
                        if wordsLst[i].find(')') != -1:  # Reaches the end of the Primary Key list
                            i += 1
                            break
                        i += 1
                # Foreign Key Check
                elif wordsLst[i] == "FOREIGN" and wordsLst[i + 1] == "KEY":
                    i += 2
                    # Count FK from the same table
                    fkCounter = 0
                    # In FK line
                    while i < len(wordsLst):
                        if wordsLst[i] == "REFERENCES":
                            if fkCounter > 1:
                                lst = []
                                for ct in range(fkCounter):  # Add more than one FK from the same table to the FK list
                                    lst.append(cleanStr(wordsLst[i+ct-fkCounter]))
                                table.foreignKey.forKeys.append(lst)
                            else:
                                # Add one FK to FK list
                                table.foreignKey.forKeys.append(cleanStr(wordsLst[i-1]))
                            tb = findReferenceTable(tableLst, wordsLst[i+1])  # The table it references to
                            i += 2
                            # After References
                            while i < len(wordsLst):
                                if fkCounter > 1:
                                    lst = []
                                    for ct in range(fkCounter):
                                        lst.append(cleanStr(wordsLst[i + ct]))
                                    table.appendAttribute2(tb, lst)  # Add a list of Attr from referenced the same table
                                    i += fkCounter - 1
                                else:
                                    table.appendAttribute(tb, cleanStr(wordsLst[i]))  # Add Attr from referenced table
                                # Last FK to add
                                if wordsLst[i].find(')') != -1:
                                    i += 1
                                    break
                                i += 1
                            break
                        fkCounter += 1
                        i += 1
                # Adding Attributes
                else:
                    # Check for End line with a comma ( , )
                    if wordsLst[i+1].find(',') != -1:  # Has no limit restriction
                        attr = table.Attribute(table.tableName, wordsLst[i], cleanStr(wordsLst[i+1]))
                        table.tableAttributes.append(attr)
                        i += 2
                    else:  # Has a limit restriction
                        attr = table.Attribute(table.tableName, wordsLst[i], wordsLst[i + 1], cleanStr(wordsLst[i + 2]))
                        table.tableAttributes.append(attr)
                        i += 3
        else:
            i += 1
    return tableLst
|
# importing required libraries after installing
from tkinter import *
from pytube import YouTube
# creating tkinter object
# creating tkinter object
root = Tk()
# setting dimensions for the GUI application
root.geometry('500x300')
# application cannot be resized
root.resizable(0, 0)
# setting background color to the application
root.configure(bg='yellow')
# setting title of the application
root.title("Youtube Video Downloader")
Label(root, text='Youtube Video Downloader',
      font='arial 20 bold', bg='yellow').pack()
# placeholder to enter link of the youtube video to be downloaded
link = StringVar()
Label(root, text='Paste Link Here:', font='arial 15 bold',
      bg='yellow').place(x=160, y=60)
# NOTE(review): .place() returns None, so link_enter is always None; the
# Entry widget still works because its parent keeps it alive.
link_enter = Entry(root, width=70, textvariable=link).place(
    x=32, y=90, height=30)
# function to download video
def Downloader():
    """Download the first stream of the pasted YouTube link, then show a
    DOWNLOADED label as feedback."""
    # Wrap the pasted link in a pytube YouTube object.
    video_source = YouTube(str(link.get()))
    first_stream = video_source.streams.first()
    first_stream.download()
    # Feedback shown only once the download finished.
    Label(root, text='DOWNLOADED', font='arial 15 bold',
          bg='red', fg='white').place(x=180, y=210)
# Download button — triggers Downloader() on click.
Button(root, text='DOWNLOAD', font='arial 15 bold', bg='black', fg='white',
       padx=2, command=Downloader).place(x=180, y=150)
# Running infinite loop of the tkinter object until the user exits the application
root.mainloop()
|
import datetime
# Django REST Framework configuration: JWT first for API clients,
# Basic/Session kept so the browsable API still works.
REST_FRAMEWORK = {
    "DEFAULT_AUTHENTICATION_CLASSES": (
        "rest_framework_simplejwt.authentication.JWTAuthentication",
        "rest_framework.authentication.BasicAuthentication",
        "rest_framework.authentication.SessionAuthentication",
    ),
    "DEFAULT_RENDERER_CLASSES": (
        "rest_framework.renderers.JSONRenderer",
        "rest_framework.renderers.BrowsableAPIRenderer",
    ),
    # limit/offset style pagination, 50 items per page by default.
    "DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",
    "PAGE_SIZE": 50,
    "DATETIME_FORMAT": "%Y-%m-%d %H:%M",
    "DATE_FORMAT": "%Y-%m-%d",
}
CSRF_TRUSTED_ORIGINS = [
    "http://localhost:8000",
]
# drf-yasg / swagger security schemes (Basic auth + Authorization header token).
SWAGGER_SETTINGS = {
    "SECURITY_DEFINITIONS": {
        "Basic": {"type": "basic"},
        "Token": {"type": "apiKey", "name": "Authorization", "in": "header"},
    }
}
SIMPLE_JWT = {
    "ACCESS_TOKEN_LIFETIME": datetime.timedelta(minutes=5),
    "REFRESH_TOKEN_LIFETIME": datetime.timedelta(days=1),
    "ROTATE_REFRESH_TOKENS": False,
    "BLACKLIST_AFTER_ROTATION": True,
    "ALGORITHM": "HS256",
    # NOTE(review): SECRET_KEY is not defined in this snippet — it must be
    # defined earlier in the settings module for this file to import; confirm.
    "SIGNING_KEY": SECRET_KEY,
    "VERIFYING_KEY": None,
    "AUDIENCE": None,
    "ISSUER": None,
    # Clients may prefix the header with either "jwt" or "Bearer".
    "AUTH_HEADER_TYPES": ("jwt", "Bearer"),
    "USER_ID_FIELD": "id",
    "USER_ID_CLAIM": "user_id",
    "AUTH_TOKEN_CLASSES": ("rest_framework_simplejwt.tokens.AccessToken",),
    "TOKEN_TYPE_CLAIM": "token_type",
    "JTI_CLAIM": "jti",
    "SLIDING_TOKEN_REFRESH_EXP_CLAIM": "refresh_exp",
    "SLIDING_TOKEN_LIFETIME": datetime.timedelta(minutes=5),
    "SLIDING_TOKEN_REFRESH_LIFETIME": datetime.timedelta(days=1),
}
|
from amuse.lab import *
from matplotlib import pyplot
# N-body <-> SI unit converter scaled to 1 solar mass and 1 parsec.
converter = nbody_system.nbody_to_si(1|units.MSun, 1|units.parsec)
# Gravitational constant expressed in SI units via the converter.
G_si = converter.to_si(nbody_system.G)
#TODO: function with tidal radii?
#uses "HOP: A New Group Finding Algorithm for N-body Simulations"
def find_bound_bodies(bodies):
    """Return the gravitationally bound subset of *bodies* (AMUSE HOP-based)."""
    return bodies.bound_subset(unit_converter=converter, G=G_si)
#assume that bodies that have higher kinetic than potential energy are
#not gravitationally bound, and returns bodies that are bound
def find_bound_bodies_using_energies(bodies):
    """Return the subset of *bodies* with negative total specific energy.

    A body whose kinetic energy exceeds its potential energy is assumed
    unbound and excluded.
    """
    bound = Particles()
    for particle in bodies:
        total_energy = particle.specific_kinetic_energy() + particle.potential()
        # Negative total energy (in the quantity's own unit) => bound.
        if total_energy < 0 | total_energy.unit:
            bound.add_particle(particle)
    return bound
if __name__ in '__main__':
filename = "nbody.hdf5"
bodies = read_set_from_file(filename, format="hdf5", copy_history=True)
timestamps = []
bound_masses = []
for bodies_at_timestep in bodies.history:
bound_bodies = find_bound_bodies(bodies_at_timestep)
timestamp = bodies_at_timestep.get_timestamp()
bound_mass = bound_bodies.total_mass()
print "at timestamp", timestamp, "total mass of", bound_mass
timestamps.append(timestamp)
bound_masses.append(bound_mass)
pyplot.scatter([i.number for i in timestamps], [i.number for i in bound_masses])
pyplot.xlabel("time in MYear")
pyplot.ylabel("bound mass in kg")
pyplot.savefig("bound_mass_over_time")
|
url = "http://naver.com"
# Strip the scheme, keeping only the host.
my_str = url.replace("http://", "")
print(my_str)
# Keep everything before the first dot, e.g. "naver".
my_str = my_str.partition(".")[0]
print(my_str)
# Password rule: first 3 letters + host length + count of 'e' + '!'.
password = "{}{}{}!".format(my_str[:3], len(my_str), my_str.count("e"))
print("{0}의 비밀번호는 {1}입니다.".format(url, password))
import argparse
import numpy as np
import glob
import os
import tensorflow as tf
from tensorflow import keras
# ARG PARSING
parser = argparse.ArgumentParser()
parser.add_argument("nps", help="Number of notes per block", type=int)
group = parser.add_mutually_exclusive_group()
group.add_argument("-l", "--left", help="Use left hand data", action="store_true")
group.add_argument("-r", "--right", help="Use right hand data", action="store_true")
group.add_argument("-s", "--single", help="Use single track data", action="store_true")
args = parser.parse_args()
# Notes per block
nps = args.nps
# Flag to merge tracks or keep separate
single = args.single
right = args.right
left = args.left
# Pick the dataset directory from the hand/track flag. The original only
# loaded data for --single, so --left/--right crashed with a NameError at
# model.fit. The left/right paths below mirror the save paths at the bottom
# of this script — TODO confirm against the midi preprocessing layout.
if single:
    data_dir = "midis/single/{}".format(nps)
elif right:
    data_dir = "midis/right/{}".format(nps)
elif left:
    data_dir = "midis/left/{}".format(nps)
else:
    parser.error("one of --single, --left or --right is required")
inputs = np.loadtxt("{}/inputs.txt".format(data_dir))
labels = np.loadtxt("{}/labels.txt".format(data_dir))
print("Input dimensions: ", inputs.shape)
print("Label dimensions: ", labels.shape)
# Funnel-shaped dense regressor: nps*3 inputs -> 3 outputs.
model = keras.Sequential([
    keras.layers.Dense(nps*3, input_dim=nps*3),
    keras.layers.Dense((nps-1)*3, activation='relu'),
    keras.layers.Dense((nps-1)*2, activation='relu'),
    keras.layers.Dense(3)
])
model.summary()
# NOTE(review): 'accuracy' is not a meaningful metric for an MSE regression
# target; kept to preserve the original output.
model.compile(optimizer='adam', loss=keras.losses.mean_squared_error, metrics=['accuracy'])
model.fit(inputs, labels, epochs=100, shuffle=True)
# Evaluation runs on the training data itself (no held-out split).
test_loss, test_acc = model.evaluate(inputs, labels, verbose=2)
print('\nTest accuracy:', test_acc)
if single:
    model.save("saved_models/single_{}".format(nps))
elif right:
    model.save("saved_models/right_{}".format(nps))
elif left:
    model.save("saved_models/left_{}".format(nps))
from kinsumer import Consumer
# Module-level UPPERCASE values double as config fixtures: the tests below
# load this very module via from_object / from_pyfile.
TEST_KEY = 'foo'
FOO_OPTION_1 = 'foo option 1'  # FOO_ namespace fixtures
FOO_OPTION_2 = 'foo option 2'
BAR_STUFF_1 = 'bar stuff 1'  # BAR_ namespace fixtures
BAR_STUFF_2 = 'bar stuff 2'
def common_object_test(consumer):
    """Shared assertions: config picked up TEST_KEY but not class-like names."""
    config = consumer.config
    assert config['TEST_KEY'] == 'foo'
    assert 'TestConfig' not in config
def test_config_from_file():
    """Config values load from this module's own .py file."""
    consumer = Consumer(__name__)
    module_py = __file__.rsplit('.', 1)[0] + '.py'
    consumer.config.from_pyfile(module_py)
    common_object_test(consumer)
def test_config_from_object():
    """Config values load from this module imported as an object."""
    consumer = Consumer(__name__)
    consumer.config.from_object(__name__)
    common_object_test(consumer)
def test_get_namespace():
    """get_namespace honours the lowercase / trim_namespace options."""
    consumer = Consumer(__name__)
    consumer.config.from_object(__name__)

    # Default: prefix trimmed, keys lowercased.
    trimmed_lower = consumer.config.get_namespace('FOO_')
    assert len(trimmed_lower) == 2
    assert trimmed_lower['option_1'] == 'foo option 1'
    assert trimmed_lower['option_2'] == 'foo option 2'

    # Prefix trimmed, original case kept.
    trimmed_upper = consumer.config.get_namespace('BAR_', lowercase=False)
    assert len(trimmed_upper) == 2
    assert trimmed_upper['STUFF_1'] == 'bar stuff 1'
    assert trimmed_upper['STUFF_2'] == 'bar stuff 2'

    # Prefix kept, keys lowercased.
    full_lower = consumer.config.get_namespace('FOO_', trim_namespace=False)
    assert len(full_lower) == 2
    assert full_lower['foo_option_1'] == 'foo option 1'
    assert full_lower['foo_option_2'] == 'foo option 2'

    # Prefix and case both kept.
    full_upper = consumer.config.get_namespace('BAR_',
                                               lowercase=False,
                                               trim_namespace=False)
    assert len(full_upper) == 2
    assert full_upper['BAR_STUFF_1'] == 'bar stuff 1'
    assert full_upper['BAR_STUFF_2'] == 'bar stuff 2'
|
import os
from glob import glob
from omegaconf import OmegaConf
from pp.config import CONFIG, conf, logging
def merge_markdown(
    reports_directory=CONFIG["doe_directory"],
    mdpath=CONFIG["mask_directory"] / "report.md",
    **kwargs,
):
    """Merges all individual markdown reports (.md) into a single markdown.

    you can add a report:[Capacitors, Diodes...] in config.yml to define the merge order

    Args:
        reports_directory: directory scanned (non-recursively) for *.md files.
        mdpath: output markdown path; a sibling .yml with the merged config
            is written next to it.
        **kwargs: merged into the global `conf` before it is dumped to yaml.
    """
    logging.debug("Merging Markdown files:")
    # Dump the (updated) configuration next to the report.
    configpath = mdpath.with_suffix(".yml")
    with open(configpath, "w") as f:
        conf.update(**kwargs)
        f.write(OmegaConf.to_yaml(conf))
    # Concatenate the reports in sorted (alphabetical) filename order.
    # (The original also defined an unused local helper `wl`; removed.)
    with open(mdpath, "w") as f:
        reports = sorted(glob(os.path.join(reports_directory, "*.md")))
        for filename in reports:
            with open(filename) as infile:
                for line in infile:
                    f.write(line)
    logging.info(f"Wrote {mdpath}")
    logging.info(f"Wrote {configpath}")
if __name__ == "__main__":
    # Sample invocation: merge the reports under the sample mask's does/ dir.
    reports_directory = CONFIG["samples_path"] / "mask" / "does"
    merge_markdown(reports_directory)
|
import cv2;
from video_camera import VideoCamera
from face_detector import Detector
import operations as op
# Wire the camera feed to the face detector and start live recognition.
camera = VideoCamera()
face_detector = Detector()
# To enrol a new person in the database, uncomment:
# op.take_images(camera, face_detector)
# Start live recognition.
op.train_and_run(camera, face_detector)
|
# from JumpScale9AYS.tools.lock.Lock import FileLock
from JumpScale9 import j
import sys
# import random
# import asyncio
# import selectors
from urllib.request import urlopen
import os
import tarfile
import shutil
# import tempfile
import platform
import subprocess
import time
import pystache
import pytoml
import fnmatch
# from subprocess import Popen
import re
# import inspect
# import yaml
import importlib
# import fcntl
# NOTE(review): this deliberately SHADOWS the builtin TimeoutError and
# inherits from both it and RuntimeError, so callers catching either base
# still work — confirm the shadowing is intentional before renaming.
class TimeoutError(RuntimeError, TimeoutError):
    pass
class SSHMethods():
    """Mixin with ssh-agent and ssh-key management helpers.

    Relies on helpers supplied by the class it is mixed into
    (self.execute, self.exists, self.readFile/writeFile, self.logger,
    self.TMPDIR, self.getBaseName, ...).
    """

    def _addSSHAgentToBashProfile(self, path=None):
        """Ensure ~/.bash_profile exports SSH_AUTH_SOCK to our socket path."""
        bashprofile_path = os.path.expanduser("~/.bash_profile")
        if not self.exists(bashprofile_path):
            self.execute('touch %s' % bashprofile_path)
        content = self.readFile(bashprofile_path)
        out = ""
        # Drop previously written agent lines so the file stays idempotent.
        for line in content.split("\n"):
            if line.find("#JSSSHAGENT") != -1:
                continue
            if line.find("SSH_AUTH_SOCK") != -1:
                continue
            out += "%s\n" % line
        if "SSH_AUTH_SOCK" in os.environ:
            self.logger.info("NO NEED TO ADD SSH_AUTH_SOCK to env")
            self.writeFile(bashprofile_path, out)
            return
        out += "export SSH_AUTH_SOCK=%s" % self._getSSHSocketpath()
        out = out.replace("\n\n\n", "\n\n")
        out = out.replace("\n\n\n", "\n\n")
        self.writeFile(bashprofile_path, out)

    def _initSSH_ENV(self, force=False):
        """Point SSH_AUTH_SOCK at our agent socket (in env and for children)."""
        if force or "SSH_AUTH_SOCK" not in os.environ:
            os.putenv("SSH_AUTH_SOCK", self._getSSHSocketpath())
            os.environ["SSH_AUTH_SOCK"] = self._getSSHSocketpath()

    def _getSSHSocketpath(self):
        """Return the agent socket path, defaulting to $HOME/sshagent_socket."""
        if "SSH_AUTH_SOCK" in os.environ:
            return(os.environ["SSH_AUTH_SOCK"])
        socketpath = "%s/sshagent_socket" % os.environ.get("HOME", '/root')
        os.environ['SSH_AUTH_SOCK'] = socketpath
        return socketpath

    def SSHAgentCheck(self):
        """Make sure an ssh-agent is reachable, starting one if needed."""
        if "SSH_AUTH_SOCK" not in os.environ:
            self._initSSH_ENV(True)
            self._addSSHAgentToBashProfile()
        if not self.SSHAgentAvailable():
            self._loadSSHAgent()

    def SSHKeyLoad(self, path, duration=3600 * 24):
        """
        @param path is name or full path
        """
        self.SSHAgentCheck()
        if self.SSHAgentCheckKeyIsLoaded(path):
            return
        self.logger.info("load ssh key:%s" % path)
        # ssh-add refuses group/world-readable private keys.
        self.chmod(path, 0o600)
        cmd = "ssh-add -t %s %s " % (duration, path)
        self.executeInteractive(cmd)

    def SSHAgentCheckKeyIsLoaded(self, keyNamePath):
        """Return True when a key with this basename is loaded in the agent."""
        keysloaded = [self.getBaseName(item)
                      for item in self.SSHKeysListFromAgent()]
        if self.getBaseName(keyNamePath) in keysloaded:
            self.logger.info("ssh key:%s loaded" % keyNamePath)
            return True
        else:
            self.logger.info("ssh key:%s NOT loaded" % keyNamePath)
            return False

    def SSHKeysLoad(self, path=None, duration=3600 * 24, die=False):
        """
        will see if ssh-agent has been started
        will check keys in home dir
        will ask which keys to load
        will adjust .profile file to make sure that env param is set to allow ssh-agent to find the keys
        """
        self.SSHAgentCheck()
        if path is None:
            path = os.path.expanduser("~/.ssh")
        self.createDir(path)
        if "SSH_AUTH_SOCK" not in os.environ:
            self._initSSH_ENV(True)
        self._loadSSHAgent()
        keysloaded = [self.getBaseName(item)
                      for item in self.SSHKeysListFromAgent()]
        if self.isDir(path):
            # Private keys are identified by the presence of a matching .pub.
            keysinfs = [self.getBaseName(item).replace(".pub", "") for item in self.listFilesInDir(
                path, filter="*.pub") if self.exists(item.replace(".pub", ""))]
            keysinfs = [item for item in keysinfs if item not in keysloaded]
            res = self.askItemsFromList(
                keysinfs,
                "select ssh keys to load, use comma separated list e.g. 1,4,3 and press enter.")
        else:
            res = [self.getBaseName(path).replace(".pub", "")]
            path = self.getParent(path)
        for item in res:
            pathkey = "%s/%s" % (path, item)
            # timeout after 24 h
            self.logger.info("load sshkey: %s" % pathkey)
            cmd = "ssh-add -t %s %s " % (duration, pathkey)
            self.executeInteractive(cmd)

    def SSHKeyGetPathFromAgent(self, keyname, die=True):
        """Return the filesystem path of a loaded key, or None/raise."""
        try:
            # TODO: why do we use subprocess here and not self.execute?
            out = subprocess.check_output(["ssh-add", "-L"])
        except BaseException:
            return None
        for line in out.splitlines():
            delim = ("/%s" % keyname).encode()
            if line.endswith(delim):
                line = line.strip()
                keypath = line.split(" ".encode())[-1]
                if not self.exists(path=keypath):
                    if self.exists("keys/%s" % keyname):
                        keypath = "keys/%s" % keyname
                    else:
                        raise RuntimeError(
                            "could not find keypath:%s" % keypath)
                return keypath.decode()
        if die:
            raise RuntimeError(
                "Did not find key with name:%s, check its loaded in ssh-agent with ssh-add -l" %
                keyname)
        return None

    def SSHKeyGetFromAgentPub(self, keyname, die=True):
        """Return the public-key line for a loaded key, or None/raise."""
        try:
            # TODO: why do we use subprocess here and not self.execute?
            out = subprocess.check_output(["ssh-add", "-L"])
        except BaseException:
            return None
        for line in out.splitlines():
            delim = (".ssh/%s" % keyname).encode()
            if line.endswith(delim):
                content = line.strip()
                content = content.decode()
                return content
        if die:
            raise RuntimeError(
                "Did not find key with name:%s, check its loaded in ssh-agent with ssh-add -l" %
                keyname)
        return None

    def SSHKeysListFromAgent(self, keyIncluded=False):
        """
        returns list of paths
        """
        if "SSH_AUTH_SOCK" not in os.environ:
            self._initSSH_ENV(True)
        self._loadSSHAgent()
        cmd = "ssh-add -L"
        rc, out, err = self.execute(cmd, False, False, die=False)
        if rc:
            if rc == 1 and out.find("The agent has no identities") != -1:
                return []
            raise RuntimeError("error during listing of keys :%s" % err)
        # ssh-add -L lines look like: "<type> <base64-key> <path/comment>"
        keys = [line.split()
                for line in out.splitlines() if len(line.split()) == 3]
        if keyIncluded:
            # [path, key] pairs (reversed slice of fields 2 and 1).
            return list(map(lambda key: key[2:0:-1], keys))
        else:
            return list(map(lambda key: key[2], keys))

    def SSHEnsureKeyname(self, keyname="", username="root"):
        """Resolve a bare key name to a path under the user's ~/.ssh if found."""
        if not self.exists(keyname):
            # The original never substituted %s: "/home/%s/.ssh/" was used
            # literally for non-root users.
            rootpath = "/root/.ssh/" if username == "root" else "/home/%s/.ssh/" % username
            fullpath = self.joinPaths(rootpath, keyname)
            if self.exists(fullpath):
                return fullpath
        return keyname

    def authorize_user(self, sftp_client, ip_address, keyname, username):
        """Append a public key to a non-root user's authorized_keys via sudo."""
        basename = self.getBaseName(keyname)
        tmpfile = "/home/%s/.ssh/%s" % (username, basename)
        self.logger.info("push key to /home/%s/.ssh/%s" % (username, basename))
        sftp_client.put(keyname, tmpfile)
        # cannot upload directly to root dir
        auth_key_path = "/home/%s/.ssh/authorized_keys" % username
        # The original lacked parentheses around the format arguments, so
        # `%` was applied to `username` alone (TypeError) and cmd became a
        # tuple; the parenthesized tuple is the intended call.
        cmd = "ssh %s@%s 'cat %s | sudo tee -a %s '" % (
            username, ip_address, tmpfile, auth_key_path)
        self.logger.info(
            "do the following on the console\nsudo -s\ncat %s >> %s" %
            (tmpfile, auth_key_path))
        self.logger.info(cmd)
        self.executeInteractive(cmd)

    def authorize_root(self, sftp_client, ip_address, keyname):
        """Merge a public key into root's authorized_keys (idempotent)."""
        tmppath = "%s/authorized_keys" % self.TMPDIR
        auth_key_path = "/root/.ssh/authorized_keys"
        self.delete(tmppath)
        try:
            sftp_client.get(auth_key_path, tmppath)
        except Exception as e:
            if str(e).find("No such file") != -1:
                try:
                    # Fall back to authorized_keys2 (legacy sshd name).
                    auth_key_path += "2"
                    sftp_client.get(auth_key_path, tmppath)
                except Exception as e:
                    if str(e).find("No such file") != -1:
                        self.writeFile(tmppath, "")
                    else:
                        raise RuntimeError(
                            "Could not get authorized key,%s" % e)
        C = self.readFile(tmppath)
        Cnew = self.readFile(keyname)
        # Field 1 of "type key comment" is the actual key material.
        key = Cnew.split(" ")[1]
        if C.find(key) == -1:
            C2 = "%s\n%s\n" % (C.strip(), Cnew)
            C2 = C2.strip() + "\n"
            self.writeFile(tmppath, C2)
            self.logger.info("sshauthorized adjusted")
            sftp_client.put(tmppath, auth_key_path)
        else:
            self.logger.info("ssh key was already authorized")

    def SSHAuthorizeKey(
            self,
            remoteipaddr,
            keyname,
            login="root",
            passwd=None,
            sshport=22,
            removeothers=False):
        """
        this required ssh-agent to be loaded !!!
        the keyname is the name of the key as loaded in ssh-agent
        if remoteothers==True: then other keys will be removed
        """
        keyname = self.SSHEnsureKeyname(keyname=keyname, username=login)
        import paramiko
        paramiko.util.log_to_file("/tmp/paramiko.log")
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.logger.info("ssh connect:%s %s" % (remoteipaddr, login))
        # The original passed the key's basename into SSHKeysListFromAgent's
        # boolean `keyIncluded` parameter, so it only tested whether ANY key
        # was loaded; this checks for the specific key.
        if not self.SSHAgentCheckKeyIsLoaded(keyname):
            self.SSHKeysLoad(self.getParent(keyname))
        ssh.connect(
            remoteipaddr,
            username=login,
            password=passwd,
            allow_agent=True,
            look_for_keys=False)
        self.logger.info("ok")
        ftp = ssh.open_sftp()
        if login != "root":
            self.authorize_user(
                sftp_client=ftp,
                ip_address=remoteipaddr,
                keyname=keyname,
                username=login)
        else:
            self.authorize_root(
                sftp_client=ftp,
                ip_address=remoteipaddr,
                keyname=keyname)

    def _loadSSHAgent(self, path=None, createkeys=False, killfirst=False):
        """
        check if ssh-agent is available & there is key loaded
        @param path: is path to private ssh key
        the primary key is 'id_rsa' and will be used as default e.g. if authorizing another node then this key will be used
        """
        # NOTE(review): FileLock's import is commented out at the top of this
        # file; this raises NameError unless the mixing class provides it —
        # confirm and restore `from JumpScale9AYS.tools.lock.Lock import FileLock`.
        with FileLock('/tmp/ssh-agent'):
            # check if more than 1 agent
            socketpath = self._getSSHSocketpath()
            res = [
                item for item in self.execute(
                    "ps aux|grep ssh-agent",
                    False,
                    False)[1].split("\n") if item.find("grep ssh-agent") == -
                1]
            res = [item for item in res if item.strip() != ""]
            res = [item for item in res if item[-2:] != "-l"]
            if len(res) > 1:
                self.logger.info("more than 1 ssh-agent, will kill all")
                killfirst = True
            if len(res) == 0 and self.exists(socketpath):
                # Stale socket left behind by a dead agent.
                self.delete(socketpath)
            if killfirst:
                cmd = "killall ssh-agent"
                # self.logger.info(cmd)
                self.execute(cmd, showout=False, outputStderr=False, die=False)
                # remove previous socketpath
                self.delete(socketpath)
                self.delete(self.joinPaths(self.TMPDIR, "ssh-agent-pid"))
            if not self.exists(socketpath):
                self.createDir(self.getParent(socketpath))
                # ssh-agent not loaded
                self.logger.info("load ssh agent")
                rc, result, err = self.execute(
                    "ssh-agent -a %s" %
                    socketpath, die=False, showout=False, outputStderr=False)
                if rc > 0:
                    # could not start ssh-agent
                    raise RuntimeError(
                        "Could not start ssh-agent, something went wrong,\nstdout:%s\nstderr:%s\n" %
                        (result, err))
                else:
                    # get pid from result of ssh-agent being started
                    if not self.exists(socketpath):
                        raise RuntimeError(
                            "Serious bug, ssh-agent not started while there was no error, should never get here")
                    piditems = [item for item in result.split(
                        "\n") if item.find("pid") != -1]
                    # print(piditems)
                    if len(piditems) < 1:
                        print("results was:")
                        print(result)
                        print("END")
                        raise RuntimeError("Cannot find items in ssh-add -l")
                    self._initSSH_ENV(True)
                    # Last word of the "... pid NNNN;" line is the agent pid.
                    pid = int(piditems[-1].split(" ")[-1].strip("; "))
                    self.writeFile(
                        self.joinPaths(
                            self.TMPDIR,
                            "ssh-agent-pid"),
                        str(pid))
                    self._addSSHAgentToBashProfile()
            # ssh agent should be loaded because ssh-agent socket has been
            # found
            if os.environ.get("SSH_AUTH_SOCK") != socketpath:
                self._initSSH_ENV(True)
            rc, result, err = self.execute(
                "ssh-add -l", die=False, showout=False, outputStderr=False)
            if rc == 2:
                # no ssh-agent found
                print(result)
                raise RuntimeError(
                    "Could not connect to ssh-agent, this is bug, ssh-agent should be loaded by now")
            elif rc == 1:
                # no keys but agent loaded
                result = ""
            elif rc > 0:
                raise RuntimeError(
                    "Could not start ssh-agent, something went wrong,\nstdout:%s\nstderr:%s\n" %
                    (result, err))

    def SSHAgentAvailable(self):
        """Return True when an agent answers on our socket (even with no keys)."""
        if not self.exists(self._getSSHSocketpath()):
            return False
        if "SSH_AUTH_SOCK" not in os.environ:
            self._initSSH_ENV(True)
        rc, out, err = self.execute(
            "ssh-add -l", showout=False, outputStderr=False, die=False)
        if 'The agent has no identities.' in out:
            return True
        if rc != 0:
            return False
        else:
            return True
class GitMethods():
def rewriteGitRepoUrl(self, url="", login=None, passwd=None, ssh="auto"):
"""
Rewrite the url of a git repo with login and passwd if specified
Args:
url (str): the HTTP URL of the Git repository. ex: 'https://github.com/odoo/odoo'
login (str): authentication login name
passwd (str): authentication login password
ssh = if True will build ssh url, if "auto" will check if there is ssh-agent available & keys are loaded,
if yes will use ssh
Returns:
(repository_host, repository_type, repository_account, repository_name, repository_url)
"""
if ssh == "auto" or ssh == "first":
ssh = self.SSHAgentAvailable()
elif ssh or ssh is False:
pass
else:
raise RuntimeError(
"ssh needs to be auto, first or True or False: here:'%s'" %
ssh)
url_pattern_ssh = re.compile('^(git@)(.*?):(.*?)/(.*?)/?$')
sshmatch = url_pattern_ssh.match(url)
url_pattern_http = re.compile('^(https?://)(.*?)/(.*?)/(.*?)/?$')
httpmatch = url_pattern_http.match(url)
if not sshmatch:
match = httpmatch
else:
match = sshmatch
if not match:
raise RuntimeError(
"Url is invalid. Must be in the form of 'http(s)://hostname/account/repo' or 'git@hostname:account/repo'")
protocol, repository_host, repository_account, repository_name = match.groups()
if protocol.startswith("git") and ssh is False:
protocol = "https://"
if not repository_name.endswith('.git'):
repository_name += '.git'
if login == 'ssh' or ssh:
repository_url = 'git@%(host)s:%(account)s/%(name)s' % {
'host': repository_host,
'account': repository_account,
'name': repository_name,
}
protocol = "ssh"
elif login and login != 'guest':
repository_url = '%(protocol)s%(login)s:%(password)s@%(host)s/%(account)s/%(repo)s' % {
'protocol': protocol,
'login': login,
'password': passwd,
'host': repository_host,
'account': repository_account,
'repo': repository_name,
}
else:
repository_url = '%(protocol)s%(host)s/%(account)s/%(repo)s' % {
'protocol': protocol,
'host': repository_host,
'account': repository_account,
'repo': repository_name,
}
if repository_name.endswith(".git"):
repository_name = repository_name[:-4]
return protocol, repository_host, repository_account, repository_name, repository_url
    def getGitRepoArgs(
            self,
            url="",
            dest=None,
            login=None,
            passwd=None,
            reset=False,
            branch=None,
            ssh="auto",
            codeDir=None,
            executor=None):
        """
        Extracts and returns data useful in cloning a Git repository.

        Args:
            url (str): the HTTP/GIT URL of the Git repository to clone from. eg: 'https://github.com/odoo/odoo.git'
            dest (str): the local filesystem path to clone to
            login (str): authentication login name (only for http)
            passwd (str): authentication login password (only for http)
            reset (boolean): if True, any cached clone of the Git repository will be removed
            branch (str): branch to be used (NOTE(review): accepted but not used in this method — confirm callers rely on it elsewhere)
            ssh: if "auto" will check if ssh-agent loaded, if True will be forced to use ssh for git
            codeDir (str): root dir under which repos are laid out; defaults to j.dirs.CODEDIR (or the executor's CODEDIR)
            executor: optional remote executor; when set, CODEDIR is resolved on the remote side

        Credentials: when url is on github and login is not given, GITHUBUSER /
        GITHUBPASSWD environment variables are used when present.

        Returns:
            (repository_host, repository_type, repository_account, repository_name, dest, repository_url)
            - repository_type: provider name derived from the host (e.g. 'github')

        Remark:
            url can be empty, then the git params will be fetched out of the git configuration at that path
        """
        if url == "":
            # no url given: everything must be derivable from an existing local clone
            if dest is None:
                raise RuntimeError("dest cannot be None (url is also '')")
            if not self.exists(dest):
                raise RuntimeError(
                    "Could not find git repo path:%s, url was not specified so git destination needs to be specified." %
                    (dest))
        if login is None and url.find("github.com/") != -1:
            # can see if there if login & passwd in OS env
            # if yes fill it in
            if "GITHUBUSER" in os.environ:
                login = os.environ["GITHUBUSER"]
            if "GITHUBPASSWD" in os.environ:
                passwd = os.environ["GITHUBPASSWD"]
        protocol, repository_host, repository_account, repository_name, repository_url = self.rewriteGitRepoUrl(
            url=url, login=login, passwd=passwd, ssh=ssh)
        # 'github.com' -> 'github'; used as the per-provider directory component
        repository_type = repository_host.split(
            '.')[0] if '.' in repository_host else repository_host
        if not dest:
            # default layout: <codeDir>/<provider>/<account>/<repo>
            if codeDir is None:
                if not executor:
                    codeDir = j.dirs.CODEDIR
                else:
                    codeDir = executor.prefab.core.dir_paths['CODEDIR']
            dest = '%(codedir)s/%(type)s/%(account)s/%(repo_name)s' % {
                'codedir': codeDir,
                'type': repository_type.lower(),
                'account': repository_account.lower(),
                'repo_name': repository_name,
            }
        if reset:
            self.delete(dest)
        return repository_host, repository_type, repository_account, repository_name, dest, repository_url
def pullGitRepo(
self,
url="",
dest=None,
login=None,
passwd=None,
depth=None,
ignorelocalchanges=False,
reset=False,
branch=None,
tag=None,
revision=None,
ssh="auto",
executor=None,
codeDir=None,
timeout=600):
"""
will clone or update repo
if dest is None then clone underneath: /opt/code/$type/$account/$repo
will ignore changes !!!!!!!!!!!
@param ssh ==True means will checkout ssh
@param ssh =="first" means will checkout sss first if that does not work will go to http
"""
if branch == "":
branch = None
if branch is not None and tag is not None:
raise RuntimeError("only branch or tag can be set")
if ssh == "first" or ssh == "auto":
try:
return self.pullGitRepo(
url,
dest,
login,
passwd,
depth,
ignorelocalchanges,
reset,
branch,
tag=tag,
revision=revision,
ssh=True,
executor=executor)
except Exception as e:
base, provider, account, repo, dest, url = self.getGitRepoArgs(
url, dest, login, passwd, reset=reset, ssh=False, codeDir=codeDir, executor=executor)
existsDir = self.exists(dest) if not executor else executor.exists(dest)
if existsDir:
self.delete(dest)
return self.pullGitRepo(
url,
dest,
login,
passwd,
depth,
ignorelocalchanges,
reset,
branch,
tag=tag,
revision=revision,
ssh=False,
executor=executor)
base, provider, account, repo, dest, url = self.getGitRepoArgs(
url, dest, login, passwd, reset=reset, ssh=ssh, codeDir=codeDir, executor=executor)
self.logger.info("%s:pull:%s ->%s" % (executor, url, dest))
existsDir = self.exists(
dest) if not executor else executor.exists(dest)
checkdir = "%s/.git" % (dest)
existsGit = self.exists(
checkdir) if not executor else executor.exists(checkdir)
if existsDir:
raise RuntimeError('%s not a git repository.' % dest)
# if we don't specify the branch, try to find the currently
# checkedout branch
cmd = 'cd %s; git rev-parse --abbrev-ref HEAD' % dest
rc, out, err = self.execute(
cmd, die=False, showout=False, executor=executor)
if rc == 0:
branchFound = out.strip()
else: # if we can't retreive current branch, use master as default
branchFound = 'master'
# raise RuntimeError("Cannot retrieve branch:\n%s\n" % cmd)
if branch is not None and branch != branchFound and ignorelocalchanges is False:
raise RuntimeError(
"Cannot pull repo, branch on filesystem is not same as branch asked for.\nBranch asked for:%s\nBranch found:%s\nTo choose other branch do e.g:\nexport JSBRANCH='%s'\n" %
(branch, branchFound, branchFound))
if ignorelocalchanges:
self.logger.info(
("git pull, ignore changes %s -> %s" %
(url, dest)))
cmd = "cd %s;git fetch" % dest
if depth is not None:
cmd += " --depth %s" % depth
self.execute(cmd, executor=executor)
if branch is not None:
self.logger.info("reset branch to:%s" % branch)
self.execute(
"cd %s;git fetch; git reset --hard origin/%s" %
(dest, branch), timeout=timeout, executor=executor)
else:
if branch is None and tag is None:
branch = branchFound
# pull
self.logger.info(("git pull %s -> %s" % (url, dest)))
if url.find("http") != -1:
cmd = "mkdir -p %s;cd %s;git -c http.sslVerify=false pull origin %s" % (
dest, dest, branch)
else:
cmd = "cd %s;git pull origin %s" % (dest, branch)
self.logger.info(cmd)
self.execute(cmd, timeout=timeout, executor=executor)
else:
self.logger.info(("git clone %s -> %s" % (url, dest)))
# self.createDir(dest)
extra = ""
if depth is not None:
extra = "--depth=%s" % depth
if url.find("http") != -1:
if branch is not None:
cmd = "mkdir -p %s;cd %s;git -c http.sslVerify=false clone %s -b %s %s %s" % (
self.getParent(dest), self.getParent(dest), extra, branch, url, dest)
else:
cmd = "mkdir -p %s;cd %s;git -c http.sslVerify=false clone %s %s %s" % (
self.getParent(dest), self.getParent(dest), extra, url, dest)
else:
if branch is not None:
cmd = "mkdir -p %s;cd %s;git clone %s -b %s %s %s" % (
self.getParent(dest), self.getParent(dest), extra, branch, url, dest)
else:
cmd = "mkdir -p %s;cd %s;git clone %s %s %s" % (
self.getParent(dest), self.getParent(dest), extra, url, dest)
self.logger.info(cmd)
# self.logger.info(str(executor)+" "+cmd)
self.execute(cmd, timeout=timeout, executor=executor)
if tag is not None:
self.logger.info("reset tag to:%s" % tag)
self.execute("cd %s;git checkout tags/%s" %
(dest, tag), timeout=timeout, executor=executor)
if revision is not None:
cmd = "mkdir -p %s;cd %s;git checkout %s" % (dest, dest, revision)
self.logger.info(cmd)
self.execute(cmd, timeout=timeout, executor=executor)
return dest
def getGitBranch(self, path):
# if we don't specify the branch, try to find the currently checkedout
# branch
cmd = 'cd %s;git rev-parse --abbrev-ref HEAD' % path
try:
rc, out, err = self.execute(cmd, showout=False, outputStderr=False)
if rc == 0:
branch = out.strip()
else: # if we can't retreive current branch, use master as default
branch = 'master'
except BaseException:
branch = 'master'
return branch
class FSMethods():
    """Mixin bundling filesystem helper methods (read/write, copy, links, listing)."""

    def getBinDirSystem(self):
        """Return the system-wide binaries directory (trailing slash included)."""
        return "/usr/local/bin/"
def getPythonLibSystem(self, jumpscale=False):
PYTHONVERSION = platform.python_version()
if j.core.platformtype.myplatform.isMac:
destjs = "/usr/local/lib/python3.6/site-packages"
elif j.core.platformtype.myplatform.isWindows:
destjs = "/usr/lib/python3.4/site-packages"
else:
if PYTHONVERSION == '2':
destjs = "/usr/local/lib/python/dist-packages"
else:
destjs = "/usr/local/lib/python3.5/dist-packages"
if jumpscale:
destjs += "/JumpScale/"
self.createDir(destjs)
return destjs
def readFile(self, filename):
"""Read a file and get contents of that file
@param filename: string (filename to open for reading )
@rtype: string representing the file contents
"""
with open(filename) as fp:
data = fp.read()
return data
    def touch(self, path):
        """Create (or truncate to empty) the file at *path*, creating parent dirs."""
        self.writeFile(path, "")

    # class-level shortcut to the shared text strip helper (used by writeFile)
    textstrip = j.data.text.strip
    def writeFile(self, path, content, strip=True):
        """Write *content* to *path*, creating parent directories as needed.

        @param strip: when True the content is passed through the shared
            textstrip helper (j.data.text.strip) before writing
        """
        self.createDir(self.getDirName(path))
        if strip:
            content = self.textstrip(content, True)
        with open(path, "w") as fo:
            fo.write(content)
    def delete(self, path, force=False):
        """Remove *path* (file, symlink or directory tree).

        Refuses to delete a small set of protected system directories.
        @param force: currently unused (the CODEDIR protection check below is
            commented out) -- TODO confirm intended semantics
        """
        # drop a windows junction / dangling link first so rmtree never follows it
        self.removeSymlink(path)
        if path.strip().rstrip("/") in ["",
                                        "/",
                                        "/etc",
                                        "/root",
                                        "/usr",
                                        "/opt",
                                        "/usr/bin",
                                        "/usr/sbin",
                                        "/opt/code"]:
            raise RuntimeError('cannot delete protected dirs')
        # if not force and path.find(j.dirs.CODEDIR)!=-1:
        #     raise RuntimeError('cannot delete protected dirs')
        if self.debug:
            self.logger.info(("delete: %s" % path))
        if os.path.exists(path) or os.path.islink(path):
            if os.path.isdir(path):
                # a symlinked dir is removed as a link, a real dir recursively
                if os.path.islink(path):
                    os.remove(path)
                else:
                    shutil.rmtree(path)
            else:
                os.remove(path)
    def joinPaths(self, *args):
        """Join any number of path components using the platform separator."""
        return os.path.join(*args)
    def copyTree(
            self,
            source,
            dest,
            keepsymlinks=False,
            deletefirst=False,
            overwriteFiles=True,
            ignoredir=[
                "*.egg-info",
                "*.dist-info"],
            ignorefiles=["*.egg-info"],
            rsync=True,
            ssh=False,
            sshport=22,
            recursive=True,
            rsyncdelete=False,
            createdir=False,
            executor=None):
        """
        Copy a directory tree, by default by shelling out to rsync.

        if ssh format of source or dest is: remoteuser@remotehost:/remote/dir
        When rsync=False falls back to the pure-python _copyTree (local only).
        NOTE(review): mutable default args (ignoredir/ignorefiles) are shared
        across calls -- callers must not mutate them.
        """
        if self.debug:
            self.logger.info(("copy %s %s" % (source, dest)))
        if not ssh and not self.exists(source, executor=executor):
            raise RuntimeError("copytree:Cannot find source:%s" % source)
        if executor and not rsync:
            raise RuntimeError("when executor used only rsync supported")
        if rsync:
            # build the --exclude option string from the ignore lists
            excl = ""
            for item in ignoredir:
                excl += "--exclude '%s/' " % item
            for item in ignorefiles:
                excl += "--exclude '%s' " % item
            excl += "--exclude '*.pyc' "
            excl += "--exclude '*.bak' "
            excl += "--exclude '*__pycache__*' "
            pre = ""
            if executor is None:
                # rsync needs trailing slashes to copy dir CONTENTS, not the dir
                if self.isDir(source):
                    if dest[-1] != "/":
                        dest += "/"
                    if source[-1] != "/":
                        source += "/"
                if dest.find(':') == -1:  # download
                    dest = dest.split(':')[1] if ':' in dest else dest
            else:
                # remote side: make sure rsync is installed (not needed on mac)
                if not sys.platform.startswith("darwin"):
                    executor.prefab.package.ensure('rsync')
                if executor.prefab.core.dir_exists(source):
                    if dest[-1] != "/":
                        dest += "/"
                    if source[-1] != "/":
                        source += "/"
            dest = dest.replace("//", "/")
            source = source.replace("//", "/")
            if deletefirst:
                pre = "set -ex;rm -rf %s;mkdir -p %s;" % (dest, dest)
            elif createdir:
                pre = "set -ex;mkdir -p %s;" % dest
            cmd = "%srsync " % pre
            if keepsymlinks:
                # -l is keep symlinks, -L follow
                cmd += " -rlptgo --partial %s" % excl
            else:
                cmd += " -rLptgo --partial %s" % excl
            if not recursive:
                cmd += " --exclude \"*/\""
            if rsyncdelete:
                cmd += " --delete"
            if ssh:
                cmd += " -e 'ssh -o StrictHostKeyChecking=no -p %s' " % sshport
            cmd += " '%s' '%s'" % (source, dest)
            if executor is not None:
                rc, out, err = executor.execute(cmd, showout=False)
            else:
                rc, out, err = self.execute(cmd, showout=False, outputStderr=False)
            return
        else:
            # pure-python fallback; silence debug logging for the recursion
            old_debug = self.debug
            self.debug = False
            self._copyTree(
                source,
                dest,
                keepsymlinks,
                deletefirst,
                overwriteFiles,
                ignoredir=ignoredir,
                ignorefiles=ignorefiles)
            self.debug = old_debug
def _copyTree(
self,
src,
dst,
keepsymlinks=False,
deletefirst=False,
overwriteFiles=True,
ignoredir=[
".egg-info",
"__pycache__"],
ignorefiles=[".egg-info"]):
"""Recursively copy an entire directory tree rooted at src.
The dst directory may already exist; if not,
it will be created as well as missing parent directories
@param src: string (source of directory tree to be copied)
@param dst: string (path directory to be copied to...should not already exist)
@param keepsymlinks: bool (True keeps symlinks instead of copying the content of the file)
@param deletefirst: bool (Set to True if you want to erase destination first, be carefull, this can erase directories)
@param overwriteFiles: if True will overwrite files, otherwise will not overwrite when destination exists
"""
self.logger.info('Copy directory tree from %s to %s' % (src, dst), 6)
if ((src is None) or (dst is None)):
raise TypeError(
'Not enough parameters passed in system.fs.copyTree to copy directory from %s to %s ' %
(src, dst))
if self.isDir(src):
if ignoredir != []:
for item in ignoredir:
if src.find(item) != -1:
return
names = os.listdir(src)
if not self.exists(dst):
self.createDir(dst)
errors = []
for name in names:
# is only for the name
name2 = name
srcname = self.joinPaths(src, name)
dstname = self.joinPaths(dst, name2)
if deletefirst and self.exists(dstname):
if self.isDir(dstname, False):
self.removeDirTree(dstname)
if self.isLink(dstname):
self.unlink(dstname)
if keepsymlinks and self.isLink(srcname):
linkto = self.readLink(srcname)
# self.symlink(linkto, dstname)#, overwriteFiles)
try:
os.symlink(linkto, dstname)
except BaseException:
pass
# TODO: very ugly change
elif self.isDir(srcname):
# print "1:%s %s"%(srcname,dstname)
self.copyTree(
srcname,
dstname,
keepsymlinks,
deletefirst,
overwriteFiles=overwriteFiles,
ignoredir=ignoredir)
else:
# print "2:%s %s"%(srcname,dstname)
extt = self.getFileExtension(srcname)
if extt == "pyc" or extt == "egg-info":
continue
if ignorefiles != []:
for item in ignorefiles:
if srcname.find(item) != -1:
continue
self.copyFile(srcname, dstname, deletefirst=overwriteFiles)
else:
raise RuntimeError(
'Source path %s in system.fs.copyTree is not a directory' %
src)
    def copyFile(
            self,
            source,
            dest,
            deletefirst=False,
            skipIfExists=False,
            makeExecutable=False):
        """Copy one file to *dest*.

        When dest is a directory the source's basename is appended.
        @param deletefirst: remove an existing dest before copying
        @param skipIfExists: silently return when dest already exists
        @param makeExecutable: chmod the copy to 0o770
        """
        if self.isDir(dest):
            dest = self.joinPaths(dest, self.getBaseName(source))
        if skipIfExists:
            if self.exists(dest):
                return
        if deletefirst:
            self.delete(dest)
        if self.debug:
            self.logger.info(("copy %s %s" % (source, dest)))
        shutil.copy(source, dest)
        if makeExecutable:
            self.chmod(dest, 0o770)
def createDir(self, path):
if not os.path.exists(path) and not os.path.islink(path):
os.makedirs(path)
    def changeDir(self, path, create=False):
        """Changes Current Directory

        @param path: string (Directory path to be changed to)
        @param create: create the directory first when it does not exist
        """
        self.logger.info('Changing directory to: %s' % path, 6)
        if create:
            self.createDir(path)
        if self.exists(path):
            if self.isDir(path):
                os.chdir(path)
            else:
                raise ValueError(
                    "Path: %s in system.fs.changeDir is not a Directory" %
                    path)
        else:
            raise RuntimeError(
                "Path: %s in system.fs.changeDir does not exist" %
                path)
def isDir(self, path, followSoftlink=False):
"""Check if the specified Directory path exists
@param path: string
@param followSoftlink: boolean
@rtype: boolean (True if directory exists)
"""
if self.isLink(path):
if not followSoftlink:
return False
else:
link = self.readLink(path)
return self.isDir(link)
else:
return os.path.isdir(path)
def isExecutable(self, path):
stat.S_IXUSR & statobj.st_mode
def isFile(self, path, followSoftlink=False):
"""Check if the specified file exists for the given path
@param path: string
@param followSoftlink: boolean
@rtype: boolean (True if file exists for the given path)
"""
if self.isLink(path):
if not followSoftlink:
return False
else:
link = self.readLink(path)
return self.isFile(link)
else:
return os.path.isfile(path)
    def isLink(self, path, checkJunction=False):
        """Check if the specified path is a link

        @param path: string
        @param checkJunction: on windows, also detect NTFS junctions via the
            external `junction` tool
        @rtype: boolean (True if the specified path is a link)
        """
        if path[-1] == os.sep:
            path = path[:-1]
        # NOTE(review): this None-check is dead code -- a None path already
        # fails on the subscript above; left as-is to keep behavior identical.
        if (path is None):
            raise TypeError('Link path is None in system.fs.isLink')
        if checkJunction and self.isWindows:
            cmd = "junction %s" % path
            try:
                rc, result, err = self.execute(cmd)
            except Exception as e:
                raise RuntimeError(
                    "Could not execute junction cmd, is junction installed? Cmd was %s." %
                    cmd)
            if rc != 0:
                raise RuntimeError(
                    "Could not execute junction cmd, is junction installed? Cmd was %s." %
                    cmd)
            # junction prints "substitute name" for real junction points
            if result.lower().find("substitute name") != -1:
                return True
            else:
                return False
        if(os.path.islink(path)):
            return True
        return False
def list(self, path):
# self.logger.info("list:%s"%path)
if(self.isDir(path)):
s = sorted(["%s/%s" % (path, item) for item in os.listdir(path)])
return s
elif(self.isLink(path)):
link = self.readLink(path)
return self.list(link)
else:
raise ValueError(
"Specified path: %s is not a Directory in self.listDir" %
path)
def exists(self, path, executor=None):
if executor:
return executor.exists(path)
else:
return os.path.exists(path)
    def pip(self, items, force=False, executor=None):
        """pip3-install/upgrade each given package.

        @param items: string (newline separated names) or list of package names
        @param force: currently unused -- TODO confirm intended semantics
        @param executor: optional remote executor; runs interactively locally otherwise
        """
        if isinstance(items, list):
            pass
        elif isinstance(items, str):
            # split a newline-separated string into clean package names
            items = self.textstrip(items)
            items = [item.strip()
                     for item in items.split("\n") if item.strip() != ""]
        else:
            raise RuntimeError("input can only be string or list")
        for item in items:
            cmd = "pip3 install %s --upgrade" % item
            if executor is None:
                self.executeInteractive(cmd)
            else:
                executor.execute(cmd)
    def symlink(self, src, dest, delete=False):
        """
        dest is where the link will be created pointing to src

        @param delete: remove any existing file/dir/link at dest first
        On windows an NTFS junction is created via the external `junction` tool.
        """
        if self.debug:
            self.logger.info(("symlink: src:%s dest(islink):%s" % (src, dest)))
        if self.isLink(dest):
            self.removeSymlink(dest)
        if delete:
            if j.core.platformtype.myplatform.isWindows:
                # junctions must be removed explicitly before deleting the path
                self.removeSymlink(dest)
                self.delete(dest)
            else:
                self.delete(dest)
        if j.core.platformtype.myplatform.isWindows:
            cmd = "junction %s %s 2>&1 > null" % (dest, src)
            os.system(cmd)
        else:
            dest = dest.rstrip("/")
            src = src.rstrip("/")
            if not self.exists(src):
                raise RuntimeError("could not find src for link:%s" % src)
            # only create when dest is still free (existing non-link dest is kept)
            if not self.exists(dest):
                os.symlink(src, dest)
    def symlinkFilesInDir(self, src, dest, delete=True, includeDirs=False, makeExecutable=False):
        """Symlink every file (optionally every dir) found directly in *src* into *dest*.

        @param delete: replace pre-existing targets in dest
        @param includeDirs: also link directories (non-recursive)
        @param makeExecutable: chmod both link target and source to 0o770
        """
        if includeDirs:
            items = self.listFilesAndDirsInDir(
                src, recursive=False, followSymlinks=False, listSymlinks=False)
        else:
            items = self.listFilesInDir(
                src,
                recursive=False,
                followSymlinks=True,
                listSymlinks=True)
        for item in items:
            dest2 = "%s/%s" % (dest, self.getBaseName(item))
            dest2 = dest2.replace("//", "/")
            self.logger.info(("link %s:%s" % (item, dest2)))
            self.symlink(item, dest2, delete=delete)
            if makeExecutable:
                self.chmod(dest2, 0o770)
                self.chmod(item, 0o770)
    def removeSymlink(self, path):
        """Remove the symlink (unix) or junction (windows) at *path*; no-op otherwise."""
        if j.core.platformtype.myplatform.isWindows:
            try:
                # junctions need the external `junction -d` tool
                cmd = "junction -d %s 2>&1 > null" % (path)
                self.logger.info(cmd)
                os.system(cmd)
            except Exception as e:
                # best effort on windows
                pass
        else:
            if self.isLink(path):
                os.unlink(path.rstrip("/"))
def getBaseName(self, path):
"""Return the base name of pathname path."""
# self.logger.info('Get basename for path: %s'%path,9)
if path is None:
raise TypeError('Path is not passed in system.fs.getDirName')
try:
return os.path.basename(path.rstrip(os.path.sep))
except Exception as e:
raise RuntimeError(
'Failed to get base name of the given path: %s, Error: %s' %
(path, str(e)))
def checkDirOrLinkToDir(self, fullpath):
"""
check if path is dir or link to a dir
"""
if fullpath is None or fullpath.strip == "":
raise RuntimeError("path cannot be empty")
if not self.isLink(fullpath) and os.path.isdir(fullpath):
return True
if self.isLink(fullpath):
link = self.readLink(fullpath)
if self.isDir(link):
return True
return False
def getDirName(self, path, lastOnly=False, levelsUp=None):
"""
Return a directory name from pathname path.
@param path the path to find a directory within
@param lastOnly means only the last part of the path which is a dir (overrides levelsUp to 0)
@param levelsUp means, return the parent dir levelsUp levels up
e.g. ...getDirName("/opt/qbase/bin/something/test.py", levelsUp=0) would return something
e.g. ...getDirName("/opt/qbase/bin/something/test.py", levelsUp=1) would return bin
e.g. ...getDirName("/opt/qbase/bin/something/test.py", levelsUp=10) would raise an error
"""
# self.logger.info('Get directory name of path: %s' % path,9)
if path is None:
raise TypeError('Path is not passed in system.fs.getDirName')
dname = os.path.dirname(path)
dname = dname.replace("/", os.sep)
dname = dname.replace("//", os.sep)
dname = dname.replace("\\", os.sep)
if lastOnly:
dname = dname.split(os.sep)[-1]
return dname
if levelsUp is not None:
parts = dname.split(os.sep)
if len(parts) - levelsUp > 0:
return parts[len(parts) - levelsUp - 1]
else:
raise RuntimeError(
"Cannot find part of dir %s levels up, path %s is not long enough" %
(levelsUp, path))
return dname + os.sep
    def readLink(self, path):
        """Works only for unix
        Return a string representing the path to which the symbolic link points.
        """
        # strip trailing separators so os.readlink gets the link itself
        while path[-1] == "/" or path[-1] == "\\":
            path = path[:-1]
        # NOTE(review): dead code -- a None path already fails on the subscript
        # above; kept verbatim to preserve behavior.
        if path is None:
            raise TypeError('Path is not passed in system.fs.readLink')
        if self.isWindows:
            raise RuntimeError('Cannot readLink on windows')
        try:
            return os.readlink(path)
        except Exception as e:
            raise RuntimeError(
                'Failed to read link with path: %s \nERROR: %s' %
                (path, str(e)))
    def removeLinks(self, path):
        """
        find all links & remove

        Recurses through *path*; silently returns when path does not exist.
        """
        if not self.exists(path):
            return
        items = self._listAllInDir(
            path=path,
            recursive=True,
            followSymlinks=False,
            listSymlinks=True)
        # _listAllInDir returns (entries, depth); keep only the symlinks
        items = [item for item in items[0] if self.isLink(item)]
        for item in items:
            self.unlink(item)
def _listInDir(self, path, followSymlinks=True):
"""returns array with dirs & files in directory
@param path: string (Directory path to list contents under)
"""
if path is None:
raise TypeError('Path is not passed in system.fs.listDir')
if(self.exists(path)):
if(self.isDir(path)) or (followSymlinks and self.checkDirOrLinkToDir(path)):
names = os.listdir(path)
return names
else:
raise ValueError(
"Specified path: %s is not a Directory in system.fs.listDir" %
path)
else:
raise RuntimeError(
"Specified path: %s does not exist in system.fs.listDir" %
path)
    def listDirsInDir(
            self,
            path,
            recursive=False,
            dirNameOnly=False,
            findDirectorySymlinks=True):
        """ Retrieves list of directories found in the specified directory

        @param path: string represents directory path to search in
        @param recursive: also descend into subdirectories
        @param dirNameOnly: return bare names instead of full paths
        @param findDirectorySymlinks: include symlinks that resolve to dirs
        @rtype: list
        """
        # NOTE(review): `path.strip == ""` compares the method object and is
        # always False; kept verbatim.
        if path is None or path.strip == "":
            raise RuntimeError("path cannot be empty")
        files = self._listInDir(path, followSymlinks=True)
        filesreturn = []
        for file in files:
            fullpath = os.path.join(path, file)
            if (findDirectorySymlinks and self.checkDirOrLinkToDir(
                    fullpath)) or self.isDir(fullpath):
                if dirNameOnly:
                    filesreturn.append(file)
                else:
                    filesreturn.append(fullpath)
                if recursive:
                    filesreturn.extend(
                        self.listDirsInDir(
                            fullpath,
                            recursive,
                            dirNameOnly,
                            findDirectorySymlinks))
        return filesreturn
    def listFilesInDir(
            self,
            path,
            recursive=False,
            filter=None,
            minmtime=None,
            maxmtime=None,
            depth=None,
            case_sensitivity='os',
            exclude=[],
            followSymlinks=True,
            listSymlinks=False):
        """Retrieves list of files found in the specified directory

        @param path: directory path to search in
        @type path: string
        @param recursive: recursively look in all subdirs
        @type recursive: boolean
        @param filter: unix-style wildcard (e.g. *.py) - this is not a regular expression
        @type filter: string
        @param minmtime: if not None, only return files whose last modification time > minmtime (epoch in seconds)
        @type minmtime: integer
        @param maxmtime: if not None, only return files whose last modification time < maxmtime (epoch in seconds)
        @param depth: levels deep we need to go (0/None means unlimited)
        @type maxmtime: integer
        @param exclude: list of unix-style patterns; matches are excluded
        @rtype: list
        """
        if depth is not None:
            depth = int(depth)
        # depth==0 means "no limit" for the recursive walker below
        if depth == 0:
            depth = None
        filesreturn, depth = self._listAllInDir(path, recursive, filter, minmtime, maxmtime, depth, type="f",
                                                case_sensitivity=case_sensitivity, exclude=exclude, followSymlinks=followSymlinks, listSymlinks=listSymlinks)
        return filesreturn
    def listFilesAndDirsInDir(
            self,
            path,
            recursive=False,
            filter=None,
            minmtime=None,
            maxmtime=None,
            depth=None,
            type="fd",
            followSymlinks=True,
            listSymlinks=False):
        """Retrieves list of files and/or dirs found in the specified directory

        @param path: directory path to search in
        @type path: string
        @param recursive: recursively look in all subdirs
        @type recursive: boolean
        @param filter: unix-style wildcard (e.g. *.py) - this is not a regular expression
        @type filter: string
        @param minmtime: if not None, only return files whose last modification time > minmtime (epoch in seconds)
        @type minmtime: integer
        @param maxmtime: if not None, only return files whose last modification time < maxmtime (epoch in seconds)
        @param depth: levels deep we need to go (0/None means unlimited)
        @type maxmtime: integer
        @param type is string with f & d inside (f for when to find files, d for when to find dirs)
        @rtype: list
        """
        if depth is not None:
            depth = int(depth)
        self.logger.info('List files in directory with path: %s' % path, 9)
        # depth==0 means "no limit" for the recursive walker below
        if depth == 0:
            depth = None
        filesreturn, depth = self._listAllInDir(
            path, recursive, filter, minmtime, maxmtime, depth, type=type, followSymlinks=followSymlinks, listSymlinks=listSymlinks)
        return filesreturn
    def _listAllInDir(
            self,
            path,
            recursive,
            filter=None,
            minmtime=None,
            maxmtime=None,
            depth=None,
            type="df",
            case_sensitivity='os',
            exclude=[],
            followSymlinks=True,
            listSymlinks=True):
        """Shared walker behind listFilesInDir / listFilesAndDirsInDir.

        Returns (entries, depth): the remaining depth budget is threaded back
        to the caller so recursion can stop at the requested level.

        # There are 3 possible options for case-sensitivity for file names
        # 1. `os`: the same behavior as the OS
        # 2. `sensitive`: case-sensitive comparison
        # 3. `insensitive`: case-insensitive comparison
        """
        dircontent = self._listInDir(path)
        filesreturn = []
        # pick the fnmatch variant implementing the requested case behavior
        if case_sensitivity.lower() == 'sensitive':
            matcher = fnmatch.fnmatchcase
        elif case_sensitivity.lower() == 'insensitive':
            def matcher(fname, pattern):
                return fnmatch.fnmatchcase(fname.lower(), pattern.lower())
        else:
            matcher = fnmatch.fnmatch
        for direntry in dircontent:
            fullpath = self.joinPaths(path, direntry)
            if followSymlinks:
                # resolve the entry so the file/dir tests below see the target
                if self.isLink(fullpath):
                    fullpath = self.readLink(fullpath)
            if self.isFile(fullpath) and "f" in type:
                includeFile = False
                if (filter is None) or matcher(direntry, filter):
                    if (minmtime is not None) or (maxmtime is not None):
                        mymtime = os.stat(fullpath)[ST_MTIME]
                        if (minmtime is None) or (mymtime > minmtime):
                            if (maxmtime is None) or (mymtime < maxmtime):
                                includeFile = True
                    else:
                        includeFile = True
                if includeFile:
                    if exclude != []:
                        for excludeItem in exclude:
                            if matcher(direntry, excludeItem):
                                includeFile = False
                    if includeFile:
                        filesreturn.append(fullpath)
            elif self.isDir(fullpath):
                if "d" in type:
                    if not(listSymlinks is False and self.isLink(fullpath)):
                        filesreturn.append(fullpath)
                if recursive:
                    # consume one level of the depth budget; depth None = unlimited
                    if depth is not None and depth != 0:
                        depth = depth - 1
                    if depth is None or depth != 0:
                        exclmatch = False
                        if exclude != []:
                            for excludeItem in exclude:
                                if matcher(fullpath, excludeItem):
                                    exclmatch = True
                        if exclmatch is False:
                            if not(
                                    followSymlinks is False and self.isLink(fullpath)):
                                r, depth = self._listAllInDir(fullpath, recursive, filter, minmtime, maxmtime, depth=depth,
                                                              type=type, exclude=exclude, followSymlinks=followSymlinks, listSymlinks=listSymlinks)
                                if len(r) > 0:
                                    filesreturn.extend(r)
            elif self.isLink(fullpath) and followSymlinks is False and listSymlinks:
                filesreturn.append(fullpath)
        return filesreturn, depth
    def download(
            self,
            url,
            to="",
            overwrite=True,
            retry=3,
            timeout=0,
            login="",
            passwd="",
            minspeed=0,
            multithread=False,
            curl=False):
        """
        @return path of downloaded file
        @param to: destination path; empty means TMPDIR/<basename of url>
        @param minspeed is kbytes per sec e.g. 50, if less than 50 kbytes during 10 min it will restart the download (curl only)
        @param when multithread True then will use aria2 download tool to get multiple threads (not implemented)

        A '<to>.downloadok' marker file records successful completion so
        repeated calls with overwrite=False can skip the download.
        """
        # urllib-based fallback downloader; shadows the method name on purpose
        def download(url, to, retry=3):
            if timeout == 0:
                handle = urlopen(url)
            else:
                handle = urlopen(url, timeout=timeout)
            nr = 0
            while nr < retry + 1:
                try:
                    with open(to, 'wb') as out:
                        # stream in 1 KiB chunks until EOF
                        while True:
                            data = handle.read(1024)
                            if len(data) == 0:
                                break
                            out.write(data)
                    handle.close()
                    out.close()
                    return
                except Exception as e:
                    self.logger.info("DOWNLOAD ERROR:%s\n%s" % (url, e))
                    try:
                        handle.close()
                    except BaseException:
                        pass
                    try:
                        out.close()
                    except BaseException:
                        pass
                    # reopen and retry from scratch
                    handle = urlopen(url)
                    nr += 1
        self.logger.info(('Downloading %s ' % (url)))
        if to == "":
            to = self.TMPDIR + "/" + url.replace("\\", "/").split("/")[-1]
        if overwrite:
            if self.exists(to):
                self.delete(to)
                self.delete("%s.downloadok" % to)
        else:
            if self.exists(to) and self.exists("%s.downloadok" % to):
                # already downloaded successfully earlier
                return to
        self.createDir(self.getDirName(to))
        if curl and self.checkInstalled("curl"):
            minspeed = 0
            if minspeed != 0:
                minsp = "-y %s -Y 600" % (minspeed * 1024)
            else:
                minsp = ""
            if login:
                user = "--user %s:%s " % (login, passwd)
            else:
                user = ""
            cmd = "curl '%s' -o '%s' %s %s --connect-timeout 5 --retry %s --retry-max-time %s" % (
                url, to, user, minsp, retry, timeout)
            if self.exists(to):
                # resume a partial download
                cmd += " -C -"
            self.logger.info(cmd)
            self.delete("%s.downloadok" % to)
            rc, out, err = self.execute(cmd, die=False)
            if rc == 33:  # resume is not support try again withouth resume
                self.delete(to)
                cmd = "curl '%s' -o '%s' %s %s --connect-timeout 5 --retry %s --retry-max-time %s" % (
                    url, to, user, minsp, retry, timeout)
                rc, out, err = self.execute(cmd, die=False)
            if rc:
                raise RuntimeError(
                    "Could not download:{}.\nErrorcode: {}".format(
                        url, rc))
            else:
                self.touch("%s.downloadok" % to)
        elif multithread:
            raise RuntimeError("not implemented yet")
        else:
            download(url, to, retry)
            self.touch("%s.downloadok" % to)
        return to
def downloadExpandTarGz(
self,
url,
destdir,
deleteDestFirst=True,
deleteSourceAfter=True):
self.logger.info((self.getBaseName(url)))
tmppath = self.getTmpPath(self.getBaseName(url))
self.download(url, tmppath)
self.expandTarGz(tmppath, destdir)
    def expandTarGz(
            self,
            path,
            destdir,
            deleteDestFirst=True,
            deleteSourceAfter=False):
        """Expand the .tar.gz archive at *path* into *destdir*.

        Decompresses to an intermediate .tar in TMPDIR, then extracts it.
        @param deleteDestFirst: wipe destdir before extracting
        @param deleteSourceAfter: remove the source archive when done
        """
        import gzip
        # remember and restore the working directory around the extraction
        self.lastdir = os.getcwd()
        os.chdir(self.TMPDIR)
        basename = os.path.basename(path)
        if basename.find(".tar.gz") == -1:
            raise RuntimeError("Can only expand a tar gz file now %s" % path)
        tarfilename = ".".join(basename.split(".gz")[:-1])
        self.delete(tarfilename)
        if deleteDestFirst:
            self.delete(destdir)
        if j.core.platformtype.myplatform.isWindows:
            cmd = "gzip -d %s" % path
            os.system(cmd)
        else:
            # decompress the gzip stream into the intermediate tar file
            handle = gzip.open(path)
            with open(tarfilename, 'wb') as out:
                for line in handle:
                    out.write(line)
            out.close()
            handle.close()
        t = tarfile.open(tarfilename, 'r')
        t.extractall(destdir)
        t.close()
        self.delete(tarfilename)
        if deleteSourceAfter:
            self.delete(path)
        os.chdir(self.lastdir)
        self.lastdir = ""
def getParent(self, path):
"""
Returns the parent of the path:
/dir1/dir2/file_or_dir -> /dir1/dir2/
/dir1/dir2/ -> /dir1/
TODO: why do we have 2 implementations which are almost the same see getParentDirName()
"""
parts = path.split(os.sep)
if parts[-1] == '':
parts = parts[:-1]
parts = parts[:-1]
if parts == ['']:
return os.sep
return os.sep.join(parts)
def getFileExtension(self, path):
extcand = path.split(".")
if len(extcand) > 0:
ext = extcand[-1]
else:
ext = ""
return ext
def chown(self, path, user):
from pwd import getpwnam
getpwnam(user)[2]
uid = getpwnam(user).pw_uid
gid = getpwnam(user).pw_gid
os.chown(path, uid, gid)
for root, dirs, files in os.walk(path):
for ddir in dirs:
path = os.path.join(root, ddir)
try:
os.chown(path, uid, gid)
except Exception as e:
if str(e).find("No such file or directory") == -1:
raise RuntimeError("%s" % e)
for file in files:
path = os.path.join(root, file)
try:
os.chown(path, uid, gid)
except Exception as e:
if str(e).find("No such file or directory") == -1:
raise RuntimeError("%s" % e)
def chmod(self, path, permissions):
"""
@param permissions e.g. 0o660 (USE OCTAL !!!)
"""
os.chmod(path, permissions)
for root, dirs, files in os.walk(path):
for ddir in dirs:
path = os.path.join(root, ddir)
try:
os.chmod(path, permissions)
except Exception as e:
if str(e).find("No such file or directory") == -1:
raise RuntimeError("%s" % e)
for file in files:
path = os.path.join(root, file)
try:
os.chmod(path, permissions)
except Exception as e:
if str(e).find("No such file or directory") == -1:
raise RuntimeError("%s" % e)
def chdir(self, ddir=""):
"""
if ddir=="" then will go to tmpdir
"""
if ddir == "":
ddir = self.TMPDIR
os.chdir(ddir)
def getTmpPath(self, filename):
return "%s/jumpscaleinstall/%s" % (self.TMPDIR, filename)
def getPythonSiteConfigPath(self):
minl = 1000000
result = ""
for item in sys.path:
if len(item) < minl and item.find("python") != -1:
result = item
minl = len(item)
return result
def getWalker(self):
self._initExtra()
return self.extra.getWalker(self)
class ExecutorMethods():
    """Mixin bundling command/script execution helpers (local and over ssh).

    Fixes in this revision:
    - `execute` had a parameter literally named `async`, which is a reserved
      keyword since Python 3.7 (SyntaxError); renamed to `async_`.
    - `execute` used mutable default arguments (`errors=[]`, `ok=[]`).
    - `executeCmds` forwarded its arguments to `execute` positionally, so
      `captureout`/`die` silently landed on `errors`/`ok`; now keyword args.
    - `executeBashScript` crashed with IndexError on empty content.
    - `loadScript` used `eval` where `getattr` suffices, and called
      `self.getTmpPath()` without the argument it requires.
    """

    def executeBashScript(
            self,
            content="",
            path=None,
            die=True,
            remote=None,
            sshport=22,
            showout=True,
            outputStderr=True,
            sshkey="",
            timeout=600,
            executor=None):
        """Run *content* (or the file at *path*) as a bash script, locally or
        on *remote* over ssh.

        @param remote can be ip addr or hostname of remote, if given will execute cmds there
        @return (rc, stdout, stderr)
        """
        if path is not None:
            content = self.readFile(path)
        # endswith handles the empty-string case the old `content[-1]` did not
        if not content.endswith("\n"):
            content += "\n"
        if remote is None:
            tmppath = self.getTmpPath("")
            content = "cd %s\n%s" % (tmppath, content)
        else:
            content = "cd /tmp\n%s" % content
        if die:
            # abort the script on first failing command, echo each command
            content = "set -ex\n%s" % content
        path2 = self.getTmpPath("do.sh")
        self.writeFile(path2, content, strip=True)
        if remote is not None:
            tmppathdest = "/tmp/do.sh"
            if sshkey:
                if self.SSHKeyGetPathFromAgent(sshkey, die=False) is not None:
                    self.execute('ssh-add %s' % sshkey)
                # escape '!' for the shell ("\\!" == the old "\!" bytes)
                sshkey = '-i %s ' % sshkey.replace('!', '\\!')
            self.execute(
                "scp %s -oStrictHostKeyChecking=no -P %s %s root@%s:%s " %
                (sshkey, sshport, path2, remote, tmppathdest), die=die, executor=executor)
            rc, res, err = self.execute(
                "ssh %s -oStrictHostKeyChecking=no -A -p %s root@%s 'bash %s'" %
                (sshkey, sshport, remote, tmppathdest), die=die, timeout=timeout, executor=executor)
        else:
            rc, res, err = self.execute(
                "bash %s" % path2,
                die=die, showout=showout, outputStderr=outputStderr,
                timeout=timeout, executor=executor)
        return rc, res, err

    def executeCmds(
            self,
            cmdstr,
            showout=True,
            outputStderr=True,
            useShell=True,
            log=True,
            cwd=None,
            timeout=120,
            captureout=True,
            die=True,
            executor=None):
        """Execute each non-empty, non-comment line of *cmdstr* in turn.

        @return (list of rc's as strings, concatenated stdout)
        """
        rc_ = []
        out_ = ""
        for cmd in cmdstr.split("\n"):
            cmd = cmd.strip()
            if cmd == "" or cmd.startswith("#"):
                continue
            # keyword args: the old positional call shifted captureout/die
            # onto execute's errors/ok parameters
            rc, out, err = self.execute(
                cmd, showout=showout, outputStderr=outputStderr,
                useShell=useShell, log=log, cwd=cwd, timeout=timeout,
                captureout=captureout, die=die, executor=executor)
            rc_.append(str(rc))
            out_ += out
        return rc_, out_

    def executeInteractive(self, command, die=True):
        """Run *command* attached to the current terminal; return its os.system
        exit status (0 on success)."""
        exitcode = os.system(command)
        if exitcode != 0 and die:
            raise RuntimeError("Could not execute %s" % command)
        return exitcode

    def checkInstalled(self, cmdname):
        """
        @param cmdname is cmd to check e.g. curl
        @return True when `which` finds the command
        """
        rc, out, err = self.execute(
            "which %s" % cmdname, die=False, showout=False, outputStderr=False)
        return rc == 0

    def loadScript(self, path, executor=None):
        """Preprocess the jumpscript at *path*, compile it to a module cached
        by md5, and register its tagged actions on self.actions."""
        self.logger.info("ectr:%s: load jumpscript: %s" % (executor, path))
        source = self.readFile(path)
        out, tags = self._preprocess(source)

        def md5_string(s):
            import hashlib
            return hashlib.md5(s.encode('utf-8')).hexdigest()

        md5sum = md5_string(out)
        modulename = 'JumpScale.jumpscript_%s' % md5sum
        codepath = self.joinPaths(
            self.getTmpPath(""),
            "jumpscripts",
            "%s.py" % md5sum)
        self.writeFile(filename=codepath, contents=out)
        # invalidate any stale linecache entry for the rewritten file
        linecache.checkcache(codepath)
        self.module = imp.load_source(modulename, codepath)
        self.author = getattr(self.module, 'author', "unknown")
        self.organization = getattr(self.module, 'organization', "unknown")
        self.version = getattr(self.module, 'version', 0)
        self.modtime = getattr(self.module, 'modtime', 0)
        self.descr = getattr(self.module, 'descr', "")
        # identifies the actions & tags linked to it
        self.tags = tags
        for name in list(tags):
            # getattr instead of eval: same lookup, no code-injection surface
            self.actions[name] = getattr(self.module, name)

    def execute(
            self,
            command,
            showout=True,
            outputStderr=True,
            useShell=True,
            log=True,
            cwd=None,
            timeout=0,
            errors=None,
            ok=None,
            captureout=True,
            die=True,
            async_=False,
            executor=None):
        """Execute *command* through *executor* when given, otherwise through
        the local executor.

        @param errors is array of statements if found then exit as error
        @return (rc, out, err)
        NOTE: `async_` was named `async` before; that is a reserved word
        from Python 3.7 on.
        """
        errors = [] if errors is None else errors
        ok = [] if ok is None else ok
        command = self.textstrip(command)
        if executor:
            return executor.execute(
                command,
                die=die,
                checkok=False,
                showout=True,
                timeout=timeout)
        return j.tools.executorLocal.execute(
            command,
            showout=showout,
            outputStderr=outputStderr,
            die=die)

    def psfind(self, name):
        """Return True when a process matching *name* shows up in `ps ax`
        (the grep process itself is filtered out)."""
        rc, out, err = self.execute("ps ax | grep %s" % name, showout=False)
        for line in out.split("\n"):
            if line.strip() == "":
                continue
            if "grep" in line:
                continue
            return True
        return False

    def killall(self, name):
        """kill -9 every process matching *name*; raise when any survive."""
        rc, out, err = self.execute("ps ax | grep %s" % name, showout=False)
        for line in out.split("\n"):
            if line.strip() == "":
                continue
            if "grep" in line:
                continue
            pid = line.strip().split(" ")[0]
            self.logger.info("kill:%s (%s)" % (name, pid))
            self.execute("kill -9 %s" % pid, showout=False)
        if self.psfind(name):
            raise RuntimeError("stop debug here")
            raise RuntimeError(
                "Could not kill:%s, is still, there check if its not autorestarting." %
                name)
class InstallTools(GitMethods, FSMethods, ExecutorMethods, SSHMethods):
    """Facade combining all installer mixins; owns environment/config state.

    Exposed as the module-level singleton `do` (j.core.installtools).
    Fixes in this revision: `self._whoami` is now initialized in __init__
    (the `whoami` property read it before any assignment, raising
    AttributeError on first access), and a large block of commented-out
    dead code (fixCodeChangeDirVars) was removed.
    """

    def __init__(self, debug=False):
        # NOTE(review): the `debug` argument is accepted but unused here;
        # the effective debug flag lives in the persisted config (property).
        self.__jslocation__ = "j.core.installtools"
        self._extratools = False
        self._asyncLoaded = False
        self._deps = None
        self._config = None
        self._whoami = None  # BUGFIX: was never initialized; read by `whoami`
        self.platformtype = j.core.platformtype
        self.embed = False
        self.myplatform = self.platformtype.myplatform
        # Containers use fixed system paths; otherwise everything lives
        # under the user's home directory.
        if self.exists("/root/.iscontainer"):
            os.environ["GIGDIR"] = "/root/gig"
            os.environ["VARDIR"] = "/optvar"
        else:
            if "GIGDIR" not in os.environ:
                os.environ["GIGDIR"] = "%s/gig" % os.environ["HOME"]
            if "VARDIR" not in os.environ:
                os.environ["VARDIR"] = "%s/var/" % os.environ["GIGDIR"]
        self.logger = j.logger.get("installtools")

    @property
    def mascot(self):
        """ASCII-art mascot; exits the process when the env is not installed."""
        mascotpath = "%s/.mascot.txt" % os.environ["HOME"]
        if not j.sal.fs.exists(mascotpath):
            print("env has not been installed properly, please follow init instructions on https://github.com/Jumpscale/developer")
            sys.exit(1)
        return self.readFile(mascotpath)

    @property
    def config(self):
        """The persisted jumpscale state config (toml-backed dict)."""
        return j.core.state.config

    @property
    def env(self):
        """The process environment (os.environ)."""
        return os.environ

    @property
    def debug(self):
        return self.config["system"]["debug"]

    @property
    def container(self):
        """
        means we don't work with ssh-agent ...
        """
        return self.config["system"]["container"]

    @debug.setter
    def debug(self, value):
        if not isinstance(value, bool):
            raise RuntimeError("input for debug needs to be bool")
        if self.config != {}:
            self.config["system"]["debug"] = value
            j.core.state.configSave()
        else:
            raise RuntimeError("cannot set debug, system is in readonly.")

    @container.setter
    def container(self, value):
        if not isinstance(value, bool):
            raise RuntimeError("input for container needs to be bool")
        if self.config != {}:
            self.config["system"]["container"] = value
            j.core.state.configSave()
        else:
            raise RuntimeError("cannot set container, system is in readonly.")

    def initEnv(self):
        """Build the default TOML config, resolve the {{...}} dir templates,
        create the directories, persist the config, and symlink the js9
        commands into /usr/local/bin.

        @type executor: ExecutorBase
        """
        container = bool(self.exists("/root/.iscontainer"))
        if container:
            T = '''
[dirs]
HOMEDIR = "~"
TMPDIR = "/tmp"
VARDIR = "/optvar"
BASEDIR = "/opt/jumpscale9"
CFGDIR = "{{VARDIR}}/cfg"
DATADIR = "{{VARDIR}}/data"
CODEDIR = "/opt/code"
BUILDDIR = "{{VARDIR}}/build"
LIBDIR = "{{BASEDIR}}/lib/"
TEMPLATEDIR = "{{BASEDIR}}/templates"
'''
        else:
            T = '''
[dirs]
HOMEDIR = "~"
TMPDIR = "/tmp"
VARDIR = "{{GIGDIR}}/var"
BASEDIR = "{{GIGDIR}}/gig"
CFGDIR = "{{VARDIR}}/cfg"
DATADIR = "{{VARDIR}}/data"
CODEDIR = "{{GIGDIR}}/code"
BUILDDIR = "{{VARDIR}}/build"
LIBDIR = "{{BASEDIR}}/lib/"
TEMPLATEDIR = "{{BASEDIR}}/templates"
'''
        T += '''
[email]
from = "info@incubaid.com"
smtp_port = 443
smtp_server = ""
[git.ays]
branch = "master"
url = "https://github.com/Jumpscale/ays9.git"
[git.js]
branch = "master"
url = "https://github.com/Jumpscale/core9.git"
[system]
debug = true
autopip = false
readonly = false
container = false
[grid]
gid = 0
nid = 0
[redis]
port = 6379
addr = "localhost"
[me]
fullname = "Kristof De Spiegeleer"
loginname = "despiegk"
[ssh]
SSHKEYNAME = "id_rsa"
'''
        T = j.data.text.strip(T)
        T = T.replace("{{GIGDIR}}", os.environ["GIGDIR"])
        # will replace ~ and the variables; render repeatedly because dir
        # templates reference each other (VARDIR -> GIGDIR, ...)
        counter = 0
        while "{{" in T and counter < 10:
            TT = pytoml.loads(T)
            T = pystache.render(T, **TT["dirs"])
            counter += 1
        TT = pytoml.loads(T)
        for key, val in TT["dirs"].items():
            val = val.replace(
                "~", os.environ["HOME"]).replace(
                "//", "/").rstrip("/")
            if not j.sal.fs.exists(val):
                j.sal.fs.createDir(val)
            TT["dirs"][key] = val
        if counter > 9:
            raise RuntimeError(
                "cannot convert default configfile, template arguments still in")
        if not container:
            # get env dir arguments & overrule them in jumpscale config
            for key, val in os.environ.items():
                if "DIR" in key and key in TT["dirs"]:
                    TT["dirs"][key] = val
        if container:
            TT["system"]["container"] = True
            j.core.state.configUpdate(TT, True)  # will overwrite
        else:
            j.core.state.configUpdate(TT, False)  # will not overwrite
        # COPY the jumpscale commands: locate the code dir relative to the
        # running j.logger module, then symlink every cmd into PATH
        js9_codedir = j.sal.fs.getParent(
            j.sal.fs.getParent(
                j.sal.fs.getDirName(
                    j.sal.fs.getPathOfRunningFunction(
                        j.logger.__init__))))
        cmdsDir = j.sal.fs.joinPaths(js9_codedir, "cmds")
        for item in j.sal.fs.listFilesInDir(cmdsDir):
            j.sal.fs.symlink(
                item,
                "/usr/local/bin/%s" % j.sal.fs.getBaseName(item),
                overwriteTarget=True)
        self.linkJSCommandsToSystem()

    def linkJSCommandsToSystem(self):
        """Symlink the core9 cmd scripts into /usr/local/bin (executable,
        replacing existing links)."""
        src = "%s/github/jumpscale/core9/cmds/" % j.core.state.config["dirs"]["CODEDIR"]
        self.symlinkFilesInDir(src, "/usr/local/bin", delete=True, includeDirs=False, makeExecutable=True)

    @property
    def epoch(self):
        '''
        Get epoch timestamp (number of seconds passed since January 1, 1970)
        '''
        return int(time.time())

    @property
    def whoami(self):
        """Cached output of `whoami`; raises RuntimeError when the command
        cannot be executed."""
        if self._whoami is not None:
            return self._whoami
        rc, result, err = self.execute(
            "whoami", die=False, showout=False, outputStderr=False)
        if rc > 0:
            # could not start ssh-agent
            raise RuntimeError(
                "Could not call whoami,\nstdout:%s\nstderr:%s\n" %
                (result, err))
        self._whoami = result.strip()
        return self._whoami
# Module-level singleton used throughout the installer code base.
do = InstallTools()
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2017/8/10 5:07
# @Author : xiongyaokun
# @Site :
# @File : test06.py
# @Software: PyCharm
class Programmer(object):
    """Demo class showing a class attribute, name-mangled private state,
    a classmethod, and a read-only property."""

    # Class attribute shared by every instance.
    hobby = 'beautiful girl'

    def __init__(self, name, age, weight):
        self.name = name        # public attribute
        self._age = age         # conventionally internal
        self.__weight = weight  # name-mangled to _Programmer__weight

    @classmethod
    def get_hobby(cls):
        # FIX: use `cls` instead of hard-coding Programmer so subclasses
        # that override `hobby` report their own value.
        return cls.hobby

    @property
    def get_weight(self):
        """Read-only access to the private weight."""
        return self.__weight

    def self_introduction(self):
        return "My name is %s, I am %d years old." % (self.name, self._age)
# Demo driver. FIX: the original used Python 2 `print` statements, which are
# a SyntaxError on Python 3; converted to the print() function.
p = Programmer('xiong', 28, 70)
print(dir(p))
print('**' * 10, '我是分割线', '**' * 10)
print(p.get_hobby())
print(Programmer.get_hobby())
print(Programmer.hobby)
print(p.get_weight)
print(p.self_introduction())
from django import forms
from django.forms import ModelForm
from .models import Author
from book.models import Book
class AddAuthorInfo(forms.ModelForm):
    """ModelForm for creating/editing an Author (name, surname, patronymic)."""

    class Meta:
        model = Author
        fields = ['name', 'surname', 'patronymic']
        # Human-readable labels shown next to each input.
        labels = {
            'name': 'First name:',
            'surname': 'Last name:',
            'patronymic': 'Patronymic:',
        }
        # Bootstrap styling applied to every text input.
        widgets = {
            'name': forms.TextInput(attrs={'class':'form-control'}),
            'surname': forms.TextInput(attrs={'class':'form-control'}),
            'patronymic': forms.TextInput(attrs={'class':'form-control'}),
        }
|
#!/usr/bin/env python
# coding=utf-8
from gi.repository import Gtk, GObject
import serial
import time
import goslate
import sqlite3
# Module-level singletons: the online translator and the local word-cache DB.
translator=goslate.Goslate()
connection = sqlite3.connect('dict.db')
sql = connection.cursor()
def insertToDB(word, meaning):
    """Cache a word and its translation in the local dictionary DB.

    NOTE(review): this relies on the english/bangla tables staying
    rowid-aligned (see eng2bn) — confirm against the schema.
    """
    for table, column, value in (("english", "EN", word), ("bangla", "BN", meaning)):
        sql.execute("INSERT INTO %s(%s) VALUES(?)" % (table, column), (value,))
    connection.commit()
def eng2bn(word):
    """Look *word* up in the local DB; return its Bangla meaning, or the
    sentinel string "NotFound" when it is not cached."""
    sql.execute('SELECT * FROM english WHERE EN=?', (word,))
    # EAFP: fetchone() returns None for a miss, making the int()/index
    # operations raise TypeError.
    try:
        serial = int(sql.fetchone()[0])
        sql.execute("SELECT * FROM bangla WHERE SN=?", (serial,))
        return sql.fetchone()[1]
    except TypeError:
        return "NotFound"
class TranslatorGUI:
    """Glade-based GTK window translating English words to Bangla.

    Fixes: the bare `except:` (which also swallowed KeyboardInterrupt /
    SystemExit) is narrowed to `except Exception`; stray semicolon removed.
    """

    def __init__(self):
        builder = Gtk.Builder()
        builder.add_from_file("translate.glade")
        self.window = builder.get_object("messengerWindow")
        self.inputBox = builder.get_object("input_box")
        self.showBox = builder.get_object("receive_box")
        builder.connect_signals(self)

    def on_input_box_activate(self, inputBox):
        """Translate the entered word (local cache first, then Google
        Translate) and display the result."""
        word = self.inputBox.get_text()
        word = word.lower()
        lang = 'bn'
        inputBox.set_text("")
        meaning = eng2bn(word)
        if meaning == "NotFound":
            try:
                meaning = translator.translate(word, lang)
                if meaning != "NotFound" and meaning != word:
                    # cache the successful online translation locally
                    insertToDB(word, meaning)
            except Exception:
                # offline / API failure: show a Bangla "not found" message
                if meaning == "NotFound":
                    meaning = "অর্থ খুঁজে পাওয়া যায়নি , ইন্টারনেট সংযোগ নিশ্চিত করুন "
        self.showBox.set_text(meaning)

    def mnu_quite_app(self, window):
        Gtk.main_quit()
if __name__ == '__main__':
    # Build the window, wire window-close to quit, and enter the GTK loop.
    myTranslator = TranslatorGUI()
    myTranslator.window.connect("delete-event", Gtk.main_quit)
    myTranslator.window.show_all()
    Gtk.main()
|
def openOrSenior(data):
    """Classify each [age, handicap] pair: "Senior" when age >= 55 AND
    handicap >= 7, otherwise "Open". Returns the list of categories."""
    return ["Senior" if age >= 55 and handicap >= 7 else "Open"
            for age, handicap in data]
# Demo call; the result (["Open", "Senior", "Open", "Senior"]) is discarded.
openOrSenior([[45, 12], [55, 21], [19, -2], [104, 20]])
#!/usr/bin/env python
import tornado.ioloop
import tornado.web
import tornado.gen
def async_file_read():
    """Simulate a slow asynchronous read: returns a Future that resolves
    after 3 seconds without blocking the IOLoop."""
    return tornado.gen.sleep(3)
class MainHandler(tornado.web.RequestHandler):
    """Serves the root page after awaiting the simulated file read."""

    @tornado.gen.coroutine
    def get(self):
        # yield suspends this handler; other requests run meanwhile
        yield async_file_read()
        self.write("<head><title>Web Page</title></head><h1>Hello!</h1>")
        self.finish()
def make_app():
    """Create the Tornado application with the single root route."""
    routes = [(r"/", MainHandler)]
    return tornado.web.Application(routes)
if __name__ == "__main__":
    # Serve on port 8080 until interrupted.
    app = make_app()
    app.listen(8080)
    tornado.ioloop.IOLoop.current().start()
|
#!/usr/bin/env python
# SECURITY: placeholders only. Real credentials must come from environment
# variables or a secrets store — never commit them to source control.
# (Shebang fixed: was the malformed "#!/usr/bin/env/ python".)
key = 'INSERT YOUR BINANCE API KEY'
secret = 'INSERT YOUR BINANCE API SECRET'
# Ces fonctions de tri doivent être recodées.
# Dans Python la fonction sort() existe déjà, mais nous la recoderons.
# Tri à bulles.
# Cet algorithme est un des plus simples, mais il n'est pas très performant.
# Pour chaque nombre dans la liste, on le compare avec les nombres suivants.
# Si le nombre comparé est plus petit, on inverse la position des 2 nombres.
# La fonction ne retourne rien, elle modifie la liste directement.
def bubble(liste):
    """Sort *liste* in place, ascending, using bubble sort; returns None.

    FIX: the function was an unimplemented `pass` stub although the demo
    below expects the list to come out sorted.
    """
    n = len(liste)
    for limit in range(n - 1, 0, -1):
        swapped = False
        for i in range(limit):
            if liste[i] > liste[i + 1]:
                liste[i], liste[i + 1] = liste[i + 1], liste[i]
                swapped = True
        if not swapped:
            # already sorted: stop early
            break
if __name__ == '__main__':
    # Quick manual check: should print the list sorted ascending.
    li = [3, 6, 2, 3, 6, 7, 1]
    bubble(li)
    print(li)  # -> 1, 2, 3, 3, 6, 6, 7
|
import math
import time
import configparser
import numpy as np
from pynput import keyboard, mouse
from screeninfo import get_monitors
def get_x(v, w, a, t):
    """Horizontal position at time *t* for launch speed *v*, wind
    acceleration *w*, and launch angle *a* in degrees."""
    wind_drift = (w * t ** 2) / 2
    return wind_drift + v * math.cos(math.radians(a)) * t
def get_y(v, a, t):
    """Vertical position at time *t* for launch speed *v* and angle *a*
    in degrees (reads the module-level `gravity` constant)."""
    gravity_drop = (-gravity * t ** 2) / 2
    return gravity_drop + v * math.sin(math.radians(a)) * t
def calc_distance(point_a, point_b):
    """Euclidean distance between two (x, y) points."""
    dx = point_a[0] - point_b[0]
    dy = point_a[1] - point_b[1]
    return math.sqrt(dx ** 2 + dy ** 2)
def on_press(key):
    """Keyboard hook: '1' captures your position, '2' the target position,
    '3' a debug point — all taken from the latest mouse position (on_move).

    Suppressed while the console prompt is reading input.
    """
    if supress_input:  # main loop sets this while typing at the console
        return
    global your_position, target_position, got_you, got_target, debug_position
    # positions are only captured once per round (reset by the main loop)
    if key == keyboard.KeyCode(char='1') and your_position is None:
        your_position = mouse_position
        got_you = True
        print("Your position: ", your_position)
    elif key == keyboard.KeyCode(char='2') and target_position is None:
        target_position = mouse_position
        got_target = True
        print("Target position: ", target_position)
    elif key == keyboard.KeyCode(char='3'):
        debug_position = mouse_position
        print("Position: ", debug_position)
supress_input = False  # NOTE: original spelling kept; toggled by the main loop

# Primary monitor resolution: used to map pixel coordinates to game units.
monitor = get_monitors()[0]
screen_width = monitor.width
screen_height = monitor.height
# CONFIG
config = configparser.ConfigParser()
config.read('config.ini')
# Debug/calibration modes and the pixel->game-unit scaling factors.
debug_size = config["DEBUG"]["screen_size"] == "True"
debug_wind = config["DEBUG"]["wind"] == "True"
game_width = float(config["SETTINGS"]["game_width"])
game_height = float(config["SETTINGS"]["game_height"])
wind_multiplier = float(config["SETTINGS"]["wind_multiplier"])
def transform_x(x):
    """Convert a screen x coordinate (pixels) into game-world units."""
    global screen_width
    fraction_across = x / screen_width
    return fraction_across * game_width
def transform_y(y):
    """Convert a screen y coordinate (pixels, top-origin) into game-world
    units measured from the bottom of the screen."""
    global screen_height
    flipped = screen_height - y
    return flipped / screen_height * game_height
def on_move(mouse_x, mouse_y):
    """Mouse hook: keep the latest pointer position, already converted
    into game coordinates."""
    global mouse_position
    game_xy = (transform_x(mouse_x), transform_y(mouse_y))
    mouse_position = game_xy
def calc_velocity():
    """Brute-force the launch velocity whose trajectory lands closest to
    the target.

    Scans velocities 0..99 and flight times 0..30s (0.1s steps), keeping
    the first (v, t) whose trajectory point is nearest the target.
    Reads module globals: wind, shooting_angle, your_position,
    target_position, gravity.
    Returns (velocity, flight_time, apex_height).
    """
    nearest = None
    calculated_velocity = None
    calculated_flight_time = None
    for v in np.arange(0, 100, 1):
        for t in np.arange(0, 30, 0.1):
            x = get_x(v, wind, shooting_angle, t)
            y = get_y(v, shooting_angle, t)
            cords = (x, y)
            # target relative to the shooter; abs() on x so only horizontal
            # distance matters, not direction
            target = (abs(target_position[0] - your_position[0]), target_position[1] - your_position[1])
            distance = calc_distance(target, cords)
            # strict '<' keeps the earliest (v, t) on ties
            if nearest is None or distance < nearest:
                nearest = distance
                calculated_velocity = v
                calculated_flight_time = t
    # apex height of the parabola: v^2 * sin^2(angle) / (2g)
    calculated_height = pow(calculated_velocity, 2) / (2 * gravity) * pow(math.sin(math.radians(shooting_angle)), 2)
    return calculated_velocity, calculated_flight_time, calculated_height
def calc_wind():
    """Brute-force the wind acceleration that best explains an observed hit.

    Scans wind -50..50 (0.1 steps) and time 0..30s, keeping the wind whose
    trajectory point is nearest the observed hit_point. Reads module
    globals: velocity, shooting_angle, your_position, hit_point.
    """
    nearest = None
    calculated_wind = None
    for w in np.arange(-50, 50, 0.1):
        for t in np.arange(0, 30, 0.1):
            x = get_x(velocity, w, shooting_angle, t)
            y = get_y(velocity, shooting_angle, t)
            cords = (x, y)
            # observed hit point relative to the shooter
            target = (hit_point[0] - your_position[0], hit_point[1] - your_position[1])
            distance = calc_distance(target, cords)
            if nearest is None or distance < nearest:
                nearest = distance
                calculated_wind = w
    return calculated_wind
if debug_size:
    print("Program started in GAME SCREEN SIZE DEBUG MODE ( You can change inside config.ini file )")
elif debug_wind:
    print("Program started in WIND DEBUG MODE ( You can change inside config.ini file )")

# Shared state mutated by the pynput listener callbacks.
mouse_position = (0, 0)
your_position = None
target_position = None
debug_position = None
gravity = 9.8

# Keyboard and mouse hooks run on background threads.
listener = keyboard.Listener(
    on_press=on_press)
listener.start()
listener = mouse.Listener(
    on_move=on_move)
listener.start()

while True:
    print("Waiting for positions (For your position press '1' and press '2' for target)")
    your_position = None
    target_position = None
    # busy-wait until the hotkeys have captured both positions
    while your_position is None or target_position is None:
        time.sleep(1)
    # input
    supress_input = True  # disable the hotkeys while typing at the console
    shooting_angle = int(input("Angle: "))
    wind = int(input("Wind: ")) * wind_multiplier
    supress_input = False
    # /input
    velocity, flight_time, height = calc_velocity()
    print("Velocity: ", velocity)
    print("Estimated time ", flight_time, " seconds")
    print("Estimated height ", height)
    if debug_size:
        # Calibration: compare the predicted shot with an observed one and
        # rescale game_width/game_height accordingly, then persist.
        print("DEBUG SCREEN SIZE:")
        print("Choose hit point with '3'")
        debug_position = None
        while debug_position is None:
            time.sleep(1)
        hit_point = debug_position
        print("Choose highest point of trajectory with '3'")
        debug_position = None
        while debug_position is None:
            time.sleep(1)
        real_height = debug_position[1] - your_position[1]
        print("Height: ", height)
        expected_length = abs(target_position[0] - your_position[0])
        real_length = abs(hit_point[0] - your_position[0])
        x_diff = real_length / expected_length
        print("Changed game width from ", game_width, " to ", game_width / x_diff)
        game_width /= x_diff
        expected_height = height
        y_diff = real_height / expected_height
        print("Changed game height from ", game_height, " to ", game_height / y_diff)
        game_height /= y_diff
        config["SETTINGS"] = {
            'game_width': str(game_width),
            'game_height': str(game_height),
            'wind_multiplier': str(wind_multiplier)}
        with open('config.ini', 'w') as configfile:
            config.write(configfile)
    elif debug_wind:
        # Calibration: infer the real wind from an observed hit point and
        # rescale wind_multiplier accordingly, then persist.
        print("DEBUG WIND:")
        print("Choose hit point with '3'")
        debug_position = None
        while debug_position is None:
            time.sleep(1)
        hit_point = debug_position
        print(hit_point)
        real_wind = calc_wind()
        print(real_wind)
        print(wind)
        wind_diff = abs(wind / real_wind)
        print(wind_diff)
        print("Changed wind multiplier from ", wind_multiplier, " to ", wind_multiplier / wind_diff)
        wind_multiplier /= wind_diff
        config["SETTINGS"] = {
            'game_width': str(game_width),
            'game_height': str(game_height),
            'wind_multiplier': str(wind_multiplier)}
        with open('config.ini', 'w') as configfile:
            config.write(configfile)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import math
import os
import random
import shutil
from os import path, listdir
import time
import datetime
import tensorflow as tf
import numpy as np
from utils import input_reader_v2
from utils import layers
from utils import tools
__mtime__ = '2018/4/8'
class DeepInterestModelMultiLabel:
    def __init__(self,
                 n_epoch,
                 batch_size,
                 embedding_dim,
                 nn_layer_shape,
                 feature_field_file,
                 num_parallel=10,
                 activation='relu',
                 learning_rate=0.001,
                 optimizer='adam',
                 steps_to_logout=1000,
                 need_dropout=False,
                 train_data_split=0.99,
                 train_end_date="",
                 train_day_length=15,
                 max_steps=10000
                 ):
        """Configure the multi-label deep-interest model: hyper-parameters,
        the training date window, logging, and the shared tag-embedding
        variables (used both for sparse-feature lookup and as the output
        layer weights).

        NOTE(review): train_end_date is fed to strptime BEFORE the emptiness
        check below, so an empty string raises ValueError before the
        intended RuntimeError — confirm desired behavior.
        """
        self.n_epoch = n_epoch
        self.batch_size = batch_size
        self.embedding_dim = embedding_dim
        self.nn_layer_shape = nn_layer_shape
        self.num_parallel = num_parallel
        self.activation = activation
        self.learning_rate = learning_rate
        self.optimizer = optimizer
        self.steps_to_logout = steps_to_logout
        # Field layout parsed from the feature description file.
        self.sparse_field_name_list, \
            self.dense_field_name_list, \
            self.dense_field_size, \
            self.label_size = self.load_index_dic(feature_field_file)
        self.feature_field_file_name = path.basename(feature_field_file)
        self.need_dropout = need_dropout
        self.train_data_split = train_data_split
        self.max_steps = max_steps
        '''
        indexs = tf.placeholder(tf.int64, [None, 2])
        ids = tf.placeholder(tf.int64, [None])
        values = tf.placeholder(tf.float32, [None])
        dense_str = tf.placeholder(tf.string, [None])
        feature_shape = tf.placeholder(tf.int64, [2])
        self.input_placeholder = {'imei':SparseTensor(indexs,ids,feature_shape),'label_values':SparseTensor(indexs,values,feature_shape),'imei':dense_str}
        for field in self.sparse_field_name_list:
            self.input_placeholder[field + "_values"] = SparseTensor(indexs,values,feature_shape)
            self.input_placeholder[field + "_values_float"] = SparseTensor(indexs,values,feature_shape)
            self.input_placeholder[field] = SparseTensor(indexs,ids,feature_shape)
        for field in self.dense_field_name_list:
            self.input_placeholder[field] = dense_str
        '''
        # Derive the training window: [end - (train_day_length-1) days, end].
        self.train_end_date = int(train_end_date)
        train_end_datetime = datetime.datetime.fromtimestamp(time.mktime(time.strptime(train_end_date, '%Y%m%d')))
        self.train_start_date = int((train_end_datetime-datetime.timedelta(days=train_day_length-1)).strftime("%Y%m%d"))
        self.train_day_length = train_day_length
        self.method = "DeepInterestModelMultiLabel"
        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                            datefmt='%Y-%m-%d %H:%M:%S',
                            filename='logs/' + self.method + '.' + self.feature_field_file_name + '.' + str(self.embedding_dim) + '.log',
                            filemode='w')
        if train_end_date == "":
            logging.error('train_end_date is [' + train_end_date + '] is empty!')
            raise RuntimeError('train_end_date is [' + train_end_date + '] is empty!')
        logging.info('n_epoch={} batch_size={} embedding_dim={} nn_layer_shape={} num_parallel={} activation={} learning_rate={} optimizer={} steps_to_logout={} sparse_field_name_list={} dense_field_name_list={} label_size={} need_dropout={} train_data_split={} train_end_date={} train_day_length={}'.format(str(self.n_epoch), str(self.batch_size),str(self.embedding_dim),','.join(str(i) for i in self.nn_layer_shape),str(self.num_parallel),self.activation,str(self.learning_rate),self.optimizer,str(self.steps_to_logout),','.join(self.sparse_field_name_list),','.join(self.dense_field_name_list),str(self.label_size),self.need_dropout,str(self.train_data_split),str(self.train_end_date),str(self.train_day_length)))
        # Shared tag embedding matrix + biases: used as lookup table for the
        # sparse input fields and as the output projection in train_op/map_op.
        with tf.variable_scope("tag_embedding_layer", reuse=tf.AUTO_REUSE):
            self.tag_embedding_weight = tf.get_variable("tag_embedding",
                                                        [self.label_size, self.embedding_dim],
                                                        initializer=tf.random_normal_initializer(
                                                            stddev=(1 / math.sqrt(float(self.embedding_dim)))),
                                                        trainable=True)
            self.tag_embedding_biases = tf.get_variable("tag_embedding_biases",
                                                        [self.label_size],
                                                        initializer=tf.zeros_initializer,
                                                        trainable=True)
            # Column of ones used to count features per row (see the 'avg'
            # combiner path in get_user_embedding_list).
            self.tag_ones = tf.expand_dims(tf.ones([self.label_size]), dim=1)
#=====================================================================================================================================================================================
def load_index_dic(self, feature_field_file):#稀疏,稠密,label分类-----预处理
print(feature_field_file)
f_field_data = open(feature_field_file)
sparse_field_name_list = []
dense_field_name_list = []
dense_field_size = []
label_size = 0
for line in f_field_data:
line = line.strip('\n').strip('\r')
line_arr = line.split(' ')
if line_arr[0] == 'sparse':
sparse_field_name_list.append(line_arr[1])
elif line_arr[0] == 'dense':
dense_field_name_list.append(line_arr[1])
dense_field_size.append(int(line_arr[2]))
elif line_arr[0] == 'label_size':
label_size = int(line_arr[1])
f_field_data.close()
return sparse_field_name_list, dense_field_name_list, dense_field_size, label_size
#==========================================================================================================================================================================================
    def get_user_embedding_list(self, batch_parsed_features, combiner='mean', need_dropout=False):
        """Build the user representation: each sparse field is embedded via
        the shared tag embedding table, dense fields are decoded to float32,
        everything is concatenated and fed through the user NN tower.

        Returns the NN tower output (shape [batch, nn_layer_shape[-1]]).
        NOTE(review): source indentation was ambiguous; the merge/divide
        lines are grouped under the 'avg' branch (manual sum/count mean) —
        confirm against the original file. If `combiner` is neither 'mean'
        nor 'avg', `embedding` would be unbound.
        """
        embedding_list = []
        user_long_embedding_size = 0
        for field_name in self.sparse_field_name_list:
            # AUTO_REUSE shares per-field scopes between train/eval graphs.
            with tf.variable_scope(field_name + "_embedding_layer", reuse=tf.AUTO_REUSE):
                field_sparse_ids = batch_parsed_features[field_name]
                field_sparse_values = batch_parsed_features[field_name + "_values"]
                if combiner == 'mean':
                    embedding = tf.nn.embedding_lookup_sparse(self.tag_embedding_weight, field_sparse_ids,
                                                              field_sparse_values,
                                                              combiner="mean")
                elif combiner == 'avg':
                    # 'avg': sum the embeddings, then divide by the per-row
                    # feature count (tag_ones matmul) -> manual mean.
                    embedding = tf.nn.embedding_lookup_sparse(self.tag_embedding_weight, field_sparse_ids,
                                                              field_sparse_values,
                                                              combiner="sum")
                    sparse_features = tf.sparse_merge(field_sparse_ids, field_sparse_values, vocab_size=self.label_size)
                    sparse_x_feature_cnt = tf.sparse_tensor_dense_matmul(sparse_features, self.tag_ones)
                    embedding = tf.div(embedding, sparse_x_feature_cnt)
                embedding_list.append(embedding)
                user_long_embedding_size += self.embedding_dim
        for i, field_name in enumerate(self.dense_field_name_list):
            with tf.variable_scope(field_name + "_dense_layer", reuse=tf.AUTO_REUSE):
                # Dense features arrive as raw bytes; decode to float32.
                field_dense_feature_values = tf.decode_raw(batch_parsed_features[field_name], tf.float32)
                embedding_list.append(field_dense_feature_values)
                user_long_embedding_size += self.dense_field_size[i]
        user_long_embedding = tf.concat(embedding_list, 1)
        user_long_embedding = tf.reshape(user_long_embedding, shape=[-1, user_long_embedding_size])
        print("user_long_embedding_size=" + str(user_long_embedding_size))
        with tf.variable_scope("user_nn_layer"):
            input_layer_output = layers.get_nn_layer_v2(user_long_embedding, user_long_embedding_size,
                                                        self.nn_layer_shape,
                                                        activation=self.activation,
                                                        need_dropout=need_dropout)
        return input_layer_output
#==============================================================================================================================================================================
    def test_op(self, batch_parsed_features):
        """Debug op: build the concatenated user embedding (sparse fields
        embedded + dense fields decoded) WITHOUT the NN tower, and return it.

        Largely duplicates the first half of get_user_embedding_list with
        combiner fixed to 'mean'.
        NOTE(review): source indentation was ambiguous; the merge/divide
        lines are grouped under the (unreachable here) 'avg' branch —
        confirm against the original file.
        """
        embedding_list = []
        user_long_embedding_size = 0
        combiner = 'mean'
        for field_name in self.sparse_field_name_list:
            with tf.variable_scope(field_name + "_embedding_layer", reuse=tf.AUTO_REUSE):
                field_sparse_ids = batch_parsed_features[field_name]
                field_sparse_values = batch_parsed_features[field_name + "_values"]
                if combiner == 'mean':
                    embedding = tf.nn.embedding_lookup_sparse(self.tag_embedding_weight, field_sparse_ids,
                                                              field_sparse_values,
                                                              combiner="mean")
                elif combiner == 'avg':
                    embedding = tf.nn.embedding_lookup_sparse(self.tag_embedding_weight, field_sparse_ids,
                                                              field_sparse_values,
                                                              combiner="sum")
                    sparse_features = tf.sparse_merge(field_sparse_ids, field_sparse_values, vocab_size=self.label_size)
                    sparse_x_feature_cnt = tf.sparse_tensor_dense_matmul(sparse_features, self.tag_ones)
                    embedding = tf.div(embedding, sparse_x_feature_cnt)
                embedding_list.append(embedding)
                user_long_embedding_size += self.embedding_dim
        for i, field_name in enumerate(self.dense_field_name_list):
            with tf.variable_scope(field_name + "_dense_layer", reuse=tf.AUTO_REUSE):
                field_dense_feature_values = tf.decode_raw(batch_parsed_features[field_name], tf.float32)
                embedding_list.append(field_dense_feature_values)
                user_long_embedding_size += self.dense_field_size[i]
        user_long_embedding = tf.concat(embedding_list, 1)
        print(str(user_long_embedding_size))
        return user_long_embedding
    def inference_op(self, batch_parsed_features):
        """Serving graph: return (imei identifiers, user NN output vectors),
        with dropout disabled."""
        imei = batch_parsed_features['imei']
        user_nn_layer_output = self.get_user_embedding_list(batch_parsed_features, need_dropout=False)
        return imei, user_nn_layer_output
    def train_op(self, batch_parsed_features):
        """Build the training graph: sigmoid cross-entropy over the multi-hot
        label vector, Adam optimizer, plus loss/accuracy summaries.

        Returns (train_step, global_step, loss).
        """
        # Densify the sparse labels into a [batch, label_size] multi-hot matrix.
        batch_labels = tf.sparse_tensor_to_dense(
            tf.sparse_merge(batch_parsed_features["label"], batch_parsed_features["label_values"], self.label_size))
        user_nn_layer_output = self.get_user_embedding_list(batch_parsed_features, need_dropout=self.need_dropout)
        # Score every tag: user vector x (shared) tag embedding matrix + bias.
        logits = tf.matmul(user_nn_layer_output, tf.transpose(self.tag_embedding_weight))
        logits = tf.nn.bias_add(logits, self.tag_embedding_biases)
        # train loss
        cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=batch_labels, logits=logits)
        # cross_entropy = tf.losses.sigmoid_cross_entropy(logits=logits, multi_class_labels=batch_labels)
        loss = tf.reduce_mean(cross_entropy)
        global_step = tf.Variable(0, name="global_step", trainable=False)
        train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(loss, global_step=global_step)
        tf.summary.scalar('loss', loss)
        # accuracy: round the sigmoid scores to 0/1, compare element-wise
        # with the labels, cast the booleans to float and average.
        predictions = tf.nn.sigmoid(logits, name='prediction')
        correct_prediction = tf.equal(tf.round(predictions), batch_labels)
        accuracy = tf.cast(correct_prediction, tf.float32)
        mean_accuracy = tf.reduce_mean(accuracy)
        tf.summary.scalar('mean_accuracy', mean_accuracy)
        # mean_average_precision = tf.metrics.average_precision_at_k(tf.cast(batch_labels,tf.int64),predictions,100)
        # tf.summary.scalar('mean_average_precision', mean_average_precision[0])
        return train_step, global_step, loss
def map_op(self, batch_parsed_features):
batch_labels = tf.sparse_tensor_to_dense(
tf.sparse_merge(batch_parsed_features["label"], batch_parsed_features["label_values"], self.label_size))
user_nn_layer_output = self.get_user_embedding_list(batch_parsed_features,need_dropout=False)
logits = tf.matmul(user_nn_layer_output, tf.transpose(self.tag_embedding_weight))
logits = tf.nn.bias_add(logits, self.tag_embedding_biases)
predictions = tf.nn.sigmoid(logits, name='prediction')#将输出sigmoid后输出
return predictions, batch_labels
    #====================================================== (reviewer note: the original author marked this section as hard to follow) ======================================================
    def fit(self, tf_data_path):
        """Train the model.

        Collects per-day TFRecord files under *tf_data_path* (one directory per
        date, named so that int(dir_name) is comparable to the configured date
        range), splits them into train/validation sets, runs the training loop,
        and periodically logs loss/mAP, writes summaries and checkpoints.

        Raises RuntimeError when the number of day directories in range does
        not match self.train_day_length.
        """
        tf_data_files = []
        dir_cnt = 0
        logging.info("train_start_date:" + str(self.train_start_date))
        logging.info("train_end_date:" + str(self.train_end_date))
        for dir_name in listdir(tf_data_path):
            dir_name_i = int(dir_name)
            if dir_name_i < self.train_start_date or dir_name_i > self.train_end_date:  # outside the configured training window
                continue
            dir_cnt += 1
            data_path = path.join(tf_data_path, dir_name)
            if path.isdir(data_path):
                _data_files = input_reader_v2.get_files(data_path)
                if len(_data_files) > 0:
                    tf_data_files.extend(_data_files)
        logging.info("train data cnt is " + str(dir_cnt))
        # Refuse to train on an incomplete date range.
        if dir_cnt != self.train_day_length:
            logging.error('train data is less than ' + str(self.train_day_length))
            raise RuntimeError('train data is less than ' + str(self.train_day_length))
        random.shuffle(tf_data_files)
        # Hold out at least one file for validation.
        validate_file_num = max(math.floor(len(tf_data_files) * (1-self.train_data_split)), 1)
        #train_files = tf_data_files
        #validate_files = input_reader_v2.get_files('./parse_data_tools/data/tf_data_path/20180621')
        train_files = tf_data_files[:-validate_file_num]
        validate_files = tf_data_files[-validate_file_num:]
        logging.info("train_files : {}".format(','.join(train_files)))
        logging.info("validate_files : {}".format(','.join(validate_files)))
        next_element = input_reader_v2.get_input(train_files,
                                                 self.dense_field_name_list,
                                                 self.sparse_field_name_list,
                                                 self.num_parallel,
                                                 self.batch_size,
                                                 self.n_epoch,
                                                 buffer_size=self.batch_size * 10)
        train_logit = self.train_op(next_element)
        #train_logit = self.test_op(next_element)
        validate_element = input_reader_v2.get_input(validate_files,
                                                     self.dense_field_name_list,
                                                     self.sparse_field_name_list,
                                                     self.num_parallel,
                                                     self.batch_size,
                                                     self.n_epoch,
                                                     buffer_size=self.batch_size)
        validate_logit = self.map_op(validate_element)
        #validate_logit = self.test_op(validate_element)
        inference_op = self.inference_op(self.input_placeholder)
        init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer(), tf.tables_initializer())
        checkpoint_path = "./checkpoint/" + self.method + "/"
        # Start from a clean checkpoint directory for every run.
        try:
            shutil.rmtree(checkpoint_path, ignore_errors=True)
        except Exception as e:
            logging.info("Fail to remove checkpoint_path, exception: {}".format(e))
        os.makedirs(checkpoint_path)
        checkpoint_file = path.join(checkpoint_path, "checkpoint.ckpt")
        saver = tf.train.Saver(max_to_keep=10)
        # NOTE(review): np.nan is rejected as a print threshold by NumPy >= 1.22
        # (the documented replacement is sys.maxsize) — confirm the pinned
        # NumPy version before upgrading.
        np.set_printoptions(threshold=np.nan)
        with tf.Session() as sess:
            sess.run(init_op)
            merged = tf.summary.merge_all()
            writer = tf.summary.FileWriter('./summary/' + self.method, graph=tf.get_default_graph(),
                                           filename_suffix='_' + self.feature_field_file_name + '_' + '_'.join(
                                               str(i) for i in self.nn_layer_shape))
            logging.info("Start train")
            loss_sum = 0.0
            steps_sum = 0
            try:
                # The loop ends via OutOfRangeError when the input pipeline is
                # exhausted, or via the max_steps break below.
                while True:
                    train_step, global_step, loss = sess.run(train_logit)
                    #_ = sess.run(train_logit)
                    #print(_.shape)
                    loss_sum += loss
                    steps_sum += 1
                    if global_step % self.steps_to_logout == 0:
                        # predictions,batch_labels = sess.run(map_logit)
                        result = sess.run(merged)
                        writer.add_summary(result, global_step)
                        # mAP = average_precision_score(batch_labels,predictions)
                        predictions, batch_labels = sess.run(validate_logit)
                        # Drop column 0 before computing mAP (reserved id).
                        predictions = predictions[:,1:]
                        batch_labels = batch_labels[:,1:]
                        mAP = tools.calculate_mAP_v3(predictions, batch_labels, 100)
                        logging.info("mAP=" + str(mAP))
                        logging.info("train loss={}".format(loss_sum / steps_sum))
                        loss_sum = 0.0
                        steps_sum = 0
                        saver.save(sess, checkpoint_file, global_step=global_step)
                    if self.max_steps > 0 and global_step > self.max_steps:
                        break
            except tf.errors.OutOfRangeError:
                logging.info("End of dataset")
            # Final checkpoint after the loop ends, however it ended.
            saver.save(sess, checkpoint_file, global_step=global_step)
            writer.close()
    def user_inference(self, data_path):
        """Restore the latest checkpoint and print, for one batch, each imei
        followed by its top-100 predicted tag ids (space separated)."""
        #tf.reset_default_graph()
        data_files = input_reader_v2.get_files(data_path)
        next_element = input_reader_v2.get_input(data_files,
                                                 self.dense_field_name_list,
                                                 self.sparse_field_name_list,
                                                 self.num_parallel,
                                                 self.batch_size,
                                                 1,
                                                 buffer_size=self.batch_size * 10)
        #saver = tf.train.import_meta_graph("./checkpoint/DeepInterestModelMultiLabel/checkpoint.ckpt-384576.meta")
        #train_op = self.train_op(next_element)
        user_inference_op = self.inference_op(next_element)
        init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer(), tf.tables_initializer())
        saver = tf.train.Saver()
        with tf.Session() as sess:
            #sess.run(init_op)
            saver.restore(sess, tf.train.latest_checkpoint("./checkpoint/DeepInterestModelMultiLabel"))
            try:
                while True:
                    imei, logit = sess.run(user_inference_op)
                    imei = [str(i, encoding="utf-8") for i in imei]
                    # Keep, per user, the 100 highest-scoring (tag_id, score) pairs.
                    logit = [sorted([(i, v) for i, v in enumerate(predicts)], key=lambda x: x[1], reverse=True)[0:100]
                             for predicts in logit]
                    #logit = [' '.join([str(i[0]) + ':' + str(i[1]) for i in predicts]) for predicts in logit]
                    logit = [' '.join([str(i[0]) for i in predicts]) for predicts in logit]
                    output = dict(zip(imei, logit))
                    for imei, predicts in output.items():
                        print(imei + ' ' + predicts)
                    # NOTE(review): only the first batch is printed before this
                    # unconditional break — presumably for debugging; confirm.
                    break
            except tf.errors.OutOfRangeError:
                print("End of dataset")
#=============================================================================================================================================================================
    def get_app_sim(self):
        """Return, for every tag, the indices of its 10000 most cosine-similar
        tags, computed over the concatenated [embedding | bias] vectors."""
        # tf.expand_dims(input, dim) adds a dimension; axis=-1 appends it last.
        tag_embedding_biases = tf.expand_dims(self.tag_embedding_biases,axis = -1)
        embedding_bias = tf.concat([self.tag_embedding_weight,tag_embedding_biases],axis=-1)
        embedding_bias_norm = tf.expand_dims(tf.sqrt(tf.reduce_sum(tf.square(embedding_bias), axis=1)),axis=1) # per-row L2 norm: sqrt(sum(v^2))
        embedding_bias_norm = tf.matmul(embedding_bias_norm,tf.transpose(embedding_bias_norm)) # outer product of norms: |a|*|b| for every pair
        cosin_dis = tf.matmul(embedding_bias,tf.transpose(embedding_bias))/embedding_bias_norm # pairwise cosine similarity: (a.b)/(|a||b|)
        # tf.nn.top_k(input, k) returns each row's k largest values and their indices.
        app_sim_top_10 = tf.nn.top_k(cosin_dis, 10000)
        return app_sim_top_10.indices
def dump_app_sim(self):
saver = tf.train.Saver()
#set_printoptions来强制NumPy打印所有数据np.set_printoptions(threshold='nan')
np.set_printoptions(threshold=np.nan)
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint("./checkpoint/DeepInterestModelMultiLabel"))
sim_matrix = sess.run(self.get_app_sim())
print(sim_matrix)
|
from Tree.data2 import Data
import math
import random
from Tree.Identifier import Identifier
import copy
class Node:
    """A node of the betting game tree.

    Holds the child nodes, a unique Identifier, per-node statistics (Data),
    and which bets are still available at this point of the game.
    """
    __slots__ = 'children', 'identifier', 'data', 'maximumBet'

    def __init__(self, identifier: Identifier, data: Data, maximumBet = None):
        self.children = []
        self.identifier = identifier
        self.data = data
        # = None, if no bets available, = "b1" if b1 available, = "b2" if b1 & b2 available.
        self.maximumBet = maximumBet

    """ Finds a rooted subtree of node """
    def subtree(self):
        """Return a new Tree rooted at a shallow copy of this node, with every
        descendant registered in the tree's node table by identifier name."""
        # Local import avoids the circular dependency Tree <-> Node.
        from Tree.Tree import Tree
        new_root = copy.copy(self)
        T = Tree(new_tree=True, root=new_root)
        # Iterative traversal of all descendants (shared, not copied).
        child_list = [new_root]
        while len(child_list) > 0:
            node = child_list.pop()
            for c in node.children:
                child_list.append(c)
            T.nodes[node.identifier.name] = node
        return T

    def find_distribution(self, win_probability, thresh=150):
        """Return [(child, probability)] — a softmax over the children's
        cumulative rewards.

        Each child's reward is scaled by beta = min(1, N / thresh), pulling
        rarely-visited children toward uniform.  Non-fold children are first
        normalised by the discounted visit weight find_denom(N); an unvisited
        non-fold child contributes exp(0) = 1.
        """
        softmax_sum = 0
        softmax_distribution = []
        # First pass: accumulate the softmax normalising constant.
        for child in self.children:
            child_split = child.data.find_split(win_probability)
            if child.data.N[child_split] > thresh:
                beta = 1
            else:
                beta = child.data.N[child_split] / thresh
            if child.data.fold_node:
                softmax_sum += math.exp(child.data.c_reward[child_split] * beta)
            else:
                if child.data.N[child_split] != 0:
                    softmax_sum += math.exp((child.data.c_reward[child_split] /
                                             find_denom(child.data.N[child_split])) * beta)
                else:
                    softmax_sum += 1
        # Second pass: emit each child's normalised probability.
        for child in self.children:
            child_split = child.data.find_split(win_probability)
            if child.data.N[child_split] > thresh:
                beta = 1
            else:
                beta = child.data.N[child_split] / thresh
            if child.data.fold_node:
                softmax_distribution.append((child,
                                             math.exp(child.data.c_reward[child_split] * beta) /
                                             softmax_sum))
            else:
                if child.data.N[child_split] == 0:
                    softmax_distribution.append((child, 1 / softmax_sum))
                else:
                    softmax_distribution.append((child, math.exp((child.data.c_reward[child_split] /
                                                                  find_denom(child.data.N[
                                                                      child_split])) * beta) / softmax_sum))
        return softmax_distribution

    def select_child(self, win_probability, greedy=True, LOG=None, prob=25):
        """Sample a child and return (child, selection_probability).

        With probability ~1/(prob+1) (when *greedy*) an epsilon-greedy uniform
        pick is made; otherwise the softmax distribution is sampled.
        """
        # Zips the distribution into: Children, distribution values
        dis = self.find_distribution(win_probability)
        Children, distribution = zip(*dis)
        # Some probability of being epsilon-greedy:
        if greedy and random.randint(0, prob) == 1:
            child = random.choice(Children)
            if LOG is not None:
                LOG.log("E-greedy child selection")
            return child, 1 / len(Children)
        # Otherwise use the soft-max distribution
        if LOG is not None:
            LOG.children_log(dis)
        child = random.choices(population=Children, weights=distribution, k=1)[0]
        return child, distribution[Children.index(child)]

    def is_leaf(self):
        """True when this node has no children."""
        if len(self.children) == 0:
            return True
        else:
            return False

    def add_child(self, new_node):
        """Append *new_node* to this node's children."""
        self.children.append(new_node)

    def local_node(self):
        """Return a shallow copy of this node whose children are themselves
        shallow copies with their own children severed (depth-1 snapshot)."""
        children = []
        for c in self.children:
            children.append(copy.copy(c))
        new_node = copy.copy(self)
        new_node.children = children
        for c in new_node.children:
            c.children = []
        return new_node

    def __str__(self):
        return str((self.identifier.name, self.data.__str__(), self.maximumBet))
def find_denom(n, c=0.998):
    """Return the discounted visit weight: the geometric sum
    1 + c + ... + c**(n-1), i.e. (c**n - 1) / (c - 1)."""
    numerator = c ** n - 1
    return numerator / (c - 1)
|
from accurate_bg_check.client import BgCheck

# Demo: instantiate the background-check API client.
# Replace the placeholder key/secret strings below with your real credentials.
client = BgCheck('CLIENT_KEY', 'CLIENT_SECRETE')
from conans import ConanFile, tools, AutoToolsBuildEnvironment
from conans.errors import ConanInvalidConfiguration
import os
class SystemccciConan(ConanFile):
    """Conan recipe for the Accellera SystemC CCI library, built by driving
    the upstream Makefile through AutoToolsBuildEnvironment."""
    name = "systemc-cci"
    version = "1.0.0"
    description = """SystemC Configuration, Control and Inspection library"""
    homepage = "https://www.accellera.org/"
    url = "https://github.com/conan-io/conan-center-index"
    license = "Apache-2.0"
    topics = ("simulation", "modeling", "esl", "cci")
    settings = "os", "compiler", "build_type", "arch"
    options = {
        "shared": [True, False],
        "fPIC": [True, False]
    }
    default_options = {
        "shared": False,
        "fPIC": True
    }
    requires = "systemc/2.3.3"
    generators = "make"
    exports_sources = "patches/**"

    @property
    def _source_subfolder(self):
        # Conventional staging directory for the unpacked upstream sources.
        return "source_subfolder"

    def config_options(self):
        # fPIC has no meaning on Windows; remove the option there.
        if self.settings.os == "Windows":
            del self.options.fPIC

    def configure(self):
        if self.settings.os == "Windows":
            raise ConanInvalidConfiguration("Windows build not supported")
        # CCI requires at least C++11.
        tools.check_min_cppstd(self, "11")

    def source(self):
        tools.get(**self.conan_data["sources"][self.version])
        # Normalise the extracted folder name to the staging directory.
        os.rename("cci-{}".format(self.version), self._source_subfolder)

    def build(self):
        for patch in self.conan_data["patches"][self.version]:
            tools.patch(**patch)
        env_build = AutoToolsBuildEnvironment(self)
        # Point the upstream Makefile at the Conan-generated build flags.
        args = ['CONAN_MAKE_FILE={}'.format(
            os.path.join(self.build_folder, "conanbuildinfo.mak"))]
        with tools.chdir(os.path.join(self._source_subfolder, "src")):
            env_build.make(args=args, target='clean')
            env_build.make(args=args)

    def package(self):
        self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
        self.copy("NOTICE", dst="licenses", src=self._source_subfolder)
        src_dir = os.path.join(self._source_subfolder, "src")
        self.copy("*.h", dst="include", src=src_dir)
        self.copy("cci_configuration", dst="include", src=src_dir)
        lib_dir = os.path.join(self._source_subfolder, "lib")
        # Ship only the artifact kind that matches the `shared` option.
        if self.options.shared:
            self.copy("*.so", dst="lib", src=lib_dir)
        else:
            self.copy("*.a", dst="lib", src=lib_dir)

    def package_info(self):
        self.cpp_info.libs = ["cciapi"]
|
import glob
import pickle
def act2idGen(mrgs):
    """Scan oracle files for parser-action lines and pickle an
    action -> integer-id map to ../model/act_map.pkl.

    A line counts as an action when it mentions SHIFT, RIGHT or LEFT and does
    not contain "][" (bracket lines in the oracle format).  Ids start at 1 and
    are assigned over the *sorted* unique actions so the mapping is
    reproducible across runs (set iteration order is not stable); id 0 is
    reserved for the special "empty" action.
    """
    def is_action(line):
        return any(key in line for key in ("SHIFT", "RIGHT", "LEFT"))

    actions = set()
    for mrg in mrgs:
        with open(mrg) as f:
            for line in f:
                if "][" not in line and is_action(line):
                    actions.add(line.rstrip())
    act_map = {action: idx for idx, action in enumerate(sorted(actions), start=1)}
    act_map["empty"] = 0
    with open("../model/act_map.pkl", "wb") as f:
        pickle.dump(act_map, f)
if __name__ == '__main__':
    # Build the action-id map from every oracle file of the Penn treebank run.
    mrgs = glob.glob("../auto/Penn_Oracle/*/*.oracle")
    act2idGen(mrgs)
|
from django.shortcuts import render
# Create your views here.
from django.contrib.auth.models import User, Group
from rest_framework import permissions
from details.serializers import EvaluationSerializer, GroupSerializer, CriteriaSerializer
from .models import Evaluation, Group, Criteria
from rest_framework.views import APIView
from .serializers import *
from rest_framework.response import Response
import datetime
from rest_framework import status
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.authtoken.models import Token
from django.contrib.auth import authenticate, login
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
# class EvaluationView(APIView):
# def get(self, request):
# userdata= request.user
# students = Student.objects.filter(teacher__user = userdata )
# studentdata = {}
# for x in students:
# # movie_id= request.query_params.get('id')
# evaluations = Evaluation.objects.filter(student=x)
# lseval = {}
# for y in evaluations:
# id1 = y.id
# groupdata = Group.objects.filter(evaluation=id1)
# ls = {}
# for i in groupdata:
# temp = Criteria.objects.filter(group = i.id)
# ser4 = CriteriaSerializer(temp, many=True)
# text = "group_" + str(i.id)
# ls[text] = ser4.data
# ser2 = GroupSerializer(groupdata, many=True)
# ser = EvaluationSerializer(y, many=True)
# ls["Group"]=ser2.data
# txt = "eval_"+str(y.id)
# lseval[txt] = ls
# print(lseval)
# # data= { "Evaluations": ser.data, "Group": ser2.data}
# # ls["Evaluations"]=ser.data
# # ls["Group"] = ser2.data
# # print(lseval)
# studentdata[x.student_name] = lseval
# return Response(studentdata)
class StudentCreationView(APIView):
    """Create a Student record from submitted form fields."""

    # NOTE(review): this handler mutates state on GET and reads request.POST,
    # which is empty for a genuine GET request (KeyError).  Presumably it
    # should be a post() handler reading request.data — confirm with the
    # frontend before changing the accepted HTTP method.
    def get(self, request):
        studentname = request.POST['studentname']
        gender = request.POST['gender']
        phonenumber = request.POST['phonenumber']
        rollno = request.POST['rollno']
        Student.objects.create(student_name = studentname, gender = gender, phone_number = phonenumber, roll_no = rollno)
        return Response({"Success":"Student Created Successfully"})
class EvaluationView(APIView):
    """Return, for each student of the requesting teacher, every evaluation
    with its criteria groups, serialized criteria and per-group score
    percentages (share of selected criteria)."""

    def get(self, request):
        teacher = request.user
        studentdata = {}
        for student in Student.objects.filter(teacher=teacher):
            lseval = {}
            for evaluation in Evaluation.objects.filter(student=student):
                groups = Group.objects.filter(evaluation=evaluation.id)
                ls = {}
                groupscorelist = []
                for group in groups:
                    # Materialise once: serialized below and counted here.
                    crit_list = list(Criteria.objects.filter(group=group.id))
                    ls["group_" + str(group.id)] = CriteriaSerializer(crit_list, many=True).data
                    selected = sum(1 for c in crit_list if c.selected)
                    total = len(crit_list)
                    # Guard: a group without criteria previously raised
                    # ZeroDivisionError; report it as 0%.
                    per = (selected / total) * 100 if total else 0
                    groupscorelist.append({group.group_name: per})
                ls["Group"] = GroupSerializer(groups, many=True).data
                ls["groupscores"] = groupscorelist
                lseval["eval_" + str(evaluation.id)] = ls
            studentdata[student.student_name] = lseval
        return Response(studentdata)
class IndividualEval(APIView):
    """Return a flat list of per-group score percentages, accumulated over
    every evaluation of every student taught by the requesting teacher.

    Response shape: {"groupscores": [{group_name: percentage}, ...]} — the
    list intentionally spans all students, matching the original behavior.
    """

    def get(self, request):
        groupscorelist = []
        for student in Student.objects.filter(teacher=request.user):
            for evaluation in Evaluation.objects.filter(student=student):
                for group in Group.objects.filter(evaluation=evaluation.id):
                    crit_list = list(Criteria.objects.filter(group=group.id))
                    selected = sum(1 for c in crit_list if c.selected)
                    total = len(crit_list)
                    # Guard: a group without criteria previously raised
                    # ZeroDivisionError; report it as 0%.
                    per = (selected / total) * 100 if total else 0
                    groupscorelist.append({group.group_name: per})
        # The per-student nested dict the original built was never returned,
        # so that dead serialization work has been dropped.
        return Response({'groupscores': groupscorelist})
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 20 14:54:39 2019
@author: Cobi
"""
from SlowerThanLight.location import Location
class Event(Location):
    """A space-time location annotated with the type and compressed
    description of the Physical object that occupied it at that moment.

    The description is not a Physical itself — only enough data to later
    reconstruct one and inspect what had happened at this Event.
    """

    def __init__(self, loc, phystype, descrip):
        super().__init__(loc.t, loc.x, y=loc.y, z=loc.z)
        self.phystype = phystype
        self.descrip = descrip

    def is_visible(self, loc2, c=10):
        """True when light (speed ``c``) has had time to travel from this
        Event to the viewing space-time location ``loc2``."""
        assert (self.dim() == loc2.dim())
        # Light must cover the spatial separation within the elapsed time.
        return (loc2.t - self.t) * c >= self.length_to(loc2)

    def get_image(self):
        """Reconstruct a Physical of the stored subtype from the description.

        The returned Physical is a 'ghost': not registered with the Universe
        and owning no Worldline.
        """
        return self.phystype.decompress(self.descrip)
|
from aiohttp import web

# Module-level route table and application instance shared by the handlers below.
routes = web.RouteTableDef()
app = web.Application()
# It'd probably be better to namespace this in a v1 file or such?
@routes.post("/v1/webhook/{hook}")
async def v1_webhook(request):
    """
    A webhook endpoint that accepts a message for the named hook.
    """
    hook = request.match_info.get('hook')
    body = f"This could be a webhook! for the hook {hook}"
    return web.Response(text=body)
|
# cp-like copy utility
import sys

if len(sys.argv) != 3:
    print('usage: cp source_file target_file')
    sys.exit()
source_file,target_file=sys.argv[1],sys.argv[2]
with open(source_file,'rb') as read_f,open(target_file,'wb') as write_f:
    for line in read_f:
        write_f.write(line)

# Exercise: implement tail -f on top of seek
import time

with open('test.txt','rb') as f:
    # Jump to the end of the file (whence=2), then poll for appended lines.
    f.seek(0,2)
    while True:
        line=f.readline()
        if line:
            print(line.decode('utf-8'))
        else:
            time.sleep(0.2)

#%% Load the whole file into memory, edit it there, then overwrite the file on disk
import os

with open('a.txt') as read_f,open('.a.txt.swap','w') as write_f:
    data=read_f.read()  # reads everything into memory; slow for very large files
    data=data.replace('albert','NB')  # edit in memory
    write_f.write(data)  # write the new file in one go
os.remove('a.txt')
os.rename('.a.txt.swap','a.txt')

# Stream the file line by line into a new file, then replace the original with it
import os

with open('a.txt') as read_f,open('.a.txt.swap','w') as write_f:
    for line in read_f:
        line=line.replace('albert','NB')
        write_f.write(line)
os.remove('a.txt')
os.rename('.a.txt.swap','a.txt')

# Exercise
'''
1. 文件a.txt内容:每一行内容分别为商品名字,价钱,个数,求出本次购物花费的总钱数
apple 10 3
tesla 100000 1
mac 3000 2
lenovo 30000 3
chicken 10 3
2. 修改文件内容,把文件中的mac都替换成linux
'''
# Write the sample data
f = open('a.txt','w',encoding='utf-8')
f.write('apple 10 3\ntesla 100000 1\nmac 3000 2\nlenovo 30000 3\nchicken 10 3\n')
f.close()
# Do the replacement (mac -> linux) via a swap file
import os

with open('a.txt') as read_f,open('.a.txt.swap','w') as write_f:
    for line in read_f:
        line=line.replace('mac','linux')
        write_f.write(line)
os.remove('a.txt')
os.rename('.a.txt.swap','a.txt')
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
@author: li
'''
import copy
import sys
import Start
import time
class DFS(object):
    """Depth-first search over a 0/1 grid from (0, 0) to (size-1, size-1).

    Cells containing 1 are walls.  The search keeps an explicit stack and
    returns the first path that reaches the goal (not necessarily the
    shortest one).
    """

    # init this class
    def __init__(self):
        self.node_to_go = {}          # kept for API compatibility (unused bookkeeping)
        self.node_has_been = dict()   # cell -> distance at which it was expanded (0 = unexpanded)
        self.size = -1                # grid edge length of the last search
        self.min_distance = -1        # pruning bound on path length
        self.optimal_road = []        # cells of the found path, goal first

    # run DFS
    def dfs_route(self, map, size):
        """Run the search on *map* (size x size nested lists).

        Returns (1, route, distance, pops, max_stack) on success, where
        route lists the path cells goal-first, or (0, None) when the goal is
        unreachable.
        """
        self.node_to_go = {}
        self.node_has_been = dict()
        node_stack_map = dict()  # 1 while a cell is waiting on the stack
        # init the map bookkeeping
        for i in range(0, size):
            for j in range(0, size):
                self.node_has_been[(i, j)] = 0
                node_stack_map[(i, j)] = 0
        self.size = size
        self.min_distance = size * size
        start_node = (0, 0)
        end_node = (size - 1, size - 1)
        node_stack = [start_node]
        # child -> parent map used to reconstruct the route
        tracert = dict()
        tracert[(0, 0)] = [None]
        distance = dict()
        distance[(0, 0)] = 0
        count = 0       # number of nodes popped (search effort)
        max_stack = 0   # high-water mark of the stack size
        while len(node_stack) > 0:
            count += 1
            current_node = node_stack.pop()
            node_stack_map[current_node] = 0
            current_distance = distance[current_node]
            if max_stack < len(node_stack) + 1:
                max_stack = len(node_stack) + 1
            self.node_has_been[current_node] = current_distance
            # prune: a path already as long as the best bound cannot improve
            if current_distance >= self.min_distance:
                continue
            # expand the four neighbours in the original fixed order:
            # up, left, down, right
            for di, dj in ((-1, 0), (0, -1), (1, 0), (0, 1)):
                target = (current_node[0] + di, current_node[1] + dj)
                if self.move_to_node(target, map, current_distance + 1, size, node_stack_map):
                    if target == end_node:
                        route = self.reach_end(start_node, end_node, tracert,
                                               current_distance + 1, current_node, map)
                        return 1, route, current_distance + 1, count, max_stack
                    node_stack.append(target)
                    node_stack_map[target] = 1
                    tracert[target] = current_node
                    distance[target] = current_distance + 1
        return 0, None

    def move_to_node(self, target, map, distance, size, node_stack):
        """Return 1 when *target* is inside the grid, not a wall, not yet
        expanded and not already waiting on the stack; otherwise 0."""
        if 0 <= target[0] < size \
                and 0 <= target[1] < size \
                and map[target[0]][target[1]] != 1 \
                and (self.node_has_been[target] == 0) \
                and node_stack[target] == 0:
            return 1
        else:
            return 0

    def reach_end(self, start_node, end_node, tracert, distance, current_node, map):
        """Reconstruct the path (goal first) once *current_node* touches the
        goal, store it on the instance and return it."""
        tracert[end_node] = current_node
        route = [end_node]
        while current_node != start_node:
            route.append(current_node)
            current_node = tracert[current_node]
        route.append(start_node)
        # BUG FIX: this was assigned to a misspelled attribute (optima_road),
        # leaving self.optimal_road empty and print_optimal() useless.
        self.optimal_road = route
        return route

    def print_optimal(self, map):
        """Print the found route and the grid with path cells marked 2 (in
        magenta); return the annotated grid.

        Uses sys.stdout.write so the method works under both Python 2 and 3
        (the original Python-2-only print statements broke py3 parsing).
        """
        sys.stdout.write("optimal_road %s\n" % (self.optimal_road,))
        sys.stdout.write("distance: %d\n" % len(self.optimal_road))
        result = copy.deepcopy(map)
        for node in self.optimal_road:
            result[node[0]][node[1]] = 2
        for k in range(self.size):
            for j in range(self.size):
                if result[k][j] == 2:
                    # ANSI bright magenta "2" for path cells
                    sys.stdout.write("\033[1;35m2\033[0m ")
                else:
                    sys.stdout.write("%s " % result[k][j])
            sys.stdout.write("\n")
        return result
if __name__ == "__main__":
    print "script_name", sys.argv[0]
    for i in range(1, len(sys.argv)):
        print "argument", i, sys.argv[i]
    print ('start initialize')
    # set the size and density of this matrix
    size = 10
    start = Start.Start(size, 0.3)
    # start.print_matrix()
    start.paint_random()
    # start.print_matrix()
    dfs = DFS()
    print ('start run')
    # NOTE(review): time.clock() was removed in Python 3.8; this guard is
    # Python-2-only as written (see also the print statements above).
    start_time = time.clock()
    # print dfs.dfs_route(start.get_matrix(), size)
    result = dfs.dfs_route(start.get_matrix(), size)
    elapsed = (time.clock() - start_time)
    print result
    print("Time used:", elapsed)
    if result[0] == 1:
        dfs.print_optimal(start.get_matrix())
        print ('over')
    else:
        print "no available way"
        print "over"
#!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'MFC'
__time__ = '18/1/20 20:50'
import time

# data = [3, 7, 9, -1, 20, 30, -2, -7, 18]
data = [3, 7, 9, -1, 20, 30, -2, -7, 18, 33, 34, 22, 79, -21, -7]

# Method 1 -- use filter() to drop the negative numbers.
# time.clock() was removed in Python 3.8; perf_counter() is the replacement.
start = time.perf_counter()
r1 = filter(lambda x: x >= 0, data)
end = time.perf_counter()
print("R1 run time: %f" % (end-start))
print(list(r1))  # in Python 3 filter() is lazy: wrap in list() to display

# Method 2 -- list comprehension (faster, so it is the preferred idiom).
start = time.perf_counter()
r2 = [x1 for x1 in data if x1 >= 0]
end = time.perf_counter()
print("R2 run time: %f" % (end-start))
print(r2)

print("-"*70)
|
import json
import os
import datetime as dt
import pandas as pd
import numpy as np
from modules.decor import python_operator
from airflow.hooks.base import BaseHook
from airflow.models import Variable
from sqlalchemy import create_engine, String, Integer, Float
#from decor import python_operator
@python_operator()
def connection_operator(**context):
    """Fetch the 'airflow_dwh' connection hook.

    NOTE(review): the hook is created but never used or returned — presumably
    this only verifies the connection is resolvable; confirm the intent.
    """
    postg_hook = BaseHook.get_hook('airflow_dwh')
@python_operator()
def pushes_to_xcom (**context):
    """Placeholder task for an XCom push; currently a no-op."""
    #context['task_instance'].xcom_push('titanic', value=df_xcom)
    pass
def get_path(file_name):
    """Return *file_name* resolved inside the current user's home directory."""
    home_dir = os.path.expanduser('~')
    return os.path.join(home_dir, file_name)
@python_operator()
def download_titanic_dataset(**context):
    """Download the Titanic CSV, save it in the home directory, and push the
    full dataset to XCom as JSON."""
    url = 'https://web.stanford.edu/class/archive/cs/cs109/cs109.1166/stuff/titanic.csv'
    df = pd.read_csv(url)
    df.to_csv(get_path('titanic.csv'), encoding='utf-8')
    # orient='table' keeps the schema so the pulling task can rebuild dtypes.
    df_xcom = df.to_json(orient='table')
    context['task_instance'].xcom_push('titanic', value=df_xcom)
    # return df_xcom
@python_operator()
def mean_fare_per_class(**context):
    """Average the fare per passenger class and push the result to XCom."""
    titanic_df = pd.read_csv(get_path('titanic.csv'))
    avg_df = titanic_df.groupby(['Pclass']).mean('Fare')
    # Keep only the Fare column of the per-class means.
    avg_result = avg_df[['Fare']]
    #avg_result.to_csv(get_path('titanic_mean_fares.csv'))
    avg_xcom = avg_result.to_json(orient='table')
    context['task_instance'].xcom_push('mean_class', value=avg_xcom)
@python_operator()
def pivot_dataset():
    """Pivot passenger counts by sex x class and write titanic_pivot.csv.

    NOTE(review): unlike the sibling tasks this takes no **context — confirm
    the python_operator decorator tolerates that signature.
    """
    titanic_df = pd.read_csv(get_path('titanic.csv'))
    df = titanic_df.pivot_table(index=['Sex'],
                                columns=['Pclass'],
                                values='Name',
                                aggfunc='count').reset_index()
    df.to_csv(get_path('titanic_pivot.csv'))
@python_operator()
def pull_from_xcom(**context):
    """Pull both upstream XCom payloads, rebuild the DataFrames from their
    JSON 'table' form, and save each as a CSV in the home directory."""
    avg_pull_xcom = context['task_instance'].xcom_pull(task_ids='mean_fare_per_class', key='mean_class')
    df_pull_xcom = context['task_instance'].xcom_pull(task_ids='download_titanic_dataset', key='titanic')
    avg_pull_xcom_out =pd.read_json(avg_pull_xcom, orient='table')
    df_pull_xcom_out = pd.read_json(df_pull_xcom, orient='table')
    avg_pull_xcom_out.to_csv(get_path('avg_pull_xcom_out.csv'))
    df_pull_xcom_out.to_csv(get_path('df_pull_xcom_out.csv'))
@python_operator()
def push_to_postgresql():
    """Load the two intermediate CSVs and replace the DWH tables with them."""
    # create sql engine for sqlalchemy
    # SECURITY(review): database credentials are hard-coded in source — move
    # them into an Airflow Connection / secrets backend.
    db_string_airflow = 'postgresql://airflow_xcom:1q2w3e4r5T@192.168.147.128/data_warehouse'
    engine = create_engine(db_string_airflow)
    # read csv from previous xcom tasks
    avg_pull_xcom_df = pd.read_csv(get_path('avg_pull_xcom_out.csv'))
    df_pull_xcom_df = pd.read_csv(get_path('df_pull_xcom_out.csv'))
    # get names of tables from variables
    avg_table = Variable.get('table_dwh_mean')
    df_table = Variable.get('table_dwh_data')
    # push data to postgresql
    avg_pull_xcom_df.to_sql(avg_table, con=engine, if_exists='replace')
    df_pull_xcom_df.to_sql(df_table, con=engine, if_exists='replace')
|
# coding=UTF-8
from datetime import datetime

from django.conf import settings
from django.core.mail import send_mail, BadHeaderError
from django.http import HttpResponse
from django.shortcuts import render, render_to_response
from django.template import RequestContext
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views.generic.edit import FormView

from .forms import ContactForm
class ContactFormView(FormView):
    """Contact page: validates the form and e-mails the site managers,
    optionally sending a copy to the sender."""
    template_name = 'contact.html'
    form_class = ContactForm
    success_url = reverse_lazy('thanks')

    def form_valid(self, form):
        """Compose and send the contact mail, then redirect to success_url."""
        name = form.cleaned_data['name']
        message = form.cleaned_data['message']
        from_email = form.cleaned_data['email']
        tel = form.cleaned_data['tel']
        cc_myself = form.cleaned_data['cc_myself']
        msg_tel = ''
        if tel:
            msg_tel = u'\n\n%s: %s' % (_(u'Telefon'), tel)
        body = _(u'Am %(date)s %(name)s schrieb:') % {
            'date': datetime.now().strftime('%d.%m.%Y um %H:%M'),
            'name': name} + '\nEmail: %s\n\n%s' % (from_email,
                                                   message + msg_tel)
        # set managers as recipients
        recipients = [e for n, e in settings.MANAGERS]
        subject = u'%s%s' % (settings.EMAIL_SUBJECT_PREFIX, _(u'Kontakt'))
        try:
            if cc_myself:
                send_mail(subject,
                          _(u'Kopie Ihrer Nachricht:') + '\n\n%s' % body,
                          settings.SERVER_EMAIL, [from_email])
            send_mail(subject, body, settings.SERVER_EMAIL, recipients)
        except BadHeaderError:
            # BUG FIX: HttpResponse was referenced without being imported, so
            # a bad header raised NameError instead of this friendly message
            # (HttpResponse is now imported from django.http at file top).
            return HttpResponse(_(u'Falsches Header. Bitte eine gültige '
                                  'Emailadresse angeben.'))
        return super(ContactFormView, self).form_valid(form)
def thanks(request):
    """Render the static thank-you page shown after a successful contact."""
    # render_to_response(..., context_instance=...) was removed in Django 1.10
    # (and render_to_response entirely in 3.0); render() is the supported
    # equivalent and is imported from django.shortcuts at file top.
    return render(request, 'thanks.html', {})
|
# class Solution: # 答案为['111', '112', '113', '121', '122', '123', '131', '132', '133', '211', '212', '213', '221', '222', '223', '231', '232', '233', '311', '312', '313', '321', '322', '323', '331', '332', '333']
# # 233 这写法也超时
# def getPermutation(self, n, k): # n:元素个数 k:第k个排列
#
# res = []
# string = ''
#
# self.recursion(n, 1, string, res)
# print(res)
#
# return str(res[k])
#
# def recursion(self,n, start, string, res):
#
# if n == len(string):
# res.append(string)
# return
#
# for i in range(1, n+1):
# string += str(i)
# self.recursion(n, i + 1, string, res)
# string = string[:-1]
# class Solution: # Time Limited
# def getPermutation(self, n, k): # n:元素个数 k:第k个排列
#
# string = ''
# nums = [i for i in range(1, n+1)]
# res = self.permute(nums)
# for j in res[k-1]:
# string += str(j)
# return string
#
# def permute(self, nums):
#
# res = []
# if len(nums) == 1: # 结束条件
# return [nums]
# if len(nums) == 2: # 结束条件
# return [nums, nums[::-1]]
# for i in range(len(nums)):
# num = nums[i]
# newnums = nums[:i] + nums[i+1:]
# for item in self.permute(newnums): # 递归调用
# res.append([num] + item)
# return res
class Solution:
    """Direct (factoradic) construction of the k-th permutation.

    Recursive enumeration times out; instead each output digit is computed
    from k and (n-1)!: the leading digit's index is (k-1) // (n-1)!.
    """

    def getPermutation(self, n: 'int', k: 'int') -> 'str':
        from math import factorial
        digits = [str(d) for d in range(1, n + 1)]
        ans = ""
        while n:
            block = factorial(n - 1)           # permutations per leading digit
            idx, rem = divmod(k - 1, block)    # which digit leads, offset in its block
            ans += digits.pop(idx)
            k = rem + 1
            n -= 1
        return ans
if __name__ == '__main__':
    # Quick sanity check: the 3rd permutation of [1, 2, 3] is "213".
    s = Solution()
    ans = s.getPermutation(3,3)
    print(ans)
import argparse
import json
import logging
import pathlib
import typing
from datetime import date
import aiosqlite
import async_lru
import uvicorn
import uvloop
from starlette.applications import Starlette
from starlette.requests import Request
from starlette.responses import Response
from starlette.routing import Mount, Route
from starlette.staticfiles import StaticFiles
from starlette.templating import _TemplateResponse, Jinja2Templates
DATE_FMT = "%Y-%m-%d"  # ISO date format used for deadline comparisons below
LOGGER = logging.getLogger(__name__)
TEMPLATES = Jinja2Templates(directory="templates")
async def document_by_id(request: Request) -> Response:
    """Returns the PDF specified by `postings_id` as an application/pdf response."""
    database_path = request.app.state.database_path
    postings_id = request.path_params["postings_id"]
    document = await _retrieve_document_by_id(
        database_path=database_path, postings_id=postings_id
    )
    # Log hit/miss statistics of the LRU-cached blob lookup.
    LOGGER.info(_retrieve_document_by_id.cache_info())
    return Response(document, media_type="application/pdf")
async def homepage(request: Request) -> _TemplateResponse:
    """The landing page that presents a list of job postings.

    Shows only postings whose deadline has not yet passed, soonest first.
    """
    database_path = request.app.state.database_path
    today = date.today().strftime(DATE_FMT)
    # Parameterized query: `?` placeholder keeps the date out of the SQL string.
    query = """
    SELECT postings_id, title, superior, institution, date(deadline)
    FROM metadata
    WHERE date(deadline) >= ?
    ORDER BY date(deadline) ASC;
    """
    async with aiosqlite.connect(database_path) as connection:
        async with connection.execute(query, [today]) as cursor:
            postings = await cursor.fetchall()
    return TEMPLATES.TemplateResponse(
        "index.html", {"request": request, "postings": postings}
    )
async def result_page(request: Request) -> _TemplateResponse:
    """The result page for keyword searches.

    Renders the same template as the homepage, restricted to postings whose
    full text matches the submitted keyword.
    """
    database_path = request.app.state.database_path
    # NOTE(review): raises KeyError when 'search_keyword' is absent from the
    # query string — confirm the search form always submits this field.
    keyword = request.query_params["search_keyword"]
    today = date.today().strftime(DATE_FMT)
    postings = await _filter_postings_by_keyword(
        database_path=database_path, keyword=keyword, date=today
    )
    return TEMPLATES.TemplateResponse(
        "index.html", {"request": request, "postings": postings}
    )
def _build_app(database_path: str) -> Starlette:
    """Assemble the Starlette application and stash the database path in app state."""
    app = Starlette(
        debug=True,
        routes=[
            Route("/", homepage),
            Route("/documents/{postings_id:int}", document_by_id, name="documents"),
            Route("/results", result_page, name="results"),
            Mount("/static", app=StaticFiles(directory="static"), name="static"),
        ],
    )
    # Normalise the path once; request handlers read it back from app state.
    app.state.database_path = str(pathlib.Path(database_path))
    return app
@async_lru.alru_cache(maxsize=32)
async def _filter_postings_by_keyword(
    database_path: str, keyword: str, date: str
) -> typing.List:
    """Return postings matching `keyword` whose deadline is on or after `date`.

    Uses the FTS table's MATCH operator; results are ordered by deadline.
    Cached (LRU, 32 entries) per (database_path, keyword, date) triple.
    """
    # FIX: an `async def` already wraps its result in an awaitable, so the
    # return annotation is the resolved value (a list of rows), not
    # typing.Awaitable[typing.List].
    query = """
    SELECT m.postings_id, m.title, m.superior, m.institution, date(m.deadline)
    FROM metadata m
    INNER JOIN fulltexts f
    ON m.postings_id = f.postings_id
    WHERE date(m.deadline) >= ? AND f.text MATCH ?
    ORDER BY date(m.deadline) ASC;
    """
    async with aiosqlite.connect(database_path) as connection:
        async with connection.execute(query, [date, keyword]) as cursor:
            return await cursor.fetchall()
@async_lru.alru_cache(maxsize=32)
async def _retrieve_document_by_id(
    database_path: str, postings_id: int
) -> bytes:
    """Fetch the raw PDF blob for `postings_id` (LRU-cached, 32 entries).

    FIX: the original annotated the return as AsyncGenerator[bytes, None],
    but the coroutine resolves to the blob itself (`document[0]`), i.e. bytes.
    NOTE(review): an unknown id makes fetchone() return None and the final
    subscription raise TypeError — callers currently rely on ids existing.
    """
    query = """
    SELECT document
    FROM documents
    WHERE postings_id = ?
    ORDER BY document ASC, postings_id ASC
    """
    async with aiosqlite.connect(database_path) as connection:
        async with connection.execute(query, [postings_id]) as cursor:
            document = await cursor.fetchone()
            return document[0]
# Module-level app with a default DB path, so `uvicorn module:APP` works.
APP = _build_app(database_path="../postings.db")
if __name__ == "__main__":
    PARSER = argparse.ArgumentParser(description="Project e13 server.")
    PARSER.add_argument(
        "database_path", type=str, help="database path to sqlite3 instance",
    )
    ARGS = PARSER.parse_args()
    # Rebuild the app with the CLI-provided path, shadowing the default above.
    APP = _build_app(database_path=ARGS.database_path)
    uvloop.install()
    uvicorn.run(APP, host="127.0.0.1", port=5000, log_level="info")
|
'''
Pytest test fixtures used in the various unit tests contained in other modules.
'''
import pytest
import gerritssh
import semantic_version as SV
from gerritssh.borrowed.ssh import SSHCommandResult
@pytest.fixture()
def connected_site():
    '''
    This fixture provides a Site object, monkeypatched so that any attempt to
    execute a command acts as if a 'gerrit version' command had been executed.
    In essence it provides a properly constructed Site object which will report
    a version of 2.9.0 and a site of 'gerrit.example.com.
    '''
    class DummySSHClient(object):
        # Stand-in for the real SSH client; never opens a connection.
        def __init__(self, *args, **kwargs):
            self.connected = False
        def execute(self, command):
            import io
            import sys
            self.connected = True
            # Canned stdout of a 'gerrit version' query.
            vstr = 'gerrit version 2.9.0\n'
            if sys.version_info[0] < 3:
                vstr = unicode(vstr)  # Python 2 only: StringIO needs unicode
            result = SSHCommandResult(command,
                                      io.StringIO(),
                                      io.StringIO(vstr),
                                      io.StringIO())
            return result
        def disconnect(self):
            self.connected = False
    s = gerritssh.Site('gerrit.example.com')
    # Inject the dummy client through the name-mangled private attribute.
    s._Site__ssh = DummySSHClient()
    assert not s.connected, 'Thinks its connected after construction'
    s.connect()
    assert s.connected
    assert s.version == SV.Version('2.9.0')
    return s
@pytest.fixture
def dummy_site():
    '''
    Factory fixture: returns f(exec_func, version) which builds a Site whose
    execute() delegates to exec_func and which always reports `version` and
    a connected state.
    '''
    def f(exec_func, version):
        class DummySite(gerritssh.Site):
            def __init__(self):
                super(DummySite, self).__init__('gerrit.example.com')
            def execute(self, cmd):
                # Delegate command execution to the caller-supplied stub.
                return exec_func(cmd)
            @property
            def version(self):
                return SV.Version(version)
            @property
            def connected(self):
                return True
        return DummySite()
    return f
'''
The file 'testreview.json' contains the result of a query for a (randomly
chosen) open code review at review.openstack.org. It is used within numerous
tests which expect a properly formatted response from a working Gerrit
instance.
'''
# Read once at import time; the fixtures below hand out this cached text.
with open('test/testreview.json', 'r') as f:
    __random_text = f.read()
@pytest.fixture()
def open_review_text():
    '''
    This fixture returns the plain text response of a query to fetch a single
    open review.
    '''
    return __random_text
@pytest.fixture()
def open_review_json(open_review_text):
    '''
    This fixture provides the canned open review, converted to a JSON
    dictionary (one parsed object per non-empty line of the response).
    '''
    import json
    parsed = []
    for line in open_review_text.split('\n'):
        if line:
            parsed.append(json.loads(line.strip()))
    return parsed
@pytest.fixture()
def open_review(open_review_json):
    '''
    This fixture provides a Review object initialized with a single open
    review.
    '''
    from gerritssh import Review
    r = Review(open_review_json[0])
    # Sanity check: Review keeps the raw JSON dict untouched.
    assert r.raw == open_review_json[0]
    return r
|
import datetime
def curiousClock(someTime, leavingTime):
    """Return what the backwards-running clock shows at leavingTime.

    The clock started running backwards at `someTime`; while real time
    advances from someTime to leavingTime, the clock moves the same amount
    in the opposite direction, so it shows someTime - (leavingTime - someTime).
    Both arguments and the result use the "YYYY-MM-DD HH:MM" format.
    """
    fmt = "%Y-%m-%d %H:%M"
    # strptime replaces the original's duplicated manual split-and-int parsing.
    start = datetime.datetime.strptime(someTime, fmt)
    leave = datetime.datetime.strptime(leavingTime, fmt)
    shown = start - (leave - start)
    return shown.strftime(fmt)
# Example from the problem statement; expected output: "2016-08-24 11:20".
someTime = "2016-08-26 22:40"
leavingTime = "2016-08-29 10:00"
print(curiousClock(someTime, leavingTime))
'''
Benjamin recently bought a digital clock at a magic trick shop.
The seller never told Ben what was so special about it, but mentioned that
one day Benjamin would be faced with a surprise.
Indeed, the clock did surprise Benjamin: without warning, at someTime
the clock suddenly started going in the opposite direction! Unfortunately,
Benjamin has an important meeting very soon, and knows that at leavingTime
he should leave the house so as to not be late. Ben spent all his money on the clock,
so has to figure out what time his clock will show when it's time to leave.
Given the someTime at which the clock started to go backwards,
find out what time will be shown on the curious clock at leavingTime.
For your convenience, here is the list of months lengths (from January to December, respectively):
Months lengths: 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31.
Please, note that in leap years February has 29 days.
Example
For someTime = "2016-08-26 22:40" and leavingTime = "2016-08-29 10:00", the output should be
curiousClock(someTime, leavingTime) = "2016-08-24 11:20".
There are 2 days, 11 hours and 20 minutes till the meeting.
Thus, the clock will show 2016-08-24 11:20 at the leavingTime.
'''
|
import pygame
import random
class Sprite(pygame.sprite.Sprite):
    """Thin base class so all game objects share pygame's sprite machinery."""
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
class Bot(Sprite):
    """A 15x15 sprite that wanders the 500x500 board, wrapping at the edges."""

    def __init__(self, x, y):
        Sprite.__init__(self)
        self.speed = 5
        self.width = 15
        self.height = 15
        self.directions = ["N", "E", "S", "W"]
        self.current_direction = ""
        self.image = pygame.Surface([self.width, self.height])
        self.image.blit(pygame.image.load("test_sprite.png"), (0, 0))
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y

    def move_self(self, direction):
        """Step once in `direction` and wrap around the screen edges."""
        if direction == "N":
            self.rect.y -= self.speed
        elif direction == "S":
            self.rect.y += self.speed
        elif direction == "E":
            self.rect.x += self.speed
        elif direction == "W":
            self.rect.x -= self.speed
        # Toroidal wrap-around on both axes.
        right_edge = 500 - self.width
        if self.rect.x < 0:
            self.rect.x = right_edge
        elif self.rect.x > right_edge:
            self.rect.x = 0
        bottom_edge = 500 - self.height
        if self.rect.y < 0:
            self.rect.y = bottom_edge
        elif self.rect.y > bottom_edge:
            self.rect.y = 0

    def rand_direct(self):
        """Pick a compass direction uniformly at random."""
        idx = random.randint(0, len(self.directions) - 1)
        return self.directions[idx]

    def opposite(self, direction):
        """Return the opposite compass direction (None for unknown input)."""
        return {"N": "S", "S": "N", "E": "W", "W": "E"}.get(direction)
class PathMarker(Sprite):
    """A static 15x15 sprite dropped at each cell the bot has visited."""
    def __init__(self, x, y):
        Sprite.__init__(self)
        self.image = pygame.Surface([15, 15])
        self.image.blit(pygame.image.load("path.png"), (0, 0))
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
pygame.init()
screen = pygame.display.set_mode([500, 500])
pygame.display.set_caption("Random Path")
bckg = pygame.image.load("template_bckg.png")
clock = pygame.time.Clock()
f_p_s = 30
path_bot = Bot(250, 250)
Bot_Group = pygame.sprite.Group()
Bot_Group.add(path_bot)
move_count = 0
# Re-roll the bot's direction every `interval` moves; interval drawn from m_i.
m_i = (10, 20)
interval = random.randint(m_i[0], m_i[1])
Path = pygame.sprite.Group()
playing = True
# Main loop: drop a path marker each frame, occasionally change direction,
# and stop after 777 moves or when the window is closed.
while move_count < 777 and playing:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            playing = False
    for each_bot in Bot_Group:
        path_marker = PathMarker(each_bot.rect.x, each_bot.rect.y)
        Path.add(path_marker)
        if move_count % interval == 0:
            each_bot.current_direction = each_bot.rand_direct()
            interval = random.randint(m_i[0], m_i[1])
        each_bot.move_self(each_bot.current_direction)
        move_count += 1
    screen.blit(bckg, [0, 0])
    Path.draw(screen)
    Bot_Group.draw(screen)
    pygame.display.flip()
    clock.tick(f_p_s)
pygame.quit()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-15 13:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: replaces the supplier receipt with shipping
    # confirmation fields on the Installation model.
    dependencies = [
        ('installations', '0005_auto_20160115_1300'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='installation',
            name='supplier_receipt',
        ),
        migrations.AddField(
            model_name='installation',
            name='shipping_confirmed',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='installation',
            name='shipping_receipt',
            field=models.FileField(null=True, upload_to='shipping_receipts/%Y/%m/%d'),
        ),
    ]
|
import asyncio
import time
import itertools
async def tcp_echo_client(message,id):
    """Send `message` to the local echo server on port 5000 and print the reply.

    `id` is accepted only for starmap compatibility with the (letter, count)
    pairs in `responses`; it is unused.
    """
    reader, writer = await asyncio.open_connection('127.0.0.1', 5000)
    print('Send: %r' % message)
    writer.write(message.encode())
    await writer.drain()
    # BUG FIX: time.sleep(5) blocked the whole event loop, serializing every
    # client; asyncio.sleep yields control so the clients run concurrently.
    await asyncio.sleep(5)
    data = await reader.read(100)
    print('Received: %r' % data.decode())
    print('Close the socket')
    writer.close()
# (message, id) pairs; each becomes one tcp_echo_client(message, id) call.
responses = [['A', 8], ['B', 10], ['C', 7], ['D', 9], ['E', 8], ['F', 4], ['G', 7], ['H', 10], ['I', 8], ['J', 5], ['K', 9], ['L', 7], ['M', 7], ['N', 8], ['O', 2], ['P', 2], ['Q', 2], ['R', 9], ['S', 10], ['T', 7], ['U', 3], ['V', 9], ['W', 4], ['X', 4], ['Y', 10], ['Z', 9]]
loop = asyncio.get_event_loop()
# for i in range(1):
#     message = f'Message from client {i}!'
#     loop.run_until_complete(tcp_echo_client(message))
# Create one client coroutine per response and run them all concurrently.
tasks = itertools.starmap(tcp_echo_client, responses)
loop.run_until_complete(asyncio.gather(*tasks))
loop.close()
import argparse
import csv
import matplotlib
matplotlib.use("Agg") ## for remote run
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import math
import numpy as np
from SOMMapper import SOMMapper
from UMatrixMapper import UMatrixMapper
from Node import Node
class SOM:
    """Self-organizing map trained through mrjob mapper jobs.

    The grid state is exchanged with the SOMMapper/UMatrixMapper jobs via
    intermediate CSV files on disk.
    """
    def __init__(self, n, x_len, y_len, epochs, theta_f):
        '''
        x_len, y_len: grid size
        n: vectors length (i.e. number of attributes in dataset)
        epochs: number of iterations
        theta_naught, theta_f: decay learning constants
        grid: Node grid
        '''
        self.x_len = x_len
        self.y_len = y_len
        self.n = n
        self.epochs = epochs
        self.theta_naught = math.sqrt(self.x_len * self.y_len)
        self.learning_factor = theta_f / self.theta_naught
        self.grid = [[Node(i, j, self.n) for i in range(self.x_len)] for j in range(self.y_len)]
        ## File names used incidentally during execution
        self.map_file_name = "map_file.csv"
        self.nodes_file_name = "node_list.csv"
        self.u_matrix_output = "u_matrix.png"
    def train_map(self, input_file):
        """Run `epochs` passes of the SOMMapper job over `input_file`,
        shrinking the neighborhood width each pass."""
        for i in range(self.epochs):
            ## Set neighborhood width to exponential decay
            # NOTE(review): under Python 2, i / self.epochs is integer
            # division (0 for every i < epochs) — confirm target runtime.
            theta = self.theta_naught * (self.learning_factor ** (i / self.epochs))
            ## Write current grid to file
            # NOTE(review): writerows() serialises Node objects directly;
            # confirm Node's string form is what SOMMapper expects to parse.
            with open(self.map_file_name, "w") as file_name:
                writer = csv.writer(file_name)
                writer.writerows(self.grid)
            ## For every training vector, calculate grid node weights
            compute_weights_job = SOMMapper(args = [str(input_file), "--map",
                str(self.map_file_name), "--n", str(self.n), "--theta", str(theta)])
            ## Read output from SOMMapper
            with compute_weights_job.make_runner() as compute_weights_runner:
                compute_weights_runner.run()
                self.extract_weights(compute_weights_job, compute_weights_runner)
    def extract_weights(self, job, runner):
        ## Helper function to extract SOMMapper output
        for line in runner.stream_output():
            (x, y), value = job.parse_output_line(line)
            ## Update grid weigts
            self.grid[x][y].update_weights(value)
    def get_u_matrix(self):
        """Compute the U-matrix (unified distance matrix) for the trained grid."""
        ## Write current grid to file
        self.write_nodes_to_file()
        ## Calculate the u-matrix height of each grid node
        compute_u_matrix_job = UMatrixMapper(args = [str(self.nodes_file_name),
            "--map", str(self.map_file_name)])
        ## Read output from UMatrixMapper
        with compute_u_matrix_job.make_runner() as compute_u_matrix_runner:
            compute_u_matrix_runner.run()
            matrix = self.extract_u_matrix(compute_u_matrix_job, compute_u_matrix_runner)
        return matrix
    def write_nodes_to_file(self):
        ## Helper function to write each grid node's Cartesian coordinates
        ## and weights to file
        with open(self.nodes_file_name, "w") as file_name:
            writer = csv.writer(file_name)
            for i, row in enumerate(self.grid):
                for j, node in enumerate(row):
                    writer.writerow([i, j] + node.weights)
    def extract_u_matrix(self, job, runner):
        ## Helper function to extract UMatrixMaper output
        u_matrix = [[0] * self.y_len for x in range(self.x_len)]
        for line in runner.stream_output():
            (x, y), value = job.parse_output_line(line)
            u_matrix[x][y] = value
        return np.array(u_matrix)
    def get_bmus(self, vector_list):
        ## Calculate BMUs (best-matching units) for a list of vectors
        bmus = [Node.compute_winning_vector(self.grid, x) for x in vector_list]
        return np.array(bmus)
    '''
    Graph function adapted from Peter Wittek: https://github.com/peterwittek/somoclu
    '''
    def graph_umatrix(self, matrix, bmus = None, labels = None):
        """Render the U-matrix as a heat map and save it to self.u_matrix_output."""
        plt.clf()
        plt.imshow(matrix, aspect = "auto", cmap = cm.coolwarm)
        plt.axis("off")
        ## Set colorbar legend
        cmap = cm.ScalarMappable(cmap = cm.coolwarm)
        cmap.set_array(matrix)
        plt.colorbar(cmap, orientation = "vertical", shrink = .7)
        ## Add scatter points to graph representing vector BMUs
        if bmus is not None:
            plt.scatter(bmus[:, 0], bmus[:, 1], c = "gray")
        ## Add labels to vector scatter points
        if labels is not None:
            for label, col, row in zip(labels, bmus[:, 0], bmus[:, 1]):
                plt.annotate(label, xy = (col, row), xytext = (10, -5),
                    textcoords = "offset points", ha = "left", va = "bottom",
                    bbox = {"boxstyle": "round", "fc": "lightgray"})
        plt.savefig(self.u_matrix_output)
if __name__ == "__main__":
    ## Define and collect command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("file_name", help = "file name of song vectors")
    parser.add_argument("n", type = int, help = "length of vector attributes")
    parser.add_argument("--x_len", default = 10, type = int, help = "number of grid rows")
    parser.add_argument("--y_len", default = 10, type = int, help = "number of grid columns")
    parser.add_argument("--epochs", default = 15, type = int, help = "number of iterations")
    parser.add_argument("--theta_f", default = .2, type = float, help = "exponential decay constant")
    args = parser.parse_args()
    ## Create and train map
    som_map = SOM(args.n, args.x_len, args.y_len, args.epochs, args.theta_f)
    som_map.train_map(args.file_name)
    ## Create and save u-matrix
    matrix = som_map.get_u_matrix()
    som_map.graph_umatrix(matrix)
    # Example vectors/labels kept for reference:
    #goatwhore = [0, 0.2946608101, 0.0603604847]
    #throbbing_gristle = [0, 0.4670240136, 0.0830032339]
    #britney_spears = [0.9930382894, 0.3205599089, 0.0480877322]
    #labels = ["Goatwhore", "Throbbing Gistle", "Britney Spears"]
    #bmus = som_map.get_bmus([goatwhore, throbbing_gristle, britney_spears])
import pygame, sys
import random as rd
import os
# Colours (RGB tuples)
WHITE   = (255, 255, 255)
BLACK   = (  0,   0,   0)
GREEN   = (  0, 200,   0)
PINK    = (238,  18, 137)
ORANGE  = (238,  69,   0)
VIOLETTE= (139,   0, 139)
BLUE    = (  0,   0, 139)
BLUE2   = (  0,   0, 255)
CYAN    = (  0, 255, 255)
BROWN   = (139,  69,  19)
BROWN1  = (210, 105,  30)
GREY    = (119, 136, 153)
GREY1   = ( 92,  92,  92)
GREEN2  = (  0, 100,   0)
GREEN3  = (202, 255, 112)
GREEN4  = ( 34, 139,  34)
GREEN5  = (124, 252,   0)
ORANGE1 = (255, 140,   0)
ORANGE2 = (250, 128, 114)
RED     = (255,   0,   0)
RED1    = (205,   0,   0)
VIOLET1 = (255,   0, 255)
GELB    = (255, 215,   0)
GELB1   = (255, 255,   0)
VIOLET2 = (178,  58, 238)
# Candidate colours for fruits (index 0, PINK, is never drawn — see Fruits).
farbenliste = [PINK, ORANGE, VIOLETTE, BLUE, BLUE2, CYAN, BROWN, BROWN1, GREY, GREY1, GREEN2, GREEN3, GREEN4, GREEN5, ORANGE1, ORANGE2, RED, RED1, VIOLET1, VIOLET2, GELB, GELB1]
CELLWIDTH = 0
class Fruits:
    """A fruit on the board: a grid-cell position and a random colour index."""

    def __init__(self, pos=None):
        # BUG FIX: the original default was a dict literal with rd.randint(...)
        # evaluated ONCE at definition time, so every default-constructed fruit
        # got the same position and all of them aliased one shared dict
        # (classic mutable-default bug). frucht_neupos below already used the
        # None-sentinel pattern; __init__ now matches it.
        if pos is None:
            pos = {'x': rd.randint(2, 27), 'y': rd.randint(2, 27)}
        self.koord = pos
        # Colour index into farbenliste; index 0 is excluded, as before.
        self.farbe = rd.randint(1, len(farbenliste) - 1)

    def zufallsfarbe(self):
        """Return this fruit's colour tuple."""
        return farbenliste[self.farbe]

    def frucht_neupos(self, pos=None):
        """Move the fruit to `pos`, or to a fresh random cell after being eaten."""
        if pos is None:
            pos = {'x': rd.randint(3, 26), 'y': rd.randint(3, 26)}
        self.koord = pos
        self.farbe = rd.randint(1, len(farbenliste) - 1)
class Snake:
    """The snake: a list of {'x','y'} body cells (head first) plus a direction."""
    def __init__(self):
        # First body part starts at a random cell; head is index 0.
        self.körperteile = [{'x': rd.randint(3, 27), 'y': rd.randint(3,
                                                                     27)}]  # list of body parts, random start cell
        self.richtung = {'dx': 0, 'dy': 0}  # current movement direction
    def körper_hinzufügen(self):
        # Grow: insert a new head one step ahead of the current head.
        x = self.körperteile[0]['x'] + self.richtung['dx']
        y = self.körperteile[0]['y'] + self.richtung['dy']
        nechst = {'x': x, 'y': y}
        self.körperteile.insert(0, nechst)
    def update(self):
        # Move one step: each part takes the place of its predecessor.
        x = self.körperteile[0]['x'] + self.richtung['dx']
        y = self.körperteile[0]['y'] + self.richtung['dy']
        nechst = {'x': x, 'y': y}
        for i in range(len(self.körperteile)):
            tmp = self.körperteile[i]
            self.körperteile[i] = nechst
            nechst = tmp
class Spielfeld:
    """Square playing field: border cells hold 1, interior cells hold 0."""

    def __init__(self, groesse):
        # (groesse+1) x (groesse+1) grid, initially all zero.
        self.felder = [[0] * (groesse + 1) for _ in range(groesse + 1)]
        # Mark the border of the groesse x groesse sub-grid.
        # NOTE(review): row/column index `groesse` stays untouched — confirm
        # whether the extra row/column is intentional.
        for k in range(groesse):
            self.felder[0][k] = 1
            self.felder[groesse - 1][k] = 1
            self.felder[k][0] = 1
            self.felder[k][groesse - 1] = 1
class Button:
    """Rectangular button rendered onto a surface with a soft glow border."""
    def create_button(self, surface, color, x, y, length, height, width, text, text_color):
        # Draw body + label and remember the hit-box for pressed().
        surface = self.draw_button(surface, color, length, height, x, y, width)
        surface = self.write_text(surface, text, text_color, length, height, x, y)
        self.rect = pygame.Rect(x,y, length, height)
        return surface
    def write_text(self, surface, text, text_color, length, height, x, y):
        # Scale the font with label length so longer text still fits.
        font_size = int(length//len(text))
        myFont = pygame.font.SysFont("Calibri", font_size)
        myText = myFont.render(text, 1, text_color)
        # Centre the label inside the button rectangle.
        surface.blit(myText, ((x+length/2) - myText.get_width()/2, (y+height/2) - myText.get_height()/2))
        return surface
    def draw_button(self, surface, color, length, height, x, y, width):
        # Nine progressively larger, more transparent rectangles form a halo.
        for i in range(1,10):
            s = pygame.Surface((length+(i*2),height+(i*2)))
            s.fill(color)
            alpha = (255/(i+2))
            if alpha <= 0:
                alpha = 1
            s.set_alpha(alpha)
            pygame.draw.rect(s, color, (x-i,y-i,length+i,height+i), width)
            surface.blit(s, (x-i,y-i))
        # Solid body plus a one-pixel white outline.
        pygame.draw.rect(surface, color, (x,y,length,height), 0)
        pygame.draw.rect(surface, WHITE, (x,y,length,height), 1)
        return surface
    def pressed(self, mouse):
        # True when the mouse position lies strictly inside the hit-box.
        if mouse[0] > self.rect.topleft[0]:
            if mouse[1] > self.rect.topleft[1]:
                if mouse[0] < self.rect.bottomright[0]:
                    if mouse[1] < self.rect.bottomright[1]:
                        print ("Some button was pressed!")
                        return True
                    else: return False
                else: return False
            else: return False
        else: return False
class File:
    """Persist high-score entries in the plain-text file 'Ergebnisse'."""
    def __init__(self):
        #self.a=open('Ergebnisse','a')
        pass
    def write(self, name, punkte):
        # `name` arrives pre-formatted as "name:score:\n"; `punkte` is the score.
        # NOTE(review): the ranking logic below looks unfinished — entries with
        # a higher stored score are skipped with `pass`, and the new line is
        # written at the current read position of the 'r+' handle; confirm
        # the intended file layout before relying on it.
        #self.a.close()
        try:
            self.fout=open('Ergebnisse','r+')
        except:
            # File does not exist yet: create it and store the first entry.
            self.fout=open('Ergebnisse','a')
            self.fout.write(name)
        else:
            if os.stat("Ergebnisse").st_size==0:
                self.fout.write(name)
            else:
                for line in self.fout:
                    tmp=line.split(":")
                    zahl=int(tmp[1])
                    print(tmp)
                    if zahl<punkte:
                        pass
                        print("Drin")
                    else:
                        print(punkte)
                        print("im else")
                        self.fout.write(name)
                        break
    def beenden(self):
        # Close the results file opened by write().
        self.fout.close()
def makemenu(DISPLAYSURF):
    """Draw the main menu and return the (isMenu, isGame, is1vs1) flags."""
    pygame.display.set_caption('MENU')
    DISPLAYSURF.fill(WHITE)
    spielbutton = Button()
    spielbutton.create_button(DISPLAYSURF, GREEN, 50, 50, 150, 50, 0, "Spiel", BLACK)
    rangbutton = Button()
    rangbutton.create_button(DISPLAYSURF, GREEN, 300, 50, 150, 50, 0, "Liste", BLACK)
    mehrspielbutton = Button()  # two-player mode button
    mehrspielbutton.create_button(DISPLAYSURF, GREEN, 50, 200, 150, 50, 0, "1 vs. 1", BLACK)
    for event in pygame.event.get():
        if event.type == pygame.KEYDOWN:  # ESC quits the program
            if event.key == pygame.K_ESCAPE:
                pygame.quit()
                sys.exit()
        elif event.type == pygame.MOUSEBUTTONDOWN:
            if spielbutton.pressed(pygame.mouse.get_pos()):
                return False,True,False
            elif mehrspielbutton.pressed(pygame.mouse.get_pos()):
                return False,False,True
    return True,False,False
def makeend(DISPLAYSURF,punkte):
    """End screen; returns (isEnd, isMenu).

    With a non-zero score it prompts (on the console) for a name and saves it.
    NOTE(review): in that branch the function falls through to the final
    `return True, False`, so the score is saved again on every loop pass
    until the score is reset — confirm whether that is intended.
    """
    pygame.display.set_caption('ENDE')
    punkte=punkte  # no-op kept from the original
    print("Done")
    DISPLAYSURF.fill(WHITE)
    if punkte!=0:
        speicherkommentar=Button()
        speicherkommentar.create_button(DISPLAYSURF,GREEN,200,200,400,200,0,"Name in Console",BLACK)
        pygame.display.update()
        punktespeichern(punkte)
    else:
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:  # ESC quits the program
                if event.key == pygame.K_ESCAPE:
                    pygame.quit()
                    sys.exit()
                elif event.key == pygame.K_SPACE:
                    # Space returns to the menu.
                    return False, True
    return True, False
def _steuere(schlange, key, links, rechts, hoch, runter):
    """Apply one steering key press to `schlange` (shared by both game modes).

    A perpendicular key turns the snake; the key opposite to the current
    movement stops it. BUG FIX: the original left-key branch used
    `richtung["dx"] == 0` (a no-op comparison) where the symmetric
    right/up/down branches assign `= 0`, so pressing 'left' while moving
    right did nothing; the assignment makes all four directions consistent.
    """
    r = schlange.richtung
    if key == links:
        if r["dx"] == 0:
            r["dx"] = -1
            r["dy"] = 0
        elif r["dx"] == 1:
            r["dx"] = 0  # was `==` (no-op) in the original
            r["dy"] = 0
    elif key == rechts:
        if r["dx"] == -1:
            r["dx"] = 0
            r["dy"] = 0
        elif r["dx"] == 0:
            r["dx"] = 1
            r["dy"] = 0
    elif key == hoch:
        if r["dy"] == 0:
            r["dy"] = -1
            r["dx"] = 0
        elif r["dy"] == 1:
            r["dy"] = 0
            r["dx"] = 0
    elif key == runter:
        if r["dy"] == -1:
            r["dy"] = 0
            r["dx"] = 0
        elif r["dy"] == 0:
            r["dy"] = 1
            r["dx"] = 0

def _zeichne_rand(surface, feld, cellsize):
    """Draw every border cell of `feld` as a black square."""
    for i in range(len(feld.felder)):
        for j in range(len(feld.felder[i])):
            if feld.felder[i][j] == 1:
                x, y = board_to_pixel_koord(i, j, cellsize)
                pygame.draw.rect(surface, BLACK, pygame.Rect(x, y, cellsize, cellsize))

def _rand_getroffen(feld, schlange):
    """Return True when the snake's head sits on a border cell of `feld`."""
    kopf = schlange.körperteile[0]
    for i in range(len(feld.felder)):
        for j in range(len(feld.felder[i])):
            if feld.felder[i][j] == 1 and {'x': i, 'y': j} == kopf:
                return True
    return False

def _zeichne_gitter(surface, board_lenght, board_hight, cellsize):
    """Draw the vertical and horizontal grid lines over the board."""
    for x in range(0, board_lenght, cellsize):
        pygame.draw.line(surface, BLACK, (x, 0), (x, board_lenght))
    for y in range(0, board_hight, cellsize):
        pygame.draw.line(surface, BLACK, (0, y), (board_hight, y))

def makegame():
    """Run the game: menu screen, single-player mode and local 1-vs-1 mode.

    The triply-duplicated key handling and drawing code of the original is
    factored into the private helpers above; behaviour is unchanged except
    for the two bug fixes noted inline.
    """
    GANZE_LAENGE = 800       # window width (board plus score column)
    BOARD_LENGHT = 600       # board width in pixels
    BOARD_HIGHT = BOARD_LENGHT
    FPS = 8
    CELLSIZE = 20
    assert BOARD_LENGHT % CELLSIZE == 0
    assert BOARD_HIGHT % CELLSIZE == 0
    CELLWIDTH = int(BOARD_LENGHT / CELLSIZE)
    my_feld = Spielfeld(CELLWIDTH)
    punkte = 0
    punkte2 = 0
    snake = Snake()
    snake2 = Snake()
    frucht = Fruits()
    pygame.init()
    FPSCLOCK = pygame.time.Clock()
    DISPLAYSURF = pygame.display.set_mode((GANZE_LAENGE, BOARD_HIGHT))
    BASICFONT = pygame.font.Font('freesansbold.ttf', 18)  # kept: preloads the font
    pygame.display.set_caption('SNAKE')
    # Screen-selection state flags.
    isMenu = True
    isEnd = False
    isGame = False
    is1vs1 = False
    while True:
        if isMenu:
            isMenu, isGame, is1vs1 = makemenu(DISPLAYSURF)
        elif isEnd:
            isEnd, isMenu = makeend(DISPLAYSURF, punkte)
        elif isGame:
            # ---- single-player mode ----
            DISPLAYSURF.fill(WHITE)
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:  # ESC quits the program
                        pygame.quit()
                        sys.exit()
                    else:
                        # WASD steers the snake.
                        _steuere(snake, event.key, pygame.K_a, pygame.K_d,
                                 pygame.K_w, pygame.K_s)
            _zeichne_rand(DISPLAYSURF, my_feld, CELLSIZE)
            snake.update()
            _zeichne_gitter(DISPLAYSURF, BOARD_LENGHT, BOARD_HIGHT, CELLSIZE)
            punktezahl = Button()
            punktezahl.create_button(DISPLAYSURF, WHITE, 650, 100, 150, 60, 0, str(punkte), BLACK)
            # Fruit eaten: grow, respawn the fruit, score a point.
            if snake.körperteile[0] == frucht.koord:
                snake.körper_hinzufügen()
                frucht.frucht_neupos()
                punkte += 1
                print(punkte)
            # Self-collision ends the round (score kept so makeend can save it).
            for teil in snake.körperteile[1:]:
                if snake.körperteile[0] == teil:
                    snake = Snake()
                    isEnd = True
            for körperteil in snake.körperteile:
                make_rectangle_snake(körperteil, DISPLAYSURF, CELLSIZE, GREEN)
            make_rectangle_fruit(frucht, DISPLAYSURF, CELLSIZE)
            # Hitting the border also ends the round.
            if _rand_getroffen(my_feld, snake):
                snake = Snake()
                isEnd = True
        elif is1vs1:
            # ---- two-player mode ----
            DISPLAYSURF.fill(WHITE)
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:  # ESC quits the program
                        pygame.quit()
                        sys.exit()
                    else:
                        # WASD steers snake 1, arrow keys steer snake 2; a key
                        # matches at most one of the two calls.
                        _steuere(snake, event.key, pygame.K_a, pygame.K_d,
                                 pygame.K_w, pygame.K_s)
                        _steuere(snake2, event.key, pygame.K_LEFT, pygame.K_RIGHT,
                                 pygame.K_UP, pygame.K_DOWN)
            _zeichne_rand(DISPLAYSURF, my_feld, CELLSIZE)
            # Border collision by either snake resets the round and scores.
            if _rand_getroffen(my_feld, snake) or _rand_getroffen(my_feld, snake2):
                snake = Snake()
                snake2 = Snake()
                punkte = 0
                punkte2 = 0
                isEnd = True
            snake2.update()
            snake.update()
            _zeichne_gitter(DISPLAYSURF, BOARD_LENGHT, BOARD_HIGHT, CELLSIZE)
            punktezahl = Button()
            punktezahl.create_button(DISPLAYSURF, WHITE, 650, 100, 150, 60, 0, str(punkte), BLACK)
            punktezahl2 = Button()
            punktezahl2.create_button(DISPLAYSURF, WHITE, 650, 300, 150, 60, 0, str(punkte2), BLACK)
            # Whichever head reaches the fruit first scores.
            if snake.körperteile[0] == frucht.koord:
                snake.körper_hinzufügen()
                frucht.frucht_neupos()
                punkte += 1
                print(punkte)
            elif snake2.körperteile[0] == frucht.koord:
                snake2.körper_hinzufügen()
                frucht.frucht_neupos()
                punkte2 += 1
                print(punkte2)
            # Self-collision of either snake resets the round and both scores.
            for teil in snake.körperteile[1:]:
                if snake.körperteile[0] == teil:
                    snake = Snake()
                    snake2 = Snake()
                    punkte = 0
                    punkte2 = 0
                    isEnd = True
            for teil in snake2.körperteile[1:]:
                if snake2.körperteile[0] == teil:
                    snake = Snake()
                    snake2 = Snake()
                    punkte = 0
                    punkte2 = 0
                    isEnd = True
            # Collisions between the two snakes.
            for teil in snake.körperteile[0:]:
                if snake2.körperteile[0] == teil:
                    snake = Snake()
                    snake2 = Snake()
                    # BUG FIX: this branch was the only collision path that
                    # forgot to reset the scores; all siblings reset both.
                    punkte = 0
                    punkte2 = 0
                    isEnd = True
            for teil in snake2.körperteile[0:]:
                if snake.körperteile[0] == teil:
                    snake = Snake()
                    snake2 = Snake()
                    punkte = 0
                    punkte2 = 0
                    isEnd = True
            for körperteil in snake.körperteile:
                make_rectangle_snake(körperteil, DISPLAYSURF, CELLSIZE, GREEN)
            for körperteil in snake2.körperteile:
                make_rectangle_snake(körperteil, DISPLAYSURF, CELLSIZE, CYAN)
            make_rectangle_fruit(frucht, DISPLAYSURF, CELLSIZE)
        pygame.display.update()
        FPSCLOCK.tick(FPS)
def punktespeichern(zahl):
    """Ask for the player's name on the console and save 'name:score:' to disk."""
    tabelle=File()
    name=input("Gebe deinen Namen ein:")
    tabelle.write(name+ ":" + str(zahl) + ":\n",zahl)
    tabelle.beenden()
def board_to_pixel_koord(i, j, width):
    """Convert the board cell (i, j) into the pixel coordinates of its top-left corner."""
    px = i * width
    py = j * width
    return px, py
def make_rectangle_snake(teil, display, size, color):
    """Draw one snake segment (`teil`: a {'x','y'} grid-cell dict) in `color`.

    FIX: the first parameter was named `dict`, shadowing the builtin; all
    in-file call sites pass it positionally, so the rename is safe.
    """
    x, y = board_to_pixel_koord(teil["x"], teil["y"], size)
    the_rect = pygame.Rect(x, y, size, size)
    pygame.draw.rect(display, color, the_rect)
def make_rectangle_fruit(frucht, display, size):
    """Draw the fruit at its grid cell, filled with its own colour."""
    x, y = board_to_pixel_koord(frucht.koord["x"], frucht.koord["y"], size)
    the_rect = pygame.Rect(x, y, size, size)
    pygame.draw.rect(display, frucht.zufallsfarbe(), the_rect)
# Entry point: start the snake game.
if __name__ == "__main__":
    makegame()
|
# -*- coding: utf-8 -*-
"""
Tools for filtering unstructured grid data
Created on Tue Dec 18 13:19:39 2012
@author: mrayson
"""
import numpy as np
from scipy import spatial, sparse
import pdb
class ufilter(object):
"""
Unstructured grid filter class
"""
c = 4. # uses point c x p for filter
filtertype = 'gaussian' # 'gaussian' or 'lanczos'
kmax = 50 # Maximum number of points to use in filter matrix
    def __init__(self,X,delta_f,**kwargs):
        # X: (n, m) array of point coordinates; delta_f: filter length scale.
        # Extra kwargs (c, filtertype, kmax, ...) override the class defaults.
        self.X = X
        self.delta_f = delta_f
        self.__dict__.update(kwargs)
        s = np.shape(self.X)
        self.n = s[0] # Number of points
        if len(s)>1:
            self.m = s[1] # Number of dimensions
        else:
            self.m = 1   # 1-D input given as a flat array
        # Derive p from delta_f and build the sparse filter matrix up front.
        self.GetP()
        self.BuildFilterMatrix2()
    def __call__(self,y):
        """
        Performs the filtering operation on data in vector y
        (sparse matrix-vector product with the precomputed weights).
        """
        return self.G*y
    def BuildFilterMatrix(self):
        """
        Builds a sparse matrix, G, used to filter data in vector, y, via:
            y_filt = G x y
        Point-by-point (slow) version; BuildFilterMatrix2 is the vectorized
        replacement actually called from __init__.
        """
        # Compute the spatial tree
        kd = spatial.cKDTree(self.X)
        eps=1e-6
        # Initialise the sparse matrix
        self.G = sparse.lil_matrix((self.n,self.n))
        # Progress is printed every `printstep` percent.
        printstep = 5
        printstep0 = 0
        ii=0
        for nn in range(self.n):
            ii+=1
            perccomplete = float(nn)/float(self.n)*100.0
            if perccomplete > printstep0:
                print '%d %% complete...'%(int(perccomplete))
                printstep0+=printstep
            #print nn
            # Find all of the points within c * p distance from point
            dx, i = kd.query(self.X[nn,:]+eps,k=self.n+1,distance_upper_bound=self.c*self.p)
            # Calculate the filter weights on these points
            # (query pads out-of-range neighbours with inf distances).
            ind = dx != np.inf
            Gtmp = self.Gaussian(dx[ind])
            # Insert the points into the sparse matrix
            I = i[ind]
            self.G[nn,I] = Gtmp
            #self.G[nn,I] = dx[ind] # testing
def BuildFilterMatrix2(self):
"""
Builds a sparse matrix, G, used to filter data in vector, y, via:
y_filt = G x y
Vectorized version of the above
"""
# Compute the spatial tree
kd = spatial.cKDTree(self.X)
eps=1e-6
# Initialise the sparse matrix
self.G = sparse.lil_matrix((self.n,self.n))
# Find all of the points within c * p distance from point
dx, i = kd.query(self.X+eps,k=self.kmax,distance_upper_bound=self.c*self.p)
ind = np.isinf(dx)
# Calculate the filter weights
if self.filtertype=='gaussian':
Gtmp = self.Gaussian(dx)
elif self.filtertype=='lanczos':
Gtmp = self.Lanczos(dx)
# Set the weighting to zero for values outside of the range
Gtmp[ind]=0
i[ind]=0
# Normalise the filter weights
sumG = np.sum(Gtmp,axis=1)
Gout = [Gtmp[ii,:]/G for ii, G in enumerate(sumG)]
for nn,gg in enumerate(Gout):
self.G[nn,i[nn,:]]=gg
# ind = [nn*self.n + i[nn,:] for nn,G in enumerate(G)]
# ind = np.array(ind)
# G=np.array(G)
# pdb.set_trace()
# self.G[ind.ravel()] = G.ravel()
# N = [nn + i[nn,:]*0 for nn,G in enumerate(G)]
# N = np.array(N)
# G=np.array(G)
# self.G[N,i]=G
#[self.G[nn,i[nn,:]] for nn,G in enumerate(tmp)]
# # Calculate the filter weights on these points
# ind = dx != np.inf
# Gtmp = self.Gaussian(dx[ind])
#
# # Insert the points into the sparse matrix
# I = i[ind]
# self.G[nn,I] = Gtmp
# #self.G[nn,I] = dx[ind] # testing
def GetP(self):
"""
Calculate the 'p' parameter
"""
#self.p = self.delta_f**2/40.0
self.p = self.delta_f
#def kmax(self):
# """
# Estimate the maximum number of points in the search radius
# """
# return np.round(self.c*self.p/self.dxmin)
def Gaussian(self,dx):
"""
Calculate the Gaussian filter weights
"""
#Gtmp = 1.0 / (4.0*np.pi*self.p) * np.exp(-dx/np.sqrt(self.p))/dx
#Gtmp = Gtmp[1:] # discard closest point (self)
if self.m == 1:
# 1D gaussian
coef = 1.0/(np.sqrt(2.0*np.pi)*self.p)
elif self.m == 2:
# 2D gaussian
coef = 1.0/(2.0*np.pi*self.p**2)
Gtmp = coef * np.exp(- dx**2 / (2*self.p**2))
return Gtmp / np.sum(Gtmp)
def Lanczos(self,dx):
"""
Lanczos filter weights
!!!Need to check this!!!
"""
a = self.p
Gtmp = np.sinc(dx) * np.sinc(dx/a)
return Gtmp / np.sum(Gtmp)
|
from django.shortcuts import render
from quiz.models import Exam
# Create your views here.
def home(request):
    """Render the index page with every Exam in the database."""
    all_exams = Exam.objects.all()
    context = {"exam": all_exams}
    return render(request, "index.html", context)
|
from django.test import TestCase
from unittest.mock import patch , call
import accounts.views
from accounts.models import Token
class SendLoginEmailViewTest(TestCase):
    """Tests for the send_login_email view: redirect, token creation,
    outgoing mail contents, and the success flash message."""

    def test_redirects_to_home_page(self):
        response = self.client.post(
            '/accounts/send_login_email',
            data={'email': 'sanyam1997.iitr@gmail.com'},
        )
        self.assertRedirects(response, '/')

    def test_creates_token_associated_with_email(self):
        self.client.post(
            '/accounts/send_login_email',
            data={'email': 'sanyam1997.iitr@gmail.com'},
        )
        token = Token.objects.first()
        self.assertEqual(token.email, 'sanyam1997.iitr@gmail.com')

    @patch('accounts.views.send_mail')
    def test_sends_link_to_login_using_token_uid(self, mock_send_mail):
        self.client.post(
            '/accounts/send_login_email',
            data={'email': 'sanyam1997.iitr@gmail.com'},
        )
        token = Token.objects.first()
        expected_url = f'http://testserver/accounts/login?token={token.uid}'
        (subject, body, from_email, to_list), kwargs = mock_send_mail.call_args
        self.assertIn(expected_url, body)

    @patch('accounts.views.send_mail')
    def test_sends_mail_to_address_from_post(self, mock_send_mail):
        self.client.post(
            '/accounts/send_login_email',
            data={'email': 'sanyam1997.iitr@gmail.com'},
        )
        self.assertEqual(mock_send_mail.called, True)
        (subject, body, from_email, to_list), kwargs = mock_send_mail.call_args
        self.assertEqual(subject, 'Your login link for Superlists')
        self.assertEqual(from_email, 'noreply@superlists')
        self.assertEqual(to_list, ['sanyam1997.iitr@gmail.com'])

    def test_adds_success_message(self):
        response = self.client.post(
            '/accounts/send_login_email',
            data={'email': 'sanyam1997.iitr@gmail.com'},
            follow=True,
        )
        message = list(response.context['messages'])[0]
        self.assertEqual(
            message.message,
            "Check your email, we've sent you a link you can use to log in."
        )
        self.assertEqual(message.tags, "success")
@patch('accounts.views.auth')
class LoginViewTest(TestCase):
    """Tests for the token-login view; django's auth module is mocked for
    every test via the class decorator."""

    def test_redirects_to_home_page(self, mock_auth):
        response = self.client.get('/accounts/login?token=abcd123')
        self.assertRedirects(response, '/')

    def test_calls_authenticate_with_uid_from_get_request(self, mock_auth):
        self.client.get('/accounts/login?token=abcd123')
        self.assertEqual(
            mock_auth.authenticate.call_args,
            call(uid='abcd123'),
        )

    def test_calls_auth_login_with_user_if_there_is_one(self, mock_auth):
        response = self.client.get('/accounts/login?token=abcd123')
        self.assertEqual(
            mock_auth.login.call_args,
            call(response.wsgi_request, mock_auth.authenticate.return_value),
        )

    def test_does_not_login_if_user_is_not_authenticated(self, mock_auth):
        mock_auth.authenticate.return_value = None
        self.client.get('/accounts/login?token=abcd123')
        self.assertEqual(mock_auth.login.called, False)
|
# Unfinished -- to be completed
import requests
# Host and port used to build the request URL
from ddt_data.change_data import host,port
# Login helper class (provides the auth token)
from ddt_data.sys_login_action import get_login
# Logger factory
from public.log_out import logger
log=logger()
# Timestamp captured once at import time
import time
now = time.strftime("%Y-%m-%d %H:%M:%S")
class market_Delete():
    # Client for the market "delete" endpoint.
    # NOTE(review): the record id 132 is hard-coded in the URL -- presumably a
    # test fixture; the file is marked unfinished at the top. Confirm before
    # wider use.
    def __init__(self,sr):
        # sr: a requests-compatible module/session used to issue HTTP calls.
        self.sr=sr
        # Obtain a fresh token from the login helper
        self.token=get_login(self.sr).get_token()
        # Endpoint URL for the delete request
        self.url_market_delete=host+port+r'/info/delete/132'
        self.header_suc={
            "Accept": "*/*",
            "token": self.token,
            "Content-Type": "application/json"
        }
        self.payload={}
    def get_market_delete_true(self):
        # Issue the call and return the parsed JSON body; on any request
        # error the exception is only logged and the method implicitly
        # returns None.
        try:
            log.info(u"执行market_delete操作")
            r=self.sr.get(url=self.url_market_delete,headers=self.header_suc,params=self.payload)
        except Exception as msg:
            log.error(u"执行market_delete操作报错%s"%msg)
        else:
            return r.json()
    # def insert_data(self):
    #     try:
    #         log.info(u"插入门店数据")
if __name__=="__main__":
    # Manual smoke test: use the requests module directly as the session.
    sr=requests
    m=market_Delete(sr)
    print(m.get_market_delete_true())
import random
def solution():
    """Scratch-card style game (Python 2): build a random 3x5 matrix of the
    letters a-o, then for a batch of row-choice tickets tally how often each
    letter is hit and report a win when any letter reaches 3 hits."""
    #map alphabet with index position
    alphabet = {'a':0,'b':1,'c':2, 'd':3, 'e':4,'f':5,'g':6,'h':7,'i':8,'j':9,'k':10,'l':11,'m':12,'n':13,'o':14}
    # create an list of 15 with zero's
    result = [0] * 15
    # define columns of matrix
    column = 5
    # define rows of matrix
    row =3
    # define matrix of 3*5
    matrix = [[0 for x in range(column)] for y in range(row)]
    # generate matrix that contains random characters from a-o
    # (py2: dict.keys() returns a list, so random.choice works directly)
    for i in range(0,3):
        for j in range(0,5):
            r = random.choice(alphabet.keys())
            matrix[i][j] = r
    print "random matrix:"
    print matrix
    #input as per problem: each ticket picks a row (0-2) per column
    ip = [[0,0,0,0,0],
          [0,0,0,0,1],
          [0,0,0,1,0],
          [0,1,2,1,0],
          [0,1,2,0,0],
          [0,1,0,1,0],
          [1,0,0,0,0],
          [0,1,0,0,0],
          [2,0,2,0,0],
          [1,1,1,1,1],
          [2,2,2,2,2],
          [1,2,1,0,1],
          [0,0,0,1,2],
          [0,0,0,2,2],
          [1,2,2,2,2],
          [2,0,0,0,0],
          [2,1,1,0,0],
          [1,0,0,0,1],
          [2,0,0,0,2],
          [1,1,2,2,1]]
    # # input from user take 5 value comma separated
    # ip = []
    # print "Enter the input : "
    # for i in range(0,2):
    #     inpt = raw_input()
    #     numbers = map(int, inpt.split(','))
    #     if len(numbers) == 5:
    #         if all(i in [0, 1, 2] for i in numbers):
    #             ip.append(numbers)
    #             print "Your input is right"
    #         else:
    #             print "rows should have in 0,1,2"
    #             return
    #     else:
    #         print "wrong input You have to enter 5 numbers with value 0,1,2 by comma seperate"
    #         return
    # for every input increament result array by 1 where index of result matrix equal to value of symbol in alphabet
    # dictionary
    for numbers in ip:
        for i in range(0,5):
            result[alphabet[matrix[numbers[i]][i]]] = result[alphabet[matrix[numbers[i]][i]]] + 1
    # so we check that if any position of result array grather than three so we can declare it to win otherwise lost
    # NOTE(review): this check sits OUTSIDE the ticket loop, so it runs once
    # on the counts accumulated over ALL tickets, and ``numbers`` holds only
    # the last ticket; the trailing ``result = [0] * 15`` reset strongly
    # suggests this block was meant to be indented inside the loop -- confirm
    # the intended behaviour.
    flag = any(i >= 3 for i in result)
    if (flag == False):
        print "LOSING INPUT IS "+str(numbers)
    else:
        print "WINNING INPUT IS "+str(numbers)
    result = [0] * 15
solution()
# Generated by Django 2.2.5 on 2019-11-11 02:08
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: redeclare Photo.file as an ImageField with an empty
    # upload_to and verbose name 'room_photos'.
    dependencies = [
        ('rooms', '0006_auto_20191107_2014'),
    ]
    operations = [
        migrations.AlterField(
            model_name='photo',
            name='file',
            field=models.ImageField(upload_to='', verbose_name='room_photos'),
        ),
    ]
|
# coding: utf-8
class DictPlus(dict):
    """A dict supporting ``+`` as a non-mutating merge.

    ``a + b`` returns a **new** DictPlus containing the keys of both
    operands; on key collisions the right operand wins, mirroring
    ``dict.update`` semantics.
    """

    def __add__(self, other):
        """Return a new DictPlus merging *self* and *other*.

        The original implementation updated ``self`` in place and returned
        it -- a surprising side effect for the ``+`` operator; building a
        fresh mapping keeps both operands unchanged.
        """
        merged = DictPlus(self)
        merged.update(other)
        return merged
|
from django.test import TestCase
from questionnaire.forms import get_choices, generate_charfield, generate_textfield, generate_boolean_field, generate_select_dropdown_field, generate_radioselect_field, generate_multiplechoice_field, FIELD_TYPES,\
QuestionGroupForm, _get_fields_for_group, _convert_answerset_to_intial_data
from questionnaire.models import Question, Questionnaire, QuestionGroup, AnswerSet, QuestionAnswer
from django.forms import Textarea, TextInput, BooleanField, ChoiceField, RadioSelect,CheckboxSelectMultiple, CharField
from django.forms.fields import MultipleChoiceField, TypedChoiceField
from mock import MagicMock, patch, call
from django.contrib.auth.models import User
class FormsTestCase(TestCase):
    """Unit tests for the questionnaire form helpers: choice extraction,
    the per-type field generators, the FIELD_TYPES dispatch dict, group
    field assembly, and AnswerSet -> initial-data conversion."""
    fixtures = ['test_questionnaire_fixtures.json']
    def test_get_choices_question_with_options(self):
        '''
        Assuming that we pass this function a question object that has options defined
        we should get back:
        1. A list of tuples (option text, option text)
        '''
        tuple_choices = [(u'Radio 1',u'Radio 1'), (u' Radio 2',u' Radio 2'), (u' Radio 3',u' Radio 3')]
        choices_question = Question.objects.get(pk=5)
        get_choices_test = get_choices(choices_question)
        self.assertEqual(get_choices_test, tuple_choices)
    def test_get_choices_question_without_options(self):
        '''
        If we pass this function a question object that had no options defined we should get None back
        '''
        choices_question = Question.objects.create(label='test', field_type='select_dropdown_field', selectoptions=None)
        self.assertEquals(None, get_choices(choices_question) )
    def test_get_choices_not_a_question(self):
        '''
        If we pass this function anything other than a question object it should raise an AttributeError
        Raising AttributeError is choosen because eventhough the method error type are using TypeError and
        ValueError, the return value always shows AttributeError
        '''
        choices_question = Questionnaire.objects.get(pk=1)
        self.assertRaises(AttributeError, get_choices, choices_question)
    def test_generate_charfield(self):
        '''
        This should return us a Charfield with a max length of 100, and a TextInput widget
        '''
        self.assertIsInstance(generate_charfield(), CharField)
        self.assertEqual(generate_charfield().max_length, 100, 'max length return should be 100')
        self.assertIsInstance(generate_charfield().widget, TextInput)
    def test_generate_textfield(self):
        '''
        This should return us a Charfield without a max length specified, and using a TextArea widget
        '''
        self.assertEqual(generate_textfield().max_length, None, 'max length should be Not Set')
        self.assertIsInstance(generate_textfield(), CharField, 'this returns a charfield!')
        self.assertIsInstance(generate_textfield().widget, Textarea)
    def test_generate_boolean_field(self):
        '''
        This should return a TypedChoiceField object with yes and no as options
        '''
        self.assertIsInstance(generate_boolean_field(), TypedChoiceField, 'The return class should be boolean field')
        self.assertEqual(generate_boolean_field().choices, [(1,'Yes'),( 0,'No')])
    def test_generate_select_dropdown_field(self):
        '''
        This should return a Charfield with the choices attribute set to an empty list (to be populated later)
        '''
        self.assertIsInstance(generate_select_dropdown_field(), ChoiceField )
        self.assertEqual(generate_select_dropdown_field().choices, [])
    def test_generate_radioselect_field(self):
        '''
        This should return a ChoiceField with a RadioSelect widget and the choices attribute set to an empty list
        '''
        self.assertIsInstance(generate_radioselect_field(), ChoiceField)
        self.assertIsInstance(generate_radioselect_field().widget, RadioSelect )
        self.assertEqual(generate_radioselect_field().choices, [])
    def test_generate_multiplechoice_field(self):
        '''
        This should return a MultipleChoiceField with the choices attribute set to an empty list and a CheckboxSelectMultiple widget
        '''
        self.assertIsInstance(generate_multiplechoice_field(), MultipleChoiceField)
        self.assertIsInstance(generate_multiplechoice_field().widget, CheckboxSelectMultiple)
        self.assertEqual(generate_multiplechoice_field().choices, [])
    def test_FIELD_TYPES_dict(self):
        '''
        charfield should map to ``generate_charfield``
        textfield should map to ``generate_textfield``
        booleanfield should map to ``generate_boolean_field``,
        select_dropdown_fieldshould map to ``generate_select_dropdown_field``,
        radioselectfield should map to ``generate_radioselect_field``,
        multiplechoicefield should map to ``generate_multiplechoice_field``,
        '''
        self.assertEqual(FIELD_TYPES['charfield'], generate_charfield)
        self.assertEqual(FIELD_TYPES['textfield'], generate_textfield)
        self.assertEqual(FIELD_TYPES['booleanfield'], generate_boolean_field)
        self.assertEqual(FIELD_TYPES['select_dropdown_field'], generate_select_dropdown_field)
        self.assertEqual(FIELD_TYPES['radioselectfield'], generate_radioselect_field)
        self.assertEqual(FIELD_TYPES['multiplechoicefield'], generate_multiplechoice_field)
    def test_get_fields_for_group(self):
        '''
        Calling this function should create a list of tuples that has the same order as
        the ordered questions in the group, and used the FIELD_TYPES dict to create the
        fields. As we have already tested the FIELD_TYPE and allof the generator functions
        this test will simply test make sure that the function returns a sortedDict
        in the correct order
        '''
        with patch('questionnaire.forms.FIELD_TYPES') as field_dict_mock:
            # side_effect pops from the END of this list, so the mock named
            # 'mock2' is handed out for the first question, 'mock1' for the
            # second -- the test only relies on positions, not on the names.
            return_values = [MagicMock(name='mock1'),MagicMock(name='mock2')]
            def side_effect(*attrs):
                return return_values.pop()
            field_function_mock = MagicMock(name='field_function_mock')
            field_function_mock.side_effect = side_effect
            field_dict_mock.__getitem__.return_value = field_function_mock
            questiongroup = MagicMock(name='questiongroup')
            #prepare a list of mock objects to act as questions when returned by the ordered_question
            questiongroup.get_ordered_questions.return_value = [MagicMock(label='question1', id=1, field_type='type1'),
                                                                MagicMock(label='question2', id=2, field_type='type2' ), ]
            with patch('questionnaire.forms.get_choices') as get_choices_mock:
                test_form = _get_fields_for_group(questiongroup=questiongroup)
                self.assertEqual(field_dict_mock.__getitem__.mock_calls , [call('type1'), call('type2')])
                self.assertEqual(test_form[0][0], '1')
                self.assertEqual(test_form[0][1].label, 'question1')
                self.assertEqual(test_form[1][0], '2')
                self.assertEqual(test_form[1][1].label, 'question2')
                self.assertEqual(get_choices_mock.call_count, 2)
    def test_convert_answerset_to_intial_data_with_data(self):
        '''
        if we pass in a valid questionanswer object then we should get back
        a dicitonary, that has a entry for eacxh questionsanswer, the key of which is
        the question id and the value of which is the question answer
        '''
        test_answer_set = AnswerSet(user=User.objects.create_user('testUser', 'me@home.com', 'testPass'),
                                    questionnaire=Questionnaire.objects.get(pk=1),
                                    questiongroup=QuestionGroup.objects.get(pk=1))
        test_answer_set.save()
        #create some answers
        answer1 = QuestionAnswer(question=Question.objects.get(pk=1), answer='answer1', answer_set=test_answer_set)
        answer2 = QuestionAnswer(question=Question.objects.get(pk=2), answer='answer2', answer_set=test_answer_set)
        answer3 = QuestionAnswer(question=Question.objects.get(pk=3), answer='answer3', answer_set=test_answer_set)
        answer1.save()
        answer2.save()
        answer3.save()
        initial_data = _convert_answerset_to_intial_data(test_answer_set)
        self.assertEqual(initial_data[str(answer1.question.id)], answer1.answer)
        self.assertEqual(initial_data[str(answer2.question.id)], answer2.answer)
        self.assertEqual(initial_data[str(answer3.question.id)], answer3.answer)
        self.assertEqual(len(initial_data), 3)
    def test_convert_answerset_to_intial_data_with_empty_data(self):
        '''
        if we pass in a valid questionanswer object that has not answers then we should get back
        an empty dicitonary
        '''
        test_answer_set = AnswerSet(user=User.objects.create_user('testUser', 'me@home.com', 'testPass'),
                                    questionnaire=Questionnaire.objects.get(pk=1),
                                    questiongroup=QuestionGroup.objects.get(pk=1))
        test_answer_set.save()
        initial_data = _convert_answerset_to_intial_data(test_answer_set)
        self.assertEqual(len(initial_data), 0)
    def test_convert_answerset_to_intial_data_with_invalid_argument(self):
        '''
        if we pass in anything other that a valid questionanswer object then we will get a Attribute error thrown
        '''
        self.assertRaises(AttributeError, _convert_answerset_to_intial_data, '123')
class FormsTestCase_WithFixture(TestCase):
    """Fixture-backed helper: asserts a generated form field matches the
    field class, widget class, and choices type expected for its
    declared question type."""

    fixtures = ['forms_test_fixture.json']

    def assertQuestionType(self, question_type,question):
        """Fail unless *question* looks like a field of *question_type*."""
        # question_type -> (field class, widget class or None,
        #                   expected type of .choices or None)
        expectations = {'charfield':(CharField, TextInput,None),
                        'textfield': (CharField, Textarea, None),
                        'booleanfield': (BooleanField, None, None),
                        'select_dropdown_field':(ChoiceField,None, list),
                        'radioselectfield':(ChoiceField,RadioSelect, list),
                        'multiplechoicefield':(MultipleChoiceField,CheckboxSelectMultiple,list)}
        field_cls, widget_cls, choices_type = expectations[question_type]
        self.assertIsInstance(question, field_cls)
        if widget_cls is not None:
            self.assertIsInstance(question.widget, widget_cls)
        if choices_type is not None:
            self.assertIsInstance(question.choices, choices_type)
class QuestionGroupFormTestCase(TestCase):
    """Tests QuestionGroupForm construction with no initial data, a plain
    initial dict, and an AnswerSet that must be converted to initial data.
    _get_fields_for_group is mocked throughout, so only the form wiring is
    exercised here."""
    def test_create_form_no_initial_data(self):
        '''
        If I pass in a valid questiongroup object and valid questionaire_id then I should get back:
        an subclass of Form
        it should have fields representative of the questions in the questiongroup
        '''
        #mock the _get_fields_for_group function to return a predefined list of tuples
        with patch('questionnaire.forms._get_fields_for_group') as get_fields_mock:
            mock1 = MagicMock(name='1')
            mock2 = MagicMock(name='1')
            get_fields_mock.return_value = [('1', mock1), ('2', mock2 )]
            question_group = MagicMock('question_group')
            test_form = QuestionGroupForm(questiongroup=question_group, initial=None, data=None)
            self.assertEqual(test_form.fields['1'], mock1)#assert that the fields contain the fields expected based on the mocked return value
            self.assertEqual(test_form.fields['2'], mock2)
    def test_create_form_with_initial_data(self):
        '''
        If I do all of the above, but also pass a dictionary as the instance argument then my form
        should have initial data for all of the fields that the question group and the answerset have in common
        '''
        with patch('questionnaire.forms._get_fields_for_group') as get_fields_mock:
            mock1 = MagicMock(name='1')
            mock2 = MagicMock(name='1')
            get_fields_mock.return_value = [('1', mock1), ('2', mock2 )]
            question_group = MagicMock('question_group')
            initial_data = {'1':'initial1', '2':'initial2', }
            test_data = {'1':'data1', '2':'data2',}
            test_form = QuestionGroupForm(questiongroup=question_group,initial=initial_data, data=test_data)
            #sanity check should be the same as above
            self.assertEqual(test_form.fields['1'], mock1)#assert that the fields contain the fields expected based on the mocked return value
            self.assertEqual(test_form.fields['2'], mock2)
            #assert the intial data
            self.assertEqual(test_form.initial['1'], 'initial1')
            self.assertEqual(test_form.initial['2'], 'initial2')
            self.assertEqual(test_form.data['1'], 'data1')
            self.assertEqual(test_form.data['2'], 'data2')
    @patch('questionnaire.forms._convert_answerset_to_intial_data')
    def test_create_form_with_initial_answer_set(self, conversion_fucntion_mock):
        '''
        If I do all of the above, but also pass an answer set as the instance argument then my form will call the
        _convert_answerset_to_intial_data with the answer set function, and use the returned dict as the initial data function
        '''
        conversion_fucntion_mock.return_value = {'1':'initial_answer_1', '2':'initial_answer_2' }
        with patch('questionnaire.forms._get_fields_for_group') as get_fields_mock:
            mock1 = MagicMock(name='1')
            mock2 = MagicMock(name='1')
            get_fields_mock.return_value = [('1', mock1), ('2', mock2 )]
            question_group = MagicMock('question_group')
            initial_data = AnswerSet()
            test_data = {'1':'data1', '2':'data2',}
            test_form = QuestionGroupForm(questiongroup=question_group,initial=initial_data, data=test_data)
            #sanity check should be the same as above
            self.assertEqual(test_form.fields['1'], mock1)#assert that the fields contain the fields expected based on the mocked return value
            self.assertEqual(test_form.fields['2'], mock2)
            #assert the intial data
            self.assertEqual(test_form.initial['1'], 'initial_answer_1')
            self.assertEqual(test_form.initial['2'], 'initial_answer_2')
            self.assertEqual(test_form.data['1'], 'data1')
            self.assertEqual(test_form.data['2'], 'data2')
            conversion_fucntion_mock.assert_called_once_with(initial_data)
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from rango.models import Category, Page, UserProfile
from rango.forms import CategoryForm, PageForm
from rango.forms import UserForm, UserProfileForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login, logout
def main(request):
    # Landing page: drop any existing session, then set a test cookie so the
    # register view can later verify cookies work in this browser.
    logout(request)
    request.session.set_test_cookie()
    return render(request, 'rango/main.html')
def do_login(request):
    """Authenticate the user from the POSTed 'log'/'pwd' fields.

    Redirects to /rango on success; returns a plain HttpResponse for a
    disabled account or bad credentials; re-renders the main page on GET.
    """
    if request.method == 'POST':
        username = request.POST.get('log')
        password = request.POST.get('pwd')
        user = authenticate(username = username, password = password)
        if user:
            if user.is_active:
                login(request, user)
                return HttpResponseRedirect('/rango')
            else:
                return HttpResponse('your account is disabled')
        else:
            # Debug trace only; the browser gets the generic message below.
            print "Invalid login details: {0}, {1}".format(username, password)
            return HttpResponse('Invalid login details provided, please re-check.')
    else:
        return render(request, 'rango/main.html', {})
def do_logout(request):
    """Log the current user out and redirect to the site root.

    Fixes two defects in the original:
    - ``request.user.is_authenticated`` was referenced without calling it;
      this file's Django version uses the method form elsewhere (see
      ``index``), so the bare attribute was always truthy.
    - ``render`` was given a URL instead of a template name; the
      commented-out line it replaced shows a redirect was intended.
    """
    if request.user.is_authenticated():
        logout(request)
    return HttpResponseRedirect('/')
@login_required
def index(request):
    """Dashboard: top-5 most-liked categories plus top-5 most-viewed pages."""
    if not request.user.is_authenticated():
        return render(request, 'rango/main.html')
    most_viewed_pages={'pages':[]}
    cat = Category.objects.all()
    # NOTE(review): this appends the 5-page queryset itself as a single list
    # element (pages == [<queryset>]) -- the template presumably unwraps it.
    most_viewed_pages['pages'].append(Page.objects.filter(category__in=cat).order_by('-views')[:5])
    cat_dict = {'categories': Category.objects.order_by('-likes')[:5], 'boldmessage':"hello from strong bold message lol",'name':'amarshukla'}
    cat_dict.update(most_viewed_pages)
    return render(request, 'rango/index.html',cat_dict)
def category(request, category_name_slug):
    """Show one category (looked up by slug) and its five most-viewed pages.

    If the slug matches nothing the template receives an empty context and
    is expected to render a not-found state.
    """
    try:
        context_dict={}
        category = Category.objects.get(slug=category_name_slug)
        context_dict['category_name'] = category.name
        context_dict['category_name_slug'] = category_name_slug
        pages = Page.objects.filter(category=category).order_by('-views')[:5]
        context_dict['pages'] = pages
        context_dict['category'] = category
    except Category.DoesNotExist:
        # Fall through with whatever made it into context_dict (at least {}).
        pass
    return render(request, 'rango/category.html', context_dict)
def hey(request, name):
    """Render the test template, echoing *name* back into the context."""
    return render(request, 'rango/test.html', {'name': name})
def add_category(request):
if request.method == 'POST':
form = CategoryForm(request.POST)
if form.is_valid():
form.save(commit=True)
return index(request)
else:
print form.errors()
else:
form = CategoryForm()
cat_list = Category.objects.all()
return render(request, 'rango/add_category.html',{'form':form,'category':cat_list})
def add_page(request, category_name_slug):
try:
cat_list = Category.objects.get(slug=category_name_slug)
except Category.DoesNotExist:
cat_list = None
context_dict = {}
context_dict['cat_list'] = cat_list
if request.method == 'POST':
form = PageForm(request.POST)
if form.is_valid():
page = form.save(commit=False)
try:
cat = Category.objects.get(name=category_name_slug)
page.category = cat
except Category.DoesNotExist:
return render_to_response( 'rango/add_page.html',
context_dict,
context)
page.views = 0
page.save()
return category(request, category_name_slug)
else:
print form.errors()
else:
form = PageForm()
context_dict['category_name_url']= category_name_slug
context_dict['category_name'] = category_name_slug
context_dict['form'] = form
return render(request, 'rango/add_page.html', context_dict)
def register(request):
    """Handle user sign-up with a linked UserForm + UserProfileForm pair."""
    # Confirm the test cookie set by main() round-tripped, then discard it.
    if request.session.set_test_cookie_worked():
        print "SET cookie worked"
        request.session.delete_test_cookie()
    registered = False
    # If it's a HTTP POST, we're interested in processing form data.
    if request.method == 'POST':
        # Attempt to grab information from the raw form information.
        # Note that we make use of both UserForm and UserProfileForm.
        user_form = UserForm(data=request.POST)
        profile_form = UserProfileForm(data=request.POST)
        # If the two forms are valid...
        if user_form.is_valid() and profile_form.is_valid():
            # Save the user's form data to the database.
            user = user_form.save()
            # Now we hash the password with the set_password method.
            # Once hashed, we can update the user object.
            user.set_password(user.password)
            user.save()
            # Now sort out the UserProfile instance.
            # Since we need to set the user attribute ourselves, we set commit=False.
            # This delays saving the model until we're ready to avoid integrity problems.
            profile = profile_form.save(commit=False)
            profile.user = user
            # Did the user provide a profile picture?
            # If so, we need to get it from the input form and put it in the UserProfile model.
            if 'picture' in request.FILES:
                profile.picture = request.FILES['picture']
            # Now we save the UserProfile model instance.
            profile.save()
            # Update our variable to tell the template registration was successful.
            registered = True
        # Invalid form or forms - mistakes or something else?
        # Print problems to the terminal.
        # They'll also be shown to the user.
        else:
            print user_form.errors, profile_form.errors
    # Not a HTTP POST, so we render our form using two ModelForm instances.
    # These forms will be blank, ready for user input.
    else:
        user_form = UserForm()
        profile_form = UserProfileForm()
    # Render the template depending on the context.
    return render(request,
            'rango/register.html',
            {'user_form': user_form, 'profile_form': profile_form, 'registered': registered} )
def listUsers(request):
    """Render the list of every UserProfile."""
    profiles = UserProfile.objects.all()
    return render(request, 'rango/listUsers.html', {'users': profiles})
def userDetail(request):
    """Show the profile for the user named in the ``username`` GET parameter.

    The original looped over ``getlist('username')`` rebinding the variable,
    which simply reduces to the last submitted value -- exactly what
    ``QueryDict.get`` returns for a repeated key. The debug print is dropped.
    """
    user_name = request.GET.get('username')  # last value wins, as before
    user_detail = UserProfile.objects.get(user__username__iexact=user_name)
    return render(request, 'rango/userDetail.html', {'detail': user_detail})
import pytest
import dataneeds as need
import graph
def test_entities():
    """Node/Edge attribute typing, relation targets, and repr contents."""
    n = graph.Node()
    e = graph.Edge()
    # Unknown attributes must not be silently created.
    with pytest.raises(AttributeError):
        n.foobar
    with pytest.raises(AttributeError):
        n.edges.baz
    assert isinstance(n.id.typ, need.Integer)
    assert isinstance(n.label.typ, need.String)
    assert isinstance(n.edges.towards, graph.Edge) # XXX Relation
    assert isinstance(e.id.typ, need.Integer)
    assert isinstance(e.weight.typ, need.Floating)
    assert isinstance(e.source.towards, graph.Node) # XXX Relation
    assert isinstance(e.target.towards, graph.Node) # XXX Relation
    assert "graph" in repr(n.id)
    assert "Integer" in repr(n.id)
    assert "Node" in repr(n.id)
    assert "id" in repr(n.id)
    # Attributes reached through a relation name both entities in their repr.
    assert "graph.Node" in repr(n.edges.id)
    assert "graph.Edge" in repr(n.edges.id)
    assert "id" in repr(n.edges.id)
def test_binds():
    """Bindings are per-attribute; shared inputs line up across attributes."""
    assert graph.Node().id.bindings == graph.Node.id.bindings
    assert graph.Edge.id.bindings != graph.Node.id.bindings
    assert graph.Node.label.bindings != graph.Node.id.bindings
    assert graph.Node.edges.id.bindings != graph.Node.id.bindings
    assert len(graph.Node.id.bindings) == 2
    assert len(graph.Node.edges.id.bindings) == 2
    assert len(graph.Edge.weight.bindings) == 2
    a, b, *_ = graph.Node.id.bindings
    assert "Node" in str(a)
    assert "Node" in str(b)
    assert "id" in str(a)
    assert "id" in str(b)
    assert a != b
    assert isinstance(a.input, need.Part)
    assert isinstance(a.input.input, need.Cons)
    # id and label of the same binding share one Cons input.
    assert a.input.input == graph.Node.label.bindings[0].input.input
    e = graph.Node.edges.id.bindings[0]
    assert isinstance(e.input, need.Part[need.Each])
    assert isinstance(e.input.input.input, need.Sep)
    assert a.input.input == e.input.input.input.input.input
    assert isinstance(a.input.input.input, need.Sep)
    c = graph.Edge.source.id.bindings[1]
    assert c.input == b.input
    # Local import: Relation/Reference are only needed for these two checks.
    from dataneeds.entity import Relation, Reference
    assert isinstance(graph.Node.edges.target, Relation)
    assert isinstance(graph.Node.edges.target.label, Reference)
def test_request():
    """need.request records one item per attribute touched in the block."""
    with need.request(graph.Node()) as N:
        N.id, N.label, N.edges.id
    assert len(N.items) == 3
    with need.request(graph.Edge()) as E:
        E.id, E.weight, E.source.label, E.target.label
    assert len(E.items) == 4
    # Aggregations (sum over a relation attribute) still count as one item.
    with need.request(graph.Node()) as N:
        N.label, sum(N.edges.weight)
    assert len(N.items) == 2
def test_resolve():
    """resolve_primary groups bindings by source; direct attrs need no joins."""
    with need.request(graph.Node()) as N:
        N.id, N.label
    rs = N.resolve_primary()
    assert(len(rs) == 2)
    (s0, bs0), (s1, bs1) = rs.items()
    assert isinstance(s0, need.Here)
    assert isinstance(s1, need.Here)
    assert s0 != s1
    # Every binding in a group points back at that group's source.
    assert all(b.source == s0 for b in bs0)
    assert all(b.source == s1 for b in bs1)
    assert N.resolve_joins() == {}
    with need.request(graph.Edge()) as E:
        E.id, E.weight, E.source.id, E.target.id
    rs = E.resolve_primary()
    assert all(len(r) == 4 for r in rs.values())
    assert len(rs) == 2
    assert E.resolve_joins() == {}
def test_resolve_join():
    """Requesting related labels forces joins; check the combined resolution."""
    with need.request(graph.Edge()) as E:
        E.source.label, E.target.label, E.weight
    ps = E.resolve_primary()
    assert(len(ps) == 2)
    # p1, p2, p3 = ps.values()
    p1, p2 = ps.values()
    assert p1[0].binds.general == graph.Edge.source.id
    assert p1[1].binds.general == graph.Edge.target.id
    assert p1[2].binds.general == graph.Edge.weight
    joins = E.resolve_joins()
    assert len(joins) == 4
    assert all(len(js) == 2 for js in joins.values())
    assert all(len(js) == 2 for js in joins.values())
    js0, js1 = joins[p1[0]][1].values()
    assert js0[0].binds.general == graph.Node.id
    assert js0[1].binds.general == graph.Node.label
    rs = E.resolve_combined()
    # assert len(rs) == 12
    assert len(rs) == 8
    # Key each combination by the (source, join-a, join-b) file names.
    lookup = {(s.name, ja.name, jb.name): rr
              for (s, ja, jb), rr in rs.items()}
    assert set(lookup.keys()) == {('elf', 'nlf', 'nlf'),
                                  ('elf', 'nlf', 'nef'),
                                  ('elf', 'nef', 'nlf'),
                                  ('elf', 'nef', 'nef'),
                                  ('nef', 'nlf', 'nlf'),
                                  ('nef', 'nlf', 'nef'),
                                  ('nef', 'nef', 'nlf'),
                                  ('nef', 'nef', 'nef'), }
    # ('enf', 'nef', 'nef'),
    # ('enf', 'nlf', 'nlf'),
    # ('enf', 'nef', 'nlf'),
    # ('enf', 'nlf', 'nef'), }
    r, js = lookup['elf', 'nlf', 'nlf']
    assert len(js) == 2
def test_resolve_join_same():
    """Joining the same related entity twice must not duplicate combinations."""
    with need.request(graph.Edge()) as E:
        E.id, E.source.id, E.source.label, E.weight
    rs = E.resolve_combined()
    # assert len(rs) == 6
    assert len(rs) == 4
|
# -*- coding: utf-8 -*-
text = """
a = 4
b = "hello"
for i in range(a):
    print(b*i)
"""
# Executes the snippet above in this module's namespace. Safe only because
# the string is a hard-coded literal -- never exec untrusted input.
exec(text)
#!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
if __name__ == '__main__':
  # This pre-commit hook was replaced by a pre-submit hook (see PRESUBMIT.py).
  sys.exit(0)
# NOTE: everything below is unreachable when run as a script (sys.exit above
# fires first); it executes only if this module is imported, preserving the
# legacy delegation path.
# Resolve a symlinked hook file back to its real location.
f = __file__
if os.path.islink(f):
  f = os.path.abspath(os.path.join(os.path.dirname(f), os.readlink(f)))
# Make the repository root importable, then delegate to the real hook.
top_dir = os.path.abspath(os.path.join(os.path.dirname(f), '..'))
sys.path.append(top_dir)
from hooks import pre_commit
sys.exit(pre_commit.Main(sys.argv[1:]))
|
from django.conf.urls import url
from . import views
# URL routes for this app.
# BUGFIX: the original was missing the comma after the news_retrieval entry,
# which made this list literal a SyntaxError.
urlpatterns = [
    url(r'^index/', views.index),
    url(r'^news_retrieval', views.search),
    url(r'^notfound/', views.notfound, name='notfound'),
]
import os
import sys
import shutil
import uuid
import click
from distutils.dir_util import copy_tree
class SkeletonException(Exception):
    ''' Base class for all skepy skeleton errors. '''
class SkepyCancelled(SkeletonException):
    ''' Raised when the user declines the overwrite prompt, cancelling skeleton creation. '''
class SkepyTmpdirExist(SkeletonException):
    ''' Raised when the temporary working directory already exists. '''
class Project:
    """Create a new project skeleton from the bundled template.

    Workflow: copy the packaged template into a uniquely named temp dir
    inside this package, substitute the package name into paths and files,
    copy the result to the destination, and always clean the temp dir up.
    """
    def __init__(self, project_name: str = None):
        # NOTE(review): annotated ``str`` but defaults to None; with None the
        # project path falls back to the CWD, yet _apply_pkg_name_to_src_dir
        # joins paths with self._project_name -- confirm None is actually a
        # supported mode.
        self._project_name = project_name
        if project_name is None:
            self._project_path = os.getcwd()
        else:
            self._project_path = os.path.join(os.getcwd(), project_name)
        self._module_path = os.path.dirname(sys.modules[__name__].__file__)
        # Unique suffix so concurrent runs never collide on the temp dir.
        uid = uuid.uuid4()
        dir_path = os.path.dirname(sys.modules[__name__].__file__)
        self._tmpdir_path = os.path.join(dir_path, f'skepy_tmpdir_{uid}')
    def create_skeleton(self) -> int:
        """Build the skeleton.

        Returns 0 on success, 1 if the user cancelled at the overwrite
        prompt, 2 if the temp working directory already exists.
        """
        try:
            if os.path.exists(self._tmpdir_path):
                raise SkepyTmpdirExist
            self._copy_template_to_tmpdir()
            self._apply_pkg_name_to_src_dir()
            self._apply_pkg_name_to_templates()
            self._copy_templates()
        except SkepyCancelled:
            click.echo('Cancelled', err=True)
            return 1
        except SkepyTmpdirExist:
            click.echo(f'{self._tmpdir_path} already exist.', err=True)
            return 2
        finally:
            # Always remove the temp dir, on success and failure alike.
            if os.path.exists(self._tmpdir_path):
                shutil.rmtree(self._tmpdir_path)
        return 0
    def _copy_template_to_tmpdir(self):
        # Copy the packaged 'template' tree into the temp working dir.
        # NOTE: distutils is deprecated (removed in Python 3.12);
        # shutil.copytree(..., dirs_exist_ok=True) is the modern replacement.
        template_path = os.path.join(self._module_path, 'template')
        copy_tree(template_path, self._tmpdir_path)
    def _copy_templates(self):
        # Copy the customised temp tree to the destination, asking before
        # merging into a directory that already exists (only exact 'Y' proceeds).
        if os.path.exists(self._project_path):
            answer = input('Do you want to create your project here? (Y/n):')
            if answer != 'Y':
                raise SkepyCancelled
            copy_tree(self._tmpdir_path, self._project_path)
        else:
            shutil.copytree(self._tmpdir_path, self._project_path)
    def _apply_pkg_name_to_src_dir(self):
        # Rename src/pkg_name -> src/<project_name>, removing any leftover
        # directory of that name first.
        proj_path = os.path.join(self._tmpdir_path, 'src', self._project_name)
        if os.path.exists(proj_path):
            shutil.rmtree(proj_path)
        pkg_path = os.path.join(self._tmpdir_path, 'src', 'pkg_name')
        os.rename(pkg_path, proj_path)
    def _apply_env_to_file(self, path: str):
        # Expand $PKG_NAME (and any other environment variables) in the file,
        # rewriting it in place.
        with open(path, 'r') as f:
            expandedvars_setup_py = os.path.expandvars(f.read())
        with open(path, 'w') as f:
            f.write(expandedvars_setup_py)
    def _apply_pkg_name_to_templates(self):
        # Expose the project name to _apply_env_to_file via the environment.
        os.environ['PKG_NAME'] = self._project_name
        target_files = [
            os.path.join(self._tmpdir_path, 'setup.py'),
            os.path.join(self._tmpdir_path, 'src', self._project_name, 'cli.py')
        ]
        for target_file in target_files:
            self._apply_env_to_file(target_file)
|
from django.shortcuts import render
from django.http import HttpResponse
from .models import Post
from django.views.generic import ListView, DetailView, CreateView
# posts = [
# {
# 'author': 'A. P. J. Abdul Kalam',
# 'title': 'Past',
# 'content': 'Accept your past without regret ! \n Handle your present with confidence \n and Face you future without fear',
# 'date_posted': "January 26th 2003"
# },
# {
# 'author': 'Napolean Bonaparte',
# 'title': 'Victory',
# 'content': 'Victory is not always winning the battle \n but rising everytime you fall',
# 'date_posted': 'March 11th 1706'
# },
#
# ]
def home(request):
    """Function-based home page: render every Post on blog/home.html."""
    all_posts = Post.objects.all()
    return render(request, 'blog/home.html', {'posts': all_posts})
class PostListView(ListView):
    # Class-based equivalent of home(): list all posts, newest first.
    model = Post
    template_name = 'blog/home.html'   # reuse the same template as home()
    context_object_name = 'posts'      # the template iterates over "posts"
    ordering = ['-date_posted']        # newest first
class PostDetailView(DetailView):
    # Single-post page; relies entirely on Django's DetailView defaults
    # (template name, context variable) for the Post model.
    model = Post
class PostCreateView(CreateView):
    # Post-creation form; the author field is not on the form -- it is
    # stamped programmatically in form_valid below.
    model = Post
    fields = ['title', 'content']
    def form_valid(self, form):
        # Set the logged-in user as the author before the parent saves.
        form.instance.author = self.request.user
        return super().form_valid(form)
def about(request):
    """Render the static about page with its title in the context."""
    page_context = {'title': 'About'}
    return render(request, 'blog/about.html', page_context)
# Create your views here.
|
import os
from collections import defaultdict
from simhash import simhash
import json
from nltk.tokenize import RegexpTokenizer
from nltk.stem.porter import *
from bs4 import BeautifulSoup
import math
inverted_index = defaultdict(list)
class posting:
    """One inverted-index entry: a term's statistics within one document.

    Note: the lowercase class name and the 'occurence' spelling are kept
    as-is -- external code reads these attributes by name.
    """
    def __init__(self, docID, occurence, importance):
        self.docID = docID            # numeric document id
        self.occurence = occurence    # term frequency within the document
        self.importance = importance  # 1 if term appeared in h1/h2/h3/b, else 0
    def __repr__(self):
        # Added for debuggability when inspecting partial indexes.
        return (f'posting(docID={self.docID!r}, occurence={self.occurence!r}, '
                f'importance={self.importance!r})')
def fetch(file, num_file):
    """Parse one crawled page (a JSON file with url/content/encoding keys).

    Returns (postings, url, fingerprint):
      postings    -- {stemmed token: posting(num_file, freq, importance)}
      url         -- the page URL from the JSON
      fingerprint -- simhash of the token frequency dict (for dedup)
    """
    # BUG FIX: the file handle was opened and never closed; use a context
    # manager so it is released even if json.load raises.
    with open(file) as f:
        json_file = json.load(f)
    url = json_file["url"]
    soup = BeautifulSoup(json_file["content"].encode(json_file["encoding"], "strict"),features = "html.parser")
    tokenizer = RegexpTokenizer(r'[a-zA-Z0-9]+')
    # Words in headings and bold text are flagged "important" (bit = 1).
    important_words = []
    for tag_name in ('h1', 'h2', 'h3', 'b'):
        for node in soup.find_all(tag_name):
            important_words += tokenizer.tokenize(node.get_text().lower())
    stemmer = PorterStemmer()
    important_words = set(stemmer.stem(word) for word in important_words)
    text = soup.get_text().lower()
    tokens = [stemmer.stem(word) for word in tokenizer.tokenize(text)]
    frequency_dict = {}
    for word in tokens:
        frequency_dict[word] = frequency_dict.get(word, 0) + 1
    return_dict = {
        word: posting(num_file, freq, 1 if word in important_words else 0)
        for word, freq in frequency_dict.items()
    }
    return return_dict, url, simhash(frequency_dict)
def off_load(file_name):
    """Write the in-memory inverted index to `file_name` and reset it.

    One line per token: "token:docID occ imp\t...\n". The global
    inverted_index is replaced with a fresh defaultdict afterwards.
    """
    global inverted_index
    print("off_load")
    # BUG FIX: use a context manager so the file is closed (and buffers
    # flushed) even if a write raises; the original leaked the handle.
    with open(file_name, 'w') as f:
        for token, postings in sorted(inverted_index.items(), key=lambda x: x[0]):
            f.write(token + ':')
            for entry in postings:
                f.write(str(entry.docID) + " " + str(entry.occurence) + " " + str(entry.importance) + "\t")
            f.write("\n")
    inverted_index = defaultdict(list)
if __name__ == "__main__":
    # Walk the DEV corpus, build the in-memory inverted index, and dump a
    # partial index to disk every 20000 documents (plus a final flush).
    num_file = 0
    doc_info = {}
    # NOTE(review): f_table is opened but never written or closed (its only
    # use is commented out below) -- looks like leftover code.
    f_table = open("document_info.txt",'a+')
    for root1, dirs1, files1 in os.walk("DEV"):
        for dir in dirs1:
            for root2, dirs2, files2 in os.walk(root1+os.path.sep+dir):
                for file in files2:
                    num_file+=1
                    file_dict, url, sim = fetch(root1+os.path.sep+dir+os.path.sep+file, num_file)
                    doc_info[num_file] = (url, sim)
                    for k,v in file_dict.items():
                        inverted_index[k].append(v)
                    if num_file%20000==0:
                        off_load("partial_index"+os.path.sep+f"inverted_index{num_file//20000}.txt")
    # Flush whatever remains after the walk.
    off_load("partial_index"+os.path.sep+f"inverted_index{num_file//20000+1}.txt")
    with open("document_info.txt",'w') as outfile:
        # NOTE(review): doc_info values contain simhash objects; confirm they
        # are JSON-serialisable, otherwise this dump raises TypeError.
        json.dump(doc_info,outfile)
    # for num, (url, sim) in sorted(doc_info.items(), key = lambda x:x[0]):
    #     f_table.write(str(num)+"\t"+url+"\t"+str(sim)+'\n')
    print(num_file)
|
import os
from configparser import ConfigParser
from enum import Enum
NAME = 'gitflow'
AUTHOR = 'samuel.oggier@gmail.com'
# Read the package version from config.ini next to this module; fall back to
# a development placeholder when the option is absent.
with open(os.path.abspath(os.path.join(os.path.dirname(__file__), 'config.ini')), 'r') as __config_file:
    __config = ConfigParser()
    __config.read_file(f=__config_file)
    VERSION = __config.get(section=__config.default_section, option='version', fallback='0.0.0-dev')
class VersioningScheme(Enum):
    # NOTE(review): the trailing commas make these member values the tuples
    # (1,) and (2,), not ints. Members still compare fine by identity, but
    # confirm nothing relies on .value before "fixing" this.
    # SemVer tags
    SEMVER = 1,
    # SemVer tags, sequence number tags
    SEMVER_WITH_SEQ = 2,
# Maps the config-file scheme name to its enum member.
VERSIONING_SCHEMES = {
    'semver': VersioningScheme.SEMVER,
    'semverWithSeq': VersioningScheme.SEMVER_WITH_SEQ,
}
# config keys
CONFIG_VERSIONING_SCHEME = 'versioningScheme'
CONFIG_VERSION_TYPES = 'releaseTypes'
CONFIG_PROJECT_PROPERTY_FILE = 'propertyFile'
CONFIG_VERSION_PROPERTY = 'versionProperty'
CONFIG_SEQUENCE_NUMBER_PROPERTY = 'sequenceNumberProperty'
CONFIG_BUILD = 'build'
CONFIG_ON_VERSION_CHANGE = 'onVersionChange'
CONFIG_RELEASE_BRANCH_BASE = 'releaseBranchBase'
CONFIG_RELEASE_BRANCH_PREFIX = 'releaseBranchPrefix'
CONFIG_RELEASE_BRANCH_PATTERN = 'releaseBranchPattern'
CONFIG_WORK_BRANCH_PATTERN = 'workBranchPattern'
CONFIG_VERSION_TAG_PREFIX = 'versionTagPrefix'
CONFIG_VERSION_TAG_PATTERN = 'versionTagPattern'
CONFIG_DISCONTINUATION_TAG_PREFIX = 'discontinuationTagPrefix'
CONFIG_DISCONTINUATION_TAG_PATTERN = 'discontinuationTagPattern'
CONFIG_INITIAL_VERSION = 'initialVersion'
# config defaults
DEFAULT_CONFIG_FILE_EXTENSIONS = ['yml', 'json']
DEFAULT_CONFIGURATION_FILE_NAMES = ['.gitflow.' + ext for ext in DEFAULT_CONFIG_FILE_EXTENSIONS]
# Index 1 selects '.gitflow.json' as the default configuration file name.
DEFAULT_CONFIG_FILE = DEFAULT_CONFIGURATION_FILE_NAMES[1]
DEFAULT_RELEASE_BRANCH_BASE = "master"
DEFAULT_VERSIONING_SCHEME = 'semver'
DEFAULT_RELEASE_BRANCH_PREFIX = 'release/'
DEFAULT_RELEASE_BRANCH_PATTERN = r'(?P<major>\d+)\.(?P<minor>\d+)'
DEFAULT_WORK_BRANCH_PATTERN = r'(?P<type>feature|fix|chore|issue)/(?P<name>[^/]+)'
DEFAULT_VERSION_VAR_NAME = 'version'
DEFAULT_VERSION_TAG_PREFIX = None
DEFAULT_SEMVER_VERSION_TAG_PATTERN = r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)' \
                                     r'(-(?P<prerelease_type>[a-zA-Z][a-zA-Z0-9]*)' \
                                     r'(\.(?P<prerelease_version>\d+))?)?'
DEFAULT_SEMVER_WITH_SEQ_VERSION_TAG_PATTERN = r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)' \
                                              r'-((?P<prerelease_type>(0|[1-9][0-9]*))?' \
                                              r'([.-](?P<prerelease_version>\d+))?)?'
DEFAULT_DISCONTINUATION_TAG_PREFIX = 'discontinued/'
DEFAULT_DISCONTINUATION_TAG_PATTERN = r'(?P<major>\d+)\.(?P<minor>\d+)(?:\.(?P<patch>\d+)' \
                                      r'(-(?P<prerelease_type>[a-zA-Z][a-zA-Z0-9]*)' \
                                      r'(\.(?P<prerelease_version>\d+))?)?)?'
DEFAULT_PROPERTY_ENCODING = 'UTF-8'
# Human-readable description of the version-string grammar (for help text).
TEXT_VERSION_STRING_FORMAT = "<major:uint>.<minor:uint>.<patch:uint>" \
                             "[-<prerelease_type:(a-zA-Z)(a-zA-Z0-9)*>.<prerelease_version:uint>]" \
                             "[+<build_info:(a-zA-Z0-9)+>]"
DEFAULT_PRE_RELEASE_QUALIFIERS = "alpha,beta"
DEFAULT_INITIAL_VERSION = '1.0.0-alpha.1'
DEFAULT_INITIAL_SEQ_VERSION = '1.0.0-1'
DEFAULT_CONFIG = {
    CONFIG_PROJECT_PROPERTY_FILE: 'project.properties',
    CONFIG_RELEASE_BRANCH_BASE: 'master'
}
# prefixes with a trailing slash for proper prefix matching
LOCAL_BRANCH_PREFIX = 'refs/heads/'
LOCAL_TAG_PREFIX = 'refs/tags/'
REMOTES_PREFIX = 'refs/remotes/'
BRANCH_PATTERN = '(?P<parent>refs/heads/|refs/remotes/(?P<remote>[^/]+)/)(?P<name>.+)'
LOCAL_AND_REMOTE_BRANCH_PREFIXES = [LOCAL_BRANCH_PREFIX, REMOTES_PREFIX]
BRANCH_PREFIX_DEV = 'dev'
BRANCH_PREFIX_PROD = 'prod'
BUILD_STAGE_TYPE_ASSEMBLE = 'assemble'
BUILD_STAGE_TYPE_TEST = 'test'
BUILD_STAGE_TYPE_INTEGRATION_TEST = 'integration_test'
BUILD_STAGE_TYPES = [
    BUILD_STAGE_TYPE_ASSEMBLE,
    BUILD_STAGE_TYPE_TEST,
    BUILD_STAGE_TYPE_INTEGRATION_TEST
]
def __setattr__(self, name, value):
    """Write-once attribute setter: allow the first assignment of a name and
    raise AttributeError on any attempt to rebind it (const-module guard)."""
    if not hasattr(self, name):
        super(self.__class__, self).__setattr__(name, value)
        return
    raise AttributeError('Can\'t reassign const attribute "' + name + '"')
class BranchClass(Enum):
    # Classification of git branches used by the workflow logic.
    # NOTE(review): the trailing commas make every member value a tuple,
    # e.g. (1,) instead of 1 -- confirm nothing reads .value before changing.
    DEVELOPMENT_BASE = 1,
    RELEASE = 2,
    WORK_DEV = 3,
    WORK_PROD = 4,
# Maps a work-branch prefix ('dev'/'prod') to its branch class.
BRANCH_CLASS_BY_SUPERTYPE = {
    BRANCH_PREFIX_PROD: BranchClass.WORK_PROD,
    BRANCH_PREFIX_DEV: BranchClass.WORK_DEV,
}
# Presumably maps each branch class to the class it branches off from --
# confirm against the workflow code that consumes it.
BRANCHING = {
    BranchClass.WORK_DEV: BranchClass.DEVELOPMENT_BASE,
    BranchClass.WORK_PROD: BranchClass.RELEASE,
    BranchClass.RELEASE: BranchClass.DEVELOPMENT_BASE,
}
# TODO Accounts for two actual arguments. Adjust when docopt option counting is fixed.
ERROR_VERBOSITY = 0
INFO_VERBOSITY = 1
DEBUG_VERBOSITY = 2
TRACE_VERBOSITY = 3
OS_IS_POSIX = os.name == 'posix'
# Process exit codes for aborted runs.
EX_ABORTED = 2
EX_ABORTED_BY_USER = 3
# ['--first-parent'] to ignore merged tags
BRANCH_COMMIT_SCAN_OPTIONS = []
|
#!/usr/bin/env python
from __future__ import print_function
import chainer
import chainer.functions as F
import chainer.links as L
# U-net https://arxiv.org/pdf/1611.07004v1.pdf
# convolution-batchnormalization-(dropout)-relu
class ConvBNR(chainer.Chain):
    """Conv (or Deconv) -> optional BatchNorm -> optional dropout -> activation.

    sample='down' uses a stride-2 4x4 convolution; any other value uses a
    stride-2 4x4 deconvolution (both pad 1).
    """
    def __init__(self, ch0, ch1, use_bn=True,
                 sample='down', activation=F.relu, dropout=False):
        # NOTE(review): plain (non-Link) attributes are assigned before
        # super().__init__(); this mirrors the upstream pix2pix example but
        # confirm it works with the chainer version in use.
        self.use_bn = use_bn
        self.activation = activation
        self.dropout = dropout
        w = chainer.initializers.Normal(0.02)
        super(ConvBNR, self).__init__()
        with self.init_scope():
            if sample == 'down':
                self.c = L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=w)
            else:
                self.c = L.Deconvolution2D(ch0, ch1, 4, 2, 1, initialW=w)
            if use_bn:
                self.bn = L.BatchNormalization(ch1)
    def forward(self, x):
        # Order: conv -> (bn) -> (dropout) -> (activation).
        h = self.c(x)
        if self.use_bn:
            h = self.bn(h)
        if self.dropout:
            h = F.dropout(h)
        if self.activation is not None:
            h = self.activation(h)
        return h
class Encoder(chainer.Chain):
    """U-net encoder: c0 keeps resolution, c1..c7 each downsample by 2.

    forward() returns ALL intermediate feature maps so the decoder can use
    them as skip connections.
    """
    def __init__(self, in_ch):
        w = chainer.initializers.Normal(0.02)
        super(Encoder, self).__init__()
        with self.init_scope():
            # c0: 3x3 stride-1 conv (no downsampling).
            self.c0 = L.Convolution2D(in_ch, 64, 3, 1, 1, initialW=w)
            self.c1 = ConvBNR(64, 128, use_bn=True, sample='down',
                              activation=F.leaky_relu, dropout=False)
            self.c2 = ConvBNR(128, 256, use_bn=True, sample='down',
                              activation=F.leaky_relu, dropout=False)
            self.c3 = ConvBNR(256, 512, use_bn=True, sample='down',
                              activation=F.leaky_relu, dropout=False)
            self.c4 = ConvBNR(512, 512, use_bn=True, sample='down',
                              activation=F.leaky_relu, dropout=False)
            self.c5 = ConvBNR(512, 512, use_bn=True, sample='down',
                              activation=F.leaky_relu, dropout=False)
            self.c6 = ConvBNR(512, 512, use_bn=True, sample='down',
                              activation=F.leaky_relu, dropout=False)
            self.c7 = ConvBNR(512, 512, use_bn=True, sample='down',
                              activation=F.leaky_relu, dropout=False)
    def forward(self, x):
        # Collect every stage's output; hs[0] is the c0 feature map.
        hs = [F.leaky_relu(self.c0(x))]
        for i in range(1, 8):
            hs.append(self['c%d' % i](hs[i-1]))
        return hs
class Decoder(chainer.Chain):
    """U-net decoder: upsampling stages whose inputs are the concatenation of
    the previous output and the mirrored encoder feature map (skip link);
    c7 is a final 3x3 stride-1 conv producing out_ch channels."""
    def __init__(self, out_ch):
        w = chainer.initializers.Normal(0.02)
        super(Decoder, self).__init__()
        with self.init_scope():
            self.c0 = ConvBNR(512, 512, use_bn=True, sample='up',
                              activation=F.relu, dropout=True)
            self.c1 = ConvBNR(1024, 512, use_bn=True,
                              sample='up', activation=F.relu, dropout=True)
            self.c2 = ConvBNR(1024, 512, use_bn=True,
                              sample='up', activation=F.relu, dropout=True)
            self.c3 = ConvBNR(1024, 512, use_bn=True,
                              sample='up', activation=F.relu, dropout=False)
            self.c4 = ConvBNR(1024, 256, use_bn=True,
                              sample='up', activation=F.relu, dropout=False)
            self.c5 = ConvBNR(512, 128, use_bn=True, sample='up',
                              activation=F.relu, dropout=False)
            self.c6 = ConvBNR(256, 64, use_bn=True, sample='up',
                              activation=F.relu, dropout=False)
            self.c7 = L.Convolution2D(128, out_ch, 3, 1, 1, initialW=w)
    def forward(self, hs):
        # hs: list of encoder feature maps (deepest last).
        h = self.c0(hs[-1])
        for i in range(1, 8):
            # Skip connection: concatenate the mirrored encoder output.
            h = F.concat([h, hs[-i-1]])
            if i < 7:
                h = self['c%d' % i](h)
            else:
                # Last step: plain conv to the requested output channels.
                h = self.c7(h)
        return h
class Discriminator(chainer.Chain):
    """Conditional discriminator: separate first conv for each of the two
    input images, then a joint downsampling stack ending in a 1-channel
    convolution (a spatial score map; no global pooling)."""
    def __init__(self, in_ch, out_ch):
        w = chainer.initializers.Normal(0.02)
        super(Discriminator, self).__init__()
        with self.init_scope():
            self.c0_0 = ConvBNR(in_ch, 32, use_bn=False, sample='down',
                                activation=F.leaky_relu, dropout=False)
            self.c0_1 = ConvBNR(out_ch, 32, use_bn=False, sample='down',
                                activation=F.leaky_relu, dropout=False)
            self.c1 = ConvBNR(64, 128, use_bn=True, sample='down',
                              activation=F.leaky_relu, dropout=False)
            self.c2 = ConvBNR(128, 256, use_bn=True, sample='down',
                              activation=F.leaky_relu, dropout=False)
            self.c3 = ConvBNR(256, 512, use_bn=True, sample='down',
                              activation=F.leaky_relu, dropout=False)
            self.c4 = L.Convolution2D(512, 1, 3, 1, 1, initialW=w)
    def forward(self, x_0, x_1):
        # Process each image separately, then judge the concatenated pair.
        h = F.concat([self.c0_0(x_0), self.c0_1(x_1)])
        h = self.c1(h)
        h = self.c2(h)
        h = self.c3(h)
        h = self.c4(h)
        return h
|
import numpy as np
import scipy as sp
import networkx as nx
from Kmeans import kmeans
from scipy import linalg
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def normalized_spectral_clustering_shi(data, k): # data is a list of points in R_2
    """Shi-Malik style normalized spectral clustering: eigenvectors of
    D^-1 L, then k-means, then a scatter plot of the clusters."""
    # 1. construct similarity
    # 2. construct laplacian
    # 3. compute first k eigenvectors
    # 4. make U matrix with eigenvectors as columns
    # 5. initialize y_i's as rows of the matrix
    # 6. apply k means
    # 7. output cluster
    laplacian, degreeinv = laplacian_matrix(data)
    dinvl = degreeinv @ laplacian
    # NOTE(review): scipy.linalg.eigh's `eigvals` keyword is deprecated and
    # removed in recent SciPy; `subset_by_index=(0, k-1)` is the replacement.
    u_first_k_evectors = sp.linalg.eigh(dinvl, eigvals=(0, k-1))[1]
    #make_plot(u_first_k_evectors)
    # for i in range(len(data)):
    #     #convert arrays to points
    #     u_first_k_evectors[i]
    U = u_first_k_evectors.T
    idontneedthis, ineedthis = U.shape
    # NOTE(review): kmeans is re-run for each eigenvector row and only the
    # last iteration's (clusters, assns) is used below -- confirm this is
    # intended (standard spectral clustering runs k-means once on the rows
    # of U as k-dimensional points).
    for i in range(k):
        please = U[i].T
        pleasework = please.reshape((1,ineedthis))
        clusters, assns = kmeans(pleasework, k)
    print(assns)
    make_plot(data,assns,k)
    plt.show()
def gen_random_points(number, length):
    """Return `number` points, each drawn uniformly from [0, 10)^length."""
    return [10.0 * np.random.rand(length) for _ in range(number)]
def laplacian_matrix(data):
    """Return (L, D^-1): the unnormalized graph Laplacian L = D - W of the
    similarity matrix W, and the inverse of the diagonal degree matrix D.

    Vectorized: the original filled D and D^-1 element-wise in a Python loop
    and summed each row twice.
    """
    similar = similarity_matrix(data)
    row_sums = similar.sum(axis=1)       # degrees d_i = sum_j w_ij
    degree = np.diag(row_sums)
    # Safe to invert: w_ii = exp(0) = 1, so every row sum is >= 1.
    degreeinv = np.diag(1.0 / row_sums)
    laplacian = degree - similar
    return laplacian, degreeinv
def similarity_matrix(data):
    """Dense similarity matrix with entries w_ij = exp(-||x_i - x_j||).

    Vectorized replacement for the original O(n^2) Python double loop
    (identical values). Accepts any sequence of equal-length points.
    """
    points = np.asarray(data, dtype=float)
    if points.ndim == 1:
        # A sequence of scalars: treat each as a 1-D point.
        points = points[:, None]
    # Pairwise difference tensor of shape (n, n, d).
    diffs = points[:, None, :] - points[None, :, :]
    return np.exp(-np.linalg.norm(diffs, axis=-1))
def make_plot(data,assignment,k):
    # Scatter-plot each cluster in its own colour on the current figure.
    # NOTE(review): the boolean-mask indexing assumes `data` is an ndarray
    # and `assignment` an array of cluster ids -- plain Python lists fail.
    for i in range(k):
        d = data[assignment == i].T
        x = d[0]
        y = d[1]
        plt.scatter(x,y)
    # fig = plt.figure()
    # ax = fig.add_subplot(111, projection='3d')
    # for i in range(k):
    #     d = data[assignment == i].T
    #     x = d[0]
    #     y = d[1]
    #     z = d[2]
    #     ax.scatter(x,y,z)
# Demo run: cluster 100 standard-normal 2-D points into k=2 clusters.
data = np.random.normal(size=(100, 2))
normalized_spectral_clustering_shi(data, 2)
|
import pytest
from FeedUnit42v2 import Client, fetch_indicators, get_indicators_command, handle_multiple_dates_in_one_field, \
get_indicator_publication, get_attack_id_and_value_from_name, parse_indicators, parse_campaigns, \
parse_reports_and_report_relationships, create_attack_pattern_indicator, create_course_of_action_indicators, \
get_ioc_type, get_ioc_value, create_list_relationships, extract_ioc_value, \
change_attack_pattern_to_stix_attack_pattern, DemistoException
from test_data.feed_data import INDICATORS_DATA, ATTACK_PATTERN_DATA, MALWARE_DATA, RELATIONSHIP_DATA, REPORTS_DATA, \
REPORTS_INDICATORS, ID_TO_OBJECT, INDICATORS_RESULT, CAMPAIGN_RESPONSE, CAMPAIGN_INDICATOR, COURSE_OF_ACTION_DATA, \
PUBLICATIONS, ATTACK_PATTERN_INDICATOR, COURSE_OF_ACTION_INDICATORS, RELATIONSHIP_OBJECTS, INTRUSION_SET_DATA, \
DUMMY_INDICATOR_WITH_RELATIONSHIP_LIST, STIX_ATTACK_PATTERN_INDICATOR, SUB_TECHNIQUE_INDICATOR, \
SUB_TECHNIQUE_DATA, INVALID_ATTACK_PATTERN_STRUCTURE
@pytest.mark.parametrize('command, args, response, length', [
    (get_indicators_command, {'limit': 2, 'indicators_type': 'indicator'}, INDICATORS_DATA, 2),
    (get_indicators_command, {'limit': 5, 'indicators_type': 'indicator'}, INDICATORS_DATA, 5)
]) # noqa: E124
def test_commands(command, args, response, length, mocker):
    """
    Given
    - get_indicators_command func
    - command args
    - command raw response
    When
    - mock the Client's get_stix_objects.
    Then
    - convert the result to human readable table
    - create the context
    validate the raw_response
    """
    client = Client(api_key='1234', verify=False)
    mocker.patch.object(client, 'fetch_stix_objects_from_api', return_value=response)
    command_results = command(client, args)
    indicators = command_results.raw_response
    assert len(indicators) == length
# Canned API responses keyed by STIX object type, used by the fetch tests.
TYPE_TO_RESPONSE = {
    'indicator': INDICATORS_DATA,
    'report': REPORTS_DATA,
    'attack-pattern': ATTACK_PATTERN_DATA,
    'malware': MALWARE_DATA,
    'campaign': CAMPAIGN_RESPONSE,
    'relationship': RELATIONSHIP_DATA,
    'course-of-action': COURSE_OF_ACTION_DATA,
    'intrusion-set': INTRUSION_SET_DATA
}
# Same fixture but with a malformed attack-pattern payload.
# NOTE(review): "WIITH" is a typo; the name is referenced below, so rename
# both sites together if cleaning it up.
TYPE_TO_RESPONSE_WIITH_INVALID_ATTACK_PATTERN_DATA = {
    'indicator': INDICATORS_DATA,
    'report': REPORTS_DATA,
    'attack-pattern': INVALID_ATTACK_PATTERN_STRUCTURE,
    'malware': MALWARE_DATA,
    'campaign': CAMPAIGN_RESPONSE,
    'relationship': RELATIONSHIP_DATA,
    'course-of-action': COURSE_OF_ACTION_DATA,
    'intrusion-set': INTRUSION_SET_DATA
}
def test_fetch_indicators_command(mocker):
    """
    Given
    - fetch incidents command
    - command args
    - command raw response
    When
    - mock the Client's get_stix_objects.
    Then
    - run the fetch incidents command using the Client
    Validate the amount of indicators fetched
    Validate that the dummy indicator with the relationships list fetched
    """
    def mock_get_stix_objects(test, **kwargs):
        # Stand-in for Client.fetch_stix_objects_from_api: stores the canned
        # response for the requested STIX type on the client.
        type_ = kwargs.get('type')
        client.objects_data[type_] = TYPE_TO_RESPONSE[type_]
    client = Client(api_key='1234', verify=False)
    mocker.patch.object(client, 'fetch_stix_objects_from_api', side_effect=mock_get_stix_objects)
    indicators = fetch_indicators(client, create_relationships=True)
    assert len(indicators) == 17
    assert DUMMY_INDICATOR_WITH_RELATIONSHIP_LIST in indicators
def test_fetch_indicators_fails_on_invalid_attack_pattern_structure(mocker):
    """
    Given
    - Invalid attack pattern indicator structure
    When
    - fetching indicators
    Then
    - DemistoException is raised.
    """
    def mock_get_stix_objects(test, **kwargs):
        # Same stub as above, but serving the malformed attack-pattern data.
        type_ = kwargs.get('type')
        client.objects_data[type_] = TYPE_TO_RESPONSE_WIITH_INVALID_ATTACK_PATTERN_DATA[type_]
    client = Client(api_key='1234', verify=False)
    mocker.patch.object(client, 'fetch_stix_objects_from_api', side_effect=mock_get_stix_objects)
    with pytest.raises(DemistoException, match=r"Failed parsing attack indicator"):
        fetch_indicators(client, create_relationships=True)
def test_get_attack_id_and_value_from_name_on_invalid_indicator():
    """
    Given
    - Invalid attack indicator structure
    When
    - parsing the indicator name.
    Then
    - DemistoException is raised.
    """
    with pytest.raises(DemistoException, match=r"Failed parsing attack indicator"):
        get_attack_id_and_value_from_name({"name": "test"})
def test_feed_tags_param(mocker):
    """
    Given
    - fetch incidents command
    - command args
    - command raw response
    When
    - mock the feed tags param.
    - mock the Client's get_stix_objects.
    Then
    - run the fetch incidents command using the Client
    Validate The value of the tags field.
    """
    def mock_get_stix_objects(test, **kwargs):
        # Serve the canned response for the requested STIX type.
        type_ = kwargs.get('type')
        client.objects_data[type_] = TYPE_TO_RESPONSE[type_]
    client = Client(api_key='1234', verify=False)
    mocker.patch.object(client, 'fetch_stix_objects_from_api', side_effect=mock_get_stix_objects)
    # Second positional arg is the feed-tags list to merge into each indicator.
    indicators = fetch_indicators(client, ['test_tag'])
    assert set(indicators[0].get('fields').get('tags')) == {'malicious-activity', 'test_tag'}
@pytest.mark.parametrize('field_name, field_value, expected_result', [
    ('created', '2017-05-31T21:31:43.540Z', '2017-05-31T21:31:43.540Z'),
    ('created', '2019-04-25T20:53:07.719Z\n2019-04-25T20:53:07.814Z', '2019-04-25T20:53:07.719Z'),
    ('modified', '2017-05-31T21:31:43.540Z', '2017-05-31T21:31:43.540Z'),
    ('modified', '2020-03-16T15:38:37.650Z\n2020-01-17T16:45:24.252Z', '2020-03-16T15:38:37.650Z'),
])
def test_handle_multiple_dates_in_one_field(field_name, field_value, expected_result):
    """
    Given
    - created / modified indicator field
    When
    - this field contains two dates
    Then
    - run the handle_multiple_dates_in_one_field
    Validate The field contain one specific date.
    """
    assert handle_multiple_dates_in_one_field(field_name, field_value) == expected_result
def test_get_indicator_publication():
    """
    Given
    - Indicator with external_reference field
    When
    - we extract this field to publications grid field
    Then
    - run the get_indicator_publication
    Validate The grid field extracted successfully.
    """
    assert get_indicator_publication(ATTACK_PATTERN_DATA[0]) == PUBLICATIONS
@pytest.mark.parametrize('indicator_name, expected_result', [
    ({"name": "T1564.004: NTFS File Attributes",
      "x_mitre_is_subtechnique": True,
      "x_panw_parent_technique_subtechnique": "Hide Artifacts: NTFS File Attributes"},
     ("T1564.004", "Hide Artifacts: NTFS File Attributes")),
    ({"name": "T1078: Valid Accounts"}, ("T1078", "Valid Accounts"))
])
def test_get_attack_id_and_value_from_name(indicator_name, expected_result):
    """
    Given
    - Indicator with name field
    When
    - we extract this field to ID and value fields
    Then
    - run the get_attack_id_and_value_from_name
    Validate The ID and value fields extracted successfully.
    """
    # Covers both a sub-technique (parent name substituted) and a plain technique.
    assert get_attack_id_and_value_from_name(indicator_name) == expected_result
def test_parse_indicators():
    """
    Given
    - list of IOCs in STIX format.
    When
    - we extract this IOCs list to Demisto format
    Then
    - run the parse_indicators
    - Validate The IOCs list extracted successfully.
    """
    assert parse_indicators(INDICATORS_DATA, [], '')[0] == INDICATORS_RESULT
def test_parse_reports():
    """
    Given
    - list of reports in STIX format.
    When
    - we extract this reports list to Demisto format
    Then
    - run the parse_reports
    Validate The reports list extracted successfully.
    """
    assert parse_reports_and_report_relationships(REPORTS_DATA, [], '') == REPORTS_INDICATORS
def test_parse_campaigns():
    """
    Given
    - list of campaigns in STIX format.
    When
    - we extract this campaigns list to Demisto format
    Then
    - run the parse_campaigns
    Validate The campaigns list extracted successfully.
    """
    assert parse_campaigns(CAMPAIGN_RESPONSE, [], '') == CAMPAIGN_INDICATOR
def test_create_attack_pattern_indicator():
    """
    Given
    - list of IOCs in STIX format.
    When
    - we extract this attack pattern list to Demisto format
    Then
    - run the attack_pattern_indicator
    Validate The attack pattern list extracted successfully.
    """
    # NOTE(review): the final boolean presumably toggles MITRE-style vs
    # STIX-style output -- confirm against FeedUnit42v2.
    assert create_attack_pattern_indicator(ATTACK_PATTERN_DATA, [], '', True) == ATTACK_PATTERN_INDICATOR
    assert create_attack_pattern_indicator(ATTACK_PATTERN_DATA, [], '', False) == STIX_ATTACK_PATTERN_INDICATOR
    assert create_attack_pattern_indicator(SUB_TECHNIQUE_DATA, [], '', True) == SUB_TECHNIQUE_INDICATOR
def test_create_course_of_action_indicators():
    """
    Given
    - list of course of action in STIX format.
    When
    - we extract this course of action list to Demisto format
    Then
    - run the create_course_of_action_indicators
    Validate The course of action list extracted successfully.
    """
    assert create_course_of_action_indicators(COURSE_OF_ACTION_DATA, [], '') == COURSE_OF_ACTION_INDICATORS
def test_get_ioc_type():
    """
    Given
    - IOC ID to get its type.
    When
    - we extract its type from the pattern field
    Then
    - run the get_ioc_type
    Validate The IOC type extracted successfully.
    """
    assert get_ioc_type('indicator--01a5a209-b94c-450b-b7f9-946497d91055', ID_TO_OBJECT) == 'IP'
    assert get_ioc_type('indicator--fd0da09e-a0b2-4018-9476-1a7edd809b59', ID_TO_OBJECT) == 'URL'
def test_get_ioc_value():
    """
    Given
    - IOC ID to get its value.
    When
    - we extract its value from the name field
    Then
    - run the get_ioc_value
    Validate The IOC value extracted successfully.
    """
    assert get_ioc_value('indicator--01a5a209-b94c-450b-b7f9-946497d91055', ID_TO_OBJECT) == 'T111: Software Discovery'
    assert get_ioc_value('indicator--fd0da09e-a0b2-4018-9476-1a7edd809b59', ID_TO_OBJECT) == 'Deploy XSOAR Playbook'
    assert get_ioc_value('report--0f86dccd-29bd-46c6-83fd-e79ba040bf0', ID_TO_OBJECT) == '[Unit42 ATOM] Maze Ransomware'
    assert get_ioc_value('attack-pattern--4bed873f-0b7d-41d4-b93a-b6905d1f90b0',
                         ID_TO_OBJECT) == "Virtualization/Sandbox Evasion: Time Based Evasion"
def test_create_list_relationships():
    """
    Given
    - list of relationships in STIX format.
    When
    - we extract this relationships list to Demisto format
    Then
    - run the create_list_relationships
    Validate The relationships list extracted successfully.
    """
    assert create_list_relationships(RELATIONSHIP_DATA, ID_TO_OBJECT) == RELATIONSHIP_OBJECTS
def test_get_ioc_value_from_ioc_name():
    """
    Given
    - IOC obj to get its value.
    When
    - we extract its value from the name field
    Then
    - run the get_ioc_value
    Validate The IOC value extracted successfully.
    """
    # The SHA-256 hash is extracted from a compound STIX pattern string.
    name = "([file:name = 'blabla' OR file:name = 'blabla'] AND [file:hashes.'SHA-256' = '4f75622c2dd839f'])"
    assert extract_ioc_value(name) == "4f75622c2dd839f"
def test_change_attack_pattern_to_stix_attack_pattern():
    # Verifies the type gains a "STIX " prefix and the kill-chain/description
    # fields are renamed with a "stix" prefix.
    assert change_attack_pattern_to_stix_attack_pattern({"type": "ind", "fields":
                                                        {"killchainphases": "kill chain", "description": "des"}}) == \
        {"type": "STIX ind", "fields": {"stixkillchainphases": "kill chain", "stixdescription": "des"}}
|
"""
While / Else
Contadores
Acumuladores
"""
contador = 1
acumulador = 0
while contador <= 10:
print(contador, acumulador)
acumulador += contador
contador += 1
print(acumulador)
|
import numpy as np
import matplotlib.pyplot as plt
import time
import os
from scipy import stats
import pickle
import json
from numpy.random import RandomState
import argparse
import multiprocessing as mp
np.set_printoptions(suppress=True)
np.set_printoptions(precision=3)
np.random.seed(0)  # reproducible runs
parser = argparse.ArgumentParser(description='Random Games of Skill form DPP')
parser.add_argument('--dim', type=int, default=1000)
parser.add_argument('--nb_iters', type=int, default=200)
args = parser.parse_args()
# NOTE(review): the functions below take lr / improvement_pct_threshold as
# parameters; confirm these module-level constants are still used anywhere.
LR = 0.5
TH = 0.03
expected_card = []
sizes = []
# Per-run results directory: timestamp + game dimension.
time_string = time.strftime("%Y%m%d-%H%M%S")
PATH_RESULTS = os.path.join('results', time_string + '_' + str(args.dim))
if not os.path.exists(PATH_RESULTS):
    os.makedirs(PATH_RESULTS)
# Search over the pure strategies for the best response to a mixed strategy:
# returns a one-hot vector on the column minimising strat @ payoffs.
def get_br_to_strat(strat, payoffs=None, verbose=False):
    weighted_payouts = strat @ payoffs
    best = np.argmin(weighted_payouts)
    response = np.zeros_like(weighted_payouts)
    response[best] = 1
    if verbose:
        print(weighted_payouts[best], "exploitability")
    return response
# Nash-equilibrium solver: classic fictitious play on a payoff matrix.
# Returns (averages, exps): the running-average strategy after each step
# (iters + 1 rows) and the per-step exploitability gaps.
def fictitious_play(iters=2000, payoffs=None, verbose=False):
    dim = payoffs.shape[0]
    start = np.random.uniform(0, 1, (1, dim))
    start = start / start.sum(axis=1)[:, None]
    pop = start
    averages = start
    exps = []
    for _ in range(iters):
        avg = np.average(pop, axis=0)
        response = get_br_to_strat(avg, payoffs=payoffs)
        gap = response @ payoffs @ avg.T - avg @ payoffs @ response.T
        exps.append(gap)
        averages = np.vstack((averages, avg))
        pop = np.vstack((pop, response))
    return averages, exps
# Exploitability of the fixed population's meta-Nash: solve the restricted
# (empirical) game, aggregate the population by the resulting meta-Nash, and
# measure how much a best response gains against that aggregate.
def get_exploitability(pop, payoffs, iters=1000):
    restricted_game = pop @ payoffs @ pop.T
    averages, _ = fictitious_play(payoffs=restricted_game, iters=iters)
    aggregate = averages[-1] @ pop  # population rows mixed by the meta-Nash
    response = get_br_to_strat(aggregate, payoffs=payoffs)
    gain_response = response @ payoffs @ aggregate
    gain_aggregate = aggregate @ payoffs @ response.T
    return gain_response - gain_aggregate
def joint_loss(pop, payoffs, meta_nash, k, lambda_weight, lr):
    """Choose a one-hot response for learner k, randomly favouring either
    raw payoff or population diversity (expected cardinality of L = M M^T)."""
    dim = payoffs.shape[0]
    br = np.zeros((dim,))
    values = []
    cards = []
    for i in range(dim):
        # Candidate pure strategy e_i evaluated against the nash-weighted enemy.
        br_tmp = np.zeros((dim, ))
        br_tmp[i] = 1.
        aggregated_enemy = meta_nash @ pop[:k]
        value = br_tmp @ payoffs @ aggregated_enemy.T
        # Hypothetically mix e_i into slot k and score the diversity of the
        # resulting empirical-game kernel.
        pop_k = lr * br_tmp + (1 - lr) * pop[k]
        pop_tmp = np.vstack((pop[:k], pop_k))
        M = pop_tmp @ payoffs @ pop_tmp.T
        # NOTE(review): metanash_tmp is only used by the commented-out
        # weighted-kernel line below.
        metanash_tmp, _ = fictitious_play(payoffs=M, iters=1000)
        # L = np.diag(metanash_tmp[-1]) @ M @ M.T @ np.diag(metanash_tmp[-1])
        L = M @ M.T
        # Expected cardinality: tr(I - (L + I)^-1).
        l_card = np.trace(np.eye(L.shape[0]) - np.linalg.inv(L + np.eye(L.shape[0])))
        cards.append(l_card)
        values.append(value)
    # NOTE(review): np.random.randn() samples a standard normal, so this is
    # NOT "with probability lambda_weight" (np.random.rand() would be).
    # Confirm against the reference implementation before changing.
    if np.random.randn() < lambda_weight:
        br[np.argmax(values)] = 1
    else:
        br[np.argmax(cards)] = 1
    return br
def psro_steps(iters=5, payoffs=None, verbose=False, seed=0,
               num_learners=4, improvement_pct_threshold=.03, lr=.2, loss_func='dpp', full=False):
    """Run PSRO (loss_func='br') or Diverse PSRO (anything else) for `iters`
    outer iterations; returns (population, exploitabilities, cardinalities).
    NOTE(review): the `verbose` and `full` parameters are currently unused."""
    dim = payoffs.shape[0]
    r = np.random.RandomState(seed)
    pop = r.uniform(0, 1, (1 + num_learners, dim))
    pop = pop / pop.sum(axis=1)[:, None]
    exp = get_exploitability(pop, payoffs, iters=1000)
    exps = [exp]
    # Expected cardinality of the kernel L = M M^T: the diversity measure.
    M = pop @ payoffs @ pop.T
    L = M @ M.T
    l_card = np.trace(np.eye(L.shape[0]) - np.linalg.inv(L + np.eye(L.shape[0])))
    l_cards = [l_card]
    learner_performances = [[.1] for i in range(num_learners + 1)]
    for i in range(iters):
        # Define the weighting towards diversity as a function of the fixed population size, this is currently a hyperparameter
        lambda_weight = 0.85
        if i % 5 == 0:
            print('iteration: ', i, ' exp full: ', exps[-1])
            print('size of pop: ', pop.shape[0])
        for j in range(num_learners):
            # first learner (when j=num_learners-1) plays against normal meta Nash
            # second learner plays against meta Nash with first learner included, etc.
            k = pop.shape[0] - j - 1
            emp_game_matrix = pop[:k] @ payoffs @ pop[:k].T
            meta_nash, _ = fictitious_play(payoffs=emp_game_matrix, iters=1000)
            population_strategy = meta_nash[-1] @ pop[:k] # aggregated enemy according to nash
            if loss_func == 'br':
                # standard PSRO
                br = get_br_to_strat(population_strategy, payoffs=payoffs)
            else:
                # Diverse PSRO
                br = joint_loss(pop, payoffs, meta_nash[-1], k, lambda_weight, lr)
                # NOTE(review): br_orig is computed but never used.
                br_orig = get_br_to_strat(population_strategy, payoffs=payoffs)
            # Update the mixed strategy towards the pure strategy which is returned as the best response to the
            # nash equilibrium that is being trained against.
            pop[k] = lr * br + (1 - lr) * pop[k]
            performance = pop[k] @ payoffs @ population_strategy.T + 1 # make it positive for pct calculation
            learner_performances[k].append(performance)
            # if the first learner plateaus, add a new policy to the population
            if j == num_learners - 1 and performance / learner_performances[k][-2] - 1 < improvement_pct_threshold:
                learner = np.random.uniform(0, 1, (1, dim))
                learner = learner / learner.sum(axis=1)[:, None]
                pop = np.vstack((pop, learner))
                learner_performances.append([0.1])
        # calculate exploitability for meta Nash of whole population
        exp = get_exploitability(pop, payoffs, iters=1000)
        exps.append(exp)
        M = pop @ payoffs @ pop.T
        L = M @ M.T
        l_card = np.trace(np.eye(L.shape[0]) - np.linalg.inv(L + np.eye(L.shape[0])))
        l_cards.append(l_card)
    return pop, exps, l_cards
# Define the self-play algorithm
def self_play_steps(iters=10, payoffs=None, verbose=False, improvement_pct_threshold=.03, lr=.2, seed=0):
    """Naive self-play baseline: the newest policy trains against the
    previous one; a fresh random policy is appended when improvement
    plateaus. Returns (population, exploitabilities, cardinalities)."""
    dim = payoffs.shape[0]
    r = np.random.RandomState(seed)
    pop = r.uniform(0, 1, (2, dim))
    pop = pop / pop.sum(axis=1)[:, None]
    exp = get_exploitability(pop, payoffs, iters=1000)
    exps = [exp]
    performances = [.01]
    # Expected cardinality of L = M M^T (diversity measure, tracked per step).
    M = pop @ payoffs @ pop.T
    L = M@M.T
    l_card = np.trace(np.eye(L.shape[0]) - np.linalg.inv(L + np.eye(L.shape[0])))
    l_cards = [l_card]
    for i in range(iters):
        if i % 10 == 0:
            print('iteration: ', i, 'exploitability: ', exps[-1])
        # Move the newest policy towards the best response to its predecessor.
        br = get_br_to_strat(pop[-2], payoffs=payoffs)
        pop[-1] = lr * br + (1 - lr) * pop[-1]
        performance = pop[-1] @ payoffs @ pop[-2].T + 1
        performances.append(performance)
        # Plateau detection: append a fresh random policy when the relative
        # improvement drops below the threshold.
        if performance / performances[-2] - 1 < improvement_pct_threshold:
            learner = np.random.uniform(0, 1, (1, dim))
            learner = learner / learner.sum(axis=1)[:, None]
            pop = np.vstack((pop, learner))
        exp = get_exploitability(pop, payoffs, iters=1000)
        exps.append(exp)
        M = pop @ payoffs @ pop.T
        L = M @ M.T
        l_card = np.trace(np.eye(L.shape[0]) - np.linalg.inv(L + np.eye(L.shape[0])))
        l_cards.append(l_card)
    return pop, exps, l_cards
# Define the PSRO rectified nash algorithm
def psro_rectified_steps(iters=10, payoffs=None, verbose=False, eps=1e-2, seed=0,
                         num_start_strats=1, num_pseudo_learners=4, lr=0.3, threshold=0.001):
    """Run PSRO rectified Nash (PSRO-rN).

    Every policy with meta-Nash mass above ``eps`` spawns a learner that is
    trained against the meta-Nash-weighted mixture of the policies it beats,
    until its relative improvement falls below ``threshold``.

    Args:
        iters: number of logical iterations.
        payoffs: (dim, dim) payoff matrix.
        verbose: unused; kept for interface compatibility.
        eps: minimum meta-Nash mass for a policy to spawn a learner.
        seed: RNG seed used for all random draws (reproducible runs).
        num_start_strats: initial population size.
        num_pseudo_learners: learner updates counted as one logical iteration.
        lr: step size towards the best response.
        threshold: relative-improvement stopping threshold per learner.

    Returns:
        (pop, exps, l_cards) as in the other PSRO variants.
    """
    dim = payoffs.shape[0]
    r = np.random.RandomState(seed)
    pop = r.uniform(0, 1, (num_start_strats, dim))
    pop = pop / pop.sum(axis=1)[:, None]
    exp = get_exploitability(pop, payoffs, iters=1000)
    exps = [exp]
    counter = 0
    M = pop @ payoffs @ pop.T
    L = M @ M.T
    # Expected DPP cardinality of kernel L: tr(I - (L + I)^-1).
    l_card = np.trace(np.eye(L.shape[0]) - np.linalg.inv(L + np.eye(L.shape[0])))
    l_cards = [l_card]
    while counter < iters * num_pseudo_learners:
        if counter % (5 * num_pseudo_learners) == 0:
            print('iteration: ', int(counter / num_pseudo_learners), ' exp: ', exps[-1])
            print('size of population: ', pop.shape[0])
        new_pop = np.copy(pop)
        emp_game_matrix = pop @ payoffs @ pop.T
        averages, _ = fictitious_play(payoffs=emp_game_matrix, iters=iters)
        # go through all policies. If the policy has positive meta Nash mass,
        # find policies it wins against, and play against meta Nash weighted mixture of those policies
        for j in range(pop.shape[0]):
            if counter > iters * num_pseudo_learners:
                return pop, exps, l_cards
            # if positive mass, add a new learner to pop and update it with steps, submit if over thresh
            # keep track of counter
            if averages[-1][j] > eps:
                # create learner
                # BUGFIX: draw from the seeded RandomState (was np.random.uniform)
                # so runs are reproducible given `seed`.
                learner = r.uniform(0, 1, (1, dim))
                learner = learner / learner.sum(axis=1)[:, None]
                new_pop = np.vstack((new_pop, learner))
                idx = new_pop.shape[0] - 1
                current_performance = 0.02
                last_performance = 0.01
                while current_performance / last_performance - 1 > threshold:
                    counter += 1
                    # BUGFIX: copy the row before thresholding. The original
                    # binarised emp_game_matrix[j, :] in place, so from the
                    # second pass onward every entry was >= 0 and the
                    # "policies j beats" mask degenerated to all ones.
                    mask = emp_game_matrix[j, :].copy()
                    mask[mask >= 0] = 1
                    mask[mask < 0] = 0
                    weights = np.multiply(mask, averages[-1])
                    weights /= weights.sum()
                    strat = weights @ pop
                    br = get_br_to_strat(strat, payoffs=payoffs)
                    new_pop[idx] = lr * br + (1 - lr) * new_pop[idx]
                    last_performance = current_performance
                    current_performance = new_pop[idx] @ payoffs @ strat + 1
                    if counter % num_pseudo_learners == 0:
                        # count this as an 'iteration': log exploitability and
                        # cardinality of the population being built.
                        exp = get_exploitability(new_pop, payoffs, iters=1000)
                        exps.append(exp)
                        # BUGFIX: measure cardinality on new_pop (was stale
                        # `pop`), the same population used for exploitability.
                        M = new_pop @ payoffs @ new_pop.T
                        L = M @ M.T
                        l_card = np.trace(np.eye(L.shape[0]) - np.linalg.inv(L + np.eye(L.shape[0])))
                        l_cards.append(l_card)
        pop = np.copy(new_pop)
    return pop, exps, l_cards
def run_experiment(param_seed):
    """Run a single seeded experiment for every enabled PSRO variant.

    Args:
        param_seed: tuple (params, seed) where params is the experiment
            configuration dict and seed indexes this experiment.

    Returns:
        Dict mapping '<method>_exps' / '<method>_cardinality' to the curves
        produced by each enabled method (empty lists for disabled methods).
    """
    params, seed = param_seed
    iters = params['iters']
    num_threads = params['num_threads']
    dim = params['dim']
    lr = params['lr']
    thresh = params['thresh']

    # Disabled methods keep empty lists so every key is always present.
    results = {
        'psro_exps': [], 'psro_cardinality': [],
        'pipeline_psro_exps': [], 'pipeline_psro_cardinality': [],
        'dpp_psro_exps': [], 'dpp_psro_cardinality': [],
        'rectified_exps': [], 'rectified_cardinality': [],
        'self_play_exps': [], 'self_play_cardinality': [],
    }

    print('Experiment: ', seed + 1)
    np.random.seed(seed)
    # Random antisymmetric ("Games of Skill") payoff matrix, normalised to [-1, 1].
    W = np.random.randn(dim, dim)
    S = np.random.randn(dim, 1)
    payoffs = 0.5 * (W - W.T) + S - S.T
    payoffs /= np.abs(payoffs).max()

    if params['psro']:
        print('PSRO')
        _, exps, cards = psro_steps(iters=iters, num_learners=1, seed=seed + 1,
                                    improvement_pct_threshold=thresh, lr=lr,
                                    payoffs=payoffs, loss_func='br')
        results['psro_exps'], results['psro_cardinality'] = exps, cards
    if params['pipeline_psro']:
        print('Pipeline PSRO')
        _, exps, cards = psro_steps(iters=iters, num_learners=num_threads, seed=seed + 1,
                                    improvement_pct_threshold=thresh, lr=lr,
                                    payoffs=payoffs, loss_func='br')
        results['pipeline_psro_exps'], results['pipeline_psro_cardinality'] = exps, cards
    if params['dpp_psro']:
        print('DPP')
        _, exps, cards = psro_steps(iters=iters, num_learners=num_threads, seed=seed + 1,
                                    improvement_pct_threshold=thresh, lr=lr,
                                    payoffs=payoffs, loss_func='dpp')
        results['dpp_psro_exps'], results['dpp_psro_cardinality'] = exps, cards
    if params['rectified']:
        print('Rectified')
        _, exps, cards = psro_rectified_steps(iters=iters, num_pseudo_learners=num_threads,
                                              payoffs=payoffs, seed=seed + 1,
                                              lr=lr, threshold=thresh)
        results['rectified_exps'], results['rectified_cardinality'] = exps, cards
    if params['self_play']:
        print('Self-play')
        _, exps, cards = self_play_steps(iters=iters, payoffs=payoffs,
                                         improvement_pct_threshold=thresh, lr=lr,
                                         seed=seed + 1)
        results['self_play_exps'], results['self_play_cardinality'] = exps, cards
    return results
def run_experiments(num_experiments=1, iters=40, num_threads=20, dim=60, lr=0.6, thresh=0.001, logscale=True,
                    psro=False,
                    pipeline_psro=False,
                    rectified=False,
                    self_play=False,
                    dpp_psro=False,
                    ):
    """Run `num_experiments` seeded experiments in parallel, persist the raw
    curves, and plot mean exploitability / cardinality with SEM error bands.

    NOTE(review): relies on module-level globals defined elsewhere in this
    file: PATH_RESULTS (output dir), mp (multiprocessing), pickle, stats
    (scipy.stats), plt and args — confirm against the file header.
    """
    # Full configuration, saved to params.json for reproducibility.
    params = {
        'num_experiments': num_experiments,
        'iters': iters,
        'num_threads': num_threads,
        'dim': dim,
        'lr': lr,
        'thresh': thresh,
        'psro': psro,
        'pipeline_psro': pipeline_psro,
        'dpp_psro': dpp_psro,
        'rectified': rectified,
        'self_play': self_play,
    }
    # One list of curves per method; each element is one experiment's curve.
    psro_exps = []
    psro_cardinality = []
    pipeline_psro_exps = []
    pipeline_psro_cardinality = []
    dpp_psro_exps = []
    dpp_psro_cardinality = []
    rectified_exps = []
    rectified_cardinality = []
    self_play_exps = []
    self_play_cardinality = []
    with open(os.path.join(PATH_RESULTS, 'params.json'), 'w', encoding='utf-8') as json_file:
        json.dump(params, json_file, indent=4)
    # Fan the experiments out over a process pool, one seed each.
    pool = mp.Pool()
    result = pool.map(run_experiment, [(params, i) for i in range(num_experiments)])
    for r in result:
        psro_exps.append(r['psro_exps'])
        psro_cardinality.append(r['psro_cardinality'])
        pipeline_psro_exps.append(r['pipeline_psro_exps'])
        pipeline_psro_cardinality.append(r['pipeline_psro_cardinality'])
        dpp_psro_exps.append(r['dpp_psro_exps'])
        dpp_psro_cardinality.append(r['dpp_psro_cardinality'])
        rectified_exps.append(r['rectified_exps'])
        rectified_cardinality.append(r['rectified_cardinality'])
        self_play_exps.append(r['self_play_exps'])
        self_play_cardinality.append(r['self_play_cardinality'])
    # Persist the raw per-experiment curves before plotting.
    d = {
        'psro_exps': psro_exps,
        'psro_cardinality': psro_cardinality,
        'pipeline_psro_exps': pipeline_psro_exps,
        'pipeline_psro_cardinality': pipeline_psro_cardinality,
        'dpp_psro_exps': dpp_psro_exps,
        'dpp_psro_cardinality': dpp_psro_cardinality,
        'rectified_exps': rectified_exps,
        'rectified_cardinality': rectified_cardinality,
        'self_play_exps': self_play_exps,
        'self_play_cardinality': self_play_cardinality,
    }
    pickle.dump(d, open(os.path.join(PATH_RESULTS, 'data.p'), 'wb'))
    def plot_error(data, label=''):
        # Mean curve across experiments with a standard-error band.
        # Closes over `alpha` (band opacity) from the enclosing scope.
        data_mean = np.mean(np.array(data), axis=0)
        error_bars = stats.sem(np.array(data))
        plt.plot(data_mean, label=label)
        plt.fill_between([i for i in range(data_mean.size)],
                         np.squeeze(data_mean - error_bars),
                         np.squeeze(data_mean + error_bars), alpha=alpha)
    alpha = .4
    # j == 0: exploitability figure; j == 1: cardinality figure.
    for j in range(2):
        fig_handle = plt.figure()
        if psro:
            if j == 0:
                plot_error(psro_exps, label='PSRO')
            elif j == 1:
                plot_error(psro_cardinality, label='PSRO')
        if pipeline_psro:
            if j == 0:
                plot_error(pipeline_psro_exps, label='P-PSRO')
            elif j == 1:
                plot_error(pipeline_psro_cardinality, label='P-PSRO')
        if rectified:
            if j == 0:
                # PSRO-rN runs vary in length; truncate all to the shortest.
                length = min([len(l) for l in rectified_exps])
                for i, l in enumerate(rectified_exps):
                    rectified_exps[i] = rectified_exps[i][:length]
                plot_error(rectified_exps, label='PSRO-rN')
            elif j == 1:
                length = min([len(l) for l in rectified_cardinality])
                for i, l in enumerate(rectified_cardinality):
                    rectified_cardinality[i] = rectified_cardinality[i][:length]
                plot_error(rectified_cardinality, label='PSRO-rN')
        if self_play:
            if j == 0:
                plot_error(self_play_exps, label='Self-play')
            elif j == 1:
                plot_error(self_play_cardinality, label='Self-play')
        if dpp_psro:
            if j == 0:
                plot_error(dpp_psro_exps, label='Ours')
            elif j == 1:
                plot_error(dpp_psro_cardinality, label='Ours')
        plt.legend(loc="upper left")
        plt.title('Dim {:d}'.format(args.dim))
        if logscale and (j==0):
            # Exploitability spans orders of magnitude; log scale for clarity.
            plt.yscale('log')
        plt.savefig(os.path.join(PATH_RESULTS, 'figure_'+ str(j) + '.pdf'))
if __name__ == "__main__":
    # Entry point. NOTE(review): `args` (parsed CLI options with nb_iters/dim)
    # and `TH` (plateau threshold) are module-level globals defined elsewhere
    # in this file — confirm against the file header.
    run_experiments(num_experiments=10, num_threads=2, iters=args.nb_iters, dim=args.dim, lr=.5, thresh=TH,
                    psro=True,
                    pipeline_psro=True,
                    rectified=True,
                    self_play=True,
                    dpp_psro=True,
                    )
|
# 2d difference array
class Solution:
    def rangeAddQueries(self, n, q):
        """Apply each query (r1, c1, r2, c2) as a +1 over that sub-rectangle
        of an n x n grid using a 2-D difference array, then integrate the
        difference grid with a 2-D prefix sum and return the final grid."""
        diff = [[0] * n for _ in range(n)]
        # Mark the four rectangle corners in the difference grid.
        for r1, c1, r2, c2 in q:
            diff[r1][c1] += 1
            if r2 + 1 < n:
                diff[r2 + 1][c1] -= 1
            if c2 + 1 < n:
                diff[r1][c2 + 1] -= 1
            if r2 + 1 < n and c2 + 1 < n:
                diff[r2 + 1][c2 + 1] += 1
        # Inclusion-exclusion prefix sum recovers the grid in place.
        for i in range(n):
            for j in range(n):
                up = diff[i - 1][j] if i else 0
                left = diff[i][j - 1] if j else 0
                corner = diff[i - 1][j - 1] if i and j else 0
                diff[i][j] += up + left - corner
        return diff
|
# WhatsApp Web bulk sender: opens each named chat and sends `msg` `count` times.
# BUGFIX: the find_element_by_* helper methods were removed in Selenium 4;
# migrated to the find_element(By.<strategy>, value) API.
from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()
driver.get('https://web.whatsapp.com/')
all_names = ['Hack', 'Me', 'Bot']
msg = 'Good Morning'
count = 3
# Block until the user has authenticated by scanning the QR code.
input('Enter anything after scanning QR code')
for name in all_names:
    # Locate the chat by its visible title attribute and open it.
    user = driver.find_element(By.XPATH, '//span[@title = "{}"]'.format(name))
    user.click()
    msg_box = driver.find_element(By.CLASS_NAME, 'input-container')
    # NOTE(review): assumes the send button is clicked once per message
    # (original indentation was ambiguous) — confirm against intent.
    for i in range(count):
        msg_box.send_keys(msg)
        button = driver.find_element(By.CLASS_NAME, 'compose-btn-send')
        button.click()
|
# import packages used
import numpy as np
import tools_Exercise_1_6 as tools
import scipy.optimize as optimize
import scipy.interpolate as interpolate
import time as time
def setup():
    """Build and return the model parameter container.

    Returns a bare `par` class whose attributes hold all model, shock,
    convergence and simulation parameters, with the state-space grids
    attached by `setup_grids`.
    """
    class par: pass
    # Model parameters
    par.beta = 0.999
    par.B = 0.33
    par.upsillon=20
    par.Lt = 1
    par.W = 20
    par.G= 0.99
    par.chi = 30
    par.xi1 = 0
    par.xi2 = 0
    par.D = 0.005
    par.d = 0.005
    par.varphi = 0
    par.kappa1 = 1
    par.kappa2 = 8
    par.Upsillon = 0.51*par.upsillon
    par.Z = 75000
    par.gamma1 = 0.055
    par.tests = 0.01
    par.varsigma = 13
    # BUGFIX: par.varrho was assigned twice (2 here, 0.4 further down); the
    # first assignment was dead code and has been removed — 0.4 is the value
    # every consumer saw.
    par.t = 1.8
    par.phi1 = 0.2*0.37
    par.phi2 = 0.2*0.33
    par.phi3 = 0.2*0.3
    par.sigma = 0.001
    par.varrho = 0.4
    par.alpha=0.3
    par.rho=5
    par.g=40
    par.mu = 2
    par.H = 4.7
    par.eta = 0.02
    par.tau = 0.588
    par.testtype = 1
    par.T = 0.3
    par.diminishtype = 1
    par.t2 = 1
    par.t1 = 1
    par.t3 = 0.9
    # Shock parameters
    par.num_M = 7
    par.M_max = 0.3
    par.num_shocks = 8
    # Convergens settings
    par.max_iter = 5000 # maximum number of iterations
    par.tol = 10e-2
    # Simulation parameters
    par.simN = 720
    par.I_ini = 0.01
    par.Q_ini = 0.00
    par.R_ini = 0.00
    par.lw_ini = 1
    # Setup grid
    setup_grids(par)
    return par
def setup_grids(par):
    """Attach the state-space grids (I, Q, R, lw) to `par` and return it."""
    #Grid of disease parameters
    par.grid_I = tools.nonlinspace(1.0e-10,par.M_max,par.num_M,1.2) # non-linear spaced points: like np.linspace with unequal spacing
    par.grid_Q = tools.nonlinspace(1.0e-10,par.M_max,par.num_M,1.2) # non-linear spaced points: like np.linspace with unequal spacing
    par.grid_R = tools.nonlinspace(1.0e-10,0.8,par.num_M,1.5) # non-linear spaced points: like np.linspace with unequal spacing
    # Finer (100-point, evenly spaced) grid for the choice variable lw.
    par.grid_lw = tools.nonlinspace(1.0e-10,1,100,1)
    #Gauss-Hermite quadrature nodes/weights (currently disabled)
    # x,w = tools.gauss_hermite(par.num_shocks)
    # par.eps = np.exp(par.sigma*np.sqrt(2)*x)
    # par.eps_w = w/np.sqrt(np.pi)
    return par
def solve_cons_inf(par):
    """Solve the infinite-horizon problem by value-function iteration.

    Precomputes, for every grid point (I, Q, R, lw), the flow profit and the
    next-period disease states, then iterates the Bellman operator on the
    (I, Q, R) grid choosing the on-site labour share lw, until the value
    function changes by less than par.tol or par.max_iter is reached.

    Returns the `sol` container with the value function V, policy lw, and the
    precomputed transition/profit lists.
    """
    # Initialize the solution container.
    class sol: pass
    sol.V = np.ones([par.num_M, par.num_M, par.num_M])*1e-5
    sol.lw = np.zeros([par.num_M, par.num_M, par.num_M])
    sol.it = 0 #Number of iteration
    sol.delta = 1000.0 #Difference between V+ and V
    sol.S=[]
    sol.lo=[]
    sol.s=[]
    sol.wi=[]
    sol.Y=[]
    sol.i=[]
    sol.l=[]
    sol.gamma2=[]
    sol.gamma3=[]
    sol.I_plus=[]
    sol.Q_plus=[]
    sol.R_plus=[]
    sol.p=[]
    sol.pi=[]
    prcompo = 0
    # Precompute flow profit and next-period states for every feasible
    # (I, Q, R, lw) combination, in the same iteration order used below.
    for I in (par.grid_I):
        for Q in (par.grid_Q):
            for R in (par.grid_R):
                for lw in (par.grid_lw):
                    # Infeasible labour allocation: the lw grid is increasing,
                    # so all remaining lw values are infeasible too.
                    if lw+Q+par.D*R > 1:
                        break
                    S=(1-I-Q-R)
                    lo=(1 - lw - Q - par.D*R)
                    # Susceptible and infected shares among on-site workers.
                    s=min(max((lw-(1-par.D)*R)*(1-I/(S+I)),0),1)
                    wi=min(max((lw-(1-par.D)*R)*(I/(S+I)),0),1)
                    Y=max(par.H*np.log(par.upsillon*lw+par.Upsillon*lo)-(par.chi*I)**2 - par.varphi*R, 1.0e-8)
                    w=(lw+Q+lo*par.G)*par.W
                    #print(Y)
                    # Demand (leisure/shopping activity) given infection risk.
                    l=((par.Z*par.phi2*I*max(1-R-Q,1.0e-9)/(par.alpha*par.varsigma))**(1/(par.alpha-1)))*Y
                    if l<0:
                        p=0
                        l=0
                    elif l>1:
                        l=1
                        p=((1-par.alpha)*par.varsigma)*Y**-par.alpha
                    else:
                        p=((1-par.alpha)*par.varsigma*l**(par.alpha) * Y**(-par.alpha))
                    # Cap the price so spending cannot exceed income + transfer.
                    if p*Y>w+par.g:
                        p=(w+par.g)/Y
                    #print(p)
                    #print(l)
                    gamma3=np.array(par.gamma1 * (1+ par.kappa1/(1+Q**(1/par.kappa2))))
                    # Testing regime selects the volume of tests.
                    if par.testtype == 1:
                        tests = par.tests
                    if par.testtype == 2:
                        tests = par.T*Q**par.tau
                    if par.testtype == 3:
                        tests = (par.sigma)/((par.eta-(par.t*I)/(1+I*par.rho)**par.mu))
                    # Diminishing-returns type for detection rate gamma2.
                    # NOTE(review): types 1 and 2 use par.tests rather than the
                    # local `tests` computed above (simu() uses the computed
                    # value) — confirm which is intended.
                    if par.diminishtype ==1:
                        gamma2=np.array(par.sigma + (par.t*par.tests)/((1 +I*par.rho)**par.mu))
                    if par.diminishtype ==2:
                        gamma2=np.array(par.sigma + (par.tests)*par.t2/(1-R))
                    if par.diminishtype ==3:
                        gamma2=np.array(par.sigma+par.t3*np.log(1+tests/1+I))
                    # Next-period disease states, clipped to (1e-9, 1).
                    sol.I_plus.append(max( min((1-par.gamma1-gamma2)*I + par.phi1*s*wi + par.phi2*S*I*l*l + par.phi3*S*I,1),1.0e-9))
                    sol.Q_plus.append(max(min((1- gamma3)*Q + gamma2*I,1),1.0e-9))
                    sol.R_plus.append(max(min(R + par.gamma1*I + gamma3*Q,1),1.0e-9))
                    # Flow profit at this grid point.
                    sol.pi.append(Y*p - (lw+Q)*par.W - lo*par.G*par.W - par.xi1*I**2 - par.xi2*par.d*R)
                    #print(Y*p - (lw+Q)*par.W - lo*par.G*par.W - (par.xi1*I)**2 - par.xi2*par.d*R)
                    #print(par.W+par.g-Y*p)
                    prcompo +=1
    #points=np.meshgrid(par.grid_I, par.grid_Q, par.grid_R, copy=False, indexing='xy')
    points = (par.grid_I, par.grid_Q, par.grid_R)
    #print(np.shape(points))
    #print(max(sol.I_plus))
    #print(min(sol.I_plus))
    #print(max(sol.Q_plus))
    #print(min(sol.Q_plus))
    #print(max(sol.R_plus))
    #print(min(sol.R_plus))
    # All precomputed next-period states, one row per feasible grid point.
    point = np.transpose(np.array([sol.I_plus, sol.Q_plus, sol.R_plus]))
    # Bellman iteration until convergence or the iteration cap.
    while (sol.delta >= par.tol and sol.it < par.max_iter):
        V_next = sol.V.copy()
        # Continuation values at every precomputed next-period state.
        V_plus = interpolate.interpn(points, V_next, point, method='linear', bounds_error=False, fill_value=None)
        ind = -1
        # find V: ind walks the flat precomputed lists in the same order as
        # the precompute loops; Ih/Qh/Rh track the grid indices.
        Ih = -1
        Qh = -1
        Rh = -1
        for I in (par.grid_I):
            Ih +=1
            for Q in (par.grid_Q):
                Qh +=1
                for R in (par.grid_R):
                    Rh +=1
                    for lw in (par.grid_lw):
                        if lw+Q+par.D*R > 1:
                            break
                        ind += 1
                        V_guess =sol.pi[ind] + par.beta*V_plus[ind]
                        # Keep the best lw found so far for this state.
                        if V_guess > sol.V[Ih, Qh, Rh]:
                            sol.V[Ih, Qh, Rh]=V_guess
                            sol.lw[Ih, Qh, Rh]=lw
                Rh=-1
            Qh=-1
        # Update delta and the iteration counter.
        sol.it += 1
        c_new = np.ravel(sol.V)
        c_old = np.ravel(V_next)
        #sol.delta = max(abs(sol.V - V_next))
        sol.delta = max(abs(c_new - c_old))
        print(sol.delta)
    return(sol)
def simu(par, sol):
    """Simulate the model forward for par.simN periods using the solved
    policy sol.lw, starting from the initial disease states in `par`.

    Each period interpolates the policy at the current (I, Q, R) state,
    recomputes prices, demand and detection rates, and steps the disease
    states forward. Returns a `simu` container of time series.
    """
    class simu: pass
    simu.S=np.zeros([par.simN])
    simu.lo=np.zeros([par.simN])
    simu.s=np.zeros([par.simN])
    simu.wi=np.zeros([par.simN])
    simu.Y=np.zeros([par.simN])
    simu.l=np.zeros([par.simN])
    simu.p=np.zeros([par.simN])
    simu.gamma2=np.zeros([par.simN])
    simu.gamma3=np.zeros([par.simN])
    simu.pi=np.zeros([par.simN])
    simu.util=np.zeros([par.simN])
    simu.c=np.zeros([par.simN])
    # Disease states get one extra slot for the final forward step.
    simu.I=np.zeros([par.simN+1])
    simu.Q=np.zeros([par.simN+1])
    simu.R=np.zeros([par.simN+1])
    simu.w=np.zeros([par.simN])
    simu.Pos=np.zeros([par.simN])
    simu.I[0]=(par.I_ini)
    simu.Q[0]=(par.Q_ini)
    simu.R[0]=(par.R_ini)
    simu.lw =np.zeros([par.simN])
    simu.lw[0] = 1
    simu.tests=np.zeros([par.simN])
    ite=0
    points = (par.grid_I, par.grid_Q, par.grid_R)
    while ite < par.simN:
        # Interpolate the policy at the current state, capped at feasibility.
        simu.lw[ite]=min(interpolate.interpn(points, sol.lw, ([simu.I[ite], simu.Q[ite], simu.R[ite]]), method='linear', bounds_error=False, fill_value=None), 1-simu.Q[ite]-simu.R[ite]*par.D)
        simu.lw[ite]=min(simu.lw[ite], 1-simu.Q[ite]-simu.R[ite]*par.d)
        if ite == 0:
            # Full on-site labour in the initial period.
            simu.lw[ite]=1
        simu.S[ite]=(1-simu.I[ite]-simu.Q[ite]-simu.R[ite])
        simu.lo[ite]=(1 - simu.lw[ite] - simu.Q[ite] - par.D*simu.R[ite])
        # Susceptible and infected shares among on-site workers.
        simu.s[ite]=(max((simu.lw[ite]-(1-par.D)*simu.R[ite])*(1-simu.I[ite]/(simu.S[ite]+simu.I[ite])),1.0e-9))
        simu.wi[ite]=(max((simu.lw[ite]-(1-par.D)*simu.R[ite])*(simu.I[ite]/(simu.S[ite]+simu.I[ite])),1.0e-9))
        simu.Y[ite]=(max(par.H*np.log(par.upsillon*simu.lw[ite]+par.Upsillon*simu.lo[ite])-(par.chi*simu.I[ite])**2 - par.varphi*simu.R[ite], 1.0e-9))
        simu.l[ite]=(par.Z*par.phi2*simu.I[ite]*(1-simu.R[ite]-simu.Q[ite])/(par.alpha*par.varsigma))**(1/(par.alpha-1))*simu.Y[ite]
        simu.w[ite]=(simu.lo[ite]*par.G+simu.lw[ite]+simu.Q[ite])*par.W
        # Corner cases for demand; price set accordingly.
        if simu.l[ite] < 0:
            simu.l[ite]=0
            simu.p[ite] = 0
        elif simu.l[ite]>1:
            simu.l[ite]=1
            simu.p[ite]=((1-par.alpha)*par.varsigma)/(simu.Y[ite]**par.alpha)
        else:
            simu.p[ite]=(1-par.alpha)*par.varsigma*simu.l[ite]**(par.alpha) * simu.Y[ite]**(-par.alpha)
        # Cap the price so spending cannot exceed income + transfer.
        if simu.p[ite]*simu.Y[ite]>simu.w[ite]+par.g:
            simu.p[ite]=(simu.w[ite]+par.g)/simu.Y[ite]
        simu.gamma3[ite]=(np.array(par.gamma1 * (1+ par.kappa1/(1+simu.Q[ite]**(1/par.kappa2)))))
        # Testing regime selects the volume of tests this period.
        if par.testtype == 1:
            simu.tests[ite] = par.tests
        if par.testtype == 2:
            simu.tests[ite] = par.T*simu.Q[ite]**par.tau
        if par.testtype == 3:
            simu.tests[ite] = (par.sigma*simu.I[ite])/((par.eta-(par.t*simu.I[ite])/(1+simu.I[ite]*par.rho)**par.mu))
        # Diminishing-returns type for the detection rate gamma2.
        if par.diminishtype ==1:
            simu.gamma2[ite]=np.array(par.sigma + (par.t*simu.tests[ite])/((1 + simu.I[ite]*par.rho)**par.mu))
        if par.diminishtype ==2:
            simu.gamma2[ite]=np.array(par.sigma + (simu.tests[ite])*par.t2/(1-simu.R[ite]))
        if par.diminishtype ==3:
            simu.gamma2[ite]=np.array(par.sigma+par.t3*np.log(1+simu.tests[ite]/1+simu.I[ite]))
        # Flow profit, household utility, consumption and test positivity (%).
        simu.pi[ite]=(simu.Y[ite]*simu.p[ite] -(simu.lw[ite]+simu.Q[ite])*par.W - simu.lo[ite]*par.G*par.W - (par.xi1*simu.I[ite])**2 - par.xi2*par.d*simu.R[ite])
        simu.util[ite]=(par.varsigma*simu.l[ite]**par.alpha*simu.Y[ite]**(1-par.alpha)+simu.w[ite]+par.g-simu.p[ite]*simu.Y[ite]-par.Z*par.phi2*simu.I[ite]*simu.l[ite]*(1-simu.R[ite]-simu.Q[ite])- par.Z*par.phi3*simu.I[ite]*(1-simu.R[ite]-simu.Q[ite]))
        simu.c[ite]=simu.w[ite]+par.g-simu.p[ite]*simu.Y[ite]
        simu.Pos[ite]=simu.gamma2[ite]*simu.I[ite]/(simu.tests[ite])*100
        # Step the disease states forward, clipped to (1e-9, 1).
        simu.I[ite+1]=(max(min((1-par.gamma1-simu.gamma2[ite])*simu.I[ite] + par.phi1*simu.s[ite]*simu.wi[ite] + par.phi2*simu.S[ite]*simu.I[ite]*simu.l[ite]*simu.l[ite] + par.phi3*simu.S[ite]*simu.I[ite],1),1.0e-9))
        simu.Q[ite+1]=(max(min((1- simu.gamma3[ite])*simu.Q[ite] + simu.gamma2[ite]*simu.I[ite],1),1.0e-9))
        simu.R[ite+1]=(max(min(simu.R[ite] + par.gamma1*simu.I[ite] + simu.gamma3[ite]*simu.Q[ite],1),1.0e-9))
        ite+=1
    # Trim the extra forward-step slot and derive nominal GDP.
    simu.grid = np.linspace(0,ite,ite)
    simu.I = simu.I[0:ite]
    simu.Q = simu.Q[0:ite]
    simu.R = simu.R[0:ite]
    simu.GDP = simu.p*simu.Y
    return(simu)
"""Copyright (c) 2018 Great Ormond Street Hospital for Children NHS Foundation
Trust & Birmingham Women's and Children's NHS Foundation Trust
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import unittest
from django.test import TestCase
from ..database_utils.multiple_case_adder import MultipleCaseAdder
from ..database_utils.case_handler import Case, CaseModel, ManyCaseModel
from ..models import *
import re
import os
import json
import hashlib
import pprint
from datetime import datetime
from django.utils import timezone
class TestCaseOperations(object):
    """
    Common operations on our test zoo.
    """
    def __init__(self):
        """Load every test-case JSON from gel2mdt/tests/test_files and
        precompute request IDs and SHA-512 hashes for each."""
        self.file_path_list = [
            # get the list of absolute file paths for the cases in test_files
            os.path.join(
                os.getcwd(),
                # BUGFIX: restore the "{filename}" placeholder so .format()
                # actually substitutes each file name into the path.
                "gel2mdt/tests/test_files/{filename}".format(
                    filename=filename)
            ) for filename in os.listdir(
                os.path.join(
                    os.getcwd(),
                    "gel2mdt/tests/test_files"
                )
            )
        ]
        self.json_list = [
            json.load(open(file_path)) for file_path in self.file_path_list
        ]
        self.request_id_list = [
            str(x["interpretation_request_id"]) + "-" + str(x["version"])
            for x in self.json_list]
        self.json_hashes = {
            str(x["interpretation_request_id"]) + "-" + str(x["version"]):
            hashlib.sha512(
                json.dumps(x, sort_keys=True).encode('utf-8')
            ).hexdigest() for x in self.json_list
        }
    def get_case_mapping(self, multiple_case_adder):
        """
        Return a list of (case, test_case) tuples pairing each newly created
        case with the test-zoo json it was built from.
        """
        test_cases = self.json_list
        cases = multiple_case_adder.list_of_cases
        case_mapping = []
        for case in cases:
            for test_case in test_cases:
                if case.request_id \
                        == str(test_case["interpretation_request_id"]) + "-" \
                        + str(test_case["version"]):
                    # BUGFIX: append a single tuple — list.append() takes
                    # exactly one argument, so the original raised TypeError.
                    case_mapping.append((case, test_case))
        # BUGFIX: the mapping was built but never returned, so callers
        # iterating the result crashed on None.
        return case_mapping
    def add_cases_to_database(self, change_hash=False):
        """
        For all the cases we have stored, add them all to the database.
        :param change_hash: Default=False. If True, hashes will be changes for
        GELInterpretationReport entries so that test cases in MCA get flagged
        for update.
        """
        # make dummy related tables
        clinician = Clinician.objects.create(
            name="test_clinician",
            email="test@email.com",
            hospital="test_hospital"
        )
        family = Family.objects.create(
            clinician=clinician,
            gel_family_id=100
        )
        # convert our test data into IR and IRfamily model instances
        ir_family_instances = [InterpretationReportFamily(
            participant_family=family,
            cip=json["cip"],
            ir_family_id=str(json["interpretation_request_id"]) +
            "-" + str(json["version"]),
            priority=json["case_priority"]
        ) for json in self.json_list]
        InterpretationReportFamily.objects.bulk_create(ir_family_instances)
        ir_instances = [GELInterpretationReport(
            ir_family=InterpretationReportFamily.objects.get(
                ir_family_id=str(json["interpretation_request_id"]) +
                "-" + str(json["version"])),
            polled_at_datetime=timezone.make_aware(datetime.now()),
            sha_hash=self.get_hash(json, change_hash),
            status=json["status"][0]["status"],
            updated=json["status"][0]["created_at"],
            user=json["status"][0]["user"]
        ) for json in self.json_list]
        for ir in ir_instances:
            ir.save()
    def get_hash(self, json, change_hash):
        """
        Take a json and whether or not to change a hash, then return
        the hash of that json. Changing hash may be required if testing
        whether a case has mismatching hash values from the latest stored.
        """
        hash_digest = self.json_hashes[
            str(json["interpretation_request_id"]) +
            "-" + str(json["version"])]
        if change_hash:
            # Reversing the digest guarantees a mismatch with the stored hash.
            hash_digest = hash_digest[::-1]
        return hash_digest
class TestUpdateCases(TestCase):
    """
    Test that all jsons can be added, then if one changes it is updated
    with a new version number on the JSON but still associated with the same
    IR family.
    """
    def setUp(self):
        """
        Instantiate a MCA for the zoo of test cases, unedited.
        """
        self.case_update_handler = MultipleCaseAdder(test_data=True)
class TestAddCases(TestCase):
    """
    Tests for parsing, hashing and extraction of the test-zoo cases.

    NOTE(review): another class named TestAddCases is defined further down
    this module and shadows this one at import time — rename one of the two
    so these tests are actually collected.
    """
    @unittest.skip("skip whilst testing hashcheck")
    def setUp(self):
        """
        Instantiate a MultipleCaseAdder for the zoo of test cases.
        """
        self.case_update_handler = MultipleCaseAdder(test_data=True)
    @unittest.skip("skip whilst testing hashcheck")
    def test_request_id_format(self):
        """
        For each test case, assert that we correctly parse the IR ID.
        """
        for case in self.case_update_handler.list_of_cases:
            # Raw string avoids the invalid-escape-sequence warning.
            assert re.match(r"\d+-\d+", case.request_id)
    @unittest.skip("skip whilst testing hashcheck")
    def test_hash_cases(self):
        """
        For each test case, assert that we reliably hash the json.
        """
        test_cases = TestCaseOperations()
        for case in self.case_update_handler.list_of_cases:
            assert case.json_hash == test_cases.json_hashes[case.request_id]
    @unittest.skip("skip whilst testing hashcheck")
    def test_extract_proband(self):
        """
        Test that we can get the proband out of the json as a dict-type.
        """
        test_cases = TestCaseOperations()
        # BUGFIX: the handler lives on self (set in setUp); the bare name
        # `case_update_handler` raised NameError.
        case_mapping = test_cases.get_case_mapping(self.case_update_handler)
        for case, test_case in case_mapping:
            ir_data = test_case["interpretation_request_data"]["json_request"]
            participants = ir_data["pedigree"]["participants"]
            proband = None
            for participant in participants:
                if participant["isProband"]:
                    proband = participant
            assert case.proband["gelId"] == proband["gelId"]
    @unittest.skip("skip whilst testing hashcheck")
    def test_extract_latest_status(self):
        """
        Test that the status extracted has the latest date and most progressed
        status of all the statuses.
        """
        test_cases = TestCaseOperations()
        # BUGFIX: same NameError as above — use the handler from setUp.
        case_mapping = test_cases.get_case_mapping(self.case_update_handler)
        for case, test_case in case_mapping:
            status_list = test_case["status"]
            max_date = None
            max_progress = None
            for status in status_list:
                if max_date is None:
                    max_date = timezone.make_aware(status["created_at"])
                elif max_date < timezone.make_aware(status["created_at"]):
                    max_date = timezone.make_aware(status["created_at"])
                if max_progress is None:
                    max_progress = status["status"]
                elif status["status"] == "report_sent":
                    max_progress = "report_sent"
                elif status["status"] == "report_generated":
                    max_progress = "report_generated"
                elif status["status"] == "sent_to_gmcs":
                    max_progress = "sent_to_gmcs"
            assert case.status["status"] == max_progress
            assert timezone.make_aware(case.status["created_at"]) \
                == max_date
class TestIdentifyCases(TestCase):
    """
    Tests to ensure that MultipleCaseAdder can correctly determine
    which cases should be added, updated, and skipped.
    """
    @unittest.skip("long time to poll panelapp")
    def test_identify_cases_to_add(self):
        """
        MultipleCaseAdder recognises which cases need to be added.
        """
        case_update_handler = MultipleCaseAdder(test_data=True)
        test_cases = TestCaseOperations()
        for case in case_update_handler.cases_to_add:
            # all the test cases should be flagged as 'to add' since none added
            assert case.request_id in test_cases.request_id_list
        # Empty database means nothing can be flagged for update.
        assert not case_update_handler.cases_to_update
    @unittest.skip("long time to poll panelapp")
    def test_identify_cases_to_update(self):
        """
        MultipleCaseAdder recognises hash differences to determine updates.
        """
        # add all of our test cases first. change hashes to trick MCA into
        # thinking the test files need to be updated in the database
        test_cases = TestCaseOperations()
        test_cases.add_cases_to_database(change_hash=True)
        # now cases are added, MCA should recognise this when checking
        case_update_handler = MultipleCaseAdder(test_data=True)
        to_update = case_update_handler.cases_to_update
        assert len(to_update) > 0
        for case in to_update:
            assert case.request_id in test_cases.request_id_list
    @unittest.skip("long time to poll panelapp")
    def test_identify_cases_to_skip(self):
        """
        MultipleCaseAdder recognises when latest version hashes match current.
        """
        test_cases = TestCaseOperations()
        # add all the test cases to db but retain hash so attempting to re-add
        # should cause a skip
        test_cases.add_cases_to_database()
        case_update_handler = MultipleCaseAdder(test_data=True)
        to_skip = case_update_handler.cases_to_skip
        assert len(to_skip) > 0
        for case in to_skip:
            assert case.request_id in test_cases.request_id_list
class TestCaseModel(TestCase):
    """
    Test functions carried out by the CaseModel class, ie. checking if an
    entry for a particular case needs to be added or is already present in
    the database.
    """
    @unittest.skip("skip whilst testing hashcheck")
    def test_new_clinician(self):
        """
        Return created=False when a Clinician is not known to the db.
        """
        attributes = {
            "name": "test",
            "email": "test",
            "hospital": "test"
        }
        known_clinicians = Clinician.objects.all()
        candidate = CaseModel(Clinician, attributes, known_clinicians)
        print(candidate.entry)
        # The entry must be the literal False, not merely falsy.
        assert candidate.entry is False
    def test_existing_clinician(self):
        """
        Returns a clinician object when Clinician is known to the db.
        """
        attributes = {
            "name": "test",
            "email": "test",
            "hospital": "test"
        }
        stored = Clinician.objects.create(**attributes)
        known_clinicians = Clinician.objects.all()
        candidate = CaseModel(Clinician, attributes, known_clinicians)
        assert candidate.entry.id == stored.id
class TestAddCases(TestCase):
    """
    Test that a case has been faithfully added to the database along with
    all of the required related tables when needed.

    NOTE(review): this class shares its name with an earlier TestAddCases in
    this module and therefore shadows it at import time — rename one of the
    two classes.
    """
    def test_updated(self):
        """
        Generic test to make sure models have become populated by MCM.
        """
        check_cases = True
        # Constructing the adder ingests the test data into the database.
        case_list_handler = MultipleCaseAdder(test_data=True)
        for model in (
            Clinician,
            Family,
            Proband,
            Relative,
            Phenotype,
            Panel,
            PanelVersion,
            Transcript,
            InterpretationReportFamily,
            GELInterpretationReport,
            ToolOrAssemblyVersion,
            Variant,
            TranscriptVariant,
            ProbandVariant,
            ProbandTranscriptVariant,
            ReportEvent,
        ):
            all_models = model.objects.all().values()
            if not all_models:
                print("Fail on:", model)
                check_cases = False
        assert check_cases
    @unittest.skip("long time to poll panelapp")
    def test_add_clinician(self):
        """
        Clinician has been fetched or added that matches the json
        """
        case_list_handler = MultipleCaseAdder(test_data=True)
        try:
            Clinician.objects.get(**{
                "name": "unknown",
                "email": "unknown",
                "hospital": "unknown"
            })
            created = True
        except Clinician.DoesNotExist as e:
            created = False
        assert created
        # now check that we are refreshing clinician in the case models:
        for case in case_list_handler.cases_to_add:
            clinician_cam = case.attribute_managers[Clinician]
            assert clinician_cam.case_model.entry is not False
        # BUGFIX: removed a trailing `assert False` debug leftover that made
        # this test unconditionally fail once unskipped.
    @unittest.skip("long time to poll panelapp")
    def test_add_family(self):
        """
        Family matching json data has been added/fetched.
        """
        case_list_handler = MultipleCaseAdder(test_data=True)
        test_cases = TestCaseOperations()
        try:
            for test_case in test_cases.json_list:
                Family.objects.get(
                    **{
                        "gel_family_id": int(test_case["family_id"])
                    }
                )
            created = True
        except Family.DoesNotExist as e:
            created = False
        assert created
    @unittest.skip("long time to poll panelapp")
    def test_add_phenotypes(self):
        """
        All phenotypes in json added with HPO & description.
        """
        case_list_handler = MultipleCaseAdder(test_data=True)
        test_cases = TestCaseOperations()
        for case in case_list_handler.cases_to_add:
            phenotype_cam = case.attribute_managers[Phenotype]
            for phenotype in phenotype_cam.case_model.case_models:
                assert phenotype.entry is not False
    def test_associated_family_and_phenotypes(self):
        """
        Once phenotypes have been added, ensure M2M creation with Family.
        """
        pass
    def test_add_or_get_panel_version(self):
        """
        Panel and panelversion from json added/fetched faithfully.
        """
        pass
    def test_add_or_get_panel_version_genes(self):
        """
        If panel version is new, check that genes corroborate with panelApp.
        """
        pass
    def test_add_ir_family(self):
        """
        Test that a new IRfamily has been made with a request ID matching the
        json. (BUGFIX: this method name was defined twice in the class, so the
        first stub was silently shadowed; the duplicates are merged here.)
        """
        pass
    def test_add_ir(self):
        """
        Test that a new IR has been made and links to the correct IRfamily.
        """
        pass
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn.preprocessing import StandardScaler
from math import pi
################################################################################
# clean genres and separate into columns
################################################################################
# Path to the source CSV of books; consumed by award_column_maker below.
dataset = "data/9745_greatest_books_ever.csv"
def award_column_maker(dataset):
    """Explode the comma-separated ``awards`` column, progressively strip
    award boilerplate ("Award", "Nominee", "AAR", "by", "de ", "for"), and
    write the resulting category counts to award_categories.csv.

    Works entirely by side effect (writes a CSV); returns None.
    """
    a = pd.read_csv(dataset)
    ## Splits all awards into different columns
    xx = len(a.awards.str.split(',', expand=True).columns)
    a[[f"a_{i}" for i in range(xx)]] = a.awards.str.split(',', expand=True)
    # NOTE(review): range(0,20) is hard-coded here while the real width is
    # xx — this breaks whenever the widest awards list has != 20 entries.
    is_duplicate = a[[f"a_{i}" for i in range(0,20)]].apply(pd.Series.duplicated, axis=1).reset_index()
    ## Takes the current dataframe and where the 'is_duplicate' dataframe has True values, it replaces them with np.nan
    a[[f"a_{i}" for i in range(0,xx)]] = a[[f"a_{i}" for i in range(0,xx)]].where(~is_duplicate, np.nan).fillna('zzzyyyy')
    # 'zzzyyyy' is a sentinel for "no value"; it is dropped again below.
    for i in range(xx):
        a[f"a_{i}"] = a[f"a_{i}"].str.strip()
    # Count each distinct award string across all exploded columns.
    u, c = np.unique(a[[f"a_{i}" for i in range(xx)]].values, return_counts=True)
    new_dd = dict(zip(u, c))
    a = pd.DataFrame.from_dict(new_dd, orient='index',columns=['count'])
    a.drop(['zzzyyyy'], inplace=True)
    a.reset_index(level=0, inplace=True)
    # ################################################################################
    # # Find the most common genres
    # ################################################################################
    # a["awards"] = a["awards"].str.replace(r"[^A-Za-z]", " ", regex=True)
    # a.to_csv("aaa.csv")
    #################################### Creating the specific awards
    # Each block below splits the award name on one keyword, keeps the text
    # before it (column a_0), strips it, and re-counts the unique values.
    a_len=len(a['index'].str.split("Award",expand=True).columns)
    a[[f"a_{i}" for i in range(a_len)]] = a['index'].str.split("Award",expand=True)
    a['a_0'] = a['a_0'].str.strip()
    print(len(a['a_0']))
    u, c = np.unique(a['index'].values, return_counts=True)
    new_dd = dict(zip(u, c))
    a = pd.DataFrame.from_dict(new_dd, orient='index',columns=['count'])
    a.reset_index(level=0, inplace=True)
    a_len=len(a['index'].str.split("Nominee",expand=True).columns)
    a[[f"a_{i}" for i in range(a_len)]] = a['index'].str.split("Nominee",expand=True)
    a['a_0'] = a['a_0'].str.strip()
    print(len(a['a_0']))
    u, c = np.unique(a['index'].values, return_counts=True)
    new_dd = dict(zip(u, c))
    a = pd.DataFrame.from_dict(new_dd, orient='index',columns=['count'])
    a.reset_index(level=0, inplace=True)
    a_len=len(a['index'].str.split("AAR",expand=True).columns)
    a[[f"a_{i}" for i in range(a_len)]] = a['index'].str.split("AAR",expand=True)
    a['a_0'] = a['a_0'].str.strip()
    print(len(a['a_0']))
    # From here on the counts are taken from the stripped prefix a_0.
    u, c = np.unique(a['a_0'].values, return_counts=True)
    new_dd = dict(zip(u, c))
    a = pd.DataFrame.from_dict(new_dd, orient='index',columns=['count'])
    a.reset_index(level=0, inplace=True)
    a_len=len(a['index'].str.split("by",expand=True).columns)
    a[[f"a_{i}" for i in range(a_len)]] = a['index'].str.split("by",expand=True)
    a['a_0'] = a['a_0'].str.strip()
    print(len(a['a_0']))
    u, c = np.unique(a['a_0'].values, return_counts=True)
    new_dd = dict(zip(u, c))
    a = pd.DataFrame.from_dict(new_dd, orient='index',columns=['count'])
    a.reset_index(level=0, inplace=True)
    a_len=len(a['index'].str.split("de ",expand=True).columns)
    a[[f"a_{i}" for i in range(a_len)]] = a['index'].str.split("de ",expand=True)
    a['a_0'] = a['a_0'].str.strip()
    print(len(a['a_0']))
    u, c = np.unique(a['a_0'].values, return_counts=True)
    new_dd = dict(zip(u, c))
    a = pd.DataFrame.from_dict(new_dd, orient='index',columns=['count'])
    a.reset_index(level=0, inplace=True)
    # NOTE(review): the column count is measured with "for" but the actual
    # split uses "for " (trailing space) — if a name contains "for" without a
    # following space the widths disagree; confirm which was intended.
    a_len=len(a['index'].str.split("for",expand=True).columns)
    a[[f"a_{i}" for i in range(a_len)]] = a['index'].str.split("for ",expand=True)
    a['a_0'] = a['a_0'].str.strip()
    print(len(a['a_0']))
    ### Final
    u, c = np.unique(a['a_0'].values, return_counts=True)
    new_dd = dict(zip(u, c))
    a = pd.DataFrame.from_dict(new_dd, orient='index',columns=['count'])
    # NOTE(review): sort_values is not inplace and its result is discarded —
    # this line currently has no effect.
    a.sort_values('count', ascending=False)
    a.reset_index(level=0, inplace=True)
    u, c = np.unique(a['index'].values, return_counts=True)
    new_dd = dict(zip(u, c))
    final = pd.DataFrame.from_dict(new_dd, orient='index',columns=['count'])
    final.to_csv('award_categories.csv')
def genre_column_maker(path=None):
    """Explode the comma-separated ``genres`` column, find the 50 most common
    genres, add one boolean column per top genre, and overwrite the CSV.

    ``path`` defaults to the module-level ``dataset`` so existing
    ``genre_column_maker()`` calls keep working unchanged.
    """
    if path is None:
        path = dataset
    df = pd.read_csv(path)
    # Split every genres cell into its own column.
    split_genres = df.genres.str.split(',', expand=True)
    n_cols = len(split_genres.columns)
    genre_cols = [f"genre_{i}" for i in range(n_cols)]
    df[genre_cols] = split_genres
    # BUG FIX: the original hard-coded range(0, 20) throughout even though the
    # real column count is n_cols — a KeyError (or silently dropped columns)
    # whenever the widest genre list does not have exactly 20 entries.
    # Boolean frame marking genre values duplicated within each row.
    is_duplicate = df[genre_cols].apply(pd.Series.duplicated, axis=1).reset_index()
    # Null out duplicates, then use 'zzzyyyy' as an easily-dropped sentinel.
    df[genre_cols] = df[genre_cols].where(~is_duplicate, np.nan).fillna('zzzyyyy')
    for col in genre_cols:
        df[col] = df[col].str.strip()
    ################################################################################
    # Find the most common genres
    ################################################################################
    # Count each distinct genre across all the exploded columns.
    unique, counts = np.unique(df[genre_cols].values, return_counts=True)
    genre_df = pd.DataFrame.from_dict(dict(zip(unique, counts)),
                                      orient='index', columns=['count'])
    genre_df.drop(['zzzyyyy'], inplace=True)
    top_50_genres = genre_df.sort_values('count', ascending=False).head(50)
    genre_names = list(top_50_genres.index)
    #top_50_genres.to_csv("data/top_50_genres.csv")
    #################################################################################
    # Make columns with genre names and map true or false if book contains that genre
    #################################################################################
    for genre in genre_names:
        df[genre] = df['genres'].str.contains(genre)
    # Drop the temporary exploded columns and the two leading index columns.
    df.drop(genre_cols, inplace=True, axis=1)
    df.drop(df.columns[[0, 1]], axis=1, inplace=True)
    return df.to_csv(path)
#genre_column_maker(dataset)
# Load the (already genre-augmented) dataset for the analyses below.
df = pd.read_csv(dataset)
def audio_book(data):
    """Return the audiobook-related columns for books published after 2000.

    Parameters
    ----------
    data : pandas.DataFrame
        Must contain 'Audiobook', 'good_read_score' and
        'original_publish_year' columns.

    Returns
    -------
    pandas.DataFrame
        The three columns above, restricted to rows with
        original_publish_year > 2000.
    """
    subset = data[['Audiobook', 'good_read_score', 'original_publish_year']]
    # BUG FIX: the original computed this filtered frame but never returned
    # it, so the function always returned None.
    return subset[subset["original_publish_year"] > 2000]
|
#encoding: utf-8
from flask_restful import Resource, abort, reqparse
from models.user import User
from extensions import db
# Shared request parser: both /login and /signup expect a username/password
# pair in the request payload.
loginParse = reqparse.RequestParser()
loginParse.add_argument('username', type=str)
loginParse.add_argument('password', type=str)
class Login(Resource):
    """REST endpoint exchanging username/password for an auth token."""

    def post(self):
        data = loginParse.parse_args()
        username = data.get('username')
        password = data.get('password')
        user = User.query.filter(User.username == username).first()
        # BUG FIX: the original called user.check_password() before testing
        # whether the user exists, so an unknown username raised
        # AttributeError (HTTP 500) instead of the intended 404.  The
        # short-circuit below only checks the password when a user was found.
        if user and user.check_password(password):
            token = user.generate_auth_token()
            return token
        else:
            abort(404, '{} is not found'.format(username))
class SignUp(Resource):
    """REST endpoint that registers a new user account."""

    def post(self):
        # Reuses the login parser: expects 'username' and 'password'.
        data = loginParse.parse_args()
        username = data.get('username')
        password = data.get('password')
        # NOTE(review): debug print leaks credentials to logs — consider removing.
        print (username, password)
        user = User.query.filter(User.username == username).first()
        if not user:
            newUser = User(username, password)
            # NOTE(review): 'set_pasword' looks like a typo for 'set_password';
            # confirm against the User model before renaming.
            newUser.set_pasword(password)
            db.session.add(newUser)
            db.session.commit()
            return True
        else:
            # NOTE(review): 404 is unusual for "name already taken" — 409
            # Conflict is conventional.  Kept as-is to preserve the API contract.
            abort(404, '{} is already be used, please change another name'.format(username))
|
from django.db import models
from django.contrib.auth.models import User
import datetime
class Branch(models.Model):
    # Display name of an academic branch/department.
    branch_name=models.CharField(max_length=100)
    def __unicode__(self):
        """Python-2 string representation (this file targets Django 1.x)."""
        return self.branch_name
class Student(models.Model):
    # Link to the auth user account for this student.
    student=models.ForeignKey(User)
    branch=models.ForeignKey(Branch)
    # NOTE(review): verbose_name 'dat of birth' is a typo for 'date of birth';
    # left untouched here because fixing it is a migration-visible change.
    date_of_birth=models.DateTimeField('dat of birth')
    def __unicode__(self):
        """Display the linked auth user's username."""
        return self.student.username
class Custom_data(models.Model):
    # Arbitrary per-user key/value pairs (field name -> value).
    student=models.ForeignKey(User)
    field=models.CharField(max_length=100)
    value=models.CharField(max_length=500)
    def __unicode__(self):
        """'<username> - <field>' for admin listings."""
        return self.student.username+' - '+self.field
class Subscriber(models.Model):
    # `subscriber` follows `student`; `type` selects which feed is followed.
    student=models.ForeignKey(User,related_name="user")
    subscriber=models.ForeignKey(User, related_name="subscriber")
    # Feed choices: all activity, timeline only, or blog only.
    TYPE = (('a','ALL') , ('t','TIMELINE') , ('b','BLOG'))
    type = models.CharField(max_length=1 , choices=TYPE , default = 'a')
    class Meta:
        # A user can subscribe to a given student at most once.
        unique_together = ('student' , 'subscriber')
    def __unicode__(self):
        return self.student.email+' '+self.subscriber.email+' '+self.type
class Tag(models.Model):
    # Free-form label attached to articles (no uniqueness enforced).
    tag_name = models.CharField(max_length=30)
class Article(models.Model):
    """A blog article written by a user, carrying free-form tags."""
    user=models.ForeignKey(User)
    heading=models.CharField(max_length=100)
    content=models.TextField()
    # BUG FIX: the original used default=datetime.datetime.now() (note the
    # call), which evaluates ONCE at import time, so every article got the
    # server start-up timestamp.  Passing the callable makes Django evaluate
    # it on each save.
    upload=models.DateTimeField(default=datetime.datetime.now)
    edited=models.DateTimeField()
    tags=models.ManyToManyField(Tag)
|
# -*- coding: utf-8 -*-
# File : 01-basic1.py
# Author: water
# Date : 2019/7/31
# Give the user three attempts to enter a well-formed email address.
for i in range(1, 4):
    email = input('email:')
    # Split at the first '@'; valid means '@' exists and is not the first char.
    name, sep, email_sort = email.partition("@")
    if sep and name:
        print(f'邮箱名:{name} 类型:{email_sort}')
        break
    print("input wrong")
else:
    # Loop exhausted without a break: all three attempts failed.
    print(f"输入{i}次错误,锁定")
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
# Map of supported RNN type names to their Keras cell classes.
SUPPORTED_RNNS = {
    'lstm': tf.keras.layers.LSTMCell,
    'rnn': tf.keras.layers.SimpleRNNCell,
    'gru': tf.keras.layers.GRUCell,
}
# Parameters for batch normalization.
_BATCH_NORM_EPSILON = 1e-5
_BATCH_NORM_DECAY = 0.997
# Number of filters used by both convolution layers.
_CONV_FILTERS = 32
def batch_norm(inputs, training):
    """Apply batch normalization with the module-level decay/epsilon."""
    bn = tf.keras.layers.BatchNormalization(
        momentum=_BATCH_NORM_DECAY,
        epsilon=_BATCH_NORM_EPSILON,
    )
    return bn(inputs, training=training)
def _conv_bn_layer(
    inputs, padding, filters, kernel_size, strides, layer_id, training
):
    """Symmetrically pad, run a bias-free relu6 Conv2D, then batch-norm."""
    pad_spec = [
        [0, 0],                        # batch axis: no padding
        [padding[0], padding[0]],      # time axis
        [padding[1], padding[1]],      # frequency axis
        [0, 0],                        # channel axis: no padding
    ]
    padded = tf.pad(inputs, pad_spec)
    conv = tf.keras.layers.Conv2D(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding='valid',
        use_bias=False,
        activation=tf.nn.relu6,
        name='cnn_{}'.format(layer_id),
    )
    return batch_norm(conv(padded), training)
def _rnn_layer(
    inputs,
    rnn_cell,
    rnn_hidden_size,
    layer_id,
    is_batch_norm,
    is_bidirectional,
    training,
):
    """One RNN layer, optionally batch-normed first and/or bidirectional.

    Returns the full sequence of outputs (return_sequences=True).
    """
    x = batch_norm(inputs, training) if is_batch_norm else inputs
    core = tf.keras.layers.RNN(
        rnn_cell(rnn_hidden_size), return_sequences=True
    )
    if is_bidirectional:
        return tf.keras.layers.Bidirectional(core)(x)
    return core(x)
class Model:
    """DeepSpeech2-style acoustic model.

    Two conv+batch-norm front-end blocks, a stack of (optionally
    bidirectional) RNN layers, and a final batch norm whose output is
    exposed as ``self.logits``.
    """
    def __init__(
        self,
        inputs,
        num_rnn_layers=5,
        rnn_type='gru',
        is_bidirectional=True,
        rnn_hidden_size=512,
        use_bias=True,
        training=True,
        **kwargs
    ):
        # Assumes `inputs` is 4-D [batch, time, freq, channel] — TODO confirm
        # with the caller.  NOTE(review): use_bias and **kwargs are unused.
        inputs = _conv_bn_layer(
            inputs,
            padding=(20, 5),
            filters=_CONV_FILTERS,
            kernel_size=(41, 11),
            strides=(2, 2),
            layer_id=1,
            training=training,
        )
        inputs = _conv_bn_layer(
            inputs,
            padding=(10, 5),
            filters=_CONV_FILTERS,
            kernel_size=(21, 11),
            strides=(2, 1),
            layer_id=2,
            training=training,
        )
        # Collapse the freq/channel axes so each timestep is one flat vector.
        batch_size = tf.shape(inputs)[0]
        feat_size = inputs.get_shape().as_list()[2]
        inputs = tf.reshape(inputs, [batch_size, -1, feat_size * _CONV_FILTERS])
        rnn_cell = SUPPORTED_RNNS[rnn_type]
        for layer_counter in range(num_rnn_layers):
            # No batch norm before the first RNN layer (the conv block above
            # already normalised its output).
            is_batch_norm = layer_counter != 0
            inputs = _rnn_layer(
                inputs,
                rnn_cell,
                rnn_hidden_size,
                layer_counter + 1,
                is_batch_norm,
                is_bidirectional,
                training,
            )
        self.logits = batch_norm(inputs, training)
|
#!/usr/local/bin/env python3
# -*- coding: utf-8 -*-
#
# Author : Bhishan Poudel, Physics PhD Student, Ohio University
# Date : Jul 06, 2017 Thu
# Last update :
def main():
    """Print consecutive (x, y) pairs from a rounded linspace grid."""
    import numpy as np

    # 22 evenly spaced values in [2208, 2764], rounded to whole numbers.
    # (Removed the unused pandas/time imports the original carried.)
    laml = [round(v) for v in np.linspace(2208, 2764, num=22)]
    print('laml = ', laml)

    # Walk consecutive pairs; zip is clearer than index arithmetic.
    for x, y in zip(laml, laml[1:]):
        print('x,y = ', x, y)


if __name__ == '__main__':
    main()
|
from collections import OrderedDict
from urllib.parse import urlsplit
import pytz
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.urls import get_script_prefix
from django.utils import timezone, translation
from django.utils.cache import patch_vary_headers
from django.utils.deprecation import MiddlewareMixin
from django.utils.translation import LANGUAGE_SESSION_KEY
from django.utils.translation.trans_real import (
check_for_language, get_supported_language_variant, language_code_re,
parse_accept_lang_header,
)
from pretix.base.settings import GlobalSettingsObject
from pretix.multidomain.urlreverse import (
get_event_domain, get_organizer_domain,
)
_supported = None
class LocaleMiddleware(MiddlewareMixin):
    """
    This middleware sets the correct locale and timezone
    for a request.
    """
    def process_request(self, request: HttpRequest):
        # Resolve the preferred language from user/session/browser/event data.
        language = get_language_from_request(request)
        # Normally, this middleware runs *before* the event is set. However, on event frontend pages it
        # might be run a second time by pretix.presale.EventMiddleware and in this case the event is already
        # set and can be taken into account for the decision.
        if hasattr(request, 'event') and not request.path.startswith(get_script_prefix() + 'control'):
            if language not in request.event.settings.locales:
                # Exact locale not enabled for this event: try the bare
                # primary language tag first ...
                firstpart = language.split('-')[0]
                if firstpart in request.event.settings.locales:
                    language = firstpart
                else:
                    # ... then fall back to the event default, unless some
                    # enabled locale shares the same primary language tag.
                    language = request.event.settings.locale
                    for lang in request.event.settings.locales:
                        if lang.startswith(firstpart + '-'):
                            language = lang
                            break
        translation.activate(language)
        request.LANGUAGE_CODE = translation.get_language()
        # Timezone: the event's configured zone wins, then the user's.
        tzname = None
        if hasattr(request, 'event'):
            tzname = request.event.settings.timezone
        elif request.user.is_authenticated:
            tzname = request.user.timezone
        if tzname:
            try:
                timezone.activate(pytz.timezone(tzname))
                request.timezone = tzname
            except pytz.UnknownTimeZoneError:
                # Ignore invalid configured timezones; keep the default.
                pass
        else:
            timezone.deactivate()
    def process_response(self, request: HttpRequest, response: HttpResponse):
        # Responses vary by negotiated language; advertise that to caches.
        language = translation.get_language()
        patch_vary_headers(response, ('Accept-Language',))
        if 'Content-Language' not in response:
            response['Content-Language'] = language
        return response
def get_language_from_user_settings(request: HttpRequest) -> str:
    """Locale from the authenticated user's profile, or None."""
    if not request.user.is_authenticated:
        return None
    lang_code = request.user.locale
    if lang_code is not None and lang_code in _supported and check_for_language(lang_code):
        return lang_code
def get_language_from_session_or_cookie(request: HttpRequest) -> str:
    """Locale from the session, then the language cookie; None if neither."""
    if hasattr(request, 'session'):
        session_lang = request.session.get(LANGUAGE_SESSION_KEY)
        if session_lang is not None and session_lang in _supported and check_for_language(session_lang):
            return session_lang
    cookie_lang = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
    try:
        return get_supported_language_variant(cookie_lang)
    except LookupError:
        return None
def get_language_from_event(request: HttpRequest) -> str:
    """Locale configured on the current event, if resolvable; else None."""
    if not hasattr(request, 'event'):
        return None
    try:
        return get_supported_language_variant(request.event.settings.locale)
    except LookupError:
        return None
def get_language_from_browser(request: HttpRequest) -> str:
    """First supported language from Accept-Language, or None."""
    header = request.headers.get('Accept-Language', '')
    for lang, _quality in parse_accept_lang_header(header):
        if lang == '*':
            # Wildcard: stop scanning, fall back to other sources.
            break
        if not language_code_re.search(lang):
            continue
        try:
            return get_supported_language_variant(lang)
        except LookupError:
            continue
def get_default_language():
    """settings.LANGUAGE_CODE, narrowed to a supported variant if possible."""
    try:
        return get_supported_language_variant(settings.LANGUAGE_CODE)
    except LookupError:
        # Fall back to the raw configured code.
        return settings.LANGUAGE_CODE
def get_language_from_request(request: HttpRequest) -> str:
    """
    Analyzes the request to find what language the user wants the system to
    show. Only languages listed in settings.LANGUAGES are taken into account.
    If the user requests a sublanguage where we have a main language, we send
    out the main language.
    """
    global _supported
    if _supported is None:
        _supported = OrderedDict(settings.LANGUAGES)
    in_control = request.path.startswith(get_script_prefix() + 'control')
    # In the control area the user's own profile wins; in the presale area an
    # explicit session/cookie choice takes precedence over the profile.
    if in_control:
        resolvers = (
            get_language_from_user_settings,
            get_language_from_session_or_cookie,
            get_language_from_browser,
            get_language_from_event,
        )
    else:
        resolvers = (
            get_language_from_session_or_cookie,
            get_language_from_user_settings,
            get_language_from_browser,
            get_language_from_event,
        )
    for resolver in resolvers:
        language = resolver(request)
        if language:
            return language
    return get_default_language()
def _parse_csp(header):
h = {}
for part in header.split(';'):
k, v = part.strip().split(' ', 1)
h[k.strip()] = v.split(' ')
return h
def _render_csp(h):
return "; ".join(k + ' ' + ' '.join(v) for k, v in h.items())
def _merge_csp(a, b):
for k, v in a.items():
if k in b:
a[k] += b[k]
for k, v in b.items():
if k not in a:
a[k] = b[k]
class SecurityMiddleware(MiddlewareMixin):
    """Adds security headers (CSP, X-XSS-Protection, P3P) to responses."""

    # Paths that must not receive a Content-Security-Policy header.
    CSP_EXEMPT = (
        '/api/v1/docs/',
    )
    def process_response(self, request, resp):
        if settings.DEBUG and resp.status_code >= 400:
            # Don't use CSP on debug error pages as it breaks Django's fancy
            # error pages
            return resp
        resp['X-XSS-Protection'] = '1'
        # We just need to have a P3P, no matter what's in there
        # https://blogs.msdn.microsoft.com/ieinternals/2013/09/17/a-quick-look-at-p3p/
        # https://github.com/pretix/pretix/issues/765
        resp['P3P'] = 'CP=\"ALL DSP COR CUR ADM TAI OUR IND COM NAV INT\"'
        # Allow map tile images when a leaflet tile server is configured.
        img_src = []
        gs = GlobalSettingsObject()
        if gs.settings.leaflet_tiles:
            img_src.append(gs.settings.leaflet_tiles[:gs.settings.leaflet_tiles.index("/", 10)].replace("{s}", "*"))
        # Policy template; {static}/{dynamic}/{media} are substituted below.
        h = {
            'default-src': ["{static}"],
            'script-src': ['{static}', 'https://checkout.stripe.com', 'https://js.stripe.com'],
            'object-src': ["'none'"],
            # frame-src is deprecated but kept for compatibility with CSP 1.0 browsers, e.g. Safari 9
            'frame-src': ['{static}', 'https://checkout.stripe.com', 'https://js.stripe.com'],
            'child-src': ['{static}', 'https://checkout.stripe.com', 'https://js.stripe.com'],
            'style-src': ["{static}", "{media}"],
            'connect-src': ["{dynamic}", "{media}", "https://checkout.stripe.com"],
            'img-src': ["{static}", "{media}", "data:", "https://*.stripe.com"] + img_src,
            'font-src': ["{static}"],
            'media-src': ["{static}", "data:"],
            # form-action is not only used to match on form actions, but also on URLs
            # form-actions redirect to. In the context of e.g. payment providers or
            # single-sign-on this can be nearly anything so we cannot really restrict
            # this. However, we'll restrict it to HTTPS.
            'form-action': ["{dynamic}", "https:"] + (['http:'] if settings.SITE_URL.startswith('http://') else []),
            'report-uri': ["/csp_report/"],
        }
        # Merge in any policy a view already set on the response.
        if 'Content-Security-Policy' in resp:
            _merge_csp(h, _parse_csp(resp['Content-Security-Policy']))
        # Build the origin lists substituted into the template above.
        staticdomain = "'self'"
        dynamicdomain = "'self'"
        mediadomain = "'self'"
        if settings.MEDIA_URL.startswith('http'):
            mediadomain += " " + settings.MEDIA_URL[:settings.MEDIA_URL.find('/', 9)]
        if settings.STATIC_URL.startswith('http'):
            staticdomain += " " + settings.STATIC_URL[:settings.STATIC_URL.find('/', 9)]
        if settings.SITE_URL.startswith('http'):
            if settings.SITE_URL.find('/', 9) > 0:
                staticdomain += " " + settings.SITE_URL[:settings.SITE_URL.find('/', 9)]
                dynamicdomain += " " + settings.SITE_URL[:settings.SITE_URL.find('/', 9)]
            else:
                staticdomain += " " + settings.SITE_URL
                dynamicdomain += " " + settings.SITE_URL
        # Custom event/organizer domains must also be allowed as dynamic origins.
        if hasattr(request, 'organizer') and request.organizer:
            if hasattr(request, 'event') and request.event:
                domain = get_event_domain(request.event, fallback=True)
            else:
                domain = get_organizer_domain(request.organizer)
            if domain:
                siteurlsplit = urlsplit(settings.SITE_URL)
                if siteurlsplit.port and siteurlsplit.port not in (80, 443):
                    domain = '%s:%d' % (domain, siteurlsplit.port)
                dynamicdomain += " " + domain
        if request.path not in self.CSP_EXEMPT and not getattr(resp, '_csp_ignore', False):
            # NOTE(review): this first assignment is immediately overwritten
            # by the one after the loop below — it appears to be redundant.
            resp['Content-Security-Policy'] = _render_csp(h).format(static=staticdomain, dynamic=dynamicdomain,
                                                                    media=mediadomain)
            for k, v in h.items():
                h[k] = ' '.join(v).format(static=staticdomain, dynamic=dynamicdomain, media=mediadomain).split(' ')
            resp['Content-Security-Policy'] = _render_csp(h)
        elif 'Content-Security-Policy' in resp:
            # Exempt path: strip any policy a view may have set.
            del resp['Content-Security-Policy']
        return resp
|
# Count how many times digit `b` occurs in the decimal digits of `a`.
a = int(input('Enter: '))
b = int(input('Search: '))
n = a
count = 0
while n > 0:
    # Peel off the least-significant digit each iteration.
    n, digit = divmod(n, 10)
    if digit == b:
        count += 1
print('Count is: ', count)
|
#The sequence of triangle numbers is generated by adding the natural numbers.
#So the 7th triangle number would be 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28.
#The first ten terms would be: 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
#Let us list the factors of the first seven triangle numbers:
#1: 1
#3: 1,3
#6: 1,2,3,6
#10: 1,2,5,10
#15: 1,3,5,15
#21: 1,3,7,21
#28: 1,2,4,7,14,28
#We can see that 28 is the first triangle number to have over five divisors.
#What is the value of the first triangle number to have over
#five hundred divisors?
import time
import math
def num_divisors(num):
    """Return the number of positive divisors of *num*.

    Improvements over the original: counts divisors directly instead of
    materialising a list, and uses exact integer arithmetic (math.isqrt,
    floor division) so the square-root bound and the perfect-square pairing
    check cannot suffer float rounding on large inputs.
    """
    count = 0
    for i in range(1, math.isqrt(num) + 1):
        if num % i == 0:
            count += 1            # i itself
            if i != num // i:     # the paired divisor, unless num == i*i
                count += 1
    return count
def isTriangleNum(num):
    """Return True iff *num* is a triangle number n*(n+1)/2.

    BUG FIX: the original compared ``0.5*a*(a+1) == num`` in floating point,
    which misclassifies once 2*num exceeds the 53-bit float mantissa.  Using
    math.isqrt and integer arithmetic keeps the test exact for any size.
    """
    a = math.isqrt(2 * num)
    return a * (a + 1) // 2 == num
def lastTerm(num):
    """Return n such that num == n*(n+1)/2, or None if num is not triangular."""
    if not isTriangleNum(num):
        return None
    return int(math.sqrt(2 * num))
def main():
    """Find the first triangle number with more than 500 divisors (Euler 12)."""
    start_time = time.time()
    # Seed the search with a highly composite starting point.
    check = 2**4 * 3**4 * 5**4 * 7 * 11
    # Advance to the nearest triangle number at or above the seed.
    while not isTriangleNum(check):
        check += 1
    seriesLastTerm = lastTerm(check)
    # Step from one triangle number to the next: T(n+1) = T(n) + (n+1).
    while num_divisors(check) <= 500:
        seriesLastTerm += 1
        check += seriesLastTerm
    print(check)
    elapsed_time = time.time() - start_time
    print("\nTime elapsed: " + str(elapsed_time))


main()
import matplotlib
matplotlib.use('TkAgg')
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime as dt
# Read the pulse log (fields separated by '|'), keep the samples taken
# between 03:00:00 and 03:11 (exclusive), then report spread statistics and
# plot pulse over time.
xlist = []
ylist = []
pulse = []
total = 0          # running sum of the selected pulse values
count = 0
part = []
# Context manager replaces the original open()/close() pair, so the handle
# is released even if parsing raises.  (`total` also replaces the original
# `sum`, which shadowed the builtin.)
with open("2018-11-07.txt", "r") as f:
    for line in f:
        s = line.split("|")
        if len(s) >= 2 and s[1] != " TIME ":
            xx = s[1]
            xxtime = dt.strptime(xx, ' %H:%M:%S ')
            yy = s[2]
            pulse.append(int(s[2]))
            if xxtime.hour >= 3 and xxtime.minute >= 0 and xxtime.second >= 0:
                if xxtime.hour == 3 and xxtime.minute == 11:
                    break
                xlist.append(xx)
                ylist.append(yy)
                part.append(int(yy))
                total += int(yy)
                count += 1

# BUG FIX: the original overwrote x on every loop iteration, so the
# "standard deviation" was computed from the LAST sample's squared deviation
# only.  Accumulate the squared deviations over all samples instead.
mean = total / count
sq_dev_sum = 0
for p in part:
    sq_dev_sum += (p - mean) ** 2
variance = sq_dev_sum / count
print(variance)
S_D_ = variance ** 0.5           # population standard deviation
S_E_ = S_D_ / (count ** 0.5)     # standard error of the mean
print("標準偏差:", S_D_)
print("標準誤差:", S_E_)

fig, ax = plt.subplots(figsize=(20, 20))
ax.plot(xlist, ylist)
labels = ax.get_xticklabels()
plt.setp(labels, rotation=90, fontsize=8)
plt.title('pulse data')
plt.xlabel("time")
plt.ylabel("pulse")
plt.tight_layout()
plt.show()
# def fear_judge():
#!/usr/bin/env python2
from __future__ import (unicode_literals, absolute_import,
print_function, division)
import sys
import re
from collections import defaultdict
import rlcompleter
def current_raw(input):
    """Last word on the command line, with one leading quote stripped."""
    last = input[-1]
    if last.startswith(('"', "'")):
        return last[1:]
    return last
def current_list(input):
    """Tokenise the current word on non-identifier characters (dot kept)."""
    splitter = re.compile(r'[^a-zA-Z0-9_\.]')
    return splitter.split(current_raw(input))
def current_prefix(input):
    """Trailing token of the current word (text after the last separator)."""
    tokens = current_list(input)
    return tokens[-1]
def prior(input):
    """All command-line words except the one currently being completed."""
    return input[:len(input) - 1]
def lazy_imports(*args):
    """Best-effort import of every module-like identifier found in *args*.

    'np' and 'pd' are expanded to numpy / pandas.  Imported modules are
    bound under the original short name in this module's globals so later
    completion can resolve them.  Import failures are silently ignored.
    """
    query = ' '.join(arg for arg in args if arg)
    for raw_module_name in re.findall("([a-zA-Z_][a-zA-Z0-9_]*)\.?", query):
        if re.match('np(\..*)?$', raw_module_name):
            module_name = re.sub('^np', 'numpy', raw_module_name)
        elif re.match('pd(\..*)?$', raw_module_name):
            module_name = re.sub('^pd', 'pandas', raw_module_name)
        else:
            module_name = raw_module_name
        try:
            globals()[raw_module_name] = __import__(module_name)
        except ImportError:
            pass
def complete_all(prefix, completion_args):
    """Return completion candidates for *prefix* using rlcompleter plus
    module-name completion.

    `completion_args` is a defaultdict carrying the -x / -l / -c flags; the
    exec/locals interplay below is order-sensitive, so the body is left
    exactly as written.
    """
    lazy_imports(prefix, completion_args['c_arg'])
    if completion_args:
        # Seed dummy `x` and `l` variables into locals() so that the names
        # offered by the -x / -fx / -l flags complete like real objects.
        if completion_args['x_arg']:
            x = str()
        if completion_args['l_arg']:
            l = list()
        if completion_args['c_arg']:
            # Execute the -c snippet so its bindings land in locals().
            # NOTE(review): exec of user-supplied text — acceptable for a
            # local shell-completion helper, but never expose this to
            # untrusted input.
            exec(completion_args['c_arg'].strip('"\'').replace("`", "'"))
    # Completion context = local bindings (incl. exec results) + globals.
    context = locals()
    context.update(globals())
    completer = rlcompleter.Completer(context)
    idx = 0
    options_set = set()
    # rlcompleter yields one candidate per state index until None.
    while completer.complete(prefix, idx):
        options_set.add(completer.complete(prefix, idx))
        idx += 1
    # Also offer importable module names matching the prefix.
    module_completion, module_list = get_completerlib()
    try:
        options = module_completion("import " + prefix) or []
    except: #module_completion may throw exception (e.g. on 'import sqlalchemy_utils.')
        options = []
    if options:
        options = [x.rstrip(' ') for x in options if x.startswith(prefix)]
    return options + list(options_set)
def parse_string(input):
    """Dispatch on the shape of the current command line and return the list
    of completion candidates for the word being typed.
    """
    if current_raw(input).startswith('--'):
        # Long options supported by the host tool.
        return ['--si', '--so', '--ji', '--jo', '--i']
    elif current_raw(input).startswith('-'):
        # Short options.
        return ['-h', '-x', '-fx', '-l', '-c', '-C']
    elif len(prior(input)) > 0 and prior(input)[-1] == '-c':
        # Completing the argument of -c (an inline code snippet).
        if 'import'.startswith(current_raw(input)):
            options = ["'import"]
        elif current_raw(input).startswith('import ') or current_raw(input).startswith('from '):
            module_completion, module_list = get_completerlib()
            options = module_completion(current_raw(input)) or []
            if options:
                options = [x.rstrip(' ') for x in options if x.startswith(current_prefix(input))]
        else:
            options = complete_all(current_prefix(input), defaultdict(lambda: None))
        if current_prefix(input).endswith('.'):
            # Hide private attributes right after a dot.
            options = [x for x in options if '._' not in x]
        return options
    elif current_raw(input) == '':
        # Nothing typed yet: offer a starter set of common stdlib modules.
        options = ['sys', 'json', 're', 'csv', 'datetime', 'hashlib', 'itertools', 'math', 'os', 'random', 'shutil']
        # NOTE: += of a 1-char string onto a list appends that single
        # character as an element ('x' / 'l'), which is the intent here.
        if '-x' in input[:-1] or '-fx' in input[:-1]:
            options += 'x'
        if '-l' in input[:-1]:
            options += 'l'
        return options
    else:
        # General case: complete against builtins and imported modules,
        # passing along whichever flags appear earlier on the line.
        completion_args = defaultdict(lambda: None)
        if '-x' in prior(input) or '-fx' in prior(input):
            completion_args['x_arg'] = True
        if '-l' in prior(input):
            completion_args['l_arg'] = True
        if '-c' in prior(input):
            c_index = prior(input).index('-c')
            if (c_index + 1) < len(prior(input)):
                completion_args['c_arg'] = prior(input)[c_index + 1]
        options = complete_all(current_prefix(input), completion_args)
        if current_prefix(input).endswith('.'):
            options = [x for x in options if '._' not in x]
        return options
def get_completerlib():
    """Implementations for various useful completers.

    These are all loaded by default by IPython.
    (Vendored from IPython's completerlib; returns the
    ``(module_completion, module_list)`` helper functions.)
    """
    #-----------------------------------------------------------------------------
    # Copyright (C) 2010-2011 The IPython Development Team.
    #
    # Distributed under the terms of the BSD License.
    #
    # The full license is in the file COPYING.txt, distributed with this software.
    #-----------------------------------------------------------------------------

    import inspect
    import os

    try:
        # Python >= 3.3
        from importlib.machinery import all_suffixes
        _suffixes = all_suffixes()
    except ImportError:
        from imp import get_suffixes
        _suffixes = [s[0] for s in get_suffixes()]

    # Third-party imports
    from time import time
    from zipimport import zipimporter

    # Seconds of sys.path scanning before we flush stdout once / give up.
    TIMEOUT_STORAGE = 2
    TIMEOUT_GIVEUP = 20

    # Regular expression for the python import statement
    import_re = re.compile(r'(?P<name>[a-zA-Z_][a-zA-Z0-9_]*?)'
                           r'(?P<package>[/\\]__init__)?'
                           r'(?P<suffix>%s)$' %
                           r'|'.join(re.escape(s) for s in _suffixes))

    # RE for the ipython %run command (python + ipython scripts)
    magic_run_re = re.compile(r'.*(\.ipy|\.ipynb|\.py[w]?)$')

    def module_list(path):
        """
        Return the list containing the names of the modules available in the given
        folder.
        """
        # sys.path has the cwd as an empty string, but isdir/listdir need it as '.'
        if path == '':
            path = '.'

        # A few local constants to be used in loops below
        pjoin = os.path.join

        if os.path.isdir(path):
            # Build a list of all files in the directory and all files
            # in its subdirectories. For performance reasons, do not
            # recurse more than one level into subdirectories.
            files = []
            for root, dirs, nondirs in os.walk(path):
                subdir = root[len(path)+1:]
                if subdir:
                    files.extend(pjoin(subdir, f) for f in nondirs)
                    dirs[:] = []  # Do not recurse into additional subdirectories.
                else:
                    files.extend(nondirs)
        else:
            try:
                files = list(zipimporter(path)._files.keys())
            except:
                files = []

        # Build a list of modules which match the import_re regex.
        modules = []
        for f in files:
            m = import_re.match(f)
            if m:
                modules.append(m.group('name'))
        return list(set(modules))

    def get_root_modules():
        """
        Returns a list containing the names of all the modules available in the
        folders of the pythonpath.
        """
        rootmodules_cache = {}
        rootmodules = list(sys.builtin_module_names)
        start_time = time()
        # BUG FIX: `store` was commented out in the original while the
        # `not store` test below remained, so scanning a slow sys.path
        # raised NameError after TIMEOUT_STORAGE seconds.  Restore the
        # flag so the slow-path branch runs at most once.
        store = False
        for path in sys.path:
            try:
                modules = rootmodules_cache[path]
            except KeyError:
                modules = module_list(path)
                try:
                    modules.remove('__init__')
                except ValueError:
                    pass
                if path not in ('', '.'):  # cwd modules should not be cached
                    rootmodules_cache[path] = modules
                if time() - start_time > TIMEOUT_STORAGE and not store:
                    store = True
                    sys.stdout.flush()
                if time() - start_time > TIMEOUT_GIVEUP:
                    print("This is taking too long, we give up.\n")
                    return []
            rootmodules.extend(modules)
        rootmodules = list(set(rootmodules))
        return rootmodules

    def is_importable(module, attr, only_modules):
        """True if *attr* of *module* should be offered as a completion."""
        if only_modules:
            return inspect.ismodule(getattr(module, attr))
        else:
            return not(attr[:2] == '__' and attr[-2:] == '__')

    def try_import(mod, only_modules=False):
        """Import *mod* and list its completable attributes/submodules."""
        try:
            m = __import__(mod)
        except:
            return []
        mods = mod.split('.')
        for module in mods[1:]:
            m = getattr(m, module)

        m_is_init = hasattr(m, '__file__') and '__init__' in m.__file__

        completions = []
        if (not hasattr(m, '__file__')) or (not only_modules) or m_is_init:
            completions.extend([attr for attr in dir(m) if
                                is_importable(m, attr, only_modules)])

        completions.extend(getattr(m, '__all__', []))
        if m_is_init:
            # Package: also offer modules present on disk next to __init__.
            completions.extend(module_list(os.path.dirname(m.__file__)))
        completions = set(completions)
        if '__init__' in completions:
            completions.remove('__init__')
        return list(completions)

    def module_completion(line):
        """
        Returns a list containing the completion possibilities for an import line.

        The line looks like this :
        'import xml.d'
        'from xml.dom import'
        """
        words = line.split(' ')
        nwords = len(words)

        # from whatever <tab> -> 'import '
        if nwords == 3 and words[0] == 'from':
            return ['import ']

        # 'from xy<tab>' or 'import xy<tab>'
        if nwords < 3 and (words[0] in ['import', 'from']):
            if nwords == 1:
                return get_root_modules()
            mod = words[1].split('.')
            if len(mod) < 2:
                return get_root_modules()
            completion_list = try_import('.'.join(mod[:-1]), True)
            return ['.'.join(mod[:-1] + [el]) for el in completion_list]

        # 'from xyz import abc<tab>'
        if nwords >= 3 and words[0] == 'from':
            mod = words[1]
            return try_import(mod)

    return module_completion, module_list
def remove_trailing_paren(str_):
    """Strip a single trailing '(' (rlcompleter appends one to callables)."""
    return str_[:-1] if str_.endswith('(') else str_
def main():
    """Entry point: print completion candidates for the words in argv."""
    input = sys.argv[1:]
    if not input:
        return
    if '<' in input or '>' in input:
        # Redirection tokens: delegate to bash's default filename completion.
        print('_longopt')
        return
    options = list(set(map(remove_trailing_paren, parse_string(input))))
    if not options:
        return
    # Re-offer the raw prefix when all candidates are about the same length,
    # so bash does not over-eagerly accept a single candidate.
    if len(current_list(input)) > 1 and max(map(len, options)) + 1 >= len(current_raw(input)):
        options.append(current_prefix(input))
    if len(options) <= 1:
        options = options + [opt + "'" for opt in options]
    print(' '.join(options))


if __name__ == '__main__':
    main()
|
# Board-game simulation (BOJ style): K tokens move on an N x N board each
# turn; the game ends when any cell stacks 4 or more tokens.
# Board cell codes: 0 = white, 1 = red, 2 = blue.
import sys
import heapq
input = sys.stdin.readline  # shadow builtin input with the faster readline
N, K = map(int, input().split())
board = []
for _ in range(N):
    board.append(list(map(int, input().split())))
# token[x][y]: stack of token ids sitting on cell (x, y), bottom first.
token = [[[] for _ in range(N)] for _ in range(N)]
direction = [0]  # direction[i] for token i (1-based): 0 right, 1 left, 2 up, 3 down
next_queue = []
for i in range(1, K+1):
    x, y, d = map(int, input().split())
    token[x-1][y-1].append(i)
    direction.append(d-1)
    # Only the bottom token of a stack drives the movement each turn.
    if len(token[x-1][y-1]) == 1:
        heapq.heappush(next_queue, (i, x-1, y-1))
dx = [0, 0, -1, 1]
dy = [1, -1, 0, 0]
turn = 0
is_end = False
while next_queue:
    queue = next_queue
    next_queue = []
    turn += 1
    while queue:
        number, x, y = heapq.heappop(queue)
        dir = direction[number]
        nx = x + dx[dir]
        ny = y + dy[dir]
        if (not (0 <= nx < N and 0 <= ny < N)) or board[nx][ny] == 2:
            # Target cell is blue or off the board: reverse the direction.
            dir = dir+1 if dir%2 == 0 else dir-1
            direction[number] = dir
            nx = x + dx[dir]
            ny = y + dy[dir]
            if (not (0 <= nx < N and 0 <= ny < N)) or board[nx][ny] == 2:
                # Still blue or off the board after reversing: stay put.
                heapq.heappush(next_queue, (number, x, y))
                continue
            # NOTE(review): when the reversed target IS valid, control falls
            # past the elif/else below, so the stack is not moved on this
            # path — confirm against the problem statement.
        elif board[nx][ny] == 0:
            # Destination is white: move the whole stack in order.
            token[nx][ny].extend(token[x][y])
            token[x][y] = []
            # If the same token still leads, it moves again next turn.
            if token[nx][ny][0] == number:
                heapq.heappush(next_queue, (token[nx][ny][0], nx, ny))
        else:
            # Destination is red: move the stack reversed (top becomes bottom).
            token[nx][ny].extend(reversed(token[x][y]))
            token[x][y] = []
            if token[nx][ny][0] <= number:
                heapq.heappush(next_queue, (token[nx][ny][0], nx, ny))
            else:
                heapq.heappush(queue, (token[nx][ny][0], nx, ny))
        # Terminate once any cell holds 4 or more tokens.
        if len(token[nx][ny]) >= 4:
            is_end = True
            break
    # If the terminating turn exceeds 1000, output -1 instead.
    if turn == 1000:
        turn = -1
    if is_end or turn == -1:
        break
print(turn)
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import Mock
import pytest
from torch.utils.data import DataLoader
from pytorch_lightning import Trainer
from pytorch_lightning.trainer.connectors.data_connector import _DataLoaderSource
from tests.helpers import BoringDataModule, BoringModel
class NoDataLoaderModel(BoringModel):
    """BoringModel variant whose dataloader hooks are all overwritten with None."""
    def __init__(self):
        super().__init__()
        # Replace the inherited hook methods with None so the
        # _DataLoaderSource availability check reports them as undefined.
        self.train_dataloader = None
        self.val_dataloader = None
        self.test_dataloader = None
        self.predict_dataloader = None
@pytest.mark.parametrize(
    "instance,available",
    [
        # (source instance, expected is_defined() result)
        (None, True),
        (BoringModel().train_dataloader(), True),
        (BoringModel(), True),
        (NoDataLoaderModel(), False),
        (BoringDataModule(), True),
    ],
)
def test_dataloader_source_available(instance, available):
    """Test the availability check for _DataLoaderSource."""
    source = _DataLoaderSource(instance=instance, name="train_dataloader")
    assert source.is_defined() is available
def test_dataloader_source_direct_access():
    """Test requesting a dataloader when the source is already a dataloader."""
    loader = BoringModel().train_dataloader()
    source = _DataLoaderSource(instance=loader, name="any")
    assert source.is_defined()
    assert not source.is_module()
    assert source.dataloader() is loader
def test_dataloader_source_request_from_module():
    """Test requesting a dataloader from a module works."""
    model = BoringModel()
    model.trainer = Trainer()
    model.foo = Mock(return_value=model.train_dataloader())
    source = _DataLoaderSource(model, "foo")
    assert source.is_module()
    # The hook must be called lazily: not on construction, once on request.
    model.foo.assert_not_called()
    assert isinstance(source.dataloader(), DataLoader)
    model.foo.assert_called_once()
|
from django import forms
from bootstrap_datepicker_plus import DatePickerInput
class Concert(forms.Form):
    """Search/filter form for concerts: optional name, city, and date range."""
    # Choices for a ranking toggle; currently only referenced by the
    # commented-out 'ranking' field below.
    IS_RANKING = (
        (0, ("Unranking")),
        (1, ("Ranking"))
    )
    concert_name = forms.CharField(required=False,max_length=100)
    city = forms.CharField(required=False,max_length=100)
    # Dates are rendered with a picker widget, ISO format (YYYY-MM-DD).
    start_date = forms.DateField(
        widget=DatePickerInput(format='%Y-%m-%d'),required=False
    )
    end_date = forms.DateField(
        widget=DatePickerInput(format='%Y-%m-%d'),required=False
    )
    # artist = forms.CharField(required=False,max_length=100)
    # keyword = forms.CharField(required=False,max_length=100)
    # ranking = forms.ChoiceField(choices=IS_RANKING,initial=0,required=False)
# Generated by Django 2.2.15 on 2020-09-03 23:28
from django.db import migrations
def migrate_number_children_questions_forward(apps, _schema_editor):
    """Collapse numeric children-count answers into YES/NO flag questions.

    number_children_under_19 -> has_children_under_19 (YES when count > 0);
    number_children_over_19  -> has_children_over_19, with the raw count
    preserved under number_children_over_19_need_support.
    """
    def _convert_value(value):
        # Any positive count means "has children".
        return 'YES' if int(value) > 0 else 'NO'
    UserResponse = apps.get_model('core', 'UserResponse')
    qs_under = UserResponse.objects.filter(question_id='number_children_under_19')
    print(f"Converting {qs_under.count()} under 19 responses")
    for resp in qs_under:
        resp.value = _convert_value(resp.value)
        resp.question_id = 'has_children_under_19'
        resp.save()
    qs_over = UserResponse.objects.filter(question_id='number_children_over_19')
    print(f"Converting {qs_over.count()} over 19 responses")
    for resp in qs_over:
        # Copy the original count to a new question before overwriting it.
        UserResponse.objects.create(bceid_user=resp.bceid_user, value=resp.value, question_id='number_children_over_19_need_support')
        resp.value = _convert_value(resp.value)
        resp.question_id = 'has_children_over_19'
        resp.save()
def migrate_number_children_questions_backwards(apps, _schema_editor):
    """Reverse migration: restore the numeric children-count questions.

    NOTE: lossy for under-19 responses — YES/NO maps back to 1/0, so the
    original counts cannot be recovered exactly.
    """
    UserResponse = apps.get_model('core', 'UserResponse')
    under_19_responses = UserResponse.objects.filter(question_id='has_children_under_19')
    print(f"Converting {under_19_responses.count()} under 19 responses")
    for response in under_19_responses:
        response.value = 1 if response.value == 'YES' else 0
        response.question_id = 'number_children_under_19'
        response.save()
    # Over-19 flags are dropped; the preserved counts resume the old name.
    UserResponse.objects.filter(question_id='has_children_over_19').delete()
    UserResponse.objects.filter(question_id='number_children_over_19_need_support').update(question_id='number_children_over_19')
class Migration(migrations.Migration):
    """Data migration converting children-count questions to yes/no flags."""
    dependencies = [
        ('core', '0019_auto_20191008_2141'),
    ]
    operations = [
        migrations.RunPython(migrate_number_children_questions_forward, migrate_number_children_questions_backwards)
    ]
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created: Thu Aug 21 20:40:26 2014
# by: PyQt5 UI code generator 5.3.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """UI definition generated by pyuic5 from 'mainwindow.ui'.

    NOTE: this class is auto-generated; manual code edits are lost on
    regeneration, so only comments are added here.
    """
    def setupUi(self, MainWindow):
        """Create and lay out all widgets, menus, actions, and connections."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(842, 687)
        MainWindow.setIconSize(QtCore.QSize(32, 32))
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.horizontalLayoutWidget_4 = QtWidgets.QWidget(self.centralwidget)
        self.horizontalLayoutWidget_4.setGeometry(QtCore.QRect(10, 30, 821, 541))
        self.horizontalLayoutWidget_4.setObjectName("horizontalLayoutWidget_4")
        self.gdlyMain = QtWidgets.QGridLayout(self.horizontalLayoutWidget_4)
        self.gdlyMain.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
        self.gdlyMain.setContentsMargins(5, 5, 5, 5)
        self.gdlyMain.setObjectName("gdlyMain")
        self.groupBox = QtWidgets.QGroupBox(self.horizontalLayoutWidget_4)
        self.groupBox.setTitle("")
        self.groupBox.setObjectName("groupBox")
        # --- Result table (10 columns, headers set in retranslateUi) ---
        self.horizontalLayoutWidget_3 = QtWidgets.QWidget(self.groupBox)
        self.horizontalLayoutWidget_3.setGeometry(QtCore.QRect(240, 10, 551, 501))
        self.horizontalLayoutWidget_3.setObjectName("horizontalLayoutWidget_3")
        self.hzlyTbwResult = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_3)
        self.hzlyTbwResult.setContentsMargins(5, 5, 5, 5)
        self.hzlyTbwResult.setObjectName("hzlyTbwResult")
        self.tbwResult = QtWidgets.QTableWidget(self.horizontalLayoutWidget_3)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.tbwResult.sizePolicy().hasHeightForWidth())
        self.tbwResult.setSizePolicy(sizePolicy)
        self.tbwResult.setAutoFillBackground(True)
        self.tbwResult.setLineWidth(2)
        self.tbwResult.setMidLineWidth(1)
        self.tbwResult.setWordWrap(False)
        self.tbwResult.setRowCount(0)
        self.tbwResult.setColumnCount(10)
        self.tbwResult.setObjectName("tbwResult")
        item = QtWidgets.QTableWidgetItem()
        self.tbwResult.setHorizontalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem()
        self.tbwResult.setHorizontalHeaderItem(1, item)
        item = QtWidgets.QTableWidgetItem()
        self.tbwResult.setHorizontalHeaderItem(2, item)
        item = QtWidgets.QTableWidgetItem()
        self.tbwResult.setHorizontalHeaderItem(3, item)
        item = QtWidgets.QTableWidgetItem()
        self.tbwResult.setHorizontalHeaderItem(4, item)
        item = QtWidgets.QTableWidgetItem()
        self.tbwResult.setHorizontalHeaderItem(5, item)
        item = QtWidgets.QTableWidgetItem()
        self.tbwResult.setHorizontalHeaderItem(6, item)
        item = QtWidgets.QTableWidgetItem()
        self.tbwResult.setHorizontalHeaderItem(7, item)
        item = QtWidgets.QTableWidgetItem()
        self.tbwResult.setHorizontalHeaderItem(8, item)
        self.hzlyTbwResult.addWidget(self.tbwResult)
        self.hzlyTbwResult.setStretch(0, 1)
        # --- Task tabs: running and completed task lists ---
        self.horizontalLayoutWidget_5 = QtWidgets.QWidget(self.groupBox)
        self.horizontalLayoutWidget_5.setGeometry(QtCore.QRect(10, 10, 221, 501))
        self.horizontalLayoutWidget_5.setObjectName("horizontalLayoutWidget_5")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_5)
        self.horizontalLayout.setContentsMargins(5, 5, 5, 5)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.tabTasks = QtWidgets.QTabWidget(self.horizontalLayoutWidget_5)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.tabTasks.sizePolicy().hasHeightForWidth())
        self.tabTasks.setSizePolicy(sizePolicy)
        self.tabTasks.setObjectName("tabTasks")
        self.tabTaskRuning = QtWidgets.QWidget()
        self.tabTaskRuning.setAutoFillBackground(False)
        self.tabTaskRuning.setObjectName("tabTaskRuning")
        self.horizontalLayoutWidget = QtWidgets.QWidget(self.tabTaskRuning)
        self.horizontalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 201, 461))
        self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
        self.hzlyRunningTask = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
        self.hzlyRunningTask.setContentsMargins(5, 5, 5, 5)
        self.hzlyRunningTask.setObjectName("hzlyRunningTask")
        self.listRunningTasks = QtWidgets.QListWidget(self.horizontalLayoutWidget)
        self.listRunningTasks.setObjectName("listRunningTasks")
        self.hzlyRunningTask.addWidget(self.listRunningTasks)
        self.tabTasks.addTab(self.tabTaskRuning, "")
        self.tabTaskCompleted = QtWidgets.QWidget()
        self.tabTaskCompleted.setObjectName("tabTaskCompleted")
        self.horizontalLayoutWidget_2 = QtWidgets.QWidget(self.tabTaskCompleted)
        self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(0, 0, 201, 461))
        self.horizontalLayoutWidget_2.setObjectName("horizontalLayoutWidget_2")
        self.hzlyCompletedTask = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_2)
        self.hzlyCompletedTask.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
        self.hzlyCompletedTask.setContentsMargins(5, 5, 5, 5)
        self.hzlyCompletedTask.setObjectName("hzlyCompletedTask")
        self.listCompletedTasks = QtWidgets.QListWidget(self.horizontalLayoutWidget_2)
        self.listCompletedTasks.setObjectName("listCompletedTasks")
        self.hzlyCompletedTask.addWidget(self.listCompletedTasks)
        self.tabTasks.addTab(self.tabTaskCompleted, "")
        self.horizontalLayout.addWidget(self.tabTasks)
        self.gdlyMain.addWidget(self.groupBox, 0, 0, 1, 1)
        self.gdlyMain.setRowStretch(0, 1)
        # --- Output/log list at the bottom of the window ---
        self.horizontalLayoutWidget_6 = QtWidgets.QWidget(self.centralwidget)
        self.horizontalLayoutWidget_6.setGeometry(QtCore.QRect(10, 580, 821, 75))
        self.horizontalLayoutWidget_6.setObjectName("horizontalLayoutWidget_6")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_6)
        self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.ltOutput = QtWidgets.QListWidget(self.horizontalLayoutWidget_6)
        self.ltOutput.setObjectName("ltOutput")
        self.horizontalLayout_2.addWidget(self.ltOutput)
        MainWindow.setCentralWidget(self.centralwidget)
        # --- Menu bar, status bar, and actions ---
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 842, 23))
        self.menubar.setObjectName("menubar")
        self.menuFile = QtWidgets.QMenu(self.menubar)
        self.menuFile.setObjectName("menuFile")
        self.menuExport = QtWidgets.QMenu(self.menuFile)
        self.menuExport.setObjectName("menuExport")
        self.menuHelp = QtWidgets.QMenu(self.menubar)
        self.menuHelp.setObjectName("menuHelp")
        self.menuUser = QtWidgets.QMenu(self.menubar)
        self.menuUser.setObjectName("menuUser")
        self.menuView = QtWidgets.QMenu(self.menubar)
        self.menuView.setObjectName("menuView")
        self.menuEdit = QtWidgets.QMenu(self.menubar)
        self.menuEdit.setObjectName("menuEdit")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.actionLogin = QtWidgets.QAction(MainWindow)
        self.actionLogin.setObjectName("actionLogin")
        self.actionTo_Excel = QtWidgets.QAction(MainWindow)
        self.actionTo_Excel.setObjectName("actionTo_Excel")
        self.actionNew_Task = QtWidgets.QAction(MainWindow)
        self.actionNew_Task.setShortcutContext(QtCore.Qt.ApplicationShortcut)
        self.actionNew_Task.setObjectName("actionNew_Task")
        self.actionRegiste = QtWidgets.QAction(MainWindow)
        self.actionRegiste.setObjectName("actionRegiste")
        self.actionLogout = QtWidgets.QAction(MainWindow)
        self.actionLogout.setObjectName("actionLogout")
        self.actionTo_File = QtWidgets.QAction(MainWindow)
        self.actionTo_File.setObjectName("actionTo_File")
        self.actionInsert = QtWidgets.QAction(MainWindow)
        self.actionInsert.setObjectName("actionInsert")
        self.actionStop = QtWidgets.QAction(MainWindow)
        self.actionStop.setObjectName("actionStop")
        self.actionStopAll = QtWidgets.QAction(MainWindow)
        self.actionStopAll.setObjectName("actionStopAll")
        self.menuExport.addAction(self.actionTo_Excel)
        self.menuExport.addAction(self.actionTo_File)
        self.menuFile.addAction(self.actionNew_Task)
        self.menuFile.addAction(self.menuExport.menuAction())
        self.menuFile.addAction(self.actionStop)
        self.menuFile.addAction(self.actionStopAll)
        self.menuUser.addAction(self.actionRegiste)
        self.menuUser.addAction(self.actionLogin)
        self.menuUser.addAction(self.actionLogout)
        self.menuEdit.addAction(self.actionInsert)
        self.menubar.addAction(self.menuFile.menuAction())
        self.menubar.addAction(self.menuEdit.menuAction())
        self.menubar.addAction(self.menuView.menuAction())
        self.menubar.addAction(self.menuUser.menuAction())
        self.menubar.addAction(self.menuHelp.menuAction())
        self.retranslateUi(MainWindow)
        self.tabTasks.setCurrentIndex(0)
        # --- Signal/slot connections to MainWindow handler methods ---
        self.tbwResult.cellChanged['int','int'].connect(MainWindow.itemChanged)
        self.tabTasks.tabBarClicked['int'].connect(MainWindow.tabTasksClicked)
        self.listCompletedTasks.itemClicked['QListWidgetItem*'].connect(MainWindow.completedTasksItemClicked)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (translatable via Qt's tr system)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "QT"))
        item = self.tbwResult.horizontalHeaderItem(0)
        item.setText(_translate("MainWindow", "公司"))
        item = self.tbwResult.horizontalHeaderItem(1)
        item.setText(_translate("MainWindow", "联系人"))
        item = self.tbwResult.horizontalHeaderItem(2)
        item.setText(_translate("MainWindow", "联系电话"))
        item = self.tbwResult.horizontalHeaderItem(3)
        item.setText(_translate("MainWindow", "手机"))
        item = self.tbwResult.horizontalHeaderItem(4)
        item.setText(_translate("MainWindow", "传真"))
        item = self.tbwResult.horizontalHeaderItem(5)
        item.setText(_translate("MainWindow", "邮编"))
        item = self.tbwResult.horizontalHeaderItem(6)
        item.setText(_translate("MainWindow", "公司主页"))
        item = self.tbwResult.horizontalHeaderItem(7)
        item.setText(_translate("MainWindow", "公司地址"))
        item = self.tbwResult.horizontalHeaderItem(8)
        item.setText(_translate("MainWindow", "备注"))
        self.tabTaskRuning.setStatusTip(_translate("MainWindow", "Task is running"))
        self.tabTasks.setTabText(self.tabTasks.indexOf(self.tabTaskRuning), _translate("MainWindow", "未完成"))
        self.tabTasks.setTabText(self.tabTasks.indexOf(self.tabTaskCompleted), _translate("MainWindow", "已完成"))
        self.menuFile.setTitle(_translate("MainWindow", "文件"))
        self.menuExport.setTitle(_translate("MainWindow", "Export"))
        self.menuHelp.setTitle(_translate("MainWindow", "帮助"))
        self.menuUser.setTitle(_translate("MainWindow", "用户"))
        self.menuView.setTitle(_translate("MainWindow", "视图"))
        self.menuEdit.setTitle(_translate("MainWindow", "编辑"))
        self.actionLogin.setText(_translate("MainWindow", "Login"))
        self.actionTo_Excel.setText(_translate("MainWindow", "To Excel"))
        self.actionNew_Task.setText(_translate("MainWindow", "New Task"))
        self.actionNew_Task.setShortcut(_translate("MainWindow", "Ctrl+N"))
        self.actionRegiste.setText(_translate("MainWindow", "Registe"))
        self.actionRegiste.setShortcut(_translate("MainWindow", "Ctrl+R"))
        self.actionLogout.setText(_translate("MainWindow", "Logout"))
        self.actionTo_File.setText(_translate("MainWindow", "To File"))
        self.actionInsert.setText(_translate("MainWindow", "Insert"))
        self.actionStop.setText(_translate("MainWindow", "Stop"))
        self.actionStopAll.setText(_translate("MainWindow", "StopAll"))
|
import serial,logging
class RMY85106:
    """Driver for an R.M. Young 85106 ultrasonic anemometer on a serial port."""
    def __init__(self,port,baud=9600):
        # Short timeout so read() polls rather than blocking indefinitely.
        self._s = serial.Serial(port,baud,timeout=0.1)
    def __del__(self):
        # Best-effort close; the port object may already be torn down at exit.
        try:
            self._s.close()
        except Exception:
            pass
    def read(self):
        """Poll the sensor once.

        Returns {'v': wind speed, 'd': wind direction} parsed from the
        response line, or None on a missing/malformed response.
        """
        #self._s.write('M0!\r') # the sensor is slow at processing commands...
        for c in 'M0!\r':
            self._s.write(c.encode())
            self._s.flushOutput()
        line = []
        for i in range(20): # should be ~17 chr
            c = self._s.read(size=1)
            if len(c):
                line.extend(c.decode())
                # BUG FIX: Serial.read() returns bytes in Python 3, so the
                # old comparison c == '\r' never matched and the loop always
                # ran all 20 reads. Compare against b'\r' instead.
                if c == b'\r':
                    break
        #logger.debug(''.join(line))
        #logger.debug([ord(c) for c in line])
        if len(line) <= 0:
            logging.warning('No response from ultrasonic anemometer')
            return
        line = ''.join(line).strip().split(' ')
        # '0' is the address of the sensor; line[3][2] is expected to be the
        # '*' terminator of the checksum field.
        if not ('0' == line[0] and '*' == line[3][2]):
            logging.warning('Unexpected response from ultrasonic anemometer: {}'.format(line))
            return
        return {'v':float(line[1]),'d':float(line[2])}
if '__main__' == __name__:
    # Manual smoke test: poll the sensor at ~10 Hz and print each reading.
    import time
    rmy = RMY85106('/dev/ttyUSB4')
    while True:
        print(rmy.read())
        time.sleep(0.1)
|
'''
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Group Project: Team Red
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Authors:
Mark R. Mahan
Joshua Yamdogo
Samuel Trenter
Shinya Honda
Humberto Colin
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This project is meant to run a platforming game
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
from tkinter import *
class Platformer(Frame):
    """Main application frame for the platformer game."""
    def __init__(self):
        Frame.__init__(self)
        self.master.title("Platformer") #Frame's title
        self.grid()
        # Canvas dimensions, defined once and reused below.
        self.height = 600
        self.width = 1000
        # FIX: use the stored dimensions instead of repeating the magic
        # numbers, so the window size is controlled in exactly one place.
        self.canvas = Canvas(self, height = self.height, width = self.width, bg = "white")
        self.canvas.grid(row = 1, column = 0)
        self.Game() #Runs the "game"
    def Game(self):
        """Game entry point (placeholder implementation)."""
        print("hello world")
def main():
    # Construct the game frame and enter the Tk event loop.
    Platformer().mainloop()
|
class Queue(object):
    """FIFO queue backed by a list.

    Note: peek() returns the .value attribute of the front item, so it is
    intended for queues of node-like objects.
    """
    def __init__(self):
        self.items = []

    def enqueue(self, item):
        """Add *item* at the back of the queue."""
        self.items.insert(0, item)

    def dequeue(self):
        """Remove and return the front item, or None when empty."""
        if self.is_empty():
            return None
        return self.items.pop()

    def is_empty(self):
        return len(self.items) == 0

    def peek(self):
        """Return the front item's .value without removing it (None if empty)."""
        if self.is_empty():
            return None
        return self.items[-1].value

    def __len__(self):
        return self.size()

    def size(self):
        return len(self.items)
class Node(object):
    """Binary-tree node holding a value and left/right child links."""
    def __init__(self,value):
        self.value=value
        # Children start empty; assigned directly by the caller.
        self.left=None
        self.right=None
class BinaryTree(object):
    """Minimal binary tree supporting level-order (BFS) printing."""
    def __init__(self,root):
        self.root=Node(root)
    def print_tree(self,traversal_type):
        """Dispatch on *traversal_type*; only 'levelorder' is supported.

        Returns the traversal string, or False for unsupported types.
        """
        if traversal_type=="levelorder":
            # BUG FIX: traverse from self.root rather than the module-level
            # variable 'tree', which made the method depend on a global and
            # print the wrong tree for any other instance.
            return self.levelorder_print(self.root)
        else:
            print("Traversal type "+str(traversal_type)+" is not supported")
            return False
    def levelorder_print(self,start):
        """Return 'v1-v2-...-' of node values in breadth-first order."""
        if start is None:
            return
        queue=Queue()
        queue.enqueue(start)
        traversal = ""
        while len(queue)>0:
            traversal+=str(queue.peek())+"-"
            node=queue.dequeue()
            if node.left:
                queue.enqueue(node.left)
            if node.right:
                queue.enqueue(node.right)
        return traversal
if __name__=="__main__":
tree=BinaryTree(1)
tree.root.left=Node(2)
tree.root.right=Node(3)
tree.root.left.left=Node(4)
tree.root.left.right=Node(5)
tree.root.right.left=Node(6)
tree.root.right.right=Node(7)
print(tree.print_tree("levelorder"))
|
#1. Define the basic element of a linked list: the Node
class Node(object):
    '''
    data  holds the node's payload
    pnext holds the next node object (None at the tail)
    '''
    def __init__(self, data=None, pnext=None):
        self.data = data
        self.pnext = pnext
    def __repr__(self):
        '''
        Defines Node's string representation:
        printing a node shows its data.
        '''
        return str(self.data)
#2. Link the nodes together
# (1) Create the individual nodes
node1 = Node(1)
node2 = Node(2)
node3 = Node(3)
# (2) Express the links between the nodes
node1.pnext = node2
node2.pnext = node3
#3. Print the list
# (1) Print in order
def printlist(node):
    '''Print every node from *node* to the tail, one per line.'''
    while node:
        print(node)
        node = node.pnext
# printlist(node1)
# (2) Print in reverse
def backwardprintlist(node):
    '''Recursively print the list from the tail back to *node*.'''
    if node == None:
        return
    backwardprintlist(node.pnext)
    print(node)
# backwardprintlist(node1)
#4. Create a singly linked list & basic list operations
class Slinkedlist(object):
    """Singly linked list with append/prepend/insert/delete and printing."""
    def __init__(self):
        self.headval = None
    def append(self, newdata):
        '''
        Append a new node holding newdata at the tail of the list.
        '''
        newnode = Node(newdata)
        if self.headval is None:
            self.headval = newnode
            return
        # Must walk the whole list to reach the last element.
        laste = self.headval
        while laste.pnext:
            laste = laste.pnext
        laste.pnext = newnode
    def forwardprint(self):
        '''Print the list from head to tail.'''
        printval = self.headval
        while printval:
            print(printval)
            printval = printval.pnext
    def backwardprint(self, node):
        '''Print the list from the tail back to *node* (recursive).'''
        if self.headval == None: return
        printval = node
        if printval == None: return
        self.backwardprint(printval.pnext)
        print(printval)
    def add_left(self, newdata_in):
        '''Insert a new node at the head of the list.'''
        newdata_in = Node(newdata_in)
        newdata_in.pnext = self.headval
        self.headval = newdata_in
    def insert(self, selected_node, newdata_in):
        '''Insert a new node between selected_node and its successor.'''
        if not selected_node:
            print('error: the node methioned does not exsist')
            # BUG FIX: bail out here; previously execution fell through and
            # crashed with AttributeError on selected_node.pnext below.
            return
        newnode_in = Node(newdata_in)
        newnode_in.pnext = selected_node.pnext
        selected_node.pnext = newnode_in
    def delete(self, removekey):
        '''Delete the first node whose data equals removekey.'''
        if removekey is None:
            return
        if self.headval is None:
            print('The single list is empty!')
            return
        if self.headval.data == removekey:
            self.headval = self.headval.pnext
            return
        pre_node = self.headval
        cur_node = self.headval.pnext
        while cur_node is not None:
            if cur_node.data == removekey:
                pre_node.pnext = cur_node.pnext
                return
            else:
                pre_node = cur_node
                cur_node = cur_node.pnext
# test
l = Slinkedlist()
# Build 0..5, prepend 7, then insert 'haha' after the third node.
for i in range(6):
    l.append(i)
l.add_left(7)
l.insert(l.headval.pnext.pnext, 'haha')
# l.forwardprint()
# l.backwardprint(l.headval)
l.delete(5)
l.forwardprint()
|
import logging,os,time
from nt import mkdir
from comm.read_conf import Read_conf
from comm import constans
moudle=""
new_name=""
class Record_logging():
'''日志收集器类 '''
def __init__(self,name=None,get_moudle_name=None):
'''name日志收集器名字,get_moudle_name要生成的日志文件的名字'''
self.name=name
"""读取关于log的配置文件信息,包含日志输出路径,日志收集器收集级别,日志渠道输出级别,日志输出格式formmat"""
self.log_set=Read_conf()
self.log_ouyput_level=self.log_set.get("log","log_output_level")
self.log_level=self.log_set.get("log","log_get_level")
self.log_formatter=self.log_set.get("log","log_formatter")
if name!=None:
global new_name
new_name=name
if get_moudle_name!=None:
global moudle
moudle=get_moudle_name
"""设置日志路径"""
now_date=time.strftime('%Y_%m_%d')
'''根据日期新建文件夹存放日志文件,可查看test_result下log_result下的日志生成结果'''
path=constans.log_path+os.sep+now_date
if not os.path.isdir(path):
mkdir(path)
file_path=path+os.sep+moudle+".txt"
"""初始化"""
self.logger=logging.getLogger(self.name)
self.logger.setLevel(self.log_level)
'''该判断,避免一条日志重复打印,可以去除if后看看打印的结果进行对比'''
if not self.logger.handlers:
"""制定统一的输出格式:%(levelname)s 文本形式的日志级别
%(asctime)s 字符串形式的当前时间。默认格式是 “2003-07-08 16:49:45,896”。逗号后面的是毫秒
%(message)s %(message)s 用户输出的消息"""
fmt = logging.Formatter(self.log_formatter)
"""设置控制台日志(信息输出到控制台)"""
cmd_h=logging.StreamHandler()
cmd_h.setLevel(self.log_ouyput_level)
cmd_h.setFormatter(fmt)
"""设置文件日志(输出到指定文件中)"""
file_h=logging.FileHandler(file_path,encoding="utf8") #指定日志路径
file_h.setLevel(self.log_ouyput_level)
file_h.setFormatter(fmt)
"""添加2种日志"""
self.logger.addHandler(cmd_h)
self.logger.addHandler(file_h)
def debug(self,message):
self.logger.debug(message)
def info(self,message):
self.logger.info(message)
def warning(self,message):
self.logger.warning(message)
def error(self,message):
self.logger.error(message)
def critical(self,message):
self.logger.critical(message)
if __name__ == '__main__':
    # Smoke test: create a logger named "rt" and emit one info record.
    a=Record_logging("rt")
    a.info("asdasd")
from django import forms
from ca.models import UserProfile, Program, University, Package
from django.contrib.auth.models import User
class UserForm(forms.ModelForm):
    """Registration form: username/email plus password with confirmation."""
    password = forms.CharField(help_text = "Password", widget = forms.PasswordInput)
    password_con = forms.CharField(help_text = "Confirm password", widget = forms.PasswordInput)
    username = forms.CharField(help_text = "Username")
    email = forms.CharField(help_text = "Email")
    def clean_password_con(self):
        """Validate that both password fields match; return the confirmation."""
        pw1 = self.cleaned_data.get('password')
        pw2 = self.cleaned_data.get('password_con')
        if pw1 and pw1 != pw2:
            raise forms.ValidationError("Passwords don't match")
        return pw2
    class Meta:
        model = User
        fields = ('username', 'email', 'password')
class UserProfileForm(forms.ModelForm):
    """Profile form: contact details plus optional favourites and packages."""
    picture = forms.ImageField(help_text = "Select an image", required = False)
    fav_program = forms.ModelMultipleChoiceField(queryset = Program.objects.all(), to_field_name = "name", required = False)
    fav_university = forms.ModelMultipleChoiceField(queryset = University.objects.all(), required = False)
    packages = forms.ModelMultipleChoiceField(queryset = Package.objects.all(), required = False)
    class Meta:
        model = UserProfile
        fields = ['phone', 'picture', 'skype_id', 'qq_id', 'fav_program', 'fav_university', 'packages']
class EditUserForm(forms.ModelForm):
    """Account-editing form for the built-in User model.

    NOTE(review): 'password' uses a plain CharField (no PasswordInput), so
    the value is echoed on screen — confirm this is intentional.
    """
    password = forms.CharField(help_text = "Password")
    username = forms.CharField(help_text = "Username")
    email = forms.CharField(help_text = "Email")
    class Meta:
        model = User
        fields = ['username', 'password', 'email']
class EditProfileForm(forms.ModelForm):
    """Profile-editing form.

    NOTE(review): 'username' and 'email' are declared but absent from
    Meta.fields, so they render but are not saved to the profile — verify.
    """
    username = forms.CharField(required = True)
    email = forms.EmailField(required = True)
    class Meta:
        model = UserProfile
        fields = ['phone', 'picture', 'skype_id', 'qq_id']
|
# coding=utf-8
import tensorflow as tf
def one_multiply_one_convolution(inputs, filter_num, name="one_multiply_one_convolution"):
    """Apply a 1x1 convolution (pointwise channel mix) to *inputs*."""
    with tf.variable_scope(name):
        return tf.layers.conv1d(inputs, filter_num, 1, strides=1, padding='same')
def causal_convolution(inputs, filter_num, name="causal_convolution"):
    """Width-2 1-D convolution used as the network's input projection.

    NOTE(review): padding='same' is symmetric, so despite the name this can
    see one future timestep — confirm whether strict causality is required.
    """
    with tf.variable_scope(name):
        outputs = tf.layers.conv1d(inputs, filter_num, 2, strides=1, padding='same')
        return outputs
def residual_block(inputs, conditions, filter_num, filter_size, layer_id, dilation_rate):
    """One WaveNet-style conditioned residual block.

    Returns (residual, skip): the residual output fed into the next block,
    and the skip-connection tensor summed at the network head.
    """
    with tf.variable_scope("residual_block_" + str(layer_id)):
        in_filters = inputs.shape[2]
        with tf.variable_scope("dilated_causal_convolution"):
            input_conv = tf.layers.conv1d(inputs, filter_num, filter_size, padding='same', dilation_rate=dilation_rate)
            condition_conv = one_multiply_one_convolution(conditions, filter_num)
            filter_conv = tf.tanh(input_conv + condition_conv)
            gate_conv = tf.sigmoid(input_conv + condition_conv)
            # BUG FIX: the WaveNet gated activation unit multiplies the tanh
            # filter by the sigmoid gate; the previous '+' disabled gating.
            conv = filter_conv * gate_conv
            conv = tf.layers.conv1d(conv, filter_num, 1, padding='same')
        # skip connection
        if in_filters != filter_num:
            # Project the input when channel counts differ so shapes match.
            residual = tf.layers.dense(inputs, filter_num) + conv
        else:
            residual = inputs + conv
        return residual, conv
if __name__ == '__main__':
    # Smoke test: run a tiny conditioned dilated stack on fixed 2x2x2 inputs
    # and print the softmax output with its shape.
    batch_size = 2
    seq_length = 2
    input_channels = 2
    filter_num = 4
    dilated_filter_kernel = 2
    class_num = 2
    # x = tf.constant(np.random.rand(batch_size, seq_length, input_channels).astype(np.float32))
    x = tf.constant([[[0, 1], [1, 0]], [[1, 0], [0, 1]]])
    x = tf.to_float(x)
    # h = tf.constant(np.random.rand(batch_size, seq_length, input_channels).astype(np.float32))
    h = tf.constant([[[1], [1000]], [[1000], [1]]])
    h = tf.to_float(h)
    o = causal_convolution(x, filter_num)
    dilation_rates = [1, 2, 4, 8]
    skip_connections = []
    for i in range(len(dilation_rates)):
        o, c = residual_block(o, h, class_num, dilated_filter_kernel, i, dilation_rates[i])
        skip_connections.append(c)
    # Sum the skip connections, then a two-layer 1x1-conv output head.
    output = sum(skip_connections)
    output = tf.layers.conv1d(output, class_num, 1, padding='same', activation=tf.nn.relu)
    output = tf.layers.conv1d(output, class_num, 1, padding='same', activation=tf.nn.softmax)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(output).shape)
        print(sess.run(output))
|
class Solution:
    def getHappyString(self, n: int, k: int) -> str:
        """Return the k-th (1-indexed) lexicographic happy string of length n.

        A happy string uses only 'a','b','c' with no two adjacent characters
        equal. Returns "" when fewer than k such strings exist.

        Improvement: builds the answer digit-by-digit in O(n) instead of
        recursively enumerating all 3 * 2**(n-1) strings.
        """
        total = 3 * (1 << (n - 1))
        if k > total:
            return ""
        idx = k - 1                  # 0-based rank
        block = 1 << (n - 1)         # number of strings sharing a first char
        result = ["abc"[idx // block]]
        idx %= block
        for _ in range(n - 1):
            block >>= 1
            # At each position exactly two letters are allowed, in sorted order.
            options = [c for c in "abc" if c != result[-1]]
            result.append(options[idx // block])
            idx %= block
        return "".join(result)
|
import random
import pickle
def w_file(data, fname):
    """Pickle *data* into the file at *fname* (binary mode, overwrites)."""
    with open(fname, "wb") as handle:
        pickle.dump(data, handle)
def r_file(fname):
    """Load and return the pickled object stored in *fname*."""
    with open(fname, "rb") as handle:
        return pickle.load(handle)
def field_of_dreams():
    """Interactive 'Field of Dreams' (hangman-like) round.

    Picks a random (question, word) pair from words.data and loops until
    the player spells out the word letter by letter or types the whole word.
    """
    question, word = random.choice(list(r_file('words.data').items()))
    print('Задание: ', question)
    attempt = ''
    field = '*' * len(word)  # masked word shown to the player
    while field != word and attempt != word:
        attempt = input('Enter the letter or the word: \n')
        if attempt != word and attempt in word:
            # Rebuild the mask, revealing every position matching the guess.
            plase_in_field = ''
            for please in range(len(word)):
                if attempt == word[please]:
                    plase_in_field += attempt
                elif attempt != word[please]:
                    plase_in_field += field[please]
            field = plase_in_field
            print(field)
    # Printed once the loop exits, i.e. when the word has been guessed.
    print('Верно!')
# Question bank: riddle text -> answer word (Russian content, runtime data).
words = {
    'На свете есть много лягушек: все они очень разные. \
Свои названия они получили за свою внешность: живёт в \
траве - травяная, с острой мордой - остромордая, живёт в пруду - прудовая.\
А эта африканская лягушка получила своё название за необычный рот.\
Как зовут эту лягушку?':'узкорот',
    ' Эта птица может ходить по дну водоёма, похожа на воробья. Её так и прозвали\
«водяной воробей». Что это за птица?':'оляпка',
    'На морском дне растёт очень опасная водоросль: актиния – она больно жжётся. \
А эта рыбка дружит с ней. И наряд у этой рыбки яркий, весёлый, пёстрый. \
Что это за рыбка?':'клоун'
}
# Refresh the on-disk question bank, then start one interactive round.
w_file(words, 'words.data')
field_of_dreams()
####################################################
#
# Description: Automatic filter queries
# generation for cecm.usp.br
# Dependencies: unidecode, json, os
# Author: Rafael Badain @ University of São Paulo
#
####################################################
# imports
import os
import json
import unidecode
# search queries
focus_queries = {'concentracoes': []}
specialization_queries = {'especializacoes': []}
# search results
focus_results = {}
specialization_results = {}
# iterates through every scholar .json and catalogs respectives 'focus' and 'specialization'
# Directory layout: ./estudantes/<cohord>/<scholar>.json; files named after
# the cohort itself are skipped.
for cohord in os.listdir("./estudantes"):
    for file in os.listdir(f"./estudantes/{cohord}"):
        if file.endswith(".json") and cohord not in file:
            #print(cohord, file)
            with open(f'./estudantes/{cohord}/{file}', encoding='utf-8') as scholar:
                scholar_dict = json.load(scholar)
                if 'concentracao' in scholar_dict:
                    # merge unique query values
                    focus_queries['concentracoes'] = list(set(focus_queries['concentracoes'] + scholar_dict['concentracao']))
                    # groups name results
                    # Structure: focus -> cohort -> [unique scholar names]
                    for focus in scholar_dict['concentracao']:
                        if focus not in focus_results:
                            focus_results[focus] = { scholar_dict['turma']: [ scholar_dict['nome'] ] }
                        elif scholar_dict['turma'] not in focus_results[focus]:
                            focus_results[focus][scholar_dict['turma']] = [ scholar_dict['nome'] ]
                        elif scholar_dict['nome'] not in focus_results[focus][scholar_dict['turma']]:
                            focus_results[focus][scholar_dict['turma']].append(scholar_dict['nome'])
                if 'especializacao' in scholar_dict:
                    # merge unique query values
                    specialization_queries['especializacoes'] = list(set(specialization_queries['especializacoes'] + scholar_dict['especializacao']))
                    # groups name results
                    # Same structure as focus_results, keyed by specialization.
                    for specialization in scholar_dict['especializacao']:
                        if specialization not in specialization_results:
                            specialization_results[specialization] = { scholar_dict['turma']: [ scholar_dict['nome'] ] }
                        elif scholar_dict['turma'] not in specialization_results[specialization]:
                            specialization_results[specialization][scholar_dict['turma']] = [ scholar_dict['nome'] ]
                        elif scholar_dict['nome'] not in specialization_results[specialization][scholar_dict['turma']]:
                            specialization_results[specialization][scholar_dict['turma']].append(scholar_dict['nome'])
# saves to file
# Queries are sorted for stable output; per-focus result files are named
# from the ASCII-folded, hyphenated, lowercased focus string.
focus_queries['concentracoes'].sort()
specialization_queries['especializacoes'].sort()
with open('./filtros/concentracao/concentracao.json', mode="w", encoding="utf-8") as focus_out:
    json.dump(focus_queries, focus_out)
for focus in focus_results:
    with open(f'./filtros/concentracao/results/{unidecode.unidecode(focus.replace(" ", "-").lower())}.json', mode="w", encoding="utf-8") as results:
        json.dump(focus_results[focus], results)
with open('./filtros/especializacao/especializacao.json', mode="w", encoding="utf-8") as specialization_out:
json.dump(specialization_queries, specialization_out)
for specialization in specialization_results:
with open(f'./filtros/especializacao/results/{unidecode.unidecode(specialization.replace(" ", "-").lower())}.json', mode="w", encoding="utf-8") as results:
json.dump(specialization_results[specialization], results)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Created Date: 2020-04-16 15:45:59
# Author: Oleg Kashaev
# Contact: oleg.kashaev.4@gmail.com
# -----
# MIT License
# Copyright (c) 2020 Oleg Kashaev
import logging
from flask_restplus import Namespace, Resource
from flask import request, make_response
from easy_pyrpc.service import rpc_backend
# Flask-RESTPlus namespace grouping all RPC endpoints registered below.
rpc_namespace = Namespace(
    'EasyPyRPC',
    description='RPC handler namespace')
@rpc_namespace.route("/invoke/<string:source_hash>/<string:method_name>")
class Invoke(Resource):
    """Invoke a previously registered remote method and return its raw result."""

    def post(self, source_hash, method_name):
        """POST handler: forward the serialized call payload to the RPC backend.

        Returns the backend's binary response body with the backend's status code.
        """
        # Lazy %-style args: formatting happens only when INFO logging is enabled.
        logging.info("%s: %s", request.method, request.url)
        data, status_code = rpc_backend.invoke(source_hash, method_name, request.get_data())
        resp = make_response(data, status_code)
        # The result is an opaque serialized blob, not JSON.
        resp.headers["Content-Type"] = "application/x-binary"
        return resp
@rpc_namespace.route("/register/<string:source_hash>/<string:method_name>")
class Register(Resource):
    """Register a remote method implementation under a source hash."""

    def post(self, source_hash, method_name):
        """POST handler: store the serialized method body in the RPC backend."""
        # Lazy %-style args: formatting happens only when INFO logging is enabled.
        logging.info("%s: %s", request.method, request.url)
        rpc_backend.register(source_hash, method_name, request.get_data())
        # Echo the hash back so the client can confirm the registration.
        data = {"source_hash": source_hash}
        resp = make_response(data, 200)
        resp.headers["Content-Type"] = "application/json"
        return resp
@rpc_namespace.route("/keep-alive/<string:source_hash>")
class KeepAlive(Resource):
    """Refresh the liveness state of a registered source."""

    def get(self, source_hash):
        """GET handler: ask the backend to keep *source_hash* alive."""
        # Lazy %-style args, consistent with the other handlers in this module.
        logging.info("%s: %s", request.method, request.url)
        data, status_code = rpc_backend.keep_alive(source_hash)
        resp = make_response(data, status_code)
        resp.headers["Content-Type"] = "application/json"
        return resp
|
# The dependency patterns:
# ("nsubj(hyperHead, hypoHead), cop(hyperHead, was|were|is|are)"),
# ("case(hypoHead, such), mwe(such, as), nmod:such_as(hyperHead, hypoHead)"),
# ("case(hypoHead, including), nmod:including(hyperHead, hypoHead)"),
# ("amod(hyperHead, such), case(hypoHead, as), nmod:as(~, hypoHead)"),
# ("cc(hypoHead, and/or), amod(hyperHead, other), conj:and/or(hypoHead, hyperHead)"),
# ("advmod(hyperHead, especially), dep(hyperHead, hypoHead)")
from common import HyperHypoCouple as HH
from common import core_functions as cf
def get_NP(NPs, index):
    """Return the text of the noun phrase whose [start, end] token span
    covers *index*, or "" when no phrase contains it.
    """
    target = int(index)
    for phrase in NPs:
        # Spans are inclusive on both ends.
        if int(phrase.start) <= target <= int(phrase.end):
            return phrase.text
    return ""
def get_couples(parsed_sentence, hyper_index, hypo_index, j_index = 0):
    """Build (hyponym, hypernym) couples for a matched pattern head pair.

    Resolves both head indexes to their covering noun phrases, then expands
    the hyponym side along "appos" and "conj" dependency arcs so each
    appositive/coordinated hyponym yields its own couple with the hypernym.

    :param parsed_sentence: parsed sentence exposing ``.NPs`` and ``.words``
        (project type — assumed from usage; confirm upstream).
    :param hyper_index: token index of the hypernym head word.
    :param hypo_index: token index of the hyponym head word.
    :param j_index: unused; kept for backward compatibility with callers
        that pass the matcher's backward-scan position.
    :return: ``(couples, hypo_indexes)`` — list of ``HH.HHCouple`` objects
        and the set of token indexes used as hyponym heads.
    """
    # Resolve both heads to noun phrases, trimming leading stopwords.
    hyper_np = cf.remove_first_occurrences_stopwords(get_NP(parsed_sentence.NPs, hyper_index))
    hypo_np = cf.remove_first_occurrences_stopwords(get_NP(parsed_sentence.NPs, hypo_index))
    couples = []
    hypo_indexes = set()
    # Primary couple: only when both NPs resolved and are distinct.
    if hyper_np != "" and hypo_np != "" and hypo_np != hyper_np:
        hh = HH.HHCouple(hypo_np, hyper_np)
        couples.append(hh)
        hypo_indexes.add(hypo_index)
    parsed_words = parsed_sentence.words
    for i in range(0, len(parsed_words)):
        parsed_word = parsed_words[i]
        # Apposition of the current hyponym head introduces a new hyponym.
        # NOTE: hypo_index is re-bound here, so later appos/conj checks chain
        # off the most recent apposition — this ordering is intentional.
        if str(parsed_word.dep_rel).__contains__("appos") and parsed_word.parent_index == hypo_index:
            new_hypo_index = parsed_word.index
            hypo_index = new_hypo_index
            new_hypo_np = cf.remove_first_occurrences_stopwords(get_NP(parsed_sentence.NPs, new_hypo_index))
            if hyper_np != "" and new_hypo_np != "" and new_hypo_np != hyper_np:
                new_hh = HH.HHCouple(new_hypo_np, hyper_np)
                couples.append(new_hh)
                hypo_indexes.add(new_hypo_index)
        # Conjunct of the (possibly re-bound) hyponym head, e.g. "X, Y and Z",
        # adds another couple but does NOT re-bind hypo_index.
        if str(parsed_word.dep_rel).__contains__("conj") and parsed_word.parent_index == hypo_index:
            new_hypo_index = parsed_word.index
            new_hypo_np = cf.remove_first_occurrences_stopwords(get_NP(parsed_sentence.NPs, new_hypo_index))
            if hyper_np != "" and new_hypo_np != "" and new_hypo_np != hyper_np:
                new_hh = HH.HHCouple(new_hypo_np, hyper_np)
                couples.append(new_hh)
                hypo_indexes.add(new_hypo_index)
    return couples, hypo_indexes
def such_A_as_B(parsed_sentence):
    """Match the "such NP as NP" pattern:
    amod(hyperHead, such), case(hypoHead, as), nmod:as(~, hypoHead).

    Returns (matched, couples, hypo_indexes, hyper_index); on no match,
    (False, [], [], -1).
    """
    words = parsed_sentence.words
    for idx, word in enumerate(words):
        if "nmod:as" not in str(word.dep_rel):
            continue
        hypo_index = word.index
        pos = str(word.pos)
        # Head must be nominal or adjectival.
        if "NN" not in pos and "JJ" not in pos:
            continue
        saw_as_case = False
        saw_such_amod = False
        hyper_index = None
        # Scan up to 9 tokens backwards for the "as" case marker and
        # the "such" modifier that carries the hypernym head.
        for back in range(idx - 1, max(-1, idx - 10), -1):
            prev = words[back]
            rel = str(prev.dep_rel)
            if "case" in rel and prev.word == "as" and prev.parent_index == hypo_index:
                saw_as_case = True
            elif "amod" in rel and prev.word == "such":
                hyper_index = prev.parent_index
                saw_such_amod = True
            if saw_as_case and saw_such_amod:
                couples, hypo_indexes = get_couples(parsed_sentence, hyper_index, hypo_index, back)
                if len(couples) > 0:
                    return True, couples, hypo_indexes, hyper_index
    return False, [], [], -1
def A_is_a_B(parsed_sentence):
    """Match the copular "NP is/are/was/were NP" pattern:
    nsubj(hyperHead, hypoHead), cop(hyperHead, is|are|was|were).

    Returns (matched, couples, hypo_indexes, hyper_index); on no match,
    (False, [], [], -1).
    """
    copulas = ("is", "are", "was", "were")
    words = parsed_sentence.words
    for idx, word in enumerate(words):
        if "nsubj" not in str(word.dep_rel):
            continue
        hypo_index = word.index
        pos = str(word.pos)
        # Subject must be nominal or adjectival.
        if "NN" not in pos and "JJ" not in pos:
            continue
        hyper_index = word.parent_index
        # Look ahead up to 9 tokens for a copula attached to the hypernym head.
        for ahead in range(idx + 1, min(len(words), idx + 10)):
            nxt = words[ahead]
            if "cop" in str(nxt.dep_rel) and nxt.word in copulas and nxt.parent_index == hyper_index:
                couples, hypo_indexes = get_couples(parsed_sentence, hyper_index, hypo_index)
                if len(couples) > 0:
                    return True, couples, hypo_indexes, hyper_index
    return False, [], [], -1
def A_and_other_B(parsed_sentence):
    """Match the "NP and/or other NP" pattern:
    cc(hypoHead, and|or), amod(hyperHead, other), conj:and/or(hypoHead, hyperHead).

    Returns (matched, couples, hypo_indexes, hyper_index); on no match,
    (False, [], [], -1).
    """
    conjunctions = ("or", "and")
    words = parsed_sentence.words
    for idx, word in enumerate(words):
        if "conj" not in str(word.dep_rel):
            continue
        hyper_index = word.index
        pos = str(word.pos)
        # Head must be nominal or adjectival.
        if "NN" not in pos and "JJ" not in pos:
            continue
        hypo_index = word.parent_index
        saw_other = False
        saw_cc = False
        # Scan up to 9 tokens backwards for "other" (on the hypernym head)
        # and the coordinating conjunction (on the hyponym head).
        for back in range(idx - 1, max(-1, idx - 10), -1):
            prev = words[back]
            rel = str(prev.dep_rel)
            if "amod" in rel and prev.word == "other" and prev.parent_index == hyper_index:
                saw_other = True
            elif "cc" in rel and prev.word in conjunctions and prev.parent_index == hypo_index:
                saw_cc = True
            if saw_other and saw_cc:
                couples, hypo_indexes = get_couples(parsed_sentence, hyper_index, hypo_index, back)
                if len(couples) > 0:
                    return True, couples, hypo_indexes, hyper_index
    return False, [], [], -1
#
def A_especially_B(parsed_sentence):
    """Match the "NP, especially NP" pattern:
    advmod(hyperHead, especially), dep(hyperHead, hypoHead).

    Returns (matched, couples, hypo_indexes, hyper_index); on no match,
    (False, [], [], -1).
    """
    words = parsed_sentence.words
    for idx, word in enumerate(words):
        if "dep" not in str(word.dep_rel):
            continue
        hypo_index = word.index
        pos = str(word.pos)
        # Head must be nominal or adjectival.
        if "NN" not in pos and "JJ" not in pos:
            continue
        hyper_index = word.parent_index
        # Scan up to 9 tokens backwards for "especially" modifying the hypernym head.
        for back in range(idx - 1, max(-1, idx - 10), -1):
            prev = words[back]
            if "advmod" in str(prev.dep_rel) and prev.word == "especially" and prev.parent_index == hyper_index:
                couples, hypo_indexes = get_couples(parsed_sentence, hyper_index, hypo_index, back)
                if len(couples) > 0:
                    return True, couples, hypo_indexes, hyper_index
    return False, [], [], -1
def A_including_B(parsed_sentence):
    """Match the "NP including NP" pattern:
    case(hypoHead, including), nmod:including(hyperHead, hypoHead).

    Returns (matched, couples, hypo_indexes, hyper_index); on no match,
    (False, [], [], -1).
    """
    words = parsed_sentence.words
    for idx, word in enumerate(words):
        if "nmod:including" not in str(word.dep_rel):
            continue
        hypo_index = word.index
        pos = str(word.pos)
        # Head must be nominal or adjectival.
        if "NN" not in pos and "JJ" not in pos:
            continue
        hyper_index = word.parent_index
        # Scan up to 9 tokens backwards for "including" attached to the hyponym head.
        for back in range(idx - 1, max(-1, idx - 10), -1):
            prev = words[back]
            if "case" in str(prev.dep_rel) and prev.word == "including" and prev.parent_index == hypo_index:
                couples, hypo_indexes = get_couples(parsed_sentence, hyper_index, hypo_index, back)
                if len(couples) > 0:
                    return True, couples, hypo_indexes, hyper_index
    return False, [], [], -1
def A_such_as_B(parsed_sentence):
    """Match the "NP such as NP" pattern:
    case(hypoHead, such), mwe(such, as), nmod:such_as(hyperHead, hypoHead).

    Returns (matched, couples, hypo_indexes, hyper_index); on no match,
    (False, [], [], -1).
    """
    words = parsed_sentence.words
    for idx, word in enumerate(words):
        if "nmod:such_as" not in str(word.dep_rel):
            continue
        hypo_index = word.index
        pos = str(word.pos)
        # Head must be nominal or adjectival.
        if "NN" not in pos and "JJ" not in pos:
            continue
        hyper_index = word.parent_index
        saw_mwe_as = False
        saw_such_case = False
        # Scan up to 9 tokens backwards for the "such as" multi-word marker.
        for back in range(idx - 1, max(-1, idx - 10), -1):
            prev = words[back]
            rel = str(prev.dep_rel)
            # NOTE(review): this arm compares prev.parent (a word form),
            # unlike the others which use parent_index — preserved as-is.
            if "mwe" in rel and prev.word == "as" and prev.parent == "such":
                saw_mwe_as = True
            elif "case" in rel and prev.word == "such" and prev.parent_index == hypo_index:
                saw_such_case = True
            if saw_mwe_as and saw_such_case:
                couples, hypo_indexes = get_couples(parsed_sentence, hyper_index, hypo_index, back)
                if len(couples) > 0:
                    return True, couples, hypo_indexes, hyper_index
    return False, [], [], -1
def sentence_couples_annotation(sentence, couples):
    """Mark each couple's terms in *sentence* with _hyper/_hypo suffixes.

    Any annotation left over from a previous pass is stripped first. Each
    multi-word term is joined with underscores and tagged, e.g.
    "big dogs" -> "big_dogs_hypo". Terms are only replaced when surrounded
    by spaces, so sentence-initial/final occurrences are left untagged
    (original behavior, preserved).

    :param sentence: raw sentence text.
    :param couples: objects exposing ``.hypernym`` and ``.hyponym`` strings.
    :return: the annotated (and stripped) sentence.
    """
    # Remove stale tags and underscores from any earlier annotation run.
    sentence = sentence.replace("_hypo", "").replace("_hyper", "").replace("_", " ")
    for couple in couples:
        hyper = couple.hypernym
        hyper2 = hyper.replace(" ", "_")
        sentence = sentence.replace(" " + hyper + " ", " " + hyper2 + "_hyper ").strip()
        hypo = couple.hyponym
        hypo2 = hypo.replace(" ", "_")
        # The original wrapped the next line in a bare try/except, but once
        # hypo.replace() above succeeded, hypo is a str and str.replace
        # cannot raise — the handler was dead code and has been removed.
        sentence = sentence.replace(" " + hypo + " ", " " + hypo2 + "_hypo ").strip()
    return sentence
def match(parsed_sentence, sentence = ""):
    """Run all Hearst-style pattern matchers over one parsed sentence.

    :param parsed_sentence: parsed sentence (project type with .words/.NPs).
    :param sentence: raw sentence text, used only to build the annotated copy.
    :return: on a match, ``(True, couples, patterns, annotated_sentence,
        hypo_indexes, hyper_indexes)``; otherwise ``(False, "", "", "", [], [])``.
    """
    # (matcher, human-readable pattern label), in the original evaluation
    # order — table-driven to remove the sixfold copy-pasted dispatch.
    matchers = (
        (A_such_as_B, "NP such as NP"),
        (A_including_B, "NP including NP"),
        (A_is_a_B, "NP is a NP"),
        (A_and_other_B, "NP and other NP"),
        (A_especially_B, "NP especially NP"),
        (such_A_as_B, "such NP as NP"),
    )
    couples = []
    hypo_indexes = set()
    hyper_indexes = set()
    patterns = []
    for matcher, label in matchers:
        flag, co, hypo_ind, hyper_index = matcher(parsed_sentence)
        if flag:
            couples.extend(co)
            hypo_indexes = hypo_indexes.union(hypo_ind)
            hyper_indexes.add(hyper_index)
            patterns.append(label)
    if len(couples) == 0:
        return False, "", "", "", [], []
    return True, couples, patterns, sentence_couples_annotation(sentence, couples), hypo_indexes, hyper_indexes
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.