import nacl
import nacl.encoding
import nacl.exceptions
import nacl.signing
import secrets
def raw_sign(signing_key: bytes, message: bytes):
sk = nacl.signing.SigningKey(seed=signing_key)
sig = sk.sign(message)
return sig.signature
def raw_verify(verifying_key: bytes, message: bytes, signature: bytes):
vk = nacl.signing.VerifyKey(key=verifying_key)
try:
vk.verify(message, signature)
except nacl.exceptions.BadSignatureError:
return False
return True
class Wallet:
def __init__(self, seed=None):
if seed is None:
seed = secrets.token_bytes(32)
self.sk = nacl.signing.SigningKey(seed=seed)
self.vk = self.sk.verify_key
def sign(self, msg: bytes):
sig = self.sk.sign(msg)
return sig.signature
def verify(self, msg: bytes, signature: bytes):
try:
self.vk.verify(msg, signature)
except nacl.exceptions.BadSignatureError:
return False
return True
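# --- usage sketch (not part of the original module) ---
# A minimal round trip through Wallet and the raw_* helpers; the message is
# illustrative only. VerifyKey.encode() yields the raw 32-byte public key.
if __name__ == "__main__":
    wallet = Wallet()
    msg = b"example message"
    signature = wallet.sign(msg)
    assert wallet.verify(msg, signature)
    # the raw helpers accept the same Ed25519 key material as bytes
    assert raw_verify(wallet.vk.encode(), msg, signature)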
|
from matplotlib import pyplot as plt
# Create the figure and a single axes
fig, ax = plt.subplots()
# Monthly flight distances (May through December)
x = [155.0624139, 216.088007, 216.3490667, 137.6847495, 185.24, 171.8145028, 134.8799083, 87.10716925]
y = ["May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
ax.bar(y, x)
plt.xlabel('Month')
plt.ylabel('Flight distance')
plt.title('Eagle owl flight distance by month in 2012')
plt.show()
|
from . import Base
from sqlalchemy import Column,String,Integer,DateTime
import datetime
class DsLeague(Base):
__tablename__ = 'ds_league'
id = Column(Integer,primary_key=True)
name = Column(String(45))
name_short = Column(String(45))
url = Column(String(400))
created_time = Column(DateTime,nullable=False)
updated_time = Column(DateTime,nullable=False)
def __init__(self):
        # DateTime columns expect datetime objects, not pre-formatted strings
        self.created_time = datetime.datetime.now()
        self.updated_time = datetime.datetime.now()
|
#!/usr/bin/env python
from json import loads
import mechanize
import cookielib
from os import path
from urllib import urlencode
from sploitego.config import config
from sploitego.maltego.utils import debug
from sploitego.framework import configure
from sploitego.maltego.message import EmailAddress, AffiliationFacebook, Label
from easygui import multpasswordbox
import sploitego.hacks.gui
import time
__author__ = 'leres'
__copyright__ = 'Copyright 2012, Socialmedia Project'
__credits__ = []
__license__ = 'GPL'
__version__ = '0.1'
__maintainer__ = 'leres'
__email__ = 'twitleres@gmail.com'
__status__ = 'Development'
__all__ = [
'dotransform'
]
def init_browser():
cookies = cookielib.MozillaCookieJar(config['mechanize/cookie_jar'])
browser = mechanize.Browser()
browser.set_cookiejar(cookies)
browser.set_handle_redirect(True)
browser.set_handle_robots(False)
browser.set_handle_equiv(True)
browser.set_handle_gzip(False)
browser.set_handle_referer(True)
browser.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
browser.addheaders = [('User-Agent', 'Mozilla')]
return browser, cookies
def login(browser, cookies):
if not path.exists(config['mechanize/cookie_jar']):
        u, p = multpasswordbox("Enter your facebook credentials.", "Facebook Login", ["Email", "Password"])
credentials = {
'email' : u,
'pass' : p,
'default_persistent' : 1,
'lsd' : 'aaaaaaaa',
'charset_test' : '%E2%82%AC%2C%C2%B4%2C%E2%82%AC%2C%C2%B4%2C%E6%B0%B4%2C%D0%94%2C%D0%84',
'timezone' : '240',
'lgnrnd' : '000000_0A0A',
'lgnjs' : int(time.time()),
'local' : 'en_US'
}
browser.open("https://www.facebook.com/")
r = browser.open("https://www.facebook.com/login.php?login_attempt=1", urlencode(credentials))
if r.code == 200:
cookies.save(ignore_discard=True, ignore_expires=True)
else:
cookies.load(config['mechanize/cookie_jar'], ignore_discard=True, ignore_expires=True)
fb_cookies = cookies._cookies['.facebook.com']['/']
return fb_cookies['c_user'].value if 'c_user' in fb_cookies else 0
def getuser(browser, uid, email):
if uid:
data = {
'viewer': uid,
'value': email,
'__a' : 1
}
r = browser.open("http://www.facebook.com/ajax/typeahead/search.php?%s" % urlencode(data))
if r.code == 200:
s = r.read()
json = loads(s[s.find('{'):])
if 'error' in json:
raise Exception("%s: %s" % (json['errorSummary'], json['errorDescription']))
if json['payload']['entries']:
debug(json['payload']['entries'][0])
return json['payload']['entries'][0]
return None
@configure(
label='Email lookup at facebook.com',
description='Search if email has a facebook account',
uuids=[ 'socialmedia.facebook.email' ],
inputs=[ ( 'Socialmedia', EmailAddress ) ],
debug=True
)
def dotransform(request, response):
browser, cookies = init_browser()
uid = login(browser, cookies)
u = getuser(browser, uid, request.value)
if u is None:
return response
else:
e = AffiliationFacebook(u['text'])
e.profileurl = 'http://www.facebook.com%s' % u['path']
e.name = u['text']
e.uid = u['uid']
e += Label('Facebook Link','<A href=http://www.facebook.com%s>Link</A>' % u['path'])
e += Label('Profile Picture', '<A href=%s>Link</A>' % u['photo'])
response += e
return response
|
import powertb
powertb.enable()
def my_func(x):
y = x + 200
print(y)
if x > 0:
my_func(x-1)
else:
return 1 / x
my_func(2)
|
from flask import Flask, jsonify, abort, request, render_template
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import asc
from marshmallow import Serializer
class TodoSerializer(Serializer):
class Meta:
fields = ('id', 'todo', 'done', 'order')
def get_todos_serialized(todo):
return TodoSerializer(todo).data
app = Flask(__name__)
app.config.from_pyfile('config.py')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db = SQLAlchemy(app)
class TodoList(db.Model):
id = db.Column(db.Integer, primary_key = True)
todo = db.Column(db.Text)
order= db.Column(db.Integer)
done = db.Column(db.Boolean, default = False)
def __init__(self, todo, order):
self.todo = todo
self.done = False
self.order = order
db.create_all()
@app.route('/')
def index():
return render_template('index.html')
@app.route('/todos/', methods = ['GET'])
def get_todos():
todos = TodoList.query.order_by(asc(TodoList.order)).all()
incomplete = TodoList.query.filter_by(done = False).count()
serialized = [get_todos_serialized(todo) for todo in todos]
return jsonify({'incomplete': incomplete, 'result': serialized})
@app.route('/todos/', methods = ['POST'])
def add_todo():
todo = request.json[0]
order = request.json[1]
new_todo = TodoList(todo, order)
db.session.add(new_todo)
db.session.commit()
todos = TodoList.query.order_by(asc(TodoList.order)).all()
incomplete = TodoList.query.filter_by(done = False).count()
serialized = [get_todos_serialized(todo) for todo in todos]
return jsonify({'incomplete': incomplete, 'result': serialized})
@app.route('/todos/', methods = ['PATCH'])
def update_todo():
id = request.json[0]
todo = TodoList.query.get(id)
    todo.done = not todo.done
db.session.commit()
todos = TodoList.query.order_by(asc(TodoList.order)).all()
incomplete = TodoList.query.filter_by(done = False).count()
serialized = [get_todos_serialized(todo) for todo in todos]
return jsonify({'incomplete': incomplete, 'result': serialized})
@app.route('/todos/', methods = ['DELETE'])
def delete_todo():
id = request.json[0]
todo = TodoList.query.get(id)
db.session.delete(todo)
db.session.commit()
todos = TodoList.query.order_by(asc(TodoList.order)).all()
incomplete = TodoList.query.filter_by(done = False).count()
serialized = [get_todos_serialized(todo) for todo in todos]
return jsonify({'incomplete': incomplete, 'result': serialized})
@app.route('/todos/update/', methods = ['PATCH'])
def complete_all():
todos = TodoList.query.all()
for todo in todos:
todo.done = True
db.session.commit()
todos = TodoList.query.order_by(asc(TodoList.order)).all()
incomplete = TodoList.query.filter_by(done = False).count()
serialized = [get_todos_serialized(todo) for todo in todos]
return jsonify({'incomplete': incomplete, 'result': serialized})
@app.route('/todos/order/', methods = ['PATCH'])
def todo_order():
current_id = request.json[0]
other_id = request.json[1]
current = TodoList.query.get(current_id)
other = TodoList.query.get(other_id)
current_order = current.order
other_order = other.order
current.order = other_order
other.order = current_order
db.session.commit()
todos = TodoList.query.order_by(asc(TodoList.order)).all()
incomplete = TodoList.query.filter_by(done = False).count()
serialized = [get_todos_serialized(todo) for todo in todos]
return jsonify({'incomplete': incomplete, 'result': serialized})
if __name__ == "__main__":
app.run()
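# A minimal sketch of exercising the API with the `requests` library, assuming
# the server is running locally on Flask's default port; the todo text and order
# values are illustrative only:
#
#   import requests
#   requests.post('http://localhost:5000/todos/', json=['buy milk', 1])
#   print(requests.get('http://localhost:5000/todos/').json())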
|
"""
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.back.replacement import BackReplacementPattern
from mo.graph.graph import Node, Graph
from mo.utils.error import Error
class DisableUnsupportedNDOperations(BackReplacementPattern):
"""
This pass disables ND Convolutions/Deconvolutions/Poolings
"""
enabled = False
unsupported_operations = ['Convolution', 'Deconvolution', 'Pooling']
def find_and_replace_pattern(self, graph: Graph):
unsupported_nodes = []
for node in graph.nodes():
node = Node(graph, node)
if node.kind == 'op' and node.soft_get('type') in self.unsupported_operations:
input_shape = node.in_node(0).shape
if len(input_shape) > 4:
unsupported_nodes.append((node.id, node.type))
if len(unsupported_nodes) == 0:
return
        error_message = "\nOperations below were marked as unsupported because they expect more than two spatial dims" \
                        " (input shape length greater than 4)\n"
error_message += "List of unsupported operations ({})\n".format(len(unsupported_nodes))
for node, type in unsupported_nodes:
error_message += " {} {}\n".format(type, node)
raise Error(error_message)
|
from flask import Blueprint
from flask import flash
from flask import g
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
from flask import jsonify
from werkzeug.exceptions import abort
from coolspace.auth import login_required
from coolspace.db import get_db
# The following import statements are necessary for the clustering algorithm to work.
from pathlib import Path
import spacy
from spacy.util import minibatch, compounding
import json
import sys
import textacy
import textacy.keyterms
from collections import defaultdict
import random
import os
import itertools
# End of import statements for clustering analysis.
bp = Blueprint("post", __name__)
@bp.route("/")
def index():
"""Show all the posts, most recent first."""
db = get_db()
posts = db.execute(
"SELECT p.id, title, body, created, author_id, username"
" FROM post p JOIN user u ON p.author_id = u.id"
" ORDER BY created DESC"
).fetchall()
return render_template("post/index.html", posts=posts)
def get_post(id, check_author=True):
"""Get a post and its author by id.
Checks that the id exists and optionally that the current user is
the author.
:param id: id of post to get
:param check_author: require the current user to be the author
:return: the post with author information
:raise 404: if a post with the given id doesn't exist
:raise 403: if the current user isn't the author
"""
post = (
get_db()
.execute(
"SELECT p.id, title, body, created, author_id, username"
" FROM post p JOIN user u ON p.author_id = u.id"
" WHERE p.id = ?",
(id,),
)
.fetchone()
)
if post is None:
abort(404, "Post id {0} doesn't exist.".format(id))
if check_author and post["author_id"] != g.user["id"]:
abort(403)
return post
@bp.route("/create", methods=("GET", "POST"))
@login_required
def create():
"""Create a new post for the current user."""
if request.method == "POST":
title = request.form["title"]
body = request.form["body"]
error = None
if not title:
error = "Title is required."
if error is not None:
flash(error)
else:
db = get_db()
db.execute(
"INSERT INTO post (title, body, author_id) VALUES (?, ?, ?)",
(title, body, g.user["id"]),
)
db.commit()
return redirect(url_for("post.index"))
return render_template("post/create.html")
@bp.route("/<int:id>/update", methods=("GET", "POST"))
@login_required
def update(id):
"""Update a post if the current user is the author."""
post = get_post(id)
if request.method == "POST":
title = request.form["title"]
body = request.form["body"]
error = None
if not title:
error = "Title is required."
if error is not None:
flash(error)
else:
db = get_db()
db.execute(
"UPDATE post SET title = ?, body = ? WHERE id = ?", (title, body, id)
)
db.commit()
return redirect(url_for("post.index"))
return render_template("post/update.html", post=post)
@bp.route("/<int:id>/delete", methods=("POST",))
@login_required
def delete(id):
"""Delete a post.
Ensures that the post exists and that the logged in user is the
author of the post.
"""
get_post(id)
db = get_db()
db.execute("DELETE FROM post WHERE id = ?", (id,))
db.commit()
return redirect(url_for("post.index"))
@bp.route('/getkeywords', methods=('GET',))
def get_keywords():
mess_sql = get_db().execute("SELECT * FROM post").fetchall()
mess = []
for item in mess_sql:
mess.append({"message": "{} {}".format(str(item[0]), item[4])})
clustering_results = clustering_analysis(input=mess)
result1 = clustering_results.split(",")
final_json = {"keywords":result1}
return jsonify(final_json)
"""
Below are helper functions for the clustering analysis to work.
"""
def clustering_analysis(input=None, algorithm="s", n_key_float=0.75, n_grams="1,2,3,4",
cutoff=10, threshold=0.5):
if algorithm != "t" and algorithm != "s":
return("Specify an algorithm! (t)extrank or (s)grank")
alldata = []
for curline in input:
alldata.append(curline["message"])
    # the cumulative tally of common keywords
word_keyterm_cummula = defaultdict(lambda: 0)
# the mapping of journals to the common keywords
word_keyterm_journals = defaultdict(lambda: [])
en = textacy.load_spacy_lang("en_core_web_sm", disable=("parser",))
for item in alldata:
msgid = item.split(' ')[0]
curline = item.replace(msgid, '').strip()
curdoc = textacy.make_spacy_doc(curline.lower(), lang=en)
curdoc_ranks = []
if algorithm == "t":
if n_key_float > 0.0 and n_key_float < 1.0:
curdoc_ranks = textacy.keyterms.textrank(curdoc,
normalize="lemma", n_keyterms=n_key_float)
            else:
                # fall back to treating the value as an absolute keyterm count
                curdoc_ranks = textacy.keyterms.textrank(curdoc,
                    normalize="lemma", n_keyterms=int(n_key_float))
elif algorithm == "s":
ngram_str = set(n_grams.split(','))
ngram = []
for gram in ngram_str:
ngram.append(int(gram))
curdoc_ranks = textacy.keyterms.sgrank(curdoc,
window_width=1500, ngrams=ngram, normalize="lower",
n_keyterms=n_key_float)
for word in curdoc_ranks:
word_keyterm_cummula[word[0]] += 1
word_keyterm_journals[word[0]].append((msgid, word[1]))
if len(word_keyterm_journals[word[0]]) > 10:
newlist = []
min_tuple = word_keyterm_journals[word[0]][0]
for tuple in word_keyterm_journals[word[0]]:
if tuple[1] < min_tuple[1]:
min_tuple = tuple
for tuple in word_keyterm_journals[word[0]]:
if tuple[0] != min_tuple[0]:
newlist.append(tuple)
word_keyterm_journals[word[0]] = newlist
word_keyterm_cummula_sorted = sorted(word_keyterm_cummula.items(),
key=lambda val: val[1], reverse=True)
quint = 0
quint_printout = ""
for entry in word_keyterm_cummula_sorted[:cutoff]:
quint_printout += entry[0] + ","
quint += 1
quint_printout = quint_printout[:-1]
#print(quint_printout)
return quint_printout
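# Illustrative call (hypothetical messages; requires the en_core_web_sm spaCy
# model to be installed). Each message starts with its id, mirroring the
# "<id> <text>" format built in get_keywords:
#
#   clustering_analysis(input=[{"message": "1 solar panels and battery storage"},
#                              {"message": "2 battery storage for solar"}])
#
# returns a comma-separated string of the most common keyterms.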
|
import cv2
import numpy as np
from numpy.linalg import inv
class Transform:
# Create the perspective mapping at init time instead of run time.
def __init__(self, src, from_size=(1280, 720), to_size=(800, 600)):
self.src = src
# Definable sizes for the new images
self.from_size = from_size
self.to_size = to_size
dest = np.float32([[200, 0], [600, 0], [600, to_size[1]], [200, to_size[1]]])
# Create and save the resulting warp matrix
M = cv2.getPerspectiveTransform(self.src, dest)
self.M = M
self.invM = inv(M)
def warp(self, img):
return cv2.warpPerspective(img, self.M, self.to_size, flags=cv2.INTER_LINEAR)
def unwarp(self, img):
return cv2.warpPerspective(img, self.invM, self.from_size, flags=cv2.INTER_LINEAR) |
class Shape:
def what_am_i(self):
print("I am a shape")
class Rectangle(Shape): # Inherit Shape class
pass
class Square(Shape): # Inherit Shape class
pass
rectangle = Rectangle()
rectangle.what_am_i() # call the Shape class method
square = Square()
square.what_am_i() # call the Shape class method
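# A small extension sketch (not in the original): a subclass can override the
# inherited method to specialize its behaviour.
class Circle(Shape):
    def what_am_i(self):
        print("I am a circle")
circle = Circle()
circle.what_am_i()  # prints the overridden message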
|
import keras
from keras.models import Sequential
from keras.layers import Dense
classifier = Sequential()
classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=11))
classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
classifier.fit(x, y, batch_size=10, epochs=100)  # x holds the independent features, y the dependent target |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# django import
from django import forms
from django.core.urlresolvers import reverse
from django.core.exceptions import ImproperlyConfigured
from django.utils.safestring import mark_safe
from django.utils.html import conditional_escape
from django.utils.encoding import force_unicode
from emencia.django.layout.designer.settings import MEDIA_URL
class LayoutDesignerWidget(forms.Textarea):
"""
"""
class Media:
js = (
MEDIA_URL + 'js/bewype-yui3-gallery/yui/yui.js',
MEDIA_URL + 'js/layout.designer.js'
)
css = { 'all': (MEDIA_URL + 'css/layout.designer.css',) }
    def render(self, name, value, attrs=None):
        # avoid a shared mutable default argument
        if attrs is None:
            attrs = {}
        if value is None:
            value = ''
        final_attrs = self.build_attrs(attrs, name=name)
return mark_safe(u'''<div id='%s' class='yui3-skin-sam'>%s</div><br/>
<center><a id="designer_preview" href="#">Preview</a></center><br/>
<input id="content_id" type="hidden" value="%s" name="content_id"/>
<input id="upload_url" type="hidden" value="%s" name="content_id"/>
<input id="file_static_path" type="hidden" value="%s" name="content_id"/>''' % (
final_attrs['id'], force_unicode(value),
final_attrs['id'], reverse('layout_designer_upload'),
MEDIA_URL + 'uploads/'))
|
from StudentRepoClass import *
from StudentClass import Student
from ExceptionsClass import *
class StudentTextFileModificationsRepository(StudentRepo):
def __init__(self, fileName, auxiliaryFileName):
StudentRepo.__init__(self)
self._fileName = fileName
self._auxiliaryFileName = auxiliaryFileName
def copy_file(self):
filepath2 = self._fileName
filepath = self._auxiliaryFileName
f = open(filepath, "r")
f2 = open(filepath2, "w")
line = f.readline().strip()
while len(line) > 0:
            f2.write(line + "\n")  # re-add the newline stripped by readline().strip()
line = f.readline().strip()
f.close()
f2.close()
def add_student(self, obj):
filepath = self._fileName
filepath2 = self._auxiliaryFileName
f = open(filepath, "r")
f2 = open(filepath2, "w")
        line = f.readline().strip()
        while len(line) > 0:
            # keep the raw line and its parsed fields in separate variables;
            # reusing `line` here clobbered the data before it was written back
            parts = line.split(",")
            if int(parts[0]) == obj.studentId:
                raise DuplicateIdException("Duplicate Id")
            f2.write(parts[0] + "," + parts[1] + "\n")
            line = f.readline().strip()
        f2.write(str(obj.studentId) + "," + str(obj.studentName) + "\n")
f.close()
f2.close()
#copy the auxiliary file to the main file
self.copy_file()
def remove_student(self, stud_id):
self._students = []
self._loadFile()
removed_stud = StudentRepo.remove_student(self, stud_id)
self._saveFile()
return removed_stud
def update_student(self, new_stud):
self._students = []
self._loadFile()
StudentRepo.update_student(self, new_stud)
self._saveFile()
def _saveFile(self):
'''
1. Open text file for writing 'w'
        2. for each student in the repository:
a. transform it into one-line string
b. write it to the file
3. close file
'''
filepath = self._fileName
f = open(filepath, 'w')
line = ""
for stud in StudentRepo.getAll(self):
line = ""
line += str(stud.studentId) + "," + str(stud.studentName) + "\n"
f.write(line)
f.close() |
import os
import xml.etree.ElementTree as ET
import sys
import shutil
currentDir = os.getcwd()
userAppDir = sys.argv[1]
# 1. Copying needed files in the right directories
localMacroDir = os.path.join(currentDir,'Macros')
localIconDir = os.path.join(currentDir,'icons')
localFontsDir = os.path.join(currentDir,'fonts')
targetMacroDir = os.path.join(userAppDir,'Macro')
targetIconDir = os.path.join(userAppDir,'icons')
targetFontsDir = os.path.join(userAppDir,'fonts')
if ( not os.path.exists(targetIconDir) ):
os.mkdir(targetIconDir)
if ( not os.path.exists(targetFontsDir) ):
os.mkdir(targetFontsDir)
for file in os.listdir(localMacroDir):
if ('.py' in file):
fullFileName = os.path.join(localMacroDir,file)
shutil.copy(fullFileName,targetMacroDir)
for file in os.listdir(localIconDir):
fullFileName = os.path.join(localIconDir,file)
shutil.copy(fullFileName,targetIconDir)
for file in os.listdir(localFontsDir):
fullFileName = os.path.join(localFontsDir,file)
shutil.copy(fullFileName,targetFontsDir)
# 2. Configuring the user profile to add the PACE button
userConfig = ET.parse(os.path.join('install','cleanUser.cfg'))
# Forcing user macro dir
macroPref = userConfig.find('.//FCParamGroup[@Name="Preferences"]').find('FCParamGroup[@Name="Macro"]')
macroDirElem = ET.Element('FCText')
macroDirElem.set('Name','MacroPath')
macroDirElem.text = targetMacroDir
macroPref.append(macroDirElem)
# adding icons folder
bitMaps = userConfig.find('.//FCParamGroup[@Name="Bitmaps"]')
iconPathElem = ET.Element('FCText')
iconPathElem.set('Name','CustomPath0')
iconPathElem.text=targetIconDir
bitMaps.append(iconPathElem)
# adding PACE macro
macrosElement = userConfig.find('*[@Name="Root"]').find('*[@Name="BaseApp"]').find('*[@Name="Macro"]').find('*[@Name="Macros"]')
paceMacroElement = ET.Element("FCParamGroup")
paceMacroElement.set('Name','Std_Macro1')
myDict = {'Script':'PACE_FreeCad_GUI.py',
'Menu':'PACE',
'Tooltip':'PACE',
'WhatsThis':'PACE',
'Statustip':'PACE',
'Pixmap':'pace_logo',
'Accel':'none'
}
for Name,text in myDict.items():
tmpElem = ET.Element("FCText")
tmpElem.set("Name",Name)
tmpElem.text=text
paceMacroElement.append(tmpElem)
systemElem = ET.Element("FCBool")
systemElem.set('Name','System')
systemElem.set('Value','0')
paceMacroElement.append(systemElem)
macrosElement.append(paceMacroElement)
# Toolbar
workbenchesElem = userConfig.find('.//FCParamGroup[@Name="Workbench"]')
globalElem = ET.Element('FCParamGroup',{'Name':'Global'})
toolBarElem = ET.Element('FCParamGroup',{'Name':'Toolbar'})
custom1Elem = ET.Element('FCParamGroup',{'Name':'Custom_1'})
textElem = ET.Element('FCText',{'Name':'Name'})
textElem.text = 'PACE_Toolbar'
boolElem = ET.Element('FCBool',{'Name':'Active','Value':'1'})
macroElem = ET.Element('FCText',{'Name':'Std_Macro1'})
macroElem.text = 'FreeCAD'
workbenchesElem.append(globalElem)
globalElem.append(toolBarElem)
toolBarElem.append(custom1Elem)
custom1Elem.append(textElem)
custom1Elem.append(boolElem)
custom1Elem.append(macroElem)
userConfigFileName = os.path.join(userAppDir,'user.cfg')
userConfig.write(userConfigFileName)
|
#!/usr/bin/env python
import rospy
import tf_conversions
import tf2_ros
import geometry_msgs.msg
import turtlesim.msg
import math
from nav_msgs.msg import Odometry
from geometry_msgs.msg import PoseStamped, PoseWithCovarianceStamped
import copy
import numpy as np
class TfForExperiment:
def __init__(self):
# Initialize ros node
rospy.init_node('tf_broadcaster', anonymous=True)
        # translation offsets applied to the OptiTrack pose
        self.opti_x_v3 = +0.16
        self.opti_y_v3 = -0.09
# Subscriber
rospy.Subscriber('/vrpn_client_node/RigidBody/pose', PoseStamped, self.handle_optitrack_pose, queue_size=1)
rospy.Subscriber('robot_pose', PoseWithCovarianceStamped, self.handle_robot_pose, queue_size=1)
rospy.Subscriber('odom', Odometry, self.handle_odom_pose, queue_size=1)
# Broadcaster
self.br = tf2_ros.TransformBroadcaster()
self.odom_pub = rospy.Publisher('odom_aligned',Odometry, queue_size=1)
# self.robot_pose_pub = rospy.Publisher('robot_pose_aligned', PoseWithCovarianceStamped, queue_size=1)
self.optitrack_pub = rospy.Publisher('/vrpn_client_node/RigidBody/pose_aligned', PoseStamped, queue_size=1)
self.transform_buffer = geometry_msgs.msg.TransformStamped()
def spin(self):
rospy.spin()
def handle_optitrack_pose(self, msg):
'''
transform from optitrack frame to robot(base_link)
'''
t = geometry_msgs.msg.TransformStamped()
t.header.stamp = msg.header.stamp
t.header.frame_id = "world"
t.child_frame_id = "base_link"
t.transform.translation.x = 0.0
t.transform.translation.y = 0.0
t.transform.translation.z = 0.0
theta = self.theta_from_degree(180)
q = tf_conversions.transformations.quaternion_from_euler(0, 0, theta)
t.transform.rotation.x = q[0]
t.transform.rotation.y = q[1]
t.transform.rotation.z = q[2]
t.transform.rotation.w = q[3]
self.optitrack_update(msg)
self.br.sendTransform(t)
def optitrack_update(self, msg):
        '''
        align the OptiTrack pose to synchronize with the stargazer frame
        '''
optitrack_msg = copy.deepcopy(msg)
optitrack_msg.pose.position.x = msg.pose.position.x + self.opti_x_v3
optitrack_msg.pose.position.y = - msg.pose.position.y + self.opti_y_v3
quaternion = np.array([optitrack_msg.pose.orientation.x,
optitrack_msg.pose.orientation.y,
optitrack_msg.pose.orientation.z,
optitrack_msg.pose.orientation.w])
quaternion_aligned = self.checkFlipping(quaternion)
optitrack_msg.pose.orientation.x = quaternion_aligned[0]
optitrack_msg.pose.orientation.y = quaternion_aligned[1]
optitrack_msg.pose.orientation.z = quaternion_aligned[2]
optitrack_msg.pose.orientation.w = quaternion_aligned[3]
self.optitrack_pub.publish(optitrack_msg)
def handle_odom_pose(self, msg):
'''
transform from optitrack frame to robot(odom)
'''
t = geometry_msgs.msg.TransformStamped()
t.header.stamp = msg.header.stamp
t.header.frame_id = "world"
t.child_frame_id = "odom" # odom have child_framd as base_footprint
t.transform.translation.x = 0.00
t.transform.translation.y = 5.0
t.transform.translation.z = 0.0
theta = self.theta_from_degree(-90)
q = tf_conversions.transformations.quaternion_from_euler(0, 0, theta)
t.transform.rotation.x = q[0]
t.transform.rotation.y = q[1]
t.transform.rotation.z = q[2]
t.transform.rotation.w = q[3]
self.odom_update(msg)
self.br.sendTransform(t)
def odom_update(self, msg):
'''
align odom twist value from x to y to synchronize with stargazer frame
'''
odom_msg = copy.deepcopy(msg)
        odom_msg.pose.pose.position.x = msg.pose.pose.position.y  # forward in the stargazer frame
        odom_msg.pose.pose.position.y = msg.pose.pose.position.x
        odom_msg.twist.twist.linear.x = msg.twist.twist.linear.y  # forward in the stargazer frame
        odom_msg.twist.twist.linear.y = msg.twist.twist.linear.x
self.odom_pub.publish(odom_msg)
def handle_robot_pose(self, msg):
'''
synchronize stargazer's frame of global(map) and local center(stragazer)
'''
t = geometry_msgs.msg.TransformStamped()
t.header.stamp = msg.header.stamp
t.header.frame_id = "stargazer"
t.child_frame_id = "map"
t.transform.translation.x = 0
t.transform.translation.y = 0
t.transform.translation.z = 0.00
theta = self.theta_from_degree(0)
q = tf_conversions.transformations.quaternion_from_euler(0, 0, theta)
t.transform.rotation.x = q[0]
t.transform.rotation.y = q[1]
t.transform.rotation.z = q[2]
t.transform.rotation.w = q[3]
self.br.sendTransform(t)
def theta_from_degree(self, degree):
return degree * math.pi / 180
def checkFlipping(self, quaternion):
'''
        prevent the quaternion sign from flipping between consecutive messages
'''
quaternion_inverse = np.array([-quaternion[0],-quaternion[1],-quaternion[2],quaternion[3]])
dot_quat = np.dot(quaternion,quaternion_inverse)
# if dot_quat < 0:
# quaternion_aligned = quaternion_inverse
# else:
# quaternion_aligned = quaternion
if quaternion[3] < 0:
quaternion_aligned = - quaternion
else:
quaternion_aligned = quaternion
return quaternion_aligned
# sample function for dynamic transformation
# def handle_optitrack_pose(self, msg):
# t = self.transform_buffer
# t.header.stamp = msg.header.stamp
# t.header.frame_id = "world"
# t.child_frame_id = "base_link"
# t.transform.translation.x = msg.pose.position.x
# t.transform.translation.y = msg.pose.position.y
# t.transform.translation.z = 0.0
# # q = tf_conversions.transformations.quaternion_from_euler(0, 0, msg.theta)
# t.transform.rotation.x = msg.pose.orientation.x
# t.transform.rotation.y = msg.pose.orientation.y
# t.transform.rotation.z = msg.pose.orientation.z
# t.transform.rotation.w = msg.pose.orientation.w
# self.br.sendTransform(t)
if __name__ == '__main__':
try:
node = TfForExperiment()
node.spin()
except rospy.ROSInterruptException:
rospy.loginfo("node terminated") |
import Tkinter as tk
from Tkinter import *
import ttk
import tkMessageBox
from threading import Timer
import ctypes
from ctypes import cdll
from ctypes import c_byte
from ctypes import c_int
import re
cCanvasWidth = 640
cCanvasHeight = 480
cCanvasGrid = 10
cTimePeriod = 1
cBufferLen = 1024
def StartTimerTask():
tabwave.tm = Timer(cTimePeriod, TimerTask)
tabwave.tm.start()
def StopTimerTask():
if tabwave.tm != 0:
tabwave.tm.cancel()
def DrawGrid():
line = tabwave.canvas.create_line(0, (cCanvasHeight/2), cCanvasWidth, (cCanvasHeight/2), fill='lightgray', width=1, tag='grid')
line = tabwave.canvas.create_line((cCanvasWidth/2), 0, (cCanvasWidth/2), cCanvasHeight, fill='lightgray', width=1, tag='grid')
cnt = cCanvasHeight/2/cCanvasGrid
for i in range(1, cnt):
x = i*cnt + cCanvasWidth/2
line = tabwave.canvas.create_line(x, 0, x, cCanvasHeight, fill='dimgray', width=1, dash=(4, 4), tag='grid')
x = cCanvasWidth/2 - i*cnt
line = tabwave.canvas.create_line(x, 0, x, cCanvasHeight, fill='dimgray', width=1, dash=(4, 4), tag='grid')
cnt = cCanvasWidth/2/cCanvasGrid
for i in range(1, cnt):
y = i*cnt + cCanvasHeight/2
line = tabwave.canvas.create_line(0, y, cCanvasWidth, y, fill='dimgray', width=1, dash=(4, 4), tag='grid')
y = cCanvasHeight/2 - i*cnt
line = tabwave.canvas.create_line(0, y, cCanvasWidth, y, fill='dimgray', width=1, dash=(4, 4), tag='grid')
def LoadData():
tabwave.arr = []
iCnt = tkDemo.drv.model_fifo_read(0, 21, cBufferLen, tkDemo.bytes)
for i in range(0, iCnt):
val = 0
val |= (tkDemo.bytes[4*i + 0]&0xff)
val |= (tkDemo.bytes[4*i + 1]&0xff)<<8
val |= (tkDemo.bytes[4*i + 2]&0xff)<<16
val |= (tkDemo.bytes[4*i + 3]&0xff)<<24
if val&0x80000000 != 0:
val -= 0x100000000
tabwave.arr.append(val)
def DispWave():
horizonPos = cCanvasHeight/2
LoadData()
for i in range(0, len(tabwave.arr)-1):
line = tabwave.canvas.create_line(i, (tabwave.arr[i] + horizonPos), (i+1), (tabwave.arr[i+1] + horizonPos), fill='lime', width=1, tag='wave')
def DispClear():
tabwave.canvas.delete('wave')
def TimerTask():
DispClear()
DispWave()
tabwave.tm.cancel()
tabwave.tm = Timer(cTimePeriod, TimerTask)
tabwave.tm.start()
def CloseMe():
StopTimerTask()
tkDemo.drv.model_close()
tkDemo.quit()
tkDemo.destroy()
exit()
def ReadValue():
modAddr = int(rdModSel.get())
regAddr = int(rdRegSel.get())
tkDemo.drv.model_reg_read(modAddr, regAddr, 0, tkDemo.rdVals)
txtOut.set('%04x'%tkDemo.rdVals[0])
def WriteValue():
modAddr = int(rdModSel.get())
regAddr = int(rdRegSel.get())
if(txtIn.get() != ""):
try:
tkDemo.rdVals[0] = int(txtIn.get(), 16)
tkDemo.drv.model_reg_write(modAddr, regAddr, 0, tkDemo.rdVals)
        except ValueError:
tkMessageBox.showinfo('Warning','Please input a hex string!')
if __name__ == '__main__':
tkDemo = tk.Tk()
tkDemo.title("Data Acquisition Demo")
tabCtrl = ttk.Notebook(tkDemo)
tabwave = ttk.Frame(tabCtrl)
tabCtrl.add(tabwave, text='Wave')
tabParam = ttk.Frame(tabCtrl)
tabCtrl.add(tabParam, text='Param')
tabCtrl.pack(expand=1, fill="both")
tabwave.tm = 0
tabwave.arr = []
tkDemo.drv = cdll.LoadLibrary('./modeio.so')
tkDemo.bytes = (c_byte*(cBufferLen*4))()
tkDemo.rdVals = (c_int*cBufferLen)()
tkDemo.drv.model_init()
tkDemo.drv.model_rst()
tabwave.canvas = Canvas(tabwave, width = cCanvasWidth, height = cCanvasHeight, bg = "black")
tabwave.canvas.pack()
frame = Frame(tkDemo)
frame.pack()
btnStart = Button(frame, text="Start", command = StartTimerTask)
btnClose = Button(frame, text="Close", command = CloseMe)
btnStart.grid(row = 1, column = 1)
btnClose.grid(row = 1, column = 2)
DrawGrid()
RdFrm = ttk.LabelFrame(tabParam, text='Read Parameter')
RdFrm.grid(column=0, row=0, padx=8, pady=4)
lblMA = ttk.Label(RdFrm, text="Module Address")
lblMA.grid(column=0, row=0, sticky='W')
lblSP = ttk.Label(RdFrm, text=" ")
lblSP.grid(column=1, row=0, sticky='W')
lblRA = ttk.Label(RdFrm, text="Register Address")
lblRA.grid(column=2, row=0, sticky='W')
lblRV = ttk.Label(RdFrm, text="Read value(Hex)")
lblRV.grid(column=0, row=3, sticky='W')
lblSP = ttk.Label(RdFrm, text="")
lblSP.grid(column=0, row=5, sticky='W')
rdMod = tk.StringVar()
rdModSel = ttk.Combobox(RdFrm, textvariable=rdMod, state='readonly')
rdModSel['values'] = range(0,16)
rdModSel.grid(column=0, row=1)
rdModSel.current(0)
rdReg = tk.StringVar()
rdRegSel = ttk.Combobox(RdFrm, textvariable=rdReg, state='readonly')
rdRegSel['values'] = range(0,32)
rdRegSel.grid(column=2, row=1)
rdRegSel.current(0)
txtOut = tk.StringVar()
txtRd = ttk.Entry(RdFrm, width = 22, textvariable=txtOut, state='readonly')
txtRd.grid(column=0, row=4, sticky='W')
btnRd = ttk.Button(RdFrm, width = 22, text="Read", command = ReadValue)
btnRd.grid(column=2, row=4, sticky='W')
WrFrm = ttk.LabelFrame(tabParam, text='Write Parameter')
WrFrm.grid(column=0, row=0+6, padx=8, pady=4)
lblMA = ttk.Label(WrFrm, text="Module Address")
lblMA.grid(column=0, row=0+6, sticky='W')
lblSP = ttk.Label(WrFrm, text=" ")
lblSP.grid(column=1, row=0+6, sticky='W')
lblRA = ttk.Label(WrFrm, text="Register Address")
lblRA.grid(column=2, row=0+6, sticky='W')
lblRV = ttk.Label(WrFrm, text="Write value(Hex)")
lblRV.grid(column=0, row=3+6, sticky='W')
lblSP = ttk.Label(WrFrm, text="")
lblSP.grid(column=0, row=5+6, sticky='W')
wrMod = tk.StringVar()
wrModSel = ttk.Combobox(WrFrm, textvariable=wrMod, state='readonly')
wrModSel['values'] = range(0,16)
wrModSel.grid(column=0, row=1+6)
wrModSel.current(0)
wrReg = tk.StringVar()
wrRegSel = ttk.Combobox(WrFrm, textvariable=wrReg, state='readonly')
wrRegSel['values'] = range(0,32)
wrRegSel.grid(column=2, row=1+6)
wrRegSel.current(0)
txtIn = tk.StringVar()
txtWr = ttk.Entry(WrFrm, width = 22, textvariable=txtIn)
txtWr.grid(column=0, row=4+6, sticky='W')
btnWr = ttk.Button(WrFrm, width = 22, text="Write", command = WriteValue)
btnWr.grid(column=2, row=4+6, sticky='W')
tkDemo.resizable(0,0)
tkDemo.protocol("WM_DELETE_WINDOW", CloseMe)
tkDemo.mainloop()
|
import os
import numpy as np
import scipy.io
import glob
from helpers import util, visualize
import sklearn.metrics
from globals import class_names
import torch
import exp_mill_bl as emb
from debugging_graph import readTrainTestFile
from sklearn.cluster import KMeans
from sklearn.externals import joblib
def make_clusters(train_file, n_per_video, n_clusters,out_dir):
npy_files, anno_all = readTrainTestFile(train_file)
feat_to_keep = []
for npy_file in npy_files:
feat_curr = np.load(npy_file)
feat_to_keep.append(feat_curr[::n_per_video,:])
feat_to_keep = np.concatenate(feat_to_keep, axis = 0)
mean = np.mean(feat_to_keep,axis = 0, keepdims = True)
std = np.std(feat_to_keep,axis = 0, keepdims = True)
print mean.shape, std.shape
feat_to_keep = (feat_to_keep -mean)/std
print 'fitting'
kmeans = KMeans(n_clusters = n_clusters).fit(feat_to_keep)
out_file_mean_std = os.path.join(out_dir,'mean_std.npz')
np.savez_compressed(out_file_mean_std, mean = mean, std = std)
out_file_kmeans = os.path.join(out_dir,'kmeans.joblib')
joblib.dump(kmeans,out_file_kmeans)
def save_kmean_labels(file_curr, k_means_dir, out_dir):
npy_files, anno_all = readTrainTestFile(file_curr)
out_file_mean_std = os.path.join(k_means_dir,'mean_std.npz')
# np.savez_compressed(out_file_mean_std, mean = mean, std = std)
out_file_kmeans = os.path.join(k_means_dir,'kmeans.joblib')
kmeans = joblib.load(out_file_kmeans)
mean_std = np.load(out_file_mean_std)
mean = mean_std['mean']
std = mean_std['std']
for npy_file in npy_files:
feat_curr = np.load(npy_file)
feat_curr = (feat_curr-mean)/std
# print feat_curr.shape
labels_curr = kmeans.predict(feat_curr)
print labels_curr.shape, np.min(labels_curr), np.max(labels_curr)
out_file = os.path.join(out_dir, os.path.split(npy_file)[1])
print out_file
np.save(out_file, labels_curr)
def set_k_mul(k_count,labels):
k = k_count.shape[0]
for i in range(k):
i_count = np.sum(labels==i)
k_count[i,i] += i_count
for j in range(i+1,k):
j_count = np.sum(labels==j)
j_count = i_count*j_count
k_count[i,j] += j_count
k_count[j,i] += j_count
return k_count
def normalize_k_mul(k_count):
k = k_count.shape[0]
for i in range(k):
self_count = max(1,k_count[i,i])
k_count[i,:] = k_count[i,:]/self_count
k_count[:,i] = k_count[:,i]/self_count
k_count[i,i] = 1
return k_count
def set_k_int(k_count,labels):
k = k_count.shape[0]
for i in range(k):
i_count = np.sum(labels==i)
k_count[i,i] += i_count
for j in range(i+1,k):
j_count = np.sum(labels==j)
j_count = min(i_count,j_count)
k_count[i,j] += j_count
k_count[j,i] += j_count
return k_count
def normalize_k_union(k_count):
k = k_count.shape[0]
# print k_count[10:12,10:12]
for i in range(k):
for j in range(i+1,k):
div = k_count[i,i]+k_count[j,j]
if div==0:
assert k_count[i,j]==0
assert k_count[j,i]==0
else:
k_count[i,j] = k_count[i,j]/div
k_count[j,i] = k_count[j,i]/div
# k_count[:,i] = k_count[:,i]/self_count
k_count[i,i] = 1
# print k_count[10:12,10:12]
return k_count
def getting_edge_weights(file_curr, out_dir_labels,out_dir,k, set_k = set_k_mul, normalize_k = normalize_k_mul):
npy_files, anno_all = readTrainTestFile(file_curr)
k_count = np.zeros((len(class_names),k,k))
k_count_big = np.zeros((k,k))
for npy_file,anno_curr in zip(npy_files,anno_all):
label_file = os.path.join(out_dir_labels, os.path.split(npy_file)[1])
labels = np.load(label_file)
k_count_big = set_k(k_count_big,labels)
for gt_idx in np.where(anno_curr)[0]:
k_count[gt_idx] = set_k(k_count[gt_idx],labels)
k_count_big = normalize_k(k_count_big)
print k_count_big.shape
out_file = os.path.join(out_dir,'all_classes_mul.npy')
np.save(out_file, k_count_big )
out_file = os.path.join(out_dir,'all_classes_mul.jpg')
visualize.saveMatAsImage(k_count_big, out_file)
for class_idx in range(len(class_names)):
k_count[class_idx] = normalize_k(k_count[class_idx])
class_name = class_names[class_idx]
out_file = os.path.join(out_dir,class_name+'.npy')
np.save(out_file, k_count[class_idx])
out_file = os.path.join(out_dir,class_name+'.jpg')
visualize.saveMatAsImage(k_count[class_idx], out_file)
visualize.writeHTMLForFolder(out_dir)
def get_gt_vector(vid_name, out_shape_curr, class_idx, loaded):
class_name = class_names[class_idx]
gt_vid_names_all = loaded['gtvideonames'][0]
gt_class_names = loaded['gt_events_class'][0]
gt_time_intervals = loaded['gt_time_intervals'][0]
gt_time_intervals = np.array([a[0] for a in gt_time_intervals])
# print class_name
bin_keep = np.array(gt_vid_names_all) == vid_name
# print np.where(bin_keep)[0]
# print gt_vid_names_all[bin_keep]
# print 'bef',gt_time_intervals[bin_keep]
# print gt_class_names[bin_keep], class_name
bin_keep = np.logical_and(bin_keep,gt_class_names==class_name)
# print np.where(bin_keep)[0]
# print 'aft',gt_time_intervals[bin_keep]
gt_time_intervals = gt_time_intervals[bin_keep]
# print gt_time_intervals
# print gt_class_names[bin_keep]
# print np.sum(gt_class_names==class_name)
det_times = np.array(range(0,out_shape_curr))*16./25.
gt_vals = np.zeros(det_times.shape)
for gt_time_curr in gt_time_intervals:
idx_start = np.argmin(np.abs(det_times-gt_time_curr[0]))
idx_end = np.argmin(np.abs(det_times-gt_time_curr[1]))
gt_vals[idx_start:idx_end] = 1
# if gt_return:
# return gt_vals, det_times,gt_time_intervals
# else:
return gt_vals, det_times
def double_check_anno(file_curr, out_file_curr, is_test ):
npy_files, anno_all = readTrainTestFile(file_curr)
out_lines = []
class_name = class_names[0]
if is_test:
mat_file = os.path.join('../TH14evalkit','mat_files', class_name+'_test.mat')
else:
mat_file = os.path.join('../TH14evalkit', class_name+'.mat')
loaded = scipy.io.loadmat(mat_file)
for idx_npy_file, (npy_file, anno) in enumerate(zip(npy_files,anno_all)):
if idx_npy_file%10==0:
print idx_npy_file,len(npy_files)
# anno_curr = np.where(anno)[0]
vid_name = os.path.split(npy_file)[1]
vid_name = vid_name[:vid_name.rindex('.')]
out_shape_curr = np.load(npy_file).shape[0]
found = []
for class_idx in range(20):
gt_vec, _ = get_gt_vector(vid_name, out_shape_curr, class_idx, loaded)
if np.sum(gt_vec)>0:
found.append(1)
else:
found.append(0)
# print anno
# print found
line_curr = ' '.join([str(val) for val in [npy_file]+found])
out_lines.append(line_curr)
if not np.all(np.array(anno)==np.array(found)):
print vid_name
print anno
print found
if out_file_curr is not None:
util.writeFile(out_file_curr,out_lines)
# raw_input()
def script_correct_anno():
dir_train_test_files = '../data/ucf101/train_test_files'
train_file = os.path.join(dir_train_test_files,'train_corrected.txt')
out_train_file = os.path.join(dir_train_test_files,'train_ultra_correct.txt')
# double_check_anno(train_file, out_train_file, False)
# print 'new file!', out_train_file
# double_check_anno(out_train_file, None, False)
# test_file = os.path.join(dir_train_test_files,'test_corrected.txt')
out_test_file = os.path.join(dir_train_test_files,'test_ultra_correct.txt')
# double_check_anno(test_file, out_test_file, True)
# print 'new file!', out_test_file
double_check_anno(out_test_file, None, True)
def write_train_test_files(train_file, post_pend, out_dir_labels):
out_file = train_file[:train_file.rindex('.')]+'_'+post_pend+'.txt'
npy_files, anno_all = readTrainTestFile(train_file)
out_lines = []
for npy_file, anno_curr in zip(npy_files, anno_all):
label_file = os.path.join(out_dir_labels,os.path.split(npy_file)[1])
assert os.path.exists(label_file)
line_curr = ' '.join([str(val) for val in [npy_file, label_file]+ anno_curr])
# print line_curr
out_lines.append(line_curr)
util.writeFile(out_file, out_lines)
def save_neg_cooc_graphs(out_dir):
all_file = os.path.join(out_dir,'all_classes_mul.npy')
all_cooc = np.load(all_file)
for class_name in class_names:
in_file = os.path.join(out_dir,class_name+'.npy')
curr_cooc = np.load(in_file)
out_cooc = curr_cooc - all_cooc
out_cooc = out_cooc + np.eye(out_cooc.shape[0])
out_file = os.path.join(out_dir,class_name+'neg.jpg')
visualize.saveMatAsImage(out_cooc, out_file)
# print 'curr_cooc',curr_cooc.shape,np.min(curr_cooc),np.max(curr_cooc)
# print 'out_cooc',out_cooc.shape,np.min(out_cooc),np.max(out_cooc)
# print 'all_cooc',all_cooc.shape,np.min(all_cooc),np.max(all_cooc)
# print out_file
out_file = os.path.join(out_dir,class_name+'neg.npy')
np.save(out_file, out_cooc)
visualize.writeHTMLForFolder(out_dir)
def save_neg_exp_cooc_graphs(out_dir):
for class_name in class_names:
in_file = os.path.join(out_dir,class_name+'neg.npy')
curr_cooc = np.load(in_file)
print np.min(curr_cooc),np.max(curr_cooc)
out_cooc = np.exp(curr_cooc-1)
print np.min(out_cooc),np.max(out_cooc)
out_file = os.path.join(out_dir,class_name+'negexp.jpg')
visualize.saveMatAsImage(out_cooc, out_file)
# print out_file
# print 'curr_cooc',curr_cooc.shape,np.min(curr_cooc),np.max(curr_cooc)
# print 'out_cooc',out_cooc.shape,np.min(out_cooc),np.max(out_cooc)
# print 'all_cooc',all_cooc.shape,np.min(all_cooc),np.max(all_cooc)
out_file = os.path.join(out_dir,class_name+'negexp.npy')
print out_file
np.save(out_file, out_cooc)
# raw_input()
visualize.writeHTMLForFolder(out_dir)
def main():
dir_train_test_files = '../data/ucf101/train_test_files'
train_file = os.path.join(dir_train_test_files,'train_ultra_correct.txt')
test_file = os.path.join(dir_train_test_files,'test_ultra_correct.txt')
n_per_video = 3
k = 100
post_pend = 'k_'+str(k)
out_dir_meta = '../data/ucf101/kmeans'
util.mkdir(out_dir_meta)
dir_curr = '_'.join([str(val) for val in [n_per_video,k]])
dir_curr = os.path.join(out_dir_meta, dir_curr)
util.mkdir(dir_curr)
out_dir_labels = os.path.join(dir_curr,'npy_labeled')
out_dir_edges = out_dir_labels
# out_dir_edges = os.path.join(out_dir_labels,'int_union')
util.mkdir(out_dir_labels)
util.mkdir(out_dir_edges)
# make_clusters(train_file, n_per_video,k, dir_curr)
# save_kmean_labels(train_file,dir_curr, out_dir_labels)
# save_kmean_labels(test_file,dir_curr, out_dir_labels)
# getting_edge_weights(train_file, out_dir_labels, out_dir_edges, k)
# getting_edge_weights(train_file, out_dir_labels, out_dir_edges, k, set_k = set_k_int, normalize_k = normalize_k_union)
# save_neg_cooc_graphs(out_dir_edges)
save_neg_exp_cooc_graphs(out_dir_edges)
# write_train_test_files(train_file, post_pend, out_dir_labels)
# write_train_test_files(test_file, post_pend, out_dir_labels)
print 'hello'
if __name__=='__main__':
main() |
#-*- coding:utf-8 -*-
from flask import Flask,session,redirect,url_for,escape,request
import json, urllib
import config
def auth_sso():
token = request.cookies.get(config.SSO_TOKEN)
url = "https://sso.jk.cn/auth/auth_sso_token_api?token_cookie=%s" % token
    result = urllib.urlopen(url)
    json_data = json.loads(result.read())
    result.close()  # close() must be called, not merely referenced
if json_data['success'] != "true":
return False
else:
return json_data['userinfo']
|
from numpy import ndarray, array
from mdtraj import Topology, Trajectory
def write_bonds_tcl(bond_idxs, outfile="bonds.tcl"):
bond_idxs = check_bond_idxs(bond_idxs)
molid = 0
bondstring = lambda molid, idx1, idx2: \
'''set sel [atomselect {0} "index {1} {2}"]
lassign [$sel getbonds] bond1 bond2
set id [lsearch -exact $bond1 {2}]
if {{ $id == -1 }} {{
lappend bond1 {2}
}}
set id [lsearch -exact $bond2 {1}]
if {{ $id == -1 }} {{
lappend bond2 {1}
}}
$sel setbonds [list $bond1 $bond2]
$sel delete'''.format(molid, idx1, idx2)
tclstring = ''
for idx1, idx2 in bond_idxs:
tclstring += bondstring(molid, idx1, idx2) + "\n"
with open(outfile, 'w') as fout:
fout.write(tclstring)
def write_bonds_conect(bond_idxs, outfile="conect.pdb"):
bond_idxs = check_bond_idxs(bond_idxs)
conectstring = ''
for idx1, idx2 in bond_idxs:
conectstring += "CONECT{:5d}{:5d}\n".format(idx1, idx2)
with open(outfile, 'w') as fout:
fout.write(conectstring)
def check_bond_idxs(thing):
if type(thing) == Topology:
bond_idxs = array([ [atm1.serial, atm2.serial] for atm1, atm2 in thing.bonds ])
elif type(thing) == Trajectory:
bond_idxs = array([ [atm1.serial, atm2.serial] for atm1, atm2 in thing.top.bonds ])
elif type(thing) == ndarray:
if thing.shape[1] != 2:
raise IOError("array must be size (n_bonds, 2). Inputted: {}".format(thing.shape))
else:
bond_idxs = thing
else:
raise IOError("Invalid Input Type for Writing Out Bonds. Must be: mdtraj.Topology, mdtraj.Trajectory, or numpy.ndarray of shape (N,2)")
return bond_idxs
if __name__ == "__main__":
import mdtraj as md
traj = md.load("1SHG.pdb")
write_bonds_tcl(traj.top)
write_bonds_conect(traj.top)
|
from trading.signal.base_signal import BaseSignal
from datetime import datetime, timedelta
import rqdatac as rqd
from util.selectstock import filter_stock_pool
from pymongo import UpdateOne
rqd.init()
class DailyUpBreakMa10(BaseSignal):
def __init__(self):
BaseSignal.__init__(self, 'daily_up_break_ma10')
def compute(self, begin_date=None, end_date=None):
begin_date = datetime.strptime(begin_date, '%Y%m%d') - timedelta(days=20)
all_filter_codes = filter_stock_pool(begin_date)
for code in all_filter_codes:
df_daily = rqd.get_price(code, begin_date, end_date)
df_daily['ma10'] = df_daily['close'].rolling(10).mean()
df_daily['delta'] = df_daily['close'] - df_daily['ma10']
df_daily['delta_pre'] = df_daily['delta'].shift(1)
df_daily = df_daily[(df_daily['delta'] > 0) & (df_daily['delta_pre'] < 0)]
update_requests = []
for date in df_daily.index:
date = date.strftime('%Y%m%d')
update_requests.append(
UpdateOne(
{'code': code, 'date': date},
{'$set': {'code': code, 'date': date}},
upsert=True
)
)
if len(update_requests) > 0:
update_result = self.collection.bulk_write(update_requests)
                print('Saved %s-%s data: inserted %4d, updated %4d'
                      % (code, self.name, update_result.upserted_count, update_result.modified_count),
                      flush=True)
class DailyDownBreakMa10(BaseSignal):
def __init__(self):
BaseSignal.__init__(self, 'daily_down_break_ma10')
def compute(self, begin_date=None, end_date=None):
begin_date = datetime.strptime(begin_date, '%Y%m%d') - timedelta(days=20)
all_filter_codes = filter_stock_pool(begin_date)
for code in all_filter_codes:
df_daily = rqd.get_price(code, begin_date, end_date)
df_daily['ma10'] = df_daily['close'].rolling(10).mean()
df_daily['delta'] = df_daily['close'] - df_daily['ma10']
df_daily['delta_pre'] = df_daily['delta'].shift(1)
df_daily = df_daily[(df_daily['delta'] < 0) & (df_daily['delta_pre'] > 0)]
update_requests = []
for date in df_daily.index:
date = date.strftime('%Y%m%d')
update_requests.append(
UpdateOne(
{'code': code, 'date': date},
{'$set': {'code': code, 'date': date}},
upsert=True
)
)
if len(update_requests) > 0:
update_result = self.collection.bulk_write(update_requests)
                print('Saved %s-%s data: inserted %4d, updated %4d'
                      % (code, self.name, update_result.upserted_count, update_result.modified_count),
                      flush=True)
if __name__ == '__main__':
start_date = '20140101'
end_date = '20200101'
DailyUpBreakMa10().compute(start_date, end_date)
DailyDownBreakMa10().compute(start_date, end_date)
|
import cv2
import numpy as np
import imutils
pts=[]
cap= cv2.VideoCapture(0)
while True:
ret, frame= cap.read()
r1= np.array([29,86,6])
r2= np.array([64,255,255])
hsv= cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask= cv2.inRange(hsv,r1,r2)
mask= cv2.erode(mask, None, iterations=2)
mask=cv2.dilate(mask, None, iterations=2)
    contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = imutils.grab_contours(contours)  # handles the differing return signatures across OpenCV versions
center= None
if len(contours)>0:
c= max(contours, key=cv2.contourArea)
((x,y), radius)= cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        if M["m00"] > 0:  # guard against division by zero for degenerate contours
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        if radius > 10 and center is not None:
cv2.circle(frame, (int(x), int(y)), int(radius), (0,255,255),2)
cv2.circle(frame,center,5,(0,0,255),-1)
pts.append(center)
    # draw the tracked path once per frame; the original nested loops redrew
    # every segment len(pts) times
    for j in range(1, len(pts)):
        if pts[j-1] is None or pts[j] is None:
            continue
        cv2.line(frame, pts[j-1], pts[j], (0,0,255), 4)
cv2.imshow('frame', frame)
k= cv2.waitKey(30) & 0xff
if k==27:
break
cap.release()
cv2.destroyAllWindows()
|
from urllib.parse import parse_qs, urlencode, urlparse
from django.template import Library
register = Library()
@register.simple_tag
def set_url_param(full_path, param, value):
if '?' not in full_path:
full_path += "{}{}={}".format('?', param, value)
return full_path
base = full_path.split('?')[0]
parsed_url = urlparse(full_path)
url_params = parse_qs(parsed_url.query)
if param in url_params:
url_params[param][0] = value
else:
url_params[param] = [value]
return base + '?' + urlencode(url_params, doseq=True)
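# Illustrative behaviour (hypothetical paths, not from the original):
#   set_url_param('/items/', 'page', 2)         -> '/items/?page=2'
#   set_url_param('/items/?page=2', 'page', 3)  -> '/items/?page=3'
#   set_url_param('/items/?q=a&page=2', 'page', 3) keeps the other parameters intact.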
|
from __future__ import division
from __future__ import print_function
import time
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import random
import sys
sys.path.append('pygcn/pygcn')
from utils import load_data, accuracy
from layers import GraphConvolution
from graphs import Graph
sys.path.append('../../')
import dl2lib as dl2
class GCN(nn.Module):
    # Despite the name, this is a simple MLP over the flattened adjacency matrix.
def __init__(self, nclass, N, H, dropout=0.3):
super(GCN, self).__init__()
self.fc1 = nn.Linear(N * N, H)
self.fc2 = nn.Linear(H, H)
self.fc3 = nn.Linear(H, H)
self.fc4 = nn.Linear(H, N)
self.drop = nn.Dropout(dropout)
def forward(self, adj):
y = adj.view(-1)
y = self.drop(F.relu(self.fc1(y)))
y = self.drop(F.relu(self.fc2(y)))
y = self.drop(F.relu(self.fc3(y)))
y = self.fc4(y)
return y
# Training settings
parser = argparse.ArgumentParser()
parser = dl2.add_default_parser_args(parser)
parser.add_argument('--no-cuda', action='store_true', default=False,
help='Disables CUDA training.')
parser.add_argument('--seed', type=int, default=42, help='Random seed.')
parser.add_argument('--epochs', type=int, default=60000,
help='Number of epochs to train.')
parser.add_argument('--n_train', type=int, default=300,
help='Number of train samples.')
parser.add_argument('--n_valid', type=int, default=150,
help='Number of valid samples.')
parser.add_argument('--lr', type=float, default=0.0001,
help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4,
help='Weight decay (L2 loss on parameters).')
parser.add_argument('--dropout', type=float, default=0.3,
help='Dropout rate (1 - keep probability).')
parser.add_argument('--hidden', type=int, default=1000,
help='number of units in hidden layers')
parser.add_argument('-n', type=int, default=15,
help='number of nodes in the graph')
parser.add_argument('--baseline', type=dl2.str2bool, default=False,
help='run supervised learning baseline')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# Load data
print('Generating train set...')
train_graphs, valid_graphs, test_graphs = [], [], []
for it in range(args.n_train):
m = np.random.randint(args.n-1, int(args.n*(args.n-1)/2+1))
train_graphs.append(Graph.gen_random_graph(args.n, m))
print('Generating valid set...')
for it in range(args.n_valid):
m = np.random.randint(args.n-1, int(args.n*(args.n-1)/2+1))
valid_graphs.append(Graph.gen_random_graph(args.n, m))
print('Generating test set...')
for it in range(args.n_valid):
m = np.random.randint(args.n-1, int(args.n*(args.n-1)/2+1))
test_graphs.append(Graph.gen_random_graph(args.n, m))
# Model and optimizer
model = GCN(nclass=1, N=args.n, H=args.hidden, dropout=args.dropout)
if args.cuda:
model.to('cuda:0')
optimizer = optim.Adam(model.parameters(),
lr=args.lr, weight_decay=args.weight_decay)
def train(epoch):
tot_err, tot_dl2_loss = 0, 0
random.shuffle(train_graphs)
for i, g in enumerate(train_graphs):
model.train()
with torch.no_grad():
idx = torch.LongTensor([g.x, g.y])
v = torch.FloatTensor(np.ones(len(g.x)))
adj = torch.sparse.FloatTensor(idx, v, torch.Size([g.n, g.n])).to_dense()
if args.cuda:
adj = adj.cuda()
optimizer.zero_grad()
out = model.forward(adj)
dist = torch.FloatTensor([g.p[0, i] for i in range(g.n)])
if args.cuda:
dist = dist.cuda()
err = torch.mean((dist - out) * (dist - out))
tot_err += err.detach()
if not args.baseline:
conjunction = []
for a in range(1, g.n):
disjunction = []
for b in range(g.n):
if adj[a, b]:
disjunction.append(dl2.EQ(out[a], out[b] + 1))
conjunction.append(dl2.LEQ(out[a], out[b] + 1))
conjunction.append(dl2.Or(disjunction))
conjunction.append(dl2.EQ(out[0], 0))
            for a in range(0, g.n):
                conjunction.append(dl2.GEQ(out[a], 0))  # every predicted distance must be non-negative
constraint = dl2.And(conjunction)
dl2_loss = constraint.loss(args)
dl2_loss.backward()
tot_dl2_loss += dl2_loss.detach()
else:
err.backward()
optimizer.step()
def test(val=True, e=None):
model.eval()
tot_err = 0
baseline_err = 0
all_ones = torch.ones(args.n)
if args.cuda:
all_ones = all_ones.cuda()
for i, g in enumerate(valid_graphs if val else test_graphs):
model.eval()
with torch.no_grad():
idx = torch.LongTensor([g.x, g.y])
v = torch.FloatTensor(np.ones(len(g.x)))
adj = torch.sparse.FloatTensor(idx, v, torch.Size([g.n, g.n])).to_dense()
if args.cuda:
adj = adj.cuda()
out = model.forward(adj)
dist = torch.FloatTensor([g.p[0, i] for i in range(g.n)])
if args.cuda:
dist = dist.cuda()
err = torch.mean((dist - out) * (dist - out))
baseline_err += torch.mean((dist - all_ones) * (dist - all_ones))
tot_err += err
if e is not None:
print(str(e) + ' ', end='')
    label = '[Valid]' if val else '[Test]'
    print(label, 'Average error: ', tot_err/float(len(valid_graphs if val else test_graphs)))
    if val is False:
        print(label, 'Baseline err: ', baseline_err/float(len(test_graphs)))
# Train model
t_total = time.time()
for epoch in range(1, args.epochs):
train(epoch)
print('.', end='', flush=True)
if epoch % 50 == 0:
print()
test(e=epoch)
print("Optimization Finished!")
print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
# Testing
test(val=False)
|
name = 'stevesie'
from stevesie.resources.proxies import Proxies
from stevesie.resources.proxy import Proxy
from stevesie.resources.task import Task
from stevesie.resources.task_collection import TaskCollection
from stevesie.resources.task_collection_field import TaskCollectionField
from stevesie.resources.task_collection_result import TaskCollectionResult
from stevesie.resources.task_collection_result_set import TaskCollectionResultSet
from stevesie.resources.task_dependency import TaskDependency
from stevesie.resources.worker import Worker
from stevesie.resources.worker_collection_results import WorkerCollectionResults
from stevesie.resources.workflow import Workflow
from stevesie.resources.workflow_parameter import WorkflowParameter
from stevesie.resources.workflow_task import WorkflowTask
|
from django.contrib.auth.views import LoginView
from django.urls import path
from .views import *
urlpatterns = [
path('signup', SignUp.as_view()),
path('login', LoginView.as_view()),
] |
from enum import Enum
class UnitRegion(Enum):
US = 1
EU = 2
class UnitController:
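"""Convert temperatures between the internal Celsius representation and
Fahrenheit for US-region clients; EU values pass through unchanged."""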
def __init__(self, region=UnitRegion.US):
self.region = region
@staticmethod
def convert_c_to_f(temp_c):
temp_f = temp_c * (9/5)
temp_f = temp_f + 32
return temp_f
@staticmethod
def convert_f_to_c(temp_f):
temp_c = temp_f - 32
temp_c = temp_c * (5/9)
return temp_c
def convert(self, data_dict):
if self.region == UnitRegion.US:
data_dict['temperature'] = self.convert_c_to_f(data_dict.get('temperature'))
data_dict['set_temperature'] = self.convert_c_to_f(data_dict.get('set_temperature'))
return data_dict
return data_dict
def convert_incomming(self, temperature):
if self.region == UnitRegion.US:
return self.convert_f_to_c(temperature)
return temperature
|
import argparse
import os
import socket
import sys
import time
from glob import glob
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
from scipy import spatial
from tqdm import tqdm
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import data_provider
import model_utils
import generator1_upsample2_4d2 as MODEL_GEN
from data_provider import NUM_EDGE, NUM_FACE
from GKNN import GKNN
from tf_ops.sampling.tf_sampling import farthest_point_sample
from utils import pc_util
parser = argparse.ArgumentParser()
parser.add_argument('--phase', default='test', help='train or test [default: train]')
parser.add_argument('--gpu', default='0', help='GPU to use [default: GPU 0]')
parser.add_argument('--log_dir', default='../model/NEWVirtualscan_generator1_1k_crop_l2_4d2', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [1024/2048] [default: 1024]')
parser.add_argument('--num_addpoint', type=int, default=96, help='Add Point Number [default: 96]') # train(1k) is 512, test is 96 or 96*2?
parser.add_argument('--up_ratio', type=int, default=4, help='Upsampling Ratio [default: 4]')
parser.add_argument('--is_crop', type=bool, default=True, help='Use cropped points in training [default: True]')
parser.add_argument('--max_epoch', type=int, default=200, help='Epoch to run [default: 200]') # (nocrop:180 crop:200)
parser.add_argument('--batch_size', type=int, default=12, help='Batch Size during training [default: 12]') # (512:16 1k:8,is_crop:16)
parser.add_argument('--learning_rate', type=float, default=0.001)
parser.add_argument('--assign_model_path',default=None, help='Pre-trained model path [default: None]')
parser.add_argument('--use_uniformloss',type= bool, default=False, help='Use uniformloss [default: False]')
FLAGS = parser.parse_args()
print socket.gethostname()
print FLAGS
ASSIGN_MODEL_PATH=FLAGS.assign_model_path
USE_UNIFORM_LOSS = FLAGS.use_uniformloss
IS_CROP = FLAGS.is_crop
PHASE = FLAGS.phase
GPU_INDEX = FLAGS.gpu
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
NUM_ADDPOINT = FLAGS.num_addpoint
UP_RATIO = FLAGS.up_ratio
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
MODEL_DIR = FLAGS.log_dir
os.environ['CUDA_VISIBLE_DEVICES'] = GPU_INDEX
def log_string(out_str):
global LOG_FOUT
LOG_FOUT.write(out_str)
LOG_FOUT.flush()
class Network(object):
def __init__(self):
return
def build_graph(self,is_training=True,scope='generator'):
bn_decay = 0.95
self.step = tf.Variable(0, trainable=False)
self.pointclouds_input = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINT, 3))
self.pointclouds_radius = tf.placeholder(tf.float32, shape=(BATCH_SIZE))
self.pointclouds_poisson = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINT, 3))
# self.pointclouds_dist = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINT))
self.pointclouds_idx = tf.placeholder(tf.int32,shape=(BATCH_SIZE,NUM_POINT,2))
self.pointclouds_edge = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_EDGE, 6))
self.pointclouds_plane = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_FACE, 9))
self.pointclouds_plane_normal = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_FACE,3))
# create the generator model
self.pred_dist, self.pred_coord,self.idx,self.transform = MODEL_GEN.get_gen_model(self.pointclouds_input, is_training, scope=scope, bradius=1.0,
num_addpoint=NUM_ADDPOINT,reuse=None, use_normal=False, use_bn=False,use_ibn=False,
bn_decay=bn_decay, up_ratio=UP_RATIO,idx=self.pointclouds_idx,is_crop=IS_CROP)
###calculate the distance ground truth of upsample_point
self.pointclouds_dist = model_utils.distance_point2edge(self.pred_coord,self.pointclouds_edge)
self.pointclouds_dist = tf.sqrt(tf.reduce_min(self.pointclouds_dist,axis=-1))
self.pointclouds_dist_truncated = tf.minimum(0.5,self.pointclouds_dist)
self.pred_dist = tf.minimum(0.5,tf.maximum(0.0,self.pred_dist))
# gather the edge
self.pred_edgecoord = tf.gather_nd(self.pred_coord, self.idx)
self.pred_edgedist = tf.gather_nd(self.pred_dist, self.idx)
self.edgedist = tf.gather_nd(self.pointclouds_dist_truncated,self.idx)
# ## The following code is okay when the batch size is 1
self.edge_threshold = tf.constant(0.05,tf.float32,[1]) # select a small value when use 1k points
indics = tf.where(tf.less_equal(self.pred_edgedist,self.edge_threshold)) #(?,2)
self.select_pred_edgecoord = tf.gather_nd(self.pred_edgecoord, indics) #(?,3)
self.select_pred_edgedist = tf.gather_nd(self.pred_edgedist, indics) #(?,3)
if is_training is False:
return
self.dist_mseloss = 1.0/(0.4+self.pointclouds_dist_truncated)*(self.pointclouds_dist_truncated - self.pred_dist) ** 2
self.dist_mseloss = 5 * tf.reduce_mean(self.dist_mseloss / tf.expand_dims(self.pointclouds_radius ** 2, axis=-1))
tf.summary.scalar('loss/dist_loss', self.dist_mseloss)
tf.summary.histogram('dist/gt', self.pointclouds_dist_truncated)
tf.summary.histogram('dist/edge_dist', self.edgedist)
tf.summary.histogram('dist/pred', self.pred_dist)
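# Anneal the edge mask from ground truth toward the prediction: weight
# decays linearly from 0.5 to 0 over the first 20k steps, and points whose
# blended edge distance is within 0.15 contribute to the edge loss.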
weight = tf.maximum(0.5 - tf.to_float(self.step) / 20000.0, 0.0)
self.edgemask = tf.to_float(tf.less_equal(weight * self.edgedist + (1 - weight) * self.pred_edgedist, 0.15))
self.edge_loss = 50*tf.reduce_sum(self.edgemask * self.edgedist**2 / tf.expand_dims(self.pointclouds_radius ** 2, axis=-1)) / (tf.reduce_sum(self.edgemask) + 1.0)
tf.summary.scalar('weight',weight)
tf.summary.histogram('loss/edge_mask', self.edgemask)
tf.summary.scalar('loss/edge_loss', self.edge_loss)
with tf.device('/gpu:0'):
self.plane_dist = model_utils.distance_point2mesh(self.pred_coord, self.pointclouds_plane)
self.plane_dist = tf.reduce_min(self.plane_dist, axis=2)
# idx = tf.argmin(self.plane_dist, axis=2,output_type=tf.int32)
# idx0 = tf.tile(tf.reshape(tf.range(BATCH_SIZE), (BATCH_SIZE, 1)), (1, NUM_POINT*UP_RATIO/2))
# face_normal = tf.gather_nd(self.pointclouds_plane_normal,tf.stack([idx0,idx],axis=-1))
# dist = tf.where(tf.is_nan(dist),tf.zeros_like(dist),dist)
self.plane_loss = 500*tf.reduce_mean(self.plane_dist / tf.expand_dims(self.pointclouds_radius**2, axis=-1))
tf.summary.scalar('loss/plane_loss', self.plane_loss)
#self.perulsionloss = 10*model_utils.get_uniform_loss1_orthdistance(self.pred_coord,face_normal, numpoint=NUM_POINT*UP_RATIO)
self.perulsionloss = 500*model_utils.get_perulsion_loss1(self.pred_coord, numpoint=NUM_POINT * UP_RATIO)
tf.summary.scalar('loss/perulsion_loss', self.perulsionloss)
# # Enforce the transformation as orthogonal matrix
# K = transform.get_shape()[1].value # BxKxK
# mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0, 2, 1]))
# mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
# self.mat_diff_loss = 0.01*tf.nn.l2_loss(mat_diff)
# tf.summary.scalar('loss/mat_loss', self.mat_diff_loss)
self.total_loss = self.dist_mseloss + self.plane_loss + self.edge_loss + self.perulsionloss + tf.losses.get_regularization_loss()
gen_update_ops = [op for op in tf.get_collection(tf.GraphKeys.UPDATE_OPS) if op.name.startswith(scope)]
gen_tvars = [var for var in tf.trainable_variables() if var.name.startswith(scope)]
with tf.control_dependencies(gen_update_ops):
self.pre_gen_train = tf.train.AdamOptimizer(BASE_LEARNING_RATE, beta1=0.9).minimize(self.total_loss, var_list=gen_tvars,
colocate_gradients_with_ops=False,
global_step=self.step)
# merge summary and add pointclouds summary
tf.summary.scalar('loss/regularation', tf.losses.get_regularization_loss())
tf.summary.scalar('loss/total_loss', self.total_loss)
self.merged = tf.summary.merge_all()
self.pointclouds_image_input = tf.placeholder(tf.float32, shape=[None, 500, 1500, 1])
pointclouds_input_summary = tf.summary.image('1_input', self.pointclouds_image_input, max_outputs=1)
self.pointclouds_image_pred = tf.placeholder(tf.float32, shape=[None, 500, 1500, 1])
pointclouds_pred_summary = tf.summary.image('2_pred', self.pointclouds_image_pred, max_outputs=1)
self.pointclouds_image_gt = tf.placeholder(tf.float32, shape=[None, 500, 1500, 1])
pointclouds_gt_summary = tf.summary.image('3_edge', self.pointclouds_image_gt, max_outputs=1)
self.image_merged = tf.summary.merge([pointclouds_input_summary, pointclouds_pred_summary, pointclouds_gt_summary])
def train(self,assign_model_path=None):
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
# config.log_device_placement = False
with tf.Session(config=config) as self.sess:
self.train_writer = tf.summary.FileWriter(os.path.join(MODEL_DIR, 'train'), self.sess.graph)
init = tf.global_variables_initializer()
self.sess.run(init)
# restore the model
saver = tf.train.Saver(max_to_keep=10)
restore_epoch, checkpoint_path = model_utils.pre_load_checkpoint(MODEL_DIR)
global LOG_FOUT
if restore_epoch == 0:
LOG_FOUT = open(os.path.join(MODEL_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(socket.gethostname()) + '\n')
LOG_FOUT.write(str(FLAGS) + '\n')
else:
LOG_FOUT = open(os.path.join(MODEL_DIR, 'log_train.txt'), 'a')
saver.restore(self.sess, checkpoint_path)
###assign the generator with another model file
if assign_model_path is not None:
print "Load pre-train model from %s" % (assign_model_path)
assign_saver = tf.train.Saver(
var_list=[var for var in tf.trainable_variables() if var.name.startswith("generator")])
assign_saver.restore(self.sess, assign_model_path)
##read data
self.fetchworker = data_provider.Fetcher(BATCH_SIZE, NUM_POINT)
self.fetchworker.start()
for epoch in tqdm(range(restore_epoch, MAX_EPOCH + 1), ncols=45):
log_string('**** EPOCH %03d ****\t' % (epoch))
self.train_one_epoch()
if epoch % 20 == 0:
saver.save(self.sess, os.path.join(MODEL_DIR, "model"), global_step=epoch)
self.fetchworker.shutdown()
def train_one_epoch(self):
loss_sum = []
fetch_time = 0
for batch_idx in range(self.fetchworker.num_batches):
start = time.time()
batch_data_input, batch_data_clean, batch_data_dist, batch_data_edgeface, radius,point_order = self.fetchworker.fetch()
batch_data_edge = np.reshape(batch_data_edgeface[:,0:2*NUM_EDGE,:],(BATCH_SIZE,NUM_EDGE,6))
batch_data_face = np.reshape(batch_data_edgeface[:, 2*NUM_EDGE:2*NUM_EDGE+3*NUM_FACE,:],(BATCH_SIZE, NUM_FACE, 9))
A = batch_data_face[:,:,3:6]-batch_data_face[:,:,0:3]
B = batch_data_face[:,:,6:9]-batch_data_face[:,:,0:3]
batch_data_normal = np.cross(A,B)+1e-12
batch_data_normal = batch_data_normal / np.sqrt(np.sum(batch_data_normal ** 2, axis=-1, keepdims=True))
batch_data_edgepoint =batch_data_edgeface[:, 2*NUM_EDGE+3*NUM_FACE:, :]
end = time.time()
fetch_time += end - start
feed_dict = {self.pointclouds_input: batch_data_input,
self.pointclouds_poisson: batch_data_clean,
# self.pointclouds_dist: batch_data_dist,
self.pointclouds_idx: point_order,
self.pointclouds_edge: batch_data_edge,
self.pointclouds_plane: batch_data_face,
self.pointclouds_plane_normal:batch_data_normal,
self.pointclouds_radius: radius}
_, summary, step, pred_coord, pred_edgecoord, edgemask, edge_loss = self.sess.run(
[self.pre_gen_train, self.merged, self.step, self.pred_coord, self.pred_edgecoord, self.edgemask, self.edge_loss], feed_dict=feed_dict)
self.train_writer.add_summary(summary, step)
loss_sum.append(edge_loss)
edgemask[:,0:5]=1
pred_edgecoord = pred_edgecoord[0][edgemask[0]==1]
if step % 30 == 0:
pointclouds_image_input = pc_util.point_cloud_three_views(batch_data_input[0, :, 0:3])
pointclouds_image_input = np.expand_dims(np.expand_dims(pointclouds_image_input, axis=-1), axis=0)
pointclouds_image_pred = pc_util.point_cloud_three_views(pred_coord[0, :, 0:3])
pointclouds_image_pred = np.expand_dims(np.expand_dims(pointclouds_image_pred, axis=-1), axis=0)
pointclouds_image_gt = pc_util.point_cloud_three_views(pred_edgecoord[:, 0:3])
pointclouds_image_gt = np.expand_dims(np.expand_dims(pointclouds_image_gt, axis=-1), axis=0)
feed_dict = {self.pointclouds_image_input: pointclouds_image_input,
self.pointclouds_image_pred: pointclouds_image_pred,
self.pointclouds_image_gt: pointclouds_image_gt}
summary = self.sess.run(self.image_merged, feed_dict)
self.train_writer.add_summary(summary, step)
if step % 100 ==0:
loss_sum = np.asarray(loss_sum)
log_string('step: %d edge_loss: %f\n' % (step, round(loss_sum.mean(), 4)))
print 'datatime:%s edge_loss:%f' % (round(fetch_time, 4), round(loss_sum.mean(), 4))
loss_sum = []
def patch_prediction(self, patch_point, sess, ratio, edge_threshold=0.05):
#normalize the point clouds
patch_point, centroid, furthest_distance = data_provider.normalize_point_cloud(patch_point)
new_idx = np.stack((np.zeros((NUM_POINT)).astype(np.int64), np.arange(NUM_POINT)), axis=-1)
pred, pred_edge, pred_edgedist = sess.run([self.pred_coord, self.select_pred_edgecoord, self.select_pred_edgedist],
feed_dict={self.pointclouds_input: np.expand_dims(patch_point,axis=0),
self.pointclouds_radius: np.ones(1),
self.edge_threshold: np.asarray([edge_threshold])/ratio,
self.pointclouds_idx: np.expand_dims(new_idx, axis=0)
})
# ##calculate the pca of edge
# if pred_edge.shape[0]>=2:
# new_pred_edge = []
# pca = PCA(n_components=1)
# dist = spatial.distance.squareform(spatial.distance.pdist(pred_edge))
# for item in dist:
# idx = np.where(item<0.05)[0]
# idx = np.random.permutation(idx)[:15]
# data = pred_edge[idx]
# # print len(data)
# pca.fit(data)
# newdata = pca.transform(data[0:1,:]) * pca.components_ + pca.mean_
# new_pred_edge.append(newdata[0])
# pred_edge = np.asarray(new_pred_edge)
# else:
# print "No edge point or one edge point"
pred = np.squeeze(centroid + pred * furthest_distance, axis=0)
pred_edge = centroid + pred_edge * furthest_distance
pred_edgedist = pred_edgedist * furthest_distance
return pred, pred_edge, pred_edgedist
def patch_prediction_avg(self, patch_point, sess,ratio,edge_threshold=0.05):
#normalize the point clouds
patch_point, centroid, furthest_distance = data_provider.normalize_point_cloud(patch_point)
# print furthest_distance*0.075
pred_list = []
pred_edgecoord_list=[]
pred_edgedist_list = []
for iter in xrange(3):
idx,new_idx = data_provider.get_inverse_index(patch_point.shape[0])
new_idx = np.stack((np.zeros((NUM_POINT)).astype(np.int64), new_idx), axis=-1)
patch_point_input = patch_point[idx].copy()
pred, pred_edgecoord, pred_edgedist = sess.run([self.pred_coord, self.pred_coord, self.pred_dist],
feed_dict={self.pointclouds_input: np.expand_dims(patch_point_input,axis=0),
self.pointclouds_radius: np.ones(1),
self.edge_threshold: np.asarray([edge_threshold]) / ratio,
self.pointclouds_idx: np.expand_dims(new_idx,axis=0)
})
pred_list.append(pred)
pred_edgecoord_list.append(pred_edgecoord)
pred_edgedist_list.append(pred_edgedist)
pred = np.asarray(pred_list).mean(axis=0)
pred_edgecoord = np.asarray(pred_edgecoord_list).mean(axis=0)
pred_edgedist = np.asarray(pred_edgedist_list).mean(axis=0)
idx = np.argsort(pred_edgedist,axis=-1)
pred_edgedist = pred_edgedist[0][idx[0,:NUM_ADDPOINT]]
pred_edgecoord = pred_edgecoord[0][idx[0,:NUM_ADDPOINT]]
pred_edgecoord = pred_edgecoord[pred_edgedist<edge_threshold/ratio] #0.015 / furthest_distance
pred_edgedist = pred_edgedist[pred_edgedist<edge_threshold/ratio]
pred = np.squeeze(centroid + pred * furthest_distance, axis=0)
pred_edgecoord = centroid + pred_edgecoord * furthest_distance
pred_edgedist = pred_edgedist * furthest_distance
return pred, pred_edgecoord, pred_edgedist
def pc_prediction(self, gm, sess, patch_num_ratio=3, edge_threshold=0.05, edge=None):
## get patch seed from farthestsampling
points = tf.convert_to_tensor(np.expand_dims(gm.data,axis=0),dtype=tf.float32)
start= time.time()
seed1_num = int(gm.data.shape[0] / (NUM_POINT/2) * patch_num_ratio)
## FPS sampling
seed = farthest_point_sample(seed1_num*2, points).eval()[0]
#seed = np.random.permutation(gm.data.shape[0])
seed_list = seed[:seed1_num]
print "farthest distance sampling cost", time.time() - start
if edge is None:
ratios = np.random.uniform(1.0,1.0,size=[seed1_num])
else:
edge_tree = spatial.cKDTree(edge)
seed_data = gm.data[np.asarray(seed_list)]
seed_tree = spatial.cKDTree(seed_data)
indics = seed_tree.query_ball_tree(edge_tree,r=0.02)
ratios = []
cnt = 0
for item in indics:
if len(item)>=3:
#ratios.append(np.random.uniform(1.0,2.0))
ratios.append(1.0)
cnt = cnt + 1
else:
# ratios.append(np.random.uniform(1.0,3.0))
ratios.append(3.0)
print "total %d edge patch"%(cnt)
######
mm1 = {}
mm2 = {}
mm3 = {}
# for i in xrange(gm.data.shape[0]):
for i in xrange(10):
mm1[i]=[]
mm2[i]=[]
mm3[i]=[]
######
input_list = []
up_point_list=[]
up_edge_list = []
up_edgedist_list = []
fail = 0
for seed,ratio in tqdm(zip(seed_list,ratios)):
try:
patch_size = int(NUM_POINT * ratio)
#idx = np.asarray(gm.bfs_knn(seed,patch_size))
idx = np.asarray(gm.geodesic_knn(seed,patch_size))
if len(idx)<NUM_POINT:
fail = fail + 1
continue
idx1 = np.random.permutation(idx.shape[0])[:NUM_POINT]
idx1.sort()
idx = idx[idx1]
point = gm.data[idx]
except:
fail= fail+1
continue
up_point,up_edgepoint,up_edgedist = self.patch_prediction(point, sess,ratio,edge_threshold)
# ## handle with the points of same point
# for cnt, item in enumerate(idx[:128]):
# if item <10000:
# mm1[item].append(up_point[cnt])
# mm2[item].append(up_point[cnt+128])
# mm3[item].append(up_point[cnt+128*2])
# # mm[item].append(up_point[cnt+128*3])
# ########
input_list.append(point)
up_point_list.append(up_point)
up_edge_list.append(up_edgepoint)
up_edgedist_list.append(up_edgedist)
print "total %d fails" % fail
# ##
# colors = np.random.randint(0,255,(10000,3))
# color_point = []
# for item in mm1.keys():
# aa = np.asarray(mm1[item])
# if len(aa)==0:
# continue
# aa = np.concatenate([aa,np.tile(colors[item],(len(aa),1))],axis=-1)
# color_point.extend(aa)
# color_point = np.asarray(color_point)
# data_provider.save_xyz('/home/lqyu/server/proj49/PointSR2/'+point_path.split('/')[-1][:-4] +'1.txt',color_point)
#
# color_point = []
# for item in mm2.keys():
# aa = np.asarray(mm2[item])
# if len(aa) == 0:
# continue
# aa = np.concatenate([aa, np.tile(colors[item], (len(aa), 1))], axis=-1)
# color_point.extend(aa)
# color_point = np.asarray(color_point)
# data_provider.save_xyz('/home/lqyu/server/proj49/PointSR2/'+point_path.split('/')[-1][:-4] +'2.txt', color_point)
#
# color_point = []
# for item in mm3.keys():
# aa = np.asarray(mm3[item])
# if len(aa) == 0:
# continue
# aa = np.concatenate([aa, np.tile(colors[item], (len(aa), 1))], axis=-1)
# color_point.extend(aa)
# color_point = np.asarray(color_point)
# data_provider.save_xyz('/home/lqyu/server/proj49/PointSR2/'+point_path.split('/')[-1][:-4] +'3.txt', color_point)
# ##
input = np.concatenate(input_list,axis=0)
pred = np.concatenate(up_point_list,axis=0)
pred_edge = np.concatenate(up_edge_list, axis=0)
print "total %d edgepoint" % pred_edge.shape[0]
pred_edgedist = np.concatenate(up_edgedist_list,axis=0)
rgba = data_provider.convert_dist2rgba(pred_edgedist, scale=10)
pred_edge = np.hstack((pred_edge, rgba, pred_edgedist.reshape(-1, 1)))
return input, pred, pred_edge
# t1 = time.time()
# edge_dist = np.zeros(pred_edge.shape[0])
# for sid in range(0,pred_edge.shape[0],20000):
# eid = np.minimum(pred_edge.shape[0],sid+20000)
# tf_point = tf.placeholder(tf.float32,[1,eid-sid,3])
# tf_edge = tf.placeholder(tf.float32,[1,gm.edge.shape[0],6])
# pred_edge_dist_tf = model_utils.distance_point2edge(tf_point,tf_edge)
# pred_edge_dist_tf = tf.sqrt(tf.reduce_min(pred_edge_dist_tf, axis=-1))
# edge_dist[sid:eid] = sess.run(pred_edge_dist_tf,feed_dict={tf_point:np.expand_dims(pred_edge[sid:eid], axis=0),
# tf_edge:np.expand_dims(gm.edge, axis=0)})
# t2 = time.time()
# print "tf time %f"%(t2-t1)
# rgba = data_provider.convert_dist2rgba(edge_dist, scale=10)
# path = os.path.join(save_path, point_path.split('/')[-1][:-4] + "_outputedgeerror.ply")
# data_provider.save_ply(path, np.hstack((pred_edge, rgba, edge_dist.reshape(-1, 1))))
def test_hierarical_prediction(self):
data_folder = '../data/virtualscan/chair_test1/*3_noise_half.xyz'
data_folder = '/home/lqyu/server/proj49/PointSR2/data/paper_result_data4/*_noise_half.xyz'
phase = data_folder.split('/')[-3]+"_"+data_folder.split('/')[-2]
save_path = os.path.join(MODEL_DIR, 'result/' +phase+'_512_0.05_dynamic_96')
# data_folder = '../../PointSR_data/tmp/moniter_input/*.xyz'
# save_path = os.path.join('../../PointSR_data/tmp/moniter1024_noresidual_0.05')
self.saver = tf.train.Saver()
_, restore_model_path = model_utils.pre_load_checkpoint(MODEL_DIR)
print restore_model_path
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
with tf.Session(config=config) as sess:
self.saver.restore(sess, restore_model_path)
total_time = 0
samples = glob(data_folder)
samples.sort()
for point_path in samples:
# if 'no_noise' in point_path:
# continue
edge_path = point_path.replace('new_simu_noise', 'mesh_edge').replace('_noise_double.xyz', '_edge.xyz')
edge_path = None
print point_path, edge_path
start = time.time()
gm = GKNN(point_path, edge_path, patch_size=NUM_POINT, patch_num=30,add_noise=False,normalization=False)
##get the edge information
_,pred,pred_edge = self.pc_prediction(gm,sess,patch_num_ratio=3, edge_threshold=0.05)
end = time.time()
total_time += end - start
print "total time: ", end - start
## re-prediction with edge information
# input, pred,pred_edge = self.pc_prediction(gm,sess,patch_num_ratio=3, edge_threshold=0.05,edge=pred_edge[:,0:3])
path = os.path.join(save_path, point_path.split('/')[-1][:-4] + "_input.xyz")
data_provider.save_xyz(path, gm.data)
path = os.path.join(save_path, point_path.split('/')[-1][:-4] + "_output.xyz")
data_provider.save_xyz(path, pred)
path = os.path.join(save_path, point_path.split('/')[-1][:-4] + "_outputedge.ply")
data_provider.save_ply(path, pred_edge)
print total_time/len(samples)
def test(self, show=False, use_normal=False):
data_folder = '../../PointSR_data/CAD/mesh_MC16k'
phase = data_folder.split('/')[-2]+data_folder.split('/')[-1]
save_path = os.path.join(MODEL_DIR, 'result/' + phase)
self.saver = tf.train.Saver()
_, restore_model_path = model_utils.pre_load_checkpoint(MODEL_DIR)
print restore_model_path
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
with tf.Session(config=config) as sess:
self.saver.restore(sess, restore_model_path)
samples = glob(data_folder+"/*.xyz")
samples.sort()
total_time = 0
#input, dist, edge, data_radius, name = data_provider.load_patch_data(NUM_POINT, True, 30)
#edge = np.reshape(edge,[-1,NUM_EDGE,6])
for i,item in tqdm(enumerate(samples)):
input = np.loadtxt(item)
edge = np.loadtxt(item.replace('mesh_MC16k','mesh_edge').replace('.xyz','_edge.xyz'))
idx = np.all(edge[:, 0:3] == edge[:, 3:6], axis=-1)
edge = edge[idx == False]
l = len(edge)
idx = range(l) * (1300 / l) + list(np.random.permutation(l)[:1300 % l])
edge = edge[idx]
# # coord = input[:, 0:3]
# # centroid = np.mean(coord, axis=0, keepdims=True)
# # coord = coord - centroid
# # furthest_distance = np.amax(np.sqrt(np.sum(abs(coord) ** 2, axis=-1)))
# # coord = coord / furthest_distance
# # input[:, 0:3] = coord
input = np.expand_dims(input,axis=0)
# input = data_provider.jitter_perturbation_point_cloud(input, sigma=0.01, clip=0.02)
start_time = time.time()
edge_pl = tf.placeholder(tf.float32, [1, edge.shape[0], 6])
dist_gt_pl = tf.sqrt(tf.reduce_min(model_utils.distance_point2edge(self.pred_coord, edge_pl), axis=-1))
pred, pred_dist, dist_gt = sess.run([self.pred_coord, self.pred_dist, dist_gt_pl],
feed_dict={self.pointclouds_input: input[:,:,0:3],
self.pointclouds_radius: np.ones(BATCH_SIZE),
edge_pl:np.expand_dims(edge,axis=0)})
total_time +=time.time()-start_time
norm_pl = np.zeros_like(pred)
##--------------visualize predicted point cloud----------------------
if show:
f,axis = plt.subplots(3)
axis[0].imshow(pc_util.point_cloud_three_views(input[:,0:3],diameter=5))
axis[1].imshow(pc_util.point_cloud_three_views(pred[0,:,:],diameter=5))
axis[2].imshow(pc_util.point_cloud_three_views(gt[:,0:3], diameter=5))
plt.show()
path = os.path.join(save_path, item.split('/')[-1][:-4]+".ply")
# rgba =data_provider.convert_dist2rgba(pred_dist2,scale=10)
# data_provider.save_ply(path, np.hstack((pred[0, ...],rgba,pred_dist2.reshape(NUM_ADDPOINT,1))))
path = os.path.join(save_path, item.split('/')[-1][:-4] + "_gt.ply")
rgba = data_provider.convert_dist2rgba(dist_gt[0],scale=5)
data_provider.save_ply(path, np.hstack((pred[0, ...], rgba, dist_gt.reshape(NUM_ADDPOINT, 1))))
path = path.replace(phase, phase+"_input")
path = path.replace('xyz','ply')
rgba = data_provider.convert_dist2rgba(pred_dist[0],scale=5)
data_provider.save_ply(path, np.hstack((input[0],rgba,pred_dist.reshape(NUM_POINT,1))))
print total_time/len(samples)
if __name__ == "__main__":
np.random.seed(int(time.time()))
tf.set_random_seed(int(time.time()))
if PHASE=='train':
assert not os.path.exists(os.path.join(MODEL_DIR, 'code/'))
os.makedirs(os.path.join(MODEL_DIR, 'code/'))
os.system('cp -r * %s' % (os.path.join(MODEL_DIR, 'code/'))) # bkp of model def
network = Network()
network.build_graph(is_training=True)
network.train()
LOG_FOUT.close()
else:
network = Network()
BATCH_SIZE = 1
NUM_EDGE = 1000
network.build_graph(is_training=False)
network.test_hierarical_prediction()
|
from vm_spawner import VMSpawner
from threading import Thread
import glob, json, os
def second_pass(vms):
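"""Start the default_startups service on each VM described by a
vm_config_* file, one thread per VM so the commands run concurrently."""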
for configPath in sorted(glob.glob('./vm_config_*')):
with open(configPath, 'r') as f: aVMConfig = json.loads(f.read())
Thread(target= vms.execute_commands_at, args=( \
aVMConfig['username'], \
vms.get_floating_ip_of(aVMConfig['openStackVMname']), \
aVMConfig['password'], \
['echo ' + aVMConfig['password'] + ' | sudo -S service default_startups start'] )
).start()
'''
vms.execute_commands_at( \
aVMConfig['username'], \
vms.get_floating_ip_of(aVMConfig['openStackVMname']), \
aVMConfig['password'], \
['echo ' + aVMConfig['password'] + ' | sudo -S service default_startups start'] )
'''
if __name__=='__main__':
if os.path.exists(os.getcwd()+'/config.json'):
with open('config.json', 'r') as f:
mainConfig = json.loads(f.read())
vms = VMSpawner(mainConfig['floating_ip_pool'])
second_pass(vms) |
import TileArea as ta
class CentralArea(ta.TileArea):
"""
Central area - area in the middle of all the pads, where the unselected
tiles go when a player selects a given color. Also contains the 1st
player tile, which goes to the first player to draw out of the middle.
>>> ca = CentralArea()
>>> for cnt in range(3): # did not know about the ellipsis in doctests
... ca.addtile('R')
... ca.addtile('B')
... ca.addtile('Y')
>>> print(ca.tileplayer1)
True
>>> seltiles = ca.takecolor('Y')
>>> print(seltiles)
['Y', 'Y', 'Y', '1']
>>> print(ca.tileplayer1)
False
>>> seltiles = ca.takecolor('R')
>>> print(seltiles)
['R', 'R', 'R']
"""
def __init__(self):
super().__init__()
self._firstplayer = True
@property
def tileplayer1(self):
return (self._firstplayer)
def addtile(self, tile):
if tile == '1':
self._firstplayer = True
if '1' in self.tiles:
return ()
super().addtile(tile)
def takecolor(self, color):
# assert color != '1' # this really shouldn't happen
if len(self.tiles) == 1 and color == "1":
print("someone selected 1 when no tiles in center")
retlist = super().takecolor(color)
if self._firstplayer:
retlist.append('1')
while '1' in self.tiles:
self.tiles.remove('1')
self._firstplayer = False
return (retlist)
def reset(self):
self._firstplayer = True
def __str__(self):
retstr = ""
idx = 0
halfway = int(len(self.tiles) / 2) + 1
self.tiles.sort()
for tile in self.tiles:
idx += 1
if idx > halfway:
retstr += "/"
idx = 0
retstr += tile
return (retstr)
def onlyplayer1(self):
return (len(self.tiles) == 1 and self.tiles[0] == '1')
if __name__ == "__main__":
import doctest
doctest.testmod()
|
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
browser = webdriver.Chrome()
browser.get("http://www.baidu.com")
# Type text into the search box
browser.find_element_by_id("kw").send_keys("selenium")
# Delete the extra character that was typed
browser.find_element_by_id("kw").send_keys(Keys.BACK_SPACE)
# Type the space key followed by "教程" ("tutorial")
browser.find_element_by_id("kw").send_keys(Keys.SPACE)
browser.find_element_by_id("kw").send_keys("教程")
# Press Ctrl+A to select all content in the input box
browser.find_element_by_id("kw").send_keys(Keys.CONTROL, 'a')
# Press Ctrl+X to cut the content of the input box
browser.find_element_by_id("kw").send_keys(Keys.CONTROL, 'x')
# Press Ctrl+V to paste the content back into the input box
browser.find_element_by_id("kw").send_keys(Keys.CONTROL, 'v')
# Press Enter instead of clicking the search button
browser.find_element_by_id("kw").send_keys(Keys.ENTER)
browser.quit()
|
x0=1.3
x1=1.6
x2=1.9
f0=0.6200860
f1=0.4554022
f2=0.2818186
df0=-0.5220232
df1=-0.5698959
df2=-0.5811571
x=[x0,x1,x2]
f=[f0,f1,f2]
df=[df0,df1,df2]
z=[0,0,0,0,0,0]
Q=[[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0]]
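# Build the Hermite divided-difference table: each node x[i] appears twice
# in z, column 0 holds the function values, and the first-order entry at a
# repeated node is seeded with the derivative df[i].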
for i in range(len(x)):
z[2*i]=x[i]
z[2*i+1]=x[i]
Q[2*i][0]=f[i]
Q[2*i+1][0]=f[i]
Q[2*i+1][1]=df[i]
if i!=0:
Q[2*i][1]=(Q[2*i][0]-Q[2*i-1][0])/(z[2*i]-z[2*i-1])
for i in range(2,2*len(x)-1):
for j in range(2,i+1):
Q[i][j]=(Q[i][j-1]-Q[i-1][j-1])/(z[i]-z[i-j])
for i in range(2*len(x)-1):
print(Q[i][i])
print(Q)
value=1.5
a=1
h=Q[0][0]
#print(Q[0][0]+Q[1][1]*(value-x[0])+Q[2][2]*(value-x[0])**2+Q[3][3]*(value-x[0])**2*(value-x[1])+Q[4][4]*(value-x[0])**2*(value-x[1])**2+Q[5][5]*(value-x[0])**2*(value-x[1])**2*(value-x[2]))
'''The loop below evaluates the Hermite interpolant in Newton form; it is
equivalent to the commented-out print of Q[0][0] + Q[1][1]*(value-x[0]) + ... above.'''
for i in range(1,2*len(x)):
a=a*(value-z[i-1])
h=h+a*Q[i][i]
print(h) |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Query all rows from the students table in the goods database
# 1. Import the package
import pymysql
try:
# 2. Connect to the MySQL database service
connc = pymysql.Connect(
# IP of the MySQL server: 127.0.0.1/localhost by default, or a real IP
host='192.168.90.172',
user='root',
password="mysql",
database='goods',
port=3306,
charset='utf8'
)
# 3. Create a cursor object
cur = connc.cursor()
# 4. Write the SQL statement
sql = 'select * from students;'
# 5. Execute the SQL with the cursor object
cur.execute(sql)
# 6. Fetch and print the query results
result = cur.fetchall()
print(result)
# 7. Close the cursor object
cur.close()
# 8. Close the connection
connc.close()
except Exception as e:
print(e)
|
import os
import py
from pypy.translator.test import snippet
from pypy.translator.squeak.test.runtest import compile_function
class TestGenSqueak:
def test_while(self):
def addon(i):
while not i == 1:
i -= 1
return i
fn = compile_function(addon, [int])
assert fn(4) == 1
def test_theanswer(self):
def theanswer():
return 42
fn = compile_function(theanswer)
assert fn() == "42"
def test_simplemethod(self):
class A:
def m(self):
return 42
def simplemethod():
return A().m()
fn = compile_function(simplemethod)
assert fn() == "42"
def test_argfunction(self):
def function(i, j=2):
return i + j
fn = compile_function(function, [int, int])
assert fn(1, 3) == "4"
def test_argmethod(self):
class A:
def m(self, i, j, h=2):
return i + j + h
def simplemethod(i):
return A().m(i, j=3)
fn = compile_function(simplemethod, [int])
assert fn(1) == "6"
def test_nameclash_classes(self):
from pypy.translator.squeak.test.support import A as A2
class A:
def m(self, i): return 2 + i
class Functions:
def m(self, i): return 1 + i
def f():
return A().m(0) + A2().m(0) + Functions().m(-1)
fn = compile_function(f)
assert fn() == "3"
def test_nameclash_classes_mean(self):
class A:
def m(self, i): return 1 + i
A2 = A
class A:
def m(self, i): return 2 + i
def f():
return A().m(0) + A2().m(0)
fn = compile_function(f)
assert fn() == "3"
def test_nameclash_camel_case(self):
class ASomething:
def m(self, i): return 1 + i
class A_Something:
def m(self, i): return 2 + i
def f():
x = ASomething().m(0) + A_Something().m(0)
return x + ASomething().m(0) + A_Something().m(0)
fn = compile_function(f)
assert fn() == "6"
def test_nameclash_functions(self):
from pypy.translator.squeak.test.support import f as f2
def f(i):
return i + 2
def g():
return f(0) + f2(0)
fn = compile_function(g)
assert fn() == "3"
def test_nameclash_methods(self):
class A:
def some_method(self, i): return i + 1
def someMethod(self, i): return i + 2
def f():
a = A()
return a.some_method(0) + a.someMethod(0)
fn = compile_function(f)
assert fn() == "3"
def test_nameclash_fields(self):
class A:
def m(self, i):
self.var1 = i
self.var_1 = i + 1
def f():
a = A()
a.m(1)
return a.var1 + a.var_1
fn = compile_function(f)
assert fn() == "3"
def test_direct_call(self):
def h(i):
return g(i) + 1 # another call to g to try to trap GenSqueak
def g(i):
return i + 1
def f(i):
return h(i) + g(i)
fn = compile_function(f, [int])
assert fn(1) == "5"
def test_getfield_setfield(self):
class A:
def set(self, i):
self.i_var = i
def inc(self):
self.i_var = self.i_var + 1
def f(i):
a = A()
a.set(i)
i = a.i_var
a.i_var = 3
a.inc()
return i + a.i_var
fn = compile_function(f, [int])
assert fn(2) == "6"
def test_classvars(self):
class A: i = 1
class B(A): i = 2
def pick(i):
if i == 1:
c = A
else:
c = B
return c
def f(i):
c = pick(i)
return c.i
fn = compile_function(f, [int])
assert fn(1) == "1"
assert fn(2) == "2"
class TestException:
def test_simpleexception(self):
def raising(i):
if i > 0:
raise ValueError
else:
return i + 1
def f(i):
try:
return raising(i)
except ValueError, val:
return i - 1
fn = compile_function(f, [int])
assert fn(-1) == "0"
assert fn(2) == "1"
def test_exceptbranch(self):
def raising(i):
if i == 0:
raise ValueError
elif i < 0:
raise AttributeError
else:
return i + 1
def f(i):
try:
return raising(i)
except ValueError:
return i
except AttributeError:
return -i
fn = compile_function(f, [int])
assert fn(-1) == "1"
assert fn(0) == "0"
assert fn(2) == "3"
def test_exceptreraise(self):
def raising(i):
if i == 0:
raise ValueError
elif i < 0:
raise AttributeError
else:
return i + 1
def f(i):
try:
return raising(i)
except ValueError:
return i
def g(i):
try:
return f(i)
except AttributeError:
return -i
fn = compile_function(g, [int])
assert fn(-2) == "2"
|
# pylint: disable=protected-access
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is licensed under the Apache License 2.0.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright 2020, CTERA Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import unittest.mock as mock
import munch
try:
from cterasdk import config, CTERAException
except ImportError: # pragma: no cover
pass
import ansible_collections.ctera.ctera.plugins.modules.ctera_filer_cloud_services as ctera_filer_cloud_services
import tests.ut.mocks.ctera_filer_base_mock as ctera_filer_base_mock
from tests.ut.base import BaseTest
class TestCteraFilerCloudServices(BaseTest):
def setUp(self):
super().setUp()
ctera_filer_base_mock.mock_bases(self, ctera_filer_cloud_services.CteraFilerCloudServices)
def test__execute(self):
default_ssl_configuration = config.connect['ssl']
for state in ['connected', 'disconnected']:
for trust_certificate in [True, False]:
for is_connected in [True, False]:
# Reset SSL configuration to default
config.connect['ssl'] = default_ssl_configuration
self._test__execute(state, trust_certificate, is_connected, default_ssl_configuration)
def _test__execute(self, state, trust_certificate, is_connected, default_ssl_configuration):
status = munch.Munch(dict(connected=is_connected))
cloud_cache = ctera_filer_cloud_services.CteraFilerCloudServices()
cloud_cache.parameters = dict(state=state, trust_certificate=trust_certificate, server='test.example.com')
cloud_cache._ctera_filer.services.get_status.return_value = status
cloud_cache._ensure_connected = mock.MagicMock()
cloud_cache._execute()
self.assertEqual('Trust' if trust_certificate else default_ssl_configuration, config.connect['ssl'])
if state == 'connected':
cloud_cache._ensure_connected.assert_called_once_with(status)
else:
if is_connected:
cloud_cache._ctera_filer.services.disconnect.assert_called_once_with()
else:
cloud_cache._ctera_filer.services.disconnect.assert_not_called()
def test__ensure_connected(self):
for is_connected in [True, False]:
for modify_return in [True, False]:
self._test__ensure_connected(is_connected, modify_return)
@staticmethod
def _test__ensure_connected(is_connected, modify_return):
status = munch.Munch(dict(connected=is_connected))
cloud_cache = ctera_filer_cloud_services.CteraFilerCloudServices()
cloud_cache.parameters = dict(server='test.example.com')
cloud_cache._handle_modify = mock.MagicMock(return_value=modify_return)
cloud_cache._do_connect = mock.MagicMock()
cloud_cache._ensure_sso_state = mock.MagicMock()
cloud_cache._ensure_connected(status)
if is_connected:
cloud_cache._handle_modify.assert_called_once_with(status, mock.ANY)
cloud_cache._do_connect.assert_not_called()
else:
cloud_cache._handle_modify.assert_not_called()
cloud_cache._do_connect.assert_called_once_with()
if is_connected and not modify_return:
cloud_cache._ensure_sso_state.assert_not_called()
else:
cloud_cache._ensure_sso_state.assert_called_once_with(mock.ANY)
def test__ensure_sso_state(self):
for is_sso_enabled in [True, False]:
for desired_sso_state in [True, False]:
self._test__ensure_sso_state(is_sso_enabled, desired_sso_state)
@staticmethod
def _test__ensure_sso_state(is_sso_enabled, desired_sso_state):
cloud_cache = ctera_filer_cloud_services.CteraFilerCloudServices()
cloud_cache.parameters = dict(sso=desired_sso_state)
cloud_cache._ctera_filer.services.sso_enabled.return_value = is_sso_enabled
cloud_cache._ensure_sso_state(dict(changed=[], skipped=[]))
if is_sso_enabled == desired_sso_state:
cloud_cache._ctera_filer.services.enable_sso.assert_not_called()
cloud_cache._ctera_filer.services.disable_sso.assert_not_called()
elif desired_sso_state:
cloud_cache._ctera_filer.services.enable_sso.assert_called_once_with()
cloud_cache._ctera_filer.services.disable_sso.assert_not_called()
else:
cloud_cache._ctera_filer.services.enable_sso.assert_not_called()
cloud_cache._ctera_filer.services.disable_sso.assert_called_once_with()
@staticmethod
def test_do_connect():
connect_parameters = dict(server='test.example.com', user='admin', password='password')
cloud_cache = ctera_filer_cloud_services.CteraFilerCloudServices()
cloud_cache.parameters = copy.deepcopy(connect_parameters)
cloud_cache.parameters['unused_param'] = True
cloud_cache._do_connect()
cloud_cache._ctera_filer.services.connect.assert_called_once_with(**connect_parameters)
def test__handle_modify(self):
for change_server in [True, False]:
for connect_success in [True, False]:
for force_reconnect in [True, False]:
self._test__handle_modify(change_server, connect_success, force_reconnect)
def _test__handle_modify(self, change_server, connect_success, force_reconnect):
current_server = 'current_server'
other_server = 'other_server'
status = munch.Munch(dict(server_address=current_server))
cloud_cache = ctera_filer_cloud_services.CteraFilerCloudServices()
cloud_cache.parameters = dict(server=other_server if change_server else current_server, force_reconnect=force_reconnect)
cloud_cache._do_connect = mock.MagicMock()
if not connect_success:
cloud_cache._do_connect.side_effect = CTERAException()
ret = cloud_cache._handle_modify(status, dict(changed=[], skipped=[]))
if change_server:
cloud_cache._ctera_filer.services.disconnect.assert_called_once_with()
cloud_cache._do_connect.assert_called_once_with()
self.assertEqual(ret, connect_success)
else:
if force_reconnect:
cloud_cache._ctera_filer.services.reconnect.assert_called_once_with()
else:
cloud_cache._ctera_filer.services.reconnect.assert_not_called()
self.assertTrue(ret)
|
# -*- coding: utf-8 -*-
"""Datacenter topology.
Consists of two core switches, one access-layer (terminate) switch, and one leaf switch per segment.
_________terminate_switch_____________________
| |
core_sw----------------core_sw
| |
------------------------------------------------------
| | | |
leaf_sw1 leaf_sw2 .... leaf_sw_n leaf_sw_n+1
| | | |
servers servers servers servers
"""
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.node import RemoteController, OVSSwitch
from functools import partial
class MyTopo( Topo ):
"Simple topology example."
def __init__( self, leaf_sw_am, serv_per_sw, ips):
"Create custom topo."
# Initialize topology
Topo.__init__( self )
# Add hosts and switches
ts1 = self.addSwitch( 's1', dpid='%x' % 11)
cs1 = self.addSwitch( 'cs1', dpid='%x' % 12)
cs2 = self.addSwitch( 'cs2', dpid='%x' % 13)
self.addLink( ts1, cs1, 2, 1 )
self.addLink( ts1, cs2, 3, 1 )
cs_pnum = 2
ip_num = 0
max_ip_num = len(ips)
ip_addr = []
for i in range (max_ip_num):
ip_addr.append(10)
for i in range(1, leaf_sw_am+1):
# create leaf sw and add hosts from one segment to it
s = self.addSwitch( 'ls%s'%i, dpid='%x' % (13+i))
serv_pnum = 3
self.addLink( s, cs1, 1, cs_pnum )
self.addLink( s, cs2, 2, cs_pnum )
cs_pnum += 1
# add servers
for j in range(1, serv_per_sw+1):
ip_addr[ip_num] += 1
serv = self.addHost( 'serv%s-%s'%(i,j), ip=str( ips[ip_num]+'%s/24'%ip_addr[ip_num] ) )
self.addLink( serv, s, 0, serv_pnum )
serv_pnum += 1
ip_num += 1
if ip_num >= max_ip_num:
ip_num = 0
def runMinimalTopo():
CONTROLLER_IP = '192.168.2.4'
leaf_sw_am = 6
serv_am = 5
ips = ['172.16.24.', '172.16.0.', '172.16.16.', '172.16.28.', '172.16.40.', '172.16.32.']
topo = MyTopo(leaf_sw_am, serv_am, ips)
net = Mininet(topo = topo,
controller=lambda name: RemoteController( name, ip=CONTROLLER_IP),
switch=partial(OVSSwitch, protocols='OpenFlow13'),
autoSetMacs=True )
net.start()
ip_num = 0
max_ip_num = len(ips)
for i in range(1, leaf_sw_am+1):
for j in range(1, serv_am+1):
net.get('serv%s-%s'%(i,j) ).cmd('ip route add default via '+ ips[ip_num]+'1')
ip_num += 1
if ip_num >= max_ip_num:
ip_num = 0
net.get('s1').cmd('ovs-vsctl add-port s1 eth1')
cli = CLI(net)
# After the user exits the CLI, shutdown the network.
net.stop()
if __name__ == '__main__':
# This runs if this file is executed directly
setLogLevel( 'info' )
runMinimalTopo()
topos = { 'mytopo': MyTopo } |
from typing import List
class Solution:
def sortEvenOdd(self, nums: List[int]) -> List[int]:
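# Split values by index parity, sort evens ascending and odds descending,
# then write them back to their original parity positions.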
index_odd = []
index_even = []
for index, val in enumerate(nums):
if index & 1:
index_odd.append(val)
else:
index_even.append(val)
index_even.sort()
index_odd.sort(reverse=True)
for index in range(len(nums)):
if index & 1:
nums[index] = index_odd[index // 2]
else:
nums[index] = index_even[index // 2]
return nums
def main():
sol = Solution()
print(sol.sortEvenOdd([4,1,2,3]))
print(sol.sortEvenOdd([2,1]))
if __name__ == '__main__':
main() |
"""Test that feature spec objects work as intended."""
from typing import List
import numpy as np
from timeseriesflattener.aggregation_fns import maximum
from timeseriesflattener.feature_specs.group_specs import (
NamedDataframe,
OutcomeGroupSpec,
PredictorGroupSpec,
)
def test_skip_all_if_no_need_to_process(empty_named_df: NamedDataframe):
"""Test that no combinations are created if no need to process."""
assert (
len(
PredictorGroupSpec(
named_dataframes=[empty_named_df],
lookbehind_days=[1],
aggregation_fns=[maximum],
fallback=[0],
).create_combinations(),
)
== 1
)
def test_skip_one_if_no_need_to_process(empty_named_df: NamedDataframe):
"""Test that one combination is skipped if no need to process."""
created_combinations = PredictorGroupSpec(
named_dataframes=[empty_named_df],
lookbehind_days=[1, 2],
aggregation_fns=[maximum],
fallback=[0],
).create_combinations()
assert len(created_combinations) == 2
def test_aggregation_fn_to_str(empty_named_df: NamedDataframe):
"""Test that aggregation_fn is converted to str correctly."""
pred_spec_batch = PredictorGroupSpec(
named_dataframes=[empty_named_df],
lookbehind_days=[365, 730],
fallback=[np.nan],
aggregation_fns=[maximum],
).create_combinations()
assert "maximum" in pred_spec_batch[0].get_output_col_name()
def test_lookbehind_days_handles_floats(empty_named_df: NamedDataframe):
"""Test that lookbheind days does not coerce floats into ints."""
pred_spec_batch = PredictorGroupSpec(
named_dataframes=[empty_named_df],
lookbehind_days=[2, 0.5],
fallback=[np.nan],
aggregation_fns=[maximum],
).create_combinations()
assert pred_spec_batch[1].lookbehind_days == 0.5
def get_lines_with_diff(text1: str, text2: str) -> List[str]:
"""Find all lines in text1 which are different from text2."""
# Remove whitespace and periods
text_1 = text1.replace(" ", "").replace(".", "")
text_2 = text2.replace(" ", "").replace(".", "")
lines1 = text_1.splitlines()
lines2 = text_2.splitlines()
return [line for line in lines1 if line not in lines2]
def test_create_combinations_outcome_specs(empty_named_df: NamedDataframe):
"""Test that create_combinations() creates the correct outcome_specs."""
outc_spec_batch = OutcomeGroupSpec(
named_dataframes=[empty_named_df],
lookahead_days=[1, 2],
aggregation_fns=[maximum],
fallback=[0],
incident=[True],
).create_combinations()
assert len(outc_spec_batch) == 2
|
'''
Copyright (c) 2017, Megat Harun Al Rashid bin Megat Ahmad, Suhairy bin Sani and Shukri bin Mohd.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted
provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions
and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse
or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# WT.py for displaying wavelet transformation
class WT_Display(object):
def __init__(wtd,filename):
# Extracting data from file
fh = open(filename)
wtd.fhList = fh.readlines()
fh.close()
wtd.freq = np.array(wtd.fhList[2:][0].split(',')[1:]).astype(float)
data = []
for i,j in enumerate(wtd.fhList[2:][1:]):
data = data + [j.split(',')]
data = np.array(data).astype(float)
wtd.T = data.T[0]
wtd.data2 = np.repeat(data[:,1:], 10).reshape(1000,1000).T
# Display image
def imageView(wtd,imageFile = 'obj_ort_test.jpeg', intPol = 'spline36',\
colorMap = cm.gist_rainbow_r):
fig, ax = plt.subplots()
cax = ax.imshow(wtd.data2, interpolation=intPol, cmap=colorMap)
ax.set_ylim(ymin=0,ymax=1000)
ax.set_xticklabels([0,0,20,40,60,80,100])
ax.set_xlabel(r'Time,[$\mu$s]')
ax.set_ylabel(r'Frequency, [kHz]')
cbar = fig.colorbar(cax)
plt.savefig(imageFile)
plt.show()
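# Hypothetical usage sketch: 'wt_output.csv' is an assumed file name whose
# layout matches what the constructor expects (two header lines, then a
# frequency row, then comma-separated time/coefficient rows).
# wtd = WT_Display('wt_output.csv')
# wtd.imageView(imageFile='wt_plot.png')
|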
n1 = 8
n2 = 5
result = n1 > n2
print("n1 > n2:", result)
result = n1 == n2 # comparison happens before assignment, since assignment has lower precedence
print("n1 == n2:", result)
m1 = "hello"
m2 = "hello"
result = m1 == m2
print("m1==m2:", result)
# username = input("Enter a username: ")
# uname = "admin123"
# result = username != uname # True if the two differ, False if they are equal
# print("Username check result:", result)
# 'is' compares object identity
age = 20
age1 = 20
print(id(age)) # id() returns the object's memory address
print(id(age1))
print("age is age1:",age is age1)
money = 999999
salary = 1000000
print(id(money))
print(id(salary))
print("money is salary:",money is salary)
'''
In the interactive environment (CMD), money and salary have different addresses.
In a source file they share the same address, because the source file is
processed as a batch: the whole file is fed to the interpreter at once, top to
bottom, and when it sees the same large integer literal twice it reuses the object.
The interactive shell evaluates as you type; by default there is a small
integer object pool, and values in [-5, 256] go into that pool, so they are
reused and never garbage collected.
'''
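# A quick check of the behaviour described above. This is a hedged sketch:
# the small-integer pool is a CPython implementation detail, and the result
# for the 257 case depends on whether the lines are compiled together (a
# source file typically folds equal literals) or typed one by one in a shell.
pooled_a = 256
pooled_b = 256
print("256 is 256:", pooled_a is pooled_b) # True: inside the [-5, 256] pool
unpooled_a = 257
unpooled_b = 257
print("257 is 257:", unpooled_a is unpooled_b) # may be False in an interactive shell
|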
# Generated by Django 3.0.7 on 2020-06-20 14:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_app', '0002_details_employee_skill'),
]
operations = [
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('person_name', models.CharField(max_length=50)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='Club',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('club_name', models.CharField(max_length=50)),
('members', models.ManyToManyField(to='django_app.Person')),
],
),
]
|
import time
import pandas as pd
def load_user_info_data(df, col_val):
return df.loc[df['e_id'] == col_val]
def load_all_user_info_data(size=1.0):
t_start = time.time()
data = pd.read_csv("raw_data/user_info.txt", names = ['e_id','e_tag','w_id','c_id'], delim_whitespace = True)
data = data.sample(frac = size)
t_end = time.time()
return data
|
from functions import *
# create chatbot
home_bot = create_bot('Jordan')
# train all data
train_all_data(home_bot)
# check identity
identity = input("State your identity please: ")
# rules for responding to different identities
if identity == "Mark":
print("Welcome, Mark. Happy to have you at home.")
elif identity == "Jane":
print("Mark is out right now, but you are welcome to the house.")
else:
print("Your access is denied here.")
exit()
# custom data
house_owner = [
"Who is the owner of this house?",
"Mark Nicholas is the owner of this house."
]
custom_train(home_bot, house_owner)
print("------ Training custom data ------")
# write and train your custom data here IF the identity is Mark
if identity == 'Mark':
city_born = [
"Where was I born?",
"Mark, you were born in Seattle."
]
fav_book = [
"What is my favourite book?",
"That is easy. Your favourite book is The Great Gatsby."
]
fav_movie = [
"What is my favourite movie?",
"You have watched Interstellar more times than I can count."
]
fav_sports = [
"What is my favourite sport?",
"You have always loved baseball."
]
# train chatbot with your custom data
custom_train(home_bot, city_born)
custom_train(home_bot, fav_book)
custom_train(home_bot, fav_movie)
custom_train(home_bot, fav_sports)
# start chatbot
start_chatbot(home_bot)
|
import pandas as pd
import statsmodels.api as sm
import pylab as pl
import numpy as np
# read the data in
df = pd.read_json("dataset.json")
# df.convert_objects(convert_numeric=True)
print df.head()
print df.describe()
data = df[['cancelled', 'amount', 'cab_service_req', 'is_phone_booking', 'made_on_behalf', 'number_of_rooms',
'special_request_made']]
data.hist()
pl.show()
data['intercept'] = 1
train_cols = data.columns[1:]
print train_cols
logit = sm.Logit(data['cancelled'], data[train_cols])
# fit the model
result = logit.fit()
print result.summary() |
import random
import math
class CombatEngine:
def __init__(self,die_mode,sided_die,board,game):
self.board=board
self.die_mode=die_mode
self.sided_die=sided_die
self.rolls=[]
self.roll_die()
self.game=game
def roll_die(self):
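# Pre-roll a pool of sided_die results: random mode draws uniform integers
# in 1..sided_die; otherwise use the deterministic sequence 1..sided_die,
# reversed when die_mode is "decending".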
if self.die_mode=="random":
for n in range(self.sided_die):
self.rolls.append(math.ceil(self.sided_die*random.random()))
else:
self.rolls=[n+1 for n in range(self.sided_die)]
if self.die_mode=="decending":
self.rolls=self.rolls[::-1]
def locate_combat(self):
combat_dict={}
for coord in self.board.board_dict:
if len(self.board.board_dict[coord]["units"])>1:
player_count=[[],[]]
for unit in self.board.board_dict[coord]["units"]:
player_count[unit.player_index].append(unit.name)
if len(player_count[0])!=0 and len(player_count[1])!=0:
if player_count[0]==["Colony"] or player_count[1]==["Colony"]:
combat_dict[coord]={}
combat_dict[coord]= [
{"type": unit.name,"player":unit.player_index,
"num":unit.unit_index,'tactics':unit.tactics,
"coords":unit.coords,
"technology":{"defense": unit.defense,"attack": unit.attack,"movement": unit.movement},
"hits_left":unit.armor,
'turn_created':unit.turn_made
}for unit in self.combat_order(coord) if unit.exists==True]
else:
combat_dict[coord]={}
combat_dict[coord]= [
{"type": unit.name,"player":unit.player_index,
"num":unit.unit_index,'tactics':unit.tactics,
"coords":unit.coords,
"technology":{"defense": unit.defense,"attack": unit.attack,"movement": unit.movement},
"hits_left":unit.armor,
'turn_created':unit.turn_made
}for unit in self.combat_order(coord) if unit.exists==True and unit.name!= "Colony"]
return combat_dict
def kill_bystanders(self,combat_state):
for coord in combat_state:
units=[unit for unit in self.board.board_dict[coord]["units"]]
for unit in units:
if unit.combat_ready != True:
unit.destroy()
def combat_order(self,coord):
order=[]
for unit in self.board.board_dict[coord]["units"]:
order.append(unit)
return sorted(order,key = lambda unit:(unit.tactics,-unit.player.player_index,-unit.unit_index),reverse=True)
def complete_combat_phase(self):
self.kill_bystanders(self.locate_combat())
if len(self.locate_combat())>0:
self.game.log("Combat Locations:")
for key in self.locate_combat():
for unit in self.combat_order(key):
self.game.log("Player "+str(unit.player_index)+" "+str(unit.name)+" "+str(unit.unit_index))
next_coord=[key for key in self.locate_combat()][0]
while len(self.locate_combat())>0 and self.game.winner==None:
combat_coord=[key for key in self.locate_combat()][0]
if combat_coord == next_coord:
self.game.log("Combat at "+str(next_coord))
if len(self.locate_combat())>1:
next_coord=[key for key in self.locate_combat()][1]
else:
next_coord=None
#SCREAN
order = self.combat_order(combat_coord)
for unit in order:
if unit.exists and combat_coord in [key for key in self.locate_combat()] and self.game.winner==None and unit.name!="Colony":
target=unit.player.strat.decide_which_unit_to_attack(self.locate_combat()[combat_coord], combat_coord,unit.name,unit.unit_index)
# print(target)
enemy="no"
for vs_unit in self.combat_order(combat_coord):
if vs_unit.unit_index==target["number"] and vs_unit.player_index!=unit.player_index:
enemy=vs_unit
if enemy != "no":
self.do_combat(unit,enemy)
for player in self.game.players:
player.update_indexes()
def do_combat(self,attacker,target):
if len(self.rolls) == 0:
self.roll_die()
roll=self.rolls[0]
self.rolls.remove(roll)
attack=attacker.attack-target.defense
self.game.log(str(attacker.name)+str(attacker.unit_index)+","+str(attacker.player_index)+" VS "+str(target.name)+str(target.unit_index)+","+str(target.player_index)+" Roll:"+str(roll)+" Threshold:"+str(attack))
if attack>=roll or roll==1:
target.armor-=1
self.game.log(str(target.name)+" hit")
if target.armor<=0:
target.destroy()
self.game.log(str(target.name)+" destroyed")
|
# Django settings for ntucker project.
from __future__ import unicode_literals
import posixpath
import os.path
import urlparse
import dj_database_url
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
try:
from local_settings import DEBUG
except ImportError:
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Nathaniel Tucker', 'natmaster@gmail.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': dj_database_url.config()
}
DATABASES['default']['OPTIONS'] = {'autocommit': True,}
DB_POOL_SIZE = int(os.environ.get('DB_POOL_SIZE', 4))
USE_DB_CONNECTION_POOLING = os.environ.get('USE_DB_CONNECTION_POOLING', "True") == "True"
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = [".ntucker.me", "ntucker.herokuapp.com", ]
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'US/Pacific'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en'
LANGUAGES = (
('en', 'English'),
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = False
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
STATIC_URL = "https://s3.amazonaws.com/ntucker.me/"
STATIC_ROOT = STATIC_URL
MEDIA_URL = STATIC_URL + "media/"
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, "static"),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
'compressor.finders.CompressorFinder',
)
# Django Storages/S3 Settings
DEFAULT_FILE_STORAGE = 'utils.s3backend.MediaRootS3BotoStorage'
STATICFILES_STORAGE = 'utils.s3backend.StaticRootS3BotoStorage'
COMPRESS_STORAGE = 'utils.s3backend.CachedRootS3BotoStorage'
# AWS Settings
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
AWS_STORAGE_BUCKET_NAME = 'static.ntucker.me'
AWS_HEADERS = {
b'x-amz-acl': b'public-read',
b'Expires': b'Thu, 15 Apr 2020 20:00:00 GMT',
b'Cache-Control': b'max-age=86400',
}
from boto.s3.connection import ProtocolIndependentOrdinaryCallingFormat
AWS_S3_CALLING_FORMAT = ProtocolIndependentOrdinaryCallingFormat()
AWS_S3_CUSTOM_DOMAIN = 'static.ntucker.me'
AWS_PRELOAD_METADATA = True
AWS_IS_GZIPPED = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_SECURE_URLS = False
MEDIA_ROOT = '/%s/' % 'media'
MEDIA_URL = '//%s/media/' % AWS_STORAGE_BUCKET_NAME
STATIC_ROOT = os.path.join(PROJECT_ROOT, "..", "static")
STATIC_URL = '//%s/static/' % AWS_STORAGE_BUCKET_NAME
COMPRESS_URL = STATIC_URL
COMPRESS_ROOT = STATIC_ROOT
COMPRESS_OFFLINE = True
# Subdirectory of COMPRESS_ROOT to store the cached media files in
COMPRESS_OUTPUT_DIR = "compress"
COMPRESS_PARSER = "compressor.parser.Html5LibParser"
COMPRESS_CSS_FILTERS = ['compressor.filters.css_default.CssAbsoluteFilter', 'compressor.filters.cssmin.CSSMinFilter', 'compressor.filters.datauri.CssDataUriFilter']
COMPRESS_JS_FILTERS = ['compressor.filters.jsmin.SlimItFilter']
COMPRESS_DATA_URI_MAX_SIZE = 1024
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ['SECRET_KEY']
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
#"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"cms.context_processors.media",
"sekizai.context_processors.sekizai",
)
CMS_TEMPLATES = (
('index.html', 'Index'),
('simple.html', 'Simple'),
('four.html', 'Four blocks'),
('two.html', 'Two blocks'),
)
MIDDLEWARE_CLASSES = (
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.gzip.GZipMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.user.CurrentUserMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
)
if DEBUG:
MIDDLEWARE_CLASSES = MIDDLEWARE_CLASSES + ('utils.middleware.QueryDebuggerMiddleware',)
ROOT_URLCONF = 'ntucker.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'ntucker.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'debug_toolbar',
'storages',
'compressor',
'south',
'django.contrib.sitemaps',
#cms stuff
'cms',
'mptt',
'menus',
'sekizai',
'cms.plugins.text',
'cms.plugins.picture',
'cms.plugins.link',
'cms.plugins.file',
'cms.plugins.teaser',
'cms.plugins.video',
'cms.plugins.googlemap',
'tinymce',
)
EMAIL_HOST = "smtp.sendgrid.net"
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = os.environ['SENDGRID_USERNAME']
EMAIL_HOST_PASSWORD = os.environ['SENDGRID_PASSWORD']
SERVER_EMAIL = "admin@ntucker.me"
DEFAULT_FROM_EMAIL = "admin@ntucker.me"
EMAIL_CONFIRMATION_DAYS = 2
EMAIL_DEBUG = DEBUG
INTERNAL_IPS = ('127.0.0.1',)
CMS_MEDIA_PATH = "cms/"
CMS_MEDIA_ROOT = os.path.join(MEDIA_ROOT, CMS_MEDIA_PATH)
CMS_MEDIA_URL = posixpath.join(MEDIA_URL, CMS_MEDIA_PATH)
CMS_PAGE_MEDIA_PATH = "cms_page_media/"
CMS_VIEW_PERMISSION = False
CMS_LANGUAGES = {
1: [
{
'code': 'en',
'name': 'English',
'public': True,
},
],
'default': {
'fallbacks': ['en',],
'redirect_on_fallback':True,
'public': False,
'hide_untranslated': False,
}
}
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
"root": {
"level" : "WARNING",
"handlers": ["console"],
"propagate": True,
},
"formatters": {
"simple": {
"format": "%(levelname)s %(message)s"
},
"simple_time": {
"format": "%(asctime)s : %(levelname)s %(message)s"
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
"console":{
"level": "INFO",
"class": "logging.StreamHandler",
"formatter": "simple"
},
},
'loggers': {
'django.request': {
'handlers': ['console'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
if "REDISTOGO_URL" in os.environ:
urlparse.uses_netloc.append("redis")
url = urlparse.urlparse(os.environ["REDISTOGO_URL"])
REDIS_HOST = url.hostname
REDIS_PORT = url.port
REDIS_PASSWORD = url.password
# Caching
CACHES = {
"default": {
"BACKEND": "autocache.cache.RedisHerdCache",
"LOCATION": ":".join([REDIS_HOST, str(REDIS_PORT)]),
"OPTIONS": {
"DB": 0,
"PASSWORD": REDIS_PASSWORD,
},
"VERSION": 0,
},
}
if os.environ.get("CACHE_KEY_PREFIX"):
CACHES['default']['KEY_PREFIX'] = os.environ.get("CACHE_KEY_PREFIX")
CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True
CACHE_MIDDLEWARE_SECONDS = 120
# local_settings.py can be used to override environment-specific settings
# like database and email that differ between development and production.
try:
from local_settings import *
except ImportError:
pass
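# For example, a development local_settings.py might contain (the values below
# are purely illustrative, not the actual file):
#
#   DEBUG = True
#   ALLOWED_HOSTS = ['localhost', '127.0.0.1']
#   EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'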
|
import cv2, numpy as np, pickle
def main():
#fname = "ft_video3.wmv"
fname = "pass3.mpg"
videoProc(fname)
def nothing(args):
pass
def settingUpdate(hsvL,hsvU,rgbLim):
    hsvL[0] = cv2.getTrackbarPos('hLower','sliders1')
    hsvL[1] = cv2.getTrackbarPos('sLower','sliders1')
    hsvL[2] = cv2.getTrackbarPos('vLower','sliders1')
    #ensure that the upper bound is greater than the lower bound:
    hsvU[0] = np.amax(np.array([cv2.getTrackbarPos('hUpper','sliders1'),hsvL[0]+1]))
    hsvU[1] = np.amax(np.array([cv2.getTrackbarPos('sUpper','sliders1'),hsvL[1]+1]))
    hsvU[2] = np.amax(np.array([cv2.getTrackbarPos('vUpper','sliders1'),hsvL[2]+1]))
    rgbLim[0,0] = cv2.getTrackbarPos('rLower','sliders2')
    rgbLim[0,1] = cv2.getTrackbarPos('gLower','sliders2')
    rgbLim[0,2] = cv2.getTrackbarPos('bLower','sliders2')
    rgbLim[1,0] = cv2.getTrackbarPos('rUpper','sliders2')
    rgbLim[1,1] = cv2.getTrackbarPos('gUpper','sliders2')
    rgbLim[1,2] = cv2.getTrackbarPos('bUpper','sliders2')
    #the numpy arrays above are mutated in place, but an int parameter would be
    #rebound locally and lost, so the blur radius is returned instead (clamped
    #to at least 1, since cv2.blur rejects a zero kernel size):
    blurRad = max(cv2.getTrackbarPos('blur','camera'), 1)
    return blurRad
def videoProc(fname):
#initialize variables for HSV limits and blur radius:
blurRad = 3#image blur radius
hsvl = np.array([19,17,208])#lower HSV cutoff
hsvu = np.array([31,143,255])#upper HSV cutoff
rgbLim = np.array([[122,190,219],[255,255,255]])#rgb lower/upper cutoffs
#load camera
cv2.namedWindow('camera')#camera image
#create trackbars for HSV limits and blur value:
cv2.namedWindow('sliders1')
cv2.namedWindow('sliders2')
cv2.createTrackbar('hLower', 'sliders1', hsvl[0], 255, nothing)
cv2.createTrackbar('sLower', 'sliders1', hsvl[1], 255, nothing)
cv2.createTrackbar('vLower', 'sliders1', hsvl[2], 255, nothing)
cv2.createTrackbar('hUpper', 'sliders1', hsvu[0], 255, nothing)
cv2.createTrackbar('sUpper', 'sliders1', hsvu[1], 255, nothing)
cv2.createTrackbar('vUpper', 'sliders1', hsvu[2], 255, nothing)
cv2.createTrackbar('blur','camera',blurRad,15,nothing)
cv2.createTrackbar('rLower', 'sliders2', rgbLim[0,0], 255, nothing)
cv2.createTrackbar('gLower', 'sliders2', rgbLim[0,1], 255, nothing)
cv2.createTrackbar('bLower', 'sliders2', rgbLim[0,2], 255, nothing)
cv2.createTrackbar('rUpper', 'sliders2', rgbLim[1,0], 255, nothing)
cv2.createTrackbar('gUpper', 'sliders2', rgbLim[1,1], 255, nothing)
cv2.createTrackbar('bUpper', 'sliders2', rgbLim[1,2], 255, nothing)
capture = cv2.VideoCapture(fname)
    print(capture)
flagShowVis = True
img = np.array([])
img2 = np.copy(img)
while True:
#read the camera image:
ret,img = capture.read()
#update settings from sliders:
        blurRad = settingUpdate(hsvl,hsvu,rgbLim)
#process frames
if ret:
            #blur the image to reduce color noise (blurRad x blurRad kernel):
            img = cv2.blur(img,(blurRad,blurRad))
            #filter the image in RGB space
            rgbMask = cv2.inRange(img,rgbLim[0,:],rgbLim[1,:])
#convert image to HSV
hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
#threshold the image using the HSV lower and upper bounds
thresh = cv2.inRange(hsv,hsvl,hsvu)
            #intersect (AND) the HSV and RGB masks:
            thresh = thresh & rgbMask
#find contours in the thresholded image:
contours,hierarchy = cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
#pickle contours to deal with a bug in opencv 2.4.3
tmp = pickle.dumps(contours)
contours = pickle.loads(tmp)
#get the contour with the largest area:
max_area = -1
best_cnt = np.array([])
cx,cy = (0,0)
#loop over the contours and find the one with the largest area:
for cnt in contours:
area = cv2.contourArea(cnt)
if area>max_area:
max_area = area
best_cnt = cnt
#check that the size of the best contour is not empty
if np.shape(best_cnt)[0]>0:
#find the centroid of best contour
M = cv2.moments(best_cnt)
#check that the divisor moment is nonzero; if it is, set the location to (0,0)
if M['m00']>0:
cx,cy = int(M['m10']/M['m00']),int(M['m01']/M['m00'])
else:
cx,cy = (0,0)
if flagShowVis:
#draw circle at contour centroid:
cv2.circle(img,(cx,cy),3,(0,255,0),-1)
cv2.imshow('camera',img)
else:
#try drawing the best contour and not showing the thresholded image
cv2.circle(img,(cx,cy),3,(0,255,0),-1)
cv2.drawContours(img,contours,-1,(255,0,0),2)
cv2.imshow('camera',img)
keyRet = cv2.waitKey(5)
#see if user hits 'ESC' in opencv windows
if keyRet==27:
break
elif keyRet==32:
flagShowVis = not flagShowVis
cv2.destroyAllWindows()
if __name__=="__main__":
main()
|
from sklearn import tree
# Collect data
# 0 = bumpy
# 1 = smooth
features = [[140, 1], [130, 1], [150, 0], [170, 0]]
# 0 = apple
# 1 = orange
labels = ["apple", "apple", "orange", "orange"]
# Train classifier
clf = tree.DecisionTreeClassifier()
clf = clf.fit(features, labels) #for each set of data we define a specify class
# Make prediction
weight = int(input('Please enter a weight: '))
surface = int(input('Please enter 0 = bumpy or 1 = smooth: '))
prediction = clf.predict([[weight, surface]])
print(''.join(prediction).capitalize())
|
from __future__ import absolute_import
# import everything from the fortran object
from ._rrtmg_sw import *
|
import argparse
import csv
import statistics
# global variables
subject_map = {}
keyword_score = {}
keyword_id = {}
subjects = []
keywords = []
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--codingfile', help='coding/analysis file (default = coding.csv)', type=argparse.FileType('r'),
default='coding.csv')
parser.add_argument('--keywordfile', help='keyword list file (default = keywords.csv)', type=argparse.FileType('r'),
default='keywords.csv')
parser.add_argument('--normalizefile', help='normalize terms file (default = normalize_terms.csv)', type=argparse.FileType('r'),
default='normalize_terms.csv')
args = parser.parse_args()
# Read through existing keywords file
with args.keywordfile as keywords_file:
keywords_csv = csv.reader(keywords_file)
for row in keywords_csv:
keywords += [row[0]]
keyword_score[row[0]] = row[1]
keyword_id[row[0]] = row[2]
keyword_coded = {}
with args.codingfile as coding_file:
coding_csv = csv.reader(coding_file)
next(coding_csv, None) # Skip the header row
for row in coding_csv:
old_keyword = row[7] # Column H
new_score = row[13] # Column N
new_keyword = row[16] # Column Q
if not new_score.isnumeric():
# do nothing, go to the next line
continue
            if new_keyword.strip() != '':
# keep in mind that the new keyword may or may not already exist in keywords
if new_keyword in keyword_coded:
# if we've already seen it in this coding file
print("Updating new keyword ", new_keyword)
keyword_coded[new_keyword] += [int(new_score)]
else:
print("Adding new keyword ", new_keyword)
keyword_coded[new_keyword] = [int(new_score)]
else:
# we have seen cases where there's nothing in either H or Q; just skip this
                if old_keyword.strip() == '':
continue
if old_keyword in keyword_coded:
# if we've already seen it in this coding file
print("There's no new keyword; Updating old keyword ", old_keyword)
keyword_coded[old_keyword] += [int(new_score)]
else:
print("There's no new keyword; Adding old keyword ", old_keyword)
keyword_coded[old_keyword] = [int(new_score)]
keyword_mode_scores = {}
for k, s in keyword_coded.items():
try:
keyword_mode_scores[k] = statistics.mode(s)
except statistics.StatisticsError:
m = round(statistics.mean(s))
print("Where keyword =", k, " and scores =", s)
print("Made a decision to go with ", m)
keyword_mode_scores[k] = m
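            # Note: statistics.mode raises StatisticsError on an empty series,
            # and on Python versions before 3.8 also when the data is multimodal;
            # the fallback above breaks such ties with the rounded mean instead.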
    # Now merge with the keywords file; copy the structures so the originals
    # are not mutated through the aliases
    keywords_new = list(keywords)
    keyword_score_new = dict(keyword_score)
    keyword_id_new = dict(keyword_id)
for k, s in keyword_mode_scores.items():
if k in keywords:
keyword_score_new[k] = keyword_mode_scores[k]
keyword_id_new[k] = keyword_id[k]
else:
keywords_new += [k]
keyword_score_new[k] = keyword_mode_scores[k]
keyword_id_new[k] = ''
with open('keywords_new.csv', 'w') as new_keyword_file:
keywords_new_csv = csv.writer(new_keyword_file)
for k in keywords_new:
keywords_new_csv.writerow([k, keyword_id_new[k], keyword_score_new[k]])
|
import datetime
import math
import multiprocessing
def main():
    do_computation(1)  # small warm-up run before the timed, parallel pass
t0 = datetime.datetime.now()
print(f"Doing math on {multiprocessing.cpu_count():,} processors.")
processor_count = multiprocessing.cpu_count()
pool = multiprocessing.Pool()
for n in range(1, processor_count + 1):
pool.apply_async(
do_computation,
(30_000_000 * (n - 1) / processor_count, 30_000_000 * n / processor_count),
)
pool.close()
pool.join()
dt = datetime.datetime.now() - t0
print(f"Time taken in seconds: {dt.total_seconds():,.2f}")
def do_computation(start=0, num=10):
pos = start
k_sq = 1000 * 1000
while pos < num:
pos += 1
math.sqrt((pos - k_sq) * (pos - k_sq))
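# A variant of main() that surfaces worker errors: pool.apply_async returns an
# AsyncResult, and calling .get() on it re-raises any exception the worker hit,
# which the fire-and-forget loop in main() would otherwise swallow silently.
# (A sketch using the same work split as main() above.)
def main_checked():
    t0 = datetime.datetime.now()
    processor_count = multiprocessing.cpu_count()
    pool = multiprocessing.Pool()
    results = [
        pool.apply_async(
            do_computation,
            (30_000_000 * (n - 1) / processor_count, 30_000_000 * n / processor_count),
        )
        for n in range(1, processor_count + 1)
    ]
    pool.close()
    pool.join()
    for r in results:
        r.get()  # raises here if the corresponding worker failed
    dt = datetime.datetime.now() - t0
    print(f"Time taken in seconds: {dt.total_seconds():,.2f}")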
if __name__ == "__main__":
main()
|
import requests
from config_bw import *
import json
from bs4 import BeautifulSoup
headers = {
'Accept': 'application/json, text/javascript',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Connection': 'keep-alive',
'Content-Type': 'application/x-www-form-urlencoded',
'Cookie': 'cy=2; cye=beijing; _lxsdk_cuid=161c5833a08c8-01effa25717221-326a7d04-13c680-161c5833a09c8; _lxsdk=161c5833a08c8-01effa25717221-326a7d04-13c680-161c5833a09c8; _hc.v=b03e110a-3a63-63bb-9cc7-b4f281bcc057.1519437168; dper=1657d6b1bc2812d25046a4911dc7c7829cb81b190ffc14aaf3d924bceccbdf9d; ll=7fd06e815b796be3df069dec7836c3df; ua=%E7%99%BD%E7%8C%AB%E5%A4%A7%E4%BE%A0yecc; ctu=ea699dee96a1f9175dbf280cc9dc03d29c3387333a91cd8cea05f785d0fa67b6; msource=default; default_ab=citylist%3AA%3A1; cityid=2; _lx_utm=utm_source%3DmShare; _lxsdk_s=161c5833a0c-8b-b5c-143%7C%7C112',
'Host': 'm.dianping.com',
'Origin': 'https://h5.dianping.com',
'Referer': 'https://h5.dianping.com/app/app-community-free-meal/index.html?from=city_hot',
'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Mobile Safari/537.36'
}
s = requests.session()
s.headers.update(headers)
ActivityId = []
end = False
def get_index_urls(page_index):
    url = 'https://m.dianping.com/activity/static/list?page=' + str(
        page_index) + '&cityid=&regionParentId=0&regionId=0&type=1&sort=0&filter=0'
r = s.get(url)
if r.status_code == 200:
return r.text
else:
        print('index page request failed', r.status_code)
def parse_urls(html):
    global end
    content = json.loads(html)
    PageEnd = content['data']['pageEnd']
    Activitys = content['data']['mobileActivitys']
    for Activity in Activitys:
        if not Activity['applyed']:
            ActivityId.append(Activity['offlineActivityId'])
            print('Newly listed - ' + Activity['title'])
    # pageEnd may arrive as a JSON boolean or as the string 'true'
    if PageEnd is True or PageEnd == 'true':
        end = True
def get_payload(activityid):
headers_bing = {
'Host': 'm.dianping.com',
'Connection': 'keep-alive',
'Accept': 'application/json, text/plain, */*',
'Origin': 'https://m.dianping.com',
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 10_3 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) CriOS/56.0.2924.75 Mobile/14E5239e Safari/602.1',
'Content-Type': 'application/json;charset=UTF-8',
'Referer': 'https://m.dianping.com/mobile/dinendish/apply/209124081?a=1&source=null&utm_source=null',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cookie': '_lxsdk_cuid=161ad130f75c8-099dd9fd58f3c6-32687a04-1fa400-161ad130f76c8; _lxsdk=161ad130f75c8-099dd9fd58f3c6-32687a04-1fa400-161ad130f76c8; _hc.v=015b9cac-41f5-50c9-5b9f-5485f7ac38f0.1519027163; cye=beijing; s_ViewType=10; _dp.ac.v=66165a86-31be-4f3b-a52c-5f6f4be762d8; ua=%E7%99%BD%E7%8C%AB%E5%A4%A7%E4%BE%A0yecc; ctu=ea699dee96a1f9175dbf280cc9dc03d2019db2f32c6c52c19b366f24102e115d; aburl=1; dper=123dcdd604f0b2aabc11ebeae246f2500ea8444c889a8a82bdb485f58953c104; ll=7fd06e815b796be3df069dec7836c3df; cy=2; cityid=2; default_ab=citylist%3AA%3A1; _lxsdk_s=161c846d25c-d89-114-256%7C%7C57; _lx_utm=utm_source%3Dnull',
}
payload = {
'offlineActivityId': activityid, 'babyBirth': None, 'email': None, 'weddingDate': None, 'haveBaby': None,
'shippingAddress': None, 'comboId': None, 'branchId': None, 'extInfo1': '不愿意', 'extInfo2': None,
'extInfo3': None, 'extraCount': None, 'passCardNo': None, 'env': 1, 'cx': '',
'userAgent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 10_3 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) CriOS/56.0.2924.75 Mobile/14E5239e Safari/602.1',
'referer': 'https://m.dianping.com/mobile/dinendish/apply/' + str(activityid) + '?a=1&source=null&utm_source=null',
'uuid': 0, 'source': 'null'
}
r = requests.post(REQUEST_BING_URL, data=json.dumps(payload), headers=headers_bing)
branch = json.loads(r.text)
print(r.text)
if branch['data']['code'] == 402:
        print('---------------------------- selecting a branch --------------------------')
branchid = get_branchId(activityid)
payload = {
'offlineActivityId': activityid, 'babyBirth': None, 'email': None, 'weddingDate': None, 'haveBaby': None,
'shippingAddress': None, 'comboId': None, 'branchId': branchid, 'extInfo1': '不愿意', 'extInfo2': None,
'extInfo3': None, 'extraCount': None, 'passCardNo': None, 'env': 1, 'cx': '',
'userAgent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 10_3 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) CriOS/56.0.2924.75 Mobile/14E5239e Safari/602.1',
'referer': 'https://m.dianping.com/mobile/dinendish/apply/' + str(
activityid) + '?a=1&source=null&utm_source=null',
'uuid': 0, 'source': 'null'
}
r = requests.post(REQUEST_BING_URL, data=json.dumps(payload), headers=headers_bing)
print(r.text)
def get_branchId(activityid):
url = 'https://m.dianping.com/mobile/dinendish/apply/' + str(activityid) + '?a=1&source=null&utm_source=null'
r = requests.get(url, headers=headers)
html = r.text
soup = BeautifulSoup(html, 'lxml')
branchid = soup.find('select', attrs={'name': 'branchId'}).find_all('option')[1].get('value')
return branchid
def main():
for i in range(1, 10):
html = get_index_urls(i)
parse_urls(html)
if end:
break
for activityid in ActivityId:
print(activityid)
get_payload(activityid)
if __name__ == '__main__':
main()
|
from brownie import accounts
from brownie import ConfigContract
from brownie import KeyBroadcastContract
def main():
acc = accounts.load("ganache9")
print("Starting deployment")
cc = ConfigContract.deploy(5, {"from": acc})
KeyBroadcastContract.deploy(cc.address, {"from": acc})
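# Typically executed with Brownie's script runner, e.g. `brownie run <script>`,
# which imports this module and calls main().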
|
scores = [
"accuracy_score",
"balanced_accuracy_score",
"average_precision_score",
"f1_score",
"precision_score",
"recall_score",
"jaccard_score",
"roc_auc_score",
"explained_variance_score",
"r2_score",
]
losses = [
"brier_score_loss",
"log_loss",
"max_error",
"mean_absolute_error",
"mean_squared_error",
"mean_squared_log_error",
"median_absolute_error",
]
dl_scores = [
"accuracy",
"binary_accuracy",
"categorical_accuracy",
"sparse_categorical_accuracy",
"top_k_categorical_accuracy",
"sparse_top_k_categorical_accuracy",
]
dl_losses = [
"mean_squared_error",
"mean_absolute_error",
"mean_absolute_percentage_error",
"mean_squared_logarithmic_error",
"squared_hinge",
"hinge",
"categorical_hinge",
"logcosh",
"categorical_crossentropy",
"sparse_categorical_crossentropy",
"binary_crossentropy",
"kullback_leibler_divergence",
"poisson",
"cosine_proximity",
]
|
# import
import random
import time
from tkinter import *
from PIL import Image, ImageTk
import glob
# constant
INCOME_TAX = 100
SIZE_OF_DICEimg = 70
AMMOUNT_IN_STARTING = 25000
MINIMUM_BANK_BALLANCE = -1000
BANK = - MINIMUM_BANK_BALLANCE * 12
FINE_FOR_JAIL = 500
player_colours = ['#808080', '#ff0000', '#ffff00', '#00ff00', '#ff00ff', '#8080ff', '#00ffff', '#008000']
random.shuffle(player_colours)
coordinate_of_box = []
# game constant
SALARY = 1500
JAIL_FINE = 500
addrs = glob.glob("New folder (2)/*.jpeg")  # background images
places1 = {
'Mumbai': {'rent': 1000, "price": 10000},
'Delhi': {'rent': 700, "price": 7000},
'Bangalore': {'rent': 1200, "price": 12000},
'Hyderabad': {'rent': 500, "price": 5000},
'Ahmedabad': {'rent': 400, "price": 4000},
'Chennai': {'rent': 800, "price": 8000},
'Kolkata': {'rent': 250, "price": 2500},
'Pune': {'rent': 750, "price": 7500},
'Jaipur': {'rent': 150, "price": 1500},
'Kanpur': {'rent': 450, "price": 4500},
'Nagpur': {'rent': 250, "price": 2500},
'Lucknow': {'rent': 500, "price": 5000},
'New_Delhi': {'rent': 1500, "price": 15000},
'Bhopal': {'rent': 450, "price": 4500},
'Indore': {'rent': 550, "price": 5500},
'Patna': {'rent': 250, "price": 250},
'Ghaziabad': {'rent': 150, "price": 2000},
'Agra': {'rent': 300, "price": 2500},
'Varanasi': {'rent': 1500, "price": 10000},
'Guwahati': {'rent': 250, "price": 2000},
'Kota': {'rent': 800, "price": 5000},
'Gorakhpur': {'rent': 650, "price": 6500},
'Noida': {'rent': 1000, "price": 8500},
'Dehradun': {'rent': 450, "price": 4500},
'Jammu': {'rent': 250, "price": 2500}
}
places2 = {
'water_park': {'rent': 2500, "price": 15000, "Tax": 500},
'Electric_company': {'rent': 3500, "price": 25000, "Tax": 700},
'Railway': {'rent': 5000, "price": 35000, "Tax": 1000},
'AirLines': {'rent': 5500, "price": 40000, "Tax": 1100},
'Motor_boat': {'rent': 3500, "price": 20000, "Tax": 700},
'Resort': {'rent': 2500, "price": 15000, "Tax": 500}
}
# randomness for map
N = 10000
totalBox = 36
no_of_places1 = 23
no_of_places2 = 4
no_of_chance = 2
no_of_communityChest = 2
no_of_incomeTax = 2
unfilledBox = totalBox - 4
box = []
list_Of_Players = []
chance_of_player = 0
txt = None
# initialization
root = Tk()
root.attributes("-fullscreen", True)
w, h = root.winfo_screenwidth(), root.winfo_screenheight()
canvas = Canvas(root, width=w, height=h, bg='#020205')
canvas.pack()
canvas.images = list()
imgIndexD1 = None
imgIndexD2 = None
d1 = None
d2 = None
DICE_IMZ_POS = (w / 2, h / 2)
BigBox = None
alpha = .3
images = []
chance_pic = None
chance_pic_object = None
def create_rectangle(x1, y1, x2, y2, **kwargs):
    # Tk's canvas has no native alpha channel, so a translucent rectangle is
    # faked by drawing a semi-transparent RGBA image generated with PIL.
    # global images
if 'alpha' in kwargs:
alpha = int(kwargs.pop('alpha') * 255)
fill = kwargs.pop('fill')
fill = root.winfo_rgb(fill) + (alpha,)
image = Image.new('RGBA', (x2-x1, y2-y1), fill)
images.append(ImageTk.PhotoImage(image))
return canvas.create_image(x1, y1, image=images[-1], anchor='nw')
# image = ImageTk.PhotoImage(image)
# return canvas.create_image(x1, y1, image=image, anchor='nw')
# canvas.create_rectangle(x1, y1, x2, y2, **kwargs)
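# Example call (mouse_pointer below uses this same pattern):
#   create_rectangle(10, 10, 200, 100, fill='#ff0000', alpha=0.3)
# The PhotoImage is appended to the module-level `images` list to keep a live
# reference; without it Tk would garbage-collect the image and nothing would
# be drawn.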
def roleTheDice():
    # possible_outcome = ["2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12"]
    # returns two numbers between 1 and 6
    d1, d2 = random.randint(1, 6), random.randint(1, 6)
    animateDice(d1, d2)
    return d1, d2
def animateDice(diceNo1, diceNo2):
# animate the rolling dice and ends with diceNo
global imgIndexD1, imgIndexD2, d1, d2
x, y = DICE_IMZ_POS
if imgIndexD1 != None or imgIndexD2 != None:
canvas.delete(d1)
canvas.delete(d2)
for i in range(16):
imagefile = 'Dice/animation' + str(i) + ".png"
pic = Image.open(imagefile)
pic = ImageTk.PhotoImage(pic)
# pic = pic.resize(DICE_IMZ_SHAPE)
d1 = canvas.create_image(x + SIZE_OF_DICEimg / 2, y, anchor=NW, image=pic)
d2 = canvas.create_image(x - SIZE_OF_DICEimg / 2, y, anchor=NW, image=pic)
canvas.update()
time.sleep(.1)
canvas.delete(d1)
canvas.delete(d2)
if imgIndexD1 == None:
imagefile = 'Dice/' + str(diceNo1) + ".png"
pic = Image.open(imagefile)
pic = ImageTk.PhotoImage(pic)
d1 = canvas.create_image(x + SIZE_OF_DICEimg / 2, y, anchor=NW, image=pic)
canvas.images.append(pic)
canvas.update()
imgIndexD1 = len(canvas.images) - 1
else:
imagefile = 'Dice/' + str(diceNo1) + ".png"
pic = Image.open(imagefile)
pic = ImageTk.PhotoImage(pic)
# pic = pic.resize(DICE_IMZ_SHAPE)
d1 = canvas.create_image(x + SIZE_OF_DICEimg / 2, y, anchor=NW, image=pic)
canvas.images[imgIndexD1] = pic
canvas.update()
if imgIndexD2 == None:
imagefile = 'Dice/' + str(diceNo2) + ".png"
pic = Image.open(imagefile)
pic = ImageTk.PhotoImage(pic)
# pic = pic.resize(DICE_IMZ_SHAPE)
d2 = canvas.create_image(x - SIZE_OF_DICEimg / 2, y, anchor=NW, image=pic)
canvas.images.append(pic)
canvas.update()
        imgIndexD2 = len(canvas.images) - 1
else:
imagefile = 'Dice/' + str(diceNo2) + ".png"
pic = Image.open(imagefile)
pic = ImageTk.PhotoImage(pic)
# pic = pic.resize(DICE_IMZ_SHAPE)
d2 = canvas.create_image(x - SIZE_OF_DICEimg / 2, y, anchor=NW, image=pic)
canvas.images[imgIndexD2] = pic
canvas.update()
class PLAYER:
def __init__(self, ID):
self.name = None
self.total_prop = AMMOUNT_IN_STARTING
self.bank_balance = AMMOUNT_IN_STARTING
self.NoOfProperties = 0
self.ListOfPropertied = []
self.cash = 0
self.colour = player_colours[ID]
self.id = ID
self.chanceToBeMissed = 0
self.houseNo = 0
self.object = None
self.name = None
self.bankkrupt = False
self.tax_C = 0
self.player_imz = None
    def update_variables(self):
self.NoOfProperties = len(self.ListOfPropertied)
self.total_prop = self.bank_balance + self.cash
self.tax_C = 0
for prop in self.ListOfPropertied:
if prop.type == 'C':
self.tax_C += prop.tax
self.total_prop += prop.cost
def players():
# NoOfPlayer = int(input("no of players"))
NoOfPlayer = 6
if NoOfPlayer <= 0 or NoOfPlayer > 6:
print("invalid choice \n enter no b/n 1 and 6")
NoOfPlayer = int(input("no of players"))
for i in range(NoOfPlayer):
p = PLAYER(i)
list_Of_Players.append(p)
# name = input("name of player" + str(i))
p.name = 'player' + str(i)
class BLOCK:
def __init__(self):
self.rectangle = None
self.cost = 0
self.win = 0
self.rent = 0
self.fine = 0
self.tax = 0
self.name = None
self.type = "A"
self.oner = None
self.oner_rectangle = None
self.box_no = None
self.coordinates = None
def update(self, player, diceNo):
# a : start, JAIL, GO TO JAIL, REST HOUSE
# B: PLACE 1
# c: PLACE 2
# D: communityChest
# F: incomeTax
# if self.oner:
# print(self.type, player.name, self.oner.name)
Action = None
if self.oner == player:
Action = 'No Action'
return Action
else:
if self.type == 'A':
if self.name == 'jail':
# pay fine or miss chance
if player.bank_balance > self.fine:
# paying fine
player.bank_balance -= self.fine
Action = player.name + ' is paying fine $' + str(self.fine)
else:
# missing chance
player.chanceToBeMissed = 3
Action = player.name + " is missing 3 chance"
elif self.name == 'go_to_jail':
Action = 'Go To Jail'
player.houseNo = 9
Action = Action + " and" \
"\n"
if player.bank_balance > self.fine:
# paying fine
player.bank_balance -= self.fine
Action += player.name + ' is paying fine $' + str(self.fine)
else:
# missing chance
player.chanceToBeMissed = 3
Action += player.name + " is missing 3 chance"
draw_players()
return Action
elif self.name == 'rest_house':
                    Action = player.name + ' rests for 1 chance'
player.chanceToBeMissed = 1
elif self.type == 'F':
# tax = INCOME_TAX * player.NoOfProperties
tax = int(player.total_prop * .05) # 5%
player.bank_balance -= tax if tax > 0 else 100
Action = player.name + ' is paying tax of $' + str(tax if tax > 0 else 100)
elif self.type == 'B' or self.type == 'C':
if self.oner == None:
if player.bank_balance - self.cost > MINIMUM_BANK_BALLANCE:
# buy property
player.bank_balance -= self.cost
                        Action = player.name + ' buys ' + self.name + ' of cost $' + str(self.cost)
# add this amount to bankers account
self.oner = player
player.total_prop += 1
player.ListOfPropertied.append(self)
else:
Action = player.name + " can't buy this " + self.name + " BankBalance = $" + str(player.bank_balance) + ' & cost of property $' + str(self.cost)
else:
if player.bank_balance - MINIMUM_BANK_BALLANCE > self.rent:
Action = player.name + ' is paying $' + str(self.rent) + ' to ' + self.oner.name
player.bank_balance -= self.rent
self.oner.bank_balance += self.rent
else:
if len(player.ListOfPropertied) == 0:
# player is bankkrupt
player.bankkrupt = True
                            Action = player.name + ' is bankrupt'
return Action
else:
                            Action = player.name + ' sells a property to pay rent of ' + str(self.rent) + '\n' + player.name + ' is paying $' + str(self.rent) + ' to ' + self.oner.name
# index = input('index of property to be sell from' + str([p.name for p in player.ListOfPropertied]))
# index = int(index)
prop = random.choice(player.ListOfPropertied)
# prop = player.ListOfPropertied[index]
player.bank_balance += prop.cost
player.ListOfPropertied.remove(prop)
prop.oner = None
player.bank_balance -= self.rent
self.oner.bank_balance += self.rent
# print(player.ListOfPropertied)
elif self.type == 'D' or self.type == 'E':
if diceNo % 2 == 0:
player.bank_balance += 500
                    Action = player.name + ' is getting $500 from ' + self.name
else:
player.bank_balance -= 500
Action = player.name + ' is paying $500 to ' + self.name
else:
pass
# if self.name == "jail":
# if player.bank_balance > FINE_FOR_JAIL:
# player.bank_balance -= FINE_FOR_JAIL
# else:
# pass
# elif self.name == "go_to_jail":
# player.houseNo = 10
# if player.bank_balance > FINE_FOR_JAIL:
# player.bank_balance -= FINE_FOR_JAIL
# else:
# pass
# elif self.name == "rest_house":
# pass
return Action
def give_place(no, coordinates):
global no_of_places1, no_of_places2, no_of_chance, no_of_communityChest, no_of_incomeTax, unfilledBox, SALARY
place = BLOCK()
place.box_no = no
place.coordinates = coordinates
# print(unfilledBox, no, '\n\n')
# house
if no == 0:
# start
place.name = "start"
place.win = SALARY
elif no == 9:
# jail
place.name = "jail"
place.fine = JAIL_FINE
# pay fine or miss 3 chance
elif no == 18:
# rest house
place.name = "rest_house"
# miss a chance
elif no == 27:
# go to jail
place.name = "go_to_jail"
place.fine = JAIL_FINE
else:
# no of places1 is 37-4-4-4-2 = 23
# no of places2 is 4
# 2 chance 2 community chest
# 2 income tax
a = random.randint(0, N)
if a < N * no_of_places1 // unfilledBox:
key = random.choice(list(places1))
listOfPara = places1[key]
del places1[key]
# print(key, no_of_places1)
place.name = key
place.rent = listOfPara['rent']
place.cost = listOfPara['price']
place.type = 'B'
no_of_places1 -= 1
unfilledBox -= 1
elif a < N * (no_of_places2 + no_of_places1) // unfilledBox:
key = random.choice(list(places2))
listOfPara = places2[key]
del places2[key]
# print(key, no_of_places2)
place.name = key
place.rent = listOfPara['rent']
place.cost = listOfPara['price']
place.type = 'C'
place.tax = listOfPara['Tax']
no_of_places2 -= 1
unfilledBox -= 1
elif a < N * (no_of_places2 + no_of_places1 + no_of_chance) // unfilledBox:
# print('chance', no_of_chance)
place.name = 'chance'
place.type = 'D'
no_of_chance -= 1
unfilledBox -= 1
elif a < N * (no_of_places2 + no_of_places1 + no_of_chance + no_of_communityChest) // unfilledBox:
# print('communityChest', no_of_communityChest)
place.name = 'communityChest'
place.type = 'E'
no_of_communityChest -= 1
unfilledBox -= 1
elif a < N * (
no_of_places2 + no_of_places1 + no_of_chance + no_of_communityChest + no_of_incomeTax) // unfilledBox:
# print('incomeTax', no_of_incomeTax - 1)
place.name = 'incomeTax'
place.type = 'F'
no_of_incomeTax -= 1
unfilledBox -= 1
else:
place = give_place(no, coordinates)
# print('12345678', no)
return place
def create_map():
global k1, k2, box
k1 = w // 10
k2 = h // 10
co = 0
for i in range(10):
j = 9 * k2
# b = c.create_rectangle(i * k1, j, i * k1 + k1, j + k2, fill="#555555")
coordinate = i * k1, j, i * k1 + k1, j + k2
place = give_place(co, coordinate)
box.append(place)
# c.create_text(i * k1 + k1 / 2, j + k2 / 2, fill="darkblue", font="Times 15 italic bold", text=place.name)
# text=str(co))
co += 1
for j in range(8, 0, -1):
i = 9 * k1
# b = c.create_rectangle(i, j * k2, i + k1, j * k2 + k2, fill="#555555")
coordinate = i, j * k2, i + k1, j * k2 + k2
place = give_place(co, coordinate)
box.append(place)
# c.create_text(i + k1 / 2, j * k2 + k2 / 2, fill="darkblue", font="Times 15 italic bold", text=place.name)
# text=str(co))
co += 1
for i in range(9, -1, -1):
j = 0
# b = c.create_rectangle(i * k1, j, i * k1 + k1, j + k2, fill="#555555")
coordinate = i * k1, j, i * k1 + k1, j + k2
place = give_place(co, coordinate)
# place = give_place(co, b, c)
# place = give_place(co, b)
box.append(place)
# canvas.create_text(i * k1 + k1 / 2, j + k2 / 2, fill="darkblue", font="Times 15 italic bold",
# text=place.name)
# text=str(co))
co += 1
for j in range(1, 9):
i = 0 * k1
# b = c.create_rectangle(i, j * k2, i + k1, j * k2 + k2, fill="#555555")
coordinate = i, j * k2, i + k1, j * k2 + k2
place = give_place(co, coordinate)
# place = give_place(co, b, c)
# place = give_place(co, b)
box.append(place)
# canvas.create_text(i + k1 / 2, j * k2 + k2 / 2, fill="darkblue", font="Times 15 italic bold",
# text=place.name)
co += 1
def draw_map():
canvas.delete("all")
imagefile = addrs[1]
pic = Image.open(imagefile)
pic = ImageTk.PhotoImage(pic)
canvas.create_image(0, 0, anchor=NW, image=pic)
canvas.images.append(pic)
canvas.update()
for b in box:
x1, y1, x2, y2 = b.coordinates
k1 = w // 10
k2 = h // 10
if b.rectangle:
canvas.delete(b.rectangle)
rectangle = canvas.create_rectangle(x1, y1, x2, y2, fill="#000000")
canvas.create_text(x1 + k1 / 2, y1 + k2 / 2, fill="gray", font="Times 15 italic bold", text=b.name)
b.rectangle = rectangle
if b.oner:
b.oner_rectangle =canvas.create_rectangle(x1, y1, x1 + 25, y1 + 10, fill=b.oner.colour)
draw_players()
draw_player_bio()
canvas.update()
def mouse_pointer(event):
index = None
global BigBox, images
for coordinate in coordinate_of_box:
x1, y1, x2, y2 = coordinate
if event.x > x1 and event.x < x2 and event.y > y1 and event.y < y2:
index = coordinate_of_box.index((x1, y1, x2, y2))
break
if index!= None:
x1 -= k1//2
x2 += k1//2
y1 -= k2//2
y2 += k2//2
if index <= 9:
y1 -= k2//2
y2 -= k2//2
if index <= 18 and index >= 9:
x1 -= k1//2
x2 -= k1//2
if index <= 27 and index >= 18:
y1 += k2//2
y2 += k2 // 2
if index < 40 and index >= 27 or index == 0:
x1 += k1//2
x2 += k1//2
if BigBox != None:
canvas.delete(BigBox)
if box[index].oner != None:
colour = box[index].oner.colour
else:
colour = '#050505'
BigBox = create_rectangle(x1, y1, x2, y2, fill = colour, alpha=alpha)
else:
canvas.delete(BigBox)
# print(len(images))
images = []
pass
player_bio_Object = []
def draw_player_bio():
# player 1
global chance_pic_object, chance_pic
for ob in player_bio_Object:
canvas.delete(ob)
k1 = w // 10
k2 = h // 10
const = 30
x1 = k1 + k1 / 4
y1 = k2 + k2 / 4
x2 = x1 + k1 * 2 + k1 / 3
y2 = y1 + k2 * 3
for i in range(0, 3):
player = list_Of_Players[i]
        player.update_variables()
if player.bankkrupt:
continue
if chance_of_player == player.id:
imagefile = 'player/100x150/player' + player.colour[1:] + ".png"
pic = Image.open(imagefile)
pic = ImageTk.PhotoImage(pic)
chance_pic = pic
            chance_pic_object = canvas.create_image(x1 - 10, y1 + 25, anchor=NW, image=pic)
else:
if chance_pic_object != None:
canvas.delete(chance_pic_object)
chance_pic_object = None
player_bio_Object.append(canvas.create_text((x1 + x2) / 2, y1 + const, fill=player.colour, font="Times 25 bold", text=player.name))
player_bio_Object.append(canvas.create_text((x1 + x2) / 2, y1 + const * 2, fill="#ffffff", font="Times 15",
text='Bank_Balance:' + str(player.bank_balance)))
player_bio_Object.append(canvas.create_text((x1 + x2) / 2, y1 + const * 3, fill="#ffffff", font="Times 15",
text='total_prop:' + str(player.total_prop)))
# canvas.create_text((x1 + x2 )/ 2, y2 + const*3, fill="#ffffff", font="Times 15", text= 'Cash:' + player.cash)
string = ""
line = y1 + const * 4
for i in range(len(player.ListOfPropertied)):
if i % 2 != 1 and i != len(player.ListOfPropertied) - 1:
string = string + player.ListOfPropertied[i].name + ' '
continue
string = string + player.ListOfPropertied[i].name + ' '
player_bio_Object.append(canvas.create_text((x1 + x2) / 2, line, fill="#ffffff", font="Times 15",
text=string))
line = line + const
string = ''
x1 = x2 + k1 / 4
x2 = x1 + k1 * 2 + k1 / 3
x1 = k1 + k1 / 4
y1 = h - k2 - k2 / 4
x2 = x1 + k1 * 2 + k1 / 3
y2 = y1 - k2 * 3
for i in range(3, 6):
player = list_Of_Players[i]
        player.update_variables()
if player.bankkrupt:
continue
if chance_of_player == player.id:
imagefile = 'player/100x150/player' + player.colour[1:] + ".png"
pic = Image.open(imagefile)
pic = ImageTk.PhotoImage(pic)
chance_pic = pic
            chance_pic_object = canvas.create_image(x1 + 0, y2 + 25, anchor=NW, image=pic)
else:
if chance_pic_object != None:
canvas.delete(chance_pic_object)
chance_pic_object = None
# canvas.create_rectangle(x1, y1, x2, y2, fill='#222222')
player_bio_Object.append(canvas.create_text((x1 + x2) / 2, y2 + const, fill=player.colour, font="Times 25 bold", text=player.name))
player_bio_Object.append(canvas.create_text((x1 + x2) / 2, y2 + const * 2, fill="#ffffff", font="Times 15",
text='Bank_Balance:' + str(player.bank_balance)))
player_bio_Object.append(canvas.create_text((x1 + x2) / 2, y2 + const * 3, fill="#ffffff", font="Times 15",
text='total_prop:' + str(player.total_prop)))
# canvas.create_text((x1 + x2 )/ 2, y2 + const*3, fill="#ffffff", font="Times 15", text= 'Cash:' + player.cash)
string = ""
line = y2 + const * 4
for i in range(len(player.ListOfPropertied)):
if i % 2 != 1 and i != len(player.ListOfPropertied) - 1:
string = string + player.ListOfPropertied[i].name + ' '
continue
string = string + player.ListOfPropertied[i].name + ' '
player_bio_Object.append(canvas.create_text((x1 + x2) / 2, line, fill="#ffffff", font="Times 15",
text=string))
line = line + const
string = ''
x1 = x2 + k1 / 4
x2 = x1 + k1 * 2 + k1 / 3
pass
def draw_players():
list_Of_Players.reverse()
List = list_Of_Players
list_Of_Players.reverse()
for p in List:
if p.bankkrupt:
continue
box_no = p.houseNo
        slot = p.id  # per-player slot so tokens on the same box do not overlap
        # print(slot)
        colour = p.colour
        x1, y1, x2, y2 = box[box_no].coordinates
        x1 += k1 // 10 + k1 * slot // 10
x2 = x1 + k1 // 10
y2 -= k1 // 10
y1 = y2 - k1 // 10
imagefile = 'player/small/player' + colour[1:] + ".png"
pic = Image.open(imagefile)
pic = ImageTk.PhotoImage(pic)
# canvas.create_image(x1, y1, anchor=NW, image=pic)
p.player_imz = pic
        if p.object == None:
            p.object = canvas.create_image(x1, y1 - 30, anchor=NW, image=pic)
        else:
            # move the existing token and refresh its image
            canvas.coords(p.object, x1, y1 - 30)
            canvas.itemconfig(p.object, image=pic)
canvas.update()
def update_map():
for b in box:
x1, y1, x2, y2 = b.coordinates
if b.oner_rectangle:
canvas.delete(b.oner_rectangle)
b.oner_rectangle = None
if b.oner:
b.oner_rectangle = canvas.create_rectangle(x1, y1, x1 + 25, y1 + 10, fill=b.oner.colour)
draw_players()
draw_player_bio()
canvas.update()
def mainloop(event):
global chance_of_player, txt
p = list_Of_Players[chance_of_player]
chance_of_player += 1
chance_of_player = chance_of_player%6
if p.chanceToBeMissed > 0:
p.chanceToBeMissed -= 1
return
if p.bankkrupt:
return
dice_no1, dice_no2 = roleTheDice()
dice_no = dice_no1 + dice_no2
newBox = p.houseNo + dice_no
if newBox >= totalBox:
p.bank_balance += SALARY
p.bank_balance -= p.tax_C
newBox = newBox % totalBox
while p.houseNo != newBox:
p.houseNo += 1
p.houseNo = p.houseNo % totalBox
draw_players()
canvas.update()
time.sleep(.1)
else:
while p.houseNo < newBox:
p.houseNo += 1
draw_players()
canvas.update()
time.sleep(.1)
p.houseNo = newBox
Action = box[newBox].update(p, dice_no)
print(Action)
if txt:
canvas.delete(txt)
txt = None
    txt = canvas.create_text(w / 2, h / 2 - SIZE_OF_DICEimg / 2,
                             fill="#f0f0f0", font="Times 15", text=Action)
time.sleep(.5)
update_map()
def setup():
create_map()
for b in box:
coordinate_of_box.append(b.coordinates)
root.bind('<Motion>', mouse_pointer)
root.bind('<Button-1>', mainloop)
players()
draw_map()
if __name__ == '__main__':
# draw(root)
    setup()
# while True:
# mainloop(None)
# time.sleep(1)
# for i in range(25):
# roleTheDice()
# time.sleep(1)
root.mainloop()
# draw_dice()
|
import time
import random
def brute_force_validator(A):
n = len(A)
A.sort()
result = 0
for z in range(2, n):
for y in range(1, z):
for x in range(0, y):
if A[x]+A[y] > A[z]:
result += 1
return result
def ncr(r, n):
    # binomial coefficient C(n, r): the number of ways to choose r items from n
    if r > n:
        return 0
    result = 1
    for i in range(min(r, n - r)):
        result = result * (n - i) // (i + 1)
    return result
def solution(A):
n = len(A)
A.sort()
result = 0
for x in range(0, n-2):
y = x + 1
z = x + 2
while z<n:
if A[x] + A[y] > A[z]:
result += z - y
z += 1
elif y < z - 1:
y += 1
else:
y += 1; z += 1
    return result
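
# A quick self-check (a sketch): compare the O(n**2) solution against the
# O(n**3) brute-force validator on random input and time both. The `time` and
# `random` imports at the top of this file are otherwise unused.
if __name__ == "__main__":
    A = [random.randint(1, 1000) for _ in range(200)]
    t0 = time.time()
    expected = brute_force_validator(list(A))
    t1 = time.time()
    got = solution(list(A))
    t2 = time.time()
    assert got == expected, (got, expected)
    print("brute force: %.3fs, fast solution: %.3fs, triangles: %d"
          % (t1 - t0, t2 - t1, got))
|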
import os
import re
# Hello, welcome to this script! This script was made by Andreas Eike. It renames your movie files and
# the directory name for each movie automatically, in the specified path. You can safely run this script without any
# options if it is placed in a folder which contains subfolders, each with a different movie in it.
# A movie folder can contain multiple files, but the script only renames video files and subtitle files.
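# For example (hypothetical filenames), a folder and file such as
#   Some.Movie.1980.1080p.BluRay/Some.Movie.1980.1080p.BluRay.mkv
# would be renamed to
#   Some Movie (1980)/Some Movie (1980).mkv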
# Specify which path your movie folders are in.
path = os.path.dirname(__file__)
# An array which will hold some known illegal words, which will be removed if found in the filename string.
# Only used if no year is found in the string (A movie file without a year)
illegalWordList = []
# This array is used to store the names of files that could not be renamed, probably because it's not the right file type.
renameErrors = []
# Specifies the correct format for the finished movie name.
# Example:
# Correct syntax would be "Some Movie (1980)"
correctWordRegex = "(?! )^(?!.* ).*[0-9]{3}[)]$"
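# For instance, "Some Movie (1980)" matches correctWordRegex (already properly
# named), while "Some.Movie.1980" or "Some Movie 1980" do not, so those would
# be rebuilt by the renaming logic below.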
# split the string into parts, by using regex.
# Returns an array of words, which can then be built together later.
def splitString(string):
# remove the file extension part of the string
string = string[:len(string) - 4]
# remove some specific characters
string = string.replace("[", "")
string = string.replace("]", "")
    # Splits the string into parts, using separators such as ".", "_" and whitespace
    stringArr = re.split(r"\.|\s+|_", string)
# Capitalizes words.
stringArr = [f.capitalize() for f in stringArr]
return stringArr
# check if word is illegal and should be removed, returns true or false
def wordIsIllegal(string):
# sets the string to lower for unified comparison.
string = string.lower()
# if the word is illegal, return true.
for removeWord in illegalWordList:
        if string.find(removeWord.lower()) != -1:
return True
return False
# Removes illegal words. If a year is found, the function simply removes all text that comes after the year. It also
# sets parentheses around the year string. If no year is found, an illegal word list is used. This is a separate .txt
# file, called wordlist.
def removeWords(wordArr):
index = 0
yearFound = False
for word in wordArr:
        # Checks if the word is a year (and not the first word of the name). If so, sets parentheses around the year,
        # and then removes the rest of the string.
if ifWordIsYear(word) and index != 0:
yearFound = True
if word[0] != "(" and word[len(word) - 1] != ")":
word = "(" + word + ")"
wordArr[index] = word
break
index += 1
if yearFound:
del wordArr[index + 1:len(wordArr)]
# Else, use wordlist to remove words. Rarely used (only if no year is found in the movie filename).
else:
with open("wordlist.txt") as f:
illegalWordArray = f.readlines()
illegalWordArray = [x.strip() for x in illegalWordArray]
illegalWordArray = [x.lower() for x in illegalWordArray]
wordsToRemove = []
for word in wordArr:
if word.lower() in illegalWordArray:
wordsToRemove.append(word)
for word in wordsToRemove:
wordArr.pop(wordArr.index(word))
return wordArr
# check if word is year, returns true or false
def ifWordIsYear(string):
if len(string)==4:
match = re.match(r".*[1-3][0-9]{3}", string)
if match is not None:
return True
return False
# Actually renames the files in your directory.
def renameFile(rootPath, oldFileName, newFileName):
oldPath = os.path.join(rootPath, oldFileName)
newPath = os.path.join(rootPath, newFileName)
os.rename(oldPath, newPath)
print("Renamed FILE", oldName, "to", newName, ".")
def renameDirectory(oldDirectoryName, newDirectoryName):
oldPath = os.path.join(oldDirectoryName)
newPath = os.path.join(newDirectoryName)
if oldPath!=newPath:
os.rename(oldPath, newPath)
print("Renamed DIRECTORY", oldDirectoryName, "to", newDirectoryName)
# Basically os.walk, but with one level of walk to not include subdirectories
# from https://stackoverflow.com/questions/229186/os-walk-without-digging-into-directories-below
def walkLevel(directory, level=1):
directory = directory.rstrip(os.path.sep)
assert os.path.isdir(directory)
numSep = directory.count(os.path.sep)
for walkRoot, walkDirs, walkFiles in os.walk(directory):
yield walkRoot, walkDirs, walkFiles
        numSepThis = walkRoot.count(os.path.sep)
        if numSep + level <= numSepThis:
            del walkDirs[:]
if __name__ == "__main__":
    # For loop that repeats through your specified directory 'path'; assumes that video and subtitle files are stored
    # in separate folders under your root path. No subdirectories are handled.
for root, dirs, files in walkLevel(path):
newName = ""
oldName = ""
# repeats for each file in the folder
for name in files:
oldName = name
            # Only rename if it's a video or subtitle file.
if name.find(".mkv") != -1 or name.find(".mp4") != -1 or name.find(".avi") != -1 or name.find(".srt") != -1:
newName = name
# Checks if the word is properly named. If not, continue and rename it.
if newName.find("sample") == -1 and re.match(correctWordRegex, newName[:len(newName) - 4]) is None:
fileExtension = newName[len(newName) - 4:]
# If one file in the directory has already been renamed, no need to rename the second file
# (All files should have the same name, apart from the file extension)
if len(files) > 1 and re.match(correctWordRegex, files[0][:len(files[0]) - 4]) is not None:
newName = files[0][:len(files[0]) - 4] + fileExtension
# Else, build the string.
else:
newNameArr = splitString(newName)
newNameArr = removeWords(newNameArr)
newName = " ".join(newNameArr) + fileExtension
renameFile(root, name, newName)
else:
if root not in renameErrors:
renameErrors.append(root + "\\" + name)
if newName != oldName and newName != "":
joinPath = os.path.join(path, newName[:len(newName) - 4])
renameDirectory(root, joinPath)
|
# Price = a
# Payment = b
a = float(input("Suggested price: "))
b = float(input("Proposed payment: "))
if (a > b):
    x = round(a - b, 2)  # round() returns a new value rather than rounding in place
    msg = "Amount missing: "
else:
    x = round(b - a, 2)
    msg = "Change due: "
print(msg, x)
|
import logging

class setlogger():
    # Note: the statements in this class body run once, at import time, and
    # configure the root logger as a side effect of defining the class.
    # Root Logger
rootLogger = logging.getLogger('')
rootLogger.setLevel(logging.DEBUG)
# log file handler
fh = logging.FileHandler('LOG_WT_SENSORS.txt')
# Format the log message
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
rootLogger.addHandler(fh)
#Other Logger Setting (formats, etc. for future)
loggerT = logging.getLogger('transferLabData')
loggerE = logging.getLogger('executer')
|
#!/usr/bin/python3
#-*- coding:utf-8 -*-
"""
创建正则表达式,使用前瞻和后顾来保证手机号前后不应该出现数字
version:1.0
date:2019\5\30
"""
import re
def main():
    pattern = re.compile(r'(?<=\D)(1[38]\d{9}|14[57]\d{8}|15[0-35-9]\d{8}|17[678]\d{8})(?=\D)')
    sentence = '''
    Important things get said 8140123456789 times.
    The phone number in the document is 13512346789, not 15600998765 and not 110 or 119;
    Wang Dachui's real phone number is 15600998765.
    '''
    # find all matches and store them in a list
    mylist = re.findall(pattern, sentence)
    print(mylist)
    print("---------- divider ----------")
    # iterate over the match objects and print the matched content
    for temp in pattern.finditer(sentence):
        print(temp.group())
    print("---------- divider ----------")
    # find every match by repeatedly calling search with an explicit start position
    m = pattern.search(sentence)
    while m:
        print(m.group())
        m = pattern.search(sentence, m.end())
if __name__=='__main__':
main()
|
from django.contrib import admin
from .models import Trail, Item, Location, Game, Action, Event, Context
# Register your models here.
class ItemAdmin(admin.ModelAdmin):
list_display = ('id', 'item_name', 'item_description')
class LocationAdmin(admin.ModelAdmin):
list_display = ('id', 'location_name', 'location_description', 'location_visit_event')
class TrailAdmin(admin.ModelAdmin):
list_display = ('id', 'trail_name', 'trail_description', 'trail_mission', 'trail_total_items')
class GameAdmin(admin.ModelAdmin):
list_display = ('id', 'game_name', 'game_mission', 'game_total_items')
class ActionAdmin(admin.ModelAdmin):
list_display = ('id', 'action_name', 'action_event', 'action_verb', 'action_item')
class ContextAdmin(admin.ModelAdmin):
list_display = ('id', 'context_index', 'context_text', 'context_location')
class EventAdmin(admin.ModelAdmin):
list_display = ('id', 'event_name')
admin.site.register(Item, ItemAdmin)
admin.site.register(Location, LocationAdmin)
admin.site.register(Trail, TrailAdmin)
admin.site.register(Game, GameAdmin)
admin.site.register(Action, ActionAdmin)
admin.site.register(Context, ContextAdmin)
admin.site.register(Event, EventAdmin)
|
import pandas as pd
import numpy as np
import turicreate
from surprise import NormalPredictor
from surprise import Dataset
from surprise import Reader
from surprise.model_selection import cross_validate
from surprise import SVD
from collections import defaultdict
def predict(ratings, similarity, type='user'):
if type == 'user':
mean_user_rating = ratings.mean(axis=1)
#We use np.newaxis so that mean_user_rating has same format as ratings
ratings_diff = (ratings - mean_user_rating[:, np.newaxis])
pred = mean_user_rating[:, np.newaxis] + similarity.dot(ratings_diff) / np.array([np.abs(similarity).sum(axis=1)]).T
elif type == 'item':
pred = ratings.dot(similarity) / np.array([np.abs(similarity).sum(axis=1)])
return pred
def get_top_n(predictions, n=10):
'''Return the top-N recommendation for each user from a set of predictions.
Args:
predictions(list of Prediction objects): The list of predictions, as
returned by the test method of an algorithm.
n(int): The number of recommendation to output for each user. Default
is 10.
Returns:
A dict where keys are user (raw) ids and values are lists of tuples:
[(raw item id, rating estimation), ...] of size n.
'''
# First map the predictions to each user.
top_n = defaultdict(list)
for uid, iid, true_r, est, _ in predictions:
top_n[uid].append((iid, est))
# Then sort the predictions for each user and retrieve the k highest ones.
for uid, user_ratings in top_n.items():
user_ratings.sort(key=lambda x: x[1], reverse=True)
top_n[uid] = user_ratings[:n]
return top_n
r_cols = ['user_id', 'item_id', 'rating']
ratings = pd.read_csv('user-id-sentiment-category_and_score', names=r_cols)
items = pd.read_csv('item-id', names=['item_id', 'item_name', 'placeholder'])
users = pd.read_csv('user-id',names=['user_id', 'user_name', 'twitter_id'])
n_items = ratings.item_id.unique().shape[0]
n_users = ratings.user_id.unique().shape[0]
data_matrix = np.zeros((n_users, n_items))
train_data = turicreate.SFrame(ratings)
#Training the model
item_sim_model = turicreate.item_similarity_recommender.create(train_data, user_id='user_id', item_id='item_id', target='rating', similarity_type='cosine')
#Making recommendations
item_sim_recomm = item_sim_model.recommend(users=[1,2,3,4,5],k=5)
item_sim_recomm.print_rows(num_rows=25)
reader = Reader(rating_scale=(-1, 1))
data = Dataset.load_from_df(ratings[['user_id', 'item_id', 'rating']], reader)
trainset = data.build_full_trainset()
cross_validate(NormalPredictor(), data, cv=2)
algo = SVD()
cross_validate(algo, data, measures=['RMSE', 'MAE'], cv=5, verbose=True)
algo.fit(trainset)
testset = trainset.build_anti_testset()
predictions = algo.test(testset)
top_n = get_top_n(predictions, n=5)
# Print the recommended items for each user
for uid, user_ratings in top_n.items():
print(uid, [iid for (iid, _) in user_ratings])
# for line in ratings.itertuples():
# data_matrix[line[1]-1, line[2]-1] = line[3]
#
# user_similarity = pairwise_distances(data_matrix, metric='cosine')
# item_similarity = pairwise_distances(data_matrix.T, metric='cosine')
# user_prediction = predict(data_matrix, user_similarity, type='user')
# item_prediction = predict(data_matrix, item_similarity, type='item')
# print(user_prediction)
# print(item_prediction)
|
from django import template
from viewer.views.shared_code import glob_manager_data
from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag(takes_context=True)
def get_urls_header(context):
try:
id_corpus = context.request.session['viewer__viewer__current_corpus']
except KeyError:
return []
return glob_manager_data.get_setting_for_corpus('urls_header', id_corpus)
@register.simple_tag(takes_context=True)
def get_state_sorted(context, field):
current_corpus = context.request.session['viewer__viewer__current_corpus']
list_sorted_columns = context.request.session[current_corpus]['viewer__viewer__sorted_columns']
for obj_sorted_column in list_sorted_columns:
if obj_sorted_column['field'] == field:
return obj_sorted_column['order']
return 'None'
@register.simple_tag(takes_context=True)
def get_width_filters(context):
current_corpus = context.request.session['viewer__viewer__current_corpus']
return context.request.session[current_corpus]['viewer__width_filters']
@register.simple_tag(takes_context=True)
def get_is_collapsed_div_filters(context):
current_corpus = context.request.session['viewer__viewer__current_corpus']
return context.request.session[current_corpus]['viewer__is_collapsed_div_filters']
@register.simple_tag(takes_context=True)
def get_has_template_view_item(context):
id_corpus = context.request.session['viewer__viewer__current_corpus']
    return (glob_manager_data.get_setting_for_corpus('template_html', id_corpus) is not None
            or glob_manager_data.get_setting_for_corpus('external_source', id_corpus) is not None)
@register.simple_tag(takes_context=True)
def get_is_collapsed_div_tags(context):
current_corpus = context.request.session['viewer__viewer__current_corpus']
return context.request.session[current_corpus]['viewer__is_collapsed_div_tags']
@register.simple_tag(takes_context=True)
def get_is_collapsed_div_selections(context):
current_corpus = context.request.session['viewer__viewer__current_corpus']
return context.request.session[current_corpus]['viewer__is_collapsed_div_selections']
@register.simple_tag(takes_context=True)
def get_is_collapsed_div_settings(context):
current_corpus = context.request.session['viewer__viewer__current_corpus']
return context.request.session[current_corpus]['viewer__is_collapsed_div_settings']
@register.simple_tag(takes_context=True)
def get_is_allowed_editing(context):
id_corpus = context.request.session['viewer__viewer__current_corpus']
return glob_manager_data.get_has_access_to_editing(id_corpus, context.request)
@register.simple_tag(takes_context=True)
def get_has_secret_token_editing(context):
id_corpus = context.request.session['viewer__viewer__current_corpus']
return glob_manager_data.has_corpus_secret_token_editing(id_corpus)
@register.simple_tag(takes_context=True)
def get_values_filter(context, filter_custom):
current_corpus = context.request.session['viewer__viewer__current_corpus']
return context.request.session[current_corpus]['viewer__viewer__filter_custom'][filter_custom['data_field']]
@register.simple_tag(takes_context=True)
def get_tags(context):
current_corpus = context.request.session['viewer__viewer__current_corpus']
return context.request.session[current_corpus]['viewer__viewer__filter_tags']
@register.filter
def in_columns_checked(key, request):
current_corpus = request.session['viewer__viewer__current_corpus']
return key in request.session[current_corpus]['viewer__viewer__columns']
@register.simple_tag(takes_context=True)
def get_count_items(context, tag):
current_corpus = context.request.session['viewer__viewer__current_corpus']
related_name = glob_manager_data.get_setting_for_corpus('database_related_name', current_corpus)
return getattr(tag, related_name).count()
@register.filter
def get(item, field):
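    # Dict-style lookup first; otherwise walk '__'-separated attributes,
    # falling back to the item's id if nothing matches.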
try:
return item[field]
except TypeError:
try:
value = item
for attribute in field.split('__'):
value = getattr(value, attribute)
return value
except AttributeError:
return item.id
@register.filter
def get_type_field(field, settings):
return settings['data_fields'][field]['type'].lower()
@register.filter
def get_display_name(field, settings):
return settings['data_fields'][field]['display_name']
@register.filter
def display_as_tag_classes(list_tags):
result = ''
try:
for tag in list_tags:
result += 'tag_' + str(tag.id) + ' '
except TypeError:
for tag in list_tags.all():
result += 'tag_' + str(tag.id) + ' '
return result.strip()
@register.simple_tag(takes_context=True)
def get_dark_mode(context):
try:
return context.request.session['viewer__dark_mode']
except KeyError:
context.request.session['viewer__dark_mode'] = False
return False
@register.simple_tag(takes_context=True)
def get_html_dark_mode(context):
html = """
<label class="switch align-self-center mb-0 ml-3" title="Toggle dark mode">
<input id="button_toggle_dark_mode" type="checkbox" {checked}>
<span class="slider round"></span>
</label>
"""
is_dark_mode = False
try:
is_dark_mode = context.request.session['viewer__dark_mode']
except KeyError:
context.request.session['viewer__dark_mode'] = is_dark_mode
return mark_safe(
html.format(
checked = 'checked' if is_dark_mode else ''
)
) |
from django.conf.urls.defaults import *
from django.conf import settings
from django.contrib import admin
__copyright__ = "Copyright 2011 Red Robot Studios Ltd."
__license__ = "GPL v3.0 http://www.gnu.org/licenses/gpl.html"
admin.autodiscover()
urlpatterns = patterns('',
(r'', include('openelm.public.urls')),
(r'accounts/', include('openelm.accounts.urls')),
(r'management/', include('openelm.management.urls')),
(r'^admin/', include(admin.site.urls)),
)
if settings.DEBUG:
# Serve media files through dev server.
urlpatterns += patterns('',
(r'^media/(v\d*/)?(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
)
|
from distutils.core import setup
setup(
name= 'pyfinder',
version='0.5',
description= ' Look for files and text inside files',
long_description= open('README').read(),
py_modules= ['pyfinder'],
    author= 'Giovanni C. Oberti', # Taken from "Python Guida completa" by Marco Buttu
author_email= 'giovanni.oberti@goldenbyte.it',
license='BSD',
keywords= ' python generators distutils',
scripts= ['scripts/pyfinder'],
platforms= 'all',
classifiers= [
'Intended Audience :: Education',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.3',
'Topic :: Documentation',
'Topic :: Education :: Testing',
'Topic :: Text Processing :: Filters',
'Topic :: Utilities'
]
) |
def calculate():
operation = input('''
add for addition
sub for subtraction
mul for multiplication
div for division
''')
num_1 = int(input())
num_2 = int(input())
if operation == 'add':
print('{} + {} = {}'.format(num_1, num_2,num_1 + num_2))
elif operation == 'sub':
print('{} - {} = {}'.format(num_1, num_2,num_1 - num_2))
elif operation == 'mul':
print('{} * {} = {}'.format(num_1, num_2, num_1 * num_2))
elif operation == 'div':
print('{} / {} = {}'.format(num_1, num_2,num_1 / num_2))
else:
print('invalid input.')
calculate()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-03-29 05:54
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user_models', '0014_auto_20180329_0544'),
]
operations = [
migrations.AlterField(
model_name='connection',
name='created_on',
field=models.DateTimeField(default=datetime.datetime(2018, 3, 29, 5, 54, 41, 940491)),
),
migrations.AlterField(
model_name='loginentry',
name='auth_token',
field=models.CharField(default=b'93dadd53-be58-4a10-ab6e-68b974bb577c', max_length=512),
),
migrations.AlterField(
model_name='loginentry',
name='login_time',
field=models.DateTimeField(default=datetime.datetime(2018, 3, 29, 5, 54, 41, 941962)),
),
migrations.AlterField(
model_name='question',
name='question_time',
field=models.DateTimeField(default=datetime.datetime(2018, 3, 29, 5, 54, 41, 943127)),
),
]
|
# Generated by Django 3.0 on 2021-04-06 13:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('cadastro', '0002_auto_20210405_1130'),
]
operations = [
migrations.CreateModel(
name='Votar',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('qtdVotos', models.IntegerField(blank=True, null=True, verbose_name='Quantidade de votos')),
('opcao', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cadastro.OpcaoVoto', verbose_name='Opção')),
('pessoa', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cadastro.Pessoa', verbose_name='Pessoa')),
('votacao', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cadastro.Votacao', verbose_name='Votação')),
],
),
]
|
class Solution(object):
def canFinish(self, numCourses, prerequisites):
"""
:type numCourses: int
:type prerequisites: List[List[int]]
:rtype: bool
"""
no_circle_course = set()
temp_visited = set()
def is_circle_course(n):
if n in temp_visited:
return True
elif n in no_circle_course:
return False
else:
temp_visited.add(n)
for c1, c2 in prerequisites:
if c1 == n:
if is_circle_course(c2):
return True
temp_visited.remove(n)
no_circle_course.add(n)
return False
for n in range(numCourses):
if is_circle_course(n):
return False
return True
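# Note: scanning the full prerequisites list at every DFS step makes this
# O(V*E); building an adjacency list up front would reduce it to O(V + E).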
if __name__ == "__main__":
s = Solution()
print(s.canFinish(2, [[1,0]]))
print(s.canFinish(2, [[1,0], [0, 1]]))
print(s.canFinish(12, [[5, 11], [11, 2], [7, 11],
[7, 8], [8, 9], [11, 9],
[3, 8], [3, 10]]))
|
#!/bin/python3
# link problem: https://www.hackerrank.com/challenges/non-divisible-subset/problem
import math
import os
import random
import re
import sys
#
# Complete the 'nonDivisibleSubset' function below.
#
# The function is expected to return an INTEGER.
# The function accepts following parameters:
# 1. INTEGER k
# 2. INTEGER_ARRAY s
#
def nonDivisibleSubset(k, s):
frequency = [0] * k
for num in s:
frequency[num % k] += 1
if k % 2 == 0:
frequency[k//2] = min(frequency[k//2], 1)
result = min(1, frequency[0])
for i in range(1, k//2 + 1):
result += max(frequency[i], frequency[k - i])
return result
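# Quick sanity check against the sample from the problem statement:
# for k=3 and s=[1, 7, 2, 4] the largest valid subset has size 3.
assert nonDivisibleSubset(3, [1, 7, 2, 4]) == 3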
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
first_multiple_input = input().rstrip().split()
n = int(first_multiple_input[0])
k = int(first_multiple_input[1])
s = list(map(int, input().rstrip().split()))
result = nonDivisibleSubset(k, s)
fptr.write(str(result) + '\n')
fptr.close()
|
import numpy as np
import scipy.stats as stats
from matplotlib import pyplot as plt
def calculate_covariance(x1, x2, length_scale):
"""
    Input: x1, x2 (numpy vectors), length_scale (float, the kernel length scale)
    Output: S, covariance matrix representing the covariance of x1 and x2
"""
nrows = len(x1)
ncols = len(x2)
S = np.zeros([nrows, ncols])
for i in range(nrows):
for j in range(ncols):
z = -.5*(np.abs(x1[i]-x2[j])/length_scale)**2
S[i,j] = np.exp(z)
return S
# 1. plot sample functions from gaussian process
x_star = np.linspace(-5,5,50) # define function domain
sigma = calculate_covariance(x_star, x_star, 1) # calculate covariance
# generate a number of samples from the process
nsamples = 3
values = np.zeros([len(x_star), nsamples])
for i in range(nsamples):
    mu = np.zeros(len(x_star))  # zero mean function
    # scipy's multivariate_normal does the sampling; allow_singular guards
    # against a numerically singular covariance matrix
    values[:,i] = stats.multivariate_normal(mean=mu, cov=sigma, allow_singular=True).rvs()
# R cbind just turns it into a matrix? https://stat.ethz.ch/R-manual/R-devel/library/base/html/cbind.html
# plot the results
plt.plot(x_star, values)
plt.show()
# 2. Assume known data points
known_points = np.array([[-4,-3,-1,0,2],
                         [-2,0,1,2,-1]])
x = known_points[0,:]  # observed inputs (the R source's "x <- f$x")
y = known_points[1,:]  # observed outputs
sigma_xx = calculate_covariance(x,x, 1)
sigma_xxs = calculate_covariance(x,x_star, 1)
sigma_xsx = calculate_covariance(x_star,x, 1)
sigma_xsxs = calculate_covariance(x_star,x_star, 1)
# GP posterior: mean = S_*x S_xx^-1 y, covariance = S_** - S_*x S_xx^-1 S_x*
known_star_bar = np.dot(sigma_xsx, np.linalg.solve(sigma_xx, y))
known_star_sigma = sigma_xsxs - np.dot(sigma_xsx, np.linalg.solve(sigma_xx, sigma_xxs))
#left panel of the figure
nsamples = 3
values = np.zeros([len(x_star), nsamples])
for i in range(nsamples):
    values[:,i] = stats.multivariate_normal(mean=known_star_bar, cov=known_star_sigma, allow_singular=True).rvs()
# Plot the results including the mean function
# and constraining data points
plt.plot(x_star, values)
plt.show()
# 3. Add in noise
# The standard deviation of the noise
sigma_n = 0.1
# Recalculate the mean and covariance functions, now with a noise term on the diagonal
A = sigma_xx + sigma_n**2 * np.identity(len(x))
known_star_bar_n = np.dot(sigma_xsx, np.linalg.solve(A, y))
known_star_n_sigma = sigma_xsxs - np.dot(sigma_xsx, np.linalg.solve(A, sigma_xxs))
# Redraw the sample functions
values = np.zeros([len(x_star), nsamples])
for i in range(nsamples):
    values[:,i] = stats.multivariate_normal(mean=known_star_bar_n, cov=known_star_n_sigma, allow_singular=True).rvs()
|
"""
COMP 208 - Assignment 4
"""
import skimage.io as i
import numpy as np
image_path = input('Please enter the path to the saved "mountain.png" image: ') #The user is asked to give the file location in order to be able to locate mountain.png.
if image_path[-1] != "\\": #This adds the \\ necessary at the end of the folder name in case the user did not do it.
image_path += "\\"
class ImageAnalysis: #Creating the ImageAnalysis class
def __init__(self, image_path):
self.image = i.imread(image_path + "mountain.png") #This searches for the file depending on where it has been saved and loads it.
self.image_path = image_path #The attribute image_path is set to image_path.
def dimension(self):
print(' ') #Printing an empty space to make the following text easier to distinguish in the console.
print('This is the size of the image: (', str(self.image.shape[1]), ',', str(self.image.shape[0]), ',', str(self.image.shape[-1]), ')')
print(' ')
def show(self):
i.imshow(self.image) #This implements the show method which displays mountain.png on the screen.
i.show()
def retrieveHidden(self):
hidden_image = np.empty([131, 100, 3], dtype = int) #These are the known dimensions of the hidden image, the type is integers as we are dealing with whole numbers.
for row in range(100): #Basically (0, 100) dimension...
for col in range(131): #These numbers come from the dimension of the hidden image.
hidden_image[col][row] = self.image[col*11][row*11] #We multiply by 11 as we know that the 12th pixel is the one that was replaced by the secret image.
hidden_image = hidden_image.astype("uint8") #The image needs to be converted to uint8 prior to saving to suppress a lossy conversion error.
i.imsave(image_path + "hidden.png", hidden_image) #Saving the newly retrieved hidden image in the same folder as the original mountain image.
def fix(self):
fixed_image = self.image
fixed_image = fixed_image.astype("int64") #Converting the array declared in image into int64 type.
average_color = np.zeros(shape = 3) #Taking the average of the 3 surrounding pixels.
for row in range(100): #These numbers come from the dimension of the hidden image given in the assignment instructions.
for col in range(131): #Looking at the each item in the rows and columns of the image, basically axis 0 and 1.
fixed_cplus = fixed_image[col*11+1][row*11] #The following comes from the reverse engineering of the following "The algorithm is simple, you just need to know that every 2 consecutive pixels from the hidden image are separated by 11 pixels in the mountain image."
            fixed_cminus = fixed_image[col*11-1][row*11] #Setting these variables to make the following lines more readable.
fixed_rplus = fixed_image [col*11][row*11+1] #The 4 possible cases are either above, under, left or right of the pixel.
fixed_rminus = fixed_image [col*11][row*11-1]
if col != 0 and row !=0 : #This is for pixels positioned in the middle, the average is taken normally.
average_color = (fixed_cplus + fixed_cminus + fixed_rplus + fixed_rminus)//4 #Floored division because we want a whole number (integer) answer.
elif col != 0 and row == 0: #This is for when the pixel is at the left or right edge.
average_color = (fixed_cplus + fixed_cminus + fixed_rplus)//3
elif col == 0 and row != 0: #This is for the above or bottom edges.
                average_color = (fixed_cplus + fixed_rplus + fixed_rminus)//3 #Dividing by 3 as we are taking the average of 3 pixels.
elif col == 0 and row == 0: #This is for the corner pixels
average_color = (fixed_cplus + fixed_rplus)//2
fixed_image[col*11][row*11] = average_color
fixed_image = fixed_image.astype("uint8") #The image needs to be converted to uint8 prior to saving to suppress a lossy conversion error.
i.imsave(image_path + "clean_mountain.png", fixed_image) #Saving the newly retrieved clean mountain image in the same folder as the original mountain image.
def averageRGB(self):
image = i.imread(self.image_path + "hidden.png") #We will be working from the hidden file created.
average_rgb = np.zeros(shape = (np.shape(image)[0], np.shape(image)[1])) #This creates an array of 0s of the dimension of the image, which should be of 131 by 100.
for row in range(np.shape(image)[0]): #The height is represented by the rows and the width is represented by the columns.
for col in range(np.shape(image)[1]):
average_rgb[row][col] = round(np.average(image[row][col])) #This uses the python average function and rounds it up to 0 decimals using round().
np.savetxt(image_path + "RGB.csv", average_rgb, delimiter = ",") #Saving the newly retrieved csv file in the same folder as the original mountain image.
def load_rgb_from_file(self, file_name):
self.file_name = file_name
file = np.loadtxt(image_path + self.file_name, delimiter = ",")
        nbre_of_lines = np.shape(file)[0] #Setting these variables to make the following lines more readable.
nbre_of_columns = np.shape(file)[1]
RGB_load = np.zeros((nbre_of_lines, nbre_of_columns, 3), dtype = np.uint8) #The type is set as uint8 to avoid errors.
for row in range(np.shape(RGB_load)[0]): #The height is represented by the rows and the width is represented by the columns.
for col in range(np.shape(RGB_load)[1]):
n = file[row][col] #The data from the file is taken into the variable n.
RGB_load[row][col] = [n, n, n] #Then it is transformed using the RGB_load to make it black and white.
RGB_load = RGB_load.astype("uint8") #The image needs to be converted to uint8 prior to saving to suppress a lossy conversion error.
i.imsave(image_path + "black_white_hidden.png", RGB_load) #Saving the newly retrieved black and white hidden image in the same folder as the original image.
i.imshow(RGB_load) #The picture should get queued to be displayed once i.show() is ran.
i.show() #The picture should get displayed.
io = ImageAnalysis(image_path) #This runs everything as asked in the assignment.
print(io)
io.retrieveHidden()
io.fix()
io.dimension()
io.show()
io.averageRGB()
io.load_rgb_from_file("RGB.csv")
|
from socket import socket, AF_INET, SOCK_STREAM
server = socket(AF_INET, SOCK_STREAM)
server.bind(('', 1234))
server.listen(5)
HEADERSIZE = 10
client, address = server.accept()
# Connection acknowledgement
print(f"Connection with {address} has been established")
msg = "Congratulations you have connected with the server!!\n"
msg = f"{len(msg):<{HEADERSIZE}}" + msg
client.send(bytes(msg, "utf-8"))
# as long as client send requests, send back responses
# until client wishes to terminate
while True:
    msg = ""
    terminate = False
    instr = client.recv(24).decode("utf-8").upper()
    if instr == "GET":
        print(f"{instr} was received")
        msg += "HTTP GET response\n"
    elif instr == "POST":
        print(f"{instr} was received")
        msg += "HTTP POST response\n"
    elif instr == "PUT":
        print(f"{instr} was received")
        msg += "HTTP PUT response\n"
    elif instr == "PATCH":
        print(f"{instr} was received")
        msg += "HTTP PATCH response\n"
    elif instr == "HEAD":
        print(f"{instr} was received")
        msg += "HTTP HEAD response\n"
    elif instr == "END":
        print("Request to terminate was received")
        print("Session terminating")
        msg += "Termination by yours truly"
        terminate = True
    else:
        print("invalid instruction was received")
        msg += "Invalid instruction\n"
        msg += "Enter GET, POST, PUT, PATCH, or HEAD\n"
    # Length-prefix and send the response for every instruction,
    # not just END/invalid (the original only sent on those two paths)
    msg = f"{len(msg):<{HEADERSIZE}}" + msg
    client.send(bytes(msg, "utf-8"))
    if terminate:
        client.close()
        break
server.close()
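# A minimal client for this length-prefixed protocol might look like the sketch
# below (hedged example, not part of the original server; run it from another
# process while the server is listening on localhost:1234):
#
#   from socket import socket, AF_INET, SOCK_STREAM
#   c = socket(AF_INET, SOCK_STREAM)
#   c.connect(('localhost', 1234))
#   header = c.recv(10)            # fixed-width length prefix (HEADERSIZE)
#   body = c.recv(int(header))     # then read exactly that many bytes
#   print(body.decode("utf-8"))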
|
import hail as hl
import scipy.stats as spst
import pytest
def test_deprecated_binom_test():
assert hl.eval(hl.binom_test(2, 10, 0.5, 'two.sided')) == \
pytest.approx(spst.binom_test(2, 10, 0.5, 'two-sided'))
def test_binom_test():
arglists = [[2, 10, 0.5, 'two-sided'],
[4, 10, 0.5, 'less'],
[32, 50, 0.4, 'greater']]
for args in arglists:
assert hl.eval(hl.binom_test(*args)) == pytest.approx(spst.binom_test(*args)), args
|
# coding: utf-8
# In[59]:
# Import modules into namespace
import pandas as pd
import numpy as np
# In[66]:
# Read xlsx doc
filepath = r'C:\Users\brandon.terrebonne\Desktop\organization_gen\albertsons_dsd_spend_20160624.xlsx'
df = pd.read_excel(filepath)
# In[67]:
# Add missing columns, which will now have null values
# Prompt for the source column name of each standard field; hitting enter
# keeps the standard name (a missing column will simply hold null values).
new_names = ['company_id', 'company_name', 'address_1', 'address_2',
             'city', 'state', 'postal_code', 'country', 'tax_id',
             'reserve_percentage', 'reserve_amount',
             'reserve_invoice_priority', 'reserve_before_adjustments']
old_names = []
for field in new_names:
    choice = raw_input('%s = ' % field)
    old_names.append(choice if choice != "" else field)
df2 = pd.DataFrame(df, columns=old_names)
# Rename columns to the standard field names
df2.rename(columns=dict(zip(old_names, new_names)), inplace=True)
# In[69]:
#This works in resolving issue #2 on Github
def check_type(x):
p = x.split('.')
try:
if x == 'nan':
y = np.nan
elif float(x) / float(p[0]) > 1.0:
y = x
else:
y = p[0]
except ValueError:
y = x
return y
df2['company_id'] = df2['company_id'].astype(str)
df2['company_id'] = df2['company_id'].map(check_type)
# TODO - check if this is needed, which it probably is not
# Strip leading/trailing spaces
def strip(text):
try:
return text.strip()
except AttributeError:
return text
df2['company_id'] = df2['company_id'].map(strip)  # apply element-wise; calling strip on the Series itself was a no-op
# In[70]:
### Cleaning reserves###
def filling(column):
new_column = []
for item in column:
if item == '22222222222222222222':
item = np.nan
new_column.append(item)
else:
item = float(item)
item = ("{0:.2f}".format(round(item,2)))
new_column.append(item)
return new_column
df2['reserve_amount'] = df2['reserve_amount'].fillna(value=22222222222222222222)
df2['reserve_amount'] = df2['reserve_amount'].astype(str)
df2['reserve_amount'] = filling(df2['reserve_amount'])
df2.fillna(value="", inplace=True)
# In[71]:
### Add leading zeros ###
def leading(x, y):
new_id = []
field_length = int(y)
for item in x:
try:
leng = field_length - len(str(item))
if len(str(item)) < 1:
new_id.append(item)
elif len(str(item)) < field_length:
item2 = str("00000000000000000000")
item3 = ""
item3 = str(item2[0:leng]) + str(item)
new_id.append(item3)
else:
new_id.append(item)
except:
new_id.append(item)
return new_id
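# Example: leading(['42', '123456'], 5) -> ['00042', '123456']
# (ids shorter than the field length are zero-padded on the left).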
# User input 1
choice_company_id_leading = raw_input('If company_id has leading zeros, input the field length (otherwise just hit enter): ')
if choice_company_id_leading != "":
df2['company_id'] = leading(df2['company_id'], choice_company_id_leading)
else:
df2['company_id'] = df2['company_id']
# New DF if user does not require reserve info
df3 = pd.DataFrame(df2, columns=['company_id','company_name','address_1','address_2',
'city', 'state', 'postal_code','country','tax_id'])
# User input 2, Save data to CSV
while True:
choice_reserve = raw_input('So let\'s talk about this... Do you REALLY want to include reserve information in this file (Yes or No):')
if choice_reserve.lower() == 'yes':
df2.to_csv(r'C:\Users\brandon.terrebonne\Desktop\organization_gen\test_organization_4_20160624.csv', sep=',', index=False, encoding='utf-8')
print "\nYour file includes reserve information and is now ready!\nDisclaimer: This file will override current supplier-level reserves!!!"
break
if choice_reserve.lower() == 'no':
df3.to_csv(r'C:\Users\brandon.terrebonne\Desktop\organization_gen\test_organization_4_20160624.csv', sep=',', index=False, encoding='utf-8')
print "\nYour file excludes reserve information and is now ready!"
break
else:
print "\nOh goodness... \'%s\' is not a valid response. Please answer with either \'Yes\' or \'No\'." % (choice_reserve)
continue
|
s = int(input("Enter seconds: "))
h = s//3600
m = (s%3600)//60
t = (s%3600)%60
print(s, "seconds is", h, "hours,", m, "minutes,", t, "seconds.")
|
# -*-coding: utf-8-*-
# Author : Christopher Lee
# License: Apache License
# File : test_query_cache.py
# Date : 2017-05-18 09-08
# Version: 0.0.1
# Description: description of this file.
import logging
import datetime
from db_util import mysql_query, mysql_execute
from werkzeug.contrib.cache import RedisCache
from dataobj import IntField, DatetimeField
from dataobj import Model
from dataobj import StrField
from mycache import query_cache
logging.basicConfig(level=logging.DEBUG)
URL = 'mysql://root:chris@localhost:3306/yunos_new'
class CommonDao(object):
@staticmethod
def execute(sql, args):
# print("execute:", sql, args)
return mysql_execute(sql, args=args, mysql_url=URL, debug=True)
@staticmethod
def query(sql, args):
# print("query:", sql, args)
return mysql_query(sql, args=args, mysql_url=URL, debug=True)
@query_cache
class Folder(Model):
folder_id = IntField(db_column='id', primary_key=True, auto_increment=True)
name = StrField(db_column='name', default='新建文件夹', max_length=255)
icon_url = StrField(not_null=False, max_length=1024)
create_at = DatetimeField(default=datetime.datetime.now)
class Meta:
table_name = 'folder'
dao_class = CommonDao
cache_db_factory = lambda: RedisCache(db=1)
cache_conditions = {
'*': 3600,
'folder_id': 3600,
'name': 3600,
'folder_id__lt': 3600,
'folder_id__gt + name__contains': 3500
}
def test_query():
# print(Folder.objects.count())
for x in range(30, 35):
Folder.objects.get(folder_id=x)
# print()
results = Folder.objects.all().order_by('folder_id', descending=True).limit(5)
print("Length of results: {}".format(len(results)))
print("First item of results: {}".format(results.first()))
print("Last item of results: {}".format(results.last()))
print("List slicing: {}".format(results[:4]))
def test_dump_new(i):
# Folder.objects.all().order_by("folder_id").last()
folder = Folder()
folder.name = '新建文件夹_{}'.format(i)
folder.icon_url = "https://img.moviewisdom.cn/folder_icon_{}.png".format(i)
folder.dump()
# print(Folder.objects.all().order_by("folder_id", descending=True).first())
def test_update():
last_id = Folder.objects.all().order_by("folder_id", descending=True).first().folder_id
folder = Folder.objects.get(folder_id=last_id)
print(folder)
    print(folder.update(name="测试更新-{}-{}".format(folder.folder_id, datetime.datetime.now().isoformat())))
# folder = Folder.objects.all().order_by("folder_id", descending=True).first()
folder = Folder.objects.get(folder_id=last_id)
print(folder)
def test_delete():
last_id = Folder.objects.all().order_by("folder_id").last().folder_id
folder = Folder.objects.get(folder_id=last_id)
print(folder)
print(folder.delete())
folder = Folder.objects.all().order_by("folder_id").last()
print(folder)
def test_multi_operations():
    # First create 10 folders
# for _ in range(5):
# test_dump_new("Chris, {}".format(_))
    # Then load these folders in various ways
for x in Folder.objects.filter(folder_id__gt=8, name__contains='Chris'):
print(x)
print()
# test_dump_new('Chris, new')
f = Folder.objects.all().order_by("folder_id").last()
# f.update(name='Chris Updated')
f.delete()
def test_all_cache():
Folder.objects.all_cache()
print(Folder.objects.get(name="新建文件夹_新增"))
print(Folder.objects.get(folder_id=10))
print(Folder.objects.filter(name="新建文件夹_新增").order_by('folder_id', descending=True).limit(2, 100)[:])
print(Folder.objects.filter(name="新建文件夹_新增")[:])
def test_clear_cache():
Folder.objects.clear_cache()
if __name__ == '__main__':
# test_query()
# for _ in range(20):
# test_dump_new('X')
# test_update()
#
# for _ in range(20):
# test_delete()
# test_multi_operations()
# test_all_cache()
# test_clear_cache()
x = Folder.objects.get(folder_id=10)
print(x)
|
import time
from selenium import webdriver
from selenium.webdriver import DesiredCapabilities
class TestDemo:
def setup(self):
url = "http://127.0.0.1:5001/wd/hub"
url1 = "http://193.112.47.128:5001/wd/hub"
chrome_options = webdriver.ChromeOptions()
        # Work around the "DevToolsActivePort file doesn't exist" error
        # chrome_options.add_argument('--no-sandbox')
        # Set the browser resolution
        # chrome_options.add_argument('window-size=1920x3000')
        # Google's docs mention this flag is needed to work around a bug
        # chrome_options.add_argument('--disable-gpu')
        # Hide scrollbars for some special pages
        # chrome_options.add_argument('--hide-scrollbars')
        # Skip loading images to speed things up
        # chrome_options.add_argument('blink-settings=imagesEnabled=false')
        # Headless mode; on Linux without display support, startup fails without this flag
        chrome_options.add_argument('--headless')
self.driver = webdriver.Remote(command_executor=url1,
desired_capabilities=DesiredCapabilities.CHROME,
options=chrome_options)
def test_0(self):
self.driver.get("http://www.baidu.com")
time.sleep(1)
a=self.driver.page_source
print("\n","-"*20)
print(a)
print("-"*30)
def teardown(self):
self.driver.quit() |
# -*- coding: utf-8 -*-
"""
Canny edge detection
Jiang Xiaojun
"""
import cv2
import numpy as np
from matplotlib import pyplot as plt
def nothing(x):
pass
img = cv2.imread(r"C:/users/public/pictures/Sample Pictures/shapessm.jpg")
cv2.namedWindow('edges')
L = 80
H = 200
# start the trackbars at the default thresholds rather than 0
cv2.createTrackbar('L','edges',L,255,nothing)
cv2.createTrackbar('H','edges',H,800,nothing)
"""
plt .subplot(121)
plt.imshow(img,cmap='gray')
plt.title("Original Image")
plt.xticks([])
plt.yticks([])
plt .subplot(122)
plt.imshow(edges,cmap='gray')
plt.title("Edge Image")
plt.xticks([])
plt.yticks([])
plt.show()
"""
while(1):
cv2.imshow('image',img)
edges = cv2.Canny(img,L,H)
cv2.imshow('edges',edges)
k = cv2.waitKey(1)&0xFF
if k == 27:
break
L = cv2.getTrackbarPos('L','edges')
H = cv2.getTrackbarPos('H','edges')
|
# (c) Copyright IBM Corp. 2010, 2020. All Rights Reserved.
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
"""Tests using pytest_resilient_circuits"""
from __future__ import print_function
import pytest
from resilient_circuits.util import get_config_data, get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
from .mock_incident import IncidentMock
PACKAGE_NAME = "fn_soar_utils"
FUNCTION_NAME = "soar_utils_close_incident"
# Read the default configuration-data section from the package
config_data = get_config_data(PACKAGE_NAME)
# Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance)
resilient_mock = IncidentMock
def call_soar_utils_close_incident_function(circuits, function_params, timeout=20):
# Fire a message to the function
evt = SubmitTestFunction("soar_utils_close_incident", function_params)
circuits.manager.fire(evt)
event = circuits.watcher.wait("soar_utils_close_incident_result", parent=evt, timeout=timeout)
assert event
assert isinstance(event.kwargs["result"], FunctionResult)
pytest.wait_for(event, "complete", True)
return event.kwargs["result"].value
class TestSoarUtilsCloseIncident:
""" Tests for the soar_utils_close_incident function"""
def test_function_definition(self):
""" Test that the package provides customization_data that defines the function """
func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
assert func is not None
inputs = {
"soar_utils_close_fields": "{\"resolution_id\":9,\"resolution_summary\":\"resolved\"}",
"incident_id": 123
}
output = True
@pytest.mark.parametrize("inputs, expected_results", [(inputs, output)])
def test_success(self, circuits_app, inputs, expected_results):
""" Test calling with sample values for the parameters """
results = call_soar_utils_close_incident_function(circuits_app, inputs)
assert(expected_results == results['content']['success'])
|
from unittest import TestCase, main
from unittest.mock import Mock, MagicMock
from mck import Foo
class TestMck(TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
pass
def tearDown(self):
pass
def test_foo(self):
foo = Foo()
foo.bar = MagicMock()
foo.bar.return_value = 1
res = foo.yaa()
foo.bar.assert_called_with()
self.assertEqual(res, 1)
def test_abc(self):
foo = Foo()
mock = MagicMock(name='bleh')
foo.abc(mock)
mock.something.assert_called_with(mock)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
import sys, os, getopt
from time import sleep
sys.path.append('../lib')
import osutils as utils
try:
from wia import Wia
except:
utils.install_pip('wia')
from wia import Wia
try:
from sense_hat import SenseHat
except:
utils.install_pkg('sense-hat')
from sense_hat import SenseHat
def install():
utils.install_pip('wia')
if not utils.is_executable('npm'):
utils.install_nodejs('6')
#utils.install_pkg('opencv-python')
def temperature(wia, sense, seconds):
    while True:
        temperature = round(sense.get_temperature(), 2)
        wia.Event.publish(name="temperature", data=temperature)
        sleep(int(seconds))
def weather(wia, sense, seconds):
    while True:
        temperature = round(sense.get_temperature(), 2)
        humidity = round(sense.get_humidity(), 2)
        pressure = round(sense.get_pressure(), 2)
        wia.Event.publish(name="temperature", data=temperature)
        wia.Event.publish(name="humidity", data=humidity)
        wia.Event.publish(name="pressure", data=pressure)
        sleep(int(seconds))
def usage():
print("\n%s Usage:" % os.path.basename(__file__))
print("\n\t-a --action install)
print("\n\t-s --sense [ temp | weather ]")
print("\n")
sys.exit(2)
def main(argv):
""" main """
try:
opts, args = getopt.getopt(argv,"a:s:",["action=","sense="])
except getopt.GetoptError:
usage()
if not opts:
usage()
sense = SenseHat()
sense.clear()
wia = Wia()
wia.access_token = os.environ['MY_WIA_TOKEN']
for opt, arg in opts:
if opt in ("-a", "--action"):
if arg == 'install':
install()
else:
usage()
elif opt in ("-s", "--sense"):
            if arg == 'temp':
                temperature(wia, sense, 30)
            elif arg == 'weather':
                weather(wia, sense, 30)
else:
usage()
else:
usage()
if __name__ == "__main__":
main(sys.argv[1:])
|
# Dovzhenko Vitalii
# Lab assignment No. 3_2
#
from PyQt5 import QtWidgets
from PyQt5.uic import loadUi
import sys
class mywindow(QtWidgets.QMainWindow):
def __init__(self):
super(__class__, self).__init__()
loadUi("Lab3_2_view.ui", self)
self.horizontalSlider.valueChanged.connect(self.sliderValue)
self.spinBox.valueChanged.connect(self.spinValue)
def sliderValue(self):
self.progressBar.setValue(self.horizontalSlider.value())
self.spinBox.setValue(self.horizontalSlider.value())
def spinValue(self):
self.progressBar.setValue(self.spinBox.value())
self.horizontalSlider.setValue(self.spinBox.value())
app = QtWidgets.QApplication([])
application = mywindow()
application.show()
sys.exit(app.exec())
|
# A lambda function is a small anonymous function.
# A lambda function can take any number of arguments, but can only have one expression.
# A lambda function that adds 10 to the number passed in as an argument, and print the result:
x = lambda a : a + 10
print(x(5))
# A lambda function that multiplies argument a with argument b and print the result:
x = lambda a, b : a * b
print(x(5, 6))
# A lambda function that sums argument a, b, and c and print the result:
x = lambda a, b, c : a + b + c
print(x(5, 6, 2))
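# Lambdas shine as short inline arguments to higher-order functions; for example,
# as a sort key (illustrative data):
pairs = [(1, 'b'), (2, 'a')]
print(sorted(pairs, key=lambda p: p[1]))  # [(2, 'a'), (1, 'b')]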
|
# Write a Python program to check whether a number is even or odd without using
# the modulus (%) operator.
n=int(input("Enter a Number:\t"))
# n & 1 isolates the least significant bit: 1 for odd numbers, 0 for even ones.
if n&1==1:
print('Number is Odd')
else:
print('Number is Even') |
# estParam.py - functions to estimate hull parameters from principal characteristics
import math
# ---------
def displacement(Cb, T, L, B) : # inputs in unitless, meters, meters, meters
# constants
rho = 1026 #kg/m^3
g = 9.81 #m/s^2
# calculate displacement
nabla = Cb*T*L*B # volume displacement from hull parameters
Disp = nabla*rho*(1/1000) # metric tonnes
return Disp
# ---------
# based on Grubisic 2012
def wettedSurf(Cb, T, L, B) : # inputs in unitless, meters, meters, meters
# constants
rho = 1026 #kg/m^3
g = 9.81 #m/s^2
# calculate displacement
nabla = Cb*T*L*B # volume displacement from hull parameters
# Estimation of wetted surface area
C = 2.61 + (((B/T)*((B/T)-0.244))/81) # unitless
S = C*math.sqrt(L*nabla) #m^2
return S
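# Illustrative usage (made-up principal characteristics for a small workboat,
# not values from Grubisic 2012):
if __name__ == '__main__':
    Cb, T, L, B = 0.45, 1.2, 24.0, 5.5
    print('Displacement: %.1f t' % displacement(Cb, T, L, B))
    print('Wetted surface: %.1f m^2' % wettedSurf(Cb, T, L, B))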
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
class BidirectionalLSTM(nn.Module):
def __init__(self, nIn, nHidden, nOut):
super(BidirectionalLSTM, self).__init__()
self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True)
self.embedding = nn.Linear(nHidden * 2, nOut)
def forward(self, input):
recurrent, _ = self.rnn(input)
T, b, h = recurrent.size()
t_rec = recurrent.view(T * b, h)
output = self.embedding(t_rec) # [T * b, nOut]
output = output.view(T, b, -1)
return output
class SimpleLinear(nn.Module):
def __init__(self, nIn, nOut):
super(SimpleLinear, self).__init__()
self.linear = nn.Linear(nIn, nOut)
def forward(self, x):
timesteps, batch_size = x.size(0), x.size(1)
x = x.view(batch_size*timesteps, -1)
x = self.linear(x)
x = x.view(timesteps, batch_size, -1)
return x
class SimpleLSTM(nn.Module):
def __init__(self, nIn, nHidden):
super(SimpleLSTM, self).__init__()
self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True)
def forward(self, input_):
recurrent, _ = self.rnn(input_)
T, b, h = recurrent.size()
return recurrent
class STN(nn.Module):
def __init__(self, imgH, imgW):
super(STN, self).__init__()
self.imgH = imgH
self.imgW = imgW
self.localization = nn.Sequential(
nn.MaxPool2d(2, stride=2),
nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1),
nn.MaxPool2d(2, stride=2),
nn.ReLU(True),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.MaxPool2d(2, stride=2),
nn.ReLU(True),
nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
nn.MaxPool2d(2, stride=2),
nn.ReLU(True)
)
h, w = imgH//16, imgW//16
# Regressor for the 3 * 2 affine matrix
self.fc_loc = nn.Sequential(
nn.Linear(128 * h * w, 32),
nn.ReLU(True),
nn.Linear(32, 3 * 2)
)
# Initialize the weights/bias with identity transformation
self.fc_loc[2].weight.data.zero_()
self.fc_loc[2].bias.data.copy_(torch.Tensor([1, 0, 0, 0, 1, 0]).float())
def forward(self, x):
xs = self.localization(x)
b, c, h, w = xs.size(0), xs.size(1), xs.size(2), xs.size(3)
xs = xs.view(-1, c*h*w)
theta = self.fc_loc(xs)
theta = theta.view(-1, 2, 3)
grid = F.affine_grid(theta, x.size())
x = F.grid_sample(x, grid)
return x
class CRNN(nn.Module):
def __init__(self, imgH, nc, nclass, nh, stn_flag=False, n_rnn=2, leakyRelu=False):
super(CRNN, self).__init__()
assert imgH % 16 == 0, 'imgH has to be a multiple of 16'
ks = [3, 3, 3, 3, 3, 3, 2]
ps = [1, 1, 1, 1, 1, 1, 0]
ss = [1, 1, 1, 1, 1, 1, 1]
nm = [64, 128, 256, 256, 512, 512, 512]
cnn = nn.Sequential()
def convRelu(i, batchNormalization=False):
nIn = nc if i == 0 else nm[i - 1]
nOut = nm[i]
cnn.add_module('conv{0}'.format(i),
nn.Conv2d(nIn, nOut, ks[i], ss[i], ps[i]))
if batchNormalization:
cnn.add_module('batchnorm{0}'.format(i), nn.BatchNorm2d(nOut))
if leakyRelu:
cnn.add_module('relu{0}'.format(i),
nn.LeakyReLU(0.2, inplace=True))
else:
cnn.add_module('relu{0}'.format(i), nn.ReLU(True))
convRelu(0)
cnn.add_module('pooling{0}'.format(0), nn.MaxPool2d(2, 2)) # 64x16x64
convRelu(1)
cnn.add_module('pooling{0}'.format(1), nn.MaxPool2d(2, 2)) # 128x8x32
convRelu(2, True)
convRelu(3)
cnn.add_module('pooling{0}'.format(2),
nn.MaxPool2d((2, 2), (2, 1), (0, 1))) # 256x4x16
convRelu(4, True)
convRelu(5)
cnn.add_module('pooling{0}'.format(3),
nn.MaxPool2d((2, 2), (2, 1), (0, 1))) # 512x2x16
convRelu(6, True) # 512x1x16
# self.stn_flag = stn_flag
# if stn_flag:
# self.stn = STN(imgH, imgW)
self.cnn = cnn
self.rnn = nn.Sequential(
BidirectionalLSTM(512, nh, nh),
BidirectionalLSTM(nh, nh, nclass))
def forward(self, input):
# conv features
# if self.stn_flag:
# input = self.stn(input)
conv = self.cnn(input)
b, c, h, w = conv.size()
assert h == 1, "the height of conv must be 1"
conv = conv.squeeze(2)
conv = conv.permute(2, 0, 1) # [w, b, c]
# rnn features
output = self.rnn(conv)
return output
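# A quick shape sanity-check for the CRNN above (hedged sketch: nc=1 assumes
# grayscale input; nclass=37 and nh=256 are illustrative values, not from the
# original code):
def _crnn_smoke_test():
    crnn = CRNN(imgH=32, nc=1, nclass=37, nh=256)
    dummy = torch.zeros(2, 1, 32, 100)  # [batch, channels, height, width]
    out = crnn(dummy)
    assert out.size(1) == 2 and out.size(2) == 37  # output is [seq_len, batch, nclass]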
class GravesNet(nn.Module):
def __init__(self, imgH, nh, nclass, depth):
super(GravesNet, self).__init__()
# self.mid_rnn = nn.LSTM(nh, nh, bidirectional=True)
self.fc_in = SimpleLinear(imgH, nh*2)
        self.hidden_layers = [SimpleLSTM(nh*2, nh) for i in range(depth)]
# self.hidden_layers = SimpleLSTM(nh*2, nh)
self.fc_out = SimpleLinear(nh*2, nclass)
self.module = nn.Sequential(self.fc_in, *self.hidden_layers, self.fc_out)
def forward(self, input_):
# conv features
input_ = input_.squeeze(0)
input_ = input_.permute(2, 0, 1).contiguous() # [w, b, c]
output = self.module(input_)
return output |
import io
import os
import json
from packaging import version
import numpy as np
import onnx
import onnx.numpy_helper
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.onnx.symbolic_helper import _default_onnx_opset_version
from pytorch_pfn_extras.onnx import export
from pytorch_pfn_extras.onnx import export_testcase
from pytorch_pfn_extras.onnx import is_large_tensor
from pytorch_pfn_extras.onnx import LARGE_TENSOR_DATA_THRESHOLD
from pytorch_pfn_extras.onnx.strip_large_tensor import _strip_large_tensor_tool_impl
output_dir = 'out'
torch_version = version.Version(torch.__version__)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5, 1)
self.conv2 = nn.Conv2d(20, 50, 5, 1)
self.fc1 = nn.Linear(4 * 4 * 50, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(-1, 4 * 4 * 50)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def _get_output_dir(d, **kwargs):
output_dir_base = 'out'
opset_ver = kwargs.get('opset_version', _default_onnx_opset_version)
output_dir = os.path.join(
output_dir_base, 'opset{}'.format(opset_ver), d)
os.makedirs(output_dir, exist_ok=True)
return output_dir
def _helper(model, args, d, **kwargs):
output_dir = _get_output_dir(d)
if 'training' not in kwargs:
kwargs['training'] = model.training
if 'do_constant_folding' not in kwargs:
kwargs['do_constant_folding'] = False
export_testcase(model, args, output_dir, **kwargs)
return output_dir
@pytest.mark.filterwarnings("ignore:Named tensors .* experimental:UserWarning")
def test_export_testcase():
model = Net().to('cpu')
x = torch.zeros((1, 1, 28, 28))
output_dir = _helper(model, x, 'mnist', output_grad=True)
assert os.path.isdir(output_dir)
assert os.path.isfile(os.path.join(output_dir, 'meta.json'))
assert os.path.isfile(os.path.join(output_dir, 'model.onnx'))
test_data_set_dir = os.path.join(output_dir, 'test_data_set_0')
expected_input_0_path = os.path.join(test_data_set_dir, 'input_0.pb')
assert os.path.isfile(expected_input_0_path)
assert _to_array(expected_input_0_path).shape == (1, 1, 28, 28)
assert os.path.isfile(os.path.join(test_data_set_dir, 'output_0.pb'))
assert os.path.isfile(os.path.join(
test_data_set_dir, 'gradient_input_0.pb'))
for i in range(8):
assert os.path.isfile(os.path.join(
test_data_set_dir, 'gradient_{}.pb'.format(i)))
assert not os.path.isfile(os.path.join(test_data_set_dir, 'gradient_8.pb'))
def test_export_filename():
model = nn.Sequential(nn.Linear(5, 10, bias=False))
x = torch.zeros((2, 5))
output_dir = _get_output_dir('export_filename')
model_path = os.path.join(output_dir, 'model.onnx')
with pytest.warns(UserWarning):
out = export(model, x, model_path, return_output=True)
assert os.path.isfile(model_path)
expected_out = torch.zeros((2, 10)) # check only shape size
np.testing.assert_allclose(
out.detach().cpu().numpy(), expected_out.detach().cpu().numpy())
def test_export_testcase_return_output():
model = nn.Sequential(nn.Linear(5, 10, bias=False))
x = torch.zeros((2, 5))
output_dir = _get_output_dir('export_filename')
if version.Version('1.6.0') <= torch_version:
with pytest.warns(UserWarning):
(out,) = export_testcase(model, x, output_dir, return_output=True)
else:
(out,) = export_testcase(model, x, output_dir, return_output=True)
assert os.path.isfile(os.path.join(output_dir, 'model.onnx'))
expected_out = torch.zeros((2, 10)) # check only shape size
np.testing.assert_allclose(
out.detach().cpu().numpy(), expected_out.detach().cpu().numpy())
@pytest.mark.filterwarnings("ignore::UserWarning")
def test_export_stream():
model = nn.Sequential(nn.Linear(5, 10, bias=False))
x = torch.zeros((2, 5))
bytesio = io.BytesIO()
assert len(bytesio.getvalue()) == 0
out = export(model, x, bytesio, return_output=True)
assert len(bytesio.getvalue()) > 0
expected_out = torch.zeros((2, 10)) # check only shape size
np.testing.assert_allclose(
out.detach().cpu().numpy(), expected_out.detach().cpu().numpy())
def test_cuda_tensor():
if not torch.cuda.is_available():
pytest.skip('CUDA is not available')
device = 'cuda'
model = Net().to(device)
x = torch.zeros((1, 1, 28, 28), device=device)
_helper(model, x, 'mnist_cuda', output_grad=True)
def test_model_not_overwrite():
model = Net().to('cpu')
x = torch.zeros((1, 1, 28, 28))
dir_name = 'multiple_test_dataset'
output_dir = _helper(model, x, dir_name)
assert os.path.isdir(output_dir)
output_dir = _helper(model, x + 0.5, dir_name, model_overwrite=False)
test_data_set_dir = os.path.join(output_dir, 'test_data_set_1')
assert os.path.isfile(os.path.join(test_data_set_dir, 'input_0.pb'))
assert os.path.isfile(os.path.join(test_data_set_dir, 'output_0.pb'))
def _to_array(f, name=None):
assert os.path.isfile(f)
onnx_tensor = onnx.TensorProto()
with open(f, 'rb') as fp:
onnx_tensor.ParseFromString(fp.read())
if name is not None:
assert onnx_tensor.name == name
return onnx.numpy_helper.to_array(onnx_tensor)
def test_backward():
model = nn.Sequential(nn.Linear(5, 10, bias=False))
x = torch.ones((2, 5))
output_dir = _helper(model, x, 'backword_default', output_grad=True)
assert os.path.isdir(output_dir)
test_data_set_dir = os.path.join(output_dir, 'test_data_set_0')
assert os.path.isdir(test_data_set_dir)
grad = _to_array(os.path.join(test_data_set_dir, 'gradient_0.pb'))
expected_grad = np.full((10, 5), 2.0, dtype=np.float32)
np.testing.assert_allclose(grad, expected_grad)
def test_backward_custom_input():
model = nn.Sequential(nn.Linear(5, 10, bias=False))
x = torch.ones((2, 5))
grad_in = torch.ones((2, 10)) * 0.5
output_dir = _helper(
model, x, 'backword_custom_input', output_grad=grad_in,
output_names=['output0'])
assert os.path.isdir(output_dir)
test_data_set_dir = os.path.join(output_dir, 'test_data_set_0')
assert os.path.isdir(test_data_set_dir)
output_grad_in = _to_array(
os.path.join(test_data_set_dir, 'gradient_input_0.pb'), 'output0')
np.testing.assert_allclose(output_grad_in, grad_in)
grad = _to_array(os.path.join(test_data_set_dir, 'gradient_0.pb'))
expected_grad = np.full((10, 5), 1.0, dtype=np.float32)
np.testing.assert_allclose(grad, expected_grad)
@pytest.mark.filterwarnings(
"ignore::torch.jit.TracerWarning", "ignore::UserWarning")
def test_backward_multiple_input():
model = nn.GRU(input_size=10, hidden_size=3, num_layers=1)
input = torch.ones((4, 5, 10), requires_grad=True)
h = torch.ones((1, 5, 3), requires_grad=True)
grads = [torch.ones((4, 5, 3)) / 2, torch.ones((1, 5, 3)) / 3]
output_dir = _helper(model, (input, h), 'backward_multiple_input',
output_grad=grads,
output_names=['output0', 'output1'])
assert os.path.isdir(output_dir)
test_data_set_dir = os.path.join(output_dir, 'test_data_set_0')
assert os.path.isdir(test_data_set_dir)
model.zero_grad()
exp_out1, exp_out2 = model.forward(input, h)
torch.autograd.backward(
tensors=[exp_out1, exp_out2],
grad_tensors=grads)
output1_grad_in = _to_array(
os.path.join(test_data_set_dir, 'gradient_input_0.pb'), 'output0')
np.testing.assert_allclose(grads[0], output1_grad_in)
output2_grad_in = _to_array(
os.path.join(test_data_set_dir, 'gradient_input_1.pb'), 'output1')
np.testing.assert_allclose(grads[1], output2_grad_in)
for i, (name, param) in enumerate(model.named_parameters()):
actual_grad = _to_array(
os.path.join(test_data_set_dir, 'gradient_{}.pb'.format(i)), name)
np.testing.assert_allclose(param.grad, actual_grad)
@pytest.mark.filterwarnings(
"ignore::torch.jit.TracerWarning", "ignore::UserWarning")
def test_export_testcase_strip_large_tensor_data():
if torch_version < version.Version('1.6.0'):
pytest.skip('skip for PyTorch 1.5 or earlier')
model = Net().to('cpu')
x = torch.zeros((1, 1, 28, 28))
output_dir = _helper(
model, x, 'mnist_stripped_tensor_data',
output_grad=True, strip_large_tensor_data=True)
assert os.path.isdir(output_dir)
assert os.path.isfile(os.path.join(output_dir, 'meta.json'))
assert os.path.isfile(os.path.join(output_dir, 'model.onnx'))
test_data_set_dir = os.path.join(output_dir, 'test_data_set_0')
assert os.path.isfile(os.path.join(test_data_set_dir, 'input_0.pb'))
assert os.path.isfile(os.path.join(test_data_set_dir, 'output_0.pb'))
for i in range(8):
assert os.path.isfile(os.path.join(
test_data_set_dir, 'gradient_{}.pb'.format(i)))
assert not os.path.isfile(os.path.join(test_data_set_dir, 'gradient_8.pb'))
with open(os.path.join(output_dir, 'meta.json')) as metaf:
metaj = json.load(metaf)
assert metaj['strip_large_tensor_data']
def is_stripped_with_check(tensor):
if is_large_tensor(tensor, LARGE_TENSOR_DATA_THRESHOLD):
assert tensor.data_location == onnx.TensorProto.EXTERNAL
assert tensor.external_data[0].key == 'location'
meta = json.loads(tensor.external_data[0].value)
assert meta['type'] == 'stripped'
assert type(meta['average']) == float
assert type(meta['variance']) == float
return True
assert len(tensor.external_data) == 0
return False
onnx_model = onnx.load(os.path.join(
output_dir, 'model.onnx'), load_external_data=False)
check_stripped = [
is_stripped_with_check(init) for init in onnx_model.graph.initializer]
    # this testcase exercises stripping, so at least one initializer must have been stripped
assert any(check_stripped)
for pb_filepath in ('input_0.pb', 'output_0.pb'):
with open(os.path.join(test_data_set_dir, pb_filepath), 'rb') as f:
tensor = onnx.TensorProto()
tensor.ParseFromString(f.read())
is_stripped_with_check(tensor)
# check re-load stripped onnx
_strip_large_tensor_tool_impl(
os.path.join(output_dir, 'model.onnx'),
os.path.join(output_dir, 'model_re.onnx'),
LARGE_TENSOR_DATA_THRESHOLD)
assert os.path.isfile(os.path.join(output_dir, 'model_re.onnx'))
# loading check
onnx.load(
os.path.join(output_dir, 'model_re.onnx'), load_external_data=False)
def test_export_testcase_options():
model = Net().to('cpu')
x = torch.zeros((1, 1, 28, 28))
output_dir = _helper(
model, x, 'mnist_stripped_tensor_data',
opset_version=11, strip_doc_string=False)
onnx_model = onnx.load(os.path.join(
output_dir, 'model.onnx'), load_external_data=False)
assert onnx_model.opset_import[0].version == 11
assert onnx_model.graph.node[0].doc_string != ''
class NetWithUnusedInput(nn.Module):
def __init__(self):
super(NetWithUnusedInput, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5, 1)
self.conv2 = nn.Conv2d(20, 50, 5, 1)
self.fc1 = nn.Linear(4 * 4 * 50, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x, unused):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(-1, 4 * 4 * 50)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
@pytest.mark.parametrize("keep_initializers_as_inputs", [None, True, False])
def test_export_testcase_with_unused_input(keep_initializers_as_inputs):
if torch_version < version.Version('1.7.0'):
pytest.skip('skip for PyTorch 1.6 or earlier')
model = NetWithUnusedInput().to('cpu')
x = torch.zeros((1, 1, 28, 28))
unused = torch.zeros((1,))
# Without input_names
output_dir = _helper(
model, args=(x, unused), d='net_with_unused_input_without_input_names',
opset_version=11, strip_doc_string=False,
keep_initializers_as_inputs=keep_initializers_as_inputs)
assert os.path.isdir(output_dir)
test_data_set_dir = os.path.join(output_dir, 'test_data_set_0')
assert os.path.exists(os.path.join(test_data_set_dir, 'input_0.pb'))
assert not os.path.exists(os.path.join(test_data_set_dir, 'input_1.pb'))
xmodel = onnx.load_model(os.path.join(output_dir, 'model.onnx'))
assert xmodel.graph.input[0].name == 'input_0'
assert len(xmodel.graph.input) == 1 or \
xmodel.graph.input[1].name != 'input_1'
# With input_names
output_dir = _helper(
model, args=(x, unused), d='net_with_unused_input_with_input_names',
opset_version=11, strip_doc_string=False,
keep_initializers_as_inputs=keep_initializers_as_inputs,
input_names=['x', 'unused'])
assert os.path.isdir(output_dir)
test_data_set_dir = os.path.join(output_dir, 'test_data_set_0')
assert os.path.exists(os.path.join(test_data_set_dir, 'input_0.pb'))
assert not os.path.exists(os.path.join(test_data_set_dir, 'input_1.pb'))
xmodel = onnx.load_model(os.path.join(output_dir, 'model.onnx'))
assert xmodel.graph.input[0].name == 'x'
assert len(xmodel.graph.input) == 1 or \
xmodel.graph.input[1].name != 'unused'
def test_user_meta():
model = nn.Sequential(nn.Linear(5, 10, bias=False))
x = torch.ones((2, 5))
output_dir = _helper(model, x, 'meta_without_user_meta', metadata=True)
with open(os.path.join(output_dir, "meta.json")) as metaf:
assert "user_meta" not in json.load(metaf)
output_dir = _helper(model, x, 'meta_with_user_meta', metadata=True,
user_meta={"user_key": "user_value"})
with open(os.path.join(output_dir, "meta.json")) as metaf:
assert json.load(metaf)["user_meta"]["user_key"] == "user_value"
with pytest.warns(UserWarning):
output_dir = _helper(model, x, 'without_meta_with_user_meta',
metadata=False,
user_meta={"user_key": "user_value"})
assert not os.path.exists(os.path.join(output_dir, 'meta.json'))
def test_export_pt():
model = nn.Sequential(nn.Linear(5, 10, bias=False))
x = torch.ones((2, 5))
output_dir = _helper(model, x, 'export_script_pt',
export_torch_script=True,
export_torch_trace=False)
assert os.path.exists(os.path.join(output_dir, 'model_script.pt'))
assert not os.path.exists(os.path.join(output_dir, 'model_trace.pt'))
output_dir = _helper(model, x, 'export_trace_pt',
export_torch_script=False,
export_torch_trace=True)
assert not os.path.exists(os.path.join(output_dir, 'model_script.pt'))
assert os.path.exists(os.path.join(output_dir, 'model_trace.pt'))
|
import xml.sax
import re
import pdb
import sys
import os
#THIS CODE IS MADE FOR PYTHON 3.0+
#get link probabilities for anchor texts
#NEEDS
#anchors_anum
#anchors_tally
#OUTPUT
#anchors_link_prob
#FORMAT
#anchor_text<>num_links<>num_occurences
anchors = {}
for fname in os.listdir('../anchors_anum/'):
for line in open('../anchors_anum/' + fname, 'r'):
anc = line.split('<>')[2].strip()
if anc in anchors:
anchors[anc] = (anchors[anc][0] + 1, 0)
else:
anchors[anc] = (1,0)
print(fname)
print('got numerators')
for fname in os.listdir('../anchors_tally/'):
for line in open('../anchors_tally/' + fname, 'r'):
anc = line.split('<>')[0].strip()
cnt = line.split('<>')[1].strip()
anchors[anc] = (anchors[anc][0], anchors[anc][1] + int(cnt))
print(fname)
print('got denominators')
out = open('../anchors_link_prob','w')
for anc in sorted(anchors.keys()):
out.write(anc + '<>' + str(anchors[anc][0]) + '<>' + str(anchors[anc][1]) + '\n')
out.close()
|
# -*- coding: utf-8 -*-
'''
Created on 2018-02-02
@author: Administrator
'''
import csv
import numpy as np
from sklearn import svm
def get_data(path):
with open(path) as fn:
readers=csv.reader(fn)
rows=[row for row in readers]
rows.pop(0)
return np.array(rows)
def trans_data(train_data):
x_train_1=np.reshape(train_data[:,-10],[len(train_data),1])
x_train_2=train_data[:,-8:-4]
x_train_3=np.reshape(train_data[:,-3],[len(train_data),1])
x_train_4=np.reshape(train_data[:,-1],[len(train_data),1])
x_train=np.hstack((x_train_1,x_train_2))
x_train=np.hstack((x_train,x_train_3))
x_train=np.hstack((x_train,x_train_4))
for i in range(len(x_train)):
if x_train[i,1]=='female':
x_train[i,1]=1
else:
x_train[i,1]=0
if x_train[i,-1]=='C':
x_train[i,-1]=0
if x_train[i,-1]=='Q':
x_train[i,-1]=1
if x_train[i,-1]=='S':
x_train[i,-1]=2
if x_train[i,2]=='':
# x_train[i,2]=np.random.randint(10,60)
x_train[i,2]=30
x_train_=[]
for i in range(len(x_train)):
c=x_train[i]
x_1=[]
for j in range(len(x_train[0])):
if '.' in c[j]:
c[j]=c[j][:(c[j].index('.'))]
if c[j]=='':
c[j]=0
x_1.append(int(c[j]))
x_train_.append(x_1)
x_train_=np.array(x_train_)
return x_train_
path_train='D:\\Desktop\\kaggle\\Titanic_20180202\\train.csv'
path_test='D:\\Desktop\\kaggle\\Titanic_20180202\\test.csv'
train_data=get_data(path_train)
test_data=get_data(path_test)
print(train_data.shape,test_data.shape)
x_train_=trans_data(train_data)
x_test=trans_data(test_data)
y_train=np.reshape(train_data[:,1],[891,1])
'''
# print(test_data[1],train_data[1])
# for i in range(len(train_data)):
# train_data.remove(i,3)
x_train_1=np.reshape(train_data[:,2],[891,1])
x_train_2=train_data[:,4:8]
x_train_3=np.reshape(train_data[:,-3],[891,1])
x_train_4=np.reshape(train_data[:,-1],[891,1])
x_train=np.hstack((x_train_1,x_train_2))
x_train=np.hstack((x_train,x_train_3))
x_train=np.hstack((x_train,x_train_4))
y_train=np.reshape(train_data[:,1],[891,1])
print(x_train.shape,x_train_1.shape,x_train_2.shape,y_train.shape)
print(x_train[1],y_train[1])
print('step1')
for i in range(len(x_train)):
if x_train[i,1]=='female':
x_train[i,1]=0
else:
x_train[i,1]=1
if x_train[i,-1]=='C':
x_train[i,-1]=0
if x_train[i,-1]=='Q':
x_train[i,-1]=1
if x_train[i,-1]=='S':
x_train[i,-1]=2
if x_train[i,2]=='':
# x_train[i,2]=np.random.randint(10,60)
x_train[i,2]=30
print(x_train[5],y_train[5])
print(x_train[6],y_train[6])
print('step2')
x_train_=[]
y_train_=np.zeros([len(y_train),1])
for i in range(len(x_train)):
if str(y_train[i])=="['0']":
y_train_[i]=0
else:
y_train_[i]=1
c=x_train[i]
x_1=[]
for j in range(len(x_train[0])):
if '.' in c[j]:
c[j]=c[j][:(c[j].index('.'))]
if c[j]=='':
c[j]=0
x_1.append(int(c[j]))
x_train_.append(x_1)
x_train_=np.array(x_train_)
'''
y_train_=np.zeros([len(y_train),1])
for i in range(len(x_train_)):
    if str(y_train[i]) == "['0']":
        y_train_[i] = 0
    else:
        y_train_[i] = 1
print(x_train_.shape,x_train_[5])
print(x_test.shape,x_test[5])
print(y_train_.shape,y_train_[1])
# x=np.array([[2,0],[1,1],2,3])
# y=np.array([0,0,1])
clf=svm.SVC(kernel='linear')
# clf = svm.SVC(C=0.1, kernel='linear', decision_function_shape='ovr')
# clf = svm.SVC(C=0.8, kernel='rbf', gamma=20, decision_function_shape='ovr')
clf.fit(x_train_, y_train_.ravel())  # ravel avoids sklearn's column-vector warning
# print(clf)
# print(clf.support_vectors_)
# print(clf.support_)
# print(clf.n_support_)
# prediction_=clf.predict(x_train_[850:])
# print(prediction_.shape)
# prediction_=np.reshape(prediction_,[len(prediction_),1])
# acc=y_train_[850:]-prediction_
# # print(acc)
# print(acc.shape)
# sum1=0
# for i in range(len(acc)):
# if acc[i]==0.:
# sum1+=1
# print(sum1/(len(acc)))
# print(prediction_)
prediction_1=clf.predict(x_test)
print(prediction_1.shape)
with open('D:\\Desktop\\kaggle\\Titanic_20180202\\y_test.csv', 'w', newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(['PassengerId', 'Survived'])
    for i in range(len(prediction_1)):
        data = (i + 892, int(prediction_1[i]))  # Kaggle expects integer labels
        writer.writerow(data)
print('done!') |
from sys import argv
#Here we import a module from sys to keep code small.
#argv is the "argument variable" which holds arguments you pass to Python script
#when you run it.
script, first, second, third = argv
#Assigning argv to four variables to unpack argv in each variable
print "The script is called: ", script
print "The first variable is:", first
print "The second variable is: ", second
print "The third variable is: ", third
#If we run the script with too few parameters, the remaining arguments are not
#defined. As long as argv is set to some variables, each variable must have set parameters.
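#A hedged usage sketch ("ex13.py" is an assumed filename for illustration):
#
#    $ python ex13.py first 2nd 3rd
#    The script is called: ex13.py
#    The first variable is: first
#    The second variable is: 2nd
#    The third variable is: 3rd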
|
import torch
from utils.data_reader import get_train_dev_test_data
from utils.train_helper import load_model, eval_model, get_data_loader
train_data, dev_data, test_data = get_train_dev_test_data()
model = load_model("model/checkpoints/DeepCoNN_20200601215955.pt")
model.to(model.config.device)
loss = torch.nn.MSELoss()
data_iter = get_data_loader(test_data, model.config)
print(eval_model(model, data_iter, loss))
|
t = int(input())
for _ in range(t):
    n = int(input())
    a = list(map(int, input().split(" ")))
    # the answer is simply the number of distinct values
    print(len(set(a)))
'''if(len(b)==1):
print('1')
else:
count=0
for i in range(len(b)-1,0,-1):
count+=(b[i]-b[i-1])
print(count+1)'''
|
# base imports
import tkinter as tk
from tkinter import ttk
# relative module imports for all the frames
from .manageusers import ManageUsers
from .employees import Employees
from .payments import Payments
from .tenants import Tenants
from .apartments import Apartments
# A master frame [contained by the root Application]
# responsible for containing the buttons that switch between different pages of the application.
class Menu(tk.Frame):
    def __init__(self, master):
        super().__init__(master)
        self.master = master
        self.pack(fill='both')
        # Creating the widgets; Tk's bind passes an event object, so each
        # <Return> binding wraps its call in a lambda that accepts and discards it
        logout = ttk.Button(self, width=10, text="Logout",
                            command=self.master.logout)
        logout.bind('<Return>', lambda e: self.master.logout())
        logout.pack(side='left', fill='both', expand=True, padx=(0, 14))
        manage_users = ttk.Button(self, width=16, text="Manage Users",
                                  command=lambda: self.next_frame(ManageUsers))
        manage_users.bind('<Return>', lambda e: self.next_frame(ManageUsers))
        manage_users.pack(side='left', fill='both', expand=True)
        view_employees = ttk.Button(self, width=16, text="Employees",
                                    command=lambda: self.next_frame(Employees))
        view_employees.bind('<Return>', lambda e: self.next_frame(Employees))
        view_employees.pack(side='left', fill='both', expand=True)
        view_payments = ttk.Button(self, width=16, text="Payments",
                                   command=lambda: self.next_frame(Payments))
        view_payments.bind('<Return>', lambda e: self.next_frame(Payments))
        view_payments.pack(side='left', fill='both', expand=True)
        view_tenants = ttk.Button(self, width=16, text="Tenants",
                                  command=lambda: self.next_frame(Tenants))
        view_tenants.bind('<Return>', lambda e: self.next_frame(Tenants))
        view_tenants.pack(side='left', fill='both', expand=True)
        view_apartments = ttk.Button(self, width=16, text="Apartments",
                                     command=lambda: self.next_frame(Apartments))
        view_apartments.bind('<Return>', lambda e: self.next_frame(Apartments))
        view_apartments.pack(side='left', fill='both', expand=True)
        # Track the active button so it can be disabled while its page is shown
        self.current_button = manage_users
        self.current_button.configure(state='disabled')
        self.buttons = {"ManageUsers": manage_users,
                        "Employees": view_employees,
                        "Payments": view_payments,
                        "Tenants": view_tenants,
                        "Apartments": view_apartments}

    def next_frame(self, frame, _event=None):
        self.current_button.configure(state='normal')
        self.current_button = self.buttons[frame.__name__]
        self.current_button.configure(state='disabled')
        self.master.next_frame(frame)
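# A minimal sketch of the root Application this Menu assumes (hypothetical;
# the real root lives elsewhere in the package). Menu only needs its master
# to expose next_frame(frame_class) and logout(), roughly:
#
# class Application(tk.Tk):
#     def __init__(self):
#         super().__init__()
#         self.current = None
#         Menu(self)                    # the button bar packs itself
#         self.next_frame(ManageUsers)  # show a first page
#
#     def next_frame(self, frame, _event=None):
#         if self.current is not None:
#             self.current.destroy()    # tear down the old page
#         self.current = frame(self)    # build the new one
#
#     def logout(self, _event=None):
#         self.destroy()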
|
# Given the list L = [5, 7, 2, 9, 4, 1, 3], write a program that prints:
# a) the length of the list
# b) the largest value in the list
# c) the smallest value in the list
# d) the sum of all the elements in the list
# e) the list in ascending order
# f) the list in descending order
Lista = [5, 7, 2, 9, 4, 1, 3]
print("Your current list is:", Lista)
print("Length of the list:", len(Lista))
print("Largest value in the list:", max(Lista))
print("Smallest value in the list:", min(Lista))
print("Sum of all the elements in the list:", sum(Lista))
Lista.sort()
print("List in ascending order:", Lista)
Lista.reverse()
print("List in descending order:", Lista)
# ---------------------------------------------------------
#Name:
#
#Author:Tory Stietz
#
#Date created:
#
#Script Function:
#
#Script References:
#
#Special Instructions:
#
#****************************
import requests
import json
# VlanName and ipAddr were referenced below but never defined in the original;
# they are added as parameters here (an assumed fix).
def getVlanBrief(VlanIP, VlanName, ipAddr):
    """
    Be sure to run feature nxapi first on Nexus Switch
    """
    #sets the username and password to the values in the single quotations.
    switchuser = 'cisco'
    switchpassword = 'cisco'
    url = 'https://' + VlanIP + '/ins'
    myheaders = {'content-type': 'application/json-rpc'}
    payload = [
        {
            "jsonrpc": "2.0",
            "method": "cli",
            "params": {
                "cmd": "show ip interface brief",
                "version": 1
            },
            "id": 1
        },
        {
            "jsonrpc": "2.0",
            "method": "cli",
            "params": {
                "cmd": "interface " + VlanName,
                "version": 1
            },
            "id": 2
        },
        {
            "jsonrpc": "2.0",
            "method": "cli",
            "params": {
                "cmd": "ip address " + ipAddr + " 255.255.255.0",
                "version": 1
            },
            "id": 3
        }
    ]
    '''
    verify=False below is to accept an untrusted certificate
    '''
    response = requests.post(url, data=json.dumps(payload), verify=False, headers=myheaders, auth=(switchuser, switchpassword)).json()
    return response
IpAddress = input('Enter a new IP Address:')
print(IpAddress)
ipList = IpAddress.split(".")
#Adds five to the third octet of the new IP address and prints the result.
def AddToIP(ipList):
    octet3 = int(ipList[2])  # the original referenced an undefined myList here
    octet3 = octet3 + 5
    ipList[2] = str(octet3)
    newIP = ipList[0] + "." + ipList[1] + "." + ipList[2] + "." + ipList[3]
    print(newIP)  # the original printed the literal string "newIP"
    return newIP
# `response` was never bound at module level in the original; this call is an
# assumed fix, with "Vlan100" as a placeholder interface name.
response = getVlanBrief(IpAddress, "Vlan100", IpAddress)
print(response)  #prints the full dictionary.
# NOTE: for a multi-command payload NX-API returns a list of results, so the
# indexing below takes the first entry (the "show ip interface brief" output).
interfaces = response[0]["result"]["body"]["TABLE_intf"]["ROW_intf"]
for interface in interfaces:
    print(interface['intf-name'])  #individually prints each nested dictionary within the main dictionary.
#prints the headings inside the parentheses, spaced out with \t tabs
print("Name\t\tProtocol\tLink\t\tAddress")
#prints 60 dashes to act as a divider between the headings and the actual device information.
print("-" * 60)
#walks the rows extracted above into interfaces = response[0]["result"]["body"]["TABLE_intf"]["ROW_intf"]
for info in interfaces:
    #Prints the info stored in interfaces for each specified key from the nested dictionary.
    print(info['intf-name'] + '\t\t' + info['proto-state'] + '\t\t' + info['link-state'] + '\t\t' + info['prefix'])
# end of script
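# A hedged add-on: because the request uses verify=False, urllib3 emits an
# InsecureRequestWarning on every call. Placing these two lines near the top
# of the script would silence it (urllib3 ships alongside requests):
# import urllib3
# urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)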
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 3 14:01:13 2019
@author: lenovo
"""
newstr="RESTART"
#define the value
txt=newstr.replace("R",'$')
#it is use for replace the value of R as $
print(txt)
#it print the value of txt
apr=txt.replace("$",'R',1)
#It defines that left the first R
print(apr) |
import os
import inspect
import app
# Values for server paths
# appRoot = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))
appRoot = os.path.dirname(inspect.getfile(app))
appConfigRoot = os.path.join(appRoot, 'Config')
webRoot = os.path.join(appRoot, 'wwwroot')
loginRoot = r'http://cs302.pythonanywhere.com'
# Values for public server
publicPort = 10001
# Values for ip location
universityDesktop = '10.103.0.0'
universityWifi = '172.23.0.0'
# Values for encryption
privateKeyPath = os.path.join(appRoot, 'Data', 'key')
publicKeyPath = os.path.join(appRoot, 'Data', 'key.pub')
salt = 'COMPSYS302-2017'
serverKey = '150ecd12d550d05ad83f18328e536f53'
serverAESBlockSize = 16
serverAESPadding = ' '
# Values for data/storage
dbPath = os.path.join(appRoot, 'Data', 'entity.db')
# Values for standards support
standards = {
'encryption': ['0', '3'],
'hashing': ['0', '1', '3']
}
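# A hedged sketch of how the AES block settings above are presumably consumed
# (hypothetical helper; the real padding code lives elsewhere in the app):
def padForAES(plaintext, blockSize=serverAESBlockSize, padChar=serverAESPadding):
    # pad with spaces so the length is a whole number of AES blocks
    remainder = len(plaintext) % blockSize
    if remainder:
        plaintext += padChar * (blockSize - remainder)
    return plaintext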
|
import random
try:
    filepath = "monster_list.txt"
    with open(filepath, encoding="utf-8") as f:  # returns a file object
        lines = f.readlines()  # read every line at once
    # randint's upper bound is inclusive, so use len(lines) - 1 to stay in range
    a = random.randint(0, len(lines) - 1)
    print(lines[a])
except FileNotFoundError:
    print("Please place monster_list.txt next to this script")
|
import rasterio, os, shutil, glob
import numpy as np
###
# REQUIRES GDAL 1.8.0 +
#
# This is more of a roadmap to how we got to specific mask results than it is a script.
# Keep this in mind when reading the processes used to get to the needed masks.
###
input_dir = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/Vegetation/Output_Data'
akcan = os.path.join( input_dir, 'alfresco_model_vegetation_input_2005.tif' )
template = os.path.join( input_dir, 'template_akcan_extent_1km.tif' )
# make a template alaska_canada empty raster for warping / resampling to
with rasterio.open( akcan ) as akcan_rst:
    meta = akcan_rst.meta
    meta.update( compress='lzw', nodata=None, dtype=np.uint8 )
    arr = akcan_rst.read_band( 1 ).data
    with rasterio.open( template, 'w', **meta ) as template_rst:
        template_rst.write_band( 1, np.zeros_like( arr ) )
# cleanup
del arr, akcan_rst, template_rst
## Maritime Domain Alaska Only
combined_mask = rasterio.open( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/Vegetation/Input_Data/maritime/combined_mask.tif' )
nlcd_mask = rasterio.open( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/Vegetation/Input_Data/maritime/nlcd_2001_land_cover_maritime_mask.tif' )
combined_arr = combined_mask.read_band(1)
nlcd_arr = nlcd_mask.read_band(1)
nd = nlcd_arr.data
cd = combined_arr.data
new = np.zeros_like( cd )
new[ (cd == 1) & (nd == 1 ) ] = 1
new_mask = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/Vegetation/Input_Data/maritime/combined_mask_alaska_only.tif'
output_resampled = new_mask.replace( '.tif', '_akcan_1km.tif' )
meta = nlcd_mask.meta
meta.update( compress='lzw', nodata=None )
with rasterio.open( new_mask, 'w', **meta ) as out:
    out.write_band( 1, new )
# make a 1km version in the domain of the large AKCanada Extent
if os.path.exists( output_resampled ):
    [ os.remove( i ) for i in glob.glob( output_resampled[:-3] + '*' ) ]
shutil.copyfile( template, output_resampled )
command = 'gdalwarp -r mode -multi -srcnodata None ' + new_mask + ' ' + output_resampled
os.system( command )
## Maritime Domain Canada Only
new = np.zeros_like( cd )
new[ (cd == 1) & (nd == 0 ) ] = 1
new_mask = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/Vegetation/Input_Data/maritime/combined_mask_canada_only.tif'
output_resampled = new_mask.replace( '.tif', '_akcan_1km.tif' )
meta = nlcd_mask.meta
meta.update( compress='lzw', nodata=None )
with rasterio.open( new_mask, 'w', **meta ) as out:
    out.write_band( 1, new )
# make a 1km version in the domain of the large AKCanada Extent
if os.path.exists( output_resampled ):
    [ os.remove( i ) for i in glob.glob( output_resampled[:-3] + '*' ) ]
shutil.copyfile( template, output_resampled )
command = 'gdalwarp -r mode -multi -srcnodata None ' + new_mask + ' ' + output_resampled
os.system( command )
## Now let's take the domain rasters for SEAK, SCAK and remove Canada
ak_only_combined = rasterio.open( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/Vegetation/Input_Data/maritime/combined_mask_alaska_only.tif' )
seak = rasterio.open( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/Vegetation/Input_Data/maritime/seak_aoi.tif' )
scak = rasterio.open( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/Vegetation/Input_Data/maritime/scak_aoi.tif' )
kodiak = rasterio.open( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/Vegetation/Input_Data/maritime/kodiak_aoi.tif' )
# some file meta for the output rasters
meta = ak_only_combined.meta
meta.update( compress='lzw', nodata=None )
ak_arr = ak_only_combined.read_band( 1 )
# seak
seak_arr = seak.read_band( 1 )
out_arr = np.zeros_like( seak_arr.data )
out_arr[ (ak_arr == 1) & (seak_arr == 1) ] = 1
output_filename = seak.name.replace( '.tif', '_akonly.tif' )
with rasterio.open( output_filename, 'w', **meta ) as out:
    out.write_band( 1, out_arr )
del seak_arr, seak
# scak
scak_arr = scak.read_band( 1 )
out_arr = np.zeros_like( scak_arr.data )
out_arr[ (ak_arr == 1) & (scak_arr == 1) ] = 1
output_filename = scak.name.replace( '.tif', '_akonly.tif' )
with rasterio.open( output_filename, 'w', **meta ) as out:
    out.write_band( 1, out_arr )
del scak_arr, scak
# kodiak
kodiak_arr = kodiak.read_band( 1 ).filled()
output_filename = kodiak.name.replace( '.tif', '_akonly.tif' )
with rasterio.open( output_filename, 'w', **meta ) as out:
    out.write_band( 1, kodiak_arr )  # already filled above; calling .filled() twice would fail on a plain ndarray
del kodiak_arr, kodiak
## Now let's take the domain rasters for SEAK, SCAK and remove Alaska
ak_only_combined = rasterio.open( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/Vegetation/Input_Data/maritime/combined_mask_alaska_only.tif' )
seak = rasterio.open( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/Vegetation/Input_Data/maritime/seak_aoi.tif' )
scak = rasterio.open( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/Vegetation/Input_Data/maritime/scak_aoi.tif' )
kodiak = rasterio.open( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/Vegetation/Input_Data/maritime/kodiak_aoi.tif' )
# some file meta for the output rasters
meta = ak_only_combined.meta
meta.update( compress='lzw', nodata=None )
ak_arr = ak_only_combined.read_band( 1 )
# seak
seak_arr = seak.read_band( 1 )
out_arr = np.zeros_like( seak_arr.data )
out_arr[ (ak_arr == 0) & (seak_arr == 1) ] = 1
output_filename = seak.name.replace( '.tif', '_canadaonly.tif' )
with rasterio.open( output_filename, 'w', **meta ) as out:
    out.write_band( 1, out_arr )
del seak_arr, seak
# scak
scak_arr = scak.read_band( 1 )
out_arr = np.zeros_like( scak_arr.data )
out_arr[ (ak_arr == 0) & (scak_arr == 1) ] = 1
output_filename = scak.name.replace( '.tif', '_canadaonly.tif' )
with rasterio.open( output_filename, 'w', **meta ) as out:
    out.write_band( 1, out_arr )
del scak_arr, scak
# make a mask of the saltwater domain from NLCD -- class 11
nlcd = rasterio.open( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/Vegetation/Input_Data/maritime/nlcd_2001_land_cover_maritime.tif' )
meta = nlcd.meta
meta.update( compress='lzw', nodata=None )
arr = nlcd.read_band( 1 ).data
arr[ arr != 11 ] = 0
arr[ arr == 11 ] = 1
output_filename = nlcd.name.replace( '.tif', '_saltwater_mask.tif' )
with rasterio.open( output_filename, 'w', **meta ) as out:
    out.write_band( 1, arr )
# make a 1km version in the domain of the large AKCanada Extent
output_resampled = out.name.replace( '.tif', '_akcan_1km.tif' )
if os.path.exists( output_resampled ):
    [ os.remove( i ) for i in glob.glob( output_resampled[:-3] + '*' ) ]
shutil.copyfile( template, output_resampled )
command = 'gdalwarp -r mode -multi -srcnodata None ' + out.name + ' ' + output_resampled
os.system( command )
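# The copy-template-then-gdalwarp dance above repeats for every mask; a small
# helper capturing that pattern (a sketch under the same GDAL/rasterio
# assumptions as the rest of this roadmap):
def resample_to_akcan_1km( mask_fn, template_fn ):
    ''' warp a 0/1 mask onto the 1km AKCanada template using mode resampling '''
    output_resampled = mask_fn.replace( '.tif', '_akcan_1km.tif' )
    if os.path.exists( output_resampled ):
        [ os.remove( i ) for i in glob.glob( output_resampled[:-3] + '*' ) ]
    shutil.copyfile( template_fn, output_resampled )
    os.system( 'gdalwarp -r mode -multi -srcnodata None ' + mask_fn + ' ' + output_resampled )
    return output_resampled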
|
from flask import Flask, render_template, redirect, request, send_from_directory
from linode_api4 import LinodeLoginClient, OAuthScopes
from keys import LINODE_API_KEY, LINODE_CLIENT_ID
from setup_vpn import create_vpn
from constants import LINODE_REGIONS
app = Flask(__name__)
@app.route("/")
def hello():
return render_template("homepage.html")
@app.route("/setup")
def setup():
return render_template("setup.html")
@app.route("/FAQ")
def help():
return render_template("help.html")
@app.route("/donate")
def donate():
return render_template("donate.html")
"""
Error Handling
"""
@app.errorhandler(404)
def page_not_found(e):
    return render_template('404.html'), 404
@app.errorhandler(502)
def time_out_502(e):
    return render_template('timeout.html'), 502
@app.errorhandler(504)
def time_out_504(e):
    return render_template('timeout.html'), 504
"""
Create VPN with Linode API
"""
@app.route("/linode-oauth-begin")
def linode_oauth():
login_client = LinodeLoginClient(LINODE_CLIENT_ID, LINODE_API_KEY)
redirect_to = login_client.generate_login_url(scopes=[OAuthScopes.Linodes.create, OAuthScopes.Linodes.modify, OAuthScopes.Linodes.view])
return redirect(redirect_to)
@app.route("/linode_oauth_redirect", methods=['GET'])
def linode_oauth_redirect():
login_client = LinodeLoginClient(LINODE_CLIENT_ID, LINODE_API_KEY)
code = request.args["code"]
token = login_client.finish_oauth(code)[0]
print(token)
regions = [key for key, value in LINODE_REGIONS.items()]
return render_template("confirm.html", provider="Linode", token=token, regions=regions)
"""
Create VPN with Digital Ocean API
"""
"""
"""
@app.route("/create-vpn", methods=['POST'])
def create_vpn_on_server():
# use linode api to create a vpn
token = request.form["token"]
region = request.form["region"]
ip, password = create_vpn("Linode", region, token)
print(ip)
print(password)
return render_template("success.html", token=token, ip=ip, password=password)
"""
Download VPN file
"""
@app.route('/download', methods=['POST'])
def download():
    token = request.form["file"]
    login_client = LinodeLoginClient(LINODE_CLIENT_ID, LINODE_API_KEY)
    login_client.expire_token(token)
    return send_from_directory(directory="configs", filename=token + ".ovpn", attachment_filename="yourkeyfile.ovpn", as_attachment=True)
"""
"""
if __name__ == "__main__":
    app.run()
|