text stringlengths 38 1.54M |
|---|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: QAM-Decoder
# Author: Ihar Yatsevich
# GNU Radio version: 3.7.13.5
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from gnuradio import analog
from gnuradio import blocks
from gnuradio import digital
from gnuradio import eng_notation
from gnuradio import filter
from gnuradio import gr
from gnuradio import qtgui
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from grc_gnuradio import blks2 as grc_blks2
from optparse import OptionParser
import osmosdr
import sip
import sys
import time
from gnuradio import qtgui
class qam_decoder(gr.top_block, Qt.QWidget):
    """QAM/QPSK receiver flow graph (auto-generated by GNU Radio Companion 3.7).

    Signal path: RTL-SDR source -> 1:8 resampler -> complex mixer at -lo_freq
    -> 12 kHz low-pass -> 1:10 resampler -> QAM demodulator -> packet decoder
    -> byte file sink, with a QT GUI sink tapped off the resampled stream.
    Odd constructs (the ``if None:`` branches, duplicated banners) are
    artifacts of the GRC code generator.
    """

    def __init__(self):
        gr.top_block.__init__(self, "QAM-Decoder")
        Qt.QWidget.__init__(self)
        self.setWindowTitle("QAM-Decoder")
        qtgui.util.check_set_qss()
        try:
            self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
        except:
            pass
        # Scrollable top-level layout that hosts all GUI widgets.
        self.top_scroll_layout = Qt.QVBoxLayout()
        self.setLayout(self.top_scroll_layout)
        self.top_scroll = Qt.QScrollArea()
        self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
        self.top_scroll_layout.addWidget(self.top_scroll)
        self.top_scroll.setWidgetResizable(True)
        self.top_widget = Qt.QWidget()
        self.top_scroll.setWidget(self.top_widget)
        self.top_layout = Qt.QVBoxLayout(self.top_widget)
        self.top_grid_layout = Qt.QGridLayout()
        self.top_layout.addLayout(self.top_grid_layout)
        # Restore window geometry saved by closeEvent() on the previous run.
        self.settings = Qt.QSettings("GNU Radio", "qam_decoder")
        self.restoreGeometry(self.settings.value("geometry").toByteArray())

        ##################################################
        # Variables
        ##################################################
        self.center_freq = center_freq = 145.75e6  # RF tuner frequency, Hz
        self.lo_freq = lo_freq = 80e3  # software LO offset, Hz
        self.freq = freq = center_freq
        self.samp_rate = samp_rate = 2.048e6  # SDR sample rate, samples/s
        # Gray-mappable 4-point constellation on the unit circle (defined but
        # not wired into a block below -- the demodulator builds its own).
        self.qpsk = qpsk = digital.constellation_rect(([0.707+0.707j, -0.707+0.707j, -0.707-0.707j, 0.707-0.707j]), ([0, 1, 2, 3]), 4, 2, 2, 1, 1).base()
        self.data_freq = data_freq = freq+lo_freq  # where the data carrier sits

        ##################################################
        # Blocks
        ##################################################
        self.rtlsdr_source_0 = osmosdr.source( args="numchan=" + str(1) + " " + '' )
        self.rtlsdr_source_0.set_sample_rate(samp_rate)
        self.rtlsdr_source_0.set_center_freq(center_freq, 0)
        self.rtlsdr_source_0.set_freq_corr(0, 0)
        self.rtlsdr_source_0.set_dc_offset_mode(0, 0)
        self.rtlsdr_source_0.set_iq_balance_mode(0, 0)
        self.rtlsdr_source_0.set_gain_mode(False, 0)
        self.rtlsdr_source_0.set_gain(14.4, 0)
        self.rtlsdr_source_0.set_if_gain(0, 0)
        self.rtlsdr_source_0.set_bb_gain(0, 0)
        self.rtlsdr_source_0.set_antenna('', 0)
        self.rtlsdr_source_0.set_bandwidth(0, 0)
        # Decimate 2.048 MS/s by 8 (-> 256 kS/s) for the mixer and GUI path.
        self.rational_resampler_xxx_0_0_0 = filter.rational_resampler_ccc(
            interpolation=1,
            decimation=8,
            taps=None,
            fractional_bw=None,
        )
        # Decimate by a further 10 (-> 25.6 kS/s) into the demodulator.
        self.rational_resampler_xxx_0_0 = filter.rational_resampler_ccc(
            interpolation=1,
            decimation=10,
            taps=None,
            fractional_bw=None,
        )
        # Combined FFT / waterfall / time / constellation display.
        self.qtgui_sink_x_0 = qtgui.sink_c(
            1024, #fftsize
            firdes.WIN_BLACKMAN_hARRIS, #wintype
            center_freq, #fc
            samp_rate/8, #bw
            "", #name
            True, #plotfreq
            True, #plotwaterfall
            True, #plottime
            True, #plotconst
        )
        self.qtgui_sink_x_0.set_update_time(1.0/10)
        self._qtgui_sink_x_0_win = sip.wrapinstance(self.qtgui_sink_x_0.pyqwidget(), Qt.QWidget)
        self.top_grid_layout.addWidget(self._qtgui_sink_x_0_win)
        self.qtgui_sink_x_0.enable_rf_freq(True)
        # 12 kHz low-pass around the mixed-down data signal.
        self.low_pass_filter_0 = filter.fir_filter_ccf(1, firdes.low_pass(
            1, samp_rate/8, 12000, 1000, firdes.WIN_HAMMING, 6.76))
        # Read-only toolbar label showing the center frequency.
        self._freq_tool_bar = Qt.QToolBar(self)
        if None:  # generated dead branch: formatter is always eng_notation
            self._freq_formatter = None
        else:
            self._freq_formatter = lambda x: eng_notation.num_to_str(x)
        self._freq_tool_bar.addWidget(Qt.QLabel('Center frequency'+": "))
        self._freq_label = Qt.QLabel(str(self._freq_formatter(self.freq)))
        self._freq_tool_bar.addWidget(self._freq_label)
        self.top_grid_layout.addWidget(self._freq_tool_bar)
        # Hierarchical QAM demod: AGC, clock/freq/phase recovery, differential
        # gray decoding; loop bandwidths are 0.0628 rad (6.28/100).
        self.digital_qam_demod_0 = digital.qam.qam_demod(
            constellation_points=4,
            differential=True,
            samples_per_symbol=4,
            excess_bw=0.35,
            freq_bw=6.28/100.0,
            timing_bw=6.28/100.0,
            phase_bw=6.28/100.0,
            mod_code="gray",
            verbose=False,
            log=False,
        )
        # Read-only toolbar label showing the data carrier frequency.
        self._data_freq_tool_bar = Qt.QToolBar(self)
        if None:  # generated dead branch
            self._data_freq_formatter = None
        else:
            self._data_freq_formatter = lambda x: eng_notation.num_to_str(x)
        self._data_freq_tool_bar.addWidget(Qt.QLabel('Data center frequency'+": "))
        self._data_freq_label = Qt.QLabel(str(self._data_freq_formatter(self.data_freq)))
        self._data_freq_tool_bar.addWidget(self._data_freq_label)
        self.top_grid_layout.addWidget(self._data_freq_tool_bar)
        self.blocks_multiply_xx_1 = blocks.multiply_vcc(1)
        # NOTE(review): Windows-style output path; adjust for other platforms.
        self.blocks_file_sink_0 = blocks.file_sink(gr.sizeof_char*1, 'D:\\Output-data.txt', False)
        self.blocks_file_sink_0.set_unbuffered(False)
        # Packet framing decoder; threshold=-1 uses the default access code.
        self.blks2_packet_decoder_0 = grc_blks2.packet_demod_b(grc_blks2.packet_decoder(
                access_code='',
                threshold=-1,
                callback=lambda ok, payload: self.blks2_packet_decoder_0.recv_pkt(ok, payload),
            ),
        )
        # Complex LO used to shift the data signal down to baseband.
        self.analog_sig_source_x_1 = analog.sig_source_c(samp_rate/8, analog.GR_COS_WAVE, -lo_freq, 1, 0)

        ##################################################
        # Connections
        ##################################################
        self.connect((self.analog_sig_source_x_1, 0), (self.blocks_multiply_xx_1, 1))
        self.connect((self.blks2_packet_decoder_0, 0), (self.blocks_file_sink_0, 0))
        self.connect((self.blocks_multiply_xx_1, 0), (self.low_pass_filter_0, 0))
        self.connect((self.digital_qam_demod_0, 0), (self.blks2_packet_decoder_0, 0))
        self.connect((self.low_pass_filter_0, 0), (self.rational_resampler_xxx_0_0, 0))
        self.connect((self.rational_resampler_xxx_0_0, 0), (self.digital_qam_demod_0, 0))
        self.connect((self.rational_resampler_xxx_0_0_0, 0), (self.blocks_multiply_xx_1, 0))
        self.connect((self.rational_resampler_xxx_0_0_0, 0), (self.qtgui_sink_x_0, 0))
        self.connect((self.rtlsdr_source_0, 0), (self.rational_resampler_xxx_0_0_0, 0))

    def closeEvent(self, event):
        # Persist window geometry so __init__ can restore it on the next run.
        self.settings = Qt.QSettings("GNU Radio", "qam_decoder")
        self.settings.setValue("geometry", self.saveGeometry())
        event.accept()

    def get_center_freq(self):
        return self.center_freq

    def set_center_freq(self, center_freq):
        self.center_freq = center_freq
        self.rtlsdr_source_0.set_center_freq(self.center_freq, 0)
        self.qtgui_sink_x_0.set_frequency_range(self.center_freq, self.samp_rate/8)
        # NOTE(review): passes the *formatted string* into set_freq(), which
        # then does freq + lo_freq arithmetic -- this would raise TypeError if
        # center_freq were ever changed at runtime; confirm against the .grc.
        self.set_freq(self._freq_formatter(self.center_freq))

    def get_lo_freq(self):
        return self.lo_freq

    def set_lo_freq(self, lo_freq):
        self.lo_freq = lo_freq
        self.set_data_freq(self._data_freq_formatter(self.freq+self.lo_freq))
        self.analog_sig_source_x_1.set_frequency(-self.lo_freq)

    def get_freq(self):
        return self.freq

    def set_freq(self, freq):
        self.freq = freq
        # NOTE(review): Q_ARG("QString", ...) expects a string; self.freq is a
        # float on the initial code path -- generated-code quirk, verify.
        Qt.QMetaObject.invokeMethod(self._freq_label, "setText", Qt.Q_ARG("QString", self.freq))
        self.set_data_freq(self._data_freq_formatter(self.freq+self.lo_freq))

    def get_samp_rate(self):
        return self.samp_rate

    def set_samp_rate(self, samp_rate):
        # Propagate a sample-rate change to every block derived from it.
        self.samp_rate = samp_rate
        self.rtlsdr_source_0.set_sample_rate(self.samp_rate)
        self.qtgui_sink_x_0.set_frequency_range(self.center_freq, self.samp_rate/8)
        self.low_pass_filter_0.set_taps(firdes.low_pass(1, self.samp_rate/8, 12000, 1000, firdes.WIN_HAMMING, 6.76))
        self.analog_sig_source_x_1.set_sampling_freq(self.samp_rate/8)

    def get_qpsk(self):
        return self.qpsk

    def set_qpsk(self, qpsk):
        self.qpsk = qpsk

    def get_data_freq(self):
        return self.data_freq

    def set_data_freq(self, data_freq):
        self.data_freq = data_freq
        Qt.QMetaObject.invokeMethod(self._data_freq_label, "setText", Qt.Q_ARG("QString", self.data_freq))
def main(top_block_cls=qam_decoder, options=None):
    """Qt bootstrap: build the flow graph, start it, run the Qt event loop."""
    from distutils.version import StrictVersion
    if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
        # Honor the user's preferred QT graphics system from the GR config.
        style = gr.prefs().get_string('qtgui', 'style', 'raster')
        Qt.QApplication.setGraphicsSystem(style)
    qapp = Qt.QApplication(sys.argv)
    tb = top_block_cls()
    tb.start()
    tb.show()

    def quitting():
        # Stop the flow graph cleanly when the Qt application exits.
        tb.stop()
        tb.wait()
    qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
    qapp.exec_()


if __name__ == '__main__':
    main()
|
# Ask the user for a number. Depending on whether the number is even or odd,
# print out an appropriate message to the
# user. Hint: how does an even / odd number react differently when divided by 2?
import math
class info():
    """Interactive parity checker: asks for a number and reports even/odd."""

    def __init__(self):
        pass

    def question(self, value=None):
        """Report whether *value* is even or odd and return it.

        When *value* is None (the original no-argument behavior), the number
        is read interactively from stdin.  Raises ValueError if the input is
        not numeric.

        Bug fix: the original stored the answer on ``self.question``, which
        clobbered this method and made any second call raise TypeError.  The
        answer is now kept in ``self.answer``.
        """
        if value is None:
            value = input("choose a number?")
        self.answer = value
        if float(value) % 2 == 0:
            print("This is even")
        else:
            print("This is odd")
        return value
# Demo: run one interactive round (blocks waiting for stdin input).
a = info()
a.question()
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import idlecars.model_helpers
import django.core.validators
class Migration(migrations.Migration):
    """Initial migration for the website app.

    Creates DriverSurvey, OwnerSurvey and Contact, then links each survey to
    Contact via a nullable ForeignKey (added after Contact exists so the
    reference resolves).
    """

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='DriverSurvey',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('source', models.CharField(max_length=32, verbose_name='How did you hear about idlecars?')),
                ('other_source', models.CharField(max_length=255, verbose_name='', blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='OwnerSurvey',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('source', models.CharField(max_length=32, verbose_name='How did you hear about idlecars?')),
                ('other_source', models.CharField(max_length=255, verbose_name='', blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('email', models.EmailField(unique=True, max_length=254, verbose_name='Email Address')),
                # Exactly five digits: regex plus min/max length validators.
                ('zipcode', models.CharField(max_length=5, verbose_name='Zip Code', validators=[django.core.validators.RegexValidator('^[0-9]+$', 'Only numbers are allowed in a zip code.', 'Invalid zip code'), django.core.validators.MinLengthValidator(5), django.core.validators.MaxLengthValidator(5)])),
                ('role', idlecars.model_helpers.ChoiceField(default='Driver', max_length=16, choices=[(b'driver', 'Driver'), (b'owner', 'Owner')])),
                ('created_time', models.DateTimeField(auto_now_add=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='ownersurvey',
            name='contact',
            field=models.ForeignKey(related_name='owner_survey', to='website.Contact', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='driversurvey',
            name='contact',
            field=models.ForeignKey(related_name='driver_survey', to='website.Contact', null=True),
            preserve_default=True,
        ),
    ]
|
#! /usr/bin/env python3
# Ioannis Broumas
# ioabro17@student.hh.se
# Dec 2020
# Remember OpenCV is BGR
# Read the camera on the roof and detect the location of ArUco markers on the map
# Can be adapted to detect something else on the board
import numpy as np
import cv2 as cv
from cv2 import aruco
#ROS 2
import rclpy
from rclpy.node import Node
from std_msgs.msg import String
from geometry_msgs.msg import Point
Sparkie_ID = 15 # Change accordingly
Drone_ID = 2 # Change accordingly
# Constant parameters used in Aruco methods
ARUCO_PARAMETERS = aruco.DetectorParameters_create()
ARUCO_DICT = aruco.Dictionary_get(aruco.DICT_6X6_250)
class Roof(Node):
    """ROS 2 node that publishes camera-derived positions of tracked objects."""

    def __init__(self):
        super().__init__('roof')
        # One geometry_msgs/Point publisher per tracked object, queue depth 10.
        for attr, topic in (('sparkie_publisher_', 'sparkieGPS'),
                            ('drone_publisher_', 'droneGPS')):
            setattr(self, attr, self.create_publisher(Point, topic, 10))
        self.get_logger().info("Node Roof initialized!")
def main(args=None):
    """Track ArUco markers on the ceiling-camera stream and publish their
    map positions (meters) as geometry_msgs/Point messages.

    Runs until the stream ends or ESC is pressed in the preview window.
    """
    rclpy.init(args=args)
    r = Roof()
    point = Point()
    # IP camera on the ceiling
    cam = cv.VideoCapture('rtsp://192.168.1.2:554/axis-media/media.amp')
    font = cv.FONT_HERSHEY_SIMPLEX
    if not cam.isOpened():
        # Clean shutdown if the RTSP stream cannot be opened.
        print('--(!)Error opening video capture')
        cam.release()
        cv.destroyAllWindows()
        r.destroy_node()
        rclpy.shutdown()
        exit()
    HorizontalPixels = cam.get(3)
    # print(cam.get(3)) # Width - Horizontal
    VerticalPixels = cam.get(4)
    # print(cam.get(4)) # Height - Vertical
    # Resolution = HorizontalPixels * VerticalPixels
    # Physical extent covered by the camera view, in mm (hand-measured;
    # commented values look like earlier calibrations -- confirm).
    VerticalDistance = 2225 # 2425
    HorizontalDistance = 3035 # 3635
    # Calculate mm to pixels
    y_mm_to_pixel = VerticalDistance / VerticalPixels
    x_mm_to_pixel = HorizontalDistance / HorizontalPixels
    while True:
        # Capturing each frame of our video stream
        ret, frame = cam.read()
        # If found, add object points, image points (after refining them)
        if ret == True:
            # grayscale image
            gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
            # Detect Aruco markers
            corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, ARUCO_DICT, parameters=ARUCO_PARAMETERS)
            if ids is not None:
                # One (id, 4-corner) pair per detected marker.
                for i, corner in zip(ids, corners):
                    if i == Sparkie_ID:
                        # Marker center = mean of the four corner pixels.
                        cx = (corner[0][0][0] + corner[0][1][0] + corner[0][2][0] + corner[0][3][0]) / 4
                        cy = (corner[0][0][1] + corner[0][1][1] + corner[0][2][1] + corner[0][3][1]) / 4
                        # Convert pixels to mm
                        X = x_mm_to_pixel * cx
                        Y = y_mm_to_pixel * cy
                        # Add offset and convert to meters (image y axis points
                        # down, map y axis up, hence the VerticalDistance flip).
                        X = round((X + 300) / 1000,3)
                        Y = round((VerticalDistance - Y + 200) / 1000,3)
                        # Prepare and publish the point
                        point.x = X
                        point.y = Y
                        point.z = .0
                        r.sparkie_publisher_.publish(point)
                        # Overlay the coordinates on the preview frame.
                        CX = 'X :' + str(float(X))
                        CY = 'Y :' + str(float(Y))
                        cv.putText(frame, CX, (0,45), font, 1, (255,0,0), 2, cv.LINE_AA)
                        cv.putText(frame, CY, (0,90), font, 1, (255,0,0), 2, cv.LINE_AA)
                        print(X, Y)
                    if i == Drone_ID:
                        # Find X, Y of the center pixel
                        cx = (corner[0][0][0] + corner[0][1][0] + corner[0][2][0] + corner[0][3][0]) / 4
                        cy = (corner[0][0][1] + corner[0][1][1] + corner[0][2][1] + corner[0][3][1]) / 4
                        # Convert pixels to mm
                        X = x_mm_to_pixel * cx
                        Y = y_mm_to_pixel * cy
                        # Prepare and publish the point.  NOTE(review): unlike
                        # the Sparkie branch this publishes mm, not meters, and
                        # applies no offset -- confirm that is intentional.
                        point.x = X
                        point.y = VerticalDistance - Y
                        point.z = .0
                        r.drone_publisher_.publish(point)
                        # Outline the detected marker in our image
                        # frame = aruco.drawDetectedMarkers(frame, corner, borderColor=(0, 0, 255))
                        print(X, Y)
                frame = aruco.drawDetectedMarkers(frame, corners, borderColor=(0, 0, 255))
            # Show the frame
            cv.imshow('A-GPS', frame)
        if cv.waitKey(10) == 27: # Wait for 10ms, if key == 27 (esc char) break
            break
    cam.release()
    cv.destroyAllWindows()
    # Destroy the node explicitly
    # (optional - otherwise it will be done automatically
    # when the garbage collector destroys the node object)
    r.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
|
import uuid
from django.db import models, transaction
from django.contrib.auth import get_user_model
from timebank.models import Account
class Order(models.Model):
    """A time-bank exchange: *requester* asks *grantor* for a service.

    Confirming an order (see :meth:`confirm_order`) transfers
    ``order_price`` credits from the requester's account to the grantor's.
    """

    class Meta:
        verbose_name = 'Pedido'
        verbose_name_plural = 'Pedidos'

    # Order lifecycle states.
    STATUS_PENDING = 0
    STATUS_CONFIRMED = 1
    STATUS_CHOICES = (
        (STATUS_PENDING, 'Pendente'),
        (STATUS_CONFIRMED, 'Efetuado'),
    )

    # Public identifier, safe to expose in URLs/APIs instead of the PK.
    uid = models.UUIDField(
        unique=True,
        editable=False,
        default=uuid.uuid4,
        verbose_name='Identificador Público'
    )
    requester = models.ForeignKey(
        get_user_model(),
        verbose_name='Solicitante',
        on_delete=models.PROTECT,
        related_name='requester',
    )
    grantor = models.ForeignKey(
        get_user_model(),
        verbose_name='Concedente',
        on_delete=models.PROTECT,
        related_name='grantor',
    )
    description = models.CharField(
        max_length=240,
        verbose_name='Descrição',
    )
    order_price = models.DecimalField(
        verbose_name='Valor da troca',
        decimal_places=1,
        max_digits=5,
        help_text='Valor da troca',
    )
    status = models.IntegerField(
        choices=STATUS_CHOICES,
        verbose_name='Status do pedido',
        # Same value as the previous literal 0; the named constant keeps the
        # default consistent with STATUS_CHOICES.
        default=STATUS_PENDING,
    )
    created = models.DateTimeField(
        auto_now_add=True
    )

    def __str__(self):
        """Human-readable (pt-BR) summary of the exchange."""
        price = self.order_price
        description = self.description
        requester = str(self.requester)
        grantor = str(self.grantor)
        exhibition = f'{requester} solicitou {description} de {grantor} por {price} '
        return exhibition

    @classmethod
    def confirm_order(cls, order_uid):
        """Confirm the order and transfer its price between accounts.

        Runs in one transaction with the order row locked
        (``select_for_update``) so concurrent confirmations serialize.
        Confirming an already-confirmed order is now a no-op, preventing
        the credits from being transferred twice.

        Raises ``Order.DoesNotExist`` if *order_uid* matches no order.
        """
        with transaction.atomic():
            order = cls.objects.select_for_update().get(uid=order_uid)
            if order.status == cls.STATUS_CONFIRMED:
                return  # already processed; avoid a double charge
            order_price = order.order_price
            order.status = cls.STATUS_CONFIRMED
            order.save()
            Account.withdraw(order.requester, order_price)
            Account.deposit(order.grantor, order_price)
|
# all the imports
import os
import sqlite3
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash
from oauth2client import client, GOOGLE_TOKEN_URI, GOOGLE_REVOKE_URI
from apiclient.discovery import build
import httplib2
import datetime
import json
app = Flask(__name__)  # create the application instance
app.config.from_object(__name__)  # load config from this file , flask_page.py

# Google OAuth client credentials, read once at startup.
with open('client_secrets.json') as secret_file:
    secrets = json.load(secret_file)

# Load default config and override config from an environment variable
app.config.update(dict(
    DATABASE=os.path.join(app.root_path, 'flask_page.db'),
    SECRET_KEY='development key',  # NOTE(review): replace before deploying
    USERNAME='admin',
    PASSWORD='default',
    CLIENT_ID=secrets['web']['client_id'],  # Load client id from file
    CLIENT_SECRET=secrets['web']['client_secret']  # Load client secret from file
))  # fixed: a stray trailing comma previously made this statement a 1-tuple
app.config.from_envvar('FLASK_PAGE_SETTINGS', silent=True)
@app.route('/')
def show_main_page():
    """Render the dashboard: saved sites, notes and upcoming calendar events."""
    db = get_db()
    sites = db.execute('select title, link, text, id from sites order by id desc').fetchall()
    notes = db.execute('select title, text, id from notes order by id desc').fetchall()
    return render_template('show_main_page.html', sites=sites, notes=notes,
                           events=get_calendar())
def connect_db():
    """Open a new SQLite connection with dict-like row access."""
    conn = sqlite3.connect(app.config["DATABASE"])
    conn.row_factory = sqlite3.Row  # rows behave like mappings, not tuples
    return conn
def init_db():
    """Create (or refresh) the database schema from schema.sql."""
    db = get_db()
    with app.open_resource('schema.sql', mode='r') as f:
        db.cursor().executescript(f.read())
    db.commit()
@app.cli.command('initdb')
def initdb_command():
    """Initializes the database."""
    # Exposed as `flask initdb` on the command line.
    init_db()
    print('Initialized the database.')
def get_db():
    """Return the request-scoped SQLite connection, creating it on first use."""
    db = getattr(g, 'sqlite_db', None)
    if db is None:
        db = g.sqlite_db = connect_db()
    return db
@app.teardown_appcontext
def close_db(error):
    """Close the request's SQLite connection, if one was opened."""
    db = getattr(g, 'sqlite_db', None)
    if db is not None:
        db.close()
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Single-user login: check submitted credentials against app config."""
    error = None
    if request.method == 'POST':
        form = request.form
        # Username is checked first; the password field is only consulted
        # when the username matches.
        if form['username'] != app.config['USERNAME']:
            error = 'Invalid username'
        elif form['password'] != app.config['PASSWORD']:
            error = 'Invalid password'
        else:
            session['logged_in'] = True
            return redirect(url_for('show_main_page'))
    return render_template('login.html', error=error)
@app.route('/logout')
def logout():
    """Drop the login flag and bounce back to the main page."""
    if 'logged_in' in session:
        del session['logged_in']
    return redirect(url_for('show_main_page'))
@app.route('/add_website', methods=['GET', 'POST'])
def add_website():
    """Show the add-website form; on POST, insert the new site (login required)."""
    if request.method == 'POST':
        if not session.get('logged_in'):
            abort(401)
        form = request.form
        db = get_db()
        db.execute('insert into sites (title, link, text) values (?, ?, ?)',
                   [form['title'], form['link'], form['text']])
        db.commit()
        return redirect(url_for('show_main_page'))
    return render_template('add_website.html')
@app.route('/delete_site/<int:site_id>', methods=['POST'])
def delete_site(site_id):
    """Delete the saved site with the given id (login required).

    The id comes from the int route converter, but the query now uses a
    parameterized placeholder instead of string concatenation anyway.
    """
    if not session.get('logged_in'):
        abort(401)
    db = get_db()
    db.execute('delete from sites where id=?', (site_id,))
    db.commit()
    return redirect(url_for('show_main_page'))
@app.route('/edit_site/<int:edit_site_id>', methods=['GET', 'POST'])
def edit_site(edit_site_id):
    """Edit a saved site: GET shows the pre-filled form, POST saves it.

    Login is required for both branches (check hoisted from the two
    duplicated per-branch checks).  The GET lookup now uses a parameterized
    query instead of string concatenation.
    """
    if not session.get('logged_in'):
        abort(401)
    db = get_db()
    if request.method == 'POST':
        db.execute('update sites set title=?,link=?,text=? where id=?',
                   [request.form['title'], request.form['link'], request.form['text'], str(edit_site_id)])
        db.commit()
        return redirect(url_for('show_main_page'))
    cur = db.execute('select title, link, text from sites where id=?', (edit_site_id,))
    site = cur.fetchone()
    return render_template('edit_site.html', site=site, site_id=edit_site_id)
@app.route('/add_note', methods=['GET', 'POST'])
def add_note():
    """Show the add-note form; on POST, insert the new note (login required)."""
    if request.method == 'POST':
        if not session.get('logged_in'):
            abort(401)
        form = request.form
        db = get_db()
        db.execute('insert into notes (title, text) values (?, ?)',
                   [form['title'], form['text']])
        db.commit()
        return redirect(url_for('show_main_page'))
    return render_template('add_note.html')
@app.route('/delete_note/<int:del_note_id>', methods=['POST'])
def delete_note(del_note_id):
    """Delete the note with the given id (login required).

    Query switched from string concatenation to a parameterized placeholder.
    """
    if not session.get('logged_in'):
        abort(401)
    db = get_db()
    db.execute('delete from notes where id=?', (del_note_id,))
    db.commit()
    return redirect(url_for('show_main_page'))
@app.route('/edit_note/<int:edit_note_id>', methods=['GET', 'POST'])
def edit_note(edit_note_id):
    """Edit a note: GET shows the pre-filled form, POST saves it.

    Login is required for both branches (check hoisted).  The GET lookup now
    uses a parameterized query instead of string concatenation.
    """
    if not session.get('logged_in'):
        abort(401)
    db = get_db()
    if request.method == 'POST':
        db.execute('update notes set title=?,text=? where id=?',
                   [request.form['title'], request.form['text'], str(edit_note_id)])
        db.commit()
        return redirect(url_for('show_main_page'))
    cur = db.execute('select title, text from notes where id=?', (edit_note_id,))
    note = cur.fetchone()
    return render_template('edit_note.html', note=note, note_id=edit_note_id)
# See https://developers.google.com/google-apps/calendar/quickstart/python
# https://developers.google.com/api-client-library/python/auth/web-app
# https://stackoverflow.com/questions/22915461/google-login-server-side-flow-storing-credentials-python-examples
@app.route('/oauth2callback')
def oauth2callback():
    """Two-step Google OAuth2 flow for read-only Calendar access.

    First visit (no ``code`` query arg): redirect the user to Google's
    consent page.  Redirected return (``code`` present): exchange the code
    for tokens and stash them in the session.
    """
    flow = client.flow_from_clientsecrets(
        'client_secrets.json',
        scope='https://www.googleapis.com/auth/calendar.readonly',
        redirect_uri='http://127.0.0.1:5000/oauth2callback',
        prompt='consent')
    flow.params['access_type'] = 'offline'  # offline access
    flow.params['include_granted_scopes'] = 'true'  # incremental auth
    if (request.args.get('error')):
        flash('Google login failed')
    elif 'code' not in request.args:
        auth_uri = flow.step1_get_authorize_url()
        return redirect(auth_uri)
    else:
        auth_code = request.args.get('code')
        credentials = flow.step2_exchange(auth_code)
        session['access_token'] = credentials.access_token
        session['refresh_token'] = credentials.refresh_token
        expires_in = credentials.get_access_token().expires_in
        # NOTE(review): debug print leaks the access token to stdout/logs.
        print('access_token:', credentials.get_access_token())
        session['expire_time'] = datetime.datetime.now() + datetime.timedelta(seconds=expires_in)
        flash('Google login succeeded')
    return redirect(url_for('show_main_page'))
def get_calendar():
    """Fetch the next 7 days of Google Calendar events for the logged-in user.

    Returns a list of dicts with display strings (day, date, start/end time,
    summary), or [] when no OAuth access token is in the session.  Refreshes
    the access token first when it has expired.
    """
    if not 'access_token' in session:
        print('no access token')
        return []
    if session['expire_time'] < datetime.datetime.now():
        (session['access_token'], expires_in) = refresh_access_token()
        session['expire_time'] = datetime.datetime.now() + datetime.timedelta(seconds=expires_in)
    credentials = client.AccessTokenCredentials(session['access_token'], 'user-agent-value')
    http_auth = credentials.authorize(httplib2.Http())
    service = build('calendar', 'v3', http=http_auth)
    now = datetime.datetime.utcnow().isoformat() + 'Z'  # 'Z' indicates UTC time
    time_max = (datetime.datetime.now() + datetime.timedelta(days=7)).isoformat() + 'Z'
    eventsResult = service.events().list(
        calendarId='primary', timeMin=now, timeMax=time_max, singleEvents=True,
        orderBy='startTime').execute()
    events = eventsResult.get('items', [])
    appointments = []
    for event in events:
        # All-day events carry 'date' instead of 'dateTime' and are skipped.
        if (event['start'].get('dateTime')):
            appointment = {}
            rcf_start = event['start'].get('dateTime')
            # TODO(review): the UTC offset is hard-coded to -07:00; events in
            # any other timezone will raise ValueError here.
            start = datetime.datetime.strptime(rcf_start, '%Y-%m-%dT%H:%M:%S-07:00')
            appointment['start_day'] = start.strftime("%a")
            appointment['start_date'] = start.strftime("%Y-%m-%d")
            appointment['start_time'] = start.strftime("%-I:%M")
            rcf_end = event['end'].get('dateTime')
            end = datetime.datetime.strptime(rcf_end, '%Y-%m-%dT%H:%M:%S-07:00')
            appointment['end_time'] = end.strftime("%-I:%M")
            appointment['summary'] = event['summary']
            appointments.append(appointment)
    return appointments  # fixed: dropped stray trailing semicolon
# https://stackoverflow.com/questions/27771324/google-api-getting-credentials-from-refresh-token-with-oauth2client-client
def refresh_access_token():
    """Exchange the stored refresh token for a new access token.

    Returns ``(access_token, expires_in_seconds)``.

    Fix: removed the leftover ``print('hi')`` debug line and the
    ``print(credentials.to_json())`` call, which leaked the full credential
    set (tokens and client secret) to stdout/logs.
    """
    credentials = client.OAuth2Credentials(
        None, app.config['CLIENT_ID'], app.config['CLIENT_SECRET'], session['refresh_token'], None, GOOGLE_TOKEN_URI,
        None, revoke_uri=GOOGLE_REVOKE_URI)
    # refresh the access token (or just try using the service)
    credentials.refresh(httplib2.Http())
    return (credentials.access_token, credentials.get_access_token().expires_in)
import jsonschema
import numpy as np
import pytest
import oscope.schema as schema
# Metadata payload that satisfies oscope's message schema; baseline fixture
# for the validator tests below.
VALID_METADATA = {
    "sender": {
        "id": "foo",
        "name": "bar",
        "session": "baz",
        "time": 0
    },
    "trace": {
        "samples": 100,
        "frequency": 3000000,
        "channel": 1
    },
    "sequence": 0
}

# A trace buffer whose length matches the metadata's sample count.
VALID_DATA = np.arange(VALID_METADATA["trace"]["samples"], dtype=np.float64)
def test_validator_fails():
    """Metadata missing every required field must be rejected."""
    with pytest.raises(jsonschema.ValidationError):
        schema.validate_message_metadata({})
def test_validator_success():
    """The canonical fixture passes metadata validation without raising."""
    schema.validate_message_metadata(VALID_METADATA)
def test_get_sender_meta():
    """get_sender_meta output must itself satisfy SENDER_SCHEMA."""
    meta = schema.get_sender_meta("foo", "bar")
    jsonschema.validate(meta, schema.SENDER_SCHEMA)
|
from django.contrib import admin
from .models import Question,Quiz,QuizTaker,UserTracker,AccountData
# Register your models here.
# Expose the quiz-app models in the Django admin with default ModelAdmin
# options (no custom list displays or filters).
admin.site.register(Question)
admin.site.register(Quiz)
admin.site.register(QuizTaker)
admin.site.register(UserTracker)
admin.site.register(AccountData)
|
import pytest
from hw9.hw9_t02 import Suppressor, suppressor
# Both implementations under test: the class-based context manager and the
# decorator/generator-based one.
test_subjects = [Suppressor, suppressor]


@pytest.fixture(params=test_subjects, ids=[subj.__name__ for subj in test_subjects])
def function(request):
    """Parametrize each test over both suppressor implementations."""
    return request.param
def test_context_manager_positive_suppress_index_error(function):
    """IndexError raised inside the managed block must be swallowed."""
    with function(IndexError):
        [][1]
    assert True
def test_context_manager_positive_suppress_key_error(function):
    """KeyError raised inside the managed block must be swallowed."""
    with function(KeyError):
        {"a": 1}["b"]
    assert True
def test_context_manager_suppress_wrong_error_raise_value_error(function):
    """An exception of a type other than the suppressed one must propagate."""
    with pytest.raises(ValueError): # noqa: PT011,PT012
        with function(KeyError):
            int("string")
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"networkcloud virtualmachine list",
)
class List(AAZCommand):
"""List virtual machines in the provided resource group or subscription.
:example: List virtual machines for resource group
az networkcloud virtualmachine list --resource-group "resourceGroupName"
:example: List virtual machines for subscription
az networkcloud virtualmachine list
"""
_aaz_info = {
"version": "2023-07-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/providers/microsoft.networkcloud/virtualmachines", "2023-07-01"],
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.networkcloud/virtualmachines", "2023-07-01"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
return self.build_paging(self._execute_operations, self._output)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg()
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
condition_0 = has_value(self.ctx.args.resource_group) and has_value(self.ctx.subscription_id)
condition_1 = has_value(self.ctx.subscription_id) and has_value(self.ctx.args.resource_group) is not True
if condition_0:
self.VirtualMachinesListByResourceGroup(ctx=self.ctx)()
if condition_1:
self.VirtualMachinesListBySubscription(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)
next_link = self.deserialize_output(self.ctx.vars.instance.next_link)
return result, next_link
class VirtualMachinesListByResourceGroup(AAZHttpOperation):
    """GET the Microsoft.NetworkCloud virtualMachines collection scoped to a resource group.

    Auto-generated AAZ operation; the schema below mirrors the 2023-07-01
    service API. Do not hand-edit field definitions — regenerate instead.
    """
    CLIENT_TYPE = "MgmtClient"

    def __call__(self, *args, **kwargs):
        # Send synchronously; only HTTP 200 is a success for this list call.
        request = self.make_request()
        session = self.client.send_request(request=request, stream=False, **kwargs)
        if session.http_response.status_code in [200]:
            return self.on_200(session)
        return self.on_error(session.http_response)

    @property
    def url(self):
        return self.client.format_url(
            "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/virtualMachines",
            **self.url_parameters
        )

    @property
    def method(self):
        return "GET"

    @property
    def error_format(self):
        return "MgmtErrorFormat"

    @property
    def url_parameters(self):
        # Both path segments are mandatory for the resource-group-scoped URL.
        parameters = {
            **self.serialize_url_param(
                "resourceGroupName", self.ctx.args.resource_group,
                required=True,
            ),
            **self.serialize_url_param(
                "subscriptionId", self.ctx.subscription_id,
                required=True,
            ),
        }
        return parameters

    @property
    def query_parameters(self):
        parameters = {
            **self.serialize_query_param(
                "api-version", "2023-07-01",
                required=True,
            ),
        }
        return parameters

    @property
    def header_parameters(self):
        parameters = {
            **self.serialize_header_param(
                "Accept", "application/json",
            ),
        }
        return parameters

    def on_200(self, session):
        # Store the deserialized page in ctx so _output() can read it later.
        data = self.deserialize_http_content(session)
        self.ctx.set_var(
            "instance",
            data,
            schema_builder=self._build_schema_on_200
        )

    # Cached class-level schema; built lazily on first use.
    _schema_on_200 = None

    @classmethod
    def _build_schema_on_200(cls):
        """Build (once) and return the response schema for the 200 payload."""
        if cls._schema_on_200 is not None:
            return cls._schema_on_200

        cls._schema_on_200 = AAZObjectType()

        _schema_on_200 = cls._schema_on_200
        _schema_on_200.next_link = AAZStrType(
            serialized_name="nextLink",
        )
        _schema_on_200.value = AAZListType()

        value = cls._schema_on_200.value
        value.Element = AAZObjectType()

        _element = cls._schema_on_200.value.Element
        _element.extended_location = AAZObjectType(
            serialized_name="extendedLocation",
            flags={"required": True},
        )
        _element.id = AAZStrType(
            flags={"read_only": True},
        )
        _element.location = AAZStrType(
            flags={"required": True},
        )
        _element.name = AAZStrType(
            flags={"read_only": True},
        )
        _element.properties = AAZObjectType(
            flags={"required": True, "client_flatten": True},
        )
        _element.system_data = AAZObjectType(
            serialized_name="systemData",
            flags={"read_only": True},
        )
        _element.tags = AAZDictType()
        _element.type = AAZStrType(
            flags={"read_only": True},
        )

        extended_location = cls._schema_on_200.value.Element.extended_location
        extended_location.name = AAZStrType(
            flags={"required": True},
        )
        extended_location.type = AAZStrType(
            flags={"required": True},
        )

        properties = cls._schema_on_200.value.Element.properties
        properties.admin_username = AAZStrType(
            serialized_name="adminUsername",
            flags={"required": True},
        )
        properties.availability_zone = AAZStrType(
            serialized_name="availabilityZone",
            flags={"read_only": True},
        )
        properties.bare_metal_machine_id = AAZStrType(
            serialized_name="bareMetalMachineId",
            flags={"read_only": True},
        )
        properties.boot_method = AAZStrType(
            serialized_name="bootMethod",
        )
        properties.cloud_services_network_attachment = AAZObjectType(
            serialized_name="cloudServicesNetworkAttachment",
            flags={"required": True},
        )
        properties.cluster_id = AAZStrType(
            serialized_name="clusterId",
            flags={"read_only": True},
        )
        properties.cpu_cores = AAZIntType(
            serialized_name="cpuCores",
            flags={"required": True},
        )
        properties.detailed_status = AAZStrType(
            serialized_name="detailedStatus",
            flags={"read_only": True},
        )
        properties.detailed_status_message = AAZStrType(
            serialized_name="detailedStatusMessage",
            flags={"read_only": True},
        )
        properties.isolate_emulator_thread = AAZStrType(
            serialized_name="isolateEmulatorThread",
        )
        properties.memory_size_gb = AAZIntType(
            serialized_name="memorySizeGB",
            flags={"required": True},
        )
        properties.network_attachments = AAZListType(
            serialized_name="networkAttachments",
        )
        properties.network_data = AAZStrType(
            serialized_name="networkData",
        )
        properties.placement_hints = AAZListType(
            serialized_name="placementHints",
        )
        properties.power_state = AAZStrType(
            serialized_name="powerState",
            flags={"read_only": True},
        )
        properties.provisioning_state = AAZStrType(
            serialized_name="provisioningState",
            flags={"read_only": True},
        )
        properties.ssh_public_keys = AAZListType(
            serialized_name="sshPublicKeys",
        )
        properties.storage_profile = AAZObjectType(
            serialized_name="storageProfile",
            flags={"required": True},
        )
        properties.user_data = AAZStrType(
            serialized_name="userData",
        )
        properties.virtio_interface = AAZStrType(
            serialized_name="virtioInterface",
        )
        properties.vm_device_model = AAZStrType(
            serialized_name="vmDeviceModel",
        )
        properties.vm_image = AAZStrType(
            serialized_name="vmImage",
            flags={"required": True},
        )
        properties.vm_image_repository_credentials = AAZObjectType(
            serialized_name="vmImageRepositoryCredentials",
        )
        properties.volumes = AAZListType(
            flags={"read_only": True},
        )

        cloud_services_network_attachment = cls._schema_on_200.value.Element.properties.cloud_services_network_attachment
        cloud_services_network_attachment.attached_network_id = AAZStrType(
            serialized_name="attachedNetworkId",
            flags={"required": True},
        )
        cloud_services_network_attachment.default_gateway = AAZStrType(
            serialized_name="defaultGateway",
        )
        cloud_services_network_attachment.ip_allocation_method = AAZStrType(
            serialized_name="ipAllocationMethod",
            flags={"required": True},
        )
        cloud_services_network_attachment.ipv4_address = AAZStrType(
            serialized_name="ipv4Address",
        )
        cloud_services_network_attachment.ipv6_address = AAZStrType(
            serialized_name="ipv6Address",
        )
        cloud_services_network_attachment.mac_address = AAZStrType(
            serialized_name="macAddress",
            flags={"read_only": True},
        )
        cloud_services_network_attachment.network_attachment_name = AAZStrType(
            serialized_name="networkAttachmentName",
        )

        network_attachments = cls._schema_on_200.value.Element.properties.network_attachments
        network_attachments.Element = AAZObjectType()

        _element = cls._schema_on_200.value.Element.properties.network_attachments.Element
        _element.attached_network_id = AAZStrType(
            serialized_name="attachedNetworkId",
            flags={"required": True},
        )
        _element.default_gateway = AAZStrType(
            serialized_name="defaultGateway",
        )
        _element.ip_allocation_method = AAZStrType(
            serialized_name="ipAllocationMethod",
            flags={"required": True},
        )
        _element.ipv4_address = AAZStrType(
            serialized_name="ipv4Address",
        )
        _element.ipv6_address = AAZStrType(
            serialized_name="ipv6Address",
        )
        _element.mac_address = AAZStrType(
            serialized_name="macAddress",
            flags={"read_only": True},
        )
        _element.network_attachment_name = AAZStrType(
            serialized_name="networkAttachmentName",
        )

        placement_hints = cls._schema_on_200.value.Element.properties.placement_hints
        placement_hints.Element = AAZObjectType()

        _element = cls._schema_on_200.value.Element.properties.placement_hints.Element
        _element.hint_type = AAZStrType(
            serialized_name="hintType",
            flags={"required": True},
        )
        _element.resource_id = AAZStrType(
            serialized_name="resourceId",
            flags={"required": True},
        )
        _element.scheduling_execution = AAZStrType(
            serialized_name="schedulingExecution",
            flags={"required": True},
        )
        _element.scope = AAZStrType(
            flags={"required": True},
        )

        ssh_public_keys = cls._schema_on_200.value.Element.properties.ssh_public_keys
        ssh_public_keys.Element = AAZObjectType()

        _element = cls._schema_on_200.value.Element.properties.ssh_public_keys.Element
        _element.key_data = AAZStrType(
            serialized_name="keyData",
            flags={"required": True},
        )

        storage_profile = cls._schema_on_200.value.Element.properties.storage_profile
        storage_profile.os_disk = AAZObjectType(
            serialized_name="osDisk",
            flags={"required": True},
        )
        storage_profile.volume_attachments = AAZListType(
            serialized_name="volumeAttachments",
        )

        os_disk = cls._schema_on_200.value.Element.properties.storage_profile.os_disk
        os_disk.create_option = AAZStrType(
            serialized_name="createOption",
        )
        os_disk.delete_option = AAZStrType(
            serialized_name="deleteOption",
        )
        os_disk.disk_size_gb = AAZIntType(
            serialized_name="diskSizeGB",
            flags={"required": True},
        )

        volume_attachments = cls._schema_on_200.value.Element.properties.storage_profile.volume_attachments
        volume_attachments.Element = AAZStrType()

        vm_image_repository_credentials = cls._schema_on_200.value.Element.properties.vm_image_repository_credentials
        vm_image_repository_credentials.password = AAZStrType(
            flags={"required": True, "secret": True},
        )
        vm_image_repository_credentials.registry_url = AAZStrType(
            serialized_name="registryUrl",
            flags={"required": True},
        )
        vm_image_repository_credentials.username = AAZStrType(
            flags={"required": True},
        )

        volumes = cls._schema_on_200.value.Element.properties.volumes
        volumes.Element = AAZStrType()

        system_data = cls._schema_on_200.value.Element.system_data
        system_data.created_at = AAZStrType(
            serialized_name="createdAt",
        )
        system_data.created_by = AAZStrType(
            serialized_name="createdBy",
        )
        system_data.created_by_type = AAZStrType(
            serialized_name="createdByType",
        )
        system_data.last_modified_at = AAZStrType(
            serialized_name="lastModifiedAt",
        )
        system_data.last_modified_by = AAZStrType(
            serialized_name="lastModifiedBy",
        )
        system_data.last_modified_by_type = AAZStrType(
            serialized_name="lastModifiedByType",
        )

        tags = cls._schema_on_200.value.Element.tags
        tags.Element = AAZStrType()

        return cls._schema_on_200
class VirtualMachinesListBySubscription(AAZHttpOperation):
    """GET the Microsoft.NetworkCloud virtualMachines collection across the whole subscription.

    Auto-generated AAZ operation; identical to the resource-group variant
    except for the URL scope. Do not hand-edit — regenerate instead.
    """
    CLIENT_TYPE = "MgmtClient"

    def __call__(self, *args, **kwargs):
        # Send synchronously; only HTTP 200 is a success for this list call.
        request = self.make_request()
        session = self.client.send_request(request=request, stream=False, **kwargs)
        if session.http_response.status_code in [200]:
            return self.on_200(session)
        return self.on_error(session.http_response)

    @property
    def url(self):
        return self.client.format_url(
            "/subscriptions/{subscriptionId}/providers/Microsoft.NetworkCloud/virtualMachines",
            **self.url_parameters
        )

    @property
    def method(self):
        return "GET"

    @property
    def error_format(self):
        return "MgmtErrorFormat"

    @property
    def url_parameters(self):
        # Subscription-wide scope: only the subscription id goes into the path.
        parameters = {
            **self.serialize_url_param(
                "subscriptionId", self.ctx.subscription_id,
                required=True,
            ),
        }
        return parameters

    @property
    def query_parameters(self):
        parameters = {
            **self.serialize_query_param(
                "api-version", "2023-07-01",
                required=True,
            ),
        }
        return parameters

    @property
    def header_parameters(self):
        parameters = {
            **self.serialize_header_param(
                "Accept", "application/json",
            ),
        }
        return parameters

    def on_200(self, session):
        # Store the deserialized page in ctx so _output() can read it later.
        data = self.deserialize_http_content(session)
        self.ctx.set_var(
            "instance",
            data,
            schema_builder=self._build_schema_on_200
        )

    # Cached class-level schema; built lazily on first use.
    _schema_on_200 = None

    @classmethod
    def _build_schema_on_200(cls):
        """Build (once) and return the response schema for the 200 payload."""
        if cls._schema_on_200 is not None:
            return cls._schema_on_200

        cls._schema_on_200 = AAZObjectType()

        _schema_on_200 = cls._schema_on_200
        _schema_on_200.next_link = AAZStrType(
            serialized_name="nextLink",
        )
        _schema_on_200.value = AAZListType()

        value = cls._schema_on_200.value
        value.Element = AAZObjectType()

        _element = cls._schema_on_200.value.Element
        _element.extended_location = AAZObjectType(
            serialized_name="extendedLocation",
            flags={"required": True},
        )
        _element.id = AAZStrType(
            flags={"read_only": True},
        )
        _element.location = AAZStrType(
            flags={"required": True},
        )
        _element.name = AAZStrType(
            flags={"read_only": True},
        )
        _element.properties = AAZObjectType(
            flags={"required": True, "client_flatten": True},
        )
        _element.system_data = AAZObjectType(
            serialized_name="systemData",
            flags={"read_only": True},
        )
        _element.tags = AAZDictType()
        _element.type = AAZStrType(
            flags={"read_only": True},
        )

        extended_location = cls._schema_on_200.value.Element.extended_location
        extended_location.name = AAZStrType(
            flags={"required": True},
        )
        extended_location.type = AAZStrType(
            flags={"required": True},
        )

        properties = cls._schema_on_200.value.Element.properties
        properties.admin_username = AAZStrType(
            serialized_name="adminUsername",
            flags={"required": True},
        )
        properties.availability_zone = AAZStrType(
            serialized_name="availabilityZone",
            flags={"read_only": True},
        )
        properties.bare_metal_machine_id = AAZStrType(
            serialized_name="bareMetalMachineId",
            flags={"read_only": True},
        )
        properties.boot_method = AAZStrType(
            serialized_name="bootMethod",
        )
        properties.cloud_services_network_attachment = AAZObjectType(
            serialized_name="cloudServicesNetworkAttachment",
            flags={"required": True},
        )
        properties.cluster_id = AAZStrType(
            serialized_name="clusterId",
            flags={"read_only": True},
        )
        properties.cpu_cores = AAZIntType(
            serialized_name="cpuCores",
            flags={"required": True},
        )
        properties.detailed_status = AAZStrType(
            serialized_name="detailedStatus",
            flags={"read_only": True},
        )
        properties.detailed_status_message = AAZStrType(
            serialized_name="detailedStatusMessage",
            flags={"read_only": True},
        )
        properties.isolate_emulator_thread = AAZStrType(
            serialized_name="isolateEmulatorThread",
        )
        properties.memory_size_gb = AAZIntType(
            serialized_name="memorySizeGB",
            flags={"required": True},
        )
        properties.network_attachments = AAZListType(
            serialized_name="networkAttachments",
        )
        properties.network_data = AAZStrType(
            serialized_name="networkData",
        )
        properties.placement_hints = AAZListType(
            serialized_name="placementHints",
        )
        properties.power_state = AAZStrType(
            serialized_name="powerState",
            flags={"read_only": True},
        )
        properties.provisioning_state = AAZStrType(
            serialized_name="provisioningState",
            flags={"read_only": True},
        )
        properties.ssh_public_keys = AAZListType(
            serialized_name="sshPublicKeys",
        )
        properties.storage_profile = AAZObjectType(
            serialized_name="storageProfile",
            flags={"required": True},
        )
        properties.user_data = AAZStrType(
            serialized_name="userData",
        )
        properties.virtio_interface = AAZStrType(
            serialized_name="virtioInterface",
        )
        properties.vm_device_model = AAZStrType(
            serialized_name="vmDeviceModel",
        )
        properties.vm_image = AAZStrType(
            serialized_name="vmImage",
            flags={"required": True},
        )
        properties.vm_image_repository_credentials = AAZObjectType(
            serialized_name="vmImageRepositoryCredentials",
        )
        properties.volumes = AAZListType(
            flags={"read_only": True},
        )

        cloud_services_network_attachment = cls._schema_on_200.value.Element.properties.cloud_services_network_attachment
        cloud_services_network_attachment.attached_network_id = AAZStrType(
            serialized_name="attachedNetworkId",
            flags={"required": True},
        )
        cloud_services_network_attachment.default_gateway = AAZStrType(
            serialized_name="defaultGateway",
        )
        cloud_services_network_attachment.ip_allocation_method = AAZStrType(
            serialized_name="ipAllocationMethod",
            flags={"required": True},
        )
        cloud_services_network_attachment.ipv4_address = AAZStrType(
            serialized_name="ipv4Address",
        )
        cloud_services_network_attachment.ipv6_address = AAZStrType(
            serialized_name="ipv6Address",
        )
        cloud_services_network_attachment.mac_address = AAZStrType(
            serialized_name="macAddress",
            flags={"read_only": True},
        )
        cloud_services_network_attachment.network_attachment_name = AAZStrType(
            serialized_name="networkAttachmentName",
        )

        network_attachments = cls._schema_on_200.value.Element.properties.network_attachments
        network_attachments.Element = AAZObjectType()

        _element = cls._schema_on_200.value.Element.properties.network_attachments.Element
        _element.attached_network_id = AAZStrType(
            serialized_name="attachedNetworkId",
            flags={"required": True},
        )
        _element.default_gateway = AAZStrType(
            serialized_name="defaultGateway",
        )
        _element.ip_allocation_method = AAZStrType(
            serialized_name="ipAllocationMethod",
            flags={"required": True},
        )
        _element.ipv4_address = AAZStrType(
            serialized_name="ipv4Address",
        )
        _element.ipv6_address = AAZStrType(
            serialized_name="ipv6Address",
        )
        _element.mac_address = AAZStrType(
            serialized_name="macAddress",
            flags={"read_only": True},
        )
        _element.network_attachment_name = AAZStrType(
            serialized_name="networkAttachmentName",
        )

        placement_hints = cls._schema_on_200.value.Element.properties.placement_hints
        placement_hints.Element = AAZObjectType()

        _element = cls._schema_on_200.value.Element.properties.placement_hints.Element
        _element.hint_type = AAZStrType(
            serialized_name="hintType",
            flags={"required": True},
        )
        _element.resource_id = AAZStrType(
            serialized_name="resourceId",
            flags={"required": True},
        )
        _element.scheduling_execution = AAZStrType(
            serialized_name="schedulingExecution",
            flags={"required": True},
        )
        _element.scope = AAZStrType(
            flags={"required": True},
        )

        ssh_public_keys = cls._schema_on_200.value.Element.properties.ssh_public_keys
        ssh_public_keys.Element = AAZObjectType()

        _element = cls._schema_on_200.value.Element.properties.ssh_public_keys.Element
        _element.key_data = AAZStrType(
            serialized_name="keyData",
            flags={"required": True},
        )

        storage_profile = cls._schema_on_200.value.Element.properties.storage_profile
        storage_profile.os_disk = AAZObjectType(
            serialized_name="osDisk",
            flags={"required": True},
        )
        storage_profile.volume_attachments = AAZListType(
            serialized_name="volumeAttachments",
        )

        os_disk = cls._schema_on_200.value.Element.properties.storage_profile.os_disk
        os_disk.create_option = AAZStrType(
            serialized_name="createOption",
        )
        os_disk.delete_option = AAZStrType(
            serialized_name="deleteOption",
        )
        os_disk.disk_size_gb = AAZIntType(
            serialized_name="diskSizeGB",
            flags={"required": True},
        )

        volume_attachments = cls._schema_on_200.value.Element.properties.storage_profile.volume_attachments
        volume_attachments.Element = AAZStrType()

        vm_image_repository_credentials = cls._schema_on_200.value.Element.properties.vm_image_repository_credentials
        vm_image_repository_credentials.password = AAZStrType(
            flags={"required": True, "secret": True},
        )
        vm_image_repository_credentials.registry_url = AAZStrType(
            serialized_name="registryUrl",
            flags={"required": True},
        )
        vm_image_repository_credentials.username = AAZStrType(
            flags={"required": True},
        )

        volumes = cls._schema_on_200.value.Element.properties.volumes
        volumes.Element = AAZStrType()

        system_data = cls._schema_on_200.value.Element.system_data
        system_data.created_at = AAZStrType(
            serialized_name="createdAt",
        )
        system_data.created_by = AAZStrType(
            serialized_name="createdBy",
        )
        system_data.created_by_type = AAZStrType(
            serialized_name="createdByType",
        )
        system_data.last_modified_at = AAZStrType(
            serialized_name="lastModifiedAt",
        )
        system_data.last_modified_by = AAZStrType(
            serialized_name="lastModifiedBy",
        )
        system_data.last_modified_by_type = AAZStrType(
            serialized_name="lastModifiedByType",
        )

        tags = cls._schema_on_200.value.Element.tags
        tags.Element = AAZStrType()

        return cls._schema_on_200
class _ListHelper:
    """Helper class for List"""


# Public API of this generated module: only the List command is exported.
__all__ = ["List"]
|
# -*- coding:utf-8 -*-
# author: dzhhey
# Usage text imitating OpenSSH scp's help output. Written line-by-line to the
# "buffer" file whenever parse() sees a help request or malformed arguments.
help_ = ["usage: scp [-346BCpqrTv] [-c cipher] [-F ssh_config] [-i identity_file]\r\n",
         "[-J destination] [-l limit] [-o ssh_option] [-P port]\r\n",
         " [-S program] source ... target\r\n"]
def parse(args_=None):
try:
if len(args_) == 1:
if args_[0] == "-v" or args_[0] == "-V":
with open("buffer", "w") as f:
f.writelines(help_)
elif len(args_) > 1:
if len(args_) == 2:
usr_ip = args_[0].split(":")[0]
statement = usr_ip + "'s password:"
with open("buffer", "w") as f:
f.write(statement)
with open("control", "w") as f:
f.write(usr_ip)
else:
for i in args_:
if ":" in i:
usr_ip = i.split(":")[0]
statement = usr_ip + "'s password:"
with open("buffer", "w") as f:
f.write(statement)
with open("control", "w") as f:
f.write(usr_ip)
else:
with open("buffer", "w") as f:
f.writelines(help_)
except Exception:
with open("buffer", "w") as f:
f.writelines(help_)
|
from django.urls import path, re_path
from . import views
# URLconf for the storage app.
app_name = 'storage'

urlpatterns = [
    path('user_add_api', views.user_add_api, name='user_add_api'),  # add a user (API endpoint)
    path('center', views.user_center, name='user_center'),  # user center page
    re_path(r'^$', views.index, name='index'),  # app index page
]
"""Functions for signal detection theory
The functions in this module help calculate dprime and ROC curves
"""
from __future__ import division
from scipy.stats import norm
from math import exp,sqrt
Z = norm.ppf  # inverse of the standard normal CDF: maps a rate in (0, 1) to its z-score
import pandas as pd
def calc_sdt(data, coding_dict=None, measures=None):
    """Calculate signal detection stats (e.g., dprime, criterion, beta) from a pandas dataframe.

    Parameters
    ----------
    data : Pandas dataframe
        longform dataframe including cols for subject, objective status of each trial
        (e.g., signal/old, noise/new), response for each trial (e.g., signal/old, noise/new),
        and a 'Trial' column used for counting.
    coding_dict : dict
        dictionary with information about objective column (objective_col; string) and
        response column (response_col; string), subject ID column (subj_col; string),
        objective "signal" (signal; list of strings) and "noise" (noise; list of strings) labels,
        and subjective "signal" response labels (signal_resp; list of strings).
        The legacy keys 'old', 'new', and 'old_resp' are also accepted.
        Example coding_dict (for a memory experiment):
            coding_dict = dict(objective_col='TrialType',  # column name for new
                               signal=['old'],             # objectively old label
                               noise=['similar', 'new'],   # objectively new label
                               response_col='Resp_bin',
                               signal_resp=['Old'],
                               subj_col='Subject',
                               )
    measures : list of strings, optional
        list of SDT measures to include in output; options are 'd' (dprime), 'beta',
        'c', and 'Ad'. Defaults to all four.

    Returns
    -------
    df : Pandas dataframe
        A longform dataframe with a column for subject ID, measure, and value.
    """
    # get relevant info
    subj_col = coding_dict['subj_col']
    obj_col = coding_dict['objective_col']
    resp_col = coding_dict['response_col']
    # BUG FIX: the docstring documents 'signal'/'noise'/'signal_resp' keys, but the
    # code used to read 'old'/'new'/'old_resp' (a KeyError with the documented dict).
    # Accept the documented keys first and fall back to the legacy ones.
    signal = coding_dict.get('signal', coding_dict.get('old'))
    noise = coding_dict.get('noise', coding_dict.get('new'))
    signal_resp = coding_dict.get('signal_resp', coding_dict.get('old_resp'))
    # BUG FIX: measures previously had no usable default (None crashed the loop).
    if measures is None:
        measures = ['d', 'beta', 'c', 'Ad']

    # collect rows, then build the frame once (DataFrame.append was removed in pandas 2.0)
    rows = []
    # calculate SDT stats for each subj
    for subj in data[subj_col].unique():
        data_s = data[data[subj_col] == subj]
        count_signal = data_s[data_s[obj_col].isin(signal)].Trial.count()
        count_noise = data_s[data_s[obj_col].isin(noise)].Trial.count()
        count_hit = data_s[data_s[obj_col].isin(signal) &
                           data_s[resp_col].isin(signal_resp)].Trial.count()
        count_fa = data_s[data_s[obj_col].isin(noise) &
                          data_s[resp_col].isin(signal_resp)].Trial.count()

        # Floors and ceilings are replaced by half hits and half FA's
        halfHit = 0.5 / count_signal
        halfFa = 0.5 / count_noise

        # Calculate hitrate, avoiding d' infinity
        hitRate = count_hit / count_signal
        if hitRate == 1:
            hitRate = 1 - halfHit
        if hitRate == 0:
            hitRate = halfHit

        # Calculate false alarm rate, avoiding d' infinity
        faRate = count_fa / count_noise
        if faRate == 1:
            faRate = 1 - halfFa
        if faRate == 0:
            faRate = halfFa

        out = {}
        out['d'] = norm.ppf(hitRate) - norm.ppf(faRate)
        # BUG FIX: beta = exp((z(FA)^2 - z(Hit)^2) / 2); the division by 2 belongs
        # inside the exponential (Stanislaw & Todorov, 1999), not applied afterwards.
        out['beta'] = exp((norm.ppf(faRate) ** 2 - norm.ppf(hitRate) ** 2) / 2)
        out['c'] = -(norm.ppf(hitRate) + norm.ppf(faRate)) / 2
        out['Ad'] = norm.cdf(out['d'] / sqrt(2))

        for measure in measures:
            rows.append({subj_col: subj, 'measure': measure, 'value': out[measure]})

    df = pd.DataFrame(rows, columns=[subj_col, 'measure', 'value'])
    return df
def calc_roc(data, coding_dict=None):
    """Calculate ROC curve for a pandas dataframe.

    Parameters
    ----------
    data : Pandas dataframe
        dataframe including cols for subject, objective status of each trial
        (e.g., signal/old, noise/new), response for each trial (e.g., 1-5 confidence
        scale), and a 'Trial' column used for counting.
    coding_dict : dict
        dictionary with information about objective (objective_col; string) and
        response columns (response_col; string), subject ID column (subj_col; string),
        objective signal (signal; list of strings) and noise (noise; list of strings) labels.
        The legacy keys 'old' and 'new' are also accepted.
        Example:
            coding_dict = dict(objective_col='TrialType',  # column name for new
                               signal=['old'],             # objectively old label
                               noise=['similar', 'new'],   # objectively new label
                               response_col='Response',
                               subj_col='Subject',
                               )

    Returns
    -------
    df : Pandas dataframe
        A longform dataframe with a column for subject ID, level of confidence, and
        proportions of responses for old and new trials.
    """
    # get relevant info
    subj_col = coding_dict['subj_col']
    obj_col = coding_dict['objective_col']
    resp_col = coding_dict['response_col']
    # BUG FIX: the docstring documents 'signal'/'noise' keys, but the code used to
    # read 'old'/'new'. Accept the documented keys first, fall back to legacy ones.
    signal = coding_dict.get('signal', coding_dict.get('old'))
    noise = coding_dict.get('noise', coding_dict.get('new'))
    max_resp = int(data[resp_col].max())

    # collect rows, then build the frame once (DataFrame.append was removed in pandas 2.0)
    rows = []
    # calculate cumulative response proportions for each subj
    for subj in data[subj_col].unique():
        data_s = data[data[subj_col] == subj]
        count_signal = data_s[data_s[obj_col].isin(signal)].Trial.count()
        count_noise = data_s[data_s[obj_col].isin(noise)].Trial.count()
        for level in range(1, max_resp + 1):
            # proportion of trials answered at this confidence level or higher
            count_signal_tolevel = data_s[(data_s[obj_col].isin(signal)) &
                                          (data_s[resp_col] >= level)].Trial.count()
            count_noise_tolevel = data_s[(data_s[obj_col].isin(noise)) &
                                         (data_s[resp_col] >= level)].Trial.count()
            rows.append({subj_col: subj,
                         'conf_level': level,
                         'signal': count_signal_tolevel / count_signal,
                         'noise': count_noise_tolevel / count_noise})

    df = pd.DataFrame(rows, columns=[subj_col, 'conf_level', 'signal', 'noise'])
    return df
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os, copy
import json
import torch
import imageio
import numpy as np
from collections import defaultdict
from torchvision.utils import save_image
from argparse import Namespace
from fairseq.tasks import FairseqTask, register_task
from fairseq.optim.fp16_optimizer import FP16Optimizer
from fairseq.logging import progress_bar
from fairnr.data import (
ShapeViewDataset, SampledPixelDataset, ShapeViewStreamDataset,
WorldCoordDataset, ShapeDataset, InfiniteDataset
)
from fairnr.data.data_utils import write_images, recover_image, parse_views
from fairnr.data.geometry import ray, compute_normal_map
from fairnr.renderer import NeuralRenderer
from fairnr.data.trajectory import get_trajectory
from fairnr import ResetTrainerException
@register_task("single_object_rendering")
class SingleObjRenderingTask(FairseqTask):
"""
Task for remembering & rendering a single object.
"""
@staticmethod
def add_args(parser):
    """Add task-specific arguments to the parser"""
    # Dataset location and loading behavior.
    parser.add_argument("data", help='data-path or data-directoy')
    parser.add_argument("--object-id-path", type=str, help='path to object indices', default=None)
    parser.add_argument("--no-preload", action="store_true")
    parser.add_argument("--no-load-binary", action="store_true")
    parser.add_argument("--load-depth", action="store_true",
                        help="load depth images if exists")
    parser.add_argument("--transparent-background", type=str, default="1.0",
                        help="background color if the image is transparent")
    parser.add_argument("--load-mask", action="store_true",
                        help="load pre-computed masks which is useful for subsampling during training.")
    # View selection per split; accepts explicit ids or ranges like "0..50".
    parser.add_argument("--train-views", type=str, default="0..50",
                        help="views sampled for training, you can set specific view id, or a range")
    parser.add_argument("--valid-views", type=str, default="0..50",
                        help="views sampled for validation, you can set specific view id, or a range")
    parser.add_argument("--test-views", type=str, default="0",
                        help="views sampled for rendering, only used for showing rendering results.")
    parser.add_argument("--subsample-valid", type=int, default=-1,
                        help="if set > -1, subsample the validation (when training set is too large)")
    # Batch shape: views per batch per GPU, and image resolution per split.
    parser.add_argument("--view-per-batch", type=int, default=6,
                        help="number of views training each batch (each GPU)")
    parser.add_argument("--valid-view-per-batch", type=int, default=1,
                        help="number of views training each batch (each GPU)")
    parser.add_argument("--view-resolution", type=str, default='64x64',
                        help="width for the squared image. downsampled from the original.")
    parser.add_argument('--valid-view-resolution', type=str, default=None,
                        help="if not set, if valid view resolution will be train view resolution")
    parser.add_argument("--min-color", choices=(0, -1), default=-1, type=int,
                        help="RGB range used in the model. conventionally used -1 ~ 1")
    parser.add_argument("--virtual-epoch-steps", type=int, default=None,
                        help="virtual epoch used in Infinite Dataset. if None, set max-update")
    # Voxel scheduling knobs: pruning / halving voxel size / raymarching step size,
    # each keyed on specific update counts (comma-separated lists).
    parser.add_argument("--pruning-every-steps", type=int, default=None,
                        help="if the model supports pruning, prune unecessary voxels")
    parser.add_argument("--half-voxel-size-at", type=str, default=None,
                        help='specific detailed number of updates to half the voxel sizes')
    parser.add_argument("--reduce-step-size-at", type=str, default=None,
                        help='specific detailed number of updates to reduce the raymarching step sizes')
    parser.add_argument("--prune-voxel-at", type=str, default=None,
                        help='specific detailed number of pruning voxels')
    # Optional online rendering during training.
    parser.add_argument("--rendering-every-steps", type=int, default=None,
                        help="if set, enables rendering online with default parameters")
    parser.add_argument("--rendering-args", type=str, metavar='JSON')
    parser.add_argument("--pruning-th", type=float, default=0.5,
                        help="if larger than this, we choose keep the voxel.")
    parser.add_argument("--pruning-with-train-stats", action='store_true',
                        help="if set, model will run over the training set statstics to prune voxels.")
    parser.add_argument("--pruning-rerun-train-set", action='store_true',
                        help="only works when --pruning-with-train-stats is also set.")
    parser.add_argument("--output-valid", type=str, default=None)
def __init__(self, args):
    """Resolve dataset paths, object ids, logging, and voxel/rendering schedules from args."""
    super().__init__(args)
    self._trainer, self._dummy_batch = None, None

    # check dataset: a directory may carry per-split txt lists and an object-id file.
    self.train_data = self.val_data = self.test_data = args.data
    self.object_ids = None if args.object_id_path is None else \
        {line.strip(): i for i, line in enumerate(open(args.object_id_path))}
    self.output_valid = getattr(args, "output_valid", None)
    if os.path.isdir(args.data):
        if os.path.exists(args.data + '/train.txt'):
            self.train_data = args.data + '/train.txt'
        if os.path.exists(args.data + '/val.txt'):
            self.val_data = args.data + '/val.txt'
        if os.path.exists(args.data + '/test.txt'):
            self.test_data = args.data + '/test.txt'
        if self.object_ids is None and os.path.exists(args.data + '/object_ids.txt'):
            self.object_ids = {line.strip(): i for i, line in enumerate(open(args.data + '/object_ids.txt'))}
    # Build the inverse map id -> object name; default single object named 'model'.
    if self.object_ids is not None:
        self.ids_object = {self.object_ids[o]: o for o in self.object_ids}
    else:
        self.ids_object = {0: 'model'}

    # Only rank 0 writes tensorboard images to avoid duplicate writers.
    if len(self.args.tensorboard_logdir) > 0 and getattr(args, "distributed_rank", -1) == 0:
        from tensorboardX import SummaryWriter
        self.writer = SummaryWriter(self.args.tensorboard_logdir + '/images')
    else:
        self.writer = None

    # Last update count at which each scheduled event fired:
    # 'pv' prune voxels, 'sv' half voxel size, 'rs' reduce step size, 're' rendering.
    self._num_updates = {'pv': -1, 'sv': -1, 'rs': -1, 're': -1}
    self.pruning_every_steps = getattr(self.args, "pruning_every_steps", None)
    self.pruning_th = getattr(self.args, "pruning_th", 0.5)
    self.rendering_every_steps = getattr(self.args, "rendering_every_steps", None)
    self.steps_to_half_voxels = getattr(self.args, "half_voxel_size_at", None)
    self.steps_to_reduce_step = getattr(self.args, "reduce_step_size_at", None)
    self.steps_to_prune_voxels = getattr(self.args, "prune_voxel_at", None)

    # Schedules arrive as comma-separated strings; parse them into int lists.
    if self.steps_to_half_voxels is not None:
        self.steps_to_half_voxels = [int(s) for s in self.steps_to_half_voxels.split(',')]
    if self.steps_to_reduce_step is not None:
        self.steps_to_reduce_step = [int(s) for s in self.steps_to_reduce_step.split(',')]
    if self.steps_to_prune_voxels is not None:
        self.steps_to_prune_voxels = [int(s) for s in self.steps_to_prune_voxels.split(',')]

    # Optional online renderer: defaults below, overridden by --rendering-args JSON.
    if self.rendering_every_steps is not None:
        gen_args = {
            'path': args.save_dir,
            'render_beam': 1, 'render_resolution': '512x512',
            'render_num_frames': 120, 'render_angular_speed': 3,
            'render_output_types': ["rgb"], 'render_raymarching_steps': 10,
            'render_at_vector': "(0,0,0)", 'render_up_vector': "(0,0,-1)",
            'render_path_args': "{'radius': 1.5, 'h': 0.5}",
            'render_path_style': 'circle', "render_output": None
        }
        gen_args.update(json.loads(getattr(args, 'rendering_args', '{}') or '{}'))
        self.renderer = self.build_generator(Namespace(**gen_args))
    else:
        self.renderer = None

    self.train_views = parse_views(args.train_views)
    self.valid_views = parse_views(args.valid_views)
    self.test_views = parse_views(args.test_views)
@classmethod
def setup_task(cls, args, **kwargs):
    """
    Setup the task (standard fairseq factory hook; extra kwargs are ignored).
    """
    return cls(args)
def repeat_dataset(self, split):
    """Repeat count for a split: the training split is repeated once per
    distributed worker (IMPORTANT so every GPU sees the full set); all other
    splits are used once."""
    if split == 'train':
        return self.args.distributed_world_size
    return 1
def load_dataset(self, split, **kwargs):
    """
    Load a given dataset split (train, valid, test).

    Picks the data path, views, view count, and resolution per split, then
    wraps the dataset for train (infinite) / valid (streaming) consumption.
    """
    self.datasets[split] = ShapeViewDataset(
        self.train_data if split == 'train' else \
        self.val_data if split == 'valid' else self.test_data,
        views=self.train_views if split == 'train' else \
            self.valid_views if split == 'valid' else self.test_views,
        num_view=self.args.view_per_batch if split == 'train' else \
            self.args.valid_view_per_batch if split == 'valid' else 1,
        resolution=self.args.view_resolution if split == 'train' else \
            getattr(self.args, "valid_view_resolution", self.args.view_resolution) if split == 'valid' else \
            getattr(self.args, "render_resolution", self.args.view_resolution),
        subsample_valid=self.args.subsample_valid if split == 'valid' else -1,
        train=(split=='train'),
        # depth/mask/preload/binarize are training-time aids; skipped for test.
        load_depth=self.args.load_depth and (split!='test'),
        load_mask=self.args.load_mask and (split!='test'),
        repeat=self.repeat_dataset(split),
        preload=(not getattr(self.args, "no_preload", False)) and (split!='test'),
        binarize=(not getattr(self.args, "no_load_binary", False)) and (split!='test'),
        bg_color=getattr(self.args, "transparent_background", "1,1,1"),
        min_color=getattr(self.args, "min_color", -1),
        ids=self.object_ids
    )
    if split == 'train':
        # Size the virtual epoch: either from --virtual-epoch-steps or effectively unbounded.
        max_step = getattr(self.args, "virtual_epoch_steps", None)
        if max_step is not None:
            total_num_models = max_step * self.args.distributed_world_size * self.args.max_sentences
        else:
            total_num_models = 10000000

        if getattr(self.args, "pruning_rerun_train_set", False):
            # Keep a pristine copy of the train set + its iterator for pruning passes.
            self._unique_trainset = ShapeViewStreamDataset(copy.deepcopy(self.datasets[split]))  # backup
            self._unique_trainitr = self.get_batch_iterator(
                self._unique_trainset, max_sentences=self.args.max_sentences_valid, seed=self.args.seed,
                num_shards=self.args.distributed_world_size, shard_id=self.args.distributed_rank,
                num_workers=self.args.num_workers)
        self.datasets[split] = InfiniteDataset(self.datasets[split], total_num_models)

    if split == 'valid':
        self.datasets[split] = ShapeViewStreamDataset(self.datasets[split])
def build_generator(self, args):
"""
build a neural renderer for visualization
"""
return NeuralRenderer(
beam=args.render_beam,
resolution=args.render_resolution,
frames=args.render_num_frames,
speed=args.render_angular_speed,
raymarching_steps=args.render_raymarching_steps,
path_gen=get_trajectory(args.render_path_style)(
**eval(args.render_path_args)
),
at=eval(args.render_at_vector),
up=eval(args.render_up_vector),
fps=getattr(args, "render_save_fps", 24),
output_dir=args.render_output if args.render_output is not None
else os.path.join(args.path, "output"),
output_type=args.render_output_types,
test_camera_poses=getattr(args, "render_camera_poses", None),
test_camera_intrinsics=getattr(args, "render_camera_intrinsics", None),
test_camera_views=getattr(args, "render_views", None)
)
    def setup_trainer(self, trainer):
        """Store a reference to the global trainer so task-level hooks
        (e.g. the pruning replay in train_step) can call back into it."""
        # give the task ability to access the global trainer functions
        self._trainer = trainer
    @property
    def source_dictionary(self):
        """Return the :class:`~fairseq.data.Dictionary` for the language
        model.  This task does not use dictionaries, so ``None``."""
        return None
    @property
    def target_dictionary(self):
        """Return the :class:`~fairseq.data.Dictionary` for the language
        model.  This task does not use dictionaries, so ``None``."""
        return None
    def update_step(self, num_updates, name='re'):
        """Task level update when number of updates increases.
        This is called after the optimization step and learning rate
        update at each iteration.

        ``name`` selects which counter to bump; train_step uses
        'pv' (prune voxels), 'sv' (split voxels), 're' (render),
        'rs' (reduce stepsize) and 'step' (global step).
        """
        self._num_updates[name] = num_updates
    def train_step(self, sample, model, criterion, optimizer, update_num, ignore_grad=False):
        """Run one optimization step, interleaving periodic maintenance:
        voxel pruning, voxel splitting (which restarts the trainer),
        intermediate rendering, and raymarching step-size reduction.
        Each maintenance action is guarded by a per-action counter in
        self._num_updates so it runs at most once per update_num.
        """
        # --- Voxel pruning: every pruning_every_steps updates, or at the
        # explicit steps listed in steps_to_prune_voxels.
        if (((self.pruning_every_steps is not None) and \
            (update_num % self.pruning_every_steps == 0) and \
            (update_num > 0)) or \
            ((self.steps_to_prune_voxels is not None) and \
            update_num in self.steps_to_prune_voxels) \
            ) and \
            (update_num > self._num_updates['pv']) and \
            hasattr(model, 'prune_voxels'):
            model.eval()
            if getattr(self.args, "pruning_rerun_train_set", False):
                # Replay the backed-up training set (see load_dataset) to
                # gather occupancy statistics before pruning.
                with torch.no_grad():
                    model.clean_caches(reset=True)
                    progress = progress_bar.progress_bar(
                        self._unique_trainitr.next_epoch_itr(shuffle=False),
                        prefix=f"pruning based statiscs over training set",
                        tensorboard_logdir=None,
                        default_log_format=self.args.log_format if self.args.log_format is not None else "tqdm")
                    for step, inner_sample in enumerate(progress):
                        outs = model(**self._trainer._prepare_sample(self.filter_dummy(inner_sample)))
                        progress.log(stats=outs['other_logs'], tag='track', step=step)
            model.prune_voxels(self.pruning_th, train_stats=getattr(self.args, "pruning_with_train_stats", False))
            self.update_step(update_num, 'pv')
        # --- Voxel splitting: aborts this step via ResetTrainerException;
        # presumably the trainer catches it and rebuilds optimizer state
        # before re-entering train_step — TODO confirm at the call site.
        if self.steps_to_half_voxels is not None and \
            (update_num in self.steps_to_half_voxels) and \
            (update_num > self._num_updates['sv']):
            model.split_voxels()
            self.update_step(update_num, 'sv')
            raise ResetTrainerException
        # --- Periodic rendering of the current sample (master rank saves).
        if self.rendering_every_steps is not None and \
            (update_num % self.rendering_every_steps == 0) and \
            (update_num > 0) and \
            self.renderer is not None and \
            (update_num > self._num_updates['re']):
            sample_clone = {key: sample[key].clone() if sample[key] is not None else None for key in sample }
            outputs = self.inference_step(self.renderer, [model], [sample_clone, 0])[1]
            if getattr(self.args, "distributed_rank", -1) == 0: # save only for master
                self.renderer.save_images(outputs, update_num)
        # Drop the consumed split step so it does not re-trigger after the
        # trainer restarts at the same update_num.
        self.steps_to_half_voxels = [a for a in self.steps_to_half_voxels if a != update_num]
        # --- Raymarching step-size reduction at the scheduled updates.
        if self.steps_to_reduce_step is not None and \
            update_num in self.steps_to_reduce_step and \
            (update_num > self._num_updates['rs']):
            model.reduce_stepsize()
            self.update_step(update_num, 'rs')
        self.update_step(update_num, 'step')
        return super().train_step(sample, model, criterion, optimizer, update_num, ignore_grad)
    def valid_step(self, sample, model, criterion):
        """Standard validation step, extended with rendering evaluation
        scores and optional tensorboard image logging."""
        loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
        # Let the model append image-quality scores (from its cache) to the logs.
        model.add_eval_scores(logging_output, sample, model.cache, criterion, outdir=self.output_valid)
        if self.writer is not None:
            # Visualize the first shape/view of the batch, if images are produced.
            images = model.visualize(sample, shape=0, view=0)
            if images is not None:
                write_images(self.writer, images, self._num_updates['step'])
        return loss, sample_size, logging_output
    def save_image(self, img, id, view, group='gt'):
        """Save one image as <output_valid>/<group>/<object_name>/<view>.png.

        ``id`` is a scalar tensor indexing self.ids_object; ``img`` is
        presumably a float array in [0, 1] (it is scaled by 255 here) —
        TODO confirm against callers.
        """
        object_name = self.ids_object[id.item()]
        def _mkdir(x):
            # Plain mkdir (not makedirs): parents are created level by level below.
            if not os.path.exists(x):
                os.mkdir(x)
        _mkdir(self.output_valid)
        _mkdir(os.path.join(self.output_valid, group))
        _mkdir(os.path.join(self.output_valid, group, object_name))
        imageio.imsave(os.path.join(
            self.output_valid, group, object_name,
            '{:04d}.png'.format(view)),
            (img * 255).astype(np.uint8))
def filter_dummy(self, sample):
if self._dummy_batch is None:
self._dummy_batch = sample
if sample is None:
sample = self._dummy_batch
return sample
|
"""web_statistics URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from main_app.views import *
from UserManagement.views import *
from AdminManagement.views import *
# URL routes for the public site (views come from the star imports above,
# presumably main_app — confirm, star imports hide the origin).
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^$', main),
    url(r'^about/$', about),
    url(r'^daily_statistics/$', daily_statistics),
    url(r'^periodic_statistics/$', periodic_statistics),
    url(r'^admin_statistics/$', admin_statistics),
    url(r'^registration/$', registration),
    url(r'^privateroom/$', privateroom),
    url(r'^sitemap/$', sitemap),
    url(r'^faq/$', faq),
    url(r'^politics/$', politics),
    url(r'^business/$', business),
    url(r'^economics_and_finances/$', economics_and_finances),
    url(r'^society/$', society),
    # NOTE(review): 'userinfo' routes to the same view as 'privateroom' —
    # confirm this duplication is intentional.
    url(r'^userinfo/$', privateroom),
    url(r'^authorization/$', authorization),
    url(r'^partnership/$', partnership),
    url(r'^contacts/$', contacts),
    url(r'^review/$', review),
    url(r'^support/$', support),
    # NOTE(review): the next three patterns lack a trailing '$', so they
    # match any URL starting with these paths — confirm intended.
    url(r'^common_statistics', common_statistics),
    url(r'^private_settings', private_settings),
    url(r'^googlechart', googlechart),
]
# Account management and custom admin routes.
urlpatterns += [
    url(r'^user/login/$', login),
    url(r'^user/logout/$', logout),
    url(r'^user/registration/$', registration),
    url(r'^base_admin/$', base_admin),
    url(r'^myadmin/$', myadmin),
    url(r'^myadmin/delete/user/(\d+)$', delete_user),
    url(r'^myadmin/get_user_form/(\d+)$', get_user_form),
    url(r'^myadmin/create/user/(\d*)$', create_user),
    url(r'^user/set_new_password/$', set_new_password),
]
# Serve media/static from Django itself in development only.
if settings.DEBUG:
    # Static files (CSS, JavaScript, Images)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
from utils import *
# Command-line options on top of the project's shared base parser.
parser = get_base_parser()
# data
parser.add_argument('--dither_mode', default='nodither', type=str, choices=['dither','nodither'], help='dither mode of input gifs')
parser.add_argument('--nColor', default=32, type=int, help='color palette size')
parser.add_argument('--tCrop', default=5, type=int, help='sequence length')
parser.add_argument('--sCrop', default=256, type=int, help='spatial patch size')
parser.add_argument('--tStride', default=10, type=int, help="temporal downsampling")
parser.add_argument('--pool_size', default=60, type=int, help="image pool size")
# model
parser.add_argument('--color_model1_file', default='pretrained/ccdnet1_gan_faces_nodither_ep30.pt', type=str, help='')
parser.add_argument('--color_model_key', default='model_netG', type=str, help='')
parser.add_argument('--unroll', default=0, type=int, help='')
# NOTE: passing --no_regif DISABLES regif (store_false with default True).
parser.add_argument('--no_regif', default=True, action='store_false', dest='regif', help='regif: recompute the gif as iter input')
parser.add_argument('--color_model2_file', default='', type=str, help='')
parser.add_argument('--sequential', default=False, action='store_true', dest='sequential', help='process color:flow sequentially')
parser.add_argument('--maxFlow', default=30, type=float, help='maximum flow value, use for rescaling')
# loss
parser.add_argument('--w_idl', default=0.5, type=float, help='weight for image difference loss')
parser.add_argument('--w_gdl', default=0.5, type=float, help='weight for gradient difference loss')
parser.add_argument('--w_warp', default=0.5, type=float, help='weight for image difference loss')
parser.add_argument('--w_smooth', default=1, type=float, help='weight for gradient difference loss')
parser.add_argument('--w_ggan', default=1, type=float, help='weight for generator loss')
parser.add_argument('--w_dgan', default=1, type=float, help='weight for discriminator loss')
parser.add_argument('--gan_loss', default='GAN', type=str, choices=['GAN', 'LSGAN'], help='which GAN Loss')
parser.add_argument('--nogan', default=False, action='store_true', dest='nogan', help='do not use gan')
parser.add_argument('--L_warp_outlier', default=40.0, type=float, help='initial outlier value for warp loss')
# optimizer
parser.add_argument('--GC', default=1.0, type=float, help='gradient clipping')
# apply mode
parser.add_argument('--applyT', default=2, type=int, help='factor for temporal interpolation')
opts = parser.parse_args()
# Dataset location (hard-coded to a lab NFS share).
dataRoot = '/nfs/bigbrain/yangwang/Gif2Video/gif2video_data/gif_faces/'
opts.inputRoot = dataRoot + 'face_gif_image/expand1.5_size256_s1_g32_' + opts.dither_mode
manual_seed(opts.seed)
# NOTE(review): `global` at module level is a no-op; kept to signal intent.
global color_model1
# Frozen colorization net #1: weights loaded from disk, eval mode, DataParallel.
color_model1 = torchmodel.UNet_simple(3, 3, ch=64)
color_model1.load_state_dict(torch.load(opts.color_model1_file)[opts.color_model_key])
color_model1 = nn.DataParallel(color_model1.eval().to(DEVICE))
if opts.unroll > 0:
    # Optional refinement net applied for `unroll` extra colorization passes.
    global color_model2
    iter_ch = 3*4 if opts.regif else 3*2
    color_model2 = torchmodel.UNet_simple(iter_ch, 3, ch=64)
    color_model2.load_state_dict(torch.load(opts.color_model2_file)[opts.color_model_key])
    color_model2 = nn.DataParallel(color_model2.eval().to(DEVICE))
if opts.regif: # recompute the gif as iterative input
    def iterative_input(fakeB, realA, colors, nColor=opts.nColor):
        """Build the refinement-net input with regif enabled.

        Re-quantizes fakeB against each sample's palette (nearest palette
        color per pixel) to get fakeB_gif, then concatenates
        [fakeB, realA, fakeB_gif, realA - fakeB_gif] along channels
        (3*4 channels, matching iter_ch above).
        """
        # fakeB_gif = fakeB
        B, C, H, W = fakeB.shape
        fakeB_gif = []
        for i in range(B):
            # NOTE(review): _realA is computed but unused here.
            _fakeB, _realA = fakeB[i].detach(), realA[i].detach()
            _fakeB = _fakeB.view(C, H*W).transpose(0, 1)
            _colors = colors[i, :nColor].detach()
            # Nearest palette color for every pixel.
            dist = pairwise_distances(_fakeB, _colors)
            argmin = dist.min(dim=1)[1]
            _fakeB_gif = _colors[argmin].transpose(0, 1).view(1, C, H, W)
            fakeB_gif.append(_fakeB_gif)
        fakeB_gif = torch.cat(fakeB_gif, dim=0)
        new_input = torch.cat([fakeB, realA, fakeB_gif, realA - fakeB_gif], dim=1)
        return new_input
else:
    def iterative_input(fakeB, realA, colors, nColor=opts.nColor):
        """Without regif: the refinement net sees only [fakeB, realA] (3*2 channels)."""
        new_input = torch.cat([fakeB, realA], dim=1)
        return new_input
def create_dataloader():
    """Build the training and evaluation DataLoaders.

    Train: batches of opts.BtSz; with --OneBatch the sampler repeats
    index 0 so every batch is identical (overfitting sanity check).
    Eval: batch size 1, sequential order, nothing dropped.
    """
    trSet = torchdata.gif_faces_ct_train(inputRoot=opts.inputRoot, tCrop=opts.tCrop, sCrop=opts.sCrop)
    trLD = DD.DataLoader(trSet, batch_size=opts.BtSz,
        sampler= DD.sampler.SubsetRandomSampler([0]*opts.BtSz) if opts.OneBatch else DD.sampler.RandomSampler(trSet),
        num_workers=opts.workers, pin_memory=True, drop_last=True)
    evalSet = torchdata.gif_faces_ct_eval(inputRoot=opts.inputRoot, tStride=opts.tStride, tCrop=opts.tCrop)
    evalLD = DD.DataLoader(evalSet, batch_size=1,
        sampler=DD.sampler.SequentialSampler(evalSet),
        num_workers=opts.workers, pin_memory=True, drop_last=False)
    return trLD, evalLD
def create_model():
    """Construct the generator/discriminator pair, each wrapped in
    nn.DataParallel and moved to DEVICE, inside an edict."""
    networks = {
        'netG': torchmodel.netSlomo(maxFlow=opts.maxFlow),
        'netD': torchmodel.NLayerDiscriminator(in_ch=12, ndf=64, n_layers=3),
    }
    model = edict()
    for name, net in networks.items():
        model[name] = nn.DataParallel(net.to(DEVICE))
    return model
def board_vis(epoch, frm1, frm0, frm10, frm01, F01, F10, Vt0s, gif, target, imgs):
    """Send qualitative tensorboard images for the first sample of a batch:
    end frames and their warped counterparts, flow fields, visibility maps,
    and the recovered sequence alongside GIF input and ground truth."""
    B, L, C, H, W = target.shape
    # I0 and I1
    im0, im1 = frm0[:1].detach(), frm1[:1].detach()
    im0_warp, im1_warp = frm10[:1].detach(), frm01[:1].detach()
    im0_err, im1_err = (im0 - im0_warp).abs(), (im1 - im1_warp).abs()
    im01_diff = (im0 - im1).abs()
    x = torch.cat((im0, im1, im0_warp, im1_warp, im0_err, im1_err, im01_diff), dim=0)
    x = vutils.make_grid(x, nrow=2, normalize=True)
    opts.board.add_image('train_batch/i0_i1', x, epoch)
    # flow
    # Pad the 2-channel flow with a zero third channel for RGB display.
    flow01, flow10 = F01[:1].detach(), F10[:1].detach()
    flow01 = torch.cat([flow01, flow01.new_zeros(1, 1, H, W)], dim=1)
    flow10 = torch.cat([flow10, flow10.new_zeros(1, 1, H, W)], dim=1)
    x = torch.cat([flow01, flow10], dim=0)
    x = vutils.make_grid(x, nrow=2, normalize=True, range=(-1, 1))
    opts.board.add_image('train_batch/f01_f10', x, epoch)
    # vis_map
    vis0s = Vt0s[0].detach().expand(-1, 3, -1, -1)
    vis1s = 1 - vis0s
    x = torch.cat([vis0s, vis1s], dim=0)
    x = vutils.make_grid(x, nrow=L-2, normalize=True)
    opts.board.add_image('train_batch/vis0_vis1', x, epoch)
    # interp
    ims_gif = gif[0].detach()
    ims_gt = target[0].detach()
    ims_est = imgs[0].detach()
    ims_err = (ims_est - ims_gt).abs()
    x = torch.cat((ims_gif, ims_gt, ims_est, ims_err), dim=0)
    x = vutils.make_grid(x, nrow=L, normalize=True)
    opts.board.add_image('train_batch/recover', x, epoch)
def train(epoch, trLD, model, optimizer, fakeABPool):
    """Run one GAN training epoch.

    Per batch: colorize the two end GIF frames with the frozen colorization
    nets, interpolate the in-between frames with netG, then alternately
    update the discriminator and generator. Per-batch and per-epoch stats
    go to opts.board; at the end, opts.L_warp_outlier is reset to 1.5x
    this epoch's average warp loss.
    """
    # switch to train mode (Dropout, BatchNorm, etc)
    for key in model.keys(): model[key].train()
    tags = ['D_gan', 'D_real', 'D_fake', 'D_acc'] + ['L_gan', 'L_idl', 'L_gdl', 'L_warp', 'L_smooth', 'L_total']
    epL = AverageMeters(tags)
    # Number of batches this epoch (opts.trRatio can subsample the loader).
    N = max(1, round(len(trLD) * opts.trRatio))
    for i, samples in progressbar.progressbar(enumerate(islice(trLD, N)), max_value=N):
        # i, samples = 0, next(iter(trLD))
        btSz = samples[0].shape[0]
        gif, target, colors = list(map(lambda x: preprocess(x).to(DEVICE), samples))
        B, L, C, H, W = gif.shape
        gif0, gif1 = gif[:, 0], gif[:, -1]
        color0, color1 = colors[:, 0], colors[:, -1]
        frm0, frm1, frm_ts = target[:, 0], target[:, -1], target[:, 1:L-1]
        # Normalized timestamps of the in-between frames.
        ts = np.linspace(0, 1, L)[1:L-1].tolist()
        with torch.no_grad():
            ################################################
            # Colorize both end frames (frozen models, optional unrolling).
            I0 = color_model1(gif0).tanh()
            for _ in range(opts.unroll):
                new_input = iterative_input(I0, gif0, color0)
                I0 = (I0 + color_model2(new_input)).tanh()
            I1 = color_model1(gif1).tanh()
            for _ in range(opts.unroll):
                new_input = iterative_input(I1, gif1, color1)
                I1 = (I1 + color_model2(new_input)).tanh()
            ################################################
            # NOTE(review): this netG call sits inside torch.no_grad(),
            # which would leave Loss_g below without a grad_fn — confirm
            # this indentation against the original repository.
            if not opts.sequential:
                Its, F01, F10, Ft1s, Ft0s, Vt0s = model.netG(gif0, gif1, I0, I1, ts)
            else:
                Its, F01, F10, Ft1s, Ft0s, Vt0s = model.netG(I0, I1, I0, I1, ts)
        imgs = torch.cat((I0.unsqueeze(dim=1), Its, I1.unsqueeze(dim=1)), dim=1)
        # Discriminator input: [gif, frames, padded spatial gradients of frames].
        D_input = lambda A, B: torch.cat((A, B, pad_tl(diff_xy(B))), dim=1)
        realAB = D_input(gif.view(B*L, -1, H, W), target.view(B*L, -1, H, W))
        fakeAB = D_input(gif.view(B*L, -1, H, W), imgs.view(B*L, -1, H, W))
        # (1) Update D network
        optimizer.netD.zero_grad()
        # Draw fakes from a replay pool to stabilize D training.
        fakeAB_ = fakeABPool.query(fakeAB.detach()).to(DEVICE)
        real_logits = model.netD(realAB)
        fake_logits = model.netD(fakeAB_)
        d_gan, d_real, d_fake = compute_D_loss(real_logits, fake_logits, method=opts.gan_loss)
        d_acc, _, _ = compute_D_acc(real_logits, fake_logits, method=opts.gan_loss)
        loss_d = d_gan * opts.w_dgan
        loss_d.backward()
        # Step D only while it is not already too strong.
        if d_acc.item() < 0.75:
            nn.utils.clip_grad_norm_(model.netD.parameters(), opts.GC)
            optimizer.netD.step()
        # (2) Update G network
        optimizer.netG.zero_grad()
        fake_logits = model.netD(fakeAB)
        L_gan = compute_G_loss(fake_logits, method=opts.gan_loss)
        L_idl = 127.5*(f_idl(I0, frm0) + f_idl(I1, frm1) + f_idl(Its, frm_ts))
        L_gdl = 127.5*(f_gdl(I0, frm0) + f_gdl(I1, frm1) + f_gdl(Its, frm_ts))
        L_smooth = opts.maxFlow*(f_smooth(F01) + f_smooth(F10))
        frm10 = torchmodel.backwarp(frm1, F01*opts.maxFlow)
        frm01 = torchmodel.backwarp(frm0, F10*opts.maxFlow)
        frm1ts = torch.cat(list(torchmodel.backwarp(frm1, Ft1s[:, i]*opts.maxFlow).unsqueeze(1) for i in range(Ft1s.shape[1])), dim=1)
        frm0ts = torch.cat(list(torchmodel.backwarp(frm0, Ft0s[:, i]*opts.maxFlow).unsqueeze(1) for i in range(Ft0s.shape[1])), dim=1)
        L_warp = 127.5*(f_idl(frm10, frm0) + f_idl(frm01, frm1) + f_idl(frm1ts, frm_ts) + f_idl(frm0ts, frm_ts))
        Loss_g = L_gan * opts.w_ggan + L_idl * opts.w_idl + L_gdl * opts.w_gdl + L_warp * opts.w_warp + L_smooth * opts.w_smooth
        Loss_g.backward()
        # Step G only while D is informative and L_warp is not an outlier.
        if d_acc.item() > 0.25 and L_warp < opts.L_warp_outlier:
            nn.utils.clip_grad_norm_(model.netG.parameters(), opts.GC)
            optimizer.netG.step()
        # tags = ['D_gan', 'D_real', 'D_fake', 'D_acc'] + ['L_gan', 'L_idl', 'L_gdl', 'L_warp', 'L_smooth', 'L_total']
        values = list(map(lambda x: x.item(), [d_gan, d_real, d_fake, d_acc, L_gan, L_idl, L_gdl, L_warp, L_smooth, Loss_g]))
        assert len(tags) == len(values)
        for tag, value in zip(tags, values):
            epL[tag].update(value, btSz)
            if opts.board is not None and i%opts.dispIter==0:
                opts.board.add_scalar('train_batch/'+tag, value, epoch-1+float(i+1)/N)
        if opts.board is not None and i%opts.dispIter==0:
            board_vis(epoch, frm1, frm0, frm10, frm01, F01, F10, Vt0s, gif, target, imgs)
    # logging
    state = edict({k:v.avg for k, v in epL.items()})
    print('Train_Summary: Epoch [{:03d}/{:03d}], {}'.format(epoch, opts.nEpoch, state))
    if opts.board is not None:
        for tag, value in state.items():
            opts.board.add_scalar('train_epoch/'+tag, value, epoch)
    # Adapt the warp-loss outlier threshold from this epoch's average.
    opts.L_warp_outlier = epL['L_warp'].avg * 1.5
    print('outlier threshold for L_warp is set to {}'.format(opts.L_warp_outlier))
def train_nogan(epoch, trLD, model, optimizer):
    """Run one training epoch without the GAN objective.

    Only the reconstruction (idl/gdl), warping and smoothness losses update
    netG. At the end, opts.L_warp_outlier (the warp-loss gate used before
    stepping the optimizer) is reset to 1.5x this epoch's average L_warp.

    Fixes vs. the original:
      * the netG forward pass now runs OUTSIDE torch.no_grad() (only the
        frozen colorization nets stay inside) — otherwise Loss_g has no
        grad_fn and backward() raises;
      * the threshold update uses epL['L_warp'].avg (a float), matching
        train(); the meter object itself does not support arithmetic.
    """
    # switch to train mode (Dropout, BatchNorm, etc)
    for key in model.keys(): model[key].train()
    tags = ['L_idl', 'L_gdl', 'L_warp', 'L_smooth', 'L_total']
    epL = AverageMeters(tags)
    # Number of batches this epoch (opts.trRatio can subsample the loader).
    N = max(1, round(len(trLD) * opts.trRatio))
    for i, samples in progressbar.progressbar(enumerate(islice(trLD, N)), max_value=N):
        btSz = samples[0].shape[0]
        gif, target, colors = list(map(lambda x: preprocess(x).to(DEVICE), samples))
        B, L, C, H, W = gif.shape
        gif0, gif1 = gif[:, 0], gif[:, -1]
        color0, color1 = colors[:, 0], colors[:, -1]
        frm0, frm1, frm_ts = target[:, 0], target[:, -1], target[:, 1:L-1]
        # Normalized timestamps of the in-between frames.
        ts = np.linspace(0, 1, L)[1:L-1].tolist()
        # Colorize both end frames with the frozen nets (no gradients needed).
        with torch.no_grad():
            I0 = color_model1(gif0).tanh()
            for _ in range(opts.unroll):
                new_input = iterative_input(I0, gif0, color0)
                I0 = (I0 + color_model2(new_input)).tanh()
            I1 = color_model1(gif1).tanh()
            for _ in range(opts.unroll):
                new_input = iterative_input(I1, gif1, color1)
                I1 = (I1 + color_model2(new_input)).tanh()
        # Interpolate the in-between frames WITH gradients (netG is trained).
        if not opts.sequential:
            Its, F01, F10, Ft1s, Ft0s, Vt0s = model.netG(gif0, gif1, I0, I1, ts)
        else:
            Its, F01, F10, Ft1s, Ft0s, Vt0s = model.netG(I0, I1, I0, I1, ts)
        imgs = torch.cat((I0.unsqueeze(dim=1), Its, I1.unsqueeze(dim=1)), dim=1)
        # Update G network
        optimizer.netG.zero_grad()
        L_idl = 127.5*(f_idl(I0, frm0) + f_idl(I1, frm1) + f_idl(Its, frm_ts))
        L_gdl = 127.5*(f_gdl(I0, frm0) + f_gdl(I1, frm1) + f_gdl(Its, frm_ts))
        L_smooth = opts.maxFlow*(f_smooth(F01) + f_smooth(F10))
        frm10 = torchmodel.backwarp(frm1, F01*opts.maxFlow)
        frm01 = torchmodel.backwarp(frm0, F10*opts.maxFlow)
        frm1ts = torch.cat(list(torchmodel.backwarp(frm1, Ft1s[:, i]*opts.maxFlow).unsqueeze(1) for i in range(Ft1s.shape[1])), dim=1)
        frm0ts = torch.cat(list(torchmodel.backwarp(frm0, Ft0s[:, i]*opts.maxFlow).unsqueeze(1) for i in range(Ft0s.shape[1])), dim=1)
        L_warp = 127.5*(f_idl(frm10, frm0) + f_idl(frm01, frm1) + f_idl(frm1ts, frm_ts) + f_idl(frm0ts, frm_ts))
        Loss_g = L_idl * opts.w_idl + L_gdl * opts.w_gdl + L_warp * opts.w_warp + L_smooth * opts.w_smooth
        Loss_g.backward()
        # Skip the update on warp-loss outliers (e.g. shot boundaries).
        if L_warp < opts.L_warp_outlier:
            nn.utils.clip_grad_norm_(model.netG.parameters(), opts.GC)
            optimizer.netG.step()
        # tags = ['L_idl', 'L_gdl', 'L_warp', 'L_smooth', 'L_total']
        values = list(map(lambda x: x.item(), [L_idl, L_gdl, L_warp, L_smooth, Loss_g]))
        assert len(tags) == len(values)
        for tag, value in zip(tags, values):
            epL[tag].update(value, btSz)
            if opts.board is not None and i%opts.dispIter==0:
                opts.board.add_scalar('train_batch/'+tag, value, epoch-1+float(i+1)/N)
        if opts.board is not None and i%opts.dispIter==0:
            board_vis(epoch, frm1, frm0, frm10, frm01, F01, F10, Vt0s, gif, target, imgs)
    # logging
    state = edict({k:v.avg for k, v in epL.items()})
    print('Train_Summary: Epoch [{:03d}/{:03d}], {}'.format(epoch, opts.nEpoch, state))
    if opts.board is not None:
        for tag, value in state.items():
            opts.board.add_scalar('train_epoch/'+tag, value, epoch)
    # BUG FIX: use the meter's average, as train() does.
    opts.L_warp_outlier = epL['L_warp'].avg * 1.5
    print('outlier threshold for L_warp is set to {}'.format(opts.L_warp_outlier))
def evaluate(epoch, evalLD, model):
    """Evaluate reconstruction quality over the eval loader.

    For each temporal window, reconstructs the L frames and accumulates
    PSNR/SSIM of the prediction and of a naive GIF baseline (nearer end
    frame repeated). Averages are printed and sent to opts.board.
    """
    # switch to evaluate mode (Dropout, BatchNorm, etc)
    netG = model.netG
    netG.eval()
    tags = ['PSNR', 'PSNR_gif', 'SSIM', 'SSIM_gif']
    epL = AverageMeters(tags)
    for i, (gif0s, gif1s, targets, color0s, color1s) in progressbar.progressbar(enumerate(evalLD), max_value=len(evalLD)):
        # i, (gif0s, gif1s, targets) = 0, next(iter(evalLD))
        # gif0s, gif1s: 1, T, C, H, W
        # targets: 1, T, L, C, H, W
        _, T, L, C, H, W = targets.size()
        for j in range(T):
            gif0, gif1, target, color0, color1 = gif0s[:, j], gif1s[:, j], targets[:, j], color0s[:, j], color1s[:, j]
            gif0, gif1, target, color0, color1 = list(map(lambda x: preprocess(x).to(DEVICE), (gif0, gif1, target, color0, color1)))
            ts = np.linspace(0, 1, L)[1:L-1].tolist()
            # Inference only: colorize end frames, then interpolate.
            with torch.no_grad():
                ################################################
                I0 = color_model1(gif0).tanh()
                for _ in range(opts.unroll):
                    new_input = iterative_input(I0, gif0, color0)
                    I0 = (I0 + color_model2(new_input)).tanh()
                I1 = color_model1(gif1).tanh()
                for _ in range(opts.unroll):
                    new_input = iterative_input(I1, gif1, color1)
                    I1 = (I1 + color_model2(new_input)).tanh()
                ################################################
                if not opts.sequential:
                    Its, F01, F10, Ft1s, Ft0s, Vt0s = model.netG(gif0, gif1, I0, I1, ts)
                else:
                    Its, F01, F10, Ft1s, Ft0s, Vt0s = model.netG(I0, I1, I0, I1, ts)
            pred = torch.cat((I0.unsqueeze(dim=1), Its, I1.unsqueeze(dim=1)), dim=1)
            # Baseline: repeat the nearer end frame for each timestamp.
            pred_gif = torch.cat(list((gif0 if t<=0.5 else gif1).unsqueeze(1) for t in np.linspace(0, 1, L).tolist()), dim=1)
            # maxVal=2.0: consistent with values spanning a width-2 range
            # (tanh outputs in [-1, 1]).
            comp_psnr = lambda x, y: rmse2psnr((x - y).abs().pow(2).mean().pow(0.5).item(), maxVal=2.0)
            psnr = comp_psnr(pred, target)
            psnr_gif = comp_psnr(pred_gif, target)
            tensor2im = lambda x: np.moveaxis(x.cpu().numpy(), 0, 2)
            ssim, ssim_gif = 0.0, 0.0
            for k in range(L):
                ssim += comp_ssim(tensor2im(pred[0, k]), tensor2im(target[0, k]), data_range=2.0, multichannel=True)/L
                ssim_gif += comp_ssim(tensor2im(pred_gif[0, k]), tensor2im(target[0, k]), data_range=2.0, multichannel=True)/L
            values = [psnr, psnr_gif, ssim, ssim_gif]
            assert len(tags) == len(values)
            for tag, value in zip(tags, values):
                epL[tag].update(value, 1.0/T)
    # logging
    state = edict({k:v.avg for k, v in epL.items()})
    print('Evaluate_Summary: Epoch [{:03d}/{:03d}], {}'.format(epoch, opts.nEpoch, state))
    if opts.board is not None:
        for tag, value in state.items():
            opts.board.add_scalar('eval_epoch/'+tag, value, epoch)
def main_train():
    """Full training driver: build loaders/model/optimizer/scheduler,
    resume from a checkpoint or an init model, then loop epochs calling
    train()/train_nogan(), saving checkpoints and evaluating periodically."""
    print('==> create dataset loader')
    trLD, evalLD = create_dataloader()
    fakeABPool = ImagePool(opts.pool_size)
    print('==> create model, optimizer, scheduler')
    model = create_model()
    optimizer = create_optimizer(model, opts)
    scheduler = create_scheduler(optimizer, opts)
    print('==> initialize with checkpoint or initModel ?')
    FIRST_EPOCH = 1 # do not change
    USE_CKPT = opts.checkpoint >= FIRST_EPOCH
    if USE_CKPT:
        resume_checkpoint(opts.checkpoint, model, optimizer, opts)
        start_epoch = opts.checkpoint + 1
    else:
        initialize(model, opts.initModel)
        start_epoch = FIRST_EPOCH
    print('==> start training from epoch %d'%(start_epoch))
    for epoch in range(start_epoch, FIRST_EPOCH + opts.nEpoch):
        print('\nEpoch {}:\n'.format(epoch))
        # Advance every LR scheduler and log the resulting learning rates.
        for key in scheduler.keys():
            scheduler[key].step(epoch-1)
            lr = scheduler[key].optimizer.param_groups[0]['lr']
            print('learning rate of {} is set to {}'.format(key, lr))
            if opts.board is not None: opts.board.add_scalar('lr_schedule/'+key, lr, epoch)
        if opts.nogan:
            train_nogan(epoch, trLD, model, optimizer)
        else:
            train(epoch, trLD, model, optimizer, fakeABPool)
        # Skip checkpoint/eval in --OneBatch debug mode.
        if not opts.OneBatch and epoch%opts.saveStep==0:
            save_checkpoint(epoch, model, optimizer, opts)
        if not opts.OneBatch and epoch%opts.evalStep==0:
            evaluate(epoch, evalLD, model)
def main_eval():
    """Evaluate a model initialized from opts.initModel (epoch logged as -1)."""
    model = create_model()
    initialize(model, opts.initModel)
    evalLD = create_dataloader()[1]
    evaluate(-1, evalLD, model)
def main_vis():
    """Render 2x2 comparison videos (GIF baseline | ground truth on top,
    prediction | error map below) for the first opts.visNum eval sequences,
    written as mp4 files into opts.visDir."""
    mkdir(opts.visDir)
    print('==> load model')
    model = create_model()
    initialize(model, opts.initModel)
    netG = model.netG
    netG.eval()
    print('==> create data loader')
    visSet = torchdata.gif_faces_ct_eval(inputRoot=opts.inputRoot, tStride=opts.tCrop, tCrop=opts.tCrop)
    for i, (gif0s, gif1s, targets, color0s, color1s) in progressbar.progressbar(enumerate(visSet), max_value=min(opts.visNum, len(visSet))):
        # i, (gif0s, gif1s, targets) = 0, next(iter(visSet))
        # gif0s, gif1s: T, C, H, W
        # targets: T, L, C, H, W
        if i >= opts.visNum: break
        T, L, C, H, W = targets.size()
        ims_target = np.moveaxis(targets.view(T*L, C, H, W).numpy().astype(np.uint8), 1, 3)
        ims_gif, ims_pred = [], []
        for j in range(T):
            gif0, gif1, color0, color1 = gif0s[j:j+1], gif1s[j:j+1], color0s[j:j+1], color1s[j:j+1]
            gif0, gif1, color0, color1 = list(map(lambda x: preprocess(x).to(DEVICE), (gif0, gif1, color0, color1)))
            ts = np.linspace(0, 1, L)[1:L-1].tolist()
            # Inference only: colorize the end frames, then interpolate.
            with torch.no_grad():
                ################################################
                I0 = color_model1(gif0).tanh()
                for _ in range(opts.unroll):
                    new_input = iterative_input(I0, gif0, color0)
                    I0 = (I0 + color_model2(new_input)).tanh()
                I1 = color_model1(gif1).tanh()
                for _ in range(opts.unroll):
                    new_input = iterative_input(I1, gif1, color1)
                    I1 = (I1 + color_model2(new_input)).tanh()
                ################################################
                if not opts.sequential:
                    Its, _, _, _, _, _ = model.netG(gif0, gif1, I0, I1, ts)
                else:
                    Its, _, _, _, _, _ = model.netG(I0, I1, I0, I1, ts)
            pred = torch.cat((I0.unsqueeze(dim=1), Its, I1.unsqueeze(dim=1)), dim=1)
            # pred_gif = torch.cat(list((gif0 if t<=0.5 else gif1).unsqueeze(1) for t in np.linspace(0, 1, L).tolist()), dim=1)
            pred_gif = torch.cat(list((gif0 if t<0.999 else gif1).unsqueeze(1) for t in np.linspace(0, 1, L).tolist()), dim=1)
            pred = np.moveaxis(postprocess(pred[0]).cpu().numpy().astype(np.uint8), 1, 3)
            pred_gif = np.moveaxis(postprocess(pred_gif[0]).cpu().numpy().astype(np.uint8), 1, 3)
            ims_gif.append(pred_gif)
            ims_pred.append(pred)
        ims_gif = np.concatenate(ims_gif, axis=0)
        ims_pred = np.concatenate(ims_pred, axis=0)
        # Mean absolute error, grayscale, amplified (x255/20) for visibility.
        ims_error = np.abs(ims_target.astype(float) - ims_pred.astype(float))
        ims_error = np.tile(ims_error.mean(axis=3, keepdims=True), 3)
        ims_error = (ims_error / 20.0 * 255.0).astype(np.uint8)
        ims_row1 = np.concatenate([ims_gif, ims_target], axis=2)
        ims_row2 = np.concatenate([ims_pred, ims_error ], axis=2)
        ims_four = np.concatenate([ims_row1, ims_row2 ], axis=1)
        fourcc = cv2.VideoWriter_fourcc(*'x264') #(*'DIVX') # 'x264' doesn't work
        fps = 25.0
        _, H_, W_, _ = ims_four.shape
        video = cv2.VideoWriter('{}/{:04d}_result.mp4'.format(opts.visDir, i+1), fourcc, fps, (W_, H_))
        for j, im in enumerate(ims_four):
            # OpenCV expects BGR channel order; flip RGB here.
            video.write(im[:, :, ::-1])
        video.release()
def main_apply():
    """Intended to interpolate an arbitrary input GIF by factor opts.applyT.

    NOTE(review): the entire implementation below is commented out, so this
    mode currently only prints a banner. The dead code also references
    `color0`/`color1` without ever computing them, so it would need fixing
    (not just uncommenting) to work.
    """
    print('==> read gif frames')
    # ims = imageio.mimread(opts.applyFile)
    # L, H, W = len(ims), ims[0].shape[0], ims[0].shape[1]
    # for i in range(L):
    #     if ims[i].ndim == 2:
    #         ims[i] = np.broadcast_to(np.expand_dims(ims[i], 2), list(ims[i].shape) + [3])
    #     elif ims[i].ndim == 3:
    #         ims[i] = ims[i][:,:,:3]
    # print('==> load model')
    # model = create_model()
    # initialize(model, opts.initModel)
    # netG = model.netG
    # netG.eval()
    # print('==> processing')
    # ims_gif, ims_pred = [], []
    # for i in range(L-1):
    #     gif0, gif1 = ims[i], ims[i+1]
    #     im2cutensor = lambda im: preprocess(torch.ByteTensor(np.moveaxis(im, 2, 0)).view(1, 3, H, W)).to(DEVICE)
    #     gif0 = im2cutensor(gif0)
    #     gif1 = im2cutensor(gif1)
    #     ts = np.linspace(0, 1, opts.applyT)[1:opts.applyT].tolist()
    #     with torch.no_grad():
    #         ################################################
    #         I0 = color_model1(gif0).tanh()
    #         for _ in range(opts.unroll):
    #             new_input = iterative_input(I0, gif0, color0)
    #             I0 = (I0 + color_model2(new_input)).tanh()
    #         I1 = color_model1(gif1).tanh()
    #         for _ in range(opts.unroll):
    #             new_input = iterative_input(I1, gif1, color1)
    #             I1 = (I1 + color_model2(new_input)).tanh()
    #         ################################################
    #         if not opts.sequential:
    #             Its, _, _, _, _, _ = model.netG(gif0, gif1, I0, I1, ts)
    #         else:
    #             Its, _, _, _, _, _ = model.netG(I0, I1, I0, I1, ts)
    #     pred = torch.cat((I0.unsqueeze(dim=1), Its, I1.unsqueeze(dim=1)), dim=1)
    #     pred_gif = torch.cat(list((gif0 if t<=0.5 else gif1).unsqueeze(1) for t in np.linspace(0, 1, opts.applyT+1).tolist()), dim=1)
    #     #pred_gif = torch.cat(list((gif0 if t<0.999 else gif1).unsqueeze(1) for t in np.linspace(0, 1, opts.applyT+1).tolist()), dim=1)
    #     pred = np.moveaxis(postprocess(pred[0][:-1]).cpu().numpy().astype(np.uint8), 1, 3)
    #     pred_gif = np.moveaxis(postprocess(pred_gif[0][:-1]).cpu().numpy().astype(np.uint8), 1, 3)
    #     ims_gif.append(pred_gif)
    #     ims_pred.append(pred)
    # ims_gif = np.concatenate(ims_gif, axis=0)
    # ims_pred = np.concatenate(ims_pred, axis=0)
    # ims_row = np.concatenate([ims_gif, ims_pred], axis=2)
    # imageio.mimwrite('{}_t{}.mp4'.format(opts.applyFile, opts.applyT), ims_row)
if __name__ == '__main__':
    # Mode dispatch: eval / vis / apply disable the tensorboard writer;
    # if none of those flags is set, fall through to training.
    # NOTE(review): the modes are independent `if`s, not elif — several
    # modes could run in one invocation; confirm intended.
    trainMode = True
    if opts.evalMode:
        opts.board = None
        trainMode = False
        main_eval()
    if opts.visMode:
        opts.board = None
        trainMode = False
        main_vis()
    if opts.applyMode:
        opts.board = None
        trainMode = False
        main_apply()
    if trainMode:
        # Log hyperparameters and training curves under saveDir/board.
        opts.board = SummaryWriter(os.path.join(opts.saveDir, 'board'))
        options_text = print_options(opts, parser)
        opts.board.add_text('options', options_text, opts.checkpoint)
        main_train()
        opts.board.close()
|
# About the __str__ and __repr__ methods
# Overriding them for a non-collection object
class Card:
    """A playing card; concrete subclasses supply _point() for the
    (hard, soft) point values."""

    # No insurance offered by default.
    insure = False

    def __init__(self, rank, suit):
        self.rank = rank
        self.suit = suit
        # _point() is defined by subclasses (e.g. NumberCard).
        self.hard, self.soft = self._point()

    def __repr__(self):
        return f"{self.__class__.__name__}(suit={self.suit!r},rank = {self.rank!r})"

    def __str__(self):
        return f"{self.rank}{self.suit}"
class NumberCard(Card):
    """Card whose point values come from numeric conversion.

    NOTE(review): the soft value is int(suit), mirroring the original —
    unusual (suit as a point value); confirm against the exercise text.
    """

    def _point(self):
        hard, soft = int(self.rank), int(self.suit)
        return hard, soft
# Demo: str() gives rank+suit ("2100"); repr() gives the constructor-style form.
x = NumberCard('2','100')
print(str(x),repr(x))
# Overriding __str__/__repr__ for an object that wraps a collection
class Hand:
    """A hand of cards dealt against a dealer card; wraps a list of cards."""

    def __init__(self, dealer_card, *cards):
        self.dealer_card = dealer_card
        # The wrapped collection.
        self.cards = list(cards)

    def __str__(self):
        return ",".join(str(card) for card in self.cards)

    def __repr__(self):
        cards_repr = ",".join(repr(card) for card in self.cards)
        return f"{self.__class__.__name__}({self.dealer_card!r},{cards_repr})"
|
import factory
from src.client import Client
from src.facture import Facture
from src.produit import Produit
class client_factory(factory.Factory):
    """factory_boy factory producing Client instances with fake names."""
    # NOTE(review): name is not PascalCase (ClientFactory); kept because
    # test modules may import it under this name.
    class Meta:
        model = Client
    nom = factory.Faker("last_name")
    prenom = factory.Faker('first_name')
class facture_factory(factory.Factory):
    """factory_boy factory producing Facture instances."""
    class Meta:
        model = Facture
    # NOTE(review): this builds ONE Client at class-definition time, shared
    # by every Facture this factory produces; factory.SubFactory(client_factory)
    # would create a fresh client per build — confirm which is intended.
    client = client_factory()
    numero = factory.Faker('pyint')
class produit_factory(factory.Factory):
    """factory_boy factory producing Produit instances."""
    class Meta:
        model = Produit
    nom = factory.Faker('company')
    # Positive price with up to 3 integer digits and 2 decimals.
    prix = factory.Faker('pyfloat', left_digits=3, right_digits=2, positive=True)
|
def split(filename="file.txt"):
    """Read *filename* and return its whitespace-separated words.

    The filename parameter defaults to "file.txt" so existing call
    sites (split() with no arguments) keep working.
    """
    # `with` guarantees the handle is closed; the original leaked it.
    with open(filename, "r") as handle:
        return handle.read().split()
def ay(array):
    """Pig-latin-style transform, in place.

    For each word longer than 3 characters, move its first letter to the
    end and append "ay". Shorter words are left untouched. Mutates
    *array* and also returns it (matching the original contract).
    """
    # enumerate replaces the range(len(...)) anti-idiom.
    for i, word in enumerate(array):
        if len(word) > 3:
            array[i] = word[1:] + word[0] + "ay"
    return array
# Script entry: read file.txt, transform each word, print one per line.
filetext = split()
wordsay = ay(filetext)
for i in range (len(wordsay)):
    print(wordsay[i])
|
from .biginteger import BigInteger
from .uint import *
# Public package API: the big-integer type plus the fixed-width unsigned
# integer types re-exported from .uint.
__all__ = ['BigInteger', 'UInt160', 'UInt256']
|
from django.contrib.auth.models import AbstractUser
from django.db import models
class CustomUser(AbstractUser):
    """Django user extended with a rating and activity counters.

    The counters are presumably maintained by application code when blogs
    and comments are created — not visible from this model.
    """
    rating = models.IntegerField(default=0)  # aggregate user rating
    count_blogs = models.IntegerField(default=0)  # blog-post counter
    count_comments = models.IntegerField(default=0)  # comment counter
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 25 22:10:30 2021
@author: Nishad Mandlik
"""
import pysniff.utils as pu
import nest_asyncio
# Source directory of filtered pcap captures and destination for pickles.
IN_DIR = "./logs/pcap_filt"
OUT_DIR = "./logs/pickle"
# nest_asyncio lets pysniff's event loop run inside an already-running
# loop (e.g. when this script is executed from IPython/Jupyter).
nest_asyncio.apply()
pu.pcap_dir_to_pickle(IN_DIR, out_dir_path=OUT_DIR)
# pu.pcap_dir_to_pickle(
#     "./logs/pcap/Delft Station Cycle Stand (Weekend)", out_dir_path="./logs/pickle")
|
import os
from celery import Celery
# Point Celery at the Django settings module before it is instantiated.
os.environ.setdefault('DJANGO_SETTINGS_MODULE','ddf.settings')
app = Celery('ddf')
# Load all CELERY_*-prefixed options from Django settings.
app.config_from_object('django.conf:settings',namespace = 'CELERY')
# Discover tasks.py modules in every installed Django app.
app.autodiscover_tasks()
__author__ = 'alex'
# Write a program that accepts an arbitrary regular expression as input
# and performs the following transformations:
# 1) Converts the regular expression directly into a DFA.
# 2) From the DFA, builds an equivalent automaton with the smallest
#    possible number of states.
#    Hint: use the algorithm described at
#    http://neerc.ifmo.ru/wiki/index.php?title=Минимизация_ДКА,_алгоритм_Хопкрофта_(сложность_O(n_log_n)
#    )
# 3) Simulates the minimal automaton on an input string of terminals
#    of the source grammar.
from dfa import DFA
# Example expression with nondeterministic structure; the commented
# alternative is a plain literal string.
regexp = "(a|b)*abb"
# regexp = 'abaacabd'
dfa = DFA()
# Build the DFA directly from the regular expression.
dfa.make(regexp)
print('DFA of "%s":' % regexp)
print('Alphabet: ', dfa.alphabet)
print('Start', dfa.start_state)
print('States', dfa.states)
print('Transformation table', dfa.transformation_table)
print('Finite states', dfa.finite_states)
# Minimize the state count (Hopcroft's algorithm, per the header note).
dfa.minimize()
print('Minimized states', dfa.minimized_states)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-22 10:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Node.intent optionally blank but unique."""

    dependencies = [
        ('conversationtree', '0007_auto_20171022_1225'),
    ]

    operations = [
        migrations.AlterField(
            model_name='node',
            name='intent',
            field=models.CharField(blank=True, max_length=100, unique=True),
        ),
    ]
|
# Generated by Django 3.1.7 on 2021-06-17 06:43
import blog.models
import ckeditor.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the blog Article model."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('slug', models.SlugField(max_length=200, unique=True)),
                ('image', models.ImageField(upload_to=blog.models.upload_gallery_image_path)),
                ('body', ckeditor.fields.RichTextField()),
                ('iframe', models.TextField(blank=True, null=True)),
                ('create', models.DateTimeField(auto_now_add=True)),
                ('update', models.DateTimeField(auto_now=True)),
                # 'p' = publish, 'd' = draft.
                ('status', models.CharField(choices=[('p', 'publish'), ('d', 'draft')], max_length=1)),
                ('hits', models.IntegerField(default=1)),
                ('numbers_rating', models.FloatField(default=0)),
                ('scope_avrage', models.FloatField(default=0)),
                ('rating', models.DecimalField(decimal_places=2, default=0, max_digits=3)),
                # Author; kept (SET_NULL) if the user account is deleted.
                ('user', models.ForeignKey(default=1, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
from typing import (
Type,
)
from eth.rlp.blocks import BaseBlock
from eth.vm.state import BaseState
from .blocks import GrayGlacierBlock
from .headers import (
compute_gray_glacier_difficulty,
configure_gray_glacier_header,
create_gray_glacier_header_from_parent,
)
from .state import GrayGlacierState
from .. import ArrowGlacierVM
class GrayGlacierVM(ArrowGlacierVM):
    """VM implementation for the Ethereum "gray-glacier" fork.

    Inherits all behaviour from ArrowGlacierVM, swapping in the
    Gray-Glacier-specific block/state classes and difficulty logic.
    """
    # fork name
    fork = "gray-glacier"

    # classes
    block_class: Type[BaseBlock] = GrayGlacierBlock
    _state_class: Type[BaseState] = GrayGlacierState

    # Methods
    create_header_from_parent = staticmethod(  # type: ignore
        create_gray_glacier_header_from_parent(compute_gray_glacier_difficulty)
    )
    compute_difficulty = staticmethod(compute_gray_glacier_difficulty)  # type: ignore
    configure_header = configure_gray_glacier_header
|
# -*- coding: utf-8 -*-
import Tfidf
import os
table = Tfidf.Tfidf()
# Class labels for the two document categories.
positive = 1
negative = -1
#import files
datano = "1"
path = "data 1/s%s/" %datano
multiFile = "data 1/"
# Fold held out for evaluation; its directory is skipped below.
evaluationDataNum = 1
demopath = 'demodata/'
# Walk the corpus: label "baseball" documents positive, everything else
# negative; skip macOS metadata files and the held-out fold.
for root, dirs, files in os.walk(multiFile):
    for f in files:
        if f == ".DS_Store" : continue
        if ("s%d" %evaluationDataNum) in root: continue
        fileHandle = open (os.path.join(root,f))
        if "baseball" in root:
            table.addDocument(positive, f, fileHandle.read().split())
        else:
            table.addDocument(negative, f, fileHandle.read().split())
        fileHandle.close()
# Write one "<label> <tfidf-vector>" line per document.
doclist = table.getTfIdf()
fileHandle = open ("midData/FVWithoutNo.%d.txt" %evaluationDataNum, 'w')
#fileHandle = open ("midData/s%sFeatureVector.txt" %datano, 'w')
for doc in doclist:
    data = "%d" %doc[0] + " " +str(doc[2]) +"\n"
    fileHandle.write(data)
fileHandle.close();
|
# The eight planets of the Solar System (not in orbital order).
planets = ["Earth","Mars","Neptune","Venus","Mercury","Saturn","Jupiter","Uranus"]

# Print each planet name on its own line.
for planet in planets:
    print(planet)
|
"""stepik_djumanji URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from vacancies.views import MainView, VacancyView, CompanyView, ListVacanciesView, VacanciesBySpecialtyView, \
LetStartView, CreateCompanyView, MyCompanyView, MyVacanciesView, CreateVacancyView, MyVacancyDetailView, SearchView
from accounts.views import login_view, register_view, logout_view
urlpatterns = [
    path('admin/', admin.site.urls, name='admin_panel'),
    # Public catalogue pages.
    path('', MainView.as_view(), name='main'),
    path('vacancies/', ListVacanciesView.as_view(), name='vacancies'),
    path('vacancies/cat/<specialty>/', VacanciesBySpecialtyView.as_view(), name='specialisation'),
    path('companies/<int:pk>/', CompanyView.as_view(), name='company'),
    path('vacancies/<int:pk>/', VacancyView.as_view(), name='vacancy'),
    # Same view handles the application form POST.
    path('vacancies/<int:pk>/send/', VacancyView.as_view(), name='vacancy_send'),
    # Company-owner area.
    path('mycompany/letsstart/', LetStartView.as_view(), name='letsstart'),
    path('mycompany/create/', CreateCompanyView.as_view(), name='create_company'),
    path('mycompany/', MyCompanyView.as_view(), name='my_company'),
    path('mycompany/vacancies/', MyVacanciesView.as_view(), name='my_vacancies'),
    path('mycompany/vacancies/create/', CreateVacancyView.as_view(), name='create_vacancy'),
    path('mycompany/vacancies/<pk>', MyVacancyDetailView.as_view(), name='my_vacancy'),
    path('search/', SearchView.as_view(), name='search'),
    # Authentication.
    path('login/', login_view, name='login'),
    path('register/', register_view, name='register'),
    path('logout/', logout_view, name='logout'),
]
# Serve uploaded media and static assets (development setup).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) \
               + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
def break_words(stuff):
    """Split *stuff* on single spaces and return the resulting words."""
    return stuff.split(' ')
def sort_words(words):
    """Return a new list containing *words* in sorted order."""
    return sorted(words)
def print_first_word(words):
    """Pop the first word off *words* (mutating it) and print it."""
    print(words.pop(0))
def print_last_word(words):
    """Pop the last word off *words* (mutating it) and print it."""
    # Fixed docstring: the original said "first word", copied from
    # print_first_word above; the code pops the last element.
    last_word = words.pop(-1)
    print(last_word)
def sort_sentence(sentence):
    """Break *sentence* into words and return them sorted."""
    return sort_words(break_words(sentence))
def print_first_and_last(sentence):
    """Print the first and the last word of *sentence*."""
    words = break_words(sentence)
    print_first_word(words)
    # Fixed: the original passed the misspelled name `worqds`, which
    # raised NameError whenever this function was called.
    print_last_word(words)
def print_first_and_last_sorted(sentence):
    """Print the first and last word of *sentence* after sorting."""
    ordered = sort_sentence(sentence)
    print_first_word(ordered)
    print_last_word(ordered)
import numpy as np
import pandas as pd
#
from numpy.random import randn
#
# labels = ['a', 'b', 'c']
# my_data = [10, 20, 30]
# arr = np.array(my_data)
# d = {'a': 10, 'b': 20, 'c': 30}
#
# # Create a series from string and number array
# print(pd.Series(my_data, labels))
#
# # Create a series from np
# print(pd.Series(arr, labels))
#
# # Create a series from dictionary
# print(pd.Series(d))
#
# # Create a series with built-in functions
# print(pd.Series([sum, print, len]))
#
# ser1 = pd.Series([1, 2, 3, 4], ['USA', 'Germany', 'USSR', 'Japan'])
# ser2 = pd.Series([1, 2, 5, 4], ['USA', 'Germany', 'Italy', 'Japan'])
# ser3 = pd.Series(labels)
#
# # Sum together series with equal keys
# print(ser1 + ser2)
#
# Seed the generator so the random frame is reproducible across runs.
np.random.seed(101)
row_labels = ['A', 'B', 'C', 'D', 'E']
col_labels = ['W', 'X', 'Y', 'Z']
# 5x4 frame of standard-normal draws with labelled axes.
df = pd.DataFrame(randn(5, 4), row_labels, col_labels)
# print(df)
#
# # Grab columns from DataFrame using bracket notation
# print(df['W'])
# # DataFrame is just a collection of Series
# print(type(df['W']))
# # Pass in a list in order to grab several columns
# print(df[['W', 'Z']])
#
# df['new'] = df['W'] + df['Y']
# print(df['new'])
#
# # Drop a row (rows referred to as 0-axis, columns as 1-axis) inplace=True means permanently drop
# df.drop('E', axis=0, inplace=False)
# print(df)
#
# # Get row/columns of matrix
# print(df.shape)
#
# # Select rows from dataframe (as series)
# Label-based row selection: df.loc['A'] returns row 'A' as a Series.
print(df.loc['A'])
#
# # Select rows (index based location)
# print(df.iloc[2])
#
# # Select cell from (row, col)
# print(df.loc['B', 'Y'])
#
# # Select multiple cells
# print(df.loc[['A', 'B'], ['W', 'Y']])
#
# # Conditional selection (get all cell values relative to predicate)
# print(df > 0)
# print(df[df > 0])
#
# # Return bool values of col W
# print(df['W'] > 0)
# # Only return true values of col W
# print(df[df['W'] > 0])
#
# # Grab all rows in dataframe where Sum(row) < 0
# print(df[df['Z'] < 0])
#
# resultdf = df[df['W'] > 0][['X', 'Y']]
# print(resultdf)
#
# # Multiple conditions (use ampersand instead of and when dealing with a series of bool values)
# print(df[(df['W'] > 0) & (df['Y'] > 1)])
#
# # Reset index (only here)
# print(df.reset_index(inplace=False))
#
# new_ind = 'CA NY WY OR CO'.split()
# print(new_ind)
#
# # Add new column
# df['States'] = new_ind
# print(df)
#
# # Set new index
# print(df.set_index('States'))
#
# # Index hierarchy
# outside = ['G1', 'G1', 'G1', 'G2', 'G2', 'G2']
# inside = [1, 2, 3, 1, 2, 3]
# hier_index = list(zip(outside, inside))
# hier_index = pd.MultiIndex.from_tuples(hier_index)
#
# df = pd.DataFrame(randn(6, 2), hier_index, ['A', 'B'])
#
# print(df)
# print(df.loc['G1'])
# print(df.loc['G1'].iloc[1])
#
# # Set label indexes
# df.index.names = ['Groups', 'Num']
# print(df)
#
# print(df.loc['G2'].loc[2]['B'])
#
# # Cross section grab
# print(df.xs(1, level='Num'))
#
# # Missing data
# d = {'A': [1, 2, np.nan], 'B': [5, np.nan, np.nan], 'C': [1, 2, 3]}
# df = pd.DataFrame(d)
# print(df)
#
# # Drop all rows with null/NaN values
# print(df.dropna())
#
# # Drop all columns with null/NaN values
# print(df.dropna(axis=1))
#
# # Drop rows with x null/NaN values
# print(df.dropna(thresh=2))
#
# # Fill null/NaN values
# print(df.fillna(value='FILL'))
#
# # Group by with aggregate functions
# data = {'Company': ['GOOG', 'GOOG', 'MSFT', 'MSFT', 'FB', 'FB'],
# 'Person': ['Sam', 'Charlie', 'Amy', 'Vanessa', 'Carl', 'Sarah'],
# 'Sales': [200, 120, 340, 124, 243, 350]}
#
# df = pd.DataFrame(data)
# groupByComp = df.groupby('Company')
# # group by mean value (ignores string columns)
# print(groupByComp.mean())
#
# print(groupByComp.sum().loc['FB'])
# print(groupByComp.count())
# print(groupByComp.min())
# print(groupByComp.describe().transpose())
#
# # Operations
# df = pd.DataFrame({'col1': [1, 2, 3, 4],
# 'col2': [444, 555, 666, 444],
# 'col3': ['abc', 'def', 'ghi', 'xyz']})
#
# print(df.head())
# # Find unique values in dataframe (nunique for counting)
# print(df['col2'].unique())
#
# print(df['col2'].value_counts())
#
# # Conditional selection
# print(df[(df['col1'] > 2) & (df['col2'] == 444)])
#
#
# def times2(x):
# return x * 2
#
#
# print(df['col1'])
# # Apply custom function
# print(df['col1'].apply(times2))
# print(df['col1'].apply(lambda x: x * 2))
#
# # Remove columns
# print(df.drop('col1', axis=1))
#
# # Sort by column 2
# print(df.sort_values('col2'))
#
# # Find null values
# print(df.isnull())
#
# data = {'A': ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'],
# 'B': ['one', 'one', 'two', 'two', 'one', 'one'],
# 'C': ['x', 'y', 'x', 'y', 'x', 'y'],
# 'D': [1, 3, 2, 5, 4, 1]}
#
# df = pd.DataFrame(data)
#
# print(df)
# # Create pivot table
# print(df.pivot_table(values='D', index=['A', 'B'], columns=['C']))
|
from __future__ import absolute_import
import unittest
import numpy as np
from mozsci import evaluation
from mozsci.inputs import mean_std_weighted
from six.moves import range
class TestAUCFast(unittest.TestCase):
    """Tests for evaluation.auc_wmw_fast (Wilcoxon-Mann-Whitney AUC)."""

    def test_auc_wmw_fast(self):
        """AUC of a hand-computed mixed-label example."""
        t = [-1, -1, -1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1]
        p = [0.01, 0.05, 0.2, 0.25, 0.1, 0.9, 0.6, 0.01, 0.90, 1.0, 0.33, 0.55, 0.555]
        auc_act = 0.54761904761904767
        auc = evaluation.auc_wmw_fast(t, p)
        self.assertTrue(abs(auc_act - auc) < 1.0e-8)

    def test_auc_degenerate(self):
        """A single sample has no positive/negative pair, so AUC is 0."""
        y = np.array([0])
        ypred = np.array([[ 1.0]])
        weights = np.array([1])
        auc = evaluation.auc_wmw_fast(y, ypred, weights=weights)
        self.assertTrue(auc == 0)
class Testclassification_error(unittest.TestCase):
    """Tests for evaluation.classification_error (0/1 error rate)."""

    def test_classification_error(self):
        y = np.array([0, 1, 1, 0])
        ypred = np.array([0.1, 0.9, 0.4, 0.2])
        # Default threshold (presumably 0.5 — confirm in evaluation):
        # only the third prediction is wrong -> error 1/4.
        self.assertTrue(abs(evaluation.classification_error(y, ypred) - 0.25) <
            1e-12)
        # Lowering the threshold to 0.3 makes every prediction correct.
        self.assertTrue(abs(evaluation.classification_error(y, ypred, thres=0.3
            ) - 0.0) < 1e-12)
        weights = np.array([1.0, 0.8, 0.7, 0.6])
        # Weighted: correctly classified samples carry weights 1.0, 0.8, 0.6.
        self.assertTrue(abs(evaluation.classification_error(y, ypred, weights=weights) - (1.0 - (1.0 + 0.8 + 0.6) / (weights.sum()))) < 1.0e-12)
class Test_precision_recall_f1(unittest.TestCase):
    """Tests for evaluation.precision_recall_f1.

    setUp builds flat and reshaped variants of each input so the tests
    also verify that (7,), (7, 1) and (1, 7) shapes are all accepted.
    """

    def setUp(self):
        self.yactual = np.array([0, 0, 0, 0, 1, 1, 1])
        self.ypred = np.array([0, 1, 1, 1, 1, 0, 0])
        self.weights = np.array([1, 2, 3, 4, 5, 6, 7])
        # Reshaped copies: column vector and row vectors.
        self.yactual1 = self.yactual.reshape(7, 1)
        self.ypred1 = self.ypred.reshape(1, 7)
        self.weights1 = self.weights.reshape(1, 7)

    def test_precision_recall_f1(self):
        """Unweighted metrics against hand-counted tp/fp/fn."""
        tp = 1.0
        fp = 3.0
        fn = 2.0
        actual_prec_rec_f1 = Test_precision_recall_f1.prec_rec_f1_from_tp_fp_fn(tp, fp, fn)
        for y in [self.yactual, self.yactual1]:
            for ypred in [self.ypred, self.ypred1]:
                prec_rec_f1 = evaluation.precision_recall_f1(y, ypred)
                for k in range(3):
                    self.assertTrue(abs(actual_prec_rec_f1[k] - prec_rec_f1[k]) < 1e-12)

    def test_precision_recall_f1_weighted(self):
        """Weighted metrics: each sample contributes its weight to tp/fp/fn."""
        tp = 5.0
        fp = 2.0 + 3 + 4
        fn = 6.0 + 7
        actual_prec_rec_f1 = Test_precision_recall_f1.prec_rec_f1_from_tp_fp_fn(tp, fp, fn)
        for y in [self.yactual, self.yactual1]:
            for ypred in [self.ypred, self.ypred1]:
                for weights in [self.weights, self.weights1]:
                    prec_rec_f1 = evaluation.precision_recall_f1(y, ypred, weights=weights)
                    for k in range(3):
                        self.assertTrue(abs(actual_prec_rec_f1[k] - prec_rec_f1[k]) < 1e-12)

    def test_degenerate(self):
        # test case with degenerate input
        y = np.array([0])
        ypred = np.array([[ 1.0]])
        weights = np.array([1])
        prf = evaluation.precision_recall_f1(y, ypred, weights=weights)
        # check that none are NaN
        self.assertFalse(np.array([np.isnan(ele) for ele in prf]).any())
        # and they should all be 0
        self.assertTrue(np.allclose(prf, [0, 0, 0]))

    @staticmethod
    def prec_rec_f1_from_tp_fp_fn(tp, fp, fn):
        """Compute [precision, recall, f1] from raw tp/fp/fn counts."""
        actual_prec_rec_f1 = np.zeros(3)
        actual_prec_rec_f1[0] = tp / (tp + fp)  # precision
        actual_prec_rec_f1[1] = tp / (tp + fn)  # recall
        actual_prec_rec_f1[2] = 2.0 * actual_prec_rec_f1[0] * actual_prec_rec_f1[1] / (actual_prec_rec_f1[0] + actual_prec_rec_f1[1])  # f1
        return actual_prec_rec_f1
class Test_pearson_weighted(unittest.TestCase):
    """Tests for evaluation.pearsonr_weighted."""

    def test_pearson_weighted(self):
        from scipy.stats import pearsonr
        x = np.array([1, 2, 3, 4, 5])
        y = np.array([1.0, 1.5, -0.5, 3.4, 2.9])
        weights = np.array([1, 0, 0.5, 2, 1.5])
        # With no weights (or all-ones weights) the result must match
        # scipy's unweighted pearsonr.
        r_no_wgt = pearsonr(x, y)[0]
        r_no_wgt_test = evaluation.pearsonr_weighted(x, y)
        r_ones_wgt = evaluation.pearsonr_weighted(x, y, np.ones(x.shape))
        self.assertTrue(abs(r_no_wgt - r_no_wgt_test) < 1e-12)
        self.assertTrue(abs(r_no_wgt - r_ones_wgt) < 1e-12)
        # Weighted covariance computed by hand from weighted means.
        xm = mean_std_weighted(x, weights)
        ym = mean_std_weighted(y, weights)
        r_wgt = np.sum((x - xm['mean']) * (y - ym['mean']) * weights) / np.sum(weights)
        self.assertTrue((evaluation.pearsonr_weighted(x, y, weights) - r_wgt) < 1e-12)
class Test_spearmanr_by(unittest.TestCase):
    """Test for evaluation.spearmanr_by (per-group Spearman correlation)."""

    def test_spearmanr_by(self):
        f = np.array([50, 52.19589972, 44.97281905, 50,
            47.6719409 , 45.96619825, 50, 50,
            48.18824048, 54.88529706, 42.67667074, 41.80373588,
            37.29934119, 57.98812747, 45.04782628, 38.10858417,
            46.44031713, 40.59823939, 26.29936944, 23.96820474,
            47.98343799, 36.4455311 , 43.92931621, 55.19172514,
            33.44633285, 37.38381116, 39.03392758, 41.43285553,
            28.63082987, 31.86069758, 41.19551474, 29.04928565,
            39.09690404, 36.75441683, 29.66390582, 70.4035713 ,
            63.53532854, 49.78916058, 64.39911984, 65.41353192,
            48.42353021, 60.38572122, 42.44357922, 42.86378695,
            58.93821467, 61.93862217, 36.23459784, 64.57533596,
            40.09399141, 45.57233379, 44.7748158 , 50.88705955,
            47.24016865, 51.75866967, 36.17935042, 46.73933887,
            52.7136634 , 47.0337377 , 34.19077012, 18.5836512 ,
            41.63257011, 9.8698871 , 37.63277795, 47.71676464,
            34.89667886, 35.10845963, 44.56638481, 36.70884056,
            57.9185177 , 50.65260932, 58.53307806, 43.25154747,
            40.59802125, 38.97005406, 35.19682907, 51.94755877,
            44.04430199, 35.84048228, 36.25006727, 46.35317423,
            37.44668618, 16.90596421, 38.87970562, 47.33515849,
            27.41230181, 29.47142008])
        position = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 12.,
            13., 15., 16., 17., 19., 23., 24., 25., 26., 27., 28.,
            29., 1., 2., 3., 6., 8., 9., 11., 12., 13., 17.,
            19., 21., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
            10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20.,
            22., 23., 24., 25., 26., 27., 1., 2., 4., 5., 6.,
            7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17.,
            18., 20., 21., 22., 23., 24., 25., 26., 27.])
        # Fixed: the original passed np.int as the dtype. That alias was
        # deprecated in NumPy 1.20 and removed in 1.24, so the test died
        # with AttributeError; the builtin int is the documented spelling.
        queryid = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
            1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2,
            2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
            2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3,
            3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
            3, 3, 3, 3, 3, 3, 3, 3], int)
        fast_spearman = evaluation.spearmanr_by(f, position, queryid)
        self.assertTrue(abs(fast_spearman - -0.42666971560358913) < 1e-1)
class TestClassificationPerfMeasure(unittest.TestCase):
    """Tests for the classification_model_performance* family."""

    def test_basic_measure_1(self):
        """
        Test classification_model_performance. All correct case.
        """
        observed = np.array([0, 1, 1, 0, 0, 0, 1])
        calculated = np.array([0, 1, 1, 0, 0, 0, 1])
        measure = evaluation.classification_model_performance(observed, calculated)
        self.assertEqual(measure, 0)

    def test_basic_measure_2(self):
        """
        Test classification_model_performance. Two misclassified samples.
        """
        observed = np.array([0, 1, 0, 1, 0, 0, 1])
        calculated = np.array([0, 1, 1, 0, 0, 0, 1])
        measure = evaluation.classification_model_performance(observed, calculated)
        self.assertAlmostEqual(measure, 0.2857142857140)

    def test_basic_measure_3(self):
        """
        Test classification_model_performance. weighted case.
        """
        # NOTE(review): this test computes the measure but asserts
        # nothing, so it only verifies the call does not raise.
        observed = np.array([0, 1, 0, 1, 0, 0, 1])
        calculated = np.array([0, 1, 1, 0, 0, 0, 1])
        measure = evaluation.classification_model_performance(observed, calculated, [1.0, 3.0])

    def test_matrix_measure_1(self):
        """
        Test classification_model_performance_matrix. All correct case.
        """
        observed = np.array([0, 1, 1, 0, 0, 0, 1])
        calculated = np.array([0, 1, 1, 0, 0, 0, 1])
        measure = evaluation.classification_model_performance_matrix(observed, calculated)
        expected_measure = np.array([[4, 0], [0, 3]])
        np.testing.assert_array_almost_equal(measure, expected_measure)

    def test_matrix_measure_2(self):
        """
        Test classification_model_performance_matrix. Two misclassified samples.
        """
        observed = np.array([0, 1, 0, 1, 0, 0, 1])
        calculated = np.array([0, 1, 1, 0, 0, 0, 1])
        measure = evaluation.classification_model_performance_matrix(observed, calculated)
        expected_measure = np.array([[3, 1], [1, 2]])
        np.testing.assert_array_almost_equal(measure, expected_measure)

    def test_matrix_measure_3(self):
        """
        Test classification_model_performance_matrix. multiple classes case.
        """
        observed = np.array([1, 0, 1, 0, 1, 0, 2, 3])
        calculated = np.array([1, 0, 1, 1, 0, 2, 3, 0])
        measure = evaluation.classification_model_performance_matrix(observed, calculated)
        expected_measure = np.array([[1, 1, 1, 0], [1, 2, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0]])
        np.testing.assert_array_almost_equal(measure, expected_measure)

    def test_loss_measure_1(self):
        """
        Test classification_model_performance_loss. default loss (0-1 loss).
        """
        observed = np.array([0, 1, 1, 0, 1, 0, 1])
        calculated = np.array([0, 1, 1, 0, 0, 0, 1])
        measure = evaluation.classification_model_performance_loss(observed, calculated)
        self.assertEqual(measure, 1)

    def test_loss_measure_2(self):
        """
        Test classification_model_performance_loss. user defined loss measure - squared loss.
        """
        observed = np.array([0, 1, 0, 1, 0, 2, 1])
        calculated = np.array([0, 1, 1, 0, 2, 0, 1])
        loss = lambda i, j: (i-j)*(i-j)
        measure = evaluation.classification_model_performance_loss(observed, calculated, loss)
        self.assertEqual(measure, 10)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
from django.urls import path, include
from . import views
urlpatterns = [
    path('', views.index),
    # User account / session management.
    path('user/create', views.create_user),
    path('user/login', views.login),
    path('user/logout', views.logout),
    # Book listing and CRUD.
    path('books', views.display_books),
    path('books/create', views.add_book),
    path('books/<int:book_id>', views.one_book),
    path('books/<int:book_id>/update', views.book_update),
    path('books/<int:book_id>/delete', views.book_delete),
    # Per-user favourites.
    path('books/<int:book_id>/favorite', views.fav_book),
    path('books/<int:book_id>/unfavorite', views.unfav_book),
]
# Script to check pid rerolls for a frame
import LCRNG
from tools import getIVs
# Nature names indexed by (PID % 25), in the games' canonical order.
natures = [ "Hardy", "Lonely", "Brave", "Adamant", "Naughty", "Bold", "Docile", "Relaxed", "Impish", "Lax", "Timid", "Hasty", "Serious", "Jolly", "Naive", "Modest", "Mild", "Quiet", "Bashful", "Rash", "Calm", "Gentle", "Sassy", "Careful", "Quirky" ]
# Per-method extra RNG advances: (before first IV call, before second).
methods = {1:(0,0),2:(1,0),4:(0,1)}
def getWild(seed,m):
    """Simulate wild Pokemon generation for one RNG frame.

    Args:
        seed: LCRNG seed at the start of the frame.
        m: pair of extra advances before each IV call, selecting the
           generation method (see `methods`).

    Returns:
        [hex PID string, nature name, IVs, PID reroll count].
    """
    go = LCRNG.PokeRNG(seed)
    # Three calls consumed before the nature roll (presumably
    # encounter/slot/level calls — confirm against the game's order).
    go.nextUInt()
    go.nextUInt()
    go.nextUInt()
    searchNature = go.nextUShort() % 25
    # Keep generating PID halves until the PID's nature matches the
    # target nature, counting how many rerolls that takes.
    pidcheck = True
    rerolls = -1
    while pidcheck:
        low = go.nextUShort()
        high = go.nextUShort()
        pid = (high << 16) | low
        pidcheck = pid % 25 != searchNature
        rerolls += 1
    # Method-specific gaps before the two IV calls.
    go.advance(m[0])
    iv1 = go.nextUShort()
    go.advance(m[1])
    iv2 = go.nextUShort()
    ivs = getIVs(iv1,iv2)
    return [hex(pid),natures[searchNature],ivs, rerolls]
# Interactive driver: ask for a seed and a target frame, then show what
# each generation method would produce on that frame.
while True:
    seed = int(input("Initial Seed: 0x"),16)
    frame = int(input("Frame: "))
    rng = LCRNG.PokeRNG(seed)
    # Advance to just before the target frame; getWild consumes from there.
    rng.advance(frame-1)
    for m in methods:
        info = getWild(rng.seed,methods[m])
        # info[0][2:] strips the "0x" prefix from the hex PID.
        print("Method",m,info[0][2:],info[1],info[2])
        print("Rerolls:", info[3])
        print()
|
#!/usr/bin/env python3
"""
Setup Loud ML python package
"""
import os
from setuptools import setup
setup(
    name='loudml',
    # Version can be overridden at build time via LOUDML_VERSION.
    version=os.getenv('LOUDML_VERSION', '1.4'),
    description="Machine Learning application",
    py_modules=[
    ],
    packages=[
        'loudml',
        'rmn_common',
    ],
    setup_requires=[
    ],
    tests_require=['nose'],
    test_suite='nose.collector',
    install_requires=[
        # DO NOT ADD REQUIRES HERE
        # See base/vendor/requirements.txt.in
    ],
    extras_require={
    },
    package_data={
    },
    data_files=[
    ],
    include_package_data=True,
    zip_safe=False,
    # Console scripts plus plugin registries: subcommands, model types,
    # hooks and datasources are all discovered via these entry points.
    entry_points={
        'console_scripts': [
            'loudmld=loudml.server:main',
            'loudml-faker=loudml.faker:main',
            'loudml=loudml.cli:main',
        ],
        'loudml.commands': [
            'list-checkpoints=loudml.cli:ListCheckpointsCommand',
            'save-checkpoint=loudml.cli:SaveCheckpointCommand',
            'load-checkpoint=loudml.cli:LoadCheckpointCommand',
            'create-model=loudml.cli:CreateModelCommand',
            'delete-model=loudml.cli:DeleteModelCommand',
            'list-models=loudml.cli:ListModelsCommand',
            'list-templates=loudml.cli:ListTemplatesCommand',
            'show-model=loudml.cli:ShowModelCommand',
            'load-data=loudml.cli:LoadDataCommand',
            'train=loudml.cli:TrainCommand',
            'predict=loudml.cli:PredictCommand',
            'forecast=loudml.cli:ForecastCommand',
            'plot=loudml.cli:PlotCommand',
        ],
        'loudml.models': [
            'donut=loudml.donut:DonutModel',
        ],
        'loudml.hooks': [
            'annotations=loudml.annotations:AnnotationHook',
        ],
        'loudml.datasources': [
            'influxdb=loudml.influx:InfluxDataSource',
            'elasticsearch=loudml.elastic:ElasticsearchDataSource',
            'elasticsearch_aws=loudml.elastic_aws:ElasticsearchAWSDataSource',
            'warp10=loudml.warp10:Warp10DataSource',
            'mongodb=loudml.mongo:MongoDataSource',
        ],
    },
)
|
# Write a Python program which accepts a sequence of comma-separated numbers from user and generate a list and a tuple with those numbers
# NOTE: this is a Python 2 script (raw_input and print statements).
my_input = str(raw_input("Enter comma-separated numbers: "))
# Comprehension versions silently drop tokens that are not pure digits.
my_list = [int(x) for x in my_input.split(",") if x.strip().isdigit()]
my_tuple = tuple([int(x) for x in my_input.split(",") if x.strip().isdigit()])
# Using map
# NOTE(review): unlike the comprehensions above, map(int, ...) does not
# filter, so any non-numeric token raises ValueError here.
my_list1 = map(int, my_input.split(","))
my_tuple1 = tuple(map(int, my_input.split(",")))
print my_input
print my_list
print my_tuple
print my_list1
print my_tuple1
# Simple network-troubleshooting wizard: offer fixes one at a time and
# stop as soon as the user reports success; if everything fails, advise
# buying a new router.
suggestions = [
    "Reboot the computer and try to connect.",
    "Reboot the computer and try to connect.",
    "Make sure the cables between the router & modem are plugged in firmly.",
    "Move the router to a new location and try to connect.",
]

for suggestion in suggestions:
    print(suggestion)
    # Any answer other than "n" counts as fixed and ends the wizard.
    if input("Did that fix the problem? (y/n): ") != "n":
        break
else:
    # Every suggestion failed.
    print("Get a new router.")
|
import pygame
import constants
from level_manager import *
from art import *
from control_select import *
from game_screen import *
from soccer_screen import *
#from single_player import *
# Initialise pygame and its joystick subsystem at import time so the
# controller-select screen can poll pads immediately.
pygame.init()
pygame.joystick.init()
class ControllerScreen():
    """Controller-selection screen shown before a match starts.

    Wraps a ControlSelect menu, waits until every player is ready, then
    loads the level for the chosen game mode.
    """
    # game_mode:
    #   0: standard versus
    #   1: soccer
    #   2: practice
    def __init__(self,game_mode=0):
        self.game_mode = game_mode
        # Practice mode only needs a single player's control selection.
        if game_mode == 2:
            self.control_menu = ControlSelect(1)
        else:
            self.control_menu = ControlSelect()
        self.background_image = Art().get_image("basketBallCourt_dark")
        font = pygame.font.SysFont('Calibri', 25, True, False)
        # Timestamps driving the "everyone ready" countdown.
        self.ready_time = 0
        self.current_time = 0
        self.ready = False
        self.controller = font.render("Controller not detected!",True,constants.RED)

    def return_to_title(self):
        """Re-scan joysticks and pop back to the title screen."""
        # Re-initialising the joystick module picks up newly plugged pads.
        pygame.joystick.quit()
        pygame.joystick.init()
        for i in range(pygame.joystick.get_count()):
            pygame.joystick.Joystick(i).init()
        LevelManager().leave_level()

    def handle_keyboard_event(self, event):
        """Route one pygame event: back-out keys/buttons, then the menu.

        NOTE(review): despite the name, this also handles joystick
        button events.
        """
        if event.type == pygame.KEYDOWN:
            # Back out only while nobody has locked in yet.
            if (event.key == pygame.K_ESCAPE or \
                event.key == pygame.K_TAB or \
                event.key == pygame.K_BACKSPACE) and \
                not self.control_menu.is_anyone_ready():
                self.return_to_title()
        if event.type == pygame.JOYBUTTONDOWN:
            # Button 1 on any pad backs out, same condition as above.
            for i in range(0,pygame.joystick.get_count()):
                if pygame.joystick.Joystick(i).get_button(1) and\
                    not self.control_menu.is_anyone_ready():
                    self.return_to_title()
                    break
        self.control_menu.handle_event(event)

    def update(self):
        """Advance the menu; after PROMPT_DELAY of readiness, start the game."""
        self.current_time = pygame.time.get_ticks()
        self.control_menu.update()
        if self.control_menu.is_everyone_ready():
            if not self.ready:
                # Start the countdown the moment everyone becomes ready.
                self.ready = True
                self.ready_time = self.current_time
            else:
                if self.current_time >= self.ready_time + constants.PROMPT_DELAY:
                    if self.game_mode == 0:
                        LevelManager().load_level(GameScreen(self.control_menu.get_player1_control(),
                            self.control_menu.get_player2_control()))
                    if self.game_mode == 1:
                        # load soccer
                        pass
                    if self.game_mode == 2:
                        # NOTE(review): SinglePlayer's import is commented
                        # out at the top of the file, so this path raises
                        # NameError if reached.
                        LevelManager().load_level(SinglePlayer(self.control_menu.get_player1_control()))
                        pass
                    self.control_menu.reset()
                    self.ready = False

    def draw(self, screen):
        """Render the background and the control-select menu."""
        screen.fill(constants.WHITE)
        screen.blit(self.background_image, [0, 0])
        self.control_menu.draw(screen)
        #screen.blit(self.controller, [(constants.SCREEN_WIDTH/2 - self.controller.get_width()/2), (constants.SCREEN_HEIGHT/4)])
|
import numpy as np
def func(x, y):
    """Right-hand side of the coupled ODE system, packed as a complex:

    real part: u' = u + v + e^x * (1 - x^2)
    imag part: v' = 2u + v
    """
    u, v = y.real, y.imag
    return complex(u + v + np.exp(x) * (1 - x * x), 2 * u + v)

# Tolerance for the adaptive step-size control.
eps = 1e-5
# Initial trial step size.
h0 = 0.3
# Integration interval [x, X].
x, X = 0., 1.
# Initial condition u(0) = 0, v(0) = 0 packed as a complex value.
y = 0. + 0.j

def step(x, y, h):
    """One classical 4th-order Runge-Kutta step of size h from (x, y)."""
    k1 = h * func(x, y)
    k2 = h * func(x + h / 2, y + k1 / 2)
    k3 = h * func(x + h / 2, y + k2 / 2)
    k4 = h * func(x + h, y + k3)
    return y + (k1 + 2 * k2 + 2 * k3 + k4) / 6
def jump(x, y):
    """Take one adaptive step from (x, y).

    Halves the trial step until the result of one full step agrees with
    two half-steps to within eps (step-doubling error control), then
    returns (new_y, step_used).
    """
    global h0
    h = h0
    full_step = step(x, y, h)
    while True:
        half_first = step(x, y, h / 2)
        half_second = step(x + h / 2, half_first, h / 2)
        if np.abs(full_step - half_second) <= eps:
            return full_step, h
        # Too inaccurate: halve the step; the first half-step result is
        # exactly the full step of the new size, so reuse it.
        h /= 2
        full_step = half_first
# u'(x) = f(x, u, v)
# v'(x) = g(x, u, v)
# alpha, beta and A are the same as before
# Analytic check: u = x*e^x   =>  u' = e^x + u + v - x^2*e^x = u + v + e^x(1 - x^2)
#                 v = x^2*e^x =>  v' = 2x*e^x + x^2*e^x = 2u + v
def main():
    """Integrate the system over [x, X] with adaptive RK4, print the
    per-point error against the analytic solution u = x*e^x, v = x^2*e^x,
    and plot both curves in 3D.
    """
    # NOTE(review): `f` is declared global but never defined or used.
    global f, x, X, h0, eps, y
    x0 = x
    ans = []
    while X - x > 0:
        ans.append((x, y))
        y, h = jump(x, y)
        x += h
        # Shrink the trial step near the right end so we land on X.
        if X - x < h0:
            h0 = X - x
    ans.append((x, y))
    # Print each accepted point and its error vs the analytic solution.
    for i, xy in enumerate(ans):
        _x, _y = xy
        print(f"{i:2})x: {xy[0]:6.6}\t\ty: {xy[1].real:6.6} {xy[1].imag:6.6}\t\terr: {(np.abs(complex(_x * np.exp(_x), _x * _x * np.exp(_x)) - xy[1])):6.6}")
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # Dense sampling of the analytic solution for comparison.
    n = 1000
    testx = [x0 + i * (X - x0) / n for i in range(n)]
    testy = list(map(lambda x : x * np.exp(x), testx))
    testz = list(map(lambda x : x * x * np.exp(x), testx))
    # Transpose [(x, y), ...] into ([x...], [y...]) for plotting.
    ans = list(zip(*ans))
    plt.plot(ans[0], list(map(lambda y : y.real, ans[1])), list(map(lambda y : y.imag, ans[1])), label='Решение методом')
    ax.plot(testx, testy, testz, label='Аналитическое решение')
    plt.legend(loc='best')
    plt.show()
    return 0
if __name__ == "__main__":
main() |
def go_random():
    """Walk to a uniformly random point in the 600x600 map and return it.

    `random` and `move` are presumably provided by the game framework's
    environment (no imports are visible here) — TODO confirm.
    """
    x = random.randint(0,600)
    y = random.randint(0,600)
    move(x,y)
    return x,y
def timber_n(n, a):
    """Chop tree *a* until the team has gathered at least n more wood.

    Blocks, polling team.WOOD every 0.1 s, and returns the amount of
    wood actually gained relative to the starting stock.
    """
    start = team.WOOD
    timber(a)
    while team.WOOD < start+n:
        print (team.WOOD)
        sleep(0.1)
    return team.WOOD - start
# Wander randomly until something has been explored; then, if the most
# recent discovery is a tree and the wood stock is low, gather 200 wood.
go_random()
sleep(1)
while not explored:
    go_random()
    sleep(0.5)
a = explored[-1]
if a.isWood:
    if team.WOOD < 200:
        print('Timbering')
        print(timber_n(200, a))
        print('timbered')
        sleep(2)
|
from typing import List
class Solution:
def removeDuplicates(self, nums: List[int]) -> int:
lens = len(nums)
if lens == 0:
return 0
# recording current element index
st = 1
# previous element
pre = nums[0]
# increment index
i = 1
while i < lens:
# recording duplicate element
k = 0
while i + k < lens and nums[i + k] == pre:
k += 1
# avoid out ot index
if i + k >= lens:
break
# filling element
nums[st] = nums[i + k]
st += 1
pre = nums[i + k]
i += k
# print(nums)
return st
def removeElement(self, nums: List[int], val: int) -> int:
# k为多余的统计步骤
"""
lens = len(nums)
k, pos = 0, 0
for i in range(lens):
if nums[i] == val:
k += 1
else:
nums[pos] = nums[i]
pos += 1
# print(nums)
return lens-k
"""
# find the position and insert into it directly
lens = len(nums)
pos = 0
for i in range(lens):
if nums[i] != val:
nums[pos] = nums[i]
pos += 1
return pos
def searchInsert(self, nums: List[int], target: int) -> int:
# brute force
"""
lens = len(nums)
for i in range(lens):
if nums[i] >= target:
return i
return lens
"""
# <=
lens = len(nums)
left, right = 0, lens
# binary search template
# right length, not equal, remove which median
while left < right:
mid = left + (right - left) // 2
if nums[mid] == target:
return mid
elif nums[mid] < target:
left = mid + 1
else:
# keep right boundary
right = mid
# both left and right is fine
# the final check is needed when the result is not exit
return right
def maxSubArray(self, nums: List[int]) -> int:
# no max value
"""
lens = len(nums)
if lens == 0:
return 0
ans = 0
i, k = 0, 0
while i < lens:
ans += nums[i]
if ans < 0:
if k != 0:
ans = nums[i-k]
i -= k
k = 0
else:
ans = nums[i]
else:
k += 1
i += 1
return ans
"""
# answer
ans = nums[0]
sums = 0
for i in nums:
# sums + i > i
# positive effect
if sums > 0:
sums += i
else:
sums = i
ans = max(ans, sums)
return ans
def plusOne(self, digits: List[int]) -> List[int]:
lens = len(digits)
# wrong
'''
for i in range(lens-1, 0, -1):
if digits[i]+incre >= 9:
digits[i] = 0
incre += 1
else:
digits[i] = digits[i] + incre + 1
incre = 0
if digits[0]+incre == 9:
digits[0] = 0
digits.insert(0, 1)
else:
digits[0] = digits[0] + 1 + incre
print(digits)
return digits
'''
# increment directly
if digits[lens - 1] < 9:
digits[lens - 1] += 1
else:
# special circumstances
incre = 1
for i in range(lens - 1, 0, -1):
if digits[i] + incre > 9:
digits[i] = 0
else:
digits[i] += incre
incre = 0
# judge the first element
if incre == 1 and digits[0] == 9:
digits[0] = 0
digits.insert(0, 1)
elif incre == 1:
digits[0] += 1
# print(digits)
return digits
def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
    """Merge sorted nums2 into sorted nums1 in place.

    nums1 has length m + n; its first m slots hold data and the last n are
    placeholders. Fills nums1 from the back (largest first) so nothing needs
    shifting: O(m + n) instead of the original's O(m * n) insert-and-shift.
    The leftover debug print of nums1 was removed.
    """
    i, j = m - 1, n - 1
    # k walks the write position backwards over the whole buffer.
    for k in range(m + n - 1, -1, -1):
        if j < 0:
            # nums2 exhausted; the remaining nums1 prefix is already in place.
            break
        if i >= 0 and nums1[i] > nums2[j]:
            nums1[k] = nums1[i]
            i -= 1
        else:
            nums1[k] = nums2[j]
            j -= 1
def generate(self, numRows: int) -> List[List[int]]:
    """Return the first numRows rows of Pascal's triangle.

    Each row starts and ends with 1; interior entries are the sum of the
    two entries above them. Returns [] when numRows is 0 or negative.
    """
    triangle = []
    for row_num in range(numRows):
        # Pre-fill with 1s, then overwrite the interior from the row above.
        row = [1] * (row_num + 1)
        for j in range(1, row_num):
            row[j] = triangle[-1][j - 1] + triangle[-1][j]
        triangle.append(row)
    return triangle
def getRow(self, rowIndex: int) -> List[int]:
    """Return row rowIndex (0-based) of Pascal's triangle.

    Keeps only one row in memory, rebuilding the next row from it at
    each step. Returns [] for a negative rowIndex.
    """
    row = []
    if rowIndex >= 0:
        row = [1]
    for _ in range(rowIndex):
        # New row: 1, pairwise sums of the previous row, 1.
        row = [1] + [row[j - 1] + row[j] for j in range(1, len(row))] + [1]
    return row
if __name__ == '__main__':
    # Smoke-test each Solution method with the sample inputs from the
    # corresponding LeetCode problem statements (results go to stdout;
    # merge() returns None and prints its own result).
    show = Solution()
    print(show.removeDuplicates([0, 0, 1, 1, 1, 2, 2, 3, 3, 4]))
    print(show.removeElement([0, 1, 2, 2, 3, 0, 4, 2], 2))
    print(show.searchInsert([1, 3, 5, 6], 7))
    print(show.maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4]))
    print(show.plusOne([9, 9, 9, 9]))
    print(show.merge([0], 0, [1], 1))
    print(show.generate(5))
    print(show.getRow(3))
|
from numpy.core.fromnumeric import mean
import pandas as pd
import numpy as np
def load_data_csv(path):
    """Read the semicolon-separated, UTF-8 encoded CSV at *path* into a DataFrame."""
    return pd.read_csv(path, encoding="utf-8", sep=";")
def infos(data: pd):
    """Print a quick overview of *data*: summary statistics, first rows, columns."""
    overview = [
        ("Describe Data: \n", data.describe()),
        ("Head + 2: \n", data.head(2)),
        ("Columns: \n", data.columns),
    ]
    for label, value in overview:
        print(label, value)
def calculate_men_column(data: pd, column: str):
    """Return the mean of *column* in *data*, rounded to one decimal place.

    NOTE(review): the name looks like a typo for ``calculate_mean_column``;
    it is kept unchanged because callers in this file depend on it.
    """
    # Use a distinct local name: the original bound the result to ``mean``,
    # shadowing the module-level ``mean`` imported from numpy.
    column_mean = np.mean(data[column])
    return round(column_mean, 1)
def calculate_query_mean(data: pd, query: str, column):
    """Filter *data* with the pandas *query* expression, then return the mean
    of *column* over the filtered rows, rounded to one decimal place."""
    filtered = data.query(query)
    return round(np.mean(filtered[column]), 1)
def main():
    """Load the ENEM-MG 2019 datasets and print the mean score answering each
    question of the practical assignment (TP 1).

    Two datasets are compared throughout: all students who were present, and
    the subset of those whose scores are not zero.
    """
    # Hard-coded local paths -- adjust to your environment before running.
    path_present = "/home/annap/Documentos/IGTI/Eng_Dados/Modulo1/TP_Enem_2019/data_ENEM_MG_present.csv"
    path_present_score_total = "/home/annap/Documentos/IGTI/Eng_Dados/Modulo1/TP_Enem_2019/data_ENEM_MG_present_score_not_zero.csv"
    df_enem_present = load_data_csv(path_present)
    df_enem_score_total = load_data_csv(path_present_score_total)
    #infos(df_enem_present)
    #infos(df_enem_score_total)
    # Results for TP 1
    # What is the mean Mathematics score of all students from Minas Gerais?
    print("---Média Da Nota de Matemática. \nPresentes: {} \nPresentes sem notas zero: {}" \
        .format(calculate_men_column(df_enem_present, "NU_NOTA_MT"), calculate_men_column(df_enem_score_total, "NU_NOTA_MT")))
    # What is the mean Languages and Codes score of all students from Minas Gerais?
    print("---Média Da Nota de Linguagens e Código. \nPresentes: {} \nPresentes sem notas zero: {}" \
        .format(calculate_men_column(df_enem_present, "NU_NOTA_LC"), calculate_men_column(df_enem_score_total, "NU_NOTA_LC")))
    # What is the mean Human Sciences score of female and male students from Minas Gerais?
    query = str("TP_SEXO == 'F'")
    print("---Média Da Nota de Ciências Humanas do sexo Feminino. \nPresentes: {} \nPresentes sem notas zero: {}" \
        .format(calculate_query_mean(df_enem_present, query, "NU_NOTA_CH"), calculate_query_mean(df_enem_score_total, query, "NU_NOTA_CH")))
    query = str("TP_SEXO == 'M'")
    print("---Média Da Nota de Ciências Humanas do sexo Masculino.\nPresentes: {} \nPresentes sem notas zero: {}" \
        .format(calculate_query_mean(df_enem_present, query, "NU_NOTA_CH"), calculate_query_mean(df_enem_score_total, query, "NU_NOTA_CH")))
    # Mean Mathematics score of female students living in Montes Claros?
    query = str("TP_SEXO == 'F' & NO_MUNICIPIO_RESIDENCIA == 'Montes Claros'")
    print("---Média Da Nota de Matemática do sexo Feminino, Residentes em Montes Claros. \nPresentes: {} \nPresentes sem notas zero: {}" \
        .format(calculate_query_mean(df_enem_present, query, "NU_NOTA_MT"), calculate_query_mean(df_enem_score_total, query, "NU_NOTA_MT")))
    # Mean Mathematics score of students from Sabará who have cable TV at home (Q021 == 'B')?
    query = str(" NO_MUNICIPIO_RESIDENCIA == 'Sabará' & Q021 == 'B' ")
    print("---Média Da Nota de Matemática dos Alunos, Residentes em Sabará que possuem TV por assinatura. \nPresentes: {} \nPresentes sem notas zero: {}" \
        .format(calculate_query_mean(df_enem_present, query, "NU_NOTA_MT"), calculate_query_mean(df_enem_score_total, query, "NU_NOTA_MT")))
    # Mean Human Sciences score of students who own two microwave ovens (Q016 == 'C')?
    query = str(" Q016 == 'C' ")
    print("---Média Da Nota de Ciências Humanas dos Alunos que possuem dois fornos de micro-ondas.\nPresentes: {} \nPresentes sem notas zero: {}" \
        .format(calculate_query_mean(df_enem_present, query, "NU_NOTA_CH"), calculate_query_mean(df_enem_score_total, query, "NU_NOTA_CH")))
    # Mean Mathematics score of students whose mother completed post-graduate studies (Q002 == 'G')?
    query = str(" Q002 == 'G' ")
    print("---Média Da Nota de Matemática dos Alunos, cuja mãe completou a pós-graduação. \nPresentes: {} \nPresentes sem notas zero: {}" \
        .format(calculate_query_mean(df_enem_present, query, "NU_NOTA_MT"), calculate_query_mean(df_enem_score_total, query, "NU_NOTA_MT")))
    # Mean Mathematics score of students from Belo Horizonte and Conselheiro Lafaiete?
    query = " NO_MUNICIPIO_RESIDENCIA == 'Belo Horizonte' | NO_MUNICIPIO_RESIDENCIA == 'Conselheiro Lafaiete' "
    print("---Média Da Nota de Matemática dos Alunos, residentes e Belo Horizonte e de Conselheiro Lafaiete. \nPresentes: {} \nPresentes sem notas zero: {}" \
        .format(calculate_query_mean(df_enem_present, query, "NU_NOTA_MT"), calculate_query_mean(df_enem_score_total, query, "NU_NOTA_MT")))
    # Mean Human Sciences score of students who live alone (Q005 == 1)?
    query = " Q005 == 1 "
    print("---Média Da Nota de Ciências Humanas dos Alunos que moram só. \nPresentes: {} \nPresentes sem notas zero: {}" \
        .format(calculate_query_mean(df_enem_present, query, "NU_NOTA_CH"), calculate_query_mean(df_enem_score_total, query, "NU_NOTA_CH")))
    # Mean Human Sciences score of students whose father completed post-graduate
    # studies (Q001 == 'G') and whose family income is between R$ 8,982.01 and R$ 9,980.00 (Q006 == 'M')?
    query = " Q001 == 'G' & Q006 == 'M' "
    print("---Média Da Nota de Ciências Humanas dos Alunos cujo pai completou Pós Graduação e possuem renda familiar entre R$ 8.982,01 e R$ 9.980,00. \nPresentes: {} \nPresentes sem notas zero: {}" \
        .format(calculate_query_mean(df_enem_present, query, "NU_NOTA_CH"), calculate_query_mean(df_enem_score_total, query, "NU_NOTA_CH")))
    # Mean Mathematics score of female students from Lavras who chose Spanish
    # as their foreign language (TP_LINGUA == 1)?
    query = " TP_SEXO == 'F' & NO_MUNICIPIO_RESIDENCIA == 'Lavras' & TP_LINGUA == 1"
    print("---Média Da Nota de Matemática dos Alunos, do sexo FEMININO que escolheram ESPANHOL e residem em Lavras. \nPresentes: {} \nPresentes sem notas zero: {}" \
        .format(calculate_query_mean(df_enem_present, query, "NU_NOTA_MT"), calculate_query_mean(df_enem_score_total, query, "NU_NOTA_MT")))
    # Mean Mathematics score of male students living in Ouro Preto?
    query = " TP_SEXO == 'M' & NO_MUNICIPIO_RESIDENCIA == 'Ouro Preto' "
    print("---Média Da Nota de Matemática dos Alunos, do sexo MASCULINO que residem em Ouro Preto. \nPresentes: {} \nPresentes sem notas zero: {}" \
        .format(calculate_query_mean(df_enem_present, query, "NU_NOTA_MT"), calculate_query_mean(df_enem_score_total, query, "NU_NOTA_MT")))
    # Mean Human Sciences score of deaf students (IN_SURDEZ == 1)?
    query = " IN_SURDEZ == 1 "
    print("---Média Da Nota de Ciências Humanas dos Alunos que são Surdos. \nPresentes: {} \nPresentes sem notas zero: {}" \
        .format(calculate_query_mean(df_enem_present, query, "NU_NOTA_CH"), calculate_query_mean(df_enem_score_total, query, "NU_NOTA_CH")))
    # Mean Mathematics score of female students with dyslexia living in
    # Belo Horizonte, Sabará, Nova Lima or Betim?
    query = " TP_SEXO == 'F' and IN_DISLEXIA == 1 and (NO_MUNICIPIO_RESIDENCIA == 'Belo Horizonte' | NO_MUNICIPIO_RESIDENCIA == 'Sabará' \
        | NO_MUNICIPIO_RESIDENCIA == 'Nova Lima' | NO_MUNICIPIO_RESIDENCIA == 'Betim') "
    print("---Média Da Nota de Matemática dos Alunos, do sexo FEMININO que residem em Sabará, Belo Horizonte, Betim e Nova Lima, que possuem Dislexia. \nPresentes: {} \nPresentes sem notas zero: {}" \
        .format(calculate_query_mean(df_enem_present, query, "NU_NOTA_MT"), calculate_query_mean(df_enem_score_total, query, "NU_NOTA_MT")))
if __name__ == "__main__":
    # Script entry point. (A stray '|' artifact after the call, which made
    # this line a syntax error, was removed.)
    main()
import logging
import asyncio
import asab
from .abc.connection import Connection
from .abc.lookup import Lookup
from .matrix.matrix import Matrix
# Module-level logger. Use the module's dotted name (__name__) instead of the
# original __file__: logging configuration addresses loggers hierarchically by
# module path, and a filesystem path defeats that.
L = logging.getLogger(__name__)
class BSPumpService(asab.Service):
    """Central registry service for BSPump objects.

    Keeps pipelines, connections, lookups and matrixes indexed by their
    ``Id`` attribute, and drives their lifecycle from the asab application
    (``initialize``/``finalize``).
    """

    def __init__(self, app, service_name="bspump.PumpService"):
        super().__init__(app, service_name)
        # Registries keyed by the registered object's Id attribute.
        self.Pipelines = dict()
        self.Connections = dict()
        self.Lookups = dict()
        self.LookupFactories = []
        self.Matrixes = dict()

    def locate(self, address):
        """Resolve an address of the form ``<pipeline>`` or ``<pipeline>.<component>``.

        The component part may be ``*<id>`` to search the pipeline's sources;
        otherwise the pipeline's processors are searched. Returns None when
        nothing matches.
        """
        if '.' in address:
            p, t = address.split('.', 1)
        else:
            p = address
            t = None

        pipeline = self.Pipelines.get(p)
        if pipeline is None:
            return None
        elif t is None:
            return pipeline

        if t[:1] == '*':
            # '*<id>' addresses one of the pipeline's sources.
            for source in pipeline.Sources:
                if source.Id == t[1:]:
                    return source
        else:
            for processor in pipeline.iter_processors():
                if processor.Id == t:
                    return processor

        return None

    # Pipelines

    def add_pipeline(self, pipeline):
        """Register a pipeline; raises RuntimeError on a duplicate Id."""
        if pipeline.Id in self.Pipelines:
            raise RuntimeError("Pipeline with id '{}' is already registered".format(pipeline.Id))
        self.Pipelines[pipeline.Id] = pipeline

    def add_pipelines(self, *pipelines):
        """Register several pipelines at once."""
        for pipeline in pipelines:
            self.add_pipeline(pipeline)

    def del_pipeline(self, pipeline):
        """Remove a pipeline from the registry; raises KeyError if unknown."""
        del self.Pipelines[pipeline.Id]

    # Connections

    def add_connection(self, connection):
        """Register a connection and return it; raises RuntimeError on a duplicate Id."""
        if connection.Id in self.Connections:
            raise RuntimeError("Connection '{}' already created".format(connection.Id))
        self.Connections[connection.Id] = connection
        return connection

    def add_connections(self, *connections):
        """Register several connections at once."""
        for connection in connections:
            self.add_connection(connection)

    def locate_connection(self, connection_id):
        """Return the connection for *connection_id* (pass-through if already a Connection)."""
        if isinstance(connection_id, Connection):
            return connection_id
        try:
            return self.Connections[connection_id]
        except KeyError:
            raise KeyError(
                "Cannot find connection id '{}' (did you call add_connection() before add_pipeline() ?)".format(connection_id)
            )

    # Lookups

    def add_lookup(self, lookup):
        """Register a lookup and return it; raises RuntimeError on a duplicate Id."""
        if lookup.Id in self.Lookups:
            raise RuntimeError("Lookup '{}' already created".format(lookup.Id))
        self.Lookups[lookup.Id] = lookup
        return lookup

    def add_lookups(self, *lookups):
        """Register several lookups at once."""
        for lookup in lookups:
            self.add_lookup(lookup)

    def locate_lookup(self, lookup_id):
        """Return the lookup for *lookup_id* (pass-through if already a Lookup).

        On a registry miss, each registered lookup factory is consulted and a
        factory-provided lookup is cached in the registry before returning.
        """
        if isinstance(lookup_id, Lookup):
            return lookup_id
        try:
            return self.Lookups[lookup_id]
        except KeyError:
            for lookup_factory in self.LookupFactories:
                lookup = lookup_factory.locate_lookup(lookup_id)
                if lookup is not None:
                    # Cache the factory-built lookup for subsequent calls.
                    self.Lookups[lookup_id] = lookup
                    return lookup
            raise KeyError("Cannot find lookup id '{}' (did you call add_lookup() ?)".format(lookup_id))

    def add_lookup_factory(self, lookup_factory):
        """Register a factory consulted by locate_lookup() on registry misses."""
        self.LookupFactories.append(lookup_factory)

    # Matrixes

    def add_matrix(self, matrix):
        """Register a matrix and return it; raises RuntimeError on a duplicate Id."""
        if matrix.Id in self.Matrixes:
            raise RuntimeError("Matrix '{}' already created".format(matrix.Id))
        self.Matrixes[matrix.Id] = matrix
        return matrix

    def add_matrixes(self, *matrixes):
        """Register several matrixes at once."""
        for matrix in matrixes:
            self.add_matrix(matrix)

    def locate_matrix(self, matrix_id):
        """Return the matrix for *matrix_id* (pass-through if already a Matrix)."""
        if isinstance(matrix_id, Matrix):
            return matrix_id
        try:
            return self.Matrixes[matrix_id]
        except KeyError:
            raise KeyError("Cannot find matrix id '{}' (did you call add_matrix() ?)".format(matrix_id))

    #

    async def initialize(self, app):
        """asab lifecycle hook: update non-lazy lookups, then start every pipeline."""
        # Run initialization of lookups
        lookup_update_tasks = []
        for lookup in self.Lookups.values():
            if not lookup.Lazy:
                lookup_update_tasks.append(lookup.ensure_future_update(app.Loop))

        # Await all lookups
        if len(lookup_update_tasks) > 0:
            # NOTE(review): the loop= keyword was removed from asyncio.wait in
            # Python 3.10 -- confirm the targeted Python version.
            done, pending = await asyncio.wait(lookup_update_tasks, loop=app.Loop)

        # Start all pipelines
        for pipeline in self.Pipelines.values():
            pipeline.start()

    async def finalize(self, app):
        """asab lifecycle hook: stop all registered pipelines concurrently."""
        # Stop all started pipelines
        if len(self.Pipelines) > 0:
            # NOTE(review): loop= was removed from asyncio.gather in 3.10 -- confirm.
            await asyncio.gather(*[pipeline.stop() for pipeline in self.Pipelines.values()], loop=app.Loop)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Librerías del programa
import sys
import math
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
class Luz(object):
    """A positional OpenGL light that cycles through a fixed color palette."""

    # Class-wide flag tracking whether GL_LIGHTING is believed enabled.
    encendida = True
    # RGBA colors the light cycles through via cambiar_color().
    colores = [(1, 1, 1, 1), (0, 0, 0, 1), (1, 1, 0, 1), (0, 1, 0, 1), (1, 0, 1, 1)]

    def __init__(self, luz_id, posicion):
        # luz_id: a GL light enum such as GL_LIGHT0; posicion: (x, y, z, w) tuple.
        self.luz_id = luz_id
        self.posicion = posicion
        self.color_actual = 0  # index into Luz.colores

    def dibujar(self):
        """Upload this light's position, diffuse color and attenuation to GL."""
        light_id = self.luz_id
        color = Luz.colores[self.color_actual]
        glLightfv(light_id, GL_POSITION, self.posicion)
        glLightfv(light_id, GL_DIFFUSE, color)
        # NOTE(review): attenuation factors are scalars; glLightf is the usual
        # call for scalar parameters -- confirm PyOpenGL accepts glLightfv here.
        glLightfv(light_id, GL_CONSTANT_ATTENUATION, 0.1)
        glLightfv(light_id, GL_LINEAR_ATTENUATION, 0.05)

    def cambiar_color(self):
        """Advance to the next palette color, wrapping around."""
        self.color_actual += 1
        self.color_actual %= len(Luz.colores)

    def enable(self):
        """Enable this light, re-enabling global GL_LIGHTING if it was off."""
        if not Luz.encendida:
            glEnable(GL_LIGHTING)
            Luz.encendida = True
        glEnable(self.luz_id)
class Esfera(object):
    """A solid GLUT sphere with a fixed radius, position offset and material color."""

    # Tessellation detail shared by all spheres.
    meridianos = 40
    paralelos = 40

    def __init__(self, radio, posicion, color):
        self.radio = radio
        self.posicion = posicion  # (x, y, z) translation applied before drawing
        self.color = color        # RGBA diffuse material color

    def dibujar(self):
        """Translate the modelview matrix and draw the sphere.

        Note: there is no glPushMatrix/glPopMatrix here, so the translation
        accumulates -- each sphere is positioned relative to the previously
        drawn one, not to the world origin.
        """
        glTranslatef(*self.posicion)
        glMaterialfv(GL_FRONT, GL_DIFFUSE, self.color)
        glutSolidSphere(self.radio, Esfera.meridianos, Esfera.paralelos)
class App(object):
    """GLUT application: a camera orbiting five lit spheres.

    Arrow keys orbit/zoom the camera, F2 cycles the first light's color,
    F1 exits.
    """

    def __init__(self, ancho=800, largo=600):
        self.titulo = 'Esferas con Luz'
        self.largo = largo   # window height
        self.ancho = ancho   # window width
        self.angulo = 0      # camera orbit angle, radians
        self.distancia = 20  # camera distance from the origin
        # Two light sources and five spheres (positions are cumulative; see
        # Esfera.dibujar).
        self.iluminacion = Luz(GL_LIGHT0, (15, 5, 15, 1))
        self.iluminacion1 = Luz(GL_LIGHT1, (0, 0, 0, 1))
        self.esfera1 = Esfera(2, (0, 0, 0), (1, 0.2, 0.2, 1))
        self.esfera2 = Esfera(1, (4, 2, 0), (0.25, 0.25, 0.25, 1))
        self.esfera3 = Esfera(1, (-4, 2, 0), (0.8, 0.8, 0.8, 1))
        self.esfera4 = Esfera(1, (-4, -2, 0), (0.25, 0.25, 0.25, 1))
        self.esfera5 = Esfera(1, (4, -6, 0), (0.8, 0.8, 0.8, 1))

    def iniciar(self):
        """Create the window, configure GL state and enter the GLUT main loop."""
        glutInit()
        glutInitDisplayMode(GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowPosition(50, 50)
        glutInitWindowSize(self.ancho, self.largo)
        glutCreateWindow(self.titulo)
        glEnable(GL_DEPTH_TEST)
        glEnable(GL_LIGHTING)
        glEnable(GL_LIGHT0)
        glEnable(GL_LIGHT1)
        self.iluminacion.enable()
        glClearColor(.1, .1, .1, 1)
        glMatrixMode(GL_PROJECTION)
        # NOTE(review): under Python 2 this is integer division (800/600 == 1),
        # which distorts the aspect ratio -- confirm the intended interpreter.
        aspect = self.ancho / self.largo
        gluPerspective(40., aspect, 1., 40.)
        glMatrixMode(GL_MODELVIEW)
        glutDisplayFunc(self.dibujar)
        glutSpecialFunc(self.keyboard)
        glutMainLoop()  # does not return

    def dibujar(self):
        """GLUT display callback: place the orbiting camera and draw the scene."""
        # Camera orbits on a circle of radius self.distancia in the XZ plane.
        x = math.sin(self.angulo) * self.distancia
        z = math.cos(self.angulo) * self.distancia
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()
        gluLookAt(x, 0, z, 0, 0, 0, 0, 1, 0)
        self.iluminacion.dibujar()
        self.iluminacion1.dibujar()
        self.esfera1.dibujar()
        self.esfera2.dibujar()
        self.esfera3.dibujar()
        self.esfera4.dibujar()
        self.esfera5.dibujar()
        glutSwapBuffers()

    def keyboard(self, tecla, x, y):
        """GLUT special-key callback: camera controls and light color cycling."""
        if tecla == GLUT_KEY_F1:
            sys.exit()
        if tecla == GLUT_KEY_UP:
            self.distancia -= 0.1
        if tecla == GLUT_KEY_DOWN:
            self.distancia += 0.1
        if tecla == GLUT_KEY_LEFT:
            self.angulo -= 0.05
        if tecla == GLUT_KEY_RIGHT:
            self.angulo += 0.05
        if tecla == GLUT_KEY_F2:
            self.iluminacion.cambiar_color()
        # Clamp the zoom range and keep the angle within one revolution.
        self.distancia = max(5, min(self.distancia, 20))
        self.angulo %= math.pi * 2
        glutPostRedisplay()
if __name__ == '__main__':
    # Launch the GLUT application (blocks in the GLUT main loop).
    app = App()
    app.iniciar()
|
from collections import UserDict
class AddressBook(UserDict):
    """A dictionary of contact records keyed by contact name."""

    def add_record(self, name, record):
        """Store *record* under *name*, replacing any previous entry."""
        self.data[name] = record
class Record:
    """A contact record: a name plus a list of phone numbers.

    Fixes two defects in the original: ``class Record(self)`` raised a
    NameError at definition time (``self`` is undefined in a class header),
    and the ``*phones`` constructor arguments were accepted but discarded.
    """

    def __init__(self, name, *phones):
        self.name = name
        # Keep any phones supplied at construction time.
        self.phones = list(phones)

    def add_phone(self, phone):
        """Append *phone* to the record."""
        self.phones.append(phone)

    def remove_phone(self, phone):
        """Remove the first occurrence of *phone*; raises ValueError if absent."""
        self.phones.remove(phone)

    def edit_phone(self, phone):
        # TODO: needs both the old and new value to be meaningful; left
        # unimplemented as in the original.
        pass
class Field:
    """Base class for an address-book field holding a single value."""

    def __init__(self, value):
        self.value = value
class Name(Field):
    """A contact's name field.

    Now also populates the inherited ``value`` attribute (via Field.__init__)
    so Name behaves consistently with the other Field subclasses; ``name`` is
    kept for backward compatibility with existing callers.
    """

    def __init__(self, name):
        super().__init__(name)
        self.name = name
class Phone(Field):
    """A contact's phone-number field.

    Delegates to Field.__init__, which already stores ``value``; this also
    removes a stray trailing '|' artifact that made the original assignment
    a syntax error.
    """

    def __init__(self, value):
        super().__init__(value)
#encoding=utf-8
from django.db import models
from django.core.urlresolvers import reverse
# Create your models here.
class Article(models.Model):
    """A blog article with view/like counters, an author and a category."""

    # (code, human label) choices for the `status` field.
    status_choice=(
        ('d','draft'),
        ('p','Published')
    )
    title = models.CharField(u'标题',max_length=70)
    body = models.TextField(u'正文')
    create_time = models.DateTimeField(u'创建时间',auto_now_add=True)
    last_modified_time = models.DateTimeField(u'最后修改时间',auto_now = True)
    status = models.CharField(u'文章状态',max_length=1,choices = status_choice)
    # Optional abstract; per the help text, the first 54 characters of the
    # body are used when left empty.
    abstract = models.CharField(u'文章摘要',max_length=54,blank=True,null=True,help_text=u'可选,如若为空将摘取正文的前54个字符')
    views = models.PositiveIntegerField(u'浏览量',default=0)
    likes = models.PositiveIntegerField(u'点赞数',default=0)
    # NOTE(review): null=True has no effect on ManyToManyField -- confirm intent.
    likes_user = models.ManyToManyField('User',verbose_name=u'点赞人',null=True,related_name='user_hit_likes')
    topped = models.BooleanField(u'置顶',default=False)
    # NOTE(review): 'auther' looks like a typo for 'author'; renaming would
    # require a schema migration, so it is documented rather than changed.
    auther = models.ForeignKey('User',verbose_name = u'作者',null=True,on_delete = models.SET_NULL)
    category = models.ForeignKey('Category',verbose_name=u'分类',null=True,on_delete = models.SET_NULL)

    def __str__(self):
        return self.title

    def __unicode__(self):
        # Python 2 counterpart of __str__.
        return self.title

    def get_absolute_url(self):
        """Return the canonical URL of this article's detail page."""
        return reverse('article_detail_page',kwargs={'article_id': self.pk})

    class Meta:
        # Most recently modified articles first.
        ordering = ['-last_modified_time']
class Comment(models.Model):
    """A reader comment on an Article, with a read/unread flag."""

    # (code, human label) choices for the `comment_status` field
    # ('R' = read, 'N' = unread).
    status_choice=(
        ('R',u'已读'),
        ('N',u'未读')
    )
    comment_user = models.ForeignKey('User',verbose_name = u'评论人',related_name="comment_user")
    comment_content = models.TextField(u'评论内容')
    # NOTE(review): the default is the label u"未读" rather than a choice code
    # ('N'), so it will not match status_choice -- confirm intent.
    comment_status = models.CharField(u'评论状态',max_length=10,choices = status_choice,default=u"未读")
    comment_time = models.DateTimeField(u'评论时间',auto_now = True)
    comment_reminder = models.ForeignKey('User',verbose_name=u"提醒人",null=True,related_name="comment_reminder")
    article = models.ForeignKey('Article',verbose_name = u'归属文章')

    def __str__(self):
        return self.comment_user.name

    def __unicode__(self):
        # Python 2 counterpart of __str__.
        return self.comment_user.name

    class Meta():
        # Oldest comments first.
        ordering = ['comment_time']
class Category(models.Model):
    """An article category."""

    name = models.CharField(u'类名',max_length=20)
    create_time = models.DateTimeField(u'创建时间',auto_now_add=True)
    last_modified_time = models.DateTimeField(u'最后修改时间',auto_now = True)

    def __str__(self):
        return self.name

    def __unicode__(self):
        # Python 2 counterpart of __str__.
        return self.name

    class Meta():
        # Most recently modified categories first.
        ordering = ['-last_modified_time']
class User(models.Model):
    """A site user with presence status and login tracking.

    The only code change is removing a stray trailing '|' artifact that made
    the final ``ordering`` line a syntax error.
    """

    # (code, human label) choices for the `status` field.
    user_status=(
        ('invisible',u'隐身'),
        ('online',u'在线'),
        ('offline',u'离线')
    )
    name = models.CharField(u'用户名',max_length=20)
    # NOTE(review): passwords appear to be stored in plain text with
    # max_length=20 -- consider Django's auth framework / password hashing.
    passwd = models.CharField(u'密码',max_length=20,null = True)
    email = models.EmailField(u'邮箱地址',max_length=256,default='jimforit@163.com')
    status = models.CharField(u'状态',max_length=10,choices = user_status,default='online')
    last_login_time = models.DateTimeField(u'最后一次登录时间',auto_now = True)

    def __str__(self):
        return self.name

    def __unicode__(self):
        # Python 2 counterpart of __str__.
        return self.name

    class Meta():
        # Least recently seen users first.
        ordering = ['last_login_time']
#!/usr/bin/env python3
# Convert and old-style (Annif prototype) subject corpus (a directory of
# *.txt files) into a new-style document-oriented corpus (a single TSV
# file).
import sys
import os
import os.path
import collections
# Validate command-line usage: exactly one argument, the corpus directory.
if len(sys.argv) != 2:
    print("Usage: {} <directory> >corpus.tsv".format(sys.argv[0]),
          file=sys.stderr)
    sys.exit(1)

path = sys.argv[1]

if not os.path.isdir(path):
    print("Error: path '{}' is not a directory".format(path),
          file=sys.stderr)
    sys.exit(1)

# Maps a normalized document line to the list of subject URIs assigned to it.
doc_uris = collections.defaultdict(list)
def normalize(line):
    """Collapse every run of whitespace in *line* to a single space and
    strip leading/trailing whitespace."""
    # str.split() with no argument already drops leading/trailing whitespace,
    # so no separate strip() is needed.
    return ' '.join(line.split())
# Each *.txt file starts with a "<uri> <label>" header line, followed by one
# document line per row; collect the URIs assigned to every distinct line.
for fn in os.listdir(path):
    with open(os.path.join(path, fn)) as f:
        uri, label = f.readline().strip().split(' ', 1)
        for line in f:
            doc_uris[normalize(line)].append(uri)

# Emit one TSV row per document line: the text, then space-separated <uri> tokens.
for doc, uris in doc_uris.items():
    uricol = ' '.join(('<{}>'.format(uri) for uri in uris))
    print('{}\t{}'.format(doc, uricol))
|
class LogarunUser:
    """Credentials holder for a logarun.com account."""

    def __init__(self, logarunUsername, logarunPassword):
        self.username = logarunUsername
        self.password = logarunPassword

    def __repr__(self):
        # Fixed: the original defined __rep__, which is not a recognized
        # special method, so repr() silently ignored it. (A stray trailing
        # '|' artifact was also removed.)
        return self.username

    def __str__(self):
        return self.username
class TreeNode(object):
    """Base node of a parse tree; subclasses override __name__ to label themselves."""

    __name__ = "TreeNode"

    def __init__(self, content=None, children=None):
        # content: a string or a nested TreeNode; children: list of TreeNode (or None).
        self.content = content
        self.children = children

    def __str__(self):
        # Nodes without content print as just their type name -- except NArg,
        # which still renders its (possibly empty) attribute form below.
        if self.content is None and self.__name__ != "NArg":
            return self.__name__
        output = self.__name__ + ": "
        # Idiomatic fixes over the original: isinstance() instead of
        # type(...) == ..., and str(child) instead of child.__str__().
        if isinstance(self.content, str):
            output += self.content
        elif self.content is not None:
            output += str(self.content)
        if isinstance(self.children, list) and len(self.children) > 0:
            output += "[" + ";".join(str(child) for child in self.children) + "]"
        return output
# Spatial relation predicates recognized by the grammar (ULF-style '.p' suffix).
relations = ['on.p', 'to_the_left_of.p', 'to_the_right_of.p', 'in_front_of.p', 'behind.p', 'above.p', 'below.p', 'over.p', 'under.p', 'near.p', 'touching.p', 'at.p', 'between.p',
             'side-by-side-with.p', 'on_top_of.p']
grammar = {}
grammar['on.p'] = lambda x: TPred(x)
grammar['to_the_left_of.p'] = lambda x: TPred(x)
grammar['to_the_right_of.p'] = lambda x: TPred(x)
grammar['in_front_of.p'] = lambda x: TPred(x)
grammar['behind.p'] = lambda x: TPred(x)
grammar['above.p'] = lambda x: TPred(x)
grammar['below.p'] = lambda x: TPred(x)
grammar['over.p'] = lambda x: TPred(x)
grammar['under.p'] = lambda x: TPred(x)
grammar['underneath.p'] = lambda x: TPred(x)
grammar['near.p'] = lambda x: TPred(x)
grammar['touching.p'] = lambda x: TPred(x)
grammar['at.p'] = lambda x: TPred(x)
grammar['in.p'] = lambda x: TPred(x)
grammar['between.p'] = lambda x: TPred(x)
grammar['side_by_side_with.p'] = lambda x: TPred(x)
grammar['on_top_of.p'] = lambda x: TPred(x)
grammar['close_to.p'] = lambda x: TPred(x)
grammar['near_to.p'] = lambda x: TPred(x)
grammar['next_to.p'] = lambda x: TPred(x)
grammar['far_from.p'] = lambda x: TPred(x)
grammar['touching.p'] = lambda x: TPred(x)
grammar['supporting.p'] = lambda x: TPred(x)
grammar['exist.pred'] = lambda x: TPred(x)
grammar['facing.p'] = lambda x: TPred(x)
grammar['touch.v'] = lambda x: TPred(x)
grammar['contain.v'] = lambda x: TPred(x)
grammar['consist_of.v'] = lambda x: TPred(x)
grammar['face.v'] = lambda x: TPred(x)
grammar['have.v'] = lambda x: TPred(x)
grammar['color.pred'] = lambda x: TPred(x)
grammar['side_by_side.a'] = lambda x: TPred(x)
grammar['where.a'] = lambda x: TPred(x)
grammar['clear.a'] = lambda x: TPred(x)
grammar['leftmost.a'] = lambda x: NPred(x, mods = [TSuperMarker()])
grammar['rightmost.a'] = lambda x: NPred(x, mods = [TSuperMarker()])
grammar['frontmost.a'] = lambda x: NPred(x, mods = [TSuperMarker()])
grammar['topmost.a'] = lambda x: NPred(x, mods = [TSuperMarker()])
grammar['highest.a'] = lambda x: NPred(x, mods = [TSuperMarker()])
grammar['lowest.a'] = lambda x: NPred(x, mods = [TSuperMarker()])
grammar['backmost.a'] = lambda x: NPred(x, mods = [TSuperMarker()])
grammar['farthest.a'] = lambda x: NPred(x, mods = [TSuperMarker()])
#grammar['nearest_to.a'] = lambda x: NPred(x, mods = [TSuperMarker()])
#grammar['block.n'] = lambda x: TNoun(x)
#grammar['{block}.n'] = lambda x: TNoun(x)
grammar['block.n'] = lambda x: NArg(obj_type = x)
grammar['one.n'] = lambda x: NArg(obj_type = x)
grammar['{block}.n'] = lambda x: NArg(obj_type = x)
grammar['table.n'] = lambda x: NArg(obj_type = x, obj_id = "TABLE")
grammar['stack.n'] = lambda x: NArg(obj_type = x, obj_id = "STACK")
grammar['row.n'] = lambda x: NArg(obj_type = x, obj_id = "ROW")
grammar['thing.n'] = lambda x: NArg(obj_type = x, obj_id = None)
grammar['{thing}.n'] = lambda x: NArg(obj_type = x, obj_id = None)
grammar['what.pro'] = lambda x: NArg()
grammar['which.pro'] = lambda x: NArg()
grammar['anything.pro'] = lambda x: NArg()
grammar['something.pro'] = lambda x: NArg()
grammar['each_other.pro'] = lambda x: NArg(obj_type="EACHOTHER")
grammar['color.n'] = lambda x: NArg(obj_type = "PROPERTY", obj_id = "color")
grammar['direction.n'] = lambda x: NArg(obj_type = "PROPERTY", obj_id = "direction")
grammar['*ref'] = lambda x: NArg(obj_type = "REF", obj_id = "*ref")
grammar['it.pro'] = lambda x: NArg(obj_type = "REF", obj_id = x)
grammar['there.pro'] = lambda x: TTherePro()
grammar['corner-of.n'] = lambda x: TRelNoun(x)
grammar['edge-of.n'] = lambda x: TRelNoun(x)
grammar['side-of.n'] = lambda x: TRelNoun(x)
grammar['center-of.n'] = lambda x: TRelNoun(x)
grammar['middle-of.n'] = lambda x: TRelNoun(x)
grammar['part-of.n'] = lambda x: TRelNoun(x)
grammar['height-of.n'] = lambda x: TRelNoun(x)
grammar['length-of.n'] = lambda x: TRelNoun(x)
grammar['halfway.adv-a'] = lambda x: TAdv(x)
grammar['slightly.adv-a'] = lambda x: TAdv(x)
grammar['directly.adv-a'] = lambda x: TAdv(x)
grammar['fully.adv-a'] = lambda x: TAdv(x)
grammar['how.mod-a'] = lambda x: TAdvAdjMod(x)
grammar['very.mod-a'] = lambda x: TAdvAdjMod(x)
grammar['halfway.mod-a'] = lambda x: TAdvAdjMod(x)
grammar['slightly.mod-a'] = lambda x: TAdvAdjMod(x)
grammar['directly.mod-a'] = lambda x: TAdvAdjMod(x)
grammar['red.a'] = lambda x: NColor(x)
grammar['green.a'] = lambda x: NColor(x)
grammar['blue.a'] = lambda x: NColor(x)
grammar['yellow.a'] = lambda x: NColor(x)
grammar['black.a'] = lambda x: NColor(x)
grammar['white.a'] = lambda x: NColor(x)
grammar['brown.a'] = lambda x: NColor(x)
grammar['gray.a'] = lambda x: NColor(x)
grammar['left.a'] = lambda x: TAdj(x)
grammar['right.a'] = lambda x: TAdj(x)
grammar['top.a'] = lambda x: TAdj(x)
grammar['front.a'] = lambda x: TAdj(x)
grammar['back.a'] = lambda x: TAdj(x)
grammar['high.a'] = lambda x: TAdj(x)
grammar['upper.a'] = lambda x: TAdj(x)
grammar['low.a'] = lambda x: TAdj(x)
grammar['lower.a'] = lambda x: TAdj(x)
grammar['last.a'] = lambda x: TAdj(x)
grammar['first.a'] = lambda x: TAdj(x)
grammar['short.a'] = lambda x: TAdj(x)
grammar['long.a'] = lambda x: TAdj(x)
grammar['middle.a'] = lambda x: TAdj(x)
grammar['tall.a'] = lambda x: TAdj(x)
grammar['many.a'] = lambda x: TAdj(x)
grammar['far.a'] = lambda x: TAdj(x)
grammar['same.a'] = lambda x: TAdj(x)
grammar['one.a'] = lambda x: TNumber(x)
grammar['one.d'] = lambda x: TNumber(x)
grammar['two.a'] = lambda x: TNumber(x)
grammar['two.d'] = lambda x: TNumber(x)
grammar['three.a'] = lambda x: TNumber(x)
grammar['three.d'] = lambda x: TNumber(x)
grammar['few.a'] = lambda x: TNumber(x)
grammar['several.a'] = lambda x: TNumber(x)
grammar['do.aux-s'] = lambda x: TAuxSDo()
grammar['be.v'] = lambda x: TCopulaBe()
grammar['plur'] = lambda x: TPlurMarker()
grammar['pres'] = lambda x: TTenseMarker()
grammar['nquan'] = lambda x: TQuanMarker()
grammar['fquan'] = lambda x: TQuanMarker()
grammar['most-n'] = lambda x: TSuperMarker()
grammar['most'] = lambda x: TSuperMarker()
grammar['sub'] = lambda x: TSubMarker()
grammar['?'] = lambda x: TQMarker()
grammar['n+preds'] = lambda x: TNModMarker()
grammar['prog'] = lambda x: TAspectMarker(prog=True, perf=False)
grammar['perf'] = lambda x: TAspectMarker(prog=False, perf=True)
grammar['k'] = lambda x: TNReifierMarker()
grammar['='] = lambda x: TEqualsMarker()
grammar['adv-a'] = lambda x: TAdvTransformMarker(x)
grammar['adv-s'] = lambda x: TAdvTransformMarker(x)
grammar['adv-e'] = lambda x: TAdvTransformMarker(x)
grammar['adv-f'] = lambda x: TAdvTransformMarker(x)
grammar['that.rel'] = lambda x: TRelativizer(content=x)
grammar['which.d'] = lambda x: TDet(x)
grammar['the.d'] = lambda x: TDet(x)
grammar['a.d'] = lambda x: TDet(x)
grammar['other.d'] = lambda x: TDet(x)
grammar['another.d'] = lambda x: TDet(x)
grammar['every.d'] = lambda x: TDet(x)
grammar['other.a'] = lambda x: TDet(x)
grammar['any.d'] = lambda x: TDet(x)
grammar['some.d'] = lambda x: TDet(x)
grammar['what.d'] = lambda x: TDet(x)
grammar['all.d'] = lambda x: TDet(x)
grammar['how_many.d'] = lambda x: TDet(x)
grammar['Nvidia|'] = lambda x: TName(x)
grammar['Toyota'] = lambda x: TName(x)
grammar['McDonalds'] = lambda x: TName(x)
grammar['SRI'] = lambda x: TName(x)
grammar['Starbucks'] = lambda x: TName(x)
grammar['Texaco'] = lambda x: TName(x)
grammar['Target'] = lambda x: TName(x)
grammar['Burger_King'] = lambda x: TName(x)
grammar['Mercedes'] = lambda x: TName(x)
grammar['Twitter'] = lambda x: TName(x)
grammar['HP'] = lambda x: TName(x)
grammar['Shell'] = lambda x: TName(x)
grammar['Heineken'] = lambda x: TName(x)
grammar['nvidia|'] = lambda x: TName(x)
grammar['toyota'] = lambda x: TName(x)
grammar['mcdonalds'] = lambda x: TName(x)
grammar['sri'] = lambda x: TName(x)
grammar['starbucks'] = lambda x: TName(x)
grammar['texaco'] = lambda x: TName(x)
grammar['target'] = lambda x: TName(x)
grammar['burger_king'] = lambda x: TName(x)
grammar['mercedes'] = lambda x: TName(x)
grammar['twitter'] = lambda x: TName(x)
grammar['hp'] = lambda x: TName(x)
grammar['shell'] = lambda x: TName(x)
grammar['heineken'] = lambda x: TName(x)
grammar['burger'] = lambda x: TName(x)
grammar['king'] = lambda x: TName(x)
grammar['|Nvidia|'] = lambda x: TName(x)
grammar['|Toyota|'] = lambda x: TName(x)
grammar['|McDonalds|'] = lambda x: TName(x)
grammar['|SRI|'] = lambda x: TName(x)
grammar['|Starbucks|'] = lambda x: TName(x)
grammar['|Texaco|'] = lambda x: TName(x)
grammar['|Target|'] = lambda x: TName(x)
grammar['|Burger_King|'] = lambda x: TName(x)
grammar['|Mercedes|'] = lambda x: TName(x)
grammar['|Twitter|'] = lambda x: TName(x)
grammar['|HP|'] = lambda x: TName(x)
grammar['|Shell|'] = lambda x: TName(x)
grammar['|Heineken|'] = lambda x: TName(x)
grammar['|nvidia|'] = lambda x: TName(x)
grammar['|toyota|'] = lambda x: TName(x)
grammar['|mcdonalds|'] = lambda x: TName(x)
grammar['|sri|'] = lambda x: TName(x)
grammar['|starbucks|'] = lambda x: TName(x)
grammar['|texaco|'] = lambda x: TName(x)
grammar['|target|'] = lambda x: TName(x)
grammar['|nvidia|.n'] = lambda x: TName(x)
grammar['|toyota|.n'] = lambda x: TName(x)
grammar['|mcdonalds|.n'] = lambda x: TName(x)
grammar['|sri|.n'] = lambda x: TName(x)
grammar['|starbucks|.n'] = lambda x: TName(x)
grammar['|texaco|.n'] = lambda x: TName(x)
grammar['|target|.n'] = lambda x: TName(x)
grammar['not.adv-s'] = lambda x: TNeg()
grammar['not.adv-a'] = lambda x: TNeg()
grammar['or.cc'] = lambda x: TConj(x)
grammar['and.cc'] = lambda x: TConj(x)
# Two adjacent names merge into one multi-word name.
grammar[("TName", "TName")] = lambda x, y: TName(content=x.content + " " + y.content)
#Verb + tense/aspect rules: tense/aspect markers fold into verb phrases as
# NSentenceParams children.
grammar[("TTenseMarker", "TCopulaBe")] = lambda x, y: NVP(content=y, children=[NSentenceParams(tense=x)])
grammar[("NSentenceParams", "TCopulaBe")] = lambda x, y: NVP(content=y, children=[x])
grammar[("TTenseMarker", "TVerb")] = lambda x, y: NVP(content=y, children=[NSentenceParams(tense=x)] + y.children)
grammar[("NSentenceParams", "TVerb")] = lambda x, y: NVP(content=y, children=[x] + y.children)
grammar[("TTenseMarker", "TAuxSDo")] = lambda x, y: NVP(content=y, children=[NSentenceParams(tense=x)])
grammar[("NSentenceParams", "TAuxSDo")] = lambda x, y: NVP(content=y, children=[x] + y.children)
# Adjacent aspect markers merge by OR-ing their progressive/perfect flags.
grammar[("TAspectMarker", "TAspectMarker")] = lambda x, y: TAspectMarker(prog = x.prog or y.prog, perf = x.perf or y.perf)
grammar[("TTenseMarker", "TAspectMarker")] = lambda x, y: NSentenceParams(tense = x, aspect = y)
#Adjective modifier rules: markers/adverbs/negation attach as adjective mods.
grammar[("TSuperMarker", "TAdj")] = lambda x, y: TAdj(content = y.content, mods = [x])
grammar[("TAdvAdjMod", "TAdj")] = lambda x, y: TAdj(content = y.content, mods = [x])
grammar[("TNeg", "TAdj")] = lambda x, y: TAdj(content = y.content, mods = [x])
#Determiner rules: "how many" becomes a cardinality determiner (NCardDet);
# any other quantified adjective becomes a plain NDet.
grammar[("TQuanMarker", "TAdj")] = lambda x, y: NDet(y) if (y.content != "many.a" or y.mods == [] or y.mods[0].content != "how.mod-a") else NCardDet()
#Argument + modifier rules: names, determiners, adjectives, plural markers,
# numbers and negation all refine an existing NArg.
grammar[("TName", "NArg")] = lambda x, y: NArg(obj_type = y.obj_type, obj_id = x.content)
grammar[("TDet", "NArg")] = lambda x, y: NArg(obj_type = y.obj_type, obj_id = y.obj_id, mods = y.mods, det = x, plur = y.plur)
grammar[("NDet", "NArg")] = lambda x, y: NArg(obj_type = y.obj_type, obj_id = y.obj_id, mods = y.mods, det = x, plur = y.plur)
grammar[("NColor", "NArg")] = lambda x, y: NArg(obj_type = y.obj_type, obj_id = y.obj_id, mods = [x] + y.mods, det = x, plur = y.plur)
grammar[("NCardDet", "NArg")] = lambda x, y: NArg(obj_type = y.obj_type, obj_id = y.obj_id, mods = y.mods, det = x, plur = y.plur)
grammar[("TAdj", "NArg")] = lambda x, y: NArg(obj_type = y.obj_type, obj_id = y.obj_id, mods = y.mods + [x], det = y.det, plur = y.plur)
grammar[("TPlurMarker", "NArg")] = lambda x, y: NArg(obj_type = y.obj_type, obj_id = y.obj_id, mods = y.mods, det = y.det, plur = True)
# Reifier/empty markers are transparent around arguments.
grammar[("TNReifierMarker", "NArg")] = lambda x, y: y
grammar[("TEmpty", "NArg")] = lambda x, y: y
grammar[("NArg", "TEmpty")] = lambda x, y: x
# A relational noun like "stack-of" ("...-of" stripped via [:-5]) wraps its
# complement argument as a modifier.
grammar[("TRelNoun", "NArg")] = lambda x, y: NArg(obj_type = x.content[:-5], obj_id = x.content[:-5].upper(), mods = [y])
grammar[("TNumber", "NArg")] = lambda x, y: NArg(obj_type = y.obj_type, obj_id = y.obj_id, mods = y.mods + [x], det = y.det, plur = y.plur)
grammar[("TNeg", "NArg")] = lambda x, y: NArg(obj_type = y.obj_type, obj_id = y.obj_id, mods = y.mods + [x], det = y.det, plur = y.plur)
grammar[("TConj", "NArg")] = lambda x, y: NConjArg(x, children = [y])
grammar[("NArg", "TConj")] = lambda x, y: NConjArg(y, children = [x])
# FIX: the key used "Narg", which can never match — every node advertises
# its type as "NArg" (see NArg.__name__), so this rule was dead. Corrected
# so a conjunction already built can absorb a preceding argument.
grammar[("NArg", "NConjArg")] = lambda x, y: NConjArg(y.content, children = y.children + [x])
# Conjunction absorption: arguments accumulate into NConjArg children; two
# bare arguments conjoin with an implicit (empty) conjunction.
grammar[("NConjArg", "NArg")] = lambda x, y: NConjArg(x.content, children = x.children + [y])
grammar[("NArg", "NArg")] = lambda x, y: NConjArg(TConj(), children = [x, y])
grammar[("TEqualsMarker", "NArg")] = lambda x, y: y
grammar[("NRel", "NArg")] = lambda x, y: NArg(obj_type = y.obj_type, obj_id = y.obj_id, mods = y.mods + [x], det = y.det, plur = y.plur)
#Relational rules: prepositions/predicates combine with arguments into
# NRel/NPred nodes; copulas and relativizers are mostly transparent.
grammar[("TPrep", "NArg")] = lambda x, y: NRel(x, children=[y])
grammar[("TPrep", "NConjArg")] = lambda x, y: NRel(x, children=[y])
grammar[("TPred", "NArg")] = lambda x, y: NPred(x.content, children=[y])
grammar[("NArg", "TPred")] = lambda x, y : NPred(content = y.content, children = [x])
grammar[("TPred", "NConjArg")] = lambda x, y: NPred(x.content, children=[y])
grammar[("NConjArg", "NPred")] = lambda x, y: NPred(content=y.content, children=[x]+y.children, mods = y.mods)
grammar[("NConjArg", "TPred")] = lambda x, y: NPred(content=y.content, children=[x])
grammar[("TNeg", "NPred")] = lambda x, y: NPred(content=y.content, children=y.children, mods=y.mods+[x])
grammar[("TNeg", "TPred")] = lambda x, y: NPred(content=y.content, mods=[x])
grammar[("NSentenceParams", "NPred")] = lambda x, y: y
grammar[("TCopulaBe", "TPred")] = lambda x, y: y
grammar[("TCopulaBe", "NPred")] = lambda x, y: y
grammar[("TCopulaBe", "TAdj")] = lambda x, y: NPred(content = y.content, mods=y.mods)
# grammar[("TCopulaBe", "NRel")] = lambda x, y: y
grammar[("TCopulaBe", "NArg")] = lambda x, y: NPred(content = x, children = [y])
grammar[("NPred", "TPred")] = lambda x, y: NPred(content = y, children = x.children, neg = x.neg, mods = x.mods)
#grammar[("NVP", "NRel")] = lambda x, y: y
grammar[("TNeg", "NRel")] = lambda x, y: NRel(y.content, y.children, neg=True)
grammar[("NArg", "NRel")] = lambda x, y: NRel(content=y.content, children=[x]+y.children, neg = y.neg)
grammar[("NConjArg", "NRel")] = lambda x, y: NRel(content=y.content, children=[x]+y.children, neg = y.neg)
grammar[("NConjArg", "TPrep")] = lambda x, y: NRel(content=y, children=[x])
grammar[("NVP", "NRel")] = lambda x, y: y
# Existential "there" after a verb phrase reduces to an EXIST predicate.
grammar[("NVP", "TTherePro")] = lambda x, y: NPred(content = "EXIST")
grammar[("NSentenceParams", "NRel")] = lambda x, y: y
grammar[("NVP", "TAdj")] = lambda x, y: NRel(y, children=[])
#grammar[("TAdv", "NRel")] = lambda x, y: NRel(y.content, y.children, y.neg, y.mods + [x])
grammar[("TAdv", "NPred")] = lambda x, y: NPred(y.content, y.children, y.neg, y.mods + [x])
grammar[("TDet", "TPrep")] = lambda x, y: y
grammar[("TAdj", "TPrep")] = lambda x, y: y
grammar[("NVP", "NArg")] = lambda x, y: NPred(content = x, children = [y])
# NOTE: an argument followed by a predicate now becomes a predicate with the
# argument prepended; the commented rule below shows the previous behavior
# (attaching the predicate as an argument modifier).
#grammar[("NArg", "NPred")] = lambda x, y: NArg(obj_type = x.obj_type, obj_id = x.obj_id, mods = x.mods + [y], det = x.det, plur = x.plur)
grammar[("NArg", "NPred")] = lambda x, y: NPred(content = y.content, children = [x] + y.children, neg = y.neg, mods = y.mods)
grammar[("TEmpty", "NPred")] = lambda x, y: y
grammar[("NPred", "TEmpty")] = lambda x, y: x
grammar[("NPred", "NArg")] = lambda x, y: NPred(content = x.content, children = x.children + [y])
grammar[("TRelativizer", "NPred")] = lambda x, y: y
#grammar[("TRelativizer", "NRel")] = lambda x, y: y
grammar[("TSuperMarker", "NRel")] = lambda x, y: NRel(content = y.content, children = y.children, neg = y.neg, mods = y.mods + [x])
grammar[("TAdvTransformMarker", "NRel")] = lambda x, y: y
grammar[("TAdvAdjMod", "NRel")] = lambda x, y: NRel(content = y.content, children = y.children, neg = y.neg, mods = y.mods + [x])
grammar[("NPred", "NRel")] = lambda x, y : NPred(content = y.content, children = x.children + y.children, neg = y.neg, mods = y.mods)
grammar[("NPred", "NPred")] = lambda x, y : NPred(content = y.content, children = x.children + y.children, neg = y.neg, mods = y.mods)
grammar[("NArg", "TPrep")] = lambda x, y : NPred(content = y, children = [x])
grammar[("NRel", "NRel")] = lambda x, y : NPred(content = x.content, children = x.children, neg = x.neg, mods = x.mods + [y])
grammar[("NVerbParams", "NRel")] = lambda x, y : NPred(content = y.content, children = y.children, neg = y.neg, mods = [x] + y.mods)
#Sentence-level rules: a trailing question marker wraps the parse in an
# NSentence; tense/params attach as sentence modifiers.
#grammar[("NVerbParams", "NRel")] = lambda x, y : NSentence(content = y, is_question = False, tense = NVerbParams)
grammar[("NRel", "TQMarker")] = lambda x, y: NSentence(x, True)
grammar[("NArg", "TQMarker")] = lambda x, y: NSentence(x, True)
grammar[("NPred", "TQMarker")] = lambda x, y: NSentence(x, True)
grammar[("NSentence", "TQMarker")] = lambda x, y: NSentence(content = x.content, is_question = True, tense = x.tense)
grammar[("NSentenceParams", "NSentence")] = lambda x, y: NSentence(content = y.content, is_question = y.is_question, mods = y.mods + [x])
grammar[("TEmpty", "NSentence")] = lambda x, y: y
grammar[("TTenseMarker", "NSentence")] = lambda x, y: NSentence(content = y.content, is_question = y.is_question, mods = y.mods + [x])
class TEmpty(TreeNode):
    """Terminal: placeholder for semantically empty tokens."""
    __name__ = "TEmpty"
class TRelativizer(TreeNode):
    """Terminal: relativizer token (e.g. 'that'/'which')."""
    __name__ = "TRelativizer"
    def __init__(self, content=None):
        super(TRelativizer, self).__init__(content, None)
class TCopulaBe(TreeNode):
    """Terminal: copular 'be'."""
    __name__ = "TCopulaBe"
    def __init__(self, content=None):
        super(TCopulaBe, self).__init__(content, None)
class TAuxSDo(TreeNode):
    """Terminal: auxiliary 'do'; carries no content."""
    __name__ = "TAuxSDo"
    def __init__(self):
        super(TAuxSDo, self).__init__(None, None)
class TPlurMarker(TreeNode):
    """Terminal: plural marker; carries no content."""
    __name__ = "TPlurMarker"
    def __init__(self):
        super().__init__(None, None)
class TQuanMarker(TreeNode):
    """Terminal: quantifier marker; carries no content."""
    __name__ = "TQuanMarker"
    def __init__(self):
        super().__init__(None, None)
class TSuperMarker(TreeNode):
    """Terminal: superlative marker."""
    __name__ = "TSuperMarker"
    def __init__(self, content=None):
        # FIX: the accepted `content` was silently discarded (passed None);
        # pass it through like the other content-bearing markers
        # (e.g. TTenseMarker). Default-constructed instances are unaffected.
        super(TSuperMarker, self).__init__(content, None)
class TNReifierMarker(TreeNode):
    """Terminal: noun-reifier marker; transparent in the grammar rules."""
    __name__ = "TNReifierMarker"
    def __init__(self):
        # NOTE(review): calls TreeNode.__init__ with no arguments, unlike
        # most siblings which pass (None, None) — presumably TreeNode has
        # defaults for both; confirm against TreeNode's definition.
        super(TNReifierMarker, self).__init__()
class TAspectMarker(TreeNode):
    """Terminal: verbal aspect flags (progressive / perfect)."""
    __name__ = "TAspectMarker"
    def __init__(self, prog=False, perf=False):
        super().__init__(None, None)
        self.prog = prog
        self.perf = perf
    def __str__(self):
        return "PROG=%s:PERF=%s" % (self.prog, self.perf)
class TSubMarker(TreeNode):
    """Terminal: subordinate-clause marker; carries no content."""
    __name__ = "TSubMarker"
    def __init__(self):
        # FIX: the original referenced an undefined name `content`, so every
        # construction raised NameError. This no-argument marker stores no
        # content, matching siblings such as TEqualsMarker.
        super().__init__(None, None)
class TEqualsMarker(TreeNode):
    """Terminal: equality marker; the rule that consumes it ignores it."""
    __name__ = "TEqualsMarker"
    def __init__(self):
        super().__init__(None, None)
class TTherePro(TreeNode):
    """Terminal: existential 'there' pronoun."""
    __name__ = "TTherePro"
    def __init__(self, content=None):
        # NOTE(review): `content` is accepted but never stored, and
        # TreeNode.__init__ is called with no arguments — presumably
        # intentional since the grammar discards this node; confirm.
        super().__init__()
class TAdvTransformMarker(TreeNode):
    """Terminal: adverb-transformation marker."""
    __name__ = "TAdvTransformMarker"
    def __init__(self, content=None):
        # Generalized: `content` now defaults to None for consistency with
        # every other marker class; callers passing content are unaffected.
        super().__init__(content, None)
class TNeg(TreeNode):
    """Terminal: negation ('not'); renders as the literal NOT."""
    __name__ = "TNeg"
    def __init__(self):
        super(TNeg, self).__init__(None, None)
    def __str__(self):
        return "NOT"
class TTenseMarker(TreeNode):
    """Terminal: tense marker carrying the tense label as content."""
    __name__ = "TTenseMarker"
    def __init__(self, content=None):
        super(TTenseMarker, self).__init__(content, None)
class TQMarker(TreeNode):
    """Terminal: question marker; renders as '?'."""
    __name__ = "TQMarker"
    def __init__(self):
        # NOTE(review): no-argument super call, unlike siblings that pass
        # (None, None) — presumably TreeNode defaults both fields; confirm.
        super().__init__()
    def __str__(self):
        return "?"
class TNModMarker(TreeNode):
    """Terminal: noun-modifier marker."""
    __name__ = "TNModMarker"
    def __init__(self, content=None):
        # NOTE(review): `content` is accepted but never stored and the super
        # call passes nothing — looks like TTherePro; confirm intentional.
        super(TNModMarker, self).__init__()
class TDet(TreeNode):
    """Terminal: determiner token."""
    __name__ = "TDet"
    def __init__(self, content=None):
        super().__init__(content, None)
class TPrep(TreeNode):
    """Terminal: preposition token."""
    __name__ = "TPrep"
    def __init__(self, content=None):
        super().__init__(content, None)
class TPred(TreeNode):
    """Terminal: predicate word."""
    __name__ = "TPred"
    def __init__(self, content=None):
        # NOTE(review): passes only `content` (no children argument), unlike
        # siblings that pass (content, None) — relies on TreeNode's default.
        super().__init__(content)
class TNoun(TreeNode):
    """Terminal: common noun token."""
    __name__ = "TNoun"
    def __init__(self, content=None):
        super().__init__(content, None)
class TRelNoun(TreeNode):
    """Terminal: relational noun (e.g. 'stack-of'); consumed by the
    ("TRelNoun", "NArg") grammar rule, which strips its suffix."""
    __name__ = "TRelNoun"
    def __init__(self, content=None):
        super().__init__(content, None)
class TAdvAdjMod(TreeNode):
    """Terminal: adverb that modifies an adjective."""
    __name__ = "TAdvAdjMod"
    def __init__(self, content=None):
        super().__init__(content, None)
class TName(TreeNode):
    """Terminal: proper name; strips '|...|' quoting and the '.n' suffix."""
    __name__ = "TName"
    def __init__(self, content=None):
        # FIX: constructing with the default content=None crashed on
        # .replace(); only clean the markers when a string is supplied.
        cleaned = None if content is None else content.replace("|", "").replace(".n", "")
        super().__init__(cleaned)
class TPro(TreeNode):
    """Terminal: pronoun token."""
    __name__ = "TPro"
    def __init__(self, content=None):
        super().__init__(content, None)
class TNumber(TreeNode):
    """Terminal: numeric token."""
    __name__ = "TNumber"
    def __init__(self, content=None):
        super().__init__(content, None)
class TAdj(TreeNode):
    """Terminal: adjective with an optional list of modifier nodes."""
    __name__ = "TAdj"
    def __init__(self, content, mods=None):
        # FIX: mods=[] was a shared mutable default — every default-built
        # TAdj aliased the same list. Callers that pass mods see no change.
        super().__init__(content, None)
        self.mods = [] if mods is None else mods
    def __str__(self):
        # FIX: the original tested `self.mods is []`, which is always False
        # (identity against a fresh list), so "; MOD=[]" was appended even
        # for unmodified adjectives. Test emptiness instead.
        if not self.mods:
            return self.content
        return self.content + "; MOD=" + str(self.mods)
class TAdv(TreeNode):
    """Terminal: adverb token."""
    __name__ = "TAdv"
    def __init__(self, content=None):
        super().__init__(content, None)
class TConj(TreeNode):
    """Terminal: coordinating conjunction ('and'/'or'); content may be None
    for the implicit conjunction of two adjacent arguments."""
    __name__ = "TConj"
    def __init__(self, content=None):
        super().__init__(content, None)
class TUnknown(TreeNode):
    """Terminal: fallback for tokens with no dedicated rule."""
    __name__ = "TUnknown"
    def __init__(self, content=None):
        super().__init__(content, None)
class NSentenceParams(TreeNode):
    """Nonterminal bundling sentence-level tense and aspect markers."""
    __name__ = "NSentenceParams"
    def __init__(self, tense=None, aspect=None):
        # NOTE: TreeNode.__init__ intentionally not invoked (as original).
        self.tense = tense
        self.aspect = aspect
    def __str__(self):
        return "%s:%s" % (self.tense, self.aspect)
class NDet(TreeNode):
    """Nonterminal determiner wrapping the adjective it came from."""
    __name__ = "NDet"
    def __init__(self, content=None):
        super().__init__(content, None)
class NColor(TreeNode):
    """Nonterminal: color value; strips the '.a' adjective suffix."""
    __name__ = "NColor"
    def __init__(self, content=None):
        # FIX: the default content=None crashed on .replace(); guard the
        # cleanup the same way TName does.
        super().__init__(None if content is None else content.replace(".a", ""), None)
class NCardDet(TreeNode):
    """Nonterminal determiner for 'how many' cardinality questions."""
    __name__ = "NCardDet"
    def __init__(self):
        super().__init__(None, None)
    def __str__(self):
        return "HOWMANY"
class NVP(TreeNode):
    """Nonterminal verb phrase: verb content plus parameter/argument children."""
    __name__ = "NVP"
    def __init__(self, content, children=None):
        # FIX: children=[] was a shared mutable default; normalize per
        # instance. NOTE(review): TreeNode.__init__ is deliberately not
        # called here, matching the original.
        self.content = content
        self.children = [] if children is None else children
class NRel(TreeNode):
    """Nonterminal relation (e.g. prepositional) over argument children.

    NOTE(review): `children` defaults to None yet __str__ iterates it — the
    grammar rules always supply a list, so the default is preserved as-is.
    """
    __name__ = "NRel"
    def __init__(self, content, children=None, neg=False, mods=None):
        # FIX: mods=[] was a shared mutable default across all instances.
        self.content = content
        self.children = children
        self.neg = neg
        self.mods = [] if mods is None else mods
    def __str__(self):
        output = "RELATION={"
        if self.neg == True:
            output += "NOT "
        for mod in self.mods:
            output += ": " + mod.__str__()
        output += self.content.__str__()
        for idx in range(len(self.children)):
            output += "\nARG" + str(idx) + " " + self.children[idx].__str__()
        output += "\n}"
        return output
class NPred(TreeNode):
    """Nonterminal predicate: content plus argument children, negation flag
    and modifier list."""
    __name__ = "NPred"
    def __init__(self, content, children=None, neg=False, mods=None):
        # FIX: children=[] and mods=[] were shared mutable defaults;
        # normalize them per instance (observable behavior is unchanged for
        # every call site that passes lists explicitly).
        self.content = content
        self.children = [] if children is None else children
        self.neg = neg
        self.mods = [] if mods is None else mods
    def __str__(self):
        output = "PRED={"
        if self.neg == True:
            output += "NOT "
        output += "MODS: {"
        for mod in self.mods:
            output += mod.__str__() + "; "
        output += "}\nCONTENT: " + self.content.__str__()
        for idx in range(len(self.children)):
            output += "\nARG" + str(idx) + " " + self.children[idx].__str__()
        output += "\n}"
        return output
class NConjArg(TreeNode):
    """Nonterminal: conjunction of arguments (content is the TConj node,
    children the conjoined arguments)."""
    __name__ = "NConjArg"
    def __init__(self, content, children=None):
        # NOTE(review): super() already receives `content`; the explicit
        # re-assignment below is presumably redundant but is kept because
        # TreeNode is defined outside this view — confirm before removing.
        super().__init__(content, None)
        self.content = content
        self.children = children
class NArg(TreeNode):
    """Nonterminal argument: an object reference with a type, id, modifier
    list, determiner and plurality flag."""
    __name__ = "NArg"
    def __init__(self, obj_type=None, obj_id=None, mods=None, det=None, plur=False):
        # FIX: mods=[] was a shared mutable default — update() appends to
        # self.mods, so every NArg built without an explicit mods list was
        # silently mutating one shared list.
        # Strip the noun suffix and brace markers from the raw type token.
        self.obj_type = obj_type if obj_type is None else obj_type.replace(".n", "").replace("{", "").replace("}", "")
        if obj_type == "stack.n":
            # Debug trace retained from the original.
            print (".N CHECK!!!: ", obj_type, self.obj_type)
        self.obj_id = obj_id
        self.mods = [] if mods is None else mods
        self.det = det
        self.plur = plur
    def update(self, narg=None, obj_type=None, obj_id=None, mods=None, det=None, plur=None):
        """Merge explicit fields (falling back to fields of `narg`) into this
        argument; returns self for chaining. `mods` entries are shallow-copied,
        while mods inherited from `narg` are shared."""
        if obj_type is not None:
            self.obj_type = obj_type
        elif narg is not None:
            self.obj_type = narg.obj_type
        if obj_id is not None:
            self.obj_id = obj_id
        elif narg is not None:
            self.obj_id = narg.obj_id
        if mods is not None:
            for item in mods:
                self.mods.append(copy.copy(item))
        elif narg is not None:
            self.mods += narg.mods
        if det is not None:
            self.det = det
        elif narg is not None:
            self.det = narg.det
        if plur is not None:
            self.plur = plur
        elif narg is not None:
            self.plur = narg.plur
        return self
    def __str__(self):
        # Guard against self-reference cycles through modifier children,
        # which would otherwise recurse forever.
        if self.mods is not None:
            for item in self.mods:
                if item is not None and hasattr(item, "children") and item.children is not None and self in item.children:
                    print ("ERROR!!!: ", self.obj_type, item.content)
                    return ""
        output = "ARGUMENT={" + str(self.obj_type)+"; " + str(self.obj_id) + "; " + str(self.det) + "; " + str(self.plur) + "; ["
        for mod in self.mods:
            output += str(mod) + ", "
        output+= "]}"
        return output
class NSentence(TreeNode):
    """Nonterminal: top-level sentence with question flag, optional tense and
    sentence-level modifiers."""
    __name__ = "NSentence"
    def __init__(self, content, is_question=True, tense=None, mods=None):
        # FIX: mods=[] was a shared mutable default; normalize per instance.
        super().__init__(content, None)
        self.content = content
        self.is_question = is_question
        self.tense = tense
        self.mods = [] if mods is None else mods
"""
Given an image represented by an NxN matrix, where each pixel in
the image is 4 bytes, write a method to rotate the image by 90.
Can you do this in place?
"""
# Time Complexity: O(n^2) — every cell of the n x n matrix is visited once
# Space Complexity: O(n^2) — a full copy of the matrix is made for the output
# Note to self: use numpy next time
import copy
def rotate_90(m):
    """Return a new matrix equal to `m` rotated 90 degrees clockwise.

    Raises:
        ValueError: if `m` is not square. (FIX: the original *returned* the
        Exception object instead of raising it, so callers never saw an
        error and the bad value propagated as a normal result.)
    """
    n = len(m)
    if n != len(m[0]):
        raise ValueError("Can't perform rotation on non nxn matrices")
    output = copy.deepcopy(m)
    for row in range(n):
        # Column `row` of m, read bottom-to-top, becomes row `row` of output.
        for index, col in enumerate(range(n - 1, -1, -1)):
            output[row][index] = m[col][row]
    return output
# Demo: the docstring below shows the expected clockwise rotation.
# FIX: the sample matrix had a fourth row [10,11,12], making it non-square —
# contradicting both the function's square-only contract and the documented
# expected output below.
m = [
    [1,2,3],
    [4,5,6],
    [7,8,9],
]
"""
Rotated
[7,4,1]
[8,5,2]
[9,6,3]
"""
print("Original:\t", m)
print("Rotated:\t", rotate_90(m))
from Products.CMFCore.utils import getToolByName
def uninstall(portal):
    """Run the collective.prettyphoto uninstall profile and restore the
    stock Plone baseline profile."""
    tool = getToolByName(portal, 'portal_setup')
    profile_id = 'profile-collective.prettyphoto:uninstall'
    tool.runAllImportStepsFromProfile(profile_id)
    tool.setBaselineContext('profile-Products.CMFPlone:plone')
    return "Ran all uninstall steps."
|
import re
from datetime import datetime
import pandas as pd
import numpy as np
from arch.unitroot import ADF
import statsmodels.api as sm
from itertools import combinations
class PairTrading(object):
    """Pair-trading toolkit.

    Implements pair selection and spread construction via the minimum
    distance (SSD) method and an Engle-Granger style cointegration model.
    """
    def __init__(self):
        # Price series for the two legs of the candidate pair.
        self.priceX = None
        self.priceY = None
        self.returnX = None
        self.returnY = None
        self.standardX = None
        self.standardY = None
        self.logPriceX = None
        self.logPriceY = None
        # Result tables populated by calAllSSD / calAllCointegration.
        self.resultSSD = pd.DataFrame()
        self.resultCoint = pd.DataFrame()
        self.resultNotCoint = pd.DataFrame()
        self.debug = False

    def setStock(self, priceX, priceY):
        """Set the two price series and reset cached derived data."""
        if priceX is None or priceY is None:
            return None
        self.priceX = priceX
        self.priceY = priceY
        self.returnX = None
        self.returnY = None
        self.standardX = None
        self.standardY = None

    # --- Minimum-distance method ---
    def SSD(self, timePeriod=None):
        """Sum of squared deviations between the standardized price paths.

        FIX: the original summed (X + Y)**2; the minimum-distance method is
        defined on the DIFFERENCE of the standardized paths (consistent
        with SSDSpread below).
        """
        formX, formY = self.getPeriodPrice(timePeriod)
        standardX = self.standardPrice(formX)
        standardY = self.standardPrice(formY)
        return np.sum((standardY - standardX) ** 2)

    def SSDSpread(self, timePeriod=None):
        """Spread (standardized Y minus standardized X) over the period."""
        formX, formY = self.getPeriodPrice(timePeriod)
        standardX = self.standardPrice(formX)
        standardY = self.standardPrice(formY)
        return standardY - standardX

    def calAllSSD(self, stocks):
        """Compute SSD for every pair of columns in `stocks` and store the
        ranking (ascending SSD) in self.resultSSD."""
        if not isinstance(stocks, pd.DataFrame):
            raise TypeError('传入的格式不对,传入DataFrame格式的参数')
        self.resultSSD = None
        results = []
        for c in combinations(stocks.columns, 2):
            self.setStock(stocks[c[0]], stocks[c[1]])
            results.append([c[0] + ':' + c[1], self.SSD()])
        self.resultSSD = pd.DataFrame(results, columns=['name', 'ssd']).sort_values(by=['ssd'])

    # --- Cointegration model ---
    def checkLogPrice(self, price, name):
        """Return True when the (log) price series is NOT stationary
        (ADF p-value >= 0.05) — the precondition for cointegration."""
        adfprice = ADF(price)
        if adfprice.pvalue >= 0.05:
            if self.debug:
                print(
                    '''
                    %s 价格的对数序列不具有平稳性.
                    p-value of ADF test: %f
                    '''% (name,adfprice.pvalue)
                )
            return True
        else:
            if self.debug:
                print(
                    '''
                    %s 价格的对数序列具有平稳性
                    p-value of ADF test: %f
                    ''' % (name,adfprice.pvalue)
                )
            return False

    def checkDiffPrice(self, price, name):
        """Return True when the first difference of the series IS stationary
        (ADF p-value <= 0.05), i.e. the series is integrated of order one."""
        diffPrice = price.diff()[1:]
        adfDiff = ADF(diffPrice)
        if adfDiff.pvalue <= 0.05:
            if self.debug:
                print(
                    '''
                    %s 价格的对数序列具有平稳性
                    p-value of ADF test: %f
                    ''' % (name,adfDiff.pvalue)
                )
            return True
        else:
            if self.debug:
                print(
                    '''
                    %s 价格的对数序列不具有平稳性
                    p-value of ADF test: %f
                    ''' % (name,adfDiff.pvalue)
                )
            return False

    def cointegration(self, priceX=None, priceY=None):
        """Engle-Granger cointegration test on log prices.

        Returns (alpha, beta) of the OLS fit when the residual is stationary,
        otherwise (None, None).

        FIX: `if priceX:` truth-tested a pandas Series, which raises
        ValueError; compare against None instead. FIX: the fallback branch
        used the RAW stored prices where the provided-argument branch used
        logs — both branches now take logs.
        """
        if priceX is not None:
            logPriceX = np.log(priceX)
        else:
            logPriceX = np.log(self.priceX)
        if priceY is not None:
            logPriceY = np.log(priceY)
        else:
            logPriceY = np.log(self.priceY)
        # NOTE(review): the original requires only ONE of the two series to
        # satisfy each precondition ('or'); arguably both should ('and') —
        # preserved as written.
        if self.checkLogPrice(logPriceX, 'PriceX') or self.checkLogPrice(logPriceY, 'PriceY'):
            if self.checkDiffPrice(logPriceX, 'PriceX') or self.checkDiffPrice(logPriceY, 'PriceY'):
                results = sm.OLS(logPriceY, sm.add_constant(logPriceX)).fit()
                resid = results.resid
                # ADF on the residual: stationarity => cointegration.
                adfSpread = ADF(resid)
                if adfSpread.pvalue >= 0.05:
                    if self.debug:
                        print(
                            """
                            交易价格不具有协整关系。
                            p-value of ADF test: %f
                            回归系数:
                            截距:%f
                            系数:%f
                            """ % (adfSpread.pvalue, results.params[0], results.params[1])
                        )
                    return None, None
                else:
                    if self.debug:
                        print(
                            """
                            交易价格具有协整关系。
                            p-value of ADF test: %f
                            回归系数:
                            截距:%f
                            系数:%f
                            """ % (adfSpread.pvalue, results.params[0], results.params[1])
                        )
                    return results.params[0], results.params[1]
        return None, None

    def CointegrationSpread(self, formPeriod=None, tradePeriod=None):
        """Spread of the trade period using coefficients fit on the
        formation period; None when no cointegration is found."""
        # FIX: the original condition `self.priceX or self.priceY is None`
        # parsed as `self.priceX or (self.priceY is None)`.
        if self.priceX is None or self.priceY is None:
            raise Exception('先使用setStock(x,y)')
        formX, formY = self.getPeriodPrice(formPeriod)
        coefficiens = self.cointegration(formX, formY)
        # FIX: cointegration returns an (alpha, beta) tuple, never None
        # itself, so the old `coefficiens is None` test could not fire.
        if coefficiens[0] is None:
            print('未形成协整关系')
            return None
        if tradePeriod is None:
            tradePeriod = formPeriod
        formX, formY = self.getPeriodPrice(tradePeriod)
        logPriceX = np.log(formX)
        # FIX: the original logged formX for BOTH legs; Y must use formY.
        logPriceY = np.log(formY)
        return logPriceY - coefficiens[0] - coefficiens[1] * logPriceX

    def calAllCointegration(self, stocks):
        """Test every column pair for cointegration; fitted (alpha, beta)
        pairs go to self.resultCoint, the rest to self.resultNotCoint."""
        if not isinstance(stocks, pd.DataFrame):
            raise TypeError('传入的格式不对,传入DataFrame格式的参数')
        self.resultCoint = pd.DataFrame()
        self.resultNotCoint = pd.DataFrame()
        for c in combinations(stocks.columns, 2):
            self.setStock(stocks[c[0]], stocks[c[1]])
            alpha, beta = self.cointegration()
            name = c[0] + ':' + c[1]
            # FIX: `if alpha:` treated a legitimate alpha of 0.0 as "not
            # cointegrated"; test against None explicitly.
            if alpha is not None:
                results = pd.DataFrame([[alpha, beta]], columns=['alpha', 'beta'], index=[name])
                # FIX: DataFrame.append was removed in pandas 2.0; pd.concat
                # behaves identically here and works on older pandas too.
                self.resultCoint = pd.concat([self.resultCoint, results])
            else:
                self.resultNotCoint = pd.concat([self.resultNotCoint, pd.DataFrame([name], columns=['name'])])

    def calBound(self, method, formPeriod=None, width=1.5):
        """Return (upper, lower) trading bounds: mean ± width * std of the
        spread produced by the chosen method."""
        if method == 'SSD':
            spread = self.SSDSpread(formPeriod)
        # FIX: the code only matched the misspelling 'Cointegation' while the
        # error message advertises 'Cointegration'; both are accepted so any
        # existing caller keeps working.
        elif method in ('Cointegration', 'Cointegation'):
            spread = self.CointegrationSpread(formPeriod)
        else:
            raise Exception('不存在该方法,选择‘SSD’或者‘Cointegration’')
        mu = np.mean(spread)
        std = np.std(spread)
        UpBound = mu + width * std
        DownBound = mu - width * std
        return UpBound, DownBound

    def getDateIndex(self, date):
        """Parse a 'YYYYMMDD:YYYYMMDD' string into (start, end) dates."""
        result = date.split(':')
        start = datetime.strptime(result[0], '%Y%m%d').date()
        end = datetime.strptime(result[1], '%Y%m%d').date()
        return start, end

    def getPeriodPrice(self, timePeriod):
        """Slice both price series to `timePeriod` ('YYYYMMDD:YYYYMMDD'),
        or return them whole when no period is given."""
        if timePeriod:
            form_start, form_end = self.getDateIndex(timePeriod)
            # FIX: the original re-passed the already-parsed date objects
            # into getDateIndex (which splits a string), crashing on any
            # non-empty period; slice directly with the parsed dates.
            formX = self.priceX[form_start:form_end]
            formY = self.priceY[form_start:form_end]
        else:
            formX = self.priceX
            formY = self.priceY
        return formX, formY

    def standardPrice(self, price):
        """Normalize a price path to cumulative gross returns.
        The first element is NaN (no prior price to compare against)."""
        ret = (price - price.shift(1)) / price.shift(1)[1:]
        return (ret + 1).cumprod()

    def to_csv(self, csv_path=None):
        """Dump whichever result tables are non-empty to fixed CSV names.
        NOTE(review): csv_path is accepted but unused, as in the original."""
        print(len(self.resultCoint))
        if len(self.resultSSD) > 0:
            self.resultSSD.to_csv('resultSSD.csv', header=None, index=False)
        if len(self.resultCoint) > 0:
            self.resultCoint.to_csv('resultCoint.csv')
        if len(self.resultNotCoint) > 0:
            self.resultNotCoint.to_csv('resultNotCoint.csv', index=False)
|
"""Basic mathematical operations in Python"""
def math_op(a, b):
    """Return [a/b, a//b, a%b, a**b] for the pair (a, b)."""
    return [
        a / b,    # true division
        a // b,   # floor division
        a % b,    # remainder
        a ** b,   # exponentiation
    ]
def check_parity(n):
    """Return 0 when n is even, 1 when n is odd (Python's % is
    sign-insensitive here for integers)."""
    return n % 2
if __name__ == '__main__':
    # Demonstrate the four operations on the fixed pair (3, 2).
    normal_division, floor_division, modulus, power = math_op(3, 2)

    print('---Mathematical operations between 3 and 2---\n')
    print(f'Normal division = {normal_division}')
    print(f'Floor division = {floor_division}')
    print(f'Modulus = {modulus}')
    print(f'Power = {power}\n\n')

    print('---Check if the given numbers are even or odd---\n')
    for sample in (2, -2, 7, 11):
        print(f'Result for {sample} = {check_parity(sample)}')
|
from flask import Flask,g
import pandas as pd
from pprint import pprint as pp
app = Flask(__name__)
@app.route('/search/<search_key>')
def search_data(search_key):
    """Return rows whose ServiceType equals search_key as a JSON records string."""
    frame = g.search_df
    matches = frame[frame['ServiceType'] == search_key]
    payload = matches.to_json(orient='records')
    pp(payload)
    return payload
@app.before_request
def read_data():
    """Load the first sheet of the search workbook into flask.g.

    NOTE(review): re-reads the Excel file on every request — acceptable for
    a demo, but worth caching for real use.
    """
    g.search_df = pd.read_excel('ow_search_data.xlsx',0)
    pp(g.search_df)
if __name__ == "__main__":
    # Development server only; debug=True enables the reloader/debugger.
    app.run(debug=True)
#coding=utf-8
import paramiko
import os
from helper import cal_relative_path
class FTP(object):
"""docstring for FTP"""
def __init__(self, host, port, user, password, path ="/", local_path="/", protocol = 'FTP'):
self.transport = paramiko.Transport((host, port))
self.transport.connect(username = user, password = password)
self.client = paramiko.SFTPClient.from_transport(self.transport)
self.path = path
self.local_path = local_path
def close(self):
self.client.close()
self.transport.close()
def set_path(self, path):
self.path = path
def upload(self,filename):
if not os.path.exists(filename):
print 'file not exists: %s' % filename
dst_file = self.get_remote_file(filename)
try:
print "remote:", dst_file
print "local:", filename
self.client.put(filename, dst_file)
except:
print "upload failed!"
else:
print "upload successful!"
def download(self,filename):
dst_file = self.get_remote_file(filename)
try:
print "remote:", dst_file
print "local:", filename
self.client.get(dst_file, filename)
except:
print "download failed!"
else:
print "download successful!"
def get_remote_file(self, filename):
fullname = os.path.abspath(os.path.join(os.path.dirname(__file__), filename))
# print self.local_path
# print fullname
rel_file = cal_relative_path(fullname,self.local_path)
rel_file = rel_file.replace(os.sep,"/").strip("/")
dst_file = "%s/%s" % (self.path, rel_file)
# print '--------------'
# print fullname
# print self.local_path
# print self.path
# print rel_file
# print dst_file
# print '--------------'
return dst_file
if __name__ == "__main__":
    # Module smoke check only; the class is intended to be imported.
    print "This is ftp.py"
|
def get_seller_order_notification_type(order_id, order_status):
    """Seller-facing text for an order status change; None for unknown status."""
    templates = {
        'NEW': 'You have received a new order {0}',
        'ACCEPTED': 'You have accepted item(s) in order {0}',
        'REJECTED': 'You have rejected item(s) in order {0}',
        'COMPLETED': 'You have delivered item(s) in order {0}',
        'DISPATCHED': 'You have dispatched item(s) in order {0}',
        'CANCELLED': 'Buyer has cancelled order {0}',
    }
    template = templates.get(order_status)
    return template.format(order_id) if template is not None else None
def get_buyer_order_notification_type(order_id, order_status):
    """Buyer-facing text for an order status change; None for unknown status."""
    templates = {
        'NEW': 'You have placed a new order {0}',
        'ACCEPTED': 'Seller has accepted item(s) in order {0}',
        'REJECTED': 'Seller has rejected item(s) in order {0}',
        'COMPLETED': 'Seller has delivered item(s) in order {0}',
        'DISPATCHED': 'Seller has dispatched item(s) in order {0}',
        'CANCELLED': 'You have cancelled your order {0}',
    }
    template = templates.get(order_status)
    return template.format(order_id) if template is not None else None
def get_seller_complain_notification_type(query_id, query_status):
    """Seller-facing text for a complaint/service-request status change."""
    templates = {
        'NEW': 'You have received a new complain/service request {0}',
        'ACCEPTED': 'You have accepted complain/service request {0}',
        'REJECTED': 'You have rejected complain/service request {0}',
        'RESOLVED': 'You have resolved complain/service request {0}',
    }
    template = templates.get(query_status)
    return template.format(query_id) if template is not None else None
def get_buyer_complain_notification_type(query_id, query_status):
    """Buyer-facing text for a complaint/service-request status change."""
    templates = {
        'NEW': 'You have raised a new complain/service request {0}',
        'ACCEPTED': 'Seller has accepted complain/service request {0}',
        'REJECTED': 'Seller has rejected complain/service request {0}',
        'RESOLVED': 'Seller has resolved complain/service request {0}',
    }
    template = templates.get(query_status)
    return template.format(query_id) if template is not None else None
|
import unittest
from test_Sphere import SphereTest
from test_Customer import CustomerTest
from test_Manager import ManagerTest
def main():
    """Collect the three project test suites and run them with a text runner."""
    suite = unittest.TestSuite()
    for case in (SphereTest, CustomerTest, ManagerTest):
        suite.addTest(unittest.makeSuite(case))
    unittest.TextTestRunner().run(suite)
if __name__ == "__main__":
    # Run all suites when invoked directly.
    main()
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 16 21:39:02 2020
@author: Rizky Dewa Sakti_1301180358
"""
#input soal sudoku
# Sudoku puzzle input; 0 marks an empty cell to be filled by the solver.
Board = [
    [4,0,0,8,0,0,5,7,0],
    [0,5,0,0,2,0,0,1,0],
    [9,0,0,0,0,5,6,0,0],
    [7,0,0,0,0,0,0,5,0],
    [0,9,6,0,0,0,3,2,0],
    [0,4,0,0,0,0,0,0,6],
    [0,0,4,1,0,0,0,0,9],
    [0,7,0,0,8,0,0,3,0],
    [0,2,8,0,0,9,0,0,5],
    ]
def print_board(B):
    """Pretty-print a 9x9 board with horizontal/vertical 3x3 box separators."""
    for i, board_row in enumerate(B):
        if i % 3 == 0 and i != 0:
            print('- - - - - - - - - - - - - - -')
        for j, cell in enumerate(board_row):
            if j % 3 == 0 and j != 0:
                print(' | ', end="")
            if j != 8:
                print(str(cell) + " ", end="")
            else:
                print(cell)
def find_empty(B):
    """Return (row, col) of the first zero cell in row-major order,
    or None when the board is full."""
    for r, board_row in enumerate(B):
        for c, cell in enumerate(board_row):
            if cell == 0:
                return r, c
    return None
def valid(B, num, pos):
    """True if placing `num` at pos=(row, col) violates no Sudoku constraint.

    A cell equal to `num` at `pos` itself does not count as a conflict, so
    already-placed numbers validate against their own position.
    """
    row, col = pos
    # Row: num may not appear elsewhere in this row.
    if any(B[row][i] == num and i != col for i in range(len(B[0]))):
        return False
    # Column: num may not appear elsewhere in this column.
    if any(B[i][col] == num and i != row for i in range(len(B[0]))):
        return False
    # 3x3 box containing pos: integer-divide to find the box origin.
    box_r = row // 3
    box_c = col // 3
    for i in range(box_r * 3, box_r * 3 + 3):
        for j in range(box_c * 3, box_c * 3 + 3):
            if B[i][j] == num and (i, j) != pos:
                return False
    return True
def sudoku_backtrack(B):
    """Solve the board in place by recursive backtracking.

    Returns True when every cell is filled consistently, False when the
    current partial assignment cannot be completed (triggering a backtrack
    in the caller).
    """
    slot = find_empty(B)
    if slot is None:
        # Base case: no empty cell left — the board is solved.
        return True
    row, col = slot
    for candidate in range(1, 10):
        if not valid(B, candidate, (row, col)):
            continue
        B[row][col] = candidate
        if sudoku_backtrack(B):
            return True
        # Candidate led to a dead end: undo and try the next number.
        B[row][col] = 0
    return False
# Driver: show the puzzle, solve it in place, then show the result.
print("")
print("--------------Board Awal--------------")
print("")
print_board(Board)
sudoku_backtrack(Board)
print("")
print("--------------HASIL-------------------")
print("")
print_board(Board)
# Simple multiple-choice algebra quiz: 4(x+3) with x = 8 equals 44.
value = [35, 36, 40, 44]

print("Answer the following algebra question: ")
print("if x = 8, then what is the value of 4(x+3) ?")
for position, option in enumerate(value, start=1):
    print(position, option, sep=". ")

n = int(input("Your choice: "))
# Accept either the option number (4) or the answer value itself (44).
if n in (4, 44):
    print("Bingo")
else:
    print("Not correct")
# Advent of Code 2017 day 15 style dueling generators.
GEN_A_INIT = 679
GEN_A_FACTOR = 16807
GEN_B_INIT = 771
GEN_B_FACTOR = 48271
MODULUS = 2147483647  # 2^31 - 1
RUN_1_LENGTH = 40000000
RUN_2_LENGTH = 5000000

def generator(init, factor, rule=lambda a: True):
    """Yield successive values of (prev * factor) % MODULUS that pass `rule`."""
    result = init
    while True:
        result *= factor
        result %= MODULUS
        if rule(result):
            yield result

def run(length=RUN_1_LENGTH):
    """Count draws whose low 16 bits match between generators A and B.

    FIX: `a.next()` is Python-2-only syntax; the `next()` builtin works on
    Python 2.6+ and 3. Generalized: `length` (default unchanged) replaces
    the hard-coded run length, and sum-of-generator replaces building a
    40M-element throwaway list.
    """
    a = generator(GEN_A_INIT, GEN_A_FACTOR)
    b = generator(GEN_B_INIT, GEN_B_FACTOR)
    return sum(1 for _ in range(length) if next(a) & 0xFFFF == next(b) & 0xFFFF)

def run_2(length=RUN_2_LENGTH):
    """Part 2: A only yields multiples of 4, B only multiples of 8."""
    a = generator(GEN_A_INIT, GEN_A_FACTOR, rule=lambda x: x % 4 == 0)
    b = generator(GEN_B_INIT, GEN_B_FACTOR, rule=lambda x: x % 8 == 0)
    return sum(1 for _ in range(length) if next(a) & 0xFFFF == next(b) & 0xFFFF)
if __name__ == '__main__':
    # Full puzzle runs: 40M and 5M comparisons — slow in pure Python.
    print(run())
    print(run_2())
|
from json_database import JsonDatabase
# Demo of the json_database package: create a keyed store backed by a file.
optional_file_path = "users.db"
db = JsonDatabase("users", optional_file_path)
# add some users to the database
for user in [
    {"name": "bob", "age": 12},
    {"name": "bobby"},
    {"name": ["joe", "jony"]},
    {"name": "john"},
    {"name": "jones", "age": 35},
    {"name": "jorge"},  # NOTE: no duplicate entries allowed
    {"name": "jorge"},  # this one will be ignored
    {"name": "joey", "birthday": "may 12"}]:
    db.add_item(user)
# pretty print database contents
db.print()
# save it (persists to users.db)
db.commit()
floor=0
while floor<10:
print floor
floor=floor+1
if floor==10:
print "mein top par pahunch gaya"
|
import csv
import os
# Path to the input budget CSV (columns: Date, Profit/Losses).
pybank_csv = os.path.join("Resources", 'budget_data.csv')
# Parallel lists: month labels and month-over-month profit changes.
months_change = []
net_change_list = []
# Running totals across the whole file.
tot_months = 0
tot_profits = 0
# [month, amount] trackers for the extremes; the decrease tracker starts
# at a large sentinel so the first change always replaces it.
inc_chg = ["", 0]
dec_chg = ["", 9999999999999]
with open(pybank_csv, "r") as csvfile:
    csv_reader=csv.reader(csvfile, delimiter=',')
    # Skip the column-header row.
    header = next(csv_reader)
    # Extract the first row to avoid appending to net_change
    # (there is no previous month to diff against).
    first_row = next(csv_reader)
    tot_months += 1
    tot_profits += int(first_row[1])
    prev_net = int(first_row[1])
    for row in csv_reader:
        # track the total
        tot_months += 1
        tot_profits += int(row[1])
        # track the net change
        net_change = int(row[1]) - prev_net
        prev_net = int(row[1])
        net_change_list += [net_change]
        months_change += [row[0]]
        # Calculate the greatest increase
        if net_change > inc_chg[1]:
            inc_chg[0] = row[0]
            inc_chg[1] = net_change
        # Calculate the greatest decrease
        if net_change < dec_chg[1]:
            dec_chg[0] = row[0]
            dec_chg[1] = net_change
    # Calculate the average net change
    net_monthly_avg = sum(net_change_list) / len(net_change_list)
# Generate the output summary from the accumulated statistics.
output_summary = (
    f"Financial Analysis\n"
    f"--------------\n"
    f"Total Months: {tot_months}\n"
    f"Total Profits: {tot_profits}\n"
    f"Average Change: ${net_monthly_avg:.2f}\n"
    f"Greatest Increase in Profits: {inc_chg[0]} (${inc_chg[1]})\n"
    # FIX: the closing parenthesis after the decrease amount was missing,
    # unlike the matching increase line above.
    f"Greatest Decrease in Profits: {dec_chg[0]} (${dec_chg[1]})\n"
)
print(output_summary)

output_file = os.path.join("budget_analysis.txt")

# Persist the same summary next to the script.
with open(output_file, "w") as txt_file:
    txt_file.write(output_summary)
# python3
import sys, threading
sys.setrecursionlimit(10**7) # max depth of recursion
threading.stack_size(2**27) # new thread will get stack of such size
class TreeHeight:
    """Reads a tree given as a parent array and computes its height."""

    def read(self):
        # Number of nodes, then the parent of each node (-1 marks the root).
        self.n = int(sys.stdin.readline())
        self.parent = list(map(int, sys.stdin.readline().split()))

    def compute_height(self):
        """Return the height: number of nodes on the longest root-to-leaf path.

        BUG FIX: the previous version reused a stale loop index, never
        returned a value, and mixed two half-finished algorithms. This
        version memoizes each node's depth, so the whole pass is O(n)
        (the naive per-node walk is O(n^2) on a degenerate chain).
        """
        depth = [0] * self.n  # depth[i] = nodes on root..i path; 0 = not yet known

        def node_depth(i):
            # Walk up to the nearest node with a known depth, then fill in
            # depths on the way back down (iterative: no recursion limit).
            path = []
            while i != -1 and depth[i] == 0:
                path.append(i)
                i = self.parent[i]
            base = depth[i] if i != -1 else 0
            for node in reversed(path):
                base += 1
                depth[node] = base
            return base

        return max((node_depth(v) for v in range(self.n)), default=0)
def main():
    # Read n and the parent array from stdin, then print the tree height.
    tree = TreeHeight()
    tree.read()
    print(tree.compute_height())
# Run in a worker thread: threading.stack_size(2**27) above gives it a large
# stack (the main thread's stack size cannot be changed this way).
threading.Thread(target=main).start()
|
from net.yolo_top import yolov3
from data.data_pipeline import data_pipeline
from net.config import cfg
import numpy as np
import time
import tensorflow as tf
import os
def tower_loss(scope, imgs, true_boxes, istraining):
    """Build the YOLOv3 graph on one tower and return its total loss.

    Args:
        scope: unique prefix string identifying the tower, e.g. 'tower_0'.
        imgs: image batch tensor fed to the network.
        true_boxes: ground-truth box tensor for the same batch.
        istraining: bool tensor toggling training-mode behavior.

    Returns:
        Scalar tensor: sum of all 'losses'-collection entries created
        under this tower's scope.
    """
    # Build inference Graph.
    # Build the portion of the Graph calculating the losses. Note that we will
    # assemble the total_loss using a custom function below.
    model = yolov3(imgs, true_boxes, istraining)
    _ = model.compute_loss()
    # Assemble all of the losses for the current tower only.
    losses = tf.get_collection('losses', scope)
    # Calculate the total loss for the current tower.
    total_loss = tf.add_n(losses, name='total_loss')
    return total_loss
def average_gradients(tower_grads):
    """Average each shared variable's gradients across all towers.

    Args:
        tower_grads: list (one entry per tower) of lists of (gradient,
            variable) tuples, as produced by compute_gradients on each tower.

    Returns:
        A single list of (gradient, variable) pairs where each gradient is
        the mean over the towers. Acts as a synchronization point across
        all towers.
    """
    averaged = []
    for per_tower in zip(*tower_grads):
        # per_tower holds one (grad, var) pair per tower, all for the same
        # shared variable: ((g_gpu0, v), ..., (g_gpuN, v)).
        stacked = tf.concat(
            axis=0,
            values=[tf.expand_dims(g, 0) for g, _ in per_tower])
        mean_grad = tf.reduce_mean(stacked, 0)
        # The variable is shared (redundant) across towers, so the first
        # tower's reference is as good as any.
        averaged.append((mean_grad, per_tower[0][1]))
    return averaged
def train(num_gpus):
    """Train YOLOv3 across num_gpus towers with synchronized gradient averaging.

    Builds one replica per GPU (variables shared, gradients averaged on the
    CPU), restores the latest checkpoint from cfg.path.ckpt_dir when present,
    then runs until cfg.train.max_batches, logging loss every 10 steps and
    checkpointing every 2000 steps.
    """
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        # Create a variable to count the number of train() calls. This equals the
        # number of batches processed * FLAGS.num_gpus.
        global_step = tf.Variable(0, trainable=False)
        #lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
        #                                global_step,
        #                                decay_steps,
        #                                LEARNING_RATE_DECAY_FACTOR,
        #                                staircase=True)
        # Piecewise-constant learning-rate schedule from the config.
        lr = tf.train.piecewise_constant(global_step,
                                         cfg.train.lr_steps,
                                         cfg.train.lr_scales)
        # Create an optimizer that performs gradient descent.
        # opt = tf.train.GradientDescentOptimizer(lr)
        opt = tf.train.AdamOptimizer(learning_rate = lr)
        tower_grads = []
        imgs, true_boxes = data_pipeline(
            [cfg.path.train_data_path, cfg.path.train_data_path], cfg.batch_size)
        print(imgs.shape)
        print(true_boxes.shape)
        # Pin the batch dimension so downstream shapes are fully static.
        imgs = tf.reshape(imgs,
                          [cfg.batch_size, imgs.shape[1], imgs.shape[2], imgs.shape[3]])
        true_boxes = tf.reshape(true_boxes,
                                [cfg.batch_size, true_boxes.shape[1], true_boxes.shape[2]])
        batch_queue = tf.contrib.slim.prefetch_queue.prefetch_queue(
            [imgs, true_boxes], capacity=2 * num_gpus)
        with tf.variable_scope(tf.get_variable_scope()):
            for i in range(num_gpus):
                with tf.device('/gpu:%d' % i):
                    with tf.name_scope('%s_%d' % ('tower', i)) as scope:
                        istraining = tf.constant(True, tf.bool)
                        imgs_batch, true_boxes_batch = batch_queue.dequeue()
                        # Build this tower's model; variables are shared
                        # across all towers via reuse below.
                        loss = tower_loss(scope,
                                          imgs_batch,
                                          true_boxes_batch,
                                          istraining)
                        # Reuse variables for the next tower.
                        tf.get_variable_scope().reuse_variables()
                        # Retain the summaries from the final tower.
                        summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
                        # Calculate the gradients for the batch of data on this tower.
                        grads = opt.compute_gradients(loss)
                        # NOTE(review): the debug loops below rebind `i`,
                        # shadowing the GPU loop index — harmless (the outer
                        # `for` reassigns it) but confusing.
                        print('ops________________________________________________')
                        for i in grads:
                            print(i[0])
                        print ('variables________________________________________________')
                        for i in grads:
                            print (i[1])
                        # Keep track of the gradients across all towers.
                        tower_grads.append(grads)
        # We must calculate the mean of each gradient. Note that this is the
        # synchronization point across all towers.
        grads = average_gradients(tower_grads)
        print('average ops________________________________________________')
        for i in grads:
            print(i[0])
        print ('average variables________________________________________________')
        for i in grads:
            print (i[1])
        # Add a summary to track the learning rate.
        summaries.append(tf.summary.scalar('learning_rate', lr))
        # Add histograms for gradients.
        for grad, var in grads:
            if grad is not None:
                summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad))
        # Apply the gradients to adjust the shared variables.
        apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
        #update_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        #vars_det = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="Head")
        #with tf.control_dependencies(update_op):
        #    apply_gradient_op = opt.minimize(loss,
        #                                     global_step = global_step,
        #                                     var_list = vars_det)
        # Add histograms for trainable variables.
        for var in tf.trainable_variables():
            summaries.append(tf.summary.histogram(var.op.name, var))
        # Group all updates to into a single train op.
        train_op = apply_gradient_op
        # Create a saver.
        saver = tf.train.Saver(tf.global_variables())
        # Build the summary operation from the last tower summaries.
        summary_op = tf.summary.merge(summaries)
        # Build an initialization operation to run below.
        init = tf.global_variables_initializer()
        gs = 0
        # Start running operations on the Graph. allow_soft_placement must be set to
        # True to build towers on GPU, as some of the ops do not have GPU
        # implementations.
        sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True,
            log_device_placement=False))
        ckpt = tf.train.get_checkpoint_state(cfg.path.ckpt_dir)
        if (ckpt and ckpt.model_checkpoint_path):
            saver.restore(sess, ckpt.model_checkpoint_path)
            # Recover the step count from the checkpoint filename suffix.
            gs = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            sess.run(tf.assign(global_step, gs))
            print('Restore batch: ', gs)
        else:
            print('no checkpoint found')
            sess.run(init)
        # Start the queue runners.
        tf.train.start_queue_runners(sess=sess)
        for i in range(gs, cfg.train.max_batches):
            #start_time = time.time()
            _ = sess.run(train_op)
            #duration = time.time() - start_time
            #print(duration)
            if(i % 10 == 0):
                # NOTE(review): this logs the LAST tower's loss only, and
                # runs a separate forward pass on a fresh dequeued batch.
                loss_ = sess.run(loss)
                print(i,': ', loss_)
            if(i % 2000 == 0):
                saver.save(sess,
                           cfg.path.ckpt_dir + 'yolov3.ckpt',
                           global_step=global_step,
                           write_meta_graph=False)
    print("Complete!!")
import numpy as np
from collections import defaultdict
def f(x):
    """This is the function f(x) which is proportional to some P(x).
    In this example, consider 5 points in a circle as our space. We
    do not know the actual probability distribution, but we do know
    some relative probabilities. """
    # _f() is bound late (defined at the bottom, inside __main__); it returns
    # the list of relative weights, indexed by point.
    return _f()[x]
def next_x_given_x(x, width=2):
    """Propose the next point, normally distributed (rounded) around x.

    This is the symmetric jumping distribution of the sampler.

    Args:
        x: current point index.
        width: scale of the normal jump, in points.

    Returns:
        Proposed point index, wrapped onto the circle of points.
    """
    num_points = len(_f())
    # BUG FIX: the jump was centered on 0 instead of the current point x, so
    # the proposal ignored the chain's state (and was no longer symmetric,
    # biasing the Metropolis acceptance rule). Center it on x as documented.
    next_x = x + round(np.random.normal() * width)
    # Loop around since our points are in a circle.
    # Note: this works with negative numbers too.
    return next_x % num_points
def metropolis(fun=f, num_iter=10000):
    """Approximate the stationary distribution of P(x), where fun(x) ∝ P(x)."""
    num_points = len(_f())
    current = np.random.randint(num_points)  # Starting point.
    # Tally how often each point appears in the Markov sequence.
    bins_of_samples = defaultdict(lambda: 0)
    for _ in range(num_iter):
        proposal = next_x_given_x(current)
        # Equals P(proposal)/P(current): the normalization constants cancel.
        acceptance_ratio = fun(proposal) / fun(current)
        if acceptance_ratio >= 1 or np.random.uniform() < acceptance_ratio:
            current = proposal  # Accept the move.
        # Otherwise reject: stay at the current point.
        bins_of_samples[current] += 1  # Count this sample.
    return [bins_of_samples[p] / num_iter for p in bins_of_samples]
if __name__ == '__main__':
    # Run the sampler against two weight vectors. _f is (re)defined on each
    # pass and is looked up late by f()/next_x_given_x()/metropolis().
    for D in [[89, 77, 84, 1, 30],
              [89, 77, 84, 1, 300]]:
        def _f():
            return D
        print('Target distribution: ')
        denominator_P = sum(_f())
        print([float('%.2f' % round(x/denominator_P, 2)) for x in _f()])
        print('Stationary distribution: ')
        print([round(x, 2) for x in metropolis()])
        print()
|
from lxml import etree

# Parse the saved Taobao search-results page. The prints below are XPath
# explorations of the item list; the loop at the bottom extracts the data.
html = etree.parse('taobaoProduct.html', etree.HTMLParser(encoding="utf-8"))
print(html.xpath('//div[@id="mainsrp-itemlist"]'))
print(html.xpath('//div[@id="mainsrp-itemlist"]//div[@class="items"][1]/div'))
obj_list = html.xpath(
    '//div[@id="mainsrp-itemlist"]//div[@class="items"][1]/div')
print(html.xpath(
    '//div[@id="mainsrp-itemlist"]//div[@class="items"][1]/div[1]//div[@class="price g_price g_price-highlight"]/strong/text()'))
print(html.xpath(
    '//div[@id="mainsrp-itemlist"]//div[@class="items"][1]/div[1]//div[@class="row row-2 title"]/a/text()'))
print(html.xpath('//div[@id="mainsrp-itemlist"]//div[@class="items"][1]/div[1]//div[@class="row row-2 title"]/a')
      [0].xpath('string(.)').replace('\n', '').strip())
print(html.xpath(
    '//div[@id="mainsrp-itemlist"]//div[@class="items"][1]/div[1]//div[@class="shop"]/a/span[last()]/text()'))

# Collect one {price, title, shop} dict per listed item.
data_list = []
for obj in obj_list:
    item = {}
    item['price'] = obj.xpath(
        './/div[@class="price g_price g_price-highlight"]/strong/text()')[0]
    item['title'] = obj.xpath(
        './/div[@class="row row-2 title"]/a')[0].xpath('string(.)').replace('\n', '').strip()
    item['shop'] = obj.xpath(
        './/div[@class="shop"]/a/span[last()]/text()')[0]
    # BUG FIX: data_list was created but the scraped items were never stored.
    data_list.append(item)
    print(item)
|
#encoding='utf-8'
# Open an Allure report for the current version over a local web server.
try:
    import os,sys
except Exception as err:
    # NOTE(review): if the import itself fails, sys is undefined here and
    # sys.exit would raise NameError; in practice os/sys always import.
    print('导入库失败!请检查是否安装相关库后重试.')
    sys.exit(0)# friendly exit instead of letting the program crash later
def main(report_path=''):
    # Serve the Allure report at report_path; when no path is given, derive
    # it from this file's directory and the configured version number.
    if not report_path:
        base_path=os.path.dirname(os.path.abspath(__file__))# current project folder
        base_path=base_path.replace('\\','/')
        sys.path.insert(0,base_path)# add current dir to sys.path so the version config below imports
        try:# import the version configuration module
            import common.baseinfo as info
        except Exception as err:
            print('版本配置文件导入失败!请检查: '+base_path+'/common/baseinfo.py 文件是否存在.\n错误信息如下:')
            print(err)
            sys.exit(0)# friendly exit instead of crashing later
        else:
            try:
                version=info.version# read the version number
            except Exception as err:
                print('版本号获取失败!请检查是否设置了版本号.错误信息如下:')
                print(err)
                sys.exit(0)# friendly exit instead of crashing later
            else:
                report_path=base_path+'/report/'+str(version)# report dir for this version
    try:
        # Launch allure's built-in server on 127.0.0.1:8083 for this report.
        os.system('allure open -h 127.0.0.1 -p 8083 '+str(report_path))
    except Exception as err:
        print('自动化测试已完成,但创建web服务浏览失败!\n当前测试报告地址保存在: '+str(report_path)+'\n直接浏览html文件会导致数据加载失败!\n可以新建一个web服务,将web目录指向: '+str(report_path))
if __name__=='__main__':
    main()
|
# The main scraper
import requests, json, pdb, time, sys
import classes
# Main Method, get a list of artists and scrape their information using musicbrainz
def scrape (artistLocation, outputFile):
    """Scrape MusicBrainz for every artist named in the JSON file at
    artistLocation and dump artists/albums/songs to outputFile as JSON."""
    artists = []
    with open(artistLocation) as f:
        artists = json.load(f)
    # Create lists of artist/album/song objects that have been scraped
    songList = []
    albumList = []
    artistList = []
    # iterate through each artist and scrape for information
    for artist in artists:
        try:
            # Search for the artist and get his/her id
            artistId = musicBrainzScrape_searchArtist(artist)
            # Scrape the artist's information
            artistObject = musicBrainzScrape_scrapeArtistInformation(artistId)
            # If artist has no album information, moveon
            if(len(artistObject.getAlbumIdList()) == 0):
                continue
            # Scrape all album information associated with the artist
            albumObjectList = [musicBrainzScrape_scrapeAlbumInformation(albumId) for albumId in artistObject.getAlbumIdList()]
            # Scrape all song information about each album
            songObjectList = [musicBrainzScrape_scrapeSongInformation(album.getReleaseId()) for album in albumObjectList]
            artistList.append(artistObject)
            for albumObject in albumObjectList:
                albumList.append(albumObject)
            for songArray in songObjectList:
                for song in songArray:
                    songList.append(song)
        except:
            # NOTE(review): bare except silently aborts the whole crawl on ANY
            # error (even KeyboardInterrupt) and writes partial results;
            # consider `except Exception` plus logging the failing artist.
            break
    # Dump everything scraped so far, even after an aborted crawl.
    with open(outputFile, "w+") as f:
        print("Artists scraped: ")
        for art in artistList:
            print(art.artistName)
        songDicts = [song.produceDict() for song in songList]
        albumDicts = [album.produceDict() for album in albumList]
        artistDicts = [artist.produceDict() for artist in artistList]
        mainDict = {"artists":artistDicts, "albums":albumDicts, "songs":songDicts}
        json.dump(mainDict, f, sort_keys=True, indent=4, separators=(',', ': '))
# A wrapper on requests get method where if response is 503, wait sleep and attempt to procede
def sendGet (url, parameters=None):
    """HTTP GET with polite retry for MusicBrainz.

    Sleeps 1s before every attempt (rate limiting) and retries while the
    server reports a transient status (503 busy / 429 throttled).

    Args:
        url: endpoint to GET.
        parameters: optional dict of query parameters.

    Returns:
        Decoded JSON body on a 200 response; None for a bodiless success.

    Raises:
        requests.HTTPError: for non-retryable error statuses (e.g. 404) —
            previously these caused an infinite retry loop.
    """
    while True:
        time.sleep(1)
        r = requests.get(url, params=parameters)
        if(r.status_code == 200):
            return r.json()
        # BUG FIX: only retry statuses that can actually recover; a 404/400
        # used to spin forever.
        if r.status_code in (429, 503):
            print("HTTP GET Has status code: %d ... retrying" % (r.status_code))
            continue
        r.raise_for_status()
        return None  # non-200 success (e.g. 204): nothing to decode
# Searches for the artist name on musicBrainz. Returns the artist's unique music brainz id.
def musicBrainzScrape_searchArtist (artistName):
    """Search MusicBrainz for artistName; return the top result's unique id."""
    endpoint = "https://musicbrainz.org/ws/2/artist/"
    query = {"query" : "artist:%s" % (artistName), "fmt":"json"}
    # GET the search endpoint and decode the JSON payload.
    payload = sendGet(endpoint, query)
    # Results are ranked by the server; trust the first hit.
    uniqueId = payload["artists"][0]["id"]
    print("Searching for artist: %s" % (artistName))
    return uniqueId
# Polls musicbrainz with the given artist ID and returns an artist object
def musicBrainzScrape_scrapeArtistInformation (artistId):
    """Fetch one artist by MBID and wrap it in a classes.artist object.

    Pulls aliases and release-groups in the same request; assumes the
    response always carries an "aliases" key — TODO confirm for artists
    without aliases.
    """
    # Set the link to send the HTTP GET and the required parameters to be used
    link = "https://musicbrainz.org/ws/2/artist/%s" % (artistId)
    parameters = {"inc":"aliases+release-groups", "fmt":"json"}
    # Perform the HTTP GET request and convert it to json
    responseJson = sendGet(link,parameters)
    # Get the name of the artist
    artistName = responseJson["name"]
    # Get the aliases the artist goes by
    artistAlias = [alias["name"] for alias in responseJson["aliases"] if alias["name"] != artistName]
    # Get the area that the artist resides
    area = None
    if ("area" in responseJson and responseJson["area"] != None):
        area = responseJson["area"]["name"]
    # Get the list of albums plus the album ids
    albumList = [album["title"] for album in responseJson["release-groups"]]
    albumIdList = [album["id"] for album in responseJson["release-groups"]]
    #albumType = album["primary-type"]
    #if ("secondary-types" in album):
    #    albumType = albumType + "_" + album["secondary-types"][0]
    print("Scraped artist: %s" % (artistName))
    # Create and return an artist object with the information we scraped
    artistObject = classes.artist(artistName, artistAlias, albumList, albumIdList, area)
    return artistObject
# Polls musicbrains with the given album ID and resturns an album object
def musicBrainzScrape_scrapeAlbumInformation (albumId):
    """Fetch one release-group by id and wrap it in a classes.album object.

    Uses only the FIRST release of the group as the track source; assumes
    "primary-type" and "secondary-types" are always present — TODO confirm.
    """
    # Set the link to send the HTTP GET and the required parameters to be used
    link = "https://musicbrainz.org/ws/2/release-group/%s" % (albumId)
    parameters = {"inc":"releases+tags", "fmt":"json"}
    # Perform the HTTP GET request and convert it to json
    responseJson = sendGet(link,parameters)
    # Get the album's name
    albumName = responseJson["title"]
    # Get the album's releasedate
    date = responseJson["first-release-date"]
    # Get the list of songs the album contains plus the id
    releaseId = responseJson["releases"][0]["id"]
    songList,songIdList = musicBrainzScrape_scrapeReleaseInformation(releaseId)
    # Get the album's type, if the album has a secondary type, we will append as primary_secondary
    albumType = responseJson["primary-type"]
    if (len(responseJson["secondary-types"]) > 0):
        albumType += "_" + responseJson["secondary-types"][0]
    print("Scraped album: %s" % (albumName))
    # Create and return an album object with the information we scraped
    albumObject = classes.album(albumType, albumName, date, songList, releaseId)
    return albumObject
# Polls musicbrains with the given release ID and returns a tuple of list of ([songTitle], [songId])
def musicBrainzScrape_scrapeReleaseInformation(releaseId):
    """Fetch one release; return ([song titles], [song ids]) from its first medium."""
    endpoint = "https://musicbrainz.org/ws/2/release/%s" % (releaseId)
    query = {"inc":"artist-credits+labels+discids+recordings", "fmt":"json"}
    payload = sendGet(endpoint, query)
    # Only the first medium (disc) of the release is considered.
    tracks = payload["media"][0]["tracks"]
    titles = [entry["title"] for entry in tracks]
    ids = [entry["id"] for entry in tracks]
    return (titles, ids)
# Polls musicbrains with the given release ID and returns a list of song objects
def musicBrainzScrape_scrapeSongInformation (releaseId):
    """Fetch one release by id and return a list of classes.song objects.

    One song per track on the FIRST medium; each carries the release title,
    track title, length and position.
    """
    # Set the link to send the HTTP GET and the required parameters to be used
    link = "https://musicbrainz.org/ws/2/release/%s" % (releaseId)
    parameters = {"inc":"recordings", "fmt":"json"}
    # Perform the HTTP GET request and convert it to json
    responseJson = sendGet(link,parameters)
    # Populate and return a list of song objects
    arr = [classes.song(responseJson["title"],s["title"],s["length"],s["position"]) for s in responseJson["media"][0]["tracks"]]
    print ("Scraped %d songs from release id: %s" % (len(arr), releaseId))
    return arr
if __name__ == "__main__":
    # Usage: scraper.py <artists.json> <output.json>
    scrape(sys.argv[1], sys.argv[2])
|
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import numpy as np
import tensorflow as tf
from .f_approximator import FunctionApproximator
class Network(FunctionApproximator):
    # Thin wrapper over FunctionApproximator that runs single-sample inference
    # through the first input/output tensors of the underlying TF graph.
    def __init__(self, inputs, outputs, scope=None):
        super(Network, self).__init__(inputs, outputs, scope=scope)
    def predict(self, x):
        # Prepend a batch dimension: the graph expects batched input.
        # NOTE(review): assumes x is a rank-1 array — confirm at call sites.
        x = x[np.newaxis,:]
        # return self.model.predict(x)
        return self.sess.run(self.outputs[0], feed_dict={self.inputs[0]: x})
|
import zerorpc
from engine import BMEngine
# from engine_sample import BMEngine
import logging
import imp
import os
logging.basicConfig()
bmEngine = BMEngine()
class ListenerRPC(object):
    """RPC surface exposed over zerorpc; delegates to the module-level BMEngine."""

    def send(self, name):
        # Echo endpoint used as a connectivity check.
        print("receive message: " + name)
        return "from RPC message: " + name

    def getTrack(self, message):
        # `message` is unused by the engine but kept for RPC compatibility.
        print('get track called. message: ' + message)
        trackId = bmEngine.getTrack()
        return trackId

    def receiveFeedback(self, feedback, trackId):
        # Forward user feedback for the given track to the engine.
        print('feedback: ' + feedback + ", trackId: " + trackId)
        msg = bmEngine.receiveFeedback(feedback, trackId)
        return msg

    def reloadEngine(self, moduleName, fileName):
        """Hot-swap the engine implementation from a source file.

        Returns the new engine's version string, or '-0.1' on failure.
        """
        # BUG FIX: without `global`, the reloaded engine was only assigned to
        # a local variable and the module-level bmEngine kept serving requests.
        global bmEngine
        print('Current path : ' + os.getcwd())
        try:
            newModule = imp.load_source(moduleName, fileName)
            bmEngine = newModule.NewEngine()
        except Exception:
            print("Input Error. moduleName: " + moduleName + ", fileName: " + fileName)
            return '-0.1'
        print('Succeed reload engine! Version : ' + bmEngine.version)
        return bmEngine.version
s = zerorpc.Server(ListenerRPC())
#s.bind("tcp://0.0.0.0:8080")
s.bind("tcp://0.0.0.0:4242")
#s.bind("tcp://127.0.0.1:4242")
print("python server is running")
try:
s.run();
except Exception, e:
print ('Exception error: ' + e);
# class HelloRPC(object):
# def hello(self, name):
# print('receive message: ' + name)
# return "Hello, %s" % name
# s = zerorpc.Server(HelloRPC())
# s.bind("tcp://0.0.0.0:4242")
|
import r2pipe
#flag{theres_three_of_em}
def file_stream(c):
    """Write a.rr2, a rarun2 profile feeding 'flag{<c>' to ./triptych's stdin.

    Args:
        c: candidate character(s) appended after the known 'flag{' prefix.
    """
    # Context manager ensures the profile is flushed/closed even on error
    # (the old open/close pair leaked the handle if a write raised).
    with open("a.rr2","w") as fs:
        fs.write("#!/usr/bin/rarun2\n")
        fs.write("program=./triptych\n")
        fs.write("stdin=\"flag{"+c+"\""+"\n")
        fs.write("stdout=")
def table(c):
    # Map candidate character c to the byte the binary emits for it:
    # run ./triptych under r2's debugger with c fed on stdin (via the rarun2
    # profile), step through the transform stages, and read the result from
    # the low byte of rdx (register dl).
    file_stream(c)
    r2=r2pipe.open("./triptych")
    r2.cmd("e dbg.profile=a.rr2")
    # Reopen the file in debug mode so breakpoints take effect.
    r2.cmd("ood")
    # Breakpoints at each stage's call site (addresses from static analysis,
    # see the comments at the bottom of the file).
    r2.cmd("db 0x00400acd")
    r2.cmd("dc")
    r2.cmd("db 0x004009d7")
    r2.cmd("dc")
    r2.cmd("db 0x004008e1")
    r2.cmd("dc")
    r2.cmd("db 0x004007ce")
    # Continue once per already-known prefix character, then once more to
    # process the candidate itself.
    r2.cmd("dc")#f
    r2.cmd("dc")#l
    r2.cmd("dc")#a
    r2.cmd("dc")#g
    r2.cmd("dc")#{
    r2.cmd("dc")
    # dl holds the transformed byte for c.
    ret_c=r2.cmd("dr dl")
    ret_c=int(ret_c,16)
    return ret_c
# Build the substitution table: for each printable candidate character,
# record which byte the binary maps it to (Python 2 script).
di={}
for i in range(48,126):
    a=table(chr(i))
    di[chr(a)]=chr(i)
# Invert the mapping over the captured ciphertext to recover the flag.
message="zmu}jnd{o{f_ndo{{_hz_{ga"
flag=""
for p in message:
    flag+=di[p]
print flag
print "flag is "+flag
#.text:0000000000400ACD call the_second
#r2.cmd("db 0x00400acd")
#r2.cmd("dc")
#2nd 0x004009d7
#3rd 0x004008e1
#4th 0x004007ce
#dl
|
#!/usr/bin/python
import proveedores, time
import Empleados, sys,os
def leer():
    # Console menu loop (Python 2): build BCP import files for suppliers or
    # employees until the user chooses to quit.
    efl="\n"
    msj=""" Programa de creacion de archivos para
     importar data en el BCP"""
    msj2= "Precione:"+efl+"1.- Para crear archivo de proveedores"+efl
    msj2+="2.- Para crear archivo de Empleados"+efl
    msj2+="3.- Para salir del programa"
    while True:
        os.system('clear')
        # Banner framed in asterisks, sized to the title text.
        print "*"*(len(msj)+2)
        print "*"+msj+'*'
        print "*"*(len(msj)+2)
        print efl
        print msj2
        # Read exactly one character of the user's choice.
        c=sys.stdin.read(1)
        if c=='1':
            proveedores.leer_proveedores()
            # Visible 2.5s countdown before redrawing the menu.
            for i in xrange(5):
                print i
                time.sleep(0.5)
        elif c=='2': Empleados.leer_empleados()
        elif c=='3':
            print "Saliendo del programa..."+efl
            time.sleep(1)
            break
        # Consume the newline left in stdin by the one-character read above.
        c=sys.stdin.read(1)
leer()
|
import os
def ex2():
# Ask the user
print("Choose a number: ")
total_files = input()
files = []
# Create loop variable
n_total_files = int(total_files)
# get the name
print("Insert the name of the files:")
for k in range(n_total_files):
print("Name of the file " + str(k+1) + ":")
name_file = input()
files.append(name_file)
# Store
for k in range(n_total_files):
name = "ffmpeg -i {} ".format(files[k])
# Add name
name = str(name + ".mp4")
os.system(name)
if __name__ == "__main__":
ex2()
|
'''
Sinking Titanic (greedy)

Sort the weights and pair the lightest remaining passenger with the
heaviest; if their combined weight exceeds the limit, the heaviest
rides alone.

list pop vs deque pop: popping from both ends is faster with a deque
(but a deque cannot pop from the middle).
'''
import sys
#sys.stdin = open("in1.txt","r")


def count_boats(weights, limit):
    """Return the minimum number of two-person boats for the given weights.

    Args:
        weights: iterable of passenger weights (each assumed <= limit).
        limit: maximum combined weight a boat can carry.
    """
    people = sorted(weights)
    boats = 0
    while people:
        # BUG FIX: the original popped twice when a single light passenger
        # remained, raising IndexError; send the last person alone instead.
        if len(people) == 1:
            people.pop()
            boats += 1
            break
        if people[0] + people[-1] > limit:
            # Heaviest exceeds the limit with anyone: rides alone.
            people.pop()
        else:
            # Lightest and heaviest share a boat.
            people.pop()
            people.pop(0)
        boats += 1
    return boats


if __name__ == "__main__":
    n, m = map(int, input().split())
    arr = list(map(int, input().split()))
    print(count_boats(arr, m))
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 28 17:49:34 2020
@author: Varad Srivastava
A valid email address meets the following criteria:
It's composed of a username, domain name, and extension assembled in this format: username@domain.extension
The username starts with an English alphabetical character, and any subsequent characters consist of one or more of the following: alphanumeric characters, -,., and _.
The domain and extension contain only English alphabetical characters.
The extension is 1, 2, or 3 characters in length.
Given pairs of names and email addresses as input, print each name and email address pair having a valid email address on a new line.
Hint: Try using Email.utils() to complete this challenge. For example, this code:
import email.utils
print email.utils.parseaddr('DOSHI <DOSHI@hackerrank.com>')
print email.utils.formataddr(('DOSHI', 'DOSHI@hackerrank.com'))
produces this output:
('DOSHI', 'DOSHI@hackerrank.com')
DOSHI <DOSHI@hackerrank.com>
Input Format
The first line contains a single integer, n, denoting the number of email addresses.
Each line of the n subsequent lines contains a name and an email address as two space-separated values following this format:
name <user@email.com>
Constraints
Output Format
Print the space-separated name and email address pairs containing valid email addresses only. Each pair must be printed on a new line in the following format:
name <user@email.com>
You must print each valid email address in the same order as it was received as input.
Sample Input
2
DEXTER <dexter@hotmail.com>
VIRUS <virus!@variable.:p>
Sample Output
DEXTER <dexter@hotmail.com>
Explanation
dexter@hotmail.com is a valid email address, so we print the name and email address pair received as input on a new line.
virus!@variable.:p is not a valid email address because the username contains an exclamation point (!) and the extension contains a colon (:). As this email is not valid, we print nothing.
"""
import email.utils
import re
if __name__ == "__main__":
    # Read the pair count, then echo only the pairs whose address matches:
    # a letter, then word/./- characters, '@', an alphabetic domain, a dot,
    # and a 1-3 letter extension.
    count = int(input())
    for _ in range(count):
        name_addr = email.utils.parseaddr(input())
        if re.match(r"[a-zA-Z][\w\.-]+@[a-zA-Z]+\.[a-zA-Z]{1,3}$", name_addr[1]):
            print(email.utils.formataddr(name_addr))
|
'''
7. Reverse Integer [Easy]
Given a 32-bit signed integer, reverse digits of an integer.
Example 1:
Input: 123
Output: 321
Example 2:
Input: -123
Output: -321
Example 3:
Input: 120
Output: 21
[Note]:
Assume we are dealing with an environment
which could only store integers within the 32-bit signed integer range: [−231, 231 − 1].
For the purpose of this problem,
assume that your function returns 0 when the reversed integer overflows.
[注意]: 如以下示例边际case,input为31位并未溢出,但是output则为34位溢出了,因此返回的是0。
[Input]:
1534236469
[Output]:
9646324351
而题目说的‘Given a 32-bit signed integer’,意思是给出的x一定是在32位里的带符号整数,
所以我们只需要检查输出的数字有没有溢出就可以了。
[Method 1.1]: str + Two-pointer
Time: O(lgn/2) = O(lgn),lgn是因为最多循环n的位数(相当于每次除以10)
Runtime: 32 ms, faster than 96.18% of Python3 online submissions for Reverse Integer.
Memory Usage: 13.9 MB, less than 5.26% of Python3 online submissions for Reverse Integer.
'''
class Solution:
    def reverse(self, x: int) -> int:
        """Reverse the decimal digits of x, keeping its sign.

        Returns 0 when the reversed value falls outside the signed 32-bit
        range. BUG FIX: the old sign test compared against ('-' or '+'),
        which always evaluates to '-', and the overflow check wrongly
        excluded the exact 32-bit bounds; both are corrected here.
        """
        sign = -1 if x < 0 else 1
        n = sign * int(str(abs(x))[::-1])
        return n if -2**31 <= n <= 2**31 - 1 else 0
'''
[Method 1.2]: str + bit_length + abs() + cmp()
Because in Python we have:
>>> False - True
-1
>>> True - True
0
>>> False - False
0
>>> True - False
1
==> ((x > 0) - (x < 0)) return -1 if x < 0; return 0 if x == 0; return 1 if x > 0
Runtime: 36 ms, faster than 84.41% of Python3 online submissions for Reverse Integer.
Memory Usage: 13.7 MB, less than 5.26% of Python3 online submissions for Reverse Integer.
'''
class Solution:
    def bit_length(self, n):
        """Bits needed for |n|; 0 for n == 0 (same as the old bin-string length)."""
        # int.bit_length is the built-in equivalent of measuring
        # len(bin(n).lstrip('-0b')) and avoids the lstrip char-set footgun.
        return abs(n).bit_length()

    def reverse(self, x: int) -> int:
        """Reverse x's digits; return 0 when the result needs 32 or more bits."""
        # (x > 0) - (x < 0) is the sign of x (cmp idiom: -1, 0, or 1).
        n = ((x > 0) - (x < 0)) * int(str(abs(x))[::-1])
        return n if self.bit_length(n) < 32 else 0
'''
[Method 3]
翻转整数,那就在x%10后赋给新的值rev,每次遍历前新的值先乘以10,再加上取余后的数。
每次遍历后x都除以10,如果这个rev比32位最大整数还要大,或者比最小的还要小,则返回0。
否则遍历结束后,返回rev即可。
Time: O(lgn), Space: O(1)
Runtime: 36 ms, faster than 84.41% of Python3 online submissions for Reverse Integer.
Memory Usage: 14 MB, less than 5.26% of Python3 online submissions for Reverse Integer.
'''
class Solution:
    def reverse(self, x: int) -> int:
        """Reverse x's decimal digits, preserving sign; 0 on 32-bit overflow."""
        negative = x < 0
        remaining = abs(x)
        result = 0
        # Peel off the low digit of x and push it onto the result.
        while remaining:
            remaining, digit = divmod(remaining, 10)
            result = result * 10 + digit
        if negative:
            result = -result
        return result if -2**31 < result < 2**31 - 1 else 0
|
#!/usr/bin/env python3
#
# Merge Submission Files
# find ./ -name 'submission*.csv' | xargs cat | sort -nr | awk -F',' '!a[$1]++' | sort -n | sponge > output/submission.csv
# grep ',1,' output/submission.csv | wc -l # count number of non-zero entries
#
# Run Main Script:
# PYTHONUNBUFFERED=1 time -p nice ././constraint_satisfaction/z3_solver.run.py | tee -a submission.log
#
from constraint_satisfaction.solve_dataframe import solve_dataframe
from utils.datasets import submission_file, test_df
if __name__ == '__main__':
    # Solve every row of the test set with a 6-minute cap (0.1h) per timeout
    # budget; modulo=(1, 0) presumably selects the single partition covering
    # all rows — TODO confirm against solve_dataframe's signature.
    solve_dataframe(test_df, savefile=submission_file, modulo=(1, 0), exact=False, max_timeout=0.1*60*60)
|
import sqlite3
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import plot_confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import MinMaxScaler

# Load the fish dataset from SQLite.
conn = sqlite3.connect('data.sqlite')
df = pd.read_sql_query("SELECT * FROM fish_data", conn)

# BUG FIX: drop NaN rows BEFORE splitting off the target; previously y was
# taken from the raw frame and X from the cleaned one, so their lengths
# disagreed whenever rows were dropped.
df = df.dropna()
y = df["Spec"]
X = df.drop(["Spec"], axis=1)

# Scale features to [0, 1] before fitting.
scaler = MinMaxScaler(feature_range=(0, 1))
X = scaler.fit_transform(np.array(X))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, shuffle=True)

decision = DecisionTreeClassifier(criterion='entropy',
                                  splitter='best',
                                  min_samples_split=2,
                                  min_samples_leaf=2,
                                  min_weight_fraction_leaf=0.0,
                                  max_features=6)
decision_model = decision.fit(X_train, y_train)
y_pred = decision_model.predict(X_test)
print(accuracy_score(y_test, y_pred))

# NOTE: rebinds the imported function's name to the computed matrix; done
# once, so no later call to the sklearn function is affected.
confusion_matrix = confusion_matrix(y_test, y_pred)
print(confusion_matrix)
def counts_from_confusion(confusion):
    """Per-class accuracy, sensitivity and specificity from a confusion matrix.

    For each class i, derives TP/FN/FP/TN via masks over the matrix and
    returns a list of dicts with keys 'Class', 'acc', 'sensitivity',
    'specificity'.
    """
    results = []
    for cls in range(confusion.shape[0]):
        tp = confusion[cls, cls]
        # False negatives: the rest of row `cls`.
        row_mask = np.zeros(confusion.shape)
        row_mask[cls, :] = 1
        row_mask[cls, cls] = 0
        fn = np.sum(np.multiply(confusion, row_mask))
        # False positives: the rest of column `cls`.
        col_mask = np.zeros(confusion.shape)
        col_mask[:, cls] = 1
        col_mask[cls, cls] = 0
        fp = np.sum(np.multiply(confusion, col_mask))
        # True negatives: everything outside row and column `cls`.
        tn_mask = 1 - (row_mask + col_mask)
        tn_mask[cls, cls] = 0
        tn = np.sum(np.multiply(confusion, tn_mask))
        results.append({'Class': cls,
                        'acc': (tp + tn)/(tp + tn + fp + fn),
                        'sensitivity': tp/(tp+fn),
                        'specificity': tn/(tn+fp)})
    return results
# Report per-class metrics and plot the confusion matrix.
# BUG FIX: the result used to be bound to `list`, shadowing the builtin.
metrics = counts_from_confusion(confusion_matrix)
print(metrics)
plot_confusion_matrix(decision, X_test, y_test)
plt.show()
# -*- coding: utf-8 -*-
__author__ = 'RicardoMoya'
# Exercise the rotating-identity connection: fetch TARGET_PAGE (icanhazip
# echoes the caller's IP) three times per identity, across 5 identities.
#TARGET_PAGE = "http://searchivarius.org/about"
#TARGET_PAGE = "http://rbc.ru"
TARGET_PAGE = "http://icanhazip.com"
from ConnectionManager import ConnectionManager, CHARSET
cm = ConnectionManager()
# new_identity() presumably requests a fresh exit/IP from the proxy layer —
# TODO confirm against ConnectionManager's implementation.
for j in range(5):
    for i in range(3):
        print ("\t\t" + cm.request(TARGET_PAGE).read().decode(CHARSET))
    cm.new_identity()
from django import forms
from django.core.validators import EmailValidator
from . import models
# Declarative form-field specs consumed by the template layer: each entry
# maps a field name to its HTML tag ("label"), its attributes ("elements")
# and, for selects, an option list ("value").
field = {
    "name": {
        "id": "name",
        "name": "name",
        "label": "input",
        "elements": {
            "class": "form-control",
            "type": "text",
            "placeholder": "Name"
        }
    },
    "email": {
        "id": "email",
        "name": "email",
        "label": "input",
        "elements": {
            "class": "form-control",
            "type": "text",
            "placeholder": "Email"
        }
    },
    "account": {
        "id": "account",
        "name": "account",
        "label": "input",
        "elements": {
            "class": "form-control",
            "type": "text",
            "placeholder": "Account"
        }
    },
    "status": {
        "id": "status",
        "name": "status",
        "label": "select",
        "value": [
            {
                "label": "option",
                "value": "Status",
                "elements": {
                    "value": "",
                    "selected": "selected"
                }
            },
            {
                "label": "option",
                "value": "un-authenticated",
                "elements": { "value": "unauthenticated" }
            },
            {
                "label": "option",
                "value": "authenticated",
                "elements": { "value": "authenticated" }
            }
        ],
        "elements": {
            "class": "form-control"
        }
    },
    "text": {
        "id": "text",
        "name": "text",
        "label": "textarea",
        "elements": {
            "style": "width: 100%; height: *;",
            "rows": "15"
        }
    }
}
# Form definitions: HTTP method, action URL, and the field names (keys into
# `field` above) each form renders/validates.
form = {
    "AddUserForm": {
        "method": "POST",
        "action": "user/A_Add/",
        "fields": ["name", "email", "account"]
    },
    "ModUserForm": {
        "method": "POST",
        "action": "user/A_Modify/",
        "fields": ["name", "email", "status"]
    },
    "DelUserForm": {
        "method": "POST",
        "action": "user/A_Delete/",
        "fields": ["account"]
    },
    "BatchImportForm": {
        "method": "POST",
        "action": "user/A_BatchImport/",
        "fields": ["text"]
    }
}
def UserForm(data, formName, act):
    """Build the document dict for a named form from submitted data.

    Args:
        data: mapping of submitted field values keyed by field name
        formName: key into the module-level `form` definitions
        act: action code passed through to models.action (e.g. 'U' for update)

    Returns: the collected document, or {'result': None} when a 'unique'
    field fails the duplicate check.
    """
    newDoc = {}
    for fieldName in form[formName]['fields']:
        if fieldName in models.action:
            rule = models.action[fieldName][act]
            # A 'unique' field must pass the duplicate check; the second
            # argument tells not_duplicate whether this is an update.
            if rule == 'unique' and not models.not_duplicate({fieldName: data[fieldName]}, act == 'U'):
                return {'result': None}
            # NOTE(review): fields with no models.action entry are silently
            # dropped from the document — confirm this is intended.
            newDoc[fieldName] = data[fieldName]
    return newDoc
|
import turtle as t
def say_hello(x, y):
    """Print a greeting containing the clicked coordinates."""
    print('Hello {} {}.'.format(x, y))
def drag_turtle(px, py):
    """Follow the mouse: move the turtle to the drag position."""
    t.goto(px, py)
# Wire the event handlers: clicking prints coordinates, dragging moves the turtle.
t.onclick( say_hello )
t.ondrag( drag_turtle )
# Enter the Tk event loop (blocks until the window is closed).
t.done()
|
__author__ = "Oier Lopez de Lacalle <oier.lopezdelacalle@gmail.com>"
import sys
import os
import nltk
import utils
import getopt
from LexSampReader import LexSampReader
def usage():
    """Write the command-line usage message to stderr."""
    message = "USAGE: create_gs.py -c configure.file\n"
    sys.stderr.write(message)
def write_stdout_gs(instances):
    """Write one 'instanceId<TAB>label' line per instance to stdout.

    Each instance is a 4-tuple (id, label, offset, tokens); only the first
    two fields are emitted.
    """
    for ins_id, ins_label, _offset, _tokens in instances:
        sys.stdout.write("%s\t%s\n" % (ins_id, ins_label))
if __name__ == '__main__':
    # Parse the single supported option: -c <configure.file>
    try:
        opts, args = getopt.getopt(sys.argv[1:], "c:")
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    if len(args) != 0:
        usage()
        sys.exit(2)
    conf_file = ""
    for o, a in opts:
        if o == "-c":
            conf_file = a
    if conf_file == "":
        usage()
        sys.exit(2)
    props = utils.read_properties(conf_file)
    # Read target words list
    if "WORDLIST" not in props:
        sys.stderr.write("[ERROR] WORDLIST file not defined\n")
        sys.exit(2)
    words = utils.read_words(props["WORDLIST"])
    # Get dataset path (file structure is hard-coded for simplicity)
    if "DATADIR" not in props:
        sys.stderr.write("[ERROR] DATADIR not defined\n")
        sys.exit(2)
    data_dir = props["DATADIR"]
    # Get number of folds in xvalidation
    if "XVAL" not in props:
        sys.stderr.write("[ERROR] XVAL number of folds not defined\n")
        # BUG FIX: previously execution fell through to the int() lookup
        # below and crashed with a KeyError instead of exiting cleanly.
        sys.exit(2)
    K = int(props["XVAL"])
    ##
    ## Get XVAL test files for each word in words
    ##
    for word in sorted(words):
        for fold in range(1, K + 1):
            #sys.stderr.write("[INFO] Fold %s\n" %str(fold))
            reader = LexSampReader()
            dataset = data_dir + "/" + word + "/xval/fold" + str(fold)
            testInstances = reader.getInstances(dataset + "/" + word + ".test.ls.utf8.xml")
            write_stdout_gs(testInstances)
|
# Area and circumference of a circle
import math
# SECURITY FIX: float() replaces eval() — eval executes arbitrary Python
# expressions typed by the user.
r = float(input("请输入半径:"))
while r != 0:
    # Compute area and circumference
    s = math.pi * r * r
    c = math.pi * 2 * r
    print("当半径为{:-^10.2f}时,圆的面积为{:-^10.2f},圆的周长为{:-^10.2f}\n".format(r,s,c))
    r = float(input("请输入半径(输入0时结束):"))
print("结束")
|
from Library import Library
import string
def read_file(path):
    """Parse a 'book scanning' style input file.

    Line 0: nb_books nb_libraries nb_days; line 1: book scores; then two
    lines per library (header, book ids). Also computes each book's
    frequency across libraries and normalizes every library's `factor`
    by the summed frequency of its books.

    Returns: (nb_days, all_books, libs) where libs is a list of Library.
    """
    # FIX: the file handle was opened but never closed; use a context manager.
    with open(path, "r") as f:
        lines = f.readlines()
    nb_books, nb_lib, nb_days = map(int, lines[0].replace("\n", "").split(" "))
    all_books = list(map(int, lines[1].split(" ")))
    i = 2
    lib_id = 0  # renamed from `id`, which shadowed the builtin
    libs = []
    repet_all_books = [0] * nb_books
    all_repet = 0
    while i < len(lines) - 1:
        nb_book_lib, signup_time, speed = map(int, lines[i].split(" ")[0:3])
        books_lib = list(map(int, lines[i + 1].split(" ")))
        # Count how often each book appears across libraries.
        for book in books_lib:
            repet_all_books[book] += 1
            all_repet += 1
        library = Library(lib_id, nb_book_lib, signup_time, speed, books_lib, all_books)
        libs.append(library)
        i += 2
        lib_id += 1
    # Turn raw counts into frequencies.
    for i, book in enumerate(all_books):
        repet_all_books[i] /= all_repet
    print("Calc Freq")
    # Scale each library's factor by the total frequency of its books.
    for j, lib in enumerate(libs):
        sum_freq = 0
        for book in lib.books:
            sum_freq += repet_all_books[book]
        lib.factor /= sum_freq
    print("Finished Reading")
    return nb_days, all_books, libs
def write_output(path, libs):
    """Write the solution file: library count, then per non-empty library a
    header line 'id nb_books' followed by its space-separated book ids.

    Args:
        path: output file path
        libs: iterable of (library_id, book_id_list) pairs
    """
    # FIX: the file handle was never closed, so output could be lost
    # unflushed; a with-statement guarantees close/flush.
    with open(path, "w") as f:
        # NOTE(review): the count includes libraries with zero books even
        # though those are skipped below — confirm against the file format.
        f.write(str(len(libs)) + "\n")
        for element in libs:
            id_lib = element[0]
            nb_books = len(element[1])
            if nb_books > 0:
                f.write(str(id_lib) + " " + str(nb_books) + "\n")
                for book in element[1]:
                    f.write(str(book) + " ")
                f.write("\n")
|
from mako.template import *
def indent(code, indentation=''):
    """Prefix every line of *code* (including empty ones) with *indentation*."""
    prefixed = [indentation + line for line in code.split('\n')]
    return '\n'.join(prefixed)
|
from django.apps import AppConfig
class DataImportConfig(AppConfig):
    """
    Configure the data_import application.
    """
    # Dotted-path label Django uses to locate the app.
    name = "data_import"
    # Human-readable name shown in the Django admin.
    verbose_name = "Data Import"
|
import sqlite3
class Sqligther():
    """Thin sqlite3 wrapper around the `subscribers` table."""

    def __init__(self, database_file):
        # Open the requested database.
        # BUG FIX: the `database_file` argument was previously ignored and
        # 'db.db' was hard-coded.
        self.connection = sqlite3.connect(database_file)
        self.cursor = self.connection.cursor()

    def get_subscriptions(self, status=True):
        """Return all subscriber rows with the given status (default: active)."""
        with self.connection:
            return self.cursor.execute(
                "SELECT * FROM subscribers WHERE status = ?", (status,)).fetchall()

    def subscriber_exists(self, user_id):
        """Return True if the user is already present in the table."""
        with self.connection:
            result = self.cursor.execute(
                "SELECT * FROM subscribers WHERE user_id = ?", (user_id,)).fetchall()
            return bool(len(result))

    def add_subscriber(self, user_id, status=True):
        """Insert a new subscriber row."""
        with self.connection:
            return self.cursor.execute(
                "INSERT INTO subscribers (user_id, status) VALUES (?,?)", (user_id, status,))

    def update_subscriptions(self, user_id, status):
        """Set the subscription status for an existing user."""
        with self.connection:
            return self.cursor.execute(
                " UPDATE subscribers SET status = ? WHERE user_id = ?", (status, user_id,))

    def close(self):
        """Close the underlying database connection."""
        self.connection.close()
|
import csv
import os
import random
import math
import pandas as pd
def read_data(csv_path):
    """Read in the training data from a csv file.

    Returns a list of dicts keyed by column name. Empty cells become None;
    numeric-looking cells are converted to float; everything else stays a
    string.
    """
    rows = []
    with open(csv_path, 'r') as csv_file:
        for record in csv.DictReader(csv_file):
            for key, raw in record.items():
                if raw == '':
                    record[key] = None
                else:
                    try:
                        record[key] = float(raw)
                    except ValueError:
                        record[key] = raw
            rows.append(record)
    return rows
def clean_data(data):
    """Convert the categorical columns of the loan data to numeric codes.

    Renames 'Gender'->'Female' and 'Education'->'Graduate', then maps each
    categorical value to an integer; values not listed in the code tables
    (e.g. NaN/None) pass through unchanged.

    Args:
        data: list of example dicts (as produced by read_data)
    Returns: the cleaned examples as a list of dicts.
    """
    # Per-column value -> code tables. This replaces six nearly identical
    # copy-pasted loops from the original implementation.
    # Dependents uses float keys because read_data converts '2' etc. to 2.0.
    codes = {
        'Female': {'Female': 1, 'Male': 0},
        'Married': {'Yes': 1, 'No': 0},
        'Graduate': {'Graduate': 1, 'Not Graduate': 0},
        'Self_Employed': {'Yes': 1, 'No': 0},
        'Property_Area': {'Urban': 2, 'Semiurban': 1, 'Rural': 0},
        'Dependents': {'3+': 3, 2.0: 2, 1.0: 1, 0.0: 0},
    }
    frame = pd.DataFrame(data=data)
    frame = frame.rename({'Gender': 'Female', 'Education': 'Graduate'}, axis=1)
    for column, table in codes.items():
        if column in frame:
            frame[column] = [table.get(value, value) for value in frame[column]]
    return frame.to_dict('records')
def rollback_data(attr, val):
    """Map a cleaned (numeric) split back to original labels for tree display.

    Args:
        attr: attribute name as it appears in the cleaned data
        val: numeric split threshold used by the tree
    Returns: (original attribute name, {'ge': labels >= threshold,
    'lt': labels < threshold}); genuinely numeric attributes are returned
    unchanged as (attr, val).
    """
    ret_attr = attr
    ret_val = val
    if attr == "Female":
        ret_attr = "Gender"
        if int(val) == 1:
            ret_val = {'ge': ["Female"], 'lt': ["Male"]}
        elif int(val) == 0:
            ret_val = {'ge': ["Female", "Male"], 'lt': []}
    if attr == "Married":
        ret_attr = "Married"
        if int(val) == 1:
            ret_val = {'ge': ["Yes"], 'lt': ["No"]}
        elif int(val) == 0:
            ret_val = {'ge': ["Yes", "No"], 'lt': []}
    # BUG FIX: this branch tested attr == "Graduation", which never matches —
    # clean_data renames the column to "Graduate".
    if attr == "Graduate":
        ret_attr = "Education"
        if int(val) == 1:
            ret_val = {'ge': ["Graduated"], 'lt': ["Not Graduate"]}
        elif int(val) == 0:
            ret_val = {'ge': ["Graduated", "Not Graduate"], 'lt': []}
    if attr == "Self_Employed":
        ret_attr = "Self_Employed"
        if int(val) == 1:
            ret_val = {'ge': ["Yes"], 'lt': ["No"]}
        elif int(val) == 0:
            ret_val = {'ge': ["Yes", "No"], 'lt': []}
    if attr == "Property_Area":
        ret_attr = "Property_Area"
        if int(val) == 2:
            ret_val = {'ge': ["Urban"], 'lt': ["Semiurban", "Rural"]}
        elif int(val) == 1:
            ret_val = {'ge': ["Urban", "Semiurban"], 'lt': ["Rural"]}
        elif int(val) == 0:
            ret_val = {'ge': ["Urban", "Semiurban", "Rural"], 'lt': []}
    if attr == "Dependents":
        ret_attr = "Dependents"
        if int(val) == 3:
            ret_val = {'ge': ["3+"], 'lt': ["2", "1", "0"]}
        elif int(val) == 2:
            ret_val = {'ge': ["3+", "2"], 'lt': ["1", "0"]}
        elif int(val) == 1:
            ret_val = {'ge': ["3+", "2", "1"], 'lt': ["0"]}
        elif int(val) == 0:
            ret_val = {'ge': ["3+", "2", "1", "0"], 'lt': []}
    # Stray debug prints ("122", "129", ...) removed.
    return ret_attr, ret_val
def display(tree):
    """Render a decision (sub)tree as ASCII art.

    Returns: (list of text lines, total width, height, horizontal coordinate
    of the root).
    """
    # Leaf node: a single line "<class> (<prob>)".
    if hasattr(tree, 'pred_class'):
        line = f'{tree.pred_class} ' + "({:.2f})".format(tree.prob)
        width = len(line)
        height = 1
        middle = width // 2
        return [line], width, height, middle
    # Internal node: lay out both subtrees, then draw the split label between them.
    if hasattr(tree, 'child_ge'):
        left, n, p, x = display(tree.child_ge)
    if hasattr(tree, 'child_lt'):
        right, m, q, y = display(tree.child_lt)
    if hasattr(tree, 'pred_class'):
        # BUG FIX: was "'%s (%s)' % tree.pred_class % tree.prob", which raises
        # TypeError — both values must be supplied as one tuple. (This branch
        # is unreachable for pure leaves, which return above; kept for safety.)
        s = '%s (%s)' % (tree.pred_class, tree.prob)
    else:
        attr, val = rollback_data(
            tree.test_attr_name, tree.test_attr_threshold)
        if type(val) == type({}):
            # Categorical split: list the labels on each side.
            ge = "".join(f'{label}, ' for label in val['ge'])
            lt = "".join(f'{label}, ' for label in val['lt'])
            s = f'<----({attr} == {ge[:-2]})--' + f'({attr} == {lt[:-2]})---->'
        else:
            # Numeric split: show the threshold on both sides.
            s = f'<----({attr} >=' + " {:.2f})--".format(
                val) + f'({attr} <' + " {:.2f})---->".format(val)
    u = len(s)
    first_line = (x + 1) * ' ' + (n - x - 1) * \
        '_' + s + y * '_' + (m - y) * ' '
    second_line = x * ' ' + '/' + \
        (n - x - 1 + u + y) * ' ' + '\\' + (m - y - 1) * ' '
    # Pad the shorter subtree so both line lists have equal height.
    if p < q:
        left += [n * ' '] * (q - p)
    elif q < p:
        right += [m * ' '] * (p - q)
    zipped_lines = zip(left, right)
    lines = [first_line, second_line] + \
        [a + u * ' ' + b for a, b in zipped_lines]
    return lines, n + m + u, max(p, q) + 2, n + u // 2
def train_test_split(examples, test_perc):
    """Randomly split the data set into (train, test) lists.

    Args:
        examples: list of examples to split
        test_perc: fraction of examples to place in the test set
    """
    n_test = round(test_perc * len(examples))
    permuted = random.sample(examples, len(examples))
    train, test = permuted[n_test:], permuted[:n_test]
    return train, test
class TreeNodeInterface():
    """Simple "interface" to ensure both types of tree nodes must have a classify() method."""
    def classify(self, example):
        # Intentionally a no-op; DecisionNode and LeafNode provide the real
        # implementations.
        pass
class DecisionNode(TreeNodeInterface):
    """An internal decision-tree node that routes examples by a threshold test."""

    def __init__(self, test_attr_name, test_attr_threshold, child_lt, child_ge, child_miss):
        """Store the split attribute, threshold and the three subtrees.

        Args:
            test_attr_name: column name of the attribute used to split the data
            test_attr_threshold: value used for splitting
            child_lt: subtree for examples whose attribute value is below the threshold
            child_ge: subtree for examples whose attribute value is at or above it
            child_miss: subtree for examples missing a value for the attribute
        """
        self.test_attr_name = test_attr_name
        self.test_attr_threshold = test_attr_threshold
        self.child_ge = child_ge
        self.child_lt = child_lt
        self.child_miss = child_miss

    def classify(self, example):
        """Route *example* to the matching subtree and return its (label, probability).

        Args:
            example: a dictionary { attr name -> value } representing a data instance
        """
        value = example[self.test_attr_name]
        if value is None:
            branch = self.child_miss
        elif value < self.test_attr_threshold:
            branch = self.child_lt
        else:
            branch = self.child_ge
        return branch.classify(example)

    def __str__(self):
        return "test: {} < {:.4f}".format(self.test_attr_name, self.test_attr_threshold)
class LeafNode(TreeNodeInterface):
    """A decision-tree leaf holding the majority-class prediction."""

    def __init__(self, pred_class, pred_class_count, total_count):
        """Store the predicted class plus the counts backing its probability.

        Args:
            pred_class: class label for the majority class this leaf represents
            pred_class_count: number of training instances represented by this leaf node
            total_count: total number of training instances used to build the whole tree
        """
        self.pred_class = pred_class
        self.pred_class_count = pred_class_count
        self.total_count = total_count
        # Estimated probability of the predicted class.
        self.prob = pred_class_count / total_count

    def classify(self, example):
        """Return (label, probability) — identical for every example.

        Args:
            example: a dictionary { attr name -> value } representing a data instance
        """
        return self.pred_class, self.prob

    def __str__(self):
        return "leaf {} {}/{}={:.2f}".format(
            self.pred_class, self.pred_class_count, self.total_count, self.prob)
class DecisionTree:
    """Class representing a decision tree model."""

    def __init__(self, examples, id_name, class_name, min_leaf_count=1):
        """Constructor for the decision tree model. Calls learn_tree().
        Args:
            examples: training data to use for tree learning, as a list of dictionaries
            id_name: the name of an identifier attribute (ignored by learn_tree() function)
            class_name: the name of the class label attribute (assumed categorical)
            min_leaf_count: the minimum number of training examples represented at a leaf node
        """
        self.id_name = id_name
        self.class_name = class_name
        self.min_leaf_count = min_leaf_count
        # build the tree!
        self.root = self.learn_tree(examples)

    def learn_tree(self, examples):
        """Build the decision tree based on entropy and information gain.
        Args:
            examples: training data to use for tree learning, as a list of dictionaries. The
                attribute stored in self.id_name is ignored, and self.class_name is consided
                the class label.
        Returns: a DecisionNode or LeafNode representing the tree
        """
        # Pure node (zero entropy): all examples share one label, emit a leaf.
        if self.entropy(examples) == 0:
            max_info = self.major_label(examples)
            return LeafNode(max_info['max_label'], max_info['count'], max_info['total'])
        else:
            split = self.best_split(examples)
            # No split satisfied min_leaf_count: stop with a majority leaf.
            if split['leaf_node']:
                max_info = self.major_label(examples)
                return LeafNode(max_info['max_label'], max_info['count'], max_info['total'])
            # NOTE(review): debug print left in — pollutes stdout during training.
            print(split['attr_name'], split['threshold'])
            # Missing-value examples follow a randomly chosen side in both
            # branches below.
            if len(split['miss_list']) < self.min_leaf_count:
                return DecisionNode(split['attr_name'], split['threshold'], self.learn_tree(split['less_than_list']), self.learn_tree(split['ge_list']), self.learn_tree(random.choice([split['ge_list'], split['less_than_list']])))
            # NOTE(review): max_info is computed but never used on this path.
            max_info = self.major_label(split['ge_list']) if len(split['ge_list']) >= len(
                split['less_than_list']) else self.major_label(split['less_than_list'])
            return DecisionNode(split['attr_name'], split['threshold'], self.learn_tree(split['less_than_list']), self.learn_tree(split['ge_list']), self.learn_tree(random.choice([split['ge_list'], split['less_than_list']])))

    def classify(self, example):
        """Perform inference on a single example.
        Args:
            example: the instance being classified
        Returns: a tuple containing a class label and a probability
        """
        return self.root.classify(example)

    def best_split(self, list):
        """Search every (attribute, observed value) pair for the threshold with
        the highest information gain, honoring min_leaf_count.

        Args:
            list: examples to split (NOTE(review): parameter shadows the builtin `list`)
        Returns: dict with keys 'ge_list', 'less_than_list', 'miss_list',
        'attr_name', 'threshold' and 'leaf_node' (True when no usable split exists).
        """
        info_gain = 0
        max_info_gain = -math.inf
        parent_entropy = 0
        ge_child_entropy = 0
        less_than_child_entropy = 0
        best_split_dict = {}
        for attr_name in list[0].keys():
            # Never split on the identifier or the class label itself.
            if attr_name == self.id_name or attr_name == self.class_name:
                continue
            attr_val_list = [li[attr_name] for li in list]
            for test_threshold in attr_val_list:
                ge_list = []
                less_than_list = []
                miss_list = []
                parent_entropy = self.entropy(list)
                if test_threshold == None:
                    continue
                # Partition examples around the candidate threshold; missing
                # values go to their own bucket.
                for e in list:
                    if e[attr_name] == None:
                        miss_list.append(e)
                    elif e[attr_name] >= test_threshold:
                        ge_list.append(e)
                    else:
                        less_than_list.append(e)
                # Weighted child entropies.
                ge_child_entropy = len(ge_list)/len(list) * \
                    self.entropy(ge_list)
                less_than_child_entropy = (
                    len(less_than_list)/len(list)) * self.entropy(less_than_list)
                miss_child_entropy = (
                    len(miss_list)/len(list)) * self.entropy(miss_list)
                info_gain = parent_entropy - \
                    (ge_child_entropy + less_than_child_entropy)
                # NOTE(review): comparing against max_info_gain + miss_child_entropy
                # (rather than subtracting miss entropy from the gain) looks odd —
                # confirm this penalty formulation is intended.
                if (info_gain > max_info_gain + miss_child_entropy):
                    # Reject splits that produce an undersized child; remember
                    # whether any valid split has been seen so far.
                    if len(ge_list) < self.min_leaf_count or len(less_than_list) < self.min_leaf_count:
                        if 'threshold' in best_split_dict.keys():
                            best_split_dict['leaf_node'] = False
                            continue
                        else:
                            best_split_dict['leaf_node'] = True
                            continue
                    max_info_gain = info_gain
                    best_split_dict['ge_list'] = ge_list
                    best_split_dict['less_than_list'] = less_than_list
                    best_split_dict['miss_list'] = miss_list
                    best_split_dict['attr_name'] = attr_name
                    best_split_dict['threshold'] = test_threshold
        # Degenerate best split (gain equals parent entropy, or an undersized
        # child slipped through) => caller should emit a leaf instead.
        if(max_info_gain == parent_entropy or len(best_split_dict['ge_list']) < self.min_leaf_count or len(best_split_dict['less_than_list']) < self.min_leaf_count):
            best_split_dict['leaf_node'] = True
        else:
            best_split_dict['leaf_node'] = False
        return best_split_dict

    def entropy(self, list):
        """Entropy of the 'Y'/'N' class labels in *list* (natural log, via
        log_func); empty input yields 0.
        """
        entropy = 0
        if(len(list) == 0):
            return 0
        label_list = [li[self.class_name] for li in list]
        yes = label_list.count("Y")
        no = label_list.count("N")
        yes_prob = yes/len(list)
        no_prob = no/len(list)
        entropy = -(yes_prob*self.log_func(yes_prob) +
                    no_prob*self.log_func(no_prob))
        return entropy

    def log_func(self, num):
        """math.log with the 0*log(0) = 0 convention used by entropy()."""
        if num == 0:
            return 0
        else:
            return math.log(num)

    def major_label(self, list):
        """Return the majority label among 'Y'/'N' with its count and the total.

        Returns: {'max_label', 'count', 'total'} (ties go to 'N').
        """
        label_list = [li[self.class_name] for li in list]
        yes = label_list.count("Y")
        no = label_list.count("N")
        max_info = max([yes, no])
        if max_info == yes:
            return {'max_label': "Y", "count": yes, "total": len(list)}
        else:
            return {'max_label': "N", "count": no, "total": len(list)}

    def __str__(self):
        """String representation of tree, calls _ascii_tree()."""
        # NOTE(review): _ascii_tree is not defined anywhere in this class, so
        # str(tree) will raise AttributeError — presumably meant to call the
        # module-level display() helper; confirm.
        ln_bef, ln, ln_aft = self._ascii_tree(self.root)
        return "\n".join(ln_bef + [ln] + ln_aft)
#############################################
if __name__ == '__main__':
    path_to_csv = './data/train.csv'
    class_attr_name = 'Loan_Status'
    id_attr_name = 'Loan_ID'
    min_examples = 10  # minimum number of examples for a leaf node
    # read in the data
    examples = clean_data(read_data(path_to_csv))
    train_examples, test_examples = train_test_split(examples, 0.15)
    test_data = clean_data(read_data('./data/test.csv'))
    # learn a tree from the training set
    tree = DecisionTree(train_examples, id_attr_name,
                        class_attr_name, min_examples)
    # test the tree on the test set and see how we did
    correct = 0
    # NOTE(review): `ordering` is never used below.
    ordering = ['Y', 'N']  # used to count "almost" right
    # Confusion counts keyed by (actual, predicted).
    test_act_pred = {}
    for example in test_examples:
        actual = example[class_attr_name]
        pred, prob = tree.classify(example)
        print("{:30} pred {:15} ({:.2f}), actual {:15} {}".format(example[id_attr_name] + ':',
                                                                  "'" + pred + "'", prob,
                                                                  "'" + actual + "'",
                                                                  '*' if pred == actual else ''))
        if pred == actual:
            correct += 1
        test_act_pred[(actual, pred)] = test_act_pred.get(
            (actual, pred), 0) + 1
    # Predictions for the unlabeled test file (no accuracy available).
    print("\n\n\n\nTEST DATA\n\n")
    for example in test_data:
        pred, prob = tree.classify(example)
        print("{:30} pred {:15} ({:.2f})".format(example[id_attr_name] + ':',
                                                 "'" + pred + "'", prob,))
    print("\naccuracy: {:.2f}".format(correct/len(test_examples)))
    print("\ncheck out the tree.txt file for tree visualization")
    # visualize the tree in sweet, 8-bit text and store it in tree.txt file
    if os.path.exists("tree.txt"):
        os.remove("tree.txt")
    f = open("tree.txt", "x")
    f.write("\n\n")
    for line in display(tree.root)[0]:
        f.write(line + "\n")
    f.close()
|
"""
Plot spatial noise: cell position vs. CV^2
Copyright (C) 2017 Ahmet Ay, Dong Mai, Soo Bin Kwon, Ha Vu
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys, shared, os
import numpy, math
import matplotlib.pyplot as plt
import xlrd, xlwt
from xlrd import XLRDError
from matplotlib import rc # text style
rc('text', usetex=True) # activate latex text rendering
gene_colors = ['b','r','g'] # her1, her7, her
expression_colors = ['#2A6EFF','m','#008d74'] # low (sky blue), medium (pink/purple), high (green)
region_colors = ['#2A6EFF','m','#008d74'] # posterior (sky blue), middle (pink/purple), anterior (green)
def determineTickInterval(r, l):  # determine tick interval given a range (r)
    """Return the smallest candidate interval that yields fewer than *l* ticks
    over range *r*; falls back to 1 when no candidate is small enough.

    Args:
        r: range
        l: limit (increase l for more ticks)
    """
    candidates = (0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100)
    return next((candidate for candidate in candidates if r / candidate < l), 1)
def updateXTicklabels(ax):
    """Re-render axis tick labels: x ticks as comma-grouped integers, y ticks
    as one-decimal floats; also pads the x tick labels slightly.
    """
    ax.set_xticklabels([format(tick, r',.0f') for tick in ax.get_xticks()])  # int
    ax.tick_params(axis='x', pad=5)
    ax.set_yticklabels([format(tick, r',.1f') for tick in ax.get_yticks()])  # float
def separateThreeGroups(all_mean, slice_mean, slice_cv_squared, num_slices):
    """Partition per-slice data into three expression-level groups.

    Thresholds are derived from the sorted overall means: the mean of two
    values at each tercile boundary, plus the global maximum.

    Returns: (groups_cv_squared, groups_mean), each indexed as
    [slice][group: low/medium/high][embryo].
    """
    group_size = int(math.floor(len(all_mean) / 3))
    ordered = sorted(all_mean)
    thresholds = [numpy.mean(ordered[group_size * (g + 1):group_size * (g + 1) + 2])
                  for g in range(2)]
    thresholds.append(max(ordered))
    # Pre-build [slice][group][...] containers.
    groups_cv_squared = [[[] for _ in range(3)] for _ in range(num_slices)]
    groups_mean = [[[] for _ in range(3)] for _ in range(num_slices)]
    # Drop each measurement into the first group whose threshold covers it.
    for s in range(num_slices):
        for e in range(len(slice_mean[s])):
            for g, bound in enumerate(thresholds):
                if slice_mean[s][e] <= bound:
                    groups_cv_squared[s][g].append(slice_cv_squared[s][e])
                    groups_mean[s][g].append(slice_mean[s][e])
                    break
    return groups_cv_squared, groups_mean
def plot_grouped(ax, slice_cv_squared, groups_cv_squared, num_slices): # plot cell position vs. CV^2 with three separate expression groups
    """Scatter mean CV^2 per cell position for each of the three expression
    groups, with 2-standard-error bars, and attach a three-entry legend.

    NOTE(review): the slice_cv_squared parameter is never used in this body.
    """
    ymax = 0
    for j in range(num_slices): # cell position
        for k in range(3): # low, medium, high expression
            if len(groups_cv_squared[j][k])>0: # enough data to plot
                ax.scatter(j, numpy.mean(groups_cv_squared[j][k]), c=expression_colors[k], s = 22, edgecolors='none')
                # Error bar = 2 standard errors of the mean.
                ax.errorbar(j, numpy.mean(groups_cv_squared[j][k]),
                yerr=2*numpy.std(groups_cv_squared[j][k])/math.sqrt(len(groups_cv_squared[j][k])), ls='none',c=expression_colors[k], capsize=2)
                # Track the tallest point + error bar for the y-axis limit.
                ymax = max(ymax, numpy.mean(groups_cv_squared[j][k])+2*numpy.std(groups_cv_squared[j][k])/math.sqrt(len(groups_cv_squared[j][k])))
    ax.xaxis.set_ticks(numpy.arange(0,num_slices+1,determineTickInterval(num_slices,6)))
    ax.set_xlim(-1,num_slices)
    ax.set_xlabel(r"Cell position (posterior - anterior)")
    ax.yaxis.set_ticks(numpy.arange(0,ymax+1,determineTickInterval(ymax+1,6)))
    ax.set_ylim(0,ymax*1.05)
    ax.tick_params(direction='in')
    updateXTicklabels(ax)
    ax.set_ylabel(r"$\textsf{CV}^{\textsf{\small{2}}}$ \textit{(her)}")
    # Figure legend
    low = plt.Line2D(range(1), range(1), color="w", marker='o', markerfacecolor=expression_colors[0], \
    markeredgecolor=expression_colors[0], markersize=4)
    medium = plt.Line2D(range(1), range(1), color="w", marker='o', markerfacecolor=expression_colors[1], \
    markeredgecolor=expression_colors[1], markersize=4)
    high = plt.Line2D(range(1), range(1), color="w", marker='o', markerfacecolor=expression_colors[2], \
    markeredgecolor=expression_colors[2], markersize=4)
    ax.legend([low,medium,high],[r"Low expression",r"Medium expression",r"High expression"], \
    numpoints=1, loc=9, bbox_to_anchor=(0.45,1.15), ncol=3, fontsize = 10, \
    handletextpad = 0.05, labelspacing = 0.05, columnspacing = 0.08)
def plot(ax, gene, slice_cv_squared,num_slices): # plot cell position vs. CV^2
    """Scatter mean CV^2 per cell position for one gene (index into
    slice_cv_squared), with 2-standard-error bars.
    """
    for j in range(num_slices):
        ax.scatter(j, numpy.mean(slice_cv_squared[gene][j]), c='#722AFF', s = 22, edgecolors='none')
        # Error bar = 2 standard errors of the mean.
        ax.errorbar(j, numpy.mean(slice_cv_squared[gene][j]),
        yerr=2*numpy.std(slice_cv_squared[gene][j])/math.sqrt(len(slice_cv_squared[gene][j])), ls='none', c='#722AFF',capsize=2)
    ax.xaxis.set_ticks(numpy.arange(0,num_slices+1,determineTickInterval(num_slices,6)))
    ax.set_xlim(-1,num_slices)
    ax.set_xlabel(r"Cell position (posterior - anterior)")
    start,end = ax.get_ylim()
    ax.yaxis.set_ticks(numpy.arange(0,end+1,determineTickInterval(end,5)))
    ax.set_ylim(0,end)
    ax.tick_params(direction='in')
    updateXTicklabels(ax)
def write(sheet, slice_mean, slice_cv_squared, groups_mean, groups_cv_squared, num_slices, num_groups): # write data to Excel
    """Write one row per cell position: overall mean/CV^2/stderr, then a
    six-column block (mean, stderr, CV^2 mean, stderr, count, spacer) for
    each expression group.
    """
    labels = ['Cell position','Mean','CV^2','Std error','# slices','',
              'Low mean','Std error','CV^2 mean','Std error','# slices','',
              'Medium mean','Std error','CV^2 mean','Std error','# slices','',
              'High mean','Std error','CV^2 mean','Std error','# slices','',
              'Raw data (CV^2)']
    # Header row.
    for i in range(len(labels)):
        sheet.write(0,i,labels[i])
    for i in range(num_slices): # cell position
        line = []
        if len(slice_cv_squared[i])>0:
            line = [i, numpy.mean(slice_mean[i]), numpy.mean(slice_cv_squared[i]), numpy.std(slice_cv_squared[i])/math.sqrt(len(slice_cv_squared[i])),len(slice_mean[i]),""]
        for j in range(num_groups): # add data based on three expression groups to the right
            if len(groups_mean[i][j])>0: # enough data to write
                line.append(numpy.mean(groups_mean[i][j]))
                line.append(numpy.std(groups_mean[i][j])/math.sqrt(len(groups_mean[i][j])))
                line.append(numpy.mean(groups_cv_squared[i][j]))
                line.append(numpy.std(groups_cv_squared[i][j])/math.sqrt(len(groups_cv_squared[i][j])))
                line.append(len(groups_mean[i][j]))
                line.append("")
            else:
                # Empty group: blank stats, explicit zero count, spacer.
                line += [""]*4 + [0] + [""]
        for j in range(len(line)):
            sheet.write(i+1,j,line[j])
        # we comment out the following piece of code because this raw data is rather unnecessary.
        # we will write raw data for her mRNA levels into separate file
        '''
        for j in range(len(slice_cv_squared[i])):
            sheet.write(i+1,j+6+num_groups*6,slice_cv_squared[i][j]) # write all CV^2 values within the same cell position
        '''
def writeSPSS(spss_worksheets,slice_cv_squared): # write data to Excel for SPSS statistical analysis
    """Write one (region, CV^2) row per measurement to each of the three
    per-gene worksheets; region 1 = posterior half, 2 = anterior half.
    """
    labels = ['Region(L/R)','CV^2']
    for m in range(3): # her1/her7/her
        ws = spss_worksheets[m]
        for (i, label) in enumerate(labels):
            ws.write(0, i, label)
        row_index = 1
        for i in range(len(slice_cv_squared[m])): # cell position
            for j in range(len(slice_cv_squared[m][i])):
                # Posterior vs. anterior
                ws.write(row_index,0,1) if i<len(slice_cv_squared[m])/2 else ws.write(row_index,0,2)
                ws.write(row_index,1,slice_cv_squared[m][i][j])
                row_index += 1
def plot_heatmap_cv_her_pos(slice_cv2, slice_mean, state_name, save_file_name, num_slices):
    """Scatter mean mRNA level vs. CV^2, colored by which third of the embryo
    (posterior/middle/anterior) the slice falls in, and save as a PNG.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # One color per third of the embryo.
    heat_map_colors = ['#FF7A33', '#FFF333', '#C1FF33']
    current_pos_bin = 0
    for i in range(num_slices):
        # Advance to the next color once a third of the slices is passed.
        if (i >= int(float(num_slices) * float((current_pos_bin + 1)) / float(3))):
            current_pos_bin += 1
        if len(slice_cv2[i]) > 0:
            ax.scatter(slice_mean[i], slice_cv2[i], color = heat_map_colors[current_pos_bin], alpha = 0.2, marker = '.')
    ax.set_xlim(0, 200)
    ax.set_xlabel('$\mathit{' + state_name + '}$ mRNAs')
    ax.set_ylabel('$CV^2$')
    fig.savefig(save_file_name, format = "png", dpi = 300)
def plot_heatmap_binned_cvHerPos(slice_cv2, slice_mean, state_name, save_file_name, \
num_slices, interval, max_her, write_spss_ws, write_sum_ws):
    """Bin measurements by mRNA level and embryo region, plot binned mean vs.
    mean CV^2 with error bars, and write both raw (SPSS) and summary rows to
    the provided worksheets. Only posterior and anterior regions are
    plotted/written; the middle third is binned but skipped.
    """
    # create new excel file
    labels = ["bin_index", "Pos=0/Ant=2", "cv2", "mean"]
    for i, label in enumerate(labels):
        write_spss_ws.write(0,i,label)
    labels = ["bin_index", "Pos=0/Ant=2", "mean_her", "mean_cv2", "her_ste", "cv2_ste"]
    for i, label in enumerate(labels):
        write_sum_ws.write(0,i,label)
    # create new plot
    fig = plt.figure()
    ax = fig.add_subplot(111)
    heatmap_regions = 3
    current_pos_bin = 0
    # Number of expression-level bins of width `interval` up to max_her.
    num_bins = int(max_her / interval)
    binned_mean = [] #[bin][region][data point]
    binned_cv2 = []
    for i in range(num_bins):
        binned_mean.append([[], [], []]) # posterior, middle, anterior
        binned_cv2.append([[], [], []])
    ### BIN DATA
    for i in range (num_slices):# slice
        # if one third of the embryo is over, we move onto a new region
        if (i >= int(float(num_slices) * float((current_pos_bin + 1)) / float(3))):
            current_pos_bin += 1
        for j in range(len(slice_cv2[i])):# embryo having such a slice
            # Find the expression bin for this measurement; values above the
            # last bin edge are discarded.
            bin_index = 0
            add = True
            while bin_index < num_bins:
                if (slice_mean[i][j] <= ((bin_index + 1) * interval)):
                    break
                else:
                    bin_index += 1
            if bin_index == num_bins:
                add = False
            if add:
                (binned_mean[bin_index][current_pos_bin]).append(slice_mean[i][j])
                (binned_cv2[bin_index][current_pos_bin]).append(slice_cv2[i][j])
    ### WRITE spss DATA
    row_index = 1
    for i in range(num_bins):
        for j in range(heatmap_regions):
            if (len(binned_mean[i][j]) > 0) and j != 1: # right now we only write down values of posterior and anterior, we don't need the middle part
                for k in range(len(binned_cv2[i][j])):
                    line = [i, j, binned_cv2[i][j][k], binned_mean[i][j][k]]
                    for l, data_item in enumerate(line):
                        write_spss_ws.write(row_index, l, data_item)
                    row_index += 1
    ### PLOT and WRITE plot DATA
    xmax = -float('inf')
    ymax = -float('inf')
    row_index = 1
    for i in range(num_bins):
        for j in range(heatmap_regions):
            if (len(binned_mean[i][j]) > 0) and j != 1: # right now we skip plotting the middle part of the embryo, we only plot the posterior and the anterior
                # calculate number
                mean = numpy.mean(binned_mean[i][j])
                cv2 = numpy.mean(binned_cv2[i][j])
                xerr = numpy.std(binned_mean[i][j]) / math.sqrt(len(binned_mean[i][j])) # one standard error
                yerr = numpy.std(binned_cv2[i][j]) / math.sqrt(len(binned_cv2[i][j]))
                # plot (error bars span 2 standard errors each side)
                ax.scatter(mean, cv2, color = region_colors[j])
                ax.errorbar(mean, cv2, xerr = 2 * xerr, \
                yerr = 2 * yerr, color = region_colors[j], capsize=2)
                xmax = max(xmax, mean + xerr)
                ymax = max(ymax, cv2 + yerr)
                # write summary data
                line = [i, j, mean, cv2, xerr, yerr]
                for k, line_data in enumerate(line):
                    write_sum_ws.write(row_index, k, line_data)
                row_index += 1
    ### Fix the figures' ticks
    ax.set_xlim (0, xmax + 5)
    ax.set_ylim (0, ymax + 0.1)
    ax.set_xticks([(i * interval) for i in range(num_bins + 1)])
    ax.set_yticks(numpy.arange(0, ymax + 0.1, determineTickInterval(ymax, 5)))
    updateXTicklabels(ax)
    ax.set_xlabel('Total $\mathit{' + state_name + '}$ mRNA')
    ax.set_ylabel('$CV^2$')
    # Figure legend
    posterior = plt.Line2D(range(1), range(1), color="w", marker='o', markerfacecolor=region_colors[0], markeredgecolor=region_colors[0], markersize=8)
    anterior = plt.Line2D(range(1), range(1), color="w", marker='o', markerfacecolor=region_colors[2], markeredgecolor=region_colors[2], markersize=8)
    ax.legend([posterior, anterior], [r"Posterior",r"Anterior"], loc=9, bbox_to_anchor=(0.5,1.15), ncol=2, numpoints=1, fontsize=12)
    # adjust the plot
    fig.subplots_adjust(left=0.1, bottom=0.1, right=.975, top=.85, wspace=None, hspace=None)
    fig.savefig(save_file_name, format = "png", dpi = 300)
def write_raw_her_cv2(wb, groups_mean_her, group_cv_squared_her, num_slices, num_groups, group_names):
    """Write raw per-slice CV^2 values to one worksheet per expression group.

    NOTE(review): groups_mean_her is accepted but never used in this body.
    """
    assert num_groups == len(group_names), "plot_CVsquared.py: Number of groups is not the same as the number of sheets names provided"
    labels = ["slice_index", "cv2"]
    for i in range(num_groups):
        row_index = 1
        # create worksheet and write labels
        ws = wb.add_sheet(group_names[i])
        for j, label in enumerate(labels):
            ws.write(0, j, label)
        # write data
        for k in range(num_slices): # for each slide
            cv2_in_group = group_cv_squared_her[k][i] # get the data of this group in this slide
            for j, cv2 in enumerate(cv2_in_group):
                ws.write(row_index, 0, k) # write slide index
                ws.write(row_index, 1, cv2) # write cv2 in this group
                row_index += 1
def main():
    """Entry point: read per-embryo slice spreadsheets, compute the CV^2 of
    her1/her7/her mRNA counts along the anterior-posterior axis, plot the
    results, and write all numbers to an Excel workbook.

    Command line: plot_CVsquared.py <num embryos> <slices.xls ...> <out dir>
    """
    # Check input
    if not shared.isInt(sys.argv[1]):
        print('plot_CVsquared.py: Number of embryos must be an integer.')
        exit(1)
    elif int(sys.argv[1])<=0:
        print('plot_CVsquared.py: Number of embryos must be larger than zero.')
        exit(1)
    num_embryos = int(sys.argv[1])
    if len(sys.argv)==num_embryos+3:
        inputs = sys.argv[2:num_embryos+2]
        directory = sys.argv[num_embryos+2]
    else:
        usage()  # exits, so 'inputs'/'directory' are always bound below
    shared.ensureDir(directory)
    slice_mean_her1 = [] # two-dimensional array [slice][embryo having that slice]
    slice_mean_her7 = []
    slice_mean_her = []
    slice_cv_squared_her1 = [] # two-dimensional array [slice][embryo having that slice]
    slice_cv_squared_her7 = []
    slice_cv_squared_her = []
    num_slices = [] # slice count found in each worksheet (per embryo/region)
    # 1D: all mean mRNA levels of all slices, regardless of position
    all_mean_her1 = []
    all_mean_her7 = []
    all_mean_her = []
    for i in range(len(inputs)): # embryo
        # Open embryo data
        if not os.path.isfile(inputs[i]):
            print('plot_CVsquared.py: File "'+inputs[i]+'" does not exist.')
            exit(1)
        try:
            workbook = xlrd.open_workbook(inputs[i],'r')
        except XLRDError as e:
            print('plot_CVsquared.py: Cannot open file "'+inputs[i]+'".')
            exit(1)
        worksheets = workbook.sheets()
        for j in range(len(worksheets)): # region
            worksheet = worksheets[j]
            file_len = worksheet.nrows
            num_slices.append(file_len-1)  # minus the header row
            for k in range(1,file_len): # slice (cell position)
                row = list(worksheet.row(k))
                # Grow the per-slice arrays lazily so they cover position k
                if len(slice_mean_her1)<k:
                    slice_mean_her1.append([])
                    slice_cv_squared_her1.append([])
                if len(slice_mean_her7)<k:
                    slice_mean_her7.append([])
                    slice_cv_squared_her7.append([])
                if len(slice_mean_her)<k:
                    slice_mean_her.append([])
                    slice_cv_squared_her.append([])
                if isinstance(row[1].value,float): # valid slice
                    num_cells = int(row[1].value) # number of cells within this slice
                    her1 = []
                    her7 = []
                    her = []
                    # Cell data starts at column 8, two columns (her1, her7)
                    # per cell -- layout of slices.xls; TODO confirm.
                    for l in range(num_cells): # background subtraction
                        # Take the cell's data only if its expression levels
                        # are positive after background subtraction
                        if row[8+2*l].value>0 and row[8+2*l+1].value>0:
                            her1.append(row[8+2*l].value)
                            her7.append(row[8+2*l+1].value)
                            her.append(row[8+2*l].value + row[8+2*l+1].value)
                    if len(her1)>=3: # valid only if there are more than 2 cells
                        # Mean
                        her1_mean = numpy.mean(her1)
                        her7_mean = numpy.mean(her7)
                        her_mean = numpy.mean(her)
                        slice_mean_her1[k-1].append(her1_mean) # store mean for this cell position
                        slice_mean_her7[k-1].append(her7_mean)
                        slice_mean_her[k-1].append(her_mean)
                        all_mean_her1.append(her1_mean) # store mean regardless of cell position
                        all_mean_her7.append(her7_mean)
                        all_mean_her.append(her_mean)
                        # CV^2 = (std / mean)^2
                        slice_cv_squared_her1[k-1].append((numpy.std(her1)/her1_mean)**2)
                        slice_cv_squared_her7[k-1].append((numpy.std(her7)/her7_mean)**2)
                        slice_cv_squared_her[k-1].append((numpy.std(her)/her_mean)**2)
    # Determine number of slices with at least 80% of embryos for analysis
    nSlices = sorted(num_slices)[int(num_embryos*0.2)]
    # Divide data into three groups based on average RNA level ---> 3D arrays [slice][group][embryo]
    groups_cv_squared_her1,groups_mean_her1 = separateThreeGroups(all_mean_her1,slice_mean_her1,slice_cv_squared_her1,nSlices)
    groups_cv_squared_her7,groups_mean_her7 = separateThreeGroups(all_mean_her7,slice_mean_her7,slice_cv_squared_her7,nSlices)
    groups_cv_squared_her,groups_mean_her = separateThreeGroups(all_mean_her,slice_mean_her,slice_cv_squared_her,nSlices)
    slice_cv_squared = [slice_cv_squared_her1,slice_cv_squared_her7,slice_cv_squared_her]
    groups_cv_squared = [groups_cv_squared_her1,groups_cv_squared_her7,groups_cv_squared_her]
    # Plot cell position vs. CV^2 with three separate expression groups (low, medium, high)
    fig = plt.figure(figsize=(4.5,4),dpi=300)
    plot_grouped(fig.add_subplot(111),slice_cv_squared_her, groups_cv_squared_her, nSlices)
    fig.subplots_adjust(left=0.15, bottom=0.15, right=0.95, top=0.9, wspace=None, hspace=0.25)
    fig.savefig(directory + "/" + "CVsquared_grouped_her.png", format = "png", dpi=300)
    # Plot cell position vs. CV^2 with all expression groups combined in her1
    fig = plt.figure(figsize=(4.5,4),dpi=300)
    ax_her1 = fig.add_subplot(111)
    plot(ax_her1, 0, slice_cv_squared, nSlices)
    ax_her1.set_ylabel(r"$\textsf{CV}^{\textsf{\small{2}}}$ \textit{(her1)}")
    fig.subplots_adjust(left=0.15, bottom=0.15, right=0.95, top=0.9, wspace=None, hspace=.3)
    fig.savefig(directory + "/" + "CVsquared_her1.png", format = "png", dpi=300)
    # Plot cell position vs. CV^2 with all expression groups combined in her7
    fig = plt.figure(figsize=(4.5,4),dpi=300)
    ax_her7 = fig.add_subplot(111)
    plot(ax_her7, 1, slice_cv_squared, nSlices)
    ax_her7.set_ylabel(r"$\textsf{CV}^{\textsf{\small{2}}}$ \textit{(her7)}")
    fig.subplots_adjust(left=0.15, bottom=0.15, right=0.95, top=0.9, wspace=None, hspace=.3)
    fig.savefig(directory + "/" + "CVsquared_her7.png", format = "png", dpi=300)
    # Write data to Excel
    workbook = xlwt.Workbook(encoding="ascii")
    write(workbook.add_sheet("Her1"), slice_mean_her1, slice_cv_squared_her1, groups_mean_her1, groups_cv_squared_her1, nSlices, 3)
    write(workbook.add_sheet("Her7"), slice_mean_her7, slice_cv_squared_her7, groups_mean_her7, groups_cv_squared_her7, nSlices, 3)
    write(workbook.add_sheet("Her"), slice_mean_her, slice_cv_squared_her, groups_mean_her, groups_cv_squared_her, nSlices, 3)
    write_raw_her_cv2(workbook, groups_mean_her, groups_cv_squared_her, nSlices, 3, ["her_low", "her_medium", "her_high"])
    # Now, slice_cv_squared_her and slice_mean_her and their lot will be 2D arrays, in the following format:
    # [slice][embryo has this slice]
    # create binned_cv_her_heatmap.png
    interval = 15
    max_her = 120
    plot_heatmap_binned_cvHerPos(slice_cv_squared_her, slice_mean_her, 'her', \
        directory + '/binned_cv_her_heatmap.png', nSlices, interval, \
        max_her, workbook.add_sheet("binned_cv2_her_pos_spss"), workbook.add_sheet("binned_cv2_her_pos_summary"))
    # Write data for SPSS statistical analysis
    spss_worksheets = [workbook.add_sheet("spss_ANOVA_her1"), workbook.add_sheet("spss_ANOVA_her7"), workbook.add_sheet("spss_ANOVA_her")]
    writeSPSS(spss_worksheets,slice_cv_squared)
    workbook.save(directory + "/CVsquared.xls")
def usage():
    """Print the command-line usage message and abort with exit status 1."""
    lines = (
        "plot_CVsquared.py: Invalid command-line arguments",
        "Format: python plot_CVsquared.py <number of embryos> <first embryo's slice.xls> <second embryo's slice.xls> ... <last embryo's slice.xls> <output directory>",
        "Example: python plot_CVsquared.py 20 wildtypefulldataset/embryo1/slices.xls wildtypefulldataset/embryo2/slices.xls ... wildtypefulldataset/embryo20/slices.xls wildtypefulldataset",
    )
    print("\n".join(lines))
    exit(1)
main()
|
# -*- coding: utf-8 -*-
"""
File Name: spl_input
Description: ""
Author: Donny.fang
Date: 2020/6/4 14:14
"""
class SplInput(object):
    """Interactive provider of Spl pipeline commands.

    Wraps a single prompt so callers can swap in other input sources later.
    """

    def __init__(self):
        # Stateless: nothing to set up.
        pass

    def get_input(self):
        """Prompt on stdin and return the command the user typed."""
        return input("Spl cmd: ")
|
import argparse
import tensorflow as tf
import matplotlib.pyplot as plt
from datasets.dataset import build_dataset
import helpers

# Compute the streaming mean spectrogram value and a histogram of sequence
# lengths over the selected emotion-recognition training tfrecords.
parser = argparse.ArgumentParser()
parser.add_argument("--datasets", nargs="+", type=str, required=True)
args = parser.parse_args()

datasets = set(args.datasets)
data_files = []
# Collect the tfrecord shard paths for every requested dataset.
if "meld" in datasets:
    # data_files = ["data/meld_train.tfrecord-" + str(i) for i in range(36)]
    data_files += ["data/meld_train.tfrecord"]
if "tess" in datasets:
    data_files += ["data/tess_train.tfrecord-" + str(i) for i in range(2)]
if "savee" in datasets:
    data_files += ["data/savee_train.tfrecord-" + str(i) for i in range(1)]
if "crema" in datasets:
    data_files += ["data/crema_train.tfrecord-" + str(i) for i in range(1)]
print(data_files)

# TF1-style pipeline; positional args presumably (files, batch, buffer,
# shuffle/repeat flag) -- confirm against build_dataset's signature.
dataset = build_dataset([data_files], 1, 10, False)
iterator = dataset.make_one_shot_iterator()
next_item = iterator.get_next()
spectrogram = next_item[helpers.DATA_KEY]
mean_val = tf.reduce_mean(spectrogram)
length = tf.shape(spectrogram)[1]  # time axis length of the spectrogram
cur_mean = 0      # incremental mean of the per-example mean values
all_lens = []     # per-example lengths for the histogram
i = 0
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # don't grab all GPU memory at once
with tf.Session(config=config) as sess:
    while True:
        try:
            x,l = sess.run([mean_val, length])
        except tf.errors.OutOfRangeError:
            break  # one-shot iterator exhausted
        i+=1
        # Incremental mean update: avoids storing every value.
        cur_mean += (x - cur_mean)/i
        all_lens.append(l)
        print("{}: {} ".format(i, cur_mean), end="\r")
print("{}: {} ".format(i, cur_mean))
_, bins, _ = plt.hist(all_lens)
print(bins)
plt.show()
#!/usr/bin/python
import numpy
import sys
import os
from keras.models import Sequential
from keras.layers import Dense,Activation,Dropout
from keras.optimizers import RMSprop
from keras.utils import np_utils
from keras.models import load_model
out_classes=4   # four fizzbuzz output classes: number, fizz, buzz, fizzbuzz
batch_size=128  # training batch size (unused in this inference excerpt -- TODO confirm)
num_digits=10   # width of the binary encoding fed to the model
def fixx(n,fw):
    """Write the fizzbuzz result for one input line to an open file object.

    :param n: numeric string, possibly with a trailing newline (a raw line
              read from the input file)
    :param fw: writable text file object receiving the result line

    Multiples of 3 write 'fizz\\n', of 5 'buzz\\n', of 15 'fizzbuzz\\n';
    anything else is echoed verbatim (the original line text, so its own
    newline is preserved).  Replaces the original flag-variable (t=3/5/15)
    logic and its commented-out prints with a direct modulo chain --
    behavior is unchanged.
    """
    i = int(n)
    if i % 15 == 0:
        fw.write('fizzbuzz\n')
    elif i % 3 == 0:
        fw.write('fizz\n')
    elif i % 5 == 0:
        fw.write('buzz\n')
    else:
        fw.write(n)  # echo the original line, newline included
def software1(file):
    """Generate Software1.txt from *file* using the rule-based fizzbuzz.

    :param file: path to a text file with one integer per line

    Fix: the original opened both files without closing them on error;
    a ``with`` block now guarantees they are closed even if fixx raises.
    """
    with open(file, "r") as fp, open("Software1.txt", "w") as fw:
        for n in fp:
            fixx(n, fw)
    print('\nSoftware1.txt generated, logic based\n')
def bin_encode(i, num_digits=10):
    """Return *i* as a little-endian list of *num_digits* binary digits.

    Generalized: the width used to come from the module-level ``num_digits``
    constant (10); it is now an explicit parameter with the same default,
    so existing one-argument callers behave identically while new callers
    can encode wider numbers.
    """
    return [(i >> d) & 1 for d in range(num_digits)]
def fizz_buzz_pred(i, pred):
    """Translate a 4-class prediction vector into its fizzbuzz label.

    Index 0 means "the number itself"; indices 1/2/3 mean fizz/buzz/fizzbuzz.
    """
    labels = (str(i), "fizz", "buzz", "fizzbuzz")
    return labels[pred.argmax()]
def fizz_buzz(i):
    """Return the ground-truth fizzbuzz label for integer *i*."""
    label = ""
    if i % 3 == 0:
        label += "fizz"
    if i % 5 == 0:
        label += "buzz"
    return label if label else str(i)
def outModel(file):
    """Run the trained fizzbuzz model over every number in *file*.

    Writes one prediction per line to Software2.txt and reports accuracy
    against the ground-truth fizz_buzz() implementation.
    """
    model=load_model('model1')
    fp=open(file,"r")
    fw=open("Software2.txt","w")
    errors=0
    correct=0
    count=0
    for n in fp:
        i=int(n)
        count=count+1
        x=bin_encode(i)
        # The model expects a (1, 10) row of binary digits.
        y = model.predict(numpy.array(x).reshape(-1,10))
        print(fizz_buzz_pred(i,y))
        fw.write(fizz_buzz_pred(i,y)+'\n')
        if fizz_buzz_pred(i,y) == fizz_buzz(i):
            correct = correct + 1
        else:
            errors = errors + 1
    fp.close()
    fw.close()
    print("Errors :" , errors, " Correct :", correct)
    # NOTE(review): raises ZeroDivisionError when the input file is empty.
    print("Accuracy : ",(correct/count)*100,'%')
    print('\nSoftware2.txt generated, ML based')
def software2(file):
    """Run the ML-based fizzbuzz generator when a trained model exists."""
    if not os.path.isfile('model1'):
        print("\nModel doesn't exists")
        return
    print('\nModel exists')
    outModel(file)
def main():
    """Generate both fizzbuzz outputs (rule-based and ML-based) for one input file."""
    # NOTE(review): reads argv[2], so the input path must be the SECOND
    # command-line argument (argv[1] is ignored) -- confirm intended CLI.
    file=sys.argv[2]
    software1(file)
    software2(file)
main()
|
from neo4j.v1 import GraphDatabase, basic_auth
import csv
def mergeRelation(fileName):
    # Load leg rows from the CSV and wire each Leg node between its origin
    # spot's _out platform and its destination spot's _in platform.
    # NOTE(review): all Cypher in this module is built by string
    # concatenation -- quotes in the CSV break the query and allow injection.
    with open(fileName) as f:
        b = [{k: v for k, v in row.items()} for row in csv.DictReader(f, skipinitialspace=True)]
    for a in b:
        print a
        createLeg(a)
        # Edge weight on START_AT carries the leg's fare.
        createDirectedRelation('Platform', a['originSpot']+'_out', 'Leg', a['_id'], 'START_AT', '{weight:' + a['netFare'] + '}')
        createDirectedRelation('Leg', a['_id'], 'Platform', a['destinationSpot']+'_in', 'END_AT','')
def mergeNode(fileName):
    # Create a Station node per CSV row plus one _in and one _out Platform
    # node for every weekday, and connect them to the station.
    with open(fileName) as f:
        b = [{k: v for k, v in row.items()} for row in csv.DictReader(f, skipinitialspace=True)]
    for a in b:
        createStation(a)
        # One inbound platform per weekday.
        createPlatform(a['_id'] + '_in','Mo')
        createPlatform(a['_id'] + '_in','Tu')
        createPlatform(a['_id'] + '_in','We')
        createPlatform(a['_id'] + '_in','Th')
        createPlatform(a['_id'] + '_in','Fr')
        createPlatform(a['_id'] + '_in','Sa')
        createPlatform(a['_id'] + '_in','Su')
        # One outbound platform per weekday.
        createPlatform(a['_id'] + '_out','Mo')
        createPlatform(a['_id'] + '_out','Tu')
        createPlatform(a['_id'] + '_out','We')
        createPlatform(a['_id'] + '_out','Th')
        createPlatform(a['_id'] + '_out','Fr')
        createPlatform(a['_id'] + '_out','Sa')
        createPlatform(a['_id'] + '_out','Su')
        # Station <-> platform wiring; boarding edges carry a fixed weight.
        createDirectedRelation('Station', a['_id'], 'Platform', a['_id']+ '_in', 'HAS_PLATFORM', '')
        createDirectedRelation('Platform', a['_id']+'_in', 'Station', a['_id'], 'CAN_BOARD', '{weight: 50}')
        createDirectedRelation('Station', a['_id'], 'Platform', a['_id']+ '_out', 'HAS_PLATFORM','')
        createDirectedRelation('Platform', a['_id']+'_out', 'Station', a['_id'], 'CAN_BOARD', '{weight:50}')
        # createDirectedRelation('Platform', a['_id']+'_in', 'Platform', a['_id']+ '_out', 'CAN_TRANSFER_TO', '{weight: 25}')
        print a
def createStation(stationDict):
    # Insert one Station node with the attributes from a CSV row; the query
    # is echoed to stdout for debugging before being executed.
    print "CREATE (n:Station { SpotSubType: '" + stationDict['SpotSubType'] + "', _id: '" + stationDict['_id'] + "', cityId: '" + stationDict['cityId'] + "', countryCode: '" + stationDict['countryCode'] + "', lat:'" + stationDict['lat'] + "', lon: '" + stationDict['lon'] + "', name: '" + stationDict['name'] + "' });"
    session.run("CREATE (n:Station { SpotSubType: '" + stationDict['SpotSubType'] + "', _id: '" + stationDict['_id'] + "', cityId: '" + stationDict['cityId'] + "', countryCode: '" + stationDict['countryCode'] + "', lat:'" + stationDict['lat'] + "', lon: '" + stationDict['lon'] + "', name: '" + stationDict['name'] + "' });")
def createPlatform(platformID,weekDay):
    # Insert one Platform node keyed by id and weekday; query echoed first.
    print "CREATE (n:Platform { _id : '" + platformID + "', day : '" + weekDay + "' })"
    session.run("CREATE (n:Platform { _id : '" + platformID + "', day : '" + weekDay + "' })");
def createDirectedRelation(labelFirst, idFirst, labelSecond, idSecond, relationLabel, attributes):
    # Create a directed relationship (n)-[relationLabel attributes]->(m)
    # between two nodes matched by label + _id; query echoed first.
    print "MATCH (n:" + labelFirst + " {_id : '"+ idFirst + "'}),(m:"+ labelSecond +" {_id: '" + idSecond + "'}) CREATE (n)-[r:" + relationLabel + " " + attributes +"]->(m) RETURN r "
    session.run("MATCH (n:" + labelFirst + " {_id : '"+ idFirst + "'}),(m:"+ labelSecond +" {_id: '" + idSecond + "'}) CREATE (n)-[r:" + relationLabel + " " + attributes +"]->(m) RETURN r ")
def createLeg(legDict):
    # Insert one Leg node with schedule and fare attributes from a CSV row;
    # query echoed to stdout first.
    print "CREATE (n:Leg { departureTime: '" + legDict['departureTime'] + "', _id: '" + legDict['_id'] + "', arrivalTime: '" + legDict['arrivalTime'] + "', duration: '" + legDict['duration'] + "', netFare:'" + legDict['netFare'] + "', route: '" + legDict['route'] + "' });"
    session.run("CREATE (n:Leg { departureTime: '" + legDict['departureTime'] + "', _id: '" + legDict['_id'] + "', arrivalTime: '" + legDict['arrivalTime'] + "', duration: '" + legDict['duration'] + "', netFare:'" + legDict['netFare'] + "', route: '" + legDict['route'] + "' });")
# Connect to the local Neo4j instance and build the transit graph from the
# CSV exports: stations/platforms first, then legs and their relations.
# NOTE(review): credentials are hard-coded -- move to env/config for real use.
driver = GraphDatabase.driver("bolt://localhost:7687", auth=basic_auth("neo4j", "zephyr"))
session = driver.session()
mergeNode('nodes.csv')
mergeRelation('relations.csv')
session.close()
|
import webbrowser
class Movie():
    """Container for one movie's descriptive data with a trailer launcher."""

    def __init__(self, movie_title, movie_storyline,
                 poster_image, trailer_youtube,
                 movie_imdb, movie_release_date, movie_rating):
        """Store the supplied metadata on conventionally named attributes."""
        for attr, value in (
            ("title", movie_title),
            ("storyline", movie_storyline),
            ("poster_image_url", poster_image),
            ("trailer_youtube_url", trailer_youtube),
            ("imdb", movie_imdb),
            ("release_date", movie_release_date),
            ("rating", movie_rating),
        ):
            setattr(self, attr, value)

    def show_trailer(self):
        """Open this movie's trailer URL in the default web browser."""
        webbrowser.open(self.trailer_youtube_url)
|
#filename = konstruktor.py
from __future__ import print_function
#create class to define square length, width, and height
class Kotak(object):
    """A box defined by length (panjang), width (lebar) and height (tinggi)."""

    def __init__(self, p, l, t):
        """Store the three dimensions."""
        self.panjang = p
        self.lebar = l
        self.tinggi = t

    def HitungVolume(self):
        """Return the box volume: length * width * height."""
        return self.panjang * self.lebar * self.tinggi

    def __del__(self):
        # Destructor demo: announce when the object is reclaimed.
        print("Destruktor teks dipanggil")

    def CetakData(self):
        """Print the box dimensions followed by the computed volume."""
        print("Hitung volume kotak")
        for label, value in (
            ("Panjang: ", self.panjang),
            ("Lebar: ", self.lebar),
            ("Tinggi: ", self.tinggi),
            ("Volume: ", self.HitungVolume()),
        ):
            print(label, value)
def main_method():
    """Demo: build a box, print its data, then delete it to fire __del__."""
    box = Kotak(6, 4, 3)
    box.CetakData()
    del box  # trigger the destructor message
# Run the demo only when executed directly, not on import.
if __name__ == "__main__":
    main_method()
import wx
import ObjectListView as olv
import globals as gbl
import lib.ui_lib as uil
import lib.month_lib as ml
class TabPanel(wx.Panel):
    """Base tab panel: a filterable record list on the left and a
    detail editor plus assignment list on the right.

    Subclasses must implement set_columns(), add_model_layout() and
    get_owner_column() for their specific model type.
    """

    def __init__(self, parent, model_name):
        """Build the list and detail panels side by side.

        :param parent: containing window
        :param model_name: label used on the Update/Drop buttons
        """
        wx.Panel.__init__(self, parent)
        self.SetBackgroundColour(wx.Colour(gbl.COLOR_SCHEME.pnlBg))
        layout = wx.BoxSizer(wx.HORIZONTAL)
        self.model_name = model_name
        list_panel = self.build_list_panel(self)
        detail_panel = self.build_detail_panel(self)
        layout.Add(list_panel, 0, wx.EXPAND | wx.ALL, 5)
        layout.Add(detail_panel, 0, wx.EXPAND | wx.ALL, 5)
        self.SetSizerAndFit(layout)

    def build_list_panel(self, parent):
        """Left side: filter toolbar stacked above the record list."""
        panel = wx.Panel(
            parent, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize
        )
        panel.SetBackgroundColour(wx.Colour(gbl.COLOR_SCHEME.tbBg))
        layout = wx.BoxSizer(wx.VERTICAL)
        tb_panel = self.build_list_toolbar_panel(panel)
        lst_panel = self.build_list_list_panel(panel)
        layout.Add(tb_panel, 0, wx.EXPAND | wx.ALL, 5)
        layout.Add(lst_panel, 0, wx.EXPAND | wx.ALL, 5)
        panel.SetSizerAndFit(layout)
        return panel

    def build_list_toolbar_panel(self, parent):
        """Toolbar with name/notes search boxes, active-filter and help buttons."""
        panel = wx.Panel(
            parent, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize
        )
        panel.SetBackgroundColour(wx.Colour(gbl.COLOR_SCHEME.tbBg))
        layout = wx.BoxSizer(wx.HORIZONTAL)
        name_fltr_lbl = uil.get_toolbar_label(panel, 'Name:')
        name_fltr_lbl.SetForegroundColour(wx.Colour(gbl.COLOR_SCHEME.tbFg))
        layout.Add(name_fltr_lbl, 0, wx.ALL, 5)
        self.name_fltr_ctrl = wx.SearchCtrl(panel, wx.ID_ANY, '',
            style=wx.TE_PROCESS_ENTER, name='name_fltr_ctrl')
        self.name_fltr_ctrl.ShowCancelButton(True)
        layout.Add(self.name_fltr_ctrl, 0, wx.ALL, 5)
        notes_fltr_lbl = uil.get_toolbar_label(panel, 'Notes')
        notes_fltr_lbl.SetForegroundColour(wx.Colour(gbl.COLOR_SCHEME.tbFg))
        layout.Add(notes_fltr_lbl, 0, wx.ALL, 5)
        self.notes_fltr_ctrl = wx.SearchCtrl(panel, wx.ID_ANY,
            style=wx.TE_PROCESS_ENTER, name='notes_fltr_ctrl')
        self.notes_fltr_ctrl.ShowCancelButton(True)
        layout.Add(self.notes_fltr_ctrl, 0, wx.ALL, 5)
        self.active_btn = uil.toolbar_button(panel, 'ALL')
        self.active_btn.set_size(wx.Size(70, -1))
        layout.Add(self.active_btn, 0, wx.ALL, 0)
        self.help_btn = uil.get_help_btn(panel)
        layout.Add(self.help_btn, 0, wx.ALL, 5)
        panel.SetSizerAndFit(layout)
        return panel

    def build_list_list_panel(self, parent):
        """Record list (single-selection ObjectListView); columns come from the subclass."""
        panel = wx.Panel(parent, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize)
        panel.SetBackgroundColour(gbl.COLOR_SCHEME.lstBg)
        layout = wx.BoxSizer(wx.HORIZONTAL)
        flags = wx.LC_REPORT | wx.SUNKEN_BORDER | wx.LC_SINGLE_SEL
        self.list_ctrl = olv.ObjectListView(panel, wx.ID_ANY,
            size=wx.Size(-1, 600),
            style=flags)
        self.set_columns(self.list_ctrl)
        self.list_ctrl.SetBackgroundColour(gbl.COLOR_SCHEME.lstHdr)
        layout.Add(self.list_ctrl, 0, wx.ALL | wx.EXPAND, 5)
        panel.SetSizer(layout)
        return panel

    def set_columns(self, listCtrl):
        """Subclass hook: define the record list's columns."""
        raise NotImplementedError("Please Implement this method")

    def build_detail_panel(self, parent):
        """Right side: detail toolbar, edit form and assignments section."""
        panel = wx.Panel(
            parent, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize
        )
        panel.SetBackgroundColour(wx.Colour(gbl.COLOR_SCHEME.tbBg))
        layout = wx.BoxSizer(wx.VERTICAL)
        tb_panel = self.build_detail_toolbar_panel(panel)
        self.fm_panel = self.build_detail_form_panel(panel)
        self.asn_panel = self.build_asn_panel(panel)
        layout.Add(tb_panel, 0, wx.EXPAND | wx.ALL, 5)
        layout.Add(self.fm_panel, 0, wx.EXPAND | wx.ALL, 5)
        layout.Add(self.asn_panel, 0, wx.EXPAND | wx.ALL, 5)
        panel.SetSizerAndFit(layout)
        return panel

    def build_detail_toolbar_panel(self, parent):
        """Toolbar with Clear / Drop / Update buttons for the detail form."""
        panel = wx.Panel(
            parent, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize
        )
        panel.SetBackgroundColour(wx.Colour(gbl.COLOR_SCHEME.tbBg))
        layout = wx.BoxSizer(wx.HORIZONTAL)
        self.clear_btn = uil.toolbar_button(panel, 'Clear Form')
        self.save_btn = uil.toolbar_button(panel, 'Update ' + self.model_name)
        self.drop_btn = uil.toolbar_button(panel, 'Drop ' + self.model_name)
        self.drop_btn.set_size((150, -1))
        layout.Add(self.clear_btn, 0, wx.ALL, 5)
        layout.Add(self.drop_btn, 0, wx.ALL, 5)
        layout.Add(self.save_btn, 0, wx.ALL, 5)
        panel.SetSizer(layout)
        return panel

    def build_detail_form_panel(self, parent):
        """Edit form: model-specific fields (from subclass) plus a notes box."""
        panel = wx.Panel(
            parent, wx.ID_ANY, wx.DefaultPosition, size=(-1, 300)
        )
        panel.SetBackgroundColour(wx.Colour(gbl.COLOR_SCHEME.frmBg))
        panel.SetForegroundColour('black')
        layout = wx.BoxSizer(wx.VERTICAL)
        self.add_model_layout(panel, layout)
        notes_layout = wx.BoxSizer(wx.VERTICAL)
        notes_lbl = wx.StaticText(panel, wx.ID_ANY, 'Notes:')
        self.notes_ctrl = wx.TextCtrl(panel, wx.ID_ANY,
            style=wx.TE_MULTILINE, size=(500, 100))
        notes_layout.Add(notes_lbl, 0, wx.ALL, 5)
        notes_layout.Add(self.notes_ctrl, 0, wx.ALL, 5)
        layout.Add(notes_layout, 0, wx.ALL, 5)
        panel.SetSizer(layout)
        return panel

    def add_model_layout(self, panel, layout):
        """Subclass hook: add model-specific form fields to the layout."""
        raise NotImplementedError("Please Implement this method")

    def build_asn_panel(self, parent):
        """Assignments section: toolbar above the assignment list."""
        panel = wx.Panel(
            parent, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize
        )
        panel.SetBackgroundColour(wx.Colour(gbl.COLOR_SCHEME.tbBg))
        layout = wx.BoxSizer(wx.VERTICAL)
        tb_panel = self.build_asn_list_toolbar_panel(panel)
        asn_list_panel = self.build_asn_list_list_panel(panel)
        layout.Add(tb_panel, 0, wx.EXPAND | wx.ALL, 5)
        layout.Add(asn_list_panel, 0, wx.EXPAND | wx.ALL, 5)
        panel.SetSizerAndFit(layout)
        return panel

    def build_asn_list_toolbar_panel(self, parent):
        """Toolbar with Add/Drop assignment buttons."""
        panel = wx.Panel(
            parent, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize
        )
        panel.SetBackgroundColour(gbl.COLOR_SCHEME.tbBg)
        layout = wx.BoxSizer(wx.HORIZONTAL)
        self.add_asn_btn = uil.toolbar_button(panel, 'Add Assignment')
        layout.Add(self.add_asn_btn, 0, wx.ALL, 5)
        self.drop_asn_btn = uil.toolbar_button(panel, 'Drop Assignments')
        layout.Add(self.drop_asn_btn, 0, wx.ALL, 5)
        panel.SetSizer(layout)
        return panel

    def build_asn_list_list_panel(self, parent):
        """Assignment list with owner / from / thru / effort columns."""
        panel = wx.Panel(parent, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize)
        panel.SetBackgroundColour(gbl.COLOR_SCHEME.lstBg)
        layout = wx.BoxSizer(wx.HORIZONTAL)
        self.asn_list_ctrl = olv.ObjectListView(panel, wx.ID_ANY,
            size=wx.Size(-1, 375),
            style=wx.LC_REPORT | wx.SUNKEN_BORDER)
        self.asn_list_ctrl.SetColumns([
            self.get_owner_column(),
            olv.ColumnDefn('From', 'left', 105, 'frum',
                stringConverter=ml.prettify,
            ),
            olv.ColumnDefn('Thru', 'left', 100, 'thru',
                stringConverter=ml.prettify
            ),
            olv.ColumnDefn('Effort', 'left', 100, 'effort'),
        ])
        layout.Add(self.asn_list_ctrl, 1, wx.ALL | wx.EXPAND, 5)
        panel.SetSizer(layout)
        return panel

    def get_owner_column(self):
        """Subclass hook: column identifying the assignment's owner."""
        raise NotImplementedError("Please Implement this method")

    def set_list(self, model):
        """Replace the record list contents; Freeze/Thaw avoids flicker."""
        self.Freeze()
        self.list_ctrl.SetObjects(model)
        self.Thaw()

    def set_name(self, value):
        """Set the name field (None is shown as empty)."""
        if not value:
            value = ''
        self.name_ctrl.SetValue(value)

    def get_name(self):
        """Return the name field's text, or None when empty."""
        value = self.name_ctrl.GetValue()
        return value if value else None

    def set_notes(self, value):
        """Set the notes field (None is shown as empty)."""
        if not value:
            value = ''
        self.notes_ctrl.SetValue(value)

    def get_notes(self):
        """Return the notes field's text, or None when empty."""
        notes = self.notes_ctrl.GetValue()
        return notes if notes else None

    def set_selected_idx(self, idx):
        # Remember the caller-supplied selection index.
        self.selected_idx = idx

    def get_selected_idx(self):
        """Return the index of the first selected row (-1 when none)."""
        return self.list_ctrl.GetNextSelected(-1)

    def set_selection(self, idx):
        """Select the given row and scroll it into view."""
        self.list_ctrl.Select(idx, on=1)
        self.list_ctrl.EnsureVisible(idx)

    def get_selection(self):
        """Return the selected record object (single-selection list)."""
        return self.list_ctrl.GetSelectedObject()

    def clear_selection(self):
        """Deselect the currently selected row."""
        self.list_ctrl.Select(self.get_selected_idx(), on=False)

    def set_save_button_label(self, value):
        self.save_btn.set_label(value)

    def get_save_button_label(self):
        return self.save_btn.get_label()

    def set_active_button_label(self, value):
        self.active_btn.set_label(value)

    def get_active_button_label(self):
        return self.active_btn.label

    def get_asn_selections(self):
        """Return the selected assignment objects."""
        return self.asn_list_ctrl.GetSelectedObjects()

    def set_asn_list(self, asns):
        """Replace the assignment list, honoring the global active-only filter."""
        if gbl.dataset.get_active_only():
            asns = [asn for asn in asns if asn.active]
        self.Freeze()
        self.asn_list_ctrl.SetObjects(asns)
        self.Thaw()

    def get_selected_asns(self):
        # Same as get_asn_selections; kept for caller compatibility.
        return self.asn_list_ctrl.GetSelectedObjects()

    def set_details_active(self, active, model):
        """Enable/disable the detail editor to reflect the record's active state."""
        if active:
            self.drop_btn.set_label('Drop %s'% model)
            self.fm_panel.Enable()
            self.asn_panel.Enable()
            self.asn_list_ctrl.SetTextColour('black')
            self.clear_btn.Enable()
            self.save_btn.Enable()
        else:
            self.drop_btn.set_label('Undrop %s'% model)
            self.fm_panel.Disable()
            self.asn_panel.Disable()
            self.asn_list_ctrl.SetTextColour('gray')
            self.clear_btn.Disable()
            self.save_btn.Disable()

    def clear_details(self):
        """Blank out the detail form and the assignment list area."""
        uil.clear_panel(self.fm_panel)
        # Children[1] is presumably the assignment list sub-panel -- confirm.
        uil.clear_panel(self.asn_panel.Children[1])
|
# Validate a calendar date entered by the user (day, month, year).
d = int(input("Introduce un dia"))
m = int(input("introduce mes"))
a = int(input("introduce el año"))

# Maximum day per month, indexed 1-12.  February is capped at 28, matching
# the original logic (leap years were never considered -- TODO confirm).
dias_por_mes = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

# Fixes over the original chain: the duplicated month-9 branch is gone, the
# unreachable second d>31 check is gone, and November (month 11) is now
# limited to 30 days instead of silently accepting day 31.
if a <= 0 or not 1 <= m <= 12 or not 1 <= d <= dias_por_mes[m]:
    print("Fecha incorrecta")
else:
    print("Fecha correcta")
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import ElementNotInteractableException, NoSuchElementException, StaleElementReferenceException
from msedge.selenium_tools import Edge, EdgeOptions
from random import randint, randrange
import time
import random
URL = 'https://www.amazon.com/DP/B08WM28PVH'  # product page to watch
WAIT_TIME = 5        # upper bound (seconds) for the randomized waits
PRICE_LIMIT = 700.00  # buy only at or below this price
class Shopping:
    """Amazon buy-bot driven through Edge/Selenium.

    Polls the product page until the item is purchasable at or below
    PRICE_LIMIT, then logs in and places the order.
    """

    def __init__(self, mail, password):
        """Store login credentials and start a private Edge session."""
        self.mail = mail
        self.password = password
        options = EdgeOptions()
        options.use_chromium = True
        options.add_argument('-inprivate')  # fresh session, no cached login
        self.driver = Edge(
            executable_path='msedgedriver.exe', options=options)

    def logIn(self):
        """Fill the Amazon email/password form, with randomized pauses
        between steps to appear less bot-like."""
        driver = self.driver
        mail_elem = driver.find_element_by_xpath("//input[@name='email']")
        mail_elem.clear()
        mail_elem.send_keys(self.mail)
        time.sleep(randint(int(WAIT_TIME/2), WAIT_TIME))
        mail_elem.send_keys(Keys.RETURN)
        time.sleep(randint(int(WAIT_TIME/2), WAIT_TIME))
        password_elem = driver.find_element_by_xpath(
            "//input[@name='password']")
        password_elem.clear()
        password_elem.send_keys(self.password)
        time.sleep(randint(int(WAIT_TIME/2), WAIT_TIME))
        password_elem.send_keys(Keys.RETURN)
        time.sleep(randint(int(WAIT_TIME/2), WAIT_TIME))

    def findProduct(self):
        """Load the product page and order it when the price is acceptable.

        NOTE(review): retries by recursing into itself, so a long
        out-of-stock period can hit Python's recursion limit.
        """
        driver = self.driver
        driver.get(URL)
        time.sleep(randint(int(WAIT_TIME/2), WAIT_TIME))
        isAvailable = self.productStatus()
        if isAvailable == 'See Similar Items':
            # Out of stock -- wait and poll again.
            time.sleep(randint(int(WAIT_TIME/2), WAIT_TIME))
            self.findProduct()
        elif isAvailable <= PRICE_LIMIT:
            buy_now = driver.find_element_by_name('submit.buy-now')
            buy_now.click()
            time.sleep(randint(int(WAIT_TIME/2), WAIT_TIME))
            self.logIn()
            time.sleep(randint(int(WAIT_TIME/2), WAIT_TIME))
            # Confirm "deliver to this address"
            deliver_noww = driver.find_element_by_class_name('a-declarative')
            time.sleep(randint(int(WAIT_TIME/2), WAIT_TIME))
            deliver_noww.click()
            time.sleep(randint(int(WAIT_TIME/2), WAIT_TIME))
            place_order_text = driver.find_element_by_name(
                'placeYourOrder1').text
            place_order = driver.find_element_by_name('placeYourOrder1')
            time.sleep(randint(int(WAIT_TIME/2), WAIT_TIME))
            print(f'***** ORDERED: {place_order_text}')
            time.sleep(randint(int(WAIT_TIME/2), WAIT_TIME))
            place_order.click()
            time.sleep(randint(int(WAIT_TIME/2), WAIT_TIME))
        else:
            # In stock but above PRICE_LIMIT -- poll again.
            time.sleep(randint(int(WAIT_TIME/2), WAIT_TIME))
            self.findProduct()

    def productStatus(self):
        """Return the availability button text, or the parsed price (float)
        when the item is purchasable.

        NOTE(review): float(available[6:]) assumes a fixed-width currency
        prefix in the button text -- confirm against the live page markup.
        """
        driver = self.driver
        available = driver.find_element_by_class_name('a-button-text').text
        if available == 'See Similar Items':
            print(f'***** ESTADO - STATUS: {available}')
            return available
        else:
            print(f'***** PRECIO - PRICE: {available}')
            return float(available[6:])

    def closeBrowser(self):
        """ Closes browser """
        self.driver.close()
if __name__ == '__main__':
    # NOTE(review): placeholder credentials -- load real ones from env vars
    # or a config file instead of hard-coding them here.
    shopBot = Shopping(mail="yourmail@mail.com",
                       password="your_password")
    shopBot.findProduct()
    shopBot.closeBrowser()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.