text stringlengths 38 1.54M |
|---|
from environs import Env
import superjob as sj
import headhunter as hh
from table import generate_language_table, prepare_language_table
def main():
    """Collect vacancy statistics per programming language from HeadHunter
    and SuperJob, render each set as a table and print both."""
    languages = (
        "JavaScript",
        "Python",
        "Java",
        "TypeScript",
        "C#",
        "PHP",
        "C++",
        "C",
        "Ruby",
    )
    env = Env()
    env.read_env()
    api_key = env.str("SUPERJOB_API_KEY")
    # Gather statistics from both job boards (HeadHunter first, then SuperJob,
    # matching the original print order).
    statistics_by_title = {
        "HeadHunter ะะพัะบะฒะฐ": hh.collect_languages_statistic(languages),
        "SuperJob ะะพัะบะฒะฐ": sj.collect_languages_statistic(api_key, languages),
    }
    for title, statistic in statistics_by_title.items():
        print(generate_language_table(prepare_language_table(statistic), title))


if __name__ == "__main__":
    main()
|
# Generated by Django 3.0.3 on 2020-02-24 02:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``Articulo`` table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Articulo',
            fields=[
                # Default auto-increment integer primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('titulo', models.CharField(blank=True, max_length=50)),
                ('contenido', models.TextField(max_length=1000)),
                # Stamped automatically when the row is first created.
                ('fechaPublicacion', models.DateField(auto_now_add=True)),
            ],
        ),
    ]
|
#coding:utf8
import json
from app.share.constants import *
from dispatcher import GameServiceHandle
from firefly.server.globalobject import masterServiceHandle, GlobalObject
@GameServiceHandle(COMMAND_TEST)
def testMethod(dynamicId, request):
    '''
    for test purpose, always return E_OK and a message in data
    '''
    # Decode the client's JSON payload and pull out its message field.
    payload = json.loads(request)
    reply = 'I got your message: %s' % payload[KEY_MESSAGE]
    # Assemble the success response envelope in one literal.
    response = {
        KEY_ERRNO: E_OK,
        KEY_DATA: {KEY_MESSAGE: reply},
    }
    return json.dumps(response)
import sqlite3

# Open (or create) the local database file next to this script.
conn = sqlite3.connect('6.db')
c = conn.cursor()

# One-time setup kept for reference: table creation and a seed row.
#c.execute("""CREATE TABLE name_and_age(
# first text,
# age integer
#
#
# )""")
#c.execute("INSERT INTO name_and_age VALUES ('Sharon','13')")

# Fetch every row matching the hard-coded first name and print them.
c.execute("SELECT * FROM name_and_age WHERE first='Sharon'")
print(c.fetchall())
conn.commit()
conn.close()
|
# run.py
# from the app package __init__.py
from app import create_app, db
# from app.auth.models import User
# My home page is at blog component

if __name__ == '__main__':
    # Build the Flask app with the 'dev' configuration.
    flask_app = create_app('dev')
    with flask_app.app_context():
        # Create any missing database tables before serving requests.
        db.create_all()
    # NOTE(review): assumed run() sits outside the app-context block (the
    # conventional pattern); indentation was lost in this copy — confirm.
    flask_app.run()
import sys
import numpy as np
import random
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit

# This program takes in input files of diffrate/time and the integrated rate/energy threshold
# and samples multiple neutrino start times, which are saved in a histogram.
# NOTE: written for Python 2 (print statements below).

# Read names of output file and input file from command line
if len(sys.argv) != 3:
    print "Wrong number of arguments."
    print "Usage: " + sys.argv[0] + " <(diffrate) input file> <(energy threshold) input file>"
    quit()
else:
    infileName1 = sys.argv[1]
    infileName2 = sys.argv[2]

# Open input file for reading
infile1 = open(infileName1, "r")
infile2 = open(infileName2, "r")

# Initialise arrays
time_val_list = []
diffrate_list = []
intrate_list = []
enthreshold_list = []

# Scaling factors for total events
energy_threshold = 0. # Energy threshold of the detector in keV
detector_mass = 7. # Detector mass in tonnes (7 for LZ)
SN_dist_unit = 3.0857e+20 # 10kpc in m - all readings are relative to this distance and scale as 1/r^2
SN_dist_actual = 3.0857e+20 # 1kpc in m is 3.0857e+19
SN_dist_event_scaling = 1./(SN_dist_actual/SN_dist_unit)**2 # Scaling factor for events by distance SN is from earth

# Fill arrays
# infile1: whitespace-separated "time diffrate" pairs.
for line in infile1:
    tokens = line.split()
    time_val_list.append(float(tokens[0]))
    diffrate_list.append(float(tokens[1]))
# infile2: comma-separated "threshold,intrate" pairs.
for line in infile2:
    tokens = line.split(",")
    enthreshold_list.append(float(tokens[0]))
    intrate_list.append(float(tokens[1]))

# Close files
infile1.close()
infile2.close()

# Convert lists to numpy arrays
time_val_list = np.array(time_val_list)
diffrate_list = np.array(diffrate_list)
intrate_list = np.array(intrate_list)
enthreshold_list = np.array(enthreshold_list)
# Fit threshold data with an exponential
def exp_func(x, a, b, c):
    """Decaying exponential with constant offset: a * exp(-b * x) + c."""
    return c + a * np.exp(-b * x)
# Fit the threshold data with an exponential; popt holds (a, b, c).
popt, pcov = curve_fit(exp_func, enthreshold_list, intrate_list)
total_events = np.sum(diffrate_list)/1000. # Total integral of the file
# Scale by SN distance, detector mass and the threshold energy of the detector
# Scaling by mass is done by multiplying by the mass in tonnes of the detector
# The total events scale off as 1/r^2, where r is the SN distance
# The scaling by energy threshold is done by finding the value of the fitted function at the energy
# threshold of the detector and scaling by this value over the value of the function at 0 energy threshold
total_events_scaled = SN_dist_event_scaling*detector_mass*exp_func(energy_threshold, *popt)
print "Total integral of file = %f" %total_events
print "Total number of events = %d" %int(total_events_scaled)

start_time_samples = [] # Array to store all start times
num_start_time = 1000 # Number of start times to add to histogram
num_events = 1000 #int(total_events_scaled)
outputfile_name = 'histstarttime_' + str(num_events) + 'events_' + str(num_start_time) + 'ent.png'

# Running integral (over full 10s range) array - to save computation in the loop
running_integral_10s = np.empty(len(diffrate_list))
integral_sofar = 0.
for m in range(len(diffrate_list)):
    integral_sofar += diffrate_list[m]/1000.
    running_integral_10s[m] = integral_sofar

# For each trial, sample num_events event times and keep the earliest one.
for k in range(num_start_time):
    start_time = 20. # Set start time to large number for the purpose of calculating a minimum
    for i in range(num_events):
        random_sample = random.random()*total_events
        for j in range(len(time_val_list)):
            if running_integral_10s[j] >= random_sample:
                # Interpolate between the interval endpoints
                # Current integral is the integral up to the start of the interval
                # tvals and dr_interp are the interpolated data points over the 1ms interval
                if j == 0:
                    tvals = np.linspace(0., time_val_list[j], 101)
                    dr_interp = np.interp(tvals, [0., time_val_list[j]],
                                          [diffrate_list[j], diffrate_list[j]])
                    current_integral = 0.
                else:
                    tvals = np.linspace(time_val_list[j - 1], time_val_list[j], 101)
                    dr_interp = np.interp(tvals, [time_val_list[j - 1], time_val_list[j]],
                                          [diffrate_list[j - 1], diffrate_list[j]])
                    current_integral = running_integral_10s[j - 1]
                # Run through integral and find the time value of the random sample to a precision of 0.01ms
                # NOTE(review): if the fine loop never reaches random_sample,
                # first_encounter keeps its value from a previous event (or is
                # unbound on the very first event) — confirm this cannot happen.
                for n in range(1,len(tvals)):
                    if current_integral >= random_sample:
                        first_encounter = tvals[n]
                        break
                    current_integral += dr_interp[n] / 100000.
                break
        if first_encounter < start_time:
            start_time = first_encounter
    print "%d: Start time = %fms" % (k+1, start_time * 1000)
    start_time_samples.append(start_time)
start_time_samples = np.array(start_time_samples)

"""
plt.plot(enthreshold_list, intrate_list, 'b-', label='data')
plt.plot(enthreshold_list, exp_func(enthreshold_list, *popt), 'r-', label='fit')
plt.xlabel('Energy threshold')
plt.ylabel('Integrated rate')
plt.axis([0, 10, 0, 30])
plt.legend()
plt.savefig('thresholdfit.png', bbox_inches='tight')
plt.show()
n1, bins1, patches1 = plt.hist(time_samples, bins = 100, range = (0,10), normed = True)
plt.xlabel('Time')
plt.ylabel('Differential rate')
plt.title('Histogram of Differential rate')
plt.axis([0, 10, 0, 1])
plt.savefig('histdiffrate.png', bbox_inches='tight')
plt.show()
"""

# Histogram of the sampled start times, saved to disk.
n2, bins2, patches2 = plt.hist(start_time_samples, bins = 1000, range = (0,0.1))
plt.xlabel('Start time')
plt.ylabel('Num of Occurences')
plt.title('Histogram of Start times')
plt.axis([0, 0.1, 0, num_start_time])
plt.savefig(outputfile_name, bbox_inches='tight')
# Simple dictionary lookup demo.
# NOTE(review): the Korean string literals below are mojibake-damaged in this
# copy, and the "birth" value is split across a literal line break — likely an
# extraction artifact; restore from the original file before running.
personInfo = {
    "name": "์ด์ฒ ์",
    "birth": "1990๋
3์ 18์ผ",
    "address": "๋ถ์ฐ ๋ถ์ฐ์ง๊ตฌ ์ค์๋๋ก 668",
    "tel": "010-1234-5678"
}

name = personInfo["name"]
print(name)
# Read the leading integer from each of two input lines.
m1 = int(input().split()[0])
m2 = int(input().split()[0])
# Print "1" exactly when the second number immediately follows the first.
print("1" if m2 - m1 == 1 else "0")
# -*- coding: utf-8 -*-
__author__ = 'Chunyou<snowtigersoft@126.com>'
import os
import codecs
import json
from datetime import datetime
from .logger import Logger
class FileLogger(Logger):
    """
    A file based CTP logger.

    Records are flushed into per-day text files (yyyy-mm-dd.txt) under two
    sub-folders: 'market' for depth market data and 'trading' for everything
    else. Each line is tab-separated: timestamp, tag, JSON payload.
    """

    def __init__(self, folder, append=True, in_memory_records=1000, datetime_format='%Y-%m-%d %H:%M:%S',
                 datetime_adapter=None):
        """
        Initialize current logger.
        :param folder: string. folder of the log files. The folder will have the following sub-folders:
            (1) market, this folder stores depth market data from the marketing api.
            (2) trading, this folder stores all the other kinds of data.
            In each folder, the should be files yyyy-mm-dd.txt for each day.
        :param in_memory_records: int, the maximal number of log records to keep in memory
            before flushing them into the log file.
        :param append: boolean, if True and if a log file exists, append to that file;
            otherwise empty the existing log file.
        :param datetime_format: string, datetime format in the log file.
        :param datetime_adapter: function pointer from datetime to datetime, used to adapt time in different time zones.
        """
        Logger.__init__(self)
        self.append = append
        self.datetime_format = datetime_format
        self.market_folder = folder + os.path.sep + 'market'
        self._mkdir(self.market_folder)
        self.trading_folder = folder + os.path.sep + 'trading'
        self._mkdir(self.trading_folder)
        self.in_memory_records = in_memory_records
        self.datetime_adapter = datetime_adapter

    @classmethod
    def _mkdir(cls, newdir):
        """works the way a good mkdir should :)
        - already exists, silently complete
        - regular file in the way, raise an exception
        - parent directory(ies) does not exist, make them as well
        """
        if os.path.isdir(newdir):
            pass
        elif os.path.isfile(newdir):
            raise OSError("a file with the same name as the desired " \
                          "dir, '%s', already exists." % newdir)
        else:
            head, tail = os.path.split(newdir)
            # Recursively create missing parents first.
            if head and not os.path.isdir(head):
                cls._mkdir(head)
            if tail:
                os.mkdir(newdir)

    def _flush_records(self, folder, filename, records):
        """Write the given (timestamp, tag, payload) records to folder/filename.

        Uses a context manager so the handle is closed even if a record fails
        to serialize (the original code leaked the handle on exception).
        """
        with codecs.open(folder + os.path.sep + filename, 'a' if self.append else 'w', 'utf-8') as handle:
            for item in records:
                handle.write("%s\t%s\t%s\n" % (
                    item[0].strftime(self.datetime_format), item[1], json.dumps(item[2], ensure_ascii=False)))

    def flush_impl(self):
        """Flush buffered trading and market records into today's log files."""
        filename = datetime.now().strftime('%Y-%m-%d.txt')
        # Swap each buffer out first so new records can accumulate meanwhile.
        trading_data, self.trading_data = self.trading_data, []
        self._flush_records(self.trading_folder, filename, trading_data)
        market_data, self.market_data = self.market_data, []
        self._flush_records(self.market_folder, filename, market_data)
from pprint import pprint
from apps.chrome_driver import ChromeDriver
from apps.read_excel import ReadExcel
from apps.write_excel import WriteExcel
sample_read_excel_path = 'assets/sample.xlsx'
# NOTE(review): the assignment below immediately overrides the path above,
# so the first value is dead code — confirm which input file is intended.
sample_read_excel_path = 'assets/20200807_TN_๋๋น์คํค๋ง_sample.xlsx'
sample_write_excel_path = 'assets/result.xlsx'

if __name__ == '__main__':
    # Read the source rows, translate them via a Chrome-driven session,
    # then write the translated rows back out to the result workbook.
    reader = ReadExcel()
    read_result = reader.read(path = sample_read_excel_path)
    pprint(read_result)
    chrome = ChromeDriver()
    translate_result = chrome.run(translate_data = read_result)
    pprint(translate_result)
    writer = WriteExcel()
    writer.write(write_data = translate_result, path = sample_write_excel_path)
|
from mpf.core.custom_code import CustomCode
import random
class Mystery(CustomCode):
    """Mystery award logic for the machine.

    Picks a weighted random award for the player, skipping awards the player
    has already collected this cycle and awards whose game-state preconditions
    currently fail. When every award has been collected, the cycle resets.
    """

    def on_load(self):
        """Register event handlers for the mystery-related commands."""
        self.info_log('Enabling')
        self.machine.events.add_handler('cmd_get_mystery', self.get_mystery)
        self.machine.events.add_handler('cmd_mystery_award_chainsaw_letter', self.on_award_chainsaw_letter)
        self.machine.events.add_handler('cmd_mystery_light_lock', self.on_light_lock)
        self.machine.events.add_handler('cmd_mystery_franklin_letter', self.on_franklin_letter)
        #self.machine.events.add_handler('s_left_flipper_active', self.flipper_test)

    def get_mystery(self, **kwargs):
        """Pick a random mystery award, record it and broadcast the choice."""
        del kwargs
        self.info_log('Getting Mystery')
        choices = self.fetch_choices()
        self.trace("choices")
        self.info_log(choices)
        choice = random.choice(choices)
        self.machine.game.player["v_current_mystery_value"] = choice
        self.save_acquired_mysteries(choice)
        self.trace("choice")
        self.info_log(choice)
        self.machine.events.post('cmd_mystery_choice', choice=choice)

    def fetch_choices(self):
        """Build the weighted pool of candidate awards.

        Each award appears as many times as its weight, so random.choice on
        the result draws with the intended relative frequencies.
        """
        frequencies = {
            "small_points": 20,
            "add_bonus_multiplier": 20,
            "award_chain_saw_letter": 20,
            "light_lock": 20,
            "award_franklin_letter": 20,
            "2_x_playfield": 10,
            "30_second_ball_save": 10,
            "big_points": 10,
            "jack_shit": 10,
            "award_tilt_warning": 5,
            "light_extra_ball": 1
            # "save_from_the_grave": 10, # on hold
            # "collect_bonus": 10, # on hold
        }
        choices = [choice for choice, weight in frequencies.items() for _ in range(weight)]
        return self.filter_acquired_choices(choices)

    def filter_acquired_choices(self, choices):
        """Drop awards already acquired this cycle; reset the cycle if empty.

        Returns the filtered pool, or (when everything was collected) resets
        the player's collected list and re-filters the full pool by state.
        """
        checked = choices.copy()
        for acquired_choice in self.get_player_acquired_mysteries():
            checked = [c for c in checked if c != acquired_choice]
        checked = self.filter_choice_by_state(checked)
        if not checked:
            # Every award was collected: start a fresh cycle.
            self.machine.game.player["collected_mysteries"] = []
            return self.filter_choice_by_state(choices)
        return checked

    def filter_choice_by_state(self, checked):
        """Remove awards whose game-state preconditions currently fail."""
        rejections = {
            "award_chain_saw_letter": self.should_reject_saw_letter,
            "light_lock": self.should_reject_light_lock,
            "award_franklin_letter": self.should_reject_franklin_letter,
            "2_x_playfield": self.should_reject_2_x_playfield,
            "light_extra_ball": self.should_reject_light_extra_ball,
            "franklin_frenzy": self.should_reject_franklin_frenzy
        }
        rejects = [choice for choice, reject_func in rejections.items() if reject_func()]
        for reject in rejects:
            checked = [c for c in checked if c != reject]
        return checked

    def save_acquired_mysteries(self, mystery):
        """Record the awarded mystery on the player, de-duplicated.

        BUGFIX: the de-duplicated list is now written back to the player
        variable; previously it was only assigned to a local and discarded.
        """
        if self.ensure_player_acquired_mysteries():
            collected_mysteries = self.get_player_acquired_mysteries()
            collected_mysteries.append(mystery)
            self.machine.game.player["collected_mysteries"] = self.uniqify(collected_mysteries)

    def ensure_player_acquired_mysteries(self):
        """Ensure the player has a 'collected_mysteries' list.

        Returns False when no player is active. MPF player variables default
        to 0, hence the != 0 check before initializing the list.
        """
        player = self.machine.game.player
        if not player:
            return False
        if player["collected_mysteries"] != 0:
            return True
        player["collected_mysteries"] = []
        return True

    def get_player_acquired_mysteries(self):
        """Return the player's collected-mysteries list (empty if no player)."""
        player = self.machine.game.player
        if self.ensure_player_acquired_mysteries():
            return player["collected_mysteries"]
        return []

    def trace(self, message):
        """Log a message framed by asterisk padding for visibility."""
        padding = 25 * "*"
        self.info_log(padding + " " + message + " " + padding)

    def uniqify(self, seq):
        """Return seq with duplicates removed, preserving first-seen order."""
        seen = []
        for item in seq:
            if item not in seen:
                seen.append(item)
        return seen

    ##################################################
    # Choices Logic
    ##################################################

    # Chainsaw Letter
    def flipper_test(self, **kwargs):
        """Debug helper: dump the franklin shot states to the log."""
        self.info_log(self.machine.shots['left_franklin_shot'].state)
        self.info_log(self.machine.shots['left_franklin_shot'].state_name)
        self.info_log(self.machine.shots['right_franklin_shot'].state)
        self.info_log(self.machine.shots['right_franklin_shot'].state_name)

    def should_reject_saw_letter(self):
        """Reject when chainsaw mode is inactive or all letters are taken."""
        if not self.is_mode_active("chainsaw"):
            return True
        return self.current_available_chainsaw_letters() == []

    def current_available_chainsaw_letters(self):
        """Return the not-yet-collected chainsaw letters, in display order."""
        remaining = set(self.chainsaw_letters()) - set(self.collected_chainsaw_letters())
        result = list(remaining)
        result.sort(key=self.chainsaw_letter_index)
        return result

    def collected_chainsaw_letters(self):
        """Return the chainsaw letters the player has already collected."""
        collected_letters = []
        for letter in self.chainsaw_letters():
            if self.machine.game.player[f'v_chainsaw_{letter}_collected'] == 1:
                collected_letters.append(letter)
        return collected_letters

    def chainsaw_letters(self):
        """Canonical letter list; 'a2' distinguishes the second 'a'."""
        return ["c", "h", "a", "i", "n", "s", "a2", "w"]

    def chainsaw_letter_index(self, letter):
        """Sort key: a letter's position in the canonical order."""
        return self.chainsaw_letters().index(letter)

    def on_award_chainsaw_letter(self, **kwargs):
        """Award the next available chainsaw letter by hitting its shot."""
        letters = self.current_available_chainsaw_letters()
        letter = letters[0]
        # Letters belong to either the 'chain' or the 'saw' shot bank.
        if ['s', 'a2', 'w'].count(letter) == 0:
            self.machine.shots[f'chain_{letter[0]}_shot'].hit()
        else:
            self.machine.shots[f'saw_{letter[0]}_shot'].hit()

    # escape lock
    def should_reject_light_lock(self):
        """Reject unless escape mode is active and in a lock-eligible state."""
        if not self.is_mode_active("escape"):
            return True
        safe_states = ["start", "lock_1_locked", "lock_2_locked"]
        current_state = self.machine.game.player["v_escape_state"]
        return current_state not in safe_states

    def on_light_lock(self, **kwargs):
        """Forward the light-lock award to the escape mode."""
        self.machine.events.post('cmd_mystery_show_complete')
        self.machine.events.post('cmd_mystery_meat_award_light_lock')

    # Franklin
    def should_reject_franklin_letter(self):
        """Reject when franklin mode is inactive or shots can't take a letter.

        BUGFIX: the comparison below was previously discarded (missing
        'return'), so this method always returned None (falsy).
        NOTE(review): verify the direction of the comparison — rejecting while
        the combined state is still below 7 may be inverted.
        """
        if not self.is_mode_active("franklin"):
            return True
        return (self.machine.shots['left_franklin_shot'].state
                + self.machine.shots['right_franklin_shot'].state) < 7

    def on_franklin_letter(self, **kwargs):
        """Award a franklin letter: fill the left shot first, then the right."""
        if self.machine.shots['left_franklin_shot'].state < 4:
            self.machine.shots['left_franklin_shot'].hit()
        else:
            self.machine.shots['right_franklin_shot'].hit()

    # 2 x playfield
    # TODO: This needs to be updated
    def should_reject_2_x_playfield(self):
        """Reject when the playfield-multiplier award is unavailable."""
        if not self.is_mode_active("tt_playfield"):
            return True
        if self.machine.game.player["v_ttp_enabled"] == 1:
            return True
        if self.machine.game.player["v_tt_playfield_counter_count"] > 2:
            return True
        return False

    def should_reject_light_extra_ball(self):
        """Extra-ball light is always allowed."""
        return False

    def should_reject_franklin_frenzy(self):
        """Franklin frenzy is always allowed."""
        return False

    def is_mode_active(self, mode_name):
        """True when the named MPF mode is currently running."""
        return self.machine.mode_controller.is_active(mode_name)
|
import ics.utils.sps.lamps.controllers.digitalLoggers as digitalLoggers
import ics.utils.sps.lamps.controllers.aten as aten |
from wtforms import Form, StringField, PasswordField, validators, IntegerField, SelectField
from wtforms.widgets import TextArea
from wtforms.fields import TextAreaField
class PlaceSelectionForm(Form):
    """Form for picking a seat by (row, column) grid position."""

    # NOTE(review): `validators.number_range` is the legacy lowercase alias of
    # wtforms.validators.NumberRange; the positional 0 is its `min` bound.
    # Confirm the installed WTForms version still exposes this alias.
    row = IntegerField('Row', [validators.number_range(0)])
    column = IntegerField('Column', [validators.number_range(0)])
%% Test_hybridRocketThrustCalc.m
%
% Based on the driver script for AOE 4984: Booster Design, Assignment 4.
% That script is the implimentation of Example 16.4 from the textbook
% "Rocket Propulsion Elements 8th Edition", by Oscar Biblarz George P.
% Sutton. It models the performance of a hybrid rocket.
%
% This script tests the hybridRocketThrustCalc.m function using data from
% Sutton. This is to ensure that Thrust and Specific Impulse are computed
% correctly. The values given and computed were compared and agree with the
% values in Sutton in the Booster Design homework as of 2019-04-22.
%
% Example 16-1
% Suppose that the operating characteristics of a Space Shuttle-class
% hybrid rocket booster are to be determined, given the following initial
% design requirements.
%
% Unlike Sutton, I use functions from numerical methods to compute the
% values instead of looking things up from tables. This allows me to have a
% much more flexible design.
%
% @author: Matt Marti
% @date: 2019-04-25

clear


%% Given

% Constants
g0 = 32.174; % [ft/s/s] Gravity acceleration

% Numeric design choices
Fv = 3.1e6; % [lbf] Required Initial Thrust (vacuum)
tburn = 120; % [s] Burn Time
Douter = 150; % [in] Fuel Grain Outside Diameter
P1 = 700; % [psia] Initial Chamber Pressure
r = 2; % Initial Mixture Ratio
A2oAt = 7.72; % Initial Expansion Ratio

% Miscellaneous parameters
Nports_circ = 7; % Number of channels in fuel - circular array
cstar_efficiency = 0.95;

% Fuel: HTPB
rhof = 0.033; % [lbm/in^3] Fuel density
aHTPB = 0.104;
nHTPB = 0.681;
rHTPBdot_fun = @(G0) aHTPB*G0^nHTPB; % [in/s] HTPB regression rate

% Oxidizer: Liquid Oxygen
% Table 16-2
rvec = (1:.2:3)'; % Mass Mixture Ratio
cstarvec = [
4825;
5180;
5543;
5767;
5882;
5912;
5885;
5831;
5768;
5703;
5639]; % [ft/s] Characteristic velocity
kvec = [
1.308;
1.282;
1.239;
1.201;
1.171;
1.152;
1.143;
1.138;
1.135;
1.133;
1.132]; % [no units]


%% Determine Mass Flow Rates
% fprintf('--- Not dependent on fuel grain geometry ---\n');

% Determine characteristic velocity and specific heats using cubic spline
cstar_theory = cubicspline(rvec, cstarvec, r); % [ft/s]
k = cubicspline(rvec, kvec, r); % [ft/s]

% Determine pressure ratio from Expansion Ratio
A2oAt_fun = @(M,k) 1./M.*(2*(1+0.5*(k-1)*M.^2)/(k+1)).^(0.5*(k+1)/(k-1));
Mach_error_fun = @(M) log(A2oAt_fun(M,k)) - log(A2oAt); % Error function
M2 = secantrootsolve( Mach_error_fun, 1, 5); % Exit Mach No.
P0oP = @(M,k) (1+0.5*(k-1)*M.^2).^(k/(k-1)); % Stagnation Pressure P0 ~= P1
% Remember: People Order Our Patties!
% fprintf('Flow Exit Mach Number (M2): %.2f\n', M2);

% Determine Thrust coefficient
P2 = P1 / P0oP(M2,k); % Exit pressure
P3 = 0; % Ambient pressure (vacuum)
P2oP1 = P2/P1;
Cf_P3 = sqrt(((2*k^2)/(k-1))*(2/(k+1))^((k+1)/(k-1))*(1-(P2oP1)^((k-1)/k))) ...
    + (P2-P3)/P1*A2oAt; % Thrust coefficient
% fprintf('Vacuum thrust coeff (Cfv): %.3f\n', Cf_P3);

% Determine initial nozzle area
At = Fv / (Cf_P3*P1); % [in^2] Nozzle throat Area
Dt = 2*sqrt(At/pi); % [in] Nozzle throat Diameter
% fprintf('Throat Area (At): %.2f [in^2]\n', At);
% fprintf('Throat Diameter (Dt): %.2f [in]\n', Dt);

% Mass flow rate
cstar = cstar_efficiency * cstar_theory; % [ft/s]
mdot = g0 * P1 * At / cstar;
% fprintf('Mass flow rate (mdot): %.2f [lbm/s]\n', mdot);

% Mass values
mfdot = mdot / (r+1); % [lbm/s] Fuel mass rate
modot = mdot - mfdot; % [lbm/s] Oxidizer mass rate
% fprintf('Fuel rate (mfdot): %.2f [lbm/f]\n', mfdot)
% fprintf('Oxidizer rate (modot): %.2f [lbm/f]\n', modot)


%% Determine fuel and oxidizer mass requirement for circular port array
% Note that the next section assumes (and is only valid for) a geometry in
% which all the fuel grains are circles centered around a center circle.
% fprintf('\n--- Cicular cross section parameters ---\n');

N = Nports_circ;
piN = pi*N;
a = aHTPB;
n = nHTPB;

% Determine Ri and db (initial port radius and fuel grain width)
Rtf_constraint = 0.5*Douter/3; % Constraint for fuel grain diameter
Rtf_fun = @(Ri, a, n, t) ...
    (a*(2*n+1)*((modot/piN)^n)*t + Ri^(2*n+1))^(1/(2*n+1)); % Eq 16-13: Solution to the fuel regression rate ODE
Ri_error_fun = @(Ri) Rtf_fun(Ri, aHTPB, nHTPB, tburn) - Rtf_constraint; % Error function for port radius
Ri = secantrootsolve( Ri_error_fun, 0, 0.5*Douter); % Combustion Port radius
db = 0.5*Douter/3 - Ri; % Fuel burn distance (grain width)
% fprintf('Port radius (Ri): %.2f [in]\n', Ri);
% fprintf('Grain width (db): %.2f [in]\n', db);

% Determine length of fuel
G0 = modot / (piN*Ri^2); % [lbm/in^2/s] Oxidizer Mass Velocity
rdot = rHTPBdot_fun(G0);
L = (mfdot/N) / (2*pi*Ri*rhof*rdot); % [in] fuel length
% fprintf('Oxidizer mass velocity (G0): %.2f [lbm/in^2/s]\n', G0);
% fprintf('Fuel regression rate (rdot): %.3f [in/s]\n', rdot);
% fprintf('Fuel grain length (L): %.1f [in]\n', L);

% Determine required fuel and oxidizer mass
crossSectionArea_fuel = ( pi*(Ri+db)^2 - pi*Ri^2 );
mf = N * L * crossSectionArea_fuel * rhof;
mo = modot*tburn;
% fprintf('Fuel Mass (mf): %.0f [lbm]\n', mf);
% fprintf('Oxidizer Mass (mo): %.0f [lbm]\n', mo);
% fprintf('Total propellant mass (m): %.0f [lbm]\n', mo+mf);

% --- Determine end specific impulse for circular cross section ---

% Determine Specific impluse
Isp = Cf_P3*cstar / g0;
% fprintf('Specific Impulse (Isp): %.2f [s]\n', Isp);


%% Run function to test values obtained

% Area calculations
A2 = A2oAt * At; % [in^2] Nozzle Exit Area
Ap = N * pi * Ri^2; % [in^2] Port area
Ab = N*pi*2*Ri*L; % [in^2] Exposed fuel surface area

% Fuel reguression characteristics
regresscoeff = [aHTPB; nHTPB];

% Cstar efficiency
eta = cstar_efficiency;

% Unit Conversion parameters
lbm2kg = 0.453592; % Pounds to kilograms
in2m = 0.0254; % Inches to meters
ft2in = 12; % Feet to inches
lb2N = 4.44822; % Pounds to Newtons
psia2Pa = 6894.76; % Pounds per square inch to Pa
ft2m = 0.3048;

% Compute Unit conversions
g0_si = g0*ft2m;
rdot_si = rdot * in2m;
rhof_si = rhof * lbm2kg / in2m / in2m / in2m;
a_si = 3.045015375e-5; % a in SI units
n_si = 0.680825577; % n in SI units
% eta = eta
At_si = At*in2m^2;
A2_si = A2*in2m^2;
Ap_si = Ap*in2m^2;
Ab_si = Ab*in2m^2;
modot_si = modot * lbm2kg;
mfdot_si = mfdot * lbm2kg;
mdot_si = mdot * lbm2kg;
P3_si = P3 * psia2Pa;
Fv_si = Fv * lb2N;
P1_si = P1 * psia2Pa;
Go_si = modot_si / Ap_si;
cstarvec_si = cstarvec * ft2m;
c_si = Fv_si/mdot_si;

% Regression function
regress_fun = @(Go) a_si*(Go^n_si); % Sutton 16-15 and 16-5

% Function call
[ F_test, rdot_test, mfdot_test, mdot_test, P1_test, Isp_test, CF_test, c_test ] ...
    = hybridRocketThrustCalc( ...
    g0_si, rhof_si, regress_fun, rvec, cstarvec_si, kvec, eta, ...
    At_si, A2_si, Ap_si, Ab_si, modot_si, P3_si );

% Test values
merr = 1e-5; % Max error
assert( abs((Fv_si - F_test)/Fv_si) < merr, 'Bad thrust');
% NOTE(review): the relative error below divides by rdot (imperial), while
% every sibling assert divides by the _si value — confirm this denominator.
assert( abs(rdot_si - rdot_test)/rdot < merr, 'Bad fuel regression');
assert( abs(P1_si - P1_test)/P1_si < merr, 'Bad Combustion Pressure');
assert( abs(mfdot_si - mfdot_test)/mfdot_si < merr, 'Bad Fuel Mass rate');
assert( abs(mdot_si - mdot_test)/mdot_si < merr, 'Bad Mass flow rate');
assert( abs(Isp - Isp_test)/Isp < merr, 'Bad Specific Impulse');
assert( abs(Cf_P3 - CF_test)/Cf_P3 < merr, 'Bad Thrust Coefficient');
assert( abs(c_si - c_test)/c_si < merr, 'Bad Exhaust Velocity');


%% If oxidizer mass flow rate is 0, thrust should be 0

% Function call
[ F_test, rdot_test, mfdot_test, mdot_test, P1_test, Isp_test, CF_test, c_test ] ...
    = hybridRocketThrustCalc( ...
    g0_si, rhof_si, regress_fun, rvec, cstarvec_si, kvec, eta, ...
    At_si, A2_si, Ap_si, Ab_si, 0, P3_si );

% Test values
merr = 1e-5; % Max error
assert( abs(F_test) < merr, 'Bad thrust');
assert( abs(rdot_test) < merr, 'Bad fuel regression');
assert( abs(mfdot_test) < merr, 'Bad Fuel Mass rate');
assert( abs(mdot_test) < merr, 'Bad Mass flow rate');
assert( abs(c_test) < merr, 'Bad Exhaust Velocity');


%% Pass
fprintf('PASSED: Test_hybridRocketThrustCalc\n');
|
from django.conf.urls import url
from django.urls import path
from . import views
# URL routes for this app; the `name` values are used for reverse lookups
# in templates and views.
urlpatterns = [
    path('index/', views.home, name='home'),
    #url(r'^$', index, name='index'),
    path('search/', views.search, name='search'),
    path('cart/', views.cart, name='cart'),
    path('contact/', views.contact, name='contact'),
]
|
from .empleado import Empleado
class Departamento:
    """A department with a name, a phone number and a dict of employees."""

    def __init__ (self, nombre, telefono):
        self.nombre = nombre
        self.telefono = telefono
        # Employees keyed by identifier; values are Empleado objects.
        self.empleados = {}
        self.supervisor = None

    def __str__ (self):
        """Header line for the department followed by each employee's summary."""
        cadena = f'''\n DEPARTAMENTO {self.nombre.upper ()} -- Telรฉfono del departamento: {self.telefono}
        '''
        for objeto_empleado in self.empleados.values():
            cadena += objeto_empleado.getEmpleado()
        return cadena

    # def media_salarial (self):
    # salario_sum = 0
    # for x in self.empleados:
    # salario_sum += x.salario
    # salario_media = salario_sum/ (len (self.empleados))
    # return salario_media

    def sort(self, fn_lambda=lambda empleado: empleado.salario, option=False):
        """Reorder the employees dict by a key computed from each employee.

        BUGFIX: ``self.empleados`` is a dict and dicts have no ``.sort()``
        method (the original call raised AttributeError). The dict is rebuilt
        from its sorted items; insertion order is preserved on Python 3.7+.

        :param fn_lambda: key function applied to each employee value.
        :param option: passed as ``reverse`` — True sorts descending.
        """
        self.empleados = dict(
            sorted(self.empleados.items(), key=lambda par: fn_lambda(par[1]), reverse=option)
        )
|
import tkinter as tk
from tkinter import messagebox
import math
class Application(tk.Tk):
    """Small GUI that computes a right triangle's hypotenuse from its legs."""

    def __init__(self, master=None):
        tk.Tk.__init__(self, master)
        self.title("Triangle Hypotenuse Calculator")
        self.create()

    def create(self):
        """Build and place the widgets.

        BUGFIX: ``grid()`` returns None, so chaining it onto the constructor
        stored None in the widget attributes; the widget is now kept and
        gridded on a separate line.
        """
        self.aLabel = tk.Label(self, text="A Value:")
        self.aLabel.grid(row=0, column=0)
        self.bLabel = tk.Label(self, text="B Value:")
        self.bLabel.grid(row=1, column=0)
        self.aEntry = tk.Entry(self, width=3)
        self.bEntry = tk.Entry(self, width=3)
        self.calcButton = tk.Button(self, text="Calculate Hypotenuse", command=self.calculator)
        self.calcButton.grid(row=2, column=0, columnspan=3)
        self.aEntry.grid(row=0, column=1, padx=2)
        self.bEntry.grid(row=1, column=1, padx=2)

    def calculator(self):
        """Read both entries and show the hypotenuse, or an error dialog."""
        try:
            # Only the float conversions are expected to fail; the bare
            # except is narrowed to ValueError so real bugs surface.
            a = float(self.aEntry.get())
            b = float(self.bEntry.get())
        except ValueError:
            self.error = tk.messagebox.showerror("Error", "Please enter valid information")
            return
        self.aValue = a
        self.bValue = b
        # math.hypot is the numerically robust sqrt(a*a + b*b).
        self.hValue = math.hypot(self.aValue, self.bValue)
        # BUGFIX: "Triange" typo corrected in the user-facing message.
        self.message = tk.messagebox.showinfo("Calculation", "Triangle Hypotenuse: {0:.2f}".format(self.hValue))
if __name__ == '__main__':
    # Launch the calculator window and enter the Tk event loop.
    App = Application()
    App.mainloop()
from helpers import analytics, primes

# Start runtime/memory monitoring for the final report line.
analytics.monitor()

limit = 10**7
# Primes up to sqrt(limit) suffice for trial division of any n <= limit.
primesList = primes.primes(int(limit**0.5)+1)
def sf(n):
    """Return the distinct prime factors of n as a tuple when n has at most
    two of them; otherwise return an empty tuple.

    Relies on the module-level ``primesList`` covering all primes up to
    sqrt(n). BUGFIX(consistency): the failure path previously returned a
    list ([]) while success returned a tuple; it now always returns a tuple
    (callers only test length, and only tuples are ever used as dict keys).
    """
    pcount, pf = 0, []
    for p in primesList:
        if p*p > n:
            break
        count = 0
        while n % p == 0:
            n //= p
            count += 1
        if count > 0:
            pf.append(p)
            pcount += 1
        # Early exit: with two factors found and a remainder > 1, a third
        # distinct prime factor is inevitable.
        if pcount == 2 and n > 1:
            return ()
    if n > 1:
        # Whatever remains is itself prime (no factor <= sqrt(original n)).
        pf.append(n)
        pcount += 1
    if pcount > 2:
        return ()
    return tuple(pf)
def main(limit):
    """For each distinct pair of primes, keep the largest n <= limit whose
    distinct prime factors are exactly that pair, then sum those maxima."""
    best_for_pair = {}
    for candidate in range(1, limit + 1):
        factors = sf(candidate)
        if len(factors) == 2:
            # Later candidates overwrite earlier ones, so the stored value
            # is always the maximum n for that factor pair.
            best_for_pair[factors] = candidate
    return sum(best_for_pair.values())
# S(100) = 2262
# S(10**7) = ?
# Print the answer plus elapsed time and peak memory from the monitor.
print(main(limit), analytics.lap(), analytics.maxMem())
from abc import ABC, abstractmethod
from collections import OrderedDict
import pandas as pd
import vcf
from intervaltree import Interval as TreeInterval
from intervaltree import IntervalTree
import io_plt
from call import Call, EventType
from interval import Interval
from interval_collection import IntervalCollection, FeatureCollection
from reference_dictionary import ReferenceDictionary
class Callset(ABC):
@abstractmethod
def __init__(self, sample_to_calls_map: dict, ref_dict: ReferenceDictionary):
"""Constructor for abstract callset representation
Args:
sample_to_calls_map: a map from samples to a FeatureCollection
ref_dict: reference dictionary
"""
super().__init__()
assert len(sample_to_calls_map) > 0
self.ref_dict = ref_dict
self.sample_names = set(sample_to_calls_map.keys())
self.sample_to_calls_map = sample_to_calls_map
self.__preprocess()
# TODO add a check to make sure the callset is not malformed, i.e. the calls don't intersect and
# TODO the intervals in the featurecollections equal to the intervals stored in their corresponding calls
# TODO Also make sure that code doesn't break if one of the contigs is not in the callset
@classmethod
@abstractmethod
def read_in_callset(cls, **kwargs):
pass
def __preprocess(self):
"""
Preprocess the callset by filling the regions with no calls with EventType.NO_CALL events, thereby assigning
an event to every single base.
"""
for sample in self.sample_names:
interval_to_call_map = OrderedDict()
for contig in self.ref_dict.contigs:
contig_interval = self.ref_dict.get_contig_interval_for_chrom_name(contig)
events_on_contig = self.sample_to_calls_map.get(sample)._get_interval_tree(contig)
if not events_on_contig:
continue
result = events_on_contig.copy()
# TODO make code aware of 1-based representation
# TODO i.e. right now some events overlap by a single base
# This hacky code fills potential gaps between calls that lie within interval with NO_CALL events
result.addi(contig_interval.start, contig_interval.end, Call(interval=contig_interval,
sample=sample,
event_type=EventType.NO_CALL,
call_attributes={"QS": 0, "QA": 0}))
result.split_overlaps()
for interval in events_on_contig.items():
result.remove_overlap(interval.begin, interval.end)
for interval in events_on_contig.items():
result.addi(interval.begin, interval.end, Call.deep_copy(interval.data))
for t in sorted(result):
if t.end - t.begin == 1 and t.data.event_type == EventType.NO_CALL:
# intervaltree.split_overlaps will create single base regions which we want to discard
continue
call = Call.deep_copy(t.data)
if t.data.event_type == EventType.NO_CALL:
call.interval = Interval(contig, t.begin, t.end)
interval_to_call_map[Interval(contig, t.begin, t.end)] = call
self.sample_to_calls_map[sample] = FeatureCollection(interval_to_call_map)
def find_intersection_with_interval(self, interval: Interval, sample: str):
"""
Given an interval find all overlapping calls in the callset and truncate them appropriately.
Note: we assume that the calls in the callset do not overlap for a single sample.
Args:
interval: a given interval
sample: sample from the callset
Returns:
A list of sorted, non-overlapping events that completely cover a given interval
"""
assert sample in self.sample_names, "Sample %s is not in the callset" % sample
calls = self.sample_to_calls_map.get(sample)
intersecting_calls = calls.find_intersection(interval)
if not intersecting_calls:
return [(interval, EventType.NO_CALL)]
else:
result = IntervalTree([TreeInterval(call.interval.start, call.interval.end, call.event_type)
for call in intersecting_calls])
max_val = sorted(result)[-1].end
min_val = sorted(result)[0].begin
result.chop(interval.end, max(interval.end, max_val))
result.chop(min(interval.start, min_val), interval.start)
return [(Interval(interval.chrom, t.begin, t.end), t.data) for t in sorted(result)]
def to_string_sample(self, sample):
    """Print a '#sample=<name>' header followed by one 'interval<TAB>call' line per call."""
    print("#sample=%s" % sample)
    callset_feature_collection = self.sample_to_calls_map.get(sample)
    for contig in callset_feature_collection.ordered_contigs:
        for tree_interval in sorted(callset_feature_collection.get_interval_tree(contig)):
            # Bug fix: this print statement appeared twice in the original,
            # duplicating every record in the output.
            print(str(Interval(contig, tree_interval.begin, tree_interval.end)) + '\t' + str(tree_interval.data))
class TruthCallset(Callset):
    """Callset parsed from a truth SV table (tab-separated, one row per event)."""

    def __init__(self, sample_to_calls_map: map, ref_dict: ReferenceDictionary):
        super().__init__(sample_to_calls_map, ref_dict)

    @classmethod
    def read_in_callset(cls, **kwargs):
        # Required kwargs: truth_file, interval_file, reference_dictionary,
        # allele_frequency_threshold.
        assert "truth_file" in kwargs
        truth_file = kwargs["truth_file"]
        interval_file = kwargs["interval_file"]
        ref_dict = kwargs["reference_dictionary"]
        allele_frequency_threshold = kwargs["allele_frequency_threshold"]
        considered_interval_collection = IntervalCollection.read_interval_list(interval_file)
        truth_calls_pd = pd.read_csv(open(truth_file, 'r'), sep="\t", comment="#", header=None,
                                     names=["chrom", "start", "end", "name", "svtype", "samples"],
                                     dtype={"chrom": str, "start": int, "end": int, "name": str, "svtype": str,
                                            "samples": str})
        # Do a single pass over the truth callset to initialize the set of samples contained in it
        sample_set = set()
        for index, row in truth_calls_pd.iterrows():
            sample_names = set(row["samples"].split(","))
            sample_set.update(sample_names)
        # Do the second pass to initialize everything else
        sample_to_calls_map = {}
        # Initialize callset
        for sample in sample_set:
            sample_to_calls_map[sample] = []
        previous_interval_truth = None
        # Diagnostic counters reported at the end of parsing.
        number_of_not_rescued_overlapping_events = 0
        number_of_overlapping_events_same_genotype = 0
        number_of_enveloped_events = 0
        overall_events = 0
        event_filtered_out_due_allele_freq_threshold = 0
        for index, row in truth_calls_pd.iterrows():
            event_type = cls.__get_event_type_from_sv_type(row["svtype"])
            # Skip non-CNV structural variants (only DUP/DEL are evaluated).
            if event_type is None:
                continue
            interval = Interval(row["chrom"], int(row["start"]), int(row["end"]))
            # The truth file must be coordinate-sorted within each contig.
            if previous_interval_truth is not None and interval.chrom == previous_interval_truth.chrom \
                    and interval.start < previous_interval_truth.start:
                raise ValueError("Intervals Interval(%s) and Interval(%s) in truth callset are not in sorted order"
                                 % (previous_interval_truth, interval))
            # Do not include calls outside of our interval list of interest
            if not considered_interval_collection.find_intersection(interval):
                continue
            # Do not include calls with allele frequency above specified
            sample_names = set(row["samples"].split(","))
            overall_events += len(sample_names)
            if len(sample_names) / len(sample_set) > allele_frequency_threshold:
                event_filtered_out_due_allele_freq_threshold += len(sample_names)
                continue
            for sample_name in sample_names:
                call = Call(interval=interval, sample=sample_name, event_type=event_type, call_attributes=None)
                if len(sample_to_calls_map.get(sample_name)) > 0 and sample_to_calls_map.get(sample_name)[-1].interval.intersects_with(interval):
                    last_interval = sample_to_calls_map.get(sample_name)[-1].interval
                    last_call = sample_to_calls_map.get(sample_name)[-1]
                    if last_interval.end <= interval.end and last_call.event_type == call.event_type:
                        # Merge overlapping events with the same call
                        new_interval = Interval(interval.chrom, last_interval.start, interval.end)
                        sample_to_calls_map.get(sample_name)[-1].interval = new_interval
                        number_of_overlapping_events_same_genotype += 1
                    elif interval.end < last_interval.end:
                        # If one call is contained in another only keep the larger call
                        number_of_enveloped_events += 1
                    else:
                        number_of_not_rescued_overlapping_events += 1
                    # In every overlap case the current call is not appended separately.
                    continue
                sample_to_calls_map.get(sample_name).append(call)
            previous_interval_truth = interval
        # Freeze each sample's call list into a FeatureCollection keyed by interval.
        for sample_name in sample_set:
            interval_to_call_map = OrderedDict()
            if sample_to_calls_map.get(sample_name) is None:
                continue
            for index in range(len(sample_to_calls_map.get(sample_name))):
                interval_to_call_map[sample_to_calls_map.get(sample_name)[index].interval] = \
                    sample_to_calls_map.get(sample_name)[index]
            sample_to_calls_map[sample_name] = FeatureCollection(interval_to_call_map)
        io_plt.log("There are %d unique samples in truth set." % (len(sample_set)))
        io_plt.log("There are %d events for all samples in the truth call set." % overall_events)
        io_plt.log("There are %d events that were filtered out due to the allele frequency threshold." % event_filtered_out_due_allele_freq_threshold)
        io_plt.log("There are %d intersecting events in truth set that were not rescued." %
                   number_of_not_rescued_overlapping_events)
        io_plt.log("There are %d overlapping events with the same genotype." %
                   number_of_overlapping_events_same_genotype)
        io_plt.log("There are %d enveloped events with different genotypes." % number_of_enveloped_events)
        return cls(sample_to_calls_map, ref_dict)

    @staticmethod
    def __get_event_type_from_sv_type(sv_type: str):
        """
        This method will return None if Structural Variation event type is not a Copy Number Variant
        """
        cnv_type_events = {"DUP", "DEL"}
        if sv_type not in cnv_type_events:
            return None
        else:
            return EventType[sv_type]
class GCNVCallset(Callset):
    """Callset backed by per-sample gCNV segment VCF files."""

    def __init__(self, sample_to_calls_map: map, ref_dict: ReferenceDictionary):
        super().__init__(sample_to_calls_map, ref_dict)

    @classmethod
    def read_in_callset(cls, **kwargs):
        """Parse single-sample gCNV segment VCFs (kwargs: gcnv_segment_vcfs,
        reference_dictionary) into a GCNVCallset."""
        assert "gcnv_segment_vcfs" in kwargs
        segment_vcf_paths = kwargs["gcnv_segment_vcfs"]
        reference_dictionary = kwargs["reference_dictionary"]
        per_sample_calls = {}
        for path in segment_vcf_paths:
            reader = vcf.Reader(open(path, 'r'))
            # gCNV segment VCFs are expected to be single-sample
            assert len(reader.samples) == 1
            sample = reader.samples[0]
            calls_by_interval = OrderedDict()
            for rec in reader:
                segment = Interval(rec.CHROM, rec.POS, rec.INFO['END'])
                genotype = rec.genotype(sample)
                segment_call = Call(interval=segment,
                                    sample=sample,
                                    event_type=EventType.gcnv_genotype_to_event_type(int(genotype['GT'])),
                                    call_attributes={'QS': int(genotype['QS']),
                                                     'QA': int(genotype['QA']),
                                                     'NP': int(genotype['NP'])})
                calls_by_interval[segment] = segment_call
            per_sample_calls[sample] = FeatureCollection(calls_by_interval)
        return cls(per_sample_calls, reference_dictionary)
|
# equality and inequality with strings
session = 'discrete structures'
print(session == 'discrete structures')
print(session != 'character building')

# using the .lower() function
place = 'New York'
print(place == 'new york')
print(place.lower() == 'new york')
print(place != 'Vegas')

# numerical tests
number = 9
print(number == 9)
print(number != 9)
print(number < 8)
print(number > 10)
print(number <= 11)
print(number >= 9)

# using and / or
bus = '6M'
# Bug fix: "bus == '6M' or '9C'" always evaluated truthy, because the
# non-empty string '9C' is truthy on its own; each side of `or` needs
# its own comparison.
print(bus == '6M' or bus == '9C')
stops = 'semanggi'
print(stops == 'semanggi' and stops == 'GBK')

# item in a list
favorite_colors = ['purple', 'blue', 'green']
color = 'green'
if color in favorite_colors:
    print('In list')

# item not in a list
favorite_colors = ['purple', 'blue', 'green']
color = 'pink'
# Bug fix: the original tested `color not in list`, i.e. membership against
# the builtin type `list`, which raises TypeError; the intended container
# is favorite_colors.
if color not in favorite_colors:
    print('Not in list')
import requests
import re
from bs4 import BeautifulSoup
import urllib
import pymssql
def hhh1(url):
    """Fetch *url* (30 s timeout) and return its body decoded with the apparent encoding."""
    response = requests.get(url, timeout=30)
    response.encoding = response.apparent_encoding
    return response.text
def hhh2(url, liebiao):
    """Scrape one review page and append [username, time, comment] rows to *liebiao*."""
    soup = BeautifulSoup(hhh1(url), 'html.parser')
    user_tags = soup.find_all('a', "ri-uname")
    time_tags = soup.find_all('div', "ri-time")
    remark_tags = soup.find_all('div', "ri-remarktxt")
    for idx, remark in enumerate(remark_tags):
        # a remark may be split across several child nodes; stitch them together
        comment = ''
        for node in remark.contents:
            comment += node.string.replace('\r', '').replace('\n', '')
        liebiao.append([user_tags[idx].attrs['title'], time_tags[idx].string, comment])
def hhh3(liebiao, lujing):
    """Dump scraped reviews to a text file and mirror them into SQL Server.

    liebiao: list of [username, time, comment] rows.
    lujing:  path of the text file to append to.
    Rows whose DB insert fails are appended to ERROR.txt instead.
    """
    server = "localhost"
    user = "sa"
    password = "19980501."
    database = "ChongMing"
    for name_, time_, pingjia_ in liebiao:
        with open(lujing, 'a', encoding='utf-8') as f:
            f.write(name_ + '๏ผ ' + time_ + ' ' + pingjia_ + '\n\n\n')
        # Bug fix: the original built the statement with str.format, which
        # breaks on quotes inside comments and is open to SQL injection;
        # use pymssql's %s parameter substitution instead.  It also used a
        # bare `except:` in which SQL could be unbound if connect() failed.
        sql = ("insert into origin (username, destination, usercomment, time, platform) "
               "values (%s, %s, %s, %s, %s)")
        params = (name_, "ๅดๆๅฒ", pingjia_, time_, "็พๅบฆๆๆธธ")
        try:
            conn = pymssql.connect(server, user, password, database)
            try:
                cursor = conn.cursor()
                cursor.execute(sql, params)
                conn.commit()
            finally:
                conn.close()
        except pymssql.Error:
            # best-effort error log, mirroring the original behaviour
            with open("ERROR.txt", "a", encoding='utf-8') as err_file:
                err_file.write(sql + " -- " + repr(params) + "\n")
def main():
    """Crawl 34 pages of Baidu reviews for Chongming Island and persist them."""
    liebiao = []
    lujing = 'D://chongming.doc'
    for page in range(34):
        offset = 15 * page  # 15 reviews per page
        url = ('https://lvyou.baidu.com/chongmingdao/remark/?rn=15&pn='
               + str(offset) + '&style=hot#remark-container')
        hhh2(url, liebiao)
    hhh3(liebiao, lujing)


# Run only when executed as a script, not on import (the original called
# main() unconditionally at module import time).
if __name__ == '__main__':
    main()
|
#Setting the environment
import os, sys, collections
# Point Spark at a local standalone install and make its binaries importable.
os.environ['SPARK_HOME']="/Users/abhisheksingh29895/Desktop/programming/spark-1.6.0-bin-hadoop2.6"
sys.path.append("/Users/abhisheksingh29895/Desktop/programming/spark-1.6.0-bin-hadoop2.6/bin")
sys.path.append("/Users/abhisheksingh29895/anaconda/lib/python2.7/site-packages/")
# findspark wires the SPARK_HOME above into sys.path (Python 2 / Spark 1.6 era script).
import findspark
findspark.init()
from pyspark import SparkConf, SparkContext
# Local-mode Spark context with 2 GB of executor memory.
conf = SparkConf().setMaster("local").setAppName("My app").set("spark.executor.memory", "2g")
sc = SparkContext(conf=conf)
#Loading the files
# Book-Crossing dataset: ratings, book metadata and user demographics (';'-separated CSVs).
path = "/Users/abhisheksingh29895/Desktop/courses/CURRENT/Advance_Machine_Learning/HW2/BX-CSV-Dump/"
ratings = sc.textFile(path + "BX-Book-Ratings.csv")
books = sc.textFile(path + "BX-Books.csv")
user = sc.textFile(path + "BX-Users.csv")
#Counting the number of rows in the Ratings set
def number_of_ratings(data):
    """
    param: Ratings Dataset
    return: (Explicit/Implicit Ratings, count)
    """
    # Drop the header row (contains "User-ID"), then split 'user;isbn;rating'.
    lines = data.filter(lambda p: "User-ID" not in p)
    split_lines = lines.map(lambda x: (x.split(";")[0], x.split(";")[1], x.split(";")[2]))
    # Frequency of each rating value; the string '"0"' marks an implicit rating.
    dict_data = split_lines.map(lambda x: x[2]).countByValue()
    total = sum(dict_data.values()) ; implicit = dict_data['"0"'] ; explicit = total-implicit
    print ""
    print "Number of Explicit Ratings are %s" %(explicit)
    print "Number of Implicit Ratings are %s" %(implicit)
#function to create a RDD with count for each ratings
def ratings_frequency(data):
    """
    param: Ratings Dataset
    return: RDD of ratings/Counts
    """
    # Drop the header row, then split 'user;isbn;rating'.
    lines = data.filter(lambda p: "User-ID" not in p)
    split_lines = lines.map(lambda x: (x.split(";")[0], x.split(";")[1], x.split(";")[2]))
    # (rating, 1) pairs reduced to per-rating counts, sorted by rating value.
    split_lines1 = split_lines.map(lambda x: (x[2],1))
    rdd_data = split_lines1.reduceByKey(lambda x, y: x + y)
    rdd_data = rdd_data.sortByKey()
    print ""
    print "An RDD with [Ratings: (Count of Ratings)] has been created"
#function to create a RDD with average ratings per city
def avg_ratings_per_city(data1, data2):
    """
    param: Ratings Dataset, User Dataset
    return: city/avg.Ratings
    """
    # Explicit ratings only: drop the header and the implicit '"0"' entries.
    lines1 = data1.filter(lambda p: "User-ID" not in p)
    split_lines = lines1.map(lambda x: (x.split(";")[0], x.split(";")[1], x.split(";")[2]))
    split_lines1 = split_lines.map(lambda x: (x[0],x[2]))
    split_lines3 = split_lines1.filter(lambda x: x[1] != u'"0"')
    # Users file: (user_id, city) where city is the first token of "Location".
    lines2 = data2.filter(lambda p: "User-ID" not in p)
    split_lines2 = lines2.map(lambda x: (x.split(";")[0], x.split(";")[1].split(",")[0]))
    # NOTE(review): collect() followed by parallelize() round-trips through the
    # driver; joining and grouping directly on the RDD would avoid that.
    full_data = split_lines3.join(split_lines2).collect()
    table = sc.parallelize(full_data)
    # Strip surrounding quotes and convert rating strings to ints.
    table1 = table.map(lambda x: (x[1][1].encode('utf8')[1:], int(x[1][0].encode('utf8')[1:len(x[1][0])-1])))
    table2 = table1.groupByKey().mapValues(lambda x: list(x))
    table3 = table2.map(lambda x: sum(x[1])*1.0/len(x[1]))
    print ""
    print "An RDD with [City: Avg_Ratings(Explicit)] has been created"
#function to give the city with highest number of ratings
def city_highest_number_ratings(data1, data2):
    """
    param: Ratings Dataset, User Dataset
    return: city
    """
    lines1 = data1.filter(lambda p: "User-ID" not in p)
    split_lines = lines1.map(lambda x: (x.split(";")[0], x.split(";")[1], x.split(";")[2]))
    # (user, rating) joined against (user, city).
    split_lines1 = split_lines.map(lambda x: (x[0],x[2]))
    lines2 = data2.filter(lambda p: "User-ID" not in p)
    split_lines2 = lines2.map(lambda x: (x.split(";")[0], x.split(";")[1].split(",")[0]))
    # NOTE(review): collect()+parallelize round-trips through the driver.
    full_data = split_lines1.join(split_lines2).collect()
    table = sc.parallelize(full_data)
    table1 = table.map(lambda x: (x[1][1],x[1][0]))
    # Count ratings per city and take the most frequent one.
    dict = table1.countByKey()
    dict1 = sorted(dict.items(),key = lambda x :x[1], reverse = True)
    dict1[0][0].encode("utf8")
    print ""
    print "City with the highest number of ratings is %s" %(str(dict1[0][0])[1:])
#function to create a RDD with number of ratings per author
def ratings_per_author(data1, data2):
    """
    param: Ratings Dataset, Books Dataset
    return: ratings, author
    """
    lines1 = data1.filter(lambda p: "User-ID" not in p)
    split_lines = lines1.map(lambda x: (x.split(";")[0], x.split(";")[1], x.split(";")[2]))
    # Keep explicit ratings only, keyed by ISBN.
    split_lines1 = split_lines.map(lambda x: (x[1],x[2]))
    split_lines3 = split_lines1.filter(lambda x: x[1] != u'"0"')
    # Books file: (ISBN, author).
    lines2 = data2.filter(lambda p: "User-ID" not in p)
    split_lines2 = lines2.map(lambda x: (x.split(";")[0], x.split(";")[2]))
    full_data = split_lines3.join(split_lines2).collect()
    table = sc.parallelize(full_data)
    table1 = table.map(lambda x: (x[1][1],x[1][0]))
    # NOTE(review): the values here are rating *strings*, so '+' concatenates
    # rather than counts — confirm this matches the intended "number of ratings".
    ratings_author = table1.reduceByKey(lambda x, y: x + y)
    print ""
    print "An RDD with [author: Number_ratings(Explicit)] has been created"
#function to create a RDD with number of ratings per user
def ratings_per_user(data):
    """
    param: Ratings Dataset
    return: ratings, user
    """
    lines1 = data.filter(lambda p: "User-ID" not in p)
    split_lines = lines1.map(lambda x: (x.split(";")[0], x.split(";")[1], x.split(";")[2]))
    # Keep explicit ratings only, keyed by user id.
    split_lines1 = split_lines.map(lambda x: (x[0],x[2]))
    split_lines2 = split_lines1.filter(lambda x: x[1] != u'"0"')
    # NOTE(review): the values are rating *strings*, so '+' concatenates rather
    # than counts — confirm this matches the intended "number of ratings".
    ratings_user = split_lines2.reduceByKey(lambda x, y: x + y)
    print "An RDD with [User: Number_ratings(Explicit)] has been created"
"""
#Part (2) : Document Classification using Naive Bayes Classifier
"""
#Function to use the standard Naive Bayes classifier of Spark to predict Document categories
def use_naive_nayes():
"""
Running the Naive Bayes from Spark's Mlib library
"""
from pyspark.mllib.classification import NaiveBayes
from pyspark.mllib.feature import HashingTF, IDF
from pyspark.mllib.linalg import SparseVector, Vectors
from pyspark.mllib.regression import LabeledPoint
#loading the files
path = "/Users/abhisheksingh29895/Desktop/courses/CURRENT/Advance_Machine_Learning/HW2/aclImdb/"
train_pos = sc.textFile(path + "train/pos/*txt").map(lambda line: line.encode('utf8')).map(lambda line: line.split())
train_neg = sc.textFile(path + "train/neg/*txt").map(lambda line: line.encode('utf8')).map(lambda line: line.split())
test_pos = sc.textFile(path + "test/pos/*txt").map(lambda line: line.encode('utf8')).map(lambda line: line.split())
test_neg = sc.textFile(path + "test/neg/*txt").map(lambda line: line.encode('utf8'))
#TF-IDF
tr_pos = HashingTF().transform(train_pos) ; tr_pos_idf = IDF().fit(tr_pos)
tr_neg = HashingTF().transform(train_neg) ; tr_neg_idf = IDF().fit(tr_neg)
te_pos = HashingTF().transform(test_pos) ; te_pos_idf = IDF().fit(te_pos)
te_neg = HashingTF().transform(test_neg) ; te_neg_idf = IDF().fit(te_neg)
#IDF step
tr_pos_tfidf = tr_pos_idf.transform(tr_pos) ; tr_neg_tfidf = tr_neg_idf.transform(tr_neg)
te_pos_tfidf = te_pos_idf.transform(te_pos) ; te_neg_tfidf = te_neg_idf.transform(te_neg)
#Creating labels
pos_label = [1] * 12500 ; pos_label = sc.parallelize(pos_label)
neg_label = [1] * 12500 ; neg_label = sc.parallelize(neg_label)
# Combine using zip
train_pos_file = pos_label.zip(tr_pos_tfidf).map(lambda x: LabeledPoint(x[0], x[1]))
train_neg_file = neg_label.zip(tr_neg_tfidf).map(lambda x: LabeledPoint(x[0], x[1]))
test_pos_file = pos_label.zip(te_pos_tfidf).map(lambda x: LabeledPoint(x[0], x[1]))
test_neg_file = neg_label.zip(te_neg_tfidf).map(lambda x: LabeledPoint(x[0], x[1]))
#Joining 2 RDDS to form the final training set
train_file = train_pos_file.union(train_neg_file)
test_file = test_pos_file.union(test_neg_file)
# Fitting a Naive bayes model
model = NaiveBayes.train(train_file)
# Make prediction and test accuracy
predictionAndLabel = test_file.map(lambda p: (model.predict(p[1]), p[0]))
accuracy = 1.0 * predictionAndLabel.filter(lambda (x, v): x == v).count() / test.count()
print ""
print "Test accuracy is {}".format(round(accuracy,4))
#function for cleaning Text
def process_text(record):
    """ Tokenize text and remove stop words."""
    text = record['text']
    stopWords = ['a', 'able', 'about', 'across', 'after', 'all', 'almost', 'also','am', 'among', 'an', 'and', 'any'
        ,'are', 'as', 'at', 'be','because', 'been', 'but', 'by', 'can', 'cannot', 'could', 'dear','did', 'do', 'does'
        , 'either', 'else', 'ever', 'every', 'for', 'from', 'get', 'got', 'had', 'has', 'have', 'he', 'her', 'hers'
        , 'him', 'his', 'how', 'however', 'i', 'if', 'in', 'into', 'is','it', 'its', 'just', 'least', 'let', 'like'
        , 'likely', 'may', 'me', 'might', 'most', 'must', 'my', 'neither', 'no', 'nor','not', 'of', 'off', 'often'
        , 'on', 'only', 'or', 'other', 'our', 'own', 'rather', 'said', 'say', 'says', 'she', 'should', 'since', 'so'
        , 'some', 'than', 'that', 'the', 'their', 'them', 'then', 'there', 'these', 'they', 'this', 'tis', 'to', 'too'
        , 'twas', 'us', 've', 'wants', 'was', 'we', 'were', 'what', 'when', 'where', 'which','while', 'who', 'whom'
        , 'why', 'will', 'with', 'would', 'yet', 'you', 'your', 'NA', '..........', '%', '@']
    # Strip punctuation characters from every element of *text*, then drop stop words.
    cleaned = []
    for token in text:
        cleaned.append(''.join(ch for ch in token if ch not in string.punctuation))
    kept = [w for w in cleaned if w not in stopWords]
    return {'label': record['label'], 'words': kept}
#Function to count the words
def count_word(record, index):
    """Return the count stored at position *index* of the record's feature vector."""
    feature_vector = record.features
    return feature_vector[index]
#Function to classify a test record
def classify_test_record(record, log_pos_prior, log_neg_prior, log_pos_probs, log_neg_probs):
    """Return 1 (positive) or 0 (negative) by comparing class log-likelihoods."""
    counts = np.array(record.features)
    score_pos = log_pos_prior + np.dot(counts, log_pos_probs)
    score_neg = log_neg_prior + np.dot(counts, log_neg_probs)
    return 1 if score_pos > score_neg else 0
#Function to calculate probability for the given words
def calc_probability(word_count, total_words, total_unique_words):
    """Laplace-smoothed word probability: (count + 1) / (total + vocab + 1)."""
    numerator = word_count + 1
    denominator = total_words + total_unique_words + 1
    return float(numerator) / denominator
#Function to build a Naive bayes Classifier from scratch and classify documents
def build_naive_bayes():
"""
Building the Naive Bayes from Spark
"""
import string, numpy as np
from collections import Counter
from pyspark.mllib.classification import NaiveBayes
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.regression import LabeledPoint
#loading the files
#path = "/Users/abhisheksingh29895/Desktop/courses/CURRENT/Advance_Machine_Learning/HW2/aclImdb/"
path = "s3n://amldataabhi/HW2/"
train_pos = sc.textFile(path + "train/pos/*txt").map(lambda line: line.encode('utf8'))
train_neg = sc.textFile(path + "train/neg/*txt").map(lambda line: line.encode('utf8'))
test_pos = sc.textFile(path + "test/pos/*txt").map(lambda line: line.encode('utf8'))
test_neg = sc.textFile(path + "test/neg/*txt").map(lambda line: line.encode('utf8'))
#Binding the Positive & Negatives sets
train = train_pos.map(lambda x: {'label':1, 'text':x}).union(train_neg.map(lambda x: {'label':0, 'text':x}))
test = test_pos.map(lambda x: {'label':1, 'text':x}).union(test_neg.map(lambda x: {'label':0, 'text':x}))
#Processing the test
train = train.map(process_text) ; test = test.map(process_text)
#Creating a dictionary
vocabulary_rdd = train.flatMap(lambda x: x['words']).distinct()
vocabulary = vocabulary_rdd.collect()
#Function to count the number of words for this
def count_words(record, vocabulary):
word_counts = Counter(record['words'])
word_vector = []
for word in vocabulary:
word_vector.append(word_counts[word])
label = record['label']
features = Vectors.dense(word_vector)
return LabeledPoint(label, features)
#
#Word count on each of the file
train_data = train.map(lambda record: count_words(record, vocabulary)).repartition(16)
test_data = test.map(lambda record: count_words(record, vocabulary)).repartition(16)
#Using MLib model
model = NaiveBayes.train(train_data, 1.0)
#making our own model
total_training = train.count()
pos_prior = train_pos.count() * 1.0/ total_training ; neg_prior = 1 - pos_prior ; num_unique_words = len(vocabulary)
pos_total_words = train_data.filter(lambda x: x.label == 1).map(lambda x: sum(x.features)).reduce(lambda x1, x2: x1 + x2)
neg_total_words = train_data.filter(lambda x: x.label == 0).map(lambda x: sum(x.features)).reduce(lambda x1, x2: x1 + x2)
vocabulary_rdd_index = vocabulary_rdd.zipWithIndex().collect()
#Creating RDDS of the words for each category
pos_word_counts_rdd = train_data.filter(lambda x: x.label == 1).\
flatMap(lambda x: list(enumerate(x.features))).\
reduceByKey(lambda x1, x2: x1 + x2).sortByKey()
neg_word_counts_rdd = train_data.filter(lambda x: x.label == 0).\
flatMap(lambda x: list(enumerate(x.features))).\
reduceByKey(lambda x1, x2: x1 + x2).sortByKey()
#Storing list of all words
pos_word_counts = [] ; pos_probs = [] ; neg_word_counts = [] ; neg_probs = [] #To store the list of all positives
for word, index in vocabulary_rdd_index:
word_p = train_data.filter(lambda x: x.label == 1).map(lambda x: x.features[index]).reduce(lambda x1, x2: x1 + x2)
word_n = train_data.filter(lambda x: x.label == 0).map(lambda x: x.features[index]).reduce(lambda x1, x2: x1 + x2)
word_prob_p = float(word_p + 1) / (pos_total_words + num_unique_words + 1)
word_prob_n = float(word_n + 1) / (neg_total_words + num_unique_words + 1)
pos_word_counts.append(word_count) ; pos_probs.append(word_prob)
neg_word_counts.append(word_count) ; neg_probs.append(word_prob)
#Creatng RDDS for each of the groups
pos_probs_rdd = pos_word_counts_rdd.map(lambda x: calc_probability(x[1], pos_total_words, num_unique_words))
neg_probs_rdd = neg_word_counts_rdd.map(lambda x: calc_probability(x[1], neg_total_words, num_unique_words))
#Calculating the log of probabilities
log_pos_prior , log_neg_prior = np.log(pos_prior), np.log(neg_prior)
log_pos_probs, log_neg_probs = np.log(np.array(pos_probs)), np.log(np.array(neg_probs))
#Making classification based on conditional probabilities
classifications = test_data.map(lambda x: classify_test_record(x, log_pos_prior, log_neg_prior, log_pos_probs, log_neg_probs))
correct = classifications.zip(test_data.map(lambda x: x.label)).filter(lambda x: x[0] == x[1]).count()
#Accuracy is
accuracy = correct / test_data.count()
print ""
print "Test accuracy is {}".format(round(accuracy,4))
#Calling the main function to run the code
if __name__ == '__main__':
print "******* Q.1) Part 1] Number of Ratings (Explicit / Implicit)**********"
number_of_ratings(ratings)
print "Done"
print "******* Q.1) Part 2] Count of each ratings**********"
ratings_frequency(ratings)
print "Done"
print "******* Q.1) Part 3] average ratings per city **********"
avg_ratings_per_city(ratings,user)
print "Done"
print "******* Q.1) Part 4] city with the highest rating **********"
city_highest_number_ratings(ratings,user)
print "Done"
print "******* Q.1) Part 5] city with the highest rating **********"
ratings_per_author(ratings,books)
ratings_per_user(ratings)
print "Done"
print "******* Question 1 Over, now using data from AWS for Naive Bayes **********"
exit()
#First SSH to the PEM file to activate the instance
#ssh -i ~/Abhishek3.pem hadoop@ec2-54-186-36-60.us-west-2.compute.amazonaws.com
pyspark #on EMR (Hadoop) Instance
AWS_ACCESS_KEY_ID = #123455666666666666
AWS_SECRET_ACCESS_KEY = #1234556666666666663446464748484
#Enabling the hadoop path for spark
sc._jsc.hadoopConfiguration().set("fs.s3n.awsAccessKeyId", AWS_ACCESS_KEY_ID)
sc._jsc.hadoopConfiguration().set("fs.s3n.awsSecretAccessKey", AWS_SECRET_ACCESS_KEY)
print "Done"
print "******* Q.2) Part 1] Document Classification using Standard Naive Bayes **********"
use_naive_bayes()
print "Done"
print "******* Q.2) Part 2] Document Classification using my own Naive Bayes **********"
build_naive_bayes()
print "******* I have partnered with Jason Helgren from MSAN 2016 for this task!! **********"
print "Done"
|
from microWebSrv.microWebSrv import MicroWebSrv
import json
# from time import sleep
from _thread import allocate_lock # ,start_new_thread
# C:\Users\yaniv\AppData\Local\Programs\Thonny\Lib\site-packages\thonny\plugins\micropython\api_stubs
from machine import Pin
# Route table filled in by the @MicroWebSrv.route decorators below.
routeHandlers = []
# ( "/test", "GET", _httpHandlerTestGet ),
# ( "/test", "POST", _httpHandlerTestPost )
# ]
# LED on GPIO5, driven high at boot.  NOTE(review): the /led handler treats
# status 'false' as led.on(), which suggests active-low wiring — confirm.
led = Pin(5, Pin.OUT, value=1) # 1, Pin.PULL_UP
# btn = Pin(0, Pin.IN) # Pin.PULL_UP
# led.value(1)
# led.off() # with active-low wiring on/off are visually inverted
print('events_data page load')
# Shared state for websocket broadcasting: list of open sockets plus a lock.
global _chatWebSockets
_chatWebSockets = [ ]
global _chatLock
_chatLock = allocate_lock()
# Result slot used by the exec()-based remote eval in OnWSChatTextMsg.
global res
def btn_change(pin):
    """IRQ callback: broadcast the (currently stubbed) button state to all websocket clients."""
    cur_btn = 1  # hardware read disabled; would be btn()
    with _chatLock:
        payload = json.dumps({'btn': str(cur_btn == 1)})
        for client in _chatWebSockets:
            try:
                client.SendText(payload)
            except:
                pass
            print('ws sending: ', cur_btn)
    if cur_btn == 1:  # 1 means the button is not pressed
        print('btn not pressed')
    else:
        print('btn pressed')
# btn.irq(btn_change)
# ----------------------------------------------------------------------------
# test get query parameters [/send?name=yaniv&last=cohen]
@MicroWebSrv.route('/led')
def _httpHandlerEditWithArgs(httpClient, httpResponse):
    """Handle GET /led?status=true|false: switch the LED and notify websocket clients."""
    query = httpClient.GetRequestQueryParams()
    # print('QueryParams', query)
    body = ""
    if 'status' in query:
        status = query['status']
        if status == 'false':
            led.on()
        else:
            led.off()
        print('led is: ', status)
        with _chatLock:
            message = json.dumps({'led': str(status == 'false')})
            for client in _chatWebSockets:
                try:
                    client.SendText(message)
                except:
                    pass
                print('ws sending: ', status == 'false')
    httpResponse.WriteResponseOk(headers=None,
                                 contentType="text/html",
                                 contentCharset="UTF-8",
                                 content=body)
# ----------------------------------------------------------------------------
def WSJoinChat(webSocket, addr):
    # Register per-socket callbacks for incoming text and socket close.
    webSocket.RecvTextCallback = OnWSChatTextMsg
    # webSocket.RecvBinaryCallback = _recvBinaryCallback
    webSocket.ClosedCallback = OnWSChatClosed
    # addr = webSocket.Request.UserAddress
    with _chatLock:
        # NOTE(review): this prints the join notice once per *existing* client;
        # it looks like it was meant to send the notice to each client instead
        # of printing it — confirm intent.
        for ws in _chatWebSockets:
            print('<%s:%s HAS JOINED THE CHAT>' % addr)
        _chatWebSockets.append(webSocket)
    print('<WELCOME %s:%s>' % addr)
def OnWSChatTextMsg(webSocket, msg):
    # Incoming frame is JSON; {'msg': <python source>} is executed on-device
    # and the value left in the global `res` is echoed back as {'res': ...}.
    print('msg is: ', msg)
    recv = json.loads(msg)
    if 'msg' in recv:
        msgIn = recv['msg']
        print('msg is: ', msgIn)
        global res
        res = None
        # SECURITY: exec() of client-supplied text gives the remote peer full
        # control of the device; acceptable only on a trusted network/debug build.
        exec(msgIn)
        if res != None:
            print('res is: ', res)
            send = {}
            send['res'] = str(res)
            try: webSocket.SendText(json.dumps(send))
            except: pass
def OnWSChatClosed(webSocket):
    """Drop a closed websocket from the broadcast list."""
    _chatWebSockets.remove(webSocket)
    print("WS CLOSED")
# ============================================================================
|
#############################################################
# Quarterly Performance Update Script using Naver Finance
#############################################################
# How to run
# ex> q_perf_update.py 2020.06
#
#
#############################################################
import pandas as pd
import requests
import sqlite3

# Naver Finance per-stock main page; the stock code is appended to this URL.
NF_URL = "https://finance.naver.com/item/main.nhn?code="
dbPath = "c:/StockDB/"
stockDb = "myStock.db"
# quarterly performance table info
qp_table = "q_perf_report"
# Column names of the quarterly-performance table.
# NOTE(review): the Korean identifiers below are mojibake from a lost source
# encoding; they must stay byte-identical to the existing DB schema — confirm.
qp_field = [ "์ข๋ชฉ์ฝ๋", "๋ถ๊ธฐ", "๋งค์ถ์ก", "์์์ด์ต", "๋น๊ธฐ์์ด์ต",
            "์์์ด์ต๋ฅ ", "์์ด์ต๋ฅ ", "ROE", "๋ถ์ฑ๋น์จ", "๋น์ข๋น์จ",
            "์ ๋ณด์จ", "EPS", "PER", "BPS", "PBR" ]
qp_type = [ "text", "text", "int", "int", "int",
            "real", "real", "real", "real", "real",
            "real", "int", "real", "int", "real" ]
# Naver's Quarterly Performance Actual Key in Dictionary
qp_name = [ "์ข๋ชฉ์ฝ๋", "๋ถ๊ธฐ", "๋งค์ถ์ก", "์์์ด์ต", "๋น๊ธฐ์์ด์ต",
            "์์์ด์ต๋ฅ ", "์์ด์ต๋ฅ ", "ROE(์ง๋ฐฐ์ฃผ์ฃผ)", "๋ถ์ฑ๋น์จ", "๋น์ข๋น์จ",
            "์ ๋ณด์จ", "EPS(์)", "PER(๋ฐฐ)", "BPS(์)", "PBR(๋ฐฐ)" ]
# Filled by initDB() with the stock codes of each exchange.
kospi_list = []
kosdaq_list = []
# connect to db
stock_con = sqlite3.connect(dbPath + stockDb)
stock_cur = stock_con.cursor()
# DEBUG:0 (disabled) or DEBUG:1 (enabled)
DEBUG = 1
def debug_print(x):
    """Print *x* only when the module-level DEBUG flag is enabled."""
    if DEBUG != 0:
        print(x)
# create qp_table field for check_n_create_QPTable()
def create_QPField():
    """Build the column-definition clause for the CREATE TABLE statement.

    The first two columns (code, quarter) are NOT NULL and together form the
    primary key; the PRIMARY KEY(...) clause is appended after the last column.
    """
    pk_clause = "PRIMARY KEY("
    clause = "("
    last_index = len(qp_field) - 1
    for i, (col_name, col_type) in enumerate(zip(qp_field, qp_type)):
        clause += col_name + " " + col_type
        if i == 0:
            clause += " NOT NULL, "
            pk_clause += col_name + ", "
        elif i == 1:
            clause += " NOT NULL, "
            pk_clause += col_name + ") "
        elif i == last_index:
            clause += ", " + pk_clause + ");"
        else:
            clause += ", "
    return clause
# check and create quarterly performance table if not exist
def check_n_create_QPTable():
    """Create the quarterly-performance table on first run (no-op if it already exists)."""
    # Query sqlite's catalog for the table name.
    stock_cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='" + qp_table + "'")
    exist = stock_cur.fetchall()
    if len(exist) == 0:
        # create tables
        field = create_QPField()
        create_qp_tbl_sql = "CREATE TABLE " + qp_table + field
        debug_print(create_qp_tbl_sql)
        stock_cur.execute(create_qp_tbl_sql)
        stock_con.commit()
        debug_print("๋ถ๊ธฐ์ค์ ํ์ด๋ธ ์์ฑ: " + qp_table)
    else:
        debug_print("๋ถ๊ธฐ์ค์ ํ์ด๋ธ ์กด์ฌ: " + qp_table)
def get_jongmokCode(table_name):
    """Return every non-empty stock code (first column) stored in *table_name*."""
    query = "SELECT * FROM " + table_name + ";"
    debug_print(query)
    stock_cur.execute(query)
    rows = stock_cur.fetchall()
    codes = [row[0] for row in rows if row[0] != '']
    debug_print("DB ์ข๋ชฉ์ฝ๋ ๋ฆฌ์คํธ ๋ฆฌํด: " + str(len(codes)))
    return codes
def get_jongmokName(table, code):
    """Return the stock name for *code* from *table* ('kospi'/'kosdaq'), or "" if absent."""
    # NOTE(review): the Korean column names are mojibake of the original schema
    # and the code value is interpolated directly into the SQL text (codes come
    # from our own DB, so injection exposure is limited) — confirm against schema.
    name_sql = "SELECT ์ข๋ชฉ๋ช FROM " + table + " WHERE ์ข๋ชฉ์ฝ๋='" + code +"';"
    stock_cur.execute(name_sql)
    records = stock_cur.fetchall()
    if len(records) == 0:
        return ""
    return records[0][0]
def get_jongmokNameByCode(code):
    """Resolve a stock code to its name, checking KOSPI first, then KOSDAQ."""
    name = get_jongmokName("kospi", code)
    if name != "":
        return name
    return get_jongmokName("kosdaq", code)
def initDB():
    """Create the report table if needed and cache the KOSPI/KOSDAQ code lists."""
    global kospi_list
    global kosdaq_list
    # check & create quarterly performance table
    check_n_create_QPTable()
    kospi_list = get_jongmokCode("kospi")
    for banner in ("---------------kospi--------------",
                   "| record count: " + str(len(kospi_list)),
                   "---------------kospi--------------"):
        debug_print(banner)
    debug_print(kospi_list)
    kosdaq_list = get_jongmokCode("kosdaq")
    for banner in ("---------------kosdaq--------------",
                   "| record count: " + str(len(kosdaq_list)),
                   "---------------kosdaq--------------"):
        debug_print(banner)
    debug_print(kosdaq_list)
def input_year_quarter():
    """Prompt for a quarter string like '2020.06'; return it, or "" on bad format."""
    print("๊ฐ์ ธ์ฌ ๋ถ๊ธฐ๋ฅผ ์๋ ฅํ์ธ์. ์: 2020.06")
    quarter_str = input()
    # Rough validation only: 'YYYY.MM' is exactly 7 characters.
    if len(quarter_str) != 7:
        print("ERROR: ์๋ชป๋ ํฌ๋งท์ ์๋ ฅ์๋๋ค.")
        return ""
    return quarter_str
def get_quarter():
    """Keep prompting until input_year_quarter() returns a non-empty quarter string."""
    quarter = ""
    while not quarter:
        quarter = input_year_quarter()
    return quarter
#######################################################################
# This is test function to check all the layout of html
# only for development. If there is any problem in parsing data,
# use this function and adapt script to new page layout and
# modify get_n_parse_jongmok()
def test_get_n_parse_jongmok(jongmok_code, quarter):
    # Dump tables 5-9 of the Naver page to inspect the current layout.
    nf_resp = requests.get(NF_URL + jongmok_code)
    df = pd.read_html(nf_resp.text)[5]
    df = df.append(pd.read_html(nf_resp.text)[6])
    df = df.append(pd.read_html(nf_resp.text)[7])
    df = df.append(pd.read_html(nf_resp.text)[8])
    df = df.append(pd.read_html(nf_resp.text)[9])
    print(df)
    print(df.iloc[0][1])
    # Table 3 is the financial summary; index by the triple-level header column.
    financial_stmt = pd.read_html(nf_resp.text)[3]
    financial_stmt.set_index(('์ฃผ์์ฌ๋ฌด์ ๋ณด', '์ฃผ์์ฌ๋ฌด์ ๋ณด', '์ฃผ์์ฌ๋ฌด์ ๋ณด'), inplace=True)
    financial_stmt.index.rename('์ฃผ์์ฌ๋ฌด์ ๋ณด', inplace=True)
    financial_stmt.columns = financial_stmt.columns.droplevel(2)
    fs_dict = financial_stmt.to_dict()
    print(fs_dict)
    # Spot-check three headline figures for the requested quarter.
    print(fs_dict[('์ต๊ทผ ๋ถ๊ธฐ ์ค์ ', quarter)]['๋งค์ถ์ก'])
    print(fs_dict[('์ต๊ทผ ๋ถ๊ธฐ ์ค์ ', quarter)]['์์์ด์ต'])
    print(fs_dict[('์ต๊ทผ ๋ถ๊ธฐ ์ค์ ', quarter)]['๋น๊ธฐ์์ด์ต'])
def get_n_parse_jongmok(jongmok_code, quarter):
    """Fetch the Naver Finance page for *jongmok_code* and extract the quarterly
    performance column for *quarter*.

    Returns a list [code, quarter, value, ...] in qp_field order, or None when
    the page has no usable financial summary (ETF-like listings, new listings,
    layout/parse failures).
    """
    nf_resp = requests.get(NF_URL + jongmok_code)
    try:
        financial_stmt = pd.read_html(nf_resp.text)[3]
    except:
        # Page layout changed or no tables at all: skip this stock.
        return None
    # Listings without a financial-summary header are skipped (e.g. index funds).
    if (('์ฃผ์์ฌ๋ฌด์ ๋ณด', '์ฃผ์์ฌ๋ฌด์ ๋ณด', '์ฃผ์์ฌ๋ฌด์ ๋ณด') in financial_stmt) == False:
        return None
    financial_stmt.set_index(('์ฃผ์์ฌ๋ฌด์ ๋ณด', '์ฃผ์์ฌ๋ฌด์ ๋ณด', '์ฃผ์์ฌ๋ฌด์ ๋ณด'), inplace=True)
    financial_stmt.index.rename('์ฃผ์์ฌ๋ฌด์ ๋ณด', inplace=True)
    financial_stmt.columns = financial_stmt.columns.droplevel(2)
    fs_dict = financial_stmt.to_dict()
    #debug_print(fs_dict)
    # Naver occasionally labels the quarter column with the wrong month
    # (e.g. 2020.08 instead of 2020.09); retry with the month shifted back one.
    input_qt = quarter
    if (('์ต๊ทผ ๋ถ๊ธฐ ์ค์ ', input_qt) in fs_dict) == False:
        print("ERROR: ์ต๊ทผ ๋ถ๊ธฐ ์ค์ " + input_qt + " ์์")
        alt_qt = { "03":"02", "06":"05", "09":"08", "12":"11"}
        input_qt = input_qt[:-2] + alt_qt[input_qt[-2:]]
        print("RETRY: ์ต๊ทผ ๋ถ๊ธฐ ์ค์ " + input_qt + " ์ฌ์๋")
    # Newly listed companies may have no quarterly column at all: skip.
    if (('์ต๊ทผ ๋ถ๊ธฐ ์ค์ ', input_qt) in fs_dict) == False:
        return None
    qp = fs_dict[('์ต๊ทผ ๋ถ๊ธฐ ์ค์ ', input_qt)]
    debug_print(fs_dict[('์ต๊ทผ ๋ถ๊ธฐ ์ค์ ', input_qt)])
    parsed_data = [ jongmok_code, quarter ]
    # Fields 0/1 are code and quarter; the remaining values come from the page.
    for i in range(2, len(qp_name)):
        debug_print(qp_name[i] + ": " + str(fs_dict[('์ต๊ทผ ๋ถ๊ธฐ ์ค์ ', input_qt)][qp_name[i]]))
        parsed_data.append( str(fs_dict[('์ต๊ทผ ๋ถ๊ธฐ ์ค์ ', input_qt)][qp_name[i]]) )
    debug_print(parsed_data)
    return parsed_data
def exist_record(code, quarter):
    """Return 1 if a row for (code, quarter) is already in qp_table, else 0."""
    # NOTE(review): values are interpolated straight into the SQL text; if the
    # driver is sqlite3, "?" parameter binding would be the safer form.
    query = (
        f"SELECT * FROM {qp_table}"
        f" WHERE {qp_field[0]}='{code}' AND {qp_field[1]}='{quarter}';"
    )
    stock_cur.execute(query)
    return 1 if stock_cur.fetchall() else 0
def insert_record(record):
    """Insert `record` (a list of strings) as one row into qp_table and commit."""
    # NOTE(review): quoting by concatenation, as in the original; parameter
    # binding would be safer if the driver supports it.
    quoted = ", ".join("'" + field + "'" for field in record)
    insert_sql = "INSERT INTO " + qp_table + " VALUES (" + quoted + ");"
    debug_print(insert_sql)
    stock_cur.execute(insert_sql)
    stock_con.commit()
def parse_n_store_all_jongmok(quarter):
    """Crawl every KOSPI+KOSDAQ stock for `quarter` and store new rows.

    Rows already present in the DB are skipped, so the crawl is resumable.
    """
    global kospi_list
    global kosdaq_list
    all_jongmok = kospi_list + kosdaq_list
    debug_print("๊ฐ์ ธ์ฌ ์ข๋ชฉ ๊ฐฏ์: " + str(len(all_jongmok)) + "๊ฐ")
    count = 0
    for code in all_jongmok:
        name = get_jongmokNameByCode(code)
        # check: skip rows already stored for this (code, quarter)
        if exist_record(code, quarter) == 1:
            debug_print("์กด์ฌ> ์ข๋ชฉ์ฝ๋: " + code + ", ์ข๋ชฉ๋ช: " + name + ", ๋ถ๊ธฐ: " + quarter)
            continue
        count = count + 1
        debug_print("์ ๊ท> No: " + str(count) + ", ์ข๋ชฉ์ฝ๋: " + code + \
            ", ์ข๋ชฉ๋ช: " + name + ", ๋ถ๊ธฐ: " + quarter)
        # get & parse; None means no usable data for this stock
        data = get_n_parse_jongmok(code, quarter)
        if data is None:
            print("INFO: Skip " + code + " (" + name + ")")
            continue
        # store the parsed row
        insert_record(data)
    debug_print("Total: " + str(count) + "๊ฐ ๋ ์ฝ๋ ์๋ ฅ")
if __name__ == "__main__":
    # Entry point: open the DB, ask the user for a quarter, then crawl.
    initDB()
    quarter = get_quarter()
    parse_n_store_all_jongmok(quarter)
import getopt
import sys
from mySock import client, close
from myUtil import login
# payloads
# NOTE(review): hand-built wire message; "650#" appears to be an opcode and
# "## " a terminator -- confirm against the server protocol.  The
# <placeholders> are substituted by get_params()/format_payloads().
comment_payload = '650#{gli&&er}{"glit_id":<glit_id>,"user_id":<user_id>,"user_screen_name":"<screen_name>","id":-1,' \
    '"content":"<content>","date":"2020-06-23T06:29:00.751Z"}## '
# params -- module-level mutable state filled in by get_params()/login()
GLIT_ID = ""
USER_ID = ""
USER_SCREEN_NAME = "default"
USER_NAME = ""
PASSWORD = ""
COMMENT = "default"
USAGE_INFO = """
Usage: comment.py -u <user_name> -p <password>
-g <glit_id>
-s <screen_name>
-c <comment_text>
"""
def get_params(opts):
    """Store parsed getopt pairs into the module-level parameter globals.

    -h prints the usage text and exits; -g additionally substitutes the glit
    id into the payload template immediately.
    """
    global comment_payload
    global USER_NAME, PASSWORD, GLIT_ID, COMMENT, USER_SCREEN_NAME
    for flag, value in opts:
        if flag == '-h':
            print(USAGE_INFO)
            sys.exit()
        if flag == '-u':
            USER_NAME = value
        elif flag == '-p':
            PASSWORD = value
        elif flag == '-c':
            COMMENT = value
        elif flag == '-s':
            USER_SCREEN_NAME = value
        elif flag == '-g':
            GLIT_ID = value
            comment_payload = comment_payload.replace("<glit_id>", GLIT_ID)
def format_payloads():
    """Fill the <placeholders> in the comment payload with collected params.

    Bug fix: the glit-id replacement searched for "glit_id>" (missing the
    leading "<"), so it could never match the "<glit_id>" placeholder.
    get_params() happens to substitute it already when -g is parsed, which
    masked the bug; replacing the correct token here is a harmless no-op in
    that case and a real fix otherwise.
    """
    global comment_payload
    comment_payload = comment_payload.replace("<content>", COMMENT)
    comment_payload = comment_payload.replace("<user_id>", USER_ID)
    comment_payload = comment_payload.replace("<glit_id>", GLIT_ID)
    comment_payload = comment_payload.replace("<screen_name>", USER_SCREEN_NAME)
def action(sock):
    """Send the prepared comment payload over `sock` and print the reply."""
    # post comment
    print(comment_payload)
    sock.send(comment_payload.encode())
    data = sock.recv(2048)
    print(data.decode())
def main(argv):
    """Entry point: parse CLI options, log in, post the comment, disconnect."""
    global USER_NAME, PASSWORD, USER_ID
    # try to get main arguments
    try:
        opts, args = getopt.getopt(argv, "hu:p:g:c:s:")
    except getopt.GetoptError:
        print(USAGE_INFO)
        sys.exit()
    get_params(opts)
    # create socket and login
    print("[+] Started")
    sock = client()
    # login() returns the authenticated user's id used inside the payload
    USER_ID = login(sock, USER_NAME, PASSWORD)
    # create payloads
    format_payloads()
    # main action
    action(sock)
    # close connection
    close(sock)
if __name__ == '__main__':
    main(sys.argv[1:])
|
# heap Implementation
class MinHeap:
    """Array-backed binary min-heap.

    Note: the constructor does NOT heapify `arr`; the supplied list is
    assumed to already satisfy the heap property (or be empty).
    """

    def __init__(self, arr=None):
        # BUG FIX: the original default `arr=[]` is a shared mutable default
        # argument -- every MinHeap() created without an argument would have
        # shared the same backing list.
        self.arr = arr if arr is not None else []

    def insert(self, val):
        """Append `val` and sift it up until its parent is no larger."""
        self.arr.append(val)
        i = len(self.arr) - 1
        parent = (i - 1) // 2
        while parent >= 0 and self.arr[parent] > self.arr[i]:
            self.arr[parent], self.arr[i] = self.arr[i], self.arr[parent]
            i = parent
            parent = (i - 1) // 2

    def remove_min(self):
        """Remove and return the minimum (root) element.

        Raises:
            IndexError: if the heap is empty (the original raised the same
                exception, but implicitly from an out-of-range index).
        """
        if not self.arr:
            raise IndexError("remove_min from an empty heap")
        root = self.arr[0]
        last = len(self.arr) - 1
        self.arr[0], self.arr[last] = self.arr[last], self.arr[0]
        self.arr.pop()
        # Sift the relocated element down until both children are >= it.
        n = len(self.arr)
        i = 0
        while True:
            left = 2 * i + 1
            right = 2 * i + 2
            smallest = i
            if left < n and self.arr[left] < self.arr[smallest]:
                smallest = left
            if right < n and self.arr[right] < self.arr[smallest]:
                smallest = right
            if smallest == i:
                break
            self.arr[i], self.arr[smallest] = self.arr[smallest], self.arr[i]
            i = smallest
        return root
|
#coding=utf-8
#@author:xiaolin
#@file:Ensemble_Pipeline.py
#@time:2016/9/1 16:27
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier,
GradientBoostingClassifier)
from sklearn.preprocessing import OneHotEncoder
from sklearn.cross_validation import train_test_split
from sklearn.metrics import roc_curve
from sklearn.pipeline import make_pipeline
# Load the training CSV and drop rows with any missing value
adress1='D:/data/pipeline_test.csv'
df_0=pd.read_csv(adress1,sep=',')
df_1=df_0.dropna(how='any')
# Features are all columns but the last; the last column is the label.
# NOTE(review): DataFrame.ix was removed in modern pandas; .iloc replaces it.
data =df_1.ix[:,:-1] # can input the data/samples num
label =df_1.ix[:,-1] # input the corr label num
print ('Data shape:',data.shape)
# print df_1.head()
# n_estimator=data.shape[1]
X_train, X_test, y_train, y_test = train_test_split(data, label, test_size=0.1)
# It is important to train the ensemble of trees on a different subset
# of the training data than the linear regression model to avoid
# overfitting, in particular if the total number of leaves is
# similar to the number of training samples
X_train, X_train_lr, y_train, y_train_lr = train_test_split(X_train,
                                                            y_train,
                                                            test_size=0.1)
# Unsupervised transformation based on totally random trees,
# chained into a logistic regression via a pipeline
rt = RandomTreesEmbedding()
rt_lm = LogisticRegression()
pipeline = make_pipeline(rt, rt_lm)
pipeline.fit(X_train, y_train)
# Column 1 of predict_proba is the positive-class score needed by roc_curve
y_pred_rt = pipeline.predict_proba(X_test)[:, 1]
fpr_rt_lm, tpr_rt_lm, _ = roc_curve(y_test, y_pred_rt)
# Supervised transformation based on random forests:
# leaf indices (rf.apply) -> one-hot encoding -> logistic regression,
# with the LR trained on the held-out X_train_lr split.
# NOTE: the bare `print ...` statements below are Python 2 syntax.
rf = RandomForestClassifier()
rf_enc = OneHotEncoder()
rf_lm = LogisticRegression()
rf.fit(X_train, y_train)
# print 'train data',X_train,y_train
print 'after random forest:',rf.apply(X_train),rf.apply(X_train).shape
rf_enc.fit(rf.apply(X_train))
rf_lm.fit(rf_enc.transform(rf.apply(X_train_lr)), y_train_lr)
# print 'randomf transform:',
print 'one hot encode:',rf_enc.transform(rf.apply(X_test)),rf_enc.transform(rf.apply(X_test)).shape
y_pred_rf_lm = rf_lm.predict_proba(rf_enc.transform(rf.apply(X_test)))[:, 1]
fpr_rf_lm, tpr_rf_lm, _ = roc_curve(y_test, y_pred_rf_lm)
# Gradient boosted trees -> one-hot encoded leaves -> logistic regression.
# grd.apply() returns a 3-D array; [:, :, 0] drops its last axis.
grd = GradientBoostingClassifier()
grd_enc = OneHotEncoder()
grd_lm = LogisticRegression()
grd.fit(X_train, y_train)
grd_enc.fit(grd.apply(X_train)[:, :, 0])
grd_lm.fit(grd_enc.transform(grd.apply(X_train_lr)[:, :, 0]), y_train_lr)
y_pred_grd_lm = grd_lm.predict_proba(
    grd_enc.transform(grd.apply(X_test)[:, :, 0]))[:, 1]
fpr_grd_lm, tpr_grd_lm, _ = roc_curve(y_test, y_pred_grd_lm)
# The gradient boosted model by itself
y_pred_grd = grd.predict_proba(X_test)[:, 1]
fpr_grd, tpr_grd, _ = roc_curve(y_test, y_pred_grd)
# The random forest model by itself
y_pred_rf = rf.predict_proba(X_test)[:, 1]
fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)
# Overlay the ROC curves of every model combination on one figure
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')  # chance-level diagonal
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
# Zoomed-in variant kept for reference (disabled)
# plt.figure(2)
# plt.xlim(0, 0.2)
# plt.ylim(0.8, 1)
# plt.plot([0, 1], [0, 1], 'k--')
# plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
# plt.plot(fpr_rf, tpr_rf, label='RF')
# plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
# plt.plot(fpr_grd, tpr_grd, label='GBT')
# plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
# plt.xlabel('False positive rate')
# plt.ylabel('True positive rate')
# plt.title('ROC curve (zoomed in at top left)')
# plt.legend(loc='best')
# plt.show()
# Random forest fed directly (no leaf one-hot encoding) into LR via a pipeline
rfc = RandomForestClassifier()
lr = LogisticRegression()
pipeline = make_pipeline(rfc, lr)
pipeline.fit(X_train, y_train)
# BUG FIX: predict_proba returns an (n_samples, 2) array, but roc_curve needs
# a 1-D score array; the original passed the whole 2-D array, which raises a
# ValueError inside roc_curve.  Take the positive-class column, as every
# other roc_curve call in this script does.
y_pred = pipeline.predict_proba(X_test)[:, 1]
fpr, tpr, thres = roc_curve(y_test, y_pred)
|
#
# @lc app=leetcode.cn id=216 lang=python3
#
# [216] ็ปๅๆปๅ III
#
# @lc code=start
from typing import List
class Solution:
    # Version 1: index-based DFS over the digits 1..9 (skip / take each one).
    def combinationSum3(self, k: int, n: int) -> List[List[int]]:
        digits = list(range(1, 10))
        found = []

        def dfs(pos: int, picked: List[int], remain: int):
            if remain == 0:
                if len(picked) == k:
                    found.append(picked)
                return
            if pos < 0 or remain < 0 or len(picked) > k:
                return
            dfs(pos - 1, picked, remain)
            dfs(pos - 1, picked + [digits[pos]], remain - digits[pos])

        dfs(8, [], n)
        return found

    # Version 2: recursive enumeration of subsets, filtered by length afterwards.
    def combinationSum3(self, k: int, n: int) -> List[List[int]]:
        def subsets(limit: int, remain: int):
            if remain == 0:
                return [[]]
            if limit < 1 or remain < 0:
                return []
            combos = subsets(limit - 1, remain)
            combos.extend(tail + [limit] for tail in subsets(limit - 1, remain - limit))
            return combos

        return [combo for combo in subsets(9, n) if len(combo) == k]

    # Version 3: bottom-up 2-D table, table[digit][total] = combos using 1..digit.
    def combinationSum3(self, k: int, n: int) -> List[List[int]]:
        table = [[[] for _ in range(n + 1)] for _ in range(10)]
        for d in range(10):
            table[d][0].append([])
        for d in range(1, 10):
            for total in range(1, n + 1):
                for combo in table[d - 1][total]:
                    table[d][total].append(combo[:])
                if total >= d:
                    for combo in table[d - 1][total - d]:
                        table[d][total].append(combo + [d])
        return [combo for combo in table[9][n] if len(combo) == k]

    # Version 4 (the effective definition): 1-D table over totals, iterated
    # downwards so every digit is used at most once.
    def combinationSum3(self, k: int, n: int) -> List[List[int]]:
        table = [[] for _ in range(n + 1)]
        table[0].append([])
        for d in range(1, 10):
            for total in range(n, d - 1, -1):
                for combo in table[total - d]:
                    table[total].append(combo + [d])
        return [combo for combo in table[n] if len(combo) == k]
# @lc code=end
|
"""
define a simple logger
"""
import logging
from logging.handlers import TimedRotatingFileHandler
import sys
import requests
from requests.adapters import HTTPAdapter
import os
#the max retries for http connect
MAX_RETRIES=3
# Shared session with retry adapters mounted for both schemes.
# NOTE(review): the LogFactory class below calls requests.post directly, so
# this session (and its retry policy) is currently unused.
s = requests.Session()
s.mount('http://', HTTPAdapter(max_retries=MAX_RETRIES))
s.mount('https://', HTTPAdapter(max_retries=MAX_RETRIES))
class LogFactory(object):
    """Logger that writes to a (rolling) file, optionally mirrors to stdout,
    and can push notifications (text/markdown/image/file) to a WeChat-Work
    style webhook."""

    headers = {"Content-Type": "text/plain"}

    def __init__(
        self,
        log_dir: str = "sb",
        log_level: int = logging.INFO,
        log_prefix="xx.log",
        log_format=None,
        scope_name="xx",
        use_webhook=True,
        webhook_url: str = "",
        mentioned_list=None,
        use_stream=True,
        file_handler_type="rolling",
        timeout=50
    ):
        """
        Args:
            log_dir: directory to save logs in (created if missing).
            log_level: int logging level (warn, info, error, fatal, ...).
            log_prefix: log file name.
            log_format: logging.Formatter format string; default used if None.
            scope_name: logger name, so different loggers don't share handlers.
            use_webhook: whether push_* methods actually post to webhook_url.
            webhook_url: endpoint to push info to; must contain "key=..."
                for push_file to work.
            mentioned_list: members to @ in pushed texts; defaults to ["@all"].
                (Built per instance -- the original list default was a shared
                mutable default argument.)
            use_stream: also mirror log records to stdout.
            file_handler_type: "rolling" rotates the log daily; "normal" is a
                plain append-mode file.
            timeout: timeout in seconds for webhook HTTP requests.
        """
        self.log_dir = log_dir
        self.log_level = log_level
        self.use_stream = use_stream
        self.file_handler_type = file_handler_type
        self.timeout = timeout
        # optional webhook configuration
        self.use_webhook = use_webhook
        if use_webhook:
            self.webhook_url = webhook_url
            key_index = webhook_url.find("key")
            if key_index == -1:
                print("the webhook url: {} // missing key.\nif you use this,you can not push file!".format(webhook_url))
                self.url_key = ""
            else:
                # keep the whole "key=..." query fragment for the upload URL
                self.url_key = webhook_url[key_index:]
        # BUG FIX: avoid the shared-mutable-default pitfall for the @-list
        self.mentioned_list = ["@all"] if mentioned_list is None else mentioned_list
        self.prefix = log_prefix
        self.format = log_format
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)
        if not isinstance(self.log_level, int):
            try:
                self.log_level = int(self.log_level)
            except (TypeError, ValueError):
                raise RuntimeError("log level should be int or can be converted to int ,but your input is {}".format(self.log_level))
        self._set_logger(
            prefix=self.prefix,
            log_format=self.format,
            scope_name=scope_name
        )

    def _set_logger(self, prefix: str, scope_name: str, log_format: str = None):
        """Attach file (and optional stream) handlers to logger `scope_name`.

        Args:
            prefix: log file name inside self.log_dir.
            scope_name: name passed to logging.getLogger.
            log_format: format string; falls back to a sensible default.
        """
        log_fp = os.path.join(self.log_dir, prefix)
        if self.file_handler_type == "rolling":
            # Rotate at midnight, keeping the last 5 days
            file_handler = TimedRotatingFileHandler(
                filename=log_fp,
                when="midnight",
                interval=1,
                backupCount=5,  # hard code
                encoding="utf-8"
            )
        elif self.file_handler_type == "normal":
            file_handler = logging.FileHandler(
                filename=log_fp,
                mode="a+",
                encoding="utf-8"
            )
        else:
            # BUG FIX: an unknown handler type previously fell through and
            # raised a confusing NameError on `file_handler` below.
            raise ValueError(
                "file_handler_type must be 'rolling' or 'normal', got {!r}".format(self.file_handler_type)
            )
        if log_format is None:
            log_format = "%(asctime)s [%(levelname)s] %(filename)s: %(message)s"
        formatter = logging.Formatter(log_format)
        file_handler.setLevel(self.log_level)
        file_handler.setFormatter(formatter)
        _logger = logging.getLogger(scope_name)
        _logger.setLevel(self.log_level)
        _logger.addHandler(file_handler)
        # mirror to stdout as well
        if self.use_stream:
            stream_handler = logging.StreamHandler(stream=sys.stdout)
            stream_handler.setLevel(self.log_level)
            stream_handler.setFormatter(formatter)
            _logger.addHandler(stream_handler)
        self.logger = _logger

    def info(self, msg):
        """Log `msg` at INFO level."""
        self.logger.info(msg)

    def warning(self, msg):
        """Log `msg` at WARNING level."""
        self.logger.warning(msg)

    def error(self, msg, exc_info=True):
        """Log `msg` at ERROR level, with the current traceback by default."""
        self.logger.error(msg, exc_info=exc_info)

    def fatal(self, msg):
        """Log `msg` at FATAL/CRITICAL level."""
        self.logger.fatal(msg)

    def push_text(self, text):
        """Push a plain-text message, @-mentioning self.mentioned_list.

        Returns:
            dict with the HTTP response JSON (or {"error": ...} on failure);
            None when webhooks are disabled.
        """
        if not self.use_webhook:
            self.logger.warning("you set not use webhook....!")
            return
        data = {
            "msgtype": "text",
            "text": {
                "content": text,
                "mentioned_list": self.mentioned_list,
            }
        }
        try:
            res = requests.post(self.webhook_url, headers=self.headers, json=data, timeout=self.timeout)
            res_json = res.json()
        except Exception as e:
            res_json = {"error": str(e)}
        return res_json

    def push_markdown(self, markdown: str):
        """Push a markdown-formatted message (an "<@all>" mention is appended
        if not already present).

        Returns:
            dict with the HTTP response JSON (or {"error": ...} on failure);
            None when webhooks are disabled.
        """
        if not self.use_webhook:
            self.logger.warning("you set not use webhook....!")
            return
        if not markdown.endswith("<@all>"):
            markdown += "<@all>"
        data = {
            "msgtype": "markdown",
            "markdown": {
                "content": markdown
            }
        }
        try:
            res = requests.post(self.webhook_url, headers=self.headers, json=data, timeout=self.timeout)
            res_json = res.json()
        except Exception as e:
            res_json = {"error": str(e)}
        return res_json

    def push_image(self, img_base64, img_md5):
        """Push an image given as base64 data plus its md5 checksum.

        Returns:
            dict with the HTTP response JSON (or {"error": ...} on failure);
            None when webhooks are disabled.
        """
        if not self.use_webhook:
            self.logger.warning("you set not use webhook....!")
            return
        data = {
            "msg_type": "image",
            "image": {
                "base64": img_base64,
                "md5": img_md5
            }
        }
        try:
            res = requests.post(self.webhook_url, headers=self.headers, json=data, timeout=self.timeout)
            res_json = res.json()
        except Exception as e:
            res_json = {"error": str(e)}
        return res_json

    def push_file(self, fp: str):
        """Upload the file at `fp` to the media endpoint, then push it.

        Returns:
            dict with the final HTTP response JSON (or the upload response
            when no media_id was returned); None when webhooks are disabled.
        """
        if not self.use_webhook:
            self.logger.warning("you set not use webhook....!")
            return
        # BUG FIX: self.url_key already contains the "key=..." fragment, so
        # the original "?key={}" format produced a doubled "key=key=..." query.
        post_url = "https://qyapi.weixin.qq.com/cgi-bin/webhook/upload_media?{}&type={}".format(
            self.url_key,
            "file"  # the upload type is always "file"
        )
        file_size = os.path.getsize(fp)
        file_data = {
            "filename": fp,
            "filelength": file_size
        }
        # NOTE(review): the upload endpoint normally expects a multipart file
        # body; confirm that posting the metadata as JSON is accepted.
        file_res = requests.post(
            post_url,
            json=file_data,
            timeout=self.timeout
        )
        file_json = file_res.json()
        if "media_id" in file_json:
            media_id = file_json["media_id"]
            push_data = {
                "msgtype": "file",
                "file": {
                    # BUG FIX: this key was misspelled "meida_id"
                    "media_id": media_id
                }
            }
            res = requests.post(self.webhook_url, headers=self.headers, json=push_data, timeout=self.timeout)
            res_json = res.json()
            return res_json
        else:
            return file_json

    def __str__(self):
        p_tr = hex(id(self))
        return "<object with log and push info at {}>".format(p_tr)
if __name__=="__main__":
    # Smoke test: build a logger in the current directory.  The dummy webhook
    # URL contains no "key=", which exercises the missing-key warning path.
    my_logger=LogFactory(
        log_dir=".",
        log_level=logging.INFO,
        webhook_url="https://www.baidu.com"
    )
    print(my_logger)
|
# coding: utf-8
import matplotlib.pyplot as plt
import csv
from itertools import islice
"""
This script is for gathering Thermal conductivity data of GaN 1750&3500 sample
and comparing with DFT result
"""
def plotTC(TCdt,DFTTCdt,OGRTCdt,grp, plotfolder): #Plotting TC data of each sample
    """Plot thermal-conductivity curves for one sample group.

    Args:
        TCdt: per-sample data; TCdt[i] = [temperatures, TC_xy, TC_z].
        DFTTCdt: [temperatures, TC_xy, TC_z] from the DFT (VASP) reference.
        OGRTCdt: [temperatures, TC_xy, TC_z] from the Ogura-HDNNP reference.
        grp: group label ("1750" or "3500"), used in titles and filenames.
        plotfolder: output directory for the PNG files.
    """
    #Plot each sample with DFT&Ogura-HDNNP result
    for i in range(10):
        smplname=grp+"-"+str(i+1)
        plotfile=plotfolder+smplname+".png"
        fig = plt.figure()
        ax1 = fig.add_subplot(111)
        plt.title(f'[SMZ-NNP:GaN {smplname}] Thermal Conductivity')
        ax1.set_xlabel("Temperature (K)")
        ax1.set_ylabel("Thermal Conductivity (W/m-K)")
        ax1.grid(True)
        ax1.set_ylim(0, 700)
        plt.rcParams["legend.edgecolor"] ='green'
        # solid = x/y direction, dotted = z direction
        ax1.plot(TCdt[i][0],TCdt[i][1],c="blue",label="SMZ-x/y")
        ax1.plot(TCdt[i][0],TCdt[i][2],c="blue",linestyle="dotted",label="SMZ-z")
        ax1.plot(OGRTCdt[0],OGRTCdt[1],c="red",label="OGR-x/y")
        ax1.plot(OGRTCdt[0],OGRTCdt[2],c="red",linestyle="dotted",label="OGR-z")
        ax1.plot(DFTTCdt[0],DFTTCdt[1],c="black",label="VASP-x/y")
        ax1.plot(DFTTCdt[0],DFTTCdt[2],c="black",linestyle="dotted",label="VASP-z")
        plt.legend(loc="upper right")
        plt.savefig(plotfile)
        plt.close()
    print(f'TC of each sample ({grp}smpl) is plotted')
    #Plot TC(x/y) of all sample with DFT&Ogura-HDNNP result
    plotfile=plotfolder+grp+"-xyall.png"
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    plt.title(f'[SMZ-NNP:GaN {grp}-all] Thermal Conductivity (x/y)')
    ax1.set_xlabel("Temperature (K)")
    ax1.set_ylabel("Thermal Conductivity (W/m-K)")
    ax1.grid(True)
    ax1.set_ylim(0, 700)
    plt.rcParams["legend.edgecolor"] ='green'
    # Label only the first sample so the legend has a single SMZ entry
    ax1.plot(TCdt[0][0],TCdt[0][1],c="blue",label="SMZ")
    for i in range(1,10):
        ax1.plot(TCdt[i][0],TCdt[i][1],c="blue")
    ax1.plot(OGRTCdt[0],OGRTCdt[1],c="red",label="OGR")
    ax1.plot(DFTTCdt[0],DFTTCdt[1],c="black",label="VASP")
    ax1.legend(loc="upper right")
    plt.savefig(plotfile)
    plt.close()
    print(f'TC(x/y) of all sample ({grp}smpl) is plotted')
    #Plot TC(z) of all sample with DFT&Ogura-HDNNP result
    plotfile=plotfolder+grp+"-zall.png"
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    plt.title(f'[SMZ-NNP:GaN {grp}-all] Thermal Conductivity (z)')
    ax1.set_xlabel("Temperature (K)")
    ax1.set_ylabel("Thermal Conductivity (W/m-K)")
    ax1.grid(True)
    ax1.set_ylim(0, 700)
    plt.rcParams["legend.edgecolor"] ='green'
    ax1.plot(TCdt[0][0],TCdt[0][2],c="blue",label="SMZ")
    for i in range(1,10):
        ax1.plot(TCdt[i][0],TCdt[i][2],c="blue")
    ax1.plot(OGRTCdt[0],OGRTCdt[2],c="red",label="OGR")
    ax1.plot(DFTTCdt[0],DFTTCdt[2],c="black",label="VASP")
    ax1.legend(loc="upper right")
    plt.savefig(plotfile)
    plt.close()
    print(f'TC(z) of all sample ({grp}smpl) is plotted')
if __name__ == '__main__':
    # Input files for the two reference data sets and the output folders
    GaNfolder="/home/okugawa/NNP-F/GaN/SMZ-200901/"
    DFTTCfile="/home/okugawa/NNP-F/GaN/GaN-shimizuNNP/data111111.dat-vasp"
    OGRTCfile="/home/okugawa/NNP-F/GaN/GaN-shimizuNNP/data111111.dat-ogura"
    DFTcsv="/home/okugawa/NNP-F/GaN/GaN-shimizuNNP/data111111-vasp.csv"
    OGRcsv="/home/okugawa/NNP-F/GaN/GaN-shimizuNNP/data111111-ogura.csv"
    plotfolder=GaNfolder+"result/"
    grps=["1750","3500"]
    colors=["orange","green"]
    #Read DFT result TC data
    # NOTE(review): the filter ('0.0' in data[0] and data[0]!='0.0')
    # presumably selects temperature rows like "100.0" while skipping the
    # literal 0.0 row -- confirm it cannot drop valid temperatures.
    DFTTCdt=[[] for i in range(3)]
    with open(DFTTCfile, 'r') as DFTTCf, open(DFTcsv, 'w') as Dcsv:
        writer2 = csv.writer(Dcsv, lineterminator='\n')
        lenDFT=0
        for line in DFTTCf:
            data=line.split()
            if '0.0' in data[0] and data[0]!='0.0':
                wrdata=[float(data[0]),float(data[1]),float(data[2]),float(data[3])]
                writer2.writerow(wrdata)
                DFTTCdt[0].append(float(data[0]))  # temperature
                DFTTCdt[1].append(float(data[1]))  # TC x/y
                DFTTCdt[2].append(float(data[3]))  # TC z
                lenDFT+=1
    print(f'DFT TC data ({lenDFT}) was read')
    #Read Ogura-HDNNP result TC data (same layout as the DFT file)
    OGRTCdt=[[] for i in range(3)]
    with open(OGRTCfile, 'r') as OGRTCf, open(OGRcsv, 'w') as Ocsv:
        writer2 = csv.writer(Ocsv, lineterminator='\n')
        lenOGR=0
        for line in OGRTCf:
            data=line.split()
            if '0.0' in data[0] and data[0]!='0.0':
                wrdata=[float(data[0]),float(data[1]),float(data[2]),float(data[3])]
                writer2.writerow(wrdata)
                OGRTCdt[0].append(float(data[0]))
                OGRTCdt[1].append(float(data[1]))
                OGRTCdt[2].append(float(data[3]))
                lenOGR+=1
    print(f'Ogura-HDNNP TC data ({lenOGR}) was read')
    #Read TC of 1750&3500sample data from poscar-elm2/out.txt
    # TCdt[group][sample] = [temperatures, TC_xy, TC_z]
    TCdt=[[[[],[],[]] for i in range(10)] for j in range(2)]
    for k,grp in enumerate(grps):
        TCdtfolder=GaNfolder+grp+"smpl/training_2element/TCdata/"
        for i in range(1,11):
            grpname=grp+"-"+str(i)
            TCfolder =GaNfolder+grp+"smpl/training_2element/"+str(i)
            TCdtfile=TCdtfolder+grp+"-"+str(i)+".csv"
            TCfile= TCfolder+"/poscar_elm2/out.txt"
            with open(TCfile, 'r') as TCf, open(TCdtfile, 'w') as TCdtf:
                writer2 = csv.writer(TCdtf, lineterminator='\n')
                for n, line in enumerate(TCf):
                    if 'Thermal conductivity (W/m-k)' in line:
                        # Data starts 3 lines after the header; read 100 rows
                        TCf.seek(0)
                        lenSMZ=0
                        for lined in islice(TCf, n+3, n+103):
                            data=lined.split()
                            wrdata=[float(data[0]),float(data[1]),float(data[2]),float(data[3])]
                            writer2.writerow(wrdata)
                            TCdt[k][i-1][0].append(float(data[0]))
                            TCdt[k][i-1][1].append(float(data[1]))
                            TCdt[k][i-1][2].append(float(data[3]))
                            lenSMZ+=1
                        break
            print(f'{grp}-{i} TC data ({lenSMZ}) was read')
    #Plot TC curve of each sample
    for k,grp in enumerate(grps):
        plotTC(TCdt[k],DFTTCdt,OGRTCdt,grp, plotfolder)
from django.contrib import admin
from rank.models import VoteEvent,Individual
# Expose the rank app's models in the Django admin with default ModelAdmin options
admin.site.register(VoteEvent)
admin.site.register(Individual)
|
from math import *
# Demo of numeric literals, arithmetic operators and math helpers
print(2)
print(2.097)
print(-2.097)
# int/float arithmetic promotes to float
print(3 + 4.5)
print(3 - 4.5)
print(3 * 4.5)
# precedence: * binds tighter than +; parentheses override
print(3 * 4 + 5)
print(3 * (4 + 5))
print(10 % 3)
my_num = 5
print(str(my_num) + " my favorite number")
my_num = -5
# numeric builtins
print(abs(my_num))
print(pow(3, 2))
print(max(4, 6))
print(min(4, 6))
print(round(3.6))
# floor/ceil/sqrt come from the wildcard `from math import *` above
print(floor(3.7))
print(ceil(3.7))
print(sqrt(36))
|
# 152. Maximum Product Subarray
class Solution:
    def maxProduct(self, nums: List[int]) -> int:
        """Return the largest product over all contiguous subarrays of nums
        (0 for an empty list)."""
        if not nums:
            return 0
        # Track the best and worst products of a subarray ending at the
        # current position; a negative factor can turn the worst into the
        # new best, so both extremes must be carried along.
        best = high = low = nums[0]
        for x in nums[1:]:
            candidates = (x, x * high, x * low)
            high = max(candidates)
            low = min(candidates)
            best = max(best, high)
        return best
import subprocess as sub
import crypt
class UserExist(Exception):
    """Raised when creating a user: "User exist in the system." """
    def __str__(self):
        return repr("User exist in the system.")
class UserNotExist(Exception):
    """Raised when the target user is absent: "User don't exist in the system." """
    def __str__(self):
        return repr("User don't exist in the system.")
class GroupExist(Exception):
    """Raised when creating a group: "Group exist in the system." """
    def __str__(self):
        return repr("Group exist in the system.")
class GroupNotExist(Exception):
    """Raised when the target group is absent: "Group don't exist in the system." """
    def __str__(self):
        return repr("Group don't exist in the system.")
class ManageUser(object):
    """Thin wrapper around shell commands for managing system users/groups.

    NOTE(review): most operations require root, and commands are run with
    shell=True, so names interpolated into commands must be trusted input.
    """

    cmd_exists_user = 'egrep "^{username}" /etc/passwd'
    cmd_exists_group = 'egrep "^{groupname}" /etc/group'

    def __init__(self):
        super(ManageUser, self).__init__()

    def _exec_command(self, cmd):
        """Run `cmd` in a shell and return its stdout (stderr when captured)."""
        action = sub.Popen(cmd, stdout=sub.PIPE, shell=True)
        (output, error) = action.communicate()
        return error or output

    def exists(self, **kwargs):
        """Check if a user or group exists in the system.

        Arg:
            **kwargs:
                user: the user name.
                group: the group name.
        Return:
            True if a matching /etc/passwd (or /etc/group) line was found,
            False otherwise.
        """
        cmd = ""
        if 'user' in kwargs:
            cmd = self.cmd_exists_user.format(username=kwargs['user'])
        elif 'group' in kwargs:
            cmd = self.cmd_exists_group.format(groupname=kwargs['group'])
        # NOTE(review): "^{name}" also matches any name with this prefix
        # ("dev" matches "developer"); "^{name}:" would be an exact match.
        result = self._exec_command(cmd)
        return bool(result)

    def list(self):
        """List regular users (UID >= 1000) from /etc/passwd.

        :return: list of user names
        """
        cmd = "awk -F':' '{if ($3 >= 1000 ) print $1}' /etc/passwd"
        result = self._exec_command(cmd)
        return result.decode("utf-8").split()

    def create(self, **kwargs):
        """Create a system user via `adduser`.

        Arg:
            **kwargs: adduser/useradd style single-letter options
                (b, c, d, g, m, M, N, p, s, u) plus the mandatory `user`
                (the user name).  A supplied `p` password is hashed with
                crypt before use.
        Return:
            True if the user exists afterwards, False otherwise.
        Exception:
            UserExist: if the user already exists.
        """
        if self.exists(user=kwargs['user']):
            raise UserExist()
        if 'p' in kwargs:
            # NOTE(review): static salt "22" and the deprecated crypt module;
            # consider crypt.mksalt()/passlib if this code is kept.
            kwargs['p'] = crypt.crypt(kwargs['p'], "22")
        cmd = 'adduser'
        for key, value in kwargs.items():
            # BUG FIX: `key is 'user'` relied on string interning; compare by value.
            if key != 'user':
                cmd = cmd + ' -' + str(key) + ' ' + str(value)
            else:
                cmd = cmd + ' ' + value
        self._exec_command(cmd)
        return self.exists(user=kwargs['user'])

    def update_password(self, **kwargs):
        """Change a user's password via `passwd --stdin`.

        Arg:
            **kwargs:
                user: the user name.
                password: the new password.
        Return:
            True when the command was issued.
        Exception:
            UserNotExist: if the user does not exist.
        """
        if not self.exists(user=kwargs['user']):
            raise UserNotExist()
        proc = sub.Popen(['passwd', kwargs['user'], '--stdin'], stdin=sub.PIPE)
        # BUG FIX: the pipe is binary in Python 3, so the original str writes
        # raised TypeError; send the same bytes via communicate(), which also
        # closes stdin and waits for the process to finish.
        proc.communicate((kwargs['password'] + '\n' + kwargs['password']).encode())
        return True

    def delete(self, user):
        """Delete a system user (with home directory) via `userdel -r -f`.

        Arg:
            user: the user name.
        Return:
            True when the command was issued.
        Exception:
            UserNotExist: if the user does not exist.
        """
        if not self.exists(user=user):
            raise UserNotExist()
        cmd = 'userdel -r -f ' + user
        self._exec_command(cmd)
        return True

    def create_group(self, group):
        """Create the group in the system.

        Arg:
            group: the group name.
        Return:
            True if the group was created.
        Exception:
            GroupExist: if the group already exists.
        """
        # BUG FIX: the original condition was inverted (it tried to create
        # the group only when it already existed, and raised GroupExist when
        # it did not), and it built `cmd` as a tuple ('groupadd', group),
        # which Popen(shell=True) cannot execute.
        if self.exists(group=group):
            raise GroupExist()
        self._exec_command('groupadd ' + group)
        return True

    def update_group(self, **kwargs):
        """Modify a group via `groupmod`.

        Arg:
            **kwargs:
                g: new GID for the group.
                n: new group name.
                group: the (current) group name; appended last, since
                    groupmod expects `groupmod [options] GROUP`.
        Return:
            True when the command was issued.
        Exception:
            GroupNotExist: if the group does not exist.
        """
        if not self.exists(group=kwargs['group']):
            raise GroupNotExist()
        # BUG FIX: dict.iteritems() is Python 2 only, and the original built
        # the command as nested tuples instead of a string.
        cmd = 'groupmod'
        for key, value in kwargs.items():
            if key != 'group':
                cmd = cmd + ' -' + str(key) + ' ' + str(value)
        cmd = cmd + ' ' + kwargs['group']
        self._exec_command(cmd)
        return True

    def delete_group(self, group):
        """Delete the group.

        Arg:
            group: the group name.
        Return:
            True when the command was issued.
        Exception:
            GroupNotExist: if the group does not exist.
        """
        if not self.exists(group=group):
            raise GroupNotExist()
        # BUG FIX: cmd was a tuple ('groupdel', group), not a shell string.
        self._exec_command('groupdel ' + group)
        return True
|
import random
import datetime as d
def wish():
    """Return a randomly chosen opening greeting that asks for the user's name."""
    greetings = (
        'hey hai i am jeff ....!,i am here to help u out . by the way may i know ur name',
        'hello i am jeff ..! how can i help you and tell me yor name !',
    )
    return random.choice(greetings)
def welcome(name):
    """Return a time-of-day greeting addressed to `name`.

    Branches on the current local hour: morning (<12), afternoon (12-15),
    evening (16-21), otherwise late night.
    """
    hour = d.datetime.now().hour
    if hour < 12:
        # BUG FIX: user-facing typo "goodmoring" -> "good morning"
        return "good morning " + name + " !! enjoy your day !"
    elif 12 <= hour < 16:
        return "wishing you a good afternoon " + name + "!!"
    elif 16 <= hour < 22:
        return "a very peaceful evening " + name + '!!'
    else:
        return 'ohh!! its too late, ' + name + ' anyways i ll help you'
def choices():
    """Show the MPC/BIPC course menu, read a number, print the course list.

    Re-prompts by calling itself recursively on invalid input; 0 ends the
    chat via the module-level quit() (which shadows the builtin).
    """
    #group1 =[Mpc]
    courses = ['Diploma Engineering',"Merchant Navy courses","B. Arch","B.Sc. courses. .."," B Pharmacy."]
    #group2 = [BIpc]
    courses2 = ["MBBS (Medicine)","BDS (Dental)","Agriculture Allied Courses ","Pharmacy Courses"]
    print("1. MPC\n2. BIPC")
    print("enter the number 1 or 2 to get details :")
    print("Enter 0 to end the chat !! :\n")
    try:
        x = int (input ())
        print()
        if x==1:
            print (courses)
            print("Thank You!!!")
        elif x==2:
            print(courses2)
            print("Thank You!!!")
        elif x==0:
            quit()
    except Exception as e:
        # NOTE(review): catching Exception also swallows EOFError, so a
        # closed stdin would recurse until RecursionError.
        print("Enter Valid Input")
        choices()
def bot_jeff():
    """Top-level conversation flow: greet, read the name, welcome, show menu."""
    print(wish())
    print()
    # the first reply is treated as the user's name
    grp = input()
    print()
    print(welcome(grp))
    print()
    choices()
def quit():
    # NOTE: shadows the quit() builtin; choices() calls this version, which
    # only prints a farewell and does NOT exit the process.
    print("Thank You!!!")
# Script entry: banner, then start the bot
print()
print(" ###### Welcome to chat bot ######")
print()
bot_jeff()
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from jisho.models import Definition
from jisho.serializers import DefinitionSerializer
from django.template import Context, RequestContext, loader
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
import sys, traceback
from rest_framework import permissions
from rest_framework.generics import ListCreateAPIView
from rest_framework.generics import RetrieveAPIView
def in_group(user, groupname):
    """Return True if `user` belongs to the group named `groupname`.

    Bug fixes: the original referenced an undefined name `u` instead of the
    `user` parameter (NameError on every call), and it compared the count to
    0, which answered "NOT in group" despite the function's name.
    """
    return user.groups.filter(name=groupname).count() > 0
def home(request):
    """Render the paginated list of recent weeabot .jisho lookups
    (30 per page, newest first)."""
    #handling post dropdown result
    '''
    if request.method == 'POST':
        list_name = request.POST.get('vlist', '')
        if list_name == '...':
            return HttpResponseRedirect('')
        definition_pk = request.POST.get('definition', '')
        definition = Definition.objects.get(pk=definition_pk)
        new_list = VocabularyList.objects.get(name=list_name)
        definition.lists.add(new_list)
        return HttpResponseRedirect('')
    '''
    # Newest lookups first
    definitions = Definition.objects.all().order_by('timestamp').reverse()
    #first_date = definitions[len(definitions)-1].timestamp
    #last_date = definitions[0].timestamp
    #lists = VocabularyList.objects.all()
    paginator = Paginator(definitions, 30) # Show 30 contacts per page
    page = request.GET.get('page')
    try:
        definitions = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        definitions = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        definitions = paginator.page(paginator.num_pages)
    t = loader.get_template('jisho/index.html')
    c = RequestContext(request, {
        'title' : 'Weeabot Jisho Lookups',
        'description' : 'Recent weeabot irc bot .jisho lookups',
        #'first_date' : first_date,
        #'last_date' : last_date,
        'definitions': definitions,
        'paginator' : paginator,
        #'lists' : lists,
        'editable' : False, #request.user.is_staff,
        'deleteable' : False,
        'show_vocab_lists' : False,
    })
    return HttpResponse(t.render(c))
'''
RESTful interface support
'''
'''
class JSONResponse(HttpResponse):
"""
An HttpResponse that renders its content into JSON.
"""
def __init__(self, data, **kwargs):
content = JSONRenderer().render(data)
kwargs['content_type'] = 'application/json'
super(JSONResponse, self).__init__(content, **kwargs)
'''
class DefinitionList(ListCreateAPIView):
    '''
    List all definitions, or create a new definition.
    '''
    # Model queryset and serializer wired into DRF's generic list/create view
    queryset = Definition.objects.all()
    serializer_class = DefinitionSerializer
    # NOTE(review): paginate_by is the legacy DRF pagination setting; newer
    # DRF versions use pagination_class -- confirm the installed version.
    paginate_by = 10
class DefinitionDetail(RetrieveAPIView):
    '''
    Show an individual definition by id
    '''
    # DRF's RetrieveAPIView resolves the object by primary key from the URL
    queryset = Definition.objects.all()
    serializer_class = DefinitionSerializer
|
# Define a function that transform the test dataset into desired format
def transform_test(test_set, pre_trained_d2v, use_infer=True):
    '''
    Compute question vectors based on indicator use_infer.
    If True, question vectors in the test set are inferred from the
    pre-trained doc2vec object; if False, a separate doc2vec is trained on
    the test questions.
    return:
        (DataFrame, ndarray) of size (len(unique questions), 50) used as the
        neural-network input
    '''
    # Concatenate body, title and tags into one text field
    test_set['question_all'] = test_set['questions_body'] + ' ' + test_set['questions_title'] + ' ' + test_set['question_tags']
    # BUG FIX: the original called .apply() without assigning the result, so
    # the text preprocessing was silently discarded.
    test_set['question_all'] = test_set['question_all'].apply(tp.prossessor)
    if use_infer:
        # Infer one vector per unique question id from the pre-trained model
        test_dict = dict()
        for idx, row in test_set[['questions_id', 'question_all']].drop_duplicates(subset='questions_id').iterrows():
            test_dict[row['questions_id']] = pre_trained_d2v.infer_vector(row['question_all'].split())
    else:
        # Use test_set to train a new doc2vec object
        test_dict, _ = train_doc2vec(test_set, 'questions_id', ['question_all'], dim=50, epoch=30)
    test_input_1 = pd.DataFrame(test_dict).T
    test_input_2 = test_input_1.values
    return test_input_1, test_input_2
# Pinpoint the 'k-most-similar-professionals' via cosine similarity
def find_closest(pro_ids, pro_trained, nn_vector, k=avg_num_ans):
    '''
    Find the k closest (most similar) professionals given a predicted
    neural-network vector and pre-trained doc2vec embeddings.

    pro_ids: professionals ids
    pro_trained: pre-trained doc2vec embedding vectors
    nn_vector: predicted neural network vector
    k: number of neighbours

    return:
        list of professional ids, most similar first
    '''
    similarity_by_pro = dict()
    for pro_id, embedding in zip(pro_ids, pro_trained):
        # Reshape both vectors into 2D row arrays for cosine_similarity
        similarity_by_pro[pro_id] = cosine_similarity(embedding.reshape(-1, 1).T, nn_vector.reshape(-1, 1).T)
    # Rank professionals by similarity, highest first, and keep the top k
    ranked = sorted(similarity_by_pro.items(), key=lambda item: item[1], reverse=True)
    return [pro_id for pro_id, _ in ranked[:k]]
def find_all_closest(pro_ids, pro_trained, questions_ids, nn_outputs):
    """Map each question id to its k recommended professionals.

    For every row of nn_outputs (one predicted vector per question) run
    find_closest and collect the results keyed by the question id.
    """
    return {
        questions_ids[row]: find_closest(pro_ids, pro_trained, nn_outputs[row])
        for row in range(nn_outputs.shape[0])
    }
|
import numpy as np
def get_cropped_videos(video):
    """Extract five random 224x224 spatial crops from a video.

    Args:
        video: ndarray of shape (frames, rows, cols, channels).

    Returns:
        ndarray of shape (5, frames, 224, 224, channels).

    Raises:
        ValueError: if frames are smaller than 224x224 (the original
            rejection-sampling loop would spin forever in that case).
    """
    _, rows, cols, _ = video.shape
    if rows < 224 or cols < 224:
        raise ValueError("video frames must be at least 224x224, got %dx%d" % (rows, cols))
    crops = []
    for _ in range(5):
        # Sample only valid top-left corners directly; this is the same
        # uniform distribution the old rejection loop converged to.
        x = np.random.randint(0, cols - 224 + 1)
        y = np.random.randint(0, rows - 224 + 1)
        crops.append(video[:, y:y + 224, x:x + 224, :])
    return np.array(crops)
# Class names, one per line; position in this list maps the dataset's
# 1-based class indices to names.
classes = open('classes.txt').read().splitlines()
# UCF-101 train split: each line is "<relative video path> <1-based class index>"
train_file = open('/dev/shm/ucfTrainTestlist/trainlist01.txt').read().splitlines()
for l in train_file:
    fn = l.split(' ')[0]
    classidx = int(l.split(' ')[1])
    target = classes[classidx-1]
    # NOTE(review): path is computed but never used in this chunk --
    # confirm the loop body continues elsewhere or this is dead code.
    path = '/dev/shm/UCF-101/{}/{}'.format(target, fn)
|
""" Kujira API is flask/websocket app for serving Ceph cluster data """
from flask import Flask
from flask_socketio import SocketIO
from kujira.blueprints import SERVER_BP, OSD_BP, POOL_BP, MON_BP, CLUSTER_BP
from kujira.rest.controllers import osds, pools, servers, clusters, mons
import eventlet
# Patch the stdlib for cooperative (green-thread) I/O before anything else
# touches sockets; required for the 'eventlet' async_mode used below.
eventlet.monkey_patch()
# Module-level SocketIO instance; bound to the Flask app inside create_app().
SOCKETIO = SocketIO()
def create_app(debug=False):
    """Create and configure the Kujira Flask application.

    Registers all REST blueprints, loads settings from the `config`
    module, and binds the module-level SOCKETIO instance to the app.
    """
    app = Flask(__name__)
    # Register every blueprint; the order matches the original wiring.
    for blueprint in (OSD_BP, SERVER_BP, POOL_BP, MON_BP, CLUSTER_BP):
        app.register_blueprint(blueprint)
    app.debug = debug
    app.config.from_object('config')
    SOCKETIO.init_app(app, engineio_logger=True, async_mode='eventlet')
    return app
|
from yahoofinancials import YahooFinancials
import datetime
import smtplib
import json

# Major worldwide indexes to watch
index_tickers = ['^GSPC', '^DJI', '^IXIC', '^FTSE', '^N100', '^FCHI', '^GDAXI', '^N225', '^TWII', '^HSI']
mutual_funds = YahooFinancials(index_tickers)
# Define dates for the historical price data arguments
today = datetime.date.today()
endDate = (today - datetime.timedelta(weeks=1)).strftime('%Y-%m-%d')
startDate = (today - datetime.timedelta(weeks=52)).strftime('%Y-%m-%d')
# Fetch 52 weeks of monthly history plus the current quote
hist = mutual_funds.get_historical_price_data(startDate, endDate, "monthly")
curr = mutual_funds.get_current_price()
# Initialize empty list of alerts
alerts = []
# Flag any ticker trading in the bottom 5% of its 52-week high/low range
for ticker in curr:
    currentValue = curr[ticker]
    lows = [price['low'] for price in hist[ticker]['prices'] if price['low'] is not None]
    highs = [price['high'] for price in hist[ticker]['prices'] if price['high'] is not None]
    if not lows or not highs:
        # No usable history for this ticker: min()/max() would raise.
        continue
    lowest = min(lows)
    highest = max(highs)
    if highest == lowest:
        # Flat range: the percentile is undefined; skip instead of
        # dividing by zero.
        continue
    currentPct = (currentValue - lowest) / (highest - lowest)
    if currentPct <= .05:
        currentPct = "{:.2%}".format(currentPct)
        alerts.append(f"{ticker} is currently at {currentPct} of the 52 week range.")
# If any alerts were generated, send them out
if len(alerts) > 0:
    # Load settings from external file
    with open('settings.json', 'r') as settings_file:
        settings = json.load(settings_file)
    # SMTP_SSL as a context manager guarantees the connection is closed
    # even if login or sendmail raises.
    with smtplib.SMTP_SSL('smtp.gmail.com', 465) as server:
        server.ehlo()
        server.login(settings['user'], settings['pass'])
        for recipient in settings['recipients']:
            server.sendmail(settings['user'], recipient, ' '.join(alerts))
|
# -*- coding: utf-8 -*-
'''
Helper functions
'''
import flask.json as json
from sqlalchemy.sql import text
from consts import *
from hashlib import md5
def error_resp(code, resp=None):
    '''Build a standard JSON response for the Flask server

    Args:
        code (int): Error code number
        resp (dict): Response data. Optional, but HTTP 405 status is returned if not provided

    Returns:
        tuple: (JSON data, HTTP status code)
    '''
    if resp is None:
        # No payload: error code only, flagged with HTTP 405
        return json.jsonify({'error': code}), 405
    return json.jsonify({'error': code, 'response': resp}), 200
def load_ticket(id, con):
    '''Loads ticket information by id

    Args:
        id (int): ticket id
        con: Database connection context variable

    Returns:
        dict: Ticket data structure, or None when the ticket does not
        exist or the query fails
    '''
    # Joined events+tickets row for the given ticket id
    q = text("SELECT `e`.*, UNIX_TIMESTAMP(`e`.`dt`) AS `e_utime`, `t`.`id` AS `tid`, `t`.`tg_id`, `t`.`t_buy`, `t`.`t_refund`, `t`.`status` AS `t_status`, `t`.`t_code`, `t`.`ts`, UNIX_TIMESTAMP(`t`.`ts`) AS `t_utime` FROM `events` AS `e` , `tickets` AS `t` WHERE `t`.`event_id` = `e`.`id` AND `t`.`id` = :i")
    try:
        return format_ticket(con.execute(q, i=id).fetchone())
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
    # no longer swallowed; any DB/formatting failure still yields None.
    except Exception:
        return None
def format_ticket(r):
    '''Formats raw event and ticket data after SQL-query execution

    Args:
        r: data after SQL-query execution (a single joined events+tickets
           row, or a falsy value when nothing was found)

    Returns:
        dict: {'event': ..., 'ticket': ...} structure, or {} when r is falsy
    '''
    if r:
        # Event data structure
        event = {
            'id': r['id'],  # Event id
            'title': r['title'],  # Event title
            'descr': r['descr'],  # Event description
            'long_descr': r['long_descr'],  # Event long description
            'org_id': r['org_id'],  # Organizer id
            'lang_id': r['lang_id'],  # Event language id
            'dt': str(r['dt']),  # Event 'datetime'
            'd': r['dt'].strftime(DT_FORMAT),  # Date of the event
            't': r['dt'].strftime('%H:%M'),  # Time of the event
            'utime': r['e_utime'],  # Unixtime of the event
            'status': r['status'],  # Event status: -1 (cancelled), 0 (not approved yet), 1 (approved)
            'game_id': r['game_id'],  # Event type id
            'city_id': r['city_id'],  # City id of the event
            'addr': r['addr'],  # Address of the event
            'map': r['map'],  # Event on Google Map via link
            'price': r['price'],  # Cost of a ticket to the event
            'count_min': r['count_min'],  # Minimal count of tickets to sold required to start event
            'count_max': r['count_max'],  # Maximum count of tickets available to sold to the event
            'count_free': r['count_free'],  # Count of free of charge tickets to the event
            'count_paid': r['count_paid'],  # Count of already paid tickets
            'link': r['link'],  # Link to the photos from the event
            'images': event_images(r)  # Images describing the event
        }
        # Full ticket number: <code>-<id>
        t_no = '{}-{}'.format(r['t_code'], r['tid'])
        ticket = {
            'id': r['tid'],  # Id of the ticket to the event
            'tg': r['tg_id'],  # Id of the Telegram user
            'buy': r['t_buy'],  # Ticket buying transaction
            'refund': r['t_refund'],  # Ticket refunding transaction
            'code': r['t_code'],  # Ticked code
            'number': t_no,  # Full ticked number: <code>-<id>
            'status': r['t_status'],  # Ticked status: -1 (return), 0 (not used yet), 1 (used)
            'utime': r['t_utime'],  # Payment process unixtime
            'dt': str(r['ts']),  # Payment process timestamp
            'd': r['ts'].strftime(DT_FORMAT),  # Date of the event
            't': r['ts'].strftime('%H:%M'),  # Time of the event
            # Signed ticket-image URL: the key is md5(<number><org_id><SALT_KEY>)
            'image': URL_IMAGES + '/t/' + t_no + '.' + TYPE_IMG + '?key=' + md5('{}{}{}'.format(t_no, r['org_id'], SALT_KEY).encode('utf-8')).hexdigest()
        }
        return {
            'event': event,
            'ticket': ticket
        }
    else:
        return {}
# Load a user by id
def load_user(id, con, cache=None):
    '''Loads user information by user id

    Args:
        id (int): user id
        con: database connection context variable
        cache: memcached database handler. Optional, if available

    Returns:
        dict: user data structure (with a 'cache' entry when cached data
        exists), or None when the user is unknown and nothing is cached
    '''
    if cache is not None:
        try:
            val = cache.get('tg:' + str(id)).decode()
        # Narrowed from a bare `except:`; a cache miss returns None and the
        # .decode() call raises, which we translate into an empty string.
        except Exception:
            val = ''
    else:
        val = None
    try:
        r = con.execute(text(
            "SELECT *, UNIX_TIMESTAMP(`ts`) AS `utime` \
            FROM `tg_users` \
            WHERE `id` = :i"), i=id).fetchone()
        resp = {
            'uid': r['id'],  # Telegram user id
            'uname': r['uname'],  # Telegram username
            'fname': r['fname'],  # Full username
            'langs': r['langs'],  # Event languages that user can attend to the event. Bit mask
            'lang_id': r['lang_id'],  # Telegram chatbot interface language
            'city_def': r['city_def'],  # City of events to offer by default
            'ts': str(r['ts']),  # Timestamp of user registration
            'utime': r['utime'],  # Unixtime of user registration
            'src': r['src'],  # Source of the user
            'd': r['ts'].strftime(DT_FORMAT),  # Date of registration
            't': r['ts'].strftime('%H:%M')  # Time of registration
        } if r else {}
    # Narrowed from a bare `except:`: any DB failure yields resp = None.
    except Exception:
        resp = None
    if resp:
        resp['cache'] = val
    else:
        # Unknown user: fall back to cached data alone, or None.
        resp = {'cache': val} if val else None
    return resp
def event_images(e, id='id'):
    '''Builds the list of URLs of images about the event

    Args:
        e (dict): raw event data after SQL-query execution
        id (str): key under which the event id is stored in e

    Returns:
        list: list of URLs to the images
    '''
    urls = []
    for n in range(1, NUM_IMAGES + 1):
        # Each image slot stores its file extension; empty means "no image".
        ext = e['img_ext_' + str(n)]
        if ext:
            urls.append(URL_IMAGES + '/e/' + str(e[id]) + '-' + str(n) + '.' + ext)
    return urls
|
import random
import copy
import time
from Exploitation import ShrinkingEncircling
from Exploitation import SpiralUpdating
from MPModel_ALBP import ALBP_Model
from WOA_ALBP import WOAforALBP
import DataGenerator
# Data
# Best-case / worst-case processing time of each task (interval data)
d_TaskTimeMin = [7, 1, 6, 8, 15, 11, 3, 12, 8, 14, 4, 8, 5, 21, 3, 8, 6, 3, 1, 4, 43, 13, 6, 25, 24, 6, 4, 17, 4, 5, 1, 2, 11, 2, 4, 9, 2, 6, 3, 1, 21, 1, 2, 4, 4]
d_TaskTimeMax = [14, 9, 12, 16, 31, 14, 15, 14, 13, 19, 8, 10, 8, 36, 8, 15, 12, 6, 1, 8, 84, 22, 33, 32, 44, 11, 6, 17, 4, 5, 7, 2, 25, 5, 5, 15, 2, 13, 3, 1, 37, 12, 3, 9, 4]
d_nbStations = 8  # number of workstations
# Precedence relations loaded from the KILBRIDGE benchmark instance
_, d_PrecedenceTasks = DataGenerator.loadData('data/KILBRID.IN2')
# Parameters
d_nb_pop = 50  # whale (population) count for the WOA
d_max_it = 50  # iteration budget
# ----------------------------------------------------------------------------
class RobustBalancingSolution:
    """One candidate solution of the min-max-regret assembly line balancing
    problem: a precedence-feasible task sequence plus its decoded station
    assignment and worst-case regret.
    """

    def __init__(self, TaskTimeMin, TaskTimeMax, nbStations, PrecedenceTasks, taskSequence=None):
        # raw data
        if taskSequence is None:
            taskSequence = []
        self.TaskSequence = taskSequence
        self.TaskTimeMin = TaskTimeMin
        self.TaskTimeMax = TaskTimeMax
        self.nbStations = nbStations
        self.PrecedenceTasks = PrecedenceTasks
        # objects (filled in by decoding()/evaluateMaxRegret(); -1 = not computed)
        self.CycleTime = -1
        self.TaskAssignment = []
        self.WorkLoad = []
        self.MaxRegret = -1

    def randomSequence(self):
        # encoding: build a random topological order of the precedence graph
        self.TaskSequence = []
        M = copy.deepcopy(self.PrecedenceTasks)
        for _ in range(len(M)):
            # candidates = tasks with no remaining predecessors, not yet placed
            choices = []
            for i in range(len(M)):
                if not M[i] and i not in self.TaskSequence:
                    choices.append(i)
            nxtask = random.choice(choices)
            self.TaskSequence.append(nxtask)
            # remove the chosen task (stored 1-based) from all successor lists
            for j in M:
                if nxtask + 1 in j:
                    j.remove(nxtask + 1)

    def decoding(self):
        # decoding: assign the fixed sequence to stations, iteratively
        # lowering the trial cycle time until it can no longer be met
        TaskSequence = self.TaskSequence
        nbStations = self.nbStations
        TimeList = self.TaskTimeMin
        # lower bound for the cycle time vs. an upper bound to start from
        init_cycletime, cycletime = max(sum(TimeList) // self.nbStations, max(TimeList)), sum(TimeList)
        while cycletime > init_cycletime:
            task_unassigned = TaskSequence[:]
            workload, potential_workload = [], []
            station_num = 0
            task_to_station = []
            # greedily fill each station up to the trial cycle time
            while station_num < nbStations and task_unassigned:
                currTime, currTasks = 0, []
                while task_unassigned and currTime + TimeList[task_unassigned[0]] <= init_cycletime:
                    currTime += TimeList[task_unassigned[0]]
                    currTasks.append(task_unassigned.pop(0))
                workload.append(currTime)
                task_to_station.append(currTasks)
                station_num += 1
            # overflow: dump whatever is left onto the last station
            if task_unassigned:
                workload[-1] += sum(TimeList[_] for _ in task_unassigned)
                task_to_station[-1] += task_unassigned
            # next trial cycle time: smallest workload obtained by pulling one
            # task forward from the following station
            for m in range(len(workload) - 1):
                potential_task = task_to_station[m + 1][0]
                potential_workload.append(workload[m] + TimeList[potential_task])
            cycletime, init_cycletime = max(workload), min(potential_workload)
        self.CycleTime = cycletime
        self.TaskAssignment = task_to_station
        self.WorkLoad = workload

    def evaluateMaxRegret(self):
        # find worst-case scenario of solution: per station, push that
        # station's tasks to their max time and compare against the optimum
        max_regret = 0
        for station in range(len(self.TaskAssignment)):
            # NOTE(review): `s` is assigned but never used -- looks like a
            # leftover; confirm before removing.
            s = dict()
            # 1.calculate task time list
            task_time = self.TaskTimeMin[:]
            for _ in self.TaskAssignment[station]:
                task_time[_] = self.TaskTimeMax[_]
            # 2.calculate optimal cycle time
            # OptimalCT = ALBP_Model(task_time, self.nbStations, self.PrecedenceTasks).objective_value
            sol, _ = WOAforALBP(task_time, self.nbStations, self.PrecedenceTasks, 30, 30)
            OptimalCT = sol.CycleTime
            # 3.calculate cycle time of solution
            CT = max(sum(task_time[task] for task in self.TaskAssignment[k]) for k in range(len(self.TaskAssignment)))
            # 4.calculate regret
            Regret = CT - OptimalCT
            # find worst-case scenario: s
            if Regret > max_regret:
                max_regret = Regret
        self.MaxRegret = max_regret

    def printSolution(self):
        print("*************************** Solution ***************************")
        print("Assignment of Tasks to Workstations:")
        for station in range(len(self.TaskAssignment)):
            print('\tWorkstation%d: Task' % (station + 1), end=" ")
            for i in self.TaskAssignment[station]:
                print(i + 1, end=" ")
            print()
        print('Max regret value =', self.MaxRegret)
# ----------------------------------------------------------------------------
class RobustBalancingPopulation:
    """A population of RobustBalancingSolution individuals for the WOA."""

    def __init__(self, TaskTimeMin, TaskTimeMax, nbStations, PrecedenceTasks, nbPop):
        self.nbPop = nbPop
        self.population = []
        # Seed the population with random, fully evaluated individuals.
        for _ in range(nbPop):
            member = RobustBalancingSolution(TaskTimeMin, TaskTimeMax, nbStations, PrecedenceTasks)
            member.randomSequence()
            member.decoding()
            member.evaluateMaxRegret()
            self.population.append(member)

    def bestSolution(self):
        """Return the individual with the smallest worst-case regret
        (first one wins on ties, as before)."""
        return min(self.population, key=lambda member: member.MaxRegret)
# ----------------------------------------------------------------------------
def WOAforMMRALBP(TaskTimeMin, TaskTimeMax, nbStations, PrecedenceTasks, nbWhales, maxIter, opt=None):
    """Whale Optimization Algorithm for the min-max-regret ALBP.

    Args:
        TaskTimeMin/TaskTimeMax: interval task times
        nbStations: number of workstations
        PrecedenceTasks: precedence relations
        nbWhales: population size
        maxIter: iteration budget
        opt: optional known optimum; iteration stops early once reached

    Returns:
        tuple: (best RobustBalancingSolution found, CPU seconds used)
    """
    start = time.process_time()
    # initialization
    P = RobustBalancingPopulation(TaskTimeMin, TaskTimeMax, nbStations, PrecedenceTasks, nbWhales)
    bestSol = P.bestSolution()
    # iteration
    it = 1
    while it <= maxIter:
        for sol in P.population:
            # Exploration/exploitation split of the WOA: encircle either the
            # best whale or a random one, or spiral towards the best.
            if random.random() < 0.5:
                y = bestSol if random.random() < 0.5 else random.sample(P.population, 1)[0]
                sol.TaskSequence = ShrinkingEncircling(sol.TaskSequence, y.TaskSequence, 4)
            else:
                sol.TaskSequence = SpiralUpdating(sol.TaskSequence, bestSol.TaskSequence, 4)
            sol.decoding()
            # NOTE(review): evaluateMaxRegret() is never re-run here, so
            # MaxRegret (used by bestSolution() and the `opt` stop test)
            # stays at its initial-population value -- confirm whether this
            # is an intentional cost-saving shortcut or a bug.
        bestSol = P.bestSolution()
        if opt and bestSol.MaxRegret <= opt: break
        # print('Cycle time =', bestSol.CycleTime)
        it += 1
    end = time.process_time()
    # print("CPU time of WOA for MMRALBP: %.3fs" % (end - start))
    return bestSol, end - start
if __name__ == "__main__":
    # BUG FIX: WOAforMMRALBP returns a (solution, cpu_seconds) tuple; the
    # original bound the whole tuple to bestSol and then crashed on
    # bestSol.printSolution().
    bestSol, cpu_time = WOAforMMRALBP(d_TaskTimeMin, d_TaskTimeMax, d_nbStations, d_PrecedenceTasks, d_nb_pop, d_max_it)
    # print solution
    bestSol.printSolution()
|
from __future__ import print_function, unicode_literals
import os
import tempfile
import boto3
import json
import logging
from glob import glob
from shutil import copyfile
from libraries.aws_tools.s3_handler import S3Handler
from libraries.general_tools.file_utils import write_file, remove_tree
from libraries.door43_tools.templaters import do_template
from datetime import datetime, timedelta
class ProjectDeployer(object):
    """
    Deploys a project's revision to the door43.org bucket

    Read from the project's user dir in the cdn.door43.org bucket
    by applying the door43.org template to the raw html files
    """

    def __init__(self, cdn_bucket, door43_bucket):
        """
        :param string cdn_bucket:
        :param string door43_bucket:
        """
        self.cdn_bucket = cdn_bucket
        self.door43_bucket = door43_bucket
        self.cdn_handler = None
        self.door43_handler = None
        self.lambda_client = None
        self.logger = logging.getLogger()
        self.setup_resources()
        # Scratch space for downloads and rendered output; removed at the
        # end of a successful deploy.
        self.temp_dir = tempfile.mkdtemp(suffix="", prefix="deployer_")

    def setup_resources(self):
        """Create the S3 handlers and the lambda client used by deploys."""
        self.cdn_handler = S3Handler(self.cdn_bucket)
        self.door43_handler = S3Handler(self.door43_bucket)
        self.lambda_client = boto3.client('lambda', region_name='us-west-2')

    def deploy_revision_to_door43(self, build_log_key):
        """
        Deploys a single revision of a project to door43.org

        :param string build_log_key:
        :return bool: True when deployed, False when the build log is
            missing or incomplete
        """
        build_log = None
        try:
            build_log = self.cdn_handler.get_json(build_log_key)
        # Narrowed from a bare `except:`: missing/invalid build log is
        # handled by the check below, but Ctrl-C must not be swallowed.
        except Exception:
            pass
        if not build_log or 'commit_id' not in build_log or 'repo_owner' not in build_log or 'repo_name' not in build_log:
            return False
        self.logger.debug("Deploying, build log: " + json.dumps(build_log))
        user = build_log['repo_owner']
        repo_name = build_log['repo_name']
        commit_id = build_log['commit_id'][:10]
        s3_commit_key = 'u/{0}/{1}/{2}'.format(user, repo_name, commit_id)
        s3_repo_key = 'u/{0}/{1}'.format(user, repo_name)
        source_dir = tempfile.mkdtemp(prefix='source_', dir=self.temp_dir)
        output_dir = tempfile.mkdtemp(prefix='output_', dir=self.temp_dir)
        template_dir = tempfile.mkdtemp(prefix='template_', dir=self.temp_dir)
        self.cdn_handler.download_dir(s3_commit_key, source_dir)
        source_dir = os.path.join(source_dir, s3_commit_key)
        resource_type = build_log['resource_type']
        template_key = 'templates/project-page.html'
        template_file = os.path.join(template_dir, 'project-page.html')
        self.logger.debug("Downloading {0} to {1}...".format(template_key, template_file))
        self.door43_handler.download_file(template_key, template_file)
        html_files = sorted(glob(os.path.join(source_dir, '*.html')))
        if len(html_files) < 1:
            # No rendered pages yet: build a placeholder index.html showing
            # errors, warnings, or a "conversion requested" auto-refresh page.
            content = ''
            if len(build_log['errors']) > 0:
                content += """
                <div style="text-align:center;margin-bottom:20px">
                    <i class="fa fa-times-circle-o" style="font-size: 250px;font-weight: 300;color: red"></i>
                    <br/>
                    <h2>Critical!</h2>
                    <h3>Here is what went wrong with this build:</h3>
                </div>
                """
                content += '<div><ul><li>' + '</li><li>'.join(build_log['errors']) + '</li></ul></div>'
            elif len(build_log['warnings']) > 0:
                content += """
                <div style="text-align:center;margin-bottom:20px">
                    <i class="fa fa-exclamation-circle" style="font-size: 250px;font-weight: 300;color: yellow"></i>
                    <br/>
                    <h2>Warning!</h2>
                    <h3>Here are some problems with this build:</h3>
                </div>
                """
                content += '<ul><li>' + '</li><li>'.join(build_log['warnings']) + '</li></ul>'
            else:
                content += '<h1 class="conversion-requested">{0}</h1>'.format(build_log['message'])
                content += '<p><i>No content is available to show for {0} yet.</i></p>'.format(repo_name)
                content += """
            <script type="text/javascript">setTimeout(function(){window.location.reload(1);}, 10000);</script>
                """
            html = """
            <html lang="en">
            <head>
                <title>{0}</title>
            </head>
            <body>
                <div id="content">{1}</div>
            </body>
            </html>""".format(repo_name, content)
            repo_index_file = os.path.join(source_dir, 'index.html')
            write_file(repo_index_file, html)
        # merge the source files with the template
        do_template(resource_type, source_dir, output_dir, template_file)
        # Copy first HTML file to index.html if index.html doesn't exist
        html_files = sorted(glob(os.path.join(output_dir, '*.html')))
        if len(html_files) > 0:
            index_file = os.path.join(output_dir, 'index.html')
            if not os.path.isfile(index_file):
                copyfile(os.path.join(output_dir, html_files[0]), index_file)
        # Copy all other files over that don't already exist in output_dir, like css files
        for filename in sorted(glob(os.path.join(source_dir, '*'))):
            output_file = os.path.join(output_dir, os.path.basename(filename))
            if not os.path.exists(output_file) and not os.path.isdir(filename):
                copyfile(filename, output_file)
        # Upload all files to the door43.org bucket
        for root, dirs, files in os.walk(output_dir):
            for f in sorted(files):
                path = os.path.join(root, f)
                if os.path.isdir(path):
                    continue
                key = s3_commit_key + path.replace(output_dir, '')
                self.logger.debug("Uploading {0} to {1}".format(path, key))
                self.door43_handler.upload_file(path, key, 0)
        # Now we place json files and make an index.html file for the whole repo
        try:
            self.door43_handler.copy(from_key='{0}/project.json'.format(s3_repo_key), from_bucket=self.cdn_bucket)
            self.door43_handler.copy(from_key='{0}/manifest.json'.format(s3_commit_key), to_key='{0}/manifest.json'.format(s3_repo_key))
            self.door43_handler.redirect(s3_repo_key, '/' + s3_commit_key)
            self.door43_handler.redirect(s3_repo_key + '/index.html', '/' + s3_commit_key)
        except Exception:
            # Best-effort: repo-level pointers are nice-to-have, the commit
            # itself is already deployed.
            pass
        remove_tree(self.temp_dir)  # cleanup temp files
        return True

    def redeploy_all_projects(self, deploy_function):
        """Re-invoke the deploy lambda for every project whose build log is
        at least 24 hours old.

        :param string deploy_function: name of the lambda to invoke
        :return bool: always True
        """
        i = 0
        one_day_ago = datetime.utcnow() - timedelta(hours=24)
        for obj in self.cdn_handler.get_objects(prefix='u/', suffix='build_log.json'):
            i += 1
            last_modified = obj.last_modified.replace(tzinfo=None)
            # Skip anything deployed within the last 24 hours.
            if one_day_ago <= last_modified:
                continue
            self.lambda_client.invoke(
                FunctionName=deploy_function,
                InvocationType='Event',
                LogType='Tail',
                Payload=json.dumps({
                    'cdn_bucket': self.cdn_bucket,
                    'build_log_key': obj.key
                })
            )
        return True
|
#!/usr/bin/env python
import sys
from time import sleep
from random import choice
class Game(object):
    '''
    A game class for 'Rock', 'Paper', 'Scissors'.
    Will play a nominal amount of games, and then exit.
    To start playing create an instance, game = Game().
    Then simply start playing with game.play().

    Keyword arguments
    :games: the amount of games to play, (default 1)

    NOTE: this module is Python 2 (print statements, raw_input).
    '''
    def __init__(self, games=1):
        # Index order matters: decide() relies on rock=0, paper=1, scissors=2.
        self.options = ['rock', 'paper', 'scissors']
        self.game_cycles = games
        self.wait = 1  # seconds to pause between messages, for readability
    def play(self, chosen=None):
        '''
        Plays the game, expects input from user.
        To bypass user input, use chosen arg
        All inputs are validated against self.options

        Keyword arguments
        :chosen: the hand you wish to play eg 'rock', (default=None)
        '''
        if chosen:
            chosen = self.validate_input(chosen, self.options)
        # Keep prompting until a valid hand is entered
        while not chosen:
            msg = 'Please type Rock, Paper, or Scissors, then press enter: '
            chosen = raw_input(msg)
            chosen = self.validate_input(chosen, self.options)
        c = choice(self.options)
        print 'Player: %s vs Computer: %s' %(chosen, c)
        sleep(self.wait)
        plyr = self.options.index(chosen)
        comp = self.options.index(c)
        # NOTE(review): result is never used -- confirm whether a score
        # tally was intended here.
        result = self.decide(plyr=plyr, comp=comp)
        sleep(self.wait)
        self.game_cycles -= 1
        self.play_again()
    def validate_input(self, input_, options):
        '''
        Validate input, test if it matches any of the options
        White space and case do not affect validation

        Returns the normalized choice, or None when invalid.
        '''
        i = input_.lower().strip()
        if i not in options:
            print 'Sorry, "%s" is not a valid choice.\n' %(input_)
            return None
        return i
    def decide(self, plyr, comp):
        # Indices are cyclic: each hand beats the one directly before it
        # (paper>rock, scissors>paper) and rock beats scissors (0 == 2-2).
        # Returns 1 = player win, 0 = loss, 2 = draw.
        if (plyr - 1) == comp or plyr == (comp - 2):
            print 'Congratulations you won!'
            return 1
        elif (comp -1) == plyr or comp == (plyr - 2):
            print "Bad luck, you lost"
            return 0
        else:
            print 'A draw!'
            return 2
    def play_again(self):
        # Offer another round while games remain; otherwise exit.
        options = ['y', 'n']
        if self.game_cycles < 1:
            self.end()
        else:
            r = False
            while not r:
                msg = 'You have %s game(s) remaining, play again "Y" or "N"? '\
                        %(self.game_cycles)
                r = raw_input(msg)
                r = self.validate_input(r, options)
            if r == 'y':
                self.play()
            else:
                self.end()
    def end(self):
        print 'Thanks for playing, good bye'
        sys.exit()
if __name__ == '__main__':
    # Default to a single game; an optional trailing CLI argument overrides it.
    number_games = 1
    if len(sys.argv) > 1:
        # NOTE(review): takes the LAST argument, not the first one after the
        # script name -- confirm this is intended.
        number_games = sys.argv[-1]
    game = Game(int(number_games))
    game.play()
|
# Copyright 2020 Alexander Polishchuk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
import obm
# Optional dependency groups, installable as `pip install obm[<group>]`.
EXTRAS_REQUIRE = {
    "tests": ["pytest", "python-dotenv", "pytest-xdist"],
    "lint": ["pylint", "mypy"],
    "docs": ["sphinx>=2.4,<3", "sphinx-rtd-theme"],
    "deploy": ["twine"],
    "dev": ["tox", "rope"],
}
# "dev" aggregates every other group so one install covers a full dev setup.
EXTRAS_REQUIRE["dev"] += (
    EXTRAS_REQUIRE["tests"]
    + EXTRAS_REQUIRE["lint"]
    + EXTRAS_REQUIRE["docs"]
    + EXTRAS_REQUIRE["deploy"]
)
def read(file_name):
    """Return the entire contents of *file_name* as a string."""
    with open(file_name) as source:
        return source.read()
# Package metadata; the version is sourced from obm.__version__ and the long
# description from README.md.
setuptools.setup(
    name="obm",
    version=obm.__version__,
    packages=setuptools.find_packages(exclude=["tests*"]),
    install_requires=["aiohttp>=3.6,<4", "web3>=5.7,<6", "marshmallow>=3.5,<4"],
    extras_require=EXTRAS_REQUIRE,
    license="Apache License 2.0",
    description="Async blockchain nodes interacting tool with ORM-like api.",
    long_description=read("README.md"),
    long_description_content_type="text/markdown",
    url="https://github.com/madnesspie/obm",
    author="Alexander Polishchuk",
    author_email="apolishchuk52@gmail.com",
    classifiers=[
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3 :: Only",
    ],
)
|
from django.contrib import admin
from .models import *
# Register your models here.
# Admin configuration: each model gets a thin ModelAdmin that only sets the
# placeholder shown for empty fields in the change list.
class EducationAdmin(admin.ModelAdmin):
    empty_value_display = '-empty-'
admin.site.register(Education, EducationAdmin)
class StatusAdmin(admin.ModelAdmin):
    empty_value_display = '-empty-'
admin.site.register(Status, StatusAdmin)
class CategoryAdmin(admin.ModelAdmin):
    empty_value_display = '-empty-'
admin.site.register(EducationCategory, CategoryAdmin)
class TaskAdmin(admin.ModelAdmin):
    empty_value_display = '-empty-'
admin.site.register(Task, TaskAdmin)
|
#!python
"""
It turns out that 12 cm is the smallest length of wire that can be bent to form an integer sided right angle triangle in exactly one way, but there are many more examples.
12 cm: (3,4,5)
24 cm: (6,8,10)
30 cm: (5,12,13)
36 cm: (9,12,15)
40 cm: (8,15,17)
48 cm: (12,16,20)
In contrast, some lengths of wire, like 20 cm, cannot be bent to form an integer sided right angle triangle, and other lengths allow more than one solution to be found; for example, using 120 cm it is possible to form exactly three different integer sided right angle triangles.
120 cm: (30,40,50), (20,48,52), (24,45,51)
Given that L is the length of the wire, for how many values of L โค 1,500,000 can exactly one integer sided right angle triangle be formed?
"""
from math import floor
def is_triple(a, b, c):
    """Return True when (a, b, c) is a Pythagorean triple: a^2 + b^2 == c^2."""
    return a ** 2 + b ** 2 == c ** 2
def generate_triples(p):
    """Yield every Pythagorean triple [a, b, c] with a <= b whose perimeter is p."""
    for c in range(1, p):
        for a in range(1, c):
            b = p - c - a
            # Emit only the a <= b ordering to avoid mirrored duplicates;
            # the Pythagorean test is inlined (a^2 + b^2 == c^2).
            if a <= b and a * a + b * b == c * c:
                yield [a, b, c]
def has_one_triple(p):
    """Return True when exactly one integer right triangle has perimeter p."""
    found = 0
    for _ in generate_triples(p):
        found += 1
        if found > 1:
            # Early exit: a second triple already disqualifies p.
            return False
    return found == 1
def triple_sieve(p):
    """Brute-force scan printing perimeters with exactly one triple,
    skipping multiples of any such perimeter already found.

    NOTE(review): skipping every multiple of a one-triple perimeter is
    mathematically dubious (a multiple may itself admit exactly one
    triple); this helper looks superseded by generate_primative_triples
    below -- confirm before relying on its output.
    """
    s = [True for i in range(p + 1)]
    s[0] = False
    for i in range(1, p + 1):
        if s[i]:
            if has_one_triple(i):
                print(i)
                for j in range(i + i, p + 1, i):
                    s[j] = False
            else:
                # BUG FIX: was `s[i] == False`, a no-op comparison where an
                # assignment was clearly intended.
                s[i] = False
from fractions import gcd
from functools import lru_cache
from math import sqrt
def prime_factors(n):
    """Yield the prime factorization of n in nondecreasing order.

    Each prime is yielded with multiplicity; nothing is yielded for n <= 1.
    """
    while n > 1:
        i = 2
        while i <= n:
            if n % i == 0:
                yield i
                # BUG FIX: was `int(n / i)` -- true division goes through a
                # float and silently corrupts factors once n exceeds 2**53;
                # floor division keeps everything in exact integers.
                n //= i
                break
            i += 1
def distinct_prime_factors(n):
    """Return the set of distinct primes dividing n."""
    return {p for p in prime_factors(n)}
def lesser_rel_prime_generator(x):
    """Yield every integer in [1, x) that is coprime to x.

    Sieve approach: strike out every multiple of each distinct prime
    factor of x, then yield whatever survives.
    """
    flags = [True for i in range(x)]
    flags[0] = False
    for p in distinct_prime_factors(x):
        # BUG FIX: was range(p + p, x, p), which left p itself marked True,
        # so the prime factors of x were wrongly yielded as coprime.
        for i in range(p, x, p):
            flags[i] = False
    for i in range(x):
        if flags[i]:
            yield i
def generate_primative_triples(limit):
    """Count wire lengths L <= limit forming exactly one right triangle.

    Uses Euclid's formula: every primitive triple is (m^2-n^2, 2mn, m^2+n^2)
    for coprime m > n of opposite parity, and every triple is a multiple of
    a primitive one, so it suffices to step through multiples of each
    primitive perimeter. Prints and returns the count (Project Euler 75).
    """
    from math import gcd  # BUG FIX: fractions.gcd was removed in Python 3.9
    results = [0 for i in range(limit + 1)]
    count = 0
    # Perimeter is 2m(m+n) > 2m^2, so m <= sqrt(limit/2).
    # BUG FIX: +1 so the largest admissible m is not excluded by range();
    # any overshoot is filtered by the `while p <= limit` check below.
    for m in range(2, int(sqrt(limit / 2)) + 1):
        for n in range(1, m):
            # Primitive triples need m, n of opposite parity and coprime.
            if (m - n) % 2 == 1:
                if gcd(m, n) == 1:
                    a = m * m - n * n
                    b = 2 * m * n
                    c = m * m + n * n
                    p = a + b + c
                    # Count every multiple of this primitive perimeter;
                    # maintain the exactly-one tally incrementally.
                    while p <= limit:
                        results[p] += 1
                        if results[p] == 1:
                            count += 1
                        if results[p] == 2:
                            count -= 1
                        p += (a + b + c)
    print(count)
    return count
generate_primative_triples(1500000) |
"""Math module is simple math module to test student ability to do simple math"""
def add(n_1, n_2):
    """Return the sum of n_1 and n_2."""
    total = n_1 + n_2
    return total
def multiply(n_1, n_2):
    """Return the product of n_1 and n_2."""
    product = n_1 * n_2
    return product
# Maximal path sum on an n x m grid, moving only right/down from the
# top-left cell to the bottom-right one (classic DP, computed in place).
# Input: first line "n m", then n lines of m integers each.
first_line = input().split()
n, m = int(first_line[0]), int(first_line[1])
mat = [list(map(int, input().split())) for _ in range(n)]
# First row / first column can only be reached one way: prefix sums.
for j in range(1, m):
    mat[0][j] += mat[0][j - 1]
for i in range(1, n):
    mat[i][0] += mat[i - 1][0]
# Every inner cell takes the better of its top / left predecessors.
for i in range(1, n):
    for j in range(1, m):
        mat[i][j] += max(mat[i][j - 1], mat[i - 1][j])
print(mat[n - 1][m - 1])
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os, sys
from os import walk
import logging
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4 import uic
from view.calculation_session_layout import CalculationSessionLayout
from view.field_session_layout import FieldSessionLayout
from view.comparation_session_layout import ComparationSessionLayout
from utils.dialog_box import DialogBox
from utils.input_dialog_box import InputDialogBox
from matlab_interface.matlab_interface import MatlabInterface
# Load the main-application design produced with Qt Designer
main_window = uic.loadUiType("designer/main_application.ui")[0]
class MainWindow(QMainWindow, main_window):
    def __init__(self, parent = None):
        """Build the main window: load the Designer UI, initialize session
        ids, create the comparison view, and wire up the session buttons."""
        QMainWindow.__init__(self, parent)
        self.setupUi(self)
        # Initialize session ids
        # TODO: load them from disk!
        self.calculation_id = 1
        self.field_id = 1
        # Create the object backing the "Comparisons" screen
        self.comparation_view = ComparationSessionLayout(self.matplot_container, self.toolbar_container, self.saved_calculation_list, self.saved_calculation_info)
        # Wire the buttons that create, open, save and delete calculation
        # and field sessions
        self.create_new_calculation_button.clicked.connect(self.create_new_calculation_session)
        self.open_saved_calculation_button.clicked.connect(self.open_saved_calculation_session)
        self.delete_calculation_button.clicked.connect(self.delete_current_calculation_session)
        self.save_calculation_button.clicked.connect(self.save_current_calculation_session)
        self.create_field_calculation_button.clicked.connect(self.create_new_field_session)
        self.open_saved_field_calculation.clicked.connect(self.open_saved_field_session)
        self.save_field_calculation_button.clicked.connect(self.save_current_field_session)
        self.delete_field_calculation_button.clicked.connect(self.delete_current_field_session)
        self.update_saved_sessions_button.clicked.connect(self.update_saved_sessions)
def create_new_calculation_session(self):
self.calculation_session_layout = CalculationSessionLayout(self.calculation_id)
last_index = self.calculation_session_container.addTab(self.calculation_session_layout, "NuevaSesionCalculo" + str(self.calculation_id))
self.calculation_session_container.setCurrentIndex(last_index)
self.calculation_id += 1
    def open_saved_calculation_session(self):
        """Let the user pick a saved calculation session and open it in a new tab."""
        # Collect all saved session files
        files = []
        # TODO: un-hardcode the saved-sessions directory
        for (dirpath, dirnames, filenames) in walk(os.getcwd() + "/save_sessions/calculation/"):
            files.extend(filenames)
            break
        logging.debug("Los archivos guardados hallados son: " + str(files))
        # Show a pop-up with the list of saved files
        session_name = InputDialogBox.show_item_input_dialog_box(self, "Sesiones guardadas", "Elija una de las sesiones guardadas:", files)
        if session_name is not None:
            # Build the full file name
            filename = os.getcwd() + "/save_sessions/calculation/" + session_name
            # Load the chosen session into a new tab and focus it
            self.calculation_session_layout = CalculationSessionLayout(self.calculation_id, filename)
            last_index = self.calculation_session_container.addTab(self.calculation_session_layout, session_name)
            self.calculation_session_container.setCurrentIndex(last_index)
            self.calculation_id += 1
def save_current_calculation_session(self):
filename = InputDialogBox.show_text_input_dialog_box(self, "Guardar sesiรณn de cรกlculo", "Ingrese el nombre de la sesiรณn de cรกlculo: ")
logging.debug("Se guardarรก la sesiรณn de cรกlculo en el archivo: " + str(filename))
# TODO: Deshardcodear directorio de sesiones guardadas
file_handler = open(os.getcwd() + "/save_sessions/calculation/" + filename, 'w')
current_session = self.calculation_session_container.currentWidget()
if current_session.save_calculation_session(file_handler):
logging.error("Sesiรณn guardada con รฉxito!")
# TODO: Modificar titulo del tab!
file_handler.close()
DialogBox.show_dialog_box(QMessageBox.Information, "Guardar sesiรณn", "Se ha guardado la sesiรณn satisfactoriamente")
else:
logging.error("La sesiรณn no ha podido guardarse!")
os.unlink(file_handler.name)
DialogBox.show_dialog_box(QMessageBox.Critical, "Guardar sesiรณn", "No se ha podido guardar la sesiรณn")
def delete_current_calculation_session(self):
    """Remove the tab of the currently selected calculation session."""
    # TODO: add an "Are you sure?" confirmation dialog
    self.calculation_session_container.removeTab(self.calculation_session_container.currentIndex())
def create_new_field_session(self):
    """Add and select a tab holding a brand-new (empty) field session."""
    self.field_session_layout = FieldSessionLayout(self.field_id)
    tab_label = "NuevaSesionCampo" + str(self.field_id)
    new_tab_index = self.field_session_container.addTab(self.field_session_layout, tab_label)
    self.field_session_container.setCurrentIndex(new_tab_index)
    self.field_id += 1
def open_saved_field_session(self):
    """Let the user pick a saved field session and open it in a new tab."""
    # TODO: Deshardcodear directorio de sesiones guardadas
    saved_dir = os.getcwd() + "/save_sessions/field/"
    # Collect the file names of every saved field session.
    files = []
    for (dirpath, dirnames, filenames) in walk(saved_dir):
        files.extend(filenames)
        break
    logging.debug("Los archivos guardados hallados son: " + str(files))
    # Ask the user to choose one of the saved sessions.
    session_name = InputDialogBox.show_item_input_dialog_box(self, "Sesiones guardadas", "Elija una de las sesiones guardadas:", files)
    if session_name is None:
        # Dialog was cancelled: nothing to open.
        return
    # Build the full path and add a tab with a session layout restored from it.
    filename = saved_dir + session_name
    self.field_session_layout = FieldSessionLayout(self.field_id, filename)
    last_index = self.field_session_container.addTab(self.field_session_layout, session_name)
    self.field_session_container.setCurrentIndex(last_index)
    self.field_id += 1
def save_current_field_session(self):
    """Ask for a file name and persist the currently selected field session."""
    filename = InputDialogBox.show_text_input_dialog_box(self, "Guardar sesiรณn de campo", "Ingrese el nombre de la sesiรณn de campo: ")
    # BUG FIX: the dialog may be cancelled (None) or left empty; the old code
    # crashed concatenating None into the path.
    if not filename:
        return
    logging.debug("Se guardarรก la sesiรณn de campo en el archivo: " + str(filename))
    # TODO: Deshardcodear directorio de sesiones guardadas
    file_handler = open(os.getcwd() + "/save_sessions/field/" + filename, 'w')
    current_session = self.field_session_container.currentWidget()
    if current_session.save_field_session(file_handler):
        # Success report: use info level, not error.
        logging.info("Sesiรณn guardada con รฉxito!")
        # TODO: Modificar titulo del tab!
        file_handler.close()
        DialogBox.show_dialog_box(QMessageBox.Information, "Guardar sesiรณn", "Se ha guardado la sesiรณn satisfactoriamente")
    else:
        logging.error("La sesiรณn no ha podido guardarse!")
        # Close the handle before unlinking so the file is not removed while open.
        file_handler.close()
        os.unlink(file_handler.name)
        DialogBox.show_dialog_box(QMessageBox.Critical, "Guardar sesiรณn", "No se ha podido guardar la sesiรณn")
def delete_current_field_session(self):
    """Remove the tab of the currently selected field session."""
    # TODO: add an "Are you sure?" confirmation dialog
    self.field_session_container.removeTab(self.field_session_container.currentIndex())
def update_saved_sessions(self):
    """Rebuild the comparison view so it picks up newly saved sessions."""
    self.comparation_view.remove_graphics()
    self.comparation_view.initialize_saved_sessions()
if __name__ == '__main__':
    app = QApplication(sys.argv)
    logging.basicConfig(level=logging.DEBUG)
    # Show a splash screen while the (slow) MATLAB bridge starts up.
    splash_pix = QPixmap('loading2.png')
    splash = QSplashScreen(splash_pix, Qt.WindowStaysOnTopHint)
    splash.show()
    # Instantiate the MATLAB interface; the app cannot run without it.
    try:
        matlab_interface = MatlabInterface()
    except Exception:
        logging.error("Finalizando...")
        splash.close()
        sys.exit()
    def login():
        # Swap the splash screen for the maximized main window.
        splash.close()
        global main_window
        main_window = MainWindow()
        main_window.showMaximized()
    # Keep the splash visible for 10 seconds, then open the main window.
    QTimer.singleShot(10000, login)
    sys.exit(app.exec_())
|
import sqlalchemy
from sqlalchemy import orm
from .db_session import SqlAlchemyBase
class Chapter(SqlAlchemyBase):
    """ORM model for a single chapter belonging to a course."""
    __tablename__ = 'chapters'

    # Surrogate primary key.
    id = sqlalchemy.Column(sqlalchemy.Integer,
                           primary_key=True, autoincrement=True)
    # Ordinal position of the chapter within its course.
    num = sqlalchemy.Column(sqlalchemy.Integer)
    title = sqlalchemy.Column(sqlalchemy.String)
    content = sqlalchemy.Column(sqlalchemy.String)
    # Owning course (FK to courses.id, see `course` relation below).
    course_id = sqlalchemy.Column(sqlalchemy.Integer,
                                  sqlalchemy.ForeignKey("courses.id"))
    # Pre-rendered HTML of the chapter content.
    html = sqlalchemy.Column(sqlalchemy.String)
    # True when this chapter is a test instead of regular content.
    is_test = sqlalchemy.Column(sqlalchemy.Boolean)
    # JSON-encoded test definition; presumably only set when is_test is true.
    test_json = sqlalchemy.Column(sqlalchemy.String, nullable=True)
    # NOTE(review): orm.relation is the legacy spelling of orm.relationship.
    course = orm.relation('Course')
|
#!/usr/bin/python
import fnmatch
import argparse
import re
import sys
import os
import shutil
import pdb
from tools import line_yielder
# Command-line interface: positional source/target directories plus the glob
# patterns used to find header files.
parser = argparse.ArgumentParser(
    description= "This program can recursively copy the header files "
    + "that are not in the target directory to the target directory "
    + "from a source directory. This program is written for resolve"
    + "the dependency issue for seqan library used in Adaptor_trimmer"
    + "software. This program has the same function has boost bcp." +
    "Example usage: python copy_depency.py ~/software/Cplusplus_libs/seqan-trunk/core/include/seqan/ SeqAn1.3/seqan/")
parser.add_argument('source_dir',
    help = "source directory, from which we copy the file to the"
    + "target directory")
parser.add_argument('target_dir',
    help = "source directory, from which we copy the file to the"
    + "target directory")
parser.add_argument('--pattern', '-p', nargs = '*', default = ["*.h"],
    help = "the file pattern that which we used to glob the files")
args = parser.parse_args()
# Shorthand handles for stdout/stderr used throughout this script.
o = sys.stdout
e = sys.stderr
def write_list(list, handle = e, sep = "\n"):
    """Write every item of *list* to *handle* (stderr by default), separated by *sep* and ending with a newline."""
    handle.write(sep.join(list) + "\n")
def get_files(dir, pattern_list):
    """Recursively collect file paths under *dir* whose full path matches any
    glob pattern in *pattern_list*. Returns a set of 'dir/.../name' strings."""
    matched = set()
    for path, dirs, files in os.walk(dir):
        # Normalise the directory prefix so joining with '+' is safe.
        if not path.endswith('/'):
            path += '/'
        for name in files:
            full_path = path + name
            if any(fnmatch.fnmatch(full_path, pattern) for pattern in pattern_list):
                matched.add(full_path)
    return matched
# Matches '#include <seqan/...>' and captures the seqan-relative header path.
pat_regex = re.compile('#include\s*<\s*(seqan/\S*)\s*>')

def catch_header_from_file(file, c_set):
    """Scan *file* for seqan include directives and add each header path to *c_set*."""
    for line in line_yielder(file):
        # Cheap prefix test before running the regex.
        if line.startswith('#include'):
            match = pat_regex.search(line)
            if match:
                c_set.add(match.group(1))
def remove_duplicate_parts(dir, file_name):
    """Join *dir* and *file_name*, dropping the trailing components of *dir*
    that duplicate the leading components of *file_name*.

    e.g. remove_duplicate_parts('/src/seqan', 'seqan/basic.h')
         -> '/src/seqan/basic.h'

    BUG FIX: the old implementation called paths.remove(i), which deletes the
    *first* occurrence of a component; when *dir* contained a duplicated
    component (e.g. 'seqan/core/seqan') the wrong element was removed. It also
    shadowed the builtin `str`.
    """
    dir_parts = dir.rstrip('/').split('/')
    file_parts = file_name.split('/')
    # Count how many trailing dir components overlap leading file components.
    overlap = 0
    for tail_part, head_part in zip(reversed(dir_parts), file_parts):
        if tail_part != head_part:
            break
        overlap += 1
    if overlap:
        dir_parts = dir_parts[:-overlap]
    return "/".join(dir_parts + file_parts)
def check_existence(files):
    """Partition *files* into paths that exist on disk and paths that do not.

    Returns (existed, not_existed). The old version computed both lists and
    then silently discarded them (all reporting was commented out); returning
    the pair is backward compatible because the caller ignores the result.
    """
    existed = []
    not_existed = []
    for i in files:
        if os.path.exists(i):
            existed.append(i)
        else:
            not_existed.append(i)
    return existed, not_existed
def get_all_path_valid_headers(source_dir, headers_set):
    """Map each seqan-relative header onto a full path under *source_dir*."""
    return {remove_duplicate_parts(source_dir, header) for header in headers_set}
""" This function make sure a directory is created if its not existed
"""
def check_multiLevel_dir_existense(dir, target_dir):
paths = os.path.dirname(dir).split('/') # discard the basename
for i in paths:
if os.path.exists(os.path.join(target_dir, i)):
continue
else:
os.mkdir(os.path.join(target_dir, i))
def files_not_existed_in_targetdir(file, dir, lst, pat = 'seqan'):
    """Add *file* to *lst* when its path relative to the *pat* component is
    absent from *dir* (resolved against the current working directory)."""
    target_dir = os.path.join(os.getcwd(), dir)
    parts = file.split('/')
    # Path of the header relative to the 'seqan' component.
    relative_path = '/'.join(parts[parts.index(pat) + 1:])
    candidate = os.path.join(target_dir, relative_path)
    if not os.path.exists(candidate):
        lst.add(file)
def copy_file_to_dir(file, dir, pat = 'seqan'):
    """Copy *file* into *dir*, preserving its path relative to the *pat* component.

    Returns a (possibly empty) set containing *file* when the copy failed.
    Existing files are left untouched.
    """
    target_dir = os.path.join(os.getcwd(), dir)
    path_source = file.split('/')
    # Path of the header relative to the 'seqan' component.
    path_source_sliced = '/'.join(path_source[path_source.index(pat) + 1:])
    valid_path_in_target_file = os.path.join(target_dir, path_source_sliced)
    not_copied = set()
    if not os.path.exists(valid_path_in_target_file):
        # Make sure the destination directory hierarchy exists first.
        check_multiLevel_dir_existense(path_source_sliced, target_dir)
        try:
            shutil.copy(file, os.path.dirname(valid_path_in_target_file))
            o.write("copied %s to %s\n" % (file, valid_path_in_target_file))
        except:
            # NOTE(review): bare except is deliberate best-effort here — the
            # failure is reported and the file is retried in a later round.
            e.write("copy file %s failed\n" % (file))
            not_copied.add(file)
            pass
    return not_copied
class Counter:
    """Mutable round counter; module-level state shared by get_not_existed_files."""
    i = 0
counter = Counter()
def get_not_existed_files():
    """Scan the target dir, extract all seqan includes, and return the set of
    source headers not yet present in the target directory."""
    # Count the resolution round for progress reporting.
    counter.i += 1
    files = get_files(args.target_dir, args.pattern)
    all_headers_set = set()
    for file in files:
        catch_header_from_file(file, all_headers_set)
    # Resolve each header to its full path under the source directory.
    all_path_valid_headers = get_all_path_valid_headers(args.source_dir, all_headers_set)
    check_existence(all_path_valid_headers)
    files_not_existed_inTarget = set()
    for file in all_path_valid_headers:
        files_not_existed_in_targetdir(file, args.target_dir, files_not_existed_inTarget)
    e.write("#%d round: below %d files are not in target dir: %s\n" %
        (counter.i, len(files_not_existed_inTarget), args.target_dir))
    write_list(files_not_existed_inTarget)
    return files_not_existed_inTarget
def process(files_not_existed_inTarget):
    """Copy every pending header into the target dir, then rescan.

    Successfully copied files are removed from the input set; the return value
    is the fresh set of still-missing headers (new includes may appear because
    copied headers include further headers).
    """
    tobedelete = set()
    for file in files_not_existed_inTarget:
        # copy_file_to_dir returns an empty set on success.
        if len(copy_file_to_dir(file, args.target_dir)) == 0:
            tobedelete.add(file)
    for file in tobedelete:
        files_not_existed_inTarget.remove(file)
    new_files_not_existed_inTarget = get_not_existed_files()
    return new_files_not_existed_inTarget
if __name__ == '__main__':
    # Seed the work list, then keep copying until either every include is
    # resolved or a full round makes no progress (headers missing from the
    # source directory). Removed a large block of commented-out dead code
    # duplicating get_not_existed_files().
    files_not_existed_inTarget = get_not_existed_files()
    new_files_not_existed_inTarget = process(files_not_existed_inTarget)
    while True:
        if len(new_files_not_existed_inTarget) == 0:
            e.write("All files have been found and copied\n")
            break
        elif files_not_existed_inTarget == new_files_not_existed_inTarget:
            # No progress in a whole round: the remaining headers do not
            # exist in the source directory at all.
            e.write('*' * 79 + '\n' + "Below files cannot be found in the source dir: %s\n" % (args.source_dir) +
                "Are you sure the source directory is the correct directory to use?\n")
            write_list(files_not_existed_inTarget)
            e.write('*' * 79 + '\n')
            break
        files_not_existed_inTarget = new_files_not_existed_inTarget
        new_files_not_existed_inTarget = process(new_files_not_existed_inTarget)
|
"""This script uses de poker.py module and the Montecarlo methods to calculate
the probability of the different hands of poker.
"""
import poker
from tqdm import tqdm
# Names of the poker hands, indexed by the rank returned by Hand.best_hand().
hands = ['highest card', 'pair', 'two pair', 'three of a kind', 'straight',
         'flush', 'full house', 'four of a kind', 'straight flush']

n = input('Number of simulations?\t')
try:
    n = int(n)
except ValueError:
    # Catch only the conversion error; a bare except would also swallow
    # KeyboardInterrupt and hide real bugs.
    print('Value not valid')
    exit(1)

with open('montecarlo-results.txt', 'w') as fout:
    # One counter per hand rank.
    results = [0] * len(hands)
    print('Running simulations')
    for _ in tqdm(range(n)):
        # Deal a fresh 2-card player hand and 5-card table, score the best hand.
        deck = poker.Deck()
        deck.shuffle()
        player = poker.Hand('Player')
        table = poker.Hand('Table')
        deck.move_cards(player, 2)
        deck.move_cards(table, 5)
        results[player.best_hand(table)[0]] += 1
    # Write each hand's estimated probability; short names (indices 1 and 5)
    # get an extra tab so the columns line up.
    for i in range(len(hands)):
        tab = '\t\t' if i == 1 or i == 5 else '\t'
        fout.write(hands[i] + tab + '{0:.7f}'.format(results[i]/n) + '\n')
from django import forms
from posts.models import Post
from groups.models import Group
class GroupForm(forms.ModelForm):
    """ModelForm for creating/editing a Group; exposes name and description only."""

    class Meta:
        model = Group
        fields = ('name','description')
|
# Read a binary numeral and report which of m candidate values divide it.
s = input()
# int(s, 2) replaces the manual bit-by-bit reconstruction; keep the old
# behaviour of treating an empty line as 0.
n = int(s, 2) if s else 0
m = int(input())
ans = []
for _ in range(m):
    a = int(input())
    if n % a == 0:
        ans.append(a)
if not ans:
    print("Nenhum")
else:
    ans.sort()
    print(*ans)
import argparse
import os
from time import sleep
import gym
import numpy as np
import tensorflow as tf
from tqdm import tqdm
class PolicyNetwork(tf.keras.Model):
    """Small MLP policy: 4-d observation -> probability distribution over actions."""

    def __init__(self, output_size):
        super().__init__()
        # input_shape=(4,) matches CartPole's 4-dimensional observation.
        self.dense1 = tf.keras.layers.Dense(units=20, activation="relu", input_shape=(4,))
        self.dense2 = tf.keras.layers.Dense(units=10, activation="relu")
        # Softmax head: one probability per action.
        self.dense3 = tf.keras.layers.Dense(units=output_size, activation="softmax")

    def call(self, inputs):
        """Forward pass; returns action probabilities (batch, output_size)."""
        x = self.dense1(inputs)
        x = self.dense2(x)
        return self.dense3(x)
class Agent:
    """REINFORCE (vanilla policy-gradient) agent for Gym's CartPole-v0."""

    def __init__(self, learning_rate, discount_factor):
        # learning_rate: Adam step size; discount_factor: gamma for returns.
        self.learning_rate = learning_rate
        self.discount_factor = discount_factor
        self.env = gym.make("CartPole-v0")

    def _init(self, ckpt_dir):
        """Build model/optimizer/step and restore the latest checkpoint, if any."""
        model = PolicyNetwork(self.env.action_space.n)
        optimizer = tf.keras.optimizers.Adam(learning_rate=self.learning_rate)
        step = tf.Variable(0, dtype=tf.int64)
        ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer, step=step)
        ckpt_manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=1)
        ckpt.restore(ckpt_manager.latest_checkpoint)
        return model, optimizer, ckpt, ckpt_manager

    def test(self, ckpt_dir):
        """Run one greedy (argmax) episode with rendering."""
        model, optimizer, ckpt, ckpt_manager = self._init(ckpt_dir)
        state = self.env.reset()
        self.env.render()
        done = False
        while not done:
            # Small delay so the rendered episode is watchable.
            sleep(0.005)
            action_probs = model(state.reshape(1, -1)).numpy().flatten()
            state, reward, done, info = self.env.step(np.argmax(action_probs))
            self.env.render()
        self.env.close()

    def train(self, episodes, ckpt_dir, log_dir):
        """Train with REINFORCE up to *episodes* episodes, resuming from the checkpoint."""
        model, optimizer, ckpt, ckpt_manager = self._init(ckpt_dir)
        summary_writer = tf.summary.create_file_writer(log_dir)
        with tqdm(total=episodes, desc="Episode", unit="episode") as pbar:
            # Resume the progress bar at the restored step count.
            pbar.update(ckpt.step.numpy())
            for _ in range(episodes - ckpt.step.numpy()):
                states, actions, discounted_rewards, episode_reward = self._sample_episode(model)
                loss = self._train_step(states, actions, discounted_rewards, model, optimizer)
                ckpt_manager.save()
                with summary_writer.as_default():
                    tf.summary.scalar("reward", episode_reward, step=ckpt.step)
                    tf.summary.scalar("loss", loss, step=ckpt.step)
                ckpt.step.assign_add(1)
                pbar.update(1)

    @tf.function(experimental_relax_shapes=True)
    def _train_step(self, states, actions, discounted_rewards, model, optimizer):
        """One gradient step on -sum(log pi(a|s) * G_t), the policy-gradient loss."""
        with tf.GradientTape() as tape:
            action_probs = model(states)
            # gather_nd picks the probability of the action actually taken.
            negative_log_likelihood = tf.math.negative(tf.math.log(tf.gather_nd(action_probs, actions)))
            weighted_negative_log_likelihood = tf.multiply(negative_log_likelihood, discounted_rewards)
            loss = tf.reduce_sum(weighted_negative_log_likelihood)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        return loss

    def _sample_episode(self, model):
        """Roll out one episode, sampling actions from the policy.

        Returns (states, (timestep, action) index pairs for gather_nd,
        normalized discounted returns, total episode reward).
        """
        states, actions, rewards = [], [], []
        state = self.env.reset()
        done = False
        episode_reward = 0
        while not done:
            action_probs = model(state.reshape(1, -1)).numpy().flatten()
            sampled_action = np.random.choice(self.env.action_space.n, p=action_probs)
            states.append(state)
            state, reward, done, _ = self.env.step(sampled_action)
            actions.append(sampled_action)
            rewards.append(reward)
            episode_reward += reward
        self.env.close()
        return np.array(states, dtype=np.float32), \
               np.array(list(enumerate(actions)), dtype=np.int32), \
               np.array(self._discount_rewards(rewards)).astype(dtype=np.float32), episode_reward

    def _discount_rewards(self, rewards):
        """Per-step discounted returns, standardized to zero mean / unit std.

        NOTE(review): O(n^2) in episode length (fine for CartPole's short
        episodes) and assumes std != 0, i.e. at least two timesteps with
        differing returns.
        """
        discounted = []
        for t in range(len(rewards)):
            expected_return, exponent = 0, 0
            for r in rewards[t:]:
                expected_return += r * (self.discount_factor ** exponent)
                exponent += 1
            discounted.append(expected_return)
        discounted -= np.mean(discounted)
        discounted /= np.std(discounted)
        return discounted
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--mode', choices=["train", "test"], required=True, help="Train or test the agent?")
parser.add_argument('--ckpt_dir', required=True, help="Name of checkpoint directory")
parser.add_argument('--log_dir', required=True, help="Name of log directory")
args = parser.parse_args()
BASE_DIR = os.path.dirname(__file__)
CKPT_DIR = os.path.join(BASE_DIR, args.ckpt_dir)
LOG_DIR = os.path.join(BASE_DIR, args.log_dir)
agent = Agent(learning_rate=3e-4, discount_factor=0.9)
if args.mode == "train":
agent.train(5000, CKPT_DIR, LOG_DIR)
if args.mode == "test":
agent.test(CKPT_DIR)
|
'''
@author: frank
'''
import sys
import inspect
class IPTableTarget(object):
    """Base class for iptables `-j <TARGET>` representations.

    Subclasses define `tag`, `__eq__`, `__str__` and a static `interpret`
    that builds an instance from an XML node.
    """

    def __ne__(self, other):
        # Inequality is the negation of the subclass' equality.
        return not self.__eq__(other)

    @staticmethod
    def interpret(args):
        """Base implementation: no target can be interpreted."""
        return None
class AcceptTarget(IPTableTarget):
    """`-j ACCEPT` target (no options)."""
    tag = 'ACCEPT'

    @staticmethod
    def interpret(xmlobj):
        # The ACCEPT node carries no extra options.
        return AcceptTarget()

    def __eq__(self, other):
        return isinstance(other, AcceptTarget)

    def __str__(self):
        return '-j ACCEPT'
class DropTarget(IPTableTarget):
    """`-j DROP` target (no options)."""
    tag = 'DROP'

    @staticmethod
    def interpret(xmlobj):
        # The DROP node carries no extra options.
        return DropTarget()

    def __eq__(self, other):
        return isinstance(other, DropTarget)

    def __str__(self):
        return '-j DROP'
class MasqueradeTarget(IPTableTarget):
    """`-j MASQUERADE` target, optionally restricted with --to-ports."""
    tag = 'MASQUERADE'

    def __init__(self):
        # Port range string such as "1024-65535"; None means unrestricted.
        self.to_ports = None

    def get_start_port(self):
        """First port of the --to-ports range, or None when no range is set."""
        if not self.to_ports:
            return None
        return self.to_ports.split('-')[0]

    def get_end_port(self):
        """Last port of the --to-ports range, or None when no range is set."""
        if not self.to_ports:
            return None
        return self.to_ports.split('-')[-1]

    def __eq__(self, other):
        return isinstance(other, MasqueradeTarget) and self.to_ports == other.to_ports

    def __str__(self):
        pieces = ['-j MASQUERADE']
        if self.to_ports:
            pieces.append('--to-ports')
            pieces.append(self.to_ports)
        return ' '.join(pieces)

    @staticmethod
    def interpret(xmlobj):
        target = MasqueradeTarget()
        node = xmlobj.get_child_node('to-ports')
        if node:
            target.to_ports = node.text_
        return target
class RejectTarget(IPTableTarget):
    """`-j REJECT` target with an optional --reject-with reason."""
    tag = 'REJECT'

    # Accepted values for --reject-with.
    ICMP_NET_UNREACHABLE = 'icmp-net-unreachable'
    ICMP_HOST_UNREACHABLE = 'icmp-host-unreachable'
    ICMP_PORT_UNREACHABLE = 'icmp-port-unreachable'
    ICMP_PROTO_UNREACHABLE = 'icmp-proto-unreachable'
    ICMP_NET_PROHIBITED = 'icmp-net-prohibited'
    ICMP_HOST_PROHIBITED = 'icmp-host-prohibited'
    TCP_RESET = 'tcp-reset'

    def __init__(self):
        # One of the constants above, or None for iptables' default.
        self.reject_with = None

    def __eq__(self, other):
        return isinstance(other, RejectTarget) and self.reject_with == other.reject_with

    def __str__(self):
        pieces = ['-j REJECT']
        if self.reject_with:
            pieces.append('--reject-with')
            pieces.append(self.reject_with)
        return ' '.join(pieces)

    @staticmethod
    def interpret(xmlobj):
        target = RejectTarget()
        node = xmlobj.get_child_node('reject-with')
        if node:
            target.reject_with = node.text_
        return target
class ReturnTarget(IPTableTarget):
    """`-j RETURN` target (no options)."""
    tag = 'RETURN'

    @staticmethod
    def interpret(xmlobj):
        # The RETURN node carries no extra options.
        return ReturnTarget()

    def __eq__(self, other):
        return isinstance(other, ReturnTarget)

    def __str__(self):
        return '-j RETURN'
class CheckSumTarget(IPTableTarget):
    """`-j CHECKSUM --checksum-fill` target."""
    tag = 'CHECKSUM'

    @staticmethod
    def interpret(xmlobj):
        # The CHECKSUM node carries no extra options.
        return CheckSumTarget()

    def __eq__(self, other):
        return isinstance(other, CheckSumTarget)

    def __str__(self):
        return '-j CHECKSUM --checksum-fill'
class SnatTarget(IPTableTarget):
    """`-j SNAT --to-source <addr>` target."""
    tag = 'SNAT'

    def __init__(self):
        # Source address (optionally with a port range) to rewrite to.
        self.to_source = None

    def __eq__(self, other):
        if not isinstance(other, SnatTarget):
            return False
        return self.to_source == other.to_source

    @staticmethod
    def interpret(xmlobj):
        t = SnatTarget()
        ts = xmlobj.get_child_node('to-source')
        # BUG FIX: guard against a missing <to-source> child, matching the
        # style of every other target's interpret(); the old code raised
        # AttributeError on None.
        if ts:
            t.to_source = ts.text_
        return t

    def __str__(self):
        return '-j SNAT --to-source %s' % self.to_source
def get_target(tagname):
    """Return the IPTableTarget subclass registered under *tagname*, or None."""
    return _targets_map.get(tagname)
# Registry mapping iptables tag name (e.g. 'ACCEPT') -> target class.
_targets_map = {}
def _build_targets_map():
    """Populate _targets_map by scanning this module for IPTableTarget subclasses."""
    curr_module = sys.modules[__name__]
    global _targets_map
    for name, obj in inspect.getmembers(curr_module):
        if inspect.isclass(obj) and issubclass(obj, IPTableTarget):
            # Only classes declaring a `tag` attribute are registrable
            # (this also skips the IPTableTarget base class itself).
            if not hasattr(obj, 'tag'):
                continue
            _targets_map[obj.tag] = obj
# Build the registry at import time (but not when run as a script).
if __name__ != '__main__':
    _build_targets_map()
# Vivek Keshore
# Problem link - http://www.spoj.com/problems/ACPC10A/
while True:
    # Read whitespace-separated triples until EOF (Python 2 raw_input).
    try:
        inp = raw_input().split()
    except EOFError:
        break
    x, y, z = int(inp[0]), int(inp[1]), int(inp[2])
    # Decide whether x, y, z form an arithmetic or geometric progression and
    # print the next term; otherwise print an empty line.
    if z - y != 0:
        # Arithmetic: equal successive differences (the trailing `and z - y`
        # is redundant given the enclosing z - y != 0 check).
        if z - y == y - x and z - y:
            print 'AP', z + (z-y)
        # Geometric: equal successive ratios, tested with integer division —
        # assumes the judge's data keeps the ratios exact integers.
        elif z % y == 0 and y % x == 0:
            if z / y == y / x:
                print 'GP', z * (z/y)
            else:
                print ''
        else:
            print ''
    else:
        print ''
|
with open(".\Intermediate\Day 24\letter_names.txt") as letter_names:
names = letter_names.readlines()
formatted_names = []
for name in names:
formatted_name = name.strip("\n")
formatted_names.append(formatted_name)
for name in formatted_names:
with open(".\Intermediate\Day 24\starting_letter.txt", mode="r") as starting_letter:
data = starting_letter.read()
replaced = data.replace("[name]", name)
with open(f".\Intermediate\Day 24\Final Letters\{name}.txt", mode="w") as final_letter:
final_letter.write(replaced)
|
zmienna = 5 #ustalenie ลผe zmianna bฤdzie zwracaฤ wartoลฤ piฤ
tki (czy moลผna tutaj daฤ liczbฤ z kropkฤ
? czyli np. 5.8?)
calkowita = 7 #to samo co wyลผej, tylko tyczy siฤ jedynie liczb caลkowitych.
rzeczywista = 7.5 #przecinek to tutaj jest kropka.
rzeczywista = float(38) #zamiast sลowa rzeczywista mogฤ daฤ sลowo float i osiฤ
gnฤ to samo. W przypadku liczb calkowitych jest to sลowo int
x = 4.3 #ustalenie wartoลci zmiennej x
print x #wyลwietli mojฤ
zmiennฤ
x. Przy liczbach nie daje siฤ cudzysลowia
print '%.20f' % x #tego nie czajฤ.
print x
napis = 'witaj' #nie ma rรณznicy czy da siฤ jeden cudzysลรณw czy dwa, ale jak siฤ da dwa, to moลผna potem wyลwietliฤ tekst z uลผyciem pojedynczego cudzysลowia, ktรณry siฤ wyลwietli.
napis = "witaj"
napis = "Nie martw sie o 'pojedyncze' cudzyslowy."
jeden = 1
dwa = 2
trzy = jeden + dwa
print trzy #wyลwietli mi wartoลฤ trzy ustalonฤ
wczeลniej jako sumฤ wartoลci jeden i dwa, ktรณre teลผ zostaลy ustalone chwilฤ wczeลniej.
witaj = "witaj"
swiecie = "swiecie"
witajswiecie = witaj + " " + swiecie #wartoลฤ witaj, dodaฤ przerwa (spacja), dodaฤ wartoลฤ swiecie
print witajswiecie #wyลwietli mi witaj ลwiecie :)
a, b = 3, 4 #rozumiem, ลผe jak gdzieล napiszฤ pรณลบniej print a, b to wyลwietli mi 3, 4
#ZADANIE
# zmien ponizszy kod - treลฤ polecenia
napis = None
rzeczywista = None
calkowita = None
# moja robota niลผej.
napis = 'witaj'
rzeczywista = 10.0
calkowita = 20
|
import pandas as pd
import numpy as np
import torch
from torch.optim import lr_scheduler
import torch.optim as optim
from torch.autograd import Variable
import torch.nn as nn
from PIL import Image
from sklearn.model_selection import train_test_split
import torch.nn.functional as F
import torchvision.models as models
from torch.utils.data.sampler import BatchSampler
from torch.utils.data import DataLoader
import torchvision.models as models
from torch.utils.data.sampler import SubsetRandomSampler
import os
import torch.nn.functional as F
from skimage import io
import torchvision.transforms as transforms
from torch.utils.data import Dataset
from sklearn.preprocessing import LabelEncoder
from online_triplet_loss.losses import *
from torch.optim.lr_scheduler import StepLR, ExponentialLR
from torch.optim.sgd import SGD
from torch.optim.adam import Adam
from warmup_scheduler import GradualWarmupScheduler
def generate_parts_df(df, path):
    """Extend *df* with columns "1".."6" and "body" naming the per-part image
    files found under *path*; absent parts fall back to "zeros.jpg".

    Expects *df* to have an "image" column of file names like "<id>.jpg".
    Returns a new, reindexed DataFrame.
    """
    def _existing_or_zeros(image_name):
        # Use the placeholder image when the part file does not exist.
        return image_name if os.path.exists(path + image_name) else "zeros.jpg"

    parts_list = []
    for image_id in df["image"].values:
        stem = image_id.split(".")[0]
        parts = {str(i): _existing_or_zeros(stem + "_" + str(i) + "part.jpg")
                 for i in range(1, 7)}
        parts["body"] = _existing_or_zeros(stem + "_body.jpg")
        parts_list.append(parts)
    parts_df = pd.DataFrame(parts_list).reset_index(drop=True)
    return pd.concat([df, parts_df], axis=1).reset_index(drop=True)
class BalancedBatchSampler(BatchSampler):
    """
    BatchSampler - Returns batches of size n_classes * n_samples

    Every batch holds n_samples items from each of n_classes randomly chosen
    classes, so batches are class-balanced (useful for triplet-style losses).
    """
    def __init__(self, dataset, n_classes, n_samples):
        # Iterate the dataset once to record every item's label.
        loader = DataLoader(dataset)
        self.labels_list = []
        # NOTE(review): assumes each dataset item is a 9-tuple whose last
        # element is the label — confirm against the Dataset implementation.
        for _,_,_,_,_,_,_,_,label in loader:
            self.labels_list.append(label)
        self.labels = torch.LongTensor(self.labels_list)
        self.labels_set = list(set(self.labels.numpy()))
        # Map label -> indices of all items carrying that label, pre-shuffled.
        self.label_to_indices = {label: np.where(self.labels.numpy() == label)[0]
                                 for label in self.labels_set}
        for l in self.labels_set:
            np.random.shuffle(self.label_to_indices[l])
        # Per-label cursor into label_to_indices (sampling without replacement
        # until a label's pool is exhausted).
        self.used_label_indices_count = {label: 0 for label in self.labels_set}
        self.count = 0
        self.n_classes = n_classes
        self.n_samples = n_samples
        self.dataset = dataset
        self.batch_size = self.n_samples * self.n_classes
    def __iter__(self):
        self.count = 0
        while self.count + self.batch_size < len(self.dataset):
            # Pick n_classes distinct labels for this batch.
            classes = np.random.choice(self.labels_set, self.n_classes, replace=False)
            indices = []
            for class_ in classes:
                # Take the next n_samples indices for this label.
                indices.extend(self.label_to_indices[class_][
                    self.used_label_indices_count[class_]:self.used_label_indices_count[
                        class_] + self.n_samples])
                self.used_label_indices_count[class_] += self.n_samples
                # Reshuffle and restart the cursor when the pool runs low.
                if self.used_label_indices_count[class_] + self.n_samples > len(self.label_to_indices[class_]):
                    np.random.shuffle(self.label_to_indices[class_])
                    self.used_label_indices_count[class_] = 0
            yield indices
            self.count += self.n_classes * self.n_samples
    def __len__(self):
        return len(self.dataset) // self.batch_size
class LearningRateWarmUP(object):
    """Linear learning-rate warm-up wrapper around an optimizer.

    For the first *warmup_iteration* calls to step() the learning rate grows
    linearly from 0 to *target_lr*; afterwards stepping is delegated to
    *after_scheduler*, shifted so it starts counting from 0.

    BUG FIX: this is a scheduler, not a network layer — the original
    subclassed nn.Module without ever calling nn.Module.__init__, so it is
    now a plain class (the public interface is unchanged).
    """

    def __init__(self, optimizer, warmup_iteration, target_lr, after_scheduler=None):
        self.optimizer = optimizer
        self.warmup_iteration = warmup_iteration
        self.target_lr = target_lr
        self.after_scheduler = after_scheduler

    def warmup_learning_rate(self, cur_iteration):
        """Set lr = target_lr * cur_iteration / warmup_iteration on every param group."""
        warmup_lr = self.target_lr*float(cur_iteration)/float(self.warmup_iteration)
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = warmup_lr

    def step(self, cur_iteration):
        """Advance the schedule to *cur_iteration* (1-based training iteration)."""
        if cur_iteration <= self.warmup_iteration:
            self.warmup_learning_rate(cur_iteration)
        else:
            self.after_scheduler.step(cur_iteration-self.warmup_iteration)
|
from .inputmanager import InputManager, keycodes, ClassContext, GlobalContext, input_manager
# Convenience aliases bound to the shared input_manager singleton's methods.
add_class_context = input_manager.AddClassContext
add_global_context = input_manager.AddGlobalContext
add_action_callback = input_manager.AddActionCallback
from django.urls import path
from .import views
# URL routes for this app.
urlpatterns = [
    # Admin page endpoint that triggers a test e-mail via views.test_email.
    path('adminpage/sendmail',views.test_email,name='test_email'),
]
|
from pyramid.view import view_config
@view_config(route_name='home', renderer='templates/mytemplate.pt')
def my_view(request):
    """Render the home page template with the project name."""
    return dict(project='json_serialize_demo')
@view_config(route_name="custom_object", renderer="json")
def custom_object(request):
from objects import CustomObject
results = dict(
count=2,
objects=[
CustomObject('Wayne Witzel III', 'wayne@pieceofpy.com'),
CustomObject('Fake Person', 'fake.person@pieceofpy.com'),
],
)
return results
@view_config(route_name="third_party", renderer="json_third_party")
def third_party(request):
from objects import ThirdPartyObject
results = dict(
count=1,
objects=[
ThirdPartyObject(),
],
)
return results
|
#!/usr/bin/env python
# -*-coding: utf-8 -*-
"""
utils
~~~~~
Various utilities (not groovebox specific)
:copyright: (c) 2015 by Mek
:license: see LICENSE for more details.
"""
from datetime import datetime, date
import json
def subdict(d, keys):
    """Return a new dictionary restricted to `keys` (each must exist in `d`)."""
    return {k: d[k] for k in keys}
def time2sec(t):
    """Convert a "M:SS" string to total seconds as an int.

    Inputs without a colon are returned as strings: a fractional part after
    "." is stripped, anything else comes back unchanged (stringified).
    """
    t = str(t)
    if ":" in t:
        minutes, seconds = t.split(":")
        return 60 * int(minutes) + int(seconds)
    if "." in t:
        return t.split(".")[0]
    return t
class DatetimeEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetime/date objects as ISO-8601 strings."""

    def default(self, obj):
        # datetime must be tested before date: datetime is a date subclass.
        if isinstance(obj, datetime):
            return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
        if isinstance(obj, date):
            return obj.strftime('%Y-%m-%d')
        # Anything else falls back to the stock encoder (raises TypeError).
        return json.JSONEncoder.default(self, obj)
|
import pymongo,random,time
# Connect to the local MongoDB and print every document of one crawled-results
# collection (ad-hoc inspection script; destructive actions left commented out).
db = pymongo.MongoClient(host='localhost', port=27017)['Falonie']
# collection = db['innotree_็งๅญๆ_filter_duplicates_crawled_result']
collection = db['innotree_ๆ็ฅๆ่ต_filter_duplicates_crawled_result']
collection_filter_duplicates = db['innotree_ๆ็ๆ_filter_duplicates']
for i, j in enumerate(collection.find({}), 1):
    print(i,j)
    pass
# Pick a random pause length of 10-14 s (the actual sleep is disabled below).
time_sleep = random.choice(list(range(10, 15)))
print(time_sleep)
# time.sleep(time_sleep)
# collection.drop()
# print(random.choice(list(range(10,15))))
import torch.nn as nn
import torch.nn.functional as F
import math
from torch.nn.init import kaiming_normal_, xavier_uniform_, constant_
from random import choice
from models.cnn import CNNEncoder
class RNN(nn.Module):
    """Recurrent sequence classifier: RNN stack -> last timestep -> linear -> log-softmax."""

    @staticmethod
    def generate_params():
        """Sample a random hyper-parameter configuration for random search."""
        params = {}
        params['unit_type'] = choice(['GRU'])
        params['num_hidden_units'] = choice([32, 64, 128, 256, 512])
        params['num_hidden_layers'] = choice([1, 2])
        params['dropout'] = choice([0, 0.25, 0.5, 0.75])
        return params

    def __init__(self, num_input_channels, output_len,
                 rnn_params):
        super(RNN, self).__init__()
        self.rnn_params = rnn_params
        if rnn_params['unit_type'] == 'LSTM':
            RNNUnit = nn.LSTM
        elif rnn_params['unit_type'] == 'GRU':
            RNNUnit = nn.GRU
        else:
            # NOTE(review): an unknown unit_type leaves RNNUnit as None and
            # the construction below raises TypeError.
            RNNUnit = None
        self.rnn = RNNUnit(
            input_size=num_input_channels,
            hidden_size=rnn_params['num_hidden_units'],
            num_layers=rnn_params['num_hidden_layers'],
            dropout=rnn_params['dropout'],
            batch_first=True,
            bidirectional=False
        )
        self.out = nn.Linear(rnn_params['num_hidden_units'], output_len)
        self.initialise()

    def forward(self, x, internal_state):
        """Run the RNN over x (batch, seq, channels) and classify the final timestep."""
        x, internal_state = self.rnn(x, internal_state)
        # Keep only the last timestep's output.
        x = x[:, -1, :]
        x = self.out(x)
        return F.log_softmax(x, dim=1), internal_state

    def initialise(self):
        """Kaiming-init linear/conv layers; xavier-init recurrent weights, zero biases."""
        def weights_init(m):
            if isinstance(m, (nn.Linear, nn.Conv1d)):
                kaiming_normal_(m.weight.data, mode='fan_in')
                constant_(m.bias, 0)
            if isinstance(m, (nn.GRU, nn.LSTM)):
                for name, param in m.named_parameters():
                    if 'bias' in name:
                        nn.init.constant_(param, 0.0)
                    elif 'weight' in name:
                        nn.init.xavier_normal_(param)
        self.apply(weights_init)

    def get_params(self):
        """Return the hyper-parameter dict this model was built from."""
        return self.rnn_params
class CRNN(nn.Module):
    """CNN feature extractor followed by an RNN classifier over the feature sequence."""

    def __init__(self, num_input_channels, input_len, output_len, encoder_params, rnn_params):
        super(CRNN, self).__init__()
        self.encoder_params = encoder_params
        self.rnn_params = rnn_params
        # Convolutional front-end producing (batch, num_feature_maps, seq') features.
        self.encoder = CNNEncoder(
            sequence_len=input_len,
            num_input_channels=num_input_channels,
            **encoder_params
        )
        if rnn_params['unit_type'] == 'LSTM':
            RNNUnit = nn.LSTM
        elif rnn_params['unit_type'] == 'GRU':
            RNNUnit = nn.GRU
        else:
            # NOTE(review): an unknown unit_type leaves RNNUnit as None and
            # the construction below raises TypeError.
            RNNUnit = None
        self.rnn = RNNUnit(
            input_size=encoder_params['num_feature_maps'],
            hidden_size=rnn_params['num_hidden_units'],
            num_layers=rnn_params['num_hidden_layers'],
            dropout=rnn_params['dropout'],
            batch_first=True,
            bidirectional=False
        )
        self.out = nn.Linear(rnn_params['num_hidden_units'], output_len)
        self.initialise()

    def forward(self, x, internal_state):
        """Encode x with the CNN, run the RNN, and classify the last timestep."""
        x = self.encoder(x)
        # (batch, channels, seq) -> (batch, seq, channels) for the batch_first RNN.
        x = x.transpose(1, 2)
        x, internal_state = self.rnn(x, internal_state)
        x = x[:, -1, :]
        x = self.out(x)
        return F.log_softmax(x, dim=1), internal_state

    def initialise(self):
        """Kaiming-init linear/conv layers; xavier-init recurrent weights, zero biases."""
        def weights_init(m):
            if isinstance(m, (nn.Linear, nn.Conv1d)):
                kaiming_normal_(m.weight.data, mode='fan_in')
                constant_(m.bias, 0)
            if isinstance(m, (nn.GRU, nn.LSTM)):
                for name, param in m.named_parameters():
                    if 'bias' in name:
                        nn.init.constant_(param, 0.0)
                    elif 'weight' in name:
                        nn.init.xavier_normal_(param)
        self.apply(weights_init)

    def get_params(self):
        """Return both hyper-parameter dicts (CNN encoder and RNN decoder)."""
        return {'encoder':self.encoder_params, 'decoder': self.rnn_params}
class BRNN(nn.Module):
    """(Optionally bidirectional) recurrent classifier applied to every timestep."""

    @staticmethod
    def generate_params():
        """Sample a random hyper-parameter configuration for random search."""
        params = {}
        params['unit_type'] = choice(['GRU'])
        params['num_hidden_units'] = choice([64, 128, 256, 512])
        params['num_hidden_layers'] = choice([1, 2, 3])
        params['bidirectional'] = choice([True, False])
        params['dropout'] = choice([0, 0.25, 0.5, 0.75])
        return params

    def __init__(self, output_size, unit_type, num_input_channels, num_hidden_units, dropout, num_hidden_layers,
                 bidirectional, params_dict):
        super(BRNN, self).__init__()
        if unit_type == 'LSTM':
            self.rnn_unit = nn.LSTM
        elif unit_type == 'GRU':
            self.rnn_unit = nn.GRU
        self.num_feature_maps = num_input_channels
        self.num_hidden_units = num_hidden_units
        self.dropout = dropout
        self.num_hidden_layers = num_hidden_layers
        self.bidirectional = bidirectional
        self.params_dict = params_dict
        # BUG FIX: honour the `bidirectional` argument instead of hard-coding
        # bidirectional=True.
        self.rnn = self.rnn_unit(
            input_size=self.num_feature_maps,
            hidden_size=self.num_hidden_units,
            num_layers=self.num_hidden_layers,
            batch_first=True,
            dropout=self.dropout,
            bidirectional=self.bidirectional
        )
        # NOTE(review): `sequencer` is never used by forward(); kept so the
        # module structure (and any saved state_dicts) stays compatible.
        self.sequencer = self.rnn_unit(
            input_size=self.num_feature_maps,
            hidden_size=self.num_hidden_units,
            num_layers=1,
            batch_first=True,
            dropout=self.dropout,
            bidirectional=True
        )
        # BUG FIX: a bidirectional RNN emits 2 * hidden units per timestep;
        # the old nn.Linear(num_hidden_units, ...) raised a shape error in
        # forward() whenever the RNN was bidirectional.
        rnn_output_size = self.num_hidden_units * (2 if self.bidirectional else 1)
        self.linear = nn.Linear(rnn_output_size, output_size)
        self.initialise()

    def forward(self, x, internal_state):
        """Classify every timestep of x (batch, seq, channels).

        NOTE(review): log_softmax is taken over dim=1 (the sequence axis) as
        in the original — confirm that is intended rather than dim=-1.
        """
        x, internal_state = self.rnn(x, internal_state)
        x = self.linear(x)
        return F.log_softmax(x, dim=1), internal_state

    def initialise(self):
        """Kaiming-init linear/conv layers; xavier-init recurrent weights, zero biases."""
        def weights_init(m):
            if isinstance(m, (nn.Linear, nn.Conv1d)):
                kaiming_normal_(m.weight.data, mode='fan_in')
                constant_(m.bias, 0)
            if isinstance(m, (nn.GRU, nn.LSTM)):
                for name, param in m.named_parameters():
                    if 'bias' in name:
                        nn.init.constant_(param, 0.0)
                    elif 'weight' in name:
                        nn.init.xavier_normal_(param)
        self.apply(weights_init)

    def get_params(self):
        """Return the hyper-parameters this model was built with."""
        params = {}
        params['num_feature_maps'] = self.num_feature_maps
        params['num_hidden_units'] = self.num_hidden_units
        params['dropout'] = self.dropout
        params['bidirectional'] = self.bidirectional
        params['num_hidden_layers'] = self.num_hidden_layers
        return params
def get_l_out(l_in, padding, dilation, kernel_size, stride):
    """Output length of a 1-D convolution/pooling layer (standard conv
    arithmetic, cf. torch.nn.Conv1d shape formula)."""
    numerator = l_in + 2 * padding - dilation * (kernel_size - 1) - 1
    return int(numerator / stride + 1)
|
#!/usr/bin/env python
"""
setup.py file for SWIG
"""
from setuptools import setup, Extension
import importlib
import subprocess
import sys
# Solve the chicken-and-egg problem of requiring packages *before* the
# script has been parsed.
for package in ['numpy', 'pkgconfig']:
    # Don't try to install packages that already exist.
    try:
        # NOTE(review): `mod` is never used; the import is only a probe.
        mod = importlib.import_module(package)
        continue
    except ModuleNotFoundError:
        pass
    # Redundant with the successful-import `continue` above in practice;
    # kept as a second guard.
    if package in sys.modules:
        continue
    # Inside a `virtualenv`, we are *not* allowed to install packages as
    # a regular user.
    # NOTE(review): sys.real_prefix is set by the legacy `virtualenv` tool
    # only; PEP-405 venvs use sys.base_prefix instead — confirm which
    # environments must be supported.
    if hasattr(sys, 'real_prefix'):
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])
    else:
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', '--user', package])
def get_include_dirs():
    """Collect the C++ include paths for the extension build: NumPy's headers
    plus the system eigen3 headers located via pkg-config."""
    import pkgconfig
    import numpy

    if not pkgconfig.exists('eigen3'):
        raise Exception('Missing `eigen3` library. Please install it using the package manager of your operating system')

    numpy_include_dir = numpy.get_include()
    # pkgconfig.cflags yields e.g. "-I/usr/include/eigen3"; drop the leading
    # "-I" to obtain the bare directory path.
    eigen3_include_dir = pkgconfig.cflags('eigen3')[2:]
    return [numpy_include_dir, eigen3_include_dir]
# SWIG-generated C++ extension module.
# NOTE(review): get_include_dirs() runs at module import time, so even
# informational `setup.py` invocations require numpy/pkgconfig/eigen3.
GKextCPy_module = Extension('_GKextCPy',
                            sources = ['GKextCPy_wrap.cxx', 'GKextCPy.cpp'],
                            swig_opts = ['-c++'],
                            extra_compile_args = ['-std=c++11', '-O3'],
                            include_dirs = get_include_dirs()
                            )

setup(name = 'GKextCPy',
      version = '0.4.1',
      author = 'Elisabetta Ghisu',
      description = """Graph Kernels: building the extension Python module. This is a wrapper package from C++ to Python.""",
      ext_modules = [GKextCPy_module],
      py_modules = ['GKextCPy'],
      setup_requires = ['pkgconfig', 'numpy'],
      install_requires = ['pkgconfig', 'numpy'],
      license = 'ETH Zurich',
      )
|
import cv2
import imutils
import numpy as np
from .cv import CVUtils
from .gui import GUIUtils
from .trackbar import (
ColorThreshTrackbar,
GrayThreshTrackbar,
CannyTrackbar,
HoughCircleTrackbar,
HoughLineTrackbar
)
def do_hough_circle(img):
    """Open an interactive Hough-circle tuning window for `img` and return
    the resulting image after the user dismisses it."""
    tuner = HoughCircleTrackbar(img)
    tuner.show_image()
    GUIUtils.wait()
    tuner.close_image()
    return tuner.get_image()[0]
def do_hough_line(img):
    """Open an interactive Hough-line tuning window for `img` and return the
    resulting image after the user dismisses it."""
    tuner = HoughLineTrackbar(img)
    tuner.show_image()
    GUIUtils.wait()
    tuner.close_image()
    return tuner.get_image()[0]
def do_canny(img):
    """Open an interactive Canny-threshold tuning window for `img` and return
    the resulting edge image after the user dismisses it."""
    tuner = CannyTrackbar(img)
    tuner.show_image()
    GUIUtils.wait()
    tuner.close_image()
    return tuner.get_image()[0]
def do_color_thresh(img):
    """Open an interactive colour-threshold tuning window for `img` and
    return the thresholded image after the user dismisses it."""
    tuner = ColorThreshTrackbar(img)
    tuner.show_image()
    GUIUtils.wait()
    tuner.close_image()
    return tuner.get_image()[0]
def do_gray_thresh(img):
    """Convert `img` to grayscale, open an interactive gray-threshold tuning
    window, and return the thresholded image after the user dismisses it."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    tuner = GrayThreshTrackbar(gray)
    tuner.show_image()
    GUIUtils.wait()
    tuner.close_image()
    return tuner.get_image()[0]
def main():
    """Load the sample image, pre-process it, and run each interactive
    tuning tool in turn (Canny, colour threshold, gray threshold)."""
    test_img = "images/1536x2048.jpg"

    # load the image and resize it
    orig = cv2.imread(test_img)
    img = imutils.resize(orig, width=1000)
    # NOTE(review): `ratio` (scale factor back to the original image) is
    # computed but never used — presumably for mapping detections back later.
    ratio = orig.shape[0] / float(img.shape[0])

    # erode
    img = cv2.erode(img, np.ones((3, 3), np.uint8))

    do_canny(img)
    do_color_thresh(img)
    do_gray_thresh(img)

    # Earlier colour-masking / thresholding experiments kept for reference:
    # get rid of some colors
    # hsv = CVUtils.to_hsv(img)
    # white = [0, 0, 0]
    # img = CVUtils.replace_range(hsv, img, [0, 0, 8], [14, 172, 85], [14, 172, 85]) # mountains
    # img = CVUtils.replace_range(hsv, img, [19, 0, 7], [179, 255, 255], [179, 255, 255]) # greens
    # img = CVUtils.replace_range(hsv, img, [10, 100, 175], [40, 255, 255], [40, 255, 255]) # sand
    # img = CVUtils.replace_range(hsv, img, [0, 237, 0], [179, 255, 255], [179, 255, 255]) # brick
    # thresh = ThresholdedImage(img)
    # thresh.show_image()
    # GUIUtils.wait()
    # thresh.close_image()
    # GrayThreshImage(cv2.cvtColor(img, cv2.COLOR_HSV2BGR)).show_image()
    # GUIUtils.wait()
    # grayscale and threshold
    # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # thresh = cv2.threshold(gray, 120, 255, cv2.THRESH_BINARY_INV)[1]
    # show_image(thresh)
# Run the interactive tuning pipeline when invoked as a script.
if __name__ == "__main__":
    main()
|
import re
from tqdm import tqdm
import csv
import codecs
from helper import *
import hashlib
import json
from googletrans import Translator
# NOTE(review): this first Translator is immediately overwritten below and is
# dead code.
translator = Translator()
# Route googletrans requests through the Taiwan mirror endpoint.
translator = Translator(service_urls=[
    'translate.google.com.tw',
])

filenamelist = getfilenamelist()
# NOTE(review): this md5 object is never used in this script.
md5 = hashlib.md5()
# Each <name>.txt holds a JSON list of entries with a 'path' key and a list
# of 'pairs' of localized strings; emit <name>.csv rows of
# [key, source_text, translation].
for filename in filenamelist:
    newlines = []
    with codecs.open(filename+".txt", "r", encoding="utf-8-sig") as file:
        s = json.load(file)
        for item in s:
            K = item['path']
            # assumes pairs[0] = English, pairs[-1] = Chinese and
            # pairs[1] = Russian — TODO confirm against the data format.
            E = item['pairs'][0]['LocalizedString']
            C = item['pairs'][-1]['LocalizedString']
            R = item['pairs'][1]['LocalizedString']
            if E == '':
                if C == '':
                    # No English or Chinese text: machine-translate the
                    # Russian string to simplified Chinese.
                    try:
                        txt = translator.translate(R, src='ru', dest='zh-CN')
                        newlines.append([K,R,txt.text])
                        print (R,txt.text)
                    # NOTE(review): bare except silently drops the row on any
                    # failure (network, quota, ...) — consider narrowing.
                    except:
                        pass
                else:
                    newlines.append([K,R,C])
            else:
                newlines.append([K,E,C])
    with codecs.open(filename+".csv", "w", encoding="utf-8-sig") as file:
        f_csv = csv.writer(file)
        #f_csv.writerow(['MD5','ๅๆ', '็ฟป่ฏ'])
        # NOTE(review): `i` is counted but never used.
        i = 0
        for newline in newlines:
            i = i + 1
            f_csv.writerow(newline)
|
# Starting from a = 1, print every `a` for which EVERY pair (x, y) in
# [0, 1000) x [0, 1000) satisfies (x + 2*y < a) or (x < a) or (y < a).
# The control flow leans on Python's for/else: a for-loop's `else` runs only
# when the loop finished without `break`.
a=1
while True:
    for x in range(1000):
        for y in range(1000):
            if not((x+2*y<a) or (x<a) or (y<a)):
                break  # counterexample found for this a
        else:
            continue  # inner loop clean for this x -> try the next x
        break  # propagate the inner break outward
    else:
        print(a)  # no break anywhere: the condition holds for all pairs
    a+=1
# NOTE(review): once a >= 1000, x < a holds for every candidate x, so the
# condition is always satisfied and every subsequent `a` is printed forever —
# confirm the unbounded output is intended.
|
from __future__ import absolute_import
from __future__ import print_function
import collections
import math
import random
import gzip
import csv
import os.path
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Local input corpus: page -> cleaned text, and page -> category edges.
TEXTS = '/Users/remper/Downloads/bk2vec_input/enwiki-20140203-text-cleaned.csv.gz'
CATEGORIES = '/Users/remper/Downloads/bk2vec_input/enwiki-20140203-page-category-out.csv.gz'
# Borean
#TEXTS = '/borean1/data/pokedem-experiments/bk2vec/alessio/enwiki-20140203-text-cleaned.csv.gz'
#CATEGORIES = '/borean1/data/pokedem-experiments/bk2vec/alessio/enwiki-20140203-page-category-out.csv.gz'

# Increasing limit for CSV parser
csv.field_size_limit(2147483647)  # 2**31 - 1: rows can be whole-article length
def wordreader(filename):
    """Yield every token from a gzipped TSV of (page_title, text) rows.

    Each well-formed row contributes its title followed by the
    whitespace-split words of its text column. Malformed rows are reported
    and skipped; a csv-level parse error ends the stream.
    """
    previous = list()
    # BUGFIX: open in text mode ('rt') so the csv module receives strings
    # (required on Python 3, harmless on Python 2), and pass quotechar=None —
    # the documented way to disable quoting together with QUOTE_NONE (an
    # empty-string quotechar is rejected by the csv module).
    with gzip.open(filename, 'rt') as csvfile:
        reader = csv.reader(csvfile, delimiter='\t', quoting=csv.QUOTE_NONE, quotechar=None)
        try:
            for row in reader:
                # BUGFIX: compare with != instead of `is not`; identity
                # comparison against an int literal is implementation-defined.
                if (len(row) != 2):
                    print("Inconsistency in relations file. Previous:", previous, "Current:", row)
                    continue
                previous = row
                for word in [row[0]] + row[1].split():
                    yield word
        except csv.Error:
            print(u"Dunno why this error happens")
def build_dictionary(reader):
    """Assign a dense integer id to every distinct token produced by `reader`.

    Ids are allocated in first-seen order.

    Returns:
        (dictionary, reverse_dictionary): token -> id and id -> token maps.
    """
    dictionary = dict()
    reverse_dictionary = dict()
    processed = 0
    for word in reader:
        word = str(word)
        processed += 1
        # Progress heartbeat every 100M tokens (the corpus is huge).
        if processed % 100000000 == 0:
            print(" " + str(processed // 100000000) + "00m words parsed (last:", word, ", dic size:", len(dictionary), ")")
        if word in dictionary:
            continue
        # NOTE(review): the original also tracked per-word counts here, but
        # they were off by one and never returned — the dead bookkeeping was
        # removed.
        dictionary[word] = len(dictionary)
        reverse_dictionary[dictionary[word]] = word
    print("Parsing finished")
    return dictionary, reverse_dictionary
# Limiter if there are performance issues
max_pages = 0  # 0 = no cap on the number of pages read by build_pages
max_categories_per_page = 0  # 0 = keep every category of a page
test_set = 0.1  # fraction of category links held out for evaluation
def build_pages(filename, dictionary, reverse_dictionary):
    """Parse the gzipped page->categories TSV into training/evaluation maps.

    Each row is: page_title, category_1, ..., category_n. Pages whose title
    is not in `dictionary` are skipped; categories missing from `dictionary`
    are added to it (and to `reverse_dictionary`) in place. A random
    `test_set` fraction of category links is diverted into `evaluation`.

    Returns:
        (pages, evaluation): page id -> list of category ids for training,
        and the same mapping for the held-out links.

    NOTE(review): reads the gzip in binary mode and feeds bytes to csv with
    quotechar='' — a Python-2-only pattern (see wordreader for the
    text-mode fix).
    """
    global max_pages, max_categories_per_page, test_set
    pages = dict()
    evaluation = dict()
    maxPagesTitle = "Unknown"
    maxPages = 0  # most categories seen on a single page (reporting only)
    found = 0
    notfound = 0
    category_found = 0
    category_notfound = 0
    test_set_size = 0
    training_set_size = 0
    with gzip.open(filename, 'rb') as csvfile:
        reader = csv.reader(csvfile, delimiter='\t', quoting=csv.QUOTE_NONE, quotechar='')
        try:
            for row in reader:
                page_title = row[0]
                if page_title not in dictionary:
                    notfound += 1
                    continue
                found += 1
                # Optional hard caps for performance experiments (0 = off).
                if max_pages > 0 and found > max_pages:
                    break
                if max_categories_per_page > 0 and len(row) > max_categories_per_page+1:
                    row = row[:max_categories_per_page+1]
                if found % 1000000 == 0:
                    print(" " + str(found // 1000000) + "m pages parsed")
                page_index = dictionary[page_title]
                if page_index not in pages:
                    pages[page_index] = list()
                if page_index not in evaluation:
                    evaluation[page_index] = list()
                page_categories = pages[page_index]
                evaluation_current = evaluation[page_index]
                for word in row[1:]:
                    if word not in dictionary:
                        # Unknown category: grow the vocabulary in place.
                        dictionary[word] = len(dictionary)
                        reverse_dictionary[dictionary[word]] = word
                        category_notfound += 1
                    else:
                        category_found += 1
                    # Randomly divert ~test_set of the links to the held-out map.
                    if test_set > 0 and random.random() <= test_set:
                        test_set_size += 1
                        evaluation_current.append(dictionary[word])
                    else:
                        training_set_size += 1
                        page_categories.append(dictionary[word])
                if len(page_categories) > maxPages:
                    maxPages = len(page_categories)
                    maxPagesTitle = page_title
        except csv.Error:
            print(u"Dunno why this error happens")
    print(len(pages), "pages parsed.", "Page with most categories: ", maxPagesTitle, "with", maxPages, "categories")
    print("Training set size:", training_set_size, "Test set size:", test_set_size)
    print("Pages found:", found, "Pages not found:", notfound)
    print("Categories found:", category_found, "Added categories as new tokens:", category_notfound)
    return pages, evaluation
def restore_dictionary(filename):
    """Load a token dictionary previously written by store_dictionary.

    Reads `filename + "_dict"` (token<TAB>id per line) and rebuilds both
    maps.

    Returns:
        (dictionary, reverse_dictionary): token -> id and id -> token maps.
    """
    dictionary = dict()
    reverse_dictionary = dict()
    processed = 0
    # BUGFIX: open in text mode ('r' instead of 'rb') so the csv module gets
    # strings on Python 3, and use quotechar=None — the documented way to
    # disable quoting together with QUOTE_NONE. The file is also closed now
    # (the original leaked the handle via a bare open()).
    with open(filename + "_dict", 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter='\t', quoting=csv.QUOTE_NONE, quotechar=None)
        for row in reader:
            row[0] = str(row[0])
            row[1] = int(row[1])
            processed += 1
            # Progress heartbeat (fires every 3m rows).
            if processed % 3000000 == 0:
                print(" " + str(processed // 1000000) + "m words parsed")
            dictionary[row[0]] = row[1]
            reverse_dictionary[row[1]] = row[0]
    return dictionary, reverse_dictionary
def store_dictionary(dictionary, filename):
    """Write `dictionary` (token -> id) to `filename + "_dict"` as TSV,
    one `token<TAB>id` row per entry."""
    # BUGFIX: open in text mode ('w' instead of 'wb') — csv.writer emits str,
    # which breaks a binary handle on Python 3 — and use quotechar=None, the
    # documented companion of QUOTE_NONE.
    with open(filename + "_dict", 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter='\t', quoting=csv.QUOTE_NONE, quotechar=None)
        for value in dictionary.keys():
            writer.writerow([value, dictionary[value]])
def dump_evaluation(evaluation, filename):
    """Write the held-out page -> categories map to `filename + "_test"`.

    One TSV row per page: the page id followed by its held-out category ids.
    """
    # BUGFIX: open in text mode ('w' instead of 'wb') — csv.writer emits str,
    # which breaks a binary handle on Python 3 — and use quotechar=None, the
    # documented companion of QUOTE_NONE.
    with open(filename + "_test", 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter='\t', quoting=csv.QUOTE_NONE, quotechar=None)
        for value in evaluation.keys():
            writer.writerow([value]+evaluation[value])
# Build the token dictionary (or reload a cached one), then derive the
# page -> category maps and persist the held-out evaluation set.
if os.path.exists(TEXTS+"_dict"):
    print('Restoring dictionary')
    dictionary, reverse_dictionary = restore_dictionary(TEXTS)
    print('Done')
else:
    dictionary, reverse_dictionary = build_dictionary(wordreader(TEXTS))
    print('Storing dictionary')
    store_dictionary(dictionary, TEXTS)
    print('Done')
vocabulary_size = len(dictionary)
print('Vocabulary size: ', vocabulary_size)
print('Building page -> category dictionary')
pages, evaluation = build_pages(CATEGORIES, dictionary, reverse_dictionary)
# build_pages may have added unseen categories as new tokens, so refresh.
vocabulary_size = len(dictionary)
print('Storing test set')
dump_evaluation(evaluation, CATEGORIES)
print('Done')
def word_provider(filename):
    """Endless word stream: restart wordreader whenever the corpus ends."""
    while True:
        for token in wordreader(filename):
            yield token
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(reader, dictionary, batch_size, num_skips, skip_window):
    """Build one skip-gram training batch from an endless word iterator.

    Args:
        reader: iterator yielding corpus words in order (never exhausted).
        dictionary: token -> integer id map; corpus words must be present.
        batch_size: number of (center, context) examples; multiple of num_skips.
        num_skips: context words sampled per center word.
        skip_window: context radius around the center word.

    Returns:
        (batch, labels): int32 arrays of center-word ids (batch_size,) and
        context-word ids (batch_size, 1).
    """
    assert batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    span = 2 * skip_window + 1  # [ skip_window target skip_window ]
    # Renamed from `buffer` to avoid shadowing the (Python 2) builtin.
    window = collections.deque(maxlen=span)
    # BUGFIX: use the builtin next() instead of the Python-2-only
    # reader.next() method so the code runs on both Python 2 and 3.
    for _ in range(span):
        window.append(next(reader))
    for i in range(batch_size // num_skips):
        target = skip_window  # target label at the center of the window
        targets_to_avoid = [skip_window]
        for j in range(num_skips):
            # Rejection-sample a context position distinct from earlier picks.
            while target in targets_to_avoid:
                target = random.randint(0, span - 1)
            targets_to_avoid.append(target)
            # NOTE(review): the dictionary[56] prints below are leftover debug
            # output and themselves raise KeyError when id 56 is absent.
            if window[skip_window] not in dictionary:
                print("Word", window[skip_window], "is not in dictionary")
                print(window)
                print(dictionary[56])
                exit()
            if window[target] not in dictionary:
                print("Word", window[target], "is not in dictionary")
                print(dictionary[56])
                print(window)
                exit()
            batch[i * num_skips + j] = dictionary[window[skip_window]]
            labels[i * num_skips + j, 0] = dictionary[window[target]]
        window.append(next(reader))
    return batch, labels
# Smoke-test the batcher on the live reader: print a few center -> context
# pairs both as ids and as tokens.
data_reader = word_provider(TEXTS)
batch, labels = generate_batch(data_reader, dictionary, batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
    print(batch[i], '->', labels[i, 0])
    print(reverse_dictionary[batch[i]], '->', reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 90  # Dimension of the embedding vector.
skip_window = 1      # How many words to consider left and right.
num_skips = 2        # How many times to reuse an input to generate a label.

# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 8       # Random set of words to evaluate similarity on.
valid_window = 100   # Only pick dev samples in the head of the distribution.
valid_examples = np.array(random.sample([x for x in range(valid_window)], valid_size))
num_sampled = 64     # Number of negative examples to sample.
def matrix_distance(tensor1, tensor2):
    """Row-wise Euclidean distance between two equally-shaped matrices.

    The sum of squares is clipped to [1e-10, 1e+37] before the square root so
    the sqrt gradient never sees an exact zero.
    NOTE(review): tf.sub is the pre-1.0 TensorFlow name (tf.subtract today).
    """
    with tf.name_scope("matrix_distance"):
        sub = tf.sub(tensor1, tensor2)
        distance = tf.sqrt(tf.clip_by_value(tf.reduce_sum(tf.pow(sub, 2), 1), 1e-10, 1e+37))
        return distance
def centroid(tensor):
    """Mean over the rows of `tensor` — the centroid of a set of embeddings."""
    with tf.name_scope("centroid"):
        return tf.reduce_mean(tensor, 0)
# Build the joint skip-gram + category-distance training graph.
# NOTE(review): uses TensorFlow 0.x API names throughout (scalar_summary,
# keep_dims, ...).
graph = tf.Graph()

with graph.as_default():
    # Input data.
    train_inputs = tf.placeholder(tf.int32, shape=[batch_size], name="train_inputs")
    train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1], name="train_labels")
    # Flattened (category id, owning-batch-row index) pairs for the batch.
    train_categories = tf.placeholder(tf.int32, shape=[None], name="train_categories")
    train_category_indexes = tf.placeholder(tf.int32, shape=[None], name="train_category_indexes")
    valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

    # Ops and variables pinned to the CPU because of missing GPU implementation
    with tf.device('/cpu:0'):
        # Look up embeddings for inputs.
        embeddings = tf.Variable(
            tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0), name="embeddings")
        embed = tf.nn.embedding_lookup(embeddings, train_inputs)

        # Construct the variables for the NCE loss
        nce_weights = tf.Variable(
            tf.truncated_normal([vocabulary_size, embedding_size],
                                stddev=1.0 / math.sqrt(embedding_size)), name="NCE_weights")
        nce_biases = tf.Variable(tf.zeros([vocabulary_size]), name="NCE_biases")

        # Recalculating centroids for words in a batch
        with tf.name_scope("category_distances"):
            # NOTE(review): the second gather indexes `embeddings` by *batch
            # row position*, not by a vocabulary id — this looks like it
            # should first map rows through train_inputs; confirm intent.
            category_distances = matrix_distance(
                tf.gather(embeddings, train_categories),
                tf.gather(embeddings, train_category_indexes)
            )

        # Precomputed centroids for each embeddings vector
        # Initialize them with embeddings by default
        # category_centroids = tf.Variable(embeddings.initialized_value())
        # Update centroids
        # tf.scatter_update(category_centroids, train_inputs, recalculated_centroids)
        # Resolving current centroid values
        # current_batch_centroids = tf.nn.embedding_lookup(category_centroids, train_inputs)

        # Categorical knowledge additional term
        with tf.name_scope("category_loss"):
            # Building category objective which is average distance to word centroid
            category_loss = tf.reduce_mean(category_distances)
            category_loss_summary = tf.scalar_summary("category_loss", category_loss)

        # Compute the average NCE loss for the batch.
        # tf.nce_loss automatically draws a new sample of the negative labels each
        # time we evaluate the loss.
        with tf.name_scope("skipgram_loss"):
            loss = tf.reduce_mean(
                tf.nn.nce_loss(nce_weights, nce_biases, embed, train_labels, num_sampled, vocabulary_size)
                , name="skipgram_loss"
            )
            skipgram_loss_summary = tf.scalar_summary("skipgram_loss", loss)

        # Joint objective: skip-gram NCE loss plus the category term.
        joint_loss = tf.add(loss, category_loss, name="joint_loss")

        # Construct the SGD optimizer using a learning rate of 1.0.
        loss_summary = tf.scalar_summary("joint_loss", joint_loss)
        optimizer = tf.train.GradientDescentOptimizer(1.0, name="joint_objective").minimize(joint_loss)

        # Compute the cosine similarity between minibatch examples and all embeddings.
        norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
        normalized_embeddings = embeddings / norm
        valid_embeddings = tf.nn.embedding_lookup(
            normalized_embeddings, valid_dataset)
        similarity = tf.matmul(
            valid_embeddings, normalized_embeddings, transpose_b=True)
# Step 5: Begin training.
num_steps = 2000001

with tf.Session(graph=graph) as session:
    # merged = tf.merge_all_summaries()
    category_merged = tf.merge_summary([skipgram_loss_summary, loss_summary, category_loss_summary])
    writer = tf.train.SummaryWriter("logs", graph)
    tf.initialize_all_variables().run()
    print("Initialized")

    average_loss = 0
    average_cat_per_page = 0
    # Sentinel above the stop threshold so the loop starts; training halts
    # once the 2000-step average loss drops to 10 or the step cap is hit.
    last_average_loss = 241
    step = 0
    while last_average_loss > 10 and step < num_steps:
        batch_inputs, batch_labels = generate_batch(data_reader, dictionary, batch_size, num_skips, skip_window)
        # Flatten each batch word's category list into parallel
        # (category id, batch-row index) arrays for the feed.
        # NOTE(review): `id` and `input` shadow builtins.
        categories = list()
        category_indexes = list()
        for id, input in enumerate(batch_inputs):
            if input in pages:
                for i in pages[input]:
                    category_indexes.append(id)
                    categories.append(i)
        if len(categories) is 0:
            # NOTE(review): `is 0` relies on small-int identity — should be
            # `== 0`. The dummy (category 0, row 1) pair keeps the feed
            # non-empty when no batch word has categories.
            categories.append(0)
            category_indexes.append(1)
        average_cat_per_page += len(categories)
        feed_dict = {
            train_inputs: batch_inputs, train_labels: batch_labels,
            train_categories: categories, train_category_indexes: category_indexes
        }

        # We perform one update step by evaluating the optimizer op (including it
        # in the list of returned values for session.run()
        summary, _, loss_val = session.run([category_merged, optimizer, joint_loss], feed_dict=feed_dict)
        writer.add_summary(summary, step)
        average_loss += loss_val

        #Calculate category loss once in a while
        #if step % 2000 == 0:
        #  writer.add_summary(session.run(category_loss_summary, feed_dict=feed_dict), step)

        if step % 2000 == 0:
            if step > 0:
                average_loss /= 2000
                average_cat_per_page /= 2000
            # The average loss is an estimate of the loss over the last 2000 batches.
            print("Average loss at step ", step, ": ", average_loss)
            print("Average categories per batch:", average_cat_per_page)
            last_average_loss = average_loss
            average_loss = 0
            average_cat_per_page = 0

        # Note that this is expensive (~20% slowdown if computed every 500 steps)
        #if step % 100000 == 0:
        #  sim = similarity.eval()
        #  for i in xrange(valid_size):
        #    valid_word = reverse_dictionary[valid_examples[i]]
        #    top_k = 8 # number of nearest neighbors
        #    nearest = (-sim[i, :]).argsort()[1:top_k+1]
        #    log_str = "Nearest to %s:" % valid_word
        #    for k in xrange(top_k):
        #      close_word = reverse_dictionary[nearest[k]]
        #      log_str = "%s %s," % (log_str, close_word)
        #    print(log_str)
        step += 1
    print("Retrieving embeddings and normalizing them")
    final_embeddings = normalized_embeddings.eval()
    print("Done")

# Step 6: Dump embeddings to file
# NOTE(review): 'wb' + csv.writer is Python-2-only; Python 3 needs text mode.
with open('embeddings.tsv', 'wb') as csvfile:
    writer = csv.writer(csvfile, delimiter='\t', quoting=csv.QUOTE_NONNUMERIC)
    for id, embedding in enumerate(final_embeddings):
        if id not in reverse_dictionary:
            print("Bullshit happened: ", id, embedding)
            continue
        writer.writerow([reverse_dictionary[id]]+[str(value) for value in embedding])
# Step 7: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
    """Scatter 2-D embeddings and annotate each point with its token.

    Relies on the module-global `plt`, which is imported in the guarded
    block at the end of the file — only callable when matplotlib is
    installed.
    """
    assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
    plt.figure(figsize=(18, 18))  # in inches
    for i, label in enumerate(labels):
        x, y = low_dim_embs[i, :]
        plt.scatter(x, y)
        plt.annotate(label,
                     xy=(x, y),
                     xytext=(5, 2),
                     textcoords='offset points',
                     ha='right',
                     va='bottom')
    plt.savefig(filename)
# Project the first 500 embeddings to 2-D with t-SNE and plot them; skip
# gracefully when sklearn/matplotlib are not installed.
try:
    from sklearn.manifold import TSNE
    import matplotlib.pyplot as plt

    tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
    plot_only = 500
    # Replace NaN/inf values before t-SNE.
    filtered = np.nan_to_num(final_embeddings[:plot_only, :])
    low_dim_embs = tsne.fit_transform(filtered)
    labels = [reverse_dictionary[i] for i in xrange(plot_only)]
    plot_with_labels(low_dim_embs, labels)
except ImportError:
    print("Please install sklearn and matplotlib to visualize embeddings.")
from common.okfpgaservers.pulser.pulse_sequences.pulse_sequence import pulse_sequence
from RabiExcitation import rabi_excitation, rabi_excitation_no_offset
from BlueHeating import local_blue_heating
from EmptySequence import empty_sequence
from treedict import TreeDict
from labrad.units import WithUnit
class ramsey_dephase_excitation(pulse_sequence):
    """Ramsey-type pulse sequence with a blue-heating 'dephasing' pulse
    inserted in the middle of the gap between the two 729 nm pulses:

        rabi pulse -> wait -> dephasing (blue heating) -> wait -> rabi pulse

    The two waits split the remaining gap evenly around the dephasing pulse.
    """
    required_parameters = [
        ('RamseyDephase','first_pulse_duration'),
        ('RamseyDephase','pulse_gap'),
        ('RamseyDephase','dephasing_frequency'),
        ('RamseyDephase','dephasing_amplitude'),
        ('RamseyDephase','dephasing_duration'),
        ('RamseyDephase','second_pulse_duration'),
        ('StateReadout','state_readout_frequency_866'),
        ('StateReadout','state_readout_amplitude_866'),
    ]
    required_subsequences = [rabi_excitation, empty_sequence, rabi_excitation_no_offset, local_blue_heating]

    def sequence(self):
        p = self.parameters
        rd = p.RamseyDephase
        # Each idle interval is half of what remains of the gap after the
        # dephasing pulse is subtracted.
        spacing = (p.RamseyDephase.pulse_gap - p.RamseyDephase.dephasing_duration) / 2.0
        # Override the heating subsequence with the dephasing parameters;
        # the 866 nm repump settings are reused from state readout.
        heating_replace = TreeDict.fromdict({'Heating.local_blue_heating_frequency_397':rd.dephasing_frequency,
                                             'Heating.local_blue_heating_amplitude_397':rd.dephasing_amplitude,
                                             'Heating.blue_heating_frequency_866':p.StateReadout.state_readout_frequency_866,
                                             'Heating.blue_heating_amplitude_866': p.StateReadout.state_readout_amplitude_866,
                                             'Heating.blue_heating_duration':rd.dephasing_duration,
                                             'Heating.blue_heating_repump_additional':WithUnit(5, 'us')
                                             })
        # Guard: each half-gap must be at least 10 us for the hardware timing.
        if spacing < WithUnit(10.0, 'us'): raise Exception("Ramsey Dephase, gap is too short to accomodate dephasing")
        self.addSequence(rabi_excitation, TreeDict.fromdict({'Excitation_729.rabi_excitation_duration':rd.first_pulse_duration}))
        self.addSequence(empty_sequence, TreeDict.fromdict({'EmptySequence.empty_sequence_duration':spacing}))
        self.addSequence(local_blue_heating, heating_replace)
        self.addSequence(empty_sequence, TreeDict.fromdict({'EmptySequence.empty_sequence_duration':spacing}))
        self.addSequence(rabi_excitation_no_offset, TreeDict.fromdict({'Excitation_729.rabi_excitation_duration':rd.second_pulse_duration}))
import unittest
from user import User
class TestUser(unittest.TestCase):
    '''
    Test class that defines test cases for the user class behaviours.

    Args:
        unittest.TestCase: TestCase class that helps in creating test cases
    '''

    def setUp(self):
        '''
        Set up method to run before each test cases.
        '''
        # User(first_name, last_name, <third>, password) — presumably the
        # third argument is a username; it is never asserted below. TODO
        # confirm against the User class definition.
        self.new_user = User("Ketsia","Iragena","Kate302","kdce")

    def test_init(self):
        '''
        test_init test case to test if the object is initialized properly
        '''
        self.assertEqual(self.new_user.first_name,"Ketsia")
        self.assertEqual(self.new_user.last_name,"Iragena")
        self.assertEqual(self.new_user.password,"kdce")

    def test_save_user(self):
        '''
        test_save_user test case to test if the user object is saved into
        the user list
        '''
        # NOTE(review): asserts the absolute list length, so this test is
        # order-dependent — User.user_list is shared class state that is
        # never cleared between tests.
        self.new_user.save_user()
        self.assertEqual(len(User.user_list),1)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
'''@file data_reader.py
contains a reader class for data'''
import gzip

from six.moves import configparser

from nabu.processing.processors import processor_factory
class DataReader(object):
    '''the data reader class.

    a reader for data. Data is not stored in tensorflow format
    as was done in data.py. Data is returned in numpy format
    and is accessed by indexing instead of looping over all
    data. It is currently only used in postprocessing.
    '''

    def __init__(self, dataconf, segment_lengths=['full']):
        '''DataReader constructor

        Args:
            dataconf: the database configuration
            segment_lengths: A list containing the desired lengths of segments.
                Possibly multiple segment lengths
        '''
        if len(segment_lengths) > 1:
            print ('Warning: Not yet implemented __call__ correctly for multiple'
                'segments. The returned utt_info, does not contain the _part sufix and'
                'processed returns only 1 processed')
        self.segment_lengths = segment_lengths

        #read the processor config
        proc_cfg_file = dataconf['processor_config']
        parsed_proc_cfg = configparser.ConfigParser()
        parsed_proc_cfg.read(proc_cfg_file)
        proc_cfg = dict(parsed_proc_cfg.items('processor'))

        #create a processor
        self.processor = processor_factory.factory(proc_cfg['processor'])(proc_cfg,
            self.segment_lengths)

        #get the datafiles lines
        datafile = dataconf['datafiles'] #TODO: for the moment expecting only 1 file, but this also makes sense?
        if datafile[-3:] == '.gz':
            open_fn = gzip.open
        else:
            open_fn = open
        # BUGFIX: the selected open_fn was previously ignored (a plain open()
        # was always called, so gzipped datafiles were read as raw bytes and
        # gzip itself was never imported). Also close the file instead of
        # leaking the handle.
        with open_fn(datafile) as f:
            self.datafile_lines = f.readlines()

    def __call__(self, list_pos):
        '''read data from the datafile list

        Args:
            list_pos: position on the datafile list to read

        Returns:
            The processed data as a numpy array'''

        line = self.datafile_lines[list_pos]

        #split the name and the data line
        splitline = line.strip().split(' ')
        utt_name = splitline[0]
        dataline = ' '.join(splitline[1:])

        #process the dataline
        processed, utt_info = self.processor(dataline)
        utt_info['utt_name'] = utt_name

        #Currently only returning 1 processed!
        processed = processed[self.segment_lengths[0]][0]

        return processed, utt_info
'''
Model Hypothesis: The value of a company without revenue from an approved drug is related
to the quality and number of it's clinical trials. So calculate a dollar value per trial for
each company. This assumption is more accurate as the drugs in development. Big companies with high
Market Cap (MC) are going after big targets w/ important trials. Therefore small companies with
similar trials should expect a similar large payout. To remove revenue from MC,
subtract some revenue multiplier (5 years).
DISCLAIMER: these assumptions are comletely foolish. If this is the kind of stuff the quants on Wall Street do,
then they are insane.
$ / trial = MC / Ntrials
Create a neural network to learn to predict dollar value per trial:
X = (Ntrials, [tags, nlp, paper's tags]) ... each sample is a trial and the data we can gather on it.
Y = MC / N trials(company) ... the target is the dollar value per trial.
Then pass a company's pipeline forward through this network to predict a learned pipeline value.
Sort stocks by pipeline value for investment decisions.
'''
import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.layers import Input, GlobalMaxPooling1D, GlobalAveragePooling1D
from keras.layers import Conv1D, MaxPooling1D
from keras import metrics
from keras import optimizers
from process_data import get_data
import matplotlib.pyplot as plt
import json
from sklearn.utils import shuffle
import operator
import os
from tabulate import tabulate
# https://github.com/dmlc/xgboost/issues/1715
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

from pymongo import MongoClient
# Connects at import time; the `stocks` database must be reachable locally.
db = MongoClient("mongodb://localhost:27017").stocks

# TODO: still need a file based version of the data for cloud gpu

###############################
# configuration / hyperparameters
TRAINING_SPLIT = 0.95  # raise to 1 when final model train on all data
BATCH_SIZE = 128
EPOCHS = 100
LEARNING_RATE = 0.0001
OPTIMIZER = optimizers.RMSprop(lr=LEARNING_RATE, rho=0.9, epsilon=None, decay=0.0)
HIDDEN_LAYERS = 6
HIDDEN_UNITS = 128
DROPOUT = 0.5
ACTIVATION = 'relu'
LOSS_FUNCTION = 'mean_squared_error'

# Principle components analysis of tag data if True
PCAtags = False
PCAvalue = 100

# get the data
# Y is standardized by Ymean/Ystd (see the un-shift arithmetic further down).
X, Y, Ymean, Ystd, ids_today, mgs_to_trialid, Xtoday = get_data(PCAtags=PCAtags, PCAvalue=PCAvalue)
X, Y = shuffle(X, Y)  # shuffle but keep indexes together
Ntrain = int(TRAINING_SPLIT * len(X))  # give it all the data to train
Xtrain, Ytrain = X[:Ntrain], Y[:Ntrain]
Xtest, Ytest = X[Ntrain:], Y[Ntrain:]

# get shapes
N, D = X.shape
print('X.shape N,D:', X.shape)

# the model will be a sequence of layers
model = Sequential()

# input layer
model.add(Dense(units=HIDDEN_UNITS, input_dim=D, activation=ACTIVATION))

# hidden layers
for layernumber in range(HIDDEN_LAYERS):
    model.add(Dense(units=HIDDEN_UNITS, activation=ACTIVATION))
    model.add(Dropout(DROPOUT))

# output layer - no activation on output layer for regression
model.add(Dense(1))

# Compile model
model.compile(
    loss=LOSS_FUNCTION,
    optimizer=OPTIMIZER,
    metrics=[metrics.mae]
)
model.summary()

r = model.fit(
    Xtrain,
    Ytrain,
    epochs=EPOCHS,
    batch_size=BATCH_SIZE,
    validation_data=(Xtest, Ytest)
)
# predict from today's trials only
print('calculating/predicting for today ...')
ynew = model.predict(Xtoday)

# build the pipeline values for each company based on Today's data only, not historical
# NOTE(review): the inner scan over ids_today for every trial is O(trials *
# ids); a dict of id -> row index would make this a lookup.
mgPipeline = {}
for mgname in mgs_to_trialid:
    mgPipeline[mgname] = 0
    trials = mgs_to_trialid[mgname]
    for t in trials:
        for num, id in enumerate(ids_today):
            if (t == id):
                Z = ynew[num][0]
                mc = ((Z-1) * Ystd) + Ymean  # remember to unshift the mean
                mgPipeline[mgname] = mgPipeline[mgname] + int(mc)

# calculate the percent diffs, using today's data in listed, not historical
mgDiffs = {}
for mg in mgPipeline:
    li = db.listed.find_one({"medicalgroups":mg})
    mcReal = li['marketcap']
    # NOTE(review): the +1 only avoids division by zero; near-zero market
    # caps still blow the ratio up.
    diff = ( (mgPipeline[mg] - mcReal) / (mcReal+1) )
    mgDiffs[mg] = diff

# Sort companies by predicted-vs-real multiple and print as a table.
sorted_x = sorted(mgDiffs.items(), key=operator.itemgetter(1), reverse=False)
tot = []
for i in sorted_x:
    tot.append(i)
df = pd.DataFrame(tot, columns=["Name", "Mult"])
print(tabulate(df, headers='keys', tablefmt='psql'))

# print the difference in change
changeabsolute = ((r.history['val_mean_absolute_error'][0] - r.history['val_mean_absolute_error'][-1]) / r.history['val_mean_absolute_error'][0]) * 100
changeabsolute2 = ((r.history['mean_absolute_error'][0] - r.history['mean_absolute_error'][-1]) / r.history['mean_absolute_error'][0]) * 100
print('absolute error difference from start (validation): ', "%.2f" % changeabsolute, '%')
print('absolute error difference from start (train): ', "%.2f" % changeabsolute2, '%')

# print(r.history.keys())
plt.plot(r.history['loss'])
plt.plot(r.history['val_loss'])
plt.plot(r.history['mean_absolute_error'])
plt.plot(r.history['val_mean_absolute_error'])
plt.title('model loss/accuracy (absolute error)')
plt.xlabel('epoch')
plt.legend(['train loss', 'test loss', 'train err', 'test err'], loc='upper left')
plt.show()
# Notes
# save and load keras models
# https://machinelearningmastery.com/save-load-keras-deep-learning-models/
# hyperparam op: https://github.com/autonomio/talos
# https://towardsdatascience.com/hyperparameter-optimization-with-keras-b82e6364ca53
# Collection of keras model examples
# https://gist.github.com/candlewill/552fa102352ccce42fd829ae26277d24
|
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from util import spearman_rank_correlation_matrix, spearman_footrule_matrix, kendalltau_matrix
class Election(object):
    """Cluster ranked-choice votes with hierarchical agglomerative clustering.

    `votes` is an array whose rows are rank permutations (random votes are
    generated when omitted). `affinity` is a callable that maps the vote
    matrix to a pairwise distance matrix (see util for the available
    metrics). After construction, `vote_clusters` holds the per-cluster
    votes (paired with their region ids when `region_ids` is given) and
    `cluster_centers` holds the medoid vote of each cluster.
    """

    def __init__(self, votes=None, num_clusters=2, region_ids=None, affinity=spearman_rank_correlation_matrix):
        if votes is None:
            self.__generate_votes()
        else:
            self.votes = votes
        self.region_ids = region_ids
        self.affinity = affinity  # see util for possible affinity functions
        self.__cluster_votes(num_clusters)
        self.__find_cluster_centers()

    def __generate_votes(self):
        '''for simulations'''
        self.votes = np.array([np.random.permutation(4) for _ in range(100)])

    def __cluster_votes(self, num_clusters):
        '''uses HAC to cluster'''
        C = AgglomerativeClustering(n_clusters=num_clusters, affinity=self.affinity, linkage="complete")
        cluster_assignments = C.fit_predict(self.votes)
        self.vote_clusters = []
        for i in range(num_clusters):
            idx = np.array([a for a, b in enumerate(cluster_assignments) if b == i])
            if self.region_ids is None:
                self.vote_clusters.append(self.votes[idx])
            else:
                # Keep the region ids aligned with their votes.
                self.vote_clusters.append((self.votes[idx], np.array(self.region_ids)[idx]))

    def __find_cluster_centers(self):
        '''finds vote in each cluster closest to all the other votes on average'''
        self.cluster_centers = []
        for cluster in self.vote_clusters:
            # BUGFIX: when region_ids are supplied each cluster is a
            # (votes, ids) tuple; the affinity must be computed on the votes
            # alone (the tuple previously reached self.affinity and the
            # medoid indexing, which is wrong).
            votes = cluster[0] if isinstance(cluster, tuple) else cluster
            distance_matrix = self.affinity(votes)
            average_distances = np.array([np.average(ds) for ds in distance_matrix])
            self.cluster_centers.append(votes[np.argmin(average_distances)])
        return self.cluster_centers
|
import numbers
import sys
class CalculatorError(Exception):
    """An exception class for Calculator

    Raised for invalid operands and arithmetic failures so that callers can
    catch a single calculator-specific error type.
    """
class Calculator():
    """A terrible calculator.

    Every operation validates both operands, so misuse surfaces as
    CalculatorError rather than a raw TypeError/ValueError.
    """

    def add(self, a, b):
        """Return a + b."""
        self._check_operand(a)
        self._check_operand(b)
        try:
            return a + b
        except TypeError:
            raise CalculatorError("You just triggered a TypeError!")
        except ValueError:
            raise CalculatorError("You just triggered a ValueError!")

    def subtract(self, a, b):
        """Return a - b."""
        # Consistency fix: validate operands like add() already did.
        self._check_operand(a)
        self._check_operand(b)
        return a - b

    def multiply(self, a, b):
        """Return a * b."""
        self._check_operand(a)
        self._check_operand(b)
        return a * b

    def divide(self, a, b):
        """Return a // b (floor division, as in the original).

        NOTE(review): `//` floors the quotient even for float operands —
        confirm this is intended rather than true division.

        Raises:
            CalculatorError: on division by zero or non-numeric operands.
        """
        self._check_operand(a)
        self._check_operand(b)
        try:
            return a // b
        except ZeroDivisionError:
            raise CalculatorError("Can't divide by zero!")

    def _check_operand(self, operand):
        """Should only accept numbers as input.

        Raises:
            CalculatorError: if `operand` is not a numbers.Number.
        """
        if not isinstance(operand, numbers.Number):
            raise CalculatorError(f'"{operand}" was not a number')
if __name__ == "__main__":
    # Interactive driver: show a numbered menu of calculator operations and
    # loop until the user types "q".
    print("Let's calculate!")
    calculator = Calculator()
    operations = [calculator.add,
                  calculator.subtract,
                  calculator.multiply,
                  calculator.divide]
    while True:
        for number, op_func in enumerate(operations, start=1):
            print(f"{number}: {op_func.__name__}")
        print("q: quit")
        choice = input("Pick an operation:")
        if choice == "q":
            sys.exit()
        print(f"{choice} selected.")
        index = int(choice)
        # Assume the happy case,
        # where the user inputs something which the float() function accepts
        a = float(input("What is a? "))
        b = float(input("What is b? "))
        try:
            print(operations[index - 1](a, b))
        except CalculatorError as ex:
            print(ex)
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-27 12:32
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the senators app: leave-of-absence reasons
    (Afastamento), terms of office (Exercicio/Mandato), legislatures,
    senators (Parlamentar) and parties (Partido).

    NOTE: this migration has presumably been applied; only comments may be
    changed here, never the operations themselves.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        # Reason/motive for a senator stepping away from office.
        migrations.CreateModel(
            name='Afastamento',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sigla', models.CharField(max_length=6, unique=True, verbose_name='Sigla do Motivo do Afastamento')),
                ('descricao', models.CharField(max_length=200, verbose_name='Descriรงรฃo do Afastamento')),
            ],
            options={
                'ordering': ('sigla',),
            },
        ),
        # A period of active service; the 'mandato' FK referenced in its
        # Meta ordering is attached later via AddField (valid in migrations).
        migrations.CreateModel(
            name='Exercicio',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('codigo', models.CharField(max_length=6, verbose_name='Cรณdigo')),
                ('data_inicio', models.DateField(verbose_name='Data do inรญcio do Exercรญcio')),
                ('data_fim', models.DateField(blank=True, null=True, verbose_name='Data do fim do exercรญcio')),
                ('afastamento', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='senadores.Afastamento')),
            ],
            options={
                'ordering': ('mandato', 'data_inicio'),
            },
        ),
        migrations.CreateModel(
            name='Legislatura',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('data_inicio', models.DateField(verbose_name='Data de inรญcio da Legislatura')),
                ('data_fim', models.DateField(verbose_name='Data de fim da Legislatura')),
            ],
        ),
        # A senator's mandate spans two legislatures (distinct related_names).
        migrations.CreateModel(
            name='Mandato',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('codigo_mandato', models.CharField(max_length=6, verbose_name='Cรณdigo do Mandato')),
                ('uf', models.CharField(max_length=2, verbose_name='UF do Estado ao qual o mandato รฉ vinculado')),
                ('participacao', models.CharField(max_length=20, verbose_name='O tipo de participaรงรฃo')),
                ('primeira_legislatura', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='primeira', to='senadores.Legislatura')),
                ('segunda_legislatura', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='segunda', to='senadores.Legislatura')),
            ],
        ),
        migrations.CreateModel(
            name='Parlamentar',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('codigo_parlamentar', models.CharField(max_length=10, verbose_name='Cรณdigo do Parlamentar')),
                ('nome', models.CharField(max_length=45, verbose_name='Nome Polรญtico')),
                ('nome_completo', models.CharField(max_length=45, verbose_name='Nome Completo do Parlamentar')),
                ('uf', models.CharField(max_length=2, verbose_name='UF do Estado que foi eleito')),
                ('forma_tratamento', models.CharField(max_length=15, verbose_name='Forma de Tratamento')),
                ('sexo', models.CharField(choices=[('F', 'Feminino'), ('M', 'Masculino')], max_length=1)),
                ('email', models.EmailField(max_length=254)),
                ('foto_url', models.URLField(verbose_name='URL para foto do Parlamentar')),
                ('pagina_url', models.URLField(verbose_name='Endereรงo da Pรกgina do Parlamentar')),
            ],
        ),
        migrations.CreateModel(
            name='Partido',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=100, verbose_name='Nome do Partido')),
                ('sigla', models.CharField(max_length=10, verbose_name='Sigla')),
            ],
            options={
                'ordering': ('sigla',),
            },
        ),
        # FKs added after both endpoint models exist.
        migrations.AddField(
            model_name='parlamentar',
            name='partido',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='senadores.Partido'),
        ),
        migrations.AddField(
            model_name='exercicio',
            name='mandato',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='senadores.Mandato'),
        ),
    ]
|
# Example: scrape book information from an online shopping mall.
# yes24.co.kr > Bestsellers > extract each book's title, author and list price.
# The fetched page is cached locally as data/04yes24_best.html.
# (Bug fix: the cache-filename comment was previously split across two lines,
# leaving a bare non-comment line that made this script a SyntaxError.)
# http://www.yes24.com/24/category/bestseller
from urllib.request import urlopen
import re

url = 'http://www.yes24.com/24/category/bestseller'
docs = urlopen(url)
encode = docs.info().get_content_charset()  # charset advertised by the server
text = docs.read().decode(encode)

# Cache the raw HTML so the page can be re-parsed without re-fetching.
with open('data/04yes24_best.html', 'w', encoding='utf-8') as out:
    out.write(text)

with open('data/04yes24_best.html', 'r', encoding='utf-8') as f:
    html = f.read()
print(html)

# Regex patterns bracketing the HTML fragments that hold each field.
css1 = r'\[๋์.*?</a>'                   # title fragment
css2 = r'<p class="aupu"><a.*?\|'         # author fragment
css3 = r'<p class="price">.*?</strong>'   # price fragment

titles = []
writers = []
prices = []
for part_html in re.findall(css1, html, re.DOTALL):
    title = re.sub(r'<.*?>', '', part_html)  # strip HTML tags
    title = re.sub(r'\[๋์\]', '', title)    # drop the leading category label
    titles.append(title.strip())
for part_html in re.findall(css2, html, re.DOTALL):
    writer = re.sub(r'<.*?>', '', part_html)
    writer = re.sub(r'\|', '', writer)       # drop the trailing separator
    writers.append(writer.strip())
for part_html in re.findall(css3, html, re.DOTALL):
    price = re.sub(r'<.*?>', '', part_html)
    price = re.sub(r'\,', '', price)         # remove thousands separators
    prices.append(price.strip())

# Save the collected rows as CSV. zip() stops at the shortest list, so a
# partially matched page cannot raise IndexError as the old index loop could.
with open('data/04yes24_best.csv', 'w', encoding='utf-8') as out:
    fmt = '%s, "%s", %s\n'
    for title, writer, price in zip(titles, writers, prices):
        out.write(fmt % (title, writer.strip(), price))
|
# -*- coding: utf-8 -*-
#
# $Id: Basic.py 4159 2012-06-20 00:34:40Z jhill $
#
# This file is part of the BCPy2000 framework, a Python framework for
# implementing modules that run on top of the BCI2000 <http://bci2000.org/>
# platform, for the purpose of realtime biosignal processing.
#
# Copyright (C) 2007-11 Jeremy Hill, Thomas Schreiner,
# Christian Puzicha, Jason Farquhar
#
# bcpy2000@bci2000.org
#
# The BCPy2000 framework is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__all__ = [
'getfs', 'msec2samples', 'samples2msec',
'window', 'ampmod', 'wavegen',
'sinewave', 'squarewave', 'trianglewave', 'sawtoothwave',
'fftfreqs', 'fft2ap', 'ap2fft', 'reconstruct', 'toy',
'fft', 'ifft', 'hanning', 'shoulder', 'hilbert',
]
import numpy
import scipy.signal
try: from scipy import fft,ifft,hanning # yeah, just keep moving the goalposts why don't ya
except ImportError: from scipy.signal import fft,ifft,hanning
from .NumTools import isnumpyarray,project,trimtrailingdims,unwrapdiff
class ArgConflictError(Exception): pass
def getfs(obj, defaultVal=None):
    """
    Infer the sampling frequency from <obj>. <obj> may simply be the numerical
    sampling-frequency value, or it may be an object in which the sampling
    frequency is stored in obj.samplingfreq_hz (like SigTools.causalfilter
    objects), obj.fs (like WavTools.wav objects) or obj.params['SamplingRate']
    (like BCPy2000 objects).

    Returns <defaultVal> when no sampling frequency can be inferred,
    otherwise a float.
    """###
    fs = None
    if isinstance(obj, (float, int)): fs = obj
    elif hasattr(obj, 'samplingfreq_hz'): fs = obj.samplingfreq_hz
    elif hasattr(obj, 'fs'): fs = obj.fs
    elif hasattr(obj, 'params'): fs = obj.params
    if isinstance(fs, dict) and 'SamplingRate' in fs: fs = fs['SamplingRate']
    if isinstance(fs, str) and fs.lower().endswith('hz'): fs = fs[:-2]  # strip a trailing "Hz"
    if fs is None: return defaultVal  # idiom fix: identity comparison with None
    return float(fs)
def msec2samples(msec, samplingfreq_hz):
    """
    Converts milliseconds to the nearest integer number of samples given
    the specified sampling frequency.

    Accepts scalars, tuples, lists or numpy arrays of millisecond values;
    returns None when either input is None.
    """###
    fs = getfs(samplingfreq_hz)
    if msec is None or fs is None: return None  # idiom fix: 'is None'
    if isinstance(msec, (tuple, list)): msec = numpy.array(msec)
    if isinstance(msec, numpy.ndarray): msec = msec.astype(numpy.float64)
    else: msec = float(msec)
    return numpy.round(float(fs) * msec / 1000.0)
def samples2msec(samples, samplingfreq_hz):
    """
    Converts samples to milliseconds given the specified sampling frequency.

    Accepts scalars, tuples, lists or numpy arrays of sample counts;
    returns None when either input is None.
    """###
    fs = getfs(samplingfreq_hz)
    if samples is None or fs is None: return None  # idiom fix: 'is None'
    if isinstance(samples, (tuple, list)): samples = numpy.array(samples)
    if isinstance(samples, numpy.ndarray): samples = samples.astype(numpy.float64)
    else: samples = float(samples)
    return 1000.0 * samples / float(fs)
def window(w, func=hanning, axis=0):
    """
    Return a copy of <w> (a numpy.ndarray or a WavTools.wav object) multiplied
    by the specified window function, along the specified time <axis>.
    """###
    if isnumpyarray(w):
        samples = w
    elif hasattr(w, 'y'):
        w = w.copy()
        samples = w.y
    else:
        raise TypeError("don't know how to handle this kind of carrier object")
    envelope = func(samples.shape[0])
    # Reshape the 1-D envelope so it broadcasts along <axis> only.
    envelope.shape = [envelope.size if dim == axis else 1 for dim in range(samples.ndim)]
    samples = samples * envelope
    if isnumpyarray(w):
        return samples
    w.y = samples
    return w
def ampmod(w, freq_hz=1.0,phase_rad=None,phase_deg=None,amplitude=0.5,dc=0.5,samplingfreq_hz=None,duration_msec=None,duration_samples=None,axis=None,waveform=numpy.sin,**kwargs):
    """
    Return a copy of <w> (a numpy.ndarray or a WavTools.wav object) in which
    the amplitude is modulated sinusoidally along the specified time <axis>.
    Default phase is such that amplitude is 0 at time 0, which corresponds to
    phase_deg=-90 if <waveform> follows sine phase, since the modulator is a
    raised waveform. To change this, specify either <phase_rad> or <phase_deg>.
    Uses wavegen()
    """###
    # Accept either a bare sample array or a wav-like container with samples in .y
    if isnumpyarray(w): y = w
    elif hasattr(w, 'y'): w = w.copy(); y = w.y
    else: raise TypeError("don't know how to handle this kind of carrier object")
    if samplingfreq_hz==None: samplingfreq_hz = getfs(w)
    # Default phase of -90 degrees makes the raised modulator start at zero amplitude
    if phase_rad==None and phase_deg==None: phase_deg = -90.0
    # Default duration: modulate the entire length of the carrier
    if duration_samples==None and duration_msec==None: duration_samples = project(y,0).shape[0]
    envelope = wavegen(freq_hz=freq_hz,phase_rad=phase_rad,phase_deg=phase_deg,amplitude=amplitude,dc=dc,samplingfreq_hz=samplingfreq_hz,duration_msec=duration_msec,duration_samples=duration_samples,axis=axis,waveform=waveform,**kwargs)
    # Pad the envelope with trailing singleton axes so it broadcasts across channels
    envelope = project(envelope, len(y.shape)-1)
    y = y * envelope
    if isnumpyarray(w): w = y   # array in, array out
    else: w.y = y               # container in: write back and return the container
    return w
def wavegen(freq_hz=1.0,phase_rad=None,phase_deg=None,amplitude=1.0,dc=0.0,samplingfreq_hz=None,duration_msec=None,duration_samples=None,axis=None,waveform=numpy.cos,container=None,**kwargs):
    """
    Create a signal (or multiple signals, if the input arguments are arrays)
    which is a sine function of time (time being defined along the specified
    <axis>).

    Default phase is 0, but may be changed by either <phase_deg> or <phase_rad>
    (or both, as long as the values are consistent).

    Default duration is 1000 msec, but may be changed by either <duration_samples>
    or <duration_msec> (or both, as long as the values are consistent).

    A <container> object may be supplied: if so, it should be a WavTools.wav
    object. <axis> is set then set to 0, and the container object's duration
    (if non-zero), sampling frequency, and number of channels are used as fallback
    values if these are not specified elsewhere. The resulting signal is put into
    container.y and the pointer to the container is returned.

    If <duration_samples> is specified and <samplingfreq_hz> is not, then the
    sampling frequency is chosen such that the duration is 1 second, so <freq_hz>
    can be interpreted as cycles per signal.

    The default <waveform> function is numpy.cos which means that amplitude, phase
    and frequency arguments can be taken straight from the kind of dictionary
    returned by fft2ap() for an accurate reconstruction.
    """###
    fs = getfs(samplingfreq_hz)
    default_duration_msec = 1000.0
    nrep = 1
    if container is not None:
        if fs is None: fs = getfs(container)
        if hasattr(container,'duration') and container.duration(): default_duration_msec = container.duration() * 1000.0
        if hasattr(container,'channels') and container.channels() and container.y.size: nrep = container.channels()
    # Iteratively resolve fs / duration_msec / duration_samples from whichever
    # subset was supplied; the steps are idempotent, so repeated application
    # converges. (Bug fix throughout this function: '== None' / '!= None'
    # replaced by identity tests -- on numpy arrays those operators compare
    # elementwise and break boolean contexts.)
    for j in range(0,2):
        for i in range(0,2):
            if duration_msec is None:
                duration_msec = samples2msec(duration_samples, fs)
            if duration_samples is None:
                duration_samples = msec2samples(duration_msec, fs)
            if duration_samples is not None:
                duration_msec = samples2msec(duration_samples, fs)
            if fs is None and duration_samples is not None and duration_msec is not None: fs = 1000.0 * float(duration_samples) / float(duration_msec)
            if fs is None and duration_samples is not None: fs = float(duration_samples)
            if fs is None and duration_msec is not None: fs = float(duration_msec)
            if duration_msec is None: duration_msec = default_duration_msec
    duration_sec = duration_msec / 1000.0
    duration_samples = float(round(duration_samples))
    if duration_msec != samples2msec(duration_samples,fs) or duration_samples != msec2samples(duration_msec,fs):
        raise ArgConflictError("conflicting duration_samples and duration_msec arguments")
    x = numpy.arange(0.0,duration_samples) * (2.0 * numpy.pi / duration_samples)
    freq_hz = trimtrailingdims(numpy.array(freq_hz,dtype='float'))
    if phase_rad is None and phase_deg is None: phase_rad = [0.0]
    if phase_rad is not None:
        if not isnumpyarray(phase_rad) or phase_rad.dtype != 'float': phase_rad = numpy.array(phase_rad,dtype='float')
        phase_rad = trimtrailingdims(phase_rad)
    if phase_deg is not None:
        if not isnumpyarray(phase_deg) or phase_deg.dtype != 'float': phase_deg = numpy.array(phase_deg,dtype='float')
        phase_deg = trimtrailingdims(phase_deg)
    if phase_rad is not None and phase_deg is not None:
        if phase_rad.shape != phase_deg.shape: raise ArgConflictError("conflicting phase_rad and phase_deg arguments")
        if numpy.max(numpy.abs(phase_rad * (180.0/numpy.pi) - phase_deg) > 1e-10): raise ArgConflictError("conflicting phase_rad and phase_deg arguments")
    if phase_rad is None:
        phase_rad = numpy.array(phase_deg * (numpy.pi/180.0))
    amplitude = trimtrailingdims(numpy.array(amplitude,dtype='float'))
    dc = trimtrailingdims(numpy.array(dc,dtype='float'))
    maxaxis = max(len(freq_hz.shape), len(phase_rad.shape), len(amplitude.shape), len(dc.shape)) - 1
    if axis is None:
        if project(freq_hz,0).shape[0]==1 and project(phase_rad,0).shape[0]==1 and project(amplitude,0).shape[0]==1 and project(dc,0).shape[0]==1:
            axis=0
        else:
            axis = maxaxis + 1
    maxaxis = max(axis, maxaxis)
    x = project(x,maxaxis).swapaxes(0,axis)
    x = x * (project(freq_hz,maxaxis) * duration_sec) # *= won't work for broadcasting here
    # if you get an error here, try setting axis=1 and transposing the return value ;-)
    x = x + (project(phase_rad,maxaxis)) # += won't work for broadcasting here
    x = waveform(x, **kwargs)
    x = x * project(amplitude,maxaxis) # *= won't work for broadcasting here
    if numpy.any(dc.flatten()):
        x = x + project(dc,maxaxis) # += won't work for broadcasting here
    if container is not None:
        across_channels = 1
        x = project(x, across_channels)
        if x.shape[across_channels] == 1 and nrep > 1: x = x.repeat(nrep, across_channels)
        container.y = x
        container.fs = int(round(fs))
        x = container
    return x
def fftfreqs(nsamp, samplingfreq_hz=1.0):
    """
    Return a 1-D numpy.array of length <nsamp> containing the positive and
    negative frequency values corresponding to the elements of an <nsamp>-point FFT.
    If <samplingfreq_hz> is not supplied, 1.0 is assumed so the result has 0.5 as
    the Nyquist frequency).
    """###
    nsamp = int(nsamp)
    fs = getfs(samplingfreq_hz)
    binwidth = float(fs) / float(nsamp)
    highest_pos = float(numpy.floor(nsamp / 2))        # floor(nsamp/2)
    lowest_neg = -float(numpy.floor((nsamp - 1) / 2))  # -floor((nsamp-1)/2)
    posfreq = numpy.arange(0.0, highest_pos + 1.0) * binwidth
    negfreq = numpy.arange(lowest_neg, 0.0) * binwidth
    return numpy.concatenate((posfreq, negfreq))
def fft2ap(X, samplingfreq_hz=2.0, axis=0):
    """
    Given discrete Fourier transform(s) <X> (with time along the
    specified <axis>), return a dict containing a properly scaled
    amplitude spectrum, a phase spectrum in degrees and in radians,
    and a frequency axis (coping with all the fiddly edge conditions).
    The inverse of d=fft2ap(X) is X = ap2fft(**d)
    """###
    fs = getfs(samplingfreq_hz)
    nsamp = int(X.shape[axis])
    biggest_pos_freq = float(numpy.floor(nsamp/2))       # floor(nsamp/2)
    biggest_neg_freq = -float(numpy.floor((nsamp-1)/2))  # -floor((nsamp-1)/2)
    posfreq = numpy.arange(0.0, biggest_pos_freq+1.0) * (float(fs) / float(nsamp))
    negfreq = numpy.arange(biggest_neg_freq, 0.0) * (float(fs) / float(nsamp))
    fullfreq = numpy.concatenate((posfreq,negfreq))
    sub = [slice(None)] * max(axis+1, len(X.shape))
    sub[axis] = slice(0,len(posfreq))
    # Bug fix: modern numpy requires a tuple (not a list) for multi-axis indexing.
    X = project(X, axis)[tuple(sub)]
    ph = numpy.angle(X)
    amp = numpy.abs(X) * (2.0 / float(nsamp))
    if nsamp%2 == 0:
        sub[axis] = -1
        amp[tuple(sub)] /= 2.0  # the Nyquist bin occurs only once, so halve it
    # NOTE(review): the DC bin is also scaled by 2/nsamp here -- confirm intended.
    return {'amplitude':amp, 'phase_rad':ph, 'phase_deg':ph*(180.0/numpy.pi), 'freq_hz':posfreq, 'fullfreq_hz':fullfreq, 'samplingfreq_hz':fs, 'axis':axis}
def ap2fft(amplitude,phase_rad=None,phase_deg=None,samplingfreq_hz=2.0,axis=0,freq_hz=None,fullfreq_hz=None,nsamp=None):
    """
    Keyword arguments match the fields of the dict
    output by that fft2ap() .
    The inverse of d=fft2ap(X) is X = ap2fft(**d)
    """###
    fs = getfs(samplingfreq_hz)
    if nsamp is None:  # infer FFT length from whichever frequency info is available
        if fullfreq_hz is not None: nsamp = len(fullfreq_hz)
        elif freq_hz is not None: nsamp = len(freq_hz) * 2 - 2
        else: nsamp = amplitude.shape[axis] * 2 - 2
    amplitude = project(numpy.array(amplitude,dtype='float'), axis)
    # Bug fix throughout: identity tests instead of '== None' / '!= None' --
    # on numpy arrays those operators compare elementwise and break 'if'.
    if phase_rad is None and phase_deg is None: phase_rad = numpy.zeros(shape=amplitude.shape,dtype='float')
    if phase_rad is not None:
        if not isnumpyarray(phase_rad) or phase_rad.dtype != 'float': phase_rad = numpy.array(phase_rad,dtype='float')
        phase_rad = project(phase_rad, axis)
    if phase_deg is not None:
        if not isnumpyarray(phase_deg) or phase_deg.dtype != 'float': phase_deg = numpy.array(phase_deg,dtype='float')
        phase_deg = project(phase_deg, axis)
    if phase_rad is not None and phase_deg is not None:
        if phase_rad.shape != phase_deg.shape: raise ArgConflictError("conflicting phase_rad and phase_deg arguments")
        if numpy.max(numpy.abs(phase_rad * (180.0/numpy.pi) - phase_deg) > 1e-10): raise ArgConflictError("conflicting phase_rad and phase_deg arguments")
    if phase_rad is None:
        phase_rad = phase_deg * (numpy.pi/180.0)
    f = phase_rad * 1j
    f = numpy.exp(f)
    f = f * amplitude
    f *= float(nsamp)/2.0  # undo the 2/nsamp scaling applied by fft2ap
    sub = [slice(None)] * max(axis+1, len(f.shape))
    if nsamp%2 == 0:
        sub[axis] = -1
        f[tuple(sub)] *= 2.0  # bug fix: tuple index (list indices are invalid in modern numpy)
    sub[axis] = slice((nsamp%2)-2, 0, -1)
    # Mirror the positive-frequency half to rebuild the conjugate-symmetric negative half.
    f = numpy.concatenate((f, numpy.conj(f[tuple(sub)])), axis=axis)
    return f
def sinewave(theta, maxharm=None, rescale=False):
    """
    A sine wave, no different from numpy.sin but with a function
    signature compatible with squarewave(), trianglewave() and
    sawtoothwave(). <maxharm> and <rescale> are disregarded.
    """###
    # The extra arguments exist purely for signature compatibility.
    return numpy.sin(theta)
def squarewave(theta, maxharm=None, rescale=False, duty=0.5, ramp=0, tol=1e-8):
    """
    A square wave with its peaks and troughs in sine phase.
    If <maxharm> is an integer, then an anti-aliased approximation
    to the square wave (containing no components of higher frequency
    than <maxharm> times the fundamental) is returned instead. In
    this case, the <rescale> flag can be set to ensure that the
    waveform does not exceed +/- 1.0
    """###
    if ramp + tol > 1.0: raise ValueError("ramp + tol cannot exceed 1.0")
    if maxharm == None or maxharm == numpy.inf:
        # Exact construction: map theta onto cycle position t in [0, 1) and
        # build the waveform piecewise (flat tops, optional linear ramps).
        # NOTE(review): the in-place %= and boolean-mask writes assume theta
        # is a numpy array, not a scalar -- confirm with callers.
        y = theta / (2*numpy.pi)
        y %= 1.0
        t = y * 1.0
        def piecewise_linear(y, yrange, t, trange):
            # Write the linear segment yrange over the cycle span trange into
            # y (in place); returns the end of the span so segments can chain.
            if trange[1] == trange[0]:
                y[t==trange[0]] = sum(yrange)/2.0
            else:
                mask = numpy.logical_and(trange[0] <= t, t < trange[1])
                t = (t[mask] - trange[0]) / float(trange[1] - trange[0])
                if len(t): y[mask] = yrange[0] + (yrange[1] - yrange[0]) * t
            return trange[1]
        # Durations of the high and low plateaux, after reserving tol + ramp time.
        on,off = duty * (1.0-tol-ramp), (1.0 - duty) * (1-tol-ramp)
        x = 0.0
        # Chain the segments: zero-crossing, rise, high plateau, fall,
        # zero-crossing, fall, low plateau, rise, zero-crossing.
        x = piecewise_linear(y, [ 0, 0], t, [x, x + tol/4.0])
        x = piecewise_linear(y, [ 0,+1], t, [x, x + ramp/4.0 ])
        x = piecewise_linear(y, [+1,+1], t, [x, x + on ])
        x = piecewise_linear(y, [+1, 0], t, [x, x + ramp/4.0 ])
        x = piecewise_linear(y, [ 0, 0], t, [x, x + tol/2.0 ])
        x = piecewise_linear(y, [ 0,-1], t, [x, x + ramp/4.0 ])
        x = piecewise_linear(y, [-1,-1], t, [x, x + off ])
        x = piecewise_linear(y, [-1, 0], t, [x, x + ramp/4.0 ])
        x = piecewise_linear(y, [ 0, 0], t, [x, x + tol/4.0 ])
        return y
    if duty != 0.5 or ramp != 0: raise ValueError("antialiasing (maxharm!=None) not implemented for duty cycles other than 0.5 or ramps other than 0")
    # Band-limited construction: Fourier series of a square wave
    # (odd harmonics, 1/h amplitude decay).
    y = 0.0
    for h in numpy.arange(1.0, 1.0+maxharm, 2.0): y = y + numpy.sin(h*theta)/h
    y *= 4.0 / numpy.pi
    # Normalize by the maximum overshoot (sampled at the Gibbs peak) so the
    # truncated series stays within +/- 1.
    if rescale: y /= 1.01 * numpy.abs(squarewave(numpy.pi / maxharm, maxharm=maxharm, rescale=False))
    return y
def trianglewave(theta, maxharm=None, rescale=False):
    """
    A triangle wave with its peaks and troughs in sine phase.
    If <maxharm> is an integer, then an anti-aliased approximation
    to the triangle wave (containing no components of higher frequency
    than <maxharm> times the fundamental) is returned instead. The
    <rescale> flag, included for compatibility with sawtoothwave() and
    squarewave(), has no effect.
    """###
    if maxharm is None or maxharm == numpy.inf:  # idiom fix: 'is None'
        # Exact triangle: a width-0.5 sawtooth shifted a quarter-cycle into sine phase.
        return scipy.signal.sawtooth(theta+numpy.pi/2.0, width=0.5)
    # Band-limited construction: Fourier series of a triangle wave
    # (odd harmonics, 1/h**2 decay, alternating sign via sin(h*pi/2)).
    y = 0.0
    for h in numpy.arange(1.0, 1.0+maxharm, 2.0):
        y = y + numpy.sin(h * numpy.pi / 2.0) * numpy.sin(h*theta) / h**2
    y *= 8.0 / numpy.pi**2
    # rescale not necessary -- never overshoots
    return y
def sawtoothwave(theta, maxharm=None, rescale=False):
    """
    A sawtooth wave with its polarity and zero-crossings in sine phase.
    If <maxharm> is an integer, then an anti-aliased approximation
    to the sawtooth wave (containing no components of higher frequency
    than <maxharm> times the fundamental) is returned instead. In
    this case, the <rescale> flag can be set to ensure that the
    waveform does not exceed +/- 1.0
    """###
    shift = -numpy.pi  # half-cycle shift puts the zero-crossing in sine phase
    theta = theta + shift
    if maxharm is None or maxharm == numpy.inf:  # idiom fix: 'is None'
        return scipy.signal.sawtooth(theta, width=1.0)
    # Band-limited construction: Fourier series of a sawtooth (all harmonics, 1/h decay).
    y = 0.0
    for h in numpy.arange(1.0, 1.0+maxharm, 1.0): y = y + numpy.sin(h*theta)/h
    y *= -2.0 / numpy.pi
    # Normalize by the worst-case Gibbs overshoot so the truncated series stays in range.
    if rescale: y /= 1.01 * numpy.abs(sawtoothwave(numpy.pi / maxharm - shift, maxharm=maxharm, rescale=False))
    return y
def toy(n=11, f=None, a=(1.0, 0.1), p=0):
    """
    Toy sinusoidal signals for testing fft2ap() and ap2fft().
    Check both odd and even <n>.
    """###
    # Fix: mutable default list replaced with a tuple (wavegen converts it
    # to a numpy array anyway, so callers are unaffected).
    if f is None: f = [1.0, int(n/2)]  # fundamental plus a (near-)Nyquist component
    return wavegen(duration_samples=n,samplingfreq_hz=n,freq_hz=f,phase_deg=p,amplitude=a,axis=1).transpose()
def reconstruct(ap,**kwargs):
    """
    Check the accuracy of fft2ap() and wavegen() by reconstructing
    a signal as the sum of cosine waves with amplitudes and phases
    specified in dict ap, which is of the form output by fft2ap.
    """###
    ap = dict(ap) # makes a copy, at least of the container dict
    # The FFT length (= number of output samples) is implied by the full frequency axis.
    ap['duration_samples'] = len(ap.pop('fullfreq_hz'))
    ap.update(kwargs)
    axis=ap.pop('axis', -1)
    # Find a free axis beyond every array stored in ap, along which one
    # component per frequency bin can be generated and later summed.
    extra_axis = axis+1
    for v in list(ap.values()): extra_axis = max([extra_axis, len(getattr(v, 'shape', []))])
    ap['freq_hz'] = project(ap['freq_hz'], extra_axis).swapaxes(axis,0)
    ap['axis'] = extra_axis
    r = wavegen(**ap)
    r = r.swapaxes(extra_axis, axis)
    r = r.sum(axis=extra_axis)  # superpose the per-frequency cosines
    return r
def shoulder(x, s):
    """
    Return a (possibly asymmetric) Tukey window function of x.
    s may have 1, 2, 3 or 4 elements:
       1: raised cosine between x=s[0]-0.5 and x=s[0]+0.5
       2: raised cosine between x=s[0] and x=s[1]
       3: raised cosine rise from s[0] to s[1], and fall from s[1] to s[2]
       4: raised cosine rise from s[0] to s[1], plateau from s[1] to s[2],
          and fall from s[2] to s[3]
    """###
    # Normalize s to the canonical 4-element [rise_start, rise_end, fall_start, fall_end].
    if len(s) == 1: s = [s[0]-0.5, s[0]+0.5]
    if len(s) == 2: s = [s[0], 0.5*(s[0]+s[1]), s[1]]
    if len(s) == 3: s = [s[0], s[1], s[1], s[2]]
    if len(s) != 4: raise ValueError("s must have 1, 2, 3 or 4 elements")
    # Robustness fix: coerce to float so the in-place divisions below cannot
    # fail on integer input arrays.
    x = numpy.asarray(x, dtype=float)
    # NOTE(review): zero-width rise/fall edges (s[1]==s[0] or s[3]==s[2])
    # divide by zero here, as in the original -- confirm callers avoid that.
    xrise = x - s[0]
    xrise /= s[1]-s[0]
    xrise = numpy.fmin(1, numpy.fmax(0, xrise))
    xrise = 0.5 - 0.5 * numpy.cos(xrise * numpy.pi)
    xfall = x - s[2]
    xfall /= s[3]-s[2]
    xfall = numpy.fmin(1, numpy.fmax(0, xfall))
    xfall = 0.5 + 0.5 * numpy.cos(xfall * numpy.pi)
    # The window is wherever both the rising and falling envelopes allow.
    return numpy.fmin(xrise, xfall)
def hilbert(x, N=None, axis=0, band=(), samplingfreq_hz=None, return_dict=False):
    """
    Compute the analytic signal, just like scipy.signal.hilbert but
    with the differences that (a) the computation can be performed
    along any axis and (b) a limited band of the signal may be
    considered. The <band> argument can be a two-, three- or
    four-element tuple suitable for passing to shoulder(), specifying
    the edges of the passband (expressed in Hz if <samplingfreq_hz> is
    explicitly supplied, or relative to Nyquist if not).
    If <return_dict> is True, do not return just the complex analytic signal
    but rather a dict containing its amplitude, phase, and unwrapped phase
    difference.
    """###
    fs = getfs(x)
    if samplingfreq_hz is not None: fs = samplingfreq_hz
    if fs is None: fs = 2.0  # fall back to normalized frequency (Nyquist = 1.0)
    x = getattr(x, 'y', x)   # unwrap WavTools.wav-like containers
    if N is None: N = x.shape[axis]
    shape = [1 for d in x.shape]
    shape[axis] = N
    # Frequency-domain multiplier for the analytic signal: keep DC (and the
    # Nyquist bin for even N), double positive frequencies, zero the rest.
    h = numpy.zeros(shape, dtype=numpy.float64)
    if N % 2:
        h.flat[0] = 1
        h.flat[1:(N+1)//2] = 2  # bug fix: // -- Python 3 requires integer slice bounds
    else:
        h.flat[0] = h.flat[N//2] = 1  # bug fix: // -- Python 3 requires integer indices
        h.flat[1:N//2] = 2
    x = fft(x, n=N, axis=axis)
    x = numpy.multiply(x, h)
    if len(band):
        # Optionally restrict to a passband with raised-cosine edges.
        f = fftfreqs(N, samplingfreq_hz=fs)
        h.flat = shoulder(numpy.abs(f), band)
        x = numpy.multiply(x, h)
    x = ifft(x, n=N, axis=axis)
    if not return_dict: return x
    amplitude = numpy.abs(x)
    phase_rad = numpy.angle(x)
    deltaphase_rad = unwrapdiff(phase_rad, base=numpy.pi*2, axis=axis)[0]
    return {
        'amplitude': amplitude,
        'phase_rad': phase_rad,
        'deltaphase_rad': deltaphase_rad,
    }
|
# Read the dimensions of a rectangular plot and the price per square metre,
# then report the plot's area and total price.
largura = float(input("Digite a largura do terreno: "))
altura = float(input("Digite a altura do terreno: "))
valor_m2 = float(input("Digite o valor do metro quadrado: "))
# Redundant float() wrappers removed: the operands are already floats.
area = largura * altura
preco = area * valor_m2
print(f"Area do terreno: {area:.2f}")
print(f"Preco do terreno: {preco:.2f}")
|
# politician/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
import re
from datetime import datetime
import gender_guesser.detector as gender
from django.db import models
from django.db.models import Q
import wevote_functions.admin
from candidate.models import PROFILE_IMAGE_TYPE_TWITTER, PROFILE_IMAGE_TYPE_UNKNOWN, \
PROFILE_IMAGE_TYPE_CURRENTLY_ACTIVE_CHOICES
from exception.models import handle_exception, handle_record_found_more_than_one_exception
from tag.models import Tag
from wevote_functions.functions import candidate_party_display, convert_to_int, convert_date_to_date_as_integer, \
convert_to_political_party_constant, display_full_name_with_correct_capitalization, \
extract_first_name_from_full_name, extract_middle_name_from_full_name, \
extract_last_name_from_full_name, extract_twitter_handle_from_text_string, positive_value_exists
from wevote_settings.models import fetch_next_we_vote_id_politician_integer, fetch_site_unique_id_prefix
# Single-character gender codes stored on Politician records.
FEMALE = 'F'
GENDER_NEUTRAL = 'N'
MALE = 'M'
UNKNOWN = 'U'
# Django choices tuple pairing each code with its display label.
GENDER_CHOICES = (
    (MALE, 'Male'),
    (FEMALE, 'Female'),
    (GENDER_NEUTRAL, 'Nonbinary'),
    (UNKNOWN, 'Unknown'),
)
# Human-readable labels for the raw categories returned by gender_guesser.
DISPLAYABLE_GUESS = {
    'male': 'Male',
    'mostly_male': 'Likely Male',
    'female': 'Female',
    'mostly_female': 'Likely Female',
    'unknown': '...?...',
}
# Provenance codes recording HOW a gender value was determined.
POLITICAL_DATA_MANAGER = 'PolDataMgr'
PROVIDED_BY_POLITICIAN = 'Politician'
GENDER_GUESSER_HIGH_LIKELIHOOD = 'GuessHigh'
GENDER_GUESSER_LOW_LIKELIHOOOD = 'GuessLow'  # NOTE: 'LIKELIHOOOD' spelling kept -- callers reference this name
NOT_ANALYZED = ''
GENDER_LIKELIHOOD = (
    (POLITICAL_DATA_MANAGER, 'Political Data Mgr'),
    (PROVIDED_BY_POLITICIAN, 'Politician Provided'),
    (GENDER_GUESSER_HIGH_LIKELIHOOD, 'Gender Guesser High Likelihood'),
    (GENDER_GUESSER_LOW_LIKELIHOOOD, 'Gender Guesser Low Likelihood'),
    (NOT_ANALYZED, ''),
)
logger = wevote_functions.admin.get_logger(__name__)
# Module-level gender_guesser detector instance shared by all callers.
detector = gender.Detector()
# When merging candidates, these are the fields we check for figure_out_politician_conflict_values
POLITICIAN_UNIQUE_IDENTIFIERS = [
    'ballotpedia_id',
    'ballotpedia_politician_name',
    'ballotpedia_politician_url',
    'bioguide_id',
    'birth_date',
    'cspan_id',
    'ctcl_uuid',
    # 'facebook_url',  # We now have 3 options and merge them automatically
    # 'facebook_url_is_broken',
    'fec_id',
    'first_name',
    'gender',
    'govtrack_id',
    'house_history_id',
    'icpsr_id',
    'instagram_followers_count',
    'instagram_handle',
    'is_battleground_race_2019',
    'is_battleground_race_2020',
    'is_battleground_race_2021',
    'is_battleground_race_2022',
    'is_battleground_race_2023',
    'is_battleground_race_2024',
    'is_battleground_race_2025',
    'is_battleground_race_2026',
    'last_name',
    'lis_id',
    'maplight_id',
    'middle_name',
    'opensecrets_id',
    'political_party',
    'politician_contact_form_url',
    # 'politician_email',  # We now have 3 options and merge them automatically
    'politician_facebook_id',
    'politician_googleplus_id',
    'politician_name',
    # 'politician_phone_number',  # We now have 3 options and merge them automatically
    # 'politician_url',  # We have 5 options now and merge them automatically
    'politician_youtube_id',
    'seo_friendly_path',
    'state_code',
    'thomas_id',
    'twitter_handle_updates_failing',
    'twitter_handle2_updates_failing',
    'vote_smart_id',
    'vote_usa_politician_id',
    'washington_post_id',
    'we_vote_hosted_profile_image_url_large',
    'we_vote_hosted_profile_image_url_medium',
    'we_vote_hosted_profile_image_url_tiny',
    'wikipedia_id',
]
# Unique (externally-keyed) attributes that must be cleared on one record
# before they can be moved to another during a merge.
POLITICIAN_UNIQUE_ATTRIBUTES_TO_BE_CLEARED = [
    'bioguide_id',
    'fec_id',
    'govtrack_id',
    'maplight_id',
    'seo_friendly_path',
    'thomas_id',
]
class Politician(models.Model):
    """
    One person who runs for (or holds) elected office, shared across elections.
    Candidate and Representative records elsewhere in the project link back to a
    single Politician entry over time.
    """
    # We are relying on built-in Python id field
    # The we_vote_id identifier is unique across all We Vote sites, and allows us to share our data with other
    # organizations
    # It starts with "wv" then we add on a database specific identifier like "3v" (WeVoteSetting.site_unique_id_prefix)
    # then the string "pol", and then a sequential integer like "123".
    # We keep the last value in WeVoteSetting.we_vote_id_last_politician_integer
    # These None assignments quiet IDE/linter warnings; Django replaces them at class creation
    DoesNotExist = None
    MultipleObjectsReturned = None
    objects = None
    we_vote_id = models.CharField(
        verbose_name="we vote permanent id of this politician", max_length=255, default=None, null=True,
        blank=True, unique=True)
    # Official Statement from Candidate in Ballot Guide
    ballot_guide_official_statement = models.TextField(verbose_name="official candidate statement from ballot guide",
                                                       null=True, blank=True, default=None)
    # See this url for properties: https://docs.python.org/2/library/functions.html#property
    first_name = models.CharField(verbose_name="first name",
                                  max_length=255, default=None, null=True, blank=True)
    middle_name = models.CharField(verbose_name="middle name",
                                   max_length=255, default=None, null=True, blank=True)
    last_name = models.CharField(verbose_name="last name",
                                 max_length=255, default=None, null=True, blank=True)
    politician_name = models.CharField(verbose_name="official full name",
                                       max_length=255, default=None, null=True, blank=True)
    facebook_url = models.TextField(verbose_name='facebook url of candidate', blank=True, null=True)
    facebook_url2 = models.TextField(blank=True, null=True)
    facebook_url3 = models.TextField(blank=True, null=True)
    facebook_url_is_broken = models.BooleanField(verbose_name="facebook url is broken", default=False)
    facebook_url2_is_broken = models.BooleanField(default=False)
    facebook_url3_is_broken = models.BooleanField(default=False)
    # This is the politician's name from GoogleCivicCandidateCampaign
    google_civic_name_alternates_generated = models.BooleanField(default=False)
    google_civic_candidate_name = models.CharField(
        verbose_name="full name from google civic", max_length=255, default=None, null=True, blank=True)
    google_civic_candidate_name2 = models.CharField(max_length=255, null=True)
    google_civic_candidate_name3 = models.CharField(max_length=255, null=True)
    # This is the politician's name assembled from TheUnitedStatesIo first_name + last_name for quick search
    full_name_assembled = models.CharField(verbose_name="full name assembled from first_name + last_name",
                                           max_length=255, default=None, null=True, blank=True)
    gender = models.CharField("gender", max_length=1, choices=GENDER_CHOICES, default=UNKNOWN)
    gender_likelihood = models.CharField("gender guess likelihood", max_length=11, choices=GENDER_LIKELIHOOD,
                                         default='')
    birth_date = models.DateField("birth date", default=None, null=True, blank=True)
    # race = enum?
    # official_image_id = ??
    bioguide_id = models.CharField(verbose_name="bioguide unique identifier",
                                   max_length=200, null=True, unique=True)
    thomas_id = models.CharField(verbose_name="thomas unique identifier",
                                 max_length=200, null=True, unique=True)
    lis_id = models.CharField(verbose_name="lis unique identifier",
                              max_length=200, null=True, blank=True, unique=False)
    govtrack_id = models.CharField(verbose_name="govtrack unique identifier",
                                   max_length=200, null=True, unique=True)
    opensecrets_id = models.CharField(verbose_name="opensecrets unique identifier",
                                      max_length=200, null=True, unique=False)
    vote_smart_id = models.CharField(verbose_name="votesmart unique identifier",
                                     max_length=200, null=True, unique=False)
    fec_id = models.CharField(verbose_name="fec unique identifier",
                              max_length=200, null=True, unique=True, blank=True)
    cspan_id = models.CharField(verbose_name="cspan unique identifier",
                                max_length=200, null=True, blank=True, unique=False)
    # DEPRECATE wikipedia_id
    wikipedia_id = models.CharField(verbose_name="wikipedia url",
                                    max_length=500, default=None, null=True, blank=True)
    wikipedia_url = models.TextField(null=True)
    # The candidate's name as passed over by Ballotpedia
    ballotpedia_politician_name = models.CharField(
        verbose_name="name exactly as received from ballotpedia", max_length=255, null=True, blank=True)
    ballotpedia_politician_url = models.TextField(
        verbose_name='url of politician on ballotpedia', blank=True, null=True)
    # We might need to deprecate ballotpedia_id
    ballotpedia_id = models.CharField(
        verbose_name="ballotpedia url", max_length=500, default=None, null=True, blank=True)
    house_history_id = models.CharField(verbose_name="house history unique identifier",
                                        max_length=200, null=True, blank=True)
    maplight_id = models.CharField(verbose_name="maplight unique identifier",
                                   max_length=200, null=True, unique=True, blank=True)
    washington_post_id = models.CharField(verbose_name="washington post unique identifier",
                                          max_length=200, null=True, unique=False)
    icpsr_id = models.CharField(verbose_name="icpsr unique identifier",
                                max_length=200, null=True, unique=False)
    tag_link = models.ManyToManyField(Tag, through='PoliticianTagLink')
    # The full name of the party the official belongs to.
    political_party = models.CharField(verbose_name="politician political party", max_length=255, null=True)
    politician_url = models.TextField(blank=True, null=True)
    politician_url2 = models.TextField(blank=True, null=True)
    politician_url3 = models.TextField(blank=True, null=True)
    politician_url4 = models.TextField(blank=True, null=True)
    politician_url5 = models.TextField(blank=True, null=True)
    politician_contact_form_url = models.URLField(
        verbose_name='website url of contact form', max_length=255, blank=True, null=True)
    politician_twitter_handle = models.CharField(max_length=255, null=True, unique=False)
    politician_twitter_handle2 = models.CharField(max_length=255, null=True, unique=False)
    politician_twitter_handle3 = models.CharField(max_length=255, null=True, unique=False)
    politician_twitter_handle4 = models.CharField(max_length=255, null=True, unique=False)
    politician_twitter_handle5 = models.CharField(max_length=255, null=True, unique=False)
    seo_friendly_path = models.CharField(max_length=255, null=True, unique=True, db_index=True)
    seo_friendly_path_date_last_updated = models.DateTimeField(null=True)
    seo_friendly_path_needs_regeneration = models.BooleanField(default=False)
    state_code = models.CharField(verbose_name="politician home state", max_length=2, null=True)
    supporters_count = models.PositiveIntegerField(default=0)  # From linked_campaignx_we_vote_id CampaignX entry
    twitter_handle_updates_failing = models.BooleanField(default=False)
    twitter_handle2_updates_failing = models.BooleanField(default=False)
    twitter_user_id = models.BigIntegerField(verbose_name="twitter id", null=True, blank=True)
    vote_usa_politician_id = models.CharField(
        verbose_name="Vote USA permanent id for this politician", max_length=64, default=None, null=True, blank=True)
    # Image URL on Vote USA's servers. See vote_usa_profile_image_url_https, the master image cached on We Vote servers.
    photo_url_from_vote_usa = models.TextField(null=True, blank=True)
    # This is the master image url cached on We Vote servers. See photo_url_from_vote_usa for Vote USA URL.
    vote_usa_profile_image_url_https = models.TextField(null=True, blank=True, default=None)
    # Which politician image is currently active?
    profile_image_type_currently_active = models.CharField(
        max_length=10, choices=PROFILE_IMAGE_TYPE_CURRENTLY_ACTIVE_CHOICES, default=PROFILE_IMAGE_TYPE_UNKNOWN)
    we_vote_hosted_politician_photo_original_url = models.TextField(blank=True, null=True)
    # Image for politician from Facebook, cached on We Vote's servers. See also facebook_profile_image_url_https.
    we_vote_hosted_profile_facebook_image_url_large = models.TextField(blank=True, null=True)
    we_vote_hosted_profile_facebook_image_url_medium = models.TextField(blank=True, null=True)
    we_vote_hosted_profile_facebook_image_url_tiny = models.TextField(blank=True, null=True)
    # Image for politician from Twitter, cached on We Vote's servers. See local master twitter_profile_image_url_https.
    we_vote_hosted_profile_twitter_image_url_large = models.TextField(blank=True, null=True)
    we_vote_hosted_profile_twitter_image_url_medium = models.TextField(blank=True, null=True)
    we_vote_hosted_profile_twitter_image_url_tiny = models.TextField(blank=True, null=True)
    # Image for politician uploaded to We Vote's servers.
    we_vote_hosted_profile_uploaded_image_url_large = models.TextField(blank=True, null=True)
    we_vote_hosted_profile_uploaded_image_url_medium = models.TextField(blank=True, null=True)
    we_vote_hosted_profile_uploaded_image_url_tiny = models.TextField(blank=True, null=True)
    # Image for politician from Vote USA, cached on We Vote's servers. See local master vote_usa_profile_image_url_https.
    we_vote_hosted_profile_vote_usa_image_url_large = models.TextField(blank=True, null=True)
    we_vote_hosted_profile_vote_usa_image_url_medium = models.TextField(blank=True, null=True)
    we_vote_hosted_profile_vote_usa_image_url_tiny = models.TextField(blank=True, null=True)
    # Image we are using as the profile photo (could be sourced from Twitter, Facebook, etc.)
    we_vote_hosted_profile_image_url_large = models.TextField(blank=True, null=True)
    we_vote_hosted_profile_image_url_medium = models.TextField(blank=True, null=True)
    we_vote_hosted_profile_image_url_tiny = models.TextField(blank=True, null=True)
    # ctcl politician fields
    ctcl_uuid = models.CharField(verbose_name="ctcl uuid", max_length=36, null=True, blank=True)
    instagram_handle = models.TextField(verbose_name="politician's instagram handle", blank=True, null=True)
    instagram_followers_count = models.IntegerField(
        verbose_name="count of politician's instagram followers", null=True, blank=True)
    # As we add more years here, update /wevote_settings/constants.py IS_BATTLEGROUND_YEARS_AVAILABLE
    is_battleground_race_2019 = models.BooleanField(default=False, null=False)
    is_battleground_race_2020 = models.BooleanField(default=False, null=False)
    is_battleground_race_2021 = models.BooleanField(default=False, null=False)
    is_battleground_race_2022 = models.BooleanField(default=False, null=False)
    is_battleground_race_2023 = models.BooleanField(default=False, null=False)
    is_battleground_race_2024 = models.BooleanField(default=False, null=False)
    is_battleground_race_2025 = models.BooleanField(default=False, null=False)
    is_battleground_race_2026 = models.BooleanField(default=False, null=False)
    # Every politician has one default CampaignX entry that follows them over time. Campaigns with
    # a linked_politician_we_vote_id are auto-generated by We Vote.
    # This is not the same as saying that a CampaignX is supporting or opposing this politician -- we use
    # the CampaignXPolitician table to store links to politicians.
    linked_campaignx_we_vote_id = models.CharField(max_length=255, null=True, unique=True)
    linked_campaignx_we_vote_id_date_last_updated = models.DateTimeField(null=True)
    linkedin_url = models.TextField(null=True, blank=True)
    ocd_id_state_mismatch_found = models.BooleanField(default=False, null=False)
    politician_facebook_id = models.CharField(
        verbose_name='politician facebook user name', max_length=255, null=True, unique=False)
    politician_phone_number = models.CharField(max_length=255, null=True, unique=False)
    politician_phone_number2 = models.CharField(max_length=255, null=True, unique=False)
    politician_phone_number3 = models.CharField(max_length=255, null=True, unique=False)
    politician_googleplus_id = models.CharField(
        verbose_name='politician googleplus profile name', max_length=255, null=True, unique=False)
    politician_youtube_id = models.CharField(
        verbose_name='politician youtube profile name', max_length=255, null=True, unique=False)
    # DEPRECATE after transferring all data to politician_email
    politician_email_address = models.CharField(max_length=255, null=True, unique=False)
    politician_email = models.CharField(max_length=255, null=True, unique=False)
    politician_email2 = models.CharField(max_length=255, null=True, unique=False)
    politician_email3 = models.CharField(max_length=255, null=True, unique=False)
    twitter_name = models.CharField(
        verbose_name="politician plain text name from twitter", max_length=255, null=True, blank=True)
    twitter_location = models.CharField(
        verbose_name="politician location from twitter", max_length=255, null=True, blank=True)
    twitter_followers_count = models.IntegerField(
        verbose_name="number of twitter followers", null=False, blank=True, default=0)
    # This is the master image cached on We Vote servers. Note that we do not keep the original image URL from Twitter.
    twitter_profile_image_url_https = models.TextField(
        verbose_name='locally cached url of politician profile image from twitter', blank=True, null=True)
    twitter_profile_background_image_url_https = models.TextField(
        verbose_name='tile-able background from twitter', blank=True, null=True)
    twitter_profile_banner_url_https = models.TextField(
        verbose_name='profile banner image from twitter', blank=True, null=True)
    twitter_description = models.CharField(
        verbose_name="Text description of this organization from twitter.", max_length=255, null=True, blank=True)
    youtube_url = models.TextField(blank=True, null=True)
    date_last_updated = models.DateTimeField(null=True, auto_now=True)
    date_last_updated_from_candidate = models.DateTimeField(null=True, default=None)

    # We override the save function so we can auto-generate we_vote_id
    def save(self, *args, **kwargs):
        """Normalize we_vote_id (or generate one), then save via the parent class."""
        # Even if this data came from another source we still need a unique we_vote_id
        if self.we_vote_id:
            self.we_vote_id = self.we_vote_id.strip().lower()
        if self.we_vote_id == "" or self.we_vote_id is None:  # If there isn't a value...
            # ...generate a new id
            site_unique_id_prefix = fetch_site_unique_id_prefix()
            next_local_integer = fetch_next_we_vote_id_politician_integer()
            # "wv" = We Vote
            # site_unique_id_prefix = a generated (or assigned) unique id for one server running We Vote
            # "pol" = tells us this is a unique id for a Politician
            # next_integer = a unique, sequential integer for this server - not necessarily tied to database id
            self.we_vote_id = "wv{site_unique_id_prefix}pol{next_integer}".format(
                site_unique_id_prefix=site_unique_id_prefix,
                next_integer=next_local_integer,
            )
        if self.maplight_id == "":  # We want this to be unique IF there is a value, and otherwise "None"
            self.maplight_id = None
        super(Politician, self).save(*args, **kwargs)

    def __unicode__(self):
        # Legacy Python 2 display hook, retained for any remaining direct callers
        return self.last_name

    def __str__(self):
        # Python 3 / Django 3 uses __str__ (the __unicode__ hook above is never called).
        # Guard against a null last_name so str() always returns a string.
        return self.last_name or ''

    class Meta:
        ordering = ('last_name',)

    def display_full_name(self):
        """Return the best human-readable name available for this politician."""
        if self.politician_name:
            return self.politician_name
        elif self.first_name and self.last_name:
            return self.first_name + " " + self.last_name
        elif self.google_civic_candidate_name:
            return self.google_civic_candidate_name
        else:
            # Fall back to whichever name parts exist. The previous version concatenated
            # possibly-None values here and could raise TypeError.
            return " ".join(part for part in (self.first_name, self.last_name) if part)

    def politician_photo_url(self):
        """
        fetch URL of politician's photo from TheUnitedStatesIo repo
        (only available when we have a bioguide_id; otherwise returns "")
        """
        if self.bioguide_id:
            url_str = 'https://theunitedstates.io/images/congress/225x275/{bioguide_id}.jpg'.format(
                bioguide_id=self.bioguide_id)
            return url_str
        else:
            return ""

    def is_female(self):
        """True when gender is recorded as female."""
        return self.gender in [FEMALE]

    def is_gender_neutral(self):
        """True when gender is recorded as gender-neutral / non-binary."""
        return self.gender in [GENDER_NEUTRAL]

    def is_male(self):
        """True when gender is recorded as male."""
        return self.gender in [MALE]

    def is_gender_specified(self):
        """True when any gender value (other than unknown) has been recorded."""
        return self.gender in [FEMALE, GENDER_NEUTRAL, MALE]
class PoliticiansAreNotDuplicates(models.Model):
    """
    When checking for duplicates, there are times when we want to explicitly mark two politicians as NOT duplicates
    """
    objects = None
    politician1_we_vote_id = models.CharField(
        verbose_name="first politician we are tracking", max_length=255, null=True, unique=False)
    politician2_we_vote_id = models.CharField(
        verbose_name="second politician we are tracking", max_length=255, null=True, unique=False)

    def fetch_other_politician_we_vote_id(self, one_we_vote_id):
        """Given one we_vote_id of this pair, return its partner, or "" if not part of the pair."""
        partner_lookup = {
            self.politician1_we_vote_id: self.politician2_we_vote_id,
            self.politician2_we_vote_id: self.politician1_we_vote_id,
        }
        # If the we_vote_id passed in wasn't found, don't return another we_vote_id
        return partner_lookup.get(one_we_vote_id, "")
class PoliticiansArePossibleDuplicates(models.Model):
    """
    When checking for duplicates, there are times when we want to explicitly mark two politicians as possible duplicates
    """
    politician1_we_vote_id = models.CharField(max_length=255, null=True, unique=False)
    politician2_we_vote_id = models.CharField(max_length=255, null=True, unique=False)
    state_code = models.CharField(max_length=2, null=True)

    def fetch_other_politician_we_vote_id(self, one_we_vote_id):
        """Given one we_vote_id of this pair, return its partner, or "" if not part of the pair."""
        if one_we_vote_id == self.politician1_we_vote_id:
            return self.politician2_we_vote_id
        if one_we_vote_id == self.politician2_we_vote_id:
            return self.politician1_we_vote_id
        # If the we_vote_id passed in wasn't found, don't return another we_vote_id
        return ""
class PoliticianManager(models.Manager):
def __init__(self):
pass
    def add_politician_position_sorting_dates_if_needed(self, position_object=None, politician_we_vote_id=''):
        """
        Search for any CandidateCampaign objects for this politician in the future
        Then find the latest election that candidate is running for, so we can get
        candidate_year and candidate_ultimate_election_date.
        If no future candidate entries for this politician, set position_ultimate_election_not_linked to True
        :param position_object: a position entry whose position_year / position_ultimate_election_date
            fields may be filled in or advanced (only ever raised, never lowered)
        :param politician_we_vote_id: we_vote_id of the politician whose candidate entries we search
        :return: dict with 'position_object', 'position_object_updated', 'status', 'success'
        """
        candidate_list = []
        candidate_list_found = False
        position_object_updated = False
        status = ""
        success = True
        # Imported locally to avoid a circular import between politician and candidate modules
        from candidate.models import CandidateManager
        candidate_manager = CandidateManager()
        if positive_value_exists(politician_we_vote_id):
            from candidate.models import CandidateListManager
            candidate_list_manager = CandidateListManager()
            results = candidate_list_manager.retrieve_candidates_from_politician(
                politician_we_vote_id=politician_we_vote_id,
                read_only=True)
            if results['candidate_list_found']:
                candidate_list = results['candidate_list']
                candidate_list_found = True
            elif not results['success']:
                status += results['status']
                success = False
        # De-duplicate into a plain list of candidate we_vote_ids
        candidate_we_vote_id_list = []
        if candidate_list_found:
            for candidate in candidate_list:
                if candidate.we_vote_id not in candidate_we_vote_id_list:
                    candidate_we_vote_id_list.append(candidate.we_vote_id)
        ultimate_election_date_found = False
        if candidate_list_found and len(candidate_we_vote_id_list) > 0:
            today = datetime.now().date()
            this_year = 0
            if today and today.year:
                this_year = convert_to_int(today.year)
            date_now_as_integer = convert_date_to_date_as_integer(today)
            date_results = candidate_manager.generate_candidate_position_sorting_dates(
                candidate_we_vote_id_list=candidate_we_vote_id_list)
            if not positive_value_exists(date_results['success']):
                success = False
            if success:
                # Only consider election years that are this year or later; past years are zeroed out
                largest_year_integer = date_results['largest_year_integer']
                if largest_year_integer < this_year:
                    largest_year_integer = 0
                if positive_value_exists(largest_year_integer):
                    # Set position_year if missing, or advance it -- never move it backwards
                    if not position_object.position_year:
                        position_object.position_year = largest_year_integer
                        position_object_updated = True
                    elif largest_year_integer > position_object.position_year:
                        position_object.position_year = largest_year_integer
                        position_object_updated = True
                # Same pattern for the ultimate election date (YYYYMMDD integer): ignore past dates
                largest_election_date_integer = date_results['largest_election_date_integer']
                if largest_election_date_integer < date_now_as_integer:
                    largest_election_date_integer = 0
                if positive_value_exists(largest_election_date_integer):
                    if not position_object.position_ultimate_election_date:
                        position_object.position_ultimate_election_date = largest_election_date_integer
                        position_object_updated = True
                        ultimate_election_date_found = True
                    elif largest_election_date_integer > position_object.position_ultimate_election_date:
                        position_object.position_ultimate_election_date = largest_election_date_integer
                        position_object_updated = True
                        ultimate_election_date_found = True
        if success and not ultimate_election_date_found:
            # If here, mark position_ultimate_election_not_linked as True and then exit
            status += "ULTIMATE_ELECTION_DATE_NOT_FOUND "
            position_object.position_ultimate_election_not_linked = True
            position_object_updated = True
        return {
            'position_object_updated': position_object_updated,
            'position_object': position_object,
            'status': status,
            'success': success,
        }
def create_politician_from_similar_object(self, similar_object):
"""
Take We Vote candidate, organization or representative object, and create a new politician entry
:param similar_object:
:return:
"""
status = ''
success = True
politician = None
politician_created = False
politician_found = False
politician_id = 0
politician_we_vote_id = ''
birth_date = None
facebook_url = None
first_name = None
gender = UNKNOWN
instagram_handle = None
last_name = None
linkedin_url = None
middle_name = None
object_is_candidate = False
object_is_organization = False
object_is_representative = False
political_party = None
state_code = None
vote_usa_politician_id = None
if 'cand' in similar_object.we_vote_id:
object_is_candidate = True
facebook_url = similar_object.facebook_url
first_name = extract_first_name_from_full_name(similar_object.candidate_name)
instagram_handle = similar_object.instagram_handle
middle_name = extract_middle_name_from_full_name(similar_object.candidate_name)
last_name = extract_last_name_from_full_name(similar_object.candidate_name)
linkedin_url = similar_object.linkedin_url
political_party_constant = convert_to_political_party_constant(similar_object.party)
political_party = candidate_party_display(political_party_constant)
if positive_value_exists(similar_object.birth_day_text):
try:
birth_date = datetime.strptime(similar_object.birth_day_text, '%Y-%m-%d')
except Exception as e:
birth_date = None
status += "FAILED_CONVERTING_BIRTH_DAY_TEXT: " + str(e) + " " + \
str(similar_object.birth_day_text) + " "
else:
birth_date = None
if positive_value_exists(similar_object.candidate_gender):
if similar_object.candidate_gender.lower() == 'female':
gender = FEMALE
elif similar_object.candidate_gender.lower() == 'male':
gender = MALE
elif similar_object.candidate_gender.lower() in ['nonbinary', 'non-binary', 'non binary']:
gender = GENDER_NEUTRAL
else:
gender = UNKNOWN
else:
gender = UNKNOWN
state_code = similar_object.state_code
vote_usa_politician_id = similar_object.vote_usa_politician_id
elif 'org' in similar_object.we_vote_id:
object_is_organization = True
facebook_url = similar_object.organization_facebook
first_name = extract_first_name_from_full_name(similar_object.organization_name)
instagram_handle = similar_object.organization_instagram_handle
middle_name = extract_middle_name_from_full_name(similar_object.organization_name)
last_name = extract_last_name_from_full_name(similar_object.organization_name)
gender = UNKNOWN
state_code = similar_object.state_served_code
elif 'rep' in similar_object.we_vote_id:
# If here we are looking at representative object
object_is_representative = True
facebook_url = similar_object.facebook_url
first_name = extract_first_name_from_full_name(similar_object.representative_name)
instagram_handle = similar_object.instagram_handle
middle_name = extract_middle_name_from_full_name(similar_object.representative_name)
last_name = extract_last_name_from_full_name(similar_object.representative_name)
linkedin_url = similar_object.linkedin_url
political_party_constant = convert_to_political_party_constant(similar_object.political_party)
political_party = candidate_party_display(political_party_constant)
state_code = similar_object.state_code
vote_usa_politician_id = similar_object.vote_usa_politician_id
if object_is_candidate or object_is_organization or object_is_representative:
try:
politician = Politician.objects.create(
birth_date=birth_date,
facebook_url=facebook_url,
facebook_url_is_broken=similar_object.facebook_url_is_broken,
first_name=first_name,
gender=gender,
instagram_followers_count=similar_object.instagram_followers_count,
instagram_handle=instagram_handle,
last_name=last_name,
linkedin_url=linkedin_url,
middle_name=middle_name,
political_party=political_party,
profile_image_type_currently_active=similar_object.profile_image_type_currently_active,
state_code=state_code,
twitter_description=similar_object.twitter_description,
twitter_followers_count=similar_object.twitter_followers_count,
twitter_name=similar_object.twitter_name,
twitter_location=similar_object.twitter_location,
twitter_profile_background_image_url_https=similar_object.twitter_profile_background_image_url_https,
twitter_profile_banner_url_https=similar_object.twitter_profile_banner_url_https,
twitter_profile_image_url_https=similar_object.twitter_profile_image_url_https,
twitter_handle_updates_failing=similar_object.twitter_handle_updates_failing,
twitter_handle2_updates_failing=similar_object.twitter_handle2_updates_failing,
vote_usa_politician_id=vote_usa_politician_id,
we_vote_hosted_profile_facebook_image_url_large=similar_object.we_vote_hosted_profile_facebook_image_url_large,
we_vote_hosted_profile_facebook_image_url_medium=similar_object.we_vote_hosted_profile_facebook_image_url_medium,
we_vote_hosted_profile_facebook_image_url_tiny=similar_object.we_vote_hosted_profile_facebook_image_url_tiny,
we_vote_hosted_profile_twitter_image_url_large=similar_object.we_vote_hosted_profile_twitter_image_url_large,
we_vote_hosted_profile_twitter_image_url_medium=similar_object.we_vote_hosted_profile_twitter_image_url_medium,
we_vote_hosted_profile_twitter_image_url_tiny=similar_object.we_vote_hosted_profile_twitter_image_url_tiny,
we_vote_hosted_profile_uploaded_image_url_large=similar_object.we_vote_hosted_profile_uploaded_image_url_large,
we_vote_hosted_profile_uploaded_image_url_medium=similar_object.we_vote_hosted_profile_uploaded_image_url_medium,
we_vote_hosted_profile_uploaded_image_url_tiny=similar_object.we_vote_hosted_profile_uploaded_image_url_tiny,
we_vote_hosted_profile_vote_usa_image_url_large=similar_object.we_vote_hosted_profile_vote_usa_image_url_large,
we_vote_hosted_profile_vote_usa_image_url_medium=similar_object.we_vote_hosted_profile_vote_usa_image_url_medium,
we_vote_hosted_profile_vote_usa_image_url_tiny=similar_object.we_vote_hosted_profile_vote_usa_image_url_tiny,
we_vote_hosted_profile_image_url_large=similar_object.we_vote_hosted_profile_image_url_large,
we_vote_hosted_profile_image_url_medium=similar_object.we_vote_hosted_profile_image_url_medium,
we_vote_hosted_profile_image_url_tiny=similar_object.we_vote_hosted_profile_image_url_tiny,
wikipedia_url=similar_object.wikipedia_url,
youtube_url=similar_object.youtube_url,
)
status += "POLITICIAN_CREATED "
politician_created = True
politician_found = True
politician_id = politician.id
politician_we_vote_id = politician.we_vote_id
except Exception as e:
status += "FAILED_TO_CREATE_POLITICIAN: " + str(e) + " "
success = False
if politician_found:
from politician.controllers import add_twitter_handle_to_next_politician_spot
from representative.controllers import add_value_to_next_representative_spot
twitter_handles = []
try:
if object_is_candidate:
politician.ballotpedia_politician_url = similar_object.ballotpedia_candidate_url
politician.ballotpedia_politician_name = similar_object.ballotpedia_candidate_name
politician.politician_contact_form_url = similar_object.candidate_contact_form_url
politician.politician_url = similar_object.candidate_url
politician.google_civic_candidate_name = similar_object.google_civic_candidate_name
politician.google_civic_candidate_name2 = similar_object.google_civic_candidate_name2
politician.google_civic_candidate_name3 = similar_object.google_civic_candidate_name3
politician.maplight_id = similar_object.maplight_id
politician.politician_email = similar_object.candidate_email
politician.politician_name = similar_object.candidate_name
politician.politician_phone_number = similar_object.candidate_phone
politician.vote_smart_id = similar_object.vote_smart_id
politician.vote_usa_politician_id = similar_object.vote_usa_politician_id
politician.vote_usa_profile_image_url_https = similar_object.vote_usa_profile_image_url_https
if positive_value_exists(similar_object.candidate_twitter_handle):
twitter_handles.append(similar_object.candidate_twitter_handle)
if positive_value_exists(similar_object.candidate_twitter_handle2):
twitter_handles.append(similar_object.candidate_twitter_handle2)
if positive_value_exists(similar_object.candidate_twitter_handle3):
twitter_handles.append(similar_object.candidate_twitter_handle3)
elif object_is_organization:
politician.politician_name = similar_object.organization_name
if positive_value_exists(similar_object.organization_phone1):
politician.politician_phone_number = similar_object.organization_phone1
email_list = []
if positive_value_exists(similar_object.organization_email):
email_list.append(similar_object.organization_email)
if positive_value_exists(similar_object.facebook_email):
email_list.append(similar_object.facebook_email)
if 0 in email_list and positive_value_exists(email_list[0]):
politician.politician_email = email_list[0]
if 1 in email_list and positive_value_exists(email_list[1]):
politician.politician_email2 = email_list[1]
if positive_value_exists(similar_object.organization_twitter_handle):
twitter_handles.append(similar_object.organization_twitter_handle)
politician.vote_smart_id = similar_object.vote_smart_id
elif object_is_representative:
politician.ballotpedia_politician_url = similar_object.ballotpedia_representative_url
politician.google_civic_candidate_name = similar_object.google_civic_representative_name
politician.google_civic_candidate_name2 = similar_object.google_civic_representative_name2
politician.google_civic_candidate_name3 = similar_object.google_civic_representative_name3
if positive_value_exists(similar_object.representative_twitter_handle):
twitter_handles.append(similar_object.representative_twitter_handle)
politician.politician_contact_form_url = similar_object.representative_contact_form_url
politician.politician_email = similar_object.representative_email
politician.politician_email2 = similar_object.representative_email2
politician.politician_email3 = similar_object.representative_email3
politician.politician_name = similar_object.representative_name
politician.politician_phone_number = similar_object.representative_phone
if positive_value_exists(similar_object.representative_url):
results = add_value_to_next_representative_spot(
field_name_base='politician_url',
new_value_to_add=similar_object.representative_url,
representative=politician,
)
if results['success'] and results['values_changed']:
politician = results['representative']
if not results['success']:
status += results['status']
if positive_value_exists(similar_object.representative_url2):
results = add_value_to_next_representative_spot(
field_name_base='politician_url',
new_value_to_add=similar_object.representative_url2,
representative=politician,
)
if results['success'] and results['values_changed']:
politician = results['representative']
if not results['success']:
status += results['status']
if positive_value_exists(similar_object.representative_url3):
results = add_value_to_next_representative_spot(
field_name_base='politician_url',
new_value_to_add=similar_object.representative_url3,
representative=politician,
)
if results['success'] and results['values_changed']:
politician = results['representative']
if not results['success']:
status += results['status']
if positive_value_exists(similar_object.representative_twitter_handle):
twitter_handles.append(similar_object.representative_twitter_handle)
if positive_value_exists(similar_object.representative_twitter_handle2):
twitter_handles.append(similar_object.representative_twitter_handle2)
if positive_value_exists(similar_object.representative_twitter_handle3):
twitter_handles.append(similar_object.representative_twitter_handle3)
for one_twitter_handle in twitter_handles:
twitter_results = add_twitter_handle_to_next_politician_spot(
politician, one_twitter_handle)
if twitter_results['success']:
if twitter_results['values_changed']:
politician = twitter_results['politician']
else:
status += twitter_results['status']
success = False
politician.save()
except Exception as e:
status += "FAILED_TO_ADD_OTHER_FIELDS: " + str(e) + " "
success = False
# Generate seo_friendly_path
results = self.generate_seo_friendly_path(
politician_name=politician.politician_name,
politician_we_vote_id=politician.we_vote_id,
state_code=politician.state_code,
)
if results['seo_friendly_path_found']:
politician.seo_friendly_path = results['seo_friendly_path']
try:
politician.save()
except Exception as e:
status += "FAILED_TO_GENERATE_SEO_FRIENDLY_PATH: " + str(e) + " "
success = False
results = {
'success': success,
'status': status,
'politician': politician,
'politician_created': politician_created,
'politician_found': politician_found,
'politician_id': politician_id,
'politician_we_vote_id': politician_we_vote_id,
}
return results
def politician_photo_url(self, politician_id):
politician_manager = PoliticianManager()
results = politician_manager.retrieve_politician(politician_id=politician_id, read_only=True)
if results['success']:
politician = results['politician']
return politician.politician_photo_url()
return ""
    def retrieve_politician(
            self,
            politician_id=0,
            politician_we_vote_id='',
            read_only=False,
            seo_friendly_path='',
            voter_we_vote_id=None):
        """
        Retrieve one Politician, trying identifiers in this order of precedence:
        politician_id, then politician_we_vote_id, then seo_friendly_path.
        The we_vote_id and seo_friendly_path matches are case-insensitive.

        :param politician_id: internal database id
        :param politician_we_vote_id: We Vote id string
        :param read_only: when True, query the 'readonly' database alias
        :param seo_friendly_path: previously generated SEO path for the politician
        :param voter_we_vote_id: currently unused -- reserved for owner checks (see TODO below)
        :return: dict with success/status, politician (or None), ids, and exception markers
        """
        error_result = False
        exception_does_not_exist = False
        exception_multiple_object_returned = False
        politician = None
        politician_found = False
        success = True
        status = ''
        try:
            if positive_value_exists(politician_id):
                if positive_value_exists(read_only):
                    politician = Politician.objects.using('readonly').get(id=politician_id)
                else:
                    politician = Politician.objects.get(id=politician_id)
                politician_id = politician.id
                politician_we_vote_id = politician.we_vote_id
                politician_found = True
            elif positive_value_exists(politician_we_vote_id):
                if positive_value_exists(read_only):
                    politician = Politician.objects.using('readonly').get(we_vote_id__iexact=politician_we_vote_id)
                else:
                    politician = Politician.objects.get(we_vote_id__iexact=politician_we_vote_id)
                politician_id = politician.id
                politician_we_vote_id = politician.we_vote_id
                politician_found = True
            elif positive_value_exists(seo_friendly_path):
                if positive_value_exists(read_only):
                    politician = Politician.objects.using('readonly').get(seo_friendly_path__iexact=seo_friendly_path)
                else:
                    politician = Politician.objects.get(seo_friendly_path__iexact=seo_friendly_path)
                politician_id = politician.id
                politician_we_vote_id = politician.we_vote_id
                politician_found = True
        except Politician.MultipleObjectsReturned as e:
            handle_record_found_more_than_one_exception(e, logger=logger)
            error_result = True
            exception_multiple_object_returned = True
            success = False
            status += "MULTIPLE_POLITICIANS_FOUND "
        except Politician.DoesNotExist:
            # NOTE: a miss is not treated as a failure here -- success stays True,
            # only politician_found / DoesNotExist in the results signal the miss.
            error_result = True
            exception_does_not_exist = True
            status += "NO_POLITICIAN_FOUND "
        except Exception as e:
            success = False
            status += "PROBLEM_WITH_RETRIEVE_POLITICIAN: " + str(e) + ' '

        # TODO: Implement this for Politicians
        # if positive_value_exists(campaignx_found):
        #     if positive_value_exists(campaignx_we_vote_id) and positive_value_exists(voter_we_vote_id):
        #         viewer_is_owner = campaignx_manager.is_voter_campaignx_owner(
        #             campaignx_we_vote_id=campaignx_we_vote_id, voter_we_vote_id=voter_we_vote_id)
        #
        #     campaignx_owner_object_list = campaignx_manager.retrieve_campaignx_owner_list(
        #         campaignx_we_vote_id_list=[campaignx_we_vote_id], viewer_is_owner=False)
        #     for campaignx_owner in campaignx_owner_object_list:
        #         campaign_owner_dict = {
        #             'organization_name':                        campaignx_owner.organization_name,
        #             'organization_we_vote_id':                  campaignx_owner.organization_we_vote_id,
        #             'feature_this_profile_image':               campaignx_owner.feature_this_profile_image,
        #             'visible_to_public':                        campaignx_owner.visible_to_public,
        #             'we_vote_hosted_profile_image_url_medium': campaignx_owner.we_vote_hosted_profile_image_url_medium,
        #             'we_vote_hosted_profile_image_url_tiny':    campaignx_owner.we_vote_hosted_profile_image_url_tiny,
        #         }
        #         campaignx_owner_list.append(campaign_owner_dict)

        politician_owner_list = []
        results = {
            'success':                      success,
            'status':                       status,
            'politician':                   politician,
            'politician_found':             politician_found,
            'politician_id':                politician_id,
            'politician_owner_list':        politician_owner_list,
            'politician_we_vote_id':        politician_we_vote_id,
            'error_result':                 error_result,
            'DoesNotExist':                 exception_does_not_exist,
            'MultipleObjectsReturned':      exception_multiple_object_returned,
        }
        return results
    def retrieve_politician_from_we_vote_id(self, politician_we_vote_id):
        """Convenience wrapper: retrieve_politician looked up by we_vote_id only (not read-only)."""
        return self.retrieve_politician(politician_we_vote_id=politician_we_vote_id)
def create_politician_name_filter(
self,
filters=[],
politician_name='',
queryset=None,
return_close_matches=False,
state_code=''):
filter_set = False
if politician_name:
if positive_value_exists(return_close_matches):
if positive_value_exists(state_code):
new_filter = Q(politician_name__icontains=politician_name,
state_code__iexact=state_code)
else:
new_filter = Q(politician_name__icontains=politician_name)
else:
if positive_value_exists(state_code):
new_filter = Q(politician_name__iexact=politician_name,
state_code__iexact=state_code)
else:
new_filter = Q(politician_name__iexact=politician_name)
filter_set = True
filters.append(new_filter)
search_words = politician_name.split()
if len(search_words) > 0:
search_filters = []
for one_word in search_words:
if positive_value_exists(state_code):
search_filter = Q(
politician_name__icontains=one_word,
state_code__iexact=state_code)
else:
search_filter = Q(politician_name__icontains=one_word)
search_filters.append(search_filter)
# Add the first query
if len(search_filters) > 0:
final_search_filters = search_filters.pop()
if positive_value_exists(return_close_matches):
# ..."OR" the remaining items in the list
for item in search_filters:
final_search_filters |= item
else:
# ..."AND" the remaining items in the list
for item in search_filters:
final_search_filters &= item
queryset = queryset.filter(final_search_filters)
results = {
'filters': filters,
'filter_set': filter_set,
'queryset': queryset,
}
return results
def retrieve_all_politicians_that_might_match_similar_object(
self,
facebook_url_list=[],
full_name_list=[],
instagram_handle='',
maplight_id='',
return_close_matches=True,
state_code='',
twitter_handle_list=[],
vote_smart_id='',
vote_usa_politician_id='',
read_only=True,
):
politician_list = []
politician_list_found = False
politician = None
politician_found = False
status = ''
try:
filter_set = False
if positive_value_exists(read_only):
politician_queryset = Politician.objects.using('readonly').all()
else:
politician_queryset = Politician.objects.all()
filters = []
for facebook_url in facebook_url_list:
filter_set = True
if positive_value_exists(facebook_url):
new_filter = (
Q(facebook_url__iexact=facebook_url) |
Q(facebook_url2__iexact=facebook_url) |
Q(facebook_url3__iexact=facebook_url)
)
filters.append(new_filter)
if positive_value_exists(instagram_handle):
new_filter = Q(instagram_handle__iexact=instagram_handle)
filter_set = True
filters.append(new_filter)
if positive_value_exists(maplight_id):
new_filter = Q(maplight_id__iexact=maplight_id)
filter_set = True
filters.append(new_filter)
for twitter_handle in twitter_handle_list:
if positive_value_exists(twitter_handle):
filter_set = True
new_filter = (
Q(politician_twitter_handle__iexact=twitter_handle) |
Q(politician_twitter_handle2__iexact=twitter_handle) |
Q(politician_twitter_handle3__iexact=twitter_handle) |
Q(politician_twitter_handle4__iexact=twitter_handle) |
Q(politician_twitter_handle5__iexact=twitter_handle)
)
filters.append(new_filter)
for full_name in full_name_list:
if positive_value_exists(full_name):
filter_results = self.create_politician_name_filter(
filters=filters,
politician_name=full_name,
queryset=politician_queryset,
return_close_matches=return_close_matches,
state_code=state_code,
)
if filter_results['filter_set']:
filter_set = True
filters = filter_results['filters']
politician_queryset = filter_results['queryset']
if positive_value_exists(vote_smart_id):
new_filter = Q(vote_smart_id__iexact=vote_smart_id)
filter_set = True
filters.append(new_filter)
if positive_value_exists(vote_usa_politician_id):
new_filter = Q(vote_usa_politician_id__iexact=vote_usa_politician_id)
filter_set = True
filters.append(new_filter)
# Add the first query
if len(filters):
final_filters = filters.pop()
# ...and "OR" the remaining items in the list
for item in filters:
final_filters |= item
politician_queryset = politician_queryset.filter(final_filters)
if filter_set:
politician_list = list(politician_queryset)
else:
politician_list = []
if len(politician_list) == 1:
politician_found = True
politician_list_found = False
politician = politician_list[0]
status += 'ONE_POLITICIAN_RETRIEVED '
elif len(politician_list) > 1:
politician_found = False
politician_list_found = True
status += 'POLITICIAN_LIST_RETRIEVED '
else:
status += 'NO_POLITICIANS_RETRIEVED '
success = True
except Exception as e:
status = 'FAILED retrieve_all_politicians_for_office ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
success = False
# TODO DALE If nothing found, look for a national entry for this candidate -- i.e. Presidential candidates
if not politician_found and not politician_list_found:
pass
results = {
'success': success,
'status': status,
'politician_list_found': politician_list_found,
'politician_list': politician_list,
'politician_found': politician_found,
'politician': politician,
}
return results
def reset_politician_image_details_from_candidate(self, candidate, twitter_profile_image_url_https,
twitter_profile_background_image_url_https,
twitter_profile_banner_url_https):
"""
Reset an Politician entry with original image details from we vote image.
:param candidate:
:param twitter_profile_image_url_https:
:param twitter_profile_background_image_url_https:
:param twitter_profile_banner_url_https:
:return:
"""
politician_details = self.retrieve_politician(
politician_we_vote_id=candidate.politician_we_vote_id,
read_only=False)
politician = politician_details['politician']
if politician_details['success']:
politician.we_vote_hosted_profile_image_url_medium = ''
politician.we_vote_hosted_profile_image_url_large = ''
politician.we_vote_hosted_profile_image_url_tiny = ''
politician.save()
success = True
status = "RESET_POLITICIAN_IMAGE_DETAILS"
else:
success = False
status = "POLITICIAN_NOT_FOUND_IN_RESET_IMAGE_DETAILS"
results = {
'success': success,
'status': status,
'politician': politician
}
return results
def search_politicians(self, name_search_terms=None):
status = ""
success = True
politician_search_results_list = []
try:
queryset = Politician.objects.all()
if name_search_terms is not None:
name_search_words = name_search_terms.split()
else:
name_search_words = []
for one_word in name_search_words:
filters = [] # Reset for each search word
new_filter = Q(politician_name__icontains=one_word)
filters.append(new_filter)
new_filter = Q(politician_twitter_handle__icontains=one_word)
filters.append(new_filter)
new_filter = Q(politician_twitter_handle2__icontains=one_word)
filters.append(new_filter)
new_filter = Q(politician_twitter_handle3__icontains=one_word)
filters.append(new_filter)
new_filter = Q(politician_twitter_handle4__icontains=one_word)
filters.append(new_filter)
new_filter = Q(politician_twitter_handle5__icontains=one_word)
filters.append(new_filter)
# Add the first query
if len(filters):
final_filters = filters.pop()
# ...and "OR" the remaining items in the list
for item in filters:
final_filters |= item
queryset = queryset.filter(final_filters)
politician_search_results_list = list(queryset)
except Exception as e:
success = False
status += "ERROR_SEARCHING_POLITICIANS: " + str(e) + " "
results = {
'status': status,
'success': success,
'politician_search_results_list': politician_search_results_list,
}
return results
# TODO: Get rid of this function and replace with update_politician_details_from_candidate in politician/controllers.py
def update_politician_details_from_candidate(self, candidate):
"""
Update a politician entry with details retrieved from candidate
:param candidate:
:return:
"""
status = ''
success = True
values_changed = False
politician_details = self.retrieve_politician(
politician_we_vote_id=candidate.politician_we_vote_id,
read_only=False)
politician = politician_details['politician']
from politician.controllers import add_twitter_handle_to_next_politician_spot
if politician_details['success'] and politician:
# Politician found so update politician details with candidate details
first_name = extract_first_name_from_full_name(candidate.candidate_name)
middle_name = extract_middle_name_from_full_name(candidate.candidate_name)
last_name = extract_last_name_from_full_name(candidate.candidate_name)
if positive_value_exists(first_name) and first_name != politician.first_name:
politician.first_name = first_name
values_changed = True
if positive_value_exists(last_name) and last_name != politician.last_name:
politician.last_name = last_name
values_changed = True
if positive_value_exists(middle_name) and middle_name != politician.middle_name:
politician.middle_name = middle_name
values_changed = True
if positive_value_exists(candidate.party):
if convert_to_political_party_constant(candidate.party) != politician.political_party:
politician.political_party = convert_to_political_party_constant(candidate.party)
values_changed = True
if positive_value_exists(candidate.vote_smart_id) and candidate.vote_smart_id != politician.vote_smart_id:
politician.vote_smart_id = candidate.vote_smart_id
values_changed = True
if positive_value_exists(candidate.maplight_id) and candidate.maplight_id != politician.maplight_id:
politician.maplight_id = candidate.maplight_id
values_changed = True
if positive_value_exists(candidate.candidate_name) and \
candidate.candidate_name != politician.politician_name:
politician.politician_name = candidate.candidate_name
values_changed = True
if positive_value_exists(candidate.google_civic_candidate_name) and \
candidate.google_civic_candidate_name != politician.google_civic_candidate_name:
politician.google_civic_candidate_name = candidate.google_civic_candidate_name
values_changed = True
if positive_value_exists(candidate.state_code) and candidate.state_code != politician.state_code:
politician.state_code = candidate.state_code
values_changed = True
if positive_value_exists(candidate.candidate_twitter_handle):
add_results = add_twitter_handle_to_next_politician_spot(politician, candidate.candidate_twitter_handle)
if add_results['success']:
politician = add_results['politician']
values_changed = add_results['values_changed']
else:
status += 'FAILED_TO_ADD_ONE_TWITTER_HANDLE '
success = False
if positive_value_exists(candidate.we_vote_hosted_profile_image_url_large) and \
candidate.we_vote_hosted_profile_image_url_large != \
politician.we_vote_hosted_profile_image_url_large:
politician.we_vote_hosted_profile_image_url_large = candidate.we_vote_hosted_profile_image_url_large
values_changed = True
if positive_value_exists(candidate.we_vote_hosted_profile_image_url_medium) and \
candidate.we_vote_hosted_profile_image_url_medium != \
politician.we_vote_hosted_profile_image_url_medium:
politician.we_vote_hosted_profile_image_url_medium = candidate.we_vote_hosted_profile_image_url_medium
values_changed = True
if positive_value_exists(candidate.we_vote_hosted_profile_image_url_tiny) and \
candidate.we_vote_hosted_profile_image_url_tiny != politician.we_vote_hosted_profile_image_url_tiny:
politician.we_vote_hosted_profile_image_url_tiny = candidate.we_vote_hosted_profile_image_url_tiny
values_changed = True
if values_changed:
politician.save()
status += "SAVED_POLITICIAN_DETAILS"
else:
status += "NO_CHANGES_SAVED_TO_POLITICIAN_DETAILS"
else:
success = False
status += "POLITICIAN_NOT_FOUND"
results = {
'success': success,
'status': status,
'politician': politician
}
return results
def update_or_create_politician_from_candidate(self, candidate):
"""
Take We Vote candidate object, and map it to update_or_create_politician
:param candidate:
:return:
"""
first_name = extract_first_name_from_full_name(candidate.candidate_name)
middle_name = extract_middle_name_from_full_name(candidate.candidate_name)
last_name = extract_last_name_from_full_name(candidate.candidate_name)
political_party = convert_to_political_party_constant(candidate.party)
# TODO Add all other identifiers from other systems
updated_politician_values = {
'vote_smart_id': candidate.vote_smart_id,
'vote_usa_politician_id': candidate.vote_usa_politician_id,
'maplight_id': candidate.maplight_id,
'politician_name': candidate.candidate_name,
'google_civic_candidate_name': candidate.google_civic_candidate_name,
'state_code': candidate.state_code,
# See below
# 'politician_twitter_handle': candidate.candidate_twitter_handle,
'we_vote_hosted_profile_image_url_large': candidate.we_vote_hosted_profile_image_url_large,
'we_vote_hosted_profile_image_url_medium': candidate.we_vote_hosted_profile_image_url_medium,
'we_vote_hosted_profile_image_url_tiny': candidate.we_vote_hosted_profile_image_url_tiny,
'first_name': first_name,
'middle_name': middle_name,
'last_name': last_name,
'political_party': political_party,
}
results = self.update_or_create_politician(
updated_politician_values=updated_politician_values,
politician_we_vote_id=candidate.politician_we_vote_id,
vote_usa_politician_id=candidate.vote_usa_politician_id,
candidate_twitter_handle=candidate.candidate_twitter_handle,
candidate_name=candidate.candidate_name,
state_code=candidate.state_code)
from politician.controllers import add_twitter_handle_to_next_politician_spot
if results['success']:
politician = results['politician']
twitter_results = add_twitter_handle_to_next_politician_spot(politician, candidate.candidate_twitter_handle)
if twitter_results['success']:
if twitter_results['values_changed']:
politician = twitter_results['politician']
politician.save()
else:
results['status'] += twitter_results['status']
results['success'] = False
return results
def update_or_create_politician(
self,
updated_politician_values={},
politician_we_vote_id='',
vote_smart_id=0,
vote_usa_politician_id='',
maplight_id="",
candidate_twitter_handle="",
candidate_name="",
state_code="",
first_name="",
middle_name="",
last_name=""):
"""
Either update or create a politician entry. The individual variables passed in are for the purpose of finding
a politician to update, and the updated_politician_values variable contains the values we want to update to.
"""
new_politician_created = False
politician_found = False
politician = Politician()
status = ''
try:
# Note: When we decide to start updating candidate_name elsewhere within We Vote, we should stop
# updating candidate_name via subsequent Google Civic imports
# If coming from a record that has already been in We Vote
if positive_value_exists(politician_we_vote_id):
politician, new_politician_created = \
Politician.objects.update_or_create(
we_vote_id__iexact=politician_we_vote_id,
defaults=updated_politician_values)
politician_found = True
elif positive_value_exists(vote_smart_id):
politician, new_politician_created = \
Politician.objects.update_or_create(
vote_smart_id=vote_smart_id,
defaults=updated_politician_values)
politician_found = True
elif positive_value_exists(vote_usa_politician_id):
politician, new_politician_created = \
Politician.objects.update_or_create(
vote_usa_politician_id=vote_usa_politician_id,
defaults=updated_politician_values)
politician_found = True
elif positive_value_exists(candidate_twitter_handle):
# For incoming twitter_handle we need to approach this differently
query = Politician.objects.all.filter(
Q(politician_twitter_handle__iexact=candidate_twitter_handle) |
Q(politician_twitter_handle2__iexact=candidate_twitter_handle) |
Q(politician_twitter_handle3__iexact=candidate_twitter_handle) |
Q(politician_twitter_handle4__iexact=candidate_twitter_handle) |
Q(politician_twitter_handle5__iexact=candidate_twitter_handle)
)
results_list = list(query)
if len(results_list) > 0:
politician = results_list[0]
politician_found = True
else:
# Create politician
politician = Politician.objects.create(defaults=updated_politician_values)
new_politician_created = True
politician_found = True
elif positive_value_exists(candidate_name) and positive_value_exists(state_code):
state_code = state_code.lower()
politician, new_politician_created = \
Politician.objects.update_or_create(
politician_name=candidate_name,
state_code=state_code,
defaults=updated_politician_values)
politician_found = True
elif positive_value_exists(first_name) and positive_value_exists(last_name) \
and positive_value_exists(state_code):
state_code = state_code.lower()
politician, new_politician_created = \
Politician.objects.update_or_create(
first_name=first_name,
last_name=last_name,
state_code=state_code,
defaults=updated_politician_values)
politician_found = True
else:
# If here we have exhausted our set of unique identifiers
politician_found = False
pass
success = True
if politician_found:
status += 'POLITICIAN_SAVED '
else:
status += 'POLITICIAN_NOT_SAVED '
except Exception as e:
success = False
status = 'UNABLE_TO_UPDATE_OR_CREATE_POLITICIAN: ' + str(e) + ' '
results = {
'success': success,
'status': status,
'politician_created': new_politician_created,
'politician_found': politician_found,
'politician': politician,
}
return results
def fetch_politician_id_from_we_vote_id(self, we_vote_id):
politician_manager = PoliticianManager()
results = politician_manager.retrieve_politician(politician_we_vote_id=we_vote_id, read_only=True)
if results['success']:
return results['politician_id']
return 0
def fetch_politician_we_vote_id_from_id(self, politician_id):
politician_manager = PoliticianManager()
results = politician_manager.retrieve_politician(politician_id=politician_id, read_only=True)
if results['success']:
return results['politician_we_vote_id']
return ''
    def fetch_politicians_are_not_duplicates_list_we_vote_ids(self, politician_we_vote_id):
        """Return just the we_vote_id list from retrieve_politicians_are_not_duplicates_list."""
        results = self.retrieve_politicians_are_not_duplicates_list(politician_we_vote_id)
        return results['politicians_are_not_duplicates_list_we_vote_ids']
    def create_politician_row_entry(
            self,
            politician_name='',
            politician_first_name='',
            politician_middle_name='',
            politician_last_name='',
            ctcl_uuid='',
            political_party='',
            politician_email='',
            politician_email2='',
            politician_email3='',
            politician_phone_number='',
            politician_phone_number2='',
            politician_phone_number3='',
            politician_twitter_handle='',
            politician_twitter_handle2='',
            politician_twitter_handle3='',
            politician_twitter_handle4='',
            politician_twitter_handle5='',
            politician_facebook_id='',
            politician_googleplus_id='',
            politician_youtube_id='',
            politician_website_url=''):
        """
        Create a brand-new Politician row from the values provided (used by import tooling).

        :param politician_name:
        :param politician_first_name:
        :param politician_middle_name:
        :param politician_last_name:
        :param ctcl_uuid:
        :param political_party:
        :param politician_email:
        :param politician_email2:
        :param politician_email3:
        :param politician_phone_number:
        :param politician_phone_number2:
        :param politician_phone_number3:
        :param politician_twitter_handle:
        :param politician_twitter_handle2:
        :param politician_twitter_handle3:
        :param politician_twitter_handle4:
        :param politician_twitter_handle5:
        :param politician_facebook_id:
        :param politician_googleplus_id:
        :param politician_youtube_id:
        :param politician_website_url:
        :return: dict with success/status, new_politician_created, politician_updated
                 (always False here), and the new_politician (or '' on failure)
        """
        success = False
        status = ""
        politician_updated = False
        new_politician_created = False
        new_politician = ''
        try:
            # Note: politician_website_url is stored on the model field 'politician_url'
            new_politician = Politician.objects.create(
                politician_name=politician_name,
                first_name=politician_first_name,
                middle_name=politician_middle_name,
                last_name=politician_last_name,
                political_party=political_party,
                politician_email=politician_email,
                politician_email2=politician_email2,
                politician_email3=politician_email3,
                politician_phone_number=politician_phone_number,
                politician_phone_number2=politician_phone_number2,
                politician_phone_number3=politician_phone_number3,
                politician_twitter_handle=politician_twitter_handle,
                politician_twitter_handle2=politician_twitter_handle2,
                politician_twitter_handle3=politician_twitter_handle3,
                politician_twitter_handle4=politician_twitter_handle4,
                politician_twitter_handle5=politician_twitter_handle5,
                politician_facebook_id=politician_facebook_id,
                politician_googleplus_id=politician_googleplus_id,
                politician_youtube_id=politician_youtube_id,
                politician_url=politician_website_url,
                ctcl_uuid=ctcl_uuid)
            if new_politician:
                success = True
                status += "POLITICIAN_CREATED "
                new_politician_created = True
            else:
                # Defensive only: objects.create() never returns a falsy value
                success = False
                status += "POLITICIAN_CREATE_FAILED "
        except Exception as e:
            success = False
            new_politician_created = False
            status += "POLITICIAN_RETRIEVE_ERROR "
            handle_exception(e, logger=logger, exception_message=status)
        results = {
                'success':                  success,
                'status':                   status,
                'new_politician_created':   new_politician_created,
                'politician_updated':       politician_updated,
                'new_politician':           new_politician,
            }
        return results
def update_politician_row_entry(
self,
politician_name='',
politician_first_name='',
politician_middle_name='',
politician_last_name='',
ctcl_uuid='',
political_party='',
politician_email='',
politician_email2='',
politician_email3='',
politician_twitter_handle='',
politician_twitter_handle2='',
politician_twitter_handle3='',
politician_twitter_handle4='',
politician_twitter_handle5='',
politician_phone_number='',
politician_phone_number2='',
politician_phone_number3='',
politician_facebook_id='',
politician_googleplus_id='',
politician_youtube_id='',
politician_website_url='',
politician_we_vote_id=''):
"""
:param politician_name:
:param politician_first_name:
:param politician_middle_name:
:param politician_last_name:
:param ctcl_uuid:
:param political_party:
:param politician_email:
:param politician_email2:
:param politician_email3:
:param politician_twitter_handle:
:param politician_twitter_handle2:
:param politician_twitter_handle3:
:param politician_twitter_handle4:
:param politician_twitter_handle5:
:param politician_phone_number:
:param politician_phone_number2:
:param politician_phone_number3:
:param politician_facebook_id:
:param politician_googleplus_id:
:param politician_youtube_id:
:param politician_website_url:
:param politician_we_vote_id:
:return:
"""
success = False
status = ""
politician_updated = False
# new_politician_created = False
# new_politician = ''
existing_politician_entry = ''
try:
existing_politician_entry = Politician.objects.get(we_vote_id__iexact=politician_we_vote_id)
if existing_politician_entry:
# found the existing entry, update the values
existing_politician_entry.politician_name = politician_name
existing_politician_entry.first_name = politician_first_name
existing_politician_entry.middle_name = politician_middle_name
existing_politician_entry.last_name = politician_last_name
existing_politician_entry.party_name = political_party
existing_politician_entry.ctcl_uuid = ctcl_uuid
existing_politician_entry.politician_email = politician_email
existing_politician_entry.politician_email2 = politician_email2
existing_politician_entry.politician_email3 = politician_email3
existing_politician_entry.politician_phone_number = politician_phone_number
existing_politician_entry.politician_phone_number2 = politician_phone_number2
existing_politician_entry.politician_phone_number3 = politician_phone_number3
existing_politician_entry.politician_twitter_handle = politician_twitter_handle
existing_politician_entry.politician_twitter_handle2 = politician_twitter_handle2
existing_politician_entry.politician_twitter_handle3 = politician_twitter_handle3
existing_politician_entry.politician_twitter_handle4 = politician_twitter_handle4
existing_politician_entry.politician_twitter_handle5 = politician_twitter_handle5
existing_politician_entry.politician_facebook_id = politician_facebook_id
existing_politician_entry.politician_googleplus_id = politician_googleplus_id
existing_politician_entry.politician_youtube_id = politician_youtube_id
existing_politician_entry.politician_url = politician_website_url
# now go ahead and save this entry (update)
existing_politician_entry.save()
politician_updated = True
success = True
status = "POLITICIAN_UPDATED"
except Exception as e:
success = False
politician_updated = False
status = "POLITICIAN_RETRIEVE_ERROR"
handle_exception(e, logger=logger, exception_message=status)
results = {
'success': success,
'status': status,
'politician_updated': politician_updated,
'updated_politician': existing_politician_entry,
}
return results
# def delete_all_politician_data():
# with open(LEGISLATORS_CURRENT_FILE, 'rU') as politicians_current_data:
# politicians_current_data.readline() # Skip the header
# reader = csv.reader(politicians_current_data) # Create a regular tuple reader
# for index, politician_row in enumerate(reader):
# if index > 3:
# break
# politician_entry = Politician.objects.order_by('last_name')[0]
# politician_entry.delete()
def retrieve_politician_list(
self,
limit_to_this_state_code="",
politician_we_vote_id_list=[],
read_only=False,
):
"""
:param limit_to_this_state_code:
:param politician_we_vote_id_list:
:param read_only:
:return:
"""
status = ""
politician_list = []
politician_list_found = False
try:
if positive_value_exists(read_only):
politician_query = Politician.objects.using('readonly').all()
else:
politician_query = Politician.objects.all()
if len(politician_we_vote_id_list):
politician_query = politician_query.filter(we_vote_id__in=politician_we_vote_id_list)
if positive_value_exists(limit_to_this_state_code):
politician_query = politician_query.filter(state_code__iexact=limit_to_this_state_code)
politician_list = list(politician_query)
if len(politician_list):
politician_list_found = True
status += 'POLITICIANS_RETRIEVED '
success = True
else:
status += 'NO_POLITICIANS_RETRIEVED '
success = True
except Politician.DoesNotExist:
# No politicians found. Not a problem.
status += 'NO_POLITICIANS_FOUND_DoesNotExist '
politician_list_objects = []
success = True
except Exception as e:
handle_exception(e, logger=logger)
status += 'FAILED-retrieve_politicians_for_specific_elections: ' + str(e) + ' '
success = False
results = {
'success': success,
'status': status,
'politician_list_found': politician_list_found,
'politician_list': politician_list,
}
return results
    def retrieve_politicians_from_non_unique_identifiers(
            self,
            state_code='',
            twitter_handle_list=[],
            politician_name='',
            ignore_politician_id_list=[],
            read_only=False):
        """
        Find politicians that may match the incoming identifiers. Three stages are tried
        in priority order, and searching stops at the first stage that produces any match:
        1) any of the Twitter handles (against all five stored handle fields),
        2) exact (case-insensitive) name match against politician_name and the three
           google_civic_candidate_name fields,
        3) containment match on both the extracted first AND last name.
        :param state_code: optionally limit matches to this state (case-insensitive)
        :param twitter_handle_list: candidate Twitter handles to search for
        :param politician_name: full display name used for stages 2 and 3
        :param ignore_politician_id_list: we_vote_ids to exclude from every stage
        :param read_only: when True, query the read-only database replica
        :return: results dict; 'politician' is set when exactly one match was found,
                 'politician_list' is populated (and 'multiple_entries_found' is True)
                 when several matches were found
        """
        keep_looking_for_duplicates = True
        politician = None
        politician_found = False
        politician_list = []
        politician_list_found = False
        multiple_entries_found = False
        success = True
        status = ""

        # Stage 1: match on any of the Twitter handles
        if keep_looking_for_duplicates and len(twitter_handle_list) > 0:
            try:
                if positive_value_exists(read_only):
                    politician_query = Politician.objects.using('readonly').all()
                else:
                    politician_query = Politician.objects.all()
                twitter_filters = []
                for one_twitter_handle in twitter_handle_list:
                    one_twitter_handle_cleaned = extract_twitter_handle_from_text_string(one_twitter_handle)
                    # Each incoming handle may be stored in any of the five handle columns
                    new_filter = (
                        Q(politician_twitter_handle__iexact=one_twitter_handle_cleaned) |
                        Q(politician_twitter_handle2__iexact=one_twitter_handle_cleaned) |
                        Q(politician_twitter_handle3__iexact=one_twitter_handle_cleaned) |
                        Q(politician_twitter_handle4__iexact=one_twitter_handle_cleaned) |
                        Q(politician_twitter_handle5__iexact=one_twitter_handle_cleaned)
                    )
                    twitter_filters.append(new_filter)
                # Add the first query
                final_filters = twitter_filters.pop()
                # ...and "OR" the remaining items in the list
                for item in twitter_filters:
                    final_filters |= item
                politician_query = politician_query.filter(final_filters)
                if positive_value_exists(state_code):
                    politician_query = politician_query.filter(state_code__iexact=state_code)
                if positive_value_exists(ignore_politician_id_list):
                    politician_query = politician_query.exclude(we_vote_id__in=ignore_politician_id_list)
                politician_list = list(politician_query)
                if len(politician_list):
                    # At least one entry exists
                    status += 'RETRIEVE_POLITICIANS_FROM_NON_UNIQUE-POLITICIAN_LIST_RETRIEVED '
                    # if a single entry matches, update that entry
                    if len(politician_list) == 1:
                        multiple_entries_found = False
                        politician = politician_list[0]
                        politician_found = True
                        keep_looking_for_duplicates = False
                        success = True
                        status += "POLITICIAN_FOUND_BY_TWITTER "
                    else:
                        # more than one entry found
                        politician_list_found = True
                        multiple_entries_found = True
                        keep_looking_for_duplicates = False  # Deal with multiple Twitter duplicates manually
                        status += "MULTIPLE_TWITTER_MATCHES "
            except Politician.DoesNotExist:
                success = True
                status += "RETRIEVE_POLITICIANS_FROM_NON_UNIQUE-POLITICIAN_NOT_FOUND "
            except Exception as e:
                status += "RETRIEVE_POLITICIANS_FROM_NON_UNIQUE-POLITICIAN_QUERY_FAILED1 " + str(e) + " "
                success = False
                keep_looking_for_duplicates = False

        # twitter handle does not exist, next look up against other data that might match
        # Stage 2: exact (case-insensitive) match on the full name
        if keep_looking_for_duplicates and positive_value_exists(politician_name):
            # Search by Candidate name exact match
            try:
                if positive_value_exists(read_only):
                    politician_query = Politician.objects.using('readonly').all()
                else:
                    politician_query = Politician.objects.all()
                politician_query = politician_query.filter(
                    Q(politician_name__iexact=politician_name) |
                    Q(google_civic_candidate_name__iexact=politician_name) |
                    Q(google_civic_candidate_name2__iexact=politician_name) |
                    Q(google_civic_candidate_name3__iexact=politician_name)
                )
                if positive_value_exists(state_code):
                    politician_query = politician_query.filter(state_code__iexact=state_code)
                if positive_value_exists(ignore_politician_id_list):
                    politician_query = politician_query.exclude(we_vote_id__in=ignore_politician_id_list)
                politician_list = list(politician_query)
                if len(politician_list):
                    # entry exists
                    status += 'POLITICIAN_ENTRY_EXISTS1 '
                    success = True
                    # if a single entry matches, update that entry
                    if len(politician_list) == 1:
                        politician = politician_list[0]
                        politician_found = True
                        status += politician.we_vote_id + " "
                        keep_looking_for_duplicates = False
                    else:
                        # more than one entry found with a match in Politician
                        politician_list_found = True
                        keep_looking_for_duplicates = False
                        multiple_entries_found = True
                else:
                    success = True
                    status += 'POLITICIAN_ENTRY_NOT_FOUND-EXACT '
            except Politician.DoesNotExist:
                success = True
                status += "RETRIEVE_POLITICIANS_FROM_NON_UNIQUE-POLITICIAN_NOT_FOUND-EXACT_MATCH "
            except Exception as e:
                status += "RETRIEVE_POLITICIANS_FROM_NON_UNIQUE-POLITICIAN_QUERY_FAILED2: " + str(e) + " "
                success = False

        # Stage 3: looser match requiring both first AND last name to appear
        if keep_looking_for_duplicates and positive_value_exists(politician_name):
            # Search for Candidate(s) that contains the same first and last names
            first_name = extract_first_name_from_full_name(politician_name)
            last_name = extract_last_name_from_full_name(politician_name)
            if positive_value_exists(first_name) and positive_value_exists(last_name):
                try:
                    if positive_value_exists(read_only):
                        politician_query = Politician.objects.using('readonly').all()
                    else:
                        politician_query = Politician.objects.all()
                    politician_query = politician_query.filter(
                        (Q(politician_name__icontains=first_name) & Q(politician_name__icontains=last_name)) |
                        (Q(google_civic_candidate_name__icontains=first_name) &
                         Q(google_civic_candidate_name__icontains=last_name)) |
                        (Q(google_civic_candidate_name2__icontains=first_name) &
                         Q(google_civic_candidate_name2__icontains=last_name)) |
                        (Q(google_civic_candidate_name3__icontains=first_name) &
                         Q(google_civic_candidate_name3__icontains=last_name))
                    )
                    if positive_value_exists(state_code):
                        politician_query = politician_query.filter(state_code__iexact=state_code)
                    if positive_value_exists(ignore_politician_id_list):
                        politician_query = politician_query.exclude(we_vote_id__in=ignore_politician_id_list)
                    politician_list = list(politician_query)
                    if len(politician_list):
                        # entry exists
                        status += 'POLITICIAN_ENTRY_EXISTS2 '
                        success = True
                        # if a single entry matches, update that entry
                        if len(politician_list) == 1:
                            politician = politician_list[0]
                            politician_found = True
                            status += politician.we_vote_id + " "
                            keep_looking_for_duplicates = False
                        else:
                            # more than one entry found with a match in Politician
                            politician_list_found = True
                            keep_looking_for_duplicates = False
                            multiple_entries_found = True
                    else:
                        status += 'POLITICIAN_ENTRY_NOT_FOUND-FIRST_OR_LAST '
                        success = True
                except Politician.DoesNotExist:
                    status += "RETRIEVE_POLITICIANS_FROM_NON_UNIQUE-POLITICIAN_NOT_FOUND-FIRST_OR_LAST_NAME "
                    success = True
                except Exception as e:
                    status += "RETRIEVE_POLITICIANS_FROM_NON_UNIQUE-POLITICIAN_QUERY_FAILED3: " + str(e) + " "
                    success = False

        results = {
            'success': success,
            'status': status,
            'politician_found': politician_found,
            'politician': politician,
            'politician_list_found': politician_list_found,
            'politician_list': politician_list,
            'multiple_entries_found': multiple_entries_found,
        }
        return results
    def fetch_politicians_from_non_unique_identifiers_count(
            self,
            state_code='',
            twitter_handle_list=[],
            politician_name='',
            ignore_politician_id_list=[]):
        """
        Count possible politician matches for the incoming identifiers. Mirrors the three
        stages of retrieve_politicians_from_non_unique_identifiers (Twitter handles, exact
        name, first+last name containment) and returns the count from the FIRST stage that
        finds anything, or 0 when nothing matches. Always queries the read-only replica.
        :param state_code: optionally limit matches to this state (case-insensitive)
        :param twitter_handle_list: candidate Twitter handles to search for
        :param politician_name: full display name used for the name stages
        :param ignore_politician_id_list: we_vote_ids to exclude from every stage
        :return: int count of matches from the first stage with any hits, else 0
        """
        keep_looking_for_duplicates = True
        # NOTE(review): status is accumulated but never returned or logged by this method
        status = ""

        # Stage 1: match on any of the Twitter handles
        if keep_looking_for_duplicates and len(twitter_handle_list) > 0:
            try:
                politician_query = Politician.objects.using('readonly').all()
                twitter_filters = []
                for one_twitter_handle in twitter_handle_list:
                    one_twitter_handle_cleaned = extract_twitter_handle_from_text_string(one_twitter_handle)
                    # Each incoming handle may be stored in any of the five handle columns
                    new_filter = (
                        Q(politician_twitter_handle__iexact=one_twitter_handle_cleaned) |
                        Q(politician_twitter_handle2__iexact=one_twitter_handle_cleaned) |
                        Q(politician_twitter_handle3__iexact=one_twitter_handle_cleaned) |
                        Q(politician_twitter_handle4__iexact=one_twitter_handle_cleaned) |
                        Q(politician_twitter_handle5__iexact=one_twitter_handle_cleaned)
                    )
                    twitter_filters.append(new_filter)
                # Add the first query
                final_filters = twitter_filters.pop()
                # ...and "OR" the remaining items in the list
                for item in twitter_filters:
                    final_filters |= item
                politician_query = politician_query.filter(final_filters)
                if positive_value_exists(state_code):
                    politician_query = politician_query.filter(state_code__iexact=state_code)
                if positive_value_exists(ignore_politician_id_list):
                    politician_query = politician_query.exclude(we_vote_id__in=ignore_politician_id_list)
                politician_count = politician_query.count()
                if positive_value_exists(politician_count):
                    return politician_count
            except Politician.DoesNotExist:
                status += "FETCH_POLITICIANS_FROM_NON_UNIQUE_IDENTIFIERS_COUNT1 "

        # twitter handle does not exist, next look up against other data that might match
        # Stage 2: exact (case-insensitive) match on the full name
        if keep_looking_for_duplicates and positive_value_exists(politician_name):
            # Search by Candidate name exact match
            try:
                politician_query = Politician.objects.using('readonly').all()
                politician_query = politician_query.filter(
                    Q(politician_name__iexact=politician_name) |
                    Q(google_civic_candidate_name__iexact=politician_name) |
                    Q(google_civic_candidate_name2__iexact=politician_name) |
                    Q(google_civic_candidate_name3__iexact=politician_name)
                )
                if positive_value_exists(state_code):
                    politician_query = politician_query.filter(state_code__iexact=state_code)
                if positive_value_exists(ignore_politician_id_list):
                    politician_query = politician_query.exclude(we_vote_id__in=ignore_politician_id_list)
                politician_count = politician_query.count()
                if positive_value_exists(politician_count):
                    return politician_count
            except Politician.DoesNotExist:
                status += "FETCH_POLITICIANS_FROM_NON_UNIQUE_IDENTIFIERS_COUNT2 "

        # Stage 3: looser match requiring both first AND last name to appear
        if keep_looking_for_duplicates and positive_value_exists(politician_name):
            # Search for Candidate(s) that contains the same first and last names
            first_name = extract_first_name_from_full_name(politician_name)
            last_name = extract_last_name_from_full_name(politician_name)
            if positive_value_exists(first_name) and positive_value_exists(last_name):
                try:
                    politician_query = Politician.objects.using('readonly').all()
                    politician_query = politician_query.filter(
                        (Q(politician_name__icontains=first_name) & Q(politician_name__icontains=last_name)) |
                        (Q(google_civic_candidate_name__icontains=first_name) &
                         Q(google_civic_candidate_name__icontains=last_name)) |
                        (Q(google_civic_candidate_name2__icontains=first_name) &
                         Q(google_civic_candidate_name2__icontains=last_name)) |
                        (Q(google_civic_candidate_name3__icontains=first_name) &
                         Q(google_civic_candidate_name3__icontains=last_name))
                    )
                    if positive_value_exists(state_code):
                        politician_query = politician_query.filter(state_code__iexact=state_code)
                    if positive_value_exists(ignore_politician_id_list):
                        politician_query = politician_query.exclude(we_vote_id__in=ignore_politician_id_list)
                    politician_count = politician_query.count()
                    if positive_value_exists(politician_count):
                        return politician_count
                except Politician.DoesNotExist:
                    status += "FETCH_POLITICIANS_FROM_NON_UNIQUE_IDENTIFIERS_COUNT3 "

        return 0
def generate_seo_friendly_path(
self,
base_pathname_string=None,
politician_name=None,
politician_we_vote_id='',
state_code=None):
"""
Generate SEO friendly path for this politician. Ensure that the SEO friendly path is unique.
:param base_pathname_string: Pass this in if we want a custom SEO friendly path
:param politician_name:
:param politician_we_vote_id:
:param state_code:
:return:
"""
from politician.controllers_generate_seo_friendly_path import generate_seo_friendly_path_generic
return generate_seo_friendly_path_generic(
base_pathname_string=base_pathname_string,
for_campaign=False,
for_politician=True,
politician_name=politician_name,
politician_we_vote_id=politician_we_vote_id,
state_code=state_code,
)
def retrieve_politicians_are_not_duplicates_list(self, politician_we_vote_id, read_only=True):
"""
Get a list of other politician_we_vote_id's that are not duplicates
:param politician_we_vote_id:
:param read_only:
:return:
"""
# Note that the direction of the linkage does not matter
politicians_are_not_duplicates_list1 = []
politicians_are_not_duplicates_list2 = []
status = ""
try:
if positive_value_exists(read_only):
politicians_are_not_duplicates_list_query = \
PoliticiansAreNotDuplicates.objects.using('readonly').filter(
politician1_we_vote_id__iexact=politician_we_vote_id,
)
else:
politicians_are_not_duplicates_list_query = PoliticiansAreNotDuplicates.objects.filter(
politician1_we_vote_id__iexact=politician_we_vote_id,
)
politicians_are_not_duplicates_list1 = list(politicians_are_not_duplicates_list_query)
success = True
status += "POLITICIANS_NOT_DUPLICATES_LIST_UPDATED_OR_CREATED1 "
except PoliticiansAreNotDuplicates.DoesNotExist:
# No data found. Try again below
success = True
status += 'NO_POLITICIANS_NOT_DUPLICATES_LIST_RETRIEVED_DoesNotExist1 '
except Exception as e:
success = False
status += "POLITICIANS_NOT_DUPLICATES_LIST_NOT_UPDATED_OR_CREATED1: " + str(e) + ' '
if success:
try:
if positive_value_exists(read_only):
politicians_are_not_duplicates_list_query = \
PoliticiansAreNotDuplicates.objects.using('readonly').filter(
politician2_we_vote_id__iexact=politician_we_vote_id,
)
else:
politicians_are_not_duplicates_list_query = \
PoliticiansAreNotDuplicates.objects.filter(
politician2_we_vote_id__iexact=politician_we_vote_id,
)
politicians_are_not_duplicates_list2 = list(politicians_are_not_duplicates_list_query)
success = True
status += "POLITICIANS_NOT_DUPLICATES_LIST_UPDATED_OR_CREATED2 "
except PoliticiansAreNotDuplicates.DoesNotExist:
success = True
status += 'NO_POLITICIANS_NOT_DUPLICATES_LIST_RETRIEVED2_DoesNotExist2 '
except Exception as e:
success = False
status += "POLITICIANS_NOT_DUPLICATES_LIST_NOT_UPDATED_OR_CREATED2: " + str(e) + ' '
politicians_are_not_duplicates_list = \
politicians_are_not_duplicates_list1 + politicians_are_not_duplicates_list2
politicians_are_not_duplicates_list_found = positive_value_exists(len(politicians_are_not_duplicates_list))
politicians_are_not_duplicates_list_we_vote_ids = []
for one_entry in politicians_are_not_duplicates_list:
if one_entry.politician1_we_vote_id != politician_we_vote_id:
politicians_are_not_duplicates_list_we_vote_ids.append(one_entry.politician1_we_vote_id)
elif one_entry.politician2_we_vote_id != politician_we_vote_id:
politicians_are_not_duplicates_list_we_vote_ids.append(one_entry.politician2_we_vote_id)
results = {
'success': success,
'status': status,
'politicians_are_not_duplicates_list_found': politicians_are_not_duplicates_list_found,
'politicians_are_not_duplicates_list': politicians_are_not_duplicates_list,
'politicians_are_not_duplicates_list_we_vote_ids': politicians_are_not_duplicates_list_we_vote_ids,
}
return results
def retrieve_politicians_with_no_gender_id(self, start=0, count=15):
"""
Get the first 15 records that have gender 'U' undefined
use gender_guesser to set the gender if male or female or androgynous (can't guess other human gender states)
set gender_likelihood to gender
:param start:
:param count:
:return:
"""
politician_query = Politician.objects.using('readonly').all()
# Get all politicians who do not have gender specified
politician_query = politician_query.filter(gender=UNKNOWN)
politician_query = politician_query.exclude(gender_likelihood=POLITICAL_DATA_MANAGER)
number_of_rows = politician_query.count()
politician_query = politician_query.order_by('politician_name')
politician_query = politician_query[start:(start+count)]
politician_list_objects = list(politician_query)
results_list = []
for pol in politician_list_objects:
first = pol.first_name.lower().capitalize()
if len(first) == 1 or (len(first) == 2 and pol.first_name[1] == '.'):
# G. Burt Lancaster
first = pol.middle_name.lower().capitalize()
pol.guess = detector.get_gender(first)
try:
pol.displayable_guess = DISPLAYABLE_GUESS[pol.guess]
except KeyError:
pol.displayable_guess = DISPLAYABLE_GUESS['unknown']
pol.guess = 'unknown'
results_list.append(pol)
return results_list, number_of_rows
    def retrieve_politicians_with_misformatted_names(self, start=0, count=15, read_only=False):
        """
        Get the first 15 records that have 3 capitalized letters in a row, as long as those letters
        are not 'III' i.e. King Henry III. Also exclude the names where the word "WITHDRAWN" has been appended when
        the politician withdrew from the race
        SELECT * FROM public.politician_politician WHERE politician_name ~ '.*?[A-Z][A-Z][A-Z].*?' and
        politician_name !~ '.*?III.*?'
        :param start: zero-based offset of the page of rows returned
        :param count: number of rows in the page
        :param read_only: when True, query the read-only database replica
        :return: (list of Politician objects with .person_name_normalized and .party attached, total row count)
        """
        if positive_value_exists(read_only):
            politician_query = Politician.objects.using('readonly').all()
        else:
            politician_query = Politician.objects.all()
        # Get all politicians that have three capital letters in a row in their name, but exclude III (King Henry III)
        # NOTE(review): this regex is evaluated by the database. The trailing (?<!III)
        # lookbehind only prevents the *matched span* from ending in 'III' -- confirm it
        # really excludes every name containing III, as the SQL in the docstring intends.
        politician_query = politician_query.filter(politician_name__regex=r'.*?[A-Z][A-Z][A-Z].*?(?<!III)').\
            order_by('politician_name')
        number_of_rows = politician_query.count()
        politician_query = politician_query[start:(start+count)]
        politician_list_objects = list(politician_query)
        results_list = []
        # out = ''
        # out = 'KING HENRY III => ' + display_full_name_with_correct_capitalization('KING HENRY III') + ", "
        for x in politician_list_objects:
            name = x.politician_name
            # NOTE(review): skips names ending in 'WITHDRAWN' unless the whole name is one
            # unbroken run of capital letters (a name containing spaces can never match
            # ^[A-Z]+$) -- confirm this matches the exclusion described in the docstring.
            if name.endswith('WITHDRAWN') and not bool(re.match('^[A-Z]+$', name)):
                continue
            x.person_name_normalized = display_full_name_with_correct_capitalization(name)
            x.party = x.political_party
            results_list.append(x)
        return results_list, number_of_rows
def save_fresh_twitter_details_to_politician(
self,
politician=None,
politician_we_vote_id='',
twitter_user=None):
"""
Update a politician entry with details retrieved from the Twitter API.
"""
politician_updated = False
success = True
status = ""
values_changed = False
if not hasattr(twitter_user, 'twitter_id'):
success = False
status += "VALID_TWITTER_USER_NOT_PROVIDED "
if success:
if not hasattr(politician, 'politician_twitter_handle') and positive_value_exists(politician_we_vote_id):
# Retrieve politician to update
pass
if not hasattr(politician, 'politician_twitter_handle'):
status += "VALID_POLITICIAN_NOT_PROVIDED_TO_UPDATE_TWITTER_DETAILS "
success = False
if not positive_value_exists(politician.politician_twitter_handle) \
and not positive_value_exists(twitter_user.twitter_handle):
status += "POLITICIAN_TWITTER_HANDLE_MISSING "
success = False
# I don't think this is a problem
# if success:
# if politician.politician_twitter_handle.lower() != twitter_user.twitter_handle.lower():
# status += "POLITICIAN_TWITTER_HANDLE_MISMATCH "
# success = False
if not success:
results = {
'success': success,
'status': status,
'politician': politician,
'politician_updated': politician_updated,
}
return results
if positive_value_exists(twitter_user.twitter_description):
if twitter_user.twitter_description != politician.twitter_description:
politician.twitter_description = twitter_user.twitter_description
values_changed = True
if positive_value_exists(twitter_user.twitter_followers_count):
if twitter_user.twitter_followers_count != politician.twitter_followers_count:
politician.twitter_followers_count = twitter_user.twitter_followers_count
values_changed = True
if positive_value_exists(twitter_user.twitter_handle):
# In case the capitalization of the name changes
if twitter_user.twitter_handle != politician.politician_twitter_handle:
politician.politician_twitter_handle = twitter_user.twitter_handle
values_changed = True
if positive_value_exists(twitter_user.twitter_handle_updates_failing):
if twitter_user.twitter_handle_updates_failing != politician.twitter_handle_updates_failing:
politician.twitter_handle_updates_failing = twitter_user.twitter_handle_updates_failing
values_changed = True
if positive_value_exists(twitter_user.twitter_id):
if twitter_user.twitter_id != politician.twitter_user_id:
politician.twitter_user_id = twitter_user.twitter_id
values_changed = True
if positive_value_exists(twitter_user.twitter_location):
if twitter_user.twitter_location != politician.twitter_location:
politician.twitter_location = twitter_user.twitter_location
values_changed = True
if positive_value_exists(twitter_user.twitter_name):
if twitter_user.twitter_name != politician.twitter_name:
politician.twitter_name = twitter_user.twitter_name
values_changed = True
if positive_value_exists(twitter_user.twitter_profile_image_url_https):
if twitter_user.twitter_profile_image_url_https != politician.twitter_profile_image_url_https:
politician.twitter_profile_image_url_https = twitter_user.twitter_profile_image_url_https
values_changed = True
if positive_value_exists(twitter_user.twitter_profile_background_image_url_https):
if twitter_user.twitter_profile_background_image_url_https != \
politician.twitter_profile_background_image_url_https:
politician.twitter_profile_background_image_url_https = \
twitter_user.twitter_profile_background_image_url_https
values_changed = True
if positive_value_exists(twitter_user.twitter_profile_banner_url_https):
if twitter_user.twitter_profile_banner_url_https != politician.twitter_profile_banner_url_https:
politician.twitter_profile_banner_url_https = twitter_user.twitter_profile_banner_url_https
values_changed = True
if positive_value_exists(twitter_user.twitter_url):
from representative.controllers import add_value_to_next_representative_spot
results = add_value_to_next_representative_spot(
field_name_base='politician_url',
new_value_to_add=twitter_user.twitter_url,
representative=politician,
)
if results['success'] and results['values_changed']:
politician = results['representative']
values_changed = True
if not results['success']:
status += results['status']
if positive_value_exists(twitter_user.we_vote_hosted_profile_image_url_large):
if twitter_user.we_vote_hosted_profile_image_url_large != \
politician.we_vote_hosted_profile_twitter_image_url_large:
politician.we_vote_hosted_profile_twitter_image_url_large = \
twitter_user.we_vote_hosted_profile_image_url_large
values_changed = True
if positive_value_exists(twitter_user.we_vote_hosted_profile_image_url_medium):
if twitter_user.we_vote_hosted_profile_image_url_medium != \
politician.we_vote_hosted_profile_twitter_image_url_medium:
politician.we_vote_hosted_profile_twitter_image_url_medium = \
twitter_user.we_vote_hosted_profile_image_url_medium
values_changed = True
if positive_value_exists(twitter_user.we_vote_hosted_profile_image_url_tiny):
if twitter_user.we_vote_hosted_profile_image_url_tiny != \
politician.we_vote_hosted_profile_twitter_image_url_tiny:
politician.we_vote_hosted_profile_twitter_image_url_tiny = \
twitter_user.we_vote_hosted_profile_image_url_tiny
values_changed = True
if politician.profile_image_type_currently_active == PROFILE_IMAGE_TYPE_UNKNOWN and \
positive_value_exists(twitter_user.we_vote_hosted_profile_image_url_large):
politician.profile_image_type_currently_active = PROFILE_IMAGE_TYPE_TWITTER
values_changed = True
if politician.profile_image_type_currently_active == PROFILE_IMAGE_TYPE_TWITTER:
if twitter_user.we_vote_hosted_profile_image_url_large != politician.we_vote_hosted_profile_image_url_large:
politician.we_vote_hosted_profile_image_url_large = twitter_user.we_vote_hosted_profile_image_url_large
values_changed = True
if twitter_user.we_vote_hosted_profile_image_url_medium != \
politician.we_vote_hosted_profile_image_url_medium:
politician.we_vote_hosted_profile_image_url_medium = \
twitter_user.we_vote_hosted_profile_image_url_medium
values_changed = True
if twitter_user.we_vote_hosted_profile_image_url_tiny != politician.we_vote_hosted_profile_image_url_tiny:
politician.we_vote_hosted_profile_image_url_tiny = twitter_user.we_vote_hosted_profile_image_url_tiny
values_changed = True
if values_changed:
try:
politician.save()
politician_updated = True
success = True
status += "SAVED_POLITICIAN_TWITTER_DETAILS "
except Exception as e:
success = False
status += "NO_CHANGES_SAVED_TO_POLITICIAN_TWITTER_DETAILS: " + str(e) + " "
results = {
'success': success,
'status': status,
'politician': politician,
'politician_updated': politician_updated,
}
return results
def update_or_create_politicians_are_not_duplicates(self, politician1_we_vote_id, politician2_we_vote_id):
"""
Either update or create a politician entry.
"""
exception_multiple_object_returned = False
success = False
new_politicians_are_not_duplicates_created = False
politicians_are_not_duplicates = None
status = ""
if positive_value_exists(politician1_we_vote_id) and positive_value_exists(politician2_we_vote_id):
try:
updated_values = {
'politician1_we_vote_id': politician1_we_vote_id,
'politician2_we_vote_id': politician2_we_vote_id,
}
politicians_are_not_duplicates, new_politicians_are_not_duplicates_created = \
PoliticiansAreNotDuplicates.objects.update_or_create(
politician1_we_vote_id__exact=politician1_we_vote_id,
politician2_we_vote_id__iexact=politician2_we_vote_id,
defaults=updated_values)
success = True
status += "POLITICIANS_ARE_NOT_DUPLICATES_UPDATED_OR_CREATED "
except PoliticiansAreNotDuplicates.MultipleObjectsReturned as e:
success = False
status += 'MULTIPLE_MATCHING_POLITICIANS_ARE_NOT_DUPLICATES_FOUND_BY_POLITICIAN_WE_VOTE_ID '
exception_multiple_object_returned = True
except Exception as e:
status += 'EXCEPTION_UPDATE_OR_CREATE_POLITICIANS_ARE_NOT_DUPLICATES ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
success = False
results = {
'success': success,
'status': status,
'MultipleObjectsReturned': exception_multiple_object_returned,
'new_politicians_are_not_duplicates_created': new_politicians_are_not_duplicates_created,
'politicians_are_not_duplicates': politicians_are_not_duplicates,
}
return results
class PoliticianSEOFriendlyPath(models.Model):
    """
    One SEO friendly URL path reserved for a politician.
    final_pathname_string is globally unique and indexed for fast lookup.
    """
    # NOTE(review): presumably a placeholder so IDEs/static analysis see an 'objects'
    # attribute; Django normally installs the default manager at class creation --
    # confirm this assignment does not suppress it.
    objects = None

    def __unicode__(self):
        return "PoliticianSEOFriendlyPath"

    # The politician this path belongs to
    politician_we_vote_id = models.CharField(max_length=255, null=True)
    politician_name = models.CharField(max_length=255, null=False)
    # The path as originally generated from the politician's name
    base_pathname_string = models.CharField(max_length=255, null=True)
    # Suffix added to base_pathname_string when needed to keep the final path unique
    pathname_modifier = models.CharField(max_length=10, null=True)
    final_pathname_string = models.CharField(max_length=255, null=True, unique=True, db_index=True)
class PoliticianTagLink(models.Model):
    """
    A confirmed (undisputed) link between tag & item of interest.
    Joins one Tag to one Politician; no cascade is performed when either side is
    deleted (on_delete=DO_NOTHING).
    """
    tag = models.ForeignKey(Tag, null=False, blank=False, verbose_name='tag unique identifier',
                            on_delete=models.deletion.DO_NOTHING)
    politician = models.ForeignKey(Politician, null=False, blank=False, verbose_name='politician unique identifier',
                                   on_delete=models.deletion.DO_NOTHING)
    # NOTE(review): the fields below appear to be planned future link targets
    # measure_id
    # office_id
    # issue_id
class PoliticianTagLinkDisputed(models.Model):
    """
    This is a highly disputed link between tag & item of interest. Generated from 'tag_added', and tag results
    are only shown to people within the cloud of the voter who posted
    We split off how things are tagged to avoid conflict wars between liberals & conservatives
    (Deal with some tags visible in some networks, and not in others - ex/ #ObamaSucks)
    Same shape as PoliticianTagLink; no cascade on delete (on_delete=DO_NOTHING).
    """
    tag = models.ForeignKey(Tag, null=False, blank=False, verbose_name='tag unique identifier',
                            on_delete=models.deletion.DO_NOTHING)
    politician = models.ForeignKey(Politician, null=False, blank=False, verbose_name='politician unique identifier',
                                   on_delete=models.deletion.DO_NOTHING)
    # NOTE(review): the fields below appear to be planned future link targets
    # measure_id
    # office_id
    # issue_id
|
""" An implementation of the MinHash LSH method that operates on Dask DataFrames.
To do: Convert DataFrame operations to Array operations. This would eliminate
looping and increase computation speed.
"""
from random import random
import pandas as pd
import dask
import dask.dataframe as dd
class MinHashLSH():
def __init__(self, df_a, df_b, id_col, hash_col, n_shingles, rows, rows_per_band, threshold, hash_func=hash):
self.df_a = df_a[hash_col].to_frame()
self.df_b = df_b[hash_col].to_frame()
self.df_a_all_attributes = df_a
self.df_b_all_attributes = df_b
self.id_col = id_col
self.hash_col = hash_col
self.n_shingles = n_shingles
self.rows_per_band = rows_per_band
self.rows = rows
self.threshold = threshold
self.random_strings = [str(random()) for _ in range(self.rows)]
self.hash_func = hash_func
@staticmethod
def shingles(value_to_hash, n_shingles):
"""Generate shingles of length n_shingles"""
return {value_to_hash[i:i + n_shingles] for i in range(len(value_to_hash) - n_shingles + 1)}
@staticmethod
def signature_matrix(shingles, random_strings, rows_per_band, hash_func):
"""Calculate the signature matrix given sets of shingles, also apply the
band_matrix transformation"""
hasher = lambda x, i: abs(hash_func(x + i))
signature = [min([hasher(x, i) for x in shingles]) for i in random_strings]
return MinHashLSH.band_matrix(signature, rows_per_band, hash_func)
@staticmethod
def band_matrix(signature_matrix, rows_per_band, hash_func):
"""Given a signature matrix, calculate the band matrix"""
hasher = lambda x: abs(hash_func(x))
return [hasher(tuple(signature_matrix[i:i + rows_per_band])) for i in range(len(signature_matrix)) if i % rows_per_band == 0]
@staticmethod
def jaccard(set_1, set_2):
"""Calculate Jaccard similarity given two sets"""
return len(set_1.intersection(set_2)) / len(set_1.union(set_2))
@staticmethod
def get_band_proba(jaccard_sim, n_rows, n_bands):
"""Calculate the probability that at least one band will
match given two records with Jaccard similarity =
jaccard_sim
Arguments:
jaccard_sim (float): the jaccard sim theshold of interst, ex. 0.5
n_rows (int): the number of minhash signatures
n_bands (int): the number of bands generated from the minhash signatures
"""
rows_per_band = n_rows / n_bands
probability = 1 - (1-jaccard_sim**rows_per_band)**n_bands
return (probability, rows_per_band)
@staticmethod
def get_theshold_bump(n_rows, n_bands):
"""Calcualate the jaccard similarity theshold at which the
probability of two records sharing a band matrix values begins
to rapidly increase
Arguments:
n_rows (int): the number of minhash signatures
n_bands (int): the number of bands generated from the minhash signatures
"""
rows_per_band = n_rows / n_bands
return (1 / n_bands) ** (1 / rows_per_band)
def apply_minhash_lsh(self, df):
"""Given a Dataframe, generate shingles, signature matrix, and band_matrix
"""
df_shingles = df[self.hash_col].apply(MinHashLSH.shingles, args=(self.n_shingles,), meta=object)
df_bands = df_shingles.apply(MinHashLSH.signature_matrix, args=(self.random_strings,
self.rows_per_band,
self.hash_func,), meta=object)
return df_bands
def get_band_matrix(self):
"""Apply the MinhashLSH process steps
"""
df_a_bands = self.apply_minhash_lsh(self.df_a)
df_b_bands = self.apply_minhash_lsh(self.df_b)
return (df_a_bands, df_b_bands)
def get_band_index(self, df):
"""SQL-style explode that take a datafram of record ids, list of band values
and generates a new DataFrame with one row per id value and individual band value.
id band
123 [000, 111, 222]
to:
id band
123 000
123 111
123 222
"""
def func(df):
return df.apply(pd.Series, 1) \
.stack() \
.reset_index(level=1, drop=True) \
.to_frame()
band_index = df.map_partitions(func)
band_index = band_index.rename(columns = {0: self.hash_col})
band_index[self.id_col] = band_index.index
return band_index
    def join_bands(self):
        """Full LSH candidate-pair pipeline.

        Bands both datasets, joins them on shared band values to get
        candidate id pairs, scores each pair by Jaccard similarity of its
        shingle sets, keeps pairs at or above ``self.threshold``, and joins
        the survivors back to the full attribute tables.

        Returns:
            pandas.DataFrame: one row per retained (id_a, id_b) pair with a
            'sim' score and all attribute columns from both inputs.
        """
        id_a_col = self.id_col + '_a'
        id_b_col = self.id_col + '_b'
        # Calculate shingles, minhash signature matrix, and band matrix
        bands_a, bands_b = self.get_band_matrix()
        # Transpose the band matrix to one row per id and band matrix value
        bands_a = self.get_band_index(bands_a)
        bands_b = self.get_band_index(bands_b)
        # Join the two band indexes on band values. This will connect all records
        # that share at least one band value. Then dedupe so that only distinct
        # id pairs are retained.
        joined_bands = bands_a.merge(bands_b, how='inner', on=self.hash_col, suffixes=('_a', '_b'))
        joined_bands = joined_bands[[id_a_col, id_b_col]].drop_duplicates()
        # Calculate the shingles
        df_a_shingles = self.df_a[self.hash_col].apply(MinHashLSH.shingles,
                                                       args=(self.n_shingles,),
                                                       meta=object)
        df_b_shingles = self.df_b[self.hash_col].apply(MinHashLSH.shingles,
                                                       args=(self.n_shingles,),
                                                       meta=object)
        # Some cleanup here; also specifically setting types to ensure joins work
        # properly. The types seem to change throughout some processing steps. Also
        # changing the index type does not seem to stick, so copying the index into
        # another column and setting type before joining.
        # NOTE(review): Series.astype is not in-place -- every bare
        # `...astype(object)` call below discards its result; confirm whether
        # these were meant to be assignments.
        df_a_shingles = df_a_shingles.to_frame()
        df_a_shingles.columns = ['shingles_a']
        df_a_shingles[id_a_col] = df_a_shingles.index
        df_a_shingles[id_a_col].astype(object)
        df_b_shingles = df_b_shingles.to_frame()
        df_b_shingles.columns = ['shingles_b']
        df_b_shingles[id_b_col] = df_b_shingles.index
        df_b_shingles[id_b_col].astype(object)
        joined_bands = joined_bands.merge(df_a_shingles, on= id_a_col, how='inner')
        joined_bands = joined_bands.merge(df_b_shingles, on= id_b_col, how='inner')
        joined_bands.astype(object)
        # Calculate the Jaccard similarity of the shingle pairs, retain only those
        # with similarities above the threshold
        joined_bands['sim'] = joined_bands.apply(lambda row: self.jaccard(row.shingles_a, row.shingles_b), axis=1, meta=float)
        final_columns = [id_a_col, id_b_col, 'sim']
        joined_bands = joined_bands[joined_bands.sim >= self.threshold][final_columns]
        # Explicitly setting types here, these seems to change during some procesing
        # steps and then prevent the tables from joining. Also, change the type of the
        # index does not seem to stick, so I'm copying it into a new column.
        joined_bands[id_a_col].astype(object)
        joined_bands[id_b_col].astype(object)
        self.df_a_all_attributes[id_a_col] = self.df_a_all_attributes.index
        self.df_a_all_attributes[id_a_col].astype(object)
        self.df_b_all_attributes[id_b_col] = self.df_b_all_attributes.index
        self.df_b_all_attributes[id_b_col].astype(object)
        # Join the above threshold pairs to the full input datasets, captures all
        # attribute columns
        joined_bands = joined_bands.merge(self.df_a_all_attributes, on=id_a_col, how='left')
        joined_bands = joined_bands.merge(self.df_b_all_attributes, on=id_b_col, how='left', suffixes=('_a', '_b'))
        # Need to reset index after computation since each Dask partition has a separate index
        computed_bands = joined_bands.compute()
        computed_bands.reset_index(inplace=True, drop=True)
        return computed_bands
|
"""
in order of execution priority:
(i) matplotlibrc file
(ii) custom styles
(iii) manual RC parameter configuration
"""
#######################
### (i) matplotlibrc
# find the default local matplotlibrc file:
# (root)# find / -name "*matplotlibrc*" 2> /dev/null
# for example here: /usr/lib/python3.6/site-packages/matplotlib/mpl-data/matplotlibrc
# copy it here:
# ~/.config/matplotlib/matplotlibrc
#
# open it in text editor and manipulate the default configuration.
### (ii) Styles!!
# custom styles can go to ~/.config/matplotlib/stylelib
# originals in /usr/lib/python3.6/site-packages/matplotlib/mpl-data/stylelib
# in python:
# print(MPP.style.available)
# MPP.style.use('seaborn-paper')
### (iii) Manual rc configuration
# in python:
# MP.rcParams['font.size'] = 10
## or:
# MPP.rc('font',**{'family': 'Iwona', 'size': 10})
|
import re
def extrae(texto):
    """Extract link-like fragments from an HTML/text string.

    Returns href="..." and src="..." attributes followed by quoted
    http: and https: URLs, in that order. Returns an empty list when
    ``texto`` is not searchable text (e.g. None).
    """
    try:
        hrefs = re.findall('href=".+?"', texto)
        srcs = re.findall('src=".+?"', texto)
        https_urls = re.findall('"https:.+?"', texto)
        http_urls = re.findall('"http:.+?"', texto)
        return hrefs + srcs + http_urls + https_urls
    except TypeError:
        # re.findall raises TypeError on non-string input. The previous
        # bare `except` swallowed everything, printed an unrelated message
        # and implicitly returned None; now we report the real problem and
        # return an empty list so callers always get a list.
        print("Error: 'texto' no es una cadena de texto")
        return []
def contarCaracter(cadena, letra):
    """Count how many characters of `cadena` are exactly equal to `letra`."""
    return sum(1 for simbolo in cadena if simbolo == letra)
|
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from lib.core import utils
from modules import subdomain
from modules import probing
from modules import formatting
from modules import fingerprint
from modules import stoscan
from modules import screenshot
from modules import linkfinding
from modules import ipspace
from modules import portscan
from modules import vulnscan
from modules import dirbscan
from modules import corscan
def handle(options):
    """Entry point: if TARGET is a file, run the scan chain once per line
    in it (rewriting TARGET/OUTPUT for each); otherwise scan TARGET as a
    single host."""
    if not utils.isFile(options.get('TARGET')):
        single_handle(options)
        return
    for target in utils.just_read(options.get('TARGET'), get_list=True):
        options['TARGET'] = target
        options['OUTPUT'] = target
        single_handle(options)
def single_handle(options):
    """Execute the recon modules for one target.

    Ordering presumably matters (later stages read earlier stages' output
    via `options` / workspace files) -- confirm before reordering.
    Formatting and VulnScan are currently disabled.
    """
    subdomain.SubdomainScanning(options)
    probing.Probing(options)
    # formatting.Formatting(options)
    corscan.CORScan(options)
    fingerprint.Fingerprint(options)
    stoscan.StoScan(options)
    screenshot.ScreenShot(options)
    linkfinding.LinkFinding(options)
    ipspace.IPSpace(options)
    portscan.PortScan(options)
    # vulnscan.VulnScan(options)
    dirbscan.DirbScan(options)
|
from aiogram.dispatcher import FSMContext
from .question_text import create_question_text
from .results_calculator import PollResultsCalculator
async def receive_answer(ratio_choice, state: FSMContext):
    """Record one pairwise-comparison answer (both directions) and advance
    the poll to the next question."""
    data = await state.get_data()
    question_idx = data.get('current_question')
    character_a, character_b = data.get('characters_combinations')[question_idx]
    chosen = int(ratio_choice)
    # 'inverse' flips which character the chosen ratio favours.
    ratio = 1 / chosen if data.get('inverse', False) else chosen
    new_answers = data.get('answers') + [
        (character_a['id'], character_b['id'], ratio),
        (character_b['id'], character_a['id'], 1 / ratio),
    ]
    await state.update_data({'answers': new_answers})
    await state.update_data({'current_question': question_idx + 1})
async def create_next_question(data):
    """Build the next question's text and return it together with the two
    characters being compared."""
    text = create_question_text(data)
    combination = data.get('characters_combinations')[data.get('current_question')]
    character_a, character_b = combination
    return text, character_a, character_b
async def complete_poll(data, state: FSMContext):
    """Finish the poll: compute average character ratings and the
    concordance factor, store both in the FSM state, and return the
    summary message shown to the user."""
    answers = data.get('answers')
    characters_id = data.get('characters').keys()
    calculator = PollResultsCalculator()
    average_characters_rating, concordance_factor = calculator.calculate_poll_results(answers, characters_id)
    await state.update_data({'average_characters_rating': average_characters_rating,
                             'concordance_factor': concordance_factor})
    # One line per character: display name and rating scaled to percent.
    message = 'ะกัะตะดะฝะธะต ะพัะตะฝะบะธ ะฟะพ ัะตะทัะปััะฐัะฐะผ ะพะฟัะพัะฐ:\n'
    for character_id, average_rating in average_characters_rating.items():
        message += f'{data.get("characters")[character_id]["name"]}: {average_rating * 100}\n'
    message += f'\nะัะตะดะฒะฐัะธัะตะปัะฝัะน ะบะพัััะธัะธะตะฝั ัะพะณะปะฐัะพะฒะฐะฝะฝะพััะธ: {concordance_factor}\n\n'
    message += (f'ะะฟัะพั ะพะบะพะฝัะตะฝ. ะขะตะฟะตัั ะฒะฐะผ ะฝัะถะฝะพ ัะตัะธัั, ะธัะฟะพะปัะทะพะฒะฐัั ะปะธ ะพัะฒะตัั ะฒ ะดะฐะปัะฝะตะนัะตะผ ะฐะฝะฐะปะธะทะต. '
                f'ะัะปะธ ะฒั ะฒะพะพะฑัะต ะฝะต ะฟะพะฝะธะผะฐะปะธ, ััะพ ะฒั ัะพะปัะบะพ ััะพ ััะบะฐะปะธ, ัะพ, ะฟะพะถะฐะปัะนััะฐ, ะฒัะฑะตัะธัะต "ะะตั". '
                f'ะัะปะธ ะถะต ะฒั ะฝะฐัััะพะตะฝั ัะตััะตะทะฝะพ, ัะพ ะพัะฒะตัะฐะนัะต "ะะฐ".')
    return message
|
import sys
# Rebind input to readline for faster I/O (deliberately shadows the builtin).
input = sys.stdin.readline
def find_parent(parent, x):
    """Return the root of x's set, compressing every node on the path."""
    root = x
    while parent[root] != root:
        root = parent[root]
    # Second pass: point each traversed node straight at the root.
    while parent[x] != root:
        parent[x], x = root, parent[x]
    return root
def union(parent, a, b):
    """Merge the sets containing a and b; the smaller root becomes parent."""
    root_a = find_parent(parent, a)
    root_b = find_parent(parent, b)
    if root_a < root_b:
        parent[root_b] = root_a
    else:
        parent[root_a] = root_b
# Appears to be the classic greedy gate-docking problem: G gates, P planes,
# plane i may use any gate numbered 1..g_i. Always dock at the
# highest-numbered free gate (found via union-find), then union that gate
# with the next lower one so the next lookup skips it. Stop at the first
# plane that cannot dock (root 0 means no free gate remains).
G = int(input())
P = int(input())
gates = list(map(int, sys.stdin.readlines()))
answer = 0
# parent[i] == i initially; find_parent(parent, i) yields the highest free gate <= i.
parent = [i for i in range(G+1)]
for g_i in gates:
    gate_idx = find_parent(parent, g_i)
    if gate_idx > 0:
        answer += 1
        union(parent, gate_idx, gate_idx-1)
    else:
        break
print(answer)
# Generated by Django 3.2.8 on 2021-10-29 15:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: relax ``AbsentDates.dates_of_absent`` to
    allow blank/null values."""

    dependencies = [
        ('home', '0018_absentdates_attendanceid'),
    ]
    operations = [
        migrations.AlterField(
            model_name='absentdates',
            name='dates_of_absent',
            field=models.DateField(blank=True, null=True),
        ),
    ]
|
from __future__ import division
from baseagent import Agent
from collections import deque
from copy import deepcopy
import time
import numpy as np
import keras.backend as K
from keras.layers import Lambda, Input, merge, Layer
from keras.models import Model
from rl.core import Agent
from rl.policy import EpsGreedyQPolicy
from rl.util import *
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
from pdb import set_trace as bp
def mean_q(y_true, y_pred):
    """Keras metric: mean over the batch of the maximum predicted Q-value."""
    return K.mean(K.max(y_pred, axis=-1))
"""
Agent to look at only its own state
"""
class DQNAgent(Agent):
def __init__(self, id,teammates,opponents,actions,actionsEnum,inputV, nb_actions, memory, policy=EpsGreedyQPolicy(),
gamma=.99, batch_size=32, nb_steps_warmup=1000, train_interval=1, memory_interval=1,
target_model_update=10000, delta_range=(-np.inf, np.inf), enable_double_dqn=True,
custom_model_objects={}, processor=None):
# Validate (important) input.
self.model = self.createNetwork(inputV,nb_actions)
if hasattr(self.model.output, '__len__') and len(self.model.output) > 1:
raise ValueError('Model "{}" has more than one output. DQN expects a model that has a single output.'.format(model))
if self.model.output._keras_shape != (None, nb_actions):
raise ValueError('Model output "{}" has invalid shape. DQN expects a model that has one dimension for each action, in this case {}.'.format(model.output, nb_actions))
super(DQNAgent, self).__init__()
# Soft vs hard target model updates.
if target_model_update < 0:
raise ValueError('`target_model_update` must be >= 0.')
elif target_model_update >= 1:
# Hard update every `target_model_update` steps.
target_model_update = int(target_model_update)
else:
# Soft update with `(1 - target_model_update) * old + target_model_update * new`.
target_model_update = float(target_model_update)
# Parameters.
self.nb_actions = nb_actions
self.gamma = gamma
self.batch_size = batch_size
self.nb_steps_warmup = nb_steps_warmup
self.train_interval = train_interval
self.memory_interval = memory_interval
self.target_model_update = target_model_update
self.delta_range = delta_range
self.enable_double_dqn = enable_double_dqn
self.custom_model_objects = custom_model_objects
# Related objects.
self.memory = memory
self.policy = policy
self.policy._set_agent(self)
self.processor = processor
# State.
self.compiled = False
self.reset_states()
self.id=id
teammates.remove(self.id)
self.teammates= teammates
self.opponents= opponents
self.actions ,self.actionsEnum =actions,actionsEnum
self.minAct,self.maxAct=min(self.actions),max(self.actions)
def createNetwork(self,inputV,outputV):
# this returns a tensor
linputs = Input(shape=(11,))
# a layer instance is callable on a tensor, and returns a tensor
x = Dense(64, activation='relu')(linputs)
x = Dense(64, activation='relu')(x)
predictions = Dense(outputV, activation='softmax')(x)
rinputs= Input(shape=(inputV-11,))
y = Dense(64, activation='relu')(rinputs)
xyu = merge([predictions, y], mode='concat')
z=Dense(outputV, activation='softmax')(xyu)
# this creates a model that includes
# the Input layer and three Dense layers
model = Model(input=[linputs,rinputs], output=z)
# model = Sequential()
# model.add(Flatten(input_shape=(1,) + (inputV,)))
# model.add(Dense(16))
# model.add(Activation('relu'))
# model.add(Dense(16))
# model.add(Activation('relu'))
# model.add(Dense(16))
# model.add(Activation('relu'))
# model.add(Dense(outputV))
# model.add(Activation('linear'))
print model.summary()
return model
def get_config(self):
config = {
'nb_actions': self.nb_actions,
'gamma': self.gamma,
'batch_size': self.batch_size,
'nb_steps_warmup': self.nb_steps_warmup,
'train_interval': self.train_interval,
'memory_interval': self.memory_interval,
'target_model_update': self.target_model_update,
'delta_range': self.delta_range,
'enable_double_dqn': self.enable_double_dqn,
'model': get_object_config(self.model),
'memory': get_object_config(self.memory),
'policy': get_object_config(self.policy),
}
if self.compiled:
config['target_model'] = get_object_config(self.target_model)
return config
def compile(self, optimizer, metrics=[]):
metrics += [mean_q] # register default metrics
# We never train the target model, hence we can set the optimizer and loss arbitrarily.
self.target_model = clone_model(self.model, self.custom_model_objects)
self.target_model.compile(optimizer='sgd', loss='mse')
self.model.compile(optimizer='sgd', loss='mse')
# Compile model.
if self.target_model_update < 1.:
# We use the `AdditionalUpdatesOptimizer` to efficiently soft-update the target model.
updates = get_soft_target_model_updates(self.target_model, self.model, self.target_model_update)
optimizer = AdditionalUpdatesOptimizer(optimizer, updates)
def clipped_masked_mse(args):
y_true, y_pred, mask = args
delta = K.clip(y_true - y_pred, self.delta_range[0], self.delta_range[1])
delta *= mask # apply element-wise mask
loss = K.mean(K.square(delta), axis=-1)
# Multiply by the number of actions to reverse the effect of the mean.
loss *= float(self.nb_actions)
return loss
# Create trainable model. The problem is that we need to mask the output since we only
# ever want to update the Q values for a certain action. The way we achieve this is by
# using a custom Lambda layer that computes the loss. This gives us the necessary flexibility
# to mask out certain parameters by passing in multiple inputs to the Lambda layer.
y_pred = self.model.output
y_true = Input(name='y_true', shape=(self.nb_actions,))
mask = Input(name='mask', shape=(self.nb_actions,))
loss_out = Lambda(clipped_masked_mse, output_shape=(1,), name='loss')([y_pred, y_true, mask])
trainable_model = Model(input=[self.model.input[0],self.model.input[1], y_true, mask], output=[loss_out, y_pred])
assert len(trainable_model.output_names) == 2
combined_metrics = {trainable_model.output_names[1]: metrics}
losses = [
lambda y_true, y_pred: y_pred, # loss is computed in Lambda layer
lambda y_true, y_pred: K.zeros_like(y_pred), # we only include this for the metrics
]
trainable_model.compile(optimizer=optimizer, loss=losses, metrics=combined_metrics)
self.trainable_model = trainable_model
self.compiled = True
def load_weights(self, filepath):
self.model.load_weights(filepath)
self.update_target_model_hard()
def save_weights(self, filepath, overwrite=False):
self.model.save_weights(filepath, overwrite=overwrite)
def reset_states(self):
self.recent_action = None
self.recent_observation = None
if self.compiled:
self.model.reset_states()
self.target_model.reset_states()
def update_target_model_hard(self):
self.target_model.set_weights(self.model.get_weights())
def process_state_batch(self, batch):
batch = np.array(batch)
if self.processor is None:
return batch
return self.processor.process_state_batch(batch)
def compute_batch_q_values(self, state_batch):
batch = self.process_state_batch(state_batch)
z=[]
z.append([0])
b1=batch[0][0][:11].reshape((1,-1))
#print "Shape of B1:"+str(b1.shape)
b2=batch[0][0][11:].reshape((1,-1))
#print "Shape of B2:"+str(b2.shape)
z=[b1,b2]
q_values = self.model.predict_on_batch(z)
assert q_values.shape == (len(state_batch), self.nb_actions)
return q_values
def compute_q_values(self, state):
q_values = self.compute_batch_q_values([state]).flatten()
assert q_values.shape == (self.nb_actions,)
return q_values
def forward(self, observation):
if self.processor is not None:
observation = self.processor.process_observation(observation)
# Select an action.
state = self.memory.get_recent_state(observation)
q_values = self.compute_q_values(state)
action = self.policy.select_action(q_values=q_values)
if self.processor is not None:
action = self.processor.process_action(action)
# Book-keeping.
self.recent_observation = observation
self.recent_action = action
return (action,q_values)
def backward(self, reward, terminal):
# Store most recent experience in memory.
if self.processor is not None:
reward = self.processor.process_reward(reward)
if self.step % self.memory_interval == 0:
self.memory.append(self.recent_observation, self.recent_action, reward, terminal,
training=self.training)
metrics = [np.nan for _ in self.metrics_names]
if not self.training:
# We're done here. No need to update the experience memory since we only use the working
# memory to obtain the state over the most recent observations.
return metrics
# Train the network on a single stochastic batch.
if self.step > self.nb_steps_warmup and self.step % self.train_interval == 0:
experiences = self.memory.sample(self.batch_size)
assert len(experiences) == self.batch_size
# Start by extracting the necessary parameters (we use a vectorized implementation).
state0_batch = []
reward_batch = []
action_batch = []
terminal1_batch = []
state1_batch = []
for e in experiences:
state0_batch.append(e.state0)
state1_batch.append(e.state1)
reward_batch.append(e.reward)
action_batch.append(e.action)
terminal1_batch.append(0. if e.terminal1 else 1.)
# Prepare and validate parameters.
state0_batch = self.process_state_batch(state0_batch)
state1_batch = self.process_state_batch(state1_batch)
terminal1_batch = np.array(terminal1_batch)
reward_batch = np.array(reward_batch)
assert reward_batch.shape == (self.batch_size,)
assert terminal1_batch.shape == reward_batch.shape
assert len(action_batch) == len(reward_batch)
# Compute Q values for mini-batch update.
if self.enable_double_dqn:
# According to the paper "Deep Reinforcement Learning with Double Q-learning"
# (van Hasselt et al., 2015), in Double DQN, the online network predicts the actions
# while the target network is used to estimate the Q value.
q_values = self.model.predict_on_batch(state1_batch)
assert q_values.shape == (self.batch_size, self.nb_actions)
actions = np.argmax(q_values, axis=1)
assert actions.shape == (self.batch_size,)
# Now, estimate Q values using the target network but select the values with the
# highest Q value wrt to the online model (as computed above).
target_q_values = self.target_model.predict_on_batch(state1_batch)
assert target_q_values.shape == (self.batch_size, self.nb_actions)
q_batch = target_q_values[range(self.batch_size), actions]
else:
# Compute the q_values given state1, and extract the maximum for each sample in the batch.
# We perform this prediction on the target_model instead of the model for reasons
# outlined in Mnih (2015). In short: it makes the algorithm more stable.
target_q_values = self.target_model.predict_on_batch(state1_batch)
assert target_q_values.shape == (self.batch_size, self.nb_actions)
q_batch = np.max(target_q_values, axis=1).flatten()
assert q_batch.shape == (self.batch_size,)
targets = np.zeros((self.batch_size, self.nb_actions))
dummy_targets = np.zeros((self.batch_size,))
masks = np.zeros((self.batch_size, self.nb_actions))
# Compute r_t + gamma * max_a Q(s_t+1, a) and update the target targets accordingly,
# but only for the affected output units (as given by action_batch).
discounted_reward_batch = self.gamma * q_batch
# Set discounted reward to zero for all states that were terminal.
discounted_reward_batch *= terminal1_batch
assert discounted_reward_batch.shape == reward_batch.shape
Rs = reward_batch + discounted_reward_batch
for idx, (target, mask, R, action) in enumerate(zip(targets, masks, Rs, action_batch)):
target[action] = R # update action with estimated accumulated reward
dummy_targets[idx] = R
mask[action] = 1. # enable loss for this specific action
targets = np.array(targets).astype('float32')
masks = np.array(masks).astype('float32')
# Finally, perform a single update on the entire batch. We use a dummy target since
# the actual loss is computed in a Lambda layer that needs more complex input. However,
# it is still useful to know the actual target to compute metrics properly.
metrics = self.trainable_model.train_on_batch([state0_batch, targets, masks], [dummy_targets, targets])
metrics = [metric for idx, metric in enumerate(metrics) if idx not in (1, 2)] # throw away individual losses
metrics += self.policy.metrics
if self.processor is not None:
metrics += self.processor.metrics
if self.target_model_update >= 1 and self.step % self.target_model_update == 0:
self.update_target_model_hard()
return metrics
@property
def metrics_names(self):
# Throw away individual losses and replace output name since this is hidden from the user.
assert len(self.trainable_model.output_names) == 2
dummy_output_name = self.trainable_model.output_names[1]
model_metrics = [name for idx, name in enumerate(self.trainable_model.metrics_names) if idx not in (1, 2)]
model_metrics = [name.replace(dummy_output_name + '_', '') for name in model_metrics]
names = model_metrics + self.policy.metrics_names[:]
if self.processor is not None:
names += self.processor.metrics_names[:]
return names
def getAction(self,state):
#print "Going In:"+str(state)
try:
(action,q_values) = self.forward(state)
except:
#print "Error occured"
time.sleep(0.2)
(action,q_values) = self.forward(state)
#bp()
print "Qvalue computed:-"+str(q_values)
return action
def perceive(self,agentState,teamState,opponentState,reward,terminal):
self.backward(reward,terminal)
#print "Perceiving"
class DQNTeamAgent(Agent):
def __init__(self, id,teammates,opponents,actions,actionsEnum,model, nb_actions, memory, policy=EpsGreedyQPolicy(),
gamma=.99, batch_size=32, nb_steps_warmup=1000, train_interval=1, memory_interval=1,
target_model_update=10000, delta_range=(-np.inf, np.inf), enable_double_dqn=True,
custom_model_objects={}, processor=None):
# Validate (important) input.
if hasattr(model.output, '__len__') and len(model.output) > 1:
raise ValueError('Model "{}" has more than one output. DQN expects a model that has a single output.'.format(model))
if model.output._keras_shape != (None, nb_actions):
raise ValueError('Model output "{}" has invalid shape. DQN expects a model that has one dimension for each action, in this case {}.'.format(model.output, nb_actions))
super(DQNAgent, self).__init__()
# Soft vs hard target model updates.
if target_model_update < 0:
raise ValueError('`target_model_update` must be >= 0.')
elif target_model_update >= 1:
# Hard update every `target_model_update` steps.
target_model_update = int(target_model_update)
else:
# Soft update with `(1 - target_model_update) * old + target_model_update * new`.
target_model_update = float(target_model_update)
# Parameters.
self.nb_actions = nb_actions
self.gamma = gamma
self.batch_size = batch_size
self.nb_steps_warmup = nb_steps_warmup
self.train_interval = train_interval
self.memory_interval = memory_interval
self.target_model_update = target_model_update
self.delta_range = delta_range
self.enable_double_dqn = enable_double_dqn
self.custom_model_objects = custom_model_objects
# Related objects.
self.model = model
self.memory = memory
self.policy = policy
self.policy._set_agent(self)
self.processor = processor
# State.
self.compiled = False
self.reset_states()
self.id=id
teammates.remove(self.id)
self.teammates= teammates
self.opponents= opponents
self.actions ,self.actionsEnum =actions,actionsEnum
self.minAct,self.maxAct=min(self.actions),max(self.actions)
def get_config(self):
config = {
'nb_actions': self.nb_actions,
'gamma': self.gamma,
'batch_size': self.batch_size,
'nb_steps_warmup': self.nb_steps_warmup,
'train_interval': self.train_interval,
'memory_interval': self.memory_interval,
'target_model_update': self.target_model_update,
'delta_range': self.delta_range,
'enable_double_dqn': self.enable_double_dqn,
'model': get_object_config(self.model),
'memory': get_object_config(self.memory),
'policy': get_object_config(self.policy),
}
if self.compiled:
config['target_model'] = get_object_config(self.target_model)
return config
def compile(self, optimizer, metrics=[]):
metrics += [mean_q] # register default metrics
# We never train the target model, hence we can set the optimizer and loss arbitrarily.
self.target_model = clone_model(self.model, self.custom_model_objects)
self.target_model.compile(optimizer='sgd', loss='mse')
self.model.compile(optimizer='sgd', loss='mse')
# Compile model.
if self.target_model_update < 1.:
# We use the `AdditionalUpdatesOptimizer` to efficiently soft-update the target model.
updates = get_soft_target_model_updates(self.target_model, self.model, self.target_model_update)
optimizer = AdditionalUpdatesOptimizer(optimizer, updates)
def clipped_masked_mse(args):
y_true, y_pred, mask = args
delta = K.clip(y_true - y_pred, self.delta_range[0], self.delta_range[1])
delta *= mask # apply element-wise mask
loss = K.mean(K.square(delta), axis=-1)
# Multiply by the number of actions to reverse the effect of the mean.
loss *= float(self.nb_actions)
return loss
# Create trainable model. The problem is that we need to mask the output since we only
# ever want to update the Q values for a certain action. The way we achieve this is by
# using a custom Lambda layer that computes the loss. This gives us the necessary flexibility
# to mask out certain parameters by passing in multiple inputs to the Lambda layer.
y_pred = self.model.output
y_true = Input(name='y_true', shape=(self.nb_actions,))
mask = Input(name='mask', shape=(self.nb_actions,))
loss_out = Lambda(clipped_masked_mse, output_shape=(1,), name='loss')([y_pred, y_true, mask])
trainable_model = Model(input=[self.model.input, y_true, mask], output=[loss_out, y_pred])
assert len(trainable_model.output_names) == 2
combined_metrics = {trainable_model.output_names[1]: metrics}
losses = [
lambda y_true, y_pred: y_pred, # loss is computed in Lambda layer
lambda y_true, y_pred: K.zeros_like(y_pred), # we only include this for the metrics
]
trainable_model.compile(optimizer=optimizer, loss=losses, metrics=combined_metrics)
self.trainable_model = trainable_model
self.compiled = True
def load_weights(self, filepath):
self.model.load_weights(filepath)
self.update_target_model_hard()
def save_weights(self, filepath, overwrite=False):
self.model.save_weights(filepath, overwrite=overwrite)
def reset_states(self):
self.recent_action = None
self.recent_observation = None
if self.compiled:
self.model.reset_states()
self.target_model.reset_states()
def update_target_model_hard(self):
self.target_model.set_weights(self.model.get_weights())
def process_state_batch(self, batch):
batch = np.array(batch)
if self.processor is None:
return batch
return self.processor.process_state_batch(batch)
def compute_batch_q_values(self, state_batch):
batch = self.process_state_batch(state_batch)
q_values = self.model.predict_on_batch(batch)
assert q_values.shape == (len(state_batch), self.nb_actions)
return q_values
def compute_q_values(self, state):
q_values = self.compute_batch_q_values([state]).flatten()
assert q_values.shape == (self.nb_actions,)
return q_values
def forward(self, observation):
if self.processor is not None:
observation = self.processor.process_observation(observation)
# Select an action.
state = self.memory.get_recent_state(observation)
q_values = self.compute_q_values(state)
action = self.policy.select_action(q_values=q_values)
if self.processor is not None:
action = self.processor.process_action(action)
# Book-keeping.
self.recent_observation = observation
self.recent_action = action
return (action,q_values)
def backward(self, reward, terminal):
# Store most recent experience in memory.
if self.processor is not None:
reward = self.processor.process_reward(reward)
if self.step % self.memory_interval == 0:
self.memory.append(self.recent_observation, self.recent_action, reward, terminal,
training=self.training)
metrics = [np.nan for _ in self.metrics_names]
if not self.training:
# We're done here. No need to update the experience memory since we only use the working
# memory to obtain the state over the most recent observations.
return metrics
# Train the network on a single stochastic batch.
if self.step > self.nb_steps_warmup and self.step % self.train_interval == 0:
experiences = self.memory.sample(self.batch_size)
assert len(experiences) == self.batch_size
# Start by extracting the necessary parameters (we use a vectorized implementation).
state0_batch = []
reward_batch = []
action_batch = []
terminal1_batch = []
state1_batch = []
for e in experiences:
state0_batch.append(e.state0)
state1_batch.append(e.state1)
reward_batch.append(e.reward)
action_batch.append(e.action)
terminal1_batch.append(0. if e.terminal1 else 1.)
# Prepare and validate parameters.
state0_batch = self.process_state_batch(state0_batch)
state1_batch = self.process_state_batch(state1_batch)
terminal1_batch = np.array(terminal1_batch)
reward_batch = np.array(reward_batch)
assert reward_batch.shape == (self.batch_size,)
assert terminal1_batch.shape == reward_batch.shape
assert len(action_batch) == len(reward_batch)
# Compute Q values for mini-batch update.
if self.enable_double_dqn:
# According to the paper "Deep Reinforcement Learning with Double Q-learning"
# (van Hasselt et al., 2015), in Double DQN, the online network predicts the actions
# while the target network is used to estimate the Q value.
q_values = self.model.predict_on_batch(state1_batch)
assert q_values.shape == (self.batch_size, self.nb_actions)
actions = np.argmax(q_values, axis=1)
assert actions.shape == (self.batch_size,)
# Now, estimate Q values using the target network but select the values with the
# highest Q value wrt to the online model (as computed above).
target_q_values = self.target_model.predict_on_batch(state1_batch)
assert target_q_values.shape == (self.batch_size, self.nb_actions)
q_batch = target_q_values[range(self.batch_size), actions]
else:
# Compute the q_values given state1, and extract the maximum for each sample in the batch.
# We perform this prediction on the target_model instead of the model for reasons
# outlined in Mnih (2015). In short: it makes the algorithm more stable.
target_q_values = self.target_model.predict_on_batch(state1_batch)
assert target_q_values.shape == (self.batch_size, self.nb_actions)
q_batch = np.max(target_q_values, axis=1).flatten()
assert q_batch.shape == (self.batch_size,)
targets = np.zeros((self.batch_size, self.nb_actions))
dummy_targets = np.zeros((self.batch_size,))
masks = np.zeros((self.batch_size, self.nb_actions))
# Compute r_t + gamma * max_a Q(s_t+1, a) and update the target targets accordingly,
# but only for the affected output units (as given by action_batch).
discounted_reward_batch = self.gamma * q_batch
# Set discounted reward to zero for all states that were terminal.
discounted_reward_batch *= terminal1_batch
assert discounted_reward_batch.shape == reward_batch.shape
Rs = reward_batch + discounted_reward_batch
for idx, (target, mask, R, action) in enumerate(zip(targets, masks, Rs, action_batch)):
target[action] = R # update action with estimated accumulated reward
dummy_targets[idx] = R
mask[action] = 1. # enable loss for this specific action
targets = np.array(targets).astype('float32')
masks = np.array(masks).astype('float32')
# Finally, perform a single update on the entire batch. We use a dummy target since
# the actual loss is computed in a Lambda layer that needs more complex input. However,
# it is still useful to know the actual target to compute metrics properly.
metrics = self.trainable_model.train_on_batch([state0_batch, targets, masks], [dummy_targets, targets])
metrics = [metric for idx, metric in enumerate(metrics) if idx not in (1, 2)] # throw away individual losses
metrics += self.policy.metrics
if self.processor is not None:
metrics += self.processor.metrics
if self.target_model_update >= 1 and self.step % self.target_model_update == 0:
self.update_target_model_hard()
return metrics
@property
def metrics_names(self):
# Throw away individual losses and replace output name since this is hidden from the user.
assert len(self.trainable_model.output_names) == 2
dummy_output_name = self.trainable_model.output_names[1]
model_metrics = [name for idx, name in enumerate(self.trainable_model.metrics_names) if idx not in (1, 2)]
model_metrics = [name.replace(dummy_output_name + '_', '') for name in model_metrics]
names = model_metrics + self.policy.metrics_names[:]
if self.processor is not None:
names += self.processor.metrics_names[:]
return names
def getAction(self,state):
#print "Going In:"+str(state)
try:
action = self.forward(state)
except:
#print "Error occured"
time.sleep(0.2)
action = self.forward(state)
#bp()
return action
    def perceive(self,agentState,teamState,opponentState,reward,terminal):
        """Feed the latest transition outcome into the agent's learning step.

        Only ``reward`` and ``terminal`` are consumed here; the state
        arguments (``agentState``, ``teamState``, ``opponentState``) are
        accepted for interface compatibility with the environment loop but
        are not used by this method.
        """
        self.backward(reward,terminal)
        #print "Perceiving"
|
from rest_framework import status
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from rest_framework.status import HTTP_200_OK
from river.models import Workflow, DONE
from river_admin.views import get, delete
from river_admin.views.serializers import StateDto, WorkflowObjectStateDto, TransitionDto, TransitionApprovalDto
@get(r'^workflow-object/identify/(?P<workflow_pk>\w+)/(?P<object_id>\w+)/$')
def get_identifier(request, workflow_pk, object_id):
    """Return the human-readable identifier (``str()``) of a workflow object."""
    workflow = get_object_or_404(Workflow.objects.all(), pk=workflow_pk)
    instance = get_object_or_404(workflow.content_type.model_class().objects.all(), pk=object_id)
    return Response(str(instance), status=status.HTTP_200_OK)
@get(r'^workflow-object/current-state/(?P<workflow_pk>\w+)/(?P<object_id>\w+)/$')
def get_current_state(request, workflow_pk, object_id):
    """Serialize and return the workflow object's current state field."""
    workflow = get_object_or_404(Workflow.objects.all(), pk=workflow_pk)
    instance = get_object_or_404(workflow.content_type.model_class().objects.all(), pk=object_id)
    state = getattr(instance, workflow.field_name)
    return Response(StateDto(state).data, status=status.HTTP_200_OK)
@get(r'^workflow-object/current-iteration/(?P<workflow_pk>\w+)/(?P<object_id>\w+)/$')
def get_current_iteration(request, workflow_pk, object_id):
    """Return the iteration number the workflow object is currently on.

    Looks at the DONE transitions that landed on the object's current state
    and returns ``max(iteration) + 1`` (0 when nothing has completed yet).
    """
    workflow = get_object_or_404(Workflow.objects.all(), pk=workflow_pk)
    model_class = workflow.content_type.model_class()
    workflow_object = get_object_or_404(model_class.objects.all(), pk=object_id)
    current_state = getattr(workflow_object, workflow.field_name)
    # ``workflow.transitions`` is already scoped to this workflow, so the
    # redundant ``workflow=workflow`` filter of the original was dropped.
    iterations = workflow.transitions.filter(
        object_id=workflow_object.pk,
        destination_state=current_state,
        status=DONE
    ).values_list("iteration", flat=True)
    # NOTE(review): despite the local name, this is "max completed iteration
    # + 1" -- presumably the iteration currently in progress; confirm with
    # the frontend's expectations.
    last_iteration = max(iterations) + 1 if iterations else 0
    return Response(last_iteration, status=status.HTTP_200_OK)
@delete(r'^workflow-object/delete/(?P<workflow_pk>\w+)/(?P<object_id>\w+)/$')
def delete_workflow_object(request, workflow_pk, object_id):
    """Delete the workflow object identified by ``object_id``.

    Renamed from ``get_identifier``: the original reused the name of the
    identify view above, shadowing it at module level (route registration
    still worked because the decorator runs at definition time, but any
    direct import of ``get_identifier`` silently got the delete view).
    """
    workflow = get_object_or_404(Workflow.objects.all(), pk=workflow_pk)
    model_class = workflow.content_type.model_class()
    workflow_object = get_object_or_404(model_class.objects.all(), pk=object_id)
    workflow_object.delete()
    return Response(status=status.HTTP_200_OK)
@get(r'^workflow-object/state/list/(?P<workflow_id>\w+)/(?P<object_id>\w+)/$')
def list_states(request, workflow_id, object_id):
    """List every (iteration, state) pair touched by the object's transitions.

    Each transition contributes its source state at ``iteration - 1`` and its
    destination state at ``iteration``; duplicates are skipped while the
    first-seen order is preserved.
    """
    workflow = get_object_or_404(Workflow.objects.all(), pk=workflow_id)
    instance = get_object_or_404(workflow.content_type.model_class().objects.all(), pk=object_id)
    states = []
    seen = set()

    def _collect(iteration, state):
        # De-duplicate on (iteration, state pk) while keeping encounter order.
        key = str(iteration) + str(state.pk)
        if key not in seen:
            seen.add(key)
            states.append({"iteration": iteration, "state": state})

    for transition in workflow.transitions.filter(object_id=instance.pk):
        _collect(transition.iteration - 1, transition.source_state)
        _collect(transition.iteration, transition.destination_state)
    return Response(WorkflowObjectStateDto(states, many=True).data, status=HTTP_200_OK)
@get(r'^workflow-object/transition/list/(?P<workflow_id>\w+)/(?P<object_id>\w+)/$')
def list_transitions(request, workflow_id, object_id):
    """Serialize every transition recorded for the given workflow object."""
    workflow = get_object_or_404(Workflow.objects.all(), pk=workflow_id)
    instance = get_object_or_404(workflow.content_type.model_class().objects.all(), pk=object_id)
    transitions = workflow.transitions.filter(object_id=instance.pk)
    return Response(TransitionDto(transitions, many=True).data, status=HTTP_200_OK)
@get(r'^workflow-object/transition-approval/list/(?P<workflow_id>\w+)/(?P<object_id>\w+)/$')
def list_transition_approvals(request, workflow_id, object_id):
    """Serialize every transition approval recorded for the workflow object."""
    workflow = get_object_or_404(Workflow.objects.all(), pk=workflow_id)
    instance = get_object_or_404(workflow.content_type.model_class().objects.all(), pk=object_id)
    approvals = workflow.transition_approvals.filter(object_id=instance.pk)
    return Response(TransitionApprovalDto(approvals, many=True).data, status=HTTP_200_OK)
|
# Several ways of reading a text file (Python Crash Course, ch. 10).
# The raw string keeps backslash sequences in the path from being escapes.
file_path = r"/Users/Kota/Desktop/Python/python/10 Files and Exceptions/pi_digits.txt"  # Absolute path

# 1) Read the whole file at once; read() keeps the trailing newline, so the
#    plain print shows an extra blank line at the end.
with open(file_path) as f:
    print(f.read())

print("-----1-----\n")
# Same content via a relative path, stripping the trailing whitespace.
with open("10 Files and Exceptions/pi_digits.txt") as f:  # Relative path
    print(f.read().rstrip())

print("-----2-----\n")
# 2) Iterate line by line; each line keeps its newline -> extra blank lines.
with open(file_path) as f:
    for raw_line in f:
        print(raw_line)

print("-----3-----\n")
# 3) Same loop, but rstrip() removes each line's trailing newline.
with open(file_path) as f:
    for raw_line in f:
        print(raw_line.rstrip())

print("-----4-----\n")
# 4) readlines() materializes the file as a list of lines.
with open(file_path) as f:
    all_lines = f.readlines()
for raw_line in all_lines:
    print(raw_line.rstrip())
print(all_lines)  # shows that readlines() produced a list

print("-----5-----")
# 5) Concatenate every stripped line into one whitespace-free digit string.
with open(file_path) as f:
    all_lines = f.readlines()
pi_string = ""
for raw_line in all_lines:
    pi_string += raw_line.strip()
print(pi_string)
print(len(pi_string))  # number of characters (digits) in the string
|
'''
@name: tic tac toe with dynamic matrix, task completed for Luka Giorgobiani
@file: responsible for tic-tac-toe game logic
@AUTHOR: DATO BARBAKADZE
@begin date/time: Saturday August 22, year 2020 / 8.36pm
โโโโโโ โโโโโโ โโโโโโ โโโโโโ โโโโโโ โโโโโโ โโโโโโ โโโโโโ
โโโ โ โโโโ โโโโโโ โ โโโโโโโ โโ โโ โ โโโ โ โโโโโ โ โโโ โ โโโ
โ โโโโ โโโโ โโโโโโ โโโ โโโโ โ โโโโ โโโ โโโ โโโโโ โโโ โโโ โ
โ โโโโโโ โโโโโโโโโโ โโโโ โโโโโโโ โ โโโโโโโ โโโ โ โโโโโโโ
โโโโโโโโโโ โโโโโโโโโโโ โโโโโ โโโโโ โโโโโโโโโโโโ โโโโโโโโโโโโโโโ โโโโ
โ โโโ โ โโ โโโโโโ โ โโ โโโโโ โโ โ โโโ โโ โโ โโ โโโโโโ โโ โโ โโ โโโโ
โ โโ โ โ โ โ โโ โโ โ โโ โ โ โ โ โ โโ โ โโ โ โ โ โโ โ โโ
โ โ โ โ โ โ โ โโ โ โ โ โโ โ โ โโ โ
โ โ โ โ โ โ โ โ โ โ โ โ
โ
'''
from threading import Thread
class TicTacLogic:
    """Game logic for tic-tac-toe on a graph_length x graph_length board.

    Cells hold ``None`` (empty), ``tik_val`` (X) or ``tok_val`` (O).
    ``mainWinner`` becomes ``tik_val``/``tok_val`` when a side wins and
    ``draw_val`` when the board fills with no winner.
    """

    def __init__(self):
        self.graph_length = 3
        # Empty board: graph_length x graph_length matrix of None.
        self.graph = [[None for _ in range(self.graph_length)] for _ in range(self.graph_length)]
        # Cell/turn markers: X == 1, O == 0; 2 marks a draw in mainWinner.
        self.tik_val = 1
        self.tok_val = 0
        self.draw_val = 2
        self.mainWinner = None
        # Direction codes consumed by check().
        self.direction_h = 0
        self.direction_v = 1
        self.direction_dy_l = 2
        self.direction_dy_r = 3
        self.thread_list = list()
        # Per-side win counters across rounds.
        self.tok_win_count = 0
        self.tik_win_count = 0
        self.move_count = 0
        self.turn = self.tik_val  # X moves first

    def clear_graph(self):
        """Reset the board for a new round (e.g. after a restart button).

        Also resets the per-round bookkeeping: the original only rebuilt the
        matrix (twice, redundantly), which leaked a stale ``mainWinner`` and
        an inflated ``move_count`` into the next round.
        """
        self.graph = [[None for _ in range(self.graph_length)] for _ in range(self.graph_length)]
        self.turn = self.tik_val
        self.thread_list = list()
        self.move_count = 0
        self.mainWinner = None

    def increase_win_count(self, side):
        """Credit a round win to ``side``.

        :param side: ``tik_val`` (1, X) or ``tok_val`` (0, O); anything else
                     is ignored.
        """
        if side == self.tik_val:
            self.tik_win_count += 1
        elif side == self.tok_val:
            self.tok_win_count += 1

    def coordinates(self, number):
        """Map a flat cell index to (row, column), row-major.

        0 -> (0, 0), 1 -> (0, 1), and so on. Returns None when ``number``
        falls outside the board.
        """
        if 0 <= number < self.graph_length * self.graph_length:
            return divmod(number, self.graph_length)
        return None

    def fill(self, y, x, val):
        """Write ``val`` into cell (y, x) if it is still empty.

        :param y: row index
        :param x: column index
        :param val: cell marker (tik_val or tok_val)
        :returns: False when the cell was already taken, True otherwise
        :rtype: bool
        """
        if self.graph[y][x] is not None:
            return False
        self.graph[y][x] = val
        return True

    # TODO merge the tik and tok branches of tic_tac
    def tic_tac(self, position):
        """Play the next move at flat index ``position`` for whoever's turn it is.

        Returns False on any invalid move (non-numeric input, off-board
        index, occupied cell). On success the turn flips and, once enough
        moves have been played for a win to be possible, the win/draw checks
        run.
        """
        try:
            position = int(position)
        except (TypeError, ValueError):
            return False
        coords = self.coordinates(position)
        if coords is None:
            return False
        y, x = coords
        if self.turn == self.tik_val:
            if not self.fill(y, x, self.tik_val):
                return False
            self.turn = self.tok_val
        elif self.turn == self.tok_val:
            if not self.fill(y, x, self.tok_val):
                return False
            self.turn = self.tik_val
        else:
            return False
        self.move_count += 1
        # A win needs at least 2*n - 1 total moves (n marks for one side).
        if self.move_count >= (self.graph_length + (self.graph_length - 1)):
            self.run_check()

    def check(self, axis=None, direction=None):
        """Scan one line of the board and return its winner, if any.

        Helper shared by check_x_axis, check_y_axis, check_dyagonal_left and
        check_dyagonal_right.

        :param axis: row index for horizontal scans, column index for
                     vertical scans; ignored for the diagonals
        :param direction: one of direction_h / direction_v / direction_dy_l /
                          direction_dy_r
        :returns: the winning side's marker, or None
        """
        winner = None
        last = self.graph_length - 1
        for i in range(self.graph_length):
            if direction == self.direction_h:
                el = self.graph[axis][i]
            elif direction == self.direction_v:
                el = self.graph[i][axis]
            elif direction == self.direction_dy_l:
                el = self.graph[i][i]
            elif direction == self.direction_dy_r:
                el = self.graph[i][last - i]
            else:
                # Unknown direction: the original raised NameError here.
                return None
            if el is None:
                return None  # an empty cell means no winner on this line
            if i == 0:
                winner = el
            elif el != winner:
                return None  # mixed marks on the line
        return winner

    def check_x_axis(self):
        """Record a winner found on any horizontal line into mainWinner."""
        for y in range(self.graph_length):
            winner = self.check(y, self.direction_h)
            if winner is not None:
                self.mainWinner = winner

    def check_y_axis(self):
        """Record a winner found on any vertical line into mainWinner."""
        for x in range(self.graph_length):
            winner = self.check(x, self.direction_v)
            if winner is not None:
                self.mainWinner = winner

    def check_dyagonal_left(self):
        """Record a winner on the main diagonal (starting at [0, 0])."""
        winner = self.check(None, self.direction_dy_l)
        if winner is not None:
            self.mainWinner = winner

    def check_dyagonal_right(self):
        """Record a winner on the anti-diagonal (starting at [0, last])."""
        winner = self.check(None, self.direction_dy_r)
        if winner is not None:
            self.mainWinner = winner

    def run_check(self):
        """Run all four win checks in worker threads, then detect a draw.

        Two fixes versus the original:

        * ``Thread(target=self.check_x_axis())`` *called* each check while
          building the Thread and handed the thread ``None`` as its target;
          the bound methods are now passed uncalled so the threads do the
          work.
        * a full board now only counts as a draw when nobody has won -- the
          original declared a draw even when the final move was the winning
          one.
        """
        for check_fn in (
            self.check_x_axis,
            self.check_y_axis,
            self.check_dyagonal_left,
            self.check_dyagonal_right,
        ):
            self.thread_list.append(Thread(target=check_fn))
        self.thread_join_start()
        self.thread_list = list()
        if self.mainWinner is None and self.graph_fill_check():
            self.mainWinner = self.draw_val

    def thread_join_start(self):
        """Start every queued check thread, then wait for all of them."""
        for th in self.thread_list:
            th.start()
        for th in self.thread_list:
            th.join()

    def graph_fill_check(self):
        """Return True when no empty cell is left on the board."""
        return all(
            cell is not None
            for row in self.graph
            for cell in row
        )
# This module holds only the game logic; start the game from the GUI layer.
if __name__ == "__main__":
    print("run program from gui side")
|
import scrapy
class NbggrItem(scrapy.Item):
    """Scrapy item holding the fields this project's spider populates."""
    # One scraped entry: its title, descriptive text and date.
    title = scrapy.Field()
    description = scrapy.Field()
    date = scrapy.Field()
|
from django.shortcuts import render, reverse
from django.http import HttpResponseRedirect
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from .models import Popups
from .forms import PopupForm
from banner.views import convertir_imagen_webp
def popup(request):
    """Render the popups that are currently active (started, not yet ended)."""
    active_popups = Popups.objects.filter(
        fecha_inicio__lt=timezone.now(),
        fecha_fin__gte=timezone.now(),
    )
    return render(request, 'Popup.html', {'popups': active_popups})
@login_required(login_url='/iniciosesion/')
def listar_popup(request):
    """Render the admin list of every popup (login required)."""
    context = {'popups': Popups.objects.all()}
    return render(request, 'lista_popups.html', context)
@login_required(login_url='/iniciosesion/')
# Create or edit a popup (``pk`` selects an existing one).
def agregar_editar(request, pk=None):
    """Create a new popup or edit the one identified by ``pk``.

    On POST, validates that the uploaded image is a .jpg before saving,
    stamps the current user on the popup and generates a WebP copy of the
    image. On GET (or on validation failure) the form is re-rendered.
    """
    try:
        elemento = Popups.objects.get(pk=pk)
    except Popups.DoesNotExist:
        elemento = None
    # Check the HTTP method instead of ``if request.POST`` so a POST with an
    # empty body is still treated as a submission (the original fell through
    # to a blank form in that case).
    if request.method == 'POST':
        form = PopupForm(data=request.POST, files=request.FILES, instance=elemento)
        if form.is_valid():
            popup = form.save(commit=False)
            extension = popup.extension()
            guardar = False
            # Only .jpg images are accepted.
            if extension == '.jpg':
                guardar = True
                # NOTE(review): width/height limits (1920 x 500) existed but
                # were commented out in the original; re-enable via
                # popup.imagen.width / popup.imagen.height if required.
            else:
                form.add_error('imagen', "Extensión inválida. Solo JPG")
            if guardar:
                popup.usuario = request.user
                popup.save()
                convertir_imagen_webp(es_archivo=True, imagen=popup.imagen)
                return HttpResponseRedirect('/popups/')
    else:
        form = PopupForm(instance=elemento)
    return render(
        request,
        "agregar_editar_popup.html",
        {
            'popups': Popups.objects.all(),
            'form': form,
            'elemento': elemento,
        }
    )
# Delete a popup (AJAX endpoint answering with JSON).
@login_required(login_url='/iniciosesion/')
def eliminar_popup(request):
    """Delete the popup whose pk arrives in the POST body.

    Always answers with JSON carrying a human-readable ``mensaje`` and the
    popup-list URL so the caller can redirect after a successful delete.
    """
    if request.method == 'POST':
        # .get() instead of ['pk'] so a malformed request yields the
        # not-found message rather than a 500 (KeyError).
        pk = request.POST.get('pk')
        try:
            Popups.objects.get(pk=pk).delete()
            mensaje = 'eliminado correctamente'
        except Popups.DoesNotExist:
            mensaje = 'Elemento no encontrado'
    else:
        mensaje = 'No POST'
    return JsonResponse({
        'mensaje': mensaje,
        'url': reverse('popups:lista_popups'),
    })
import atexit
import sys
import time
try:
import curses
except ImportError:
sys.exit('platform not supported')
import psutil
from psutil._common import bytes2human
# --- curses stuff
def tear_down():
    """Restore the terminal to its normal state on exit.

    Registered with atexit; undoes initscr(): disables keypad mode,
    re-enables line buffering and echo, then ends the curses session.
    """
    win.keypad(0)
    curses.nocbreak()
    curses.echo()
    curses.endwin()
# Initialize the curses screen; tear_down is registered so the terminal is
# restored even if the script dies with an exception.
win = curses.initscr()
atexit.register(tear_down)
# endwin() right after initscr() returns the terminal to normal mode until
# the first refresh -- presumably to avoid a blank screen before the first
# draw; TODO confirm.
curses.endwin()
# Next window row that print_line() will write to (module-level cursor).
lineno = 0
def print_line(line, highlight=False):
    """A thin wrapper around curses's addstr().

    Writes ``line`` at the current module-level ``lineno`` and advances it;
    with ``highlight`` the row is drawn in reverse video across the full
    window width.
    """
    global lineno
    try:
        if highlight:
            # Pad to the window width so the highlight covers the whole row.
            line += " " * (win.getmaxyx()[1] - len(line))
            win.addstr(lineno, 0, line, curses.A_REVERSE)
        else:
            win.addstr(lineno, 0, line, 0)
    except curses.error:
        # Writing past the bottom of the window raises curses.error: reset
        # the row cursor, flush what was drawn, and let the caller see it.
        lineno = 0
        win.refresh()
        raise
    else:
        lineno += 1
# --- /curses stuff
def poll(interval):
    """Wait ``interval`` seconds between screen refreshes."""
    # sleep some time
    time.sleep(interval)
def print_header():
    """Print system-related info, above the process list."""
    def get_dashes(perc):
        # Map a 0-100 percentage onto a 40-char bar: "|" used, " " free.
        dashes = "|" * int((float(perc) / 10 * 4))
        empty_dashes = " " * (40 - len(dashes))
        return dashes, empty_dashes
    # cpu usage (one bar per logical CPU)
    percs = psutil.cpu_percent(interval=0, percpu=True)
    for cpu_num, perc in enumerate(percs):
        dashes, empty_dashes = get_dashes(perc)
        line = (" CPU%-2s [%s%s] %5s%%" % (cpu_num, dashes, empty_dashes,
                                           perc))
        print_line(line)
    # memory usage (the original comment said "cpu usage" -- copy/paste slip)
    mem = psutil.virtual_memory()
    dashes, empty_dashes = get_dashes(mem.percent)
    line = " Mem [%s%s] %5s%% %6s / %s" % (
        dashes, empty_dashes,
        mem.percent,
        str(int(mem.used / 1024 / 1024)) + "M",
        str(int(mem.total / 1024 / 1024)) + "M"
    )
    print_line(line)
    # swap usage
    swap = psutil.swap_memory()
    dashes, empty_dashes = get_dashes(swap.percent)
    line = " Swap [%s%s] %5s%% %6s / %s" % (
        dashes, empty_dashes,
        swap.percent,
        str(int(swap.used / 1024 / 1024)) + "M",
        str(int(swap.total / 1024 / 1024)) + "M"
    )
    print_line(line)
def refresh_window():
    """Print results on screen by using curses."""
    global lineno
    # Restart drawing from the top row on every refresh.
    lineno = 0
    curses.endwin()
    win.erase()
    print_header()
    win.refresh()
def main():
    """Refresh loop: draw immediately, then redraw roughly once per second."""
    try:
        interval = 0  # no delay before the very first draw
        while True:
            poll(interval)
            refresh_window()
            interval = 1  # subsequent refreshes wait one second
    except (KeyboardInterrupt, SystemExit):
        # Ctrl-C exits cleanly; the atexit hook restores the terminal.
        pass
if __name__ == '__main__':
    main()
|
# Requires the NLTK "gutenberg" corpus and "punkt" tokenizer data
# (download once with nltk.download()).
from nltk.corpus import gutenberg
from nltk.tokenize import sent_tokenize
# Raw text of the King James Bible from the Gutenberg corpus.
sample = gutenberg.raw("bible-kjv.txt")
# Split into sentences and show sentences 5..14 as a quick sanity check.
tok = sent_tokenize(sample)
print(tok[5:15])
import pymongo
import numpy as np

# Connection settings, kept in one place so they are easy to change.
mongo_uri = 'mongodb://localhost'
mongo_db = 'test'
collection_name = 'ssq_ac'

client = pymongo.MongoClient(mongo_uri)  # connect to mongo
# Use the configured names instead of the hard-coded ``client.test`` /
# ``db.ssq_ac`` attribute access of the original, so changing the constants
# above actually takes effect.
db = client[mongo_db]
collection = db[collection_name]
try:
    # Sort documents oldest-first so the flattened array is chronological.
    results = collection.find().sort('cqssc_date', pymongo.ASCENDING)
    data_array = np.array([])
    i = 0
    for result in results:
        cur_cqssc_str = result['cqssc_str']
        # Entries are stored newest-to-oldest inside each document; flip them.
        cur_cqssc_str.reverse()
        i += 1
        print(i)  # progress indicator (documents processed)
        for cur_number in cur_cqssc_str:
            # Each entry looks like "a|b|...": split and append as ints
            # (the accumulator stays float64, as in the original).
            tmp_str_array = np.array(cur_number.split('|'))
            tmp_num_array = tmp_str_array.astype(int)
            data_array = np.concatenate((data_array, tmp_num_array), axis=0)
    # Save once after the loop; the original rewrote the file on every
    # document, producing O(n) full dumps for the same final contents.
    np.save("filename.npy", data_array)
    # b = np.load("filename.npy")
finally:
    client.close()
import sublime, sublime_plugin
import json, webbrowser, time, os, sys
from aaweibosdk import APIClient, APIError
# Sina Weibo OAuth application key and the helper pages used to obtain an
# access token (the redirect/callback pages live on an external site).
APP_KEY = '2596542044'
GET_CODE_URL = 'http://sublime.duapp.com/weibo/authorize_redirect.php'
CALLBACK_URL = 'http://sublime.duapp.com/weibo/callback.php'
# Token cache file; note os.getcwd() depends on the editor's working
# directory -- presumably the plugin folder, TODO confirm.
ACCESS_TOKEN_FILE = os.path.join(os.getcwd(), 'access_token')
#reload(sys)
#sys.setdefaultencoding('utf8')
def do_weibo_error(weibo, errcode, recall = False):
    """Handle a Weibo API error code.

    Token-related errors (21327, 21501, 21314-21317) offer to fetch a new
    access token; any other code is shown to the user. ``recall`` is kept
    for interface compatibility (unused by the current callers).
    """
    if errcode == 21327 or errcode == 21501 or 21313 < errcode < 21318:
        if sublime.ok_cancel_dialog("ACCESS TOKEN error!\n(Error No. : " + str(errcode) + ")\nGet a new token?", 'yes'):
            weibo.get_token()
    else :
        # Fixed NameError: the original referenced an undefined ``error_code``.
        sublime.error_message('Error No. :' + str(errcode))
def format_statuses(source_statuses):
    """Strip raw Weibo API statuses down to the fields the plugin displays.

    Deleted statuses (and statuses whose retweeted original was deleted) are
    skipped. Each result carries id/user/status/time, plus ``with_pic`` when
    a picture is attached and ``z`` holding either the retweeted status (as
    a nested dict) or, for comment objects, the commented status text.
    """
    def _is_deleted(raw):
        return "deleted" in raw and int(raw["deleted"]) == 1

    formatted = []
    for raw in source_statuses:
        if _is_deleted(raw):
            continue
        entry = {
            "id": raw["id"],
            "user": raw["user"]["name"],
            "status": raw["text"],
            "time": raw["created_at"]
        }
        if "retweeted_status" in raw:
            original = raw["retweeted_status"]
            if _is_deleted(original):
                continue
            retweet = {
                "id": original["id"],
                "user": original["user"]["name"],
                "status": original["text"],
                "time": original["created_at"]
            }
            if "original_pic" in original:
                retweet["with_pic"] = original["original_pic"]
            entry["z"] = retweet
        if "status" in raw:
            # Comment objects embed the status being commented on.
            entry["z"] = raw["status"]["text"]
        if "original_pic" in raw:
            entry["with_pic"] = raw["original_pic"]
        formatted.append(entry)
    return formatted
class weibo:
    """Thin wrapper around the Weibo API client with token persistence.

    Fixes versus the original: the Python-2-only ``except APIError, data``
    syntax was replaced with ``except ... as ...`` (valid on Python 2.6+ and
    3.x alike), and a missing token file no longer crashes plugin startup.
    """

    def __init__(self):
        self.wb = APIClient(APP_KEY, None, CALLBACK_URL)
        self.get_local_token()
        # Expose the SDK's request helpers directly on the wrapper.
        self.get = self.wb.get
        self.post = self.wb.post

    def get_local_token(self):
        """Load a previously saved access token from disk, if one exists.

        The original raised IOError on a fresh install (no token file yet),
        aborting __init__; a missing/unreadable file is now ignored and the
        user is asked for a token on first use.
        """
        try:
            access_token_file = open(ACCESS_TOKEN_FILE)
            token = access_token_file.read()
            access_token_file.close()
        except IOError:
            return
        if token:
            self.set_token(token, False)

    def set_token(self, token='', wtf=True):
        """Activate ``token``; when ``wtf`` is True, also persist it to disk."""
        if not token:
            return
        if wtf:
            try:
                access_token_file = open(ACCESS_TOKEN_FILE, 'w')
                access_token_file.write(token)
                access_token_file.close()
            except IOError:
                sublime.status_message('Write token_file error!')
            else:
                sublime.status_message('TOKEN Saved.')
        # Weibo access tokens are valid for two weeks (1209600 seconds).
        self.wb.set_access_token(token, time.time() + 1209600)

    def get_token(self, open_browser=True):
        """Walk the user through fetching a new access token.

        Optionally opens the authorization page in a browser, then always
        shows an input panel for pasting the token.
        """
        if open_browser:
            if sublime.ok_cancel_dialog("Open browser to get your ACCESS TOKEN?", "open"):
                webbrowser.open_new(GET_CODE_URL)
        sublime.active_window().show_input_panel("Input ACCESS TOKEN here: ", "", self.token_input_done, None, None)

    def token_input_done(self, text):
        """Input-panel callback: save the token, or re-prompt when empty."""
        if not text:
            self.get_token(False)
            sublime.message_dialog("Please input your Access TOKEN!")
        else :
            self.set_token(text)

    def send(self, text):
        """Post ``text`` as a new status; returns True on success, else None."""
        if 0 < len(text) <= 140:
            try:
                sublime.status_message("Sending...!")
                self.wb.post.statuses__update(status = text)
            except APIError as data:
                do_weibo_error(self, int(data.error_code))
            except Exception as e:
                sublime.error_message(str(e))
            else:
                sublime.status_message("Status has been sent!")
                return True

    def get_tweets(self, func, key, format, **kw):
        """Fetch statuses via ``func`` and return them as pretty-printed JSON.

        When ``format`` is truthy, the payload under ``key`` is first reduced
        with format_statuses(). Returns None when the API call fails.
        """
        ret = {}
        sublime.status_message("Getting status...")
        try:
            ret = func(**kw)
            if format :
                ret = format_statuses(ret[key])
            return json.dumps(ret, sort_keys=True, indent=4, ensure_ascii=False)
        except APIError as data:
            do_weibo_error(self, int(data.error_code))
        except Exception as e:
            sublime.error_message(str(e))

    def get_timlines(self, format=False, **kw):
        """Home timeline (name typo kept for compatibility with callers)."""
        return self.get_tweets(self.wb.get.statuses__home_timeline, "statuses", format, **kw)

    def get_at_me(self, format=False, **kw):
        """Statuses mentioning the current user."""
        return self.get_tweets(self.wb.get.statuses__mentions, "statuses", format, **kw)

    def get_to_me(self, format=False, **kw):
        """Comments addressed to the current user."""
        return self.get_tweets(self.wb.get.comments__to_me, "comments", format, **kw)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.