index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
26,663
|
BenjiLee/PoloniexAnalyzer
|
refs/heads/master
|
/settings.py
|
# Used for mocking the API responses. Requires data to work.
# NOTE(review): presumably read by public_api/trading_api (see import graph) to
# serve previously-saved responses instead of live Poloniex calls -- confirm.
MOCK_API_RESPONSE = False
|
{"/poloniex.py": ["/analyzer.py"], "/poloniex_apis/public_api.py": ["/dev_utils.py", "/settings.py"], "/poloniex_apis/api_models/lending_history.py": ["/utils.py"], "/poloniex_apis/trading_api.py": ["/dev_utils.py", "/settings.py", "/poloniex_apis/api_key_secret_util.py"], "/analyzer.py": ["/printer.py", "/poloniex_apis/api_models/balances.py", "/poloniex_apis/api_models/deposit_withdrawal_history.py", "/poloniex_apis/api_models/lending_history.py", "/poloniex_apis/api_models/ticker_price.py", "/poloniex_apis/api_models/trade_history.py", "/poloniex_apis/public_api.py"], "/poloniex_apis/api_models/deposit_withdrawal_history.py": ["/poloniex_apis/api_models/ticker_price.py"]}
|
26,664
|
BenjiLee/PoloniexAnalyzer
|
refs/heads/master
|
/analyzer.py
|
"""
Analyzer for running analysis on given data models :)
Hopefully all the methods in here will be uses for analyzing the data. If that
stops being true and if I were a good developer (it wouldn't have happened in
the first place) I would update this documentation.
"""
import operator
import time
from collections import defaultdict
import printer
from poloniex_apis import public_api
from poloniex_apis import trading_api
from poloniex_apis.api_models.balances import Balances
from poloniex_apis.api_models.deposit_withdrawal_history import DWHistory
from poloniex_apis.api_models.lending_history import LendingHistory
from poloniex_apis.api_models.ticker_price import TickerData
from poloniex_apis.api_models.trade_history import TradeHistory
from poloniex_apis.public_api import return_usd_btc
def get_overview():
    """Print deposit/withdrawal history plus lifetime earnings/losses in BTC,
    USD and as a percentage of everything deposited."""
    balances = Balances()
    dw_history = DWHistory(trading_api.return_deposits_withdrawals())
    deposits, withdrawals = dw_history.get_dw_history()
    printer.print_dw_history(deposits, withdrawals)

    deposited_btc = dw_history.get_btc_balance(TickerData())
    current_btc = balances.get_btc_total()
    usd_per_btc = return_usd_btc()

    # Percentage of the net deposits the account is currently worth.
    pct_of_deposits = float("{:.4}".format(current_btc / deposited_btc * 100))
    btc_gain = current_btc - deposited_btc
    usd_gain = "{:.2f}".format(btc_gain * usd_per_btc)
    printer.print_get_overview_results(
        btc_balance_sum=btc_gain,
        usd_balance_sum=usd_gain,
        balance_percentage=pct_of_deposits
    )
def get_detailed_overview():
    """Print, for every traded pair, the lifetime amount invested, the current
    position, and what it would be worth sold at the current price
    (colour-coded gain/loss)."""
    ticker_data = TickerData()
    trade_history = TradeHistory().history
    # BUG FIX: the wrapped literal was missing the space before "you will",
    # producing "ETC_BTCyou will only see...".
    print("Note: The values below are for the particular currency pair you traded"
          " against. For example, if you traded BTC_ETH -> ETH_ETC -> ETC_BTC"
          " you will only see the value traded against each pair in isolation.")
    for pair in trade_history:
        transaction, settlement = pair.split("_")[0], pair.split("_")[1]
        transaction_sum = 0
        settlement_sum = 0
        cross_pair = list(trade_history[pair])
        for trade in cross_pair:
            if trade['type'] == 'buy':
                transaction_sum += float(trade["total"])
                settlement_sum += float(trade["amount"])  # Total
                settlement_sum -= float(trade["amount"]) * float(trade["fee"])  # Fee
            else:
                # For some reason, the total for sells do not include the
                # fee so we include it here.
                transaction_sum -= (float(trade["total"]) * (1 - float(trade["fee"])))
                settlement_sum -= float(trade["amount"])
        if settlement_sum > -1:  # Set to 0.000001 to hide 0 balances
            transaction_equivalent = float(ticker_data.get_price(pair)) * settlement_sum
            transaction_balance = transaction_equivalent - transaction_sum
            total_usd = float("{:.4}".format(transaction_balance * ticker_data.get_price("USDT_" + transaction)))
            print("--------------{}----------------".format(pair))
            print("Over your account's lifetime, you have invested {} {}".format(transaction_sum, transaction))
            print("to achieve your current balance of {} {}/{} {}".format(settlement_sum, settlement, transaction_equivalent, transaction))
            print("If you sold it all at the current price (assuming enough sell orders)")
            if transaction_balance < 0:
                print(printer.bcolors.RED, end=' ')
            else:
                print(printer.bcolors.GREEN, end=' ')
            print("{} {}/{} USDT".format(transaction_balance, transaction, total_usd))
            print(printer.bcolors.END_COLOR, end=' ')
def calculate_fees():
    """Print every trading fee paid: per pair, summed per base currency, and
    converted into USDT and BTC totals."""
    # TODO Should this take in the data models or call it itself
    all_fees = TradeHistory().get_all_fees()
    ticker_data = TickerData()
    per_base = defaultdict(float)

    print("--------------All Fees--------------")
    for currency_pair, fees in all_fees.items():
        base = currency_pair.split("_")[0]
        print("{}={} {}".format(currency_pair, fees, base))
        per_base[base] += fees

    total_usdt = 0
    print("-----------Total per base-----------")
    for currency, fees in per_base.items():
        print("{}={}".format(currency, fees))

    print("-------------Total Fees-------------")
    for currency, fees in per_base.items():
        # Every base coin will have USDT pairing.
        if currency == "USDT":
            total_usdt += fees
        else:
            total_usdt += float(ticker_data.get_price("USDT_" + currency)) * fees
    print("Total fees in USDT={}".format(total_usdt))
    # Convert USDT to BTC for BTC total
    print("Total fees in BTC={}".format(total_usdt / float(ticker_data.get_price("USDT_BTC"))))
def get_change_over_time():
    """Print price change over several windows (1H..1W) for every BTC-quoted
    currency whose 24h volume exceeds 1000 BTC.

    Fetches a week of 5-minute candles per currency, prints the percent change
    for each window, and sleeps between currencies to go easy on the API.
    (Doc fix: this prints results; it does not return a list.)
    """
    threshold = 1000  # minimum 24h volume, in BTC
    currency_list = []
    volume_data = public_api.return_24_hour_volume()
    for item in volume_data:
        if item.startswith('BTC'):
            if float(volume_data.get(item).get('BTC')) > threshold:
                currency_list.append(item)
    currencies = {}
    for currency_pair in currency_list:
        currencies[currency_pair] = float(volume_data.get(currency_pair).get(u'BTC'))
    # Highest-volume pairs first.
    sorted_currencies = sorted(currencies.items(), key=operator.itemgetter(1), reverse=True)
    period = 300  # candle length in seconds
    # 1H, 24H, 2D, 3D, 4D, 1W expressed in seconds.
    time_segments = [3600, 86400, 172800, 259200, 345600, 604800]
    print("Change over time for BTC traded currencies with volume > 1000 BTC")
    for currency in sorted_currencies:
        now = int(time.time())
        last_week = now - 604800
        history = public_api.return_chart_data(
            period=period,
            currency_pair=currency[0],
            start=last_week,
        )
        time_segment_changes = []
        for segment in time_segments:
            try:
                time_segment_changes.append(
                    _to_percent_change(history[-1]['close'] /
                                       history[-int((segment / period - 1))]['close']))
            # BUG FIX: a history shorter than the window raises IndexError
            # (list index out of range), which the old KeyError-only handler
            # let propagate and crash the loop.
            except (IndexError, KeyError):
                time_segment_changes.append("No data")
        print("Currency: {}, Volume: {}".format(currency[0], currency[1]))
        print(" 1H: {}, 24H: {}, 2D: {}, 3D: {}, 4D: {}, 1W: {}".format(*time_segment_changes))
        time.sleep(2)
def get_lending_history():
    """Print total earnings, fees and the duration-weighted average rate for
    every currency the account has lent out."""
    totals = {}
    for loan in LendingHistory().history:
        currency = loan['currency']
        if currency not in totals:
            totals[currency] = defaultdict()
            for field in ('earnings', 'fees', 'amount', 'duration', 'weighted_rate'):
                totals[currency][field] = 0
        entry = totals[currency]
        entry['earnings'] += float(loan['earned'])
        entry['fees'] += float(loan['fee'])
        entry['amount'] += float(loan['amount'])
        entry['duration'] += float(loan['duration'])
        # Weight each loan's rate by its duration so longer loans dominate.
        entry['weighted_rate'] += float(loan['rate']) * float(loan['duration'])
    for currency, entry in totals.items():
        average_rate = float("{:.4}".format(entry['weighted_rate'] / entry['duration'] * 100))
        printer.print_get_lending_history(
            currency=currency,
            earnings=entry['earnings'],
            fees=entry['fees'],
            average_rate=average_rate
        )
def _to_percent_change(number):
if not isinstance(number, float):
number = float(number)
return "{:.2f}%".format(number * 100 - 100)
|
{"/poloniex.py": ["/analyzer.py"], "/poloniex_apis/public_api.py": ["/dev_utils.py", "/settings.py"], "/poloniex_apis/api_models/lending_history.py": ["/utils.py"], "/poloniex_apis/trading_api.py": ["/dev_utils.py", "/settings.py", "/poloniex_apis/api_key_secret_util.py"], "/analyzer.py": ["/printer.py", "/poloniex_apis/api_models/balances.py", "/poloniex_apis/api_models/deposit_withdrawal_history.py", "/poloniex_apis/api_models/lending_history.py", "/poloniex_apis/api_models/ticker_price.py", "/poloniex_apis/api_models/trade_history.py", "/poloniex_apis/public_api.py"], "/poloniex_apis/api_models/deposit_withdrawal_history.py": ["/poloniex_apis/api_models/ticker_price.py"]}
|
26,665
|
BenjiLee/PoloniexAnalyzer
|
refs/heads/master
|
/printer.py
|
"""
Some of the logic for printing and the print statements.
"""
class bcolors:
    # ANSI escape sequences used to colour terminal output.
    GREEN = '\033[92m'  # gains / positive values
    RED = '\033[91m'  # losses / negative values
    END_COLOR = '\033[0m'  # reset all attributes
def print_get_overview_results(btc_balance_sum, usd_balance_sum, balance_percentage):
    """Print the account's lifetime gain/loss plus a tongue-in-cheek verdict
    chosen from the percentage of the original deposits still held."""
    print("\nNote: Get Overview currently does not take the margin account into account.")
    print("---Earnings/Losses Against Balance--")
    print("{} BTC/${}".format(btc_balance_sum, usd_balance_sum))
    # Upper bound (exclusive) -> verdict, checked in ascending order.
    verdicts = (
        (100, "Stop trading!"),
        (110, "Still worse than an index."),
        (150, "Not bad"),
        (175, "You belong here"),
        (200, "Like striking crypto-oil"),
        (250, "On your way to becoming a Bitcoin millionaire"),
    )
    for limit, verdict in verdicts:
        if balance_percentage < limit:
            print(verdict)
            break
    else:
        print("Cryptocurrencies can get heavy, you should send them over to me for safe keeping!")
    print("{}%".format(balance_percentage))
def print_get_lending_history(currency, earnings, fees, average_rate):
    """Print one currency's lending summary: earnings, fees, average rate."""
    summary = [
        "---------Your {} Lending History---------".format(currency),
        "Total earned: {} {}".format(earnings, currency),
        "Total fees: {} {}".format(fees, currency),
        "Average rate: {}%".format(average_rate),
    ]
    print("\n".join(summary))
def print_dw_history(deposits, withdrawals):
    """Print a per-currency table of deposits, withdrawals and the net total.

    A currency appearing in only one of the two dicts is treated as 0 in the
    other.
    """
    print("-----Deposit/Withdrawal History-----")
    print("------------------------------------")
    print("--Currency=Deposit-Withdrawal=Total-")
    for currency in set(deposits) | set(withdrawals):
        deposited = deposits.get(currency, 0)
        withdrawn = withdrawals.get(currency, 0)
        print("{}={}-{}={}".format(currency, deposited, withdrawn, deposited - withdrawn))
|
{"/poloniex.py": ["/analyzer.py"], "/poloniex_apis/public_api.py": ["/dev_utils.py", "/settings.py"], "/poloniex_apis/api_models/lending_history.py": ["/utils.py"], "/poloniex_apis/trading_api.py": ["/dev_utils.py", "/settings.py", "/poloniex_apis/api_key_secret_util.py"], "/analyzer.py": ["/printer.py", "/poloniex_apis/api_models/balances.py", "/poloniex_apis/api_models/deposit_withdrawal_history.py", "/poloniex_apis/api_models/lending_history.py", "/poloniex_apis/api_models/ticker_price.py", "/poloniex_apis/api_models/trade_history.py", "/poloniex_apis/public_api.py"], "/poloniex_apis/api_models/deposit_withdrawal_history.py": ["/poloniex_apis/api_models/ticker_price.py"]}
|
26,666
|
BenjiLee/PoloniexAnalyzer
|
refs/heads/master
|
/poloniex_apis/api_models/deposit_withdrawal_history.py
|
from collections import defaultdict
from poloniex_apis.api_models.ticker_price import TickerData
class DWHistory:
    """Aggregates a Poloniex returnDepositsWithdrawals response per currency
    and converts the net balance into BTC."""

    def __init__(self, history):
        # history: raw API dict with 'deposits' and 'withdrawals' lists of
        # {'currency': ..., 'amount': ...} entries.
        self.withdrawals = defaultdict(float)
        self.deposits = defaultdict(float)
        self.history = history

    def get_dw_history(self):
        """Sum deposit and withdrawal amounts per currency.

        Returns the (deposits, withdrawals) dicts, which are also kept on the
        instance for get_btc_balance.
        """
        # defaultdict(float) makes the old explicit membership checks redundant.
        for deposit in self.history['deposits']:
            self.deposits[deposit['currency']] += float(deposit['amount'])
        for withdrawal in self.history['withdrawals']:
            self.withdrawals[withdrawal['currency']] += float(withdrawal['amount'])
        return self.deposits, self.withdrawals

    def get_btc_balance(self, ticker):
        """Return net deposits minus withdrawals, expressed in BTC, using
        *ticker* (must expose get_price(pair) -> numeric) for conversion."""
        balance = 0
        for symbol, amount in self.deposits.items():
            balance += self._to_btc(symbol, amount, ticker)
        for symbol, amount in self.withdrawals.items():
            balance -= self._to_btc(symbol, amount, ticker)
        return balance

    @staticmethod
    def _to_btc(symbol, amount, ticker):
        """Convert *amount* of *symbol* into BTC via *ticker* prices."""
        if symbol == u"USDT":
            # BUG FIX: USDT_BTC quotes USDT per BTC (see analyzer's
            # "USDT_" + currency conversions), so converting USDT to BTC
            # divides by the price. The original multiplied, and then -- for
            # lack of an elif -- also fell into the BTC_<sym> branch below,
            # double-counting every USDT entry.
            return amount / ticker.get_price("USDT_BTC")
        if symbol != u'BTC':
            return amount * ticker.get_price("BTC_" + symbol)
        return amount
|
{"/poloniex.py": ["/analyzer.py"], "/poloniex_apis/public_api.py": ["/dev_utils.py", "/settings.py"], "/poloniex_apis/api_models/lending_history.py": ["/utils.py"], "/poloniex_apis/trading_api.py": ["/dev_utils.py", "/settings.py", "/poloniex_apis/api_key_secret_util.py"], "/analyzer.py": ["/printer.py", "/poloniex_apis/api_models/balances.py", "/poloniex_apis/api_models/deposit_withdrawal_history.py", "/poloniex_apis/api_models/lending_history.py", "/poloniex_apis/api_models/ticker_price.py", "/poloniex_apis/api_models/trade_history.py", "/poloniex_apis/public_api.py"], "/poloniex_apis/api_models/deposit_withdrawal_history.py": ["/poloniex_apis/api_models/ticker_price.py"]}
|
26,667
|
MichaelTowson/Stop_Spending_Money
|
refs/heads/master
|
/ssm_application/apps.py
|
from django.apps import AppConfig
class SsmApplicationConfig(AppConfig):
    """Django application configuration for the ssm_application app."""
    name = 'ssm_application'
|
{"/ssm_application/views.py": ["/ssm_application/models.py"]}
|
26,668
|
MichaelTowson/Stop_Spending_Money
|
refs/heads/master
|
/ssm_application/urls.py
|
from django.urls import path
from . import views
urlpatterns = [
    # Render Routes: views that render a template directly.
    path('', views.index),
    path('register', views.register),
    path('dashboard', views.dashboard),
    path('goals', views.goals),
    path('about', views.about),
    # Action/Redirect Routes: form/link handlers that redirect when done.
    path('logout', views.logout),
    path('reg_user', views.register_user),
    path('log_in', views.log_in),
    path('goals/add_goal', views.add_goal),
    path('goals/delete_goal/<int:id>', views.delete_goal),
    path('log_trans', views.log_trans),
    path('goals/add_start_date', views.add_start_date),
    path('delete_trans/<int:trans_id>', views.delete_trans),
]
|
{"/ssm_application/views.py": ["/ssm_application/models.py"]}
|
26,669
|
MichaelTowson/Stop_Spending_Money
|
refs/heads/master
|
/ssm_application/migrations/0001_initial.py
|
# Generated by Django 2.2 on 2020-12-23 01:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Goal, User and Transaction
    # tables, then wires the Goal -> User foreign key.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Goal',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('category', models.CharField(max_length=30)),
                ('amount', models.FloatField()),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=30)),
                ('last_name', models.CharField(max_length=30)),
                ('email', models.CharField(max_length=80)),
                ('password', models.CharField(max_length=255)),
                # NOTE(review): default here ('2020-02-02') differs from the
                # current model default ('2020-01-01') -- historical snapshot.
                ('plan_start_date', models.DateField(default='2020-02-02')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Transaction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField()),
                ('amount', models.FloatField()),
                ('description', models.CharField(max_length=50)),
                ('planned', models.CharField(max_length=3)),
                ('happiness', models.CharField(max_length=20)),
                ('goal', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='transactions', to='ssm_application.Goal')),
            ],
        ),
        migrations.AddField(
            model_name='goal',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='goals', to='ssm_application.User'),
        ),
    ]
|
{"/ssm_application/views.py": ["/ssm_application/models.py"]}
|
26,670
|
MichaelTowson/Stop_Spending_Money
|
refs/heads/master
|
/ssm_application/models.py
|
from django.db import models
import bcrypt, re
class Manager(models.Manager):
    """Custom manager holding form-validation helpers for the User model."""

    def registerUser_validator(self, postData):
        """Validate registration form data.

        Returns a dict mapping field name -> error message; empty when valid.
        """
        errors = {}
        # validating email
        EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z._-]+\.[a-zA-Z]+$')
        if not EMAIL_REGEX.match(postData['email']):  # test whether a field matches the pattern
            errors['email'] = "Invalid email address!"
        if User.objects.filter(email=postData['email']):
            errors["email"] = "Email already Exist"
        # BUG FIX: the checks used `< 3` (requiring 3+ chars) while the
        # user-facing message promises "at least 2 characters"; the condition
        # now matches the message.
        if len(postData['first_name']) < 2:
            errors["first_name"] = "Should be at least 2 characters"
        if len(postData['last_name']) < 2:
            errors["last_name"] = "Should be at least 2 characters"
        # validating password characters
        if len(postData['password']) < 8:
            errors["password"] = "Please make sure password is at least 8 characters"
        return errors

    def loginValidator(self, postData):
        """Validate login form data (email shape and existence only).

        The password itself is checked in views.log_in with bcrypt.
        Returns a dict of errors; empty when the email looks usable.
        """
        errors = {}
        # validating email
        EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z._-]+\.[a-zA-Z]+$')
        if not EMAIL_REGEX.match(postData['email']):  # test whether a field matches the pattern
            errors['not_email'] = "Invalid email address!"
            return errors
        if not User.objects.filter(email=postData['email']):
            errors['not_email'] = "Email doesn't Exist!"
        return errors
class User(models.Model):
    # Account record; passwords are stored as bcrypt hashes (see views.register_user).
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)
    # NOTE(review): no unique constraint -- uniqueness is only enforced by
    # Manager.registerUser_validator.
    email = models.CharField(max_length=80)
    password = models.CharField(max_length=255)  # bcrypt hash, not plaintext
    # '2020-01-01' doubles as a "not yet set" sentinel (see views.goals).
    plan_start_date = models.DateField(default='2020-01-01')
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    objects = Manager()  # custom manager providing the validators above
    #goals = a list of goals that the user has made.
class Goal(models.Model):
    # A spending category with a budgeted amount, owned by one user.
    user = models.ForeignKey(User, related_name = "goals", on_delete = models.CASCADE)
    category = models.CharField(max_length=30)
    amount = models.FloatField()  # budgeted amount for this category
    #transactions - a list of transactions underneath each goal.
class Transaction(models.Model):
    # One logged purchase, always attached to a goal (cascade-deleted with it).
    goal = models.ForeignKey(Goal, related_name = "transactions", on_delete = models.CASCADE)
    date = models.DateField()
    amount = models.FloatField()
    description = models.CharField(max_length = 50)
    planned = models.CharField(max_length = 3)
    #OPTIONS: (1) Yes, (2) No
    happiness = models.CharField(max_length = 20)
    #OPTIONS: (1) Very Happy, (2) Briefly Happy, (3) The Same, (4) Less Happy/Regret
|
{"/ssm_application/views.py": ["/ssm_application/models.py"]}
|
26,671
|
MichaelTowson/Stop_Spending_Money
|
refs/heads/master
|
/ssm_application/views.py
|
from django.shortcuts import render, HttpResponse, redirect
from ssm_application.models import User, Goal, Transaction
from django.contrib import messages
import datetime
import bcrypt
#Render Template Views
def index(request):
    """Render the public landing page."""
    return render(request, "index.html")
def register(request):
    """Render the registration form page."""
    return render(request, "register.html")
def dashboard(request):
    """Render the dashboard with per-goal spending totals and the time left in
    the user's 7-day plan; redirect anonymous visitors to the landing page."""
    if 'userid' not in request.session:
        return redirect("/")
    logged_user = User.objects.get(id=request.session['userid'])
    user_goal = logged_user.goals.all()

    # Calculate the remaining time in the plan, which will be displayed on the
    # render. plan_start_date is a date; promote it to a midnight datetime
    # directly instead of the old strftime/strptime round trip via a string.
    start = datetime.datetime.combine(logged_user.plan_start_date, datetime.time())
    end_date = start + datetime.timedelta(days=7)
    time_remaining = end_date - datetime.datetime.now()  # timedelta

    # Per-goal totals: amount spent and remaining balance.
    trans_dict = {}
    bal_trans = {}
    for goal in user_goal:
        spent = 0  # renamed from `sum`, which shadowed the builtin
        for trans in goal.transactions.all():
            spent += trans.amount
        trans_dict[goal.category] = spent
        bal_trans[goal.category] = goal.amount - spent
    print(trans_dict)

    # Context we are passing to webpage
    context = {
        'user': logged_user,
        'user_goals': user_goal,
        'time_remaining': time_remaining,
        'goal_trans': trans_dict,
        'goal_bal': bal_trans,
    }
    return render(request, "dashboard.html", context)
def goals(request):
    """Render the goals page, flagging whether the user has chosen a real plan
    start date yet; redirect anonymous visitors to the landing page."""
    if 'userid' not in request.session:
        return redirect("/")
    logged_user = User.objects.get(id=request.session['userid'])
    # "2020-01-01" is the model default for a new user. It determines if the
    # "goals" page should display the start date for the plan or not.
    valid_start = 0 if str(logged_user.plan_start_date) == "2020-01-01" else 1
    print(valid_start)
    context = {
        'user': logged_user,
        'user_goals': logged_user.goals.all(),
        'valid_start': valid_start
    }
    return render(request, "goals.html", context)
def about(request):
    """Render the static about page."""
    return render(request, "about.html")
def log_trans(request):
    """Create a Transaction from the POSTed form under the chosen goal and
    return to the dashboard; anonymous visitors go back to the landing page."""
    if 'userid' not in request.session:
        return redirect("/")
    parent_goal = Goal.objects.get(id=request.POST['category'])
    Transaction.objects.create(
        goal=parent_goal,
        date=request.POST['purchase_date'],
        amount=request.POST['amt_spent'],
        description=request.POST['desc'],
        planned=request.POST['Plan_or_not'],
        happiness=request.POST['how_happy'],
    )
    return redirect("/dashboard")
#Login/Registration Views
def register_user(request):
    """Validate the registration form, create the user with a bcrypt-hashed
    password, and redirect home (or back to the form with error messages)."""
    errors = User.objects.registerUser_validator(request.POST)
    if errors:
        for key, value in errors.items():
            messages.error(request, value, extra_tags=key)
        return redirect("/register")
    pw_hash = bcrypt.hashpw(request.POST['password'].encode(), bcrypt.gensalt()).decode()
    User.objects.create(
        first_name=request.POST['first_name'],
        last_name=request.POST['last_name'],
        email=request.POST['email'],
        password=pw_hash,
    )
    # Grammar fix in the flash message: was "Successfully Register".
    messages.success(request, "Successfully Registered", extra_tags='reg_success')
    return redirect("/")
def log_in(request):
    """Authenticate by email/password; on success store the user id in the
    session and go to the dashboard, otherwise flash an error and go home."""
    errors = User.objects.loginValidator(request.POST)
    if errors:
        for key, value in errors.items():
            messages.error(request, value, extra_tags=key)
        return redirect("/")
    candidates = User.objects.filter(email=request.POST['email'])
    if candidates:
        logged_user = candidates[0]
        if bcrypt.checkpw(request.POST['password'].encode(), logged_user.password.encode()):
            request.session['userid'] = logged_user.id
            return redirect('/dashboard')
    messages.error(request, "Password doesn't match!", extra_tags='pw_not_match')
    return redirect("/")
def logout(request):
    """Clear the whole session and return to the landing page."""
    request.session.flush()
    return redirect("/")
#Goals Page Views
def add_goal(request):
    """Create a goal for the logged-in user from the POSTed category/amount,
    then return to the goals page."""
    #Add validator check ---------------!
    owner = User.objects.get(id=request.session['userid'])
    category = request.POST['category']
    # Normalise the amount to a float rounded to 2 decimal places.
    amount = round(float(request.POST['amount']), 2)
    Goal.objects.create(user=owner, category=category, amount=amount)
    return redirect("/goals")
def delete_goal(request, id):
    """Delete a goal, but only when it belongs to the logged-in user.

    BUG FIX: previously an anonymous request raised KeyError on
    request.session['userid']; now it redirects home, mirroring delete_trans.
    """
    if 'userid' in request.session:
        this_goal = Goal.objects.get(id=id)
        if request.session['userid'] == this_goal.user.id:
            this_goal.delete()
        return redirect("/goals")
    return redirect("/")
def delete_trans(request, trans_id):
    """Delete a transaction if it belongs to one of the logged-in user's goals.

    SECURITY FIX: the ownership check was missing, so any logged-in user could
    delete anyone's transaction by id; now mirrors delete_goal's owner check.
    """
    if 'userid' in request.session:
        this_trans = Transaction.objects.get(id=trans_id)
        if this_trans.goal.user.id == request.session['userid']:
            this_trans.delete()
        return redirect("/dashboard")
    return redirect("/")
def add_start_date(request):
    """Store the POSTed plan start date on the user unless it lies more than a
    day in the past, then return to the goals page."""
    user = User.objects.get(id=request.session['userid'])
    selected_date = request.POST['selected_date']
    print('Here is the first selected date:')
    print(selected_date)
    selected_date = datetime.datetime.strptime(selected_date, '%Y-%m-%d')
    print('Here is the modified selected date:')
    print(selected_date)
    # Whole seconds between the chosen date (midnight) and now; negative means
    # the chosen date is in the past. 86400 seconds = one day of grace.
    offset_seconds = int((selected_date - datetime.datetime.now()).total_seconds())
    print(offset_seconds)
    if offset_seconds <= -86400:
        print("Invalid: Date is in the past")
    else:
        print("Date is valid.")
        user.plan_start_date = request.POST['selected_date']
        user.save()
    return redirect("/goals")
|
{"/ssm_application/views.py": ["/ssm_application/models.py"]}
|
26,731
|
riddlet/offender_registry
|
refs/heads/master
|
/offender_registry/spiders/offender_spider.py
|
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from offender_registry.items import OffenderRegistryItem
import re
class offender_spider(CrawlSpider):
    """Crawl the Maryland sex-offender registry search results and yield one
    OffenderRegistryItem per offender detail page.

    Most item fields below are filled with the same broad xpath dump on
    purpose; the per-field cleanup happens later in the item pipeline.
    """
    name = "maryland"
    allowed_domains = ["www.dpscs.state.md.us"]
    start_urls = [
        "http://www.dpscs.state.md.us/sorSearch/search.do?anchor=offlist&searchType=byName&coords=0%2B0&streetAddress=&radius=0.25&firstnm=&lastnm=&county=Allegany&zip=&filter=ALL&category=ALL&start=1",
        "http://www.dpscs.state.md.us/sorSearch/search.do?anchor=offlist&searchType=byName&coords=0%2B0&streetAddress=&radius=0.25&firstnm=&lastnm=&county=Allegany&zip=&filter=ALL&category=ALL&start=4111",
        "http://www.dpscs.state.md.us/sorSearch/search.do?anchor=offlist&searchType=byName&coords=0%2B0&streetAddress=&radius=0.25&firstnm=&lastnm=&county=Allegany&zip=&filter=ALL&category=ALL&start=4991"
    ] #pg 4011 & 4981 have errors, so we manually start crawling at the pages that follow those
    # Follow every "Next" paging link; each visited results page is handed to
    # parse_res_page.
    rules = (
        Rule(LinkExtractor(allow=(), restrict_xpaths=('//*[@class="paging_container"]//a[text()="Next"]'), unique=True), callback = 'parse_res_page', follow=True),
    )
    # def page_results(self, response):
    #     next_page = response.xpath('//*[@id="results_tab-List"]/div[12]/div[2]/a[1]')
    #     if next_page:
    #         url = response.urljoin(next_page[0].extract())
    #         yield

    def parse_res_page(self, response):
        """Request every offender detail link found on one results page."""
        for href in response.xpath("//*[@id='results_tab-List']//div[5]/a[1]/@href"):
            url = response.urljoin(href.extract())
            yield scrapy.Request(url, callback=self.parse_dir_contents)

    def parse_dir_contents(self, response):
        """Scrape one offender detail page into an OffenderRegistryItem."""
        item = OffenderRegistryItem()
        # The numeric registry id only appears in the URL query string.
        id_prog = re.compile('id=([0-9]+)')
        id_url = response.url
        item['id_num'] = id_prog.findall(id_url)
        item['name'] = response.xpath('//*[@id="Column800"]/div/div/div[4]/text()').extract()
        item['aliases'] = response.xpath('//*[@id="top_info_container"]/div[4]/text()').extract()
        item['primary_residence'] = response.xpath('//*[@id="top_info_container"]/ul/li[1]/text()').extract()
        item['address_change_date'] = response.xpath('//*[@id="top_info_container"]/ul/li[2]/span[2]/text()').extract()
        item['temp_residence'] = response.xpath('//*[@id="top_info_container"]/ul/li[3]/span[2]/text()').extract()
        item['employ_address'] = response.xpath('//*[@id="top_info_container"]/ul/li[4]/span[2]/text()').extract()
        item['school_address'] = response.xpath('//*[@id="top_info_container"]/ul/li[5]/span[2]/text()').extract()
        # The fields below intentionally grab the same bulk text regions; the
        # pipeline slices each one down using its label (e.g. "Tier:").
        item['convict_date'] = response.xpath('//*[@id="Column800"]/div/div//ul//text()').extract()
        item['convict_location'] = response.xpath('//*[@id="Column800"]/div/div//ul//text()').extract()
        item['registr_authority'] = response.xpath('//*[@id="Column800"]/div/div//ul//text()').extract()
        item['charges'] = response.xpath('//*[@id="Column800"]/div/div//ul//text()').extract()
        item['charge_details'] = response.xpath('//*[@id="Column800"]/div/div//span[@class="charge_description"]//text()').extract()
        item['custody_info'] = response.xpath('//*[@id="Column800"]/div/div//text()').extract()
        item['custody_agency'] = response.xpath('//*[@id="Column800"]/div/div//li//text()').extract()
        item['registr_status'] = response.xpath('//*[@id="Column800"]/div/div//li//text()').extract()
        item['tier'] = response.xpath('//*[@id="Column800"]/div/div//li//text()').extract()
        item['reg_term'] = response.xpath('//*[@id="Column800"]/div/div//li//text()').extract()
        item['information_contact'] = response.xpath('//*[@id="Column800"]/div/div//li//text()').extract()
        item['current_reg_date'] = response.xpath('//*[@id="Column800"]/div/div//li//text()').extract()
        item['sex'] = response.xpath('//*[@id="Column800"]/div/div//li//text()').extract()
        item['DOB'] = response.xpath('//*[@id="Column800"]/div/div//li//text()').extract()
        item['curr_age'] = response.xpath('//*[@id="Column800"]/div/div//li//text()').extract()
        item['height'] = response.xpath('//*[@id="Column800"]/div/div//li//text()').extract()
        item['weight'] = response.xpath('//*[@id="Column800"]/div/div//li//text()').extract()
        item['race'] = response.xpath('//*[@id="Column800"]/div/div//li//text()').extract()
        item['skin_tone'] = response.xpath('//*[@id="Column800"]/div/div//li//text()').extract()
        item['eye_color'] = response.xpath('//*[@id="Column800"]/div/div//li//text()').extract()
        item['hair_color'] = response.xpath('//*[@id="Column800"]/div/div//li//text()').extract()
        item['vehicles'] = response.xpath('//*[@id="Column800"]/div//text()').extract()
        item['image_urls'] = response.xpath('//*[@id="reg_pic_big"]/img/@src').extract()
        yield item
|
{"/offender_registry/spiders/offender_spider.py": ["/offender_registry/items.py"]}
|
26,732
|
riddlet/offender_registry
|
refs/heads/master
|
/offender_registry/pipelines.py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.pipelines.images import ImagesPipeline
def clean_bulk(item, search_string, skip):
    """For every label in *search_string*, collect the stripped element that
    sits *skip* positions after each occurrence of that label in *item*."""
    collected = []
    for label in search_string:
        for idx, token in enumerate(item):
            if token == label:
                collected.append(item[idx + skip].strip())
    return collected
def clean_bulk_bookends(item, search_string1, search_string2):
    """Join the token runs delimited by *search_string1* (opening marker,
    compared against each stripped token) and the next closing marker drawn
    from *search_string2* (or the default ['', 'Conviction Date:'] pair when
    *search_string2* is empty/falsy). Returns one joined string per run."""
    starts = []
    ends = []
    if search_string2:
        closers = [s.strip() for s in search_string2]
    else:
        closers = ['', 'Conviction Date:']
    for idx, token in enumerate(item):
        stripped = token.strip()
        if stripped == search_string1:
            starts.append(idx + 1)
        # Only accept a closer while a run is still open (more starts than ends).
        if len(starts) > len(ends) and stripped in closers:
            ends.append(idx)
    return [''.join(item[begin:ends[k]]) for k, begin in enumerate(starts)]
class OffenderRegistryPipeline(object):
    """Post-processes the raw xpath dumps scraped by offender_spider.

    Each raw field arrives as the same bulk text list; clean_bulk /
    clean_bulk_bookends slice out the value that follows (or is bracketed by)
    that field's on-page label.
    """

    def process_item(self, item, spider):
        item['name'] = [item['name'][0].strip()]
        # Bullet separators are replaced so aliases survive CSV export.
        if item['aliases']:
            item['aliases'] = [item['aliases'][0].replace(u'\u2022','-|-').encode('utf-8')]
        item['primary_residence'] = [item['primary_residence'][0].strip()]
        # clean_bulk(raw, labels, skip): value sits `skip` tokens after label.
        item['convict_date'] = clean_bulk(item['convict_date'], ['Conviction Date:'], 1)
        item['convict_location'] = clean_bulk(item['convict_location'], ['Location:'], 1)
        item['registr_authority'] = clean_bulk(item['registr_authority'], ['Registration Authority:', 'Jurisdiction:'], 1)
        item['charges'] = clean_bulk_bookends(item['charges'], 'Charges:', item['charge_details'])
        item['charge_details'] = [''.join(item['charge_details'])]
        item['custody_info'] = clean_bulk(item['custody_info'], ['Custody/Supervision Information'], 3)
        item['custody_agency'] = clean_bulk(item['custody_agency'], ['Agency:'], 1)
        # The status value may sit 1 or 2 tokens after the label depending on
        # page markup; keep whichever extraction captured more text.
        v1 = clean_bulk(item['registr_status'], ['Registration Status:'], 1)
        v2 = clean_bulk(item['registr_status'], ['Registration Status:'], 2)
        if len(v1[0]) > len(v2[0]):
            item['registr_status'] = v1
        else:
            item['registr_status'] = v2
        item['tier'] = clean_bulk(item['tier'], ['Tier:'], 2)
        item['reg_term'] = clean_bulk(item['reg_term'], ['Reg. Term:'], 2)
        item['information_contact'] = clean_bulk(item['information_contact'], ['Information Contact:'], 2)
        item['current_reg_date'] = clean_bulk(item['current_reg_date'], ['Current Registration Date:'], 1)
        item['sex'] = clean_bulk(item['sex'], ['Sex:'], 2)
        item['DOB'] = clean_bulk(item['DOB'], ['Date of Birth:'], 2)
        item['curr_age'] = clean_bulk(item['curr_age'], ['Current Age:'], 2)
        item['height'] = clean_bulk(item['height'], ['Height:'], 2)
        item['weight'] = clean_bulk(item['weight'], ['Weight:'], 2)
        item['race'] = clean_bulk(item['race'], ['Race:'], 2)
        item['skin_tone'] = clean_bulk(item['skin_tone'], ['Skin Tone:'], 2)
        item['eye_color'] = clean_bulk(item['eye_color'], ['Eye Color:'], 2)
        item['hair_color'] = clean_bulk(item['hair_color'], ['Hair Color:'], 2)
        item['vehicles'] = clean_bulk_bookends([x.strip() for x in item['vehicles']], 'Vehicle Information', ['Exceptions'])
        return item
|
{"/offender_registry/spiders/offender_spider.py": ["/offender_registry/items.py"]}
|
26,733
|
riddlet/offender_registry
|
refs/heads/master
|
/offender_registry/items.py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class OffenderRegistryItem(scrapy.Item):
    """A single offender's record as scraped from the registry site.

    Fields hold the raw strings extracted by the spider; cleaning is
    presumably done afterwards in the spider's post-processing step
    (see the clean_bulk calls there) — TODO confirm.
    """
    # -- Identity --
    id_num = scrapy.Field()
    name = scrapy.Field()
    aliases = scrapy.Field()
    # -- Addresses --
    primary_residence = scrapy.Field()
    address_change_date = scrapy.Field()
    temp_residence = scrapy.Field()
    employ_address = scrapy.Field()
    school_address = scrapy.Field()
    # -- Conviction / registration details --
    convict_date = scrapy.Field()
    convict_location = scrapy.Field()
    registr_authority = scrapy.Field()
    charges = scrapy.Field()
    charge_details = scrapy.Field()
    custody_info = scrapy.Field()
    custody_agency = scrapy.Field()
    registr_status = scrapy.Field()
    tier = scrapy.Field()
    reg_term = scrapy.Field()
    information_contact = scrapy.Field()
    current_reg_date = scrapy.Field()
    # -- Physical description --
    sex = scrapy.Field()
    DOB = scrapy.Field()
    curr_age = scrapy.Field()
    height = scrapy.Field()
    weight = scrapy.Field()
    race = scrapy.Field()
    skin_tone = scrapy.Field()
    eye_color = scrapy.Field()
    hair_color = scrapy.Field()
    vehicles = scrapy.Field()
    # -- Images: standard scrapy ImagesPipeline field names --
    image_urls = scrapy.Field()
    images = scrapy.Field()
|
{"/offender_registry/spiders/offender_spider.py": ["/offender_registry/items.py"]}
|
26,741
|
Square789/multiframe_list
|
refs/heads/master
|
/multiframe_list/demo2.py
|
import tkinter as tk
from multiframe_list.multiframe_list import MultiframeList
def main():
	"""Open a demo window holding a two-column MultiframeList."""
	window = tk.Tk()
	demo_list = MultiframeList(
		window,
		inicolumns = ({"name": "aaaa"}, {"name": "bbbb"}),
		reorderable = True,
		resizable = True,
	)
	demo_list.pack(expand = 1, fill = tk.BOTH)
	window.mainloop()
if __name__ == "__main__":
	main()
|
{"/multiframe_list/demo2.py": ["/multiframe_list/multiframe_list.py"], "/multiframe_list/multiframe_list.py": ["/multiframe_list/demo.py"], "/multiframe_list/demo.py": ["/multiframe_list/multiframe_list.py"], "/multiframe_list/__main__.py": ["/multiframe_list/demo.py"], "/multiframe_list/__init__.py": ["/multiframe_list/multiframe_list.py", "/multiframe_list/demo.py"]}
|
26,742
|
Square789/multiframe_list
|
refs/heads/master
|
/setup.py
|
import ast
from setuptools import setup

# Thanks: https://stackoverflow.com/questions/2058802/
# how-can-i-get-the-version-defined-in-setup-py-setuptools-in-my-package
# Pull __version__ out of the module source without importing it (importing
# would pull in tkinter at build time).
__version__ = None
with open("multiframe_list/multiframe_list.py", encoding = "utf-8") as h:
	for line in h:
		if line.startswith("__version__"):
			# Evaluate only the assignment's right-hand literal. This
			# replaces the deprecated `ast.Str`-era `.value.s` accessor
			# (removed alongside ast.Str in modern Pythons) and never
			# executes any of the file's code.
			__version__ = ast.literal_eval(ast.parse(line).body[0].value)
			break
if __version__ is None:
	raise SyntaxError("Version not found.")
with open("README.md", encoding = "utf-8") as h:
	long_desc = h.read()
setup(
	name = "multiframe_list",
	version = __version__,
	author = "Square789",
	description = "Tkinter widget to display data over multiple columns.",
	long_description = long_desc,
	long_description_content_type = "text/markdown",
	packages = ["multiframe_list"],
	classifiers = [
		"License :: OSI Approved :: MIT License",
		"Programming Language :: Python",
		"Topic :: Software Development :: User Interfaces",
		"Topic :: Software Development :: Libraries :: Python Modules"
	],
	url = "https://www.github.com/Square789/multiframe_list/",
)
|
{"/multiframe_list/demo2.py": ["/multiframe_list/multiframe_list.py"], "/multiframe_list/multiframe_list.py": ["/multiframe_list/demo.py"], "/multiframe_list/demo.py": ["/multiframe_list/multiframe_list.py"], "/multiframe_list/__main__.py": ["/multiframe_list/demo.py"], "/multiframe_list/__init__.py": ["/multiframe_list/multiframe_list.py", "/multiframe_list/demo.py"]}
|
26,743
|
Square789/multiframe_list
|
refs/heads/master
|
/multiframe_list/multiframe_list.py
|
"""
A module that brings the MultiframeList class with it.
Its purpose is to display items and their properties over
several colums and easily format, sort and manage them as part of a UI.
"""
from enum import IntEnum
from operator import itemgetter
import os
import tkinter as tk
import tkinter.ttk as ttk
__version__ = "4.0.1"
__author__ = "Square789"
NoneType = type(None) # Not exposed by builtins; used in isinstance checks.
BLANK = "" # Placeholder value for cells without data.
_DEF_LISTBOX_WIDTH = 20 # Presumably the default listbox width (chars) — not used in this chunk.
DRAG_THRES = 10 # Presumably pixels before a header drag takes effect — used further down.
MIN_WIDTH = 30 # Minimum column width in pixels (clamped in _Column._cnf_grid).
WEIGHT = 1000 # Default grid weight for columns.
ALL = "all"
END = "end"
class DRAGINTENT(IntEnum):
	"""What a mouse drag started on a column header is meant to do."""
	REORDER = 0 # Move the column to another display slot.
	RESIZE = 1 # Change a column's width.
class SELECTION_TYPE(IntEnum):
	"""How many rows of the list may be selected at once."""
	SINGLE = 0
	MULTIPLE = 1
def _drag_intent(x, frame):
	"""
	Map a click at header-local x offset in display slot `frame` to a
	DRAGINTENT: the leftmost `MIN_WIDTH // 2` pixels of every frame but
	the first mean RESIZE, anything else REORDER.
	"""
	wants_resize = frame != 0 and x < (MIN_WIDTH // 2)
	return DRAGINTENT.RESIZE if wants_resize else DRAGINTENT.REORDER
def _find_consecutive_sequences(lst):
"""
Given a **descendedly sorted list**, returns a list of ranges
of all consecutively descending ranges of numbers in the
given list. Duplicate numbers following one another are treated
as a single number.
Example: `[6, 5, 5, 4, 2, 1]` -> `[range(4, 7), range(1, 3)]`
"""
if not lst:
return []
last_start = lst[0]
last = None
res = []
for x in lst:
if last is not None and last != x and last != x + 1:
res.append(range(last, last_start + 1))
last_start = x
last = x
res.append(range(last, last_start + 1))
return res
SORTSYM = ("\u25B2", "\u25BC", "\u25A0") # desc, asc, none — indexed by a column's sortstate.
# State modifier flags for tk events, tested bitwise below. These are
# hardcoded by tuple position in tkinter.
def with_shift(e):
	"""Whether the Shift modifier bit is set on tk event `e`."""
	return (e.state & 1) != 0
def with_ctrl(e):
	"""Whether the Control modifier bit is set on tk event `e`."""
	return (e.state & 4) != 0
SCROLLCOMMAND = """
if {{[tk windowingsystem] eq "aqua"}} {{
bind {w} <MouseWheel> {{
%W yview scroll [expr {{- (%D)}}] units
}}
bind {w} <Option-MouseWheel> {{
%W yview scroll [expr {{-10 * (%D)}}] units
}}
bind {w} <Shift-MouseWheel> {{
%W xview scroll [expr {{- (%D)}}] units
}}
bind {w} <Shift-Option-MouseWheel> {{
%W xview scroll [expr {{-10 * (%D)}}] units
}}
}} else {{
bind {w} <MouseWheel> {{
%W yview scroll [expr {{- (%D / 120) * 4}}] units
}}
bind {w} <Shift-MouseWheel> {{
%W xview scroll [expr {{- (%D / 120) * 4}}] units
}}
}}
if {{"x11" eq [tk windowingsystem]}} {{
bind {w} <4> {{
if {{!$tk_strictMotif}} {{
%W yview scroll -5 units
}}
}}
bind {w} <Shift-4> {{
if {{!$tk_strictMotif}} {{
%W xview scroll -5 units
}}
}}
bind {w} <5> {{
if {{!$tk_strictMotif}} {{
%W yview scroll 5 units
}}
}}
bind {w} <Shift-5> {{
if {{!$tk_strictMotif}} {{
%W xview scroll 5 units
}}
}}
}}
"""
class _Column():
	"""
	Class whose purpose is to store data and information regarding a
	column. Can be assigned to frames of a MultiframeList, displaying
	its data in there.

	!!! Columns should not be instantiated or controlled directly,
	only through methods of a MultiframeList !!!

	Required args:

	mfl: Parent, must be a MultiframeList.

	Optional args:

	col_id: The identifying name of the column it will be addressed by.
		This is recommended to be a descriptive name set by the developer.
		If not specified, is set to an integer that is not in use by
		another Column. May not be changed after creation.
	name: Name to appear in the label and title the column.
	sort: Whether the column should sort the entire MultiframeList when
		its label is clicked.
	sortkey: A function that will be used to sort values in this column,
		just like the regular `sorted` `key` kwarg.
	minsize: Specify the minimum amount of pixels the column should be
		wide. This option gets passed to the grid geometry manager and
		will be at least `MIN_WIDTH`.
	weight: Weight parameter, passed to the grid geometry manager. Note
		that it should be in proportion with `WEIGHT`, as default weights
		are very large.
	formatter: A function that formats each element in a column's
		datalist. This is especially useful for i.e. dates, where you
		want to be able to sort by a unix timestamp but still have the
		dates in a human-readable format.
	fallback_type: A datatype that all elements of the column will be
		converted to in case it has to be sorted and the sort fails due
		to a TypeError. Note that this will modify the contained elements
		upon sorting and is meant for type correction if they are entered
		uncleanly. For a key function, see `sortkey`.
		If not specified and elements are of different types, the
		exception will be raised normally.
	dblclick_cmd: A command that will be run when the column is
		double-clicked. Will be called with an event as only parameter.
	"""
	# COLUMNS ARE RESPONSIBLE FOR UI UPDATING. GENERAL FLOW LIKE THIS:
	# USER INTERFACES WITH THE MFL, MFL KEEPS TRACK OF A FEW LISTS AND
	# VARS, VALIDATES, GIVES COMMANDS TO COLUMNS, COLUMNS UPDATE UI
	# THEMSELVES
	class Config():
		# Slotted value container for all per-column options; the slot
		# names double as the accepted `config` keys.
		__slots__ = (
			"name", "sort", "sortkey", "minsize", "weight", "formatter",
			"fallback_type", "dblclick_cmd",
		)
		def __init__(
			self,
			name = BLANK, sort = False, sortkey = None,
			minsize = MIN_WIDTH, weight = WEIGHT, formatter = None,
			fallback_type = None, dblclick_cmd = None,
		):
			self.name = name
			self.sort = sort
			self.sortkey = sortkey
			self.minsize = minsize
			self.weight = weight
			self.formatter = formatter
			self.fallback_type = fallback_type
			self.dblclick_cmd = dblclick_cmd
	def __init__(self, mfl, col_id = None, **kwargs):
		if not isinstance(mfl, MultiframeList):
			raise TypeError("Bad Column parent, must be MultiframeList.")
		self.mfl = mfl
		self.assignedframe = None
		# Maps each config key to the callable that propagates a change of
		# that key to the UI. Keys without UI effect map to a no-op lambda.
		self._cnfcmd = {
			"name": self._cnf_name, "sort": self._cnf_sort,
			"sortkey": lambda: False, "minsize": self._cnf_grid,
			"weight": self._cnf_grid, "formatter": self.format,
			"fallback_type": lambda: False, "dblclick_cmd": self._cnf_dblclick_cmd,
		}
		if col_id is None:
			self.col_id = self._generate_col_id()
		else:
			if col_id in self.mfl.columns:
				raise ValueError(f"Column id {col_id!r} is already in use!")
			self.col_id = col_id
		# Pad data so the new column matches the list's current length.
		self.data = [BLANK for _ in range(self.mfl.length)]
		self.sortstate = 2 # 0 if next sort will be descending, else 1
		self.cnf = self.Config(**kwargs)
	def __repr__(self):
		return (
			f"<{type(self).__name__} of {type(self.mfl).__name__} at "
			f"0x{id(self):016X}, col_id: {self.col_id}>"
		)
	def __len__(self):
		return len(self.data)
	def _generate_col_id(self):
		# Lowest non-negative integer not yet taken by another column.
		curid = 0
		while curid in self.mfl.columns:
			curid += 1
		return curid
	def _cnf_dblclick_cmd(self):
		# Frame tuple layout (see MultiframeList.add_frames):
		# [frame, listbox, title label, sort indicator label].
		if self.assignedframe is None:
			return
		if self.cnf.dblclick_cmd is None:
			self.mfl.frames[self.assignedframe][1].unbind("<Double-Button-1>")
		else:
			self.mfl.frames[self.assignedframe][1].bind(
				"<Double-Button-1>", self.cnf.dblclick_cmd
			)
	def _cnf_grid(self):
		# Hacky corrector: silently clamp minsize up to the allowed minimum.
		if self.cnf.minsize < MIN_WIDTH:
			self.cnf.minsize = MIN_WIDTH
		if self.assignedframe is None:
			return
		cur_grid = self.mfl.framecontainer.grid_columnconfigure(self.assignedframe)
		callargs = {}
		# Only push values that actually differ, avoiding a redundant
		# geometry manager call.
		for value in ("minsize", "weight"):
			if cur_grid[value] != getattr(self.cnf, value):
				callargs[value] = getattr(self.cnf, value)
		if callargs:
			self.mfl.framecontainer.grid_columnconfigure(self.assignedframe, **callargs)
	def _cnf_name(self):
		if self.assignedframe is None:
			return
		self.mfl.frames[self.assignedframe][2].config(text = self.cnf.name)
	def _cnf_sort(self):
		if self.assignedframe is None:
			return
		if self.cnf.sort:
			self.set_sortstate(self.sortstate)
		else:
			# Not sortable anymore: blank out the sort indicator label.
			self.mfl.frames[self.assignedframe][3].configure(text = BLANK)
	def config(self, **kw):
		"""
		With no kwargs, returns a dict of all configuration values.
		Otherwise sets the given options and applies each to the UI.
		Raises ValueError for unknown option names.
		"""
		if not kw:
			return {s: getattr(self.cnf, s) for s in self.cnf.__slots__}
		for k, v in kw.items():
			if not k in self.Config.__slots__:
				raise ValueError(
					f"Unkown configuration arg {k!r}, must be one of "
					f"{', '.join(self.Config.__slots__)}."
				)
			setattr(self.cnf, k, v)
			self._cnfcmd[k]()
	def data_clear(self):
		"""Clears self.data, refreshes interface, if assigned a frame."""
		self.data.clear()
		if self.assignedframe is not None:
			self.mfl.frames[self.assignedframe][1].delete(0, tk.END)
	def data_insert(self, elem, index=None):
		"""
		Inserts elem to self.data at index and refreshes interface, if
		assigned a frame. If index is not specified, elem will be appended
		instead.
		"""
		if index is not None:
			self.data.insert(index, elem)
		else:
			self.data.append(elem)
			index = tk.END
		if self.assignedframe is not None:
			if self.cnf.formatter is not None:
				self.mfl.frames[self.assignedframe][1].insert(index, self.cnf.formatter(elem))
			else:
				self.mfl.frames[self.assignedframe][1].insert(index, elem)
	def data_delete(self, from_, to = None):
		"""
		Removes the elements from `from_` to `to` (end-exclusive), or
		just `from_` if `to` is not given. No effect if `to` <= `from_`.
		Refreshes interface if assigned a frame.
		"""
		to = from_ + 1 if to is None else to
		if to <= from_:
			return
		self.data = self.data[:from_] + self.data[to:]
		if self.assignedframe is not None:
			# Listbox.delete is end-inclusive, thus `to - 1`.
			self.mfl.frames[self.assignedframe][1].delete(from_, to - 1)
	def data_set(self, newdata):
		"""
		Sets the column's data to the list specified, refreshes interface
		if assigned a frame.
		NOTE(review): unlike data_insert, this inserts raw values without
		running the formatter; callers appear expected to call `format`
		afterwards — confirm against MultiframeList.format's docstring.
		"""
		if not isinstance(newdata, list):
			raise TypeError("Data has to be a list!")
		self.data = newdata
		if self.assignedframe is not None:
			self.mfl.frames[self.assignedframe][1].delete(0, tk.END)
			self.mfl.frames[self.assignedframe][1].insert(tk.END, *self.data)
	def format(self, exclusively = None):
		"""
		If interface frame is specified, runs all data through
		`self.cnf.formatter` and displays result.
		If exclusively is set (as an iterable), only specified indices
		will be formatted.
		"""
		if self.cnf.formatter is None or self.assignedframe is None:
			return
		if exclusively is None:
			f_data = [self.cnf.formatter(i) for i in self.data]
			self.mfl.frames[self.assignedframe][1].delete(0, tk.END)
			self.mfl.frames[self.assignedframe][1].insert(tk.END, *f_data)
		else:
			# Re-insert only the requested rows, leaving all others as-is.
			for i in exclusively:
				tmp = self.data[i]
				self.mfl.frames[self.assignedframe][1].delete(i)
				self.mfl.frames[self.assignedframe][1].insert(i, self.cnf.formatter(tmp))
	def setdisplay(self, wanted_frame):
		"""
		Sets the display frame of the column to wanted_frame. To
		unregister, set it to None.
		May raise IndexError.
		"""
		if wanted_frame is None:
			self.being_dragged = self.being_pressed = False
			# This block effectively undoes anything the `_cnf_*` methods and the block below
			# do to the widgets and tries to get them into the default state.
			self.mfl._clear_frame(self.assignedframe)
			self.assignedframe = wanted_frame
			return
		self.assignedframe = wanted_frame
		self.mfl.frames[self.assignedframe][2].bind(
			"<ButtonPress-1>",
			lambda evt: self.mfl._on_frame_header_press(evt, self.assignedframe)
		)
		self.mfl.frames[self.assignedframe][2].bind(
			"<Leave>",
			self.mfl._on_frame_header_leave
		)
		self.mfl.frames[self.assignedframe][2].bind(
			"<Motion>",
			lambda evt: self.mfl._on_frame_header_motion(evt, self.assignedframe)
		)
		self.mfl.frames[self.assignedframe][2].bind(
			"<ButtonRelease-1>",
			lambda evt: self.mfl._on_frame_header_release(evt, self.assignedframe)
		)
		self.set_sortstate(self.sortstate)
		# NOTE: I don't think these two recurring lines warrant their own
		# "setframetodata" method.
		self.mfl.frames[self.assignedframe][1].delete(0, tk.END)
		self.mfl.frames[self.assignedframe][1].insert(tk.END, *self.data)
		# Re-apply every configuration option to the freshly assigned frame.
		for fnc in set(self._cnfcmd.values()):
			fnc()
	def set_sortstate(self, to):
		"""
		Sets the column's sortstate, also updating it on the UI if it is
		being displayed and sortable.
		"""
		if self.assignedframe is not None and self.cnf.sort:
			self.mfl.frames[self.assignedframe][3].configure(text = SORTSYM[to])
		self.sortstate = to
class MultiframeList(ttk.Frame):
"""
A multiframe tkinter based listview, for rough description see module docstring.
A terrible idea of a feature:
The MultiframeList will grab the currently active theme (as well as
listen to the <<ThemeChanged>> event) and attempt to apply style
configuration options in the current theme's style called
"MultiframeList.Listbox" to its listboxes, as those are not
available as ttk variants.
The column title labels listen to the style "MultiframeListTitle.TLabel"
The column sort indicators listen to the style "MultiframeListSortInd.Tlabel"
The reorder/resizing indicators listen to the styles
"MultiframeListResizeInd.TFrame" and "MultiframeListReorderInd.TFrame".
The styles "MultiframeList.ActiveCell" and "MultiframeList.ActiveRow" are
responsible for the colors of the active cell. They are implemented by
calling the listboxes' `itemconfigure` method and thus only support the
arguments given by it: `foreground`, `background`, `selectforeground` and
`selectbackground`.
"ActiveRow" is only relevant if the MultiframeList is configured to color
the active cell's row as well.
The list broadcasts the Virtual event "<<MultiframeSelect>>" after the selection
is modified in any way.
The list broadcasts the Virtual event "<<MultiframeRightclick>>" whenever the right
click mouse button is released or the context menu button is pressed.
The list will reset the active selection when Escape is pressed.
"""
_DEFAULT_LISTBOX_CONFIG = {
"activestyle": "underline",
"background": "#FFFFFF",
"borderwidth": 1,
"cursor": "",
"disabledforeground": "#6D6D6D",
"font": "TkDefaultFont",
"foreground": "#000000",
"highlightbackground": "#FFFFFF",
"highlightcolor": "#B4B4B4",
"highlightthickness": 1,
"justify": "left",
"relief": "sunken",
"selectbackground": "#3399FF",
"selectborderwidth": 0,
"selectforeground": "#FFFFFF",
}
_DEFAULT_ITEMCONFIGURE = {
"background": "",
"foreground": "",
"selectbackground": "",
"selectforeground": "",
}
class Config():
__slots__ = (
"rightclickbtn", "click_key", "listboxheight", "reorderable",
"resizable", "selection_type", "active_cell_span_row", "active_cell_style",
"active_cell_row_style",
)
def __init__(
self, rightclickbtn = "3", click_key = "space", listboxheight = 10,
reorderable = False, resizable = False, selection_type = SELECTION_TYPE.MULTIPLE,
active_cell_span_row = False, active_cell_style = None, active_cell_row_style = None,
):
self.rightclickbtn = rightclickbtn
self.click_key = click_key
self.listboxheight = listboxheight
self.reorderable = reorderable
self.resizable = resizable
self.selection_type = selection_type
self.active_cell_span_row = active_cell_span_row
self.active_cell_style = {} if active_cell_style is None \
else active_cell_style
self.active_cell_row_style = {} if active_cell_row_style is None \
else active_cell_row_style
def __init__(self, master, inicolumns = None, **kwargs):
"""
Arguments:
Instantiation only:
master - parent object, should be tkinter root or a tkinter widget
inicolumns <List<Dict>>: The columns here will be created and displayed
upon instantiation.
The dicts supplied should take form of Column constructor kwargs. See
the `multiframe_list._Column` class for a list of acceptable kwargs.
Modifiable during runtime:
rightclickbtn <Str>: The mouse button that will trigger the
MultiframeRightclick virtual event. It is "3" (standard) on Windows,
this may differ from platform to platform.
click_key <Str>: The key to be used for clicking cells via keyboard
navigation. "space" by default.
listboxheight <Int>: The height (In items) the listboxes will take up.
10 by tkinter default.
reorderable <Bool>: Whether the columns of the MultiframeList should be
reorderable by the user dragging and dropping the column headers
as well as Ctrl-Left/Ctrl-Right. False by default.
resizable <Bool>: Whether the columns of the MultiframeList should be
resizable by the user dragging the column headers. False by default.
selection_type <SELECTION_TYPE>: Selection type to use for the MultiframeList.
When changed, the selection will be cleared. MULTIPLE by default.
active_cell_span_row <Bool>: Whether the selected active cell will apply a
per-item style across its entire row. False by default.
"""
super().__init__(master, takefocus = True)
self.master = master
self.cnf = self.Config(**kwargs)
self.bind("<Up>", lambda e: self._on_arrow_y(e, -1))
self.bind("<Down>", lambda e: self._on_arrow_y(e, 1))
self.bind("<Left>", lambda e: self._on_arrow_x(e, -1))
self.bind("<Right>", lambda e: self._on_arrow_x(e, 1))
if os.name == "nt":
ctxtmen_btn = "App"
elif os.name == "posix":
ctxtmen_btn = "Menu"
else:
ctxtmen_btn = None
if ctxtmen_btn is not None:
self.bind(f"<KeyPress-{ctxtmen_btn}>", self._on_menu_button)
self.bind(f"<KeyPress-{self.cnf.click_key}>", self._on_click_key)
self.bind(f"<Escape>", lambda _: self._selection_clear(with_event = True))
self.ttk_style = ttk.Style()
self.bind("<<ThemeChanged>>", self._theme_update)
# Last direct cell that was interacted with
self.active_cell_x = None
self.active_cell_y = None
# Listbox-local coordinate the interaction was made at
self.coordx = None
self.coordy = None
# Selected items
self.selection = set()
# --Stolen-- borrowed from tk, the first item a selection was started
# with, used for expanding it via shift-clicks/Up-Downs
self._selection_anchor = None
# The element last dragged over in a mouse dragging selection.
# Does not include the initially clicked element.
self._last_dragged_over_element = None
# The last ButtonPress event for a click on a listbox.
# If None, no selection is being made.
self._last_click_event = None
self._active_cell_style, self._active_row_style = self._load_active_cell_style()
# Frame index of the last pressed frame header
self.pressed_frame = None
# X Position of the last pressed frame header's press event.
self.pressed_x = None
# Current dragintent
self.dragging = None
self.scrollbar = ttk.Scrollbar(self, command = self._scrollallbar)
self.framecontainer = ttk.Frame(self)
self.framecontainer.grid_rowconfigure(0, weight = 1)
self._listboxheight_hack = ttk.Frame(self, width = 0)
self.resize_highlight = ttk.Frame(
self.framecontainer, style = "MultiframeListResizeInd.TFrame"
)
self.reorder_highlight = ttk.Frame(
self.framecontainer, style = "MultiframeListReorderInd.TFrame"
)
self.frames = [] # Each frame contains interface elements for display.
self.columns = {} # Columns will provide data storage capability as
# well as some metadata.
self.length = 0
if inicolumns is not None:
self.add_frames(len(inicolumns))
# using self.add_columns would require iterating a dict relying
# on the fact it's sorted, i don't like that so we copypaste 2 lines
for index, colopt in enumerate(inicolumns):
new_col = _Column(self, **colopt)
new_col.setdisplay(index)
self.columns[new_col.col_id] = new_col
self.scrollbar.pack(fill = tk.Y, expand = 0, side = tk.RIGHT)
self.framecontainer.pack(expand = 1, fill = tk.BOTH, side = tk.RIGHT)
self._listboxheight_hack.pack(expand = 0, fill = tk.Y, side = tk.RIGHT)
#====USER METHODS====
def add_columns(self, *coldicts):
"""
Takes any amount of dicts, then adds columns where the column
constructor receives the dicts as kwargs. See the
multiframe_list._Column class for a list of acceptable kwargs.
"""
for coldict in coldicts:
new_col = _Column(self, **coldict)
# Columns will give themselves a proper id
self.columns[new_col.col_id] = new_col
def add_frames(self, amount):
"""
Adds amount of frames, display slots in a way, fills their listboxes
up with empty strings and immediatedly displays them.
"""
startindex = len(self.frames)
for i in range(amount):
new_frame = [None for _ in range(4)]
rcb = self.cnf.rightclickbtn
curindex = startindex + i
self.frames.append(new_frame)
new_frame[0] = ttk.Frame(self.framecontainer)
new_frame[0].grid_rowconfigure(1, weight = 1)
new_frame[0].grid_columnconfigure(0, weight = 1)
new_frame[1] = tk.Listbox(
new_frame[0], exportselection = False, takefocus = False,
height = self.cnf.listboxheight
)
new_frame[2] = ttk.Label(
new_frame[0], text = BLANK, anchor = tk.W,
style = "MultiframeListTitle.TLabel"
)
new_frame[3] = ttk.Label(
new_frame[0], text = BLANK, anchor = tk.W,
style = "MultiframeListSortInd.TLabel"
)
# REMOVE Listbox bindings from listboxes
new_frame[1].bindtags((new_frame[1].bindtags()[0], '.', 'all'))
def _m1_press_handler(event, curindex = curindex):
return self._on_listbox_mouse_press(event, 1, curindex)
def _m1_release_handler(event, curindex = curindex):
return self._on_listbox_mouse_release(event, 1, curindex)
def _motion_handler(event, curindex = curindex):
return self._on_listbox_mouse_motion(event, 1, curindex)
def _rcb_press_handler(event, rcb = rcb, curindex = curindex):
return self._on_listbox_mouse_press(event, rcb, curindex)
def _rcb_release_handler(event, rcb = rcb, curindex = curindex):
return self._on_listbox_mouse_release(event, rcb, curindex)
new_frame[1].bind("<Button-1>", _m1_press_handler)
new_frame[1].bind("<ButtonRelease-1>", _m1_release_handler)
new_frame[1].bind("<Motion>", _motion_handler)
new_frame[1].bind(f"<Button-{rcb}>", _rcb_press_handler)
new_frame[1].bind(f"<ButtonRelease-{rcb}>", _rcb_release_handler)
self.tk.eval(SCROLLCOMMAND.format(w = new_frame[1]._w))
new_frame[1].configure(
**self._get_listbox_conf(new_frame[1]),
yscrollcommand = self._scrollalllistbox
)
self._clear_frame(curindex)
new_frame[3].grid(row = 0, column = 1, sticky = "news") # sort_indicator
new_frame[2].grid(row = 0, column = 0, sticky = "news") # label
new_frame[1].grid(row = 1, column = 0, sticky = "news", columnspan = 2) # listbox
new_frame[0].grid(row = 0, column = curindex, sticky = "news") # frame
new_frame[0].grid_propagate(False)
# For some reason necessary so the grid manager reacts to the new frame,
# in conjunction with the <Configure> event below
self.framecontainer.update_idletasks()
self._listboxheight_hack.configure(height = new_frame[1].winfo_reqheight())
self.framecontainer.event_generate("<Configure>")
self._redraw_active_cell()
self._redraw_selection()
def assign_column(self, col_id, req_frame):
"""
Sets display of a column given by its column id to req_frame.
The same frame may not be occupied by multiple columns and must
exist. Set req_frame to None to hide the column.
"""
if req_frame is not None:
self.frames[req_frame] # Raises error on failure
for col in self.columns.values():
if col.assignedframe == req_frame:
raise RuntimeError(
f"Frame {req_frame} is already in use by column {col.col_id!r}"
)
self._get_col_by_id(col_id).setdisplay(req_frame)
self._redraw_active_cell()
self._redraw_selection()
def clear(self):
"""Clears the MultiframeList."""
# self._set_active_cell(None, None)
self._set_length(0)
for col in self.columns.values():
col.data_clear()
def config(self, **kwargs):
"""
Change configuration options of the MultiframeList/underlying frame.
All non-MultiframeList options will be routed to the frame:
For configurable options, see the `Modifiable during runtime` section
in the `__init__` docstring.
"""
for mfl_arg in self.Config.__slots__:
if mfl_arg in kwargs:
old_value = getattr(self.cnf, mfl_arg)
setattr(self.cnf, mfl_arg, kwargs.pop(mfl_arg))
cnf_method = None
try:
cnf_method = getattr(self, f"_cnf_{mfl_arg}")
except AttributeError:
pass
# To prevent catching and AttributeError in the cnf method
if cnf_method is not None:
cnf_method(old_value)
super().configure(**kwargs)
def config_column(self, col_id, **cnf):
"""
Update the configuration of the column referenced by col_id
with the values specified in cnf as kwargs.
"""
col = self._get_col_by_id(col_id)
col.config(**cnf)
def format(self, targetcols = None, indices = None):
"""
Format the entire list based on the formatter functions in columns.
Optionally, a list of columns to be formatted can be supplied by their
id, which will leave all non-mentioned columns alone.
Also, if indices is specified, only the indices included in that list
will be formatted.
! Call this after all input has been performed !
"""
if indices is not None:
tmp = self.length - 1
for i in indices:
if i > tmp:
raise ValueError("Index is out of range.")
if targetcols is None:
for col in self.columns.values():
col.format(exclusively = indices)
else:
for col_id in targetcols:
self._get_col_by_id(col_id).format(exclusively = indices)
self._redraw_active_cell()
self._redraw_selection()
def get_active_cell(self):
"""
Returns the coordinates of the currently selected active cell as a
tuple of length 2; (0, 0) starting in the top left corner;
The two values may also be None.
"""
return (self.active_cell_x, self.active_cell_y)
def get_columns(self):
"""
Returns a dict where key is a column id and value is the column's
current display slot (frame). Value is None if the column is hidden.
"""
return {c.col_id: c.assignedframe for c in self.columns.values()}
def get_last_click(self):
"""
Returns the absolute screen coordinates the last user interaction
was made at as a tuple. May consist of int or None.
This method can be used to get coordinates to open a popup window at.
"""
return (self.coordx, self.coordy)
def get_length(self):
"""Returns length of the MultiframeList."""
return self.length
def get_selection(self):
"""
Returns the selection of the MultiframeList.
If in SINGLE selection mode, returns only the selected index
or `None`, otherwise passes through the selection set.
This mainly serves as convenience for the SINGLE selection
type, it is preferrable to check for selection emptiness
with simply `if mfl.selection:`
"""
if self.cnf.selection_type is SELECTION_TYPE.SINGLE:
return next(iter(self.selection)) if self.selection else None
else:
return self.selection
def remove_column(self, col_id):
"""
Deletes the column addressed by col_id, safely unregistering all
related elements.
"""
self.assign_column(col_id, None)
self.columns.pop(col_id)
def remove_frames(self, amount):
"""
Safely remove the specified amount of frames from the
MultiframeList, unregistering all related elements.
"""
to_purge = range(len(self.frames) - 1, len(self.frames) - amount - 1, -1)
for col in self.columns.values():
if col.assignedframe in to_purge:
col.setdisplay(None)
for i in to_purge:
if self.active_cell_x is not None and self.active_cell_x >= i:
self._set_active_cell(i - 1, self.active_cell_y)
self.framecontainer.grid_columnconfigure(i, weight = 0, minsize = 0)
# update in conjunction with the <Configure> event is for some
# reason necessary so the grid manager actually releases
# the space occupied by the deleted frames and redistributes it.
self.frames[i][0].destroy()
self.framecontainer.update()
self.frames.pop(i)
self.framecontainer.event_generate("<Configure>")
def set_active_cell(self, x, y):
"""
Sets the active cell to the specified x and y coordinates.
You may also pass None to any of those.
If outside of viewport, the frames will be scrolled towards the
new index.
"""
if not all(isinstance(v, (int, NoneType)) for v in (x, y)):
raise TypeError("Invalid type for x and/or y coordinate.")
if isinstance(x, int) and x >= len(self.frames):
raise ValueError("New x selection out of range.")
if isinstance(y, int) and y >= self.length:
raise ValueError("New y selection exceeds length.")
self._set_active_cell(x, y)
if y is not None:
for i in self.frames:
i[1].see(self.active_cell_y)
self._redraw_selection()
def set_selection(self, new_selection):
"""
Sets the listbox' selection to be made out of only these
contained within the given iterable or index and generates
a <<MultiframeSelect>> event.
If the selection type does not allow the selection to be made
up of multiple indices when multiple are passed in, the last
item in the iterable will be the selection.
Will set the view to look at the last index.
"""
# Wasteful iteration just to look at the last idx but whatever
new_selection = tuple(new_selection)
self._selection_set(new_selection)
self.event_generate("<<MultiframeSelect>>", when = "tail")
if new_selection:
for i in self.frames:
i[1].see(new_selection[-1])
#==DATA MODIFICATION==
def insert_row(self, data, insindex = None, reset_sortstate = True):
"""
Inserts a row of data into the MultiframeList.
Data should be supplied in the shape of a dict where a key is a
column's id and the corresponding value is the element that should
be appended to the column.
If insindex is not specified, data will be appended, else inserted
at the given position.
The function takes an optional reset_sortstate parameter to control whether
or not to reset the sortstates on all columns. (Default True)
"""
if reset_sortstate:
self._reset_sortstate()
for col in self.columns.values():
col.data_insert(data.get(col.col_id, BLANK), insindex)
self._set_length(self.length + 1)
def remove_rows(self, what, to = None):
"""
If `what` is an int, deletes the rows from `what` to `to`
(end-exclusive).
If `to` is not given, only removes the row at `what`.
Has no effect if `to` <= `what`.
If `what` is not an int, it must be a container and all indices its
iteration yields will be removed. `to` will be ignored.
Properly sets the length and will clear the selection
Raises an IndexError if any index should be out of the list's range.
"""
if isinstance(what, int):
to = what + 1 if to is None else to
if what < 0 or what > (self.length - 1):
raise IndexError(f"`from` index {what} out of range.")
if to < 0 or to > self.length:
raise IndexError(f"`to` index {what} out of range.")
to_delete = [range(what, to)]
else:
# Must be reversed to delete entries starting from the back,
# otherwise deletion of selection blocks will affect others
to_delete = sorted(what, reverse = True)
if to_delete and to_delete[0] > self.length - 1:
raise IndexError(f"Inaccessible deletion index: {to_delete[0]}")
if to_delete and to_delete[-1] < 0:
raise IndexError(f"Inaccessible deletion index: {to_delete[-1]}")
to_delete = _find_consecutive_sequences(to_delete)
self._set_length(self.length - sum(len(rng) for rng in to_delete))
for rng in to_delete:
for col in self.columns.values():
col.data_delete(rng.start, rng.stop)
self._redraw_active_cell()
def set_data(self, data, reset_sortstate = True):
    """
    Replace the entire contents of the MultiframeList.

    `data` maps column ids to lists of equal length; columns not
    present in `data` are filled with blank values. Raises a
    ValueError when the supplied lists differ in length.
    Pass `reset_sortstate = False` to keep the current sort
    indicators (they are reset by default).
    """
    self.clear()
    if not data:
        return
    expected_len = len(data[next(iter(data))])
    if any(len(column_data) != expected_len for column_data in data.values()):
        raise ValueError("Differing lengths in supplied column data.")
    if reset_sortstate:
        self._reset_sortstate()
    for column in self.columns.values():
        if column.col_id in data:
            column.data_set(data[column.col_id])
        else:
            column.data_set([BLANK for _ in range(expected_len)])
    self._set_length(expected_len)
def set_cell(self, col_to_mod, y, data, reset_sortstate = True):
    """
    Overwrite the cell at row `y` of column `col_to_mod` with `data`.
    The column's formatter - if configured - is applied automatically.
    Pass `reset_sortstate = False` to keep the sort indicators of all
    columns untouched (they are reset by default).
    """
    if reset_sortstate:
        self._reset_sortstate()
    target = self._get_col_by_id(col_to_mod)
    if y > (self.length - 1):
        raise IndexError("Cell index does not exist.")
    # Replace in place: drop the old element, insert the new one.
    target.data_delete(y)
    target.data_insert(data, y)
def set_column(self, col_to_mod, data, reset_sortstate = True):
    """
    Set the column specified by `col_to_mod` to `data`.

    Raises a ValueError if the new data's length differs from the
    other columns' length. (With only one column present any length
    is accepted and the list's length is adjusted.)
    Pass `reset_sortstate = False` to keep the sort indicators of all
    columns untouched (they are reset by default).
    """
    if reset_sortstate:
        self._reset_sortstate()
    targetcol = self._get_col_by_id(col_to_mod)
    datalen = len(data)
    if len(self.columns) == 1:
        targetcol.data_set(data)
        self._set_length(datalen)
    else:
        for col in self.columns.values():
            if len(col.data) != datalen:
                # Bugfix: the second literal was missing its `f`
                # prefix, so the message contained the raw text
                # "{col.col_id!r}" instead of the column id.
                raise ValueError(
                    "Length of supplied column data is different from length of " \
                    f"column {col.col_id!r}."
                )
        targetcol.data_set(data)
#==DATA RETRIEVAL==
def get_rows(self, start, end = None):
    """
    Retrieve rows between `start` and the optional, exclusive `end`.

    If `end` is omitted, only the row at `start` is included; `end`
    set to END takes everything from `start` onwards; `start` set to
    ALL returns the complete contents.
    Returns two values: a two-dimensional list of unformatted rows,
    and a dict mapping each column id to the index its values occupy
    within every row. E.g. for
    [["egg", "2", ""], ["foo", "3", "Comment"], ["bar", "0", ""]] and
    {"name_col": 0, "comment_col": 2, "rating_col": 1}, column
    "name_col" holds ["egg", "foo", "bar"], "rating_col"
    ["2", "3", "0"] and "comment_col" ["", "Comment", ""].
    """
    if start == ALL:
        start = 0
        end = self.length
    if end == END:
        end = self.length
    if end is None:
        end = start + 1
    col_id_map = {col_id: i for i, col_id in enumerate(self.columns.keys())}
    # NOTE: builds each row by touching every column per index;
    # cheap enough for typical list sizes.
    rows = []
    for idx in range(start, end):
        rows.append([col.data[idx] for col in self.columns.values()])
    return rows, col_id_map
def get_column(self, col_id):
    """Return the data of the column identified by `col_id` as a list."""
    return self._get_col_by_id(col_id).data
def get_cell(self, col_id, y):
    """Return the element at row `y` of the column identified by `col_id`."""
    return self._get_col_by_id(col_id).data[y]
#====SORT METHOD====
def sort(self, _, call_col):
    """
    Sort the list, modifying all column's data.
    This function is designed to only be called through labels,
    taking an event placeholder (which is ignored), followed by the
    calling column where id, sortstate and - if needed - the
    fallback type are read from.
    """
    caller_id = call_col.col_id
    # Remember scroll position so it can be restored after the rows
    # have been rearranged.
    scroll = self._scroll_get()
    # Flip the caller's sortstate between 0 and 1; 1 -> reverse sort.
    new_sortstate = abs(int(call_col.sortstate) - 1)
    rev = bool(new_sortstate)
    call_col.set_sortstate(new_sortstate)
    for col in self.columns.values(): # reset sortstate of other columns
        if col.col_id != caller_id:
            col.set_sortstate(2)
    tmpdat, colidmap = self.get_rows(ALL)
    datacol_index = colidmap[caller_id]
    keyfunc_internal = itemgetter(datacol_index)
    if call_col.cnf.sortkey is not None:
        # Wrap the user-supplied sortkey so it receives the cell value.
        keyfunc = lambda e: call_col.cnf.sortkey(keyfunc_internal(e))
    else:
        keyfunc = keyfunc_internal
    try:
        tmpdat = sorted(tmpdat, key = keyfunc, reverse = rev)
    except TypeError:
        # Unorderable/mixed values: coerce the sort column through
        # the configured fallback type (if any) and retry.
        fb_type = call_col.cnf.fallback_type
        if fb_type is None:
            raise
        for i, _ in enumerate(tmpdat):
            tmpdat[i][datacol_index] = fb_type(tmpdat[i][datacol_index])
        tmpdat = sorted(tmpdat, key = keyfunc, reverse = rev)
    # Rebuild per-column data from the sorted rows and write it back
    # without clobbering the sortstates that were just set.
    newdat = {
        col_id: [r[idx] for r in tmpdat]
        for col_id, idx in colidmap.items()
    }
    self.set_data(newdat, reset_sortstate = False)
    self.format()
    self._scroll_restore(scroll)
#====INTERNAL METHODS - cnf====
def _cnf_listboxheight(self, _):
    """
    Config callback: apply the configured listbox height to every
    frame's listbox, then resize the height-hack widget to match the
    first listbox's requested height.
    """
    new_height = self.cnf.listboxheight
    for frame in self.frames:
        frame[1].configure(height = new_height)
    if not self.frames:
        return
    self._listboxheight_hack.configure(
        height = self.frames[0][1].winfo_reqheight()
    )
def _cnf_rightclickbtn(self, old):
    """
    Callback for when rightclickbtn is changed via the config
    method. Unbinds the old mouse button on every listbox and binds
    the press handler to the new one.
    """
    for idx, frame in enumerate(self.frames):
        # Default arguments freeze the current button and frame index
        # per handler; a plain closure would late-bind `idx`.
        def _right_click_handler(event, button = self.cnf.rightclickbtn, frameidx = idx):
            return self._on_listbox_mouse_press(event, button, frameidx)
        frame[1].unbind(f"<Button-{old}>")
        frame[1].bind(f"<Button-{self.cnf.rightclickbtn}>", _right_click_handler)
def _cnf_selection_type(self, _):
    """
    Config callback invoked when the selection type changes; drops
    the current selection since it may be invalid under the new mode.
    """
    self._selection_clear()
def _cnf_active_cell_span_row(self, old):
    """
    Callback for when active_cell_span_row is changed.
    Will refresh the active cell highlights.
    """
    # NOTE: Extremely hacky but works so whatever.
    # Temporarily restore the old setting so _undraw_active_cell
    # clears exactly the highlights that were drawn under it, then
    # redraw with the new setting in effect.
    cur = self.cnf.active_cell_span_row
    self.cnf.active_cell_span_row = old
    self._undraw_active_cell()
    self.cnf.active_cell_span_row = cur
    self._redraw_active_cell()
#====INTERNAL METHODS====
def _clear_frame(self, frame_idx):
    """
    Will set up default bindings on a frame, and clear its label,
    sort and listbox, as well as reset its grid manager parameters.
    Usable for a part of the work that goes into removing a column
    from a frame or initial setup.
    """
    tgt_frame = self.frames[frame_idx]
    # Listbox: wipe and refill with blanks so its row count stays in
    # sync with the list's length.
    tgt_frame[1].delete(0, tk.END)
    tgt_frame[1].insert(0, *(BLANK for _ in range(self.length)))
    tgt_frame[1].configure(width = _DEF_LISTBOX_WIDTH)
    tgt_frame[1].unbind("<Double-Button-1>")
    # Header label: blank text, default press/drag/cursor bindings.
    tgt_frame[2].configure(text = BLANK)
    tgt_frame[2].bind("<Button-1>",
        lambda e: self._on_frame_header_press(e, frame_idx)
    )
    tgt_frame[2].bind("<ButtonRelease-1>",
        lambda e: self._on_frame_header_release(e, frame_idx)
    )
    tgt_frame[2].bind("<Leave>", self._on_frame_header_leave)
    tgt_frame[2].bind("<Motion>",
        lambda e: self._on_frame_header_motion(e, frame_idx)
    )
    # Sort indicator label.
    tgt_frame[3].configure(text = BLANK)
    # Reset the grid column's weight/minsize to the defaults.
    self.framecontainer.grid_columnconfigure(frame_idx,
        weight = WEIGHT, minsize = MIN_WIDTH
    )
def _get_clamps(self, dragged_frame):
    """
    Return the (min, max) x positions that a resize drag of
    `dragged_frame`'s left border may occupy without shrinking either
    adjacent frame below its configured grid minsize.
    """
    cur = self.frames[dragged_frame]
    prev = self.frames[dragged_frame - 1]
    lower = prev[0].winfo_x() + \
        self.framecontainer.grid_columnconfigure(dragged_frame - 1)["minsize"]
    upper = cur[0].winfo_x() + cur[0].winfo_width() - \
        self.framecontainer.grid_columnconfigure(dragged_frame)["minsize"]
    return (lower, upper)
def _get_clamped_resize_pos(self, dragged_frame, event):
    """
    Return the x position - relative to the MultiframeList - a resize
    drag on `dragged_frame`'s header should currently sit at, clamped
    into the range permitted by the neighboring frames' minsizes.
    """
    lo, hi = self._get_clamps(dragged_frame)
    pointer_x = event.widget.winfo_rootx() + event.x - self.framecontainer.winfo_rootx()
    return max(lo, min(pointer_x, hi))
def _get_col_by_id(self, col_id):
    """
    Look up and return the column registered under `col_id`; raises a
    ValueError for unknown ids.
    """
    column = self.columns.get(col_id)
    if column is not None:
        return column
    raise ValueError(f"No column with column id {col_id!r}!")
def _get_col_by_frame(self, frame):
    """Return the column currently assigned to frame index `frame`, or None."""
    return next(
        (col for col in self.columns.values() if col.assignedframe == frame),
        None
    )
def _get_empty_frames(self):
    """Return the indices of all frames no column is assigned to."""
    occupied = {col.assignedframe for col in self.columns.values()}
    return [idx for idx in range(len(self.frames)) if idx not in occupied]
def _get_frame_at_x(self, x):
    """
    Return the index of the frame at screen pixel position `x`,
    clamped into [0, len(self.frames) - 1].
    """
    idx = -1
    for candidate, frame in enumerate(self.frames):
        # Stop as soon as a frame starts right of x; the previous
        # frame is the one under the pointer.
        if frame[1].winfo_rootx() > x:
            break
        idx = candidate
    return max(idx, 0)
def _get_listbox_conf(self, listbox):
    """
    Build a dict of listbox configuration options derived from the
    ttk style database ("." and "MultiframeList.Listbox"), filtered
    down to the option keys the given listbox actually accepts.
    """
    conf = self._DEFAULT_LISTBOX_CONFIG.copy()
    for style_name in (".", "MultiframeList.Listbox"):
        style_conf = self.ttk_style.configure(style_name)
        if style_conf is not None:
            conf.update(style_conf)
    valid_keys = listbox.configure().keys()
    return {key: value for key, value in conf.items() if key in valid_keys}
def _get_listbox_entry_height(self, lb):
    """
    Measure the pixel height of a single entry of listbox `lb` from
    its font's linespace metric plus selection border padding.
    """
    metrics = self.tk.call("font", "metrics", lb["font"]).split()
    linespace = int(metrics[metrics.index("-linespace") + 1])
    sel_border = int(lb["selectborderwidth"])
    return linespace + 1 + 2 * sel_border
def _get_index_from_mouse_y(self, lb, y_pos):
    """
    Translate a pixel y position within listbox `lb` into the index
    of the entry under it, accounting for scroll offset, border width
    and entry height.
    """
    first_visible = int(lb.yview()[0] * self.length)
    inner_y = y_pos - int(lb["borderwidth"])
    return first_visible + inner_y // self._get_listbox_entry_height(lb)
def _load_active_cell_style(self):
    """
    Fetch (active_cell_style, active_row_style) from the ttk style
    database, falling back to the default itemconfigure values for
    options the styles do not define.
    """
    styles = []
    for style_name in ("MultiframeList.ActiveCell", "MultiframeList.ActiveRow"):
        merged = self._DEFAULT_ITEMCONFIGURE.copy()
        merged.update(self.ttk_style.configure(style_name) or {})
        styles.append(merged)
    return tuple(styles)
def _on_arrow_x(self, event, direction):
    """
    Executed when the MultiframeList receives <Left> and <Right> events,
    triggered by the user pressing the arrow keys.
    `direction` is the signed step to move the active column by.
    """
    # Start at column 0 when no cell is active yet but frames exist.
    # NOTE(review): with active_cell_x None and no frames the else
    # branch computes `None + direction` (TypeError); presumably
    # unreachable while the widget has no frames - confirm.
    new_x = 0 if self.active_cell_x is None and self.frames else self.active_cell_x + direction
    new_y = 0 if self.active_cell_y is None and self.length > 0 else self.active_cell_y
    # Ignore movement past either horizontal edge.
    if new_x < 0 or new_x > len(self.frames) - 1:
        return
    self._set_active_cell(new_x, new_y)
def _on_arrow_y(self, event, direction):
    """
    Executed when the MultiframeList receives <Up> and <Down> events,
    triggered by the user pressing the arrow keys. Changes
    `self.active_cell_y`. It may be called with the control and the shift key
    held, in which case it will arrange for multiple item selection.
    """
    # Start at (0, 0) when no cell is active yet.
    new_x = 0 if self.active_cell_x is None and self.frames else self.active_cell_x
    new_y = 0 if self.active_cell_y is None else self.active_cell_y + direction
    # Ignore movement past the top or bottom row.
    if new_y < 0 or new_y > self.length - 1:
        return
    self._set_active_cell(new_x, new_y)
    # Keep the active row visible in every listbox.
    for i in self.frames:
        i[1].see(self.active_cell_y)
    selection_made = True
    if with_shift(event):
        # Shift extends from the anchor; adding ctrl keeps the
        # existing selection instead of replacing it.
        self._selection_set_from_anchor(self.active_cell_y, clear = not with_ctrl(event))
    elif with_ctrl(event):
        # Plain ctrl only moves the active cell; selection untouched.
        selection_made = False
    else:
        self._selection_set(self.active_cell_y)
    if selection_made:
        self.event_generate("<<MultiframeSelect>>", when = "tail")
def _on_click_key(self, event):
    """
    Called when the "click" key (Space by default) is pressed.
    Generates a <<MultiframeSelect>> event and modifies the
    selection depending on whether shift and ctrl were being held.
    """
    new_x = 0 if self.active_cell_x is None and self.frames else self.active_cell_x
    new_y = 0 if self.active_cell_y is None and self.length > 0 else self.active_cell_y
    # Nothing to act on without a valid cell (empty list/no frames).
    if new_y is None or new_x is None:
        return
    self._set_active_cell(new_x, new_y)
    if with_shift(event):
        self._selection_set_from_anchor(self.active_cell_y, clear = not with_ctrl(event))
    elif with_ctrl(event):
        # Ctrl toggles just this item and drops the anchor.
        self._selection_anchor = None
        self._selection_set_item(self.active_cell_y, toggle = True)
    else:
        self._selection_set(self.active_cell_y)
    self.event_generate("<<MultiframeSelect>>", when = "tail")
def _on_column_release(self, event, released_frame, drag_intent):
    """
    Finish a header interaction on `released_frame` depending on the
    drag intent: complete a reorder drop, apply a resize by
    redistributing grid weights, or - when no drag took place -
    trigger a sort on the released column.
    """
    if drag_intent is DRAGINTENT.REORDER and self.cnf.reorderable:
        self.reorder_highlight.place_forget()
        # Swap the dragged column with whatever sits under the pointer.
        self._swap_by_frame(
            self._get_frame_at_x(event.widget.winfo_rootx() + event.x),
            released_frame
        )
    elif drag_intent is DRAGINTENT.RESIZE and self.cnf.resizable:
        # Shouldn't really happen, but you can never be too sure
        if released_frame == 0:
            return
        self.resize_highlight.place_forget()
        # Split the combined weight of the two adjacent frames
        # proportionally to where the border was dropped.
        total_weight = (
            self.framecontainer.grid_columnconfigure(released_frame)["weight"] +
            self.framecontainer.grid_columnconfigure(released_frame - 1)["weight"]
        )
        minclamp, maxclamp = self._get_clamps(released_frame)
        maxclamp += (1 if maxclamp == minclamp else 0) # Prevent zero div
        pos = (self._get_clamped_resize_pos(released_frame, event) - minclamp)
        # Subtracting minclamp from maxclamp will effectively get the area pos moves in
        prv_weight = round((pos / (maxclamp - minclamp)) * total_weight)
        rel_weight = total_weight - prv_weight
        for fidx, weight in ((released_frame, rel_weight), (released_frame - 1, prv_weight)):
            col = self._get_col_by_frame(fidx)
            if col is None:
                # Empty frames carry their weight directly in the grid.
                self.framecontainer.grid_columnconfigure(fidx, weight = weight)
            else:
                col.config(weight = weight)
    elif self.dragging is None:
        # Plain click without a drag: sort by the clicked column.
        rcol = self._get_col_by_frame(released_frame)
        if rcol is not None and rcol.cnf.sort:
            self.sort(None, rcol)
def _on_column_drag(self, event, dragged_frame):
    """
    Mouse-motion handler while a header drag is in progress; places
    the thin indicator widget for either a reorder or a resize drag.
    """
    if self.dragging is DRAGINTENT.REORDER and self.cnf.reorderable:
        # Indicate the frame the column would currently be dropped on.
        highlight_idx = self._get_frame_at_x(event.widget.winfo_rootx() + event.x)
        self.reorder_highlight.place(
            x = self.frames[highlight_idx][0].winfo_x(),
            y = self.frames[highlight_idx][1].winfo_y(),
            width = 3, height = self.frames[highlight_idx][1].winfo_height()
        )
        self.reorder_highlight.tkraise()
    elif self.dragging is DRAGINTENT.RESIZE and self.cnf.resizable:
        # Show the prospective border position, clamped by the
        # neighboring frames' minimum sizes.
        self.resize_highlight.place(
            x = self._get_clamped_resize_pos(dragged_frame, event),
            y = self.frames[0][1].winfo_y(),
            width = 3, height = self.frames[0][1].winfo_height()
        )
        self.resize_highlight.tkraise()
def _on_frame_header_leave(self, evt):
    """Restore the default cursor when the pointer leaves a header label."""
    evt.widget.configure(cursor = "arrow")
def _on_frame_header_motion(self, evt, fidx):
    """
    Header <Motion> handler. While a button is held, promotes the
    motion into a drag once it exceeds DRAG_THRES and keeps the drag
    indicator updated; otherwise only adjusts the cursor to signal a
    possible resize.
    """
    if self.pressed_frame is not None:
        if self.dragging is not None:
            self._on_column_drag(evt, fidx)
        elif self.dragging is None and abs(evt.x - self.pressed_x) > DRAG_THRES:
            # Threshold passed: decide between reorder and resize
            # based on where the initial press happened.
            self.dragging = _drag_intent(self.pressed_x, self.pressed_frame)
    else:
        evt.widget.configure(
            cursor = "sb_h_double_arrow" if
                _drag_intent(evt.x, fidx) is DRAGINTENT.RESIZE and self.cnf.resizable
            else "arrow"
        )
def _on_frame_header_press(self, evt, fidx):
    """
    Record which header was pressed and at which x position, so a
    subsequent motion event can decide whether a drag has started.
    """
    self.pressed_frame = fidx
    self.pressed_x = evt.x
def _on_frame_header_release(self, evt, fidx):
    """
    Finish a header interaction: delegate to the column release
    handler with the recorded drag intent, then reset all drag state.
    """
    self._on_column_release(evt, fidx, self.dragging)
    self.dragging = None
    self.pressed_frame = self.pressed_x = None
def _on_listbox_mouse_motion(self, event, button, frameindex):
    """
    Called by listboxes whenever a mousebutton is dragged.
    Will set the selection in accordance to whether the click the
    drag stems from was done with ctrl/shift, the selection anchor
    and the selection type.
    """
    # Only act while a press is actually being tracked.
    if self._last_click_event is None:
        return
    hovered = self._get_index_from_mouse_y(self.frames[frameindex][1], event.y)
    if hovered < 0:
        return
    hovered = min(hovered, self.length - 1)
    # Skip redundant work while the pointer stays on the same row.
    if self._last_dragged_over_element == hovered:
        return
    self._last_dragged_over_element = hovered
    self._set_active_cell(frameindex, hovered)
    if with_ctrl(event):
        self._selection_set_item(hovered, toggle = True)
    elif with_shift(event):
        self._selection_set_item(hovered)
    else:
        # Plain drag: rubber-band selection from the anchor.
        self._selection_set_from_anchor(hovered)
    # Keep the hovered row visible everywhere.
    for i in self.frames:
        i[1].see(hovered)
    self.event_generate("<<MultiframeSelect>>", when = "tail")
def _on_listbox_mouse_press(self, event, button, frameindex):
    """
    Called by listboxes whenever a mouse button is pressed on them.
    Sets the active cell to the cell under the mouse pointer and
    sets internal drag selection variables.
    """
    # Reset focus to mfl, all mouse events will still go to the listbox
    self.focus()
    if self.length == 0:
        return
    tosel = self._get_index_from_mouse_y(self.frames[frameindex][1], event.y)
    if tosel < 0:
        return
    tosel = min(tosel, self.length - 1)
    self._set_active_cell(frameindex, tosel)
    # A rightclick inside the existing selection keeps it intact so a
    # context menu can act on all selected rows.
    if button != self.cnf.rightclickbtn or tosel not in self.selection:
        # NOTE: these should be handled differently / behave very
        # specifically in the windows listboxes but tbh who cares
        if with_shift(event):
            self._selection_set_from_anchor(tosel)
        elif with_ctrl(event):
            self._selection_set_item(tosel, toggle = True)
        else:
            self._selection_set(tosel)
        self.event_generate("<<MultiframeSelect>>", when = "tail")
    # Remember press state for the drag/release handlers.
    self._last_dragged_over_element = tosel
    self._last_click_event = event
def _on_listbox_mouse_release(self, event, button, frameindex):
    """
    Called by listboxes when the mouse is released over them.
    If the released button was the rightclick one, generates a
    <<MultiframeRightclick>> event.
    Resets click variables.
    """
    if self._last_click_event is None:
        return
    # Remember the release position for later retrieval.
    self.coordx = self.frames[frameindex][0].winfo_rootx() + event.x
    # NOTE(review): the +20 presumably compensates for the header
    # label above the listbox - confirm against the layout code.
    self.coordy = self.frames[frameindex][0].winfo_rooty() + 20 + event.y
    self._last_dragged_over_element = None
    self._last_click_event = None
    if button == self.cnf.rightclickbtn:
        self.event_generate("<<MultiframeRightclick>>", when = "tail")
def _on_menu_button(self, _):
    """
    User has pressed the menu button.
    This generates a <<MultiframeRightclick>> event and modifies
    self.coord[xy] to an appropriate value.
    """
    if not self.frames:
        return
    if self.active_cell_y is None:
        return
    local_actcellx = 0 if self.active_cell_x is None else self.active_cell_x
    pseudo_lbl = self.frames[local_actcellx][0]
    pseudo_lbx = self.frames[local_actcellx][1]
    first_offset = pseudo_lbx.yview()[0]
    entry_height = self._get_listbox_entry_height(pseudo_lbx)
    # Place the popup coordinates roughly over the active row,
    # accounting for the current scroll offset.
    # NOTE(review): the 5/20/10 pixel constants look like empirical
    # padding for the header label - confirm.
    tmp_x = pseudo_lbl.winfo_rootx() + 5
    tmp_y = entry_height * (self.active_cell_y - (self.length * first_offset)) + \
        20 + pseudo_lbl.winfo_rooty()
    tmp_x = int(round(tmp_x))
    tmp_y = int(round(tmp_y))
    tmp_y = max(tmp_y, 0) + 10
    self.coordx = tmp_x
    self.coordy = tmp_y
    self.event_generate("<<MultiframeRightclick>>", when = "tail")
def _redraw_active_cell(self):
    """
    Sets the active cell's itemconfigurations.
    Should be used after e.g. new frames have been added or reordered.
    """
    if self.active_cell_x is None or self.active_cell_y is None:
        return
    if self.cnf.active_cell_span_row:
        # Highlight the whole row; the cell's own column gets the
        # stronger cell style, all other columns the row style.
        for idx, i in enumerate(self.frames):
            i[1].itemconfigure(self.active_cell_y, **(
                self._active_cell_style
                if idx == self.active_cell_x else
                self._active_row_style
            ))
    else:
        # NOTE(review): the style dict is passed positionally (as the
        # `cnf` argument) here but via ** above; tkinter accepts both
        # forms - confirm the asymmetry is intentional.
        self.frames[self.active_cell_x][1].itemconfigure(
            self.active_cell_y, self._active_cell_style
        )
def _redraw_selection(self):
    """
    Mirror `self.selection` into every frame's listbox: clear all
    visual selections, then re-apply every selected index.
    """
    for frame in self.frames:
        frame[1].selection_clear(0, tk.END)
    if self.selection is None:
        return
    for frame in self.frames:
        for idx in self.selection:
            frame[1].selection_set(idx)
def _reset_sortstate(self):
    """Set every column's sortstate back to the neutral value 2."""
    for col in self.columns.values():
        col.set_sortstate(2)
def _swap_by_frame(self, tgt_frame, src_frame):
    """
    Swaps the contents of two frames. Whether any, none or both of them
    are blank is handled properly. Will copy over the weight from empty
    frames as their `weight` is the only "configurable" option they have
    stored in them (implicitly by the user resizing them).
    Scroll position, active cell highlight and selection are restored
    afterwards.
    """
    tgt_col = src_col = None
    tgt_col = self._get_col_by_frame(tgt_frame)
    src_col = self._get_col_by_frame(src_frame)
    # They're the same, no action required
    if tgt_col == src_col and tgt_col is not None:
        return
    scroll = self._scroll_get()
    # Remember the weights of empty frames; occupied frames carry
    # their weight in the column's own config instead.
    src_w = self.framecontainer.grid_columnconfigure(src_frame)["weight"] \
        if src_col is None else None
    tgt_w = self.framecontainer.grid_columnconfigure(tgt_frame)["weight"] \
        if tgt_col is None else None
    # Detach both columns first so neither display slot is occupied...
    if src_col is not None:
        src_col.setdisplay(None)
    if tgt_col is not None:
        tgt_col.setdisplay(None)
    # ...then reattach them crosswise, restoring the stored weight on
    # whichever side stays empty.
    if src_col is not None:
        src_col.setdisplay(tgt_frame)
    else:
        self.framecontainer.grid_columnconfigure(tgt_frame, weight = src_w)
    if tgt_col is not None:
        tgt_col.setdisplay(src_frame)
    else:
        self.framecontainer.grid_columnconfigure(src_frame, weight = tgt_w)
    self._scroll_restore(scroll)
    self._redraw_active_cell()
    self._redraw_selection()
def _scroll_get(self):
    """Return the current vertical scroll fraction, or None without frames."""
    if self.frames:
        return self.frames[0][1].yview()[0]
    return None
def _scroll_restore(self, scroll):
    """Restore a scroll fraction previously obtained via `_scroll_get`."""
    if scroll is None:
        return
    self._scrollalllistbox(scroll, 1.0)
def _scrollallbar(self, *args):
    """
    Scrollbar callback; forwards the scroll command to every listbox.
    Tk hands over either ("moveto", frac) or ("scroll", n, what).
    """
    for frame in self.frames:
        frame[1].yview(*args)
def _scrollalllistbox(self, a, b):
    """
    Listbox yscrollcommand callback; keeps all listboxes and the
    scrollbar in sync with the fraction pair `(a, b)` Tk reports.
    """
    for frame in self.frames:
        frame[1].yview_moveto(a)
    self.scrollbar.set(a, b)
def _selection_clear(self, redraw = True, with_event = False):
    """
    Drop the selection anchor and empty the selection set.
    With `redraw`, the listboxes are visually updated as well.
    With `with_event`, a <<MultiframeSelect>> event is generated,
    but only if the selection held anything beforehand.
    """
    had_items = bool(self.selection)
    self._selection_anchor = None
    self.selection.clear()
    if redraw:
        self._redraw_selection()
    if with_event and had_items:
        self.event_generate("<<MultiframeSelect>>", when = "tail")
def _selection_set(self, new, anchor = None, toggle = False):
    """
    Replace the selection with `new`, a single index or an iterable
    of indices. If `anchor` is given, the selection anchor is set to
    it; otherwise it becomes the first index processed (whose order
    may be arbitrary for unordered iterables). `toggle` is forwarded
    to `_selection_set_item` for every index.
    """
    self._selection_clear(False)
    if anchor is not None:
        self._selection_anchor = anchor
    indices = (new,) if isinstance(new, int) else new
    for index in indices:
        self._selection_set_item(index, False, toggle)
    self._redraw_selection()
def _selection_set_from_anchor(self, target, toggle = False, clear = True):
    """
    If the selection mode is `MULTIPLE`, sets the selection from the current
    anchor to the given target index. If the anchor does not exist, will set
    the selection as just the target item and make it the new anchor.
    If the selection mode is `SINGLE`, will simply set the selection to `target`.
    `toggle` will be passed on to `self._selection_set`.
    Only relevant for `MULTIPLE` selection mode, if `clear` is set to `False`,
    the current selection will be kept and the new selection added as a union to it.
    """
    if self.cnf.selection_type is SELECTION_TYPE.SINGLE or self._selection_anchor is None:
        self._selection_set(target, toggle = toggle)
        return
    # Walk from the anchor towards the target, inclusive on both ends.
    step = -1 if target < self._selection_anchor else 1
    new_sel = set() if clear else self.selection.copy()
    new_sel.update(range(self._selection_anchor, target + step, step))
    self._selection_set(new_sel, self._selection_anchor, toggle)
def _selection_set_item(self, idx, redraw = True, toggle = False):
    """
    Adds a new index to the MultiframeList's selection, be it in single
    or multiple selection mode. If the selection mode is SINGLE, the
    selection will be cleared. If the selection anchor is None, it
    will be set to the given item.
    If `redraw` is `True`, will redraw the selection.
    If `toggle` is `True`, will toggle the index instead of setting it.
    """
    if self.cnf.selection_type is SELECTION_TYPE.SINGLE:
        # SINGLE mode may hold at most one index; drop the rest first.
        self._selection_clear(False)
    if self._selection_anchor is None:
        self._selection_anchor = idx
    if toggle and idx in self.selection:
        self.selection.remove(idx)
    else:
        self.selection.add(idx)
    if redraw:
        self._redraw_selection()
def _set_active_cell(self, new_x, new_y):
    """
    Sets the active cell to the new values and updates its highlights
    appropiately. The values may be `None`, to keep one of the fields
    unchanged, pass in `self.active_cell_x|y` as needed.
    """
    old_x = self.active_cell_x
    old_y = self.active_cell_y
    if new_x != old_x:
        self.active_cell_x = new_x
        # Column changed: demote the old cell's highlight (to the row
        # style when span-row mode is on, else to defaults) and
        # promote the new one without a full redraw.
        if old_x is not None and old_y is not None:
            self.frames[old_x][1].itemconfigure(old_y, **(
                self._active_row_style
                if self.cnf.active_cell_span_row else
                self._DEFAULT_ITEMCONFIGURE
            ))
        if new_x is not None and new_y is not None:
            self.frames[new_x][1].itemconfigure(new_y, **self._active_cell_style)
    if new_y != old_y:
        # Row changed: fully undraw the old row's highlight, then
        # redraw at the new position.
        if old_y is not None:
            self._undraw_active_cell()
        self.active_cell_y = new_y
        self._redraw_active_cell()
def _set_length(self, new_length):
    """
    Use this for any change to `self.length`. This method updates
    frames without a column so the amount of blank strings in them
    stays correct, clears the selection generating an event if it
    was not empty previously, will adjust the active cell if it runs
    out of bounds and clear the click/dragging event.
    """
    self.length = new_length
    # Will cause errors otherwise if change occurs while user is dragging
    self._last_click_event = None
    self._last_dragged_over_element = None
    if self.active_cell_y is not None:
        # Clamp the active cell into the new range, dropping it
        # entirely when the list became empty.
        new_ay = self.active_cell_y
        if new_ay > self.length - 1:
            new_ay = self.length - 1 if self.length > 0 else None
        if new_ay != self.active_cell_y:
            self._set_active_cell(self.active_cell_x, new_ay)
    self._selection_clear(with_event = True)
    # Keep the blank filler rows of column-less frames in sync.
    for fi in self._get_empty_frames():
        curframelen = self.frames[fi][1].size()
        if curframelen > self.length:
            self.frames[fi][1].delete(self.length, tk.END)
        elif curframelen < self.length:
            self.frames[fi][1].insert(
                tk.END, *(BLANK for _ in range(self.length - curframelen))
            )
def _theme_update(self, _):
    """
    Event-binding callback for theme changes: reloads the active
    cell/row styles and pushes the new look to every frame's listbox
    (listboxes are not available as ttk variants).
    """
    self._active_cell_style, self._active_row_style = self._load_active_cell_style()
    if not self.frames:
        return
    new_conf = self._get_listbox_conf(self.frames[0][1])
    for frame in self.frames:
        frame[1].configure(**new_conf)
    self._redraw_active_cell()
def _undraw_active_cell(self):
    """
    Reset the itemconfigure options of the active cell (or, when
    `active_cell_span_row` is set, of the whole active row) back to
    the defaults.
    """
    if self.active_cell_y is None:
        return
    row = self.active_cell_y
    if self.cnf.active_cell_span_row:
        targets = [frame[1] for frame in self.frames]
    else:
        targets = [self.frames[self.active_cell_x][1]]
    for listbox in targets:
        listbox.itemconfigure(row, **self._DEFAULT_ITEMCONFIGURE)
if __name__ == "__main__":
    # Running this module directly launches the interactive demo.
    from multiframe_list.demo import run_demo
    run_demo()
|
{"/multiframe_list/demo2.py": ["/multiframe_list/multiframe_list.py"], "/multiframe_list/multiframe_list.py": ["/multiframe_list/demo.py"], "/multiframe_list/demo.py": ["/multiframe_list/multiframe_list.py"], "/multiframe_list/__main__.py": ["/multiframe_list/demo.py"], "/multiframe_list/__init__.py": ["/multiframe_list/multiframe_list.py", "/multiframe_list/demo.py"]}
|
26,744
|
Square789/multiframe_list
|
refs/heads/master
|
/multiframe_list/demo.py
|
"""
Shoddy demonstration of the MultiframeList.
To run it, call run_demo().
"""
from random import choice, randint, sample
import tkinter as tk
from multiframe_list.multiframe_list import MultiframeList, END, SELECTION_TYPE, WEIGHT
def priceconv(data):
    """Format a raw value as a dollar price string."""
    return "${}".format(data)
def getlongest(seq):
    """
    Return the length of the longest str() representation found
    anywhere in `seq`, descending recursively into nested lists and
    tuples. Returns 0 for an empty sequence.
    """
    best = 0
    for item in seq:
        if isinstance(item, (list, tuple)):
            current = getlongest(item)
        else:
            current = len(str(item))
        if current > best:
            best = current
    return best
class Demo:
def __init__(self):
self.root = tk.Tk()
self.mfl = MultiframeList(self.root, inicolumns = (
{"name": "Small", "minsize": 40},
{"name": "Sortercol", "col_id": "sorter"},
{"name": "Pricecol", "sort": True, "col_id": "sickocol",
"weight": round(WEIGHT * 3)},
{"name": "-100", "col_id": "sub_col", "formatter": lambda n: n - 100},
{"name": "Wide col sorting randomly", "minsize": 200,
"sort": True, "sortkey": lambda _: randint(1, 100)},
{"col_id": "cnfcl"},
{"name": "Doubleclick me", "col_id": "dbc_col", "minsize": 80,
"dblclick_cmd": self.doubleclick_column_callback},
),
active_cell_span_row = False,
reorderable = True,
)
self.mfl.bind(
"<<MultiframeRightclick>>",
lambda e: print("Rightclick on", e.widget, "@", self.mfl.get_last_click())
)
self.mfl.config_column("sickocol", formatter = priceconv)
self.mfl.config_column("sorter", sort = True)
self.mfl.config_column(
"cnfcl",
name = "Configured Name",
sort = True,
fallback_type = lambda x: int("0" + str(x))
)
self.mfl.pack(expand = 1, fill = tk.BOTH)
self.mfl.add_frames(2)
self.mfl.remove_frames(1)
self.randstyle()
for _ in range(10):
self.adddata()
btns = (
tk.Button(self.root, text="+row", command=self.adddata),
tk.Button(self.root, text="-sel", command=self.remsel),
tk.Button(self.root, text="---", command=self.mfl.clear),
tk.Button(self.root, text="+frame", command=lambda: self.mfl.add_frames(1)),
tk.Button(self.root, text="-frame", command=self.remframe),
tk.Button(self.root, text="?columns", command=lambda: print(self.mfl.get_columns())),
tk.Button(self.root, text="?currow", command=self.getcurrrow),
tk.Button(self.root, text="?to_end", command=lambda: self.getcurrrow(END)),
tk.Button(self.root, text="?curcell", command=lambda: print(self.mfl.get_active_cell())),
tk.Button(self.root, text="?length", command=lambda: print(self.mfl.get_length())),
tk.Button(self.root, text="+column", command=self.add1col),
tk.Button(self.root, text="swap01", command=self.swap01),
tk.Button(self.root, text="swaprnd", command=self.swaprand),
tk.Button(self.root, text="bgstyle", command=lambda: self.root.tk.eval(
"ttk::style configure . -background #{0}{0}{0}".format(hex(randint(50, 255))[2:])
)),
tk.Button(self.root, text="lbstyle", command=self.randstyle),
tk.Button(self.root, text="conf", command=self.randcfg),
tk.Button(self.root, text="randac", command=self.randactive),
)
for btn in btns:
btn.pack(fill = tk.X, side = tk.LEFT)
def adddata(self):
self.mfl.insert_row({col_id: randint(0, 100) for col_id in self.mfl.get_columns()})
self.mfl.format()
def add1col(self):
if "newcol" in self.mfl.get_columns():
if self.mfl.get_columns()["newcol"] != 6:
print("Please return that column to frame 6, it's where it feels at home.")
return
self.mfl.remove_column("newcol")
elif 6 in self.mfl.get_columns().values():
print("Something's in frame 6 already, get it cleared first!")
else:
self.mfl.add_columns(
{"col_id": "newcol", "name": "added @ runtime; wide.",
"minsize": 30, "weight": 3 * WEIGHT}
)
self.mfl.assign_column("newcol", 6)
def doubleclick_column_callback(self, _):
x, y = self.mfl.get_active_cell()
if y is None:
print("Empty column!")
else:
print(f"{self.mfl.get_cell('dbc_col', y)} @ ({x}, {y})")
def getcurrrow(self, end = None):
x_idx = self.mfl.get_active_cell()[1]
if x_idx is None:
print("No row is selected, cannot tell.")
return
outdat, mapdict = self.mfl.get_rows(x_idx, end)
l_elem = max(getlongest(outdat), getlongest(mapdict.keys()))
print("|".join(f"{k:<{l_elem}}" for k in mapdict.keys()))
print("-" * (l_elem + 1) * len(mapdict.keys()))
for row in outdat:
print("|".join(f"{i:<{l_elem}}" for i in row))
def randcfg(self):
cfg = {
"listboxheight": randint(5, 10),
"reorderable": bool(randint(0, 1)),
"resizable": bool(randint(0, 1)),
"rightclickbtn": randint(2, 3),
"selection_type": choice([SELECTION_TYPE.SINGLE, SELECTION_TYPE.MULTIPLE]),
"active_cell_span_row": bool(randint(0, 1)),
}
print(f"Randomly configuring: {cfg!r}")
self.mfl.config(**cfg)
def randactive(self):
    """Move the active cell to a random row in column 0 (no-op when empty)."""
    row_count = self.mfl.get_length()
    if row_count >= 1:
        self.mfl.set_active_cell(0, randint(0, row_count - 1))
def randstyle(self):
    """Randomize the ttk styles used by the MultiframeList widgets.

    The {0} placeholder is reused across several style options so related
    colors stay linked; each argument is a zero-padded two-digit hex channel
    produced by the :0>2X format specs below.
    """
    self.root.tk.eval((
        "ttk::style configure MultiframeList.Listbox -background #{0}{0}{0} -foreground #0000{1}\n"
        "ttk::style configure MultiframeList.Listbox -selectbackground #{1}{2}{3}\n"
        "ttk::style configure MultiframeListReorderInd.TFrame -background #{0}0000\n"
        "ttk::style configure MultiframeListResizeInd.TFrame -background #0000{0}\n"
        "ttk::style configure MultiframeList.ActiveCell -background #{0}{1}{2} -selectbackground #{0}0000\n"
        "ttk::style configure MultiframeList.ActiveRow -background #000000 -selectbackground #333333\n"
    ).format(
        # First channel is kept bright (>= 120) so backgrounds stay visible.
        f"{randint(120, 255):0>2X}",
        f"{randint( 0, 255):0>2X}",
        f"{randint( 0, 255):0>2X}",
        f"{randint( 0, 255):0>2X}",
    ))
def remframe(self):
    """Remove one frame, refusing to shrink the demo below seven frames."""
    if len(self.mfl.frames) <= 7:
        print("Cannot remove this many frames from example!")
        return
    self.mfl.remove_frames(1)
def remsel(self):
    """Delete all currently selected rows, or complain when nothing is selected."""
    selection = self.mfl.selection
    if selection:
        self.mfl.remove_rows(selection)
    else:
        print("Make a selection to delete!")
def swap(self, first, second):
    """Exchange the frames of two columns via a temporary unassignment."""
    columns = self.mfl.get_columns()
    first_frame, second_frame = columns[first], columns[second]
    # Unassign one column first so both target frames are free to receive.
    self.mfl.assign_column(first, None)
    self.mfl.assign_column(second, first_frame)
    self.mfl.assign_column(first, second_frame)
def swap01(self):
    """Swap columns 0 and 1; argument order follows the current layout."""
    if self.mfl.get_columns()[0] == 0:
        self.swap(0, 1)
    else:
        self.swap(1, 0)
def swaprand(self):
    """Pick two distinct random columns and swap their frames."""
    # random.sample requires a sequence; a dict keys view is not one
    # (accepting set-likes was deprecated in 3.9 and removed in 3.11, where
    # this raised TypeError), so materialize the ids into a list first.
    column_ids = list(self.mfl.get_columns().keys())
    a, b = sample(column_ids, 2)
    print(f"Swapping {a} with {b}")
    self.swap(a, b)
def run_demo():
    """Create the demo window and block in Tk's main event loop."""
    demo = Demo()
    demo.root.mainloop()
|
{"/multiframe_list/demo2.py": ["/multiframe_list/multiframe_list.py"], "/multiframe_list/multiframe_list.py": ["/multiframe_list/demo.py"], "/multiframe_list/demo.py": ["/multiframe_list/multiframe_list.py"], "/multiframe_list/__main__.py": ["/multiframe_list/demo.py"], "/multiframe_list/__init__.py": ["/multiframe_list/multiframe_list.py", "/multiframe_list/demo.py"]}
|
26,745
|
Square789/multiframe_list
|
refs/heads/master
|
/multiframe_list/__main__.py
|
from multiframe_list.demo import run_demo
# Entry point for `python -m multiframe_list`: launch the interactive demo.
if __name__ == "__main__":
    run_demo()
|
{"/multiframe_list/demo2.py": ["/multiframe_list/multiframe_list.py"], "/multiframe_list/multiframe_list.py": ["/multiframe_list/demo.py"], "/multiframe_list/demo.py": ["/multiframe_list/multiframe_list.py"], "/multiframe_list/__main__.py": ["/multiframe_list/demo.py"], "/multiframe_list/__init__.py": ["/multiframe_list/multiframe_list.py", "/multiframe_list/demo.py"]}
|
26,746
|
Square789/multiframe_list
|
refs/heads/master
|
/multiframe_list/__init__.py
|
from multiframe_list.multiframe_list import (
MultiframeList, SELECTION_TYPE, END, ALL, WEIGHT
)
from multiframe_list.demo import run_demo
__all__ = ("MultiframeList", "SELECTION_TYPE", "END", "ALL", "WEIGHT", "run_demo")
|
{"/multiframe_list/demo2.py": ["/multiframe_list/multiframe_list.py"], "/multiframe_list/multiframe_list.py": ["/multiframe_list/demo.py"], "/multiframe_list/demo.py": ["/multiframe_list/multiframe_list.py"], "/multiframe_list/__main__.py": ["/multiframe_list/demo.py"], "/multiframe_list/__init__.py": ["/multiframe_list/multiframe_list.py", "/multiframe_list/demo.py"]}
|
26,768
|
GeneZH/Car_Value_Evaluation
|
refs/heads/master
|
/Website/evaluation/PredictionModel/model3.py
|
import pandas as pd
import numpy as np
import re
from sklearn.linear_model import LinearRegression, Lasso, Ridge, SGDRegressor, ElasticNet
from sklearn.svm import SVR
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn import datasets, linear_model, preprocessing, svm
from sklearn.preprocessing import StandardScaler, Normalizer
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.kernel_ridge import KernelRidge
import matplotlib
import matplotlib.pyplot as plt
import pickle
from sklearn.externals import joblib
def preprocess(dataFile):
    """Load the scraped Craigslist CSV, drop outliers and rare makes/models,
    normalize a few columns, and return the reduced DataFrame for training.

    NOTE(review): this module is Python 2 code (print statements below).
    """
    # cp1252: scraped data contains Windows-encoded characters.
    df = pd.read_csv(dataFile, sep=',', header=0, encoding='cp1252')
    # df.drop(['seller', 'offerType', 'abtest', 'dateCrawled', 'nrOfPictures', 'lastSeen', 'postalCode', 'dateCreated', 'name'],
    # axis='columns', inplace=True)
    # print("Without odometer %d" % df.loc[df.odometer == 'None'].count['price'])
    # Odometer was scraped as text; drop missing markers, then coerce to numbers.
    df = df[df.odometer != 'None']
    df['odometer'] = df['odometer'].apply(pd.to_numeric)
    # Report how many rows each outlier filter below will discard.
    print("Too new: %d" % df.loc[df.year > 2017].count()['price'])
    print("Too old: %d" % df.loc[df.year < 1990].count()['price'])
    print("Too cheap: %d" % df.loc[df.price < 100].count()['price'])
    print("Too expensive: %d" % df.loc[df.price > 150000].count()['price'])
    print("Too few km: %d" % df.loc[df.odometer < 1000].count()['price'])
    print("Too many km: %d" % df.loc[df.odometer > 300000].count()['price'])
    df = df[
        (df.year <= 2017)
        & (df.year >= 1990)
        & (df.price > 100)
        & (df.price < 150000)
        & (df.odometer > 1000)
        & (df.odometer < 300000)
    ]
    print df.describe()
    # Reduce VIN to a presence flag ('Yes'/'None'), not the number itself.
    df['VIN'].fillna(value='None', inplace=True)
    df['VIN'] = df['VIN'].replace(to_replace='^((?!None).)*$', value='Yes', regex=True)
    print df['VIN'].unique()
    # Split "make and model" into separate make / model columns.
    df['make and model'] = df['make and model'].str.lower()
    df['make'], df['model'] = df['make and model'].str.split(pat=None, n=1).str
    df['model'] = df['model'].str.replace('-', '')
    df['make'].fillna(value='None', inplace=True)
    df['model'].fillna(value='None', inplace=True)
    # Keep only the 50 most frequent makes and 100 most frequent models.
    df = df[df['make'].isin(df['make'].value_counts().index.tolist()[:50]) &
            df['model'].isin(df['model'].value_counts().index.tolist()[:100])]
    # replace values
    df['make'].replace('vw', 'volkswagen', inplace=True)
    df['make'].replace('chevy', 'chevrolet', inplace=True)
    df['make'].replace('cheverolet', 'chevrolet', inplace=True)
    df['model'].replace('camry le', 'camry', inplace=True)
    print df['make'].value_counts()
    print df['model'].value_counts()
    print df.isnull().sum()
    labels = ['make', 'model', 'VIN', 'condition', 'cylinders', 'drive', 'fuel', 'color', 'size', 'title',
              'transmission', 'type']
    les = {}
    ''' l in labels:
les[l] = preprocessing.LabelBinarizer()
les[l].fit(df[l])
tr = les[l].transform(df[l])
df.loc[:, l + '_feat'] = pd.Series(tr, index=df.index)'''
    labeled = df[['price'
                  , 'odometer'
                  , 'year'
                  ]
                 + [x for x in labels]]
    print labeled.sample()
    return labeled
#### Removing the outliers
# print("-----------------\nData kept for analisys: %d percent of the entire set\n-----------------" % (
# 100 * dedups['name'].count() / df['name'].count()))
def stat():
    """Placeholder stats hook; currently just prints a separator line."""
    # Parenthesized print of a single argument behaves identically under
    # Python 2 and Python 3, unlike the old py2-only `print '-'` statement.
    print('-')
def model(dataset):
    """One-hot encode the categoricals, train regressors on log1p(price),
    and print R^2 scores for the train/validation split.

    Side effects: writes one pickled LabelBinarizer per feature
    ('<label>_encoder') and a joblib file named 'model' into the working
    directory, both consumed later by predict.py.

    NOTE(review): Python 2 code (print statements, xrange below); large
    sections are disabled inside triple-quoted strings.
    """
    Y = dataset['price'].as_matrix()
    #X = dataset['year'].as_matrix()
    #X = np.append(X, dataset['odometer'].as_matrix())
    labels = ['make', 'model', 'VIN', 'condition', 'cylinders', 'drive', 'fuel', 'color', 'size', 'title',
              'transmission', 'type']
    les = {}
    vecs = None
    # Fit and persist one binarizer per categorical column so prediction
    # can transform user input identically.
    for l in labels:
        les[l] = preprocessing.LabelBinarizer()
        les[l].fit(dataset[l])
        with open(l+'_encoder', 'wb') as handle:
            pickle.dump(les[l], handle, protocol=pickle.HIGHEST_PROTOCOL)
        if vecs is None:
            vecs = les[l].transform(dataset[l])
        else:
            vecs = np.hstack((vecs,les[l].transform(dataset[l])))
    # Append the two numeric features as extra columns.
    X= np.hstack((vecs, dataset['year'].values.reshape(-1,1)))
    X= np.hstack((X, dataset['odometer'].values.reshape(-1,1)))
    # matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
    '''# plt.figure()
prices = pd.DataFrame({"1. Original": Y, "2.Log": np.log1p(Y)})
prices.hist()
plt.show()'''
    # Train on log-prices; predict.py reverses this with exp(...) - 1.
    Y = np.log1p(Y)
    # Percent of the X array to use as training set. This implies that the rest will be test set
    test_size = .25
    # Split into train and validation
    X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=test_size, random_state=3)
    print(X_train.shape, X_val.shape, Y_train.shape, Y_val.shape)
    lr = LinearRegression()
    lr.fit(X_train, Y_train)
    joblib.dump(lr, 'model')
    print ('-----Linear Regression-----')
    print 'Training Data R2:',
    print lr.score(X_train, Y_train)
    print 'Test Data R2:',
    print lr.score(X_val, Y_val)
    '''param_grid = {"alpha": [1e-15, 1e-10, 1e-8, 1e-4, 1e-3, 1e-2, 1, 5, 10, 20, 50]}
trg = GridSearchCV(estimator=Ridge(), param_grid=param_grid, cv=5, n_jobs=-1, verbose=1)
trg.fit(X_train,Y_train)
bp= trg.best_params_
rg = Ridge(alpha=bp['alpha'])
rg.fit(X_train,Y_train)
print ('-----Ridge Regression-----')
print 'Training Data R2:',
print rg.score(X_train,Y_train)
print 'Test Data R2:',
print rg.score(X_val,Y_val)
tlo = GridSearchCV(estimator=Lasso(), param_grid=param_grid, cv=2, n_jobs=-1, verbose=5)
tlo.fit(X_train,Y_train)
bp= trg.best_params_
lo = Lasso(alpha=bp['alpha'])
lo.fit(X_train,Y_train)
print ('-----Lasso-----')
print 'Training Data R2:',
print lo.score(X_train,Y_train)
print 'Test Data R2:',
print lo.score(X_val,Y_val)
en = ElasticNet()
en.fit(X_train,Y_train)
print ('-----Elastic Net-----')
print 'Training Data R2:',
print en.score(X_train,Y_train)
print 'Test Data R2:',
print en.score(X_val,Y_val)'''
    '''param_grid = {"C": [1e0,1e1,1e2,1e3]
, "gamma": np.logspace(-2,2,5)}
tsvr = GridSearchCV(estimator=SVR(kernel='rbf'), param_grid=param_grid, cv=5, n_jobs=-1, verbose=1)
tsvr.fit(X_train,Y_train)
bp= tsvr.best_params_
svr= SVR(kernel='rbf', C=bp['C'], gamma=bp['gamma'])
print ('-----Support Vector-----')
print 'Training Data R2:',
print svr.score(X_train,Y_train)
print 'Test Data R2:',
print svr.score(X_val,Y_val)'''
    rf = RandomForestRegressor()
    # Single-value ranges: the grid search effectively fixes these params.
    param_grid = {"min_samples_leaf": xrange(3, 4)
                  , "min_samples_split": xrange(3, 4)
                  , "max_depth": xrange(14, 15)
                  , "n_estimators": [500]}
    gs = GridSearchCV(estimator=rf, param_grid=param_grid, cv=2, n_jobs=-1, verbose=1)
    gs = gs.fit(X_train, Y_train)
    bp = gs.best_params_
    forest = RandomForestRegressor(criterion='mse',
                                   min_samples_leaf=bp['min_samples_leaf'],
                                   min_samples_split=bp['min_samples_split'],
                                   max_depth=bp['max_depth'],
                                   n_estimators=bp['n_estimators'])
    forest.fit(X_train, Y_train)
    print ('-----Random Forest -----')
    print 'Training Data R2:',
    print forest.score(X_train, Y_train)
    print 'Test Data R2:',
    print forest.score(X_val, Y_val)
# Script entry point: preprocess the combined scrape, then train and persist.
if __name__ == '__main__':
    dataset = preprocess('data/all.csv')
    model(dataset)
|
{"/Website/evaluation/views.py": ["/Website/evaluation/forms.py"]}
|
26,769
|
GeneZH/Car_Value_Evaluation
|
refs/heads/master
|
/Website/evaluation/views.py
|
from django.shortcuts import render
from .forms import CarForm
from PredictionModel.predict import evaluate
def index(request):
    """Render the car-evaluation form; on a valid POST, add the predicted value."""
    if request.method != 'POST':
        # GET (or any other method): show an unbound form.
        return render(request, 'evaluation/index.html', {'form': CarForm()})
    form = CarForm(request.POST)
    if form.is_valid():
        cleaned = form.cleaned_data
        value = evaluate(
            cleaned['make'], cleaned['model'], cleaned['year'],
            cleaned['odometer'], cleaned['title'], cleaned['condition'],
        )
        return render(request, 'evaluation/index.html', {'form': form, 'value': value})
    # Invalid POST: re-render the bound form (with its errors).
    return render(request, 'evaluation/index.html', {'form': form})
|
{"/Website/evaluation/views.py": ["/Website/evaluation/forms.py"]}
|
26,770
|
GeneZH/Car_Value_Evaluation
|
refs/heads/master
|
/Website/evaluation/PredictionModel/model2.py
|
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression, Lasso, Ridge, SGDRegressor, ElasticNet
from sklearn.svm import SVR
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn import datasets, linear_model, preprocessing, svm
from sklearn.preprocessing import StandardScaler, Normalizer
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.kernel_ridge import KernelRidge
import matplotlib
import matplotlib.pyplot as plt
def preprocess(dataFile):
    """Load the German used-car CSV, dedupe, drop outliers, fill common
    defaults, label-encode the categoricals, and return the training frame.

    NOTE(review): Python 2 code (print statement below).
    """
    df = pd.read_csv(dataFile, sep=',', header=0, encoding='cp1252')
    #print df.describe()
    df.drop(['seller', 'offerType', 'abtest', 'dateCrawled', 'nrOfPictures', 'lastSeen', 'postalCode', 'dateCreated', 'name'],
            axis='columns', inplace=True)
    # Re-posted listings are removed on the combination of key fields.
    dedups = df.drop_duplicates(['price', 'vehicleType', 'yearOfRegistration'
                                 , 'gearbox', 'powerPS', 'model', 'kilometer', 'monthOfRegistration', 'fuelType'
                                 , 'notRepairedDamage'])
    #### Removing the outliers
    dedups = dedups[
        (dedups.yearOfRegistration <= 2017)
        & (dedups.yearOfRegistration >= 1990)
        & (dedups.price >= 100)
        & (dedups.price <= 100000)
        & (dedups.powerPS >= 10)
        & (dedups.powerPS <= 500)
        & (pd.notnull(dedups.model))]
    #print("-----------------\nData kept for analisys: %d percent of the entire set\n-----------------" % (
    #100 * dedups['name'].count() / df['name'].count()))
    # Fill NaNs with default categories (German labels: nein/benzin/manuell).
    dedups['notRepairedDamage'].fillna(value=' nein', inplace=True)
    dedups['fuelType'].fillna(value='benzin', inplace=True)
    dedups['gearbox'].fillna(value='manuell', inplace=True)
    dedups['vehicleType'].fillna(value='not-declared', inplace=True)
    print dedups.isnull().sum()
    labels = ['gearbox', 'notRepairedDamage', 'model', 'brand', 'fuelType', 'vehicleType']
    les = {}
    # Integer label-encode each categorical into a parallel *_feat column.
    for l in labels:
        les[l] = preprocessing.LabelEncoder()
        les[l].fit(dedups[l])
        tr = les[l].transform(dedups[l])
        dedups.loc[:, l + '_feat'] = pd.Series(tr, index=dedups.index)
    labeled = dedups[['price'
                      , 'yearOfRegistration'
                      , 'powerPS'
                      , 'kilometer'
                      , 'monthOfRegistration']
                     + [x + "_feat" for x in labels]]
    return labeled
def stat():
    """Placeholder stats hook; currently just prints a separator line."""
    # Parenthesized print of a single argument behaves identically under
    # Python 2 and Python 3, unlike the old py2-only `print '-'` statement.
    print('-')
def model(dataset):
    """Plot the price distribution (raw vs log1p).

    NOTE(review): everything after plt.show() is dead code kept inside a
    triple-quoted string (the actual train/score pipeline); only the
    histogram comparison currently runs. Python 2 module.
    """
    Y = dataset['price']
    X = dataset.drop(['price'], axis='columns', inplace=False)
    #matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
    plt.figure()
    prices = pd.DataFrame({"1. Original": Y, "2.Log": np.log1p(Y)})
    prices.hist()
    plt.show()
    '''Y = np.log1p(Y)
# Percent of the X array to use as training set. This implies that the rest will be test set
test_size = .25
# Split into train and validation
X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=test_size, random_state=3)
print(X_train.shape, X_val.shape, Y_train.shape, Y_val.shape)
lr = LinearRegression()
lr.fit(X_train,Y_train)
print ('-----Linear Regression-----')
print 'Training Data R2:',
print lr.score(X_train,Y_train)
print 'Test Data R2:',
print lr.score(X_val,Y_val)
param_grid = {"alpha": [1e-15, 1e-10, 1e-8, 1e-4, 1e-3, 1e-2, 1, 5, 10, 20, 50]}
trg = GridSearchCV(estimator=Ridge(), param_grid=param_grid, cv=5, n_jobs=-1, verbose=1)
trg.fit(X_train,Y_train)
bp= trg.best_params_
rg = Ridge(alpha=bp['alpha'])
rg.fit(X_train,Y_train)
print ('-----Ridge Regression-----')
print 'Training Data R2:',
print rg.score(X_train,Y_train)
print 'Test Data R2:',
print rg.score(X_val,Y_val)
tlo = GridSearchCV(estimator=Lasso(), param_grid=param_grid, cv=2, n_jobs=-1, verbose=5)
tlo.fit(X_train,Y_train)
bp= trg.best_params_
lo = Lasso(alpha=bp['alpha'])
lo.fit(X_train,Y_train)
print ('-----Lasso-----')
print 'Training Data R2:',
print lo.score(X_train,Y_train)
print 'Test Data R2:',
print lo.score(X_val,Y_val)
en = ElasticNet()
en.fit(X_train,Y_train)
print ('-----Elastic Net-----')
print 'Training Data R2:',
print en.score(X_train,Y_train)
print 'Test Data R2:',
print en.score(X_val,Y_val)
param_grid = {"C": [1e0,1e1,1e2,1e3]
, "gamma": np.logspace(-2,2,5)}
tsvr = GridSearchCV(estimator=SVR(kernel='rbf'), param_grid=param_grid, cv=5, n_jobs=-1, verbose=1)
tsvr.fit(X_train,Y_train)
bp= tsvr.best_params_
svr= SVR(kernel='rbf', C=bp['C'], gamma=bp['gamma'])
print ('-----Support Vector-----')
print 'Training Data R2:',
print svr.score(X_train,Y_train)
print 'Test Data R2:',
print svr.score(X_val,Y_val)
rf = RandomForestRegressor()
param_grid = {"criterion": ["mse"]
, "min_samples_leaf": [3]
, "min_samples_split": [3]
, "max_depth": [10]
, "n_estimators": [500]}
gs = GridSearchCV(estimator=rf, param_grid=param_grid, cv=2, n_jobs=-1, verbose=5)
gs = gs.fit(X_train, Y_train)
bp = gs.best_params_
forest = RandomForestRegressor(criterion=bp['criterion'],
min_samples_leaf=bp['min_samples_leaf'],
min_samples_split=bp['min_samples_split'],
max_depth=bp['max_depth'],
n_estimators=bp['n_estimators'])
forest.fit(X_train, Y_train)
print ('-----Random Forest -----')
print 'Training Data R2:',
print forest.score(X_train,Y_train)
print 'Test Data R2:',
print forest.score(X_val,Y_val)'''
# Script entry point: plot price distributions for data/autos.csv.
if __name__=='__main__':
    dataset = preprocess('data/autos.csv')
    model(dataset)
|
{"/Website/evaluation/views.py": ["/Website/evaluation/forms.py"]}
|
26,771
|
GeneZH/Car_Value_Evaluation
|
refs/heads/master
|
/DataCollection/combine.py
|
"""
Reference:
http://blog.csdn.net/bytxl/article/details/23372405
"""
import csv
import os
allFileNum = 0
csv_head = ['make and model', 'year', 'VIN', 'condition', 'cylinders', 'drive', 'fuel', 'color', 'odometer', 'size', 'title', 'transmission', 'type', 'price']
def printPath(level, path):
    """Recursively walk *path*, printing an indented tree, and append every
    CSV file's data rows (header skipped) to ./all.csv.

    Parameters:
        level: current recursion depth, used for the '-' indentation.
        path: directory to scan.
    Side effects: increments the global allFileNum counter and appends to
    ./all.csv in the current working directory.
    """
    global allFileNum
    dirList = []
    fileList = []
    files = os.listdir(path)
    # dirList[0] carries the current depth as a string; real dirs follow it.
    dirList.append(str(level))
    for f in files:
        if os.path.isdir(path + '/' + f):
            # hidden folders will not be checked
            if f[0] != '.':
                dirList.append(f)
        if os.path.isfile(path + '/' + f):
            # hidden files are skipped too
            if f[0] != '.':
                fileList.append(f)
    i_dl = 0
    for dl in dirList:
        if i_dl == 0:
            # Skip the depth marker stored at index 0.
            i_dl = i_dl + 1
        else:
            print ('-' * (int(dirList[0])), dl)
            printPath((int(dirList[0]) + 1), path + '/' + dl)
    for fl in fileList:
        print ('-' * (int(dirList[0])), fl)
        allFileNum = allFileNum + 1
        # `with` closes the files on exit; the old explicit f.close()/ff.close()
        # calls inside the with-blocks were redundant and have been removed.
        with open(path + '/' + fl, 'r') as f:
            rows = list(csv.reader(f))
        with open("./all.csv", 'a+') as ff:
            # rows[0] is the per-file header; only data rows are appended.
            csv.writer(ff).writerows(rows[1:])
if __name__ == '__main__':
    # Start ./all.csv fresh with just the shared header row, then walk ./Data
    # and append every per-city CSV found (printPath also prints the tree).
    with open("./all.csv", 'w') as f:
        writer = csv.writer(f)
        writer.writerow(csv_head)
        f.close()
    printPath(1, './Data')
    print ('total files =', allFileNum)
|
{"/Website/evaluation/views.py": ["/Website/evaluation/forms.py"]}
|
26,772
|
GeneZH/Car_Value_Evaluation
|
refs/heads/master
|
/KnowledgeDiscovery/cars.py
|
# Developed by Chu-Sheng Ku
import pandas as pd
import matplotlib.pyplot as plt
# Load the combined Craigslist scrape.
cars = pd.read_csv('cars.csv')
# Select the cars made from 1988 to 2018
cars = cars.loc[cars['year'].isin(range(1988, 2019))]
cars.info()
# Group the price of car by year
price_groupby_year = cars['price'].groupby(cars['year'])
price_groupby_year.describe()
year = []
price = []
count = []
# Remove the outliers by price
for name, group in price_groupby_year:
    # Tukey fence: keep prices within 1.5 * IQR of the quartiles.
    q1, q3 = group.quantile([0.25, 0.75])
    iqr = q3 - q1
    group = group[(group > q1 - iqr * 1.5) & (group < q3 + iqr * 1.5)]
    year.append(name)
    price.append(group.mean())
    count.append(group.size)
# Plot the scatter chart of year and the mean of price
plt.scatter(year, price)
plt.title('The Correlation of Price and Year')
plt.xlabel('Year')
plt.ylabel('Price')
plt.grid()
plt.show()
# Plot the bar chart of year and count
plt.bar(year, count)
plt.title('Distribution of Cars Made from 1988 to 2018 (N~180K)')
plt.xlabel('Year')
plt.ylabel('Number of Cars Posted on Craigslist')
plt.show()
# Print out the cars made in 2018 to see why the mean of price is not reasonable
print(cars.loc[(cars['year'] == 2018) & (cars['price'] < 1000)])
|
{"/Website/evaluation/views.py": ["/Website/evaluation/forms.py"]}
|
26,773
|
GeneZH/Car_Value_Evaluation
|
refs/heads/master
|
/Website/evaluation/PredictionModel/predict.py
|
import pandas as pd
import numpy as np
import argparse
from sklearn.linear_model import LinearRegression, Lasso, Ridge, SGDRegressor, ElasticNet
from sklearn.svm import SVR
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn import datasets, linear_model, preprocessing, svm
from sklearn.preprocessing import StandardScaler, Normalizer
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.kernel_ridge import KernelRidge
import matplotlib
import matplotlib.pyplot as plt
import pickle
from sklearn.externals import joblib
import os.path
def evaluate(make, model, year, odometer, title, condition):
    """Predict a car's price in dollars from user-supplied fields.

    Attributes the web form does not collect are filled with fixed defaults.
    Loads one pickled binarizer per feature ('<label>_encoder') plus the
    joblib'd regressor ('model') from this file's directory. The regressor
    was trained on log1p(price), hence the exp(...) - 1 at the end.
    """
    labels = ['make', 'model', 'VIN', 'condition', 'cylinders', 'drive', 'fuel', 'color', 'size', 'title',
              'transmission', 'type']
    inputs = {
        'make': make,
        'model': model,
        'odometer': odometer,
        'year': year,
        'title': title,
        'condition': condition,
        # Defaults for attributes not collected by the form.
        'cylinders': 'None',
        'drive': 'None',
        'fuel': 'gas',
        'color': 'None',
        'size': 'None',
        'VIN': 'None',
        'transmission': 'automatic',
        'type': 'None',
    }
    X = np.array([])
    BASE = os.path.dirname(os.path.abspath(__file__))
    for l in labels:
        # BUG FIX: pickle files must be opened in binary mode ('rb') —
        # text mode breaks pickle.load on Python 3 and corrupts bytes on
        # platforms with newline translation.
        with open(os.path.join(BASE, l + '_encoder'), 'rb') as handle:
            encoder = pickle.load(handle)
        X = np.append(X, encoder.transform([inputs[l]])[0])
    X = np.append(X, inputs['year'])
    X = np.append(X, inputs['odometer'])
    model = joblib.load(os.path.join(BASE, 'model'))
    return np.exp(model.predict([X])[0]) - 1
if __name__ == "__main__":
    # CLI entry point: mirror the web form's inputs as command-line flags.
    parser = argparse.ArgumentParser(description='Input car info then get predicted value')
    parser.add_argument('-make', type=str,
                        help="Make of the car",
                        required=True)
    parser.add_argument("-model", type=str,
                        help="Model of the car",
                        required=True)
    parser.add_argument("-year", type=int,
                        help="Year of the car",
                        required=True)
    parser.add_argument("-odometer", type=int,
                        help="Odometer of the car",
                        required=True)
    parser.add_argument("-title", type=str,
                        help="Title status of the car",
                        choices=['salvage', 'rebuilt', 'clean', 'parts only', 'lien', 'missing'],
                        default='clean',
                        required=False)
    parser.add_argument("-condition", type=str,
                        help="Condition of the car",
                        choices=['fair', 'good', 'excellent', 'like new', 'new'],
                        default='None',
                        required=False)
    args = parser.parse_args()
    value = evaluate(args.make, args.model, args.year, args.odometer, args.title, args.condition)
    # NOTE(review): Python 2 print statement below.
    print value
|
{"/Website/evaluation/views.py": ["/Website/evaluation/forms.py"]}
|
26,774
|
GeneZH/Car_Value_Evaluation
|
refs/heads/master
|
/KnowledgeDiscovery/yearMileage.py
|
import csv
csv_head = ['make and model', 'year', 'VIN', 'condition', 'cylinders', 'drive', 'fuel', 'color', 'odometer', 'size', 'title', 'transmission', 'type', 'price']
dict = {}
ls = []
def cal(filename):
    """Print the average miles-driven-per-year over all cars in *filename*.

    Column 1 is the model year and column 8 the odometer reading. Rows are
    also accumulated into the module-level ``ls`` list, matching the
    original behavior.
    """
    total = 0  # renamed from `sum`, which shadowed the builtin
    count = 0
    # `with` closes the file; the old trailing f.close() was redundant, and
    # the dead `cnt` counter (incremented then `pass`ed) has been removed.
    with open(filename, 'r') as f:
        for row in csv.reader(f):
            ls.append(row)
    # Normalize missing odometer markers to 0 so they are skipped below.
    for r in ls[1:]:
        if r[8] == 'None':
            r[8] = 0
        else:
            r[8] = int(r[8])
    for r in ls[1:]:
        if r[8] != 0:
            age = 2017 - int(r[1])
            if age != 0:
                total += r[8] / age
                count += 1
    print(total / count)
# Script behavior: compute and print the average immediately when run.
cal('all.csv')
|
{"/Website/evaluation/views.py": ["/Website/evaluation/forms.py"]}
|
26,775
|
GeneZH/Car_Value_Evaluation
|
refs/heads/master
|
/Website/evaluation/PredictionModel/model.py
|
import csv
import collections
import scipy
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso
def most_frequent_model(dataFile):
    """Print the 10 most frequent (brand, model) pairs in *dataFile*.

    NOTE(review): Python 2 code (print statements below).
    """
    #print the most frequent make-model pairs in dataset
    models=[]
    columns={}
    f = open(dataFile)
    reader = csv.reader(f)
    # Map header names to their column indices.
    row1 = next(reader)
    i=0
    for col in row1:
        columns[col] = i
        i+=1
    counts=0
    for row in reader:
        models.append((row[columns['brand']], row[columns['model']]))
        counts+=1
        # Progress indicator for the large input file.
        if counts%10000==0:
            print counts
    counter=collections.Counter(models)
    print counter.most_common(10)
def vectorize(dataFile):
    """Build feature vectors (x) and target prices (y) from the raw CSV.

    Categorical values are one-hot encoded by using each raw value as a
    DictVectorizer key. Only registrations in [2000, 2017) are kept.

    NOTE(review): Python 2 code (xrange, print statement below).
    """
    dicts=[]
    x=[]
    y=[]
    columns={}
    f = open(dataFile)
    reader = csv.reader(f)
    row1 = next(reader)
    i = 0
    # Map header names to their column indices.
    for col in row1:
        columns[col] = i
        i += 1
    for row in reader:
        if float(row[columns['yearOfRegistration']])<2000 or float(row[columns['yearOfRegistration']])>=2017:
            continue
        y.append(float(row[columns['price']]))
        curlist=[]
        #curlist.append(float(row[columns['yearOfRegistration']]))
        #curlist.append(float(row[columns['kilometer']]))
        #curlist.append(float(row[columns['powerPS']]))
        x.append(curlist)
        curdict={}
        # Presence flags: each observed raw value becomes its own feature.
        curdict[row[columns['kilometer']]] = 1
        curdict[row[columns['yearOfRegistration']]] = 1
        curdict[row[columns['brand']]] = 1
        curdict[row[columns['model']]] = 1
        # NOTE(review): csv.reader never yields None; these fallbacks look
        # intended for empty strings — confirm against the data.
        if row[columns['notRepairedDamage']] is None:
            curdict['nein'] = 1
        else:
            curdict[row[columns['notRepairedDamage']]] = 1
        if row[columns['vehicleType']] is None:
            curdict['kleinwagen'] = 1
        else:
            curdict[row[columns['vehicleType']]] = 1
        dicts.append(curdict)
    v=DictVectorizer(sparse=False)
    X=v.fit_transform(dicts)
    # Append the vectorized features to each row's (currently empty) curlist.
    for i in xrange(len(X)):
        for ele in list(X[i]):
            x[i].append(ele)
    print x[0]
    return x,y
def model(x,y):
    """Fit a plain linear regression and print its R^2 on the training data.

    NOTE(review): Python 2 print statement below.
    """
    lineReg=LinearRegression()
    lineReg.fit(x,y)
    print lineReg.score(x,y)
if __name__ == "__main__":
    # Build one-hot features from the raw CSV, then fit and score the model.
    x, y = vectorize('data/autos.csv')
    model(x,y)
    #most_frequent_model('data/autos.csv')
|
{"/Website/evaluation/views.py": ["/Website/evaluation/forms.py"]}
|
26,776
|
GeneZH/Car_Value_Evaluation
|
refs/heads/master
|
/DataCollection/crawler.py
|
"""
Author: Yijun Zhang
Time: 2017 Fall
About: Data Mining Project - data collection part
****************
The request header below is used on my own computer, please change if necessary.
Accept:text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8
Accept-Encoding:gzip, deflate, br
Accept-Language:en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4,zh-TW;q=0.2
Cache-Control:max-age=0
Connection:keep-alive
Cookie:cl_b=jFm7EPmi5xGSvSmFWvDnugTHf0E; cl_def_hp=boulder; cl_tocmode=sss%3Agrid
Host:boulder.craigslist.org
If-Modified-Since:Sun, 12 Nov 2017 23:19:37 GMT
Referer:https://boulder.craigslist.org/search/cta
Upgrade-Insecure-Requests:1
User-Agent:Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36
******************
"""
import requests
import csv
from lxml import html
import time
# make & model: 0, year: 1, price: 13
attr_dic = {
'VIN: ': 2,
'condition: ': 3,
'cylinders: ': 4,
'drive: ': 5,
'fuel: ': 6,
'paint color: ': 7,
'odometer: ': 8,
'size: ': 9,
'title status: ': 10,
'transmission: ': 11,
'type: ': 12
}
csv_head = ['make and model', 'year', 'VIN', 'condition', 'cylinders', 'drive', 'fuel', 'color', 'odometer', 'size', 'title', 'transmission', 'type', 'price']
posts_per_city = dict()
header = {
'Accept-Language': 'en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4,zh-TW;q=0.2',
'Accept-Encoding': 'gzip, deflate, br',
'Connection': "keep-alive",
'Pragma': 'No-cache',
'Cache-Control': 'No-cache',
'Upgrade-Insecure-Requests': '1',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
}
def crawl_post(posturl, rows):
    """Fetch a single listing page and append its parsed record to *rows*."""
    page = html.fromstring(requests.get(posturl, headers=header).content)
    attrType = [span.text for span in page.xpath("//p[@class='attrgroup']/span[not(@class = 'otherpostings')]")]
    attrValue = [span.text for span in page.xpath("//p[@class='attrgroup']/span[not(@class = 'otherpostings')]/b")]
    price = page.xpath("//span[@class='price']")[0].text[1:]
    # First attribute value is "<year> <make and model ...>".
    year, _, make_model = attrValue[0].partition(' ')
    # Fields missing from the post stay as the 'None' placeholder.
    record = ['None'] * 14
    record[0] = make_model
    record[1] = year
    record[13] = price
    for i in range(1, len(attrType)):
        record[attr_dic[attrType[i]]] = attrValue[i]
    rows.append(record)
def crawl_page(pageurl, host_name, rows):
    """Parse one results page, crawling every post on it into *rows*, and
    return the absolute URL of the next results page.

    The [0] index on the 'next' button xpath raises IndexError when there
    is no next page; the caller catches that to stop paging.
    """
    e_count = 0 #exception
    s_count = 0 #success
    r = requests.get(pageurl, headers=header)
    print("page response status: ", r.status_code)
    p = r.content
    root = html.fromstring(p)
    posts = root.xpath("//li[@class='result-row']/a[@href]/@href")
    next_url = host_name + root.xpath("//a[@class = 'button next']/@href")[0]
    print("scanning...")
    for url in posts:
        try:
            crawl_post(url, rows)
        except BaseException:
            # Malformed posts are counted and skipped, not fatal for the page.
            e_count += 1
            pass
        else:
            s_count += 1
    print("good posts: ", s_count)
    print("bad posts: ", e_count)
    print()
    return next_url
def crawl(url, host_name, city):
    """Crawl every listing page for *city* and write all posts to CO/<city>.csv.

    Also records the post count in the global posts_per_city dict.
    """
    rows = []  # save all posts of a city
    print("city: ", city)
    r = requests.get(url, headers=header, allow_redirects=False)
    print("response status: ", r.status_code)
    p = r.content
    root = html.fromstring(p)
    # totalcount is scraped as a *string* from the page.
    total_count = root.xpath("//span[@class='totalcount']")[0].text
    # in this case, Craigslist offers a bug
    # BUG FIX: compare as int — the old `total_count == 2500` compared a
    # string with an int and could never be true.
    if int(total_count) == 2500:
        page_num = int(int(total_count) / 120)
        posts_per_city[city] = 2400
    else:
        page_num = int(int(total_count) / 120) + 1
        posts_per_city[city] = total_count
    print("total posts:", total_count)
    print("total pages:", page_num)
    print()
    for i in range(0, page_num):
        print("page: ", i)
        try:
            url = crawl_page(url, host_name, rows)
        except BaseException:
            # Includes IndexError when there is no 'next' page button.
            print("error occurs\n")
    file_name = city + '.csv'
    # change the location for different cities/states
    # `with` closes the file; the old explicit f.close() was redundant.
    with open('CO/' + file_name, 'w') as f:
        writer = csv.writer(f)
        writer.writerow(csv_head)
        writer.writerows(rows)
if __name__ == '__main__':
    # exception: Craigslist have another kind of url with domain name .craigslist.com.mx. Simply ignore this case
    # error: status HTTP 3XX. when the urls are different from what we expected, loop of redirection will cause error
    # change the url for different cities
    # choose any city for each state and then cities nearby will be processed automatically
    first_url = "https://boulder.craigslist.org/search/cto"
    r = requests.get(first_url, headers=header)
    p = r.content
    root = html.fromstring(p)
    # The "nearby areas" selector lists the sibling city subdomains to crawl.
    cities = root.xpath("//select[@id='areaAbb']/option/@value")
    print("list of cities nearby: ", cities)
    print()
    for city in cities:
        # .craigslist.com.mx
        header['Host'] = city + ".craigslist.org"
        # cars + trucks by owners
        host_name = "https://" + header['Host']
        url = host_name + "/search/cto"
        crawl(url, host_name, city)
    print(posts_per_city)
|
{"/Website/evaluation/views.py": ["/Website/evaluation/forms.py"]}
|
26,777
|
GeneZH/Car_Value_Evaluation
|
refs/heads/master
|
/Website/evaluation/forms.py
|
from django import forms
class CarForm(forms.Form):
    """Input form for the car-value evaluator; fields mirror evaluate()'s
    parameters (title and condition are optional)."""
    make = forms.CharField()
    model = forms.CharField()
    year = forms.IntegerField()
    odometer = forms.IntegerField(min_value=0)  # negative readings rejected
    title = forms.CharField(required=False)
    condition = forms.CharField(required=False)
|
{"/Website/evaluation/views.py": ["/Website/evaluation/forms.py"]}
|
26,782
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/aromas/migrations/0002_aroma_botanicalname.py
|
# Generated by Django 2.1.7 on 2019-04-25 07:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds the optional Botanicalname text field to Aroma."""
    dependencies = [
        ('aromas', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='aroma',
            name='Botanicalname',
            field=models.TextField(blank=True),
        ),
    ]
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,783
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/attars/urls.py
|
from django.urls import path
from . import views
# Routes for the attars app: a list view plus a pk-keyed detail view.
urlpatterns=[
    path('Attar', views.Attar , name='Attar'),
    path('<int:attarss_id>',views.attarss,name='attarss'),
]
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,784
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/florals/urls.py
|
from django.urls import path
from . import views
# Routes for the florals app: a list view plus a pk-keyed detail view.
urlpatterns=[
    path('Floural', views.Floral , name='Floural'),
    path('<int:floral_id>',views.floralss,name='floralss'),
]
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,785
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/essentials/urls.py
|
from django.urls import path
from . import views
# Routes for the essentials app: a list view plus a pk-keyed detail view.
urlpatterns=[
    path('Essential', views.Essential , name='Essential'),
    path('<int:ess_id>',views.ess,name='ess'),
]
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,786
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/aromas/migrations/0003_aroma_maxprice.py
|
# Generated by Django 2.1.7 on 2019-05-02 15:53
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds aroma.maxprice (upper bound of the displayed price range).
    # preserve_default=False: the default of 1 is a one-off fill for
    # existing rows, not a model-level default.
    dependencies = [
        ('aromas', '0002_aroma_botanicalname'),
    ]
    operations = [
        migrations.AddField(
            model_name='aroma',
            name='maxprice',
            field=models.IntegerField(blank=True, default=1),
            preserve_default=False,
        ),
    ]
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,787
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/pages/urls.py
|
from django.urls import path
from . import views
# Site-wide static/utility pages: home, about, account, search, etc.
urlpatterns=[
    path('',views.index, name='index'),
    path('about', views.about, name='about'),
    path('Account', views.Account , name='Account'),
    path('mnv', views.mnv , name='mnv'),
    path('Quality', views.Quality , name='Quality'),
    path('search', views.search , name='search'),
]
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,788
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/attars/views.py
|
from django.shortcuts import get_object_or_404,render
from django.core.paginator import EmptyPage,PageNotAnInteger,Paginator
# Create your views here.
from .models import attar
def Attar(request):
    """List every attar, paginated six per page (``?page=N``)."""
    listing = attar.objects.all()
    pages = Paginator(listing, 6)
    requested = request.GET.get('page')
    context = {'attars': pages.get_page(requested)}
    return render(request, 'pages/attar.html', context)
def attarss(request, attarss_id):
    """Detail page for a single attar; 404 when the id is unknown."""
    item = get_object_or_404(attar, pk=attarss_id)
    return render(request, 'pages/attarsingle.html', {'attarss': item})
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,789
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/featuredproduct/urls.py
|
from django.urls import path
from . import views
# Featured products only expose a detail page (listed inline on the home page).
urlpatterns=[
    path('<int:fps_id>',views.fpsss,name='fpss'),
]
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,790
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/florals/admin.py
|
from django.contrib import admin
# Register your models here.
from .models import floral
# Expose the floral model in the Django admin with default options.
admin.site.register(floral)
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,791
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/essentials/migrations/0006_auto_20190504_1247.py
|
# Generated by Django 2.1.7 on 2019-05-04 07:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename ``variation`` to ``variation1`` and add ``variation2``–``variation9``."""

    dependencies = [
        ('essentials', '0005_essential_variation'),
    ]

    # The eight AddField operations are identical apart from the field
    # number, so they are generated rather than written out longhand.
    operations = [
        migrations.RenameField(
            model_name='essential',
            old_name='variation',
            new_name='variation1',
        ),
    ] + [
        migrations.AddField(
            model_name='essential',
            name='variation%d' % i,
            field=models.TextField(blank=True),
        )
        for i in range(2, 10)
    ]
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,792
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/featuredproduct/migrations/0004_auto_20190508_2059.py
|
# Generated by Django 2.1.7 on 2019-05-08 15:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``variation1``–``variation9`` text fields to the fp model."""

    dependencies = [
        ('featuredproduct', '0003_fp_qtytype'),
    ]

    # All nine AddField operations are identical apart from the field
    # number, so they are generated rather than written out longhand.
    operations = [
        migrations.AddField(
            model_name='fp',
            name='variation%d' % i,
            field=models.TextField(blank=True),
        )
        for i in range(1, 10)
    ]
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,793
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/florals/migrations/0003_floral_maxprice.py
|
# Generated by Django 2.1.7 on 2019-05-02 15:44
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds floral.maxprice (upper bound of the displayed price range);
    # default=1 is a one-off fill for existing rows (preserve_default=False).
    dependencies = [
        ('florals', '0002_floral_longdesc'),
    ]
    operations = [
        migrations.AddField(
            model_name='floral',
            name='maxprice',
            field=models.IntegerField(blank=True, default=1),
            preserve_default=False,
        ),
    ]
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,794
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/florals/views.py
|
from django.shortcuts import get_object_or_404,render
from django.core.paginator import EmptyPage,PageNotAnInteger,Paginator
# Create your views here.
from .models import floral
def Floral(request):
    """List every floral product, paginated six per page (``?page=N``)."""
    listing = floral.objects.all()
    pages = Paginator(listing, 6)
    requested = request.GET.get('page')
    context = {'florals': pages.get_page(requested)}
    return render(request, 'pages/floural.html', context)
def floralss(request, floral_id):
    """Detail page for a single floral product; 404 when the id is unknown."""
    item = get_object_or_404(floral, pk=floral_id)
    return render(request, 'pages/floralsingle.html', {'floralss': item})
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,795
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/essentials/views.py
|
from django.shortcuts import get_object_or_404, render
from django.core.paginator import EmptyPage,PageNotAnInteger,Paginator
# Create your views here.
from .models import essential
def Essential(request):
    """List every essential oil, paginated six per page (``?page=N``)."""
    listing = essential.objects.all()
    pages = Paginator(listing, 6)
    requested = request.GET.get('page')
    context = {'essentials': pages.get_page(requested)}
    return render(request, 'pages/essential.html', context)
def ess(request, ess_id):
    """Detail page for a single essential oil; 404 when the id is unknown."""
    item = get_object_or_404(essential, pk=ess_id)
    return render(request, 'pages/esssingle.html', {'ess': item})
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,796
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/essentials/migrations/0004_auto_20190427_1143.py
|
# Generated by Django 2.1.7 on 2019-04-27 06:13
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds essential.maxprice (one-off default 0 for existing rows) and
    # makes price required again (drops blank=True from the previous step).
    dependencies = [
        ('essentials', '0003_auto_20190427_1142'),
    ]
    operations = [
        migrations.AddField(
            model_name='essential',
            name='maxprice',
            field=models.IntegerField(blank=True, default=0),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='essential',
            name='price',
            field=models.IntegerField(),
        ),
    ]
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,797
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/aromas/models.py
|
from django.db import models
class aroma(models.Model):
    """Aroma catalogue entry: texts, a price range, and up to three photos."""

    title = models.CharField(max_length=200)
    shordesc = models.TextField(blank=True)    # short description (name sic)
    longdesc = models.TextField(blank=True)
    origin = models.TextField(blank=True)
    Botanicalname = models.TextField(blank=True)
    price = models.IntegerField()              # lower bound of the price range
    maxprice = models.IntegerField(blank=True) # upper bound of the price range
    photo_main = models.ImageField(upload_to='photos/%Y/%m/%d/')
    photo_1 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
    photo_2 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)

    def __str__(self):
        return self.title
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,798
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/florals/apps.py
|
from django.apps import AppConfig
class FloralsConfig(AppConfig):
    # App registry entry for the "florals" catalogue app.
    name = 'florals'
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,799
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/aromas/urls.py
|
from django.urls import path
from . import views
# URL routes for the aromas catalogue: list page and per-item detail page.
urlpatterns=[
    path('Aroma', views.Aroma , name='Aroma'),
    path('<int:ass_id>',views.ass,name='ass'),
]
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,800
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/essentials/admin.py
|
from django.contrib import admin
from .models import essential
# Expose the essential model in the Django admin with default options.
admin.site.register(essential)
# Register your models here.
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,801
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/contact/views.py
|
from django.shortcuts import render, redirect
from django.contrib import messages
from django.http import HttpResponse
from django.core.mail import send_mail
from .models import Contact
import time
def contact(request):
    # Product-inquiry form handler: e-mail the vendor, then persist the
    # inquiry. Only the POST path is defined — a plain GET falls through
    # and returns None (TODO confirm this is never routed to directly).
    if request.method == 'POST':
        pname=request.POST['listing']
        fname=request.POST['fname']
        pageurl=request.POST['pageurl']  # page to bounce back to on failure
        lname=request.POST['lname']
        username=request.POST['username']
        company=request.POST['company']
        email=request.POST['email']
        phone=request.POST['phone']
        desc=request.POST['message']
        try:
            # NOTE(review): the vendor address is hard-coded; consider
            # moving it to settings.
            send_mail(
                'Product Inquiry',
                'Hi Rahul\n\n, Below mentioned details are the new Product Inquiries...\n\n'+
                'Name:'+fname+' '+lname+'\n'+
                'UserName:'+username+'\n'+
                'Company:'+company+'\n'+
                'Phone:'+phone+'\n'+
                'Email:'+email+'\n'+
                'Product Query:'+desc+'\n',
                'singhvishal7000@gmail.com',
                ['singhvishal7000@gmail.com'],
                fail_silently=False,
            )
        except Exception:
            messages.error(request,'You cannot make request right now. Please check you internet Connection.')
            return redirect(pageurl)
        else:
            # The inquiry is stored only when the e-mail was sent.
            contact=Contact(listing=pname, fname=fname, lname=lname, email=email,
                phone=phone, company=company ,username=username, message=desc)
            contact.save()
            messages.success(request,'your request has been successfully submitted, vendor will get back to you soon in 24 hours')
            return redirect('Account')
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,802
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/attars/migrations/0002_attar_maxprice.py
|
# Generated by Django 2.1.7 on 2019-05-02 15:35
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds attar.maxprice; default=1 is a one-off fill for existing rows.
    dependencies = [
        ('attars', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='attar',
            name='maxprice',
            field=models.IntegerField(default=1),
            preserve_default=False,
        ),
    ]
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,803
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/aromas/apps.py
|
from django.apps import AppConfig
class AromasConfig(AppConfig):
    # App registry entry for the "aromas" catalogue app.
    name = 'aromas'
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,804
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/pages/views.py
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib import messages
from contact.models import Contact
from featuredproduct.models import fp
from attars.models import attar
from essentials.models import essential
from aromas.models import aroma
from florals.models import floral
def index(request):
    """Home page: renders every featured product."""
    featured = fp.objects.all()
    return render(request, 'pages/index.html', {'fps': featured})
# Create your views here.
def about(request):
    """Static About page."""
    template_name = 'pages/about.html'
    return render(request, template_name)
def search(request):
    """Search all four catalogues for products whose title contains the query.

    POST: reads the ``search`` field and renders the matches.
    Non-POST: redirects to the home page. (Previously the function fell
    through and returned ``None``, which makes Django raise on a plain GET.)
    """
    if request.method == 'POST':
        searchname = request.POST['search']
        # Case-insensitive substring match across every product type.
        essentials = essential.objects.filter(title__icontains=searchname)
        aromas = aroma.objects.filter(title__icontains=searchname)
        attars = attar.objects.filter(title__icontains=searchname)
        florals = floral.objects.filter(title__icontains=searchname)
        mycontext = {
            'mysearch': searchname,
            'essentials': essentials,
            'aromas': aromas,
            'attars': attars,
            'florals': florals,
        }
        return render(request, 'pages/search.html', mycontext)
    # Fix: a non-POST request must still return an HttpResponse.
    return redirect('index')
def Account(request):
    # Account page: the logged-in user's past inquiries, newest first.
    # NOTE(review): this tests the *private* ``request.session._session``
    # dict as a login check — ``request.user.is_authenticated`` is the
    # public API; an authenticated user with an empty session dict would
    # wrongly be bounced to login. Confirm before changing.
    if request.session._session:
        user_contacts=Contact.objects.order_by('-contact_date').filter(username=request.user.username)
        context={
            'contacts':user_contacts
        }
        return render(request,'pages/account.html',context)
    else:
        messages.error(request,'Please Login!')
        return redirect('login')
def mnv(request):
    # Static "mnv" page (template only, no context).
    return render(request, 'pages/mnv.html')
def Quality(request):
    # Static quality-assurance page (template only, no context).
    return render(request, 'pages/quality.html')
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,805
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/florals/migrations/0002_floral_longdesc.py
|
# Generated by Django 2.1.7 on 2019-04-25 11:30
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds floral.longdesc (optional long description text).
    dependencies = [
        ('florals', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='floral',
            name='longdesc',
            field=models.TextField(blank=True),
        ),
    ]
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,806
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/essentials/migrations/0002_auto_20190425_1155.py
|
# Generated by Django 2.1.7 on 2019-04-25 06:25
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds essential.botanicalname and essential.origin (both optional).
    dependencies = [
        ('essentials', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='essential',
            name='botanicalname',
            field=models.TextField(blank=True),
        ),
        migrations.AddField(
            model_name='essential',
            name='origin',
            field=models.TextField(blank=True),
        ),
    ]
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,807
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/authentication/views.py
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib.auth.models import models, User, auth
from django.contrib import messages
from django.core.mail import send_mail
from featuredproduct.models import fp
import random
def register(request):
    """Create a new User account from the signup form (POST), or show it (GET)."""
    if request.method != 'POST':
        return render(request, 'authentication/register.html')

    form = request.POST
    # Validation order matches the form's expectations: password match
    # first, then username uniqueness, then e-mail uniqueness.
    if form['password'] != form['password2']:
        messages.error(request, 'Password did not match')
        return redirect('register')
    if User.objects.filter(username=form['username']).exists():
        messages.error(request, 'username is already taken')
        return redirect('register')
    if User.objects.filter(email=form['email']).exists():
        messages.error(request, 'email is already taken')
        return redirect('register')

    account = User.objects.create_user(
        first_name=form['first_name'], last_name=form['last_name'],
        username=form['username'], email=form['email'],
        password=form['password'])
    account.save()
    messages.success(request, 'You are now registered')
    return redirect('login')
def login(request):
    """Authenticate the posted credentials; on success render the home page."""
    if request.method != 'POST':
        return render(request, 'authentication/login.html')

    account = auth.authenticate(username=request.POST['username'],
                                password=request.POST['password'])
    if account is None:
        messages.error(request, 'Invalid Credentials!')
        return redirect('login')

    auth.login(request, account)
    # Successful login renders the home page directly (featured products).
    return render(request, 'pages/index.html', {'fps': fp.objects.all()})
def forget(request):
    # Password recovery: e-mail a random temporary password, then store it.
    if request.method == 'POST' :
        username=request.POST['username']
        try:
            # User.objects.get raises for an unknown username; that case is
            # swallowed by the generic except below together with mail
            # failures (both show the "try again later" message).
            userdata=User.objects.get(username=username)
            rand = User.objects.make_random_password()
            send_mail(
                'Password Recovery',
                'Hi, \n\n Please find below your password..\n\n'+
                'Password:'+rand+'\n',
                'singhvishal7000@gmail.com',
                [userdata.email],
                fail_silently=False,
            )
        except Exception:
            messages.error(request,'Please try again later or check your internet Connection!!')
            return render(request,'authentication/forget.html')
        else:
            # Only overwrite the stored password after the mail went out.
            userdata.set_password(rand)
            userdata.save()
            messages.success(request,"Your temporary password has been sent to your registerd email.")
            return render(request,'authentication/login.html')
    else:
        return render(request,'authentication/forget.html')
def logout(request):
    # Logs out on POST only (avoids logout-via-link CSRF).
    # NOTE(review): a non-POST request returns None — confirm the original
    # indentation placed the redirect inside the if (as reconstructed here).
    if request.method == 'POST' :
        auth.logout(request)
        return redirect('index')
def changepass(request):
    # Change the current user's password from the account page.
    if request.method == 'POST' :
        password=request.POST['newpassword']
        cpassword=request.POST['confirmpassword']
        if password == cpassword:
            currentuser=request.user
            currentuser.set_password(password)
            currentuser.save()
            # NOTE(review): set_password invalidates the current session;
            # Django's update_session_auth_hash() is normally called here.
            # Confirm whether logging the user out afterwards is intended.
            messages.success(request,"password changed successfully")
            return render(request,'pages/account.html')
        else:
            messages.error(request,"password does not matched")
            return render(request,'authentication/changepassword.html')
    # Non-POST (or fall-through): show the change-password form.
    return render(request,'authentication/changepassword.html')
# Create your views here.
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,808
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/aromas/views.py
|
from django.shortcuts import get_object_or_404, render
from django.core.paginator import EmptyPage,PageNotAnInteger,Paginator
# Create your views here.
from .models import aroma
def Aroma(request):
    """List every aroma, paginated six per page (``?page=N``)."""
    listing = aroma.objects.all()
    pages = Paginator(listing, 6)
    requested = request.GET.get('page')
    context = {'aromas': pages.get_page(requested)}
    return render(request, 'pages/aroma.html', context)
def ass(request, ass_id):
    """Detail page for a single aroma; 404 when the id is unknown."""
    item = get_object_or_404(aroma, pk=ass_id)
    return render(request, 'pages/asssingle.html', {'ass': item})
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,809
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/essentials/migrations/0003_auto_20190427_1142.py
|
# Generated by Django 2.1.7 on 2019-04-27 06:12
from django.db import migrations, models
class Migration(migrations.Migration):
    # Temporarily makes essential.price optional (reverted by 0004).
    dependencies = [
        ('essentials', '0002_auto_20190425_1155'),
    ]
    operations = [
        migrations.AlterField(
            model_name='essential',
            name='price',
            field=models.IntegerField(blank=True),
        ),
    ]
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,810
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/attars/apps.py
|
from django.apps import AppConfig
class AttarsConfig(AppConfig):
    # App registry entry for the "attars" catalogue app.
    name = 'attars'
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,811
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/attars/migrations/0003_auto_20190502_2107.py
|
# Generated by Django 2.1.7 on 2019-05-02 15:37
from django.db import migrations, models
class Migration(migrations.Migration):
    # Relaxes attar.maxprice to allow blank values in forms.
    dependencies = [
        ('attars', '0002_attar_maxprice'),
    ]
    operations = [
        migrations.AlterField(
            model_name='attar',
            name='maxprice',
            field=models.IntegerField(blank=True),
        ),
    ]
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,812
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/featuredproduct/apps.py
|
from django.apps import AppConfig
class FeaturedproductConfig(AppConfig):
    # App registry entry for the "featuredproduct" app.
    name = 'featuredproduct'
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,813
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/contact/models.py
|
from django.db import models
from datetime import datetime
# Create your models here.
# Customer product-inquiry record, created by the contact form.
class Contact(models.Model):
    listing=models.CharField(max_length=200)   # product the inquiry is about
    fname=models.CharField(max_length=200)
    lname=models.CharField(max_length=200)
    email=models.CharField(max_length=200)
    phone=models.CharField(max_length=200)
    company=models.CharField(max_length=200, blank=True)
    message=models.TextField()
    # datetime.now is passed as a callable, so it is evaluated per save.
    contact_date=models.DateTimeField(default=datetime.now, blank=True)
    username=models.CharField(max_length=200)  # requesting site user
    vendor_comments=models.CharField(max_length=200,blank=True)
    def __str__(self):
        return self.username
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,814
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/contact/migrations/0003_auto_20190505_1718.py
|
# Generated by Django 2.1.7 on 2019-05-05 11:48
from django.db import migrations, models
class Migration(migrations.Migration):
    # Makes Contact.vendor_comments optional in forms (blank=True).
    dependencies = [
        ('contact', '0002_contact_vendor_comments'),
    ]
    operations = [
        migrations.AlterField(
            model_name='contact',
            name='vendor_comments',
            field=models.CharField(blank=True, max_length=200),
        ),
    ]
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,815
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/featuredproduct/views.py
|
from django.shortcuts import get_object_or_404, render
# Create your views here.
from .models import fp
def fpsss(request, fps_id):
    """Detail page for a single featured product; 404 when the id is unknown."""
    item = get_object_or_404(fp, pk=fps_id)
    return render(request, 'pages/fpssingle.html', {'fps': item})
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,816
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/contact/admin.py
|
from django.contrib import admin
# Register your models here.
from .models import Contact
class ContactAdmin(admin.ModelAdmin):
    # Show the key inquiry columns in the admin change list; every column
    # links to the detail page.
    list_display = ('id','username','listing','fname','lname','email','contact_date','message')
    list_display_links = ('id','username','listing','fname','lname','email','contact_date','message')
    list_per_page=25
admin.site.register(Contact,ContactAdmin)
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,817
|
bucky1134/bbdperfumers
|
refs/heads/master
|
/aromas/admin.py
|
from django.contrib import admin
# Register your models here.
from .models import aroma
# Expose the aroma model in the Django admin with default options.
admin.site.register(aroma)
|
{"/contact/views.py": ["/contact/models.py"], "/pages/views.py": ["/contact/models.py", "/aromas/models.py"], "/aromas/views.py": ["/aromas/models.py"], "/contact/admin.py": ["/contact/models.py"], "/aromas/admin.py": ["/aromas/models.py"]}
|
26,818
|
danielsalim/TUGAS-BESAR
|
refs/heads/main
|
/ubahjumlah.py
|
from ubahdata import split
from ubahdata import savenewdata
# Program ubahjumlah.py
# menambah atau mengurangi gadget/consumable yang terdaftar
# KAMUS
# variabel
# fungsi dan prosedur
def _read_items(filename):
    # Load one CSV file and return parsed rows with the count column as int.
    with open(filename, "r") as f:
        lines = f.readlines()
    rows = split(lines)
    for row in rows:
        row[3] = int(row[3])
    return rows


def _write_items(filename, header, rows):
    # Persist rows back to disk. Fix: the original left the output file
    # handles unclosed; ``with`` guarantees the data is flushed.
    with open(filename, "w") as f:
        f.write(header)
        savenewdata(rows, f)


def _find_index(rows, item_id):
    # Return the index of the (last) row whose ID matches, or -1.
    index = -1
    for i in range(len(rows)):
        if rows[i][0] == item_id:
            index = i
    return index


def _adjust(rows, index, jumlah):
    # Apply the stock delta and report, refusing to go below zero stock.
    if rows[index][3] + jumlah < 0:
        print(f"{-1*jumlah} {rows[index][1]} gagal dibuang karena stok kurang. Stok sekarang : {rows[index][3]} (<{-1*jumlah})")
        return
    rows[index][3] += jumlah
    if jumlah < 0:
        print(f"{-1*jumlah} {rows[index][1]} berhasil dibuang. Stok sekarang : {rows[index][3]}")
    elif jumlah > 0:
        print(f"{jumlah} {rows[index][1]} berhasil ditambahkan. Stok sekarang : {rows[index][3]}")
    else:
        print(f"Tidak terjadi penambahan atau pengurangan {rows[index][1]}. Stok sekarang : {rows[index][3]}")


def ubahjumlah():
    """Add or remove stock for a registered gadget ('G…') or consumable ('C…').

    Pre : the item may or may not exist in gadget.csv / consumable.csv.
    Post: the matching CSV is rewritten with the adjusted stock count.
    """
    # The gadget and consumable flows were copy-pasted in the original;
    # they now share _read_items / _find_index / _adjust / _write_items.
    data_gadget = _read_items("gadget.csv")
    data_consumable = _read_items("consumable.csv")

    id = input("Masukan ID : ")
    jumlah = int(input("Masukkan jumlah : "))

    index = -1
    if id[0] == 'G':
        index = _find_index(data_gadget, id)
    elif id[0] == 'C':
        index = _find_index(data_consumable, id)

    if index == -1:
        print("Tidak ada item dengan ID tersebut")
    elif id[0] == 'G':
        _adjust(data_gadget, index, jumlah)
    else:
        _adjust(data_consumable, index, jumlah)

    # Both files are rewritten unconditionally, matching the original.
    _write_items("gadget.csv", "id;nama;deskripsi;jumlah;rarity;tahun_ditemukan\n", data_gadget)
    _write_items("consumable.csv", "id;nama;deskripsi;jumlah;rarity\n", data_consumable)
|
{"/main.py": ["/register.py", "/login.py", "/cari_tahun.py", "/carirarity.py", "/ubahjumlah.py", "/hapusitem.py"], "/cari_tahun.py": ["/carirarity.py"]}
|
26,819
|
danielsalim/TUGAS-BESAR
|
refs/heads/main
|
/main.py
|
from register import register
from login import login
from cari_tahun import caritahun
from carirarity import carirarity
from ubahjumlah import ubahjumlah
from hapusitem import hapusitem
from help import help_admin
from help import help_user
# Entry point: authenticate first, then run the interactive command loop.
login()
# NOTE(review): ``login.role`` reads an attribute set on the login *function*
# object by login.py — confirm login() assigns it; it is not a return value.
role = login.role
while True:
    print("Ketik 'help' untuk melihat semua perintah")
    command = input(">>> ")
    # Admin-only commands typed by a normal user fall through to the
    # "Invalid Command" branch below.
    if command == "register" and role == "admin":
        register()
    elif command == "caritahun":
        caritahun()
    elif command == "carirarity":
        carirarity()
    elif command == "ubahjumlah" and role == "admin":
        ubahjumlah()
    elif command == "hapusitem" and role == "admin":
        hapusitem()
    elif command == "help":
        if role == "admin":
            help_admin()
        elif role == "user":
            help_user()
    elif command == "quit":
        print("Terima kasih")
        break
    else:
        print("Invalid Command")
|
{"/main.py": ["/register.py", "/login.py", "/cari_tahun.py", "/carirarity.py", "/ubahjumlah.py", "/hapusitem.py"], "/cari_tahun.py": ["/carirarity.py"]}
|
26,820
|
danielsalim/TUGAS-BESAR
|
refs/heads/main
|
/carirarity.py
|
from ubahdata import split
def carirarity():
    """Prompt for a rarity and print every gadget that matches it."""
    with open("gadget.csv", "r") as fh:
        raw_lines = fh.readlines()
    wanted = input("Masukkan rarity: ")
    gadgets = split(raw_lines)
    found = False
    print("\nHasil pencarian: ")
    for idx, row in enumerate(gadgets):
        if row[4] == wanted:
            found = True
            outputGadget(gadgets, idx)
    if not found:
        print("\nTidak ditemukan gadget dengan rarity", wanted)
def outputGadget(data, index):
    """Print one gadget record: name, description, stock, rarity, year."""
    record = data[index]
    print("\nNama :", record[1])
    print("Deskripsi :", record[2])
    print("Jumlah :", record[3])
    print("Rarity :", record[4])
    print("Tahun Ditemukan :", record[5])
|
{"/main.py": ["/register.py", "/login.py", "/cari_tahun.py", "/carirarity.py", "/ubahjumlah.py", "/hapusitem.py"], "/cari_tahun.py": ["/carirarity.py"]}
|
26,821
|
danielsalim/TUGAS-BESAR
|
refs/heads/main
|
/hapusitem.py
|
from ubahdata import split
from ubahdata import savenewdata
def hapusitem():
    """Delete a gadget ('G…') or consumable ('C…') by ID, after confirmation.

    Pre : the item may or may not exist in gadget.csv / consumable.csv.
    Post: both CSV files are rewritten (the matching row removed on 'Y').
    """
    with open("gadget.csv", "r") as f:
        data_gadget = split(f.readlines())
    with open("consumable.csv", "r") as f:
        data_consumable = split(f.readlines())

    id = input("Masukkan ID item: ")
    # Pick the collection by ID prefix: 'G' = gadget, 'C' = consumable.
    if id[0] == 'G':
        target = data_gadget
    elif id[0] == 'C':
        target = data_consumable
    else:
        target = None

    # Keep the last matching index, as the original loop did (no break).
    index = -1
    if target is not None:
        for i in range(len(target)):
            if target[i][0] == id:
                index = i

    if index != -1:
        checkhapus(target, index)
    else:
        # Fix: the original printed this only for unknown-prefix IDs; a
        # valid 'G'/'C' ID that matched no row failed silently.
        print("Tidak ada item dengan ID tersebut.")

    with open("gadget.csv", "w") as f:
        f.write("id;nama;deskripsi;jumlah;rarity;tahun_ditemukan\n")
        savenewdata(data_gadget, f)
    with open("consumable.csv", "w") as f:
        f.write("id;nama;deskripsi;jumlah;rarity\n")
        savenewdata(data_consumable, f)
def checkhapus(data, index):
ans = input("Apakah anda yakin ingin menghapus " + str(data[index][1]) + "(Y/N)? ")
if (ans == 'Y') or (ans == 'y'):
data.pop(index)
print("\nItem telah berhasil dihapus dari database.")
elif (ans == 'N') or (ans == 'n'):
print("\nItem gagal dihapus dari database")
else: #Jika diberi input selain Y dan N
print("Input invalid")
|
{"/main.py": ["/register.py", "/login.py", "/cari_tahun.py", "/carirarity.py", "/ubahjumlah.py", "/hapusitem.py"], "/cari_tahun.py": ["/carirarity.py"]}
|
26,822
|
danielsalim/TUGAS-BESAR
|
refs/heads/main
|
/register.py
|
# PROGRAM register.py
# program untuk mendaftar user
# KAMUS
# variabel
# fungsi/prosedur
def register():
# i.s. : akun dengan username tertentu belum terdaftar
# f.s. : akun sudah terdaftar
# KAMUS LOKAL
# user : array of string
# data_nama, data_username, data_password, data_alamat : array of string
# nama, username, password, alamat : string
# ALGORITMA PROSEDUR
data_user = open("user.csv","r")
user = data_user.readlines()
data_nama = []
data_username = []
data_password = []
data_alamat = []
# csv parser
for i in range(1,len(user)):
split_value = []
tmp = ''
for c in user[i]:
if c == ";":
split_value.append(tmp)
tmp = ''
else:
tmp += c
if tmp:
split_value.append(tmp)
data_nama.append(split_value[1])
data_username.append(split_value[2])
data_alamat.append(split_value[4])
data_password.append(split_value[3])
nama = input("Masukkan nama : ")
username = input("Masukkan username : ")
while not isUsernameValid(username, data_username):
print("Username sudah digunakan, silakan masukan username lain.")
username = input("Masukkan username : ")
password = input("Masukkan password : ")
alamat = input("Masukkan alamat : ")
f = open("user.csv","a+")
f.write(f"\n{len(user)};{nama.title()};{username};{password};{alamat};user")
f.close()
print(f"User {username} telah berhasil register ke dalam Kantong Ajaib.")
def isUsernameValid(username,data_username):
# menghasilkan true jika username belum pernah digunakan
# KAMUS LOKAL
isUsernameUsed = False
# ALGORITMA FUNGSI
for username_test in data_username:
if username == username_test:
isUsernameUsed = True
return not isUsernameUsed
|
{"/main.py": ["/register.py", "/login.py", "/cari_tahun.py", "/carirarity.py", "/ubahjumlah.py", "/hapusitem.py"], "/cari_tahun.py": ["/carirarity.py"]}
|
26,823
|
danielsalim/TUGAS-BESAR
|
refs/heads/main
|
/login.py
|
# PROGRAM login.py
# program untuk login ke Kantong Ajaib
# KAMUS
# variabel
# fungsi/prosedur
def login():
# i.s. : user/admin belum login
# f.s. : user/admin sudah login
# KAMUS LOKAL
# user : array of string
# data_username, data_password, data_role : array of string
# username, password : string
# ALGORITMA PROSEDUR
f = open("user.csv","r")
user = f.readlines()
f.close()
data_username = []
data_password = []
data_role = []
login.role = ""
# csv parser
for i in range(1,len(user)):
split_value = []
tmp = ''
for c in user[i]:
if c == ";":
split_value.append(tmp)
tmp = ''
else:
tmp += c
if tmp:
split_value.append(tmp)
data_username.append(split_value[2])
data_password.append(split_value[3])
data_role.append(split_value[-1].replace('\n',''))
username = input("Masukkan username : ")
while username_id(username,data_username) == 0:
print("Username tidak terdaftar")
username = input("Masukkan username : ")
password = input("Masukkan password : ")
if password != data_password[username_id(username, data_username) - 1]:
print("Password salah!")
login()
else:
login.role = data_role[username_id(username, data_username) - 1]
print(f"Halo {username}! Selamat datang di Kantong Ajaib.")
def username_id(username, data_username):
# menghasilkan id username jika sudah terdaftar, 0 jika belum terdaftar
# KAMUS LOKAL
# username_idx : int
# ALGORITMA FUNGSI
username_idx = -1
for i in range(len(data_username)):
if username == data_username[i]:
username_idx = i
return username_idx + 1
|
{"/main.py": ["/register.py", "/login.py", "/cari_tahun.py", "/carirarity.py", "/ubahjumlah.py", "/hapusitem.py"], "/cari_tahun.py": ["/carirarity.py"]}
|
26,824
|
danielsalim/TUGAS-BESAR
|
refs/heads/main
|
/cari_tahun.py
|
from carirarity import outputGadget
def caritahun():
f = open("gadget.csv", "r")
gadget = f.readlines()
f.close()
print()
tahun = int(input("Masukkan tahun: "))
kategori = input("Masukkan kategori: ")
print()
print("Hasil pencarian:")
print()
# Mengubah tanda petik dan enter pada list
old_lines = [raw_line.replace('"', "") for raw_line in gadget]
lines = [raw_line.replace("\n", "") for raw_line in old_lines]
# Mengkonversi baris pada list menjadi array
data = []
data_tahun = []
data_kategori = ['<', '>', '>=', '<=', '=']
for i in range(1, len(lines)):
new_file = []
kata = ''
for j in (lines[i]):
if (j == ";"):
new_file.append(kata)
kata = ''
else:
kata += j
if kata:
new_file.append(kata)
array = [data.strip() for data in new_file]
hasil = convertArray(array)
data.append(hasil)
data_tahun.append(hasil[5])
# Memvalidasi input
if (tahun > 999) & (kategori in data_kategori):
if (kategori == '='):
ind = data_tahun.index(tahun)
outputGadget(data,ind)
elif (kategori == '<'):
for item in data_tahun:
if item < tahun:
ind = data_tahun.index(item)
outputGadget(data,ind)
print()
elif (kategori == '>'):
for item in data_tahun:
if item > tahun:
ind = data_tahun.index(item)
outputGadget(data,ind)
print()
elif (kategori == '<='):
for item in data_tahun:
if item <= tahun:
ind = data_tahun.index(item)
outputGadget(data,ind)
print()
elif (kategori == '>='):
for item in data_tahun:
if item >= tahun:
ind = data_tahun.index(item)
outputGadget(data,ind)
print()
else:
print("Tidak ada gadget yang ditemukan")
# Fungsi untuk mengkonversi array menjadi value sebenarnya
def convertArray(array):
arr = array[:]
for i in range(6):
# Untuk kolom indeks ke-5 value sebenarnya adalah integer
if(i == 5):
arr[i] = int(arr[i])
return(arr)
|
{"/main.py": ["/register.py", "/login.py", "/cari_tahun.py", "/carirarity.py", "/ubahjumlah.py", "/hapusitem.py"], "/cari_tahun.py": ["/carirarity.py"]}
|
26,866
|
maximverwilst/deepimagehashing-VAE
|
refs/heads/main
|
/vis_util.py
|
import matplotlib.pyplot as plt
from train import tbh_train
from model.tbh import TBH
from util.eval_tools import eval_cls_map, gen_sim_mat, compute_hamming_dist
from util.distribution_tools import get_mean_logvar
import tensorflow as tf
from util.data.dataset import Dataset
import numpy as np
import os
import sys
from meta import REPO_PATH
#generate train codes
def generate_train(model, dataset, data_parser, set="cifar10"):
record_name = os.path.join(REPO_PATH, 'data', set, "train" + '.tfrecords')
data = tf.data.TFRecordDataset(record_name).map(data_parser, num_parallel_calls=50)
data = data.batch(50000)
trainiter = iter(data)
train = next(trainiter)
feat_in = tf.cast(train[1], dtype=tf.float32)
mean, logvar = get_mean_logvar(model.encoder, feat_in)
bbntrain = (tf.sign(mean) + 1.0) / 2.0
dataset.update(train[0].numpy(), bbntrain.numpy(), train[2].numpy(), 'train')
return dataset
#generate test codes + compute mAP
def generate_test(model, dataset, data_parser, set="cifar10"):
record_name = os.path.join(REPO_PATH, 'data', set, "test" + '.tfrecords')
scores = []
data = tf.data.TFRecordDataset(record_name).map(data_parser, num_parallel_calls=50).batch(10000)
testiter = iter(data)
test = next(testiter)
feat_in = tf.cast(test[1], dtype=tf.float32)
mean, logvar = get_mean_logvar(model.encoder, feat_in)
bbntest = (tf.sign(mean) + 1.0) / 2.0
dataset.update(test[0].numpy(), bbntest.numpy(), test[2].numpy(), 'test')
return dataset
def test_hook(loaded_model, dataset, data_parser):
record_name = os.path.join(REPO_PATH, 'data', dataset.set_name, "test" + '.tfrecords')
scores = []
for i in range(10):
data = tf.data.TFRecordDataset(record_name).map(data_parser, num_parallel_calls=50).shuffle(1000).batch(1000)
testiter = iter(data)
test = next(testiter)
feat_in = tf.cast(test[1], dtype=tf.float32)
mean, logvar = get_mean_logvar(loaded_model.encoder, feat_in)
bbntest = (tf.sign(mean) + 1.0) / 2.0
dataset.update(test[0].numpy(), bbntest.numpy(), test[2].numpy(), 'test')
test_hook = eval_cls_map(bbntest.numpy(), dataset.train_code, test[2].numpy(), dataset.train_label, at=1000)
scores.append(test_hook)
return scores
#calculate precision-recall curve values
def get_prec_rec_matrix(dataset, data_parser, model):
record_name = os.path.join(REPO_PATH, 'data', "cifar10", "test" + '.tfrecords')
data = tf.data.TFRecordDataset(record_name).map(data_parser, num_parallel_calls=50).shuffle(1000).batch(1000)
testiter = iter(data)
test = next(testiter)
feat_in = tf.cast(test[1], dtype=tf.float32)
mean, logvar = get_mean_logvar(model.encoder, feat_in)
bbntest = (tf.sign(mean) + 1.0) / 2.0
dataset.update(test[0].numpy(), bbntest.numpy(), test[2].numpy(), 'test')
query, target, cls1, cls2, at = bbntest.numpy(), dataset.train_code, test[2].numpy(), dataset.train_label, 50000
top_k = at
sim_mat = gen_sim_mat(cls1, cls2)
query_size = query.shape[0]
distances = compute_hamming_dist(query, target)
dist_argsort = np.argsort(distances)
prec_rec = [[0 for i in range(top_k)] for i in range(query_size)]
map_count = 0.
average_precision = 0.
average_recall = 0.
for i in range(query_size):
gt_count = 0.
precision = 0.
top_k = at if at is not None else dist_argsort.shape[1]
for j in range(top_k):
this_ind = dist_argsort[i, j]
if sim_mat[i, this_ind] == 1:
prec_rec[i][j] = 1
gt_count += 1.
precision += gt_count / (j + 1.)
average_recall += gt_count/5000
if gt_count > 0:
average_precision += precision / gt_count
map_count += 1.
average_recall /= (query_size)
prec_rec = np.array(prec_rec)
avg_prec = [0 for i in range(100)]
avg_rec = [0 for i in range(100)]
for t in range(1,101):
map_count = 0.
for i in range(prec_rec.shape[0]):
gt_count = np.sum(prec_rec[i][:int(prec_rec.shape[1]*t/100)])
prec = float(gt_count) / (prec_rec.shape[1]*t/100)
if gt_count>0:
map_count += 1
avg_prec[t-1] += prec
avg_rec[t-1] += gt_count/5000
avg_prec[t-1] /= prec_rec.shape[0]
avg_rec[t-1] /= prec_rec.shape[0]
return avg_prec, avg_rec
def top_10_retrieval(model,dataset, data_parser,orig_train,orig_test):
record_name = os.path.join(REPO_PATH, 'data', dataset.set_name, "test" + '.tfrecords')
data = tf.data.TFRecordDataset(record_name).map(data_parser, num_parallel_calls=50).batch(10).shuffle(1000)
testiter = iter(data)
test = next(testiter)
test = next(testiter)
feat_in = tf.cast(test[1], dtype=tf.float32)
mean, logvar = get_mean_logvar(model.encoder, feat_in)
bbntest = (tf.sign(mean) + 1.0) / 2.0
query, target, cls1, cls2 = bbntest.numpy(), dataset.train_code, test[2].numpy(), dataset.train_label
sim_mat = gen_sim_mat(cls1, cls2)
query_size = query.shape[0]
distances = compute_hamming_dist(query, target)
dist_argsort = np.argsort(distances)
retrievals = [0 for i in range(10)]
for i in range(10):
retrievals[i] = orig_train[0][dist_argsort[i][0:10]]
retrievals[i] = np.concatenate(retrievals[i],axis=1)
retrievals[i] = np.concatenate((orig_test[0][test[0][i]],np.zeros([32,5,3], dtype = int),retrievals[i]),axis=1)
retrievals = np.concatenate(retrievals,axis = 0)
for i in range(10):
for j in range(10):
if (orig_train[1][dist_argsort[i][0:10]][j] == orig_test[1][test[0][i]]):
retrievals[i*32:i*32+1,j*32+37:j*32+69,:3] = np.concatenate([np.zeros([1,32,1],dtype=int),np.ones([1,32,1],dtype=int)*130,np.zeros([1,32,1],dtype=int)],axis=2)
retrievals[i*32+31:i*32+32,j*32+37:j*32+69,:3] = np.concatenate([np.zeros([1,32,1],dtype=int),np.ones([1,32,1],dtype=int)*130,np.zeros([1,32,1],dtype=int)],axis=2)
retrievals[i*32:i*32+32,j*32+37:j*32+38,:3] = np.concatenate([np.zeros([32,1,1],dtype=int),np.ones([32,1,1],dtype=int)*130,np.zeros([32,1,1],dtype=int)],axis=2)
retrievals[i*32:i*32+32,j*32+68:j*32+69,:3] = np.concatenate([np.zeros([32,1,1],dtype=int),np.ones([32,1,1],dtype=int)*130,np.zeros([32,1,1],dtype=int)],axis=2)
else:
retrievals[i*32:i*32+1,j*32+37:j*32+69,:3] = np.concatenate([200*np.ones([1,32,1],dtype=int),np.ones([1,32,1],dtype=int),np.zeros([1,32,1],dtype=int)],axis=2)
retrievals[i*32+31:i*32+32,j*32+37:j*32+69,:3] = np.concatenate([200*np.ones([1,32,1],dtype=int),np.ones([1,32,1],dtype=int),np.zeros([1,32,1],dtype=int)],axis=2)
retrievals[i*32:i*32+32,j*32+37:j*32+38,:3] = np.concatenate([200*np.ones([32,1,1],dtype=int),np.ones([32,1,1],dtype=int),np.zeros([32,1,1],dtype=int)],axis=2)
retrievals[i*32:i*32+32,j*32+68:j*32+69,:3] = np.concatenate([200*np.ones([32,1,1],dtype=int),np.ones([32,1,1],dtype=int),np.zeros([32,1,1],dtype=int)],axis=2)
return retrievals
|
{"/vis_util.py": ["/model/tbh.py", "/util/distribution_tools.py", "/util/data/dataset.py", "/meta.py"], "/vis.py": ["/vis_util.py", "/util/data/dataset.py", "/meta.py", "/util/data/set_processor.py"], "/layer/encodec.py": ["/layer/binary_activation.py", "/util/distribution_tools.py"], "/util/data/make_data.py": ["/util/data/set_processor.py", "/meta.py"], "/model/tbh.py": ["/layer/binary_activation.py", "/util/distribution_tools.py", "/util/data/set_processor.py"], "/train/tbh_train.py": ["/model/tbh.py", "/util/data/dataset.py", "/util/distribution_tools.py", "/layer/twin_bottleneck.py", "/meta.py"], "/util/data/dataset.py": ["/meta.py", "/util/data/set_processor.py"]}
|
26,867
|
maximverwilst/deepimagehashing-VAE
|
refs/heads/main
|
/vis.py
|
import vis_util
import tensorflow as tf
from util.data.dataset import Dataset
from meta import REPO_PATH
from util.data.set_processor import SET_DIM, SET_LABEL, SET_SPLIT, SET_SIZE
import matplotlib.pyplot as plt
path = "\\result\\cifar10\\32bit\\model"
loaded_model = tf.keras.models.load_model(REPO_PATH + path)
set_name = "cifar10"
def data_parser(tf_example: tf.train.Example):
feat_dict = {'id': tf.io.FixedLenFeature([], tf.int64),
'feat': tf.io.FixedLenFeature([SET_DIM.get(set_name, 4096)], tf.float32),
'label': tf.io.FixedLenFeature([SET_LABEL.get(set_name, 10)], tf.float32)}
features = tf.io.parse_single_example(tf_example, features=feat_dict)
_id = tf.cast(features['id'], tf.int32)
_feat = tf.cast(features['feat'], tf.float32)
_label = tf.cast(features['label'], tf.int32)
return _id, _feat, _label
dataset = Dataset(set_name = set_name, batch_size=1024, code_length=32)
train_dataset = vis_util.generate_train(loaded_model, dataset, data_parser)
# test mAP for 1000 random queries (10 times)
print(vis_util.test_hook(loaded_model, train_dataset, data_parser))
# calculate precision-recall curves
avg_prec, avg_rec = vis_util.get_prec_rec_matrix(train_dataset, data_parser, loaded_model)
plt.plot(avg_rec, avg_prec)
plt.show()
# visualise top-10 retrievals
orig_train, orig_test = tf.keras.datasets.cifar10.load_data()
retrievals = vis_util.top_10_retrieval(loaded_model, train_dataset, data_parser, orig_train, orig_test)
fig, ax = plt.subplots(figsize=(18, 20))
ax.imshow(retrievals)
plt.show()
|
{"/vis_util.py": ["/model/tbh.py", "/util/distribution_tools.py", "/util/data/dataset.py", "/meta.py"], "/vis.py": ["/vis_util.py", "/util/data/dataset.py", "/meta.py", "/util/data/set_processor.py"], "/layer/encodec.py": ["/layer/binary_activation.py", "/util/distribution_tools.py"], "/util/data/make_data.py": ["/util/data/set_processor.py", "/meta.py"], "/model/tbh.py": ["/layer/binary_activation.py", "/util/distribution_tools.py", "/util/data/set_processor.py"], "/train/tbh_train.py": ["/model/tbh.py", "/util/data/dataset.py", "/util/distribution_tools.py", "/layer/twin_bottleneck.py", "/meta.py"], "/util/data/dataset.py": ["/meta.py", "/util/data/set_processor.py"]}
|
26,868
|
maximverwilst/deepimagehashing-VAE
|
refs/heads/main
|
/layer/encodec.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from layer.binary_activation import binary_activation, custom_activation
from util.distribution_tools import get_mean_logvar, split_node
class VaeEncoderGeco(tf.keras.layers.Layer):
def compute_output_signature(self, input_signature):
pass
def __init__(self, middle_dim, bbn_dim, cbn_dim):
"""
:param middle_dim: hidden units
:param bbn_dim: binary bottleneck size
:param cbn_dim: continuous bottleneck size
"""
super(VaeEncoderGeco, self).__init__()
self.code_length = bbn_dim
self.fc_1 = tf.keras.layers.Dense(middle_dim, activation='gelu',kernel_constraint=tf.keras.constraints.MaxNorm(max_value=1), bias_constraint=tf.keras.constraints.MaxNorm(max_value=1))
self.fc_2_1 = tf.keras.layers.Dense(bbn_dim*2,kernel_constraint=tf.keras.constraints.MaxNorm(max_value=1), bias_constraint=tf.keras.constraints.MaxNorm(max_value=1))
self.fc_2_2 = tf.keras.layers.Dense(cbn_dim*2,kernel_constraint=tf.keras.constraints.MaxNorm(max_value=1), bias_constraint=tf.keras.constraints.MaxNorm(max_value=1))
self.reconstruction1 = tf.keras.layers.Dense(2048, activation='gelu',kernel_constraint=tf.keras.constraints.MaxNorm(max_value=1), bias_constraint=tf.keras.constraints.MaxNorm(max_value=1))
self.reconstruction2 = tf.keras.layers.Dense(4096, activation='sigmoid',kernel_constraint=tf.keras.constraints.MaxNorm(max_value=1), bias_constraint=tf.keras.constraints.MaxNorm(max_value=1))
def call(self, inputs, training=True, **kwargs):
batch_size = tf.shape(inputs)[0]
fc_1 = self.fc_1(inputs)
mean, logvar = get_mean_logvar(self, inputs)
if training:
self.eps = tf.clip_by_value(tf.random.normal(shape=mean.shape),-5,5)
else:
self.eps = tf.zeros(shape=mean.shape)
bbn = custom_activation(mean,logvar,self.eps)
cbn = self.fc_2_2(fc_1)
mean2, logvar2 = split_node(cbn)
if training:
self.eps2 = tf.clip_by_value(tf.random.normal(shape=mean2.shape),-5,5)
else:
self.eps2 = tf.zeros(shape=mean2.shape)
cbn = mean2 + logvar2*self.eps2
return bbn, cbn
# noinspection PyAbstractClass
class Decoder(tf.keras.layers.Layer):
def __init__(self, middle_dim, feat_dim):
"""
:param middle_dim: hidden units
:param feat_dim: data dim
"""
super(Decoder, self).__init__()
self.fc_1 = tf.keras.layers.Dense(middle_dim, activation='gelu',kernel_constraint=tf.keras.constraints.MaxNorm(max_value=1), bias_constraint=tf.keras.constraints.MaxNorm(max_value=1))
self.fc_2 = tf.keras.layers.Dense(feat_dim, activation='gelu',kernel_constraint=tf.keras.constraints.MaxNorm(max_value=1), bias_constraint=tf.keras.constraints.MaxNorm(max_value=1))
def call(self, inputs, **kwargs):
fc_1 = self.fc_1(inputs)
return self.fc_2(fc_1)
if __name__ == '__main__':
a = tf.ones([2, 4096], dtype=tf.float32)
encoder = Encoder(1024, 64, 512)
b = encoder(a)
print(encoder.trainable_variables)
|
{"/vis_util.py": ["/model/tbh.py", "/util/distribution_tools.py", "/util/data/dataset.py", "/meta.py"], "/vis.py": ["/vis_util.py", "/util/data/dataset.py", "/meta.py", "/util/data/set_processor.py"], "/layer/encodec.py": ["/layer/binary_activation.py", "/util/distribution_tools.py"], "/util/data/make_data.py": ["/util/data/set_processor.py", "/meta.py"], "/model/tbh.py": ["/layer/binary_activation.py", "/util/distribution_tools.py", "/util/data/set_processor.py"], "/train/tbh_train.py": ["/model/tbh.py", "/util/data/dataset.py", "/util/distribution_tools.py", "/layer/twin_bottleneck.py", "/meta.py"], "/util/data/dataset.py": ["/meta.py", "/util/data/set_processor.py"]}
|
26,869
|
maximverwilst/deepimagehashing-VAE
|
refs/heads/main
|
/meta.py
|
import os
REPO_PATH = os.path.abspath(__file__)[:os.path.abspath(__file__).rfind(os.path.sep)]
|
{"/vis_util.py": ["/model/tbh.py", "/util/distribution_tools.py", "/util/data/dataset.py", "/meta.py"], "/vis.py": ["/vis_util.py", "/util/data/dataset.py", "/meta.py", "/util/data/set_processor.py"], "/layer/encodec.py": ["/layer/binary_activation.py", "/util/distribution_tools.py"], "/util/data/make_data.py": ["/util/data/set_processor.py", "/meta.py"], "/model/tbh.py": ["/layer/binary_activation.py", "/util/distribution_tools.py", "/util/data/set_processor.py"], "/train/tbh_train.py": ["/model/tbh.py", "/util/data/dataset.py", "/util/distribution_tools.py", "/layer/twin_bottleneck.py", "/meta.py"], "/util/data/dataset.py": ["/meta.py", "/util/data/set_processor.py"]}
|
26,870
|
maximverwilst/deepimagehashing-VAE
|
refs/heads/main
|
/util/data/make_data.py
|
import os
import tensorflow as tf
from util.data.set_processor import SET_PROCESSOR, SET_SPLIT
from meta import REPO_PATH
# noinspection PyUnusedLocal
def default_processor(root_folder):
raise NotImplementedError
def process_mat(set_name, root_folder):
processor = SET_PROCESSOR.get(set_name)
return processor(root_folder)
def _int64_feature(value):
"""Create a feature that is serialized as an int64."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _float_feature(value):
"""Returns a float_list from a float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def convert_tfrecord(data, set_name, part_name):
data_length = data['feat'].shape[0]
save_path = os.path.join(REPO_PATH, 'data', set_name)
print(REPO_PATH)
if not os.path.exists(save_path):
os.makedirs(save_path)
file_name = os.path.join(save_path, part_name + '.tfrecords')
writer = tf.io.TFRecordWriter(file_name)
for i in range(data_length):
print(i)
this_id = _int64_feature(data['fid'][i])
this_feat = _float_feature(data['feat'][i, :])
this_label = _float_feature(data['label'][i, :])
feat_dict = {'id': this_id,
'feat': this_feat,
'label': this_label}
feature = tf.train.Features(feature=feat_dict)
example = tf.train.Example(features=feature)
writer.write(example.SerializeToString())
writer.close()
def build_dataset(set_name, root_folder):
train_dict, test_dict = process_mat(set_name, root_folder)
convert_tfrecord(train_dict, set_name, SET_SPLIT[0])
convert_tfrecord(test_dict, set_name, SET_SPLIT[1])
if __name__ == '__main__':
build_dataset('cifar10', '/home/ymcidence/Workspace/CodeGeass/GraphBinary/data/')
|
{"/vis_util.py": ["/model/tbh.py", "/util/distribution_tools.py", "/util/data/dataset.py", "/meta.py"], "/vis.py": ["/vis_util.py", "/util/data/dataset.py", "/meta.py", "/util/data/set_processor.py"], "/layer/encodec.py": ["/layer/binary_activation.py", "/util/distribution_tools.py"], "/util/data/make_data.py": ["/util/data/set_processor.py", "/meta.py"], "/model/tbh.py": ["/layer/binary_activation.py", "/util/distribution_tools.py", "/util/data/set_processor.py"], "/train/tbh_train.py": ["/model/tbh.py", "/util/data/dataset.py", "/util/distribution_tools.py", "/layer/twin_bottleneck.py", "/meta.py"], "/util/data/dataset.py": ["/meta.py", "/util/data/set_processor.py"]}
|
26,871
|
maximverwilst/deepimagehashing-VAE
|
refs/heads/main
|
/model/tbh.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from layer import encodec, twin_bottleneck
from layer.binary_activation import custom_activation
from util.distribution_tools import get_mean_logvar
from util.data.set_processor import SET_DIM
# noinspection PyAbstractClass
class TBH(tf.keras.Model):
def __init__(self, set_name, bbn_dim, cbn_dim, middle_dim=1024, *args, **kwargs):
super().__init__(*args, **kwargs)
self.set_name = set_name
self.bbn_dim = bbn_dim
self.cbn_dim = cbn_dim
self.middle_dim = middle_dim
self.feat_dim = SET_DIM.get(set_name, 4096)
self.encoder = encodec.VaeEncoderGeco(middle_dim, bbn_dim, cbn_dim)
self.decoder = encodec.Decoder(middle_dim, self.feat_dim)
self.tbn = twin_bottleneck.TwinBottleneck(bbn_dim, cbn_dim)
self.dis_1 = tf.keras.layers.Dense(1, activation='sigmoid')
self.dis_2 = tf.keras.layers.Dense(1, activation='sigmoid')
def call(self, inputs, training=True, mask=None, continuous=True):
feat_in = tf.cast(inputs[0][1], dtype=tf.float32)
bbn, cbn = self.encoder(feat_in, training=training)
if training:
bn = self.tbn(bbn, cbn)
dis_1 = self.dis_1(bbn)
dis_2 = self.dis_2(bn)
feat_out = self.decoder(bn)
sample_bbn = inputs[1]
sample_bn = inputs[2]
dis_1_sample = self.dis_1(sample_bbn)
dis_2_sample = self.dis_2(sample_bn)
return bbn, feat_out, dis_1, dis_2, dis_1_sample, dis_2_sample
else:
return bbn
|
{"/vis_util.py": ["/model/tbh.py", "/util/distribution_tools.py", "/util/data/dataset.py", "/meta.py"], "/vis.py": ["/vis_util.py", "/util/data/dataset.py", "/meta.py", "/util/data/set_processor.py"], "/layer/encodec.py": ["/layer/binary_activation.py", "/util/distribution_tools.py"], "/util/data/make_data.py": ["/util/data/set_processor.py", "/meta.py"], "/model/tbh.py": ["/layer/binary_activation.py", "/util/distribution_tools.py", "/util/data/set_processor.py"], "/train/tbh_train.py": ["/model/tbh.py", "/util/data/dataset.py", "/util/distribution_tools.py", "/layer/twin_bottleneck.py", "/meta.py"], "/util/data/dataset.py": ["/meta.py", "/util/data/set_processor.py"]}
|
26,872
|
maximverwilst/deepimagehashing-VAE
|
refs/heads/main
|
/util/distribution_tools.py
|
import tensorflow as tf
import numpy as np
def log_normal_pdf(sample, mean, logvar, raxis=1):
log2pi = tf.math.log(2. * np.pi)
return -.5 * ((tf.exp(-logvar/2)*(sample - mean)) ** 2. + logvar + log2pi)
def get_mean_logvar(enc, x):
fc_1 = enc.fc_1(x)
bbn = enc.fc_2_1(fc_1)
mean, logvar = split_node(bbn)
return mean, logvar
def split_node(bbn):
mean, logvar = tf.split(bbn, num_or_size_splits=2, axis=1)
if(mean.shape[0] == None):
mean = tf.reshape(mean,[1,mean.shape[1]])
logvar = tf.reshape(logvar,[1,logvar.shape[1]])
return mean, logvar
def elbo_decomposition(mean, logvar, eps, actor_loss):
logpx = -actor_loss
qz_samples = eps * tf.exp(logvar * .5) + mean
nlogpz = tf.reduce_mean(log_normal_pdf(qz_samples, 0., 0.),axis=0)
nlogqz_condx = tf.reduce_mean(log_normal_pdf(qz_samples, mean, logvar),axis=0)
marginal_entropies, joint_entropy = estimate_entropies(qz_samples, mean, logvar)
# Independence term
# KL(q(z)||prod_j q(z_j)) = log q(z) - sum_j log q(z_j)
dependence = (- joint_entropy + tf.math.reduce_mean(marginal_entropies))[0]
# Information term
# KL(q(z|x)||q(z)) = log q(z|x) - log q(z)
information = (- tf.math.reduce_mean(nlogqz_condx) + joint_entropy)[0]
# Dimension-wise KL term
# sum_j KL(q(z_j)||p(z_j)) = sum_j (log q(z_j) - log p(z_j))
dimwise_kl = tf.math.reduce_mean(- marginal_entropies + nlogpz)
# Compute sum of terms analytically
# KL(q(z|x)||p(z)) = log q(z|x) - log p(z)
analytical_cond_kl = tf.reduce_mean(- nlogqz_condx + nlogpz)
return logpx, dependence, information, dimwise_kl, analytical_cond_kl, marginal_entropies, joint_entropy
def estimate_entropies(qz_samples, mean, logvar, n_samples=1024, weights=None):
"""Computes the term:
E_{p(x)} E_{q(z|x)} [-log q(z)]
and
E_{p(x)} E_{q(z_j|x)} [-log q(z_j)]
where q(z) = 1/N sum_n=1^N q(z|x_n).
Assumes samples are from q(z|x) for *all* x in the dataset.
Assumes that q(z|x) is factorial ie. q(z|x) = prod_j q(z_j|x).
Computes numerically stable NLL:
- log q(z) = log N - logsumexp_n=1^N log q(z|x_n)
Inputs:
-------
qz_samples (N, K) Variable
mean (N, K) Variable
logvar (N, K) Variable
"""
# S batch size, K hidden units
S, K = qz_samples.shape
weights = -tf.math.log(float(S))
marginal_entropies = tf.zeros(K)
joint_entropy = tf.zeros(1)
k = 0
while k < S:
batch_size = min(10, S - k)
logqz_i = log_normal_pdf(qz_samples[k:k + batch_size,:],mean[k:k + batch_size,:], logvar[k:k + batch_size,:])
k += batch_size
# computes - log q(z_i) summed over minibatch
marginal_entropies += - (weights + log_sum_exp(logqz_i , dim=0, keepdim=False))
# computes - log q(z) summed over minibatch
logqz = tf.math.reduce_mean(logqz_i, axis=1) # (N, S)
joint_entropy += (tf.math.log(float(S)) - log_sum_exp(logqz, dim=0, keepdim=False))
marginal_entropies /= S
joint_entropy /= S
return marginal_entropies, joint_entropy
def calc_mi(model, x):
"""Approximate the mutual information between x and z
I(x, z) = E_xE_{q(z|x)}log(q(z|x)) - E_xE_{q(z|x)}log(q(z))
Returns: Float"""
# [x_batch, nz]
mu, logvar = get_mean_logvar(model.encoder, x)
batch_size, nz = mu.shape
# E_{q(z|x)}log(q(z|x)) = -0.5*nz*log(2*\pi) - 0.5*(1+logvar).sum(-1)
neg_entropy = tf.math.reduce_mean(-0.5 * nz * tf.math.log(2 * np.pi)- 0.5 * tf.math.reduce_sum(1 + logvar,axis=-1))
# [z_batch, 1, nz]
std = tf.math.exp(tf.math.scalar_mul(0.5,logvar))
mu_expd = tf.expand_dims(mu,1)
std_expd = tf.expand_dims(std,1)
eps = tf.random.normal(std_expd.shape)
z_samples = mu_expd + tf.math.multiply(eps, std_expd)
# [1, x_batch, nz]
mu, logvar = tf.expand_dims(mu,0), tf.expand_dims(logvar,0)
var = tf.math.exp(logvar)
# (z_batch, x_batch, nz)
dev = z_samples - mu
# (z_batch, x_batch) tf.reduce_sum((dev ** 2)/var,axis=-1)
log_density = -0.5 * tf.math.multiply(tf.reduce_sum(1/var,axis=-1), tf.reduce_sum(dev ** 2,axis=-1)) - \
0.5 * (nz * tf.math.log(2 * np.pi) + tf.reduce_sum(logvar,axis=-1))
# log q(z): aggregate posterior
# [z_batch]
log_qz = log_sum_exp(log_density, dim=1) - tf.math.log(float(batch_size))
#print("qz",tf.math.reduce_any(tf.math.is_inf(log_qz)))
return (neg_entropy - tf.math.reduce_mean(log_qz,axis=-1)).numpy()
def log_sum_exp(value, dim=None, keepdim=False):
"""Numerically stable implementation of the operation
value.exp().sum(dim, keepdim).log()
"""
if dim is not None:
m = tf.math.reduce_max(value, axis=dim, keepdims=True)
value0 = value - m
if keepdim is False:
m = tf.squeeze(m,dim)
return m + tf.math.log(tf.math.reduce_sum(tf.math.exp(value0), axis=dim, keepdims=keepdim))
else:
m = tf.math.reduce_max(value)
sum_exp = tf.math.reduce_sum(tf.math.exp(value - m))
return m + tf.math.log(sum_exp)
|
{"/vis_util.py": ["/model/tbh.py", "/util/distribution_tools.py", "/util/data/dataset.py", "/meta.py"], "/vis.py": ["/vis_util.py", "/util/data/dataset.py", "/meta.py", "/util/data/set_processor.py"], "/layer/encodec.py": ["/layer/binary_activation.py", "/util/distribution_tools.py"], "/util/data/make_data.py": ["/util/data/set_processor.py", "/meta.py"], "/model/tbh.py": ["/layer/binary_activation.py", "/util/distribution_tools.py", "/util/data/set_processor.py"], "/train/tbh_train.py": ["/model/tbh.py", "/util/data/dataset.py", "/util/distribution_tools.py", "/layer/twin_bottleneck.py", "/meta.py"], "/util/data/dataset.py": ["/meta.py", "/util/data/set_processor.py"]}
|
26,873
|
maximverwilst/deepimagehashing-VAE
|
refs/heads/main
|
/util/data/set_processor.py
|
import scipy.io as sio
import numpy as np
import os
SET_SPLIT = ['train', 'test']
SET_DIM = {'cifar10': 4096, "NETosis": 20000, "multimodal":400,"coco":2048,"2018_02_27_P103_evHeLa_4M":1280,"2018_03_16_P103_shPerk_bQ":1280}
SET_LABEL = {'cifar10': 10, "NETosis": 2, "multimodal":1,"coco":80,"2018_02_27_P103_evHeLa_4M":1,"2018_03_16_P103_shPerk_bQ":1}
SET_SIZE = {'cifar10': [50000, 10000], "NETosis": [27881,5577],"2018_03_16_P103_shPerk_bQ": [92577,10399], "multimodal":[1910982,213676], "coco":[10000,5000],"2018_02_27_P103_evHeLa_4M":[24948,2772]}
def cifar_processor(root_folder):
class_num = 10
def reader(file_name, part=SET_SPLIT[0]):
data_mat = sio.loadmat(file_name)
feat = data_mat[part + '_data']
label = np.squeeze(data_mat[part + '_label'])
fid = np.arange(0, feat.shape[0])
label = np.eye(class_num)[label]
return {'feat': feat, 'label': label, 'fid': fid}
train_name = os.path.join(root_folder, 'cifar10_fc7_train.mat')
train_dict = reader(train_name)
test_name = os.path.join(root_folder, 'cifar10_fc7_test.mat')
test_dict = reader(test_name, part=SET_SPLIT[1])
return train_dict, test_dict
SET_PROCESSOR = {'cifar10': cifar_processor, "NETosis": cifar_processor}
|
{"/vis_util.py": ["/model/tbh.py", "/util/distribution_tools.py", "/util/data/dataset.py", "/meta.py"], "/vis.py": ["/vis_util.py", "/util/data/dataset.py", "/meta.py", "/util/data/set_processor.py"], "/layer/encodec.py": ["/layer/binary_activation.py", "/util/distribution_tools.py"], "/util/data/make_data.py": ["/util/data/set_processor.py", "/meta.py"], "/model/tbh.py": ["/layer/binary_activation.py", "/util/distribution_tools.py", "/util/data/set_processor.py"], "/train/tbh_train.py": ["/model/tbh.py", "/util/data/dataset.py", "/util/distribution_tools.py", "/layer/twin_bottleneck.py", "/meta.py"], "/util/data/dataset.py": ["/meta.py", "/util/data/set_processor.py"]}
|
26,874
|
maximverwilst/deepimagehashing-VAE
|
refs/heads/main
|
/train/tbh_train.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from model.tbh import TBH
from util.data.dataset import Dataset
from util.eval_tools import eval_cls_map
from util.distribution_tools import log_normal_pdf, calc_mi, get_mean_logvar, elbo_decomposition, split_node
from util.optimizers.cocob import COCOB
from layer.twin_bottleneck import build_adjacency_hamming
import matplotlib.pyplot as plt
import pickle
from meta import REPO_PATH
import os
from time import gmtime, strftime
import numpy as np
def hook(query, base, label_q, label_b, at=1000):
    """Evaluation hook: classification mAP of `query` codes against the
    `base` set, truncated at the top-`at` retrievals."""
    score = eval_cls_map(query, base, label_q, label_b, at)
    return score
@tf.function
def adv_loss(real, fake):
    """Discriminator (adversarial) loss: binary cross-entropy of the real
    logits against ones plus the fake logits against zeros."""
    bce = tf.keras.losses.binary_crossentropy
    loss_on_real = tf.reduce_mean(bce(tf.ones_like(real), real))
    loss_on_fake = tf.reduce_mean(bce(tf.zeros_like(fake), fake))
    return loss_on_real + loss_on_fake
@tf.function
def reconstruction_loss(pred, origin):
    """Mean over the batch of per-sample summed squared error."""
    squared_error = tf.math.square(pred - origin)
    per_sample = tf.reduce_sum(squared_error, axis=1)
    return tf.reduce_mean(per_sample)
@tf.function
def divergent_loss(mean, logvar, eps):
    """Negated weighted sum of ELBO-decomposition terms: information +
    dimension-wise KL + 1.5x total-correlation (dependence)."""
    (logpx, dependence, information, dimwise_kl,
     analytical_cond_kl, marginal_entropies, joint_entropy) = \
        elbo_decomposition(mean, logvar, eps, 0.)
    weighted = information + dimwise_kl + 1.5 * dependence
    return -weighted
def GECO(x, reconstruction_mu, latent_mu, latent_logsigma, z, tol):
    """Importance-weighted VAE objective (GECO-style, with tolerance `tol`).

    :param x: original features
    :param reconstruction_mu: decoder reconstruction of x
    :param latent_mu: latent Gaussian mean
    :param latent_logsigma: latent Gaussian log-sigma
    :param z: reparameterized latent sample
    :param tol: reconstruction-error tolerance subtracted from log_p
    :return: scalar loss (negative importance-weighted mean)
    """
    # Per-sample reconstruction log-likelihood term, shifted by the tolerance.
    log_p = tf.math.reduce_sum(tf.math.pow(reconstruction_mu - x, 2), axis = -1) - tol
    # Log-density of z under the approximate posterior q(z|x).
    log_q = tf.math.reduce_sum(-0.5 * ((z - latent_mu)/tf.math.exp(latent_logsigma))**2 - latent_logsigma, axis=-1)
    # Log-density of z under the standard-normal prior.
    log_prior = tf.math.reduce_sum(-0.5 * tf.math.pow(z,2), -1)
    # NOTE(review): log_p is scaled by 1/1000 — presumably a temperature /
    # balancing constant; confirm against the training configuration.
    total = log_p/1000. + log_prior - log_q
    # Subtract the max for numerically stable softmax-style weighting.
    total = total - tf.math.reduce_max(total)
    weights = tf.math.exp(total)
    # Self-normalized importance weights; stop_gradient keeps the
    # normalizer out of the backward pass.
    normalized_weights = weights / tf.stop_gradient(tf.math.reduce_sum(weights))
    out = -tf.math.reduce_mean(tf.math.reduce_sum(normalized_weights * total, 0))
    return out
def train_step(model: TBH, batch_data, bbn_dim, cbn_dim, batch_size, actor_opt: tf.optimizers.Optimizer,
               critic_opt: tf.optimizers.Optimizer, divergence_opt: tf.optimizers.Optimizer, lambd):
    """Run one adversarial + variational training step.

    :param model: TBH model (encoder / decoder / twin bottleneck / discriminator)
    :param batch_data: batch tuple; batch_data[1] is the feature tensor,
        batch_data[0]/[2] are ids and labels (see Dataset)
    :param bbn_dim: binary bottleneck width
    :param cbn_dim: continuous bottleneck width
    :param batch_size: number of samples in the batch
    :param actor_opt/critic_opt/divergence_opt: optimizers for the three scopes
    :param lambd: GECO Lagrange multiplier for the reconstruction constraint
    :return: (binary codes as numpy, actor loss proxy, critic loss,
        divergence loss, reconstruction constraint)
    """
    # Reference samples for the discriminator: random {0,1} codes and a
    # constant 0.5 continuous vector (uniform noise is zeroed then offset).
    random_binary = (tf.sign(tf.random.uniform([batch_size, bbn_dim]) - 0.5) + 1) / 2
    random_cont = tf.random.uniform([batch_size, cbn_dim])*0.+.5
    # NOTE(review): divergence_tape2 is opened but never used — dead tape.
    with tf.GradientTape() as actor_tape, tf.GradientTape() as critic_tape, tf.GradientTape() as divergence_tape, tf.GradientTape() as divergence_tape2:
        model_input = [batch_data, random_binary, random_cont]
        model_output = model(model_input, training=True, continuous=True)
        # Encoder's Gaussian parameters and the noise it sampled this pass.
        mean, logvar = get_mean_logvar(model.encoder,batch_data[1])
        eps = model.encoder.eps
        divergence_loss = divergent_loss(mean, logvar, eps)
        # model_output[5]/[3]: discriminator scores on real vs. fake
        # continuous codes (presumably — confirm against TBH.call ordering).
        critic_loss = adv_loss(model_output[5], model_output[3])
        # Recompute the continuous bottleneck to get mu/log-sigma for GECO.
        fc_1 = model.encoder.fc_1(batch_data[1])
        cbn = model.encoder.fc_2_2(fc_1)
        latent_mu, latent_logsigma = split_node(cbn)
        z = latent_mu + latent_logsigma*model.encoder.eps2
        fc_2 = model.encoder.reconstruction1(model_output[1])
        reconstruction_mu = model.encoder.reconstruction2(fc_2)
        # Reconstruction-error tolerance for the GECO constraint.
        tol = 2400
        constraint = tf.reduce_mean(tf.reduce_sum(tf.math.pow(reconstruction_mu - batch_data[1], 2), axis = 1) - tol)
        KL_div = GECO(batch_data[1], reconstruction_mu, latent_mu, latent_logsigma, z, tol)
        # Lagrangian: KL objective + lambda-weighted constraint violation.
        product = constraint*lambd
        loss = KL_div+ product
    # Three disjoint-by-intent variable scopes; note the divergence scope
    # overlaps the actor scope (encoder.fc_1), so order of updates matters.
    actor_scope = model.encoder.trainable_variables + model.decoder.trainable_variables + model.tbn.trainable_variables
    divergence_scope = model.encoder.fc_2_1.trainable_variables+model.encoder.fc_1.trainable_variables
    critic_scope = model.dis_2.trainable_variables
    actor_gradient = actor_tape.gradient(loss, sources=actor_scope)
    divergence_gradient = divergence_tape.gradient(divergence_loss, sources=divergence_scope)
    critic_gradient = critic_tape.gradient(critic_loss, sources=critic_scope)
    # Clip only the divergence gradients to stabilize that objective.
    divergence_gradient = [(tf.clip_by_value(grad, -1.0, 1.0)) for grad in divergence_gradient]
    divergence_opt.apply_gradients(zip(divergence_gradient, divergence_scope))
    actor_opt.apply_gradients(zip(actor_gradient, actor_scope))
    critic_opt.apply_gradients(zip(critic_gradient, critic_scope))
    return model_output[0].numpy(), constraint + tol, critic_loss.numpy(), divergence_loss.numpy(), constraint
def test_step(model: TBH, batch_data):
    """Encode one evaluation batch and return the codes as a numpy array."""
    outputs = model([batch_data], training=False)
    return outputs.numpy()
def train(set_name, bbn_dim, cbn_dim, batch_size, middle_dim=1024, max_iter=1000000):
    """Full TBH training loop with GECO lambda adaptation.

    :param set_name: dataset key (see SET_* tables in set_processor)
    :param bbn_dim: binary bottleneck (hash code) width
    :param cbn_dim: continuous bottleneck width
    :param batch_size: training batch size
    :param middle_dim: hidden layer width of the model
    :param max_iter: number of training batches to run
    """
    model = TBH(set_name, bbn_dim, cbn_dim, middle_dim)
    data = Dataset(set_name=set_name, batch_size=batch_size, code_length=bbn_dim)
    actor_opt = tf.keras.optimizers.Adam(1e-5)
    critic_opt = tf.keras.optimizers.Adam(1e-5)
    divergence_opt = tf.keras.optimizers.Adam(1e-8)
    train_iter = iter(data.train_data)
    test_iter = iter(data.test_data)
    test_batch = next(test_iter)
    # Result layout: result/<set>/model for checkpoints, result/<set>/log/<ts>.
    time_string = strftime("%a%d%b%Y-%H%M%S", gmtime())
    result_path = os.path.join(REPO_PATH, 'result', set_name)
    save_path = os.path.join(result_path, 'model')
    summary_path = os.path.join(result_path, 'log', time_string)
    if not os.path.exists(result_path):
        os.makedirs(result_path)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    writer = tf.summary.create_file_writer(summary_path)
    checkpoint = tf.train.Checkpoint(actor_opt=actor_opt, critic_opt=critic_opt, divergence_opt=divergence_opt, model=model)
    save_name = os.path.join(save_path)
    manager = tf.train.CheckpointManager(checkpoint, save_name, max_to_keep=1)
    # NOTE(review): best_actor and manager are set up but never used below.
    best_actor = 9999.
    best_hook = 0
    # GECO state: lambda multiplier and a moving average of the constraint.
    lambd = 1.
    constrain_ma = 1.
    alpha = .99
    for i in range(max_iter):
        with writer.as_default():
            train_batch = next(train_iter)
            train_code, actor_loss, critic_loss, divergence_loss, constraint = train_step(model, train_batch, bbn_dim, cbn_dim, batch_size, actor_opt, critic_opt, divergence_opt, lambd)
            train_label = train_batch[2].numpy()
            train_entry = train_batch[0].numpy()
            # Cache the latest codes/labels for retrieval evaluation.
            data.update(train_entry, train_code, train_label, 'train')
            # Exponential moving average of the constraint violation.
            if i == 0:
                constrain_ma = constraint
            else:
                constrain_ma = alpha * constrain_ma + (1. - alpha) * constraint
            # Every 100 steps: multiplicatively adapt lambda (GECO update),
            # clipped per-step to [0.9, 1.1] and overall to [1e-6, 1e12].
            if i % 100 == 0:
                lambd *= tf.clip_by_value(tf.math.exp(constrain_ma), .9, 1.1)
                lambd = tf.clip_by_value(lambd,1e-6,1e12)
                if lambd != lambd:#check NaN values
                    lambd = 1e12
            if (i + 1) % 100 == 0:
                test_batch = next(test_iter)
                # Train-set self-retrieval mAP as a cheap progress signal.
                train_hook = hook(train_code, train_code, train_label, train_label, at=min(batch_size, 1000))
                tf.summary.scalar("train/lambd", lambd, step=i)
                tf.summary.scalar("train/constrain", constrain_ma, step=i)
                tf.summary.scalar('train/actor', actor_loss, step=i)
                tf.summary.scalar('train/critic', critic_loss, step=i)
                tf.summary.scalar('train/divergence', divergence_loss, step=i)
                tf.summary.scalar('train/hook', train_hook, step=i)
                writer.flush()
                print('batch {}: train_hook {}, actor {}, critic {}, divergence {}, lambda {}'.format(i, train_hook, actor_loss, critic_loss, divergence_loss, lambd))
            # Every 2000 steps: evaluate one test batch against the cached
            # training codes and keep the best-scoring model on disk.
            if (i + 1) % 2000 == 0:
                print('Testing!!!!!!!!')
                test_batch = next(test_iter)
                test_code = test_step(model, test_batch)
                test_label = test_batch[2].numpy()
                test_entry = test_batch[0].numpy()
                data.update(test_entry, test_code, test_label, 'test')
                test_hook = hook(test_code, data.train_code, test_label, data.train_label, at=1000)
                tf.summary.scalar('test/hook', test_hook, step=i)
                if test_hook >= best_hook:
                    best_hook = test_hook
                    tf.keras.models.save_model(model, filepath = save_path)
                print("test_hook: ", test_hook)
if __name__ == '__main__':
    # Default experiment: CIFAR-10, 32-bit binary codes, 512-d continuous
    # bottleneck, batch size 400.
    train('cifar10', 32, 512, 400)
|
{"/vis_util.py": ["/model/tbh.py", "/util/distribution_tools.py", "/util/data/dataset.py", "/meta.py"], "/vis.py": ["/vis_util.py", "/util/data/dataset.py", "/meta.py", "/util/data/set_processor.py"], "/layer/encodec.py": ["/layer/binary_activation.py", "/util/distribution_tools.py"], "/util/data/make_data.py": ["/util/data/set_processor.py", "/meta.py"], "/model/tbh.py": ["/layer/binary_activation.py", "/util/distribution_tools.py", "/util/data/set_processor.py"], "/train/tbh_train.py": ["/model/tbh.py", "/util/data/dataset.py", "/util/distribution_tools.py", "/layer/twin_bottleneck.py", "/meta.py"], "/util/data/dataset.py": ["/meta.py", "/util/data/set_processor.py"]}
|
26,875
|
maximverwilst/deepimagehashing-VAE
|
refs/heads/main
|
/util/data/dataset.py
|
import tensorflow as tf
import os
import numpy as np
from meta import REPO_PATH
from util.data.set_processor import SET_DIM, SET_LABEL, SET_SPLIT, SET_SIZE
class ParsedRecord(object):
    """Wraps one TFRecord file (or several shards) as a parsed, batched,
    repeated tf.data.Dataset.

    Keyword args:
        set_name: dataset key, default 'cifar10'
        part_name: 'train' or 'test' (SET_SPLIT values), default 'train'
        batch_size: batch size, default 256
        random: shuffle if True (training), default True
    """

    def __init__(self, **kwargs):
        self.set_name = kwargs.get('set_name', 'cifar10')
        self.part_name = kwargs.get('part_name', 'train')
        self.batch_size = kwargs.get('batch_size', 256)
        rand = kwargs.get("random", True)
        self.data = self._load_data(rand)

    def _load_data(self, rand):
        """Build the pipeline: parse -> prefetch -> cache -> repeat
        [-> shuffle] -> batch."""
        def data_parser(tf_example: tf.train.Example):
            # Fixed-length schema: scalar id, float feature vector of the
            # dataset's dimensionality, one-hot float label.
            feat_dict = {'id': tf.io.FixedLenFeature([], tf.int64),
                         'feat': tf.io.FixedLenFeature([SET_DIM.get(self.set_name, 4096)], tf.float32),
                         'label': tf.io.FixedLenFeature([SET_LABEL.get(self.set_name, 10)], tf.float32)}
            features = tf.io.parse_single_example(tf_example, features=feat_dict)
            _id = tf.cast(features['id'], tf.int32)
            _feat = tf.cast(features['feat'], tf.float32)
            _label = tf.cast(features['label'], tf.int32)
            return _id, _feat, _label

        if self.set_name == "NETosis" and self.part_name == "train":
            # The NETosis training set is sharded over four files
            # (train0..train3.tfrecords). TFRecordDataset accepts a list of
            # filenames, so pass all shards. (Bug fix: the original built
            # this list, discarded it, and only read the last shard.)
            record_name = [
                os.path.join(REPO_PATH, 'data', self.set_name,
                             self.part_name + str(i) + '.tfrecords')
                for i in range(4)
            ]
        else:
            record_name = os.path.join(REPO_PATH, 'data', self.set_name, self.part_name + '.tfrecords')
        data = tf.data.TFRecordDataset(record_name).map(data_parser, num_parallel_calls=50).prefetch(self.batch_size)
        if rand:
            data = data.cache().repeat().shuffle(10000).batch(self.batch_size)
        else:
            data = data.cache().repeat().batch(self.batch_size)
        return data

    @property
    def output_contents(self):
        # Human-readable description of the tuple yielded per example.
        return ['fid', 'feature', 'label']
class Dataset(object):
    """Owns the train/test tf.data pipelines plus numpy buffers holding the
    most recent code/label for every sample (filled in via update())."""

    def __init__(self, **kwargs):
        self.set_name = kwargs.get('set_name', 'cifar10')
        self.batch_size = kwargs.get('batch_size', 256)
        self.code_length = kwargs.get('code_length', 32)
        self.rand = kwargs.get('random', True)
        self._load_data(self.rand)
        train_size, test_size = SET_SIZE.get(self.set_name)
        label_dim = SET_LABEL.get(self.set_name, 10)
        # Buffers indexed by sample id; updated after every processed batch.
        self.train_code = np.zeros([train_size, self.code_length])
        self.test_code = np.zeros([test_size, self.code_length])
        self.train_label = np.zeros([train_size, label_dim])
        self.test_label = np.zeros([test_size, label_dim])

    def _load_data(self, rand):
        """Create the train and test ParsedRecord pipelines."""
        shared = {'set_name': self.set_name,
                  'batch_size': self.batch_size,
                  "random": rand}
        self.train_data = ParsedRecord(part_name=SET_SPLIT[0], **shared).data
        self.test_data = ParsedRecord(part_name=SET_SPLIT[1], **shared).data

    def update(self, entry, code, label, split):
        """Store freshly computed codes/labels for the sample ids `entry`."""
        if split == SET_SPLIT[0]:
            code_buf, label_buf = self.train_code, self.train_label
        elif split == SET_SPLIT[1]:
            code_buf, label_buf = self.test_code, self.test_label
        else:
            raise NotImplementedError
        code_buf[entry, :] = code
        label_buf[entry, :] = label
|
{"/vis_util.py": ["/model/tbh.py", "/util/distribution_tools.py", "/util/data/dataset.py", "/meta.py"], "/vis.py": ["/vis_util.py", "/util/data/dataset.py", "/meta.py", "/util/data/set_processor.py"], "/layer/encodec.py": ["/layer/binary_activation.py", "/util/distribution_tools.py"], "/util/data/make_data.py": ["/util/data/set_processor.py", "/meta.py"], "/model/tbh.py": ["/layer/binary_activation.py", "/util/distribution_tools.py", "/util/data/set_processor.py"], "/train/tbh_train.py": ["/model/tbh.py", "/util/data/dataset.py", "/util/distribution_tools.py", "/layer/twin_bottleneck.py", "/meta.py"], "/util/data/dataset.py": ["/meta.py", "/util/data/set_processor.py"]}
|
26,876
|
maximverwilst/deepimagehashing-VAE
|
refs/heads/main
|
/run_tbh.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
from train import tbh_train
# Thin entry point: train TBH on CIFAR-10 with 32-bit binary codes,
# a 512-d continuous bottleneck and batch size 400.
tbh_train.train('cifar10', 32, 512, 400)
|
{"/vis_util.py": ["/model/tbh.py", "/util/distribution_tools.py", "/util/data/dataset.py", "/meta.py"], "/vis.py": ["/vis_util.py", "/util/data/dataset.py", "/meta.py", "/util/data/set_processor.py"], "/layer/encodec.py": ["/layer/binary_activation.py", "/util/distribution_tools.py"], "/util/data/make_data.py": ["/util/data/set_processor.py", "/meta.py"], "/model/tbh.py": ["/layer/binary_activation.py", "/util/distribution_tools.py", "/util/data/set_processor.py"], "/train/tbh_train.py": ["/model/tbh.py", "/util/data/dataset.py", "/util/distribution_tools.py", "/layer/twin_bottleneck.py", "/meta.py"], "/util/data/dataset.py": ["/meta.py", "/util/data/set_processor.py"]}
|
26,877
|
maximverwilst/deepimagehashing-VAE
|
refs/heads/main
|
/layer/twin_bottleneck.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from layer import gcn
import matplotlib.pyplot as plt
@tf.function
def build_adjacency_hamming(tensor_in):
    """Self-connected adjacency from pairwise normalized Hamming distance.

    For {0,1} codes, abs(A(A-1)^T + (A-1)A^T) counts differing bits, so
    dividing by the code length gives the normalized Hamming distance.

    :param tensor_in: [N D] binary codes in {0, 1}
    :return: [N N] similarity matrix (1 - dist)^1.4
    """
    num_bits = tf.cast(tf.shape(tensor_in)[1], tf.float32)
    shifted = tensor_in - 1
    cross_a = tf.matmul(tensor_in, shifted, transpose_b=True)
    cross_b = tf.matmul(shifted, tensor_in, transpose_b=True)
    hamming = tf.math.abs(cross_a + cross_b) / num_bits
    return tf.pow(1 - hamming, 1.4)
@tf.function
def hamming_split(t1):
    """Pairwise Hamming similarity (1 - normalized distance) of {0,1} codes."""
    num_bits = tf.cast(tf.shape(t1)[1], tf.float32)
    shifted = t1 - 1
    diff_count = tf.math.abs(tf.matmul(t1, shifted, transpose_b=True)
                             + tf.matmul(shifted, t1, transpose_b=True))
    return 1 - diff_count / num_bits
@tf.function
def build_adjacency_hamming_adapt(tensor_in):
    """Randomly permute the columns (code bits) of the input.

    NOTE: despite its name, this variant no longer builds an adjacency
    matrix — it simply returns the column-shuffled input. The original body
    also computed tf.pow(rinput, 1.4) and carried several commented-out
    experiments (split/means/variance), none of which contributed to the
    returned value; that dead code has been removed.

    :param tensor_in: [N D]
    :return: [N D] tensor with columns in a random order
    """
    code_length = tf.shape(tensor_in)[1]
    shuffled_cols = tf.random.shuffle(tf.range(code_length))
    return tf.gather(tensor_in, shuffled_cols, axis=1)
# noinspection PyAbstractClass
class TwinBottleneck(tf.keras.layers.Layer):
    """Couples the binary bottleneck (bbn) with the continuous one (cbn):
    the bbn defines a Hamming-similarity graph over the batch, and the cbn
    is propagated through one GCN layer on that graph."""

    def __init__(self, bbn_dim, cbn_dim, **kwargs):
        super().__init__(**kwargs)
        self.bbn_dim = bbn_dim
        self.cbn_dim = cbn_dim
        self.gcn = gcn.GCNLayer(cbn_dim)

    # noinspection PyMethodOverriding
    def call(self, bbn, cbn):
        adjacency = build_adjacency_hamming(bbn)
        propagated = self.gcn(cbn, adjacency)
        return tf.nn.sigmoid(propagated)
|
{"/vis_util.py": ["/model/tbh.py", "/util/distribution_tools.py", "/util/data/dataset.py", "/meta.py"], "/vis.py": ["/vis_util.py", "/util/data/dataset.py", "/meta.py", "/util/data/set_processor.py"], "/layer/encodec.py": ["/layer/binary_activation.py", "/util/distribution_tools.py"], "/util/data/make_data.py": ["/util/data/set_processor.py", "/meta.py"], "/model/tbh.py": ["/layer/binary_activation.py", "/util/distribution_tools.py", "/util/data/set_processor.py"], "/train/tbh_train.py": ["/model/tbh.py", "/util/data/dataset.py", "/util/distribution_tools.py", "/layer/twin_bottleneck.py", "/meta.py"], "/util/data/dataset.py": ["/meta.py", "/util/data/set_processor.py"]}
|
26,878
|
maximverwilst/deepimagehashing-VAE
|
refs/heads/main
|
/layer/gcn.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
OVERFLOW_MARGIN = 1e-8  # avoids division by zero in the degree normalization
# noinspection PyAbstractClass
class GCNLayer(tf.keras.layers.Layer):
    """One spectral graph-convolution layer: Dense, then multiplication by
    the symmetrically normalized graph Laplacian."""
    def __init__(self, out_dim, **kwargs):
        super().__init__(**kwargs)
        self.out_dim = out_dim
        self.fc = tf.keras.layers.Dense(out_dim)
        self.rs = tf.keras.layers.Flatten()#Reshape((-1,4*out_dim))
    # noinspection PyMethodOverriding
    def call(self, values, adjacency, **kwargs):
        """
        :param values: [N D] node features
        :param adjacency: [N N] self-connected adjacency
        :param kwargs: unused
        :return: [N out_dim] convolved features
        """
        return self.spectrum_conv(values, adjacency)
    @tf.function
    def spectrum_conv(self, values, adjacency):
        """
        Convolution on a graph with graph Laplacian
        :param values: [N D]
        :param adjacency: [N N] must be self-connected
        :return: L @ Dense(values)
        """
        fc_sc = self.fc(values)
        conv_sc = self.graph_laplacian(adjacency) @ fc_sc
        return conv_sc
    @tf.function
    def spectrum_conv_adapt(self, values, adjacency):
        """
        Experimental variant: `adjacency` is indexed as a pair — the
        Laplacian of adjacency[0] is stacked with adjacency[1] before the
        matmul, and the result flattened.
        NOTE(review): the semantics of adjacency[1] here are unclear from
        this file — confirm against the caller before relying on it.
        :param values: [N D]
        :param adjacency: indexable pair (see above)
        :return: flattened stacked convolution
        """
        fc_sc = self.fc(values)
        #conv_sc = tf.map_fn(fn=self.graph_laplacian,elems=adjacency) @ fc_sc
        conv_sc = self.graph_laplacian(adjacency[0])
        conv_sc = tf.stack([conv_sc,adjacency[1]]) @ fc_sc
        return self.rs(tf.transpose(conv_sc, [1, 0, 2]))
    @staticmethod
    @tf.function
    def graph_laplacian(adjacency):
        """
        Symmetrically normalized Laplacian D^-1/2 A D^-1/2.
        :param adjacency: must be self-connected
        :return: [N N] normalized Laplacian
        """
        graph_size = tf.shape(adjacency)[0]
        # Node degrees as a column vector.
        d = adjacency @ tf.ones([graph_size, 1])
        d_inv_sqrt = tf.pow(d + OVERFLOW_MARGIN, -0.5)
        # Broadcast onto the diagonal to form D^-1/2.
        d_inv_sqrt = tf.eye(graph_size) * d_inv_sqrt
        laplacian = d_inv_sqrt @ adjacency @ d_inv_sqrt
        return laplacian
|
{"/vis_util.py": ["/model/tbh.py", "/util/distribution_tools.py", "/util/data/dataset.py", "/meta.py"], "/vis.py": ["/vis_util.py", "/util/data/dataset.py", "/meta.py", "/util/data/set_processor.py"], "/layer/encodec.py": ["/layer/binary_activation.py", "/util/distribution_tools.py"], "/util/data/make_data.py": ["/util/data/set_processor.py", "/meta.py"], "/model/tbh.py": ["/layer/binary_activation.py", "/util/distribution_tools.py", "/util/data/set_processor.py"], "/train/tbh_train.py": ["/model/tbh.py", "/util/data/dataset.py", "/util/distribution_tools.py", "/layer/twin_bottleneck.py", "/meta.py"], "/util/data/dataset.py": ["/meta.py", "/util/data/set_processor.py"]}
|
26,879
|
maximverwilst/deepimagehashing-VAE
|
refs/heads/main
|
/util/data/array_reader.py
|
import tensorflow as tf
class ArrayReader(object):
    """Base class for TF1-session-based in-memory data readers (CPU only).

    Subclasses implement _build_data() and must populate self.data with
    attributes `<part>_handle`, `train_test_handle` and `feed`; get_batch()
    then runs the feed through the session for the requested part.
    """

    def __init__(self, set_name='1', batch_size=256, pre_process=False):
        # Hide all GPUs from this session — reader work is CPU-bound.
        session_config = tf.compat.v1.ConfigProto(
            device_count={'GPU': 0}
        )
        self.sess = tf.compat.v1.Session(config=session_config)
        self.set_name = set_name
        self.batch_size = batch_size
        self.pre_process = pre_process
        self.data = self._build_data()

    def _build_data(self):
        # Subclass responsibility.
        raise NotImplementedError()

    def get_batch(self, part='training'):
        assert hasattr(self.data, part + '_handle')
        assert hasattr(self.data, 'train_test_handle')
        assert hasattr(self.data, 'feed')
        part_handle = getattr(self.data, part + '_handle')
        return self.sess.run(self.data.feed,
                             feed_dict={self.data.train_test_handle: part_handle})

    def get_batch_tensor(self, part='training'):
        # Intentionally a no-op in the base class.
        pass
|
{"/vis_util.py": ["/model/tbh.py", "/util/distribution_tools.py", "/util/data/dataset.py", "/meta.py"], "/vis.py": ["/vis_util.py", "/util/data/dataset.py", "/meta.py", "/util/data/set_processor.py"], "/layer/encodec.py": ["/layer/binary_activation.py", "/util/distribution_tools.py"], "/util/data/make_data.py": ["/util/data/set_processor.py", "/meta.py"], "/model/tbh.py": ["/layer/binary_activation.py", "/util/distribution_tools.py", "/util/data/set_processor.py"], "/train/tbh_train.py": ["/model/tbh.py", "/util/data/dataset.py", "/util/distribution_tools.py", "/layer/twin_bottleneck.py", "/meta.py"], "/util/data/dataset.py": ["/meta.py", "/util/data/set_processor.py"]}
|
26,880
|
maximverwilst/deepimagehashing-VAE
|
refs/heads/main
|
/layer/binary_activation.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
def sigmoid_sign(logits, eps):
    """
    {0,1} sign function: sigmoid activation thresholded at a random eps.

    :param logits: bottom layer output
    :param eps: randomly sampled thresholds in [0, 1]
    :return: (binary code, sigmoid probabilities)
    """
    activation = 1.0 / (1.0 + tf.exp(-logits))
    bits = (tf.sign(activation - eps) + 1.0) / 2.0
    return bits, activation
@tf.custom_gradient
def custom_activation(mean, logvar, eps):
    """
    Reparameterized sample pushed through a hard {0,1} sign gate.

    :param mean: Gaussian mean (pre-activation)
    :param logvar: used directly as the noise scale (z = eps*logvar + mean)
    :param eps: sampled noise
    :return: binary code in {0, 1}
    """
    # Sigmoid-derivative-shaped surrogate factors for the backward pass —
    # the hard sign itself has zero gradient almost everywhere.
    probmean = tf.exp(-mean)/(tf.exp(-mean)+1)**2
    probvar = tf.exp(-logvar)/(tf.exp(-logvar)+1)**2
    code = eps * logvar + mean
    bbn = (tf.sign(code) + 1.0) / 2.0
    def grad(_d_code):
        """
        Surrogate (Gaussian) derivative for the three inputs.
        :param _d_code: gradients flowing back through the code
        :return: (d_mean, d_logvar, d_eps)
        """
        d_mean = probmean*_d_code
        d_logvar = probvar*eps*_d_code
        # eps gradient is passed straight through.
        d_eps = _d_code
        return d_mean, d_logvar, d_eps
    return bbn, grad
@tf.custom_gradient
def binary_activation(logits, eps):
    """
    Stochastic binary activation with a distributional custom gradient.

    :param logits: pre-activation values
    :param eps: random thresholds in [0, 1]
    :return: ([code, prob], grad_fn) — {0,1} code and sigmoid probability
    """
    code, prob = sigmoid_sign(logits, eps)
    def grad(_d_code, _d_prob):
        """
        Distributional derivative with Bernoulli probs
        :param _d_code: bp gradients through code
        :param _d_prob: bp gradients through prob
        :return: (d_logits, d_eps)
        """
        # Sigmoid derivative prob*(1-prob) applied to the combined signal.
        d_logits = prob * (1 - prob) * (_d_code + _d_prob)
        # Threshold gradient is passed straight through.
        d_eps = _d_code
        return d_logits, d_eps
    return [code, prob], grad
if __name__ == '__main__':
    # Smoke test: run binary_activation on a tiny tensor and print the
    # codes, probabilities and straight-through gradients.
    a = tf.constant([0.1, 0.2, -1, -0.7], dtype=tf.float32)
    b = tf.random.uniform([4])
    with tf.GradientTape() as tape:
        tape.watch(a)
        tape.watch(b)
        c, cc = binary_activation(a, b)
        d = tf.reduce_sum(c)
    e = tape.gradient(target=c, sources=a)
    print(b)
    print(c, e)
    print(cc)
    print(d)
|
{"/vis_util.py": ["/model/tbh.py", "/util/distribution_tools.py", "/util/data/dataset.py", "/meta.py"], "/vis.py": ["/vis_util.py", "/util/data/dataset.py", "/meta.py", "/util/data/set_processor.py"], "/layer/encodec.py": ["/layer/binary_activation.py", "/util/distribution_tools.py"], "/util/data/make_data.py": ["/util/data/set_processor.py", "/meta.py"], "/model/tbh.py": ["/layer/binary_activation.py", "/util/distribution_tools.py", "/util/data/set_processor.py"], "/train/tbh_train.py": ["/model/tbh.py", "/util/data/dataset.py", "/util/distribution_tools.py", "/layer/twin_bottleneck.py", "/meta.py"], "/util/data/dataset.py": ["/meta.py", "/util/data/set_processor.py"]}
|
26,881
|
RafalKornel/pictionar
|
refs/heads/main
|
/server/app/main/views.py
|
from flask.globals import current_app
from flask.helpers import send_from_directory
from flask_wtf.csrf import generate_csrf
from . import main
from flask import render_template, request, Response, json, jsonify
from flask_login import login_required, current_user, logout_user
from .. import db
from ..models import Group, User, Word, associations, Theme
from .utilities import validate_word, clean_input
from .forms import ThemeForm
import random, math
@main.route("/add", methods=["GET", "POST"])
@login_required
def add_word():
if request.method == "GET":
return { "csrf_token": generate_csrf() }
data = request.get_json()
words = clean_input(data["words"])
group_name = data["group"];
group = Group.query.filter_by(name=group_name).first()
if not current_user.is_authenticated or len(words) == 0:
return "User not authenticated", 405
if group is None:
return "Wrong group.", 400
if group.name not in current_user.group_names():
return "Not allowed", 405
added_words = []
for w in words:
if validate_word(w) and not Word.query.filter_by(group_id=group.id).filter_by(word=w).first():
added_words.append(w)
word = Word(word=w, user_id=current_user.get_id(), group_id=group.id)
db.session.add(word)
db.session.commit()
if len(added_words) == 0:
return "No words added.", 400
return {"added_words": added_words,
"count": len(added_words) }
@main.route("/words")
@login_required
def words_demo():
query = Word.query \
.join(associations, Word.group_id == associations.columns.group_id) \
.filter_by(user_id=current_user.id)
length = len(query.all())
words = [ query.offset( math.floor(random.random() * length)).first() for _ in range(27) ]
data = []
for w in words:
if w:
data.append( w.format() )
return jsonify(data)
@main.route("/bank")
@login_required
def retrieve_words():
request_args = request.args["groups"].split(",")
group_ids = []
for arg in request_args:
group = Group.query.filter_by(name = arg).first()
if group is None or group not in current_user.groups.all():
return "Wrong group or not allowed", 400
group_ids.append( group.id )
words = Word.query \
.join(associations, Word.group_id == associations.columns.group_id) \
.filter( associations.columns.group_id.in_(group_ids) ) \
.all()
result = ""
for w in words:
result += f"{w.word}, "
return jsonify(result)
@main.route("/add_theme", methods=["GET", "POST"])
@login_required
def add_theme():
if request.method == "GET":
form = ThemeForm()
return { "csrf_token": form.csrf_token.current_token }
data = request.get_json()
name = data["themeName"]
schema = [ "--gradient-light",
"--gradient-dark",
"--text-color",
"--form-color",
"--input-color" ]
for entry in schema:
if entry not in data:
return "Wrong theme format", 400
form = ThemeForm(
name = name,
gradient_light = data[schema[0]],
gradient_dark = data[schema[1]],
text_color = data[schema[2]],
main_color = data[schema[3]],
accent_color = data[schema[4]]
)
if form.validate():
theme = Theme(
name = name,
user_id = current_user.get_id(),
gradient_light = data[schema[0]],
gradient_dark = data[schema[1]],
text_color = data[schema[2]],
main_color = data[schema[3]],
accent_color = data[schema[4]]
)
db.session.add(theme)
db.session.commit()
return "Color added.", 200
return list(form.errors.values())[0][0], 400
# TODO: change method to delete perhaps?
@main.route("/remove_theme/<theme_name>")
@login_required
def remove_theme(theme_name):
    """Delete one of the current user's themes by name."""
    theme = (Theme.query
             .filter_by(user_id=current_user.get_id())
             .filter_by(name=theme_name)
             .first())
    if theme is None:
        return "Theme not found", 400
    db.session.delete(theme)
    db.session.commit()
    return "Removed theme", 200
|
{"/server/app/main/views.py": ["/server/app/__init__.py", "/server/app/models.py", "/server/app/main/utilities.py", "/server/app/main/forms.py"], "/server/app/models.py": ["/server/app/__init__.py"], "/server/kalambury.py": ["/server/app/__init__.py"], "/server/app/auth/views.py": ["/server/app/auth/forms.py", "/server/app/models.py", "/server/app/__init__.py"], "/server/app/__init__.py": ["/server/config.py"], "/server/app/main/forms.py": ["/server/app/models.py"], "/server/app/auth/forms.py": ["/server/app/models.py"]}
|
26,882
|
RafalKornel/pictionar
|
refs/heads/main
|
/migrations/versions/7c22990aa3e1_.py
|
"""empty message
Revision ID: 7c22990aa3e1
Revises: a60eaab47053
Create Date: 2021-01-05 23:55:18.741694
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7c22990aa3e1'
down_revision = 'a60eaab47053'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Adds the words.group_id -> groups.id foreign key (unnamed constraint;
    # SQLite needs a naming convention for named constraints).
    op.create_foreign_key(None, 'words', 'groups', ['group_id'], ['id'])
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Reverts the foreign key added in upgrade().
    op.drop_constraint(None, 'words', type_='foreignkey')
    # ### end Alembic commands ###
|
{"/server/app/main/views.py": ["/server/app/__init__.py", "/server/app/models.py", "/server/app/main/utilities.py", "/server/app/main/forms.py"], "/server/app/models.py": ["/server/app/__init__.py"], "/server/kalambury.py": ["/server/app/__init__.py"], "/server/app/auth/views.py": ["/server/app/auth/forms.py", "/server/app/models.py", "/server/app/__init__.py"], "/server/app/__init__.py": ["/server/config.py"], "/server/app/main/forms.py": ["/server/app/models.py"], "/server/app/auth/forms.py": ["/server/app/models.py"]}
|
26,883
|
RafalKornel/pictionar
|
refs/heads/main
|
/server/config.py
|
import os
from datetime import timedelta
# Directory containing this config module; sqlite files live next to it.
basedir = os.path.abspath(os.path.dirname(__file__))


class Config:
    """Settings shared by every environment."""
    SECRET_KEY = os.environ.get("SECRET_KEY", "something very hard to guess kalambury i guess")
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    PERMANENT_SESSION_LIFETIME = timedelta(days=31)
    WTF_CSRF_TIME_LIMIT = None


class DevelopmentConfig(Config):
    """Local development: debug on, sqlite file next to this module."""
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = os.environ.get(
        "DEV_DATABASE_URL",
        "sqlite:///" + os.path.join(basedir, "data-dev.sqlite"),
    )


class ProductionConfig(Config):
    """Production: debug off; DATABASE_URL env var or a local sqlite fallback."""
    DEBUG = False
    SQLALCHEMY_DATABASE_URI = os.environ.get(
        "DATABASE_URL",
        "sqlite:///" + os.path.join(basedir, "data.sqlite"),
    )


# Named configurations selectable by create_app().
config = {
    "development": DevelopmentConfig,
    "production": ProductionConfig,
    "default": DevelopmentConfig,
}
|
{"/server/app/main/views.py": ["/server/app/__init__.py", "/server/app/models.py", "/server/app/main/utilities.py", "/server/app/main/forms.py"], "/server/app/models.py": ["/server/app/__init__.py"], "/server/kalambury.py": ["/server/app/__init__.py"], "/server/app/auth/views.py": ["/server/app/auth/forms.py", "/server/app/models.py", "/server/app/__init__.py"], "/server/app/__init__.py": ["/server/config.py"], "/server/app/main/forms.py": ["/server/app/models.py"], "/server/app/auth/forms.py": ["/server/app/models.py"]}
|
26,884
|
RafalKornel/pictionar
|
refs/heads/main
|
/server/app/models.py
|
from flask_sqlalchemy import SQLAlchemy
from flask_login import UserMixin
from . import db, login_manager
from werkzeug.security import check_password_hash, generate_password_hash
@login_manager.user_loader
def load_user(user_id):
    # Flask-Login callback: rebuild the User object from the session's id.
    return User.query.get(int(user_id))
# Many-to-many join table linking users to the groups they belong to.
associations = db.Table("associations",
    db.Column("user_id", db.Integer, db.ForeignKey("users.id")),
    db.Column("group_id", db.Integer, db.ForeignKey("groups.id")),
)
class User(db.Model, UserMixin):
    """Application user: unique name, hashed password, group memberships,
    owned words and themes."""
    __tablename__ = "users"
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(30), unique=True, nullable=False)
    _password_hash = db.Column(db.String(200), nullable=False)
    # Legacy single-group column; memberships now live in `associations`.
    group_id = db.Column(db.Integer, db.ForeignKey("groups.id"))
    groups = db.relationship("Group",
                             secondary=associations,
                             backref=db.backref("users", lazy="dynamic"),
                             lazy="dynamic")
    words = db.relationship("Word", backref="user")
    themes = db.relationship("Theme", backref="user")

    def groups_parsed(self):
        """Serialize the user's groups as dicts for the API."""
        return [
            {
                "name": group.name,
                "key": group.key,
                "count": len(group.words)
            }
        for group in self.groups.all() ]

    def group_names(self):
        """Names of all groups this user belongs to."""
        return [ group.name for group in self.groups.all() ]

    def themes_parsed(self):
        """Map of theme name -> CSS color dict for the user's themes."""
        return { t.name: t.colors() for t in self.themes }

    @property
    def password(self):
        # Write-only attribute. Bug fix: the original *returned* an
        # AttributeError instance instead of raising it, so reading
        # user.password silently yielded an exception object.
        raise AttributeError("Attribute not accessible.")

    @password.setter
    def password(self, password):
        # Only the salted hash is ever stored.
        self._password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """Check a plaintext password against the stored hash."""
        return check_password_hash(self._password_hash, password)

    def __repr__(self):
        return f"<User {self.name} | groups {self.groups.all()}>"
class Group(db.Model):
    # A word group players can join via its secret `key`.
    __tablename__ = "groups"
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(30), unique=True)
    key = db.Column(db.String(30), unique=True)
    words = db.relationship("Word", backref="group")
    # Superseded by the `associations` many-to-many backref on User.groups.
    #users = db.relationship("User", backref="group")
    def __repr__(self):
        return f"<Group {self.name}>"
class Word(db.Model):
    # A single playable word, owned by a user and scoped to a group.
    __tablename__ = "words"
    id = db.Column(db.Integer, primary_key=True)
    word = db.Column(db.String(30), nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    group_id = db.Column(db.Integer, db.ForeignKey("groups.id"))
    def format(self):
        # API serialization: word text plus its author's name.
        return {"word": self.word,
                "user": self.user.name }
    def __repr__(self):
        return f"<Word {self.word}>"
class Theme(db.Model):
    # A per-user UI color theme; each color is a CSS hex string ("#rrggbb").
    __tablename__ = "themes"
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(30), nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    gradient_light = db.Column(db.String(7), nullable=False)
    gradient_dark = db.Column(db.String(7), nullable=False)
    text_color = db.Column(db.String(7), nullable=False)
    main_color = db.Column(db.String(7), nullable=False)
    accent_color = db.Column(db.String(7), nullable=False)
    def colors(self):
        # Serialize as CSS custom-property name -> value, as consumed by
        # the front end (matches the schema in views.add_theme).
        return {
            "--gradient-light": self.gradient_light,
            "--gradient-dark": self.gradient_dark,
            "--text-color": self.text_color,
            "--form-color": self.main_color,
            "--input-color": self.accent_color
        }
|
{"/server/app/main/views.py": ["/server/app/__init__.py", "/server/app/models.py", "/server/app/main/utilities.py", "/server/app/main/forms.py"], "/server/app/models.py": ["/server/app/__init__.py"], "/server/kalambury.py": ["/server/app/__init__.py"], "/server/app/auth/views.py": ["/server/app/auth/forms.py", "/server/app/models.py", "/server/app/__init__.py"], "/server/app/__init__.py": ["/server/config.py"], "/server/app/main/forms.py": ["/server/app/models.py"], "/server/app/auth/forms.py": ["/server/app/models.py"]}
|
26,885
|
RafalKornel/pictionar
|
refs/heads/main
|
/server/kalambury.py
|
from .app import create_app, db, models
from flask_migrate import Migrate
from flask import request, session
import click
app = create_app("production")
@app.shell_context_processor
def make_shell_context():
    """Pre-populate `flask shell` with the db handle and the main models."""
    shell_vars = {"db": db}
    shell_vars["User"] = models.User
    shell_vars["Group"] = models.Group
    shell_vars["Word"] = models.Word
    shell_vars["associations"] = models.associations
    shell_vars["Theme"] = models.Theme
    return shell_vars
@app.before_request
def before_request():
    """Mark the session permanent so it lives for PERMANENT_SESSION_LIFETIME.

    Fixed: the original assigned `session.permament` (typo), which merely
    attached an unused attribute and left the session non-permanent.
    """
    session.permanent = True
@app.cli.command("create_tables")
def create_tables():
    """CLI (`flask create_tables`): create every table known to the metadata."""
    db.create_all()
@app.cli.command("drop_tables")
def drop_tables():
    """CLI (`flask drop_tables`): drop all tables — destructive, dev use only."""
    db.drop_all()
@app.cli.command("migrate_groups")
def migrate_groups():
    """One-off data migration: copy each user's legacy scalar `group_id`
    into the newer many-to-many `user.groups` relationship.

    NOTE(review): if a user's group_id is None or stale, Group.query.get
    returns None and `user.groups.append(group)` would insert None —
    assumes every user has a valid group_id; confirm before running.
    """
    users = models.User.query.all()
    for user in users:
        group_id = user.group_id
        group = models.Group.query.get(group_id)
        # Idempotent: skip users already migrated.
        if group not in user.groups.all():
            user.groups.append(group)
            db.session.add(user)
    db.session.commit()
@app.cli.command("migrate_words")
def migrate_words():
    """One-off data migration: backfill Word.group_id from the author's
    first group for words created before the column existed.

    NOTE(review): raises AttributeError if a word's user has no groups
    (`.first()` returns None) — confirm the data state before running.
    """
    words = models.Word.query.all()
    for word in words:
        group_id = word.user.groups.first().id
        if word.group_id is None:
            word.group_id = group_id
            db.session.add(word)
    db.session.commit()
@app.cli.command("create_group")
@click.argument("group_name")
@click.argument("group_key")
def add_group(group_name, group_key):
    """CLI (`flask create_group NAME KEY`): create a group with a join key."""
    new_group = models.Group(name=group_name, key=group_key)
    db.session.add(new_group)
    db.session.commit()
|
{"/server/app/main/views.py": ["/server/app/__init__.py", "/server/app/models.py", "/server/app/main/utilities.py", "/server/app/main/forms.py"], "/server/app/models.py": ["/server/app/__init__.py"], "/server/kalambury.py": ["/server/app/__init__.py"], "/server/app/auth/views.py": ["/server/app/auth/forms.py", "/server/app/models.py", "/server/app/__init__.py"], "/server/app/__init__.py": ["/server/config.py"], "/server/app/main/forms.py": ["/server/app/models.py"], "/server/app/auth/forms.py": ["/server/app/models.py"]}
|
26,886
|
RafalKornel/pictionar
|
refs/heads/main
|
/server/app/main/utilities.py
|
import re
def validate_word(word):
    """Return True if *word* is a valid game entry.

    Valid means 3–29 characters consisting only of ASCII letters, digits,
    spaces and Polish diacritics.

    Fixed: uppercase 'Ć' was missing from the character class while its
    lowercase 'ć' was allowed, so words such as "Ćma" were rejected.
    NOTE(review): the upper bound is exclusive (len < 30) even though the
    Word column is String(30), so 30-char words are rejected — confirm intent.
    """
    return len(word) >= 3 and len(word) < 30 and (re.fullmatch("[a-zA-Z0-9ąćęłńóśźżĄĆĘŁŃÓŚŹŻ ]+", word) is not None)
def clean_input(words):
    """Split a comma-separated string into unique, whitespace-trimmed words.

    Fixed: removed a leftover `words.split(",")` statement whose result was
    discarded. Note the returned order is unspecified (set-based dedup).
    """
    return list(set(word.strip() for word in words.split(",")))
|
{"/server/app/main/views.py": ["/server/app/__init__.py", "/server/app/models.py", "/server/app/main/utilities.py", "/server/app/main/forms.py"], "/server/app/models.py": ["/server/app/__init__.py"], "/server/kalambury.py": ["/server/app/__init__.py"], "/server/app/auth/views.py": ["/server/app/auth/forms.py", "/server/app/models.py", "/server/app/__init__.py"], "/server/app/__init__.py": ["/server/config.py"], "/server/app/main/forms.py": ["/server/app/models.py"], "/server/app/auth/forms.py": ["/server/app/models.py"]}
|
26,887
|
RafalKornel/pictionar
|
refs/heads/main
|
/server/app/auth/views.py
|
from flask.helpers import make_response
from flask.templating import render_template
from flask_login.utils import login_required
from . import auth
from .forms import RegisterForm, LoginForm, CreateGroupForm, JoinGroupForm
from flask import redirect, url_for, request, Response
from flask_login import login_user, logout_user, current_user
from ..models import User, Group
from .. import db
@auth.route("/user")
def check_if_logged():
if current_user.is_authenticated:
return {
"name": current_user.name,
"groups": current_user.groups_parsed(),
"themes": current_user.themes_parsed()
}
return Response(status=401)
@auth.route("/create_group", methods=["GET", "POST"])
def create_group():
if request.method == "GET":
form = CreateGroupForm()
return { "csrf_token": form.csrf_token.current_token }
data = request.get_json()
form = CreateGroupForm(
group_name = data["group_name_create"],
group_key = data["group_key_create"])
if form.validate():
group = Group.query.filter( (Group.name == form.group_name.data) | (Group.key == form.group_key.data) ).first()
if group is not None:
return "Group already exists.", 400
group = Group(
name=form.group_name.data,
key=form.group_key.data)
db.session.add(group)
db.session.commit()
return {"name": group.name, "key": group.key}
return "Something went wrong", 400
@auth.route("/join_group", methods=["GET", "POST"])
@login_required
def join_group():
if request.method == "GET":
form = JoinGroupForm()
return { "csrf_token": form.csrf_token.current_token }
data = request.get_json()
form = JoinGroupForm(
group_key = data["group_key_join"])
if form.validate():
group = Group.query.filter_by(key=form.group_key.data).first()
if group is None:
return "Group doesn't exist", 400
if group in current_user.groups.all():
return "You are already in group.", 400
current_user.groups.append(group)
db.session.add(current_user)
db.session.commit()
return {"name": group.name}
return "Something went wrong.", 400
@auth.route("/leave_group", methods=["GET", "POST"])
@login_required
def leave_group():
    """GET: hand the SPA a CSRF token. POST: remove the current user from a group.

    Fixed: @login_required was placed ABOVE @auth.route, so auth.route
    registered the *unwrapped* view and the endpoint was effectively
    unprotected. The route decorator must be outermost (listed first) so
    the wrapped, login-checked function is what gets registered.

    NOTE(review): unlike leave() below, this variant does not block leaving
    the user's last remaining group — confirm whether that check should
    apply here as well.
    """
    if request.method == "GET":
        form = JoinGroupForm()
        return { "csrf_token": form.csrf_token.current_token }
    data = request.get_json()
    form = JoinGroupForm(
        group_key = data["group_key_leave"])
    if form.validate():
        group = Group.query.filter_by(key=form.group_key.data).first()
        if group is None:
            return "Group doesn't exist", 400
        user_groups = current_user.groups.all()
        if group not in user_groups:
            return "You are not in this group", 400
        current_user.groups.remove(group)
        db.session.add(current_user)
        db.session.commit()
        return "Succesfully left group.", 200
    return "Something went wrong.", 400
@auth.route("/leave_group/<group_key>")
@login_required
def leave(group_key):
group = Group.query.filter_by(key=group_key).first()
if group is None:
return "Group doesn't exist", 400
user_groups = current_user.groups.all()
if group not in user_groups:
return "You are not in this group", 400
if len(user_groups) == 1:
return "You have to be in at least one group", 400
current_user.groups.remove(group)
db.session.commit()
return "Succesfully left group.", 200
@auth.route("/login", methods=["GET", "POST"])
def login():
if request.method == "GET":
form = LoginForm()
return { "csrf_token": form.csrf_token.current_token }
if current_user.is_authenticated:
return Response(status=200)
data = request.get_json()
form = LoginForm(
user_name=data["user_name"],
user_pass=data["user_pass"])
if form.validate():
user = User.query.filter_by(name=form.user_name.data).first()
if user and user.verify_password(form.user_pass.data):
login_user(user)
return {"groups": user.groups_parsed()}
return "Username or password is incorrect.", 400
@auth.route("/logout")
@login_required
def logout():
print("logging out")
logout_user()
return { "logged": "false" };
@auth.route("/register", methods=["GET", "POST"])
def register():
if request.method == "GET":
form = RegisterForm()
return { "csrf_token": form.csrf_token.current_token }
data = request.get_json()
form = RegisterForm(
user_name=data["user_name"],
user_pass=data["user_pass"],
user_pass_repeat=data["user_pass_repeat"],
secret_key=data["secret_key"]
)
if form.validate_on_submit():
group = Group.query.filter_by(key=form.secret_key.data).first()
user = User(
name=form.user_name.data,
password=form.user_pass.data,
groups = [group])
db.session.add(user)
db.session.commit()
return Response(status=200)
return list(form.errors.values())[0][0], 400
|
{"/server/app/main/views.py": ["/server/app/__init__.py", "/server/app/models.py", "/server/app/main/utilities.py", "/server/app/main/forms.py"], "/server/app/models.py": ["/server/app/__init__.py"], "/server/kalambury.py": ["/server/app/__init__.py"], "/server/app/auth/views.py": ["/server/app/auth/forms.py", "/server/app/models.py", "/server/app/__init__.py"], "/server/app/__init__.py": ["/server/config.py"], "/server/app/main/forms.py": ["/server/app/models.py"], "/server/app/auth/forms.py": ["/server/app/models.py"]}
|
26,888
|
RafalKornel/pictionar
|
refs/heads/main
|
/server/app/__init__.py
|
from flask import Flask
from flask.helpers import url_for
from flask_login import login_manager
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_wtf.csrf import CSRFProtect
from ..config import config
import os
# Extension singletons, created unbound and attached to the app inside
# create_app() — the standard Flask application-factory pattern.
db = SQLAlchemy()
csrf = CSRFProtect()
login_manager = LoginManager()
def create_app(config_name):
    """Application factory: build and configure a Flask app.

    Args:
        config_name: key into the `config` mapping (e.g. "production").

    The client build output is served as static files from ./build, and
    both blueprints are mounted under the /api prefix.
    """
    app = Flask(__name__, static_folder=os.path.abspath("build"), static_url_path="/")
    app.config.from_object(config[config_name])
    app.config.from_object(config[config_name]) if False else None  # (no-op guard removed)
    db.init_app(app)
    csrf.init_app(app)
    login_manager.init_app(app)
    # Imported here rather than at module top to avoid a circular import:
    # the blueprints themselves import `db` from this module.
    from .main import main as main_blueprint
    from .auth import auth as auth_blueprint
    app.register_blueprint(main_blueprint, url_prefix="/api")
    app.register_blueprint(auth_blueprint, url_prefix="/api")
    @app.route("/")
    def index():
        # SPA entry point: client-side routing handles everything non-API.
        return app.send_static_file("index.html")
    return app
|
{"/server/app/main/views.py": ["/server/app/__init__.py", "/server/app/models.py", "/server/app/main/utilities.py", "/server/app/main/forms.py"], "/server/app/models.py": ["/server/app/__init__.py"], "/server/kalambury.py": ["/server/app/__init__.py"], "/server/app/auth/views.py": ["/server/app/auth/forms.py", "/server/app/models.py", "/server/app/__init__.py"], "/server/app/__init__.py": ["/server/config.py"], "/server/app/main/forms.py": ["/server/app/models.py"], "/server/app/auth/forms.py": ["/server/app/models.py"]}
|
26,889
|
RafalKornel/pictionar
|
refs/heads/main
|
/migrations/versions/a3dd231edad5_.py
|
"""empty message
Revision ID: a3dd231edad5
Revises: fad5a5dc43bb
Create Date: 2020-11-13 01:13:18.889466
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a3dd231edad5'
down_revision = 'fad5a5dc43bb'
branch_labels = None
depends_on = None
def upgrade():
    """Add Word.group_id and its foreign key to the groups table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('words', sa.Column('group_id', sa.Integer(), nullable=True))
    # Fixed: the auto-generated constraint pointed words.group_id at
    # users.group_id, but the model declares ForeignKey("groups.id")
    # (see app/models.py). Reference the groups table's primary key instead.
    op.create_foreign_key(None, 'words', 'groups', ['group_id'], ['id'])
    # ### end Alembic commands ###
def downgrade():
    """Drop Word.group_id and its foreign key."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): dropping an unnamed (None) constraint fails on SQLite and
    # requires a naming convention on other backends — confirm the target
    # database before relying on this downgrade.
    op.drop_constraint(None, 'words', type_='foreignkey')
    op.drop_column('words', 'group_id')
    # ### end Alembic commands ###
|
{"/server/app/main/views.py": ["/server/app/__init__.py", "/server/app/models.py", "/server/app/main/utilities.py", "/server/app/main/forms.py"], "/server/app/models.py": ["/server/app/__init__.py"], "/server/kalambury.py": ["/server/app/__init__.py"], "/server/app/auth/views.py": ["/server/app/auth/forms.py", "/server/app/models.py", "/server/app/__init__.py"], "/server/app/__init__.py": ["/server/config.py"], "/server/app/main/forms.py": ["/server/app/models.py"], "/server/app/auth/forms.py": ["/server/app/models.py"]}
|
26,890
|
RafalKornel/pictionar
|
refs/heads/main
|
/server/app/main/forms.py
|
from flask_wtf import FlaskForm
from wtforms import StringField, ValidationError
from wtforms.validators import DataRequired, Length, Regexp
from ..models import Theme
# Display-name rule: must start with a letter; letters, digits and spaces after.
name_regex = Regexp('^[A-Za-z][A-Za-z0-9 ]*$', 0,
                    "Field can only contain letters.")
# CSS hex color: '#' followed by exactly six hex digits (e.g. "#1a2b3c").
hex_regex = Regexp('^#[a-fA-F0-9]{6}$', 0, "Invalid color code")
# Shared validator chain for every color field; Length(7) mirrors the
# String(7) columns on the Theme model.
color_validators = [DataRequired(), Length(7), hex_regex]
class ThemeForm(FlaskForm):
    """Form for creating a color theme: a unique name plus five hex colors."""
    name = StringField("Name", validators=[DataRequired(), Length(1, 30), name_regex])
    gradient_light = StringField("gradient light", validators=color_validators)
    gradient_dark = StringField("gradient dark", validators=color_validators)
    text_color = StringField("text color", validators=color_validators)
    main_color = StringField("main color", validators=color_validators)
    accent_color = StringField("accent color", validators=color_validators)
    def validate_name(self, field):
        # WTForms convention: validate_<field> runs as an inline validator.
        # Theme names are globally unique, not unique per user.
        theme = Theme.query.filter_by(name=field.data).first()
        if theme:
            raise ValidationError("Theme already added")
|
{"/server/app/main/views.py": ["/server/app/__init__.py", "/server/app/models.py", "/server/app/main/utilities.py", "/server/app/main/forms.py"], "/server/app/models.py": ["/server/app/__init__.py"], "/server/kalambury.py": ["/server/app/__init__.py"], "/server/app/auth/views.py": ["/server/app/auth/forms.py", "/server/app/models.py", "/server/app/__init__.py"], "/server/app/__init__.py": ["/server/config.py"], "/server/app/main/forms.py": ["/server/app/models.py"], "/server/app/auth/forms.py": ["/server/app/models.py"]}
|
26,891
|
RafalKornel/pictionar
|
refs/heads/main
|
/migrations/versions/fad5a5dc43bb_.py
|
"""empty message
Revision ID: fad5a5dc43bb
Revises:
Create Date: 2020-11-13 00:41:40.233886
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'fad5a5dc43bb'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Initial schema: groups <- users (scalar group_id), users <- words."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Tables are created parents-first so the FK targets already exist.
    op.create_table('groups',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=30), nullable=True),
    sa.Column('key', sa.String(length=30), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('key'),
    sa.UniqueConstraint('name')
    )
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=30), nullable=False),
    sa.Column('_password_hash', sa.String(length=200), nullable=False),
    sa.Column('group_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['group_id'], ['groups.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    # NOTE(review): the migration makes words.word UNIQUE, but the Word model
    # shown in app/models.py does not declare unique=True — schema and model
    # disagree; confirm which is intended.
    op.create_table('words',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('word', sa.String(length=30), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('word')
    )
    # ### end Alembic commands ###
def downgrade():
    """Tear down the initial schema."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop in reverse dependency order: words -> users -> groups.
    op.drop_table('words')
    op.drop_table('users')
    op.drop_table('groups')
    # ### end Alembic commands ###
|
{"/server/app/main/views.py": ["/server/app/__init__.py", "/server/app/models.py", "/server/app/main/utilities.py", "/server/app/main/forms.py"], "/server/app/models.py": ["/server/app/__init__.py"], "/server/kalambury.py": ["/server/app/__init__.py"], "/server/app/auth/views.py": ["/server/app/auth/forms.py", "/server/app/models.py", "/server/app/__init__.py"], "/server/app/__init__.py": ["/server/config.py"], "/server/app/main/forms.py": ["/server/app/models.py"], "/server/app/auth/forms.py": ["/server/app/models.py"]}
|
26,892
|
RafalKornel/pictionar
|
refs/heads/main
|
/server/app/auth/forms.py
|
from flask.app import Flask
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired, EqualTo, Length, Regexp, ValidationError
from ..models import User, Group
# Shared character rule for names, passwords and group keys: must start with
# a letter; letters, digits, dots and underscores afterwards.
# Fixed: the user-facing validation message misspelled "underscores".
regex = Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
               "Field must have only letters, numbers, dots or underscores")
class LoginForm(FlaskForm):
    """Credentials form for /login; both fields use the shared charset rule.

    NOTE(review): applying `regex` to the password forbids special
    characters — confirm this restriction is intentional.
    """
    user_name = StringField("Name", validators=[DataRequired(), Length(1, 30), regex])
    user_pass = PasswordField("Password", validators=[DataRequired(), Length(1, 30), regex])
class CreateGroupForm(FlaskForm):
    """Form for /create_group: the group's display name and its secret join key."""
    group_name = StringField("Name", validators=[DataRequired(), Length(1, 30), regex])
    group_key = StringField("Key", validators=[DataRequired(), Length(1, 30), regex])
class JoinGroupForm(FlaskForm):
    """Single-field form used by /join_group and /leave_group: the group's secret key."""
    group_key = StringField("Key", validators=[DataRequired(), Length(1, 30), regex])
class RegisterForm(FlaskForm):
    """Registration form: unique username, password (entered twice), and the
    secret key of an existing group the new user will join.

    NOTE(review): applying `regex` to the password fields forbids special
    characters, which weakens passwords — confirm this is intentional.
    """
    user_name = StringField("Name", validators=[DataRequired(), Length(1, 30), regex])
    user_pass = PasswordField("Password", validators=[DataRequired(), Length(1, 30), regex])
    user_pass_repeat = PasswordField("Repeat password", validators=[
        DataRequired(), Length(1, 30), regex,
        EqualTo("user_pass", message="Passwords must match.")])
    secret_key = StringField("Secret key", validators=[DataRequired(), Length(1, 30), regex])
    def validate_user_name(self, field):
        # Inline validator (WTForms validate_<field> convention): names are unique.
        user = User.query.filter_by(name=field.data).first()
        if user:
            raise ValidationError("User already registered.")
    def validate_secret_key(self, field):
        # Registration requires the key of an already-existing group.
        group = Group.query.filter_by(key=field.data).first()
        if group is None:
            raise ValidationError("Group doesn't exist.")
|
{"/server/app/main/views.py": ["/server/app/__init__.py", "/server/app/models.py", "/server/app/main/utilities.py", "/server/app/main/forms.py"], "/server/app/models.py": ["/server/app/__init__.py"], "/server/kalambury.py": ["/server/app/__init__.py"], "/server/app/auth/views.py": ["/server/app/auth/forms.py", "/server/app/models.py", "/server/app/__init__.py"], "/server/app/__init__.py": ["/server/config.py"], "/server/app/main/forms.py": ["/server/app/models.py"], "/server/app/auth/forms.py": ["/server/app/models.py"]}
|
26,918
|
benterris/chordstransposer
|
refs/heads/master
|
/chordstransposer/config.py
|
# Regex alternation of every recognized tone name. Two-character names
# (sharps/flats) come before the single letters so e.g. "A#" matches whole
# instead of stopping at "A".
ALLTONES = "A#|B#|C#|D#|E#|F#|G#|Ab|Bb|Cb|Db|Eb|Fb|Gb|A|B|C|D|E|F|G"
# One row per key: the 12 chromatic degrees spelled with the accidentals
# conventional for that key (e.g. C# in the A row, Db in the Db row).
# Row order and per-row degree order are significant — get_scale matches on
# the first element and dest_tone maps by degree index.
SCALES = """A Bb B C C# D Eb E F F# G G#
Bb B C Db D Eb E F Gb G Ab A
B C C# D D# E F F# G G# A A#
C C# D Eb E F F# G Ab A Bb B
Db D Eb Fb F Gb G Ab A Bb B C
D Eb E F F# G Ab A Bb B C C#
Eb E F Gb G Ab A Bb Cb C Db D
E F F# G G# A A# B C C# D D#
F F# G Ab A Bb B C Db D Eb E
F# G G# A A# B C C# D D# E E#
G Ab A Bb B C C# D Eb E F F#
Ab A Bb Cb C Db D Eb Fb F Gb G"""
# Enharmonic equivalents, listed in both directions (A# <-> Bb, ...).
# Natural tones with no common enharmonic spelling (A, D, G) are absent.
EQUIVALENT = {'A#': 'Bb', 'Bb': 'A#', 'B': 'Cb', 'Cb': 'B', 'B#': 'C', 'C': 'B#', 'C#': 'Db', 'Db': 'C#',
              'D#': 'Eb', 'Eb': 'D#', 'E': 'Fb', 'Fb': 'E', 'E#': 'F', 'F': 'E#', 'F#': 'Gb', 'Gb': 'F#', 'G#': 'Ab', 'Ab': 'G#'}
# Characters allowed on a line that still counts as a chord line
# (tone letters, accidentals, chord qualities, and the space character).
CHORDSYMBOLS = 'ABCDEFGb#-majsu67913°+/ ()'
|
{"/chordstransposer/transposer.py": ["/chordstransposer/config.py"], "/chordstransposer/__init__.py": ["/chordstransposer/transposer.py"]}
|
26,919
|
benterris/chordstransposer
|
refs/heads/master
|
/chordstransposer/transposer.py
|
import re
import sys
from .config import ALLTONES, CHORDSYMBOLS, EQUIVALENT, SCALES
def transpose(text, from_tone, to_tone):
    """
    Transpose every chord line of *text* from one key to another, leaving
    lyric lines untouched.

    Args:
        text (str): the lyrics and chords of the song
        from_tone (str): the key the song is currently in
        to_tone (str): the key to transpose to

    Returns:
        str: the transposed song — each input line followed by a newline
    """
    src_scale = get_scale(from_tone)
    dst_scale = get_scale(to_tone)
    converted = [
        transpose_line(ln, src_scale, dst_scale) if is_chord_line(ln) else ln
        for ln in text.split('\n')
    ]
    return ''.join(ln + "\n" for ln in converted)
def transpose_by(text, semitones):
    """
    Transpose a song by a number of semitones (may be negative).

    The A chromatic scale is used as the reference to pick the destination
    key, then the work is delegated to transpose().

    Args:
        text (str): the lyrics and chords of the song
        semitones (int): how many semitones to shift by

    Returns:
        str: the transposed song
    """
    reference_scale = get_scale('A')
    return transpose(text, 'A', reference_scale[semitones % 12])
def get_scale(tone):
    """
    Return the 12-degree scale row for *tone* (or its enharmonic
    equivalent), so transposed chords use harmonically coherent spellings
    (e.g. C# rather than Db in the A scale).

    Args:
        tone: the tonic to look up (e.g. 'A')

    Returns:
        list[str]: the 12 degrees of the matching scale

    Raises:
        ValueError: if neither *tone* nor its enharmonic equivalent heads a
            known scale. (Fixed: the original *returned* the error string,
            which callers would then silently index character by character.)
    """
    # Natural tones without an enharmonic spelling are absent from EQUIVALENT.
    equivalent_tone = EQUIVALENT.get(tone)
    for scale in SCALES.split('\n'):
        degrees = scale.split(' ')
        if degrees[0] == tone \
                or (equivalent_tone and degrees[0] == equivalent_tone):
            return degrees
    raise ValueError('Error : not a recognized tone')
def dest_tone(original_tone, original_scale, dest_scale):
    """
    Map *original_tone* to the tone at the same scale degree in
    *dest_scale*, consulting the enharmonic table when the tone is not
    spelled exactly as in *original_scale*.

    Fixed: the original's error path crashed instead of reporting — it
    looked up EQUIVALENT[original_tone] unconditionally (KeyError for
    natural tones like 'A' that have no equivalent), concatenated a list
    into the message (TypeError), and then fell through to
    `dest_scale[degree]` with `degree` unbound (NameError).

    Raises:
        ValueError: if the tone (or its equivalent) is not in original_scale.
    """
    if original_tone in original_scale:
        degree = original_scale.index(original_tone)
    else:
        equivalent = EQUIVALENT.get(original_tone)
        if equivalent is None or equivalent not in original_scale:
            raise ValueError('tone ' + original_tone +
                             ' not found in scale ' + ' '.join(original_scale))
        degree = original_scale.index(equivalent)
    return dest_scale[degree]
def transpose_line(line, original_scale, dest_scale):
    """
    Transpose every tone in a chord line from original_scale to dest_scale,
    editing the line left-to-right.

    The algorithm keeps a shadow copy (`sample_line`) in which each
    processed tone is overwritten with 'X' placeholders sized to the
    *transposed* tone. That keeps the shadow and the real line the same
    length even when a tone changes width (e.g. 'A' -> 'Bb'), so an index
    found in the shadow remains valid in the real line, and
    already-replaced regions can never be matched again.
    """
    present_tones = re.findall(ALLTONES, line)
    sample_line = line
    while present_tones:
        tone_to_replace = present_tones.pop(0)
        index = sample_line.index(tone_to_replace)
        transposed_tone = dest_tone(
            tone_to_replace, original_scale, dest_scale)
        # Mask the span in the shadow using the NEW tone's length...
        sample_line = sample_line[:index] + 'X' * \
            len(transposed_tone) + sample_line[index + len(tone_to_replace):]
        # ...and apply the same-length replacement to the real line.
        line = line[:index] + transposed_tone + \
            line[index + len(tone_to_replace):]
    return line
def is_chord_line(line):
    """
    Heuristically decide whether *line* holds chords rather than lyrics:
    fewer than 10% of its characters may fall outside the chord alphabet.
    Empty lines are never chord lines.
    """
    if not line:
        return False
    # Proportion of non-chords symbols allowed in the line:
    tolerance = .1
    non_chord_count = sum(1 for ch in line if ch not in CHORDSYMBOLS)
    return non_chord_count / len(line) < tolerance
def read_from_input(file_path):
    """Read and return the whole contents of *file_path*.

    Fixed: uses a context manager so the handle is closed even if read()
    raises (the original leaked the handle on error). The platform-default
    text encoding is kept to match the original behavior.
    """
    with open(file_path, 'r') as f:
        return f.read()
def write_to_output(text, file_path=None):
    """Write *text* (plus a trailing newline, via print) to a file.

    Args:
        text: the content to write.
        file_path: destination path. Defaults to the original behavior of
            deriving "<argv[1]>.transposed.txt" from the command line —
            kept for backward compatibility, but now overridable so the
            helper is usable (and testable) outside the CLI.

    Fixed: uses a context manager so the handle is closed on error.
    """
    if file_path is None:
        file_path = sys.argv[1] + ".transposed.txt"
    with open(file_path, 'w') as f:
        print(text, file=f)
if __name__ == '__main__':
    # CLI entry: chordstransposer <song_file> <from_tone> <to_tone>
    if len(sys.argv) != 4:
        raise TypeError(
            'Wrong number of args: chordstransposer takes exactly 3 parameters')
    song_text = read_from_input(sys.argv[1])
    # Fixed: `tone in ALLTONES` was a *substring* test against the
    # "A#|B#|..." alternation string, so junk like "|" or a bare "b"
    # slipped through. Compare against the actual list of tone names.
    valid_tones = ALLTONES.split('|')
    if sys.argv[2] not in valid_tones:
        print('Error : value ' + sys.argv[2] + ' is not a tone')
    elif sys.argv[3] not in valid_tones:
        print('Error : value ' + sys.argv[3] + ' is not a tone')
    else:
        transposed_song = transpose(song_text, sys.argv[2], sys.argv[3])
        write_to_output(transposed_song)
|
{"/chordstransposer/transposer.py": ["/chordstransposer/config.py"], "/chordstransposer/__init__.py": ["/chordstransposer/transposer.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.