index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
72,580 | christhemart/jpdriller | refs/heads/master | /website/jpdriller/models.py | from django.contrib.auth.models import User
from django.db import models
class Vocabulary(models.Model):
id = models.IntegerField('Identifier', primary_key=True)
group = models.CharField('Group', max_length=20)
vocabulary = models.CharField('Vocabulary', max_length=20)
pronunciation = models.CharField('Pronunciation', max_length=20, null=True)
translation = models.CharField('Translation', max_length=20)
note = models.CharField('Note', max_length=20, null=True)
def __str__(self):
return self.vocabulary
class UserVocabStats(models.Model):
id = models.IntegerField('Identifier', primary_key=True)
user = models.ForeignKey(User, on_delete=models.CASCADE)
vocabulary = models.ForeignKey(Vocabulary, on_delete=models.CASCADE)
count = models.IntegerField('Count')
streak = models.IntegerField('Streak')
def __str__(self):
return str(self.vocabulary)
class UserSettings(models.Model):
user = models.OneToOneField(User, primary_key=True, on_delete=models.CASCADE)
weight = models.IntegerField('Against')
cutoff = models.IntegerField('Cutoff')
selection = models.CharField('Selection', max_length=1000, null=True)
last_vocab = models.ForeignKey(Vocabulary, on_delete=models.CASCADE, null=True)
def __str__(self):
return str(self.user)
| {"/website/jpdriller/views.py": ["/website/jpdriller/models.py"]} |
72,581 | christhemart/jpdriller | refs/heads/master | /website/jpdriller/migrations/0004_auto_20180504_1053.py | # Generated by Django 2.0.4 on 2018-05-04 15:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jpdriller', '0003_vocabulary_note'),
]
operations = [
migrations.AlterField(
model_name='vocabulary',
name='pronounciation',
field=models.CharField(max_length=20, null=True, verbose_name='Pronounciation'),
),
]
| {"/website/jpdriller/views.py": ["/website/jpdriller/models.py"]} |
72,582 | christhemart/jpdriller | refs/heads/master | /website/jpdriller/urls.py | from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name=''),
path('check_username', views.check_username, name='check_username'),
path('get_vocabulary', views.get_vocabulary, name='get_vocabulary'),
path('login', views.login_view, name='login'),
path('logout', views.logout_view, name='logout'),
path('naughty', views.naughty, name='naughty'),
path('register', views.register_view, name='register'),
path('stat_update', views.stat_update, name='stat_update'),
path('save_settings', views.save_settings, name='save_settings'),
path('save_groups', views.save_groups, name='save_groups'),
] | {"/website/jpdriller/views.py": ["/website/jpdriller/models.py"]} |
72,583 | christhemart/jpdriller | refs/heads/master | /website/website/urls.py | from django.contrib.auth.decorators import login_required
from django.contrib import admin
from django.urls import include, path
#admin.autodiscover()
#admin.site.login = login_required(admin.site.login)
urlpatterns = [
path('jpdriller/', include('jpdriller.urls')),
path('admin/', admin.site.urls),
] | {"/website/jpdriller/views.py": ["/website/jpdriller/models.py"]} |
72,584 | christhemart/jpdriller | refs/heads/master | /website/jpdriller/migrations/0003_vocabulary_note.py | # Generated by Django 2.0.4 on 2018-05-04 15:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jpdriller', '0002_katakana_vocabulary'),
]
operations = [
migrations.AddField(
model_name='vocabulary',
name='note',
field=models.CharField(max_length=20, null=True, verbose_name='Note'),
),
]
| {"/website/jpdriller/views.py": ["/website/jpdriller/models.py"]} |
72,585 | christhemart/jpdriller | refs/heads/master | /website/jpdriller/migrations/0001_initial.py | # Generated by Django 2.0.4 on 2018-05-03 15:11
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Hiragana',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False, verbose_name='Identifier')),
('hiragana', models.CharField(max_length=6, verbose_name='Hiragana')),
('romanji', models.CharField(max_length=6, verbose_name='Romanji')),
],
),
]
| {"/website/jpdriller/views.py": ["/website/jpdriller/models.py"]} |
72,586 | christhemart/jpdriller | refs/heads/master | /website/jpdriller/apps.py | from django.apps import AppConfig
class JpdrillerConfig(AppConfig):
name = 'jpdriller'
| {"/website/jpdriller/views.py": ["/website/jpdriller/models.py"]} |
72,594 | dhalpern13/csc238 | refs/heads/master | /main.py | from collections import Counter
from datetime import datetime
from math import log, sqrt
import pandas as pd
from scipy.stats import uniform, beta
from funcs import smallest_odd_larger, run_experiment
batch_size = 10000
iterations = 100
total_runs = batch_size * iterations
output_file = f'data/experiment-{total_runs}-runs-{datetime.now().strftime("%Y%m%d%H%M%S")}.csv'
n_vals = [3, 10, 50, 200, 500]
k_funcs = {
'const1': lambda n: 1,
'const3': lambda n: 3,
'logn': lambda n: smallest_odd_larger(log(n)),
'n^.4': lambda n: smallest_odd_larger(n ** .4),
'sqrt': lambda n: smallest_odd_larger(sqrt(3 / 4 * n)),
'n/2': lambda n: smallest_odd_larger(n / 2),
}
dists = {
'uniform[0,1]': uniform(0, 1),
'uniform[.1,.9]': uniform(.1, .8), # scipy has weird syntax, first param is lower end and second is length
'beta[2,2]': beta(2, 2),
}
if __name__ == '__main__':
rows = []
for n in n_vals:
# Compute actual k values for this n.
k_vals = {k_name: k_func(n) for k_name, k_func in k_funcs.items()}
for dist_name, dist in dists.items():
# Keep track of total number of correct for each k value.
counts = Counter()
for i in range(iterations):
if i % 10 == 0:
print(f'{dist_name}, n={n}: Iteration {i}')
counts += run_experiment(n, k_vals, dist, batch_size)
row = {'dist': dist_name, 'n': n, **counts}
rows.append(row)
df = pd.DataFrame(rows, columns=['dist', 'n', *k_funcs.keys()])
df.to_csv(output_file, index=False)
| {"/main.py": ["/funcs.py"]} |
72,595 | dhalpern13/csc238 | refs/heads/master | /funcs.py | from math import ceil
import numpy as np
from scipy.stats import bernoulli, rv_continuous
def smallest_odd_larger(m: float) -> int:
"""Given a float m, return the smallest odd number larger than m."""
c = ceil(m)
return c + (not c % 2)
def best_k_accuracies(expert_opinions: np.ndarray, k: int) -> int:
"""
Given an n x batch_size array of expert 0/1 judgements such that in each column,
experts are sorted by ascending competence, compute the number of rows where a
majority of the top k experts are correct.
"""
# Restrict to k most accurate experts (numpy only natively sorts ascending, so we take the last k).
top_k = expert_opinions[-k:, ::]
# Compute how many correct experts in each instance.
num_correct = top_k.sum(axis=0)
# Whole instance is correct if at least k/2 out of top k correct.
majority_correct = num_correct > k / 2
# Add up total number of correct.
return int(np.sum(majority_correct))
def run_experiment(n: int, k_vals: dict[str, int], dist: rv_continuous, batch_size: int) -> dict[str, int]:
"""
Run an experiment with the given number of experts, k_vals, distribution, and batch size.
:param n: Number of experts.
:param k_vals: Dictionary with keys that are the names of the k values (i.e., "sqrt", "logn") and
values that are the actual k values.
:param dist: Distribution to draw the expert competencies.
:param batch_size: Number of instances to run at once (makes computation faster as one big array).
:return: Dictionary with keys as k_value names, and values that are the number of instances
(out of batch_size total),that the top k experts got the correct answer.
"""
# Sample competencies
competencies = dist.rvs((n, batch_size))
# Sort by expert competencies
sorted_comps = np.sort(competencies, axis=0)
# Sample expert opinions from their competencies
expert_opinions = bernoulli(sorted_comps).rvs()
# Calculate number correct for each k.
return {k_name: best_k_accuracies(expert_opinions, k_val) for k_name, k_val in k_vals.items()}
| {"/main.py": ["/funcs.py"]} |
72,617 | cmyui/drinkwithfriends | refs/heads/master | /server/objects/glob.py | from typing import Any, List, Optional
from mysql.connector.pooling import MySQLConnectionPool
from time import time
from common.objects.user import User
start_time: float = time()
users: List[User] = [
#User(51, 'senor', 1, 100),
#User(432, 'ds', 0, 100),
#User(12, 'hth', 8, 100),
#User(5, 'yes', 1, 100),
#User(32, 'yeahahe', 1, 100)
]
db: Optional[MySQLConnectionPool] = None
config: Optional[Any] = None
debug: bool = False
max_bytes: int = 512
| {"/server/objects/glob.py": ["/common/objects/user.py"], "/common/objects/inventory.py": ["/common/objects/bottle.py"], "/client/client.py": ["/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py", "/common/helpers/packetHelper.py"], "/server/server.py": ["/common/helpers/packetHelper.py", "/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py"], "/common/helpers/packetHelper.py": ["/common/objects/user.py"]} |
72,618 | cmyui/drinkwithfriends | refs/heads/master | /client/objects/glob.py | from time import time
max_bytes: int = 512
ip: str = ''#'51.79.17.191'
port: int = 6999
start_time: float = time()
debug: bool = False
| {"/server/objects/glob.py": ["/common/objects/user.py"], "/common/objects/inventory.py": ["/common/objects/bottle.py"], "/client/client.py": ["/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py", "/common/helpers/packetHelper.py"], "/server/server.py": ["/common/helpers/packetHelper.py", "/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py"], "/common/helpers/packetHelper.py": ["/common/objects/user.py"]} |
72,619 | cmyui/drinkwithfriends | refs/heads/master | /common/objects/inventory.py | from typing import Optional, List
from random import randint
from common.objects.bottle import Bottle
from colorama import init as clr_init, Fore as colour
clr_init(autoreset=True)
class BottleCollection:
def __init__(self, name: str = None, bottles: Optional[List[Bottle]] = []) -> None:
self.name = name
self.bottles = bottles
self.unit: str = 'bottles' # SHIT design
#self.finished_bottles: Optional[List[Bottle]] = None # maybe later
@property
def is_empty(self) -> bool: # cursed
return not len(self.bottles)
#def __repr__(self) -> None:
# if not len(self.bottles): return 'Your inventory is empty!\n'
# return '\n'.join(f'#{i + 1}. [{b.volume}ml @ {b.abv}%] {b.name} ' for i, b in enumerate(self.bottles))
def display(self) -> None:
print(
f'{colour.GREEN}{self.name}',
'=========', sep='\n'
)
if not self.bottles:
print(f'You have no {self.unit} to display!')
return
print('\n'.join(f'#{i + 1} - [{b.volume}ml @ {b.abv}%] {b.name}' for i, b in enumerate(self.bottles)))
""" Add a bottle to inventory. """
def __iadd__(self, other: Bottle) -> List[Bottle]:
self.bottles.append(other)
return self.bottles
def __add__(self, other: Bottle) -> List[Bottle]:
self.bottles.append(other)
return self.bottles
""" Remove a bottle from inventory. """
def __isub__(self, other: Bottle) -> List[Bottle]:
self.bottles.remove(other)
return self.bottles
def __sub__(self, other: Bottle) -> List[Bottle]:
self.bottles.remove(other)
return self.bottles
def get_bottle(self, i: int) -> Bottle:
return self.bottles[i - 1] # Choice will be from list, so +1 (__repr__)
| {"/server/objects/glob.py": ["/common/objects/user.py"], "/common/objects/inventory.py": ["/common/objects/bottle.py"], "/client/client.py": ["/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py", "/common/helpers/packetHelper.py"], "/server/server.py": ["/common/helpers/packetHelper.py", "/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py"], "/common/helpers/packetHelper.py": ["/common/objects/user.py"]} |
72,620 | cmyui/drinkwithfriends | refs/heads/master | /common/objects/shot.py | #from typing import Optional
#
#class Shot:
# def __init__(self, name: Optional[str] = None, volume: int = 0, abv: float = 0.0) -> None:
# self.name = name
# self.volume = volume
# self.abv = abv
#
# @property
# def cmyunits(self) -> float:
# """
# 'cmyunits' are simply a measurement of volume * abv.
# This effectively allows a user to compare how much alcohol they have drank
# vs another user.
#
# For exmaple:
# 400ml of 5% beer = 2000 cmyunits (400 / 5 = 2000)
# 2000 cmyunits in 40% vodka = 50ml (2000 / 40 = 50)
#
# So we can say 400ml beer ~= 50ml of vodka (a bit above a standard shot).
# This obviously doesn't account for advanced things such as how much the
# watered down beer would sober you up, but we're not trying to be THAT precise.
# """
# if not any(self.volume, self.abv): return 0.0
# return self.volume * self.abv
| {"/server/objects/glob.py": ["/common/objects/user.py"], "/common/objects/inventory.py": ["/common/objects/bottle.py"], "/client/client.py": ["/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py", "/common/helpers/packetHelper.py"], "/server/server.py": ["/common/helpers/packetHelper.py", "/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py"], "/common/helpers/packetHelper.py": ["/common/objects/user.py"]} |
72,621 | cmyui/drinkwithfriends | refs/heads/master | /common/db/dbConnector.py | from typing import Tuple, Dict, Optional, Union
from mysql.connector import errors
from mysql.connector import pooling
class SQLPool:
def __init__(self, pool_size: int, config: Dict[str, str]) -> None:
self.pool = pooling.MySQLConnectionPool(
pool_name = 'Aika',
pool_size = pool_size,
pool_reset_session = True,
autocommit = True,
**config
)
return
def execute(self, query: str, params: Tuple[Union[str, int, float]]=()) -> Optional[int]:
cnx = self.pool.get_connection()
if not cnx:
print('MySQL Error: Failed to retrieve a worker!')
return None
cursor = cnx.cursor()
cursor.execute(query, params)
# Discard result.
cursor.fetchmany()
res = cursor.lastrowid
if cursor: cursor.close()
if cnx: cnx.close()
return res
def fetch(self, query: str, params: Tuple[Union[str, int, float]] = (), _all: bool = False) -> Optional[Dict[str, Union[str, int, float]]]:
cnx = self.pool.get_connection()
if not cnx:
print('MySQL Error: Failed to retrieve a worker!')
return None
cursor = cnx.cursor(dictionary=True)
cursor.execute(query, params)
res = cursor.fetchall() if _all else cursor.fetchone()
if cursor: cursor.close()
if cnx: cnx.close()
return res
def fetchall(self, query: str, params: Tuple[Union[str, int, float]] = ()) -> Optional[Dict[str, Union[str, int, float]]]:
return self.fetch(query, params, True)
| {"/server/objects/glob.py": ["/common/objects/user.py"], "/common/objects/inventory.py": ["/common/objects/bottle.py"], "/client/client.py": ["/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py", "/common/helpers/packetHelper.py"], "/server/server.py": ["/common/helpers/packetHelper.py", "/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py"], "/common/helpers/packetHelper.py": ["/common/objects/user.py"]} |
72,622 | cmyui/drinkwithfriends | refs/heads/master | /common/constants/privileges.py | USER_PERMS: int = 1
ADMIN_PERMS: int = 2# << 0
| {"/server/objects/glob.py": ["/common/objects/user.py"], "/common/objects/inventory.py": ["/common/objects/bottle.py"], "/client/client.py": ["/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py", "/common/helpers/packetHelper.py"], "/server/server.py": ["/common/helpers/packetHelper.py", "/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py"], "/common/helpers/packetHelper.py": ["/common/objects/user.py"]} |
72,623 | cmyui/drinkwithfriends | refs/heads/master | /common/constants/dataTypes.py | # Byte
#PAD_BYTE = 0
BYTE = 1
# Integer
INT16 = 2
UINT16 = 3
#INT = 4
#UINT = 5
INT32 = 6
UINT32 = 7
INT64 = 8
UINT64 = 9
# Floating point
FLOAT = 10
DOUBLE = 11
# String
STRING = 12 # ULEB128
# List of integers
INT16_LIST = 13
INT32_LIST = 14
# Custom types, for our classes.
USERINFO = 15
USERINFO_LIST = 16
DRINK = 17 # Used for both shots and bottles
DRINK_LIST = 18 #
| {"/server/objects/glob.py": ["/common/objects/user.py"], "/common/objects/inventory.py": ["/common/objects/bottle.py"], "/client/client.py": ["/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py", "/common/helpers/packetHelper.py"], "/server/server.py": ["/common/helpers/packetHelper.py", "/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py"], "/common/helpers/packetHelper.py": ["/common/objects/user.py"]} |
72,624 | cmyui/drinkwithfriends | refs/heads/master | /client/client.py | from typing import List, Tuple, Optional, Union
from socket import socket, AF_INET, SOCK_STREAM, error as sock_err
from sys import exit, stdout
from time import sleep, time
from re import match as re_match
from numpy import arange # float range() function
#from bcrypt import hashpw, gensalt
from random import randint
from getpass import getpass
from objects import glob
from common.objects.user import User
from common.objects.bottle import Bottle
from common.objects.inventory import BottleCollection
from common.constants import dataTypes, packets, privileges
from common.helpers.packetHelper import Packet, Connection
#from objects import glob # not yet needed
from colorama import init as clr_init, Fore as colour
clr_init(autoreset=True)
class Client:
def __init__(self, start_loop: bool = False, debug: bool = False):
""" Client Information. """
self.version = 100
self.debug = debug
self.lines_printed = 0
""" User information (our client user). """
self.user = User() # User object for the player. Will be used if playing online.
self.inventory = BottleCollection('Inventory', []) # Our user's inventory. Will be used if playing online.
self.ledger = BottleCollection('Ledger', [])
self.ledger.unit = 'shots'
self.online_users: List[User] = [] # A list of online users connected to the server. Inclues our user.
""" Start up the game loop. """
print(f'[CLNT] {colour.LIGHTBLUE_EX}Drink with Friends v{self.version / 100:.2f}\n')
if start_loop:
self._gameLoop()
return
@property
def is_online(self) -> bool:
return self.user and self.user.id
def __del__(self) -> None: # If we're logged in, log out before closing.
if self.is_online:
self.make_request(packets.client_logout)
print('Thanks for playing! <3')
return
def _gameLoop(self) -> None:
while True:
self.print_main_menu()
choice_count: int = 2
if self.is_online:
if self.user.privileges & privileges.USER_PERMS: choice_count += 6
if self.user.privileges & privileges.ADMIN_PERMS: choice_count += 2
choice: int = self.get_user_int(0, choice_count)
if not choice: break # Exit
self.move_cursor_up(1 + choice_count + self.lines_printed, 43)
self.lines_printed = 0
if not self.is_online: # *** Not logged in. ***
if choice == 1: # Login
self.make_request(packets.client_login)
if not self.is_online: continue # failed to login
self.log_debug('Getting inventory.')
self.make_request(packets.client_getInventory)
self.log_debug('Getting ledger.')
self.make_request(packets.client_getLedger)
elif choice == 2: # Register
self.make_request(packets.client_registerAccount)
else: # *** Logged in. ***
if choice == 1: # Logout
self.make_request(packets.client_logout)
elif choice == 2: # Request online users list.
self.make_request(packets.client_getOnlineUsers)
self.print_online_users()
elif choice == 3: # Add a bottle to inventory.
self.make_request(packets.client_addBottle)
self.make_request(packets.client_getInventory)
elif choice == 4: # Display your inventory.
self.inventory.display()
self.lines_printed += (self.inventory.bottles.__len__() + 2 if self.inventory.bottles else 3)
elif choice == 5: # Take a shot
if self.inventory.is_empty:
print(
'Your inventory is empty!',
'How are you supposed to take a shot? x('
)
continue
self.make_request(packets.client_takeShot)
#self.make_request(packets.client_getLedger)
elif choice == 6:
self.ledger.display() # Check your ledger.
self.lines_printed += (self.ledger.bottles.__len__() + 2 if self.ledger.bottles else 3)
# Admin
elif choice == 7: pass # Ban user?
elif choice == 8: pass # Shutdown server?
return
def make_request(self, packetID: int) -> None:
with socket(AF_INET, SOCK_STREAM) as sock:
try: sock.connect((glob.ip, glob.port))
except sock_err as err:
self.log_error(f'Failed to establish a connection to the server: {err}.\n')
return
# We've established a valid connection.
with Packet(packetID) as packet:
self._handle_connection(sock, packet)
return
def _handle_connection(self, sock: socket, packet: Packet) -> None:
if packet.id == packets.client_login:
username: str = input('Username: ') # newline to space from menu
password: str = getpass()
print() # Print new line to split from.. everything
packet.pack_data([
[username, dataTypes.STRING],
[password, dataTypes.STRING],
[self.version, dataTypes.INT16]
])
del password
sock.send(packet.get_data())
del packet
resp: int = ord(sock.recv(1))
if resp == packets.server_generalSuccess:
self.log_info('Authenticated.')
try: conn = Connection(sock.recv(glob.max_bytes))
except:
self.log_error('Connection died - server_generalSuccess.')
del resp, username
return
with Packet() as packet:
packet.read_data(conn.body)
self.user.id, self.user.privileges = packet.unpack_data([dataTypes.INT16, dataTypes.INT16])#[0]
self.online_users = [User(u[1], u[0], u[2]) for u in packet.unpack_data([dataTypes.USERINFO_LIST])]
self.user.username = username
self.print_online_users()
elif resp == packets.server_generalFailure:
self.log_error('Invalid login parameters.')
elif resp == packets.server_generalNoSuchUsername:
self.log_error('No such username found.')
elif resp == packets.server_generalIncorrectPassword:
self.log_error('Incorrect password.')
elif resp == packets.server_generalBanned:
self.log_error('Your account has been banned.')
else: self.log_error(f'Invalid packetID {resp}')
#input('Press enter to continue..')
#print('\033[F', ' ' * 25, sep='') # lol i should just make a function for wiping lines already..
del resp, username
return
elif packet.id == packets.client_logout:
self.log_info('Logging out..', colour.YELLOW)
packet.pack_data([[self.user.id, dataTypes.INT16]])
sock.send(packet.get_data())
del packet
self.user.__del__()
return
elif packet.id == packets.client_getOnlineUsers:
sock.send(packet.get_data())
del packet
try: conn = Connection(sock.recv(glob.max_bytes)) # TODO: with conn
except:
self.log_error('Connection died - client_getOnlineUsers.')
return
with Packet() as packet:
packet.read_data(conn.body)
resp: Tuple[Union[int, str]] = packet.unpack_data([dataTypes.USERINFO_LIST])
if len(resp) != 1: return # TODO: response
if (x for x in (_x[1] for _x in resp) if x not in (u.username for u in self.online_users)):
self.online_users = [User(u[1], u[0], u[2]) for u in resp]
del resp
elif packet.id == packets.client_registerAccount:
print(
'Registration',
'------------',
f'{colour.RED}NOTE: currently in plaintext, do not use a real password!\n',
sep='\n'
)
username: str = self.get_user_str_lenrange(3, 16, 'Username: ')
password: str = self.get_user_str_lenrange(6, 32, 'Password: ')
packet.pack_data([
[username, dataTypes.STRING],
[password, dataTypes.STRING]
])
sock.send(packet.get_data())
del packet
resp: int = ord(sock.recv(1))
if resp == packets.server_generalSuccess:
self.log_error('Account successfully created.')
elif resp == packets.server_registrationUsernameTaken:
self.log_error('That username is already taken!')
elif resp == packets.server_generalFailure:
self.log_error('Invalid parameters.')
del username, password
elif packet.id == packets.client_addBottle:
print(
'Add bottle to inventory',
'-----------------------',
sep='\n'
)
b = Bottle(
self.get_user_str_lenrange(1, 32, 'Bottle name: '),
self.get_user_int(50, 5000, 'ml: '),
self.get_user_float(1.0, 100.0, 'ABV: ')
)
packet.pack_data([
[self.user.id, dataTypes.INT16],
[[b.name, b.volume, b.abv], dataTypes.DRINK]
])
sock.send(packet.get_data())
del packet
resp: int = ord(sock.recv(1))
if resp == packets.server_generalSuccess:
self.inventory += b
# TODO: nice print function for inventory
self.log_info(f'"{b.name}" has been added to your inventory.')
elif resp == packets.server_generalFailure:
self.log_error('Server failed to add bottle to database.')
del b
elif packet.id == packets.client_getInventory:
#print(f'{colour.YELLOW}Requesting inventory from server..')
packet.pack_data([[self.user.id, dataTypes.INT16]]) # UserID
sock.send(packet.get_data())
del packet
resp: int = ord(sock.recv(1))
if resp == packets.server_alreadyUpToDate:
self.log
return
del resp
try: conn = Connection(sock.recv(glob.max_bytes)) # TODO: with conn
except:
self.log_error('Connection died - client_getInventory.')
return
with Packet() as packet:
packet.read_data(conn.body)
resp = packet.unpack_data([dataTypes.DRINK_LIST])
self.inventory = BottleCollection('Inventory', [Bottle(*b) for b in resp])
elif packet.id == packets.client_takeShot:
if self.get_user_bool('Would you like to choose your drink?\n>> '):
self.inventory.display()
b = self.inventory.get_bottle(self.get_user_int(1, len(self.inventory.bottles)))
else:
b = self.inventory.get_bottle(randint(1, len(self.inventory.bottles)))
# TODO: add choice for amt?
vol: int = randint(30, 85) # 40.5ml = standard shot
if vol > b.volume: vol = b.volume
# Send to server to update inventory
packet.pack_data([
[self.user.id, dataTypes.INT16],
[[b.name, b.volume, b.abv], dataTypes.DRINK]
])
sock.send(packet.get_data())
del packet
resp: int = ord(sock.recv(1))
if resp == packets.server_generalFailure:
self.log_error('An error has occurred while syncing with the server. Your inventory has not been modified.')
return
del resp
print(
"\nHere's what the doctor ordered:",
f'Drink: {b.name} [{b.abv}%]',
f'Volume: {vol}ml [~{vol * b.abv:.0f} cmynts] ({(vol / b.volume) * 100:.0f}% of bottle)',
'Bottoms up!', sep='\n'
)
self.lines_printed += 5
b.volume -= vol
self.ledger += b
if not b.volume: # finished the bottle
self.log_info(f"You've finished your {b.name}!", colour.YELLOW)
self.inventory -= b
elif packet.id == packets.client_getLedger:
packet.pack_data([
[self.user.id, dataTypes.INT16]
])
sock.send(packet.get_data())
del packet
resp: int = ord(sock.recv(1))
if resp == packets.server_alreadyUpToDate:
return
del resp
try: conn = Connection(sock.recv(glob.max_bytes)) # TODO: with conn
except:
self.log_error('Connection died - client_getLedger.')
return
with Packet() as packet:
packet.read_data(conn.body)
resp = packet.unpack_data([dataTypes.DRINK_LIST])
self.ledger = BottleCollection('Ledger', [Bottle(*b) for b in resp])
self.ledger.unit = 'shots'
del resp
else: # Unknown packet ID.
self.log_warn(f'Recieved an unknown packet. (ID: {packet.id})')
return
def get_user_str_lenrange(self, min: int, max: int, message: Optional[str] = None) -> str: # ugly name but what else??
"""
Get a string with the length in range min-max (inclusive) from stdin.
"""
while True:
tmp: str = input(message if message else '>')
if len(tmp) in range(min, max + 1): return tmp
# TODO: print backspace to clean up previous failures? keep menu on screen..
self.log_error(f'Input string must be between {min}-{max} characters.')
def get_user_int(self, min: int, max: int, message: Optional[str] = None) -> int:
"""
Get a single integer in range min-max (inclusive) from stdin.
"""
while True:
tmp: str = input(message if message else '> ')
if re_match(r'^-?\d+$', tmp) and int(tmp) in range(min, max + 1): return int(tmp)
# TODO: print backspace to clean up previous failures? keep menu on screen..
self.log_error('Please enter a valid value.')
def get_user_float(self, min: float, max: float, message: Optional[str] = None) -> float:
"""
Get a single float in range min-max (inclusive) from stdin.
"""
while True:
tmp: str = input(message if message else '> ')
if re_match(r'^-?\d+(?:\.\d+)?$', tmp) and float(tmp) in arange(min, max + 1): return float(tmp)
# TODO: print backspace to clean up previous failures? keep menu on screen..
self.log_error('Please enter a valid value.')
def get_user_bool(self, message: Optional[str] = None) -> bool:
"""
Get a bool from stdin (message must contain 'y' and not contain 'n').
"""
while True:
tmp: str = input(message if message else '> ')
if not re_match(r'^(?:y|n)(?:.*)$', tmp):
self.log_error('Please enter a valid value.')
continue
return tmp.startswith('y')
def print_main_menu(self) -> None:
print('', # Print a space at the top of menu.
f'{colour.CYAN}<- {colour.YELLOW}Main Menu {colour.CYAN}->',
sep='\n'
)
if not self.is_online: # *** Not logged in. ***
print(
'1. Login',
'2. Register an account.',
sep='\n'
)
else: # *** Logged in. ***
print(
'1. Logout | 2. List online users.',
'3. Add bottle. | 4. Display your inventory',
'5. Take a shot. | 6. Check your ledger.',
sep='\n'
)
# Just ideas, not finished.
#if self.user.privileges & privileges.ADMIN_PERMS: print(
# '7. Kick user.',
# '8. Ban user.',
# '9. Restart server.',
# '10. Shutdown server.', sep='\n'
#)
print('0. Exit.', end='\n\n')
return
def print_online_users(self) -> None:
print(f'\n{colour.CYAN}<- {colour.YELLOW}Online Users {colour.CYAN}->')
for u in self.online_users: print(f'{u.id} - {u.username}.')
self.lines_printed += self.online_users.__len__() + 2
return
@staticmethod
def move_cursor_up(count: int = 1, spaces: int = 0) -> None:
for _ in range(count + 1):
stdout.write('\033[F')
if not spaces: continue
stdout.write(' ' * spaces)
if spaces: stdout.write('\r')
return
def log_info(self, message: str, col: int = colour.LIGHTBLUE_EX) -> None:
self._print('[INFO]', message, col)
return
def log_warn(self, message: str) -> None:
self._print('[WARN]', message, colour.YELLOW)
return
def log_error(self, message: str) -> None:
self._print('[ERR]', message, colour.LIGHTRED_EX)
return
def log_debug(self, message: str) -> None:
if self.debug:
self._print('[DEBUG]', message, colour.LIGHTBLUE_EX)
return
def _print(self, prefix: str, message: str, col: int) -> None:
print(f'[{prefix}] {col}{message}')
for c in message: # add line for each \n found in message as well
if c == '\n': self.lines_printed += 1
self.lines_printed =+ 1
return
if __name__ == '__main__':
Client(True, False)#input('Launch in debug?\n>> ').startswith('y'))
| {"/server/objects/glob.py": ["/common/objects/user.py"], "/common/objects/inventory.py": ["/common/objects/bottle.py"], "/client/client.py": ["/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py", "/common/helpers/packetHelper.py"], "/server/server.py": ["/common/helpers/packetHelper.py", "/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py"], "/common/helpers/packetHelper.py": ["/common/objects/user.py"]} |
72,625 | cmyui/drinkwithfriends | refs/heads/master | /common/constants/packets.py | # If a packet starts with 'client', it is client -> server.
# Otherwise, it is server -> client.
client_login = 1
client_logout = 2
server_generalSuccess = 3
server_generalFailure = 4
server_generalInvalidArguments = 5
server_generalNoSuchUsername = 6
server_generalIncorrectPassword = 7
server_generalBanned = 8
# Server basic user information to the client (userID, online players).
server_sendUserInfo = 9
# Get an INT16_LIST of all users online.
client_getOnlineUsers = 10
server_sendOnlineUsers = 11
client_registerAccount = 12
server_registrationUsernameTaken = 13
# Create a new bottle.
client_addBottle = 14
# Request a list of our inventory of incomplete bottles.
client_getInventory = 15
server_sendInventory = 16
# Take a shot! B)
client_takeShot = 17
client_getLedger = 18
server_sendLedger = 19
server_alreadyUpToDate = 20
| {"/server/objects/glob.py": ["/common/objects/user.py"], "/common/objects/inventory.py": ["/common/objects/bottle.py"], "/client/client.py": ["/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py", "/common/helpers/packetHelper.py"], "/server/server.py": ["/common/helpers/packetHelper.py", "/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py"], "/common/helpers/packetHelper.py": ["/common/objects/user.py"]} |
72,626 | cmyui/drinkwithfriends | refs/heads/master | /server/server.py | # -*- coding: utf-8 -*-
from typing import List, Dict, Optional
from socket import socket, AF_INET, SOCK_STREAM
from os import path, chmod, remove
from json import loads
from bcrypt import checkpw, hashpw, gensalt
import mysql.connector
from common.constants import dataTypes
from common.constants import packets
from common.helpers.packetHelper import Packet, Connection
from common.db import dbConnector
from objects import glob
from common.objects.user import User
from common.objects.bottle import Bottle
from common.objects.inventory import BottleCollection
from colorama import init as clr_init, Fore as colour
clr_init(autoreset=True)
with open(f'{path.dirname(path.realpath(__file__))}/config.json', 'r') as f:
glob.config = loads(f.read())
class Server(object):
    """Drink with Friends TCP server.

    Accepts one connection at a time on glob.config['port'], parses each
    HTTP-framed packet (see common.helpers.packetHelper) and dispatches on
    the packet id. `self.users` holds currently logged-in users; `self.db`
    is a small MySQL connection pool.
    """

    def __init__(self, start_loop: bool = False):
        self.served: int = 0  # Amount of connections served.
        self.users: List[User] = []

        # Attempt connection to MySQL.
        self.db = dbConnector.SQLPool(
            pool_size = 4,
            config = {
                'user': glob.config['mysql_user'],
                'password': glob.config['mysql_passwd'],
                'host': glob.config['mysql_host'],
                'port': 3306,
                'database': glob.config['mysql_database']
            }
        )

        # Create socket (check and remove if prior version exists).
        if path.exists(glob.config['socket_location']):
            remove(glob.config['socket_location'])

        # Listening socket; `self.sock` below is the per-connection socket.
        self._sock: socket = socket(AF_INET, SOCK_STREAM)
        self._sock.bind(('', glob.config['port']))
        self._sock.listen(glob.config['concurrent_connections'])
        self.sock: Optional[socket] = None  # Current 'instance' socket.

        print(f'[SERV] {colour.CYAN}Drink with Friends v{glob.config["version"]:.2f} @ localhost:{glob.config["port"]}')

        if start_loop:
            self.handle_connections()
        return

    def __del__(self) -> None:
        # Release the pooled MySQL connections on teardown.
        self.db.pool._remove_connections()
        return

    def send_byte(self, packet_id) -> None:
        """Send a single raw status byte to the current client (no HTTP framing)."""
        self.sock.send(packet_id.to_bytes(1, 'little'))
        return

    def handle_connections(self) -> None:
        """Accept-loop: serve one connection at a time, forever."""
        while True:
            with self._sock.accept()[0] as self.sock:  # Don't even take the addr lol
                data: Optional[bytes] = self.sock.recv(glob.max_bytes)
                if not data:
                    # Client disconnected without sending anything; an empty
                    # buffer would crash Connection() below.
                    continue
                if len(data) == glob.max_bytes:
                    # BUGFIX: this was a plain (non-f) string printing a literal
                    # '{data}', and `return` here shut down the entire server
                    # instead of just ignoring the oversized request.
                    print(f'[WARN] Max connection data received. Most likely missing some data! (ignoring req)\n{data}')
                    continue

                conn = Connection(data)
                del data

                with Packet() as packet:
                    packet.read_data(conn.body)
                    self._handle_connection(packet)

                del conn
                self.served += 1

    def _handle_connection(self, packet: Packet) -> None:
        """Dispatch a single parsed packet by its id."""
        if packet.id == packets.client_login:  # Login packet
            username, client_password, game_version = packet.unpack_data([  # pylint: disable=unbalanced-tuple-unpacking
                dataTypes.STRING,  # Username
                dataTypes.STRING,  # Password
                dataTypes.INT16    # Game version
            ])
            del packet

            if game_version < 100:
                print(f'{username} attempted to login with an out-of-date client -- v{game_version}.')
                return

            res = self.db.fetch('SELECT id, password, privileges FROM users WHERE username_safe = %s', [User.safe_username(username)])
            if not res:
                self.send_byte(packets.server_generalNoSuchUsername)
                return

            u = User(res['id'], username, res['privileges'])

            # TODO: bcrypt — passwords are currently compared in plaintext.
            if not client_password == res['password']:
                self.send_byte(packets.server_generalIncorrectPassword)
                return

            del client_password, res

            if not u.privileges:
                print(f'Banned user {username} attempted to login.')
                self.send_byte(packets.server_generalBanned)
                return

            """ Login success, nothing wrong™️ """
            print(f'{username} has logged in.')
            if u.id not in [_u.id for _u in self.users]: self.users.append(u)
            self.send_byte(packets.server_generalSuccess)

            packet = Packet(packets.server_sendUserInfo)
            packet.pack_data([
                [u.id, dataTypes.INT16],
                [u.privileges, dataTypes.INT16],
                [[[_u.username, _u.id, _u.privileges] for _u in self.users], dataTypes.USERINFO_LIST]
            ])
            self.sock.send(packet.get_data())
            del packet

        elif packet.id == packets.client_logout:
            index = [u.id for u in self.users].index(packet.unpack_data([dataTypes.INT16])[0])
            print(f'{self.users[index].username} has logged out.')
            del self.users[index]
            del packet

        elif packet.id == packets.client_getOnlineUsers:
            del packet
            self.sendUserList()

        elif packet.id == packets.client_registerAccount:
            # unpack_data returns a tuple; an empty string field is skipped by
            # the unpacker, so fewer than two values means a missing field.
            resp: tuple = packet.unpack_data([dataTypes.STRING, dataTypes.STRING])
            if len(resp) == 2: username, password = resp
            else:
                self.send_byte(packets.server_generalFailure)
                return

            # BUGFIX: was all((len(username) not in ..., len(password) in ...)),
            # which only rejected when the username was invalid AND the password
            # was valid. Reject when either field is out of bounds.
            if len(username) not in range(3, 17) or len(password) not in range(6, 33):
                self.send_byte(packets.server_generalFailure)
                return

            del packet

            # Check if username already exists.
            if self.db.fetch('SELECT 1 FROM users WHERE username = %s', [username]):
                self.send_byte(packets.server_registrationUsernameTaken)
                return

            """ Passed checks """
            # Add user to DB.
            self.db.execute(
                'INSERT INTO users (id, username, username_safe, privileges, password) VALUES (NULL, %s, %s, 1, %s)',
                [username, User.safe_username(username), password]
            )
            del username, password
            self.send_byte(packets.server_generalSuccess)

        elif packet.id == packets.client_addBottle:  # TODO: return failed packet rather than returning during fails
            resp: tuple = packet.unpack_data([
                dataTypes.INT16,  # userid
                dataTypes.DRINK
            ])
            del packet

            # DRINK unpacks to 3 values (name, volume, abv) + the leading userid.
            if len(resp) != 4:
                del resp
                return

            user_id = resp[0]
            b: Bottle = Bottle(*resp[1:4])
            del resp

            if not b.is_valid():
                del user_id
                self.send_byte(packets.server_generalFailure)
                return

            """ Passed checks """
            self.db.execute(
                'INSERT INTO bottles (id, user_id, name, volume, abv) VALUES (NULL, %s, %s, %s, %s)',
                [user_id, b.name, b.volume, b.abv]
            )
            print(f'{user_id} added bottle: {b.name} [{b.volume}ml @ {b.abv}%]')
            self.send_byte(packets.server_generalSuccess)
            del user_id

        elif packet.id == packets.client_getInventory:
            user_id: int = packet.unpack_data([dataTypes.UINT16])[0]  # TODO: get_userid function?
            del packet

            if user_id not in (u.id for u in self.users):
                return  # TODO: make not_logged_in packet, generalize these

            res = self.db.fetchall('SELECT name, volume, abv FROM bottles WHERE user_id = %s AND volume > 0', [user_id])
            if not res:
                self.send_byte(packets.server_alreadyUpToDate)
                return

            with Packet(packets.server_sendInventory) as packet:
                packet.pack_data([
                    [[[row['name'], row['volume'], row['abv']] for row in res], dataTypes.DRINK_LIST]
                ])
                self.sock.send(packet.get_data())

        elif packet.id == packets.client_takeShot:
            resp: tuple = packet.unpack_data([  # TODO: only send bottleid
                dataTypes.INT16,  # userid
                dataTypes.DRINK   # updated bottle information
            ])
            del packet

            if len(resp) != 4:
                self.send_byte(packets.server_generalFailure)
                del resp
                return

            user_id = resp[0]
            b: Bottle = Bottle(*resp[1:4])
            del resp

            # Ensure the drink exists.
            res = self.db.fetch('SELECT id FROM bottles WHERE user_id = %s AND name = %s AND abv = %s', [user_id, b.name, b.abv])
            if not res:
                self.send_byte(packets.server_generalFailure)
                return

            bottle_id: int = res['id']
            del res

            """ Passed checks """
            self.db.execute(  # Don't delete from inv so we can use name from bottles in ledger.
                'UPDATE bottles SET volume = %s WHERE user_id = %s AND name = %s AND abv = %s',
                [b.volume, user_id, b.name, b.abv]
            )
            self.db.execute(  # Update ledger
                'INSERT INTO ledger (id, user_id, volume, bottle, time) VALUES (NULL, %s, %s, %s, UNIX_TIMESTAMP())',
                [user_id, b.volume, bottle_id]
            )
            self.send_byte(packets.server_generalSuccess)

        elif packet.id == packets.client_getLedger:
            user_id: int = packet.unpack_data([dataTypes.INT16])[0]
            del packet

            if user_id not in (u.id for u in self.users):
                return  # TODO: make not_logged_in packet, generalize these

            res = self.db.fetchall('''
                SELECT bottles.name, ledger.volume, bottles.abv
                FROM ledger
                LEFT JOIN bottles ON bottles.id = ledger.bottle
                WHERE ledger.user_id = %s''', [user_id]
            )
            if not res:
                self.send_byte(packets.server_alreadyUpToDate)
                return

            # NOTE(review): reuses the inventory packet id for the ledger reply
            # (server_sendLedger exists but is unused) — confirm intended.
            with Packet(packets.server_sendInventory) as packet:
                packet.pack_data([
                    [[[row['name'], row['volume'], row['abv']] for row in res], dataTypes.DRINK_LIST]
                ])
                self.sock.send(packet.get_data())

        else:
            # Typo fix: 'requeted' -> 'requested'.
            print(f'Unfinished packet requested -- ID: {packet.id}')
            self.send_byte(packets.server_generalFailure)

        return

    def sendUserList(self) -> None:
        """Send every online user's (username, id, privileges) to the client."""
        with Packet(packets.server_sendOnlineUsers) as packet:
            packet.pack_data([
                [[[u.username, u.id, u.privileges] for u in self.users], dataTypes.USERINFO_LIST]
            ])
            self.sock.send(packet.get_data())
        return
if __name__ == '__main__':
    # Start serving immediately when run as a script.
    Server(start_loop=True)
| {"/server/objects/glob.py": ["/common/objects/user.py"], "/common/objects/inventory.py": ["/common/objects/bottle.py"], "/client/client.py": ["/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py", "/common/helpers/packetHelper.py"], "/server/server.py": ["/common/helpers/packetHelper.py", "/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py"], "/common/helpers/packetHelper.py": ["/common/objects/user.py"]} |
72,627 | cmyui/drinkwithfriends | refs/heads/master | /common/objects/user.py | from typing import Optional
class User(object):
    """In-memory record of a (logged-in) user, mirroring a `users` DB row."""

    def __init__(self, id: int = 0, username: str = '', privileges: int = 0) -> None:
        self.id = id
        self.username = username
        # Derived canonical name; recompute via _safe_username() after renames.
        self.username_safe = self.safe_username(username)
        self.privileges = privileges

    def __del__(self) -> None:
        # Logout: blank out the session state.
        self.id, self.username, self.privileges = 0, '', 0

    @staticmethod
    def safe_username(username: str) -> str:
        """Return the canonical ('safe') form of *username*.

        NOTE(review): .strip() runs *after* the space->underscore replace, so
        leading/trailing spaces survive as underscores — confirm this matches
        what is stored in users.username_safe before changing the order.
        """
        underscored = username.replace(' ', '_')
        return underscored.strip()

    def _safe_username(self) -> None:
        """Refresh the cached safe username from self.username."""
        self.username_safe = self.safe_username(self.username)
| {"/server/objects/glob.py": ["/common/objects/user.py"], "/common/objects/inventory.py": ["/common/objects/bottle.py"], "/client/client.py": ["/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py", "/common/helpers/packetHelper.py"], "/server/server.py": ["/common/helpers/packetHelper.py", "/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py"], "/common/helpers/packetHelper.py": ["/common/objects/user.py"]} |
72,628 | cmyui/drinkwithfriends | refs/heads/master | /common/objects/bottle.py | from typing import Optional
from numpy import arange
class Bottle:
    """A bottle of drink: a name, remaining volume (ml) and alcohol strength (abv %)."""

    def __init__(self, name: Optional[str] = None, volume: int = 0, abv: float = 0.0) -> None:
        self.name = name
        self.volume = volume
        self.abv = abv

    def __repr__(self) -> str:  # was wrongly annotated -> None; it returns a str
        return f'\n [{self.volume}ml @ {self.abv}%] {self.name}\n'

    @property
    def cmyunits(self) -> float:
        """
        'cmyunits' are simply a measurement of volume * abv.
        This effectively allows a user to compare how much alcohol they have drank
        vs another user.
        For example:
            400ml of 5% beer = 2000 cmyunits (400 * 5 = 2000)
            2000 cmyunits in 40% vodka = 50ml (2000 / 40 = 50)
        So we can say 400ml beer ~= 50ml of vodka (a bit above a standard shot).
        This obviously doesn't account for advanced things such as how much the
        watered down beer would sober you up, but we're not trying to be THAT precise.

        Returns 0.0 for an invalid bottle.
        """
        return self.volume * self.abv if self.is_valid() else 0.0

    def is_valid(self) -> bool:
        """Validate name length (1-32), volume (50-5000 ml) and abv (0-100 excl.).

        BUGFIX: guards against name=None (the constructor default), which
        previously made len(None) raise TypeError instead of returning False.
        """
        return (
            self.name is not None
            and len(self.name) in range(1, 33)   # name len: 1 - 32
            and self.volume in range(50, 5001)   # volume: 50 - 5000
            and 0 < self.abv < 100               # abv: 0 - 100 (exclusive)
        )
| {"/server/objects/glob.py": ["/common/objects/user.py"], "/common/objects/inventory.py": ["/common/objects/bottle.py"], "/client/client.py": ["/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py", "/common/helpers/packetHelper.py"], "/server/server.py": ["/common/helpers/packetHelper.py", "/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py"], "/common/helpers/packetHelper.py": ["/common/objects/user.py"]} |
72,629 | cmyui/drinkwithfriends | refs/heads/master | /common/helpers/packetHelper.py | from typing import Any, Dict, List, Tuple, Optional, Union
from common.constants import dataTypes, packets
from common.objects.user import User
from struct import pack as _pack, unpack as _unpack, calcsize
class Packet(object):
    """One wire packet: a 2-byte id, a 2-byte body length, then the body.

    The same object serves both directions: `pack_data` appends to
    `self.data` for sending (finalized by `get_data`), while `read_data` +
    `unpack_data` walk a received buffer using `self.offset`.
    """

    def __init__(self, id: Optional[int] = None):
        self.id: Optional[int] = id
        self.data: bytes = b''
        self.length = 0
        self.offset = 0  # only used for unpacking
        return

    def __enter__(self):
        return self

    def __exit__(self, *args):
        # NOTE: `del self` only drops the local reference; kept for parity
        # with existing `with Packet() as ...` usage.
        del self
        return

    def _reuse(self, id: Optional[int] = None):
        """
        Basically reset our packet so we can reuse it lol.
        """
        self.id = id
        self.data = b''
        self.length = 0
        self.offset = 0
        return

    def get_data(self) -> bytes:
        """
        Return the request in HTTP format.
        This should be used in the final step of sending data to the client.
        """
        # Prepend the <id, length> header to the already-packed body.
        self.data = _pack('<hh', self.id, len(self.data)) + self.data
        return f'HTTP/1.1 200 OK\r\nContent-Length: {len(self.data)}\r\n\r\n'.encode() + self.data

    def pack_data(self, _data: List[List[Union[List[int], int, str, float, User]]]) -> None:
        """
        Pack `_data` into `self.data`.
        Data should be passed into this function in the format [[data, data_type], ...]
        """
        for data, type in _data:
            if type in [dataTypes.INT16_LIST, dataTypes.INT32_LIST]:
                self.data += data.__len__().to_bytes(1, 'little')
                for i in data: self.data += i.to_bytes(2 if type == dataTypes.INT16_LIST else 4, 'little')
            elif type == dataTypes.STRING: self.data += (b'\x0b' + data.__len__().to_bytes(1, 'little') + data.encode()) if data else b'\x00'
            elif type == dataTypes.USERINFO: self.data += data[0].__len__().to_bytes(1, 'little') + data[0].encode() + _pack('<hh', *data[1:3])
            elif type == dataTypes.USERINFO_LIST:
                self.data += data.__len__().to_bytes(1, 'little')
                for u in data: self.data += u[0].__len__().to_bytes(1, 'little') + u[0].encode() + _pack('<hh', *u[1:3])
            elif type == dataTypes.DRINK: self.data += data[0].__len__().to_bytes(1, 'little') + data[0].encode() + _pack('<hf', *data[1:3])
            elif type == dataTypes.DRINK_LIST:
                self.data += data.__len__().to_bytes(1, 'little')
                for b in data: self.data += b[0].__len__().to_bytes(1, 'little') + b[0].encode() + _pack('<hf', *b[1:3])
            else:
                fmt: str = self.get_fmtstr(type)
                if not fmt: continue
                else: fmt = f'<{fmt}'
                self.data += _pack(fmt, data)
        return

    def unpack_data(self, types: List[int]) -> Tuple[Any, ...]:
        """
        Unpack `self.data` one var at a time with the types from `types`.
        Types should be passed in the format [data_type, ...]
        """
        unpacked: List[Any] = []
        for type in types:
            if type in [dataTypes.INT16_LIST, dataTypes.INT32_LIST]:
                """
                Unpack a list of integers.
                Format:
                > 1 byte: length of list
                > 2/4 bytes: (depending on int16/int32 list) for each int
                """
                l: List[int] = []
                self.offset += 1
                # BUGFIX: the element size must depend on the *current* type.
                # `2 if dataTypes.INT16_LIST else 4` tested a truthy constant,
                # so INT32 lists were stepped through 2 bytes at a time.
                size: int = 2 if type == dataTypes.INT16_LIST else 4
                for _ in range(self.data[self.offset - 1]):
                    l.extend(_unpack('<h' if type == dataTypes.INT16_LIST else '<i', self.data[self.offset:self.offset + size]))
                    self.offset += size
                unpacked.append(tuple(l))
                del l
            elif type == dataTypes.STRING:  # cant be cheap this time :(
                """
                Unpack a string.
                Format:
                > 1 byte: '\x0b' if string is not empty, '\x00' if empty
                > 1 byte: length
                > indef. bytes: our string
                NOTE: an empty string appends *nothing* to the result, so
                callers must length-check the returned tuple.
                """
                if self.data[self.offset] == 11:  # '\x0b'
                    self.offset += 1
                    length: int = self.data[self.offset]
                    self.offset += 1
                    unpacked.append(self.data[self.offset:self.offset + length].decode())
                    self.offset += length
                else: self.offset += 1  # '\x00'
            elif type == dataTypes.USERINFO:
                """
                Unpack basic information about a user.
                Format:
                > 1 byte: length of subsequent username string
                > indef. bytes: username string
                > 2 bytes: userID
                > 2 bytes: privileges
                """
                length: int = self.data[self.offset]  # username (note: '\x0b' byte is not sent here)
                self.offset += 1
                # Username
                unpacked.append(self.data[self.offset:self.offset + length].decode())
                self.offset += length
                # UserID
                unpacked.append(_unpack('<h', self.data[self.offset:self.offset + 2]))  # TODO: combine
                self.offset += 2
                # Privileges
                unpacked.append(_unpack('<h', self.data[self.offset:self.offset + 2]))
                self.offset += 2
            elif type == dataTypes.USERINFO_LIST:
                """
                Unpack basic information for multiple users.
                Format:
                > 1 byte: length of list
                > indef. bytes: list of `USERINFO` types.
                """
                self.offset += 1
                for _ in range(self.data[self.offset - 1]):
                    strlen: int = self.data[self.offset]
                    self.offset += 1
                    unpacked.append([  # ugly code lol
                        self.data[self.offset:self.offset + strlen].decode(),
                        *_unpack('<h', self.data[self.offset + strlen:self.offset + strlen + 2]),  # TODO: combine
                        *_unpack('<h', self.data[self.offset + 2 + strlen:self.offset + 2 + strlen + 2])
                    ])
                    self.offset += strlen + 4
            elif type == dataTypes.DRINK:
                """
                Unpack a bottle's basic information.
                Format:
                > 1 byte: length of subsequent bottle name string
                > indef. bytes: bottle name string
                > 2 bytes: bottle volume
                > 4 bytes: bottle abv
                """
                length: int = self.data[self.offset]
                self.offset += 1
                unpacked.append(self.data[self.offset:self.offset + length].decode())  # bottle name
                self.offset += length
                unpacked.extend(_unpack('<h', self.data[self.offset:self.offset + 2]))  # volume
                self.offset += 2
                unpacked.extend(_unpack('<f', self.data[self.offset:self.offset + 4]))  # abv
                self.offset += 4
            elif type == dataTypes.DRINK_LIST:
                """
                Unpack information about a list of bottles.
                Format:
                > 1 byte: length of bottle list.
                > indef. bytes: list of `DRINK` types.
                """
                self.offset += 1
                for _ in range(self.data[self.offset - 1]):
                    strlen: int = self.data[self.offset]
                    self.offset += 1
                    unpacked.append([
                        self.data[self.offset: self.offset + strlen].decode(),
                        *_unpack('<h', self.data[self.offset + strlen:self.offset + strlen + 2]),  # TODO; combine
                        *_unpack('<f', self.data[self.offset + strlen + 2:self.offset + strlen + 2 + 4])
                    ])
                    self.offset += strlen + 6
            else:
                """
                Unpack something using the `struct` library.
                This will be used only for primitive types.
                """
                fmt: str = self.get_fmtstr(type)
                if not fmt: continue
                else: fmt = f'<{fmt}'
                unpacked.extend([x for x in _unpack(fmt, self.data[self.offset:self.offset + calcsize(fmt)])])
                self.offset += calcsize(fmt)
        return tuple(unpacked)

    def read_data(self, data) -> None:
        """
        Read the ID and length of a packet.
        (increments the offset accordingly)
        """
        self.data = data
        size: int = calcsize('<hh')
        self.id, self.length = _unpack('<hh', self.data[self.offset:self.offset + size])
        self.offset += size
        del size
        return

    @staticmethod
    def get_fmtstr(type: int) -> Optional[str]:
        """
        Get the format string for a primitive type from `dataTypes`.
        Returns None (after a warning) for unknown types.
        """
        if type == dataTypes.INT16: return 'h'
        elif type == dataTypes.UINT16: return 'H'
        elif type == dataTypes.INT32: return 'i'  # not using long
        elif type == dataTypes.UINT32: return 'I'  #
        elif type == dataTypes.INT64: return 'q'
        elif type == dataTypes.UINT64: return 'Q'
        elif type == dataTypes.FLOAT: return 'f'
        elif type == dataTypes.DOUBLE: return 'd'
        print(f'[WARN] Unknown dataType {type}.')
        return
class Connection(object):
    """A single parsed HTTP request: `self.headers` dict + raw `self.body` bytes."""

    def __init__(self, data: bytes) -> None:
        # Split head from body on the *first* blank line only — the binary
        # body may itself contain b'\r\n\r\n'.
        self.raw: List[bytes] = data.split(b'\r\n\r\n', maxsplit=1)
        self.parse_headers(self.raw[0].decode().split('\r\n'))
        self.body: bytes = self.raw[1]
        return

    def parse_headers(self, _headers: List[str]) -> None:
        """
        Parse HTTP header lines into the `self.headers` dict.

        BUGFIX: split on the first ':' only — the old unbounded split raised
        ValueError for values containing colons (e.g. 'Host: 127.0.0.1:5000').
        (Annotation fixed too: the caller passes a list of decoded lines,
        not bytes.)
        """
        self.headers: Dict[str, str] = {}
        for k, v in (line.split(':', 1) for line in _headers if ':' in line):
            self.headers[k] = v.lstrip()
        return
| {"/server/objects/glob.py": ["/common/objects/user.py"], "/common/objects/inventory.py": ["/common/objects/bottle.py"], "/client/client.py": ["/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py", "/common/helpers/packetHelper.py"], "/server/server.py": ["/common/helpers/packetHelper.py", "/common/objects/user.py", "/common/objects/bottle.py", "/common/objects/inventory.py"], "/common/helpers/packetHelper.py": ["/common/objects/user.py"]} |
72,642 | WisChang005/insta-login-test-pytest | refs/heads/master | /src/drivers/__init__.py | import pathlib
def get_path():
    """Return the absolute path of this package's directory, as a str."""
    here = pathlib.Path(__file__).parent
    return str(here.absolute())
| {"/tests/test_instagram_login.py": ["/src/drivers/__init__.py", "/src/utils/config.py"]} |
72,643 | WisChang005/insta-login-test-pytest | refs/heads/master | /src/drivers/driver_helper.py | import os
import platform
from selenium.webdriver import Chrome
from selenium.webdriver.chrome.options import Options
from src import drivers
def get_chrome_driver():
    """Build a Chrome WebDriver using the driver binary bundled for this OS."""
    # NOTE(review): only Windows/macOS binaries are mapped; running on Linux
    # raises KeyError here.
    binary_by_os = {
        "Windows": "chromedriver.exe",
        "Darwin": "chromedriver",
    }
    binary = binary_by_os[platform.system()]
    options = Options()
    options.add_argument("--lang=en")
    return Chrome(
        executable_path=os.path.join(drivers.get_path(), binary),
        service_args=["--verbose"],
        options=options,
    )
| {"/tests/test_instagram_login.py": ["/src/drivers/__init__.py", "/src/utils/config.py"]} |
72,644 | WisChang005/insta-login-test-pytest | refs/heads/master | /tests/test_instagram_login.py | import time
import pytest
from selenium.webdriver.common.by import By
from src.drivers import driver_helper
from src.utils.config import InstagramConfig
@pytest.fixture
def chrome_driver():
    """Yield a fresh Chrome driver and always quit it on teardown.

    BUGFIX: the fixture previously never quit the driver, leaking a
    chromedriver + browser process whenever a test failed before (or
    without) entering its `with` block.
    """
    driver = driver_helper.get_chrome_driver()
    yield driver
    try:
        driver.quit()
    except Exception:
        pass  # session already shut down by the test's `with` block
def test_instagram_title_is_correct(chrome_driver):
    """The Instagram landing page should carry the plain 'Instagram' title."""
    with chrome_driver as driver:
        driver.implicitly_wait(10)
        driver.get("https://www.instagram.com/")
        page_title = driver.title
        assert page_title == "Instagram"
def test_instagram_login_success(chrome_driver):
    """Log in with real credentials (from env vars) and expect the home search bar."""
    login_button = (By.XPATH, '//button[@type="submit"]')
    search_bar = (By.XPATH, "//div[@class=\"pbgfb Di7vw \" and @role=\"button\"]")
    with chrome_driver as driver:
        driver.implicitly_wait(10)
        driver.get("https://www.instagram.com/")
        # Fill in the credential fields.
        driver.find_element(By.NAME, "username").send_keys(InstagramConfig.get_instagram_account())
        driver.find_element(By.NAME, "password").send_keys(InstagramConfig.get_instagram_password())
        driver.find_element(*login_button).click()
        # The search bar only renders on the logged-in home page.
        assert driver.find_element(*search_bar)
def test_instagram_login_fail(chrome_driver):
    """Bogus credentials should surface the wrong-password alert text."""
    login_button = (By.XPATH, '//button[@type="submit"]')
    error_alert = (By.ID, "slfErrorAlert")
    expected_alert = "Sorry, your password was incorrect. Please double-check your password."
    with chrome_driver as driver:
        driver.implicitly_wait(10)
        driver.get("https://www.instagram.com/")
        # Deliberately wrong credentials.
        driver.find_element(By.NAME, "username").send_keys("my_account@gmail.com")
        driver.find_element(By.NAME, "password").send_keys("password")
        driver.find_element(*login_button).click()
        assert driver.find_element(*error_alert).text == expected_alert
| {"/tests/test_instagram_login.py": ["/src/drivers/__init__.py", "/src/utils/config.py"]} |
72,645 | WisChang005/insta-login-test-pytest | refs/heads/master | /src/utils/config.py | import os
class InstagramConfig:
    """Test credentials, pulled from environment variables at call time."""

    @staticmethod
    def get_instagram_account():
        # KeyError (fail fast) when INSTA_ACCOUNT is unset.
        account = os.environ["INSTA_ACCOUNT"]
        return account

    @staticmethod
    def get_instagram_password():
        # KeyError (fail fast) when INSTA_PASSWORD is unset.
        secret = os.environ["INSTA_PASSWORD"]
        return secret
| {"/tests/test_instagram_login.py": ["/src/drivers/__init__.py", "/src/utils/config.py"]} |
72,646 | SOOBINIM/korinsam | refs/heads/main | /korinsam_compare/stock/admin.py | from django.contrib import admin
from .models import Product
# Expose the Product model in the Django admin site.
admin.site.register(Product)
# admin.site.register(Price)
# admin.site.register(Client)
# Register your models here.
| {"/korinsam_compare/board/admin.py": ["/korinsam_compare/board/models.py"], "/korinsam_community/korin_user/views.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_community/korin_user/admin.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_compare/board/views.py": ["/korinsam_compare/board/models.py"]} |
72,647 | SOOBINIM/korinsam | refs/heads/main | /korinsam_community/korinsam_community/urls.py | from django.contrib import admin
from django.urls import path, include
from korin_user.views import home
# Project-level routes:
#   /admin/      -> Django admin
#   /korin_user/ -> auth app (register / login / logout)
#   /            -> home view
urlpatterns = [
    path('admin/', admin.site.urls),
    path('korin_user/', include('korin_user.urls')),
    path('', home),
]
| {"/korinsam_compare/board/admin.py": ["/korinsam_compare/board/models.py"], "/korinsam_community/korin_user/views.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_community/korin_user/admin.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_compare/board/views.py": ["/korinsam_compare/board/models.py"]} |
72,648 | SOOBINIM/korinsam | refs/heads/main | /korinsam_community/korin_user/urls.py | from django.urls import path, include
from . import views
# Routes mounted under /korin_user/ by the project urls.py.
urlpatterns = [
    path('register/', views.korin_user_reg),
    path('login/', views.korin_user_login),
    path('logout/', views.logout),
]
| {"/korinsam_compare/board/admin.py": ["/korinsam_compare/board/models.py"], "/korinsam_community/korin_user/views.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_community/korin_user/admin.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_compare/board/views.py": ["/korinsam_compare/board/models.py"]} |
72,649 | SOOBINIM/korinsam | refs/heads/main | /korinsam_compare/board/admin.py | from django.contrib import admin
# from .models import Product, Client
from .models import Board
# Expose the Board model in the Django admin site.
admin.site.register(Board)
# admin.site.register(Product)
# # admin.site.register(Price)
# admin.site.register(Client)
# # Register your models here.
| {"/korinsam_compare/board/admin.py": ["/korinsam_compare/board/models.py"], "/korinsam_community/korin_user/views.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_community/korin_user/admin.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_compare/board/views.py": ["/korinsam_compare/board/models.py"]} |
72,650 | SOOBINIM/korinsam | refs/heads/main | /korinsam_community/korin_user/views.py | from django.shortcuts import render, redirect
from django.http import HttpResponse
from .models import Kuser
from django.contrib.auth.hashers import make_password, check_password
def home(request):
    """Landing page: greet the logged-in user by name, else a plain banner."""
    user_id = request.session.get('user')
    if user_id:
        # filter().first() instead of .get(): a stale session pointing at a
        # deleted account used to raise Kuser.DoesNotExist (HTTP 500).
        kuser = Kuser.objects.filter(pk=user_id).first()
        if kuser is not None:
            return HttpResponse(kuser.user_name)
    return HttpResponse('HOME!!!')
def logout(request):
    """Drop the session's user key (if present) and return to the home page."""
    if request.session.get('user'):
        del request.session['user']
    return redirect('/')
def korin_user_login(request):
    """Render the login form (GET) or authenticate posted credentials (POST)."""
    if request.method == 'GET':
        return render(request, 'korin_user_login.html')
    elif request.method == 'POST':
        userId = request.POST.get('userId', None)
        password = request.POST.get('password', None)
        res_data = {}
        if not (userId and password):
            res_data['error'] = '모든 값을 입력해야 합니다.'
        else:
            # filter().first() instead of .get(): an unknown id used to raise
            # an unhandled Kuser.DoesNotExist (HTTP 500).
            kuser = Kuser.objects.filter(user_id=userId).first()
            if kuser is None:
                res_data['error'] = '존재하지 않는 아이디입니다.'
            elif check_password(password, kuser.user_pw):
                request.session['user'] = kuser.id
                return redirect('/')
            else:
                res_data['error'] = '비밀번호를 틀렸습니다.'
        return render(request, 'korin_user_login.html', res_data)
def korin_user_reg(request):
    """Render the registration form (GET) or create an account (POST)."""
    if request.method == 'GET':
        return render(request, 'korin_user_reg.html')
    elif request.method == 'POST':
        username = request.POST.get('username', None)
        userId = request.POST.get('userId', None)
        password = request.POST.get('password', None)
        re_password = request.POST.get('re-password', None)
        res_data = {}
        if not (username and userId and password and re_password):
            res_data['error'] = '모든 값을 입력 해야합니다.'
        elif password != re_password:
            res_data['error'] = '비밀번호가 다릅니다.'
        elif Kuser.objects.filter(user_id=userId).exists():
            # BUGFIX: without this check the same login id could be registered
            # twice, after which login's single-row lookup breaks.
            res_data['error'] = '이미 존재하는 아이디입니다.'
        else:
            # Hash the password before persisting; never store it in plaintext.
            kuser = Kuser(
                user_name=username,
                user_id=userId,
                user_pw=make_password(password),
            )
            kuser.save()
        return render(request, 'korin_user_reg.html', res_data)
| {"/korinsam_compare/board/admin.py": ["/korinsam_compare/board/models.py"], "/korinsam_community/korin_user/views.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_community/korin_user/admin.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_compare/board/views.py": ["/korinsam_compare/board/models.py"]} |
72,651 | SOOBINIM/korinsam | refs/heads/main | /korinsam_community/korin_user/models.py | from django.db import models
class Kuser(models.Model):
    # Community-site account. user_pw holds a Django make_password() hash
    # (see korin_user/views.py); user_id is the login id, user_name the
    # display name.
    user_name = models.CharField(max_length=32, verbose_name='사용자 이름')
    user_id = models.CharField(max_length=32, verbose_name='아이디')
    user_pw = models.CharField(max_length=128, verbose_name='비밀번호')
    # NOTE(review): the password-confirmation value is a form-only concern
    # and is never set by the views — persisting it as a model field looks
    # unintentional; confirm before removing (requires a migration).
    user_re_pw = models.CharField(max_length=128, verbose_name='비밀번호 확인')

    def __str__(self):
        # Shown in the admin list and wherever the object is stringified.
        return self.user_name
| {"/korinsam_compare/board/admin.py": ["/korinsam_compare/board/models.py"], "/korinsam_community/korin_user/views.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_community/korin_user/admin.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_compare/board/views.py": ["/korinsam_compare/board/models.py"]} |
72,652 | SOOBINIM/korinsam | refs/heads/main | /korinsam_compare/stock/urls.py | from django.urls import path, include
from django.conf.urls import url
from .import views
# Routes for the stock app.
# NOTE(review): `url` (django.conf.urls) is imported above but unused, and
# was removed in Django 4 — candidate for cleanup.
urlpatterns = [
    path('stock/', views.stockList),
]
| {"/korinsam_compare/board/admin.py": ["/korinsam_compare/board/models.py"], "/korinsam_community/korin_user/views.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_community/korin_user/admin.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_compare/board/views.py": ["/korinsam_compare/board/models.py"]} |
72,653 | SOOBINIM/korinsam | refs/heads/main | /korinsam_compare/stock/dataUpload.py | import openpyxl
# One-off helper: print column B5:B31 of the '오픈마켓' sheet.
data = openpyxl.load_workbook('korinsam_data.xlsx')
# Subscript access replaces get_sheet_by_name(), which was deprecated and
# then removed in openpyxl 3.x.
sheet1 = data['오픈마켓']
rows = sheet1['B5':'B31']
for row in rows:
    for cell in row:
        print(cell.value)
| {"/korinsam_compare/board/admin.py": ["/korinsam_compare/board/models.py"], "/korinsam_community/korin_user/views.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_community/korin_user/admin.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_compare/board/views.py": ["/korinsam_compare/board/models.py"]} |
72,654 | SOOBINIM/korinsam | refs/heads/main | /korinsam_community/korin_user/admin.py | from django.contrib import admin
from .models import Kuser
# Expose the Kuser model in the Django admin site.
admin.site.register(Kuser)
| {"/korinsam_compare/board/admin.py": ["/korinsam_compare/board/models.py"], "/korinsam_community/korin_user/views.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_community/korin_user/admin.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_compare/board/views.py": ["/korinsam_compare/board/models.py"]} |
72,655 | SOOBINIM/korinsam | refs/heads/main | /korinsam_compare/board/urls.py | from django.urls import path
from . import views
# Routes for the board app: the stock list page.
urlpatterns = [
    path('stockList/', views.board_list),
]
| {"/korinsam_compare/board/admin.py": ["/korinsam_compare/board/models.py"], "/korinsam_community/korin_user/views.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_community/korin_user/admin.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_compare/board/views.py": ["/korinsam_compare/board/models.py"]} |
72,656 | SOOBINIM/korinsam | refs/heads/main | /korinsam_community/korin_user/apps.py | from django.apps import AppConfig
class KorinUserConfig(AppConfig):
    """Django application configuration for the korin_user app."""
    name = 'korin_user'
| {"/korinsam_compare/board/admin.py": ["/korinsam_compare/board/models.py"], "/korinsam_community/korin_user/views.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_community/korin_user/admin.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_compare/board/views.py": ["/korinsam_compare/board/models.py"]} |
72,657 | SOOBINIM/korinsam | refs/heads/main | /korinsam_compare/stock/views.py | from django.http import HttpResponse
from django.shortcuts import render
import openpyxl
from .models import Product
def stockList(request):
    """Render the stock list page for any request method.

    BUGFIX: GET and POST were handled by two identical branches, and any
    other method (e.g. HEAD) fell through returning None, which Django
    turns into a 500; one unconditional render covers all cases.
    """
    return render(request, 'stockList.html')
# def excel_crawling(reqeust):
# data = openpyxl.load_workbook('C:\Program Files\dev\korinsam\korinsam_compare\stock\korinsam_insert_data_200921.xlsx')
# sheet1 = data.get_sheet_by_name('Sheet1')
# product_name_rows = sheet1['B2':'B26']
# product_subject_rows = sheet1['C2':'C26']
# print(product_name_rows)
# return render(reqeust, 'stockList1.html', {'product_name_rows': product_name_rows})
# for row in rows:
# for cell in row:
# product = Product.objects.create(product_name = cell.value)
# # product.save()
# # print(cell.value)
| {"/korinsam_compare/board/admin.py": ["/korinsam_compare/board/models.py"], "/korinsam_community/korin_user/views.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_community/korin_user/admin.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_compare/board/views.py": ["/korinsam_compare/board/models.py"]} |
72,658 | SOOBINIM/korinsam | refs/heads/main | /korinsam_compare/board/models.py | from django.db import models
class Board(models.Model):
    """One inventory row of the Korinsam (Korean ginseng) stock list."""
    no = models.IntegerField(blank=True, null=True)  # row/display number
    product_name = models.CharField(max_length=128, verbose_name='상품명')  # product name
    standard = models.CharField(max_length=64, verbose_name='규격')  # specification / package size
    stock = models.IntegerField(blank=True, null=True)  # quantity on hand
    stock_status = models.CharField(max_length=64, verbose_name='재고상태')  # stock-status label
    def __str__(self):
        # shown in the admin list view
        return self.product_name
    class Meta:
        # db_table = 'korinsam_stockList'
        verbose_name = '고려인삼 재고리스트'
        verbose_name_plural = '고려인삼 재고리스트'
# class Product(models.Model):
# product_name = models.CharField(max_length=128)
# def __str__(self):
# return self.product_name
# class Client(models.Model):
# product = models.ForeignKey(Product, on_delete = models.CASCADE)
# client_name = models.CharField(max_length=50, null = True, default='고객사입력')
# class Price(models.Model):
# client = models.ForeignKey(Client, on_delete = models.CASCADE)
# selling_price = models.IntegerField(blank = True, null = True) # 판매가
# supply_price = models.IntegerField(blank = True, null = True) # 공급가
# setteld_price = models.IntegerField(blank = True, null = True) # 정산가
# shipping_price = models.IntegerField(blank = True, null = True) # 배송비
# fee = models.IntegerField(blank = True, null = True) # 수수료 | {"/korinsam_compare/board/admin.py": ["/korinsam_compare/board/models.py"], "/korinsam_community/korin_user/views.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_community/korin_user/admin.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_compare/board/views.py": ["/korinsam_compare/board/models.py"]} |
72,659 | SOOBINIM/korinsam | refs/heads/main | /korinsam_compare/board/views.py | from django.shortcuts import render
from .models import Board
def board_list(request):
    """Render every Board row, newest `no` first, into the stock-list template."""
    context = {'boards': Board.objects.order_by('-no')}
    return render(request, 'stockList.html', context)
# def stockList(request):
# if request.method == 'GET':
# return render(request, 'stockList.html')
# if request.method == 'POST':
# return render(request,'stockList.html') | {"/korinsam_compare/board/admin.py": ["/korinsam_compare/board/models.py"], "/korinsam_community/korin_user/views.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_community/korin_user/admin.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_compare/board/views.py": ["/korinsam_compare/board/models.py"]} |
72,660 | SOOBINIM/korinsam | refs/heads/main | /wedding/wedding.py | import time
from selenium import webdriver
# Launch Internet Explorer via a local IEDriverServer and open Google.
# NOTE(review): the driver path is machine-specific and hard-coded.
driver = webdriver.Ie(
    'C:\\Users\\임수빈\\Desktop\\IEDriverServer_x64_3.9.0\\IEDriverServer.exe')
time.sleep(2)  # crude wait for the browser/driver to come up
driver.get("http://www.google.com")
| {"/korinsam_compare/board/admin.py": ["/korinsam_compare/board/models.py"], "/korinsam_community/korin_user/views.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_community/korin_user/admin.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_compare/board/views.py": ["/korinsam_compare/board/models.py"]} |
72,661 | SOOBINIM/korinsam | refs/heads/main | /ajaxTest.py | import requests
import json
from bs4 import BeautifulSoup
# id = id id_email_2
# pw = id id_password_3
# submit = class btn_g btn_confirm submit
# SECURITY(review): live personal credentials are hard-coded and committed to
# the repository. They should be revoked and loaded from the environment or a
# secrets store instead.
LOGIN_INFO = {
    'userID' : '743490@naver.com',
    'userPassword' : 'skWkd92!'
}
# Attempt a Kakao account login with a form POST inside a requests session.
with requests.Session() as s:
    req_login = s.post('https://accounts.kakao.com/login?continue=https%3A%2F%2Faccounts.kakao.com%2Fweblogin%2Faccount', data=LOGIN_INFO)
    print(req_login.status_code)
# custom_header = {
#     "referer" : 'https://accounts.kakao.com/login?continue=https%3A%2F%2Fcomm-auth-web.kakao.com%2Flogin%2Fcheck?hash=pb2b13tCS3gfvWojP3pf1T_KvKO3U-UbUs2v_cz60ns',
#     "user-agent" : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36',
# }
url = "https://comm-auth-web.kakao.com/seller/gate?groupId=237405"
# NOTE(review): plain requests.get -- does not reuse the session cookies above,
# so this fetch is unauthenticated.
req_index = requests.get(url)
# req_index = requests.get(url, header= custom_header)
dom = BeautifulSoup(req_index.content, "html.parser")
print(dom)
# req = s.post('https://sell.kakao.com/dashboard/index')
# def kakao_login(id_email_2,id_password_3):
# custom_header = {
# "referer" : 'https://accounts.kakao.com/login?continue=https%3A%2F%2Fcomm-auth-web.kakao.com%2Flogin%2Fcheck?hash=pb2b13tCS3gfvWojP3pf1T_KvKO3U-UbUs2v_cz60ns',
# "user-agent" : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36',
# }
# url = "https://accounts.kakao.com/login?continue=https%3A%2F%2Faccounts.kakao.com%2Fweblogin%2Faccount"
# req = requests.get(url, headers = custom_header)
# # response = requests.get(link, headers = headers)
# # dom = BeautifulSoup(response.content, "html.parser")
# # # text = dom.findAll("span", {"strong":re.complie("희망가격")})
# # return dom
# if req.status_code == 200:
# print("접속 성공")
# stock_data = json.loads(req.text)
# print(stock_data)
# else:
# print("접속 실패")
# if __name__ == "__main__":
# kakao_login('743490@naver.com','skWkd92!')
# s = kakao_session('743490', 'skWkd7434!@')
# import re
# import uuid
# import requests
# import rsa
# import lzstring
# from urllib3.util.retry import Retry
# from requests.adapters import HTTPAdapter
# from bs4 import BeautifulSoup
# import konlpy
# def encrypt(key_str, uid, upw):
# def naver_style_join(l):
# return ''.join([chr(len(s)) + s for s in l])
# sessionkey, keyname, e_str, n_str = key_str.split(',')
# e, n = int(e_str, 16), int(n_str, 16)
# message = naver_style_join([sessionkey, uid, upw]).encode()
# pubkey = rsa.PublicKey(e, n)
# encrypted = rsa.encrypt(message, pubkey)
# return keyname, encrypted.hex()
# def encrypt_account(uid, upw):
# key_str = requests.get('https://nid.naver.com/login/ext/keys.nhn').content.decode("utf-8")
# return encrypt(key_str, uid, upw)
# def naver_session(nid, npw):
# encnm, encpw = encrypt_account(nid, npw)
# s = requests.Session()
# retries = Retry(
# total=5,
# backoff_factor=0.1,
# status_forcelist=[500, 502, 503, 504]
# )
# s.mount('https://', HTTPAdapter(max_retries=retries))
# request_headers = {
# 'User-agent': 'Mozilla/5.0'
# }
# bvsd_uuid = uuid.uuid4()
# encData = '{"a":"%s-4","b":"1.3.4","d":[{"i":"id","b":{"a":["0,%s"]},"d":"%s","e":false,"f":false},{"i":"%s","e":true,"f":false}],"h":"1f","i":{"a":"Mozilla/5.0"}}' % (bvsd_uuid, nid, nid, npw)
# bvsd = '{"uuid":"%s","encData":"%s"}' % (bvsd_uuid, lzstring.LZString.compressToEncodedURIComponent(encData))
# resp = s.post('https://nid.naver.com/nidlogin.login', data={
# 'svctype': '0',
# 'enctp': '1',
# 'encnm': encnm,
# 'enc_url': 'http0X0.0000000000001P-10220.0000000.000000www.naver.com',
# 'url': 'www.naver.com',
# 'smart_level': '1',
# 'encpw': encpw,
# 'bvsd': bvsd
# }, headers=request_headers)
# finalize_url = re.search(r'location\.replace\("([^"]+)"\)', resp.content.decode("utf-8")).group(1)
# s.get(finalize_url)
# return s
# def get_total(keyword):
# url = "https://m.cafe.naver.com/ArticleSearchList.nhn?search.query=%" + keyword + \
# "&search.menuid=424&search.searchBy=0&search.sortBy=date&search.clubid=10625158&search.option=0&search.defaultValue=&search.page="
# response = requests.get(url)
# dom = BeautifulSoup(response.content, "html.parser")
# return dom.select_one("#ct > div.search_contents > div.search_sort > div.sort_l > span").text
# def get_list(keyword, page):
# url = "https://m.cafe.naver.com/ArticleSearchList.nhn?search.query=%" + keyword + \
# "&search.menuid=424&search.searchBy=0&search.sortBy=date&search.clubid=10625158&search.option=0&search.defaultValue=&search.page=" + str(page)
# res = requests.get(url)
# dom = BeautifulSoup(res.content, "html.parser")
# result = dom.findAll("a", {"href":re.compile("ArticleRead.nhn?")})
# get_link(result)
# return result
# def get_link(result):
# # ls = []
# for i in range(0, len(result)):
# link = result[i].get('href')
# link = "http://m.cafe.naver.com"+link
# print(link, sep='\n')
# print( sep='\n')
# # ls.append(link)
# # print(ls, sep='\t')
# # return ls
# def get_text(link):
# headers = {
# "Referer" : "https://m.cafe.naver.com/ArticleSearchList.nhn?search.query=%EB%B0%98%EB%8B%A4%EB%82%98&search.menuid=0&search.searchBy=0&search.sortBy=date&search.clubid=10625158&search.option=0&search.defaultValue=1",
# "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36",
# }
# response = requests.get(link, headers = headers)
# dom = BeautifulSoup(response.content, "html.parser")
# # text = dom.findAll("span", {"strong":re.complie("희망가격")})
# return dom
# def get_all_texts(keyword):
# total = get_total(keyword)
# pages = int(total)
# # text_sets = []
# for page in range(1, 2):
# text = get_list(keyword, page)
# link_ls = get_link(text)
# for link in link_ls:
# all_text = get_text(link)
# # text_sets.extend(all_text)
# print(all_text)
# if __name__ == "__main__":
# s = naver_session('743490', 'skWkd7434!@')
# get_list("반다나",1)
# # get_all_texts("반다나")
| {"/korinsam_compare/board/admin.py": ["/korinsam_compare/board/models.py"], "/korinsam_community/korin_user/views.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_community/korin_user/admin.py": ["/korinsam_community/korin_user/models.py"], "/korinsam_compare/board/views.py": ["/korinsam_compare/board/models.py"]} |
72,663 | u1273400/rpynotes | refs/heads/master | /ok-rnn/ok-rnn-step2.py | # display some stats on the data
# NOTE(review): file fragment -- codetext/valitext/BATCHSIZE/SEQLEN/txt are
# defined in the larger script this was taken from.
epoch_size = len(codetext) // (BATCHSIZE * SEQLEN)  # minibatches per epoch
txt.print_data_stats(len(codetext), len(valitext), epoch_size)
72,664 | u1273400/rpynotes | refs/heads/master | /azure_notebooks/add_dur_scatter_c.py |
# coding: utf-8
# In[1]:
import pandas as pd
import matplotlib as plt
import numpy as np
import os,sys
#import sox
import sys,time
# In[2]:
# Collect and load every per-split Common Voice manifest (cv-*.csv) in data/e/.
dp=[f for f in os.listdir('data/e/') if f.startswith('cv-') and f.endswith('csv')]
fn='data/e/'
data=[pd.read_csv(fn+i) for i in dp]
data[0].head(1)  # no-op outside a notebook
# In[3]:
# Concatenate all splits into one frame with a fresh contiguous index.
# NOTE(review): pd.concat(data, ignore_index=True) would do this in one call.
df = data[0].iloc[0:0]
for i in data:
    df=pd.concat([df,i])
df=df.reset_index(drop=True)
# In[4]:
def printnnl(string):
    """Overwrite the current console line with `string` (no trailing newline).

    Uses a carriage return so repeated calls form an in-place progress line.
    Relies on the module-level `sys` import (the redundant function-local
    `import sys` has been removed).
    """
    sys.stdout.write('\r%s' % (string))
    sys.stdout.flush()
# In[ ]:
# ---- verify wav/scatter-feature file existence for every manifest row ----
total=len(df)
start_time = time.time()
t=1
import os,datetime  # NOTE(review): os is already imported above; datetime first appears here
for k in range(len(df)):
    end_time = time.time()
    uptime =end_time - start_time                # seconds since loop start
    el=(datetime.timedelta(seconds=int(uptime)))  # elapsed, for display
    pc=(total/(t+1)*uptime)-uptime               # projected remaining seconds
    eta=(datetime.timedelta(seconds=int(pc)))
    # progress report at a few fixed milestones and every 2000 rows
    if t%2000==0 or t==10 or t==100 or t==750 or t==250 or t==total:
        printnnl("{:%d, %b %Y %H:%M:%S}>> ".format(datetime.datetime.today())+'processing {} of {} files .. {:.2f}% complete. time elapsed {} eta {}'.format(t,total,t/total*100,str(el),str(eta)))
        #df.to_csv('data/e/cvd.csv',encoding='latin_1')
    t+=1
    wpath=df['wav_filename'][k]#.replace("/wave","").replace(".wav",".mp3")
    mpath=df['scatterc'][k]#.replace("/wave","").replace(".wav",".mp3")
    # rows already marked 0/1 were checked on a previous run -- skip them
    if df['wav_exists'][k]==1 or df['wav_exists'][k]==0:
        continue
    # NOTE(review): df['col'][k] = ... is chained assignment; pandas may write
    # to a temporary copy (SettingWithCopyWarning). df.loc[k, 'col'] = ... is
    # the safe form.
    if os.path.exists(wpath):
        df['wav_exists'][k]=1
    else:
        print("\n{:%d, %b %Y %H:%M:%S}>> ".format(datetime.datetime.today())+'could not find',wpath)
        df['wav_exists'][k]=0
    if os.path.exists(mpath):
        df['mfc_exists'][k]=1
        df['mfc_size'][k]=os.path.getsize(mpath)
    else:
        df['mfc_exists'][k]=0
        print("\n{:%d, %b %Y %H:%M:%S}>> ".format(datetime.datetime.today())+'could not find',mpath)
df.head(1)  # no-op outside a notebook
df.to_csv('data/e/cvd.csv',encoding='latin_1')
# In[4]:
# Leftover notebook sanity check; the result is discarded.
os.path.exists("P:/Student_Save/Backup/s/cv_corpus_v1/cv-valid-test/wave/sample-000000.wav")
# In[ ]:
| {"/pytorch/trl/qNet.py": ["/pytorch/trl/qGame.py"]} |
72,665 | u1273400/rpynotes | refs/heads/master | /ok-rnn/elog3.py | #!/usr/bin/env python3
import time, datetime
from subprocess import Popen, PIPE
import urllib.request
import json, pickle, os, base64
# from slack_webhook import Slack
'''
pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib
'''
from email.mime.image import MIMEImage
import mimetypes
from email.mime.base import MIMEBase
from email.mime.audio import MIMEAudio
from googleapiclient import errors
from google_auth_oauthlib.flow import Flow, InstalledAppFlow
from googleapiclient.discovery import build
from googleapiclient.http import MediaFileUpload, MediaIoBaseDownload
from google.auth.transport.requests import Request
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
def Create_Service(client_secret_file, api_name, api_version, *scopes):
    """Build an authorised Google API client, caching OAuth tokens on disk.

    Args:
        client_secret_file: path to the OAuth client-secrets JSON.
        api_name / api_version: Google API to build (e.g. 'gmail', 'v1').
        *scopes: called with a single list argument, so scopes[0] is the
            actual list of scope URLs.

    Returns:
        A googleapiclient service object, or None if building it fails.

    Tokens are pickled to token_<api>_<version>.pickle; expired tokens are
    refreshed, otherwise a local-server browser flow is run.
    """
    #print(client_secret_file, api_name, api_version, scopes, sep='-')
    CLIENT_SECRET_FILE = client_secret_file
    API_SERVICE_NAME = api_name
    API_VERSION = api_version
    SCOPES = [scope for scope in scopes[0]]
    #print(SCOPES)
    cred = None
    pickle_file = f'token_{API_SERVICE_NAME}_{API_VERSION}.pickle'
    if os.path.exists(pickle_file):
        with open(pickle_file, 'rb') as token:
            cred = pickle.load(token)
    if not cred or not cred.valid:
        if cred and cred.expired and cred.refresh_token:
            cred.refresh(Request())
        else:
            # first run (or unusable token): interactive browser consent flow
            flow = InstalledAppFlow.from_client_secrets_file(CLIENT_SECRET_FILE, SCOPES)
            cred = flow.run_local_server()
        with open(pickle_file, 'wb') as token:
            pickle.dump(cred, token)
    try:
        service = build(API_SERVICE_NAME, API_VERSION, credentials=cred)
        #print(API_SERVICE_NAME, 'service created successfully')
        return service
    except Exception as e:
        print('Unable to connect.')
        print(e)
        return None
def convert_to_RFC_datetime(year=1900, month=1, day=1, hour=0, minute=0):
    """Format the given instant as an RFC 3339 / ISO-8601 UTC timestamp (seconds fixed at 0)."""
    return datetime.datetime(year, month, day, hour, minute, 0).isoformat() + 'Z'
# ---- runtime configuration ----
CLIENT_SECRET_FILE = '/mnt/c/Users/User/credentials.json'  # OAuth client secrets (machine-local path)
API_NAME = 'gmail'
API_VERSION = 'v1'
SCOPES = ['https://mail.google.com/']  # full Gmail access
service = Create_Service(CLIENT_SECRET_FILE, API_NAME, API_VERSION, SCOPES)
interval_hrs = 1      # e-mail report period (hours)
dint_min = 5          # console display period (minutes)
log_lines = 50        # log lines included in each e-mail
display_lines = 15    # log lines echoed to the console
root = './'
log = f'{root}ok201222.log'  # training log monitored by tail()
# SECURITY(review): this Slack webhook URL is a live secret committed to the
# repo; it should be revoked and injected via environment/config.
myurl = "https://hooks.slack.com/services/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"
kfiles = ['vloss.json']  # loss-curve files attached to each report e-mail
sender, to, subject, message_text, file = (
    'ESPNet Research',
    'john.alamina@hud.ac.uk',
    'ESPNet Research: OK-RNN',
    '',
    [f'{root}{file}' for file in kfiles]
)
def create_message(sender, to, subject, message_text):
    """Build a Gmail API message body.

    Args:
        sender: sender's e-mail address.
        to: recipient's e-mail address.
        subject: subject line.
        message_text: plain-text body.

    Returns:
        A dict with the base64url-encoded RFC 2822 message under 'raw',
        as expected by the Gmail API users.messages.send endpoint.
    """
    mime = MIMEText(message_text)
    mime['to'], mime['from'], mime['subject'] = to, sender, subject
    encoded = base64.urlsafe_b64encode(mime.as_string().encode('utf-8'))
    return {'raw': encoded.decode('utf8')}
def create_message_with_attachment(
        sender, to, subject, message_text, file):
    """Create a Gmail API message with file attachments.

    Args:
        sender: sender's e-mail address.
        to: recipient's e-mail address.
        subject: subject line.
        message_text: plain-text body.
        file: a path, or list of paths, of files to attach. Missing paths
            are silently skipped (best-effort attachment).

    Returns:
        A dict with the base64url-encoded MIME message under 'raw'.
    """
    message = MIMEMultipart()
    message['to'] = to
    message['from'] = sender
    message['subject'] = subject
    message.attach(MIMEText(message_text))

    if type(file) is not list:
        file = [file]
    for f in file:
        if not os.path.exists(f):
            continue  # skip missing attachments rather than fail the whole mail
        content_type, encoding = mimetypes.guess_type(f)
        if content_type is None or encoding is not None:
            content_type = 'application/octet-stream'
        main_type, sub_type = content_type.split('/', 1)
        if main_type == 'text':
            # BUG FIX: text payloads must be str, not bytes. The original
            # opened in 'rb' and passed bytes to MIMEText, which raises
            # AttributeError (bytes has no .encode). Open in text mode.
            with open(f, 'r') as fp:
                msg = MIMEText(fp.read(), _subtype=sub_type)
        elif main_type == 'image':
            with open(f, 'rb') as fp:
                msg = MIMEImage(fp.read(), _subtype=sub_type)
        elif main_type == 'audio':
            with open(f, 'rb') as fp:
                msg = MIMEAudio(fp.read(), _subtype=sub_type)
        else:
            # NOTE(review): raw bytes payload without a base64
            # Content-Transfer-Encoding -- confirm binary attachments render
            # correctly, or add email.encoders.encode_base64(msg).
            with open(f, 'rb') as fp:
                msg = MIMEBase(main_type, sub_type)
                msg.set_payload(fp.read())
        filename = os.path.basename(f)
        msg.add_header('Content-Disposition', 'attachment', filename=filename)
        message.attach(msg)
    return {'raw': base64.urlsafe_b64encode(message.as_string().encode('utf-8')).decode('utf-8')}
def send_message(service, user_id, message):
    """Send an e-mail message via the Gmail API.

    Args:
        service: Authorized Gmail API service instance.
        user_id: User's email address, or the special value "me" for the
            authenticated user.
        message: Message body dict (as produced by create_message*).

    Returns:
        The sent message resource on success; None on failure (the error is
        printed, not raised).
    """
    print(f'sending from {user_id}.')
    try:
        message = (service.users().messages().send(userId=user_id, body=message)
                   .execute())
        print('Sent. Message Id: %s' % message['id'])
        return message
    except (errors.HttpError, Exception) as error:
        # NOTE(review): (errors.HttpError, Exception) is equivalent to
        # catching Exception alone; failures are swallowed and None returned.
        print('An error occurred: %s' % error)
def tail(n):
    """Return the last `n` lines of the module-level log file via the system `tail` command."""
    proc = Popen(["tail", f"-n {n}", f"{log}"], stdout=PIPE)
    output, err = proc.communicate()
    proc.wait()
    # stderr is not captured, so `err` is always None and stdout is returned
    if err is None:
        return output.decode('utf-8')
    return err.decode('utf-8')
def df():
    """Return the output of `df -h` (host disk-usage summary) as a string."""
    proc = Popen(["df", "-h"], stdout=PIPE)
    output, err = proc.communicate()
    proc.wait()
    # stderr is not captured, so `err` is always None and stdout is returned
    if err is None:
        return output.decode('utf-8')
    return err.decode('utf-8')
#slack = Slack(url=myurl)
#slack.post(text="Hello, world.")
def main():
    """Monitor loop: echo the log tail every `dint_min` minutes and e-mail a
    report (with attachments) every `interval_hrs` hours. Runs forever.

    `c` counts elapsed seconds approximately: each iteration sleeps 1s and
    ignores the time spent tailing/sending.
    """
    c = 0
    while c > -1:  # effectively `while True`
        time.sleep(1)
        if c % (60 * dint_min) == 0:
            output = tail(display_lines)
            print(output)
        if c % (60 * 60 * interval_hrs) == 0:
            msg = tail(log_lines)
            dayx = int(c/(60 * 60 * 24))  # whole days since start, for the subject line
            msg = create_message_with_attachment(sender, to, f'{subject} (Day {dayx})', msg, file)
            send_message(service, 'me', msg)
        c += 1
if __name__ == '__main__':
main()
| {"/pytorch/trl/qNet.py": ["/pytorch/trl/qGame.py"]} |
72,666 | u1273400/rpynotes | refs/heads/master | /ok-rnn/preprocess.py | #!/usr/bin/python
import sys
import nltk
# Read raw text from stdin, split it into sentences, and emit one
# whitespace-tokenised, lower-cased sentence per output line.
for line in sys.stdin:
    for sentence in nltk.sent_tokenize(line):
        print(' '.join(nltk.word_tokenize(sentence)).lower())
| {"/pytorch/trl/qNet.py": ["/pytorch/trl/qGame.py"]} |
72,667 | u1273400/rpynotes | refs/heads/master | /gallery_1d_jupyter/espmine/feature_scatter.py | from typing import List
from typing import Tuple
from typing import Union
import librosa
import numpy as np
import torch
from torch_complex.tensor import ComplexTensor
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
class ScatterTransform(torch.nn.Module):
    """Feature-transform module: log-mel filterbank with a planned (commented)
    scattering-transform stage.

    NOTE(review): `forward` reads `self.melmat`, but `__init__` never creates
    it -- as written, calling `forward` raises AttributeError unless `melmat`
    is attached externally. Confirm where the mel filterbank is meant to be
    built.
    """
    def __init__(self,
                 # filter options,
                 ):
        super().__init__()

    def forward(self, feat: torch.Tensor, ilens: torch.LongTensor) \
            -> Tuple[torch.Tensor, torch.LongTensor]:
        """Project `feat` through the mel matrix, take the log, and zero out
        frames beyond each sequence length in `ilens`."""
        # feat: (B, T, D1) x melmat: (D1, D2) -> mel_feat: (B, T, D2)
        mel_feat = torch.matmul(feat, self.melmat)
        logmel_feat = (mel_feat + 1e-20).log()  # epsilon keeps log() finite
        # Zero padding
        logmel_feat = logmel_feat.masked_fill(
            make_pad_mask(ilens, logmel_feat, 1), 0.0)
        # We now create the Scattering1D object that will be used to calculate the scattering coefficients.
        # scattering = Scattering1D(J, T, Q)
        # If we are using CUDA, the scattering transform object must be transferred to the GPU by calling its cuda() method. The data is similarly transferred.
        # if use_cuda:
        #     scattering.cuda()
        #     x_all = x_all.cuda()
        #     y_all = y_all.cuda()
        # Compute the scattering transform for all signals in the dataset.
        # Sx_all = scattering.forward(x_all)
        # Since it does not carry useful information, we remove the zeroth-order scattering coefficients, which are always placed in the first channel of the scattering Tensor.
        # Sx_all = Sx_all[:,1:,:]
        # To increase discriminability, we take the logarithm of the scattering coefficients (after adding a small constant to make sure nothing blows up when scattering coefficients are close to zero).
        # Sx_all = torch.log(torch.abs(Sx_all) + log_eps)
        # Finally, we average along the last dimension (time) to get a time-shift invariant representation.
        # Sx_all = torch.mean(Sx_all, dim=-1)
        return logmel_feat, ilens
| {"/pytorch/trl/qNet.py": ["/pytorch/trl/qGame.py"]} |
72,668 | u1273400/rpynotes | refs/heads/master | /ok-rnn/process_results.py | import my_txtutils
import tensorflow.compat.v1 as tf
import numpy as np
import math
import torch.nn as nn
import torch
# ---- model/checkpoint configuration (must match the trained checkpoint) ----
SEQLEN = 30
BATCHSIZE = 1
ALPHASIZE = my_txtutils.ALPHASIZE
INTERNALSIZE = 512
NLAYERS = 3
learning_rate = 0.001  # fixed learning rate (not referenced again in this file)
dropout_pkeep = 0.8  # some dropout (not referenced again in this file)
tf.disable_v2_behavior()  # graph-mode API (tf.Session) is used below
import kenlm  # NOTE(review): mid-file import; third-party KenLM n-gram scorer
# evaluation corpus (UTF-16 text)
with open('txts/gal_eph_new.txt', encoding='utf-16') as f:
    s = f.read()
author = 'checkpoints/rnn_train_1608440693-210000000'  # TF checkpoint prefix to restore
def get_scores(corpora, use_log=False):
    """Score `corpora` character-by-character with the restored TF RNN.

    Feeds each character in turn and records the model output ('Yo:0') at the
    index of the *actual* next character.

    Args:
        corpora: the text to score (len >= 2).
        use_log: if True, store math.log of each value instead.

    Returns:
        List of len(corpora) - 1 per-character scores.
    """
    probs = []
    with tf.Session() as sess:
        new_saver = tf.train.import_meta_graph(author + '.meta')
        new_saver.restore(sess, author)
        x = my_txtutils.convert_from_alphabet(ord(corpora[0]))
        x = np.array([[x]])  # shape [BATCHSIZE, SEQLEN] with BATCHSIZE=1 and SEQLEN=1
        # initial values
        y = x
        h = np.zeros([1, INTERNALSIZE * NLAYERS], dtype=np.float32)  # [ BATCHSIZE, INTERNALSIZE * NLAYERS]
        for i in range(1, len(corpora)):
            yo, h = sess.run(['Yo:0', 'H:0'], feed_dict={'X:0': y, 'pkeep:0': 1., 'Hin:0': h, 'batchsize:0': 1})
            next_ord = my_txtutils.convert_from_alphabet(ord(corpora[i]))
            # Record the model's output for the character that actually follows.
            if use_log:
                probs.append(math.log(yo[0][next_ord]))
            else:
                probs.append(yo[0][next_ord])
            # feed the true next character (teacher forcing)
            y = np.array([[next_ord]])  # shape [BATCHSIZE, SEQLEN] with BATCHSIZE=1 and SEQLEN=1
    return probs
nc = len(s)           # character count of the evaluation corpus
nw = len(s.split())   # word count of the evaluation corpus
model = kenlm.Model('txteval/text.arpa')  # n-gram baseline language model
# per-word scores from KenLM (log10 per its API -- confirm)
sc = [score for score, _, _ in model.full_scores(s)]
gpufound = torch.cuda.is_available()
device = 'cuda' if gpufound else 'cpu'
print('using gpu' if gpufound else 'using cpu')
class RNN2(nn.Module):
    """Plain Elman-style cell: linear maps over [input, hidden] with a log-softmax readout."""

    def __init__(self, input_size, hidden_size, output_size):
        super(RNN2, self).__init__()
        self.hidden_size = hidden_size
        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(input_size + hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        # both the new hidden state and the output are computed from the
        # concatenation of the input with the *previous* hidden state
        joint = torch.cat((input, hidden), 1)
        return self.softmax(self.i2o(joint)), self.i2h(joint)

    def initHidden(self):
        # NOTE: relies on the module-level BATCHSIZE and device globals
        return torch.zeros(BATCHSIZE, self.hidden_size, device=device)
class RNN1(nn.Module):
    """GRU-cell recurrent unit with a linear + log-softmax readout over [input, hidden]."""

    def __init__(self, input_size, hidden_size, output_size):
        super(RNN1, self).__init__()
        self.hidden_size = hidden_size
        self.i2h = nn.GRUCell(input_size, hidden_size)
        self.i2o = nn.Linear(input_size + hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        # readout uses the input concatenated with the *previous* hidden state
        joint = torch.cat((input, hidden), 1)
        new_hidden = self.i2h(input, hidden)
        log_probs = self.softmax(self.i2o(joint))
        return log_probs, new_hidden

    def initHidden(self):
        # NOTE: relies on the module-level BATCHSIZE and device globals
        return torch.zeros(BATCHSIZE, self.hidden_size, device=device)
def mb2t(rows):
    """One-hot encode a [BATCH, SEQ] array of character codes as a
    [SEQ, BATCH, ALPHASIZE] float tensor on the module-level device."""
    rows = rows.transpose()
    tensor = torch.zeros(rows.shape[0], rows.shape[1], ALPHASIZE, device=device)
    for (i, j), letter_code in np.ndenumerate(rows):
        tensor[i, j, letter_code] = 1
    return tensor
def tf_play(size):
    """Generate and print `size` characters from the restored TensorFlow model.

    Samples from the top-2 most likely characters at each step and wraps
    output lines at 70 characters.
    """
    ncnt = 0
    with tf.Session() as sess:
        new_saver = tf.train.import_meta_graph(author + '.meta')
        new_saver.restore(sess, author)
        x = my_txtutils.convert_from_alphabet(ord("L"))  # seed character
        x = np.array([[x]])  # shape [BATCHSIZE, SEQLEN] with BATCHSIZE=1 and SEQLEN=1
        # initial values
        y = x
        h = np.zeros([1, INTERNALSIZE * NLAYERS], dtype=np.float32)  # [ BATCHSIZE, INTERNALSIZE * NLAYERS]
        for i in range(size):
            yo, h = sess.run(['Yo:0', 'H:0'], feed_dict={'X:0': y, 'pkeep:0': 1., 'Hin:0': h, 'batchsize:0': 1})
            # If sampling is done from the topn most likely characters, the generated text
            # is more credible and more "english". If topn is not set, it defaults to the full
            # distribution (ALPHASIZE)
            # Recommended: topn = 10 for intermediate checkpoints, topn=2 or 3 for fully trained checkpoints
            c = my_txtutils.sample_from_probabilities(yo, topn=2)
            y = np.array([[c]])  # shape [BATCHSIZE, SEQLEN] with BATCHSIZE=1 and SEQLEN=1
            c = chr(my_txtutils.convert_to_alphabet(c))
            print(c, end="")
            if c == '\n':
                ncnt = 0
            else:
                ncnt += 1
            if ncnt == 70:  # hard-wrap long lines
                print("")
                ncnt = 0
def play(size, rnn, path):
    """Generate and print `size` characters from a saved PyTorch model.

    Loads the weights at `path` into `rnn`, then samples from the top-2 most
    likely characters at each step, wrapping output lines at 70 characters.
    """
    probs = []  # NOTE(review): never filled or returned -- dead variable
    rnn.load_state_dict(torch.load(path, map_location=torch.device('cpu')))
    hidden = torch.zeros(1, INTERNALSIZE, device=device)  # [ BATCHSIZE, INTERNALSIZE * NLAYERS]
    x = my_txtutils.convert_from_alphabet(ord("L"))  # seed character
    x = np.array([[x]])  # shape [BATCHSIZE, SEQLEN] with BATCHSIZE=1 and SEQLEN=1
    y = mb2t(x)
    ncnt = 0
    for i in range(size):
        yo, hidden = rnn(y[0], hidden)
        c = my_txtutils.sample_from_probabilities(yo.detach().numpy(), topn=2)
        y = mb2t(np.array([[c]]))  # shape [BATCHSIZE, SEQLEN] with BATCHSIZE=1 and SEQLEN=1
        c = chr(my_txtutils.convert_to_alphabet(c))
        print(c, end="")
        if c == '\n':
            ncnt = 0
        else:
            ncnt += 1
        if ncnt == 70:  # hard-wrap long lines
            print("")
            ncnt = 0
def pt_scores(corpora, rnn, path):
    """Per-character scores of `corpora` under a saved PyTorch model.

    Loads the weights at `path` into `rnn`, feeds the text one character at a
    time, and collects the model output at the index of the actual next
    character (a log-probability for the LogSoftmax models in this file).

    Returns:
        numpy array of length len(corpora) - 1.
    """
    probs = []
    rnn.load_state_dict(torch.load(path, map_location=torch.device('cpu')))
    hidden = torch.zeros(1, INTERNALSIZE, device=device)  # [ BATCHSIZE, INTERNALSIZE * NLAYERS]
    x = [my_txtutils.convert_from_alphabet(ord(c)) for c in corpora]
    x = np.array([x])  # shape [BATCHSIZE, SEQLEN] with BATCHSIZE=1 and SEQLEN=1
    line_tensor = mb2t(x)  # -> [SEQ, 1, ALPHASIZE] one-hot (mb2t transposes)
    for i in range(1, line_tensor.size()[0]):
        yo, hidden = rnn(line_tensor[i-1], hidden)
        next_ord = my_txtutils.convert_from_alphabet(ord(corpora[i]))
        probs.append(yo[0][next_ord])
    return torch.stack(probs).detach().numpy()
def wppx_logprob(scores, corpus):
    """Word-normalised perplexity (base 10) of `corpus` from summed `scores`.

    The average negative score per word is also echoed in bases 2 and e for
    comparison.
    """
    n_words = len(corpus.split()) + 1  # +1 for the end-of-sentence token
    avg_neg = -np.sum(scores) / n_words
    print(avg_neg, 2 ** avg_neg, math.exp(avg_neg))
    return 10 ** avg_neg
def wppx(sentence):
    """Word-level perplexity of `sentence` under the module-level KenLM model.

    `sentence` is one full sentence without <s> or </s> markers; the word
    count is incremented by one to account for the implicit </s>.
    """
    n_words = len(sentence.split()) + 1
    return 10.0 ** (-model.score(sentence) / n_words)
def ln2lt(lst):
    """Rescale natural-log values to base-10 log values, element-wise."""
    linear = np.exp(lst)
    return np.log10(linear)
def cppx_logits(scores, corpus):
    """Word-normalised perplexity from raw per-character probabilities.

    The per-character base-10 cross-entropy is printed, then rescaled by the
    character/word ratio before exponentiating.
    """
    n_words = len(corpus.split()) + 1
    n_chars = len(corpus)
    ip_char = -np.sum(np.log10(scores)) / n_chars
    print(ip_char, 10 ** ip_char)
    return 10 ** (ip_char * n_chars / n_words)
def cppx_logprob(scores, corpus, convert=True):
    """Word-normalised perplexity from per-character log scores.

    `scores` are natural-log values when `convert` is True (rescaled to
    log10 via ln2lt); otherwise they are used as-is.
    """
    n_words = len(corpus.split()) + 1
    n_chars = len(corpus)
    per_char = ln2lt(scores) if convert else scores
    ip_char = -np.sum(per_char) / n_chars
    print(ip_char, 10 ** ip_char)
    return 10 ** (ip_char * n_chars / n_words)
gPATH = './slgru_epoch120.model'  # saved GRU (RNN1) weights
rPATH = './slrnn_epoch90.model'   # saved vanilla-RNN (RNN2) weights
gru = RNN1(ALPHASIZE, INTERNALSIZE, ALPHASIZE)
rnn = RNN2(ALPHASIZE, INTERNALSIZE, ALPHASIZE)
# scores = get_scores(s)
# gsc = pt_scores(s, gru, gPATH)
# rsc = pt_scores(s, rnn, rPATH)
| {"/pytorch/trl/qNet.py": ["/pytorch/trl/qGame.py"]} |
72,669 | u1273400/rpynotes | refs/heads/master | /ok-rnn/pytorch_train_LOCAL_14369.py | # encoding: UTF-8
# Copyright 2017 Google.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import tensorflow as tf
# from tensorflow.contrib import layers
# from tensorflow.contrib import rnn # rnn stuff temporarily in contrib, moving back to code in TF 1.1
import os
import time
import math
import numpy as np
import my_txtutils as txt
import json
import torch
import datetime
# tf.set_random_seed(0)
# model parameters
#
# Usage:
# Training only:
# Leave all the parameters as they are
# Disable validation to run a bit faster (set validation=False below)
# You can follow progress in Tensorboard: tensorboard --log-dir=log
# Training and experimentation (default):
# Keep validation enabled
# You can now play with the parameters anf follow the effects in Tensorboard
# A good choice of parameters ensures that the testing and validation curves stay close
# To see the curves drift apart ("overfitting") try to use an insufficient amount of
# training data (shakedir = "shakespeare/t*.txt" for example)
#
# ---- hyperparameters ----
SEQLEN = 30          # unroll length in characters
BATCHSIZE = 200
ALPHASIZE = txt.ALPHASIZE
INTERNALSIZE = 512   # hidden state size
NLAYERS = 3
learning_rate = 0.001  # fixed learning rate (NOTE(review): reassigned to 0.005 further down)
dropout_pkeep = 0.8  # some dropout (NOTE(review): not referenced by the visible training code)
nb_epoch = 75
VALI_SEQLEN = 30
# load data, either shakespeare, or the Python source of Tensorflow itself
shakedir = "txts/*.txt"
#shakedir = "../tensorflow/**/*.py"
codetext, valitext, bookranges = txt.read_data_files(shakedir, validation=True)
# display some stats on the data
epoch_size = len(codetext) // (BATCHSIZE * SEQLEN)  # minibatches per epoch
txt.print_data_stats(len(codetext), len(valitext), epoch_size)
if not os.path.exists("checkpoints"):
    os.mkdir("checkpoints")
# model
import torch.nn as nn
class RNN(nn.Module):
    """Character-level recurrent unit: GRU cell plus linear / log-softmax readout."""

    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.i2h = nn.GRUCell(input_size, hidden_size)
        self.i2o = nn.Linear(input_size + hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        # readout uses the input concatenated with the *previous* hidden state
        joint = torch.cat((input, hidden), 1)
        next_hidden = self.i2h(input, hidden)
        log_probs = self.softmax(self.i2o(joint))
        return log_probs, next_hidden

    def initHidden(self):
        # NOTE: relies on the module-level BATCHSIZE and device globals
        return torch.zeros(BATCHSIZE, self.hidden_size, device=device)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('using gpu..' if torch.cuda.is_available() else 'using cpu..')
rnn = RNN(ALPHASIZE, INTERNALSIZE, ALPHASIZE)  # char-in / char-out language model
rnn.to(device)
criterion = nn.NLLLoss()  # pairs with the model's LogSoftmax output
# training fn
learning_rate = 0.005 # If you set this too high, it might explode. If too low, it might not learn
# NOTE(review): the line above overrides the learning_rate = 0.001 set earlier.
def train(category_tensor, line_tensor):
    """Run one manual-SGD training step over a whole minibatch sequence.

    Args:
        category_tensor: LongTensor [BATCH, SEQ] of target character codes.
        line_tensor: one-hot input tensor [SEQ, BATCH, ALPHASIZE].

    Returns:
        (log-prob outputs [BATCH, SEQ, ALPHASIZE], scalar loss).

    Uses the module-level `rnn`, `criterion` and `learning_rate`, and updates
    the model parameters in place.
    """
    hidden = rnn.initHidden()
    rnn.zero_grad()
    lint = []
    for i in range(line_tensor.size()[0]):
        output, hidden = rnn(line_tensor[i], hidden)
        lint.append(output)
    # [SEQ, BATCH, ALPHA] -> [BATCH, ALPHA, SEQ], the layout NLLLoss expects.
    # NOTE(review): shadows the builtin `input`.
    input = torch.stack(lint).transpose(0,1).transpose(1,2)
    # print(f'is={input.size()}, cs={category_tensor.size()}')
    # print(f'is[1:]={input.size()[1:]}, cs[1:]={category_tensor.size()[1:]}')
    # print(f'is[2:]={input.size()[2:]}, cs[2:]={category_tensor.size()[2:]}')
    loss = criterion(input, category_tensor)
    loss.backward()
    # Add parameters' gradients to their values, multiplied by learning rate
    for p in rnn.parameters():
        p.data.add_(p.grad.data, alpha=-learning_rate)
    return torch.stack(lint).transpose(0,1), loss.item()
# init train
def lin2txt(lt):
    """Decode a sequence of alphabet codes back to text; code 0 (padding) is dropped."""
    chars = []
    for c in lt:
        if c != 0:
            chars.append(chr(txt.convert_to_alphabet(c)))
    return ''.join(chars)
def mb2t(rows):
    """One-hot encode a [BATCH, SEQ] array of character codes as a
    [SEQ, BATCH, ALPHASIZE] float tensor on the module-level device."""
    rows=rows.transpose()
    tensor = torch.zeros(rows.shape[0], rows.shape[1], ALPHASIZE, device=device)
    for (i, j), letter_code in np.ndenumerate(rows):
        tensor[i, j, letter_code] = 1
    return tensor
print_every = 250  # console-report interval (iterations)
plot_every = 100   # loss-sampling interval (iterations)
# Keep track of losses for plotting
current_loss = 0
all_losses = []
vloss = []  # validation losses
iter=0  # NOTE(review): shadows the builtin `iter`
def timeSince(since):
    """Elapsed wall-clock time since `since` (a time.time() stamp), as 'Xm Ys'."""
    elapsed = time.time() - since
    minutes, seconds = divmod(elapsed, 60)
    return '%dm %ds' % (minutes, seconds)
# ---- main training loop: nb_epoch epochs of (BATCHSIZE x SEQLEN) minibatches ----
start = time.time()
for x, y_, epoch in txt.rnn_minibatch_sequencer(codetext, BATCHSIZE, SEQLEN, nb_epochs=nb_epoch):
    #category, line, category_tensor, line_tensor = randomTrainingExample()
    category = [lin2txt(l) for l in y_]  # target strings (display only)
    lines = [lin2txt(l) for l in x]      # input strings (display only)
    category_tensor=mb2t(y_)  # NOTE(review): unused; train() receives the Long targets below
    line_tensor=mb2t(x)
    output, loss = train(torch.tensor(y_, device=device, dtype=torch.long), line_tensor)
    current_loss += loss
    # Print iter number, loss, name and guess
    if iter % print_every == 0:
        # greedy decode of the model output for a readable progress sample
        guess = [lin2txt([ch.argmax(dim=0) for ch in line]) for line in output]
        for i in range(2):
            elapsed_time = time.time() - start
            tss = str(datetime.timedelta(seconds=elapsed_time)) # time since start string
            if epoch > 0:
                speed = epoch/elapsed_time
                eta = (nb_epoch-epoch)/speed
                sspeed = speed*60*60  # epochs per hour
                seta = str(datetime.timedelta(seconds=int(eta)))
                stats = f'average epoch rate per hr = %3.2f, eta = {seta}'%(sspeed)
            else:
                stats ='initialising stats..'
            correct = '✓' if guess[i] == category[i] else '✗ %s' % stats
            print('epoch %d of %d (%s) %.4f %s / %s %s' % (epoch+1, nb_epoch, tss, loss, lines[i], guess[0], correct))
        PATH = './slgru_epoch120.model'
        torch.save(rnn.state_dict(), PATH)  # checkpoint on every report
    # Add current loss avg to list of losses
    if iter % plot_every == 0:
        all_losses.append(current_loss / plot_every)
        current_loss = 0
        vali_x, vali_y, _ = next(txt.rnn_minibatch_sequencer(valitext, BATCHSIZE, VALI_SEQLEN, 1)) # all data in 1 batch
        line_tensor = mb2t(vali_x)
        # NOTE(review): validation reuses train(), so it also updates the weights.
        output, loss = train(torch.tensor(vali_y, device=device, dtype=torch.long), line_tensor)
        vloss.append(loss)
        with open('vloss.json', 'w') as f:
            json.dump(str({"vloss":vloss,"tloss":all_losses}),f)
    iter += 1
# persist the validation-loss history once training finishes
with open('pytorch_train.json', 'w') as f:
    json.dump(vloss, f)
| {"/pytorch/trl/qNet.py": ["/pytorch/trl/qGame.py"]} |
72,670 | u1273400/rpynotes | refs/heads/master | /pytorch/trl/tictactoeGame.py | import random
class TicTacGame():
    """Random-play tic-tac-toe simulator.

    The board is a flat list of 9 cells holding 'X', 'O', or '-' (empty).
    Players alternate uniformly random moves, X first.
    """

    def __init__(self):
        # All 8 winning index triples: rows, columns, diagonals.
        self.winstates = [
            [0, 1, 2],
            [3, 4, 5],
            [6, 7, 8],
            [0, 3, 6],
            [1, 4, 7],
            [2, 5, 8],
            [0, 4, 8],
            [2, 4, 6],
        ]
        self.gameState = ["-", "-", "-", "-", "-", "-", "-", "-", "-"]
        self.XTurnToPlay = True
        self.winner = "TicTacToe Demo"
        # Index into winstates of the winning triple, or -1 if none yet.
        self.windex = -1

    def gameLoop(self):
        """Play one full random game on THIS instance until a win or a draw.

        Bug fix: this method previously constructed a brand-new TicTacGame and
        played that, leaving `self` untouched — so callers could never inspect
        the finished game. It now resets and mutates `self`, matching the
        sibling implementation in qGame.py.
        """
        self.reset()
        self.getNextState()
        while not self.isBoardFilled():
            self.getNextState()
            if self.isWinState():
                break
        if self.isWinState():
            # The turn flag has already flipped, so the *previous* player won.
            print(("O" if self.XTurnToPlay else "X"), "wins")
        else:
            print("game was a draw")
        self.printState()

    def reset(self):
        """Restore the initial empty-board state."""
        self.gameState = ["-", "-", "-", "-", "-", "-", "-", "-", "-"]
        self.XTurnToPlay = True
        self.winner = "TicTacToe Demo"
        self.windex = -1

    def gamePlay(self):
        """Advance the game by one random move, restarting first if it is over."""
        if self.isWinState() or self.isBoardFilled():
            self.reset()
        self.getNextState()

    def getNextState(self):
        """Place the current player's mark on a uniformly random empty cell."""
        v = random.randint(0, 8)
        while self.gameState[v] != "-":
            v = random.randint(0, 8)
        self.gameState[v] = "X" if self.XTurnToPlay else "O"
        self.XTurnToPlay = not self.XTurnToPlay
        # After flipping the flag, the player who just moved is the opposite one.
        self.winner = (("O" if self.XTurnToPlay else "X") + " wins") if self.isWinState() else ("game was a draw" if self.isBoardFilled() else self.winner)
        # print('this.windex=${this.windex}');
        # this.testWinState();

    def isWinState(self):
        """Return True if any winning triple is occupied by a single player.

        Side effect: records the index of the winning triple in self.windex.
        """
        winstate = False
        for i in range(len(self.winstates)):
            if (self.gameState[self.winstates[i][0]] != "-" and self.gameState[self.winstates[i][0]]
                    == self.gameState[self.winstates[i][1]] and
                    self.gameState[self.winstates[i][1]] == self.gameState[self.winstates[i][2]]):
                self.windex = i
                winstate = True
                break
        return winstate

    def isBoardFilled(self):
        """Return True if no empty cell remains."""
        return "-" not in self.gameState

    def printState(self):
        """Print the board as three rows of three characters."""
        sb = ""
        for i in range(3):
            for j in range(3):
                sb += self.gameState[i * 3 + j]
            print(sb)
            sb = ""
def main():
    """Entry point: run a single random demo game."""
    game = TicTacGame()
    game.gameLoop()


if __name__ == '__main__':
    main()
'''
def testWinState(self)
{
for (var i=0;i < this.winstates.Length;i++){
// Console.WriteLine( @ "${this.winstates[i][0]}=${this.gameState[this.winstates[i][0]]}:${this.gameState[this.winstates[i][0]]!=" - "}");
// Console.WriteLine( @ "${this.winstates[i][1]}=${this.gameState[this.winstates[i][1]]}:${this.gameState[this.winstates[i][1]]!="-"}");
// Console.WriteLine( @ "${this.winstates[i][2]}=${this.gameState[this.winstates[i][2]]}:${this.gameState[this.winstates[i][2]]!="-"}");
}
}
''' | {"/pytorch/trl/qNet.py": ["/pytorch/trl/qGame.py"]} |
72,671 | u1273400/rpynotes | refs/heads/master | /ok-rnn/ok-rnn-step7.py | # training loop
# Training loop: each minibatch is (inputs x, targets y_ shifted by one char).
for x, y_, epoch in txt.rnn_minibatch_sequencer(codetext, BATCHSIZE, SEQLEN, nb_epochs=10):

    # train on one minibatch, threading the recurrent state through `istate`
    feed_dict = {X: x, Y_: y_, Hin: istate, lr: learning_rate, pkeep: dropout_pkeep, batchsize: BATCHSIZE}
    _, y, ostate = sess.run([train_step, Y, H], feed_dict=feed_dict)

    # log training data for Tensorboard: display a mini-batch of sequences (every 50 batches)
    if step % _50_BATCHES == 0:
        feed_dict = {X: x, Y_: y_, Hin: istate, pkeep: 1.0, batchsize: BATCHSIZE}  # no dropout for evaluation
        y, l, bl, acc, smm = sess.run([Y, seqloss, batchloss, accuracy, summaries], feed_dict=feed_dict)
        txt.print_learning_learned_comparison(x, y, l, bookranges, bl, acc, epoch_size, step, epoch)
        summary_writer.add_summary(smm, step)

    # run a validation step every 50 batches
    # The validation text should be a single sequence but that's too slow (1s per 1024 chars!),
    # so we cut it up and batch the pieces (slightly inaccurate)
    # tested: validating with 5K sequences instead of 1K is only slightly more accurate, but a lot slower.
    if step % _50_BATCHES == 0 and len(valitext) > 0:
        VALI_SEQLEN = 1*1024  # Sequence length for validation. State will be wrong at the start of each sequence.
        bsize = len(valitext) // VALI_SEQLEN
        txt.print_validation_header(len(codetext), bookranges)
        vali_x, vali_y, _ = next(txt.rnn_minibatch_sequencer(valitext, bsize, VALI_SEQLEN, 1))  # all data in 1 batch
        vali_nullstate = np.zeros([bsize, INTERNALSIZE*NLAYERS])
        feed_dict = {X: vali_x, Y_: vali_y, Hin: vali_nullstate, pkeep: 1.0,  # no dropout for validation
                     batchsize: bsize}
        ls, acc, smm = sess.run([batchloss, accuracy, summaries], feed_dict=feed_dict)
        txt.print_validation_stats(ls, acc)
        # save validation data for Tensorboard
        validation_writer.add_summary(smm, step)

    # display a short text generated with the current weights and biases (every 150 batches)
    if step // 3 % _50_BATCHES == 0:
        txt.print_text_generation_header()
        ry = np.array([[txt.convert_from_alphabet(ord("K"))]])  # seed character
        rh = np.zeros([1, INTERNALSIZE * NLAYERS])
        for k in range(1000):
            ryo, rh = sess.run([Yo, H], feed_dict={X: ry, pkeep: 1.0, Hin: rh, batchsize: 1})
            # Sample more freely (topn=10) early on, more greedily (topn=2) later.
            rc = txt.sample_from_probabilities(ryo, topn=10 if epoch <= 1 else 2)
            print(chr(txt.convert_to_alphabet(rc)), end="")
            ry = np.array([[rc]])
        txt.print_text_generation_footer()

    # save a checkpoint (every 500 batches)
    if step // 10 % _50_BATCHES == 0:
        saved_file = saver.save(sess, 'checkpoints/rnn_train_' + timestamp, global_step=step)
        print("Saved file: " + saved_file)

    # display progress bar
    progress.step(reset=step % _50_BATCHES == 0)

    # loop state around: the last RNN state of this batch seeds the next one
    istate = ostate
    step += BATCHSIZE * SEQLEN
72,672 | u1273400/rpynotes | refs/heads/master | /interspeech2019/tools/config_updater.py | #!/usr/bin/env python3
from pathlib import Path
from traitlets.config.manager import BaseJSONConfigManager

# Per-user Jupyter notebook-extension configuration directory.
path = Path.home() / ".jupyter" / "nbconfig"
cm = BaseJSONConfigManager(config_dir=str(path))
# Update the RISE slideshow settings: enable scrolling and the chalkboard
# plugin, and disable slide transitions.
cm.update(
    "rise",
    {
        "scroll": True,
        "transition": "none",
        "enable_chalkboard": True,
    }
)
72,673 | u1273400/rpynotes | refs/heads/master | /ok-rnn/ok-rnn-step4.py | # using a NLAYERS=3 layers of GRU cells, unrolled SEQLEN=30 times
# dynamic_rnn infers SEQLEN from the size of the inputs Xo
# How to properly apply dropout in RNNs: see README.md
cells = [rnn.GRUCell(INTERNALSIZE) for _ in range(NLAYERS)]
# "naive dropout" implementation
dropcells = [rnn.DropoutWrapper(cell,input_keep_prob=pkeep) for cell in cells]
multicell = rnn.MultiRNNCell(dropcells, state_is_tuple=False)
multicell = rnn.DropoutWrapper(multicell, output_keep_prob=pkeep) # dropout for the softmax layer
| {"/pytorch/trl/qNet.py": ["/pytorch/trl/qGame.py"]} |
72,674 | u1273400/rpynotes | refs/heads/master | /ok-rnn/ok-rnn-step5.py | Yr, H = tf.nn.dynamic_rnn(multicell, Xo, dtype=tf.float32, initial_state=Hin)
# Yr: [ BATCHSIZE, SEQLEN, INTERNALSIZE ]
# H: [ BATCHSIZE, INTERNALSIZE*NLAYERS ] # this is the last state in the sequence
H = tf.identity(H, name='H') # just to give it a name
# Softmax layer implementation:
# Flatten the first two dimension of the output [ BATCHSIZE, SEQLEN, ALPHASIZE ] => [ BATCHSIZE x SEQLEN, ALPHASIZE ]
# then apply softmax readout layer. This way, the weights and biases are shared across unrolled time steps.
# From the readout point of view, a value coming from a sequence time step or a minibatch item is the same thing.
Yflat = tf.reshape(Yr, [-1, INTERNALSIZE]) # [ BATCHSIZE x SEQLEN, INTERNALSIZE ]
Ylogits = layers.linear(Yflat, ALPHASIZE) # [ BATCHSIZE x SEQLEN, ALPHASIZE ]
Yflat_ = tf.reshape(Yo_, [-1, ALPHASIZE]) # [ BATCHSIZE x SEQLEN, ALPHASIZE ]
loss = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Yflat_) # [ BATCHSIZE x SEQLEN ]
loss = tf.reshape(loss, [batchsize, -1]) # [ BATCHSIZE, SEQLEN ]
Yo = tf.nn.softmax(Ylogits, name='Yo') # [ BATCHSIZE x SEQLEN, ALPHASIZE ]
Y = tf.argmax(Yo, 1) # [ BATCHSIZE x SEQLEN ]
Y = tf.reshape(Y, [batchsize, -1], name="Y") # [ BATCHSIZE, SEQLEN ]
train_step = tf.train.AdamOptimizer(lr).minimize(loss)
| {"/pytorch/trl/qNet.py": ["/pytorch/trl/qGame.py"]} |
72,675 | u1273400/rpynotes | refs/heads/master | /ok-rnn/process_results2.py | import my_txtutils
import numpy as np
import math
import torch
import json
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# product = reduce((lambda x, y: x * y), [1, 2, 3, 4])
with open('vloss.json') as f:
vloss = json.load(f)
vloss = json.loads(vloss.replace("'",'"'))
plt.figure()
plt.plot(vloss['vloss'])
plt.plot(vloss['tloss'])
with open('vloss_main.json') as f:
main = json.load(f)
main = json.loads(main.replace("'",'"'))
plt.plot(main['acc'])
plt.figure()
plt.plot(main['loss'])
| {"/pytorch/trl/qNet.py": ["/pytorch/trl/qGame.py"]} |
72,676 | u1273400/rpynotes | refs/heads/master | /pytorch/trl/qNet.py | import torch
from torch import nn
import torch.optim as optim
from pytorch.trl.qGame import TicTacGame
INPUT_SIZE = 9   # one input feature per board cell
OUTPUT_SIZE = 9  # one score per candidate move (board cell)


class TicTacNet(nn.Module):
    """Small MLP mapping a 9-cell board encoding to a 9-way move score."""

    def __init__(self):
        super().__init__()
        self.dl1 = nn.Linear(INPUT_SIZE, 36)
        self.dl2 = nn.Linear(36, 36)
        self.output_layer = nn.Linear(36, OUTPUT_SIZE)

    def forward(self, x):
        """Return per-cell move scores in (0, 1) for board encoding `x`."""
        x = self.dl1(x)
        x = torch.relu(x)
        x = self.dl2(x)
        x = torch.relu(x)
        x = self.output_layer(x)
        # NOTE(review): these sigmoid outputs are fed to nn.CrossEntropyLoss in
        # train(), but CrossEntropyLoss expects raw (unnormalized) logits —
        # squashing first compresses the score range; confirm intent.
        x = torch.sigmoid(x)
        return x

    @staticmethod
    def train():
        """Train a fresh TicTacNet on freshly simulated random games.

        NOTE(review): this shadows nn.Module.train(); call it only as
        TicTacNet.train(), never on an instance expecting mode switching.
        """
        net=TicTacNet()
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
        # testdata = []
        # testlabels = []
        running_loss = 0.0
        for i in range(20000):  # loop over the dataset multiple times
            # get the inputs; each minibatch simulates 50 new random games
            inputs, labels, qVal, lVal, qTest, lTest = TicTacGame.minibatch()
            # testdata += qTest
            # testlabels += lTest
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            # print statistics
            running_loss += loss.item()
            if i % 1000 == 999:  # print every 1000 mini-batches
                # running_loss is never reset, so dividing by (i+1) reports the
                # average loss over ALL mini-batches so far, not the last 1000.
                print('[mini-batch %5d] loss: %.3f' %
                      (i + 1, running_loss / (i+1)))
        print('Finished Training')
72,677 | u1273400/rpynotes | refs/heads/master | /pytorch/trl/qGame.py | import random
import torch
class TicTacGame():
    """Random-play tic-tac-toe that records game states as Q-learning data.

    Each X move is logged as a board snapshot with the chosen cell marked '*';
    harvested snapshots become (state, action) pairs split into train /
    validation / test sets by game index.
    """

    def __init__(self):
        # All 8 winning index triples: rows, columns, diagonals.
        self.winstates = [
            [0, 1, 2],
            [3, 4, 5],
            [6, 7, 8],
            [0, 3, 6],
            [1, 4, 7],
            [2, 5, 8],
            [0, 4, 8],
            [2, 4, 6],
        ]
        self.gameState = ["-", "-", "-", "-", "-", "-", "-", "-", "-"]
        self.XTurnToPlay = True
        self.winner = "TicTacToe Demo"
        self.windex = -1
        # Per-game log of board snapshots taken just before each X move.
        self.gameHistory = []
        # Accumulated datasets: q* hold board encodings, l* hold action labels.
        self.qTrain = []
        self.qVal = []
        self.qTest = []
        self.lTrain = []
        self.lVal = []
        self.lTest = []
        # Counts completed games; drives the train/val/test split in qUpdate.
        self.gameCount = 0
        self.pindex = -1

    def gameLoop(self):
        """Play one full random game on this instance, then harvest its states."""
        self.reset()
        self.getNextState()
        while not self.isBoardFilled():
            self.getNextState()
            if self.isWinState():
                break
        # if self.isWinState():
        #     print(("O" if self.XTurnToPlay else "X"), "wins")
        # else:
        #     print("game was a draw")
        # self.printState()
        self.qUpdate()
        # print("game count =", self.gameCount)

    def qUpdate(self):
        """Convert the recorded history into (state, action) training samples.

        Skips games where the board is not full and it is X's turn, i.e. O made
        the final (winning) move.
        """
        qHist = []
        if not self.isBoardFilled() and self.XTurnToPlay:
            return;
        for i in range(len(self.gameHistory)):
            qState = []
            # Pop snapshots from the end, so samples come out in reverse move order.
            gState = self.gameHistory.pop()
            for gp in gState:
                # Encode each cell as (feature, is-chosen-action):
                # X -> (1, 0), O -> (-1, 0), empty -> (0, 0); the '*' cell (the
                # move X is about to take) is encoded (1, 1).
                if gp == 'X':
                    qState.append((1, 0))
                elif gp == 'O':
                    qState.append((-1, 0))
                elif gp == '-':
                    qState.append((0, 0))
                else:
                    qState.append((1, 1))
            # qHist[0]: 9-element feature vector; qHist[1]: index of the chosen cell.
            qHist = [[f for f, c in qState]
                , [c for f, c in qState].index(1)]
            # print(qHist[0])
            # Split by game index: every 5th game (unless also a 45th) -> test,
            # every 9th -> validation, the rest -> training.
            if self.gameCount % 5 == 0 and self.gameCount % 45 != 0:
                self.qTest.append(qHist[0])
                self.lTest.append(qHist[1])
            elif self.gameCount % 9 == 0:
                self.qVal.append(qHist[0])
                self.lVal.append(qHist[1])
            elif self.gameCount % 5 != 0 or self.gameCount % 9 != 0:
                self.qTrain.append(qHist[0])
                self.lTrain.append(qHist[1])
        self.gameCount += 1

    def reset(self):
        """Restore the initial empty-board state and clear the move history."""
        self.gameState = ["-", "-", "-", "-", "-", "-", "-", "-", "-"]
        self.XTurnToPlay = True
        self.winner = "TicTacToe Demo"
        self.windex = -1
        self.gameHistory = []
        self.pindex = -1

    def gamePlay(self):
        """Advance the game by one random move, restarting first if it is over."""
        if self.isWinState() or self.isBoardFilled():
            self.reset()
        self.getNextState()

    def getNextState(self):
        """Make one uniformly random move; log a pre-move snapshot for X."""
        v = random.randint(0, 8)
        while self.gameState[v] != "-":
            v = random.randint(0, 8)
        if self.XTurnToPlay:
            # Mark the chosen cell '*' so the snapshot records the action.
            self.gameState[v] = '*'
            # print(self.gameState)
            self.gameHistory.append([x for x in self.gameState])
            self.gameState[v] = 'X'
        else:
            self.gameState[v] = "O"
        self.XTurnToPlay = not self.XTurnToPlay
        self.winner = (("O" if self.XTurnToPlay else "X") + " wins") if self.isWinState() else (
            "game was a draw" if self.isBoardFilled() else self.winner)
        # this.testWinState();

    def isWinState(self):
        """Return True if any winning triple is occupied by a single player."""
        winstate = False
        for i in range(len(self.winstates)):
            if (self.gameState[self.winstates[i][0]] != "-" and self.gameState[self.winstates[i][0]]
                    == self.gameState[self.winstates[i][1]] and
                    self.gameState[self.winstates[i][1]] == self.gameState[self.winstates[i][2]]):
                self.windex = i
                winstate = True
                break
        return winstate

    def isBoardFilled(self):
        """Return True if no empty cell remains."""
        return "-" not in self.gameState

    def printState(self):
        """Print the board as three rows of three characters."""
        sb = ""
        for i in range(3):
            for j in range(3):
                sb += self.gameState[i * 3 + j]
            print(sb)
            sb = ""

    @staticmethod
    def minibatch():
        """Simulate 50 random games and return train/val/test torch tensors."""
        ttt=TicTacGame()
        for i in range(50):
            ttt.gameLoop()
        return torch.tensor(ttt.qTrain, dtype=torch.float), torch.tensor(ttt.lTrain, dtype=torch.long), \
               torch.tensor(ttt.qVal, dtype=torch.float), torch.tensor(ttt.lVal, dtype=torch.long), \
               torch.tensor(ttt.qTest, dtype=torch.float), torch.tensor(ttt.lTest, dtype=torch.long)
def main():
    """Run one random game as a quick smoke test of the data pipeline."""
    demo = TicTacGame()
    demo.gameLoop()


if __name__ == '__main__':
    main()
| {"/pytorch/trl/qNet.py": ["/pytorch/trl/qGame.py"]} |
72,678 | u1273400/rpynotes | refs/heads/master | /ok-rnn/ok-rnn-step6.py | # stats for display
# stats for display
seqloss = tf.reduce_mean(loss, 1)   # per-sequence mean loss: [ BATCHSIZE ]
batchloss = tf.reduce_mean(seqloss)  # scalar loss over the whole minibatch
# Fraction of characters predicted exactly right.
accuracy = tf.reduce_mean(tf.cast(tf.equal(Y_, tf.cast(Y, tf.uint8)), tf.float32))
loss_summary = tf.summary.scalar("batch_loss", batchloss)
acc_summary = tf.summary.scalar("batch_accuracy", accuracy)
summaries = tf.summary.merge([loss_summary, acc_summary])

# Init Tensorboard stuff. This will save Tensorboard information into a different
# folder at each run named 'log/<timestamp>/'. Two sets of data are saved so that
# you can compare training and validation curves visually in Tensorboard.
timestamp = str(math.trunc(time.time()))
summary_writer = tf.summary.FileWriter("log/" + timestamp + "-training")
validation_writer = tf.summary.FileWriter("log/" + timestamp + "-validation")

# Init for saving models. They will be saved into a directory named 'checkpoints'.
# Only the last checkpoint is kept.
if not os.path.exists("checkpoints"):
    os.mkdir("checkpoints")
saver = tf.train.Saver(max_to_keep=1000)

# for display: init the progress bar
DISPLAY_FREQ = 50
_50_BATCHES = DISPLAY_FREQ * BATCHSIZE * SEQLEN
progress = txt.Progress(DISPLAY_FREQ, size=111+2, msg="Training on next "+str(DISPLAY_FREQ)+" batches")

# init
istate = np.zeros([BATCHSIZE, INTERNALSIZE*NLAYERS])  # initial zero input state
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
step = 0
72,679 | u1273400/rpynotes | refs/heads/master | /ok-rnn/ok-rnn-step3.py | #
#
# the model (see FAQ in README.md)
#
lr = tf.placeholder(tf.float32, name='lr')        # learning rate
pkeep = tf.placeholder(tf.float32, name='pkeep')  # dropout parameter (keep probability)
batchsize = tf.placeholder(tf.int32, name='batchsize')

# inputs
X = tf.placeholder(tf.uint8, [None, None], name='X')  # [ BATCHSIZE, SEQLEN ]
Xo = tf.one_hot(X, ALPHASIZE, 1.0, 0.0)               # [ BATCHSIZE, SEQLEN, ALPHASIZE ]
# expected outputs = same sequence shifted by 1 since we are trying to predict the next character
Y_ = tf.placeholder(tf.uint8, [None, None], name='Y_')  # [ BATCHSIZE, SEQLEN ]
Yo_ = tf.one_hot(Y_, ALPHASIZE, 1.0, 0.0)               # [ BATCHSIZE, SEQLEN, ALPHASIZE ]
# input state
Hin = tf.placeholder(tf.float32, [None, INTERNALSIZE*NLAYERS], name='Hin')  # [ BATCHSIZE, INTERNALSIZE * NLAYERS]
72,680 | u1273400/rpynotes | refs/heads/master | /boto.py | import boto
import os
AWS_ACCESS_KEY_ID=os.getenv('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY=os.getenv('AWS_SECRET_ACCESS_KEY')
s3=boto.s3.connect_to_region(
location,
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY
)
apps=s3.get_all_buckets()[0].name
b=s3.get_bucket(apps) | {"/pytorch/trl/qNet.py": ["/pytorch/trl/qGame.py"]} |
72,681 | u1273400/rpynotes | refs/heads/master | /interspeech2019/notebooks/interspeech2019_asr/custom.py | import chainer
import torch
from espnet.nets.asr_interface import ASRInterface
from espnet.nets.pytorch_backend.transformer.encoder import Encoder
from espnet.nets.pytorch_backend.transformer.decoder import Decoder
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask
from espnet.nets.pytorch_backend.transformer.label_smoothing_loss import LabelSmoothingLoss
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask
from espnet.nets.pytorch_backend.nets_utils import th_accuracy
class Reporter(chainer.Chain):
    """Thin chainer reporter used to log training metrics (loss, accuracy)."""
    def report(self, **kwargs):
        # Forward every keyword metric to chainer's global reporter.
        chainer.reporter.report(kwargs, self)
class ASRTransformer(ASRInterface, torch.nn.Module):
    """Transformer-based ASR model wrapping espnet's encoder/decoder."""

    @staticmethod
    def add_arguments(parser):
        """Register model-specific command-line options on `parser`."""
        parser.add_argument("--label-smoothing", default=0.0, type=float)
        return parser

    def __init__(self, idim, odim, args=None):
        torch.nn.Module.__init__(self)
        self.encoder = Encoder(idim, input_layer="linear")
        self.decoder = Decoder(odim)
        # Label-smoothed cross-entropy over odim classes; -1 is the ignore id.
        self.criterion = LabelSmoothingLoss(odim, -1, args.label_smoothing, True)
        # <sos> and <eos> share the last vocabulary index.
        self.sos = odim - 1
        self.eos = odim - 1
        self.ignore_id=-1
        self.subsample = [0]
        self.reporter = Reporter()

    # for training
    def forward(self, xs_pad, ilens, ys_pad):
        """Compute scalar loss for backprop"""
        # Mask out padded frames in the source before encoding.
        src_mask = (~make_pad_mask(ilens.tolist())).to(xs_pad.device).unsqueeze(-2)
        hs_pad, hs_mask = self.encoder(xs_pad, src_mask)
        ys_in_pad, ys_out_pad = self.add_sos_eos(ys_pad)
        ys_mask = self.target_mask(ys_in_pad)
        pred_pad, pred_mask = self.decoder(ys_in_pad, ys_mask, hs_pad, hs_mask)
        loss = self.criterion(pred_pad, ys_out_pad)
        self.acc = th_accuracy(pred_pad.view(-1, pred_pad.size(-1)), ys_out_pad, ignore_label=self.ignore_id)
        self.reporter.report(loss=loss, acc=self.acc)
        return loss

    def add_sos_eos(self, ys_pad):
        """Prepend <sos> to decoder inputs and append <eos> to targets."""
        from espnet.nets.pytorch_backend.nets_utils import pad_list
        eos = ys_pad.new([self.eos])
        sos = ys_pad.new([self.sos])
        ys = [y[y != self.ignore_id] for y in ys_pad]  # parse padded ys
        ys_in = [torch.cat([sos, y], dim=0) for y in ys]
        ys_out = [torch.cat([y, eos], dim=0) for y in ys]
        return pad_list(ys_in, self.eos), pad_list(ys_out, self.ignore_id)

    def target_mask(self, ys_in_pad):
        """Combine the target padding mask with a causal (subsequent) mask."""
        ys_mask = ys_in_pad != self.ignore_id
        m = subsequent_mask(ys_mask.size(-1), device=ys_mask.device).unsqueeze(0)
        return ys_mask.unsqueeze(-2) & m

    # for decoding
    def encode(self, feat):
        """Encode speech feature."""
        return self.encoder(feat.unsqueeze(0), None)[0][0]

    def scorers(self):
        """Scorer used in beam search"""
        return {"decoder": self.decoder}
72,682 | u1273400/rpynotes | refs/heads/master | /ok-rnn/ok-rnn-step1.py | # encoding: UTF-8
# Copyright 2017 Google.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib import rnn  # rnn stuff temporarily in contrib, moving back to code in TF 1.1
import os
import time
import math
import numpy as np
import my_txtutils as txt
tf.set_random_seed(0)  # deterministic graph-level randomness

# model parameters
#
# Usage:
#   Training only:
#         Leave all the parameters as they are
#         Disable validation to run a bit faster (set validation=False below)
#         You can follow progress in Tensorboard: tensorboard --log-dir=log
#   Training and experimentation (default):
#         Keep validation enabled
#         You can now play with the parameters anf follow the effects in Tensorboard
#         A good choice of parameters ensures that the testing and validation curves stay close
#         To see the curves drift apart ("overfitting") try to use an insufficient amount of
#         training data (shakedir = "shakespeare/t*.txt" for example)
#
SEQLEN = 30            # unrolled sequence length
BATCHSIZE = 200
ALPHASIZE = txt.ALPHASIZE
INTERNALSIZE = 512     # GRU internal state size
NLAYERS = 3
learning_rate = 0.001  # fixed learning rate
dropout_pkeep = 0.8    # some dropout

# load data, either shakespeare, or the Python source of Tensorflow itself
shakedir = "txts/*.txt"
#shakedir = "../tensorflow/**/*.py"
codetext, valitext, bookranges = txt.read_data_files(shakedir, validation=True)
72,705 | rballester/tthistograms | refs/heads/master | /ih_vs_noncumulative.py | """
Compression quality for histogram reconstruction: a) use a integral histogram vs b) use its derivative, i.e. the raw per-bin slices. a) is much better
"""
import numpy as np
import os
import matplotlib.pyplot as plt
from matplotlib import rc
rc('text', usetex=True)  # LaTeX rendering for figure labels
import pickle
import tt
import config
import time
import bruteforce
import ttrecipes as tr

# Fixed seed so the random query boxes are reproducible across runs.
np.random.seed(1)
def histogram_tt1(L_t, corners):
    """Reconstruct a box histogram from the non-cumulative level-set TT.

    For every spatial core, sum the slices covered by the box extent and chain
    the resulting matrices; contracting with the last (bin) core yields the
    B-element histogram. Returns (histogram, elapsed seconds).
    """
    tic = time.time()
    acc = np.array([[1]])
    all_cores = tt.vector.to_list(L_t)
    for (lo, hi), core in zip(corners, all_cores[:-1]):
        acc = acc.dot(np.sum(core[:, lo:hi, :], axis=1))
    histogram = np.squeeze(acc.dot(all_cores[-1][:, :, 0]))
    return histogram, time.time() - tic
def histogram_tt2(L_C_t, corners):
    """Reconstruct a box histogram from the integral-histogram TT.

    Inclusion-exclusion: each spatial core contributes the difference between
    its slice at the far face and at the near face of the box. Returns
    (histogram, elapsed seconds); the `to_list` call is excluded from timing,
    as in the original.
    """
    ih_cores = tt.vector.to_list(L_C_t)
    tic = time.time()
    acc = np.array([[1]])
    for (lo, hi), core in zip(corners, ih_cores[:-1]):
        acc = acc.dot(core[:, hi, :] - core[:, lo, :])
    histogram = np.squeeze(acc.dot(ih_cores[-1][:, :, 0]))
    return histogram, time.time() - tic
### Parameters
input_dataset = os.path.join(config.data_folder, 'channel_512_512_512_float32.raw')
B = 32    # number of histogram bins
P = 100   # number of random query boxes
I = 64    # cropped cube side
S = 32    # query box side
# targets1 = 10**np.linspace(-2, -4, 15)
# targets2 = 10**np.linspace(-7, -9, 15)
targets1 = 10**np.linspace(-0.75, -4, 25)  # TT-SVD error targets (non-cumulative)
targets2 = 10**np.linspace(-3, -7, 25)     # TT-SVD error targets (integral histogram)
###

# Load, crop to I^3, and quantize the volume into B integer bins.
X, basename = bruteforce.read_tensor(input_dataset)
X = X.astype(float)
shape = X.shape
N = X.ndim
X = X[:I, :I, :I]
X = ((X - X.min()) / (X.max() - X.min()) * (B - 1)).astype(int)

# Prepare corners: P random S^N boxes fully contained in the cropped cube.
clist = []
for p in range(P):
    corners = []
    for i in range(N):
        left = np.random.randint(0, I-S)
        right = left + S
        corners.append([left, right])
    clist.append(corners)

# Compress the non-cumulative level-set tensor at each target and cache to disk.
for target1 in targets1:
    print('TT1 - {}'.format(target1))
    name = basename + '_tt1_{}_{}_{:.8f}.pickle'.format(I, B, target1)
    name = os.path.join(config.data_folder, name)
    if not os.path.exists(name):
        L = bruteforce.create_levelset(X, B)
        L_t = tr.core.tt_svd(L, eps=target1)
        pickle.dump(L_t, open(name, 'wb'))

# Compress the integral histogram at each target, prepending a zero slice per
# spatial mode so corner index 0 is valid in difference queries; cache to disk.
for target2 in targets2:
    print('TT2 - {}'.format(target2))
    name = basename + '_tt2_{}_{}_{:.8f}.pickle'.format(I, B, target2)
    name = os.path.join(config.data_folder, name)
    if not os.path.exists(name):
        L_C = bruteforce.create_ih(X, B)
        L_C_t = tr.core.tt_svd(L_C, eps=target2)
        cores = tt.vector.to_list(L_C_t)
        cores[:-1] = [np.concatenate([np.zeros([core.shape[0], 1, core.shape[2]]), core], axis=1) for core in cores[:-1]]
        L_C_t = tt.vector.from_list(cores)
        pickle.dump(L_C_t, open(name, 'wb'))

# Benchmark variant 1 (non-cumulative): relative error and timing vs groundtruth.
# NOTE(review): calls bruteforce.histogram_tt1, not the local helper of the
# same name defined above — confirm they are the same implementation.
means_times_gt = []
nnz_tt1 = []
means_errors_tt1 = []
means_times_tt1 = []
for target1 in targets1:
    name = basename + '_tt1_{}_{}_{:.8f}.pickle'.format(I, B, target1)
    name = os.path.join(config.data_folder, name)
    L_t = pickle.load(open(name, 'rb'))
    nnz_tt1.append(len(L_t.core))
    times_gt = []
    errors_tt1 = []
    times_tt1 = []
    for corners in clist:
        gt, time_gt = bruteforce.box(X, B, corners)
        times_gt.append(time_gt)
        reco, time_tt1 = bruteforce.histogram_tt1(L_t, corners)
        errors_tt1.append(np.linalg.norm(reco - gt) / np.linalg.norm(gt))
        times_tt1.append(time_tt1)
    means_times_gt.append(np.mean(times_gt))
    means_errors_tt1.append(np.mean(errors_tt1))
    means_times_tt1.append(np.mean(times_tt1))
print(means_times_gt)
print(nnz_tt1)
print(means_errors_tt1)
print(means_times_tt1)
print()

# Benchmark variant 2 (integral histogram): same protocol.
means_times_gt = []
nnz_tt2 = []
means_errors_tt2 = []
means_times_tt2 = []
for target2 in targets2:
    name = basename + '_tt2_{}_{}_{:.8f}.pickle'.format(I, B, target2)
    name = os.path.join(config.data_folder, name)
    L_C_t = pickle.load(open(name, 'rb'))
    nnz_tt2.append(len(L_C_t.core))
    times_gt = []
    errors_tt2 = []
    times_tt2 = []
    for corners in clist:
        gt, time_gt = bruteforce.box(X, B, corners)
        times_gt.append(time_gt)
        reco, time_tt2 = bruteforce.histogram_tt2(L_C_t, corners)
        errors_tt2.append(np.linalg.norm(reco - gt) / np.linalg.norm(gt))
        times_tt2.append(time_tt2)
    means_times_gt.append(np.mean(times_gt))
    means_errors_tt2.append(np.mean(errors_tt2))
    means_times_tt2.append(np.mean(times_tt2))
print(means_times_gt)
print(nnz_tt2)
print(means_errors_tt2)
print(means_times_tt2)
print()

# Plot reconstruction error vs number of stored TT coefficients.
fig = plt.figure()
plt.plot(nnz_tt1, np.array(means_errors_tt1)*100, marker='o', label='Non-cumulative')
plt.plot(nnz_tt2, np.array(means_errors_tt2)*100, marker='o', label='Integral histogram')
plt.legend()
plt.xlabel('NNZ')
plt.ylabel(r'Relative error (\%)')
plt.show()
72,706 | rballester/tthistograms | refs/heads/master | /overlay_examples.py | """
Example box- and Gaussian-shaped histograms over a waterfall image
"""
import numpy as np
import config
import bruteforce
import util
import os
import ttrecipes as tr
import matplotlib.pyplot as plt
###
input_dataset = os.path.join(config.data_folder, 'waterfall_4096_4096_uint8.raw')
B = 64      # number of histogram bins
eps = 1e-3  # relative TT compression error target
# Load (or build and cache) the quantized image and its TT integral histogram.
basename, X, tth = util.prepare_dataset(config.data_folder, input_dataset, B, eps)
###
def plot_overlay(X, corners_list, pattern_list, color=(0.25, 0.25, 1), alpha=0.5):
    """Show the quantized grayscale image X with each query region overlaid.

    :param X: 2D array quantized to B bins (B is a module-level global)
    :param corners_list: list of [[i0, i1], [j0, j1]] query boxes
    :param pattern_list: per-box weight arrays (same shape as the box) used as
        the overlay's alpha channel
    :param color: RGB color of the overlay
    :param alpha: global opacity multiplier for the overlay
    """
    fig = plt.figure(figsize=(8, 8), frameon=False)
    mask = np.zeros(list(X.shape)+[4])  # RGBA overlay accumulated per region
    for corners, pattern in zip(corners_list, pattern_list):
        # pattern = pattern[:, :, np.newaxis]*np.array(color)[np.newaxis, np.newaxis, :]
        # Rescale bin indices back to 0-255 and replicate to 3 channels.
        Xrgb = np.repeat((X/B*255)[:, :, np.newaxis].astype(np.uint8), 3, axis=2)
        plt.imshow(Xrgb, vmin=0, vmax=255)
        plt.axis('off')
        # NOTE(review): indexing with a Python list of slices is deprecated in
        # newer NumPy (tuples are required) — confirm against the pinned version.
        mask[[slice(c[0], c[1]) for c in corners]+[slice(0, 3)]] = color
        mask[[slice(c[0], c[1]) for c in corners]+[3]] = pattern*alpha
    plt.imshow(mask, vmin=0, vmax=255)
    # plt.savefig(os.path.join(config.data_folder, 'overlaid.png'))
# Ratio of the dense integral histogram size to the TT core's nonzero count.
print('IH compression ratio:', np.prod(X.shape)*B / len(tth.tensor.core))
print(X.shape)
N = X.ndim

corners_list = []
pattern_list = []

### Box: rectangular region, exact IH difference query.
print('*** Box ***')
offset = [1024*3, int(1024*0.7)]
S = 512
shape = [S]*N
corners = np.array([[offset[0], offset[0]+S], [offset[1], offset[1]+S]])
gt, elapsed = bruteforce.box(X, B, corners)
print('Elapsed GT:', elapsed)
reco, elapsed = tth.box(corners)
print('Elapsed TT:', elapsed)
corners_list.append(corners)
pattern_list.append(np.ones(corners[:, 1] - corners[:, 0]))
# Compare groundtruth vs TT-reconstructed histograms for the box query.
fig = plt.figure(figsize=(4, 3))
plt.plot(gt, label='Groundtruth')
plt.plot(reco, label='TT')
plt.legend()
plt.xlabel('Bin')
plt.ylabel('Count')
plt.title('Box')
plt.tight_layout()
plt.savefig(os.path.join(config.data_folder, 'overlaid_plot_box.pdf'))
###

print()

### Gaussian: separable weighted region queried via the rank-1 pattern path.
print('*** Gaussian ***')
offset = [1024*1, 1024*2]
S = 768
shape = [S]*N
corners = np.array([[offset[0], offset[0]+S], [offset[1], offset[1]+S]])
print(corners)
shape = corners[:, 1] - corners[:, 0]
pattern = tr.core.gaussian_tt(shape, shape/4)
pattern *= (1./np.max(pattern.full()))  # normalize peak weight to 1
gt, elapsed = bruteforce.pattern(X, B, corners, pat=pattern.full())
print('Elapsed GT:', elapsed)
reco, elapsed = tth.separable(corners, pat=pattern)
print('Elapsed TT:', elapsed)
corners_list.append(corners)
pattern_list.append(pattern.full())
fig = plt.figure(figsize=(4, 3))
plt.plot(gt, label='Groundtruth')
plt.plot(reco, label='TT')
plt.legend()
plt.xlabel('Bin')
plt.ylabel('Count')
plt.title('Gaussian')
plt.tight_layout()
plt.savefig(os.path.join(config.data_folder, 'overlaid_plot_gaussian.pdf'))
###

# Final figure: both regions drawn over the source image.
plot_overlay(X, corners_list, pattern_list)
72,707 | rballester/tthistograms | refs/heads/master | /tthistogram.py | """
The most important class: the TTHistogram. Includes code compression and look-up over a variety of ROIs
"""
import numpy as np
import tt
import scipy.signal
import time
import sys
import ttrecipes as tr
class TTHistogram(object):
    def __init__(self, X, B, eps, verbose=False):
        """
        Build (incrementally) a compressed integral histogram that can be later queried.

        :param X: an ndarray, *already quantized* with values in 0, ..., B-1
        :param B: number of bins
        :param eps: relative error for the hierarchical sum-and-compress
        :param verbose: print per-bin progress while building
        """
        N = X.ndim
        if not (X.min() >= 0 and X.max() < B):
            raise ValueError('Please provide an array that is already quantized')
        start = time.time()

        def create_generator():
            # Yield, for each bin b, the TT-compressed integral image of the
            # level set (X == b), with a 1 x B x 1 one-hot bin core appended.
            for b in range(B):
                if verbose:
                    print(b)
                sys.stdout.flush()
                sl = (X == b)
                # Cumulative sums along every axis -> integral image of the level set.
                for n in range(N):
                    sl = np.cumsum(sl, axis=n)
                sys.stdout.flush()
                sl_t = tr.core.tt_svd(sl, eps, verbose=False)
                sys.stdout.flush()
                cores = tt.vector.to_list(sl_t)
                onehot = np.zeros([1, B, 1])
                onehot[0, b, 0] = 1
                cores.append(onehot)
                sl_t = tt.vector.from_list(cores)
                if verbose:
                    print('Done')
                yield sl_t

        generator = create_generator()
        # Pairwise sum-and-round of the B level-set tensors; the rounding budget
        # is divided by log2(B) so the accumulated error stays around eps.
        self.tensor = tr.core.sum_and_compress(generator, rounding=eps/np.log2(B), verbose=verbose)
        cores = tt.vector.to_list(self.tensor)
        # Prepend a zero slice along each spatial mode so corner index 0 is a
        # valid "before the data" position in difference queries.
        cores[:-1] = [np.concatenate([np.zeros([c.shape[0], 1, c.shape[2]]), c], axis=1) for c in cores[:-1]]
        self.tensor = tt.vector.from_list(cores)
        self.total_time = time.time() - start
def box(self, corners):
"""
:param corners: a list of pairs [[i0, i1], [j0, j1], ...] encoding the query box
:return: a vector with B elements
"""
cores = tt.vector.to_list(self.tensor)
start = time.time()
reco = np.array([[1]])
for corner, core in zip(corners, cores[:-1]):
reco = reco.dot(core[:, corner[1], :] - core[:, corner[0], :])
reco = np.squeeze(reco.dot(cores[-1][:, :, 0]))
elapsed = time.time() - start
return reco, elapsed
    def separable(self, corners, pat):
        """
        Query the histogram of a separable (rank-1) weighted region.

        :param corners: a list of pairs [[i0, i1], [j0, j1], ...] encoding the query window
        :param pat: a rank-1 TT encoding the separable region (must fit in `corners`)
        :return: (vector with B elements, elapsed seconds)
        """
        assert np.all(corners[:, 1] - corners[:, 0] == pat.n)
        cores = tt.vector.to_list(self.tensor)
        # Negated discrete derivative of each (zero-padded) pattern core, so the
        # weighted sum over the integral histogram telescopes correctly.
        coresp = [-np.diff(np.concatenate([np.zeros([c.shape[0], 1, c.shape[2]]), c, np.zeros([c.shape[0], 1, c.shape[2]])], axis=1), axis=1) for c in tt.vector.to_list(pat)]
        start = time.time()
        reco = np.array([[1]])
        for corner, core, corep, in zip(corners, cores[:-1], coresp):
            # Contract the windowed IH core with the pattern derivative core,
            # then merge the two rank indices on each side into one matrix.
            comb = np.einsum('ijk,ljm->ilkm', core[:, corner[0]:corner[1]+1, :], corep)
            comb = np.reshape(comb, [comb.shape[0]*comb.shape[1], comb.shape[-2]*comb.shape[-1]])
            reco = reco.dot(comb)
        reco = np.squeeze(reco.dot(cores[-1][:, :, 0]))
        elapsed = time.time() - start
        return reco, elapsed
    def nonseparable(self, corners, pat):
        """
        As `separable`, but `pat` does not need to have rank 1.
        """
        assert np.all(corners[:, 1] - corners[:, 0] == pat.n)
        cores = tt.vector.to_list(self.tensor)
        # Negated discrete derivative of each (zero-padded) pattern core.
        coresp = [-np.diff(np.concatenate([np.zeros([c.shape[0], 1, c.shape[2]]), c, np.zeros([c.shape[0], 1, c.shape[2]])], axis=1), axis=1) for c in tt.vector.to_list(pat)]
        start = time.time()
        # Restrict each spatial IH core to the query window.
        cores[:-1] = [core[:, corner[0]:corner[1] + 1, :] for corner, core in zip(corners, cores[:-1])]
        Rprod = np.array([[1]])

        def partial_dot_right(cores1, cores2, mu, Rprod):
            # One left-to-right step of the TT dot product between the pattern
            # derivative train (cores1) and the windowed IH train (cores2).
            Ucore = np.einsum('ij,ikl->jkl', Rprod, cores1[mu])
            Vcore = cores2[mu]
            return np.dot(tr.core.left_unfolding(Ucore).T, tr.core.left_unfolding(Vcore))

        d = len(coresp)
        for mu in range(d):
            Rprod = partial_dot_right(coresp, cores, mu, Rprod)
        # Project the accumulated product onto the bin core to get the histogram.
        Rprod = Rprod.dot(cores[-1][:, :, 0])
        elapsed = time.time() - start
        return np.squeeze(Rprod), elapsed
    def box_field(self, corners, shape):
        """
        Compute a field of box histograms (one per window position) from the TT tensor.

        :param corners: a list of pairs [[i0, i1], [j0, j1], ...] containing all window positions to compute
        :param shape: a list of N integers (all must be even -- enforced by the assert below)
            encoding the shape of each window
        :return: (an array of dimension N+1 and size (i1-i0) x (j1-j0) x ... x B, elapsed seconds)
        """
        assert all(np.mod(shape, 2) == 0)
        shape = shape // 2  # half-window extents
        # Every shifted window must stay inside the tensor's spatial domain
        assert all(corners[:, 0] - shape >= 0)
        assert all(corners[:, 1] + shape <= self.tensor.n[:-1])
        cores = tt.vector.to_list(self.tensor)
        start = time.time()
        reco = np.ones([1, 1, 1])
        for corner, sh, core in zip(corners, shape, cores[:-1]):
            # Difference of the cumulative core at the leading/trailing window edges
            chunk = core[:, corner[0]+sh:corner[1]+sh, :] - core[:, corner[0]-sh:corner[1]-sh, :]  # r1 x corner[1]-corner[0] x r2
            reco = np.tensordot(reco, chunk, axes=[2, 0])
            # Flatten all spatial axes accumulated so far into one
            reco = np.reshape(reco, [reco.shape[0], -1, reco.shape[-1]])
        reco = np.squeeze(np.tensordot(reco, cores[-1], axes=[2, 0]))
        # Unflatten back to one axis per dimension plus the bin axis
        reco = np.reshape(reco, list(corners[:, 1] - corners[:, 0]) + [self.tensor.n[-1]])
        elapsed = time.time() - start
        return reco, elapsed
    def separable_field(self, corners, pat):
        """
        As `box_field`, but for non-rectangular separable regions.

        :param corners: a list of pairs [[i0, i1], [j0, j1], ...] containing all window positions
        :param pat: a rank-1 TT whose sizes (all even) give the window shape
        :return: (an array of dimension N+1 and size (i1-i0) x (j1-j0) x ... x B, elapsed seconds)
        """
        shape = pat.n
        assert all(np.mod(shape, 2) == 0)
        shape = shape // 2  # half-window extents
        cores = tt.vector.to_list(self.tensor)
        # Difference of zero-padded pattern cores.
        # NOTE(review): sign is +np.diff here while `separable`/`nonseparable` use -np.diff;
        # presumably the kernel flip performed by scipy's convolution absorbs the
        # difference -- confirm before changing either.
        coresp = [np.diff(np.concatenate([np.zeros([c.shape[0], 1, c.shape[2]]), c, np.zeros([c.shape[0], 1, c.shape[2]])], axis=1), axis=1) for c in tt.vector.to_list(pat)]
        start = time.time()
        reco = np.ones([1, 1])
        for corner, sh, core, corep in zip(corners, shape, cores[:-1], coresp):
            chunk = core[:, corner[0]-sh:corner[1]+sh, :]
            # Convolve the histogram core with the pattern core along the spatial axis
            convolution = scipy.signal.convolve(chunk, corep, mode='valid')
            reco = np.einsum('jk,klm', reco, convolution)
            reco = np.reshape(reco, [-1, convolution.shape[-1]])
        reco = np.squeeze(np.tensordot(reco, cores[-1], axes=[1, 0]))
        # Unflatten back to one axis per dimension plus the bin axis
        reco = np.reshape(reco, list(corners[:, 1] - corners[:, 0]) + [self.tensor.n[-1]])
        elapsed = time.time() - start
        return reco, elapsed
| {"/ih_vs_noncumulative.py": ["/config.py", "/bruteforce.py"], "/overlay_examples.py": ["/config.py", "/bruteforce.py", "/util.py"], "/sphere_slices.py": ["/config.py", "/util.py"], "/compression_table.py": ["/config.py"], "/query_benchmark.py": ["/config.py", "/bruteforce.py", "/bruteforce_cupy.py", "/util.py"], "/entropy.py": ["/config.py", "/util.py", "/bruteforce.py"], "/ih_slices.py": ["/config.py", "/bruteforce.py", "/tthistogram.py", "/util.py"], "/cross_correlation.py": ["/config.py", "/util.py"], "/util.py": ["/tthistogram.py"]} |
72,708 | rballester/tthistograms | refs/heads/master | /bruteforce.py | """
Compute histograms via brute-force traversal in the CPU, for comparison against TT-histograms.
Important: all these functions assume that the data set X has already been quantized to B bins,
i.e. takes values in 0, ..., B-1
"""
import numpy as np
import time
import scipy.signal
def box(X, B, corners, weights=None):
    """
    Query a box histogram via brute-force traversal
    :param X: an ND data set (already quantized to integer bins 0, ..., B-1)
    :param B: number of bins
    :param corners: a list of pairs [[i0, i1], [j0, j1], ...] encoding the query box
    :param weights: optional per-voxel weights (same shape as the box), forwarded to np.histogram
    :return: (a vector with B elements, elapsed seconds)
    """
    start = time.time()
    # Index with a *tuple* of slices: a plain list of slices was deprecated in
    # NumPy 1.15 and is an error in recent versions.
    chunk = X[tuple(slice(corner[0], corner[1]) for corner in corners)]
    # Bin edges at half-integers so each integer value falls in its own bin
    gt = np.histogram(chunk, bins=B, range=[-0.5, B - 0.5], weights=weights)[0]
    elapsed = time.time() - start
    return gt, elapsed
def pattern(X, B, corners, pat):
    """
    Query a non-rectangular histogram via brute-force traversal
    :param X: an ND data set
    :param B: number of bins
    :param corners: a list of pairs [[i0, i1], [j0, j1], ...] encoding the pattern's bounding box
    :param pat: a multiarray (its size must fit `corners`) containing the region weights
    :return: a vector with B elements
    """
    # A weighted-region query is simply a box query with per-voxel weights
    hist, elapsed = box(X, B, corners, weights=pat)
    return hist, elapsed
def box_field(X, B, corners, shape, verbose=False):
    """
    Compute a box histogram field
    :param X: an ND data set (already quantized to integer bins 0, ..., B-1)
    :param B: number of bins
    :param corners: an array of pairs [[i0, i1], [j0, j1], ...] containing all window positions to compute
    :param shape: a list of N integers (all even) encoding the shape of each window
    :param verbose: print progress information
    :return: (an array of dimension N+1 and size (i1-i0) x (j1-j0) x ... x B, elapsed seconds)
    """
    if verbose:
        print('Computing box field')
    start = time.time()
    shape = shape // 2  # half-window extents
    N = X.ndim
    # Crop the region of X any window can touch.
    # NOTE: tuple-of-slices indexing -- a list of slices is deprecated/an error in recent NumPy.
    chunk = X[tuple(slice(corner[0]-sh, corner[1]+sh) for corner, sh in zip(corners, shape))]
    tmp = chunk
    elapsed = time.time() - start
    # Zero-pad one leading plane per axis so the cumulative sums can be differenced at index 0
    chunk = np.zeros(np.array(chunk.shape) + 1)
    chunk[(slice(1, None),) * N] = tmp
    start = time.time()
    result = np.zeros(list(corners[:, 1] - corners[:, 0]) + [B])
    for b in range(B):
        if verbose:
            print('b = {}'.format(b))
        sl = (chunk == b)
        # Integral image (summed-area table) of the level set of bin b
        for n in range(N):
            sl = np.cumsum(sl, axis=n)
        blocks = []
        for corner, sh in zip(corners, shape):
            blocks.append([slice(0, corner[1]-corner[0], 1), slice(2*sh, corner[1]-corner[0]+2*sh, 1)])
        # Inclusion-exclusion over the 2^N corners of every window at once
        codes = np.array(np.unravel_index(np.arange(2**N), [2]*N)).T
        for code in codes:
            sign = (-1) ** (codes.shape[1] - np.sum(code))
            result[..., b] += sign * sl[tuple(block[c] for block, c in zip(blocks, code))]
    elapsed += time.time() - start
    return result, elapsed
def separable_field(X, B, corners, pat, verbose=False):
    """
    As `box_field`, but for non-rectangular separable regions.
    :param pat: a list of 1D vectors encoding the rank-1 expression of the weights.
        This substitutes the parameter `shape` (each vector's length must be even).
    :return: (an array of dimension N+1 and size (i1-i0) x (j1-j0) x ... x B, elapsed seconds)
    """
    if verbose:
        print('Computing separable field')
    start = time.time()
    shape = np.array([len(v) for v in pat])
    shape = shape // 2  # half-window extents
    N = X.ndim
    # NOTE: tuple-of-slices indexing -- a list of slices is deprecated/an error in recent NumPy.
    chunk = X[tuple(slice(corner[0] - sh, corner[1] - 1 + sh) for corner, sh in zip(corners, shape))]
    result = np.zeros(list(corners[:, 1] - corners[:, 0]) + [B])
    for b in range(B):
        if verbose:
            print('b = {}'.format(b))
        sl = (chunk == b)
        # Convolve the level set with each 1D factor of the separable weight pattern
        for i, v in enumerate(pat):
            slicing = [np.newaxis]*N
            slicing[i] = slice(None)
            sl = scipy.signal.convolve(sl, v[tuple(slicing)], mode='valid')
        result[..., b] = sl
    elapsed = time.time() - start
    return result, elapsed
def nonseparable_field(X, B, corners, pat, verbose=False):
    """
    As `box_field`, but for non-rectangular general regions.
    :param pat: an ND NumPy array containing the weights (all sizes must be even)
    :return: (an array of dimension N+1 and size (i1-i0) x (j1-j0) x ... x B, elapsed seconds)
    """
    if verbose:
        print('Computing non-separable field')
    N = X.ndim
    start = time.time()
    shape = np.array(pat.shape)
    shape = shape // 2  # half-window extents
    # NOTE: tuple-of-slices indexing -- a list of slices is deprecated/an error in recent NumPy.
    chunk = X[tuple(slice(corner[0] - sh, corner[1] - 1 + sh) for corner, sh in zip(corners, shape))]
    result = np.zeros(list(corners[:, 1] - corners[:, 0]) + [B])
    # Reverse the pattern along every axis so the convolution below acts as a correlation
    pat = pat[(slice(None, None, -1),) * N]
    for b in range(B):
        if verbose:
            print('b = {}'.format(b))
        sl = (chunk == b)
        sl = scipy.signal.convolve(sl, pat, mode='valid')
        result[..., b] = sl
    elapsed = time.time() - start
    return result, elapsed
def create_levelset(X, B):
    """
    Build the level-set decomposition of X: one binary indicator slice per bin.
    :param X: an ND data set quantized to integer bins 0, ..., B-1
    :param B: number of bins
    :return: a float array of shape X.shape + (B,) with L[..., b] == (X == b)
    """
    indicators = [(X == b).astype(float) for b in range(B)]
    return np.stack(indicators, axis=-1)
def create_ih(X, B):
    """
    Build the full (uncompressed) integral histogram of X.
    Generalized to any number of dimensions: the original hard-coded cumulative
    sums along axes 0, 1, 2 and therefore only worked for 3D inputs; behavior
    for 3D data is unchanged.
    :param X: an ND data set quantized to integer bins 0, ..., B-1
    :param B: number of bins
    :return: a float array of shape X.shape + (B,) where [..., b] is the
        integral image (cumulative sum along every axis) of the level set X == b
    """
    L_C = np.zeros(list(X.shape) + [B])
    for b in range(B):
        sl = (X == b)
        # Cumulative sum along every spatial axis yields the summed-area table
        for axis in range(X.ndim):
            sl = np.cumsum(sl, axis=axis)
        L_C[..., b] = sl
    return L_C
72,709 | rballester/tthistograms | refs/heads/master | /sphere_slices.py | """
Visual results: a 3D sphere after compression in the TT format
"""
import numpy as np
import config
import util
import matplotlib.pyplot as plt
import ttrecipes as tr
N = 3   # dimensionality of the test volume
S = 64  # side length of the volume
# util.sphere returns a (full ndarray, TT approximation) pair; only the dense array is used here
s = util.sphere(config.data_folder, N, S, eps=0.0)
s, _ = s[0], s[1]
eps = 0
# Compress the ball with increasing maximum TT rank and save a central slice of each reconstruction
for rmax in 2**np.arange(0, 5):
    t = tr.core.tt_svd(s, eps=eps, rmax=rmax, verbose=False)
    t = t.full()
    # Relative reconstruction error for this rank cap
    print(rmax, np.linalg.norm(s - t) / np.linalg.norm(s))
    fig = plt.figure(frameon=False)
    fig.set_size_inches(5, 5)
    # Borderless axes spanning the whole figure
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    # NOTE(review): aspect='normal' was removed in newer matplotlib (use 'auto') -- confirm target version
    ax.imshow(t[:, :, S//2], cmap='gray', vmin=0, vmax=1, aspect='normal')
    plt.axis('off')
    plt.savefig('tt_ball_{:03d}.jpg'.format(rmax))
    plt.clf()
| {"/ih_vs_noncumulative.py": ["/config.py", "/bruteforce.py"], "/overlay_examples.py": ["/config.py", "/bruteforce.py", "/util.py"], "/sphere_slices.py": ["/config.py", "/util.py"], "/compression_table.py": ["/config.py"], "/query_benchmark.py": ["/config.py", "/bruteforce.py", "/bruteforce_cupy.py", "/util.py"], "/entropy.py": ["/config.py", "/util.py", "/bruteforce.py"], "/ih_slices.py": ["/config.py", "/bruteforce.py", "/tthistogram.py", "/util.py"], "/cross_correlation.py": ["/config.py", "/util.py"], "/util.py": ["/tthistogram.py"]} |
72,710 | rballester/tthistograms | refs/heads/master | /compression_table.py | """
Gather TT-histogram offline compression statistics and display them as a LaTeX table
"""
import pickle
import numpy as np
import re
import pandas as pd
import os
import config
# Pre-computed TT-compressed integral histograms (pickled TTHistogram objects)
input_datasets = [
    os.path.join(config.data_folder, 'waterfall_64_0.0005000.tth'),
    os.path.join(config.data_folder, 'bonsai_volvis_128_0.0002000.tth'),
    os.path.join(config.data_folder, 'hnut_128_0.0000250.tth'),
    os.path.join(config.data_folder, 'flower_128_0.0000150.tth')
]
# One table column per data set; rows are the reported statistics (LaTeX-formatted labels)
df = pd.DataFrame(index=['Size (MB)', '$T_D$ (s)', 'B', 'Full IH (GB)', r'\specialcell{Compression \\ target $\eps$}', r'\specialcell{Compressed \\ IH (MB)}', 'TT ranks'], columns=[r'\textbf{Waterfall}', r'\textbf{Bonsai}', r'\textbf{Hazelnut}', r'\textbf{Flower}'])
for i, filename in enumerate(input_datasets):
    tth = pickle.load(open(filename, 'rb'))
    # Spatial sizes are stored off-by-one (cumulative padding); drop the last (bin) mode
    size = np.prod(tth.tensor.n[:-1]-1) / (1024**2)
    # Parse the bin count and compression target from the filename convention name_B_eps.tth
    rs = re.search(r'\D+_(\d+)_(0.\d+)', filename)
    B = int(rs.group(1))
    full_size = np.prod(tth.tensor.n[:-1]-1)*B
    if full_size // B > 2**32: # 64-bit integers are needed
        full_size *= 8
    else: # 32-bit integers are needed
        full_size *= 4
    full_size /= (1024**3) # Size in GB's
    eps = float(rs.group(2))
    # 8 bytes (float64) per stored TT coefficient
    compressed_size = len(tth.tensor.core)*8 / (1024**2)
    tt_ranks = ", ".join([str(r) for r in tth.tensor.r[1:-1]])
    print(size)
    print(tth.total_time)
    print(B)
    print(full_size)
    print(eps)
    print(compressed_size)
    print(tt_ranks)
    df.iloc[:, i] = [size, tth.total_time, B, full_size, eps, compressed_size, tt_ranks]
    print()
print(df.to_latex(escape=False, column_format='ccccc'))
| {"/ih_vs_noncumulative.py": ["/config.py", "/bruteforce.py"], "/overlay_examples.py": ["/config.py", "/bruteforce.py", "/util.py"], "/sphere_slices.py": ["/config.py", "/util.py"], "/compression_table.py": ["/config.py"], "/query_benchmark.py": ["/config.py", "/bruteforce.py", "/bruteforce_cupy.py", "/util.py"], "/entropy.py": ["/config.py", "/util.py", "/bruteforce.py"], "/ih_slices.py": ["/config.py", "/bruteforce.py", "/tthistogram.py", "/util.py"], "/cross_correlation.py": ["/config.py", "/util.py"], "/util.py": ["/tthistogram.py"]} |
72,711 | rballester/tthistograms | refs/heads/master | /config.py | raise Exception('Edit the data_folder variable in config.py to indicate where your data sets are')
data_folder = 'data/' # Folder with input data sets; also where TT-compressed integral histograms, temporary files and results are to be stored
| {"/ih_vs_noncumulative.py": ["/config.py", "/bruteforce.py"], "/overlay_examples.py": ["/config.py", "/bruteforce.py", "/util.py"], "/sphere_slices.py": ["/config.py", "/util.py"], "/compression_table.py": ["/config.py"], "/query_benchmark.py": ["/config.py", "/bruteforce.py", "/bruteforce_cupy.py", "/util.py"], "/entropy.py": ["/config.py", "/util.py", "/bruteforce.py"], "/ih_slices.py": ["/config.py", "/bruteforce.py", "/tthistogram.py", "/util.py"], "/cross_correlation.py": ["/config.py", "/util.py"], "/util.py": ["/tthistogram.py"]} |
72,712 | rballester/tthistograms | refs/heads/master | /query_benchmark.py | """
Main benchmarking experiments: times and accuracy for queries over various regions
"""
import numpy as np
import config
import bruteforce
import bruteforce_cupy
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import pandas as pd
import util
import ttrecipes as tr
def run(X, tth, eps, S, P):
    """
    Generate many random ROIs and measure their histograms (both groundtruth and from a TTHistogram)
    :param X: a tensor
    :param tth: its TTHistogram
    :param eps: compression target used to build `tth` (only recorded in the output table)
    :param S: region size
    :param P: number of samples to draw
    :return: a DataFrame with one row per (method, ROI type) combination
    """
    times_gt_box = []
    times_gt_gaussian = []
    times_gt_sphere = []
    times_cp_box = []
    times_cp_gaussian = []
    times_cp_sphere = []
    errors_tt_box = []
    times_tt_box = []
    errors_tt_gaussian = []
    times_tt_gaussian = []
    errors_tt_sphere = []
    times_tt_sphere = []
    N = X.ndim
    B = tth.tensor.n[-1]
    # Spherical test pattern, both dense and TT-compressed, normalized to sum 1
    sphere, sphere_t = util.sphere(config.data_folder, N, S, eps=0.1)
    # print(sphere_t)
    # print(np.linalg.norm(sphere - sphere_t.full()) / np.linalg.norm(sphere))
    denominator = np.sum(sphere)
    sphere /= denominator
    sphere_t *= (1./denominator)
    # The ROI is centered in the volume (random placement was disabled)
    corners = []
    for i in range(N):
        # left = np.random.randint(0, X.shape[i] + 1 - S)
        left = X.shape[i] // 2 - S // 2
        right = left + S
        corners.append([left, right])
    corners = np.array(corners)
    for p in range(P):
        # Box
        gt, time_gt = bruteforce.box(X, B, corners)
        # _, time_tf_gt = box_tf(X, B, corners)
        times_gt_box.append(time_gt)
        gt_cp, time_cp = bruteforce_cupy.box_cp(X, B, corners)
        times_cp_box.append(time_cp)
        reco, time_tt = tth.box(corners)
        errors_tt_box.append(np.linalg.norm(reco - gt) / np.linalg.norm(gt))
        times_tt_box.append(time_tt)
        # Gaussian
        pattern = tr.core.gaussian_tt(corners[:, 1] - corners[:, 0], [S/5] * 3)
        pattern *= (1. / tr.core.sum(pattern))
        patternfull = pattern.full()
        gt, time_gt = bruteforce.pattern(X, B, corners, pat=patternfull)
        times_gt_gaussian.append(time_gt)
        gt_cp, time_cp = bruteforce_cupy.pattern_cp(X, B, corners, pat=patternfull)
        times_cp_gaussian.append(time_cp)
        reco, time_tt = tth.separable(corners, pat=pattern)
        errors_tt_gaussian.append(np.linalg.norm(reco - gt) / np.linalg.norm(gt))
        times_tt_gaussian.append(time_tt)
        # Sphere
        gt, time_gt = bruteforce.pattern(X, B, corners, pat=sphere)
        times_gt_sphere.append(time_gt)
        gt_cp, time_cp = bruteforce_cupy.pattern_cp(X, B, corners, pat=sphere)
        times_cp_sphere.append(time_cp)
        reco, time_tt = tth.nonseparable(corners, pat=sphere_t)
        errors_tt_sphere.append(np.linalg.norm(reco - gt) / np.linalg.norm(gt))
        times_tt_sphere.append(time_tt)
    # Aggregate per-method/per-ROI statistics; ground-truth rows carry zero error by definition
    df = pd.DataFrame(columns=['B', 'method', 'heps', 'roitype', 'roisize', 'fieldsize', 'meantime', 'mediantime', 'maxtime', 'meanerror', 'medianerror', 'maxerror'])
    df.loc[0, :] = [B, 'gt', None, 'box', S, 1, np.mean(times_gt_box), np.median(times_gt_box), np.max(times_gt_box), 0, 0, 0]
    df.loc[1, :] = [B, 'gt', None, 'gaussian', S, 1, np.mean(times_gt_gaussian), np.median(times_gt_gaussian), np.max(times_gt_gaussian), 0, 0, 0]
    df.loc[2, :] = [B, 'gt', None, 'sphere', S, 1, np.mean(times_gt_sphere), np.median(times_gt_sphere), np.max(times_gt_sphere), 0, 0, 0]
    df.loc[3, :] = [B, 'cp', None, 'box', S, 1, np.mean(times_cp_box), np.median(times_cp_box), np.max(times_cp_box), 0, 0, 0]
    df.loc[4, :] = [B, 'cp', None, 'gaussian', S, 1, np.mean(times_cp_gaussian), np.median(times_cp_gaussian), np.max(times_cp_gaussian), 0, 0, 0]
    df.loc[5, :] = [B, 'cp', None, 'sphere', S, 1, np.mean(times_cp_sphere), np.median(times_cp_sphere), np.max(times_cp_sphere), 0, 0, 0]
    df.loc[6, :] = [B, 'tt', eps, 'box', S, 1, np.mean(times_tt_box), np.median(times_tt_box), np.max(times_tt_box), np.mean(errors_tt_box), np.median(errors_tt_box), np.max(errors_tt_box)]
    df.loc[7, :] = [B, 'tt', eps, 'gaussian', S, 1, np.mean(times_tt_gaussian), np.median(times_tt_gaussian), np.max(times_tt_gaussian), np.mean(errors_tt_gaussian), np.median(errors_tt_gaussian), np.max(errors_tt_gaussian)]
    df.loc[8, :] = [B, 'tt', eps, 'sphere', S, 1, np.mean(times_tt_sphere), np.median(times_tt_sphere), np.max(times_tt_sphere), np.mean(errors_tt_sphere), np.median(errors_tt_sphere), np.max(errors_tt_sphere)]
    return df
def run_field(X, tth, eps, S, K, P):
    """
    Field histogram reconstruction benchmark: brute-force vs TT reconstruction of
    box and Gaussian histogram fields at P random window positions.
    :param X: a tensor
    :param tth: its TTHistogram
    :param eps: compression target used to build `tth` (only recorded in the output table)
    :param S: window size
    :param K: field extent (number of window positions per dimension)
    :param P: number of random samples to draw
    :return: a DataFrame with one row per (method, ROI type) combination
    """
    # FIX: `tt` is used below (tt.vector.to_list) but was never imported by this
    # module, which made this function raise NameError on first use.
    import tt
    times_gt_box_field = []
    times_gt_gaussian_field = []
    errors_tt_box_field = []
    times_tt_box_field = []
    errors_tt_gaussian_field = []
    times_tt_gaussian_field = []
    N = X.ndim
    B = tth.tensor.n[-1]
    shape = np.array([S]*N)
    for p in range(P):
        print(p)
        # Random field placement such that every shifted window stays in bounds
        corners = []
        for i in range(N):
            left = np.random.randint(S // 2, X.shape[i] + 1 - S // 2 - K)
            right = left + K
            corners.append([left, right])
        corners = np.array(corners)
        # Box field
        gt, elapsed = bruteforce.box_field(X, B, corners, shape)
        times_gt_box_field.append(elapsed)
        reco, elapsed = tth.box_field(corners, shape)
        errors_tt_box_field.append(np.linalg.norm(reco - gt) / np.linalg.norm(gt))
        times_tt_box_field.append(elapsed)
        # Gaussian field
        pattern = tr.core.gaussian_tt(shape, shape/5)
        pattern *= (1. / tr.core.sum(pattern))
        gt, time_gt = bruteforce.separable_field(X, B, corners, pat=[np.squeeze(c) for c in tt.vector.to_list(pattern)])
        times_gt_gaussian_field.append(time_gt)
        reco, time_tt = tth.separable_field(corners, pat=pattern)
        errors_tt_gaussian_field.append(np.linalg.norm(reco - gt) / np.linalg.norm(gt))
        times_tt_gaussian_field.append(time_tt)
    df = pd.DataFrame(columns=['B', 'method', 'heps', 'roitype', 'roisize', 'fieldsize', 'meantime', 'mediantime', 'maxtime', 'meanerror', 'medianerror', 'maxerror'])
    df.loc[0, :] = [B, 'gt', None, 'box_field', S, K, np.mean(times_gt_box_field), np.median(times_gt_box_field), np.max(times_gt_box_field), 0, 0, 0]
    df.loc[1, :] = [B, 'gt', None, 'gaussian_field', S, K, np.mean(times_gt_gaussian_field), np.median(times_gt_gaussian_field), np.max(times_gt_gaussian_field), 0, 0, 0]
    df.loc[2, :] = [B, 'tt', eps, 'box_field', S, K, np.mean(times_tt_box_field), np.median(times_tt_box_field), np.max(times_tt_box_field), np.mean(errors_tt_box_field), np.median(errors_tt_box_field), np.max(errors_tt_box_field)]
    df.loc[3, :] = [B, 'tt', eps, 'gaussian_field', S, K, np.mean(times_tt_gaussian_field), np.median(times_tt_gaussian_field), np.max(times_tt_gaussian_field), np.mean(errors_tt_gaussian_field), np.median(errors_tt_gaussian_field), np.max(errors_tt_gaussian_field)]
    return df
def batch():
    """Run the single-query benchmark over a sweep of ROI sizes and stack the results."""
    samples_per_size = 3
    dims = np.array(X.shape)
    # ROI size sweep (rounded down to even); the 1024^3 data set gets its own range
    if np.all(dims == [1024, 1024, 1024]):
        sizes = (np.linspace(dims[0]/48, 512, 11).astype(int))//2 * 2
        # print(sizes)
    else:
        sizes = (np.linspace(dims[0]/17, dims[0]/1.2, 11).astype(int))//2 * 2
    partials = []
    for roi_size in sizes:
        print('S = {}'.format(roi_size))
        partials.append(run(X, tth, eps, S=roi_size, P=samples_per_size))
    return pd.concat(partials, ignore_index=True)
def batch_field():
    """Run the field benchmark over a small sweep of window sizes and stack the results."""
    samples = 2
    field_size = 16
    dims = np.array(X.shape)
    # Two window sizes between 1/8 and 1/2 of the extent, rounded down to even
    sizes = (np.linspace(dims[0]/8, dims[0]//2, 2).astype(int))//2 * 2
    # if S < shape[0] - K:
    partials = [run_field(X, tth, eps, S=window, K=field_size, P=samples) for window in sizes]
    return pd.concat(partials, ignore_index=True)
def plot(basename):
    """
    Read the cached benchmark results for `basename` from Excel and save the
    time/error figures (PDF + PNG, with separate legend figures).
    """
    title = basename.capitalize()
    if title == 'Bonsai_volvis':
        title = 'Bonsai'
    # NOTE(review): `sheetname` was renamed to `sheet_name` and later removed in
    # pandas -- confirm the pandas version this targets.
    df = pd.read_excel(os.path.join(config.data_folder, '{}.xlsx'.format(basename)), sheetname=basename)
    figsize = (6, 3)
    # Times GT vs TT (single)
    # colors = ['#1f77b4', '#ff7f0e']#, '#2ca02c']
    roitypes = ['box', 'gaussian']#, 'sphere']
    markers = ['s', 'o']
    fig = plt.figure(figsize=figsize)
    for roitype, marker in zip(roitypes, markers):
        select = df.loc[(df['method'] == 'gt') & (df['roitype'] == roitype)]
        plt.plot(select['roisize'], np.log10(select['meantime']), label='{}: brute-force'.format(roitype.capitalize()), linestyle='-', marker=marker
                 , color='#1f77b4')
        select = df.loc[(df['method'] == 'cp') & (df['roitype'] == roitype)]
        plt.plot(select['roisize'], np.log10(select['meantime']), label='{}: brute-force (CuPy)'.format(roitype.capitalize()),
                 linestyle='-', marker=marker, color='#ff7f0e')
        select = df.loc[(df['method'] == 'tt') & (df['roitype'] == roitype)]
        plt.plot(select['roisize'], np.log10(select['meantime']), label='{}: TT'.format(roitype.capitalize()), marker=marker, color='#2ca02c')
    # plt.legend(loc='upper left')
    # plt.legend()
    plt.title(title)
    plt.xlabel('ROI size')
    plt.ylabel('log10(time) (s)')
    plt.ylim([-4.15, 1])
    plt.tight_layout()
    # plt.show()
    pdf = os.path.join(config.data_folder, '{}_single_times.pdf').format(basename)
    handles, labels = plt.gca().get_legend_handles_labels()
    plt.savefig(pdf)
    # Requires the external `pdfcrop` tool to be on the PATH
    os.system('pdfcrop {} {}'.format(pdf, pdf))
    plt.savefig(os.path.join(config.data_folder, '{}_single_times.png').format(basename))
    # Legend is rendered into its own standalone figure
    fig_legend = plt.figure(figsize=(8,8), frameon=False)
    axi = fig_legend.add_subplot(111)
    fig_legend.legend(handles, labels, loc='center', scatterpoints=1, ncol=2)
    axi.xaxis.set_visible(False)
    axi.yaxis.set_visible(False)
    plt.axis('off')
    fig_legend.canvas.draw()
    plt.tight_layout()
    plt.savefig(os.path.join(config.data_folder, '{}_single_times_legend.pdf').format(basename))
    # assert 0
    # fig_legend.show()
    # Times GT vs TT (field)
    colors = ['#1f77b4', '#ff7f0e']
    roitypes = ['box_field', 'gaussian_field']
    fig = plt.figure(figsize=figsize)
    for color, roitype in zip(colors, roitypes):
        select = df.loc[(df['method'] == 'gt') & (df['roitype'] == roitype)]
        plt.plot(select['roisize'], np.log10(select['meantime']), label='{}: brute-force'.format(roitype.capitalize()), linestyle='-', marker='o', color=color)
        select = df.loc[(df['method'] == 'tt') & (df['roitype'] == roitype)]
        plt.plot(select['roisize'], np.log10(select['meantime']), label='{}: TT'.format(roitype.capitalize()), marker='o', color=color)
    plt.legend(loc='upper left')
    plt.title(title)
    plt.xlabel('ROI size')
    plt.ylabel('log10(time) (s)')
    plt.tight_layout()
    pdf = os.path.join(config.data_folder, '{}_field_times.pdf').format(basename)
    plt.savefig(pdf)
    os.system('pdfcrop {} {}'.format(pdf, pdf))
    plt.savefig(os.path.join(config.data_folder, '{}_field_times.png').format(basename))
    # plt.show()
    # Errors for TT
    colors = ['#2ca02c', '#2ca02c']#, '#2ca02c']
    roitypes = ['box', 'gaussian']#, 'sphere']
    fig = plt.figure(figsize=figsize)
    for color, roitype, marker in zip(colors, roitypes, markers):
        select = df.loc[(df['method'] == 'tt') & (df['roitype'] == roitype)]
        plt.plot(select['roisize'], np.log10(select['medianerror']), label='{}: TT'.format(roitype.capitalize()), marker=marker, color=color)
    # plt.legend()
    plt.title(title)
    plt.xlabel('ROI size')
    plt.ylabel('log10(relative error)')
    plt.ylim([-5.2, -1])
    plt.tight_layout()
    # plt.show()
    pdf = os.path.join(config.data_folder, '{}_errors.pdf').format(basename)
    handles, labels = plt.gca().get_legend_handles_labels()
    plt.savefig(pdf)
    os.system('pdfcrop {} {}'.format(pdf, pdf))
    plt.savefig(os.path.join(config.data_folder, '{}_errors.png').format(basename))
    fig_legend = plt.figure(figsize=(2,2), frameon=False)
    axi = fig_legend.add_subplot(111)
    fig_legend.legend(handles, labels, loc='center', scatterpoints=1)
    axi.xaxis.set_visible(False)
    axi.yaxis.set_visible(False)
    plt.axis('off')
    fig_legend.canvas.draw()
    plt.tight_layout()
    plt.savefig(os.path.join(config.data_folder, '{}_errors_legend.pdf').format(basename))
###
# Benchmark configuration: one (dataset, bin count, compression target) triple per run
input_datasets = [os.path.join(config.data_folder, 'waterfall_4096_4096_uint8.raw'), os.path.join(config.data_folder, 'bonsai_volvis_256_256_256_uint8.raw'), os.path.join(config.data_folder, 'lung_512_512_512_uint8.raw'), os.path.join(config.data_folder, 'flower_1024_1024_1024_uint8.raw')]
Bs = [64, 128, 128, 128]
epss = [0.0005000, 0.0001000, 0.0002000, 0.0000150]
###
for input_dataset, B, eps in zip(input_datasets, Bs, epss):
    basename, X, tth = util.prepare_dataset(config.data_folder, input_dataset, B=B, eps=eps)
    excel_name = os.path.join(config.data_folder, '{}.xlsx'.format(basename))
    # Results are cached to Excel; delete the file to re-run the benchmark
    if not os.path.exists(excel_name):
        df1 = batch()
        # df2 = batch_field()
        # df = pd.concat([df1, df2], ignore_index=True)
        df = df1
        writer = pd.ExcelWriter(excel_name)
        df.to_excel(writer, basename)
        writer.save()
    else:
        print('Excel exists; skipping...')
    plot(basename)
# run(X, tth, eps, 128, 10)
# run_field(X, tth, eps, 32, 32, 10)
# corners = [[0, sh//2] for sh in X.shape]
# gt, _ = bruteforce.box(X, B, corners)
# reco, _ = tth.box(corners)
# print(np.sum(gt))
# print(np.sum(reco))
# print(np.linalg.norm(gt - reco) / np.linalg.norm(gt))
# fig = plt.figure()
# plt.plot(gt)
# plt.plot(reco)
# plt.show()
| {"/ih_vs_noncumulative.py": ["/config.py", "/bruteforce.py"], "/overlay_examples.py": ["/config.py", "/bruteforce.py", "/util.py"], "/sphere_slices.py": ["/config.py", "/util.py"], "/compression_table.py": ["/config.py"], "/query_benchmark.py": ["/config.py", "/bruteforce.py", "/bruteforce_cupy.py", "/util.py"], "/entropy.py": ["/config.py", "/util.py", "/bruteforce.py"], "/ih_slices.py": ["/config.py", "/bruteforce.py", "/tthistogram.py", "/util.py"], "/cross_correlation.py": ["/config.py", "/util.py"], "/util.py": ["/tthistogram.py"]} |
72,713 | rballester/tthistograms | refs/heads/master | /entropy.py | """
Compute box- and Gaussian-entropy fields from a hurricane directional histogram
"""
import numpy as np
import config
import util
import tt
import ttrecipes as tr
import bruteforce
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import mpl_toolkits.mplot3d
import os
def save_image(image, filename):
    """Save `image` to `filename` as a borderless, full-bleed figure (GnBu colormap)."""
    figure = plt.figure(figsize=(10,10), frameon=False)
    # Axes covering the whole canvas, with no ticks or frame
    axes = plt.Axes(figure, [0., 0., 1., 1.])
    axes.set_axis_off()
    figure.add_axes(axes)
    axes.imshow(image, aspect='auto', cmap=cm.GnBu)
    figure.savefig(filename)
def entropy(X, axis=-1):
    """Shannon entropy (in bits) of X, normalized to a probability distribution along `axis`."""
    total = np.sum(X, axis=axis, keepdims=True)
    probabilities = X / total
    return -np.sum(probabilities * np.log2(probabilities), axis=axis)
# Load the hurricane volume (quantized to B bins) and its TT-compressed integral histogram
input_dataset = os.path.join(config.data_folder, 'hurricane_500_500_91_uint8.raw')
B = 128
eps = 0.0001
basename, X, tth = util.prepare_dataset(config.data_folder, input_dataset, B=B, eps=eps)
print(tth.tensor)
N = 3
shape = np.array([20, 20, 90])
Is = [500, 500, 91]
# Windows sweep the full x-y extent at the central z slab (z window [45, 46))
corners = np.array([[shape[0]//2, Is[0]-shape[0]//2], [shape[1]//2, Is[1]-shape[1]//2], [45, 46]])
# Box field
gt, elapsed = bruteforce.box_field(X, B, corners, shape=shape, verbose=True)
gt = np.squeeze(gt)
print('Elapsed (box GT):', elapsed)
print('[{}, {}]'.format(gt.min(), gt.max()))
# +1 before the entropy keeps log2 finite on empty bins
field = entropy(np.abs(gt+1))
save_image(field, os.path.join(config.data_folder, 'hurricane_box_field_gt.jpg'))
print()
reco, elapsed = tth.box_field(corners, shape)
reco = np.squeeze(reco)
print('Elapsed (box TT):', elapsed)
print('[{}, {}]'.format(reco.min(), reco.max()))
field = entropy(np.abs(reco+1))
save_image(field, os.path.join(config.data_folder, 'hurricane_box_field_tt.jpg'))
# Gaussian field
shape = np.array([10, 10, 90])
pattern = tr.core.gaussian_tt(shape, shape/4)
pattern *= (1./tr.core.sum(pattern))*np.prod(shape)
# NOTE(review): bruteforce.separable_field expects a list of 1D vectors, but a TT
# object is passed here (query_benchmark.py converts with tt.vector.to_list first)
# -- confirm this call path actually works.
gt, elapsed = bruteforce.separable_field(X, B, corners, pat=pattern, verbose=True)
gt = np.squeeze(gt)
field = entropy(np.abs(gt+1))
save_image(field, os.path.join(config.data_folder, 'hurricane_gaussian_field_gt.jpg'))
print('Elapsed (Gaussian GT):', elapsed)
reco, elapsed = tth.separable_field(corners, pat=pattern)
reco = np.squeeze(reco)
field = entropy(np.abs(reco+1))
# FIX: was `data_folder` (undefined name -> NameError); must be config.data_folder
save_image(field, os.path.join(config.data_folder, 'hurricane_gaussian_field_tt.jpg'))
print('Elapsed (Gaussian TT):', elapsed)
| {"/ih_vs_noncumulative.py": ["/config.py", "/bruteforce.py"], "/overlay_examples.py": ["/config.py", "/bruteforce.py", "/util.py"], "/sphere_slices.py": ["/config.py", "/util.py"], "/compression_table.py": ["/config.py"], "/query_benchmark.py": ["/config.py", "/bruteforce.py", "/bruteforce_cupy.py", "/util.py"], "/entropy.py": ["/config.py", "/util.py", "/bruteforce.py"], "/ih_slices.py": ["/config.py", "/bruteforce.py", "/tthistogram.py", "/util.py"], "/cross_correlation.py": ["/config.py", "/util.py"], "/util.py": ["/tthistogram.py"]} |
72,714 | rballester/tthistograms | refs/heads/master | /ih_slices.py | """
Show example IH slices, before and after TT compression
"""
import numpy as np
import pickle
import config
import bruteforce
import tthistogram
import os
import pandas as pd
import util
import ttrecipes as tr
import scipy.misc
import matplotlib.pyplot as plt
###
input_dataset = os.path.join(config.data_folder, 'waterfall_4096_4096_uint8.raw')
B = 64
eps = 1e-3
basename, X, tth = util.prepare_dataset(config.data_folder, input_dataset, B, eps)
###
factor = 16  # downsampling factor for display
N = X.ndim
bs = [16, 32, 48]  # bins whose IH slices are shown
origs = []
recos = []
diffs = []
for b in bs:
    # print(X.shape)
    orig = (X == b)
    tmp = orig
    # Zero-pad one leading plane per axis before the cumulative sums.
    # NOTE: tuple-of-slices indexing -- a list of slices is deprecated/an error in recent NumPy.
    orig = np.zeros(np.array(orig.shape)+1)
    orig[(slice(1, None),) * N] = tmp
    print(orig.shape)
    for n in range(N):
        orig = np.cumsum(orig, axis=n)
    orig = orig.astype(float)
    orig = orig[(slice(None, None, factor),) * N]
    origs.append(orig)
    # TT reconstruction of the same IH slice (tt vector indexing, kept as-is)
    reco = tth.tensor[[slice(None)]*N + [b]].full()
    reco = reco[(slice(None, None, factor),) * N]
    recos.append(reco)
    diffs.append(np.abs(orig-reco))
print('Original range:', origs[0].min(), origs[0].max())
print('Absolute error range:', diffs[0].min(), diffs[0].max())
# Shared grayscale range over all original slices.
# FIX: was `for o in orig:` which iterated the rows of the *last* slice only.
vmin = float('inf')
vmax = float('-inf')
for o in origs:
    vmin = min(vmin, np.min(o))
    vmax = max(vmax, np.max(o))
fig = plt.figure()
for i, orig in enumerate(origs):
    fig.add_subplot(len(bs), 1, i+1)
    plt.imshow(orig, cmap='gray', vmin=vmin, vmax=vmax)
    plt.axis('off')
plt.savefig('orig_slices', bbox_inches='tight')
plt.clf()
fig = plt.figure()
for i, reco in enumerate(recos):
    fig.add_subplot(len(bs), 1, i+1)
    plt.imshow(reco, cmap='gray', vmin=vmin, vmax=vmax)
    plt.axis('off')
plt.savefig('reco_slices', bbox_inches='tight')
plt.clf()
fig = plt.figure()
for i, diff in enumerate(diffs):
    fig.add_subplot(len(bs), 1, i+1)
    # Errors are amplified 100x and inverted so darker means larger error
    plt.imshow(vmax - 100*diff, cmap='gray', vmin=vmin, vmax=vmax)
    plt.axis('off')
plt.savefig('diff_slices', bbox_inches='tight')
plt.clf()
| {"/ih_vs_noncumulative.py": ["/config.py", "/bruteforce.py"], "/overlay_examples.py": ["/config.py", "/bruteforce.py", "/util.py"], "/sphere_slices.py": ["/config.py", "/util.py"], "/compression_table.py": ["/config.py"], "/query_benchmark.py": ["/config.py", "/bruteforce.py", "/bruteforce_cupy.py", "/util.py"], "/entropy.py": ["/config.py", "/util.py", "/bruteforce.py"], "/ih_slices.py": ["/config.py", "/bruteforce.py", "/tthistogram.py", "/util.py"], "/cross_correlation.py": ["/config.py", "/util.py"], "/util.py": ["/tthistogram.py"]} |
72,715 | rballester/tthistograms | refs/heads/master | /cross_correlation.py | """
Normalized cross-correlation interactive display for the hurricane data set
"""
import numpy as np
import copy
import config
import util
import tt
import ttrecipes as tr
import matplotlib.pyplot as plt
import os
import time
# Load the hurricane volume (quantized to B bins) and its TT-compressed integral histogram
input_dataset = os.path.join(config.data_folder, 'hurricane_500_500_91_uint8.raw')
B = 128
eps = 0.0001
basename, X, tth = util.prepare_dataset(config.data_folder, input_dataset, B=B, eps=eps)
print('Total computing time: {}'.format(tth.total_time))
print('NNZ: {}'.format(len(tth.tensor.core)))
def interactive_loop(tth, basis_size=8):
    """
    Display an interactive image showing the normalized cross-correlation between
    a histogram field and any window selected by the user
    :param tth: a TTHistogram of the loaded volume
    :param basis_size: used to approximate the norm of each individual histogram in the field
    """
    # Project the bin core onto its leading `basis_size` left-singular vectors to
    # cheaply approximate per-window histogram norms
    cores = tt.vector.to_list(tth.tensor)
    tr.core.orthogonalize(cores, len(cores)-1)
    basis = np.linalg.svd(cores[-1][:, :, 0], full_matrices=0)[2][:basis_size, :]
    cores[-1] = np.einsum('ijk,aj->iak', cores[-1], basis)
    pca = tt.vector.from_list(cores)
    pcatth = copy.copy(tth)
    pcatth.tensor = pca
    # 8x8 x-y windows over the central z slab (hard-coded hurricane dimensions)
    shape = np.array([8, 8, 90])
    Is = [500, 500, 91]
    corners = np.array([[shape[0] // 2, Is[0] - shape[0] // 2], [shape[1] // 2, Is[1] - shape[1] // 2], [45, 46]])
    pcafield, elapsed = pcatth.box_field(corners, shape)
    print('Box field computation time: {}'.format(elapsed))
    pcafield = np.squeeze(pcafield)
    # Approximate norm of each window's histogram, used as normalization below
    norms = np.sqrt(np.sum(pcafield**2, axis=-1))
    # Module-level state shared between successive clicks
    global im
    im = None
    global sc
    sc = None
    global counter
    counter = 1
    def update(x, y):
        # Recompute and redraw the similarity image for the window clicked at (x, y)
        start = time.time()
        v, elapsed = tth.box(np.array([[x-4, x+3], [y-4, y+3], [0, 91]]))
        v = v / np.linalg.norm(v)
        # Project the bin core onto the selected window's (normalized) histogram
        cores = tt.vector.to_list(tth.tensor)
        cores[-1] = np.einsum('ijk,j->ik', cores[-1], v)[:, np.newaxis, :]
        proj = tt.vector.from_list(cores)
        projtth = copy.copy(tth)
        projtth.tensor = proj
        field, elapsed = projtth.box_field(corners, shape)
        # Normalize dot products by the per-window norms -> cosine similarity
        field = np.squeeze(field.T) / norms
        global im
        global sc
        global counter
        if im is None:
            # First draw: create the image and the click marker
            plt.axis('off')
            im = plt.imshow(field, cmap='pink', vmin=0, vmax=1)
            sc, = plt.plot(x, y, marker='+', ms=25, mew=5, color='red')
            plt.show()
        else:
            # Subsequent clicks: update in place and save a cropped snapshot
            im.set_data(field)
            sc.set_data(x, y)
            fig.canvas.draw()
            extent = plt.gca().get_window_extent().transformed(fig.dpi_scale_trans.inverted())
            plt.savefig(os.path.join(config.data_folder, 'similarity_{:03d}.pdf'.format(counter)), bbox_inches=extent)
            counter += 1
    def onclick(event):
        # Mouse callback: refresh the similarity map at the clicked pixel
        x = int(event.xdata)
        y = int(event.ydata)
        print(x, y)
        update(x, y)
    fig = plt.figure()
    fig.canvas.mpl_connect('button_press_event', onclick)
    update(250, 250)
interactive_loop(tth)
| {"/ih_vs_noncumulative.py": ["/config.py", "/bruteforce.py"], "/overlay_examples.py": ["/config.py", "/bruteforce.py", "/util.py"], "/sphere_slices.py": ["/config.py", "/util.py"], "/compression_table.py": ["/config.py"], "/query_benchmark.py": ["/config.py", "/bruteforce.py", "/bruteforce_cupy.py", "/util.py"], "/entropy.py": ["/config.py", "/util.py", "/bruteforce.py"], "/ih_slices.py": ["/config.py", "/bruteforce.py", "/tthistogram.py", "/util.py"], "/cross_correlation.py": ["/config.py", "/util.py"], "/util.py": ["/tthistogram.py"]} |
72,716 | rballester/tthistograms | refs/heads/master | /util.py | """
Miscellaneous utility functions
"""
import numpy as np
import os
import re
import pickle
import ttrecipes as tr
import tthistogram
def read_tensor(filename):
    """
    Load an ndarray stored as name_size1_[...]_sizeN_type (e.g. bonsai_256_256_256_uint8).

    The sizes and the NumPy dtype are parsed out of the file name itself.

    :param filename: path to the raw binary file
    :return: a float ndarray, and the "short" name (e.g. "bonsai")
    """
    path = os.path.expanduser(filename)
    match = re.search(r'(\D+)((_\d+)+)_(\w+)\.[^\.]+$', os.path.basename(path))
    short_name = match.group(1)
    dims = [int(token) for token in match.group(2)[1:].split("_")]
    dtype = getattr(np, match.group(4))
    data = np.fromfile(path, dtype=dtype).reshape(dims)
    return data.astype(float), short_name
def prepare_dataset(data_folder, filename, B, eps):
    """
    Given a data set, quantize it and generate its TT-histogram with given number of bins and tolerance error.

    The TT-histogram is cached on disk (one pickle per (name, B, eps) triple)
    and simply reloaded on subsequent calls.

    :param data_folder: folder where the cache pickle lives
    :param filename: the data set name with sizes and type, e.g. hurricane_500_500_91_uint8.raw
    :param B: number of bins
    :param eps: relative error
    :return: (data set "base" name, a NumPy array containing it, and its TT-histogram)
    """
    values, basename = read_tensor(filename)
    values = values.astype(np.float32)
    cache_file = os.path.join(data_folder, '{}_{}_{:.7f}.tth'.format(basename, B, eps))
    # Quantize the raw values into the integer bin range [0, B-1].
    lo, hi = values.min(), values.max()
    quantized = ((values - lo) / (hi - lo) * (B - 1)).astype(int)
    print(cache_file)
    if not os.path.exists(cache_file):
        pickle.dump(tthistogram.TTHistogram(quantized, B, eps, verbose=True), open(cache_file, 'wb'))
    tth = pickle.load(open(cache_file, 'rb'))
    return basename, quantized, tth
def sphere(data_folder, N, S, eps):
    """
    Create (or read if available) a sphere, and compress it.

    The boolean unit-ball mask of size S^N and its TT compression are cached
    together in a single pickle keyed by (N, S, eps).

    :param data_folder: folder holding the cache pickle
    :param N: number of dimensions
    :param S: grid size along each dimension
    :param eps: relative error for the TT compression
    :return: the S^N sphere (as floats) and an eps-compression with TT
    """
    cache = os.path.join(data_folder, 'sphere_{}_{}_{}.pickle'.format(N, S, eps))
    if not os.path.exists(cache):
        # Sum of squared coordinates over an N-dimensional [-1, 1] grid.
        axes = np.meshgrid(*([np.linspace(-1, 1, S)] * N))
        radius_sq = np.zeros([S] * N)
        for axis in axes:
            radius_sq += axis ** 2
        mask = np.sqrt(radius_sq) <= 1
        compressed = tr.core.tt_svd(mask, eps=eps)
        pickle.dump({'raw': mask, 'tt': compressed}, open(cache, 'wb'))
    payload = pickle.load(open(cache, 'rb'))
    return payload['raw'].astype(float), payload['tt']
| {"/ih_vs_noncumulative.py": ["/config.py", "/bruteforce.py"], "/overlay_examples.py": ["/config.py", "/bruteforce.py", "/util.py"], "/sphere_slices.py": ["/config.py", "/util.py"], "/compression_table.py": ["/config.py"], "/query_benchmark.py": ["/config.py", "/bruteforce.py", "/bruteforce_cupy.py", "/util.py"], "/entropy.py": ["/config.py", "/util.py", "/bruteforce.py"], "/ih_slices.py": ["/config.py", "/bruteforce.py", "/tthistogram.py", "/util.py"], "/cross_correlation.py": ["/config.py", "/util.py"], "/util.py": ["/tthistogram.py"]} |
72,717 | rballester/tthistograms | refs/heads/master | /bruteforce_cupy.py | """
Brute-force histogram computations using CuPy (https://cupy.chainer.org/)
"""
import numpy as np
import cupy as cp
import time
def bincount(X, B, weights=None):
    """
    GPU histogram of integer bin indices via an atomicAdd elementwise kernel.

    :param X: CuPy array of integer bin indices in [0, B)
    :param B: number of bins
    :param weights: optional CuPy array of per-element weights; when given the
        result is a weighted (float) histogram
    :return: a CuPy array of B counts (intp) or weighted sums (float64)
    """
    if weights is None:
        b = cp.zeros((B,), dtype=cp.int32)
        # Each GPU thread atomically increments its element's bin.
        cp.ElementwiseKernel(
            'S x', 'raw U bin',
            'atomicAdd(&bin[x], 1)',
            'bincount_kernel'
        )(X, b)
        b = b.astype(np.intp)
    else:
        b = cp.zeros((B,), dtype=cp.float32)
        cp.ElementwiseKernel(
            'S x, T w', 'raw U bin',
            'atomicAdd(&bin[x], w)',
            'bincount_with_weight_kernel'
        )(X, weights, b)
        b = b.astype(cp.float64)
    return b
def box_cp(X, B, corners):
    """
    Brute-force histogram of a box region of X, computed on the GPU.

    :param X: NumPy integer array of bin indices
    :param B: number of bins
    :param corners: array of (low, high) index pairs, one row per dimension.
        NOTE(review): when X exceeds the 512^3 GPU limit this mutates the
        caller's `corners` in place — confirm callers do not reuse it.
    :return: (histogram as a NumPy int array, elapsed GPU time in seconds)
    """
    lim = 512
    if np.prod(X.shape) > lim ** 3:
        # Too large for the GPU: crop to a lim^3 corner and shift the box there.
        Xg = cp.asarray(X[:lim, :lim, :lim].astype(np.uint8))
        corners -= corners[:, 0:1]
    else:
        Xg = cp.asarray(X.astype(np.uint8))
    start = time.time()
    slicing = [slice(corner[0], corner[1]) for corner in corners]
    Xg = Xg[slicing]
    # Call the module-level helper directly; the previous
    # `bruteforce_cupy.bincount` self-reference raised a NameError.
    result = bincount(Xg.flatten(), B=B)
    elapsed = time.time() - start
    # `np.int` was removed in NumPy >= 1.24; plain int is the same dtype.
    return cp.asnumpy(result).astype(int), elapsed
def pattern_cp(X, B, corners, pat):
    """
    Brute-force *weighted* histogram of a box region of X, computed on the GPU.

    :param X: NumPy integer array of bin indices
    :param B: number of bins
    :param corners: array of (low, high) index pairs, one row per dimension.
        NOTE(review): when X exceeds the 512^3 GPU limit this mutates the
        caller's `corners` in place — confirm callers do not reuse it.
    :param pat: weight pattern; flattened and applied element-wise to the box
    :return: (weighted histogram truncated to a NumPy int array, elapsed time)
    """
    lim = 512
    if np.prod(X.shape) > lim**3:
        # Too large for the GPU: crop to a lim^3 corner and shift the box there.
        Xg = cp.asarray(X[:lim, :lim, :lim].astype(np.uint8))
        corners -= corners[:, 0:1]
    else:
        Xg = cp.asarray(X.astype(np.uint8))
    patg = cp.asarray(pat.astype(np.float32).flatten())
    start = time.time()
    slicing = [slice(corner[0], corner[1]) for corner in corners]
    Xg = Xg[slicing]
    result = bincount(Xg.flatten(), B=B, weights=patg)
    elapsed = time.time() - start
    # `np.int` was removed in NumPy >= 1.24; plain int is the same dtype.
    # The cast truncates the float64 weighted sums, as before.
    return cp.asnumpy(result).astype(int), elapsed
if __name__ == '__main__':  # Some tests: compare the GPU bincount against np.histogram
    B = 128
    # Random 512^3 volume of bin indices.
    x = np.random.randint(0, B, [512, ]*3).astype(np.uint8)
    slicing = [slice(10, 500)]*3
    # weights = np.ones(x.shape)
    xg = cp.asarray(x)
    print(xg.dtype)
    print(xg.shape)
    # weights = cp.asarray(weights)
    # xg = cp.take(xg, np.arange(10, 500), axis=0)
    xg = xg[slicing]
    start = time.time()
    # xg = xg.flatten()
    # GPU histogram of the sliced box.
    hist = bincount(xg.flatten(), B=B, weights=None)
    elapsed = time.time() - start
    print('Elapsed:', elapsed)
    print(hist)
    start = time.time()
    # CPU ground truth over the same slice for comparison.
    gt = np.histogram(x[slicing], bins=B, range=[-0.5, B - 0.5], weights=None)[0]
    print(gt)
    elapsed = time.time() - start
    print('Elapsed:', elapsed)
| {"/ih_vs_noncumulative.py": ["/config.py", "/bruteforce.py"], "/overlay_examples.py": ["/config.py", "/bruteforce.py", "/util.py"], "/sphere_slices.py": ["/config.py", "/util.py"], "/compression_table.py": ["/config.py"], "/query_benchmark.py": ["/config.py", "/bruteforce.py", "/bruteforce_cupy.py", "/util.py"], "/entropy.py": ["/config.py", "/util.py", "/bruteforce.py"], "/ih_slices.py": ["/config.py", "/bruteforce.py", "/tthistogram.py", "/util.py"], "/cross_correlation.py": ["/config.py", "/util.py"], "/util.py": ["/tthistogram.py"]} |
72,718 | forana/igdb-api-python | refs/heads/master | /src/igdb/wrapper.py | """IGDB wrapper module for the api v4 with Apicalypse"""
from requests import post
from requests.models import Request, Response
API_URL = "https://api.igdb.com/v4/"


class IGDBWrapper:
    """Thin client for the IGDB API v4 using Apicalypse query strings."""

    def __init__(self, client_id: str, auth_token: str) -> None:
        """Store the Twitch client id and OAuth bearer token used on every call."""
        self.client_id = client_id
        self.auth_token = auth_token

    def api_request(self, endpoint: str, query: str) -> bytes:
        """
        Takes an endpoint and the Apicalypse query and returns the api response as a byte string.

        Raises requests.HTTPError on non-2xx responses.
        """
        url = IGDBWrapper._build_url(endpoint)
        params = self._compose_request(query)
        response = post(url, **params)
        response.raise_for_status()
        # The body is returned raw (bytes); annotation fixed accordingly —
        # the previous `-> Response` annotation did not match this value.
        return response.content

    @staticmethod
    def _build_url(endpoint: str = '') -> str:
        """Join the API base URL with *endpoint*."""
        return ('%s%s' % (API_URL, endpoint))

    def _compose_request(self, query: str) -> dict:
        """
        Build the keyword arguments for requests.post: auth headers plus the
        query as the request body. Returns a plain dict (the previous
        `-> Request` annotation did not match the returned value).

        Raises Exception for an empty query and TypeError for a non-string one.
        """
        if not query:
            raise Exception('No query provided!\nEither provide an inline query following Apicalypse\'s syntax or an Apicalypse object')

        request_params = {
            'headers': {
                'Client-ID': self.client_id,
                'Authorization': ('Bearer %s' % (self.auth_token)),
            }
        }
        if isinstance(query, str):
            request_params['data'] = query
            return request_params
        raise TypeError('Incorrect type of argument \'query\', only Apicalypse-like strings or Apicalypse objects are allowed')
| {"/tests/test_wrapper.py": ["/src/igdb/wrapper.py"]} |
72,719 | forana/igdb-api-python | refs/heads/master | /tests/test_wrapper.py | from pytest import raises
from src.igdb.wrapper import IGDBWrapper
def test_stores_authentication():
    """The wrapper must keep the credentials it was constructed with."""
    wrapper = IGDBWrapper('client', 'token')
    for attr, expected in (('client_id', 'client'), ('auth_token', 'token')):
        assert hasattr(wrapper, attr)
        assert getattr(wrapper, attr) == expected
def test_composes_query():
    """URL building and request composition produce the expected structures."""
    wrapper = IGDBWrapper('client', 'token')
    assert IGDBWrapper._build_url('dummy') == 'https://api.igdb.com/v4/dummy'
    expected = {
        'data': 'fields test,test2,test3; offset 2',
        'headers': {
            'Client-ID': 'client',
            'Authorization': 'Bearer token',
        },
    }
    assert wrapper._compose_request('fields test,test2,test3; offset 2') == expected
def test_raises_when_query_bad():
    """Empty queries raise Exception; non-string queries raise TypeError."""
    wrapper = IGDBWrapper('', '')
    # An empty query string must trigger the wrapper's own validation.
    # Calling _compose_request() with no argument at all (as before) raised a
    # TypeError for the missing parameter, so `exc.type is Exception` failed.
    with raises(Exception) as exc:
        wrapper._compose_request('')
    assert exc.type is Exception
    assert exc.value.args[0] == 'No query provided!\nEither provide an inline query following Apicalypse\'s syntax or an Apicalypse object'
    with raises(TypeError) as exc:
        wrapper._compose_request(11)
    assert exc.type is TypeError
    assert exc.value.args[0] == 'Incorrect type of argument \'query\', only Apicalypse-like strings or Apicalypse objects are allowed'
| {"/tests/test_wrapper.py": ["/src/igdb/wrapper.py"]} |
72,720 | forana/igdb-api-python | refs/heads/master | /setup.py | from setuptools import setup
# Use the README as the long description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()

# Package metadata for the igdb-api-v4 distribution.
setup(
    name="igdb-api-v4",
    version="0.0.5",
    author="Felix Nordén",
    author_email="felixnorden@gmail.com",
    description="An API wrapper for IGDB API v4",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/twitchtv/igdb-api-python/",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3',
    # The importable package is `igdb`, sourced from src/igdb.
    packages=['igdb'],
    package_dir={'igdb': 'src/igdb'},
    install_requires=['requests', 'protobuf']
)
| {"/tests/test_wrapper.py": ["/src/igdb/wrapper.py"]} |
72,721 | michelcabrera/my_work_odoo | refs/heads/master | /iglesias/wizard/wizard_reporte_listado_x_sacramento.py | # -*- coding: utf-8 -*-
from odoo import models, _, api, fields
# Selection options for sacrament types; mirrors the constant in models/iglesias.py.
SACRAMENTOS = [('bautismo', 'Bautismo'),
               ('confirmacion', 'Confirmacion'),
               ('bendicion', 'Bendicion de matrimonio'),
               ('orden_sagrada', 'Orden sagrada'),
               ('obituario', 'Obituario')]
class WizardListadoXSacaramento(models.TransientModel):
    """Wizard that prints the list of sacraments of a chosen type per congregation."""
    _name = 'iglesias.wizard_reporte_lbautizos'
    _inherit = 'iglesias.wizard_commun'
    # Sacrament type the report is filtered by.
    tipo_sacramento = fields.Selection(SACRAMENTOS, required=True)
    def _print_report(self, data):
        # Delegate rendering to the QWeb report action.
        return self.env['report'].get_action(self, 'iglesias.report_listado_bautizos', data=data)
    def _build_contexts(self, data):
        # Extend the common context with the selected sacrament type.
        result = super(WizardListadoXSacaramento, self)._build_contexts(data)
        result.update({'tipo_sacramento': data['form']['tipo_sacramento'] or False})
        return result
    @api.multi
    def check_report(self):
        # Collect the wizard form values and launch the report.
        self.ensure_one()
        data = {}
        data['ids'] = self.env.context.get('active_ids', [])
        data['model'] = self.env.context.get('active_model', 'ir.ui.menu')
        data['form'] = self.read(['fecha_inicio', 'fecha_final', 'congregaciones_ids', 'tipo_sacramento'])[0]
        used_context = self._build_contexts(data)
        data['form']['used_context'] = dict(used_context, lang=self.env.context.get('lang', 'en_US'))
        return self._print_report(data)
    @api.onchange('fecha_inicio', 'fecha_final', 'tipo_sacramento')
    def _change_product_ids(self):
        # Preselect (and restrict the picker to) congregations that actually
        # have sacraments of the chosen type in the selected date range.
        super(WizardListadoXSacaramento, self)._change_product_ids()
        if self.fecha_inicio:
            sacramentos = self.env['iglesias.sacramento'].get_sacramentos_entre_fechas(self.tipo_sacramento,
                                                                                       self.fecha_inicio,
                                                                                       self.fecha_final)
            miembro_ids = [s.miembro_id.id for s in sacramentos]
            domain1 = [('miembros_ids.id', 'in', miembro_ids)]
            domain = {'congregaciones_ids': domain1}
            self.congregaciones_ids = self.env['iglesias.congregacion'].search(domain1)
            return {'domain': domain}
| {"/iglesias/models/__init__.py": ["/iglesias/__init__.py"]} |
72,722 | michelcabrera/my_work_odoo | refs/heads/master | /iglesias/wizard/wizard_reporte_estado_congregaciones.py | # -*- coding: utf-8 -*-
from odoo import models, _
class WizardComun(models.TransientModel):
    """Wizard launching the congregations-state report."""

    _name = 'iglesias.wizard_reporte_ec'
    _inherit = 'iglesias.wizard_commun'

    def _print_report(self, data):
        # Hand the collected form data over to the QWeb report engine.
        report_name = 'iglesias.report_ce'
        return self.env['report'].get_action(self, report_name, data=data)
| {"/iglesias/models/__init__.py": ["/iglesias/__init__.py"]} |
72,723 | michelcabrera/my_work_odoo | refs/heads/master | /iglesias/report/__init__.py | import congregaciones_estado
import estado_financiero
import listado_altas
import listado_bajas
import listado_bautizos | {"/iglesias/models/__init__.py": ["/iglesias/__init__.py"]} |
72,724 | michelcabrera/my_work_odoo | refs/heads/master | /my_work/models/__init__.py | # -*- coding: utf-8
__author__ = 'reinaldo'
import work
import res_user
| {"/iglesias/models/__init__.py": ["/iglesias/__init__.py"]} |
72,725 | michelcabrera/my_work_odoo | refs/heads/master | /iglesias/wizard/__init__.py | import wizard_reporte_comun
import wizard_reporte_estado_congregaciones
import wizard_reporte_estado_financiero
import wizard_reporte_listado_altas
import wizard_reporte_listado_bajas
import wizard_reporte_listado_x_sacramento
import wizard_reporte_fe_bautismo
| {"/iglesias/models/__init__.py": ["/iglesias/__init__.py"]} |
72,726 | michelcabrera/my_work_odoo | refs/heads/master | /iglesias/__manifest__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011-Today Serpent Consulting Services Pvt. Ltd.
# (<http://www.serpentcs.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
# Odoo add-on manifest for the "Iglesias" (churches) module.
{
    "name": "Iglesias",
    "version": "1.1",
    "category": "Iglesias",
    "description": """
Registro de Oficios.
=================================
Gestionar Medios Básicos, Miembros, Sacramentos y Gastos en las Iglesias
""",
    "author": "Reinaldo",
    # "images": ['images/1_servers_synchro.jpeg', 'images/2_synchronize.jpeg', 'images/3_objects_synchro.jpeg',],
    # Core Odoo modules this add-on builds on.
    "depends": ['report', 'hr', 'contacts', 'product'],
    'website': "",
    # XML data files loaded at install/upgrade time: views, wizards, reports.
    "data": [
        # "security/my_work_security.xml",
        # "security/ir.model.access.csv",
        # "data/my_work_data.xml",
        # "wizard/base_synchro_view.xml",
        "views/iglesias_view.xml",
        "views/product_views.xml",
        "views/congregacion_view.xml",
        "views/sacramento_view.xml",
        "views/oficio_view.xml",
        "views/hr_view.xml",
        "views/estacion_liturgica_view.xml",
        "wizard/wizard_reporte_comun_view.xml",
        "wizard/wizard_reporte_estado_congregaciones_view.xml",
        "wizard/wizard_reporte_estado_financiero_view.xml",
        "wizard/wizard_reporte_listado_altas_view.xml",
        "wizard/wizard_reporte_listado_bajas_view.xml",
        "wizard/wizard_reporte_listado_bautizos_view.xml",
        "wizard/wizard_reporte_fe_bautismo_view.xml",
        "views/report_congregacion.xml",
        "views/report_fe_bautismo.xml",
        "views/report_congregacion_junta.xml",
        "views/report_congregacion_membresia.xml",
        "views/report_congregacion_mediosb.xml",
        "views/report_congregacion_enfermos.xml",
        "views/report_congregacion_estado.xml",
        "views/report_estado_financiero.xml",
        "views/report_congregacion_altas.xml",
        "views/report_congregacion_bajas.xml",
        "views/report_congregacion_bautizos.xml",
        "views/iglesias_report.xml",
        # "report/my_work_template_reports.xml",
        # 'reports.xml',
        # "views/res_request_view.xml",
    ],
    "installable": True,
    'application': True,
    "auto_install": False,
}
| {"/iglesias/models/__init__.py": ["/iglesias/__init__.py"]} |
72,727 | michelcabrera/my_work_odoo | refs/heads/master | /iglesias/wizard/wizard_reporte_listado_bajas.py | # -*- coding: utf-8 -*-
from odoo import models, _, api, fields
class WizardListadoBajas(models.TransientModel):
    """Wizard that prints the list of members withdrawn within a date range."""
    _name = 'iglesias.wizard_reporte_lb'
    _inherit = 'iglesias.wizard_commun'
    def _print_report(self, data):
        # Delegate rendering to the QWeb report action.
        return self.env['report'].get_action(self, 'iglesias.report_listado_bajas', data=data)
    @api.onchange('fecha_inicio', 'fecha_final')
    def _change_product_ids(self):
        # Preselect (and restrict the picker to) congregations that have
        # withdrawals (fecha_baja) inside the selected date range.
        super(WizardListadoBajas, self)._change_product_ids()
        if self.fecha_inicio:
            domain1 = [('miembros_ids.fecha_baja', '>=', self.fecha_inicio)]
            if self.fecha_final:
                domain1 += [('miembros_ids.fecha_baja', '<=', self.fecha_final)]
            domain = {'congregaciones_ids': domain1}
            self.congregaciones_ids = self.env['iglesias.congregacion'].get_congregaciones_con_bajas(self.fecha_inicio,
                                                                                                     self.fecha_final)
            return {'domain': domain}
| {"/iglesias/models/__init__.py": ["/iglesias/__init__.py"]} |
72,728 | michelcabrera/my_work_odoo | refs/heads/master | /iglesias/models/iglesias.py | # -*- coding: utf-8 -*-
from odoo.exceptions import ValidationError
from odoo import models, fields, api
# Selection options for sacrament types, shared by the Sacramento model below.
SACRAMENTOS = [('bautismo', 'Bautismo'),
               ('confirmacion', 'Confirmacion'),
               ('bendicion', 'Bendicion de matrimonio'),
               ('orden_sagrada', 'Orden sagrada'),
               ('obituario', 'Obituario')]
class Congregacion(models.Model):
    """A congregation (parish, mission or extension) with its members, leadership,
    sacraments, fixed assets and sick members."""
    _name = 'iglesias.congregacion'
    _rec_name = 'nombre'
    _order = 'no_registro'
    nombre = fields.Char('Nombre', required=True)
    no_registro = fields.Integer('No de registro', required=True)
    # Only employees ranked deacon or presbyter may be minister/rector.
    ministro_id = fields.Many2one('hr.employee', 'Ministro o Rector', domain=[('es_empleado', '=', True),
                                                                             ('categoria', 'in',
                                                                              ['diacono', 'presvitero'])])
    status = fields.Selection([('parroquia', 'Parroquias'), ('misiones', 'Misiones de parroquias'),
                               ('extensiones', 'Extensiones de parroquias')], 'Status')
    miembros_ids = fields.One2many('hr.employee', 'congregacion_id', 'Miembros', domain=[('es_miembro', '=', True)])
    liderazgo_ids = fields.One2many('congregacion.employee', 'congregacion_id', 'Liderazgo')
    sacramentos_ids = fields.One2many('iglesias.sacramento', 'congregacion_id', 'Sacramentos')
    mediosb_ids = fields.One2many('product.template', 'congregacion_id', 'Medios básicos')
    # Members with a recorded ailment are listed as sick.
    enfermos_ids = fields.One2many('hr.employee', 'congregacion_id', 'Enfermos',
                                   domain=[('padecimientos', '!=', False)])
    cantidad_mediosb = fields.Integer(compute='_calc_cantidad_mediosb')
    @api.one
    @api.depends('mediosb_ids')
    def _calc_cantidad_mediosb(self):
        # Number of fixed assets owned by the congregation.
        self.cantidad_mediosb = self.mediosb_ids and len(self.mediosb_ids.ids) or 0
    @api.model
    def get_congregaciones_con_altas(self, fecha1, fecha2):
        # Congregations with member enrollments within [fecha1, fecha2]
        # (open-ended when fecha2 is falsy).
        dom = [('miembros_ids.fecha_alta', '>=', fecha1)]
        if fecha2:
            dom += [('miembros_ids.fecha_alta', '<=', fecha2)]
        return self.search(dom)
    @api.model
    def get_congregaciones_con_bajas(self, fecha1, fecha2):
        # Congregations with member withdrawals within [fecha1, fecha2]
        # (open-ended when fecha2 is falsy).
        dom = [('miembros_ids.fecha_baja', '>=', fecha1)]
        if fecha2:
            dom += [('miembros_ids.fecha_baja', '<=', fecha2)]
        return self.search(dom)
class CongregacionEmployee(models.Model):
    """Leadership role of an employee within a congregation, with tenure dates."""
    _table = 'congregacion_employee_rel'
    _name = 'congregacion.employee'
    congregacion_id = fields.Many2one('iglesias.congregacion', 'Congregación')
    employee_id = fields.Many2one('hr.employee', 'Líder')
    funcion = fields.Selection([('rector', 'Rector'), ('ministro', 'Ministro'),
                                ('capiller_mayor', 'Capiller mayor'),
                                ('capiller_menor', 'Capiller menor'),
                                ('sercretario', 'Secretario'),
                                ('tesorero', 'Tesorero'),
                                ('vocal', 'Vocal'),
                                ('maestra', 'Maestra EBN')], "Función")
    fecha_instalado = fields.Date("Fecha de instalado")
    fecha_removido = fields.Date("Fecha de removido")
class Sacramento(models.Model):
    """A sacrament (baptism, confirmation, marriage blessing, holy order or
    obituary) performed on a member, with the people involved in the rite."""
    _name = 'iglesias.sacramento'
    _order = 'fecha'
    tipo_sacramento = fields.Selection(SACRAMENTOS, required=True)
    estacion_liturgica_id = fields.Many2one('iglesias.estacion_liturgica', 'Estación litúrgica', required=True)
    congregacion_id = fields.Many2one('iglesias.congregacion', 'Congregación', required=True)
    oficio_id = fields.Many2one('iglesias.oficio', 'Oficio')
    fecha = fields.Date('Fecha', required=True)
    lugar = fields.Text('Lugar')
    # Parents and godparents: domains exclude the member and each other, and
    # restrict by gender (False allowed for records without a gender set).
    padre = fields.Many2one('hr.employee', 'Padre',
                            domain="[('id', 'not in', [miembro_id,madre,madrina, padrino]),('gender','in', [False,'male'])]")
    madre = fields.Many2one('hr.employee', 'Madre',
                            domain="[('id', 'not in', [miembro_id,padre,madrina, padrino]),('gender','in', [False,'female'])]")
    padrino = fields.Many2one('hr.employee', 'Padrino',
                              domain="[('id', 'not in', [miembro_id,padre, madre, madrina]),('gender','in', [False,'male'])]")
    madrina = fields.Many2one('hr.employee', 'Madrina',
                              domain="[('id', 'not in', [miembro_id,padre, madre, padrino]),('gender','in', [False,'female'])]")
    ministro = fields.Many2one('hr.employee', 'Ministro',
                               domain=[('es_empleado', '=', True), ('gender', 'in', [False, 'male'])])
    obispo = fields.Many2one('hr.employee', 'Obispo',
                             domain=[('es_empleado', '=', True), ('gender', 'in', [False, 'male'])])
    # The member the sacrament is performed on.
    miembro_id = fields.Many2one('hr.employee', 'Hermano', required=True,
                                 domain="[('es_miembro', '=', True),('congregacion_id', '=', congregacion_id)]",
                                 help="Hermano al que se le realiza el sacramento")
    conyuge = fields.Many2one('hr.employee', 'Nombre', domain="[('es_miembro', '=', True),('id', '!=', miembro_id)]")
    no_registro = fields.Integer('Número de registro')
    testigo1 = fields.Many2one('hr.employee', 'Primer testigo',
                               domain="[('es_miembro', '=', True),('id', 'not in', [conyuge,testigo2, miembro_id])]")
    testigo2 = fields.Many2one('hr.employee', 'Segundo testigo',
                               domain="[('es_miembro', '=', True), ('id', 'not in', [conyuge,testigo1, miembro_id])]")
    miembro_categoria = fields.Selection(related='miembro_id.categoria')
    confirmacion_id = fields.Many2one('iglesias.sacramento', compute='calc_confirmacion_id')

    @api.one
    def calc_confirmacion_id(self):
        # For a baptism, link the member's confirmation record (if any).
        confirmacion = False
        if self.tipo_sacramento == 'bautismo' and self.miembro_id:
            confirmacion = self.search([('id', '!=', self.id), ('tipo_sacramento', '=', 'confirmacion'),
                                        ('miembro_id', '=', self.miembro_id.id)], limit=1)
        self.confirmacion_id = confirmacion

    @api.multi
    def print_fe_bautismo(self):
        # Print the baptism certificate report for these sacraments.
        return self.env['report'].get_action(self, 'iglesias.report_fe_bautismo')

    @api.onchange('tipo_sacramento')
    def onchange_tipo_sacramento(self):
        # Reset type-specific fields when the sacrament type changes.
        self.lugar = False
        self.padre = False
        self.madre = False
        self.padrino = False
        self.madrina = False
        self.obispo = False
        self.conyuge = False
        self.no_registro = False
        self.testigo1 = False
        self.testigo2 = False

    @api.onchange('congregacion_id')
    def onchange_congregacion_id(self):
        # Default the officiating minister to the congregation's minister.
        if self.congregacion_id:
            self.ministro = self.congregacion_id.ministro_id

    @api.model
    def create(self, vals=None):
        """Create the record and propagate side effects to the member.

        The default was changed from the mutable ``vals={}`` to ``None`` to
        avoid a shared mutable default argument; behavior is unchanged.
        """
        if vals is None:
            vals = {}
        res = super(Sacramento, self).create(vals)
        if vals.get('tipo_sacramento', False):
            res.actualizar_hr()
        return res

    @api.multi
    def write(self, vals=None):
        """Write and re-propagate side effects when the type changes.

        The default was changed from the mutable ``vals={}`` to ``None`` to
        avoid a shared mutable default argument; behavior is unchanged.
        """
        if vals is None:
            vals = {}
        res = super(Sacramento, self).write(vals)
        if vals.get('tipo_sacramento', False):
            for s in self:
                s.actualizar_hr()
        return res

    @api.one
    def actualizar_hr(self):
        # Keep the member's HR record consistent with the sacrament type:
        # obituary → withdrawal by death; marriage blessing → married status.
        if self.miembro_id:
            if self.tipo_sacramento == 'obituario':
                self.miembro_id.fecha_baja = self.fecha
                self.miembro_id.causa = 'fallecido'
            elif self.tipo_sacramento == 'bendicion':
                self.miembro_id.marital = 'married'
                if self.conyuge:
                    self.conyuge.marital = 'married'
            else:
                self.miembro_id.fecha_baja = False
                self.miembro_id.causa = False

    @api.model
    def get_sacramentos_entre_fechas(self, tipo, fecha1, fecha2, congregaciones_ids=[]):
        # Sacraments of a given type within [fecha1, fecha2] (open-ended when
        # fecha2 is falsy), optionally filtered by the member's congregation.
        # NOTE(review): the [] default is read-only here, so it is harmless.
        dom = [('fecha', '>=', fecha1), ('tipo_sacramento', '=', tipo)]
        if fecha2:
            dom += [('fecha', '<=', fecha2)]
        if congregaciones_ids:
            dom += [('miembro_id.congregacion_id', 'in', congregaciones_ids)]
        return self.search(dom)

    @api.constrains('tipo_sacramento', 'miembro_id')
    def _check_unique_sacramento(self):
        # Each member may receive every sacrament at most once, except holy orders.
        for sacramento in self:
            if sacramento.tipo_sacramento != 'orden_sagrada' and self.search(
                    [('tipo_sacramento', '=', sacramento.tipo_sacramento),
                     ('miembro_id', '=', sacramento.miembro_id.id),
                     ('id', '!=', sacramento.id)]):
                raise ValidationError(
                    "El miembro %s solo puede tener un sacramento de tipo %s" % (
                        sacramento.miembro_id.name, sacramento.tipo_sacramento))
class EstacionLiturgica(models.Model):
    """Liturgical season a sacrament belongs to (simple named lookup)."""
    _name = 'iglesias.estacion_liturgica'
    _rec_name = 'nombre'
    nombre = fields.Char('Nombre')
| {"/iglesias/models/__init__.py": ["/iglesias/__init__.py"]} |
72,729 | michelcabrera/my_work_odoo | refs/heads/master | /iglesias/models/hr.py | # -*- coding: utf-8 -*-
from odoo import models, fields, api, _
from odoo.exceptions import ValidationError
import datetime
# Clerical ranks; also drive the honorific prefix in HrEmployee.name_get.
CATEGORIA = [('lector_laico', 'Lector Laico'),
             ('diacono', 'Diacono'),
             ('presvitero', 'Presbitero'),
             ('arcediano', 'Arcediano'),
             ('obispo', 'Obispo')]
class HrEmployee(models.Model):
    """Extends hr.employee with church membership, health and clerical data."""
    _inherit = 'hr.employee'

    @api.multi
    @api.depends('name', 'categoria')
    def name_get(self):
        """Prefix the display name with the honorific of the clerical rank.

        NOTE(review): records without a name get no entry at all — confirm
        that is intended.
        """
        result = []
        for persona in self:
            if persona.name:
                if persona.categoria == 'lector_laico':
                    name = 'L.Lco ' + persona.name
                elif persona.categoria in ['diacono', 'presvitero']:
                    name = 'Rev ' + persona.name
                elif persona.categoria == 'arcediano':
                    name = 'Ven ' + persona.name
                elif persona.categoria == 'obispo':
                    name = 'Exmo ' + persona.name
                else:
                    name = persona.name
                result.append((persona.id, name))
        return result

    es_miembro = fields.Boolean('Es miembro?')
    es_empleado = fields.Boolean('Es empleado?')
    no_registro = fields.Integer('No de registro')
    congregacion_id = fields.Many2one('iglesias.congregacion', 'Congregación')
    padecimientos = fields.Text('Padecimientos')
    medicamentos = fields.Text('Medicamentos')
    membresia = fields.Selection([('visitante', 'Visitante'), ('catecumeno', 'Catecumeno'),
                                  ('comulgante', 'Comulgante'), ('pasivo', 'Pasivo')], 'Tipo de membresía')
    # Pass the callable (not its result): `fields.Date.today()` was evaluated
    # once at import time, so every record created after a server restart got
    # that stale date as default.
    fecha_alta = fields.Date('Fecha de alta', default=fields.Date.today)
    es_diezmador = fields.Boolean('Es Diezmador?')
    no_familia = fields.Integer('# de familia episcopal')
    fecha_baja = fields.Date('Fecha de baja')
    causa = fields.Selection([('traslado', 'Traslado'), ('fallecido', 'Fallecido')], 'Causa de baja')
    edad = fields.Integer('Edad', compute='calc_edad')
    categoria = fields.Selection(CATEGORIA, 'Categoría')
    cantidad_sacramentos = fields.Integer(compute='_calc_cantidad_sacramentos')
    sacramento_ids = fields.One2many('iglesias.sacramento', 'miembro_id')
    cantidad_mediosb = fields.Integer(compute='_calc_cantidad_mediosb')
    medios_basicos_ids = fields.One2many('product.template', 'responsable_id')
    _sql_constraints = [('iglesias_employee_ci_unique', 'unique(identification_id)', 'El CI debe ser único en la bd!')]

    @api.one
    @api.depends('birthday')
    def calc_edad(self):
        # Age in 365-day years; `//` keeps the former Python 2 floor-division
        # semantics of `/` under Python 3 as well.
        edad = self.birthday and (
            (datetime.datetime.now().date() - datetime.datetime.strptime(self.birthday, '%Y-%m-%d').date()).days // 365) or 0
        if edad < 0:
            self.edad = 0
        else:
            self.edad = edad

    @api.one
    @api.depends('sacramento_ids')
    def _calc_cantidad_sacramentos(self):
        # Number of sacraments received by the member.
        self.cantidad_sacramentos = self.sacramento_ids and len(self.sacramento_ids.ids) or 0

    @api.one
    @api.depends('medios_basicos_ids')
    def _calc_cantidad_mediosb(self):
        # Number of fixed assets the member is responsible for.
        self.cantidad_mediosb = self.medios_basicos_ids and len(self.medios_basicos_ids.ids) or 0

    @api.onchange('identification_id')
    def onchange_identification_id(self):
        # Cuban CI encodes gender parity in its second-to-last digit and the
        # birth date in its first six digits.
        if self.identification_id:
            self.gender = int(self.identification_id[-2]) % 2 == 0 and 'male' or 'female'
            self._set_date_from_ci()

    @api.one
    def _set_date_from_ci(self):
        # Derive the birthday (YYMMDD prefix of the CI); years > 25 are
        # assumed to be 19xx, otherwise 20xx.
        if self.identification_id and len(self.identification_id) == 11:
            strdate = str(self.identification_id[:6])
            try:
                aux_str = "19" + strdate if int(strdate[:2]) > 25 else "20" + strdate
                self.birthday = datetime.datetime.strptime(aux_str, "%Y%m%d").date()
            except Exception:
                # `except Exception, e` was Python-2-only syntax and `e` was
                # unused; this form works on both Python 2 and 3.
                raise ValidationError(_('There are errors on employee CI'))
        else:
            self.birthday = False

    @api.multi
    def print_fe_bautismo(self):
        """Print the baptism certificate for each member's baptism record."""
        bautismos_ids = []
        for employe in self:
            for s in employe.sacramento_ids:
                if s.tipo_sacramento == 'bautismo':
                    bautismos_ids.append(s.id)
                    break
        return self.env['iglesias.sacramento'].browse(bautismos_ids).print_fe_bautismo()
| {"/iglesias/models/__init__.py": ["/iglesias/__init__.py"]} |
72,730 | michelcabrera/my_work_odoo | refs/heads/master | /iglesias/models/__init__.py | import product
import hr
import iglesias
import oficio | {"/iglesias/models/__init__.py": ["/iglesias/__init__.py"]} |
72,731 | michelcabrera/my_work_odoo | refs/heads/master | /my_work/__init__.py | __author__ = 'reinaldo'
import models | {"/iglesias/models/__init__.py": ["/iglesias/__init__.py"]} |
72,732 | michelcabrera/my_work_odoo | refs/heads/master | /my_work/models/work.py | __author__ = 'reinaldo'
# -*- coding: utf-8
from odoo import models, api, fields
class WorkOrder(models.Model):
    """Repair work order (PC or hard-disk), with a state machine driven by the
    button methods below and mirror (related) fields from the two sub-orders."""
    _name = 'my_work.work_order'
    _rec_name = 'no_orden'
    @api.one
    def confirmar(self):
        # Open the order and assign a sequence number by order type.
        self.state = 'abierto'
        if self.tipo_orden == 'pc':
            self.no_orden = self.env['ir.sequence'].get('work_order_pc')
        else:
            self.no_orden = self.env['ir.sequence'].get('work_order_hdd')
    @api.one
    def pendiente_revisar(self):
        # Mark as pending review; also assigns a sequence number.
        self.state = 'p_revisar'
        if self.tipo_orden == 'pc':
            self.no_orden = self.env['ir.sequence'].get('work_order_pc')
        else:
            self.no_orden = self.env['ir.sequence'].get('work_order_hdd')
    @api.one
    def reparado(self):
        self.state = 'reparado'
    @api.one
    def entregar(self):
        self.state = 'cerrado'
    @api.one
    def sin_solucion(self):
        self.state = 'nts'
    @api.one
    def cancelar(self):
        self.state = 'cancelado'
    @api.one
    def reiniciar(self):
        self.state = 'nuevo'
    @api.model
    def create(self, vals):
        # Create the base order, then the type-specific sub-order pointing to it.
        # NOTE(review): this mutates the caller's `vals` dict in place.
        work_order_id = super(WorkOrder, self).create(vals)
        # TODO: this is needed to set given values to first variant after creation
        # these fields should be moved to product as lead to confusion
        vals.update({'work_order_id': work_order_id.id})
        if vals.get('tipo_orden') == 'hdd':
            self.env['my_work.work_order_hdd'].create(vals)
        else:
            self.env['my_work.work_order_pc'].create(vals)
        return work_order_id
    wpc_ids = fields.One2many('my_work.work_order_pc', 'work_order_id', string='ORDENES DE DISCO DURO')
    whdd_ids = fields.One2many('my_work.work_order_hdd', 'work_order_id', string='ORDENES DE HDD')
    a_nombre = fields.Many2one('res.partner', string="A NOMBRE DE")
    ci = fields.Char(related='a_nombre.ci')
    fecha_inicial = fields.Date('FECHA DE LA SOLICITUD', required=True)
    fecha = fields.Date('FECHA DE REPARACION', states={'abierto': [('required', True), ('invisible', False)],
                                                       'cerrado': [('invisible', False)]})
    no_orden = fields.Char('NO DE ORDEN', copy=False, readonly=True, states={'nuevo': [('invisible', True)]})
    piezas = fields.Text('PIEZAS O ACCESORIOS EMPLEADOS')
    state = fields.Selection([('nuevo', 'Nuevo'), ('p_revisar', 'Pendiente a Revisar'), ('abierto', 'En Proceso'),
                              ('reparado', 'Reparado'), ('cerrado', 'Entregado'),
                              ('nts', 'Sin Solucion'), ('cancelado', 'Cancelado')], default='nuevo', string='ESTADO')
    work_done_ids = fields.One2many('my_work.work_done', 'work_order_id', string='TRABAJOS REALIZADOS')
    costo = fields.Float('COSTO DEL SERVICIO')
    realizado_por = fields.Many2one('hr.employee', string="REALIZADO POR")
    entregado_por = fields.Many2one('hr.employee', string="ENTREGADO POR")
    recibido_por = fields.Many2one('res.partner', string="RECIBIDO POR")
    tipo_orden = fields.Selection([('pc', 'PC'), ('hdd', 'HDD')], string='TIPO DE ORDEN', required=True)
    # Fields mirrored from the PC sub-order (my_work.work_order_pc).
    # NOTE(review): `invisible='self.tipo_orden == "hdd"'` passes a truthy
    # string, not an evaluated expression — confirm the views behave as meant.
    pc = fields.Many2one('my_work.resource_pc', 'EQUIPO', related='wpc_ids.pc', invisible='self.tipo_orden == "hdd"')
    motherboard = fields.Many2one('resource.resource', string='MOTHERBOARD', related='pc.motherboard',
                                  invisible='self.tipo_orden == "hdd"')
    fuente = fields.Many2one('resource.resource', string='FUENTE', related='pc.fuente',
                             invisible='self.tipo_orden == "hdd"')
    memorias_ram = fields.Many2many(related='pc.memorias_ram', invisible='self.tipo_orden == "hdd"')
    lectores = fields.Many2many(related='pc.lectores', invisible='self.tipo_orden == "hdd"')
    hdd = fields.Many2many(related='pc.hdd', invisible='self.tipo_orden == "hdd"')
    chasis = fields.Char(string='CHASIS', related='pc.chasis', invisible='self.tipo_orden == "hdd"')
    cpu = fields.Many2one(related='pc.cpu', invisible='self.tipo_orden == "hdd"')
    otros_nota = fields.Text(related='pc.otros', invisible='self.tipo_orden == "hdd"')
    laptop = fields.Boolean('LAPTOP', related='pc.laptop', invisible='self.tipo_orden == "hdd"')
    # Kind of work requested (mirrored from the PC sub-order).
    mantenimiento = fields.Boolean('MANTENIMIENTO', related='wpc_ids.mantenimiento')
    rmenor = fields.Boolean('REPARACION MENOR', related='wpc_ids.rmenor')
    rmayor = fields.Boolean('REPARACION MAYOR', related='wpc_ids.rmayor')
    otros = fields.Boolean('OTROS SERVICIOS', related='wpc_ids.otros')
    so = fields.Boolean('SISTEMA OPERATIVO', related='wpc_ids.so')
    act = fields.Boolean('ACT. DE SOFT', related='wpc_ids.act')
    insoft = fields.Boolean(related='wpc_ids.insoft')
    antivirus = fields.Boolean('DE ANTIVIRUS', related='wpc_ids.antivirus')
    # Fields mirrored from the HDD sub-order (my_work.work_order_hdd).
    hdd_resource = fields.Many2one('my_work.resource_hdd', 'DISCO DURO', related='whdd_ids.hdd')
    recuperacion_datos = fields.Boolean(related='whdd_ids.recuperacion_datos')
    reparcion_hardware = fields.Boolean(related='whdd_ids.reparcion_hardware')
    sus_placa = fields.Boolean('SUSTITUCION DE PLACA', related='whdd_ids.sus_placa')
    sus_componentes = fields.Boolean('SUSTITUCION DE COMPONENTES', related='whdd_ids.sus_componentes')
    no_placa = fields.Char('#PLACA', related='hdd_resource.no_placa')
    marca = fields.Many2one('my_work.marca_hdd', related='hdd_resource.marca')
    no_serie = fields.Char(related='hdd_resource.no_serie')
    firw = fields.Char(related='hdd_resource.firw')
    capacidad = fields.Many2one(related='hdd_resource.capacidad')
    tipo_conexion = fields.Selection(related='hdd_resource.tipo_conexion')
class WorkOrderPc(models.Model):
    """Work order specialised for PC equipment.

    Delegation-inherits my_work.work_order through ``work_order_id`` and adds
    the serviced PC plus checkbox flags describing the kind of work performed.
    """
    _inherits = {'my_work.work_order': 'work_order_id'}
    _name = 'my_work.work_order_pc'
    # @api.one
    # def confirmar(self):
    #     self.work_order_id.no_orden = self.env['ir.sequence'].get('my_work.sequence_work_order_pc')
    # PC under service; the context pre-selects the 'material' resource type
    # when creating a new one from this field.
    pc = fields.Many2one('my_work.resource_pc', 'EQUIPO',
                         context="{'default_resource_type':'material'}")
    # Delegation link to the base work order (deleted together with it).
    work_order_id = fields.Many2one('my_work.work_order', 'ORDEN DE TRABAJO', required=True, ondelete="cascade")
    # Kind-of-work flags shown on the order form.
    mantenimiento = fields.Boolean('MANTENIMIENTO')
    rmenor = fields.Boolean('REPARACION MENOR')
    rmayor = fields.Boolean('REPARACION MAYOR')
    otros = fields.Boolean('OTROS SERVICIOS')
    so = fields.Boolean('SISTEMA OPERATIVO')
    act = fields.Boolean('ACT. DE SOFT')
    insoft = fields.Boolean('INST. SOFT ACT')
    antivirus = fields.Boolean('DE ANTIVIRUS')
class WorkOrderHdd(models.Model):
    """Work order specialised for hard drives.

    Delegation-inherits my_work.work_order through ``work_order_id`` and adds
    the serviced drive plus checkbox flags for the kind of repair.
    """
    _inherits = {'my_work.work_order': 'work_order_id'}
    _name = 'my_work.work_order_hdd'

    @api.one
    def confirmar(self):
        """Confirm the order and assign its number from the HDD sequence.

        Fix: the original called ``super(WorkOrderPc, self)`` — the sibling
        class — which breaks the MRO lookup (TypeError: obj must be an
        instance of the given class). It must name this class.
        """
        super(WorkOrderHdd, self).confirmar()
        self.work_order_id.no_orden = self.env['ir.sequence'].get('my_work.sequence_work_order_hdd')

    # Drive under service; the context pre-selects the category/type when
    # creating a new resource from this field.
    hdd = fields.Many2one('my_work.resource_hdd', 'DISCO DURO',
                          context="{'default_resource_cat':'hdd',"
                                  "'default_resource_type':'material'}")
    recuperacion_datos = fields.Boolean('RECUPERACION DE DATOS')
    reparcion_hardware = fields.Boolean('REPARACION DE HARDWARE')
    sus_placa = fields.Boolean('SUSTITUCION DE PLACA')
    sus_componentes = fields.Boolean('SUSTITUCION DE COMPONENTES')
    # Board number mirrored from the selected drive.
    no_placa = fields.Char('#PLACA', related='hdd.no_placa')
    # Delegation link to the base work order (deleted together with it).
    work_order_id = fields.Many2one('my_work.work_order', 'ORDEN DE TRABAJO', required=True, ondelete="cascade")
class WorkType(models.Model):
    """Catalogue of the service types offered."""
    _name = 'my_work.work_type'
    nombre = fields.Char(string='TIPO DE SERVICIO', required=True)
class ResourceDescriptionPC(models.Model):
    """A PC described as an assembly of component resources.

    Extends resource.resource; each component field filters existing
    resources by category through its ``domain`` and pre-fills the category
    and type through its ``context`` when creating a new one inline.
    """
    _inherit = 'resource.resource'
    _name = 'my_work.resource_pc'
    motherboard = fields.Many2one('resource.resource', string='MOTHERBOARD',
                                  domain="[('resource_cat','=','motherboard')]",
                                  context="{'default_resource_cat':'motherboard',"
                                          "'default_resource_type':'material'}")
    fuente = fields.Many2one('resource.resource', string='FUENTE', domain="[('resource_cat','=','fuente_pc')]",
                             context="{'default_resource_cat':'fuente_pc',"
                                     "'default_resource_type':'material'}")
    memorias_ram = fields.Many2many('resource.resource', 'my_work_resource_pc_ram_rel', string='MEMORIAS RAM',
                                    domain="[('resource_cat','=','ram')]",
                                    context="{'default_resource_cat':'ram',"
                                            "'default_resource_type':'material'}")
    lectores = fields.Many2many('resource.resource', 'my_work_resource_pc_lectores_rel', string='LECTORES', domain="[('resource_cat','=','lectores')]",
                                context="{'default_resource_cat':'lectores',"
                                        "'default_resource_type':'material'}")
    # Drives use the specialised HDD resource model, not plain resources.
    hdd = fields.Many2many('my_work.resource_hdd', 'my_work_resources_pc_hdd_rel', string='HDD',
                           context="{'default_resource_cat':'hdd',"
                                   "'default_resource_type':'material'}")
    chasis = fields.Char(string='CHASIS')
    cpu = fields.Many2one('resource.resource', string='CPU', domain="[('resource_cat','=','cpu')]",
                          context="{'default_resource_cat':'cpu',"
                                  "'default_resource_type':'material'}")
    # Free-text notes about other components.
    otros = fields.Text('OTROS')
    laptop = fields.Boolean('LAPTOP')
class MarcaHdd(models.Model):
    """Catalogue of hard-drive brands; records display their brand name."""
    _name = 'my_work.marca_hdd'
    _rec_name = 'marca'
    code = fields.Char('CODIGO')
    marca = fields.Char('MARCA')
class ResourceDescriptionHDD(models.Model):
    """Hard-drive resource: extends resource.resource with drive data."""
    _inherit = 'resource.resource'
    _name = 'my_work.resource_hdd'
    marca = fields.Many2one('my_work.marca_hdd', string='MARCA')
    no_placa = fields.Char('#PLACA')
    # Serial number mirrors the base resource ``code`` (read-only alias).
    no_serie = fields.Char('#SERIE', related='code', readonly=True)
    firw = fields.Char('FIRW')
    capacidad = fields.Many2one('my_work.hdd_capacity', string='CAP')
    tipo_conexion = fields.Selection([('sata', 'SATA'), ('ide', 'IDE')], string='TIPO DE CONEX')
class HDDCapacity(models.Model):
    """Catalogue of drive capacities (free-form labels, e.g. '500GB')."""
    _name = 'my_work.hdd_capacity'
    name = fields.Char('CAPACIDAD', required=True)
class WorkDone(models.Model):
    """A billed line of work performed on a work order."""
    _name = 'my_work.work_done'
    # List lines in their manual sequence number order.
    _order = 'no asc'
    no = fields.Integer('NO')
    denominacion = fields.Char('DENOMINACION')
    precio = fields.Float('PRECIO')
    importe = fields.Float('IMPORTE')
    work_order_id = fields.Many2one('my_work.work_order', 'Orden de Trabajo')
class ResourseResource(models.Model):
    """Extend resource.resource with a component category and a
    '[code] name' display name."""
    _inherit = 'resource.resource'

    @api.multi
    def name_get(self):
        """Display every resource as ``[code] name``."""
        return [(rec.id, '[%s] %s' % (rec.code, rec.name)) for rec in self]

    # Component category used by the PC-assembly domains above.
    resource_cat = fields.Selection(
        [('ram', 'MEMORIA RAM'), ('cpu', 'CPU'), ('lectores', 'LECTORES'),
         ('fuente_pc', 'FUENTE INTERNA DE PC'), ('motherboard', 'MOTHERBOARD'),
         ('hdd', 'DISCO DURO')],
        string="Categoria de recurso")
| {"/iglesias/models/__init__.py": ["/iglesias/__init__.py"]} |
# -*- coding: utf-8 -*-
from odoo import models, fields, api
ESTADO = [('bueno', 'Bueno'), ('regular', 'Regular'), ('deficiente', 'Deficiente')]
class ProductTemplate(models.Model):
    """Extend product.template with congregation asset-tracking data.

    Fix: the original final line carried extraction residue fused onto the
    assignment, breaking the file's syntax; the residue is removed.
    """
    _inherit = 'product.template'
    _order = 'default_code'

    congregacion_id = fields.Many2one('iglesias.congregacion', 'Congregación')
    # Minister in charge follows the congregation (read-only mirror).
    ministro_id = fields.Many2one('hr.employee', 'Ministro a Cargo',
                                  related='congregacion_id.ministro_id', readonly=True)
    responsable_id = fields.Many2one('hr.employee', 'Responsable')
    direccion = fields.Text('Dirección')
    estado_tecnico = fields.Selection(ESTADO, 'Estado técnico')
    fecha_alta = fields.Date('Fecha de alta')
    fecha_baja = fields.Date('Fecha de baja')
    observaciones = fields.Text('Observaciones')

    @api.onchange('responsable_id')
    def onchange_responsable(self):
        """Prefill the address from the responsible employee's home contact."""
        if self.responsable_id and self.responsable_id.address_home_id:
            self.direccion = self.responsable_id.address_home_id.contact_address
# -*- coding: utf-8 -*-
import time
from odoo import api, models, fields
class ReportTrialBalance(models.AbstractModel):
    """Renderer for the financial-statement QWeb report (iglesias.report_ef).

    Aggregates the income ("ofrendas") and the 18 numbered expense buckets of
    iglesias.oficio services, per congregation and date range.
    """
    _name = 'report.iglesias.report_ef'

    # Income fields of iglesias.oficio summed into the breakdown.
    OFRENDA_FIELDS = ['diezmos', 'bandejas', 'unidad_gracia', 'cuaresma',
                      'navidad', 'diocesis_ninnos', 'diocesis_iglesia',
                      'otras_ofrendas']
    # The 18 numbered expense fields: egreso_01 .. egreso_18.
    EGRESO_FIELDS = ['egreso_%02d' % i for i in range(1, 19)]

    def _get_saldo_hasta_fecha(self, congregaciones, fecha):
        """Net balance (income - expenses) of every service of
        *congregaciones* dated strictly before *fecha*."""
        domain = [('congregacion_id', 'in', congregaciones.ids),
                  ('fecha', '<', fecha)]
        servicios = self.env['iglesias.oficio'].search(domain)
        return sum(s.total_ofrendas - s.total_egresos for s in servicios)

    def _get_desgloze_entre_fechas(self, congregaciones, fecha1, fecha2):
        """Breakdown dict of income/expense totals for services between
        *fecha1* (inclusive, optional) and *fecha2* (inclusive), plus the
        accumulated 'saldo' (income - expenses).

        Fix: the original dict literals repeated the key 'egreso_02' twice
        (a silent duplicate); building the keys from the field lists removes
        the duplication and the 40-line literals.
        """
        domain = [('congregacion_id', 'in', congregaciones.ids),
                  ('fecha', '<=', fecha2)]
        if fecha1:
            domain.append(('fecha', '>=', fecha1))
        servicios = self.env['iglesias.oficio'].search(domain)
        result = dict.fromkeys(self.OFRENDA_FIELDS + self.EGRESO_FIELDS, 0)
        result['saldo'] = 0
        for s in servicios:
            for fname in self.OFRENDA_FIELDS + self.EGRESO_FIELDS:
                result[fname] += getattr(s, fname)
            result['saldo'] += s.total_ofrendas - s.total_egresos
        return result

    @api.model
    def render_html(self, docids, data=None):
        """Assemble the rendering context and render the QWeb template."""
        self.model = self.env.context.get('active_model')
        docs = self.env[self.model].browse(self.env.context.get('active_ids', []))
        congregaciones = data['form'].get('congregaciones_ids', False)
        if congregaciones:
            congregaciones = self.env['iglesias.congregacion'].search(
                [('id', 'in', congregaciones)])
        fecha_inicio = data['form'].get('fecha_inicio', False)
        fecha_final = data['form'].get('fecha_final', False)
        if not fecha_final:
            # Consistency fix: the other report models use fields.Date.today();
            # the original used fields.date.today(), yielding a datetime.date
            # instead of Odoo's canonical string representation.
            fecha_final = fields.Date.today()
        saldo_inicial = self._get_saldo_hasta_fecha(congregaciones, fecha_inicio)
        desgloze = self._get_desgloze_entre_fechas(congregaciones, fecha_inicio,
                                                   fecha_final)
        docargs = {
            'doc_ids': self.ids,
            'doc_model': self.model,
            'data': data['form'],
            'docs': docs,
            'time': time,
            'fecha_inicial': fecha_inicio,
            'fecha_final': fecha_final,
            'saldo_inicial': saldo_inicial,
            'desgloze': desgloze,
            'congregaciones': congregaciones,
        }
        return self.env['report'].render('iglesias.report_ef', docargs)
| {"/iglesias/models/__init__.py": ["/iglesias/__init__.py"]} |
# -*- coding: utf-8 -*-
from odoo import api, fields, models, _
class WizardFeBautismo(models.TransientModel):
    """Wizard to select members and print their baptism certificate.

    Fix: the original final line carried extraction residue fused onto the
    return statement, breaking the file's syntax; the residue is removed.
    """
    _name = 'iglesias.wizard_fe_bautismo'

    congregacion_id = fields.Many2one('iglesias.congregacion', 'Congregación')
    fecha = fields.Date(string='Fecha de bautismo')
    miembros_ids = fields.Many2many('hr.employee', string='Hermanos', required=True)

    @api.onchange('congregacion_id', 'fecha')
    def _change_miembros_ids(self):
        """Restrict the selectable members to those that are both baptised
        and confirmed, optionally filtered by congregation and by the baptism
        date; pre-selects the matches and narrows the field's domain."""
        bautismo_ids = self.env['iglesias.sacramento'].search([('tipo_sacramento', '=', 'bautismo')])
        confirmacion_ids = self.env['iglesias.sacramento'].search([('tipo_sacramento', '=', 'confirmacion')])
        if bautismo_ids and confirmacion_ids:
            # Start from the members that have a confirmation record.
            domain1 = [('id', 'in', [c.miembro_id.id for c in confirmacion_ids])]
            if self.congregacion_id:
                domain1 += [('congregacion_id', '=', self.congregacion_id.id)]
            if self.fecha:
                # Only members baptised exactly on the chosen date.
                bautismofecha_ids = self.env['iglesias.sacramento'].search(
                    [('id', 'in', bautismo_ids.ids), ('fecha', '=', self.fecha)])
                domain1 += [('id', 'in', [b.miembro_id.id for b in bautismofecha_ids])]
            else:
                domain1 += [('id', 'in', [b.miembro_id.id for b in bautismo_ids])]
            domain = {'miembros_ids': domain1}
            if self.congregacion_id or self.fecha:
                self.miembros_ids = self.env['hr.employee'].search(domain1)
            return {'domain': domain}

    @api.multi
    def print_report(self):
        """Print the baptism certificate for the selected members."""
        self.ensure_one()
        return self.miembros_ids.print_fe_bautismo()
# -*- coding: utf-8 -*-
from odoo import api, fields, models, _
class WizardComun(models.TransientModel):
    """Base wizard for congregation reports over a date range.

    Subclasses implement :meth:`_print_report` to return the report action.
    """
    _name = 'iglesias.wizard_commun'

    fecha_inicio = fields.Date(string='Fecha de inicio', required=True)
    fecha_final = fields.Date(string='Fecha final')
    congregaciones_ids = fields.Many2many('iglesias.congregacion', string='Congregaciones', required=True,
                                          default=lambda self: self.env['iglesias.congregacion'].search([]))

    @api.onchange('fecha_inicio', 'fecha_final')
    def _change_product_ids(self):
        """Warn when the start date is after the end date.

        Fix: the original compared ``fecha_final`` with itself (in both the
        guard and the comparison), so the warning could never fire; it now
        checks ``fecha_inicio`` against ``fecha_final``.
        """
        if self.fecha_inicio and self.fecha_final:
            if self.fecha_inicio > self.fecha_final:
                return {
                    'warning': {
                        'title': _("Fecha incorrecta"),
                        'message': _("La fecha de inicio debe ser menor que la fecha final."),
                    },
                }

    def _build_contexts(self, data):
        """Extract the date range from the wizard form into a context dict."""
        result = {}
        result['fecha_inicio'] = data['form']['fecha_inicio'] or False
        result['fecha_final'] = data['form']['fecha_final'] or False
        return result

    def _print_report(self, data):
        """Hook for subclasses; must return the report action.

        Fix: the original did ``raise (tuple_of_strings)``, which is invalid
        (raising a non-exception object); raise NotImplementedError instead.
        """
        raise NotImplementedError(_('Not implemented.'))

    @api.multi
    def check_report(self):
        """Collect the form values and delegate to :meth:`_print_report`."""
        self.ensure_one()
        data = {}
        data['ids'] = self.env.context.get('active_ids', [])
        data['model'] = self.env.context.get('active_model', 'ir.ui.menu')
        data['form'] = self.read(['fecha_inicio', 'fecha_final', 'congregaciones_ids'])[0]
        used_context = self._build_contexts(data)
        data['form']['used_context'] = dict(used_context, lang=self.env.context.get('lang', 'en_US'))
        return self._print_report(data)
| {"/iglesias/models/__init__.py": ["/iglesias/__init__.py"]} |
# -*- coding: utf-8 -*-
from odoo import models, fields, api
class Oficio(models.Model):
    """A church service ("oficio"): attendance, sacraments celebrated, and the
    money collected (ofrendas) and spent (egresos) during it."""
    _name = 'iglesias.oficio'
    _rec_name = 'nombre'

    congregacion_id = fields.Many2one('iglesias.congregacion', 'Congregación')
    fecha = fields.Date('Fecha')
    estacion_liturgica_id = fields.Many2one('iglesias.estacion_liturgica', 'Estación litúrgica')
    actividad = fields.Selection([('comunion', 'Servicio de Santa comunión'), ('otros', 'Otros servicios')],
                                 'Actividad que se realiza')
    # sacramentos_celebrados_ids = fields.One2many('sacramentos.oficios', 'oficio_id', 'Sacramentos celebrados')
    # Display name derived from date + congregation (see _calc_nombre).
    nombre = fields.Char('Nombre', compute='_calc_nombre', readonly=True, store=True)
    asistencia_total = fields.Integer('Asistencia total')
    sacramentos_ids = fields.One2many('iglesias.sacramento', 'oficio_id', 'Sacramentos')
    # Per-sacrament counters, refreshed by onchange_sacramentos_ids.
    bautismos = fields.Integer('Bautismos')
    cantidad_bautizados = fields.Integer('Cantidad de bautizados')
    confirmados = fields.Integer('Confirmados')
    cantidad_confirmaciones = fields.Integer('Cantidad de confirmaciones')
    santa_comunion = fields.Integer('Santa comunión')
    cantidad_comulgantes = fields.Integer('Cantidad de comulgantes')
    bendiciones = fields.Integer('Bendición de matrimonios')
    cantidad_bendiciones = fields.Integer('Cantidad de matrimonios bendecidos')
    # Income ("ofrendas")
    bandejas = fields.Float('Ofrendas de bandejas', digits=(16, 2))
    diezmos = fields.Float('Ofrendas de diezmos', digits=(16, 2))
    unidad_gracia = fields.Float('Ofrenda de unidad de gracia', digits=(16, 2))
    cuaresma = fields.Float('Ofrenda especial de Cuaresma', digits=(16, 2))
    navidad = fields.Float('Ofrendas para cena de Navidad', digits=(16, 2))
    diocesis_ninnos = fields.Float('Suplemento de la diócesis para niños', digits=(16, 2))
    diocesis_iglesia = fields.Float('Suplemento de la diócesis para iglesia', digits=(16, 2))
    otras_ofrendas = fields.Float('Otras ofrendas', digits=(16, 2))
    total_ofrendas = fields.Float('Total ofrendas', digits=(16, 2), compute='_calc_total_ofrendas')
    # Expenses: 18 numbered buckets defined by the diocese's ledger.
    egreso_01 = fields.Float('01', digits=(16, 2))
    egreso_02 = fields.Float('02', digits=(16, 2))
    egreso_03 = fields.Float('03', digits=(16, 2))
    egreso_04 = fields.Float('04', digits=(16, 2))
    egreso_05 = fields.Float('05', digits=(16, 2))
    egreso_06 = fields.Float('06', digits=(16, 2))
    egreso_07 = fields.Float('07', digits=(16, 2))
    egreso_08 = fields.Float('08', digits=(16, 2))
    egreso_09 = fields.Float('09', digits=(16, 2))
    egreso_10 = fields.Float('10', digits=(16, 2))
    egreso_11 = fields.Float('11', digits=(16, 2))
    egreso_12 = fields.Float('12', digits=(16, 2))
    egreso_13 = fields.Float('13', digits=(16, 2))
    egreso_14 = fields.Float('14', digits=(16, 2))
    egreso_15 = fields.Float('15', digits=(16, 2))
    egreso_16 = fields.Float('16', digits=(16, 2))
    egreso_17 = fields.Float('17', digits=(16, 2))
    egreso_18 = fields.Float('18', digits=(16, 2))
    total_egresos = fields.Float('Total de egresos', digits=(16, 2), compute='_calc_total_egresos')
    observaciones = fields.Text('Observaciones')
    ministro = fields.Many2one('hr.employee', 'Ministro', domain=[('es_miembro', '=', False)],
                               context={'default_es_miembro': False})
    titulo = fields.Char('Título de la predicación')

    @api.one
    @api.depends('bandejas', 'diezmos', 'unidad_gracia', 'cuaresma', 'navidad',
                 'diocesis_ninnos', 'diocesis_iglesia', 'otras_ofrendas')
    def _calc_total_ofrendas(self):
        """Sum every income field into total_ofrendas."""
        self.total_ofrendas = (self.bandejas + self.diezmos + self.unidad_gracia +
                               self.cuaresma + self.navidad + self.diocesis_ninnos +
                               self.diocesis_iglesia + self.otras_ofrendas)

    @api.one
    @api.depends('egreso_01', 'egreso_02', 'egreso_03', 'egreso_04', 'egreso_05',
                 'egreso_06', 'egreso_07', 'egreso_08', 'egreso_09', 'egreso_10',
                 'egreso_11', 'egreso_12', 'egreso_13', 'egreso_14', 'egreso_15',
                 'egreso_16', 'egreso_17', 'egreso_18')
    def _calc_total_egresos(self):
        """Sum the 18 expense buckets into total_egresos.

        Fix: 'egreso_04' was missing from @api.depends, so editing that
        bucket did not refresh the computed total.
        """
        self.total_egresos = sum(getattr(self, 'egreso_%02d' % i) for i in range(1, 19))

    @api.onchange('congregacion_id')
    def onchange_congregacion_id(self):
        """Clear the selected sacraments when the congregation changes."""
        self.sacramentos_ids = False

    @api.onchange('sacramentos_ids')
    def onchange_sacramentos_ids(self):
        """Refresh the per-type sacrament counters from the selected lines."""
        bautismos = confirmados = bendiciones = 0
        for s in self.sacramentos_ids:
            if s.tipo_sacramento == 'bautismo':
                bautismos += 1
            elif s.tipo_sacramento == 'confirmacion':
                confirmados += 1
            elif s.tipo_sacramento == 'bendicion':
                bendiciones += 1
        self.bautismos = bautismos
        self.cantidad_bautizados = bautismos
        self.confirmados = confirmados
        self.cantidad_confirmaciones = confirmados
        self.bendiciones = bendiciones
        self.cantidad_bendiciones = bendiciones

    @api.one
    @api.depends('fecha', 'congregacion_id')
    def _calc_nombre(self):
        """Display name '<FECHA>-<CONGREGACION>' upper-cased, or 'Nuevo'."""
        if self.fecha and self.congregacion_id:
            # NOTE(review): relies on `fecha` being the ORM's string date
            # representation (Odoo <= 10) — confirm before porting.
            self.nombre = str(self.fecha + '-' + self.congregacion_id.nombre).upper()
        else:
            self.nombre = 'Nuevo'
# -*- coding: utf-8 -*-
import time
from odoo import api, models, fields
class ReportListadoAltas(models.AbstractModel):
    """Renderer for the member-registration listing report
    (iglesias.report_listado_altas)."""
    _name = 'report.iglesias.report_listado_altas'

    def _get_miembross_con_altas(self, congregaciones, fecha1, fecha2):
        """Members whose registration date falls within [fecha1, fecha2]."""
        criteria = [('es_miembro', '=', True),
                    ('fecha_alta', '>=', fecha1),
                    ('fecha_alta', '<=', fecha2)]
        if congregaciones:
            criteria.append(('congregacion_id', 'in', congregaciones.ids))
        return self.env['hr.employee'].search(criteria)

    def _get_result(self, congregaciones, fecha1, fecha2):
        """Group the matching members by their congregation record."""
        grouped = {}
        for member in self._get_miembross_con_altas(congregaciones, fecha1, fecha2):
            grouped.setdefault(member.congregacion_id, []).append(member)
        return grouped

    @api.model
    def render_html(self, docids, data=None):
        """Build the rendering context and delegate to the QWeb template."""
        self.model = self.env.context.get('active_model')
        docs = self.env[self.model].browse(self.env.context.get('active_ids', []))
        congregaciones = data['form'].get('congregaciones_ids', False)
        if congregaciones:
            congregaciones = self.env['iglesias.congregacion'].search(
                [('id', 'in', congregaciones)])
        fecha_inicio = data['form'].get('fecha_inicio', False)
        # Default the end of the range to today when the form left it empty.
        fecha_final = data['form'].get('fecha_final', False) or fields.date.today()
        grouped = self._get_result(congregaciones, fecha_inicio, fecha_final)
        docargs = {
            'doc_ids': self.ids,
            'doc_model': self.model,
            'data': data['form'],
            'docs': docs,
            'time': time,
            'fecha_inicial': fecha_inicio,
            'fecha_final': fecha_final,
            'result': grouped,
        }
        return self.env['report'].render('iglesias.report_listado_altas', docargs)
| {"/iglesias/models/__init__.py": ["/iglesias/__init__.py"]} |
# -*- coding: utf-8 -*-
import time
from odoo import api, models, fields
class ReportTrialBalance(models.AbstractModel):
    """Renderer for the congregation-status QWeb report (iglesias.report_ce)."""
    _name = 'report.iglesias.report_ce'

    def _get_total_sacramento(self, congregaciones, tipo_sacramento, fecha1=False, fecha2=None):
        """Count sacraments of *tipo_sacramento* for *congregaciones* up to
        *fecha2* (defaults to today), optionally starting at *fecha1*.

        Fix: the original default was ``fecha2=fields.Date.today()``, which
        is evaluated once at class-definition time and therefore goes stale
        on a long-running server; it is now resolved at call time.
        """
        if fecha2 is None:
            fecha2 = fields.Date.today()
        domain = [('congregacion_id', 'in', congregaciones.ids),
                  ('tipo_sacramento', '=', tipo_sacramento),
                  ('fecha', '<=', fecha2)]
        if fecha1:
            domain += [('fecha', '>=', fecha1)]
        sacramentos = self.env['iglesias.sacramento'].search(domain)
        return len(sacramentos)

    def _get_totales_sin_fecha(self, congregaciones):
        """Date-independent totals: congregation statuses, demographics by
        age/sex, tithers, distinct families and all-time sacrament counts."""
        result = {}
        fecha = fields.Date.today()
        parroquias = misiones = extensiones = 0
        for c in congregaciones:
            if c.status == 'parroquia':
                parroquias += 1
            elif c.status == 'misiones':
                misiones += 1
            elif c.status == 'extensiones':
                extensiones += 1
        miembros = self.env['hr.employee'].search([('congregacion_id', 'in', congregaciones.ids),
                                                   ('es_miembro', '=', True)])
        # Leadership members may not carry the es_miembro flag; merge them in.
        for c in congregaciones:
            for l in c.liderazgo_ids:
                miembros |= l.employee_id
        diezmadores = 0
        ninnas = 0
        ninnos = 0
        m_jovenes = 0
        h_jovenes = 0
        m_adultos = 0
        h_adultos = 0
        ancianas = 0
        ancianos = 0
        familias = set()
        for miembro in miembros:
            if miembro.es_diezmador:
                diezmadores += 1
            familias.add(miembro.no_familia)
            # NOTE(review): birthday may be unset (False); the comparison then
            # relies on Python 2 mixed-type ordering — confirm this is intended.
            if fecha >= miembro.birthday:
                if miembro.edad <= 14:
                    if miembro.gender == 'female':
                        ninnas += 1
                    else:
                        ninnos += 1
                elif 15 <= miembro.edad <= 29:
                    if miembro.gender == 'female':
                        m_jovenes += 1
                    else:
                        h_jovenes += 1
                elif 30 <= miembro.edad <= 59:
                    if miembro.gender == 'female':
                        m_adultos += 1
                    else:
                        h_adultos += 1
                elif miembro.edad >= 60:
                    if miembro.gender == 'female':
                        ancianas += 1
                    else:
                        ancianos += 1
        result.update({'diezmadores': diezmadores,
                       'parroquias': parroquias,
                       'misiones': misiones,
                       'extensiones': extensiones,
                       'familias': len(familias),
                       'ninnas': ninnas,
                       'ninnos': ninnos,
                       'm_jovenes': m_jovenes,
                       'h_jovenes': h_jovenes,
                       'm_adultos': m_adultos,
                       'h_adultos': h_adultos,
                       'ancianas': ancianas,
                       'ancianos': ancianos,
                       'bautizados': self._get_total_sacramento(congregaciones, 'bautismo'),
                       'confirmaciones': self._get_total_sacramento(congregaciones, 'confirmacion'),
                       'bendiciones': self._get_total_sacramento(congregaciones, 'bendicion'),
                       'total_miembros': len(miembros)
                       })
        return result

    def _get_servicios(self, congregaciones, fecha1=False, fecha2=None):
        """Return (communion_count, other_count, avg_attendance_communion,
        avg_attendance_other) for services up to *fecha2* (defaults to today
        at call time — see _get_total_sacramento), optionally from *fecha1*.
        """
        if fecha2 is None:
            fecha2 = fields.Date.today()
        sta_com = otros = 0
        domain = [('congregacion_id', 'in', congregaciones.ids), ('fecha', '<=', fecha2)]
        if fecha1:
            domain += [('fecha', '>=', fecha1)]
        servicios = self.env['iglesias.oficio'].search(domain)
        asistencias_comunion = asistencias_otros = 0
        for s in servicios:
            if s.actividad == 'comunion':
                asistencias_comunion += s.asistencia_total
                sta_com += 1
            elif s.actividad == 'otros':
                otros += 1
                asistencias_otros += s.asistencia_total
        promedio_comunion = promedio_otros = 0
        # A non-zero attendance total implies a non-zero service count, so
        # these divisions are safe.
        if asistencias_comunion:
            promedio_comunion = round(float(asistencias_comunion) / sta_com, 2)
        if asistencias_otros:
            promedio_otros = round(float(asistencias_otros) / otros, 2)
        return sta_com, otros, promedio_comunion, promedio_otros

    def _get_total_membresia_hasta_fecha(self, congregaciones, fecha):
        """Membership totals accumulated up to *fecha*: registrations, exits
        by cause, membership categories, sacraments and service averages."""
        result = {}
        altas = self.env['hr.employee'].search([('congregacion_id', 'in', congregaciones.ids),
                                                ('fecha_alta', '<=', fecha),
                                                ('es_miembro', '=', True)])
        bajas = self.env['hr.employee'].search([('congregacion_id', 'in', congregaciones.ids),
                                                ('fecha_baja', '<=', fecha),
                                                ('es_miembro', '=', True)])
        bajas_traslado = 0
        bajas_fallecidos = 0
        for miembro in bajas:
            if miembro.causa == 'traslado':
                bajas_traslado += 1
            elif miembro.causa == 'fallecido':
                bajas_fallecidos += 1
        visitantes = 0
        catecumenos = 0
        comulgantes = 0
        pasivos = 0
        for miembro in altas:
            if miembro.membresia == 'visitante':
                visitantes += 1
            elif miembro.membresia == 'catecumeno':
                catecumenos += 1
            elif miembro.membresia == 'comulgante':
                comulgantes += 1
            elif miembro.membresia == 'pasivo':
                pasivos += 1
        sta_comunion, otros, promedio_comunion, promedio_otros = self._get_servicios(congregaciones, False, fecha)
        result.update({'altas': len(altas),
                       'bajas': len(bajas),
                       'bajas_traslado': bajas_traslado,
                       'bajas_fallecido': bajas_fallecidos,
                       'visitantes': visitantes,
                       'catecumenos': catecumenos,
                       'comulgantes': comulgantes,
                       'pasivos': pasivos,
                       'bautizados': self._get_total_sacramento(congregaciones, 'bautismo', False, fecha),
                       'confirmaciones': self._get_total_sacramento(congregaciones, 'confirmacion', False, fecha),
                       'bendiciones': self._get_total_sacramento(congregaciones, 'bendicion', False, fecha),
                       'servicio_sta_comunion': sta_comunion,
                       'servicio_otros': otros,
                       'promedio_comunion': promedio_comunion,
                       'promedio_otros': promedio_otros
                       })
        return result

    @api.model
    def render_html(self, docids, data=None):
        """Assemble the report context (totals at both range boundaries plus
        global figures) and render the QWeb template."""
        self.model = self.env.context.get('active_model')
        docs = self.env[self.model].browse(self.env.context.get('active_ids', []))
        congregaciones = data['form'].get('congregaciones_ids', False)
        if congregaciones:
            congregaciones = self.env['iglesias.congregacion'].search([('id', 'in', congregaciones)])
        fecha_inicio = data['form'].get('fecha_inicio', False)
        fecha_final = data['form'].get('fecha_final', False)
        if not fecha_final:
            fecha_final = fields.Date.today()
        total_membresia_fecha1 = {}
        total_membresia_fecha2 = {}
        if fecha_inicio:
            total_membresia_fecha1 = self._get_total_membresia_hasta_fecha(congregaciones, fecha_inicio)
        total_membresia_fecha2 = self._get_total_membresia_hasta_fecha(congregaciones, fecha_final)
        sta_com, otros, promedio_comunion, promedio_otros = self._get_servicios(congregaciones, fecha_inicio,
                                                                                fecha_final)
        totales_sin_fecha = self._get_totales_sin_fecha(congregaciones)
        docargs = {
            'doc_ids': self.ids,
            'doc_model': self.model,
            'data': data['form'],
            'docs': docs,
            'time': time,
            'congregaciones': congregaciones,
            'total_membresia_fecha1': total_membresia_fecha1,
            'total_membresia_fecha2': total_membresia_fecha2,
            'totales_sin_fecha': totales_sin_fecha,
            'pc_entre_fechas': promedio_comunion,
            'po_entre_fechas': promedio_otros,
            'fecha_inicio': fecha_inicio,
            'fecha_final': fecha_final
        }
        return self.env['report'].render('iglesias.report_ce', docargs)
| {"/iglesias/models/__init__.py": ["/iglesias/__init__.py"]} |
__author__ = 'reinaldo'
import models
import wizard
import report
| {"/iglesias/models/__init__.py": ["/iglesias/__init__.py"]} |
# -*- coding: utf-8 -*-
from odoo import models, fields
class Contacts(models.Model):
    """Link res.partner contacts to their congregation.

    Fix: the original final line carried extraction residue fused onto the
    field definition, breaking the file's syntax; the residue is removed.
    """
    _inherit = 'res.partner'

    # Congregation this contact belongs to.
    congregacion_id = fields.Many2one('iglesias.congregacion', 'Congregación')
from odoo import models, api, fields
class ResPartner(models.Model):
    """Add the national identity-card number to res.partner.

    Fix: the original final line carried extraction residue fused onto the
    field definition, breaking the file's syntax; the residue is removed.
    """
    _inherit = 'res.partner'

    # Cuban national identity-card number.
    ci = fields.Char('CARNET DE IDENTIDAD')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.