index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
63,744 | deanrock/cookie-checker | refs/heads/master | /test-chrome.py | from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
import os
import shutil
try:
shutil.rmtree('./profiles/*')
except:
pass
chromedriver = "./chromedriver"
os.environ["webdriver.chrome.driver"] = chromedriver
print "3"
browser = webdriver.Chrome(chromedriver)
print "4"
browser.get('http://24ur.com')
links = browser.find_elements_by_xpath("//a")
print links
print "5"
browser.quit()
| {"/app_wsgi.py": ["/app.py"]} |
63,745 | deanrock/cookie-checker | refs/heads/master | /config-example.py | import md5
# Example database settings; copy this file to config.py and fill in the
# real credentials.
dbhost = '127.0.0.1'
dbuser = 'root'
dbpass = ''
dbname = ''
# SQLAlchemy connection string assembled from the parts above.
DB_URI = 'mysql://' + dbuser + ':' + dbpass + '@' + dbhost + '/' + dbname
secret_key = '' | {"/app_wsgi.py": ["/app.py"]} |
63,746 | deanrock/cookie-checker | refs/heads/master | /app.py | # -*- coding: utf-8 -*-
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash, make_response, jsonify, Response
import json, datetime
from datetime import timedelta
from time import mktime
from functools import wraps
from urllib import urlopen
import urllib
import string,random
from flask import Flask
import hashlib
import string,random
import datetime
import urllib
import hashlib
import string,random
from model import *
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = config.DB_URI
app.config['SQLALCHEMY_POOL_RECYCLE'] = 3600
app.secret_key = config.secret_key
db.init_app(app)
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/test/<url>')
def test(url):
    """Show the result page for a single test, looked up by its URL slug.

    Redirects to the index when no test with the given slug exists.
    """
    test = Test.query.filter(Test.url == url).first()
    if not test:
        return redirect(url_for('index'))
    return render_template('test.html', test=test)
@app.route('/js-test-info/<url>')
def js_test_info(url):
    """Return the serialized test for slug *url* as JSON (null when absent)."""
    test = Test.query.filter(Test.url == url).first()
    if not test:
        return jsonify({'test': None})
    return jsonify({'test': test.serialize})
@app.route('/js-get-cookies/<url>')
def js_get_cookies(url):
    """Return every visited URL and its cookies for the test with slug *url*.

    Fixes: the loop variable previously shadowed the ``url`` route
    parameter, a missing test crashed with AttributeError instead of
    returning an empty result, and an old commented-out deduplication
    draft has been removed.
    """
    test = Test.query.filter(Test.url == url).first()
    if not test:
        # Unknown slug: answer with an empty list rather than a 500.
        return jsonify({'urls': []})
    urls = []
    for visited in test.urls:
        cookies = [cookie.serialize for cookie in visited.cookies]
        urls.append({'url': visited.serialize, 'cookies': cookies})
    return jsonify({'urls': urls})
@app.route('/check', methods=['POST'])
def check():
    """Create a new cookie-check test from the submitted form.

    Expects a 'domain' form field of at least 4 characters; an optional
    'public' field set to 'yes' makes the test publicly visible. The test
    is stored with status 1 (queued; task.py picks these up) and the
    client is redirected to the test's result page.
    """
    if 'domain' in request.form:
        if len(request.form['domain']) < 4:
            return redirect(url_for('index'))
        # The scheme is forced to plain http; the form supplies only a host.
        domain = 'http://' + str(request.form['domain'])
        test = Test()
        test.domain = domain
        test.datetime = datetime.datetime.now()
        test.generate_url()
        if 'public' in request.form and request.form['public'] == 'yes':
            test.private = 0
        else:
            test.private = 1
        test.status = 1  # 1 = queued for the background worker
        db.session.add(test)
        db.session.commit()
        return redirect(url_for('test', url=test.url))
    return redirect(url_for('index'))
@app.route('/history')
def history():
    """List all tests, newest first."""
    return render_template('history.html', tests=Test.query.order_by(Test.id.desc()).all())
if __name__ == "__main__":
    # Development server only; production serves the app through app_wsgi.py.
    app.run(debug=True, host='0.0.0.0', port=2000)
| {"/app_wsgi.py": ["/app.py"]} |
63,747 | deanrock/cookie-checker | refs/heads/master | /task.py | from model import *
from time import sleep
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
import os
import config as myconfig
import MySQLdb as mdb
from xvfbwrapper import Xvfb
import shutil
from urlparse import urlparse
import random
import socket
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = config.DB_URI
app.config['SQLALCHEMY_POOL_RECYCLE'] = 3600
app.secret_key = config.secret_key
db.init_app(app)
class get_cookies:
    """Worker that visits a test's site with Chrome and records its cookies.

    Drives a Chrome instance inside a virtual X display (Xvfb), loads the
    target domain plus up to three same-domain links, and stores every
    cookie observed into the database.
    """

    def __init__(self):
        self.browser = None    # selenium Chrome driver, created per test
        self.to_visit = []     # same-domain links queued for crawling
        self.test = None       # Test row currently being processed
        self.vdisplay = None   # Xvfb virtual display handle

    def __parse_cookies(self, url):
        """Load *url*, persist one Url row plus a Cookie row per browser cookie."""
        print "parse %s" % url
        try:
            # Bound both socket operations and the page load so a slow site
            # cannot hang the worker.
            socket.setdefaulttimeout(15)
            self.browser.set_page_load_timeout(15)
            self.browser.get(url)
            u = Url()
            u.test_id = self.test.id
            u.url = url
            u.datetime = datetime.datetime.now()
            db.session.add(u)
            db.session.commit()
            cookies = self.browser.get_cookies()
            for co in cookies:
                c = Cookie()
                c.url_id = u.id
                c.name = co['name']
                c.domain = co['domain']
                # Cookie values are truncated to fit the column width.
                c.value = co['value'][:200]
                # 'expiry' is absent for session cookies.
                if 'expiry' in co:
                    c.expires = co['expiry']
                db.session.add(c)
                db.session.commit()
        except socket.timeout:
            return
        socket.setdefaulttimeout(2)

    def check_cookies(self, t):
        """Run the full cookie check for Test row *t* (status 1 -> 2 -> 3)."""
        self.test = t
        t.status = 2  # mark as in-progress
        db.session.commit()
        # Kill stragglers from a previous, possibly crashed, run.
        os.system("killall -9 chrome")
        os.system("killall -9 chromedriver")
        os.system("killall -9 Xvfb")
        self.vdisplay = Xvfb()
        self.vdisplay.start()
        #chrome options
        #co = webdriver.ChromeOptions()
        #co.add_argument("--user-data-dir=./profiles")
        chromedriver = "./chromedriver"
        os.environ["webdriver.chrome.driver"] = chromedriver
        #browser = webdriver.Chrome(chromedriver, chrome_options = co)
        self.browser = webdriver.Chrome(chromedriver)
        # Start from a clean cookie jar so only this test's cookies are seen.
        self.browser.delete_all_cookies()
        #visit first page
        self.url = t.domain
        """try:
            self.browser.get(self.url)
        except:
            t.status = 4
            t.info = 'Wrong URL! Task Aborted'
            db.session.commit()
            vdisplay.stop()
            return
        """
        self.__parse_cookies(t.domain)
        domain = urlparse(t.domain).netloc
        links = []
        try:
            socket.setdefaulttimeout(2)
            # Collect up to three distinct same-domain links to crawl next.
            links = self.browser.find_elements_by_tag_name("a")
            random.shuffle(links)
            for link in links:
                href = link.get_attribute('href')
                if href:
                    u = urlparse(href)
                    if u.netloc == domain and href != self.url:
                        self.to_visit.append(href)
                if len(self.to_visit) == 3:
                    break
        except socket.timeout:
            pass
        for link in self.to_visit:
            self.__parse_cookies(link)
        print t.domain
        #finish
        t.status = 3  # 3 = finished successfully
        t.finished = datetime.datetime.now()
        db.session.commit()
        try:
            self.browser.close()
        except:
            pass
        self.vdisplay.stop()
if __name__ == '__main__':
    # Polling worker: pick up the oldest queued test (status 1), crawl it,
    # and mark it failed (status 4) on any error.
    while True:
        ctx = app.test_request_context()
        ctx.push()
        t = Test.query.filter(Test.status==1).first()
        if t:
            try:
                gc = get_cookies()
                gc.check_cookies(t)
            except Exception as e:  # was Python-2-only "except Exception, e"
                print(str(e))
                t.status = 4
                t.info = "Please try again!"
                db.session.commit()
        else:
            # Nothing queued; sleep to avoid a busy loop.
            sleep(1)
        ctx.pop()
    # Removed unreachable trailing cleanup ("if browser: browser.close()"):
    # it referenced an undefined name and the loop above never exits.
| {"/app_wsgi.py": ["/app.py"]} |
63,748 | deanrock/cookie-checker | refs/heads/master | /run_moss.py | url = 'http://www.moss-soz.si/si/rezultati_moss/obdobje/default.html'
import re
import urllib, urllib2
import requests
import json
f = urllib.urlopen(url)
content = f.read()
def test_url(url):
    """Submit *url* to the cookie-checker service as a public test.

    Posts the 'domain'/'public' form fields to the /check endpoint.
    Debug prints and the redundant intermediate ``data`` alias were removed.
    """
    payload = {'public': 'yes',
               'domain': url}
    r = requests.post('http://cookies.kirrupt.com/check',
                      data=payload)
x = re.findall('<strong>(.*)<\/strong>', content)
y = x[:-1]
i = 0
test
for x in y:
x = x.split('<')
i = i+1
print "%d. %s" % (i, x[0])
test_url(x[0])
| {"/app_wsgi.py": ["/app.py"]} |
63,749 | deanrock/cookie-checker | refs/heads/master | /app_wsgi.py |
import sys

# Make the application package importable when run under a WSGI server.
sys.path.insert(0, '/apps/cookies')

# WSGI servers look for a module-level callable named "application".
from app import app as application
| {"/app_wsgi.py": ["/app.py"]} |
63,754 | heofs/flask-publisher | refs/heads/master | /main.py | from flask import Flask, request, jsonify
from pub import Publisher
from datetime import datetime
app = Flask(__name__)
publisher = Publisher()
@app.route('/api', methods=['POST'])
def post_data():
    """Receive a JSON sensor reading, enrich it, publish it and echo it back.

    Adds the current Unix timestamp and a hard-coded location before
    forwarding the payload to the Pub/Sub publisher.
    """
    data = request.get_json(force=True)
    # Add time
    now = datetime.now()
    timestamp = datetime.timestamp(now)
    data['timestamp'] = timestamp
    # Add location
    data['location'] = "bedroom"
    publisher.publish_data(data)
    print(data)
    return jsonify(data)
@app.route('/status', methods=['GET'])
def status():
    """Liveness probe; always returns the plain-text body 'OK'."""
    return 'OK'
if __name__ == '__main__':
    # Bind to all interfaces so the API is reachable from other hosts.
    app.run(host='0.0.0.0')
| {"/main.py": ["/pub.py"]} |
63,755 | heofs/flask-publisher | refs/heads/master | /mqtt_publish.py | import datetime
import json
import os
import ssl
import time
import paho.mqtt.client as mqtt
import jwt
class Publisher(object):
    """Represents the state of a single device.

    Connects to the Google Cloud IoT Core MQTT bridge using a JWT derived
    from credentials.json and publishes telemetry on the device's events
    topic.
    """

    def __init__(self):
        self.connected = False
        # Device identity and key material come from a local JSON file.
        with open('credentials.json') as json_file:
            config = json.load(json_file)
            registry_id = config['registry_id']
            cloud_region = config['cloud_region']
            self.project_id = config['project_id']
            device_id = config['device_id']
            self.algorithm = config['algorithm']
            self.private_key_file = config['private_key_file']
            print(self.private_key_file)
            ca_certs = config['ca_certs']
        mqtt_bridge_hostname = "mqtt.googleapis.com"
        mqtt_bridge_port = 8883
        # jwt_expires_minutes = int(os.getenv('jwt_expires_minutes'))
        # Create the MQTT client and connect to Cloud IoT.
        self.client = mqtt.Client(
            client_id='projects/{}/locations/{}/registries/{}/devices/{}'.format(
                self.project_id,
                cloud_region,
                registry_id,
                device_id))
        # Cloud IoT authenticates via a JWT password; the username is ignored.
        self.client.username_pw_set(
            username='unused', password=self.create_jwt())
        self.client.tls_set(ca_certs=ca_certs,
                            tls_version=ssl.PROTOCOL_TLSv1_2)
        self.client.on_connect = self.on_connect
        self.client.on_publish = self.on_publish
        self.client.on_disconnect = self.on_disconnect
        self.client.on_subscribe = self.on_subscribe
        self.client.connect(mqtt_bridge_hostname, mqtt_bridge_port)
        self.client.loop_start()
        # This is the topic that the device will publish telemetry events
        # (temperature data) to.
        self.mqtt_telemetry_topic = '/devices/{}/events'.format(device_id)
        # This is the topic that the device will receive configuration updates on.
        self.mqtt_config_topic = '/devices/{}/config'.format(device_id)
        # Wait up to 5 seconds for the device to connect.
        self.wait_for_connection(5)
        # Subscribe to the config topic.
        self.client.subscribe(self.mqtt_config_topic, qos=1)

    def publish_message(self, data):
        """Serialize *data* to JSON and publish it on the telemetry topic (QoS 1)."""
        print(data)
        payload = json.dumps(data)
        print('Publishing payload', payload)
        self.client.publish(self.mqtt_telemetry_topic, payload, qos=1)

    def wait_for_connection(self, timeout):
        """Wait for the device to become connected."""
        total_time = 0
        while not self.connected and total_time < timeout:
            time.sleep(1)
            total_time += 1
        if not self.connected:
            raise RuntimeError('Could not connect to MQTT bridge.')

    def on_connect(self, unused_client, unused_userdata, unused_flags, rc):
        """Callback for when a device connects."""
        print('Connection Result:', self.error_str(rc))
        self.connected = True

    def on_disconnect(self, unused_client, unused_userdata, rc):
        """Callback for when a device disconnects.

        Refreshes the JWT so the automatic reconnect can re-authenticate.
        """
        error_code = int(rc)
        # NOTE(review): both sides of this test compare against 5, so the
        # second comparison is redundant -- one of them was probably meant
        # to be a different return code (e.g. 4). Confirm intent.
        if(error_code == 5 or error_code == 5):
            self.client.username_pw_set(
                username='unused', password=self.create_jwt())
        print('Disconnected:', self.error_str(rc))
        self.connected = False

    def on_publish(self, unused_client, unused_userdata, unused_mid):
        """Callback when the device receives a PUBACK from the MQTT bridge."""
        print('Published message acked.')

    def on_subscribe(self, unused_client, unused_userdata, unused_mid,
                     granted_qos):
        """Callback when the device receives a SUBACK from the MQTT bridge."""
        print('Subscribed: ', granted_qos)
        # A granted QoS of 128 signals subscription failure.
        if granted_qos[0] == 128:
            print('Subscription failed.')

    def close(self):
        """Disconnect from the bridge and stop the background network loop."""
        self.client.disconnect()
        self.client.loop_stop()
        print('Finished loop successfully. Goodbye!')

    def create_jwt(self):
        """Create a JWT (https://jwt.io) to establish an MQTT connection."""
        # Token valid for 60 minutes; audience is the GCP project id.
        token = {
            'iat': datetime.datetime.utcnow(),
            'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60),
            'aud': self.project_id
        }
        with open(self.private_key_file, 'r') as f:
            private_key = f.read()
        print('Creating JWT using {} from private key file {}'.format(
            self.algorithm, self.private_key_file))
        return jwt.encode(token, private_key, algorithm=self.algorithm)

    def error_str(self, rc):
        """Convert a Paho error to a human readable string."""
        return '{}: {}'.format(rc, mqtt.error_string(rc))
if __name__ == '__main__':
    # Demo driver: publish a handful of synthetic readings, then disconnect.
    publisher = Publisher()
    num_messages = 10
    # Update and publish temperature readings at a rate of one per second.
    # NOTE(review): the sleep below is actually 2 seconds -- the comment and
    # the code disagree.
    for x in range(num_messages):
        # Report the device's temperature to the server by serializing it
        # as a JSON string.
        publisher.publish_message(
            {'device_id': '', 'temperature': (x * 1.25 + 10), 'humidity': (x * 1.25 + 40)})
        time.sleep(2)
    publisher.close()
| {"/main.py": ["/pub.py"]} |
63,756 | heofs/flask-publisher | refs/heads/master | /pub.py | """
export GOOGLE_APPLICATION_CREDENTIALS="/home/user/Downloads/[FILE_NAME].json"
"""
from google.cloud import pubsub_v1
import json
class Publisher(object):
    """Thin wrapper around a Google Cloud Pub/Sub topic publisher."""

    def __init__(self, project_id="henofs-project", topic_name="sensor-data"):
        """Create the client and resolve the fully qualified topic path."""
        self.publisher = pubsub_v1.PublisherClient()
        # The `topic_path` method creates a fully qualified identifier
        # in the form `projects/{project_id}/topics/{topic_name}`
        self.topic_path = self.publisher.topic_path(project_id, topic_name)

    def publish_data(self, data=None):
        """Publish *data* (a JSON-serializable mapping) and block on the result.

        BUG FIX: the default was a mutable dict literal, which is shared
        across calls; a None sentinel is used instead (behavior for callers
        is unchanged).

        Returns the published message id from the publish future.
        """
        if data is None:
            data = {"test": 100}
        payload = json.dumps(data).encode('utf-8')
        # When you publish a message, the client returns a future.
        future = self.publisher.publish(self.topic_path, data=payload)
        return future.result()
if __name__ == "__main__":
    # Smoke test: publish the default payload once.
    publisher = Publisher()
    publisher.publish_data()
    print('Published messages.')
| {"/main.py": ["/pub.py"]} |
63,760 | nazirimu/Pong-Game | refs/heads/main | /main.py | from turtle import Screen
from paddles import Paddle
from ball import Ball
from scoreboard import Scoreboard
import time
# Window and game setup.
screen = Screen()
screen.setup(width= 900, height = 600)
screen.bgcolor("black")
screen.title("Pong")
# Manual screen updates (tracer off) so the loop controls frame timing.
screen.tracer(0)
r_paddle = Paddle(425)
l_paddle = Paddle(-425)
ball = Ball()
scoreboard = Scoreboard()
scoreboard.display()
# Ask how many points are needed to win.
rounds = int(screen.textinput("Pong", "First to:"))
screen.listen()
screen.onkey(r_paddle.up, "Up")
screen.onkey(r_paddle.down, "Down")
screen.onkey(l_paddle.up, "w")
screen.onkey(l_paddle.down, "s")
game_on = True
# time_sleep dictates the speed of the game
while game_on:
    time.sleep(ball.moves_speed)
    screen.update()
    ball.move()
    # detecting wall collision (top and bottom)
    if ball.ycor() > 280 or ball.ycor() < -280:
        ball.bounce()
    # detecting collision with paddles
    if ball.distance(r_paddle) < 50 and ball.xcor() > 400 or ball.distance(l_paddle) < 50 and ball.xcor() < -400:
        ball.hit()
    # detects when the paddle misses on the left
    elif ball.xcor() < -430:
        # NOTE(review): this pause is 1 s but the right-side miss pauses
        # 0.5 s -- confirm the asymmetry is intentional.
        time.sleep(1)
        scoreboard.r_point()
        ball.miss()
    # detects when the paddle misses on the right
    elif ball.xcor() > 430:
        time.sleep(0.5)
        scoreboard.l_point()
        ball.miss()
    # stops the game when one of the users reaches 10
    # NOTE(review): strictly greater-than means play ends at rounds + 1
    # points -- verify against the "First to:" prompt semantics.
    if scoreboard.r_score > rounds or scoreboard.l_score > rounds:
        game_on = False
        scoreboard.game_over()
screen.exitonclick()
| {"/main.py": ["/paddles.py", "/ball.py"]} |
63,761 | nazirimu/Pong-Game | refs/heads/main | /ball.py | from turtle import Turtle
class Ball(Turtle):
    """The pong ball: a white circle moving diagonally, speeding up on hits."""

    def __init__(self):
        super().__init__()
        self.shape("circle")
        self.color("white")
        self.penup()
        self.speed("fastest")
        # Per-frame displacement along each axis; sign encodes direction.
        self.move_x = 10
        self.move_y = 10
        # Frame delay used by the game loop; smaller means a faster game.
        self.moves_speed = 0.05

    def move(self):
        """Advance one step along the current direction vector."""
        self.goto(self.xcor() + self.move_x, self.ycor() + self.move_y)

    def bounce(self):
        """Reflect vertically off the top or bottom wall."""
        self.move_y = -self.move_y
        self.move()

    def hit(self):
        """Reflect horizontally off a paddle and shorten the frame delay by 20%."""
        self.move_x = -self.move_x
        self.moves_speed *= 0.8
        self.move()

    def miss(self):
        """Reset to centre after a miss, reversing direction and restoring speed."""
        self.move_x = -self.move_x
        self.move_y = -self.move_y
        self.moves_speed = 0.05
        self.goto(0, 0)
        self.move()
| {"/main.py": ["/paddles.py", "/ball.py"]} |
63,762 | nazirimu/Pong-Game | refs/heads/main | /paddles.py | from turtle import Turtle
class Paddle(Turtle):
    """A player paddle: a white 100x20 square pinned to a fixed x column."""

    def __init__(self, xposition):
        super().__init__()
        self.penup()
        self.color("white")
        self.shape("square")
        # Stretch the 20x20 square to 100 px tall, 20 px wide.
        self.shapesize(stretch_wid=5, stretch_len=1)
        self.setposition(xposition, 0)
        self.speed('fastest')

    def up(self):
        """Move the paddle 20 px up, keeping its x coordinate fixed."""
        self.goto(self.xcor(), self.ycor() + 20)

    def down(self):
        """Move the paddle 20 px down, keeping its x coordinate fixed."""
        self.goto(self.xcor(), self.ycor() - 20)
| {"/main.py": ["/paddles.py", "/ball.py"]} |
63,772 | alpgokcek/turkish-qg-model | refs/heads/main | /qg-model/src/model/__init__.py | from .model import Attention, Decoder, Seq2Seq, BeamSearch | {"/qg-model/src/model/__init__.py": ["/qg-model/src/model/model.py"], "/qg-model/src/__init__.py": ["/qg-model/src/get_best_sentences.py"]} |
63,773 | alpgokcek/turkish-qg-model | refs/heads/main | /qg-model/src/app.py | from os.path import abspath, dirname
import sys
from get_best_sentences import generate_sentences
from flask import Flask, json, request, Response
import torch
import string
import random
from flask_cors import CORS, cross_origin
from data import Preprocess
app = Flask(__name__)
cors = CORS(app, resources={r"/generate-questions": {"origins": "*"}, "/get-random-doc": {"origins": "*"}})
app.config['CORS_HEADERS'] = 'Content-Type'
sys.path.insert(0, dirname(dirname(abspath(__file__))))
@app.after_request
def after_request(response):
    """Attach a permissive CORS header to every outgoing response."""
    header = response.headers
    header['Access-Control-Allow-Origin'] = '*'
    return response
@app.route("/")
def hello():
return f"Hello, World!{torch.cuda.is_available()}"
@app.route("/generate-questions", methods=['POST', 'OPTIONS'])
@cross_origin(origin='*')
def generate_questions():
if request.json:
letters = string.ascii_letters
file_name = ''.join(random.choice(letters) for i in range(10)) + ".json"
file_path = f"./temp_files/{file_name}"
with open(file_path, 'w', encoding='utf-8') as f:
json.dump(request.json, f, ensure_ascii=False, indent=4)
dataset = Preprocess(file_path, 'dbmdz/bert-base-turkish-cased')
dataset.save(f'../data/bert/dbmdz/bert-base-turkish-cased/requests/{file_name}')
resp = json.dumps({"data":generate_sentences(file_name)}, ensure_ascii=False)
response = Response(resp, content_type="application/json; charset=utf-8",status=200)
else:
response = Response(status=400)
return response
@app.route("/get-random-doc", methods=['GET', 'OPTIONS'])
@cross_origin(origin='*')
def get_random_doc():
f = open('../data/turquad/test_delex.json', "r")
dataset = json.load(f)
rand_doc = random.choice(dataset['data'])
resp = json.dumps(rand_doc, ensure_ascii=False)
response = Response(resp, content_type="application/json; charset=utf-8",status=200)
return response
if __name__ == '__main__':
    # Development entry point; binds to all interfaces on port 5000.
    print("App is now running on 0.0.0.0:5000 !")
    app.run('0.0.0.0', 5000, debug=True)
| {"/qg-model/src/model/__init__.py": ["/qg-model/src/model/model.py"], "/qg-model/src/__init__.py": ["/qg-model/src/get_best_sentences.py"]} |
63,774 | alpgokcek/turkish-qg-model | refs/heads/main | /qg-model/src/model/utils/checkpoint.py | import torch
def save_checkpoint(name, epoch, model, optimizer, valid_loss, train_loss, bleu):
    """Serialize the full training state to *name* via torch.save.

    Stores the epoch counter, model and optimizer state dicts, both loss
    histories, and the BLEU history so training can be resumed later.
    """
    state = {
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'valid_loss': valid_loss,
        'train_loss': train_loss,
        'bleu': bleu,
    }
    torch.save(state, name)
def load_checkpoint(filename):
    """Load a checkpoint saved by save_checkpoint.

    Maps tensors onto the CPU when CUDA is unavailable, so GPU-trained
    checkpoints remain loadable.

    Returns:
        Tuple (epoch, model_state_dict, optimizer_state_dict, valid_loss,
        train_loss, bleu).
    """
    checkpoint = torch.load(filename, map_location=None if torch.cuda.is_available() else torch.device('cpu'), encoding='bytes')
    return checkpoint['epoch'], checkpoint['model_state_dict'],\
        checkpoint['optimizer_state_dict'], checkpoint['valid_loss'], checkpoint['train_loss'], checkpoint['bleu']
| {"/qg-model/src/model/__init__.py": ["/qg-model/src/model/model.py"], "/qg-model/src/__init__.py": ["/qg-model/src/get_best_sentences.py"]} |
63,775 | alpgokcek/turkish-qg-model | refs/heads/main | /qg-model/src/get_best_sentences.py | import logging
from turkish_suffix_library.turkish import Turkish
import re
import io
import pprint
from nltk.translate.bleu_score import SmoothingFunction
from nltk.translate import bleu
logging.getLogger('transformers').setLevel(logging.WARNING)
log = logging.getLogger(__name__)
import time
import math
import torch
from torch import optim, nn, cuda
from transformers import AdamW, BertTokenizer
from torch.utils.data import DataLoader
from transformers import BertModel
from config import checkpoint, bert_path, mb, dl_workers, device, bert_hidden_size, decoder_hidden_size, \
bert_vocab_size, decoder_input_size, dropout, epochs, clip, model_path, bert_model, encoder_trained, \
attention_hidden_size, num_layers, weight_decay, betas, lr, momentum
from model.utils import load_checkpoint, init_weights, save_checkpoint, enable_reproducibility, model_size, no_grad
from model import Attention, Decoder, Seq2Seq
from data import BertDataset
from run import train, eval
from run.utils.time import epoch_time
def extract_name(input_list):
    """Extract the entity name delimited by '<' and '>' from a token list.

    Tokens between the markers are joined with spaces and WordPiece
    continuation markers (" ##") are fused back onto the preceding token.
    Falls back to the literal string "name" (after logging) when the
    markers are missing.

    Replaces manual index-walking loops with list.index/slicing and narrows
    the broad ``except Exception`` to the ValueError that a missing marker
    raises.
    """
    try:
        start = input_list.index('<') + 1
        end = input_list.index('>', start)
        return ' '.join(input_list[start:end]).replace(' ##', '')
    except ValueError:
        print("Error extracting name on", input_list)
        return "name"
# Placeholder tokens used inside question patterns to mark which Turkish
# case suffix must be attached to the inserted name.
_suffix1, _suffix2, _suffix3 = "_suffix1", "_suffix2", "_suffix3"


def get_suffix(word: str, sffx_type: str):
    """Return only the Turkish case suffix for *word*, without the word itself.

    sffx_type selects the case: _suffix1 -> genitive (-in), _suffix2 ->
    dative (-e), _suffix3 -> ablative (-den).
    NOTE(review): the _suffix4 branch duplicates the ablative case -- confirm
    this is intentional rather than a copy-paste error.
    """
    suffix = ''
    if sffx_type == "_suffix1":
        suffix = Turkish(word).genitive(proper_noun=True) # -in
    elif sffx_type == "_suffix2":
        suffix = Turkish(word).dative(proper_noun=True) # -e
    elif sffx_type == "_suffix3":
        suffix = Turkish(word).ablative(proper_noun=True) #-den
    elif sffx_type == "_suffix4":
        suffix = Turkish(word).ablative(proper_noun=True)
    suffix = str(suffix)
    # Drop the word itself, keeping only the characters the library appended.
    return suffix[len(word):]
def format_question(name, question_pattern):
    """Fill a question pattern with *name* and the matching Turkish suffix.

    Patterns contain "{name}" plus at most one "{_suffixN}" placeholder;
    the suffix is derived from the name so the question stays grammatical.
    Falls back to returning the pattern unchanged (after logging) when
    formatting fails.
    """
    try:
        if _suffix1 in question_pattern:
            question_pattern = question_pattern.format(
                name=name, _suffix1=get_suffix(name, _suffix1))
        elif _suffix2 in question_pattern:
            question_pattern = question_pattern.format(
                name=name, _suffix2=get_suffix(name, _suffix2))
        elif _suffix3 in question_pattern:
            question_pattern = question_pattern.format(
                name=name, _suffix3=get_suffix(name, _suffix3))
        else:
            question_pattern = question_pattern.format(name=name)
        return question_pattern
    except Exception as e:
        print("Error on: {} - {} \n{}\n".format(e, name, question_pattern))
        return question_pattern
def format_sentence_list(raw_content, names):
    """Turn raw model token sequences back into human-readable questions.

    Args:
        raw_content: per-sample tuples; elements 1 and 2 hold candidate token
            sequences (element 0 is skipped).
            # assumes the tuple layout produced by loss()/bleu_score() -- TODO confirm
        names: entity name extracted for each sample, substituted into each
            question via format_question().

    Returns:
        One list of formatted question strings per input sample.
    """
    formatted_questions = []
    for i, line in enumerate(raw_content):
        line = line[1:3]
        formatted_lines = []
        special_case = False
        for question in line:
            formatted_line = ''
            for token in question:
                if token == "[SEP]":
                    continue
                elif token[0] == "#" or token == "." or special_case:
                    # WordPiece continuation ("##...") or a token directly
                    # after "{"/"_" -- glue it on without a leading space.
                    formatted_line += token.replace('#', '').replace("'",'')
                    special_case = False
                elif token == "{" or token == "_":
                    # Start of a "{name}"/"{_suffixN}" placeholder: the next
                    # token must be glued on as well.
                    formatted_line += token
                    special_case = True
                elif token == "}":
                    formatted_line += token
                elif token == "?":
                    # A question mark ends the sentence.
                    formatted_line += "?"
                    break
                else:
                    formatted_line += " " + token.replace('#', '').replace("'",'')
                    special_case = False
            # "_ s" is a tokenization artifact of the suffix placeholders.
            formatted_lines.append(format_question(names[i], formatted_line.replace("_ s", "_s")))
        print(formatted_lines)
        formatted_questions.append(formatted_lines)
    return formatted_questions
def bleu_score(prediction, ground_truth):
    """Return the best smoothed BLEU-4 pair among the batch's samples.

    Args:
        prediction: model output logits; argmax over dim 2 yields token ids.
            # assumes shape (batch, seq_len, vocab) -- TODO confirm
        ground_truth: reference token-id sequences, one per sample.

    Returns:
        Tuple (score, reference_tokens, predicted_tokens) for the pair with
        the highest BLEU-4 score.
    """
    prediction = prediction.max(2)[1]
    bleu_list = []
    for x, y in zip(ground_truth, prediction):
        x = tokenizer.convert_ids_to_tokens(x.tolist())
        y = tokenizer.convert_ids_to_tokens(y.tolist())
        # Truncate each sequence at its [SEP] terminator when present; the
        # leading token (position 0) is excluded from scoring.
        idx1 = x.index('[SEP]') if '[SEP]' in x else len(x)
        idx2 = y.index('[SEP]') if '[SEP]' in y else len(y)
        bleu_list.append((bleu([x[1:idx1]], y[1:idx2], [0.25, 0.25, 0.25, 0.25],
                        smoothing_function=SmoothingFunction().method4), x[1:idx1], y[1:idx2]))
    return (max(bleu_list, key=lambda x: x[0]))
def loss(prediction, ground_truth):
    """Return the highest-loss (reference, prediction) pair in the batch.

    Computes per-token cross-entropy (padding id 0 ignored), sums it per
    sample, and keeps the sample with the largest total.

    Returns:
        Tuple (loss_value, reference_tokens, predicted_tokens).
    """
    criterion = nn.CrossEntropyLoss(ignore_index=0, reduction='none')
    train_loss = []
    for x, y in zip(ground_truth, prediction):
        w = tokenizer.convert_ids_to_tokens(x.tolist())
        z = tokenizer.convert_ids_to_tokens(y.max(1)[1].tolist())
        # Truncate at the [SEP] terminator when present.
        idx1 = w.index('[SEP]') if '[SEP]' in w else len(w)
        idx2 = z.index('[SEP]') if '[SEP]' in z else len(z)
        # NOTE: the local name shadows this function; harmless within scope.
        loss = criterion(y, x.to(device))
        loss = loss.sum()
        train_loss.append((loss, w[1:idx1], z[1:idx2]))
    return (max(train_loss, key=lambda x: x[0]))
tokenizer = BertTokenizer.from_pretrained("dbmdz/bert-base-turkish-cased")
def generate_sentences(filename, is_request=True):
    """Run the trained seq2seq model over a dataset and return formatted questions.

    Args:
        filename: name of the preprocessed dataset file. When *is_request*
            is True it is resolved under the "requests/" directory.
        is_request: whether the dataset comes from an API request (True) or
            is a named evaluation split such as 'test' (False).

    Returns:
        A list of formatted question lists, one entry per input sample.

    Fixes: the request path previously interpolated nothing (a literal
    f"requests/(unknown)") instead of the uploaded file name; the decoder
    checkpoint file is now closed via a context manager; the unused
    bleu_list accumulator was removed.
    """
    enable_reproducibility(121314)
    dataset_path = filename if not is_request else f"requests/{filename}"
    valid_set = BertDataset(bert_path / bert_model / dataset_path)
    valid_loader = DataLoader(valid_set, batch_size=1, shuffle=True, num_workers=dl_workers,
                              pin_memory=True if device == 'cuda' else False)
    attention = Attention(bert_hidden_size, decoder_hidden_size)
    decoder = Decoder(bert_vocab_size, decoder_input_size, bert_hidden_size,
                      decoder_hidden_size, dropout, attention, device)
    model = Seq2Seq(decoder, device)
    print("../data/model/bert-base-cased")
    encoder = BertModel.from_pretrained("../data/model/dbmdz/bert-base-turkish-cased/" + "model0epoch" + str(epochs - 1))
    encoder.to(device)
    with open("../data/model/decoder/model0epoch" + str(epochs - 1), 'rb') as f:
        _, model_dict, _, _, _, _ = load_checkpoint(f)
    model.load_state_dict(model_dict)
    model.to(device)
    loss_list = []
    names_list = []
    with torch.no_grad():
        for i, (input_, output_) in enumerate(valid_loader):
            input_data, input_length = input_
            output_data, output_length = output_
            # The entity name is embedded between '<' and '>' in the input.
            names_list.append(extract_name(tokenizer.convert_ids_to_tokens(input_data[0][0])))
            input_ids, token_type_ids, attention_mask = input_data
            bert_hs = encoder(input_ids.to(device), token_type_ids=token_type_ids.to(device),
                              attention_mask=attention_mask.to(device))
            # Teacher forcing ratio 0: fully free-running decoding.
            prediction = model(bert_hs[0], output_data.to(device), 0)
            loss_list.append(loss(prediction, output_data.to(device)))
    return format_sentence_list(loss_list, names_list)
if __name__ == '__main__':
generate_sentences('test', False)
| {"/qg-model/src/model/__init__.py": ["/qg-model/src/model/model.py"], "/qg-model/src/__init__.py": ["/qg-model/src/get_best_sentences.py"]} |
63,776 | alpgokcek/turkish-qg-model | refs/heads/main | /qg-model/src/print_training_loss.py | from model.utils import load_checkpoint
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
    # Plot training loss and BLEU score per epoch from a saved checkpoint.
    checkpoint = '../data/model/decoder/model5678epoch39'
    last_epoch, model_dict, optim_dict, valid_loss, train_loss, bleu_score = load_checkpoint(checkpoint)
    epochs = range(1, len(train_loss) + 1)
    fig, ax1 = plt.subplots()
    ax1.set_xlabel('Epoch')
    ax1.set_ylabel('Training Loss')
    ax1.plot(epochs, train_loss, color='tab:red', label='Loss')
    # Second y-axis so loss and BLEU can share the epoch axis.
    ax2 = ax1.twinx()
    ax2.set_ylabel('Bleu Score')
    ax2.plot(epochs, bleu_score, '--', color='tab:blue', label='BLEU')
    print(bleu_score)
    print(np.argmax(bleu_score))
    print(np.max(bleu_score))
    # Highlight the epoch with the best BLEU score.
    ax2.scatter(np.argmax(bleu_score), np.max(bleu_score), color='none', edgecolor='yellow')
    # Merge the legends of both axes into a single box.
    lines1, labels1 = ax1.get_legend_handles_labels()
    lines2, labels2 = ax2.get_legend_handles_labels()
    ax2.legend(lines1 + lines2, labels1 + labels2, loc=0)
    fig.tight_layout()
    plt.show()
63,777 | alpgokcek/turkish-qg-model | refs/heads/main | /Preliminary_QG/MinifiedQuestionPatterns.py | class MinifiedQuestionPatterns:
patterns = {
"Asker": {
"rütbesi": [
"{name} hangi rütbeye sahiptir?"
],
"hizmetyılları": [
"{name}'{_suffix1} hizmet yılları nedir?"
],
"bağlılığı": [
"{name} hangi orduda görev aldı?"
],
"savaşları": [
"{name}'{_suffix1} katıldığı savaşlar nelerdir?"
],
"komutaettikleri": [
"{name}'{_suffix1} komuta ettikleri kimlerdir?"
],
"madalya": [
"{name}'{_suffix2} takdim edilen madalyalar nelerdir?"
]
},
"Basketbolcu": {
"pozisyon": [
"{name}'{_suffix1} oynamakta olduğu pozisyon nedir?"
],
"takım1": [
"{name}'{_suffix1} profesyonel kariyerindeki ilk takım nedir?"
],
"takımyıl1": [
"{name} profesyonel kariyerine ne zaman başlamıştır?"
],
"takım2": [
"{name}'{_suffix1} profesyonel kariyerindeki ikinci takım nedir?"
],
"takımyıl2": [
"{name}'{_suffix1} oynadığı ikinci takımda hangi yıllar arasında oynamıştır?"
],
"takım": [
"{name}'{_suffix1} güncel takımı hangisidir?"
],
"lig": [
"{name} hangi ligde oynamaktadır?"
]
},
"Bilim adamı": {
"milliyeti": [
"{name} nerelidir?"
],
"çalıştığıyerler": [
"{name}'{_suffix1} çalıştığı kurumlar nerelerdir?"
],
"ödüller": [
"{name}'{_suffix1} sahip olduğu ödüller nelerdir?"
],
"önemlibaşarıları": [
"{name}'{_suffix1} elde ettiği önemli başarılar nelerdir?"
],
"vatandaşlığı": [
"{name} hangi ülkenin vatandaşıdır?"
]
},
"Buz patencisi": {
"ülke": [
"{name} hangi ülke adına mücadele etmektedir?"
],
"koç": [
"{name}'{_suffix1} koçu kimdir?"
]
},
"Filozof": {
"çağ": [
"{name} hangi çağda yaşadı?"
],
"bölge": [
"{name} hangi bölgede yaşadı?"
],
"etkilendikleri": [
"{name}'{_suffix1} etkilendiği kişiler kimlerdir?"
],
"etkiledikleri": [
"{name}'{_suffix1} etkilediği kişiler kimlerdir?"
],
"ilgialanları": [
"{name}'{_suffix1} ilgi alanları nelerdir?"
],
"okulgelenek": [
"{name}'{_suffix1} okulunun gelenekleri nelerdir?"
]
},
"Hakem": {},
"Güreşçi": {
"debut": [
"{name} ilk maçına ne zaman çıkmıştır?"
],
"ringadları": [
"{name}'{_suffix1} ring adı nedir?"
],
"eğiten": [
"{name}'{_suffix1} eğiten kişiler kimlerdir?"
],
"eğitildiğiyer": [
"{name} nerede eğitilmiştir?"
],
"yaşadığıyer": [
"{name} nerede yaşamaktadır?"
],
"emekliliği": [
"{name} hangi yılda emekli olmuştur?"
]
},
"Futbolcu": {
"pozisyon": [
"{name} hangi pozisyonda oynamaktadır?"
],
"tamadı": [
"{name}'{_suffix1} tam adı nedir?"
],
"kulüp1": [
"{name}'{_suffix1} ilk oynadığı takım hangisidir?"
],
"kulüpyıl1": [
"{name} profesyonel kariyerine ne zaman başlamıştır?"
],
"kulüp2": [
"{name}'{_suffix1} oynadığı ikinci takım nedir?"
],
"kulüpyıl2": [
"{name}'{_suffix1} ikinci oynadığı takımda hangi yıllar arasında oynamıştır?"
]
},
"Kişi": {
"meslek": [
"{name}'{_suffix1} gerçek mesleği nedir?"
],
"aktifyılları": [
"{name}'{_suffix1} aktif yılları nedir?"
],
"yer": [
"{name} nerede yaşamaktadır?"
],
"etinyılları": [
"{name}'{_suffix1} etkin yılları nedir?"
],
},
"Kraliyet": {
"hükümsüresi": [
"{name} hangi yıllar arası hüküm sürmüştür?"
],
"sonragelen": [
"{name}'{_suffix3} sonra gelen hükümdar kim?",
],
"öncegelen": [
"{name}'{_suffix3} önce gelen hükümdar kimdir?"
],
"babası": [
"{name}'{_suffix1} babasının adı nedir?"
],
"hanedan": [
"{name} hangi hanedandandır?"
],
"annesi": [
"{name}'{_suffix1} annesinin adı nedir?"
],
},
"Makam sahibi": {
"makam": [
"{name} hangi makamda görev almıştır?"
],
"dönembaşı": [
"{name} hangi yılda göreve başlamıştır?"
],
"öncegelen": [
"{name}'{_suffix3} önce görev alan kişi kimdir?"
],
"dönemsonu": [
"{name} hangi yılda görevi sona ermiştir?"
],
"sonragelen": [
"{name}'{_suffix3} sonra görev alan kişi kimdir?"
],
"partisi": [
"{name} hangi partinin bir mensubudur?"
]
},
"Manken": {
"gözrengi": [
"{name}'{_suffix1} göz rengi nedir?"
],
"saçrengi": [
"{name}'{_suffix1} saç rengi nedir?"
],
"ulus": [
"{name} hangi ülke asıllıdır?"
],
"boy": [
"{name}'{_suffix1} boyu nedir?"
]
},
"Müzik sanatçısı": {
"artalan": [
"{name}'{_suffix1} art alanı nedir?"
],
"tarz": [
"{name}'{_suffix1} ne tarzda müzik yapmaktadır?"
],
"etkinyıllar": [
"{name} hangi yıllarda aktif olarak müzik yapmıştır?"
],
"meslek": [
"{name}'{_suffix1} gerçek mesleği nedir?"
],
"plakşirketi": [
"{name} hangi plak şirketiyle çalışmaktadır?"
],
"köken": [
"{name} kökeni nedir?"
],
"çalgı": [
"{name} hangi müzik aletini çalmaktadır?"
]
},
"Oyuncu": {
"yer": [
"{name} nerede yaşamaktadır?"
],
"meslek": [
"{name}'{_suffix1} gerçek mesleği nedir?"
],
"etkinyıllar": [
"{name} hangi yıllarda aktif olarak rol almıştır?"
],
"evlilik": [
"{name}'{_suffix1} eşi kimdir?"
],
"ulus": [
"{name} hangi ülke asıllıdır?"
]
},
"Profesyonel güreşçi": {
"debut": [
"{name} ilk maçına ne zaman çıkmıştır?"
],
"ringadları": [
"{name}'{_suffix1} ring adları nedir?"
],
"eğiten": [
"{name}'{_suffix1} eğiten kişiler kimlerdir?"
],
"eş": [
"{name} kimle evlidir?",
],
"eğitildiğiyer": [
"{name} nerede eğitilmiştir?"
],
"çocuklar": [
"{name}'{_suffix1} çocukları kimlerdir?"
]
},
"Sanatçı": {
"alanı": [
"{name}'{_suffix1} alanı nedir?"
],
"milliyeti": [
"{name} hangi ülke asıllıdır?"
],
"yer": [
"{name} nerede yaşamaktadır?"
],
"ünlüyapıtları": [
"{name}'{_suffix1} ünlü yapıtları nelerdir?"
]
},
"Sporcu": {
"ülke": [
"{name} hangi ülke adına ter dökmektedir?"
],
"spor": [
"{name} hangi sporu yapar?"
],
"uyruk": [
"{name} hangi ülke asıllıdır?"
],
"kei": [
"{name}'{_suffix1} en iyi derecesi nedir?"
],
"ağırlık": [
"{name}'{_suffix1} kilosu nedir?"
]
},
"Tenis sporcu": {
"vatandaşlık": [
"{name} hangi ülke vatandaşıdır?"
],
"enyükseksıralama": [
"{name}'{_suffix1} en iyi derecesi nedir?"
],
"oyunstili": [
"{name}'{_suffix1} oyun stili nedir?"
],
"wimbledonsonuçları": [
"{name}'{_suffix1} Amerika açık sonuçları nelerdir?"
],
"amerikaaçıksonuçları": [
"{name}'{_suffix1} Amerika açık sonuçları nelerdir?"
],
"fransaaçıksonuçları": [
"{name}'{_suffix1} Fransa açık sonuçları nelerdir?"
],
"avustralyaaçıksonuçları": [
"{name}'{_suffix1} Avustralya açık sonuçları nelerdir?"
],
"toplamkupa": [
"{name} toplam kaç kupa kazanmıştır?"
],
"yaşadığıyer": [
"{name} nerede yaşamaktadır?"
]
},
"Voleybolcu": {
"pozisyon": [
"{name} hangi pozisyonda oynamaktadır?"
],
"milliyeti": [
"{name} hangi ülke asıllıdır?"
],
"kulüptakım": [
"{name} hangi takımlarda oynamıştır?"
],
"bulunduğukulüp": [
"{name} hangi takımda oynamaktadır?"
],
"numarası": [
"{name}'{_suffix1} forma numarası nedir?"
],
"millitakım": [
"{name} hangi milli takımda oynamaktadır?"
],
},
"Yazar": {
"meslek": [
"{name}'{_suffix1} gerçek mesleği nedir?"
],
"tür": [
"{name} hangi türlerde eserler vermiştir?"
],
"dönem": [
"{name} hangi dönemde eserler vermiştir?"
],
"ilkeser": [
"{name}'{_suffix1} ilk eseri nedir?"
]
}
}
| {"/qg-model/src/model/__init__.py": ["/qg-model/src/model/model.py"], "/qg-model/src/__init__.py": ["/qg-model/src/get_best_sentences.py"]} |
63,778 | alpgokcek/turkish-qg-model | refs/heads/main | /Preliminary_QG/QuestionPatterns.py | class QuestionPatterns:
patterns = {
"Asker": {
"rütbesi": [
"{name} hangi rütbede görev yapmaktaydı?",
"{name}'{_suffix1} rütbesi nedir?",
"{name}'{_suffix1} sahip olduğu rütbe nedir?",
"{name} hangi rütbeye sahiptir?",
"{name} ordudaki rütbesi nedir?"
],
"hizmetyılları": [
"{name}'{_suffix1} hizmet yılları nedir?",
"{name}'{_suffix1} hizmet yılları ne zamandır?",
"{name}'{_suffix1} ordudaki yılları nedir?",
"{name}'{_suffix1} orduda geçirdiği yıllar nedir?",
"{name}'{_suffix1} orduda bulunduğu yıllar nedir?",
"{name} ne zaman hizmet vermiştir?",
"{name} ne zaman orduda bulunmuştur?",
"{name} ne zamanlar hizmet vermiştir?",
"{name} ne zaman asker olarak görev yapmıştır?",
"{name} hangi yıllar arasında hizmet vermiştir",
"{name} hangi yıllar arasında orduda hizmet vermiştir?",
"{name} hangi yıllar arasında orduda bulunmuştur?",
"{name}'{_suffix1} orduda geçirdiği yıllar hangileridir?",
"{name} hangi yıllarını orduda geçirmiştir?",
"{name} hangi yıllarda orduda bulundu?",
"{name} hangi yıllarda asker olarak görev yapmıştır?",
"{name} hangi yıllarda arasında askerlik yapmaktaydı?",
],
"bağlılığı": [
"{name} hangi ordudaydı?",
"{name} hangi orduda görev aldı?",
"{name} hangi orduya bağlıdır?",
"{name} hangi orduya bağlı görev aldı?",
"{name} hangi orduya bağlı olarak görev almıştır?",
"{name} hangi ülke ordusunda bulunuyordu?",
"{name} hangi ülke ordusunda görev almıştır?",
"{name} hangi ülke ordusuna mensuptur?",
"{name}'{_suffix1} bağlı bulunduğu ordu hangisidir?"
"{name}'{_suffix1} bağlılığı hangi ordudaydı?"
],
"savaşları": [
"{name} nerede savaşmıştır?",
"{name} nerelerde savaşmıştır",
"{name} hangi cephelerde savaştı?",
"{name} hangi savaşlarda savaştı?",
"{name} hangi savaşlara katılmıştır?",
"{name}'{_suffix1} katıldığı savaşlar nelerdir?",
"{name}'{_suffix1} bulunduğu savaşlar hangileriydi?",
],
"komutaettikleri": [
"{name}'{_suffix1} komuta ettikleri kimlerdir?",
"{name}'{_suffix1} komuta ettiği askerler kimlerdi?",
"{name} kimleri komuta etmiştir?",
"{name}'{_suffix1} komutası altında kimler bulunuyordu?"
],
"madalya": [
"{name} hangi madalyalara sahiptir?",
"{name} hangi madalyaları almıştır?",
"{name}'{_suffix2} takdim edilen madalyalar nelerdir?",
"{name}'{_suffix2} verilen madalyalar nelerdir?",
"{name}'{_suffix1} kazandığı madalyalar hangileriydi?",
"{name}'{_suffix1} sahip olduğu madalyalar nelerdir?"
]
},
"Basketbolcu": {
"pozisyon": [
"{name} hangi pozisyonda oynamaktadır?",
"{name} hangi pozisyonda oynuyor?",
"{name} hangi mevkiide oynamaktadır?",
"{name} hangi mevkiide oynar?",
"{name} hangi mevkiide oynuyor?",
"{name}'{_suffix1} oynadığı pozisyon nedir?",
"{name}'{_suffix1} oynamakta olduğu pozisyon nedir?",
"{name}'{_suffix1} pozisyonu nedir?",
"{name}'{_suffix1} oyun pozisyonu nedir?",
"{name}'{_suffix1} maçlardaki pozisyonu nedir?"
],
"takım1": [
"{name}'{_suffix1} ilk oynadığı takım hangisidir?",
"{name}'{_suffix1} oynadığı ilk takım hangisidir?",
"{name}'{_suffix1} ilk takımı hangisi?",
"{name}'{_suffix1} ilk takımı hangisidir?",
"{name}'{_suffix1} kariyerine başladığı takım hangisidir?",
"{name}'{_suffix1} kariyerindeki ilk takım hangisidir?",
"{name} kariyerine hangi takımda başlamıştır?",
"{name} profesyonel kariyerine hangi takımda başlamıştır?",
"{name} profesyonel kariyerine hangi takımla başlamıştır?",
"{name} profesyonel kariyerine hangi takımla adım atmıştır?",
"{name}'{_suffix1} profesyonel kariyerindeki ilk takım hangisidir?",
"{name}'{_suffix1} profesyonel kariyerindeki ilk takım hangisi?",
],
"takımyıl1": [
"{name} profesyonel kariyerine ne zaman başlamıştır?",
"{name}'{_suffix1} profesyonel kariyerine başlangıcı ne zamandır?",
"{name}'{_suffix1} profesyonel kariyerine başlangıcı ne zaman oldu?",
"{name}'{_suffix1} profesyonel kariyerine başlangıcı ne zaman olmuştur?",
"{name} profesyonel kariyerine hangi yılda başladı?",
"{name} profesyonel kariyerine hangi yılda başlamıştır?",
"{name} profesyonel kariyerine ne zaman başladı?",
"{name} profesyonel kariyerine ne zaman başlamıştır?",
"{name}'{_suffix1} ilk takımında ne zaman oynamıştır?",
"{name}'{_suffix1} ilk takımında hangi yıllar oynamıştır?",
"{name}'{_suffix1} ilk takımında hangi yıllar oynadı?",
"{name}'{_suffix1} ilk takımında oynadığı zaman aralığı nedir?",
"{name}'{_suffix1} ilk oynadığı takımda ne zaman oynamıştır?",
"{name}'{_suffix1} ilk oynadığı takımda hangi yıllar arasında oynamıştır?",
"{name}'{_suffix1} ilk oynadığı takımda hangi yıllar arasında oynamıştı?",
"{name}'{_suffix1} ilk oynadığı takımda hangi yıllar arasında oynadı?"
],
"takım2": [
"{name}'{_suffix1} ikinci oynadığı takım hangisidir?",
"{name}'{_suffix1} oynadığı ikinci takım hangisidir?",
"{name}'{_suffix1} oynadığı ikinci takım nedir?",
"{name}'{_suffix1} ikinci takımı nedir?",
"{name}'{_suffix1} ikinci takımı hangisi?",
"{name}'{_suffix1} ikinci takımı hangisidir?",
"{name}'{_suffix1} kariyerindeki ikinci takım hangisidir?",
"{name}'{_suffix1} kariyerindeki ikinci takım nedir?",
"{name}'{_suffix1} profesyonel kariyerindeki ikinci takım hangisidir?",
"{name}'{_suffix1} profesyonel kariyerindeki ikinci takım hangisiydi?",
"{name}'{_suffix1} profesyonel kariyerindeki ikinci takım hangisi?",
"{name}'{_suffix1} profesyonel kariyerindeki ikinci takım nedir?"
],
"takımyıl2": [
"{name} kariyerindeki ikinci takımda ne zaman oynamıştır?",
"{name} ikinci takımında ne zaman oynamıştır?",
"{name} ikinci takımında hangi yıllar oynamıştır?",
"{name} ikinci takımında hangi yıllar oynadı?",
"{name}'{_suffix1} ikinci takımında oynadığı zaman aralığı nedir?",
"{name} ikinci oynadığı takımda hangi yıllar arasında oynamıştır?",
"{name} oynadığı ikinci takımda hangi yıllar arasında oynadı?",
"{name} oynadığı ikinci takımda hangi yıllar arasında oynamıştır?",
"{name} oynadığı ikinci takımda hangi periyotta oynamıştır?"
],
"takım": [
"{name} hangi takımda oynamaktadır?",
"{name} şu an hangi takımda oynamaktadır?",
"{name} hangi takımda oynuyor?",
"{name} şu an hangi takımda oynuyor?",
"{name} hangi takımın oyuncusudur?",
"{name} şu an hangi takımın oyuncusudur?",
"{name} hangi takımdadır?",
"{name} şu an hangi takımdadır?",
"{name}'{_suffix1} oynadığı takım hangisidir?",
"{name}'{_suffix1} takımı hangisidir?",
"{name}'{_suffix1} güncel takımı hangisidir?",
"{name}'{_suffix1} şu an oynadığı takım hangisidir?"
],
"lig": [
"{name} hangi ligde oynamaktadır?",
"{name} hangi ligde oynadı?",
"{name} şu an hangi ligde oynamaktadır?",
"{name} hangi ligde oynuyor?",
"{name} şu an hangi ligde oynuyor?",
"{name} hangi ligin oyuncusudur?",
"{name} şu an hangi ligin oyuncusudur?",
"{name} hangi ligdedir?",
"{name} şu an hangi ligdedir?",
"{name}'{_suffix1} oynadığı lig hangisidir?",
"{name}'{_suffix1} ligi hangisidir?",
"{name}'{_suffix1} güncel ligi hangisidir?",
"{name}'{_suffix1} şu an oynadığı takım hangisidir?"
]
},
"Bilim adamı": {
"milliyeti": [
"{name} nerelidir?",
"{name} hangi ülke asıllıdır?",
"{name} hangi millettendir?",
"{name}'{_suffix1} hangi millettendir?"
],
"çalıştığıyerler": [
"{name}'{_suffix1} çalıştığı yerler nedir?",
"{name}'{_suffix1} çalıştığı yerler nerelerdir?",
"{name} nerelerde çalışmıştır?",
"{name} nerelerde çalıştı?",
"{name} nerelerde çalışmıştı?",
"{name}'{_suffix1} çalıştığı kurumlar nerelerdir?",
"{name} hangi kurumlarda çalışmıştır?",
"{name}'{_suffix1} çalıştığı kurumlar nedir?",
],
"ödüller": [
"{name} hangi ödüllere sahiptir?",
"{name} hangi ödülleri almıştır?",
"{name} sahip olduğu ödüller nelerdir?",
"{name}'{_suffix2} takdim edilen ödüller nelerdir?",
"{name}'{_suffix2} verilen ödüller nelerdir?",
"{name}'{_suffix1} kazandığı ödüller nelerdir?",
"{name}'{_suffix1} sahip olduğu ödüller nelerdir?"
],
"önemlibaşarıları": [
"{name}'{_suffix1} önemli başarıları nelerdir?",
"{name}'{_suffix1} sahip olduğu önemli başarılar nelerdir?",
"{name}'{_suffix1} elde ettiği önemli başarılar nelerdir?",
"{name}'{_suffix1} en önemli başarıları nelerdir?",
"{name} en önemli başarıları nelerdir?"
],
"vatandaşlığı": [
"{name} hangi ülke vatandaşıdır?",
"{name} hangi ülkenin vatandaşıdır?",
"{name} nerenin vatandaşıdır?",
"{name} vatandaşlığı hangi ülkedendir?",
"{name}'{_suffix1} vatandaşlığı hangi ülkedendir?",
"{name}'{_suffix1} vatandaşı olduğu ülke hangisidir?",
"{name}'{_suffix1} vatandaşı olduğu ülke nedir?",
"{name}'{_suffix1} vatandaşlığına sahip olduğu ülke nedir?",
"{name}'{_suffix1} vatandaşlığına sahip olduğu ülke hangisidir?"
]
},
"Buz patencisi": {
"ülke": [
"{name} hangi ülke vatandaşıdır?",
"{name} hangi ülke adına mücadele etmektedir?",
"{name} hangi ülkenin sporcusudur?",
"{name} hangi ülkenin sporcusuydu?",
"{name} hangi ülkenin vatandaşıdır?",
"{name} nerenin vatandaşıdır?",
"{name} vatandaşlığı hangi ülkedendir?",
"{name}'{_suffix1} vatandaşlığı hangi ülkedendir?",
"{name}'{_suffix1} vatandaşı olduğu ülke hangisidir?",
"{name}'{_suffix1} vatandaşı olduğu ülke nedir?",
"{name}'{_suffix1} vatandaşlığına sahip olduğu ülke nedir?",
"{name}'{_suffix1} vatandaşlığına sahip olduğu ülke hangisidir?"
],
"koç": [
"{name}'{_suffix1} koçu kimdir?",
"{name}'{_suffix1} koçu kim?",
"{name}'{_suffix1} koçu kimdi?",
"{name}'{_suffix1} spor koçu kimdi?"
]
},
"Filozof": {
"çağ": [
"{name} hangi çağda yaşamaktaydı?",
"{name} hangi çağda yaşadı?",
"{name} yaşadığı çağ hangisiir?",
"{name} hangi çağda yaşadı?",
"{name} hangi çağda yaşadı?",
"{name} hangi çağda yaşadı?"
],
"bölge": [
"{name} hangi bölgede yaşamaktaydı?",
"{name} hangi bölgede yaşadı?"
],
"etkilendikleri": [
"{name} kimlerden etkilenmiştir?",
"{name} kimlerden etkilendi?",
"{name}'{_suffix1} etkilendiği kişiler kimlerdir?"
],
"etkiledikleri": [
"{name} kimleri etkilemiştir?",
"{name} kimleri etkiledi?",
"{name}'{_suffix1} etkilediği kişiler kimlerdir?"
],
"ilgialanları": [
"{name}'{_suffix1} ilgi alanları nelerdir?",
"{name}'{_suffix1} ilgi alanları nelerdi?"
],
"okulgelenek": [
"{name}'{_suffix1} okulunun geleneği nedir?",
"{name}'{_suffix1} okulunun gelenekleri nelerdir?"
]
},
"Hakem": {
"turnuva": [
"{name}'{_suffix1} görev aldığı turnuvalar hangileridir?",
"{name} hangi turnuvalarda görev yapmıştır?",
"{name} bulunduğu turnuvalar nelerdir?"
],
"turnuva": [
"{name}'{_suffix1} görevi nedir?",
"{name} hangi görevi yapmaktadır?",
"{name} üslendiği görev nedir?"
],
},
"Güreşçi": {
"debut": [
"{name}'{_suffix1} ilk ringe çıkışı ne zamandır?",
"{name}'{_suffix1} ilk maça çıkışı ne zamandır?",
"{name} ilk maçına ne zaman çıkmıştır?",
"{name} ilk olarak ringe ne zaman çıkmıştır?"
],
"ringadları": [
"{name}'{_suffix1} ring adları nelerdir?",
"{name}'{_suffix1} ring adları nedir?",
"{name}'{_suffix1} ring adı nedir?"
],
"eğiten": [
"{name}'{_suffix1} eğiten kişiler kimlerdir?",
"{name}'{_suffix1} eğiten kişilerin adları nelerdir?",
"{name}'{_suffix1} eğiten kişilerin adları nedir?"
],
"eğitildiğiyer": [
"{name} nerede eğitilmiştir?",
"{name} nerede eğitildi?",
"{name} eğitildiği yer neresidir?",
"{name} eğitildiği yer neresi"
],
"yaşadığıyer": [
"{name} nerede yaşamaktadır?",
"{name} yaşadığı yer neresidir?",
"{name} yaşadığı yer neresi"
],
"emekliliği": [
"{name} hangi yılda emekli olmuştur?",
"{name} hangi yılda emekli olmuştu?"
]
},
"Futbolcu": {
"pozisyon": [
"{name} hangi pozisyonda oynamaktadır?",
"{name} hangi pozisyonda oynar?",
"{name} hangi pozisyonda oynuyor?",
"{name} hangi mevkiide oynamaktadır?",
"{name} hangi mevkiide oynar?",
"{name} hangi mevkiide oynuyor?",
"{name}'{_suffix1} oynadığı pozisyon nedir?",
"{name}'{_suffix1} oynamakta olduğu pozisyon nedir?",
"{name}'{_suffix1} pozisyonu nedir?",
"{name}'{_suffix1} oyun pozisyonu nedir?",
"{name}'{_suffix1} maçlardaki pozisyonu nedir?"
],
"tamadı": [
"{name}'{_suffix1} tam adı nedir?"
],
"kulüp1": [
"{name}'{_suffix1} ilk oynadığı takım hangisidir?",
"{name}'{_suffix1} oynadığı ilk takım hangisidir?",
"{name}'{_suffix1} ilk takımı hangisi?",
"{name}'{_suffix1} ilk takımı hangisidir?",
"{name}'{_suffix1} kariyerine başladığı takım hangisidir?",
"{name}'{_suffix1} kariyerindeki ilk takım hangisidir?",
"{name} profesyonel kariyerine hangi takımda başlamıştır?",
"{name} profesyonel kariyerine hangi takımla başlamıştır?",
"{name} profesyonel kariyerine hangi takımla adım atmıştır?",
"{name}'{_suffix1} profesyonel kariyerindeki ilk takım hangisidir?",
"{name}'{_suffix1} profesyonel kariyerindeki ilk takım hangisiydi?",
"{name}'{_suffix1} profesyonel kariyerindeki ilk takım hangisi?",
"{name}'{_suffix1} profesyonel kariyerindeki ilk takım nedir?",
"{name}'{_suffix1} ilk oynadığı kulüp hangisidir?",
"{name}'{_suffix1} oynadığı ilk kulüp hangisidir?",
"{name}'{_suffix1} ilk kulübü hangisi?",
"{name}'{_suffix1} ilk kulübü hangisidir?",
"{name}'{_suffix1} kariyerine başladığı kulüp hangisidir?",
"{name}'{_suffix1} kariyerindeki ilk kulüp hangisidir?",
"{name} profesyonel kariyerine hangi kulüpte başlamıştır?",
"{name} profesyonel kariyerine hangi kulüple başlamıştır?",
"{name} profesyonel kariyerine hangi kulüple adım atmıştır?",
"{name}'{_suffix1} profesyonel kariyerindeki ilk kulüp hangisidir?",
"{name}'{_suffix1} profesyonel kariyerindeki ilk kulüp hangisiydi?",
"{name}'{_suffix1} profesyonel kariyerindeki ilk kulüp hangisi?",
"{name}'{_suffix1} profesyonel kariyerindeki ilk kulüp nedir?"
],
"kulüpyıl1": [
"{name} profesyonel kariyerine ne zaman başlamıştır?",
"{name}'{_suffix1} profesyonel kariyerine başlangıcı ne zamandır?",
"{name}'{_suffix1} profesyonel kariyerine başlangıcı ne zamandı",
"{name}'{_suffix1} profesyonel kariyerine başlangıcı ne zaman",
"{name}'{_suffix1} profesyonel kariyerine başlangıcı ne zaman oldu?",
"{name}'{_suffix1} profesyonel kariyerine başlangıcı ne zaman olmuştur?",
"{name} profesyonel kariyerine hangi yılda başladı?",
"{name} profesyonel kariyerine hangi yılda başlamıştı?",
"{name} profesyonel kariyerine hangi yılda başlamıştır?",
"{name} profesyonel kariyerine ne zaman başladı?",
"{name} profesyonel kariyerine ne zaman başlamıştı?",
"{name} profesyonel kariyerine ne zaman başlamıştır?",
"{name}'{_suffix1} ilk takımında ne zaman oynamıştır?",
"{name}'{_suffix1} ilk takımında ne zamanlar oynamıştır?",
"{name}'{_suffix1} ilk takımında hangi yıllar oynamıştır?",
"{name}'{_suffix1} ilk takımında hangi yıllar oynadı?",
"{name}'{_suffix1} ilk takımında oynadığı zaman aralığı nedir?",
"{name}'{_suffix1} ilk takımında oynadığı zaman aralığı nedir?",
"{name}'{_suffix1} ilk oynadığı takımda ne zaman oynamıştır?",
"{name}'{_suffix1} ilk oynadığı takımda hangi yıllar arasında oynamıştır?",
"{name}'{_suffix1} ilk oynadığı takımda hangi yıllar arasında oynamıştı?",
"{name}'{_suffix1} ilk oynadığı takımda hangi yıllar arasında oynadı?"
],
"kulüp2": [
"{name}'{_suffix1} ikinci oynadığı takım hangisidir?",
"{name}'{_suffix1} oynadığı ikinci takım hangisidir?",
"{name}'{_suffix1} oynadığı ikinci takım nedir?",
"{name}'{_suffix1} ikinci takımı nedir?",
"{name}'{_suffix1} ikinci takımı hangisi?",
"{name}'{_suffix1} ikinci takımı hangisidir?",
"{name}'{_suffix1} kariyerindeki ikinci takım hangisidir?",
"{name}'{_suffix1} kariyerindeki ikinci takım nedir?",
"{name}'{_suffix1} profesyonel kariyerindeki ikinci takım hangisidir?",
"{name}'{_suffix1} profesyonel kariyerindeki ikinci takım hangisiydi?",
"{name}'{_suffix1} profesyonel kariyerindeki ikinci takım hangisi?",
"{name}'{_suffix1} profesyonel kariyerindeki ikinci takım nedir?"
],
"kulüpyıl2": [
"{name}'{_suffix1} ikinci takımında ne zaman oynamıştır?",
"{name}'{_suffix1} ikinci takımında ne zamanlar oynamıştır?",
"{name} ikinci takımında hangi yıllar oynamıştır?",
"{name} ikinci takımında hangi yıllar oynadı?",
"{name}'{_suffix1} ikinci takımında oynadığı zaman aralığı nedir?",
"{name} ikinci takımında oynadığı zaman aralığı nedir?",
"{name}'{_suffix1} ikinci oynadığı takımda ne zaman oynamıştır?",
"{name} ikinci oynadığı takımda ne zaman oynamıştır?",
"{name}'{_suffix1} ikinci oynadığı takımda hangi yıllar arasında oynamıştır?",
"{name}'{_suffix1} ikinci oynadığı takımda hangi yıllar arasında oynamıştı?",
"{name}'{_suffix1} ikinci oynadığı takımda hangi yıllar arasında oynadı?",
"{name}'{_suffix1} oynadığı ikinci takımda hangi yıllar arasında oynadı?",
"{name}'{_suffix1} oynadığı ikinci takımda hangi yıllar arasında oynamıştır?",
"{name}'{_suffix1} oynadığı ikinci takımda hangi yıllar arasında oynamıştı?",
"{name}'{_suffix1} oynadığı ikinci takımda hangi periyotta oynamıştı?",
"{name}'{_suffix1} oynadığı ikinci takımda hangi periyotta oynamıştır?"
]
},
"Kişi": {
"meslek": [
"{name}'{_suffix1} mesleği nedir?",
"{name} ne iş yapar?",
"{name} işi nedir?"
],
"aktifyılları": [
"{name} hangi yıllar arası aktifti?",
"{name} hangi yıllar arası aktif kalmıştır?",
"{name}'{_suffix1} aktif yılları nedir?"
],
"yer": [
"{name} nerede yaşamaktadır?",
"{name} yaşadığı yer neresidir?",
"{name} yaşadığı yer neresi"
],
"etinyılları": [
"{name} hangi yıllar arası etkindi?",
"{name} hangi yıllar arası etkin kalmıştır?",
"{name}'{_suffix1} etkin yılları nedir?"
],
},
"Kraliyet": {
"hükümsüresi": [
"{name} hangi yıllar arası hüküm sürmüştür?",
"{name} hangi yıllar arası hüküm sürdü?",
"{name} hangi yıllar arasında hüküm sürdü?",
"{name} hangi yıllar arasında hüküm sürmüştür?"
],
"sonragelen": [
"{name}'{_suffix3} sonra gelen hükümdar kimir?",
"{name}'{_suffix3} sonra hangi hükümdar gelmiştir?",
"{name}'{_suffix3} sonra tahta kim geçmiştir?",
"{name}'{_suffix1} ardından kim hüküm sürmüştür?",
"{name}'{_suffix1} ardından kim tahta geçmiştir?"
],
"öncegelen": [
"{name}'{_suffix3} önce gelen hükümdar kimdir?",
"{name}'{_suffix3} önce kim hüküm sürmekteydi?",
"{name} kimin ardından tahta çıkmıştır?",
"{name} hangi hükümdardan sonra gelmiştir?",
"{name} hangi tahtı kimden devralmıştır?"
],
"babası": [
"{name}'{_suffix1} babası kimdir?",
"{name}'{_suffix1} babasının adı nedir?"
],
"hanedan": [
"{name}'{_suffix1} hanedanı nedir?",
"{name} hangi hanedandandır?"
],
"annesi": [
"{name}'{_suffix1} annesi kimdir?",
"{name}'{_suffix1} annesinin adı nedir?"
],
},
"Makam sahibi": {
"makam": [
"{name} hangi makama sahiptir?",
"{name} hangi makamdadır?",
"{name} hangi makamda görev almıştır?",
"{name}'{_suffix1} görevi nedir"
],
"dönembaşı": [
"{name} göreve ne zaman başlamıştır?",
"{name} ilk görev yılı nedir?",
"{name} göreve ne zaman gelmiştir?",
"{name} dönemi ne zaman başlamıştır?"
],
"öncegelen": [
"{name}'{_suffix3} önce gelen kişi kimdir?",
"{name}'{_suffix3} önce gelen kişi kim?",
"{name}'{_suffix3} önce görev alan kişi kim?",
"{name}'{_suffix3} önce görev alan kişi kimdir?"
],
"dönemsonu": [
"{name} en son hangi yıl görev yapmıştır?",
"{name} görevini ne zaman devretmiştir?",
"{name}'{_suffix1} son görev yılı ne zamandır?",
"{name}'{_suffix1} görevi ne zaman sona ermiştir?"
],
"sonragelen": [
"{name}'{_suffix3} sonra gelen kişi kimdir?",
"{name}'{_suffix3} sonra gelen kişi kim?",
"{name}'{_suffix3} sonra görev alan kişi kim?",
"{name}'{_suffix3} sonra görev alan kişi kimdir?"
],
"partisi": [
"{name} hangi partinin mensubudur?",
"{name}'{_suffix1} partisi nedir?",
]
},
"Manken": {
"gözrengi": [
"{name}'{_suffix1} göz rengi nedir?",
"{name}'{_suffix1} gözü hangi renktir"
],
"saçrengi": [
"{name}'{_suffix1} saç rengi nedir?",
"{name}'{_suffix1} saçı hangi renktir?"
],
"ulus": [
"{name} nerelidir?",
"{name} hangi ülke asıllıdır?",
"{name} hangi millettendir?",
"{name}'{_suffix1} uyruğu neresidir"
],
"boy": [
"{name}'{_suffix1} boyu ne kadardır?",
"{name}'{_suffix1} boyu nedir?",
]
},
"Müzik sanatçısı": {
"artalan": [
"{name}'{_suffix1} art alanı nedir?",
"{name} hangi türden gelmektedir"
],
"tarz": [
"{name} hangi tür müzik yapar?",
"{name}'{_suffix1} hangi tarzda müzik yapmaktadır?",
"{name}'{_suffix1} hangi tarzda müzik yapmakta",
"{name}'{_suffix1} hangi tarzda müzik yapar?",
"{name}'{_suffix1} hangi tarzlarda müzik yapmaktadır?",
"{name}'{_suffix1} müzik tarzı nedir",
"{name}'{_suffix1} tarzı nedir",
"{name}'{_suffix1} ne tarz müzik yapmaktadır?",
"{name}'{_suffix1} ne tarzda müzik yapar?",
"{name}'{_suffix1} hangi tarzda müzik yapmaktadır?",
"{name}'{_suffix1} ne tarzda müzik yapmaktadır?",
],
"etkinyıllar": [
"{name} hangi yıllarda aktif olarak müzik yapmıştır?",
"{name} hangi yıllarda müzisyenlik yapmıştı?",
"{name} aktif yılları nelerdir?"
"{name} aktif yılları nedir?"
"{name} etkin yılları nelerdir?"
"{name} etkin yılları nedir?"
"{name} etkin olduğu yıllar nedir?"
],
"meslek": [
"{name}'{_suffix1} gerçek mesleği",
"{name}'{_suffix1} gerçek mesleği nedir?",
"{name}'{_suffix1} mesleği nedir?",
"{name} mesleği nedir?",
"{name} hangi mesleğe mensuptur?",
"{name} hangi mesleği yapıyordu?",
"{name} hangi işi yapıyor?",
"{name} hangi işi yapıyordu?"
],
"plakşirketi": [
"{name} hangi plak şirketiyle çalışmaktadır?",
"{name} hangi plak şirketiyle çalışıyordu?",
"{name} hangi plak şirketiyle çalışıyor?",
],
"köken": [
"{name} nerelidir?",
"{name} hangi ülke asıllıdır?",
"{name} hangi millettendir?",
"{name}'{_suffix1} hangi millettendir?",
"{name} kökeni nedir?",
],
"çalgı": [
"{name} hangi çalgıyı çalmaktadır?",
"{name} hangi çalgıyı çalar?",
"{name} hangi çalgıyı çalıyor?",
"{name} hangi müzik aletini çalmaktadır?",
"{name} hangi müzik aletini çalar?",
"{name} hangi müzik aletini çalıyor?"
]
},
"Oyuncu": {
"yer": [
"{name} nerede yaşamaktadır?",
"{name} yaşadığı yer neresidir?",
"{name} ikamet ettiği yer neresidir?"
],
"meslek": [
"{name}'{_suffix1} gerçek mesleği nedir?",
"{name}'{_suffix1} mesleği nedir?",
"{name} mesleği nedir?",
"{name} hangi mesleğe mensuptur?",
"{name} hangi işi yapıyor?",
],
"etkinyıllar": [
"{name} hangi yıllarda aktif olarak rol almıştır?",
"{name} aktif yılları nelerdir?"
"{name} aktif yılları nedir?"
"{name} etkin yılları nelerdir?"
"{name} etkin yılları nedir?"
"{name} etkin olduğu yıllar nedir?"
],
"evlilik": [
"{name} eşi kimdir?"
"{name}'{_suffix1} eşi kimdir?",
"{name} kiminle evlidir?",
"{name} kiminle evlendi?"
],
"ulus": [
"{name} nerelidir?",
"{name} hangi ülke asıllıdır?",
"{name} hangi millettendir?",
"{name}'{_suffix1} hangi millettendir?"
]
},
"Profesyonel güreşçi": {
"debut": [
"{name}'{_suffix1} ilk ringe çıkışı ne zamandır?",
"{name}'{_suffix1} ilk maça çıkışı ne zamandır?",
"{name} ilk maçına ne zaman çıkmıştır?",
"{name} ilk olarak ringe ne zaman çıkmıştır?"
],
"ringadları": [
"{name}'{_suffix1} ring adları nelerdir?",
"{name}'{_suffix1} ring adları nedir?",
"{name}'{_suffix1} ring adı nedir?"
],
"eğiten": [
"{name}'{_suffix1} eğiten kişiler kimlerdir?",
"{name}'{_suffix1} eğiten kişilerin adları nelerdir?",
],
"eş": [
"{name} kimle evlidir?",
"{name}'{_suffix1} evli olduğu kişi kimdir?",
],
"eğitildiğiyer": [
"{name} nerede eğitilmiştir?",
"{name} nerede eğitildi?",
"{name} eğitildiği yer neresidir?",
"{name} eğitildiği yer neresi"
],
"çocuklar": [
"{name}'{_suffix1} çocukları kimlerdir?",
"{name}'{_suffix1} çocukları kim?",
"{name}'{_suffix1} çocuklarının adları nedir?",
"{name}'{_suffix1} çocuklarının adları nelerdir?",
"{name} çocukları kimdir?",
"{name} çocuklarının isimleri nedir?",
"{name} çocuklarının isimleri?"
]
},
"Sanatçı": {
"alanı": [
"{name}'{_suffix1} alanı nedir?",
"{name} hangi alanda sanat yapmaktadır?",
"{name} hangi alanda sanat yapmaktaydı?",
"{name} hangi alanda sanat yapıyor?",
"{name} hangi alanda eser vermektedir"
],
"milliyeti": [
"{name} nerelidir?",
"{name} hangi ülke asıllıdır?",
"{name} hangi millettendir?",
"{name}'{_suffix1} hangi millettendir?"
],
"yer": [
"{name} nerede yaşamaktadır?",
"{name} yaşadığı yer neresidir?",
"{name} yaşadığı yer neresi"
],
"ünlüyapıtları": [
"{name}'{_suffix1} ünlü yapıtları nelerdir?",
"{name}'{_suffix1} ünlü yapıtları nedir?",
"{name}'{_suffix1} yarattığı ünlü yapıtlar nelerdir?",
"{name}'{_suffix1} yarattığı ünlü yapıtlar nedir?",
]
},
"Sporcu": {
"ülke": [
"{name} hangi ülke adına ter dökmektedir?",
"{name} hangi ülkenin sporcusudur?",
"{name} hangi ülke adına yarışmaktadır?",
],
"spor": [
"{name} hangi spor dalındadır?",
"{name} hangi spor dalında mücadele etmektedir?",
"{name} hangi sporu yapmaktadır?"
],
"uyruk": [
"{name} nerelidir?",
"{name} hangi ülke asıllıdır?",
"{name} hangi millettendir?",
"{name}'{_suffix1} hangi millettendir?"
],
"kei": [
"{name}'{_suffix1} en iyi derecesi nedir?",
"{name}'{_suffix1} kariyerinde elde ettiği en yüksek derece nedir?",
"{name}'{_suffix1} elde ettiği en yüksek başarı nedir?"
],
"ağırlık": [
"{name}'{_suffix1} kilosu nedir?",
"Ünlü sporcu {name} kaç kilodur?",
"{name}'{_suffix1} ağırlığı nedir?",
]
},
"Tenis sporcu": {
"vatandaşlık": [
"{name} hangi ülke vatandaşıdır?",
"{name} hangi ülkenin vatandaşıdır?",
"{name} nerenin vatandaşıdır?",
"{name}'{_suffix1} vatandaşlığı hangi ülkedendir?",
"{name}'{_suffix1} vatandaşı olduğu ülke hangisidir?",
"{name}'{_suffix1} vatandaşı olduğu ülke nedir?",
"{name}'{_suffix1} vatandaşlığına sahip olduğu ülke nedir?",
"{name}'{_suffix1} vatandaşlığına sahip olduğu ülke hangisidir?"
],
"enyükseksıralama": [
"{name}'{_suffix1} en iyi derecesi nedir?",
"{name}'{_suffix1} en yüksek sıralaması nedir?",
"{name}'{_suffix1} kariyerinde elde ettiği en yüksek sıralama nedir?",
"{name}'{_suffix1} elde ettiği en yüksek başarı nedir?"
],
"oyunstili": [
"{name}'{_suffix1} oyun stili nedir?",
"{name} hangi stilde oynamaktadır?"
],
"wimbledonsonuçları": [
"{name}'{_suffix1} Wimbledon sonuçları nedir?",
"{name} Wimbledon'da hangi sonucu elde etmiştir?",
"{name} Wimbledon sonuçları nasıldır?"
],
"amerikaaçıksonuçları": [
"{name}'{_suffix1} Amerika açık sonuçları nedir?",
"{name} Amerika açıkta hangi sonucu elde etmiştir?",
"{name} Amerika açık sonuçları nasıldır?"
],
"fransaaçıksonuçları": [
"{name}'{_suffix1} Fransa açık sonuçları nedir?",
"{name} Fransa açıkta hangi sonucu elde etmiştir?",
"{name} Fransa açık sonuçları nasıldır?"
],
"avustralyaaçıksonuçları": [
"{name}'{_suffix1} Avustralya açık sonuçları nedir?",
"{name} Avustralya açıkta hangi sonucu elde etmiştir?",
"{name} Avustralya açık sonuçları nasıldır?"
],
"toplamkupa": [
"{name}'{_suffix1} kazandığı toplam kupa sayısı kaçtır?",
"{name} kariyerinde toplam kaç tane kupa kazanmıştır?",
"{name} toplam kaç adet kupa kazanmıştır?",
"{name} kaç adet kupaya sahiptir?",
"{name}'{_suffix1} sahip olduğu kupa sayısı nedir?"
],
"yaşadığıyer": [
"{name} nerede yaşamaktadır?",
"{name} yaşadığı yer neresidir?"
]
},
"Voleybolcu": {
"pozisyon": [
"{name} hangi pozisyonda oynamaktadır?",
"{name} hangi pozisyonda oynar?",
"{name} hangi pozisyonda oynuyor?",
"{name} hangi mevkiide oynamaktadır?",
"{name} hangi mevkiide oynar?",
"{name} hangi mevkiide oynuyor?",
"{name}'{_suffix1} oynadığı pozisyon nedir?",
"{name}'{_suffix1} oynamakta olduğu pozisyon nedir?",
"{name}'{_suffix1} pozisyonu nedir?",
"{name}'{_suffix1} oyun pozisyonu nedir?",
"{name}'{_suffix1} maçlardaki pozisyonu nedir?"
],
"milliyeti": [
"{name} nerelidir?",
"{name} hangi ülke asıllıdır?",
"{name} hangi millettendir?",
"{name}'{_suffix1} uyruğu nedir?"
],
"kulüptakım": [
"{name} hangi takımlarda oynamıştır?",
"{name} geçmişte hangi takımlarda oynamıştır?",
"{name} daha önce oynadığı takımlar nelerdir?",
"{name} bulunduğu takımlar hangileridir?",
],
"bulunduğukulüp": [
"{name} hangi takımda oynamaktadır?",
"{name} şu an hangi takımda oynamaktadır?",
"{name} hangi takımda oynuyor?",
"{name} şu an hangi takımda oynuyor?",
"{name} hangi takımın oyuncusudur?",
"{name} şu an hangi takımın oyuncusudur?",
"{name} hangi takımdadır?",
"{name} şu an hangi takımdadır?",
"{name}'{_suffix1} oynadığı takım hangisidir?",
"{name}'{_suffix1} takımı hangisidir?",
"{name}'{_suffix1} güncel takımı hangisidir?",
"{name}'{_suffix1} şu an oynadığı takım hangisidir?"
],
"numarası": [
"{name}'{_suffix1} forma numarası nedir?",
"{name}'{_suffix1} forması kaç numaradır?",
"{name}'{_suffix1} forması kaç numaraydı?",
"{name} kaç numaralı formayı giymektedir?",
"{name} kaç numaralı formayı giymişti?",
"{name} hangi forma numarasına sahiptir?",
"{name}'{_suffix1} sahip olduğu forma numarası nedir?"
],
"millitakım": [
"{name} hangi milli takımda oynamaktadır?",
"{name} hangi milli takımda oynuyor?",
"{name} hangi milli takımda oynamakta?",
"{name} hangi ülkenin milli takımında oynamaktadır?",
"{name} hangi ülkenin milli takımında oynadı?",
"{name} oynadığı milli takım hangisidir?",
"{name}'{_suffix1} oynadığı milli takım hangisidir?",
"{name}'{_suffix1} oynadığı milli takım hangisi?",
"{name}'{_suffix1} oynadığı milli takım nedir?",
"{name} nerenin milli takımında oynamaktadır?",
"{name} nerenin milli takımında oynamıştı?",
"{name} nerenin milli takımında oynuyor?",
"{name} nerenin milli takımında oynadı?"
],
},
"Yazar": {
"meslek": [
"{name}'{_suffix1} gerçek mesleği nedir?",
"{name}'{_suffix1} mesleği nedir?",
"{name} mesleği nedir?",
"{name} hangi mesleğe mensuptur?",
"{name} hangi mesleği yapıyordu?",
"{name} hangi işi yapıyor?",
"{name} hangi işi yapıyordu?"
],
"tür": [
"{name} hangi türde eser vermiştir?",
"{name} hangi türde eserler vermiştir?",
"{name} hangi türde yazmıştır?",
"{name} hangi türde yazmaktaydır?",
"{name}'{_suffix1} eserleri hangi türdedir?",
"{name}'{_suffix1} eser türleri?",
"{name}'{_suffix1} eserleri hangi türdedir?",
"{name}'{_suffix1} eserleri hangi türdeydi?",
"{name}'{_suffix1} eser türü nedir?"
],
"dönem": [
"{name} hangi dönemde eser vermiştir?",
"{name} hangi dönemde eserler vermiştir?",
"{name} hangi dönem yazarıdır?",
"{name} eser verdiği dönem nedir?",
"{name} eser verdiği dönem hangisidir?"
],
"ilkeser": [
"{name}'{_suffix1} ilk eseri nedir?",
"{name}'{_suffix1} ilk eseri hangisidir?",
"{name}'{_suffix1} ilk eserinin adı nedir?",
"{name}'{_suffix1} verdiği ilk eserinin adı nedir?",
"{name}'{_suffix1} verdiği ilk eseri nedir?",
"{name}'{_suffix1} verdiği ilk eseri hangisidir?"
"Yazar {name}'{_suffix1} ilk eseri nedir?"
]
}
}
| {"/qg-model/src/model/__init__.py": ["/qg-model/src/model/model.py"], "/qg-model/src/__init__.py": ["/qg-model/src/get_best_sentences.py"]} |
63,779 | alpgokcek/turkish-qg-model | refs/heads/main | /Preliminary_QG/CommonPatterns.py | class CommonPatterns:
# Question templates shared by every infobox category (birth/death facts).
# Placeholders filled in by the generator via str.format:
#   {name}      - the entity's name
#   {_suffix1}, {_suffix2} - apostrophe suffixes attached after {name}' —
#     presumably Turkish possessive/dative endings chosen for vowel harmony;
#     TODO confirm against the suffix-selection code.
patterns = {
    # Birth place ("doğum yeri").
    "doğumyeri": [
        "{name} nerede doğmuştur?",
        "{name}'{_suffix1} doğum yeri neresidir?",
        "{name}'{_suffix1} memleketi nedir?",
        "{name}'{_suffix1} doğduğu yer neresidir?",
        "{name} hangi ülkede doğmuştur?",
        "{name}'{_suffix1} doğduğu ülke nedir?"
    ],
    # Birth date ("doğum tarihi").
    "doğumtarihi": [
        "{name}'{_suffix1} doğum tarihi nedir?",
        "{name}'{_suffix1} doğum günü ne zamandır?",
        "{name}'{_suffix1} doğum günü nedir?",
        "{name} ne zaman doğmuştur?",
        "{name} gözlerini hayata ne zaman açmıştır?"
    ],
    # Death place ("ölüm yeri").
    "ölümyeri": [
        "{name} nerede ölmüştür?",
        "{name}'{_suffix1} ölüm yeri neresidir?",
        "{name} ölüm yeri neresidir?",
        "{name}'{_suffix1} öldüğü yer neresidir?"
    ],
    # Death date ("ölüm tarihi").
    "ölümtarihi": [
        "{name}'{_suffix1} ölüm tarihi nedir?",
        "{name}'{_suffix1} ölüm yılı nedir?",
        "{name} ne zaman ölmüştür?",
        "{name} hayatını ne zaman kaybetmiştir?",
        "{name} gözlerini hayata ne zaman kapamıştır?"
    ]
}
| {"/qg-model/src/model/__init__.py": ["/qg-model/src/model/model.py"], "/qg-model/src/__init__.py": ["/qg-model/src/get_best_sentences.py"]} |
63,780 | alpgokcek/turkish-qg-model | refs/heads/main | /qg-model/src/preprocess/main.py | import random
import json
import pickle
from pprint import pprint
import numpy as np
import torch
from pytorch_transformers import BertTokenizer
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer
class Preprocess:
    """Load a SQuAD-style JSON dump and tokenize it for BERT.

    The actual work is done by the module-level helpers
    _extract_squad_data / _tokenize_data; this class wires them together
    and persists the resulting tensor dict.
    """

    def __init__(self, squad_path, bert_model):
        # squad_path: JSON dataset path; bert_model: HF tokenizer/model name.
        with open(squad_path, 'r') as read_file:
            data = json.load(read_file)
        input, output = _extract_squad_data(data)
        self.data = _tokenize_data(input, output, bert_model)

    def save(self, path):
        # Persist the tokenized tensors with pickle.
        with open(path, 'wb') as write_file:
            pickle.dump(self.data, write_file)
def _extract_squad_data(data):
data = data['data']
input = []
output = []
print(len(data))
for doc in data:
context = doc['description'][:512]
for qas in doc['data']:
answer = qas['answer']
question = random.choice(qas['questions'])
input.append((context, answer))
output.append(question)
input = input[:int(0.1 * len(input))]
output = output[:int(0.1 * len(output))]
return input, output
def _tokenize_data(input, output, bert_model):
    """Tokenize (context, answer) pairs and questions with the BERT tokenizer.

    Returns the encoder batch dict extended with:
      output_ids / output_len -- tokenized questions and their true lengths
      input_len               -- true (unpadded) input lengths
    Rows longer than 512 tokens are dropped and both sides are trimmed to
    the longest surviving sequence.
    """
    tokenizer = BertTokenizer.from_pretrained(bert_model)
    # NOTE(review): pad_to_max_length=False (legacy API) is combined with
    # padding=True (current API) -- presumably padding=True wins; confirm
    # against the pinned transformers version.
    data = tokenizer.batch_encode_plus(input, max_length = 512, pad_to_max_length=False, padding=True, return_tensors='pt')
    out_dict = tokenizer.batch_encode_plus(output, max_length = 512, pad_to_max_length=False, padding=True, return_tensors='pt')
    data['output_ids'] = out_dict['input_ids']
    # attention_mask is 1 on real tokens, so its row-sum is the unpadded length.
    data['output_len'] = out_dict['attention_mask'].sum(dim=1)
    data['input_len'] = data['attention_mask'].sum(dim=1)
    # Boolean row mask: keep only inputs that fit BERT's 512-token window.
    idx = (data['input_len'] <= 512)
    in_m = max(data['input_len'][idx])
    out_m = max(data['output_len'][idx])
    data['input_ids'] = data['input_ids'][idx, :in_m]
    data['attention_mask'] = data['attention_mask'][idx, :in_m]
    data['token_type_ids'] = data['token_type_ids'][idx, :in_m]
    data['input_len'] = data['input_len'][idx]
    data['output_ids'] = data['output_ids'][idx, :out_m]
    data['output_len'] = data['output_len'][idx]
    return data
if __name__ == "__main__":
    # Preprocess the three TurQuAD "fp" splits with the Turkish BERT
    # tokenizer and cache the tensor dicts under data/bert/.
    train_dataset = Preprocess('../../data/turquad/train_fp.json', 'dbmdz/bert-base-turkish-cased')
    train_dataset.save(f'../../data/bert/dbmdz/bert-base-turkish-cased/train_fp')
    test_dataset = Preprocess('../../data/turquad/test_fp.json', 'dbmdz/bert-base-turkish-cased')
    test_dataset.save(f'../../data/bert/dbmdz/bert-base-turkish-cased/test_fp')
    valid_dataset = Preprocess('../../data/turquad/valid_fp.json', 'dbmdz/bert-base-turkish-cased')
    valid_dataset.save(f'../../data/bert/dbmdz/bert-base-turkish-cased/valid_fp')
| {"/qg-model/src/model/__init__.py": ["/qg-model/src/model/model.py"], "/qg-model/src/__init__.py": ["/qg-model/src/get_best_sentences.py"]} |
63,781 | alpgokcek/turkish-qg-model | refs/heads/main | /qg-model/src/main.py | import logging
logging.basicConfig(filename="example.log", level=logging.DEBUG)
logging.getLogger('transformers').setLevel(logging.WARNING)
log = logging.getLogger(__name__)
import time
import math
import os
import torch
from torch import optim, nn, cuda
from transformers import AdamW
from torch.utils.data import DataLoader
from transformers import BertModel
from config import checkpoint, bert_path, mb, dl_workers, device, bert_hidden_size, decoder_hidden_size, \
bert_vocab_size, decoder_input_size, dropout, epochs, clip, model_path, bert_model, encoder_trained, \
attention_hidden_size, num_layers, weight_decay, betas, lr, momentum
from model.utils import load_checkpoint, init_weights, save_checkpoint, enable_reproducibility, model_size, no_grad
from model import Attention, Decoder, Seq2Seq, BeamSearch
from data import BertDataset
from run import train, eval
from run.utils.time import epoch_time
if __name__ == '__main__':
    # Entry point: train the BERT-encoder + attention-GRU-decoder question generator.
    log = logging.getLogger(__name__)
    log.info(f'Running on device {cuda.current_device() if device=="cuda" else "cpu"}')
    enable_reproducibility(121314)
    # Pre-tokenized TurQuAD splits (see data.Preprocess).
    train_set = BertDataset(bert_path / bert_model / 'train')
    valid_set = BertDataset(bert_path / bert_model / 'valid')
    training_loader = DataLoader(train_set, batch_size=mb, shuffle=True,
                                 num_workers=dl_workers, pin_memory=True if device == 'cuda' else False)
    valid_loader = DataLoader(valid_set, batch_size=mb, shuffle=True,
                              num_workers=dl_workers, pin_memory=True if device == 'cuda' else False)
    print(len(training_loader))
    # Decoder-side model; the BERT encoder stays a separate module so it can
    # be frozen (encoder_trained) and checkpointed independently.
    attention = Attention(bert_hidden_size, decoder_hidden_size)
    decoder = Decoder(bert_vocab_size, decoder_input_size, bert_hidden_size, decoder_hidden_size, dropout, attention, device)
    model = Seq2Seq(decoder, device)
    encoder = BertModel.from_pretrained(model_path / bert_model)
    encoder.to(device)
    # Only decoder parameters are optimized here.
    optimizer = optim.Adam(decoder.parameters())
    # reduction='none' so eval/train can re-aggregate the loss per sentence.
    criterion = nn.CrossEntropyLoss(ignore_index=0, reduction='none')
    if checkpoint is not None:
        # Resume: restore model/optimizer state and loss history
        # (bleu_list is loaded but unused below).
        last_epoch, model_dict, optim_dict, valid_loss_list, train_loss_list, bleu_list = load_checkpoint(checkpoint)
        last_epoch += 1
        model.load_state_dict(model_dict)
        best_valid_loss = min(valid_loss_list)
        optimizer.load_state_dict(optim_dict)
        # Optimizer state tensors come back on CPU; move them to the target device.
        for state in optimizer.state.values():
            for k, v in state.items():
                if torch.is_tensor(v):
                    state[k] = v.to(device)
        log.info(f'Using checkpoint {checkpoint}')
    else:
        last_epoch = 0
        valid_loss_list, train_loss_list = [], []
        model.apply(init_weights)
    model.to(device)
    for epoch in range(last_epoch, epochs):
        start_time = time.time()
        print("epoch:", epoch)
        log.info(f'Epoch {epoch+1} training')
        train_loss = train(model, device, training_loader, optimizer, criterion, clip, encoder, encoder_trained)
        log.info(f'\nEpoch {epoch + 1} validation')
        valid_loss, bleu_score = eval(model, device, valid_loader, criterion, encoder)
        train_loss_list.append(train_loss)
        valid_loss_list.append(valid_loss)
        end_time = time.time()
        print("time took:", end_time - start_time)
        epoch_mins, epoch_secs = epoch_time(start_time, end_time)
        # Checkpoint decoder + training history every epoch.
        save_checkpoint(model_path / f'decoder/model0epoch{epoch}', epoch, model, optimizer, valid_loss_list, train_loss_list, bleu_score)
        # BUG FIX: the original template had four placeholders but only three
        # format() arguments, which raises IndexError on the first epoch.
        # Intended layout: <model_path>/<bert_model>/model0epoch<epoch>.
        tmp_path = "{}/{}/model0epoch{}".format(model_path, bert_model, epoch)
        os.mkdir(tmp_path)  # parent dirs exist: setup() saved BERT under model_path/bert_model
        encoder.save_pretrained(tmp_path)
        log.info(f'\nEpoch: {epoch + 1:02} completed | Time: {epoch_mins}m {epoch_secs}s')
        log.info(f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}')
        log.info(f'\t Val. Loss: {valid_loss:.3f} | Val. PPL: {math.exp(valid_loss):7.3f} | Val. BLEU {bleu_score}\n\n')
| {"/qg-model/src/model/__init__.py": ["/qg-model/src/model/model.py"], "/qg-model/src/__init__.py": ["/qg-model/src/get_best_sentences.py"]} |
63,782 | alpgokcek/turkish-qg-model | refs/heads/main | /qg-model/src/config.py | import logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
import json
from pathlib import Path
import torch
from transformers import BertModel
from data import Preprocess
def setup(bert_model, model_path, turquad_path, bert_path):
    """One-time setup for a BERT variant: download weights, preprocess data.

    A marker token is appended to the local '.setup' file so repeated runs
    are no-ops.
    """
    log = logging.getLogger(__name__)
    # Idiom fix: a context manager replaces the manual open/close pair, so
    # the handle is closed even if reading/writing the marker raises.
    with Path('.setup').open('a+') as file:
        file.seek(0, 0)  # 'a+' positions at EOF; rewind to read the marker line
        if bert_model in file.readline().split():
            log.info(f'Setup: {bert_model} setup already performed')
            return
        # NOTE(review): the marker is written *before* the work is done, so a
        # crash mid-setup leaves a stale marker (behavior kept from original).
        file.write(f' {bert_model} ')
    log.info(f'Setup: downloading {bert_model}')
    BertModel.from_pretrained(bert_model).save_pretrained(model_path / bert_model)
    log.info(f'Setup: preprocessing {bert_model} input')
    for x in turquad_path.iterdir():
        if x.is_file():
            # [11:-5] strips a fixed filename prefix and the '.json'
            # extension to get the split name -- TODO confirm against the
            # actual file naming in data/turquad.
            dataset = Preprocess(turquad_path / x.name, bert_model)
            dataset.save(bert_path / bert_model / x.name[11:-5])
    log.info(f'Setup: {bert_model} setup completed')
# runtime environment
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# paths
turquad_path = Path('../data/turquad')
bert_path = Path('../data/bert')     # preprocessed tensor cache
model_path = Path('../data/model/')  # saved encoder/decoder weights
bert_model = 'dbmdz/bert-base-turkish-cased'
# if not present download the right bert version and preprocess and save the dataset
setup(bert_model, model_path, turquad_path, bert_path)
# encoder parameter: read hidden/vocab sizes from the downloaded BERT config
with (model_path / bert_model / 'config.json').open('r') as f:
    conf = json.load(f)
bert_hidden_size = conf['hidden_size']
bert_vocab_size = conf['vocab_size']
# optimizer
weight_decay = 0.001
betas = (0.9, 0.999)  # only for Adam
lr = 0.00005
momentum = 0.9  # only for SGD
# decoder parameter
decoder_hidden_size = 512
decoder_input_size = 512  # embedding dimensions
attention_hidden_size = 512
num_layers = 1
clip = 1  # gradient-clipping max norm
dropout = 0.1
# training parameters
epochs = 3
mb = 28  # minibatch size
dl_workers = 0  # DataLoader worker processes
checkpoint = None  # path to resume from, or None for a fresh run
encoder_trained = False  # whether to fine-tune BERT during training
| {"/qg-model/src/model/__init__.py": ["/qg-model/src/model/model.py"], "/qg-model/src/__init__.py": ["/qg-model/src/get_best_sentences.py"]} |
63,783 | alpgokcek/turkish-qg-model | refs/heads/main | /qg-model/src/bs_eval.py | import logging
import torch
from nltk.translate.bleu_score import SmoothingFunction
from torch import nn
from nltk.translate import bleu
from transformers import BertTokenizer
# Word-level CE; defined here but never referenced below -- presumably kept
# for parity with run/eval.py.
pw_criterion = nn.CrossEntropyLoss(ignore_index=0)
# Tokenizer used only to detokenize ids for BLEU scoring.
tokenizer = BertTokenizer.from_pretrained('dbmdz/bert-base-turkish-cased')

def eval(model, device, dataloader, criterion, encoder):
    """Beam-search evaluation: returns (avg loss, avg BLEU) over the loader.

    NOTE(review): epoch_loss is never accumulated and `criterion` is unused,
    so the first element of the returned tuple is always 0.0 -- presumably
    intentional for the beam-search variant (no token-level logits); confirm
    with callers.
    """
    log = logging.getLogger(__name__)
    model.eval()
    encoder.eval()
    epoch_loss = 0
    epoch_bleu = 0
    with torch.no_grad():
        for i, (input_, output_) in enumerate(dataloader):
            input_data, input_length = input_
            output_data, output_length = output_
            input_ids, token_type_ids, attention_mask = input_data
            # Contextual states from the (frozen) BERT encoder.
            bert_hs = encoder(input_ids.to(device), token_type_ids=token_type_ids.to(device),
                              attention_mask=attention_mask.to(device))
            # Beam-search decoder returns token ids directly (no logits).
            prediction = model(bert_hs[0])
            bleu = bleu_score(prediction, output_data.to(device))
            epoch_bleu += bleu
    return epoch_loss / len(dataloader), epoch_bleu / len(dataloader)
def bleu_score(prediction, ground_truth):
    """Average sentence BLEU-4 between predicted and reference id sequences.

    Each row of both tensors is detokenized, truncated at the first '[SEP]'
    and stripped of its first token before scoring.  Rows whose scoring
    raises ZeroDivisionError are printed and contribute 0 to the average.
    """
    total = 0
    for ref_ids, hyp_ids in zip(ground_truth, prediction):
        ref = tokenizer.convert_ids_to_tokens(ref_ids.tolist())
        hyp = tokenizer.convert_ids_to_tokens(hyp_ids.tolist())
        ref_end = ref.index('[SEP]') if '[SEP]' in ref else len(ref)
        hyp_end = hyp.index('[SEP]') if '[SEP]' in hyp else len(hyp)
        try:
            total += bleu([ref[1:ref_end]], hyp[1:hyp_end], [0.25, 0.25, 0.25, 0.25],
                          smoothing_function=SmoothingFunction().method4)
        except ZeroDivisionError:
            print(f'{ref_end} {ref}')
            print(f'{hyp_end} {hyp}')
    return total / prediction.size(0)
| {"/qg-model/src/model/__init__.py": ["/qg-model/src/model/model.py"], "/qg-model/src/__init__.py": ["/qg-model/src/get_best_sentences.py"]} |
63,784 | alpgokcek/turkish-qg-model | refs/heads/main | /Preliminary_QG/MinifiedCommonPatterns.py | class MinifiedCommonPatterns:
patterns = {
"doğumyeri": [
"{name}'{_suffix1} doğduğu yer neresidir?"
],
"doğumtarihi": [
"{name}'{_suffix1} doğum tarihi nedir"
],
"ölümyeri": [
"{name}'{_suffix1} öldüğü yer neresidir"
],
"ölümtarihi": [
"{name}'{_suffix1} ölüm tarihi nedir"
]
}
| {"/qg-model/src/model/__init__.py": ["/qg-model/src/model/model.py"], "/qg-model/src/__init__.py": ["/qg-model/src/get_best_sentences.py"]} |
63,785 | alpgokcek/turkish-qg-model | refs/heads/main | /qg-model/src/run/eval.py | import logging
import torch
from nltk.translate.bleu_score import SmoothingFunction
from torch import nn
from nltk.translate import bleu
from transformers import BertTokenizer
# Word-averaged CE used for the returned/aggregated loss.
pw_criterion = nn.CrossEntropyLoss(ignore_index=0)
# Tokenizer used only to detokenize ids for logging and BLEU.
tokenizer = BertTokenizer.from_pretrained('dbmdz/bert-base-turkish-cased')

def eval(model, device, dataloader, criterion, encoder):
    """Greedy-decoding validation pass.

    Runs the decoder with teacher forcing disabled (ratio 0), logs one
    sample target/prediction roughly every 10% of the loader, and returns
    (mean word-level loss, mean BLEU) over all batches.  `criterion` must
    use reduction='none' (its losses are re-aggregated per sentence).
    """
    log = logging.getLogger(__name__)
    model.eval()
    encoder.eval()
    epoch_loss = 0
    epoch_bleu = 0
    with torch.no_grad():
        for i, (input_, output_) in enumerate(dataloader):
            input_data, input_length = input_
            output_data, output_length = output_
            input_ids, token_type_ids, attention_mask = input_data
            # BERT contextual states feed the attention decoder.
            bert_hs = encoder(input_ids.to(device), token_type_ids=token_type_ids.to(device),
                              attention_mask=attention_mask.to(device))
            # teacher_forcing_ratio=0: decode from the model's own predictions.
            prediction = model(bert_hs[0], output_data.to(device), 0)
            # Detokenize the batch's first sample for the periodic log line,
            # cutting both sequences at their first '[SEP]'.
            sample_t = tokenizer.convert_ids_to_tokens(output_data[0].tolist())
            sample_p = tokenizer.convert_ids_to_tokens(prediction[0].max(1)[1].tolist())
            idx1 = sample_t.index('[SEP]') if '[SEP]' in sample_t else len(sample_t)
            idx2 = sample_p.index('[SEP]') if '[SEP]' in sample_p else len(sample_p)
            bleu = bleu_score(prediction, output_data.to(device))
            trg_sent_len = prediction.size(1)
            # Drop position 0 (never predicted) and flatten for the criteria.
            prediction = prediction[:, 1:].contiguous().view(-1, prediction.shape[-1])
            output_data = output_data[:, 1:].contiguous().view(-1)
            pw_loss = pw_criterion(prediction, output_data.to(device))
            # Sentence-level loss: sum over tokens, then mean over the batch.
            loss = criterion(prediction, output_data.to(device))
            loss = loss.view(-1, trg_sent_len - 1)
            loss = loss.sum(1)
            loss = loss.mean(0)
            if i % int(len(dataloader) * 0.1) == int(len(dataloader) * 0.1) - 1:
                log.info(f'Batch {i} Sentence loss: {loss.item()} Word loss: {pw_loss.item()} BLEU score: {bleu}\n'
                         f'Target {sample_t[1:idx1]}\n'
                         f'Prediction {sample_p[1:idx2]}\n\n')
            epoch_loss += pw_loss.item()
            epoch_bleu += bleu
    return epoch_loss / len(dataloader), epoch_bleu / len(dataloader)
def bleu_score(prediction, ground_truth):
    """Average sentence BLEU-4; `prediction` holds logits (batch, len, vocab)."""
    hypotheses = prediction.max(2)[1]  # greedy argmax over the vocab dimension
    total = 0
    for ref_ids, hyp_ids in zip(ground_truth, hypotheses):
        ref = tokenizer.convert_ids_to_tokens(ref_ids.tolist())
        hyp = tokenizer.convert_ids_to_tokens(hyp_ids.tolist())
        # Score only up to the first '[SEP]', skipping the first token.
        ref_end = ref.index('[SEP]') if '[SEP]' in ref else len(ref)
        hyp_end = hyp.index('[SEP]') if '[SEP]' in hyp else len(hyp)
        total += bleu([ref[1:ref_end]], hyp[1:hyp_end], [0.25, 0.25, 0.25, 0.25],
                      smoothing_function=SmoothingFunction().method4)
    return total / hypotheses.size(0)
63,786 | alpgokcek/turkish-qg-model | refs/heads/main | /format.py | import re
# One-shot cleanup script: turn the raw token dump in qs-not.txt into
# readable sentences, gluing WordPiece '#'-continuations and Turkish
# apostrophe suffixes back onto their stems.
with open("qs-not.txt", 'r') as f:
    raw_content = f.read().strip()
# NOTE(review): non-raw pattern -- '\s'/'\[' are invalid str escapes and
# warn on modern Python; use r',\s\[' when touching this.
raw_content = re.split(',\s\[', raw_content)
raw_content.pop(0)  # first element is tensor specifier
raw_content[-1] = raw_content[-1][:-2]  # last two chars are closing paranthesis
for line in raw_content:
    formatted_line = ''
    line = line.split('), (')[0][:-1]  # last char of every line is ]
    special_case = False  # set when the previous token carried an apostrophe
    for token in line.split(', '):
        # Strip the repr quoting around each token.
        token = re.sub("'$", '', token)
        token = re.sub("^'", '', token)
        if token == "[SEP]":
            continue
        elif token[0] == "#" or token == "?" or token == "." or special_case:
            # WordPiece continuation / punctuation: append without a space.
            formatted_line += token.replace('#', '').replace("'",'')
            special_case = False
        elif token.find("'") != -1:
            # Token carries an apostrophe (suffix): emit it and glue the next token.
            special_case = True
            formatted_line += "\'"
        else:
            formatted_line += " " + token.replace('#', '').replace("'",'')
    print(formatted_line.strip())
| {"/qg-model/src/model/__init__.py": ["/qg-model/src/model/model.py"], "/qg-model/src/__init__.py": ["/qg-model/src/get_best_sentences.py"]} |
63,787 | alpgokcek/turkish-qg-model | refs/heads/main | /Preliminary_QG/print-attributes.py | from pprint import pprint
from PersonDataParser import Parser
def main():
    """Parse the wiki person dump and pretty-print the top occupations
    together with their most frequent infobox attributes."""
    parser = Parser("everyone.txt")
    parser.parse()
    print("Total persons: ", parser.get_person_count())
    print("\nTop Occupations and their attributes: ")
    # Dropped two unused locals from the original (get_persons() /
    # get_occupations() are pure accessors, so removing the calls is safe).
    occupations_with_attr = parser.get_top_occupations_and_attributes(top=10)
    pprint(occupations_with_attr)
if __name__ == '__main__':
main() | {"/qg-model/src/model/__init__.py": ["/qg-model/src/model/model.py"], "/qg-model/src/__init__.py": ["/qg-model/src/get_best_sentences.py"]} |
63,788 | alpgokcek/turkish-qg-model | refs/heads/main | /qg-model/src/__init__.py | from .get_best_sentences import generate_sentences
| {"/qg-model/src/model/__init__.py": ["/qg-model/src/model/model.py"], "/qg-model/src/__init__.py": ["/qg-model/src/get_best_sentences.py"]} |
63,789 | alpgokcek/turkish-qg-model | refs/heads/main | /qg-model/src/load.py | import torch
import transformers
# Quick inspection: load a saved encoder state_dict and list its tensor keys.
# NOTE(review): map_location=None restores tensors onto the devices they were
# saved from; on a CPU-only machine pass map_location='cpu'.
checkpoint = torch.load("../data/model/dbmdz/bert-base-turkish-cased/model0epoch2/pytorch_model.bin", map_location=None)
print(checkpoint.keys())
| {"/qg-model/src/model/__init__.py": ["/qg-model/src/model/model.py"], "/qg-model/src/__init__.py": ["/qg-model/src/get_best_sentences.py"]} |
63,790 | alpgokcek/turkish-qg-model | refs/heads/main | /Preliminary_QG/tokmain_mp.py | import random
import string
random.seed(5)
import nltk
nltk.download('punkt')
from nltk.tokenize import sent_tokenize
from pprint import pprint
from fuzzywuzzy import fuzz
from PersonDataParser import Parser
from QuestionPatterns import QuestionPatterns
from CommonPatterns import CommonPatterns
from MinifiedQuestionPatterns import MinifiedQuestionPatterns
from MinifiedCommonPatterns import MinifiedCommonPatterns
from bs4 import BeautifulSoup
from Person import Person
import json
import os
from multiprocessing import Pool
from turkish_suffix_library.turkish import Turkish
import argparse
# CLI: --dev switches to the small sample inputs; --output overrides the
# default JSON output path (OUTPUT_PATH is currently unused below).
parser = argparse.ArgumentParser()
parser.add_argument("--output", help="path of output file",
                    type=str)
parser.add_argument(
    "--dev", help="development environment selection", action='store_true')
args = parser.parse_args()
ATTRIBUTES_PATH = "sample_data/sample_wiki_persons.txt" if args.dev else "data/wiki_persons.txt"
WIKI_PATH = "sample_data/sample_wiki_dump.html" if args.dev else "data/wiki_whole_data.html"
OUTPUT_PATH = "out/data.json" if not args.output else args.output
CONFIG_FILE = json.load(open('config.json', 'r'))
OCCUPATION_SETTINGS = CONFIG_FILE["occupation_settings"]
THRESHOLD = 60  # fuzzy-match score above which a sentence is kept as context
# NOTE(review): `parser` is rebound from the ArgumentParser to the person
# dump parser here -- works, but confusing.
parser = Parser(ATTRIBUTES_PATH)
parser.parse()
# Full question templates vs single-template variants, per config.
occupational_patterns = QuestionPatterns.patterns if not CONFIG_FILE[
    'use_minified'] else MinifiedQuestionPatterns.patterns
common_patterns = CommonPatterns.patterns if not CONFIG_FILE[
    'use_minified'] else MinifiedCommonPatterns.patterns
#print("occupational_patterns", occupational_patterns)
# NOTE(review): this module-level handle truncates the output file on import
# but is never written through -- workers reopen it in append mode below.
out = open("out/data_tokmain_mp.txt", 'w')
full_wiki = open(WIKI_PATH).read()
soup = BeautifulSoup(full_wiki, "html.parser")
output_dict = {'data': list()}  # NOTE(review): never used afterwards
#print("Total persons: ", parser.get_person_count())
#print("\nTop Occupations and their attributes: ")
persons = parser.get_persons()
occupations = parser.get_occupations()
occupations_with_attr = parser.get_top_occupations_and_attributes(top=10)
def create_common_questions(p: Person):
    """Build answer/question pairs for p's occupation-independent attributes.

    The occupation's "common_questions" bitmap (config.json) selects, by
    position, which common pattern groups apply to this occupation.
    NOTE(review): patterns are appended verbatim -- the "{name}"/"{_suffix1}"
    placeholders are not substituted here; confirm that happens downstream.
    """
    bitmap = OCCUPATION_SETTINGS[p.occupation]["common_questions"]
    pair_list = list()
    for i, (feature, question_patterns) in enumerate(common_patterns.items()):
        if bitmap[i] == "0":
            continue  # this pattern group is disabled for the occupation
        elif feature in p.attributes:
            ans = p.attributes[feature]
            qa_pair = {'answer': ans, 'questions': list()}
            for pattern in question_patterns:
                q = pattern
                if q:  # patterns are non-empty strings, so this always holds
                    qa_pair['questions'].append(q)
            pair_list.append(qa_pair)
    return pair_list
def process(enum):
    """Worker: build the QA record for one person and append it to the output.

    `enum` is an (index, Person) tuple as produced by enumerate(persons).
    Writes one JSON object per line (comma-terminated) to the shared file.
    """
    index, p = enum
    print("Idx,", index, "P", p)
    person_dict = dict()
    # The person's article body lives in a <div id=doc_id> of the wiki dump.
    description_tag = soup.find(
        "div", {"id": int(p.doc_id)})  # large description text
    if description_tag:
        # Drop the first 4 header lines, strip punctuation, cap at 512 chars,
        # and tag the text with the person's name.
        description = description_tag.get_text().split("\n")[4:]
        description = ' '.join(description).translate(str.maketrans('', '', string.punctuation))[:512]
        description = f"<{p.name}>{description}"
        del description_tag
        sentence_tokenizations = sent_tokenize(description)
        person_dict['data'] = list()
        # Sentences kept for the final context.  NOTE(review): keyed 1 but
        # stores sentence 0, while later inserts use the 0-based index i --
        # sentence 1 can overwrite this entry; confirm intended.
        desc_list = {1: sentence_tokenizations[0]}
        common_questions = create_common_questions(p)
        person_dict['data'] = person_dict['data'] + common_questions
        feature_patterns = occupational_patterns[p.occupation]
        # pattern type = rutbesi, pozisyon, etc.
        for pattern_type in feature_patterns.keys():
            if pattern_type in p.attributes.keys():
                answer = p.attributes[pattern_type]
                qa_pair = {'answer': answer, 'questions': list()}
                for question_pattern in feature_patterns[pattern_type]:
                    # does the description paragraph contain answer?
                    # Fuzzy containment: keep sentences that appear to
                    # mention the answer.
                    for i, sent in enumerate(sentence_tokenizations):
                        ratio = int(fuzz.partial_token_set_ratio(
                            sent, answer))
                        if ratio > THRESHOLD:
                            desc_list[i] = sent
                    # else:
                    #    if random.randint(0, 31) == 2:
                    #        desc_list[i] = sentence_tokenizations[i]
                    q = question_pattern
                    if q:
                        qa_pair['questions'].append(q)
                if len(qa_pair['questions']) > 0:  # if any relevant qa pair is present
                    person_dict['data'].append(qa_pair)
                else:
                    del qa_pair
        if len(person_dict['data']) > 0:
            # Each worker opens/appends/closes independently; concurrent
            # correctness relies on O_APPEND write atomicity per line.
            out = open("out/data_tokmain_mp.txt", 'a')
            person_dict['description'] = ' '.join(desc_list.values())
            person_dict['name'] = p.name
            out.write(json.dumps(person_dict, ensure_ascii=False) + ",\n")
            out.close()
            del person_dict
if __name__ == '__main__':
    # Fan the per-person work out over all cores; map() blocks until done.
    pool = Pool(os.cpu_count())  # Create a multiprocessing Pool
    pool.map(process, enumerate(persons))
    pool.close()
    pool.join()
| {"/qg-model/src/model/__init__.py": ["/qg-model/src/model/model.py"], "/qg-model/src/__init__.py": ["/qg-model/src/get_best_sentences.py"]} |
63,791 | alpgokcek/turkish-qg-model | refs/heads/main | /qg-model/src/data/preprocess.py | import json
import pickle
from transformers import BertTokenizer
class Preprocess:
    """Tokenize a TurQuAD JSON dump for BERT and allow pickling the result."""

    def __init__(self, turquad_path, bert_model):
        # Parse the dump, flatten it, and tokenize both sides immediately.
        with open(turquad_path, 'r') as source:
            raw = json.load(source)
        contexts_answers, questions = _extract_turquad_data(raw)
        self.data = _tokenize_data(contexts_answers, questions, bert_model)

    def save(self, path):
        """Pickle the tokenized tensor dict to `path`."""
        with open(path, 'wb+') as sink:
            pickle.dump(self.data, sink)
def _extract_turquad_data(data):
data = data['data']
input = []
output = []
for doc in data:
context = doc['description'][:512]
for qas in doc['data']:
answer = qas['answer']
question = qas['questions'][0]
input.append((context, answer))
output.append(question)
return input, output
def _tokenize_data(input, output, bert_model):
    """Tokenize (context, answer) pairs and questions with the BERT tokenizer.

    Uses the legacy pad_to_max_length=True API (pads everything to the batch
    max).  Returns the encoder batch dict extended with output_ids /
    output_len / input_len; rows over 512 tokens are dropped and both sides
    trimmed to the longest surviving sequence.
    """
    tokenizer = BertTokenizer.from_pretrained(bert_model)
    data = tokenizer.batch_encode_plus(input, pad_to_max_length=True, return_tensors='pt')
    out_dict = tokenizer.batch_encode_plus(output, pad_to_max_length=True, return_tensors='pt')
    data['output_ids'] = out_dict['input_ids']
    # attention_mask is 1 on real tokens, so its row-sum is the unpadded length.
    data['output_len'] = out_dict['attention_mask'].sum(dim=1)
    data['input_len'] = data['attention_mask'].sum(dim=1)
    # Boolean row mask: keep only inputs that fit BERT's 512-token window.
    idx = (data['input_len'] <= 512)
    in_m = max(data['input_len'][idx])
    out_m = max(data['output_len'][idx])
    data['input_ids'] = data['input_ids'][idx, :in_m]
    data['attention_mask'] = data['attention_mask'][idx, :in_m]
    data['token_type_ids'] = data['token_type_ids'][idx, :in_m]
    data['input_len'] = data['input_len'][idx]
    data['output_ids'] = data['output_ids'][idx, :out_m]
    data['output_len'] = data['output_len'][idx]
    return data
if __name__ == '__main__':
    # Ad-hoc run: tokenize a local data.json and cache it as the 'test' split.
    dataset = Preprocess('data.json', 'dbmdz/bert-base-turkish-cased')
    dataset.save(f'../data/bert/dbmdz/bert-base-turkish-cased/test')
| {"/qg-model/src/model/__init__.py": ["/qg-model/src/model/model.py"], "/qg-model/src/__init__.py": ["/qg-model/src/get_best_sentences.py"]} |
63,792 | alpgokcek/turkish-qg-model | refs/heads/main | /Preliminary_QG/Person.py |
class Person:
    """Plain data holder for one wiki person record."""

    def __init__(self, doc_id, name, occupation, attributes, description):
        self.name = name
        self.doc_id = doc_id          # id of the person's <div> in the wiki dump
        self.occupation = occupation  # occupation key, e.g. 'yazar'
        self.attributes = attributes  # infobox attribute -> value dict
        self.description = description  # short free-text description

    def __str__(self):
        # Fix: the original template had three placeholders but four format()
        # arguments, so occupation filled the last slot and description was
        # silently dropped (str.format ignores extra positional args).
        return "{}: {} \n\t{} \n\t{}".format(self.doc_id, self.name, self.occupation, self.description)
| {"/qg-model/src/model/__init__.py": ["/qg-model/src/model/model.py"], "/qg-model/src/__init__.py": ["/qg-model/src/get_best_sentences.py"]} |
63,793 | alpgokcek/turkish-qg-model | refs/heads/main | /Preliminary_QG/PersonDataParser.py | from Person import Person
from typing import Set, Dict, List
from collections import defaultdict
import json
class Parser:
    """Parse the '#'-separated wiki person dump into Person records plus
    per-occupation attribute statistics."""

    def __init__(self, path):
        self.path = path
        # Fix: read through a context manager -- the original left the file
        # handle open, relying on GC to close it.
        with open(path, 'r') as source:
            self.string = source.read().rstrip()
        self.paragraphs = []
        self.persons: List[Person] = []
        self.occupations: Dict[str, int] = {}  # occupation -> person count
        self.occupations_and_attr: Dict[str, defaultdict] = {}  # occupation -> attr -> count

    def parse(self):
        """Split the dump into blank-line-separated records and build Persons.

        Record fields (separated by '#'): 0=doc id, 1=name, 2=occupation,
        4=JSON attribute dict, 7=short description.
        """
        paragraphs = self.string.split('\n\n')
        for par in paragraphs:
            attributes = par.split('#')
            doc_id = attributes[0]
            name = attributes[1]
            occup = attributes[2]
            self.occupations[occup] = self.occupations.get(occup, 0) + 1
            attr: Dict[str, str] = json.loads(attributes[4])
            self.drop_key(attr, "ad")  # "ad" (name) duplicates field 1
            self._add_attr_to_occupations(occup, attr)
            short_desc = attributes[7]
            self.persons.append(Person(doc_id, name, occup, attr, short_desc))

    def get_occupations(self):
        """Return the occupation -> person-count mapping."""
        return self.occupations

    def get_person_count(self):
        """Number of persons parsed so far."""
        return len(self.persons)

    def get_persons(self):
        """All parsed Person records."""
        return self.persons

    def _add_attr_to_occupations(self, occup, attr):
        """Count one occurrence of each (lowercased) attribute key for occup."""
        if occup in self.occupations_and_attr:
            for key in attr.keys():
                _key = key.lower()
                self.occupations_and_attr[occup][_key] += 1
        else:
            self.occupations_and_attr[occup] = defaultdict(int)
            self._add_attr_to_occupations(occup, attr)

    def get_top_occupations_and_attributes(self, top: int = None):
        """Return occupation -> [(attr, count), ...] sorted desc, cut to `top`.

        NOTE(review): mutates self.occupations_and_attr in place (count dicts
        become sorted lists), so it is only safe to call once -- kept as-is
        to preserve behavior.
        """
        var = self.occupations_and_attr
        for key, val in var.items():
            tmp = var[key]
            var[key] = sorted(tmp.items(), key=lambda kv: kv[1], reverse=True)[:top]
        return var

    @staticmethod
    def drop_key(d: Dict, key):
        """Delete d[key] if present; silently ignore a missing key."""
        try:
            del d[key]
        except KeyError:
            pass
63,794 | alpgokcek/turkish-qg-model | refs/heads/main | /qg-model/src/model/model.py | import random
import torch
from torch import nn
import torch.nn.functional as F
class Attention(nn.Module):
    """Additive (Bahdanau-style) attention over encoder states.

    The decoder hidden state (key) is concatenated with every encoder state
    (query), passed through a linear layer + tanh, and scored against a
    learned vector v; softmax over source positions yields the weights.
    """

    def __init__(self, enc_hid_dim, dec_hid_dim):
        super().__init__()
        self.enc_hid_dim = enc_hid_dim
        self.dec_hid_dim = dec_hid_dim
        self.attn = nn.Linear(enc_hid_dim + dec_hid_dim, dec_hid_dim)
        self.v = nn.Parameter(torch.rand(dec_hid_dim))

    def forward(self, key, queries):
        # key: (batch, dec_hid); queries: (batch, src_len, enc_hid)
        batch, src_len = queries.shape[0], queries.shape[1]
        # Broadcast the key along the source axis and score each position.
        tiled_key = key.unsqueeze(1).repeat(1, src_len, 1)
        scores = torch.tanh(self.attn(torch.cat((tiled_key, queries), dim=2)))
        scores = scores.permute(0, 2, 1)
        probe = self.v.repeat(batch, 1).unsqueeze(1)
        weights = torch.bmm(probe, scores).squeeze(1)
        # (batch, src_len) attention distribution.
        return F.softmax(weights, dim=1)
class Decoder(nn.Module):
    """Single-layer GRU decoder with additive attention over BERT states.

    Each step consumes the previous token id, attends over the encoder
    outputs and emits vocabulary logits.
    """

    def __init__(self, output_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout, attention, device):
        super().__init__()
        self.device = device
        self.emb_dim = emb_dim
        self.enc_hid_dim = enc_hid_dim
        self.dec_hid_dim = dec_hid_dim
        self.output_dim = output_dim  # vocabulary size
        self.dropout = dropout
        self.attention = attention
        self.embedding = nn.Embedding(output_dim, emb_dim)
        # Step input: previous-token embedding concatenated with attention context.
        self.rnn = nn.GRU(enc_hid_dim + emb_dim, dec_hid_dim, batch_first=True, num_layers=1)
        # "Deep output": logits from GRU output + context + embedding.
        self.out = nn.Linear(enc_hid_dim + dec_hid_dim + emb_dim, output_dim)
        # Rebinds self.dropout from the float above to the module.
        self.dropout = nn.Dropout(dropout)

    def forward(self, input, queries, key):
        # input: (batch,) previous token ids; queries: (batch, src_len, enc_hid);
        # key: (batch, dec_hid) previous decoder hidden state.
        input = input.unsqueeze(1)
        embedded = self.dropout(self.embedding(input))
        a = self.attention(key, queries)  # (batch, src_len) weights
        a = a.unsqueeze(1)
        weighted = torch.bmm(a, queries)  # attention context vector
        rnn_input = torch.cat((embedded, weighted), dim=2)
        output, hidden = self.rnn(rnn_input, key.unsqueeze(0))
        embedded = embedded.squeeze(1)
        output = output.squeeze(1)
        weighted = weighted.squeeze(1)
        output = self.out(torch.cat((output, weighted, embedded), dim=1))
        # Returns (logits (batch, vocab), new hidden state (batch, dec_hid)).
        return output, hidden.squeeze(0)
class Seq2Seq(nn.Module):
    """Training-time wrapper: unrolls the decoder over the target sequence
    with scheduled teacher forcing."""

    def __init__(self, decoder, device):
        super().__init__()
        self.decoder = decoder
        self.device = device

    def forward(self, src, trg, teacher_forcing_ratio=0.5):
        # src: encoder states (batch, src_len, enc_hid); trg: target token ids.
        batch_size = src.shape[0]
        max_len = trg.shape[1]
        trg_vocab_size = self.decoder.output_dim
        # Position 0 is never predicted, so outputs[:, 0] stays all-zero.
        outputs = torch.zeros(batch_size, max_len, trg_vocab_size).to(self.device)
        output = trg[:, 0]
        # Initial decoder hidden state is all zeros.
        hidden = torch.zeros(output.shape[0], self.decoder.dec_hid_dim).to(self.device)
        for t in range(1, max_len):
            output, hidden = self.decoder(output, src, hidden)
            outputs[:, t] = output
            # Per step: feed the gold token with prob teacher_forcing_ratio,
            # otherwise the model's own argmax.
            teacher_force = random.random() < teacher_forcing_ratio
            top1 = output.max(1)[1]
            output = (trg[:, t] if teacher_force else top1)
        return outputs
import torch.nn.functional as F
from torch import nn
import torch
class BeamSearch(nn.Module):
    """Beam-search decoding wrapper for inference (beam width k, 40 steps max).

    Per batch element, per step t it tracks:
      search_results[b, i, t] -- token chosen by beam i at step t
      search_map[b, i, t]     -- index of beam i's parent at step t-1
      scores[b, i]            -- cumulative log-probability of beam i
      ended[b, i]             -- 1 once beam i emitted the end token
    The best path is recovered by backtracking through search_map.

    NOTE(review): token id 102 is used as both start and end marker --
    confirm it matches the tokenizer's special-token ids.
    NOTE(review): `topk / trg_vocab_size` relies on floor semantics of `/`
    on LongTensors; on modern PyTorch `/` is true division, so this needs
    torch.div(..., rounding_mode='floor') -- verify the pinned version.
    """

    def __init__(self, decoder, device, k):
        super().__init__()
        self.decoder = decoder
        self.device = device
        self.k = k  # beam width

    def forward(self, src):
        batch_size = src.shape[0]
        max_len = 40
        trg_vocab_size = self.decoder.output_dim
        search_results = torch.zeros(batch_size, self.k, max_len).type(torch.LongTensor).to(self.device)
        search_map = torch.zeros(batch_size, self.k, max_len).type(torch.LongTensor).to(self.device)
        outputs = torch.zeros(batch_size, max_len).type(torch.LongTensor).to(self.device)
        hiddens = torch.zeros(batch_size, self.k, self.decoder.dec_hid_dim).to(self.device)
        ended = torch.zeros(batch_size, self.k).to(self.device)
        true = torch.ones(ended.shape).to(self.device)
        # Distribution that forces finished beams to keep emitting token 102.
        no_prob = torch.Tensor(batch_size, trg_vocab_size).fill_(float('-Inf')).to(self.device)
        no_prob[:, 102] = 0
        lengths = torch.zeros(batch_size, self.k).to(self.device)
        # First step: expand the start token into the initial k beams.
        output = torch.Tensor(batch_size).fill_(102).type(torch.LongTensor).to(self.device)
        hidden = torch.zeros(output.shape[0], self.decoder.dec_hid_dim).to(self.device)
        output, hidden = self.decoder(output, src, hidden)
        output = F.log_softmax(output, dim=1)
        for i in range(self.k):
            hiddens[:, i, :] = hidden
        scores, search_results[:, :, 0] = torch.topk(output, self.k, 1)
        for t in range(1, max_len):
            candidates = torch.Tensor(batch_size, 0).to(self.device)
            for i in range(self.k):
                # Gather the parent beam's hidden state.
                # NOTE(review): indexes search_map[:, 0, ...] for every i --
                # looks like it should be [:, i, ...]; confirm intended.
                idx = search_map[:, 0, t - 1].unsqueeze(1).unsqueeze(1)
                idx = idx.expand(-1, -1, hiddens.shape[2])
                hidden = hiddens.gather(1, idx).squeeze(1).squeeze(1)
                output, hiddens[:, i, :] = self.decoder(search_results[:, i, t - 1], src,
                                                        hidden)
                output = F.log_softmax(output, dim=1)
                # Finished beams contribute only the forced end token.
                output = torch.where(ended[:, i].unsqueeze(1).expand_as(output) == 0, output, no_prob)
                lengths[:, i] = torch.where(ended[:, i] == 0, lengths[:, i] + 1, lengths[:, i])
                # Extend the beam's cumulative score.
                output = output + scores[:, i].unsqueeze(1)
                candidates = torch.cat((candidates, output), 1)
            # Length-normalized copy used only for ranking (alpha = 0.7).
            norm_cand = torch.tensor(candidates)
            # NOTE(review): range(self.k - 1) leaves the last beam's slice
            # un-normalized -- confirm intended.
            for i in range(self.k - 1):
                norm_cand[:, trg_vocab_size * i:trg_vocab_size * (i + 1)] /= (lengths[:, i] ** 0.7).unsqueeze(1)
            _, topk = torch.topk(norm_cand, self.k, 1)
            for i in range(topk.shape[0]):
                scores[i, :] = candidates[i, topk[i, :]]
            # Decompose flat candidate indices into (parent beam, token id).
            ended = torch.where((topk - (topk / trg_vocab_size) * trg_vocab_size) == 102, true, ended)
            search_results[:, :, t] = topk - (
                    topk / trg_vocab_size) * trg_vocab_size
            search_map[:, :, t] = topk / trg_vocab_size
        # Backtrack from the best-scoring final beam to emit the sequence.
        _, idx = torch.max(scores, 1)
        for t in range(max_len - 1, -1, -1):
            outputs[:, t] = search_results[:, :, t].gather(1, idx.unsqueeze(1)).squeeze(1)
            idx = search_map[:, :, t].gather(1, idx.unsqueeze(1)).squeeze(1)
        return outputs
63,796 | HorttanainenSami/Tietokantasovellus_2021 | refs/heads/main | /db.py | from app import app
from flask_sqlalchemy import SQLAlchemy
from os import getenv
# Connection string and session secret come from the environment
# (DATABASE_URL / SECRET_KEY); the shared SQLAlchemy handle is created here.
app.config['SQLALCHEMY_DATABASE_URI'] =getenv('DATABASE_URL')
app.secret_key = getenv('SECRET_KEY')
db = SQLAlchemy(app)
| {"/query.py": ["/db.py"], "/user.py": ["/db.py"], "/chat.py": ["/db.py"], "/routes.py": ["/user.py", "/query.py", "/chat.py"]} |
63,797 | HorttanainenSami/Tietokantasovellus_2021 | refs/heads/main | /query.py | from db import db
def search(region, min, max):
    """Published ads in `region` with price in [min, max], newest first.

    NOTE(review): parameters `min`/`max` shadow the builtins -- rename when
    the call sites can be touched.
    """
    sql = 'SELECT id, published_at, header, price, content'\
        ' FROM advertisement WHERE published=true AND region=:region AND price BETWEEN :min AND :max ORDER BY published_at DESC'
    result = db.session.execute(sql, {'region':region, 'min':min, 'max':max})
    return result.fetchall()

def get_all():
    """All published ads, newest first."""
    sql = 'SELECT id, published_at, header, price, content FROM advertisement WHERE published=true ORDER BY published_at DESC'
    result = db.session.execute(sql)
    return result.fetchall()
def update(content, header,price, id, region):
    """Overwrite the editable fields of an ad.

    NOTE(review): no ownership check here -- callers must ensure the session
    user owns `id`.
    """
    sql = 'UPDATE advertisement SET region=:region, content=:content, header=:header, price=:price WHERE id =:id'
    db.session.execute(sql,{'region':region,'id':id, 'content':content, 'header':header,'price':price})
    db.session.commit()

def new(id):
    """Create an empty, unpublished draft ad for user `id`; return its id."""
    sql = "INSERT INTO advertisement (user_id, published, header, content,price,region) VALUES (:user_id, FALSE, '', '', 0 ,NULL) RETURNING id"
    advertisement_id = db.session.execute(sql, {'user_id':id})
    db.session.commit()
    return advertisement_id.fetchone()[0]
def get_published(user_id):
sql ='SELECT id, published_at, header, price, content FROM advertisement as a WHERE a.published=TRUE AND a.user_id=:user_id ORDER BY a.published_at'
result = db.session.execute(sql, {'user_id':user_id})
return result.fetchall()
def get_incomplete(user_id, advertisement_id):
sql = 'SELECT id, published_at, header, price, content FROM advertisement WHERE user_id=:user_id AND id=:id'
result = db.session.execute(sql, {'user_id':user_id, 'id':advertisement_id})
return result.fetchone()
def get_incompletes(user_id):
    """Return *user_id*'s unpublished (draft) advertisements."""
    sql = 'SELECT id, published_at, header, price, content FROM advertisement WHERE published=FALSE AND user_id=:user_id'
    result = db.session.execute(sql, {'user_id': user_id})
    return result.fetchall()
def get(id):
    """Fetch advertisement *id* joined with its creator's profile info.

    NOTE: returns the sentinel string 'Wrong id' (not None) for unknown ids;
    callers must compare against that exact string.
    """
    # get advertisement and creator info (left join picks up any image row of the creator)
    sql = 'SELECT a.id, u.id, a.header, a.price, a.content, a.published_at, u.username, i.id, a.region'\
        ' FROM advertisement as a INNER JOIN users as u ON u.id=a.user_id'\
        ' LEFT JOIN images as i ON i.user_id=u.id'\
        ' WHERE a.id=:id'
    result = db.session.execute(sql, {'id': id}).fetchone()
    if result == None:
        return 'Wrong id'
    return result
def remove(id, user_id):
    """Delete advertisement *id* if owned by *user_id*; always returns 'OK'.

    TODO noted in original: related chats and messages are not deleted here.
    """
    sql = 'DELETE FROM advertisement WHERE id=:id AND user_id=:user_id'
    db.session.execute(sql, {'id': id, 'user_id': user_id})
    db.session.commit()
    return 'OK'
def publish(id, user_id):
    """Mark advertisement *id* as published (timestamped NOW()) if owned by *user_id*."""
    sql = 'UPDATE advertisement SET published=TRUE, published_at =NOW() WHERE id=:id AND user_id=:user_id'
    db.session.execute(sql, {'id': id, 'user_id': user_id})
    db.session.commit()
    return 'OK'
def get_images(id):
    """Return all image rows attached to advertisement *id*."""
    rows = db.session.execute('SELECT * FROM images WHERE advertisement_id=:id', {'id': id})
    return rows.fetchall()
def image_show(id):
    """Return the raw binary data of image *id*, or the string 'Wrong id' if absent."""
    sql = 'SELECT data FROM images WHERE id=:id'
    result = db.session.execute(sql, {'id': id}).fetchone()
    if result == None:
        return 'Wrong id'
    return result[0]
def image_remove(img_id):
    """Delete image row *img_id*; commits immediately."""
    db.session.execute('DELETE FROM images as i WHERE i.id=:img_id', {'img_id': img_id})
    db.session.commit()
    return
def image_save(id, file):
    """Attach an uploaded image to advertisement *id*.

    Only '.jpeg' filenames up to 100 KiB are accepted; returns an error
    string ('Invalid filename' / 'Too big file') or 'OK'.
    """
    name = file.filename
    if not name.endswith('.jpeg'):
        return 'Invalid filename'
    data = file.read()
    if len(data) > 100*1024:
        return 'Too big file'
    sql = 'INSERT INTO images (data, advertisement_id) VALUES (:data, :advertisement_id)'
    db.session.execute(sql, {'data': data, 'advertisement_id': id})
    db.session.commit()
    return 'OK'
| {"/query.py": ["/db.py"], "/user.py": ["/db.py"], "/chat.py": ["/db.py"], "/routes.py": ["/user.py", "/query.py", "/chat.py"]} |
63,798 | HorttanainenSami/Tietokantasovellus_2021 | refs/heads/main | /user.py | from werkzeug.security import check_password_hash, generate_password_hash
from db import db
def handleLogin(username, password):
    """Validate credentials; return the matching user row on success, else None."""
    row = db.session.execute("SELECT * FROM users WHERE username=:username",
                             {"username": username}).fetchone()
    if row is None:
        # unknown username
        return None
    stored_hash = row[2]
    if not check_password_hash(stored_hash, password):
        # wrong password
        return None
    return row
def handleRegister(username, password):
    """Create a new user unless the name is taken.

    Returns the string 'error' when the username already exists.
    NOTE(review): on success this returns an empty tuple `()`; callers rely
    only on `'error' in result`, which is False for `()` — confirm before
    changing the return type.
    """
    sql = 'SELECT * FROM users WHERE username=:username'
    result = db.session.execute(sql, {'username': username}).fetchone()
    if result is not None:
        return 'error'
    # store only a salted hash, never the plaintext password
    hash_value = generate_password_hash(password)
    sql = "INSERT INTO users (username, password, created_at) VALUES (:username, :password, NOW())"
    db.session.execute(sql, {"username": username, "password": hash_value})
    db.session.commit()
    return ()
def get_user(user_id):
    """Fetch profile data for *user_id*, including the avatar image row if any."""
    sql = 'SELECT u.id, u.username,u.created_at, region, info, i.id, i.data FROM users as u LEFT join images as i ON i.user_id = u.id AND i.avatar=true WHERE u.id=:user_id'
    return db.session.execute(sql, {'user_id': user_id}).fetchone()
def avatar_save(user_id, file):
    """Store an uploaded avatar image ('.jpeg'/'.png', max 100 KiB) for *user_id*.

    Returns an error string ('Invalid filename' / 'Too big file') or 'OK'.
    """
    name = file.filename
    if not name.endswith(tuple(['.jpeg', '.png'])):
        return "Invalid filename"
    data = file.read()
    if len(data) > 100*1024:
        return "Too big file"
    sql = "INSERT INTO images (data,avatar, user_id) VALUES (:data, TRUE,:user_id)"
    db.session.execute(sql, {"data": data, "user_id": user_id})
    db.session.commit()
    return "OK"
def avatar_remove(user_id):
    """Delete *user_id*'s avatar image row(s); always returns 'OK'."""
    db.session.execute('DELETE FROM images WHERE user_id=:user_id AND avatar=TRUE',
                       {'user_id': user_id})
    db.session.commit()
    return 'OK'
def update(pitch, region, user_id):
    """Save *user_id*'s profile blurb ('info' column) and region; returns 'OK'."""
    sql = 'UPDATE users SET info=:pitch, region=:region WHERE id=:user_id'
    db.session.execute(sql, {'pitch': pitch, 'region': region, 'user_id': user_id})
    db.session.commit()
    return 'OK'
def change_password(old, new, user_id):
    """Replace *user_id*'s password with *new* if *old* matches the stored hash.

    Returns 'OK' on success, 'error' when the user is unknown or *old* is wrong.
    """
    sql = 'SELECT password FROM users WHERE id=:id'
    correct_hash = db.session.execute(sql, {'id': user_id}).fetchone()
    # fix: removed leftover debug print('toka'); flattened nested guards
    if correct_hash is not None and check_password_hash(correct_hash[0], old):
        new_hash = generate_password_hash(new)
        sql = 'UPDATE users SET password=:new_hash WHERE id =:user_id'
        db.session.execute(sql, {'new_hash': new_hash, 'user_id': user_id})
        db.session.commit()
        return 'OK'
    return 'error'
def remove_user(user_id):
    """Delete the account row for *user_id*; always returns 'OK'."""
    db.session.execute('DELETE FROM users WHERE id=:user_id', {'user_id': user_id})
    db.session.commit()
    return 'OK'
| {"/query.py": ["/db.py"], "/user.py": ["/db.py"], "/chat.py": ["/db.py"], "/routes.py": ["/user.py", "/query.py", "/chat.py"]} |
63,799 | HorttanainenSami/Tietokantasovellus_2021 | refs/heads/main | /chat.py | from db import db
import datetime
def get_messages(chat_id, user_id):
    """Return chat messages oldest-first; the join doubles as an access check.

    The participant join yields no rows when *user_id* is not in the chat,
    so callers treat an empty result as "not authorized".
    """
    sql = 'SELECT m.creator_id, m.content, m.created_at FROM message as m join participant as p ON p.chat_id=m.chat_id AND p.participant_id=:user_id WHERE m.chat_id=:chat_id ORDER BY m.created_at'
    result = db.session.execute(sql, {'user_id': user_id, 'chat_id': chat_id}).fetchall()
    return result
def get_active(creator_id, advertisement_id):
    """Return the existing chat row linking *creator_id* to *advertisement_id*, or None."""
    sql = 'SELECT * FROM chat as c join participant as p ON p.participant_id=:p_id AND p.chat_id=c.id WHERE advertisement_id=:adv_id'
    chat_id = db.session.execute(sql, {'p_id': creator_id, 'adv_id': advertisement_id}).fetchone()
    return chat_id
def get_participant(chat_id, user_id):
    """Return the *other* participant's profile info for chat *chat_id*.

    Also selects the advertisement the chat is attached to.
    """
    # fetch info of chat participant profile and which advertisement the chat connects to
    sql = 'SELECT u.id, u.username, i.id, c.advertisement_id FROM chat as c, participant as p'\
        ' JOIN users as u ON u.id=p.participant_id LEFT JOIN images as i ON i.avatar=TRUE AND i.user_id=u.id'\
        ' WHERE p.chat_id=:chat_id AND p.participant_id!=:user_id AND c.id=p.chat_id'
    result = db.session.execute(sql, {'chat_id': chat_id, 'user_id': user_id})
    return result.fetchone()
def get_all(participant_id):
    """Return all chats of *participant_id* with each chat's latest message.

    Uses ROW_NUMBER() partitioned per chat to pick the newest message, and
    joins the other participant's profile (and avatar, if any). Ordered by
    most recent activity first.
    """
    sql = 'SELECT last.chat_id, u.username, u.id, last.content, last.created_at, last.creator_id, i.id'\
        ' FROM participant as p,participant as p1, users as u'\
        ' LEFT JOIN images as i ON i.user_id=u.id,'\
        ' (SELECT ROW_NUMBER() OVER (PARTITION BY chat_id ORDER BY created_at DESC) as rownum, * FROM message) last'\
        ' WHERE rownum=1 AND p.chat_id=last.chat_id AND p.participant_id=:p_id AND p1.chat_id=p.chat_id'\
        ' AND p1.participant_id!=p.participant_id AND u.id=p1.participant_id ORDER BY last.created_at DESC'
    result = db.session.execute(sql, {'p_id': participant_id})
    return result.fetchall()
def create(creator_id, reciver_id, advertisement_id, content):
    """Create a chat about *advertisement_id*, add both participants, send the
    first message, and return the new chat id."""
    # create chat
    sql = 'INSERT INTO chat (advertisement_id) VALUES (:advertisement_id) RETURNING id'
    chat_id = db.session.execute(sql, {'advertisement_id': advertisement_id}).fetchone()[0]
    # add participants to chat
    sql = 'INSERT INTO participant (chat_id, participant_id) VALUES (:chat_id, :participant_id) '
    db.session.execute(sql, {'chat_id': chat_id, 'participant_id': creator_id})
    db.session.execute(sql, {'chat_id': chat_id, 'participant_id': reciver_id})
    db.session.commit()
    # send the opening message (send() commits separately)
    send(chat_id, content, creator_id)
    return chat_id
def send(chat_id, message, creator_id):
    """Insert one message into *chat_id* timestamped NOW(); returns 'OK'."""
    params = {'creator_id': creator_id, 'chat_id': chat_id, 'content': message}
    db.session.execute(
        'INSERT INTO message (creator_id, chat_id, content, created_at) VALUES (:creator_id, :chat_id, :content, NOW())',
        params)
    db.session.commit()
    return 'OK'
| {"/query.py": ["/db.py"], "/user.py": ["/db.py"], "/chat.py": ["/db.py"], "/routes.py": ["/user.py", "/query.py", "/chat.py"]} |
63,800 | HorttanainenSami/Tietokantasovellus_2021 | refs/heads/main | /routes.py | from flask import redirect, render_template, request, session, make_response, url_for, flash
from app import app
import user, query, chat
from datetime import datetime, timedelta
@app.errorhandler(404)
def page_not_found(e):
    """Render the shared 404 page for any unmatched URL."""
    body = render_template('404.html')
    return body, 404
@app.route('/user/messages')
def show_all_chats():
    """Inbox: list every chat of the logged-in user with its newest message."""
    chats = chat.get_all(session['id'])

    def format_date(date):
        # Time-only for messages from the last 24 h, full date otherwise.
        time_passed = datetime.now() - date
        if timedelta(days=1) > time_passed:
            return date.strftime('%X')
        else:
            return date.strftime('%d/%m/%y %X')
    return render_template('chats.html', active_chats=chats, format_date=format_date)
@app.route('/chat/<int:chat_id>')
def show_chat(chat_id):
    """Show one chat thread; 404s when the user is not a participant.

    chat.get_messages returns no rows for non-participants, so the empty
    check doubles as the authorization check (an empty-but-authorized chat
    also 404s — accepted behavior here).
    """
    messages = chat.get_messages(chat_id, session['id'])
    if not messages:
        return render_template('404.html'), 404
    profile_info = chat.get_participant(chat_id, session['id'])

    def format_date(date):
        # Time-only for messages from the last 24 h, full date otherwise.
        time_passed = datetime.now() - date
        if timedelta(days=1) > time_passed:
            return date.strftime('%X')
        else:
            return date.strftime('%d/%m/%y %X')
    # NOTE: template kwarg is 'formatdate' (no underscore), unlike chats.html
    return render_template('chat.html', messages=messages, chat_id=chat_id, formatdate=format_date, profile=profile_info)
@app.route('/chat/create', methods=['POST'])
def chat_create():
    """Start (or continue) a chat about an advertisement and post the message."""
    advertisement_id = request.form['advertisement_id']
    message = request.form['message']
    participant = request.form['user_id']
    # reuse an existing chat for this user/ad pair if one exists
    chat_id = chat.get_active(session['id'], advertisement_id)
    if chat_id:
        # existing chat: first column of the row is the chat id
        chat_id = chat_id[0]
        chat.send(chat_id, message, session['id'])
    else:
        # no prior chat: create one and send the opening message
        chat_id = chat.create(session['id'], participant, advertisement_id, message)
    return redirect('/chat/'+str(chat_id))
@app.route('/chat/sendmessage', methods=['POST'])
def send():
    """Append a POSTed message to an existing chat, then show that chat."""
    chat_id = request.form['chat_id']
    chat.send(chat_id, request.form['message'], session['id'])
    return redirect('/chat/'+str(chat_id))
##################################################################################################################################################################
##UserSession handling
@app.route('/user/profile/<int:user_id>')
def show_profile(user_id):
    """Public profile page: user info plus their published ads and ad images."""
    profile = user.get_user(user_id)
    published = query.get_published(user_id)
    # images[i] holds the image rows for published[i] (templates zip by index)
    images = [query.get_images(adv[0]) for adv in published]
    return render_template('profile.html', advertisements=published, images=images, profile=profile)
@app.route('/user/profile/remove', methods=['POST'])
def profile_delete():
    """Delete the logged-in user's account, end the session, go home."""
    user.remove_user(session['id'])
    del session['id']
    del session['username']
    flash('Käyttäjätunnuksesi on poistettu', 'success')
    return redirect('/')
@app.route('/user/profile/changepassword')
def profile_change_password_form():
    """Show the change-password form; anonymous users go to login."""
    if 'id' in session:
        return render_template('change_password.html')
    return redirect('/login')
@app.route('/user/profile/modify')
def profile_modify():
    """Show the profile edit form pre-filled with the logged-in user's data."""
    profile = user.get_user(session['id'])
    return render_template('profile_modify.html', profile=profile)
@app.route('/user/changepassword', methods=['POST'])
def profile_change_password():
    """Handle the change-password POST; re-show the form on wrong old password."""
    old = request.form['old_password']
    new = request.form['new_password']
    # user.change_password returns 'OK' or 'error'
    result = user.change_password(old, new, session['id'])
    if 'error' in result:
        flash('Salasanasi oli väärin', 'error')
        return render_template('change_password.html')
    flash('Salasanasi on vaihdettu', 'success')
    return redirect('/user/profile/'+str(session['id']))
@app.route('/user/profile/avatar/remove', methods=['POST'])
def remove_avatar():
    """Delete the logged-in user's avatar, then return to the edit form."""
    user.avatar_remove(session['id'])
    return redirect('/user/profile/modify')
@app.route('/user/profile/update', methods=['POST'])
def update_profile():
    """Save profile text/region and an optional avatar upload."""
    if 'file' in request.files:
        file = request.files['file']
        # avatar_save validates extension and size; errors are silently ignored here
        user.avatar_save(session['id'], file)
    pitch = request.form['pitch']
    region = request.form['region']
    user.update(pitch, region, session['id'])
    return redirect('/user/profile/modify')
@app.route('/signin')
def signin():
    """Show the registration form."""
    return render_template('signin.html')
@app.route('/register', methods=['POST'])
def register():
    """Handle the registration POST; duplicate usernames return to the form."""
    username = request.form['username']
    password = request.form['password']
    # handleRegister returns 'error' on duplicate username, an empty tuple on success
    result = user.handleRegister(username, password)
    if 'error' in result:
        flash('Käyttäjätunnus on jo käytössä', 'error')
        return redirect('/signin')
    flash('Käyttäjätunnus on luotu, kirjaudu nyt sisään', 'success')
    return redirect('/login')
@app.route('/login', methods=['POST', 'GET'])
def login():
    """Show the login form; a POSTed recentUrl is stored for post-login redirect."""
    if 'recentUrl' in request.form:
        session['url'] = request.form['recentUrl']
    return render_template('login.html')
@app.route('/login/check', methods=['POST'])
def checklogin():
    """Validate the login POST; on success restore any remembered URL."""
    username = request.form['username']
    password = request.form['password']
    # handleLogin returns the user row on success, None otherwise
    userQuery = user.handleLogin(username, password)
    if userQuery:
        session['username'] = userQuery[1]
        session['id'] = userQuery[0]
        flash('Tervetuloa, '+str(session['username']), 'success')
        # jump back to the page the user originally requested, if recorded
        if 'url' in session:
            url = session['url']
            del session['url']
            return redirect(url)
        return redirect('/')
    flash('Käyttäjä ja/tai salasana väärin', 'error')
    return redirect('/login')
@app.route('/logout')
def logout():
    """Clear the login session and bounce to the front page."""
    for key in ('username', 'id'):
        del session[key]
    flash('Olet kirjautunut ulos, nähdään taas pian :)', 'success')
    return redirect('/')
########################################################
### handle uploading
@app.route('/')
def index():
    """Front page: published ads, paginated five per page."""
    advertisements = query.get_all()
    current_page = request.args.get('page', 1, type=int)
    items_per_page = 5
    # ceil() via the same +.499 rounding trick used elsewhere in this module
    pages = round(len(advertisements)/items_per_page + .499)
    start = (current_page - 1) * items_per_page
    end = current_page * items_per_page
    list_part = advertisements[start:end]
    # images[i] holds the image rows for list_part[i]
    images = [query.get_images(adv[0]) for adv in list_part]
    return render_template('index.html', images=images, advertisements=list_part, pages=pages, current_page=current_page)
@app.route('/search', methods=['GET'])
def search():
    """Search published ads by region and price range, paginated five per page."""
    region = request.args['region']
    max = request.args['max']
    min = request.args['min']
    result = query.search(region, min, max)
    current_page = request.args.get('page', 1, type=int)
    items_per_page = 5
    # ceil() via the +.499 rounding trick, as in index()
    pages = round(len(result)/items_per_page + .499)
    from_page = int(current_page) * items_per_page - items_per_page
    upto_page = int(current_page) * items_per_page
    list_part = result[from_page:upto_page]
    images = []
    for adv in list_part:
        images.append(query.get_images(adv[0]))
    # region/max/min are echoed back so the template can preserve the form state
    return render_template('search.html', images=images, advertisements=list_part, pages=pages, current_page=current_page, region=region, max=max, min=min)
#####################################################################################################################################
@app.route('/advertisement/publish', methods=['POST'])
def publish():
    """Publish the POSTed draft advertisement, then show it."""
    advert_id = request.form['advert_id']
    query.publish(advert_id, session['id'])
    flash('Ilmoituksesi on julkaistu', 'success')
    return redirect('/advertisement/show/'+str(advert_id))
@app.route('/advertisement/unpublished')
def show_unpub_adv():
    """List the logged-in user's unpublished (draft) advertisements."""
    incompletes = query.get_incompletes(session['id'])
    # fix: removed leftover debug print(images); images[i] pairs with incompletes[i]
    images = [query.get_images(incomplete[0]) for incomplete in incompletes]
    return render_template('create.html', advertisements=incompletes, images=images)
@app.route('/advertisement/new', methods=['POST'])
def new_adv():
    """Create an empty draft ad for the logged-in user and open its editor."""
    advertisement_id = query.new(session['id'])
    return redirect('/advertisement/edit/'+str(advertisement_id))
@app.route('/advertisement/edit/<int:id>', methods=['GET'])
def edit_adv(id):
    """Open the edit form for advertisement *id*; 404 unless owned by the user.

    Fixes: the original `return page_not_found` returned the view function
    object (not a response) and fell through with no return value when the
    advertisement lookup failed.
    """
    advertisement = query.get(id)
    # query.get returns the sentinel string 'Wrong id' for unknown ids
    if advertisement == 'Wrong id' or advertisement[1] != session['id']:
        return render_template('404.html'), 404
    images = query.get_images(id)
    return render_template('edit.html', images=images, advertisement=advertisement, advertisement_id=id)
@app.route('/advertisement/delete', methods=['POST'])
def delete_adv():
    """Delete the POSTed advertisement (ownership enforced in query.remove)."""
    id = request.form['id']
    query.remove(id, session['id'])
    flash('Ilmoituksesi on poistettu', 'success')
    return redirect('/')
@app.route('/advertisement/image/delete', methods=['POST'])
def adv_delete_image():
    """Remove one image from an ad, then return to that ad's edit form."""
    image_id = request.form['img-id']
    query.image_remove(image_id)
    adv_id = request.form['advertisement_id']
    flash('Kuva poistettu', 'success')
    return redirect('/advertisement/edit/'+str(adv_id))
@app.route('/advertisement/update', methods=['POST'])
def update_adv():
    """Save edited ad fields and an optional image upload, then reopen the editor."""
    id = request.form['id']
    content = request.form['content']
    header = request.form['header']
    price = request.form['price']
    # .get() because region may be absent from the form
    region = request.form.get('region')
    if 'file' in request.files:
        file = request.files['file']
        # image_save validates extension/size; errors are silently ignored here
        query.image_save(id, file)
    query.update(content, header, price, id, region)
    flash('Tiedot tallennettu', 'success')
    return redirect('/advertisement/edit/'+str(id))
@app.route('/advertisement/show/<int:id>')
def show_adv(id):
    """Show one advertisement; 404 for unknown ids.

    Fixes: the original tested `'error' in advertisement`, which never matches
    query.get's 'Wrong id' sentinel, and did not `return` the 404 response,
    so a bad id fell through to rendering with a string.
    """
    advertisement = query.get(id)
    if advertisement == 'Wrong id':
        return render_template('404.html'), 404
    images = query.get_images(id)
    return render_template('advertisement.html', advertisement=advertisement, images=images)
@app.route('/show/image/<int:id>')
def show_img(id):
    """Serve stored image *id* as raw bytes.

    NOTE(review): Content-Type is always image/jpeg, but avatar uploads also
    accept .png — confirm whether this route ever serves avatars.
    """
    data = query.image_show(id)
    response = make_response(bytes(data))
    response.headers.set('Content-Type', 'image/jpeg')
    return response
| {"/query.py": ["/db.py"], "/user.py": ["/db.py"], "/chat.py": ["/db.py"], "/routes.py": ["/user.py", "/query.py", "/chat.py"]} |
63,802 | jsl2018/Web_zidonghua | refs/heads/master | /base/get_driver.py | from selenium import webdriver
def get_driver():
    """Start Firefox, open the locally hosted site under test, return the driver."""
    driver = webdriver.Firefox()
    driver.get("http://127.0.0.1/")
    return driver
63,803 | jsl2018/Web_zidonghua | refs/heads/master | /page/page_login.py | import page
from base.base import Base
class PageLogin(Base):
    """Page object for the login flow; element locators come from the page package."""

    def page_click_login(self,):
        # open the login form
        self.base_click(page.login_click)

    def page_input_username(self, username):
        self.base_input(page.loc_username, username)

    def page_input_password(self, password):
        self.base_input(page.loc_password, password)

    def page_input_verify_code(self, verify_code):
        self.base_input(page.loc_verify_code, verify_code)

    def page_click_login_submit(self):
        # submit the filled-in login form
        self.base_click(page.submit_login_click)
63,804 | jsl2018/Web_zidonghua | refs/heads/master | /page/__init__.py | from selenium.webdriver.common.by import By
# Locator tuples (By strategy, expression) for the login page, used by PageLogin.
login_click = By.XPATH, "//*[contains(@class, 'red')]"  # link that opens the login form
loc_username = By.ID, "username"
loc_password = By.ID, "password"
loc_verify_code = By.ID, "verify_code"
submit_login_click = By.XPATH, "//*[contains(@class, 'J-login-submit')]"  # submit button
63,805 | jsl2018/Web_zidonghua | refs/heads/master | /scripts/test_login.py | import pytest
import time
from page.page_login import PageLogin
from base.get_driver import get_driver
class TestLogin():
    """End-to-end login test driven through the PageLogin page object."""

    def setup_class(self):
        # one browser per test class
        self.login = PageLogin(get_driver())

    def teardown_class(self):
        # brief pause so the final page state is visible before quitting
        time.sleep(5)
        self.login.driver.quit()

    @pytest.mark.parametrize("username, password,verify_code", [(15573235704, 123456, 8888)])
    def test_login(self, username, password, verify_code):
        self.login.page_click_login()
        self.login.page_input_username(username)
        self.login.page_input_password(password)
        self.login.page_input_verify_code(verify_code)
        self.login.page_click_login_submit()
        print("test00被输出了")
| {"/page/page_login.py": ["/page/__init__.py"], "/scripts/test_login.py": ["/page/page_login.py", "/base/get_driver.py"]} |
63,807 | jun-harashima/bseg | refs/heads/master | /tests/test_bunsetsu.py | import unittest
from bseg.morphology import Morphology
from bseg.bunsetsu import Bunsetsu
class TestBunsetsu(unittest.TestCase):
def setUp(self):
morp1 = Morphology("天気 名詞,一般,*,*,*,*,天気,テンキ,テンキ")
morp2 = Morphology("が 助詞,格助詞,一般,*,*,*,が,ガ,ガ")
self.bnst1 = Bunsetsu([morp1, morp2])
morp3 = Morphology("良い 形容詞,自立,*,*,形容詞・アウオ段,\
基本形,良い,ヨイ,ヨイ")
morp4 = Morphology("。 記号,句点,*,*,*,*,。,。,。")
self.bnst2 = Bunsetsu([morp3, morp4])
def test___init__(self):
self.assertEqual(self.bnst1.surface, "天気が")
def test_ispredicate(self):
self.assertFalse(self.bnst1.ispredicate())
self.assertTrue(self.bnst2.ispredicate())
if __name__ == "__main__":
unittest.main()
| {"/tests/test_bunsetsu.py": ["/bseg/morphology.py", "/bseg/bunsetsu.py"], "/tests/test_morphology.py": ["/bseg/morphology.py"], "/bseg/bseg.py": ["/bseg/morphology.py", "/bseg/bunsetsu.py"], "/tests/test_bseg.py": ["/bseg/bseg.py"]} |
63,808 | jun-harashima/bseg | refs/heads/master | /scripts/train_two_input_model.py | import torch
from pttagger.model import Model
from pttagger.dataset import Dataset
EMBEDDING_DIMS = [2, 2]
HIDDEN_DIMS = [4, 4]
examples = [
{'Xs': [['人参', 'を', '切る'],
['名詞', '助詞', '動詞']],
'Y': ['B-S', 'B-I', 'B-P']},
{'Xs': [['ざっくり', '切る'],
['副詞', '動詞']],
'Y': ['B-M', 'B-P']},
{'Xs': [['葱', 'は', '細く', '刻む'],
['名詞', '助詞', '形容詞', '動詞']],
'Y': ['B-S', 'B-I', 'B-M', 'B-P']}
]
dataset = Dataset(examples)
x_set_sizes = [len(dataset.x_to_index[0]), len(dataset.x_to_index[1])]
y_set_size = len(dataset.y_to_index)
model = Model(EMBEDDING_DIMS, HIDDEN_DIMS, x_set_sizes, y_set_size,
batch_size=3)
model.train(dataset)
torch.save(model.state_dict(), 'two_input.model')
model.load_state_dict(torch.load('two_input.model'))
examples = [
{'Xs': [['葱', 'を', '切る'],
['名詞', '助詞', '動詞']],
'Y': ['B-S', 'B-I', 'B-P']},
{'Xs': [['細く', '切る'],
['副詞', '動詞']],
'Y': ['B-M', 'B-P']},
{'Xs': [['大根', 'は', 'ざっくり', '刻む'],
['名詞', '助詞', '形容詞', '動詞']],
'Y': ['B-S', 'B-I', 'B-M', 'B-P']}
]
dataset = Dataset(examples)
results = model.test(dataset)
print(results)
| {"/tests/test_bunsetsu.py": ["/bseg/morphology.py", "/bseg/bunsetsu.py"], "/tests/test_morphology.py": ["/bseg/morphology.py"], "/bseg/bseg.py": ["/bseg/morphology.py", "/bseg/bunsetsu.py"], "/tests/test_bseg.py": ["/bseg/bseg.py"]} |
63,809 | jun-harashima/bseg | refs/heads/master | /tests/test_morphology.py | import unittest
from bseg.morphology import Morphology
class TestMorphology(unittest.TestCase):
def test___init__(self):
line = "今日 名詞,副詞可能,*,*,*,*,今日,キョウ,キョー"
morp = Morphology(line)
self.assertEqual(morp.surface, "今日")
self.assertEqual(morp.part_of_speech, "名詞")
self.assertEqual(morp.part_of_speech1, "副詞可能")
if __name__ == "__main__":
unittest.main()
| {"/tests/test_bunsetsu.py": ["/bseg/morphology.py", "/bseg/bunsetsu.py"], "/tests/test_morphology.py": ["/bseg/morphology.py"], "/bseg/bseg.py": ["/bseg/morphology.py", "/bseg/bunsetsu.py"], "/tests/test_bseg.py": ["/bseg/bseg.py"]} |
63,810 | jun-harashima/bseg | refs/heads/master | /bseg/bseg.py | from bseg.morphology import Morphology
from bseg.bunsetsu import Bunsetsu
class Bseg:
    """Segments ipadic-style analyzer output into bunsetsu (phrase) units.

    A bunsetsu boundary is placed after every function word (particle/symbol,
    as decided by Morphology.isfunction).
    """

    def __init__(self):
        pass

    def segment(self, analysis_result):
        """Parse *analysis_result* (one morpheme per line) into Bunsetsu objects."""
        morps = self._construct_morphology_from(analysis_result)
        bnsts = self._construct_bunsetsu_from(morps)
        return bnsts

    def _construct_morphology_from(self, analysis_result):
        # one Morphology per analyzer output line
        return [Morphology(line) for line in analysis_result.split("\n")]

    def _construct_bunsetsu_from(self, morps):
        bnsts = []
        _morps = []
        for morp in morps:
            _morps.append(morp)
            if not morp.isfunction():
                continue
            # a function word closes the current bunsetsu
            bnsts.append(Bunsetsu(_morps))
            _morps = []
        # fix: only flush a trailing group if it is non-empty — the original
        # appended an empty Bunsetsu whenever input ended with a function word
        if _morps:
            bnsts.append(Bunsetsu(_morps))
        return bnsts
| {"/tests/test_bunsetsu.py": ["/bseg/morphology.py", "/bseg/bunsetsu.py"], "/tests/test_morphology.py": ["/bseg/morphology.py"], "/bseg/bseg.py": ["/bseg/morphology.py", "/bseg/bunsetsu.py"], "/tests/test_bseg.py": ["/bseg/bseg.py"]} |
63,811 | jun-harashima/bseg | refs/heads/master | /tests/test_bseg.py | import unittest
import textwrap
from bseg.bseg import Bseg
class TestBseg(unittest.TestCase):
def setUp(self):
self.analysis_result = textwrap.dedent("""
今日 名詞,副詞可能,*,*,*,*,今日,キョウ,キョー
は 助詞,係助詞,*,*,*,*,は,ハ,ワ
天気 名詞,一般,*,*,*,*,天気,テンキ,テンキ
が 助詞,格助詞,一般,*,*,*,が,ガ,ガ
良い 形容詞,自立,*,*,形容詞・アウオ段,基本形,良い,ヨイ,ヨイ
。 記号,句点,*,*,*,*,。,。,。
""")[1:-1]
def test_segment(self):
bseg = Bseg()
bnsts = bseg.segment(self.analysis_result)
self.assertEqual(bnsts[0].surface, "今日は")
def test__construct_morphology_from(self):
bseg = Bseg()
morps = bseg._construct_morphology_from(self.analysis_result)
self.assertEqual(morps[0].surface, "今日")
self.assertEqual(morps[1].surface, "は")
self.assertEqual(morps[2].surface, "天気")
self.assertEqual(morps[3].surface, "が")
self.assertEqual(morps[4].surface, "良い")
self.assertEqual(morps[5].surface, "。")
def test__construct_bunsetsu_from(self):
bseg = Bseg()
morps = bseg._construct_morphology_from(self.analysis_result)
bnsts = bseg._construct_bunsetsu_from(morps)
self.assertEqual(bnsts[0].surface, "今日は")
self.assertEqual(bnsts[1].surface, "天気が")
self.assertEqual(bnsts[2].surface, "良い。")
| {"/tests/test_bunsetsu.py": ["/bseg/morphology.py", "/bseg/bunsetsu.py"], "/tests/test_morphology.py": ["/bseg/morphology.py"], "/bseg/bseg.py": ["/bseg/morphology.py", "/bseg/bunsetsu.py"], "/tests/test_bseg.py": ["/bseg/bseg.py"]} |
63,812 | jun-harashima/bseg | refs/heads/master | /setup.py | from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
requirements = ['Click>=6.0', ]
test_requirements = ['numpy==1.15.4', 'torch==0.4.1', ]
setup(
author="Jun Harashima",
author_email='j.harashima@gmail.com',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description="Tool for segmenting ipadic-based analysis results into bunsetsu",
entry_points={
'console_scripts': [
'bseg=bseg.cli:main',
],
},
install_requires=requirements,
license="MIT license",
long_description=readme,
include_package_data=True,
keywords='bseg',
name='bseg',
packages=find_packages(include=['bseg']),
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/jun-harashima/bseg',
version='0.1.0',
zip_safe=False,
)
| {"/tests/test_bunsetsu.py": ["/bseg/morphology.py", "/bseg/bunsetsu.py"], "/tests/test_morphology.py": ["/bseg/morphology.py"], "/bseg/bseg.py": ["/bseg/morphology.py", "/bseg/bunsetsu.py"], "/tests/test_bseg.py": ["/bseg/bseg.py"]} |
63,813 | jun-harashima/bseg | refs/heads/master | /bseg/morphology.py | class Morphology:
def __init__(self, line):
surface, rest = line.split("\t")
features = rest.split(",")
self.surface = surface
self.part_of_speech = features[0]
self.part_of_speech1 = features[1]
self.part_of_speech2 = features[2]
self.part_of_speech3 = features[3]
self.conjugation_type = features[4]
self.conjugation_form = features[5]
self.base_form = features[6]
self.reading = features[7]
self.pronunciation = features[8]
def isfunction(self):
return self.part_of_speech in ["助詞", "記号"]
| {"/tests/test_bunsetsu.py": ["/bseg/morphology.py", "/bseg/bunsetsu.py"], "/tests/test_morphology.py": ["/bseg/morphology.py"], "/bseg/bseg.py": ["/bseg/morphology.py", "/bseg/bunsetsu.py"], "/tests/test_bseg.py": ["/bseg/bseg.py"]} |
63,814 | jun-harashima/bseg | refs/heads/master | /bseg/bunsetsu.py | class Bunsetsu:
def __init__(self, morps):
self.morphologies = morps
self.surface = "".join([morp.surface for morp in morps])
def ispredicate(self):
return any([morp.part_of_speech in ["動詞", "形容詞", "判定詞"]
for morp in self.morphologies])
| {"/tests/test_bunsetsu.py": ["/bseg/morphology.py", "/bseg/bunsetsu.py"], "/tests/test_morphology.py": ["/bseg/morphology.py"], "/bseg/bseg.py": ["/bseg/morphology.py", "/bseg/bunsetsu.py"], "/tests/test_bseg.py": ["/bseg/bseg.py"]} |
63,815 | jun-harashima/bseg | refs/heads/master | /scripts/knp_to_bunsetsu_segmentation_example.py | import sys
import re
def main(knp_dir, filelist_file):
    """Convert every KNP file listed in *filelist_file* (paths relative to
    *knp_dir*) into bunsetsu-segmentation training examples on stdout."""
    files = read(filelist_file)
    for file in files:
        write_example(knp_dir + '/' + file)
def read(filelist_file):
    """Return the lines of *filelist_file* with trailing whitespace stripped."""
    with open(filelist_file) as f:
        return [line.rstrip() for line in f]
def write_example(knp_file):
    """Print one space-separated example per sentence from a KNP analysis file.

    Each token is "surface/pos/flag" where flag is Y if a bunsetsu boundary
    follows the token (i.e. a '*' line or EOS comes next), else N.
    """
    with open(knp_file) as f:
        results = []
        for line in f:
            if re.match(r'[#+]', line):
                # comment / tag lines carry no tokens
                pass
            elif re.match(r'\*', line):
                # bunsetsu marker: flip the previous token's flag to Y
                if len(results) == 0:
                    continue
                results[-1] = re.sub(r'/N$', r'/Y', results[-1])
            elif line == 'EOS\n':
                # end of sentence: close the last bunsetsu and emit the example
                if len(results) == 0:
                    continue
                results[-1] = re.sub(r'/N$', r'/Y', results[-1])
                print(' '.join(results))
                results = []
            else:
                # morpheme line: column 0 is the surface, column 3 the POS
                array = line.rstrip().split()
                results.append(array[0] + '/' + array[3] + '/N')
if __name__ == '__main__':
args = sys.argv
main(args[1], args[2])
| {"/tests/test_bunsetsu.py": ["/bseg/morphology.py", "/bseg/bunsetsu.py"], "/tests/test_morphology.py": ["/bseg/morphology.py"], "/bseg/bseg.py": ["/bseg/morphology.py", "/bseg/bunsetsu.py"], "/tests/test_bseg.py": ["/bseg/bseg.py"]} |
63,816 | jun-harashima/bseg | refs/heads/master | /tests/__init__.py | # -*- coding: utf-8 -*-
"""Unit test package for bseg."""
| {"/tests/test_bunsetsu.py": ["/bseg/morphology.py", "/bseg/bunsetsu.py"], "/tests/test_morphology.py": ["/bseg/morphology.py"], "/bseg/bseg.py": ["/bseg/morphology.py", "/bseg/bunsetsu.py"], "/tests/test_bseg.py": ["/bseg/bseg.py"]} |
63,859 | rebelnz/torcms | refs/heads/master | /adminmodules.py | import tornado.web
import forms
import db
from time import strftime
class AdminSideNavModule(tornado.web.UIModule):
    """Renders the admin side navigation (static template, no data)."""

    def render(self):
        return self.render_string('admin/admin_side_nav.html')
class AdminTopNavModule(tornado.web.UIModule):
    """Renders the admin top navigation from a (url, label, icon) tuple list."""

    def render(self):
        # declared here so the template can mark the active item and new
        # entries can be added in one place
        nav_items = (
            ("/admin", "Admin", "icon-lock"),
            ("/admin/settings/site", "Settings", "icon-cog"),  # /site url default
            ("/admin/users", "Users", "icon-user"),
            ("/admin/messages", "Messages", "icon-inbox"),
            ("/", "View Site", "icon-eye-open")
        )
        return self.render_string('admin/admin_top_nav.html',
                                  nitems=nav_items)
class AdminSettingsNavModule(tornado.web.UIModule):
    """Renders the settings sub-navigation as (url, label) pairs."""

    def render(self):
        nav_items = (
            ("/admin/settings/site", "Site"),
            ("/admin/settings/map", "Map"),
            ("/admin/settings/social", "Social"),
            ("/admin/settings/home", "Home Page"),
            ("/admin/settings/analytics", "Analytics"),
            ("/admin/settings/data", "Data"),
            ("/admin/settings/campaign", "Campaign"),
            ("/admin/settings/calendar", "Calendar"),
        )
        return self.render_string('admin/uimodules/admin_settings_nav.html',
                                  nitems=nav_items)

    # def css_files(self):
    #     return "/static/css/admin-settings-site.css"
# settings module
class AdminSettingsSiteModule(tornado.web.UIModule):
def render(self):
data = db.get_site_settings()
if data:
settings = data
settings["updated"] = data["updated"].ctime() #format time
else:
settings = False
return self.render_string('admin/uimodules/admin_settings_site.html',
settings=settings,
)
#settings form
# settings form
class AdminSettingsSiteFormModule(tornado.web.UIModule):
    """Renders the site-settings form, pre-filled from the DB when data exists."""

    def render(self):
        data = db.get_site_settings()
        if data:  # repopulate form
            form = forms.AdminSettingsSiteForm(
                sitename=data["sitename"],
                contact=data["contact"],
                tagline=data["tagline"],
                timezone=data["timezone"]
            )
        else:
            form = forms.AdminSettingsSiteForm()  # from forms file
        return self.render_string('admin/forms/admin_settings_site_form.html',
                                  form=form,
                                  )
# map module
class AdminSettingsMapModule(tornado.web.UIModule):
def render(self):
form = forms.AdminSettingsAddressForm() # from forms file
return self.render_string('admin/uimodules/admin_settings_map.html')
def css_files(self):
return ['/static/css/admin-settings-map.css',
'http://cdn.leafletjs.com/leaflet-0.5/leaflet.css']
def javascript_files(self):
return ['http://cdn.leafletjs.com/leaflet-0.5/leaflet.js',
'/static/js/admin-map.js','/static/js/config.js']
# address module
class AdminSettingsAddressFormModule(tornado.web.UIModule):
def render(self):
data = db.get_address_settings()
if data: #repopulate form
form = forms.AdminSettingsAddressForm(
address=data["address"],
suburb=data["suburb"],
city=data["city"],
zipcode=data["zipcode"]
)
else:
form = forms.AdminSettingsAddressForm() # from forms file
return self.render_string('admin/forms/admin_settings_address_form.html',
form=form,
)
# social module
class AdminSettingsSocialModule(tornado.web.UIModule):
def render(self):
data = db.get_social_settings()
if data:
settings = data
settings["updated"] = data["updated"].ctime() #format time
else:
settings = False
return self.render_string('admin/uimodules/admin_settings_social.html',
settings=settings,
)
# form
class AdminSettingsSocialFormModule(tornado.web.UIModule):
def render(self):
data = db.get_social_settings()
if data: #repopulate form
form = forms.AdminSettingsSocialForm(
facebook=data["facebook"],
googleplus=data["googleplus"],
kakao=data["kakao"],
twitter=data["twitter"],
linkedin=data["linkedin"]
)
else:
form = forms.AdminSettingsSocialForm() # from forms file
return self.render_string('admin/forms/admin_settings_social_form.html',
form=form,
)
class AdminSettingsCalendarModule(tornado.web.UIModule):
    # Static placeholder panel: no dynamic data, just the template.
    def render(self):
        """Render the calendar settings panel."""
        return self.render_string('admin/uimodules/admin_settings_calendar.html')
class AdminSettingsCampaignModule(tornado.web.UIModule):
    # Static placeholder panel: no dynamic data, just the template.
    def render(self):
        """Render the campaign settings panel."""
        return self.render_string('admin/uimodules/admin_settings_campaign.html')
class AdminSettingsDataModule(tornado.web.UIModule):
    # Static placeholder panel: no dynamic data, just the template.
    def render(self):
        """Render the data settings panel."""
        return self.render_string('admin/uimodules/admin_settings_data.html')
class AdminSettingsAnalyticsModule(tornado.web.UIModule):
    # Analytics panel; also injects the client-side tracker script.
    def render(self):
        """Render the analytics settings panel."""
        return self.render_string('admin/uimodules/admin_settings_analytics.html')
    def javascript_files(self):
        # Script appended to the page by Tornado's UIModule machinery.
        return ['/static/js/tracker.js']
class AdminPagesFormModule(tornado.web.UIModule):
    """Renders a blank 'add page' form for the admin pages screen."""
    def render(self):
        pages_form = forms.AdminPagesForm()
        return self.render_string('admin/forms/admin_pages_add_form.html',
                                  form=pages_form)
| {"/adminmodules.py": ["/forms.py", "/db.py"], "/uimodules.py": ["/forms.py", "/db.py"], "/settings.py": ["/uimodules.py", "/adminmodules.py"]} |
63,860 | rebelnz/torcms | refs/heads/master | /uimodules.py | import tornado.web
import forms
import db
from time import strftime
from pprint import pprint
class Form(tornado.web.UIModule):
    """
    Generic form rendering module. Works with wtforms.

    Use this in your template code as:

    {% module Form(form) %}

    where `form` is a wtforms.Form object. Note that this module does not render
    <form> tag and any buttons.
    """
    def render(self, form):
        """Render *form* (a wtforms.Form) using the shared form template."""
        return self.render_string('uimodules/form.html', form=form)
class NavModule(tornado.web.UIModule):
    # Site navigation bar; purely template-driven.
    def render(self):
        """Render the shared navigation template."""
        return self.render_string('uimodules/nav.html')
# #embed map/listing edit js only if we need it
# def javascript_files(self):
# js_scripts = ['http://maps.googleapis.com/maps/api/js?\
# key=<apikey>&sensor=true',
# '/static/js/listing_edit.js']
# return js_scripts
# class ListingDetailModule(tornado.web.UIModule):
# def render(self,listing):
# return self.render_string('uimodules/detail.html',listing=listing)
# class HomepageListingDetailModule(tornado.web.UIModule):
# def render(self,listing):
# return self.render_string('uimodules/homepage_detail.html',listing=listing)
# class SuperListingDetailModule(tornado.web.UIModule):
# def render(self,listing):
# return self.render_string('uimodules/super_detail.html',listing=listing)
# def css_files(self):
# return "/static/css/super.css"
| {"/adminmodules.py": ["/forms.py", "/db.py"], "/uimodules.py": ["/forms.py", "/db.py"], "/settings.py": ["/uimodules.py", "/adminmodules.py"]} |
63,861 | rebelnz/torcms | refs/heads/master | /forms.py | from wtforms import *
from wtforms.validators import *
from wtforms.widgets import *
from datetime import date
import pytz
from util import MultiValueDict
class BaseForm(Form):
    """wtforms Form that can populate itself from a Tornado request handler."""
    def __init__(self, handler=None, obj=None, prefix='', formdata=None, **kwargs):
        # When a handler is supplied, rebuild formdata from the request
        # arguments so wtforms sees repeated (multi-valued) fields correctly;
        # an explicitly passed ``formdata`` is ignored in that case.
        if handler:
            formdata = MultiValueDict()
            for name in handler.request.arguments.keys():
                formdata.setlist(name, handler.get_arguments(name))
        Form.__init__(self, formdata, obj=obj, prefix=prefix, **kwargs)
class LoginForm(BaseForm):
    # Login credentials; both fields required, email must be well-formed.
    email = TextField('Email', [Required(), Email()])
    password = PasswordField('Password', [Required()])
class AdminSettingsSiteForm(BaseForm):
    # Core site identity settings; starred labels mark required fields.
    sitename = TextField(u'Site Name*', [Required()])
    contact = TextField(u'Contact*', [Required()])
    tagline = TextField(u'Tag Line')
    # Timezone choices come from pytz's common-timezone list.
    timezone = SelectField('Timezone',
        choices = [(tz, tz) for tz in pytz.common_timezones],
        coerce=unicode, description="Timezone"
        )
class AdminSettingsAddressForm(BaseForm):
    # Postal address used by the admin map/settings pages.
    address = TextField(u'Address*', [Required()])
    suburb = TextField(u'Suburb')
    city = TextField(u'City')
    zipcode = TextField(u'Zip Code*', [Required()])
class AdminSettingsSocialForm(BaseForm):
    # Social network handles/links; all optional.
    facebook = TextField(u'Facebook')
    googleplus = TextField(u'Google+')
    kakao = TextField(u'Kakao')
    twitter = TextField(u'Twitter')
    linkedin = TextField(u'Linkedin')
class AdminPagesForm(BaseForm):
    # CMS page editor fields; only the title is mandatory.
    slug = TextField(u'Slug')
    title = TextField(u'Title*', [Required()])
    subtitle = TextField(u'Heading')
    summary = TextAreaField()
    content = TextAreaField()
# twitter = TextField(u'Twitter')
# linkedin = TextField(u'Linkedin')
| {"/adminmodules.py": ["/forms.py", "/db.py"], "/uimodules.py": ["/forms.py", "/db.py"], "/settings.py": ["/uimodules.py", "/adminmodules.py"]} |
63,862 | rebelnz/torcms | refs/heads/master | /settings.py | #settings
import os.path
import uimodules
import adminmodules
from tornado.options import define
# Command-line options, registered with tornado.options.
define("port", default=8888, help="run on given port", type=int)
define("config", default=None, help="tornado config file")
define("debug", default=False, help="debug mode")

# Application settings passed to tornado.web.Application.
settings = dict(
    template_path = os.path.join(os.path.dirname(__file__), 'templates'),
    static_path = os.path.join(os.path.dirname(__file__), 'static'),
    upload_path = os.path.join(os.path.dirname(__file__), 'static/upload'),
    xsrf_cookies = True,
    # SECURITY: hard-coded cookie secret checked into source control —
    # should be loaded from an environment variable or untracked config.
    cookie_secret = "11oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2Xdwe1o%Vo",
    login_url = "/login",
    autoescape = None,  # NOTE(review): disables template auto-escaping (XSS risk)
    ui_modules = [uimodules, adminmodules],
    debug = True  # NOTE(review): hard-coded; shadows the "debug" option above
)
| {"/adminmodules.py": ["/forms.py", "/db.py"], "/uimodules.py": ["/forms.py", "/db.py"], "/settings.py": ["/uimodules.py", "/adminmodules.py"]} |
63,863 | rebelnz/torcms | refs/heads/master | /urls.py | import tornado.web
import tornado.gen
import tornado.httpclient
import db
import forms
import simplejson
from util import MultiValueDict
from pprint import pprint
class BaseHandler(tornado.web.RequestHandler):
    """Common base for all handlers; exposes the application's DB handle."""
    @property
    def db(self):
        # Shared database object attached to the Application instance.
        return self.application.db
    # def get_current_user(self):
    #     user_id = self.get_secure_cookie("user_id")
    #     user = db.get_user(user_id)
    #     if not user: return None
    #     return self.get_secure_cookie("user_id")
    # def somemeth(self): #to pass to css active nav
    #     somevar = self.request.uri
    #     return somevar
class IndexHandler(BaseHandler):
    """Serves the public landing page."""
    def get(self):
        self.render('index.html')
class LoginHandler(BaseHandler):
    """Shows the login form; form submission handling is not implemented yet."""
    def get(self):
        form = forms.LoginForm()
        self.render('login.html', form=form)
    # def post(self):
    #     form = forms.RegistrationForm(MultiValueDict(self.request.arguments))
    #     if form.validate():
    #         pprint(MultiValueDict(self.request.arguments))
    #     else:
    #         pprint(form.errors)
    #         print("invalid")
    #     self.render('index.html',form=form)
class AdminHandler(BaseHandler):
    """Admin dashboard landing page."""
    def get(self):
        # pt = self.page_url()
        # pprint(dir(self))
        self.render('admin/admin_index.html')
class AdminSettingsHandler(BaseHandler):
    """Renders and saves the admin settings sub-pages (site/social/map/...)."""
    # form is being pulled in by sMod
    def get(self, sModule=None):  # sModule from url /settings/[sModule]
        # NOTE(review): "savemap" persists data on a GET request — a
        # state-changing GET; consider moving it to the POST branch.
        if sModule == "savemap":
            data = {'loc': [  # TODO - check latlong are saved as int?
                {'latitude': self.get_argument('latitude')},
                {'longitude': self.get_argument('longitude')}]
            }
            db.add_map_data(data)
        self.render('admin/admin_settings.html', sMod=sModule)

    def post(self, sModule=None):
        """Validate and persist the submitted settings form for *sModule*."""
        # pprint(self.request.arguments)
        if sModule == "site":
            form = forms.AdminSettingsSiteForm(self)
            if form.validate():
                db.add_site_data(form.data)
            else:  # TODO form error handling
                pprint(form.errors)
                print("invalid")
        elif sModule == "social":
            form = forms.AdminSettingsSocialForm(self)
            if form.validate():
                db.add_social_data(form.data)
            else:  # TODO form error handling
                pprint(form.errors)
                print("invalid")
        elif sModule == "map":  # map added from AdminJsonGetMapHandler
            form = forms.AdminSettingsAddressForm(self)
            if form.validate():
                db.add_address_data(form.data)
            else:  # TODO form error handling
                pprint(form.errors)
                print("invalid")
        self.render('admin/admin_settings.html', sMod=sModule)
class AdminJsonGetMapHandler(BaseHandler):
    """Returns the saved map coordinates as a JSON payload."""
    def get(self):
        coords = db.get_map_data()
        self.write(simplejson.dumps(coords))
class AdminPagesHandler(BaseHandler):
    """Admin CMS pages screen; sModule selects a sub-view."""
    def get(self, sModule=None):
        self.render('admin/admin_pages.html', sMod=sModule)
class AdminUsersHandler(BaseHandler):
    """Admin user-management page; uModule selects a sub-view."""
    def get(self, uModule=None):
        print uModule  # debug output (Python 2 print statement)
        self.render('admin/admin_users.html')
class AdminMessagesHandler(BaseHandler):
    """Admin messages screen."""
    def get(self):
        self.render('admin/admin_messages.html')
class JsonTrackerHandler(BaseHandler):
    """Echoes client tracker data back as JSON."""
    def get(self):
        # Return all query arguments verbatim.
        tdata = self.request.arguments
        # jdata = {'innaWidth': self.get_argument('innerW')},
        self.write(simplejson.dumps(tdata))
    def post(self):
        # Bug fix: the previous version had a trailing comma after the dict
        # literal, making ``jdata`` a 1-tuple, so the response serialized as a
        # JSON array ([{...}]) instead of the intended object ({...}).
        jdata = {'innaWidth': self.get_argument('innerW')}
        self.write(simplejson.dumps(jdata))
class TrackerPngHandler(BaseHandler):
    """Tracking-pixel endpoint; the geo-IP analytics lookup is stubbed out."""
    @tornado.gen.coroutine
    def get(self):
        # analytics = {}
        # http_client = tornado.httpclient.AsyncHTTPClient()
        # response = yield http_client.fetch("http://freegeoip.net/json/203.132.164.25")
        # analytics['ipdata'] = simplejson.loads(response.body)
        # analytics['reqtime'] = simplejson.dumps(self.request.request_time())
        # analytics['headers'] = simplejson.dumps(self.request.headers)
        # db.add_analytics(analytics)
        # print(analytics)
        pass
# URL routing table: maps request paths to their handlers.
handlers = [
    (r"/", IndexHandler),
    (r"/login", LoginHandler),
    (r"/admin", AdminHandler),
    (r"/admin/settings/([^/]+)", AdminSettingsHandler),
    (r"/admin/users/([^/]+)", AdminUsersHandler),
    (r"/admin/messages", AdminMessagesHandler),
    (r"/admin/pages/([^/]+)", AdminPagesHandler),
    (r"/admin/json/getmap", AdminJsonGetMapHandler),
    (r"/json/tracker", JsonTrackerHandler),
    (r"/tracker.png", TrackerPngHandler),
]
| {"/adminmodules.py": ["/forms.py", "/db.py"], "/uimodules.py": ["/forms.py", "/db.py"], "/settings.py": ["/uimodules.py", "/adminmodules.py"]} |
63,864 | rebelnz/torcms | refs/heads/master | /db.py | #db file
import pymongo
import unicodedata
import re
import os
import time
import bson
import datetime
from pprint import pprint
# Module-level MongoDB handle shared by all helpers below.
# NOTE(review): pymongo.Connection is the legacy client class (removed in
# pymongo 3.x); MongoClient is the modern replacement — confirm the pin.
connection = pymongo.Connection("localhost", 27017)
database = connection.torcms
def add_site_data(site_data):
    """Replace the single site-settings document with *site_data*."""
    # Singleton pattern: drop any existing settings doc before inserting.
    if database.site_settings.find_one():
        database.site_settings.remove({})
    site_data['updated'] = datetime.datetime.now()
    database.site_settings.insert(site_data)
    return site_data
def get_site_settings():
    """Return the current site-settings document, or None if unset."""
    return database.site_settings.find_one()
def add_address_data(address_data):
    """Replace the single address-settings document with *address_data*."""
    # Singleton pattern: drop any existing address doc before inserting.
    if database.address_settings.find_one():
        database.address_settings.remove({})
    address_data['updated'] = datetime.datetime.now()
    database.address_settings.insert(address_data)
    return address_data
def get_address_settings():
    """Return the current address-settings document, or None if unset."""
    return database.address_settings.find_one()
def add_map_data(map_data):
    """Replace the single map-settings document with *map_data*."""
    old_map_data = database.map_settings.find_one()
    if old_map_data:
        database.map_settings.remove({})
    map_data['updated'] = datetime.datetime.now()
    # Consistency fix: use insert() like every other add_*_data helper.
    # The collection is emptied above and map_data carries no _id, so
    # save() and insert() were equivalent here anyway.
    database.map_settings.insert(map_data)
    return map_data
def get_map_data():
    """Return the stored ``loc`` list of lat/lng dicts, or {} if none saved."""
    doc = database.map_settings.find_one()
    if not doc:
        return {}
    # e.g. [{"latitude": "37.502..."}, {"longitude": "126.779..."}]
    return doc['loc']
def add_social_data(data):
    """Replace the single social-settings document with *data*."""
    # Singleton pattern: drop any existing social doc before inserting.
    if database.social_settings.find_one():
        database.social_settings.remove({})
    data['updated'] = datetime.datetime.now()
    database.social_settings.insert(data)
    return data
def get_social_settings():
    """Return the current social-settings document, or None if unset."""
    return database.social_settings.find_one()
def add_analytics(data):
    """Insert an analytics event document, stamped with the current time."""
    data['updated'] = datetime.datetime.now()
    database.analytics.insert(data)
    return
| {"/adminmodules.py": ["/forms.py", "/db.py"], "/uimodules.py": ["/forms.py", "/db.py"], "/settings.py": ["/uimodules.py", "/adminmodules.py"]} |
63,873 | jcalven/hotels-scraper | refs/heads/master | /hotscrape/utils.py | import yaml
from configparser import ConfigParser
def load_schema(file_path):
    """Load the YAML schema at *file_path* and return it as Python objects.

    Args:
        file_path (str): Path to the YAML schema file.

    Returns:
        The deserialized YAML content (typically a dict).
    """
    # The redundant f-string wrapper around file_path was removed.
    with open(file_path) as file:
        # FullLoader converts YAML scalars to Python types without
        # allowing arbitrary object construction.
        return yaml.load(file, Loader=yaml.FullLoader)
def read_search_config(search_file):
    """Parse an INI search-config file into a list of per-section dicts.

    Args:
        search_file (str): Path to the config file (missing files yield []).

    Returns:
        list[dict]: One dict of option-name -> value per section, in file order.
    """
    config = ConfigParser()
    config.read(search_file)
    # The previous version called config.sections() once and discarded the
    # result; that dead call was removed.
    return [dict(config.items(s)) for s in config.sections()]
63,874 | jcalven/hotels-scraper | refs/heads/master | /tests/test_base.py | import yaml
import hotscrape.scraper as hs
from hotscrape.parser import parse
class TestBase():
    """Shared fixtures for the test suite: a canned search and the DB schema."""
    # Example hotels.com search used by every scraper/sql test.
    search_dict = {
        "destination": {"city": "Las Vegas", "state": "Nevada", "country": "United States of America"},
        "checkin_datetime": "2020-06-30",
        "checkout_datetime": None,
        "price_min": 0,
        "price_max": 10000,
        "price_multiplier": 1,
        "star_rating_min": 1,
        "star_rating_max": 5,
        "guest_rating_min": 1,
        "guest_rating_max": 9,
        "distance_centre": None,
        "rooms": 1,
        "adults": 2,
        "children": 0,
        "currency": "USD",
    }
    # NOTE(review): the schema is loaded relative to the CWD at import time,
    # so tests must be run from the directory containing db_schema.yml.
    schema_path = "./"
    with open(f"{schema_path}/db_schema.yml") as file:
        # The FullLoader parameter handles the conversion from YAML
        # scalar values to Python the dictionary format
        schema = yaml.load(file, Loader=yaml.FullLoader)
63,875 | jcalven/hotels-scraper | refs/heads/master | /hotscrape/scraper.py | import pandas as pd
from bs4 import BeautifulSoup
# from requests_futures.sessions import FuturesSession
import time
import re
import logging
from datetime import datetime
from selenium.webdriver import Firefox
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.options import Options
from . parser import parse
logger = logging.getLogger("hotels-scraper.scraper.scraper")
class Scraper(object):
    """Base scraper: turns search parameters plus parsed page data into the
    two DataFrames ("search" and "hotels") that are upserted into the DB.

    Subclasses must provide generate_url(), get_hotels_page() and
    get_attributes() (see HotelsScraper).
    """

    def get_dfs(self, search_dict, attributes_dict):
        """
        Takes a `search_dict` containing search parameters and a `attributes_dict` dictionary containing
        parsed hotels.com data and creates a Pandas DataFrame from each.

        The database consists of two tables: `search` and `hotels`. The two dataframes created are upserted
        into each respectively.
        """
        ### Processing `search_dict`
        # Expand `destination` field
        # NOTE(review): mutates the caller's search_dict in place.
        tmp_dict = {key: val for key, val in search_dict["destination"].items()}
        search_dict.update(tmp_dict)
        del search_dict["destination"]
        # Add search timestamp
        search_dict["search_datetime"] = datetime.now()
        # Create search dataframe
        df_search = pd.DataFrame(search_dict, index=[0])
        # Create primary key from hashed dataframe
        # (modulo 0xffffffff keeps the hash within 32 bits)
        primary_key = pd.util.hash_pandas_object(df_search, index=False)[0] % 0xffffffff
        df_search["id"] = primary_key.astype(int)
        df_search.set_index("id", drop=True, inplace=True)
        # Create new, derived, fields
        df_search["days_from_search"] = (df_search["checkin_datetime"] - df_search["search_datetime"]).dt.days
        df_search["nights"] = (df_search["checkout_datetime"] - df_search["checkin_datetime"]).dt.days
        ### Processing `attributes_dict`
        # Create attributes dataframe
        df_attributes = parse(pd.DataFrame(attributes_dict))
        # Add primary_key to attributes dataframe (foreign key to "search")
        df_attributes["search_id"] = primary_key
        # Create another primary key
        primary_key = pd.util.hash_pandas_object(df_attributes, index=False) % 0xffffffff
        df_attributes["id"] = primary_key.astype(int)
        # Drop rows with non-unique id's
        df_attributes.drop_duplicates(subset=["id"], inplace=True)
        df_attributes.set_index("id", drop=True, inplace=True)
        return df_search, df_attributes

    def ensure_search_format(self, search_dict):
        """
        Checks search dictionary formatting and required datatypes.
        """
        msg = f"[~] Search parameters:\n\t {search_dict}\n"
        logger.info(msg)
        print(msg)
        # Check destination formatting
        # NOTE(review): assert is stripped under ``python -O``; explicit
        # raises would make this validation unconditional.
        assert search_dict.get("destination") is not None
        assert search_dict.get("destination").get("city") is not None
        assert search_dict.get("destination").get("state") is not None
        assert search_dict.get("destination").get("country") is not None
        # Check checkin/checkout formatting
        search_dict["checkin_datetime"] = pd.to_datetime(search_dict.get("checkin_datetime"))
        if not search_dict.get("checkout_datetime"):
            # Default to a one-night stay when no checkout date is given.
            search_dict["checkout_datetime"] = search_dict.get("checkin_datetime") + pd.DateOffset(1)
        return search_dict

    def run(self, search):
        """
        Top-level function for running the parser.

        Returns (df_search, df_attributes); both are empty DataFrames when
        the page could not be scraped.
        """
        logger.info("\n\n")
        logger.info("Scraper initiated")
        search_dict = self.ensure_search_format(search)
        url = self.generate_url(**search_dict)
        soup = self.get_hotels_page(url)
        if soup:
            res = self.get_attributes(soup, **search_dict)
            df_search, df_attributes = self.get_dfs(search_dict, res)
            return df_search, df_attributes
        else:
            # get_hotels_page returns None on scraping failure.
            return pd.DataFrame(), pd.DataFrame()
class HotelsScraper(Scraper):
    """Scraper implementation for hotels.com search-result pages."""

    # Map of logical field name -> (html tag, class attribute) used by
    # postprocess_soup() when extracting listing attributes.
    feature_html_details = {"name": ("h3", "p-name"),
                            "address": ("span", "address"),
                            # "maplink": ("a", "map-link xs-welcome-rewards"),
                            "landmarks": ("ul", "property-landmarks"),
                            "amenities": ("ul", "hmvt8258-amenities"),
                            "details": ("div", "additional-details resp-module"),
                            "review_box": ("div", "details resp-module"),
                            "rating": ("strong", re.compile("guest-reviews-badge.*")),
                            "num_reviews": ("span","small-view"),
                            "price": ("aside", re.compile("pricing resp-module.*")),
                            "star_rating": ("span", "star-rating-text")}

    def get_hotels_page(self, url, max_scroll=100, max_scroll_global=35):
        """
        Takes an url from Hotels.com and infinitely scrolls down to end of page until no more content can be loaded.

        Args:
            url (str): hotels.com URL
            max_scroll (int, optional): Max number of webpage scrolls. Defaults to 20.

        Returns:
            bs4: Parsed website (or None on failure)
        """
        # Open up chrome in incognito
        # chrome_options = webdriver.ChromeOptions()
        # chrome_options.add_argument("--incognito")
        # driver = webdriver.Chrome()
        logger.info("Opening URL\n")
        # Headless, private-browsing Firefox session via geckodriver.
        options = Options()
        options.add_argument("--private")
        options.add_argument("--headless")
        driver = Firefox(executable_path="geckodriver", options=options)
        driver.set_window_size(1920,1080)
        # Nagivate to url
        driver.get(url)
        msg = "[~] Start scraping ..."
        logger.info(msg)
        print(msg)
        # Scroll down until the end of the page
        scroll_count = 0
        scroll_count_global = 0
        try:
            while True:
                # print(scroll_count)
                driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
                try:
                    # While the "loading" spinner is visible, more results are
                    # coming; reset the local counter so we keep waiting.
                    if driver.find_element_by_id("listings-loading").value_of_css_property("display") == "block":
                        scroll_count = 0
                    else:
                        time.sleep(0.5)
                        scroll_count += 1
                        scroll_count_global += 1
                        print(f"[~] Scroll count: {scroll_count_global}")
                except:
                    # Spinner element not found — count it as a plain scroll.
                    scroll_count += 1
                    print(f"[~] Scroll count (cont): {scroll_count_global} {scroll_count}")
                    if scroll_count >= max_scroll:
                        msg = f"[~] Reached maximum number of page loads ({scroll_count}/{max_scroll}). Stopping ..."
                        logger.info(msg)
                        print(msg)
                        break
                    else:
                        continue
                # print(f"[~] Scroll count (outside): {scroll_count_global} {scroll_count}")
                # The trailing "info" element becomes visible once all
                # listings are loaded.
                if any([cur_elem.is_displayed() for cur_elem in driver.find_elements_by_class_name("info")]):
                    msg = "[~] Scraping ended"
                    logger.info(msg)
                    print(msg)
                    break
                if scroll_count >= max_scroll:
                    msg = f"[~] Reached maximum number of page loads ({scroll_count}/{max_scroll}). Stopping ..."
                    logger.info(msg)
                    print(msg)
                    break
                if scroll_count_global >= max_scroll_global:
                    msg = f"[~] Reached maximum number of page loads ({scroll_count_global}/{max_scroll_global}). Stopping ..."
                    logger.info(msg)
                    print(msg)
                    break
        except Exception as e:
            logger.error(e)
            return None
        # Grabs the html of the fully scrolled-down page and parse it with BeautifulSoup
        # innerHTML = driver.execute_script("return document.body.innerHTML")
        parsed_html = BeautifulSoup(driver.page_source, "lxml")
        driver.close()
        driver.quit()
        return parsed_html

    def generate_url(self, destination, checkin_datetime, checkout_datetime=None, price_min=0, price_max=10000, price_multiplier=1,
                     star_rating_min=1, star_rating_max=5, guest_rating_min=1, guest_rating_max=9, distance_centre=None,
                     rooms=1, adults=2, children=0, currency="USD"):
        """
        Takes hotel search parameters and returns a hotels.com URL string.
        """
        #https://www.hotels.com/search.do?resolved-location=CITY%3A1504033%3AUNKNOWN%3AUNKNOWN&f-price-currency-code=USD&f-price-multiplier=1&f-price-min=30&f-price-max=395&f-star-rating=5,4,3,2,1&f-guest-rating-min=2&f-guest-rating-max=9&f-distance=2.0&f-lid=1504033&destination-id=1504033&q-destination=Las%20Vegas,%20Nevada,%20United%20States%20of%20America&q-check-in=2020-05-13&q-check-out=2020-05-14&q-rooms=1&q-room-0-adults=2&q-room-0-children=0&sort-order=DISTANCE_FROM_LANDMARK
        # Concatenate star rating list into string of correct format
        # (e.g. "5,4,3,2,1" for min=1, max=5)
        star_rating = "".join(map(lambda x: str(x)+",", range(star_rating_max,star_rating_min-1,-1))).rstrip(",")
        # Format destination dict (URL-encode spaces only)
        destination = {key: val.replace(" ", "%20") for key, val in destination.items()}
        dest_field_1 = destination.get("city")
        dest_field_2 = destination.get("state")
        dest_field_3 = destination.get("country")
        # Format checkin/checkout dates
        checkin_date = checkin_datetime.strftime("%Y-%m-%d")
        checkout_date = checkout_datetime.strftime("%Y-%m-%d")
        url = "".join([
            "https://www.hotels.com/search.do?",
            f"f-price-currency-code={currency}&",
            f"f-price-multiplier={price_multiplier}&",
            f"f-price-min={price_min}&",
            f"f-price-max={price_max}&",
            f"f-star-rating={star_rating}&",
            f"f-guest-rating-min={guest_rating_min}&",
            f"f-guest-rating-max={guest_rating_max}&",
            f"f-distance={distance_centre}&" if distance_centre else "",
            f"q-destination={dest_field_1},%20{dest_field_2},%20{dest_field_3}&",
            f"q-check-in={checkin_date}&",
            f"q-check-out={checkout_date}&",
            f"q-rooms={rooms}&",
            f"q-room-0-adults={adults}&",
            f"q-room-0-children={children}"
        ])
        msg = f"[~] Searching url:\n\t {url}\n"
        logger.info(msg)
        print(msg)
        return url

    # def get_content_list(soup, tag, class_):
    #     """
    #     Takes a bs4 object and parses it based on tag and class properties.
    #     Returns list of parsed bs4 object contents.
    #     """
    #     return postprocess_soup(soup, tag, class_)

    def postprocess_soup(self, soup, tag, class_):
        """
        Takes a bs4 object and parses it based on tag and class properties.
        Returns list of parsed bs4 object contents.
        """
        # Hotel names serve as the reference count: one entry per listing.
        raw_base = soup.find_all("h3", {"class": "p-name"})
        raw = soup.find_all(tag, {"class": class_})
        if len(raw) != len(raw_base):
            # Ugly hack to fill missing amenities with NaNs
            # Ensures that each field (list) are same length even if hotel doesn't have any amenities listed
            # NOTE: Might need to do for other fields as well
            if class_ == "hmvt8258-amenities":
                raw_tmp = soup.find_all("div", {"class": "additional-details resp-module"})
                raw_list = []
                for content in raw_tmp:
                    flag = False
                    for subcont in content:
                        if class_ in subcont:
                            flag = True
                            break
                    if flag:
                        raw_list.append(subcont)
                    else:
                        raw_list.append(None)
        else:
            raw_list = [content.text for content in raw]
        return raw_list

    def get_attributes(self, soup, **search_dict):
        """
        Collects parsed hotels.com webpage data into a dictionary
        (one key per entry of feature_html_details).
        """
        attributes_dict = {key: self.postprocess_soup(soup, self.feature_html_details[key][0], self.feature_html_details[key][1]) for key in self.feature_html_details}
        return attributes_dict
# class BookingsScraper(Scraper):
# feature_html_details = {"name": ("h3", "p-name"),
# "address": ("span", "address"),
# # "maplink": ("a", "map-link xs-welcome-rewards"),
# "landmarks": ("ul", "property-landmarks"),
# "amenities": ("ul", "hmvt8258-amenities"),
# "details": ("div", "additional-details resp-module"),
# "review_box": ("div", "details resp-module"),
# "rating": ("strong", re.compile("guest-reviews-badge.*")),
# "num_reviews": ("span","small-view"),
# "price": ("aside", re.compile("pricing resp-module.*")),
# "star_rating": ("span", "star-rating-text")}
# def generate_url(self, destination, checkin_datetime, checkout_datetime=None, price_min=0, price_max=10000, price_multiplier=1,
# star_rating_min=1, star_rating_max=5, guest_rating_min=1, guest_rating_max=9, distance_centre=None,
# rooms=1, adults=2, children=0, currency="USD", dest_type="city"):
# """
# Takes hotel search parameters and returns a hotels.com URL string.
# """
# # Concatenate star rating list into string of correct format
# # star_rating = "".join(map(lambda x: str(x)+",", range(star_rating_max,star_rating_min-1,-1))).rstrip(",")
# # Format destination dict
# destination = {key: val.replace(" ", "%20") for key, val in destination.items()}
# dest_field_1 = destination.get("city")
# # dest_field_2 = destination.get("state")
# # dest_field_3 = destination.get("country")
# url = "".join([
# "https://www.booking.com/searchresults.html?tmpl=searchresults&",
# f"checkin_month={checkin_date.month}&",
# f"checkin_monthday={checkin_date.day}&",
# f"checkin_year={checkin_date.year}&",
# f"checkout_month={checkout_date.month}&",
# f"checkout_monthday={checkout_date.day}&",
# f"checkout_year={checkout_date.year}&",
# "class_interval=1&",
# f"dest_type={dest_type}&", # dest_type=city # if distance_centre else "",
# "dtdisc=0",
# f"group_adults={adults}&",
# f"group_children={children}&",
# f"no_rooms={rooms}&",
# "postcard=0&",
# f"raw_dest_type={dest_type}",
# "sb_price_type=total",
# "shw_aparth=0",
# f"ss={dest_field_1}",
# f"nflt=class%3D1%3Bclass%3D2%3Bclass%3D3%3Bclass%3D4%3Bclass%3D5%3Bht_id%3D204"
# ])
# msg = f"[~] Searching url:\n\t {url}\n"
# logger.info(msg)
# print(msg)
# return url
# def get_hotels_page(self, url):
# """
# Takes an url from Hotels.com and infinitely scrolls down to end of page until no more content can be loaded.
# Args:
# url (str): hotels.com URL
# max_scroll (int, optional): Max number of webpage scrolls. Defaults to 20.
# Returns:
# bs4: Parsed website
# """
# session = FuturesSession()
# response = session.get(url)
# page = response.result()
# main_page = BeautifulSoup(page.text, "lxml")
# urls = [href["href"] for href in main_page.select(".bui-pagination__link.sr_pagination_link")]
# responses = session.get(urls)
# parsed_html_list = [BeautifulSoup(response.result().text, "lxml") for response in responses]
# return parsed_html_list
# def get_attributes(self, soup, **search_dict):
# """
# Collects parsed hotels.com webpage data into a dictionary
# """
# attributes_dict = {key: postprocess_soup(soup, self.feature_html_details[key][0], self.feature_html_details[key][1]) for key in self.feature_html_details}
# return attributes_dict
# feature_html_details = {"name": ("class", "sr-hotel__name"),
# # "address": ("span", "address"),
# # "maplink": ("a", "map-link xs-welcome-rewards"),
# # "landmarks": ("ul", "property-landmarks"),
# # "amenities": ("ul", "hmvt8258-amenities"),
# # "details": ("div", "additional-details resp-module"),
# "review_box": ("div", "details resp-module"),
# "rating": ("strong", re.compile("guest-reviews-badge.*")),
# "num_reviews": ("span","small-view"),
# "price": ("aside", re.compile("pricing resp-module.*")),
# "star_rating": ("span", "star-rating-text")} | {"/tests/test_base.py": ["/hotscrape/scraper.py", "/hotscrape/parser.py"], "/hotscrape/scraper.py": ["/hotscrape/parser.py"], "/tests/test_scraper.py": ["/tests/test_base.py"], "/tests/test_sql.py": ["/tests/test_base.py"], "/main.py": ["/hotscrape/scraper.py", "/hotscrape/utils.py", "/hotscrape/sql.py", "/hotscrape/search_parser.py"]} |
63,876 | jcalven/hotels-scraper | refs/heads/master | /hotscrape/sql.py | import yaml
import logging
import numpy as np
import sqlalchemy as sqlal
from sqlalchemy import Table, Column, Integer, String, Float, MetaData, ForeignKey, DateTime
logger = logging.getLogger("hotels-scraper.sql.sql")
class TableMaker(object):
    """
    Class for creating an SQLite database with a given schema.
    """
    def __init__(self, metadata=None):
        # Reuse a caller-supplied MetaData so several tables can share one.
        if metadata:
            self.metadata = metadata
        else:
            self.metadata = MetaData()

    def create_columns(self, schema):
        """Build sqlalchemy Column objects from one table's schema entry.

        *schema* is a two-element list: [{"meta": {...}}, {"columns": {...}}].
        """
        res = []
        meta = schema[0]["meta"]
        columns = schema[1]["columns"]
        for name, dtype in columns.items():
            # SECURITY: eval() turns the YAML dtype string ("Integer", ...)
            # into a sqlalchemy type object. Safe only while the schema file
            # is trusted; a lookup table of allowed types would be safer.
            if name == meta["primary_key"]:
                # NOTE(review): primary_key expects a bool; passing the
                # column name works because it is truthy, but
                # primary_key=True is the intended spelling.
                res.append(sqlal.Column(name, eval(dtype), nullable=False, primary_key=name))
            elif name == meta.get("foreign_key"):
                res.append(sqlal.Column(name, eval(dtype), ForeignKey(meta.get("reference"))))
            else:
                res.append(sqlal.Column(name, eval(dtype), nullable=True))
        return res

    def create_table(self, name, schema):
        """Register table *name* on self.metadata and return the Table."""
        table = Table(name, self.metadata,
                      *self.create_columns(schema)
                      )
        return table
def create_connection(filename):
    """
    Creates a connection to the SQLite database at ``<filename>.db``.

    Args:
        filename (str): Path/basename of the database file (without ".db").

    Returns:
        sqlalchemy Connection.
    """
    # Bug fix: the previous version hard-coded the database name and
    # ignored the ``filename`` argument entirely.
    engine = sqlal.create_engine(f"sqlite:///{filename}.db")
    connection = engine.connect()
    return connection
def create_database(filename, schemas, conn=None):
    """
    Helper function for creating the database.

    Registers every table in *schemas* on a single MetaData object, then
    creates any missing tables on the connection and returns it.
    """
    tablemaker = TableMaker()
    if not conn:
        conn = create_connection(filename)
    for name, schema in schemas.items():
        # Registration on tablemaker.metadata is the side effect we need;
        # the returned Table object itself is unused here.
        _ = tablemaker.create_table(name, schema)
        # table.create_all(connection, checkfirst=True) #Creates the table
    tablemaker.metadata.create_all(conn, checkfirst=True)  # Creates the table
    return conn
def to_sql(df, table_name, conn):
    """
    Upserts new data to the database.

    Rows of *df* whose index (``id``) already exists in *table_name* are
    filtered out; the remaining rows are appended.
    """
    ids = tuple(df.index.to_list())
    n_rows = len(ids)
    if n_rows == 0:
        # Robustness fix: an empty DataFrame previously produced the
        # invalid SQL clause "IN ()".
        msg = f"0/{n_rows} records upserted to <{table_name}>. (Empty DataFrame)"
        logger.info(msg)
        print(msg)
        return
    if n_rows == 1:
        # A Python 1-tuple formats as "(x,)", which SQL rejects, so the
        # single id is interpolated on its own.
        res = conn.execute(f"SELECT id FROM {table_name} WHERE id IN ({ids[0]})")
    else:
        res = conn.execute(f"SELECT id FROM {table_name} WHERE id IN {ids}")
    # Existing ids, flattened out of the row tuples.
    indx = np.array(res.fetchall()).flatten()
    df = df.loc[df.index.difference(indx)]
    if not df.empty:
        msg = "[~] Updating records ..."
        logger.info(msg)
        print(msg)
        try:
            df.to_sql(table_name, conn, if_exists="append", index=True)
            msg = f"[~] {df.shape[0]}/{n_rows} records upserted to table <{table_name}>"
            logger.info(msg)
            print(msg)
        except Exception as error:
            logger.error(error)
    else:
        msg = f"0/{n_rows} records upserted to <{table_name}>. (No unique records in DataFrame)"
        logger.info(msg)
        print(msg)
63,877 | jcalven/hotels-scraper | refs/heads/master | /tests/test_scraper.py | import pandas as pd
from tests.test_base import *
class TestScraper(TestBase):
    """End-to-end tests for URL generation, scraping and parsing.

    NOTE(review): these tests call module-level functions
    (hs.ensure_search_format, hs.generate_url, ...) while scraper.py defines
    them as methods of the Scraper/HotelsScraper classes — likely stale;
    confirm against the current module layout.
    """
    def test_url(self):
        # The generated URL must embed every search parameter verbatim.
        search_dict = self.search_dict
        search_dict = hs.ensure_search_format(search_dict)
        url = hs.generate_url(**search_dict)
        checkin = search_dict["checkin_datetime"].strftime("%Y-%m-%d")
        checkout = search_dict["checkout_datetime"].strftime("%Y-%m-%d")
        assert url == "https://www.hotels.com/search.do?f-price-currency-code=USD&" \
                      "f-price-multiplier=1&f-price-min=0&f-price-max=10000&" \
                      "f-star-rating=5,4,3,2,1&f-guest-rating-min=1&" \
                      "f-guest-rating-max=9&q-destination=Las%20Vegas,%20Nevada,%20United%20States%20of%20America&" \
                      f"q-check-in={checkin}&q-check-out={checkout}&q-rooms=1&q-room-0-adults=2&q-room-0-children=0"

    def test_get_soup(self):
        # Live scrape (network + geckodriver required); page must be non-empty.
        search_dict = self.search_dict
        search_dict = hs.ensure_search_format(search_dict)
        url = hs.generate_url(**search_dict)
        soup = hs.get_hotels_page(url, max_scroll=1)
        assert soup.is_empty_element == False

    def test_get_attributes(self):
        # Every extracted field list must have one entry per hotel name.
        search_dict = self.search_dict
        search_dict = hs.ensure_search_format(search_dict)
        url = hs.generate_url(**search_dict)
        soup = hs.get_hotels_page(url, max_scroll=1)
        res = hs.get_attributes(soup, **search_dict)
        assert sum(len(val) for val in res.values()) == len(res) * len(res["name"])

    def test_parser(self):
        # Smoke test: the full parse pipeline must run without raising.
        search_dict = self.search_dict
        search_dict = hs.ensure_search_format(search_dict)
        url = hs.generate_url(**search_dict)
        soup = hs.get_hotels_page(url, max_scroll=1)
        res = hs.get_attributes(soup, **search_dict)
        df_search, df_attributes = hs.get_dfs(search_dict, res)
63,878 | jcalven/hotels-scraper | refs/heads/master | /tests/test_sql.py | from hotscrape import sql
from tests.test_base import *
class TestSQL(TestBase):
    """End-to-end check that scraped frames can be persisted via the sql module."""

    def test_db_upsert(self):
        """Scrape one page and upsert the resulting frames into a fresh test DB."""
        search_dict_ = hs.ensure_search_format(self.search_dict)
        page = hs.get_hotels_page(hs.generate_url(**search_dict_), max_scroll=1)
        attributes = hs.get_attributes(page, **search_dict_)
        df_search, df_attributes = hs.get_dfs(search_dict_, attributes)
        conn = sql.create_database("./test_sql", self.schema)
        sql.to_sql(df_search, "search", conn)
        sql.to_sql(df_attributes, "hotels", conn)
| {"/tests/test_base.py": ["/hotscrape/scraper.py", "/hotscrape/parser.py"], "/hotscrape/scraper.py": ["/hotscrape/parser.py"], "/tests/test_scraper.py": ["/tests/test_base.py"], "/tests/test_sql.py": ["/tests/test_base.py"], "/main.py": ["/hotscrape/scraper.py", "/hotscrape/utils.py", "/hotscrape/sql.py", "/hotscrape/search_parser.py"]} |
63,879 | jcalven/hotels-scraper | refs/heads/master | /main.py | import logging
import pandas as pd
from datetime import datetime, timedelta
import argparse
#import hotscrape.scraper as hs
from hotscrape.scraper import HotelsScraper
from hotscrape.utils import load_schema
import hotscrape.sql as sql
from hotscrape.search_parser import Search, create_search_list
# File logging for all runs. NOTE(review): the 'logs/' directory must exist
# before startup, otherwise the file handler creation fails at import time.
logging.basicConfig(filename='logs/run.log', format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S', level=logging.INFO)
logger = logging.getLogger('hotels-scraper.run')
def run_scraper(search, connection):
    """Run one scrape for `search` and upsert non-empty result frames into the DB."""
    scraper = HotelsScraper()
    df_search, df_attributes = scraper.run(search)
    # Skip the upsert for frames that came back empty.
    if not df_search.empty:
        sql.to_sql(df_search, "search", connection)
    if not df_attributes.empty:
        sql.to_sql(df_attributes, "hotels", connection)
    print("\n\n")
def run(search_path, db_path, schema_path):
    """
    Top-level function for running the hotscrape program.

    Args:
        search_path (str): Path to search config file
        db_path (str): Path to database file
        schema_path (str): Path to database schema file
    """
    logger.info("=======================================================")
    logger.info(" START RUN ")
    logger.info("=======================================================\n")
    schema = load_schema(schema_path)
    connection = sql.create_database(db_path, schema)
    search_list = create_search_list(search_path)
    # Defensive guard: create_search_list returns a list, but normalize a
    # single mapping anyway. (Fix: `(list)` was a parenthesized name, not a
    # tuple, in the original isinstance call.)
    if not isinstance(search_list, list):
        search_list = [search_list]
    logger.info(search_list)
    for s_init in search_list:
        # Expand each config section into one Search per day in its span.
        for s in Search.generate(s_init):
            run_scraper(s.to_dict(), connection)
    msg = "Run finished"
    logger.info(msg)
    print(msg)
if __name__ == "__main__":
# NOTE(review): the description below is argparse-tutorial boilerplate
# ('Process some integers.') — it should describe the scraper instead.
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument("-i", "--input", default="default_search.ini", help="Config file to use for search (e.g. default.ini)")
parser.add_argument("-d", "--database", default="default_sql", help="Path to database (e.g. default_sql.db)")
parser.add_argument("-s", "--schema", default="db_schema.yml", help="Database schema file (e.g. db_schema.db)")
args = parser.parse_args()
run(args.input, args.database, args.schema) | {"/tests/test_base.py": ["/hotscrape/scraper.py", "/hotscrape/parser.py"], "/hotscrape/scraper.py": ["/hotscrape/parser.py"], "/tests/test_scraper.py": ["/tests/test_base.py"], "/tests/test_sql.py": ["/tests/test_base.py"], "/main.py": ["/hotscrape/scraper.py", "/hotscrape/utils.py", "/hotscrape/sql.py", "/hotscrape/search_parser.py"]} |
63,880 | jcalven/hotels-scraper | refs/heads/master | /hotscrape/parser.py | import pandas as pd
import re
def parse_price(row, sale=False):
    """
    Return the regular (or sale, if sale=True) dollar price from the row's
    `price` column, or None when no price (or no sale price) is present.

    The column text may contain zero, one, or two '$<int>' amounts; with two,
    the first is the regular price and the second the sale price.
    """
    amounts = re.findall(r"\$\d+", row[1]["price"])
    if not amounts:
        return None
    if len(amounts) == 1:
        return None if sale else int(amounts[0].lstrip("$"))
    chosen = amounts[1] if sale else amounts[0]
    return int(chosen.lstrip("$"))
def parse_star_rating(row):
    """
    Extract the hotel star rating as a float from the `star_rating` column.

    Fix: the original used str.strip("-star"), which strips a *character set*
    (not the suffix) and raises ValueError on non-numeric values. A numeric
    regex is used instead, and None is returned when no number is present —
    consistent with the other column parsers in this module.
    """
    match = re.search(r"\d+(?:\.\d+)?", row[1]["star_rating"])
    return float(match.group()) if match else None
def parse_num_reviews(row):
    """
    Extract the review count from the `num_reviews` column, or None.

    Fix: thousands separators ("1,234 reviews") are stripped first — the
    digit regex alone would stop at the comma and return 1 instead of 1234.
    Comma-free inputs behave exactly as before.
    """
    text = row[1]["num_reviews"].replace(",", "")
    digits = re.findall(r"\d+", text)
    return int(digits[0]) if digits else None
def parse_rating(row, sentiment=False):
    """
    Extract the guest rating from the `rating` column.

    Returns the numeric rating as a float, or — when sentiment=True — the
    non-numeric sentiment label (e.g. "Wonderful"). None when absent.
    """
    text = row[1]["rating"]
    number_pattern = r"[-+]?\d*\.\d+|\d+"
    if sentiment:
        label = re.sub(number_pattern, "", text).strip()
        return label or None
    matches = re.findall(number_pattern, text)
    return float(matches[0]) if matches else None
def parse_landmarks(row):
    """
    Extract the distance (in miles) to the city center from the `landmarks`
    column; None when the marker text or a parsable number is missing.
    """
    text = row[1]["landmarks"]
    # Plural checked first to mirror the original precedence; the two marker
    # strings are mutually exclusive substrings.
    for marker in ("miles to City center", "mile to City center"):
        if marker in text:
            try:
                return float(text.split(marker)[0].strip())
            except ValueError:
                return None
    return None
def drop_fully_booked(df):
    """Remove hotels with neither a regular nor a sale price (fully booked)."""
    return df.dropna(how="all", subset=["price", "price_sale"])
def parse(df):
"""
Top-level function for parsing and formatting a Pandas DataFrame containing hotels.com
hotel search result data.
Args:
df (pd.DataFrame): Pandas hotel attributes DataFrame to be parsed
Returns:
pd.DataFrame: Parsed and formatted dataframe
"""
# NOTE: mutates the input frame in place (columns are added/overwritten).
# Store `price` column data in new column
df["price_metadata"] = df["price"]
# Per-column accumulators; one value appended per row in the loop below.
price = []
price_sale = []
star_rating = []
num_reviews = []
rating = []
rating_sentiment = []
city_center_distance = []
# Row-level processing
# Add parsing functions as needed
for row in df.iterrows():
price.append(parse_price(row))
price_sale.append(parse_price(row, sale=True))
star_rating.append(parse_star_rating(row))
num_reviews.append(parse_num_reviews(row))
rating.append(parse_rating(row))
rating_sentiment.append(parse_rating(row, sentiment=True))
city_center_distance.append(parse_landmarks(row))
# Update dataframe
df["price"] = price
df["price_sale"] = price_sale
df["star_rating"] = star_rating
df["num_reviews"] = num_reviews
df["rating"] = rating
df["rating_sentiment"] = rating_sentiment
df["distance_centre"] = city_center_distance
# Drop fully booked hotels (not of interest)
df = drop_fully_booked(df)
return df | {"/tests/test_base.py": ["/hotscrape/scraper.py", "/hotscrape/parser.py"], "/hotscrape/scraper.py": ["/hotscrape/parser.py"], "/tests/test_scraper.py": ["/tests/test_base.py"], "/tests/test_sql.py": ["/tests/test_base.py"], "/main.py": ["/hotscrape/scraper.py", "/hotscrape/utils.py", "/hotscrape/sql.py", "/hotscrape/search_parser.py"]} |
63,881 | jcalven/hotels-scraper | refs/heads/master | /hotscrape/search_parser.py | from configparser import ConfigParser
import pandas as pd
from datetime import datetime, timedelta
def create_search_list(config_path):
    """
    Parse the search config file at `config_path`.

    Returns a list with one dict per config section (section order preserved).
    """
    parser = ConfigParser()
    parser.read(config_path)
    return [dict(parser[section]) for section in parser.sections()]
class MissingKeyError(Exception):
    """Raised when the search dictionary is missing a mandatory key."""

    def __init__(self, keyword):
        self.keyword = keyword
        self.message = "Must specify {}".format(keyword)
        super().__init__(self.message)
class ValueOutOfRangeError(Exception):
    """Raised when a numeric search parameter falls outside its allowed range."""

    def __init__(self, name, val, min, max):
        # `min`/`max` shadow builtins, but the names are part of the public
        # signature, so they are kept.
        self.val = val
        cond = ""
        if val > max:
            cond = "{0} > {1} ({2} > {1})".format(name, max, val)
        elif val < min:
            cond = "{0} < {1} ({2} < {1})".format(name, min, val)
        self.message = "Value is out of range: {} [{} <= {} <= {}]".format(cond, min, name, max)
        super().__init__(self.message)
class Search():
"""
Class to structure the search config file content into a format required by scraper.py
"""
# Class-level instance counter; generate() uses it to offset check-in dates
# per instance (name-mangled to _Search__counter).
__counter = 0
# Allowed (min, max) ranges per search key; None means "not range-checked".
search_key_limits = {
"checkin_datetime": None,
"checkout_datetime": None,
"price_min": (0, 10000),
"price_max": (0, 10000),
"price_multiplier": (1, 20),
"star_rating_min": (1, 5),
"star_rating_max": (1, 5),
"guest_rating_min": (1, 9),
"guest_rating_max": (1, 9),
"distance_centre": (0, 50),
"rooms": (1, 10),
"adults": (1, 10),
"children": (0, 20),
"currency": None
}
@classmethod
def _count(cls):
# Increment and return the class-level instance counter.
cls.__counter += 1
return cls.__counter
@classmethod
def _reset_count(cls):
# Reset the counter so a subsequent generate() run starts from zero offset.
cls.__counter = 0
@classmethod
def generate(cls, config, count_key="search_span"):
"""
Creates an iterator of Search instances with unique checkin/checkout dates
Args:
config (dict): Search config content
count_key (str, optional): Key to use for search span. Defaults to "search_span".
Yields:
Search: Unique Search instances
"""
search_span = int(config.get(count_key))
if search_span is not None:
for i in range(search_span):
yield cls(config)
cls._reset_count()
@staticmethod
def _recast(string):
"""
Check if string is int, float, or neither. Returns correctly typecasted input.
"""
if string == "None":
return eval(string)
try:
return int(string)
except ValueError:
pass
try:
return float(string)
except ValueError:
pass
finally:
return string
@staticmethod
def _check_value_range(name, val, min, max, is_none=False):
if not is_none:
if val < min or val > max:
raise ValueOutOfRangeError(name, val, min, max)
def __init__(self, config):
# Build a Search from one raw config-section dict; the defaults below are
# overridden by check_input() using the recast config values.
self.counter = self._count()
# Search dict from config file
self.config = config
# Default search values
self.city = None
self.state = None
self.country = None
self.checkin_datetime = None
self.checkout_datetime = None
self.price_min = 0
self.price_max = 10000
self.price_multiplier = 1
self.star_rating_min = 1
self.star_rating_max = 5
self.guest_rating_min = 1
self.guest_rating_max = 9
self.distance_centre = None
self.rooms = 1
self.adults = 2
self.children = 0
self.currency = "USD"
# Number of nights
self.nights = 1
# Search span in days
self.search_span = 182
self.check_input()
def to_dict(self):
"""
Returns a search dictionary that is accepted by scraper.py
"""
destination = {
"city": self.city,
"state": self.state,
"country": self.country
}
res = {key: self.__getattribute__(key) for key in self.search_key_limits}
res["destination"] = destination
return res
def check_input(self, config=None):
"""
Checks search config parameters for problems and updates parameters where needed.
"""
# NOTE(review): the `config` parameter is accepted but never used; the
# instance's self.config is validated instead.
for key, val in self.config.items():
# Recast config strings to int/float/None where possible.
val = self._recast(val)
if self.search_key_limits.get(key) is not None:
# Exception for search parameters that are allowed to be None
if key == "distance_centre":
self._check_value_range(key, val, *self.search_key_limits.get(key), is_none=True)
else:
self._check_value_range(key, val, *self.search_key_limits.get(key), is_none=False)
if key == "checkin_datetime":
# Offset check-in by the per-instance counter so successive Search
# instances produced by generate() cover consecutive days.
if val is None:
t_start = datetime.now() + timedelta(days=self.counter)
else:
t_start = pd.to_datetime(val) + timedelta(days=self.counter-1)
self.__setattr__(key, t_start)
else:
self.__setattr__(key, val)
if self.__getattribute__("checkout_datetime") is None:
self.__setattr__("checkout_datetime", self.__getattribute__("checkin_datetime") + timedelta(days=self.nights))
for key in ["city", "state", "country"]:
if self.__getattribute__(key) is None:
# NOTE(review): the statement on the next line constructs MissingKeyError
# but never raises it — likely a missing `raise`; confirm intended behavior.
MissingKeyError(key) | {"/tests/test_base.py": ["/hotscrape/scraper.py", "/hotscrape/parser.py"], "/hotscrape/scraper.py": ["/hotscrape/parser.py"], "/tests/test_scraper.py": ["/tests/test_base.py"], "/tests/test_sql.py": ["/tests/test_base.py"], "/main.py": ["/hotscrape/scraper.py", "/hotscrape/utils.py", "/hotscrape/sql.py", "/hotscrape/search_parser.py"]} |
63,892 | sircco/pylistshine | refs/heads/master | /pylistshine/exceptions.py | logger = logging.getLogger(__name__)
class ListShineException(Exception):
    """Base exception for all pylistshine errors."""


class ListShineAPIKeyException(ListShineException):
    """Raised when the LISTSHINE_API_KEY setting is missing."""

    def __str__(self):
        return 'LISTSHINE_API_KEY not defined'
| {"/pylistshine/connection.py": ["/pylistshine/constants.py", "/pylistshine/contactlist.py"], "/pylistshine/__init__.py": ["/pylistshine/connection.py"], "/pylistshine/tests/test_contactlist.py": ["/pylistshine/__init__.py"]} |
63,893 | sircco/pylistshine | refs/heads/master | /pylistshine/contactlist.py | import json
import logging
from builtins import object
import requests
logger = logging.getLogger(__name__)
class LSContact(object):
    """Operations on single contacts (i.e. blah@blah.com) within one contactlist."""

    def __init__(self, connection, list_id):
        self.connection = connection
        self.list_id = list_id
        self.url_base = connection.api_base + 'escontact'

    def subscribe(self, email, **kwargs):
        """Subscribe `email` to the contactlist; extra kwargs become contact fields."""
        api_url = self.url_base + '/contactlist/subscribe/{list_id}/'.format(list_id=self.list_id)
        kwargs.update({'email': email})
        response = requests.post(url=api_url, headers=self.connection.headers, json=kwargs)
        logger.warning('posting to url %s', api_url)
        response.raise_for_status()
        return response

    def list(self, email=None):
        """Retrieve contacts from the contactlist.

        Args:
            email (str, optional): filter list by this email, if none show paged contacts in list

        Returns:
            http json encoded response object, use .json() if you need dictionary
        """
        api_url = self.url_base + '/contactlist/{list_id}/'.format(list_id=self.list_id)
        jsonfilter = {'filters': [{'filter_type': 'equal',
                                   'filter_field': 'contactlist_uuid',
                                   'filter_value': self.list_id}]}
        if email:
            jsonfilter['filters'].append({'filter_type': 'equal',
                                          'filter_field': 'email',
                                          'filter_value': email})
        params = {'jsonfilter': json.dumps(jsonfilter)}
        response = requests.get(url=api_url, headers=self.connection.headers, params=params)
        logger.warning('getting from url %s', api_url)
        response.raise_for_status()
        return response

    def unsubscribe(self, email):
        """Unsubscribe `email` from the contactlist.

        Args:
            email (str):

        Returns:
            generator for json encoded response objects, use .json() on each
            result if you need dictionary.
        """
        url_template = self.url_base + '/contactlist/{list_id}/contact/{id}/unsubscribe/'
        contacts = self.list(email)
        for contact in contacts.json()['results']:
            # Bug fix: format a fresh URL from the template on every iteration.
            # The original reassigned the formatted result back onto the
            # template variable, so every contact after the first was POSTed
            # to the first contact's URL.
            api_url = url_template.format(list_id=self.list_id, id=contact['id'])
            response = requests.post(url=api_url, headers=self.connection.headers)
            response.raise_for_status()
            logger.warning('posting to url %s', api_url)
            yield response
class LSContactList(object):
    """Read operations on contactlists."""

    def __init__(self, connection):
        self.connection = connection
        self.url_base = connection.api_base + 'contactlist'

    def list(self):
        """Return the HTTP response listing all contactlists."""
        return requests.get(url=self.url_base, headers=self.connection.headers)

    def retrieve(self, list_id):
        """Return the HTTP response with details for one contactlist."""
        detail_url = '{base}/{list_id}/'.format(base=self.url_base, list_id=list_id)
        return requests.get(url=detail_url, headers=self.connection.headers)
# class LSSegment:
# def all_segments(self):
# pass
# def get_segment_by_id(self, segment_id):
# pass
| {"/pylistshine/connection.py": ["/pylistshine/constants.py", "/pylistshine/contactlist.py"], "/pylistshine/__init__.py": ["/pylistshine/connection.py"], "/pylistshine/tests/test_contactlist.py": ["/pylistshine/__init__.py"]} |
63,894 | sircco/pylistshine | refs/heads/master | /pylistshine/connection.py | from abc import ABCMeta
from builtins import object
from future.utils import with_metaclass
from .constants import LISTSHINE_API_BASE
from .contactlist import LSContact, LSContactList
class LSConnection(with_metaclass(ABCMeta, object)):
    """Connection to the ListShine API; factory for contact/contactlist helpers."""

    def __init__(self, api_key, api_base=LISTSHINE_API_BASE):
        self.headers = {'Authorization': 'Token %s' % api_key}
        self.api_base = api_base

    def contact(self, list_id):
        """Return an LSContact bound to this connection.

        Args:
            list_id (str): contactlist_uuid from listhine application
        """
        return LSContact(list_id=list_id, connection=self)

    def contactlist(self):
        """Return an LSContactList bound to this connection."""
        return LSContactList(connection=self)
| {"/pylistshine/connection.py": ["/pylistshine/constants.py", "/pylistshine/contactlist.py"], "/pylistshine/__init__.py": ["/pylistshine/connection.py"], "/pylistshine/tests/test_contactlist.py": ["/pylistshine/__init__.py"]} |
63,895 | sircco/pylistshine | refs/heads/master | /pylistshine/__init__.py | from .connection import LSConnection
| {"/pylistshine/connection.py": ["/pylistshine/constants.py", "/pylistshine/contactlist.py"], "/pylistshine/__init__.py": ["/pylistshine/connection.py"], "/pylistshine/tests/test_contactlist.py": ["/pylistshine/__init__.py"]} |
63,896 | sircco/pylistshine | refs/heads/master | /pylistshine/tests/test_contactlist.py | import unittest
from pylistshine import connection
try:
from unittest.mock import patch
except ImportError:
from mock import patch
class ContactTest(unittest.TestCase):
# Smoke tests: requests is patched out, so these only verify that the client
# builds its calls without raising — no response content is asserted.
def setUp(self):
self.conn = connection.LSConnection('fake_api_key')
@patch('requests.post')
def test_subscribe(self, _):
contact = self.conn.contact("fake-list-uu-id")
response = contact.subscribe("email@email.com", firstname="name")
@patch("requests.post")
def test_unsubscribe(self, _):
# NOTE(review): unsubscribe() is a generator; it is never consumed here,
# so its body does not actually execute under this test.
contact = self.conn.contact("fake-list-uu-id")
response = contact.unsubscribe("email@email.com")
@patch("requests.get")
def test_list(self, _):
contact = self.conn.contact("fake-list-uu-id")
response = contact.list()
@patch("requests.get")
def test_contactlist_list(self, _):
cl = self.conn.contactlist()
response = cl.list()
@patch("requests.get")
def test_contactlist_retrieve(self, _):
cl = self.conn.contactlist()
response = cl.retrieve("fake-list-uu-id")
| {"/pylistshine/connection.py": ["/pylistshine/constants.py", "/pylistshine/contactlist.py"], "/pylistshine/__init__.py": ["/pylistshine/connection.py"], "/pylistshine/tests/test_contactlist.py": ["/pylistshine/__init__.py"]} |
63,897 | sircco/pylistshine | refs/heads/master | /pylistshine/constants.py | LISTSHINE_API_BASE = 'https://send.listshine.com/api/v1/'
| {"/pylistshine/connection.py": ["/pylistshine/constants.py", "/pylistshine/contactlist.py"], "/pylistshine/__init__.py": ["/pylistshine/connection.py"], "/pylistshine/tests/test_contactlist.py": ["/pylistshine/__init__.py"]} |
63,915 | gr8scott88/FFLStats4 | refs/heads/master | /models/Player.py | from web_parsing import PlayerParser
class Player:
    """Wraps one player's table-row soup and dispatches parsing by player type."""

    def __init__(self, player_row, player_type):
        self.parser = PlayerParser.PlayerParser()
        self.soup = player_row
        self.type = player_type

    def parse_player_data(self):
        """Parse this row according to its player type; unknown types yield []."""
        handlers = {
            'OFF': self.parse_offensive_info,
            'KICKER': self.parse_kicker_info,
            'DEF': self.parse_defensive_info,
        }
        handler = handlers.get(self.type)
        if handler is None:
            print('Invalid player type')
            return []
        return handler()

    def parse_offensive_info(self):
        """Parse an offensive/bench player row."""
        return self.parser.parse_offensive_player(self.soup)

    def parse_kicker_info(self):
        """Parse a kicker row."""
        return self.parser.parse_kicker(self.soup)

    def parse_defensive_info(self):
        """Parse a defense row."""
        return self.parser.parse_defense(self.soup)
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,916 | gr8scott88/FFLStats4 | refs/heads/master | /archive/data_downloading/ResultsDownloader.py | import requests
import os
file_root = r'/'
class ResultsDownloader:
# Downloads Yahoo fantasy weekly team result pages and archives them under
# data_archive/<league_id>/<week>/.
def __init__(self, league_id, no_teams):
self.url_root = 'https://football.fantasysports.yahoo.com/f1/'
self.league_id = league_id
self.no_teams = no_teams
def download_all_current(self, current_week):
# NOTE(review): range(current_week) covers weeks 0..current_week-1 —
# confirm whether week numbering should start at 1 instead.
for week in range(current_week):
self.download_week(week)
def download_week(self, week):
# Teams are 1-indexed on Yahoo, hence team + 1.
for team in range(self.no_teams):
self.download_team(team + 1, week)
def download_team(self, team, week):
# NOTE(review): url_root already ends with '/', so this f-string yields a
# double slash ('f1//<league>'); confirm Yahoo tolerates it.
url = f'{self.url_root}/{self.league_id}/{team}/team?&week={week}'
# print(url)
page = requests.get(url)
# print(page)
folder_path = os.path.join(file_root, 'data_archive', str(self.league_id), str(week))
if not os.path.exists(folder_path):
os.makedirs(folder_path)
file_path = os.path.join(folder_path, f'{team}_results.html')
with open(file_path, 'wb') as f:
# NOTE(review): str(page.content) writes the Python repr ("b'...'")
# rather than the raw HTML bytes; f.write(page.content) is likely intended.
f.write(str(page.content).encode('utf-8'))
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,917 | gr8scott88/FFLStats4 | refs/heads/master | /archive/RunFile.py | from models import League
from utility import DataManager
import configparser
# One-off archive script: loads one week of data for the AFC and NFC leagues
# (league ids read from config/config.ini) and exports a contest dataframe.
# currentWeek = 12
targetWeek = 1
config = configparser.ConfigParser()
config.read(r'config/config.ini')
for key in config['LEAGUES']:
print(key)
if 'afc' in key:
afc_id = config['LEAGUES'][key]
elif 'nfc'in key:
nfc_id = config['LEAGUES'][key]
# afc_id = 609682
afc_data = DataManager.DataManager()
AFC = League.League(afc_id, afc_data)
# AFC.load_all_data_points(currentWeek)
AFC.load_data_point(targetWeek, 0)
# afc_data.export_complete_team_frame(afc_id)
# nfc_id = 713428
nfc_data = DataManager.DataManager()
NFC = League.League(nfc_id, nfc_data)
# NFC.load_all_data_points(currentWeek)
NFC.load_data_point(targetWeek, 0)
# nfc_data.export_complete_team_frame(nfc_id)
r = nfc_data.cum_sum_position_by_week("BN", targetWeek)
# nfc_data.export_dataframe(r, 'NFC_Contest_Week_' + str(targetWeek))
# NOTE(review): r2 is computed but never used; r is reassigned below before
# the (commented-out) NFC export ever runs.
r2 = nfc_data.max_score_position_by_week("WR", targetWeek)
r = afc_data.cum_sum_position_by_week("BN", targetWeek)
afc_data.export_dataframe(r, 'AFC_Contest_Week_' + str(targetWeek))
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,918 | gr8scott88/FFLStats4 | refs/heads/master | /models/Webpage.py | import requests
from bs4 import BeautifulSoup
from utility import FileManager
import os
class Webpage:
def __init__(self, url: str):
# Fetch the page once at construction time; keep both raw bytes and soup.
self.url = url
page = requests.get(url)
self.content = page.content
self.soup = BeautifulSoup(self.content, 'html.parser')
def get_content(self):
# Raw bytes of the fetched page.
return self.content
def save_team_html(self, week, time):
# Archive this team page under <league>/week_<week>/<team>_<time>.html.
file_path_info = self.parse_team_url()
print(file_path_info)
# file_path = os.path.join(file_path_info[0], file_path_info[2]
league_dir = file_path_info[0]
week_dir = 'week_' + str(week)
file_path = os.path.join(league_dir, week_dir)
file_name = str(file_path_info[1]) + '_' + str(time) + '.html'
# print(file_path)
# print(file_name)
FileManager.save_html(file_path, file_name, self.content)
def get_soup(self):
# Parsed BeautifulSoup document for the fetched page.
return self.soup
def parse_team_url(self):
# https://football.fantasysports.yahoo.com/f1/910981/4/team?&week=5
info = self.url.split('/')
league = info[4]
team = info[5]
week = info[6]
return [league, team, week]
def parse_league_html(self):
# https://football.fantasysports.yahoo.com/f1/910981
info = self.url.split('/')
league = info[4]
return league
def save_league_html(self):
# Persist the fetched league page as LeagueHtml_<league>.html via FileManager.
league = self.parse_league_html()
file_name = 'LeagueHtml_' + str(league) + '.html'
FileManager.save_html(league, file_name, self.content) | {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,919 | gr8scott88/FFLStats4 | refs/heads/master | /web_parsing/PlayerParser.py | from loguru import logger
# PLAYERSCORECOLS = [UNIQUE_ID, WEEK, 'Name',
# 'PlayerPos', 'ActivePos', REAL_SCORE, PROJ_SCORE, 'PctPlayed']
# The team page renders three roster stat tables; these ids index them.
OFFENSEANDBENCH = 0
KICKER = 1
DEFENSE = 2
TABLES = [OFFENSEANDBENCH, KICKER, DEFENSE]
class PlayerParser:
def __init__(self):
self.current_soup = ''
# Column index of each datum within a roster row; shifted by
# handle_forecast() when the wider 'Forecast' layout is detected.
self.data_indices = {'ActivePos': 0,
'Name': 1,
'Team': 1,
'PlayerPos': 1,
'RealScore': 5,
'ProjScore': 6,
'PctPlayed': 7}
def get_all_info(self, soup):
    """Parse every stat table (offense/bench, kicker, defense) and return all rows.

    Fix: iterate the table ids in TABLES directly instead of the enumerate
    index (the original used the loop *index* as the table id, which only
    worked because TABLES == [0, 1, 2]), and replace the first-iteration
    special case with a plain extend.
    """
    all_data = []
    for table_id in TABLES:
        all_data.extend(self.parse_player_table(soup, table_id))
    return all_data
def get_stat_table(self, soup, index):
# tbody element of the page's statTable<index>.
return soup.find('table', id=f'statTable{index}').find('tbody')
def get_table_rows(self, table_soup):
# All <tr> rows of a stat table body.
return table_soup.find_all('tr')
def get_table_colunms(self, row_soup):
# All <td> cells of a row. (Name typo 'colunms' kept: callers use it.)
return row_soup.find_all('td')
def combine_all_data(self, arrays):
out_array = []
for index, arr in enumerate(arrays):
if index == 0:
out_array = arr
else:
out_array = out_array.extend(arr)
return out_array
def parse_player_table(self, soup, index):
# Parse every roster row of stat table `index` into a list of row lists.
logger.debug(f'Parsing table index = {index}')
stat_table = self.get_stat_table(soup, index)
player_rows = self.get_table_rows(stat_table)
logger.debug(f'found {len(player_rows)} rows')
all_data = []
# NOTE(review): the loop variable below shadows the `index` parameter.
for index, row in enumerate(player_rows):
logger.debug(f'checking row {index}')
new_data = self.parse_player(row)
all_data.append(new_data)
return all_data
def parse_player(self, row_soup):
# Parse one roster row into [name, player_pos, active_pos, real, proj, pct].
self.current_soup = row_soup
stat_cells = self.get_table_colunms(row_soup)
# Adjust data_indices first if this row uses the wider 'Forecast' layout.
if self.handle_forecast(stat_cells):
logger.debug('forecast detected')
active_position = self.get_active_pos(stat_cells[self.data_indices['ActivePos']])
if self.is_unplayed_pos(row_soup):
# Empty roster slot: placeholder entry with zeroed scores.
return_data = ['None', active_position, active_position, 0, 0, 0]
return return_data
else:
player_position = self.get_player_pos(stat_cells[self.data_indices['PlayerPos']])
player_name = self.get_player_name(stat_cells[self.data_indices['Name']])
if self.is_player_on_bye(row_soup):
# Bye week: keep the player's identity, zero the scores.
return_data = [player_name, player_position, active_position, 0, 0, 0]
else:
real_score = self.get_real_score(stat_cells[self.data_indices['RealScore']])
projected_score = self.get_proj_score(stat_cells[self.data_indices['ProjScore']])
percent_start = self.get_percent_played(stat_cells[self.data_indices['PctPlayed']])
return_data = [player_name, player_position, active_position, real_score, projected_score, percent_start]
return return_data
@staticmethod
def get_active_pos(cell_soup):
# Roster slot (e.g. QB, BN) from the cell's data-pos attribute.
return cell_soup.find('span')['data-pos']
@staticmethod
def get_player_name(cell_soup):
# Player display name from the name link.
return cell_soup.find('a', class_='Nowrap name F-link').contents[0]
@staticmethod
def get_player_pos(cell_soup):
# Position half of the 'TEAM - POS' label.
team_pos = cell_soup.find('span', class_='Fz-xxs').contents[0]
return team_pos.split('-')[1].strip()
@staticmethod
def get_team(cell_soup):
# Team half of the 'TEAM - POS' label.
team_pos = cell_soup.find('span', class_='Fz-xxs').contents[0]
return team_pos.split('-')[0].strip()
@staticmethod
def get_real_score(cell_soup):
# Actual fantasy points (string as rendered in the cell).
return cell_soup.find('a').contents[0]
@staticmethod
def get_proj_score(cell_soup):
# Projected fantasy points (string as rendered in the cell).
return cell_soup.find('div').contents[0]
@staticmethod
def get_percent_played(cell_soup):
# Percent-started value with the trailing '%' removed.
return cell_soup.find('div').contents[0].strip('%')
@staticmethod
def is_player_on_bye(row_soup):
if 'Bye' in str(row_soup):
return True
else:
return False
@staticmethod
def is_unplayed_pos(row_soup):
# True when the roster slot markup contains 'Empty'.
return 'Empty' in str(row_soup)
def handle_forecast(self, all_cell_soup):
if len(all_cell_soup) >= 27:
self.shift_data_indices()
return True
else:
self.reset_data_indices()
return False
try:
if 'Forecast' in all_cell_soup[4].find('a').contents[0]:
self.shift_data_indices()
return True
else:
self.reset_data_indices()
return False
except Exception as e:
self.reset_data_indices()
return False
def shift_data_indices(self):
# Column layout for rows that include the extra 'Forecast' columns.
self.data_indices = {'ActivePos': 0,
'Name': 1,
'Team': 1,
'PlayerPos': 1,
'RealScore': 6,
'ProjScore': 7,
'PctPlayed': 8}
def reset_data_indices(self):
# Default column layout (no forecast columns).
self.data_indices = {'ActivePos': 0,
'Name': 1,
'Team': 1,
'PlayerPos': 1,
'RealScore': 5,
'ProjScore': 6,
'PctPlayed': 7}
@staticmethod
def is_empty(row_soup):
return '(Empty)' in str(row_soup) | {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,920 | gr8scott88/FFLStats4 | refs/heads/master | /data_vis/LeagueVisualizer.py | from models.League import League
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import time
import os
from matplotlib.cm import get_cmap
# Shared color cycle for the plots below: the qualitative "Accent"
# colormap gives each team a visually distinct hue.
# NOTE(review): `matplotlib.cm.get_cmap` is deprecated in newer matplotlib
# releases — confirm the pinned matplotlib version before upgrading.
name = "Accent"
cmap = get_cmap(name)  # type: matplotlib.colors.ListedColormap
colors = cmap.colors  # type: list
class LeagueVisualizer:
    """Generates matplotlib/seaborn plots for a League's season data.

    Every ``plot_*`` method either displays the figure interactively
    (``save=False``) or writes it under ``export/plots/<league name>/``
    (``save=True``).
    """

    def __init__(self, league_: League):
        self.league = league_
        sns.set_context('talk')

    def plot_real_score_by_week(self, save=False):
        """Line plot of every team's actual score per week."""
        merged = pd.merge(self.league.score_info, self.league.league_info, on='TeamID', how='left')
        grouped = merged.groupby('TeamName')
        fig, ax = plt.subplots(figsize=(15, 7))
        for name, group in grouped:
            group.plot(x='Week', y='RealScore', ax=ax, label=name)
        plot_title = f'Real Score by Week for {self.league.name}'
        plt.title(plot_title)
        plt.legend(loc='upper left')
        if save:
            # BUG FIX: previously saved under the leftover loop variable
            # `name` (the last team's name) instead of the plot title.
            self.save_plot(plot_title)
        else:
            plt.show()

    def plot_cum_real_score_by_week(self, save=False):
        """Line plot of each team's cumulative total score per week."""
        merged = pd.merge(self.league.score_info, self.league.league_info, on='TeamID', how='left')
        grouped = merged.groupby('TeamName')
        fig, ax = plt.subplots(figsize=(15, 7))
        ax.set_prop_cycle(color=colors)
        for name, group in grouped:
            group['CumScore'] = group['RealScore'].cumsum()
            group.plot(x='Week', y='CumScore', ax=ax, label=name)
        plot_title = f'Cumulative Total Score by Week for {self.league.name}'
        plt.title(plot_title)
        plt.legend(loc='upper left')
        if save:
            self.save_plot(plot_title)
        else:
            plt.show()

    def plot_real_vs_proj_by_week(self, save=False):
        """Weekly estimation error (actual minus projected score) per team."""
        merged = pd.merge(self.league.score_info, self.league.league_info, on='TeamID', how='left')
        merged['Delta'] = merged['RealScore'] - merged['ProjScore']
        grouped = merged.groupby('TeamName')
        fig, ax = plt.subplots(figsize=(15, 7))
        for name, group in grouped:
            group.plot(x='Week', y='Delta', ax=ax, label=name)
        plot_title = f'Estimation Error by Week for {self.league.name}'
        plt.title(plot_title)
        plt.legend(loc='upper left')
        if save:
            self.save_plot(plot_title)
        else:
            plt.show()

    def plot_cum_real_vs_proj_by_week(self, save=False):
        """Cumulative estimation error (actual minus projected) per team."""
        merged = pd.merge(self.league.score_info, self.league.league_info, on='TeamID', how='left')
        merged['Delta'] = merged['RealScore'] - merged['ProjScore']
        grouped = merged.groupby('TeamName')
        fig, ax = plt.subplots(figsize=(15, 7))
        for name, group in grouped:
            group['CumDelta'] = group['Delta'].cumsum()
            group.plot(x='Week', y='CumDelta', ax=ax, label=name)
        plot_title = f'Cumulative Estimation Error by Week for {self.league.name}'
        plt.title(plot_title)
        plt.legend(loc='upper left')
        if save:
            self.save_plot(plot_title)
        else:
            plt.show()

    def plot_player_breakdown_for_all_teams(self, save=False):
        """Per-position score breakdown plots for every team in the league."""
        teams = self.league.player_info.groupby('UniqueID')
        for team in teams:
            self.plot_player_breakdown_by_team_var(team, save)

    def plot_player_breakdown_by_team(self, team, save=False):
        """Stacked bar chart of weekly score by active position for one team.

        *team* is a (UniqueID, DataFrame) pair as produced by a groupby.
        """
        team_df = team[1]
        team_name = self.league.league_info.loc[self.league.league_info['UniqueID'] == team[0], 'TeamName'].iloc[0]
        filtered = team_df.loc[~team_df['ActivePos'].isin(['BN'])]  # drop bench slots
        grouped = filtered.groupby(['UniqueID', 'Week', 'ActivePos'])['RealScore'].sum().unstack('ActivePos')
        grouped.plot(kind='bar', stacked=True)
        plot_title = f'Score Breakdown by Position for {team_name}'
        plt.title(plot_title)
        if save:
            self.save_plot(plot_title)
        else:
            plt.show()

    def plot_player_breakdown_by_team_var(self, team, save=False):
        """Variant of the per-team breakdown with fixed y-limit and outer legend."""
        team_df = team[1]
        team_name = self.league.league_info.loc[self.league.league_info['UniqueID'] == team[0], 'TeamName'].iloc[0]
        filtered = team_df.loc[~team_df['ActivePos'].isin(['BN', 'IR'])]  # drop bench and IR slots
        grouped_by_id = filtered.groupby(['UniqueID'])
        for name, group in grouped_by_id:
            f = plt.figure(figsize=(20, 10))
            grouped = group.groupby(['Week', 'ActivePos'])['RealScore'].sum().unstack('ActivePos')
            grouped.plot(kind='bar', stacked=True, ax=f.gca())
            plot_title = f'Score Breakdown by Position for {team_name}'
            plt.title(plot_title)
            plt.ylim(0, 200)
            plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
            f.subplots_adjust(right=0.8)
            if save:
                # BUG FIX: the save branch previously called plt.show()
                # first, which blocks and can leave nothing to save.
                self.save_plot(plot_title)
            else:
                plt.show()

    def plot_player_breakdown_for_season(self, save=False):
        """League-wide stacked bar chart of total score by active position."""
        f, ax = plt.subplots(figsize=(20, 10))
        ax.set_prop_cycle(color=colors)
        df = pd.merge(self.league.player_info, self.league.league_info, on='UniqueID')
        filtered = df.loc[~df['ActivePos'].isin(['BN', 'IR'])]
        grouped = filtered.groupby(['TeamName', 'ActivePos'])['RealScore'].sum().unstack('ActivePos')
        grouped.plot(kind='bar', stacked=True, ax=f.gca())
        plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
        plot_title = f'Score Breakdown by Position for {self.league.name}'
        plt.title(plot_title)
        f.subplots_adjust(right=0.8)
        f.subplots_adjust(bottom=0.3)
        plt.xticks(rotation=30, ha='right')
        if save:
            # BUG FIX: same stray plt.show() before saving as above.
            self.save_plot(plot_title)
        else:
            plt.show()

    def save_plot(self, name):
        """Write the current figure to export/plots/<league>/<name> and close it.

        BUG FIX: the directory that was created ('plots/<league>') was not
        the directory written to ('export/plots/<league>'), so savefig
        failed whenever the real target was missing. The actual target is
        now created, including missing parents.
        """
        name = name.replace('.', '')  # dots confuse savefig's extension handling
        dir_path = os.path.join('export', 'plots', self.league.name)
        os.makedirs(dir_path, exist_ok=True)
        fpath = os.path.join(dir_path, name)
        plt.savefig(fpath)
        plt.close()
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,921 | gr8scott88/FFLStats4 | refs/heads/master | /models/PLAYERTYPE.py | from enum import Enum
class PLAYERTYPE(Enum):
    """Broad roster categories used when splitting a team's stat table."""
    OFFSENSE = 1  # historical misspelling, kept so existing callers keep working
    OFFENSE = 1   # correctly spelled alias of OFFSENSE (same value -> enum alias)
    KICKER = 2
    DEFENSE = 3
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,922 | gr8scott88/FFLStats4 | refs/heads/master | /utility/WebHelper.py | from bs4 import BeautifulSoup
import requests
def build_url(root, *paths):
    """Join each of *paths* onto *root*, inserting single '/' separators."""
    url = root
    for piece in paths:
        separator = '' if url.endswith('/') else '/'
        url = f'{url}{separator}{piece}'
    return url
def build_url_for_week(root, league_id, team_id, week):
    """Build the team-page URL for a given league/team/week.

    Example target: https://football.fantasysports.yahoo.com/f1/910981/4/team?&week=6
    """
    base = build_url(root, league_id, team_id)
    return f'{base}/team?&week={week}'
def get_soup(url):
    """Fetch *url* over HTTP and return its parsed BeautifulSoup document."""
    response = requests.get(url)
    return BeautifulSoup(response.content, 'html.parser')
63,923 | gr8scott88/FFLStats4 | refs/heads/master | /web_parsing/LeaguePageParser.py | import pandas as pd
from models import DATACONTRACT
class LeaguePageParser:
    """Extracts team and standings information from a Yahoo league page soup."""

    def __init__(self):
        pass

    def parse_league_info(self, league_page_soup) -> pd.DataFrame:
        """Return one row per team link: UniqueID, LeagueID, TeamID, TeamName."""
        league_info = []
        players = self.get_player_table(league_page_soup)
        for player in players:
            team_name = player.contents[0]
            href = player['href']
            info = self.parse_href(href)
            league_id = info[0]
            team_id = info[1]
            unique_id = str(league_id + '_' + str(team_id))
            league_info.append([unique_id, league_id, int(team_id), team_name])
        league_frame = pd.DataFrame(league_info, columns=[DATACONTRACT.LEAGUEINFOCOLS[0],
                                                          DATACONTRACT.LEAGUEINFOCOLS[1],
                                                          DATACONTRACT.LEAGUEINFOCOLS[2],
                                                          DATACONTRACT.LEAGUEINFOCOLS[3]])
        return league_frame

    @staticmethod
    def get_player_table(league_page_soup):
        """Return the team-link anchors from the league's rich list."""
        league_table = league_page_soup.find_all('ul', class_='List-rich')
        players = league_table[0].find_all('a', class_='F-link')
        return players

    @staticmethod
    def get_standings_table(league_page_soup):
        """Return the <section id='leaguestandings'> element."""
        standings_table = league_page_soup.find("section", {"id": "leaguestandings"})
        return standings_table

    def get_standings_info(self, league_page_soup=None):
        """Parse every standings row into its field list.

        BUG FIX: previously called get_standings_table() with no soup
        argument, which always raised TypeError, and discarded each parsed
        row. The soup is now forwarded (kept optional so the old zero-arg
        call still resolves) and the parsed rows are returned as a list.
        """
        standings_table = self.get_standings_table(league_page_soup)
        standings_rows = standings_table.find_all('tr')
        return [self.parse_standings_row(row) for row in standings_rows]

    @staticmethod
    def parse_standings_row(standings_row):
        """Extract [team_id, rank, name, WLT, PF, PA, Streak, Waiver, Moves] from a <tr>."""
        row_info = standings_row.find_all('td')
        # BUG FIX: the href lookup indexed the row soup itself
        # (standings_row[1]) instead of the second <td> cell.
        href = row_info[1].find_all('a')[1].get('href')
        team_id = href.split('/')[-1]
        rank = row_info[0].find_all('span')[1].contents[0]
        name = row_info[1].find_all('a')[1].contents[0]
        WLT = row_info[2].contents[0]
        PF = row_info[3].contents[0]
        PA = row_info[4].contents[0]
        Streak = row_info[5].contents[0]
        Waiver = row_info[6].contents[0]
        Moves = row_info[7].contents[0]
        return [team_id, rank, name, WLT, PF, PA, Streak, Waiver, Moves]

    def get_team_names(self, league_page_soup=None):
        """Return the display name of every team on the league page.

        BUG FIX: get_player_table requires the page soup; it is now
        forwarded (optional for backward compatibility with the old
        zero-argument call, which always raised TypeError).
        """
        players = self.get_player_table(league_page_soup)
        return [player.contents[0] for player in players]

    def get_current_standings(self):
        """Not yet implemented."""
        pass

    @staticmethod
    def parse_href(href):
        """Split '/f1/<league_id>/<team_id>' into [league_id, team_id]."""
        info = href.split('/')
        league_id = info[2]
        team_id = info[3]
        return [league_id, team_id]
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,924 | gr8scott88/FFLStats4 | refs/heads/master | /data_handlers/LocalDataManager.py | from models import Webpage
import os
import pandas as pd
from models import DATACONTRACT
def save_league_html(league_id, page: Webpage):
    """Persist the raw league page HTML under <DATAROOT>/<league_id>/.

    BUG FIX: this module-level function carried a stray ``self`` parameter
    (copy-paste from a method), which shifted every positional argument;
    it now matches its sibling save_* functions in this module.
    """
    folder = gen_folder_path([league_id])
    fpath = os.path.join(folder, f'{DATACONTRACT.LEAGUEHTML}.html')
    with open(fpath, 'w') as f:
        f.write(page)
def save_team_html_by_week(league_id, team_id, week, page: Webpage):
    """Persist a team page's HTML under <DATAROOT>/<league_id>/<week>/."""
    target = os.path.join(gen_folder_path([league_id, week]),
                          f'{team_id}_{DATACONTRACT.TEAMHTML}.html')
    with open(target, 'w') as out:
        out.write(page)
def save_matchup_html_by_week(league_id, team_id, week, page: Webpage):
    """Persist a matchup page's HTML under <DATAROOT>/<league_id>/<week>/."""
    target = os.path.join(gen_folder_path([league_id, week]),
                          f'{team_id}_{DATACONTRACT.MATCHUPHTML}.html')
    with open(target, 'w') as out:
        out.write(page)
def load_league_soup(league_id):
    """Load the cached league page soup, or False when no cache exists.

    BUG FIX: the parsed soup was computed but never returned — the
    success path fell through and yielded None.
    """
    folder = gen_folder_path([league_id])
    fpath = os.path.join(folder, f'{DATACONTRACT.LEAGUEHTML}.html')
    if not os.path.exists(fpath):
        return False
    return get_soup(fpath)
def load_team_soup_by_week(league_id, team_id, week):
    """Load the cached team-page soup for a week, or False when missing.

    BUG FIX: the parsed soup was computed but never returned — the
    success path fell through and yielded None.
    """
    folder = gen_folder_path([league_id, week])
    fpath = os.path.join(folder, f'{team_id}_{DATACONTRACT.TEAMHTML}.html')
    if not os.path.exists(fpath):
        return False
    return get_soup(fpath)
def load_matchup_soup_by_week(league_id, team_id, week):
    """Load the cached matchup-page soup for a week, or False when missing.

    BUG FIXES: (1) a stray ``self`` parameter on this module-level
    function shifted every positional argument — removed to match the
    sibling load_* functions; (2) the parsed soup was computed but never
    returned.
    """
    folder = gen_folder_path([league_id, week])
    fpath = os.path.join(folder, f'{team_id}_{DATACONTRACT.MATCHUPHTML}.html')
    if not os.path.exists(fpath):
        return False
    return get_soup(fpath)
def save_to_parquet(league_id, data, name, overwrite=False):
    """Write *data* to <league_id>_<name>.parquet (gzip-compressed).

    Returns True on success, False when the file already exists and
    *overwrite* is False. BUG FIX: the overwrite branch previously fell
    through and returned None instead of True.
    """
    filename = f'{league_id}_{name}.parquet'
    full_file = gen_full_file_path([league_id], filename)
    if os.path.isfile(full_file):
        if not overwrite:
            print('File already exists, specify OVERWRITE')
            return False
        os.remove(full_file)
        data.to_parquet(full_file, compression='gzip')
        return True
    data.to_parquet(full_file, compression='gzip')
    print('Saved to PARQUET file')
    return True
def load_from_parquet(league_id, name):
    """Read a previously saved parquet frame for a league; None when absent."""
    target = gen_full_file_path([league_id], f'{league_id}_{name}.parquet')
    if not os.path.isfile(target):
        return None
    frame = pd.read_parquet(target)
    print(f'Loaded file {target} from saved data')
    return frame
def get_league_directory(league_id):
    """Return the on-disk data directory for *league_id* under DATAROOT."""
    return os.path.join(DATACONTRACT.DATAROOT, f'{league_id}')
def gen_filepath(league_id, name):
    """Build (and echo to stdout) the path DATAROOT/<league_id>/<name>."""
    path = os.path.join(DATACONTRACT.DATAROOT, str(league_id), name)
    print(path)
    return path
def gen_folder_path(folders):
    """Join DATAROOT with every entry of *folders* (stringified) in order."""
    segments = [str(entry) for entry in folders]
    return os.path.join(DATACONTRACT.DATAROOT, *segments)
def gen_full_file_path(folders, filename):
    """Like gen_folder_path, but with *filename* appended as the final component."""
    segments = [str(entry) for entry in folders]
    return os.path.join(DATACONTRACT.DATAROOT, *segments, filename)
def get_soup(fpath):
    """Parse a cached HTML file into soup — NOT YET IMPLEMENTED.

    NOTE(review): this stub opens the file and does nothing, so it returns
    None; the load_* helpers above therefore hand None to their callers on
    the success path. Implement (e.g. BeautifulSoup over f.read()) before
    relying on them.
    """
    with open(fpath, 'r') as f:
        #TODO: parse f.read() into a BeautifulSoup document and return it
        pass
def export_to_csv(data: pd.DataFrame, filename):
    """Dump *data* as CSV into the export directory."""
    destination = gen_full_file_path([DATACONTRACT.EXPORTDIR], filename)
    data.to_csv(destination)
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,925 | gr8scott88/FFLStats4 | refs/heads/master | /utility/DateManager.py | from datetime import datetime, timedelta
class DateManager:
    """Week arithmetic anchored to the season's week-1 start date."""

    def __init__(self, week_1_date: datetime):
        self.start_date = week_1_date

    def get_current_week(self):
        """Return whole elapsed days since the season start divided by 7 (a float)."""
        elapsed = datetime.now() - self.start_date
        return elapsed.days / 7

    def get_start_of_week_x(self, x):
        """Not yet implemented."""
        pass

    def get_week_breakdown(self, x):
        """Not yet implemented."""
        pass
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,926 | gr8scott88/FFLStats4 | refs/heads/master | /web_parsing/TeamPageParser.py | from utility.YahooWebHelper import YahooWebHelper
class TeamParser:
    """Scrapes scores and roster details out of a Yahoo team page soup."""

    def __init__(self):
        pass

    def parse_team_stats(self, soup):
        """Return [actual_score, projected_score] for the team page, as floats."""
        actual = float(self.get_team_score(soup))
        projected = float(self.get_team_projected_score(soup))
        return [actual, projected]

    @staticmethod
    def get_team_projected_score(soup):
        """Pull the projected score (second span) out of the summary grid."""
        grid = soup.find_all(class_="Grid-table W-100 Fz-xs Py-lg")
        spans = grid[0].find_all('span')
        return spans[1].contents[0]

    def get_weekly_opponent(self, match_page_soup):
        """Return the opponent's team id from the matchup card."""
        card = match_page_soup.find_all('div', {'id': 'team-card-matchup'})[0]
        opponent_link = card.find_all('a', {'class': 'Grid-u'})[1]
        return self.get_id_from_href(opponent_link.get('href'))

    @staticmethod
    def get_id_from_href(href):
        """The team id is the last path segment of the href."""
        return href.split('/')[-1]

    def get_team_roster(self):
        """Not yet implemented."""
        pass

    def get_all_player_stats(self, soup):
        """Split the stat table into offense/bench, kicker and defense sections (WIP)."""
        stat_table = soup.find_all('section', {'class': 'stat-target'})[0]
        sub_tables = stat_table.findChildren('div', recursive=False)
        offense_and_bench = sub_tables[0]
        kickers = sub_tables[1]
        defense = sub_tables[2]
        pass

    def parse_offsense_stats(self, soup):
        """Not yet implemented."""
        pass

    def parse_kicker_stats(self, soup):
        """Not yet implemented."""
        pass

    def parse_defense_stats(self, soup):
        """Not yet implemented."""
        pass

    @staticmethod
    def get_team_score(soup):
        """Return the team's current score from the matchup card as a float."""
        card = soup.find_all('div', {'id': 'team-card-matchup'})[0]
        score_cell = card.find_all('div', {'class': 'Grid-u'})[0]
        return float(score_cell.get_text().strip('\n'))
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,927 | gr8scott88/FFLStats4 | refs/heads/master | /archive/data_analysis/data_analyzer.py | from models.League import League
class DataAnalyser:
    """Holds a League's frames locally for downstream statistical analysis."""

    def __init__(self, league: League):
        # Copy each frame onto the analyser so analyses never reach back
        # into the League object.
        for frame_name in ('league_info', 'draft_info', 'matchup_info',
                           'score_info', 'player_info'):
            setattr(self, frame_name, getattr(league, frame_name))

    def cum_score_top_x_draft_pics(self, x):
        """Not yet implemented: cumulative score of the top *x* draft picks."""
        pass
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,928 | gr8scott88/FFLStats4 | refs/heads/master | /web_parsing/MatchPageParser.py |
class MatchParser:
    """Extracts matchup details from a Yahoo matchup page soup."""

    def __init__(self):
        pass

    def get_opponent(self, match_page_soup):
        """Return the opponent's team id from the matchup header section."""
        header = match_page_soup.find_all('section', {'id': 'matchup-header'})[0]
        opponent_box = header.find_all('div', {'class': 'Fz-xxl Ell'})[1]
        return self.get_id_from_href(opponent_box.find('a').get('href'))

    @staticmethod
    def get_id_from_href(href):
        """The team id is the final path segment of the href."""
        return href.split('/')[-1]
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,929 | gr8scott88/FFLStats4 | refs/heads/master | /reporting/PlayerReporter.py |
class PlayerReporter:
    """Placeholder for per-player report generation (not yet implemented)."""
    def __init__(self):
        pass
    def generate_player_report(self):
        # TODO: implement; currently a no-op stub.
        pass
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,930 | gr8scott88/FFLStats4 | refs/heads/master | /archive/Scratch2.py | from web_parsing.DraftParser import DraftParser
from utility.YahooWebHelper import YahooWebHelper
from models import DATACONTRACT
# Scratch/exploration script: fetches one team's draft page and parses it.
# NOTE(review): this runs at import time and performs a live HTTP request
# (YahooWebHelper.get_draft_soup -> requests.get) — keep it out of any
# importable package path.
helper = YahooWebHelper()
league_id = 609682
team_id = 1
unique_id = f'{league_id}_{team_id}'
s = helper.get_draft_soup(league_id, team_id)
dp = DraftParser()
# DRAFTTRACKERCOLS = [UNIQUE_ID, LEAGUE_ID, TEAM_ID, DRAFTORDER, CLASSORDER, PLAYERNAME, PLAYERPOS]
info_dict = {DATACONTRACT.UNIQUE_ID: unique_id,
             DATACONTRACT.LEAGUE_ID: league_id,
             DATACONTRACT.TEAM_ID: team_id}
test = dp.parse_draft_info(s, info_dict)
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,931 | gr8scott88/FFLStats4 | refs/heads/master | /utility/YahooWebHelper.py | from bs4 import BeautifulSoup
import requests
from loguru import logger
class YahooWebHelper:
    """Builds Yahoo fantasy football URLs and fetches them as BeautifulSoup."""

    def __init__(self):
        self.root = r'https://football.fantasysports.yahoo.com/f1'

    def build_url(self, *paths):
        """Join each of *paths* onto the Yahoo root with single '/' separators."""
        url = self.root
        for path in paths:
            if url.endswith('/'):
                url = url + str(path)
            else:
                url = url + r'/' + str(path)
        return url

    def build_url_for_league(self, league_id):
        """URL of the league's landing page."""
        return f'{self.root}/{str(league_id)}'

    def build_team_url_by_week(self, league_id, team_id, week):
        """URL of a team's roster page for the given week."""
        html_league_and_team = self.build_url(league_id, team_id)
        return html_league_and_team + r'/team?&week=' + str(week)

    def build_draft_url(self, league_id, team_id):
        """URL of a team's draft-results page."""
        return self.build_url(league_id, team_id, 'draft')

    def build_matchup_url_by_week(self, league_id, team_id, week, mid1=1):
        """URL of a matchup page for the given week.

        GENERALIZED: the matchup id was hard-coded as '&mid1=1'; it is now
        the keyword parameter *mid1* (default 1, so every existing call
        produces exactly the same URL as before).
        """
        html_league_and_team = self.build_url(league_id, team_id)
        return html_league_and_team + r'/matchup?&week=' + str(week) + '&mid1=' + str(mid1)

    def get_league_soup(self, league_id):
        """Fetch and parse the league landing page."""
        url = self.build_url_for_league(league_id)
        logger.debug(url)
        return self.get_soup(url)

    def get_team_soup_by_week(self, league_id, team_id, week):
        """Fetch and parse a team's roster page for a week."""
        url = self.build_team_url_by_week(league_id, team_id, week)
        logger.debug(url)
        return self.get_soup(url)

    def get_matchup_soup_by_week(self, league_id, team_id, week):
        """Fetch and parse a matchup page for a week."""
        url = self.build_matchup_url_by_week(league_id, team_id, week)
        logger.debug(url)
        return self.get_soup(url)

    def get_draft_soup(self, league_id, team_id):
        """Fetch and parse a team's draft-results page."""
        url = self.build_draft_url(league_id, team_id)
        logger.debug(url)
        return self.get_soup(url)

    @staticmethod
    def get_soup(url):
        """Fetch *url* over HTTP and return its parsed BeautifulSoup document."""
        page = requests.get(url)
        soup = BeautifulSoup(page.content, 'html.parser')
        return soup
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,932 | gr8scott88/FFLStats4 | refs/heads/master | /archive/fantasy_league_management/StatManager.py | from utility import DataManager
class StatManager:
    """Computes aggregate weekly stats from the data manager's frames."""

    def __init__(self, data_manager: DataManager):
        self.team_frame = data_manager.team_frame
        self.player_frame = data_manager.player_frame

    def plot_real_score_by_week(self):
        """Plot 'Real Score' grouped by (League, Team)."""
        by_team = self.player_frame.groupby(['League', 'Team'])
        by_team.plot('Real Score')

    def cum_sum_position_by_week(self, pos: str, week: int):
        """Total 'RealScore' per team for players at *pos* during *week*,
        ordered by league name then team order."""
        in_week = self.player_frame.loc[self.player_frame['Week'] == week]
        at_pos = in_week.loc[in_week['ActivePos'] == pos]
        totals = at_pos.groupby(['LeagueName', 'TeamName', 'TeamOrder']).sum()
        ordered = totals.sort_values(['LeagueName', 'TeamOrder'])
        return ordered['RealScore']

    def max_score_position_by_week(self, pos: str, week: int):
        """Best single 'RealScore' per team for players at *pos* during *week*,
        ordered by league name then team order."""
        in_week = self.player_frame.loc[self.player_frame['Week'] == week]
        at_pos = in_week.loc[in_week['ActivePos'] == pos]
        peaks = at_pos.groupby(['LeagueName', 'TeamName', 'TeamOrder']).max()
        ordered = peaks.sort_values(['LeagueName', 'TeamOrder'])
        return ordered['RealScore']
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,933 | gr8scott88/FFLStats4 | refs/heads/master | /web_parsing/DraftParser.py | import pandas as pd
from models import DATACONTRACT
from loguru import logger
class DraftParser:
    """Parses a team's draft-results table into a pandas DataFrame."""

    def __init__(self):
        pass

    def parse_draft_info(self, draft_soup, info_dict):
        """Return a DataFrame with one row per drafted player.

        Each row merges *info_dict* (league/team identifiers) with the
        fields parsed from a table row; the header row is skipped.
        """
        team_table_section = draft_soup.find("section", {"id": "draft-team"})
        data_rows = team_table_section.find_all('tr')[1:]
        records = []
        for draft_row in data_rows:
            record = {**info_dict, **self.parse_draft_row(draft_row)}
            logger.debug(record)
            records.append(record)
        logger.debug(records)
        return pd.DataFrame(records)

    def parse_draft_row(self, row_soup) -> dict:
        """Extract draft order, round order, player name and position from a <tr>."""
        cells = row_soup.find_all('td')
        parsed = {
            DATACONTRACT.DRAFTORDER: cells[0].contents[0],
            DATACONTRACT.CLASSORDER: cells[1].contents[0].replace('(', '').replace(')', ''),
            DATACONTRACT.PLAYERNAME: cells[2].find('a').contents[0],
            DATACONTRACT.PLAYERPOS: cells[3].contents[0],
        }
        logger.debug(parsed)
        return parsed
# --- scratch: exercising DataFrame construction from dicts ---
d1 = {DATACONTRACT.PLAYERPOS: 'WR', DATACONTRACT.PLAYERNAME: ['P1']}
d2 = {DATACONTRACT.PLAYERPOS: 'WR', DATACONTRACT.PLAYERNAME: ['P2']}
d3 = {DATACONTRACT.PLAYERPOS: ['QB'], DATACONTRACT.PLAYERNAME: ['P3']}
d4 = {DATACONTRACT.PLAYERPOS: ['QB'], DATACONTRACT.DRAFTORDER: ['5']}
l = [d1, d2, d3, d4]
df = pd.DataFrame(l)
df = pd.DataFrame.from_dict(d1)
# BUG FIX: the original called df.append(...) and discarded the result.
# DataFrame.append never mutated in place and was removed in pandas 2.0;
# pd.concat with reassignment does what was presumably intended.
df = pd.concat([df, pd.DataFrame.from_dict(d2)])
d = {DATACONTRACT.PLAYERPOS: ['WR', 'WR', 'QB'], DATACONTRACT.PLAYERNAME: ['P1', 'P2', 'P3']}
df = pd.DataFrame.from_dict(d)
d1 = {'r1': 1, 'r2': 2}
d2 = {'r3': 3, 'r4': 4}
63,934 | gr8scott88/FFLStats4 | refs/heads/master | /data_handlers/PandasHandler.py | import os
import pandas as pd
class PandasDataHandler:
    """Loads and saves league data frames as gzip-compressed parquet files.

    Files are addressed relative to the current working directory as
    ``<league_id>_LeagueInfo.parquet`` and
    ``<league_id>_WeeklyTeamScores.parquet``.
    """

    def __init__(self):
        pass

    @staticmethod
    def _load_parquet(load_file):
        """Shared loader: return the frame, or None when the file is absent
        (matching the original implicit-None behaviour)."""
        if os.path.isfile(load_file):
            df = pd.read_parquet(load_file)
            print(f'Loaded file {load_file} from saved data')
            return df

    @staticmethod
    def _save_parquet(save_file, data, overwrite) -> bool:
        """Shared saver: refuse to clobber an existing file unless *overwrite*.

        Returns True when the file was written, False when blocked.
        BUG FIX: the original overwrite path fell through without a return
        statement (implicitly None) even though the write succeeded; it now
        returns True consistently.
        """
        if os.path.isfile(save_file):
            if not overwrite:
                print('File already exists, specify OVERWRITE')
                return False
            os.remove(save_file)
        data.to_parquet(save_file, compression='gzip')
        print('Saved to PARQUET file')
        return True

    @staticmethod
    def load_local_league_info(league_id) -> pd.DataFrame:
        """Load <league_id>_LeagueInfo.parquet, or None if not saved yet."""
        return PandasDataHandler._load_parquet(f'{league_id}_LeagueInfo.parquet')

    @staticmethod
    def save_local_league_info(league_id, league_data, overwrite=False):
        """Save league info; True on write, False if blocked by an existing file."""
        return PandasDataHandler._save_parquet(
            f'{league_id}_LeagueInfo.parquet', league_data, overwrite)

    @staticmethod
    def load_local_team_weekly_scores(league_id):
        """Load <league_id>_WeeklyTeamScores.parquet, or None if not saved yet."""
        return PandasDataHandler._load_parquet(f'{league_id}_WeeklyTeamScores.parquet')

    @staticmethod
    def save_local_team_weekly_scores(league_id, weekly_score_data, overwrite=False):
        """Save weekly team scores; True on write, False if blocked."""
        return PandasDataHandler._save_parquet(
            f'{league_id}_WeeklyTeamScores.parquet', weekly_score_data, overwrite)
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,935 | gr8scott88/FFLStats4 | refs/heads/master | /archive/fantasy_league_management/LeagueManager.py | from utility.DateManager import DateManager
import configparser
import os
from utility.YahooWebHelper import YahooWebHelper
from web_parsing.LeaguePageParser import LeaguePageParser
import pandas as pd
class LeagueManager:
    """Manages configuration and data parsing for a single Yahoo league."""

    def __init__(self, league_id):
        self.league_id = league_id
        self.team_ids = []
        self.date_manager = DateManager()
        # BUG FIX: the web helper must exist before generate_league() runs,
        # because generate_league() uses self.web_helper on the
        # no-config-file path; the original assigned it afterwards and
        # crashed with AttributeError on first run.
        self.web_helper = YahooWebHelper()
        self.generate_league()

    def load_league(self):
        """Re-load team ids if a config file exists for this league."""
        config_file = f'config/{self.league_id}.ini'
        if os.path.isfile(config_file):
            # BUG FIX: load_team_ids requires a path; the original called it
            # with no arguments (TypeError).
            self.load_team_ids(config_file)

    def generate_league(self):
        """Populate team ids from config/<league_id>.ini, or parse the league page."""
        config_file = f'config/{self.league_id}.ini'
        if os.path.isfile(config_file):
            config = configparser.ConfigParser()
            config.read(config_file)
            for team in config['TEAMS'].values():
                self.team_ids.append(team)
        else:
            parser = LeaguePageParser(self.league_id, self.web_helper)
            # Result not yet persisted -- see convert_df_to_league_config.
            parser.parse_league_info()

    def convert_df_to_league_config(self, df: pd.DataFrame):
        """Not implemented: serialize a league-info frame into a config file."""
        pass

    def load_team_ids(self, path=None):
        """Read team ids from *path* (defaults to this league's config file).

        Not fully implemented yet; currently just opens the file.
        """
        if path is None:
            path = f'config/{self.league_id}.ini'
        with open(path) as configfile:
            pass

    def parse_all_data(self):
        """Parse every week up to the current week (per DateManager).

        Body is a stub -- the loop does nothing yet.
        """
        current_week = self.date_manager.get_current_week()
        for week in range(current_week):
            pass

    def parse_league_by_week(self, week):
        """Not implemented: parse a single week's data."""
        pass
# class MultiLeagueManager:
# def __init__(self, league_config: configparser):
# self.league_ids = []
# self.leagues = []
# self.config = league_config
# self.load_league_ids()
#
# def load_league_ids(self):
# leagues = []
# for key in self.config['LEAGUES']:
# if 'league' in key:
# leagues.append(self.config['LEAGUES'][key])
# self.league_ids = leagues
#
# def load_leagues(self):
# for league_id in self.league_ids:
# league = LeagueManager(league_id)
# # league.parse_all_data()
# self.leagues.append(league)
#
# def load_league_by_week(self, week):
# pass
#
# def load_all_data_to_date(self):
# for league in self.leagues:
# league.lo
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,936 | gr8scott88/FFLStats4 | refs/heads/master | /archive/quick_download.py | from archive.data_downloading import ResultsDownloader as rd
# Download the current results (through week 12) for both tracked leagues.
for _league_id in (609682, 713428):
    results = rd.ResultsDownloader(_league_id, 10)
    results.download_all_current(12)
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,937 | gr8scott88/FFLStats4 | refs/heads/master | /models/Team.py | from bs4 import BeautifulSoup
from web_parsing import TeamPageParser
from utility import FileManager, WebHelper
import os
from models import Player, Webpage
from archive import Helper, GLOBALS
class Team:
    """One fantasy team: loads its weekly roster page and parses scores."""

    def __init__(self, league_id, team_id):
        self.league_id = league_id
        self.team_id = team_id
        self.soup = False  # BeautifulSoup of the team page once loaded
        self.parser = TeamPageParser.TeamParser()
        self.team_data = []
        self.player_data = []

    def load_soup_for_week(self, week, time):
        """Load the team page soup for *week*, preferring a cached HTML file.

        Falls back to fetching the page from Yahoo (and saving the HTML)
        when no cached copy exists.
        """
        html_file_name = str(self.team_id) + '_' + str(time) + '.html'
        html_dir = os.path.join(str(self.league_id), 'week_' + str(week))
        loaded_html = FileManager.load_html(html_dir, html_file_name)
        if loaded_html is False:
            # BUG FIX: stray ')' inside the original log string.
            print('Loading HTML from website')
            team_url = WebHelper.build_url_for_week(GLOBALS.URLROOT, self.league_id, self.team_id, week)
            print(team_url)
            webpage = Webpage.Webpage(team_url)
            team_soup = webpage.get_soup()
            webpage.save_team_html(week, time)
        else:
            print('HTML loaded from file')
            print(os.path.join(html_dir, html_file_name))
            team_soup = BeautifulSoup(loaded_html, 'html.parser')
        self.soup = team_soup

    def parse_team_info(self):
        """Parse team-level stats from the loaded soup; False if soup missing."""
        if not self.soup:
            print('Soup not loaded')
            return False
        else:
            self.team_data = self.parser.parse_team_stats(self.soup)
            print(self.team_data)
            return self.team_data

    def get_offensive_players(self):
        """Return the <tr> rows of the offensive stats table (statTable0)."""
        offensive_player_table = self.soup.find_all('table', id='statTable0')
        offensive_players = offensive_player_table[0].find('tbody').find_all('tr')
        return offensive_players

    def get_kickers(self):
        """Return the kicker stats table (statTable1)."""
        kicker_table = self.soup.find_all('table', id='statTable1')
        return kicker_table[0]

    def get_defensive_players(self):
        """Return the defensive stats table (statTable2)."""
        defensive_table = self.soup.find_all('table', id='statTable2')
        return defensive_table[0]

    def parse_all_player_info(self):
        """Parse offense, kicker and defense entries into one float-converted list."""
        all_data = []
        for offensive_player_row in self.get_offensive_players():
            offensive_player = Player.Player(offensive_player_row, 'OFF')
            all_data.append(offensive_player.parse_player_data())
        kicker = Player.Player(self.get_kickers(), 'KICKER')
        all_data.append(kicker.parse_player_data())
        defense = Player.Player(self.get_defensive_players(), 'DEF')
        all_data.append(defense.parse_player_data())
        return Helper.player_data_float_convert(all_data)
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,938 | gr8scott88/FFLStats4 | refs/heads/master | /utility/DataManager.py | import pandas as pd
import os
import requests
from archive import Helper
from models import DATACONTRACT
class DataManager:
    """Accumulates, analyses and exports team/player score data for one league.

    Data lives in pandas frames (team scores, player scores, league info)
    and is persisted as CSV files under ``data_archive/<league_id>/``.
    """

    def __init__(self, league_id: int):
        self.team_score_frame = pd.DataFrame(columns=DATACONTRACT.TEAMSCORECOLS)
        self.player_score_frame = pd.DataFrame(columns=DATACONTRACT.PLAYERSCORECOLS)
        # NOTE(review): `j` looks like a leftover scratch name for a
        # team-info frame; kept because it is a public attribute.
        self.j = pd.DataFrame(columns=DATACONTRACT.TEAMINFOCOLS)
        self.league_id = league_id
        self.data_folder = 'data_archive'
        self.league_data_file = 'League_Info.csv'
        self.data_file_path = os.path.join(self.data_folder, str(self.league_id), self.league_data_file)
        self.data_directory = os.path.join(self.data_folder, str(self.league_id))
        self.league_info_frame = self.load_league_info()

    def add_team_from_row(self, team_row):
        """Append one team-score row (sequence matching TEAMSCORECOLS)."""
        self.team_score_frame.loc[len(self.team_score_frame)] = team_row

    def add_player_frame(self, player_frame):
        """Concatenate a block of player rows onto the player score frame.

        BUG FIX: the original called ``DataFrame.append`` (removed in
        pandas 2.0) and discarded its return value, so nothing was ever
        added. ``pd.concat`` with reassignment performs the intended merge.
        """
        self.player_score_frame = pd.concat([self.player_score_frame, player_frame])

    def add_league_info(self, league_info_df):
        """Replace the league info frame wholesale."""
        self.league_info_frame = league_info_df

    def add_tracker_info(self, tracker_array):
        """Append each row of *tracker_array* to the league info frame."""
        print(tracker_array)
        for row in tracker_array:
            self.league_info_frame.loc[len(self.league_info_frame)] = row

    def add_player_from_row(self, player_row):
        """Append one player-score row (sequence matching PLAYERSCORECOLS)."""
        self.player_score_frame.loc[len(self.player_score_frame)] = player_row

    def load_league_info(self):
        """Load League_Info.csv for this league if present.

        NOTE(review): implicitly returns None when the file is absent, so
        league_info_frame may be None until add_league_info is called --
        confirm callers handle that before "fixing" it.
        """
        if self.does_file_exist(self.data_file_path):
            league_info = pd.read_csv(self.data_file_path)
            print(league_info)
            return league_info

    @staticmethod
    def does_file_exist(file_path):
        """Return True iff *file_path* names an existing regular file."""
        return os.path.isfile(file_path)

    def load_or_download_html(self, unique_id: Helper.UniqueID):
        """Return the page HTML for *unique_id*, from cache or the web.

        A freshly downloaded page is written to the cache before returning.
        """
        save_file = self.get_save_file_path(unique_id)
        print('Save file: ' + save_file)
        if self.does_file_exist(save_file):
            with open(save_file) as f:
                print('Opening saved file')
                return f.read()
        else:
            url = self.gen_url(unique_id)
            page = requests.get(url)
            self.save_html(save_file, page.content)
            print('Loading from website')
            print(url)
            return page.content

    @staticmethod
    def save_html(file_path, html):
        """Write *html* to *file_path* as UTF-8 bytes."""
        with open(file_path, 'wb') as f:
            f.write(str(html).encode('utf-8'))

    def get_save_file_path(self, unique_id: Helper.UniqueID):
        """Return the cache file path for *unique_id*, creating its folder."""
        directory = self.get_folder_path(unique_id)
        self.create_directory_if_necessary(directory)
        return os.path.join(directory, self.get_file_name(unique_id))

    def get_folder_path(self, unique_id: Helper.UniqueID):
        """Return the cache folder for *unique_id*'s week.

        BUG FIX: the original referenced ``self.local_dir``, which is never
        defined in this class (guaranteed AttributeError). data_directory
        (data_archive/<league_id>) is the closest existing attribute --
        TODO confirm the intended cache root.
        """
        return os.path.join(self.data_directory, 'data_storage', str(unique_id.week))

    @staticmethod
    def get_file_name(unique_id: Helper.UniqueID):
        """Return the cache file name for *unique_id*."""
        save_name = str(unique_id.league_id) + '_' + str(unique_id.team_id) + '_week' + str(unique_id.week) + '_' + str(unique_id.time) + '.html'
        return save_name

    @staticmethod
    def gen_url(unique_id):
        """Build the Yahoo team page URL for *unique_id*.

        NOTE(review): this reads unique_id.league / unique_id.team while
        get_file_name reads unique_id.league_id / team_id -- one of the two
        attribute spellings is presumably wrong; verify against
        Helper.UniqueID.
        """
        parse_url = 'https://football.fantasysports.yahoo.com/f1/' + str(unique_id.league) + '/' + str(unique_id.team) + '/' + 'team?&week=' + str(unique_id.week)
        return parse_url

    def get_team_data(self):
        """Return the accumulated team score frame."""
        return self.team_score_frame

    def get_player_data(self):
        """Return the accumulated player score frame."""
        return self.player_score_frame

    def export_team_data(self, name):
        """Write the team score frame to CSV at *name*."""
        self.team_score_frame.to_csv(name)

    def export_player_data(self, name):
        """Write the player score frame to CSV at *name*."""
        self.player_score_frame.to_csv(name)

    def quick_export(self):
        """Dump team.csv and player.csv into the league data directory."""
        quick_team_file = os.path.join(self.data_directory, 'team.csv')
        quick_player_file = os.path.join(self.data_directory, 'player.csv')
        # Reuse the shared delete-if-present helper instead of duplicating it.
        self.manage_file_existence(quick_team_file)
        self.export_team_data(quick_team_file)
        self.manage_file_existence(quick_player_file)
        self.export_player_data(quick_player_file)

    @staticmethod
    def create_directory_if_necessary(directory):
        """Create *directory* (and parents) if it does not exist."""
        if not os.path.exists(directory):
            os.makedirs(directory)

    def export_weekly_team_data(self, week):
        """Not implemented: export one week's team data."""
        pass

    def add_team_info(self, team_info_array, unique_info):
        """Prefix *team_info_array* with *unique_info* and store the row."""
        full_team_data = unique_info + team_info_array
        self.add_team_from_row(full_team_data)

    def add_player_info(self, player_info_array, unique_info):
        """Prefix each player row with *unique_info* and store them."""
        for row in player_info_array:
            full_player_data = unique_info + row
            self.add_player_from_row(full_player_data)

    def cum_sum_position_by_week(self, pos: str, week: int):
        """Sum all scores at *pos* per team for *week*, joined with league info."""
        weekly_players = self.player_score_frame.loc[self.player_score_frame['Week'] == week]
        weekly_players_by_pos = weekly_players.loc[weekly_players['ActivePos'] == pos]
        result = weekly_players_by_pos.groupby(['UniqueID']).sum()
        # UniqueID becomes the group index; join_and_sort joins on it directly.
        ordered = self.join_and_sort(result)
        return ordered

    def max_score_position_by_week(self, pos: str, week: int):
        """Best single score at *pos* per team for *week*, joined with league info."""
        weekly_players = self.player_score_frame.loc[self.player_score_frame['Week'] == week]
        weekly_players_by_pos = weekly_players.loc[weekly_players['ActivePos'] == pos]
        result = weekly_players_by_pos.groupby(['UniqueID']).max()
        ordered = self.join_and_sort(result)
        return ordered

    def get_complete_team_frame(self):
        """Return team scores merged with league info on UniqueID."""
        merged = pd.merge(self.team_score_frame, self.league_info_frame, on='UniqueID')
        return merged

    def export_complete_team_frame(self, league):
        """Export the merged team frame, sorted by week then team order."""
        quick_league_file = os.path.join(self.data_directory, str(league) + '_TeamData.csv')
        self.manage_file_existence(quick_league_file)
        complete_team_frame = self.get_complete_team_frame()
        sorted_team_data = complete_team_frame.sort_values(by=['Week', 'Order'])
        print('Exporting team info to ' + str(quick_league_file))
        sorted_team_data.to_csv(quick_league_file)

    @staticmethod
    def manage_file_existence(file):
        """Delete *file* if it already exists (prepare for overwrite)."""
        if os.path.isfile(file):
            os.remove(file)

    def join_and_sort(self, result_frame: pd.DataFrame):
        """Join grouped results to league info on UniqueID and sort by Order."""
        joined = self.league_info_frame.join(result_frame, on='UniqueID', how='right')
        ordered = joined.sort_values(by='Order')
        return ordered

    def export_dataframe(self, data: pd.DataFrame, name: str):
        """Write *data* to <data_directory>/<name>.csv, replacing any old file."""
        output_path = os.path.join(self.data_directory, name + '.csv')
        self.manage_file_existence(output_path)
        data.to_csv(output_path)
| {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
63,939 | gr8scott88/FFLStats4 | refs/heads/master | /utility/FileManager.py | import os
import pandas as pd
from archive import GLOBALS
def save_df_to_file(directory, name, df: pd.DataFrame, overwrite=True):
    """Write *df* as CSV to directory/name, creating the directory if needed.

    When *overwrite* is true, any pre-existing file is deleted first.
    """
    if not does_directory_exist(directory):
        create_directory(directory)
    target = os.path.join(directory, name)
    if overwrite:
        delete_file(target)
    df.to_csv(target)
def delete_file(file_path):
    """Remove *file_path* if it exists; silently do nothing otherwise."""
    if os.path.exists(file_path):
        os.remove(file_path)
def load_league_info(league_id):
    """Compute the on-disk location of a league's info CSV.

    NOTE(review): this function appears unfinished -- it builds the
    directory and file name but never reads the file or returns anything
    (implicitly returns None, and file_name is unused).
    """
    league_dir = os.path.join(GLOBALS.ROOTDIR, str(league_id))
    file_name = str(league_id) + '_info.csv'
def load_df(directory, name):
    """Load directory/name as a DataFrame; return False when there is no usable CSV.

    BUG FIX: the original caught bare ``Exception``, which silently mapped
    genuine parse/IO bugs to the False sentinel. Only the "nothing readable
    there" cases are swallowed now; other errors propagate.
    """
    df = False
    try:
        df = pd.read_csv(os.path.join(directory, name))
    except (FileNotFoundError, pd.errors.EmptyDataError):
        print('DF does not exist')
    return df
def save_html(directory, name, html, overwrite=True):
    """Persist *html* (stringified, UTF-8 encoded) to directory/name.

    Creates the directory when missing; with *overwrite* true, any existing
    file is removed first.
    """
    if not does_directory_exist(directory):
        create_directory(directory)
    destination = os.path.join(directory, name)
    if overwrite:
        delete_file(destination)
    payload = str(html).encode('utf-8')
    with open(destination, 'wb') as handle:
        handle.write(payload)
def does_file_exist(file_path):
    """Return True iff *file_path* names an existing regular file.

    (Simplified from an if/else returning True/False literals.)
    """
    return os.path.isfile(file_path)
def does_directory_exist(directory):
    """Return True iff *directory* names an existing directory.

    (Simplified from an if/else returning True/False literals.)
    """
    return os.path.isdir(directory)
def create_directory(directory):
    """Create *directory*; raises if it exists or its parent is missing."""
    os.mkdir(directory)
def load_html(directory, file):
    """Return the saved HTML text at directory/file, or False if absent."""
    path = os.path.join(directory, file)
    if not does_file_exist(path):
        return False
    with open(path) as handle:
        return handle.read()
# def get_html_file(league, team, week) | {"/data_vis/LeagueVisualizer.py": ["/models/League.py"], "/web_parsing/TeamPageParser.py": ["/utility/YahooWebHelper.py"], "/archive/data_analysis/data_analyzer.py": ["/models/League.py"], "/archive/Scratch2.py": ["/web_parsing/DraftParser.py", "/utility/YahooWebHelper.py"], "/archive/fantasy_league_management/LeagueManager.py": ["/utility/DateManager.py", "/utility/YahooWebHelper.py", "/web_parsing/LeaguePageParser.py"], "/Main.py": ["/data_vis/LeagueVisGen.py", "/models/League.py"], "/data_vis/LeagueVisGen.py": ["/models/League.py"], "/models/League.py": ["/web_parsing/LeaguePageParser.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/web_parsing/DraftParser.py", "/data_handlers/LocalDataManager.py", "/data_handlers/PandasHandler.py", "/utility/YahooWebHelper.py"], "/ScratchFile.py": ["/utility/YahooWebHelper.py", "/web_parsing/MatchPageParser.py", "/web_parsing/TeamPageParser.py", "/web_parsing/PlayerParser.py", "/data_handlers/PandasHandler.py", "/data_vis/LeagueVisualizer.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.