index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
18,216
|
MinjiKim77/human_wea
|
refs/heads/master
|
/nalsiwoori/migrations/0001_initial.py
|
# Generated by Django 3.0.8 on 2020-07-21 09:21
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Initial schema for the nalsiwoori app: map_data, Users, Selection."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='map_data',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('state', models.CharField(max_length=255)),
                ('city', models.CharField(max_length=255)),
                # max_length is not a valid IntegerField option (Django system
                # check fields.W122: it is ignored), so it is omitted here;
                # the resulting column is identical.
                ('lat', models.IntegerField()),
                ('lng', models.IntegerField()),
                ('map_x', models.IntegerField()),
                ('map_y', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Users',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_name', models.CharField(max_length=64)),
                ('user_email', models.EmailField(max_length=64)),
                ('user_nick', models.CharField(max_length=64)),
                ('user_pw', models.CharField(max_length=64)),
            ],
        ),
        migrations.CreateModel(
            name='Selection',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('map_idx', models.IntegerField()),
                ('state', models.CharField(max_length=255)),
                ('city', models.CharField(max_length=255)),
                ('cur_wea', models.CharField(max_length=255)),
                # Frozen autogenerated default (time of makemigrations); kept
                # as-is because changing a migration default alters history.
                ('pub_date', models.DateTimeField(default=datetime.datetime(2020, 7, 21, 9, 21, 6, 521936, tzinfo=utc))),
                ('map_data', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='nalsiwoori.map_data')),
                ('user_data', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='nalsiwoori.Users')),
            ],
        ),
    ]
|
{"/account/views.py": ["/account/models.py"], "/nalsiwoori/admin.py": ["/nalsiwoori/models.py"], "/nalsiwoori/views.py": ["/nalsiwoori/models.py", "/account/models.py"], "/nalsiwoori/models.py": ["/account/models.py"]}
|
18,217
|
MinjiKim77/human_wea
|
refs/heads/master
|
/nalsiwoori/models.py
|
from django.db import models
from django.utils import timezone
from account.models import User
class Users(models.Model):
    # App-local account record; largely duplicates account.models.User —
    # NOTE(review): confirm which of the two models the app actually uses.
    user_name= models.CharField(max_length=64)
    user_email= models.EmailField(max_length=64)
    user_nick= models.CharField(max_length=64)
    # NOTE(review): password stored as a plain CharField — presumably
    # unhashed; verify against the registration/login views.
    user_pw = models.CharField(max_length=64)
class map_data(models.Model):
    """A selectable map cell: state/city names plus coordinates."""
    state = models.CharField(max_length=255)
    city = models.CharField(max_length=255)
    # max_length is not a valid IntegerField option (Django check
    # fields.W122: it is silently ignored), so it is removed — the
    # database column is unchanged.
    lat = models.IntegerField()
    lng = models.IntegerField()
    map_x = models.IntegerField()
    map_y = models.IntegerField()
class Selection(models.Model):
    """A user's saved weather selection for a map location."""
    user_data = models.ForeignKey(User, on_delete=models.CASCADE, default=0)
    map_data = models.ForeignKey(map_data, on_delete=models.CASCADE, null=True)
    # max_length is invalid (ignored) on IntegerField — removed, no schema change.
    map_idx = models.IntegerField()
    state = models.CharField(max_length=255)
    city = models.CharField(max_length=255)
    cur_wea = models.CharField(max_length=255)
    # BUG FIX: pass the callable, not its result. `timezone.now()` was
    # evaluated once at import time, freezing the same timestamp into every
    # new row for the lifetime of the process.
    pub_date = models.DateTimeField(default=timezone.now)
|
{"/account/views.py": ["/account/models.py"], "/nalsiwoori/admin.py": ["/nalsiwoori/models.py"], "/nalsiwoori/views.py": ["/nalsiwoori/models.py", "/account/models.py"], "/nalsiwoori/models.py": ["/account/models.py"]}
|
18,218
|
MinjiKim77/human_wea
|
refs/heads/master
|
/account/models.py
|
from django.db import models
class User(models.Model):
    # Hand-rolled account model (not django.contrib.auth).
    nick = models.CharField(max_length=255)
    name = models.CharField(max_length=255)
    # Stored as CharField rather than EmailField, so no format validation.
    email = models.CharField(max_length=255)
    user_id = models.CharField(max_length=255)
    # NOTE(review): appears to hold the raw password string — confirm it is
    # hashed/encrypted somewhere before save.
    user_pw = models.CharField(max_length=255)
|
{"/account/views.py": ["/account/models.py"], "/nalsiwoori/admin.py": ["/nalsiwoori/models.py"], "/nalsiwoori/views.py": ["/nalsiwoori/models.py", "/account/models.py"], "/nalsiwoori/models.py": ["/account/models.py"]}
|
18,219
|
MinjiKim77/human_wea
|
refs/heads/master
|
/human_wea/urls.py
|
from django.contrib import admin
from django.urls import path,include
from nalsiwoori import views
from account import views
urlpatterns = [
    path('admin/', admin.site.urls),
    # Both apps are mounted at the site root. Django tries patterns in
    # order, so account.urls is consulted first and nalsiwoori.urls only
    # sees paths account.urls does not match.
    path('', include('account.urls')),
    # path('account/',include('account.urls')),
    # path('nalsiwoori/',include('nalsiwoori.urls')),
    path('',include('nalsiwoori.urls')),
]
|
{"/account/views.py": ["/account/models.py"], "/nalsiwoori/admin.py": ["/nalsiwoori/models.py"], "/nalsiwoori/views.py": ["/nalsiwoori/models.py", "/account/models.py"], "/nalsiwoori/models.py": ["/account/models.py"]}
|
18,220
|
bids-standard/bids-validator
|
refs/heads/master
|
/tools/prep_zenodo.py
|
#!/usr/bin/env python3
import git
import json
from subprocess import run, PIPE
from pathlib import Path
def decommify(name):
    """Convert a '"Last, First"' git-shortlog name to '"First Last"'.

    Names without a comma are returned unchanged.
    """
    parts = name.split(', ')
    parts.reverse()
    return ' '.join(parts)
# Committers excluded from the creators list (bots, CI accounts).
blacklist = {
    'dependabot[bot]',
}

git_root = Path(git.Repo('.', search_parent_directories=True).working_dir)
zenodo_file = git_root / '.zenodo.json'

# Start from the existing .zenodo.json (if any) so manually curated
# metadata (ORCIDs, affiliations, ...) survives regeneration.
zenodo = json.loads(zenodo_file.read_text()) if zenodo_file.exists() else {}

orig_creators = zenodo.get('creators', [])
creator_map = {decommify(creator['name']): creator
               for creator in orig_creators}

shortlog = run(['git', 'shortlog', '-ns'], stdout=PIPE)
# Each shortlog line is "<count>\t<name>"; flip to (name, count).
counts = [line.split('\t', 1)[::-1]
          for line in shortlog.stdout.decode().split('\n') if line]

# The same person can appear several times (different emails without a
# .mailmap entry); sum their commit counts.
commit_counts = {}
for committer, commits in counts:
    commit_counts[committer] = commit_counts.get(committer, 0) + int(commits)

# Stable sort:
# Number of commits in reverse order
# Ties broken by alphabetical order of first name
committers = [committer
              for committer, _ in sorted(commit_counts.items(),
                                         key=lambda x: (-x[1], x[0]))
              if committer not in blacklist]

# Pin the designated first author to the top of the list.
first_author = 'Ross Blair'
if committers and committers[0] != first_author:
    # BUG FIX: the unconditional remove() raised ValueError when the first
    # author had no commits in this clone (e.g. shallow history).
    if first_author in committers:
        committers.remove(first_author)
    committers.insert(0, first_author)

creators = [
    creator_map.get(committer, {'name': committer})
    for committer in committers
]

zenodo['creators'] = creators
zenodo_file.write_text(json.dumps(zenodo, ensure_ascii=False, indent=2) + '\n')
|
{"/bids-validator/bids_validator/__init__.py": ["/bids-validator/bids_validator/bids_validator.py"]}
|
18,221
|
bids-standard/bids-validator
|
refs/heads/master
|
/bids-validator/bids_validator/__init__.py
|
"""BIDS validator common Python package."""
from .bids_validator import BIDSValidator
__all__ = ['BIDSValidator']
from . import _version
__version__ = _version.get_versions()['version']
|
{"/bids-validator/bids_validator/__init__.py": ["/bids-validator/bids_validator/bids_validator.py"]}
|
18,222
|
bids-standard/bids-validator
|
refs/heads/master
|
/bids-validator/bids_validator/test_bids_validator.py
|
"""Test BIDSValidator functionality.
git-annex and datalad are used to download a test data structure without the
actual file contents.
"""
import os
import pytest
import datalad.api
from bids_validator import BIDSValidator
HOME = os.path.expanduser('~')
# Dataset name -> datalad clone URL for the test data.
TEST_DATA_DICT = {
    'eeg_matchingpennies': (
        'https://gin.g-node.org/sappelhoff/eeg_matchingpennies'
    ),
}
# Any path containing one of these substrings is skipped when collecting
# test files (VCS internals and non-BIDS payload).
EXCLUDE_KEYWORDS = ['git', 'datalad', 'sourcedata', 'bidsignore']
def _download_test_data(test_data_dict, dsname):
    """Clone the datalad dataset named ``dsname`` into $HOME.

    Returns the local dataset path.
    """
    target = os.path.join(HOME, dsname)
    datalad.api.clone(source=test_data_dict[dsname], path=target)
    return target
def _gather_test_files(dspath, exclude_keywords):
"""Get test files from dataset path, relative to dataset."""
files = []
for r, _, f in os.walk(dspath):
for file in f:
fname = os.path.join(r, file)
fname = fname.replace(dspath, '')
if not any(keyword in fname for keyword in exclude_keywords):
files.append(fname)
return files
# Download once at import time so pytest can parametrize over the gathered
# file list.  NOTE(review): this runs during test collection and needs
# network access + datalad/git-annex; a failure here fails the whole module.
dspath = _download_test_data(TEST_DATA_DICT, 'eeg_matchingpennies')
files = _gather_test_files(dspath, EXCLUDE_KEYWORDS)
@pytest.mark.parametrize('fname', files)
def test_is_bids(fname):
    """Test that is_bids returns true for each file in a valid BIDS dataset."""
    validator = BIDSValidator()
    assert validator.is_bids(fname)
|
{"/bids-validator/bids_validator/__init__.py": ["/bids-validator/bids_validator/bids_validator.py"]}
|
18,223
|
bids-standard/bids-validator
|
refs/heads/master
|
/bids-validator/bids_validator/bids_validator.py
|
"""Validation class for BIDS projects."""
import re
import os
import json
class BIDSValidator():
    """Object for BIDS (Brain Imaging Data Structure) verification.

    The main method of this class is `is_bids()`. You should use it for
    checking whether a file path is compatible with BIDS.
    """

    def __init__(self, index_associated=True):
        """Initialize BIDSValidator object.

        Parameters
        ----------
        index_associated : bool
            Specifies if an associated data should be checked. If it is true
            then any file paths in directories `code/`, `derivatives/`,
            `sourcedata/` and `stimuli/` will pass the validation, else they
            won't. Defaults to True.
        """
        # The regex rule tables (JSON) live in the rules/ directory shipped
        # next to this module.
        self.dir_rules = os.path.join(os.path.dirname(__file__)) + "/rules/"
        self.index_associated = index_associated

    def is_bids(self, path):
        """Check if file path adheres to BIDS.

        Main method of the validator. uses other class methods for checking
        different aspects of the file path.

        Parameters
        ----------
        path : str
            Path of a file to be checked. Must be relative to root of a BIDS
            dataset.

        Notes
        -----
        When you test a file path, make sure that the path is relative to the
        root of the BIDS dataset the file is part of. That is, as soon as the
        file path contains parts outside of the BIDS dataset, the validation
        will fail. For example "home/username/my_dataset/participants.tsv" will
        fail, although "participants.tsv" is a valid BIDS file.

        Examples
        --------
        >>> from bids_validator import BIDSValidator
        >>> validator = BIDSValidator()
        >>> filepaths = ["/sub-01/anat/sub-01_rec-CSD_T1w.nii.gz",
        ... "/sub-01/anat/sub-01_acq-23_rec-CSD_T1w.exe", # wrong extension
        ... "home/username/my_dataset/participants.tsv", # not relative to root
        ... "/participants.tsv"]
        >>> for filepath in filepaths:
        ...     print(validator.is_bids(filepath))
        True
        False
        False
        True
        """
        conditions = []
        conditions.append(self.is_top_level(path))
        conditions.append(self.is_associated_data(path))
        conditions.append(self.is_session_level(path))
        conditions.append(self.is_subject_level(path))
        conditions.append(self.is_phenotypic(path))
        conditions.append(self.is_file(path))
        # A path is valid if it matches any one of the category checks above.
        return (any(conditions))

    def is_top_level(self, path):
        """Check if the file has appropriate name for a top-level file."""
        regexps = self.get_regular_expressions(self.dir_rules +
                                               'top_level_rules.json')
        conditions = [False if re.compile(x).search(path) is None else True for
                      x in regexps]
        return (any(conditions))

    def is_associated_data(self, path):
        """Check if file is appropriate associated data."""
        # Associated directories (code/, derivatives/, ...) only pass when
        # the validator was built with index_associated=True.
        if not self.index_associated:
            return False
        regexps = self.get_regular_expressions(self.dir_rules +
                                               'associated_data_rules.json')
        conditions = [(re.compile(x).search(path) is not None) for
                      x in regexps]
        return any(conditions)

    def is_session_level(self, path):
        """Check if the file has appropriate name for a session level."""
        regexps = self.get_regular_expressions(self.dir_rules +
                                               'session_level_rules.json')
        conditions = [self.conditional_match(x, path) for x in regexps]
        return (any(conditions))

    def is_subject_level(self, path):
        """Check if the file has appropriate name for a subject level."""
        regexps = self.get_regular_expressions(self.dir_rules +
                                               'subject_level_rules.json')
        conditions = [(re.compile(x).search(path) is not None) for
                      x in regexps]
        return (any(conditions))

    def is_phenotypic(self, path):
        """Check if file is phenotypic data."""
        regexps = self.get_regular_expressions(self.dir_rules +
                                               'phenotypic_rules.json')
        conditions = [(re.compile(x).search(path) is not None) for
                      x in regexps]
        return (any(conditions))

    def is_file(self, path):
        """Check if the file name matches the file-level naming rules."""
        # (Docstring fixed: it was a copy-paste of is_phenotypic's; this
        # method matches against file_level_rules.json.)
        regexps = self.get_regular_expressions(self.dir_rules +
                                               'file_level_rules.json')
        conditions = [(re.compile(x).search(path) is not None) for
                      x in regexps]
        return (any(conditions))

    def get_regular_expressions(self, file_name):
        """Read regular expressions from a file."""
        regexps = []
        with open(file_name) as fin:
            rules = json.load(fin)
        for key in list(rules.keys()):
            rule = rules[key]
            regexp = rule["regexp"]
            # Expand "@@@token@@@"-style placeholders with an alternation of
            # the rule's token values before compiling.
            if "tokens" in rule:
                tokens = rule["tokens"]
                for token in list(tokens):
                    regexp = regexp.replace(token, "|".join(tokens[token]))
            regexps.append(regexp)
        return regexps

    def conditional_match(self, expression, path):
        """Find conditional match."""
        match = re.compile(expression).findall(path)
        match = match[0] if len(match) >= 1 else False
        # adapted from JS code and JS does not support conditional groups
        # NOTE(review): `|` is bitwise-or on the two booleans; it behaves
        # like `or` here but does not short-circuit.
        if (match):
            if ((match[1] == match[2][1:]) | (not match[1])):
                return True
            else:
                return False
        else:
            return False
|
{"/bids-validator/bids_validator/__init__.py": ["/bids-validator/bids_validator/bids_validator.py"]}
|
18,224
|
renukartamboli/assignment
|
refs/heads/main
|
/assignment.py
|
from cryptography.fernet import Fernet
class Switcher(object):
    """Console-driven user manager with canned weather lookups.

    Class attributes:
      users: user name -> Fernet-encrypted password bytes.
      weatherInformation: location -> canned weather readings.
      cipherSuite: Fernet cipher built from a key generated at import time,
        so stored passwords are only decryptable within this process.
    """

    users = {}
    weatherInformation= {"goa":{"humidity":5,"Pressure":6,"Average Temperature":30,"Wind Speed":5,"Wind Degree":9,"UI index":12},"jaipur":{"humidity":5,"Pressure":6,"Average Temperature":30,"Wind Speed":5,"Wind Degree":9,"UI index":12},"banglore":{"humidity":5,"Pressure":6,"Average Temperature":30,"Wind Speed":5,"Wind Degree":9,"UI index":12},"-90/+90":{"humidity":5,"Pressure":6,"Average Temperature":30,"Wind Speed":5,"Wind Degree":9,"UI index":12}}
    key = Fernet.generate_key()
    cipherSuite = Fernet(key)

    def __init__(self):
        # Reset to a single known account so the operations have data to act on.
        Switcher.users = {'testUser': Switcher.cipherSuite.encrypt('123'.encode())}

    def Operation(self, method_name):
        """Dispatch to the method named `method_name` ('Invalid' if unknown)."""
        method = getattr(self, method_name, lambda: 'Invalid')
        return method()

    def _verify_password(self, encrypted, attempts=3):
        # Prompt for the password up to `attempts` times; True on a match.
        # Always reports the lock-out message after exhausting attempts
        # (the original tracked this with a flag that could be unbound).
        while attempts != 0:
            entered = input()
            if Switcher.cipherSuite.decrypt(encrypted).decode() == entered:
                return True
            print("incorrect password")
            attempts -= 1
        print("Incorrect password attemp 3 times.Please try again later.")
        return False

    def create(self):
        """Prompt for a name and password and store the new account."""
        print("enter user name:")
        user = input()
        print("enter password:")
        password = input()
        Switcher.users[user] = Switcher.cipherSuite.encrypt(password.encode())
        print("User created Successfully!!")

    def update(self):
        """Interactively rename a user and/or change their password."""
        print("Enter your userName to update")
        user = input()
        password = Switcher.users[user]
        print("Do you want to update username? y/n")
        if input() == "y":
            print("Enter new userName")
            new_user = input()
            print("Enter your password")
            if self._verify_password(password):
                del Switcher.users[user]
                Switcher.users[new_user] = password
                # Track the rename so a following password change targets the
                # new key (the original kept writing to the deleted old name).
                user = new_user
                print("Username updated successfully")
        print("Do you want to update password? y/n")
        if input() == "y":
            print("Enter old password")
            if self._verify_password(password):
                print("enter new password")
                new_pass = input()
                # BUG FIX: the original stored encrypt(password) — i.e. it
                # re-encrypted the OLD ciphertext — so the entered new
                # password was never usable.
                Switcher.users[user] = Switcher.cipherSuite.encrypt(new_pass.encode())
                print("Password updated Successfully!!")

    def delete(self):
        """Delete the named account; report instead of crashing if absent."""
        print("Enter user name to delete")
        user = input()
        # Guard: `del` on a missing name raised KeyError before.
        if user in Switcher.users:
            del Switcher.users[user]
            print("User deleted Successfully")
        else:
            print("no such user")

    def readAll(self):
        """Print all user names, tab-separated."""
        print("User Entries:")
        for name in Switcher.users:
            print(name, end="\t")
        print("\n")

    def weatherInfo(self):
        """Print the canned weather readings for a known location."""
        print("Enter City Name or Longitude and Latitude in following manner: Longitude/Latitude")
        location = input()
        if location not in Switcher.weatherInformation:
            print("no weather info for this location")
            return
        for info in Switcher.weatherInformation[location]:
            print(info, ':', Switcher.weatherInformation[location][info])

    def helpCmd(self):
        """Return the usage text shown for --help."""
        return "Press 1 for creating new user \n Press 2 to update user \n Press 3 to delete user \n Press 4 to print all users \n Press 5 for weather information \n --help for help command"
if __name__ == "__main__":
    # Interactive menu loop; dispatches menu choices to Switcher operations.
    switcher = Switcher()
    choice = 0
    actions = {'1': 'create', '2': 'update', '3': 'delete',
               '4': 'readAll', '5': 'weatherInfo'}
    while choice != "exit":
        for line in ("\n",
                     "Enter your Choice:",
                     "1 - creating new user",
                     "2 - update user",
                     "3 - delete user",
                     "4 - print all users",
                     "5 - weather information",
                     "--help for help command",
                     "Type exit to quit",
                     "\n"):
            print(line)
        choice = input()
        if choice == "--help":
            print(switcher.Operation('helpCmd'))
        elif choice in actions:
            switcher.Operation(actions[choice])
|
{"/unitTests.py": ["/assignment.py"]}
|
18,225
|
renukartamboli/assignment
|
refs/heads/main
|
/unitTests.py
|
import unittest
import builtins
import pytest
import io
import unittest.mock
from unittest.mock import patch
from assignment import Switcher
class TestMethods(unittest.TestCase):
    """Unit tests for assignment.Switcher console operations."""

    def testhelp(self):
        """helpCmd returns the exact usage text."""
        switcher = Switcher()
        self.assertEqual(switcher.Operation('helpCmd'), "Press 1 for creating new user \n Press 2 to update user \n Press 3 to delete user \n Press 4 to print all users \n Press 5 for weather information \n --help for help command")

    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def assert_stdout(self, operation, expected_output, mock_stdout):
        """Run `operation` on a fresh Switcher and compare captured stdout."""
        switcher = Switcher()
        switcher.Operation(operation)
        self.assertEqual(mock_stdout.getvalue(), expected_output)

    def testReadUsers(self):
        self.assert_stdout('readAll', 'User Entries:\ntestUser\t\n\n')

    def testDeleteUsers(self):
        # BUG FIX: the original rebound builtins.input globally and never
        # restored it, leaking the stub into every later test; patch() is
        # scoped and restores automatically.
        switcher = Switcher()
        with patch('builtins.input', return_value='testUser'):
            switcher.Operation('delete')
        self.assertEqual(switcher.users, {})

    def testWeatherInfo(self):
        # Same fix as above: scoped patch instead of rebinding builtins.input.
        with patch('builtins.input', return_value='goa'):
            self.assert_stdout('weatherInfo', 'Enter City Name or Longitude and Latitude in following manner: Longitude/Latitude\nhumidity : 5\nPressure : 6\nAverage Temperature : 30\nWind Speed : 5\nWind Degree : 9\nUI index : 12\n')

    def testUpdateUser(self):
        # Renames testUser -> testUser1 (one wrong password attempt), then
        # changes the password.
        mock_args = ['testUser', 'y', 'testUser1', '234', '123', 'y', '123', '123']
        with unittest.mock.patch('builtins.input', side_effect=mock_args):
            switcher = Switcher()
            switcher.Operation('update')

    def testCreateUser(self):
        mock_args = ['testUser1', '123']
        with unittest.mock.patch('builtins.input', side_effect=mock_args):
            switcher = Switcher()
            switcher.Operation('create')
if __name__ == '__main__':
    # Allow running this file directly: `python unitTests.py`.
    unittest.main()
|
{"/unitTests.py": ["/assignment.py"]}
|
18,226
|
JRReynosa/metricadataanalysis
|
refs/heads/master
|
/modules/helper_methods.py
|
import pandas as pd
import numpy as np
def determine_outcome(row_type, row_subtype, rowahead_type, rowahead_subtype):
    """Classify an event as successful (1) or not (0).

    A PASS fails when the next event is a non-excusable "BALL LOST"; a SHOT
    succeeds when its subtype contains "GOAL".  Returns None for any other
    event type.
    """
    # BUG FIX: the original expression
    #   not ("FORCED" or "THEFT" or "CLEARANCE" or "END HALF") in str(x)
    # collapses the `or` chain to just "FORCED", so only that keyword was
    # ever tested.  Also `x != np.nan` is always True (NaN never equals
    # anything); use pd.isna as clearly intended.
    excusable = ("FORCED", "THEFT", "CLEARANCE", "END HALF")
    lostball = (rowahead_type == "BALL LOST"
                and not pd.isna(rowahead_subtype)
                and not any(k in str(rowahead_subtype) for k in excusable))
    outcome = {
        "PASS": 1 if not lostball else 0,
        "SHOT": 1 if "GOAL" in str(row_subtype) else 0,
    }
    return outcome.get(row_type, None)
def get_events_dataframe(data_location):
    """Load the Metrica raw-events CSV and derive per-event outcomes.

    Each event's outcome is computed by peeking at the following event
    (see determine_outcome), so the final row of the file is dropped.
    Coordinates are rescaled from the unit square to a 106x68 m pitch
    centred on the origin.
    """
    # NOTE(review): error_bad_lines was removed in pandas 2.0
    # (on_bad_lines is the replacement) — confirm the pinned pandas
    # version before upgrading.
    eventsdf = pd.read_csv(data_location, error_bad_lines=False)
    eventsdf.columns = ['team', 'type', 'subtype', 'period', 'start_frame', 'start_time', 'end_frame',
                        'end_time', 'from_player', 'to_player', 'start_x', 'start_y', 'end_x', 'end_y']
    all_events = []
    # (The unused `field_dimen` constant from the original was removed.)
    row_iterator = eventsdf.iterrows()
    _, row = next(row_iterator)  # Get first row
    for index, rowahead in row_iterator:
        attributes = {
            "team": row.team,
            "period": row.period,
            "type": row.type,
            "subtype": row.subtype,
            "outcome": determine_outcome(row.type, row.subtype, rowahead.type, rowahead.subtype),
            "from_player": row.from_player,
            "to_player": row.to_player,
            "start_frame": row.start_frame,
            "end_frame": row.end_frame,
            "start_time": row.start_time,
            "end_time": row.end_time,
            "start_x": (row.start_x - .5) * 106.,  # Change field dimensions to 106x68 meters
            "start_y": (row.start_y - .5) * 68.,
            "end_x": (row.end_x - .5) * 106.,
            "end_y": (row.end_y - .5) * 68.,
        }
        all_events.append(attributes)
        row = rowahead
    return pd.DataFrame(all_events)
def get_tracking_dataframe(data_location):
    """Load the raw tracking CSV as strings and name its columns.

    The first two rows (header metadata) are dropped before renaming;
    columns 4+ are playerNx/playerNy coordinate pairs followed by the ball.
    """
    player_numbers = [11, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14]
    player_cols = ["player{}{}".format(n, axis)
                   for n in player_numbers
                   for axis in ("x", "y")]
    trackingdf = pd.read_csv(data_location, error_bad_lines=False, dtype=str)
    trackingdf = trackingdf.drop([0, 1]).reset_index(drop=True)
    trackingdf.columns = ["period", "frame", "time"] + player_cols + ["ballx", "bally"]
    return trackingdf
def get_all_action(event_dataframe, action):
    """Return a DataFrame of every event whose type equals ``action``.

    Keeps the descriptive event columns (the period column is dropped).
    """
    kept = [
        {
            "team": row.team,
            "type": row.type,
            "subtype": row.subtype,
            "outcome": row.outcome,
            "from_player": row.from_player,
            "to_player": row.to_player,
            "start_frame": row.start_frame,
            "end_frame": row.end_frame,
            "start_time": row.start_time,
            "end_time": row.end_time,
            "start_x": row.start_x,
            "start_y": row.start_y,
            "end_x": row.end_x,
            "end_y": row.end_y,
        }
        for _, row in event_dataframe.iterrows()
        if row.type == action
    ]
    return pd.DataFrame(kept)
def action_exception():
    """Signal an unsupported action type."""
    raise Exception("Invalid Action")
def get_seperate_action(event_dataframe, action):
    """Split events of type ``action`` ("PASS" or "SHOT") into four
    DataFrames keyed "<home|away>_<passes|shots>_<1|2>" by team and period.

    Raises
    ------
    Exception
        "Invalid Action" when ``action`` is neither "PASS" nor "SHOT".
        (BUG FIX: the original stored the exception lambda as the dict
        default and never called it, so invalid actions failed later with
        a confusing error.)
    """
    key_stub = {"PASS": "passes", "SHOT": "shots"}.get(action)
    if key_stub is None:
        raise Exception("Invalid Action")
    seperate_actions = {
        "home_{}_1".format(key_stub): [],
        "away_{}_1".format(key_stub): [],
        "home_{}_2".format(key_stub): [],
        "away_{}_2".format(key_stub): [],
    }
    for index, row in event_dataframe.iterrows():
        if row.type != action:
            continue
        attributes = {
            "team": row.team,
            "period": row.period,
            "type": row.type,
            "subtype": row.subtype,
            "outcome": row.outcome,
            "from_player": row.from_player,
            "to_player": row.to_player,
            "start_frame": row.start_frame,
            "end_frame": row.end_frame,
            "start_time": row.start_time,
            "end_time": row.end_time,
            "start_x": row.start_x,
            "start_y": row.start_y,
            "end_x": row.end_x,
            "end_y": row.end_y,
        }
        # BUG FIX: routing used to go through assign_passes(), whose
        # hard-coded "*_passes_*" keys raised KeyError for SHOT buckets.
        side = "home" if attributes["team"] == "Home" else "away"
        half = 1 if attributes["period"] == 1 else 2
        seperate_actions["{}_{}_{}".format(side, key_stub, half)].append(attributes)
    for key, value in seperate_actions.items():
        # noinspection PyTypeChecker
        seperate_actions[key] = pd.DataFrame(value)
    return seperate_actions
def distance_to_goal(shot_loc):
    """Euclidean distance from ``shot_loc`` to the centre of the attacked goal.

    The goal is at (+53, 0) for locations with x > 0, else (-53, 0).
    """
    goal_x = 53. if shot_loc[0] > 0 else -53.
    return np.linalg.norm(shot_loc - np.array([goal_x, 0.]))
def goal_angle(shot_loc):
    """Angle in radians subtended at ``shot_loc`` by the goalposts.

    The attacked goal has posts at (±53, 4) and (±53, -4) depending on the
    half of the pitch the shot is taken from.
    """
    # BUG FIX: np.float was removed in NumPy 1.24 (use the builtin float
    # dtype) and np.math was removed in NumPy 2.0 (use np.arctan2).
    p1 = np.array(shot_loc, dtype=float)
    if shot_loc[0] > 0:
        p0 = np.array((53., 4.))    # Left Post
        p2 = np.array((53., -4.))   # Right Post
    else:
        p0 = np.array((-53., -4.))  # Left Post
        p2 = np.array((-53., 4.))   # Right Post
    v0 = p0 - p1
    v1 = p2 - p1
    # Angle between the two post vectors via atan2(det, dot).
    angle = np.abs(np.arctan2(np.linalg.det([v0, v1]), np.dot(v0, v1)))
    return angle
def determine_starters(dataframe):
    """Return names of players on the pitch at kickoff.

    Looks at the first tracking row (columns 3..30 hold the playerNx /
    playerNy pairs), keeps every second column whose first value is
    present, and strips the trailing axis letter from the column name.
    """
    first_row = dataframe.iloc[:1, 3:31]
    return [
        col[:-1]
        for idx, col in enumerate(first_row)
        if first_row[col][0] is not np.nan and idx % 2 != 0
    ]
def assign_passes(match_dict, pass_attributes):
    """Append a pass-attributes dict to the matching bucket of match_dict.

    Buckets are keyed "<home|away>_passes_<1|2>": "home" when team is
    exactly "Home" (anything else is away), "1" when period is exactly 1.
    """
    side = "home" if pass_attributes["team"] == "Home" else "away"
    half = "1" if pass_attributes["period"] == 1 else "2"
    match_dict["{}_passes_{}".format(side, half)].append(pass_attributes)
|
{"/modules/data_visualization.py": ["/modules/helper_methods.py"], "/modules/clustering.py": ["/modules/helper_methods.py"], "/modules/data_extraction_and_transformation.py": ["/modules/helper_methods.py"], "/modules/logistic_regression.py": ["/modules/helper_methods.py"], "/modules/database_population_and_querying.py": ["/modules/helper_methods.py"], "/modules/linear_regression.py": ["/modules/helper_methods.py"]}
|
18,227
|
JRReynosa/metricadataanalysis
|
refs/heads/master
|
/modules/data_visualization.py
|
from soccerutils.pitch import Pitch
import numpy as np
import pandas as pd
import modules.helper_methods as helper
import matplotlib as plt
# BUG FIX: this module previously called helper.get_tracking_data,
# helper.get_events_data and helper.get_all_events — none of which exist in
# modules/helper_methods.py.  The calls below use the real helper API.
tracking_path = 'C:\\Users\\reynosaj\\PycharmProjects\\metrica_data_analysis\\data\\TrackingData.csv'
trackingdf = helper.get_tracking_dataframe(tracking_path)

events_path = 'C:\\Users\\reynosaj\\PycharmProjects\\metrica_data_analysis\\data\\EventsData.csv'
eventdf = helper.get_events_dataframe(events_path)

starters = helper.determine_starters(trackingdf)
# get_all_events has no direct counterpart; get_all_action is the closest
# existing helper — TODO(review): confirm the intended event subset.
eventsdf = helper.get_all_action(eventdf, action="PASS")
|
{"/modules/data_visualization.py": ["/modules/helper_methods.py"], "/modules/clustering.py": ["/modules/helper_methods.py"], "/modules/data_extraction_and_transformation.py": ["/modules/helper_methods.py"], "/modules/logistic_regression.py": ["/modules/helper_methods.py"], "/modules/database_population_and_querying.py": ["/modules/helper_methods.py"], "/modules/linear_regression.py": ["/modules/helper_methods.py"]}
|
18,228
|
JRReynosa/metricadataanalysis
|
refs/heads/master
|
/modules/clustering.py
|
import matplotlib.pylab as plt
import matplotlib.patches as mpatches
from sklearn.cluster import KMeans
from soccerutils.pitch import Pitch
import modules.helper_methods as helper
# Public Metrica Sports sample match #1: raw events CSV.
url = 'https://raw.githubusercontent.com/metrica-sports/sample-data/master/data/Sample_Game_1/' \
      'Sample_Game_1_RawEventsData.csv'
eventdf = helper.get_events_dataframe(url)
# Four DataFrames keyed "home_passes_1" ... "away_passes_2" (team x half).
passdf_dict = helper.get_seperate_action(eventdf, action="PASS")
def make_model(homedf, awaydf):
    """Fit a 30-cluster KMeans per team on pass start/end coordinates.

    Side effect: adds a "cluster" column (predicted cluster id) to both
    input DataFrames.  Returns (homefit, awayfit).
    """
    homemodel = KMeans(n_clusters=30)
    awaymodel = KMeans(n_clusters=30)
    homefeatures = homedf[['start_x', 'start_y', 'end_x', 'end_y']]
    homefit = homemodel.fit(homefeatures)
    awayfeatures = awaydf[['start_x', 'start_y', 'end_x', 'end_y']]
    awayfit = awaymodel.fit(awayfeatures)
    homedf["cluster"] = homemodel.predict(homefeatures)
    awaydf["cluster"] = awaymodel.predict(awayfeatures)
    return homefit, awayfit
def plot_arrows(model_fits, axis1, axis2):
    """Draw each KMeans cluster centre as a pass arrow (start -> end).

    model_fits is [(home_fit_1, away_fit_1), (home_fit_2, away_fit_2)];
    period 0 draws on axis1, period 1 on axis2; home is blue, away red.
    """
    for period in range(2):  # Two periods
        for team in range(2):  # Two teams
            for i, (start_x, start_y, end_x, end_y) in enumerate(model_fits[period][team].cluster_centers_):
                axis = axis1 if period == 0 else axis2
                axis.arrow(start_x, start_y, end_x - start_x, end_y - start_y,
                           head_width=1,
                           head_length=1,
                           color='blue' if team == 0 else 'red',
                           alpha=0.5,
                           length_includes_head=True)
                # ax1.text((start_x + end_x) / 2, (start_y + end_y) / 2, str(i + 1))
match_fits = [make_model(passdf_dict["home_passes_1"], passdf_dict["away_passes_1"]),
              make_model(passdf_dict["home_passes_2"], passdf_dict["away_passes_2"])]
# match_fits = [[period1], [period2]]; period1 = [homefit1, awayfit1]; period2 = [homefit2, awayfit2]
fig, (ax1, ax2) = plt.subplots(2, sharex="all", sharey="all", figsize=(10, 8))
plot_arrows(match_fits, ax1, ax2)
# Plot properties
red_patch = mpatches.Patch(color='red', label='Away Team')
blue_patch = mpatches.Patch(color='blue', label='Home Team')
fig.legend(handles=[red_patch, blue_patch])
ax1.set_title("First Half")
ax2.set_title("Second Half")
# Pitch is 106x68 m centred on the origin, hence the +/- half-length limits.
plt.xlim(-53, 53)
plt.ylim(-34, 34)
# fig.savefig('passing.png', dpi=100)
plt.show()
# Maybe work on pass difficulty making use of pass_distance and pass_angle?
|
{"/modules/data_visualization.py": ["/modules/helper_methods.py"], "/modules/clustering.py": ["/modules/helper_methods.py"], "/modules/data_extraction_and_transformation.py": ["/modules/helper_methods.py"], "/modules/logistic_regression.py": ["/modules/helper_methods.py"], "/modules/database_population_and_querying.py": ["/modules/helper_methods.py"], "/modules/linear_regression.py": ["/modules/helper_methods.py"]}
|
18,229
|
JRReynosa/metricadataanalysis
|
refs/heads/master
|
/modules/data_extraction_and_transformation.py
|
import modules.helper_methods as helper
# Public Metrica Sports sample match #1: raw events CSV.
url = 'https://raw.githubusercontent.com/metrica-sports/sample-data/master/data/Sample_Game_1/' \
      'Sample_Game_1_RawEventsData.csv'

eventsdf = helper.get_events_dataframe(url)
# BUG FIX: helper.get_all_shots does not exist in modules/helper_methods.py;
# get_all_action with action="SHOT" is the helper that extracts shot events.
shotsdf = helper.get_all_action(eventsdf, action="SHOT")
print(shotsdf)
|
{"/modules/data_visualization.py": ["/modules/helper_methods.py"], "/modules/clustering.py": ["/modules/helper_methods.py"], "/modules/data_extraction_and_transformation.py": ["/modules/helper_methods.py"], "/modules/logistic_regression.py": ["/modules/helper_methods.py"], "/modules/database_population_and_querying.py": ["/modules/helper_methods.py"], "/modules/linear_regression.py": ["/modules/helper_methods.py"]}
|
18,230
|
JRReynosa/metricadataanalysis
|
refs/heads/master
|
/modules/logistic_regression.py
|
import matplotlib.pyplot as plt
from scipy.interpolate import make_interp_spline, BSpline
import modules.helper_methods as helper
from sklearn.linear_model import LogisticRegression
import numpy as np
# Public Metrica Sports sample match #1: raw events CSV.
url = 'https://raw.githubusercontent.com/metrica-sports/sample-data/master/data/Sample_Game_1/' \
      'Sample_Game_1_RawEventsData.csv'

eventsdf = helper.get_events_dataframe(url)
all_shotsdf = helper.get_all_action(eventsdf, action="SHOT")

# Shot features: distance to goal, angle subtended by the posts, and a
# header indicator.
all_shotsdf['distance_to_goal'] = all_shotsdf.apply(lambda q: helper.distance_to_goal(q[['start_x', 'start_y']]), axis=1)
all_shotsdf['goal_angle'] = all_shotsdf.apply(lambda q: helper.goal_angle(q[['start_x', 'start_y']]), axis=1)
# BUG FIX: wrap subtype in str() — a missing subtype is a float NaN, and
# `"HEAD" in <float>` raises TypeError.
all_shotsdf['head'] = all_shotsdf.apply(lambda q: 1 if ("HEAD" in str(q["subtype"])) else 0, axis=1)

model = LogisticRegression()
features = all_shotsdf[['distance_to_goal', 'goal_angle', 'head']]
labels = all_shotsdf['outcome']
fit = model.fit(features, labels)

predictions = model.predict_proba(features)[:, 1]

# Smooth the sorted goal probabilities with a cubic spline for plotting.
xnew = np.linspace(0, len(predictions), 300)
spl = make_interp_spline(range(len(predictions)), sorted(predictions), k=3)  # type: BSpline
power_smooth = spl(xnew)
plt.plot(xnew, power_smooth)
plt.show()
print("----------------")
|
{"/modules/data_visualization.py": ["/modules/helper_methods.py"], "/modules/clustering.py": ["/modules/helper_methods.py"], "/modules/data_extraction_and_transformation.py": ["/modules/helper_methods.py"], "/modules/logistic_regression.py": ["/modules/helper_methods.py"], "/modules/database_population_and_querying.py": ["/modules/helper_methods.py"], "/modules/linear_regression.py": ["/modules/helper_methods.py"]}
|
18,231
|
JRReynosa/metricadataanalysis
|
refs/heads/master
|
/modules/database_population_and_querying.py
|
import modules.helper_methods as helper
from sqlalchemy import create_engine
import pandas as pd
# Public Metrica Sports sample match #1: raw events CSV.
url = 'https://raw.githubusercontent.com/metrica-sports/sample-data/master/data/Sample_Game_1/' \
      'Sample_Game_1_RawEventsData.csv'
eventsdf = helper.get_events_dataframe(url)
# In-memory SQLite database populated with the events table.
engine = create_engine('sqlite://')
eventsdf.to_sql('events', engine)
# Players ranked by number of completed (outcome=1) passes.
top_passers = """
select from_player as player , count(*) as passes
from events
where outcome=1
and type = "PASS"
group by from_player
order by passes desc
"""
print(pd.read_sql(top_passers, engine).head(10))
# This was supposed to be xG but I did not have enough data to make
# a solid calculation
# Players ranked by number of successful (outcome=1) shots, i.e. goals.
top_shots = """
select from_player as player, count(*) as shots
from events
where outcome=1
and type = "SHOT"
group by from_player
order by shots desc
"""
print(pd.read_sql(top_shots, engine).head(10))
|
{"/modules/data_visualization.py": ["/modules/helper_methods.py"], "/modules/clustering.py": ["/modules/helper_methods.py"], "/modules/data_extraction_and_transformation.py": ["/modules/helper_methods.py"], "/modules/logistic_regression.py": ["/modules/helper_methods.py"], "/modules/database_population_and_querying.py": ["/modules/helper_methods.py"], "/modules/linear_regression.py": ["/modules/helper_methods.py"]}
|
18,232
|
JRReynosa/metricadataanalysis
|
refs/heads/master
|
/modules/linear_regression.py
|
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression
import modules.helper_methods as helper

# Metrica Sports public sample event data for game 1 (CSV).
url = 'https://raw.githubusercontent.com/metrica-sports/sample-data/master/data/Sample_Game_1/' \
      'Sample_Game_1_RawEventsData.csv'
events_frame = helper.get_events_dataframe(url)
passes_frame = helper.get_all_action(events_frame, action="PASS")

# Per-player aggregates: pass volume and completion rate (percent).
total_passes = passes_frame.groupby('from_player')['outcome'].count()
pass_accuracy = passes_frame.groupby('from_player')['outcome'].mean() * 100

# Scatter each player at (volume, accuracy).
fig, ax = plt.subplots()
scatter = ax.scatter(total_passes, pass_accuracy)
ax.set_xlabel("Total Passes")
ax.set_ylabel("Pass Completion")
plt.yticks(np.arange(0, 110, 10))

# Annotate every point with the player's name.
for player, total in total_passes.items():
    x = total
    y = pass_accuracy[player]
    plt.annotate(player,
                 (x, y),
                 textcoords="offset points",  # how to position the text
                 xytext=(-5, 10),             # distance from text to points (x,y)
                 ha='center',
                 arrowprops=dict(facecolor='black', arrowstyle="-")
                 )

# Fit accuracy as a linear function of volume and overlay the line.
model = LinearRegression()
fit = model.fit([[x] for x in total_passes], pass_accuracy)
print("Coefficients: {}".format(fit.coef_))
print("Intercept: {}".format(fit.intercept_))
xfit = [0, 90]  # This is the x-axis range of the chart
yfit = model.predict([[x] for x in xfit])
plt.plot(xfit, yfit, 'r')
plt.show()
|
{"/modules/data_visualization.py": ["/modules/helper_methods.py"], "/modules/clustering.py": ["/modules/helper_methods.py"], "/modules/data_extraction_and_transformation.py": ["/modules/helper_methods.py"], "/modules/logistic_regression.py": ["/modules/helper_methods.py"], "/modules/database_population_and_querying.py": ["/modules/helper_methods.py"], "/modules/linear_regression.py": ["/modules/helper_methods.py"]}
|
18,233
|
peverett/ImageCopy
|
refs/heads/master
|
/ImageCopy.py
|
#!/usr/bin/python
"""Copy Digital Camera IMages (DCIM) from a source
Memory Card or USB to a specified destination folder.
Allows image preview for selective copying,
file renaming based on EXIF data such as Date
and Time image was made."""
__version__ = "1.0"
__author__ = "simon.peverett@gmail.com"
__all__ = ['__version__', '__author__']
import os
import configparser
import glob
import sys
if sys.version_info[0] > 2:
import tkinter.font as tkFont
from tkinter import *
from tkinter import messagebox, filedialog
else:
import tkFont
from Tkinter import *
from PIL import Image, ImageTk
from ImageScale import ImageCanvas
from datetime import datetime
from shutil import copyfile
def GetConfigFilename():
    """Return the config file name based on the following rules:
    * Config file is in the same directory as the python script.
    * Config file has the same name but with extension '.ini'
    """
    # splitext drops the '.py' suffix; appending '.ini' keeps the config
    # alongside (and named after) this script.  The original wrapped the
    # result in a one-argument os.path.join, which is a no-op.
    base = os.path.splitext(os.path.abspath(__file__))[0]
    return '.'.join([base, 'ini'])
def UpdateConfigFile(config):
    """Persist *config* by rewriting the config file in place."""
    with open(GetConfigFilename(), 'w') as handle:
        config.write(handle)
def LoadConfigFile():
    """Load the Config File. If it doesn't exist, then create it."""
    config = configparser.ConfigParser()
    if config.read(GetConfigFilename()):
        return config
    # No config file on disk yet: seed one with defaults and save it.
    config['DEFAULT'] = {
        'source': 'C:\\',
        'destination': os.path.expanduser('~'),
        'use_date': 'no',
        'use_time': 'no',
        'use_user': 'no',
        'use_name': 'yes',
        'descr': 'Example1,Example2',
    }
    UpdateConfigFile(config)
    return config
def ListJpgFiles(path):
    """return a list of all files with the .jpg extension in the path passed."""
    pattern = os.path.join(path, '*.jpg')
    return glob.glob(pattern)
class ImageCopyController(object):
    """ImageCopy Controller Class.

    Builds the Tk UI (image preview, source/destination labels, file
    information, destination-name options and a user-description history)
    and wires widget callbacks to the copy/rename logic.  All persistent
    settings live in the config file managed by LoadConfigFile() /
    UpdateConfigFile().
    """
    def __init__(self, root):
        # root: the Tk() top-level window this controller populates.
        self.root = root
        # Tkinter variables
        self.src_str = StringVar()    # full path of the current source image
        self.dst_str = StringVar()    # computed destination path for the copy
        self.zoom_str = StringVar()   # current zoom, e.g. "100 %"
        self.user_str = StringVar()   # text typed into the description entry
        self.date_str = StringVar()   # file creation date (display only)
        self.time_str = StringVar()   # file creation time (display only)
        self.fn_str = StringVar()     # source file basename
        self.fnum_str = StringVar()   # "n of m" position in the file list
        self.chosen = StringVar()     # currently selected user description
        self.cb_date = IntVar()       # include date in destination name?
        self.cb_time = IntVar()       # include time in destination name?
        self.cb_name = IntVar()       # include source name in destination name?
        self.cb_user = IntVar()       # include user description in destination name?
        # Get defaults from Config file, or set them!
        self.config = LoadConfigFile()
        self.jpgfiles = ListJpgFiles(self.config['DEFAULT']['source'])
        self.jpgidx = 0  # Index on first image in the list.
        self.jpglen = len(self.jpgfiles)
        self.cb_date.set(self.config.getboolean('DEFAULT', 'use_date'))
        self.cb_time.set(self.config.getboolean('DEFAULT', 'use_time'))
        self.cb_user.set(self.config.getboolean('DEFAULT', 'use_user'))
        self.cb_name.set(self.config.getboolean('DEFAULT', 'use_name'))
        # Persist settings when the window is destroyed.
        self.root.bind('<Destroy>', self.destroy_cmd)
        self.MenuBar()
        # Left side: image canvas, buttons, source/destination read-outs.
        left_frm = Frame(self.root)
        left_frm.pack(side=LEFT, fill=BOTH, expand=YES)
        self.ic = ImageCanvas(left_frm)
        self.button_frame(left_frm)
        self.source_frame(left_frm)
        self.destination_frame(left_frm)
        # Right side: file info, naming options, user descriptions.
        right_frm = Frame(self.root)
        right_frm.pack(side=RIGHT, fill=Y, expand=NO)
        self.file_info_frame(right_frm)
        self.image_options_frame(right_frm)
        self.user_input_frame(right_frm)
        #root.state('zoomed')
        if self.jpgfiles:
            self.update_image_source()
    def copy_file_cmd(self):
        """Copy the current source image to the computed destination path."""
        copyfile(self.src_str.get(), self.dst_str.get())
    def destroy_cmd(self, event):
        """What happens when the app is closed down."""
        # NOTE(review): self.usr_descr is first assigned inside
        # update_destination(); if no jpg files were ever found this
        # attribute may not exist when the window closes -- TODO confirm.
        self.config['DEFAULT']['descr'] = ','.join(self.usr_descr)
        UpdateConfigFile(self.config)
    def options_cmd(self):
        """Action on check box being ticked."""
        self.update_options()
        self.update_destination()
    def update_options(self):
        """Update options such as image file options or source and destination
        directories."""
        self.config['DEFAULT']['use_date'] = 'yes' if self.cb_date.get() else 'no'
        self.config['DEFAULT']['use_time'] = 'yes' if self.cb_time.get() else 'no'
        self.config['DEFAULT']['use_user'] = 'yes' if self.cb_user.get() else 'no'
        self.config['DEFAULT']['use_name'] = 'yes' if self.cb_name.get() else 'no'
    def update_destination(self):
        """Update the destination file name."""
        image = self.jpgfiles[self.jpgidx]
        # File creation timestamp drives the optional date/time name parts.
        cdt = datetime.fromtimestamp(os.path.getctime(image))
        self.date_str.set(cdt.strftime('%Y-%m-%d'))
        self.time_str.set(cdt.strftime('%H:%M:%S'))
        # Assemble the destination name from the ticked components,
        # joined with underscores.
        cfn = list()
        if self.cb_date.get():
            cfn.append(cdt.strftime('%Y%m%d'))
        if self.cb_time.get():
            cfn.append(cdt.strftime('%H%M%S'))
        ud = self.chosen.get()
        if len(ud) and self.cb_user.get():
            cfn.append(ud)
        if self.cb_name.get():
            cfn.append(self.fn_str.get())
        # configparser lower-cases option names, so 'Destination' resolves
        # to the 'destination' key written by LoadConfigFile().
        copy_name = os.path.join(
            self.config['DEFAULT']['Destination'],
            "{}.jpg".format('_'.join(cfn)))
        self.dst_str.set(copy_name)
        # Snapshot the description history for saving on exit.
        self.usr_descr = self.listbox.get(0, END)
    def update_image_source(self):
        """Show the current image and refresh all source-related read-outs."""
        image = self.jpgfiles[self.jpgidx]
        self.src_str.set(image)
        self.fn_str.set(os.path.basename(image))
        self.fnum_str.set("{} of {}".format(self.jpgidx+1, self.jpglen))
        self.ic.load_image(image)
        self.zoom_str.set("{:d} %".format(self.ic.get_zoom()))
        self.update_destination()
    def next_cmd(self):
        """Advance to the next image in the list, if any."""
        if self.jpgfiles:
            if self.jpgidx < self.jpglen-1:
                self.jpgidx += 1
                self.update_image_source()
    def prev_cmd(self):
        """Step back to the previous image in the list, if any."""
        if self.jpgfiles:
            if self.jpgidx > 0:
                self.jpgidx -= 1
                self.update_image_source()
    def zoom_in(self):
        """Zoom the canvas image in one step and refresh the zoom label."""
        self.ic.zoom_in()
        self.zoom_str.set("{:d} %".format(self.ic.get_zoom()))
    def zoom_out(self):
        """Zoom the canvas image out one step and refresh the zoom label."""
        self.ic.zoom_out()
        self.zoom_str.set("{:d} %".format(self.ic.get_zoom()))
    def SetConfigDir(self, directory='destination'):
        """Set the 'directory' in the configuration. By default this is the
        'destination' directory.
        """
        new_dir = os.path.normpath(
            filedialog.askdirectory(
                initialdir=self.config['DEFAULT'][directory],
                title="Select {} folder".format(directory)
            )
        )
        if self.config['DEFAULT'][directory] != new_dir:
            self.config['DEFAULT'][directory] = new_dir
    def SetDestinationDir(self):
        """Menu action: choose a new destination folder and refresh."""
        self.SetConfigDir()
        self.update_destination()
    def SetSourceDir(self):
        """Menu action: choose a new source folder and rescan it for jpgs."""
        # 'Source' is lower-cased by configparser to the 'source' option.
        self.SetConfigDir('Source')
        self.jpgfiles = ListJpgFiles(self.config['DEFAULT']['source'])
        self.jpgidx = 0
        self.jpglen = len(self.jpgfiles)
        if not self.jpgfiles:
            messagebox.showwarning(
                "No JPG files found!",
                '\n'.join([
                    "No JPG files found In directory:",
                    self.config['DEFAULT']['source']
                ])
            )
        else:
            self.update_image_source()
    def AboutImageCopy(self):
        """Menu action: show the module docstring, author and version."""
        messagebox.showinfo(
            "About: ImageCopy",
            ''.join([
                __doc__, '\n\n',
                'Author: ', __author__, '\n\n'
                'Version: ', __version__,
            ])
        )
    def MenuBar(self):
        """Application Menu Bar"""
        menubar = Menu(self.root)
        filemenu = Menu(menubar, tearoff=0)
        filemenu.add_command(label="Set Source Dir", command=self.SetSourceDir)
        filemenu.add_command(
            label="Set Destination Dir", command=self.SetDestinationDir)
        filemenu.add_separator()
        filemenu.add_command(label="Exit", command=self.root.quit)
        menubar.add_cascade(label="File", menu=filemenu)
        helpmenu = Menu(menubar, tearoff=0)
        helpmenu.add_command(label="About", command=self.AboutImageCopy)
        menubar.add_cascade(label="Help", menu=helpmenu)
        self.root.config(menu=menubar)
    def button_frame(self, parent):
        """Frame to contain buttons for moving through images and zooming the
        current image."""
        frm = Frame(parent, bd=5)
        frm.pack(side=TOP, fill=X, expand=NO)
        prev = Button(
            frm, text="<< Prev", padx=5, pady=5, command=self.prev_cmd)
        prev.pack(side=LEFT, fill=X, expand=YES)
        zoom_in = Button(
            frm, text="Zoom In (+)", padx=5, pady=5, command=self.zoom_in)
        zoom_in.pack(side=LEFT, fill=X, expand=YES)
        copy_image = Button(
            frm, text="Copy Image", padx=5, pady=5,
            command=self.copy_file_cmd)
        copy_image.pack(side=LEFT, fill=X, expand=YES)
        zoom_out = Button(
            frm, text="Zoom Out (-)", padx=5, pady=5, command=self.zoom_out)
        zoom_out.pack(side=LEFT, fill=X, expand=YES)
        next = Button(
            frm, text="Next >>", padx=5, pady=5, command=self.next_cmd)
        next.pack(side=LEFT, fill=X, expand=YES)
    def source_frame(self, parent):
        """Frame to contain the labels for displaying the source path & file"""
        frm = Frame(parent, bd=1)
        frm.pack(side=TOP, fill=X, expand=NO)
        legend = Label(
            frm, text="Source:", width=15, anchor=W, padx=5)
        legend.pack(side=LEFT, fill=X, expand=NO)
        info = Label(
            frm, bd=5, bg='black', fg='white', anchor=W,
            textvariable=self.src_str, width=85, padx=5)
        info.pack(side=LEFT, fill=X, expand=YES)
    def destination_frame(self, parent):
        """Frame to contain the labels for displaying the destination path
        and filename."""
        frm = Frame(parent, bd=1)
        frm.pack(side=TOP, fill=X, expand=NO)
        legend = Label(
            frm, text="Destination:", anchor=W, width=15, padx=5)
        legend.pack(side=LEFT, fill=X, expand=NO)
        info = Label(
            frm, bd=5, bg='black', fg='white', anchor=W,
            textvariable=self.dst_str, width=85, padx=5)
        info.pack(side=LEFT, fill=X, expand=YES)
    def file_info_frame(self, parent):
        """Frame to display the image information."""
        frm = Frame(parent, relief=RIDGE, bd=5)
        frm.pack(side=TOP, fill=X, expand=NO)
        title = Label(
            frm, text="File Information", width=30,
            justify=CENTER, pady=10, padx=5)
        title.pack(side=TOP, fill=X, expand=NO)
        # "File #" row: position in the scanned file list.
        fnum_frame = Frame(frm)
        fnum_frame.pack(side=TOP, fill=X, expand=NO)
        fn_legend = Label(
            fnum_frame, text="File #:", width=10,
            anchor=W, padx=5, pady=5)
        fn_legend.pack(side=LEFT, fill=X, expand=NO)
        filenum = Label(
            fnum_frame, textvariable=self.fnum_str, bg='black', fg='white',
            anchor=W, width=20)
        filenum.pack(side=LEFT, fill=X, expand=NO)
        # "File name" row: source basename.
        fn_frame = Frame(frm)
        fn_frame.pack(side=TOP, fill=X, expand=NO)
        fn_legend = Label(
            fn_frame, text="File name:", width=10,
            anchor=W, padx=5, pady=5)
        fn_legend.pack(side=LEFT, fill=X, expand=NO)
        filename = Label(
            fn_frame, textvariable=self.fn_str, bg='black', fg='white',
            anchor=W, width=20)
        filename.pack(side=LEFT, fill=X, expand=NO)
        # "Create date" row.
        dt_frame = Frame(frm)
        dt_frame.pack(side=TOP, fill=X, expand=NO)
        dt_legend = Label(
            dt_frame, text="Create date:", width=10,
            anchor=W, padx=5, pady=5)
        dt_legend.pack(side=LEFT, fill=X, expand=NO)
        date = Label(
            dt_frame, textvariable=self.date_str, bg='black', fg='white',
            anchor=W, width=20)
        date.pack(side=LEFT, fill=X, expand=NO)
        # "Create time" row.
        tm_frame = Frame(frm)
        tm_frame.pack(side=TOP, fill=X, expand=NO)
        tm_legend = Label(
            tm_frame, text="Create time:", width=10,
            anchor=W, padx=5, pady=5)
        tm_legend.pack(side=LEFT, fill=X, expand=NO)
        date = Label(
            tm_frame, textvariable=self.time_str, bg='black', fg='white',
            anchor=W, width=20)
        date.pack(side=LEFT, fill=X, expand=NO)
        # "Image zoom" row.
        zm_frame = Frame(frm)
        zm_frame.pack(side=TOP, fill=X, expand=NO)
        zm_legend = Label(
            zm_frame, text="Image zoom:", width=10,
            anchor=W, padx=5, pady=5)
        zm_legend.pack(side=LEFT, fill=X, expand=NO)
        date = Label(
            zm_frame, textvariable=self.zoom_str, bg='black', fg='white',
            anchor=W, width=20)
        date.pack(side=LEFT, fill=X, expand=NO)
    def user_input_frame(self, parent):
        """Frame for user to input a part of the filename when copied.
        Entry box for user input.
        List for history of last 10 user inputs."""
        frm = Frame(parent, relief=RIDGE, bd=5)
        frm.pack(side=TOP, fill=X, expand=NO)
        title = Label(
            frm, text="User Description", width=30,
            justify=CENTER, pady=5, padx=5)
        title.pack(side=TOP, fill=X, expand=NO)
        selected = Label(frm, textvariable=self.chosen, bg='black', fg='white')
        selected.pack(side=TOP, fill=X, expand=NO)
        ufrm = Frame(frm)
        ufrm.pack(side=TOP, fill=X, expand=NO)
        user_entry = Entry(
            ufrm, textvariable=self.user_str, width=20, bd=5,
            bg='black', fg='white')
        user_entry.pack(side=LEFT, fill=X, expand=YES)
        add_btn = Button(
            ufrm, text="Add", padx=10, pady=5, anchor=E,
            command=self.update_select)
        add_btn.pack(side=LEFT, fill=X, expand=NO)
        # Double-click on a history entry re-selects it (list_select).
        self.listbox = Listbox(frm, selectmode=SINGLE)
        self.listbox.pack(side=TOP, fill=X, expand=NO)
        self.listbox.bind('<Double-Button-1>', self.list_select)
        # Seed the history from the comma-separated 'descr' config option.
        for choice in self.config['DEFAULT']['descr'].split(','):
            self.listbox.insert(END, choice)
    def update_select(self, event=None):
        """Add the typed description to the history (capped at 10 entries)
        and make it the current selection."""
        if not len(self.user_str.get()):
            return
        self.listbox.insert(0, self.user_str.get())
        self.chosen.set(self.user_str.get())
        if self.listbox.size() > 10:
            self.listbox.delete(END)
        self.user_str.set("")
        self.update_destination()
    def list_select(self, event):
        """Double-click handler: promote the clicked history entry to the
        top of the list and make it the current selection."""
        sel = self.listbox.curselection()[0]
        temp = self.listbox.get(sel)
        self.chosen.set(temp)
        self.listbox.delete(sel)
        self.listbox.insert(0, temp)
        self.update_destination()
    def image_options_frame(self, parent):
        """Frame of check boxes choosing which parts make up the
        destination file name."""
        frm = Frame(parent, relief=RIDGE, bd=5)
        frm.pack(side=TOP, fill=X, expand=NO)
        title = Label(
            frm, text="Destination Name Options", width=30,
            justify=CENTER, pady=5, padx=5)
        title.pack(side=TOP, fill=X, expand=NO)
        date = Checkbutton(
            frm, text="Created file date", padx=20,
            anchor=W, variable=self.cb_date, command=self.options_cmd)
        date.pack(side=TOP, fill=X, expand=NO)
        time = Checkbutton(
            frm, text="Created file time", padx=20,
            anchor=W, variable=self.cb_time, command=self.options_cmd)
        time.pack(side=TOP, fill=X, expand=NO)
        user = Checkbutton(
            frm, text="User description ", padx=20,
            anchor=W, variable=self.cb_user, command=self.options_cmd)
        user.pack(side=TOP, fill=X, expand=NO)
        name = Checkbutton(
            frm, text="Source file name ", padx=20,
            anchor=W, variable=self.cb_name, command=self.options_cmd)
        name.pack(side=TOP, fill=X, expand=NO)
def main():
    """Main function"""
    # Build the root window, hand it to the controller, and run Tk's
    # event loop until the user quits.
    root = Tk()
    root.title("ImageCopy")
    app = ImageCopyController(root)
    root.mainloop()


if __name__ == "__main__":
    main()
|
{"/ImageCopy.py": ["/ImageScale.py"]}
|
18,234
|
peverett/ImageCopy
|
refs/heads/master
|
/ImageScale.py
|
import sys
if sys.version_info[0] > 2:
import tkinter.font as tkFont
from tkinter import *
from tkinter import messagebox, filedialog
else:
import tkFont
from Tkinter import *
from PIL import Image, ImageTk
def invfrange(start, stop, step):
    """Inverted (reverse) range inclusive of the final stop value, designed
    to work with floats."""
    values = []
    current = start
    while current > stop:
        values.append(current)
        # Round to 2 decimal places so float drift cannot skip the stop.
        current = round(current - step, 2)
    values.append(stop)
    return values
class ImageCanvas:
    """A Tk Canvas showing a PIL image with scrollbars, mouse-drag panning
    and stepped zoom between fit-to-canvas and full size (100%).

    scale_range holds descending scale factors (1.0 down to the factor
    that fits the image in the canvas); scale_idx indexes the current one.
    """
    def __init__(self, root):
        """Create the canvas, scrollbars and mouse bindings inside *root*."""
        self.root = root
        self.image_id = None          # canvas item id of the displayed image
        self.hsb = Scrollbar(root, orient='horizontal')
        self.vsb = Scrollbar(root, orient='vertical')
        self.max_zoom_out = 4.0
        self.canvas = Canvas(
            root,
            bg='black',
            width=800,
            height=600,
            xscrollcommand=self.hsb.set,
            yscrollcommand=self.vsb.set
        )
        self.canvas.pack(side=TOP, fill='both', expand='yes')
        self.canvas.update()
        self.hsb.configure(command=self.canvas.xview)
        self.vsb.configure(command=self.canvas.yview)
        # Pan/zoom bindings are only active while the pointer is over
        # the canvas (see enter/leave).
        self.canvas.bind('<Enter>', self.enter)
        self.canvas.bind('<Leave>', self.leave)
        self.canvas.bind('<Configure>', self.resize)
        self.image = None             # current PIL image; None until load_image
        self.scale_idx = 0
        self.scale_range = []
    def load_image(self, image_path):
        """Load the image indicated and show it scaled to fit the canvas."""
        self.image = Image.open(image_path)
        self.calc_scale_range(self.image.size)
        self.scale_idx = len(self.scale_range)-1
        self.show_image()
    def move_from(self, event):
        ''' Remember previous coordinates for scrolling with the mouse '''
        self.canvas.scan_mark(event.x, event.y)
    def move_to(self, event):
        ''' Drag (move) canvas to the new position '''
        self.canvas.scan_dragto(event.x, event.y, gain=1)
    def enter(self, event):
        """Pointer entered the canvas: enable drag-pan and wheel-zoom."""
        self.canvas.bind('<ButtonPress-1>', self.move_from)
        self.canvas.bind('<B1-Motion>', self.move_to)
        self.canvas.bind_all('<MouseWheel>', self.zoom)
    def leave(self, event):
        """Pointer left the canvas: disable drag-pan and wheel-zoom."""
        self.canvas.unbind('<ButtonPress-1>')
        self.canvas.unbind('<B1-Motion>')
        self.canvas.unbind_all('<MouseWheel>')
    def zoom_in(self):
        """Make the image bigger up to actual size"""
        if self.scale_idx > 0:
            self.scale_idx -= 1
            self.show_image()
    def zoom_out(self):
        """Make image smaller down to size of canvas"""
        if self.scale_idx < (len(self.scale_range) - 1):
            self.scale_idx = self.scale_idx + 1
            self.show_image()
    def zoom(self, event):
        '''Zoom in/out one step on mouse wheel up/down.'''
        if event.delta == 120:    # Mouse wheel up
            self.zoom_in()
        if event.delta == -120:   # mouse wheel down
            self.zoom_out()
    def show_image(self):
        """Show image on the canvas at the current scale, centred when it
        is smaller than the canvas."""
        if self.image_id:
            self.canvas.delete(self.image_id)
        width, height = self.image.size
        cw = self.canvas.winfo_width()
        ch = self.canvas.winfo_height()
        nw = int(width * self.scale_range[self.scale_idx])
        nh = int(height * self.scale_range[self.scale_idx])
        # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same
        # filter under its current name (alias since Pillow 2.7).
        self.imagetk = ImageTk.PhotoImage(
            self.image.resize((nw, nh), Image.LANCZOS)
        )
        ow = (cw - nw) / 2 if nw < cw else 0
        oh = (ch - nh) / 2 if nh < ch else 0
        self.image_id = self.canvas.create_image(ow, oh, image=self.imagetk, anchor='nw')
        self.canvas.configure(scrollregion=self.canvas.bbox('all'))
    def calc_scale_range(self, size):
        """Compute the descending scale factors for an image of *size*,
        from 1.0 (actual size) down to fit-to-canvas."""
        width, height = size
        cw = self.canvas.winfo_width()
        ch = self.canvas.winfo_height()
        if height < ch and width < cw:
            # Image already fits: only actual size is offered.
            self.scale_range = [1.0]
        else:
            wratio = float(cw) / width
            hratio = float(ch) / height
            min_scale = round(min(wratio, hratio), 4)
            self.scale_range = invfrange(1.0, min_scale, 0.2)
        self.scale_idx = len(self.scale_range) - 1
    def resize(self, event):
        """Recompute scale range when the canvas is resized."""
        if self.image is None:
            # A <Configure> event can fire before any image is loaded;
            # the original crashed here with AttributeError.
            return
        self.calc_scale_range(self.image.size)
        self.scale_idx = len(self.scale_range)-1
        self.show_image()
    def get_zoom(self):
        """Return the current zoom level as an integer percentage."""
        return int(self.scale_range[self.scale_idx] * 100.0)
def main():
    """Stand-alone demo: show TestImage.jpg in a zoomable window."""
    root = Tk()
    root.title("Image Zoom")
    viewer = ImageCanvas(root)
    viewer.load_image("./TestImage.jpg")
    root.mainloop()


if __name__ == "__main__":
    main()
|
{"/ImageCopy.py": ["/ImageScale.py"]}
|
18,321
|
vishaljain3991/reports2sql
|
refs/heads/master
|
/done/refiner.py
|
#==========================
#PLEASE READ THE COMMENTS
#==========================
#Now we refine i.e. clean our database. In this file we open names.txt which contains the names of
#analysts as mentioned in the analysts_YYYY-MM-YY.txt file and alongside that only first and last names
#of the analysts. This file replaces all the instances of the names of full name of analyst with their
#first and last names.
import nltk
import os
import psycopg2
fo=open("names.txt", "rb+")
raw=fo.read()
conn = psycopg2.connect(database="finance", user="finance", password="iof2014", host="127.0.0.1", port="5432")
cur=conn.cursor()
#We split the text read from names.txt file using \n delimiter.
sents=raw.split('\n')
#Each sentence contains the full names of analyst alongwith their first and last names. We form a
#Dictionary where a full name points to first and last name.
index={} #forming a dictionary
for sent in sents:
if(sent!=''):
#We split every sentence into full name and "first name and last name" on the basis
#delimiter '#'
t=sent.split('#')
index[t[0]]=t[1]
#All the keys of dictionary we are basically refining our ratings1 table reducing names to just first and last names
print index['Christopher Wimmer, CFA']
#'CFA','CPA','Dr.' are the additional designations that comes with a person name, we separate them out and
#put it in a separate column called a1_add or a2_add depending on whether the person was 1st or 2nd anlayst
#on the report.
buzz=['CFA','CPA','Dr.']
for t in index.keys():
tokens=t.split(" ")
#For every full name we determine whether there is anything common between the set of words in token
#and the set of words in buzz. Generally, a name has only one designation if at all it has it. so
#if a name contains a designation the cardinality of intersected set comes out to be greater than or equal
#to one.
inter=list(set(tokens)&set(buzz)) #whether there is some intersection or not
if (len(inter)>0):
#Next, we add additional designation in a1_add or a2_add column for full names having additional desingnation
cur.execute("UPDATE RATINGS1 SET A1_ADD='"+inter[0]+"' WHERE A1_NAME='"+t+"';")
conn.commit()
cur.execute("UPDATE RATINGS1 SET A2_ADD='"+inter[0]+"' WHERE A2_NAME='"+t+"';")
conn.commit()
#Finally we update the a1_name or a2_name column with "full name and last name"
cur.execute("UPDATE RATINGS1 SET A1_NAME='"+index[t]+"' WHERE A1_NAME='"+t+"';")
conn.commit()
cur.execute("UPDATE RATINGS1 SET A2_NAME='"+index[t]+"' WHERE A2_NAME='"+t+"';")
conn.commit()
|
{"/executor.py": ["/extract_name.py"], "/a_data1.py": ["/extract_name.py"]}
|
18,322
|
vishaljain3991/reports2sql
|
refs/heads/master
|
/a_data.py
|
import os
import re, nltk, psycopg2
fo = open("/home/finance/reports2sql/r_fil_date.txt", "wb+")
root = '/home/finance/data'
#print os.walk(root, topdown=False)
for path, subdirs, files in os.walk(root, topdown=False):
for name in files:
w=os.path.join(path, name)
if((re.search(r'^.*dates$', w))):
print w
fo.write(w+" ")
#print path[21:]
#for name in subdirs:
#print(os.path.join(path, name))
fo.close()
|
{"/executor.py": ["/extract_name.py"], "/a_data1.py": ["/extract_name.py"]}
|
18,323
|
vishaljain3991/reports2sql
|
refs/heads/master
|
/comp_name.py
|
# Walk the data tree and, for every file whose name ends in 'name',
# append "<company id>\t<file contents>" to comp_name.txt.
import os
import re, nltk, psycopg2
import dates, a_name
from a_name import analysts
from dates import converter
conn = psycopg2.connect(database="finance", user="finance", password="iof2014", host="127.0.0.1", port="5432")
fo = open("/home/finance/comp_name.txt", "wb+")
root = '/home/finance/data'
for path, subdirs, files in os.walk(root, topdown=False):
    for name in files:
        w = os.path.join(path, name)
        if (re.search(r'^.*name$', w)):
            print w
            # NOTE(review): the file is opened 'ab+' (append) and then
            # read -- the initial read position in append mode is
            # platform-dependent; confirm this reads from the start.
            foo = open(w, "ab+")
            # t[4] is the company-id path component under /home/finance/data.
            t = w.split('/')
            raw = foo.read()
            fo.write(t[4] + "\t" + raw + "\n")
            foo.close()
fo.close()
|
{"/executor.py": ["/extract_name.py"], "/a_data1.py": ["/extract_name.py"]}
|
18,324
|
vishaljain3991/reports2sql
|
refs/heads/master
|
/done/name_fetch.py
|
#==========================
#PLEASE READ THE COMMENTS
#==========================
#In this file we fetch the names of the analysts and create a names.txt. This file contains the full names of the
#analyst alongwith the first and last names of the analyst
import psycopg2
import re
conn = psycopg2.connect(database="finance", user="finance", password="iof2014", host="127.0.0.1", port="5432")
print "Opened database successfully"
cur = conn.cursor()
# NOTE(review): fo is never closed; the final buffer flush relies on
# interpreter shutdown.
fo = open("names.txt", "wb+")
cur.execute("SELECT * FROM RATINGS1")
rows = cur.fetchall()
# Collect both analyst-name columns (indexes 2 and 6) from every row.
l = []
for row in rows:
    group = [row[2], row[6]]
    l = l + group
# De-duplicate via a set.
a = set(l)
uni = list(a)
for t in uni:
    # Some of the names had unicode character \xc2\xa0 (non-breaking
    # space), so we replace it with a regular space before processing.
    if (re.search(r'.*\xc2\xa0.*', t)):
        t = t.replace("\xc2\xa0", " ")
    # Keep only the first and the last whitespace-separated token.
    tokens = t.split(" ")
    print [t, tokens[0] + ' ' + tokens[-1]]
    # NOTE(review): this writes "<full> /<first last>" with ' /' as the
    # separator, but refiner.py splits names.txt on '#' -- confirm which
    # format is the current one before re-running the pipeline.
    fo.write(t + ' /' + tokens[0] + ' ' + tokens[-1] + '\n')
|
{"/executor.py": ["/extract_name.py"], "/a_data1.py": ["/extract_name.py"]}
|
18,325
|
vishaljain3991/reports2sql
|
refs/heads/master
|
/extract_name.py
|
import os
os.chdir('/home/finance/reports2sql')
import re, nltk
import dates, a_name
import analysts_name
from analysts_name import analysts
from dates import converter
import psycopg2
def extractor(root):
u=0
fo = open(root, 'rb+')
raw=fo.read()
locations=nltk.word_tokenize(raw)
conn = psycopg2.connect(database="finance", user="finance", password="iof2014", host="127.0.0.1", port="5432")
tokens=root.split('/')
i=-1
#print locations
#print tokens
count=0
while(i+1<len(locations)):
i=i+1
string= '/home/finance/data/'+tokens[4]+'/analysts_'+locations[i]+'.txt'
try:
g=analysts(string) #here the analysts name alogwith their dept and posotion is returned
cur = conn.cursor()
print "INSERT INTO RATINGS1 VALUES ("+tokens[4]+",'"+g[0]+"','"+g[1]+"','"+g[2]+"','"+g[3]+"','"+g[4]+"','"+g[5]+"','"+g[6]+"','"+g[7]+"','"+locations[i]+"');"
cur.execute("INSERT INTO RATINGS1 VALUES ("+tokens[4]+",'"+g[0]+"','"+g[1]+"','"+g[2]+"','"+g[3]+"','"+g[4]+"','"+g[5]+"','"+g[6]+"','"+g[7]+"','"+locations[i]+"');")
conn.commit()
#print g
except IOError:
print locations[i]
print "file not there"
u=u+1
except TypeError:
print locations[i]
print "Type error"
u=u+1
except IndexError:
print "Index error"
u=u+1
print tokens[4]
conn.close()
return u
"""string='30 Nov 99'
print converter(string)"""
|
{"/executor.py": ["/extract_name.py"], "/a_data1.py": ["/extract_name.py"]}
|
18,326
|
vishaljain3991/reports2sql
|
refs/heads/master
|
/done/extract_name.py
|
#==========================
#PLEASE READ THE COMMENTS
#==========================
#This file serves as an auxilliary file for the executor.py file. In this file we have
#defined a single function extractor that takes the location of the dates file of a company
#id as input and extracts the relevant features from the analysts_YYYY-MM-YY.txt
#file of the company for all te dates mentioned in the dates file.
#We import analysts function from analysts_name.py file. This function helps us extract the
#relevant features from analysts_YYYY-MM-YY.txt file.
#We also import psycopg2 package becuase in this filwe we are interacting with the database
#f_not.txt is a file that contains the company id along with the date on which their respective
#analysts file was not found.
import os
os.chdir('/home/finance/reports2sql')
import re, nltk
import dates, a_name
import analysts_name
from analysts_name import analysts
from dates import converter
import psycopg2
def extractor(root):
    """Process every report date listed in the dates file at *root* and
    insert one row per report into RATINGS2; returns the count of dates
    that failed (missing analysts file, TypeError or IndexError)."""
    u = 0
    bo = open('f_not.txt', 'ab+')
    fo = open(root, 'rb+')
    raw = fo.read()
    #IN the next statement we tokenize the file that has been read to extract the dates on which
    #the reports were published.
    locations = nltk.word_tokenize(raw)
    #Next, we connect with the database and create an object.
    conn = psycopg2.connect(database="finance", user="finance", password="iof2014", host="127.0.0.1", port="5432")
    #In the following operation, we split the root string to extract the company id. In this token[4]
    #happens to be the company id
    tokens = root.split('/')
    i = -1
    count = 0
    #NOw we execute the loop to go through every date on which the report was published and extract relevant
    #information from the analysts_YYYY-MM-YY.txt file.
    while (i + 1 < len(locations)):
        i = i + 1
        #We create a string here so that the actual location of analysts_YYYY-MM-YY.txt can be given to
        #the analysts function.
        string = '/home/finance/data/' + tokens[4] + '/analysts_' + locations[i] + '.txt'
        #Exception handling has been done to catch the exceptions when analysts_YYYY-MM-YY.txt is not found
        #or there is some TypeError or IndexError.
        try:
            g = analysts(string)  #here the analysts name alogwith their dept and posotion is returned
            cur = conn.cursor()
            # NOTE(review): the SQL is built by string concatenation --
            # any value containing a quote breaks the statement; prefer
            # cur.execute(sql, params) parameterization.
            print "INSERT INTO RATINGS2 VALUES (" + tokens[4] + ",'" + g[0] + "','" + g[1] + "','" + g[2] + "','" + g[3] + "','" + g[4] + "','" + g[5] + "','" + g[6] + "','" + g[7] + "','" + locations[i] + "');"
            #In the next statement, we execute our sql command and insert the relevant info into the
            #the database ratings1.
            cur.execute("INSERT INTO RATINGS2 VALUES (" + tokens[4] + ",'" + g[0] + "','" + g[1] + "','" + g[2] + "','" + g[3] + "','" + g[4] + "','" + g[5] + "','" + g[6] + "','" + g[7] + "','" + locations[i] + "');")
            #Next, we commit the transaction that we performed previously.
            conn.commit()
        #Below are the exceptions that can be handled.
        except IOError:
            print locations[i]
            print "file not there"
            u = u + 1
            # Record the company id and date whose analysts file was missing.
            bo.write(tokens[4] + '\t' + locations[i] + '\n')
        except TypeError:
            print locations[i]
            print "Type error"
            u = u + 1
        except IndexError:
            print "Index error"
            u = u + 1
    print tokens[4]
    conn.close()
    return u
|
{"/executor.py": ["/extract_name.py"], "/a_data1.py": ["/extract_name.py"]}
|
18,327
|
vishaljain3991/reports2sql
|
refs/heads/master
|
/executor.py
|
#==========================
#PLEASE READ THE COMMENTS
#==========================
# Running this script populates the ratings1 table with one row per
# report (analyst names, positions, departments, ...).  The heavy
# lifting is done by extractor() imported from extract_name.py.
import os
import re, nltk, psycopg2
import dates, a_name
import extract_name
from extract_name import extractor

# r_fil_date.txt lists the location of every company's dates file, e.g.
# /home/finance/data/600045616/dates for company id 600045616.
fo = open("/home/finance/reports2sql/r_fil_date.txt", "rb+")
raw = fo.read()
# Each whitespace-separated token is one dates-file location.
locs = nltk.word_tokenize(raw)
# d accumulates the per-company failure counts returned by extractor().
d = 0
for t in locs:
    d += extractor(t)
|
{"/executor.py": ["/extract_name.py"], "/a_data1.py": ["/extract_name.py"]}
|
18,328
|
vishaljain3991/reports2sql
|
refs/heads/master
|
/analysts_name.py
|
import os
import re, nltk
def analysts(string):
    """Parse an analysts_YYYY-MM-YY.txt report file and return a list of
    eight fields: [place1, name1, dept1, pos1, place2, name2, dept2, pos2]
    for the first and second analyst on the report.

    The layout is detected heuristically by counting how many lines are
    known office locations (i, below) and how many mention JOURNALISTS
    (g); apostrophes are stripped from every returned field.
    NOTE(review): if no branch matches, the trailing `print t` may refer
    to a leftover loop variable or raise NameError, and the function
    returns None -- callers guard this with a TypeError handler.
    """
    bo = open("/home/finance/reports2sql/types.txt", "ab+")
    foo = open(string)
    raw = foo.read()
    sents = raw.split('\n')
    words = nltk.word_tokenize(raw)  #for splitting multiple lines
    foo.close()
    # Known office locations used to spot the "place" line of each
    # analyst block ('New' / 'Hong' cover tokenized two-word cities).
    sents1 = ['Boston', 'Buenos Aires', 'Chicago', 'Dallas', 'Mexico City', 'New York', 'Sao Paulo', 'San Francisco', 'Toronto', 'Dubai', 'Frankfurt', 'Johannesburg', 'Limassol', 'London', 'Madrid', 'Milan', 'Moscow', 'Paris', 'Warsaw', 'Beijing', 'Hong Kong', 'Seoul', 'Shanghai', 'Singapore', 'Sydney', 'Tokyo', 'India', 'Giza', 'Tel Aviv', 'Montreal', 'Toronto', 'South San Francisco', 'West Chester', 'Edinburgh', 'Grenoble', 'Port Louis', 'Saint Cloud', 'Melbourne', 'Shenzhen', 'New', 'Hong', 'Jersey City', 'DIFC', 'DIFC - Dubai', 'Frankfurt am Main']
    # i = number of lines that are exactly a known location.
    i = 0
    for t in sents:
        if (t in sents1):
            i = i + 1
    # g = number of lines mentioning JOURNALISTS (alternate layout marker).
    g = 0
    for t in sents:
        if (re.search('.*JOURNALISTS.*', t)):
            g = g + 1
    if (i == 2):
        # Two location lines: each analyst block starts with its own
        # location followed by name, dept and position on the next lines.
        count = 0
        k = 0
        while (k < len(sents)):
            if (sents[k] in sents1):
                count = count + 1
                if (count % 2 == 1):
                    # First location line -> first analyst block.
                    a = [sents[k], sents[k+1], sents[k+2], sents[k+3]]
                else:
                    # Second location line -> second block; combine,
                    # strip apostrophes and return.
                    b = [sents[k], sents[k+1], sents[k+2], sents[k+3]]
                    t = a + b
                    j = 0
                    while (j < len(t)):
                        if ("'" in t[j]):
                            # Remove the first apostrophe so the value is
                            # safe for the quoted SQL built by the caller.
                            t[j] = t[j][:t[j].index("'")] + t[j][t[j].index("'")+1:]
                        print t[j]
                        j = j + 1
                    foo.close()
                    return t
            k = k + 1
    elif (i == 0 or g == 3):
        # No stand-alone location line (or the JOURNALISTS layout): find
        # a location token anywhere in the text, defaulting to New York.
        k = 0
        pi = 0  #place index if 1 that means a place found
        while (k < len(words)):
            if (words[k] in sents1):
                pi = pi + 1
                # Re-expand the two-word cities split by the tokenizer.
                if (words[k] == 'New'):
                    words[k] = 'New York'
                if (words[k] == 'Hong'):
                    words[k] = 'Hong Kong'
                place = words[k]
            k = k + 1
        l = 0
        count = 0
        if (pi == 0):  #defalut location is New York
            place = 'New York'
        # First analyst block is the first three lines of the file.
        a = [place, sents[0], sents[1], sents[2]]
        # Second block follows the first blank line.
        while (l < len(sents)):
            if (sents[l] == '' and count < 1):
                count = count + 1
                if (pi == 0):
                    b = ['New York', sents[l+1], sents[l+2], sents[l+3]]
                else:
                    b = [place, sents[l+1], sents[l+2], sents[l+3]]
                t = a + b
                j = 0
                while (j < len(t)):
                    if ("'" in t[j]):
                        t[j] = t[j][:t[j].index("'")] + t[j][t[j].index("'")+1:]
                    print t[j]
                    j = j + 1
                foo.close()
                return t
            l = l + 1
    elif (i == 1 and (sents[0] in sents1)):
        # Single location line at the top: it serves the first block; the
        # second block starts at the next location occurrence.
        a = [sents[0], sents[1], sents[2], sents[3]]
        k = 0
        while (k < len(sents)):
            if (sents[k] in sents1):
                b = [sents[k], sents[k+1], sents[k+2], sents[k+3]]
                t = a + b
                j = 0
                while (j < len(t)):
                    if ("'" in t[j]):
                        t[j] = t[j][:t[j].index("'")] + t[j][t[j].index("'")+1:]
                    print t[j]
                    j = j + 1
                foo.close()
                return t
            k = k + 1
    elif (i == 1 and (sents[0] not in sents1)):
        # Single location line somewhere below the top: reuse it as the
        # place for both blocks.
        k = 0
        print 'yes'
        while (k < len(sents)):
            if (sents[k] in sents1):
                b = [sents[k], sents[k+1], sents[k+2], sents[k+3]]
                a = [sents[k], sents[0], sents[1], sents[2]]
                t = a + b
                j = 0
                while (j < len(t)):
                    if ("'" in t[j]):
                        t[j] = t[j][:t[j].index("'")] + t[j][t[j].index("'")+1:]
                    print t[j]
                    j = j + 1
                foo.close()
                return t
            k = k + 1
    print t
    foo.close()
    bo.close()
|
{"/executor.py": ["/extract_name.py"], "/a_data1.py": ["/extract_name.py"]}
|
18,329
|
vishaljain3991/reports2sql
|
refs/heads/master
|
/a_data1.py
|
import os
import re, nltk, psycopg2
import dates, a_name
import extract_name
from extract_name import extractor
from a_name import analysts
from dates import converter

# Running total of the values returned by extractor() for every dates
# file listed in r_fil_date.txt (starts at 0 -- presumably a counter;
# TODO confirm against extract_name.extractor).
d = 0
# r_fil_date.txt holds whitespace-separated paths, one per company id,
# each pointing at that company's dates file.
# Fix: use a context manager so the handle is always closed (the
# original opened the file and never closed it).
with open("/home/finance/reports2sql/r_fil_date.txt", "rb+") as fo:
    raw = fo.read()
# Tokenize the raw listing into individual file paths.
locs = nltk.word_tokenize(raw)
#print locs
"""string='a'
for t in locs:
tokens=t.split('/')
string=string+' '+tokens[4]"""
# Run the extractor over every dates file and accumulate its result.
for t in locs:
    d = d + extractor(t)
#print d
#conn.close()
"""tok=nltk.word_tokenize(string)
tok=tok[1:]
#print tok[0]
print tok[tok.index('413000')+1]"""
|
{"/executor.py": ["/extract_name.py"], "/a_data1.py": ["/extract_name.py"]}
|
18,330
|
vishaljain3991/reports2sql
|
refs/heads/master
|
/f_error.py
|
import os
import re, nltk
import dates, a_name
import extract_name
from extract_name import extractor
from a_name import analysts
from dates import converter
# r_fil_loc.txt lists report file paths; each path's 5th component
# (index 4 after splitting on '/') is a company id.
fo = open("/home/finance/r_fil_loc.txt", "rb+")
raw=fo.read()
locs=nltk.word_tokenize(raw)
#print locs
# Build a space-separated string of all company ids (seeded with a
# dummy 'a' that is dropped after re-tokenizing below).
string='a'
for t in locs:
    tokens=t.split('/')
    string=string+' '+tokens[4]
tok=nltk.word_tokenize(string)
tok=tok[1:]
#print tok[0]
# Debug output: position of company 761840 and the id that follows
# company 372050 in the listing (Python 2 print statements).
print tok.index('761840')
print tok[tok.index('372050')+1]
|
{"/executor.py": ["/extract_name.py"], "/a_data1.py": ["/extract_name.py"]}
|
18,331
|
vishaljain3991/reports2sql
|
refs/heads/master
|
/done/analysts_name.py
|
#==========================
#PLEASE READ THE COMMENTS
#==========================
#This is one of the most important file of the lot. In the file analysts function
#is defined. With this function we extract analyst names, their designation,
#departments and positions.
import os
import re, nltk
def analysts(string):
    """Extract analyst features from the analysts file at path *string*.

    Returns a list of eight strings: [place, name, title, department]
    for the first analyst followed by the same four fields for the
    second. Apostrophes are stripped from every field so the values can
    be inserted into the database without quoting errors. Python 2 code.
    """
    bo=open("/home/finance/reports2sql/types.txt","ab+")
    foo=open(string)
    raw=foo.read()
    #Next, we split analysts_YYYY-MM-YY.txt into multiple lines by using \n delimiter
    sents=raw.split('\n' );
    #We also tokenize the file into words
    words=nltk.word_tokenize(raw);
    foo.close()
    #sents1 contains all the places in which Moody's have offices.
    sents1=['Boston', 'Buenos Aires', 'Chicago', 'Dallas', 'Mexico City', 'New York', 'Sao Paulo', 'San Francisco', 'Toronto', 'Dubai', 'Frankfurt', 'Johannesburg', 'Limassol', 'London', 'Madrid', 'Milan', 'Moscow', 'Paris', 'Warsaw', 'Beijing', 'Hong Kong', 'Seoul', 'Shanghai', 'Singapore', 'Sydney', 'Tokyo', 'India', 'Giza', 'Tel Aviv', 'Montreal', 'Toronto', 'South San Francisco', 'West Chester', 'Edinburgh', 'Grenoble', 'Port Louis', 'Saint Cloud', 'Melbourne', 'Shenzhen', 'New', 'Hong','Jersey City', 'DIFC', 'DIFC - Dubai','Frankfurt am Main']
    i=0
    #Firstly we count the no. of times a place has been a token in sents
    for t in sents:
        if(t in sents1):
            i=i+1
    #Secondly we count the number of tokens which have JOURNALISTS mentioned.
    g=0
    for t in sents:
        if(re.search('.*JOURNALISTS.*', t)):
            g=g+1
    #Now comes the most important part of the code. Here we decide which methodology
    #to choose for extraxting the features depending on the value of i and g
    #if i=2 and j is free to have any value
    #so analysts_YYYY-MM-YY.txt file looks somewhat like this as in the example shown
    #below
    #--------------------------------
    # New York
    # Michael Levesque
    # Senior Vice President
    # Corporate Finance Group
    # Moody's Investors Service
    # JOURNALISTS: 212-553-0376
    # SUBSCRIBERS: 212-553-1653
    # New York
    # Lenny J. Ajzenman
    # Senior Vice President
    # Corporate Finance Group
    # Moody's Investors Service
    # JOURNALISTS: 212-553-0376
    # SUBSCRIBERS: 212-553-1653
    # Moody's Investors Service
    # 250 Greenwich Street
    # New York, NY 10007
    # U.S.A.
    # JOURNALISTS: 212-553-0376
    # SUBSCRIBERS: 212-553-1653
    #--------------------------------
    if (i==2):
        count=0
        k=0
        while(k<len(sents)):
            #Next we search whether any of the tokens is in sents1. If we find such
            #token, we immediately know that the succeding sentences are name of the
            #analyst, his designation and department.
            if(sents[k] in sents1):
                count=count+1
                if(count%2==1):
                    #a contains information for the first analyst.
                    a=[sents[k], sents[k+1], sents[k+2],sents[k+3]]
                else:
                    #b contains information for the second analyst.
                    b=[sents[k], sents[k+1], sents[k+2],sents[k+3]]
                    #We concatenate a and b to form t that contains
                    #info about both the analyst.
                    t=a+b
                    #In the next while loop, actually find those
                    #entries in t that have apostrophe and remove it.
                    #This is done to ensure that the entries with
                    #apostrophe are actually entered in the database.
                    #If we try to enter the data, POSTGRES throws an
                    #error. I could not find an alternate way to avoid
                    #error.
                    j=0
                    while(j<len(t)):
                        if("'" in t[j]):
                            t[j]=t[j][:t[j].index("'")]+t[j][t[j].index("'")+1:]
                        print t[j]
                        j=j+1
                    foo.close()
                    return t
            k=k+1
    #When i=0 (Ignore g=3).
    #For e.g. analysts_YYYY-MM-YY.txt file looks somewhat like these
    #------------------------------------------------
    # Michael Levesque, CFA
    # Senior Vice President
    # Corporate Finance Group
    # Moody's Investors Service, Inc.
    # 250 Greenwich Street
    # New York, NY 10007
    # U.S.A.
    # JOURNALISTS: 212-553-0376
    # SUBSCRIBERS: 212-553-1653
    # Peter H. Abdill, CFA
    # MD - Corporate Finance
    # Corporate Finance Group
    # JOURNALISTS: 212-553-0376
    # SUBSCRIBERS: 212-553-1653
    # Releasing Office:
    # Moody's Investors Service, Inc.
    # 250 Greenwich Street
    # New York, NY 10007
    # U.S.A.
    # JOURNALISTS: 212-553-0376
    # SUBSCRIBERS: 212-553-1653
    #--------------------------------------------------
    # Michael J. Mulvaney
    # Managing Director
    # Corporate Finance Group
    #
    # Charles X. Tan
    # Vice President - Senior Analyst
    # Corporate Finance Group
    #--------------------------------------------------
    elif(i==0 or g==3):
        k=0
        pi=0 #place index if 1 that means a place found
        while(k<len(words)):
            #We determine whether any of the words that we obtained by tokenization
            #is a location.
            if (words[k] in sents1):
                pi=pi+1
                #If a word happens to be 'New', then the place is most probably New York
                if(words[k]=='New'):
                    words[k]='New York'
                #If a word happens to be 'Hong', then the place is most probably Hong Kong
                if(words[k]=='Hong'):
                    words[k]='Hong Kong'
                #place variable stores the location that was found
                place=words[k]
            k=k+1
        l=0
        count=0
        #if we still find no word that happens to be one og the locations then the default location
        #of the analyst is New York (this is our assumption)
        if (pi==0):
            place='New York'
        #a stores the relevant features of first analyst
        a=[place,sents[0],sents[1],sents[2]]
        while(l<len(sents)):
            # The second analyst's record follows the first blank line.
            if(sents[l]=='' and count<1):
                count=count+1
                if (pi==0):
                    b=['New York', sents[l+1], sents[l+2],sents[l+3]]
                else:
                    b=[place, sents[l+1], sents[l+2],sents[l+3]]
                t=a+b
                # Strip apostrophes so the values are safe to insert.
                j=0
                while(j<len(t)):
                    if("'" in t[j]):
                        t[j]=t[j][:t[j].index("'")]+t[j][t[j].index("'")+1:]
                    print t[j]
                    j=j+1
                foo.close()
                return t
            l=l+1
    #When i=1 and initial sentence is a location in sents1
    #For e.g. analysts_YYYY-MM-YY.txt file looks somewhat like this
    #---------------------------------
    # New York
    # Pamela Stumpp
    # Managing Director
    # Corporate Finance Group
    # Moody's Investors Service
    # JOURNALISTS: 212-553-0376
    # SUBSCRIBERS: 212-553-1653
    #
    # Thomas S. Coleman
    # Senior Vice President
    # Corporate Finance Group
    #----------------------------------
    elif(i==1 and (sents[0] in sents1)):
        a=[sents[0],sents[1],sents[2],sents[3]]
        k=0
        while(k<len(sents)):
            if(sents[k] in sents1):
                b=[sents[k], sents[k+1], sents[k+2],sents[k+3]]
                t=a+b
                j=0
                while(j<len(t)):
                    if("'" in t[j]):
                        t[j]=t[j][:t[j].index("'")]+t[j][t[j].index("'")+1:]
                    print t[j]
                    j=j+1
                foo.close()
                return t
            k=k+1
    #When i=1 and initial sentence is not a location in sents1
    #For e.g. analysts_YYYY-MM-YY.txt file looks somewhat like this
    #-----------------------------------
    # Mark Gray
    # Managing Director
    # Corporate Finance Group
    #
    # New York
    # David Neuhaus
    # VP - Senior Credit Officer
    # Corporate Finance Group
    # Moody's Investors Service
    # JOURNALISTS: 212-553-0376
    # SUBSCRIBERS: 212-553-1653
    #-----------------------------------
    elif(i==1 and (sents[0] not in sents1)):
        k=0
        print 'yes'
        while(k<len(sents)):
            if(sents[k] in sents1):
                b=[sents[k], sents[k+1], sents[k+2],sents[k+3]]
                # The located place is reused for the first analyst too.
                a=[sents[k],sents[0],sents[1],sents[2]]
                t=a+b
                j=0
                while(j<len(t)):
                    if("'" in t[j]):
                        t[j]=t[j][:t[j].index("'")]+t[j][t[j].index("'")+1:]
                    print t[j]
                    j=j+1
                foo.close()
                return t
            k=k+1
    # Fallthrough when no layout matched (NOTE(review): `t` may be unbound).
    print t
    foo.close()
    bo.close()
|
{"/executor.py": ["/extract_name.py"], "/a_data1.py": ["/extract_name.py"]}
|
18,350
|
HungrySpace/CallCenter
|
refs/heads/master
|
/call/api/views.py
|
from rest_framework import generics, permissions, status
from rest_framework.response import Response
from . import serializers
from rest_framework import mixins
from django.core.serializers import serialize
from .models import Events, Clients, ClientPhones, EmployeesPhones
from django.forms.models import model_to_dict
# добавление ивента (звонков)
class EventsViews(mixins.UpdateModelMixin, generics.ListCreateAPIView):
    """List/create call events; PUT performs a partial update looked up
    by the Asterisk call id (``id_asterisk``) rather than the primary key.
    """
    queryset = Events.objects.all()
    serializer_class = serializers.EventsSerializer

    def put(self, request, *args, **kwargs):
        """Partially update the event matching ``id_asterisk``; 400 if
        the id is missing or unknown."""
        asterisk_id = request.data.get("id_asterisk")
        if not asterisk_id or not Events.objects.filter(id_asterisk=request.data['id_asterisk']).exists():
            return Response(status=status.HTTP_400_BAD_REQUEST)
        event = Events.objects.get(id_asterisk=request.data['id_asterisk'])
        serializer = self.get_serializer(event, data=request.data, partial=True)
        serializer.is_valid(raise_exception=True)
        self.perform_update(serializer)
        # Invalidate any prefetch cache so the response reflects the update.
        if getattr(event, '_prefetched_objects_cache', None):
            event._prefetched_objects_cache = {}
        return Response(serializer.data)
# все карточки клиентов и их добавление
class ClientCardCreateGet(generics.ListCreateAPIView):
    """GET: list every client card; POST: create a new one."""
    serializer_class = serializers.ClientsSerializer
    queryset = Clients.objects.all()
# изменить или удалить определенную карточку клиента
class ClientCardEditingDelete(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single client card by primary key."""
    serializer_class = serializers.ClientsTestSerializer
    queryset = Clients.objects.all()
|
{"/call/api/views.py": ["/call/api/models.py"], "/call/api/serializers.py": ["/call/api/models.py"], "/call/appCallCentr/views.py": ["/call/appCallCentr/forms.py"]}
|
18,351
|
HungrySpace/CallCenter
|
refs/heads/master
|
/call/api/migrations/0005_events_sc.py
|
# Generated by Django 3.2.4 on 2021-07-13 10:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds a (short-lived) M2M field ``sc`` to Events;
    it is removed again in migration 0006."""

    dependencies = [
        ('api', '0004_auto_20210712_1058'),
    ]

    operations = [
        migrations.AddField(
            model_name='events',
            name='sc',
            field=models.ManyToManyField(to='api.ClientPhones'),
        ),
    ]
|
{"/call/api/views.py": ["/call/api/models.py"], "/call/api/serializers.py": ["/call/api/models.py"], "/call/appCallCentr/views.py": ["/call/appCallCentr/forms.py"]}
|
18,352
|
HungrySpace/CallCenter
|
refs/heads/master
|
/call/api/migrations/0006_remove_events_sc.py
|
# Generated by Django 3.2.4 on 2021-07-13 10:41
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: removes the ``sc`` field added in migration 0005."""

    dependencies = [
        ('api', '0005_events_sc'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='events',
            name='sc',
        ),
    ]
|
{"/call/api/views.py": ["/call/api/models.py"], "/call/api/serializers.py": ["/call/api/models.py"], "/call/appCallCentr/views.py": ["/call/appCallCentr/forms.py"]}
|
18,353
|
HungrySpace/CallCenter
|
refs/heads/master
|
/call/api/migrations/0003_auto_20210712_1005.py
|
# Generated by Django 3.2.4 on 2021-07-12 07:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: gives Events.call_recording a default and re-declares
    the Events.id_employee FK (CASCADE)."""

    dependencies = [
        ('api', '0002_alter_events_id_employee'),
    ]

    operations = [
        migrations.AlterField(
            model_name='events',
            name='call_recording',
            field=models.CharField(default='puk', max_length=500),
        ),
        migrations.AlterField(
            model_name='events',
            name='id_employee',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.employee'),
        ),
    ]
|
{"/call/api/views.py": ["/call/api/models.py"], "/call/api/serializers.py": ["/call/api/models.py"], "/call/appCallCentr/views.py": ["/call/appCallCentr/forms.py"]}
|
18,354
|
HungrySpace/CallCenter
|
refs/heads/master
|
/call/appCallCentr/forms.py
|
from django import forms
from django.utils.translation import ugettext as _
class AddContactForm(forms.Form):
    """Contact-card creation form.

    Declaration order is the render order, so fields must not be
    reordered. ``number_0`` is the first phone number field (additional
    ``number_N`` inputs are posted by the template and read straight from
    ``request.POST`` in the view).
    """
    first_name = forms.CharField(label=_(u'first_name'))
    last_name = forms.CharField(label=_(u'last_name'))
    email = forms.EmailField(label=_(u'email'))
    number_0 = forms.CharField(label=_(u'number'))
    description = forms.CharField(label=_(u'description'))
|
{"/call/api/views.py": ["/call/api/models.py"], "/call/api/serializers.py": ["/call/api/models.py"], "/call/appCallCentr/views.py": ["/call/appCallCentr/forms.py"]}
|
18,355
|
HungrySpace/CallCenter
|
refs/heads/master
|
/call/api/migrations/0007_auto_20210713_1731.py
|
# Generated by Django 3.2.4 on 2021-07-13 14:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: adds ``related_name`` reverse accessors (``phones``,
    ``group_name``) used by the serializers."""

    dependencies = [
        ('api', '0006_remove_events_sc'),
    ]

    operations = [
        migrations.AlterField(
            model_name='clientphones',
            name='id_clients',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='phones', to='api.clients'),
        ),
        migrations.AlterField(
            model_name='employeesphones',
            name='id_employee',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='phones', to='api.employee'),
        ),
        migrations.AlterField(
            model_name='group',
            name='id_employees_phones',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='group_name', to='api.employeesphones'),
        ),
    ]
|
{"/call/api/views.py": ["/call/api/models.py"], "/call/api/serializers.py": ["/call/api/models.py"], "/call/appCallCentr/views.py": ["/call/appCallCentr/forms.py"]}
|
18,356
|
HungrySpace/CallCenter
|
refs/heads/master
|
/call/api/models.py
|
from django.db import models
class GroupName(models.Model):
    """Name of an employee group (e.g. 'ALL'); linked to phones via Group."""
    name = models.CharField(max_length=50)
class Position(models.Model):
    """Job position/title assigned to an Employee."""
    name = models.CharField(max_length=100)
class Employee(models.Model):
    """Call-center employee; SIP phones hang off EmployeesPhones."""
    first_name = models.CharField(max_length=100, default='empty')
    last_name = models.CharField(max_length=100, default='empty')
    description = models.TextField(default='empty')
    id_position = models.ForeignKey(Position, on_delete=models.CASCADE)
class EmployeesPhones(models.Model):
    """SIP extension belonging to an employee (reverse accessor: ``phones``)."""
    sip_phone = models.IntegerField()
    id_employee = models.ForeignKey(Employee, on_delete=models.CASCADE, related_name="phones")
class Group(models.Model):
    """Many-to-many link between a GroupName and an employee's phone
    (reverse accessor on EmployeesPhones: ``group_name``)."""
    id_group_name = models.ForeignKey(GroupName, on_delete=models.CASCADE)
    id_employees_phones = models.ForeignKey(EmployeesPhones, on_delete=models.CASCADE, related_name="group_name")
class Clients(models.Model):
    """Client card; phone numbers hang off ClientPhones (``phones``)."""
    first_name = models.CharField(max_length=100, default='empty')
    last_name = models.CharField(max_length=100, default='empty')
    description = models.TextField(default='empty')
class ClientPhones(models.Model):
    """Phone number attached to a client card (reverse accessor: ``phones``)."""
    phone_number = models.CharField(max_length=13)
    id_clients = models.ForeignKey(Clients, on_delete=models.CASCADE, related_name="phones")
class Status(models.Model):
    """Call/event status; its ``name`` replaces the raw id in API output."""
    name = models.CharField(max_length=100)
class Events(models.Model):
    """A call event pushed from Asterisk, joining client, employee and status."""
    # External call id assigned by Asterisk; used as the lookup key for PUT.
    id_asterisk = models.FloatField()
    id_client = models.ForeignKey(Clients, on_delete=models.CASCADE)
    id_employee = models.ForeignKey(Employee, on_delete=models.CASCADE)
    id_status = models.ForeignKey(Status, on_delete=models.CASCADE)
    # Path/URL of the recorded call audio.
    call_recording = models.CharField(max_length=500, default='empty')
|
{"/call/api/views.py": ["/call/api/models.py"], "/call/api/serializers.py": ["/call/api/models.py"], "/call/appCallCentr/views.py": ["/call/appCallCentr/forms.py"]}
|
18,357
|
HungrySpace/CallCenter
|
refs/heads/master
|
/call/appCallCentr/apps.py
|
from django.apps import AppConfig
class AppcallcentrConfig(AppConfig):
    """Django application configuration for the appCallCentr front-end app."""
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'appCallCentr'
|
{"/call/api/views.py": ["/call/api/models.py"], "/call/api/serializers.py": ["/call/api/models.py"], "/call/appCallCentr/views.py": ["/call/appCallCentr/forms.py"]}
|
18,358
|
HungrySpace/CallCenter
|
refs/heads/master
|
/call/appCallCentr/admin.py
|
from django.contrib import admin
# class StatesAdmin(admin.ModelAdmin):
# list_display = ("id", "state")
# search_fields = ("id", "state")
# list_filter = ("id", "state")
#
#
# admin.site.register(States, StatesAdmin)
|
{"/call/api/views.py": ["/call/api/models.py"], "/call/api/serializers.py": ["/call/api/models.py"], "/call/appCallCentr/views.py": ["/call/appCallCentr/forms.py"]}
|
18,359
|
HungrySpace/CallCenter
|
refs/heads/master
|
/call/api/serializers.py
|
from rest_framework import serializers
from .models import Events, ClientPhones, Clients, EmployeesPhones, Employee, Status, Position, GroupName, Group
from rest_framework.response import Response
# для сбора номеров из базы у определенной карточки
def get_phones(instance):
    """Return the list of phone numbers linked to *instance*.

    ``instance`` is expected to expose a ``phones`` related manager whose
    rows contain a ``phone_number`` column (ClientPhones via the
    ``related_name='phones'`` FK). Returns ``None`` when the relation is
    absent/incompatible, matching the original best-effort behaviour.
    """
    try:
        return [row["phone_number"] for row in instance.phones.all().values()]
    except AttributeError:
        # Narrowed from a bare ``except``: only a missing ``phones``
        # relation means "no phones"; genuine DB errors now propagate
        # instead of being silently swallowed.
        return None
class ClientsTestSerializer(serializers.ModelSerializer):
    """Client-card serializer used by the retrieve/update/delete endpoint;
    syncs the card's set of phone numbers on update."""

    class Meta:
        model = Clients
        fields = ("id", "first_name", "last_name", "description")

    def to_representation(self, instance):
        # Append the card's phone numbers (omitted entirely when empty).
        inst = super().to_representation(instance)
        phones = get_phones(instance)
        if phones:
            inst["phones"] = phones
        return inst

    def to_internal_value(self, data):
        # Carry the raw "phones" payload through validation as a set.
        ret = super().to_internal_value(data)
        phones = data.get("phones", None)
        if phones:
            ret["phones"] = set(phones)
        return ret

    def update(self, instance, validated_data):
        # Diff the numbers currently stored for this card against the
        # submitted set; numbers only in one side are either created
        # (new, and not used by any other card) or deleted (removed).
        phones_model = set(i.phone_number for i in ClientPhones.objects.filter(id_clients=instance))
        phones = validated_data.pop("phones", None)
        if phones:
            for phone in phones_model.symmetric_difference(phones):
                if not ClientPhones.objects.filter(phone_number=phone, id_clients=instance).exists():
                    # Number is new for this card; only add it if no other
                    # card already owns it.
                    if not ClientPhones.objects.filter(phone_number=phone).exists():
                        ClientPhones.objects.create(phone_number=phone, id_clients=instance)
                else:
                    # Number was removed from the submitted set.
                    ClientPhones.objects.get(phone_number=phone).delete()
        inst = super().update(instance, validated_data)
        return inst
class ClientsSerializer(serializers.ModelSerializer):
    """Client-card serializer for the list/create endpoint; attaches the
    card's phone numbers on output and creates ClientPhones rows on input."""

    class Meta:
        model = Clients
        fields = ("id", "first_name", "last_name", "description")

    def to_representation(self, instance):
        # Always expose "phones", defaulting to an empty list.
        inst = super().to_representation(instance)
        phones = get_phones(instance)
        if phones:
            inst["phones"] = phones
        else:
            inst["phones"] = []
        return inst

    def to_internal_value(self, data):
        # Keep only numbers not yet present anywhere in the DB (phone
        # numbers are treated as globally unique across cards).
        ret = super().to_internal_value(data)
        ret["phones"] = []
        phones = data.get("phones", None)
        if phones:
            for phone in phones:
                if not ClientPhones.objects.filter(phone_number=phone).exists():
                    ret["phones"].append(phone)
        return ret

    def create(self, validated_data):
        phones = validated_data.pop("phones", None)
        client_card = Clients.objects.create(**validated_data)
        if phones:
            for phone in set(phones):
                ClientPhones.objects.create(id_clients=client_card, phone_number=phone)
        # Bug fix: DRF's contract is that ``create`` returns the created
        # model instance (it becomes ``serializer.instance`` and is fed to
        # ``to_representation``); the original returned ``validated_data``,
        # which breaks the response rendering of the created object.
        return client_card
class EmployeeSerializer(serializers.ModelSerializer):
    """Employee representation: adds a {sip_phone: [group names]} mapping
    and replaces the position FK id with the position's name."""

    class Meta:
        model = Employee
        fields = ("first_name", "last_name", "description", "id_position")

    def to_representation(self, instance):
        data = super().to_representation(instance)
        # One entry per SIP extension, mapped to the names of the groups
        # that extension belongs to.
        data["phones"] = {
            phone.sip_phone: [grp.id_group_name.name for grp in phone.group_name.all()]
            for phone in instance.phones.all()
        }
        data["id_position"] = instance.id_position.name
        return data
class EventsSerializer(serializers.ModelSerializer):
    """Call-event serializer. Accepts raw ``number_client`` /
    ``number_employee`` phone numbers and resolves (or lazily creates)
    the corresponding client and employee records."""
    id_client = ClientsSerializer(required=False)
    id_employee = EmployeeSerializer(required=False)

    class Meta:
        model = Events
        fields = ("id", "id_asterisk", "id_client", "id_employee", "call_recording", "id_status")

    def to_representation(self, instance):
        # Replace the status id with its human-readable name.
        ret = super().to_representation(instance)
        ret["id_status"] = Status.objects.get(id=ret["id_status"]).name
        return ret

    def to_internal_value(self, data):
        # Pop the raw numbers before validation so the base class does
        # not reject the extra keys.
        number_client = data.pop("number_client", None)
        number_employee = data.pop("number_employee", None)
        ret = super().to_internal_value(data)
        if number_client:
            # Resolve the caller's card; auto-create an empty card plus
            # the phone row on first contact from an unknown number.
            if ClientPhones.objects.filter(phone_number=number_client).exists():
                client_phones = ClientPhones.objects.get(phone_number=number_client)
            else:
                id_clients = Clients.objects.create()
                client_phones = ClientPhones.objects.create(phone_number=number_client, id_clients=id_clients)
            ret['id_client'] = client_phones.id_clients
        if number_employee:
            # Resolve the employee by SIP extension; auto-create a stub
            # employee (position 'None', group 'ALL') for unknown extensions.
            if EmployeesPhones.objects.filter(sip_phone=number_employee).exists():
                employee_phone = EmployeesPhones.objects.get(sip_phone=number_employee)
            else:
                position = Position.objects.get_or_create(name='None')
                employee = Employee.objects.create(id_position=position[0])
                employee_phone = EmployeesPhones.objects.create(sip_phone=number_employee, id_employee=employee)
                group_name = GroupName.objects.get_or_create(name='ALL')
                Group.objects.create(id_group_name=group_name[0], id_employees_phones=employee_phone)
            ret['id_employee'] = employee_phone.id_employee
        return ret

    def create(self, validated_data):
        # NOTE(review): DRF expects ``create`` to return the created model
        # instance; returning ``validated_data`` here looks wrong -- verify
        # against how the view consumes serializer.data after POST.
        Events.objects.create(**validated_data)
        return validated_data
|
{"/call/api/views.py": ["/call/api/models.py"], "/call/api/serializers.py": ["/call/api/models.py"], "/call/appCallCentr/views.py": ["/call/appCallCentr/forms.py"]}
|
18,360
|
HungrySpace/CallCenter
|
refs/heads/master
|
/call/api/migrations/0001_initial.py
|
# Generated by Django 3.2.4 on 2021-07-09 09:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the call-center API app."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Clients',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=100)),
                ('last_name', models.CharField(max_length=100)),
                ('description', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='Employee',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=100)),
                ('last_name', models.CharField(max_length=100)),
                ('description', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='EmployeesPhones',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sip_phone', models.IntegerField()),
                ('id_employee', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.employee')),
            ],
        ),
        migrations.CreateModel(
            name='GroupName',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Position',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Status',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Group',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('id_employees_phones', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.employeesphones')),
                ('id_group_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.groupname')),
            ],
        ),
        migrations.CreateModel(
            name='Events',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('id_asterisk', models.FloatField()),
                ('call_recording', models.CharField(max_length=500)),
                ('id_client', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.clients')),
                ('id_employee', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.employee')),
                ('id_status', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.status')),
            ],
        ),
        migrations.AddField(
            model_name='employee',
            name='id_position',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.position'),
        ),
        migrations.CreateModel(
            name='ClientPhones',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('phone_number', models.CharField(max_length=13)),
                ('id_clients', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.clients')),
            ],
        ),
    ]
|
{"/call/api/views.py": ["/call/api/models.py"], "/call/api/serializers.py": ["/call/api/models.py"], "/call/appCallCentr/views.py": ["/call/appCallCentr/forms.py"]}
|
18,361
|
HungrySpace/CallCenter
|
refs/heads/master
|
/call/api/urls.py
|
from rest_framework import routers
# from .api import EventsViewSet, ClientNumberViewSet
from . import views
from django.urls import path
# router = routers.DefaultRouter()
# router.register('event', EventsViewSet, 'event')
# router.register('ClientNumber', ArticleView, 'ClientNumber')
# URL table for the REST API (mounted by the project urls.py).
urlpatterns = [
    path('events', views.EventsViews.as_view()),  # list/create events; PUT by id_asterisk
    path('clients', views.ClientCardCreateGet.as_view()),  # list/create client cards
    path('client/<int:pk>', views.ClientCardEditingDelete.as_view()),  # one card: get/update/delete
]
#urlpatterns = router.urls
|
{"/call/api/views.py": ["/call/api/models.py"], "/call/api/serializers.py": ["/call/api/models.py"], "/call/appCallCentr/views.py": ["/call/appCallCentr/forms.py"]}
|
18,362
|
HungrySpace/CallCenter
|
refs/heads/master
|
/call/api/migrations/0004_auto_20210712_1058.py
|
# Generated by Django 3.2.4 on 2021-07-12 07:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: gives Clients/Employee text fields and
    Events.call_recording the 'empty' default."""

    dependencies = [
        ('api', '0003_auto_20210712_1005'),
    ]

    operations = [
        migrations.AlterField(
            model_name='clients',
            name='description',
            field=models.TextField(default='empty'),
        ),
        migrations.AlterField(
            model_name='clients',
            name='first_name',
            field=models.CharField(default='empty', max_length=100),
        ),
        migrations.AlterField(
            model_name='clients',
            name='last_name',
            field=models.CharField(default='empty', max_length=100),
        ),
        migrations.AlterField(
            model_name='employee',
            name='description',
            field=models.TextField(default='empty'),
        ),
        migrations.AlterField(
            model_name='employee',
            name='first_name',
            field=models.CharField(default='empty', max_length=100),
        ),
        migrations.AlterField(
            model_name='employee',
            name='last_name',
            field=models.CharField(default='empty', max_length=100),
        ),
        migrations.AlterField(
            model_name='events',
            name='call_recording',
            field=models.CharField(default='empty', max_length=500),
        ),
    ]
|
{"/call/api/views.py": ["/call/api/models.py"], "/call/api/serializers.py": ["/call/api/models.py"], "/call/appCallCentr/views.py": ["/call/appCallCentr/forms.py"]}
|
18,363
|
HungrySpace/CallCenter
|
refs/heads/master
|
/call/appCallCentr/urls.py
|
from django.urls import path
from . import views
# URL table for the HTML front-end pages.
urlpatterns = [
    path('', views.index, name='index'),  # dashboard
    path('contact_book', views.contact_book, name='contact_book'),  # add-contact form (currently stubbed)
    path('contacts_book', views.contacts_book, name='contacts_book'),  # contact list page
    path('editing_contact/<pk>', views.editing_contact, name='load_layout'),  # edit one card
    path('del_num/<pk>', views.del_number, name='del_num'),  # remove a phone number (stubbed)
]
|
{"/call/api/views.py": ["/call/api/models.py"], "/call/api/serializers.py": ["/call/api/models.py"], "/call/appCallCentr/views.py": ["/call/appCallCentr/forms.py"]}
|
18,364
|
HungrySpace/CallCenter
|
refs/heads/master
|
/call/appCallCentr/views.py
|
from django.shortcuts import render
from .forms import AddContactForm
# from .models import Client, ClientNumber
# from api.models import Events, States
from django.http import HttpResponseRedirect
def index(request):
    """Render the dashboard page (event loading is commented out and now
    handled client-side via the REST API)."""
    # data = Events.objects.all().values()
    #
    # for levent in data:
    #     levent["state"] = States.objects.get(id=levent["event_id_id"]).state
    #     if ClientNumber.objects.filter(phone_number=int(levent["number"])).exists():
    #         levent["card"] = ClientNumber.objects.get(phone_number=int(levent["number"])).client_id
    #     else:
    #         levent["card"] = levent["number"]
    return render(request, 'appCallCentr/index.html')
def editing_contact(request, pk):
    """Render the contact-editing page for card *pk* (the server-side
    form handling below is commented out; editing now goes through the API)."""
    return render(request, 'appCallCentr/editing.html')
    # if request.method == 'POST':
    #     form = AddContactForm(request.POST or None)
    #     if form.is_valid():
    #         card = Client.objects.get(id=pk)
    #         card.first_name = form.cleaned_data['first_name']
    #         card.last_name = form.cleaned_data['last_name']
    #         card.email = form.cleaned_data['email']
    #         card.description = form.cleaned_data['description']
    #         card.save()
    #
    #         print(ClientNumber.objects.filter(client_id=pk))
    #
    #         for i in request.POST.items():
    #             if str(i).find("number") > 0 and not ClientNumber.objects.filter(phone_number=i[1]):
    #                 num = ClientNumber(client_id=card, phone_number=i[1])
    #                 num.save()
    #         return HttpResponseRedirect('/contact_book')
    # else:
    #     card_client = Client.objects.filter(id=pk).values()[0]
    #     numbers = ClientNumber.objects.filter(client_id=pk).values()
    #     return render(request, 'appCallCentr/editing.html', {'card_client': card_client, "numbers": enumerate(numbers),
    #                   'count_num': len(numbers)})
def del_number(request, pk):
    """Stub: deleting a phone number is disabled (old implementation kept
    below for reference)."""
    pass
    # numbers = ClientNumber.objects.get(phone_number=pk)
    # id_client = str(numbers.client_id.id)
    # numbers.delete()
    # return HttpResponseRedirect('/editing_contact/' + id_client)
def contact_book(request):
    """Stub: the add-contact flow is disabled (old form handling kept
    below for reference). NOTE(review): returns None, which Django will
    reject if this view is actually hit."""
    pass
    # form = AddContactForm(request.POST or None)
    # print(request.method, form.is_valid())
    # if request.method == 'POST' and form.is_valid():
    #     if not ClientNumber.objects.filter(phone_number=form.cleaned_data['number_0']):
    #         card = Client(first_name=form.cleaned_data['first_name'],
    #                       last_name=form.cleaned_data['last_name'],
    #                       email=form.cleaned_data['email'],
    #                       description=form.cleaned_data['description'])
    #         card.save()
    #         num = ClientNumber(client_id=card,
    #                            phone_number=form.cleaned_data['number_0'])
    #         num.save()
    #     else:
    #         print('LOOOOOOOOOOOOOOOOOOOOOOX')
    # dict_contact = {}
    # for obj_field in Client.objects.all():
    #     dict_contact[obj_field] = list(ClientNumber.objects.filter(client_id=obj_field.id))
    # return render(request, 'appCallCentr/contact_book.html', {"dict_contact": dict_contact})
def contacts_book(request):
    """Render the contact-book page (data is loaded client-side)."""
    return render(request, 'appCallCentr/contact_book.html')
|
{"/call/api/views.py": ["/call/api/models.py"], "/call/api/serializers.py": ["/call/api/models.py"], "/call/appCallCentr/views.py": ["/call/appCallCentr/forms.py"]}
|
18,401
|
felipefrm/lexical-analyzer
|
refs/heads/master
|
/token_csmall.py
|
# Lexeme -> token-class table for the C-Small lexical analyzer.
# Keys are keywords and operator/punctuation lexemes exactly as they
# appear in source text; values are the token names emitted by
# analyzer.py (identifiers/numbers are classified separately there).
tokens = {
    "main": "MAIN",
    "int": "INT",
    "float": "FLOAT",
    "if": "IF",
    "else": "ELSE",
    "while": "WHILE",
    "for": "FOR",
    "read": "READ",
    "print": "PRINT",
    "(": "LBRACKET",
    ")": "RBRACKET",
    "{": "LBRACE",
    "}": "RBRACE",
    ",": "COMMA",
    ";": "PCOMMA",
    "=": "ATTR",
    "==": "EQ",
    "!=": "NE",
    "||": "OR",
    "&&": "AND",
    "<": "LT",
    "<=": "LE",
    ">": "GT",
    ">=": "GE",
    "+": "PLUS",
    "-": "MINUS",
    "*": "MULT",
    "/": "DIV",
    "[": "LCOL",
    "]": "RCOL"
}
|
{"/analyzer.py": ["/token_csmall.py"]}
|
18,402
|
felipefrm/lexical-analyzer
|
refs/heads/master
|
/analyzer.py
|
from token_csmall import tokens
from tabulate import tabulate
import argparse
import re
# Command-line arguments: -i [input .c file] -o [output file for the tokens]
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", help="insert the path and name of a .c file (DEFAULT: 'input/teste_attr1.c')", default="input/teste_attr1.c")
parser.add_argument("-o", "--output", help="insert the path and name of output file (DEFAULT: 'output/output.txt')", default="output/output.txt")
args = parser.parse_args()
f_in = open(args.input, "r")  # input source file
token_list = []  # recognized tokens, each stored as [token_name, lexeme, line]
buffer = []  # characters of the lexeme currently being built
state = 0  # initial state of the automaton
line_count = 1  # 1-based line number, reported alongside each token
# DFA driver: `state` is the current automaton state and `buffer` accumulates
# the lexeme. On a transition back to state 0 the lexeme is looked up in
# `tokens` (or classified as ID / INTEGER_CONST / FLOAT_CONST) and appended to
# `token_list`. A `char_count -= 1` pushes the current character back so that
# state 0 re-reads it.
# NOTE(review): `buffer` and `state` survive across lines, so a lexeme cut by
# a newline would be glued to the next line's text, and a lexeme that is still
# open when the file ends is never emitted — confirm against the grammar.
for line in f_in:  # walk every line of the input file
    line = line.rstrip('\n')
    char_count = 0
    while (char_count < len(line)):  # walk every character of the line
        char = line[char_count]
        if state == 0:
            # Start state: decide what kind of lexeme this character opens.
            if char.isalpha():
                state = 1  # identifier / keyword
                buffer.append(char)
            elif char.isnumeric():
                state = 2  # integer (possibly float) constant
                buffer.append(char)
            elif char == '<':
                state = 5  # '<' or '<='
                buffer.append(char)
            elif char == '>':
                state = 6  # '>' or '>='
                buffer.append(char)
            elif char == '=':
                state = 7  # '=' or '=='
                buffer.append(char)
            elif char == '!':
                state = 8  # must become '!='
                buffer.append(char)
            elif char == '|':
                state = 9  # must become '||'
                buffer.append(char)
            elif char == '&':
                state = 10  # must become '&&'
                buffer.append(char)
            elif char == ' ':
                pass  # whitespace just separates lexemes
            else:
                # Any other character is single-char punctuation: emit it now.
                buffer.append(char)
                lexeme = ''.join(buffer)
                token_list.append([tokens[lexeme], lexeme, line_count])  # record the token
                buffer = []  # clear the buffer
        elif state == 1:
            # Identifier / keyword: extend while the char is [A-Za-z0-9_].
            if re.match('^[a-zA-Z0-9_]*$', char):
                buffer.append(char)
            else:
                char_count -= 1  # push the char back for state 0
                state = 0
                lexeme = ''.join(buffer)
                token_list.append([tokens[lexeme] if lexeme in tokens else "ID", lexeme, line_count])  # keyword if known, else plain identifier
                buffer = []  # clear the buffer
        elif state == 2:
            # Integer constant; a '.' switches to the float states.
            if char.isnumeric():
                buffer.append(char)
            elif char == '.':
                state = 3
                buffer.append(char)
            else:
                char_count -= 1
                state = 0
                lexeme = ''.join(buffer)
                token_list.append(['INTEGER_CONST', lexeme, line_count])  # record the token
                buffer = []  # clear the buffer
        elif state == 3:
            # Just consumed '.', so at least one digit is required for a float.
            if char.isnumeric():
                state = 4
                buffer.append(char)
            else:
                print(f'Falha no estado {state}: produção não aceita pelo analisador léxico da linguagem Csmall.')
        elif state == 4:
            # Fractional digits of a float constant.
            if char.isnumeric():
                buffer.append(char)
            else:
                char_count -= 1
                state = 0
                lexeme = ''.join(buffer)
                token_list.append(['FLOAT_CONST', lexeme, line_count])  # record the token
                buffer = []  # clear the buffer
        elif state == 5:
            # Saw '<': emit '<=' if followed by '=', else emit '<' and re-read.
            if (char == '='):
                state = 0
                buffer.append(char)
                lexeme = ''.join(buffer)
                token_list.append([tokens[lexeme], lexeme, line_count])  # record the token
                buffer = []  # clear the buffer
            else:
                char_count -= 1
                state = 0
                lexeme = ''.join(buffer)
                token_list.append([tokens[lexeme], lexeme, line_count])  # record the token
                buffer = []  # clear the buffer
        elif state == 6:
            # Saw '>': emit '>=' if followed by '=', else emit '>' and re-read.
            if (char == '='):
                state = 0
                buffer.append(char)
                lexeme = ''.join(buffer)
                token_list.append([tokens[lexeme], lexeme, line_count])  # record the token
                buffer = []  # clear the buffer
            else:
                char_count -= 1
                state = 0
                lexeme = ''.join(buffer)
                token_list.append([tokens[lexeme], lexeme, line_count])  # record the token
                buffer = []  # clear the buffer
        elif state == 7:
            # Saw '=': emit '==' if followed by '=', else emit '=' and re-read.
            if (char == '='):
                state = 0
                buffer.append(char)
                lexeme = ''.join(buffer)
                token_list.append([tokens[lexeme], lexeme, line_count])  # record the token
                buffer = []  # clear the buffer
            else:
                char_count -= 1
                state = 0
                lexeme = ''.join(buffer)
                token_list.append([tokens[lexeme], lexeme, line_count])  # record the token
                buffer = []  # clear the buffer
        elif state == 8:
            # Saw '!': only '!=' is valid; anything else is a lexical error.
            if (char == '='):
                state = 0
                buffer.append(char)
                lexeme = ''.join(buffer)
                token_list.append([tokens[lexeme], lexeme, line_count])  # record the token
                buffer = []  # clear the buffer
            else:
                print(f'Falha no estado {state}: produção não aceita pelo analisador léxico da linguagem Csmall.')
        elif state == 9:
            # Saw '|': only '||' is valid; anything else is a lexical error.
            if (char == '|'):
                state = 0
                buffer.append(char)
                lexeme = ''.join(buffer)
                token_list.append([tokens[lexeme], lexeme, line_count])  # record the token
                buffer = []  # clear the buffer
            else:
                print(f'Falha no estado {state}: produção não aceita pelo analisador léxico da linguagem Csmall.')
        elif state == 10:
            # Saw '&': only '&&' is valid; anything else is a lexical error.
            if (char == '&'):
                state = 0
                buffer.append(char)
                lexeme = ''.join(buffer)
                token_list.append([tokens[lexeme], lexeme, line_count])  # record the token
                buffer = []  # clear the buffer
            else:
                print(f'Falha no estado {state}: produção não aceita pelo analisador léxico da linguagem Csmall.')
        char_count += 1
    line_count += 1
# The final token marks the end of the file -> EOF
token_list.append(['EOF', '', line_count])
# Write the recognized tokens to the output file as an aligned table
f_out = open(args.output, "w")
f_out.write(tabulate(token_list, headers=['Token', 'Lexema', 'Linha']))
f_out.close()
f_in.close()
|
{"/analyzer.py": ["/token_csmall.py"]}
|
18,403
|
alex-px/most-common
|
refs/heads/master
|
/helpers.py
|
from nltk import pos_tag
def flat(_list):
    """Flatten one level of nesting: [(1, 2), (3, 4)] -> [1, 2, 3, 4]."""
    return [element for group in _list for element in group]
def is_verb(word):
    """Return True when NLTK tags *word* as a base-form verb ('VB').

    NOTE: other verb tags (VBD, VBG, ...) are deliberately not matched,
    mirroring the original behavior.
    """
    if not word:
        return False
    tag = pos_tag([word])[0][1]
    return tag == 'VB'
def split_snake_case_to_words(name):
    """Split a snake_case identifier into words, dropping empty fragments."""
    return list(filter(None, name.split('_')))
def is_magic_name(name):
    """True for dunder names such as '__init__'."""
    has_prefix = name.startswith('__')
    has_suffix = name.endswith('__')
    return has_prefix and has_suffix
|
{"/most_common.py": ["/helpers.py"]}
|
18,404
|
alex-px/most-common
|
refs/heads/master
|
/most_common.py
|
from argparse import ArgumentParser
import ast
import collections
import os
from helpers import (is_verb,
flat,
split_snake_case_to_words,
is_magic_name)
def find_py_files_in_path(root):
    """Recursively collect the paths of all .py files under *root*."""
    return [os.path.join(dirpath, filename)
            for dirpath, _subdirs, filenames in os.walk(root)
            for filename in filenames
            if filename.endswith('.py')]
def resolve_file_into_tree(file_path):
    """Parse *file_path* into an AST; print the error and return None on bad syntax."""
    with open(file_path, 'r', encoding='utf-8') as handle:
        source_text = handle.read()
    try:
        return ast.parse(source_text)
    except SyntaxError as err:
        print(err)
def fetch_trees_from_path(_path):
    """Parse every .py file under *_path*, skipping files that fail to parse."""
    py_files = find_py_files_in_path(_path)
    print('total %s py files found' % len(py_files))
    trees = [tree for tree in map(resolve_file_into_tree, py_files) if tree]
    print('trees generated')
    return trees
def find_all_names_in_tree(tree):
    """Collect the id of every Name node appearing in *tree*."""
    collected = []
    for node in ast.walk(tree):
        if isinstance(node, ast.Name):
            collected.append(node.id)
    return collected
def find_function_names_in_tree(tree):
    """Lower-cased names of every function defined anywhere in *tree*."""
    collected = []
    for node in ast.walk(tree):
        if isinstance(node, ast.FunctionDef):
            collected.append(node.name.lower())
    return collected
def extract_verbs_from_snake_case(name):
    """Words of the snake_case *name* that NLTK considers verbs."""
    verbs = []
    for word in split_snake_case_to_words(name):
        if is_verb(word):
            verbs.append(word)
    return verbs
def receive_names_in_path(_path):
    """All non-magic identifiers used under *_path*, split into single words."""
    all_names = []
    for tree in fetch_trees_from_path(_path):
        all_names += find_all_names_in_tree(tree)
    word_lists = [split_snake_case_to_words(name)
                  for name in all_names
                  if not is_magic_name(name)]
    return flat(word_lists)
def receive_function_names_in_path(_path):
    """Non-magic function names defined in the trees under *_path*."""
    collected = []
    for tree in fetch_trees_from_path(_path):
        collected += find_function_names_in_tree(tree)
    return [name for name in collected if not is_magic_name(name)]
def receive_function_verbs_in_path(_path):
    """Verbs appearing inside function names under *_path*."""
    per_name_verbs = [extract_verbs_from_snake_case(name)
                      for name in receive_function_names_in_path(_path)]
    return flat(per_name_verbs)
def get_top_words_in_path(path, top=10):
    """The *top* most common identifier words under *path*."""
    counter = collections.Counter(receive_names_in_path(path))
    return counter.most_common(top)
def get_top_verbs_in_path(path, top=10):
    """The *top* most common verbs used in function names under *path*."""
    counter = collections.Counter(receive_function_verbs_in_path(path))
    return counter.most_common(top)
def get_top_functions_names_in_path(path, top=10):
    """The *top* most common function names under *path*."""
    counter = collections.Counter(receive_function_names_in_path(path))
    return counter.most_common(top)
def parse_args():
    """Build and run the CLI: one subcommand per report type.

    Fix: the subcommand is now required. Without it, argparse accepts a bare
    invocation, `args.func` never gets set by set_defaults, and the caller
    crashes with AttributeError instead of a usage message.
    """
    parser = ArgumentParser(description='Calculate words occurrences in path')
    subparsers = parser.add_subparsers(dest='command')
    subparsers.required = True  # subcommands are optional by default
    # (name, help text, handler) for each report; all share the same options.
    reports = (
        ('words', '- words occurrences', get_top_words_in_path),
        ('verbs', '- verbs occurrences', get_top_verbs_in_path),
        ('functions', '- function names occurrences',
         get_top_functions_names_in_path),
    )
    for command, help_text, handler in reports:
        sub = subparsers.add_parser(command, help=help_text)
        sub.add_argument('--path', help='path to parse', default='./')
        sub.add_argument('--top', help='top number to return', type=int)
        sub.set_defaults(func=handler)
    return parser.parse_args()
if __name__ == '__main__':
    # The chosen subcommand stored its handler in args.func via set_defaults.
    args = parse_args()
    print(args.func(args.path, args.top))
|
{"/most_common.py": ["/helpers.py"]}
|
18,413
|
PTNobel/musicctl
|
refs/heads/master
|
/process.py
|
#!/usr/bin/python3
from os import listdir as _listdir
from os.path import join as _join
from typing import List, Dict
# Public API of this module.
# NOTE(review): "get_pids_of_comm" is listed twice below.
__all__ = [
    "is_comm_running",
    "get_comms_to_pids",
    "get_pids_to_comms",
    "get_pids_to_cmdlines",
    "get_pids_of_comm",
    "get_comm_of_pid",
    "get_pids",
    "get_comms",
    "get_pids_of_comm",
    "update_buffers",
    "get_cmdline_of_pid",
]
# Module-level caches of /proc state, filled by _build_buffers() at import
# time and refreshed via update_buffers(). Getters return these objects
# directly (no copies).
_buffer_map_pids_to_comms = {}  # type: Dict[str, str]
_buffer_map_pids_to_cmdlines = {}  # type: Dict[str, List[str]]
_buffer_map_comms_to_pids = {}  # type: Dict[str, List[str]]
_buffer_running_pids = []  # type: List[str]
_buffer_list_of_comms = []  # type: List[str]
def _build_buffers() -> None:
    """Scan /proc and rebuild every module-level cache.

    Fixes over the original:
    - uses ``with`` so the /proc file handles are closed even when the
      process vanishes between the two opens (the comm handle used to leak
      when opening cmdline raised FileNotFoundError);
    - no longer pops from the pid list while enumerating it — popping
      during iteration skipped the element after each vanished pid and
      could leave dead pids in the final list;
    - drops the duplicated ``global _buffer_map_comms_to_pids`` statement.
    """
    global _buffer_map_pids_to_comms
    global _buffer_map_pids_to_cmdlines
    global _buffer_running_pids
    global _buffer_list_of_comms
    global _buffer_map_comms_to_pids
    _buffer_map_pids_to_comms = {}  # Dict[str, str]
    _buffer_map_pids_to_cmdlines = {}  # Dict[str, List[str]]
    _buffer_list_of_comms = []  # List[str]
    _buffer_map_comms_to_pids = {}  # type: Dict[str, List[str]]
    surviving_pids = []  # pids that still existed when their files were read
    for pid in (entry for entry in _listdir('/proc') if entry.isdigit()):
        try:
            with open(_join('/proc', pid, 'comm'), 'r') as comm_file:
                comm = comm_file.read().rstrip('\n')
            with open(_join('/proc', pid, 'cmdline'), 'r') as cmd_file:
                cmdline = cmd_file.read().rstrip('\n').split('\x00')
        except FileNotFoundError:
            # The process exited mid-scan; skip it entirely.
            continue
        surviving_pids.append(pid)
        _buffer_map_pids_to_comms[pid] = comm
        _buffer_map_pids_to_cmdlines[pid] = cmdline
        if comm not in _buffer_list_of_comms:
            _buffer_list_of_comms.append(comm)
            _buffer_map_comms_to_pids[comm] = list()
        _buffer_map_comms_to_pids[comm].append(pid)
    _buffer_running_pids = surviving_pids
def update_buffers() -> None:
    """Re-scan /proc and refresh every cached pid/comm/cmdline map."""
    _build_buffers()
def get_pids() -> List[str]:
    """Returns a list of pids (snapshot from the last buffer rebuild)"""
    return _buffer_running_pids
def get_comms() -> List[str]:
    """Returns a list of distinct comms (snapshot from the last buffer rebuild)"""
    return _buffer_list_of_comms
def get_comms_to_pids() -> Dict[str, List[str]]:
    """Returns a dict of comms as keys and a list of pids as values"""
    # Callers receive the live cache, not a copy.
    return _buffer_map_comms_to_pids
def get_pids_to_comms() -> Dict[str, str]:
    """Returns a dict of pids as keys and a string of the comm as values"""
    # Callers receive the live cache, not a copy.
    return _buffer_map_pids_to_comms
def get_pids_of_comm(comm: str) -> List[str]:
    """Return every cached pid whose comm equals *comm* ([] when unknown)."""
    return _buffer_map_comms_to_pids.get(comm, [])
def get_pids_to_cmdlines() -> Dict[str, List[str]]:
    """Returns a dict of pids as keys and the argv list (cmdline) as values"""
    return _buffer_map_pids_to_cmdlines
def get_comm_of_pid(pid: str) -> str:
    """Returns the str of the comm of a pid (raises KeyError for unknown pids)"""
    return _buffer_map_pids_to_comms[pid]
def get_cmdline_of_pid(pid: str) -> List[str]:
    """Returns the list with each argv entry of pid as a different string
    (raises KeyError for unknown pids)"""
    return _buffer_map_pids_to_cmdlines[pid]
def is_comm_running(comm: str) -> bool:
    """Returns a bool if any process with that comm is running"""
    # Based on the cached snapshot; call update_buffers() for fresh data.
    return comm in _buffer_list_of_comms
# Populate the caches once at import time so the getters work immediately.
_build_buffers()
|
{"/player.py": ["/process.py"], "/musicctl.py": ["/player.py"]}
|
18,414
|
PTNobel/musicctl
|
refs/heads/master
|
/player.py
|
#!/usr/bin/python3
# A python3 port of musicctl.sh.
import time
import os
import sys
import re
import subprocess
import process
# warning() functions like print, except it prefixes everything and prints to
# stderr.
def warning(*objs, prefix='WARNING: '):
    """Print *objs* to stderr, prefixed — like print, but for warnings."""
    message = str(prefix) + ''.join(str(obj) for obj in objs)
    print(message, file=sys.stderr)
def get_keys(list_of_classes):
    """Print the command names of each given player, then exit(0)."""
    for player_cls in list_of_classes:
        print("For player " + str(player_cls) +
              " the following commands are available:")
        for command in sorted(player_cls.commands.keys()):
            print(" " + command)
    exit(0)
class mpd:
    """Adapter for the mpd daemon, driven through the `mpc` CLI."""
    __name__ = 'mpd'

    def __init__(self):
        # Map user-facing command names onto bound methods.
        self.commands = {'play': self.pause, 'pause': self.pause,
                         'back': self.back, 'next': self.next,
                         'quit': self.stop, 'stop': self.stop,
                         'is_playing': self.is_playing_shell_wrapper}

    def _call_mpc(self, *option):
        """Run `mpc <option...>`, discarding its stdout.

        Fix: the original opened /dev/null in *read* mode and passed its
        .buffer as the child's stdout; subprocess.DEVNULL is the supported
        way to silence a child and leaves no handle to close.
        """
        subprocess.call(['mpc', *option], stdout=subprocess.DEVNULL)

    def __repr__(self):
        return self.__name__

    def pause(self):
        self._call_mpc('toggle')

    def back(self):
        self._call_mpc('prev')

    def next(self):
        self._call_mpc('next')

    def stop(self):
        self._call_mpc('stop')

    def is_playing_shell_wrapper(self):
        # Translate the boolean into a shell exit status (0 = playing).
        if self.is_playing():
            exit(0)
        else:
            exit(1)

    def is_playing(self):
        """True when `mpc status` reports playing."""
        try:
            is_playing_present = b"playing" in subprocess.check_output(
                ['mpc', 'status'])
        except subprocess.CalledProcessError:
            is_playing_present = False
        return is_playing_present
# Since the easiest way to control mopidy is through its mpd implementation,
# the mopidy class inherits its implementation from the mpd class.
class mopidy(mpd):
    # Only the reported name differs; every command comes from mpd.
    __name__ = 'mopidy'
class pianobar:
    """Adapter for pianobar (Pandora client), driven through `pianoctl`."""
    __name__ = 'pianobar'

    def __init__(self):
        # 'back' maps to like() — there is no backwards seek in the command set.
        self.commands = {'play': self.pause, 'pause': self.pause,
                         'back': self.like, 'next': self.next,
                         'quit': self.stop, 'stop': self.stop,
                         'tired': self.tired, 'like': self.like,
                         'dislike': self.dislike,
                         'is_playing': self.is_playing_shell_wrapper}

    def __repr__(self):
        return self.__name__

    def _call_pianoctl(self, option):
        subprocess.call(
            ['pianoctl', option])

    def pause(self):
        self._call_pianoctl('p')

    def like(self):
        self._call_pianoctl('+')

    def dislike(self):
        self._call_pianoctl('-')

    def next(self):
        self._call_pianoctl('n')

    def stop(self):
        self._call_pianoctl('q')
        # if pianobar isn't responding kill it.
        time.sleep(1)
        process.update_buffers()
        if process.is_comm_running("pianobar"):
            subprocess.call(['kill'] + process.get_pids_of_comm('pianobar'))

    def tired(self):
        self._call_pianoctl('t')

    def is_playing_shell_wrapper(self):
        # Translate the boolean into a shell exit status (0 = playing).
        if self.is_playing():
            exit(0)
        else:
            exit(1)

    def is_playing(self):
        """Sample the log timestamp twice, 2 s apart; playback advances it.

        Fix: the original set ``output = False`` when either read failed and
        then unconditionally overwrote it with the timestamp comparison, so
        two *failed* but unequal reads reported "playing". A failed read now
        short-circuits to False.
        """
        log1_time_stamp, success1 = self._get_time()
        time.sleep(2)
        log2_time_stamp, success2 = self._get_time()
        if not (success1 and success2):
            return False
        return log1_time_stamp != log2_time_stamp

    def _get_time(self, tries=0):
        """Reads the pianobar time, and returns a tuple of str '##:##/##:##'
        and a boolean which reflects whether it matches the regex.

        Fix: the log file is now closed via ``with`` on every path.
        """
        with open(os.path.expanduser('~/.config/pianobar/out'), 'r') as log:
            time_stamp = log.read()[-12:-1]
        if re.match(r'^\d{2}:\d{2}/\d{2}:\d{2}$', time_stamp):
            return (time_stamp, True)
        elif tries < 3:
            # Retry briefly: the log tail may be mid-write.
            time.sleep(1)
            return self._get_time(tries + 1)
        else:
            return (time_stamp, False)
class playerctl:
    """Generic MPRIS fallback player, driven through the `playerctl` CLI."""
    __name__ = 'playerctl'

    def __init__(self):
        # Map user-facing command names onto bound methods.
        self.commands = {'play': self.pause, 'pause': self.pause,
                         'back': self.back, 'next': self.next,
                         'quit': self.stop, 'stop': self.stop,
                         'is_playing': self.is_playing_shell_wrapper}

    def __repr__(self):
        return self.__name__

    def _call_playerctl(self, option):
        subprocess.call(['playerctl', option])

    def pause(self):
        self._call_playerctl('play-pause')

    def back(self):
        self._call_playerctl('previous')

    def next(self):
        self._call_playerctl('next')

    def stop(self):
        self._call_playerctl('stop')

    def is_playing_shell_wrapper(self):
        # Translate the boolean into a shell exit status (0 = playing).
        exit(0 if self.is_playing() else 1)

    def is_playing(self):
        """True when `playerctl status` reports Playing."""
        try:
            status = subprocess.check_output(['playerctl', 'status'])
        except subprocess.CalledProcessError:
            return False
        return b"Playing" in status
def current_player():
    """Pick the player to control based on which daemons are running.

    pianobar takes priority over mpd unless mpd is actually playing;
    playerctl is the catch-all fallback.
    """
    running = process.get_comms()
    mpd_running = 'mpd' in running
    pianobar_running = 'pianobar' in running
    if mpd_running and pianobar_running:
        if b'playing' in subprocess.check_output(['mpc', 'status']):
            return mpd()
        return pianobar()
    if mpd_running:
        return mpd()
    if pianobar_running:
        return pianobar()
    if 'mopidy' in running:
        return mopidy()
    return playerctl()
def is_playing():
    """True when the auto-detected player reports active playback."""
    return current_player().is_playing()
def pause():
    """Toggle play/pause on the auto-detected player."""
    current_player().commands['pause']()
def stop():
    """Stop the auto-detected player."""
    current_player().commands['stop']()
def back():
    """Previous track (mapped to 'like' on pianobar, per its command map)."""
    current_player().commands['back']()
def next_song():
    """Skip to the next track on the auto-detected player."""
    current_player().commands['next']()
def print_keys(list_of_classes=[mopidy, mpd, pianobar, playerctl]):
    """Print the available command names for every player class."""
    for player_cls in list_of_classes:
        instance = player_cls()
        header = ("For player " + instance.__repr__() +
                  " the following commands are available:")
        print(header)
        for command in sorted(instance.commands.keys()):
            print(" " + command)
if __name__ == '__main__':
    # player.py is a library; run musicctl.py instead.
    print('Please don\'t do this.')
|
{"/player.py": ["/process.py"], "/musicctl.py": ["/player.py"]}
|
18,415
|
PTNobel/musicctl
|
refs/heads/master
|
/musicctl.py
|
#!/usr/bin/python3
# A python3 port of musicctl.sh.
import os
import sys
import player
# warning() functions like print, except it prefixes everything and prints to
# stderr.
def warning(*objs, prefix='WARNING: '):
    """Print *objs* to stderr, prefixed — like print, but for warnings."""
    message = str(prefix) + ''.join(str(obj) for obj in objs)
    print(message, file=sys.stderr)
def usage(exit_code, name):
    """Print usage text and terminate the process with *exit_code*.

    Zero goes to stdout; anything non-zero goes to stderr. Fix: the original
    called itself for negative codes, recursing until RecursionError —
    negative codes are now treated like any other error code.
    """
    usage_text = "Usage: %s {[a command]|player|commands|usage|help}" % name
    if exit_code == 0:
        print(usage_text)
    else:
        warning(usage_text, prefix='')
    exit(exit_code)
def processargs(input_argv):
    """Parse argv into the options dict consumed by main().

    Fix: `_player` bound the chosen player *name* to a local called
    `player`, shadowing the imported `player` module — so `player.mpd`,
    `player.pianobar`, ... became attribute lookups on a string and raised
    AttributeError whenever --player/-p was used. The local is now
    `player_name`.
    """
    # All of these run in the same scope as processargs(). They make changes to
    # output.
    def _help():
        usage(0, output['name'])

    def _trial():
        output["test_mode_prefix"] = 'echo '
        output["test_mode_suffix"] = ''

    def _player():
        # Accept both '--player=NAME' and '--player NAME' / '-p NAME'.
        if '=' in input_argv[i]:
            player_name = input_argv[i].split('=')[1]
        else:
            player_name = input_argv[i + 1]
            indexes_to_ignore.append(i + 1)
        try:
            output["player"] = {
                'mpd': player.mpd,
                'mpc': player.mpd,
                'pianobar': player.pianobar,
                'pianoctl': player.pianobar,
                'playerctl': player.playerctl,
                'mpris': player.playerctl,
            }[player_name]()
        except KeyError:
            warning('Invalid player')
            exit(1)
    # In place of a switch-case statement the following dictionaries link argv
    # entries to functions.
    long_args_to_disc = {'--help': _help,
                         '--trial': _trial, '--player': _player}
    short_args_to_disc = {'h': _help, 't': _trial, 'p': _player}
    output = {"input": None,
              "test_mode_prefix": '',
              "test_mode_suffix": ' >/dev/null',
              "name": os.path.basename(input_argv[0]),
              "player": None,
              }
    indexes_to_ignore = list()
    if len(input_argv) == 1:
        warning("Not enough arguments")
        usage(1, output['name'])
    else:
        # range() starts at 1 to prevent the name from being processed.
        for i in range(1, len(input_argv)):
            if i in indexes_to_ignore:
                continue
            elif len(input_argv[i]) >= 2 and input_argv[i][0:2] == '--':
                try:
                    long_args_to_disc[input_argv[i].split('=')[0]]()
                except KeyError:
                    warning("Invalid argument", prefix='')
                    usage(1, output['name'])
            elif input_argv[i][0] == '-' and input_argv[i][1] != '-':
                # Bundled short options: every char after '-' is one flag.
                for j in range(1, len(input_argv[i])):
                    try:
                        short_args_to_disc[input_argv[i][j]]()
                    except KeyError:
                        warning("Invalid argument", prefix='')
                        usage(1, output['name'])
            elif output["input"] is None:
                output["input"] = input_argv[i]
            else:
                warning("Error parsing arguments")
                usage(1, output['name'])
    return output
# global arguments
# NOTE(review): this runs at import time, so importing musicctl parses
# sys.argv as a side effect — fine for a script, surprising for a library.
arguments = processargs(sys.argv)
def main(arguments):
    """Dispatch the parsed command to the detected (or forced) player."""
    command = arguments["input"]
    # Handle help and usage correctly:
    if command in ("usage", "help"):
        usage(0, arguments['name'])
    if command == "commands":
        player.print_keys()
        exit(0)
    # Figure out what player is running (unless one was forced via --player).
    if arguments['player'] is not None:
        current = arguments['player']
    else:
        current = player.current_player()
    if command == "player":
        print(current)
        exit(0)
    # Catching a KeyError should prevent this from exploding over the user
    # giving invalid input.
    try:
        current.commands[command]()
    except KeyError:
        warning("Invalid input.")
        usage(1, arguments['name'])
if __name__ == "__main__":
main(arguments)
|
{"/player.py": ["/process.py"], "/musicctl.py": ["/player.py"]}
|
18,420
|
vinaybana/djangoapp
|
refs/heads/master
|
/blog/serializers.py
|
from .models import Post,Category,Tag,Comment
from rest_framework import serializers
import json
class PostSerializer(serializers.ModelSerializer):
    """Post plus a denormalized list of its comments."""
    comments = serializers.SerializerMethodField()

    def get_comments(self, obj):
        """Return every comment on *obj* as a list of plain dicts."""
        return [
            {
                'post': obj.id,
                'name': comment.name,
                'text': comment.text,
                'created': comment.created,
                'updated': comment.updated,
                'active': comment.active,
                'parent': comment.parent
            }
            for comment in Comment.objects.filter(post=obj.id).all()
        ]

    class Meta:
        model = Post
        fields = ['id', 'title', 'text', 'author', 'created_date', 'published_date', 'comments']
class CategorySerializer(serializers.ModelSerializer):
    """Flat representation of a Category, including its parent id."""
    class Meta:
        model = Category
        fields = ['id','title', 'text', 'parent', 'created_date']
class TagSerializer(serializers.ModelSerializer):
    """Flat representation of a Tag."""
    class Meta:
        model = Tag
        fields = ['id','title', 'text','created_date']
class CommentSerializer(serializers.ModelSerializer):
    """Flat representation of a Comment, including its parent comment."""
    class Meta:
        model = Comment
        fields = ['post','name', 'text','created','updated','active','parent']
|
{"/blog/serializers.py": ["/blog/models.py"], "/blog/views.py": ["/blog/models.py", "/blog/forms.py", "/blog/serializers.py"], "/polls/api.py": ["/polls/serializers.py"], "/blog/admin.py": ["/blog/models.py"], "/blog/api.py": ["/blog/serializers.py", "/blog/models.py"], "/blog/forms.py": ["/blog/models.py"]}
|
18,421
|
vinaybana/djangoapp
|
refs/heads/master
|
/blog/urls.py
|
from django.contrib import admin
from django.urls import path
from .import views
app_name = 'blog'
# urlpatterns = [
# path('post/<int:pk>/edit/', views.post_edit, name='post_edit'),
# path('post/new/', views.post_new, name='post_new'),
# path('blog/sign_up/',views.sign_up,name="sign-up"),
# path('blog/logout/',views.logout, name="logout"),
# path('post/<int:pk>/', views.post_detail, name='post_detail'),
# path('login/',views.login, name="login"),
# path('', views.post_list, name ='post_list'),
# path('userdetail/<int:pk>/',views.userdetail, name='userdetail'),
# path('edituser/<int:pk>/', views.edit_profile, name='edit_profile'),
# ]
# Order matters: 'post/<slug>/edit/' and the literal 'post/new/' are listed
# before the bare 'post/<slug>/' catch-all so 'new' isn't swallowed as a
# slug; likewise 'category/<slug>/...' precede the bare 'category/' list.
urlpatterns = [
    path('post/<str:slug>/edit/', views.post_edit, name='post_edit'),
    path('category/<str:slug>/edit/', views.category_edit, name='category_edit'),
    path('blog/sign_up/',views.sign_up,name="sign-up"),
    path('blog/logout/',views.logout, name="logout"),
    path('post/new/', views.post_new, name='post_new'),
    path('userdetail/<int:pk>/',views.userdetail, name='userdetail'),
    path('edituser/<int:pk>/', views.edit_profile, name='edit_profile'),
    path('post/<str:slug>/', views.post_detail, name='post_detail'),
    path('category/<str:slug>/', views.category_detail, name='category_detail'),
    path('tag/<str:slug>/', views.tag_details, name='tag_details'),
    path('category/', views.category_list, name ='category_list'),
    path('tag/', views.tag_list, name ='tag_list'),
    path('login/',views.login, name="login"),
    path('', views.post_list, name ='post_list'),
]
|
{"/blog/serializers.py": ["/blog/models.py"], "/blog/views.py": ["/blog/models.py", "/blog/forms.py", "/blog/serializers.py"], "/polls/api.py": ["/polls/serializers.py"], "/blog/admin.py": ["/blog/models.py"], "/blog/api.py": ["/blog/serializers.py", "/blog/models.py"], "/blog/forms.py": ["/blog/models.py"]}
|
18,422
|
vinaybana/djangoapp
|
refs/heads/master
|
/blog/views.py
|
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from .models import Post, Userprofile, Comment,Category,Tag
from .forms import PostForm,ProfileForm, categoryForm,CommentForm
from django.contrib.auth.models import User, auth
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import authenticate, login, logout
from rest_framework import viewsets
from rest_framework import permissions
from .serializers import PostSerializer
from rest_framework import generics
from django.contrib.auth.decorators import login_required
def category_list(request):
    """Render every category."""
    all_categories = Category.objects.all()
    return render(request, 'blog/category_list.html', {'categories': all_categories})
def category_detail(request,slug):
    """Render one category and the posts filed under it."""
    category = get_object_or_404(Category, slug=slug)
    print(category)  # NOTE(review): debug print left in; consider logging
    posts= Post.objects.filter(category=category)
    print(posts)
    return render(request, 'blog/category_detail.html', {'posts': posts, 'category':category})
def category_edit(request, slug):
    """Edit an existing category; re-render the bound form on errors.

    Fix: the original referenced `CategoryForm`, but the name imported at
    the top of this file is `categoryForm` — every request raised NameError.
    """
    category = get_object_or_404(Category, slug=slug)
    if request.method == "POST":
        form = categoryForm(request.POST, instance=category)
        if form.is_valid():
            category = form.save(commit=False)
            # Stamp the edit time server-side.
            category.created_date = timezone.now()
            category.save()
            return redirect('blog:category_detail', slug=category.slug)
    else:
        form = categoryForm(instance=category)
    return render(request, 'blog/category_edit.html', {'form': form})
def post_list(request):
    """Render published posts, newest first."""
    published = Post.objects.filter(
        published_date__lte=timezone.now()).order_by('-published_date')
    return render(request, 'blog/post_list.html', {'posts': published})
def post_detail(request, slug):
    """Show one post with its top-level comments; POST adds a comment.

    A posted 'comment_id' makes the new comment a reply to that comment;
    otherwise it is a top-level comment (parent=None).
    """
    post = get_object_or_404(Post, slug=slug)
    # Only top-level comments are passed to the template.
    comments=Comment.objects.filter(post=post, parent=None)
    new_comment=None
    if request.method == "POST":
        form = CommentForm(request.POST)
        if form.is_valid():
            # Values are read from request.POST directly, not form.cleaned_data.
            text= request.POST.get('text')
            name= request.POST.get('name')
            reply_id=request.POST.get('comment_id')
            comment_obj=None
            if reply_id:
                comment_obj=Comment.objects.get(id=reply_id)
            new_comment = Comment.objects.create(post=post, parent=comment_obj, text=text, name=name)
            new_comment.save()
            return redirect('blog:post_detail', slug=post.slug)
    else:
        form = CommentForm()
    return render(request, 'blog/post_details.html', {'form':form , 'post': post, 'comments':comments})
def post_new(request):
    """Create a post; author and publish time are set server-side."""
    form = PostForm(request.POST) if request.method == "POST" else PostForm()
    if request.method == "POST" and form.is_valid():
        new_post = form.save(commit=False)
        new_post.author = request.user
        new_post.published_date = timezone.now()
        new_post.save()
        return redirect('blog:post_list')
    # GET, or POST with validation errors: show the (possibly bound) form.
    return render(request, 'blog/post_edit.html', {'form': form})
def post_edit(request, slug):
    """Edit an existing post; re-render the bound form on errors."""
    post = get_object_or_404(Post, slug=slug)
    if request.method == "POST":
        form = PostForm(request.POST, instance=post)
        if form.is_valid():
            edited = form.save(commit=False)
            edited.author = request.user
            edited.published_date = timezone.now()
            edited.save()
            return redirect('blog:post_detail', slug=edited.slug)
    else:
        form = PostForm(instance=post)
    return render(request, 'blog/post_edit.html', {'form': form})
def tag_list(request):
    """Render every tag."""
    all_tags = Tag.objects.all()
    print(all_tags)
    return render(request, 'blog/tag_list.html', {'tags': all_tags})
def tag_details(request, slug):
    """Render one tag and the posts carrying it."""
    tag = get_object_or_404(Tag, slug=slug)
    print(tag)  # NOTE(review): debug print left in
    # NOTE(review): filters tag__slug against the Tag *object* rather than
    # tag.slug — presumably relies on Tag.__str__; confirm intended lookup.
    posts=Post.objects.filter(tag__slug=tag)
    return render(request, 'blog/tag_details.html', {'tag':tag, 'posts': posts})
def cmnt(request, slug):
    """Return comments for the post with *slug*.

    NOTE(review): `Post.cmnt` is not defined anywhere visible, so calling
    this raises AttributeError; it likely meant a related manager on the
    post instance. No route in urls.py points here.
    """
    post= get_object_or_404(Post, slug=slug)
    cmnt= Post.cmnt.filter(slug=post.slug)
    return cmnt
# def post_list(request):
# posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')
# return render(request, 'blog/post_list.html', {'posts':posts})
# def post_detail(request, pk):
# post = get_object_or_404(Post, pk=pk)
# return render(request, 'blog/post_details.html', {'post': post})
# def post_new(request):
# if request.method == "POST":
# form = PostForm(request.POST)
# if form.is_valid():
# post = form.save(commit=False)
# post.author = request.user
# post.published_date = timezone.now()
# post.save()
# return redirect('blog:post_detail', pk=post.pk)
# else:
# form = PostForm()
# return render(request, 'blog/post_edit.html', {'form': form})
# def post_edit(request, pk):
# post = get_object_or_404(Post, pk=pk)
# if request.method == "POST":
# form = PostForm(request.POST, instance=post)
# if form.is_valid():
# post = form.save(commit=False)
# post.author = request.user
# post.published_date = timezone.now()
# post.save()
# return redirect('blog:post_detail', pk=post.pk)
# else:
# form = PostForm(instance=post)
# return render(request, 'blog/post_edit.html', {'form': form})
def sign_up(request):
    """Register a new account and log it in immediately.

    NOTE(review): on success this renders post_list.html directly instead of
    redirecting, so the browser URL stays on the sign-up page.
    """
    context = {}
    form = UserCreationForm(request.POST or None)
    if request.method == "POST":
        if form.is_valid():
            user = form.save()
            auth.login(request, user)
            return render(request,'blog/post_list.html')
    context['form']=form
    return render(request,'blog/sign_up.html',context)
def login(request):
    """Authenticate and log the user in; re-show the login page otherwise.

    Fix: the original returned None (an invalid view response) when a POST
    carried bad credentials; every path now returns an HttpResponse.
    """
    if request.method == "POST":
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = auth.authenticate(username=username, password=password)
        if user is not None:
            auth.login(request, user)
            return render(request, 'blog/post_list.html')
    # GET, or POST with bad credentials: show the login form.
    return render(request, 'blog/login.html')
def logout(request):
    """Log the current user out and show the post list.

    Fix: the original's bare `logout(request)` called this very view (the
    local name shadows the imported Django logout), recursing until
    RecursionError. Use Django's auth logout explicitly, as the other views
    here already do with auth.login/auth.authenticate.
    """
    auth.logout(request)
    return render(request, 'blog/post_list.html')
@login_required
def userdetail(request, pk):
    """Show user *pk*'s detail page.

    NOTE(review): the profile image is looked up for request.user, not for
    the user identified by *pk* — confirm that is intended.
    """
    user = User.objects.get(pk=pk)
    userimg= Userprofile.objects.filter(user= request.user).first()
    return render(request, 'blog/userdetail.html',{'user':user, 'userimg':userimg})
def edit_profile(request, pk):
    """Create or update the requesting user's profile image."""
    user = User.objects.get(pk=pk)
    if request.method == 'POST':
        form = ProfileForm(request.POST, request.FILES)
        if form.is_valid():
            is_exist = Userprofile.objects.filter(user=request.user).first()
            if is_exist:
                # Update the existing profile in place.
                is_exist.user_image= form.cleaned_data.get('user_image')
                is_exist.user = request.user
                is_exist.save()
            else:
                # First upload: create a fresh profile row.
                userimg = form.save(commit=False)
                userimg.user = request.user
                userimg.save()
            return redirect('blog:userdetail',pk=user.pk)
    else:
        form = ProfileForm()
    return render(request,'blog/useredit.html', {'form':form})
    # Redirect to a success page.
|
{"/blog/serializers.py": ["/blog/models.py"], "/blog/views.py": ["/blog/models.py", "/blog/forms.py", "/blog/serializers.py"], "/polls/api.py": ["/polls/serializers.py"], "/blog/admin.py": ["/blog/models.py"], "/blog/api.py": ["/blog/serializers.py", "/blog/models.py"], "/blog/forms.py": ["/blog/models.py"]}
|
18,423
|
vinaybana/djangoapp
|
refs/heads/master
|
/polls/serializers.py
|
from .models import Question,Choice
from rest_framework import serializers
class QuestionSerializer(serializers.ModelSerializer):
    """Question plus a denormalized list of its choices."""
    choices = serializers.SerializerMethodField()

    def get_choices(self, obj):
        """Return each choice of *obj* as a plain dict."""
        return [
            {
                'id': choice.id,
                'choice_text': choice.choice_text,
                'score': choice.votes
            }
            for choice in Choice.objects.filter(question=obj.id).all()
        ]

    class Meta:
        model = Question
        fields = ['question_text', 'pub_date', 'choices']
class ChoiceSerializer(serializers.ModelSerializer):
    """Flat representation of a Choice and its vote count."""
    class Meta:
        model = Choice
        fields = ['question', 'choice_text', 'votes']
|
{"/blog/serializers.py": ["/blog/models.py"], "/blog/views.py": ["/blog/models.py", "/blog/forms.py", "/blog/serializers.py"], "/polls/api.py": ["/polls/serializers.py"], "/blog/admin.py": ["/blog/models.py"], "/blog/api.py": ["/blog/serializers.py", "/blog/models.py"], "/blog/forms.py": ["/blog/models.py"]}
|
18,424
|
vinaybana/djangoapp
|
refs/heads/master
|
/polls/api.py
|
from rest_framework import serializers, viewsets, status as status_code, generics, mixins
from .serializers import *
from .models import *
from rest_framework.views import APIView
from rest_framework import status
from rest_framework.permissions import IsAuthenticated, AllowAny
from django.contrib.auth import authenticate
from rest_framework.response import Response
class QuestionViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for poll questions (DELETE deliberately excluded)."""
    serializer_class = QuestionSerializer
    queryset = Question.objects.all()
    http_method_names = ['get','post','put','patch']
class ChoiceViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for poll choices (DELETE deliberately excluded)."""
    serializer_class = ChoiceSerializer
    queryset = Choice.objects.all()
    http_method_names = ['get','post','put','patch']
|
{"/blog/serializers.py": ["/blog/models.py"], "/blog/views.py": ["/blog/models.py", "/blog/forms.py", "/blog/serializers.py"], "/polls/api.py": ["/polls/serializers.py"], "/blog/admin.py": ["/blog/models.py"], "/blog/api.py": ["/blog/serializers.py", "/blog/models.py"], "/blog/forms.py": ["/blog/models.py"]}
|
18,425
|
vinaybana/djangoapp
|
refs/heads/master
|
/polls/apiurls.py
|
from rest_framework import renderers
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from . import api, views

# DefaultRouter generates the standard list/detail routes for each viewset.
router = DefaultRouter()
# BUG FIX: the original register() lines ended with stray trailing commas,
# silently building and discarding one-element tuples on every import.
router.register('question', api.QuestionViewSet)
router.register('choice', api.ChoiceViewSet)

urlpatterns = [
    path('', include(router.urls)),
]
|
{"/blog/serializers.py": ["/blog/models.py"], "/blog/views.py": ["/blog/models.py", "/blog/forms.py", "/blog/serializers.py"], "/polls/api.py": ["/polls/serializers.py"], "/blog/admin.py": ["/blog/models.py"], "/blog/api.py": ["/blog/serializers.py", "/blog/models.py"], "/blog/forms.py": ["/blog/models.py"]}
|
18,426
|
vinaybana/djangoapp
|
refs/heads/master
|
/blog/apiurls.py
|
from rest_framework import renderers
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .import api, views
# DefaultRouter wires the standard list/detail CRUD routes for every
# registered viewset under the corresponding prefix.
router = DefaultRouter()
router.register('post', api.PostViewSet)
router.register('category', api.CategoryViewSet)
router.register('tag', api.TagViewSet)
router.register('comment', api.CommentViewSet)
urlpatterns = [
    path('', include(router.urls)),
]
|
{"/blog/serializers.py": ["/blog/models.py"], "/blog/views.py": ["/blog/models.py", "/blog/forms.py", "/blog/serializers.py"], "/polls/api.py": ["/polls/serializers.py"], "/blog/admin.py": ["/blog/models.py"], "/blog/api.py": ["/blog/serializers.py", "/blog/models.py"], "/blog/forms.py": ["/blog/models.py"]}
|
18,427
|
vinaybana/djangoapp
|
refs/heads/master
|
/blog/admin.py
|
from django.contrib import admin
from .models import Post,Userprofile, Category, Tag, Comment
# class postAdmin(admin.ModelAdmin):
# view_on_site = True
# fieldsets = [
# (None, {'fields': ['title', 'text', 'author']}),
# ('Date information', {'fields': ['published_date'], 'classes': ['collapse']}),
# ]
# list_display = ('title', 'author', 'published_date', 'created_date')
# list_filter = ['published_date']
# search_fields = ['title']
class PostAdmin(admin.ModelAdmin):
    """Admin configuration for Post: collapsible date section, slug
    auto-filled from the title, and a horizontal widget for tags."""
    view_on_site = True
    fieldsets = [
        (None, {'fields': ['title','slug', 'text', 'author', 'tag', 'category', 'cmnt']}),
        ('Date information', {'fields': ['published_date'], 'classes': ['collapse']}),
    ]
    list_display = ('title', 'author', 'slug','published_date', 'created_date')
    list_filter = ['published_date']
    search_fields = ['title']
    filter_horizontal = ('tag',)
    # Pre-fill the slug from the title while typing in the admin form.
    prepopulated_fields = {'slug':("title",)}
    # readonly_fields=('slug',)
class CategoryAdmin(admin.ModelAdmin):
    """Admin configuration for Category with slug pre-population."""
    # view_on_site = True
    fieldsets = [
        (None, {'fields': ['title','text','slug','parent']}),
        ('Date information', {'fields': ['created_date'], 'classes': ['collapse']}),
    ]
    list_display = ('title', 'text','slug','parent', 'created_date')
    # list_filter = ['published_date']
    search_fields = ['title']
    # filter_horizontal = ('tag',)
    prepopulated_fields = {'slug':("title",)}
class TagAdmin(admin.ModelAdmin):
    """Admin configuration for Tag with slug pre-population."""
    # view_on_site = True
    fieldsets = [
        (None, {'fields': ['title','text','slug']}),
        ('Date information', {'fields': ['created_date'], 'classes': ['collapse']}),
    ]
    list_display = ('title', 'text','slug','created_date')
    # list_filter = ['published_date']
    search_fields = ['title']
    # filter_horizontal = ('tag',)
    prepopulated_fields = {'slug':("title",)}
class CommentAdmin(admin.ModelAdmin):
    """Admin configuration for Comment; slug is pre-filled from the
    commenter's name."""
    # view_on_site = True
    fieldsets = [
        (None, {'fields': ['post','name','text','slug','active','parent']}),
        ('Date information', {'fields': ['created'], 'classes': ['collapse']}),
    ]
    list_display = ('name', 'text','post','slug','created','updated')
    # list_filter = ['published_date']
    search_fields = ['name']
    # filter_horizontal = ('tag',)
    prepopulated_fields = {'slug':("name",)}
# Attach each model to the admin site with its customised options class;
# Userprofile keeps the default ModelAdmin.
for model_cls, admin_cls in (
    (Post, PostAdmin),
    (Category, CategoryAdmin),
    (Tag, TagAdmin),
    (Comment, CommentAdmin),
):
    admin.site.register(model_cls, admin_cls)
admin.site.register(Userprofile)
|
{"/blog/serializers.py": ["/blog/models.py"], "/blog/views.py": ["/blog/models.py", "/blog/forms.py", "/blog/serializers.py"], "/polls/api.py": ["/polls/serializers.py"], "/blog/admin.py": ["/blog/models.py"], "/blog/api.py": ["/blog/serializers.py", "/blog/models.py"], "/blog/forms.py": ["/blog/models.py"]}
|
18,428
|
vinaybana/djangoapp
|
refs/heads/master
|
/blog/api.py
|
from rest_framework import serializers, viewsets, status as status_code, generics, mixins
from .serializers import *
from .models import *
from rest_framework.views import APIView
from rest_framework import status
from rest_framework.permissions import IsAuthenticated, AllowAny
from django.contrib.auth import authenticate
from rest_framework.response import Response
from rest_framework import pagination
class PostViewSet(viewsets.ModelViewSet ):
    """
    API endpoint that allows posts to be viewed or edited.
    Posts are listed newest-published first.
    """
    queryset = Post.objects.all().order_by('-published_date')
    serializer_class = PostSerializer
    # permission_classes = [permissions.IsAuthenticated]
    http_method_names = ['get','post','put','patch','delete']
class CategoryViewSet(viewsets.ModelViewSet):
    """Full CRUD API endpoint for blog categories."""
    queryset = Category.objects.all()
    serializer_class = CategorySerializer
    http_method_names = ['get','post','put','patch','delete']
class TagViewSet(viewsets.ModelViewSet):
    """Full CRUD API endpoint for blog tags."""
    queryset = Tag.objects.all()
    serializer_class = TagSerializer
    http_method_names = ['get','post','put','patch','delete']
class CommentViewSet(viewsets.ModelViewSet):
    """Full CRUD API endpoint for blog comments."""
    queryset = Comment.objects.all()
    serializer_class = CommentSerializer
    http_method_names = ['get','post','put','patch','delete']
|
{"/blog/serializers.py": ["/blog/models.py"], "/blog/views.py": ["/blog/models.py", "/blog/forms.py", "/blog/serializers.py"], "/polls/api.py": ["/polls/serializers.py"], "/blog/admin.py": ["/blog/models.py"], "/blog/api.py": ["/blog/serializers.py", "/blog/models.py"], "/blog/forms.py": ["/blog/models.py"]}
|
18,429
|
vinaybana/djangoapp
|
refs/heads/master
|
/blog/models.py
|
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django_extensions.db.fields import AutoSlugField
from autoslug import AutoSlugField
class Category(models.Model):
    """Hierarchical post category; ``parent`` points to another Category."""
    title= models.CharField(max_length=200)
    text=models.TextField()
    slug = AutoSlugField(populate_from='title', max_length=160, editable=True)
    parent=models.ForeignKey('self', blank=True, null=True, related_name='children', on_delete=models.CASCADE)
    created_date = models.DateTimeField(default = timezone.now)
    class Meta:
        verbose_name_plural = "categories"
    def __str__(self):
        return self.title
    def slugify_function(self, content):
        # Hook for django-extensions' AutoSlugField. NOTE(review): the
        # module imports AutoSlugField from both django_extensions and
        # autoslug; the later autoslug import shadows the first — confirm
        # which implementation is intended (autoslug ignores this hook).
        return content.replace('_', '-').lower()
class Tag(models.Model):
    """A free-form label that can be attached to many posts."""

    title = models.CharField(blank=True, max_length=200)
    # BUG FIX: the default was the boolean ``True``, which Django coerces to
    # the literal string "True" for every Tag saved without text.  An empty
    # string is the conventional TextField default.
    # NOTE: this change requires generating a new migration.
    text = models.TextField(default='')
    slug = AutoSlugField(populate_from='title', max_length=160, editable=True)
    created_date = models.DateTimeField(default=timezone.now)

    def __str__(self):
        return self.title

    def slugify_function(self, content):
        # Hook for django-extensions' AutoSlugField — TODO confirm it is
        # used, since the autoslug import shadows that field class.
        return content.replace('_', '-').lower()
class Post(models.Model):
    """A blog post with many tags, an optional category and an author."""
    tag = models.ManyToManyField('Tag')
    # NOTE(review): a ForeignKey from Post to a single Comment looks
    # inverted — Comment already points back via its ``post`` FK. Confirm
    # whether ``cmnt`` is actually needed.
    cmnt=models.ForeignKey('Comment', null=True, blank=True,on_delete=models.CASCADE, related_query_name="posts")
    category = models.ForeignKey('Category', null=True, blank=True, on_delete=models.CASCADE, related_query_name="posts")
    author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    text = models.TextField()
    created_date = models.DateTimeField(default = timezone.now)
    # Stays NULL until publish() is called.
    published_date = models.DateTimeField(blank = True, null = True)
    slug = AutoSlugField(populate_from='title', max_length=160, editable=True)
    def publish(self):
        # Stamp the publication time and persist immediately.
        self.published_date = timezone.now()
        self.save()
    def __str__(self):
        return self.title
    def slugify_function(self, content):
        # Hook for django-extensions' AutoSlugField — TODO confirm it is
        # used, since the autoslug import shadows that field class.
        return content.replace('_', '-').lower()
# class Post(models.Model):
# author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
# title = models.CharField(max_length=200)
# text = models.TextField()
# created_date = models.DateTimeField(default = timezone.now)
# published_date = models.DateTimeField(blank = True, null = True)
# def get_absolute_url(self):
# return "/post/%i" % self.id
# def publish(self):
# self.published_date = timezone.now()
# self.save()
# def __str__(self):
# return self.title
class Userprofile(models.Model):
    """Extra per-user data: an optional profile image."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    user_image=models.ImageField(upload_to='media/',blank=True)
    def __str__(self):
        return self.user.username
class Comment(models.Model):
    """A threaded comment on a Post; ``parent`` links replies to their
    parent comment, ``active`` allows moderation without deletion."""
    post = models.ForeignKey('Post', on_delete=models.CASCADE, related_name='comments')
    name = models.CharField(max_length=200)
    text = models.TextField()
    created = models.DateTimeField(default=timezone.now)
    updated = models.DateTimeField(auto_now=True)
    active = models.BooleanField(default=True)
    parent = models.ForeignKey('self', null=True, blank=True,on_delete=models.CASCADE, related_name='replies')
    slug = AutoSlugField(populate_from='name', max_length=160, editable=True)
    class Meta:
        # sort comments in chronological order by default
        ordering = ('created',)
    def __str__(self):
        return self.name
    def slugify_function(self, content):
        # Hook for django-extensions' AutoSlugField — TODO confirm it is
        # used, since the autoslug import shadows that field class.
        return content.replace('_', '-').lower()
|
{"/blog/serializers.py": ["/blog/models.py"], "/blog/views.py": ["/blog/models.py", "/blog/forms.py", "/blog/serializers.py"], "/polls/api.py": ["/polls/serializers.py"], "/blog/admin.py": ["/blog/models.py"], "/blog/api.py": ["/blog/serializers.py", "/blog/models.py"], "/blog/forms.py": ["/blog/models.py"]}
|
18,430
|
vinaybana/djangoapp
|
refs/heads/master
|
/blog/forms.py
|
from django import forms
from .models import Post, Userprofile, Category, Comment
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.forms import ModelForm
class PostForm(forms.ModelForm):
    """Create/edit form for Post; only title and body text are editable."""
    class Meta:
        model = Post
        fields = ('title', 'text',)
class ProfileForm(forms.ModelForm):
    """Form for uploading/replacing a user's profile image."""
    class Meta:
        model = Userprofile
        fields = ('user_image',)
# class UserForm(forms.ModelForm):
# class Meta:
# model = User
# fields = ('username', 'first_name', 'last_name', 'email')
class categoryForm(forms.ModelForm):
    """Create/edit form for Category (title and text)."""
    # NOTE(review): class name breaks PascalCase convention; renaming would
    # touch callers, so it is only flagged here.
    class Meta:
        model = Category
        fields=('title' , 'text',)
class CommentForm(forms.ModelForm):
    """Form for submitting a comment: display name and text."""
    class Meta:
        model = Comment
        fields = ('name', 'text')
|
{"/blog/serializers.py": ["/blog/models.py"], "/blog/views.py": ["/blog/models.py", "/blog/forms.py", "/blog/serializers.py"], "/polls/api.py": ["/polls/serializers.py"], "/blog/admin.py": ["/blog/models.py"], "/blog/api.py": ["/blog/serializers.py", "/blog/models.py"], "/blog/forms.py": ["/blog/models.py"]}
|
18,450
|
kul2002il/rkisQuestions
|
refs/heads/master
|
/main/urls.py
|
from django.urls import path
from . import views
from .views import BBLoginView, BBLogoutView, BBPasswordChangeView, RegisterDoneView, RegisterUserView
# URL namespace for reversing, e.g. reverse('main:login').
# NOTE(review): views.py reverses 'myApp:profile' / 'myApp:login' — confirm
# that a 'myApp' namespace exists elsewhere, since this app registers 'main'.
app_name = 'main'
urlpatterns = [
    path('', views.index),
    path('accounts/login/', BBLoginView.as_view(), name="login"),
    path('accounts/register/', RegisterUserView.as_view(), name='register'),
    path('accounts/register/done/', RegisterDoneView.as_view(), name='register_done'),
    path('accounts/password/change/', BBPasswordChangeView.as_view(), name='password_change'),
    path('accounts/logout/', BBLogoutView.as_view(), name="logout"),
]
|
{"/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"], "/main/admin.py": ["/main/models.py"]}
|
18,451
|
kul2002il/rkisQuestions
|
refs/heads/master
|
/main/migrations/0006_auto_20201102_1214.py
|
# Generated by Django 3.1.2 on 2020-11-02 12:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Question.datetime auto-stamped and nullable."""
    dependencies = [
        ('main', '0005_auto_20201102_1213'),
    ]
    operations = [
        migrations.AlterField(
            model_name='question',
            name='datetime',
            field=models.DateTimeField(auto_now_add=True, null=True, verbose_name='Дата публикации'),
        ),
    ]
|
{"/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"], "/main/admin.py": ["/main/models.py"]}
|
18,452
|
kul2002il/rkisQuestions
|
refs/heads/master
|
/main/migrations/0003_auto_20201102_1211.py
|
# Generated by Django 3.1.2 on 2020-11-02 12:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add temporary datatime2/title2 fields to Question
    (removed again in 0004)."""
    dependencies = [
        ('main', '0002_auto_20201102_1210'),
    ]
    operations = [
        migrations.AddField(
            model_name='question',
            name='datatime2',
            field=models.DateTimeField(auto_now_add=True, null=True, verbose_name='Момент публикации'),
        ),
        migrations.AddField(
            model_name='question',
            name='title2',
            field=models.CharField(default='', max_length=255, verbose_name='Название'),
        ),
    ]
|
{"/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"], "/main/admin.py": ["/main/models.py"]}
|
18,453
|
kul2002il/rkisQuestions
|
refs/heads/master
|
/main/migrations/0005_auto_20201102_1213.py
|
# Generated by Django 3.1.2 on 2020-11-02 12:13
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rename the misspelled Question.datatime to datetime."""
    dependencies = [
        ('main', '0004_auto_20201102_1212'),
    ]
    operations = [
        migrations.RenameField(
            model_name='question',
            old_name='datatime',
            new_name='datetime',
        ),
    ]
|
{"/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"], "/main/admin.py": ["/main/models.py"]}
|
18,454
|
kul2002il/rkisQuestions
|
refs/heads/master
|
/main/models.py
|
from django.db import models
from django.contrib.auth.models import AbstractUser
class User(AbstractUser):
    """Project user model; currently identical to Django's AbstractUser."""

    # BUG FIX: ``Meta`` was declared as an instance method (``def Meta``),
    # which Django's model metaclass would accidentally pick up as the
    # options holder.  Declare it as the conventional inner class instead.
    class Meta:
        pass
class Question(models.Model):
    """A poll question: title, auto-set publication time and body text."""
    title = models.CharField(max_length=255, verbose_name="Название", default='')
    # auto_now_add: stamped once on creation; null=True kept by migrations.
    datetime = models.DateTimeField(auto_now_add=True, null=True, verbose_name='Дата публикации')
    text = models.CharField(max_length=255, verbose_name="Текст", default='')
    def __str__(self):
        return self.title
class Answer(models.Model):
    """One selectable answer option belonging to a Question."""
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    text = models.CharField(max_length=255, verbose_name="Вариант", default='')
    def __str__(self):
        return self.text
class Voice(models.Model):
    """A single user's vote for one answer option."""

    answer = models.ForeignKey(Answer, on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)

    def __str__(self):
        # BUG FIX: the original returned ``self.text``, but Voice has no
        # ``text`` field, so every str() (e.g. in the admin list) raised
        # AttributeError.  Show the voted answer's text instead.
        return self.answer.text
|
{"/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"], "/main/admin.py": ["/main/models.py"]}
|
18,455
|
kul2002il/rkisQuestions
|
refs/heads/master
|
/main/views.py
|
from django.shortcuts import render
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.views.generic import UpdateView, CreateView
from django.views.generic.base import TemplateView
from django.contrib.auth.views import LoginView, LogoutView, PasswordChangeView
from django.shortcuts import get_object_or_404
from django.contrib.auth.decorators import login_required
from django.urls import reverse_lazy
from .models import Question, User
from .forms import RegisterUserForm, ChangeUserInfoForm
def index(request):
    """Render the landing page with up to ten questions."""
    latest_questions = Question.objects.all()[:10]
    return render(request, 'main/index.html', context={"questions": latest_questions})
class BBLoginView(LoginView):
    """Stock Django login view with a project-specific template."""
    template_name = 'main/login.html'
class BBLogoutView(LoginRequiredMixin, LogoutView):
    """Logout view; requires an authenticated user before logging out."""
    template_name = 'main/logout.html'
class ChangeUserInfoView(SuccessMessageMixin, LoginRequiredMixin, UpdateView):
    """Let the logged-in user edit their own account data."""
    model = User
    template_name = 'main/changeUserInfo.html'
    form_class = ChangeUserInfoForm
    # NOTE(review): this app's urls.py registers namespace 'main', not
    # 'myApp', and defines no 'profile' route — confirm these reverse names.
    success_url = reverse_lazy('myApp:profile')
    success_message = 'Личные данные пользователя изменены'
    def dispatch(self, request, *args, **kwargs):
        # Remember the requesting user's pk so get_object always edits the
        # current user, regardless of any pk in the URL.
        self.user_id = request.user.pk
        return super().dispatch(request, *args, **kwargs)
    def get_object(self, queryset=None):
        if not queryset:
            queryset = self.get_queryset()
        return get_object_or_404(queryset, pk=self.user_id)
class BBPasswordChangeView(SuccessMessageMixin, LoginRequiredMixin, PasswordChangeView):
    """Password change for the logged-in user."""
    template_name = 'main/passwordChange.html'
    # NOTE(review): 'myApp' namespace — confirm it exists (this app is 'main').
    success_url = reverse_lazy('myApp:profile')
    success_message = 'Пароль пользователя изменен'
class RegisterUserView(CreateView):
    """Self-service user registration."""
    model = User
    template_name = 'main/registerUser.html'
    form_class = RegisterUserForm
    # NOTE(review): 'myApp' namespace — confirm it exists (this app is 'main').
    success_url = reverse_lazy('myApp:login')
class RegisterDoneView(TemplateView):
    """Static confirmation page shown after successful registration."""
    template_name = 'main/registerDone.html'
|
{"/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"], "/main/admin.py": ["/main/models.py"]}
|
18,456
|
kul2002il/rkisQuestions
|
refs/heads/master
|
/main/migrations/0004_auto_20201102_1212.py
|
# Generated by Django 3.1.2 on 2020-11-02 12:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: drop the temporary datatime2/title2 fields from 0003
    and adjust the (still misspelled) datatime field's verbose name."""
    dependencies = [
        ('main', '0003_auto_20201102_1211'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='question',
            name='datatime2',
        ),
        migrations.RemoveField(
            model_name='question',
            name='title2',
        ),
        migrations.AlterField(
            model_name='question',
            name='datatime',
            field=models.DateTimeField(auto_now_add=True, null=True, verbose_name='Момент_публикации'),
        ),
    ]
|
{"/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"], "/main/admin.py": ["/main/models.py"]}
|
18,457
|
kul2002il/rkisQuestions
|
refs/heads/master
|
/main/admin.py
|
from django.contrib import admin
from .models import User, Question, Voice, Answer
# admin.site.register accepts an iterable of models, so register all four
# with the default ModelAdmin in a single call.
admin.site.register([User, Question, Voice, Answer])
|
{"/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/models.py"], "/main/admin.py": ["/main/models.py"]}
|
18,459
|
alexgrigoras/youtube_consumer_perception
|
refs/heads/master
|
/youtube_sentiment_analysis/modules/display.py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Display data module
@alexandru_grigoras
"""
# Libraries
import operator
import mplcursors
import numpy as np
import scipy
import seaborn as sns
from PyQt5.QtWidgets import QSizePolicy
from matplotlib import ticker
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from mpl_toolkits.axes_grid1 import make_axes_locatable
from wordcloud import WordCloud
# Constants
__all__ = ['DisplayData']
__version__ = '1.0'
__author__ = 'Alexandru Grigoras'
__email__ = 'alex_grigoras_10@yahoo.com'
__status__ = 'release'
class DisplayData(FigureCanvas):
    """Qt-embedded matplotlib canvas that renders the analysis results.

    One plotting method per visualisation: classifier scatter plot,
    2D heatmap, word-frequency bars, wordcloud and accuracy boxplot.
    """

    def __init__(self, parent=None, width=5, height=4, dpi=100):
        """Create the Figure/Axes pair and attach the canvas to *parent*."""
        self.fig = Figure(figsize=(width, height), dpi=dpi)
        self.ax = self.fig.add_subplot(111)
        FigureCanvas.__init__(self, self.fig)
        self.setParent(parent)
        self.__parent = parent
        self.__width = width
        self.__height = height
        FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)
        # True once something has been drawn; clear_plot() checks it.
        self.__fig_flag = False

    def plot_classifiers(self, x_values, y_values, size, color, x_names, y_names,
                         comments, videos, author, comm_time):
        """Scatter-plot the classifier results.

        Point size encodes the like count (mapped to [10, 200]); hovering a
        point (via mplcursors) shows video, comment text, likes, author and
        timestamp.
        """
        # center the axes spines
        self.ax.spines['left'].set_color('none')
        self.ax.spines['right'].set_position('center')
        self.ax.spines['bottom'].set_color('none')
        self.ax.spines['top'].set_position('center')
        self.ax.spines['right'].set_color('gray')
        self.ax.spines['top'].set_color('gray')
        # NOTE(review): Spine.set_smart_bounds() was removed in matplotlib
        # 3.4 — confirm the project pins an older matplotlib version.
        self.ax.spines['left'].set_smart_bounds(True)
        self.ax.spines['bottom'].set_smart_bounds(True)
        self.ax.xaxis.set_ticks_position('bottom')
        self.ax.yaxis.set_ticks_position('left')
        # map the size of points to [10, 200]
        mapped_size = []
        for x in size:
            mapped_size.append(self.__map(x, min(size), max(size), 10, 200))
        # scatter points
        sc = self.ax.scatter(x_values, y_values, c=color, s=mapped_size)
        # labels and limits
        self.ax.set_xlabel(x_names)
        self.ax.set_ylabel(y_names)
        self.ax.set_xlim([-1, 1])
        self.ax.set_ylim([0, 10])
        self.ax.xaxis.set_ticks(np.arange(-1, 1, 0.25))
        self.ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))
        self.ax.yaxis.set_ticks(np.arange(0, 10, 2))
        self.ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))
        self.fig.suptitle("Rezultatele algoritmilor de clasificare")
        # colorbar with label
        divider = make_axes_locatable(self.ax)
        cax = divider.append_axes('right', size='3%', pad=0.05)
        cb = self.fig.colorbar(sc, cax=cax, orientation='vertical')
        cb.set_label('Gradul de încredere')
        c2 = mplcursors.cursor(self.ax)

        @c2.connect("add")
        def _(sel):
            # Build the hover tooltip from the parallel metadata lists.
            sel.annotation.get_bbox_patch().set(fc="white", alpha=0.9)
            sel.annotation.arrow_patch.set(arrowstyle="simple", fc="black", alpha=0.9)
            sel.annotation.set_text("Video: " + videos[sel.target.index] + "\n" +
                                    "Text: " + comments[sel.target.index] + "\n" +
                                    "Likes: " + str(int(size[sel.target.index])) + "\n" +
                                    "Author: " + author[sel.target.index] + "\n" +
                                    "Time: " + comm_time[sel.target.index])
            sel.annotation.draggable(True)
        self.__fig_flag = True
        self.draw()

    def plot_heatmap(self, x_values, y_values, x_names, y_names):
        """Plot a 2D histogram (heatmap) of the classifier results."""
        # Number of bins per axis.
        n_bins = 8
        # Construct a 2D histogram using the 'jet' colormap.
        # BUG FIX: the original unpacked ``h, xedges, xedges, image`` — the
        # second edge array returned by hist2d is the y edges.
        h, xedges, yedges, image = self.ax.hist2d(x_values, y_values, bins=n_bins, cmap='jet', range=[[-1, 1], [0, 10]])
        # colorbar with label
        cb = self.fig.colorbar(image)
        cb.set_label('Număr de recenzii')
        # center the spines and fix the tick layout
        self.ax.spines['left'].set_color('none')
        self.ax.spines['right'].set_position('center')
        self.ax.spines['bottom'].set_color('none')
        self.ax.spines['top'].set_position('center')
        self.ax.spines['right'].set_color('gray')
        self.ax.spines['top'].set_color('gray')
        # NOTE(review): removed in matplotlib 3.4 — see plot_classifiers.
        self.ax.spines['left'].set_smart_bounds(True)
        self.ax.spines['bottom'].set_smart_bounds(True)
        self.ax.xaxis.set_ticks(np.arange(-1, 1, 0.25))
        self.ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))
        self.ax.yaxis.set_ticks(np.arange(0, 10, 2))
        self.ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))
        # title and labels
        self.fig.suptitle('Harta termografică a rezultatelor clasificării')
        self.ax.set_xlabel(x_names)
        self.ax.set_ylabel(y_names)
        self.__fig_flag = True
        self.draw()

    def plot_word_frequency(self, items):
        """Bar-plot the 20 most frequent words from (word, count) pairs."""
        # sort pairs by descending count
        sorted_tuples = sorted(items, key=operator.itemgetter(1), reverse=True)
        a = [i[0] for i in sorted_tuples[0:20]]
        b = [i[1] for i in sorted_tuples[0:20]]
        # BUG FIX: scipy.arange/scipy.array were deprecated numpy re-exports
        # removed in SciPy 1.x; use numpy directly (np is already imported).
        x = np.arange(len(b))
        y = np.array(b)
        color_space = sns.color_palette('viridis', len(x))
        self.ax.bar(x, y, align='center', color=color_space)
        self.ax.set_xticks(x)
        self.ax.set_xticklabels(a, rotation=45)
        self.ax.set_xlabel('Cuvinte')
        self.ax.set_ylabel('Frecvența')
        self.fig.suptitle('Frecvența cuvintelor din recenzii')
        self.__fig_flag = True
        self.draw()

    def plot_wordcloud(self, tokens):
        """Render a wordcloud image from the token list."""
        # Join tokens with spaces, gluing apostrophe-prefixed tokens ('s, 't)
        # directly onto the previous word.
        plain_text = "".join([" " + i if not i.startswith("'") else i for i in tokens]).strip()
        wordcloud = WordCloud(background_color="white", contour_color='steelblue').generate(plain_text)
        self.ax.imshow(wordcloud, interpolation='bilinear')
        self.ax.set_xticks([], [])
        self.ax.set_yticks([], [])
        self.ax.set_xlabel("")
        self.ax.set_ylabel("")
        self.fig.suptitle("Wordcloud")
        self.__fig_flag = True
        self.draw()

    def plot_accuracy(self, results, names):
        """Boxplot the cross-validation accuracy of each classifier."""
        self.fig.suptitle('Acuretețea algoritmilor de clasificare')
        bp = self.ax.boxplot(results, notch=False, patch_artist=True)
        self.ax.set_xlabel('Algoritm de clasificare')
        self.ax.set_ylabel('Acuratețea rezultatelor utilizând metoda Cross-Validation')
        self.ax.set_xticklabels(names)
        # box outline/fill
        for box in bp['boxes']:
            box.set(color='#7570b3', linewidth=2)
            box.set(facecolor='#1b9e77')
        # whiskers
        for whisker in bp['whiskers']:
            whisker.set(color='#7570b3', linewidth=2)
        # caps
        for cap in bp['caps']:
            cap.set(color='#7570b3', linewidth=2)
        # medians
        for median in bp['medians']:
            median.set(color='#b2df8a', linewidth=2)
        # outliers
        for flier in bp['fliers']:
            flier.set(marker='o', color='#e7298a', alpha=0.5)
        self.draw()

    def clear_plot(self):
        """Reset the figure to a single empty Axes if anything was drawn."""
        if self.__fig_flag is True:
            self.fig.clf()
            self.ax = self.fig.add_subplot(111)

    @staticmethod
    def __map(value, left_min, left_max, right_min, right_max):
        """Map *value* from [left_min, left_max] to [right_min, right_max].

        Degenerate source or target intervals collapse to ``right_min``.
        """
        # Degenerate intervals: fall back to the lower target bound.
        if right_min >= right_max:
            return right_min
        if left_min >= left_max:
            return right_min
        # Width of each range.
        left_span = left_max - left_min
        right_span = right_max - right_min
        # NOTE: unreachable after the left_min >= left_max guard above;
        # kept for parity with the original defensive check.
        if left_span == 0:
            return 0
        # Normalise into [0, 1], then scale into the target range.
        value_scaled = float(value - left_min) / float(left_span)
        return right_min + (value_scaled * right_span)
|
{"/youtube_sentiment_analysis/modules/analysis.py": ["/youtube_sentiment_analysis/modules/process.py", "/youtube_sentiment_analysis/modules/store.py", "/youtube_sentiment_analysis/modules/training.py", "/youtube_sentiment_analysis/modules/vote_classifier.py"], "/youtube_sentiment_analysis/modules/crawler.py": ["/youtube_sentiment_analysis/modules/store.py"], "/youtube_sentiment_analysis/__main__.py": ["/youtube_sentiment_analysis/modules/interface.py"], "/youtube_sentiment_analysis/modules/interface.py": ["/youtube_sentiment_analysis/modules/accuracy.py", "/youtube_sentiment_analysis/modules/analysis.py", "/youtube_sentiment_analysis/modules/crawler.py", "/youtube_sentiment_analysis/modules/display.py", "/youtube_sentiment_analysis/modules/store.py", "/youtube_sentiment_analysis/modules/training.py"], "/youtube_sentiment_analysis/modules/training.py": ["/youtube_sentiment_analysis/modules/process.py"], "/youtube_sentiment_analysis/modules/accuracy.py": ["/youtube_sentiment_analysis/modules/process.py", "/youtube_sentiment_analysis/modules/training.py", "/youtube_sentiment_analysis/modules/vote_classifier.py"]}
|
18,460
|
alexgrigoras/youtube_consumer_perception
|
refs/heads/master
|
/youtube_sentiment_analysis/modules/analysis.py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Data analysis module
@alexandru_grigoras
"""
# Libraries
from __future__ import print_function
import time
from nltk.probability import *
from youtube_sentiment_analysis.modules.process import ProcessData
from youtube_sentiment_analysis.modules.sentiment_module import sentiment as anew
from youtube_sentiment_analysis.modules.store import StoreData
from youtube_sentiment_analysis.modules.training import TrainClassifier
from youtube_sentiment_analysis.modules.vote_classifier import VoteClassifier
# Constants
__all__ = ['DataAnalysis']
__version__ = '1.0'
__author__ = 'Alexandru Grigoras'
__email__ = 'alex_grigoras_10@yahoo.com'
__status__ = 'release'
class DataAnalysis:
    """Analyse the data and determine sentiment and word frequency"""
    def __init__(self, keyword, like_threshold_min, like_threshold_max):
        # keyword selects the stored video data set; the thresholds keep
        # only comments whose like count lies in [min, max] (inclusive).
        self.__keyword = keyword
        self.__like_threshold_min = like_threshold_min
        self.__like_threshold_max = like_threshold_max
    def get_data_from_DB(self):
        """Get the downloaded videos data from MongoDB using store module"""
        # create a MongoDB connection (store=False: read-only usage)
        mongo_conn = StoreData(self.__keyword, store=False)
        # get the data from MongoDB
        videos_data = mongo_conn.read()
        return videos_data
    def analyse(self, progress, console):
        """Analyse data and prepare it for display module.

        Returns a tuple (FreqDist, ProcessData, ML sentiment labels, ANEW
        arousal values, likes, confidences, comments, videos, authors,
        comment timestamps) — or None when no tokens were collected.
        ``progress`` is a Qt progress bar, ``console`` a Qt text widget.
        """
        # get the starting time
        start_time = time.time()
        # parallel result lists, one entry per accepted comment
        videos = []
        likes = []
        author = []
        comm_time = []
        comments = []
        sentiment_val = []
        confidence_val = []
        sentiment_anew_arousal = []
        # get machine learning classifiers
        tc = TrainClassifier()
        classifiers = tc.get_classifiers(progress, console)
        # vote classifier object (majority vote over the classifiers)
        voted_classifier = VoteClassifier(classifiers)
        # process data object (tokenising/filtering helper)
        pd = ProcessData()
        # get data
        videos_data = self.get_data_from_DB()
        # NOTE(review): Cursor.count() was removed in pymongo 4.x —
        # confirm the pinned pymongo version still provides it.
        nr_videos = videos_data.count()
        progress_value = 0
        # parse data
        for video in videos_data:
            get_comments = video.get("comments")
            nr_comments = len(get_comments)
            for comment in get_comments:
                # get likes
                like = float(comment.get("nr_likes"))
                # keep only comments within the configured like range
                if self.__like_threshold_min <= like <= self.__like_threshold_max:
                    videos.append(video.get("title"))
                    likes.append(like)
                    author.append(comment.get("author"))
                    comm_time.append(comment.get("time"))
                    # get comments and apply filters
                    comment_text = comment.get("text")
                    comments.append(comment_text)
                    pd.process_text(comment_text)
                    # machine learning algorithms sentiment value with voting system
                    ml_algorithms_sentiment = voted_classifier.classify(comment_text, pd)
                    sentiment_val.append(ml_algorithms_sentiment)
                    # machine learning algorithms confidence value with voting system
                    ml_algorithms_confidence = voted_classifier.confidence(comment_text, pd)
                    confidence_val.append(ml_algorithms_confidence)
                    # get ANEW arousal values
                    anew_result_arousal = anew.sentiment(pd.get_tokens())['arousal']
                    sentiment_anew_arousal.append(anew_result_arousal)
                # NOTE(review): raises ZeroDivisionError for a video with an
                # empty comment list — confirm upstream guarantees comments.
                progress_value += 80 / nr_videos / nr_comments
                progress.setValue(progress_value)
        if not pd.get_all_tokens():
            return
        # FreqDist returns a list of tuples containing each word and the number of its occurences
        fd = FreqDist(pd.get_all_tokens())
        # get the ending time and calculate elapsed time
        end_time = time.time()
        elapsed_time = end_time - start_time
        console.append("> Data processed in " + time.strftime("%H:%M:%S", time.gmtime(elapsed_time)) + " seconds")
        return fd, pd, sentiment_val, sentiment_anew_arousal, likes, confidence_val, comments, videos, author, comm_time
|
{"/youtube_sentiment_analysis/modules/analysis.py": ["/youtube_sentiment_analysis/modules/process.py", "/youtube_sentiment_analysis/modules/store.py", "/youtube_sentiment_analysis/modules/training.py", "/youtube_sentiment_analysis/modules/vote_classifier.py"], "/youtube_sentiment_analysis/modules/crawler.py": ["/youtube_sentiment_analysis/modules/store.py"], "/youtube_sentiment_analysis/__main__.py": ["/youtube_sentiment_analysis/modules/interface.py"], "/youtube_sentiment_analysis/modules/interface.py": ["/youtube_sentiment_analysis/modules/accuracy.py", "/youtube_sentiment_analysis/modules/analysis.py", "/youtube_sentiment_analysis/modules/crawler.py", "/youtube_sentiment_analysis/modules/display.py", "/youtube_sentiment_analysis/modules/store.py", "/youtube_sentiment_analysis/modules/training.py"], "/youtube_sentiment_analysis/modules/training.py": ["/youtube_sentiment_analysis/modules/process.py"], "/youtube_sentiment_analysis/modules/accuracy.py": ["/youtube_sentiment_analysis/modules/process.py", "/youtube_sentiment_analysis/modules/training.py", "/youtube_sentiment_analysis/modules/vote_classifier.py"]}
|
18,461
|
alexgrigoras/youtube_consumer_perception
|
refs/heads/master
|
/setup.py
|
from pip._internal.download import PipSession
from pip._internal.req import parse_requirements
from setuptools import setup, find_packages
from youtube_sentiment_analysis import __version__ as version
def _read_requirements(path="requirements.txt"):
    """Return the package requirements listed in *path*.

    Reads the file directly instead of going through
    ``pip._internal.req.parse_requirements``: pip's internal API is
    unstable and ``pip._internal.download`` (imported above) was removed
    in pip 20, which makes installation of this package fail under any
    recent pip.  Blank lines and ``#`` comments are skipped.
    """
    with open(path, encoding="utf-8") as req_file:
        return [
            line.strip()
            for line in req_file
            if line.strip() and not line.lstrip().startswith("#")
        ]


requirements = _read_requirements()

setup(
    name="youtube_sentiment_analysis",
    version=version,
    author="Alexandru Grigoras",
    author_email="alex_grigoras_10@yahoo.com",
    description="Youtube Sentiment Analysis",
    url="https://bitbucket.org/grigorasalex/youtube_sentiment_analysis/src/master/",
    packages=find_packages(),
    keywords='youtube search sentiment analysis',
    install_requires=requirements,
    zip_safe=True,
    # NOTE(review): only the "Programming Language" and "Operating System"
    # entries below are valid trove classifiers; the other strings are
    # custom and PyPI would reject them -- kept unchanged for now.
    classifiers=[
        'Development Status :: 1.0 - Release',
        "Programming Language :: Python :: 3.6",
        "Artificial Intelligence :: Natural Language Processing",
        "Crawler :: Youtube metadata crawler",
        "Operating System :: OS Independent",
    ],
)
|
{"/youtube_sentiment_analysis/modules/analysis.py": ["/youtube_sentiment_analysis/modules/process.py", "/youtube_sentiment_analysis/modules/store.py", "/youtube_sentiment_analysis/modules/training.py", "/youtube_sentiment_analysis/modules/vote_classifier.py"], "/youtube_sentiment_analysis/modules/crawler.py": ["/youtube_sentiment_analysis/modules/store.py"], "/youtube_sentiment_analysis/__main__.py": ["/youtube_sentiment_analysis/modules/interface.py"], "/youtube_sentiment_analysis/modules/interface.py": ["/youtube_sentiment_analysis/modules/accuracy.py", "/youtube_sentiment_analysis/modules/analysis.py", "/youtube_sentiment_analysis/modules/crawler.py", "/youtube_sentiment_analysis/modules/display.py", "/youtube_sentiment_analysis/modules/store.py", "/youtube_sentiment_analysis/modules/training.py"], "/youtube_sentiment_analysis/modules/training.py": ["/youtube_sentiment_analysis/modules/process.py"], "/youtube_sentiment_analysis/modules/accuracy.py": ["/youtube_sentiment_analysis/modules/process.py", "/youtube_sentiment_analysis/modules/training.py", "/youtube_sentiment_analysis/modules/vote_classifier.py"]}
|
18,462
|
alexgrigoras/youtube_consumer_perception
|
refs/heads/master
|
/youtube_sentiment_analysis/modules/crawler.py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Web crawler module
@alexandru_grigoras
"""
# Libraries
import json
import multiprocessing
import queue
import time
import urllib.request
from urllib import robotparser
import lxml.html
import requests
from bs4 import BeautifulSoup
from lxml.cssselect import CSSSelector
from youtube_sentiment_analysis.modules.store import StoreData
# Constants
__all__ = ['WebCrawler']
__version__ = '1.0'
__author__ = 'Alexandru Grigoras'
__email__ = 'alex_grigoras_10@yahoo.com'
__status__ = 'release'
YOUTUBE_URL_SEARCH = "https://www.youtube.com/results?search_query="
YOUTUBE_PAGE_ARG = "&page="
YOUTUBE_URL = "https://www.youtube.com"
YOUTUBE_COMMENTS_URL = 'https://www.youtube.com/all_comments?v={youtube_id}'
YOUTUBE_COMMENTS_AJAX_URL = 'https://www.youtube.com/comment_ajax'
USER_AGENT = 'AG_SENTIMENT_ANALYSIS_BOT'
class Video:
    """Mutable record describing a single YouTube search result.

    Holds the title, link, video id, description and the like/dislike
    counters; the crawler fills the fields in incrementally through the
    setters below.
    """

    def __init__(self, title, link, description, likes=None, dislikes=None):
        """Capture the initial metadata; the video id starts unset."""
        self._title = title
        self._link = link
        self._video_id = None
        self._description = description
        self._likes = likes
        self._dislikes = dislikes

    def set_title(self, title):
        """Replace the stored title."""
        self._title = title

    def set_link(self, link):
        """Replace the stored link."""
        self._link = link

    def set_id(self, video_id):
        """Replace the stored video id."""
        self._video_id = video_id

    def set_description(self, description):
        """Replace the stored description."""
        self._description = description

    def set_likes(self, likes):
        """Replace the stored like count."""
        self._likes = likes

    def set_dislikes(self, dislikes):
        """Replace the stored dislike count."""
        self._dislikes = dislikes

    def get_title(self):
        """Return the stored title."""
        return self._title

    def get_link(self):
        """Return the stored link."""
        return self._link

    def get_id(self):
        """Return the stored video id (None until set_id is called)."""
        return self._video_id

    def get_description(self):
        """Return the stored description."""
        return self._description

    def get_likes(self):
        """Return the stored like count."""
        return self._likes

    def get_dislikes(self):
        """Return the stored dislike count."""
        return self._dislikes

    def display(self, fp=None):
        """Pretty-print the video's fields to *fp* (stdout when None)."""
        indent = "    "
        try:
            print("\t> Title: " + self._title, file=fp)
        except IOError:
            print(indent + "> Invalid title!", file=fp)
        try:
            print(indent + "Link: " + self._link, file=fp)
        except IOError:
            print(indent + "> Invalid link!", file=fp)
        try:
            print(indent + "Description: " + self._description, file=fp)
        except IOError:
            print(indent + "> Invalid description!", file=fp)
        try:
            print(indent + "Like: %s, Dislike: %s" % (self._likes, self._dislikes), file=fp)
        except IOError:
            print(indent + "> No likes / dislikes!", file=fp)
class RobotParser:
    """Thin wrapper around urllib's robots.txt parser for youtube.com."""

    def __init__(self):
        """Create the underlying RobotFileParser; nothing is fetched yet."""
        self._parser = robotparser.RobotFileParser()

    def parse_domain(self):
        """Download and parse youtube.com/robots.txt.

        The crawl request-rate is read as well, but the value is not
        currently used anywhere (the reporting prints are disabled).
        """
        self._parser.set_url(YOUTUBE_URL + "/robots.txt")
        self._parser.read()
        rate = self._parser.request_rate("*")
        if rate is not None:
            # kept for parity with the original code: the parsed rate is
            # extracted but never consumed
            _requests_allowed = rate.requests
            _per_seconds = rate.seconds
            #print("> Parameters: ")
            #print("\t - request-rate: " + str(_requests_allowed) + "/" + str(_per_seconds) + "s")
            # TO DO: add other parameters to test

    def can_extract(self, link):
        """Return True when robots.txt permits any agent to fetch *link*."""
        return self._parser.can_fetch("*", link)
class Downloader(multiprocessing.Process):
    """Worker process that crawls a batch of videos and stores their comments.

    Each worker receives a list of Video objects, downloads every video
    page for like/dislike counts, then pages through YouTube's comment
    AJAX endpoint.  Results are written to MongoDB through StoreData.
    Unexpected exceptions are captured and exposed via get_exception()
    so the parent process can report them after join().
    """

    def __init__(self, name, keyword, videos_list):
        """Initialise the worker.

        :param name: display name used only for the startup message
        :param keyword: search keyword; doubles as the MongoDB collection name
        :param videos_list: the Video objects this worker must process
        """
        multiprocessing.Process.__init__(self)
        self.__keyword = keyword
        self.__videos_list = videos_list
        self.__exception = None   # populated by run() on failure
        self.__mongo_conn = None  # created in run(); connections must not cross processes
        print("> Initialize worker " + name + " with " + str(len(videos_list)) + " videos")

    def run(self):
        """Process entry point: crawl every assigned video."""
        self.__mongo_conn = StoreData(self.__keyword, store=True)
        try:
            # search every video for metadata
            for video in self.__videos_list:
                try:
                    print("> Crawling " + video.get_title())
                    self.__video_process(video)
                except AttributeError:
                    print("> Extracted data from video is invalid (AttributeError)!")
                except IndexError:
                    print("> Extracted data from video is invalid (IndexError)!")
        except Exception as e:
            # remember the failure so the parent can read it via get_exception()
            self.__exception = e

    def __video_process(self, video):
        """Download a video's page, fill in its metadata and crawl its comments."""
        video_id_path = video.get_link()
        video_id = video_id_path.replace("/watch?v=", "")
        url_video = YOUTUBE_URL + video_id_path
        # honour robots.txt before fetching the page
        rp = RobotParser()
        rp.parse_domain()
        if rp.can_extract(url_video) is False:
            print("> Page cannot be crawled: " + url_video)
            return
        headers = {'User-Agent': USER_AGENT}
        req = urllib.request.Request(url_video, headers=headers)
        search_content = urllib.request.urlopen(req)
        search_content_html = BeautifulSoup(search_content, 'lxml')
        try:
            like = search_content_html.findAll('button', {"class": "like-button-renderer-like-button"})
            likes = like[0].span.text
        except IndexError:
            # the like button is absent e.g. when ratings are disabled
            likes = 0
        try:
            dislike = search_content_html.findAll('button', {"class": "like-button-renderer-dislike-button"})
            dislikes = dislike[0].span.text
        except IndexError:
            dislikes = 0
        # create a video
        video.set_link(url_video)
        video.set_likes(likes)
        video.set_dislikes(dislikes)
        video.set_id(video_id)
        # BUG FIX: the original compared ints with `is` / `is not`, which only
        # works because CPython caches small integers and emits a
        # SyntaxWarning on Python 3.8+; use ==/!= for value comparison.
        if video_id_path.find("channel") == -1 and video_id_path.find("user") == -1:
            self.__metadata_extractor(video)
        elif video_id_path.find("channel") != -1:
            print("> " + video.get_title() + " is a channel")
        elif video_id_path.find("user") != -1:
            print("> " + video.get_title() + " is a user")
        else:
            print("> " + video.get_title() + " is unknown")

    def __metadata_extractor(self, video):
        """Download the comments of *video* and report how many were found."""
        count = self.__download_comments(video)
        print('> Downloading ' + str(count) + ' comments for video: ', video.get_title(), ' (', video.get_id(), ')')

    def __download_comments(self, video=None, sleep=0):
        """Page through YouTube's comment AJAX endpoint and store every comment.

        :param video: Video whose comments are fetched.  NOTE(review): the
            None default would crash on video.get_id(); callers always pass
            a video -- kept for interface compatibility.
        :param sleep: delay in seconds between AJAX requests
        :return: the number of comments written to the store
        """
        session = requests.Session()
        session.headers['User-Agent'] = USER_AGENT
        # get Youtube page with initial comments
        response = session.get(YOUTUBE_COMMENTS_URL.format(youtube_id=video.get_id()))
        html = response.text
        reply_comments = self.__extract_reply_comments(html)
        nr_comments = 0
        nr_comments += self.__extract_comments(html, video)
        page_token = self.__find_token(html, 'data-token')
        session_token = self.__find_token(html, 'XSRF_TOKEN', 4)
        first_iteration = True
        # get remaining comments: follow page tokens until none is returned
        while page_token:
            data = {'video_id': video.get_id(),
                    'session_token': session_token}
            params = {'action_load_comments': 1,
                      'order_by_time': True,
                      'filter': video.get_id()}
            if first_iteration:
                params['order_menu'] = True
            else:
                data['page_token'] = page_token
            response = self.__ajax_request(session, YOUTUBE_COMMENTS_AJAX_URL, params, data)
            if not response:
                break
            page_token, html = response
            reply_comments += self.__extract_reply_comments(html)
            nr_comments += self.__extract_comments(html, video)
            first_iteration = False
            time.sleep(sleep)
        # get replies for every collapsed reply thread found above
        for cid in reply_comments:
            data = {'comment_id': cid,
                    'video_id': video.get_id(),
                    'can_reply': 1,
                    'session_token': session_token}
            params = {'action_load_replies': 1,
                      'order_by_time': True,
                      'filter': video.get_id(),
                      'tab': 'inbox'}
            response = self.__ajax_request(session, YOUTUBE_COMMENTS_AJAX_URL, params, data)
            if not response:
                break
            _, html = response
            nr_comments += self.__extract_comments(html, video)
            time.sleep(sleep)
        return nr_comments

    @staticmethod
    def __extract_reply_comments(html):
        """Return the comment ids that have collapsed reply threads to load."""
        tree = lxml.html.fromstring(html)
        sel = CSSSelector('.comment-replies-header > .load-comments')
        return [i.get('data-cid') for i in sel(tree)]

    def __extract_comments(self, html, video):
        """Parse comments out of *html*, persist each one and return the count."""
        tree = lxml.html.fromstring(html)
        item_sel = CSSSelector('.comment-item')
        text_sel = CSSSelector('.comment-text-content')
        time_sel = CSSSelector('.time')
        author_sel = CSSSelector('.user-name')
        like_sel = CSSSelector('.like-count')
        nr_comments = 0
        for item in item_sel(tree):
            self.__mongo_conn.write(video,
                                    item.get('data-cid'),
                                    text_sel(item)[0].text_content(),
                                    time_sel(item)[0].text_content().strip(),
                                    author_sel(item)[0].text_content(),
                                    like_sel(item)[0].text_content(),
                                    )
            nr_comments += 1
        return nr_comments

    @staticmethod
    def __find_token(html, key, num_chars=2):
        """Return the quoted value that follows *key* in *html*.

        *num_chars* skips the separator characters between the key and the
        opening quote of its value.
        """
        begin = html.find(key) + len(key) + num_chars
        end = html.find('"', begin)
        return html[begin: end]

    @staticmethod
    def __ajax_request(session, url, params, data, retries=1, sleep=0):
        """POST to the comments AJAX endpoint, retrying up to *retries* times.

        :return: (page_token, html) on success, or None when every retry fails
        """
        for _ in range(retries):
            response = session.post(url, params=params, data=data)
            if response.status_code == 200:
                response_dict = json.loads(response.text)
                return response_dict.get('page_token', None), response_dict['html_content']
            else:
                time.sleep(sleep)

    def get_exception(self):
        """Return the exception captured in run(), or None if it succeeded."""
        return self.__exception
class WebCrawler:
    """Search YouTube for a keyword and fan comment crawling out to workers.

    The search result pages are scraped for videos, which are queued and
    then split between Downloader worker processes (at most one per CPU
    core).
    """

    def __init__(self, keyword, nr_videos, crawl_delay):
        """Store the search keyword, the video limit and the delay between pages."""
        self.__keyword = keyword
        self.__nr_videos = nr_videos
        self.__crawl_delay = crawl_delay
        # videos found by __get_videos, consumed by the Downloader workers
        self.__videos_queue = queue.Queue(maxsize=1000)

    def run(self, console):
        """Run the crawler, reporting progress to the GUI *console*; returns True."""
        # get the starting time
        start_time = time.time()
        # start the main processing based on arguments
        self.__search_pages(console)
        # get the finish time and calculate the script execution time
        end_time = time.time()
        elapsed_time = end_time - start_time
        console.append("> Data extracted in " + time.strftime("%H:%M:%S", time.gmtime(elapsed_time)) + " seconds")
        return True

    def __search_pages(self, console):
        """Walk the result pages, queue the videos and run the worker processes."""
        console.append("> Searching for: " + self.__keyword)
        # check limit of pages and videos
        if self.__nr_videos < 1:
            console.append("> The number of videos should be > 0")
        elif self.__nr_videos == 1:
            console.append("> Limit the search to: " + str(self.__nr_videos) + " video")
        else:
            console.append("> Limit the search to: " + str(self.__nr_videos) + " videos")
        extracted_videos = 0
        max_nr_pages = 50
        current_page = 0
        # add the videos to the queue
        while extracted_videos < self.__nr_videos and current_page <= max_nr_pages:
            current_page += 1  # fixed: was the confusing `current_page += + 1`
            url = YOUTUBE_URL_SEARCH + self.__keyword.replace(" ", "%20") + YOUTUBE_PAGE_ARG + str(current_page)
            try:
                value = self.__get_videos(url, self.__nr_videos, console)
                # BUG FIX: the original tested `value is -1` (int identity) and
                # then still added the -1 to extracted_videos, retrying up to
                # 50 pages with no connection; report the error once and stop.
                if value == -1:
                    console.append("> There isn't an internet connection! Please connect to the internet to get data from videos!")
                    print("no internet")
                    return
                extracted_videos += value
            except Exception:
                console.append("> There isn't an internet connection! Please connect to the internet to get data from videos!")
                return
            time.sleep(self.__crawl_delay)
        # get the number of threads
        nr_threads = multiprocessing.cpu_count()
        # page threads list
        page_processes = []
        # calculate the number of videos for each thread to be processed
        nr_videos_in_queue = self.__videos_queue.qsize()
        console.append("> Number of videos found: " + str(nr_videos_in_queue))
        if nr_videos_in_queue == 0:
            print("> Cannot run crawling with no videos!")
            console.append("> Cannot run crawling with no videos!")
        # create the workers to process the videos
        if nr_videos_in_queue <= nr_threads:
            # fewer videos than cores: one video per worker
            for i in range(0, nr_videos_in_queue, 1):
                videos_list = []
                if self.__videos_queue.empty() is False:
                    videos_list.append(self.__videos_queue.get())
                process = Downloader(str(i), self.__keyword, videos_list)
                page_processes.append(process)
        else:
            # spread the videos evenly; the first workers take one extra
            video_per_thread = int(nr_videos_in_queue / nr_threads)
            remaining_videos = nr_videos_in_queue % nr_threads
            for i in range(0, nr_threads, 1):
                videos_list = []
                index = 0
                if remaining_videos > 0:
                    total_videos = video_per_thread + 1
                    remaining_videos -= 1
                else:
                    total_videos = video_per_thread
                while self.__videos_queue.empty() is False and index < total_videos:
                    videos_list.append(self.__videos_queue.get())
                    index += 1
                process = Downloader(str(i), self.__keyword, videos_list)
                page_processes.append(process)
        # start the workers
        for process in page_processes:
            process.start()
        # wait for each worker to finish the processing
        for process in page_processes:
            process.join()
        # check if there were any errors on workers
        for t in page_processes:
            e = t.get_exception()
            if e:
                # BUG FIX: the original concatenated str + Exception, which
                # raises TypeError exactly when an error should be reported
                console.append("> Error on process:" + str(e))

    def __get_videos(self, url, max_nr_videos=None, console=None):
        """Scrape one search-result page and queue its videos.

        :param url: full search-results URL to fetch
        :param max_nr_videos: optional cap on how many results to queue
        :param console: GUI console for error messages
        :return: the number of result links on the page, or -1 when the
            request could not be made
        """
        # set header for request
        headers = {'User-Agent': USER_AGENT}
        req = urllib.request.Request(url, headers=headers)
        try:
            # send request
            try:
                search_result = urllib.request.urlopen(req)
            except urllib.error.URLError:
                print('Cannot make request')
                return -1
            soup = BeautifulSoup(search_result, 'lxml')
            description = soup.findAll('div', {"class": "yt-lockup-description"})
            title_link = soup.findAll('a', {"class": "yt-uix-tile-link"})
            # check the number of videos
            if len(title_link) == 0:
                return 0
            # FIX: never index past the number of links actually found; the
            # original could request more than len(title_link) and spam
            # IndexError messages to the console
            if max_nr_videos:
                selected_nr_videos = min(max_nr_videos, len(title_link))
            else:
                selected_nr_videos = len(title_link)
            # search every video for metadata
            for video in range(0, selected_nr_videos, 1):
                try:
                    # put the video in the queue
                    current_video = Video(title_link[video]['title'],
                                          title_link[video]['href'],
                                          description[video].text)
                    self.__videos_queue.put(current_video)
                except AttributeError:
                    console.append("> Video cannot be put to queue (AttributeError)!")
                except IndexError:
                    console.append("> Video cannot be put to queue (IndexError)!")
            # returns the number of the videos found
            return len(title_link)
        except urllib.error.HTTPError:
            console.append("> HTTP request error: Too many requests")
            return 0
|
{"/youtube_sentiment_analysis/modules/analysis.py": ["/youtube_sentiment_analysis/modules/process.py", "/youtube_sentiment_analysis/modules/store.py", "/youtube_sentiment_analysis/modules/training.py", "/youtube_sentiment_analysis/modules/vote_classifier.py"], "/youtube_sentiment_analysis/modules/crawler.py": ["/youtube_sentiment_analysis/modules/store.py"], "/youtube_sentiment_analysis/__main__.py": ["/youtube_sentiment_analysis/modules/interface.py"], "/youtube_sentiment_analysis/modules/interface.py": ["/youtube_sentiment_analysis/modules/accuracy.py", "/youtube_sentiment_analysis/modules/analysis.py", "/youtube_sentiment_analysis/modules/crawler.py", "/youtube_sentiment_analysis/modules/display.py", "/youtube_sentiment_analysis/modules/store.py", "/youtube_sentiment_analysis/modules/training.py"], "/youtube_sentiment_analysis/modules/training.py": ["/youtube_sentiment_analysis/modules/process.py"], "/youtube_sentiment_analysis/modules/accuracy.py": ["/youtube_sentiment_analysis/modules/process.py", "/youtube_sentiment_analysis/modules/training.py", "/youtube_sentiment_analysis/modules/vote_classifier.py"]}
|
18,463
|
alexgrigoras/youtube_consumer_perception
|
refs/heads/master
|
/youtube_sentiment_analysis/modules/store.py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Store data module
@alexandru_grigoras
"""
# Libraries
import pymongo
# Constants
__all__ = ['StoreData']
__version__ = '1.0'
__author__ = 'Alexandru Grigoras'
__email__ = 'alex_grigoras_10@yahoo.com'
__status__ = 'release'
DATABASE_NAME = "sentiment_analysis"
class StoreData:
    """Persistence layer: stores crawled comments in a local MongoDB.

    One instance wraps a single collection (named after the search
    keyword) inside the sentiment-analysis database.
    """

    def __init__(self, keyword=None, store=True, console=None):
        """Connect to MongoDB and select/create the keyword collection.

        :param keyword: collection name; None selects no collection
        :param store: when False the collection must already exist,
            otherwise the whole application is terminated (SystemExit)
        :param console: optional GUI console for status messages
        """
        try:
            # NOTE(review): MongoClient connects lazily, so this except
            # mostly guards against bad configuration rather than a missing
            # server; the first real failure surfaces in list_database_names.
            self.__my_client = pymongo.MongoClient("mongodb://localhost:27017/")
        except Exception:
            if console:
                console.append("> MongoDB database connection is closed or MongoDB is not installed!")
            return
        self.__db_list = self.__my_client.list_database_names()
        if DATABASE_NAME not in self.__db_list:
            if console:
                console.append("> Database " + DATABASE_NAME + " does not exists. Creating it!")
        # FIX: use the DATABASE_NAME constant instead of a duplicated
        # hard-coded "sentiment_analysis" literal (inconsistency hazard)
        self.__my_db = self.__my_client[DATABASE_NAME]
        self.__col_list = self.__my_db.list_collection_names()
        if keyword:
            if keyword not in self.__col_list:
                if not store:
                    if console:
                        console.append("> Collection does not exist! Extract data first!")
                    # FIX: exit() is a site-module REPL helper and may be
                    # absent; raise SystemExit directly (same runtime effect)
                    raise SystemExit
                if console:
                    console.append("> Collection " + keyword + " does not exists. Creating it!")
            self.__my_col = self.__my_db[keyword]

    def write(self, video, cid, text, time, author, nr_likes):
        """Upsert one comment into the video's document.

        The video metadata forms the document identity; the comment is
        appended to the 'comments' array with $addToSet so re-crawled
        comments are not duplicated.
        """
        my_query = {
            '_id': video.get_id() if video else "",
            'title': video.get_title() if video else "",
            'description': video.get_description() if video else "",
            'nr_likes': video.get_likes() if video else "",
            'nr_dislikes': video.get_dislikes() if video else "",
        }
        new_values = {
            "$addToSet":
                {
                    'comments':
                        {
                            'cid': cid,
                            'text': text,
                            'time': time,
                            'author': author,
                            'nr_likes': nr_likes,
                        }
                }
        }
        self.__my_col.update_one(my_query, new_values, upsert=True)

    def read(self):
        """Return a cursor over every document in the selected collection."""
        return self.__my_col.find()

    def get_collections(self):
        """Return the collection names found in the database at connect time."""
        return self.__col_list
|
{"/youtube_sentiment_analysis/modules/analysis.py": ["/youtube_sentiment_analysis/modules/process.py", "/youtube_sentiment_analysis/modules/store.py", "/youtube_sentiment_analysis/modules/training.py", "/youtube_sentiment_analysis/modules/vote_classifier.py"], "/youtube_sentiment_analysis/modules/crawler.py": ["/youtube_sentiment_analysis/modules/store.py"], "/youtube_sentiment_analysis/__main__.py": ["/youtube_sentiment_analysis/modules/interface.py"], "/youtube_sentiment_analysis/modules/interface.py": ["/youtube_sentiment_analysis/modules/accuracy.py", "/youtube_sentiment_analysis/modules/analysis.py", "/youtube_sentiment_analysis/modules/crawler.py", "/youtube_sentiment_analysis/modules/display.py", "/youtube_sentiment_analysis/modules/store.py", "/youtube_sentiment_analysis/modules/training.py"], "/youtube_sentiment_analysis/modules/training.py": ["/youtube_sentiment_analysis/modules/process.py"], "/youtube_sentiment_analysis/modules/accuracy.py": ["/youtube_sentiment_analysis/modules/process.py", "/youtube_sentiment_analysis/modules/training.py", "/youtube_sentiment_analysis/modules/vote_classifier.py"]}
|
18,464
|
alexgrigoras/youtube_consumer_perception
|
refs/heads/master
|
/youtube_sentiment_analysis/__main__.py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Youtube Sentiment Analysis - Main module
@alexandru_grigoras
"""
# Libraries
import sys
from PyQt5.QtWidgets import QApplication
from youtube_sentiment_analysis.modules.interface import SentimentAnalysisApplication
# Constants
__all__ = []
__version__ = '1.0'
__author__ = 'Alexandru Grigoras'
# Script entry point: start the GUI only when executed directly, not on import.
if __name__ == "__main__":
    """Main function that starts the application"""
    # Qt application object; owns the event loop and the command-line args
    app = QApplication(sys.argv)
    # main window class defined in modules/interface.py
    window = SentimentAnalysisApplication()
    window.show()
    # exec_() blocks until the last window closes; its status code is
    # forwarded to the shell via sys.exit
    sys.exit(app.exec_())
|
{"/youtube_sentiment_analysis/modules/analysis.py": ["/youtube_sentiment_analysis/modules/process.py", "/youtube_sentiment_analysis/modules/store.py", "/youtube_sentiment_analysis/modules/training.py", "/youtube_sentiment_analysis/modules/vote_classifier.py"], "/youtube_sentiment_analysis/modules/crawler.py": ["/youtube_sentiment_analysis/modules/store.py"], "/youtube_sentiment_analysis/__main__.py": ["/youtube_sentiment_analysis/modules/interface.py"], "/youtube_sentiment_analysis/modules/interface.py": ["/youtube_sentiment_analysis/modules/accuracy.py", "/youtube_sentiment_analysis/modules/analysis.py", "/youtube_sentiment_analysis/modules/crawler.py", "/youtube_sentiment_analysis/modules/display.py", "/youtube_sentiment_analysis/modules/store.py", "/youtube_sentiment_analysis/modules/training.py"], "/youtube_sentiment_analysis/modules/training.py": ["/youtube_sentiment_analysis/modules/process.py"], "/youtube_sentiment_analysis/modules/accuracy.py": ["/youtube_sentiment_analysis/modules/process.py", "/youtube_sentiment_analysis/modules/training.py", "/youtube_sentiment_analysis/modules/vote_classifier.py"]}
|
18,465
|
alexgrigoras/youtube_consumer_perception
|
refs/heads/master
|
/youtube_sentiment_analysis/modules/vote_classifier.py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Voting system for classifiers
@alexandru_grigoras
"""
# Libraries
from statistics import mean
from statistics import mode
from nltk.sentiment import SentimentIntensityAnalyzer
from youtube_sentiment_analysis.modules.sentiment_module import sentiment as anew
# Constants
__all__ = ['VoteClassifier']
__version__ = '1.0'
__author__ = 'Alexandru Grigoras'
__email__ = 'alex_grigoras_10@yahoo.com'
__status__ = 'release'
class VoteClassifier:
    """Combine several sentiment sources into a single vote.

    The ensemble mixes the trained ML classifiers with NLTK's VADER
    analyser and the ANEW lexicon: their scores are averaged for the
    sentiment value, and a majority vote yields a confidence score.
    """

    def __init__(self, classifiers):
        """Keep the ML classifiers and build the shared VADER analyser."""
        self._ml_classifiers = classifiers   # trained ML classifiers
        self._vader = SentimentIntensityAnalyzer()  # VADER classifier

    def classify(self, comment_text, pd):
        """Return the mean sentiment of *comment_text* over all voters, in [-1, 1]."""
        features = pd.get_word_feature()
        # each ML classifier votes with P(pos) - P(neg)
        votes = [
            float(c.prob_classify(features).prob('pos') - c.prob_classify(features).prob('neg'))
            for c in self._ml_classifiers
        ]
        # VADER votes with its compound score (already in [-1, 1])
        votes.append(self._vader.polarity_scores(comment_text)["compound"])
        # ANEW valence is on a 0..10 scale; rescale it to [-1, 1]
        anew_valence = anew.sentiment(pd.get_tokens())['valence']
        votes.append(self.map(anew_valence, 0, 10, -1, 1))
        return mean(votes)

    def confidence(self, comment_text, pd):
        """Return the fraction of voters agreeing with the majority label."""
        features = pd.get_word_feature()
        votes = [c.classify(features) for c in self._ml_classifiers]
        # VADER: non-negative compound counts as positive
        compound = self._vader.polarity_scores(comment_text)["compound"]
        votes.append("pos" if compound >= 0 else "neg")
        # ANEW: values above this author-chosen 5.8 threshold count as positive
        anew_valence = anew.sentiment(pd.get_tokens())['valence']
        votes.append("pos" if anew_valence >= 5.8 else "neg")
        winner_count = votes.count(mode(votes))
        return winner_count / float(len(votes))

    @staticmethod
    def map(value, left_min, left_max, right_min, right_max):
        """Linearly map *value* from [left_min, left_max] onto [right_min, right_max]."""
        left_span = left_max - left_min
        right_span = right_max - right_min
        # normalise into [0, 1], then stretch/shift onto the target range
        normalised = float(value - left_min) / float(left_span)
        return right_min + (normalised * right_span)
|
{"/youtube_sentiment_analysis/modules/analysis.py": ["/youtube_sentiment_analysis/modules/process.py", "/youtube_sentiment_analysis/modules/store.py", "/youtube_sentiment_analysis/modules/training.py", "/youtube_sentiment_analysis/modules/vote_classifier.py"], "/youtube_sentiment_analysis/modules/crawler.py": ["/youtube_sentiment_analysis/modules/store.py"], "/youtube_sentiment_analysis/__main__.py": ["/youtube_sentiment_analysis/modules/interface.py"], "/youtube_sentiment_analysis/modules/interface.py": ["/youtube_sentiment_analysis/modules/accuracy.py", "/youtube_sentiment_analysis/modules/analysis.py", "/youtube_sentiment_analysis/modules/crawler.py", "/youtube_sentiment_analysis/modules/display.py", "/youtube_sentiment_analysis/modules/store.py", "/youtube_sentiment_analysis/modules/training.py"], "/youtube_sentiment_analysis/modules/training.py": ["/youtube_sentiment_analysis/modules/process.py"], "/youtube_sentiment_analysis/modules/accuracy.py": ["/youtube_sentiment_analysis/modules/process.py", "/youtube_sentiment_analysis/modules/training.py", "/youtube_sentiment_analysis/modules/vote_classifier.py"]}
|
18,466
|
alexgrigoras/youtube_consumer_perception
|
refs/heads/master
|
/youtube_sentiment_analysis/modules/process.py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Process data module
@alexandru_grigoras
"""
# Libraries
import re
from nltk.corpus import stopwords
# Constants
__all__ = ['ProcessData']
__version__ = '1.0'
__author__ = 'Alexandru Grigoras'
__email__ = 'alex_grigoras_10@yahoo.com'
__status__ = 'release'
class ProcessData:
    """Tokenise and clean raw comment text for the analysis module.

    Two token stores are kept: the tokens of the most recently processed
    comment (reset on every process_text call) and the accumulated tokens
    of every comment processed so far.
    """

    def __init__(self):
        """Start with empty per-comment and accumulated token lists."""
        self._current = []   # tokens of the latest comment
        self._history = []   # tokens of every comment so far

    def process_text(self, text):
        """Lowercase, strip punctuation and filter *text*, updating both stores."""
        self._current.clear()
        raw_tokens = [word.lower().strip(":,.!?") for word in text.split()]
        cleaned = self.__filter_text(raw_tokens)
        self._current.extend(cleaned)
        self._history.extend(cleaned)

    @staticmethod
    def __filter_text(tokens):
        """Drop irrelevant tokens, keeping only meaningful words.

        Removes English stopwords, non-alphabetic tokens, words shorter
        than three characters, and tokens that look like hashtags (#, +),
        mentions (@) or links (http/www).
        """
        stop_list = stopwords.words('english')
        extra_stops = []  # placeholder for project-specific stopwords
        kept = []
        for word in tokens:
            if word in stop_list or word in extra_stops:
                continue
            if not word.isalpha() or len(word) < 3:
                continue
            if word.startswith(('#', '+', '@', 'http', 'www')):
                continue
            kept.append(word)
        return kept

    @staticmethod
    def __word_verify(word):
        """Return *word* lowercased when it is letters/underscores only, else ''."""
        return word.lower() if re.match("^[a-zA-Z_]*$", word) else ''

    def get_tokens(self):
        """Return the tokens of the most recently processed comment."""
        return self._current

    def get_all_tokens(self):
        """Return all tokens accumulated across processed comments."""
        return self._history

    def get_word_feature(self, tokens=None):
        """Map each verified token to True, as expected by the NLTK classifiers.

        NOTE: an empty *tokens* list (falsy) falls back to the current
        tokens, matching the original truthiness-based behaviour.
        """
        source = tokens if tokens else self._current
        return {self.__word_verify(word): True for word in source}
|
{"/youtube_sentiment_analysis/modules/analysis.py": ["/youtube_sentiment_analysis/modules/process.py", "/youtube_sentiment_analysis/modules/store.py", "/youtube_sentiment_analysis/modules/training.py", "/youtube_sentiment_analysis/modules/vote_classifier.py"], "/youtube_sentiment_analysis/modules/crawler.py": ["/youtube_sentiment_analysis/modules/store.py"], "/youtube_sentiment_analysis/__main__.py": ["/youtube_sentiment_analysis/modules/interface.py"], "/youtube_sentiment_analysis/modules/interface.py": ["/youtube_sentiment_analysis/modules/accuracy.py", "/youtube_sentiment_analysis/modules/analysis.py", "/youtube_sentiment_analysis/modules/crawler.py", "/youtube_sentiment_analysis/modules/display.py", "/youtube_sentiment_analysis/modules/store.py", "/youtube_sentiment_analysis/modules/training.py"], "/youtube_sentiment_analysis/modules/training.py": ["/youtube_sentiment_analysis/modules/process.py"], "/youtube_sentiment_analysis/modules/accuracy.py": ["/youtube_sentiment_analysis/modules/process.py", "/youtube_sentiment_analysis/modules/training.py", "/youtube_sentiment_analysis/modules/vote_classifier.py"]}
|
18,467
|
alexgrigoras/youtube_consumer_perception
|
refs/heads/master
|
/youtube_sentiment_analysis/modules/interface.py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
User interface
@alexandru_grigoras
"""
# Libraries
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtWidgets import QMainWindow, QMessageBox, QAbstractItemView
from pymongo.errors import PyMongoError
from youtube_sentiment_analysis.modules.accuracy import TestAccuracy
from youtube_sentiment_analysis.modules.analysis import DataAnalysis
from youtube_sentiment_analysis.modules.crawler import WebCrawler
from youtube_sentiment_analysis.modules.display import DisplayData
from youtube_sentiment_analysis.modules.store import StoreData
from youtube_sentiment_analysis.modules.training import TrainClassifier
# Constants
__all__ = ['SentimentAnalysisApplication']
__version__ = '1.0'
__author__ = 'Alexandru Grigoras'
__email__ = 'alex_grigoras_10@yahoo.com'
__status__ = 'release'
class UIMainWindow(object):
"""Main User Interface"""
    def setupUi(self, MainWindow):
        """Create and lay out every widget of the main window.

        Builds the tabbed data area (sentiment, heatmap, word frequency,
        word cloud, comments, accuracy, settings), the search / data /
        analyse group boxes, the console, the menu bar with File/Help
        actions, and one DisplayData canvas per result tab, then wires
        the button signals to the private click slots.

        All geometry is fixed-pixel (window is 1366x768, not resizable
        in a layout-managed way) — the code follows the Qt Designer
        generated style.

        :param MainWindow: the QMainWindow instance to populate
        """
        # main window
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1366, 768)
        MainWindow.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)
        MainWindow.setTabShape(QtWidgets.QTabWidget.Rounded)
        # central widget holding the tab widget and the left-hand panels
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.tabWidgetData = QtWidgets.QTabWidget(self.centralwidget)
        self.tabWidgetData.setGeometry(QtCore.QRect(390, 20, 951, 691))
        self.tabWidgetData.setObjectName("tabWidgetData")
        # sentiment page
        self.sentiment = QtWidgets.QWidget()
        self.sentiment.setObjectName("sentiment")
        self.tabWidgetData.addTab(self.sentiment, "")
        # heatmap page
        self.heatmap = QtWidgets.QWidget()
        self.heatmap.setObjectName("heatmap")
        self.tabWidgetData.addTab(self.heatmap, "")
        # word frequency page
        self.word_frequency = QtWidgets.QWidget()
        self.word_frequency.setObjectName("word_frequency")
        self.tabWidgetData.addTab(self.word_frequency, "")
        # word cloud page
        self.word_cloud = QtWidgets.QWidget()
        self.word_cloud.setObjectName("word_cloud")
        self.tabWidgetData.addTab(self.word_cloud, "")
        # comments page (tree view of extracted comments)
        self.comments = QtWidgets.QWidget()
        self.comments.setObjectName("comments")
        self.tabWidgetData.addTab(self.comments, "")
        self.treeView = QtWidgets.QTreeView(self.comments)
        self.treeView.setGeometry(QtCore.QRect(30, 20, 891, 611))
        self.treeView.setObjectName("treeView")
        # accuracy page
        self.accuracy = QtWidgets.QWidget()
        self.accuracy.setObjectName("accuracy")
        self.tabWidgetData.addTab(self.accuracy, "")
        # settings page: comment like-thresholds plus training options
        self.settings = QtWidgets.QWidget()
        self.settings.setObjectName("settings")
        self.groupBoxComments = QtWidgets.QGroupBox(self.settings)
        self.groupBoxComments.setGeometry(QtCore.QRect(30, 50, 331, 151))
        self.groupBoxComments.setObjectName("groupBoxComments")
        self.labelLikeMin = QtWidgets.QLabel(self.groupBoxComments)
        self.labelLikeMin.setGeometry(QtCore.QRect(30, 50, 171, 31))
        self.labelLikeMin.setObjectName("labelLikeMin")
        self.labelLikeMax = QtWidgets.QLabel(self.groupBoxComments)
        self.labelLikeMax.setGeometry(QtCore.QRect(30, 90, 171, 31))
        self.labelLikeMax.setObjectName("labelLikeMax")
        self.lineEditLikeMin = QtWidgets.QLineEdit(self.groupBoxComments)
        self.lineEditLikeMin.setGeometry(QtCore.QRect(230, 50, 81, 28))
        self.lineEditLikeMin.setLayoutDirection(QtCore.Qt.RightToLeft)
        self.lineEditLikeMin.setObjectName("lineEditLikeMin")
        self.lineEditLikeMax = QtWidgets.QLineEdit(self.groupBoxComments)
        self.lineEditLikeMax.setGeometry(QtCore.QRect(230, 90, 81, 28))
        self.lineEditLikeMax.setObjectName("lineEditLikeMax")
        self.groupBoxTraining = QtWidgets.QGroupBox(self.settings)
        self.groupBoxTraining.setGeometry(QtCore.QRect(30, 240, 601, 321))
        self.groupBoxTraining.setObjectName("groupBoxTraining")
        self.labelDataset = QtWidgets.QLabel(self.groupBoxTraining)
        self.labelDataset.setGeometry(QtCore.QRect(30, 50, 111, 31))
        self.labelDataset.setObjectName("labelDataset")
        self.lineEditDataset = QtWidgets.QLineEdit(self.groupBoxTraining)
        self.lineEditDataset.setGeometry(QtCore.QRect(150, 50, 421, 28))
        self.lineEditDataset.setLayoutDirection(QtCore.Qt.RightToLeft)
        self.lineEditDataset.setText("")
        self.lineEditDataset.setObjectName("lineEditDataset")
        self.pushButtonTrain = QtWidgets.QPushButton(self.groupBoxTraining)
        self.pushButtonTrain.setGeometry(QtCore.QRect(30, 160, 101, 31))
        self.pushButtonTrain.setObjectName("pushButtonTrain")
        self.pushButtonTrain.clicked.connect(self.__on_click_train)
        self.labelDatasetLimit = QtWidgets.QLabel(self.groupBoxTraining)
        self.labelDatasetLimit.setGeometry(QtCore.QRect(30, 100, 111, 31))
        self.labelDatasetLimit.setObjectName("labelDatasetLimit")
        self.lineEditDatasetLimit = QtWidgets.QLineEdit(self.groupBoxTraining)
        self.lineEditDatasetLimit.setGeometry(QtCore.QRect(150, 100, 131, 28))
        self.lineEditDatasetLimit.setLayoutDirection(QtCore.Qt.RightToLeft)
        self.lineEditDatasetLimit.setText("200")
        self.lineEditDatasetLimit.setObjectName("lineEditDatasetLimit")
        self.lineEditDatasetKfold = QtWidgets.QLineEdit(self.groupBoxTraining)
        self.lineEditDatasetKfold.setGeometry(QtCore.QRect(150, 210, 131, 28))
        self.lineEditDatasetKfold.setLayoutDirection(QtCore.Qt.RightToLeft)
        self.lineEditDatasetKfold.setText("10")
        self.lineEditDatasetKfold.setObjectName("lineEditDatasetKfold")
        self.labelDatasetKfold = QtWidgets.QLabel(self.groupBoxTraining)
        self.labelDatasetKfold.setGeometry(QtCore.QRect(30, 210, 111, 31))
        self.labelDatasetKfold.setObjectName("labelDatasetKfold")
        self.pushButtonAccuracy = QtWidgets.QPushButton(self.groupBoxTraining)
        self.pushButtonAccuracy.setGeometry(QtCore.QRect(30, 260, 101, 31))
        self.pushButtonAccuracy.setObjectName("pushButtonAccuracy")
        self.pushButtonAccuracy.clicked.connect(self.__on_click_accuracy)
        self.tabWidgetData.addTab(self.settings, "")
        # group box search (free-text YouTube query)
        self.groupBoxSearch = QtWidgets.QGroupBox(self.centralwidget)
        self.groupBoxSearch.setGeometry(QtCore.QRect(20, 20, 351, 101))
        self.groupBoxSearch.setObjectName("groupBoxSearch")
        self.lineEditSearch = QtWidgets.QLineEdit(self.groupBoxSearch)
        self.lineEditSearch.setGeometry(QtCore.QRect(20, 40, 311, 41))
        self.lineEditSearch.setObjectName("lineEditSearch")
        self.groupBoxAnalyse = QtWidgets.QGroupBox(self.centralwidget)
        self.groupBoxAnalyse.setGeometry(QtCore.QRect(20, 240, 351, 151))
        self.groupBoxAnalyse.setObjectName("groupBoxAnalyse")
        # group box data (collections previously stored in the database)
        self.groupBoxData = QtWidgets.QGroupBox(self.centralwidget)
        self.groupBoxData.setGeometry(QtCore.QRect(20, 130, 351, 101))
        self.groupBoxData.setObjectName("groupBoxData")
        self.comboBoxDB = QtWidgets.QComboBox(self.groupBoxData)
        self.comboBoxDB.setGeometry(QtCore.QRect(70, 40, 261, 41))
        self.comboBoxDB.setAcceptDrops(False)
        self.comboBoxDB.setObjectName("comboBoxDB")
        self.pushButtonLoad = QtWidgets.QPushButton(self.groupBoxData)
        self.pushButtonLoad.setGeometry(QtCore.QRect(20, 40, 41, 41))
        self.pushButtonLoad.setObjectName("pushButtonLoad")
        self.pushButtonLoad.clicked.connect(self.__on_click_load)
        # button extract
        self.pushButtonExtract = QtWidgets.QPushButton(self.groupBoxAnalyse)
        self.pushButtonExtract.setGeometry(QtCore.QRect(130, 40, 91, 61))
        self.pushButtonExtract.setObjectName("pushButtonExtract")
        self.pushButtonExtract.clicked.connect(self.__on_click_extract)
        # button analyse; disabled until collections are loaded (see __on_click_load)
        self.pushButtonAnalyse = QtWidgets.QPushButton(self.groupBoxAnalyse)
        self.pushButtonAnalyse.setGeometry(QtCore.QRect(240, 40, 91, 61))
        self.pushButtonAnalyse.setObjectName("pushButtonAnalyse")
        self.pushButtonAnalyse.clicked.connect(self.__on_click_analyse)
        self.pushButtonAnalyse.setEnabled(False)
        # number of videos input
        self.lineEditNrVideos = QtWidgets.QLineEdit(self.groupBoxAnalyse)
        self.lineEditNrVideos.setGeometry(QtCore.QRect(20, 70, 91, 28))
        self.lineEditNrVideos.setLayoutDirection(QtCore.Qt.RightToLeft)
        self.lineEditNrVideos.setText("")
        self.lineEditNrVideos.setObjectName("lineEditNrVideos")
        self.labelNrVideos1 = QtWidgets.QLabel(self.groupBoxAnalyse)
        self.labelNrVideos1.setGeometry(QtCore.QRect(30, 30, 71, 21))
        self.labelNrVideos1.setObjectName("labelNrVideos1")
        self.labelNrVideos2 = QtWidgets.QLabel(self.groupBoxAnalyse)
        self.labelNrVideos2.setGeometry(QtCore.QRect(40, 50, 51, 21))
        self.labelNrVideos2.setObjectName("labelNrVideos2")
        # progress bar
        self.progressBar = QtWidgets.QProgressBar(self.groupBoxAnalyse)
        self.progressBar.setGeometry(QtCore.QRect(20, 110, 311, 21))
        self.progressBar.setProperty("value", 0)
        self.progressBar.setObjectName("progressBar")
        # console (read-only status messages are appended by the slots)
        self.groupBoxConsole = QtWidgets.QGroupBox(self.centralwidget)
        self.groupBoxConsole.setGeometry(QtCore.QRect(20, 410, 351, 301))
        self.groupBoxConsole.setObjectName("groupBoxConsole")
        self.textEditConsole = QtWidgets.QTextEdit(self.groupBoxConsole)
        self.textEditConsole.setGeometry(QtCore.QRect(20, 40, 311, 241))
        self.textEditConsole.setObjectName("textEditConsole")
        MainWindow.setCentralWidget(self.centralwidget)
        # menu bar with File > Exit and Help > About
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1366, 25))
        self.menubar.setObjectName("menubar")
        self.menuFile = QtWidgets.QMenu(self.menubar)
        self.menuFile.setObjectName("menuFile")
        self.menuHelp = QtWidgets.QMenu(self.menubar)
        self.menuHelp.setObjectName("menuHelp")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.actionExit = QtWidgets.QAction(MainWindow)
        self.actionExit.setObjectName("actionExit")
        self.actionExit.setStatusTip('Leave The App')
        self.actionExit.triggered.connect(self.__close_application)
        self.actionAbout = QtWidgets.QAction(MainWindow)
        self.actionAbout.setObjectName("actionAbout")
        self.actionAbout.setStatusTip('Informations about the app')
        self.actionAbout.triggered.connect(self.__about_application)
        self.menuFile.addAction(self.actionExit)
        self.menuHelp.addAction(self.actionAbout)
        self.menubar.addAction(self.menuFile.menuAction())
        self.menubar.addAction(self.menuHelp.menuAction())
        # display data module: one DisplayData canvas per result tab
        # (width/height presumably in inches — TODO confirm in modules.display)
        width_val = 9.5
        height_val = 6.5
        self.sentiment_display = DisplayData(self.sentiment, width=width_val, height=height_val)
        self.heatmap_display = DisplayData(self.heatmap, width=width_val, height=height_val)
        self.word_frequency_display = DisplayData(self.word_frequency, width=width_val, height=height_val)
        self.word_cloud_display = DisplayData(self.word_cloud, width=width_val, height=height_val)
        self.accuracy_display = DisplayData(self.accuracy, width=width_val, height=height_val)
        self.__retranslateUi(MainWindow)
        self.tabWidgetData.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
def __retranslateUi(self, MainWindow):
"""Sets the label names and other paramenters"""
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "YouTube Sentiment Analysis"))
self.tabWidgetData.setTabText(self.tabWidgetData.indexOf(self.sentiment), _translate("MainWindow", "Sentiment"))
self.tabWidgetData.setTabText(self.tabWidgetData.indexOf(self.heatmap), _translate("MainWindow", "Heatmap"))
self.tabWidgetData.setTabText(self.tabWidgetData.indexOf(self.word_frequency),
_translate("MainWindow", "Word Frequency"))
self.tabWidgetData.setTabText(self.tabWidgetData.indexOf(self.word_cloud),
_translate("MainWindow", "WordCloud"))
self.tabWidgetData.setTabText(self.tabWidgetData.indexOf(self.comments), _translate("MainWindow", "Comments"))
self.tabWidgetData.setTabText(self.tabWidgetData.indexOf(self.accuracy), _translate("MainWindow", "Accuracy"))
self.groupBoxComments.setTitle(_translate("MainWindow", "Comments"))
self.labelLikeMin.setText(_translate("MainWindow", "Minimum number of likes:"))
self.labelLikeMax.setText(_translate("MainWindow", "Maximum number of likes:"))
self.lineEditLikeMin.setText(_translate("MainWindow", "10"))
self.lineEditLikeMax.setText(_translate("MainWindow", "1000"))
self.lineEditDataset.setText(_translate("MainWindow", "/home/alex/imdb_data/"))
self.groupBoxTraining.setTitle(_translate("MainWindow", "Training and Accuracy"))
self.labelDataset.setText(_translate("MainWindow", "Dataset path:"))
self.pushButtonTrain.setText(_translate("MainWindow", "Train"))
self.labelDatasetLimit.setText(_translate("MainWindow", "Limit documents:"))
self.labelDatasetKfold.setText(_translate("MainWindow", "k-folds:"))
self.pushButtonAccuracy.setText(_translate("MainWindow", "Accuracy"))
self.groupBoxSearch.setTitle(_translate("MainWindow", "Search Data"))
self.groupBoxAnalyse.setTitle(_translate("MainWindow", "Analyse Data"))
self.pushButtonExtract.setText(_translate("MainWindow", "Extract"))
self.pushButtonAnalyse.setText(_translate("MainWindow", "Analyse"))
self.pushButtonAccuracy.setText(_translate("MainWindow", "Acccuracy"))
self.groupBoxConsole.setTitle(_translate("MainWindow", "Console"))
self.tabWidgetData.setTabText(self.tabWidgetData.indexOf(self.settings), _translate("MainWindow", "Settings"))
self.labelNrVideos1.setText(_translate("MainWindow", "Number of"))
self.labelNrVideos2.setText(_translate("MainWindow", "videos:"))
self.pushButtonLoad.setText(_translate("MainWindow", "Load"))
self.menuFile.setTitle(_translate("MainWindow", "File"))
self.menuHelp.setTitle(_translate("MainWindow", "Help"))
self.actionExit.setText(_translate("MainWindow", "Exit"))
self.groupBoxData.setTitle(_translate("MainWindow", "Data from database"))
self.actionAbout.setText(_translate("MainWindow", "About"))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(196, 225, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(155, 192, 231))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(76, 106, 138))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(184, 207, 231))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(196, 225, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(155, 192, 231))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(76, 106, 138))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(184, 207, 231))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(196, 225, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(155, 192, 231))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(76, 106, 138))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.pushButtonAnalyse.setPalette(palette)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(196, 225, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(155, 192, 231))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(76, 106, 138))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(184, 207, 231))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(196, 225, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(155, 192, 231))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(76, 106, 138))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(184, 207, 231))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(196, 225, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(155, 192, 231))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(76, 106, 138))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
self.comboBoxDB.setPalette(palette)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(196, 225, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(155, 192, 231))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(76, 106, 138))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(184, 207, 231))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(196, 225, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(155, 192, 231))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(76, 106, 138))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(184, 207, 231))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(196, 225, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(155, 192, 231))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(76, 106, 138))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.pushButtonLoad.setPalette(palette)
@staticmethod
def __about_application():
"""Open a popup with application details"""
message_box = QMessageBox()
message_box.about(message_box, "About", "Youtube Sentiment Analysis Application\n"
"Degree project\n"
"Author: Alexandru Grigoras")
    @staticmethod
    def __close_application():
        """Close the application"""
        # Unconditional shutdown: raises SystemExit, which tears down the
        # Qt event loop and the interpreter (no confirmation, no cleanup).
        sys.exit()
def __on_click_load(self):
"""Load the collection names"""
# get data
sd = StoreData()
try:
collections = sd.get_collections()
collections.sort()
self.comboBoxDB.clear()
self.comboBoxDB.addItems(collections)
self.pushButtonAnalyse.setEnabled(True)
except PyMongoError:
self.textEditConsole.append("> Database server is not opened!")
@pyqtSlot(name="extract")
def __on_click_extract(self):
"""Extracts the data from YouTube"""
input_text = self.lineEditSearch.text()
if input_text is not None:
if input_text is not "":
# get like threshold
try:
like_threshold_min = int(self.lineEditLikeMin.text())
if like_threshold_min < 0:
self.textEditConsole.append("> The minimum number of likes should be positive")
return
except ValueError:
self.textEditConsole.append("> The minimum number of likes is not valid")
return
try:
like_threshold_max = int(self.lineEditLikeMax.text())
if like_threshold_max < 0:
self.textEditConsole.append("> The maximum number of likes should be positive")
return
except ValueError:
self.textEditConsole.append("> The maximum number of likes is not valid")
return
try:
nr_videos = int(self.lineEditNrVideos.text())
if nr_videos < 0:
self.textEditConsole.append("> The number of videos should be positive")
return
except ValueError:
self.textEditConsole.append("> The number of videos is not valid")
return
# extract data
crawl_delay = 1
extracted_data = WebCrawler(input_text, nr_videos, crawl_delay)
crawling_result = extracted_data.run(self.textEditConsole)
if crawling_result is True:
# process data
data = DataAnalysis(input_text, like_threshold_min, like_threshold_max)
try:
fd, pd, sentiment_val, sentiment_anew_arousal, likes, confidence, comments, videos, author, \
comm_time = data.analyse(self.progressBar, self.textEditConsole)
except TypeError:
self.textEditConsole.append("> No data found or like threshold is too big!")
return
# clear any plot if it exists
self.sentiment_display.clear_plot()
self.heatmap_display.clear_plot()
self.word_frequency_display.clear_plot()
self.word_cloud_display.clear_plot()
progress_value = 80
# plot data
self.sentiment_display.plot_classifiers(sentiment_val, sentiment_anew_arousal, likes, confidence,
'Sentiment', 'Activare', comments, videos, author,
comm_time)
progress_value += 5
self.progressBar.setValue(progress_value)
self.heatmap_display.plot_heatmap(sentiment_val, sentiment_anew_arousal, "Sentiment", "Activare")
progress_value += 5
self.progressBar.setValue(progress_value)
self.word_frequency_display.plot_word_frequency(fd.items())
progress_value += 5
self.progressBar.setValue(progress_value)
self.word_cloud_display.plot_wordcloud(pd.get_all_tokens())
progress_value += 5
self.progressBar.setValue(progress_value)
# put comments to treeview
self.__populate_treeview(data)
else:
self.textEditConsole.append("> The input text is empty!")
else:
self.textEditConsole.append("> Invalid input text!")
@pyqtSlot(name="analyse")
def __on_click_analyse(self):
"""Analyses Data"""
input_text = self.comboBoxDB.currentText()
if input_text is not None:
if input_text is not "":
try:
like_threshold_min = int(self.lineEditLikeMin.text())
if like_threshold_min < 0:
self.textEditConsole.append("> The minimum number of likes should be positive")
return
except ValueError:
self.textEditConsole.append("> The minimum number of likes is not valid")
return
try:
like_threshold_max = int(self.lineEditLikeMax.text())
if like_threshold_max < 0:
self.textEditConsole.append("> The maximum number of likes should be positive")
return
if like_threshold_max <= like_threshold_min:
self.textEditConsole.append("> The maximum number of likes should greater than "
"the minimum number of likes")
return
except ValueError:
self.textEditConsole.append("> The maximum number of likes is not valid")
return
# process data
data = DataAnalysis(input_text, like_threshold_min, like_threshold_max)
try:
fd, pd, sentiment_val, sentiment_anew_arousal, likes, confidence, comments, videos, author, \
comm_time = data.analyse(self.progressBar, self.textEditConsole)
except TypeError:
self.textEditConsole.append("> No data found or like threshold is too large")
return
# clear any plot if it exists
self.sentiment_display.clear_plot()
self.heatmap_display.clear_plot()
self.word_frequency_display.clear_plot()
self.word_cloud_display.clear_plot()
progress_value = 80
# plot data
self.sentiment_display.plot_classifiers(sentiment_val, sentiment_anew_arousal, likes, confidence,
'Sentiment', 'Activare', comments, videos, author, comm_time)
progress_value += 5
self.progressBar.setValue(progress_value)
self.heatmap_display.plot_heatmap(sentiment_val, sentiment_anew_arousal, "Sentiment", "Activare")
progress_value += 5
self.progressBar.setValue(progress_value)
self.word_frequency_display.plot_word_frequency(fd.items())
progress_value += 5
self.progressBar.setValue(progress_value)
self.word_cloud_display.plot_wordcloud(pd.get_all_tokens())
progress_value += 5
self.progressBar.setValue(progress_value)
# put comments to treeview
self.__populate_treeview(data)
else:
self.textEditConsole.append("> The input text is empty!")
else:
self.textEditConsole.append("> Invalid input text!")
@pyqtSlot(name="train")
def __on_click_train(self):
"""Train the classifiers"""
try:
max_nr_docs = int(self.lineEditDatasetLimit.text())
if max_nr_docs < 1:
self.textEditConsole.append("> The maximum number of documents should be positive")
return
except ValueError:
self.textEditConsole.append("> The maximum number of documents is not valid")
return
dataset_path = self.lineEditDataset.text()
train_classifier = TrainClassifier(dataset_path, max_nr_docs)
train_classifier.train(self.progressBar, self.textEditConsole)
@pyqtSlot(name="accuracy")
def __on_click_accuracy(self):
"""Test the accuracy of the classifiers"""
try:
max_nr_docs = int(self.lineEditDatasetLimit.text())
if max_nr_docs < 1:
self.textEditConsole.append("> The maximum number of documents should be positive")
return
except ValueError:
self.textEditConsole.append("> The maximum number of documents is not valid")
return
try:
k_fold = int(self.lineEditDatasetKfold.text())
if k_fold < 1:
self.textEditConsole.append("> k should be positive")
return
except ValueError:
self.textEditConsole.append("> k number is not valid")
return
dataset_path = self.lineEditDataset.text()
# get data
test_accuracy = TestAccuracy(dataset_path, max_nr_docs)
results, names = test_accuracy.test_cross_val_score(k_fold, self.progressBar, self.textEditConsole)
# clear any plot if it exists
self.sentiment_display.clear_plot()
# display data
self.accuracy_display.plot_accuracy(results, names)
self.tabWidgetData.setCurrentIndex(5)
def __populate_treeview(self, data):
"""Populate the comments tab"""
# get data
videos_data = data.get_data_from_DB()
self.treeView.setSelectionBehavior(QAbstractItemView.SelectRows)
model = QStandardItemModel()
model.setHorizontalHeaderLabels(['element', 'value'])
self.treeView.setModel(model)
# parse data
for video in videos_data:
parent_elem = QStandardItem('video')
parent_value = QStandardItem(video.get('title'))
id_elem = QStandardItem('_id')
id_value = QStandardItem(video.get('_id'))
parent_elem.appendRow([id_elem, id_value])
description_elem = QStandardItem('description')
description_value = QStandardItem(video.get('description'))
parent_elem.appendRow([description_elem, description_value])
nr_likes_elem = QStandardItem('nr_likes')
nr_likes_value = QStandardItem(video.get('nr_likes'))
parent_elem.appendRow([nr_likes_elem, nr_likes_value])
nr_dislikes_elem = QStandardItem('nr_dislikes')
nr_dislikes_value = QStandardItem(video.get('nr_dislikes'))
parent_elem.appendRow([nr_dislikes_elem, nr_dislikes_value])
comments_elem = QStandardItem('comments')
parent_elem.appendRow(comments_elem)
comments = video.get("comments")
for comment in comments:
text_elem = QStandardItem('text')
text_value = QStandardItem(comment.get('text'))
comments_elem.appendRow([text_elem, text_value])
cid_elem = QStandardItem('cid')
cid_value = QStandardItem(comment.get('cid'))
text_elem.appendRow([cid_elem, cid_value])
time_elem = QStandardItem('time')
time_value = QStandardItem(comment.get('time'))
text_elem.appendRow([time_elem, time_value])
author_elem = QStandardItem('author')
author_value = QStandardItem(comment.get('author'))
text_elem.appendRow([author_elem, author_value])
nr_likes_elem = QStandardItem('nr_likes')
nr_likes_value = QStandardItem(comment.get('nr_likes'))
text_elem.appendRow([nr_likes_elem, nr_likes_value])
model.appendRow([parent_elem, parent_value])
class SentimentAnalysisApplication(QMainWindow, UIMainWindow):
    """Main application -> initialises User Interface"""
    def __init__(self):
        """Class constructor"""
        # Explicit base-class initialisation (deliberately not super()):
        # the Qt window and the generated UI mixin are initialised
        # independently, then the layout is applied to this window.
        QMainWindow.__init__(self, flags=QtCore.Qt.Window)
        UIMainWindow.__init__(self)
        self.setupUi(self)
|
{"/youtube_sentiment_analysis/modules/analysis.py": ["/youtube_sentiment_analysis/modules/process.py", "/youtube_sentiment_analysis/modules/store.py", "/youtube_sentiment_analysis/modules/training.py", "/youtube_sentiment_analysis/modules/vote_classifier.py"], "/youtube_sentiment_analysis/modules/crawler.py": ["/youtube_sentiment_analysis/modules/store.py"], "/youtube_sentiment_analysis/__main__.py": ["/youtube_sentiment_analysis/modules/interface.py"], "/youtube_sentiment_analysis/modules/interface.py": ["/youtube_sentiment_analysis/modules/accuracy.py", "/youtube_sentiment_analysis/modules/analysis.py", "/youtube_sentiment_analysis/modules/crawler.py", "/youtube_sentiment_analysis/modules/display.py", "/youtube_sentiment_analysis/modules/store.py", "/youtube_sentiment_analysis/modules/training.py"], "/youtube_sentiment_analysis/modules/training.py": ["/youtube_sentiment_analysis/modules/process.py"], "/youtube_sentiment_analysis/modules/accuracy.py": ["/youtube_sentiment_analysis/modules/process.py", "/youtube_sentiment_analysis/modules/training.py", "/youtube_sentiment_analysis/modules/vote_classifier.py"]}
|
18,468
|
alexgrigoras/youtube_consumer_perception
|
refs/heads/master
|
/youtube_sentiment_analysis/modules/training.py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Classifiers training
@alexandru_grigoras
"""
# Libraries
import glob
import os
import pickle
import time
import nltk
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.svm import SVC, NuSVC
from youtube_sentiment_analysis.modules.process import ProcessData
# Constants
__all__ = ['TrainClassifier']
__version__ = '1.0'
__author__ = 'Alexandru Grigoras'
__email__ = 'alex_grigoras_10@yahoo.com'
__status__ = 'release'
CLASSIFIERS_PATH = "youtube_sentiment_analysis/data/classifiers/"
class TrainClassifier:
    """Trains, saves and loads the sentiment classifiers.

    The classifiers are scikit-learn estimators wrapped in NLTK's
    ``SklearnClassifier`` and pickled under ``CLASSIFIERS_PATH``. The
    training data is an imdb-style dataset laid out as
    ``<dataset_path>{train,test}/{neg,pos}/*.txt``.
    """

    def __init__(self, dataset_path=None, max_nr_docs=None, classifiers_names=None):
        """Class constructor.

        :param dataset_path: root folder of the dataset (trailing slash
            expected — paths are built by string concatenation)
        :param max_nr_docs: total document budget; -1 disables the limit
        :param classifiers_names: names of the classifiers to train;
            falls back to a default selection when empty or None
        """
        self.__classifiers_names = []
        self.__trained_classifiers = []
        if not classifiers_names or classifiers_names == [""]:
            self.__classifiers_names = ['multinomial_naive_bayes', 'logistic_regression', 'nu_svc']
        else:
            self.__classifiers_names.extend(classifiers_names)
        self.__dataset_path = dataset_path
        self.__max_nr_docs = max_nr_docs

    def set_classifiers(self, classifiers_names):
        """Sets the classifiers to be trained."""
        self.__classifiers_names = list(classifiers_names)

    def train(self, progress, console):
        """Train every selected classifier and pickle the result.

        :param progress: Qt progress bar updated as classifiers finish
        :param console: Qt text widget used for log output
        """
        console.append("> Selected classifiers: " + str(self.__classifiers_names))
        progress_value = 0
        nr_classifiers = len(self.__classifiers_names)
        for classifier_name in self.__classifiers_names:
            console.append("> Training the classifier: " + classifier_name)
            # get the starting time
            classifier_start_time = time.time()
            train_set, test_set = self.get_dataset_split()
            if classifier_name == 'multinomial_naive_bayes':
                cl_name = "Multinomial NB classifier"
                classifier = SklearnClassifier(MultinomialNB(alpha=1))
            elif classifier_name == 'bernoulli_naive_bayes':
                cl_name = "Bernoulli NB Classifier"
                classifier = SklearnClassifier(BernoulliNB(alpha=1, binarize=0))
            elif classifier_name == 'logistic_regression':
                cl_name = "Logistic Regression"
                classifier = SklearnClassifier(LogisticRegression(penalty='l2', class_weight='balanced', solver='saga',
                                                                  max_iter=2000, n_jobs=-1, warm_start=True))
            elif classifier_name == 'svc':
                cl_name = "SVC"
                classifier = SklearnClassifier(SVC(kernel='linear', probability=True, gamma='scale',
                                                   class_weight='balanced', max_iter=2000, cache_size=300))
            elif classifier_name == 'nu_svc':
                cl_name = "Nu SVC"
                classifier = SklearnClassifier(NuSVC(nu=0.5, kernel='linear', probability=True, gamma='scale',
                                                     max_iter=2000, cache_size=300, class_weight='balanced'))
            else:
                console.append("> Invalid classifier name")
                return
            classifier.train(train_set)
            console.append("> " + cl_name + " accuracy percent: " +
                           str((nltk.classify.accuracy(classifier, test_set)) * 100) + "%")
            self.__save_classifier(classifier_name, classifier)
            self.__trained_classifiers.append(classifier)
            # NOTE(review): progress_value becomes a float here; some Qt
            # bindings require an int for setValue — confirm before
            # relying on exact progress behaviour.
            progress_value += 100 / nr_classifiers
            progress.setValue(progress_value)
            # get the ending time and calculate elapsed time
            classifier_elapsed_time = time.time() - classifier_start_time
            console.append("> Training " + classifier_name + " finished in " +
                           time.strftime("%H:%M:%S", time.gmtime(classifier_elapsed_time)) + " seconds")

    def get_classifiers(self, progress=None, console=None):
        """Return the pickled classifiers, training them first if the
        classifier directory is empty.

        :param progress: Qt progress bar (only used when training)
        :param console: Qt text widget used for log output
        :return: list of classifier objects loaded from disk or trained
        """
        classifiers = []
        read_directory = os.listdir(CLASSIFIERS_PATH)
        if len(read_directory) == 0:
            console.append("> Training the classifiers: ")
            self.train(progress, console)
            classifiers = self.get_trained_classifiers()
        else:
            console.append("> Getting the trained classifiers: ")
            file_nr = 1
            for f in read_directory:
                console.append("    " + str(file_nr) + ". " + f)
                file_nr = file_nr + 1
                classifiers.append(self.open_classifier(f))
            # the rule-based scorers are listed for completeness only;
            # they are not part of the returned list
            console.append("    " + str(file_nr) + ". vader classifier")
            console.append("    " + str(file_nr + 1) + ". anew classifier")
        return classifiers

    def get_trained_classifiers(self):
        """Returns a list with trained classifiers objects"""
        return self.__trained_classifiers

    @staticmethod
    def __save_classifier(_name, _classifier):
        """Pickle a trained classifier so training can be skipped later."""
        # context manager guarantees the file handle is closed on error
        with open(CLASSIFIERS_PATH + _name + ".pickle", 'wb') as save_document:
            pickle.dump(_classifier, save_document)

    @staticmethod
    def open_classifier(name):
        """Load a previously pickled classifier from CLASSIFIERS_PATH."""
        with open(CLASSIFIERS_PATH + name, 'rb') as open_file:
            return pickle.load(open_file, encoding='bytes')

    def __read_feature_docs(self, pattern, label, max_docs, pd):
        """Read files matching ``pattern`` into (feature_dict, label)
        pairs, stopping at ``max_docs`` unless the budget is -1
        (unlimited). Replaces four copy-pasted loops that also kept
        opening files after the limit was reached."""
        docs = []
        for nr_docs, fp in enumerate(glob.glob(pattern)):
            if self.__max_nr_docs != -1 and nr_docs >= max_docs:
                break
            with open(fp, 'r') as f:
                docs.append((pd.get_word_feature(f.read().split()), label))
        return docs

    def get_dataset_split(self):
        """Get dataset from files (negative and positive words),
        25000 train + 25000 test (imdb).

        Each of the four quarters (train/test x neg/pos) is capped at a
        quarter of the document budget. BUG FIX: the original capped the
        test/pos quarter at ``max_docs / 4`` (a sixteenth of the budget)
        instead of ``max_docs`` like the other three quarters, and the
        unlimited check used ``is -1`` (identity) where ``== -1``
        (equality) is meant.

        :return: (train_set, test_set) lists of (features, label) pairs
        """
        pd = ProcessData()
        max_docs = self.__max_nr_docs / 4
        neg_train_ids = self.__read_feature_docs(self.__dataset_path + 'train/neg/*.txt', 'neg', max_docs, pd)
        pos_train_ids = self.__read_feature_docs(self.__dataset_path + 'train/pos/*.txt', 'pos', max_docs, pd)
        neg_test_ids = self.__read_feature_docs(self.__dataset_path + 'test/neg/*.txt', 'neg', max_docs, pd)
        pos_test_ids = self.__read_feature_docs(self.__dataset_path + 'test/pos/*.txt', 'pos', max_docs, pd)
        return neg_train_ids + pos_train_ids, neg_test_ids + pos_test_ids

    def get_dataset_labeled(self):
        """Get dataset from files (negative and positive words),
        25000 train + 25000 test (imdb), with numeric labels.

        :return: (text_data, label_values) where labels are -1 for
            negative and 1 for positive documents; each polarity is
            capped at half the budget (-1 disables the limit)
        """
        file_path_neg = glob.glob(self.__dataset_path + 'train/neg/*.txt') + \
            glob.glob(self.__dataset_path + 'test/neg/*.txt')
        file_path_pos = glob.glob(self.__dataset_path + 'train/pos/*.txt') + \
            glob.glob(self.__dataset_path + 'test/pos/*.txt')
        text_data = []
        label_values = []
        max_docs = self.__max_nr_docs / 2
        for paths, label in ((file_path_neg, -1), (file_path_pos, 1)):
            for nr_docs, fp in enumerate(paths):
                if self.__max_nr_docs != -1 and nr_docs >= max_docs:
                    break
                with open(fp, 'r') as f:
                    text_data.append(f.read())
                label_values.append(label)
        return text_data, label_values
|
{"/youtube_sentiment_analysis/modules/analysis.py": ["/youtube_sentiment_analysis/modules/process.py", "/youtube_sentiment_analysis/modules/store.py", "/youtube_sentiment_analysis/modules/training.py", "/youtube_sentiment_analysis/modules/vote_classifier.py"], "/youtube_sentiment_analysis/modules/crawler.py": ["/youtube_sentiment_analysis/modules/store.py"], "/youtube_sentiment_analysis/__main__.py": ["/youtube_sentiment_analysis/modules/interface.py"], "/youtube_sentiment_analysis/modules/interface.py": ["/youtube_sentiment_analysis/modules/accuracy.py", "/youtube_sentiment_analysis/modules/analysis.py", "/youtube_sentiment_analysis/modules/crawler.py", "/youtube_sentiment_analysis/modules/display.py", "/youtube_sentiment_analysis/modules/store.py", "/youtube_sentiment_analysis/modules/training.py"], "/youtube_sentiment_analysis/modules/training.py": ["/youtube_sentiment_analysis/modules/process.py"], "/youtube_sentiment_analysis/modules/accuracy.py": ["/youtube_sentiment_analysis/modules/process.py", "/youtube_sentiment_analysis/modules/training.py", "/youtube_sentiment_analysis/modules/vote_classifier.py"]}
|
18,469
|
alexgrigoras/youtube_consumer_perception
|
refs/heads/master
|
/youtube_sentiment_analysis/modules/sentiment_module/sentiment.py
|
#!/usr/bin/python
#- SENTIMENT.PY ------------------------------------------------------------#
# Routines to calulate average valence and arousal for one or more terms #
# using the ANEW and Happiness sentiment dictionaries #
# #
#- Modification History: ---------------------------------------------------#
# When: Who: Comments: #
# #
# 28-Sep-14 Christopher G. Healey Converted from Javascript #
# 17-Dec-17 Christopher G. Healey Changed to SENTIMENT.PY to clarify #
# these are not ANEW-only terms #
#---------------------------------------------------------------------------#
import math
import nltk
# Import raw ANEW and Happiness dictionary data from term file
from .sentiment_term import anew_word as anew_word
from .sentiment_term import anew_stem as anew_stem
from .sentiment_term import hapi_word as hapi_word
__all__ = ['add_term', 'sentiment', 'exist']
# Setup a "custom" dictionary to allow users to extend the ANEW and
# happiness dictionaries
cust_dict = { } # Custom dictionary, raw terms
cust_stem = { } # Custom dictionary, stemmed terms
def add_term( term, v, a, replace = False ):

    # Add a term to the custom dictionary; if it already exists in one of
    # the default dictionaries, the request will be ignored unless the
    # user explicitly asks for the value to be changed
    #
    # term:    Term to add
    # v:       Valence
    # a:       Arousal
    # replace: Replace term that exists in default dictionaries
    #
    # BUG FIX: valence_raw() reads 'avg'[ 0 ] and arousal_raw() reads
    # 'avg'[ 1 ], so every entry must store avg = [ valence, arousal ].
    # The previous code stored [ a, v ] (and the anew_stem / hapi_word
    # branches wrote into 'std' instead of 'avg'), inverting valence and
    # arousal for every added or replaced term.

    global cust_dict
    global cust_stem

    # If term already exists and user does not ask to replace it, stop
    if exist( term ) and replace != True:
        return

    # Otherwise either replace it or add it to the custom dictionary
    if term in anew_word and replace == True:
        anew_word[ term ][ 'avg' ][ 0 ] = v
        anew_word[ term ][ 'avg' ][ 1 ] = a
    elif term in anew_stem and replace == True:
        anew_stem[ term ][ 'avg' ][ 0 ] = v
        anew_stem[ term ][ 'avg' ][ 1 ] = a
    elif term in hapi_word:
        hapi_word[ term ][ 'avg' ][ 0 ] = v
        hapi_word[ term ][ 'avg' ][ 1 ] = a
    else:
        # Build the custom raw-term entry
        cust_dict[ term ] = { }
        cust_dict[ term ][ 'dict' ] = "custom"
        cust_dict[ term ][ 'word' ] = term
        cust_dict[ term ][ 'avg' ] = [ v, a ]
        cust_dict[ term ][ 'std' ] = [ 1, 1 ]
        cust_dict[ term ][ 'fq' ] = 1

        # Build a stem for the custom term and mirror the entry in the
        # stemmed dictionary
        porter = nltk.stem.porter.PorterStemmer()
        stem = porter.stem( term )
        cust_dict[ term ][ 'stem' ] = stem

        cust_stem[ stem ] = { }
        cust_stem[ stem ][ 'dict' ] = "custom"
        cust_stem[ stem ][ 'word' ] = stem
        cust_stem[ stem ][ 'stem' ] = stem
        cust_stem[ stem ][ 'avg' ] = [ v, a ]
        cust_stem[ stem ][ 'std' ] = [ 1, 1 ]
        cust_stem[ stem ][ 'fq' ] = 1

# End function add_term
def arousal( term ):

    # Return the average arousal for a term
    #
    # term: Term to check (can be string or list of strings)

    if isinstance( term, str ):
        return arousal_raw( term )[ 0 ]
    elif not isinstance( term, list ):
        return 0.0

    # List of terms: probability-weighted mean, where each known term
    # contributes its mean arousal weighted by a Gaussian likelihood
    # derived from its standard deviation
    two_pi = 2.0 * math.pi
    weights = [ ]
    means = [ ]

    for t in term:
        if exist( t ):
            mu, sd = arousal_raw( t )
            weights.append( 1.0 / math.sqrt( two_pi * math.pow( sd, 2.0 ) ) )
            means.append( mu )

    total = sum( weights )
    result = 0.0
    for w, mu in zip( weights, means ):
        result = result + ( w / total * mu )

    return result

# End function arousal
def arousal_raw( term ):

    # Return the raw arousal for a single term as [ mean, std ]
    #
    # term: Term to check

    global cust_dict
    global cust_stem

    if not exist( term ):
        return [ 0.0, 0.0 ]

    # Arousal lives at index 1 of 'avg' / 'std'; probe the dictionaries
    # in the same priority order as the original implementation
    for lookup in ( anew_word, anew_stem, cust_dict, cust_stem, hapi_word ):
        if term in lookup:
            return [ lookup[ term ][ 'avg' ][ 1 ], lookup[ term ][ 'std' ][ 1 ] ]

# End function arousal_raw
def exist( term ):

    # Return True if a term exists in one of the sentiment dictionaries,
    # False otherwise
    #
    # term: Term to check (can be string or list of strings)

    global cust_dict
    global cust_stem

    def _known( t ):
        # membership across all five dictionaries
        return ( t in anew_word or t in anew_stem or t in hapi_word or
                 t in cust_dict or t in cust_stem )

    if isinstance( term, str ):
        return _known( term )
    elif isinstance( term, list ):
        return [ _known( t ) for t in term ]
    else:
        return False

# End function exist
def sentiment( term ):

    # Return the valence and arousal sentiment for a term
    #
    # term: Term to check (can be string or list of strings)

    if not isinstance( term, ( str, list ) ):
        return { 'valence': 0.0, 'arousal': 0.0 }

    return { 'valence': valence( term ), 'arousal': arousal( term ) }

# End function sentiment
def valence( term ):

    # Return the average valence for a term
    #
    # term: Term to check (can be string or list of strings)

    if isinstance( term, str ):
        return valence_raw( term )[ 0 ]
    elif not isinstance( term, list ):
        return 0.0

    # List of terms: probability-weighted mean over the known terms,
    # weighting each mean valence by a Gaussian likelihood derived from
    # its standard deviation
    two_pi = 2.0 * math.pi
    weights = [ ]
    means = [ ]

    for t in term:
        if exist( t ):
            mu, sd = valence_raw( t )
            weights.append( 1.0 / math.sqrt( two_pi * math.pow( sd, 2.0 ) ) )
            means.append( mu )

    total = sum( weights )
    result = 0.0
    for w, mu in zip( weights, means ):
        result = result + ( w / total * mu )

    return result

# End function valence
def valence_raw( term ):

    # Return the raw valence for a single term as [ mean, std ]
    #
    # term: Term to check

    global cust_dict
    global cust_stem

    if not exist( term ):
        return [ 0.0, 0.0 ]

    # Valence lives at index 0 of 'avg' / 'std'; probe the dictionaries
    # in the same priority order as the original implementation
    for lookup in ( anew_word, anew_stem, cust_dict, cust_stem, hapi_word ):
        if term in lookup:
            return [ lookup[ term ][ 'avg' ][ 0 ], lookup[ term ][ 'std' ][ 0 ] ]

# End function valence_raw
|
{"/youtube_sentiment_analysis/modules/analysis.py": ["/youtube_sentiment_analysis/modules/process.py", "/youtube_sentiment_analysis/modules/store.py", "/youtube_sentiment_analysis/modules/training.py", "/youtube_sentiment_analysis/modules/vote_classifier.py"], "/youtube_sentiment_analysis/modules/crawler.py": ["/youtube_sentiment_analysis/modules/store.py"], "/youtube_sentiment_analysis/__main__.py": ["/youtube_sentiment_analysis/modules/interface.py"], "/youtube_sentiment_analysis/modules/interface.py": ["/youtube_sentiment_analysis/modules/accuracy.py", "/youtube_sentiment_analysis/modules/analysis.py", "/youtube_sentiment_analysis/modules/crawler.py", "/youtube_sentiment_analysis/modules/display.py", "/youtube_sentiment_analysis/modules/store.py", "/youtube_sentiment_analysis/modules/training.py"], "/youtube_sentiment_analysis/modules/training.py": ["/youtube_sentiment_analysis/modules/process.py"], "/youtube_sentiment_analysis/modules/accuracy.py": ["/youtube_sentiment_analysis/modules/process.py", "/youtube_sentiment_analysis/modules/training.py", "/youtube_sentiment_analysis/modules/vote_classifier.py"]}
|
18,470
|
alexgrigoras/youtube_consumer_perception
|
refs/heads/master
|
/youtube_sentiment_analysis/modules/accuracy.py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Classifiers accuracy
@alexandru_grigoras
"""
# Libraries
import time
import numpy as np
from nltk.sentiment import SentimentIntensityAnalyzer
from sklearn import model_selection
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline
from sklearn.svm import NuSVC
from youtube_sentiment_analysis.modules.process import ProcessData
from youtube_sentiment_analysis.modules.sentiment_module import sentiment as anew
from youtube_sentiment_analysis.modules.training import TrainClassifier
from youtube_sentiment_analysis.modules.vote_classifier import VoteClassifier
# Constants
__all__ = ['TestAccuracy']
__version__ = '1.0'
__author__ = 'Alexandru Grigoras'
__email__ = 'alex_grigoras_10@yahoo.com'
__status__ = 'release'
CLASSIFIERS_PATH = "youtube_sentiment_analysis/data/classifiers_large_dataset/"
class TestAccuracy:
    """Class for testing the accuracy of the algorithms.

    Compares the rule-based VADER and ANEW scorers and the pickled
    voting ensemble against TF-IDF + scikit-learn pipelines (NuSVC,
    LogisticRegression, MultinomialNB) using k-fold cross-validation.
    """
    def __init__(self, dataset_path, max_nr_docs):
        """Class constructor.

        :param dataset_path: root folder of the labeled dataset,
            forwarded to TrainClassifier
        :param max_nr_docs: document budget forwarded to TrainClassifier
        """
        self.__dataset_path = dataset_path
        self.__max_nr_docs = max_nr_docs
    def test_cross_val_score(self, k_fold, progress, console):
        """Testing Classifiers Accuracy using Cross-Validation Method.

        :param k_fold: number of cross-validation splits
        :param progress: Qt progress bar, advanced while scoring
        :param console: Qt text widget used for log output
        :return: (results, names) — one list/array of per-fold accuracy
            scores per classifier, ordered VOTING, NuSVC, LR, MNB,
            VADER, ANEW
        """
        # get the starting time
        start_time = time.time()
        tc = TrainClassifier(self.__dataset_path, self.__max_nr_docs)
        text_data, label_values = tc.get_dataset_labeled()
        x_elements = np.array(text_data)
        y_elements = np.array(label_values)
        sid = SentimentIntensityAnalyzer()
        vader_accuracy = []
        anew_accuracy = []
        voting_accuracy = []
        process_data = ProcessData()
        progress_value = 0
        # machine learning classifiers: load (or train) the pickled
        # models and wrap them in a majority-vote ensemble
        classifiers = tc.get_classifiers(progress, console)
        voted_classifier = VoteClassifier(classifiers)
        kf = KFold(n_splits=k_fold, random_state=None, shuffle=False)
        for train_index, test_index in kf.split(x_elements):
            # NOTE(review): x_train / y_train are never used below — the
            # VADER/ANEW/voting scorers need no per-fold fitting, only
            # the test split.
            x_train, x_test = x_elements[train_index], x_elements[test_index]
            y_train, y_test = y_elements[train_index], y_elements[test_index]
            test_values_vader = []
            test_values_anew = []
            test_values_voting = []
            predicted_values = []
            for text, value in zip(x_test, y_test):
                process_data.process_text(text)
                # VADER: non-negative compound score counts as positive
                ss = sid.polarity_scores(text)
                if ss["compound"] >= 0:
                    test_values_vader.append("positive")
                else:
                    test_values_vader.append("negative")
                tokens = process_data.get_tokens()
                # ANEW: valence threshold 5.8 splits positive/negative
                if anew.sentiment(tokens)['valence'] >= 5.8:
                    test_values_anew.append("positive")
                else:
                    test_values_anew.append("negative")
                # ground-truth label: -1 negative, anything else positive
                if value == -1:
                    predicted_values.append("negative")
                else:
                    predicted_values.append("positive")
                # machine learning algorithms sentiment value
                ml_algorithms_sentiment = voted_classifier.classify(text, process_data)
                if ml_algorithms_sentiment >= 0:
                    test_values_voting.append("positive")
                else:
                    test_values_voting.append("negative")
            # accuracy_score just counts matching positions, so passing
            # predictions first and truth second yields the same value
            acc_vader = accuracy_score(test_values_vader, predicted_values, normalize=True)
            acc_anew = accuracy_score(test_values_anew, predicted_values, normalize=True)
            acc_voting = accuracy_score(test_values_voting, predicted_values, normalize=True)
            vader_accuracy.append(acc_vader)
            anew_accuracy.append(acc_anew)
            voting_accuracy.append(acc_voting)
            # the per-fold scoring phase accounts for 40% of the bar
            progress_value += 40 / k_fold
            progress.setValue(progress_value)
        vader_accuracy_array = np.array(vader_accuracy)
        anew_accuracy_array = np.array(anew_accuracy)
        voting_accuracy_array = np.array(voting_accuracy)
        console.append("> %s: %f (%f)" % ("VADER", vader_accuracy_array.mean(), vader_accuracy_array.std()))
        console.append("> %s: %f (%f)" % ("ANEW", anew_accuracy_array.mean(), anew_accuracy_array.std()))
        console.append("> %s: %f (%f)" % ("VOTING", voting_accuracy_array.mean(), voting_accuracy_array.std()))
        # prepare configuration for cross validation test harness
        models = [('NuSVC', NuSVC(nu=0.5, kernel='linear', probability=True, gamma='scale', cache_size=500,
                                  class_weight='balanced')),
                  ('LR', LogisticRegression(penalty='l2', class_weight='balanced', solver='saga', max_iter=5000,
                                            n_jobs=-1, warm_start=True)),
                  ('MNB', MultinomialNB(alpha=1))]
        # evaluate each model in turn
        results = []
        names = []
        show_info = 0
        # the voting ensemble leads the result list
        results.append(voting_accuracy)
        names.append("VOTING")
        for name, model in models:
            # TF-IDF vectorization feeds each scikit-learn estimator
            tf_idf = TfidfVectorizer()
            classifier = make_pipeline(tf_idf, model)
            cv_results = model_selection.cross_val_score(classifier, x_elements, y_elements,
                                                         cv=k_fold, scoring='accuracy', n_jobs=-1,
                                                         verbose=show_info)
            results.append(cv_results)
            names.append(name)
            console.append("> %s: %f (%f)" % (name, cv_results.mean(), cv_results.std()))
            progress_value += 20
            progress.setValue(progress_value)
        # add vader and anew classifiers
        results.append(vader_accuracy)
        names.append("VADER")
        results.append(anew_accuracy)
        names.append("ANEW")
        # get the ending time and calculate elapsed time
        end_time = time.time()
        elapsed_time = end_time - start_time
        console.append("> Data processed in " + time.strftime("%H:%M:%S", time.gmtime(elapsed_time)) + " seconds")
        return results, names
|
{"/youtube_sentiment_analysis/modules/analysis.py": ["/youtube_sentiment_analysis/modules/process.py", "/youtube_sentiment_analysis/modules/store.py", "/youtube_sentiment_analysis/modules/training.py", "/youtube_sentiment_analysis/modules/vote_classifier.py"], "/youtube_sentiment_analysis/modules/crawler.py": ["/youtube_sentiment_analysis/modules/store.py"], "/youtube_sentiment_analysis/__main__.py": ["/youtube_sentiment_analysis/modules/interface.py"], "/youtube_sentiment_analysis/modules/interface.py": ["/youtube_sentiment_analysis/modules/accuracy.py", "/youtube_sentiment_analysis/modules/analysis.py", "/youtube_sentiment_analysis/modules/crawler.py", "/youtube_sentiment_analysis/modules/display.py", "/youtube_sentiment_analysis/modules/store.py", "/youtube_sentiment_analysis/modules/training.py"], "/youtube_sentiment_analysis/modules/training.py": ["/youtube_sentiment_analysis/modules/process.py"], "/youtube_sentiment_analysis/modules/accuracy.py": ["/youtube_sentiment_analysis/modules/process.py", "/youtube_sentiment_analysis/modules/training.py", "/youtube_sentiment_analysis/modules/vote_classifier.py"]}
|
18,477
|
danghualong/stock
|
refs/heads/master
|
/src/cron/service/add_history_service.py
|
from ...constant import urls
from ...db import dboper
from ..parsers import historyParser
from ...logger import currentLogger
import datetime
import requests
def addHistoryDaily(days=300):
    """Fetch and persist daily history records for every known stock.

    days: how far back from today to request, in days (default 300).
    Stocks whose history download cannot be parsed are skipped.
    """
    stocks = dboper.getStocks()
    if not stocks:
        # logging's warn() is a deprecated alias of warning().
        currentLogger.warning("no stock fetched")
        return
    # Capture "now" once so start/stop cannot straddle a midnight boundary
    # (the original called datetime.now() twice).
    now = datetime.datetime.now()
    stop = now.strftime("%Y%m%d")
    start = (now - datetime.timedelta(days=days)).strftime("%Y%m%d")
    currentLogger.info("{0}-{1} records will be inserted".format(start, stop))
    total = len(stocks)
    for index, stock in enumerate(stocks, start=1):
        details = _getDetails(stock.code, start, stop)
        if details is None:
            continue
        for detail in details:
            dboper.insertDaily(detail)
        currentLogger.info("-------{2}/{3}---{0}:{1} insert all daily records".format(
            stock.code, stock.name, index, total))
def _getDetails(code, start, stop):
    """Download and parse the price history for *code* between start and stop.

    Returns the parsed detail list, or None when parsing fails (the error
    is logged, not raised).
    """
    url = urls.HISTORY_PRICE_URL.format(code, start, stop)
    resp = requests.get(url)
    content = resp.text
    details = None
    try:
        details = historyParser.parse(content)
    except Exception as ex:
        # The original mixed str.format ("{0}") and %-style ("%s")
        # placeholders in one call, so the stock code was never
        # interpolated into the log line. Use lazy %-style throughout.
        currentLogger.error("code:%s error \n%s", code, ex)
    return details
|
{"/src/cron/service/add_history_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/stat/view/trends.py": ["/src/db/__init__.py"], "/app.py": ["/src/__init__.py", "/src/tools/response_factory.py"], "/src/model/__init__.py": ["/src/model/model.py", "/src/model/errors.py"], "/src/errorhandlers.py": ["/src/model/__init__.py", "/src/logger.py"], "/src/tools/response_factory.py": ["/src/tools/serializer.py"], "/src/stat/controller/stocks.py": ["/src/stat/__init__.py", "/src/db/__init__.py", "/src/tools/response_factory.py"], "/src/__init__.py": ["/src/settings.py"], "/src/db/dboper.py": ["/src/model/__init__.py", "/src/settings.py", "/src/logger.py"], "/src/stat/service/trait_service.py": ["/src/db/__init__.py"], "/src/cron/parsers/historyParser.py": ["/src/model/__init__.py"], "/src/stat/util/trait_builder.py": ["/src/model/__init__.py"], "/src/cron/service/add_stock_service.py": ["/src/model/__init__.py", "/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/jisuStockParser.py": ["/src/model/__init__.py"], "/src/db/__init__.py": ["/src/settings.py", "/src/logger.py"], "/test.py": ["/src/model/__init__.py"], "/src/stat/controller/break_point.py": ["/src/stat/__init__.py"], "/src/routers.py": ["/src/stat/__init__.py"], "/src/stat/controller/stock_filter.py": ["/src/db/__init__.py", "/src/stat/__init__.py"], "/src/cron/service/add_stocks_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/dailyPriceParser.py": ["/src/model/__init__.py"], "/src/cron/service/add_current_daily_service.py": ["/src/db/__init__.py", "/src/logger.py"]}
|
18,478
|
danghualong/stock
|
refs/heads/master
|
/src/stat/__init__.py
|
from flask import Blueprint
# Blueprint for all /stat endpoints; controllers attach their routes to it.
stat_bp = Blueprint("stat", __name__, url_prefix="/stat")
# The controller modules import `stat_bp` from this package, so they must
# be imported AFTER the blueprint exists to avoid a circular import.
# Importing them here is what registers their routes on the blueprint.
from .controller import break_point
from .controller import stock_filter
from .controller import stocks
|
{"/src/cron/service/add_history_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/stat/view/trends.py": ["/src/db/__init__.py"], "/app.py": ["/src/__init__.py", "/src/tools/response_factory.py"], "/src/model/__init__.py": ["/src/model/model.py", "/src/model/errors.py"], "/src/errorhandlers.py": ["/src/model/__init__.py", "/src/logger.py"], "/src/tools/response_factory.py": ["/src/tools/serializer.py"], "/src/stat/controller/stocks.py": ["/src/stat/__init__.py", "/src/db/__init__.py", "/src/tools/response_factory.py"], "/src/__init__.py": ["/src/settings.py"], "/src/db/dboper.py": ["/src/model/__init__.py", "/src/settings.py", "/src/logger.py"], "/src/stat/service/trait_service.py": ["/src/db/__init__.py"], "/src/cron/parsers/historyParser.py": ["/src/model/__init__.py"], "/src/stat/util/trait_builder.py": ["/src/model/__init__.py"], "/src/cron/service/add_stock_service.py": ["/src/model/__init__.py", "/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/jisuStockParser.py": ["/src/model/__init__.py"], "/src/db/__init__.py": ["/src/settings.py", "/src/logger.py"], "/test.py": ["/src/model/__init__.py"], "/src/stat/controller/break_point.py": ["/src/stat/__init__.py"], "/src/routers.py": ["/src/stat/__init__.py"], "/src/stat/controller/stock_filter.py": ["/src/db/__init__.py", "/src/stat/__init__.py"], "/src/cron/service/add_stocks_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/dailyPriceParser.py": ["/src/model/__init__.py"], "/src/cron/service/add_current_daily_service.py": ["/src/db/__init__.py", "/src/logger.py"]}
|
18,479
|
danghualong/stock
|
refs/heads/master
|
/src/stat/view/trends.py
|
from ...db import dboper
from . import table
from ..service import trait_service as TraitService
def showCurrentTrends():
    """Interactively page through the MA/ATR table of every stock.

    After each table the user is prompted; entering 'q' stops the loop,
    any other input is echoed and the next stock is shown.
    """
    stocks = dboper.getStocks()
    if stocks is None or len(stocks) <= 0:
        print("no stock fetched")
        return
    for item in stocks:
        table.showMAAndATR(TraitService.getTraits(item.code), item)
        answer = input("点击q退出,其他键继续...\n")
        if answer == 'q':
            break
        print(answer)
|
{"/src/cron/service/add_history_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/stat/view/trends.py": ["/src/db/__init__.py"], "/app.py": ["/src/__init__.py", "/src/tools/response_factory.py"], "/src/model/__init__.py": ["/src/model/model.py", "/src/model/errors.py"], "/src/errorhandlers.py": ["/src/model/__init__.py", "/src/logger.py"], "/src/tools/response_factory.py": ["/src/tools/serializer.py"], "/src/stat/controller/stocks.py": ["/src/stat/__init__.py", "/src/db/__init__.py", "/src/tools/response_factory.py"], "/src/__init__.py": ["/src/settings.py"], "/src/db/dboper.py": ["/src/model/__init__.py", "/src/settings.py", "/src/logger.py"], "/src/stat/service/trait_service.py": ["/src/db/__init__.py"], "/src/cron/parsers/historyParser.py": ["/src/model/__init__.py"], "/src/stat/util/trait_builder.py": ["/src/model/__init__.py"], "/src/cron/service/add_stock_service.py": ["/src/model/__init__.py", "/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/jisuStockParser.py": ["/src/model/__init__.py"], "/src/db/__init__.py": ["/src/settings.py", "/src/logger.py"], "/test.py": ["/src/model/__init__.py"], "/src/stat/controller/break_point.py": ["/src/stat/__init__.py"], "/src/routers.py": ["/src/stat/__init__.py"], "/src/stat/controller/stock_filter.py": ["/src/db/__init__.py", "/src/stat/__init__.py"], "/src/cron/service/add_stocks_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/dailyPriceParser.py": ["/src/model/__init__.py"], "/src/cron/service/add_current_daily_service.py": ["/src/db/__init__.py", "/src/logger.py"]}
|
18,480
|
danghualong/stock
|
refs/heads/master
|
/src/model/model.py
|
import time
class Synchronizable(object):
    """Mixin providing an ``update_at`` timestamp string ("YYYY-MM-DD HH:MM:SS").

    The class-level attribute is kept for backward compatibility, but it was
    evaluated once at import time, so every record written by a long-running
    process carried the process start time. ``__init__`` now refreshes the
    timestamp per instance, at creation time.
    """
    update_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))

    def __init__(self):
        # Per-instance timestamp taken when the object is built.
        self.update_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
class Stock(Synchronizable):
    """A listed stock's identity; inherits `update_at` from Synchronizable."""
    code = ''  # exchange ticker code, e.g. "000333"
    name = ''  # human-readable company name
    prefix = 'sh'  # market prefix, e.g. "sh" or "sz" (default "sh")
class Daily(Synchronizable):
    """One trading day's full quote snapshot for a stock.

    All fields are kept as strings, mirroring how the quote source
    delivers them; `update_at` is inherited from Synchronizable.
    """
    code = ""  # stock code this snapshot belongs to
    date = ""  # trading date
    open = ""  # opening price (attribute name shadows builtin open(); attribute access only)
    last_close = ""  # previous session's closing price
    current = ""  # latest traded price
    high = ""  # session high
    low = ""  # session low
    quantity = ""  # traded volume
    amount = ""  # traded value
    # Five levels of buy-side (bid) quotes with their pending volumes.
    bid1 = ""
    bid1_amount = ""
    bid2 = ""
    bid2_amount = ""
    bid3 = ""
    bid3_amount = ""
    bid4 = ""
    bid4_amount = ""
    bid5 = ""
    bid5_amount = ""
    # Five levels of sell-side (ask) quotes with their pending volumes.
    ask1 = ""
    ask1_amount = ""
    ask2 = ""
    ask2_amount = ""
    ask3 = ""
    ask3_amount = ""
    ask4 = ""
    ask4_amount = ""
    ask5 = ""
    ask5_amount = ""
    turnover = ""  # turnover figure (exact semantics depend on the data source)
class Trait(object):
    """Aggregated daily features of a stock used for technical analysis.

    Holds one day's OHLC data plus per-window moving averages (MA) and
    average true ranges (ATR). Removed the commented-out class-level
    MA/ATR remnants; the dicts live on instances (see __init__).
    """
    date = ""
    close = 0.0
    high = 0.0
    last_close = 0.0
    low = 0.0
    open = 0.0
    quantity = 0.0
    amount = 0.0

    def __init__(self):
        # Per-instance dicts so instances never share indicator state.
        self.MA = {}   # moving averages, keyed by window size in days
        self.ATR = {}  # average true ranges, keyed by window size in days

    def getTrueRange(self):
        """Return the day's true range: the largest of high-low and the
        gaps between today's extremes and yesterday's close."""
        return max(self.high - self.low,
                   abs(self.high - self.last_close),
                   abs(self.low - self.last_close))

    def __str__(self):
        return "----{8}----,atr10:{0},atr20:{1},ma5:{2},ma10:{3},ma20:{4},ma30:{5},ma60:{6},ma120:{7}".format(round(self.ATR[10], 3),
                round(self.ATR[20], 3),
                round(self.MA[5], 3),
                round(self.MA[10], 3),
                round(self.MA[20], 3),
                round(self.MA[30], 3),
                round(self.MA[60], 3),
                round(self.MA[120], 3),
                self.date)
|
{"/src/cron/service/add_history_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/stat/view/trends.py": ["/src/db/__init__.py"], "/app.py": ["/src/__init__.py", "/src/tools/response_factory.py"], "/src/model/__init__.py": ["/src/model/model.py", "/src/model/errors.py"], "/src/errorhandlers.py": ["/src/model/__init__.py", "/src/logger.py"], "/src/tools/response_factory.py": ["/src/tools/serializer.py"], "/src/stat/controller/stocks.py": ["/src/stat/__init__.py", "/src/db/__init__.py", "/src/tools/response_factory.py"], "/src/__init__.py": ["/src/settings.py"], "/src/db/dboper.py": ["/src/model/__init__.py", "/src/settings.py", "/src/logger.py"], "/src/stat/service/trait_service.py": ["/src/db/__init__.py"], "/src/cron/parsers/historyParser.py": ["/src/model/__init__.py"], "/src/stat/util/trait_builder.py": ["/src/model/__init__.py"], "/src/cron/service/add_stock_service.py": ["/src/model/__init__.py", "/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/jisuStockParser.py": ["/src/model/__init__.py"], "/src/db/__init__.py": ["/src/settings.py", "/src/logger.py"], "/test.py": ["/src/model/__init__.py"], "/src/stat/controller/break_point.py": ["/src/stat/__init__.py"], "/src/routers.py": ["/src/stat/__init__.py"], "/src/stat/controller/stock_filter.py": ["/src/db/__init__.py", "/src/stat/__init__.py"], "/src/cron/service/add_stocks_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/dailyPriceParser.py": ["/src/model/__init__.py"], "/src/cron/service/add_current_daily_service.py": ["/src/db/__init__.py", "/src/logger.py"]}
|
18,481
|
danghualong/stock
|
refs/heads/master
|
/app.py
|
from src import createApp
import os
from src.tools.response_factory import create_response
app = createApp()


@app.route("/")
def index():
    """List every registered route (path, methods, endpoint) as JSON."""
    # url_map.iter_rules() is Flask/Werkzeug's public API; the original
    # read the private `_rules` attribute. The unused FLASK_ENV lookup
    # (`mode`) was dead code and has been removed.
    items = [dict(path=rule.rule, methods=list(rule.methods), endpoint=rule.endpoint)
             for rule in app.url_map.iter_rules()]
    return create_response(items)


if __name__ == '__main__':
    app.run()
|
{"/src/cron/service/add_history_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/stat/view/trends.py": ["/src/db/__init__.py"], "/app.py": ["/src/__init__.py", "/src/tools/response_factory.py"], "/src/model/__init__.py": ["/src/model/model.py", "/src/model/errors.py"], "/src/errorhandlers.py": ["/src/model/__init__.py", "/src/logger.py"], "/src/tools/response_factory.py": ["/src/tools/serializer.py"], "/src/stat/controller/stocks.py": ["/src/stat/__init__.py", "/src/db/__init__.py", "/src/tools/response_factory.py"], "/src/__init__.py": ["/src/settings.py"], "/src/db/dboper.py": ["/src/model/__init__.py", "/src/settings.py", "/src/logger.py"], "/src/stat/service/trait_service.py": ["/src/db/__init__.py"], "/src/cron/parsers/historyParser.py": ["/src/model/__init__.py"], "/src/stat/util/trait_builder.py": ["/src/model/__init__.py"], "/src/cron/service/add_stock_service.py": ["/src/model/__init__.py", "/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/jisuStockParser.py": ["/src/model/__init__.py"], "/src/db/__init__.py": ["/src/settings.py", "/src/logger.py"], "/test.py": ["/src/model/__init__.py"], "/src/stat/controller/break_point.py": ["/src/stat/__init__.py"], "/src/routers.py": ["/src/stat/__init__.py"], "/src/stat/controller/stock_filter.py": ["/src/db/__init__.py", "/src/stat/__init__.py"], "/src/cron/service/add_stocks_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/dailyPriceParser.py": ["/src/model/__init__.py"], "/src/cron/service/add_current_daily_service.py": ["/src/db/__init__.py", "/src/logger.py"]}
|
18,482
|
danghualong/stock
|
refs/heads/master
|
/src/model/__init__.py
|
from .model import Stock, Daily, Trait
from .errors import *
|
{"/src/cron/service/add_history_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/stat/view/trends.py": ["/src/db/__init__.py"], "/app.py": ["/src/__init__.py", "/src/tools/response_factory.py"], "/src/model/__init__.py": ["/src/model/model.py", "/src/model/errors.py"], "/src/errorhandlers.py": ["/src/model/__init__.py", "/src/logger.py"], "/src/tools/response_factory.py": ["/src/tools/serializer.py"], "/src/stat/controller/stocks.py": ["/src/stat/__init__.py", "/src/db/__init__.py", "/src/tools/response_factory.py"], "/src/__init__.py": ["/src/settings.py"], "/src/db/dboper.py": ["/src/model/__init__.py", "/src/settings.py", "/src/logger.py"], "/src/stat/service/trait_service.py": ["/src/db/__init__.py"], "/src/cron/parsers/historyParser.py": ["/src/model/__init__.py"], "/src/stat/util/trait_builder.py": ["/src/model/__init__.py"], "/src/cron/service/add_stock_service.py": ["/src/model/__init__.py", "/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/jisuStockParser.py": ["/src/model/__init__.py"], "/src/db/__init__.py": ["/src/settings.py", "/src/logger.py"], "/test.py": ["/src/model/__init__.py"], "/src/stat/controller/break_point.py": ["/src/stat/__init__.py"], "/src/routers.py": ["/src/stat/__init__.py"], "/src/stat/controller/stock_filter.py": ["/src/db/__init__.py", "/src/stat/__init__.py"], "/src/cron/service/add_stocks_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/dailyPriceParser.py": ["/src/model/__init__.py"], "/src/cron/service/add_current_daily_service.py": ["/src/db/__init__.py", "/src/logger.py"]}
|
18,483
|
danghualong/stock
|
refs/heads/master
|
/src/errorhandlers.py
|
from .model import APIException, ServerError
from werkzeug.exceptions import HTTPException
from .logger import currentLogger
from flask import request
def init_app(app):
    """Install a catch-all exception handler on *app*.

    HTTP errors are logged with the request path, wrapped in an
    APIException (code 1007); APIExceptions pass through unchanged;
    anything else becomes a generic ServerError unless DEBUG is on.
    """
    @app.errorhandler(Exception)
    def framework_error(e):
        # Log the failure; include the URL only for HTTP-level errors.
        if isinstance(e, HTTPException):
            currentLogger.error("url is {0},error info:{1}".format(request.path, e))
        else:
            currentLogger.error("error info:{0}".format(e))
        if isinstance(e, APIException):
            return e
        if isinstance(e, HTTPException):
            return APIException(1007, e.description)
        if app.config['DEBUG']:
            return e
        return ServerError()
|
{"/src/cron/service/add_history_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/stat/view/trends.py": ["/src/db/__init__.py"], "/app.py": ["/src/__init__.py", "/src/tools/response_factory.py"], "/src/model/__init__.py": ["/src/model/model.py", "/src/model/errors.py"], "/src/errorhandlers.py": ["/src/model/__init__.py", "/src/logger.py"], "/src/tools/response_factory.py": ["/src/tools/serializer.py"], "/src/stat/controller/stocks.py": ["/src/stat/__init__.py", "/src/db/__init__.py", "/src/tools/response_factory.py"], "/src/__init__.py": ["/src/settings.py"], "/src/db/dboper.py": ["/src/model/__init__.py", "/src/settings.py", "/src/logger.py"], "/src/stat/service/trait_service.py": ["/src/db/__init__.py"], "/src/cron/parsers/historyParser.py": ["/src/model/__init__.py"], "/src/stat/util/trait_builder.py": ["/src/model/__init__.py"], "/src/cron/service/add_stock_service.py": ["/src/model/__init__.py", "/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/jisuStockParser.py": ["/src/model/__init__.py"], "/src/db/__init__.py": ["/src/settings.py", "/src/logger.py"], "/test.py": ["/src/model/__init__.py"], "/src/stat/controller/break_point.py": ["/src/stat/__init__.py"], "/src/routers.py": ["/src/stat/__init__.py"], "/src/stat/controller/stock_filter.py": ["/src/db/__init__.py", "/src/stat/__init__.py"], "/src/cron/service/add_stocks_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/dailyPriceParser.py": ["/src/model/__init__.py"], "/src/cron/service/add_current_daily_service.py": ["/src/db/__init__.py", "/src/logger.py"]}
|
18,484
|
danghualong/stock
|
refs/heads/master
|
/src/tools/response_factory.py
|
from flask import make_response
from .serializer import DHLEncoder
import json
def create_response(payload):
    """Wrap *payload* in the standard {data, error_code} JSON envelope
    and return it as a Flask response with a JSON mimetype."""
    envelope = dict(data=payload, error_code=0)
    body = json.dumps(envelope, cls=DHLEncoder, ensure_ascii=False, indent=4)
    response = make_response(body)
    response.mimetype = "application/json"
    return response
|
{"/src/cron/service/add_history_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/stat/view/trends.py": ["/src/db/__init__.py"], "/app.py": ["/src/__init__.py", "/src/tools/response_factory.py"], "/src/model/__init__.py": ["/src/model/model.py", "/src/model/errors.py"], "/src/errorhandlers.py": ["/src/model/__init__.py", "/src/logger.py"], "/src/tools/response_factory.py": ["/src/tools/serializer.py"], "/src/stat/controller/stocks.py": ["/src/stat/__init__.py", "/src/db/__init__.py", "/src/tools/response_factory.py"], "/src/__init__.py": ["/src/settings.py"], "/src/db/dboper.py": ["/src/model/__init__.py", "/src/settings.py", "/src/logger.py"], "/src/stat/service/trait_service.py": ["/src/db/__init__.py"], "/src/cron/parsers/historyParser.py": ["/src/model/__init__.py"], "/src/stat/util/trait_builder.py": ["/src/model/__init__.py"], "/src/cron/service/add_stock_service.py": ["/src/model/__init__.py", "/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/jisuStockParser.py": ["/src/model/__init__.py"], "/src/db/__init__.py": ["/src/settings.py", "/src/logger.py"], "/test.py": ["/src/model/__init__.py"], "/src/stat/controller/break_point.py": ["/src/stat/__init__.py"], "/src/routers.py": ["/src/stat/__init__.py"], "/src/stat/controller/stock_filter.py": ["/src/db/__init__.py", "/src/stat/__init__.py"], "/src/cron/service/add_stocks_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/dailyPriceParser.py": ["/src/model/__init__.py"], "/src/cron/service/add_current_daily_service.py": ["/src/db/__init__.py", "/src/logger.py"]}
|
18,485
|
danghualong/stock
|
refs/heads/master
|
/src/stat/controller/stocks.py
|
from .. import stat_bp
from ...db import dboper
from ...tools.response_factory import create_response
@stat_bp.route("/", methods=['GET'])
def getStocks():
    """Return every known stock as a JSON response."""
    return create_response(dboper.getStocks())
@stat_bp.route("/<key>", methods=['GET'])
def getStocksByKey(key):
    """Look up stocks matching *key* (stock code or name) and return
    them as a JSON response."""
    matches = dboper.getStocksByKey(key)
    return create_response(matches)
|
{"/src/cron/service/add_history_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/stat/view/trends.py": ["/src/db/__init__.py"], "/app.py": ["/src/__init__.py", "/src/tools/response_factory.py"], "/src/model/__init__.py": ["/src/model/model.py", "/src/model/errors.py"], "/src/errorhandlers.py": ["/src/model/__init__.py", "/src/logger.py"], "/src/tools/response_factory.py": ["/src/tools/serializer.py"], "/src/stat/controller/stocks.py": ["/src/stat/__init__.py", "/src/db/__init__.py", "/src/tools/response_factory.py"], "/src/__init__.py": ["/src/settings.py"], "/src/db/dboper.py": ["/src/model/__init__.py", "/src/settings.py", "/src/logger.py"], "/src/stat/service/trait_service.py": ["/src/db/__init__.py"], "/src/cron/parsers/historyParser.py": ["/src/model/__init__.py"], "/src/stat/util/trait_builder.py": ["/src/model/__init__.py"], "/src/cron/service/add_stock_service.py": ["/src/model/__init__.py", "/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/jisuStockParser.py": ["/src/model/__init__.py"], "/src/db/__init__.py": ["/src/settings.py", "/src/logger.py"], "/test.py": ["/src/model/__init__.py"], "/src/stat/controller/break_point.py": ["/src/stat/__init__.py"], "/src/routers.py": ["/src/stat/__init__.py"], "/src/stat/controller/stock_filter.py": ["/src/db/__init__.py", "/src/stat/__init__.py"], "/src/cron/service/add_stocks_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/dailyPriceParser.py": ["/src/model/__init__.py"], "/src/cron/service/add_current_daily_service.py": ["/src/db/__init__.py", "/src/logger.py"]}
|
18,486
|
danghualong/stock
|
refs/heads/master
|
/src/stat/service/atr_service.py
|
def calcATR(traits, N=20):
    """Compute the N-day Average True Range (ATR) for each trait, in place.

    For the first N entries the ATR is the running mean of all true ranges
    seen so far; afterwards it is a simple moving average over the
    trailing N values.

    traits: ordered sequence of Trait-like objects exposing
            getTrueRange() and an ATR dict; each gets ATR[N] filled in.
    N: window size in days.

    (The original pre-initialized `fastIndex = 0` only to rebind it in the
    for loop; the manual indices are replaced with enumerate.)
    """
    total = 0.0
    slow = 0  # index of the oldest value still inside the window
    for fast, trait in enumerate(traits):
        if fast < N:
            # Window not yet full: average over the fast+1 values so far.
            total += trait.getTrueRange()
            trait.ATR[N] = total / (fast + 1)
        else:
            # Slide the window: drop the oldest true range, add the newest.
            total = total - traits[slow].getTrueRange() + trait.getTrueRange()
            trait.ATR[N] = total / N
            slow += 1
|
{"/src/cron/service/add_history_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/stat/view/trends.py": ["/src/db/__init__.py"], "/app.py": ["/src/__init__.py", "/src/tools/response_factory.py"], "/src/model/__init__.py": ["/src/model/model.py", "/src/model/errors.py"], "/src/errorhandlers.py": ["/src/model/__init__.py", "/src/logger.py"], "/src/tools/response_factory.py": ["/src/tools/serializer.py"], "/src/stat/controller/stocks.py": ["/src/stat/__init__.py", "/src/db/__init__.py", "/src/tools/response_factory.py"], "/src/__init__.py": ["/src/settings.py"], "/src/db/dboper.py": ["/src/model/__init__.py", "/src/settings.py", "/src/logger.py"], "/src/stat/service/trait_service.py": ["/src/db/__init__.py"], "/src/cron/parsers/historyParser.py": ["/src/model/__init__.py"], "/src/stat/util/trait_builder.py": ["/src/model/__init__.py"], "/src/cron/service/add_stock_service.py": ["/src/model/__init__.py", "/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/jisuStockParser.py": ["/src/model/__init__.py"], "/src/db/__init__.py": ["/src/settings.py", "/src/logger.py"], "/test.py": ["/src/model/__init__.py"], "/src/stat/controller/break_point.py": ["/src/stat/__init__.py"], "/src/routers.py": ["/src/stat/__init__.py"], "/src/stat/controller/stock_filter.py": ["/src/db/__init__.py", "/src/stat/__init__.py"], "/src/cron/service/add_stocks_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/dailyPriceParser.py": ["/src/model/__init__.py"], "/src/cron/service/add_current_daily_service.py": ["/src/db/__init__.py", "/src/logger.py"]}
|
18,487
|
danghualong/stock
|
refs/heads/master
|
/src/__init__.py
|
from flask import Flask
import os
from .settings import configs
from . import cron, logger, db, routers,errorhandlers
def createApp(configName=None):
    """Application factory: build, configure, and wire up the Flask app.

    configName: key into `configs`; when None it falls back to the
    FLASK_ENV environment variable, then to "production".
    """
    app = Flask(__name__)
    if configName is None:  # identity check instead of `== None`
        configName = os.getenv("FLASK_ENV", "production")
    app.config.from_object(configs[configName])
    # Initialize the subsystems in their original order.
    logger.init_app(app)
    errorhandlers.init_app(app)
    db.init_app(app)
    cron.init_app(app)
    routers.init_app(app)
    return app
|
{"/src/cron/service/add_history_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/stat/view/trends.py": ["/src/db/__init__.py"], "/app.py": ["/src/__init__.py", "/src/tools/response_factory.py"], "/src/model/__init__.py": ["/src/model/model.py", "/src/model/errors.py"], "/src/errorhandlers.py": ["/src/model/__init__.py", "/src/logger.py"], "/src/tools/response_factory.py": ["/src/tools/serializer.py"], "/src/stat/controller/stocks.py": ["/src/stat/__init__.py", "/src/db/__init__.py", "/src/tools/response_factory.py"], "/src/__init__.py": ["/src/settings.py"], "/src/db/dboper.py": ["/src/model/__init__.py", "/src/settings.py", "/src/logger.py"], "/src/stat/service/trait_service.py": ["/src/db/__init__.py"], "/src/cron/parsers/historyParser.py": ["/src/model/__init__.py"], "/src/stat/util/trait_builder.py": ["/src/model/__init__.py"], "/src/cron/service/add_stock_service.py": ["/src/model/__init__.py", "/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/jisuStockParser.py": ["/src/model/__init__.py"], "/src/db/__init__.py": ["/src/settings.py", "/src/logger.py"], "/test.py": ["/src/model/__init__.py"], "/src/stat/controller/break_point.py": ["/src/stat/__init__.py"], "/src/routers.py": ["/src/stat/__init__.py"], "/src/stat/controller/stock_filter.py": ["/src/db/__init__.py", "/src/stat/__init__.py"], "/src/cron/service/add_stocks_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/dailyPriceParser.py": ["/src/model/__init__.py"], "/src/cron/service/add_current_daily_service.py": ["/src/db/__init__.py", "/src/logger.py"]}
|
18,488
|
danghualong/stock
|
refs/heads/master
|
/src/stat/view/single_ma_view.py
|
from ..service import trait_service as TraitService
from . import table
def ShowSingleMA(stock):
    """Render the moving-average table for one stock."""
    table.showMA(TraitService.getTraits(stock.code), stock, True)
|
{"/src/cron/service/add_history_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/stat/view/trends.py": ["/src/db/__init__.py"], "/app.py": ["/src/__init__.py", "/src/tools/response_factory.py"], "/src/model/__init__.py": ["/src/model/model.py", "/src/model/errors.py"], "/src/errorhandlers.py": ["/src/model/__init__.py", "/src/logger.py"], "/src/tools/response_factory.py": ["/src/tools/serializer.py"], "/src/stat/controller/stocks.py": ["/src/stat/__init__.py", "/src/db/__init__.py", "/src/tools/response_factory.py"], "/src/__init__.py": ["/src/settings.py"], "/src/db/dboper.py": ["/src/model/__init__.py", "/src/settings.py", "/src/logger.py"], "/src/stat/service/trait_service.py": ["/src/db/__init__.py"], "/src/cron/parsers/historyParser.py": ["/src/model/__init__.py"], "/src/stat/util/trait_builder.py": ["/src/model/__init__.py"], "/src/cron/service/add_stock_service.py": ["/src/model/__init__.py", "/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/jisuStockParser.py": ["/src/model/__init__.py"], "/src/db/__init__.py": ["/src/settings.py", "/src/logger.py"], "/test.py": ["/src/model/__init__.py"], "/src/stat/controller/break_point.py": ["/src/stat/__init__.py"], "/src/routers.py": ["/src/stat/__init__.py"], "/src/stat/controller/stock_filter.py": ["/src/db/__init__.py", "/src/stat/__init__.py"], "/src/cron/service/add_stocks_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/dailyPriceParser.py": ["/src/model/__init__.py"], "/src/cron/service/add_current_daily_service.py": ["/src/db/__init__.py", "/src/logger.py"]}
|
18,489
|
danghualong/stock
|
refs/heads/master
|
/src/stat/view/single_atr_view.py
|
from ..service import trait_service as TraitService
from . import table
def ShowSingleATR(stock):
    """Render the average-true-range table for one stock."""
    table.showATR(TraitService.getTraits(stock.code), stock, True)
|
{"/src/cron/service/add_history_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/stat/view/trends.py": ["/src/db/__init__.py"], "/app.py": ["/src/__init__.py", "/src/tools/response_factory.py"], "/src/model/__init__.py": ["/src/model/model.py", "/src/model/errors.py"], "/src/errorhandlers.py": ["/src/model/__init__.py", "/src/logger.py"], "/src/tools/response_factory.py": ["/src/tools/serializer.py"], "/src/stat/controller/stocks.py": ["/src/stat/__init__.py", "/src/db/__init__.py", "/src/tools/response_factory.py"], "/src/__init__.py": ["/src/settings.py"], "/src/db/dboper.py": ["/src/model/__init__.py", "/src/settings.py", "/src/logger.py"], "/src/stat/service/trait_service.py": ["/src/db/__init__.py"], "/src/cron/parsers/historyParser.py": ["/src/model/__init__.py"], "/src/stat/util/trait_builder.py": ["/src/model/__init__.py"], "/src/cron/service/add_stock_service.py": ["/src/model/__init__.py", "/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/jisuStockParser.py": ["/src/model/__init__.py"], "/src/db/__init__.py": ["/src/settings.py", "/src/logger.py"], "/test.py": ["/src/model/__init__.py"], "/src/stat/controller/break_point.py": ["/src/stat/__init__.py"], "/src/routers.py": ["/src/stat/__init__.py"], "/src/stat/controller/stock_filter.py": ["/src/db/__init__.py", "/src/stat/__init__.py"], "/src/cron/service/add_stocks_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/dailyPriceParser.py": ["/src/model/__init__.py"], "/src/cron/service/add_current_daily_service.py": ["/src/db/__init__.py", "/src/logger.py"]}
|
18,490
|
danghualong/stock
|
refs/heads/master
|
/src/db/dboper.py
|
import sqlite3
import os
from ..model import Daily, Stock
from ..settings import configs
from ..logger import currentLogger
DB_NAME = configs[os.getenv("FLASK_ENV", "production")].SQLALCHEMY_DATABASE_URI
def insertStock(stock):
    """Insert *stock* into the stock table; duplicates are ignored.

    Errors are logged and swallowed (best-effort insert).
    """
    conn = None
    cur = None  # pre-bind so `finally` cannot hit an unbound name
    try:
        conn = sqlite3.connect(DB_NAME)
        cur = conn.cursor()
        cur.execute(
            'insert or ignore into stock(code,name,prefix,update_at) values(?,?,?,?)',
            (
                stock.code,
                stock.name,
                stock.prefix,
                stock.update_at,
            ))
        conn.commit()
    except Exception as e:
        currentLogger.error("%s", e)
    finally:
        # The original referenced cur/conn here even when sqlite3.connect()
        # itself failed, raising NameError and masking the real error.
        if cur is not None:
            cur.close()
        if conn is not None:
            conn.close()
def getStocks():
    """Return all active stocks (rows with type < 9) as Stock objects.

    On any database error an empty (or partial) list is returned and the
    error is logged.
    """
    result = []
    conn = None
    cur = None  # pre-bind so `finally` cannot hit an unbound name
    try:
        conn = sqlite3.connect(DB_NAME)
        cur = conn.cursor()
        cur.execute('select * from stock where type<9')
        for item in cur.fetchall():
            stock = Stock()
            stock.code = item[0]
            stock.name = item[1]
            stock.prefix = item[2]
            result.append(stock)
    except Exception as e:
        currentLogger.error("%s", e)
    finally:
        # Original could NameError here if sqlite3.connect() failed.
        if cur is not None:
            cur.close()
        if conn is not None:
            conn.close()
    return result
def getStocksByKey(keyword):
    """Find stocks whose name contains *keyword* or whose code equals it.

    Returns a (possibly empty) list of Stock objects; database errors are
    logged and swallowed.
    """
    result = []
    conn = None
    cur = None  # pre-bind so `finally` cannot hit an unbound name
    try:
        conn = sqlite3.connect(DB_NAME)
        cur = conn.cursor()
        # Parameterized query: keyword is never interpolated into the SQL.
        cur.execute("select * from stock where name like ? or code=?",
                    ('%' + keyword + '%', keyword,))
        for item in cur.fetchall():
            stock = Stock()
            stock.code = item[0]
            stock.name = item[1]
            stock.prefix = item[2]
            result.append(stock)
    except Exception as e:
        currentLogger.error("%s", e)
    finally:
        # Original could NameError here if sqlite3.connect() failed.
        if cur is not None:
            cur.close()
        if conn is not None:
            conn.close()
    return result
def insertDaily(daily, replace=False):
    """Insert one Daily quote row.

    daily: the Daily record to persist.
    replace: when True an existing row is overwritten (insert or replace);
             otherwise duplicates are silently ignored (insert or ignore).
    Errors are logged and swallowed.
    """
    conn = None
    cur = None  # pre-bind so `finally` cannot hit an unbound name
    try:
        conn = sqlite3.connect(DB_NAME)
        cur = conn.cursor()
        cur.execute(
            '''insert or {0} into daily(
            code,date,open,last_close,current,high,low,quantity,amount,
            bid1,bid2,bid3,bid4,bid5,ask1,ask2,ask3,ask4,ask5,
            bid1_amount,bid2_amount,bid3_amount,bid4_amount,bid5_amount,
            ask1_amount,ask2_amount,ask3_amount,ask4_amount,ask5_amount,
            turnover,update_at) values
            (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)'''.
            format('replace' if replace else 'ignore'), (
                daily.code,
                daily.date,
                daily.open,
                daily.last_close,
                daily.current,
                daily.high,
                daily.low,
                daily.quantity,
                daily.amount,
                daily.bid1,
                daily.bid2,
                daily.bid3,
                daily.bid4,
                daily.bid5,
                daily.ask1,
                daily.ask2,
                daily.ask3,
                daily.ask4,
                daily.ask5,
                daily.bid1_amount,
                daily.bid2_amount,
                daily.bid3_amount,
                daily.bid4_amount,
                daily.bid5_amount,
                daily.ask1_amount,
                daily.ask2_amount,
                daily.ask3_amount,
                daily.ask4_amount,
                daily.ask5_amount,
                daily.turnover,
                daily.update_at,
            ))
        conn.commit()
    except Exception as e:
        currentLogger.error("%s", e)
    finally:
        # Original could NameError here if sqlite3.connect() failed.
        if cur is not None:
            cur.close()
        if conn is not None:
            conn.close()
def getDailys(code, N):
    """Return the last N daily rows for *code*, oldest first.

    The query fetches newest-first (order by date desc limit N) and the
    result is reversed before returning, so callers get chronological
    order. Database errors are logged and swallowed.
    """
    result = []
    conn = None
    cur = None  # pre-bind so `finally` cannot hit an unbound name
    try:
        conn = sqlite3.connect(DB_NAME)
        cur = conn.cursor()
        cur.execute('select * from daily where code=? order by date desc limit ?',
                    (code, N,))
        for item in cur.fetchall():
            daily = Daily()
            daily.code = item[0]
            daily.date = item[1]
            daily.open = item[2]
            daily.last_close = item[3]
            daily.current = item[4]
            daily.high = item[5]
            daily.low = item[6]
            daily.quantity = item[7]
            daily.amount = item[8]
            # Bid/ask levels are assumed interleaved (price, amount) in
            # the table's column order.
            daily.bid1 = item[9]
            daily.bid1_amount = item[10]
            daily.bid2 = item[11]
            daily.bid2_amount = item[12]
            daily.bid3 = item[13]
            daily.bid3_amount = item[14]
            daily.bid4 = item[15]
            daily.bid4_amount = item[16]
            daily.bid5 = item[17]
            daily.bid5_amount = item[18]
            daily.ask1 = item[19]
            daily.ask1_amount = item[20]
            daily.ask2 = item[21]
            daily.ask2_amount = item[22]
            daily.ask3 = item[23]
            daily.ask3_amount = item[24]
            daily.ask4 = item[25]
            daily.ask4_amount = item[26]
            daily.ask5 = item[27]
            daily.ask5_amount = item[28]
            # NOTE(review): column index 29 is skipped and turnover is read
            # from item[30]; verify against the actual `daily` table column
            # order — if it matches this read mapping, item[29] would be
            # turnover and item[30] update_at. Kept as-is pending schema check.
            daily.turnover = item[30]
            result.append(daily)
    except Exception as e:
        currentLogger.error("%s", e)
    finally:
        # Original could NameError here if sqlite3.connect() failed.
        if cur is not None:
            cur.close()
        if conn is not None:
            conn.close()
    return result[::-1]
if __name__ == "__main__":
    # Ad-hoc manual smoke test: inserts one stock and one daily quote row.
    # NOTE(review): this absolute import only resolves when the script is
    # run next to a top-level `model` module; inside this package the
    # models come from `..model` (see the imports at the top of the file).
    from model import Stock, Daily
    stock = Stock()
    stock.name = "美的集团"
    stock.code = "000333"
    stock.prefix = "sz"
    insertStock(stock)
    # A fabricated daily record for the stock inserted above.
    daily = Daily()
    daily.code = "000333"
    daily.date = "2021-02-10"
    daily.open = "102.9"
    daily.last_close = "101.7"
    daily.current = "107.2"
    daily.high = "108"
    daily.low = "101.5"
    daily.quantity = "8654778686"
    daily.amount = "1545678968498786.01"
    daily.bid1 = "107.1"
    daily.bid2 = "107.0"
    daily.bid3 = "106.9"
    daily.bid4 = "106.7"
    daily.bid5 = "106.0"
    daily.bid1_amount = "3047"
    daily.bid2_amount = "3048"
    daily.bid3_amount = "3046"
    daily.bid4_amount = "3045"
    daily.bid5_amount = "3044"
    daily.ask1 = "107.2"
    daily.ask2 = "107.3"
    daily.ask3 = "107.4"
    daily.ask4 = "107.5"
    daily.ask5 = "107.6"
    daily.ask1_amount = "5041"
    daily.ask2_amount = "5042"
    daily.ask3_amount = "5043"
    daily.ask4_amount = "5044"
    daily.ask5_amount = "5045"
    insertDaily(daily)
|
{"/src/cron/service/add_history_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/stat/view/trends.py": ["/src/db/__init__.py"], "/app.py": ["/src/__init__.py", "/src/tools/response_factory.py"], "/src/model/__init__.py": ["/src/model/model.py", "/src/model/errors.py"], "/src/errorhandlers.py": ["/src/model/__init__.py", "/src/logger.py"], "/src/tools/response_factory.py": ["/src/tools/serializer.py"], "/src/stat/controller/stocks.py": ["/src/stat/__init__.py", "/src/db/__init__.py", "/src/tools/response_factory.py"], "/src/__init__.py": ["/src/settings.py"], "/src/db/dboper.py": ["/src/model/__init__.py", "/src/settings.py", "/src/logger.py"], "/src/stat/service/trait_service.py": ["/src/db/__init__.py"], "/src/cron/parsers/historyParser.py": ["/src/model/__init__.py"], "/src/stat/util/trait_builder.py": ["/src/model/__init__.py"], "/src/cron/service/add_stock_service.py": ["/src/model/__init__.py", "/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/jisuStockParser.py": ["/src/model/__init__.py"], "/src/db/__init__.py": ["/src/settings.py", "/src/logger.py"], "/test.py": ["/src/model/__init__.py"], "/src/stat/controller/break_point.py": ["/src/stat/__init__.py"], "/src/routers.py": ["/src/stat/__init__.py"], "/src/stat/controller/stock_filter.py": ["/src/db/__init__.py", "/src/stat/__init__.py"], "/src/cron/service/add_stocks_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/dailyPriceParser.py": ["/src/model/__init__.py"], "/src/cron/service/add_current_daily_service.py": ["/src/db/__init__.py", "/src/logger.py"]}
|
18,491
|
danghualong/stock
|
refs/heads/master
|
/src/constant/urls.py
|
DAILY_PRICE_URL = 'http://hq.sinajs.cn/list={0}' # prefix sz = Shenzhen market, sh = Shanghai market
HISTORY_PRICE_URL = 'https://q.stock.sohu.com/hisHq?code=cn_{0}&start={1}&end={2}&stat=0&order=A&period=d&callback=historySearchHandler&rt=jsonp'
# The appkey comes from the personal admin console: https://www.jisuapi.com/api/stock/
# NOTE(review): the appkey is hard-coded here; consider moving it to config/env.
JISU_STOCK_URL='https://api.jisuapi.com/stock/list?classid=1&pagenum={0}&pagesize=40&appkey=9c9d121ac353b5c1'
|
{"/src/cron/service/add_history_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/stat/view/trends.py": ["/src/db/__init__.py"], "/app.py": ["/src/__init__.py", "/src/tools/response_factory.py"], "/src/model/__init__.py": ["/src/model/model.py", "/src/model/errors.py"], "/src/errorhandlers.py": ["/src/model/__init__.py", "/src/logger.py"], "/src/tools/response_factory.py": ["/src/tools/serializer.py"], "/src/stat/controller/stocks.py": ["/src/stat/__init__.py", "/src/db/__init__.py", "/src/tools/response_factory.py"], "/src/__init__.py": ["/src/settings.py"], "/src/db/dboper.py": ["/src/model/__init__.py", "/src/settings.py", "/src/logger.py"], "/src/stat/service/trait_service.py": ["/src/db/__init__.py"], "/src/cron/parsers/historyParser.py": ["/src/model/__init__.py"], "/src/stat/util/trait_builder.py": ["/src/model/__init__.py"], "/src/cron/service/add_stock_service.py": ["/src/model/__init__.py", "/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/jisuStockParser.py": ["/src/model/__init__.py"], "/src/db/__init__.py": ["/src/settings.py", "/src/logger.py"], "/test.py": ["/src/model/__init__.py"], "/src/stat/controller/break_point.py": ["/src/stat/__init__.py"], "/src/routers.py": ["/src/stat/__init__.py"], "/src/stat/controller/stock_filter.py": ["/src/db/__init__.py", "/src/stat/__init__.py"], "/src/cron/service/add_stocks_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/dailyPriceParser.py": ["/src/model/__init__.py"], "/src/cron/service/add_current_daily_service.py": ["/src/db/__init__.py", "/src/logger.py"]}
|
18,492
|
danghualong/stock
|
refs/heads/master
|
/src/stat/view/table.py
|
import matplotlib.pyplot as plt
# Use the SimHei sans-serif font so Chinese stock names render correctly
# in chart titles and legends (default fonts lack CJK glyphs).
plt.rcParams['font.family'] = ['sans-serif']
plt.rcParams['font.sans-serif'] = ['SimHei']
def showMAAndATR(traits, stock):
    """Render the MA curves (top panel) and ATR curves (bottom panel)
    of one stock in a single two-row figure, then display it."""
    panels = ((1, showMA), (2, showATR))
    for row, renderer in panels:
        plt.subplot(2, 1, row)
        renderer(traits, stock)
    plt.show()
def showATR(traits, stock, autoShow=False):
    """Plot the 10-day ATR next to each trait's raw true range.

    When autoShow is True the figure gets a "name(code)" title and is
    displayed immediately; otherwise the caller composes the figure.
    """
    atr10 = [t.ATR[10] for t in traits]
    true_range = [t.getTrueRange() for t in traits]
    xs = range(len(atr10))
    plt.plot(xs, atr10, marker='o', markersize=3, label='ATR10')
    plt.plot(xs, true_range, marker='*', markersize=3, label='TR')
    plt.legend()
    if autoShow:
        plt.title("{0}({1})".format(stock.name, stock.code))
        plt.show()
def showMA(traits,stock,autoShow=False):
    """Plot the 5/10/20-day moving averages of one stock.

    Parameters:
        traits: sequence of trait objects exposing an MA mapping keyed by
            period (e.g. t.MA[5]).
        stock: object with .name and .code, used for the chart title.
        autoShow: when True, display the figure immediately.
    """
    plt.title("{0}({1})".format(stock.name, stock.code))
    # Only these three averages are actually drawn; the original also
    # built MA30/60/120 series that were never plotted (dead work).
    xs = None
    for period, marker in ((5, 'o'), (10, '*'), (20, '^')):
        series = [t.MA[period] for t in traits]
        if xs is None:
            xs = range(len(series))
        plt.plot(xs, series, marker=marker, markersize=3,
                 label='MA{0:02d}'.format(period))
    plt.legend()
    if autoShow:
        plt.show()
|
{"/src/cron/service/add_history_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/stat/view/trends.py": ["/src/db/__init__.py"], "/app.py": ["/src/__init__.py", "/src/tools/response_factory.py"], "/src/model/__init__.py": ["/src/model/model.py", "/src/model/errors.py"], "/src/errorhandlers.py": ["/src/model/__init__.py", "/src/logger.py"], "/src/tools/response_factory.py": ["/src/tools/serializer.py"], "/src/stat/controller/stocks.py": ["/src/stat/__init__.py", "/src/db/__init__.py", "/src/tools/response_factory.py"], "/src/__init__.py": ["/src/settings.py"], "/src/db/dboper.py": ["/src/model/__init__.py", "/src/settings.py", "/src/logger.py"], "/src/stat/service/trait_service.py": ["/src/db/__init__.py"], "/src/cron/parsers/historyParser.py": ["/src/model/__init__.py"], "/src/stat/util/trait_builder.py": ["/src/model/__init__.py"], "/src/cron/service/add_stock_service.py": ["/src/model/__init__.py", "/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/jisuStockParser.py": ["/src/model/__init__.py"], "/src/db/__init__.py": ["/src/settings.py", "/src/logger.py"], "/test.py": ["/src/model/__init__.py"], "/src/stat/controller/break_point.py": ["/src/stat/__init__.py"], "/src/routers.py": ["/src/stat/__init__.py"], "/src/stat/controller/stock_filter.py": ["/src/db/__init__.py", "/src/stat/__init__.py"], "/src/cron/service/add_stocks_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/dailyPriceParser.py": ["/src/model/__init__.py"], "/src/cron/service/add_current_daily_service.py": ["/src/db/__init__.py", "/src/logger.py"]}
|
18,493
|
danghualong/stock
|
refs/heads/master
|
/src/stat/service/trait_service.py
|
from ...db import dboper
from ..util import trait_builder as TraitBuilder
from . import atr_service as ATRService
from . import ma_service as MAService
import math
def getTraits(code, days=50, N=120):
    """Build the aggregated technical traits of one stock.

    Parameters:
        code: stock code.
        days: number of observation rows to return.
        N: longest averaging window; N-1 extra rows of history are
           loaded so the first returned row has a full window behind it.

    Returns:
        list of trait objects covering the most recent `days` rows.
    """
    dailys = dboper.getDailys(code, days + N - 1)
    traits = [TraitBuilder.populateTrait(daily) for daily in dailys]
    for period in (10, 20):
        ATRService.calcATR(traits, period)
    for period in (5, 10, 20, 30, 60, 120):
        MAService.calcMA(traits, period)
    return traits[-days:]
|
{"/src/cron/service/add_history_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/stat/view/trends.py": ["/src/db/__init__.py"], "/app.py": ["/src/__init__.py", "/src/tools/response_factory.py"], "/src/model/__init__.py": ["/src/model/model.py", "/src/model/errors.py"], "/src/errorhandlers.py": ["/src/model/__init__.py", "/src/logger.py"], "/src/tools/response_factory.py": ["/src/tools/serializer.py"], "/src/stat/controller/stocks.py": ["/src/stat/__init__.py", "/src/db/__init__.py", "/src/tools/response_factory.py"], "/src/__init__.py": ["/src/settings.py"], "/src/db/dboper.py": ["/src/model/__init__.py", "/src/settings.py", "/src/logger.py"], "/src/stat/service/trait_service.py": ["/src/db/__init__.py"], "/src/cron/parsers/historyParser.py": ["/src/model/__init__.py"], "/src/stat/util/trait_builder.py": ["/src/model/__init__.py"], "/src/cron/service/add_stock_service.py": ["/src/model/__init__.py", "/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/jisuStockParser.py": ["/src/model/__init__.py"], "/src/db/__init__.py": ["/src/settings.py", "/src/logger.py"], "/test.py": ["/src/model/__init__.py"], "/src/stat/controller/break_point.py": ["/src/stat/__init__.py"], "/src/routers.py": ["/src/stat/__init__.py"], "/src/stat/controller/stock_filter.py": ["/src/db/__init__.py", "/src/stat/__init__.py"], "/src/cron/service/add_stocks_service.py": ["/src/db/__init__.py", "/src/logger.py"], "/src/cron/parsers/dailyPriceParser.py": ["/src/model/__init__.py"], "/src/cron/service/add_current_daily_service.py": ["/src/db/__init__.py", "/src/logger.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.