index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
16,374
|
wordwarrior01/MultiFileReader
|
refs/heads/master
|
/modules/file_handler.py
|
#
# Nimish Nayak
# 10-06-2017
#
# required libraries
import logging
import os
from utils import Utils
# File Hander
# This module will handle all the file handling operations
class FileHandler():
    """Dispatch an input file to the matching parser and emit the result.

    Supports .csv, .yml and .xml inputs; the processed value is written to
    ``output_file`` when given, otherwise printed to stdout.
    """
    def __init__(self, input_file, output_file=None):
        # input_file: path of the data file to process
        # output_file: optional path for the result; None means stdout
        self.input_file = input_file
        self.output_file = output_file
        logging.debug("File Hander initialized")
    def process_file(self):
        """Pick a parser by file extension, run it and store the result string."""
        file_type = Utils.get_file_type(self.input_file)
        # composition: import the concrete parser lazily so only the module
        # for the needed format is loaded
        if file_type == ".csv":
            logging.info("parsing the csv file")
            from modules.csv_parser import CsvParser
            parser = CsvParser(self.input_file)
        elif file_type == ".yml":
            logging.info("parsing the yml file")
            from modules.yml_parser import YmlParser
            parser = YmlParser(self.input_file)
        elif file_type == ".xml":
            logging.info("parsing the xml file")
            from modules.xml_parser import XmlParser
            parser = XmlParser(self.input_file)
        else:
            # unsupported extension: fail loudly instead of guessing a format
            raise IOError("File format accepted are xml,csv and yml")
        # common parser pipeline: load -> parse -> process -> fetch value
        parser.load_file()
        parser.parse_file()
        parser.process_file()
        # keep the processed value as a string, ready to be written out
        self.data = str(parser.get_value())
    def write_output(self):
        """Write self.data to the output file, or to stdout when none is set."""
        if not self.output_file:
            logging.info("Writing output to stdout as output file name not provided")
            print self.data
        else:
            # make sure the target directory exists before opening the file
            Utils.create_output_directory(self.output_file)
            with open(self.output_file, 'w') as f:
                logging.info("Writing output to the file")
                f.write(self.data)
|
{"/main.py": ["/modules/utils.py"]}
|
16,375
|
wordwarrior01/MultiFileReader
|
refs/heads/master
|
/modules/xml_parser.py
|
#
# Nimish Nayak
# 10-06-2017
#
#
# XML Parser
#
# required libraries
import logging
import xml.dom.minidom
from parser import Parser
from utils import Utils
class XmlParser(Parser):
    """Parser implementation for XML input files with <user> elements."""
    def __init__(self, input_file):
        # delegate common state (data list, accumulated value) to the base class
        Parser.__init__(self, input_file)
        logging.debug("Xml parser initialized")
    def load_file(self):
        """Load the XML document; on malformed input keep self.doc as None."""
        try:
            self.doc = xml.dom.minidom.parse(self.input_file)
        except xml.parsers.expat.ExpatError as e:
            logging.info(e)
            self.doc = None
    def parse_file(self):
        """Collect one dict per <user> element into self.data."""
        if not self.doc:
            return
        users = self.doc.getElementsByTagName("user")
        logging.debug("%d users:" % users.length)
        for user in users:
            # defaults apply whenever a tag is absent or empty
            val = {"name": "", "value": 0, "active": False}
            name_tags = user.getElementsByTagName("name")
            value_tags = user.getElementsByTagName("value")
            active_tags = user.getElementsByTagName("active")
            if name_tags and name_tags[0] and name_tags[0].firstChild:
                val["name"] = str(name_tags[0].firstChild.nodeValue)
            if active_tags and active_tags[0] and active_tags[0].firstChild:
                val["active"] = Utils.str_to_bool(active_tags[0].firstChild.nodeValue)
            # the value only matters for active users
            if value_tags and value_tags[0] and value_tags[0].firstChild and val["active"]:
                val["value"] = int(value_tags[0].firstChild.nodeValue)
            self.data.append(val)
        logging.debug("data saved: %s" % self.data)
|
{"/main.py": ["/modules/utils.py"]}
|
16,376
|
wordwarrior01/MultiFileReader
|
refs/heads/master
|
/modules/csv_parser.py
|
#
# Nimish Nayak
# 10-06-2017
#
#
# CSV Parser
#
# required libraries
import logging
import csv
from parser import Parser
from utils import Utils
class CsvParser(Parser):
    """Parser implementation for CSV input files (name, active, value rows)."""
    def __init__(self, input_file):
        # delegate common state (data list, accumulated value) to the base class
        Parser.__init__(self, input_file)
        logging.debug("Csv parser initialized")
    def parse_file(self):
        """Collect one dict per CSV data row (header skipped) into self.data."""
        with open(self.input_file, "rb") as f:
            rows = list(csv.reader(f))[1:]
        logging.debug("%d users:" % len(rows))
        for row in rows:
            # defaults apply for malformed rows and empty cells
            record = {"name": "", "active": False, "value": 0}
            # only well-formed three-column rows are interpreted
            if len(row) == 3:
                if row[0]:
                    record["name"] = row[0]
                if row[1]:
                    record["active"] = Utils.str_to_bool(row[1])
                if row[2]:
                    record["value"] = int(row[2])
            self.data.append(record)
        logging.debug("data saved: %s" % self.data)
|
{"/main.py": ["/modules/utils.py"]}
|
16,377
|
wordwarrior01/MultiFileReader
|
refs/heads/master
|
/modules/xml_parser_test.py
|
#
# Nimish Nayak
# 10-06-2017
#
#
# Xml Parser - Class Tests
#
from xml_parser import XmlParser
import os
# shared scratch file used by every test in this module
test_file = os.path.join(os.getcwd(),"Test.xml")
def test_xml_parser_with_missing_values():
    """A <user> missing its <value> tag falls back to the default of 0."""
    global test_file
    # first user has no <value>; second is inactive, so its value is ignored
    with open(test_file,"w")as f:
        f.write("""<?xml version="1.0" encoding="UTF-8" ?>
<users>
<user>
<name>John</name>
<active>true</active>
</user>
<user>
<name>Mark</name>
<active>false</active>
<value>250</value>
</user>
</users>""")
    y = XmlParser(test_file)
    y.load_file()
    y.parse_file()
    y.process_file()
    assert(y.get_value() == 0)
def test_xml_parser_with_malformed_xml():
    """Malformed XML must be swallowed by load_file and yield a value of 0."""
    global test_file
    with open(test_file,"w")as f:
        f.write("""<?xml version="1.0" encoding="UTF-8" ?>
<users>
<user
<name>John</name>
<active>true</active>
/user>
<user>
<name>Mark</name>
active>false</active>
<value>250</value
</user>
</users>""")
    y = XmlParser(test_file)
    y.load_file()
    y.parse_file()
    y.process_file()
    assert(y.get_value() == 0)
def test_xml_parser_with_blank_file():
    """An empty file is a parse error; the processed value stays 0."""
    global test_file
    with open(test_file,"w")as f:
        f.write("")
    y = XmlParser(test_file)
    y.load_file()
    y.parse_file()
    y.process_file()
    assert(y.get_value() == 0)
def test_xml_parser_with_no_child_nodes():
    """A document with an unclosed root element yields no users and value 0."""
    global test_file
    with open(test_file,"w")as f:
        f.write("""<?xml version="1.0" encoding="UTF-8" ?>
<users>""")
    y = XmlParser(test_file)
    y.load_file()
    y.parse_file()
    y.process_file()
    assert(y.get_value() == 0)
def test_xml_parser_with_xml_dtd():
    """A bare XML declaration with no root element still processes to 0."""
    global test_file
    with open(test_file,"w")as f:
        f.write("""<?xml version="1.0" encoding="UTF-8" ?>""")
    y = XmlParser(test_file)
    y.load_file()
    y.parse_file()
    y.process_file()
    assert(y.get_value() == 0)
# NOTE(review): this runs at module import time, BEFORE any test executes —
# it removes a leftover Test.xml from a previous run and will raise if none
# exists; a teardown/fixture would be safer
os.remove(test_file)
|
{"/main.py": ["/modules/utils.py"]}
|
16,378
|
wordwarrior01/MultiFileReader
|
refs/heads/master
|
/modules/parser.py
|
#
# Nimish Nayak
# 10-06-2017
#
#
# Parser - Base Class
#
# required libraries
import logging
from abc import ABCMeta, abstractmethod
class Parser():
    """Abstract base class holding the shared parse/process pipeline state."""
    # py2-style ABC declaration, kept for compatibility with the codebase
    __metaclass__ = ABCMeta
    def __init__(self, input_file):
        # path of the file being parsed
        self.input_file = input_file
        # list of per-user dicts, filled in by parse_file()
        self.data = []
        # running total computed by process_file()
        self.value = 0
    def load_file(self):
        """Default loader: concrete parsers override this when needed."""
        self.doc = None
    @abstractmethod
    def parse_file(self):
        """Populate self.data; must be supplied by each concrete parser."""
        pass
    def process_file(self):
        """Sum the 'value' field of every well-formed, active record."""
        logging.debug("Processing the xml file")
        if not self.data:
            return
        for record in self.data:
            well_formed = type(record) == dict and len(record.keys()) == 3
            if well_formed and "active" in record and "value" in record and record["active"]:
                self.value += record["value"]
    def get_value(self):
        """Return the value accumulated by process_file()."""
        logging.debug("Processed value is: %s" % str(self.value))
        return self.value
|
{"/main.py": ["/modules/utils.py"]}
|
16,412
|
ademaro/teahouse
|
refs/heads/master
|
/blog/models.py
|
# -*- coding: utf-8 -*-
from django.db import models
from pyadmin import verbose_name_cases, verbose_name_field_cases
class Category(models.Model):
    """Blog category with Russian grammatical-case-aware verbose names."""
    # verbose_name_field_cases supplies the extra grammatical case used for
    # the admin sort column (see pyadmin)
    name = models.CharField(verbose_name = verbose_name_field_cases(u'категория', sort = u'категории', add = u'категорию'), max_length=250, unique=True)
    def __unicode__(self):
        return self.name
    class Meta:
        ordering = ['name']
        # gender=0 marks a feminine noun: pyadmin patches admin flash
        # messages into feminine agreement for it
        verbose_name = verbose_name_cases(u'категория', (u'категория', u'категории', u'категории'),
            gender = 0, change = u'категорию', delete = u'категорию', add = u'категорию')
        verbose_name_plural = verbose_name.plural
class Entry(models.Model):
    """A blog post; the text field is authored in Markdown."""
    title = models.CharField(verbose_name='Заголовок', max_length=255)
    # optional unique slug for URLs; may be left blank
    alias = models.SlugField(verbose_name='Алиас для url', null=True, blank=True, unique=True)
    text = models.TextField(verbose_name='Текст', blank=True, help_text=u'Используйте синтаксис Markdown.')
    category = models.ManyToManyField(Category, verbose_name=u'Категория')
    # entries dated in the future are excluded by the index view's
    # date_publication__lte filter
    date_publication = models.DateTimeField(verbose_name='Дата публикации', blank=True, null=True)
    def __unicode__(self):
        return self.title
    class Meta:
        # newest entries first
        ordering = ['-date_publication']
        verbose_name = 'Запись'
        verbose_name_plural = 'Записи'
# class Admin:
# js = ('/static/js/jquery.js',)
|
{"/blog/models.py": ["/pyadmin.py"], "/pytils/test/templatetags/test_common.py": ["/pytils/__init__.py"], "/pytils/test/templatetags/test_translit.py": ["/pytils/test/templatetags/__init__.py"], "/blog/admin.py": ["/blog/models.py"]}
|
16,413
|
ademaro/teahouse
|
refs/heads/master
|
/pytils/test/templatetags/helpers.py
|
# -*- coding: utf-8 -*-
# pytils - russian-specific string utils
# Copyright (C) 2006-2008 Yury Yurevich
#
# http://www.pyobject.ru/projects/pytils/
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, version 2
# of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
"""
Helpers for templatetags' unit tests in Django webframework
"""
from django.conf import settings
encoding = 'utf-8'
settings.configure(
TEMPLATE_DIRS=(),
TEMPLATE_CONTEXT_PROCESSORS=(),
TEMPLATE_LOADERS=(),
INSTALLED_APPS=('pytils',),
DEFAULT_CHARSET=encoding,
)
from django import template
from django.template import loader
from pytils.templatetags import pseudo_str
import unittest
def pstr(ustr):
    """
    Encode *ustr* through pytils' pseudo_str with the module-wide encoding.

    The trailing None selects strict mode, so encoding errors propagate.
    """
    return pseudo_str(ustr, encoding, None)
class TemplateTagTestCase(unittest.TestCase):
    """
    TestCase for testing template tags and filters
    """
    def check_template_tag(self, template_name, template_string, context, result_string):
        """
        Method validates output of template tag or filter
        @param template_name: name of template
        @type template_name: C{str}
        @param template_string: contents of template
        @type template_string: C{str} or C{unicode}
        @param context: rendering context
        @type context: C{dict}
        @param result_string: reference output
        @type result_string: C{str} or C{unicode}
        """
        def test_template_loader(template_name, template_dirs=None):
            # in-memory loader: always serves template_string, whatever the name
            return pstr(template_string), template_name
        # NOTE(review): this replaces Django's global loader list for the whole
        # process and is never restored — acceptable for these tests only
        loader.template_source_loaders = [test_template_loader,]
        output = loader.get_template(template_name).render(template.Context(context))
        self.assertEquals(output, pstr(result_string))
|
{"/blog/models.py": ["/pyadmin.py"], "/pytils/test/templatetags/test_common.py": ["/pytils/__init__.py"], "/pytils/test/templatetags/test_translit.py": ["/pytils/test/templatetags/__init__.py"], "/blog/admin.py": ["/blog/models.py"]}
|
16,414
|
ademaro/teahouse
|
refs/heads/master
|
/urls.py
|
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf import settings
# discover admin.py modules in all installed apps
admin.autodiscover()
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', '_tea.views.home', name='home'),
    # url(r'^_tea/', include('_tea.foo.urls')),
    url(r'^$', 'blog.views.index'),
    # NOTE(review): these two use the '_tea.' project prefix while the index
    # view above does not — confirm which dotted path actually resolves
    url(r'^entry/(\d+)/$', '_tea.blog.views.entry'),
    url(r'^tags/(\d+)/$', '_tea.blog.views.tags'),
    #url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': '/var/www/z-gu.ru/_tea/static'}),
    #url(r'mu-a0e9c771-fb62bfcb-31460fd2-f2daa98a.txt', 'django.views.static.serve', {'document_root': '/var/www/z-gu.ru/_tea/static'}),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
)
|
{"/blog/models.py": ["/pyadmin.py"], "/pytils/test/templatetags/test_common.py": ["/pytils/__init__.py"], "/pytils/test/templatetags/test_translit.py": ["/pytils/test/templatetags/__init__.py"], "/blog/admin.py": ["/blog/models.py"]}
|
16,415
|
ademaro/teahouse
|
refs/heads/master
|
/pyadmin.py
|
# -*- coding: utf-8 -*-
"""
NEVER EVER WRITE CODE LIKE THIS!
"""
import traceback
import sys
import re
from django.db import models
from django.contrib import messages
from django.contrib.admin import ModelAdmin
from pytils.numeral import get_plural
#import django.contrib.admin.util
class verbose_name_field_cases:
    """
    Extended verbose_name for a model field: renders as the base name but
    exposes a separate grammatical case for the admin's sort column.
    """
    def __init__(self, verbose_name, **kwargs):
        # base (nominative) form of the field name
        self._verbose_name = verbose_name
        # case used when sorting by this field; falls back to the base form
        self._sort_name = kwargs.get("sort", verbose_name)
    @property
    def sort(self):
        return self._sort_name
    def __str__(self):
        return self._verbose_name
    # py2 unicode rendering mirrors __str__
    __unicode__ = __str__
class verbose_name_cases:
    """
    verbose_name wrapper serving a different Russian grammatical case
    depending on which admin page is asking (list / add / change / delete).

    For feminine names (gender == 0) it also monkey-patches
    ModelAdmin.message_user to rewrite Django's masculine success messages.
    """
    def __init__(self, verbose_name, plural_forms, **kwargs):
        # nominative singular form
        self._verbose_name = verbose_name
        # accusative forms used by the change/add/delete pages (default: base)
        self._change_name = kwargs.get("change", verbose_name)
        self._add_name = kwargs.get("add", verbose_name)
        self._delete_name = kwargs.get("delete", verbose_name)
        # tuple of plural forms (for 1 / 2-4 / 5+ items)
        self._plural = plural_forms
        # 0 = feminine (activates the message patch below); default masculine
        self._gender = kwargs.get("gender", 1)
        if self._gender == 0:
            """
            Monkey-patch Django's ModelAdmin function with our custom message handler
            """
            def msg(self, request, message):
                # best effort: regex-rewrite the Russian admin flash messages
                # into feminine agreement; any failure leaves the message as-is
                try:
                    msg_re = re.compile(u"(.*?) \"(.*?)\" был успешно добавлен")
                    if msg_re.match(message):
                        grp = msg_re.search(message).groups(1)
                        message = message.replace(u"один "+grp[0],u"одну "+self.VerboseNameCaseReplace[grp[0]]._change_name)
                        message = message.replace(u"был",u"была").replace(u"добавлен",u"добавлена")
                        message = message.replace(u"Ниже вы можете снова его отредактировать", u"Ниже вы можете снова её отредактировать")
                    msg_addmore_re = re.compile(u"(.*?)Ниже вы можете добавить еще один (.*?)\.")
                    if msg_addmore_re.match(message):
                        grp = msg_addmore_re.search(message).groups(1)
                        message = message.replace(u"Ниже вы можете добавить еще один %s." % grp[1], u"Ниже вы можете добавить еще одну %s." % self.VerboseNameCaseReplace[grp[1]]._change_name)
                    msg_save_re = re.compile(u"(.*?) \"(.*?)\" был успешно изменён")
                    if msg_save_re.match(message):
                        message = message.replace(u"был",u"была").replace(u"изменён",u"изменена")
                        message = message.replace(u"удалено", u"удалена")
                    msg_delete_re = re.compile(u"Успешно удалены (.*?) (.*)\.")
                    if msg_delete_re.match(message):
                        grp = msg_delete_re.search(message).groups(1)
                        # NOTE(review): grp[0] is a string, so `> 1` is a py2
                        # str-vs-int comparison (always True) — confirm intent
                        if grp[0] > 1:
                            message = message.replace(u"удалены %s %s" % (grp[0], grp[1]), u"удалено %s" % (get_plural(int(grp[0]), ",".join(self.VerboseNameCaseReplace[grp[1]]._plural))))
                except:
                    pass
                messages.info(request, message)
            ModelAdmin.message_user = msg
            # register this instance so the patched handler can look up the
            # right case by either the singular or the plural name
            if not hasattr(ModelAdmin, "VerboseNameCaseReplace"):
                ModelAdmin.VerboseNameCaseReplace = {}
            ModelAdmin.VerboseNameCaseReplace[self._verbose_name] = self
            ModelAdmin.VerboseNameCaseReplace[self._plural[1]] = self
    @property
    def plural(self):
        return self._plural[1]
    @property
    def plural_forms_amount(self):
        return [self._plural[1],self._plural[2]]
    @property
    def plural_forms(self):
        return unicode(",".join(self._plural))
    @property
    def add(self):
        return self._add_name
    def __str__(self):
        return self._verbose_name
    def __unicode__(self):
        """
        Decide the grammatical case by inspecting the caller 3 frames up.
        Fragile: any change in Django's i18n call sites breaks the match, so
        TODO: inspect whole stack
        """
        if "Select %s to change" in traceback.extract_stack()[-3][3]: # Edit entries
            return self._change_name
        elif "Add %s" in traceback.extract_stack()[-3][3]: # Add new entry
            return self._add_name
        elif "Change %s" in traceback.extract_stack()[-3][3]: # Edit entry
            return self._change_name
        elif "delete_view" == traceback.extract_stack()[-3][2]: # Confirm deletion
            return self._delete_name
        else:
            return self._verbose_name
|
{"/blog/models.py": ["/pyadmin.py"], "/pytils/test/templatetags/test_common.py": ["/pytils/__init__.py"], "/pytils/test/templatetags/test_translit.py": ["/pytils/test/templatetags/__init__.py"], "/blog/admin.py": ["/blog/models.py"]}
|
16,416
|
ademaro/teahouse
|
refs/heads/master
|
/pytils/test/templatetags/test_common.py
|
# -*- coding: utf-8 -*-
# pytils - russian-specific string utils
# Copyright (C) 2006-2008 Yury Yurevich
#
# http://www.pyobject.ru/projects/pytils/
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, version 2
# of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
"""
Unit tests for pytils' templatetags common things
"""
import unittest
from pytils import templatetags as tt
class TemplateTagsCommonsTestCase(unittest.TestCase):
    """Tests for pytils.templatetags' defaults and encoding helpers."""
    def testInitDefaults(self):
        """
        Unit-tests for pytils.templatetags.init_defaults
        """
        self.assertEquals(tt.init_defaults(debug=False, show_value=False), ('', u''))
        self.assertEquals(tt.init_defaults(debug=False, show_value=True), ('%(value)s', u'%(value)s'))
        self.assertEquals(tt.init_defaults(debug=True, show_value=False), ('unknown: %(error)s', u'unknown: %(error)s'))
        self.assertEquals(tt.init_defaults(debug=True, show_value=True), ('unknown: %(error)s', u'unknown: %(error)s'))
    def testPseudoUnicode(self):
        """
        Unit-tests for pytils.templatetags.pseudo_unicode
        """
        self.assertEquals(tt.pseudo_unicode(u'тест', 'utf-8'), u'тест')
        self.assertEquals(tt.pseudo_unicode('тест', 'utf-8'), u'тест')
        # undecodable input: the default (or empty string) is returned
        self.assertEquals(tt.pseudo_unicode('тест', 'ascii'), u'')
        self.assertEquals(tt.pseudo_unicode('тест', 'ascii', u'опа'), u'опа')
        # default=None selects strict mode, so the decode error propagates
        self.assertRaises(UnicodeDecodeError, tt.pseudo_unicode, 'тест', 'ascii', None)
    def testPseudoStr(self):
        """
        Unit-tests for pytils.templatetags.pseudo_str
        """
        # in django unicode-branch either str() must return unicode
        # this test depends on Django unicode awareness
        if tt.unicode_aware:
            self.assertEquals(tt.pseudo_str(u'тест', 'utf-8'), u'тест')
            self.assertEquals(tt.pseudo_str(u'тест', 'utf-8'), u'тест')
            self.assertEquals(tt.pseudo_str('тест', 'utf-8'), '')
            self.assertEquals(tt.pseudo_str('тест', 'utf-8', u'опа'), u'опа')
            self.assertEquals(tt.pseudo_str(u'тест', 'ascii'), u'тест')
            self.assertEquals(tt.pseudo_str(u'тест', 'ascii', 'опа'), u'тест')
        else:
            self.assertEquals(tt.pseudo_str(u'тест', 'utf-8'), 'тест')
            self.assertEquals(tt.pseudo_str('тест', 'utf-8'), '')
            self.assertEquals(tt.pseudo_str(u'тест', 'ascii'), '')
            self.assertEquals(tt.pseudo_str(u'тест', 'ascii', 'опа'), 'опа')
        self.assertRaises(UnicodeEncodeError, tt.pseudo_str, u'тест', 'ascii', None)
if __name__ == '__main__':
    unittest.main()
|
{"/blog/models.py": ["/pyadmin.py"], "/pytils/test/templatetags/test_common.py": ["/pytils/__init__.py"], "/pytils/test/templatetags/test_translit.py": ["/pytils/test/templatetags/__init__.py"], "/blog/admin.py": ["/blog/models.py"]}
|
16,417
|
ademaro/teahouse
|
refs/heads/master
|
/blog/views.py
|
# -*- coding: utf-8 -*-
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.http import Http404
from django.shortcuts import render_to_response
from _tea.blog.models import Entry, Category
from datetime import datetime
def index(request):
    """Front page: paginated list of published entries plus category sidebar."""
    # 'bred' is a per-session toggle controlled via ?bred=enable|disable
    if not request.session.get('bred'):
        request.session['bred'] = 'disabled'
    if request.method == 'GET' and request.GET.get('bred') == 'enable':
        request.session['bred'] = 'enabled'
    elif request.method == 'GET' and request.GET.get('bred') == 'disable':
        request.session['bred'] = 'disabled'
    bred = request.session.get('bred')
    # only already-published entries; entry id 13 is excluded —
    # NOTE(review): magic constant, reason not visible here
    entry_list = Entry.objects.filter(date_publication__lte=datetime.now()).exclude(id__exact=13)
    # 5 entries per page, allowing 2 orphans on the last page
    paginator = Paginator(entry_list, 5, 2)
    page = request.GET.get('page')
    try:
        entries = paginator.page(page)
    except PageNotAnInteger:
        # missing/invalid page parameter: show the first page
        entries = paginator.page(1)
    except EmptyPage:
        # page number out of range: show the last page
        entries = paginator.page(paginator.num_pages)
    categories = Category.objects.all()
    return render_to_response('index.html', {'entry_list': entries, 'categories': categories, 'bred': bred })
def entry(request, entry_id):
    """Single-entry page; 404 when the id does not exist."""
    try:
        entry = Entry.objects.get(id=int(entry_id))
    except Entry.DoesNotExist:
        raise Http404()
    else:
        categories = Category.objects.all()
        # categories attached to this entry, shown as its tags
        tags = entry.category.all()
        return render_to_response('entry.html', {'entry': entry, 'tags': tags, 'categories': categories})
def tags(request, tag_id):
    """Entries belonging to one category; 404 when the category is missing."""
    try:
        tag = Category.objects.get(id=int(tag_id))
    except Category.DoesNotExist:
        raise Http404()
    else:
        entry_list = tag.entry_set.all()
        categories = Category.objects.all()
        return render_to_response('index.html',
            {'entry_list': entry_list, 'categories': categories, 'category': tag})
|
{"/blog/models.py": ["/pyadmin.py"], "/pytils/test/templatetags/test_common.py": ["/pytils/__init__.py"], "/pytils/test/templatetags/test_translit.py": ["/pytils/test/templatetags/__init__.py"], "/blog/admin.py": ["/blog/models.py"]}
|
16,418
|
ademaro/teahouse
|
refs/heads/master
|
/pytils/test/templatetags/__init__.py
|
# -*- coding: utf-8 -*-
# pytils - russian-specific string utils
# Copyright (C) 2006-2008 Yury Yurevich
#
# http://www.pyobject.ru/projects/pytils/
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, version 2
# of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
"""
Unit tests for pytils' templatetags for Django web framework
"""
# test modules aggregated by get_suite()
__all__ = ["test_common", "test_numeral", "test_dt", "test_translit"]
import unittest
def get_suite():
    """Return TestSuite for all unit-test of pytils' templatetags"""
    suite = unittest.TestSuite()
    for module_name in __all__:
        imported_module = __import__("pytils.test.templatetags."+module_name,
            globals(),
            locals(),
            ["pytils.test.templatetags"])
        # prefer a module-provided get_suite() when available...
        getter = getattr(imported_module, 'get_suite', False)
        if getter:
            suite.addTest(getter())
        # ...and also collect plain TestCase classes from the module
        loader = unittest.defaultTestLoader
        suite.addTest(loader.loadTestsFromModule(imported_module))
    return suite
def run(verbosity=1):
    """Run all unit-test of pytils' templatetags"""
    suite = get_suite()
    unittest.TextTestRunner(verbosity=verbosity).run(suite)
if __name__ == '__main__':
    run(2)
|
{"/blog/models.py": ["/pyadmin.py"], "/pytils/test/templatetags/test_common.py": ["/pytils/__init__.py"], "/pytils/test/templatetags/test_translit.py": ["/pytils/test/templatetags/__init__.py"], "/blog/admin.py": ["/blog/models.py"]}
|
16,419
|
ademaro/teahouse
|
refs/heads/master
|
/pytils/__init__.py
|
# -*- coding: utf-8 -*-
# pytils - russian-specific string utils
# Copyright (C) 2006-2008 Yury Yurevich
#
# http://www.pyobject.ru/projects/pytils/
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, version 2
# of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
"""
Simple processing for russian strings
"""
VERSION = '0.2.3'
from pytils import numeral, dt, translit, err
|
{"/blog/models.py": ["/pyadmin.py"], "/pytils/test/templatetags/test_common.py": ["/pytils/__init__.py"], "/pytils/test/templatetags/test_translit.py": ["/pytils/test/templatetags/__init__.py"], "/blog/admin.py": ["/blog/models.py"]}
|
16,420
|
ademaro/teahouse
|
refs/heads/master
|
/pytils/test/templatetags/test_translit.py
|
# -*- coding: utf-8 -*-
# pytils - russian-specific string utils
# Copyright (C) 2006-2008 Yury Yurevich
#
# http://www.pyobject.ru/projects/pytils/
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, version 2
# of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
"""
Unit tests for pytils' translit templatetags for Django web framework
"""
from pytils.test.templatetags import helpers
class TranslitDefaultTestCase(helpers.TemplateTagTestCase):
    """Render-level tests for the pytils_translit template filters."""
    def testLoad(self):
        # the tag library must load and produce no output by itself
        self.check_template_tag('load_tag', u'{% load pytils_translit %}', {}, u'')
    def testTranslifyFilter(self):
        self.check_template_tag('translify_filter',
            u'{% load pytils_translit %}{{ val|translify }}',
            {'val': 'проверка'},
            u'proverka')
    def testDetranslifyFilter(self):
        self.check_template_tag('detranslify_filter',
            u'{% load pytils_translit %}{{ val|detranslify }}',
            {'val': 'proverka'},
            u'проверка')
    def testSlugifyFilter(self):
        self.check_template_tag('slugify_filter',
            u'{% load pytils_translit %}{{ val|slugify }}',
            {'val': 'Проверка связи'},
            u'proverka-svyazi')
    # without debug, errors render as an empty string by default
    def testDetranslifyError(self):
        # in unicode mode this input is not an error, so the check is skipped
        from pytils.templatetags import unicode_aware
        if not unicode_aware:
            self.check_template_tag('detranslify_error',
                u'{% load pytils_translit %}{{ val|detranslify }}',
                {'val': 'Проверка связи'},
                u'')
if __name__ == '__main__':
    import unittest
    unittest.main()
|
{"/blog/models.py": ["/pyadmin.py"], "/pytils/test/templatetags/test_common.py": ["/pytils/__init__.py"], "/pytils/test/templatetags/test_translit.py": ["/pytils/test/templatetags/__init__.py"], "/blog/admin.py": ["/blog/models.py"]}
|
16,421
|
ademaro/teahouse
|
refs/heads/master
|
/blog/admin.py
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from blog.models import Category, Entry
import settings
media = settings.MEDIA_URL
class CategoryAdmin(admin.ModelAdmin):
    """Admin configuration for blog categories."""
    list_display = ('name',)
    search_fields = ('name',)
class EntryAdmin(admin.ModelAdmin):
    """Admin configuration for blog entries."""
    list_display = ('title', 'date_publication')
    fields = ('title', 'alias', 'text', 'category', 'date_publication')
    # BUG FIX: was `filter_horisontal = ('category')` — the option name was
    # misspelled (so Django silently ignored it) and `('category')` is a
    # plain string, not a tuple
    filter_horizontal = ('category',)
    # class Media:
    #     js = ('/js/jquery.js', '/js/wymeditor/jquery.wymeditor.js','/js/editor.js')
admin.site.register(Category, CategoryAdmin)
admin.site.register(Entry, EntryAdmin)
|
{"/blog/models.py": ["/pyadmin.py"], "/pytils/test/templatetags/test_common.py": ["/pytils/__init__.py"], "/pytils/test/templatetags/test_translit.py": ["/pytils/test/templatetags/__init__.py"], "/blog/admin.py": ["/blog/models.py"]}
|
16,422
|
rasakereh/time-series-predictors
|
refs/heads/master
|
/src/MLShepard.py
|
import numpy as np
import faiss
from .utils import rolling_window
class MLShepard:
    """
    Shepard-style (similarity-weighted) time-series predictor.

    A faiss L2 index is built over sliding windows of daily relative price
    changes; a prediction blends the historical forward returns of the
    nearest windows, weighted by their similarity to the query window.
    """
    def __init__(
        self,
        future_scope=3,
        dimension=10,
        minor_days=3,
        trust_treshold=4,
        max_point_usage=5,
        avr_elemwise_dist=0.04,
        epsilon=1e-10
    ):
        # days averaged ahead of each point to form the prediction target
        self.future_scope = future_scope
        # window length of daily changes (feature vector size)
        self.dimension = dimension
        # leading days of the window that receive only the general weight
        self.minor_days = minor_days
        # minimum number of close neighbours required to trust a prediction
        self.trust_threshold = trust_treshold
        # k for the nearest-neighbour search
        self.max_point_usage = max_point_usage
        # assumed average per-element distance between "relevant" windows
        self.avr_elemwise_dist = avr_elemwise_dist
        # guard against division by zero when normalizing weights
        self.epsilon = epsilon
        # L2 radius within which a neighbour window counts as relevant
        self.relevance_threshold = dimension**.5 * avr_elemwise_dist
    def fit(self, price_indices):
        """
        Build the per-capital model from absolute price arrays.

        @param price_indices: dict capital name -> 1-D array of prices.
        Stores, per capital: 'X' (faiss index of change windows), 'f'
        (relative forward-return targets) and 'data' (daily change series).
        """
        self.price_indices = {}
        for capital_name in price_indices:
            self.price_indices[capital_name] = {'X': np.array([]), 'f': np.array([]), 'data': np.array([])}
            if price_indices[capital_name].shape[0] == 0:
                continue
            curr_prices = price_indices[capital_name]
            # absolute prices -> daily relative changes; day 0 has no
            # predecessor and is defined as 0
            seq_price = price_indices[capital_name].copy()
            seq_price[1:] = (seq_price[1:] - seq_price[:-1]) / seq_price[:-1]
            seq_price[0] = 0
            self.price_indices[capital_name]['data'] = seq_price
            X = rolling_window(seq_price, self.dimension)
            # pad the first dimension-1 incomplete windows so that row i of X
            # still lines up with day i
            padding = np.ones((self.dimension-1, self.dimension))
            X = np.vstack((padding, X))
            self.price_indices[capital_name]['X'] = faiss.IndexFlatL2(X.shape[1])
            self.price_indices[capital_name]['X'].add(X.astype(np.float32))
            # target f[i]: relative distance from price i to the mean of the
            # following future_scope prices
            cum_sum = np.cumsum(curr_prices)
            moving_avr = curr_prices.copy()
            moving_avr[:-self.future_scope] = (cum_sum[self.future_scope:] - cum_sum[:-self.future_scope]) / self.future_scope
            self.price_indices[capital_name]['f'] = np.zeros((curr_prices.shape[0],))
            self.price_indices[capital_name]['f'][:] = (moving_avr - curr_prices) / curr_prices
    def predict(self, recent_prices, update=True, true_values=None, loss_functions=None):
        """
        Predict one price per row of each capital's (rows, dimension+1) matrix.

        @param recent_prices: dict capital name -> 2-D array of absolute prices
        @param update: when True, fold each observed truth back into the model
        @param true_values: dict capital name -> realized prices (required
            when update is True)
        @param loss_functions: dict name -> fn(truth, estimate, prices);
            defaults to root-mean-squared error
        @return: (predictions dict, per-capital per-loss dict)
        """
        if true_values is None and update:
            raise Exception('True values must be provided if update parameter is set to true')
        if loss_functions is None:
            loss_functions = {'MSE': lambda truth, estimate, _prices: np.sqrt(np.mean((truth-estimate)**2))}
        loss_results = {}
        result = {}
        for capital_name in recent_prices:
            result[capital_name] = np.array([])
            row_number = 0
            if recent_prices[capital_name].shape[1] != self.dimension+1:
                raise Exception('The matrix to be predicted must be of the shape (*, dimension+1)')
            all_daily_changes = (recent_prices[capital_name][:,1:] - recent_prices[capital_name][:,:-1]) / recent_prices[capital_name][:,:-1]
            distances, indices = self.price_indices[capital_name]['X'].search(all_daily_changes.astype(np.float32), k=self.max_point_usage)
            closeDays = distances < self.relevance_threshold
            for row in recent_prices[capital_name]:
                # BUG FIX: daily_changes was previously assigned only in the
                # trusted branch below, so the update step could raise a
                # NameError on the first untrusted row or silently reuse a
                # previous row's window on later ones
                daily_changes = all_daily_changes[row_number]
                proximity = distances[row_number][closeDays[row_number]]
                if proximity.shape[0] < self.trust_threshold:
                    # too few relevant neighbours: predict "no change"
                    res = row[-1]
                else:
                    currIndices = indices[row_number][closeDays[row_number]]
                    fluctuations = np.vstack([self.price_indices[capital_name]['data'][(i-self.dimension+1):(i+1)] for i in currIndices])
                    changes = self.price_indices[capital_name]['f'][currIndices]
                    # blend a whole-window weight with an extra weight on the
                    # most recent (major) days of the window
                    general_w = self.dimension/(2*self.dimension - self.minor_days)
                    major_w = 1 - general_w
                    ws = np.dot(fluctuations, daily_changes)*general_w + np.dot(fluctuations[:, self.minor_days:], daily_changes[self.minor_days:])*major_w
                    ws /= (np.sum(ws)+self.epsilon)
                    change_ratio = np.sum(ws * changes)
                    res = (change_ratio + 1) * row[-1]
                result[capital_name] = np.concatenate((result[capital_name], [res]))
                if update:
                    # online update: append the observed forward return and
                    # the query window to the model
                    newF = (true_values[capital_name][row_number] - row[-1]) / row[-1]
                    self.price_indices[capital_name]['X'].add(daily_changes.reshape((1,-1)).astype(np.float32))
                    self.price_indices[capital_name]['f'] = np.concatenate((self.price_indices[capital_name]['f'], [newF]))
                row_number += 1
            if true_values is not None:
                loss_results[capital_name] = {}
                for loss_name in loss_functions:
                    loss_results[capital_name][loss_name] = loss_functions[loss_name](true_values[capital_name], result[capital_name], recent_prices[capital_name])
        return result, loss_results
|
{"/src/MLShepard.py": ["/src/utils.py"], "/master.py": ["/src/utils.py", "/src/GBL.py", "/src/MLShepard.py", "/src/MondrianForest.py", "/src/OARIMA.py", "/src/OSVR.py", "/src/RandConLSTM.py", "/src/WHLR.py"], "/src/OSVR.py": ["/src/utils.py"], "/src/WHLR.py": ["/src/utils.py"], "/src/OARIMA.py": ["/src/utils.py"], "/src/RandConLSTM.py": ["/src/utils.py"], "/src/MondrianForest.py": ["/src/utils.py"], "/src/GBL.py": ["/src/utils.py"]}
|
16,423
|
rasakereh/time-series-predictors
|
refs/heads/master
|
/master.py
|
import numpy as np
import pandas as pd
from dfply import *
from pprint import pprint
from os import listdir
from os.path import isfile, join
from time import time
import datetime
import matplotlib.pyplot as plt
from src.utils import rolling_window, NumpyEncoder
from src.GBL import GBLM
from src.MLShepard import MLShepard
from src.MondrianForest import MondrianForest
from src.OARIMA import OARIMA
from src.OSVR import OSVR
from src.RandConLSTM import RandConLSTM
from src.WHLR import WHLR
import sys
sys.setrecursionlimit(sys.getrecursionlimit() * 100)
# Fraction of each price series used for training; the remainder is the test window.
TRAIN_PORTION = .8
# Window length: number of past daily changes fed to every model.
DIM = 200
# DIM = 15
# Uncomment the method and its parameters to include the corresponding result
methods = {
    'GBLM': {
        'class': GBLM,
        'options': {
            'dimension': DIM,
            'epsilon': 5e-3,
            'forgetting_rate': .59,
            'p_learning_rate': .008,
            's_learning_rate': .001,
            'decay_rate': .25,
            'oe_penalty': -1.5,
            'ue_penalty': -1.5,
            'reward': 1,
            'epochs': 1
        }
    },
    # 'MLShepard': {
    #     'class': MLShepard,
    #     'options': {
    #         'future_scope': 3,
    #         'dimension': DIM,
    #         'minor_days': 3,
    #         'trust_treshold': 4,
    #         'max_point_usage': 5,
    #         'avr_elemwise_dist': 0.04,
    #         'epsilon': 1e-10
    #     }
    # },
    # 'OARIMA (ogd)': {
    #     'class': OARIMA,
    #     'options': {
    #         'dimension': DIM,
    #         'lrate': 1e-2,
    #         'epsilon': 1e-10,
    #         'method': 'ogd'
    #     }
    # },
    # 'OARIMA (ons)': {
    #     'class': OARIMA,
    #     'options': {
    #         'dimension': DIM,
    #         'lrate': 1e-2,
    #         'epsilon': 1e-10,
    #         'method': 'ons'
    #     }
    # },
    # 'OSVR': {
    #     'class': OSVR,
    #     'options': {
    #         'future_scope': 3,
    #         'dimension': DIM,
    #         'C': 10,
    #         'kernelParam': 30,
    #         'epsilon': 1e-10
    #     }
    # }, RUNNING TIME IS: [ 9.84e-002, -3.39e-003,  2.63e-005,  5.94e-007] @ [n, n**2, n**3, n**4]
    # 'LSTM': {
    #     'class': RandConLSTM,
    #     'options': {
    #         'future_scope': 3,
    #         'dimension': DIM,
    #         'epochs': 2,
    #         'batch_size': 128,
    #         'num_layers': 1,
    #         'epsilon': 1e-10,
    #         'hidden_size': 100,
    #         'connectivity': 1
    #     }
    # },
    # 'RandConLSTM': {
    #     'class': RandConLSTM,
    #     'options': {
    #         'future_scope': 3,
    #         'dimension': DIM,
    #         'epochs': 2,
    #         'batch_size': 128,
    #         'num_layers': 1,
    #         'epsilon': 1e-10,
    #         'hidden_size': 100,
    #         'connectivity': .2
    #     }
    # },
    # 'WHLR': {
    #     'class': WHLR,
    #     'options': {
    #         'future_scope': 3,
    #         'dimension': DIM,
    #         'avr_elemwise_dist': 0.04,
    #         'learning_rate': 1e-2
    #     }
    # },
    # 'MondrianForest': {
    #     'class': MondrianForest,
    #     'options': {
    #         'future_scope': 3,
    #         'dimension': DIM
    #     }
    # },
}
print('Preparing dataset...')
# Here is the data directory. Each stock/crypto must be stored in a separate csv file
dataDir = 'data/stocks'
dataFiles = {f: join(dataDir, f) for f in listdir(dataDir) if isfile(join(dataDir, f)) and f[-4:] == '.csv' and f not in ['stock_metadata.csv', 'NIFTY50_all.csv']}
print(list(dataFiles.keys()))
priceIndices = {f: pd.read_csv(dataFiles[f]) for f in dataFiles}
# (synthetic-data generator kept for quick smoke tests)
# dataFiles = {'dummy1': 1, 'dummy2': 1, 'dummy3': 1, 'dummy4': 1, 'dummy5': 1, 'dummy6': 1}
# T_SIZE = 3000
# priceIndices = {
#     f: pd.DataFrame({
#         'Date': list(range(T_SIZE)),
#         'Price': np.random.normal(
#             np.random.uniform(70, 300),
#             np.random.uniform(1, 1.5),
#             (T_SIZE,)
#         )
#     }) for f in dataFiles
# }
prices = {}
pricePartitions = {'train': {}, 'test': {}}
trueVals = {}
intervalLength = float('Inf')
# intervalLength = 0
for cryptoID in priceIndices:
    # BUG FIX: fillna returns a new DataFrame; the original call discarded the
    # result, so missing values were never forward-filled.
    priceIndices[cryptoID] = priceIndices[cryptoID].fillna(method='ffill')
    priceIndices[cryptoID]["Date"] = priceIndices[cryptoID]["Date"].astype("datetime64[ns]")
    priceIndices[cryptoID] = priceIndices[cryptoID] >> arrange(X.Date)
    indexLength = priceIndices[cryptoID].shape[0]
    # dfply's symbolic `mean` evaluates eagerly when given a concrete array.
    indexMean = mean(priceIndices[cryptoID]["Price"].values)
    # Jitter prices slightly (0.2% of the mean) — presumably to break ties / add noise.
    prices[cryptoID] = priceIndices[cryptoID]["Price"].values + np.random.normal(loc=0, scale=indexMean/500, size=indexLength)
    # All series are truncated to the shortest one so partitions align.
    intervalLength = min(indexLength, intervalLength)
# intervalLength = min(2000, intervalLength)
cutOff = int(intervalLength * TRAIN_PORTION)
for cryptoID in priceIndices:
    # if intervalLength != prices[cryptoID].shape[0]:
    #     prices[cryptoID] = np.concatenate((
    #         prices[cryptoID],
    #         np.repeat(prices[cryptoID][-1], intervalLength - prices[cryptoID].shape[0])
    #     ))
    pricePartitions['train'][cryptoID] = prices[cryptoID][:cutOff]
    # Test rows are sliding windows of DIM+1 prices; trueVals holds the next true price per row.
    pricePartitions['test'][cryptoID] = rolling_window(prices[cryptoID][cutOff:intervalLength], (DIM+1))[:-1]
    trueVals[cryptoID] = prices[cryptoID][cutOff:intervalLength][(DIM+1):]
# Loss functions: each takes (truth, estimate, test-price-windows).
MSE = lambda truth, estimate, _prices: np.sqrt(np.mean((truth-estimate)**2))
PMSE = lambda truth, estimate, _prices: np.sqrt(np.mean(((truth-estimate)/truth)**2))
PASE = lambda truth, estimate, _prices: np.mean((np.abs(truth-estimate)/truth))
# DMSE / wrongs only count predictions whose direction (up/down vs last price) is wrong.
DMSE = lambda truth, estimate, prices: np.sqrt(np.mean((np.heaviside(-(truth - prices[:,-1])*(estimate - prices[:,-1]), [0]) * (truth-estimate)/truth)**2))
wrongs = lambda truth, estimate, prices: np.sqrt(np.mean(np.heaviside(-(truth - prices[:,-1])*(estimate - prices[:,-1]), [0])))
# DMSESD = lambda truth, estimate, prices: np.sqrt(np.std((np.heaviside(-(truth - prices[:,-1])*(estimate - prices[:,-1]), [0]) * (truth-estimate)/truth)**2))
# DMSE = lambda truth, estimate, prices: print(*[truth, estimate, prices], sep='\n')
# methods['MondrianForest']['later_values'] = {'X': pricePartitions['test'], 'f': trueVals}
import json
# Run every enabled method: fit on the train split, predict over the test
# windows (with online updates), report losses/timings, and dump results.
for method_name in methods:
    print("==================== %s ===================="%(method_name))
    method = methods[method_name]
    pClass, options = method['class'], method['options']
    model = pClass(**options)
    print('Fitting model...')
    startTime = time()
    model.fit({f: pricePartitions['train'][f] for f in dataFiles})
    fittedTime = time()
    print('Predicting values...')
    predStartTime = time()
    res = model.predict(pricePartitions['test'], update=True, true_values=trueVals,
        loss_functions={'MSE': MSE, 'PMSE': PMSE, 'PASE': PASE, 'DMSE': DMSE, 'wrongs': wrongs})
    finishedTime = time()
    # res is (predictions-per-coin, losses-per-coin); print mean loss per coin.
    pprint({coin: {l: np.mean(res[1][coin][l]) for l in res[1][coin]} for coin in res[1]})
    print('Plotting results...')
    # Overlay predictions on the true series for one randomly chosen coin.
    indices = np.random.choice(list(dataFiles.keys()), 1, False)
    plt.plot(range((DIM+1)+cutOff, (DIM+1)+cutOff+res[0][indices[0]].shape[0]), res[0][indices[0]])
    plt.plot(range(prices[indices[0]].shape[0]), prices[indices[0]])
    learnT = (fittedTime - startTime) * 1000
    predT = (finishedTime - predStartTime) * 1000
    avrPredT = (finishedTime - predStartTime) / (intervalLength-cutOff) * 1000
    totalT = learnT + predT
    timingString = '''
    learning time:\t%.1f ms
    predicting time:\t%.1f ms
    prediction/test:\t%.1f ms
    total time:\t%.1fms
    '''%(learnT, predT, avrPredT, totalT)
    print(timingString)
    print('saving dump...')
    # NOTE(review): the timestamp contains ':' characters — these filenames
    # fail on Windows; confirm the script only targets POSIX systems.
    currentTime = datetime.datetime.now()
    dump_file = open('dumps/Results-%s-%s.dmp'%(method_name, currentTime), 'w')
    json.dump(res, dump_file, cls=NumpyEncoder)
    dump_file.close()
    dump_file = open('dumps/Timing-%s-%s.txt'%(method_name, currentTime), 'w')
    dump_file.write(timingString)
    dump_file.close()
# NOTE(review): source indentation was lost; show() is placed after the loop
# (single combined figure) — confirm it was not intended per-method.
plt.show()
|
{"/src/MLShepard.py": ["/src/utils.py"], "/master.py": ["/src/utils.py", "/src/GBL.py", "/src/MLShepard.py", "/src/MondrianForest.py", "/src/OARIMA.py", "/src/OSVR.py", "/src/RandConLSTM.py", "/src/WHLR.py"], "/src/OSVR.py": ["/src/utils.py"], "/src/WHLR.py": ["/src/utils.py"], "/src/OARIMA.py": ["/src/utils.py"], "/src/RandConLSTM.py": ["/src/utils.py"], "/src/MondrianForest.py": ["/src/utils.py"], "/src/GBL.py": ["/src/utils.py"]}
|
16,424
|
rasakereh/time-series-predictors
|
refs/heads/master
|
/data/cryptos/prep.py
|
import pandas as pd
from dfply import X, arrange
def loadPriceIndex(dataFile):
    """Read a raw price csv, coerce Price to float, and sort rows by Date."""
    frame = pd.read_csv(dataFile)

    def _price_as_float(row):
        # Exported prices with thousands separators arrive as strings like "1,234.5".
        value = row.Price
        return value if isinstance(value, float) else float(value.replace(',', ''))

    frame["Price"] = frame.apply(_price_as_float, axis=1)
    frame["Date"] = frame["Date"].astype("datetime64[ns]")
    return frame >> arrange(X.Date)
# Normalise each raw coin csv in place.
# BUG FIX: index=False keeps pandas from prepending its integer index as an
# extra unnamed column — the original call re-saved the index, so every run
# of this prep script added a spurious column to the file.
for i in range(1, 4):
    coins = loadPriceIndex('coin%d.csv'%i)
    coins.to_csv('coin%d.csv'%i, index=False)
|
{"/src/MLShepard.py": ["/src/utils.py"], "/master.py": ["/src/utils.py", "/src/GBL.py", "/src/MLShepard.py", "/src/MondrianForest.py", "/src/OARIMA.py", "/src/OSVR.py", "/src/RandConLSTM.py", "/src/WHLR.py"], "/src/OSVR.py": ["/src/utils.py"], "/src/WHLR.py": ["/src/utils.py"], "/src/OARIMA.py": ["/src/utils.py"], "/src/RandConLSTM.py": ["/src/utils.py"], "/src/MondrianForest.py": ["/src/utils.py"], "/src/GBL.py": ["/src/utils.py"]}
|
16,425
|
rasakereh/time-series-predictors
|
refs/heads/master
|
/src/OSVR.py
|
"""Implementation of Online Support Vector Regression (OSVR) as library for a class project in 16-831
Statistical Techniques in Robotics.
Requires Python 3.5
Author: Adam Werries, awerries@cmu.edu, 12/2015.
Adapted from MATLAB code available at http://onlinesvr.altervista.org/
Parameters defined in main() below. C is the regularization parameter, essentially defining the limit on how close the learner must adhere to the dataset (smoothness). Epsilon is the acceptable error, and defines the width of what is sometimes called the "SVR tube". The kernel parameter is the scaling factor for comparing feature distance (this implementation uses a Radial Basis Function).
"""
import sys
import numpy as np
from .utils import rolling_window
from time import time
def sign(x):
    """Return the sign of *x*, mapping zero to +1.

    numpy.sign maps 0 to 0, which breaks the set-migration logic below,
    hence this custom variant.
    """
    return 1 if x >= 0 else -1
class OnlineSVR:
    def __init__(self, numFeatures, C, eps, kernelParam, bias = 0, debug = False):
        """Online SVR state.

        numFeatures: length of each feature vector.
        C: regularization bound on each sample weight.
        eps: width of the SVR insensitivity tube.
        kernelParam: RBF kernel scale (K = exp(-kernelParam * d**2)).
        bias: initial bias term.
        debug: enables verbose stderr tracing.
        """
        # Configurable Parameters
        self.numFeatures = numFeatures
        self.C = C
        self.eps = eps
        self.kernelParam = kernelParam
        self.bias = bias
        self.debug = debug
        print('SELF',self.C,self.eps,self.kernelParam, file=sys.stderr)
        # Algorithm initialization
        self.numSamplesTrained = 0
        self.weights = np.array([])
        # Samples X (features) and Y (truths)
        self.X = list()
        self.Y = list()
        # Working sets, contains indices pertaining to X and Y
        self.supportSetIndices = list()
        self.errorSetIndices = list()
        self.remainderSetIndices = list()
        # R is the bookkeeping (inverse kernel) matrix over the support set.
        self.R = np.matrix([])
def findMinVariation(self, H, beta, gamma, i):
""" Finds the variations of each sample to the new set.
Lc1: distance of the new sample to the SupportSet
Lc2: distance of the new sample to the ErrorSet
Ls(i): distance of the support samples to the ErrorSet/RemainingSet
Le(i): distance of the error samples to the SupportSet
Lr(i): distance of the remaining samples to the SupportSet
"""
# Find direction q of the new sample
q = -sign(H[i])
# Compute variations
Lc1 = self.findVarLc1(H, gamma, q, i)
q = sign(Lc1)
Lc2 = self.findVarLc2(H, q, i)
Ls = self.findVarLs(H, beta, q)
Le = self.findVarLe(H, gamma, q)
Lr = self.findVarLr(H, gamma, q)
# Check for duplicate minimum values, grab one with max gamma/beta, set others to inf
# Support set
if Ls.size > 1:
minS = np.abs(Ls).min()
results = np.array([k for k,val in enumerate(Ls) if np.abs(val)==minS])
if len(results) > 1:
betaIndex = beta[results+1].argmax()
Ls[results] = q*np.inf
Ls[results[betaIndex]] = q*minS
# Error set
if Le.size > 1:
minE = np.abs(Le).min()
results = np.array([k for k,val in enumerate(Le) if np.abs(val)==minE])
if len(results) > 1:
errorGamma = gamma[self.errorSetIndices]
gammaIndex = errorGamma[results].argmax()
Le[results] = q*np.inf
Le[results[gammaIndex]] = q*minE
# Remainder Set
if Lr.size > 1:
minR = np.abs(Lr).min()
results = np.array([k for k,val in enumerate(Lr) if np.abs(val)==minR])
if len(results) > 1:
remGamma = gamma[self.remainderSetIndices]
gammaIndex = remGamma[results].argmax()
Lr[results] = q*np.inf
Lr[results[gammaIndex]] = q*minR
# Find minimum absolute variation of all, retain signs. Flag determines set-switching cases.
minLsIndex = np.abs(Ls).argmin()
minLeIndex = np.abs(Le).argmin()
minLrIndex = np.abs(Lr).argmin()
minIndices = [None, None, minLsIndex, minLeIndex, minLrIndex]
minValues = np.array([Lc1, Lc2, Ls[minLsIndex], Le[minLeIndex], Lr[minLrIndex]])
if np.abs(minValues).min() == np.inf:
print('No weights to modify! Something is wrong.', file=sys.stderr)
sys.exit()
flag = np.abs(minValues).argmin()
if self.debug:
print('MinValues',minValues, file=sys.stderr)
return minValues[flag], flag, minIndices[flag]
def findVarLc1(self, H, gamma, q, i):
# weird hacks below
Lc1 = np.nan
if gamma.size < 2:
g = gamma
else:
g = gamma.item(i)
# weird hacks above
if g <= 0:
Lc1 = np.array(q*np.inf)
elif H[i] > self.eps and -self.C < self.weights[i] and self.weights[i] <= 0:
Lc1 = (-H[i] + self.eps) / g
elif H[i] < -self.eps and 0 <= self.weights[i] and self.weights[i] <= self.C:
Lc1 = (-H[i] - self.eps) / g
else:
print('Something is weird.', file=sys.stderr)
print('i',i, file=sys.stderr)
print('q',q, file=sys.stderr)
print('gamma',gamma, file=sys.stderr)
print('g',g, file=sys.stderr)
print('H[i]',H[i], file=sys.stderr)
print('weights[i]',self.weights[i], file=sys.stderr)
if np.isnan(Lc1):
Lc1 = np.array(q*np.inf)
return Lc1.item()
    def findVarLc2(self, H, q, i):
        """Distance of the new sample's weight to the ErrorSet bound (+/-C).

        Returns q*inf when there is no support set yet (nothing to trade
        against). H is unused here; kept for signature symmetry with the
        other findVar* helpers.
        """
        if len(self.supportSetIndices) > 0:
            if q > 0:
                Lc2 = -self.weights[i] + self.C
            else:
                Lc2 = -self.weights[i] - self.C
        else:
            Lc2 = np.array(q*np.inf)
        if np.isnan(Lc2):
            Lc2 = np.array(q*np.inf)
        return Lc2
def findVarLs(self, H, beta, q):
if len(self.supportSetIndices) > 0 and len(beta) > 0:
Ls = np.zeros([len(self.supportSetIndices),1])
supportWeights = self.weights[self.supportSetIndices]
supportH = H[self.supportSetIndices]
for k in range(len(self.supportSetIndices)):
if q*beta[k+1] == 0:
Ls[k] = q*np.inf
elif q*beta[k+1] > 0:
if supportH[k] > 0:
if supportWeights[k] < -self.C:
Ls[k] = (-supportWeights[k] - self.C) / beta[k+1]
elif supportWeights[k] <= 0:
Ls[k] = -supportWeights[k] / beta[k+1]
else:
Ls[k] = q*np.inf
else:
if supportWeights[k] < 0:
Ls[k] = -supportWeights[k] / beta[k+1]
elif supportWeights[k] <= self.C:
Ls[k] = (-supportWeights[k] + self.C) / beta[k+1]
else:
Ls[k] = q*np.inf
else:
if supportH[k] > 0:
if supportWeights[k] > 0:
Ls[k] = -supportWeights[k] / beta[k+1]
elif supportWeights[k] >= -self.C:
Ls[k] = (-supportWeights[k] - self.C) / beta[k+1]
else:
Ls[k] = q*np.inf
else:
if supportWeights[k] > self.C:
Ls[k] = (-supportWeights[k] + self.C) / beta[k+1]
elif supportWeights[k] >= self.C:
Ls[k] = -supportWeights[k] / beta[k+1]
else:
Ls[k] = q*np.inf
else:
Ls = np.array([q*np.inf])
# Correct for NaN
Ls[np.isnan(Ls)] = q*np.inf
if Ls.size > 1:
Ls.shape = (len(Ls),1)
# Check for broken signs
for val in Ls:
if sign(val) == -sign(q) and val != 0:
print('Sign mismatch error in Ls! Exiting.', file=sys.stderr)
sys.exit()
# print('findVarLs',Ls, file=sys.stderr)
return Ls
def findVarLe(self, H, gamma, q):
if len(self.errorSetIndices) > 0:
Le = np.zeros([len(self.errorSetIndices),1])
errorGamma = gamma[self.errorSetIndices]
errorWeights = self.weights[self.errorSetIndices]
errorH = H[self.errorSetIndices]
for k in range(len(self.errorSetIndices)):
if q*errorGamma[k] == 0:
Le[k] = q*np.inf
elif q*errorGamma[k] > 0:
if errorWeights[k] > 0:
if errorH[k] < -self.eps:
Le[k] = (-errorH[k] - self.eps) / errorGamma[k]
else:
Le[k] = q*np.inf
else:
if errorH[k] < self.eps:
Le[k] = (-errorH[k] + self.eps) / errorGamma[k]
else:
Le[k] = q*np.inf
else:
if errorWeights[k] > 0:
if errorH[k] > -self.eps:
Le[k] = (-errorH[k] - self.eps) / errorGamma[k]
else:
Le[k] = q*np.inf
else:
if errorH[k] > self.eps:
Le[k] = (-errorH[k] + self.eps) / errorGamma[k]
else:
Le[k] = q*np.inf
else:
Le = np.array([q*np.inf])
# Correct for NaN
Le[np.isnan(Le)] = q*np.inf
if Le.size > 1:
Le.shape = (len(Le),1)
# Check for broken signs
for val in Le:
if sign(val) == -sign(q) and val != 0:
print('Sign mismatch error in Le! Exiting.', file=sys.stderr)
sys.exit()
# print('findVarLe',Le, file=sys.stderr)
return Le
def findVarLr(self, H, gamma, q):
if len(self.remainderSetIndices) > 0:
Lr = np.zeros([len(self.remainderSetIndices),1])
remGamma = gamma[self.remainderSetIndices]
remH = H[self.remainderSetIndices]
for k in range(len(self.remainderSetIndices)):
if q*remGamma[k] == 0:
Lr[k] = q*np.inf
elif q*remGamma[k] > 0:
if remH[k] < -self.eps:
Lr[k] = (-remH[k] - self.eps) / remGamma[k]
elif remH[k] < self.eps:
Lr[k] = (-remH[k] + self.eps) / remGamma[k]
else:
Lr[k] = q*np.inf
else:
if remH[k] > self.eps:
Lr[k] = (-remH[k] + self.eps) / remGamma[k]
elif remH[k] > -self.eps:
Lr[k] = (-remH[k] - self.eps) / remGamma[k]
else:
Lr[k] = q*np.inf
else:
Lr = np.array([q*np.inf])
# Correct for NaN
Lr[np.isnan(Lr)] = q*np.inf
if Lr.size > 1:
Lr.shape = (len(Lr),1)
# Check for broken signs
for val in Lr:
if sign(val) == -sign(q) and val != 0:
print('Sign mismatch error in Lr! Exiting.', file=sys.stderr)
sys.exit()
# print('findVarLr',Lr, file=sys.stderr)
return Lr
    def computeKernelOutput(self, set1, set2):
        """Compute kernel output. Uses a radial basis function kernel.

        Returns the (rows-of-set1) x (rows-of-set2) matrix of
        exp(-kernelParam * ||x1 - x2||**2). The two branches below compute
        the same pairwise distances; the split just loops over the smaller
        dimension.
        """
        X1 = np.matrix(set1)
        X2 = np.matrix(set2).T
        # Euclidean distance calculation done properly
        [S,R] = X1.shape
        [R2,Q] = X2.shape
        X = np.zeros([S,Q])
        if Q < S:
            # `copies` broadcasts one column of X2 against all rows of X1.
            copies = np.zeros(S,dtype=int)
            for q in range(Q):
                if self.debug:
                    print('X1',X1, file=sys.stderr)
                    print('X2copies',X2.T[q+copies,:], file=sys.stderr)
                    print('power',np.power(X1-X2.T[q+copies,:],2), file=sys.stderr)
                xsum = np.sum(np.power(X1-X2.T[q+copies,:],2),axis=1)
                xsum.shape = (xsum.size,)
                X[:,q] = xsum
        else:
            copies = np.zeros(Q,dtype=int)
            for i in range(S):
                X[i,:] = np.sum(np.power(X1.T[:,i+copies]-X2,2),axis=0)
        X = np.sqrt(X)
        y = np.matrix(np.exp(-self.kernelParam*X**2))
        if self.debug:
            print('distance',X, file=sys.stderr)
            print('kernelOutput',y, file=sys.stderr)
        return y
    def predict(self, newSampleX):
        """Predict targets for the rows of newSampleX.

        Returns weights^T @ K(X, newX) + bias. NOTE(review): the untrained
        fallback returns an array shaped like the raw input rather than
        (n, 1) — confirm callers tolerate the shape difference.
        """
        X = np.array(self.X)
        newX = np.array(newSampleX)
        weights = np.array(self.weights)
        weights.shape = (weights.size,1)
        if self.numSamplesTrained > 0:
            y = self.computeKernelOutput(X, newX)
            return (weights.T @ y).T + self.bias
        else:
            return np.zeros_like(newX) + self.bias
    def computeMargin(self, newSampleX, newSampleY):
        """Return h(x) = f(x) - y for each sample (the SVR residual/margin)."""
        fx = self.predict(newSampleX)
        newSampleY = np.array(newSampleY)
        newSampleY.shape = (newSampleY.size, 1)
        if self.debug:
            print('fx',fx, file=sys.stderr)
            print('newSampleY',newSampleY, file=sys.stderr)
            print('hx',fx-newSampleY, file=sys.stderr)
        return fx-newSampleY
    def computeBetaGamma(self,i):
        """Returns beta and gamma arrays.

        beta: sensitivity of support-set weights/bias to a unit change in
        sample i's weight; gamma: resulting sensitivity of every margin.
        Both degenerate gracefully (empty / all-ones) while the support set
        is empty.
        """
        # Compute beta vector
        X = np.array(self.X)
        Qsi = self.computeQ(X[self.supportSetIndices,:], X[i,:])
        if len(self.supportSetIndices) == 0 or self.R.size == 0:
            beta = np.array([])
        else:
            beta = -self.R @ np.append(np.matrix([1]),Qsi,axis=0)
        # Compute gamma vector
        Qxi = self.computeQ(X, X[i,:])
        Qxs = self.computeQ(X, X[self.supportSetIndices,:])
        if len(self.supportSetIndices) == 0 or Qxi.size == 0 or Qxs.size == 0 or beta.size == 0:
            gamma = np.array(np.ones_like(Qxi))
        else:
            gamma = Qxi + np.append(np.ones([self.numSamplesTrained,1]), Qxs, 1) @ beta
        # Correct for NaN
        beta[np.isnan(beta)] = 0
        gamma[np.isnan(gamma)] = 0
        if self.debug:
            print('R',self.R, file=sys.stderr)
            print('beta',beta, file=sys.stderr)
            print('gamma',gamma, file=sys.stderr)
        return beta, gamma
def computeQ(self, set1, set2):
set1 = np.matrix(set1)
set2 = np.matrix(set2)
Q = np.matrix(np.zeros([set1.shape[0],set2.shape[0]]))
for i in range(set1.shape[0]):
for j in range(set2.shape[0]):
Q[i,j] = self.computeKernelOutput(set1[i,:],set2[j,:])
return np.matrix(Q)
def adjustSets(self, H, beta, gamma, i, flag, minIndex):
print('Entered adjustSet logic with flag {0} and minIndex {1}.'.format(flag,minIndex), file=sys.stderr)
if flag not in range(5):
print('Received unexpected flag {0}, exiting.'.format(flag), file=sys.stderr)
sys.exit()
# add new sample to Support set
if flag == 0:
print('Adding new sample {0} to support set.'.format(i), file=sys.stderr)
H[i] = np.sign(H[i])*self.eps
self.supportSetIndices.append(i)
self.R = self.addSampleToR(i,'SupportSet',beta,gamma)
return H,True
# add new sample to Error set
elif flag == 1:
print('Adding new sample {0} to error set.'.format(i), file=sys.stderr)
self.weights[i] = np.sign(self.weights[i])*self.C
self.errorSetIndices.append(i)
return H,True
# move sample from Support set to Error or Remainder set
elif flag == 2:
index = self.supportSetIndices[minIndex]
weightsValue = self.weights[index]
if np.abs(weightsValue) < np.abs(self.C - abs(weightsValue)):
self.weights[index] = 0
weightsValue = 0
else:
self.weights[index] = np.sign(weightsValue)*self.C
weightsValue = self.weights[index]
# Move from support to remainder set
if weightsValue == 0:
print('Moving sample {0} from support to remainder set.'.format(index), file=sys.stderr)
self.remainderSetIndices.append(index)
self.R = self.removeSampleFromR(minIndex)
self.supportSetIndices.pop(minIndex)
# move from support to error set
elif np.abs(weightsValue) == self.C:
print('Moving sample {0} from support to error set.'.format(index), file=sys.stderr)
self.errorSetIndices.append(index)
self.R = self.removeSampleFromR(minIndex)
self.supportSetIndices.pop(minIndex)
else:
print('Issue with set swapping, flag 2.','weightsValue:',weightsValue, file=sys.stderr)
sys.exit()
# move sample from Error set to Support set
elif flag == 3:
index = self.errorSetIndices[minIndex]
print('Moving sample {0} from error to support set.'.format(index), file=sys.stderr)
H[index] = np.sign(H[index])*self.eps
self.supportSetIndices.append(index)
self.errorSetIndices.pop(minIndex)
self.R = self.addSampleToR(index, 'ErrorSet', beta, gamma)
# move sample from Remainder set to Support set
elif flag == 4:
index = self.remainderSetIndices[minIndex]
print('Moving sample {0} from remainder to support set.'.format(index), file=sys.stderr)
H[index] = np.sign(H[index])*self.eps
self.supportSetIndices.append(index)
self.remainderSetIndices.pop(minIndex)
self.R = self.addSampleToR(index, 'RemainingSet', beta, gamma)
return H, False
def addSampleToR(self, sampleIndex, sampleOldSet, beta, gamma):
print('Adding sample {0} to R matrix.'.format(sampleIndex), file=sys.stderr)
X = np.array(self.X)
sampleX = X[sampleIndex,:]
sampleX.shape = (sampleX.size//self.numFeatures,self.numFeatures)
# Add first element
if self.R.shape[0] <= 1:
Rnew = np.ones([2,2])
Rnew[0,0] = -self.computeKernelOutput(sampleX,sampleX)
Rnew[1,1] = 0
# Other elements
else:
# recompute beta/gamma if from error/remaining set
if sampleOldSet == 'ErrorSet' or sampleOldSet == 'RemainingSet':
# beta, gamma = self.computeBetaGamma(sampleIndex)
Qii = self.computeKernelOutput(sampleX, sampleX)
Qsi = self.computeKernelOutput(X[self.supportSetIndices[0:-1],:], sampleX)
beta = -self.R @ np.append(np.matrix([1]),Qsi,axis=0)
beta[np.isnan(beta)] = 0
beta.shape = (len(beta),1)
gamma[sampleIndex] = Qii + np.append(1,Qsi.T)@beta
gamma[np.isnan(gamma)] = 0
gamma.shape = (len(gamma),1)
# add a column and row of zeros onto right/bottom of R
r,c = self.R.shape
Rnew = np.append(self.R, np.zeros([r,1]), axis=1)
Rnew = np.append(Rnew, np.zeros([1,c+1]), axis=0)
# update R
if gamma[sampleIndex] != 0:
# Numpy so wonky! SO WONKY.
beta1 = np.append(beta, [[1]], axis=0)
Rnew = Rnew + 1/gamma[sampleIndex].item()*beta1@beta1.T
if np.any(np.isnan(Rnew)):
print('R has become inconsistent. Training failed at sampleIndex {0}'.forma, file=sys.stderrt(sampleIndex))
sys.exit()
return Rnew
def removeSampleFromR(self, sampleIndex):
print('Removing sample {0} from R matrix.'.format(sampleIndex), file=sys.stderr)
sampleIndex += 1
I = list(range(sampleIndex))
I.extend(range(sampleIndex+1,self.R.shape[0]))
I = np.array(I)
I.shape = (1,I.size)
if self.debug:
print('I',I, file=sys.stderr)
print('RII',self.R[I.T,I], file=sys.stderr)
# Adjust R
if self.R[sampleIndex,sampleIndex] != 0:
Rnew = self.R[I.T,I] - (self.R[I.T,sampleIndex]*self.R[sampleIndex,I]) / self.R[sampleIndex,sampleIndex].item()
else:
Rnew = np.copy(self.R[I.T,I])
# Check for bad things
if np.any(np.isnan(Rnew)):
print('R has become inconsistent. Training failed removing sampleIndex {0}'.forma, file=sys.stderrt(sampleIndex))
sys.exit()
if Rnew.size == 1:
print('Time to annhilate R? R:',Rnew, file=sys.stderr)
Rnew = np.matrix([])
return Rnew
    def learn(self, newSampleX, newSampleY):
        """Incorporate one (x, y) sample online.

        Appends the sample, then iterates the KKT-preserving migration
        procedure (computeBetaGamma / findMinVariation / adjustSets) until
        the new sample settles into the support, error, or remainder set.
        """
        self.numSamplesTrained += 1
        self.X.append(newSampleX)
        self.Y.append(newSampleY)
        self.weights = np.append(self.weights,0)
        i = self.numSamplesTrained - 1 # stupid off-by-one errors
        H = self.computeMargin(self.X, self.Y)
        # correctly classified sample, skip the rest of the algorithm!
        if (abs(H[i]) <= self.eps):
            print('Adding new sample {0} to remainder set, within eps.'.format(i), file=sys.stderr)
            if self.debug:
                print('weights',self.weights, file=sys.stderr)
            self.remainderSetIndices.append(i)
            return
        newSampleAdded = False
        iterations = 0
        while not newSampleAdded:
            # Ensure we're not looping infinitely
            iterations += 1
            if iterations > self.numSamplesTrained*100:
                print('Warning: we appear to be in an infinite loop.', file=sys.stderr)
                sys.exit()
                # unreachable after sys.exit(); kept as in source
                iterations = 0
            # Compute beta/gamma for constraint optimization
            beta, gamma = self.computeBetaGamma(i)
            # Find minimum variation and determine how we should shift samples between sets
            deltaC, flag, minIndex = self.findMinVariation(H, beta, gamma, i)
            # Update weights and bias based on variation
            if len(self.supportSetIndices) > 0 and len(beta)>0:
                self.weights[i] += deltaC
                delta = beta*deltaC
                # First beta entry moves the bias; the rest move support weights.
                self.bias += delta.item(0)
                # numpy is wonky...
                weightDelta = np.array(delta[1:])
                weightDelta.shape = (len(weightDelta),)
                self.weights[self.supportSetIndices] += weightDelta
                H += gamma*deltaC
            else:
                self.bias += deltaC
                H += deltaC
            # Adjust sets, moving samples between them according to flag
            H,newSampleAdded = self.adjustSets(H, beta, gamma, i, flag, minIndex)
        if self.debug:
            print('weights',self.weights, file=sys.stderr)
class OSVR:
    """Online SVR wrapper: one OnlineSVR per capital, trained on windows of
    daily relative price changes to predict the relative gap between the
    current price and the near-future moving average.
    """
    def __init__(
        self,
        future_scope=3,
        dimension=10,
        C=10,
        kernelParam=30,
        epsilon=1e-10
    ):
        # future_scope: days ahead averaged to build the regression target
        # dimension: length of the daily-change window fed to the SVR
        # epsilon: SVR tube width (acceptable error)
        self.future_scope = future_scope
        self.dimension = dimension
        self.C = C
        self.kernelParam = kernelParam
        self.epsilon = epsilon
    def fit(self, price_indices):
        """Train one OnlineSVR per capital on its historical price array."""
        self.models = {}
        for capital_name in price_indices:
            times = []
            curr_prices = price_indices[capital_name]
            # Daily relative changes; day 0 has no predecessor, so use 0.
            seq_price = price_indices[capital_name].copy()
            seq_price[1:] = (seq_price[1:] - seq_price[:-1]) / seq_price[:-1]
            seq_price[0] = 0
            X = rolling_window(seq_price, self.dimension)
            # Pad the first dimension-1 rows so X stays aligned with prices.
            padding = np.ones((self.dimension-1, self.dimension))
            X = np.vstack((padding, X))
            # Target: relative gap between the future moving average and today's price.
            cum_sum = np.cumsum(curr_prices)
            moving_avr = curr_prices.copy()
            moving_avr[:-self.future_scope] = (cum_sum[self.future_scope:] - cum_sum[:-self.future_scope]) / self.future_scope
            f = np.zeros((curr_prices.shape[0],))
            f[:] = (moving_avr - curr_prices) / curr_prices
            self.models[capital_name] = OnlineSVR(numFeatures = X.shape[1], C = self.C, eps = self.epsilon, kernelParam = self.kernelParam, bias = 0, debug = False)
            for i in range(X.shape[0]):
                # Crude progress/timing trace every 20/40 samples.
                if i % 20 == 0:
                    times.append((i,time()))
                if i % 40 == 0:
                    # print('%%%%%%%%%%%%%%% Data point {0} %%%%%%%%%%%%%%%'.format(i))
                    print(times)
                self.models[capital_name].learn(X[i,:], f[i])
    def predict(self, recent_prices, update=True, true_values=None, loss_functions=None):
        """Predict the next price per test window; optionally learn online
        from the true values and report the given loss functions.
        """
        if true_values is None and update:
            raise Exception('True values must be provided if update parameter is set to true')
        if loss_functions is None:
            loss_functions = {'MSE': lambda truth, estimate, _prices: np.sqrt(np.mean((truth-estimate)**2))}
        loss_results = {}
        result = {}
        for capital_name in recent_prices:
            result[capital_name] = np.array([])
            row_number = 0
            if recent_prices[capital_name].shape[1] != self.dimension+1:
                raise Exception('The matrix to be predicted must be of the shape (*, dimension+1)')
            all_daily_changes = (recent_prices[capital_name][:,1:] - recent_prices[capital_name][:,:-1]) / recent_prices[capital_name][:,:-1]
            for row in recent_prices[capital_name]:
                daily_changes = all_daily_changes[row_number]
                change_ratio = self.models[capital_name].predict(np.array([daily_changes]))[0]
                # The model predicts a relative change; convert to an absolute price.
                res = (change_ratio + 1) * row[-1]
                result[capital_name] = np.concatenate((result[capital_name], [res]))
                if update:
                    newF = (true_values[capital_name][row_number] - row[-1]) / row[-1]
                    # BUG FIX: learn() requires features AND target; the original
                    # call passed only daily_changes, raising TypeError on every
                    # online update.
                    self.models[capital_name].learn(daily_changes, newF)
                row_number += 1
            if true_values is not None:
                loss_results[capital_name] = {}
                for loss_name in loss_functions:
                    loss_results[capital_name][loss_name] = loss_functions[loss_name](true_values[capital_name], result[capital_name], recent_prices[capital_name])
        return result, loss_results
|
{"/src/MLShepard.py": ["/src/utils.py"], "/master.py": ["/src/utils.py", "/src/GBL.py", "/src/MLShepard.py", "/src/MondrianForest.py", "/src/OARIMA.py", "/src/OSVR.py", "/src/RandConLSTM.py", "/src/WHLR.py"], "/src/OSVR.py": ["/src/utils.py"], "/src/WHLR.py": ["/src/utils.py"], "/src/OARIMA.py": ["/src/utils.py"], "/src/RandConLSTM.py": ["/src/utils.py"], "/src/MondrianForest.py": ["/src/utils.py"], "/src/GBL.py": ["/src/utils.py"]}
|
16,426
|
rasakereh/time-series-predictors
|
refs/heads/master
|
/src/utils.py
|
import numpy as np
import json
def rolling_window(a, window):
    """Return a zero-copy strided view of `a` containing every overlapping
    length-`window` slice along the last axis.
    """
    win_count = a.shape[-1] - window + 1
    view_shape = a.shape[:-1] + (win_count, window)
    view_strides = a.strides + (a.strides[-1],)
    return np.lib.stride_tricks.as_strided(a, shape=view_shape, strides=view_strides)
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that serialises numpy arrays as plain nested lists."""
    def default(self, obj):
        if not isinstance(obj, np.ndarray):
            # Defer to the stock encoder (raises TypeError for unknown types).
            return super().default(obj)
        return obj.tolist()
|
{"/src/MLShepard.py": ["/src/utils.py"], "/master.py": ["/src/utils.py", "/src/GBL.py", "/src/MLShepard.py", "/src/MondrianForest.py", "/src/OARIMA.py", "/src/OSVR.py", "/src/RandConLSTM.py", "/src/WHLR.py"], "/src/OSVR.py": ["/src/utils.py"], "/src/WHLR.py": ["/src/utils.py"], "/src/OARIMA.py": ["/src/utils.py"], "/src/RandConLSTM.py": ["/src/utils.py"], "/src/MondrianForest.py": ["/src/utils.py"], "/src/GBL.py": ["/src/utils.py"]}
|
16,427
|
rasakereh/time-series-predictors
|
refs/heads/master
|
/src/WHLR.py
|
import numpy as np
from sklearn.linear_model import LinearRegression as LR
from sklearn.kernel_approximation import RBFSampler
from .utils import rolling_window
class WHLR:
    """Widrow-Hoff linear regression on random RBF features.

    fit() trains a batch LinearRegression per capital on RBFSampler
    features of daily-change windows; predict() then refines the linear
    weights online with LMS (Widrow-Hoff) updates as true values arrive.
    """
    def __init__(
        self,
        future_scope=3,
        dimension=10,
        avr_elemwise_dist=0.04,
        learning_rate=1e-2
    ):
        # future_scope: days ahead averaged for the regression target
        # dimension: length of the daily-change window
        # avr_elemwise_dist: expected per-element distance between windows
        self.future_scope = future_scope
        self.dimension = dimension
        self.avr_elemwise_dist = avr_elemwise_dist
        self.learning_rate = learning_rate
        # RBF width heuristic from the expected window distance — TODO confirm derivation
        self.gamma = (.33 / (dimension**.5 * avr_elemwise_dist))**2
    def fit(self, price_indices):
        """Fit one (RBFSampler, LinearRegression) pair per capital."""
        self.models = {}
        for capital_name in price_indices:
            self.models[capital_name] = {'rbf': RBFSampler(gamma=self.gamma), 'lr': LR()}
            curr_prices = price_indices[capital_name]
            # Daily relative changes; day 0 has no predecessor so it is set to 0.
            seq_price = price_indices[capital_name].copy()
            seq_price[1:] = (seq_price[1:] - seq_price[:-1]) / seq_price[:-1]
            seq_price[0] = 0
            X = rolling_window(seq_price, self.dimension)
            # Pad the first dimension-1 rows so X stays aligned with prices.
            padding = np.ones((self.dimension-1, self.dimension)) #np.random.normal(0, self.epsilon, (self.dimension-1, self.dimension))
            X = np.vstack((padding, X))
            # Target: relative gap between the future moving average and the current price.
            cum_sum = np.cumsum(curr_prices)
            moving_avr = curr_prices.copy()
            moving_avr[:-self.future_scope] = (cum_sum[self.future_scope:] - cum_sum[:-self.future_scope]) / self.future_scope
            f = np.zeros((curr_prices.shape[0],))
            f[:] = (moving_avr - curr_prices) / curr_prices
            X = self.models[capital_name]['rbf'].fit_transform(X)
            self.models[capital_name]['lr'].fit(X, f)
    def predict(self, recent_prices, update=True, true_values=None, loss_functions=None):
        """Predict the next price per window; optionally LMS-update the
        linear weights using the realised true values."""
        if true_values is None and update:
            raise Exception('True values must be provided if update parameter is set to true')
        if loss_functions is None:
            loss_functions = {'MSE': lambda truth, estimate, _prices: np.sqrt(np.mean((truth-estimate)**2))}
        loss_results = {}
        result = {}
        for capital_name in recent_prices:
            result[capital_name] = np.array([])
            row_number = 0
            if recent_prices[capital_name].shape[1] != self.dimension+1:
                raise Exception('The matrix to be predicted must be of the shape (*, dimension+1)')
            all_daily_changes = (recent_prices[capital_name][:,1:] - recent_prices[capital_name][:,:-1]) / recent_prices[capital_name][:,:-1]
            for row in recent_prices[capital_name]:
                daily_changes = all_daily_changes[row_number]
                daily_changes = self.models[capital_name]['rbf'].transform([daily_changes])[0]
                change_ratio = self.models[capital_name]['lr'].predict([daily_changes])[0]
                # Model predicts a relative change; convert back to an absolute price.
                res = (change_ratio + 1) * row[-1]
                result[capital_name] = np.concatenate((result[capital_name], [res]))
                if update:
                    newF = (true_values[capital_name][row_number] - row[-1]) / row[-1]
                    # Widrow-Hoff (LMS) step on [intercept, coef] toward the realised change.
                    augmentedX = np.concatenate([[1], daily_changes])
                    w = np.concatenate([[self.models[capital_name]['lr'].intercept_], self.models[capital_name]['lr'].coef_])
                    w = w - self.learning_rate * (np.dot(w, augmentedX) - newF) * augmentedX
                    self.models[capital_name]['lr'].intercept_ = w[0]
                    self.models[capital_name]['lr'].coef_ = w[1:]
                row_number += 1
            if true_values is not None:
                loss_results[capital_name] = {}
                for loss_name in loss_functions:
                    loss_results[capital_name][loss_name] = loss_functions[loss_name](true_values[capital_name], result[capital_name], recent_prices[capital_name])
        return result, loss_results
|
{"/src/MLShepard.py": ["/src/utils.py"], "/master.py": ["/src/utils.py", "/src/GBL.py", "/src/MLShepard.py", "/src/MondrianForest.py", "/src/OARIMA.py", "/src/OSVR.py", "/src/RandConLSTM.py", "/src/WHLR.py"], "/src/OSVR.py": ["/src/utils.py"], "/src/WHLR.py": ["/src/utils.py"], "/src/OARIMA.py": ["/src/utils.py"], "/src/RandConLSTM.py": ["/src/utils.py"], "/src/MondrianForest.py": ["/src/utils.py"], "/src/GBL.py": ["/src/utils.py"]}
|
16,428
|
rasakereh/time-series-predictors
|
refs/heads/master
|
/src/OARIMA.py
|
import numpy as np
import pmdarima as pm
from math import sqrt
from .utils import rolling_window
class OARIMA:
    """
    Online ARIMA forecaster: one moving-average model per capital.

    fit() selects a single (p, d, q) order with auto_arima on the first
    series and reuses it for every capital; each capital then gets its own
    MA coefficient vector fitted on relative daily price changes.
    predict() forecasts one row at a time and, when `update` is true,
    refines the MA coefficients online with either online gradient descent
    ('ogd') or an online-Newton-style step ('ons').
    """
    def __init__(
        self,
        dimension=10,
        lrate=1e-2,
        epsilon=1e-10,
        method='ogd'
    ):
        # dimension: expected window width of the matrices passed to predict()
        # lrate: online learning rate for both update rules
        # epsilon: ONS only — scale of the initial inverse-Hessian proxy
        # method: 'ogd' or 'ons'
        self.dimension = dimension
        self.lrate = lrate
        self.epsilon = epsilon
        self.method = method
    def fit(self, price_indices):
        """Fit one ARIMA per capital.

        price_indices: dict capital_name -> 1-D price array (np.ndarray).
        Stores per-capital MA coefficients plus the trailing slice of the
        change series needed to difference and forecast the next step.
        """
        self.model = {}
        self.order = None
        with_intercept = None  # NOTE(review): assigned but never read afterwards
        for capital_name in price_indices:
            self.model[capital_name] = {'ma': None, 'data': None, 'size': 0}
            # convert absolute prices to relative day-over-day changes
            seq_price = price_indices[capital_name].copy()
            seq_price[1:] = (seq_price[1:] - seq_price[:-1]) / seq_price[:-1]
            seq_price[0] = 0
            if self.order is None:
                # the (p, d, q) order is chosen once, on the first capital,
                # with p forced to 0 (pure MA over differenced changes)
                model = pm.auto_arima(seq_price, seasonal=False, start_p=0, max_p=0, start_q=3, max_q=50, trace=True)
                params = model.get_params()
                self.order, _with_intercept = params['order'], params['with_intercept']
                if self.order[2] < 4:
                    # enforce at least 4 MA terms
                    self.order = (self.order[0], self.order[1], 4)
            model = pm.ARIMA(order=self.order, with_intercept=False)
            model.fit(seq_price)
            # keep only what predict() needs: MA coefficients and the last
            # d+q change values (enough to difference and apply the MA part)
            self.model[capital_name]['ma'] = model.maparams()
            self.model[capital_name]['data'] = seq_price[-(self.order[1] + self.order[2]):]
            self.model[capital_name]['size'] = seq_price.shape[0] - (self.order[1] + self.order[2])
        pass
    def predict(self, recent_prices, update=True, true_values=None, loss_functions=None):
        """
        Forecast the next price for every row of each capital's window matrix.

        recent_prices: dict name -> array of shape (*, dimension+1) of prices.
        update: when True, correct the stored change history with the realized
            value and take an online step on the MA coefficients.
        true_values: dict name -> realized next prices (required if update).
        loss_functions: {name: fn(truth, estimate, prices)}; defaults to RMSE.
        Returns (predictions_dict, losses_dict).
        Raises Exception on missing true_values (with update) or a bad shape.
        """
        if true_values is None and update:
            raise Exception('True values must be provided if update parameter is set to true')
        if loss_functions is None:
            loss_functions = {'MSE': lambda truth, estimate, _prices: np.sqrt(np.mean((truth-estimate)**2))}
        loss_results = {}
        result = {}
        for capital_name in recent_prices:
            result[capital_name] = np.array([])
            # ONS inverse-Hessian proxy, reset per capital
            A_trans = np.identity(self.order[2]) * self.epsilon
            row_number = 0
            if recent_prices[capital_name].shape[1] != self.dimension+1:
                raise Exception('The matrix to be predicted must be of the shape (*, dimension+1)')
            for row in recent_prices[capital_name]:
                # difference the stored change history d (= order[1]) times,
                # accumulating the last value of each level to invert later
                diffed_values = self.model[capital_name]['data']
                if self.order[1]:
                    diffed_sum = diffed_values[-1]
                else:
                    diffed_sum = 0
                for _ in range(self.order[1]):
                    diffed_values = diffed_values[1:] - diffed_values[:-1]
                    diffed_sum += diffed_values[-1]
                # MA part applied to the differenced tail
                estimate = (self.model[capital_name]['ma'] @ diffed_values)
                change_ratio = diffed_sum + estimate
                # turn the predicted relative change into a price
                res = (change_ratio + 1) * row[-1]
                result[capital_name] = np.concatenate((result[capital_name], [res]))
                # append the prediction to the rolling history (replaced by the
                # realized value below when updating)
                self.model[capital_name]['data'] = np.concatenate((self.model[capital_name]['data'][1:], [change_ratio]))
                self.model[capital_name]['size'] += 1
                if update:
                    exact = (true_values[capital_name][row_number] - row[-1])/row[-1]
                    self.model[capital_name]['data'][-1] = exact
                    diff = estimate - exact
                    if self.method == 'ogd':
                        # online gradient descent with a 1/sqrt(t) step decay
                        s = self.model[capital_name]['size']
                        self.model[capital_name]['ma'] = self.model[capital_name]['ma'] - diffed_sum*2*diff/sqrt(row_number+s+1)*self.lrate
                    elif self.method == 'ons':
                        # online-Newton step: rank-1 update of the inverse-Hessian proxy
                        grad = (2*diffed_values*diff).reshape((1, -1))
                        # print(A_trans, A_trans.shape)
                        # print(grad, grad.shape)
                        A_trans = A_trans - A_trans @ grad.T @ grad @ A_trans/(1 + grad @ A_trans @ grad.T)
                        self.model[capital_name]['ma'] = self.model[capital_name]['ma'] - self.lrate * grad @ A_trans
                    # flatten (ONS leaves a row matrix) and renormalize to sum 1
                    self.model[capital_name]['ma'] = self.model[capital_name]['ma'].reshape((-1,))
                    self.model[capital_name]['ma'] = self.model[capital_name]['ma'] / np.sum(self.model[capital_name]['ma'])
                row_number += 1
            if true_values is not None:
                loss_results[capital_name] = {}
                for loss_name in loss_functions:
                    loss_results[capital_name][loss_name] = loss_functions[loss_name](true_values[capital_name], result[capital_name], recent_prices[capital_name])
        return result, loss_results
|
{"/src/MLShepard.py": ["/src/utils.py"], "/master.py": ["/src/utils.py", "/src/GBL.py", "/src/MLShepard.py", "/src/MondrianForest.py", "/src/OARIMA.py", "/src/OSVR.py", "/src/RandConLSTM.py", "/src/WHLR.py"], "/src/OSVR.py": ["/src/utils.py"], "/src/WHLR.py": ["/src/utils.py"], "/src/OARIMA.py": ["/src/utils.py"], "/src/RandConLSTM.py": ["/src/utils.py"], "/src/MondrianForest.py": ["/src/utils.py"], "/src/GBL.py": ["/src/utils.py"]}
|
16,429
|
rasakereh/time-series-predictors
|
refs/heads/master
|
/src/RandConLSTM.py
|
import numpy as np
import math
import torch
from torch import nn, optim
# from torch.autograd import Variable
from torch.nn.utils import clip_grad_norm
from collections import OrderedDict
import pandas as pd
from .RCLSTM import rclstm
from .RCLSTM.rclstm import RNN
from .utils import rolling_window
loss_fn = nn.MSELoss()


def compute_loss_accuracy(model, data, label):
    """Run the RNN stack and return (RMSE loss, predictions).

    model: indexable pair — model[0] is the recurrent stage (called with
    input_/hx keywords, returning (_, (hidden_states, _))), model[1] the
    final linear head applied to the last layer's hidden state.
    """
    _, (hidden_states, _) = model[0](input_=data, hx=None)
    predictions = model[1](hidden_states[-1])
    rmse = torch.sqrt(loss_fn(input=predictions, target=label))
    return rmse, predictions
def exp_lr_scheduler(optimizer, epoch, init_lr=1e-2, lr_decay_epoch=3):
    """Step-decay the learning rate in place: 10x smaller every `lr_decay_epoch` epochs.

    Writes the decayed rate into every parameter group and returns the
    (mutated) optimizer for chaining.
    """
    decayed = init_lr * (0.1 ** (epoch // lr_decay_epoch))
    for group in optimizer.param_groups:
        group['lr'] = decayed
    return optimizer
class RandConLSTM:
    """
    Randomly-connected LSTM (RCLSTM) price forecaster, one network per capital.

    fit() trains each network on sliding windows of relative daily price
    changes, targeting the relative gap between a `future_scope`-day forward
    moving average and the current price.  predict() forecasts row by row
    and, when `update` is true, fine-tunes on each realized value.
    """
    def __init__(
        self,
        future_scope=3,
        dimension=10,
        epochs=2,
        batch_size=128,
        num_layers=1,
        epsilon=1e-10,
        hidden_size=100,
        connectivity=.2
    ):
        # future_scope: horizon (days) of the moving-average target
        # dimension: width of the daily-change input window
        # epochs: passes over the data in fit() and per-row in predict(update=True)
        # connectivity: fraction of recurrent connections kept by the RCLSTM cell
        self.future_scope = future_scope
        self.dimension = dimension
        self.epochs = epochs
        self.batch_size = batch_size
        self.num_layers = num_layers
        self.epsilon = epsilon
        self.hidden_size = hidden_size
        self.connectivity = connectivity
    def fit(self, price_indices):
        """Train one RCLSTM per capital.

        price_indices: dict capital_name -> 1-D price array (np.ndarray).
        """
        self.models = {}
        for capital_name in price_indices:
            curr_prices = price_indices[capital_name]
            # relative day-over-day changes; first entry pinned to 0
            seq_price = price_indices[capital_name].copy()
            seq_price[1:] = (seq_price[1:] - seq_price[:-1]) / seq_price[:-1]
            seq_price[0] = 0
            X = rolling_window(seq_price, self.dimension)
            # pad the head so X lines up 1:1 with curr_prices rows
            padding = np.ones((self.dimension-1, self.dimension)) #np.random.normal(0, self.epsilon, (self.dimension-1, self.dimension))
            X = np.vstack((padding, X))
            # target: relative distance from today's price to the forward moving average
            cum_sum = np.cumsum(curr_prices)
            moving_avr = curr_prices.copy()
            moving_avr[:-self.future_scope] = (cum_sum[self.future_scope:] - cum_sum[:-self.future_scope]) / self.future_scope
            f = np.zeros((curr_prices.shape[0],))
            f[:] = (moving_avr - curr_prices) / curr_prices
            # NOTE(review): dropout=1 looks suspicious — with nn semantics that
            # would zero activations; confirm what the RCLSTM cell does with it
            rnn_model = RNN(device='cpu', cell_class='rclstm', input_size=1,
                            hidden_size=self.hidden_size, connectivity=self.connectivity,
                            num_layers=self.num_layers, batch_first=True, dropout=1)
            fc2 = nn.Linear(in_features=self.hidden_size, out_features=1)
            self.models[capital_name] = nn.Sequential(OrderedDict([
                ('rnn', rnn_model),
                ('fc2', fc2),
            ]))
            # if use_gpu:
            #     model.cuda()
            self.models[capital_name].to('cpu')
            optim_method = optim.Adam(params=self.models[capital_name].parameters())
            iter_cnt = 0
            # NOTE(review): `//` already floors, so math.ceil is a no-op here and
            # the final partial batch is never visited — likely meant `/`
            num_batch = int(math.ceil(len(X) // self.batch_size))
            while iter_cnt < self.epochs:
                optimizer = exp_lr_scheduler(optim_method, iter_cnt, init_lr=0.01, lr_decay_epoch=3)
                for i in range(num_batch):
                    low_index = self.batch_size*i
                    high_index = self.batch_size*(i+1)
                    if low_index <= len(X)-self.batch_size:
                        # full batch, shaped (batch, seq, 1) for batch_first LSTM input
                        batch_inputs = X[low_index:high_index].reshape(self.batch_size, self.dimension, 1).astype(np.float32)
                        batch_targets = f[low_index:high_index].reshape((self.batch_size, 1)).astype(np.float32)
                    else:
                        # trailing partial batch
                        batch_inputs = X[low_index:].astype(float)
                        batch_targets = f[low_index:].astype(float)
                    batch_inputs = torch.from_numpy(batch_inputs).to('cpu')
                    batch_targets = torch.from_numpy(batch_targets).to('cpu')
                    # if use_gpu:
                    #     batch_inputs = batch_inputs.cuda()
                    #     batch_targets = batch_targets.cuda()
                    self.models[capital_name].train(True)
                    self.models[capital_name].zero_grad()
                    train_loss, _logits = compute_loss_accuracy(self.models[capital_name], data=batch_inputs, label=batch_targets)
                    train_loss.backward()
                    optimizer.step()
                    if i % 100 == 0:
                        print('the %dth iter, the %d/%dth batch, train loss is %.4f' % (iter_cnt, i, num_batch, train_loss.item()))
                # save model
                # save_path = '{}/{}'.format(save_dir, int(round(connectivity/.01)))
                # if os.path.exists(save_path):
                #     torch.save(model, os.path.join(save_path, str(iter_cnt)+'.pt'))
                # else:
                #     os.makedirs(save_path)
                #     torch.save(model, os.path.join(save_path, str(iter_cnt)+'.pt'))
                iter_cnt += 1
    def predict(self, recent_prices, update=True, true_values=None, loss_functions=None):
        """
        Predict the next price for every row of each capital's window matrix.

        recent_prices: dict name -> array of shape (*, dimension+1) of prices.
        update: when True, fine-tune the network on each realized value.
        true_values: dict name -> realized next prices (required if update).
        loss_functions: {name: fn(truth, estimate, prices)}; defaults to RMSE.
        Returns (predictions_dict, losses_dict).
        """
        if true_values is None and update:
            raise Exception('True values must be provided if update parameter is set to true')
        if loss_functions is None:
            loss_functions = {'MSE': lambda truth, estimate, _prices: np.sqrt(np.mean((truth-estimate)**2))}
        loss_results = {}
        result = {}
        for capital_name in recent_prices:
            result[capital_name] = np.array([])
            row_number = 0
            if recent_prices[capital_name].shape[1] != self.dimension+1:
                raise Exception('The matrix to be predicted must be of the shape (*, dimension+1)')
            # per-row relative changes within each price window
            all_daily_changes = (recent_prices[capital_name][:,1:] - recent_prices[capital_name][:,:-1]) / recent_prices[capital_name][:,:-1]
            for row in recent_prices[capital_name]:
                daily_changes = all_daily_changes[row_number]
                self.models[capital_name].train(False)
                tdc = daily_changes.reshape((1,-1,1)).astype(np.float32)
                torch_daily_changes = torch.from_numpy(tdc).to('cpu')
                # manual forward pass through (rnn, fc2); output is a scalar change ratio
                lstm_out = self.models[capital_name][0](torch_daily_changes)[0]
                change_ratio = self.models[capital_name][1](lstm_out)[0].detach().numpy().reshape((-1,))[0]
                res = (change_ratio + 1) * row[-1]
                result[capital_name] = np.concatenate((result[capital_name], [res]))
                if update:
                    # online fine-tuning: a few epochs on this single realized sample
                    self.models[capital_name].train(True)
                    optim_method = optim.Adam(params=self.models[capital_name].parameters())
                    newF = (true_values[capital_name][row_number] - row[-1]) / row[-1]
                    iter_cnt = 0
                    while iter_cnt < self.epochs:
                        optimizer = exp_lr_scheduler(optim_method, iter_cnt, init_lr=0.01, lr_decay_epoch=3)
                        batch_inputs = daily_changes.reshape((1, -1, 1)).astype(np.float32)
                        batch_targets = np.array([newF]).reshape((1, 1)).astype(np.float32)
                        batch_inputs = torch.from_numpy(batch_inputs).to('cpu')
                        batch_targets = torch.from_numpy(batch_targets).to('cpu')
                        self.models[capital_name].train(True)
                        self.models[capital_name].zero_grad()
                        train_loss, _logits = compute_loss_accuracy(self.models[capital_name], data=batch_inputs, label=batch_targets)
                        train_loss.backward()
                        optimizer.step()
                        iter_cnt += 1
                row_number += 1
            if true_values is not None:
                loss_results[capital_name] = {}
                for loss_name in loss_functions:
                    loss_results[capital_name][loss_name] = loss_functions[loss_name](true_values[capital_name], result[capital_name], recent_prices[capital_name])
        return result, loss_results
|
{"/src/MLShepard.py": ["/src/utils.py"], "/master.py": ["/src/utils.py", "/src/GBL.py", "/src/MLShepard.py", "/src/MondrianForest.py", "/src/OARIMA.py", "/src/OSVR.py", "/src/RandConLSTM.py", "/src/WHLR.py"], "/src/OSVR.py": ["/src/utils.py"], "/src/WHLR.py": ["/src/utils.py"], "/src/OARIMA.py": ["/src/utils.py"], "/src/RandConLSTM.py": ["/src/utils.py"], "/src/MondrianForest.py": ["/src/utils.py"], "/src/GBL.py": ["/src/utils.py"]}
|
16,430
|
rasakereh/time-series-predictors
|
refs/heads/master
|
/results/dumps.py
|
from os import listdir
from os.path import join, isfile
import json
import numpy as np
dataDir = '../dumps'
# map file name -> full path for every result dump (*.dmp) in the dumps dir
dataFiles = {f: join(dataDir, f) for f in listdir(dataDir) if isfile(join(dataDir, f)) and f[-4:] == '.dmp'}
# print(list(dataFiles.keys()))
for f in dataFiles:
    # each dump is JSON of the form [predictions, losses]; only losses are used
    dump = open(dataFiles[f], 'r')  # NOTE(review): handle never closed — consider `with`
    res = json.load(dump)
    print('============ %s ============'%(f))
    loss = res[1]
    # losses are {capital: {loss_name: value}}; take loss names from any capital
    loss_funcs = list(loss[list(loss.keys())[0]].keys())
    loss_vals = {loss_func: [] for loss_func in loss_funcs}
    for loss_func in loss_funcs:
        # aggregate each loss across capitals, skipping NaNs
        for capital in loss:
            if not np.isnan(loss[capital][loss_func]):
                loss_vals[loss_func].append(loss[capital][loss_func])
        print('''loss: %s
(mean, sd, n#)
(%f, %f, %d)'''%(loss_func, np.mean(loss_vals[loss_func]), np.std(loss_vals[loss_func]), len(loss_vals[loss_func])))
    print()
|
{"/src/MLShepard.py": ["/src/utils.py"], "/master.py": ["/src/utils.py", "/src/GBL.py", "/src/MLShepard.py", "/src/MondrianForest.py", "/src/OARIMA.py", "/src/OSVR.py", "/src/RandConLSTM.py", "/src/WHLR.py"], "/src/OSVR.py": ["/src/utils.py"], "/src/WHLR.py": ["/src/utils.py"], "/src/OARIMA.py": ["/src/utils.py"], "/src/RandConLSTM.py": ["/src/utils.py"], "/src/MondrianForest.py": ["/src/utils.py"], "/src/GBL.py": ["/src/utils.py"]}
|
16,431
|
rasakereh/time-series-predictors
|
refs/heads/master
|
/data/RTcryptos/AAA_delta.py
|
import pandas as pd
from dfply import X, arrange, mutate, drop
import numpy as np
from os import listdir
from os.path import isfile
csvFiles = [name for name in listdir('.') if isfile(name) and name[-4:] == '.csv']


def loadPriceIndex(dataFile):
    """Return the gaps between consecutive timestamps of one CSV price file.

    Rows are sorted by the Date column first; the raw gaps are divided by
    1000 (presumably millisecond timestamps converted to seconds — verify
    against the data source).
    """
    frame = pd.read_csv(dataFile) >> arrange(X.Date)
    stamps = frame['Date'].values
    return (stamps[1:] - stamps[:-1]) / 1000
allDeltas = []
for csvFile in csvFiles:
    print(csvFile, end=':\t\t')
    try:
        deltas = loadPriceIndex(csvFile)
        # (min, max, median, mean, sd, total span in days, count as "k,remainder")
        pIndex = (np.min(deltas), np.max(deltas), np.median(deltas), np.mean(deltas), np.std(deltas), (np.sum(deltas) / 3600 / 24), '%d,%d'%(deltas.shape[0] // 1000, deltas.shape[0] % 1000))
        print(pIndex)
        # log-transform gaps for the boxplot (+1 guards against zero gaps)
        allDeltas.append(np.log(deltas + 1))
    except:  # NOTE(review): bare except hides the real error — narrow and report it
        print('Failed')
import matplotlib.pyplot as plt
plt.boxplot(allDeltas)
plt.show()
|
{"/src/MLShepard.py": ["/src/utils.py"], "/master.py": ["/src/utils.py", "/src/GBL.py", "/src/MLShepard.py", "/src/MondrianForest.py", "/src/OARIMA.py", "/src/OSVR.py", "/src/RandConLSTM.py", "/src/WHLR.py"], "/src/OSVR.py": ["/src/utils.py"], "/src/WHLR.py": ["/src/utils.py"], "/src/OARIMA.py": ["/src/utils.py"], "/src/RandConLSTM.py": ["/src/utils.py"], "/src/MondrianForest.py": ["/src/utils.py"], "/src/GBL.py": ["/src/utils.py"]}
|
16,432
|
rasakereh/time-series-predictors
|
refs/heads/master
|
/src/MondrianForest.py
|
import numpy as np
import math
from .MF.mondrianforest import MondrianForest as MondrianForest_Main
from .MF.mondrianforest_utils import precompute_minimal
from .utils import rolling_window
def prepareData(X, f, settings, single=False):
    """
    Package (X, f) into the dict format the vendored MondrianForest expects.

    X: 2-D feature matrix; f: 1-D target vector aligned with X's rows.
    settings: object with an `n_minibatches` attribute (ignored when
        `single` is True, which forces one minibatch).
    Returns a data dict with train arrays plus a 'train_ids_partition'
    mapping each minibatch index to its row ids ('current') and to all ids
    seen so far ('cumulative'); the remainder rows go to the last minibatch.
    Raises ValueError when there are fewer rows than minibatches.
    """
    data = {
        'x_train': X,
        'y_train': f,
        'n_dim': X.shape[1],
        'n_train': f.shape[0],
        'x_test': None,
        'y_test': None,
        'n_test': 0,
        'is_sparse': False
    }
    # ------ beginning of hack ----------
    is_mondrianforest = True
    n_minibatches = 1 if single else settings.n_minibatches
    if is_mondrianforest:
        # creates data['train_ids_partition']['current'] and ['cumulative'],
        # where current[idx] holds the train ids of minibatch idx and
        # cumulative[idx] holds the ids of minibatches 0..idx
        data['train_ids_partition'] = {'current': {}, 'cumulative': {}}
        train_ids = np.arange(data['n_train'])
        try:
            draw_mondrian = settings.draw_mondrian
        except AttributeError:
            draw_mondrian = False
        train_ids_cumulative = np.arange(0)
        # BUG FIX: use integer division — the Py3 float `/` made
        # np.arange(n_points_per_minibatch) one element too long and produced
        # overlapping partitions; the remainder is folded into the last batch.
        n_points_per_minibatch = data['n_train'] // n_minibatches
        if n_points_per_minibatch <= 0:
            raise ValueError('more minibatches than training rows')
        idx_base = np.arange(n_points_per_minibatch)
        for idx_minibatch in range(n_minibatches):
            is_last_minibatch = (idx_minibatch == n_minibatches - 1)
            idx_tmp = idx_base + idx_minibatch * n_points_per_minibatch
            if is_last_minibatch:
                # last minibatch also takes the n_train % n_minibatches leftovers
                idx_tmp = np.arange(idx_minibatch * n_points_per_minibatch, data['n_train'])
            train_ids_current = train_ids[idx_tmp]
            data['train_ids_partition']['current'][idx_minibatch] = train_ids_current
            train_ids_cumulative = np.append(train_ids_cumulative, train_ids_current)
            data['train_ids_partition']['cumulative'][idx_minibatch] = train_ids_cumulative
    return data
class Settings:
    """Attribute bag: exposes every keyword argument as an instance attribute."""

    def __init__(self, **entries):
        for key, value in entries.items():
            setattr(self, key, value)
class MondrianForest:
    """
    Online Mondrian-forest price forecaster, one forest per capital.

    fit() trains each forest on sliding windows of relative daily price
    changes, targeting the relative gap between a `future_scope`-day forward
    moving average and the current price.  When `later_values` is supplied,
    those hold-out windows are appended to the minibatch schedule so that
    predict() can continue training online, one minibatch per `batch_size`
    predicted rows.
    """

    def __init__(
        self,
        future_scope=3,
        dimension=10,
        later_values=None
    ):
        # future_scope: horizon (days) of the moving-average target
        # dimension: width of the daily-change window fed to the forest
        # later_values: optional {'X': price windows, 'f': targets} used to
        #   pre-build the online-update schedule consumed by predict()
        self.future_scope = future_scope
        self.dimension = dimension
        # fixed configuration expected by the vendored mondrianforest code
        self.settings = Settings(**{
            "bagging": 0,
            "budget": -1,
            "budget_to_use": float('inf'),
            "data_path": "../../process_data/",
            "dataset": "msg-4dim",
            "debug": 0,
            "discount_factor": 10,
            "draw_mondrian": 0,
            "init_id": 1,
            "min_samples_split": 2,
            "batch_size": 128,
            "n_minibatches": 10,
            "n_mondrians": 10,
            "name_metric": "mse",
            "normalize_features": 0,
            "op_dir": "results",
            "optype": "real",
            "perf_dataset_keys": ["train", "test"],
            "perf_metrics_keys": ["log_prob", "mse"],
            "perf_store_keys": ["pred_mean", "pred_prob"],
            "save": 0,
            "select_features": 0,
            "smooth_hierarchically": 1,
            "store_every": 0,
            "tag": "",
            "verbose": 1,
        })
        if later_values is not None:
            self.laterX = later_values['X']
            self.laterF = later_values['f']
        else:
            self.laterX = None
            self.laterF = None

    def fit(self, price_indices):
        """Train one Mondrian forest per capital.

        price_indices: dict capital_name -> 1-D price array (np.ndarray).
        """
        self.models = {}
        self.aux_data = {}
        for capital_name in price_indices:
            trainPortion = 1
            curr_prices = price_indices[capital_name]
            # relative day-over-day changes; first entry pinned to 0
            seq_price = price_indices[capital_name].copy()
            seq_price[1:] = (seq_price[1:] - seq_price[:-1]) / seq_price[:-1]
            seq_price[0] = 0
            X = rolling_window(seq_price, self.dimension)
            # pad the head so X lines up 1:1 with curr_prices rows
            padding = np.ones((self.dimension-1, self.dimension))
            X = np.vstack((padding, X))
            # target: relative distance from today's price to the forward moving average
            cum_sum = np.cumsum(curr_prices)
            moving_avr = curr_prices.copy()
            moving_avr[:-self.future_scope] = (cum_sum[self.future_scope:] - cum_sum[:-self.future_scope]) / self.future_scope
            f = np.zeros((curr_prices.shape[0],))
            f[:] = (moving_avr - curr_prices) / curr_prices
            ##### if online updates are expected, append the hold-out windows to
            ##### the schedule but pre-train only on the leading trainPortion share
            if self.laterX is not None:
                trainSize = price_indices[capital_name].shape[0]
                testSize = self.laterX[capital_name].shape[0]
                trainPortion = trainSize / (trainSize + testSize)
                all_daily_changes = (self.laterX[capital_name][:,1:] - self.laterX[capital_name][:,:-1]) / self.laterX[capital_name][:,:-1]
                X = np.vstack([X, all_daily_changes])
                f = np.concatenate([f, self.laterF[capital_name]])
            self.settings.n_minibatches = math.ceil(f.shape[0] / self.settings.batch_size)
            data = prepareData(X, f, self.settings)
            param, cache = precompute_minimal(data, self.settings)
            self.aux_data[capital_name] = {'param': param, 'cache': cache, 'untrainedBatch': self.settings.n_minibatches, 'data': data}
            self.models[capital_name] = MondrianForest_Main(self.settings, data)
            for idx_minibatch in range(self.settings.n_minibatches):
                if idx_minibatch/self.settings.n_minibatches > trainPortion:
                    # remember where predict() should resume online training
                    self.aux_data[capital_name]['untrainedBatch'] = idx_minibatch
                    break
                if idx_minibatch % 10 == 0:
                    print('========== %d/%d minibaches =========='%(idx_minibatch, self.settings.n_minibatches))
                train_ids_current_minibatch = data['train_ids_partition']['current'][idx_minibatch]
                if idx_minibatch == 0:
                    self.models[capital_name].fit(data, train_ids_current_minibatch, self.settings, param, cache)
                else:
                    self.models[capital_name].partial_fit(data, train_ids_current_minibatch, self.settings, param, cache)

    def predict(self, recent_prices, update=True, true_values=None, loss_functions=None):
        """
        Predict the next price for every row of each capital's window matrix.

        recent_prices: dict name -> array of shape (*, dimension+1) of prices.
        update: resume the prepared minibatch schedule every batch_size rows.
        true_values: dict name -> realized next prices (required if update).
        loss_functions: {name: fn(truth, estimate, prices)}; defaults to RMSE.
        Returns (predictions_dict, losses_dict).
        Raises Exception on missing true_values (with update) or a bad shape.
        """
        if true_values is None and update:
            raise Exception('True values must be provided if update parameter is set to true')
        if loss_functions is None:
            loss_functions = {'MSE': lambda truth, estimate, _prices: np.sqrt(np.mean((truth-estimate)**2))}
        loss_results = {}
        result = {}
        ZERO_ARR = np.array([0])
        # uniform tree weights for forest averaging
        weights_prediction = np.ones(self.settings.n_mondrians) * 1.0 / self.settings.n_mondrians
        for capital_name in recent_prices:
            result[capital_name] = np.array([])
            row_number = 0
            if recent_prices[capital_name].shape[1] != self.dimension+1:
                raise Exception('The matrix to be predicted must be of the shape (*, dimension+1)')
            all_daily_changes = (recent_prices[capital_name][:,1:] - recent_prices[capital_name][:,:-1]) / recent_prices[capital_name][:,:-1]
            for row in recent_prices[capital_name]:
                daily_changes = all_daily_changes[row_number]
                # BUG FIX: the original evaluated twice (first result discarded)
                # and unconditionally indexed true_values, crashing when
                # update=False and true_values=None; the dummy target is now
                # only a fallback for the metrics-side argument.
                if true_values is not None:
                    y_eval = np.array([true_values[capital_name][row_number]])
                else:
                    y_eval = ZERO_ARR
                change_ratio = self.models[capital_name].evaluate_predictions({}, daily_changes.reshape((1,-1)), y_eval, self.settings, self.aux_data[capital_name]['param'], weights_prediction, False)['pred_mean'][0]
                res = (change_ratio + 1) * row[-1]
                result[capital_name] = np.concatenate((result[capital_name], [res]))
                if update and ((row_number + 1) % self.settings.batch_size == 0):
                    # resume the minibatch schedule prepared in fit()
                    idx_minibatch = self.aux_data[capital_name]['untrainedBatch']
                    try:
                        train_ids_current_minibatch = self.aux_data[capital_name]['data']['train_ids_partition']['current'][idx_minibatch]
                        self.models[capital_name].partial_fit(self.aux_data[capital_name]['data'], train_ids_current_minibatch, self.settings, self.aux_data[capital_name]['param'], self.aux_data[capital_name]['cache'])
                        self.aux_data[capital_name]['untrainedBatch'] += 1
                    except Exception:
                        # minibatch index not in the prepared schedule; skip
                        pass
                row_number += 1
            if true_values is not None:
                loss_results[capital_name] = {}
                for loss_name in loss_functions:
                    loss_results[capital_name][loss_name] = loss_functions[loss_name](true_values[capital_name], result[capital_name], recent_prices[capital_name])
        return result, loss_results
|
{"/src/MLShepard.py": ["/src/utils.py"], "/master.py": ["/src/utils.py", "/src/GBL.py", "/src/MLShepard.py", "/src/MondrianForest.py", "/src/OARIMA.py", "/src/OSVR.py", "/src/RandConLSTM.py", "/src/WHLR.py"], "/src/OSVR.py": ["/src/utils.py"], "/src/WHLR.py": ["/src/utils.py"], "/src/OARIMA.py": ["/src/utils.py"], "/src/RandConLSTM.py": ["/src/utils.py"], "/src/MondrianForest.py": ["/src/utils.py"], "/src/GBL.py": ["/src/utils.py"]}
|
16,433
|
rasakereh/time-series-predictors
|
refs/heads/master
|
/src/GBL.py
|
import numpy as np
from .utils import rolling_window
# parameters (except epsilon) offered by https://link.springer.com/article/10.1007/s00521-016-2314-8
class GBLM:
    """
    Growing/reinforcement-weighted linear model for price-change prediction.

    One weight vector per capital is trained on windows of relative daily
    changes; each step mixes a forgetting term with a learning term scaled
    by exponentially-decayed running traces of past predictions (rlF) and
    of a band-based reward signal (rlR).
    (Parameters other than epsilon follow the cited Springer article.)
    """
    def __init__(
        self,
        dimension=10,
        epsilon=5e-3,
        forgetting_rate=.59,
        p_learning_rate=.008,
        s_learning_rate=.001,
        decay_rate=.25,
        oe_penalty=-1.5,
        ue_penalty=-1.5,
        reward=1,
        epochs=1
    ):
        # dimension: window width of the change vectors
        # epsilon: half-width of the "accurate enough" band around the target
        # forgetting_rate: weight retained from the previous step
        # p_/s_learning_rate: learning rate in fit() / in predict() updates
        # decay_rate: decay of the rlF/rlR running traces
        # oe_/ue_penalty: reward when over-/under-estimating beyond epsilon
        self.dimension = dimension
        self.epsilon = epsilon
        self.forgetting_rate = forgetting_rate
        self.p_learning_rate = p_learning_rate
        self.s_learning_rate = s_learning_rate
        self.decay_rate = decay_rate
        self.oe_penalty = oe_penalty
        self.ue_penalty = ue_penalty
        self.reward = reward
        self.epochs = epochs
    def fit(self, price_indices):
        """Train one weight vector per capital (dict name -> 1-D price array)."""
        self.models = {}
        # all capitals start from the same random, sum-normalized weights
        initial_weight = np.random.uniform(-1, 1, self.dimension)
        initial_weight /= np.sum(initial_weight)
        for capital_name in price_indices:
            self.models[capital_name] = {'w': initial_weight, 'rlF': 0, 'rlR': 0}
            # relative day-over-day changes; first entry pinned to 0
            seq_price = price_indices[capital_name].copy()
            seq_price[1:] = (seq_price[1:] - seq_price[:-1]) / seq_price[:-1]
            seq_price[0] = 0
            X = rolling_window(seq_price, self.dimension)
            # target is the change right after each window; zip() drops the
            # final, targetless window
            f = seq_price[self.dimension:]
            for _ in range(self.epochs):
                for x, y in zip(X, f):
                    w = self.models[capital_name]['w']
                    yHat = np.dot(w, x)
                    # decayed trace of predictions
                    self.models[capital_name]['rlF'] = self.models[capital_name]['rlF']*self.decay_rate + yHat
                    diff = yHat - y
                    # banded reward: penalize over/under-estimates outside +/-epsilon
                    reward = self.oe_penalty if diff > self.epsilon else self.ue_penalty if diff < -self.epsilon else self.reward
                    self.models[capital_name]['rlR'] = self.models[capital_name]['rlR']*self.decay_rate + reward
                    # forget + reinforce, then renormalize weights to sum 1
                    self.models[capital_name]['w'] = self.forgetting_rate*w + self.p_learning_rate*x*self.models[capital_name]['rlF']*self.models[capital_name]['rlR']
                    self.models[capital_name]['w'] = self.models[capital_name]['w'] / np.sum(self.models[capital_name]['w'])
    def predict(self, recent_prices, update=True, true_values=None, loss_functions=None):
        """
        Predict the next price for every row of each capital's window matrix.

        recent_prices: dict name -> array of shape (*, dimension+1) of prices.
        update: when True, take the same reinforcement step online using the
            realized value (with the slower s_learning_rate).
        true_values: dict name -> realized next prices (required if update).
        loss_functions: {name: fn(truth, estimate, prices)}; defaults to RMSE.
        Returns (predictions_dict, losses_dict).
        """
        if true_values is None and update:
            raise Exception('True values must be provided if update parameter is set to true')
        if loss_functions is None:
            loss_functions = {'MSE': lambda truth, estimate, _prices: np.sqrt(np.mean((truth-estimate)**2))}
        loss_results = {}
        result = {}
        for capital_name in recent_prices:
            result[capital_name] = np.array([])
            row_number = 0
            if recent_prices[capital_name].shape[1] != self.dimension+1:
                raise Exception('The matrix to be predicted must be of the shape (*, dimension+1)')
            # per-row relative changes within each price window
            all_daily_changes = (recent_prices[capital_name][:,1:] - recent_prices[capital_name][:,:-1]) / recent_prices[capital_name][:,:-1]
            for row in recent_prices[capital_name]:
                w = self.models[capital_name]['w']
                daily_changes = all_daily_changes[row_number]
                change_ratio = np.dot(w, daily_changes)
                # turn the predicted relative change into a price
                res = (change_ratio + 1) * row[-1]
                result[capital_name] = np.concatenate((result[capital_name], [res]))
                if update:
                    newF = (true_values[capital_name][row_number] - row[-1]) / row[-1]
                    self.models[capital_name]['rlF'] = self.models[capital_name]['rlF']*self.decay_rate + change_ratio
                    diff = change_ratio - newF
                    reward = self.oe_penalty if diff > self.epsilon else self.ue_penalty if diff < -self.epsilon else self.reward
                    self.models[capital_name]['rlR'] = self.models[capital_name]['rlR']*self.decay_rate + reward
                    self.models[capital_name]['w'] = self.forgetting_rate*w + self.s_learning_rate*daily_changes*self.models[capital_name]['rlF']*self.models[capital_name]['rlR']
                    self.models[capital_name]['w'] = self.models[capital_name]['w'] / np.sum(self.models[capital_name]['w'])
                row_number += 1
            if true_values is not None:
                loss_results[capital_name] = {}
                for loss_name in loss_functions:
                    loss_results[capital_name][loss_name] = loss_functions[loss_name](true_values[capital_name], result[capital_name], recent_prices[capital_name])
        return result, loss_results
|
{"/src/MLShepard.py": ["/src/utils.py"], "/master.py": ["/src/utils.py", "/src/GBL.py", "/src/MLShepard.py", "/src/MondrianForest.py", "/src/OARIMA.py", "/src/OSVR.py", "/src/RandConLSTM.py", "/src/WHLR.py"], "/src/OSVR.py": ["/src/utils.py"], "/src/WHLR.py": ["/src/utils.py"], "/src/OARIMA.py": ["/src/utils.py"], "/src/RandConLSTM.py": ["/src/utils.py"], "/src/MondrianForest.py": ["/src/utils.py"], "/src/GBL.py": ["/src/utils.py"]}
|
16,434
|
rasakereh/time-series-predictors
|
refs/heads/master
|
/data/RTcryptos/AAA_prep.py
|
import pandas as pd
from dfply import X, arrange, mutate, drop
from os import listdir
from os.path import isfile
csvFiles = [f for f in listdir('.') if isfile(f) and f[-4:] == '.csv']
# column header for the raw pipe-separated exchange dumps
header = 'id|Price|volume|timestamp|buy\n'
def loadPriceIndex(dataFile):
    """Prepend the column header to a raw dump (in place) and load it.

    Rewrites dataFile with `header` prepended, reads it back as a
    pipe-separated table, renames timestamp -> Date and sorts by Date.
    Returns the resulting DataFrame.
    NOTE(review): running this twice on the same file prepends the header
    twice — the operation is not idempotent.
    """
    original = open(dataFile, 'r')
    data = header + original.read()
    original.close()
    modified = open(dataFile, 'w')
    modified.write(data)
    modified.close()
    priceIndex = pd.read_csv(dataFile, sep='|')
    # priceIndex['Date'] = pd.to_datetime(priceIndex['Date'])
    priceIndex = priceIndex >> mutate(Date = priceIndex.timestamp) >> drop(X.timestamp) >> arrange(X.Date)
    return priceIndex
# convert every raw dump in the working directory to a cleaned CSV, in place
for csvFile in csvFiles:
    print(csvFile, end=':\t\t')
    try:
        pIndex = loadPriceIndex(csvFile)
        pIndex.to_csv(csvFile)  # overwrite the raw dump with the cleaned table
        print('Done')
    except:  # NOTE(review): bare except hides the real error — narrow and report it
        print('Failed')
|
{"/src/MLShepard.py": ["/src/utils.py"], "/master.py": ["/src/utils.py", "/src/GBL.py", "/src/MLShepard.py", "/src/MondrianForest.py", "/src/OARIMA.py", "/src/OSVR.py", "/src/RandConLSTM.py", "/src/WHLR.py"], "/src/OSVR.py": ["/src/utils.py"], "/src/WHLR.py": ["/src/utils.py"], "/src/OARIMA.py": ["/src/utils.py"], "/src/RandConLSTM.py": ["/src/utils.py"], "/src/MondrianForest.py": ["/src/utils.py"], "/src/GBL.py": ["/src/utils.py"]}
|
16,611
|
Archieyoung/SVAN
|
refs/heads/master
|
/range_compare.py
|
"""
Author: ArchieYoung <yangqi2@grandomics.com>
Time: Thu Jul 5 09:24:07 CST 2018
"""
import os
import subprocess
from bed_intersect import intersect_f
from table_check import TableCheck
def RangeCompare(bedtools, bedA, bedB, min_overlap, tmp_dir, prefix, db_id):
    """
    Intersect query SVs (bedA) with a database BED (bedB) via bedtools.

    bedtools: path to the bedtools executable.
    min_overlap: when > 0, passed as a reciprocal overlap fraction (-f ... -r).
    tmp_dir/prefix/db_id: used to name the intersect temp file (not removed).
    Returns {query_svid: [db_fields, ...]} keeping only type-compatible hits:
    DEL~{DEL,CNV}, DUP~{DUP,CNV}, INV~INV, and database type WILD matches any
    query type.
    """
    # result sv dict, key: query sv id, value: db fields
    intersect_result = {}
    # calculate query and database fields number
    query_field_num = TableCheck(bedA)
    # db_field_num = table_check(bedB)
    range_intersect_tmp_bed = os.path.join(tmp_dir
            ,prefix+"."+db_id+".intersect.tmp.bed")
    if min_overlap > 0:
        # -wo writes both records plus the overlap width as the last column
        with open(range_intersect_tmp_bed,"w") as io:
            subprocess.run([bedtools,"intersect","-a",bedA,"-b",bedB,"-wo",
                "-f",str(min_overlap),"-r"],stdout=io)
    else:
        with open(range_intersect_tmp_bed,"w") as io:
            subprocess.run([bedtools,"intersect","-a",bedA,"-b",bedB,"-wo"],
                stdout=io)
    # read intersect file
    # bedA query bed
    # chrom start end svtype svid svlen re info;
    # svid is a unique identifier for the sv
    with open(range_intersect_tmp_bed,"r") as io:
        lines = io.readlines()
        for line in lines:
            fields = line.strip().split("\t")
            # split the joined record back into query part and db part;
            # the trailing column is bedtools' overlap width and is dropped
            query_fields = fields[:query_field_num]
            db_fields = fields[query_field_num:-1]
            intersect_field = intersect_f(fields, query_field_num)
            _query_svtype = intersect_field.query_svtype
            _db_svtype = intersect_field.db_svtype
            _query_svid = intersect_field.query_svid
            # DEL match DEL or CNV
            if (_query_svtype == "DEL" and (_db_svtype == "DEL" or
                _db_svtype == "CNV")):
                intersect_result.setdefault(_query_svid, []).append(db_fields)
            # DUP match DUP or CNV
            if (_query_svtype == "DUP" and (_db_svtype == "DUP" or
                _db_svtype == "CNV")):
                intersect_result.setdefault(_query_svid, []).append(db_fields)
            if _query_svtype == "INV" and _db_svtype == "INV":
                intersect_result.setdefault(_query_svid, []).append(db_fields)
            # WILD database SV type matchs any query SV type
            if _db_svtype == "WILD":
                intersect_result.setdefault(_query_svid, []).append(db_fields)
    return intersect_result
|
{"/range_compare.py": ["/bed_intersect.py", "/table_check.py"], "/svan.py": ["/sv_vcf.py", "/pubdb_prepare.py", "/range_compare.py", "/insertion_compare.py", "/traslocation_compare.py"], "/pubdb_prepare.py": ["/sv_vcf.py"], "/traslocation_compare.py": ["/bed_intersect.py", "/table_check.py"], "/insertion_compare.py": ["/bed_intersect.py", "/table_check.py"], "/local_database_prepare.py": ["/sv_vcf.py"]}
|
16,612
|
Archieyoung/SVAN
|
refs/heads/master
|
/format_result.py
|
"""
Author: ArchieYoung <yangqi2@grandomics.com>
Time: Thu Jul 5 09:24:07 CST 2018
"""
"""
db fields "chrom","start","end","svtype","annot1","annot2","annot3"...
format: "annot1;annot1","annot2,annot2";"annot3;annot3","chrom:start-end,svtype;..."
if multiple result is founded in database, write all feature in one column,
and seperate them by semicolon
"""
def format_result_pub_db(result):
    """
    Collapse multi-hit database matches into single annotation columns.

    Each value of *result* is a list of db rows shaped
    [chrom, start, end, svtype, annot1, annot2, ...].  In place, each entry
    is replaced by [annot1-joined, ..., annotN-joined, locus-string] where
    multiple hits are joined with ';' and each locus is
    "chrom:start-end,svtype".  Returns the mutated *result*.
    """
    for query_id in result:
        hits = result[query_id]
        # one accumulator per annotation column (columns after the 4 coords)
        n_annot = len(hits[0]) - 4
        annot_cols = [[] for _ in range(n_annot)]
        locus_strs = []
        for hit in hits:
            locus_strs.append("{}:{}-{},{}".format(hit[0], hit[1], hit[2], hit[3]))
            for col in range(n_annot):
                annot_cols[col].append(hit[col + 4])
        result[query_id] = [";".join(col) for col in annot_cols] + [";".join(locus_strs)]
    return result
|
{"/range_compare.py": ["/bed_intersect.py", "/table_check.py"], "/svan.py": ["/sv_vcf.py", "/pubdb_prepare.py", "/range_compare.py", "/insertion_compare.py", "/traslocation_compare.py"], "/pubdb_prepare.py": ["/sv_vcf.py"], "/traslocation_compare.py": ["/bed_intersect.py", "/table_check.py"], "/insertion_compare.py": ["/bed_intersect.py", "/table_check.py"], "/local_database_prepare.py": ["/sv_vcf.py"]}
|
16,613
|
Archieyoung/SVAN
|
refs/heads/master
|
/table_check.py
|
"""
Author: ArchieYoung <yangqi2@grandomics.com>
Time: Thu Jul 5 09:24:07 CST 2018
"""
def TableCheck(table):
    """
    Return the number of tab-separated fields per record row of *table*.

    Leading comment lines (starting with '#') are skipped.  The field count
    of the first record line is taken as the reference; every following line
    must match it.
    Raises RuntimeError if any row's field count disagrees with the first
    record, and ValueError if the file has no record lines at all (the
    original code crashed with an opaque IndexError on an empty file).
    """
    with open(table, "r") as io:
        # skip leading comment lines; readline() returns "" only at EOF
        line = io.readline()
        while line and line[0] == "#":
            line = io.readline()
        if not line:
            raise ValueError("no record lines found in {}".format(table))
        first_record_field_num = len(line.split("\t"))
        # stream the rest instead of materializing the whole file
        for record in io:
            field_num = len(record.split("\t"))
            if field_num != first_record_field_num:
                raise RuntimeError("field number not consistent: "
                        "first record field num is {}, but {} "
                        "field num is {}".format(first_record_field_num,
                            field_num, record))
    return first_record_field_num
|
{"/range_compare.py": ["/bed_intersect.py", "/table_check.py"], "/svan.py": ["/sv_vcf.py", "/pubdb_prepare.py", "/range_compare.py", "/insertion_compare.py", "/traslocation_compare.py"], "/pubdb_prepare.py": ["/sv_vcf.py"], "/traslocation_compare.py": ["/bed_intersect.py", "/table_check.py"], "/insertion_compare.py": ["/bed_intersect.py", "/table_check.py"], "/local_database_prepare.py": ["/sv_vcf.py"]}
|
16,614
|
Archieyoung/SVAN
|
refs/heads/master
|
/svan.py
|
#!/usr/bin/env python3
"""
author: archieyoung<yangqi2@grandomics.com>
SVAN: A Struture Variation Annotation Tool
"""
import sys
import os
import subprocess
import operator
import argparse
import logging
from multiprocessing import Pool
from glob import iglob
import sv_vcf
import pubdb_prepare
from range_compare import RangeCompare
from insertion_compare import InsCompare
from traslocation_compare import TraCompare
def queryPrepare_vcf(tmp, prefix, vcf_in, bed_out):
    """
    Build the sorted query BED from a VCF.

    The VCF is converted to BED (sv_vcf.vcf_to_bed), then the "chr"
    chromosome prefix is stripped (database SVs carry none) and records
    are sorted by chromosome and numeric start, equivalent to
    ``sort -k1,1 -k2,2n``.

    Returns the list of raw BED lines (chromosome names unmodified).
    """
    raw_bed = os.path.join(tmp, prefix + ".query.bed.tmp1")
    # vcf -> bed intermediate
    sv_vcf.vcf_to_bed(vcf_in, raw_bed)
    query_list = []
    records = []
    with open(raw_bed, "r") as handle:
        for raw_line in handle:
            raw_line = raw_line.strip()
            query_list.append(raw_line)
            cols = raw_line.split("\t")
            # database SV chroms have no "chr" prefix
            cols[0] = cols[0].replace("chr", "")
            cols[1] = int(cols[1])
            records.append(cols)
    records.sort(key=operator.itemgetter(0, 1))
    with open(bed_out, "w") as handle:
        for cols in records:
            cols[1] = str(cols[1])
            handle.write("\t".join(cols) + "\n")
    return query_list
def queryPrepare_bed(tmp, prefix, bed_in, bed_out):
    """
    Build the sorted query BED from a user-supplied BED.

    Strips the "chr" chromosome prefix (database SVs carry none) and
    sorts by chromosome and numeric start, equivalent to
    ``sort -k1,1 -k2,2n``.  `tmp` and `prefix` are accepted for
    signature symmetry with queryPrepare_vcf but are not used here.

    Returns the list of raw BED lines (chromosome names unmodified).
    """
    query_list = []
    records = []
    with open(bed_in, "r") as handle:
        for raw_line in handle:
            raw_line = raw_line.strip()
            query_list.append(raw_line)
            cols = raw_line.split("\t")
            # database SV chroms have no "chr" prefix
            cols[0] = cols[0].replace("chr", "")
            cols[1] = int(cols[1])
            records.append(cols)
    records.sort(key=operator.itemgetter(0, 1))
    with open(bed_out, "w") as handle:
        for cols in records:
            cols[1] = str(cols[1])
            handle.write("\t".join(cols) + "\n")
    return query_list
# db fields "chrom","start","end","svtype","svid","annot1","annot2","annot3"...
# format: "annot1;annot1","annot2,annot2";"annot3;annot3","chrom:start-end,svtype;..."
# if multiple result is founded in database, write each feature in one column,
# and seperate by semicolon
def format_pub_result(result, field_names):
    """
    Reformat public-database intersect results in place.

    Each value of `result` (a list of database hit rows: chrom, start,
    end, svtype, svid, annot...) is replaced by a dict mapping
    `field_names` to semicolon-joined strings; the first field name
    receives the "chrom:start-end,svtype" variation list, the rest the
    corresponding annotation columns.

    Raises:
        RuntimeError: if the hit rows' annotation count does not match
            len(field_names) - 1.
    """
    for svid in result:
        hits = result[svid]
        # columns after chrom/start/end/svtype/svid are annotations
        annot_count = len(hits[0]) - 5
        if annot_count != len(field_names) - 1:
            raise RuntimeError("fields number error.")
        variations = ["{}:{}-{},{}".format(h[0], h[1], h[2], h[3])
                for h in hits]
        annotations = [[h[k + 5] for h in hits] for k in range(annot_count)]
        merged = [";".join(variations)] + [";".join(a) for a in annotations]
        # replace the hit list with the formatted dict, in place
        result[svid] = dict(zip(field_names, merged))
def format_local_result(result, local_db_size):
    """
    Reformat local-database intersect results in place.

    Each value of `result` (a list of database hit rows whose 5th column
    is the variant id) becomes a dict with the carrier frequency
    (hit count / local_db_size, capped at 1, 2 decimals) and the
    semicolon-joined variant ids.
    """
    for svid in result:
        variants = [hit[4] for hit in result[svid]]
        # cap at 1 in case one sample contributed several hits
        freq = min(len(variants) / local_db_size, 1)
        result[svid] = {
                "GrandSV_Frequency": "{:4.2f}".format(freq),
                "GrandSV_Variant": ";".join(variants),
                }
def update_result(updated_result, intersect):
    """
    Merge one intersect dict (svid -> annotation dict) into the
    accumulated result, in place: existing entries are dict-updated,
    new svids are inserted.
    """
    for svid, annots in intersect.items():
        if svid in updated_result:
            updated_result[svid].update(annots)
        else:
            updated_result[svid] = annots
def get_sv_by_type(bed_in, bed_out, svtype):
    """
    Copy to bed_out only the bed_in records whose 4th column (SVTYPE)
    is in `svtype` (a collection of type strings).
    """
    # read everything first, then write, so bed_in == bed_out stays safe
    with open(bed_in, "r") as src:
        kept = [rec for rec in src
                if rec.strip().split("\t")[3] in svtype]
    with open(bed_out, "w") as dst:
        dst.writelines(kept)
def local_sv_annot(_args):
    """
    Annotate the query against ONE local database bed.

    Runs the range, insertion and translocation comparisons and merges
    them into a single dict keyed by query SV id.  Arguments arrive
    packed in one list so the function can be dispatched through
    multiprocessing Pool.apply_async.
    """
    (_bedtools, _bed_query, _bed_query_ins, _bed_query_tra, _db_bed,
            _min_overlap, _max_dist, _tmp_dir, _prefix, _id) = _args
    # per-database tag keeps each worker's temp files distinct
    tag = "local{}".format(_id)
    merged = dict()
    merged.update(RangeCompare(_bedtools, _bed_query, _db_bed,
            _min_overlap, _tmp_dir, _prefix, tag))
    merged.update(InsCompare(_bedtools, _bed_query_ins, _db_bed, _max_dist,
            _tmp_dir, _prefix, tag))
    merged.update(TraCompare(_bedtools, _bed_query_tra, _db_bed, _max_dist,
            _tmp_dir, _prefix, tag))
    return merged
def table_maker(query_list, result, result_field_names):
    """
    Build the final annotated TSV.

    Args:
        query_list: raw query bed lines (Chr Start End SVTYPE SVID SVLEN
            RE INFO, tab-separated).
        result: dict mapping SVID -> {annotation column: value}.
        result_field_names: ordered annotation column names.

    Returns:
        (header_str, table): the tab-joined header line and the list of
        record lines, each newline-terminated; annotation columns with
        no hit are filled with "NA".
    """
    query_field_names = ["Chr", "Start", "End", "SVTYPE", "SVID",
            "SVLEN", "RE", "INFO"]
    header = query_field_names + result_field_names
    table = []
    for query in query_list:
        record_dict = dict(zip(query_field_names, query.split("\t")))
        svid = record_dict["SVID"]
        # merge annotation columns when this SV was hit in any database
        if svid in result:
            record_dict.update(result[svid])
        # fill the remaining annotation columns with "NA"
        # (the original duplicated this loop in both branches)
        for name in result_field_names:
            record_dict.setdefault(name, "NA")
        table.append("\t".join(record_dict[k] for k in header) + "\n")
    header_str = "\t".join(header) + "\n"
    return header_str, table
def get_args():
    """
    Parse command line arguments.

    Either --vcf or --bed must be supplied (enforced in main()).
    Prints help and exits when the program is called with no arguments.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(
            description="SVAN: Structure variation annotation",
            usage="usage: %(prog)s [options]")
    parser.add_argument("--vcf",
            help="vcf file [default %(default)s]", metavar="FILE")
    parser.add_argument("--bed",
            help="bed file [default %(default)s]", metavar="FILE")
    parser.add_argument("--pub_dbdir",
            help="public SV database directory [default %(default)s]", metavar="DIR")
    parser.add_argument("--local_dbdir",
            help="local SV database directory [default %(default)s]", metavar="DIR")
    parser.add_argument("--min_overlap",
            help="minimum reciprocal overlap fraction for \"Range\" SV A and"
            " \"Range\" SV B [default %(default)s]", type=float,
            default=0.5, metavar="FLOAT")
    # metavar fixed: this option is an int, not a float
    parser.add_argument("--max_dist",
            help="maximum distance for \"Point\" SV A and"
            " \"Point\" SV B [default %(default)s]", type=int,
            default=1000, metavar="INT")
    # metavar fixed: this option is an int, not a string
    parser.add_argument("--threads", default=4,
            help="number of threads [default %(default)s]", type=int, metavar="INT")
    parser.add_argument("--prefix",
            help="output file name [default %(default)s]", metavar="STR")
    parser.add_argument("--outdir",
            help="output directory [default %(default)s]", metavar="DIR")
    parser.add_argument("--tmp",
            help="temporary directory [default %(default)s]",
            default="tmp", metavar="DIR")
    if len(sys.argv) <= 1:
        parser.print_help()
        exit()
    return parser.parse_args()
def main():
    """SVAN entry point.

    Pipeline: prepare the query bed (from --vcf or --bed), convert the
    four public databases (1000 Genomes, DGV, dbVar, DECIPHER HI) to db
    beds, run range/insertion/translocation comparisons against them and
    against every local database bed (in a worker pool), merge all
    annotations and write "<outdir>/<prefix>.svan.tsv".
    """
    args = get_args()
    # make dirs
    if not os.path.exists(args.outdir):
        os.mkdir(args.outdir)
    if not os.path.exists(args.tmp):
        os.mkdir(args.tmp)
    # prepare query SV
    bed_query = os.path.join(args.tmp, args.prefix+".query.bed")
    if args.vcf: # vcf query
        query_list = queryPrepare_vcf(args.tmp, args.prefix,
                args.vcf, bed_query)
    elif args.bed: # bed query
        query_list = queryPrepare_bed(args.tmp, args.prefix,
                args.bed, bed_query)
    else:
        raise RuntimeError("Must provide either a bed or vcf a file!!!")
    # prepare database: raw public files must exist under --pub_dbdir
    one_thousand_sv_raw = os.path.join(args.pub_dbdir,
            "ALL.wgs.integrated_sv_map_v2.20130502.svs.genotypes.1000genome.vcf.gz")
    dgv_raw = os.path.join(args.pub_dbdir,
            "DGV.GS.March2016.50percent.GainLossSep.Final.hg19.gff3")
    dbVar_raw = os.path.join(args.pub_dbdir,
            "nstd37.GRCh37.variant_call.vcf.gz")
    decipher_hi_raw = os.path.join(args.pub_dbdir,
            "decipher_HI_Predictions_Version3.bed.gz")
    for i in [one_thousand_sv_raw,dgv_raw,dbVar_raw,decipher_hi_raw]:
        if not os.path.exists(i):
            raise RuntimeError("Error: {} was not founded!".format(i))
    one_thousand_sv_db = os.path.join(args.tmp,
            "ALL.wgs.integrated_sv_map_v2.20130502.svs.genotypes.1000genome.db.bed")
    dgv_db = os.path.join(args.tmp,
            "DGV.GS.March2016.50percent.GainLossSep.Final.hg19.db.bed")
    dbVar_db = os.path.join(args.tmp,
            "nstd37.GRCh37.variant_call.db.bed")
    decipher_hi_db = os.path.join(args.tmp,
            "decipher_HI_Predictions_Version3.db.bed")
    # convert each raw public database into the unified db bed format
    pubdb_prepare.one_thousand_sv.print_bed(one_thousand_sv_raw, one_thousand_sv_db)
    pubdb_prepare.dgv_gold_cnv.print_bed(dgv_raw, dgv_db)
    pubdb_prepare.dbVar_nstd37_sv.print_bed(dbVar_raw, dbVar_db)
    pubdb_prepare.decipher_HI.print_bed(decipher_hi_raw, decipher_hi_db)
    bedtools = "bedtools" # temp use: assumes bedtools is on PATH
    # annotation field names (order matters: first name takes the
    # variation list, see format_pub_result)
    one_thousand_field_names = ["1000g_SV", "1000g_subtype", "1000g_AF",
            "1000g_EAS_AF", "1000g_EUR_AF","1000g_AFR_AF","1000g_AMR_AF",
            "1000g_SAS_AF"]
    dgv_field_names = ["dgv_SV", "dgv_AF","dgv_sample_size"]
    dbVar_field_names = ["dbVar_SV", "dbVar_CLNSIG","dbVar_PHENO"]
    decipher_hi_field_names = ["Decipher_region", "Decipher_HI"]
    # public database SV annotate, 1000g, dgv, dbVar, decipher_hi
    # range annotate
    # 1000g
    one_thousand_range_intersect = RangeCompare(bedtools, bed_query,
            one_thousand_sv_db, args.min_overlap,
            args.tmp, args.prefix, "1000genome")
    format_pub_result(one_thousand_range_intersect, one_thousand_field_names)
    # dgv
    dgv_range_intersect = RangeCompare(bedtools, bed_query,
            dgv_db, args.min_overlap, args.tmp, args.prefix, "dgv")
    format_pub_result(dgv_range_intersect, dgv_field_names)
    # dbVar
    dbVar_range_intersect = RangeCompare(bedtools, bed_query,
            dbVar_db, args.min_overlap, args.tmp, args.prefix, "dbVar")
    format_pub_result(dbVar_range_intersect, dbVar_field_names)
    # decipher_hi: min_overlap 0, so any overlap with an HI region counts
    decipher_hi_range_intersect = RangeCompare(bedtools, bed_query,
            decipher_hi_db, 0, args.tmp, args.prefix,
            "decipher_hi")
    format_pub_result(decipher_hi_range_intersect, decipher_hi_field_names)
    # insertion annotate, 1000g and decipher_hi
    # get insertion
    bed_query_ins = os.path.join(args.tmp, args.prefix + ".query.ins.bed")
    get_sv_by_type(bed_query, bed_query_ins, ["INS"])
    # 1000g
    one_thousand_ins_intersect = InsCompare(bedtools, bed_query_ins,
            one_thousand_sv_db, args.max_dist,
            args.tmp, args.prefix, "1000genome")
    format_pub_result(one_thousand_ins_intersect, one_thousand_field_names)
    # decipher hi
    decipher_hi_ins_intersect = InsCompare(bedtools, bed_query_ins,
            decipher_hi_db, args.max_dist, args.tmp, args.prefix,
            "decipher_hi")
    format_pub_result(decipher_hi_ins_intersect, decipher_hi_field_names)
    # translocation annotate, decipher hi
    # get translocation
    bed_query_tra = os.path.join(args.tmp, args.prefix + ".query.tra.bed")
    get_sv_by_type(bed_query, bed_query_tra, ["TRA"])
    # decipher hi
    decipher_hi_tra_intersect = TraCompare(bedtools, bed_query_tra,
            decipher_hi_db, args.max_dist, args.tmp, args.prefix,
            "decipher_hi")
    format_pub_result(decipher_hi_tra_intersect, decipher_hi_field_names)
    # merge public result
    public_results = dict()
    public_intersect_list = [one_thousand_range_intersect,
            dgv_range_intersect,
            dbVar_range_intersect,
            decipher_hi_range_intersect,
            one_thousand_ins_intersect,
            decipher_hi_ins_intersect,
            decipher_hi_tra_intersect
            ]
    for i in public_intersect_list:
        update_result(public_results, i)
    # local SV database annotate
    # local SV beds
    local_db_beds = iglob(os.path.join(args.local_dbdir, "*.sv.database.bed"))
    pool = Pool(processes = args.threads)
    intersect_work_list = []
    n = 1 # used for number the temp files and calculate local database size
    for local_db_bed in local_db_beds:
        local_annot_args = ([bedtools, bed_query, bed_query_ins, bed_query_tra,
                local_db_bed, args.min_overlap, args.max_dist, args.tmp, args.prefix, n],)
        intersect_work_list.append(pool.apply_async(local_sv_annot,
                local_annot_args))
        n += 1
    pool.close()
    pool.join()
    # merge intersect results
    local_intersect_list = []
    for i in intersect_work_list:
        intersect = i.get()
        local_intersect_list.append(intersect)
    local_intersect_merged = dict()
    for i in local_intersect_list:
        for k in i:
            if k not in local_intersect_merged:
                local_intersect_merged[k] = i[k]
            else:
                local_intersect_merged[k].extend(i[k])
    # NOTE(review): after the loop n equals (number of local beds) + 1,
    # so the frequency denominator is off by one — confirm intended.
    format_local_result(local_intersect_merged, n)
    # OMG, got the END...
    annot_result = public_results
    update_result(annot_result, local_intersect_merged)
    # make table
    # fields: local-frequency columns first, then each public database
    result_field_names = (["GrandSV_Frequency", "GrandSV_Variant"] +
            one_thousand_field_names +
            dgv_field_names +
            dbVar_field_names +
            decipher_hi_field_names)
    header_str, table = table_maker(query_list, annot_result, result_field_names)
    prefix_with_dir = os.path.join(args.outdir, args.prefix)
    with open(prefix_with_dir + ".svan.tsv", "w") as io:
        io.write(header_str)
        io.writelines(table)
# script entry point
if __name__ == "__main__":
    main()
|
{"/range_compare.py": ["/bed_intersect.py", "/table_check.py"], "/svan.py": ["/sv_vcf.py", "/pubdb_prepare.py", "/range_compare.py", "/insertion_compare.py", "/traslocation_compare.py"], "/pubdb_prepare.py": ["/sv_vcf.py"], "/traslocation_compare.py": ["/bed_intersect.py", "/table_check.py"], "/insertion_compare.py": ["/bed_intersect.py", "/table_check.py"], "/local_database_prepare.py": ["/sv_vcf.py"]}
|
16,615
|
Archieyoung/SVAN
|
refs/heads/master
|
/bed_intersect.py
|
"""
Author: ArchieYoung <yangqi2@grandomics.com>
Time: Thu Jul 5 09:24:07 CST 2018
"""
class intersect_f(object):
    """One line of `bedtools intersect -wo` output.

    The first `query_field_num` columns belong to the query record; the
    five columns that follow are the database record's
    chrom/start/end/svtype/svid.
    """
    def __init__(self, fields, query_field_num):
        query = fields[:5]
        db = fields[query_field_num:query_field_num + 5]
        self.query_chrom, self.query_start, self.query_end = query[:3]
        self.query_svtype, self.query_svid = query[3:]
        self.db_chrom, self.db_start, self.db_end = db[:3]
        self.db_svtype, self.db_svid = db[3:]
|
{"/range_compare.py": ["/bed_intersect.py", "/table_check.py"], "/svan.py": ["/sv_vcf.py", "/pubdb_prepare.py", "/range_compare.py", "/insertion_compare.py", "/traslocation_compare.py"], "/pubdb_prepare.py": ["/sv_vcf.py"], "/traslocation_compare.py": ["/bed_intersect.py", "/table_check.py"], "/insertion_compare.py": ["/bed_intersect.py", "/table_check.py"], "/local_database_prepare.py": ["/sv_vcf.py"]}
|
16,616
|
Archieyoung/SVAN
|
refs/heads/master
|
/pubdb_prepare.py
|
#!/usr/bin/env python3
"""
prepare SV database for annotation
convert 1000genome, DGV, dbVar SV files into bed files
"""
import sys
import gzip
import logging
import operator
import os
from glob import iglob
from datetime import date
from sv_vcf import SV
# 1000genome
class one_thousand_sv(object):
    """One SV record from the 1000 Genomes phase-3 SV VCF.

    Parses a data line into coordinates, a normalized SVTYPE and the
    population allele frequencies (AF, EAS, EUR, AFR, AMR, SAS).
    """
    def __init__(self, record):
        # 1000genome vcf file parse
        self.record = record
        fields = record.strip().split("\t")
        (self.chrom, self.pos1, self.id, self.ref, self.alt, self.qual,
                self.filter, self.info, self.format) = fields[:9]
        self.samples = fields[9:]
        # info dict: "key=value" entries; bare flags map to themselves
        self.info_dict = {}
        info_list = self.info.split(";")
        for i in info_list:
            if "=" in i:
                info_id, info_value = i.split("=")
                self.info_dict[info_id] = info_value
            else:
                self.info_dict[i] = i
        # end
        if "END" in self.info_dict:
            self.pos2 = self.info_dict["END"]
        else:
            # if can not find end in info, end = start(eg. insertion)
            self.pos2 = self.pos1
        # SVLEN
        if "SVLEN" in self.info_dict:
            self.svlen = self.info_dict["SVLEN"]
        else:
            self.svlen = "NA"
        # SVTYPE: collapse mobile-element subtypes into INS/DEL
        self.sub_svtype = self.info_dict["SVTYPE"]
        if self.sub_svtype in ["SVA", "LINE1", "ALU", "INS"]:
            self.svtype = "INS"
        elif self.sub_svtype in ["DEL", "DEL_ALU", "DEL_HERV", "DEL_LINE1",
                "DEL_SVA"]:
            self.svtype = "DEL"
        else:
            self.svtype = self.sub_svtype
        # allele frequency
        # multi-alleles(CNVs,0,1,2...) frequency is not considered here,
        # treated as bi-alleles(0,1) frequency
        af_populations = ["AF", "EAS_AF", "EUR_AF", "AFR_AF", "AMR_AF",
                "SAS_AF"]
        self.AFs = [self._get_af(i) for i in af_populations]

    def _get_af(self, af_population):
        """Return the summed allele frequency for one population as a
        string (6 significant digits), or "NA" when the INFO key is
        absent or its value is not numeric.

        Example INFO: AF=0.00698882;EAS_AF=0.0069;EUR_AF=0.0189;...
        """
        try:
            af = sum([float(i) for i in self.info_dict[af_population].split(
                    ",")])
            af = "{:.6}".format(af)
        # BUGFIX: was a bare `except:`; narrowed so only a missing key or
        # an unparsable value yields "NA" instead of masking every error
        except (KeyError, ValueError):
            af = "NA"
            logging.warning('Can not find "{}" in INFO of record: {}'.format(
                    af_population, self.record))
        return af

    @classmethod
    def print_bed(cls, vcf_gz, out_name):
        """Convert the gzipped 1000 Genomes SV VCF into a db bed sorted
        by chromosome then numeric start (like ``sort -k1,1 -k2,2n``).

        Output columns: chrom start end svtype svid sub_svtype AF*6.
        """
        bed_list = []
        with gzip.open(vcf_gz, "r") as io:
            n = 0
            for line in io:
                line = line.decode("utf-8")
                if line[0] == "#":
                    continue
                db_svid = "1000genome{}".format(n) # make 1000genome SV id
                n += 1
                sv = one_thousand_sv(line)
                sv.pos1 = int(sv.pos1)  # numeric so the sort is positional
                bed = [sv.chrom, sv.pos1, sv.pos2, sv.svtype, db_svid,
                        sv.sub_svtype] + sv.AFs
                bed_list.append(bed)
        bed_list.sort(key = operator.itemgetter(0, 1))
        bed_lines = []
        for i in bed_list:
            i[1] = str(i[1])
            bed_lines.append("\t".join(i) + "\n")
        with open(out_name, "w") as io:
            io.writelines(bed_lines)
class dgv_gold_cnv(object):
    """One DGV gold-standard CNV record parsed from a gff3 line.

    Gain/Loss variants are mapped to the shared DUP/DEL vocabulary and
    the percentage frequency is converted to a fraction string.
    """
    def __init__(self, record):
        self.record = record
        fields = record.strip().split("\t")
        # remove "chr" prefix in chrom if it exists
        self.chrom = fields[0].replace("chr", "")
        self.pos1 = fields[3]
        self.pos2 = fields[4]
        # gff3 attribute column: ";"-separated "key=value" pairs
        self.info_dict = {}
        for entry in fields[-1].split(";"):
            if "=" in entry:
                key, value = entry.split("=")
                self.info_dict[key] = value
            else:
                self.info_dict[entry] = entry
        sub_type = self.info_dict["variant_sub_type"]
        if sub_type == "Gain":
            self.svtype = "DUP"
        elif sub_type == "Loss":
            self.svtype = "DEL"
        else:
            raise RuntimeError('variant_sub_type can either be "Gain" or "Loss"')
        # "50%" -> "0.5"
        self.af = str(float(self.info_dict["Frequency"].replace("%", "")) * 0.01)
        self.sample_size = self.info_dict["num_unique_samples_tested"]

    @classmethod
    def print_bed(cls, gff3, out_name):
        """Convert the DGV gff3 file into a db bed sorted by chromosome
        then numeric start (columns: chrom start end svtype svid af
        sample_size)."""
        records = []
        counter = 0
        with open(gff3, "r") as handle:
            for line in handle:
                if line[0] == "#":
                    continue
                sv = cls(line)
                db_svid = "dgv{}".format(counter)
                counter += 1
                records.append([sv.chrom, int(sv.pos1), sv.pos2, sv.svtype,
                        db_svid, sv.af, sv.sample_size])
        records.sort(key=operator.itemgetter(0, 1))
        out_lines = []
        for rec in records:
            rec[1] = str(rec[1])
            out_lines.append("\t".join(rec) + "\n")
        with open(out_name, "w") as handle:
            handle.writelines(out_lines)
class dbVar_nstd37_sv(object):
    """One record of the dbVar nstd37 VCF (clinical structural
    variants); CLNSIG and PHENO default to "NA" when absent."""
    def __init__(self, record):
        self.record = record
        fields = record.strip().split("\t")
        (self.chrom, self.pos1, self.id, self.ref, self.alt, self.qual,
                self.filter, self.info) = fields[:8]
        # INFO column -> dict; bare flags map to themselves
        self.info_dict = {}
        for entry in self.info.split(";"):
            if "=" in entry:
                key, value = entry.split("=")
                self.info_dict[key] = value
            else:
                self.info_dict[entry] = entry
        self.pos2 = self.info_dict["END"]
        self.svtype = self.info_dict["SVTYPE"]
        # clinical significance / phenotype are optional annotations
        self.clnsig = self.info_dict.get("CLNSIG", "NA")
        self.pheno = self.info_dict.get("PHENO", "NA")

    @classmethod
    def print_bed(cls, vcf_gz, out_name):
        """Convert the gzipped dbVar VCF into a db bed sorted by
        chromosome then numeric start (columns: chrom start end svtype
        svid clnsig pheno)."""
        records = []
        counter = 0
        with gzip.open(vcf_gz, "r") as handle:
            for raw in handle:
                raw = raw.decode("utf-8")
                if raw[0] == "#":
                    continue
                sv = cls(raw)
                db_svid = "dbvar{}".format(counter)
                counter += 1
                records.append([sv.chrom, int(sv.pos1), sv.pos2, sv.svtype,
                        db_svid, sv.clnsig, sv.pheno])
        records.sort(key=operator.itemgetter(0, 1))
        out_lines = []
        for rec in records:
            rec[1] = str(rec[1])
            out_lines.append("\t".join(rec) + "\n")
        with open(out_name, "w") as handle:
            handle.writelines(out_lines)
class decipher_HI(object):
    """
    Convert decipher_HI_Predictions_Version3.bed.gz to database bed
    Huang N, Lee I, Marcotte EM, Hurles ME (2010) Characterising and Predicting Haploinsufficiency in the Human Genome. PLOS Genetics 6(10): e1001154.
    """
    def __init__(self, record):
        fields = record.strip().split("\t")
        self.chrom, self.pos1, self.pos2, self.gene_hi = fields[:4]
        # remove "chr"
        self.chrom = self.chrom.replace("chr", "")
        # "WILD" matches any query SV type (svtype-insensitive annotation)
        self.svtype = "WILD"

    @classmethod
    def print_bed(cls, input_gz, out_name):
        """Convert the gzipped DECIPHER HI bed into a db bed sorted by
        chromosome then numeric start (columns: chrom start end svtype
        svid gene_hi)."""
        records = []
        with gzip.open(input_gz, "r") as handle:
            handle.readline()  # skip the header line
            for counter, raw in enumerate(handle):
                sv = cls(raw.decode("utf-8"))
                records.append([sv.chrom, int(sv.pos1), sv.pos2, sv.svtype,
                        "decipherHI{}".format(counter), sv.gene_hi])
        records.sort(key=operator.itemgetter(0, 1))
        out_lines = []
        for rec in records:
            rec[1] = str(rec[1])
            out_lines.append("\t".join(rec) + "\n")
        with open(out_name, "w") as handle:
            handle.writelines(out_lines)
class cosmic_cnv(object):
    """
    Convert CosmicCompleteCNA.tsv.gz(CNV) into database bed
    too many records 31723168, need refine for annotation, beta!!!
    """
    def __init__(self, record):
        """Parse one CosmicCompleteCNA.tsv record line.

        Raises:
            RuntimeError: when the position column does not match
                'chrom:start..end'.
        """
        fields = record.strip().split("\t")
        self.CNV_ID = fields[0]
        self.Primary_site = fields[5]
        self.Primary_histology = fields[9]
        # COSMIC uses gain/loss; map to the shared DUP/DEL vocabulary
        self.svtype = fields[-4]
        if self.svtype == "gain":
            self.svtype = "DUP"
        if self.svtype == "loss":
            self.svtype = "DEL"
        sv_positions = fields[-1] # chrom:start..end
        # BUGFIX: the original condition was `":" and ".." in sv_positions`,
        # which never actually tested for ":", and it assigned the whole
        # split list to self.chrom instead of its first element.
        if ":" in sv_positions and ".." in sv_positions:
            chrom, _, span = sv_positions.partition(":")
            start, _, end = span.partition("..")
            self.chrom = chrom
            self.pos1 = start
            self.pos2 = end
        else:
            raise RuntimeError("{} not match 'chrom:start..end'".format(
                    sv_positions))

    @classmethod
    def print_bed(cls, input_gz, out_name):
        """Convert the gzipped COSMIC CNA table to a db bed sorted by
        chromosome then numeric start, keeping one row per CNV_ID."""
        bed_list = []
        # set membership is O(1); the file has tens of millions of rows
        cnv_ids = set()
        with gzip.open(input_gz, "r") as io:
            io.readline() # remove header
            n = 0
            for line in io:
                line = line.decode("utf-8")
                sv = cosmic_cnv(line)
                if sv.CNV_ID in cnv_ids:
                    # CosmicCNA stores one row per overlapped gene; keep
                    # only the first row for each CNV
                    continue
                cnv_ids.add(sv.CNV_ID)
                sv.pos1 = int(sv.pos1)
                db_svid = "cosmic{}".format(n)
                n += 1
                bed = [sv.chrom, sv.pos1, sv.pos2, sv.svtype, db_svid,
                        sv.Primary_site, sv.Primary_histology]
                bed_list.append(bed)
        bed_list.sort(key = operator.itemgetter(0, 1))
        bed_lines = []
        for i in bed_list:
            i[1] = str(i[1])
            bed_lines.append("\t".join(i) + "\n")
        with open(out_name, "w") as io:
            io.writelines(bed_lines)
#class cosmic_sv(object):
# """
# convert cosmic CosmicStructExport.tsv.gz into database bed
# """
# def __init__(self,record):
# fileds = record.strip().split("\t")
def main():
    """Manual smoke-test entry point; the converters are normally driven
    from svan.py, so nothing runs here by default."""
    # Examples (uncomment one to run a conversion from the command line):
    #   one_thousand_sv.print_bed(sys.argv[1], sys.argv[2])
    #   dgv_gold_cnv.print_bed(sys.argv[1], sys.argv[2])
    #   dbVar_nstd37_sv.print_bed(sys.argv[1], sys.argv[2])
    #   decipher_HI.print_bed(sys.argv[1], sys.argv[2])
    #   cosmic_cnv.print_bed(sys.argv[1], sys.argv[2])
    pass
# script entry point
if __name__ == "__main__":
    main()
|
{"/range_compare.py": ["/bed_intersect.py", "/table_check.py"], "/svan.py": ["/sv_vcf.py", "/pubdb_prepare.py", "/range_compare.py", "/insertion_compare.py", "/traslocation_compare.py"], "/pubdb_prepare.py": ["/sv_vcf.py"], "/traslocation_compare.py": ["/bed_intersect.py", "/table_check.py"], "/insertion_compare.py": ["/bed_intersect.py", "/table_check.py"], "/local_database_prepare.py": ["/sv_vcf.py"]}
|
16,617
|
Archieyoung/SVAN
|
refs/heads/master
|
/sv_vcf.py
|
"""
A Universal Stucture Variant VCF parsing module
tested vcf: sniffles vcf, nanosv vcf, picky vcf
shared INFO ID are: SVTYPE, END, SVLEN
RE(reads evidence): sniffles, picky; nano SV: RT(2d,template,complement)
BND shared format: N[ref:pos2[
BND format:
N]chr6:25647927]
STAT REF ALT Meaning
s1 s t[p[ piece extending to the right of p is joined after t
s2 s t]p] reverse comp piece extending left of p is joined after t
s3 s ]p]t piece extending to the left of p is joined before t
s4 s [p[t reverse comp piece extending right of p is joined before t
"""
import sys
import logging
class bnd(object):
    """Parse a VCF breakend (BND) ALT string such as ``N[chr6:25647927[``.

    Attributes: stat (orientation class "s1".."s4", see module
    docstring), chrom and pos_num of the mate breakpoint.
    For nano sv BND only; must be adjusted when used in other cases.
    """
    def __init__(self, bnd_string):
        self.bnd_string = bnd_string
        # Normalize the reference base(s) to a single "N", whatever the
        # actual characters are, so exactly four shapes remain:
        # N[chr:pos[, N]chr:pos], ]chr:pos]N, [chr:pos[N
        bracket = "[" if "[" in self.bnd_string else "]"
        if self.bnd_string[0] == bracket:
            # ref base(s) trail the mate coordinate
            close_idx = self.bnd_string.index(bracket, 1)
            self.bnd_string = self.bnd_string[:close_idx + 1] + "N"
        else:
            # ref base(s) precede the mate coordinate
            open_idx = self.bnd_string.index(bracket)
            self.bnd_string = "N" + self.bnd_string[open_idx:]
        if self.bnd_string[:2] == "N[":
            self.stat = "s1"   # piece right of pos joined after t
            self.pos = self.bnd_string[2:-1]
        elif self.bnd_string[:2] == "N]":
            self.stat = "s2"   # rev-comp piece left of pos joined after t
            self.pos = self.bnd_string[2:-1]
        elif self.bnd_string[0] == "]":
            self.stat = "s3"   # piece left of pos joined before t
            self.pos = self.bnd_string[1:-2]
        else:
            self.stat = "s4"   # rev-comp piece right of pos joined before t
            self.pos = self.bnd_string[1:-2]
        coord = self.pos.split(":")
        self.chrom = coord[0]
        self.pos_num = coord[1]
class SV(object):
    """One structural-variant record parsed from a VCF data line.

    Normalizes sniffles / nanosv / picky conventions into common
    attributes: chrom1/pos1 and chrom2/pos2 (pos1 <= pos2), svtype
    (BND resolved to INV or TRA), svlen, and re (supporting reads).
    """
    def __init__(self,record):
        # record: one full tab-separated VCF data line
        self.record = record
        fields = record.strip().split("\t")
        # checked if ID is unique for sniffles, nanosv, picky vcf
        (self.chrom1,self.pos1,self.id,self.ref,self.alt,self.qual,self.filter,
                self.info,self.format) = fields[:9]
        self.samples = fields[9:]
        # info dict: "key=value" entries; bare flags map to themselves
        self.info_dict = {}
        info_list = self.info.split(";")
        for i in info_list:
            if "=" in i:
                info_id,info_value = i.split("=")
                self.info_dict[info_id] = info_value
            else:
                self.info_dict[i] = i
        self.svtype = self.info_dict["SVTYPE"]
        # BND, svtype, pos2: mate coordinate comes from the ALT string
        if self.svtype == "BND":
            bnd_pos = bnd(self.alt) # BND position
            self.chrom2 = bnd_pos.chrom
            self.pos2 = bnd_pos.pos_num
            # same-chromosome breakend in orientation s2/s4 is an inversion
            if (self.chrom1 == self.chrom2 and (bnd_pos.stat == "s2"
                    or bnd_pos.stat == "s4")): # INV
                self.svtype = "INV"
            elif self.chrom1 != self.chrom2:
                self.svtype = "TRA"
            else:
                raise RuntimeError("bad line {}".format(record))
        elif self.svtype == "TRA": # sniffles TRA (BND not specified)
            self.chrom2 = self.info_dict["CHR2"]
            self.pos2 = self.info_dict["END"]
        else:
            self.chrom2 = self.chrom1
            self.pos2 = self.info_dict["END"]
        # exchange pos1 and pos2, if pos1 > pos2
        if int(self.pos1) > int(self.pos2):
            tmp = self.pos1
            self.pos1 = self.pos2
            self.pos2 = tmp
        # svlen
        try:
            self.svlen = self.info_dict["SVLEN"]
        except KeyError:
            # INS, TRA do not have SVLEN attribute
            self.svlen = "NA"
        # RE(number of read evidence)
        if "RE" in self.info_dict: # sniffles and picky
            self.re = self.info_dict["RE"]
        elif "RT" in self.info_dict: # nanosv
            # RT=2d,template,compementary; no matter what kind of reads they
            # are, add them up
            self.re = sum([int(i) for i in self.info_dict["RT"].split(",")])
            self.re = str(self.re)
        else:
            self.re = "NA"
            logging.warning("Can not get RE(support reads num) "
                    "for {}".format(record))
def vcf_to_bed(vcf, out_name, filter=False, min_support_reads=0,
        chr_remove=False, sv_id_prefix=None):
    """Convert an SV VCF into a bed-like table.

    Output columns: chrom, start, end, svtype, id, svlen, re, info.
    Intra-chromosomal SVs produce one line (start shifted to 0-based
    when pos1 > 1); translocations produce two lines, one per
    breakpoint, with "_1"/"_2" appended to the id.  Duplicated
    adjacency records for the same TRA/INV breakpoint pair (as written
    by picky) are dropped.

    Args:
        vcf: input VCF path.
        out_name: output bed path.
        filter: when True, keep only SVs on the main chromosomes with
            at least min_support_reads supporting reads.
        min_support_reads: RE threshold applied when filter is True.
        chr_remove: strip the "chr" prefix from chromosome names.
        sv_id_prefix: optional prefix prepended to each SV id.

    Raises:
        RuntimeError: if an SV id occurs twice (after prefixing).
    """
    with open(vcf,"r") as io:
        lines = io.readlines()
    # chromosomes to be kept
    # BUGFIX: was list(range(1,23)) (ints), which can never equal the
    # string chromosome names parsed from the VCF, so filter=True
    # silently dropped every record on an unprefixed numeric chromosome.
    main_chr = ([str(i) for i in range(1, 23)] + ["X", "Y", "MT", "M",
            "chrX", "chrY", "chrM", "chrMT"] +
            ["chr"+str(i) for i in range(1, 23)])
    # output bedlines
    bed_lines = []
    # check if id is unique
    id_dict = {}
    #chrom1,pos1,chrom2,pos2
    previous_sv_breakpoint = ["NA","NA","NA","NA"]
    for line in lines:
        # skip blank lines (previously raised IndexError) and comments
        if not line.strip():
            continue
        if line.strip()[0] == "#":
            continue
        sv = SV(line)
        # filter
        if filter:
            if sv.chrom1 not in main_chr or sv.chrom2 not in main_chr:
                continue
            if int(sv.re) < min_support_reads:
                continue
        # remove 'chr' in chromosome id
        if chr_remove:
            sv.chrom1 = sv.chrom1.replace("chr", "")
            sv.chrom2 = sv.chrom2.replace("chr", "")
        # rename sv id
        if sv_id_prefix:
            sv.id = sv_id_prefix + sv.id
        if sv.id not in id_dict:
            id_dict[sv.id] = 1
        else:
            raise RuntimeError("Duplicated SV ID in you VCF "
                    "file {}".format(sv.id))
        sv_breakpoint = [sv.chrom1,sv.pos1,sv.chrom2,sv.pos2]
        # remove duplicate adjacency BND record in picky vcf
        # Exactly the same
        if ((sv.svtype == "TRA" or sv.svtype == "INV") and
                sv_breakpoint[:4] == previous_sv_breakpoint[:4]):
            continue
        # just swap breakpoint1 and breakpoint2, still the same
        if ((sv.svtype == "TRA" or sv.svtype == "INV") and
                sv_breakpoint[:2] == previous_sv_breakpoint[2:] and
                sv_breakpoint[2:] == previous_sv_breakpoint[:2]):
            previous_sv_breakpoint = sv_breakpoint
            continue
        previous_sv_breakpoint = sv_breakpoint
        # convert to bed format
        # chrom,start,end,svtype,id,svlen,re,info
        if sv.chrom1 == sv.chrom2:
            if int(sv.pos1) > 1:
                sv.pos1 = str(int(sv.pos1)-1)  # 0-based start
            bed_line = "\t".join([sv.chrom1,sv.pos1,sv.pos2,sv.svtype,
                    sv.id,sv.svlen,sv.re,sv.info])+"\n"
        else: #TRA: one width-1 interval per breakpoint
            if int(sv.pos1) > 1:
                pos1_1 = str(int(sv.pos1)-1)
                pos1_2 = sv.pos1
            elif int(sv.pos1) == 1:
                pos1_1 = "1"
                pos1_2 = "1"
            else:
                continue # invalid position
            if int(sv.pos2) > 1:
                pos2_1 = str(int(sv.pos2)-1)
                pos2_2 = sv.pos2
            elif int(sv.pos2) == 1:
                pos2_1 = "1"
                pos2_2 = "1"
            else:
                continue # invalid position
            bed_line1 = "\t".join([sv.chrom1,pos1_1,pos1_2,sv.svtype,
                    sv.id+"_1",sv.svlen,sv.re,sv.info])+"\n"
            bed_line2 = "\t".join([sv.chrom2,pos2_1,pos2_2,sv.svtype,
                    sv.id+"_2",sv.svlen,sv.re,sv.info])+"\n"
            bed_line = bed_line1+bed_line2
        bed_lines.append(bed_line)
    with open(out_name,"w") as io:
        io.writelines(bed_lines)
def main():
    """Ad-hoc test entry point: ``sv_vcf.py <in.vcf> <out.bed>``."""
    vcf_in, bed_out = sys.argv[1], sys.argv[2]
    vcf_to_bed(vcf_in, bed_out)
# script entry point
if __name__ == "__main__":
    main()
|
{"/range_compare.py": ["/bed_intersect.py", "/table_check.py"], "/svan.py": ["/sv_vcf.py", "/pubdb_prepare.py", "/range_compare.py", "/insertion_compare.py", "/traslocation_compare.py"], "/pubdb_prepare.py": ["/sv_vcf.py"], "/traslocation_compare.py": ["/bed_intersect.py", "/table_check.py"], "/insertion_compare.py": ["/bed_intersect.py", "/table_check.py"], "/local_database_prepare.py": ["/sv_vcf.py"]}
|
16,618
|
Archieyoung/SVAN
|
refs/heads/master
|
/traslocation_compare.py
|
"""
Author: ArchieYoung <yangqi2@grandomics.com>
Time: Thu Jul 5 09:24:07 CST 2018
"""
import os
import subprocess
import sys
from bed_intersect import intersect_f
from table_check import TableCheck
def TraCompare(bedtools, bedA, bedB, max_dist, tmp_dir, prefix, db_id):
    """Match query translocation breakpoints (bedA) against database SVs.

    Each breakpoint interval is padded by max_dist on both sides and
    intersected with the database bed via `bedtools intersect -wo`.
    Then:
      * "WILD" database records annotate any hit breakpoint directly;
      * "TRA" database records only count when BOTH breakpoints of one
        query translocation hit BOTH breakpoints of the same database
        translocation (breakpoints are paired via their "_1"/"_2" id
        suffixes).

    Args:
        bedtools: name/path of the bedtools executable.
        bedA: query breakpoint bed (chrom start end svtype svid ...).
        bedB: database bed (chrom start end svtype svid annot...).
        max_dist: padding in bp added around each breakpoint.
        tmp_dir, prefix, db_id: used to name intermediate files.

    Returns:
        dict mapping query svid -> list of matching database field
        lists (database columns without the trailing overlap column).
    """
    # result sv dict, key: query sv id, value: db fields
    tra_compare_result = {}
    # calculate query and database fields number
    query_field_num = TableCheck(bedA)
    # padding
    padding = max_dist
    padding_tmp_bed = os.path.join(tmp_dir,
            "{}.{}.tra.padding{}.tmp.bed".format(prefix, db_id, padding))
    with open(padding_tmp_bed, "w") as padding_io:
        with open(bedA, "r") as bedA_io:
            for line in bedA_io:
                line = line.strip()
                if line[0] == "#":
                    print(line, file = padding_io)
                else:
                    fields = line.split("\t")
                    bed_start = int(fields[1])
                    bed_end = int(fields[2])
                    if bed_start > padding: # make sure start not less than 1
                        bed_start = bed_start - padding
                    else:
                        bed_start = 1
                    bed_end = bed_end + padding
                    fields[1] = str(bed_start)
                    fields[2] = str(bed_end)
                    print("\t".join(fields), file = padding_io)
    intersect_tmp_bed = os.path.join(tmp_dir,
            "{}.{}.tra.intersect.tmp.bed".format(prefix, db_id))
    # bedtools intersect, padding + intersect to get all SVs near the brk
    with open(intersect_tmp_bed,"w") as io:
        subprocess.run([bedtools, "intersect", "-a", padding_tmp_bed, "-b",
                bedB,"-wo"], stdout = io)
    # read intersect file
    # chrom start end svtype svid svlen re info;
    # svid is a unique identifier for the sv
    with open(intersect_tmp_bed, "r") as io:
        tra_pair = {}
        lines = io.readlines()
        for line in lines:
            fields = line.strip().split("\t")
            query_fields = fields[:query_field_num]
            db_fields = fields[query_field_num:-1]
            intersect_field = intersect_f(fields, query_field_num)
            # 'WILD' type database SVs match any query SV, do breakpoint annotation
            if intersect_field.db_svtype == "WILD":
                tra_compare_result.setdefault(intersect_field.query_svid,
                        []).append(db_fields)
            elif intersect_field.db_svtype == "TRA":
                # group hits by the query translocation (strip _1/_2)
                tra_main_id = "_".join(
                        intersect_field.query_svid.split("_")[:-1])
                tra_pair.setdefault(tra_main_id,[]).append(fields)
    if tra_pair:
        for i in tra_pair:
            # get paired database traslocation
            db_tra_pair = {}
            for r in tra_pair[i]:
                query_fields = r[:query_field_num]
                db_fields = r[query_field_num:-1]
                intersect_field = intersect_f(r, query_field_num)
                db_tra_main_id = "_".join(intersect_field.db_svid.split("_")[:-1])
                db_tra_pair.setdefault(db_tra_main_id,[]).append(r)
            for p in db_tra_pair:
                # a database TRA supports the query only when both of
                # its breakpoints were hit...
                if len(db_tra_pair[p]) == 2:
                    # ...by two DIFFERENT query breakpoints
                    if (db_tra_pair[p][0][4] != db_tra_pair[p][1][4]):
                        # two query sv ids are not the same
                        tra_compare_result.setdefault(db_tra_pair[p][0][4],
                                []).append(db_tra_pair[p][0][query_field_num:-1])
                        tra_compare_result.setdefault(db_tra_pair[p][1][4],
                                []).append(db_tra_pair[p][1][query_field_num:-1])
    return tra_compare_result
def main():
    """Ad-hoc test: compare two beds with a 1 kb window and print hits."""
    result = TraCompare("bedtools", sys.argv[1], sys.argv[2], 1000, "tmp1", "test_tra1", "TEST1")
    for query_id in result:
        print(query_id, ":", result[query_id])
# script entry point
if __name__ == "__main__":
    main()
|
{"/range_compare.py": ["/bed_intersect.py", "/table_check.py"], "/svan.py": ["/sv_vcf.py", "/pubdb_prepare.py", "/range_compare.py", "/insertion_compare.py", "/traslocation_compare.py"], "/pubdb_prepare.py": ["/sv_vcf.py"], "/traslocation_compare.py": ["/bed_intersect.py", "/table_check.py"], "/insertion_compare.py": ["/bed_intersect.py", "/table_check.py"], "/local_database_prepare.py": ["/sv_vcf.py"]}
|
16,619
|
Archieyoung/SVAN
|
refs/heads/master
|
/insertion_compare.py
|
"""
Author: ArchieYoung <yangqi2@grandomics.com>
Time: Thu Jul 5 09:24:07 CST 2018
"""
import os
import subprocess
from bed_intersect import intersect_f
from table_check import TableCheck
def InsCompare(bedtools, bedA, bedB, max_dist, tmp_dir, prefix, db_id):
    """Match query insertions against a database BED via bedtools intersect.

    Each query interval in `bedA` is padded by `max_dist` bp on both sides
    (start clamped to 1), intersected with `bedB` (`bedtools intersect -wo`),
    and database hits of type INS - or the catch-all WILD type - are kept.

    :param bedtools: bedtools executable (path or name on PATH)
    :param bedA: query BED file (chrom start end svtype svid svlen re info)
    :param bedB: database BED file
    :param max_dist: padding distance in bp
    :param tmp_dir: directory for intermediate files
    :param prefix: file-name prefix for temporary files
    :param db_id: database identifier used in temporary file names
    :return: dict mapping query SV id to a list of matching db field lists
    """
    # result sv dict, key: query sv id, value: db fields
    intersect_result = {}
    # number of columns contributed by the query side of the intersect output
    query_field_num = TableCheck(bedA)
    # padding
    padding = max_dist
    padding_tmp_bed = os.path.join(tmp_dir,
        "{}.{}.ins.padding{}.tmp.bed".format(prefix, db_id, padding))
    with open(padding_tmp_bed, "w") as padding_io:
        with open(bedA, "r") as bedA_io:
            for line in bedA_io:
                line = line.strip()
                if not line:
                    # guard: indexing an empty line would raise IndexError
                    continue
                if line.startswith("#"):
                    print(line, file = padding_io)
                else:
                    fields = line.split("\t")
                    bed_start = int(fields[1])
                    bed_end = int(fields[2])
                    if bed_start > padding: # make sure start not less than 1
                        bed_start = bed_start - padding
                    else:
                        bed_start = 1
                    bed_end = bed_end + padding
                    fields[1] = str(bed_start)
                    fields[2] = str(bed_end)
                    print("\t".join(fields), file = padding_io)
    intersect_tmp_bed = os.path.join(tmp_dir,
        "{}.{}.ins.intersect.tmp.bed".format(prefix, db_id))
    # bedtools intersect; -wo appends both records plus the overlap length
    with open(intersect_tmp_bed, "w") as io:
        subprocess.run([bedtools, "intersect", "-a", padding_tmp_bed, "-b",
            bedB, "-wo"], stdout = io)
    # read intersect file
    # chrom start end svtype svid svlen re info;
    # svid is a unique identifier for the sv
    with open(intersect_tmp_bed, "r") as io:
        for line in io:
            fields = line.strip().split("\t")
            # last column is the overlap length added by -wo; drop it
            db_fields = fields[query_field_num:-1]
            intersect_field = intersect_f(fields, query_field_num)
            _db_svtype = intersect_field.db_svtype
            _query_svid = intersect_field.query_svid
            # WILD database SV type matches any query SV type
            if (_db_svtype == "INS" or _db_svtype == "WILD"):
                intersect_result.setdefault(_query_svid, []).append(db_fields)
    return intersect_result
|
{"/range_compare.py": ["/bed_intersect.py", "/table_check.py"], "/svan.py": ["/sv_vcf.py", "/pubdb_prepare.py", "/range_compare.py", "/insertion_compare.py", "/traslocation_compare.py"], "/pubdb_prepare.py": ["/sv_vcf.py"], "/traslocation_compare.py": ["/bed_intersect.py", "/table_check.py"], "/insertion_compare.py": ["/bed_intersect.py", "/table_check.py"], "/local_database_prepare.py": ["/sv_vcf.py"]}
|
16,620
|
Archieyoung/SVAN
|
refs/heads/master
|
/local_database_prepare.py
|
"""
Author: ArchieYoung <yangqi2@grandomics.com>
Time: Thu Jul 5 09:24:07 CST 2018
"""
import sys
import argparse
import os
from multiprocessing import Pool
from glob import iglob
from sv_vcf import SV
def vcf_to_db_bed(_args):
    """Convert one sample VCF into a database BED file.

    :param _args: tuple of (vcf_path, min_support_reads, out_dir, sv_id_prefix),
        packed into one argument so the function works with ``Pool.map``.

    Writes ``<out_dir>/<sv_id_prefix>.sv.database.bed``. Intra-chromosomal SVs
    produce one BED line; translocations produce two lines, one per breakpoint,
    with ``_1``/``_2`` id suffixes.

    :raises RuntimeError: if the VCF contains a duplicated SV id.
    """
    vcf, min_support_reads, out_dir, sv_id_prefix = _args
    with open(vcf, "r") as io:
        lines = io.readlines()
    # chromosomes to be kept
    main_chr = ([str(i) for i in range(1, 23)] + ["X", "Y", "MT", "M", "chrX", "chrY",
        "chrM", "chrMT"] + ["chr" + str(i) for i in range(1, 23)])
    # output bedlines
    bed_lines = []
    # check if id is unique
    id_dict = {}
    # chrom1, pos1, chrom2, pos2 of the previously accepted record
    previous_sv_breakpoint = ["NA", "NA", "NA", "NA"]
    for line in lines:
        stripped = line.strip()
        # skip blank and comment lines (indexing an empty string would raise)
        if not stripped or stripped[0] == "#":
            continue
        sv = SV(line)
        # filter
        if sv.chrom1 not in main_chr or sv.chrom2 not in main_chr:
            continue
        if int(sv.re) < min_support_reads:
            continue
        # remove 'chr' in chromosome id
        sv.chrom1 = sv.chrom1.replace("chr", "")
        sv.chrom2 = sv.chrom2.replace("chr", "")
        # rename sv id
        if sv_id_prefix:
            sv.id = "_".join([sv_id_prefix, sv.id])
        if sv.id not in id_dict:
            id_dict[sv.id] = 1
        else:
            raise RuntimeError("Duplicated SV ID in your VCF "
                "file {}".format(sv.id))
        sv_breakpoint = [sv.chrom1, sv.pos1, sv.chrom2, sv.pos2]
        # remove duplicate adjacency BND record in picky vcf
        # Exactly the same
        if ((sv.svtype == "TRA" or sv.svtype == "INV") and
                sv_breakpoint[:4] == previous_sv_breakpoint[:4]):
            continue
        # just swap breakpoint1 and breakpoint2, still the same
        if ((sv.svtype == "TRA" or sv.svtype == "INV") and
                sv_breakpoint[:2] == previous_sv_breakpoint[2:] and
                sv_breakpoint[2:] == previous_sv_breakpoint[:2]):
            previous_sv_breakpoint = sv_breakpoint
            continue
        previous_sv_breakpoint = sv_breakpoint
        # convert to bed format
        # chrom,start,end,svtype,id,svlen,re,info
        if sv.chrom1 == sv.chrom2:
            # BED is zero-based half-open: shift the start left by one
            if int(sv.pos1) > 1:
                sv.pos1 = str(int(sv.pos1)-1)
            bed_line = "\t".join([sv.chrom1,sv.pos1,sv.pos2,sv.svtype,
                sv.id,sv.svlen,sv.re])+"\n"
        else: # TRA: emit one line per breakpoint
            if int(sv.pos1) > 1:
                pos1_1 = str(int(sv.pos1)-1)
                pos1_2 = sv.pos1
            elif int(sv.pos1) == 1:
                pos1_1 = "1"
                pos1_2 = "1"
            else:
                continue # invalid position
            if int(sv.pos2) > 1:
                pos2_1 = str(int(sv.pos2)-1)
                pos2_2 = sv.pos2
            elif int(sv.pos2) == 1:
                pos2_1 = "1"
                pos2_2 = "1"
            else:
                continue # invalid position
            bed_line1 = "\t".join([sv.chrom1,pos1_1,pos1_2,sv.svtype,
                sv.id+"_1",sv.svlen,sv.re])+"\n"
            bed_line2 = "\t".join([sv.chrom2,pos2_1,pos2_2,sv.svtype,
                sv.id+"_2",sv.svlen,sv.re])+"\n"
            bed_line = bed_line1+bed_line2
        bed_lines.append(bed_line)
    out_bed_path = os.path.join(out_dir,
        "{}.sv.database.bed".format(sv_id_prefix))
    with open(out_bed_path, "w") as out_hd:
        out_hd.writelines(bed_lines)
def db_check(vcf_list, db_bed_list):
    """Return the VCFs whose sample id is not yet present in the database.

    The sample id is the file basename up to the first dot.

    :param vcf_list: iterable of candidate VCF paths
    :param db_bed_list: iterable of existing database BED paths
    :return: list of VCF paths still to be added
    :raises RuntimeError: if the database holds two BEDs for one sample
    """
    db_sample_ids = set()
    for bed_path in db_bed_list:
        sample_id = os.path.basename(bed_path).split(".")[0]
        if sample_id in db_sample_ids:
            raise RuntimeError("Duplicated sample {} in your database".format(sample_id))
        db_sample_ids.add(sample_id)
    # keep only VCFs whose sample the database does not know yet
    return [vcf for vcf in vcf_list
            if os.path.basename(vcf).split(".")[0] not in db_sample_ids]
def get_args():
    """Build and parse the command-line arguments.

    Prints help and exits when the program is invoked with no argument.
    """
    arg_parser = argparse.ArgumentParser(
        description="Prepare Local SV Database",
        usage="usage: %(prog)s [options]")
    arg_parser.add_argument("--vcf_dir", metavar="STR",
        help="vcf file directory [default %(default)s]")
    arg_parser.add_argument("--db_dir", metavar="STR",
        help="database directory [default %(default)s]")
    arg_parser.add_argument("--min_re", type=float, default=2, metavar="INT",
        help="minimum support reads number [default %(default)s]")
    arg_parser.add_argument("--threads", type=int, default=4, metavar="INT",
        help="number of threads [default %(default)s]")
    # no arguments at all: show usage instead of running with defaults
    if len(sys.argv) <= 1:
        arg_parser.print_help()
        exit()
    return arg_parser.parse_args()
def main():
    """Entry point: add any new VCFs under --vcf_dir to the BED database."""
    args = get_args()
    # candidate vcf files
    vcfs = iglob(os.path.join(args.vcf_dir, "*.vcf"))
    db_beds = []
    if os.path.exists(args.db_dir):
        db_beds = iglob(os.path.join(args.db_dir, "*.bed"))
    else:
        os.mkdir(args.db_dir)
    vcf_tobe_add = db_check(vcfs, db_beds)
    if not vcf_tobe_add:
        print("database is the newest.")
        sys.exit(0)
    # one worker argument tuple per VCF; prefix is the basename minus ".vcf"
    work_args_list = [
        (vcf, args.min_re, args.db_dir, os.path.basename(vcf)[:-4])
        for vcf in vcf_tobe_add
    ]
    with Pool(processes=args.threads) as pool:
        pool.map(vcf_to_db_bed, work_args_list)


if __name__ == "__main__":
    main()
|
{"/range_compare.py": ["/bed_intersect.py", "/table_check.py"], "/svan.py": ["/sv_vcf.py", "/pubdb_prepare.py", "/range_compare.py", "/insertion_compare.py", "/traslocation_compare.py"], "/pubdb_prepare.py": ["/sv_vcf.py"], "/traslocation_compare.py": ["/bed_intersect.py", "/table_check.py"], "/insertion_compare.py": ["/bed_intersect.py", "/table_check.py"], "/local_database_prepare.py": ["/sv_vcf.py"]}
|
16,628
|
idlesign/deflacue
|
refs/heads/master
|
/setup.py
|
import io
import os
import re
import sys
from setuptools import setup
PATH_BASE = os.path.dirname(__file__)
def read_file(fpath):
    """Return the text contents of a file located within the package tree."""
    full_path = os.path.join(PATH_BASE, fpath)
    with io.open(full_path) as fh:
        return fh.read()
def get_version():
    """Returns version number, without module import (which can lead to ImportError
    if some dependencies are unavailable before install)."""
    contents = read_file(os.path.join('deflacue', '__init__.py'))
    # raw string: the backslashes are regex escapes, not string escapes
    version = re.search(r'VERSION = \(([^)]+)\)', contents)
    version = version.group(1).replace(', ', '.').strip()
    return version
# Package metadata. The version is scraped from deflacue/__init__.py via
# regex so the package (and its dependencies) need not be importable at
# build time.
setup(
    name='deflacue',
    version=get_version(),
    url='http://github.com/idlesign/deflacue',
    description='deflacue is a SoX based audio splitter to split audio CD images incorporated with .cue files',
    long_description=read_file('README.rst'),
    license='BSD 3-Clause License',
    author='Igor `idle sign` Starikov',
    author_email='idlesign@yandex.ru',
    packages=['deflacue'],
    include_package_data=True,
    zip_safe=False,
    # pytest-runner is only needed when the `test` command is requested
    setup_requires=[] + (['pytest-runner'] if 'test' in sys.argv else []) + [],
    entry_points={
        'console_scripts': ['deflacue = deflacue.cli:main'],
    },
    test_suite='tests',
    tests_require=[
        'pytest',
        'pytest-datafixtures',
    ],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: End Users/Desktop',
        'License :: OSI Approved :: BSD License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Topic :: Multimedia :: Sound/Audio :: Conversion',
    ],
)
|
{"/deflacue/cli.py": ["/deflacue/deflacue.py"], "/deflacue/parser.py": ["/deflacue/exceptions.py"], "/tests/test_basic.py": ["/deflacue/deflacue.py", "/deflacue/exceptions.py"], "/deflacue/deflacue.py": ["/deflacue/exceptions.py", "/deflacue/parser.py"]}
|
16,629
|
idlesign/deflacue
|
refs/heads/master
|
/deflacue/cli.py
|
import argparse
import logging
from .deflacue import Deflacue, DeflacueError
def main():
    """Command-line entry point: parse options and run Deflacue."""
    parser = argparse.ArgumentParser('deflacue')
    parser.add_argument(
        'source_path', help='Absolute or relative source path with .cue file(s).')
    parser.add_argument(
        '-r', help='Recursion flag to search directories under the source_path.', action='store_true')
    parser.add_argument(
        '-d', help='Absolute or relative destination path for output audio file(s).')
    parser.add_argument(
        '-e', help='Cue Sheet file(s) encoding.')
    parser.add_argument(
        '--dry', help='Perform the dry run with no changes done to filesystem.', action='store_true')
    parser.add_argument(
        '--debug', help='Show debug messages while processing.', action='store_true')
    opts = parser.parse_args()

    # build Deflacue kwargs only from options the user actually supplied
    kwargs = {'source_path': opts.source_path}
    if opts.e is not None:
        kwargs['encoding'] = opts.e
    if opts.d is not None:
        kwargs['dest_path'] = opts.d
    if opts.debug:
        kwargs['use_logging'] = logging.DEBUG

    try:
        app = Deflacue(**kwargs)
        if not app.sox_check_is_available():
            raise DeflacueError(
                'SoX seems not available. Please install it (e.g. `sudo apt-get install sox libsox-fmt-all`).'
            )
        if opts.dry:
            app.set_dry_run()
        app.do(recursive=opts.r)
    except DeflacueError as e:
        logging.error(e)
|
{"/deflacue/cli.py": ["/deflacue/deflacue.py"], "/deflacue/parser.py": ["/deflacue/exceptions.py"], "/tests/test_basic.py": ["/deflacue/deflacue.py", "/deflacue/exceptions.py"], "/deflacue/deflacue.py": ["/deflacue/exceptions.py", "/deflacue/parser.py"]}
|
16,630
|
idlesign/deflacue
|
refs/heads/master
|
/deflacue/__init__.py
|
VERSION = (2, 0, 1)
|
{"/deflacue/cli.py": ["/deflacue/deflacue.py"], "/deflacue/parser.py": ["/deflacue/exceptions.py"], "/tests/test_basic.py": ["/deflacue/deflacue.py", "/deflacue/exceptions.py"], "/deflacue/deflacue.py": ["/deflacue/exceptions.py", "/deflacue/parser.py"]}
|
16,631
|
idlesign/deflacue
|
refs/heads/master
|
/deflacue/parser.py
|
import logging
from copy import deepcopy
from pathlib import Path
from typing import List, Optional, Tuple
from .exceptions import ParserError
LOGGER = logging.getLogger(__name__)
def pos_to_frames(pos) -> int:
    """Convert a cue position string ``mm:ss:ff`` into audio frames (samples).

    ``ff`` is a CD frame, 1/75th of a second at a 44100 Hz sample rate.

    :param pos: position string in ``mm:ss:ff`` form
    """
    mm, ss, ff = (int(part) for part in pos.split(':'))
    sample_rate = 44100
    total_seconds = mm * 60 + ss
    return total_seconds * sample_rate + ff * (sample_rate // 75)
class Context:
    """Basic context: a mutable mapping seeded from class defaults plus data."""

    _default = {}

    def __init__(self, *, data: dict):
        # deep-copy the incoming data so contexts never share nested objects
        merged = deepcopy(data)
        self.data = {**self._default, **merged}

    def add(self, key: str, val: str):
        """Store (or overwrite) a single key/value pair."""
        self.data[key] = val
class MetaContext(Context):
    """Global .cue meta context."""

    _default = {
        'ALBUM': 'Unknown',
        'PERFORMER': 'Unknown',
        'DATE': None,
    }

    def add(self, key: str, val: str):
        # a disc-level TITLE command denotes the album name
        super().add('ALBUM' if key == 'TITLE' else key, val)
class FileContext(Context):
    """Holds information about a single FILE entry of a cue sheet."""

    def __init__(self, *, path: str, ftype: str, data: dict):
        # path to the audio file referenced by the cue sheet
        self.path: Path = Path(path)
        # file type as declared in the cue sheet
        self.type = ftype
        # tracks contained in this file (populated as TRACK entries are parsed)
        self.tracks: List[TrackContext] = []
        super().__init__(data=data)

    def __str__(self):
        return str(self.path)
class TrackContext(Context):
    """Holds information about a single TRACK entry of a cue sheet."""

    _default = {
        'TITLE': 'Unknown',
    }

    def __init__(self, *, file: FileContext, num: int, dtype: str):
        # file this track belongs to
        self.file = file
        # track number as given in the cue sheet
        self.num = num
        # track data type
        self.type = dtype
        # start position in frames; filled in when INDEX 01 is parsed
        self.start: int = 0
        super().__init__(data=file.data)

    def __str__(self):
        return f"{self.num} {self.title} @ {self.file}"

    @property
    def title(self):
        """Track title, empty string when absent."""
        return self.data.get('TITLE', '')

    @property
    def end(self) -> int:
        """Start of the following track in the same file; 0 when there is none."""
        siblings = self.file.tracks
        for position, candidate in enumerate(siblings):
            if candidate is self:
                if position + 1 < len(siblings):
                    return siblings[position + 1].start
                break
        return 0
class CueData:
    """Represents data from .cue file.

    Maintains a "current context" (meta -> file -> track) so that parsed
    key/value commands land on whichever entity was declared last.
    """
    def __init__(self):
        # NOTE: a dead `self.meta = []` assignment (immediately overwritten)
        # was removed; `meta` is always a MetaContext.
        self.meta = MetaContext(data={})
        """Basic information."""
        self.files: List[FileContext] = []
        """Files in image."""
        self.tracks: List[TrackContext] = []
        """Tracks in image."""
        self._current_file: Optional[FileContext] = None
        self._current_track: Optional[TrackContext] = None
        self._current_context: Context = self.meta
    def add_context(self, key, val):
        """Add a key/value pair to the currently active context."""
        self._current_context.add(key, val)
    def add_file(self, *, path: str, ftype: str):
        """Register a new FILE entry and make it the active context."""
        file_context = FileContext(
            path=path,
            ftype=ftype,
            data=self._current_context.data
        )
        self._current_context = file_context
        self._current_file = file_context
        self.files.append(file_context)
    def add_track(self, *, num: int, dtype: str):
        """Register a new TRACK in the current file and make it active."""
        file_context = self._current_file
        track_context = TrackContext(
            file=file_context,
            num=num,
            dtype=dtype,
        )
        track_context.add('TRACK_NUM', f'{num}')
        file_context.tracks.append(track_context)
        self._current_context = track_context
        self._current_track = track_context
        self.tracks.append(track_context)
    def add_track_index(self, *, pos: str):
        """Set the start position (in frames) of the current track."""
        self._current_track.start = pos_to_frames(pos)
class CueParser:
    """Simple Cue Sheet file parser."""

    def __init__(self, lines: List[str]):
        self.lines = lines

    def run(self) -> CueData:
        """Parse the stored lines and return the populated cue data object."""
        cue = CueData()

        for raw_line in self.lines:
            command, args = self._parse_command(raw_line)

            if command == 'REM':
                # remarks carry their own nested command, e.g. `REM DATE 2020`
                cue.add_context(*self._parse_command(args))

            elif command == 'FILE':
                # the file type is the last space-separated token
                path, ftype = args.rsplit(' ', 1)
                cue.add_file(path=self._unquote(path), ftype=ftype)

            elif command == 'TRACK':
                num, _, dtype = args.partition(' ')
                cue.add_track(num=int(num), dtype=dtype)

            elif command == 'INDEX':
                num, _, pos = args.partition(' ')
                if num == '01':
                    # INDEX 01 marks the actual start of the track audio
                    cue.add_context(f'{command} {num}', pos)
                    cue.add_track_index(pos=pos)

            else:
                cue.add_context(command, args)

        return cue

    def _parse_command(self, cmd: str) -> Tuple[str, str]:
        """Split a line into its command word and unquoted argument string."""
        command, _, args = cmd.partition(' ')
        args = self._unquote(args)
        LOGGER.debug(f'Parsed command `{command}`. Args: {args}')
        return command, args

    @classmethod
    def _unquote(cls, val: str) -> str:
        """Strip surrounding quotes and whitespace from a value."""
        return val.strip(' "')

    @classmethod
    def from_file(cls, fpath: Path, *, encoding: str = None) -> 'CueParser':
        """Create a parser from a .cue file, retrying with the default encoding.

        :raises ParserError: when the file cannot be decoded at all.
        """

        def read(coding: str = None) -> Optional['CueParser']:
            try:
                with open(str(fpath), encoding=coding) as f:
                    stripped = (line.strip() for line in f.readlines())
                    return CueParser([line for line in stripped if line])
            except UnicodeDecodeError:
                return None

        # try the requested encoding first, then fall back to the default
        parser = read(encoding) or read()

        if not parser:
            raise ParserError(
                'Unable to read data from .cue file. '
                'Please provide a correct encoding.'
            )

        return parser
|
{"/deflacue/cli.py": ["/deflacue/deflacue.py"], "/deflacue/parser.py": ["/deflacue/exceptions.py"], "/tests/test_basic.py": ["/deflacue/deflacue.py", "/deflacue/exceptions.py"], "/deflacue/deflacue.py": ["/deflacue/exceptions.py", "/deflacue/parser.py"]}
|
16,632
|
idlesign/deflacue
|
refs/heads/master
|
/deflacue/exceptions.py
|
class DeflacueError(Exception):
    """Exception type raised by deflacue (base error for the package)."""
class ParserError(Exception):
    """Cue file parser error (e.g. undecodable .cue file)."""
|
{"/deflacue/cli.py": ["/deflacue/deflacue.py"], "/deflacue/parser.py": ["/deflacue/exceptions.py"], "/tests/test_basic.py": ["/deflacue/deflacue.py", "/deflacue/exceptions.py"], "/deflacue/deflacue.py": ["/deflacue/exceptions.py", "/deflacue/parser.py"]}
|
16,633
|
idlesign/deflacue
|
refs/heads/master
|
/tests/test_basic.py
|
import logging
from collections import deque
from pathlib import Path
import pytest
from deflacue.deflacue import CueParser, Deflacue
from deflacue.exceptions import ParserError
class TestParser:
    def test_encoding(self, datafix_dir):
        """Parsing a cp1251 cue: default encoding fails, explicit one succeeds."""
        fpath = datafix_dir / 'vys2.cue'
        with pytest.raises(ParserError):
            CueParser.from_file(fpath)
        parser = CueParser.from_file(fpath, encoding='cp1251')
        cue = parser.run()
        # disc-level REM values merged with MetaContext defaults
        assert cue.meta.data == {
            'GENRE': 'Classic',
            'DATE': '2020',
            'COMMENT': 'Dumped',
            'PERFORMER': 'В. С. Высоцкий',
            'ALBUM': 'Пять песен',
        }
        assert len(cue.files) == 2
        assert str(cue.files[0]) == '01. Сторона А.flac'
        assert len(cue.tracks) == 5
        track = cue.tracks[3]
        assert str(track)
        # INDEX 01 at 02:03:03 -> (2*60+3)*44100 + 3*588 frames
        assert track.start == 5426064
        assert track.end == 11205516
        assert track.data == {
            'ALBUM': 'Пять песен',
            'COMMENT': 'Dumped',
            'DATE': '2020',
            'GENRE': 'Classic',
            'INDEX 01': '02:03:03',
            'PERFORMER': 'В. С. Высоцкий',
            'TITLE': '04. Песня о вещей Кассандре',
            'TRACK_NUM': '4',
        }
        # last track of its file: `end` defaults to 0
        track = cue.tracks[4]
        assert track.start == 11205516
        assert track.end == 0
        assert track.file.path == Path('02. Сторона В.flac')
@pytest.fixture
def sox_mock(monkeypatch):
    """Replace Deflacue._process_command with a recorder that always succeeds."""
    class SoxMock:
        def __init__(self):
            # every shell command string issued through the mock, in order
            self.commands = []
            self.results = deque()
        def process_command(self, command, **kwargs):
            logging.getLogger('deflacue').debug(f'Executing shell command: {command}')
            self.commands.append(command)
            # pretend every shell command exits with status 0
            return 0
    mock = SoxMock()
    monkeypatch.setattr('deflacue.deflacue.Deflacue._process_command', mock.process_command)
    return mock
class TestDeflacue:
    def test_basic(self, datafix_dir, sox_mock, tmp_path, caplog):
        """End-to-end run over the fixtures dir with SoX calls mocked out."""
        caplog.set_level(logging.DEBUG, logger='deflacue')
        dest = tmp_path / 'sub'
        deflacue = Deflacue(
            source_path=str(datafix_dir),
            dest_path=str(dest),
            encoding='cp1251',
        )
        commands = sox_mock.commands
        # availability check issues exactly one shell command (`sox -h`)
        available = deflacue.sox_check_is_available()
        assert available
        assert len(commands) == 1
        deflacue.do(recursive=True)
        assert len(commands) == 6
        # output layout: <dest>/<src dir name>/<performer>/<date - album>
        assert (dest / 'datafixtures' / 'В. С. Высоцкий' / '2020 - Пять песен').exists()
        caplog_text = caplog.text
        assert 'Extracting `5 - 05. История болезни.flac`' in caplog_text
        # missing source audio is reported and the track skipped
        assert 'Еще Не Вечер.flac` is not found.' in caplog_text
        assert '--add-comment="TRACKNUMBER=4"' in caplog_text
|
{"/deflacue/cli.py": ["/deflacue/deflacue.py"], "/deflacue/parser.py": ["/deflacue/exceptions.py"], "/tests/test_basic.py": ["/deflacue/deflacue.py", "/deflacue/exceptions.py"], "/deflacue/deflacue.py": ["/deflacue/exceptions.py", "/deflacue/parser.py"]}
|
16,634
|
idlesign/deflacue
|
refs/heads/master
|
/deflacue/deflacue.py
|
"""
deflacue is a Cue Sheet parser and a wrapper for mighty SoX utility - http://sox.sourceforge.net/.
SoX with appropriate plugins should be installed for deflacue to function.
Ubuntu users may install the following SoX packages: `sox`, `libsox-fmt-all`.
deflacue can function both as a Python module and in command line mode.
"""
import logging
import os
from collections import defaultdict
from pathlib import Path
from subprocess import Popen, PIPE
from typing import List, Dict, Union, Optional
from .exceptions import DeflacueError
from .parser import CueParser
LOGGER = logging.getLogger(__name__)
# Path arguments may be given as plain strings or pathlib.Path objects.
TypePath = Union[str, Path]
COMMENTS_CUE_TO_VORBIS = {
    'TRACK_NUM': 'TRACKNUMBER',
    'TITLE': 'TITLE',
    'PERFORMER': 'ARTIST',
    'ALBUM': 'ALBUM',
    'GENRE': 'GENRE',
    'DATE': 'DATE',
    'ISRC': 'ISRC',
    'COMMENT': 'DESCRIPTION',
}
"""Cue REM commands to Vorbis tags."""
class Deflacue:
    """deflacue functionality is encapsulated in this class.
    Usage example:
        deflacue = Deflacue('/home/idle/cues_to_process/')
        deflacue.do()
    This will search `/home/idle/cues_to_process/` and subdirectories
    for .cue files, parse them and extract separate tracks.
    Extracted tracks are stored in Artist - Album hierarchy within
    `deflacue` directory under each source directory.
    """
    _dry_run = False  # Some lengthy shell command won't be executed on dry run.
    _target_default = 'deflacue'  # name of the default output subdirectory
    def __init__(
        self,
        source_path: TypePath,
        *,
        dest_path: TypePath = None,
        encoding: str = None,
        use_logging: int = logging.INFO
    ):
        """Prepares deflacue for audio processing.
        :param source_path: Absolute or relative to the current directory path,
            containing .cue file(s) or subdirectories with .cue file(s) to process.
        :param dest_path: Absolute or relative to the current directory path
            to store output files in. If None, output files are saved in `deflacue`
            directory in the same directory as input file(s).
        :param encoding: Encoding used for .cue file(s).
        :param use_logging: Defines the verbosity level of deflacue. All messages
            produced by the application are logged with `logging` module.
            Examples: logging.INFO, logging.DEBUG.
        :raises DeflacueError: if `source_path` does not exist.
        """
        src = Path(source_path).absolute()
        self.path_source: Path = src
        self.path_target: Optional[Path] = dest_path
        self.encoding = encoding
        if use_logging:
            self._configure_logging(use_logging)
        LOGGER.info(f'Source path: {src}')
        if not src.exists():
            raise DeflacueError(f'Path `{src}` is not found.')
        if dest_path is not None:
            self.path_target = Path(dest_path).absolute()
        # NOTE(review): changes the process-wide working directory as a side effect.
        os.chdir(src)
    def _process_command(
        self,
        command: str,
        *,
        stdout=None,
        suppress_dry_run: bool = False
    ) -> int:
        """Executes shell command with subprocess.Popen.
        Returns status code.
        :param command: full shell command string (run with shell=True)
        :param stdout: passed through to Popen (e.g. PIPE to silence output)
        :param suppress_dry_run: run the command even in dry-run mode
        """
        LOGGER.debug(f'Executing shell command: {command}')
        if not self._dry_run or suppress_dry_run:
            prc = Popen(command, shell=True, stdout=stdout)
            prc.communicate()
            return prc.returncode
        # dry run: pretend the command succeeded
        return 0
    @classmethod
    def _configure_logging(cls, verbosity_lvl: int = logging.INFO):
        """Switches on logging at given level."""
        logging.basicConfig(level=verbosity_lvl, format='%(levelname)s: %(message)s')
    def _create_target_path(self, path: Optional[Path]):
        """Creates a directory for target files (no-op on dry run or None path)."""
        if self._dry_run or not path:
            return
        LOGGER.debug(f'Creating target path: {path} ...')
        os.makedirs(path, exist_ok=True)
    def set_dry_run(self):
        """Sets deflacue into dry run mode, when all requested actions
        are only simulated, and no changes are written to filesystem.
        """
        self._dry_run = True
    def get_dir_files(self, *, recursive: bool = False) -> Dict[Path, List[Path]]:
        """Creates and returns dictionary of files in source directory.
        :param recursive: if True search is also performed within subdirectories.
        """
        LOGGER.info(f'Enumerating files under the source path (recursive={recursive}) ...')
        files = {}
        if recursive:
            for current_dir, _, dir_files in os.walk(self.path_source):
                # path_source is absolute, so os.walk yields absolute dirs and
                # the `/` join below keeps them unchanged (pathlib semantics).
                path = self.path_source / current_dir
                files[path] = [path / f for f in dir_files]
        else:
            files[self.path_source] = [
                f for f in self.path_source.iterdir()
                if f.is_file()
            ]
        return files
    def filter_target_extensions(self, files_dict: Dict[Path, List[Path]]) -> Dict[Path, List[Path]]:
        """Takes file dictionary created with `get_dir_files` and returns
        dictionary of the same kind containing only files of supported
        types.
        :param files_dict: mapping of directory -> files as from `get_dir_files`
        """
        files_filtered = defaultdict(list)
        LOGGER.info('Filtering .cue files ...')
        paths = files_dict.keys()
        for path in paths:
            # skip output directories produced by previous runs
            if path.name == self._target_default:
                continue
            for f in sorted(files_dict[path]):
                if f.suffix == '.cue':
                    files_filtered[path].append(f)
        return files_filtered
    def sox_check_is_available(self) -> bool:
        """Checks whether SoX is available."""
        result = self._process_command('sox -h', stdout=PIPE, suppress_dry_run=True)
        return result == 0
    def sox_extract_audio(
        self,
        *,
        source_file: Path,
        pos_start_samples: int,
        pos_end_samples: int,
        target_file: Path,
        metadata: Dict[str, str] = None
    ):
        """Using SoX extracts a chunk from source audio file into target.
        :param source_file: Source audio file path
        :param pos_start_samples: Trim position start (samples)
        :param pos_end_samples: Trim position end (samples); 0 means "to end of file"
        :param target_file: Trimmed audio file path
        :param metadata: Additional data (tags) dict.
        """
        LOGGER.info(f' Extracting `{target_file.name}` ...')
        chunk_length_samples = ''
        if pos_end_samples:
            chunk_length_samples = f'{pos_end_samples - pos_start_samples}s'
        add_comment = ''
        if metadata is not None:
            LOGGER.debug(f'Metadata: {metadata}\n')
            # translate cue REM keys into Vorbis comment tags
            for key, val in COMMENTS_CUE_TO_VORBIS.items():
                val_meta = metadata.get(key)
                if val_meta:
                    add_comment = f'--add-comment="{val}={val_meta}" {add_comment}'
        LOGGER.debug(
            'Extraction information:\n'
            f'  Source file: {source_file}\n'
            f'  Start position: {pos_start_samples} samples\n'
            f'  End position: {pos_end_samples} samples\n'
            f'  Length: {chunk_length_samples} sample(s)')
        command = (
            f'sox -V1 "{source_file}" '
            f'--comment="" {add_comment} "{target_file}" '
            f'trim {pos_start_samples}s {chunk_length_samples}'
        )
        self._process_command(command, stdout=PIPE)
    def process_cue(self, *, cue_file: Path, target_path: Path):
        """Parses .cue file, extracts separate tracks.
        :param cue_file: .cue filepath
        :param target_path: path to place files into
        """
        LOGGER.info(f'\nProcessing `{cue_file.name}`\n')
        parser = CueParser.from_file(fpath=cue_file, encoding=self.encoding)
        cue = parser.run()
        cd_info = cue.meta.data
        tracks = cue.tracks
        def sanitize(val: str) -> str:
            # strip path separators so titles cannot escape the target dir
            return val.replace('/', '')
        title = cd_info['ALBUM']
        if cd_info['DATE'] is not None:
            title = f"{cd_info['DATE']} - {title}"
        bundle_path = target_path / sanitize(cd_info['PERFORMER']) / sanitize(title)
        self._create_target_path(bundle_path)
        # zero-pad track numbers to the width of the total track count
        len_tracks_count = len(str(len(tracks)))
        for track in tracks:
            track_file = track.file.path
            if not track_file.exists():
                LOGGER.error(f'Source file `{track_file}` is not found. Track is skipped.')
                continue
            track_num = str(track.num).rjust(len_tracks_count, '0')
            filename = f"{track_num} - {sanitize(track.title)}.flac"
            self.sox_extract_audio(
                source_file=track_file,
                pos_start_samples=track.start,
                pos_end_samples=track.end,
                target_file=bundle_path / filename,
                metadata=track.data
            )
    def do(self, *, recursive: bool = False):
        """Main method processing .cue files in batch.
        :param recursive: if True .cue search is also performed within subdirectories.
        """
        self._create_target_path(self.path_target)
        files_dict = self.filter_target_extensions(self.get_dir_files(recursive=recursive))
        dir_initial = os.getcwd()
        paths = sorted(files_dict.keys())
        for path in paths:
            os.chdir(path)
            LOGGER.info(f"\n{'====' * 10}\nWorking on: {path}\n")
            if self.path_target is None:
                # When a target path is not specified, create `deflacue` subdirectory
                # in every directory we are working at.
                target_path = path / self._target_default
            else:
                # When a target path is specified, we create a subdirectory there
                # named after the directory we are working on.
                target_path = self.path_target / path.name
            self._create_target_path(target_path)
            LOGGER.info(f'Target (output) path: {target_path}')
            for cue in files_dict[path]:
                self.process_cue(cue_file=path / cue, target_path=target_path)
        os.chdir(dir_initial)
        LOGGER.info('We are done. Thank you.\n')
|
{"/deflacue/cli.py": ["/deflacue/deflacue.py"], "/deflacue/parser.py": ["/deflacue/exceptions.py"], "/tests/test_basic.py": ["/deflacue/deflacue.py", "/deflacue/exceptions.py"], "/deflacue/deflacue.py": ["/deflacue/exceptions.py", "/deflacue/parser.py"]}
|
16,639
|
saibunny/fil-shortstory-svm-thesis
|
refs/heads/master
|
/stopwords.py
|
import csv
def loadTagset(filename):
    """Read a one-column CSV file and return the first-column values as a list."""
    with open(filename) as csvfile:
        return [row[0] for row in csv.reader(csvfile)]
# Smoke check: load the tagset and confirm the 'LM' tag is present.
# NOTE(review): Windows-style path separator; presumably run from the repo root.
tagset = loadTagset("data\\tags.csv")
if "LM" in tagset:
    print("yes")
|
{"/preprocess.py": ["/py4jTest.py", "/yandextranslatetest.py", "/senticnettest.py"]}
|
16,640
|
saibunny/fil-shortstory-svm-thesis
|
refs/heads/master
|
/py4jTest.py
|
from py4j.java_gateway import JavaGateway
# gateway = JavaGateway()
#
# tagger = gateway.entry_point.getPost()
#
#
# sentence = "high school. taga-punch, mag-shopping, pala-iyot pala iyot taga-bake taga bake nag i strike nag-i-strike"
# print(tagger.tagPOS(sentence))
class POSTagger:
    """Thin wrapper around a py4j gateway exposing a Java POS tagger.

    NOTE(review): requires the Java gateway server to be running; every call
    crosses into the JVM.
    """
    def __init__(self):
        # connect to the default local py4j gateway and fetch the tagger object
        self.gateway = JavaGateway()
        self.tagger = self.gateway.entry_point.getPost()
    def tagSentence(self, sentence):
        """Return the sentence with POS tags attached by the Java tagger."""
        return self.tagger.tagPOS(sentence)
    def returnTag(self, word):
        """Return only the tag part of a single tagged word.

        Assumes tagger output has the form `word|TAG` -- TODO confirm.
        """
        word = self.tagger.tagPOS(word)
        wordtag = word.split("|")
        return wordtag[1]
# tagger = POSTagger()
#
# print(tagger.returnTag("high"))
|
{"/preprocess.py": ["/py4jTest.py", "/yandextranslatetest.py", "/senticnettest.py"]}
|
16,641
|
saibunny/fil-shortstory-svm-thesis
|
refs/heads/master
|
/spacytest.py
|
from spacy.lang.en import English
# Demo: lemmatize a few English words with a bare spaCy English pipeline.
nlp = English()
samplesent = ["couldn't", "eating", "killed", "loving", "parties"]
for word in samplesent:
    doc = nlp(word)
    # take the lemma of the first (only) token
    word = doc[0].lemma_
    print(word)
|
{"/preprocess.py": ["/py4jTest.py", "/yandextranslatetest.py", "/senticnettest.py"]}
|
16,642
|
saibunny/fil-shortstory-svm-thesis
|
refs/heads/master
|
/yandextranslatetest.py
|
import json, urllib.request
# key = "trnsl.1.1.20180821T035101Z.7622bc974ead6403.f3016199d1f56c33e68de316b816750e09daae43"
#
#
# def toUrlSafe(sentence):
# return sentence.replace(" ", "+")
#
# def translateSentence(sentence):
# sentence = toUrlSafe(sentence)
# with urllib.request.urlopen("https://translate.yandex.net/api/v1.5/tr.json/translate"
# "?key=" + key +
# "&text=" + sentence +
# "&lang=tl-en") as url:
# data = json.loads(url.read().decode())
# print(data)
# #print(data["text"])
# translated = "".join(data['text'])
# return translated
#
# text = "Sinabi ko sa iyo na hindi ako kumakain ng cake"
# print(translateSentence(text))
# #Output is: I told you I'm not eating cake
class Translator:
    """Tagalog-to-English word translator backed by the Yandex Translate API."""
    def __init__(self):
        # SECURITY(review): API key hardcoded in source - move to config/env.
        self.key = "trnsl.1.1.20180821T035101Z.7622bc974ead6403.f3016199d1f56c33e68de316b816750e09daae43"
    def translateWord(self, word):
        """Translate a single word (tl -> en), retrying up to 10 times.

        Returns the lowercased translation; implicitly returns None when all
        attempts fail, so callers must tolerate a None result.
        NOTE(review): `word` is not URL-encoded and the broad `except` retries
        on any failure (network, JSON, key errors) - TODO confirm intent.
        """
        for i in range(10):
            try:
                with urllib.request.urlopen("https://translate.yandex.net/api/v1.5/tr.json/translate"
                                            "?key=" + self.key +
                                            "&text=" + word +
                                            "&lang=tl-en") as url:
                    data = json.loads(url.read().decode())
                    translated = "".join(data['text'])
                    return translated.lower()
            except Exception as e:
                print("Try number " + str(i) + " for word :" + word)
|
{"/preprocess.py": ["/py4jTest.py", "/yandextranslatetest.py", "/senticnettest.py"]}
|
16,643
|
saibunny/fil-shortstory-svm-thesis
|
refs/heads/master
|
/consolidate.py
|
import csv
def loadDataset(filename):
    """Read `filename` as CSV and return all of its rows."""
    with open(filename) as csvfile:
        return list(csv.reader(csvfile))
counti = 0
# For each word-count variant (1..6), merge the 37 per-batch processed CSVs
# into one consolidated CSV.
for i in range(6):
    consolidated = []
    counti = i + 1
    for j in range(37):
        countj = j+1
        # NOTE(review): Windows-style paths; presumably run from the repo root.
        inputdirectory = "data\\validset_batch" + str(countj) + "_processed_wordcount" + str(counti)
        dataset = loadDataset(inputdirectory + ".csv")
        for row in dataset:
            consolidated.append(row)
    outputdirectory = "data\\consolidated_validset_processed_wordcount" + str(counti) + ".csv"
    with open(outputdirectory, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerows(consolidated)
|
{"/preprocess.py": ["/py4jTest.py", "/yandextranslatetest.py", "/senticnettest.py"]}
|
16,644
|
saibunny/fil-shortstory-svm-thesis
|
refs/heads/master
|
/senticnettest.py
|
from senticnet.senticnet import SenticNet
##NOTE TO SELF YOU STILL HAVE TO FIX THE EXCEPTION HERE
class SenticValuer:
    """Look up SenticNet affective values for English words."""
    def getSentics(self, word):
        """Return [pleasantness, attention, sensitivity, aptitude, polarity].

        Unknown words (or any lookup failure) yield a vector of zeros.
        NOTE(review): the broad `except Exception` also hides real errors;
        narrowing it to the lookup failure the library raises is a TODO.
        """
        senticsAndItensity = []
        sn = SenticNet('en')
        try:
            sentics = sn.sentics(word)
            polarity_intensity = sn.polarity_intense(word)
            senticsAndItensity.append(float(sentics['pleasantness']))
            senticsAndItensity.append(float(sentics['attention']))
            senticsAndItensity.append(float(sentics['sensitivity']))
            senticsAndItensity.append(float(sentics['aptitude']))
            senticsAndItensity.append(float(polarity_intensity))
            return senticsAndItensity
        except Exception as e:
            # fall back to neutral values for out-of-vocabulary words
            defaultsentics = [0.0, 0.0, 0.0, 0.0, 0.0]
            return defaultsentics
# ##TESTING AREA
# yas = SenticValuer()
# print(yas.getSentics("awkward"))
|
{"/preprocess.py": ["/py4jTest.py", "/yandextranslatetest.py", "/senticnettest.py"]}
|
16,645
|
saibunny/fil-shortstory-svm-thesis
|
refs/heads/master
|
/preprocess.py
|
#import for csv
import csv
#import for tagger
from py4jTest import POSTagger
#import for translator
from yandextranslatetest import Translator
#import for sentic value tagger
from senticnettest import SenticValuer
#import for english lemmatizer
from spacy.lang.en import English
#import for sorting
from operator import itemgetter
import re
##ACTUAL CODE
##FUNCTION DEFINITIONS
def loadDataset(filename):
    """Parse the CSV file at *filename* into a list of row lists."""
    with open(filename) as csvfile:
        rows = [record for record in csv.reader(csvfile)]
    return rows
def printDataset(dataset):
    """Print each row of *dataset* on its own line."""
    print('\n'.join(str(row) for row in dataset))
def printDataset0(dataset):
    """Print only the first field of every row of *dataset*."""
    for record in dataset:
        print(record[0])
def loadTagset(filename):
    """Read *filename* and return its newline-separated lines as a list.

    Like the original ``split('\\n')``, a trailing newline in the file yields
    a final empty-string entry; callers only do membership tests, where the
    extra '' is harmless.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the previous open(...).read() leaked the handle until GC.
    with open(filename, 'r') as f:
        return f.read().split('\n')
def findAffective(phrase):
    """Return the most affective (largest |polarity|) lemma in *phrase*.

    Returns '' as soon as any token is non-alphabetic after stripping
    non-word characters.
    """
    nlp = English()
    sentic = SenticValuer()
    affective = ''
    highpolarity = 0.0
    words = phrase.split(" ")
    for i in range(len(words)):
        # strip every non-word character before the alphabetic check
        onlychars = re.sub(r'\W+', '', words[i])
        if not onlychars.isalpha():
            return ""
        doc = nlp(onlychars)
        words[i] = doc[0].lemma_
        senticVal = sentic.getSentics(words[i])
        # senticVal[4] is the polarity intensity.
        # NOTE(review): highpolarity stores the *signed* value but is
        # compared against abs(...) — a strong negative word may be
        # displaced by a weaker one; confirm whether this is intended.
        if abs(senticVal[4]) >= highpolarity:
            affective = words[i]
            highpolarity = senticVal[4]
    return affective
def averageSenticValues(row):
    """Average the five sentic components over all word vectors in row[0].

    Returns ``[averaged_vector, label]`` where label is row[1], untouched.
    """
    vectors = row[0]
    count = len(vectors)
    averages = [sum(component) / count for component in zip(*vectors)]
    return [averages, row[1]]
def polarityAbs(senticval):
    """Sort key: absolute polarity intensity (index 4) of a sentic vector."""
    intensity = senticval[4]
    return intensity if intensity >= 0 else -intensity
def onetosix(dataset):
    """Build six datasets keeping the top-1 .. top-6 most polar word vectors.

    For each word count k in 1..6, every row keeps the k sentic vectors with
    the largest absolute polarity intensity (most polar first); the label in
    row[1] is carried over unchanged.
    """
    variants = []
    for k in range(1, 7):
        variant = []
        for j in range(len(dataset)):
            ranked = sorted(dataset[j][0], key=lambda sv: abs(sv[4]), reverse=True)
            variant.append([ranked[:k], dataset[j][1]])
        variants.append(variant)
    return variants
##/FUNCTION DEFINITIONS
#MAIN
# Pipeline part 1: load a batch CSV of [sentence, emotion] rows, POS-tag the
# sentences, split them into [word, tag] pairs, and drop stopwords.
batchnum = 12
# inputdirectory = "data\\validset_batch" + str(batchnum)
# inputdirectory = "data\\sample"
inputdirectory = "data\\testsetpred_batch" + str(batchnum)
print(inputdirectory)
dataset = loadDataset(inputdirectory + ".csv")
#dataset looks like [sentence, emotion]
print("dataset loaded")
tagger = POSTagger()
print("start tagging")
for row in dataset:
    # [:-1] drops the last character of the tagged string — presumably a
    # trailing delimiter added by the tagger; TODO confirm.
    row[0] = tagger.tagSentence(row[0])[:-1]
    # row[0] = row[0][:-1]
#row in dataset looks like [sentence with POStag, emotion]
print("end tagging")
print("start splitting by space")
for row in dataset:
    row[0] = row[0].split(" ")
#row in dataset looks like [ [word|POStag*], emotion]
#should be + (one-or-more) but a Kleene star is used, meaning there can be
#more than one word per row — an array within an array
print("end splitting by space")
print("start splitting words and tags")
for row in dataset:
    for i in range(len(row[0])):
        row[0][i] = row[0][i].split("|")
#dataset looks like [ [ [word,POStag]*], emotion]
#this is now a 3D array
print("end splitting words and tags")
print("start stopword removal")
stopwordset = loadTagset("data\\stopwords.csv")
for row in dataset:
    temp = []
    for wordtag in row[0]:
        # temp = [word for word in wordtag[1] if word in tagset]
        # collect pairs whose word is a stopword ...
        if wordtag[0] in stopwordset:
            temp.append(wordtag)
    # ... then keep only the pairs that were not collected
    row[0] = [word for word in row[0] if word not in temp]
#dataset still looks like the one from earlier except retain most affective POS
print("end stopword removal")
printDataset(dataset)
print("start foreclipping")
# Hyphenated, non-foreign words are split into their parts; a part whose tag
# is not in the tag list replaces the original word/tag pair (the last such
# part wins, since row[0][i] is reassigned on every match).
tagset = loadTagset("data\\tagstop5.csv")
for row in dataset:
    for i in range(len(row[0])):
        # Bug fix: the original used `is not "FW"` / `is not ''`, which
        # compares object identity, not string equality (CPython emits a
        # SyntaxWarning and the result is implementation-dependent).
        if "-" in row[0][i][0] and row[0][i][1] != "FW":
            tempword = row[0][i][0].split("-")
            # drop empty fragments produced by leading/trailing hyphens
            while "" in tempword:
                tempword.remove("")
            temptag = []
            for j in range(len(tempword)):
                if tempword[j] != '':
                    temptag.append(tagger.returnTag(tempword[j]))
                    if temptag[j] not in tagset:
                        tempwordtag = [tempword[j], temptag[j]]
                        row[0][i] = tempwordtag
print("end foreclipping")
print("start filtering POS")
# Pipeline part 2: filter by POS tag, translate to English, lemmatize,
# convert words to sentic vectors, average, and write six output CSVs.
for row in dataset:
    temp = []
    for wordtag in row[0]:
        # temp = [word for word in wordtag[1] if word in tagset]
        # NOTE(review): pairs whose tag IS in tagset are collected and then
        # removed below, yet the trailing comment says "retain" — confirm
        # whether tagstop5.csv lists tags to drop or tags to keep.
        if wordtag[1] in tagset:
            temp.append(wordtag)
    row[0] = [word for word in row[0] if word not in temp]
#dataset still looks like the one from earlier except retain most affective POS
print("end filtering POS")
print("start replacing [word|tag] list by word")
for row in dataset:
    for i in range(len(row[0])):
        row[0][i] = row[0][i][0]
# dataset = [[j.lower() for j in i] for i in dataset]
#dataset now looks like [ [word]*, emotion]
print("end replacing [word|tag] list by word")
print("Start translation")
translator = Translator()
translations = []
count = 0
for row in dataset:
    # '<'/'>' prefixes mark the untranslated/translated sides of the log pair
    untransrow = "<"
    transrow = ">"
    temptransrow = []
    for i in range(len(row[0])):
        untransrow = untransrow + "|" + row[0][i]
        temmie = translator.translateWord(row[0][i])
        transrow = transrow + "|" + temmie
        row[0][i] = temmie
    temptransrow.append(untransrow)
    temptransrow.append(transrow)
    translations.append(temptransrow)
    count = count + 1
    print(str(count) + " " + untransrow + "|||||" + transrow)
print("End translation")
#dataset still looks like the one from before except translated to english
print("Start lemmatization")
#next is lemmatizer
nlp = English()
for row in dataset:
    if row[0]:
        for i in range(len(row[0])):
            if " " in row[0][i]:
                # multi-word translation: keep only its most affective lemma
                row[0][i] = findAffective(row[0][i])
            else:
                doc = nlp(row[0][i])
                row[0][i] = doc[0].lemma_
#dataset still looks like the one from before but lemmatized
print("end lemmatization")
print("start sentic valuing")
#next up is senticnet and keep in mind the blank resulting row[0] make the sentic value for that all 0's
sentic = SenticValuer()
for row in dataset:
    if row[0]:
        for i in range(len(row[0])):
            row[0][i] = sentic.getSentics(row[0][i])
    else:
        # row lost all of its words upstream: use one neutral vector
        row[0] = [[0.0, 0.0, 0.0, 0.0, 0.0]]
#the dataset now looks like [ [sentic values]*, emotion]
print("end sentic valuing")
printDataset(dataset)
print("start averaging")
sixSets = onetosix(dataset)
for i in range(len(sixSets)):
    for j in range(len(sixSets[i])):
        sixSets[i][j] = averageSenticValues(sixSets[i][j])
print("end averaging")
print("start writing to file")
#Write dataset to file
for i in range(len(sixSets)):
    count = i+1
    directory = inputdirectory + "_processed_wordcount" + str(count) + '.csv'
    finalDataset = []
    for j in range(len(sixSets[i])):
        # flatten each row: the five averaged sentic values, the emotion
        # label, then the two translation log strings for traceability
        newRow = []
        newRow.append(sixSets[i][j][0][0])
        newRow.append(sixSets[i][j][0][1])
        newRow.append(sixSets[i][j][0][2])
        newRow.append(sixSets[i][j][0][3])
        newRow.append(sixSets[i][j][0][4])
        newRow.append(sixSets[i][j][1])
        newRow.append(translations[j][0])
        newRow.append(translations[j][1])
        finalDataset.append(newRow)
    with open(directory,'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerows(finalDataset)
print("end writing to file")
print("complete batch #" + str(batchnum))
#/MAIN
|
{"/preprocess.py": ["/py4jTest.py", "/yandextranslatetest.py", "/senticnettest.py"]}
|
16,655
|
jnothman/pickleback
|
refs/heads/master
|
/test_pickleback.py
|
import pickle
import subprocess
def test_with_script(tmpdir):
    """End-to-end check: a figure saved via the pkl backend round-trips.

    Writes a small matplotlib script, runs it once directly and once through
    ``python -m pickleback`` (which saves the figure as a pickle), then
    unpickles the figure, re-renders it, and compares the raw image bytes.
    """
    tmpdir.join('plot.py').write('import sys\n'
                                 'import matplotlib.pyplot as plt\n'
                                 'import matplotlib\n'
                                 'matplotlib.use("Agg")\n'
                                 'plt.scatter([5, 6, 7], [8, 9, 10])\n'
                                 'plt.title("Hello world")\n'
                                 'plt.savefig(sys.argv[1])\n'
                                 )
    script_path = str(tmpdir.join('plot.py'))
    # reference render: run the script directly
    subprocess.check_call(['python', script_path,
                           str(tmpdir.join('plot.raw'))])
    # same script, but pickleback registers the .pkl backend first
    subprocess.check_call(['python', '-m', 'pickleback', script_path,
                           str(tmpdir.join('plot.pkl'))])
    fig = pickle.load(open(str(tmpdir.join('plot.pkl')), 'rb'))
    # FIXME: fig.canvas comes back None. I've not yet understood why/how
    from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
    fig.canvas = FigureCanvas(fig)
    fig.savefig(str(tmpdir.join('plot-via-pkl.raw')))
    expected_bytes = tmpdir.join('plot.raw').read(mode='rb')
    actual_bytes = tmpdir.join('plot-via-pkl.raw').read(mode='rb')
    assert expected_bytes == actual_bytes
|
{"/pickleback/__main__.py": ["/pickleback/__init__.py"]}
|
16,656
|
jnothman/pickleback
|
refs/heads/master
|
/pickleback/backend_pkl.py
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import pickle
from matplotlib.backend_bases import FigureManagerBase, FigureCanvasBase
from matplotlib.figure import Figure
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def new_figure_manager(num, *args, **kwargs):
    """
    Create a new figure manager instance
    """
    # if a main-level app must be created, this (and
    # new_figure_manager_given_figure) is the usual place to
    # do it -- see backend_wx, backend_wxagg and backend_tkagg for
    # examples. Not all GUIs require explicit instantiation of a
    # main-level app (egg backend_gtk, backend_gtkagg) for pylab
    FigureClass = kwargs.pop('FigureClass', Figure)
    thisFig = FigureClass(*args, **kwargs)
    return new_figure_manager_given_figure(num, thisFig)


def new_figure_manager_given_figure(num, figure):
    """
    Create a new figure manager instance for the given figure.
    """
    # wrap the figure in this backend's canvas/manager pair
    canvas = FigureCanvasPickle(figure)
    manager = FigureManagerPickle(canvas, num)
    return manager
class FigureCanvasPickle(FigureCanvasBase):
    """
    The canvas the figure renders into. Not applicable.

    Attributes
    ----------
    figure : `matplotlib.figure.Figure`
        A high-level Figure instance
    """

    # file extensions this backend claims, shown in save dialogs
    filetypes = {}
    filetypes['pkl'] = 'Python pickle format'
    filetypes['pickle'] = 'Python pickle format'

    def draw(self):
        # Nothing to rasterize: "rendering" happens entirely in print_pkl.
        pass

    def print_pkl(self, filename, *args, **kwargs):
        """Serialize the whole Figure object to *filename* with pickle."""
        # Bug fix: use a context manager so the file handle is closed (and
        # the data flushed) deterministically; the original open() call
        # leaked the handle until garbage collection.
        with open(filename, 'wb') as f:
            pickle.dump(self.figure, f)

    print_pickle = print_pkl

    def get_default_filetype(self):
        return 'pkl'
class FigureManagerPickle(FigureManagerBase):
    """
    Wrap everything up into a window for the pylab interface

    For non interactive backends, the base class does all the work
    """
    pass


########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################

# Aliases matplotlib looks up when it loads this module as a backend.
FigureCanvas = FigureCanvasPickle
FigureManager = FigureManagerPickle
|
{"/pickleback/__main__.py": ["/pickleback/__init__.py"]}
|
16,657
|
jnothman/pickleback
|
refs/heads/master
|
/pickleback/__main__.py
|
from __future__ import print_function
import sys
import runpy
from pickleback import register
def main():
    """Console entry point: run a script/module with the pkl backend registered.

    Usage mirrors the interpreter: ``pickleback script.py args...`` or
    ``pickleback -m package.module args...``. Exits with status 1 when no
    target is given.
    """
    if len(sys.argv) == 1:
        # Bug fixes vs. the original: the {prog} placeholders were never
        # substituted (the message literally printed "{prog}"), and two
        # adjacent literals ran "running"/"script" together with no space.
        print('Usage: {prog} /path/to/python-script.py [script-args...]\n'
              'Usage: {prog} -m path.to.module [script-args...]\n'
              '\n'
              'Loads matplotlib backend for pkl extension before running '
              'script'.format(prog=sys.argv[0]))
        sys.exit(1)
    register()
    # shift our own name off argv so the target sees itself as sys.argv[0]
    del sys.argv[0]
    if sys.argv[0] == '-m':
        del sys.argv[0]
        runpy.run_module(sys.argv[0], run_name='__main__')
    else:
        runpy.run_path(sys.argv[0], run_name='__main__')


if __name__ == '__main__':
    main()
|
{"/pickleback/__main__.py": ["/pickleback/__init__.py"]}
|
16,658
|
jnothman/pickleback
|
refs/heads/master
|
/pickleback/__init__.py
|
def register():
    """Register the pickle backend for the 'pkl' and 'pickle' extensions."""
    from matplotlib.backend_bases import register_backend
    description = ('Python pickle format, to be used with caution on a '
                   'single matplotlib version')
    for extension in ('pkl', 'pickle'):
        register_backend(extension, 'pickleback.backend_pkl', description)
|
{"/pickleback/__main__.py": ["/pickleback/__init__.py"]}
|
16,662
|
ddalton002/ddalton002stuff
|
refs/heads/master
|
/CSCI465v5old/firstapp/migrations/0006_suggestion_choice_field.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-12-04 08:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-add the 4-way category choice_field to suggestion (default 'LFG')."""

    dependencies = [
        ('firstapp', '0005_remove_suggestion_choice_field'),
    ]

    operations = [
        migrations.AddField(
            model_name='suggestion',
            name='choice_field',
            field=models.CharField(choices=[('LFG', 'Lfg'), ('LFM', 'Lfm'), ('WTB', 'Wtb'), ('WTS', 'Wts')], default='LFG', max_length=3),
        ),
    ]
|
{"/CSCI465/firstapp/views.py": ["/CSCI465/firstapp/forms.py"]}
|
16,663
|
ddalton002/ddalton002stuff
|
refs/heads/master
|
/CSCI465v5/firstapp/models.py
|
from django.db import models
from django import forms
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
# 3-letter post categories: Looking For Group/Member, Want To Buy/Sell.
CHOICES = (('LFG', 'Lfg',), ('LFM', 'Lfm',),('WTB', 'Wtb',), ('WTS', 'Wts',))


class suggestion(models.Model):
    """A user post: short text, category, and an image with a caption."""
    suggestion = models.CharField(max_length=141)
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    # auto_now=True: timestamp refreshed on every save, not just creation
    authored = models.DateTimeField(auto_now=True)
    # LFG = 'Lfg'
    # LFM = 'Lfm'
    # WTB = 'Wtb'
    # WTS = 'Wts'
    # CHOICES = ((LFG, 'Lfg',), (LFM, 'Lfm',),(WTB, 'Wtb',), (WTS, 'Wts',))
    choice_field = models.CharField(max_length=3, choices=CHOICES,default='LFG')
    image = models.ImageField(max_length=144, upload_to='uploads/%Y/%m/%d/')
    idescription = models.CharField(max_length=144)

    def __str__(self):
        return self.suggestion


class profile(models.Model):
    """Extra per-user fields; kept in sync with User via the signals below."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    bio = models.TextField(null=True, max_length=500, blank=True)
    games = models.CharField(null=True, max_length=140, blank=True)
    birth_date = models.DateField(null=True, blank=True)
    # def __str__(self):
    #     return self.profile


@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    # create the companion profile exactly once, when the User row is new
    if created:
        profile.objects.create(user=instance)


@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    # persist the profile whenever its User is saved
    instance.profile.save()
|
{"/CSCI465/firstapp/views.py": ["/CSCI465/firstapp/forms.py"]}
|
16,664
|
ddalton002/ddalton002stuff
|
refs/heads/master
|
/CSCI465v3/firstapp/views.py
|
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.http import HttpResponse, JsonResponse
from .models import *
from .forms import *
from django.contrib.auth.decorators import login_required
from django.db import transaction
from django.contrib.postgres.search import SearchQuery, SearchRank, SearchVector
from django.views.generic import ListView
# from django.contrib.auth.forms import UserCreationForm
# Create your views here.
def index(request):
    """Front page: every suggestion, newest first, plus an empty post form."""
    form = suggestion_form()
    suggestions = suggestion.objects.all().order_by('-authored')
    to_return = []
    for suggest in suggestions:
        # flatten each model instance into the dict shape default.html expects
        data = {}
        data["suggestion"]=suggest.suggestion
        data["image"]=suggest.image
        data["idescription"]=suggest.idescription
        data["author"]=suggest.author
        # data["comments"]=[]
        data["id"]=suggest.id
        # comments = comment.objects.all().filter(suggestion=suggest).order_by('-authored')
        # for comm in comments:
        #     data["comments"]+=[{"comment":comm.comment, "author":comm.author}]
        # , "comment_form":c_form
        to_return+=[data]
    context = {"suggestions":to_return, "form":form}
    return render(request,"default.html",context)
def page2(request):
    """Render default.html with all suggestion texts joined by spaces."""
    all_text = "".join(sugg.suggestion + " " for sugg in suggestion.objects.all())
    context = {"variable": all_text}
    return render(request, "default.html", context)
def register(request):
    """Sign-up page: create a new User from the registration form.

    On a valid POST the user is saved and we redirect home; an invalid POST
    re-renders the bound form so field errors are shown.
    """
    if request.method == 'POST':
        form = registration_form(request.POST)
        if form.is_valid():
            form.save(commit=True)
            return redirect("/")
    else:
        form = registration_form()
    context = {"form":form}
    return render(request,"register.html",context)
def _category_page(request):
    """Shared body for the lfg/lfm/wtb/wts category listing views.

    NOTE(review): all four original views were byte-identical copies using
    the placeholder filter suggestion='hat'; it is preserved verbatim here.
    Replace it with a per-category choice_field filter when these views are
    wired up for real.
    """
    form = suggestion_form()
    suggestions = suggestion.objects.filter(suggestion='hat')
    to_return = []
    for suggest in suggestions:
        # same flat dict shape as index()
        data = {}
        data["suggestion"] = suggest.suggestion
        data["image"] = suggest.image
        data["idescription"] = suggest.idescription
        data["author"] = suggest.author
        data["id"] = suggest.id
        to_return += [data]
    context = {"suggestions": to_return, "form": form}
    return render(request, "default.html", context)


def lfg(request):
    """Looking For Group listing."""
    return _category_page(request)


def lfm(request):
    """Looking For Member listing."""
    return _category_page(request)


def wtb(request):
    """Want To Buy listing."""
    return _category_page(request)


def wts(request):
    """Want To Sell listing."""
    return _category_page(request)
# Source https://simpleisbetterthancomplex.com/tutorial/2016/07/22/how-to-extend-django-user-model.html
@login_required
def profile(request):
    """View and edit the logged-in user's account and profile fields."""
    if request.method == 'POST':
        user_form = UserForm(request.POST, instance=request.user)
        profile_form = ProfileForm(request.POST, instance=request.user.profile)
        if user_form.is_valid() and profile_form.is_valid():
            user_form.save()
            profile_form.save()
            # redirect-after-POST back to this page
            return redirect('.')
        # else:
        #     messages.error(request, _('Please correct the error below.'))
    else:
        user_form = UserForm(instance=request.user)
        profile_form = ProfileForm(instance=request.user.profile)
    return render(request, 'profile.html', {
        'user_form': user_form,
        'profile_form': profile_form
    })
@login_required
def suggestion_view(request):
    """Create a new suggestion (text + image) for the logged-in user.

    form.save(request) is the custom form method that sets
    author=request.user before persisting.
    """
    if request.method == 'POST':
        if request.user.is_authenticated:
            form = suggestion_form(request.POST, request.FILES)
            if form.is_valid():
                form.save(request)
                return redirect("/")
        else:
            form=suggestion_form()
    else:
        form = suggestion_form()
    context = {"form":form}
    return render(request,"suggest.html",context)
def suggestions(request):
    """JSON endpoint: {"suggestions": [{"suggest": <text>}, ...]}."""
    payload = {
        "suggestions": [{"suggest": item.suggestion}
                        for item in suggestion.objects.all()]
    }
    return JsonResponse(payload)
|
{"/CSCI465/firstapp/views.py": ["/CSCI465/firstapp/forms.py"]}
|
16,665
|
ddalton002/ddalton002stuff
|
refs/heads/master
|
/CSCI465/firstapp/views.py
|
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.http import HttpResponse, JsonResponse
from .models import *
from .forms import *
from django.contrib.auth.decorators import login_required
from django.db import transaction
# from django.contrib.auth.forms import UserCreationForm
# Create your views here.
def index(request):
    """Front page: accept new-suggestion POSTs and list all suggestions.

    NOTE(review): the POST branch saves and then falls through to render
    instead of redirecting, so a browser refresh can re-submit the form.
    """
    if request.method == 'POST':
        form = suggestion_form(request.POST)
        if form.is_valid():
            modentry = suggestion(suggestion=form.cleaned_data['suggestion'])
            modentry.save()
    else:
        form = suggestion_form()
    suggestions = suggestion.objects.all()
    context = {"variable":suggestions, "form":form}
    return render(request,"default.html",context)
def page2(request):
    """Render default.html with every suggestion's text concatenated."""
    parts = []
    for entry in suggestion.objects.all():
        parts.append(entry.suggestion + " ")
    return render(request, "default.html", {"variable": "".join(parts)})
def register(request):
    """Sign-up page: create a new User from the registration form."""
    if request.method == 'POST':
        form = registration_form(request.POST)
        if form.is_valid():
            form.save(commit=True)
            return redirect("/")
    else:
        form = registration_form()
    context = {"form":form}
    return render(request,"register.html",context)
def suggestions(request):
    """Return every suggestion's text as JSON under the "suggestions" key."""
    records = suggestion.objects.all()
    body = {"suggestions": [{"suggest": rec.suggestion} for rec in records]}
    return JsonResponse(body)
@login_required
# @transaction.atomic
def profile(request):
    """View and edit the logged-in user's account and profile fields."""
    if request.method == 'POST':
        user_form = UserForm(request.POST, instance=request.user)
        profile_form = ProfileForm(request.POST, instance=request.user.profile)
        if user_form.is_valid() and profile_form.is_valid():
            user_form.save()
            profile_form.save()
            # redirect-after-POST back to this page
            return redirect('.')
        # else:
        #     messages.error(request, _('Please correct the error below.'))
    else:
        user_form = UserForm(instance=request.user)
        profile_form = ProfileForm(instance=request.user.profile)
    return render(request, 'profile.html', {
        'user_form': user_form,
        'profile_form': profile_form
    })
|
{"/CSCI465/firstapp/views.py": ["/CSCI465/firstapp/forms.py"]}
|
16,666
|
ddalton002/ddalton002stuff
|
refs/heads/master
|
/CSCI465v5old/firstapp/forms.py
|
from django import forms
from django.core.validators import validate_unicode_slug
from firstapp.models import CHOICES
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth.models import User
from .models import *
class suggestion_form(forms.Form):
    """Form for creating a suggestion post: title, image, category, caption.

    NOTE(review): the model's suggestion field allows 141 chars while this
    form caps the title at 140 — confirm which limit is intended.
    """
    suggestion = forms.CharField(label='Post Title', max_length=140)
    image=forms.ImageField(label="Image File")
    # CHOICES = (('LFG', 'Lfg',), ('LFM', 'Lfm',),('WTS', 'Wtb',), ('WTS', 'Wts',))
    choice_field = forms.ChoiceField(widget=forms.Select(), choices=CHOICES)
    image_description=forms.CharField(label="Image Description", max_length=144)

    def save(self, request , commit=True):
        """Build a suggestion model instance from cleaned data.

        *request* supplies the author (request.user); pass commit=False to
        get the unsaved instance back.
        """
        suggest = suggestion()
        suggest.suggestion=self.cleaned_data['suggestion']
        suggest.choice_field=self.cleaned_data['choice_field']
        suggest.image=self.cleaned_data['image']
        suggest.idescription=self.cleaned_data['image_description']
        suggest.author=request.user
        if commit:
            suggest.save()
        return suggest
# work in progress
# CHOICES = (('1', 'LFG',), ('2', 'LFM',), ('3', 'WTB',), ('4', 'WTS',))
# choice_field = forms.ChoiceField(widget=forms.RadioSelect, choices=CHOICES)
class LoginForm(AuthenticationForm):
    """Login form with explicit username/password widgets."""
    username=forms.CharField(
        label="Username",
        max_length=30,
        widget=forms.TextInput(attrs={
            'name':'username'
        })
    )
    password=forms.CharField(
        label="Password",
        max_length=32,
        widget=forms.PasswordInput()
    )


class registration_form(UserCreationForm):
    """User sign-up form: UserCreationForm plus a required email field."""
    email = forms.EmailField(
        label="Email",
        required=True
    )

    class Meta:
        model = User
        fields = ("username", "email",
                  "password1", "password2")

    def save(self, commit=True):
        # copy the email onto the user before saving; the base class's
        # save() does not handle the extra field
        user=super(registration_form,self).save(commit=False)
        user.email=self.cleaned_data["email"]
        if commit:
            user.save()
        return user


class UserForm(forms.ModelForm):
    """Edit the built-in User name/email fields."""
    class Meta:
        model = User
        fields = ('first_name', 'last_name', 'email')


class ProfileForm(forms.ModelForm):
    """Edit the custom profile fields."""
    class Meta:
        model = profile
        fields = ('bio', 'games', 'birth_date')
|
{"/CSCI465/firstapp/views.py": ["/CSCI465/firstapp/forms.py"]}
|
16,667
|
ddalton002/ddalton002stuff
|
refs/heads/master
|
/CSCI465/firstapp/forms.py
|
from django import forms
from django.core.validators import validate_unicode_slug
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth.models import User
from .models import *
class suggestion_form(forms.Form):
    """Single-field form for posting a suggestion."""
    suggestion = forms.CharField(label='Suggestion', max_length=140)


class LoginForm(AuthenticationForm):
    """Login form with explicit username/password widgets."""
    username=forms.CharField(
        label="Username",
        max_length=30,
        widget=forms.TextInput(attrs={
            'name':'username'
        })
    )
    password=forms.CharField(
        label="Password",
        max_length=32,
        widget=forms.PasswordInput()
    )


class registration_form(UserCreationForm):
    """User sign-up form: UserCreationForm plus a required email field."""
    email = forms.EmailField(
        label="Email",
        required=True
    )

    class Meta:
        model = User
        fields = ("username", "email",
                  "password1", "password2")

    def save(self, commit=True):
        # copy the email onto the user before saving; the base class's
        # save() does not handle the extra field
        user=super(registration_form,self).save(commit=False)
        user.email=self.cleaned_data["email"]
        if commit:
            user.save()
        return user


class UserForm(forms.ModelForm):
    """Edit the built-in User name/email fields."""
    class Meta:
        model = User
        fields = ('first_name', 'last_name', 'email')


class ProfileForm(forms.ModelForm):
    """Edit the custom profile fields."""
    class Meta:
        model = profile
        fields = ('bio', 'games', 'birth_date')
|
{"/CSCI465/firstapp/views.py": ["/CSCI465/firstapp/forms.py"]}
|
16,668
|
ddalton002/ddalton002stuff
|
refs/heads/master
|
/CSCI465v5/firstapp/migrations/0005_remove_suggestion_choice_field.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-12-04 08:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the choice_field column from suggestion."""

    dependencies = [
        ('firstapp', '0004_auto_20171204_0035'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='suggestion',
            name='choice_field',
        ),
    ]
|
{"/CSCI465/firstapp/views.py": ["/CSCI465/firstapp/forms.py"]}
|
16,669
|
ddalton002/ddalton002stuff
|
refs/heads/master
|
/CSCI465v5/firstapp/urls.py
|
from django.conf.urls import url
from . import views
# URL table for firstapp; names match the view functions in views.py.
# NOTE(review): only '^$' and '^suggest/' are anchored with '^'; the other
# patterns match anywhere in the path (e.g. 'xxlfg/' also hits lfg) —
# confirm whether the missing anchors are intentional.
urlpatterns = [
    url(r'^$',views.index, name='index'),
    url(r'page2$',views.page2, name='page2'),
    url(r'register/$',views.register, name='register'),
    url(r'suggestions/$',views.suggestions, name='suggestions'),
    url(r'^suggest/$',views.suggestion_view, name='suggest'),
    url(r'profile/$',views.profile, name='profile'),
    url(r'about/$',views.about, name='about'),
    url(r'lfg/$',views.lfg, name='lfg'),
    url(r'lfm/$',views.lfm, name='lfm'),
    url(r'wtb/$',views.wtb, name='wtb'),
    url(r'wts/$',views.wts, name='wts'),
]
|
{"/CSCI465/firstapp/views.py": ["/CSCI465/firstapp/forms.py"]}
|
16,670
|
ddalton002/ddalton002stuff
|
refs/heads/master
|
/CSCI465v5old/firstapp/migrations/0007_remove_suggestion_choice_field.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-12-04 08:46
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the choice_field column added by 0006."""

    dependencies = [
        ('firstapp', '0006_suggestion_choice_field'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='suggestion',
            name='choice_field',
        ),
    ]
|
{"/CSCI465/firstapp/views.py": ["/CSCI465/firstapp/forms.py"]}
|
16,671
|
ddalton002/ddalton002stuff
|
refs/heads/master
|
/venv/bin/django-admin.py
|
#!/home/ddalton86/ddalton002stuff/venv/bin/python3
# Thin venv wrapper: dispatch `django-admin.py <command>` to Django's CLI.
from django.core import management

if __name__ == "__main__":
    management.execute_from_command_line()
|
{"/CSCI465/firstapp/views.py": ["/CSCI465/firstapp/forms.py"]}
|
16,672
|
ddalton002/ddalton002stuff
|
refs/heads/master
|
/CSCI465/firstapp/migrations/0003_auto_20171114_1718.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-15 01:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename-in-effect: drop profile.location, add profile.games."""

    dependencies = [
        ('firstapp', '0002_auto_20171114_1056'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='profile',
            name='location',
        ),
        migrations.AddField(
            model_name='profile',
            name='games',
            field=models.CharField(blank=True, max_length=140, null=True),
        ),
    ]
|
{"/CSCI465/firstapp/views.py": ["/CSCI465/firstapp/forms.py"]}
|
16,673
|
zackhy/statistical-machine-learning
|
refs/heads/master
|
/models/helper.py
|
# -*- coding: utf-8 -*-
import numpy as np
def sigmoid(n):
    """Numerically stable logistic function 1 / (1 + e^-n).

    Works on scalars and numpy arrays. The original form computed
    np.exp(-n), which overflows (RuntimeWarning) for large negative n;
    using exp(-|n|) keeps the exponent non-positive so it can only
    underflow harmlessly to 0.
    """
    z = np.exp(-np.abs(n))
    result = np.where(n >= 0, 1 / (1 + z), z / (1 + z))
    # result[()] unwraps a 0-d array back to a scalar; arrays pass through
    return result[()]
def log_loss(probs, y_true):
    """Mean binary cross-entropy of predicted probabilities vs. 0/1 labels."""
    p = np.asarray(probs)
    y = np.asarray(y_true)
    per_sample = y * np.log(p) + (1 - y) * np.log(1 - p)
    return -per_sample.sum() / len(y)
|
{"/test.py": ["/models/naive_bayes.py"], "/models/naive_bayes.py": ["/models/base.py", "/models/utils.py"], "/models/base.py": ["/models/utils.py"], "/models/logistic_regression.py": ["/models/base.py", "/models/helper.py", "/models/utils.py"]}
|
16,674
|
zackhy/statistical-machine-learning
|
refs/heads/master
|
/test.py
|
import numpy as np
from models.naive_bayes import BernoulliNB as MyBNB
from models.naive_bayes import MultinomialNB as MyMNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
# Smoke test: random count data, comparing the local NB implementations
# against scikit-learn's on training-set accuracy.
# NOTE(review): no fixed RNG seed, so the scores vary run to run.
X = np.random.randint(5, size=(6, 100))
y = np.array([1, 2, 3, 4, 4, 5])

my_clf = MyBNB().fit(X, y)
my_score = my_clf.score(X, y)
clf = BernoulliNB().fit(X, y)
score = clf.score(X, y)
print('My Bernoulli score: {:.3f}\nSklearn Bernoulli score: {:.3f}'.format(my_score, score))

my_clf = MyMNB().fit(X, y)
my_score = my_clf.score(X, y)
clf = MultinomialNB().fit(X, y)
score = clf.score(X, y)
print('My Multinomial score: {:.3f}\nSklearn Multinomial score: {:.3f}'.format(my_score, score))
|
{"/test.py": ["/models/naive_bayes.py"], "/models/naive_bayes.py": ["/models/base.py", "/models/utils.py"], "/models/base.py": ["/models/utils.py"], "/models/logistic_regression.py": ["/models/base.py", "/models/helper.py", "/models/utils.py"]}
|
16,675
|
zackhy/statistical-machine-learning
|
refs/heads/master
|
/models/naive_bayes.py
|
# -*- coding: utf-8 -*-
import numpy as np
from models.base import Base
from models.utils import input_check, target_check, fitted_check
class BernoulliNB(Base):
    """Bernoulli Naive Bayes Classifier that implements the fit(X, y) and predict(T) methods"""

    def fit(self, X, y):
        """
        Fit the Bernoulli Naive Bayes Classifier with input data X and target y
        :param X: Input data. An array-like object. Shape = (n_samples, n_features)
        :param y: Target. An array-like object. Shape = (n_samples, )
        :return: The fitted Bernoulli Naive Bayes Classifier
        """
        X = input_check(X)
        y = target_check(y)
        if np.min(X) < 0:
            raise ValueError('Input features should be greater than or equal to 0')
        # Convert the features to binary
        # NOTE(review): this writes into X in place; whether the caller's
        # array is affected depends on input_check making a copy — confirm.
        if np.max(X) > 1:
            X[X > 1] = 1
        self.uniq_classes_, num_docs = np.unique(y, return_counts=True)
        self.num_features_ = X.shape[1]
        # Compute prior probability for each class
        self.prior_prob_ = np.array([n / len(y) for n in num_docs])
        # Compute document frequencies for each term given a class
        doc_freq = np.vstack([(np.sum(X[y==c, :], axis=0)) for c in self.uniq_classes_])
        # Compute conditional probability for each term given a class.
        # Laplace smoothing: +1 occurrence over +2 (two Bernoulli outcomes).
        self.cond_prob_ = (doc_freq + 1) / (num_docs.reshape(-1, 1) + 2)
        self.fitted = True
        return self

    @fitted_check
    def predict(self, X):
        """
        Use the fitted Bernoulli Naive Bayes Classifier to make predictions
        :param X: Input data. An array-like object. Shape = (n_samples, n_features)
        :return: Predictions. A 1d numpy array. Shape = (n_samples, )
        """
        X = input_check(X)
        if X.shape[1] != self.num_features_:
            raise ValueError('Input X should have a shape of (,{})'.format(self.num_features_))
        preds = []
        for t in X:
            # Compute posterior probability in log-space to avoid underflow
            post_prob = np.log(self.prior_prob_)
            # Bernoulli likelihood per term: p^t * (1-p)^(1-t)
            likelihood = np.log(np.power(self.cond_prob_, t)) + np.log(np.power((1-self.cond_prob_), (1-t)))
            post_prob += np.sum(likelihood, axis=1)
            preds.append(np.argmax(post_prob))
        return np.array(self.uniq_classes_[preds])
class MultinomialNB(Base):
    """Multinomial Naive Bayes Classifier that implements the fit(X, y) and predict(T) methods"""

    def fit(self, X, y):
        """
        Fit the Multinomial Naive Bayes Classifier with input data X and target y
        :param X: Input data. An array-like object. Shape = (n_samples, n_features)
        :param y: Target. An array-like object. Shape = (n_samples, )
        :return: The fitted Multinomial Naive Bayes Classifier
        """
        X = input_check(X)
        y = target_check(y)
        if np.min(X) < 0:
            raise ValueError('Input features should be greater than or equal to 0')
        self.unique_classes_, num_docs = np.unique(y, return_counts=True)
        self.num_features_ = X.shape[1]
        # Compute the prior probability
        self.prior_prob_ = np.array([(n / len(y)) for n in num_docs])
        # Compute the term frequencies for each term given a class
        term_freq = np.vstack([np.sum(X[y == c, :], axis=0) for c in self.unique_classes_])
        # Add one to avoid zero (Laplace smoothing)
        term_freq = term_freq + 1
        # Compute the total term frequencies for each class
        tot_freq = np.sum(term_freq, axis=1)
        # Compute the conditional probability
        self.cond_prob_ = term_freq / tot_freq.reshape(-1, 1)
        self.fitted = True
        return self

    @fitted_check
    def predict(self, X):
        """
        Use the fitted Multinomial Naive Bayes Classifier to make predictions
        :param X: Input data. An array-like object. Shape = (n_samples, n_features)
        :return: Predictions. A 1d numpy array. Shape = (n_samples, )
        """
        X = input_check(X)
        if X.shape[1] != self.num_features_:
            raise ValueError('Input X should have a shape of (?,{})'.format(self.num_features_))
        preds = []
        for t in X:
            # Compute posterior probability in log-space to avoid underflow
            post_prob = np.log(self.prior_prob_)
            post_prob += np.sum(np.log(np.power(self.cond_prob_, t)), axis=1)
            preds.append(np.argmax(post_prob))
        return np.array(self.unique_classes_[preds])
|
{"/test.py": ["/models/naive_bayes.py"], "/models/naive_bayes.py": ["/models/base.py", "/models/utils.py"], "/models/base.py": ["/models/utils.py"], "/models/logistic_regression.py": ["/models/base.py", "/models/helper.py", "/models/utils.py"]}
|
16,676
|
zackhy/statistical-machine-learning
|
refs/heads/master
|
/models/base.py
|
# -*- coding: utf-8 -*-
import numpy as np
from models.utils import input_check, target_check, fitted_check
class Base(object):
    """Common base class for classifiers: defines the fit/predict interface
    and provides accuracy scoring for fitted models."""

    def __init__(self):
        # BUGFIX: the original defined __int__ (the integer-conversion hook),
        # an obvious typo for the constructor __init__.
        pass

    def fit(self, X, y):
        """Train the model. Must be overridden by subclasses."""
        # BUGFIX: the original *returned* the NotImplementedError class
        # instead of raising it, silently succeeding when called.
        raise NotImplementedError

    def predict(self, T):
        """Predict targets for T. Must be overridden by subclasses."""
        raise NotImplementedError

    @fitted_check
    def score(self, X, y_true):
        """
        :param X: Input data. An array-like object. Shape = (n_samples, n_features)
        :param y_true: Target. An array-like object. Shape = (n_samples, )
        :return: Accuracy score.
        """
        X = input_check(X)
        y_true = target_check(y_true)
        preds = self.predict(X)
        # Mean of element-wise equality == fraction of correct predictions.
        return (preds == y_true).mean()
|
{"/test.py": ["/models/naive_bayes.py"], "/models/naive_bayes.py": ["/models/base.py", "/models/utils.py"], "/models/base.py": ["/models/utils.py"], "/models/logistic_regression.py": ["/models/base.py", "/models/helper.py", "/models/utils.py"]}
|
16,677
|
zackhy/statistical-machine-learning
|
refs/heads/master
|
/models/logistic_regression.py
|
# -*- coding: utf-8 -*-
import numpy as np
from models.base import Base
from models.helper import sigmoid, log_loss
from models.utils import input_check, target_check, fitted_check
class LogisticRegression(Base):
    """Implement a simple logistic regression"""

    def __init__(self, learning_rate, max_iter):
        """
        :param learning_rate: Step size for the gradient descent updates.
        :param max_iter: Number of passes (epochs) over the training data.
        """
        self.learning_rate = learning_rate
        self.max_iter = max_iter

    def fit(self, X, y):
        """Fit the model using stochastic gradient descent.

        :param X: Input data. An array-like object. Shape = (n_samples, n_features)
        :param y: Binary targets (0/1). An array-like object. Shape = (n_samples, )
        :return: The fitted model (sets coef_, intercept_, log_loss_).
        """
        X = input_check(X)
        y = target_check(y)
        n_features = X.shape[1]
        # Initialize weights
        coef = np.zeros(n_features)
        intercept = 0
        loss = log_loss(sigmoid(np.matmul(X, coef) + intercept), y)
        # BUGFIX: the original decremented self.max_iter to zero, so a second
        # call to fit() performed no iterations; iterate a local counter instead.
        for _ in range(self.max_iter):
            # Stochastic gradient descent: one update per training sample.
            for x, y_true in zip(X, y):
                # Calculate prediction
                z = np.dot(x, coef) + intercept
                y_pred = sigmoid(z)
                error = y_pred - y_true
                # Calculate gradient
                gradient = x * error
                # Update weights
                coef = coef - self.learning_rate * gradient
                intercept = intercept - self.learning_rate * error
            loss = log_loss(sigmoid(np.matmul(X, coef) + intercept), y)
        self.coef_ = coef
        self.intercept_ = intercept
        self.log_loss_ = loss
        self.fitted = True
        return self

    @fitted_check
    def predict(self, X):
        """Predict binary labels (0.0/1.0) by thresholding the sigmoid at 0.5."""
        X = input_check(X)
        z = np.matmul(X, self.coef_) + self.intercept_
        y_pred = sigmoid(z)
        return np.round(y_pred)
|
{"/test.py": ["/models/naive_bayes.py"], "/models/naive_bayes.py": ["/models/base.py", "/models/utils.py"], "/models/base.py": ["/models/utils.py"], "/models/logistic_regression.py": ["/models/base.py", "/models/helper.py", "/models/utils.py"]}
|
16,678
|
zackhy/statistical-machine-learning
|
refs/heads/master
|
/models/utils.py
|
# -*- coding: utf-8 -*-
import numpy as np
def fitted_check(func):
    """Method decorator: raise AttributeError when the model instance has not
    been fitted (i.e. has no 'fitted' attribute) before running the method.

    :param func: The bound-method-style function to guard.
    :return: The wrapped function.
    """
    # Local import keeps this block self-contained; functools is stdlib.
    from functools import wraps

    # functools.wraps preserves the wrapped method's __name__ and docstring,
    # which the original wrapper clobbered.
    @wraps(func)
    def wrapper(self, *args, **kw):
        if not hasattr(self, 'fitted'):
            raise AttributeError("This model instance is not fitted yet. Call 'fit' first.")
        return func(self, *args, **kw)
    return wrapper
def input_check(X):
    """Coerce X to a numpy array and require it to be 2-dimensional.

    :param X: An array-like object. Shape = (n_samples, n_features)
    :return: X as a numpy ndarray.
    :raises ValueError: if X is not 2-dimensional.
    """
    arr = X if isinstance(X, np.ndarray) else np.array(X)
    if arr.ndim != 2:
        raise ValueError('Input X should be a 2d array-like object. Shape = (n_samples, n_features)')
    return arr
def target_check(y):
    """Coerce y to a numpy array and require it to be 1-dimensional.

    :param y: An array-like object. Shape = (n_samples, )
    :return: y as a numpy ndarray.
    :raises ValueError: if y is not 1-dimensional.
    """
    arr = y if isinstance(y, np.ndarray) else np.array(y)
    if arr.ndim != 1:
        raise ValueError('Input y should be a 1d array-like object. Shape = (n_samples, )')
    return arr
|
{"/test.py": ["/models/naive_bayes.py"], "/models/naive_bayes.py": ["/models/base.py", "/models/utils.py"], "/models/base.py": ["/models/utils.py"], "/models/logistic_regression.py": ["/models/base.py", "/models/helper.py", "/models/utils.py"]}
|
16,679
|
zackhy/statistical-machine-learning
|
refs/heads/master
|
/models/metrics.py
|
# -*- coding: utf-8 -*-
import numpy as np
def confusion_matrix(y_true, y_pred):
    """
    :param y_true: True targets. An array-like object. Shape = (n_samples, )
    :param y_pred: Predicted values. An array-like object. Shape = (n_samples, )
    :return: Confusion matrix as an (n_labels, n_labels) int32 array; rows are
        true labels, columns are predicted labels, in sorted label order.
    :raises ValueError: if shapes differ or y_pred contains unseen labels.
    """
    y_true = np.array(y_true)
    y_pred = np.array(y_pred)
    if y_true.shape != y_pred.shape:
        raise ValueError('y_true and y_pred must have the same shape.')
    labels = np.unique(y_true)
    pred_labels = np.unique(y_pred)
    if not np.isin(pred_labels, labels).all():
        raise ValueError('All the labels in y_pred must be in y_true')
    label_to_index = dict((l, i) for i, l in enumerate(labels))
    # Convert labels to indices into the matrix.
    y_true = [label_to_index.get(l) for l in y_true]
    y_pred = [label_to_index.get(l) for l in y_pred]
    # Confusion matrix: count each (true, predicted) pair.
    cm = np.zeros((len(labels), len(labels)), dtype=np.int32)
    for row, col in zip(y_true, y_pred):
        cm[row][col] += 1
    return cm
|
{"/test.py": ["/models/naive_bayes.py"], "/models/naive_bayes.py": ["/models/base.py", "/models/utils.py"], "/models/base.py": ["/models/utils.py"], "/models/logistic_regression.py": ["/models/base.py", "/models/helper.py", "/models/utils.py"]}
|
16,685
|
aman229/vqa_tensorflow
|
refs/heads/master
|
/VQA.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import random_seed
from collections import Counter
from PIL import Image
import numpy as np
import itertools
import zipfile
import urllib
import json
import os
import re
class VQADataSet(object):
    """
    Base class for the dataset: loads the MSCOCO/VQA question and annotation
    JSON files, builds question/answer vocabularies, and serves
    (question, answer, image) training batches.
    """

    def __init__(self, data_dir='./data/', split="val", top_answers=3000,
                 max_ques_len=15, seed=None):
        """
        :param data_dir: Directory containing the VQA JSON files and images.
        :param split: Dataset split name ('train' or 'val').
        :param top_answers: Size of the answer vocabulary (most frequent answers).
        :param max_ques_len: Questions are truncated/padded to this many tokens.
        :param seed: Optional numpy RNG seed.
        """
        self.data_dir = data_dir
        self.split = split
        self.img_dir = self.data_dir + "{}2014/".format(self.split)
        self.top_answers = top_answers
        self.max_ques_len = max_ques_len
        self._data = self.preprocess_json(self.split)
        self.question_to_index = self.map_to_index(top=None, answer=False)
        self.vocab_size = len(self.question_to_index)
        self.answer_to_index = self.map_to_index(top=self.top_answers)
        self._num_examples = len(self._data)
        self._epochs_completed = 0
        self._index_in_epoch = 0
        self.number_of_questions = len(self._data)
        seed1, seed2 = random_seed.get_seed(seed)
        np.random.seed(seed1 if seed is None else seed2)

    @property
    def data(self):
        return self._data

    @property
    def answers(self):
        # Generator over the answer lists of every example.
        return (x['answers'] for x in self._data)

    @property
    def questions(self):
        # Generator over the tokenized questions of every example.
        return (x['question'] for x in self._data)

    @property
    def img_indices(self):
        # Generator over the MSCOCO image ids of every example.
        return (x['image_id'] for x in self._data)

    def preprocess_json(self, split='train', use_nltk=True):
        """Read the question/annotation JSON files and merge them into a single
        list of question dicts with tokenized questions and cleaned answers."""
        questions_filename = self.data_dir + "OpenEnded_mscoco_{0}2014_questions.json"
        answers_filename = self.data_dir + "mscoco_{0}2014_annotations.json"
        if use_nltk:
            import nltk
            tokenize = nltk.word_tokenize
        else:
            tokenize = lambda x: x.split(' ')
        questions = self._read_json(questions_filename.format(split))['questions']
        # Answers are present as a list of dicts under the 'annotations' key in the
        # resulting dictionary when the json file is read. The following reads the
        # json file, extracts the list of answer dicts, and converts the list into
        # a dict indexed by question_id.
        answers_dict = {x['question_id']: x for x in self._read_json(answers_filename.format(split))['annotations']}
        for item in questions:
            question = item['question']
            # Lowercase and drop the trailing '?' before tokenizing.
            question = tokenize(question.lower()[:-1])
            _id = item['question_id']
            answers = answers_dict.get(_id)['answers']
            # Convert answers from a list of dicts to a plain list of answer
            # strings (confidence and id dropped), with punctuation blanked out.
            punc = r'[;>")<!$.%=#*&/+,@\'?(-]\s*'
            answers = [re.sub(punc, ' ', x) for x in [x['answer'] for x in answers]]
            item['question'] = question
            item['answers'] = answers
        return questions

    def map_to_index(self, top, answer=True):
        """Build a word -> index vocabulary (1-based; 0 left for OOV/padding)
        from the `top` most common answers or question tokens."""
        if answer:
            _data = self.answers
        else:
            _data = self.questions
        x = itertools.chain.from_iterable(_data)
        counts = Counter(x)
        sorted_common = (x[0] for x in counts.most_common(top))
        vocab = {word: index for index, word in enumerate(sorted_common, start=1)}
        return vocab

    def encode_into_vector(self):
        """Replace each example's question tokens with an index vector and its
        answers with a one-hot vector over the answer vocabulary (in place)."""
        for item in self.data:
            q_vec = np.zeros(self.max_ques_len)
            for i, word in enumerate(item['question'][:self.max_ques_len]):
                mapped_index = self.question_to_index.get(word, 0)
                q_vec[i] = mapped_index
            a_vec = np.zeros(self.top_answers)
            counter = Counter(item['answers'])
            most_freq_ans = counter.most_common(1)[0][0]
            # NOTE(review): out-of-vocabulary answers default to index 1, which
            # aliases the most frequent in-vocabulary answer — confirm intended.
            answer_index = self.answer_to_index.get(most_freq_ans, 1)
            a_vec[answer_index - 1] = 1
            item['question'] = q_vec
            item['answers'] = a_vec

    def preprocess_image(self, image_id):
        """Load, rescale, crop and normalize one image.

        NOTE(review): the path hard-codes the 'val' split regardless of
        self.split, and a missing file silently returns None, which breaks
        np.stack in next_batch — confirm both.
        """
        path = '{}COCO_val2014_{:012d}.jpg'.format(self.img_dir, image_id)
        try:
            img = Image.open(path)
            img = self._scale_img_to_dim(img, 448)
            img = self._center_crop(img, 299, 299)
            img = self._normalize_img(img.resize((448, 448), Image.ANTIALIAS))
            return img
        except FileNotFoundError:
            pass

    def return_batch_indices(self, batch_size, shuffle=True):
        """Return the example indices of the next batch, reshuffling between
        epochs when `shuffle` is True."""
        start = self._index_in_epoch
        self._indices = list(range(self._num_examples))
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = np.arange(self._num_examples)
            np.random.shuffle(perm0)
            self._indices = list(perm0)
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            filenames_rest_part = self._indices[start:self._num_examples]
            # Shuffle the data for next epoch
            if shuffle:
                perm = np.arange(self._num_examples)
                np.random.shuffle(perm)
                # BUGFIX: the original indexed self.filenames, an attribute that
                # is never defined (AttributeError at every epoch boundary).
                self._indices = list(perm)
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            filenames_new_part = self._indices[start:end]
            return filenames_rest_part + filenames_new_part
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._indices[start:end]

    def next_batch(self, batch_size):
        """Return the next (questions, answers, images) batch as numpy arrays."""
        batch_indices = self.return_batch_indices(batch_size)
        data = [self.data[i] for i in batch_indices]
        q = np.stack([x['question'] for x in data])
        a = np.stack([x['answers'] for x in data])
        _img = (x['image_id'] for x in data)
        img = np.stack([self.preprocess_image(x) for x in _img])
        return q.astype(np.int64), a.astype(np.float16), img.astype(np.float32)

    def _normalize_img(self, img):
        # Scale to [0, 1], then standardize with ImageNet channel statistics.
        img = np.array(img)
        img = img.astype(np.float32) * (1 / 255.0)
        _mean = [0.485, 0.456, 0.406]
        _std = [0.229, 0.224, 0.225]
        img = (img - _mean) / _std
        return img

    def _scale_img_to_dim(self, img, desired_dim):
        # Resize so the longer side equals desired_dim, preserving aspect ratio.
        # BUGFIX: the resize calls hard-coded 448 instead of desired_dim.
        w, h = img.size
        if w > h:
            ratio = float(desired_dim) / w
            hsize = int(h * ratio)
            img = img.resize((desired_dim, hsize), Image.ANTIALIAS)
        else:
            ratio = float(desired_dim) / h
            wsize = int(w * ratio)
            img = img.resize((wsize, desired_dim), Image.ANTIALIAS)
        return img

    def _center_crop(self, im, new_width, new_height):
        # Crop a centered new_width x new_height box.
        width, height = im.size  # Get dimensions
        left = (width - new_width) / 2
        top = (height - new_height) / 2
        right = (width + new_width) / 2
        bottom = (height + new_height) / 2
        return im.crop((left, top, right, bottom))

    def _read_json(self, file):
        # Small helper: parse one JSON file and return the resulting object.
        with open(file, 'r') as f:
            x = json.load(f)
        return x
def maybe_download_and_extract(data_dir):
    """
    Will download and extract the VQA data automatically.

    :param data_dir: Directory the archives are downloaded into and extracted in.
    """
    # BUGFIX: urllib.urlretrieve is the Python 2 location; this file otherwise
    # targets Python 3 (FileNotFoundError, print()), so use urllib.request.
    from urllib.request import urlretrieve
    if not os.path.exists(data_dir):
        os.makedirs(data_dir)
    # Downloading the questions and answers.
    datasets = ["Questions", "Annotations"]
    splits = ["Train", "Val"]
    for data in datasets:
        for split in splits:
            url = "http://visualqa.org/data/mscoco/vqa/{}_{}_mscoco.zip".format(data, split)
            filename = url.split('/')[-1]
            filepath = os.path.join(data_dir, filename)
            if not os.path.exists(filepath):
                filepath, _ = urlretrieve(url, filepath)
                # Context manager ensures the archive handle is closed.
                with zipfile.ZipFile(filepath, 'r') as zf:
                    zf.extractall(data_dir)
                print('Successfully downloaded and extracted ', filename)
    # Downloading images.
    for split in [x.lower() for x in splits]:
        url = "http://msvocds.blob.core.windows.net/coco2014/{}2014.zip".format(split)
        filename = url.split('/')[-1]
        filepath = os.path.join(data_dir, filename)
        if not os.path.exists(filepath):
            filepath, _ = urlretrieve(url, filepath)
            with zipfile.ZipFile(filepath, 'r') as zf:
                zf.extractall(data_dir)
            print('Successfully downloaded and extracted ', filename)
|
{"/main.py": ["/model.py"], "/model.py": ["/VQA.py"]}
|
16,686
|
aman229/vqa_tensorflow
|
refs/heads/master
|
/main.py
|
from model import Model
import argparse

# Command-line configuration for training the VQA model.
parser = argparse.ArgumentParser(description='')
parser.add_argument('--epoch', dest='epoch', type=int, default=20, help='# of epoch')
parser.add_argument('--batch_size', dest='batch_size', type=int, default=128, help='# images in batch')
# BUGFIX: learning rate and dropout are floats; type=int would reject values
# like '0.001' passed on the command line.
parser.add_argument('--lr', dest='lr', type=float, default=0.001, help='initial learning rate')
parser.add_argument('--state', dest='state', type=int, default=1024, help='LSTM hidden state size')
parser.add_argument('--embed', dest='embed', type=int, default=300, help='Embedding vector size')
parser.add_argument('--drop', dest='drop', type=float, default=0.5, help='Dropout probability')
parser.add_argument('--freq', dest='freq', type=int, default=1024, help='How many top answers')
parser.add_argument('--resnet_features', dest='resnet',
                    default='resnet_ckpt/resnet_v2_152.ckpt',
                    help='Path to resnet pretrained weights')
# BUGFIX: type=bool is broken with argparse (bool('False') is True); make
# --project a plain on/off flag instead.
parser.add_argument('--project', dest='project', action='store_true',
                    help='Project text features instead of tile')
args = parser.parse_args()

# BUGFIX: the original read args.bs, which does not exist (the dest is
# batch_size) and also passed it as init_lr; use batch_size and lr.
vqa_model = Model(batch_size=args.batch_size,
                  init_lr=args.lr,
                  state_size=args.state,
                  embedding_size=args.embed,
                  dropout_prob=args.drop,
                  most_freq_limit=args.freq,
                  resnet_weights_path=args.resnet,
                  project=args.project)
vqa_model.train(args.epoch)
|
{"/main.py": ["/model.py"], "/model.py": ["/VQA.py"]}
|
16,687
|
aman229/vqa_tensorflow
|
refs/heads/master
|
/model.py
|
from resnet_utils import resnet_arg_scope
from resnet import resnet_v2_152
from VQA import VQADataSet
import tensorflow as tf
import time
import os
# TODO add summaries
# TODO add validation
class Model(object):
    """
    TF implementation of "Show, Ask, Attend, and Answer: A Strong Baseline For Visual Question Answering'' [0]
    [0]: https://arxiv.org/abs/1704.03162
    """

    def __init__(self, batch_size, init_lr=0.001, reuse=False, vocabulary_size=None, state_size=1024,
                 embedding_size=300, dropout_prob=0.5, most_freq_limit=3000,
                 summary_dir='./logs/', resnet_weights_path = 'resnet_ckpt/resnet_v2_152.ckpt',
                 project=False):
        """
        :param batch_size: Number of examples per training batch.
        :param init_lr: Initial learning rate for the exponential-decay schedule.
        :param reuse: Whether variable scopes should reuse existing variables.
        :param vocabulary_size: Question vocabulary size; defaults to the dataset's.
        :param state_size: LSTM hidden-state size.
        :param embedding_size: Word-embedding dimensionality.
        :param dropout_prob: Keep probability used by the dropout layers.
        :param most_freq_limit: Size of the answer vocabulary (output classes).
        :param summary_dir: Directory for TensorBoard summaries.
        :param resnet_weights_path: Checkpoint with pretrained ResNet weights.
        :param project: Project text features via deconvolutions instead of tiling.
        """
        self.state_size = state_size
        self.batch_size = batch_size
        self.init_lr = init_lr
        self.reuse= reuse
        self.embedding_size = embedding_size
        self.data = VQADataSet()
        self.vocabulary_size = self.data.vocab_size if vocabulary_size is None else vocabulary_size
        self.dropout_prob = dropout_prob
        self.most_freq_limit = most_freq_limit
        self.summary_dir = summary_dir
        self.resnet_weights_path = resnet_weights_path
        self.project = project
        self.sess = tf.Session()
        self.build_model()
        self._check_resnet_weights()

    def build_model(self):
        """Construct the full TF graph: placeholders, ResNet image features,
        LSTM text features, attention, classifier head, loss and optimizer."""
        print('\nBuilding Model')
        # Creating placeholders for the question and the answer
        self.questions = tf.placeholder(tf.int64, shape=[None, 15], name="question_vector")
        self.answers = tf.placeholder(tf.float32, shape=[None, self.most_freq_limit], name="answer_vector")
        self.images = tf.placeholder(tf.float32, shape=[None, 448, 448, 3], name="images_matrix")
        arg_scope = resnet_arg_scope()
        with tf.contrib.slim.arg_scope(arg_scope):
            resnet_features, _ = resnet_v2_152(self.images, reuse=tf.AUTO_REUSE)
        # L2-normalize the ResNet feature maps along the depth axis.
        depth_norm = tf.norm(resnet_features, ord='euclidean', keepdims=True, axis=3) + 1e-8
        self.image_features = resnet_features/depth_norm
        with tf.variable_scope("text_features") as scope:
            if self.reuse:
                scope.reuse_variables()
            self.word_embeddings = tf.get_variable('word_embeddings',
                                                   [self.vocabulary_size,
                                                    self.embedding_size],
                                                   initializer=tf.contrib.layers.xavier_initializer())
            word_vectors = tf.nn.embedding_lookup(self.word_embeddings, self.questions)
            # Actual (non-padded) question lengths for the dynamic RNN.
            len_word = self._len_seq(word_vectors)
            embedded_sentence = tf.nn.dropout(tf.nn.tanh(word_vectors, name="embedded_sentence"),
                                              keep_prob=self.dropout_prob)
            lstm = tf.nn.rnn_cell.LSTMCell(self.state_size,
                                           initializer=tf.contrib.layers.xavier_initializer())
            _, final_state = tf.nn.dynamic_rnn(lstm, embedded_sentence,
                                               sequence_length=len_word,
                                               dtype=tf.float32)
            # Use the final cell state as the question representation.
            self.text_features = final_state.c
        self.attention_features = self.compute_attention(self.image_features,
                                                         self.text_features)
        with tf.variable_scope("fully_connected") as scope:
            if self.reuse:
                scope.reuse_variables()
            self.fc1 = tf.nn.dropout(tf.nn.relu(self.fc_layer(self.attention_features, 1024, name="fc1")),
                                     keep_prob=self.dropout_prob)
            self.fc2 = self.fc_layer(self.fc1, 3000, name="fc2")
        self.answer_prob = tf.nn.softmax(self.fc2)
        self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.answers,
                                                                              logits=self.fc2))
        self.global_step = tf.Variable(0, name='global_step', trainable=False, dtype=tf.int32)
        self.inc = tf.assign_add(self.global_step, 1, name='increment')
        # Learning rate halves every 10000 steps.
        self.lr = tf.train.exponential_decay(learning_rate=self.init_lr,
                                             global_step=self.global_step,
                                             decay_steps=10000,
                                             decay_rate=0.5,
                                             staircase=True)
        self.optimizer = tf.train.AdamOptimizer(self.lr, beta1=0.9, beta2=0.999, name="optim")

    def train(self, epochs):
        """Run SGD training for `epochs` passes, with ResNet weights frozen.

        :param epochs: Number of training epochs.
        """
        self.saver = tf.train.Saver()
        self.tf_summary_writer = tf.summary.FileWriter(self.summary_dir, self.sess.graph)
        # Loading resnet pretrained weights
        resnet_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="resnet")
        load_resnet = tf.train.Saver(var_list=resnet_vars)
        load_resnet.restore(self.sess, self.resnet_weights_path)
        # Freezing resnet weights: only non-resnet variables are trained.
        train_vars = [x for x in tf.global_variables() if "resnet" not in x.name]
        train_step = self.optimizer.minimize(self.loss, var_list=train_vars,
                                             global_step=self.global_step)
        # Initializing all variables
        print('Initializing variables')
        init_op = tf.global_variables_initializer()
        self.sess.run(init_op)
        self.data.encode_into_vector()
        start_time = time.time()
        print('Starting training')
        for epoch in range(epochs):
            # HACK: hard-coded step count; presumably should be
            # self.data.number_of_questions // self.batch_size — confirm.
            steps = 10
            for idx in range(steps):
                print("Step {:4d} of epoch {:2d}".format(idx, epoch))
                print('\nGetting batches')
                q, a, img = self.data.next_batch(self.batch_size)
                vqa_dict = {self.questions: q, self.answers: a, self.images: img}
                _, cost, _a = self.sess.run([train_step, self.loss, self.inc], feed_dict=vqa_dict)
                print("\nEpoch: [%2d] [%4d/%4d] time: %4.4f, Loss: %.8f"
                      % (epoch, idx, steps,
                         time.time() - start_time, cost))

    def compute_attention(self, image, text):
        """Two-glimpse soft attention over the image features conditioned on
        the text features; returns the concatenated attended image + text."""
        with tf.variable_scope("attention") as scope:
            if self.reuse:
                scope.reuse_variables()
            text_replicated = self._replicate_features(text, (1, 14, 14, 1),
                                                       project=self.project)
            # Now both the features from the resnet and lstm are concatenated along the depth axis
            features = tf.nn.dropout(tf.concat([image, text_replicated], axis=3),
                                     keep_prob=self.dropout_prob)
            conv1 = tf.nn.dropout(self.conv2d_layer(features, filters=512,
                                                    kernel_size=(1,1),
                                                    name="attention_conv1"),
                                  keep_prob=self.dropout_prob)
            conv2 = self.conv2d_layer(conv1, filters=2, kernel_size=(1,1), name="attention_conv2")
            # Flatenning each attention map to perform softmax
            attention_map = tf.reshape(conv2, (self.batch_size, 14*14, 2))
            attention_map = tf.nn.softmax(attention_map, axis=1, name = "attention_map")
            image = tf.reshape(image, (self.batch_size, 196, 2048, 1))
            attention = tf.tile(tf.expand_dims(attention_map, 2), (1, 1, 2048, 1))
            image = tf.tile(image,(1,1,1,2))
            weighted = image * attention
            weighted_average = tf.reduce_mean(weighted, 1)
            # Flatten both glimpses into a single vector
            weighted_average = tf.reshape(weighted_average, (self.batch_size, 2048*2))
            attention_output = tf.nn.dropout(tf.concat([weighted_average, text], 1), self.dropout_prob)
            return attention_output

    def conv2d_layer(self, input_tensor, filters, kernel_size=(3,3), stride=1, name="conv", padding='VALID'):
        """2-D convolution with Xavier-initialized weights and bias add."""
        with tf.variable_scope(name):
            weights = tf.get_variable('conv_weights', [kernel_size[0], kernel_size[1], input_tensor.get_shape()[-1], filters],
                                      initializer=tf.contrib.layers.xavier_initializer())
            biases = tf.get_variable('conv_bias', [filters], initializer=tf.constant_initializer(0.0))
            conv = tf.nn.conv2d(input_tensor, weights, strides=[1, stride, stride, 1], padding=padding)
            conv = tf.nn.bias_add(conv, biases)
            return conv

    def deconv2d_layer(self, input_tensor, filters, output_size,
                       kernel_size=(5,5), stride=2, name="deconv2d"):
        """Transposed convolution producing a (batch, h, w, filters) tensor."""
        with tf.variable_scope(name):
            h, w = output_size
            weights = tf.get_variable('deconv_weights',
                                      shape=[kernel_size[0], kernel_size[1],
                                             filters, input_tensor.get_shape()[-1]],
                                      initializer=tf.contrib.layers.xavier_initializer())
            biases = tf.get_variable('biases', [filters], initializer=tf.constant_initializer(0.0))
            output_dims = [self.batch_size, h, w, filters]
            deconv = tf.nn.conv2d_transpose(input_tensor, weights, strides=[1, stride, stride, 1],
                                            output_shape=output_dims)
            deconv = tf.nn.bias_add(deconv, biases)
            return deconv

    def fc_layer(self, input_tensor, neurons, name="fc"):
        """Fully connected layer (no activation): input @ W + b."""
        with tf.variable_scope(name):
            weights = tf.get_variable('fc_weights', [input_tensor.get_shape()[-1], neurons],
                                      initializer=tf.contrib.layers.xavier_initializer())
            biases = tf.get_variable('fc_biases', [neurons], initializer=tf.constant_initializer(0.0))
            output = tf.matmul(input_tensor, weights) + biases
            return output

    def _len_seq(self, sequence):
        # Sequence lengths: count timesteps whose embedding is not all-zero.
        used = tf.sign(tf.reduce_max(tf.abs(sequence), 2))
        length = tf.reduce_sum(used, 1)
        length = tf.cast(length, tf.int32)
        return length

    def _replicate_features(self, input_features, multiples, project=False):
        """Expand LSTM features to a 4-D spatial map, either by tiling or by a
        learned stack of deconvolutions (when project=True)."""
        x = tf.reshape(input_features, (self.batch_size, 1, 1, self.state_size))
        if not project:
            # Expanding dimensions of LSTM features to 4-D
            replicated = tf.tile(x, multiples)
        else:
            dc1 = self.deconv2d_layer(x, 1024, output_size=(2,2), name="dc1")
            x1 = tf.nn.dropout(dc1, self.dropout_prob)
            dc2 = self.deconv2d_layer(x1, 1536, output_size=(4,4), name="dc2")
            x2 = tf.nn.dropout(dc2, self.dropout_prob)
            dc3 = self.deconv2d_layer(x2, 2048, output_size=(8,8), name="dc3")
            x3 = tf.nn.dropout(dc3, self.dropout_prob)
            dc4 = self.deconv2d_layer(x3, 2048, output_size=(16,16), name="dc4")
            x4 = tf.nn.dropout(dc4, self.dropout_prob)
            replicated = tf.nn.dropout(self.conv2d_layer(x4, 2048, kernel_size=(3,3),
                                                         name="conv_dc4"), 0.5)
        return replicated

    def _check_resnet_weights(self):
        # Download and unpack the pretrained ResNet checkpoint if absent.
        # NOTE(review): shells out to wget/tar; extraction path assumes the
        # archive lands in the working directory — confirm.
        resnet_dir = './resnet_ckpt'
        if not os.path.exists(resnet_dir):
            os.mkdir(resnet_dir)
            url = "http://download.tensorflow.org/models/resnet_v2_152_2017_04_14.tar.gz"
            os.system("wget " + url)
            command = 'tar -xvzf {} -C ./resnet_ckpt/'.format(url.split("/")[-1])
            os.system(command)
|
{"/main.py": ["/model.py"], "/model.py": ["/VQA.py"]}
|
16,707
|
davidengebretson/md2html
|
refs/heads/master
|
/setup.py
|
# Bootstrap setuptools (ez_setup downloads it if missing), then install md2html.
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup
# Single-source the package version from the md2html module itself.
from md2html import __VERSION__

setup(name='md2html',
      author='Gary Campbell',
      author_email='campg2003@gmail.com',
      version=__VERSION__,
      install_requires = ['markdown'],
      py_modules=['md2html'],
      # Installs a `md2html` console command that invokes md2html.main().
      entry_points = {'console_scripts': ['md2html = md2html:main']})
|
{"/setup.py": ["/md2html.py"], "/setupexe.py": ["/md2html.py"]}
|
16,708
|
davidengebretson/md2html
|
refs/heads/master
|
/setupexe.py
|
# py2exe build script: produces a standalone md2html.exe console program.
from distutils.core import setup
import py2exe
from md2html import __VERSION__

setup(name='md2html',
      version=__VERSION__,
      author='Gary Campbell',
      author_email='campg2003@gmail.com',
      description='Convert Markdown to HTML',
      requires=['markdown(>=2.6.6)'],
      console=['md2html.py'],
      options={"py2exe":{
          # Trim unused stdlib modules to shrink the frozen executable.
          "excludes": [
              'pyreadline', 'difflib', 'doctest', 'optparse', 'pickle', 'calendar', 'email', 'ftplib', 'httplib', 'rfc822', 'socket', 'select', 'ssl', '_ssl'] # Exclude standard library
      }}
      )
|
{"/setup.py": ["/md2html.py"], "/setupexe.py": ["/md2html.py"]}
|
16,709
|
davidengebretson/md2html
|
refs/heads/master
|
/md2html.py
|
# 5/23/16 md2html-- convert Markdown to HTML-- for the Audacity JAWS Script project.
__VERSION__ = "1.0.7"
import sys
import os
import os.path
import argparse
import re
import io
import ConfigParser
import markdown
import markdown.extensions
# for py2exe
import markdown.extensions.fenced_code
from markdown.extensions.fenced_code import FencedCodeExtension
import markdown.extensions.toc
from markdown.extensions.toc import TocExtension
# HTML skeleton wrapped around the converted Markdown body.
# Usage: page_template.format(page_title, body_text)
page_template = u'''\
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title>{}</title>
</head>
<body>
{}
</body>
</html>
'''
def msg(s):
#{
    """Print s to standard error (Python 2 print-chevron syntax)."""
    print >>sys.stderr, s
#} # msg
desc = """\
Convert Markdown to HTML"""
def main(opts):
#{
    """Convert a Markdown file to a standalone HTML page.

    Reads per-input defaults (title, toctitle, toclocation) from an optional
    md2html.cfg next to the input file; command-line options override them.

    @param opts: list of command line args-- sys.argv[1:].
    @type opts: list of string"""
    parser = argparse.ArgumentParser(description=desc, argument_default="", fromfile_prefix_chars="@")
    parser.add_argument("-V", "--version", action="version", version="%(prog)s v{}".format(__VERSION__), help="print program version and exit")
    parser.add_argument("-q", "--quiet", dest="verbose", action="store_false", default=True, help="suppress messages")
    parser.add_argument("input", help="input file name, - reads from stdin (default stdin)")
    parser.add_argument("output", help="output file")
    parser.add_argument("-t", "--title", dest="title", help="page title")
    parser.add_argument("-c", "--toc", dest="toc", action="store_true", help="insert a table of contents")
    parser.add_argument("-l", "--toclocation", dest="toclocation", help="a Python regular expression that matches the text before which the TOC is to be placed. If the first character is '+', it is removed and the TOC is placed after the first newline following the start of the matched text. Implies -c")
    parser.add_argument("-T", "--toctitle", dest="toctitle", help="title text shown (in a span) before the TOC, default ''. Implies -c")
    args = parser.parse_args(opts)
    cfg = ConfigParser.SafeConfigParser()
    if args.verbose:
    #{
        msg("md2html v{}: converting {} to {}".format(__VERSION__, args.input, args.output))
    #} # if verbose
    # Option values may come from the command line or from md2html.cfg.
    toc_title = ""
    page_title = ""
    toclocation = ""
    if args.input and args.input != "-":
    #{
        cfgfile = os.path.dirname(args.input)
    #} # if input
    else:
    #{
        cfgfile = os.getcwd()
    #} # no input file
    cfgfile = os.path.join(cfgfile, "md2html.cfg")
    #msg("Reading config file {}".format(cfgfile) # debug)
    if os.path.exists(cfgfile):
    #{
        with io.open(cfgfile, mode='rt', encoding='utf-8-sig') as cfp:
        #{
            try:
            #{
                cfg.readfp(cfp)
            #}
            except ConfigParser.Error as e:
            #{
                msg("md2html: Error reading config file: {}".format(str(e)))
                sys.exit(1)
            #} # except
        #} # with
    #} # if cfgfile exists
    # The config section for an input file is named after its basename.
    cfgsection = ""
    if args.input and args.input != "-":
    #{
        cfgsection = os.path.basename(args.input)
    #}
    if cfgsection:
    #{
        if cfg.has_section(cfgsection):
        #{
            #msg("cfg has section {}".format(cfgsection))
            try:
            #{
                toc_title = cfg.get(cfgsection, "toctitle")
            #} # try
            except ConfigParser.NoOptionError:
            #{
                pass
            #} # except
            try:
            #{
                toclocation = cfg.get(cfgsection, "toclocation")
            #} # try
            except ConfigParser.NoOptionError:
            #{
                pass
            #} # except
            try:
            #{
                page_title = cfg.get(cfgsection, "title")
            #} # try
            except ConfigParser.NoOptionError:
            #{
                pass
            #} # except
        #} # if has_section
    #} # if cfgsection
    # Command-line values override config-file values; -l/-T imply -c.
    if args.toctitle: toc_title = args.toctitle
    if args.title: page_title = args.title
    toc = args.toc
    if args.toclocation: toclocation = args.toclocation
    if toclocation or toc_title: toc = True
    # input file
    if args.input and args.input != "-":
    #{
        f = io.open(args.input, mode="rt", encoding="utf-8-sig")
    #}
    else:
    #{
        f = io.open(sys.stdin.fileno(), mode="rt", encoding="utf-8")
    #}
    # output file
    fout = io.open(args.output, mode="wt", encoding="utf-8")
    # I don't know why, but if I write this encoded I get an extra CR. I would think writing in binary mode would produce UNIX-style line endings, but on my Windows machine it doesn't.
    #fout = io.open(sys.stdout.fileno(), mode="wb")
    try:
    #{
        s = f.read()
    #} # try
    except UnicodeDecodeError as e:
    #{
        msg("md2html: UnicodeDecodeError in {}: {}".format(f.name, str(e)))
        sys.exit(1)
    #} # except
    finally:
    #{
        f.close()
    #} # finally
    if toc:
    #{
        # Insert the [TOC] marker where the TocExtension should render it.
        aftertoc = False
        if toclocation.startswith("+"):
        #{
            aftertoc = True
            toclocation = toclocation[1:]
        #} # if
        if not toclocation: toclocation = "^# "
        m = re.search(toclocation, s, re.M)
        if not m:
        #{
            msg("md2html: TOC location not found, disabling toc option. Do your headings start in column 1?")
            toc = False
        #}
        else:
        #{
            # toclocation found.
            tocstart = m.start()
            if aftertoc:
            #{
                i = s.find("\n", tocstart)
                if i > -1: tocstart = i + 1
            #} # if tocstart
            s2 = s[:tocstart] + "[TOC]\n" + s[tocstart:]
        #} # else toclocation found
    #} # if toc
    # toc may have been cleared if toclocation not found.
    if not toc:
    #{
        s2 = s
    #} # if not toc
    #print s2 # debug
    #print "-- after s2" # debug
    # convert
    extensions = [FencedCodeExtension()]
    if toc:
    #{
        extensions.append(TocExtension(title=toc_title))
    #}
    html = markdown.markdown(s2, extensions=extensions)
    try:
    #{
        fout.write(page_template.format(page_title, html))
    #} # try
    except UnicodeEncodeError as e:
    #{
        msg("md2html: UnicodeEncodeError writing output for {}: {} (mode for output file is {})".format(f.name, str(e), fout.mode))
        sys.exit(1)
    #} except
    finally:
    #{
        fout.close()
    #} # finally
#} # main
if __name__ == "__main__":
#{
main(sys.argv[1:])
#} # if __main__
|
{"/setup.py": ["/md2html.py"], "/setupexe.py": ["/md2html.py"]}
|
16,740
|
shanmuk184/SalesCrmbackend
|
refs/heads/master
|
/api/stores/base.py
|
import datetime
class BaseStoreModel:
def __init__(self):
self._data_dict = {}
class BaseProperties:
CreatedAt = 'created_at'
UpdatedAt = 'updated_at'
_reverseMapping = {}
class PropertyNames:
pass
class ReverseMapping:
pass
@property
def datadict(self):
_flat_dict = {}
for key in self._data_dict:
value = self._data_dict.get(key)
if isinstance(value, BaseStoreModel):
_flat_dict[key] = value.datadict
elif isinstance(value, list):
entries = []
for listItem in value:
if isinstance(listItem, BaseStoreModel):
entries.append(listItem.datadict)
else:
entries.append(listItem)
_flat_dict[key] = entries
else:
_flat_dict[key] = value
if not self.BaseProperties.CreatedAt not in _flat_dict:
_flat_dict[self.BaseProperties.CreatedAt]=datetime.datetime.now()
if not self.BaseProperties.UpdatedAt not in _flat_dict:
_flat_dict[self.BaseProperties.UpdatedAt] = datetime.datetime.now()
return _flat_dict
def populate_data_dict(self, dictParam=None):
_flat_dict=dictParam
if self._reverseMapping:
for key in _flat_dict:
reverseMapping = self._reverseMapping.get(key)
value = dictParam[key]
if reverseMapping:
if len(reverseMapping) == 2:
if not isinstance(value, reverseMapping[1]):
value = reverseMapping[1](dictParam[key])
self.set_value(key, value)
elif len(reverseMapping) == 3:
valueList = dictParam[key]
values = []
for value in valueList:
holding_container = reverseMapping[2]()
holding_container.populate_data_dict(value)
values.append(holding_container)
self.set_value(key, values)
else:
self.set_value(key, value)
def __getattr__(self, item):
item_key = [key for key in self._reverseMapping if self._reverseMapping[key][0] ==item]
if item_key:
return self._data_dict.get(item_key[0])
# return super().__g(item)
def __setattr__(self, item, value):
item_key = [key for key in self._reverseMapping if self._reverseMapping[key][0] == item]
if item_key:
self._data_dict[item_key[0]] = value
super().__setattr__(item, value)
# For future
# class BaseApiModel(object):
# def __init__(self):
# self._data_dict = {}
# def populate_data_dict(self, dictParam):
# if not dictParam:
# raise NotImplementedError()
# missing_fields = list(set(list(self._fields)).difference(set(dictParam.keys())))
# if missing_fields:
# raise ValueError(missing_fields)
# for key in dictParam:
# self._data_dict[key] = dictParam.get(key)
#
def get_value(self, key):
if not key:
raise NotImplementedError()
return self._data_dict.get(key)
    def set_value(self, key, value):
        # Raw write into the backing dict; unlike get_value, no key validation.
        self._data_dict[key] = value
_fields = ()
|
{"/api/models/Product.py": ["/api/stores/product.py", "/api/models/base.py"], "/api/models/group.py": ["/api/stores/group.py", "/api/stores/user.py", "/api/models/base.py", "/api/core/group.py"], "/app.py": ["/urls.py"], "/api/models/base.py": ["/api/core/group.py"], "/api/stores/product.py": ["/api/stores/base.py"], "/api/stores/group.py": ["/api/stores/base.py", "/api/stores/product.py"], "/tests/test_user_store.py": ["/api/stores/user.py", "/app.py"], "/api/handlers/group.py": ["/api/models/group.py"], "/api/models/event.py": ["/api/models/base.py"], "/api/models/user.py": ["/api/stores/user.py", "/api/stores/group.py", "/api/models/base.py"], "/api/stores/user.py": ["/api/stores/base.py"], "/api/handlers/user.py": ["/api/models/user.py"], "/urls.py": ["/api/handlers/user.py", "/api/handlers/group.py"], "/api/core/group.py": ["/api/stores/group.py", "/api/stores/user.py"]}
|
16,741
|
shanmuk184/SalesCrmbackend
|
refs/heads/master
|
/api/models/Product.py
|
from tornado.gen import *
from api.core.product import ProductHelper
from api.stores.product import Product
from api.models.base import BaseModel
class ProductModel(BaseModel):
    """Model layer for product operations; persistence goes through ProductHelper."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._ph = ProductHelper(**kwargs)
    @coroutine
    def create_product_for_group(self, productDict):
        """Build a Product store object from *productDict* and persist it.

        NOTE(review): `raise Return((False))` returns the bare value False
        (the parentheses do not form a tuple), and the created product is
        never returned to the caller -- confirm that is the intended contract.
        """
        if not productDict:
            raise Return((False))
        product = Product()
        product.Name = productDict.get(product.PropertyNames.Name)
        product.ProductCode = productDict.get(product.PropertyNames.ProductCode)
        # NOTE(review): Product.PropertyNames does not declare ProductId in
        # api/stores/product.py as written, so this line can raise
        # AttributeError -- verify against the store class.
        product.ProductId = productDict.get(product.PropertyNames.ProductId)
        product.GroupId = productDict.get(product.PropertyNames.GroupId)
        product_result = yield self._ph.create_product(product.datadict)
|
{"/api/models/Product.py": ["/api/stores/product.py", "/api/models/base.py"], "/api/models/group.py": ["/api/stores/group.py", "/api/stores/user.py", "/api/models/base.py", "/api/core/group.py"], "/app.py": ["/urls.py"], "/api/models/base.py": ["/api/core/group.py"], "/api/stores/product.py": ["/api/stores/base.py"], "/api/stores/group.py": ["/api/stores/base.py", "/api/stores/product.py"], "/tests/test_user_store.py": ["/api/stores/user.py", "/app.py"], "/api/handlers/group.py": ["/api/models/group.py"], "/api/models/event.py": ["/api/models/base.py"], "/api/models/user.py": ["/api/stores/user.py", "/api/stores/group.py", "/api/models/base.py"], "/api/stores/user.py": ["/api/stores/base.py"], "/api/handlers/user.py": ["/api/models/user.py"], "/urls.py": ["/api/handlers/user.py", "/api/handlers/group.py"], "/api/core/group.py": ["/api/stores/group.py", "/api/stores/user.py"]}
|
16,742
|
shanmuk184/SalesCrmbackend
|
refs/heads/master
|
/api/models/group.py
|
from api.stores.group import *
from api.stores.user import User, GroupMapping, UserStatus, LinkedAccount, LinkedAccountType
from api.stores.employee import Employee, SupportedRoles, GroupStatus
from tornado.gen import *
from api.core.user import UserHelper
from api.models.base import BaseModel
from api.core.group import GroupHelper
import uuid
import datetime
class GroupModel(BaseModel):
    """Model layer for group and employee-profile operations."""

    @coroutine
    def create_group(self, groupDict, userId=None, type=None):
        """Create a group owned by *userId* and grant that user an admin profile.

        :param groupDict: request dict keyed by CreateGroupRequestParams.
        :param userId: owner id; defaults to the authenticated user's id.
        :param type: explicit group type; falls back to the request dict.
        """
        if not userId:
            userId = self._user.UserId
        group = Group()
        group.Name = groupDict.get(CreateGroupRequestParams.Name)
        # Bug fix: the explicit `type` argument was previously ignored, so the
        # typed wrapper methods below could never set their group type.
        group.Type = type if type is not None else groupDict.get(CreateGroupRequestParams.Type)
        group.OwnerId = userId
        # membermappings = [self._gh.create_member_mapping(userId, [SupportedRoles.Admin])]
        # group.MemberMappings = membermappings
        group.set_value(group.PropertyNames.CreatedTimeStamp, datetime.datetime.now())
        group.set_value(group.PropertyNames.UpdatedTimeStamp, datetime.datetime.now())
        group.Id = uuid.uuid4()
        yield self._gh.create_group_for_user(group.datadict)
        yield self.create_employee_profile(userId, group.Id, SupportedRoles.Admin)
    #
    # @coroutine
    # def create_employee(self, employeeDict, addedby=None):
    #     # create member mapping for employee
    #     employeeId = employeeDict.get('employee')
    #     membermappings = [self._gh.create_member_mapping(userId, [SupportedRoles.Admin])]
    #
    # Pharmaceutical distributor is a kind of group that don't have link to Pharmacy company only reverse link
    @coroutine
    def create_pharmaceutical_distributor(self, **kwargs):
        """Create a pharma-distributor group owned by the current user."""
        # Bug fix: the coroutine must be yielded; previously the raw future
        # was returned and the group was never actually created.
        # NOTE(review): GroupType in api/stores/group.py defines no
        # PharmaDistributor member -- reconcile the enum before use.
        group = yield self.create_group(kwargs, self._user.UserId, GroupType.PharmaDistributor)
        raise Return(group)

    @coroutine
    def create_employee_profile(self, userId, groupId, role):
        """Create the Employee record linking *userId* to *groupId*.

        Admins are created directly in Joined state; everyone else starts
        out Invited.
        """
        employee = Employee()
        employee.Id = uuid.uuid4()
        employee.UserId = userId
        employee.GroupId = groupId
        employee.Role = role
        if role == SupportedRoles.Admin:
            employee.Status = GroupStatus.Joined
        else:
            employee.Status = GroupStatus.Invited
        employee.CreatedTimeStamp = datetime.datetime.utcnow()
        employee.UpdatedTimeStamp = datetime.datetime.utcnow()
        yield self._uh.save_user(employee.datadict)

    # Dummy team for employees
    @coroutine
    def create_employee_team(self, **kwargs):
        """Create an employee-team group owned by the current user."""
        # Bug fix: yield the coroutine (see create_pharmaceutical_distributor).
        # NOTE(review): GroupType defines no EmployeeTeam member -- confirm.
        group = yield self.create_group(kwargs, self._user.UserId, GroupType.EmployeeTeam)
        raise Return(group)

    def populate_group_response(self, group):
        """Project a Group store object into the public response shape."""
        returning_dict = {}
        returning_dict[GroupResponse.Name] = group.Name
        returning_dict[GroupResponse.Id] = str(group.Id)
        returning_dict[GroupResponse.MemberCount] = len(group.MemberMappings)
        return returning_dict

    @coroutine
    def create_invited_state_user(self, invitedDict, groupId):
        """Create an Invited-state User carrying a native login credential."""
        employee = User()
        employee.Name = invitedDict.get(CreateEmployeeRequestParams.Name)
        employee.Phone = invitedDict.get(CreateEmployeeRequestParams.Phone)
        employee.UserId = uuid.uuid4()
        account = LinkedAccount()
        password = yield self.get_hashed_password(invitedDict.get(CreateEmployeeRequestParams.Password))
        account.AccountName = employee.Phone
        account.AccountHash = password.get('hash')
        account.AccountType = LinkedAccountType.Native
        # Bug fix: the credential was built but never attached to the user,
        # so invited users were persisted with no way to log in (mirrors
        # UserModel.create_admin_user).
        employee.LinkedAccounts = [account]
        employee.Status = UserStatus.Invited
        employee.EmailValidated = False
        employee.CreatedTimeStamp = datetime.datetime.now()
        employee.UpdatedTimeStamp = datetime.datetime.now()
        yield self._uh.save_user(employee.datadict)
        raise Return(employee)

    @coroutine
    def get_groups_where_user_is_owner(self, userId):
        """Return (True, [group responses]) or (False, error message)."""
        if not userId:
            userId = self._user.UserId
        try:
            (groups) = yield self._gh.get_groups_where_user_is_owner(userId)
            groupsToReturn = list(map(lambda x: self.populate_group_response(x), groups))
        except Exception as e:
            raise Return((False, str(e)))
        else:
            raise Return((True, groupsToReturn))

    # @coroutine
    # def create_unique_invitation_code(self):
    #     invitation_codes = yield self._uh.get_all_invitation_codes()
    @coroutine
    def create_employee(self, groupId, employeeDict):
        """
        Employee added by admin.
        This method takes input from admin and creates user profile and
        creates membermapping for corresponding group.
        employeeDict should contain
        name,
        designation
        emailid
        :return:
        """
        if not isinstance(groupId, uuid.UUID):
            groupId = uuid.UUID(groupId)
        if not (employeeDict and groupId):
            raise NotImplementedError()
        employee = yield self.create_invited_state_user(employeeDict, groupId)
        yield self.create_employee_profile(employee.UserId, groupId, SupportedRoles.SalesRep)
        raise Return({'status':'success'})
|
{"/api/models/Product.py": ["/api/stores/product.py", "/api/models/base.py"], "/api/models/group.py": ["/api/stores/group.py", "/api/stores/user.py", "/api/models/base.py", "/api/core/group.py"], "/app.py": ["/urls.py"], "/api/models/base.py": ["/api/core/group.py"], "/api/stores/product.py": ["/api/stores/base.py"], "/api/stores/group.py": ["/api/stores/base.py", "/api/stores/product.py"], "/tests/test_user_store.py": ["/api/stores/user.py", "/app.py"], "/api/handlers/group.py": ["/api/models/group.py"], "/api/models/event.py": ["/api/models/base.py"], "/api/models/user.py": ["/api/stores/user.py", "/api/stores/group.py", "/api/models/base.py"], "/api/stores/user.py": ["/api/stores/base.py"], "/api/handlers/user.py": ["/api/models/user.py"], "/urls.py": ["/api/handlers/user.py", "/api/handlers/group.py"], "/api/core/group.py": ["/api/stores/group.py", "/api/stores/user.py"]}
|
16,743
|
shanmuk184/SalesCrmbackend
|
refs/heads/master
|
/app.py
|
import tornado.ioloop
import tornado.web
from urls import urlpatterns
from db.db import Database
from config.config import Settings
from tornado_swirl.swagger import Application, describe
from tornado_swirl import api_routes
from tornado.options import define, options
settings = Settings()
# Register swagger metadata used by tornado_swirl for the generated API docs.
describe(title='UMS API', description='Manages User Operations')
# NOTE(review): the default host string ends with ':' and no port -- confirm
# the consumer of the mongo_host option appends the port itself.
define('mongo_host', default='127.0.0.1:')
class MyApplication(object):
    """Bootstraps the tornado application and its MongoDB connection."""

    def __init__(self):
        self.database = Database()
        self.initiateApp()

    def initiateApp(self):
        """Build the application and bind it to port 8888."""
        self.make_app().listen(8888)

    def make_app(self):
        """Return a configured tornado Application backed by motor."""
        connection = self.database.get_motor_connection()
        return tornado.web.Application(
            api_routes(),
            db=connection,
            cookie_secret=settings.CookieSecret,
            debug=settings.Debug,
            key_version=settings.KeyVersion,
            version=settings.Version,
            login_url='api/login',
        )
class MainHandler(tornado.web.RequestHandler):
    """Trivial root handler usable as a liveness check."""
    def get(self):
        # Static greeting; no template, auth, or database involved.
        self.write("Hello, world")
if __name__ == "__main__":
    # Construct the app (which binds port 8888) and hand control to the loop.
    app = MyApplication()
    print("server is running on 8888")
    # Bug fix: a trailing `io_loop = tornado.ioloop.IOLoop.instance()` after
    # start() was dead code (start() blocks until the loop stops) and used
    # the deprecated IOLoop.instance() API; removed.
    tornado.ioloop.IOLoop.current().start()
|
{"/api/models/Product.py": ["/api/stores/product.py", "/api/models/base.py"], "/api/models/group.py": ["/api/stores/group.py", "/api/stores/user.py", "/api/models/base.py", "/api/core/group.py"], "/app.py": ["/urls.py"], "/api/models/base.py": ["/api/core/group.py"], "/api/stores/product.py": ["/api/stores/base.py"], "/api/stores/group.py": ["/api/stores/base.py", "/api/stores/product.py"], "/tests/test_user_store.py": ["/api/stores/user.py", "/app.py"], "/api/handlers/group.py": ["/api/models/group.py"], "/api/models/event.py": ["/api/models/base.py"], "/api/models/user.py": ["/api/stores/user.py", "/api/stores/group.py", "/api/models/base.py"], "/api/stores/user.py": ["/api/stores/base.py"], "/api/handlers/user.py": ["/api/models/user.py"], "/urls.py": ["/api/handlers/user.py", "/api/handlers/group.py"], "/api/core/group.py": ["/api/stores/group.py", "/api/stores/user.py"]}
|
16,744
|
shanmuk184/SalesCrmbackend
|
refs/heads/master
|
/api/models/base.py
|
from api.core.user import UserHelper
from api.core.group import GroupHelper
from tornado.gen import coroutine, Return
import bcrypt
class BaseModel(object):
    """Shared base for model classes: wires up db, user and helper objects."""

    def __init__(self, **kwargs):
        db = kwargs.get('db')
        if not db:
            raise ValueError('db should be present')
        # Normalise a falsy user to None; anonymous models still get helpers
        # constructed from the bare db handle.
        self._user = kwargs.get('user') or None
        self.db = db
        helper_kwargs = kwargs if self._user else {'db': db}
        self._gh = GroupHelper(**helper_kwargs)
        self._uh = UserHelper(**helper_kwargs)

    @coroutine
    def get_hashed_password(self, plain_text_password:str):
        """Return {'hash': bcrypt hash} for *plain_text_password* (cost 12)."""
        if not plain_text_password:
            raise NotImplementedError()
        hashed = bcrypt.hashpw(plain_text_password.encode('utf-8'), bcrypt.gensalt(12))
        raise Return({'hash': hashed})

    @coroutine
    def check_hashed_password(self, text_password, hashed_password):
        """Return True when *text_password* matches the stored bcrypt hash."""
        matches = bcrypt.checkpw(text_password.encode('utf-8'), hashed_password)
        raise Return(matches)
|
{"/api/models/Product.py": ["/api/stores/product.py", "/api/models/base.py"], "/api/models/group.py": ["/api/stores/group.py", "/api/stores/user.py", "/api/models/base.py", "/api/core/group.py"], "/app.py": ["/urls.py"], "/api/models/base.py": ["/api/core/group.py"], "/api/stores/product.py": ["/api/stores/base.py"], "/api/stores/group.py": ["/api/stores/base.py", "/api/stores/product.py"], "/tests/test_user_store.py": ["/api/stores/user.py", "/app.py"], "/api/handlers/group.py": ["/api/models/group.py"], "/api/models/event.py": ["/api/models/base.py"], "/api/models/user.py": ["/api/stores/user.py", "/api/stores/group.py", "/api/models/base.py"], "/api/stores/user.py": ["/api/stores/base.py"], "/api/handlers/user.py": ["/api/models/user.py"], "/urls.py": ["/api/handlers/user.py", "/api/handlers/group.py"], "/api/core/group.py": ["/api/stores/group.py", "/api/stores/user.py"]}
|
16,745
|
shanmuk184/SalesCrmbackend
|
refs/heads/master
|
/api/stores/product.py
|
from api.stores.base import BaseStoreModel
from bson import ObjectId
class Product(BaseStoreModel):
    """Store model for a product document."""
    class PropertyNames:
        # Short storage keys used in the persisted document.
        Name = 'name'
        ProductCode = 'pc'
        Type = 'type'
        SaleScheme = 'ss'
        PurchaseScheme = 'ps'
        CostPrice = 'cp'
        MRP = 'mrp'
        Id = '_id'
        AvailableQuantity = 'aq'
        SalePrice = 'sp'
        PurchasePrice = 'pp'
        GroupId = 'groupid'
        # Bug fix: ProductId is referenced by the ProductId property below
        # (and by ProductModel) but was never declared, raising
        # AttributeError at runtime. Key name follows the commented
        # reverse-mapping entry 'pId'.
        ProductId = 'pId'
    @property
    def AvailableQuantity(self):
        # Read-only; quantity is expected to be written via set_value.
        return self.get_value(self.PropertyNames.AvailableQuantity)
    # _reverseMapping={
    #     '_id':(_id, ObjectId),
    #     'pc':(ProductCode, str),
    #     'name':('Name', str),
    #     'pId':('ProductId', str)
    # }
    @property
    def Name(self):
        name = self.get_value(self.PropertyNames.Name)
        if name:
            return name
    @Name.setter
    def Name(self, name):
        if not name:
            raise NotImplementedError()
        self.set_value(self.PropertyNames.Name, name)
    @property
    def GroupId(self):
        name = self.get_value(self.PropertyNames.GroupId)
        return name
    @GroupId.setter
    def GroupId(self, groupId):
        if not groupId:
            raise NotImplementedError()
        self.set_value(self.PropertyNames.GroupId, groupId)
    @property
    def ProductCode(self):
        productcode = self.get_value(self.PropertyNames.ProductCode)
        return productcode
    @ProductCode.setter
    def ProductCode(self, productcode):
        if not productcode:
            raise NotImplementedError()
        self.set_value(self.PropertyNames.ProductCode, productcode)
    @property
    def ProductId(self):
        productid = self.get_value(self.PropertyNames.ProductId)
        return productid
    @ProductId.setter
    def ProductId(self, productid):
        if not productid:
            raise NotImplementedError()
        self.set_value(self.PropertyNames.ProductId, productid)
|
{"/api/models/Product.py": ["/api/stores/product.py", "/api/models/base.py"], "/api/models/group.py": ["/api/stores/group.py", "/api/stores/user.py", "/api/models/base.py", "/api/core/group.py"], "/app.py": ["/urls.py"], "/api/models/base.py": ["/api/core/group.py"], "/api/stores/product.py": ["/api/stores/base.py"], "/api/stores/group.py": ["/api/stores/base.py", "/api/stores/product.py"], "/tests/test_user_store.py": ["/api/stores/user.py", "/app.py"], "/api/handlers/group.py": ["/api/models/group.py"], "/api/models/event.py": ["/api/models/base.py"], "/api/models/user.py": ["/api/stores/user.py", "/api/stores/group.py", "/api/models/base.py"], "/api/stores/user.py": ["/api/stores/base.py"], "/api/handlers/user.py": ["/api/models/user.py"], "/urls.py": ["/api/handlers/user.py", "/api/handlers/group.py"], "/api/core/group.py": ["/api/stores/group.py", "/api/stores/user.py"]}
|
16,746
|
shanmuk184/SalesCrmbackend
|
refs/heads/master
|
/api/stores/group.py
|
from .base import BaseStoreModel
from enum import Enum
from api.stores.product import Product
from bson import ObjectId
from datetime import datetime
import uuid
class CreateEmployeeRequestParams:
    # Request-body keys accepted when an admin creates an employee.
    Name = 'name'
    Designation='designation'
    Password = 'password'
    EmailId = 'email_id'
    Phone = 'phone'
class GroupType(Enum):
    # Short type codes persisted on group documents.
    # NOTE(review): member names and values look out of sync (e.g. Counter =
    # 'emt', which reads like "employee team"), and GroupModel references
    # GroupType.PharmaDistributor / GroupType.EmployeeTeam which are not
    # defined here -- reconcile this enum with its callers.
    Stockist = 'res'
    Direct = 'phc'
    Doctor = 'phd'
    Counter = 'emt'
class GroupResponse:
    # Keys of the group payload returned to API clients.
    Name = 'name'
    Id = 'id'
    MemberCount = 'memberCount'
    createdDateTime = 'createdDateTime'
    updatedDateTime = 'updatedDateTime'
class MemberMapping(BaseStoreModel):
    """Store sub-model: one member's role/status inside a group document."""
    class PropertyNames:
        # Short storage keys for the embedded mapping document.
        MemberId = 'member_id'
        Designation = "designation"
        Roles = 'roles'
        Status = 'status'
        Shifts = 'shift'
        Tasks = 'tasks'
        JoinedTimeStamp='jts'
        LastUpdatedTimeStamp='lts'
    @property
    def MemberId(self):
        return self.get_value(self.PropertyNames.MemberId)
    @MemberId.setter
    def MemberId(self, memberId):
        if not memberId:
            raise NotImplementedError('you must enter memberId')
        return self.set_value(self.PropertyNames.MemberId, memberId)
    @property
    def Designation(self):
        return self.get_value(self.PropertyNames.Designation)
    @Designation.setter
    def Designation(self, title):
        if not title:
            raise NotImplementedError()
        self.set_value(self.PropertyNames.Designation, title)
    @property
    def Roles(self):
        return self.get_value(self.PropertyNames.Roles)
    @Roles.setter
    def Roles(self, roles):
        if not roles:
            raise NotImplementedError('you must give roles')
        return self.set_value(self.PropertyNames.Roles, roles)
    @property
    def Status(self):
        return self.get_value(self.PropertyNames.Status)
    @Status.setter
    def Status(self, status):
        if not status:
            # Bug fix: the error message was copy-pasted from the Roles
            # setter ('you must give roles'); corrected to mention status.
            raise NotImplementedError('you must give status')
        return self.set_value(self.PropertyNames.Status, status)
    @property
    def JoinedTimeStamp(self):
        return self.get_value(self.PropertyNames.JoinedTimeStamp)
    @JoinedTimeStamp.setter
    def JoinedTimeStamp(self, jts):
        if not jts:
            raise NotImplementedError()
        self.set_value(self.PropertyNames.JoinedTimeStamp, jts)
    @property
    def LastUpdatedTimeStamp(self):
        return self.get_value(self.PropertyNames.LastUpdatedTimeStamp)
    @LastUpdatedTimeStamp.setter
    def LastUpdatedTimeStamp(self, lts):
        if not lts:
            raise NotImplementedError()
        self.set_value(self.PropertyNames.LastUpdatedTimeStamp, lts)
class SubGroupType:
    # Short type codes for sub-groups.
    EmployeeGroup = 'emg'
class SubGroup(BaseStoreModel):
    """Store sub-model for a named sub-group (e.g. an employee group)."""
    class PropertyNames:
        Name = 'name'
        Type = 'type'
    @property
    def Name(self):
        return self.get_value(self.PropertyNames.Name)
    @Name.setter
    def Name(self, name):
        if not name:
            # Bug fix: message was copy-pasted from a roles setter
            # ('you must give roles'); corrected to name the field.
            raise NotImplementedError('you must give a name')
        return self.set_value(self.PropertyNames.Name, name)
    @property
    def Type(self):
        return self.get_value(self.PropertyNames.Type)
    @Type.setter
    def Type(self, subgroupType):
        if not subgroupType:
            # Bug fix: same copy-pasted message corrected here.
            raise NotImplementedError('you must give a subgroup type')
        return self.set_value(self.PropertyNames.Type, subgroupType)
class ProductMapping(BaseStoreModel):
    # Placeholder store model; fields are not defined yet.
    pass
class CreateGroupRequestParams:
    # Request-body keys for group creation.
    Name = 'groupName'
    Type = 'groupType'
class Group(BaseStoreModel):
    """Store model for a group document, including member and product lists."""
    class PropertyNames:
        # Short storage keys used in the persisted document.
        Id = '_id'
        Name = 'name'
        EmployeeCount = 'employee_count'
        OwnerId = 'owner_id'
        Type = 'type'
        MemberMappings = 'membermappings'
        Products = 'products'
        CreatedTimeStamp = 'cts'
        UpdatedTimeStamp = 'uts'
    # Raw-key -> (attribute, type[, item class]) table consumed by
    # BaseStoreModel.populate_data_dict when rehydrating documents.
    _reverseMapping = {
        '_id': ('Id', uuid.UUID),
        'name':('Name', str),
        'employee_count': ('EmployeeCount', int),
        'owner_id': ('OwnerId', uuid.UUID),
        'membermappings': ('MemberMappings', list, MemberMapping),
        'products': ('Products', list, Product),
        'cts':('CreatedTimeStamp', datetime),
        'uts': ('UpdatedTimeStamp', datetime)
    }
    @property
    def Id(self):
        return self.get_value(self.PropertyNames.Id)
    @Id.setter
    def Id(self, id):
        if not id:
            raise NotImplementedError()
        self.set_value(self.PropertyNames.Id, id)
    @property
    def Name(self):
        return self.get_value(self.PropertyNames.Name)
    @Name.setter
    def Name(self, name):
        if not name:
            raise NotImplementedError()
        self.set_value(self.PropertyNames.Name, name)
    @property
    def OwnerId(self):
        # Bug fix: the getter previously lacked `return` and always gave None.
        return self.get_value(self.PropertyNames.OwnerId)
    @OwnerId.setter
    def OwnerId(self, ownerid):
        if not ownerid:
            raise NotImplementedError()
        self.set_value(self.PropertyNames.OwnerId, ownerid)
    @property
    def Type(self):
        return self.get_value(self.PropertyNames.Type)
    @Type.setter
    def Type(self, type):
        if not type:
            raise NotImplementedError()
        self.set_value(self.PropertyNames.Type, type)
    @property
    def Products(self):
        return self.get_value(self.PropertyNames.Products)
    @Products.setter
    def Products(self, products):
        if not products:
            raise NotImplementedError()
        self.set_value(self.PropertyNames.Products, products)
    @property
    def MemberMappings(self):
        return self.get_value(self.PropertyNames.MemberMappings)
    @MemberMappings.setter
    def MemberMappings(self, membermappings):
        if not membermappings:
            raise NotImplementedError()
        self.set_value(self.PropertyNames.MemberMappings, membermappings)
    # def populate_data_dict(self,dictParam=None):
    #     self._data_dict = dictParam
    #     productsList = dictParam.get(self.PropertyNames.Products)
    #     products = []
    #     for product in productsList:
    #         product = Product()
    #         product.populate_data_dict(product)
    #         products.append(product)
    #     self.set_value(self.PropertyNames.Products, products)
|
{"/api/models/Product.py": ["/api/stores/product.py", "/api/models/base.py"], "/api/models/group.py": ["/api/stores/group.py", "/api/stores/user.py", "/api/models/base.py", "/api/core/group.py"], "/app.py": ["/urls.py"], "/api/models/base.py": ["/api/core/group.py"], "/api/stores/product.py": ["/api/stores/base.py"], "/api/stores/group.py": ["/api/stores/base.py", "/api/stores/product.py"], "/tests/test_user_store.py": ["/api/stores/user.py", "/app.py"], "/api/handlers/group.py": ["/api/models/group.py"], "/api/models/event.py": ["/api/models/base.py"], "/api/models/user.py": ["/api/stores/user.py", "/api/stores/group.py", "/api/models/base.py"], "/api/stores/user.py": ["/api/stores/base.py"], "/api/handlers/user.py": ["/api/models/user.py"], "/urls.py": ["/api/handlers/user.py", "/api/handlers/group.py"], "/api/core/group.py": ["/api/stores/group.py", "/api/stores/user.py"]}
|
16,747
|
shanmuk184/SalesCrmbackend
|
refs/heads/master
|
/tests/test_user_store.py
|
from api.stores.user import User
from tornado.testing import AsyncHTTPTestCase
from app import MyApplication
from tornado.testing import AsyncHTTPClient
from tornado.httputil import HTTPServerRequest
class TestServiceApp(AsyncHTTPTestCase):
    """HTTP test harness around MyApplication."""
    def get_app(self):
        # AsyncHTTPTestCase hook: supply the application under test.
        application = MyApplication()
        return application.make_app()
    def test_register(self):
        # NOTE(review): this test is incomplete -- client.fetch() is called
        # with no URL (a TypeError), the fixture dict is never sent, and
        # AsyncHTTPTestCase's self.fetch() should be used instead of a raw
        # AsyncHTTPClient so the request reaches the in-process app.
        testDict = {
            'name':'loki',
            'phone':'killll',
            'email':'saerty@34',
            'password':'donega'
        }
        client = AsyncHTTPClient()
        client.fetch()
|
{"/api/models/Product.py": ["/api/stores/product.py", "/api/models/base.py"], "/api/models/group.py": ["/api/stores/group.py", "/api/stores/user.py", "/api/models/base.py", "/api/core/group.py"], "/app.py": ["/urls.py"], "/api/models/base.py": ["/api/core/group.py"], "/api/stores/product.py": ["/api/stores/base.py"], "/api/stores/group.py": ["/api/stores/base.py", "/api/stores/product.py"], "/tests/test_user_store.py": ["/api/stores/user.py", "/app.py"], "/api/handlers/group.py": ["/api/models/group.py"], "/api/models/event.py": ["/api/models/base.py"], "/api/models/user.py": ["/api/stores/user.py", "/api/stores/group.py", "/api/models/base.py"], "/api/stores/user.py": ["/api/stores/base.py"], "/api/handlers/user.py": ["/api/models/user.py"], "/urls.py": ["/api/handlers/user.py", "/api/handlers/group.py"], "/api/core/group.py": ["/api/stores/group.py", "/api/stores/user.py"]}
|
16,748
|
shanmuk184/SalesCrmbackend
|
refs/heads/master
|
/api/handlers/group.py
|
from tornado.gen import *
from .baseHandler import BaseHandler, BaseApiHandler
from api.models.group import GroupModel
import json
from tornado import web
from bson.json_util import dumps
from tornado_swirl.swagger import schema, restapi
@restapi('/api/user/groups')
class GroupsListHandler(BaseApiHandler):
    """Lists the groups owned by the authenticated user."""
    @web.authenticated
    @coroutine
    def get(self):
        """Return the caller's groups as BSON-encoded JSON, or 400 on error."""
        model = GroupModel(user=self._user, db=self.db)
        status, payload = yield model.get_groups_where_user_is_owner(self._user.UserId)
        if not status:
            self.set_status(400, payload)
            self.finish()
        else:
            self.finish(dumps(payload))
@restapi('/api/group')
class GroupHandler(BaseHandler):
    """Creates a group for the authenticated user."""
    @web.authenticated
    @coroutine
    def post(self):
        """Create a group from the request args; 400 with reason on failure."""
        user = yield self.current_user
        model = GroupModel(user=user, db=self.db)
        try:
            yield model.create_group(self.args)
        except Exception as e:
            # Bug fix: failures were silently swallowed (`except: pass`) and
            # the handler still reported success; surface them as a 400,
            # mirroring the error style of the sibling handlers.
            self.set_status(400, str(e))
            self.finish()
            return
        self.finish(json.dumps({'status': 'success'}))
@restapi('/api/(.*)/group')
class CreateEmloyeeHandler(BaseApiHandler):
    """Adds an employee to the group identified by the URL path."""
    @coroutine
    def post(self, groupId):
        """Create an employee under *groupId* from the request args."""
        user = yield self.current_user
        model = GroupModel(user=user, db=self.db)
        try:
            employee = yield model.create_employee(groupId, self.args)
        except Exception as e:
            self.set_status(400, str(e))
            self.finish()
            # Bug fix: without this return the handler fell through and
            # called self.write(None) on an already-finished response.
            return
        self.write(employee)
|
{"/api/models/Product.py": ["/api/stores/product.py", "/api/models/base.py"], "/api/models/group.py": ["/api/stores/group.py", "/api/stores/user.py", "/api/models/base.py", "/api/core/group.py"], "/app.py": ["/urls.py"], "/api/models/base.py": ["/api/core/group.py"], "/api/stores/product.py": ["/api/stores/base.py"], "/api/stores/group.py": ["/api/stores/base.py", "/api/stores/product.py"], "/tests/test_user_store.py": ["/api/stores/user.py", "/app.py"], "/api/handlers/group.py": ["/api/models/group.py"], "/api/models/event.py": ["/api/models/base.py"], "/api/models/user.py": ["/api/stores/user.py", "/api/stores/group.py", "/api/models/base.py"], "/api/stores/user.py": ["/api/stores/base.py"], "/api/handlers/user.py": ["/api/models/user.py"], "/urls.py": ["/api/handlers/user.py", "/api/handlers/group.py"], "/api/core/group.py": ["/api/stores/group.py", "/api/stores/user.py"]}
|
16,749
|
shanmuk184/SalesCrmbackend
|
refs/heads/master
|
/api/models/event.py
|
from tornado.gen import *
from api.core.event import EventHelper
from api.models.base import BaseModel
class EventModel(BaseModel):
    """Thin model wrapper around EventHelper for event creation."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._eh = EventHelper(**kwargs)

    @coroutine
    def create_event(self, eventDict):
        """Persist *eventDict* via the event helper; rejects empty input."""
        if eventDict:
            yield self._eh.create_event(eventDict)
        else:
            raise NotImplementedError()
|
{"/api/models/Product.py": ["/api/stores/product.py", "/api/models/base.py"], "/api/models/group.py": ["/api/stores/group.py", "/api/stores/user.py", "/api/models/base.py", "/api/core/group.py"], "/app.py": ["/urls.py"], "/api/models/base.py": ["/api/core/group.py"], "/api/stores/product.py": ["/api/stores/base.py"], "/api/stores/group.py": ["/api/stores/base.py", "/api/stores/product.py"], "/tests/test_user_store.py": ["/api/stores/user.py", "/app.py"], "/api/handlers/group.py": ["/api/models/group.py"], "/api/models/event.py": ["/api/models/base.py"], "/api/models/user.py": ["/api/stores/user.py", "/api/stores/group.py", "/api/models/base.py"], "/api/stores/user.py": ["/api/stores/base.py"], "/api/handlers/user.py": ["/api/models/user.py"], "/urls.py": ["/api/handlers/user.py", "/api/handlers/group.py"], "/api/core/group.py": ["/api/stores/group.py", "/api/stores/user.py"]}
|
16,750
|
shanmuk184/SalesCrmbackend
|
refs/heads/master
|
/api/models/user.py
|
from tornado.gen import *
import bcrypt
import jwt
import base64
from api.stores.user import User, LinkedAccount, LinkedAccountType, RegisterRequestParams, UserStatus, EmailValidationStatus, NewUserStatus
from api.stores.group import Group, CreateEmployeeRequestParams
from api.models.base import BaseModel
import tornado.ioloop
import uuid
class UserModel(BaseModel):
    """Model layer for account registration, login and profile access."""
    @coroutine
    def check_if_user_exists_with_same_email(self, email):
        # Returns (exists, user-or-None); rejects missing input.
        if not email:
            raise NotImplementedError()
        user = yield self._uh.getUserByEmail(email)
        if user:
            raise Return((True, user))
        raise Return((False, None))
    @coroutine
    def check_if_user_exists_with_same_employee_id(self, employee_id):
        # Same contract as the email variant, keyed on employee id.
        if not employee_id:
            raise NotImplementedError()
        user = yield self._uh.getUserByEmployeeId(employee_id)
        if user:
            raise Return((True, user))
        raise Return((False, None))
    @coroutine
    def create_admin_user(self, postBodyDict):
        """Register a new admin user.

        :param postBodyDict: dict with name, phone, email and password keys.
        :returns: (True, user) on success or (False, reason) when the email
            is already registered.
        """
        user = User()
        user.Name = postBodyDict.get(user.PropertyNames.Name)
        user.Phone = postBodyDict.get(user.PropertyNames.Phone)
        user.PrimaryEmail = postBodyDict.get(RegisterRequestParams.Email)
        # employee_exists = False
        # if postBodyDict.get(user.PropertyNames.EmployeeId):
        #     user.EmployeeId = postBodyDict.get(user.PropertyNames.EmployeeId)
        #     (employee_exists, _) = yield self.check_if_user_exists_with_same_employee_id(user.EmployeeId)
        (user_exists, _) = yield self.check_if_user_exists_with_same_email(user.PrimaryEmail)
        if user_exists:
            raise Return((False, 'User already exists'))
        password = yield self.get_hashed_password(postBodyDict.get(RegisterRequestParams.Password))
        linkedaccount = LinkedAccount()
        user.UserId = uuid.uuid4()
        linkedaccount.AccountName = user.PrimaryEmail
        linkedaccount.AccountHash = password.get('hash')
        linkedaccount.AccountType = LinkedAccountType.Native
        user.LinkedAccounts = [linkedaccount]
        user.Status=UserStatus.Registered
        # NOTE(review): GroupModel.create_invited_state_user assigns a bool
        # to EmailValidated while this assigns a status string -- confirm
        # the intended representation.
        user.EmailValidated = EmailValidationStatus.NotValidated
        user_result = yield self._uh.save_user(user.datadict)
        # user = yield self._uh.getUserByUserId(user_result.inserted_id)
        # group = yield self._gh.createDummyGroupForUser(user_result.inserted_id)
        # yield self._gh.createGroupMemberMappingForDummyGroup(group.inserted_id, user_result.inserted_id)
        raise Return((True, user))
    def get_profile(self):
        # Flat dict for the authenticated user, or None when unauthenticated.
        if self._user:
            return self._user.datadict
    @coroutine
    def validate_password(self, postBodyDict):
        # NOTE(review): returns an error dict on mismatch but None on
        # success, and being a coroutine the result must be yielded --
        # confirm callers handle both.
        if postBodyDict['password'] != postBodyDict['password2']:
            return {'status':'error', 'message':'you must enter same password'}
    @coroutine
    def login(self, dict):
        """Check credentials; yields (ok, user-or-message).

        NOTE(review): the parameter name `dict` shadows the builtin.
        """
        username = dict.get('username')
        password = dict.get('password')
        if not username or not password:
            raise Return((False, 'You must enter both fields'))
        try:
            user = yield self._uh.getUserByEmail(username)
            if user:
                # NOTE(review): dict-style .get() access assumes
                # LinkedAccounts holds raw dicts; verify the store does not
                # rehydrate them into LinkedAccount objects.
                linkedAccount = user.LinkedAccounts[0]
                accounthash = linkedAccount.get(LinkedAccount.PropertyNames.AccountHash)
                isvalidPassword = yield self.check_hashed_password(password, accounthash)
                if isvalidPassword:
                    raise Return((True, user))
                else:
                    raise Return((False,'Wrong password'))
            else:
                raise Return((False, 'user email does not exist'))
        except IndexError:
            # Raised when the user record has no linked accounts at all.
            raise Return((False, 'user email does not exist'))
|
{"/api/models/Product.py": ["/api/stores/product.py", "/api/models/base.py"], "/api/models/group.py": ["/api/stores/group.py", "/api/stores/user.py", "/api/models/base.py", "/api/core/group.py"], "/app.py": ["/urls.py"], "/api/models/base.py": ["/api/core/group.py"], "/api/stores/product.py": ["/api/stores/base.py"], "/api/stores/group.py": ["/api/stores/base.py", "/api/stores/product.py"], "/tests/test_user_store.py": ["/api/stores/user.py", "/app.py"], "/api/handlers/group.py": ["/api/models/group.py"], "/api/models/event.py": ["/api/models/base.py"], "/api/models/user.py": ["/api/stores/user.py", "/api/stores/group.py", "/api/models/base.py"], "/api/stores/user.py": ["/api/stores/base.py"], "/api/handlers/user.py": ["/api/models/user.py"], "/urls.py": ["/api/handlers/user.py", "/api/handlers/group.py"], "/api/core/group.py": ["/api/stores/group.py", "/api/stores/user.py"]}
|
16,751
|
shanmuk184/SalesCrmbackend
|
refs/heads/master
|
/api/stores/user.py
|
from enum import Enum
from .base import BaseStoreModel
import re
from bson import ObjectId
import uuid
import datetime
from tornado_swirl.swagger import schema
class SupportedRoles:
    # Short role codes stored on member mappings.
    Admin = 'ad'
    Member = 'me'
class DisplayRoles:
    # Human-readable role labels for presentation.
    Employee = 'Employee'
    Owner = 'Owner'
class UserStatus:
    # Lifecycle states of a user account.
    Invited = 'invited'
    Registered = 'registered'
class EmailValidationStatus:
    # Whether the user's email address has been verified.
    Validated = 'validated'
    NotValidated = 'not_validated'
class NewUserStatus:
    # Yes/no flag values indicating whether an account is newly created.
    Yes = 'yes'
    No = 'no'
@schema
class RegisterRequestParams:
    """
    Properties:
        name (str) -- Required Shanmuk
        email (email) -- Required
        phone (str) -- Required
        employeeid (int) -- Required
        password (password) -- Required
    """
    # Request-body keys for registration. The docstring above is parsed by
    # tornado_swirl into the swagger schema, so it is left untouched here.
    # NOTE(review): the stray word "Shanmuk" after "Required" looks
    # accidental and leaks into the generated API docs -- confirm and remove.
    Name = 'name'
    Email = 'email'
    Phone = 'phone'
    EmployeeId = 'employeeid'
    Password = 'password'
@schema
class LoginRequestParams:
    """
    Properties:
        username (email) -- Required
        password (password) -- Required
    """
    # Request-body keys for login; docstring is swagger-parsed, left as-is.
    UserName = 'username'
    Password = 'password'
@schema
class SuccessResponse:
    """
    Properties:
        status (str) -- Required
        authToken (str) -- Required
    """
    # Keys of the success payload returned by auth endpoints; docstring is
    # swagger-parsed, left as-is.
    Status = 'status'
    AuthToken = 'authToken'
class LinkedAccountType:
    # Credential provider types; only native email/password exists today.
    Native = 'native'
class StatusType:
    # Invitation lifecycle states.
    Invited = 'invited'
    Accepted = 'accepted'
class GroupMapping(BaseStoreModel):
    """Store sub-model linking a user to a group with roles and status."""
    class PropertyNames:
        # Short storage keys for the embedded mapping document.
        GroupId ='group_id'
        Roles = 'roles'
        Status = 'status'
        Shifts = 'shift'
        Tasks = 'tasks'
    @property
    def GroupId(self):
        return self.get_value(self.PropertyNames.GroupId)
    @GroupId.setter
    def GroupId(self, group_id):
        # NOTE(review): unlike the sibling setters, this accepts falsy ids
        # without raising -- confirm that is intentional.
        return self.set_value(self.PropertyNames.GroupId, group_id)
    @property
    def Roles(self):
        return self.get_value(self.PropertyNames.Roles)
    @property
    def Status(self):
        return self.get_value(self.PropertyNames.Status)
    @Status.setter
    def Status(self, status):
        if not status:
            raise NotImplementedError()
        self.set_value(self.PropertyNames.Status, status)
    # Defined after Status but attaches to the Roles property above.
    @Roles.setter
    def Roles(self, roles):
        if not isinstance(roles, list):
            raise NotImplementedError()
        self.set_value(self.PropertyNames.Roles, roles)
class LinkedAccount(BaseStoreModel):
    """Store model for one login credential attached to a user."""
    def __init__(self,accountname=None, accounthash=None, accounttype=None, **kwargs):
        super().__init__()
        if accountname and accounthash:
            self.set_value(self.PropertyNames.AccountName, accountname)
            self.set_value(self.PropertyNames.AccountHash, accounthash)
            # Bug fix: the accounttype argument was accepted but silently
            # dropped; persist it when supplied.
            if accounttype:
                self.set_value(self.PropertyNames.AccountType, accounttype)
    @property
    def AccountName(self):
        return self.get_value(self.PropertyNames.AccountName)
    @AccountName.setter
    def AccountName(self, accountname):
        if not accountname:
            raise NotImplementedError()
        self.set_value(self.PropertyNames.AccountName, accountname)
    @property
    def AccountHash(self):
        return self.get_value(self.PropertyNames.AccountHash)
    @AccountHash.setter
    def AccountHash(self, accounthash):
        if not accounthash:
            raise NotImplementedError()
        self.set_value(self.PropertyNames.AccountHash, accounthash)
    @property
    def AccountType(self):
        return self.get_value(self.PropertyNames.AccountType)
    @AccountType.setter
    def AccountType(self, accounttype):
        if not accounttype:
            # Bug fix: was `raise NotImplemented()` -- NotImplemented is a
            # constant, not callable or raisable.
            raise NotImplementedError()
        # Bug fix: the setter never stored the value, so AccountType was
        # silently lost for every caller (e.g. UserModel.create_admin_user).
        self.set_value(self.PropertyNames.AccountType, accounttype)
    class PropertyNames:
        # Short storage keys for the credential document.
        AccountName = 'accountname'
        AccountHash = 'accounthash'
        AccountType = 'accounttype'
        AuthToken = 'authtoken'
    # Raw-key -> (attribute, type) table used when rehydrating documents.
    _reverseMapping = {
        'accountname':('AccountName', str),
        'accounthash':('AccountHash', str),
        'accounttype': ('AccountType', str),
        'authtoken': ('AuthToken', str),
    }
    @property
    def AuthToken(self):
        return self.get_value(self.PropertyNames.AuthToken)
    @AuthToken.setter
    def AuthToken(self, auth_token):
        return self.set_value(self.PropertyNames.AuthToken, auth_token)
    class ReverseMapping:
        accountname = 'AccountName'
        accounthash = 'AccountHash'
@schema
class ProfileSchema:
    """
    Properties:
        _id (str) -- Userid
        primaryemail (email) -- Email
        phone (str) -- Phone
        employeeid -- EmployeeId
    """
    # Marker class: the profile fields are declared only in the swagger
    # docstring above, parsed by tornado_swirl.
    pass
class User(BaseStoreModel):
    '''
    Persisted user document.

    This design assumes all other base models (LinkedAccount, GroupMapping)
    are populated and embedded into the user before it is stored.
    '''

    # Precompiled, raw-string email pattern.  Fix: the original inlined the
    # pattern as a non-raw string literal (where '\.' is an invalid escape
    # sequence, a DeprecationWarning in modern Python) and recompiled it on
    # every assignment to PrimaryEmail.
    _EMAIL_RE = re.compile(r'(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)')

    @property
    def UserId(self):
        return self.get_value(self.PropertyNames.UserId)

    @UserId.setter
    def UserId(self, userId):
        if not userId:
            raise NotImplementedError()
        self.set_value(self.PropertyNames.UserId, userId)

    @property
    def PrimaryEmail(self):
        return self.get_value(self.PropertyNames.PrimaryEmail)

    @PrimaryEmail.setter
    def PrimaryEmail(self, primaryemail):
        # Validate format before storing; rejects empty values outright.
        if not primaryemail:
            raise NotImplementedError()
        if self._EMAIL_RE.search(primaryemail):
            self.set_value(self.PropertyNames.PrimaryEmail, primaryemail)
        else:
            raise ValueError('you must enter valid email')

    @property
    def LinkedAccounts(self):
        return self.get_value(self.PropertyNames.LinkedAccounts)

    @LinkedAccounts.setter
    def LinkedAccounts(self, linkedAccounts: list):
        '''Accepts array argument to be set'''
        if not linkedAccounts:
            raise NotImplementedError()
        self.set_value(self.PropertyNames.LinkedAccounts, linkedAccounts)

    @property
    def Groups(self):
        return self.get_value(self.PropertyNames.Groups)

    @Groups.setter
    def Groups(self, groups: list):
        if not groups:
            raise NotImplementedError()
        self.set_value(self.PropertyNames.Groups, groups)

    @property
    def Name(self):
        return self.get_value(self.PropertyNames.Name)

    @Name.setter
    def Name(self, name: str):
        if not name:
            raise NotImplementedError('you must enter name')
        self.set_value(self.PropertyNames.Name, name)

    @property
    def EmployeeId(self):
        return self.get_value(self.PropertyNames.EmployeeId)

    @EmployeeId.setter
    def EmployeeId(self, employeeId: int):
        if not employeeId:
            raise NotImplementedError('you must enter employee id')
        self.set_value(self.PropertyNames.EmployeeId, employeeId)

    @property
    def Phone(self):
        return self.get_value(self.PropertyNames.Phone)

    @Phone.setter
    def Phone(self, phone: str):
        if not phone:
            raise NotImplementedError('you must enter phone')
        self.set_value(self.PropertyNames.Phone, phone)

    # Maps stored keys -> (python attribute name, type[, element model])
    # used when deserializing a raw document into this model.
    _reverseMapping = {
        '_id': ('UserId', uuid.UUID),
        'name': ('Name', str),
        'primaryemail': ('PrimaryEmail', str),
        'linkedaccounts': ('LinkedAccounts', list, LinkedAccount),
        'groups': ('Groups', list, GroupMapping),
        'phone': ('Phone', str),
        'employeeid': ('EmployeeId', int),
        'status': ('Status', str),
        'emailvalidated': ('EmailValidated', bool),
        'invitationCode': ('InvitationCode', int),
        'cts': ('CreatedTimeStamp', datetime.datetime),
        'uts': ('UpdatedTimeStamp', datetime.datetime)
    }

    class PropertyNames:
        # Stored-document key names.
        UserId = '_id'
        Name = 'name'
        PrimaryEmail = 'primaryemail'
        LinkedAccounts = 'linkedaccounts'
        Groups = 'groups'
        Phone = 'phone'
        EmployeeId = 'employeeid'
        Status = 'status'
        EmailValidated = 'emailvalidated'
        InvitationCode = 'invitationCode'
        CreatedTimeStamp = 'cts'
        UpdatedTimeStamp = 'uts'

    @property
    def Status(self):
        return self.get_value(self.PropertyNames.Status)

    @Status.setter
    def Status(self, status):
        if not status:
            raise NotImplementedError()
        self.set_value(self.PropertyNames.Status, status)

    @property
    def EmailValidated(self):
        return self.get_value(self.PropertyNames.EmailValidated)

    @EmailValidated.setter
    def EmailValidated(self, emv):
        # Must be a real bool (guards against truthy strings like 'false').
        if not isinstance(emv, bool):
            raise NotImplementedError()
        self.set_value(self.PropertyNames.EmailValidated, emv)

    @property
    def CreatedTimeStamp(self):
        return self.get_value(self.PropertyNames.CreatedTimeStamp)

    @CreatedTimeStamp.setter
    def CreatedTimeStamp(self, jts):
        if not jts:
            raise NotImplementedError()
        self.set_value(self.PropertyNames.CreatedTimeStamp, jts)

    @property
    def UpdatedTimeStamp(self):
        return self.get_value(self.PropertyNames.UpdatedTimeStamp)

    @UpdatedTimeStamp.setter
    def UpdatedTimeStamp(self, lts):
        if not lts:
            raise NotImplementedError()
        self.set_value(self.PropertyNames.UpdatedTimeStamp, lts)
#
# def populate_data_dict(self,dictParam=None):
# self._data_dict = dictParam
# linkedAccountsList = dictParam.get(self.PropertyNames.LinkedAccounts)
# groupsList = dictParam.get(self.PropertyNames.Groups)
# linkedaccounts = []
# groups = []
# for linkedAccount in linkedAccountsList:
# linkedaccount = LinkedAccount()
# linkedaccount.populate_data_dict(linkedAccount)
# linkedaccounts.append(linkedAccount)
# if groupsList:
# for group in groupsList:
# groupMapping = GroupMapping()
# groupMapping.populate_data_dict(group)
# groups.append(groupMapping)
# self.set_value(self.PropertyNames.LinkedAccounts, linkedaccounts)
# if groupsList:
# self.set_value(self.PropertyNames.Groups, groups)
#
# class UserRegistrationForm(BaseApiModel):
# _fields = ()
|
{"/api/models/Product.py": ["/api/stores/product.py", "/api/models/base.py"], "/api/models/group.py": ["/api/stores/group.py", "/api/stores/user.py", "/api/models/base.py", "/api/core/group.py"], "/app.py": ["/urls.py"], "/api/models/base.py": ["/api/core/group.py"], "/api/stores/product.py": ["/api/stores/base.py"], "/api/stores/group.py": ["/api/stores/base.py", "/api/stores/product.py"], "/tests/test_user_store.py": ["/api/stores/user.py", "/app.py"], "/api/handlers/group.py": ["/api/models/group.py"], "/api/models/event.py": ["/api/models/base.py"], "/api/models/user.py": ["/api/stores/user.py", "/api/stores/group.py", "/api/models/base.py"], "/api/stores/user.py": ["/api/stores/base.py"], "/api/handlers/user.py": ["/api/models/user.py"], "/urls.py": ["/api/handlers/user.py", "/api/handlers/group.py"], "/api/core/group.py": ["/api/stores/group.py", "/api/stores/user.py"]}
|
16,752
|
shanmuk184/SalesCrmbackend
|
refs/heads/master
|
/api/handlers/user.py
|
from tornado import web
from tornado.gen import *
from .baseHandler import BaseHandler, BaseApiHandler
import simplejson as json
from api.models.user import UserModel
from bson.json_util import dumps
from tornado_swirl.swagger import schema, restapi
@restapi('/api/register')
class RegisterHandler(BaseHandler):
    @coroutine
    def post(self):
        """Handles registration of user
        Handles native user signup and sends authToken back in the
        response
        Request Body:
            user (RegisterRequestParams) -- RegisterRequestParams data.
        200 Response:
            status (SuccessResponse) -- success
            authToken ([jwt]) -- jwtToken
        Error Responses:
            400 () -- Bad Request
            500 () -- Internal Server Error
        """
        user_model = UserModel(db=self.db)
        try:
            ok, payload = yield user_model.create_admin_user(self.args)
        except Exception as err:
            # Any failure during creation is reported as a 400 with the
            # exception text as the body.
            ok, payload = False, str(err)
        if not ok:
            self.set_status(400)
            self.write(payload)
            self.finish()
            return
        # Success: payload identifies the new user; issue a JWT for it.
        token = yield self.authorize(payload)
        self.write(json.dumps({'status': 'success', 'auth_token': token}))
        self.finish()
@restapi('/api/login')
class LoginHandler(BaseHandler):
    @coroutine
    def post(self):
        """Handles login of user
        Authenticates a native user and sends authToken back in the
        response
        Request Body:
            user (LoginRequestParams) -- LoginRequestParams data.
        200 Response:
            status (SuccessResponse) -- success
            authToken ([jwt]) -- jwtToken
        Error Responses:
            400 () -- Bad Request
            500 () -- Internal Server Error
        """
        # Fix: the swagger-visible docstring was copy-pasted from
        # RegisterHandler and wrongly described this endpoint as
        # "registration" / "signup".
        model = UserModel(db=self.db)
        (status, _) = yield model.login(self.args)
        if status:
            # Successful login: issue a JWT for the authenticated user.
            authToken = yield self.authorize(_)
            self.write(json.dumps({'status': 'success', 'auth_token': authToken}))
        else:
            self.set_status(400)
            self.write(json.dumps(_))
        # finish() runs for both branches.
        self.finish()
@restapi('/api/profile')
class ProfileHandler(BaseApiHandler):
    @web.authenticated
    @coroutine
    def get(self):
        """
        Returns user Profile
        HTTP Header:
            Authorization (str) -- Required
        200 Response:
            status (ProfileSchema) -- success
        :return:
        """
        model = UserModel(user=self._user,db=self.db)
        # NOTE(review): get_profile() is not yielded even though this
        # handler is a coroutine -- if it is itself a coroutine this would
        # write a Future; presumably it is a plain synchronous method,
        # confirm against api/models/user.py.
        profile = model.get_profile()
        # Serialized with bson.json_util.dumps so Mongo-specific values
        # (ObjectId, datetime) are representable.
        self.write(dumps(profile))
|
{"/api/models/Product.py": ["/api/stores/product.py", "/api/models/base.py"], "/api/models/group.py": ["/api/stores/group.py", "/api/stores/user.py", "/api/models/base.py", "/api/core/group.py"], "/app.py": ["/urls.py"], "/api/models/base.py": ["/api/core/group.py"], "/api/stores/product.py": ["/api/stores/base.py"], "/api/stores/group.py": ["/api/stores/base.py", "/api/stores/product.py"], "/tests/test_user_store.py": ["/api/stores/user.py", "/app.py"], "/api/handlers/group.py": ["/api/models/group.py"], "/api/models/event.py": ["/api/models/base.py"], "/api/models/user.py": ["/api/stores/user.py", "/api/stores/group.py", "/api/models/base.py"], "/api/stores/user.py": ["/api/stores/base.py"], "/api/handlers/user.py": ["/api/models/user.py"], "/urls.py": ["/api/handlers/user.py", "/api/handlers/group.py"], "/api/core/group.py": ["/api/stores/group.py", "/api/stores/user.py"]}
|
16,753
|
shanmuk184/SalesCrmbackend
|
refs/heads/master
|
/urls.py
|
from api.handlers.user import *
from api.handlers.group import GroupsListHandler, GroupHandler, CreateEmloyeeHandler
from api.handlers.product import ProductHandler
from api.handlers.event import EventHandler
# URL -> handler routing table consumed by the tornado Application.
urlpatterns = [
    # Admin register handler
    (r"/api/register$", RegisterHandler),
    (r"/api/login$",LoginHandler),
    (r"/api/group$", GroupHandler),
    (r"/api/profile$", ProfileHandler),
    # NOTE(review): /api/employee, /api/groups and /api/user/groups all
    # route to the same GroupsListHandler -- confirm the duplication is
    # intentional before removing any of them.
    (r"/api/employee$", GroupsListHandler),
    (r"/api/product$", ProductHandler),
    (r"/api/event$", EventHandler),
    (r"/api/groups$", GroupsListHandler),
    (r"/api/user/groups$", GroupsListHandler),
    # NOTE(review): handler name is spelled "CreateEmloyeeHandler" (sic)
    # where it is defined in api/handlers/group.py; kept as imported.
    (r"/api/(.*)/group", CreateEmloyeeHandler)
]
|
{"/api/models/Product.py": ["/api/stores/product.py", "/api/models/base.py"], "/api/models/group.py": ["/api/stores/group.py", "/api/stores/user.py", "/api/models/base.py", "/api/core/group.py"], "/app.py": ["/urls.py"], "/api/models/base.py": ["/api/core/group.py"], "/api/stores/product.py": ["/api/stores/base.py"], "/api/stores/group.py": ["/api/stores/base.py", "/api/stores/product.py"], "/tests/test_user_store.py": ["/api/stores/user.py", "/app.py"], "/api/handlers/group.py": ["/api/models/group.py"], "/api/models/event.py": ["/api/models/base.py"], "/api/models/user.py": ["/api/stores/user.py", "/api/stores/group.py", "/api/models/base.py"], "/api/stores/user.py": ["/api/stores/base.py"], "/api/handlers/user.py": ["/api/models/user.py"], "/urls.py": ["/api/handlers/user.py", "/api/handlers/group.py"], "/api/core/group.py": ["/api/stores/group.py", "/api/stores/user.py"]}
|
16,754
|
shanmuk184/SalesCrmbackend
|
refs/heads/master
|
/api/core/group.py
|
from tornado.gen import *
from api.stores.group import Group, MemberMapping
from api.stores.user import SupportedRoles, StatusType
from db.db import QueryConstants
from db.db import Database
class GroupHelper:
    """Database-level helper for group documents and user<->group mappings."""

    def __init__(self,**kwargs):
        # Optional current user; the raw db handle is wrapped in Database.
        self._user = kwargs.get('user')
        database = Database(kwargs.get('db'))
        self.db = database

    @coroutine
    def createDummyGroupForUser(self, userid):
        # Insert a placeholder group administered by *userid* and return
        # the raw insert result.
        if not userid:
            raise NotImplementedError()
        group = Group()
        group.Name = 'Dummy Group'
        group.Admins = [userid]
        group_result = yield self.db.GroupCollection.insert_one(group.datadict)
        raise Return(group_result)

    @coroutine
    def create_group_for_user(self, groupDict:dict):
        # NOTE(review): returns the literal string 'error' instead of
        # raising when groupDict is falsy -- callers must check the return
        # value; consider normalizing to an exception.
        if not groupDict:
            raise Return('error')
        group = yield self.db.GroupCollection.insert_one(groupDict)
        raise Return(group)

    @coroutine
    def get_groups_where_user_is_owner(self, userId):
        # Stream all groups whose OwnerId matches and materialize them as
        # Group models.
        groupCursor = self.db.GroupCollection.find({Group.PropertyNames.OwnerId:userId})
        groups = []
        while (yield groupCursor.fetch_next):
            groupDict = groupCursor.next_object()
            group = Group()
            group.populate_data_dict(groupDict)
            groups.append(group)
        raise Return((groups))

    def create_member_mapping(self, memberId, roles):
        # Admins are auto-accepted; everyone else starts as invited.
        memberMapping = MemberMapping()
        memberMapping.MemberId = memberId
        memberMapping.Roles = roles
        if SupportedRoles.Admin in roles:
            memberMapping.Status = StatusType.Accepted
        else:
            memberMapping.Status = StatusType.Invited
        return memberMapping

    @coroutine
    def insert_member_mapping_into_group(self, groupId, mappingDict):
        # $addToSet keeps the mapping list duplicate-free.
        # NOTE(review): Collection.update is deprecated in modern PyMongo/
        # Motor (use update_one) -- presumably pinned to an older driver.
        if not mappingDict or not groupId:
            raise NotImplementedError()
        yield self.db.GroupCollection.update({'_id':groupId}, {QueryConstants.AddToSet:{Group.PropertyNames.MemberMappings:mappingDict}}, w=1)

    @coroutine
    def insert_group_mapping_into_user(self, userId, mappingDict):
        if not mappingDict or not userId:
            raise NotImplementedError()
        # NOTE(review): this pushes into Group.PropertyNames.MemberMappings
        # on the *user* collection, but User stores its group links under
        # PropertyNames.Groups ('groups') -- this looks like it writes the
        # wrong field name; verify against api/stores/user.py before
        # relying on it.
        user_result = yield self.db.UserCollection.update({'_id': userId}, {
            QueryConstants.AddToSet: {Group.PropertyNames.MemberMappings: mappingDict}}, w=1)
        raise Return(user_result)
|
{"/api/models/Product.py": ["/api/stores/product.py", "/api/models/base.py"], "/api/models/group.py": ["/api/stores/group.py", "/api/stores/user.py", "/api/models/base.py", "/api/core/group.py"], "/app.py": ["/urls.py"], "/api/models/base.py": ["/api/core/group.py"], "/api/stores/product.py": ["/api/stores/base.py"], "/api/stores/group.py": ["/api/stores/base.py", "/api/stores/product.py"], "/tests/test_user_store.py": ["/api/stores/user.py", "/app.py"], "/api/handlers/group.py": ["/api/models/group.py"], "/api/models/event.py": ["/api/models/base.py"], "/api/models/user.py": ["/api/stores/user.py", "/api/stores/group.py", "/api/models/base.py"], "/api/stores/user.py": ["/api/stores/base.py"], "/api/handlers/user.py": ["/api/models/user.py"], "/urls.py": ["/api/handlers/user.py", "/api/handlers/group.py"], "/api/core/group.py": ["/api/stores/group.py", "/api/stores/user.py"]}
|
16,758
|
NuevoIngenioMexicano/DevPlaces
|
refs/heads/master
|
/myApp/models.py
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Categorias(models.Model):
    """Event category: a short name plus a tweet-length description."""
    # Attributes
    nombre = models.CharField(max_length=25)
    descripcion = models.CharField(max_length=140)

    def __str__(self):
        return self.nombre
class Publicacion(models.Model):
    """A user post: what is happening, where, and in which category."""
    # Links to other models.
    # Fix: on_delete made explicit.  It matches the generated migration
    # (0001_initial uses deletion.CASCADE, the Django 1.9 default) and
    # on_delete became a mandatory argument in Django 2.0, so this keeps
    # behavior identical while making the model forward-compatible.
    categoria = models.ForeignKey(Categorias, on_delete=models.CASCADE,
                                  related_name='categoria_de_la_publicacion')
    # Attributes
    descripcion = models.CharField(max_length=400)
    # NOTE(review): named "hora" (time) but declared as a DateField with
    # auto_now_add, so only the creation *date* is stored -- confirm intent.
    hora = models.DateField(auto_now_add=True)
    lugar = models.CharField(max_length=120)

    def __str__(self):
        return self.lugar
|
{"/myApp/forms.py": ["/myApp/models.py"], "/myApp/views.py": ["/myApp/forms.py"]}
|
16,759
|
NuevoIngenioMexicano/DevPlaces
|
refs/heads/master
|
/myApp/forms.py
|
from __future__ import unicode_literals
from django.forms import ModelForm
from . import models
from django.utils.translation import ugettext_lazy as _
from django import forms
from .models import Publicacion
class Crear(ModelForm):
    """ModelForm for creating a Publicacion.

    Labels and placeholders are the user-facing Spanish UI strings and
    must be kept verbatim.
    """
    class Meta:
        model = models.Publicacion
        fields = ['lugar','descripcion','categoria']
        labels = {
            'lugar':_('¿En donde estas?'),
            'descripcion':_('¿Que esta pasando?'),
            'categoria':_('Clasifica tu Evento')
        }
        # Bootstrap-styled widgets for each field.
        widgets = {
            'lugar': forms.TextInput(attrs={'class':'form-control','placeholder':'Lugar'}),
            'descripcion': forms.TextInput(attrs={'class':'form-control','placeholder':'Descripcion'}),
            'categoria': forms.Select(attrs={'class':'form-control','placeholder':'Categoria'} )
        }
|
{"/myApp/forms.py": ["/myApp/models.py"], "/myApp/views.py": ["/myApp/forms.py"]}
|
16,760
|
NuevoIngenioMexicano/DevPlaces
|
refs/heads/master
|
/myApp/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-07-24 12:47
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration (Django 1.9.8).  Do not hand-edit;
    # create a follow-up migration for any schema change instead.

    initial = True

    dependencies = [
    ]

    operations = [
        # Creates the Categorias table (name + short description).
        migrations.CreateModel(
            name='Categorias',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=25)),
                ('descripcion', models.CharField(max_length=140)),
            ],
        ),
        # Creates the Publicacion table with a CASCADE FK to Categorias.
        migrations.CreateModel(
            name='Publicacion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('descripcion', models.CharField(max_length=400)),
                ('hora', models.DateField(auto_now_add=True)),
                ('lugar', models.CharField(max_length=120)),
                ('categoria', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='categoria_de_la_publicacion', to='myApp.Categorias')),
            ],
        ),
    ]
|
{"/myApp/forms.py": ["/myApp/models.py"], "/myApp/views.py": ["/myApp/forms.py"]}
|
16,761
|
NuevoIngenioMexicano/DevPlaces
|
refs/heads/master
|
/myApp/views.py
|
from django.shortcuts import render
from . import models
from django.views.generic import View
from django.views.decorators.csrf import csrf_exempt
from .forms import Crear
#Create your views here.
def crear(request):
    """Render index2.html with every Publicacion (a listing view)."""
    publicaciones = models.Publicacion.objects.all()
    return render(request, 'index2.html', {'todo': publicaciones})
@csrf_exempt
def ver(request):
    """Show the creation form; on POST, validate and save a Publicacion.

    BUGFIX: the original tested ``form.is_valid`` without calling it -- a
    bound method is always truthy, so ``form.save()`` ran on every POST,
    raising on invalid data instead of re-displaying field errors.
    """
    template_name = 'crear2.html'
    form = Crear()
    if request.method == 'POST':
        form = Crear(request.POST)
        if form.is_valid():
            form.save()
    # Build the context after POST handling so the bound form (with any
    # validation errors) is rendered, not the empty one.
    context = {
        'form': form
    }
    return render(request, template_name, context)
def detalle(request, pk):
    """Detail page for a single Publicacion looked up by primary key."""
    # Publicacion.DoesNotExist propagates if pk is unknown, as before.
    publicacion = models.Publicacion.objects.get(pk=pk)
    return render(request, 'detalle.html', {'detalle': publicacion})
|
{"/myApp/forms.py": ["/myApp/models.py"], "/myApp/views.py": ["/myApp/forms.py"]}
|
16,762
|
red23495/random_util
|
refs/heads/master
|
/Python/Algebra/test/test_binary_exponentiation.py
|
import unittest
from ..binary_exponentiation import mod_pow
class BinaryExponentiationTest(unittest.TestCase):
    """Unit tests for mod_pow (binary exponentiation with optional modulus)."""

    def test_calculates_power_properly(self):
        self.assertEqual(mod_pow(2, 2), 4)
        self.assertEqual(mod_pow(1, 10), 1)
        self.assertEqual(mod_pow(10, 5), 100000)

    def test_calculates_properly_for_big_result(self):
        # Exercises the arbitrary-precision path (a 100001-digit integer).
        self.assertEqual(mod_pow(10, 100000), 10**100000)

    def test_calculates_zero_when_base_is_zero(self):
        self.assertEqual(mod_pow(0, 10), 0)

    def test_calculates_one_when_power_is_zero(self):
        # Convention: x**0 == 1 even for x != 0.
        self.assertEqual(mod_pow(200, 0), 1)

    def test_calculates_positive_when_base_negative_and_pow_even(self):
        self.assertEqual(mod_pow(-2, 4), 16)

    def test_calculates_negative_when_base_negative_and_pow_odd(self):
        self.assertEqual(mod_pow(-2, 3), -8)

    def test_mods_result_properly_when_mod_given(self):
        # Third argument is the modulus applied to the result.
        self.assertEqual(mod_pow(2, 2, 1), 0)
        self.assertEqual(mod_pow(1, 10, 17), 1)
        self.assertEqual(mod_pow(10, 5, 15), 10)
        self.assertEqual(mod_pow(10, 5, 100001), 100000)
|
{"/Python/Algebra/test/test_binary_exponentiation.py": ["/Python/Algebra/binary_exponentiation.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.