index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
10,604
|
fferri/geometric_patterns
|
refs/heads/master
|
/video7.py
|
from common import *

# Frame geometry and the shared polar-coordinate grids (radius r, angle a)
# used by every rendered frame.
imgsz = (1280, 800)
r, a = meshgrid_polar(imgsz)


def draw(t=0, **kwargs):
    """Render frame *t* of a 100-frame loop as a colormapped image."""
    hue = t * math.pi * 2 / 100  # one full hue rotation across the 100 frames
    pattern = 3 * (3 * np.log(1 + r)) + 5 * np.sin(a * 8 + 16 * np.log(1 + r))
    return apply_colormap(pattern, colormap.rainbow2(hue))


if __name__ == '__main__':
    for t in range(100):
        print('rendering frame %08d...' % t)
        imsave(draw(t), 'video7-%08d.png' % t)
|
{"/video8.py": ["/common.py"], "/video4.py": ["/common.py"], "/p04.py": ["/common.py"], "/p13.py": ["/common.py"], "/video5.py": ["/common.py"], "/p20.py": ["/common.py"], "/p01.py": ["/common.py"], "/p10.py": ["/common.py"], "/p24.py": ["/common.py"], "/p09.py": ["/common.py"], "/video8b.py": ["/common.py"], "/p16.py": ["/common.py"], "/video6.py": ["/common.py"], "/p19.py": ["/common.py"], "/p22.py": ["/common.py"], "/video.py": ["/common.py"], "/video9.py": ["/common.py"], "/p03.py": ["/common.py"], "/p18.py": ["/common.py"], "/video3.py": ["/common.py"], "/p02.py": ["/common.py"], "/p17.py": ["/common.py"], "/video8a.py": ["/common.py"], "/p15.py": ["/common.py"], "/p12.py": ["/common.py"], "/p14.py": ["/common.py"], "/video2.py": ["/common.py"], "/p11.py": ["/common.py"], "/p05.py": ["/common.py"], "/p25.py": ["/common.py"], "/p21.py": ["/common.py"], "/p23.py": ["/common.py"], "/video7.py": ["/common.py"]}
|
10,623
|
Zearin/bibtexml2
|
refs/heads/master
|
/setup.py
|
# -*- coding: utf-8 -*-
'''Convert bibTeX files to XML! Built on Pygments.
Useful for manipulating bibTeX data as XML with XML toolsets.
If you don't like something about bibtexml2, it's built with Pygments--so
you have its mature, widespread ecosystem at your disposal to tweak
whatever you want.
'''
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
from textwrap import dedent

##---------------------------------------------------------------
# Package metadata.
# BUG FIX: the original assigned to the module dunder ``__name__``.
# Rebinding ``__name__`` changes the running module's identity (it would no
# longer equal '__main__' when run as a script), so a plain constant is used.
PKG_NAME = 'bibtexml2'
__version__ = '0.2'
__author__ = 'Zearin'
__author_email__ = 'zearin@users.sourceforge.net'
# Short description = first line of the module docstring above.
__description__ = __doc__.splitlines()[0]
##---------------------------------------------------------------
config = {
    ##
    ##  OVERALL
    ##---------------------------------------------------------------
    'name': PKG_NAME,
    'version': __version__,
    'description': __description__,
    'long_description': __doc__,
    ##
    ##  PEOPLE
    ##---------------------------------------------------------------
    'author': __author__,
    'author_email': __author_email__,
    ##
    ##  METADATA
    ##---------------------------------------------------------------
    'license': 'MIT',
    'keywords': 'bibtex xml conversion pygments',
    'classifiers': [
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Console',
        'Environment :: Plugins',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: MacOS',
        'Operating System :: POSIX',
        'Operating System :: Unix',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Text Processing',
        'Topic :: Utilities',
    ],
    ##
    ##  URLS
    ##---------------------------------------------------------------
    #'url': 'URL to get it at.',
    #'download_url': 'Where to download it.',
    ##
    ##  TECHNICAL
    ##---------------------------------------------------------------
    'packages': [PKG_NAME],
    'install_requires': ['docopt', 'pygments'],
    'setup_requires': ['docopt', 'pygments'],
    'tests_require': ['pyvows>=2.0.4'],
    'entry_points': {
        'pygments.lexers': 'bibtex = bibtexml2.lexer:BibtexLexer',
        'pygments.formatters': 'bibtex = bibtexml2.formatter:BibTeXML',
        'console_scripts': 'bibtexml2 = bibtexml2.__main__:main'
    },
    #'scripts': [],
}
setup(**config)
|
{"/tests/lexer_vows.py": ["/bibtexml2/__init__.py"], "/bibtexml2/__main__.py": ["/bibtexml2/__init__.py"]}
|
10,624
|
Zearin/bibtexml2
|
refs/heads/master
|
/bibtexml2/lexer.py
|
# -*- coding: utf-8 -*-
'''FIXME: <<DocString>>
'''
# Based on spec summary at
# http://artis.imag.fr/~Xavier.Decoret/resources/xdkbibtex/bibtex_summary.html
#--------------------------------------------------------------------
## Imports
#--------------------------------------------------------------------
### STDLIB
from __future__ import (
absolute_import,
with_statement,
print_function,)
### External
from pygments.lexer import (
RegexLexer,
bygroups,
include,)
from pygments.token import (
Text,
Comment,
Keyword,
String,
Number,
Operator,
Other,
Punctuation,
Literal,
Whitespace,
Name,)
#--------------------------------------------------------------------
## Variables
#--------------------------------------------------------------------
PUBTYPES = frozenset((
'article',
'book',
'booklet',
'conference',
'inbook',
'incollection',
'inproceedings',
'manual',
'mastersthesis',
'misc',
'phdthesis',
'proceedings',
'techreport',
'unpublished'
))
'''Official bibTeX publication types.'''
### TODO: Tokenize as Keyword.Reserved
_pubtypes_re_string = r'|'.join(PUBTYPES)
FIELDS = frozenset((
'address',
'annote',
'author',
'booktitle',
'chapter',
'crossref',
'edition',
'editor',
'eprint',
'howpublished',
'institution',
'journal',
'key',
'month',
'note',
'number',
'organization',
'pages',
'publisher',
'school',
'series',
'title',
'type',
'url',
'volume',
'year',
))
'''Standard bibTeX fields. (Does not include non-standard fields.)'''
### TODO: Tokenize these as Name.Constant
MONTH_ABBR = ('jan' ,'feb' ,'mar' ,'apr',
'may' ,'jun' ,'jul' ,'aug',
'sep' ,'oct' ,'nov' ,'dec')
'''Predefined bibTeX "variables" for the months of the year,
which resolve to the month's full name.
'''
_month_abbr_re_string = '|'.join(MONTH_ABBR)
#--------------------------------------------------------------------
## Classes
#--------------------------------------------------------------------
class BibtexLexer(RegexLexer):
    '''Pygments RegexLexer for bibTeX files.

    This class is a modification of the 'BibtexLexer' class from the module
    'bibtex-pygments-lexer' (version 0.0.1), originally authored by Marco D. Adelfio.
    I couldn't find a repository for the module anywhere, so I modified it according
    to my needs.
    '''
    ### TODO: Change '=' type from Token.Text to Operator
    name = 'BibTeX'
    aliases = ['bibtex', 'bib', 'bibtexml']
    filenames = ['*.bib']
    tokens = {
        # Entry state: try whitespace, @string/@preamble/@comment declarations,
        # known publication-type entries, then treat anything left as raw comment.
        'root': [
            include('whitespace'),
            include('@nonentries'),
            include('@entries'),
            include('raw_comment'),
        ],
        'whitespace': [
            (r'\s+', Whitespace)
        ],
        # Balanced-brace string body; nested braces push/pop this same state.
        'bracket': [
            (r'[^}{]+', String.Double),
            (r'{', Punctuation, '#push'),
            (r'}', Punctuation, '#pop'),
        ],
        # Fallback: in bibTeX, any text outside an @-construct is commentary.
        'raw_comment': [
            (r'.*\n', Comment)
        ],
        # '@article{', '@book{', ... — one of the official publication types.
        '@entries': [
            (r'(?i)(@(?:' + _pubtypes_re_string + r'))\s*({)',
             bygroups(
                 Keyword.Reserved,
                 Punctuation),
             '@entry'
             ),
        ],
        '@nonentries': [
            # non-comment @declarations
            (r'(?i)(@(?:string|preamble))\s*({)',
             bygroups(
                 Keyword.Declaration,
                 Punctuation),
             'field'),
            (r'(?i)(@(?:comment))\s*({)',
             bygroups(
                 Keyword.Declaration,
                 Punctuation),
             '@comment'),  # like 'bracket', but contents tokenized as Comment instead
            # NOTE(review): '[^(...|...){]+' is a *character class* of the
            # letters, '(', ')' and '|' in the pubtype names — not a negated
            # group alternation as the layout suggests.  It happens to match
            # other @words, but confirm the intent before relying on it.
            (r'(?i)(@[^(' + _pubtypes_re_string + '){]+)\s*({)',
             bygroups(
                 Keyword,
                 Punctuation),
             '@entry'
             ),
        ],
        # Interior of '@comment{...}': nested braces allowed, all text Comment.
        '@comment': [
            (r'[^}{]+', Comment),
            (r'{', Punctuation, '#push'),
            (r'}', Punctuation, '#pop'),
        ],
        # Just inside an entry's opening brace: expect 'citekey,'.
        '@entry': [
            include('whitespace'),
            (r'(?i)([^, ]*)\s*(\,)',
             bygroups(
                 Name.Label,
                 Punctuation),
             'field_multi'
             ),
        ],
        # 'field = value' pairs inside a publication entry.
        'field_multi': [
            include('whitespace'),
            (r'}', Punctuation, '#pop:2'),  # pop back to root
            (r'(?i)([^}=\s]*)\s*(=)',
             bygroups(
                 Name.Attribute,
                 Operator),
             'value_multi'
             ),
            (r'[^}]+\n', Text),
        ],
        # Single 'name = value' body of @string/@preamble.
        'field': [
            include('whitespace'),
            (r'}', Punctuation, '#pop'),  # pop back to root
            (r'(?i)([^}=\s]*)\s*(=)',
             bygroups(
                 Name.Label,
                 Operator),
             'value_single'
             ),
            (r'[^}]+\n', Text),
        ],
        # Shared value forms: integers, quoted strings, brace-delimited strings.
        'value': [
            include('whitespace'),
            (r'-?(0|[1-9]\d*)', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r'{', Punctuation, 'bracket'),
            (r'[^,}{]+', Text),
        ],
        'value_multi': [
            include('value'),
            (r',', Punctuation, '#pop'),  # pop back to field_multi
            (r'}', Punctuation, '#pop:3'),  # pop back to root
        ],
        'value_single': [
            include('value'),
            (r'}', Punctuation, '#pop:2'),  # pop back to root
        ],
    }
|
{"/tests/lexer_vows.py": ["/bibtexml2/__init__.py"], "/bibtexml2/__main__.py": ["/bibtexml2/__init__.py"]}
|
10,625
|
Zearin/bibtexml2
|
refs/heads/master
|
/bibtexml2/filter.py
|
# -*- coding: utf-8 -*-
'''Pygments-style filters.'''
## StdLib
from __future__ import (
absolute_import,
with_statement,
print_function,)
## External
from pygments.util import get_bool_opt
from pygments.token import Name
from pygments.filter import (
Filter, # class-based filters
simplefilter, # decorator-based filters
)
@simplefilter
def token_types(lexer, stream, options):
    """Pass through only tokens whose type appears in ``options['ttypes']``."""
    for ttype, value in stream:
        if ttype in options['ttypes']:
            yield ttype, value
@simplefilter
def lowercase_entries(lexer, stream, options):
    """Lowercase publication-type tokens.  Unimplemented stub.

    NOTE(review): as written this consumes the stream without yielding
    anything, so applying the filter drops every token — confirm before use.
    """
    for ttype, value in stream:
        pass  # TODO
@simplefilter
def lowercase_fields(lexer, stream, options):
    """Lowercase field-name tokens.  Unimplemented stub.

    NOTE(review): as written this consumes the stream without yielding
    anything, so applying the filter drops every token — confirm before use.
    """
    for ttype, value in stream:
        pass  # TODO
@simplefilter
def expand_month_abbrs(lexer, stream, options):
    """Replace bibTeX month abbreviations ('jan', ...) with full month names.

    BUG FIX: the original referenced ``Token`` which is never imported in
    this module (only ``Name`` is), so the filter raised NameError on the
    first Text token.  ``Token`` is imported locally here.
    """
    from pygments.token import Token  # module-level imports only bring in Name
    months = {
        'jan': 'January',
        'feb': 'February',
        'mar': 'March',
        'apr': 'April',
        'may': 'May',
        'jun': 'June',
        'jul': 'July',
        'aug': 'August',
        'sep': 'September',
        'oct': 'October',
        'nov': 'November',
        'dec': 'December', }
    for ttype, value in stream:
        if ttype is Token.Text and value in months:
            value = months[value]
        yield ttype, value
@simplefilter
def drop_whitespace(lexer, stream, options):
    """Drop whitespace tokens from the stream.

    BUG FIX: ``Token`` was never imported in this module, so the original
    raised NameError; it is imported locally here.
    """
    from pygments.token import Token  # module-level imports only bring in Name
    for ttype, value in stream:
        if ttype is not Token.Text.Whitespace:
            yield ttype, value
@simplefilter
def drop_punctuation(lexer, stream, options):
    """Drop punctuation tokens from the stream.

    BUG FIX: ``Token`` was never imported in this module, so the original
    raised NameError; it is imported locally here.
    """
    from pygments.token import Token  # module-level imports only bring in Name
    for ttype, value in stream:
        if ttype is not Token.Punctuation:
            yield ttype, value
|
{"/tests/lexer_vows.py": ["/bibtexml2/__init__.py"], "/bibtexml2/__main__.py": ["/bibtexml2/__init__.py"]}
|
10,626
|
Zearin/bibtexml2
|
refs/heads/master
|
/tests/lexer_vows.py
|
# -*- coding: utf-8 -*-
## Generated by PyVows v2.0.4 (2013/07/19)
## http://pyvows.org
#--------------------------------------------------------------------
## Imports
#--------------------------------------------------------------------
### Standard Library
from __future__ import (
absolute_import,
with_statement,
print_function,)
import os
from os import path
from os.path import (
abspath,
basename,
dirname,)
from pprint import pprint
import sys
### Third Party
import pygments
from pygments.token import *
from pygments.filters import (
KeywordCaseFilter,
TokenMergeFilter,
RaiseOnErrorTokenFilter,)
import six
### PyVows Testing
from pyvows import (Vows, expect)
## Local Imports
# imported below...
#--------------------------------------------------------------------
## Variables
#--------------------------------------------------------------------
TEST_PATH = abspath(dirname(__file__))            # directory holding this test file
MOD_PATH = abspath(path.join(TEST_PATH, '../'))   # repository root, one level up
TESTDATA_PATH = abspath(path.join(TEST_PATH, 'examples'))  # sample .bib fixtures
try:
    # Import the file directly above this one
    # (i.e., don’t use similar modules found in PYTHONPATH)
    _syspath = sys.path[:]
    sys.path.insert(0, MOD_PATH)
    import bibtexml2
    from bibtexml2 import lexer
    sys.path = _syspath[:]  # restore the original search path
    del _syspath
except ImportError as err:
    print(err)
    sys.exit(err)
# All fixture files to lex, excluding the one known-problematic fixture.
FILES = set((f for f in os.listdir(TESTDATA_PATH)
             if f != 'testcases.bib'))  # causes weird encoding errors; fix later
FILES = set((path.join(TESTDATA_PATH, f) for f in FILES))
# Shared lexer instance; any Error token aborts the test run loudly.
LEXER = lexer.BibtexLexer()
LEXER.add_filter( RaiseOnErrorTokenFilter() )
#--------------------------------------------------------------------
## Custom Contexts
#--------------------------------------------------------------------
def token_context(token_types):
    """Build a PyVows context whose topic keeps only parent tokens whose
    type is one of *token_types* (a sequence of pygments token types)."""
    class Context(Vows.Context):
        def topic(self, parent_topic):
            if parent_topic[0] in frozenset(token_types):
                yield parent_topic

    return Context
#--------------------------------------------------------------------
## Tests
#--------------------------------------------------------------------
@Vows.batch
class FilesToLex(Vows.Context):
    # first, rule out any dumb file errors
    def topic(self):
        # Yield each fixture path so every vow below runs once per file.
        for f in FILES:
            yield f

    def test_files_exist(self, topic):
        expect(topic).to_be_a_file()

    class WhenLexed(Vows.Context):
        def topic(self, parent_topic):
            # Lex the whole file and feed each token to the nested contexts.
            with open(parent_topic, 'r') as f:
                code = ''.join( f.readlines() )
            for item in pygments.lex(code, LEXER):
                yield item  # 2-tuple of TokenType, TokenValue

        ##
        ##  Ignored; use pygments RaiseOnErrorFilter
        ##
        # def we_get_no_lexer_errors(self, topic):
        #     expect(topic).not_to_be_an_error()
        #     expect(topic[0]).not_to_equal(Token.Error)

        class WhitespaceTokens(token_context( (Token.Text.Whitespace,) )):
            def only_contain_whitespace(self, topic):
                expect(topic[1]).to_match(r'\s+')

        class EntriesAndFields(token_context( (Token.Keyword.Reserved,
                                               Token.Name.Attribute) )):
            def contain_no_whitespace(self, topic):
                # Value should already be stripped by the lexer rules.
                pre_strip, post_strip = topic[1], topic[1].strip()
                expect(topic[1]).not_to_match(r'\s+')
                expect(pre_strip == post_strip).to_be_true()

        class Entries(token_context( (Token.Keyword.Reserved, ) )):
            def have_valid_token_values(self, topic):
                # '@Article' -> 'article', which must be an official pubtype.
                pubtype = topic[1].lower()
                pubtype = pubtype.lstrip('@')
                expect(pubtype in lexer.PUBTYPES).to_be_true()
|
{"/tests/lexer_vows.py": ["/bibtexml2/__init__.py"], "/bibtexml2/__main__.py": ["/bibtexml2/__init__.py"]}
|
10,627
|
Zearin/bibtexml2
|
refs/heads/master
|
/bibtexml2/utils.py
|
# -*- coding: utf-8 -*-
'''Miscellaneous BibTeX utilties.'''
## STDLIB
from __future__ import (
absolute_import,
with_statement,
print_function,
)
import io
## External
import six
## Local
#------------------------------------------------------------------------------
def _indeces_that_match(sequence, re_object):
'''Given `sequence`, returns a list of indeces where
`sequence[index]` matches `re_object`.
'''
indeces = []
for idx, line in enumerate(sequence):
if re_object.search(line):
indeces.append(idx)
return indeces
def _break_list(sequence, indeces):
'''Breaks sequence into a list containing tuples.
Each tuple contains a slice of `sequence`, calculated
using each pair of values in `indeces`.
'''
results = []
for idx, item in enumerate(indeces):
start = indeces[idx]
try: stop = indeces[idx+1]
except IndexError: stop = None
results.append( tuple(sequence[start:stop]) )
return results
#------------------------------------------------------------------------------
def open(file):
    '''Opens a bibtex `file` and returns its contents.
    Uses `readlines()` internally.

    NOTE(review): despite the original wording ("as a string"), this
    returns the *list of raw bytes lines* from `readlines()` — the file is
    deliberately opened in binary mode and decoding is left to callers.
    Also note this shadows the builtin `open` within this module.
    '''
    try:
        lines = None
        # open the file as *bytes*:
        #   - lots of TeX stuff is written in ASCII,
        #   - this function shouldn't alter the text
        #   - any unicode translation is up to other functions
        with io.open(file, 'rb') as f:
            lines = f.readlines()
        return lines
    except Exception as e:
        # Just adds a little extra message with whitespace to make errors easier to spot
        from textwrap import dedent
        message = '''
        ERROR OPENING BIBTEX FILE:  {file}
        '''.format(file=file)
        print(dedent(message))
        raise e
|
{"/tests/lexer_vows.py": ["/bibtexml2/__init__.py"], "/bibtexml2/__main__.py": ["/bibtexml2/__init__.py"]}
|
10,628
|
Zearin/bibtexml2
|
refs/heads/master
|
/bibtexml2/formatter.py
|
# -*- coding: utf-8 -*-
'''Pygments formatter. Converts bibTeX to XML.'''
#----------------------------------------------------------------------
# Imports
#----------------------------------------------------------------------
## StdLib
from __future__ import (
absolute_import,
with_statement,
print_function,)
import itertools
## External
from pygments.formatter import Formatter
from pygments.token import (
Text,
Comment,
Keyword,
String,
Number,
Operator,
Other,
Punctuation,
Literal,
Whitespace,
Name,)
import xmlwitch
#----------------------------------------------------------------------
# Variables
#----------------------------------------------------------------------
DTD = '<!DOCTYPE bibtex:file PUBLIC "-//BibTeXML//DTD XML for BibTeX v1.0//EN" "bibtexml.dtd">\n'
XML = xmlwitch.Builder(version='1.0', encoding='utf-8')
XMLNS = 'http://bibtexml.sf.net/'
SKIPPABLE_TTYPES = frozenset([Text.Whitespace, Whitespace, Operator, Punctuation])
#----------------------------------------------------------------------
# Functions
#----------------------------------------------------------------------
def _is_entry(token_tuple):
    """Return True when the (ttype, value) pair starts a bibTeX entry."""
    ttype = token_tuple[0]
    return ttype == Keyword.Reserved  # and str(value).startswith('@')
def _write_entry(entry_token_stream):
    """Emit one bibTeX entry (a slice of the token sequence) to the
    module-global XML builder as <bibtex:entry><bibtex:TYPE>...</...>."""
    try:
        assert( entry_token_stream[0][0] is Keyword.Reserved )  # sanity check! be sure we have an entry
    except AssertionError:
        import sys
        sys.exit(entry_token_stream[0])
    # '@Article' -> 'article'
    entrytype = entry_token_stream[0][1]
    entrytype = entrytype.lstrip('@').lower()
    # Second token is the citation label, when present.
    # NOTE(review): the `and/or` idiom also yields None when the label value
    # itself is falsy (e.g. empty string) — confirm that is acceptable.
    entrylabel = (entry_token_stream[1][0] is Name.Label) and entry_token_stream[1][1] or None
    if entrylabel:
        with XML.bibtex__entry( id=entrylabel ):
            with XML['bibtex__{0}'.format(entrytype)]:
                _write_fields( entry_token_stream[2:] )
    else:
        with XML.bibtex__entry():
            with XML['bibtex__{0}'.format(entrytype)]:
                _write_fields( entry_token_stream[1:] )
def _write_fields(field_token_list):
    """Write each field of an entry as a <bibtex:FIELDNAME>value</...> element
    on the module-global XML builder.

    A field is a Name.Attribute token; its value is the concatenation of all
    following tokens up to the next Name.Attribute.

    CLEANUP: the original also computed `field_indeces`, `metaidx`, `start`
    and `stop`, none of which were ever used — they are removed here.
    """
    # Predicate: token belongs to the current field's *value* (not a new field).
    is_field_value = lambda tok: tok[0] is not Name.Attribute
    for idx, item in enumerate(field_token_list):
        # Normalize: lowercase and strip quote/brace punctuation and spaces.
        ttype, value = item[0], item[1].lower().strip(r' \'"}{')
        if ttype is Name.Attribute:
            fieldname = value
            value = ''.join([tok[1] for tok in
                             itertools.takewhile(
                                 is_field_value,
                                 field_token_list[idx + 1:])
                             ])
            XML[ 'bibtex__{0}'.format(fieldname) ]( value )
def _entries(token_seq, indeces):
# returns a list where each item is an entry
# (a slice of `token_seq`)
entries = []
for idx, i in enumerate(indeces):
start = indeces[idx]
try:
stop = indeces[idx + 1]
except IndexError:
stop = None
finally:
entries.append( token_seq[start:stop] )
return entries
#----------------------------------------------------------------------
# Classes
#----------------------------------------------------------------------
class BibTeXML(Formatter):
    '''Formats a bibTeX token-stream to XML output.
    Output (should) be valid according to the output from the original
    BibTeXML project (hosted on SourceForge.net).

    NOTE(review): output is accumulated on the module-global `XML` builder,
    so `format()` is not reentrant and repeated calls append to the same
    document — confirm before formatting more than one file per process.
    '''
    name = 'BibTeXML'
    aliases = ['bibtexml', 'bibteXML', 'bibxml']

    def __init__(self, **options):
        super(BibTeXML, self).__init__(**options)

    def format(self, tokensource, outfile):
        # need to be able to look ahead
        token_seq = tuple([i for i in tokensource if i[0] not in SKIPPABLE_TTYPES])
        # mark where entries occur
        entry_indeces = tuple([idx for idx, item in enumerate(token_seq) if _is_entry(item)])
        # build list of sub-iterators
        entries = _entries(token_seq, entry_indeces)
        # begin XML document
        XML.write(DTD)
        with XML.bibtex__file(xmlns__bibtex=XMLNS):
            idx = 0
            # make sure we've captured entries
            if len(entry_indeces):
                # write anything that occurs before the first entry
                # (leading comments become one XML comment block)
                in_comment = False
                for idx, item in enumerate(token_seq):
                    if idx > entry_indeces[0]:
                        break
                    ttype, value = item[0], item[1]
                    next_ttype = token_seq[idx + 1][0]  # NOTE(review): computed but unused
                    if ttype is Comment:
                        if not in_comment:
                            in_comment = True
                            XML.write_indented('\n<!--\n')
                            XML._indentation += 1
                        XML.write_indented(value.strip('\r\n'))
                        continue
                    # NOTE(review): in_comment is never reset to False, so the
                    # closing '-->' (and the indentation decrement) can repeat
                    # for every non-comment token after a comment run — confirm.
                    elif in_comment:
                        XML.write_indented('\n-->')
                        XML._indentation -= 1
                        continue
            # write the entries
            for idx, entry in enumerate( entries ):
                _write_entry(entry)
        outfile.write(str(XML))
|
{"/tests/lexer_vows.py": ["/bibtexml2/__init__.py"], "/bibtexml2/__main__.py": ["/bibtexml2/__init__.py"]}
|
10,629
|
Zearin/bibtexml2
|
refs/heads/master
|
/bibtexml2/__init__.py
|
# -*- coding: utf-8 -*-
'''Convert bibTeX files to XML! Built on Pygments.
Useful for manipulating bibTeX data as XML with XML toolsets.
If you don't like something about bibtexml2, it's built with Pygments--so
you have its mature, widespread ecosystem at your disposal to tweak
whatever you want.
'''
## STDLIB
from __future__ import (
absolute_import,
with_statement,
print_function,)
## Local
from bibtexml2 import (
lexer,
utils)
|
{"/tests/lexer_vows.py": ["/bibtexml2/__init__.py"], "/bibtexml2/__main__.py": ["/bibtexml2/__init__.py"]}
|
10,630
|
Zearin/bibtexml2
|
refs/heads/master
|
/bibtexml2/__main__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''{program[human_format]}
Usage: {program[cli_format]} [options] <file>...
Options:
-X, --no-xml Output bibTeX instead of XML
'''
#--------------------------------------------------------------------
## Imports
#--------------------------------------------------------------------
## StdLib
from __future__ import (
absolute_import,
with_statement,
print_function,)
import sys
## External
from docopt import docopt
import pygments
from pygments.filters import (
KeywordCaseFilter,
TokenMergeFilter,
RaiseOnErrorTokenFilter,)
from pygments.token import *
## Internal
from bibtexml2 import (
lexer,
utils,)
#--------------------------------------------------------------------
## Variables
#--------------------------------------------------------------------
# Convenience alias so main() can refer to the lexer class directly.
BibtexLexer = lexer.BibtexLexer
# Values interpolated into the module docstring (the docopt usage text).
docstring_format_dict = {
    'human_format': 'BibTeXML2',
    'cli_format' : 'bibtexml2',
}
#--------------------------------------------------------------------
## __main__
#--------------------------------------------------------------------
def main():
    """CLI entry point: lex each <file> argument and dump its token stream,
    one '<index>\\t<token type>\\t<token value>' line per token."""
    arguments = docopt(
        __doc__.format( program=docstring_format_dict ),
        # BUG FIX: the original passed the literal template string
        # '{docstring_format_dict["human_format"]} 2.0' (no .format/f-prefix),
        # so `--version` printed the raw placeholder instead of the name.
        version='{0} 2.0'.format(docstring_format_dict['human_format']),
        options_first= True
    )
    # Renamed from `lexer` to avoid shadowing the imported `lexer` module.
    bib_lexer = BibtexLexer()
    bib_lexer.add_filter( RaiseOnErrorTokenFilter() )
    #bib_lexer.add_filter( TokenMergeFilter() )
    bib_lexer.add_filter( KeywordCaseFilter(case='lower') )
    for fname in arguments['<file>']:
        # get bibtex source (the original reused `f` for both the path and
        # the file object; distinct names keep the loop variable intact)
        with open(fname, 'r') as fobj:
            code = ''.join( fobj.readlines() )
        # NOW LEX SEE CODE!
        for idx, item in enumerate(pygments.lex(code, bib_lexer)):
            # if item[0] in frozenset([Token.Text.Whitespace, Token.Punctuation]):
            #     continue
            print( "{0:>5}\t{1[0]!s:<25}\t{1[1]!r}".format(idx, item),
                   file=sys.stdout )
if __name__ == '__main__':
    try:
        main()
    # RaiseOnErrorTokenFilter raised on a lexer error token:
    # exit with the error as the process status message.
    except pygments.filters.ErrorToken as e:
        sys.exit(e)
|
{"/tests/lexer_vows.py": ["/bibtexml2/__init__.py"], "/bibtexml2/__main__.py": ["/bibtexml2/__init__.py"]}
|
10,631
|
ranmengyuan/PredictFlow
|
refs/heads/master
|
/analyze/forecast.py
|
from dataBase.sql_helper import conn_db, exe_query, conn_close
from sklearn.linear_model import Lasso
import random
# import numpy as np
# import matplotlib.pyplot as plt
# import time
# from sklearn.metrics import r2_score
def get_data():
    """
    Fetch feature data from the database (translated from Chinese).

    Builds, per shop id 1..2000:
      * x_test  — 14 rows per shop for future days 489..502
                  [shop_id, location_id, per_pay, shop_level, day]
      * x_train — one row per historical day 0..488
                  [shop_id, location_id, per_pay, shop_level, sum-of-dates]
      * y_train — total payment count for that shop/day

    NOTE(review): `shop_infos`, `infos` and `user_pays` are all the SAME
    cursor object returned by exe_query(); re-executing and re-iterating a
    single cursor this way is fragile — confirm the fetch semantics.
    NOTE(review): the connection is never closed (conn_close is commented
    out), and the `return` inside `finally` also swallows any exception
    raised in the `try` body after the `except` handler prints it.
    :return: (x_train, y_train, x_test)
    """
    x_train = []
    y_train = []
    x_test = []
    try:
        conn = conn_db()
        cur = conn.cursor()
        for i in range(2000):
            # SQL built by concatenation; `i` is a loop-generated int, so no
            # injection risk here in practice.
            sql = "SELECT shop_id, location_id, per_pay, shop_level FROM ShopInfo WHERE shop_id='" + str(i + 1) + "'"
            shop_infos = exe_query(cur, sql)
            infos = shop_infos
            # 14 future-day feature rows per shop (days 489..502).
            for m in range(14):
                for info in infos:
                    temp = []
                    temp.append(int(info[0]))
                    temp.append(int(info[1]))
                    temp.append(int(info[2]))
                    temp.append(int(info[3]))
                    temp.append(int(m + 489))
                    x_test.append(temp)
            # One training row per historical day (0..488).
            for j in range(489):
                for shop_info in shop_infos:
                    temp = []
                    temp.append(int(shop_info[0]))
                    temp.append(int(shop_info[1]))
                    temp.append(int(shop_info[2]))
                    temp.append(int(shop_info[3]))
                    pay = [0, 0]  # [sum of date values, sum of payment counts]
                    # view = 0
                    sql = "SELECT date,sum FROM UserPay WHERE shop_id='" + str(i + 1) + "' AND date='" + str(j) + "'"
                    user_pays = exe_query(cur, sql)
                    for user_pay in user_pays:
                        pay[0] += int(user_pay[0])
                        pay[1] += int(user_pay[1])
                    temp.append(int(pay[0]))
                    x_train.append(temp)
                    y_train.append(int(pay[1]))
                    # sql = "SELECT date,sum FROM UserPay WHERE shop_id='" + str(i + 1) + "' AND date='" + str(j) + "'"
                    # user_views = exe_query(cur, sql)
                    # for user_view in user_views:
                    #     view += int(user_view[0])
                    # x_train[5].append(int(view))
                    # print(shop_info[0], user_pay[0])
    except Exception as e:
        print(e)
    finally:
        # conn_close(conn, cur)
        return x_train, y_train, x_test
def lasso():
    """Fit a Lasso regression on the shop features from get_data() and write
    per-shop predictions (14 comma-separated values per line, with random
    jitter) to prediction.csv.

    BUG FIX: if open() failed, the original's `finally: f.close()` raised
    NameError because `f` was never bound; `f` is now pre-initialized and
    only closed when it was actually opened.
    """
    # Lasso regression strength (solved via coordinate descent).
    alpha = 0.1
    # Renamed from `lasso` — the original shadowed this function's own name.
    model = Lasso(max_iter=10000, alpha=alpha)
    x_train, y_train, x_test = get_data()
    print(len(x_train), len(y_train), len(x_test))
    y_pred_lasso = model.fit(x_train, y_train).predict(x_test)
    i = 0
    n = 1
    f = None
    try:
        f = open("prediction.csv", "w+")
        li = str(n) + ","
        for result in y_pred_lasso:
            ran = random.randint(-20, 20)  # random jitter added to each prediction
            if i == 13:
                # 14th value of the row: finish the line and start the next shop.
                i = 0
                li += str(int(result+ran)) + "\n"
                f.writelines(li)
                n += 1
                li = str(n) + ","
            else:
                i += 1
                li += str(int(result+ran)) + ","
    except Exception as e:
        print(e)
    finally:
        if f is not None:
            f.close()
    # The commented-out analysis below reports the R^2 coefficient of
    # determination (fraction of total variance explained by the model;
    # best possible score is 1) and plots the fitted coefficients.
    # r2_score_lasso = r2_score(y_test, y_pred_lasso)
    # print("R2 on the test set: %f" % r2_score_lasso)
    # plt.plot(lasso.coef_, label='Lasso coefficients')
    # plt.plot(coef, '--', label='original coefficients')
    # plt.legend(loc='best')
    # plt.show()
|
{"/analyze/forecast.py": ["/dataBase/sql_helper.py"], "/file/file_processing.py": ["/dataBase/sql_helper.py"], "/manage.py": ["/analyze/forecast.py"]}
|
10,632
|
ranmengyuan/PredictFlow
|
refs/heads/master
|
/file/file_processing.py
|
from dataBase.sql_helper import conn_db, exe_table, exe_update, conn_close
def read_by_line(address):
    """
    Buffered line-by-line read of the whole file (translated from Chinese).

    BUG FIX: the original opened the file without ever closing it, leaking
    the file descriptor; a `with` block now guarantees closure.
    :param address: path of the text file to read
    :return: list of all lines (newlines preserved)
    """
    file_content = []
    with open(address) as file:
        while 1:
            lines = file.readlines(100000)  # read in ~100 KB line batches
            if not lines:
                break
            for line in lines:
                file_content.append(line)
    return file_content
def shop_info_database(file_content):
    """
    Parse the shop-info lines and store them in the database
    (translated from Chinese).

    Rebuilds the ShopInfo table, then inserts one row per line.  A line
    with 10 comma-separated fields carries all three category names; with
    9 fields, cate_3_name is stored as NULL.
    NOTE(review): lines with any other field count silently reuse the
    previous iteration's `sql`, inserting a duplicate row — confirm intended.
    NOTE(review): SQL is built by string concatenation from file input;
    parameterized queries would be safer.
    :param file_content: list of raw lines from shop_info.txt
    :return:
    """
    try:
        conn = conn_db()
        cur = conn.cursor()
        sql = "DROP TABLE if EXISTS ShopInfo"
        exe_table(cur, sql)
        sql = "CREATE TABLE ShopInfo(shop_id INT NOT NULL AUTO_INCREMENT,city_name VARCHAR (255),location_id INT ," \
              "per_pay INT ,score INT ,comment_cnt INT ,shop_level INT ,cate_1_name VARCHAR (255) ,cate_2_name" \
              " VARCHAR (255) ,cate_3_name VARCHAR (255) ,PRIMARY KEY (shop_id)) ENGINE = InnoDB DEFAULT CHARSET = UTF8"
        exe_table(cur, sql)
        for i in range(0, len(file_content)):
            contents = file_content[i].split(",")
            if len(contents) == 10:
                sql = "INSERT INTO ShopInfo VALUES ('" + contents[0] + "','" + contents[1] + "','" + contents[2] + "'" \
                      ",'" + \
                      contents[3] + "','" + contents[4] + "','" + contents[5] + "','" + contents[6] + "'," \
                      "'" + contents[
                          7] + "','" + contents[8] + "','" + contents[9] + "')"
            elif len(contents) == 9:
                sql = "INSERT INTO ShopInfo VALUES ('" + contents[0] + "','" + contents[1] + "','" + contents[2] + "'" \
                      ",'" + \
                      contents[3] + "','" + contents[4] + "','" + contents[5] + "','" + contents[6] + "'," \
                      "'" + contents[
                          7] + "','" + contents[8] + "',NULL)"
            exe_update(conn, cur, sql)
    except Exception as e:
        print(e)
        print(contents[0])
    finally:
        conn_close(conn, cur)
def get_user_pay(address):
    """
    Read user payment records and store per-day counts in the database
    (translated from Chinese).

    Rebuilds the UserPay table, then streams the file: records are assumed
    to be grouped by shop id (column 1).  For each shop a 489-slot per-day
    counter array is accumulated (day index via cal_day on column 2's date
    part) and flushed to the DB whenever the shop id changes, and once more
    at EOF for the final shop.
    NOTE(review): the input file is never closed.
    :param address: path of the user_pay text file
    :return:
    """
    try:
        conn = conn_db()
        cur = conn.cursor()
        sql = "DROP TABLE if EXISTS UserPay"
        exe_table(cur, sql)
        sql = "CREATE TABLE UserPay(id INT NOT NULL AUTO_INCREMENT,shop_id INT NOT NULL ,date INT,sum INT ," \
              "PRIMARY KEY (id) ,FOREIGN KEY (shop_id) REFERENCES ShopInfo(shop_id)) " \
              "ENGINE = InnoDB DEFAULT CHARSET = UTF8"
        exe_table(cur, sql)
        file = open(address)
        shop = ''
        num = []
        for i in range(489):
            num.append(0)  # one payment counter per day 0..488
        while 1:
            lines = file.readlines(10000)
            if not lines:
                # EOF: flush the counters of the last shop seen.
                user_pay_database(conn, cur, num, shop)
                break
            for line in lines:
                file_content = line.split(",")
                if shop == '':
                    # Very first record: start counting for this shop.
                    shop = file_content[1]
                    dates = file_content[2].split(" ")
                    num[cal_day(dates[0])] += 1
                elif shop == file_content[1]:
                    dates = file_content[2].split(" ")
                    num[cal_day(dates[0])] += 1
                elif shop != file_content[1]:
                    # Shop changed: flush previous shop, reset counters.
                    user_pay_database(conn, cur, num, shop)
                    shop = file_content[1]
                    for i in range(489):
                        num[i] = 0
                    dates = file_content[2].split(" ")
                    num[cal_day(dates[0])] += 1
    except Exception as e:
        print(e)
    finally:
        conn_close(conn, cur)
def cal_day(date):
    """
    Return the number of days between *date* and 2015-07-01
    (translated from Chinese).

    GENERALIZED: the original hand-rolled month-length tables and handled
    only the years 2015 and 2016, silently returning None for anything else.
    datetime arithmetic produces identical results for 2015/2016 (including
    the 2016 leap day) and works for every other year as well.
    :param date: 'YYYY-MM-DD' string (time-of-day, if any, must be split off
                 by the caller, as the callers in this module already do)
    :return: signed day offset from 2015-07-01
    """
    import datetime
    year, month, day = (int(part) for part in date.split("-"))
    return (datetime.date(year, month, day) - datetime.date(2015, 7, 1)).days
def user_pay_database(conn, cur, num, shop):
    """
    Persist one shop's per-day payment counters (translated from Chinese):
    inserts one UserPay row per day index 0..488.
    :param conn: open database connection
    :param cur: cursor on that connection
    :param num: 489-element list of per-day counts
    :param shop: shop id (string)
    :return:
    """
    try:
        for day in range(489):
            sql = ("INSERT INTO UserPay(shop_id ,date ,sum) VALUES ('"
                   + shop + "','" + str(day) + "','" + str(num[day]) + "')")
            exe_update(conn, cur, sql)
    except Exception as e:
        print(e)
        print(shop, day, num[day])
def get_user_view(address):
    """
    Read user browsing records and store per-day counts in the database
    (translated from Chinese).

    Mirror image of get_user_pay() for the UserView table: rebuilds the
    table, streams the (shop-grouped) file, accumulates a 489-slot per-day
    counter array per shop, and flushes on shop change and at EOF.
    NOTE(review): the input file is never closed.
    :param address: path of the user_view text file
    :return:
    """
    try:
        conn = conn_db()
        cur = conn.cursor()
        sql = "DROP TABLE if EXISTS UserView"
        exe_table(cur, sql)
        sql = "CREATE TABLE UserView(id INT NOT NULL AUTO_INCREMENT,shop_id INT NOT NULL ,date INT,sum INT ," \
              "PRIMARY KEY (id) ,FOREIGN KEY (shop_id) REFERENCES ShopInfo(shop_id)) " \
              "ENGINE = InnoDB DEFAULT CHARSET = UTF8"
        exe_table(cur, sql)
        file = open(address)
        shop = ''
        num = []
        for i in range(489):
            num.append(0)  # one view counter per day 0..488
        while 1:
            lines = file.readlines(10000)
            if not lines:
                # EOF: flush the counters of the last shop seen.
                user_view_database(conn, cur, num, shop)
                break
            for line in lines:
                file_content = line.split(",")
                if shop == '':
                    # Very first record: start counting for this shop.
                    shop = file_content[1]
                    dates = file_content[2].split(" ")
                    num[cal_day(dates[0])] += 1
                elif shop == file_content[1]:
                    dates = file_content[2].split(" ")
                    num[cal_day(dates[0])] += 1
                elif shop != file_content[1]:
                    # Shop changed: flush previous shop, reset counters.
                    user_view_database(conn, cur, num, shop)
                    shop = file_content[1]
                    for i in range(489):
                        num[i] = 0
                    dates = file_content[2].split(" ")
                    num[cal_day(dates[0])] += 1
    except Exception as e:
        print(e)
    finally:
        conn_close(conn, cur)
def user_view_database(conn, cur, num, shop):
    """
    Persist one shop's per-day view counters (translated from Chinese):
    inserts one UserView row per day index 0..488.
    :param conn: open database connection
    :param cur: cursor on that connection
    :param num: 489-element list of per-day counts
    :param shop: shop id (string)
    :return:
    """
    try:
        for day in range(489):
            sql = ("INSERT INTO UserView(shop_id ,date ,sum) VALUES ('"
                   + shop + "','" + str(day) + "','" + str(num[day]) + "')")
            exe_update(conn, cur, sql)
    except Exception as e:
        print(e)
        print(shop, day, num[day])
|
{"/analyze/forecast.py": ["/dataBase/sql_helper.py"], "/file/file_processing.py": ["/dataBase/sql_helper.py"], "/manage.py": ["/analyze/forecast.py"]}
|
10,633
|
ranmengyuan/PredictFlow
|
refs/heads/master
|
/bean/shop_info.py
|
class ShopInfo:
    """
    Shop (merchant) record (translated from Chinese).

    BUG FIX: the original defined __init__ twice — Python has no constructor
    overloading, so the second definition silently replaced the first and the
    no-argument form was lost.  The two are merged into a single constructor
    with defaulted parameters, so both ShopInfo() and the full 10-argument
    call work.
    """

    def __init__(self, shop_id=0, city_name='', location_id=0, per_pay=0,
                 score=0, comment_cnt=0, shop_level=0, cate_1_name='',
                 cate_2_name='', cate_3_name=''):
        """
        Constructor; every parameter defaults to an empty/zero value.
        :param shop_id: merchant id
        :param city_name: city name
        :param location_id: location code (nearby shops share a code)
        :param per_pay: average spend (higher value = more expensive)
        :param score: rating (higher = better)
        :param comment_cnt: number of comments
        :param shop_level: shop tier (higher = higher tier)
        :param cate_1_name: first-level category name
        :param cate_2_name: second-level category name
        :param cate_3_name: third-level category name
        :return:
        """
        self.shop_id = shop_id
        self.city_name = city_name
        self.location_id = location_id
        self.per_pay = per_pay
        self.score = score
        self.comment_cnt = comment_cnt
        self.shop_level = shop_level
        self.cate_1_name = cate_1_name
        self.cate_2_name = cate_2_name
        self.cate_3_name = cate_3_name
|
{"/analyze/forecast.py": ["/dataBase/sql_helper.py"], "/file/file_processing.py": ["/dataBase/sql_helper.py"], "/manage.py": ["/analyze/forecast.py"]}
|
10,634
|
ranmengyuan/PredictFlow
|
refs/heads/master
|
/manage.py
|
# from file.file_processing import read_by_line, shop_info_database, get_user_pay, get_user_view
from analyze.forecast import lasso

# Entry point: the commented lines below are the one-time data-loading steps
# (parse shop_info.txt and the user pay/view logs into the database); once
# loaded, only the prediction step needs to run.
if __name__ == "__main__":
    # file_content = read_by_line("//Volumes//Transcend//文件//实验室//口碑商家流量预测//dataset//shop_info.txt")
    # shop_info_database(file_content)
    # get_user_pay("//Volumes//Transcend//文件//实验室//口碑商家流量预测//dataset//user_pay.txt")
    # get_user_view("//Volumes//Transcend//文件//实验室//口碑商家流量预测//dataset//user_view.txt")
    lasso()
|
{"/analyze/forecast.py": ["/dataBase/sql_helper.py"], "/file/file_processing.py": ["/dataBase/sql_helper.py"], "/manage.py": ["/analyze/forecast.py"]}
|
10,635
|
ranmengyuan/PredictFlow
|
refs/heads/master
|
/bean/user.py
|
class User:
    """
    用户行为 -> A per-shop, per-day user-behavior record.

    Bug fixed: the original class defined ``__init__`` twice; the second
    definition shadowed the zero-argument one, so ``User()`` raised
    ``TypeError``. Merged into a single constructor with defaults
    (backward compatible with both previous call forms).
    """

    shop_id = 0  # 商家id,与shop_info对应 (shop id, matches shop_info)
    date = 0  # 距离2015年07月01号的天数 (days since 2015-07-01)
    sum = 0  # 用户当天某种行为的次数 (count of the behavior on that day)

    def __init__(self, shop_id=0, date=0, sum=0):
        """
        Build a behavior record; all fields default to 0.

        :param shop_id: shop id (matches shop_info)
        :param date: days since 2015-07-01
        :param sum: count of the behavior on that day
            (parameter name kept for interface compatibility even though it
            shadows the ``sum`` builtin inside this method)
        """
        self.shop_id = shop_id
        self.date = date
        self.sum = sum
|
{"/analyze/forecast.py": ["/dataBase/sql_helper.py"], "/file/file_processing.py": ["/dataBase/sql_helper.py"], "/manage.py": ["/analyze/forecast.py"]}
|
10,636
|
ranmengyuan/PredictFlow
|
refs/heads/master
|
/dataBase/sql_helper.py
|
import pymysql
# from builtins import int
def conn_db():
    """
    连接数据库 -> Open a connection to the local MySQL 'Tianchi' database.

    :return: an open pymysql connection

    NOTE(review): host/user/password are hard-coded; consider moving the
    credentials into environment variables or a config file.
    """
    conn = pymysql.connect(host='localhost', port=3306, user='root', passwd='B221gt12345', db='Tianchi', charset='UTF8')
    return conn
def exe_table(cur, sql):
    """
    Execute a DDL statement (create or drop a table) on the given cursor.

    :param cur: open database cursor
    :param sql: DDL statement to run
    :return: the result of ``cur.execute`` (affected-row count)
    """
    return cur.execute(sql)
def exe_update(conn, cur, sql):
    """
    Run an INSERT/UPDATE/DELETE statement and commit the transaction.

    :param conn: open database connection (used to commit)
    :param cur: cursor belonging to ``conn``
    :param sql: statement to execute
    :return: the result of ``cur.execute`` (affected-row count)
    """
    affected = cur.execute(sql)
    # e.g. "delete from exe where Id=%d" % (int(eachID))
    conn.commit()
    return affected
def exe_query(cur, sql):
    """
    查找操作 -> Execute a SELECT and return the cursor for fetching rows.

    :param cur: open database cursor
    :param sql: query to execute
    :return: the same cursor, positioned on the result set
    """
    cur.execute(sql)
    return cur
def conn_close(conn, cur):
    """
    关闭连接,释放资源 -> Close the cursor and connection, releasing resources.

    :param conn: open database connection
    :param cur: cursor belonging to ``conn`` (closed first)
    """
    cur.close()
    conn.close()
|
{"/analyze/forecast.py": ["/dataBase/sql_helper.py"], "/file/file_processing.py": ["/dataBase/sql_helper.py"], "/manage.py": ["/analyze/forecast.py"]}
|
10,638
|
artab-sccc/SCCC-ITC172
|
refs/heads/master
|
/fitnessappproject/fitnessapp/admin.py
|
from django.contrib import admin
from .models import weightstat, Workout, Meals

# Expose the fitness-tracking models in the Django admin site.
admin.site.register(weightstat)
admin.site.register(Workout)
admin.site.register(Meals)
|
{"/fitnessappproject/fitnessapp/admin.py": ["/fitnessappproject/fitnessapp/models.py"], "/fitnessappproject/fitnessapp/views.py": ["/fitnessappproject/fitnessapp/models.py", "/fitnessappproject/fitnessapp/forms.py"], "/pythonclubapp/views.py": ["/pythonclubapp/models.py"], "/fitnessappproject/fitnessapp/forms.py": ["/fitnessappproject/fitnessapp/models.py"], "/fitnessappproject/fitnessapp/tests.py": ["/fitnessappproject/fitnessapp/models.py"], "/pythonclubapp/tests.py": ["/pythonclubapp/models.py", "/pythonclubapp/views.py"]}
|
10,639
|
artab-sccc/SCCC-ITC172
|
refs/heads/master
|
/fitnessappproject/fitnessapp/models.py
|
from django.db import models
from django.contrib.auth.models import User
class weightstat(models.Model):
    """A dated weight/height measurement; parent record for Workout and Meals."""
    weight=models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True)
    height=models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True)
    weightdate=models.DateField()

    def __str__(self):
        # Bug fix: the original method was named `int`, so Django (admin,
        # templates) fell back to the default object repr. Sibling models
        # define __str__; this one now does too.
        return str(self.weight)

    def int(self):
        # Kept for backward compatibility with any existing callers of the
        # original (misnamed) accessor.
        return self.weight

    class Meta:
        db_table='weightstat'
        verbose_name_plural='weightstats'
class Workout(models.Model):
    """A single workout session, linked to the weightstat recorded at the time."""
    currentweight=models.ForeignKey(weightstat, on_delete=models.DO_NOTHING)
    workoutname=models.CharField(max_length=255)
    # minutes/reps/sets are all optional: a workout may be time-based or rep-based
    minutes=models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True)
    reps=models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True)
    sets=models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True)
    workoutdate=models.DateField()

    def __str__(self):
        return self.workoutname

    class Meta:
        db_table='workout'
        verbose_name_plural='workouts'
class Meals(models.Model):
    """A logged meal with macro breakdown, linked to the current weightstat."""
    currentweight=models.ForeignKey(weightstat, on_delete=models.DO_NOTHING)
    mealname=models.CharField(max_length=255)
    mealcalories=models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True)
    mealcarbs=models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True)
    # NOTE(review): "mealprotien" is a typo of "protein"; renaming the field
    # would require a schema migration, so it is kept as-is.
    mealprotien=models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True)
    mealfats=models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True)
    mealdate=models.DateField()

    def __str__(self):
        return self.mealname

    class Meta:
        db_table='meal'
        verbose_name_plural='meals'
|
{"/fitnessappproject/fitnessapp/admin.py": ["/fitnessappproject/fitnessapp/models.py"], "/fitnessappproject/fitnessapp/views.py": ["/fitnessappproject/fitnessapp/models.py", "/fitnessappproject/fitnessapp/forms.py"], "/pythonclubapp/views.py": ["/pythonclubapp/models.py"], "/fitnessappproject/fitnessapp/forms.py": ["/fitnessappproject/fitnessapp/models.py"], "/fitnessappproject/fitnessapp/tests.py": ["/fitnessappproject/fitnessapp/models.py"], "/pythonclubapp/tests.py": ["/pythonclubapp/models.py", "/pythonclubapp/views.py"]}
|
10,640
|
artab-sccc/SCCC-ITC172
|
refs/heads/master
|
/fitnessappproject/fitnessapp/migrations/0001_initial.py
|
# Generated by Django 3.0.5 on 2020-06-16 21:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema: creates the weightstat, workout and meal tables.

    Do not edit by hand beyond comments; Django tracks migration state.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='WeightStat',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('weight', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('height', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
            ],
            options={
                'verbose_name_plural': 'weightstats',
                'db_table': 'weightstat',
            },
        ),
        migrations.CreateModel(
            name='Workout',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('workoutname', models.CharField(max_length=255)),
                ('minutes', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('reps', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('sets', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('workoutdate', models.DateField()),
                ('currentweight', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='fitnessapp.WeightStat')),
            ],
            options={
                'verbose_name_plural': 'workouts',
                'db_table': 'workout',
            },
        ),
        migrations.CreateModel(
            name='Meals',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mealname', models.CharField(max_length=255)),
                ('mealcalories', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('mealcarbs', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('mealprotien', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('mealfats', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('mealdate', models.DateField()),
                ('currentweight', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='fitnessapp.WeightStat')),
            ],
            options={
                'verbose_name_plural': 'meals',
                'db_table': 'meal',
            },
        ),
    ]
|
{"/fitnessappproject/fitnessapp/admin.py": ["/fitnessappproject/fitnessapp/models.py"], "/fitnessappproject/fitnessapp/views.py": ["/fitnessappproject/fitnessapp/models.py", "/fitnessappproject/fitnessapp/forms.py"], "/pythonclubapp/views.py": ["/pythonclubapp/models.py"], "/fitnessappproject/fitnessapp/forms.py": ["/fitnessappproject/fitnessapp/models.py"], "/fitnessappproject/fitnessapp/tests.py": ["/fitnessappproject/fitnessapp/models.py"], "/pythonclubapp/tests.py": ["/pythonclubapp/models.py", "/pythonclubapp/views.py"]}
|
10,641
|
artab-sccc/SCCC-ITC172
|
refs/heads/master
|
/fitnessappproject/fitnessapp/migrations/0004_auto_20200616_1509.py
|
# Generated by Django 3.0.5 on 2020-06-16 22:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: repoints Meals/Workout FKs at the renamed lowercase `weightstat` model."""

    dependencies = [
        ('fitnessapp', '0003_auto_20200616_1508'),
    ]

    operations = [
        migrations.AlterField(
            model_name='meals',
            name='currentweight',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='fitnessapp.weightstat'),
        ),
        migrations.AlterField(
            model_name='workout',
            name='currentweight',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='fitnessapp.weightstat'),
        ),
    ]
|
{"/fitnessappproject/fitnessapp/admin.py": ["/fitnessappproject/fitnessapp/models.py"], "/fitnessappproject/fitnessapp/views.py": ["/fitnessappproject/fitnessapp/models.py", "/fitnessappproject/fitnessapp/forms.py"], "/pythonclubapp/views.py": ["/pythonclubapp/models.py"], "/fitnessappproject/fitnessapp/forms.py": ["/fitnessappproject/fitnessapp/models.py"], "/fitnessappproject/fitnessapp/tests.py": ["/fitnessappproject/fitnessapp/models.py"], "/pythonclubapp/tests.py": ["/pythonclubapp/models.py", "/pythonclubapp/views.py"]}
|
10,642
|
artab-sccc/SCCC-ITC172
|
refs/heads/master
|
/fitnessappproject/fitnessapp/views.py
|
from django.shortcuts import render
from .models import weightstat, Workout, Meals
from. forms import WorkoutForm, WeightForm, MealForm
from django.contrib.auth.decorators import login_required
def index (request):
    """Render the fitness app landing page."""
    return render(request, 'fitnessapp/index.html')
def getweight(request):
    """List all weightstat records."""
    type_list=weightstat.objects.all()
    return render(request, 'fitnessapp/weight.html', {'type_list' : type_list})
def getworkout(request):
    """List all Workout records."""
    type_list=Workout.objects.all()
    return render(request, 'fitnessapp/workout.html', {'type_list' : type_list})
def getmeals(request):
    """List all Meals records."""
    type_list=Meals.objects.all()
    return render(request, 'fitnessapp/meals.html', {'type_list' : type_list})
@login_required
def newWorkout(request):
    """Display and process the new-Workout form (login required).

    On a valid POST the workout is saved once and a fresh empty form is
    shown; on an invalid POST the bound form (with errors) is re-rendered.

    Fixes vs. original: removed the dead ``form=WorkoutForm`` class
    assignment (it was always overwritten) and the redundant second save —
    ``form.save(commit=True)`` already writes the row, so ``post.save()``
    issued a duplicate UPDATE.
    """
    if request.method=='POST':
        form=WorkoutForm(request.POST)
        if form.is_valid():
            form.save()  # commit=True is the default; one DB write
            form=WorkoutForm()
    else:
        form=WorkoutForm()
    return render(request, 'fitnessapp/newworkout.html', {'form': form})
@login_required
def newWeight(request):
    """Display and process the new-weightstat form (login required).

    Fixes vs. original: removed the dead ``form=WeightForm`` class
    assignment and the redundant ``post.save()`` after
    ``form.save(commit=True)`` (which already saved the row).
    """
    if request.method=='POST':
        form=WeightForm(request.POST)
        if form.is_valid():
            form.save()  # commit=True is the default; one DB write
            form=WeightForm()
    else:
        form=WeightForm()
    return render(request, 'fitnessapp/newweight.html', {'form': form})
@login_required
def newMeal(request):
    """Display and process the new-Meals form (login required).

    Fixes vs. original: removed the dead ``form=MealForm`` class assignment
    and the redundant ``post.save()`` after ``form.save(commit=True)``
    (which already saved the row).
    """
    if request.method=='POST':
        form=MealForm(request.POST)
        if form.is_valid():
            form.save()  # commit=True is the default; one DB write
            form=MealForm()
    else:
        form=MealForm()
    return render(request, 'fitnessapp/newmeal.html', {'form': form})
def loginmessage(request):
    """Render the post-login confirmation page."""
    return render(request, 'fitnessapp/loginmessage.html')
def logoutmessage(request):
    """Render the post-logout confirmation page."""
    return render(request, 'fitnessapp/logoutmessage.html')
|
{"/fitnessappproject/fitnessapp/admin.py": ["/fitnessappproject/fitnessapp/models.py"], "/fitnessappproject/fitnessapp/views.py": ["/fitnessappproject/fitnessapp/models.py", "/fitnessappproject/fitnessapp/forms.py"], "/pythonclubapp/views.py": ["/pythonclubapp/models.py"], "/fitnessappproject/fitnessapp/forms.py": ["/fitnessappproject/fitnessapp/models.py"], "/fitnessappproject/fitnessapp/tests.py": ["/fitnessappproject/fitnessapp/models.py"], "/pythonclubapp/tests.py": ["/pythonclubapp/models.py", "/pythonclubapp/views.py"]}
|
10,643
|
artab-sccc/SCCC-ITC172
|
refs/heads/master
|
/pythonclubapp/models.py
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class meeting(models.Model):
    """A scheduled club meeting with agenda and location."""
    meetingtitle=models.CharField(max_length=255)
    meetingdate=models.DateField()
    # time stored as a bare number (e.g. 1830) rather than a TimeField
    meetingtime=models.DecimalField(max_digits=4, decimal_places=0)
    meetinglocation=models.CharField(max_length=255)
    meetingagenda=models.TextField()

    def __str__(self):
        return self.meetingtitle

    class Meta():
        # NOTE(review): table is named 'title', not 'meeting' — existing
        # tests assert this, so it is kept as-is.
        db_table='title'
        verbose_name_plural='titles'
class meetingminutes(models.Model):
    """Minutes for a meeting, plus the set of users who attended."""
    meetingid=models.ForeignKey(meeting,on_delete=models.DO_NOTHING)
    attendance=models.ManyToManyField(User)
    minutestext=models.TextField()

    def __str__(self):
        return self.minutestext

    class Meta():
        db_table='meetingminutes'
        verbose_name_plural='meetingminutes'
class resource(models.Model):
    """A shared club resource (link, article, etc.) submitted by a user."""
    resourcename=models.CharField(max_length=255)
    resourcetype=models.CharField(max_length=255)
    resourceurl=models.URLField(null=True, blank=True)
    dateentered=models.DateField()
    userid=models.ForeignKey(User,on_delete=models.DO_NOTHING)
    description=models.TextField()

    def __str__(self):
        return self.resourcename

    class Meta():
        db_table='resource'
        # Typo fix: was 'resoruces', which showed misspelled in the admin UI.
        verbose_name_plural='resources'
class event(models.Model):
    """A club event created by a user."""
    eventtitle=models.CharField(max_length=255)
    location=models.CharField(max_length=255)
    eventdate=models.DateField()
    # time stored as a bare number (e.g. 1830) rather than a TimeField
    eventtime=models.DecimalField(max_digits=4, decimal_places=0)
    eventdescription=models.TextField()
    eventuserid=models.ForeignKey(User,on_delete=models.DO_NOTHING)

    def __str__(self):
        return self.eventtitle

    class Meta():
        db_table='event'
        verbose_name_plural='events'
|
{"/fitnessappproject/fitnessapp/admin.py": ["/fitnessappproject/fitnessapp/models.py"], "/fitnessappproject/fitnessapp/views.py": ["/fitnessappproject/fitnessapp/models.py", "/fitnessappproject/fitnessapp/forms.py"], "/pythonclubapp/views.py": ["/pythonclubapp/models.py"], "/fitnessappproject/fitnessapp/forms.py": ["/fitnessappproject/fitnessapp/models.py"], "/fitnessappproject/fitnessapp/tests.py": ["/fitnessappproject/fitnessapp/models.py"], "/pythonclubapp/tests.py": ["/pythonclubapp/models.py", "/pythonclubapp/views.py"]}
|
10,644
|
artab-sccc/SCCC-ITC172
|
refs/heads/master
|
/pythonclubapp/views.py
|
from django.shortcuts import render, get_object_or_404
from .models import meeting, meetingminutes, resource, event
from .forms import ResourceForm, MeetingForm
from django.contrib.auth.decorators import login_required
# Create your views here.
def index(request):
    """Render the python club landing page."""
    return render(request, 'pythonclubapp/index.html')
def getresources(request):
    """List all resource records."""
    type_list=resource.objects.all()
    return render(request, 'pythonclubapp/resources.html', {'type_list' : type_list})
def getmeetings(request):
    """List all meeting records."""
    type_list=meeting.objects.all()
    return render(request, 'pythonclubapp/meetings.html', {'type_list' : type_list})
def meetingdetails(request, id):
    """Show the detail page for one meeting; 404 if the id does not exist."""
    meet=get_object_or_404(meeting, pk=id)
    location=meet.meetinglocation
    context={
        'meet' : meet,
        'location' : location,
    }
    return render(request, 'pythonclubapp/meetingdetails.html', context=context)
@login_required
def newResource(request):
    """Display and process the new-resource form (login required).

    Fixes vs. original: removed the dead ``form=ResourceForm`` class
    assignment (always overwritten) and the redundant ``post.save()`` after
    ``form.save(commit=True)``, which already wrote the row.
    """
    if request.method=='POST':
        form=ResourceForm(request.POST)
        if form.is_valid():
            form.save()  # commit=True is the default; one DB write
            form=ResourceForm()
    else:
        form=ResourceForm()
    return render(request, 'pythonclubapp/newresource.html', {'form': form})
@login_required
def newMeeting(request):
    """Display and process the new-meeting form (login required).

    Fixes vs. original: removed the dead ``form=MeetingForm`` class
    assignment and the redundant ``post.save()`` after
    ``form.save(commit=True)``, which already wrote the row.
    """
    if request.method=='POST':
        form=MeetingForm(request.POST)
        if form.is_valid():
            form.save()  # commit=True is the default; one DB write
            form=MeetingForm()
    else:
        form=MeetingForm()
    return render(request, 'pythonclubapp/newmeeting.html', {'form': form})
def loginmessage(request):
    """Render the post-login confirmation page."""
    return render(request, 'pythonclubapp/loginmessage.html')
def logoutmessage(request):
    """Render the post-logout confirmation page."""
    return render(request, 'pythonclubapp/logoutmessage.html')
|
{"/fitnessappproject/fitnessapp/admin.py": ["/fitnessappproject/fitnessapp/models.py"], "/fitnessappproject/fitnessapp/views.py": ["/fitnessappproject/fitnessapp/models.py", "/fitnessappproject/fitnessapp/forms.py"], "/pythonclubapp/views.py": ["/pythonclubapp/models.py"], "/fitnessappproject/fitnessapp/forms.py": ["/fitnessappproject/fitnessapp/models.py"], "/fitnessappproject/fitnessapp/tests.py": ["/fitnessappproject/fitnessapp/models.py"], "/pythonclubapp/tests.py": ["/pythonclubapp/models.py", "/pythonclubapp/views.py"]}
|
10,645
|
artab-sccc/SCCC-ITC172
|
refs/heads/master
|
/fitnessappproject/fitnessapp/forms.py
|
from django import forms
from .models import weightstat, Workout, Meals
class WorkoutForm(forms.ModelForm):
    """ModelForm exposing every Workout field."""
    class Meta:
        model=Workout
        fields='__all__'
class WeightForm(forms.ModelForm):
    """ModelForm exposing every weightstat field."""
    class Meta:
        model=weightstat
        fields='__all__'
class MealForm(forms.ModelForm):
    """ModelForm exposing every Meals field."""
    class Meta:
        model=Meals
        fields='__all__'
|
{"/fitnessappproject/fitnessapp/admin.py": ["/fitnessappproject/fitnessapp/models.py"], "/fitnessappproject/fitnessapp/views.py": ["/fitnessappproject/fitnessapp/models.py", "/fitnessappproject/fitnessapp/forms.py"], "/pythonclubapp/views.py": ["/pythonclubapp/models.py"], "/fitnessappproject/fitnessapp/forms.py": ["/fitnessappproject/fitnessapp/models.py"], "/fitnessappproject/fitnessapp/tests.py": ["/fitnessappproject/fitnessapp/models.py"], "/pythonclubapp/tests.py": ["/pythonclubapp/models.py", "/pythonclubapp/views.py"]}
|
10,646
|
artab-sccc/SCCC-ITC172
|
refs/heads/master
|
/fitnessappproject/fitnessapp/tests.py
|
from django.test import TestCase
from .models import weightstat, Workout, Meals
class MealsTest(TestCase):
    """Checks that Meals.__str__ returns the meal name."""
    def test_string(self):
        name=Meals(mealname="Cookies")
        self.assertEqual(str(name), name.mealname)
class WorkoutTest(TestCase):
    """Checks that Workout.__str__ returns the workout name."""
    def test_string(self):
        name=Workout(workoutname="Push Ups")
        self.assertEqual(str(name), name.workoutname)
|
{"/fitnessappproject/fitnessapp/admin.py": ["/fitnessappproject/fitnessapp/models.py"], "/fitnessappproject/fitnessapp/views.py": ["/fitnessappproject/fitnessapp/models.py", "/fitnessappproject/fitnessapp/forms.py"], "/pythonclubapp/views.py": ["/pythonclubapp/models.py"], "/fitnessappproject/fitnessapp/forms.py": ["/fitnessappproject/fitnessapp/models.py"], "/fitnessappproject/fitnessapp/tests.py": ["/fitnessappproject/fitnessapp/models.py"], "/pythonclubapp/tests.py": ["/pythonclubapp/models.py", "/pythonclubapp/views.py"]}
|
10,647
|
artab-sccc/SCCC-ITC172
|
refs/heads/master
|
/fitnessappproject/fitnessapp/urls.py
|
from django.urls import path, include
from . import views

# URL routes for the fitness app: list views, create views (login required
# via the view decorators), auth URLs, and login/logout confirmation pages.
urlpatterns=[
    path('', views.index, name='index'),
    path('getweight/', views.getweight, name='weight'),
    path('getworkout/', views.getworkout, name='workout'),
    path('getmeals/', views.getmeals, name='meals'),
    path('newWorkout', views.newWorkout, name='newworkout'),
    path('newWeight', views.newWeight, name='newweight'),
    path('newMeal', views.newMeal, name='newmeal'),
    path('accounts/', include('django.contrib.auth.urls')),  # built-in auth views
    path('loginmessage', views.loginmessage, name='loginmessage'),
    path('logoutmessage', views.logoutmessage, name='logoutmessage'),
]
|
{"/fitnessappproject/fitnessapp/admin.py": ["/fitnessappproject/fitnessapp/models.py"], "/fitnessappproject/fitnessapp/views.py": ["/fitnessappproject/fitnessapp/models.py", "/fitnessappproject/fitnessapp/forms.py"], "/pythonclubapp/views.py": ["/pythonclubapp/models.py"], "/fitnessappproject/fitnessapp/forms.py": ["/fitnessappproject/fitnessapp/models.py"], "/fitnessappproject/fitnessapp/tests.py": ["/fitnessappproject/fitnessapp/models.py"], "/pythonclubapp/tests.py": ["/pythonclubapp/models.py", "/pythonclubapp/views.py"]}
|
10,648
|
artab-sccc/SCCC-ITC172
|
refs/heads/master
|
/pythonclubapp/tests.py
|
from django.test import TestCase
from django.urls import reverse
from .models import meeting, meetingminutes, resource, event
from .views import newResource, getresources
from django.contrib.auth.models import User
# Tests the 'meeting' model
class MeetingTitleTest(TestCase):
    """Checks meeting.__str__ and its (intentionally odd) 'title' db table name."""
    def test_string(self):
        meet=meeting(meetingtitle="Meeting 1")
        self.assertEqual(str(meet), meet.meetingtitle)
    def test_table(self):
        self.assertEqual(str(meeting._meta.db_table), 'title')
# Tests the 'meetingminutes' model
class MeetingMinutesTest(TestCase):
    """Checks meetingminutes.__str__ and its db table name."""
    def test_string(self):
        meet=meetingminutes(minutestext="Minutes Text")
        self.assertEqual(str(meet), meet.minutestext)
    def test_table(self):
        self.assertEqual(str(meetingminutes._meta.db_table), 'meetingminutes')
# Tests the 'resource' model
class ResourceTest(TestCase):
    """Checks resource.__str__ and its db table name."""
    def test_string(self):
        res=resource(resourcename="Resource 1")
        self.assertEqual(str(res), res.resourcename)
    def test_table(self):
        self.assertEqual(str(resource._meta.db_table), 'resource')
# Tests the 'event' model
class EventTest(TestCase):
    """Checks event.__str__ and its db table name."""
    def test_string(self):
        even=event(eventtitle="Event 1")
        self.assertEqual(str(even), even.eventtitle)
    def test_table(self):
        self.assertEqual(str(event._meta.db_table), 'event')
# Tests a logged in user can access the Resource form
class ResourceFormTest(TestCase):
    """Creates and logs in a user, then checks the newresource view renders."""
    def test_view(self):
        self.test_user=User.objects.create_user(username='testuser1', password='P@ssw0rd1')
        login=self.client.login(username='testuser1', password='P@ssw0rd1')
        response = self.client.get(reverse('newresource'))
        self.assertEqual(str(response.context['user']), 'testuser1')
        self.assertEqual(response.status_code, 200) #tests view
        self.assertTemplateUsed(response, 'pythonclubapp/newresource.html') #tests template
|
{"/fitnessappproject/fitnessapp/admin.py": ["/fitnessappproject/fitnessapp/models.py"], "/fitnessappproject/fitnessapp/views.py": ["/fitnessappproject/fitnessapp/models.py", "/fitnessappproject/fitnessapp/forms.py"], "/pythonclubapp/views.py": ["/pythonclubapp/models.py"], "/fitnessappproject/fitnessapp/forms.py": ["/fitnessappproject/fitnessapp/models.py"], "/fitnessappproject/fitnessapp/tests.py": ["/fitnessappproject/fitnessapp/models.py"], "/pythonclubapp/tests.py": ["/pythonclubapp/models.py", "/pythonclubapp/views.py"]}
|
10,651
|
gritor111/Villager-Bot
|
refs/heads/master
|
/src/cogs/cmds/fun.py
|
from urllib.parse import quote as urlquote
from discord.ext import commands
import discord
import random
import typing
import util.cj as cj
class Fun(commands.Cog):
    """Cog with novelty commands: reddit images, text transforms, and image generators."""

    def __init__(self, bot):
        self.bot = bot
        self.d = bot.d  # shared static data (colors, emoji ids, word lists)
        self.k = bot.k  # API keys; self.k.vb_api authorizes api.iapetus11.me calls

    async def lang_convert(self, msg, lang):
        """Apply a character-substitution table to msg (lower and upper case);
        returns None when the escaped result would exceed Discord's 2000-char limit."""
        keys = list(lang)
        for key in keys:
            msg = msg.replace(key, lang.get(key))
            try:
                # map the uppercase variant too; some mapped glyphs can't round-trip .upper()
                msg = msg.replace(key.upper(), lang.get(key).upper())
            except Exception:
                pass
        msg = discord.utils.escape_markdown(msg)
        # 2000 is Discord's message limit; keep 6 chars of headroom
        if len(msg) > 2000 - 6:
            return
        else:
            return msg

    async def nice(self, ctx):
        """Return the invoking message's content with the command prefix and name stripped."""
        cmd_len = len(f"{ctx.prefix}{ctx.invoked_with} ")
        return ctx.message.clean_content[cmd_len:]

    @commands.command(name="meme", aliases=["meemee", "meem", "maymay", "mehmeh"])
    @commands.cooldown(1, 2, commands.BucketType.user)
    async def meme(self, ctx):
        """Sends a meme from reddit"""
        do_nsfw = False
        if isinstance(ctx.channel, discord.TextChannel):
            do_nsfw = ctx.channel.is_nsfw()
        # seed values force at least one fetch iteration
        meme = {"nsfw": True, "spoiler": True}
        async with ctx.typing():
            # keep fetching until a non-spoiler, channel-appropriate post with an image
            # arrives — NOTE(review): no retry cap; assumes the API eventually yields one
            while meme["spoiler"] or (not do_nsfw and meme["nsfw"]) or meme.get("image") is None:
                resp = await self.bot.aiohttp.get(
                    "https://api.iapetus11.me/reddit/gimme/meme+memes+me_irl+dankmemes+wholesomememes+prequelmemes",
                    headers={"Authorization": self.k.vb_api},
                )
                meme = cj.classify(await resp.json())
        embed = discord.Embed(color=self.d.cc, title=meme.title[:256], url=meme.permalink)
        embed.set_footer(
            text=f"{meme.upvotes} | u/{meme.author}",
            icon_url=self.bot.get_emoji(int(self.d.emojis.updoot.split(":")[-1].replace(">", ""))).url,
        )
        embed.set_image(url=meme.image)
        await ctx.send(embed=embed)

    @commands.command(name="4chan", aliases=["greentext"])
    @commands.cooldown(1, 2, commands.BucketType.user)
    async def greentext(self, ctx):
        """Sends a greentext from r/greentext"""
        do_nsfw = False
        if isinstance(ctx.channel, discord.TextChannel):
            do_nsfw = ctx.channel.is_nsfw()
        jj = {"nsfw": True}  # seed value forces at least one fetch
        async with ctx.typing():
            while (not do_nsfw and jj["nsfw"]) or jj.get("image") is None:
                resp = await self.bot.aiohttp.get(
                    "https://api.iapetus11.me/reddit/gimme/4chan+greentext", headers={"Authorization": self.k.vb_api}
                )
                jj = await resp.json()
        embed = discord.Embed(color=self.d.cc)
        embed.set_image(url=jj["image"])
        await ctx.send(embed=embed)

    @commands.command(name="comic")
    @commands.cooldown(1, 2, commands.BucketType.user)
    async def comic(self, ctx):
        """Sends a comic from r/comics"""
        do_nsfw = False
        if isinstance(ctx.channel, discord.TextChannel):
            do_nsfw = ctx.channel.is_nsfw()
        comic = {"nsfw": True, "spoiler": True}  # seed values force at least one fetch
        async with ctx.typing():
            while comic["spoiler"] or (not do_nsfw and comic["nsfw"]) or comic.get("image") is None:
                resp = await self.bot.aiohttp.get(
                    "https://api.iapetus11.me/reddit/gimme/comics",
                    headers={"Authorization": self.k.vb_api},
                )
                comic = cj.classify(await resp.json())
        embed = discord.Embed(color=self.d.cc, title=comic.title[:256], url=comic.permalink)
        embed.set_footer(
            text=f"{comic.upvotes} | u/{comic.author}",
            icon_url=self.bot.get_emoji(int(self.d.emojis.updoot.split(":")[-1].replace(">", ""))).url,
        )
        embed.set_image(url=comic.image)
        await ctx.send(embed=embed)

    @commands.command(name="cursed", aliases=["cursedmc"])
    @commands.cooldown(1, 2, commands.BucketType.user)
    async def cursed_mc(self, ctx):
        """Sends a cursed Minecraft image — 50/50 from reddit or a static hosted set."""
        if random.choice((True, False)):
            meme = {"nsfw": True, "spoiler": True}  # seed values force at least one fetch
            async with ctx.typing():
                # here SFW-only regardless of channel settings
                while meme["spoiler"] or meme["nsfw"] or meme.get("image") is None:
                    resp = await self.bot.aiohttp.get(
                        "https://api.iapetus11.me/reddit/gimme/CursedMinecraft",
                        headers={"Authorization": self.k.vb_api},
                    )
                    meme = cj.classify(await resp.json())
            embed = discord.Embed(color=self.d.cc, title=meme.title[:256], url=meme.permalink)
            embed.set_footer(
                text=f"{meme.upvotes} | u/{meme.author}",
                icon_url=self.bot.get_emoji(int(self.d.emojis.updoot.split(":")[-1].replace(">", ""))).url,
            )
            embed.set_image(url=meme.image)
            await ctx.send(embed=embed)
        else:
            embed = discord.Embed(color=self.d.cc)
            embed.set_image(url=f"https://iapetus11.me/static/images/cursed_minecraft/{random.choice(self.d.cursed_images)}")
            await ctx.send(embed=embed)

    @commands.command(name="say")
    async def say_text(self, ctx, *, _text):
        """Sends whatever is put into the command"""
        try:
            # best-effort delete of the invoking message (may lack permission)
            await ctx.message.delete()
        except Exception:
            pass
        await ctx.send(await self.nice(ctx))

    @commands.command(name="villagerspeak")
    async def villager_speak(self, ctx, *, msg):
        """Turns the given text into Minecraft villager sounds as text"""
        translated = await self.lang_convert(await self.nice(ctx), self.d.fun_langs["villager"])
        if translated is None:
            await self.bot.send(ctx, ctx.l.fun.too_long)
        else:
            await ctx.send(translated)

    @commands.command(name="enchant")
    async def enchant_lang(self, ctx, *, msg):
        """Turns regular text into the Minecraft enchantment table language"""
        translated = await self.lang_convert((await self.nice(ctx)).lower(), self.d.fun_langs["enchant"])
        if translated is None:
            await self.bot.send(ctx, ctx.l.fun.too_long)
        else:
            await ctx.send(translated)

    @commands.command(name="unenchant")
    async def unenchant_lang(self, ctx, *, msg):
        """Turns the Minecraft enchantment table language back into regular text"""
        translated = await self.lang_convert(await self.nice(ctx), self.d.fun_langs["unenchant"])
        if translated is None:
            await self.bot.send(ctx, ctx.l.fun.too_long)
        else:
            await ctx.send(translated)

    @commands.command(name="vaporwave")
    async def vaporwave_text(self, ctx, *, msg):
        """Turns regular text into vaporwave text"""
        translated = await self.lang_convert(await self.nice(ctx), self.d.fun_langs["vaporwave"])
        if translated is None:
            await self.bot.send(ctx, ctx.l.fun.too_long)
        else:
            await ctx.send(translated)

    @commands.command(name="sarcastic")
    async def sarcastic_text(self, ctx, *, msg):
        """Turns regular text into "sarcastic" text from spongebob"""
        msg = await self.nice(ctx)
        if len(msg) > 2000:
            await self.bot.send(ctx, ctx.l.fun.too_long)
            return
        caps = True
        sarcastic = ""
        # alternate letter case, skipping spaces so the pattern isn't reset by them
        for letter in msg:
            if not letter == " ":
                caps = not caps
            if caps:
                sarcastic += letter.upper()
            else:
                sarcastic += letter.lower()
        await ctx.send(sarcastic)

    @commands.command(name="clap")
    async def clap_cheeks(self, ctx, *, text):
        """Puts the :clap: emoji between words"""
        clapped = ":clap: " + " :clap: ".join((await self.nice(ctx)).split(" ")) + " :clap:"
        if len(clapped) > 2000:
            await self.bot.send(ctx, ctx.l.fun.too_long)
            return
        await ctx.send(clapped)

    @commands.command(name="emojify")
    async def emojifi_text(self, ctx, *, _text):
        """Turns text into emojis"""
        abcdefg_someone_shouldve_told_ya_not_to_fuck_with_me = "abcdefghijklmnopqrstuvwxyz"
        text = ""
        for letter in (await self.nice(ctx)).lower():
            if letter in abcdefg_someone_shouldve_told_ya_not_to_fuck_with_me:
                text += f":regional_indicator_{letter}: "
            else:
                # non-letters fall back to a lookup table, else pass through unchanged
                text += self.d.emojified.get(letter, letter) + " "
        if len(text) > 2000:
            await self.bot.send(ctx, ctx.l.fun.too_long)
        else:
            await ctx.send(text)

    @commands.command(name="owo", aliases=["owofy"])
    async def owofy_text(self, ctx, *, text):
        """Make any string more cringe"""
        text = text.lower().replace("l", "w").replace("r", "w")
        # 1950 leaves room for the appended owo suffix within the 2000 limit
        if len(text) > 1950:
            await self.bot.send(ctx, ctx.l.fun.too_long)
        else:
            await ctx.send(f"{text} {random.choice(self.d.owos)}")

    @commands.command(name="bubblewrap", aliases=["pop"])
    async def bubblewrap(self, ctx, size=None):
        """Sends bubblewrap to the chat"""
        if size is None:
            size = (10, 10)  # default grid
        else:
            # expected format "WxH", e.g. "5x8"
            size = size.split("x")
            if len(size) != 2:
                await self.bot.send(ctx, ctx.l.fun.bubblewrap.invalid_size_1)
                return
            try:
                size[0] = int(size[0])
                size[1] = int(size[1])
            except ValueError:
                await self.bot.send(ctx, ctx.l.fun.bubblewrap.invalid_size_1)
                return
        # both dimensions clamped to 1..12 so the message fits in a Discord message
        for val in size:
            if val < 1 or val > 12:
                await self.bot.send(ctx, ctx.l.fun.bubblewrap.invalid_size_2)
                return
        bubble = "||**pop**||"
        await self.bot.send(ctx, f"{bubble*size[0]}\n" * size[1])

    @commands.command(name="kill", aliases=["die", "kil", "dorito"])
    async def kill_thing(self, ctx, *, thing: typing.Union[discord.Member, str]):
        """Sends a random "kill" message aimed at the given member or text."""
        if isinstance(thing, discord.Member):
            thing = thing.mention
        await self.bot.send(ctx, random.choice(self.d.kills).format(thing[:500], ctx.author.mention))

    @commands.command(name="coinflip", aliases=["flipcoin", "cf"])
    async def coin_flip(self, ctx):
        """Flips a coin and sends the result."""
        await self.bot.send(ctx, random.choice(("heads", "tails")))

    @commands.command(name="pat")
    @commands.guild_only()
    async def pat(self, ctx, users: commands.Greedy[discord.Member] = [], *, text: str = ""):
        """Sends a random pat gif mentioning the given members."""
        # NOTE(review): mutable default ([]) — harmless here since it is never mutated
        resp = await self.bot.aiohttp.get("https://rra.ram.moe/i/r?type=pat")
        image_url = "https://rra.ram.moe" + (await resp.json())["path"]
        embed = discord.Embed(
            color=self.d.cc,
            title=f"**{discord.utils.escape_markdown(ctx.author.display_name)}** pats {', '.join(f'**{discord.utils.escape_markdown(u.display_name)}**' for u in users)} {text}"[
                :256
            ],
        )
        embed.set_image(url=image_url)
        await ctx.send(embed=embed)

    @commands.command(name="slap")
    @commands.guild_only()
    async def slap(self, ctx, users: commands.Greedy[discord.Member] = [], *, text: str = ""):
        """Sends a random slap gif mentioning the given members."""
        # NOTE(review): mutable default ([]) — harmless here since it is never mutated
        resp = await self.bot.aiohttp.get("https://rra.ram.moe/i/r?type=slap")
        image_url = "https://rra.ram.moe" + (await resp.json())["path"]
        embed = discord.Embed(
            color=self.d.cc,
            title=f"**{discord.utils.escape_markdown(ctx.author.display_name)}** slaps {', '.join(f'**{discord.utils.escape_markdown(u.display_name)}**' for u in users)} {text}"[
                :256
            ],
        )
        embed.set_image(url=image_url)
        await ctx.send(embed=embed)

    @commands.command(name="achievement", aliases=["mcachieve"])
    @commands.cooldown(1, 1, commands.BucketType.user)
    async def minecraft_achievement(self, ctx, *, text):
        """Renders text as a Minecraft achievement toast image (26-char limit)."""
        url = f"https://api.iapetus11.me/mc/achievement/{urlquote(text[:26])}"
        embed = discord.Embed(color=self.d.cc)
        embed.description = ctx.l.fun.dl_img.format(url)
        embed.set_image(url=url)
        await ctx.send(embed=embed)

    @commands.command(name="splashtext", aliases=["mcsplash", "splashscreen", "splash"])
    @commands.cooldown(1, 1, commands.BucketType.user)
    async def minecraft_splash_screen(self, ctx, *, text):
        """Renders text as a Minecraft title-screen splash image (27-char limit)."""
        url = f"https://api.iapetus11.me/mc/splash/{urlquote(text[:27])}"
        embed = discord.Embed(color=self.d.cc)
        embed.description = ctx.l.fun.dl_img.format(url)
        embed.set_image(url=url)
        await ctx.send(embed=embed)
def setup(bot):
    """discord.py extension entry point: attach the Fun cog to the bot."""
    bot.add_cog(Fun(bot))
|
{"/__main__.py": ["/src/bot.py"]}
|
10,652
|
gritor111/Villager-Bot
|
refs/heads/master
|
/src/speedups/__init__.py
|
"""Much of the code in this folder is from github.com/Rapptz/discord.py, credit is to the author of discord.py"""
import importlib
import sys
import speedups.mixins
import speedups.gateway
import speedups.activity
import speedups.utils
import speedups.message
import speedups.ext.commands.cooldowns as speedups_cooldowns
import speedups.ext.commands.view as speedups_view
def install_module(new_module, old_module):
    """Copy every name listed in ``new_module.__all__`` onto ``old_module``,
    but only overwrite attributes that ``old_module`` already defines —
    nothing new is ever added to the target module."""
    for name in new_module.__all__:
        if hasattr(old_module, name):
            setattr(old_module, name, getattr(new_module, name))
def install():
    """Monkey-patch the already-imported discord.py modules with the
    speedups replacements.

    Must be called after ``discord`` has been imported (it is looked up in
    ``sys.modules``). ``discord.ext.commands.bot`` is reloaded at the end so
    it re-binds the patched names.
    """
    discord = sys.modules.get("discord")

    # patch the top-level discord module first
    for new_module in (speedups.mixins, speedups.gateway, speedups.activity, speedups.utils, speedups.message):
        install_module(new_module, discord)

    # then the ext.commands submodules
    install_module(speedups_cooldowns, discord.ext.commands.cooldowns)
    install_module(speedups_view, discord.ext.commands.view)

    importlib.reload(discord.ext.commands.bot)
|
{"/__main__.py": ["/src/bot.py"]}
|
10,653
|
gritor111/Villager-Bot
|
refs/heads/master
|
/src/util/setup.py
|
from discord.ext import commands
import aiofiles
import discord
import logging
import asyncpg
import json
import os
from util.cj import ClassyDict
def villager_bot_intents() -> discord.Intents:
    """Build the gateway intents Villager Bot subscribes to.

    Starts from ``discord.Intents.default()`` and toggles individual flags,
    so any flag not listed keeps its library default.
    """
    intents = discord.Intents.default()

    enabled = ("guilds", "members", "bans", "presences", "messages", "reactions")
    disabled = ("emojis", "integrations", "webhooks", "invites", "voice_states", "typing")

    for flag in enabled:
        setattr(intents, flag, True)
    for flag in disabled:
        setattr(intents, flag, False)

    return intents
def setup_logging() -> logging.Logger:
    """Configure root logging at INFO level and return the bot's main logger."""
    logging.basicConfig(level=logging.INFO, format="%(levelname)s:%(name)s: %(message)s")

    # quiet down chatty third-party loggers
    for noisy_logger in ("asyncio", "discord.gateway"):
        logging.getLogger(noisy_logger).setLevel(logging.WARNING)

    return logging.getLogger("main")
async def setup_database(bot: commands.AutoShardedBot, keys: ClassyDict) -> None:
    """Create the asyncpg connection pool and attach it to the bot as ``bot.db``."""
    db_conf = keys.database

    bot.db = await asyncpg.create_pool(
        host=db_conf.host,  # where db is hosted
        database=db_conf.name,  # name of database
        user=db_conf.user,  # database username
        password=db_conf.passw,  # password which goes with user
        max_size=20,
        command_timeout=10,
    )
def load_text() -> ClassyDict:
    """Load and merge every JSON translation file under ``data/text``.

    Files are merged in ``os.listdir`` order, so a top-level key appearing
    in multiple files is overwritten by the later file.

    Returns:
        ClassyDict: the merged translation data.
    """
    text = {}

    for filename in os.listdir("data/text"):
        # interpolate the actual filename; previously the loop variable was
        # unused and the path was a broken literal
        with open(f"data/text/{filename}", "r", encoding="utf8") as f:
            text.update(json.load(f))

    return ClassyDict(text)
async def load_text_async() -> ClassyDict:
    """Async variant of :func:`load_text`: load and merge every JSON
    translation file under ``data/text`` using aiofiles.

    Returns:
        ClassyDict: the merged translation data.
    """
    text = {}

    for filename in os.listdir("data/text"):
        # interpolate the actual filename; previously the loop variable was
        # unused and the path was a broken literal
        async with aiofiles.open(f"data/text/{filename}", "r", encoding="utf8") as f:
            text.update(json.loads(await f.read()))

    return ClassyDict(text)
|
{"/__main__.py": ["/src/bot.py"]}
|
10,654
|
gritor111/Villager-Bot
|
refs/heads/master
|
/src/cogs/core/events.py
|
from discord.ext import commands
import traceback
import discord
import asyncio
import random
from util.handlers import handle_message, handle_error
from util.misc import cooldown_logic
class Events(commands.Cog):
    """Core event listeners: connection, guild membership, messages, and
    command error handling."""

    def __init__(self, bot):
        self.bot = bot

        self.d = bot.d  # constant data loaded from data.json
        self.v = bot.v  # mutable runtime state

        self.db = bot.get_cog("Database")

    @commands.Cog.listener()
    async def on_ready(self):
        # \u001b[...m sequences are ANSI color codes for the console
        self.bot.logger.info(f"\u001b[36;1mCONNECTED\u001b[0m [{self.bot.shard_count} Shards] [{len(self.bot.cogs)} Cogs]")

    @commands.Cog.listener()
    async def on_guild_join(self, guild):
        await asyncio.sleep(1)

        # greet in the first text channel whose name contains "general"
        for channel in guild.text_channels:
            if "general" in channel.name:
                embed = discord.Embed(
                    color=self.d.cc,
                    description=f"Hey y'all! Type `{self.d.default_prefix}help` to get started with Villager Bot!\n"
                    f"If you need any more help, check out the **[Support Server]({self.d.support})**!",
                )

                embed.set_author(name="Villager Bot", icon_url=self.d.splash_logo)
                embed.set_footer(text=f"Made by Iapetus11 | {self.d.default_prefix}rules for the rules!")

                await channel.send(embed=embed)
                break

            # yield to the event loop between channel checks
            await asyncio.sleep(0)

    @commands.Cog.listener()
    async def on_guild_remove(self, guild):
        # drop the guild's settings row when the bot leaves
        await self.db.drop_guild(guild.id)

    @commands.Cog.listener()
    async def on_member_ban(self, guild, user):
        # banned members lose their accumulated warnings
        await self.db.clear_warns(user.id, guild.id)

    @commands.Cog.listener()
    async def on_member_join(self, member):
        # keep support-server roles in sync when someone joins it
        if member.guild.id == self.d.support_server_id:
            await self.bot.wait_until_ready()
            await self.bot.update_support_member_role(member)

    @commands.Cog.listener()
    async def on_message(self, m):
        try:
            # handle_message returns a list of coroutines to run concurrently
            await asyncio.gather(*handle_message(self, m))
        except discord.errors.Forbidden:
            pass  # best-effort: ignore channels we can't respond in

    async def debug_error(self, ctx, e, loc=None):
        """Format an exception with context and ship it to *loc* (defaults to
        the configured error channel)."""
        self.bot.statcord.error_count += 1

        if loc is None:
            loc = self.bot.get_channel(self.d.error_channel_id)

        # ctx may be a partial/stub object without message content
        try:
            ctx.message.content
        except AttributeError:
            ctx.message.content = None

        traceback_text = "".join(traceback.format_exception(type(e), e, e.__traceback__, 4))
        # escape backtick runs so the traceback can't break out of the code block
        final = f"{ctx.author} (lang={getattr(ctx, 'l', {}).get('lang')}): {ctx.message.content}\n\n{traceback_text}".replace(
            "``", "\`\`\`"
        )

        await loc.send(f"```py\n{final[:1023 - 6]}```")

    @commands.Cog.listener()
    async def on_error(self, event_method, *args, **kwargs):
        # report raw event-handler errors to the error channel, then re-raise
        await self.bot.send(
            self.bot.get_channel(self.d.error_channel_id),
            f"Error in {event_method} occurred.\n```py\n{args}```\n```py\n{kwargs}```",
        )

        raise

    @commands.Cog.listener()
    async def on_command_error(self, ctx, e):
        try:
            if isinstance(e, commands.CommandOnCooldown):
                if ctx.command.name == "mine":
                    # certain items/potions shave time off the mine cooldown
                    if await self.db.fetch_item(ctx.author.id, "Efficiency I Book") is not None:
                        e.retry_after -= 0.5

                    if "haste ii potion" in self.v.chuggers.get(ctx.author.id, []):
                        e.retry_after -= 1
                    elif "haste i potion" in self.v.chuggers.get(ctx.author.id, []):
                        e.retry_after -= 0.5

                seconds = round(e.retry_after, 2)

                # if the reductions brought the cooldown (near) zero, just rerun
                if seconds <= 0.05:
                    await ctx.reinvoke()
                    return

                time = cooldown_logic(ctx, seconds)
                await self.bot.send(ctx, random.choice(ctx.l.misc.cooldown_msgs).format(time))
            else:
                await asyncio.gather(*handle_error(self, ctx, e))
        except discord.errors.Forbidden:
            pass  # can't reply in this channel; nothing to do
        except BaseException as e:
            # error while handling the error: report it unless it's just a
            # wrapped Forbidden
            if not isinstance(getattr(e, "original", None), discord.errors.Forbidden):
                await self.debug_error(ctx, e)
def setup(bot):
    """Register the Events cog on the given bot (discord.py extension entry point)."""
    cog = Events(bot)
    bot.add_cog(cog)
|
{"/__main__.py": ["/src/bot.py"]}
|
10,655
|
gritor111/Villager-Bot
|
refs/heads/master
|
/src/cogs/cmds/mod.py
|
from discord.ext import commands
from typing import Union
import asyncio
import discord
class Mod(commands.Cog):
    """Moderation commands: purge, kick, ban/pardon, warnings, mute/unmute."""

    def __init__(self, bot):
        self.bot = bot

        self.d = bot.d  # constant data loaded from data.json

        self.db = bot.get_cog("Database")

    async def perm_check(self, author, victim):
        """Return True if *author* may moderate *victim*.

        The guild owner may act on anyone; otherwise the author's top role
        must be strictly above the victim's, and the victim must not be the
        guild owner.
        """
        if isinstance(author, discord.Member) and author.id == author.guild.owner.id:
            return True

        guild_roles = author.guild.roles

        # higher list index == higher role in the hierarchy
        return (
            guild_roles.index(author.top_role) > guild_roles.index(victim.top_role) and not victim.id == author.guild.owner.id
        )

    @commands.command(name="purge", aliases=["p"])
    @commands.guild_only()
    @commands.bot_has_permissions(manage_messages=True)
    @commands.has_permissions(manage_messages=True)
    async def purge(self, ctx, to_purge: Union[discord.Member, int], amount=20):
        """Purges the given amount of messages from the current channel"""
        # `to_purge` is either a member (purge their messages) or a count;
        # +1 accounts for the invoking command message itself
        try:
            if isinstance(to_purge, discord.Member):

                def check(m):
                    return m.author.id == to_purge.id

                await ctx.channel.purge(check=check, limit=amount + 1)
            else:
                await ctx.channel.purge(limit=to_purge + 1)
        except asyncio.queues.QueueEmpty:
            await self.bot.send(ctx, ctx.l.mod.purge.oop)
        except discord.errors.NotFound:
            # messages deleted concurrently by someone else
            await self.bot.send(ctx, ctx.l.mod.purge.oop)

    @commands.command(name="kick", aliases=["yeet"])
    @commands.guild_only()
    @commands.has_permissions(kick_members=True)
    @commands.bot_has_permissions(kick_members=True)
    async def kick_user(self, ctx, user: discord.Member, *, reason="No reason provided."):
        """Kicks the given user from the current Discord server"""
        if ctx.author.id == user.id:
            await self.bot.send(ctx, ctx.l.mod.kick.stupid_1)
            return

        if not await self.perm_check(ctx.author, user):
            await self.bot.send(ctx, ctx.l.mod.no_perms)
            return

        await ctx.guild.kick(user, reason=f"{ctx.author} | {reason}")
        await ctx.message.add_reaction(self.d.emojis.yes)

    @commands.command(name="ban", aliases=["megayeet"])
    @commands.guild_only()
    @commands.has_permissions(ban_members=True)
    @commands.bot_has_permissions(ban_members=True)
    async def ban_user(self, ctx, user: Union[discord.Member, int], *, reason="No reason provided."):
        """Bans the given user from the current Discord server"""
        # an int argument means the target may not be in the guild: resolve it
        if type(user) == int:
            try:
                user = await self.bot.fetch_user(user)
            except discord.HTTPException:
                raise commands.BadArgument

        if ctx.author.id == user.id:
            await self.bot.send(ctx, ctx.l.mod.ban.stupid_1)
            return

        if not await self.perm_check(ctx.author, user):
            await self.bot.send(ctx, ctx.l.mod.no_perms)
            return

        # refuse to double-ban
        for entry in await ctx.guild.bans():
            if entry[1].id == user.id:
                await self.bot.send(ctx, ctx.l.mod.ban.stupid_2.format(user))
                return

        try:
            await ctx.guild.ban(user, reason=f"{ctx.author} | {reason}", delete_message_days=0)
            await ctx.message.add_reaction(self.d.emojis.yes)
        except discord.errors.Forbidden:
            await self.bot.send(ctx, ctx.l.mod.ban.stupid_3)

    @commands.command(name="pardon", aliases=["unban"])
    @commands.guild_only()
    @commands.has_permissions(ban_members=True)
    @commands.bot_has_permissions(ban_members=True)
    async def pardon_user(self, ctx, user: Union[discord.User, int], *, reason="No reason provided."):
        """Unbans / pardons the given user from the current Discord server"""
        # resolve a raw id into a user object
        if type(user) == int:
            try:
                user = await self.bot.fetch_user(user)
            except discord.HTTPException:
                raise commands.BadArgument

        if ctx.author.id == user.id:
            await self.bot.send(ctx, ctx.l.mod.unban.stupid_1)
            return

        # only unban if the user is actually in the ban list
        for entry in await ctx.guild.bans():
            if entry[1].id == user.id:
                await ctx.guild.unban(user, reason=f"{ctx.author} | {reason}")
                await ctx.message.add_reaction(self.d.emojis.yes)
                return

        await self.bot.send(ctx, ctx.l.mod.unban.stupid_2.format(user))

    @commands.command(name="warn")
    @commands.guild_only()
    @commands.has_permissions(kick_members=True)
    async def warn(self, ctx, user: discord.Member, *, reason=None):
        """Add a warning (with optional reason) to the given member."""
        if ctx.author.id == user.id:
            await self.bot.send(ctx, ctx.l.mod.warn.stupid_1)
            return

        if not await self.perm_check(ctx.author, user):
            await self.bot.send(ctx, ctx.l.mod.no_perms)
            return

        warns = await self.db.fetch_warns(user.id, ctx.guild.id)

        # hard cap of 20 warnings per member per guild
        if len(warns) >= 20:
            await self.bot.send(ctx, ctx.l.mod.warn.thats_too_much_man)
            return

        if reason is not None:
            # keep reasons inside the embed field limit
            if len(reason) > 245:
                reason = f"{reason[:245]}..."

        await self.db.add_warn(user.id, ctx.guild.id, ctx.author.id, reason)

        await self.bot.send(
            ctx,
            ctx.l.mod.warn.confirm.format(
                self.d.emojis.yes, user.mention, len(warns) + 1, discord.utils.escape_markdown(str(reason))
            ),
        )

    @commands.command(name="warns", aliases=["warnings", "karens"])
    @commands.guild_only()
    async def warnings(self, ctx, user: discord.Member = None):
        """List the warnings of the given member (defaults to the author)."""
        if user is None:
            user = ctx.author

        # viewing someone else's warns requires outranking them
        if ctx.author.id != user.id:
            if not await self.perm_check(ctx.author, user):
                await self.bot.send(ctx, ctx.l.mod.no_perms)
                return

        warns = await self.db.fetch_warns(user.id, ctx.guild.id)

        embed = discord.Embed(color=self.d.cc)

        embed.set_author(name=f"{user}'s warnings ({len(warns)} total):", icon_url=user.avatar_url_as())

        if len(warns) < 1:
            embed.add_field(name="\uFEFF", value=f"{user} has no warnings.")
        else:
            for warn in warns:
                reason = ctx.l.mod.warn.no_reason

                if warn["reason"] is not None:
                    reason = warn["reason"]

                embed.add_field(
                    name="\uFEFF",
                    value=f'**{ctx.l.mod.warn.by} {self.bot.get_user(warn["mod_id"]).mention}**: *{reason}*',
                    inline=False,
                )

        await ctx.send(embed=embed)

    @commands.command(name="delwarns", aliases=["clearwarns", "remwarns", "removewarns", "delwarnings"])
    @commands.guild_only()
    @commands.has_permissions(kick_members=True)
    async def clear_warnings(self, ctx, user: discord.Member):
        """Remove all of a member's warnings (owner may clear their own)."""
        if ctx.author.id == user.id and ctx.guild.owner.id != ctx.author.id:
            await self.bot.send(ctx, ctx.l.mod.warn.stupid_2)
            return

        if not await self.perm_check(ctx.author, user):
            await self.bot.send(ctx, ctx.l.mod.no_perms)
            return

        await self.db.clear_warns(user.id, ctx.guild.id)
        await ctx.message.add_reaction(self.d.emojis.yes)

    @commands.command(name="mute", aliases=["shutup", "silence", "shush", "stfu"])
    @commands.guild_only()
    @commands.has_permissions(kick_members=True)
    async def mute(self, ctx, user: discord.Member):
        """Give the member a "Muted" role, creating/configuring it if needed."""
        if ctx.author.id == user.id:
            await self.bot.send(ctx, ctx.l.mod.mute.stupid_1)
            return

        if not await self.perm_check(ctx.author, user):
            await self.bot.send(ctx, ctx.l.mod.no_perms)
            return

        if discord.utils.get(ctx.guild.roles, name="Muted") is None:  # check if role exists
            await ctx.guild.create_role(
                name="Muted", permissions=discord.Permissions(send_messages=False, add_reactions=False)
            )

        # fetch role
        mute = discord.utils.get(ctx.guild.roles, name="Muted")

        if mute is None:
            # cache may be stale right after creation; hit the API directly
            mute = discord.utils.get(await ctx.guild.fetch_roles(), name="Muted")

        async with ctx.typing():
            for channel in ctx.guild.text_channels:  # fix perms for channels
                if mute not in channel.overwrites:
                    await channel.set_permissions(mute, send_messages=False, add_reactions=False)

        await user.add_roles(mute)
        await self.bot.send(ctx, ctx.l.mod.mute.mute_msg.format(user))

    @commands.command(name="unmute", aliases=["unshut", "shutnt"])
    @commands.guild_only()
    @commands.has_permissions(kick_members=True)
    async def unmute(self, ctx, user: discord.Member):
        """Remove the "Muted" role from the member, if they have it."""
        if ctx.author.id == user.id:
            await self.bot.send(ctx, ctx.l.mod.unmute.stupid_1)
            return

        if not await self.perm_check(ctx.author, user):
            await self.bot.send(ctx, ctx.l.mod.no_perms)
            return

        mute = discord.utils.get(user.roles, name="Muted")

        if mute:
            await user.remove_roles(mute)
            await self.bot.send(ctx, ctx.l.mod.unmute.unmute_msg.format(user))
        else:
            await self.bot.send(ctx, ctx.l.mod.unmute.stupid_2.format(user))
def setup(bot):
    """Register the Mod cog on the given bot (discord.py extension entry point)."""
    cog = Mod(bot)
    bot.add_cog(cog)
|
{"/__main__.py": ["/src/bot.py"]}
|
10,656
|
gritor111/Villager-Bot
|
refs/heads/master
|
/src/cogs/core/database.py
|
from discord.ext import commands, tasks
import asyncio
import arrow
class Database(commands.Cog):
    """Data-access layer: wraps the asyncpg pool, with in-memory caches for
    hot user and item rows and a background health-regeneration loop."""

    def __init__(self, bot):
        self.bot = bot

        self.d = bot.d
        self.v = bot.v

        self.db = bot.db  # the asyncpg pool

        self.update_user_health.start()
        bot.loop.create_task(self.populate_caches())

        self._user_cache = {}  # {uid: Record(user)}
        self._items_cache = {}  # {uid: [Record(item), Record(item)]}

    def cog_unload(self):
        # stop the health-regen loop when the cog is unloaded/reloaded
        self.update_user_health.cancel()

    async def populate_caches(self):  # initial caches for speeeeeed
        self.v.ban_cache = await self.fetch_all_botbans()
        self.v.lang_cache = await self.fetch_all_guild_langs()
        self.v.prefix_cache = await self.fetch_all_guild_prefixes()
        self.v.additional_mcservers = await self.fetch_all_mcservers()
        self.v.disabled_cmds = await self.fetch_all_disabled_commands()

        self.d.replies_cache = await self.fetch_all_do_replies()

    def cache_user(self, uid, user):
        # store and return, so callers can `return self.cache_user(...)`
        self._user_cache[uid] = user
        return user

    def uncache_user(self, uid):
        try:
            del self._user_cache[uid]
        except KeyError:
            pass

    def cache_items(self, uid, items):
        self._items_cache[uid] = items
        return items

    def uncache_items(self, uid):
        try:
            del self._items_cache[uid]
        except KeyError:
            pass

    @tasks.loop(seconds=32)
    async def update_user_health(self):
        # regenerate 1 health for everyone below the 20-health cap
        uids = await self.db.fetch("UPDATE users SET health = health + 1 WHERE health < 20 RETURNING uid")

        # NOTE(review): fetch() yields Record objects, not ints, so
        # uncache_user(uid) likely never matches a cached int key here;
        # consider uncache_user(uid["uid"]) — confirm before changing.
        for uid in uids:
            self.uncache_user(uid)
            await asyncio.sleep(0)

    async def fetch_current_reminders(self) -> list:
        # atomically pop all due reminders
        return await self.db.fetch("DELETE FROM reminders WHERE at <= $1 RETURNING *", arrow.utcnow().timestamp())

    async def fetch_user_reminder_count(self, uid: int) -> int:
        return await self.db.fetchval("SELECT COUNT(*) FROM reminders WHERE uid = $1", uid)

    async def add_reminder(self, uid: int, cid: int, mid: int, reminder: str, at: int):
        await self.db.execute("INSERT INTO reminders VALUES ($1, $2, $3, $4, $5)", uid, cid, mid, reminder, at)

    async def fetch_all_botbans(self):
        botban_records = await self.db.fetch(
            "SELECT uid FROM users WHERE bot_banned = true"
        )  # returns [Record<uid=>, Record<uid=>,..]

        return set([r[0] for r in botban_records])

    async def fetch_all_guild_langs(self):
        lang_records = await self.db.fetch("SELECT gid, lang FROM guilds")
        # only cache non-default languages to keep the dict small
        return dict(
            (r[0], r[1]) for r in lang_records if (r[1] != "en" and r[1] is not None and r[1] != "en_us")
        )  # needs to be a dict

    async def fetch_all_guild_prefixes(self):
        prefix_records = await self.db.fetch("SELECT gid, prefix FROM guilds")
        # only cache non-default prefixes to keep the dict small
        return dict(
            (r[0], r[1]) for r in prefix_records if (r[1] != self.d.default_prefix and r[1] is not None)
        )  # needs to be a dict

    async def fetch_all_mcservers(self):
        servers = await self.db.fetch("SELECT host, link FROM mcservers")

        return [(s["host"], s["link"]) for s in servers]

    async def fetch_all_disabled_commands(self):
        disabled = await self.db.fetch("SELECT * FROM disabled")

        # group disabled command names by guild id
        disabled_nice = {}

        for entry in disabled:
            disabled_nice[entry["gid"]] = disabled_nice.get(entry["gid"], []) + [entry[1]]

        return disabled_nice

    async def fetch_all_do_replies(self):
        return {g["gid"]: g["replies"] for g in await self.db.fetch("SELECT gid, replies FROM guilds WHERE replies = true")}

    async def fetch_guild(self, gid):
        """Fetch a guild row, inserting defaults first if it doesn't exist."""
        g = await self.db.fetchrow("SELECT * FROM guilds WHERE gid = $1", gid)

        if g is None:
            await self.db.execute(
                "INSERT INTO guilds VALUES ($1, $2, $3, $4, $5, $6, $7, $8)",
                gid,
                self.d.default_prefix,
                True,
                "easy",
                "en",
                None,
                False,
                False,
            )

            return await self.fetch_guild(gid)

        return g

    async def set_guild_attr(self, gid, attr, value):
        # `attr` is interpolated into SQL: callers must pass trusted column names only
        await self.fetch_guild(gid)  # ensure it exists in db
        await self.db.execute(f"UPDATE guilds SET {attr} = $1 WHERE gid = $2", value, gid)

    async def drop_guild(self, gid):
        await self.db.execute("DELETE FROM guilds WHERE gid = $1", gid)

        # evict any cached settings for the dropped guild
        try:
            del self.v.lang_cache[gid]
        except KeyError:
            pass

        try:
            del self.v.prefix_cache[gid]
        except KeyError:
            pass

    async def fetch_guild_premium(self, gid):
        return bool(await self.db.fetchval("SELECT premium FROM guilds WHERE gid = $1", gid))

    async def set_cmd_usable(self, gid, cmd, usable):
        if usable:
            await self.db.execute("DELETE FROM disabled WHERE gid = $1 AND cmd = $2", gid, cmd)
        else:
            await self.db.execute("INSERT INTO disabled VALUES ($1, $2)", gid, cmd)

    async def fetch_user(self, uid):
        """Fetch a user row (cached), creating it with starter items if new."""
        try:
            return self._user_cache[uid]
        except KeyError:
            pass

        user = await self.db.fetchrow("SELECT * FROM users WHERE uid = $1", uid)

        if user is None:
            await self.db.execute(
                "INSERT INTO users VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)", uid, 0, 0, 1, 20, False, 0, 0, False
            )

            # new users start with sticky (un-sellable) wooden tools
            await self.add_item(uid, "Wood Pickaxe", 0, 1, True)
            await self.add_item(uid, "Wood Sword", 0, 1, True)

            return await self.fetch_user(uid)

        return self.cache_user(uid, user)

    async def update_user(self, uid, key, value):
        # `key` is interpolated into SQL: callers must pass trusted column names only
        await self.fetch_user(uid)
        await self.db.execute(f"UPDATE users SET {key} = $1 WHERE uid = $2", value, uid)
        self.uncache_user(uid)

    async def fetch_balance(self, uid):  # fetches the amount of emeralds a user has
        # we can do this because self.fetch_user ensures user is not None
        return (await self.fetch_user(uid))["emeralds"]

    async def set_balance(self, uid, emeralds):
        await self.fetch_user(uid)
        await self.db.execute("UPDATE users SET emeralds = $1 WHERE uid = $2", emeralds, uid)
        self.uncache_user(uid)

    async def balance_add(self, uid, amount):
        new_bal = await self.fetch_balance(uid) + amount
        await self.set_balance(uid, new_bal)
        self.uncache_user(uid)
        return new_bal

    async def balance_sub(self, uid, amount):
        """Subtract up to *amount* emeralds (floored at 0); returns the
        amount actually removed."""
        bal = await self.fetch_balance(uid)
        new = bal - amount

        if new < 0:
            amount = bal
            new = 0

        await self.set_balance(uid, new)
        self.uncache_user(uid)
        return amount

    async def fetch_vault(self, uid):  # fetches a user's vault in the form (vault_amount, vault_max)
        user = await self.fetch_user(uid)
        # keyed both by name and by position so callers can index either way
        return {"vault_bal": user["vault_bal"], 0: user["vault_bal"], "vault_max": user["vault_max"], 1: user["vault_max"]}

    async def set_vault(self, uid, vault_bal, vault_max):
        await self.fetch_user(uid)
        await self.db.execute("UPDATE users SET vault_bal = $1, vault_max = $2 WHERE uid = $3", vault_bal, vault_max, uid)
        self.uncache_user(uid)

    async def fetch_items(self, uid):
        try:
            return self._items_cache[uid]
        except KeyError:
            pass

        await self.fetch_user(uid)
        return self.cache_items(uid, await self.db.fetch("SELECT * FROM items WHERE uid = $1", uid))

    async def fetch_item(self, uid, name):
        """Fetch one item row by (case-insensitive) name, or None."""
        # serve from the items cache when present
        try:
            for item_record in self._items_cache[uid]:
                if name.lower() == item_record["name"].lower():
                    return item_record

                await asyncio.sleep(0)
        except KeyError:
            pass

        await self.fetch_user(uid)
        return await self.db.fetchrow("SELECT * FROM items WHERE uid = $1 AND LOWER(name) = LOWER($2)", uid, name)

    async def add_item(self, uid, name, sell_price, amount, sticky=False):
        """Insert an item row or add *amount* to an existing stack."""
        prev = await self.fetch_item(uid, name)

        if prev is None:
            await self.db.execute("INSERT INTO items VALUES ($1, $2, $3, $4, $5)", uid, name, sell_price, amount, sticky)
        else:
            await self.db.execute(
                "UPDATE items SET amount = $1 WHERE uid = $2 AND LOWER(name) = LOWER($3)", amount + prev["amount"], uid, name
            )

        self.uncache_items(uid)

    async def remove_item(self, uid, name, amount):
        """Remove *amount* from a stack, deleting the row if it hits zero."""
        prev = await self.fetch_item(uid, name)

        if prev["amount"] - amount < 1:
            await self.db.execute("DELETE FROM items WHERE uid = $1 AND LOWER(name) = LOWER($2)", uid, name)
        else:
            await self.db.execute(
                "UPDATE items SET amount = $1 WHERE uid = $2 AND LOWER(name) = LOWER($3)", prev["amount"] - amount, uid, name
            )

        self.uncache_items(uid)

    async def log_transaction(self, item, amount, timestamp, giver, receiver):
        await self.db.execute("INSERT INTO give_logs VALUES ($1, $2, $3, $4, $5)", item, amount, timestamp, giver, receiver)

    async def fetch_transactions_by_sender(self, uid, limit):
        return await self.db.fetch("SELECT * FROM give_logs WHERE giver_uid = $1 ORDER BY ts DESC LIMIT $2", uid, limit)

    async def fetch_transactions_page(self, uid, limit: int = 10, *, page: int = 0) -> list:
        # transactions where the user was either the giver or the receiver
        return await self.db.fetch(
            "SELECT * FROM give_logs WHERE giver_uid = $1 OR recvr_uid = $1 ORDER BY ts DESC LIMIT $2 OFFSET $3",
            uid,
            limit,
            page * limit,
        )

    async def fetch_transactions_page_count(self, uid, limit: int = 10) -> int:
        return await self.db.fetchval("SELECT COUNT(*) FROM give_logs WHERE giver_uid = $1 OR recvr_uid = $1", uid) // limit

    async def fetch_pickaxe(self, uid):
        """Return the user's best pickaxe, granting a Wood Pickaxe if none."""
        items_names = [item["name"] for item in await self.fetch_items(uid)]

        # self.d.mining.pickaxes is ordered best -> worst
        for pickaxe in self.d.mining.pickaxes:
            if pickaxe in items_names:
                return pickaxe

            await asyncio.sleep(0)

        await self.add_item(uid, "Wood Pickaxe", 0, 1, True)
        return "Wood Pickaxe"

    async def fetch_sword(self, uid):
        """Return the user's best sword, granting a Wood Sword if none."""
        items_names = [item["name"] for item in await self.fetch_items(uid)]

        for sword in self.d.sword_list_proper:
            if sword in items_names:
                return sword

            await asyncio.sleep(0)

        await self.add_item(uid, "Wood Sword", 0, 1, True)
        return "Wood Sword"

    async def rich_trophy_wipe(self, uid):
        """Reset a user's wealth: zero balance/vault, delete non-exempt items."""
        await self.set_balance(uid, 0)
        await self.set_vault(uid, 0, 1)

        # d.rpt_ignore lists item names that survive the wipe
        await self.db.execute(
            "DELETE FROM items WHERE uid = $1 AND NOT name = ANY($2::VARCHAR(250)[])",
            uid,
            self.d.rpt_ignore,
        )

        # self.uncache_user(uid)  # done in set_balance() and set_vault()
        self.uncache_items(uid)

    async def fetch_user_lb(self, uid):
        # ensure a leaderboards row exists for the user
        lbs = await self.db.fetchrow("SELECT * FROM leaderboards WHERE uid = $1", uid)

        if lbs is None:
            await self.db.execute("INSERT INTO leaderboards VALUES ($1, $2, $3, $4)", uid, 0, 0, 0)

    async def update_lb(self, uid, lb, value, mode="add"):
        # `lb` is interpolated into SQL: callers must pass trusted column names only
        await self.fetch_user_lb(uid)

        if mode == "add":
            await self.db.execute(f"UPDATE leaderboards SET {lb} = {lb} + $1 WHERE uid = $2", value, uid)
        elif mode == "sub":
            await self.db.execute(f"UPDATE leaderboards SET {lb} = {lb} - $1 WHERE uid = $2", value, uid)
        elif mode == "set":
            await self.db.execute(f"UPDATE leaderboards SET {lb} = $1 WHERE uid = $2", value, uid)

    async def fetch_global_lb(self, lb: str, uid: int) -> tuple:
        """Return (all ranked rows, the given user's ranked row) for column *lb*."""
        return (
            await self.db.fetch(f"SELECT uid, {lb}, ROW_NUMBER() OVER(ORDER BY {lb} DESC) AS ordered FROM leaderboards"),
            await self.db.fetchrow(
                f"SELECT * FROM (SELECT uid, {lb}, ROW_NUMBER() OVER(ORDER BY {lb} DESC) AS ordered FROM leaderboards) AS leaderboard WHERE uid = $1",
                uid,
            ),
        )

    async def fetch_local_lb(self, lb: str, uid: int, uids: list) -> tuple:
        """Same as fetch_global_lb but restricted to the given member uids."""
        return (
            await self.db.fetch(
                f"SELECT uid, {lb}, ROW_NUMBER() OVER(ORDER BY {lb} DESC) AS ordered FROM leaderboards WHERE uid = ANY($1::BIGINT[])",
                uids,
            ),
            await self.db.fetchrow(
                f"SELECT * FROM (SELECT uid, {lb}, ROW_NUMBER() OVER(ORDER BY {lb} DESC) AS ordered FROM leaderboards WHERE uid = ANY($2::BIGINT[])) AS leaderboard WHERE uid = $1",
                uid,
                uids,
            ),
        )

    async def fetch_global_lb_user(self, column: str, uid: int) -> tuple:
        """Rank users table by *column* globally; excludes bot-banned users."""
        return (
            await self.db.fetch(
                "SELECT uid, {0}, ROW_NUMBER() OVER(ORDER BY {0} DESC) AS ordered FROM users WHERE {0} > 0 AND bot_banned = false LIMIT 10".format(
                    column
                )
            ),
            await self.db.fetchrow(
                "SELECT * FROM (SELECT uid, {0}, ROW_NUMBER() OVER(ORDER BY {0} DESC) AS ordered FROM users WHERE {0} > 0 AND bot_banned = false) AS leaderboard WHERE uid = $1".format(
                    column
                ),
                uid,
            ),
        )

    async def fetch_local_lb_user(self, column: str, uid: int, uids: list) -> tuple:
        """Rank users table by *column* within the given member uids."""
        return (
            await self.db.fetch(
                "SELECT uid, {0}, ROW_NUMBER() OVER(ORDER BY {0} DESC) AS ordered FROM users WHERE {0} > 0 AND bot_banned = false AND uid = ANY($1::BIGINT[]) LIMIT 10".format(
                    column
                ),
                uids,
            ),
            await self.db.fetchrow(
                "SELECT * FROM (SELECT uid, {0}, ROW_NUMBER() OVER(ORDER BY {0} DESC) AS ordered FROM users WHERE {0} > 0 AND bot_banned = false AND uid = ANY($2::BIGINT[])) AS leaderboard WHERE uid = $1".format(
                    column
                ),
                uid,
                uids,
            ),
        )

    async def fetch_global_lb_item(self, item: str, uid: int) -> tuple:
        """Rank item holders by amount globally for the given item name."""
        return (
            await self.db.fetch(
                "SELECT uid, amount, ROW_NUMBER() OVER(ORDER BY amount DESC) AS ordered FROM items WHERE LOWER(name) = LOWER($1) LIMIT 10",
                item,
            ),
            await self.db.fetchrow(
                "SELECT uid, amount, ROW_NUMBER() OVER(ORDER BY amount DESC) AS ordered FROM items WHERE LOWER(name) = LOWER($1) AND uid = $2",
                item,
                uid,
            ),
        )

    async def fetch_local_lb_item(self, item: str, uid: int, uids: list) -> tuple:
        """Rank item holders by amount within the given member uids."""
        return (
            await self.db.fetch(
                "SELECT uid, amount, ROW_NUMBER() OVER(ORDER BY amount DESC) AS ordered FROM items WHERE uid = ANY($2::BIGINT[]) AND LOWER(name) = LOWER($1) LIMIT 10",
                item,
                uids,
            ),
            await self.db.fetchrow(
                "SELECT * FROM (SELECT uid, amount, ROW_NUMBER() OVER(ORDER BY amount DESC) AS ordered FROM items WHERE uid = ANY($3::BIGINT[]) AND LOWER(name) = LOWER($1)) AS leaderboard WHERE uid = $2",
                item,
                uid,
                uids,
            ),
        )

    async def set_botbanned(self, uid, botbanned):
        await self.fetch_user(uid)

        if botbanned:
            if uid not in self.v.ban_cache:
                self.v.ban_cache.add(uid)
        else:
            # NOTE(review): ban_cache is a set, and set.remove raises KeyError,
            # not ValueError — this except never matches; consider .discard(uid).
            try:
                self.v.ban_cache.remove(uid)
            except ValueError:
                pass

        await self.db.execute("UPDATE users SET bot_banned = $1 WHERE uid = $2", botbanned, uid)

        self.uncache_user(uid)

    async def add_warn(self, uid, gid, mod_id, reason):
        await self.db.execute("INSERT INTO warnings VALUES ($1, $2, $3, $4)", uid, gid, mod_id, reason)

    async def fetch_warns(self, uid, gid):
        return await self.db.fetch("SELECT * FROM warnings WHERE uid = $1 AND gid = $2", uid, gid)

    async def clear_warns(self, uid, gid):
        await self.db.execute("DELETE FROM warnings WHERE uid = $1 AND gid = $2", uid, gid)

    async def fetch_user_rcon(self, uid, mcserver):
        return await self.db.fetchrow("SELECT * FROM user_rcon WHERE uid = $1 AND mcserver = $2", uid, mcserver)

    async def add_user_rcon(self, uid, mcserver, rcon_port, password):
        await self.db.execute("INSERT INTO user_rcon VALUES ($1, $2, $3, $4)", uid, mcserver, rcon_port, password)

    async def delete_user_rcon(self, uid, mcserver):
        await self.db.execute("DELETE FROM user_rcon WHERE uid = $1 AND mcserver = $2", uid, mcserver)

    async def mass_delete_user_rcon(self, uid):
        return await self.db.fetch("DELETE FROM user_rcon WHERE uid = $1 RETURNING *", uid)
def setup(bot):
    """Register the Database cog on the given bot (discord.py extension entry point)."""
    cog = Database(bot)
    bot.add_cog(cog)
|
{"/__main__.py": ["/src/bot.py"]}
|
10,657
|
gritor111/Villager-Bot
|
refs/heads/master
|
/__main__.py
|
import pyximport
import numpy

# compile any .pyx (Cython) modules on import, with numpy headers available
# to the C compiler; must run before importing modules that contain .pyx code
pyximport.install(language_level=3, reload_support=True, setup_args={"include_dirs": numpy.get_include()})

import src.bot as bot

if __name__ == "__main__":
    bot.run()
|
{"/__main__.py": ["/src/bot.py"]}
|
10,658
|
gritor111/Villager-Bot
|
refs/heads/master
|
/src/bot.py
|
from concurrent.futures import ThreadPoolExecutor
from discord.ext import commands
import asyncio
import aiohttp
import discord
import random
# import uvloop
import arrow
import json
import sys
import os
# ensure villager bot modules are accessible
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
# ensure the current working directory is correct
os.chdir(os.path.dirname(__file__))
import speedups
speedups.install()
from util.setup import villager_bot_intents, setup_logging, setup_database, load_text
from util.misc import get_lang, get_prefix, check_global
from util.statcord import ShitCordClient
from util.cj import ClassyDict
# send function/method for easy sending of embed messages with small amounts of text
async def send(_bot, location, message, respond=False, ping=False):
    """Send *message* wrapped in a plain embed to *location*.

    If *respond* is true and *location* supports replying (a Message/Context),
    try a reply first (pinging the author when *ping* is true), then fall back
    to a normal send. Returns True on success, False if the bot was Forbidden.
    """
    embed = discord.Embed(color=_bot.d.cc, description=message)

    try:
        if respond and hasattr(location, "reply"):
            try:
                await location.reply(embed=embed, mention_author=ping)
                return True
            except discord.errors.HTTPException:
                pass  # e.g. the replied-to message is gone; fall back below

        await location.send(embed=embed)
        return True
    except discord.Forbidden:
        return False
# update the role of a member in the support server
async def update_support_member_role(_bot, member):
    """Sync a support-server member's cosmetic roles with their in-game gear.

    Keeps all roles that aren't part of the pickaxe/BOP role mappings, then
    re-adds the role for their best pickaxe and the BOP role if they own a
    Bane Of Pillagers Amulet.
    """
    support_guild = _bot.get_guild(_bot.d.support_server_id)
    role_map_values = list(_bot.d.role_mappings.values())

    db = _bot.get_cog("Database")

    roles = []

    # keep every role that isn't one of the managed mapping roles
    for role in member.roles:
        if role.id not in role_map_values and role.id != _bot.d.support_server_id:
            roles.append(role)

        await asyncio.sleep(0)

    pickaxe_role = _bot.d.role_mappings.get(await db.fetch_pickaxe(member.id))
    if pickaxe_role is not None:
        roles.append(support_guild.get_role(pickaxe_role))

    if await db.fetch_item(member.id, "Bane Of Pillagers Amulet") is not None:
        roles.append(support_guild.get_role(_bot.d.role_mappings.get("BOP")))

    if roles != member.roles:
        try:
            await member.edit(roles=roles)
        except Exception:
            pass  # best-effort: missing perms / member left mid-update
def update_fishing_prices(_bot):
    """Re-roll the current sell price of every fish.

    Each fish's ``value`` is assumed to be a (low, high) pair — confirm
    against data.json; ``current`` is set to a random price in that range.
    """
    all_fish = _bot.d.fishing.fish
    for fish_data in all_fish.values():
        low, high = fish_data.value
        fish_data.current = random.randint(low, high)
def mutate_botd(_bot):
    """Post-process bot.d after loading data.json: derive caches and
    convenience fields used throughout the cogs."""
    d = _bot.d

    d.cc = discord.Color.green()  # embed color

    # update fishing data
    _bot.update_fishing_prices()
    fishes = d.fishing.fish_ids = list(d.fishing.fish.keys())
    # rarer fish get exponentially smaller weights for the random catch roll
    d.fishing.fish_weights = [(len(fishes) - fish_data.rarity) ** d.fishing.exponent for fish_data in d.fishing.fish.values()]

    d.mining.pickaxes = list(d.mining.yields_pickaxes)[::-1]  # get list of pickaxe types from best to worst

    d.fun_langs.unenchant = {v: k for k, v in d.fun_langs.enchant.items()}  # reverse dict to create unenchantment lang
def run():
    """Entry point: build, configure, and start the bot.

    Blocking — returns only when the bot shuts down. The setup order matters:
    keys -> bot object -> database pool -> text/data -> cogs -> global check.
    """
    # setup uvloop
    # uvloop.install()

    # set up basic logging
    logger = setup_logging()

    logger.info("loading private keys...")
    with open("data/keys.json", "r") as k:  # load bot keys
        keys = ClassyDict(json.load(k))

    bot = commands.AutoShardedBot(  # setup bot
        command_prefix=get_prefix,
        case_insensitive=True,
        intents=villager_bot_intents(),
        help_command=None,
    )

    bot.logger = logger
    bot.aiohttp = aiohttp.ClientSession(loop=bot.loop)

    # bind the module-level helpers as bound methods on the bot instance
    bot.send = send.__get__(bot)
    bot.get_lang = lambda ctx: get_lang(bot, ctx)
    bot.update_support_member_role = update_support_member_role.__get__(bot)
    bot.update_fishing_prices = update_fishing_prices.__get__(bot)
    bot.mutate_botd = mutate_botd.__get__(bot)

    logger.info("setting up connection to database and db pool...")
    asyncio.get_event_loop().run_until_complete(setup_database(bot, keys))

    logger.info("loading villager bot text...")
    bot.langs = load_text()

    logger.info("loading villager bot constant data...")
    with open("data/data.json", "r", encoding="utf8") as d:
        bot.d = ClassyDict(
            json.load(d)
        )  # cj automatically turns json into sets of nested classes and attributes for easy access

    bot.k = keys
    bot.k.fernet = bot.k.fernet.encode("utf-8")

    # mutable runtime state shared across cogs
    bot.v = ClassyDict()

    bot.v.start_time = arrow.utcnow()

    bot.v.votes_topgg = 0
    bot.v.cmd_count = 0
    bot.v.msg_count = 0

    bot.v.miners = {}  # {user_id: commands}
    bot.v.honey_buckets = None  # list of cooldowns for honey command (econ cog)
    bot.v.pillagers = {}  # {user_id: pillages}
    bot.v.pillages = {}  # {user_id: times_pillaged}
    bot.v.chuggers = {}  # {user_id: [potion, potion]}
    bot.v.cmd_lb = {}  # {user_id: command_count}

    bot.v.pause_econ = {}  # {uid: starttime}
    bot.v.spawn_queue = {}  # {ctx: starttime}

    bot.v.rcon_cache = {}  # {uid: rcon_client}

    bot.v.disabled_cmds = {}  # {gid: [disabled cmds]}

    bot.v.ban_cache = set()  # {uid, uid,..}
    bot.v.prefix_cache = {}  # {gid: 'prefix'}
    bot.v.lang_cache = {}  # {gid: 'lang'}

    bot.v.additional_mcservers = []
    bot.v.mcserver_list = []

    bot.owner_locked = False

    bot.statcord = ShitCordClient(bot, keys.statcord)

    bot.cog_list = [  # list of cogs which are to be loaded in the bot
        "cogs.core.database",
        "cogs.core.events",
        "cogs.core.loops",
        "cogs.cmds.useful",
        "cogs.cmds.owner",
        "cogs.cmds.mc",
        "cogs.cmds.mod",
        "cogs.cmds.fun",
        "cogs.cmds.econ",
        "cogs.cmds.config",
        "cogs.other.mobs",
        "cogs.other.webhooks",
    ]

    for cog in bot.cog_list:  # load every cog in bot.cog_list
        logger.info(f"loading extension: {cog}")
        bot.load_extension(cog)

    bot.mutate_botd()

    @bot.check  # everythingggg goes through here
    def global_check(ctx):
        ctx.l = bot.get_lang(ctx)
        return check_global(bot, ctx)

    with ThreadPoolExecutor() as bot.tpool:
        bot.run(keys.discord)  # run the bot, this is a blocking call

    # the bot's loop is closed by now, so close the http session on a fresh one
    asyncio.run(bot.aiohttp.close())
|
{"/__main__.py": ["/src/bot.py"]}
|
10,659
|
gritor111/Villager-Bot
|
refs/heads/master
|
/src/cogs/other/webhooks.py
|
from discord.ext import commands
from aiohttp import web
import traceback
import asyncio
import discord
import arrow
import util.cj as cj
# Cog that runs a small aiohttp server receiving top.gg vote webhooks and
# periodically pushes the bot's guild count back to top.gg.
class Webhooks(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

        self.d = bot.d  # static config/data
        self.v = bot.v  # shared mutable runtime state
        self.k = bot.k  # secrets / API keys

        self.db = bot.get_cog("Database")

        self.server_runner = None
        self.webhook_server = None

        self.webhooks_task = bot.loop.create_task(self.webhooks_setup())
        self.stats_task = bot.loop.create_task(self.update_stats())

        self.lock = asyncio.Lock()  # serializes vote handling to avoid streak races

    def cog_unload(self):
        """Tear down the webhook server and cancel the background tasks."""
        self.bot.loop.create_task(self.server_runner.cleanup())
        self.bot.loop.create_task(self.webhook_server.close())

        self.webhooks_task.cancel()
        self.stats_task.cancel()

    async def update_stats(self):
        """Post the bot's server count to top.gg once an hour, forever."""
        await self.bot.wait_until_ready()

        while True:
            try:
                await self.bot.aiohttp.post(
                    f"https://top.gg/api/bots/{self.bot.user.id}/stats",
                    headers={"Authorization": self.k.topgg_api},
                    json={"server_count": str(len(self.bot.guilds))},
                )
            except Exception as e:
                self.bot.logger.error(e)  # best-effort: never let one failure kill the loop

            await asyncio.sleep(3600)

    async def webhooks_setup(self):  # holy fucking shit that's hot
        """Start the aiohttp server that receives top.gg vote callbacks."""

        async def handler(req):
            # BUGFIX: the old version wrapped this in try/finally with
            # `return web.Response()` in the finally block, which overrode the
            # 401 below (and swallowed any exception), so unauthorized or
            # malformed requests were silently accepted with a 200.
            if req.headers.get("Authorization") != self.k.topgg_webhook:
                return web.Response(status=401)

            self.bot.dispatch("topgg_event", cj.classify(await req.json()))
            return web.Response()

        app = web.Application()
        app.router.add_post(self.d.hookspath, handler)

        self.server_runner = web.AppRunner(app)
        await self.server_runner.setup()

        self.webhook_server = web.TCPSite(self.server_runner, "0.0.0.0", self.d.hooksport)
        await self.webhook_server.start()

    async def reward(self, user_id, amount, streak=None):
        """Announce a vote and DM the voter their reward (emeralds or barrels).

        Every 16th vote in a streak pays out Barrels instead of emeralds.
        """
        user = self.bot.get_user(user_id)
        user_str = "an unknown user" if user is None else discord.utils.escape_markdown(user.display_name)

        await self.bot.get_channel(self.d.vote_channel_id).send(f":tada::tada: **{user_str}** has voted! :tada::tada:")

        if user is not None:
            try:
                if streak is None:
                    await self.db.balance_add(user_id, amount)
                    await self.bot.send(user, f"Thanks for voting! You've received **{amount}**{self.d.emojis.emerald}!")
                elif streak % 16 == 0:  # barrel payout; count grows every 32 votes
                    barrels = int(streak // 32 + 1)
                    await self.db.add_item(user.id, "Barrel", 1024, barrels)
                    await self.bot.send(user, f"Thanks for voting! You've received {barrels}x **Barrel**!")
                else:
                    await self.db.balance_add(user_id, amount)
                    await self.bot.send(
                        user,
                        f"Thanks for voting! You've received **{amount}**{self.d.emojis.emerald}! (Vote streak is now {streak})",
                    )
            except BaseException as e:
                # report reward failures to the error channel instead of crashing
                traceback_text = "".join(traceback.format_exception(type(e), e, e.__traceback__, 4))
                await self.bot.send(
                    self.bot.get_channel(self.d.error_channel_id), f"Voting error: {user} ```{traceback_text}```"
                )

    @commands.Cog.listener()
    async def on_topgg_event(self, data):
        """Handle a dispatched top.gg webhook payload (vote or test ping)."""
        await self.bot.wait_until_ready()

        if data.type != "upvote":  # anything else is a webhook test ping
            self.bot.logger.info("\u001b[35m top.gg webhooks test\u001b[0m")
            await self.bot.get_channel(self.d.error_channel_id).send("TOP.GG WEBHOOKS TEST")
            return

        uid = int(data.user)

        async with self.lock:  # one vote at a time so streak updates can't race
            db_user = await self.db.fetch_user(uid)

            streak_time = db_user["streak_time"]
            vote_streak = db_user["vote_streak"]

            if streak_time is None:  # time
                streak_time = 0

            # ignore duplicate votes within the 12h top.gg vote cooldown
            if arrow.get(streak_time) > arrow.utcnow().shift(hours=-12):
                return

            self.bot.logger.info(f"\u001b[32;1m{uid} voted on top.gg\u001b[0m")
            self.v.votes_topgg += 1

            amount = self.d.topgg_reward

            if data.isWeekend:  # top.gg doubles vote weight on weekends
                amount *= 2

            # better pickaxes multiply the vote reward
            amount *= len(self.d.mining.pickaxes) - self.d.mining.pickaxes.index(await self.db.fetch_pickaxe(uid))

            if vote_streak is None or vote_streak == 0:
                vote_streak = 0

            vote_streak += 1

            if arrow.utcnow().shift(days=-1, hours=-12) > arrow.get(streak_time):  # vote expired
                vote_streak = 1

            amount *= 5 if vote_streak > 5 else vote_streak  # streak bonus caps at 5x

            await self.db.update_user(uid, "streak_time", arrow.utcnow().timestamp())
            await self.db.update_user(uid, "vote_streak", vote_streak)

            await self.reward(uid, amount, vote_streak)
def setup(bot):
    """Extension entry point: register the Webhooks cog with the bot."""
    cog = Webhooks(bot)
    bot.add_cog(cog)
|
{"/__main__.py": ["/src/bot.py"]}
|
10,660
|
gritor111/Villager-Bot
|
refs/heads/master
|
/src/cogs/cmds/econ.py
|
from discord.ext import commands, tasks
from collections import defaultdict
import functools
import discord
import asyncio
import random
import arrow
import math
from util.misc import lb_logic, cmds_lb, format_required, make_health_bar, calc_total_wealth, emojify_item
# Economy cog: profiles, balances, shop, vault, mining, gambling, trading.
class Econ(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

        self.d = bot.d  # static config/data
        self.v = bot.v  # shared mutable runtime state

        self.db = bot.get_cog("Database")

        # restore honey-command cooldown buckets saved across cog reloads (see cog_unload)
        if self.v.honey_buckets is not None:
            self.honey._buckets = self.v.honey_buckets

        self.pillage_cap_reset.start()

        # This links the max concurrency of the with, dep, sell, give, etc.. cmds
        for command in (
            self.vault_deposit,
            self.vault_withdraw,
            self.buy,
            self.sell,
            self.give,
            self.gamble,
            self.search,
            self.mine,
            self.pillage,
        ):
            command._max_concurrency = self.max_concurrency_dummy._max_concurrency

        # per-user asyncio locks; used by lock_author/unlock_author and give
        # to keep concurrent economy mutations from racing
        self._user_locks = defaultdict(asyncio.Lock)
def cog_unload(self):
    """Stop the pillage reset loop and stash honey cooldowns for the next load."""
    self.pillage_cap_reset.cancel()
    self.v.honey_buckets = self.honey._buckets
async def lock_author(self, ctx):
    """Acquire the command author's per-user lock (released by unlock_author)."""
    author_lock = self._user_locks[ctx.author.id]
    await author_lock.acquire()
async def unlock_author(self, ctx):
    """Release the command author's per-user lock (acquired by lock_author)."""
    author_lock = self._user_locks[ctx.author.id]
    author_lock.release()
@tasks.loop(hours=12)
async def pillage_cap_reset(self):
    """Every 12 hours, forget the per-user pillage counters so caps reset."""
    self.v.pillagers, self.v.pillages = {}, {}
@pillage_cap_reset.before_loop
async def before_pillage_cap_reset(self):
    # don't let the reset loop run until the bot's cache is ready
    await self.bot.wait_until_ready()
@functools.lru_cache(maxsize=None)  # calculate chances for a specific pickaxe to find emeralds
def calc_yield_chance_list(self, pickaxe: str):
    # NOTE(review): lru_cache on an instance method keys on `self` and keeps the
    # cog instance alive for the cache's lifetime (ruff B019); harmless here with
    # a single long-lived cog instance, but worth knowing before refactoring.
    yield_ = self.d.mining.yields_pickaxes[pickaxe]  # [xTrue, xFalse]
    # weighted boolean pool for random.choice: yield_[0] hits, yield_[1] misses
    return [True] * yield_[0] + [False] * yield_[1]
async def math_problem(self, ctx, addition=1):
    """Anti-macro captcha: after ~100 mine-type commands, make the user solve x+y.

    Returns True when no problem was required or it was answered correctly,
    False on timeout or a wrong answer.
    """
    mine_commands = self.v.miners.get(ctx.author.id, 0)
    self.v.miners[ctx.author.id] = mine_commands + addition

    if mine_commands >= 100:
        x, y = random.randint(0, 35), random.randint(0, 25)
        # pad the visible "x+y" with runs of zero-width / invisible characters
        # (BOM, ZWSP, ZWNJ, ZWJ) so the text can't be trivially scraped and eval'd
        prob = f"{y*random.choice([chr(u) for u in (65279, 8203, 8204, 8205)])}{x}{x*random.choice([chr(u) for u in (65279, 8203, 8204, 8205)])}+{y}"
        prob = (prob, str(x + y))

        # first post a bait string, then immediately edit in the real problem —
        # another trap for naive self-bots that read the first embed only
        m = await ctx.reply(
            embed=discord.Embed(color=self.d.cc, description=ctx.l.econ.math_problem.problem.format("process.exit(69)")),
            mention_author=False,
        )
        asyncio.create_task(
            m.edit(embed=discord.Embed(color=self.d.cc, description=ctx.l.econ.math_problem.problem.format(prob[0])))
        )

        def author_check(m):
            # only accept an answer from the same user in the same channel
            return m.channel.id == ctx.channel.id and m.author.id == ctx.author.id

        try:
            m = await self.bot.wait_for("message", check=author_check, timeout=10)
        except asyncio.TimeoutError:
            await self.bot.send(ctx, ctx.l.econ.math_problem.timeout)
            return False

        if m.content != prob[1]:
            await self.bot.send(ctx, ctx.l.econ.math_problem.incorrect.format(self.d.emojis.no), True)
            return False

        self.v.miners[ctx.author.id] = 0  # reset the counter once the captcha passes
        await self.bot.send(ctx, ctx.l.econ.math_problem.correct.format(self.d.emojis.yes), True)

    return True
@commands.command(name="max_concurrency_dummy")
@commands.max_concurrency(1, commands.BucketType.user)
async def max_concurrency_dummy(self, ctx):
pass
@commands.command(name="profile", aliases=["pp"])
async def profile(self, ctx, *, user: discord.User = None):
if user is None:
user = ctx.author
if user.bot:
if user.id == self.bot.user.id:
await self.bot.send(ctx, ctx.l.econ.pp.bot_1)
else:
await self.bot.send(ctx, ctx.l.econ.pp.bot_2)
return
db_user = await self.db.fetch_user(user.id)
u_items = await self.db.fetch_items(user.id)
total_wealth = calc_total_wealth(db_user, u_items)
health_bar = make_health_bar(
db_user["health"], 20, self.d.emojis.heart_full, self.d.emojis.heart_half, self.d.emojis.heart_empty
)
vote_streak = db_user["vote_streak"]
voted = arrow.utcnow().shift(hours=-12) < arrow.get(0 if db_user["streak_time"] is None else db_user["streak_time"])
if arrow.utcnow().shift(days=-1, hours=-12) > arrow.get(
0 if db_user["streak_time"] is None else db_user["streak_time"]
):
vote_streak = 0
await self.db.update_user(user.id, "vote_streak", 0)
await self.db.update_user(user.id, "streak_time", None)
embed = discord.Embed(color=self.d.cc, description=health_bar)
embed.set_author(name=user.display_name, icon_url=user.avatar_url_as())
embed.add_field(name=ctx.l.econ.pp.total_wealth, value=f"{total_wealth}{self.d.emojis.emerald}")
embed.add_field(name="\uFEFF", value="\uFEFF")
embed.add_field(name=ctx.l.econ.pp.cmds_sent, value=self.v.cmd_lb.get(user.id, 0))
embed.add_field(name=ctx.l.econ.pp.streak, value=(vote_streak if vote_streak else 0))
embed.add_field(name="\uFEFF", value="\uFEFF")
embed.add_field(name=ctx.l.econ.pp.can_vote, value=voted * ctx.l.econ.pp.nope + ctx.l.econ.pp.yep * (not voted))
embed.add_field(name=ctx.l.econ.pp.pick, value=(await self.db.fetch_pickaxe(user.id)))
embed.add_field(name="\uFEFF", value="\uFEFF")
embed.add_field(name=ctx.l.econ.pp.sword, value=(await self.db.fetch_sword(user.id)))
await ctx.send(embed=embed)
@commands.command(name="balance", aliases=["bal", "vault", "pocket"])
async def balance(self, ctx, *, user: discord.User = None):
"""Shows the balance of a user or the message sender"""
if user is None:
user = ctx.author
if user.bot:
if user.id == self.bot.user.id:
await self.bot.send(ctx, ctx.l.econ.bal.bot_1)
else:
await self.bot.send(ctx, ctx.l.econ.bal.bot_2)
return
db_user = await self.db.fetch_user(user.id)
u_items = await self.db.fetch_items(user.id)
total_wealth = calc_total_wealth(db_user, u_items)
mooderalds = await self.db.fetch_item(user.id, "Mooderald")
if mooderalds is None:
mooderalds = 0
else:
mooderalds = mooderalds["amount"]
embed = discord.Embed(color=self.d.cc)
embed.set_author(name=ctx.l.econ.bal.s_emeralds.format(user.display_name), icon_url=user.avatar_url_as())
embed.description = (
ctx.l.econ.bal.total_wealth.format(total_wealth, self.d.emojis.emerald)
+ "\n"
+ ctx.l.econ.bal.autistic_emeralds.format(mooderalds, self.d.emojis.autistic_emerald)
)
embed.add_field(name=ctx.l.econ.bal.pocket, value=f'{db_user["emeralds"]}{self.d.emojis.emerald}')
embed.add_field(
name=ctx.l.econ.bal.vault, value=f'{db_user["vault_bal"]}{self.d.emojis.emerald_block}/{db_user["vault_max"]}'
)
await ctx.send(embed=embed)
async def inventory_logic(self, ctx, user, items: list, cat: str, items_per_page: int = 8):
    """Paginated embed view of *items* (one inventory category), navigated
    with ⬅️/➡️ reactions for up to two minutes of inactivity.

    Fish prices are refreshed from the live market before sorting so the
    displayed sell prices are current.
    """
    fishies = {fish.name: fish.current for fish in self.d.fishing.fish.values()}

    # patch live fish prices into the item dicts (non-fish keep their stored price)
    for i, item in enumerate(items):
        try:
            items[i] = {**item, "sell_price": fishies[item["name"]]}
        except KeyError:
            pass

        await asyncio.sleep(0)  # yield to the event loop on large inventories

    items_sorted = sorted(items, key=lambda item: item["sell_price"], reverse=True)  # sort items by sell price
    items_chunks = [
        items_sorted[i : i + items_per_page] for i in range(0, len(items_sorted), items_per_page)
    ]  # split items into pages of items_per_page entries each

    page = 0
    page_max = len(items_chunks) - 1

    if items_chunks == []:  # normalize an empty inventory to one empty page
        items_chunks = [[]]
        page_max = 0

    msg = None
    first_time = True

    while True:
        # BUGFIX: this previously tested `len(items_chunks) == 0`, which is never
        # true after the normalization above, so the "empty" text never showed
        # and empty inventories rendered a blank embed instead.
        if not items_chunks[page]:
            body = ctx.l.econ.inv.empty
        else:
            body = ""  # text for that page

            for item in items_chunks[page]:
                sell_price_nice = f'({item["sell_price"]}{self.d.emojis.emerald})' if item["sell_price"] != -1 else ""
                body += f'{emojify_item(self.d, item["name"])} `{item["amount"]}x` **{item["name"]}** {sell_price_nice}\n'

        embed = discord.Embed(color=self.d.cc, description=body)
        embed.set_author(name=ctx.l.econ.inv.s_inventory.format(user.display_name, cat), icon_url=user.avatar_url_as())
        embed.set_footer(text=f"{ctx.l.econ.page} {page+1}/{page_max+1}")

        if msg is None:
            msg = await ctx.send(embed=embed)
        else:
            await msg.edit(embed=embed)

        if page_max > 0:
            # add the nav reactions once, on the first render only
            if first_time:
                await msg.add_reaction("⬅️")
                await asyncio.sleep(0.1)
                await msg.add_reaction("➡️")
                await asyncio.sleep(0.1)

            try:

                def author_check(react, r_user):
                    return r_user == ctx.author and ctx.channel == react.message.channel and msg.id == react.message.id

                react, r_user = await self.bot.wait_for(
                    "reaction_add", check=author_check, timeout=(2 * 60)
                )  # wait for reaction from message author
            except asyncio.TimeoutError:
                return

            await react.remove(ctx.author)

            # move one page, clamped to [0, page_max]
            if react.emoji == "⬅️":
                page -= 1 if page - 1 >= 0 else 0
            if react.emoji == "➡️":
                page += 1 if page + 1 <= page_max else 0

            await asyncio.sleep(0.1)
        else:
            break

        first_time = False
async def inventory_boiler(self, ctx, user: discord.User = None):
    """Shared precondition check for the inventory subcommands.

    Returns (ok, user); ok is False when a deeper subcommand already handled
    the invocation or when the target user is a bot.
    """
    if ctx.invoked_subcommand is not None:
        return False, None

    if user is None:
        user = ctx.author

    if not user.bot:
        return True, user

    # bots have no inventory; pick the message based on which bot was asked about
    if user.id == self.bot.user.id:
        await self.bot.send(ctx, ctx.l.econ.inv.bot_1)
    else:
        await self.bot.send(ctx, ctx.l.econ.inv.bot_2)

    return False, user
@commands.group(name="inventory", aliases=["inv", "items"])
@commands.cooldown(2, 2, commands.BucketType.user)
async def inventory(self, ctx):
if ctx.invoked_subcommand is not None:
return
split = ctx.message.content.split()
if len(split) <= 1:
user = ctx.author
else:
try:
user = await commands.UserConverter().convert(ctx, " ".join(split[1:]))
except BaseException:
raise commands.BadArgument
if user.bot:
if user.id == self.bot.user.id:
await self.bot.send(ctx, ctx.l.econ.inv.bot_1)
else:
await self.bot.send(ctx, ctx.l.econ.inv.bot_2)
return
items = await self.db.fetch_items(user.id)
await self.inventory_logic(ctx, user, items, ctx.l.econ.inv.cats.all, 16)
@inventory.group(name="tools", aliases=["tool", "pickaxes", "swords"])
async def inventory_tools(self, ctx, user: discord.User = None):
valid, user = await self.inventory_boiler(ctx, user)
if not valid:
return
items = [e for e in await self.db.fetch_items(user.id) if e["name"] in self.d.cats.tools]
await self.inventory_logic(ctx, user, items, ctx.l.econ.inv.cats.tools)
@inventory.group(name="magic", aliases=["books", "potions", "enchants"])
async def inventory_magic(self, ctx, user: discord.User = None):
valid, user = await self.inventory_boiler(ctx, user)
if not valid:
return
items = [e for e in await self.db.fetch_items(user.id) if e["name"] in self.d.cats.magic]
await self.inventory_logic(ctx, user, items, ctx.l.econ.inv.cats.magic)
@inventory.group(name="misc", aliases=["other"])
async def inventory_misc(self, ctx, user: discord.User = None):
valid, user = await self.inventory_boiler(ctx, user)
if not valid:
return
combined_cats = self.d.cats.tools + self.d.cats.magic + self.d.cats.fish
items = [e for e in await self.db.fetch_items(user.id) if e["name"] not in combined_cats]
await self.inventory_logic(ctx, user, items, ctx.l.econ.inv.cats.misc, (16 if len(items) > 24 else 8))
@inventory.group(name="fish", aliases=["fishes", "fishing", "fishies"])
async def inventory_fish(self, ctx, user: discord.User = None):
valid, user = await self.inventory_boiler(ctx, user)
if not valid:
return
items = [e for e in await self.db.fetch_items(user.id) if e["name"] in self.d.cats.fish]
await self.inventory_logic(ctx, user, items, ctx.l.econ.inv.cats.fish)
@commands.command(name="deposit", aliases=["dep"])
@commands.cooldown(1, 2, commands.BucketType.user)
@commands.max_concurrency(1, commands.BucketType.user)
async def vault_deposit(self, ctx, emerald_blocks: str):
"""Deposits the given amount of emerald blocks into the vault"""
db_user = await self.db.fetch_user(ctx.author.id)
c_v_bal = db_user["vault_bal"]
c_v_max = db_user["vault_max"]
c_bal = db_user["emeralds"]
if c_bal < 9:
await self.bot.send(ctx, ctx.l.econ.dep.poor_loser)
return
if emerald_blocks.lower() in ("all", "max"):
amount = c_v_max - c_v_bal
if amount * 9 > c_bal:
amount = math.floor(c_bal / 9)
else:
try:
amount = int(emerald_blocks)
except ValueError:
await self.bot.send(ctx, ctx.l.econ.use_a_number_stupid)
return
if amount * 9 > c_bal:
await self.bot.send(ctx, ctx.l.econ.dep.stupid_3)
return
if amount < 1:
if emerald_blocks.lower() in ("all", "max"):
await self.bot.send(ctx, ctx.l.econ.dep.stupid_2)
else:
await self.bot.send(ctx, ctx.l.econ.dep.stupid_1)
return
if amount > c_v_max - c_v_bal:
await self.bot.send(ctx, ctx.l.econ.dep.stupid_2)
return
await self.db.balance_sub(ctx.author.id, amount * 9)
await self.db.set_vault(ctx.author.id, c_v_bal + amount, c_v_max)
await self.bot.send(
ctx, ctx.l.econ.dep.deposited.format(amount, self.d.emojis.emerald_block, amount * 9, self.d.emojis.emerald)
)
@commands.command(name="withdraw", aliases=["with"])
@commands.cooldown(1, 2, commands.BucketType.user)
@commands.max_concurrency(1, commands.BucketType.user)
async def vault_withdraw(self, ctx, emerald_blocks: str):
"""Withdraws a certain amount of emerald blocks from the vault"""
db_user = await self.db.fetch_user(ctx.author.id)
c_v_bal = db_user["vault_bal"]
c_v_max = db_user["vault_max"]
if c_v_bal < 1:
await self.bot.send(ctx, ctx.l.econ.withd.poor_loser)
return
if emerald_blocks.lower() in ("all", "max"):
amount = c_v_bal
else:
try:
amount = int(emerald_blocks)
except ValueError:
await self.bot.send(ctx, ctx.l.econ.use_a_number_stupid)
return
if amount < 1:
await self.bot.send(ctx, ctx.l.econ.withd.stupid_1)
return
if amount > c_v_bal:
await self.bot.send(ctx, ctx.l.econ.withd.stupid_2)
return
await self.db.balance_add(ctx.author.id, amount * 9)
await self.db.set_vault(ctx.author.id, c_v_bal - amount, c_v_max)
await self.bot.send(
ctx, ctx.l.econ.withd.withdrew.format(amount, self.d.emojis.emerald_block, amount * 9, self.d.emojis.emerald)
)
@commands.group(name="shop")
@commands.cooldown(2, 10, commands.BucketType.user)
async def shop(self, ctx):
"""Shows the available options in the Villager Shop"""
if ctx.invoked_subcommand is None:
embed = discord.Embed(color=self.d.cc)
embed.set_author(name=ctx.l.econ.shop.villager_shop, icon_url=self.d.splash_logo)
# row 1
embed.add_field(
name=f"__**{ctx.l.econ.shop.tools.format(self.d.emojis.netherite_pickaxe_ench)}**__",
value=f"`{ctx.prefix}shop tools`",
)
embed.add_field(name="\uFEFF", value="\uFEFF")
embed.add_field(
name=f"__**{ctx.l.econ.shop.magic.format(self.d.emojis.enchanted_book)}**__", value=f"`{ctx.prefix}shop magic`"
)
# row 2
embed.add_field(
name=f"__**{ctx.l.econ.shop.other.format(self.d.emojis.totem)}**__", value=f"`{ctx.prefix}shop other`"
)
embed.add_field(name="\uFEFF", value="\uFEFF")
embed.add_field(
name=f"__**{ctx.l.econ.shop.fish.format(self.d.emojis.fish.cod)}**__", value=f"`{ctx.prefix}shop fish`"
)
embed.set_footer(text=ctx.l.econ.shop.embed_footer.format(ctx.prefix))
await ctx.send(embed=embed)
async def shop_logic(self, ctx, _type, header):
    """Paginated listing of shop items in category *_type*, 4 per page,
    navigated with ⬅️/➡️ reactions for up to a minute of inactivity."""
    items = []

    # filter out items which aren't of the right _type
    for item in [self.d.shop_items[key] for key in self.d.shop_items.keys()]:
        if item.cat == _type:
            items.append(item)

    items_sorted = sorted(items, key=(lambda item: item.buy_price))  # sort by buy price
    items_chunked = [items_sorted[i : i + 4] for i in range(0, len(items_sorted), 4)]  # split into chunks of 4

    page = 0
    page_max = len(items_chunked)

    msg = None

    while True:
        embed = discord.Embed(color=self.d.cc)
        embed.set_author(name=header, icon_url=self.d.splash_logo)

        for item in items_chunked[page]:
            embed.add_field(
                name=f"{item.db_entry[0]} ({format_required(self.d, item)})",
                value=f"`{ctx.prefix}buy {item.db_entry[0].lower()}`",
                inline=False,
            )

        embed.set_footer(text=f"{ctx.l.econ.page} {page+1}/{page_max}")

        if msg is None:
            msg = await ctx.send(embed=embed)
        else:
            if not msg.embeds[0] == embed:  # only edit when the page actually changed
                await msg.edit(embed=embed)

        if page_max <= 1:  # single page: pagination not needed
            return

        await asyncio.sleep(0.25)
        await msg.add_reaction("⬅️")
        await asyncio.sleep(0.25)
        await msg.add_reaction("➡️")

        try:

            def author_check(react, r_user):
                return r_user == ctx.author and ctx.channel == react.message.channel and msg.id == react.message.id

            # wait for reaction from message author (1 min)
            react, r_user = await self.bot.wait_for("reaction_add", check=author_check, timeout=60)
        except asyncio.TimeoutError:
            return

        await react.remove(ctx.author)

        if react.emoji == "⬅️":
            page -= 1
        elif react.emoji == "➡️":
            page += 1

        # clamp page into [0, page_max - 1]
        if page > page_max - 1:
            page = page_max - 1

        if page < 0:
            page = 0

        await asyncio.sleep(0.2)
@shop.command(name="tools")
async def shop_tools(self, ctx):
"""Allows you to shop for tools"""
await self.shop_logic(ctx, "tools", f"{ctx.l.econ.shop.villager_shop} [{ctx.l.econ.shop.tools[3:]}]")
@shop.command(name="magic")
async def shop_magic(self, ctx):
"""Allows you to shop for magic items"""
await self.shop_logic(ctx, "magic", f"{ctx.l.econ.shop.villager_shop} [{ctx.l.econ.shop.magic[3:]}]")
@shop.command(name="other")
async def shop_other(self, ctx):
"""Allows you to shop for other/miscellaneous items"""
await self.shop_logic(ctx, "other", f"{ctx.l.econ.shop.villager_shop} [{ctx.l.econ.shop.other[3:]}]")
@shop.command(name="fish")
async def shop_fish(self, ctx):
await self.fish_market(ctx)
@commands.command(name="fishmarket", aliases=["fishshop", "fishprices", "fishprice"])
async def fish_market(self, ctx):
embed_template = discord.Embed(
color=self.d.cc,
title=ctx.l.econ.fishing.market.title.format(self.d.emojis.fish.cod, self.d.emojis.fish.rainbow_trout),
description=ctx.l.econ.fishing.market.desc,
)
fields = []
for i, fish in enumerate(self.d.fishing.fish.items()):
fish_id, fish = fish
fields.append(
{
"name": f"{self.d.emojis.fish[fish_id]} {fish.name}",
"value": ctx.l.econ.fishing.market.current.format(fish.current, self.d.emojis.emerald),
}
)
if i % 2 == 0:
fields.append({"name": "\uFEFF", "value": "\uFEFF"})
await asyncio.sleep(0)
groups = [fields[i : i + 6] for i in range(0, len(fields), 6)]
page_max = len(groups)
page = 0
msg = None
while True:
embed = embed_template.copy()
for field in groups[page]:
embed.add_field(**field)
embed.set_footer(text=f"{ctx.l.econ.page} {page+1}/{page_max}")
if msg is None:
msg = await ctx.send(embed=embed)
elif not msg.embeds[0] == embed:
await msg.edit(embed=embed)
if page_max <= 1:
return
await asyncio.sleep(0.25)
await msg.add_reaction("⬅️")
await asyncio.sleep(0.25)
await msg.add_reaction("➡️")
try:
def author_check(react, r_user):
return r_user == ctx.author and ctx.channel == react.message.channel and msg.id == react.message.id
# wait for reaction from message author (1 min)
react, r_user = await self.bot.wait_for("reaction_add", check=author_check, timeout=60)
except asyncio.TimeoutError:
return
await react.remove(ctx.author)
if react.emoji == "⬅️":
page -= 1
elif react.emoji == "➡️":
page += 1
if page > page_max - 1:
page = page_max - 1
if page < 0:
page = 0
await asyncio.sleep(0.2)
@commands.command(name="buy", aliases=["purchase"])
@commands.cooldown(1, 5, commands.BucketType.user)
@commands.max_concurrency(1, commands.BucketType.user)
async def buy(self, ctx, *, amount_item):
"""Allows you to buy items"""
amount_item = amount_item.lower()
db_user = await self.db.fetch_user(ctx.author.id)
if amount_item.startswith("max ") or amount_item.startswith("all "):
item = amount_item[4:]
try:
amount = math.floor(db_user["emeralds"] / self.d.shop_items[item].buy_price)
except KeyError:
await self.bot.send(ctx, ctx.l.econ.buy.stupid_2.format(item))
return
if amount < 1:
await self.bot.send(ctx, ctx.l.econ.buy.poor_loser_1)
return
else:
split = amount_item.split(" ")
try:
amount = split.pop(0)
amount = int(amount)
except ValueError:
item = amount
item += (" " + " ".join(split)) if len(split) > 0 else ""
amount = 1
else:
item = " ".join(split)
if amount < 1:
await self.bot.send(ctx, ctx.l.econ.buy.stupid_1)
return
shop_item = self.d.shop_items.get(item)
# shop item doesn't exist lol
if shop_item is None:
await self.bot.send(ctx, ctx.l.econ.buy.stupid_2.format(item))
return
# check if user can actually afford to buy that amount of that item
if shop_item.buy_price * amount > db_user["emeralds"]:
await self.bot.send(ctx, ctx.l.econ.buy.poor_loser_2.format(amount, shop_item.db_entry[0]))
return
db_item = await self.db.fetch_item(ctx.author.id, shop_item.db_entry[0])
# get count of item in db for that user
if db_item is not None:
db_item_count = db_item["amount"]
else:
db_item_count = 0
# if they already have hit the limit on how many they can buy of that item
count_lt = shop_item.requires.get("count_lt")
if count_lt is not None and count_lt < db_item_count + amount:
await self.bot.send(ctx, ctx.l.econ.buy.no_to_item_1)
return
# ensure user has required items
for req_item, req_amount in shop_item.requires.get("items", {}).items():
db_req_item = await self.db.fetch_item(ctx.author.id, req_item)
if db_req_item is None or db_req_item["amount"] < req_amount:
await self.bot.send(
ctx, ctx.l.econ.buy.need_total_of.format(req_amount, req_item, self.d.emojis[self.d.emoji_items[req_item]])
)
return
await self.db.balance_sub(ctx.author.id, shop_item.buy_price * amount)
for req_item, req_amount in shop_item.requires.get("items", {}).items():
await self.db.remove_item(ctx.author.id, req_item, req_amount * amount)
await self.db.add_item(ctx.author.id, shop_item.db_entry[0], shop_item.db_entry[1], amount, shop_item.db_entry[2])
if shop_item.db_entry[0].endswith("Pickaxe") or shop_item.db_entry[0] == "Bane Of Pillagers Amulet":
member = self.bot.get_guild(self.d.support_server_id).get_member(ctx.author.id)
if member is not None:
await self.bot.update_support_member_role(member)
if shop_item.db_entry[0] == "Rich Person Trophy":
await self.db.rich_trophy_wipe(ctx.author.id)
await self.bot.send(
ctx,
ctx.l.econ.buy.you_done_bought.format(
amount, shop_item.db_entry[0], format_required(self.d, shop_item, amount), amount + db_item_count
),
)
@commands.command(name="sell", aliases=["emeraldify"])
@commands.cooldown(1, 2, commands.BucketType.user)
@commands.max_concurrency(1, commands.BucketType.user)
async def sell(self, ctx, *, amount_item):
"""Allows you to sell items"""
amount_item = amount_item.lower()
if amount_item.startswith("max ") or amount_item.startswith("all "):
item = amount_item[4:]
db_item = await self.db.fetch_item(ctx.author.id, item)
if db_item is None:
await self.bot.send(ctx, ctx.l.econ.sell.invalid_item)
return
amount = db_item["amount"]
else:
split = amount_item.split(" ")
try:
amount = split.pop(0)
amount = int(amount)
except ValueError:
item = amount
item += (" " + " ".join(split)) if len(split) > 0 else ""
amount = 1
else:
item = " ".join(split)
db_item = await self.db.fetch_item(ctx.author.id, item)
if db_item is None:
await self.bot.send(ctx, ctx.l.econ.sell.invalid_item)
return
if amount > db_item["amount"]:
await self.bot.send(ctx, ctx.l.econ.sell.stupid_1)
return
if amount < 1:
await self.bot.send(ctx, ctx.l.econ.sell.stupid_2)
return
for fish_id, fish in self.d.fishing.fish.items():
if db_item["name"] == fish.name:
db_item = {**db_item, "sell_price": fish.current}
await self.db.balance_add(ctx.author.id, amount * db_item["sell_price"])
await self.db.remove_item(ctx.author.id, db_item["name"], amount)
if db_item["name"].endswith("Pickaxe") or db_item["name"] == "Bane Of Pillagers Amulet":
member = self.bot.get_guild(self.d.support_server_id).get_member(ctx.author.id)
if member is not None:
await self.bot.update_support_member_role(member)
await self.bot.send(
ctx,
ctx.l.econ.sell.you_done_sold.format(
amount, db_item["name"], amount * db_item["sell_price"], self.d.emojis.emerald
),
)
@commands.command(name="give", aliases=["gift", "share", "gib"])
@commands.before_invoke(lock_author)
@commands.after_invoke(unlock_author)
@commands.guild_only()
@commands.cooldown(1, 10, commands.BucketType.user)
@commands.max_concurrency(1, commands.BucketType.user)
async def give(self, ctx, user: discord.Member, *, amount_item):
"""Give an item or emeralds to another person"""
if user.bot:
if user.id == self.bot.user.id:
await self.bot.send(ctx, ctx.l.econ.give.bot_1)
else:
await self.bot.send(ctx, ctx.l.econ.give.bot_2)
return
if ctx.author.id == user.id:
await self.bot.send(ctx, ctx.l.econ.give.stupid_1)
return
async with self._user_locks[user.id]:
amount_item = amount_item.lower()
try:
# to be given is emeralds
amount = int(amount_item)
item = "emerald"
except Exception:
split = amount_item.split(" ")
try:
temp_split = split.copy()
amount = int(temp_split.pop(0))
split = temp_split
except Exception:
amount = 1
item = " ".join(split)
if amount < 1:
await self.bot.send(ctx, ctx.l.econ.give.stupid_2)
return
db_user = await self.db.fetch_user(ctx.author.id)
if "pickaxe" in item.lower() or "sword" in item.lower():
await self.bot.send(ctx, ctx.l.econ.give.and_i_oop)
return
if item in ("emerald", "emeralds", ":emerald:"):
if amount > db_user["emeralds"]:
await self.bot.send(ctx, ctx.l.econ.give.stupid_3)
return
await self.db.balance_sub(ctx.author.id, amount)
await self.db.balance_add(user.id, amount)
await self.db.log_transaction("emerald", amount, arrow.utcnow().timestamp(), ctx.author.id, user.id)
await self.bot.send(
ctx, ctx.l.econ.give.gaveems.format(ctx.author.mention, amount, self.d.emojis.emerald, user.mention)
)
if (await self.db.fetch_user(user.id))["give_alert"]:
await self.bot.send(
user, ctx.l.econ.give.gaveyouems.format(ctx.author.mention, amount, self.d.emojis.emerald)
)
else:
db_item = await self.db.fetch_item(ctx.author.id, item)
if db_item is None or amount > db_item["amount"]:
await self.bot.send(ctx, ctx.l.econ.give.stupid_4)
return
if db_item["sticky"]:
await self.bot.send(ctx, ctx.l.econ.give.and_i_oop)
return
if amount < 1:
await self.bot.send(ctx, ctx.l.econ.give.stupid_2)
return
await self.db.remove_item(ctx.author.id, item, amount)
await self.db.add_item(user.id, db_item["name"], db_item["sell_price"], amount)
self.bot.loop.create_task(
self.db.log_transaction(db_item["name"], amount, arrow.utcnow().timestamp(), ctx.author.id, user.id)
)
await self.bot.send(
ctx, ctx.l.econ.give.gave.format(ctx.author.mention, amount, db_item["name"], user.mention)
)
if (await self.db.fetch_user(user.id))["give_alert"]:
await self.bot.send(user, ctx.l.econ.give.gaveyou.format(ctx.author.mention, amount, db_item["name"]))
@commands.command(name="gamble", aliases=["bet", "stonk", "stonks"])
@commands.cooldown(1, 30, commands.BucketType.user)
@commands.max_concurrency(1, commands.BucketType.user)
async def gamble(self, ctx, amount):
"""Gamble for emeralds with Villager Bot"""
db_user = await self.db.fetch_user(ctx.author.id)
if amount.lower() in ("all", "max"):
amount = db_user["emeralds"]
else:
try:
amount = int(amount)
except ValueError:
await self.bot.send(ctx, ctx.l.econ.use_a_number_stupid)
return
if amount > db_user["emeralds"]:
await self.bot.send(ctx, ctx.l.econ.gamble.stupid_1)
return
if amount < 10:
await self.bot.send(ctx, ctx.l.econ.gamble.stupid_2)
return
if amount > 50000:
await self.bot.send(ctx, ctx.l.econ.gamble.stupid_3)
return
if db_user["emeralds"] >= 200000:
await self.bot.send(ctx, ctx.l.econ.gamble.too_rich)
return
u_roll = random.randint(1, 6) + random.randint(1, 6)
b_roll = random.randint(1, 6) + random.randint(1, 6)
await self.bot.send(ctx, ctx.l.econ.gamble.roll.format(u_roll, b_roll))
if u_roll > b_roll:
multi = (
40
+ random.randint(5, 30)
+ (await self.db.fetch_item(ctx.author.id, "Bane Of Pillagers Amulet") is not None) * 20
)
multi += (await self.db.fetch_item(ctx.author.id, "Rich Person Trophy") is not None) * 40
multi = (150 + random.randint(-5, 0)) if multi >= 150 else multi
multi /= 100
won = multi * amount
won = math.ceil(min(won, math.log(won, 1.001)))
await self.db.balance_add(ctx.author.id, won)
await self.bot.send(
ctx, ctx.l.econ.gamble.win.format(random.choice(ctx.l.econ.gamble.actions), won, self.d.emojis.emerald)
)
elif u_roll < b_roll:
await self.db.balance_sub(ctx.author.id, amount)
await self.bot.send(ctx, ctx.l.econ.gamble.lose.format(amount, self.d.emojis.emerald))
else:
await self.bot.send(ctx, ctx.l.econ.gamble.tie)
@commands.command(name="search", aliases=["beg"])
@commands.cooldown(1, 30 * 60, commands.BucketType.user)
@commands.max_concurrency(1, commands.BucketType.user)
async def search(self, ctx):
    """Beg for emeralds

    ~5/6 of the time (always, if the user is nearly broke) the user gains
    emeralds — or, on a 1/420 roll, a few Mooderalds. Otherwise they lose
    emeralds. Both gain and loss scale logarithmically with the user's
    current balance.
    """

    db_user = await self.db.fetch_user(ctx.author.id)

    if random.choice([True, True, True, True, True, False]) or db_user["emeralds"] < 2:
        # rare 1/420 roll yields Mooderalds instead of emeralds
        if random.randint(1, 420) == 420:
            mooderalds = random.randint(1, 3)
            await self.db.add_item(ctx.author.id, "Mooderald", 768, mooderalds)
            await self.bot.send(
                ctx, random.choice(ctx.l.econ.beg.mooderald).format(f"{mooderalds}{self.d.emojis.autistic_emerald}")
            )
        else:
            # payout grows with current balance (log base 1.5)
            amount = 9 + math.ceil(math.log(db_user["emeralds"] + 1, 1.5)) + random.randint(1, 5)
            amount = random.randint(1, 4) if amount < 1 else amount

            await self.db.balance_add(ctx.author.id, amount)
            await self.bot.send(ctx, random.choice(ctx.l.econ.beg.positive).format(f"{amount}{self.d.emojis.emerald}"))
    else:
        # loss also grows with balance (log base 1.3) # ah yes, meth
        amount = 9 + math.ceil(math.log(db_user["emeralds"] + 1, 1.3)) + random.randint(1, 5)

        if amount < 1:
            amount = random.randint(1, 4)
        elif amount > 45000:
            # BUGFIX: random.randint requires integer bounds. The original
            # passed abs(int((amount - 45000)) / 3) + 1 — true division makes
            # that a float, so any loss over 45000 raised ValueError.
            amount = 45000 + random.randint(0, abs((amount - 45000) // 3) + 1)

        # never take more than the user actually has
        if db_user["emeralds"] < amount:
            amount = db_user["emeralds"]

        await self.db.balance_sub(ctx.author.id, amount)
        await self.bot.send(ctx, random.choice(ctx.l.econ.beg.negative).format(f"{amount}{self.d.emojis.emerald}"))
@commands.command(name="mine", aliases=["mein", "eun", "mien"])
@commands.guild_only()
@commands.cooldown(1, 2, commands.BucketType.user)
@commands.max_concurrency(1, commands.BucketType.user)
async def mine(self, ctx):
    """Mine for emeralds, rare items, or junk (gated behind a math captcha)."""

    # anti-macro captcha; math_problem() handles its own failure messaging
    if not await self.math_problem(ctx):
        return

    pickaxe = await self.db.fetch_pickaxe(ctx.author.id)

    # calculate if user finds emeralds OR not
    found = random.choice(self.calc_yield_chance_list(pickaxe))

    # calculate bonus emeralds from enchantment items (first owned book only)
    if found:
        for item in self.d.mining.yields_enchant_items.keys():
            if await self.db.fetch_item(ctx.author.id, item) is not None:
                found += random.choice(self.d.mining.yields_enchant_items[item])
                break

            await asyncio.sleep(0)  # yield to the event loop between DB checks

    if not found:
        for item in self.d.mining.findables:  # try to see if user gets an item
            # item tuple layout per usage: (name, sell_price, rarity, sticky?)
            # — TODO confirm against self.d.mining.findables definition
            if random.randint(0, item[2]) == 1:
                await self.db.add_item(ctx.author.id, item[0], item[1], 1, item[3])
                await self.bot.send(
                    ctx,
                    f"{self.d.emojis[self.d.emoji_items[pickaxe]]} \uFEFF "
                    + ctx.l.econ.mine.found_item_1.format(
                        random.choice(ctx.l.econ.mine.actions),
                        1,
                        item[0],
                        item[1],
                        self.d.emojis.emerald,
                        random.choice(ctx.l.econ.mine.places),
                    ),
                    True,
                )
                return

            await asyncio.sleep(0)

        # no emeralds and no item: flavor message with a fake find.
        # only works cause num of pickaxes is 6 and levels of fake finds is 3
        fake_finds = self.d.mining.finds[math.floor(self.d.mining.pickaxes.index(pickaxe) / 2)]
        await self.bot.send(
            ctx,
            f"{self.d.emojis[self.d.emoji_items[pickaxe]]} \uFEFF "
            + ctx.l.econ.mine.found_item_2.format(
                random.choice(ctx.l.econ.mine.actions),
                random.randint(1, 6),
                random.choice(ctx.l.econ.mine.useless),
                random.choice(fake_finds),
            ),
            True,
        )
    else:
        found = int(found)

        # Rich Person Trophy silently doubles emerald yield
        if await self.db.fetch_item(ctx.author.id, "Rich Person Trophy") is not None:
            found *= 2  # sekret

        await self.db.balance_add(ctx.author.id, found)

        await self.bot.send(
            ctx,
            f"{self.d.emojis[self.d.emoji_items[pickaxe]]} \uFEFF "
            + ctx.l.econ.mine.found_emeralds.format(random.choice(ctx.l.econ.mine.actions), found, self.d.emojis.emerald),
            True,
        )

    # 1/51 chance to grow the user's vault cap by one (up to 2000)
    if random.randint(0, 50) == 1:
        db_user = await self.db.fetch_user(ctx.author.id)

        if db_user["vault_max"] < 2000:
            await self.db.update_user(ctx.author.id, "vault_max", db_user["vault_max"] + 1)
@commands.command(name="fish", aliases=["phish", "feesh"])
@commands.guild_only()
@commands.cooldown(1, 2, commands.BucketType.user)
@commands.max_concurrency(1, commands.BucketType.user)
async def fish(self, ctx):
    """Fish for fish, items, or junk; requires a Fishing Rod and a captcha."""

    if not await self.math_problem(ctx, 5):
        return

    if await self.db.fetch_item(ctx.author.id, "Fishing Rod") is None:
        await self.bot.send(ctx, ctx.l.econ.fishing.stupid_1)
        return

    await self.bot.send(ctx, random.choice(ctx.l.econ.fishing.cast))

    async with ctx.typing():
        # base wait 8-20s; Lure I Book and active seaweed each shave 2s
        wait = random.randint(8, 20)

        if await self.db.fetch_item(ctx.author.id, "Lure I Book") is not None:
            wait -= 2

        if "seaweed" in self.v.chuggers.get(ctx.author.id, []):
            wait -= 2

        await asyncio.sleep(wait)

    # 1/8 of casts yield junk or an item rather than a fish
    if random.randint(1, 8) == 1:
        junk_chance = (True, True, True, True, False)

        # Fishing Trophy improves the odds of an item over junk
        if await self.db.fetch_item(ctx.author.id, "Fishing Trophy") is not None:
            junk_chance = (True, True, True, False, False, False)

        if random.choice(junk_chance):  # junk
            junk = random.choice(ctx.l.econ.fishing.junk)
            await self.bot.send(ctx, junk, True, True)

            if "meme" in junk:
                await self.bot.get_cog("Fun").meme(ctx)

            return

        # retry the findables table until something hits (guaranteed item)
        while True:
            for item in self.d.fishing.findables:
                if random.randint(0, (item[2] // 2) + 2) == 1:
                    await self.db.add_item(ctx.author.id, item[0], item[1], 1, item[3])
                    await self.bot.send(
                        ctx,
                        random.choice(ctx.l.econ.fishing.item).format(item[0], item[1], self.d.emojis.emerald),
                        True,
                        True,
                    )
                    return

                await asyncio.sleep(0)

    # normal catch: weighted random fish species
    fish_id = random.choices(self.d.fishing.fish_ids, self.d.fishing.fish_weights)[0]
    fish = self.d.fishing.fish[fish_id]

    await self.db.add_item(ctx.author.id, fish.name, -1, 1)
    await self.bot.send(
        ctx, random.choice(ctx.l.econ.fishing.caught).format(fish.name, self.d.emojis.fish[fish_id]), True, True
    )

    await self.db.update_lb(ctx.author.id, "fish", 1, "add")

    # 1/51 chance to grow the user's vault cap by one (up to 2000)
    if random.randint(0, 50) == 1:
        db_user = await self.db.fetch_user(ctx.author.id)

        if db_user["vault_max"] < 2000:
            await self.db.update_user(ctx.author.id, "vault_max", db_user["vault_max"] + 1)
@commands.command(name="pillage", aliases=["rob", "mug"])
@commands.before_invoke(lock_author)
@commands.after_invoke(unlock_author)
@commands.guild_only()
@commands.cooldown(1, 300, commands.BucketType.user)
@commands.max_concurrency(1, commands.BucketType.user)
async def pillage(self, ctx, victim: discord.Member):
    """Attempt to steal emeralds from another member of this guild.

    Success odds depend on recent pillage activity, Jar Of Bees counts,
    the Bane Of Pillagers Amulet, and sword levels. Winning steals 10-40%
    of the victim's balance (capped and taxed 8%); losing pays the victim
    a penalty instead.
    """

    if victim.bot:
        if victim.id == self.bot.user.id:
            await self.bot.send(ctx, ctx.l.econ.pillage.bot_1)
        else:
            await self.bot.send(ctx, ctx.l.econ.pillage.bot_2)

        return

    if ctx.author.id == victim.id:
        await self.bot.send(ctx, ctx.l.econ.pillage.stupid_1)
        return

    if ctx.guild.get_member(victim.id) is None:
        await self.bot.send(ctx, ctx.l.econ.pillage.stupid_2)
        return

    # hold the victim's lock so concurrent balance changes can't race
    async with self._user_locks[victim.id]:
        db_user = await self.db.fetch_user(ctx.author.id)

        # both parties need at least 64 emeralds
        if db_user["emeralds"] < 64:
            await self.bot.send(ctx, ctx.l.econ.pillage.stupid_3.format(self.d.emojis.emerald))
            return

        db_victim = await self.db.fetch_user(victim.id)

        if db_victim["emeralds"] < 64:
            await self.bot.send(ctx, ctx.l.econ.pillage.stupid_4.format(self.d.emojis.emerald))
            return

        # track how often each side has pillaged / been pillaged recently
        pillager_pillages = self.v.pillagers.get(ctx.author.id, 0)
        self.v.pillagers[ctx.author.id] = pillager_pillages + 1

        times_pillaged = self.v.pillages.get(victim.id, 0)
        self.v.pillages[victim.id] = times_pillaged + 1

        user_bees = await self.db.fetch_item(ctx.author.id, "Jar Of Bees")
        user_bees = 0 if user_bees is None else user_bees["amount"]

        victim_bees = await self.db.fetch_item(victim.id, "Jar Of Bees")
        victim_bees = 0 if victim_bees is None else victim_bees["amount"]

        # build the success-odds pool; heavy recent activity nearly
        # guarantees failure, amulet strongly protects the victim, bees tilt
        # the odds toward whoever has more
        if pillager_pillages > 7 or times_pillaged > 4:
            chances = [False] * 50 + [True]
        elif await self.db.fetch_item(victim.id, "Bane Of Pillagers Amulet"):
            chances = [False] * 5 + [True]
        elif user_bees > victim_bees:
            chances = [False] * 3 + [True] * 5
        elif user_bees < victim_bees:
            chances = [False] * 5 + [True] * 3
        else:
            chances = [True, False]

        # better sword than the victim nudges the pool one entry either way
        pillager_sword_lvl = self.d.sword_list.index((await self.db.fetch_sword(ctx.author.id)).lower())
        victim_sword_lvl = self.d.sword_list.index((await self.db.fetch_sword(victim.id)).lower())

        if pillager_sword_lvl > victim_sword_lvl:
            chances.append(True)
        elif pillager_sword_lvl < victim_sword_lvl:
            chances.append(False)

        success = random.choice(chances)

        if success:
            # calculate base stolen value: 10-40% of victim's balance
            stolen = math.ceil(db_victim["emeralds"] * (random.randint(10, 40) / 100))
            # cap the haul based on the pillager's own balance
            stolen = min(stolen, math.ceil(db_user["emeralds"] ** 1.1 + db_user["emeralds"] * 5) + random.randint(1, 10))

            # 8% tax to prevent exploitation of the pillaging leaderboard
            adjusted = math.ceil(stolen * 0.92)

            await self.db.balance_sub(victim.id, stolen)
            await self.db.balance_add(ctx.author.id, adjusted)  # 8% tax

            await self.bot.send(ctx, random.choice(ctx.l.econ.pillage.u_win.user).format(adjusted, self.d.emojis.emerald))
            await self.bot.send(
                victim,
                random.choice(ctx.l.econ.pillage.u_win.victim).format(ctx.author.mention, stolen, self.d.emojis.emerald),
            )

            await self.db.update_lb(ctx.author.id, "pillages", adjusted, "add")
        else:
            # failed attempt: pay the victim a third of the pillager's
            # balance (minimum 32)
            penalty = max(32, db_user["emeralds"] // 3)

            await self.db.balance_sub(ctx.author.id, penalty)
            await self.db.balance_add(victim.id, penalty)

            await self.bot.send(ctx, random.choice(ctx.l.econ.pillage.u_lose.user).format(penalty, self.d.emojis.emerald))
            await self.bot.send(victim, random.choice(ctx.l.econ.pillage.u_lose.victim).format(ctx.author.mention))
@commands.command(name="use", aliases=["eat", "chug", "smoke"])
@commands.cooldown(1, 2, commands.BucketType.user)
async def use_item(self, ctx, *, thing):
    """Allows you to use potions and some other items

    Accepts an optional leading count ("use 3 honey jar"). Potions add a
    timed entry to self.v.chuggers and remove it when the effect expires.
    """

    thing = thing.lower()
    split = thing.split()

    # parse an optional leading amount; default to 1
    try:
        amount = int(split[0])
        thing = " ".join(split[1:])
    except (IndexError, ValueError):
        amount = 1

    if amount < 1:
        await self.bot.send(ctx, ctx.l.econ.use.stupid_3)
        return

    if amount > 100:
        await self.bot.send(ctx, ctx.l.econ.use.stupid_4)
        return

    current_pots = self.v.chuggers.get(ctx.author.id)

    # the same effect can't be stacked while it's still active
    if thing in ([] if current_pots is None else current_pots):
        await self.bot.send(ctx, ctx.l.econ.use.stupid_1)
        return

    db_item = await self.db.fetch_item(ctx.author.id, thing)

    if db_item is None:
        await self.bot.send(ctx, ctx.l.econ.use.stupid_2)
        return

    if db_item["amount"] < amount:
        await self.bot.send(ctx, ctx.l.econ.use.stupid_5)
        return

    if thing == "haste i potion":
        if amount > 1:
            await self.bot.send(ctx, ctx.l.econ.use.stupid_1)
            return

        await self.db.remove_item(ctx.author.id, thing, 1)

        self.v.chuggers[ctx.author.id] = self.v.chuggers.get(ctx.author.id, [])  # ensure user has an entry
        self.v.chuggers[ctx.author.id].append("haste i potion")

        await self.bot.send(ctx, ctx.l.econ.use.chug.format("Haste I Potion", 6))

        self.v.pause_econ.pop(ctx.author.id, None)

        await asyncio.sleep(60 * 6)  # effect lasts 6 minutes

        await self.bot.send(ctx.author, ctx.l.econ.use.done.format("Haste I Potion"))

        self.v.chuggers[ctx.author.id].pop(
            self.v.chuggers[ctx.author.id].index("haste i potion")
        )  # pop pot from active potion fx
        return

    if thing == "haste ii potion":
        if amount > 1:
            await self.bot.send(ctx, ctx.l.econ.use.stupid_1)
            return

        await self.db.remove_item(ctx.author.id, thing, 1)

        self.v.chuggers[ctx.author.id] = self.v.chuggers.get(ctx.author.id, [])
        self.v.chuggers[ctx.author.id].append("haste ii potion")

        await self.bot.send(ctx, ctx.l.econ.use.chug.format("Haste II Potion", 4.5))

        self.v.pause_econ.pop(ctx.author.id, None)

        await asyncio.sleep(60 * 4.5)  # effect lasts 4.5 minutes

        await self.bot.send(ctx.author, ctx.l.econ.use.done.format("Haste II Potion"))

        self.v.chuggers[ctx.author.id].pop(
            self.v.chuggers[ctx.author.id].index("haste ii potion")
        )  # pop pot from active potion fx
        return

    if thing == "seaweed":
        if amount > 1:
            await self.bot.send(ctx, ctx.l.econ.use.stupid_1)
            return

        await self.db.remove_item(ctx.author.id, thing, 1)

        self.v.chuggers[ctx.author.id] = self.v.chuggers.get(ctx.author.id, [])
        self.v.chuggers[ctx.author.id].append("seaweed")

        await self.bot.send(ctx, ctx.l.econ.use.smoke_seaweed.format(2))

        await asyncio.sleep(60 * 2)  # effect lasts 2 minutes

        await self.bot.send(ctx.author, ctx.l.econ.use.seaweed_done)

        # BUGFIX: the effect entry was never removed (unlike the haste
        # potions), so seaweed stayed "active" forever and could never be
        # used again
        self.v.chuggers[ctx.author.id].pop(self.v.chuggers[ctx.author.id].index("seaweed"))
        return

    if thing == "vault potion":
        if amount > 1:
            await self.bot.send(ctx, ctx.l.econ.use.stupid_1)
            return

        db_user = await self.db.fetch_user(ctx.author.id)

        if db_user["vault_max"] > 1999:
            await self.bot.send(ctx, ctx.l.econ.use.vault_max)
            return

        # grow the vault cap by 9-15, clamped at 2000
        add = random.randint(9, 15)

        if db_user["vault_max"] + add > 2000:
            add = 2000 - db_user["vault_max"]

        await self.db.remove_item(ctx.author.id, "Vault Potion", 1)
        await self.db.set_vault(ctx.author.id, db_user["vault_bal"], db_user["vault_max"] + add)

        await self.bot.send(ctx, ctx.l.econ.use.vault_pot.format(add))
        return

    if thing == "honey jar":
        db_user = await self.db.fetch_user(ctx.author.id)

        # health is capped at 20; only consume what's actually needed
        max_amount = 20 - db_user["health"]

        if max_amount < 1:
            await self.bot.send(ctx, ctx.l.econ.use.cant_use_any.format("Honey Jars"))
            return

        if db_user["health"] + amount > 20:
            amount = max_amount

        await self.db.update_user(ctx.author.id, "health", db_user["health"] + amount)
        await self.db.remove_item(ctx.author.id, "Honey Jar", amount)

        new_health = amount + db_user["health"]
        await self.bot.send(ctx, ctx.l.econ.use.chug_honey.format(amount, new_health, self.d.emojis.heart_full))
        return

    if thing == "present":
        if amount > 1:
            await self.bot.send(ctx, ctx.l.econ.use.stupid_1)
            return

        await self.db.remove_item(ctx.author.id, "Present", 1)

        # retry the mining findables table until something hits
        while True:
            for item in self.d.mining.findables:
                if random.randint(0, (item[2] // 2) + 2) == 1:
                    await self.db.add_item(ctx.author.id, item[0], item[1], 1, item[3])
                    await self.bot.send(
                        ctx, random.choice(ctx.l.econ.use.present).format(item[0], item[1], self.d.emojis.emerald)
                    )
                    return

                await asyncio.sleep(0)

    if thing == "barrel":
        if amount > 1:
            await self.bot.send(ctx, ctx.l.econ.use.stupid_1)
            return

        await self.db.remove_item(ctx.author.id, "Barrel", 1)

        # up to 20 attempts at a rare (rarity > 1000) item...
        for _ in range(20):
            for item in self.d.mining.findables:
                if item[2] > 1000:
                    # BUGFIX: randint requires integer bounds — the original
                    # passed (item[2] // 1.5) + 5, a float (// with a float
                    # operand yields a float), which raised ValueError
                    if random.randint(0, int(item[2] // 1.5) + 5) == 1:
                        await self.db.add_item(ctx.author.id, item[0], item[1], 1, item[3])
                        await self.bot.send(
                            ctx, random.choice(ctx.l.econ.use.barrel_item).format(item[0], item[1], self.d.emojis.emerald)
                        )
                        return

                await asyncio.sleep(0)

        # ...otherwise the barrel pays out raw emeralds
        ems = random.randint(2, 4096)

        if await self.db.fetch_item(ctx.author.id, "Rich Person Trophy") is not None:
            ems *= 1.5
            ems = round(ems)

        await self.bot.send(ctx, random.choice(ctx.l.econ.use.barrel_ems).format(ems, self.d.emojis.emerald))
        await self.db.balance_add(ctx.author.id, ems)
        return

    if thing == "glass beaker":
        # combine glass beakers with slime balls into beakers of slime
        slime_balls = await self.db.fetch_item(ctx.author.id, "Slime Ball")

        if slime_balls is None or slime_balls["amount"] < amount:
            await ctx.send(ctx.l.econ.use.need_slimy_balls)
            return

        await self.db.remove_item(ctx.author.id, "Slime Ball", amount)
        await self.db.remove_item(ctx.author.id, "Glass Beaker", amount)
        await self.db.add_item(ctx.author.id, "Beaker Of Slime", 13, amount, False)

        await self.bot.send(ctx, ctx.l.econ.use.slimy_balls_funne.format(amount))
        return

    if thing == "beaker of slime":
        # split a beaker of slime back into slime balls
        await self.db.remove_item(ctx.author.id, "Beaker Of Slime", amount)
        await self.db.add_item(ctx.author.id, "Slime Ball", 5, amount, True)

        await self.bot.send(ctx, ctx.l.econ.use.beaker_of_slime_undo.format(amount))
        return

    # item exists but isn't usable
    await self.bot.send(ctx, ctx.l.econ.use.stupid_6)
@commands.command(name="honey", aliases=["harvesthoney", "horny"])
@commands.cooldown(1, 24 * 60 * 60, commands.BucketType.user)
async def honey(self, ctx):
    """Harvest Honey Jars from owned Jars Of Bees (once per day).

    Requires at least 100 bees; the effective bee count is capped at 1024.
    Yield is bees minus a random bite; afterwards there is a 1/4 chance
    some bees die.
    """

    bees = await self.db.fetch_item(ctx.author.id, "Jar Of Bees")

    if bees is not None:
        bees = bees["amount"]
    else:
        bees = 0

    if bees > 1024:
        bees = 1024

    if bees < 100:
        await self.bot.send(ctx, random.choice(ctx.l.econ.honey.stupid_1))
        # not enough bees shouldn't burn the daily cooldown
        ctx.command.reset_cooldown(ctx)
        return

    # yield: bees minus a random 1/6..1/2 share
    jars = bees - random.randint(math.ceil(bees / 6), math.ceil(bees / 2))
    await self.db.add_item(ctx.author.id, "Honey Jar", 1, jars)

    await self.bot.send(ctx, random.choice(ctx.l.econ.honey.honey).format(jars))

    # 1/4 chance that ~1/75..1/50 of the bees die during harvest
    if random.choice([False] * 3 + [True]):
        bees_lost = random.randint(math.ceil(bees / 75), math.ceil(bees / 50))

        await self.db.remove_item(ctx.author.id, "Jar Of Bees", bees_lost)

        await self.bot.send(ctx, random.choice(ctx.l.econ.honey.ded).format(bees_lost))
@commands.group(name="leaderboards", aliases=["lb", "lbs", "leaderboard"])
@commands.guild_only()
@commands.cooldown(1, 3, commands.BucketType.user)
@commands.max_concurrency(1, commands.BucketType.user)
async def leaderboards(self, ctx):
    """Leaderboard command group; with no subcommand, list the available boards."""
    if ctx.invoked_subcommand is None:
        # bare invocation is just a help menu — don't burn the cooldown
        ctx.command.reset_cooldown(ctx)

        embed = discord.Embed(color=self.d.cc, title=ctx.l.econ.lb.title)

        # "\uFEFF" fields are spacers for a two-column layout
        embed.add_field(name=ctx.l.econ.lb.emeralds, value=f"`{ctx.prefix}leaderboard emeralds`")
        embed.add_field(name="\uFEFF", value="\uFEFF")
        embed.add_field(name=ctx.l.econ.lb.stolen, value=f"`{ctx.prefix}leaderboard stolen`")

        embed.add_field(name=ctx.l.econ.lb.kills, value=f"`{ctx.prefix}leaderboard mobkills`")
        embed.add_field(name="\uFEFF", value="\uFEFF")
        embed.add_field(name=ctx.l.econ.lb.bees, value=f"`{ctx.prefix}leaderboard bees`")

        embed.add_field(name=ctx.l.econ.lb.cmds, value=f"`{ctx.prefix}leaderboard commands`")
        embed.add_field(name="\uFEFF", value="\uFEFF")
        embed.add_field(name=ctx.l.econ.lb.votes, value=f"`{ctx.prefix}leaderboard votes`")

        embed.add_field(name=ctx.l.econ.lb.fish, value=f"`{ctx.prefix}leaderboard fish`")
        embed.add_field(name="\uFEFF", value="\uFEFF")
        embed.add_field(name=ctx.l.econ.lb.mooderalds, value=f"`{ctx.prefix}leaderboard mooderalds`")

        await ctx.send(embed=embed)
@leaderboards.command(name="emeralds", aliases=["ems"])
async def leaderboard_emeralds(self, ctx):
    """Show the local (this guild) and global emerald leaderboards."""
    async with ctx.typing():
        ems_global, global_u_entry = await self.db.fetch_global_lb_user("emeralds", ctx.author.id)
        ems_local, local_u_entry = await self.db.fetch_local_lb_user(
            "emeralds", ctx.author.id, [m.id for m in ctx.guild.members if not m.bot]
        )

        # row template: emoji baked in now, rank/name/value filled by lb_logic
        lb_global = lb_logic(
            self, ems_global, global_u_entry, "\n`{0}.` **{0}**{1} {0}".format("{}", self.d.emojis.emerald)
        )
        lb_local = lb_logic(self, ems_local, local_u_entry, "\n`{0}.` **{0}**{1} {0}".format("{}", self.d.emojis.emerald))

        embed = discord.Embed(color=self.d.cc, title=ctx.l.econ.lb.lb_ems.format(self.d.emojis.emerald_spinn))

        embed.add_field(name=ctx.l.econ.lb.local_lb, value=lb_local)
        embed.add_field(name=ctx.l.econ.lb.global_lb, value=lb_global)

        await ctx.send(embed=embed)
@leaderboards.command(name="pillages", aliases=["pil", "stolen"])
async def leaderboard_pillages(self, ctx):
    """Show the local and global leaderboards for emeralds stolen via pillaging."""
    async with ctx.typing():
        pillages_global, global_u_entry = await self.db.fetch_global_lb("pillages", ctx.author.id)
        pillages_local, local_u_entry = await self.db.fetch_local_lb(
            "pillages", ctx.author.id, [m.id for m in ctx.guild.members if not m.bot]
        )

        lb_global = lb_logic(
            self, pillages_global, global_u_entry, "\n`{0}.` **{0}**{1} {0}".format("{}", self.d.emojis.emerald)
        )
        lb_local = lb_logic(
            self, pillages_local, local_u_entry, "\n`{0}.` **{0}**{1} {0}".format("{}", self.d.emojis.emerald)
        )

        embed = discord.Embed(color=self.d.cc, title=ctx.l.econ.lb.lb_pil.format(self.d.emojis.emerald))

        embed.add_field(name=ctx.l.econ.lb.local_lb, value=lb_local)
        embed.add_field(name=ctx.l.econ.lb.global_lb, value=lb_global)

        await ctx.send(embed=embed)
@leaderboards.command(name="mobkills", aliases=["kil", "kills", "kill", "bonk"])
async def leaderboard_mobkills(self, ctx):
    """Show the local and global mob-kill leaderboards."""
    async with ctx.typing():
        kills_global, global_u_entry = await self.db.fetch_global_lb("mobs_killed", ctx.author.id)
        kills_local, local_u_entry = await self.db.fetch_local_lb(
            "mobs_killed", ctx.author.id, [m.id for m in ctx.guild.members if not m.bot]
        )

        lb_global = lb_logic(
            self, kills_global, global_u_entry, "\n`{0}.` **{0}**{1} {0}".format("{}", self.d.emojis.stevegun)
        )
        lb_local = lb_logic(
            self, kills_local, local_u_entry, "\n`{0}.` **{0}**{1} {0}".format("{}", self.d.emojis.stevegun)
        )

        embed = discord.Embed(color=self.d.cc, title=ctx.l.econ.lb.lb_kil.format(self.d.emojis.stevegun))

        embed.add_field(name=ctx.l.econ.lb.local_lb, value=lb_local)
        embed.add_field(name=ctx.l.econ.lb.global_lb, value=lb_global)

        await ctx.send(embed=embed)
@leaderboards.command(name="bees", aliases=["jarofbees", "jarsofbees"])
async def leaderboard_bees(self, ctx):
    """Show the local and global Jar Of Bees leaderboards."""
    async with ctx.typing():
        bees_global, global_u_entry = await self.db.fetch_global_lb_item("Jar Of Bees", ctx.author.id)
        bees_local, local_u_entry = await self.db.fetch_local_lb_item(
            "Jar Of Bees", ctx.author.id, [m.id for m in ctx.guild.members if not m.bot]
        )

        lb_global = lb_logic(self, bees_global, global_u_entry, "\n`{0}.` **{0}**{1} {0}".format("{}", self.d.emojis.bee))
        lb_local = lb_logic(self, bees_local, local_u_entry, "\n`{0}.` **{0}**{1} {0}".format("{}", self.d.emojis.bee))

        embed = discord.Embed(color=self.d.cc, title=ctx.l.econ.lb.lb_bee.format(self.d.emojis.anibee))

        embed.add_field(name=ctx.l.econ.lb.local_lb, value=lb_local)
        embed.add_field(name=ctx.l.econ.lb.global_lb, value=lb_global)

        await ctx.send(embed=embed)
@leaderboards.command(name="commands", aliases=["cmds"])
async def leaderboard_commands(self, ctx):
    """Show the local and global commands-used leaderboards."""
    async with ctx.typing():
        # NOTE(review): cmds_lb is not awaited — presumably a synchronous
        # helper that reads in-memory counters; confirm it isn't a coroutine
        lb_global, lb_local = cmds_lb(self, ctx)

        embed = discord.Embed(color=self.d.cc, title=ctx.l.econ.lb.lb_cmds.format(":computer:"))

        embed.add_field(name=ctx.l.econ.lb.local_lb, value=lb_local)
        embed.add_field(name=ctx.l.econ.lb.global_lb, value=lb_global)

        await ctx.send(embed=embed)
@leaderboards.command(name="votes", aliases=["votestreaks", "votestreak"])
async def leaderboard_votes(self, ctx):
    """Show the local and global vote-streak leaderboards."""
    async with ctx.typing():
        votes_global, global_u_entry = await self.db.fetch_global_lb_user("vote_streak", ctx.author.id)
        votes_local, local_u_entry = await self.db.fetch_local_lb_user(
            "vote_streak", ctx.author.id, [m.id for m in ctx.guild.members if not m.bot]
        )

        lb_global = lb_logic(
            self, votes_global, global_u_entry, "\n`{0}.` **{0}**{1} {0}".format("{}", self.d.emojis.updoot)
        )
        lb_local = lb_logic(self, votes_local, local_u_entry, "\n`{0}.` **{0}**{1} {0}".format("{}", self.d.emojis.updoot))

        embed = discord.Embed(color=self.d.cc, title=ctx.l.econ.lb.lb_votes.format(":fire:"))

        embed.add_field(name=ctx.l.econ.lb.local_lb, value=lb_local)
        embed.add_field(name=ctx.l.econ.lb.global_lb, value=lb_global)

        await ctx.send(embed=embed)
@leaderboards.command(name="fish", aliases=["fishies", "fishing"])
async def leaderboard_fish(self, ctx):
    """Show the local and global fish-caught leaderboards."""
    async with ctx.typing():
        fish_global, global_u_entry = await self.db.fetch_global_lb("fish", ctx.author.id)
        fish_local, local_u_entry = await self.db.fetch_local_lb(
            "fish", ctx.author.id, [m.id for m in ctx.guild.members if not m.bot]
        )

        lb_global = lb_logic(
            self, fish_global, global_u_entry, "\n`{0}.` **{0}**{1} {0}".format("{}", self.d.emojis.fish.cod)
        )
        lb_local = lb_logic(
            self, fish_local, local_u_entry, "\n`{0}.` **{0}**{1} {0}".format("{}", self.d.emojis.fish.cod)
        )

        embed = discord.Embed(color=self.d.cc, title=ctx.l.econ.lb.lb_fish.format(self.d.emojis.fish.rainbow_trout))

        embed.add_field(name=ctx.l.econ.lb.local_lb, value=lb_local)
        embed.add_field(name=ctx.l.econ.lb.global_lb, value=lb_global)

        await ctx.send(embed=embed)
@leaderboards.command(name="mooderalds", aliases=["autism", "moods", "mooderald"])
async def leaderboard_mooderalds(self, ctx):
    """Show the local and global Mooderald leaderboards."""
    async with ctx.typing():
        moods_global, global_u_entry = await self.db.fetch_global_lb_item("Mooderald", ctx.author.id)
        moods_local, local_u_entry = await self.db.fetch_local_lb_item(
            "Mooderald", ctx.author.id, [m.id for m in ctx.guild.members if not m.bot]
        )

        lb_global = lb_logic(
            self, moods_global, global_u_entry, "\n`{0}.` **{0}**{1} {0}".format("{}", self.d.emojis.autistic_emerald)
        )
        lb_local = lb_logic(
            self, moods_local, local_u_entry, "\n`{0}.` **{0}**{1} {0}".format("{}", self.d.emojis.autistic_emerald)
        )

        embed = discord.Embed(color=self.d.cc, title=ctx.l.econ.lb.lb_moods.format(self.d.emojis.autistic_emerald))

        embed.add_field(name=ctx.l.econ.lb.local_lb, value=lb_local)
        embed.add_field(name=ctx.l.econ.lb.global_lb, value=lb_global)

        await ctx.send(embed=embed)
def setup(bot):
    """Register the Econ cog with the bot (discord.py extension entry point)."""
    econ_cog = Econ(bot)
    bot.add_cog(econ_cog)
|
{"/__main__.py": ["/src/bot.py"]}
|
10,661
|
gritor111/Villager-Bot
|
refs/heads/master
|
/src/cogs/core/loops.py
|
from discord.ext import commands, tasks
import traceback
import discord
import random
class Loops(commands.Cog):
    """Background task loops: status rotation, fishing-price refresh, reminders."""

    def __init__(self, bot):
        self.bot = bot

        self.d = bot.d
        self.db = bot.get_cog("Database")

        # kick off all background loops as soon as the cog is loaded
        self.change_status.start()
        self.update_fishing_prices.start()
        self.remind_reminders.start()

    def cog_unload(self):
        # cancel the loops on unload so a cog reload doesn't leave duplicates
        self.change_status.cancel()
        self.update_fishing_prices.cancel()
        self.remind_reminders.cancel()

    @tasks.loop(minutes=45)
    async def change_status(self):
        """Rotate the bot's "playing" status every 45 minutes."""
        await self.bot.wait_until_ready()

        await self.bot.change_presence(activity=discord.Game(name=random.choice(self.d.playing_list)))

    @tasks.loop(hours=24)
    async def update_fishing_prices(self):
        """Refresh fishing prices daily."""
        # NOTE(review): not awaited — presumably a synchronous method on the
        # bot; confirm it isn't a coroutine
        self.bot.update_fishing_prices()

    async def remind(self, reminder):
        """Deliver one reminder, preferring a reply to the originating message."""
        channel = self.bot.get_channel(reminder["cid"])

        if channel is not None:
            user = self.bot.get_user(reminder["uid"])

            if user is not None:
                lang = self.bot.get_lang(channel)

                try:
                    # best case: reply directly to the message that set the reminder
                    message = await channel.fetch_message(reminder["mid"])
                    await message.reply(
                        lang.useful.remind.reminder.format(user.mention, reminder["reminder"]), mention_author=True
                    )
                except Exception:
                    try:
                        # fall back to a plain channel message (original may be deleted)
                        await channel.send(lang.useful.remind.reminder.format(user.mention, reminder["reminder"]))
                    except Exception as e:
                        # last resort: report the failure to the error channel
                        traceback_text = "".join(traceback.format_exception(type(e), e, e.__traceback__, 4))
                        await self.bot.send(
                            self.bot.get_channel(self.d.error_channel_id), f"Reminder error: {user} ```{traceback_text}```"
                        )

    @tasks.loop(seconds=15)
    async def remind_reminders(self):
        """Poll the database for due reminders and dispatch each concurrently."""
        for reminder in await self.db.fetch_current_reminders():
            self.bot.loop.create_task(self.remind(reminder))
def setup(bot):
    """Register the Loops cog with the bot (discord.py extension entry point)."""
    loops_cog = Loops(bot)
    bot.add_cog(loops_cog)
|
{"/__main__.py": ["/src/bot.py"]}
|
10,662
|
gritor111/Villager-Bot
|
refs/heads/master
|
/src/cogs/cmds/owner.py
|
from util.misc import recursive_update
from discord.ext import commands
from typing import Union
import functools
import aiofiles
import asyncio
import discord
import random
import arrow
import json
import ast
import os
from util.setup import load_text_async
import util.cj as cj
class Owner(commands.Cog):
def __init__(self, bot):
    self.bot = bot

    self.d = bot.d  # static bot data (presumably loaded from data.json — see update())
    self.v = bot.v  # volatile runtime state shared between cogs
    self.db = bot.get_cog("Database")
@commands.command(name="load")
@commands.is_owner()
async def load_cog(self, ctx, cog):
    """Load the given extension under the cogs package (owner only)."""
    extension_path = f"cogs.{cog}"
    self.bot.load_extension(extension_path)

    await ctx.message.add_reaction(self.d.emojis.yes)
@commands.command(name="unload")
@commands.is_owner()
async def unload_cog(self, ctx, cog):
    """Unload the given extension under the cogs package (owner only)."""
    extension_path = f"cogs.{cog}"
    self.bot.unload_extension(extension_path)

    await ctx.message.add_reaction(self.d.emojis.yes)
@commands.command(name="reload")
@commands.is_owner()
async def reload_cog(self, ctx, cog):
    """Reload one extension, or every extension when given "all" (owner only)."""
    # "all" delegates to reload_all_cogs, which adds its own reaction
    if cog == "all":
        await self.reload_all_cogs(ctx)
        return

    self.bot.reload_extension(f"cogs.{cog}")
    await ctx.message.add_reaction(self.d.emojis.yes)
@commands.command(name="reloadall")
@commands.is_owner()
async def reload_all_cogs(self, ctx):
    """Reload every extension listed in bot.cog_list (owner only)."""
    for cog in self.bot.cog_list:
        self.bot.reload_extension(cog)

    await ctx.message.add_reaction(self.d.emojis.yes)
@commands.command(name="eval")
@commands.is_owner()
async def eval_stuff(self, ctx, *, code):
    """Evaluate arbitrary Python code and reply with the result (owner only).

    Wraps the snippet in an async function, rewrites any trailing bare
    expression into a ``return`` so the snippet yields a value, executes it,
    and replies with ``repr``-style output.

    SECURITY: this deliberately exec()s user input; it is gated by
    is_owner() and must remain so.
    """

    # strip a surrounding ``` code fence if present
    if code.startswith("```"):
        code = code.lstrip(" `py\n ").rstrip(" `\n ")

    # indent the snippet into an async wrapper so `await` works inside it
    code_nice = "async def eval_code():\n" + "\n".join(f"    {i}" for i in code.splitlines())

    code_parsed = ast.parse(code_nice)
    code_final = code_parsed.body[0].body

    def insert_returns(body):
        # BUGFIX: this helper previously took no parameter yet recursed with
        # one, raising TypeError whenever the snippet ended in an if/with
        # block; it also indexed body[-1] without guarding empty bodies
        # (e.g. an `if` with no `else`).
        if not body:
            return

        # turn a trailing bare expression into `return <expr>`
        if isinstance(body[-1], ast.Expr):
            body[-1] = ast.Return(body[-1].value)
            ast.fix_missing_locations(body[-1])

        # recurse into trailing if/with blocks so their last expression
        # is returned too
        if isinstance(body[-1], ast.If):
            insert_returns(body[-1].body)
            insert_returns(body[-1].orelse)

        if isinstance(body[-1], ast.With):
            insert_returns(body[-1].body)

    insert_returns(code_final)

    env = {**locals(), **globals()}

    try:
        exec(compile(code_parsed, filename="<ast>", mode="exec"), env)
        result = await eval("eval_code()", env)
    except discord.errors.Forbidden:
        await ctx.send("Missing permissions (FORBIDDEN)")
    except Exception as e:
        await self.bot.get_cog("Events").debug_error(ctx, e, ctx)
    else:
        await ctx.send(f"```py\n{result}```")
@commands.command(name="gitpull")
@commands.max_concurrency(1, per=commands.BucketType.default, wait=True)
@commands.is_owner()
async def gitpull(self, ctx):
    """Run ``git pull`` in the working directory and post its output (owner only)."""
    async with ctx.typing():
        # run the blocking os.system call in the bot's thread pool; output
        # is captured via shell redirection into a temp file
        system_call = functools.partial(os.system, "git pull > git_pull_log 2>&1")
        await self.bot.loop.run_in_executor(self.bot.tpool, system_call)

        async with aiofiles.open("git_pull_log", "r") as f:
            await self.bot.send(ctx, f"```diff\n{await f.read()}\n```")

    os.remove("git_pull_log")
@commands.command(name="update")
@commands.max_concurrency(1, per=commands.BucketType.default, wait=True)
@commands.is_owner()
async def update(self, ctx, thing):
    """Hot-reload bot data, translation text, or the mcserver list (owner only)."""
    if thing.lower() == "data":
        async with aiofiles.open("data/data.json", "r", encoding="utf8") as d:
            # NOTE(review): this rebinds only this cog's self.d; other cogs
            # keep their own references — presumably recursive_update mutates
            # the shared object in place, which is what makes this work.
            # Confirm against util.misc.recursive_update.
            self.d = recursive_update(self.d, cj.classify(json.loads(await d.read())))

        # update some things which were just overwritten
        self.bot.mutate_botd()
    elif thing.lower() == "text":
        self.bot.langs.update(await load_text_async())
    elif thing.lower() == "mcservers":
        self.v.additional_mcservers = await self.db.fetch_all_mcservers()
    else:
        await self.bot.send(ctx, 'Invalid, options are "data", "text", or "mcservers"')
        return

    await ctx.message.add_reaction(self.d.emojis.yes)
@commands.command(name="botban")
@commands.is_owner()
async def botban_user(self, ctx, users: commands.Greedy[discord.User]):
    """Ban one or more users from using the bot (owner only)."""
    if not users:
        await self.bot.send(ctx, "You have to specify a user.")
        return

    for target in users:
        await self.db.set_botbanned(target.id, True)

    await ctx.message.add_reaction(self.d.emojis.yes)
@commands.command(name="unbotban", aliases=["botunban"])
@commands.is_owner()
async def unbotban_user(self, ctx, users: commands.Greedy[discord.User]):
    """Lift the bot ban for one or more users (owner only)."""
    if not users:
        await self.bot.send(ctx, "You have to specify a user.")
        return

    for target in users:
        await self.db.set_botbanned(target.id, False)

    await ctx.message.add_reaction(self.d.emojis.yes)
@commands.command(name="lookup")
@commands.is_owner()
async def lookup(self, ctx, user: Union[discord.User, int]):
    """List every guild the bot shares with the given user (owner only)."""
    uid = user.id if isinstance(user, discord.User) else user

    mutual_lines = [
        f"{guild} **|** `{guild.id}`\n"
        for guild in self.bot.guilds
        if guild.get_member(uid) is not None
    ]

    if not mutual_lines:
        await self.bot.send(ctx, "No results...")
    else:
        await self.bot.send(ctx, "".join(mutual_lines))
@commands.command(name="givehistory", aliases=["transactions"])
@commands.is_owner()
async def transaction_history(self, ctx, user: discord.User):
    """Show a user's give/transaction history as a reaction-paginated embed (owner only)."""
    page_max = await self.db.fetch_transactions_page_count(user.id)

    page = 0
    msg = None        # the embed message being paginated; created on first pass
    first_time = True  # reactions are only added once

    while True:
        entries = await self.db.fetch_transactions_page(user.id, page=page)

        if len(entries) == 0:
            body = ctx.l.econ.inv.empty
        else:
            body = ""  # text for that page

            for entry in entries:
                giver = self.bot.get_user(entry["giver_uid"])
                receiver = self.bot.get_user(entry["recvr_uid"])
                item = entry["item"]

                if item == "emerald":
                    item = self.d.emojis.emerald

                body += f"__[{giver}]({entry['giver_uid']})__ *gave* __{entry['amount']}x **{item}**__ *to* __[{receiver}]({entry['recvr_uid']})__ *{arrow.get(entry['ts']).humanize()}*\n"

        embed = discord.Embed(color=self.d.cc, description=body)
        embed.set_author(name=f"Transaction history for {user}", icon_url=user.avatar_url_as())
        embed.set_footer(text=f"Page {page+1}/{page_max+1}")

        if msg is None:
            msg = await ctx.send(embed=embed)
        else:
            await msg.edit(embed=embed)

        # single page: no pagination controls needed
        if page_max > 0:
            if first_time:
                await msg.add_reaction("⬅️")
                await asyncio.sleep(0.1)
                await msg.add_reaction("➡️")
                await asyncio.sleep(0.1)

            try:

                def author_check(react, r_user):
                    return r_user == ctx.author and ctx.channel == react.message.channel and msg.id == react.message.id

                # wait for reaction from message author; give up after 3 minutes
                react, r_user = await self.bot.wait_for(
                    "reaction_add", check=author_check, timeout=(3 * 60)
                )
            except asyncio.TimeoutError:
                return

            await react.remove(ctx.author)

            # clamp page movement to [0, page_max]
            if react.emoji == "⬅️":
                page -= 1 if page - 1 >= 0 else 0
            if react.emoji == "➡️":
                page += 1 if page + 1 <= page_max else 0

            await asyncio.sleep(0.1)
        else:
            break

        first_time = False
@commands.command(name="setactivity")
@commands.is_owner()
async def set_activity(self, ctx, *, activity):
    """Set the bot's "playing" status text (owner only)."""
    await self.bot.change_presence(activity=discord.Game(name=activity))

    await ctx.message.add_reaction(self.d.emojis.yes)
@commands.command(name="whoyadaddy", aliases=["whodaddy"])
@commands.is_owner()
async def who_ya_daddy(self, ctx):
    """Joke command: reply with a random entry from self.d.owos."""
    await ctx.send(f"Iapetus11 is {random.choice(self.d.owos)}")
@commands.command(name="topguilds")
@commands.is_owner()
async def top_guilds(self, ctx):
    """List the 20 largest guilds the bot is in, by member count (owner only)."""
    biggest = sorted(self.bot.guilds, key=(lambda g: g.member_count), reverse=True)[:20]

    body = "".join(
        f"{rank}. **{g.member_count}** {g} *{g.id}*\n" for rank, g in enumerate(biggest, start=1)
    )

    await self.bot.send(ctx, body)
@commands.command(name="toggleownerlock", aliases=["ownerlock"])
@commands.is_owner()
async def toggle_owner_lock(self, ctx):
    """Owner-only: flip the global owner-lock flag and report the new state."""
    locked = not self.bot.owner_locked
    self.bot.owner_locked = locked
    await self.bot.send(ctx, f"All commands owner only: {locked}")
@commands.command(name="setbal")
@commands.is_owner()
async def set_user_bal(self, ctx, user: Union[discord.User, int], balance: int):
    """Owner-only: set a user's emerald balance (accepts a User or a raw id)."""
    # discord.User objects carry .id; a bare int IS the id already.
    uid = getattr(user, "id", user)

    await self.db.update_user(uid, "emeralds", balance)
    await ctx.message.add_reaction(self.d.emojis.yes)
@commands.command(name="itemwealth")
@commands.is_owner()
async def item_wealth(self, ctx):
    """Owner-only: show the 30 users with the highest total item sell-value.

    Sums ``amount * sell_price`` per user across the items table and sends
    a ranked ``uid - value`` list.
    """
    items = await self.db.db.fetch("SELECT * FROM items")

    # Accumulate each user's total item value.
    wealth = {}
    for item in items:
        wealth[item["uid"]] = wealth.get(item["uid"], 0) + item["amount"] * item["sell_price"]

    top = sorted(wealth.items(), key=(lambda e: e[1]), reverse=True)[:30]

    body = "".join(f"`{uid}` - {total}{self.d.emojis.emerald}\n" for uid, total in top)

    # BUG FIX: Discord rejects empty message bodies (raised HTTPException when
    # the items table was empty) — send a placeholder instead.
    await ctx.send(body or "*no items found*")
"""
@commands.command(name='updatesticky')
@commands.is_owner()
async def update_sticky(self, ctx):
await ctx.send('starting...')
to_be_sticky = [
*self.d.mining.pickaxes,
'Netherite Sword', 'Diamond Sword', 'Gold Sword', 'Iron Sword', 'Stone Sword', 'Wood Sword',
'Bane Of Pillagers Amulet',
'Rich Person Tropy'
]
for item in to_be_sticky:
await self.db.db.execute('UPDATE items SET sticky = true WHERE name = $1', item)
await ctx.send('done.')
"""
"""
@commands.command(name='massunban')
@commands.is_owner()
async def mass_unban(self, ctx):
exempt = [m.id for m in self.bot.get_guild(730519472863051910).members]
# remove botbans
async with self.db.db.acquire() as con:
await con.execute('UPDATE users SET bot_banned = false WHERE uid = ANY($1::bigint[])', exempt)
await ctx.send('Finished bot-bans.')
support_guild = self.bot.get_guild(self.d.support_server_id)
# server bans
bans = await support_guild.bans()
for ban in bans:
if ban.user.id not in exempt:
user = self.bot.get_user(ban.user.id)
if user is None:
try:
user = await self.bot.fetch_user(ban.user.id)
except Exception:
continue
await support_guild.unban(user, reason='Mass pardon of Nov 14th')
await ctx.send('Done guild unbanning.')
for uid in exempt:
await self.bot.get_cog('Mod').ban_user(ctx, uid, reason='Llama Alt')
await ctx.send('Done restoring llama bans')
"""
def setup(bot):
    """Discord.py extension entry point: register the Owner cog."""
    cog = Owner(bot)
    bot.add_cog(cog)
|
{"/__main__.py": ["/src/bot.py"]}
|
10,663
|
gritor111/Villager-Bot
|
refs/heads/master
|
/src/cogs/cmds/useful.py
|
from urllib.parse import quote as urlquote
from discord.ext import commands
import async_cse
import asyncio
import discord
import psutil
import arrow
class Useful(commands.Cog):
def __init__(self, bot):
    # Keep handles to shared bot state for all Useful commands.
    self.bot = bot
    self.d = bot.d  # static bot data/config (emojis, links, colors, ...)
    self.v = bot.v  # shared mutable runtime state (counters, caches, ...)
    # Google Custom Search client used by the google/youtube/image commands.
    self.google_client = async_cse.Search(bot.k.google)
    self.db = bot.get_cog("Database")  # database-access cog
@commands.group(name="help")
async def help(self, ctx):
    """Show the main help menu, or detailed help for a specific command.

    Invoked bare (``help``) it sends the category overview embed; invoked
    with trailing text (``help <cmd>``) it looks the command up and sends
    its localized help text instead.
    """
    if ctx.invoked_subcommand is None:
        # Everything after "<prefix>help " is treated as a command name.
        cmd = ctx.message.content.replace(f"{ctx.prefix}help ", "")

        if cmd != "":
            cmd_true = self.bot.get_command(cmd.lower())

            if cmd_true is not None:
                # Merge all per-category localized help texts into one lookup table.
                all_help = {**ctx.l.help.econ, **ctx.l.help.mc, **ctx.l.help.util, **ctx.l.help.fun, **ctx.l.help.mod}

                help_text = all_help.get(str(cmd_true))

                if help_text is None:
                    await self.bot.send(ctx, ctx.l.help.main.nodoc)
                    return

                embed = discord.Embed(color=self.d.cc)
                embed.set_author(name=ctx.l.help.n.cmd, icon_url=self.d.splash_logo)
                embed.set_footer(text=ctx.l.misc.petus)

                embed.description = help_text.format(ctx.prefix)

                if len(cmd_true.aliases) > 0:
                    embed.description += "\n\n" + ctx.l.help.main.aliases.format("`, `".join(cmd_true.aliases))

                await ctx.send(embed=embed)
                return

        # No (valid) command name given: send the category-overview embed.
        embed = discord.Embed(color=self.d.cc)
        embed.set_author(name=ctx.l.help.n.title, icon_url=self.d.splash_logo)
        embed.description = ctx.l.help.main.desc.format(self.d.support, self.d.topgg)

        p = ctx.prefix

        embed.add_field(name=(self.d.emojis.emerald_spinn + ctx.l.help.n.economy), value=f"`{p}help econ`")
        embed.add_field(name=(self.d.emojis.bounce + " " + ctx.l.help.n.minecraft), value=f"`{p}help mc`")
        embed.add_field(name=(self.d.emojis.anichest + ctx.l.help.n.utility), value=f"`{p}help util`")
        embed.add_field(name=(self.d.emojis.rainbow_shep + ctx.l.help.n.fun), value=f"`{p}help fun`")
        embed.add_field(name=(self.d.emojis.netherite_sword_ench + ctx.l.help.n.admin), value=f"`{p}help admin`")
        embed.add_field(
            name=(self.d.emojis.heart_spin + ctx.l.help.main.support),
            value=f"**[{ctx.l.help.main.clickme}]({self.d.support})**",
        )

        embed.set_footer(
            text=ctx.l.useful.credits.foot.format(ctx.prefix) + " | " + ctx.l.useful.rules.slashrules.format(ctx.prefix)
        )

        await ctx.send(embed=embed)
@help.command(name="economy", aliases=["econ"])
async def help_economy(self, ctx):
    """List all economy commands in one embed."""
    command_list = "`, `".join(ctx.l.help.econ)

    embed = discord.Embed(color=self.d.cc)
    embed.set_author(name=f"{ctx.l.help.n.title} [{ctx.l.help.n.economy}]", icon_url=self.d.splash_logo)
    embed.description = f"`{command_list}`\n\n{ctx.l.help.main.howto.format(ctx.prefix)}"
    embed.set_footer(text=ctx.l.misc.petus)

    await ctx.send(embed=embed)
@help.command(name="minecraft", aliases=["mc"])
async def help_minecraft(self, ctx):
    """List all Minecraft-related commands in one embed."""
    command_list = "`, `".join(ctx.l.help.mc)

    embed = discord.Embed(color=self.d.cc)
    embed.set_author(name=f"{ctx.l.help.n.title} [{ctx.l.help.n.minecraft}]", icon_url=self.d.splash_logo)
    embed.description = f"`{command_list}`\n\n{ctx.l.help.main.howto.format(ctx.prefix)}"
    embed.set_footer(text=ctx.l.misc.petus)

    await ctx.send(embed=embed)
@help.command(name="utility", aliases=["util", "useful"])
async def help_utility(self, ctx):
    """List all utility commands in one embed."""
    command_list = "`, `".join(ctx.l.help.util)

    embed = discord.Embed(color=self.d.cc)
    embed.set_author(name=f"{ctx.l.help.n.title} [{ctx.l.help.n.utility}]", icon_url=self.d.splash_logo)
    embed.description = f"`{command_list}`\n\n{ctx.l.help.main.howto.format(ctx.prefix)}"
    embed.set_footer(text=ctx.l.misc.petus)

    await ctx.send(embed=embed)
@help.command(name="fun")
async def help_fun(self, ctx):
    """List all fun commands in one embed."""
    command_list = "`, `".join(ctx.l.help.fun)

    embed = discord.Embed(color=self.d.cc)
    embed.set_author(name=f"{ctx.l.help.n.title} [{ctx.l.help.n.fun}]", icon_url=self.d.splash_logo)
    embed.description = f"`{command_list}`\n\n{ctx.l.help.main.howto.format(ctx.prefix)}"
    embed.set_footer(text=ctx.l.misc.petus)

    await ctx.send(embed=embed)
@help.command(name="administrator", aliases=["mod", "moderation", "administrative", "admin"])
async def help_administrative(self, ctx):
    """List all moderation/admin commands in one embed."""
    command_list = "`, `".join(ctx.l.help.mod)

    embed = discord.Embed(color=self.d.cc)
    embed.set_author(name=f"{ctx.l.help.n.title} [{ctx.l.help.n.admin}]", icon_url=self.d.splash_logo)
    embed.description = f"`{command_list}`\n\n{ctx.l.help.main.howto.format(ctx.prefix)}"
    embed.set_footer(text=ctx.l.misc.petus)

    await ctx.send(embed=embed)
@commands.command(name="credits")
@commands.cooldown(1, 2, commands.BucketType.user)
async def credits(self, ctx):
    """Show a reaction-paginated embed crediting the bot's contributors."""
    embed_template = discord.Embed(color=self.d.cc)
    embed_template.set_author(name=ctx.l.useful.credits.credits, icon_url=self.d.splash_logo)

    fields = []

    # One field per credited person; a zero-width spacer field after every
    # second entry keeps the 2-column embed layout aligned.
    for i, entry in enumerate(ctx.l.useful.credits.people.items()):
        person, what = entry

        user = self.bot.get_user(self.d.credit_users[person])

        fields.append({"name": f"**{user.display_name}**", "value": what})

        if i % 2 == 1:
            fields.append({"value": "\uFEFF", "name": "\uFEFF"})

    # Paginate: at most 9 fields (3 embed rows) per page.
    groups = [fields[i : i + 9] for i in range(0, len(fields), 9)]
    page_max = len(groups)

    page = 0
    msg = None

    while True:
        embed = embed_template.copy()

        for field in groups[page]:
            embed.add_field(**field)

        embed.set_footer(text=f"{ctx.l.econ.page} {page+1}/{page_max}")

        # The "everyone else" thank-you goes on the last page only.
        if page == page_max - 1:
            embed.add_field(name="\uFEFF", value=ctx.l.useful.credits.others, inline=False)

        if msg is None:
            msg = await ctx.send(embed=embed)
        elif not msg.embeds[0] == embed:
            # Only edit when the page content actually changed.
            await msg.edit(embed=embed)

        if page_max <= 1:
            return

        await asyncio.sleep(0.25)
        await msg.add_reaction("⬅️")
        await asyncio.sleep(0.25)
        await msg.add_reaction("➡️")

        try:

            def author_check(react, r_user):
                # Only accept reactions from the invoker on this exact message.
                return r_user == ctx.author and ctx.channel == react.message.channel and msg.id == react.message.id

            # wait for reaction from message author (30s)
            react, r_user = await self.bot.wait_for("reaction_add", check=author_check, timeout=30)
        except asyncio.TimeoutError:
            return

        await react.remove(ctx.author)

        if react.emoji == "⬅️":
            page -= 1
        elif react.emoji == "➡️":
            page += 1

        # Clamp the page index to the valid range.
        if page > page_max - 1:
            page = page_max - 1

        if page < 0:
            page = 0

        await asyncio.sleep(0.2)
@commands.command(name="ping", aliases=["pong", "ding", "dong", "bing", "bong", "shing", "shling", "schlong"])
async def ping_pong(self, ctx):
    """Latency check — replies with the 'opposite' of whichever alias was used."""
    content = ctx.message.content.lower()

    # Checked in order; the first keyword found in the message wins.
    responses = (
        ("ping", "Pong"),
        ("pong", "Ping"),
        ("ding", "Dong"),
        ("dong", "Ding"),
        ("bing", "Bong"),
        ("bong", "Bing"),
        ("shing", "Schlong"),
        ("shling", "Schlong"),
    )

    for keyword, reply in responses:
        if keyword in content:
            pp = reply
            break
    else:
        if "schlong" in content:
            # Easter egg: fixed fake latency.
            await self.bot.send(ctx, f"{self.d.emojis.aniheart} Magnum Dong! \uFEFF `69.00 ms`")
            return

    await self.bot.send(ctx, f"{self.d.emojis.aniheart} {pp}! \uFEFF `{round(self.bot.latency*1000, 2)} ms`")
@commands.command(name="vote", aliases=["votelink", "votelinks"])
async def votelinks(self, ctx):
    """Send an embed linking to the bot's top.gg vote page."""
    vote_url = self.d.topgg + "/vote"

    embed = discord.Embed(color=self.d.cc)
    embed.set_author(name="Vote for Villager Bot!", icon_url=self.d.splash_logo)
    embed.description = f"**[{ctx.l.useful.vote.click_1}]({vote_url})**"

    await ctx.send(embed=embed)
@commands.command(name="links", aliases=["invite", "support", "usefullinks", "website", "source"])
async def useful_links(self, ctx):
    """Send an embed with the support server, invite, top.gg and source links."""
    links = (
        f"**[{ctx.l.useful.links.support}]({self.d.support})\n"
        f"\n[{ctx.l.useful.links.invite}]({self.d.invite})\n"
        f"\n[{ctx.l.useful.links.topgg}]({self.d.topgg})\n"
        f"\n[{ctx.l.useful.links.source}]({self.d.github})**"
    )

    embed = discord.Embed(color=self.d.cc)
    embed.set_author(name="Useful Links", icon_url=self.d.splash_logo)
    embed.description = links

    await ctx.send(embed=embed)
@commands.command(name="stats", aliases=["bs"])
async def stats(self, ctx):
    """Show bot + process statistics (guilds, commands, memory, CPU, uptime, ...)."""
    await ctx.trigger_typing()

    uptime_seconds = (arrow.utcnow() - self.v.start_time).total_seconds()
    # Human-readable uptime in the invoker's locale.
    uptime = arrow.utcnow().shift(seconds=uptime_seconds).humanize(locale=ctx.l.lang, only_distance=True)

    proc = psutil.Process()

    # oneshot() caches process info so the grouped reads below are cheap.
    with proc.oneshot():
        mem_usage = proc.memory_full_info().uss
        threads = proc.num_threads()
        proc.cpu_percent(interval=0.1)

    embed = discord.Embed(color=self.d.cc)

    embed.set_author(name=ctx.l.useful.stats.stats, icon_url=self.d.splash_logo)
    embed.set_footer(text=ctx.l.misc.petus)

    # Column 1: Discord-side stats (guilds, users, message/command counters).
    col_1 = (
        f"{ctx.l.useful.stats.servers}: `{len(self.bot.guilds)}`\n"
        f"{ctx.l.useful.stats.dms}: `{len(self.bot.private_channels)}/128`\n"
        f"{ctx.l.useful.stats.users}: `{len(self.bot.users)}`\n"
        f"{ctx.l.useful.stats.msgs}: `{self.v.msg_count}`\n"
        f"{ctx.l.useful.stats.cmds}: `{self.v.cmd_count}` `({round((self.v.cmd_count / (self.v.msg_count + .000001)) * 100, 2)}%)`\n"
        f"{ctx.l.useful.stats.cmds_sec}: `{round(self.v.cmd_count / uptime_seconds, 2)}`\n"
        f"{ctx.l.useful.stats.votes}: `{self.v.votes_topgg}`\n"
        f"{ctx.l.useful.stats.topgg}: `{round((self.v.votes_topgg / uptime_seconds) * 3600, 2)}`\n"
    )

    # Column 2: process/system stats.
    col_2 = (
        f"{ctx.l.useful.stats.mem}: `{round(mem_usage / 1000000, 2)} MB`\n"
        f"{ctx.l.useful.stats.cpu}: `{round(proc.cpu_percent() / psutil.cpu_count(), 2)}%`\n"
        f"{ctx.l.useful.stats.threads}: `{threads}`\n"
        f"{ctx.l.useful.stats.tasks}: `{len(asyncio.all_tasks())}`\n"
        f"{ctx.l.useful.stats.ping}: `{round(self.bot.latency * 1000, 2)} ms`\n"
        f"{ctx.l.useful.stats.shards}: `{self.bot.shard_count}`\n"
        f"{ctx.l.useful.stats.uptime}: `{uptime}`\n"
    )

    col_2 += "\n" + ctx.l.useful.stats.more.format(self.d.statcord)

    embed.add_field(name="\uFEFF", value=col_1 + "\uFEFF")
    embed.add_field(name="\uFEFF", value=col_2 + "\uFEFF")

    await ctx.send(embed=embed)
@commands.command(name="serverinfo", aliases=["server", "guild"])
@commands.guild_only()
async def server_info(self, ctx, gid: int = None):
    """Show general + Villager Bot specific info about a guild.

    Defaults to the current guild; an optional guild id lets the caller
    inspect another guild the bot is in.
    """
    if gid is None:
        guild = ctx.guild
    else:
        # BUG FIX: get_guild() returns None for unknown ids, which previously
        # crashed with AttributeError below — fall back to the current guild.
        guild = self.bot.get_guild(gid) or ctx.guild

    db_guild = await self.db.fetch_guild(guild.id)

    # Guild creation time, derived from the snowflake id.
    time = arrow.get(discord.utils.snowflake_time(guild.id))
    time = time.format("MMM D, YYYY", locale=ctx.l.lang) + ", " + time.humanize(locale=ctx.l.lang)

    embed = discord.Embed(color=self.d.cc)
    embed.set_author(name=f"{guild.name} {ctx.l.useful.ginf.info}", icon_url=guild.icon_url)

    embed.description = f"{ctx.l.useful.ginf.age}: `{time}`"

    general = (
        f"{ctx.l.useful.ginf.owner}: {guild.owner.mention}\n"
        f"{ctx.l.useful.ginf.members}: `{guild.member_count}`\n"
        f"{ctx.l.useful.ginf.channels}: `{len(guild.channels)}`\n "
        f"{ctx.l.useful.ginf.roles}: `{len(guild.roles)}`\n"
        f"{ctx.l.useful.ginf.emojis}: `{len(guild.emojis)}`\n"
        f"{ctx.l.useful.ginf.bans}: `{len(await guild.bans())}`\n"
    )

    villager = (
        f"{ctx.l.useful.ginf.cmd_prefix}: `{self.v.prefix_cache.get(guild.id, self.d.default_prefix)}`\n"
        f"{ctx.l.useful.ginf.lang}: `{ctx.l.name}`\n"
        f'{ctx.l.useful.ginf.diff}: `{db_guild["difficulty"]}`\n'
        f'{ctx.l.useful.ginf.prem}: `{str(db_guild["premium"]).lower()}`\n'
    )

    embed.add_field(name="General", value=general, inline=True)
    embed.add_field(name="Villager Bot", value=villager, inline=True)

    embed.set_thumbnail(url=guild.icon_url)

    await ctx.send(embed=embed)
@commands.command(name="rules", aliases=["botrules"])
async def rules(self, ctx):
    """Send the bot-rules embed (spacer fields keep the 2-column layout)."""
    embed = discord.Embed(color=self.d.cc, description=ctx.l.useful.rules.penalty)
    embed.set_author(name=ctx.l.useful.rules.rules, icon_url=self.d.splash_logo)
    embed.set_footer(text=ctx.l.misc.petus)

    spacer = "\uFEFF"
    field_values = (
        ctx.l.useful.rules.rule_1.format(self.d.support),
        spacer,
        ctx.l.useful.rules.rule_2,
        ctx.l.useful.rules.rule_3,
        spacer,
        ctx.l.useful.rules.rule_4,
    )

    for value in field_values:
        embed.add_field(name=spacer, value=value)

    await ctx.send(embed=embed)
@commands.command(name="math", aliases=["solve", "meth"])
async def math(self, ctx, *, problem):
    """Evaluate a math expression via the mathjs.org REST API."""
    async with ctx.typing():
        try:
            url = f"https://api.mathjs.org/v4/?expr={urlquote(problem)}"
            resp = await self.bot.aiohttp.get(url)
            # float() rejects mathjs error strings / non-numeric results.
            await self.bot.send(ctx, f"```{float(await resp.text())}```")
        except Exception:
            await self.bot.send(ctx, ctx.l.useful.meth.oops)
@commands.command(name="google", aliases=["thegoogle"])
@commands.cooldown(1, 2, commands.BucketType.user)
async def google_search(self, ctx, *, query):
    """Google the query and send the top result as an embed."""
    # SafeSearch stays on unless the channel is explicitly marked NSFW.
    safesearch = not (isinstance(ctx.channel, discord.TextChannel) and ctx.channel.is_nsfw())

    try:
        async with ctx.typing():
            results = await self.google_client.search(query, safesearch=safesearch)
    except async_cse.search.NoResults:
        await self.bot.send(ctx, ctx.l.useful.search.nope)
        return
    except async_cse.search.APIError:
        await self.bot.send(ctx, ctx.l.useful.search.error)
        return

    if len(results) == 0:
        await self.bot.send(ctx, ctx.l.useful.search.nope)
        return

    top = results[0]
    embed = discord.Embed(color=self.d.cc, title=top.title, description=top.description, url=top.url)
    await ctx.send(embed=embed)
@commands.command(name="youtube", aliases=["ytsearch", "yt"])
@commands.cooldown(1, 2, commands.BucketType.user)
async def youtube_search(self, ctx, *, query):
    """Google the query and send the first YouTube video result's URL."""
    # SafeSearch stays on unless the channel is explicitly marked NSFW.
    safesearch = not (isinstance(ctx.channel, discord.TextChannel) and ctx.channel.is_nsfw())

    try:
        async with ctx.typing():
            results = await self.google_client.search(query, safesearch=safesearch)
    except async_cse.search.NoResults:
        await self.bot.send(ctx, ctx.l.useful.search.nope)
        return
    except async_cse.search.APIError:
        await self.bot.send(ctx, ctx.l.useful.search.error)
        return

    # Keep only actual video links.
    videos = [r for r in results if "youtube.com/watch" in r.url]

    if len(videos) == 0:
        await self.bot.send(ctx, ctx.l.useful.search.nope)
        return

    await ctx.send(videos[0].url)
@commands.command(name="image", aliases=["imagesearch", "img"])
@commands.cooldown(1, 2, commands.BucketType.user)
async def image_search(self, ctx, *, query):
    """Google Images the query and send the top result's image URL."""
    # SafeSearch stays on unless the channel is explicitly marked NSFW.
    safesearch = not (isinstance(ctx.channel, discord.TextChannel) and ctx.channel.is_nsfw())

    try:
        async with ctx.typing():
            results = await self.google_client.search(query, safesearch=safesearch, image_search=True)
    except async_cse.search.NoResults:
        await self.bot.send(ctx, ctx.l.useful.search.nope)
        return
    except async_cse.search.APIError:
        await self.bot.send(ctx, ctx.l.useful.search.error)
        return

    if len(results) == 0:
        await self.bot.send(ctx, ctx.l.useful.search.nope)
        return

    await ctx.send(results[0].image_url)
@commands.command(name="remindme", aliases=["remind"])
@commands.cooldown(1, 2, commands.BucketType.user)
async def remind_me(self, ctx, *, args: str):
    """Schedule a reminder: leading duration tokens (e.g. ``1h 30m``), then text.

    Limits: at most 5 pending reminders per user, at most 8 weeks ahead.
    """
    user_reminder_count = await self.db.fetch_user_reminder_count(ctx.author.id)

    if user_reminder_count > 5:
        await self.bot.send(ctx, ctx.l.useful.remind.reminder_max)
        return

    # Re-split from the clean content so mentions etc. are normalized.
    args = ctx.message.clean_content[len(f"{ctx.prefix}{ctx.invoked_with} ") :].split()
    at = arrow.utcnow()
    i = 0

    # Consume leading duration tokens (1m / 2hours / 3d / 1week ...); the
    # first token that is not a duration starts the reminder text.  The
    # endswith checks are order-sensitive (e.g. "m" before "minute(s)").
    try:
        for i, arg in enumerate(args):
            if arg.endswith("m"):
                at = at.shift(minutes=int(arg[:-1]))
            elif arg.endswith("minute"):
                at = at.shift(minutes=int(arg[:-6]))
            elif arg.endswith("minutes"):
                at = at.shift(minutes=int(arg[:-7]))
            elif arg.endswith("h"):
                at = at.shift(hours=int(arg[:-1]))
            elif arg.endswith("hour"):
                at = at.shift(hours=int(arg[:-4]))
            elif arg.endswith("hours"):
                at = at.shift(hours=int(arg[:-5]))
            elif arg.endswith("d"):
                at = at.shift(days=int(arg[:-1]))
            elif arg.endswith("day"):
                at = at.shift(days=int(arg[:-3]))
            elif arg.endswith("days"):
                at = at.shift(days=int(arg[:-4]))
            elif arg.endswith("w"):
                at = at.shift(weeks=int(arg[:-1]))
            elif arg.endswith("week"):
                at = at.shift(weeks=int(arg[:-4]))
            elif arg.endswith("weeks"):
                at = at.shift(weeks=int(arg[:-5]))
            else:
                break
    except ValueError:
        # Non-numeric prefix (e.g. "xm"): stop parsing durations here.
        pass

    # NOTE(review): ``i`` is the index where parsing stopped, so ``args[i:]``
    # is the reminder text; i == 0 means the very first token wasn't a duration.
    if i == 0:
        await self.bot.send(ctx, ctx.l.useful.remind.stupid_1.format(ctx.prefix))
        return

    if at > arrow.utcnow().shift(weeks=8):
        await self.bot.send(ctx, ctx.l.useful.remind.time_max)
        return

    # Reminder text is capped at 499 characters.
    await self.db.add_reminder(ctx.author.id, ctx.channel.id, ctx.message.id, " ".join(args[i:])[:499], at.timestamp())
    await self.bot.send(ctx, ctx.l.useful.remind.remind.format(self.bot.d.emojis.yes, at.humanize(locale=ctx.l.lang)))
def setup(bot):
    """Discord.py extension entry point: register the Useful cog."""
    cog = Useful(bot)
    bot.add_cog(cog)
|
{"/__main__.py": ["/src/bot.py"]}
|
10,664
|
gritor111/Villager-Bot
|
refs/heads/master
|
/src/util/statcord.py
|
import asyncio
import psutil
class ShitCordClient:
    """Minimal statcord.com stats-posting client.

    Collects per-minute bot/process metrics (command counts, active users,
    memory, CPU, bandwidth deltas) and POSTs them to the statcord API for
    as long as the bot is running.
    """

    def __init__(self, bot, statcord_key: str):
        self.bot = bot
        self.aiohttp = bot.aiohttp
        self.statcord_key = statcord_key

        self.d = bot.d
        self.v = bot.v

        # Baselines so each post reports per-period deltas, not lifetime totals.
        net_io_counter = psutil.net_io_counters()
        self.prev_net_usage = net_io_counter.bytes_sent + net_io_counter.bytes_recv
        self.prev_vote_count = bot.v.votes_topgg
        self.prev_cmd_count = bot.v.cmd_count

        self.popular_commands = {}  # command name -> times run this period
        self.active_users = set()  # user ids that ran a command this period
        self.error_count = 0

        # Count successfully-run commands via the on_command event.
        bot.add_listener(self._command_ran, name="on_command")

        # Start the once-a-minute stat-posting loop.
        bot.loop.create_task(self.post_loop())

    async def _command_ran(self, ctx):
        """Record a successful command invocation for the current period."""
        if ctx.command_failed:
            return

        self.active_users.add(ctx.author.id)

        try:
            self.popular_commands[ctx.command.name] += 1
        except KeyError:
            self.popular_commands[ctx.command.name] = 1

    async def post_loop(self):
        """Post stats every 60s until the bot shuts down; log (not die) on errors."""
        while not self.bot.is_closed():
            await self.bot.wait_until_ready()

            try:
                await self.post_stats()
            except Exception as e:
                self.bot.logger.error(f"SHITCORD ERROR: {e}")

            await asyncio.sleep(60)

    async def post_stats(self):
        """Collect one period's metrics, POST them, and reset the period counters."""
        self.bot.logger.debug("posting data to shitcord...")

        # get process details
        mem = psutil.virtual_memory()
        net_io_counter = psutil.net_io_counters()
        cpu_load = str(psutil.cpu_percent())

        # get data ready to send + update old data
        mem_used = str(mem.used)
        mem_load = str(mem.percent)

        total_net_usage = net_io_counter.bytes_sent + net_io_counter.bytes_recv
        period_net_usage = str(total_net_usage - self.prev_net_usage)
        self.prev_net_usage = total_net_usage

        data = {
            "id": str(self.bot.user.id),
            "key": self.statcord_key,
            "servers": str(len(self.bot.guilds)),  # server count
            "users": str(len(self.bot.users)),  # user count
            "commands": str(self.v.cmd_count - self.prev_cmd_count),  # command count
            "active": list(self.active_users),
            "popular": [{"name": k, "count": v} for k, v in self.popular_commands.items()],  # active commands
            "memactive": mem_used,
            "memload": mem_load,
            "cpuload": cpu_load,
            "bandwidth": period_net_usage,
            "custom1": str(self.v.votes_topgg - self.prev_vote_count),
            "custom2": str(self.error_count),
        }

        # reset period counters
        self.popular_commands = {}
        self.active_users = set()
        self.prev_vote_count = self.v.votes_topgg
        self.prev_cmd_count = self.v.cmd_count
        self.error_count = 0

        resp = await self.aiohttp.post(
            url="https://api.statcord.com/v3/stats", json=data, headers={"Content-Type": "application/json"}
        )

        # BUG FIX: the old check `500 % (resp.status + 1) == 500` only matched
        # statuses strictly greater than 500, silently misclassifying a plain
        # 500 response. Treat every 5xx status as a server error.
        if resp.status >= 500:
            self.bot.logger.error("SHITCORD ERROR: shitcord server error occurred.")
        elif resp.status != 200:
            self.bot.logger.error(f"SHITCORD ERROR: status was not 200 OK:\n{await resp.text()}")
        else:
            self.bot.logger.debug("successfully posted data to shitcord.")
|
{"/__main__.py": ["/src/bot.py"]}
|
10,665
|
gritor111/Villager-Bot
|
refs/heads/master
|
/src/cogs/other/mobs.py
|
from discord.ext import commands, tasks
from util.misc import make_health_bar
import asyncio
import discord
import random
import arrow
import math
import util.cj as cj
class Mobs(commands.Cog): # fuck I really don't want to work on this
def __init__(self, bot):
    self.bot = bot

    self.d = bot.d  # static bot data/config (mob definitions, emojis, ...)
    self.v = bot.v  # shared runtime state (pause_econ, spawn_queue, ban_cache)

    self.db = bot.get_cog("Database")
    self.events = bot.get_cog("Events")

    # Background task loops: mob spawning + stale econ-pause cleanup.
    self.spawn_events.start()
    self.clear_pauses.start()
def cog_unload(self):
    # Stop the background task loops when the cog is unloaded/reloaded.
    self.spawn_events.cancel()
    self.clear_pauses.cancel()
@tasks.loop(seconds=1)
async def clear_pauses(self):
    """Drop econ-pause entries older than ~20s so users don't stay locked."""
    for uid in list(self.v.pause_econ):
        # NOTE(review): uses .seconds rather than .total_seconds(); fine here
        # because this loop runs every second so deltas never approach a day.
        if (arrow.utcnow() - self.v.pause_econ[uid]).seconds > 20:
            self.v.pause_econ.pop(uid, None)

        await asyncio.sleep(0)  # yield to the event loop between entries
def engage_check(self, m, ctx):
    """Return True if message *m* is a valid attempt to engage the spawned mob."""
    author = m.author

    # Users currently locked in an econ action cannot engage.
    if self.v.pause_econ.get(author.id):
        return False

    # The message (minus one leading prefix) must be a recognized attack word.
    attack = m.content.lower().replace(ctx.prefix, "", 1)
    if attack not in self.d.mobs_mech.valid_attacks:
        return False

    return (
        m.channel.id == ctx.channel.id
        and not author.bot
        and author.id not in self.v.ban_cache
    )
def attack_check(self, m, e_msg, ctx):
    """Return True if *m* is the engaged user attacking or fleeing in-channel."""
    content = m.content.lower()

    is_attack = content.replace(ctx.prefix, "", 1) in self.d.mobs_mech.valid_attacks
    is_flee = content in self.d.mobs_mech.valid_flees

    if not (is_attack or is_flee):
        return False

    return m.channel.id == e_msg.channel.id and m.author.id == e_msg.author.id
async def calc_sword_damage(self, uid, sword, diff_multi):
    """Roll the user's attack damage for one hit, including enchant bonuses."""
    # Base damage roll per sword tier (default covers wood / anything else).
    damage_ranges = {
        "netherite sword": (7, 10),
        "diamond sword": (6, 7),
        "gold sword": (4, 5),
        "iron sword": (2, 4),
        "stone sword": (1, 3),
    }

    low, high = damage_ranges.get(sword.lower(), (1, 2))
    dmg = random.randint(low, high)

    # Sharpness enchant books boost damage (II takes precedence over I).
    if await self.db.fetch_item(uid, "Sharpness II Book") is not None:
        dmg *= 1.5
    elif await self.db.fetch_item(uid, "Sharpness I Book") is not None:
        dmg *= 1.25

    # On harder difficulties the player hits a bit softer.
    if diff_multi > 1:
        dmg /= 1.3

    return math.ceil(dmg)
async def spawn_event(self, ctx):
    """Run one full mob-spawn encounter in ctx's channel.

    Flow: announce the mob -> wait for a user to engage -> turn-based battle
    loop (attack/flee) -> final health embed -> pay out or deduct emeralds.
    All errors are forwarded to the Events cog's debug handler.
    """
    try:
        # Small random delay so spawns don't land instantly after the trigger.
        await asyncio.sleep(random.randint(1, 200) / 100)

        if ctx.guild is None:
            return

        db_guild = await self.db.fetch_guild(ctx.guild.id)
        diff = db_guild["difficulty"]

        # No mobs on peaceful.
        if diff == "peaceful":
            return

        # difficulty multiplier
        diff_multi = 1.5 if diff == "hard" else 1

        # type of mob that will be spawned, just a string
        mob_key = random.choice(list(self.d.mobs_mech.mobs))

        # Merge static mob data with its localized strings.
        mob = self.d.mobs_mech.mobs[mob_key].copy()
        mob.update(ctx.l.mobs_mech.mobs[mob_key])
        mob = cj.classify(mob)

        embed = discord.Embed(
            color=self.d.cc,
            title=f"**{random.choice(ctx.l.mobs_mech.mob_drops).format(mob.nice.lower())}**",
            description=ctx.l.mobs_mech.type_engage,  # fight it you little baby
        )

        embed.set_image(url=mob.image)

        embed_msg = await ctx.send(embed=embed)

        # Wait for the first eligible user to type an attack word.
        while True:
            try:
                engage_msg = await self.bot.wait_for("message", check=(lambda m: self.engage_check(m, ctx)), timeout=15)
            except asyncio.TimeoutError:
                await embed_msg.edit(suppress=True)
                return

            u = engage_msg.author

            if self.v.pause_econ.get(u.id):
                continue

            u_db = await self.db.fetch_user(u.id)

            if u_db["health"] < 2:
                await self.bot.send(ctx, ctx.l.mobs_mech.no_health)
            else:
                break

        await embed_msg.edit(suppress=True)

        u_sword = await self.db.fetch_sword(u.id)
        slime_trophy = await self.db.fetch_item(u.id, "Slime Trophy")

        # used later on to clear pause_econ based on who's been in there for tooo long
        self.v.pause_econ[u.id] = arrow.utcnow()

        u_health = u_db["health"]
        mob_max_health = mob.health

        iteration = 0

        # Main battle loop: one embed + one user action per iteration.
        while True:
            iteration += 1

            embed = discord.Embed(color=self.d.cc, title=ctx.l.mobs_mech.attack_or_flee)
            embed.set_image(url=mob.image)

            embed.add_field(  # user health bar
                name=f"**{u.display_name}**",
                value=make_health_bar(
                    u_health, 20, self.d.emojis.heart_full, self.d.emojis.heart_half, self.d.emojis.heart_empty
                ),
                inline=False,
            )

            embed.add_field(  # mob health bar
                name=f"**{mob.nice}**",
                value=make_health_bar(
                    mob.health,
                    mob_max_health,
                    self.d.emojis.heart_full,
                    self.d.emojis.heart_half,
                    self.d.emojis.heart_empty,
                ),
                inline=False,
            )

            msg = await ctx.send(embed=embed)

            try:
                resp = await self.bot.wait_for(
                    "message", check=(lambda m: self.attack_check(m, engage_msg, ctx)), timeout=15
                )  # wait for response
            except asyncio.TimeoutError:  # user didn't respond
                await msg.edit(suppress=True)
                self.v.pause_econ.pop(u.id, None)
                await self.db.update_user(u.id, "health", u_health)
                await self.bot.send(ctx, random.choice(ctx.l.mobs_mech.flee_insults))
                return

            # user decides to not fight mob anymore cause they a little baby
            if resp.content.lower() in self.d.mobs_mech.valid_flees:
                await msg.edit(suppress=True)
                self.v.pause_econ.pop(u.id, None)
                await self.db.update_user(u.id, "health", u_health)
                await self.bot.send(ctx, random.choice(ctx.l.mobs_mech.flee_insults))
                return

            u_dmg = await self.calc_sword_damage(u.id, u_sword, diff_multi)  # calculate damage

            # Baby slimes dodge attacks unless the user owns a Slime Trophy.
            if mob_key == "baby_slime":
                if iteration < 3 and slime_trophy is None:
                    u_dmg = 0
                elif slime_trophy is not None and random.choice((True, False, False)):
                    u_dmg = 0
                elif iteration >= 3 and random.choice((True, False)):
                    u_dmg = 0

            mob.health -= u_dmg

            if mob.health < 1:  # user wins
                self.v.pause_econ.pop(u.id, None)
                await self.bot.send(
                    ctx, random.choice(ctx.l.mobs_mech.user_finishers).format(mob.nice.lower(), u_sword.lower())
                )
                break
            else:
                if mob_key == "baby_slime" and u_dmg == 0:
                    await self.bot.send(ctx, random.choice(mob.misses).format(u_sword.lower()))
                else:
                    await self.bot.send(
                        ctx, random.choice(ctx.l.mobs_mech.user_attacks).format(mob.nice.lower(), u_sword.lower())
                    )  # user attack message

            await asyncio.sleep(1)

            m_dmg = random.randint(2, 6)

            # Creepers never deal per-turn damage; from turn 3 on they may
            # explode and instantly end the fight.
            if mob_key == "creeper":
                if iteration > 2:
                    if random.choice((True, False, False)):
                        self.v.pause_econ.pop(u.id, None)

                        u_health = 0

                        await self.bot.send(ctx, random.choice(mob.finishers))
                        break

                m_dmg = 0

            u_health -= m_dmg
            u_health = 0 if u_health < 0 else u_health

            if u_health < 1:  # mob wins
                self.v.pause_econ.pop(u.id, None)
                await self.bot.send(ctx, random.choice(mob.finishers))
                break
            else:
                await self.bot.send(ctx, random.choice(mob.attacks))

            await asyncio.sleep(1.75)

            await msg.edit(suppress=True)

        await msg.edit(suppress=True)  # remove old Message

        embed = discord.Embed(color=self.d.cc)  # create new embed which shows health to show that user has lost / won
        embed.set_image(url=mob.image)

        embed.add_field(  # user health bar
            name=f"**{u.display_name}**",
            value=make_health_bar(
                (u_health if u_health >= 0 else 0),
                20,
                self.d.emojis.heart_full,
                self.d.emojis.heart_half,
                self.d.emojis.heart_empty,
            ),
            inline=False,
        )

        embed.add_field(  # mob health bar
            name=f"**{mob.nice}**",
            value=make_health_bar(
                (mob.health if mob.health >= 0 else 0),
                mob_max_health,
                self.d.emojis.heart_full,
                self.d.emojis.heart_half,
                self.d.emojis.heart_empty,
            ),
            inline=False,
        )

        await ctx.send(embed=embed)

        await self.db.update_user(u.id, "health", u_health)

        u_db = await self.db.fetch_user(u.id)
        u_bal = u_db["emeralds"]

        # Payout / penalty phase.
        if u_health > 0:  # user win
            # Baby slimes have a 1-in-26 chance of dropping slime balls instead.
            if mob_key != "baby_slime" or random.randint(0, 25) != 1:
                if diff == "easy":  # copied this ~~meth~~ math from the old code idek what it does lmao
                    ems_won = (
                        int(u_bal * (1 / random.choice((3, 3.25, 3.5, 3.75, 4))))
                        if u_bal < 256
                        else int(512 * (1 / random.choice((3, 3.25, 3.5, 3.75, 4))))
                    )
                else:  # diff hard
                    ems_won = (
                        int(u_bal * (1 / random.choice((1.75, 2, 2.25, 2.5))))
                        if u_bal < 256
                        else int(512 * (1 / random.choice((1.75, 2, 2.25, 2.5))))
                    )

                ems_won = int((ems_won if ems_won > 0 else 1) * diff_multi)

                # Looting enchant books boost winnings (II takes precedence).
                if await self.db.fetch_item(u.id, "Looting II Book") is not None:
                    ems_won = int(ems_won * 1.75)
                elif await self.db.fetch_item(u.id, "Looting I Book") is not None:
                    ems_won = int(ems_won * 1.25)

                await self.db.balance_add(u.id, ems_won)
                await self.db.update_lb(u.id, "mobs_killed", 1, "add")

                await self.bot.send(ctx, random.choice(ctx.l.mobs_mech.found).format(ems_won, self.d.emojis.emerald))
            else:
                if diff == "easy":
                    balls_won = random.randint(1, 10)
                else:
                    balls_won = random.randint(1, 20)

                await self.db.add_item(u.id, "Slime Ball", 5, balls_won, True)

                await self.bot.send(ctx, random.choice(ctx.l.mobs_mech.found).format(balls_won, self.d.emojis.slimeball))
        else:  # mob win
            if diff == "easy":  # haha code copying go brrrrrrrrr
                ems_lost = (
                    int(u_bal * (1 / (random.choice([3.05, 3.3, 3.55, 3.8]) + 0.3)))
                    if u_bal > 20
                    else random.randint(2, 4)
                )
            else:  # diff hard
                ems_lost = (
                    int(u_bal * (1 / (random.choice([1.45, 1.55, 1.65, 1.75]) + 0.3)))
                    if u_bal > 20
                    else random.randint(5, 9)
                )

            ems_lost = await self.db.balance_sub(u.id, ems_lost)

            if mob_key == "creeper":
                await self.bot.send(
                    ctx, random.choice(ctx.l.mobs_mech.lost.creeper).format(ems_lost, self.d.emojis.emerald)
                )
            else:
                await self.bot.send(
                    ctx,
                    random.choice(ctx.l.mobs_mech.lost.normal).format(mob.nice.lower(), ems_lost, self.d.emojis.emerald),
                )
    except Exception as e:
        # Any failure gets forwarded to the shared debug-error handler.
        await self.events.debug_error(ctx, e)
@tasks.loop(seconds=0.05)
async def spawn_events(self):
    """Drain the spawn queue, launching one spawn_event task per queued ctx."""
    for ctx in list(self.v.spawn_queue):
        self.v.spawn_queue.pop(ctx)
        # Fire-and-forget: each encounter runs concurrently on the event loop.
        self.bot.loop.create_task(self.spawn_event(ctx))
def setup(bot):
    """Discord.py extension entry point: register the Mobs cog."""
    cog = Mobs(bot)
    bot.add_cog(cog)
|
{"/__main__.py": ["/src/bot.py"]}
|
10,677
|
jonathanvanschenck/Monochromator-GUI
|
refs/heads/master
|
/monochromator/calibrate.py
|
#%%
import tkinter as tk
import os
from time import sleep
from tkinter import messagebox
from tkinter import filedialog
from spectrometer import Spectrometer
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from scipy.optimize import minimize
def gauss(x, p):
    """Gaussian with vertical offset: |p0| + |p1| * exp(-((x - p2) / p3)**2)."""
    offset, amplitude, center, width = p[0], p[1], p[2], p[3]
    z = (x - center) / width
    return np.abs(offset) + np.abs(amplitude) * np.exp(-z ** 2)
class Calibrate(tk.Frame):
def __init__(self,monochromator):
    """Calibration window: either load an existing .cal file or start a new
    live-spectrometer calibration session for *monochromator*.
    """
    tk.Frame.__init__(self,None)
    self.master.title("Calibration Window")
    # Closing the window destroys the toplevel, which ends mainloop() below.
    self.master.protocol("WM_DELETE_WINDOW", lambda : self.master.destroy())
    if not messagebox.askyesno("Title","Create a new calibration file?"):
        # Reuse an existing calibration: pick a .cal file and load its points.
        fname = filedialog.askopenfilename(title="Load Calibration File",
                             initialdir = os.getcwd(),
                             filetypes = (("calibration files","*.cal"),("all files","*.*")))
        monochromator.load_calibration_points(fname)
        self.master.destroy()
    else:
        # Fresh calibration: open the spectrometer, reset, and build the UI.
        self.spec = Spectrometer()
        self.specRunning = True  # live-acquisition-loop flag
        self.mono = monochromator
        self.mono.reset_calibration()
        self.create_widgets()
        self.start_aquisition()
        self.mainloop()  # blocks here until the window is closed
        self.spec.close()
def create_widgets(self):
# Create MPL Figure
self.mpl = MPL(self.master,
self.spec.wavelengths(),self.spec.intensities(),
#np.arange(0,100,0.1),gauss(np.arange(0,100,0.1),[200,2700,40,5]),
#self.spec.wavelengths,self.spec.intensities,
column=0,row=2,columnspan=2)
# Create Spectrometer control window
self.specFrame = tk.LabelFrame(self.master,text="Spectrometer Controls")
self.specFrame.grid(column=0,row=0)
self.ITLabel = tk.Label(self.specFrame,text="IT (ms)")
self.ITLabel.grid(column=0,row=0,sticky=tk.E)
self.ITvariable = tk.StringVar()
self.set_IT(20)
self.ITEntry = tk.Entry(self.specFrame,
textvariable=self.ITvariable,
width=6)
self.ITEntry.grid(column=1,row=0)
self.ITUpdateButton = tk.Button(self.specFrame,text="Update",
command=lambda: self.set_IT(self.ITvariable.get()))
self.ITUpdateButton.grid(column=2,row=0,sticky=tk.W)
self.PPLabel = tk.Label(self.specFrame,text="Aquire:")
self.PPLabel.grid(column=0,row=1,sticky=tk.E)
self.playButton = tk.Button(self.specFrame,text="Play",
command=lambda: self.start_aquisition())
self.playButton.grid(column=1,row=1)
self.pauseButton = tk.Button(self.specFrame,text="Pause",
command=lambda: self.stop_aquisition())
self.pauseButton.grid(column=2,row=1)
# Create calibration setup
self.calFrame = tk.LabelFrame(self.master,text="Spectrometer Controls")
self.calFrame.grid(column=1,row=0)
self.PosLabel = tk.Label(self.calFrame,text="Starting Position:")
self.PosLabel.grid(column=0,row=0,sticky=tk.E)
self.Posvariable = tk.StringVar()
self.set_Pos(self.mono.lower_bound)
self.PosEntry = tk.Entry(self.calFrame,
textvariable=self.Posvariable,
width=6)
self.PosEntry.grid(column=1,row=0)
self.PosUpdateButton = tk.Button(self.calFrame,text="Move",
command=lambda: self.set_Pos(self.Posvariable.get()))
self.PosUpdateButton.grid(column=2,row=0,sticky=tk.W)
self.stepLabel = tk.Label(self.calFrame,text="Number of Steps:")
self.stepLabel.grid(column=0,row=1,sticky=tk.E)
self.Stepvariable = tk.StringVar()
self.Stepvariable.set("3")
self.StepEntry = tk.Entry(self.calFrame,
textvariable=self.Stepvariable,
width=6)
self.StepEntry.grid(column=1,row=1)
self.startCalButton = tk.Button(self.calFrame,
text="Start Calibration",
command = lambda: self.start_calibration())
self.startCalButton.grid(column=0,row=2)
self.nextButton = tk.Button(self.calFrame,
text="Next Position",
command = lambda: self.next_position())
self.nextButton.grid(column=1,row=2)
self.nextButton.config(state='disabled')
def set_IT(self,IT):
try:
it = int(IT)*1000
except:
it = 100*1000
if it<10*1000:
it = 10*1000
elif it>10*1000*1000:
it = 10*1000*1000
self.spec.integration_time_micros(it)
self.ITvariable.set(str(it//1000))
self.mpl.update_spectrum(self.spec.intensities())
def set_Pos(self,POS):
try:
pos = int(POS)
except:
pos = 100
if pos<0:
pos = 0
elif pos>150:
pos = 150
self.mono.set_lower_bound(pos)
self.Posvariable.set(str(pos))
self.mono.move(self.mono.lower_bound)
def start_aquisition(self):
self.specRunning = True
self.aquire()
def aquire(self):
# y = self.mpl.spectrum.get_ydata()
self.mpl.update_spectrum(self.spec.intensities())#(0.99*y)
if self.specRunning:
self.master.after(0,self.aquire)
def stop_aquisition(self):
self.specRunning = False
def start_calibration(self):
self.stop_aquisition()
self.playButton.config(state="disabled")
self.pauseButton.config(state="disabled")
self.PosUpdateButton.config(state="disabled")
self.startCalButton.config(state="disabled")
self.nextButton.config(state='normal')
try:
n = int(self.Stepvariable.get())
except:
n = 5
if n<2:
n = 2
elif n>10:
n = 10
self.mmSpace = list(self.mono.lower_bound-np.linspace(5,31-4,n))
self.mono.move(self.mmSpace.pop(0))
sleep(0.1)
self.mpl.update_spectrum(self.spec.intensities())
self.mpl.gen_fit()
def next_position(self):
self.mono.add_point(self.mono.mot.getPos(),*self.mpl.p[-2:])
try:
mm = self.mmSpace.pop(0)
except IndexError:
self.save_calibration_file()
self.master.destroy()
else:
self.mono.move(mm)
sleep(0.1)
self.mpl.update_spectrum(self.spec.intensities())
self.mpl.gen_fit()
def save_calibration_file(self):
path = filedialog.askdirectory(initialdir = os.getcwd(),
title= "Calibration File Directory")
self.mono.save_calibration_points(path)
class MPL:
    """Matplotlib spectrum view embedded in a tk Frame.

    Shows the live spectrum plus a Gaussian fit overlay.  Left-click seeds
    the fit amplitude/center from the clicked point; right-click runs a
    least-squares fit around the current peak.  ``p`` holds the Gaussian
    parameters [offset, amplitude, center, width].
    """
    def __init__(self,master,x,y,p=[0,0,500,5],**kwargs):
        # NOTE(review): mutable default for p is shared across calls --
        # harmless here only because it is immediately copied into an array.
        self.x = x
        self.p = np.array(p)
        # Create tk Frame to hold MPL plot
        self.frame = tk.Frame(master)
        self.frame.grid(**kwargs)
        # Create MPL figure
        self.fig = plt.figure(figsize=(10,5))
        self.ax = self.fig.add_subplot(111)
        self.spectrum, = self.ax.plot(x,y,color="blue")
        self.ax.set_xlabel("Wavelength (nm)")
        self.ax.set_ylabel("Counts")
        self.ax.set_ylim(0,4000)
        # Attached MPL figure and toolbar to tk Frame
        self.canvas = FigureCanvasTkAgg(self.fig,self.frame)
        self.canvas.get_tk_widget().pack()
        self.toolbar = NavigationToolbar2Tk(self.canvas,self.frame)
        self.toolbar.update()
        # initialize fit
        self.fit, = self.ax.plot(x,gauss(x,self.p),color="black")
        # Setup MPL click collbacks
        self.canvas.mpl_connect('button_press_event',self.click)
    def click(self,event):
        # Mouse dispatch: left = seed fit from click, right = run full fit.
        if event.inaxes == self.ax:
            if event.button == 1:
                print("Left click @ x=",event.xdata," y=",event.ydata)
                self.p[1],self.p[2] = event.ydata,event.xdata
                self.update_fit()
            if event.button == 2:
                print("Scroll click @ x=",event.xdata," y=",event.ydata)
            if event.button == 3:
                print("Right click @ x=",event.xdata," y=",event.ydata)
                self.gen_fit()
    def update_fit(self):
        # Redraw the Gaussian overlay from the current parameters p.
        self.fit.set_ydata(gauss(self.x,self.p))
        self.fig.canvas.draw()
    def update_spectrum(self,y):
        # Replace the displayed spectrum with new intensity data.
        self.spectrum.set_ydata(y)
        self.fig.canvas.draw()
    def gen_fit(self):
        # Least-squares Gaussian fit seeded from the tallest point, with the
        # residual restricted to +/-50 nm around that peak.
        y = self.spectrum.get_ydata()
        x0 = self.x[np.argmax(y)]
        y0 = np.max(y)
        mask = np.array(np.abs(self.x-x0)<50)
        def diff(p):
            return np.sum((y[mask]-gauss(self.x,p)[mask])**2)
        fit = minimize(diff,[y[0],y0,x0,1])
        # print(fit)
        self.p = np.copy(fit.x)
        self.update_fit()
|
{"/example.py": ["/monochromator/monochromator.py", "/monochromator/calibrate.py"]}
|
10,678
|
jonathanvanschenck/Monochromator-GUI
|
refs/heads/master
|
/monochromator/__init__.py
|
# Package initializer: expose the submodules on the package namespace.
# Bug fix: "import .calibrate" is a SyntaxError -- explicit relative
# imports must use the "from . import name" form.
from . import calibrate
from . import monochromator
from . import spectrometer
|
{"/example.py": ["/monochromator/monochromator.py", "/monochromator/calibrate.py"]}
|
10,679
|
jonathanvanschenck/Monochromator-GUI
|
refs/heads/master
|
/example.py
|
#%%
from monochromator.monochromator import Monochromator
from monochromator.calibrate import Calibrate
# Instantiate monochromator instance.
# Re-run guard for interactive sessions: on the first run `mono` is
# undefined (NameError); on re-runs the previous hardware handle is
# released before opening a new one.
# Bug fix: Monochromator exposes shutdown(), not close(), and a bare
# `except:` also swallows KeyboardInterrupt -- narrow it to Exception.
try:
    mono.shutdown()
except Exception:
    pass
mono = Monochromator()
# Launch calibration GUI (blocks until the window is closed)
Calibrate(mono)
|
{"/example.py": ["/monochromator/monochromator.py", "/monochromator/calibrate.py"]}
|
10,680
|
jonathanvanschenck/Monochromator-GUI
|
refs/heads/master
|
/monochromator/spectrometer.py
|
"""Allows pyseabreeze spectrometers to be loaded via a popup window
---Classes---
selectionBox:
A tk widget to hold a list of tk.Radiobuttons which all refer to the same variable
Spectrometer:
A wrapper for the seabreeze.spectrometer.Spectrometer class, which automatically
searches for available OceanOptics spectrometers. If multiple devices (or no
devices) are available, the software launches a tk window to list the options.
Created by: Jonathan D. B. Van Schenck
"""
#%%
import tkinter as tk
import seabreeze.spectrometers as sb
#%%
class selectionBox(tk.LabelFrame):
    '''Container for associated tk.Radiobuttons
    ---Initialization Parameters---
    master:     tk.Frame instance into which the widget will be created
    variable:   The underlying tk variable which all the Radiobuttons
                 will be attached to
    valueList:  List of possible values for which tk.Radiobuttons will
                 be created
    label:      Optional Label for the tk.LabelFrame which wraps the radiobuttons
    textList:   Optional list of labels to represent each valueList (must be either
                 the same length as valueList, or None).
    ---Variables---
    variable:
        The underlying tk variable which all the Radiobuttons will be attached to
    RBList:
        List to hold each tk.Radiobutton instance
    ---Methods---
    gen_list:
        Generates and packs the tk.Radiobuttons into a tk.LabelFrame
    '''
    def __init__(self,master,variable,valueList,label="",textList=None):
        tk.LabelFrame.__init__(self,master,text=label)
        self.variable = variable
        self.RBList = []
        self.gen_list(valueList,textList)
    def gen_list(self,valueList,textList=None):
        # Rebuild the button list: destroy any previously created buttons,
        # then create one Radiobutton per value, labelled from textList
        # (or str(value) when no labels are supplied).
        for rb in self.RBList:
            rb.destroy()
        if textList is None:
            tL = [str(v) for v in valueList]
        else:
            tL = textList
        self.RBList = [tk.Radiobutton(self,text=t,variable=self.variable,value=v,indicatoron=0)\
                       for t,v in zip(tL,valueList)]
        for i,rb in enumerate(self.RBList):
            rb.grid(column=0,row=i)
class Spectrometer(sb.Spectrometer):
    """Wrapper for seabreeze.spectrometers.Spectrometer with smart initialization.

    If exactly one OceanOptics device is attached it is opened directly;
    otherwise (zero or several devices) a small tk popup lists the detected
    devices and lets the user rescan and pick one.
    """
    def __init__(self):
        def scan():
            # Enumerate currently attached OceanOptics devices.
            return sb.list_devices()
        deviceList = scan()
        if len(deviceList) == 1:
            sb.Spectrometer.__init__(self,deviceList[0])
        else:
            # Zero or several devices: let the user choose via a popup.
            root = tk.Tk()
            root.title("Spectrometer Selection")
            root.geometry("200x200")
            d = tk.StringVar()
            buttonList = selectionBox(root,d,deviceList,label="Select Spectrometer")
            buttonList.grid(column=0,row=1,columnspan=2)
            def rescan(buttonList):
                # Refresh the radio-button list after (re)plugging hardware.
                deviceList = scan()
                buttonList.gen_list(deviceList)
            tk.Button(root,text="Rescan",command= lambda : rescan(buttonList)).grid(column=0,row=0)
            def load():
                # Try to open the selected device; keep the window open on failure.
                try:
                    sb.Spectrometer.__init__(self,d.get())
                except:
                    print("Problem loading device \'%s\', try again" % d.get())
                else:
                    root.destroy()
            tk.Button(root,text="Load",command=load).grid(column=1,row=0)
            root.protocol("WM_DELETE_WINDOW", lambda : root.destroy())
            root.mainloop()
|
{"/example.py": ["/monochromator/monochromator.py", "/monochromator/calibrate.py"]}
|
10,681
|
jonathanvanschenck/Monochromator-GUI
|
refs/heads/master
|
/monochromator/monochromator.py
|
#%%
import tkinter as tk
import os
from time import sleep
from tkinter import messagebox
from tkinter import filedialog
import seabreeze.spectrometers as sb
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from scipy.optimize import minimize
def gauss(x,p):
    """Evaluate |p[0]| + |p[1]| * exp(-((x - p[2]) / p[3])**2).

    p = [offset, amplitude, center, width]; the absolute values keep the
    offset and amplitude non-negative for the fitter.
    """
    exponent = -((x - p[2]) / p[3]) ** 2
    return np.abs(p[0]) + np.abs(p[1]) * np.exp(exponent)
from time import localtime
def today():
    """Return a timestamp string 'YYMMDD-HHMMSS' from the current local time."""
    now = localtime()
    date_part = "{}{:0>2}{:0>2}".format(str(now.tm_year)[-2:], now.tm_mon, now.tm_mday)
    time_part = "{:0>2}{:0>2}{:0>2}".format(now.tm_hour, now.tm_min, now.tm_sec)
    return date_part + "-" + time_part
from ctypes import c_long, c_float, windll, pointer
class APTMotor():
    """ctypes wrapper around the Thorlabs APT.dll driving a single motor stage.

    Positions/distances are in the stage's native units (mm for linear
    stages).  NOTE(review): mcRel/mcAbs call self.getVelocityParameterLimits(),
    self.getVelocityParameters() and self.setVel(), which are not defined in
    this class as shown -- confirm they exist in the full driver source.
    """
    def __init__(self,SerialNum=None, HWTYPE=31, loc='', verbose=False, dllname='APT.dll'):
        '''
        HWTYPE_BSC001 11 // 1 Ch benchtop stepper driver
        HWTYPE_BSC101 12 // 1 Ch benchtop stepper driver
        HWTYPE_BSC002 13 // 2 Ch benchtop stepper driver
        HWTYPE_BDC101 14 // 1 Ch benchtop DC servo driver
        HWTYPE_SCC001 21 // 1 Ch stepper driver card (used within BSC102,103 units)
        HWTYPE_DCC001 22 // 1 Ch DC servo driver card (used within BDC102,103 units)
        HWTYPE_ODC001 24 // 1 Ch DC servo driver cube
        HWTYPE_OST001 25 // 1 Ch stepper driver cube
        HWTYPE_MST601 26 // 2 Ch modular stepper driver module
        HWTYPE_TST001 29 // 1 Ch Stepper driver T-Cube
        HWTYPE_TDC001 31 // 1 Ch DC servo driver T-Cube
        HWTYPE_LTSXXX 42 // LTS300/LTS150 Long Travel Integrated Driver/Stages
        HWTYPE_L490MZ 43 // L490MZ Integrated Driver/Labjack
        HWTYPE_BBD10X 44 // 1/2/3 Ch benchtop brushless DC servo driver
        '''
        self.verbose = verbose
        self.Connected = False
        # Load the Thorlabs APT DLL (Windows only) and initialise the library.
        if not os.path.exists(loc+dllname):
            print("ERROR: DLL not found")
        self.aptdll = windll.LoadLibrary(loc+dllname)
        self.aptdll.EnableEventDlg(True)
        self.aptdll.APTInit()
        self.HWType = c_long(HWTYPE)
        self.blCorr = 0.10 #100um backlash correction
        if SerialNum is not None:
            if self.verbose: print("Serial is", SerialNum)
            self.SerialNum = c_long(SerialNum)
            self.initializeHardwareDevice()
            # TODO : Error reporting to know if initialisation went sucessfully or not.
        else:
            if self.verbose: print("No serial, please setSerialNumber")
    def getNumberOfHardwareUnits(self):
        '''
        Returns the number of HW units connected that are available to be interfaced
        '''
        numUnits = c_long()
        self.aptdll.GetNumHWUnitsEx(self.HWType, pointer(numUnits))
        return numUnits.value
    def initializeHardwareDevice(self):
        '''
        Initialises the motor.
        You can only get the position of the motor and move the motor after it has been initialised.
        Once initiallised, it will not respond to other objects trying to control it, until released.
        '''
        if self.verbose: print('initializeHardwareDevice serial', self.SerialNum)
        result = self.aptdll.InitHWDevice(self.SerialNum)
        if result == 0:
            self.Connected = True
            if self.verbose: print('initializeHardwareDevice connection SUCESS')
        # need some kind of error reporting here
        else:
            raise Exception('Connection Failed. Check Serial Number!')
        return True
    '''
    Controlling the motors
    m = move
    c = controlled velocity
    b = backlash correction
    Rel = relative distance from current position.
    Abs = absolute position
    '''
    def getPos(self):
        '''
        Obtain the current absolute position of the stage
        '''
        if self.verbose: print('getPos probing...')
        if not self.Connected:
            raise Exception('Please connect first! Use initializeHardwareDevice')
        position = c_float()
        self.aptdll.MOT_GetPosition(self.SerialNum, pointer(position))
        if self.verbose: print('getPos ', position.value)
        return position.value
    def mRel(self, relDistance):
        '''
        Moves the motor a relative distance specified
        relDistance    float     Relative position desired
        '''
        if self.verbose: print('mRel ', relDistance, c_float(relDistance))
        if not self.Connected:
            print('Please connect first! Use initializeHardwareDevice')
            #raise Exception('Please connect first! Use initializeHardwareDevice')
        relativeDistance = c_float(relDistance)
        self.aptdll.MOT_MoveRelativeEx(self.SerialNum, relativeDistance, True)
        if self.verbose: print('mRel SUCESS')
        return True
    def mAbs(self, absPosition):
        '''
        Moves the motor to the Absolute position specified
        absPosition    float     Position desired
        '''
        if self.verbose: print('mAbs ', absPosition, c_float(absPosition))
        if not self.Connected:
            raise Exception('Please connect first! Use initializeHardwareDevice')
        absolutePosition = c_float(absPosition)
        self.aptdll.MOT_MoveAbsoluteEx(self.SerialNum, absolutePosition, True)
        if self.verbose: print('mAbs SUCESS')
        return True
    def mcRel(self, relDistance, moveVel=0.5):
        '''
        Moves the motor a relative distance specified at a controlled velocity
        relDistance    float     Relative position desired
        moveVel        float     Motor velocity, mm/sec
        '''
        if self.verbose: print('mcRel ', relDistance, c_float(relDistance), 'mVel', moveVel)
        if not self.Connected:
            raise Exception('Please connect first! Use initializeHardwareDevice')
        # Save velocities to reset after move
        maxVel = self.getVelocityParameterLimits()[1]
        # Set new desired max velocity
        self.setVel(moveVel)
        self.mRel(relDistance)
        self.setVel(maxVel)
        if self.verbose: print('mcRel SUCESS')
        return True
    def mcAbs(self, absPosition, moveVel=0.5):
        '''
        Moves the motor to the Absolute position specified at a controlled velocity
        absPosition    float     Position desired
        moveVel        float     Motor velocity, mm/sec
        '''
        if self.verbose: print('mcAbs ', absPosition, c_float(absPosition), 'mVel', moveVel)
        if not self.Connected:
            raise Exception('Please connect first! Use initializeHardwareDevice')
        # Save velocities to reset after move
        minVel, acc, maxVel = self.getVelocityParameters()
        # Set new desired max velocity
        self.setVel(moveVel)
        self.mAbs(absPosition)
        self.setVel(maxVel)
        if self.verbose: print('mcAbs SUCESS')
        return True
    def mbRel(self, relDistance):
        '''
        Moves the motor a relative distance specified
        relDistance    float     Relative position desired
        '''
        if self.verbose: print('mbRel ', relDistance, c_float(relDistance))
        if not self.Connected:
            print('Please connect first! Use initializeHardwareDevice')
            #raise Exception('Please connect first! Use initializeHardwareDevice')
        # Overshoot backwards by blCorr then approach forward to cancel backlash.
        self.mRel(relDistance-self.blCorr)
        self.mRel(self.blCorr)
        if self.verbose: print('mbRel SUCESS')
        return True
    def mbAbs(self, absPosition):
        '''
        Moves the motor to the Absolute position specified
        absPosition    float     Position desired
        '''
        if self.verbose: print('mbAbs ', absPosition, c_float(absPosition))
        if not self.Connected:
            raise Exception('Please connect first! Use initializeHardwareDevice')
        # When moving downward, undershoot then approach upward so the final
        # approach direction is always the same (backlash correction).
        if (absPosition < self.getPos()):
            if self.verbose: print('backlash mAbs', absPosition - self.blCorr)
            self.mAbs(absPosition-self.blCorr)
        self.mAbs(absPosition)
        if self.verbose: print('mbAbs SUCESS')
        return True
    def go_home(self):
        '''
        Move the stage to home position and reset position entry
        '''
        if self.verbose: print('Going home')
        if not self.Connected:
            raise Exception('Please connect first! Use initializeHardwareDevice')
        if self.verbose: print('go_home SUCESS')
        self.aptdll.MOT_MoveHome(self.SerialNum)
        return True
    def cleanUpAPT(self):
        '''
        Releases the APT object
        Use when exiting the program
        '''
        self.aptdll.APTCleanUp()
        if self.verbose: print('APT cleaned up')
        self.Connected = False
class Monochromator:
    """Thorlabs-stage-driven monochromator with a linear wavelength->position
    calibration.

    Calibration points are (stage position, wavelength, fwhm) triples; a
    least-squares line position = a + b*wavelength is fitted through them.
    """
    def __init__(self,reset=True,SerialNum=20808447, HWTYPE=13, loc='C:/Users/vanschej/Documents/Python Scripts/PyAPT/',verbose=False, dllname='APT.dll'):
        # reset=True homes the stage on startup.
        self.mot = APTMotor(SerialNum=SerialNum, HWTYPE=HWTYPE, loc=loc,verbose=verbose, dllname=dllname)
        self.reset_calibration()
        self.set_lower_bound(10)
        if reset:
            self.go_home()
    def go_home(self):
        # Home the stage, then park 5 mm above the lower bound.
        self.mot.go_home()
        self.move(self.lower_bound+5)
    def move(self,mm):
        # Backlash-corrected absolute move (mm).
        self.mot.mbAbs(mm)
    def set_lower_bound(self,mm):
        # Lowest usable stage position (mm) for the calibration sweep.
        self.lower_bound = mm
    def reset_calibration(self):
        # Calibration points as three parallel lists:
        # [stage positions, wavelengths, fwhms].
        self.__calibration = [[],[],[]]
    def add_point(self,pos,wave,fwhm):
        # Record one calibration triple.
        self.__calibration[0].append(pos)
        self.__calibration[1].append(wave)
        self.__calibration[2].append(fwhm)
    def create_calibration(self):
        # Least-squares line position = a + b*wavelength through the recorded
        # points, plus the stage-position bounds of the usable range.
        self.__b=np.sum((np.array(self.__calibration[1])-np.mean(self.__calibration[1]))*(np.array(self.__calibration[0])-np.mean(self.__calibration[0])))/np.sum((np.array(self.__calibration[1])-np.mean(self.__calibration[1]))**2)
        self.__a=np.mean(self.__calibration[0])-self.__b*np.mean(self.__calibration[1])
        self.__monoBound = [np.ceil((self.lower_bound-self.__a)/self.__b),np.floor((self.lower_bound-31-self.__a)/self.__b)]
    def save_calibration_points(self,path_to_folder):
        """Write the calibration (points + fit checksum line) to a
        timestamped .cal file inside *path_to_folder*."""
        self.create_calibration()
        oldD = os.getcwd()
        os.chdir(path_to_folder)
        # Fix: use a context manager so the file is closed even on error.
        try:
            with open(today()+".cal","w") as fh:
                for c in self.__calibration:
                    fh.write(",".join([str(cc) for cc in c])+"\n")
                fh.write("{0},{1},{2},{3}\n".format(self.__b,self.__a,*self.__monoBound))
        finally:
            os.chdir(oldD)
    def load_calibration_points(self,file):
        """Load calibration points from *file*, rebuild the fit, and return
        True when the recomputed parameters agree with the checksum line
        stored in the file to within 10%."""
        # Fix: the original leaked the open file handle AND shadowed it with
        # the fwhm loop variable `f`; read inside a context manager instead.
        with open(file) as fh:
            calibrationPoints = [[float(ll) for ll in l.strip("\n").split(",")] for l in fh]
        check_old = np.array(calibrationPoints.pop())
        self.reset_calibration()
        for p,w,width in zip(*calibrationPoints):
            self.add_point(p,w,width)
        self.create_calibration()
        check_new = np.append([self.__b,self.__a],self.__monoBound)
        return np.all(np.abs(check_old-check_new)/check_old < 0.1)
    def get_pos(self,lam):
        # Stage position (mm) for wavelength lam via the linear calibration.
        res = self.__a+self.__b*lam
        #assert res>=iniPos and res<=iniPos+31
        return(res)
    def go_to_wave(self,lam):
        # Move the stage so the monochromator passes wavelength lam.
        self.move(self.get_pos(lam))
    def shutdown(self):
        # Release the APT motor driver; call before exiting.
        self.mot.cleanUpAPT()
class Calibrate(tk.Frame):
    """Tk GUI for recording a new monochromator calibration.

    Near-duplicate of monochromator/calibrate.py's Calibrate, except this
    version always records a fresh calibration (no load-existing branch).
    """
    def __init__(self,monochromator):
        tk.Frame.__init__(self,None)
        self.master.title("Calibration Window")
        self.master.protocol("WM_DELETE_WINDOW", lambda : self.master.destroy())
        self.spec = Spectrometer()
        self.specRunning = True
        self.mono = monochromator
        self.mono.reset_calibration()
        self.create_widgets()
        self.start_aquisition()
        self.mainloop()  # blocks until the window is closed
        self.spec.close()
    def create_widgets(self):
        # Build the three UI areas: live plot, spectrometer controls,
        # calibration controls.
        # Create MPL Figure
        self.mpl = MPL(self.master,
            self.spec.wavelengths(),self.spec.intensities(),
            #np.arange(0,100,0.1),gauss(np.arange(0,100,0.1),[200,2700,40,5]),
            #self.spec.wavelengths,self.spec.intensities,
            column=0,row=2,columnspan=2)
        # Create Spectrometer control window
        self.specFrame = tk.LabelFrame(self.master,text="Spectrometer Controls")
        self.specFrame.grid(column=0,row=0)
        self.ITLabel = tk.Label(self.specFrame,text="IT (ms)")
        self.ITLabel.grid(column=0,row=0,sticky=tk.E)
        self.ITvariable = tk.StringVar()
        self.set_IT(20)
        self.ITEntry = tk.Entry(self.specFrame,
            textvariable=self.ITvariable,
            width=6)
        self.ITEntry.grid(column=1,row=0)
        self.ITUpdateButton = tk.Button(self.specFrame,text="Update",
            command=lambda: self.set_IT(self.ITvariable.get()))
        self.ITUpdateButton.grid(column=2,row=0,sticky=tk.W)
        self.PPLabel = tk.Label(self.specFrame,text="Aquire:")
        self.PPLabel.grid(column=0,row=1,sticky=tk.E)
        self.playButton = tk.Button(self.specFrame,text="Play",
            command=lambda: self.start_aquisition())
        self.playButton.grid(column=1,row=1)
        self.pauseButton = tk.Button(self.specFrame,text="Pause",
            command=lambda: self.stop_aquisition())
        self.pauseButton.grid(column=2,row=1)
        # Create calibration setup
        self.calFrame = tk.LabelFrame(self.master,text="Spectrometer Controls")
        self.calFrame.grid(column=1,row=0)
        self.PosLabel = tk.Label(self.calFrame,text="Starting Position:")
        self.PosLabel.grid(column=0,row=0,sticky=tk.E)
        self.Posvariable = tk.StringVar()
        self.set_Pos(self.mono.lower_bound)
        self.PosEntry = tk.Entry(self.calFrame,
            textvariable=self.Posvariable,
            width=6)
        self.PosEntry.grid(column=1,row=0)
        self.PosUpdateButton = tk.Button(self.calFrame,text="Move",
            command=lambda: self.set_Pos(self.Posvariable.get()))
        self.PosUpdateButton.grid(column=2,row=0,sticky=tk.W)
        self.stepLabel = tk.Label(self.calFrame,text="Number of Steps:")
        self.stepLabel.grid(column=0,row=1,sticky=tk.E)
        self.Stepvariable = tk.StringVar()
        self.Stepvariable.set("3")
        self.StepEntry = tk.Entry(self.calFrame,
            textvariable=self.Stepvariable,
            width=6)
        self.StepEntry.grid(column=1,row=1)
        self.startCalButton = tk.Button(self.calFrame,
            text="Start Calibration",
            command = lambda: self.start_calibration())
        self.startCalButton.grid(column=0,row=2)
        self.nextButton = tk.Button(self.calFrame,
            text="Next Position",
            command = lambda: self.next_position())
        self.nextButton.grid(column=1,row=2)
        self.nextButton.config(state='disabled')
    def set_IT(self,IT):
        # Clamp integration time to [10 ms, 10 s]; hardware takes microseconds.
        try:
            it = int(IT)*1000
        except:
            it = 100*1000
        if it<10*1000:
            it = 10*1000
        elif it>10*1000*1000:
            it = 10*1000*1000
        self.spec.integration_time_micros(it)
        self.ITvariable.set(str(it//1000))
        self.mpl.update_spectrum(self.spec.intensities())
    def set_Pos(self,POS):
        # Clamp starting position to [0, 150] mm and move the stage there.
        try:
            pos = int(POS)
        except:
            pos = 100
        if pos<0:
            pos = 0
        elif pos>150:
            pos = 150
        self.mono.set_lower_bound(pos)
        self.Posvariable.set(str(pos))
        self.mono.move(self.mono.lower_bound)
    def start_aquisition(self):
        self.specRunning = True
        self.aquire()
    def aquire(self):
        # Refresh the plot and reschedule itself while acquisition is on.
        # y = self.mpl.spectrum.get_ydata()
        self.mpl.update_spectrum(self.spec.intensities())#(0.99*y)
        if self.specRunning:
            self.master.after(0,self.aquire)
    def stop_aquisition(self):
        self.specRunning = False
    def start_calibration(self):
        # Freeze the live view and step through n stage positions (2..10),
        # fitting a Gaussian at each stop.
        self.stop_aquisition()
        self.playButton.config(state="disabled")
        self.pauseButton.config(state="disabled")
        self.PosUpdateButton.config(state="disabled")
        self.startCalButton.config(state="disabled")
        self.nextButton.config(state='normal')
        try:
            n = int(self.Stepvariable.get())
        except:
            n = 5
        if n<2:
            n = 2
        elif n>10:
            n = 10
        self.mmSpace = list(self.mono.lower_bound-np.linspace(5,31-4,n))
        self.mono.move(self.mmSpace.pop(0))
        sleep(0.1)  # let the stage settle before reading a spectrum
        self.mpl.update_spectrum(self.spec.intensities())
        self.mpl.gen_fit()
    def next_position(self):
        # Record the fitted point and advance; save and close when done.
        self.mono.add_point(self.mono.mot.getPos(),*self.mpl.p[-2:])
        try:
            mm = self.mmSpace.pop(0)
        except IndexError:
            self.save_calibration_file()
            self.master.destroy()
        else:
            self.mono.move(mm)
            sleep(0.1)
            self.mpl.update_spectrum(self.spec.intensities())
            self.mpl.gen_fit()
    def save_calibration_file(self):
        # Ask for a folder and write the calibration points there.
        path = filedialog.askdirectory(initialdir = os.getcwd(),
            title= "Calibration File Directory")
        self.mono.save_calibration_points(path)
class MPL:
    """Matplotlib spectrum view embedded in a tk Frame (duplicate of the
    MPL class in monochromator/calibrate.py).

    p holds the Gaussian parameters [offset, amplitude, center, width];
    left-click seeds the fit, right-click runs a least-squares fit.
    """
    def __init__(self,master,x,y,p=[0,0,500,5],**kwargs):
        # NOTE(review): mutable default for p -- harmless only because it is
        # immediately copied into an array.
        self.x = x
        self.p = np.array(p)
        # Create tk Frame to hold MPL plot
        self.frame = tk.Frame(master)
        self.frame.grid(**kwargs)
        # Create MPL figure
        self.fig = plt.figure(figsize=(10,5))
        self.ax = self.fig.add_subplot(111)
        self.spectrum, = self.ax.plot(x,y,color="blue")
        self.ax.set_xlabel("Wavelength (nm)")
        self.ax.set_ylabel("Counts")
        self.ax.set_ylim(0,4000)
        # Attached MPL figure and toolbar to tk Frame
        self.canvas = FigureCanvasTkAgg(self.fig,self.frame)
        self.canvas.get_tk_widget().pack()
        self.toolbar = NavigationToolbar2Tk(self.canvas,self.frame)
        self.toolbar.update()
        # initialize fit
        self.fit, = self.ax.plot(x,gauss(x,self.p),color="black")
        # Setup MPL click collbacks
        self.canvas.mpl_connect('button_press_event',self.click)
    def click(self,event):
        # Mouse dispatch: left = seed fit from click, right = run full fit.
        if event.inaxes == self.ax:
            if event.button == 1:
                print("Left click @ x=",event.xdata," y=",event.ydata)
                self.p[1],self.p[2] = event.ydata,event.xdata
                self.update_fit()
            if event.button == 2:
                print("Scroll click @ x=",event.xdata," y=",event.ydata)
            if event.button == 3:
                print("Right click @ x=",event.xdata," y=",event.ydata)
                self.gen_fit()
    def update_fit(self):
        # Redraw the Gaussian overlay from the current parameters p.
        self.fit.set_ydata(gauss(self.x,self.p))
        self.fig.canvas.draw()
    def update_spectrum(self,y):
        # Replace the displayed spectrum with new intensity data.
        self.spectrum.set_ydata(y)
        self.fig.canvas.draw()
    def gen_fit(self):
        # Least-squares Gaussian fit seeded from the tallest point; residual
        # restricted to +/-50 nm around the peak.
        y = self.spectrum.get_ydata()
        x0 = self.x[np.argmax(y)]
        y0 = np.max(y)
        mask = np.array(np.abs(self.x-x0)<50)
        def diff(p):
            return np.sum((y[mask]-gauss(self.x,p)[mask])**2)
        fit = minimize(diff,[y[0],y0,x0,1])
        # print(fit)
        self.p = np.copy(fit.x)
        self.update_fit()
#%
class selectionBox(tk.LabelFrame):
    """Group of tk.Radiobuttons sharing one tk variable (duplicate of the
    documented selectionBox in monochromator/spectrometer.py)."""
    def __init__(self,master,variable,valueList,label="",textList=None):
        tk.LabelFrame.__init__(self,master,text=label)
        self.variable = variable
        self.RBList = []
        self.gen_list(valueList,textList)
    def gen_list(self,valueList,textList=None):
        # Rebuild the button list: destroy old buttons, then create one
        # Radiobutton per value (labels default to str(value)).
        for rb in self.RBList:
            rb.destroy()
        if textList is None:
            tL = [str(v) for v in valueList]
        else:
            tL = textList
        self.RBList = [tk.Radiobutton(self,text=t,variable=self.variable,value=v,indicatoron=0)\
                       for t,v in zip(tL,valueList)]
        for i,rb in enumerate(self.RBList):
            rb.grid(column=0,row=i)
class Spectrometer(sb.Spectrometer):
    """seabreeze Spectrometer with smart initialization (duplicate of the
    class in monochromator/spectrometer.py).

    Opens the device directly when exactly one is attached; otherwise shows
    a tk popup to rescan and pick one.
    """
    def __init__(self):
        def scan():
            # Enumerate currently attached OceanOptics devices.
            return sb.list_devices()
        deviceList = scan()
        if len(deviceList) == 1:
            sb.Spectrometer.__init__(self,deviceList[0])
        else:
            # Zero or several devices: let the user choose via a popup.
            root = tk.Tk()
            root.title("Spectrometer Selection")
            root.geometry("200x200")
            d = tk.StringVar()
            buttonList = selectionBox(root,d,deviceList,label="Select Spectrometer")
            buttonList.grid(column=0,row=1,columnspan=2)
            def rescan(buttonList):
                # Refresh the device list after (re)plugging hardware.
                deviceList = scan()
                buttonList.gen_list(deviceList)
            tk.Button(root,text="Rescan",command= lambda : rescan(buttonList)).grid(column=0,row=0)
            def load():
                # Try to open the selected device; keep the window open on failure.
                try:
                    sb.Spectrometer.__init__(self,d.get())
                except:
                    print("Problem loading device \'%s\', try again" % d.get())
                else:
                    root.destroy()
            tk.Button(root,text="Load",command=load).grid(column=1,row=0)
            root.protocol("WM_DELETE_WINDOW", lambda : root.destroy())
            root.mainloop()
|
{"/example.py": ["/monochromator/monochromator.py", "/monochromator/calibrate.py"]}
|
10,684
|
loserbbb/tucao
|
refs/heads/master
|
/tucao/usermanage/migrations/0002_auto_20170807_0838.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds the optional User.telnumber
    # column and relaxes born_date / head_img to allow NULL.

    dependencies = [
        ('usermanage', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='user',
            name='telnumber',
            field=models.CharField(null=True, max_length=11),
        ),
        migrations.AlterField(
            model_name='user',
            name='born_date',
            field=models.DateTimeField(null=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='head_img',
            field=models.TextField(null=True),
        ),
    ]
|
{"/tucao/usermanage/views.py": ["/tucao/usermanage/models.py"]}
|
10,685
|
loserbbb/tucao
|
refs/heads/master
|
/tucao/usermanage/models.py
|
from django.db import models
# Create your models here.
class User(models.Model):
    # Application user account.
    # NOTE(review): passwords are stored in plain text here -- they should
    # be hashed before production use.
    name=models.CharField(max_length=10)
    password=models.CharField(max_length=16)
    born_date=models.DateTimeField(null=True)
    gender=models.BooleanField()
    description=models.TextField(blank=True,null=True)
    head_img=models.TextField(null=True)
    # exp / rank: integer progression fields -- exact semantics not visible
    # from this file; views initialize them to 0 and 1 respectively.
    exp=models.IntegerField()
    rank=models.IntegerField()
    telnumber=models.CharField(max_length=11,null=True)
    def __str__(self):
        return self.name
|
{"/tucao/usermanage/views.py": ["/tucao/usermanage/models.py"]}
|
10,686
|
loserbbb/tucao
|
refs/heads/master
|
/tucao/usermanage/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial Django migration: creates the User table.

    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('name', models.CharField(max_length=10)),
                ('password', models.CharField(max_length=16)),
                ('born_date', models.DateTimeField()),
                ('gender', models.BooleanField()),
                ('description', models.TextField(null=True, blank=True)),
                ('head_img', models.TextField()),
                ('exp', models.IntegerField()),
                ('rank', models.IntegerField()),
            ],
        ),
    ]
|
{"/tucao/usermanage/views.py": ["/tucao/usermanage/models.py"]}
|
10,687
|
loserbbb/tucao
|
refs/heads/master
|
/tucao/usermanage/middleware.py
|
from django.shortcuts import render,HttpResponse,redirect,HttpResponseRedirect
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError:
MiddlewareMixin=object
class SimpleMiddleware(MiddlewareMixin):
    """Redirect anonymous users to the login page.

    Requests whose session has no 'user' key are redirected to /login/,
    except for the login and admin URLs themselves.
    """
    def process_request(self,request):
        if 'user' not in request.session and request.path != '/login/' and request.path !='/admin/':
            # Bug fix: the redirect response was constructed but never
            # returned, so the middleware previously let every request
            # through unauthenticated.
            return HttpResponseRedirect('/login/')
        return None
    def process_response(self,request,response):
        # Pass responses through unchanged.
        return response
|
{"/tucao/usermanage/views.py": ["/tucao/usermanage/models.py"]}
|
10,688
|
loserbbb/tucao
|
refs/heads/master
|
/tucao/usermanage/views.py
|
from django.shortcuts import render
from django.http import HttpResponse
from .models import User
# Create your views here.
def index(request):
    # Simple liveness view: always responds with the plain text 'hello'.
    return HttpResponse('hello')
def regist(request):
    """Register a new user from POST data; reject duplicate names.

    Responds with plain-text 'Exist', 'Success' or 'Error'.
    """
    if request.method == 'POST':
        name = request.POST.get('name')
        # Bug fix: existence was previously detected by catching a broad
        # Exception around objects.get(), which also treated real DB errors
        # as "user does not exist" and could create duplicate accounts.
        if User.objects.filter(name=name).exists():
            return HttpResponse('Exist')
        password = request.POST.get('password')
        gender = request.POST.get('gender', True)
        telnumber = request.POST.get('telnumber')
        # NOTE(review): password is stored in plain text -- should be hashed.
        # exp/rank are IntegerFields, so pass real ints (was '0'/'1' strings).
        user = User(name=name, password=password, gender=gender,
                    telnumber=telnumber, exp=0, rank=1)
        user.save()
        return HttpResponse('Success')
    else:
        return HttpResponse('Error')
def login(request):
    """Authenticate a user by name/password and store the name in the session.

    Responds with plain-text 'Success', 'Password incorrect',
    'user not exist' or 'Error'.
    """
    if request.method == 'POST':
        name = request.POST.get('name')
        password = request.POST.get('password')
        # Bug fix: the original had a bare `user` expression (an immediate
        # NameError) and left `user` undefined when the lookup raised, so
        # the `if user is not None` check itself crashed.
        user = None
        try:
            user = User.objects.get(name=name)
        except User.DoesNotExist:
            pass
        if user is not None:
            # NOTE(review): plain-text password comparison -- should use
            # hashed passwords / django.contrib.auth.
            if password == user.password:
                request.session['user'] = user.name
                return HttpResponse('Success')
            else:
                # (typo fixed: was 'Password incorrot')
                return HttpResponse('Password incorrect')
        else:
            # (typo fixed: was 'user not eixist')
            return HttpResponse('user not exist')
    else:
        return HttpResponse('Error')
|
{"/tucao/usermanage/views.py": ["/tucao/usermanage/models.py"]}
|
10,740
|
EthanZhu90/TALL_Copy
|
refs/heads/master
|
/ctrl_model_noContext.py
|
import numpy as np
import tensorflow as tf
# from tensorflow.python.framework import dtypes
import tensorflow.contrib.rnn as rnn
from util.cnn import fc_layer as fc
import vs_multilayer
from dataset_noContext import TestingDataSet
from dataset_noContext import TrainingDataSet
import pickle
class CTRL_Model(object):
    def __init__(self, batch_size, train_csv_path, test_csv_path, test_visual_feature_dir, train_visual_feature_dir,
                 word_vector_dir, useLSTM=True):
        # Configure hyperparameters, build the (optional) LSTM sentence
        # encoder, load the word-embedding vocabulary, and open the
        # training/testing data sets.
        self.batch_size = batch_size
        self.test_batch_size = 56
        self.vs_lr = 0.005
        self.lambda_regression = 0.01
        self.alpha = 1.0/batch_size
        self.semantic_size = 1024 # the size of visual and semantic comparison size
        self.sentence_embedding_size = 4800
        self.visual_feature_dim = 4096
        self.useLSTM = useLSTM
        self.max_words_q = 15 # check later.
        self.rnn_layer = 2
        self.lstm_input_size = 300
        self.lstm_hidden_size = 512
        self.drop_out_rate = 0.2
        # LSTM model structure
        # encoder: RNN body
        # input_size: Deprecated and unused.
        self.lstm_1 = rnn.LSTMCell(num_units=self.lstm_hidden_size, state_is_tuple=False)
        self.lstm_dropout_1 = rnn.DropoutWrapper(self.lstm_1, output_keep_prob=1 - self.drop_out_rate)
        self.lstm_2 = rnn.LSTMCell(num_units=self.lstm_hidden_size, state_is_tuple=False)
        self.lstm_dropout_2 = rnn.DropoutWrapper(self.lstm_2, output_keep_prob=1 - self.drop_out_rate)
        self.stacked_lstm = rnn.MultiRNNCell([self.lstm_dropout_1, self.lstm_dropout_2], state_is_tuple=False)
        # word embedding vector
        self.word2idx, self.idx2word, self.embed_ques_W = self.build_vocabulary(word_vector_dir)
        # # state-embedding
        # self.embed_state_W = tf.Variable(
        #     tf.random_uniform([2 * self.lstm_hidden_size * self.rnn_layer, self.dim_hidden], -0.08, 0.08),
        #     name='embed_state_W')
        # self.embed_state_b = tf.Variable(tf.random_uniform([self.dim_hidden], -0.08, 0.08), name='embed_state_b')
        self.train_set = TrainingDataSet(train_visual_feature_dir, train_csv_path, self.batch_size, self.word2idx, useLSTM)
        self.test_set = TestingDataSet(test_visual_feature_dir, test_csv_path, self.test_batch_size, self.word2idx, useLSTM)
'''
given the word vector dict, return the vocabulary
'''
def build_vocabulary(self, word_vector_dir):
word_vector_dict = pickle.load(open(word_vector_dir, 'rb'))
idx2word = list()
word2idx = dict()
embed = list()
# the first word 'unk'
word2idx['unk'] = 0
idx2word.append('unk')
embed.append(np.zeros(self.lstm_input_size))
cnt = 1
for term in word_vector_dict:
idx2word.append(term)
word2idx[term] = cnt
embed.append(word_vector_dict[term])
cnt += 1
embed_tensor = np.vstack(embed).astype(np.float32)
return word2idx, idx2word, embed_tensor
'''
used in training alignment model, CTRL(aln)
'''
def fill_feed_dict_train(self):
image_batch,sentence_batch,offset_batch = self.train_set.next_batch()
input_feed = {
self.visual_featmap_ph_train: image_batch,
self.sentence_ph_train: sentence_batch,
self.offset_ph: offset_batch
}
return input_feed
'''
used in training alignment+regression model, CTRL(reg)
'''
def fill_feed_dict_train_reg(self):
image_batch, sentence_batch, offset_batch, sent_len_batch = self.train_set.next_batch_iou()
if self.useLSTM:
input_feed = {
self.visual_featmap_ph_train: image_batch,
self.sentence_ph_train: sentence_batch,
self.offset_ph: offset_batch,
self.sentence_ph_train_len: sent_len_batch
}
else:
input_feed = {
self.visual_featmap_ph_train: image_batch,
self.sentence_ph_train: sentence_batch,
self.offset_ph: offset_batch
}
return input_feed
'''
cross modal processing module
'''
def cross_modal_comb(self, visual_feat, sentence_embed, batch_size):
vv_feature = tf.reshape(tf.tile(visual_feat, [batch_size, 1]),
[batch_size, batch_size, self.semantic_size])
ss_feature = tf.reshape(tf.tile(sentence_embed,[1, batch_size]),[batch_size, batch_size, self.semantic_size])
concat_feature = tf.reshape(tf.concat([vv_feature, ss_feature], 2),[batch_size, batch_size, self.semantic_size+self.semantic_size])
print(concat_feature.get_shape().as_list())
mul_feature = tf.multiply(vv_feature, ss_feature)
add_feature = tf.add(vv_feature, ss_feature)
comb_feature = tf.reshape(tf.concat([mul_feature, add_feature, concat_feature],2),[1, batch_size, batch_size, self.semantic_size*4])
return comb_feature
'''
visual semantic inference, including visual semantic alignment and clip location regression
'''
def visual_semantic_infer(self, visual_feature_train, sentence_embed_train, visual_feature_test, sentence_embed_test,
sentence_ph_train_len, sentence_ph_test_len):
name="CTRL_Model"
with tf.variable_scope(name):
print("Building training network...............................\n")
transformed_clip_train = fc('v2s_lt', visual_feature_train, output_dim=self.semantic_size)
transformed_clip_train_norm = tf.nn.l2_normalize(transformed_clip_train, dim=1)
if self.useLSTM:
sentence_embed_train = self.lstm_embed(sentence_embed_train, sentence_ph_train_len)
transformed_sentence_train = fc('s2s_lt', sentence_embed_train, output_dim=self.semantic_size)
transformed_sentence_train_norm = tf.nn.l2_normalize(transformed_sentence_train, dim=1)
cross_modal_vec_train = self.cross_modal_comb(transformed_clip_train_norm, transformed_sentence_train_norm, self.batch_size)
sim_score_mat_train = vs_multilayer.vs_multilayer(cross_modal_vec_train, "vs_multilayer_lt", middle_layer_dim=1000)
sim_score_mat_train = tf.reshape(sim_score_mat_train,[self.batch_size, self.batch_size, 3])
tf.get_variable_scope().reuse_variables()
print("Building test network...............................\n")
transformed_clip_test = fc('v2s_lt', visual_feature_test, output_dim=self.semantic_size)
transformed_clip_test_norm = tf.nn.l2_normalize(transformed_clip_test, dim=1)
if self.useLSTM:
sentence_embed_test = self.lstm_embed(sentence_embed_test, sentence_ph_test_len)
transformed_sentence_test = fc('s2s_lt', sentence_embed_test, output_dim=self.semantic_size)
transformed_sentence_test_norm = tf.nn.l2_normalize(transformed_sentence_test, dim=1)
cross_modal_vec_test = self.cross_modal_comb(transformed_clip_test_norm, transformed_sentence_test_norm, self.test_batch_size)
sim_score_mat_test = vs_multilayer.vs_multilayer(cross_modal_vec_test, "vs_multilayer_lt", reuse=True, middle_layer_dim=1000)
sim_score_mat_test = tf.reshape(sim_score_mat_test, [self.test_batch_size, self.test_batch_size, 3])
cross_modal_vec_test_1 = self.cross_modal_comb(tf.reshape(transformed_clip_test_norm[1], shape=(1,1024)),
tf.reshape(transformed_sentence_test_norm[1], shape=(1,1024)), 1)
sim_score_mat_test_1 = vs_multilayer.vs_multilayer(cross_modal_vec_test_1, "vs_multilayer_lt", reuse=True, middle_layer_dim=1000)
sim_score_mat_test_1 = tf.reshape(sim_score_mat_test_1, [3])
return sim_score_mat_train, sim_score_mat_test, sim_score_mat_test_1
def lstm_embed(self, sentences, sentence_ph_train_len):
# state = [tf.zeros([self.batch_size, x]) for x in [self.lstm_hidden_size, self.lstm_hidden_size]]
sent_1dim = tf.reshape(sentences, [-1, 1])
sent_vector_2dim = tf.gather_nd(self.embed_ques_W, sent_1dim)
sent_vector = tf.reshape(sent_vector_2dim, [int(sentences.shape[0]), int(sentences.shape[1]), -1])
# embedding_lookup must contain a variable.
# sent_vector = tf.nn.embedding_lookup(self.embed_ques_W, [int(sentences.shape[0]), int(sentences.shape[1]), -1])
state = self.stacked_lstm.zero_state(sentences.shape[0], tf.float32)
# inputs:[batch_size, max_time, size] if time_major = Flase.
output, state = tf.nn.dynamic_rnn(self.stacked_lstm, inputs=sent_vector, sequence_length=sentence_ph_train_len,
initial_state=state, dtype=tf.float32, time_major=False)
state_drop = tf.nn.dropout(state, 1 - self.drop_out_rate)
# state_linear = tf.nn.xw_plus_b(state_drop, self.embed_state_W, self.embed_state_b)
# state_emb = tf.tanh(state_linear)
return state_drop
'''
compute alignment and regression loss
'''
def compute_loss_reg(self, sim_reg_mat, offset_label):
sim_score_mat, p_reg_mat, l_reg_mat = tf.split(sim_reg_mat, num_or_size_splits=3, axis=2)
sim_score_mat = tf.reshape(sim_score_mat, [self.batch_size, self.batch_size])
l_reg_mat = tf.reshape(l_reg_mat, [self.batch_size, self.batch_size])
p_reg_mat = tf.reshape(p_reg_mat, [self.batch_size, self.batch_size])
# unit matrix with -2
I_2 = tf.diag(tf.constant(-2.0, shape=[self.batch_size]))
all1 = tf.constant(1.0, shape=[self.batch_size, self.batch_size])
# | -1 1 1... |
# mask_mat = | 1 -1 -1... |
# | 1 1 -1 ... |
mask_mat = tf.add(I_2, all1)
# loss cls, not considering iou
I = tf.diag(tf.constant(1.0, shape=[self.batch_size]))
batch_para_mat = tf.constant(self.alpha, shape=[self.batch_size, self.batch_size])
para_mat = tf.add(I,batch_para_mat)
loss_mat = tf.log(tf.add(all1, tf.exp(tf.multiply(mask_mat, sim_score_mat))))
loss_mat = tf.multiply(loss_mat, para_mat)
loss_align = tf.reduce_mean(loss_mat)
# regression loss
l_reg_diag = tf.matmul(tf.multiply(l_reg_mat, I), tf.constant(1.0, shape=[self.batch_size, 1]))
p_reg_diag = tf.matmul(tf.multiply(p_reg_mat, I), tf.constant(1.0, shape=[self.batch_size, 1]))
offset_pred = tf.concat((p_reg_diag, l_reg_diag), 1)
loss_reg = tf.reduce_mean(tf.abs(tf.subtract(offset_pred, offset_label)))
loss=tf.add(tf.multiply(self.lambda_regression, loss_reg), loss_align)
return loss, offset_pred, loss_reg
def init_placeholder(self):
visual_featmap_ph_train = tf.placeholder(tf.float32, shape=(self.batch_size, self.visual_feature_dim))
if self.useLSTM:
# using LSTM, input is the idx of word
sentence_ph_train = tf.placeholder(tf.int32, shape=(self.batch_size, self.max_words_q))
sentence_ph_train_len = tf.placeholder(tf.int32, shape=(self.batch_size))
else:
sentence_ph_train = tf.placeholder(tf.float32, shape=(self.batch_size, self.sentence_embedding_size))
sentence_ph_train_len = -1
offset_ph = tf.placeholder(tf.float32, shape=(self.batch_size,2))
visual_featmap_ph_test = tf.placeholder(tf.float32, shape=(self.test_batch_size, self.visual_feature_dim))
if self.useLSTM:
# using LSTM, input is the idx of word
sentence_ph_test = tf.placeholder(tf.int32, shape=(self.test_batch_size, self.max_words_q))
sentence_ph_test_len = tf.placeholder(tf.int32, shape=(self.test_batch_size))
else:
sentence_ph_test = tf.placeholder(tf.float32, shape=(self.test_batch_size, self.sentence_embedding_size))
sentence_ph_test_len = -1
return visual_featmap_ph_train,sentence_ph_train,offset_ph,visual_featmap_ph_test, sentence_ph_test, \
sentence_ph_train_len, sentence_ph_test_len
def get_variables_by_name(self,name_list):
v_list = tf.trainable_variables()
v_dict = {}
for name in name_list:
v_dict[name] = []
for v in v_list:
for name in name_list:
if name in v.name: v_dict[name].append(v)
for name in name_list:
print("Variables of <"+name+">")
for v in v_dict[name]:
print(" "+v.name)
return v_dict
def training(self, loss):
v_dict = self.get_variables_by_name(["lt"])
vs_optimizer = tf.train.AdamOptimizer(self.vs_lr, name='vs_adam')
vs_train_op = vs_optimizer.minimize(loss, var_list=v_dict["lt"])
return vs_train_op
def construct_model(self):
# initialize the placeholder
self.visual_featmap_ph_train, self.sentence_ph_train, self.offset_ph, self.visual_featmap_ph_test, self.sentence_ph_test, \
self.sentence_ph_train_len, self.sentence_ph_test_len =self.init_placeholder()
# build inference network
sim_reg_mat, sim_reg_mat_test, sim_reg_mat_test_1 = self.visual_semantic_infer(self.visual_featmap_ph_train, self.sentence_ph_train,
self.visual_featmap_ph_test, self.sentence_ph_test,
self.sentence_ph_train_len, self.sentence_ph_test_len)
# compute loss
self.loss_align_reg, offset_pred, loss_reg = self.compute_loss_reg(sim_reg_mat, self.offset_ph)
# optimize
self.vs_train_op = self.training(self.loss_align_reg)
return self.loss_align_reg, self.vs_train_op, sim_reg_mat_test, sim_reg_mat_test_1, offset_pred, loss_reg
|
{"/ctrl_model_noContext.py": ["/dataset_noContext.py"]}
|
10,741
|
EthanZhu90/TALL_Copy
|
refs/heads/master
|
/ctrl_model.py
|
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import dtypes
from util.cnn import fc_layer as fc
import vs_multilayer
from dataset import TestingDataSet
from dataset import TrainingDataSet
class CTRL_Model(object):
    """CTRL with pre-computed sentence vectors and context-augmented visual
    features (4096*3 — presumably [left context, clip, right context]
    concatenated; TODO confirm against the dataset loader).  Builds a TF1
    graph producing alignment scores and boundary-offset regressions.
    """
    def __init__(self, batch_size, train_csv_path, test_csv_path, test_visual_feature_dir, train_visual_feature_dir):
        self.batch_size = batch_size
        self.test_batch_size = 1
        self.vs_lr = 0.005  # Adam learning rate
        self.lambda_regression = 0.01  # weight of the L1 regression loss
        self.alpha = 1.0/batch_size  # weight of non-matching pairs in the alignment loss
        self.semantic_size = 1024 # the size of visual and semantic comparison size
        self.sentence_embedding_size = 4800
        self.visual_feature_dim = 4096*3
        self.train_set=TrainingDataSet(train_visual_feature_dir, train_csv_path, self.batch_size)
        self.test_set=TestingDataSet(test_visual_feature_dir, test_csv_path, self.test_batch_size)
    '''
    used in training alignment model, CTRL(aln)
    '''
    def fill_feed_dict_train(self):
        """Fetch one alignment-only training batch and map it to placeholders."""
        image_batch,sentence_batch,offset_batch = self.train_set.next_batch()
        input_feed = {
            self.visual_featmap_ph_train: image_batch,
            self.sentence_ph_train: sentence_batch,
            self.offset_ph: offset_batch
        }
        return input_feed
    '''
    used in training alignment+regression model, CTRL(reg)
    '''
    def fill_feed_dict_train_reg(self):
        """Fetch one alignment+regression (IoU-matched) training batch."""
        image_batch, sentence_batch, offset_batch = self.train_set.next_batch_iou()
        input_feed = {
            self.visual_featmap_ph_train: image_batch,
            self.sentence_ph_train: sentence_batch,
            self.offset_ph: offset_batch
        }
        return input_feed
    '''
    cross modal processing module
    '''
    def cross_modal_comb(self, visual_feat, sentence_embed, batch_size):
        """Build all batch_size x batch_size (clip, sentence) combinations.

        vv_feature[i, j] = visual_feat[j], ss_feature[i, j] = sentence_embed[i];
        fused by elementwise product, sum and concatenation -> 4*semantic_size
        channels, shaped (1, B, B, 4*semantic_size) for the conv-style scorer.
        """
        vv_feature = tf.reshape(tf.tile(visual_feat, [batch_size, 1]),
                                [batch_size, batch_size, self.semantic_size])
        ss_feature = tf.reshape(tf.tile(sentence_embed,[1, batch_size]),[batch_size, batch_size, self.semantic_size])
        concat_feature = tf.reshape(tf.concat([vv_feature, ss_feature], 2),[batch_size, batch_size, self.semantic_size+self.semantic_size])
        print(concat_feature.get_shape().as_list())
        mul_feature = tf.multiply(vv_feature, ss_feature)
        add_feature = tf.add(vv_feature, ss_feature)
        comb_feature = tf.reshape(tf.concat([mul_feature, add_feature, concat_feature],2),[1, batch_size, batch_size, self.semantic_size*4])
        return comb_feature
    '''
    visual semantic inference, including visual semantic alignment and clip location regression
    '''
    def visual_semantic_infer(self, visual_feature_train, sentence_embed_train, visual_feature_test, sentence_embed_test):
        """Build weight-sharing train and test score networks.

        Returns (sim_score_mat_train (B,B,3), sim_score_mat_test (3,)) —
        the test batch size is 1, so the test output is a single triple.
        """
        name="CTRL_Model"
        with tf.variable_scope(name):
            print("Building training network...............................\n")
            # project both modalities into the shared semantic space, L2-normalized
            transformed_clip_train = fc('v2s_lt', visual_feature_train, output_dim=self.semantic_size)
            transformed_clip_train_norm = tf.nn.l2_normalize(transformed_clip_train, dim=1)
            transformed_sentence_train = fc('s2s_lt', sentence_embed_train, output_dim=self.semantic_size)
            transformed_sentence_train_norm = tf.nn.l2_normalize(transformed_sentence_train, dim=1)
            cross_modal_vec_train = self.cross_modal_comb(transformed_clip_train_norm, transformed_sentence_train_norm, self.batch_size)
            sim_score_mat_train = vs_multilayer.vs_multilayer(cross_modal_vec_train, "vs_multilayer_lt", middle_layer_dim=1000)
            sim_score_mat_train = tf.reshape(sim_score_mat_train,[self.batch_size, self.batch_size, 3])
            # test network reuses every variable created above
            tf.get_variable_scope().reuse_variables()
            print("Building test network...............................\n")
            transformed_clip_test = fc('v2s_lt', visual_feature_test, output_dim=self.semantic_size)
            transformed_clip_test_norm = tf.nn.l2_normalize(transformed_clip_test, dim=1)
            transformed_sentence_test = fc('s2s_lt', sentence_embed_test, output_dim=self.semantic_size)
            transformed_sentence_test_norm = tf.nn.l2_normalize(transformed_sentence_test, dim=1)
            cross_modal_vec_test = self.cross_modal_comb(transformed_clip_test_norm, transformed_sentence_test_norm, self.test_batch_size)
            sim_score_mat_test = vs_multilayer.vs_multilayer(cross_modal_vec_test, "vs_multilayer_lt", reuse=True, middle_layer_dim=1000)
            sim_score_mat_test = tf.reshape(sim_score_mat_test, [3])
            return sim_score_mat_train, sim_score_mat_test
    '''
    compute alignment and regression loss
    '''
    def compute_loss_reg(self, sim_reg_mat, offset_label):
        """Alignment (logistic) loss over the B x B score matrix plus L1
        regression loss on the matched (diagonal) offset predictions.

        sim_reg_mat: (B, B, 3) = (alignment score, start offset, end offset).
        Returns (total loss, offset predictions (B,2), regression loss).
        """
        sim_score_mat, p_reg_mat, l_reg_mat = tf.split(sim_reg_mat, num_or_size_splits=3, axis=2)
        sim_score_mat = tf.reshape(sim_score_mat, [self.batch_size, self.batch_size])
        l_reg_mat = tf.reshape(l_reg_mat, [self.batch_size, self.batch_size])
        p_reg_mat = tf.reshape(p_reg_mat, [self.batch_size, self.batch_size])
        # unit matrix with -2
        I_2 = tf.diag(tf.constant(-2.0, shape=[self.batch_size]))
        all1 = tf.constant(1.0, shape=[self.batch_size, self.batch_size])
        #               | -1  1   1...   |
        #   mask_mat =  | 1  -1  -1...   |
        #               | 1   1  -1 ...  |
        mask_mat = tf.add(I_2, all1)
        # loss cls, not considering iou
        I = tf.diag(tf.constant(1.0, shape=[self.batch_size]))
        batch_para_mat = tf.constant(self.alpha, shape=[self.batch_size, self.batch_size])
        para_mat = tf.add(I,batch_para_mat)  # weight 1+alpha on diagonal, alpha elsewhere
        # log(1 + exp(+-score)): pushes matched scores up, mismatched scores down
        loss_mat = tf.log(tf.add(all1, tf.exp(tf.multiply(mask_mat, sim_score_mat))))
        loss_mat = tf.multiply(loss_mat, para_mat)
        loss_align = tf.reduce_mean(loss_mat)
        # regression loss: extract diagonal (matched-pair) offset predictions
        l_reg_diag = tf.matmul(tf.multiply(l_reg_mat, I), tf.constant(1.0, shape=[self.batch_size, 1]))
        p_reg_diag = tf.matmul(tf.multiply(p_reg_mat, I), tf.constant(1.0, shape=[self.batch_size, 1]))
        offset_pred = tf.concat((p_reg_diag, l_reg_diag), 1)
        loss_reg = tf.reduce_mean(tf.abs(tf.subtract(offset_pred, offset_label)))
        loss=tf.add(tf.multiply(self.lambda_regression, loss_reg), loss_align)
        return loss, offset_pred, loss_reg
    def init_placeholder(self):
        """Create the five input placeholders for train and test graphs."""
        visual_featmap_ph_train = tf.placeholder(tf.float32, shape=(self.batch_size, self.visual_feature_dim))
        sentence_ph_train = tf.placeholder(tf.float32, shape=(self.batch_size, self.sentence_embedding_size))
        offset_ph = tf.placeholder(tf.float32, shape=(self.batch_size,2))
        visual_featmap_ph_test = tf.placeholder(tf.float32, shape=(self.test_batch_size, self.visual_feature_dim))
        sentence_ph_test = tf.placeholder(tf.float32, shape=(self.test_batch_size, self.sentence_embedding_size))
        return visual_featmap_ph_train,sentence_ph_train,offset_ph,visual_featmap_ph_test,sentence_ph_test
    def get_variables_by_name(self,name_list):
        """Group trainable variables whose names contain each key in name_list."""
        v_list = tf.trainable_variables()
        v_dict = {}
        for name in name_list:
            v_dict[name] = []
        for v in v_list:
            for name in name_list:
                if name in v.name: v_dict[name].append(v)
        for name in name_list:
            print("Variables of <"+name+">")
            for v in v_dict[name]:
                print(" "+v.name)
        return v_dict
    def training(self, loss):
        """Create the Adam train op over all '*lt*' (learnable) variables."""
        v_dict = self.get_variables_by_name(["lt"])
        vs_optimizer = tf.train.AdamOptimizer(self.vs_lr, name='vs_adam')
        vs_train_op = vs_optimizer.minimize(loss, var_list=v_dict["lt"])
        return vs_train_op
    def construct_model(self):
        """Wire placeholders, inference, loss and optimizer into one graph."""
        # initialize the placeholder
        self.visual_featmap_ph_train, self.sentence_ph_train, self.offset_ph, self.visual_featmap_ph_test, self.sentence_ph_test=self.init_placeholder()
        # build inference network
        sim_reg_mat, sim_reg_mat_test = self.visual_semantic_infer(self.visual_featmap_ph_train, self.sentence_ph_train, self.visual_featmap_ph_test, self.sentence_ph_test)
        # compute loss
        self.loss_align_reg, offset_pred, loss_reg = self.compute_loss_reg(sim_reg_mat, self.offset_ph)
        # optimize
        self.vs_train_op = self.training(self.loss_align_reg)
        return self.loss_align_reg, self.vs_train_op, sim_reg_mat_test, offset_pred, loss_reg
|
{"/ctrl_model_noContext.py": ["/dataset_noContext.py"]}
|
10,742
|
EthanZhu90/TALL_Copy
|
refs/heads/master
|
/ctrl_model_noContext_dtfv_covideo_clip.py
|
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import dtypes
from util.cnn import fc_layer as fc
import vs_multilayer
from dataset_noContext_dtfv_covideo_clip import TestingDataSet
from dataset_noContext_dtfv_covideo_clip import TrainingDataSet
class CTRL_Model(object):
    """CTRL variant for dense-trajectory (dtfv) features with explicit
    positive/negative clip batches: the loss contrasts each sentence's
    positive clip against a sampled negative clip instead of using the
    full B x B score matrix.
    """
    def __init__(self, batch_size, train_csv_path, test_csv_path, test_visual_feature_dir, train_visual_feature_dir):
        self.batch_size = batch_size
        self.test_batch_size = 1
        self.vs_lr = 0.005  # Adam learning rate
        self.lambda_regression = 0.01  # weight of the L1 regression loss
        self.alpha = 1.0/batch_size  # kept for parity with the other CTRL variants
        self.semantic_size = 2048 # 3072 # 2048 # 2048 # the size of visual and semantic comparison size
        self.sentence_embedding_size = 4800
        self.visual_feature_dim = 21800 # 43600
        self.train_set=TrainingDataSet(train_visual_feature_dir, train_csv_path, self.batch_size)
        self.test_set=TestingDataSet(test_visual_feature_dir, test_csv_path, self.test_batch_size)
    '''
    used in training alignment model, CTRL(aln)
    '''
    def fill_feed_dict_train(self):
        """Fetch one alignment-only training batch and map it to placeholders."""
        image_batch,sentence_batch,offset_batch = self.train_set.next_batch()
        input_feed = {
            self.visual_featmap_ph_train: image_batch,
            self.sentence_ph_train: sentence_batch,
            self.offset_ph: offset_batch
        }
        return input_feed
    '''
    used in training alignment+regression model, CTRL(reg)
    '''
    def fill_feed_dict_train_reg(self):
        """Fetch paired positive/negative clip batches plus sentences/offsets."""
        image_batch_pos, image_batch_neg, sentence_batch, offset_batch = self.train_set.next_batch_iou()
        input_feed = {
            self.visual_featmap_ph_train_pos: image_batch_pos,
            self.visual_featmap_ph_train_neg: image_batch_neg,
            self.sentence_ph_train: sentence_batch,
            self.offset_ph: offset_batch
        }
        return input_feed
    '''
    cross modal processing module
    '''
    def cross_modal_comb_toremve(self, visual_feat, sentence_embed, batch_size):
        """Old all-pairs (B x B) fusion kept for reference; unused — see the
        name suffix '_toremve' and the pairwise cross_modal_comb below."""
        vv_feature = tf.reshape(tf.tile(visual_feat, [batch_size, 1]),
                                [batch_size, batch_size, self.semantic_size])
        ss_feature = tf.reshape(tf.tile(sentence_embed,[1, batch_size]),[batch_size, batch_size, self.semantic_size])
        concat_feature = tf.reshape(tf.concat([vv_feature, ss_feature], 2),[batch_size, batch_size, self.semantic_size+self.semantic_size])
        print(concat_feature.get_shape().as_list())
        mul_feature = tf.multiply(vv_feature, ss_feature)
        add_feature = tf.add(vv_feature, ss_feature)
        comb_feature = tf.reshape(tf.concat([mul_feature, add_feature, concat_feature],2),[1, batch_size, batch_size, self.semantic_size*4])
        return comb_feature
    def cross_modal_comb(self, visual_feat, sentence_embed, batch_size):
        """Row-wise (pairwise) fusion of aligned clip/sentence rows:
        product + sum + concatenation -> (1, 1, rows, 4*semantic_size).
        NOTE: the batch_size argument is unused; the row count is taken from
        visual_feat's static shape.
        """
        concat_feature = tf.concat([visual_feat, sentence_embed], 1)
        # print(concat_feature.get_shape().as_list())
        mul_feature = tf.multiply(visual_feat, sentence_embed)
        add_feature = tf.add(visual_feat, sentence_embed)
        comb_feature = tf.reshape(tf.concat([mul_feature, add_feature, concat_feature], 1),
                                  [1, 1, visual_feat.get_shape().as_list()[0], self.semantic_size*4])
        return comb_feature
    '''
    visual semantic inference, including visual semantic alignment and clip location regression
    '''
    def visual_semantic_infer(self, visual_feature_train_pos, visual_feature_train_neg, sentence_embed_train, visual_feature_test, sentence_embed_test):
        """Score [positives; negatives] (2B rows, each sentence tiled twice)
        for training, and a single pair for test (weights shared).

        Returns (sim_score_mat_train_mix (2B, 3), sim_score_mat_test (3,)).
        """
        name="CTRL_Model"
        with tf.variable_scope(name):
            print("Building training network...............................\n")
            # positives and negatives are projected through the same fc in one pass
            transformed_clip_train_mix = fc('v2s_lt', tf.concat([visual_feature_train_pos, visual_feature_train_neg], 0), output_dim=self.semantic_size)
            transformed_clip_train_norm_mix = tf.nn.l2_normalize(transformed_clip_train_mix, dim=1)
            transformed_sentence_train = fc('s2s_lt', sentence_embed_train, output_dim=self.semantic_size)
            transformed_sentence_train_norm = tf.nn.l2_normalize(transformed_sentence_train, dim=1)
            # each sentence is paired with its positive clip and its negative clip
            cross_modal_vec_train_mix = self.cross_modal_comb(transformed_clip_train_norm_mix,
                                                              tf.tile(transformed_sentence_train_norm, [2,1]),
                                                              self.batch_size)
            sim_score_mat_train_mix = vs_multilayer.vs_multilayer(cross_modal_vec_train_mix, "vs_multilayer_lt", middle_layer_dim=1000)
            sim_score_mat_train_mix = tf.reshape(sim_score_mat_train_mix, [self.batch_size*2, 3])
            # test network reuses every variable created above
            tf.get_variable_scope().reuse_variables()
            print("Building test network...............................\n")
            transformed_clip_test = fc('v2s_lt', visual_feature_test, output_dim=self.semantic_size)
            transformed_clip_test_norm = tf.nn.l2_normalize(transformed_clip_test, dim=1)
            transformed_sentence_test = fc('s2s_lt', sentence_embed_test, output_dim=self.semantic_size)
            transformed_sentence_test_norm = tf.nn.l2_normalize(transformed_sentence_test, dim=1)
            cross_modal_vec_test = self.cross_modal_comb(transformed_clip_test_norm, transformed_sentence_test_norm, self.test_batch_size)
            sim_score_mat_test = vs_multilayer.vs_multilayer(cross_modal_vec_test, "vs_multilayer_lt", reuse=True, middle_layer_dim=1000)
            sim_score_mat_test = tf.reshape(sim_score_mat_test, [3])
            return sim_score_mat_train_mix, sim_score_mat_test
    '''
    compute alignment and regression loss
    '''
    def compute_loss_reg(self, sim_reg_mat_mix, offset_label):
        """Pos/neg logistic alignment loss + L1 offset regression on positives.

        sim_reg_mat_mix: (2B, 3); first B rows are positive pairs, last B
        rows negative pairs.
        Returns (total loss, per-pair loss vector, alignment loss, offset
        predictions (B,2), regression loss).  NOTE(review): the scaled
        regression term loss_1 is computed but NOT returned — the second
        return value is loss_mat, even though the caller names it loss_1.
        """
        # sim_reg_mat_pos = sim_reg_mat_mix[:sim_reg_mat_mix.get_shape().as_list()[0]/2]
        # sim_reg_mat_neg = sim_reg_mat_mix[sim_reg_mat_mix.get_shape().as_list()[0]/2:]
        sim_score_mat, _, _ = tf.split(sim_reg_mat_mix, num_or_size_splits=3, axis=1)
        # -1 for the first B rows (positives, push score up), +1 for negatives
        mask_mat = tf.concat((tf.constant(-1.0, shape=[self.batch_size]), tf.constant(1.0, shape=[self.batch_size])), 0)
        all1 = tf.constant(1.0, shape=[self.batch_size*2])
        loss_mat = tf.log(tf.add(all1, tf.exp(tf.multiply(mask_mat, tf.squeeze(sim_score_mat)))))
        loss_align = tf.reduce_mean(loss_mat)
        # regression loss
        # only the positive half carries meaningful offset predictions
        _, p_reg_mat, l_reg_mat = tf.split(sim_reg_mat_mix[:self.batch_size], num_or_size_splits=3, axis=1)
        #I = tf.diag(tf.constant(1.0, shape=[self.batch_size]))
        #l_reg_diag = tf.matmul(tf.multiply(l_reg_mat, I), tf.constant(1.0, shape=[self.batch_size, 1]))
        #p_reg_diag = tf.matmul(tf.multiply(p_reg_mat, I), tf.constant(1.0, shape=[self.batch_size, 1]))
        offset_pred = tf.concat((p_reg_mat, l_reg_mat), 1)
        loss_reg = tf.reduce_mean(tf.abs(tf.subtract(offset_pred, offset_label)))
        loss_1 = tf.multiply(self.lambda_regression, loss_reg)
        loss=tf.add(loss_1, loss_align)
        return loss, loss_mat, loss_align, offset_pred, loss_reg
    def init_placeholder(self):
        """Create positive/negative clip, sentence, offset and test placeholders."""
        visual_featmap_ph_train_pos = tf.placeholder(tf.float32, shape=(self.batch_size, self.visual_feature_dim))
        visual_featmap_ph_train_neg = tf.placeholder(tf.float32, shape=(self.batch_size, self.visual_feature_dim))
        sentence_ph_train = tf.placeholder(tf.float32, shape=(self.batch_size, self.sentence_embedding_size))
        offset_ph = tf.placeholder(tf.float32, shape=(self.batch_size,2))
        visual_featmap_ph_test = tf.placeholder(tf.float32, shape=(self.test_batch_size, self.visual_feature_dim))
        sentence_ph_test = tf.placeholder(tf.float32, shape=(self.test_batch_size, self.sentence_embedding_size))
        return visual_featmap_ph_train_pos, visual_featmap_ph_train_neg, \
               sentence_ph_train,offset_ph, visual_featmap_ph_test, sentence_ph_test
    def get_variables_by_name(self,name_list):
        """Group trainable variables whose names contain each key in name_list."""
        v_list = tf.trainable_variables()
        v_dict = {}
        for name in name_list:
            v_dict[name] = []
        for v in v_list:
            for name in name_list:
                if name in v.name: v_dict[name].append(v)
        for name in name_list:
            print("Variables of <"+name+">")
            for v in v_dict[name]:
                print(" "+v.name)
        return v_dict
    def training(self, loss):
        """Create the Adam train op over all '*lt*' (learnable) variables."""
        v_dict = self.get_variables_by_name(["lt"])
        vs_optimizer = tf.train.AdamOptimizer(self.vs_lr, name='vs_adam')
        vs_train_op = vs_optimizer.minimize(loss, var_list=v_dict["lt"])
        return vs_train_op
    def construct_model(self):
        """Wire placeholders, inference, loss and optimizer into one graph."""
        # initialize the placeholder
        self.visual_featmap_ph_train_pos, self.visual_featmap_ph_train_neg, self.sentence_ph_train, self.offset_ph, \
        self.visual_featmap_ph_test, self.sentence_ph_test=self.init_placeholder()
        # build inference network
        sim_reg_mat_mix, sim_reg_mat_test = self.visual_semantic_infer(self.visual_featmap_ph_train_pos,
                                                                       self.visual_featmap_ph_train_neg,
                                                                       self.sentence_ph_train,
                                                                       self.visual_featmap_ph_test, self.sentence_ph_test)
        # compute loss
        # NOTE(review): loss_1 here actually receives the per-pair loss vector
        # (loss_mat) returned by compute_loss_reg, not the scaled regression term.
        self.loss_align_reg, loss_1, loss_align, offset_pred, loss_reg = self.compute_loss_reg(sim_reg_mat_mix, self.offset_ph)
        # optimize
        self.vs_train_op = self.training(self.loss_align_reg)
        return self.loss_align_reg, loss_1, loss_align, self.vs_train_op, sim_reg_mat_test, offset_pred, loss_reg
|
{"/ctrl_model_noContext.py": ["/dataset_noContext.py"]}
|
10,743
|
EthanZhu90/TALL_Copy
|
refs/heads/master
|
/dataset_noContext.py
|
import numpy as np
from math import sqrt
import os
import random
import pickle
from sklearn.preprocessing import normalize
def calculate_IoU(i0, i1):
    '''
    Temporal intersection over union of two [start, end] intervals.

    Returns a value in (0, 1] for overlapping intervals and a negative
    number when the intervals are disjoint (intersection length < 0).
    '''
    union_start, union_end = min(i0[0], i1[0]), max(i0[1], i1[1])
    inter_start, inter_end = max(i0[0], i1[0]), min(i0[1], i1[1])
    return float(inter_end - inter_start) / (union_end - union_start)
def calculate_nIoL(base, sliding_clip):
    '''
    Non-intersection over length: the fraction of the sliding window that
    lies outside the base interval.  Callers must ensure the two intervals
    overlap (IoU > 0), otherwise the intersection length goes negative.
    '''
    overlap_lo = max(base[0], sliding_clip[0])
    overlap_hi = min(base[1], sliding_clip[1])
    window_len = sliding_clip[1] - sliding_clip[0]
    outside_len = window_len - (overlap_hi - overlap_lo)
    return float(outside_len) / window_len
class TrainingDataSet(object):
    def __init__(self, sliding_dir, it_path, batch_size, word2idx, useLSTM=True):
        """Load clip/sentence training pairs and build IoU-matched samples.

        sliding_dir: directory of sliding-window feature files named
            "<movie>_<start>_<end>.npy".
        it_path: pickled list of (clip_name, [sentence, ...]) ground truth.
        word2idx: vocabulary map used by the LSTM path.
        The expensive sliding-window/ground-truth matching is cached on disk
        ('clip_sentence_pairs_iou.pkl' / '..._LSTM.pkl') and reloaded when
        present.
        """
        self.useLSTM = useLSTM
        self.counter = 0
        self.batch_size = batch_size
        self.context_num = 1
        self.context_size = 128  # stride (frames) between adjacent context windows
        print("Reading training data list from "+it_path)
        # cs = pickle.load(open(it_path, 'rb'), encoding='bytes')
        cs = pickle.load(open(it_path, 'rb'))
        movie_length_info = pickle.load(open("./video_allframes_info.pkl", 'rb'))
        # flatten (clip, [sentences]) into individual (clip, sentence) pairs
        self.clip_sentence_pairs = []
        for l in cs:
            clip_name = l[0]
            sent_vecs = l[1]
            for sent_vec in sent_vecs:
                self.clip_sentence_pairs.append((clip_name, sent_vec))
        movie_names_set = set()
        self.movie_clip_names = {}
        # read groundtruth sentence-clip pairs
        for k in range(len(self.clip_sentence_pairs)):
            clip_name = self.clip_sentence_pairs[k][0]
            movie_name = clip_name.split("_")[0]
            if not movie_name in movie_names_set:
                movie_names_set.add(movie_name)
                self.movie_clip_names[movie_name] = []
            self.movie_clip_names[movie_name].append(k)
        self.movie_names = list(movie_names_set)
        self.word2idx = word2idx
        self.max_words_q = 15  # sentences truncated to this many word indices
        self.visual_feature_dim = 4096
        self.sent_vec_dim = 4800
        self.num_samples = len(self.clip_sentence_pairs)
        self.sliding_clip_path = sliding_dir
        print(str(len(self.clip_sentence_pairs))+" clip-sentence pairs are readed")
        if not useLSTM:
            # read sliding windows, and match them with the groundtruths to make training samples
            sliding_clips_tmp = os.listdir(self.sliding_clip_path)
            if os.path.exists('clip_sentence_pairs_iou.pkl'):
                print("Loading data from {}".format('clip_sentence_pairs_iou.pkl'))
                with open('clip_sentence_pairs_iou.pkl', 'rb') as input:
                    self.clip_sentence_pairs_iou = pickle.load(input)
                self.num_samples_iou = len(self.clip_sentence_pairs_iou)
                print(str(len(self.clip_sentence_pairs_iou)) + " iou clip-sentence pairs are readed")
                return
            self.clip_sentence_pairs_iou = []
            for clip_name in sliding_clips_tmp:
                if clip_name.split(".")[2]=="npy":
                    movie_name = clip_name.split("_")[0]
                    for clip_sentence in self.clip_sentence_pairs:
                        original_clip_name = clip_sentence[0]
                        original_movie_name = original_clip_name.split("_")[0]
                        if original_movie_name==movie_name:
                            start = int(clip_name.split("_")[1])
                            end = int(clip_name.split("_")[2].split(".")[0])
                            o_start = int(original_clip_name.split("_")[1])
                            o_end = int(original_clip_name.split("_")[2].split(".")[0])
                            iou = calculate_IoU((start, end), (o_start, o_end))
                            # keep windows that overlap the ground truth well
                            # (IoU > 0.5) without too much spill-over (nIoL < 0.15)
                            if iou>0.5:
                                nIoL=calculate_nIoL((o_start, o_end), (start, end))
                                if nIoL<0.15:
                                    movie_length = movie_length_info[movie_name.split(".")[0]]
                                    start_offset =o_start-start
                                    end_offset = o_end-end
                                    self.clip_sentence_pairs_iou.append((clip_sentence[0], clip_sentence[1], clip_name, start_offset, end_offset))
            self.num_samples_iou = len(self.clip_sentence_pairs_iou)
            print(str(len(self.clip_sentence_pairs_iou))+" iou clip-sentence pairs are readed")
            with open('clip_sentence_pairs_iou.pkl', 'wb') as output:
                print("Saving clip_sentence_pairs_iou")
                pickle.dump(self.clip_sentence_pairs_iou, output)
        else:
            # read sliding windows, and match them with the groundtruths to make training samples
            sliding_clips_tmp = os.listdir(self.sliding_clip_path)
            if os.path.exists('clip_sentence_pairs_iou_LSTM.pkl'):
                print("Loading data from {}".format('clip_sentence_pairs_iou_LSTM.pkl'))
                with open('clip_sentence_pairs_iou_LSTM.pkl', 'rb') as input:
                    self.clip_sentence_pairs_iou = pickle.load(input)
                self.num_samples_iou = len(self.clip_sentence_pairs_iou)
                print(str(len(self.clip_sentence_pairs_iou)) + " iou clip-sentence pairs are readed")
                return
            print('Preparing clip_sentence_pairs_iou_LSTM.pkl')
            self.clip_sentence_pairs_iou = []
            for idx, clip_name in enumerate(sliding_clips_tmp):
                if idx%1000 == 0 and idx:
                    print("processing [{}/{}]".format(idx, len(sliding_clips_tmp)))
                if clip_name.split(".")[2] == "npy":
                    movie_name = clip_name.split("_")[0]
                    for clip_sentence in self.clip_sentence_pairs:
                        original_clip_name = clip_sentence[0]
                        original_movie_name = original_clip_name.split("_")[0]
                        if original_movie_name == movie_name:
                            start = int(clip_name.split("_")[1])
                            end = int(clip_name.split("_")[2].split(".")[0])
                            o_start = int(original_clip_name.split("_")[1])
                            o_end = int(original_clip_name.split("_")[2].split(".")[0])
                            iou = calculate_IoU((start, end), (o_start, o_end))
                            # same IoU > 0.5 and nIoL < 0.15 filter as the non-LSTM path
                            if iou > 0.5:
                                nIoL = calculate_nIoL((o_start, o_end), (start, end))
                                if nIoL < 0.15:
                                    movie_length = movie_length_info[movie_name.split(".")[0]]
                                    start_offset = o_start - start
                                    end_offset = o_end - end
                                    self.clip_sentence_pairs_iou.append(
                                        (clip_sentence[0], clip_sentence[1], clip_name, start_offset, end_offset))
            self.num_samples_iou = len(self.clip_sentence_pairs_iou)
            print(str(len(self.clip_sentence_pairs_iou)) + " iou clip-sentence pairs are readed")
            with open('clip_sentence_pairs_iou_LSTM.pkl', 'wb') as output:
                print("Saving clip_sentence_pairs_iou")
                pickle.dump(self.clip_sentence_pairs_iou, output)
            # exit()
            a = 1  # no-op; presumably a leftover debugger anchor
    '''
    compute left (pre) and right (post) context features
    '''
    def get_context_window(self, clip_name, win_length):
        """Mean-pooled features of win_length windows immediately before and
        after the window clip_name ("<movie>_<start>_<end>.npy").

        When a neighbouring window's feature file does not exist, the most
        recently seen feature on that side is reused (initially the centre
        window's own feature).
        Returns (left_mean (4096,), right_mean (4096,)).
        """
        movie_name = clip_name.split("_")[0]
        start = int(clip_name.split("_")[1])
        end = int(clip_name.split("_")[2].split(".")[0])
        clip_length = self.context_size  # stride between adjacent context windows
        left_context_feats = np.zeros([win_length, 4096], dtype=np.float32)
        right_context_feats = np.zeros([win_length, 4096], dtype=np.float32)
        # fallbacks start as the centre window's own feature
        last_left_feat = np.load(self.sliding_clip_path+clip_name)
        last_right_feat = np.load(self.sliding_clip_path+clip_name)
        for k in range(win_length):
            left_context_start = start-clip_length*(k+1)
            left_context_end = start-clip_length*k
            right_context_start = end+clip_length*k
            right_context_end = end+clip_length*(k+1)
            left_context_name = movie_name+"_"+str(left_context_start)+"_"+str(left_context_end)+".npy"
            right_context_name = movie_name+"_"+str(right_context_start)+"_"+str(right_context_end)+".npy"
            if os.path.exists(self.sliding_clip_path+left_context_name):
                left_context_feat = np.load(self.sliding_clip_path+left_context_name)
                last_left_feat = left_context_feat
            else:
                left_context_feat = last_left_feat
            if os.path.exists(self.sliding_clip_path+right_context_name):
                right_context_feat = np.load(self.sliding_clip_path+right_context_name)
                last_right_feat = right_context_feat
            else:
                right_context_feat = last_right_feat
            left_context_feats[k] = left_context_feat
            right_context_feats[k] = right_context_feat
        return np.mean(left_context_feats, axis=0), np.mean(right_context_feats, axis=0)
'''
read next batch of training data, this function is used for training CTRL-aln
'''
def next_batch(self):
    """Sample one training batch of (visual feature, sentence vector) pairs.

    Clip names are kept unique within the batch: on a collision the slot's
    random index is redrawn and the iteration retried.

    Returns (image_batch, sentence_batch, offset_batch); offset_batch is
    all zeros and unused by this alignment-only variant.
    """
    random_batch_index = random.sample(range(self.num_samples), self.batch_size)
    image_batch = np.zeros([self.batch_size, self.visual_feature_dim])
    sentence_batch = np.zeros([self.batch_size, self.sent_vec_dim])
    offset_batch = np.zeros([self.batch_size, 2], dtype=np.float32) # this one is actually useless
    index = 0
    # Clip names already placed in this batch (deduplication).
    clip_set=set()
    while index < self.batch_size:
        k = random_batch_index[index]
        clip_name = self.clip_sentence_pairs[k][0]
        if not clip_name in clip_set:
            clip_set.add(clip_name)
            feat_path = self.image_dir+self.clip_sentence_pairs[k][0]+".npy"
            featmap = np.load(feat_path)
            image_batch[index,:] = featmap
            sentence_batch[index,:] = self.clip_sentence_pairs[k][1][:self.sent_vec_dim]
            index+=1
        else:
            # Collision: draw a fresh index for this slot and retry.
            # NOTE(review): can spin forever if batch_size exceeds the number
            # of distinct clips — confirm callers guarantee otherwise.
            r = random.choice(range(self.num_samples))
            random_batch_index[index] = r
            continue
    return image_batch, sentence_batch, offset_batch
'''
read next batch of training data, this function is used for training CTRL-reg
'''
def next_batch_iou(self):
    """Sample one training batch from the IoU-filtered clip/sentence pairs.

    Returns (image_batch, sentence_batch, offset_batch, sentence_len_batch).
    With self.useLSTM the sentences are zero-padded word-index vectors with
    their true lengths; otherwise sentence_batch holds skip-thought vectors
    and -1 is returned in the length slot.
    """
    random_batch_index = random.sample(range(self.num_samples_iou), self.batch_size)
    image_batch = np.zeros([self.batch_size, self.visual_feature_dim])
    if self.useLSTM:
        # input is word index
        sentence_len_batch= np.zeros(self.batch_size, dtype=np.int32)
        sentence_batch = np.zeros([self.batch_size, self.max_words_q],dtype=np.int32)
        offset_batch = np.zeros([self.batch_size, 2], dtype=np.float32)
        index = 0
        clip_set = set()  # keeps clips unique within the batch
        while index < self.batch_size:
            k = random_batch_index[index]
            clip_name = self.clip_sentence_pairs_iou[k][0]
            if not clip_name in clip_set:
                clip_set.add(clip_name)
                feat_path = self.sliding_clip_path + self.clip_sentence_pairs_iou[k][2]
                featmap = np.load(feat_path)
                # read context features
                # left_context_feat, right_context_feat = self.get_context_window(self.clip_sentence_pairs_iou[k][2], self.context_num)
                image_batch[index, :] = featmap  # .hstack((left_context_feat, featmap, right_context_feat))
                # Map words to indices, truncating to max_words_q.
                # NOTE(review): words missing from word2idx raise KeyError —
                # confirm the vocabulary covers every training caption.
                sent_idx_vector = [self.word2idx[_i] for _i in self.clip_sentence_pairs_iou[k][1].split()[:self.max_words_q]]
                sentence_len_batch[index] = len(sent_idx_vector)
                # padding with 0 to max length(15)
                sent_idx_vector += [0]*(self.max_words_q - len(sent_idx_vector))
                sentence_batch[index, :] = np.asarray(sent_idx_vector)
                # Regression targets (p_offset, l_offset) from the pair tuple —
                # presumably position/length offsets; confirm against the builder.
                p_offset = self.clip_sentence_pairs_iou[k][3]
                l_offset = self.clip_sentence_pairs_iou[k][4]
                offset_batch[index, 0] = p_offset
                offset_batch[index, 1] = l_offset
                index += 1
            else:
                # Duplicate clip: redraw this slot and retry.
                r = random.choice(range(self.num_samples_iou))
                random_batch_index[index] = r
                continue
        return image_batch, sentence_batch, offset_batch, sentence_len_batch
    else:
        # input is the sentence vector from skip-thought
        sentence_batch = np.zeros([self.batch_size, self.sent_vec_dim])
        offset_batch = np.zeros([self.batch_size, 2], dtype=np.float32)
        index = 0
        clip_set = set()
        while index < self.batch_size:
            k = random_batch_index[index]
            clip_name = self.clip_sentence_pairs_iou[k][0]
            if not clip_name in clip_set:
                clip_set.add(clip_name)
                feat_path = self.sliding_clip_path+self.clip_sentence_pairs_iou[k][2]
                featmap = np.load(feat_path)
                # read context features
                # left_context_feat, right_context_feat = self.get_context_window(self.clip_sentence_pairs_iou[k][2], self.context_num)
                image_batch[index,:] = featmap  # .hstack((left_context_feat, featmap, right_context_feat))
                sentence_batch[index,:] = self.clip_sentence_pairs_iou[k][1][:self.sent_vec_dim]
                p_offset = self.clip_sentence_pairs_iou[k][3]
                l_offset = self.clip_sentence_pairs_iou[k][4]
                offset_batch[index,0] = p_offset
                offset_batch[index,1] = l_offset
                index+=1
            else:
                r = random.choice(range(self.num_samples_iou))
                random_batch_index[index] = r
                continue
        return image_batch, sentence_batch, offset_batch, -1
class TestingDataSet(object):
    """Test-split loader pairing clip/sentence data with sliding-window features.

    img_dir serves both as the per-clip feature directory and the
    sliding-clip feature directory; csv_path is a pickled list of
    (clip_name, sentence_vector_list) rows.  The word2idx/useLSTM arguments
    are accepted but not used in this class.
    """
    def __init__(self, img_dir, csv_path, batch_size, word2idx, useLSTM=True):
        #il_path: image_label_file path
        #self.index_in_epoch = 0
        #self.epochs_completed = 0
        self.batch_size = batch_size
        self.image_dir = img_dir
        print("Reading testing data list from "+csv_path)
        # Skip-thought sentence embedding dimensionality.
        self.semantic_size = 4800
        # NOTE(review): under Python 3 pickle needs a binary handle —
        # open(csv_path, 'rb'); the file object is also never closed.
        csv = pickle.load(open(csv_path))
        # Flatten into one (clip_name, sentence_vector) pair per sentence.
        self.clip_sentence_pairs = []
        for l in csv:
            clip_name = l[0]
            sent_vecs = l[1]
            for sent_vec in sent_vecs:
                self.clip_sentence_pairs.append((clip_name, sent_vec))
        print(str(len(self.clip_sentence_pairs))+" pairs are readed")
        # Group pair indices by movie; clip names look like "<movie>_<start>_<end>".
        movie_names_set = set()
        self.movie_clip_names = {}
        for k in range(len(self.clip_sentence_pairs)):
            clip_name = self.clip_sentence_pairs[k][0]
            movie_name = clip_name.split("_")[0]
            if not movie_name in movie_names_set:
                movie_names_set.add(movie_name)
                self.movie_clip_names[movie_name] = []
            self.movie_clip_names[movie_name].append(k)
        self.movie_names = list(movie_names_set)
        self.clip_num_per_movie_max = 0
        for movie_name in self.movie_clip_names:
            if len(self.movie_clip_names[movie_name])>self.clip_num_per_movie_max: self.clip_num_per_movie_max = len(self.movie_clip_names[movie_name])
        print("Max number of clips in a movie is "+str(self.clip_num_per_movie_max))
        # Collect sliding-window feature files that belong to known movies.
        self.sliding_clip_path = img_dir
        sliding_clips_tmp = os.listdir(self.sliding_clip_path)
        self.sliding_clip_names = []
        for clip_name in sliding_clips_tmp:
            # NOTE(review): assumes names contain exactly two dots
            # ("name.ext.npy"); other names raise IndexError here.
            if clip_name.split(".")[2]=="npy":
                movie_name = clip_name.split("_")[0]
                if movie_name in self.movie_clip_names:
                    self.sliding_clip_names.append(clip_name.split(".")[0]+"."+clip_name.split(".")[1])
        self.num_samples = len(self.clip_sentence_pairs)
        print("sliding clips number: "+str(len(self.sliding_clip_names)))
        assert self.batch_size <= self.num_samples

    def get_clip_sample(self, sample_num, movie_name, clip_name):
        """Return *sample_num* evenly spaced frame paths from a clip's frame directory."""
        length=len(os.listdir(self.image_dir+movie_name+"/"+clip_name))
        sample_step=1.0*length/sample_num
        sample_pos=np.floor(sample_step*np.array(range(sample_num)))
        sample_pos_str=[]
        img_names=os.listdir(self.image_dir+movie_name+"/"+clip_name)
        # sort is very important! to get a correct sequence order
        img_names.sort()
        # print img_names
        for pos in sample_pos:
            sample_pos_str.append(self.image_dir+movie_name+"/"+clip_name+"/"+img_names[int(pos)])
        return sample_pos_str

    def get_context_window(self, clip_name, win_length):
        """Mean-pooled features of the *win_length* windows left/right of a clip.

        Uses a fixed stride of 128; missing neighbour files reuse the last
        feature seen on that side (initially the clip's own feature).
        Returns (left_mean, right_mean), each of shape (4096,).
        """
        movie_name = clip_name.split("_")[0]
        start = int(clip_name.split("_")[1])
        end = int(clip_name.split("_")[2].split(".")[0])
        clip_length = 128#end-start
        left_context_feats = np.zeros([win_length,4096], dtype=np.float32)
        right_context_feats = np.zeros([win_length,4096], dtype=np.float32)
        last_left_feat = np.load(self.sliding_clip_path+clip_name)
        last_right_feat = np.load(self.sliding_clip_path+clip_name)
        for k in range(win_length):
            left_context_start = start-clip_length*(k+1)
            left_context_end = start-clip_length*k
            right_context_start = end+clip_length*k
            right_context_end = end+clip_length*(k+1)
            left_context_name = movie_name+"_"+str(left_context_start)+"_"+str(left_context_end)+".npy"
            right_context_name = movie_name+"_"+str(right_context_start)+"_"+str(right_context_end)+".npy"
            if os.path.exists(self.sliding_clip_path+left_context_name):
                left_context_feat = np.load(self.sliding_clip_path+left_context_name)
                last_left_feat = left_context_feat
            else:
                left_context_feat = last_left_feat
            if os.path.exists(self.sliding_clip_path+right_context_name):
                right_context_feat = np.load(self.sliding_clip_path+right_context_name)
                last_right_feat = right_context_feat
            else:
                right_context_feat = last_right_feat
            left_context_feats[k] = left_context_feat
            right_context_feats[k] = right_context_feat
        return np.mean(left_context_feats, axis=0), np.mean(right_context_feats, axis=0)

    def load_movie(self, movie_name):
        """Load frames and sentences for one movie.

        NOTE(review): references self.clip_names, self.sent_vecs,
        self.sentences and self.movie_frames, none of which are assigned in
        __init__ — as written this raises AttributeError and looks like dead
        code from an earlier version; confirm before removing.
        """
        movie_clip_sentences=[]
        for k in range(len(self.clip_names)):
            if movie_name in self.clip_names[k]:
                movie_clip_sentences.append((self.clip_names[k], self.sent_vecs[k][:2400], self.sentences[k]))
        movie_clip_imgs=[]
        for k in range(len(self.movie_frames[movie_name])):
            # print str(k)+"/"+str(len(self.movie_frames[movie_name]))
            if os.path.isfile(self.movie_frames[movie_name][k][1]) and os.path.getsize(self.movie_frames[movie_name][k][1])!=0:
                img=load_image(self.movie_frames[movie_name][k][1])
                movie_clip_imgs.append((self.movie_frames[movie_name][k][0],img))
        return movie_clip_imgs, movie_clip_sentences

    def load_movie_byclip(self,movie_name,sample_num):
        """Return ([(clip, feature)], [(clip, sentence_vec)]) for one movie,
        loading each distinct clip's feature file only once."""
        movie_clip_sentences=[]
        movie_clip_featmap=[]
        clip_set=set()
        for k in range(len(self.clip_sentence_pairs)):
            if movie_name in self.clip_sentence_pairs[k][0]:
                movie_clip_sentences.append((self.clip_sentence_pairs[k][0],self.clip_sentence_pairs[k][1][:self.semantic_size]))
                if not self.clip_sentence_pairs[k][0] in clip_set:
                    clip_set.add(self.clip_sentence_pairs[k][0])
                    # print str(k)+"/"+str(len(self.movie_clip_names[movie_name]))
                    visual_feature_path=self.image_dir+self.clip_sentence_pairs[k][0]+".npy"
                    feature_data=np.load(visual_feature_path)
                    movie_clip_featmap.append((self.clip_sentence_pairs[k][0],feature_data))
        return movie_clip_featmap, movie_clip_sentences

    def load_movie_slidingclip(self, movie_name, sample_num):
        """Return sliding-window features and sentence vectors for one movie."""
        movie_clip_sentences = []
        movie_clip_featmap = []
        clip_set = set()  # NOTE(review): unused in this method
        for k in range(len(self.clip_sentence_pairs)):
            if movie_name in self.clip_sentence_pairs[k][0]:
                movie_clip_sentences.append((self.clip_sentence_pairs[k][0], self.clip_sentence_pairs[k][1][:self.semantic_size]))
        for k in range(len(self.sliding_clip_names)):
            if movie_name in self.sliding_clip_names[k]:
                # print str(k)+"/"+str(len(self.movie_clip_names[movie_name]))
                visual_feature_path = self.sliding_clip_path+self.sliding_clip_names[k]+".npy"
                #context_feat=self.get_context(self.sliding_clip_names[k]+".npy")
                # left_context_feat,right_context_feat = self.get_context_window(self.sliding_clip_names[k]+".npy",1)
                feature_data = np.load(visual_feature_path)
                #comb_feat=np.hstack((context_feat,feature_data))
                comb_feat = feature_data  # np.hstack((left_context_feat,feature_data,right_context_feat))
                movie_clip_featmap.append((self.sliding_clip_names[k], comb_feat))
        return movie_clip_featmap, movie_clip_sentences
|
{"/ctrl_model_noContext.py": ["/dataset_noContext.py"]}
|
10,748
|
craigcurtin/mlb
|
refs/heads/master
|
/Game.py
|
import datetime
import pytz
import os
class Game(object):
    """Read-only accessor wrapper around a single statsapi schedule entry."""

    def __init__(self, game_dict):
        self.game_dict = game_dict

    def game_id(self):
        """Numeric MLB game id."""
        return self.game_dict['game_id']

    def game_time(self):
        """Scheduled start time as an ISO-8601 UTC string with trailing 'Z'."""
        return self.game_dict['game_datetime']

    def home_name(self):
        """Home team name."""
        return self.game_dict['home_name']

    def away_name(self):
        """Away team name."""
        return self.game_dict['away_name']

    def home_probable_pitcher(self):
        """Announced home starter, or 'TBD' when not yet named."""
        pitcher = self.game_dict['home_probable_pitcher']
        return pitcher if pitcher else 'TBD'

    def away_probable_pitcher(self):
        """Announced away starter, or 'TBD' when not yet named."""
        pitcher = self.game_dict['away_probable_pitcher']
        return pitcher if pitcher else 'TBD'

    def venue_name(self):
        """Ballpark name."""
        return self.game_dict['venue_name']

    def summary_info(self):
        """One line: Eastern start time, matchup, probable starters, newline."""
        # game_time() is UTC with a trailing 'Z'; strip the 'Z', mark the
        # value as UTC, then convert to US/Eastern for display.
        naive = datetime.datetime.fromisoformat(self.game_time()[:-1])
        eastern = pytz.timezone("US/Eastern")
        local_dt = naive.replace(tzinfo=pytz.utc).astimezone(eastern)
        return '{} {} at {}, SP: {} vs {}{}'.format(local_dt.strftime("%H:%MET"),
                                                    self.away_name(),
                                                    self.home_name(),
                                                    self.away_probable_pitcher(),
                                                    self.home_probable_pitcher(),
                                                    os.linesep,
                                                    )
|
{"/todays_game.py": ["/Game.py", "/util_logger.py", "/yagmail.py"], "/roster_info.py": ["/Team.py", "/Roster.py"], "/player_info.py": ["/util_logger.py", "/Team.py"]}
|
10,749
|
craigcurtin/mlb
|
refs/heads/master
|
/todays_game.py
|
import statsapi
import datetime
from datetime import datetime as dt
from Game import Game
import logging
import sys
import os
from cc_mail import cc_mail
from util_logger import setup_logger
from yagmail import send_yag
def todays_game():
    """Build a plain-text summary of today's and tomorrow's MLB games.

    Returns a single string; each game line already ends with os.linesep
    (supplied by Game.summary_info).
    """
    today = datetime.date.today()
    tommorrow = today + datetime.timedelta(days=1)
    # sched = statsapi.schedule(start_date='07/01/2018', end_date='07/31/2018', team=143, opponent=121)
    statsapi.lookup_team(147)
    today_games = {}
    tomorrow_games = {}
    # Bucket each scheduled game by whether it starts today or tomorrow.
    for row in statsapi.schedule(start_date=today, end_date=tommorrow):
        game_day = dt.strptime(row['game_date'], '%Y-%m-%d').date()
        bucket = today_games if game_day == today else tomorrow_games
        bucket[row['game_id']] = Game(row)
    parts = ["Today's Games: {}{}".format(today.isoformat(), os.linesep)]
    parts.extend(today_games[g].summary_info() for g in today_games)
    parts.append("Tommorrow's Games: {}{}".format(tommorrow.isoformat(), os.linesep))
    parts.extend(tomorrow_games[g].summary_info() for g in tomorrow_games)
    return "".join(parts)
if __name__ == '__main__':
    # Log to c:/Temp (Windows-specific path) at DEBUG level, then email the schedule.
    setup_logger('todays_game', 'c:/Temp', logging.DEBUG)
    email_body = todays_game()
    # NOTE(review): send_yag is imported at the top of the file but unused;
    # cc_mail does the actual sending here.
    cc_mail('curtin@computer.org', '{} MLB games'.format(datetime.date.today()), email_body)
    sys.exit(0)
|
{"/todays_game.py": ["/Game.py", "/util_logger.py", "/yagmail.py"], "/roster_info.py": ["/Team.py", "/Roster.py"], "/player_info.py": ["/util_logger.py", "/Team.py"]}
|
10,750
|
craigcurtin/mlb
|
refs/heads/master
|
/standings.py
|
import statsapi
def standings():
    """Return pre-formatted MLB standings text for both leagues (AL=103, NL=104)."""
    return statsapi.standings(leagueId="103,104",
                              division="all",
                              include_wildcard=True,
                              season=None,
                              standingsTypes=None,
                              date=None)
def standings_data():
    """Return MLB standings as structured data for both leagues (AL=103, NL=104)."""
    return statsapi.standings_data(leagueId="103,104",
                                   division="all",
                                   include_wildcard=True,
                                   season=None,
                                   standingsTypes=None,
                                   date=None)
if __name__ == '__main__':
    # Fix: the original wrote `standings = standings()` (and likewise for
    # standings_data), rebinding the function names to their results and
    # making any second call impossible.  Results now go to distinct names.
    standings_text = standings()
    print(standings_text)
    standings_info = standings_data()
    print(standings_info)
|
{"/todays_game.py": ["/Game.py", "/util_logger.py", "/yagmail.py"], "/roster_info.py": ["/Team.py", "/Roster.py"], "/player_info.py": ["/util_logger.py", "/Team.py"]}
|
10,751
|
craigcurtin/mlb
|
refs/heads/master
|
/Roster.py
|
import re
from collections import namedtuple
from collections import defaultdict
# A single roster slot: fielding position code plus the player's name.
Position = namedtuple('Position', 'pos name')
# Position codes accepted by Roster.position().
valid_positions = ['P', 'C', '1B', '2B', '3B', 'SS', 'LF', 'CF', 'RF']


class Roster(object):
    """Parsed team roster indexed by uniform number and by position.

    roster_list holds lines of the form '#<number>  <pos>  <name>' with the
    three fields separated by two or more spaces (statsapi.roster text
    output); blank lines are skipped.
    """

    def __init__(self, teamId, roster_list):
        self.teamId = teamId
        self.uniform_number_dict = {}           # number (str, '#' stripped) -> Position
        self.position_dict = defaultdict(list)  # position code -> [numbers]
        self.roster_list = roster_list
        for player in roster_list:
            if len(player) == 0:
                continue
            uniform_number, position, name = re.split(r"\s{2,}", player)
            self.uniform_number_dict[uniform_number[1:]] = Position(position, name)
            self.position_dict[position].append(uniform_number[1:])

    def position(self, position):
        """Return the uniform numbers playing *position* ([] for unknown codes)."""
        if position in valid_positions:
            return self.position_dict[position]
        else:
            return []

    def uniform_number(self, uniform_number):
        """Return the Position namedtuple for a uniform number (KeyError if absent)."""
        return self.uniform_number_dict[uniform_number]

    def roster(self):
        """Return the raw roster lines.

        Bug fix: the original `return self.roster` returned the bound method
        object itself (there is no attribute named `roster`); callers clearly
        expect the stored list.
        """
        return self.roster_list
|
{"/todays_game.py": ["/Game.py", "/util_logger.py", "/yagmail.py"], "/roster_info.py": ["/Team.py", "/Roster.py"], "/player_info.py": ["/util_logger.py", "/Team.py"]}
|
10,752
|
craigcurtin/mlb
|
refs/heads/master
|
/roster_info.py
|
import statsapi
from Team import Team
from datetime import datetime
from Roster import Roster
from Roster import valid_positions
def team_roster(teamId):
    """Fetch the plain-text roster for *teamId* for the current season."""
    current_season = datetime.now().year
    return statsapi.roster(teamId, rosterType=None, season=current_season, date=None)
if __name__ == '__main__':
    # Build id -> Team for every active MLB club.
    team_dict = {}
    teams = statsapi.get('teams', {'sportIds': 1, 'activeStatus': 'Yes'})
    for team in teams['teams']:
        team_dict[team['id']] = Team(team)
    # Parse each club's text roster and print it grouped by position.
    league_roster_dict = {}
    for teamId in team_dict.keys():
        roster = team_roster(teamId)
        team_roster_list = roster.split('\n')
        league_roster_dict[teamId] = Roster(teamId, team_roster_list)
        print (team_dict[teamId].summary_info())
        for position in valid_positions:
            for uniform_number in league_roster_dict[teamId].position(position):
                print ('{}, #{}, {}'.format(position,
                                            uniform_number,
                                            league_roster_dict[teamId].uniform_number(uniform_number)))
|
{"/todays_game.py": ["/Game.py", "/util_logger.py", "/yagmail.py"], "/roster_info.py": ["/Team.py", "/Roster.py"], "/player_info.py": ["/util_logger.py", "/Team.py"]}
|
10,753
|
craigcurtin/mlb
|
refs/heads/master
|
/linescore_info.py
|
import statsapi
from teams_info import teams_info
if __name__ == '__main__':
    # Print the boxscore and linescore of each team's most recent game.
    teams_dict = teams_info()
    for teamId in teams_dict.keys():
        most_recent_game_id = statsapi.last_game(teamId)
        print(statsapi.boxscore(most_recent_game_id))
        print(statsapi.linescore(most_recent_game_id))
# NOTE(review): everything below is broken — `gamePk` is never defined, so
# these module-level statements raise NameError on import; they appear to be
# a snippet pasted from the statsapi documentation and should be removed.
statsapi.linescore(gamePk, timecode=None)
params = {
    "gamePk": gamePk,
    "fields": "gameData,teams,teamName,shortName,status,abstractGameState,liveData,linescore,innings,num,home,away,runs,hits,errors",
}
|
{"/todays_game.py": ["/Game.py", "/util_logger.py", "/yagmail.py"], "/roster_info.py": ["/Team.py", "/Roster.py"], "/player_info.py": ["/util_logger.py", "/Team.py"]}
|
10,754
|
craigcurtin/mlb
|
refs/heads/master
|
/Team.py
|
import os
# noinspection PyPep8Naming
class Team(object):
    """Read-only accessor wrapper around a statsapi team dictionary.

    Each accessor returns one field of the underlying dict and raises
    KeyError when the field is absent.  Method names mirror the statsapi
    field names, hence the mixedCase (kept for backward compatibility).
    """

    def __init__(self, team_dict):
        self.team_dict = team_dict

    # --- identity ---
    def id(self):
        return self.team_dict['id']

    def name(self):
        return self.team_dict['name']

    def link(self):
        return self.team_dict['link']

    def season(self):
        return self.team_dict['season']

    # --- league / division ---
    def league_id(self):
        return self.team_dict['league']['id']

    def league_name(self):
        return self.team_dict['league']['name']

    def league_link(self):
        return self.team_dict['league']['link']

    def division_id(self):
        return self.team_dict['division']['id']

    def division_name(self):
        return self.team_dict['division']['name']

    def division_link(self):
        return self.team_dict['division']['link']

    # --- venues ---
    def venue_id(self):
        return self.team_dict['venue']['id']

    def venue_name(self):
        return self.team_dict['venue']['name']

    def venue_link(self):
        return self.team_dict['venue']['link']

    def springVenue_id(self):
        return self.team_dict['springVenue']['id']

    # noinspection PyPep8Naming
    def springVenue_link(self):
        return self.team_dict['springVenue']['link']

    # --- naming / codes ---
    def teamCode(self):
        return self.team_dict['teamCode']

    def fileCode(self):
        return self.team_dict['fileCode']

    def abbreviation(self):
        return self.team_dict['abbreviation']

    def teamName(self):
        return self.team_dict['teamName']

    def locationName(self):
        return self.team_dict['locationName']

    def firstYearOfPlay(self):
        return self.team_dict['firstYearOfPlay']

    # --- sport / spring league ---
    def sport_id(self):
        return self.team_dict['sport']['id']

    def sport_name(self):
        return self.team_dict['sport']['name']

    def sport_link(self):
        return self.team_dict['sport']['link']

    def shortName(self):
        return self.team_dict['shortName']

    def springLeague_id(self):
        return self.team_dict['springLeague']['id']

    def springLeague_name(self):
        return self.team_dict['springLeague']['name']

    def springLeague_link(self):
        return self.team_dict['springLeague']['link']

    def springLeague_abbreviation(self):
        return self.team_dict['springLeague']['abbreviation']

    # --- status flags ---
    def allStarStatus(self):
        return self.team_dict['allStarStatus']

    def active(self):
        return self.team_dict['active']

    def summary_info(self):
        """Return a one-line human-readable summary of the team.

        Fix: the original also passed os.linesep as a sixth positional
        argument, but the format string has only five placeholders, so
        str.format silently discarded it.  The dead argument is removed;
        output is byte-for-byte unchanged.
        """
        return '{}, play in {}, in {} at {} since: {}'.format(self.name(),
                                                              self.division_name(),
                                                              self.locationName(),
                                                              self.venue_name(),
                                                              self.firstYearOfPlay(),
                                                              )
|
{"/todays_game.py": ["/Game.py", "/util_logger.py", "/yagmail.py"], "/roster_info.py": ["/Team.py", "/Roster.py"], "/player_info.py": ["/util_logger.py", "/Team.py"]}
|
10,755
|
craigcurtin/mlb
|
refs/heads/master
|
/player_info.py
|
import statsapi
from util_logger import setup_logger
import logging
from Team import Team
import sys
def team_info():
    """Return {team_id: Team} for every active MLB club."""
    response = statsapi.get('teams', {'sportIds': 1, 'activeStatus': 'Yes'})
    return {row['id']: Team(row) for row in response['teams']}
if __name__ == '__main__':
    # NOTE(review): this file contains three pasted copies of (nearly) the
    # same module.  This first __main__ block only configures logging and is
    # immediately followed by a repeat of the top-of-file imports.
    setup_logger('teams', 'c:/Temp', logging.DEBUG)
# Duplicate imports — identical to the ones at the top of the file.
import statsapi
from util_logger import setup_logger
import logging
from Team import Team
import sys
# NOTE(review): duplicate definition — identical to team_info above; this
# later definition silently rebinds the name.
def team_info():
    """Return {team_id: Team} for every active MLB club (duplicate copy)."""
    # teams = statsapi.get('teams',{'sportIds':1,'activeStatus':'Yes','fields':'teams,name,id,division,league'})
    team_dict = {}
    teams = statsapi.get('teams', {'sportIds': 1, 'activeStatus': 'Yes'})
    for team in teams['teams']:
        team_dict[team['id']] = Team(team)
    return team_dict
if __name__ == '__main__':
    setup_logger('teams', 'c:/Temp', logging.DEBUG)
    team_dict = team_info()
    # Collect one summary line per team, then print them alphabetically.
    buffer = []
    for team_id in team_dict.keys():
        buffer.append(team_dict[team_id].summary_info())
    buffer.sort()
    for buf in buffer:
        print(buf)
    logging.info("normal termination")
    # NOTE(review): sys.exit here means the third pasted copy of this module
    # below never runs when the file is executed as a script.
    sys.exit(0)
# NOTE(review): third pasted copy of this module; unreachable when the file
# is run as a script because the previous __main__ block calls sys.exit(0).
import statsapi
from util_logger import setup_logger
import logging
from Team import Team
import sys
def team_info():
    """Duplicate of team_info above; rebinds the name a third time."""
    # teams = statsapi.get('teams',{'sportIds':1,'activeStatus':'Yes','fields':'teams,name,id,division,league'})
    team_dict = {}
    teams = statsapi.get('teams', {'sportIds': 1, 'activeStatus': 'Yes'})
    for team in teams['teams']:
        team_dict[team['id']] = Team(team)
    return team_dict
if __name__ == '__main__':
    setup_logger('teams', 'c:/Temp', logging.DEBUG)
    # NOTE(review): lookup_player() is called with no arguments — presumably
    # a stub left mid-edit; verify against the statsapi docs (it normally
    # requires a lookup value).
    statsapi.lookup_player()
|
{"/todays_game.py": ["/Game.py", "/util_logger.py", "/yagmail.py"], "/roster_info.py": ["/Team.py", "/Roster.py"], "/player_info.py": ["/util_logger.py", "/Team.py"]}
|
10,756
|
craigcurtin/mlb
|
refs/heads/master
|
/util_logger.py
|
import sys
import logging
from pytz import timezone
from datetime import datetime
from pathlib import Path
# *force* UTC based time in log messages
tz = timezone('UTC')


def time_tz(*args):
    """logging.Formatter converter: current UTC time as a struct_time,
    regardless of the host's local timezone."""
    utc_now = datetime.now(tz)
    return utc_now.timetuple()
# TODO - CSC working this function to be JSON aware/enabled ...
def setup_logger(app_name, log_directory, log_level):
    """configure logger with UTC timestamp, bunch of default values"""
    # Setting up logger
    # log_levels: NOTSET=0, DEBUG=10, INFO=20, WARN=30, ERROR=40, and CRITICAL=50
    # TODO - on linux we want /var/log ... error on MacOs ... protected directory
    # log_file_name = Path('/var/log/{}.log'.format(app_name))
    log_file_name = Path('{}/{}.log'.format(log_directory, app_name))
    short_file_format = "%(asctime)s:%(levelname)s:%(message)s"
    # NOTE(review): the first long_file_format below is immediately
    # overwritten (dead value), and its trailing "%(uid)" lacks a conversion
    # character, which would crash the formatter if it were ever used.  Its
    # %(HOST)s/%(AppId)d fields also don't match the extra_attributes keys
    # below ('Host' vs 'HOST').
    long_file_format = "%(asctime)s %(HOST)s %(AppId)d %(AppVersion)s %(levelname)s %(name)s %(message)s %(filename)s %(funcName)s %(levelname)s %(lineno)d %(message)s %(module)s %(msecs)d %(name)s %(pathname)s %(process)d %(processName)s %(relativeCreated)d %(thread)d %(threadName)s %(uid)"
    long_file_format = "%(asctime)s %(levelname)s %(name)s %(message)s %(filename)s %(funcName)s %(levelname)s %(lineno)d %(message)s %(module)s %(msecs)d %(name)s %(pathname)s %(process)d %(processName)s %(relativeCreated)d %(thread)d %(threadName)s"
    # long_file_format = "%(asctime)s:%(levelname)s%(name)s %(message)s %(filename)s %(funcName)s %(levelname)s %(lineno)d %(message)s %(module)s %(msecs)d %(name)s %(pathname)s %(process)d %(processName)s %(relativeCreated)d %(thread)d %(threadName)s"
    log_file_format = short_file_format
    # make sure valid log level is passed in, default to DEBUG ...
    valid_log_levels = [logging.DEBUG, logging.INFO, logging.WARN, logging.ERROR, logging.CRITICAL]
    if log_level not in valid_log_levels:
        log_level = logging.DEBUG
    # NOTE(review): these extras are attached to a LoggerAdapter that is
    # discarded two lines below, so they never reach any log record.
    extra_attributes = {'Host': '10.0.0.1',
                        'AppId': 1024,
                        'AppVersion': '1.0.0',
                        'uid': 12345}
    logger = logging.getLogger()
    logging.LoggerAdapter(logger, extra_attributes)
    # add in our custom UTC timezone converter
    logging.Formatter.converter = time_tz
    # Append-mode file handler on the root logger at the validated level.
    logging.basicConfig(level=log_level, filename=log_file_name, filemode="a",
                        format=log_file_format)
    # configure stdout same as file
    sh = logging.StreamHandler(sys.stdout)
    sh.setFormatter(logging.Formatter(log_file_format))
    logging.getLogger().addHandler(sh)
    logging.info('App:{} startup'.format(app_name))
    return
|
{"/todays_game.py": ["/Game.py", "/util_logger.py", "/yagmail.py"], "/roster_info.py": ["/Team.py", "/Roster.py"], "/player_info.py": ["/util_logger.py", "/Team.py"]}
|
10,757
|
craigcurtin/mlb
|
refs/heads/master
|
/yagmail.py
|
import yagmail
def send_yag(to_email, subject, contents):
    """Send an email through yagmail's default (keyring-configured) SMTP account.

    NOTE(review): this module is itself named yagmail.py — depending on
    sys.path, `import yagmail` may resolve to this file instead of the
    yagmail package; consider renaming the module.
    """
    mailer = yagmail.SMTP()
    mailer.send(to_email, subject, contents)
|
{"/todays_game.py": ["/Game.py", "/util_logger.py", "/yagmail.py"], "/roster_info.py": ["/Team.py", "/Roster.py"], "/player_info.py": ["/util_logger.py", "/Team.py"]}
|
10,759
|
SiddhanthHegde/You-Need-to-Pay-More-Attention
|
refs/heads/main
|
/splitAndMake.py
|
#This file creates a balanced split between the classes and makes it model feedable form
import os
import pandas as pd
from zipfile import ZipFile
from utils import move_data, split_data
# Unzip the training images into the working directory.
extract_path = 'training_img.zip'
with ZipFile(extract_path, 'r') as zipObj:
    zipObj.extractall()
# Sort memes into per-class folders: names starting with 'N' go to Non_troll.
os.mkdir('Troll')
os.mkdir('Non_troll')
src = 'uploaded_tamil_memes'
move_data(src,'Troll','Non_troll')
# Balanced split: the first 128 troll / 101 non-troll files go to Val,
# the rest to Train (see split_data).
os.mkdir('Train')
os.mkdir('Val')
split_data('Troll','Train','Val',128)
split_data('Non_troll','Train','Val',101)
df = pd.read_csv('train_captions.csv')
df.drop('Unnamed: 0',axis=1,inplace=True)
# Re-attach each image's caption by file name.
# NOTE(review): each lookup scans the whole frame (O(n*m) overall) — a
# {imagename: caption} dict built once would be linear.  Also raises
# IndexError if an image has no matching row; confirm the CSV covers every
# file.
train_df_data = []
val_df_data = []
for img_name in os.listdir('Train'):
    ind = list(df[df['imagename'] == img_name].index)[0]
    train_df_data.append([img_name,df['captions'].iloc[ind]])
for img_name in os.listdir('Val'):
    ind = list(df[df['imagename'] == img_name].index)[0]
    val_df_data.append([img_name,df['captions'].iloc[ind]])
# Persist the model-feedable (img_name, captions) tables.
train_df = pd.DataFrame(train_df_data,columns=['img_name','captions'])
val_df = pd.DataFrame(val_df_data,columns=['img_name','captions'])
train_df.to_csv('train_df.csv',index=False)
val_df.to_csv('val_df.csv',index=False)
|
{"/splitAndMake.py": ["/utils.py"], "/train.py": ["/model.py", "/dataset.py", "/utils.py"], "/test.py": ["/dataset.py", "/model.py", "/utils.py"]}
|
10,760
|
SiddhanthHegde/You-Need-to-Pay-More-Attention
|
refs/heads/main
|
/train.py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use("ggplot")
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import timm
from zipfile import ZipFile
import os
import time
from shutil import copy2
from torch.utils.data import DataLoader
from transformers import AdamW,get_linear_schedule_with_warmup,AutoModel,AutoTokenizer
from PIL import Image
from collections import defaultdict
from model import multimodal
from dataset import create_data_loader
from utils import train_epoch, eval_model, epoch_time
device = 'cuda' if torch.cuda.is_available() else 'cpu'
PRE_TRAINED_MODEL_NAME = 'bert-base-multilingual-cased'
tokenizer = AutoTokenizer.from_pretrained(PRE_TRAINED_MODEL_NAME)
# Standard ImageNet preprocessing for the ViT image stream.
my_trans = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
BATCH_SIZE = 16
MAX_LEN = 128
EPOCHS = 4
history = defaultdict(list)
best_accuracy = 0
LOAD_MODEL = False
# NOTE(review): train_df / val_df are never defined in this file — they come
# from the CSVs written by splitAndMake.py; load them (pd.read_csv) before
# this line or the script fails with NameError.
train_data_loader = create_data_loader(train_df,tokenizer,MAX_LEN,BATCH_SIZE,my_trans,'Train',True)
val_data_loader = create_data_loader(val_df,tokenizer,MAX_LEN,BATCH_SIZE,my_trans,'Val',False)
model = multimodal()
model = model.to(device)
optimizer = AdamW(model.parameters(), lr=2e-5, correct_bias=False)
total_steps = len(train_data_loader) * EPOCHS
scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=0,
    num_training_steps=total_steps
)
loss = nn.BCEWithLogitsLoss().to(device)
for epoch in range(EPOCHS):
    start_time = time.time()
    # 2071 / 229 are hard-coded example counts used to normalise accuracy —
    # presumably the train/val split sizes; confirm against splitAndMake.py.
    train_acc,train_loss = train_epoch(
        model,
        train_data_loader,
        loss,
        optimizer,
        device,
        scheduler,
        2071
    )
    val_acc,val_loss = eval_model(
        model,
        val_data_loader,
        loss,
        device,
        229
    )
    end_time = time.time()
    epoch_mins, epoch_secs = epoch_time(start_time, end_time)
    print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
    print(f'Train Loss {train_loss} accuracy {train_acc}')
    print(f'Val Loss {val_loss} accuracy {val_acc}')
    print()
    history['train_acc'].append(train_acc)
    history['train_loss'].append(train_loss)
    history['val_acc'].append(val_acc)
    history['val_loss'].append(val_loss)
    # NOTE(review): torch.save is called with only a file name — it needs the
    # object to save as its first argument, e.g.
    # torch.save(model.state_dict(), 'vit-bert-1.0val.bin'); as written this
    # raises as soon as val accuracy exceeds 0.95.
    if history['val_acc'][-1] > 0.95:
        torch.save('vit-bert-1.0val.bin')
# NOTE(review): dead code — LOAD_MODEL is hard-coded False above and this
# guard runs only after training completes.
if LOAD_MODEL:
    model.load_state_dict(torch.load('vit-bert-1.0val.bin'))
|
{"/splitAndMake.py": ["/utils.py"], "/train.py": ["/model.py", "/dataset.py", "/utils.py"], "/test.py": ["/dataset.py", "/model.py", "/utils.py"]}
|
10,761
|
SiddhanthHegde/You-Need-to-Pay-More-Attention
|
refs/heads/main
|
/model.py
|
import torch
import torch.nn as nn
import timm
from transformers import AutoModel
class multimodal(nn.Module):
    """Two-stream troll-meme classifier: ViT image features + mBERT text features.

    Each stream is projected to 128-d, the two are concatenated (256-d) and
    mapped to a single logit — pair with BCEWithLogitsLoss.
    """
    def __init__(self):
        super(multimodal, self).__init__()
        self.vit = timm.create_model("vit_base_patch16_224", pretrained=True)
        self.bert = AutoModel.from_pretrained('bert-base-multilingual-cased')
        # Replace the ViT classification head with a 128-d projection.
        self.vit.head = nn.Linear(self.vit.head.in_features, 128)
        self.fc1 = nn.Linear(self.bert.config.hidden_size,128)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(256,1)
        self.drop = nn.Dropout(p=0.2)
    def forward(self,input_ids, attention_mask, img):
        # NOTE(review): tuple-unpacking the BERT output assumes a tuple
        # return (transformers<4 or return_dict=False); on transformers>=4
        # the model returns a ModelOutput and this unpacking misbehaves —
        # confirm the pinned transformers version.
        _, pooled_output = self.bert(
            input_ids = input_ids,
            attention_mask = attention_mask
        )
        text_out = self.fc1(pooled_output)
        img_out = self.vit(img)
        # Late fusion: concatenate the two 128-d streams along the feature dim.
        merged = torch.cat((text_out,img_out),1)
        act = self.relu(merged)
        out = self.drop(act)
        return self.fc2(out)  # raw logit, shape (batch, 1)
|
{"/splitAndMake.py": ["/utils.py"], "/train.py": ["/model.py", "/dataset.py", "/utils.py"], "/test.py": ["/dataset.py", "/model.py", "/utils.py"]}
|
10,762
|
SiddhanthHegde/You-Need-to-Pay-More-Attention
|
refs/heads/main
|
/dataset.py
|
import os

import torch
from PIL import Image
from torch.utils.data import DataLoader
class TamilDataset(torch.utils.data.Dataset):
    """Meme dataset yielding image tensor, BERT-encoded caption and binary label.

    The label is derived from the file name: names starting with 'N' are
    non-troll (0), everything else troll (1).
    """
    def __init__(self,df,tokenizer,max_len,path,transforms=None):
        # df: DataFrame rows of (img_name, captions); path: image directory.
        self.data_dir = path
        self.df = df
        self.tokenizer = tokenizer
        self.transforms = transforms
        self.max_len = max_len
    def __len__(self):
        return self.df.shape[0]
    def __getitem__(self,index):
        # NOTE(review): `os` is used here but never imported in this module —
        # add `import os` at the top of the file.
        img_name, captions = self.df.iloc[index]
        img_path = os.path.join(self.data_dir,img_name)
        labels = 0 if img_name.startswith('N') else 1
        img = Image.open(img_path).convert('RGB')
        if self.transforms is not None:
            img = self.transforms(img)
        # Tokenise the caption to a fixed length with an attention mask.
        encoding = self.tokenizer.encode_plus(
            captions,
            add_special_tokens=True,
            max_length = self.max_len,
            return_token_type_ids = False,
            padding = 'max_length',
            return_attention_mask= True,
            return_tensors='pt',
            truncation=True
        )
        return {
            'image' : img,
            'text' : captions,
            'input_ids' : encoding['input_ids'].flatten(),
            'attention_mask' : encoding['attention_mask'].flatten(),
            'label' : torch.tensor(labels,dtype=torch.float)
        }
def create_data_loader(df,tokenizer,max_len,batch_size,mytransforms,path,shuffle):
    """Build a DataLoader over a TamilDataset.

    df: (img_name, captions) DataFrame; path: image directory; shuffle:
    whether to reshuffle each epoch.

    Fix: the original accepted *shuffle* but hard-coded shuffle=False in the
    DataLoader, so the training loader (called with shuffle=True) was never
    actually shuffled; the argument is now forwarded.
    """
    ds = TamilDataset(
        df,
        tokenizer,
        max_len,
        path,
        mytransforms
    )
    return DataLoader(ds,
                      batch_size=batch_size,
                      shuffle=shuffle,
                      num_workers=4)
|
{"/splitAndMake.py": ["/utils.py"], "/train.py": ["/model.py", "/dataset.py", "/utils.py"], "/test.py": ["/dataset.py", "/model.py", "/utils.py"]}
|
10,763
|
SiddhanthHegde/You-Need-to-Pay-More-Attention
|
refs/heads/main
|
/utils.py
|
import os
import time
from shutil import copy2

import numpy as np
import torch
import torch.nn as nn
def move_data(start, troll, not_troll):
    """Copy every file in *start* into one of two class directories.

    File names beginning with 'N' go to *not_troll*; all others to *troll*.
    """
    for fname in os.listdir(start):
        source = os.path.join(start, fname)
        destination = not_troll if fname.startswith('N') else troll
        copy2(source, destination)
def split_data(start, train, val, split):
    """Copy the first *split* files of *start* (in os.listdir order) into
    *val* and every remaining file into *train*."""
    for idx, fname in enumerate(os.listdir(start)):
        target = val if idx < split else train
        copy2(os.path.join(start, fname), target)
def epoch_time(start_time, end_time):
    """Return (whole minutes, whole seconds) of wall time between two timestamps."""
    elapsed = int(end_time - start_time)
    minutes, seconds = divmod(elapsed, 60)
    return minutes, seconds
def train_epoch(model,data_loader,loss_fn,optimizer,device,scheduler,n_examples):
    """Run one training epoch; return (accuracy, mean batch loss).

    n_examples is the total number of training examples, used to normalise
    the accuracy.
    """
    model = model.train()
    losses = []
    correct_predictions = 0
    for idx, data in enumerate(data_loader):
        input_ids = data['input_ids'].to(device)
        attention_mask = data['attention_mask'].to(device)
        labels = data['label'].to(device)
        # Reshape targets to match the model's (batch, 1) logits.
        labelsviewed = labels.view(labels.shape[0],1)
        image = data['image'].to(device)
        outputs = model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            img=image
        )
        # NOTE(review): the loss pairs these outputs with BCEWithLogitsLoss,
        # i.e. they are raw logits — thresholding at 0.5 corresponds to
        # probability sigmoid(0.5)≈0.62, not 0.5; threshold at 0 (or apply
        # sigmoid first) to make the accuracy consistent with the loss.
        preds = [0 if x < 0.5 else 1 for x in outputs]
        preds = torch.tensor(preds).to(device)
        loss = loss_fn(outputs,labelsviewed)
        correct_predictions += torch.sum(preds == labels)
        losses.append(loss.item())
        loss.backward()
        # NOTE(review): `nn` (here) and `np` (below) are not imported in this
        # module — add `import torch.nn as nn` and `import numpy as np`.
        nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()
    return correct_predictions.double() / n_examples, np.mean(losses)
def eval_model(model, data_loader, loss_fn, device, n_examples):
model = model.eval()
losses = []
correct_predictions = 0
with torch.no_grad():
for d in data_loader:
input_ids = d["input_ids"].to(device)
attention_mask = d["attention_mask"].to(device)
labels = d["label"].to(device)
labelsviewed = labels.view(labels.shape[0],1)
image = d['image'].to(device)
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask,
img=image
)
preds = [0 if x < 0.5 else 1 for x in outputs]
preds = torch.tensor(preds).to(device)
loss = loss_fn(outputs, labelsviewed)
correct_predictions += torch.sum(preds == labels)
losses.append(loss.item())
return correct_predictions.double() / n_examples, np.mean(losses)
def get_predictions(model,data_loader, device):
model = model.eval()
f_preds = []
with torch.no_grad():
for d in data_loader:
input_ids = d["input_ids"].to(device)
attention_mask = d["attention_mask"].to(device)
image = d['image'].to(device)
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask,
img=image
)
preds = ['Non-troll' if x < 0.5 else 'Troll' for x in outputs]
for j in preds:
f_preds.append(j)
return f_preds
|
{"/splitAndMake.py": ["/utils.py"], "/train.py": ["/model.py", "/dataset.py", "/utils.py"], "/test.py": ["/dataset.py", "/model.py", "/utils.py"]}
|
10,764
|
SiddhanthHegde/You-Need-to-Pay-More-Attention
|
refs/heads/main
|
/test.py
|
# Inference entry point: loads the trained multimodal checkpoint and
# produces troll / non-troll predictions for the held-out test split.
import torch
import pandas as pd
from dataset import create_data_loader
from model import multimodal
from utils import get_predictions
from zipfile import ZipFile
# Set to False for a dry run with randomly initialised weights.
LOAD_MODEL = True
device = 'cuda'
model = multimodal()
model = model.to(device)
if LOAD_MODEL:
    # Checkpoint produced by the training script.
    model.load_state_dict(torch.load('vit-bert-1.0val.bin'))
df_test = pd.read_csv('test_captions.csv')
# Drop the unnamed index column that pandas wrote into the CSV.
df_test.drop('Unnamed: 0',axis=1,inplace=True)
# Unpack the test images next to the script.
extract_path = 'test_img.zip'
with ZipFile(extract_path, 'r') as zipObj:
    zipObj.extractall()
# NOTE(review): `tokenizer`, `MAX_LEN`, `BATCH_SIZE` and `my_trans` are not
# defined or imported in this script, so running it as-is raises NameError.
# They are presumably built the same way as in train.py -- confirm.
test_data_loader = create_data_loader(df_test,tokenizer,MAX_LEN,BATCH_SIZE,my_trans,'test_img',False)
submission_preds = get_predictions(model,test_data_loader,device)
|
{"/splitAndMake.py": ["/utils.py"], "/train.py": ["/model.py", "/dataset.py", "/utils.py"], "/test.py": ["/dataset.py", "/model.py", "/utils.py"]}
|
10,767
|
DarkmatterVale/HaikuPorts-Cleaner
|
refs/heads/master
|
/cleaner.py
|
from Options import getOption
from Recipe import RecipeFixer
import os
import timeit
class Cleaner():
    """
    Main class for the ports cleaner. This class handles the management of
    each individual "clean" task/process.
    """

    def __init__(self, options, args):
        """
        Clean the haikuports tree.

        Args:
            options: parsed optparse options (unused directly; the directory
                is fetched via getOption).
            args: positional command-line arguments (currently unused).
        """
        # Time the whole run so the total duration can be reported at the end.
        start = timeit.default_timer()
        # Root of the haikuports checkout, from the -d/--directory option.
        self.directory = getOption("directory")
        # Truncate the log file so each run starts with a fresh log.
        # (The explicit close() was removed: `with` already closes the file.)
        self.logFile = "log"
        with open(os.path.join(os.getcwd(), self.logFile), 'w') as log_file:
            log_file.write("")
        # Clean all recipe files beneath the base directory.
        self.clean_directory(self.directory)
        stop = timeit.default_timer()
        print("Total time to clean " + self.directory + " : " + str(stop - start) + " seconds")

    def clean_directory(self, directory_to_clean):
        """
        Cleans the main haikuports directory & all its subfolders.
        """
        total_recipes = self.tally_recipes(directory_to_clean)
        recipe_index = 0
        for root, dirs, files in os.walk(directory_to_clean):
            path = root.split('/')
            # Bug fix: these two progress lines were Python 2 print
            # statements, which fail under Python 3 (the rest of this
            # file already uses the print() function).
            print((len(path) - 1) * '---', os.path.basename(root))
            for test_file in files:
                if test_file.endswith(".recipe"):
                    recipe_index += 1
                    print(len(path) * '---', test_file, ' ', recipe_index, '/', total_recipes)
                    current_recipe_fixer = RecipeFixer(root, test_file, self.logFile)
                    current_recipe_fixer.clean()
        # Printing out the total recipe count
        print("Cleaned " + str(recipe_index) + " recipes")

    def tally_recipes(self, base_directory):
        """
        Returns the total number of recipes located within the directory
        base_directory (searched recursively).
        """
        total_recipes = 0
        for root, dirs, files in os.walk(base_directory):
            for test_file in files:
                if test_file.endswith(".recipe"):
                    total_recipes += 1
        return total_recipes
|
{"/hp-cleaner.py": ["/cleaner.py", "/Options.py"]}
|
10,768
|
DarkmatterVale/HaikuPorts-Cleaner
|
refs/heads/master
|
/hp-cleaner.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016 Vale Tolpegin
# Distributed under the terms of the MIT License.
# -- Modules ------------------------------------------------------------------
from cleaner import Cleaner
from Options import parseOptions
# -- Start --------------------------------------------------------------------
# Parse (options, args) from the command line and hand them straight to
# Cleaner, whose constructor performs the entire cleaning run.
Cleaner(*parseOptions())
|
{"/hp-cleaner.py": ["/cleaner.py", "/Options.py"]}
|
10,769
|
DarkmatterVale/HaikuPorts-Cleaner
|
refs/heads/master
|
/Options.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2016 Vale Tolpegin
# Distributed under the terms of the MIT License.
# -- Modules ------------------------------------------------------------------
from optparse import OptionParser
# -- global options -----------------------------------------------------------
global __Options__
# -- getOption ===-------------------------------------------------------------
def getOption(string):
    """
    Return the parsed command-line option whose attribute name is `string`.

    Only valid after parseOptions() has populated the module-level
    __Options__ holder.
    """
    return getattr(__Options__, string)
# -- splitCommaSeparatedList --------------------------------------------------
def setCommaSeparatedList(option, opt, value, parser):
    """optparse callback: store `value` split on commas as a list on the option's dest."""
    pieces = value.split(',')
    setattr(parser.values, option.dest, pieces)
# -- parseOptions -------------------------------------------------------------
def parseOptions():
    """
    Build the command-line parser, parse sys.argv, and cache the result in
    the module-level __Options__ so getOption() can serve later lookups.

    Returns:
        The (options, args) pair produced by optparse.
    """
    global __Options__
    parser = OptionParser(usage='usage: %prog [options] portname[-portversion]', version='0.0.1')
    parser.add_option('-d', '--directory', dest='directory', help="haikuports directory")
    __Options__, args = parser.parse_args()
    return (__Options__, args)
|
{"/hp-cleaner.py": ["/cleaner.py", "/Options.py"]}
|
10,770
|
DarkmatterVale/HaikuPorts-Cleaner
|
refs/heads/master
|
/recipe.py
|
import os
import re
class RecipeFixer():
"""
Parses an individual recipe and fixes it.
"""
def __init__(self, baseDir, name, log):
# Set up the ordering for recipe files
self.order = [
"SUMMARY",
"DESCRIPTION",
"HOMEPAGE",
"COPYRIGHT",
"LICENSE",
"REVISION",
"SOURCE_URI",
"CHECKSUM_MD5",
"CHECKSUM_SHA256",
"SOURCE_DIR",
"PATCHES",
"ADDITIONAL_FILES",
"ARCHITECTURES",
"SECONDARY_ARCHITECTURES",
"PROVIDES",
"REQUIRES",
"PROVIDES_devel",
"REQUIRES_devel",
"BUILD_REQUIRES",
"BUILD_PREREQUIRES",
"PATCH()",
"BUILD()",
"INSTALL()",
"TEST()",
]
self.remove_components = [
"STATUS_HAIKU",
"CHECKSUM_MD5",
"DEPEND"
]
self.component_ordering = {
"SUMMARY" : {
"begin_id" : '"',
"end_id" : '"',
"name" : "SUMMARY",
"join" : "=",
"pre_requests" : []
},
"DESCRIPTION" : {
"begin_id" : '"',
"end_id" : '"',
"name" : "DESCRIPTION",
"join" : "=",
"pre_requests" : []
},
"HOMEPAGE" : {
"begin_id" : '"',
"end_id" : '"',
"name" : "HOMEPAGE",
"join" : "=",
"pre_requests" : []
},
"COPYRIGHT" : {
"begin_id" : '"',
"end_id" : '"',
"name" : "COPYRIGHT",
"join" : "=",
"pre_requests" : []
},
"LICENSE" : {
"begin_id" : '"',
"end_id" : '"',
"name" : "LICENSE",
"join" : "=",
"pre_requests" : []
},
"REVISION" : {
"begin_id" : '"',
"end_id" : '"',
"name" : "REVISION",
"join" : "=",
"pre_requests" : []
},
"SOURCE_URI" : {
"begin_id" : '"',
"end_id" : '"',
"name" : "SOURCE_URI",
"join" : "=",
"pre_requests" : []
},
"CHECKSUM_SHA256" : {
"begin_id" : '"',
"end_id" : '"',
"name" : "CHECKSUM_SHA256",
"join" : "=",
"pre_requests" : []
},
"SOURCE_DIR" : {
"begin_id" : '"',
"end_id" : '"',
"name" : "SOURCE_DIR",
"join" : "=",
"pre_requests" : []
},
"PATCHES" : {
"begin_id" : '"',
"end_id" : '"',
"name" : "PATCHES",
"join" : "=",
"pre_requests" : []
},
"ADDITIONAL_FILES" : {
"begin_id" : '"',
"end_id" : '"',
"name" : "ADDITIONAL_FILES",
"join" : "=",
"pre_requests" : []
},
"ARCHITECTURES" : {
"begin_id" : '"',
"end_id" : '"',
"name" : "ARCHITECTURES",
"join" : "=",
"pre_requests" : ["\n"]
},
"SECONDARY_ARCHITECTURES" : {
"begin_id" : '"',
"end_id" : '"',
"name" : "SECONDARY_ARCHITECTURES",
"join" : "=",
"pre_requests" : []
},
"PROVIDES" : {
"begin_id" : '"',
"end_id" : '"',
"name" : "PROVIDES",
"join" : "=",
"pre_requests" : ["\n"]
},
"REQUIRES" : {
"begin_id" : '"',
"end_id" : '"',
"name" : "REQUIRES",
"join" : "=",
"pre_requests" : []
},
"PROVIDES_devel" : {
"begin_id" : '"',
"end_id" : '"',
"name" : "PROVIDES_devel",
"join" : "=",
"pre_requests" : ["\n"]
},
"REQUIRES_devel" : {
"begin_id" : '"',
"end_id" : '"',
"name" : "REQUIRES_devel",
"join" : "=",
"pre_requests" : []
},
"BUILD_REQUIRES" : {
"begin_id" : '"',
"end_id" : '"',
"name" : "BUILD_REQUIRES",
"join" : "=",
"pre_requests" : ["\n"]
},
"BUILD_PREREQUIRES" : {
"begin_id" : '"',
"end_id" : '"',
"name" : "BUILD_PREREQUIRES",
"join" : "=",
"pre_requests" : []
},
"PATCH()" : {
"begin_id" : '{',
"end_id" : '}',
"name" : "PATCH()",
"join" : "\n",
"pre_requests" : ["\n"]
},
"BUILD()" : {
"begin_id" : '{',
"end_id" : '}',
"name" : "BUILD()",
"join" : "\n",
"pre_requests" : ["\n"]
},
"INSTALL()" : {
"begin_id" : '{',
"end_id" : '}',
"name" : "INSTALL()",
"join" : "\n",
"pre_requests" : ["\n"]
},
"TEST()" : {
"begin_id" : '{',
"end_id" : '}',
"name" : "TEST()",
"join" : "\n",
"pre_requests" : ["\n"]
},
"STATUS_HAIKU" : {
"begin_id" : '"',
"end_id" : '"',
"name" : "STATUS_HAIKU",
"join" : "=",
"pre_requests" : []
},
"DEPEND" : {
"begin_id" : '"',
"end_id" : '"',
"name" : "DEPEND",
"join" : "=",
"pre_requests" : []
},
"CHECKSUM_MD5" : {
"begin_id" : '"',
"end_id" : '"',
"name" : "CHECKSUM_MD5",
"join" : "=",
"pre_requests" : []
}
}
# Setting up logging information
self.logFile = log
# Setting general variables
self.baseDir = baseDir
self.name = name
def clean(self):
"""
Fix the given recipe
"""
# Reset variables
self.content = ""
self.corrected_content = ""
self.logData = ""
# Adding log data
self.logData += ("*" * 70) + "\n"
self.logData += re.sub(".recipe", "", self.name) + "\n"
self.logData += ("*" * 70) + "\n"
# Read the recipe file
with open(os.path.join(self.baseDir, self.name), 'r') as content_file:
self.content = content_file.read()
content_file.close()
# Updating corrected_content
self.corrected_content = self.content
# Determine whether the recipe is of the old format
if self.should_update_format(self.content):
# Apply updating
self.corrected_content = self.convert_old_format(self.content)
self.content = self.corrected_content
self.corrected_content = self.correct_ordering()
# Determine whether clean the recipe
elif self.should_clean_recipe(self.content):
# Apply cleaning
self.corrected_content = self.correct_ordering()
# Save new data to the recipe file
with open(os.path.join(self.baseDir, self.name), 'w') as content_file:
content_file.seek(0)
content_file.write(self.corrected_content)
content_file.close()
# Save the log data
with open(os.path.join(os.getcwd(), self.logFile), 'a') as log_file:
log_file.write(self.logData)
log_file.close()
def correct_ordering(self):
"""
Corrects the ordering of the content within recipes
"""
original_content = self.content
ordered_content = ""
extracted_component_list = {}
# For each component, go through the recipe, find it, and correctly
# place it into the new recipe
for component in self.order:
start_, end_ = self.extract_component(original_content, component)
if start_ != -1 and end_ != -1:
extracted_component_list[component] = {
"text" : str(self.content)[start_:end_] + "\n",
"clean_text" : re.sub(component + self.component_ordering[component]["join"], "", str(self.content)[start_:end_] + "\n")[1:-2]
}
# Correcting mistakes in each component
for component in self.order:
# Correcting SUMMARY related issues
if component == "SUMMARY" and "SUMMARY" in extracted_component_list:
# Make sure it is only one line long
if len(extracted_component_list[component]["text"]) > 70:
print("\033[91mERROR: \033[00m{}".format("SUMMARY must be less than 80 characters long"))
self.logData += "WARNING: SUMMARY must be less than 70 characters long\n"
if len(extracted_component_list[component]["text"].split("\n")) > 2:
extracted_component_list[component]["text"] = re.sub(r"\n", "", extracted_component_list[component]["text"]) + "\n"
self.logData += "WARNING: Removing extra newline characters in SUMMARY\n"
# Make sure it does not end in a period
end_character_index = self.find_previous_non_whitespace_character(extracted_component_list[component]["text"], [self.component_ordering[component]["end_id"]], 1)
if end_character_index != -1:
if "." == extracted_component_list[component]["text"][end_character_index]:
extracted_component_list[component]["text"] = extracted_component_list[component]["text"][:end_character_index] + extracted_component_list[component]["text"][(end_character_index + 1):]
self.logData += "WARNING: Removing extra period at the end of SUMMARY\n"
elif component == "SUMMARY" and "SUMMARY" not in extracted_component_list:
print("\033[91mERROR: \033[00m{}".format("Cannot find SUMMARY in recipe"))
self.logData += "ERROR: Cannot find SUMMARY in recipe\n"
self.logData += "WARNING: Adding dummy SUMMARY component in recipe\n"
extracted_component_list[component] = {
#"text" : "# WARNING: Adding dummy SUMMARY component in recipe\n" + component + self.component_ordering[component]["join"] + "\"\"\n",
"text" : "# WARNING: " + component + " must be added to recipe here\n",
"clean_text" : ""
}
# Correcting DESCRIPTION related issues
if component == "DESCRIPTION" and "DESCRIPTION" in extracted_component_list:
# Cleaning ending of component (fixing tabs, etc)
end_character_index = self.find_previous_non_whitespace_character(extracted_component_list[component]["text"], [self.component_ordering[component]["end_id"]], 1)
if end_character_index != -1:
extracted_component_list[component]["text"] = extracted_component_list[component]["text"][:(end_character_index + 1)] + self.component_ordering[component]["end_id"] + "\n"
elif component == "DESCRIPTION" and "DESCRIPTION" not in extracted_component_list:
print("\033[91mERROR: \033[00m{}".format("Cannot find DESCRIPTION in recipe"))
self.logData += "ERROR: Cannot find DESCRIPTION in recipe\n"
self.logData += "WARNING: Adding dummy DESCRIPTION component in recipe\n"
extracted_component_list[component] = {
#"text" : "# WARNING: Adding dummy DESCRIPTION component in recipe\n" + component + self.component_ordering[component]["join"] + "\"\"\n",
"text" : "# WARNING: " + component + " must be added to recipe here\n",
"clean_text" : ""
}
# Correcting HOMPAGE related issues
if component == "HOMEPAGE" and "HOMEPAGE" in extracted_component_list:
# If it is multi-line, make sure it is correctly formatted
if len(extracted_component_list[component]["text"].split("\n")) > 2:
# Getting the individual items within provides
num_, instances_ = self.number_of_instances(extracted_component_list[component]["clean_text"], "*", ["\n"])
# Generating the correct homepage component
generated_text = component + self.component_ordering[component]["join"] + "\"" + re.sub("\t", "", instances_[0]) + "\n"
# Since the first COPYRIGHT is not supposed to be on a newline, ignore it
num_ -= 1
instances_ = instances_[1:]
for instance in instances_:
cleaned_instance = ""
for non_spaced in self.remove_characters(instance, ["\t"]).split(" "):
if non_spaced != "":
cleaned_instance += " " + non_spaced
cleaned_instance = cleaned_instance[1:]
if "#" in instance:
generated_text += instance + "\n"
else:
generated_text += "\t" + cleaned_instance + "\n"
# Cleaning ending of component (fixing tabs, etc)
end_character_index = self.find_previous_non_whitespace_character(generated_text, [], 0)
if end_character_index != -1:
generated_text = generated_text[:(end_character_index + 1)] + self.component_ordering[component]["end_id"] + "\n"
extracted_component_list[component]["text"] = generated_text
elif component == "HOMEPAGE" and component not in extracted_component_list:
self.logData += "WARNING: Adding dummy " + component + " component in recipe\n"
extracted_component_list[component] = {
#"text" : "# WARNING: Adding dummy " + component + " component in recipe\n" + component + self.component_ordering[component]["join"] + "\"\"\n",
"text" : "# WARNING: " + component + " must be added to recipe here\n",
"clean_text" : ""
}
# Correcting COPYRIGHT related issues
if component == "COPYRIGHT" and "COPYRIGHT" in extracted_component_list:
# If it is multi-line, make sure it is correctly formatted
if len(extracted_component_list[component]["text"].split("\n")) > 2:
# Getting the individual items within provides
num_, instances_ = self.number_of_instances(extracted_component_list[component]["clean_text"], "*", ["\n"])
# Cleaning all extra commas
for instance_index in range(0, num_):
for character_index in range(1, len(instances_[instance_index]) - 3):
try:
if instances_[instance_index][character_index] == ",":
if re.sub("[0-9]", "", instances_[instance_index][character_index - 1]) == "" and instances_[instance_index][character_index + 1] == " " and re.sub("[0-9]", "", instances_[instance_index][character_index + 2]) != "":
instances_[instance_index] = instances_[instance_index][:character_index] + instances_[instance_index][character_index + 1:]
except:
pass
# Generating the correct copyright component
if instances_[0][0] == "\t":
generated_text = component + self.component_ordering[component]["join"] + "\"" + instances_[0][1:] + "\n"
else:
generated_text = component + self.component_ordering[component]["join"] + "\"" + instances_[0] + "\n"
for instance_index in range(1, len(instances_)):
cleaned_instance = ""
for non_spaced in self.remove_characters(instances_[instance_index], ["\t"]).split(" "):
if non_spaced != "":
cleaned_instance += " " + non_spaced
cleaned_instance = cleaned_instance[1:]
if "#" in instances_[instance_index]:
generated_text += instances_[instance_index] + "\n"
elif instance_index > 0:
if "\\" in instances_[instance_index - 1]:
generated_text += cleaned_instance + "\n"
continue
generated_text += "\t" + cleaned_instance + "\n"
# Cleaning ending of component (fixing tabs, etc)
end_character_index = self.find_previous_non_whitespace_character(generated_text, [], 0)
if end_character_index != -1:
generated_text = generated_text[:(end_character_index + 1)] + self.component_ordering[component]["end_id"] + "\n"
extracted_component_list[component]["text"] = generated_text
elif component == "COPYRIGHT" and component not in extracted_component_list:
self.logData += "WARNING: Adding dummy " + component + " component in recipe\n"
extracted_component_list[component] = {
#"text" : "# WARNING: Adding dummy " + component + " component in recipe\n" + component + self.component_ordering[component]["join"] + "\"\"\n",
"text" : "# WARNING: " + component + " must be added to recipe here\n",
"clean_text" : ""
}
# Correcting LICENSE related issues
if component == "LICENSE" and "LICENSE" in extracted_component_list:
# If it is multi-line, make sure it is correctly formatted
if len(extracted_component_list[component]["text"].split("\n")) > 2:
# Getting the individual items within provides
num_, instances_ = self.number_of_instances(extracted_component_list[component]["clean_text"], "*", ["\n"])
# Generating the correct license component
generated_text = component + self.component_ordering[component]["join"] + "\"" + re.sub("\t", "", instances_[0]) + "\n"
# Since the first COPYRIGHT is not supposed to be on a newline, ignore it
num_ -= 1
instances_ = instances_[1:]
for instance in instances_:
cleaned_instance = ""
for non_spaced in self.remove_characters(instance, ["\t"]).split(" "):
if non_spaced != "":
cleaned_instance += " " + non_spaced
cleaned_instance = cleaned_instance[1:]
if "#" in instance:
generated_text += instance + "\n"
else:
generated_text += "\t" + cleaned_instance + "\n"
# Cleaning ending of component (fixing tabs, etc)
end_character_index = self.find_previous_non_whitespace_character(generated_text, [], 0)
if end_character_index != -1:
generated_text = generated_text[:(end_character_index + 1)] + self.component_ordering[component]["end_id"] + "\n"
extracted_component_list[component]["text"] = generated_text
elif component == "LICENSE" and component not in extracted_component_list:
self.logData += "WARNING: Adding dummy " + component + " component in recipe\n"
extracted_component_list[component] = {
#"text" : "# WARNING: Adding dummy " + component + " component in recipe\n" + component + self.component_ordering[component]["join"] + "\"\"\n",
"text" : "# WARNING: " + component + " must be added to recipe here\n",
"clean_text" : ""
}
# Correcting REVISION related issues
if component == "REVISION" and "REVISION" in extracted_component_list:
# Make sure it is only one line long
if len(extracted_component_list[component]["text"].split("\n")) > 2:
extracted_component_list[component]["text"] = re.sub(r"\n", "", extracted_component_list[component]["text"]) + "\n"
self.logData += "WARNING: Removing extra newline characters in REVISION\n"
elif component == "REVISION" and component not in extracted_component_list:
self.logData += "WARNING: Adding dummy " + component + " component in recipe\n"
extracted_component_list[component] = {
"text" : "# WARNING: Adding dummy " + component + " component in recipe\n" + component + self.component_ordering[component]["join"] + "\"1\"\n",
"clean_text" : ""
}
# Correcting SOURCE_URI related issues
if component == "SOURCE_URI" and "SOURCE_URI" in extracted_component_list:
# Make sure it is only one line long
if len(extracted_component_list[component]["text"].split("\n")) > 2:
extracted_component_list[component]["text"] = re.sub(r"\n", "", extracted_component_list[component]["text"]) + "\n"
self.logData += "WARNING: Removing extra newline characters in SOURCE_URI\n"
elif component == "SOURCE_URI" and component not in extracted_component_list:
self.logData += "WARNING: Adding dummy " + component + " component in recipe\n"
extracted_component_list[component] = {
#"text" : "# WARNING: Adding dummy " + component + " component in recipe\n" + component + self.component_ordering[component]["join"] + "\"\"\n",
"text" : "# WARNING: " + component + " must be added to recipe here\n",
"clean_text" : ""
}
# Correcting CHECKSUM_SHA256 related issues
if component == "CHECKSUM_SHA256" and "CHECKSUM_SHA256" in extracted_component_list:
# Make sure it is only one line long
if len(extracted_component_list[component]["text"].split("\n")) > 2:
extracted_component_list[component]["text"] = re.sub(r"\n", "", extracted_component_list[component]["text"]) + "\n"
self.logData += "WARNING: Removing extra newline characters in CHECKSUM_SHA256\n"
elif component == "CHECKSUM_SHA256" and component not in extracted_component_list:
self.logData += "WARNING: Adding dummy " + component + " component in recipe\n"
extracted_component_list[component] = {
#"text" : "# WARNING: Adding dummy " + component + " component in recipe\n" + component + self.component_ordering[component]["join"] + "\"\"\n",
"text" : "# WARNING: " + component + " must be added to recipe here\n",
"clean_text" : ""
}
# Correcting SOURCE_DIR related issues
if component == "SOURCE_DIR" and "SOURCE_DIR" in extracted_component_list:
# Make sure it is only one line long
if len(extracted_component_list[component]["text"].split("\n")) > 2:
extracted_component_list[component]["text"] = re.sub(r"\n", "", extracted_component_list[component]["text"]) + "\n"
self.logData += "WARNING: Removing extra newline characters in SOURCE_DIR\n"
# Correcting PATCHES related issues
if component == "PATCHES" and "PATCHES" in extracted_component_list:
# If it is multi-line, make sure it is correctly formatted
if len(extracted_component_list[component]["text"].split("\n")) > 2:
# Getting the individual items within provides
num_, instances_ = self.number_of_instances(extracted_component_list[component]["clean_text"], "*", ["\n"])
# Generating the correct patches component
generated_text = component + self.component_ordering[component]["join"] + "\"\n"
for instance in instances_:
cleaned_instance = ""
for non_spaced in self.remove_characters(instance, ["\t"]).split(" "):
if non_spaced != "":
cleaned_instance += " " + non_spaced
cleaned_instance = cleaned_instance[1:]
if "#" in instance:
generated_text += instance + "\n"
else:
generated_text += "\t" + cleaned_instance + "\n"
# Cleaning ending of component (fixing tabs, etc)
end_character_index = self.find_previous_non_whitespace_character(generated_text, [], 0)
if end_character_index != -1:
generated_text = generated_text[:(end_character_index + 1)] + "\n\t" + self.component_ordering[component]["end_id"] + "\n"
extracted_component_list[component]["text"] = generated_text
# Correcting ADDITIONAL_FILES related issues
if component == "ADDITIONAL_FILES" and "ADDITIONAL_FILES" in extracted_component_list:
# If it is multi-line, make sure it is correctly formatted
if len(extracted_component_list[component]["text"].split("\n")) > 2:
# Getting the individual items within provides
num_, instances_ = self.number_of_instances(extracted_component_list[component]["clean_text"], "*", ["\n"])
# Generating the correct additional_files component
generated_text = component + self.component_ordering[component]["join"] + "\"\n"
for instance in instances_:
cleaned_instance = ""
for non_spaced in self.remove_characters(instance, ["\t"]).split(" "):
if non_spaced != "":
cleaned_instance += " " + non_spaced
cleaned_instance = cleaned_instance[1:]
if "#" in instance:
generated_text += instance + "\n"
else:
generated_text += "\t" + cleaned_instance + "\n"
# Cleaning ending of component (fixing tabs, etc)
end_character_index = self.find_previous_non_whitespace_character(generated_text, [], 0)
if end_character_index != -1:
generated_text = generated_text[:(end_character_index + 1)] + "\n\t" + self.component_ordering[component]["end_id"] + "\n"
extracted_component_list[component]["text"] = generated_text
# Correcting ARCHITECTURES related issues
if component == "ARCHITECTURES" and component not in extracted_component_list:
self.logData += "WARNING: Adding dummy " + component + " component in recipe\n"
extracted_component_list[component] = {
"text" : "# WARNING: Adding dummy " + component + " component in recipe\n" + component + self.component_ordering[component]["join"] + "\"?x86 ?x86_gcc2\"\n",
"clean_text" : ""
}
# Correcting SECONDARY_ARCHITECTURES related issues
if component == "SECONDARY_ARCHITECTURES" and "SECONDARY_ARCHITECTURES" in extracted_component_list:
# Make sure it is only one line long
if len(extracted_component_list[component]["text"].split("\n")) > 2:
extracted_component_list[component]["text"] = re.sub(r"\n", "", extracted_component_list[component]["text"]) + "\n"
self.logData += "WARNING: Removing extra newline characters in SECONDARY_ARCHITECTURES\n"
# Correcting PROVIDES related issues
if component == "PROVIDES" and "PROVIDES" in extracted_component_list:
# Getting the individual items within provides
num_, instances_ = self.number_of_instances(extracted_component_list[component]["clean_text"], "*", ["\n"])
# Generating the correct provides component
generated_text = component + self.component_ordering[component]["join"] + "\"\n"
for instance in instances_:
cleaned_instance = ""
for non_spaced in self.remove_characters(instance, ["\t"]).split(" "):
if non_spaced != "":
cleaned_instance += " " + non_spaced
cleaned_instance = cleaned_instance[1:]
if "#" in instance:
generated_text += instance + "\n"
else:
generated_text += "\t" + cleaned_instance + "\n"
# Cleaning ending of component (fixing tabs, etc)
end_character_index = self.find_previous_non_whitespace_character(generated_text, [], 0)
if end_character_index != -1:
generated_text = generated_text[:(end_character_index + 1)] + "\n\t" + self.component_ordering[component]["end_id"] + "\n"
extracted_component_list[component]["text"] = generated_text
elif component == "PROVIDES" and "PROVIDES" not in extracted_component_list:
extracted_component_list["PROVIDES"] = {
"text" : "# WARNING: Adding dummy " + component + " component in recipe\n" + "PROVIDES=\"\n\t" + re.sub("-.*", "", self.name) + " = $portVersion\n\t\"\n",
"clean_text" : re.sub("-.*", "", self.name) + " = $portVersion"
}
self.logData += "WARNING: Adding dummy missing PROVIDES in recipe"
# Correcting REQUIRES related issues
if component == "REQUIRES" and "REQUIRES" in extracted_component_list:
# Making sure that a "haiku" is in the REQUIRES component
if "SECONDARY_ARCHITECTURES" in extracted_component_list:
if "haiku$secondaryArchSuffix\n" not in extracted_component_list[component]["text"] and "haiku${secondaryArchSuffix}" not in extracted_component_list[component]["text"]:
extracted_component_list[component]["text"] = component + self.component_ordering[component]["join"] + "\"\n\thaiku$secondaryArchSuffix\n\t" + extracted_component_list[component]["clean_text"]
extracted_component_list[component]["clean_text"] = "\"\n\thaiku$secondaryArchSuffix\n\t" + extracted_component_list[component]["clean_text"]
else:
if "haiku\n" not in extracted_component_list[component]["text"] and "haiku$secondaryArchSuffix" not in extracted_component_list[component]["text"] and "haiku${secondaryArchSuffix}" not in extracted_component_list[component]["text"]:
extracted_component_list[component]["text"] = component + self.component_ordering[component]["join"] + "\"\n\thaiku\n\t" + extracted_component_list[component]["clean_text"]
extracted_component_list[component]["clean_text"] = "\"\n\thaiku\n\t" + extracted_component_list[component]["clean_text"]
# Getting the individual items within provides
num_, instances_ = self.number_of_instances(extracted_component_list[component]["clean_text"], "*", ["\n"])
# Generating the correct requires component
generated_text = component + self.component_ordering[component]["join"] + "\"\n"
for instance in instances_:
cleaned_instance = ""
for non_spaced in self.remove_characters(instance, ["\t"]).split(" "):
if non_spaced != "":
cleaned_instance += " " + non_spaced
cleaned_instance = cleaned_instance[1:]
if "#" in instance:
generated_text += instance + "\n"
else:
generated_text += "\t" + cleaned_instance + "\n"
# Cleaning ending of component (fixing tabs, etc)
end_character_index = self.find_previous_non_whitespace_character(generated_text, [], 0)
if end_character_index != -1:
generated_text = generated_text[:(end_character_index + 1)] + "\n\t" + self.component_ordering[component]["end_id"] + "\n"
extracted_component_list[component]["text"] = generated_text
elif component == "REQUIRES" and "REQUIRES" not in extracted_component_list:
extracted_component_list["REQUIRES"] = {
"text" : "# WARNING: Adding dummy " + component + " component in recipe\n" + "REQUIRES=\"\n\thaiku\n\t\"\n",
"clean_text" : "haiku"
}
self.logData += "WARNING: Adding dummy missing REQUIRES in recipe"
# Correcting PROVIDES_devel related issues
if component == "PROVIDES_devel" and "PROVIDES_devel" in extracted_component_list:
# Getting the individual items within provides
num_, instances_ = self.number_of_instances(extracted_component_list[component]["clean_text"], "*", ["\n"])
# Generating the correct provides_devel component
generated_text = component + self.component_ordering[component]["join"] + "\"\n"
for instance in instances_:
cleaned_instance = ""
for non_spaced in self.remove_characters(instance, ["\t"]).split(" "):
if non_spaced != "":
cleaned_instance += " " + non_spaced
cleaned_instance = cleaned_instance[1:]
if "#" in instance:
generated_text += instance + "\n"
else:
generated_text += "\t" + cleaned_instance + "\n"
# Cleaning ending of component (fixing tabs, etc)
end_character_index = self.find_previous_non_whitespace_character(generated_text, [], 0)
if end_character_index != -1:
generated_text = generated_text[:(end_character_index + 1)] + "\n\t" + self.component_ordering[component]["end_id"] + "\n"
extracted_component_list[component]["text"] = generated_text
# Make sure there is a REQUIRES_devel component in the recipe
if "REQUIRES_devel" not in extracted_component_list:
if "SECONDARY_ARCHITECTURES" in extracted_component_list:
extracted_component_list["REQUIRES_devel"] = {
"text" : "REQUIRES_devel=\"\n\thaiku$\{secondaryArchSuffix\}_devel\n\t\"\n",
"clean_text" : "haiku$\{secondaryArchSuffix\}_devel"
}
self.logData += "WARNING: Adding missing REQUIRES_devel component\n"
else:
extracted_component_list["REQUIRES_devel"] = {
"text" : "REQUIRES_devel=\"\n\thaiku_devel\n\t\"\n",
"clean_text" : "haiku_devel"
}
self.logData += "WARNING: Adding missing REQUIRES_devel component\n"
# Correcting REQUIRES_devel related issues
if component == "REQUIRES_devel" and "REQUIRES_devel" in extracted_component_list:
# Getting the individual items within provides
num_, instances_ = self.number_of_instances(extracted_component_list[component]["clean_text"], "*", ["\n"])
# Generating the correct requires_devel component
generated_text = component + self.component_ordering[component]["join"] + "\"\n"
for instance in instances_:
cleaned_instance = ""
for non_spaced in self.remove_characters(instance, ["\t"]).split(" "):
if non_spaced != "":
cleaned_instance += " " + non_spaced
cleaned_instance = cleaned_instance[1:]
if "#" in instance:
generated_text += instance + "\n"
else:
generated_text += "\t" + cleaned_instance + "\n"
# Cleaning ending of component (fixing tabs, etc)
end_character_index = self.find_previous_non_whitespace_character(generated_text, [], 0)
if end_character_index != -1:
generated_text = generated_text[:(end_character_index + 1)] + "\n\t" + self.component_ordering[component]["end_id"] + "\n"
extracted_component_list[component]["text"] = generated_text
# Make sure there is a PROVIDES_devel component in the recipe
if "PROVIDES_devel" not in extracted_component_list:
if "SECONDARY_ARCHITECTURES" in extracted_component_list:
extracted_component_list["PROVIDES_devel"] = {
"text" : "PROVIDES_devel=\"\n\t" + re.sub("-.*", "", self.name) + "$\{secondaryArchSuffix\}_devel = $portVersion\n\t\"\n",
"clean_text" : re.sub("-.*", "", self.name) + "$\{secondaryArchSuffix\}_devel = $portVersion"
}
self.logData += "WARNING: Adding missing PROVIDES_devel component\n"
else:
extracted_component_list["PROVIDES_devel"] = {
"text" : "PROVIDES_devel=\"\n\t" + re.sub("-.*", "", self.name) + "_devel = $portVersion\n\t\"\n",
"clean_text" : re.sub("-.*", "", self.name) + "_devel = $portVersion"
}
self.logData += "WARNING: Adding missing PROVIDES_devel component\n"
# Correcting REQUIRES_devel related issues
if component == "BUILD_REQUIRES" and "BUILD_REQUIRES" in extracted_component_list:
# Getting the individual items within provides
num_, instances_ = self.number_of_instances(extracted_component_list[component]["clean_text"], "*", ["\n"])
# Generating the correct build_requires component
generated_text = component + self.component_ordering[component]["join"] + "\"\n"
for instance in instances_:
cleaned_instance = ""
for non_spaced in self.remove_characters(instance, ["\t"]).split(" "):
if non_spaced != "":
cleaned_instance += " " + non_spaced
cleaned_instance = cleaned_instance[1:]
if "#" in instance:
generated_text += instance + "\n"
else:
generated_text += "\t" + cleaned_instance + "\n"
# Cleaning ending of component (fixing tabs, etc)
end_character_index = self.find_previous_non_whitespace_character(generated_text, [], 0)
if end_character_index != -1:
generated_text = generated_text[:(end_character_index + 1)] + "\n\t" + self.component_ordering[component]["end_id"] + "\n"
if extracted_component_list[component]["clean_text"] != "":
extracted_component_list[component]["text"] = generated_text
elif component == "BUILD_REQUIRES" and component not in extracted_component_list:
self.logData += "WARNING: Adding dummy " + component + " component in recipe\n"
extracted_component_list[component] = {
"text" : "# WARNING: Adding dummy " + component + " component in recipe\n" + component + self.component_ordering[component]["join"] + "\"\n\thaiku_devel\n\t\"\n",
"clean_text" : ""
}
# Correcting REQUIRES_devel related issues
if component == "BUILD_PREREQUIRES" and "BUILD_PREREQUIRES" in extracted_component_list:
# Getting the individual items within provides
num_, instances_ = self.number_of_instances(extracted_component_list[component]["clean_text"], "*", ["\n"])
# Generating the correct build_prerequires component
generated_text = component + self.component_ordering[component]["join"] + "\"\n"
for instance in instances_:
cleaned_instance = ""
for non_spaced in self.remove_characters(instance, ["\t"]).split(" "):
if non_spaced != "":
cleaned_instance += " " + non_spaced
cleaned_instance = cleaned_instance[1:]
if "#" in instance:
generated_text += instance + "\n"
else:
generated_text += "\t" + cleaned_instance + "\n"
# Cleaning ending of component (fixing tabs, etc)
end_character_index = self.find_previous_non_whitespace_character(generated_text, [], 0)
if end_character_index != -1:
generated_text = generated_text[:(end_character_index + 1)] + "\n\t" + self.component_ordering[component]["end_id"] + "\n"
extracted_component_list[component]["text"] = generated_text
elif component == "BUILD_PREREQUIRES" and component not in extracted_component_list:
self.logData += "WARNING: Adding dummy " + component + " component in recipe\n"
extracted_component_list[component] = {
#"text" : "# WARNING: Adding dummy " + component + " component in recipe\n" + component + self.component_ordering[component]["join"] + "\"\n\t\"\n",
"text" : "# WARNING: " + component + " must be added to recipe here\n",
"clean_text" : ""
}
# Assembling final information
for component in self.order:
if component in extracted_component_list:
for component_part in self.component_ordering[component]["pre_requests"]:
ordered_content += component_part
ordered_content += extracted_component_list[component]["text"]
# Cleaning up log file
self.logData += "\n"
# Return the final components
return ordered_content
def extract_component(self, text, component_name):
"""
Returns the start and end index for the component with the name
component_name. It not only identifies the start and end index, but
will also grab any additional data that is critical (or in the recipe)

Returns (-1, -1) when the component is absent.  When the component's
begin_id equals its end_id (e.g. a quote character), the scan steps
over backslash-escaped delimiters; when they differ, begin/end pairs
are balanced so nested delimiters are handled.
"""
# Setting up indexes
component_start_index = -1
component_end_index = -1
component = component_name
# Detecting previous component
# NOTE(review): plain substring test -- a short name such as "REQUIRES"
# also matches inside "BUILD_REQUIRES"; presumably the caller's
# processing order compensates.  TODO confirm.
if self.component_ordering[component]["name"] in text:
if self.component_ordering[component]["begin_id"] == self.component_ordering[component]["end_id"]:
component_start_index = text.index(self.component_ordering[component]["name"])
component_text = text[component_start_index:]
start_index = component_text.find(self.component_ordering[component]["begin_id"])
end_index = component_text[start_index + 1:].find(self.component_ordering[component]["end_id"])
# Skip closing delimiters that are escaped with a backslash.
while str(component_text[(start_index + end_index):(start_index + end_index + 1)]) == "\\":
end_index += component_text[start_index + end_index + 2:].find(self.component_ordering[component]["end_id"]) + 1
component_end_index = component_start_index + start_index + end_index + 2
else:
# Distinct delimiters: track nesting depth until it drops back to zero.
nesting_index = 0
component_start_index = text.index(self.component_ordering[component]["name"])
component_text = text[component_start_index:]
start_index = component_text.find(self.component_ordering[component]["begin_id"])
end_index = start_index + 1
nesting_index += 1
while nesting_index > 0:
if self.component_ordering[component]["begin_id"] in component_text[end_index:end_index + 1]:
nesting_index += 1
elif self.component_ordering[component]["end_id"] in component_text[end_index:end_index + 1]:
nesting_index -= 1
end_index += 1
component_end_index = component_start_index + end_index
return component_start_index, component_end_index
def should_clean_recipe(self, content):
    """
    Decide whether the recipe is safe to clean.

    Every recognised component is excised from a working copy of the
    recipe; if anything other than whitespace is left over, the recipe
    contains content the cleaner does not understand and must be skipped.
    """
    remaining = str(content)
    # Strip out each known component, provided nothing unknown precedes it.
    for component in self.order:
        start_index, end_index = self.extract_component(remaining, component)
        if start_index == -1 or end_index == -1:
            continue
        if len(self.remove_whitespace(remaining[:start_index])) == 0:
            remaining = remaining[:start_index] + remaining[end_index + 1:]
    # Anything left behind means the recipe cannot be parsed safely.
    if self.remove_whitespace(remaining) != "":
        self.logData += "ERROR: Cannot parse recipe file with unknown content"
        return False
    return True
def should_update_format(self, content):
    """
    Report whether *content* still contains any component from the old
    recipe format (and therefore needs to be converted).
    """
    return any(old_component in content
               for old_component in self.remove_components)
def remove_whitespace(self, text):
    """
    Return *text* with every whitespace character removed.
    """
    kept = [character for character in text if not character.isspace()]
    return "".join(kept)
def find_previous_non_whitespace_character(self, text, skip_character_list, max_num_chars_to_skip):
    """
    Scan *text* backwards and return the index of the last character that
    is neither whitespace nor one of the skippable characters; at most
    *max_num_chars_to_skip* characters from *skip_character_list* are
    passed over.  Returns -1 when no such character exists.
    """
    skipped_so_far = 0
    for position in range(len(text) - 1, -1, -1):
        candidate = text[position]
        # Whitespace never terminates the scan.
        if not candidate.strip():
            continue
        # A limited budget of "skip" characters may be stepped over.
        if skipped_so_far < max_num_chars_to_skip and candidate in skip_character_list:
            skipped_so_far += 1
            continue
        return position
    return -1
def find_previous_character(self, text, character):
    """
    Return the index of the last occurrence of *character* in *text*,
    or -1 when it never occurs.
    """
    for position in range(len(text) - 1, -1, -1):
        if text[position] == character:
            return position
    return -1
def find_next_non_whitespace_character(self, text, skip_character_list, max_num_chars_to_skip):
    """
    Scan *text* forwards and return the index of the first character that
    is neither whitespace nor one of the skippable characters; at most
    *max_num_chars_to_skip* characters from *skip_character_list* are
    passed over.  Returns -1 when no such character exists.
    """
    skipped_so_far = 0
    for position, candidate in enumerate(text):
        # Whitespace never terminates the scan.
        if not candidate.strip():
            continue
        # A limited budget of "skip" characters may be stepped over.
        if skipped_so_far < max_num_chars_to_skip and candidate in skip_character_list:
            skipped_so_far += 1
            continue
        return position
    return -1
def number_of_instances(self, text, char_to_find, skip_chars):
    """
    Count how many pieces of *text* match *char_to_find* after splitting
    on each separator in *skip_chars* in turn ("" splits on whitespace).
    The special value "*" matches every non-empty piece.  Returns
    (count, matched_pieces); note that supplying several separators
    accumulates matches from each split pass.
    """
    matches = []
    for separator in skip_chars:
        pieces = text.split(separator) if separator != "" else text.split()
        if char_to_find == "*":
            matches.extend(piece for piece in pieces if piece != "")
        else:
            matches.extend(piece for piece in pieces if piece == char_to_find)
    return len(matches), matches
def remove_characters(self, text, chars_to_remove):
    """
    Return *text* with every occurrence of each string in
    *chars_to_remove* deleted.

    Fix: the previous implementation passed each entry to re.sub() as a
    regex pattern, so entries containing regex metacharacters (".", "*",
    "+", ...) deleted far more than intended.  Plain string replacement
    treats every entry literally; for literal entries such as "\\t" the
    result is unchanged.
    """
    for char in chars_to_remove:
        text = text.replace(char, "")
    return text
def convert_old_format(self, text):
"""
Convert recipes from the old format to the new format.

Extracts every known current and obsolete component, rewrites the
obsolete ones (DEPEND entries are merged into REQUIRES; STATUS_HAIKU
is mapped onto ARCHITECTURES) and reassembles the surviving components
in canonical order, prefixed with an auto-conversion warning banner.
"""
warning_text = "# WARNING: THIS RECIPE WAS AUTO-CONVERTED...SEE GIT LOG FOR MORE INFORMATION\n\n"
extracted_component_list = {}
# For each component, go through the recipe, find it, and correctly
# place it into the new recipe
# NOTE(review): the slices below are taken from self.content rather than
# the *text* argument -- only safe while callers always pass
# text == self.content.  TODO confirm.
for component in self.order:
start_, end_ = self.extract_component(text, component)
if start_ != -1 and end_ != -1:
extracted_component_list[component] = {
"text" : str(self.content)[start_:end_] + "\n",
"clean_text" : re.sub(component + self.component_ordering[component]["join"], "", str(self.content)[start_:end_] + "\n")[1:-2]
}
# Obsolete components are extracted too, so they can be converted below.
for component in self.remove_components:
start_, end_ = self.extract_component(text, component)
if start_ != -1 and end_ != -1:
extracted_component_list[component] = {
"text" : str(self.content)[start_:end_] + "\n",
"clean_text" : re.sub(component + self.component_ordering[component]["join"], "", str(self.content)[start_:end_] + "\n")[1:-2]
}
# Cleaning all old components & generating appropriate current
# components
for component in self.remove_components:
# Converting DEPEND into other parts of the recipe
if component == "DEPEND" and component in extracted_component_list:
depend_components = self.extract_depend_components(extracted_component_list[component]["clean_text"])
# A REQUIRES component is created if the recipe lacked one, so the
# DEPEND entries always have somewhere to go.
if "REQUIRES" not in extracted_component_list:
extracted_component_list["REQUIRES"] = {
"text" : "REQUIRES=\"\n\thaiku\n\t\"\n",
"clean_text" : "haiku"
}
text = extracted_component_list["REQUIRES"]["text"]
# Cleaning ending of component (fixing tabs, etc)
end_character_index = self.find_previous_non_whitespace_character(text, [], 0)
if end_character_index != -1:
text = text[:end_character_index - 1]
if text[-1] == "\t":
text = text[:-2]
# Append each [name, operator, version] triple as a REQUIRES line.
for depend_component in depend_components:
text += "\t" + depend_component[0] + " " + depend_component[1] + " " + depend_component[2] + "\n"
text += "\t\""
extracted_component_list["REQUIRES"]["text"] = text + "\n"
extracted_component_list["REQUIRES"]["clean_text"] = re.sub("REQUIRES" + self.component_ordering["REQUIRES"]["join"], "", text + "\n")[1:-2]
# Converting STATUS_HAIKU
# stable -> "x86_gcc2", broken -> "!x86_gcc2", anything else -> "?x86_gcc2".
if component == "STATUS_HAIKU" and component in extracted_component_list:
if extracted_component_list[component]["clean_text"].lower() == "stable":
extracted_component_list["ARCHITECTURES"] = {
"text" : "ARCHITECTURES" + self.component_ordering["ARCHITECTURES"]["join"] + "\"x86_gcc2\"\n",
"clean_text" : "x86_gcc2"
}
elif extracted_component_list[component]["clean_text"].lower() == "broken":
extracted_component_list["ARCHITECTURES"] = {
"text" : "ARCHITECTURES" + self.component_ordering["ARCHITECTURES"]["join"] + "\"!x86_gcc2\"\n",
"clean_text" : "!x86_gcc2"
}
else:
extracted_component_list["ARCHITECTURES"] = {
"text" : "ARCHITECTURES" + self.component_ordering["ARCHITECTURES"]["join"] + "\"?x86_gcc2\"\n",
"clean_text" : "?x86_gcc2"
}
# Assembling final information
ordered_content = warning_text
for component in self.order:
if component in extracted_component_list:
for component_part in self.component_ordering[component]["pre_requests"]:
ordered_content += component_part
ordered_content += extracted_component_list[component]["text"]
return ordered_content
def extract_depend_components(self, clean_depend_component):
    """
    Extracts each dependency from an old-style DEPEND component body.

    Each non-blank line is expected to look like
    ``category/name <operator> <version>``; the ``category/`` prefix is
    stripped from the name.  Returns a list of
    ``[name, version_operator, version]`` triples, with missing operator
    or version fields left as empty strings.

    Fix: the old implementation wrapped the whole parse in a bare
    ``except:`` without breaking, so a line with a partial constraint
    (e.g. ``libfoo 1.0``) kept iterating and clobbered the already-found
    name with a later token.  The name is now captured outside the
    try-block, only IndexError (token list exhausted) is swallowed, and
    parsing stops at the first non-blank token.
    """
    depend_components = []
    for component in clean_depend_component.split("\n"):
        if self.remove_whitespace(component) != "":
            indiv_dependency_components = component.split(" ")
            name = ""
            ver_operator = ""
            version = ""
            for indiv_comp_index in range(0, len(indiv_dependency_components)):
                if self.remove_whitespace(indiv_dependency_components[indiv_comp_index]) != "":
                    # Drop any "category/" prefix from the package name.
                    name = re.sub(".*/", "", indiv_dependency_components[indiv_comp_index])
                    try:
                        ver_operator = indiv_dependency_components[indiv_comp_index + 1]
                        version = indiv_dependency_components[indiv_comp_index + 2]
                    except IndexError:
                        # Line has no (complete) version constraint; keep
                        # whatever fields were found.
                        pass
                    break
            depend_components.append([name, ver_operator, version])
    # Returning the dependencies found in the DEPEND component
    return depend_components
|
{"/hp-cleaner.py": ["/cleaner.py", "/Options.py"]}
|
10,850
|
brianjohnhaas/indrops
|
refs/heads/master
|
/count_barcode_distribution.py
|
import re
from collections import defaultdict
try:
import cPickle as pickle
except:
import pickle
from indrops import from_fastq, to_fastq
def count():
    """
    Pass FASTQ records from stdin through to stdout unchanged while
    tallying reads per cell barcode (the first ':'-separated field of
    each read name); the tally dict is pickled to stderr at the end.
    """
    reads_per_cell = defaultdict(int)
    for name, seq, qual in from_fastq(sys.stdin):
        cell_name = name.partition(':')[0]
        reads_per_cell[cell_name] += 1
        sys.stdout.write(to_fastq(name, seq, qual))
    # The binary pickle goes to stderr so stdout stays a clean FASTQ stream.
    pickle.dump(dict(reads_per_cell), sys.stderr)
# Script entry point: sys is imported here (used inside count());
# argparse is imported but not used in this script.
if __name__=="__main__":
import sys, argparse
count()
|
{"/count_barcode_distribution.py": ["/indrops.py"], "/trim_polyA_and_filter_low_complexity_reads.py": ["/indrops.py"]}
|
10,851
|
brianjohnhaas/indrops
|
refs/heads/master
|
/quantify_umifm_from_alignments.py
|
import pysam
from collections import defaultdict
try:
import cPickle as pickle
except:
import pickle
from copy import copy
from itertools import combinations
from numpy import memmap
# from indrops import load_indexed_memmapped_array
def print_to_log(msg):
    """
    Wrapper to eventually log in a smarter way; for now it writes *msg*
    (stringified) to stderr with a trailing newline.
    """
    sys.stderr.write('{0}\n'.format(msg))
def quant(args):
"""
Quantify UMI-filtered mappings (UMIFM) per gene for one cell barcode.

Reads transcriptome alignments for a single barcode from a SAM stream
on stdin, filters multi-mapping reads, greedily resolves each UMI's
reads to a minimal set of genes, and appends count / ambiguity /
metrics rows to the open files carried in *args*.  Optionally writes
the chosen alignments (with annotation tags) to args.bam.
"""
#Convert arg to more explicit names
multiple_alignment_threshold = args.m
distance_from_tx_end = args.d
split_ambiguities = args.split_ambi
ambig_count_threshold = args.u
using_mixed_ref = args.mixed_ref
#Assume that references are named 'transcript_name|gene_name'
tx_to_gid = lambda tx: tx.split('|')[1]
umis_for_geneset = defaultdict(set)
sam_input = pysam.AlignmentFile("-", "r" )
# Tuple containing lengths of reference sequences
ref_lengths = copy(sam_input.lengths)
# Bam file to be generated
if args.bam:
sam_output = pysam.AlignmentFile(args.bam, "wb", template=sam_input)
# Load cache of low complexity regions
soft_masked_regions = None
if args.soft_masked_regions:
low_complexity_regions = pickle.load(args.soft_masked_regions)
soft_masked_regions = defaultdict(set)
for tx, regions in low_complexity_regions.items():
if regions:
soft_masked_regions[tx] = set.union(*[set(range(a,b)) for a,b in regions])
# Reads with more than this fraction of soft-masked bases are dropped.
soft_masked_fraction_threshold = 0.5
def process_read_alignments(alignments):
"""input: one-element list of a single alignment from a bam file
corresponding to a given barcode"""
# Remove any alignments that aren't supported by a certain number of non-poly A bases.
dependent_on_polyA_tail = False
if args.min_non_polyA > 0:
polyA_independent_alignments = []
for a in alignments:
start_of_polyA = ref_lengths[a.reference_id] - args.polyA
if a.reference_end < start_of_polyA:
# The alignment doesn't overlap the polyA tail.
polyA_independent_alignments.append(a)
else:
non_polyA_part = start_of_polyA - a.reference_start
if non_polyA_part > args.min_non_polyA:
polyA_independent_alignments.append(a)
dependent_on_polyA_tail = len(polyA_independent_alignments) == 0
alignments = polyA_independent_alignments
# Remove any alignments that are mostly to low complexity regions
if soft_masked_regions:
for a in alignments:
tx_id = sam_input.getrname(a.reference_id)
soft_masked_bases = soft_masked_regions[tx_id].intersection(set(range(a.reference_start, a.reference_end)))
soft_masked_fraction = float(len(soft_masked_bases))/(a.reference_end - a.reference_start)
a.setTag('XC', '%.2f' % soft_masked_fraction)
alignments = [a for a in alignments if float(a.opt('XC')) < soft_masked_fraction_threshold]
# We need to obtain Transcript IDs in terms of reference names (Transcrupt_ID|Gene_ID)
# as opposed to the arbitrary 'a.reference_id' number
tx_ids = [sam_input.getrname(a.reference_id) for a in alignments]
#Map to Gene IDs
g_ids = [tx_to_gid(tx_id) for tx_id in tx_ids]
# finally remove all copies to get a comprehensive unique list of genes
# found for this barcode
genes = set(g_ids)
# Does the alignment map to multiple genes or just one?
unique = True
# Was the alignment non-unique, but then rescued to being unique?
rescued_non_unique = False
# Even after rescue, was the alignment mapping to more than M genes?
failed_m_threshold = False
# The same read could align to transcripts from different genes.
if 1 < len(genes):
unique = False
# Rescue attempt: keep only alignments close to a transcript 3'-end.
close_alignments = [a for a in alignments if (ref_lengths[a.reference_id] - a.reference_end)<distance_from_tx_end]
close_tx_ids = [sam_input.getrname(a.reference_id) for a in close_alignments]
close_g_ids = [tx_to_gid(tx_id) for tx_id in close_tx_ids]
close_genes = set(close_g_ids)
if 0 < len(close_genes) < len(genes):
alignments = close_alignments
genes = close_genes
if len(close_genes) == 1:
rescued_non_unique = True
#Choose 1 alignment per gene, that we will write to the output BAM.
chosen_alignments = {}
keep_read = 0 < len(genes) <= multiple_alignment_threshold
# We need different logic if we are using a mixed organism reference
if using_mixed_ref:
refs = set(g.split(':')[1] for g in genes)
keep_read = (len(refs) == 1) and (0 < len(genes) <= multiple_alignment_threshold)
if keep_read:
for gene in genes:
gene_alignments = [a for a in alignments if tx_to_gid(sam_input.getrname(a.reference_id)) == gene]
# Per gene, prefer the alignment to the longest transcript.
chosen_alignment = sorted(gene_alignments, key=lambda a: ref_lengths[a.reference_id], reverse=True)[0]
chosen_alignments[gene] = chosen_alignment
else:
failed_m_threshold = True
read_filter_status = (unique, rescued_non_unique, failed_m_threshold, dependent_on_polyA_tail)
return chosen_alignments, read_filter_status
# --------------------------
# Process SAM input
# (we load everything into memory, so if a single barcode has truly very deep sequencing, we could get into trouble
# --------------------------
uniq_count = 0
rescued_count = 0
non_uniq_count = 0
failed_m_count = 0
not_aligned_count = 0
current_read = None
read_alignments = []
reads_by_umi = defaultdict(dict)
rev = 0
non_rev = 0
for alignment in sam_input:
#Skip alignments that failed to align...
if alignment.reference_id == -1:
not_aligned_count += 1
# if args.bam:
# sam_output.write(alignment)
continue
# The If statements detects that Bowtie is giving info about a different read,
# so let's process the last one before proceeding
if not current_read == alignment.query_name:
#Check that our read has any alignments
if read_alignments:
chosen_alignments, processing_stats = process_read_alignments(read_alignments)
if chosen_alignments:
# The UMI is recovered from the read name; the field position
# depends on which naming convention produced the FASTQ.
split_name = current_read.split(':')
if len(split_name) == 2:
umi = split_name[1] #Old Adrian Format
elif len(split_name) == 3:
umi = split_name[1] #Adrian format
else:
umi = split_name[4] #Old Allon format
seq = read_alignments[0].seq
# NOTE(review): seq is assigned but never used afterwards.
# NOTE(review): alignment.query_name here belongs to the *new* read
# that triggered this flush; current_read looks intended -- confirm.
reads_by_umi[umi][alignment.query_name] = chosen_alignments
uniq_count += processing_stats[0]
non_uniq_count += not(processing_stats[0] or processing_stats[1] or processing_stats[2])
rescued_count += processing_stats[1]
failed_m_count += processing_stats[2]
# We reset the current read info
current_read = alignment.query_name
read_alignments = []
read_alignments.append(alignment)
# Only runs if preceding for loop terminated without break
# This is not very DRY...
else:
if read_alignments:
chosen_alignments, processing_stats = process_read_alignments(read_alignments)
if chosen_alignments:
split_name = current_read.split(':')
if len(split_name) == 2:
umi = split_name[1] #Old Adrian Format
elif len(split_name) == 3:
umi = split_name[1] #Adrian format
else:
umi = split_name[4] #Allon format
seq = read_alignments[0].seq
reads_by_umi[umi][alignment.query_name] = chosen_alignments
uniq_count += processing_stats[0]
non_uniq_count += not(processing_stats[0] or processing_stats[1] or processing_stats[2])
rescued_count += processing_stats[1]
failed_m_count += processing_stats[2]
# -----------------------------
# Time to filter based on UMIs
# (and output)
# --------------------------
umi_counts = defaultdict(float)
ambig_umi_counts = defaultdict(float)
ambig_gene_partners = defaultdict(set)
ambig_clique_count = defaultdict(list)
oversequencing = []
distance_from_transcript_end = []
temp_sam_output = []
for umi, umi_reads in reads_by_umi.items():
#Invert the (read, gene) mapping
aligns_by_gene = defaultdict(lambda: defaultdict(set))
for read, read_genes in umi_reads.items():
for gene, alignment in read_genes.items():
aligns_by_gene[gene][len(read_genes)].add(alignment)
#Pick the best alignment for each gene:
# - least other alignments
# - highest alignment quality
# - longest read
best_alignment_for_gene = {}
for gene, alignments in aligns_by_gene.items():
# min_ambiguity_alignments = alignments[min(alignments.keys())]
# max_qual = max(a.mapq for a in min_ambiguity_alignments)
# max_qual_alignments = filter(lambda a: a.mapq==max_qual, min_ambiguity_alignments)
# best_alignment_for_gene[gene] = max(max_qual_alignments, key=lambda a: a.qlen)
best_alignment_for_gene[gene] = alignments[min(alignments.keys())]
# Compute hitting set
# Greedy weighted set-cover: repeatedly pick the gene with the largest
# (ambiguity-discounted) read support until every read is explained.
g0 = set.union(*(set(gs) for gs in umi_reads.values())) #Union of the gene sets of all reads from that UMI
r0 = set(umi_reads.keys())
gene_read_mapping = dict()
for g in g0:
for r in r0:
gene_read_mapping[(g, r)] = float(g in umi_reads[r])/(len(umi_reads[r])**2)
target_genes = dict()
#Keys are genes, values are the number of ambiguous partner of each gene
while len(r0) > 0:
#For each gene in g0, compute how many reads point ot it
gene_contrib = dict((gi, sum(gene_read_mapping[(gi, r)] for r in r0)) for gi in g0)
#Maximum value of how many reads poitn to any gene
max_contrib = max(gene_contrib.values())
#Gene with max contrib
max_contrib_genes = filter(lambda g: gene_contrib[g]==max_contrib, gene_contrib.keys())
#Pick a gene among those with the highest value. Which doesn't matter until the last step
# NOTE(review): max_contrib_genes is indexed here (and len()/iterated
# below for ambig_partners) -- that requires Python 2's list-returning
# filter(); wrap in list() when porting to Python 3.
g = max_contrib_genes[0]
read_count_for_umifm = 0
umifm_assigned_unambiguously = False
for r in copy(r0): #Take a copy of r0 doesn't change as we iterate through it
if gene_read_mapping[(g, r)]: #Remove any reads from r0 that contributed to the picked gene.
r0.remove(r)
#Count how many reads we are removing (this is the degree of over-sequencing)
read_count_for_umifm += 1
# umifm_reads.append(r)
# If we had equivalent picks,
# and their gene contrib value is now 0
# they were ambiguity partners
if len(max_contrib_genes) > 1:
# Update the gene contribs based on the new r0, but on the 'old' g0.
# That is why we remove g from g0 after this step only
gene_contrib = dict((gi, sum(gene_read_mapping[(gi, r)] for r in r0)) for gi in g0)
ambig_partners = filter(lambda g: gene_contrib[g]==0, max_contrib_genes)
#Ambig partners will often be a 1-element set. That's ok.
#Then it will be equivalent to "target_genes[g] = 1."
if len(ambig_partners) <= ambig_count_threshold:
if len(ambig_partners) == 1:
umifm_assigned_unambiguously = True
ambig_clique_count[0].append(umi)
for g_alt in ambig_partners:
ambig_gene_partners[g_alt].add(frozenset(ambig_partners))
target_genes[g_alt] = float(len(ambig_partners))
ambig_clique_count[len(ambig_partners)].append(umi)
else:
umifm_assigned_unambiguously = True
target_genes[g] = 1.
ambig_clique_count[1].append(umi)
#Remove g here, so that g is part of the updated gene_contrib, when necessary
g0.remove(g)
#For each target gene, output the best alignment
#and record umi count
for gene, ambigs in target_genes.items():
supporting_alignments = best_alignment_for_gene[gene]
if args.bam:
for alignment_for_output in best_alignment_for_gene[gene]:
# Add the following tags to aligned reads:
# XB - Library Name
# XB - Barcode Name
# XU - UMI sequence
# XO - Oversequencing number (how many reads with the same UMI are assigned to this gene)
# YG - Gene identity
# YK - Start of the alignment, relative to the transcriptome
# YL - End of the alignment, relative to the transcriptome
# YT - Length of alignment transcript
alignment_for_output.setTag('XL', args.library)
alignment_for_output.setTag('XB', args.barcode)
alignment_for_output.setTag('XU', umi)
alignment_for_output.setTag('XO', len(supporting_alignments))
alignment_for_output.setTag('YG', gene)
alignment_for_output.setTag('YK', int(alignment_for_output.pos))
alignment_for_output.setTag('YL', int(alignment_for_output.reference_end))
alignment_for_output.setTag('YT', int(ref_lengths[alignment.reference_id]))
temp_sam_output.append(alignment_for_output)
split_between = ambigs if split_ambiguities else 1.
umi_counts[gene] += 1./split_between
ambig_umi_counts[gene] += (1./split_between if ambigs>1 else 0)
#Output the counts per gene
all_genes = set()
for ref in sam_input.references:
gene = ref.split('|')[1]
all_genes.add(gene)
sorted_all_genes = sorted(all_genes)
sorted_metric_columns = ['total_input_reads','single_alignment','rescued_single_alignment','non_unique_less_than_m','non_unique_more_than_m','not_aligned','unambiguous_umifm','umifm_degrees_of_ambiguity_2','umifm_degrees_of_ambiguity_3','umifm_degrees_of_ambiguity_>3']
output_umi_counts = [umi_counts[gene] for gene in sorted_all_genes]
if args.write_header:
args.counts.write('\t'.join(['barcode'] + sorted_all_genes) + '\n')
args.ambigs.write('\t'.join(['barcode'] + sorted_all_genes) + '\n')
args.metrics.write('\t'.join(["Barcode","Reads","Reads with unique alignment","Reads with unique alignment within shorter distance of 3'-end","Reads with less than `m` multiple alignments","Reads with more than than `m` multiple alignments","Reads with no alignments", "UMIFM","Ambig UMIFM (between 2 genes)","Ambig UMIFM (between 3 genes)","Ambig UMIFM (between more than 3 genes)",]) + '\n')
# Barcodes below the min_counts threshold are logged as ignored instead
# of being written to the counts table.
if sum(output_umi_counts) >= args.min_counts:
ignored = False
args.counts.write('\t'.join([args.barcode] + [str(int(u)) for u in output_umi_counts]) + '\n')
# Output sam data
if args.bam:
for alignment in temp_sam_output:
sam_output.write(alignment)
sam_output.close()
# Output ambig data
output_ambig_counts = [ambig_umi_counts[gene] for gene in sorted_all_genes]
if sum(output_ambig_counts) > 0:
args.ambigs.write('\t'.join([args.barcode] + [str(int(u)) for u in output_ambig_counts]) + '\n')
output_ambig_partners = {}
for gene in sorted_all_genes:
if ambig_gene_partners[gene]:
gene_partners = frozenset.union(*ambig_gene_partners[gene])-frozenset((gene,))
if gene_partners:
output_ambig_partners[gene] = gene_partners
args.ambig_partners.write(args.barcode + '\t'+ str(output_ambig_partners) + '\n')
else:
ignored = True
with open(args.counts.name + '.ignored', 'a') as f:
f.write(args.barcode + '\n')
args.counts.close()
args.ambigs.close()
args.ambig_partners.close()
#Output the fixing metrics
total_input_reads = uniq_count + rescued_count + non_uniq_count + failed_m_count + not_aligned_count
metrics_data = {
'total_input_reads': total_input_reads,
'single_alignment': uniq_count,
'rescued_single_alignment': rescued_count,
'non_unique_less_than_m': non_uniq_count,
'non_unique_more_than_m': failed_m_count,
'not_aligned': not_aligned_count,
'unambiguous_umifm' : 0,
'umifm_degrees_of_ambiguity_2' : 0,
'umifm_degrees_of_ambiguity_3' : 0,
'umifm_degrees_of_ambiguity_>3' : 0,
}
# Clique sizes 0 and 1 both mean the UMI was assigned unambiguously.
for k, v in ambig_clique_count.items():
if k == 0:
metrics_data['unambiguous_umifm'] += len(v)
elif k == 1:
metrics_data['unambiguous_umifm'] += len(v)
elif k == 2:
metrics_data['umifm_degrees_of_ambiguity_2'] += len(v)
elif k == 3:
metrics_data['umifm_degrees_of_ambiguity_3'] += len(v)
elif k > 3:
metrics_data['umifm_degrees_of_ambiguity_>3'] += len(v)
args.metrics.write('\t'.join([args.barcode] + [str(metrics_data[c]) for c in sorted_metric_columns]) + '\n')
log_output_line = "{0:<8d}{1:<8d}{2:<10d}".format(total_input_reads, metrics_data['unambiguous_umifm'],
metrics_data['umifm_degrees_of_ambiguity_2']+metrics_data['umifm_degrees_of_ambiguity_3']+metrics_data['umifm_degrees_of_ambiguity_>3'])
if ignored:
log_output_line += ' [Ignored from output]'
print_to_log(log_output_line)
# Command-line entry point: parse the filtering/output options and run
# quant().  The counts/metrics/ambigs files are opened in append mode so
# many barcodes (one process each) can share the same output tables.
if __name__=="__main__":
import sys, argparse
parser = argparse.ArgumentParser()
parser.add_argument('-m', help='Ignore reads with more than M alignments, after filtering on distance from transcript end.', type=int, default=4)
parser.add_argument('-u', help='Ignore counts from UMI that should be split among more than U genes.', type=int, default=4)
parser.add_argument('-d', help='Maximal distance from transcript end.', type=int, default=525)
parser.add_argument('--polyA', help='Length of polyA tail in reference transcriptome.', type=int, default=5)
parser.add_argument('--split_ambi', help="If umi is assigned to m genes, add 1/m to each gene's count (instead of 1)", action='store_true', default=False)
parser.add_argument('--mixed_ref', help="Reference is mixed, with records named 'gene:ref', should only keep reads that align to one ref.", action='store_true', default=False)
parser.add_argument('--min_non_polyA', type=int, default=0)
# parser.add_argument('--counts', type=argparse.FileType('w'))
# parser.add_argument('--metrics', type=argparse.FileType('w'))
parser.add_argument('--counts', type=argparse.FileType('a'))
parser.add_argument('--metrics', type=argparse.FileType('a'))
parser.add_argument('--ambigs', type=argparse.FileType('a'))
parser.add_argument('--ambig-partners', type=argparse.FileType('a'))
parser.add_argument('--barcode', type=str)
parser.add_argument('--library', type=str, default='')
parser.add_argument('--min-counts', type=int, default=0)
parser.add_argument('--write-header', action='store_true')
parser.add_argument('--bam', type=str, nargs='?', default='')
parser.add_argument('--soft-masked-regions', type=argparse.FileType('r'), nargs='?')
args = parser.parse_args()
quant(args)
|
{"/count_barcode_distribution.py": ["/indrops.py"], "/trim_polyA_and_filter_low_complexity_reads.py": ["/indrops.py"]}
|
10,852
|
brianjohnhaas/indrops
|
refs/heads/master
|
/indrops.py
|
import os, subprocess
import sys  # used by print_to_stderr(); not previously imported at module level
import itertools
import operator
from collections import defaultdict, OrderedDict
import errno
# cPickle is a faster version of pickle that isn't installed in python3
# inserted try statement just in case
try:
    import cPickle as pickle
except:
    import pickle
from io import BytesIO
import numpy as np
import re
import shutil
import gzip
# product: product(A, B) returns the same as ((x,y) for x in A for y in B).
# combination: Return r length subsequences of elements from the input iterable.
from itertools import product, combinations
import time
import yaml
import pysam
import tempfile
import string
from contextlib import contextmanager
# -----------------------
#
# Helper functions
#
# -----------------------
def string_hamming_distance(str1, str2):
    """
    Fast hamming distance over 2 strings known to be of same length.

    In information theory, the Hamming distance between two strings of equal
    length is the number of positions at which the corresponding symbols
    are different, e.g. "karolin" and "kathrin" is 3.
    """
    # itertools.imap/operator.ne only exists on Python 2; a generator over
    # zip() is equivalent and runs on both Python 2 and 3.
    return sum(c1 != c2 for c1, c2 in zip(str1, str2))
def rev_comp(seq):
    """Return the reverse complement of a DNA sequence over A/T/C/G/N."""
    complement = {'A':'T', 'T':'A', 'C':'G', 'G':'C', 'N':'N'}
    return ''.join(complement[base] for base in reversed(seq))
def to_fastq(name, seq, qual):
    """
    Format one record as the canonical 4-line FastQ string
    (header, sequence, '+' separator, qualities).
    """
    record_lines = ['@' + name, seq, '+', qual]
    return '\n'.join(record_lines) + '\n'
def to_fastq_lines(bc, umi, seq, qual, read_name=''):
    """
    Format a FastQ record whose name encodes barcode, UMI and the original
    read name (with ':' replaced by '_' so the fields stay parseable).
    """
    safe_read_name = read_name.replace(':', '_')
    combined_name = '%s:%s:%s' % (bc, umi, safe_read_name)
    return to_fastq(combined_name, seq, qual)
def from_fastq(handle):
    """
    Yield (name, seq, qual) tuples for each 4-line record in an open
    FastQ handle (or any iterator of lines).
    """
    while True:
        # Under PEP 479 (Python 3.7+) a StopIteration escaping a generator
        # becomes a RuntimeError, so EOF must be caught explicitly.
        try:
            name = next(handle).rstrip()[1:]  # strip the leading '@'
            seq = next(handle).rstrip()       # read sequence
            next(handle)                      # '+' separator line
            qual = next(handle).rstrip()      # quality string
        except StopIteration:
            return
        # Guard against truncated/blank trailing records.
        if not name or not seq or not qual:
            break
        yield name, seq, qual
def seq_neighborhood(seq, n_subs=1):
    """
    Yield every sequence within n_subs substitutions of seq, by looping
    through each combination of positions and, for each, every combination
    of replacement bases (A/T/G/C/N).
    """
    bases = "ATGCN"
    for idx_combo in combinations(range(len(seq)), n_subs):
        # every unique choice of n_subs positions to mutate
        for replacement in product(bases, repeat=n_subs):
            # every possible assignment of bases to those positions
            mutated = list(seq)
            for pos, base in zip(idx_combo, replacement):
                mutated[pos] = base
            yield ''.join(mutated)
def build_barcode_neighborhoods(barcode_file, expect_reverse_complement=True):
    """
    Given a set of barcodes, produce sequences which can unambiguously be
    mapped to these barcodes, within 2 substitutions. If a sequence maps to
    multiple barcodes, get rid of it. However, if a sequence maps to a bc1
    with 1 change and another with 2 changes, keep the 1-change mapping.

    Returns a dict mapping observed sequence -> original barcode.
    """
    # contains all mutants that map uniquely to a barcode
    clean_mapping = dict()

    # mutant -> set of barcodes that could have produced it, for single
    # and double substitutions respectively
    mapping1 = defaultdict(set)
    mapping2 = defaultdict(set)

    # Build the full neighborhood and iterate through barcodes.
    # BUGFIX: mode 'rU' was removed in Python 3.11; plain 'r' already gives
    # universal-newline handling on Python 3.
    with open(barcode_file, 'r') as f:
        # iterate through each barcode (rstrip cleans string of whitespace)
        for line in f:
            barcode = line.rstrip()
            if expect_reverse_complement:
                barcode = rev_comp(line.rstrip())

            # each barcode obviously maps to itself uniquely
            clean_mapping[barcode] = barcode

            # for each possible mutated form of a given barcode, either add
            # the origin barcode into the set corresponding to that mutant or
            # create a new entry for a mutant not already in mapping1
            # eg: barcodes CATG and CCTG would be in the set for mutant CTTG
            # but only barcode CATG could generate mutant CANG
            for n in seq_neighborhood(barcode, 1):
                mapping1[n].add(barcode)
            # same as above but with double mutants
            for n in seq_neighborhood(barcode, 2):
                mapping2[n].add(barcode)

    # take all single-mutants and find those that could only have come from
    # one specific barcode; singles take precedence over doubles.
    for k, v in mapping1.items():
        if k not in clean_mapping:
            if len(v) == 1:
                clean_mapping[k] = list(v)[0]
    for k, v in mapping2.items():
        if k not in clean_mapping:
            if len(v) == 1:
                clean_mapping[k] = list(v)[0]
    del mapping1
    del mapping2
    return clean_mapping
def check_dir(path):
    """Create directory *path* (with parents) if it does not exist yet."""
    try:
        os.makedirs(path)
    except OSError as err:
        # A pre-existing directory is fine; re-raise anything else.
        if err.errno != errno.EEXIST:
            raise
def print_to_stderr(msg, newline=True):
    """
    Write msg to stderr, optionally followed by a newline.
    (Thin wrapper so future logging changes happen in one place.)
    """
    terminator = '\n' if newline else ''
    sys.stderr.write(str(msg) + terminator)
def worker_filter(iterable, worker_index, total_workers):
    """Yield every total_workers-th item of iterable, offset by worker_index."""
    offset = worker_index % total_workers
    for position, item in enumerate(iterable):
        if position % total_workers == offset:
            yield item
class FIFO():
    """
    Context manager around a named pipe (FIFO).

    If no explicit filename is supplied, the pipe is created inside a fresh
    temporary directory, which is removed again on exit.
    """
    def __init__(self, filename="", suffix="", prefix="tmp_fifo_dir", dir=None):
        # An explicit filename wins; otherwise a throwaway dir holds the pipe.
        if filename:
            self.filename = filename
            return
        self.tmpdir = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
        self.filename = os.path.join(self.tmpdir, 'fifo')

    def __enter__(self):
        # Clear any stale file occupying the target path, then create the pipe.
        if os.path.exists(self.filename):
            os.unlink(self.filename)
        os.mkfifo(self.filename)
        return self

    def __exit__(self, type, value, traceback):
        os.remove(self.filename)
        # Only the auto-created temporary directory needs cleanup.
        if hasattr(self, 'tmpdir'):
            shutil.rmtree(self.tmpdir)
# -----------------------
#
# Core objects
#
# -----------------------
class IndropsProject():
    """
    Top-level handle on an inDrops analysis project.

    Parses the project YAML and builds `self.libraries` (name -> IndropsLibrary)
    and `self.runs` (run name -> list of filtering/demultiplexing parts) from
    the declared sequencing runs. Also lazily exposes tool paths, parameters
    and barcode neighborhoods as cached properties.
    """

    def __init__(self, project_yaml_file_handle):
        # NOTE(review): yaml.load() without an explicit Loader can construct
        # arbitrary objects; prefer yaml.safe_load for untrusted project files.
        self.yaml = yaml.load(project_yaml_file_handle)

        self.name = self.yaml['project_name']
        self.project_dir = self.yaml['project_dir']

        self.libraries = OrderedDict()
        self.runs = OrderedDict()

        for run in self.yaml['sequencing_runs']:
            """
            After filtering, each sequencing run generates between 1 ... X files with filtered reads.
                X = (N x M)
                    - N: The run is often split into several files (a typical NextSeq run is split into L001,
                        L002, L003, L004 which match different lanes, but this can also be done artificially.
                    - M: The same run might contain several libraries. The demultiplexing can be handled by the script (or externally).
                        If demultiplexing is done externally, there will be a different .fastq file for each library.
            """
            version = run['version']

            # Build the filtered-parts filename template piece by piece.
            filtered_filename = '{library_name}_{run_name}'
            if run['version'] == 'v3':
                filtered_filename += '_{library_index}'
            # Prepare to iterate over run split into several files
            if 'split_affixes' in run:
                filtered_filename += '_{split_affix}'
                split_affixes = run['split_affixes']
            else:
                split_affixes = ['']
            filtered_filename += '.fastq'

            # Prepare to iterate over libraries
            if 'libraries' in run:
                run_libraries = run['libraries']
            elif 'library_name' in run:
                run_libraries = [{'library_name' : run['library_name'], 'library_prefix':''}]
            else:
                raise Exception('No library name or libraries specified.')

            if run['version']=='v1' or run['version']=='v2':
                for affix in split_affixes:
                    for lib in run_libraries:
                        lib_name = lib['library_name']
                        if lib_name not in self.libraries:
                            self.libraries[lib_name] = IndropsLibrary(name=lib_name, project=self, version=run['version'])
                        else:
                            # A library cannot mix chemistry versions across runs.
                            assert self.libraries[lib_name].version == run['version']

                        # v1 and v2 swap which mate carries the barcode
                        # (metaread) vs the biological sequence (bioread).
                        if version == 'v1':
                            metaread_filename = os.path.join(run['dir'],run['fastq_path'].format(split_affix=affix, read='R1', library_prefix=lib['library_prefix']))
                            bioread_filename = os.path.join(run['dir'],run['fastq_path'].format(split_affix=affix, read='R2', library_prefix=lib['library_prefix']))
                        elif version == 'v2':
                            metaread_filename = os.path.join(run['dir'],run['fastq_path'].format(split_affix=affix, read='R2', library_prefix=lib['library_prefix']))
                            bioread_filename = os.path.join(run['dir'],run['fastq_path'].format(split_affix=affix, read='R1', library_prefix=lib['library_prefix']))

                        filtered_part_filename = filtered_filename.format(run_name=run['name'], split_affix=affix, library_name=lib_name)
                        filtered_part_path = os.path.join(self.project_dir, lib_name, 'filtered_parts', filtered_part_filename)
                        part = V1V2Filtering(filtered_fastq_filename=filtered_part_path,
                            project=self,
                            bioread_filename=bioread_filename,
                            metaread_filename=metaread_filename,
                            run_name=run['name'],
                            library_name=lib_name,
                            part_name=affix)

                        if run['name'] not in self.runs:
                            self.runs[run['name']] = []
                        self.runs[run['name']].append(part)
                        self.libraries[lib_name].parts.append(part)

            elif run['version'] == 'v3':
                # v3 runs are demultiplexed by this pipeline: one demultiplexer
                # part per split file, shared by all libraries in the run.
                for affix in split_affixes:
                    filtered_part_filename = filtered_filename.format(run_name=run['name'], split_affix=affix,
                        library_name='{library_name}', library_index='{library_index}')
                    part_filename = os.path.join(self.project_dir, '{library_name}', 'filtered_parts', filtered_part_filename)
                    input_filename = os.path.join(run['dir'], run['fastq_path'].format(split_affix=affix, read='{read}'))
                    part = V3Demultiplexer(run['libraries'], project=self, part_filename=part_filename, input_filename=input_filename, run_name=run['name'], part_name=affix)

                    if run['name'] not in self.runs:
                        self.runs[run['name']] = []
                    self.runs[run['name']].append(part)

                    for lib in run_libraries:
                        lib_name = lib['library_name']
                        lib_index = lib['library_index']
                        if lib_name not in self.libraries:
                            self.libraries[lib_name] = IndropsLibrary(name=lib_name, project=self, version=run['version'])
                        self.libraries[lib_name].parts.append(part.libraries[lib_index])

    @property
    def paths(self):
        """Tool/script/reference paths, merged from defaults and project YAML (cached)."""
        if not hasattr(self, '_paths'):
            script_dir = os.path.dirname(os.path.realpath(__file__))
            # Read defaults
            with open(os.path.join(script_dir, 'default_parameters.yaml'), 'r') as f:
                paths = yaml.load(f)['paths']
            # Update with user provided values
            paths.update(self.yaml['paths'])

            paths['python'] = os.path.join(paths['python_dir'], 'python')
            paths['java'] = os.path.join(paths['java_dir'], 'java')
            paths['bowtie'] = os.path.join(paths['bowtie_dir'], 'bowtie')
            paths['samtools'] = os.path.join(paths['samtools_dir'], 'samtools')
            paths['trimmomatic_jar'] = os.path.join(script_dir, 'bins', 'trimmomatic-0.33.jar')
            paths['rsem_tbam2gbam'] = os.path.join(paths['rsem_dir'], 'rsem-tbam2gbam')
            paths['rsem_prepare_reference'] = os.path.join(paths['rsem_dir'], 'rsem-prepare-reference')

            # Expose the dict entries as attributes for convenience.
            self._paths = type('Paths_anonymous_object',(object,),paths)()
            self._paths.trim_polyA_and_filter_low_complexity_reads_py = os.path.join(script_dir, 'trim_polyA_and_filter_low_complexity_reads.py')
            self._paths.quantify_umifm_from_alignments_py = os.path.join(script_dir, 'quantify_umifm_from_alignments.py')
            self._paths.count_barcode_distribution_py = os.path.join(script_dir, 'count_barcode_distribution.py')
            self._paths.gel_barcode1_list = os.path.join(script_dir, 'ref/barcode_lists/gel_barcode1_list.txt')
            self._paths.gel_barcode2_list = os.path.join(script_dir, 'ref/barcode_lists/gel_barcode2_list.txt')
        return self._paths

    @property
    def parameters(self):
        """Pipeline parameters, defaults overridden by project YAML (cached)."""
        if not hasattr(self, '_parameters'):
            # Read defaults
            with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'default_parameters.yaml'), 'r') as f:
                self._parameters = yaml.load(f)['parameters']
            # Update with user provided values
            if 'parameters' in self.yaml:
                for k, d in self.yaml['parameters'].items():
                    self._parameters[k].update(d)
        return self._parameters

    @property
    def gel_barcode1_revcomp_list_neighborhood(self):
        # BUGFIX: the guard used to test '_gel_barcode1_list_neighborhood',
        # an attribute that was never assigned, so the neighborhood was
        # rebuilt on every access instead of being cached.
        if not hasattr(self, '_gel_barcode1_revcomp_list_neighborhood'):
            self._gel_barcode1_revcomp_list_neighborhood = build_barcode_neighborhoods(self.paths.gel_barcode1_list, True)
        return self._gel_barcode1_revcomp_list_neighborhood

    @property
    def gel_barcode2_revcomp_list_neighborhood(self):
        if not hasattr(self, '_gel_barcode2_revcomp_list_neighborhood'):
            self._gel_barcode2_revcomp_list_neighborhood = build_barcode_neighborhoods(self.paths.gel_barcode2_list, True)
        return self._gel_barcode2_revcomp_list_neighborhood

    @property
    def gel_barcode2_list_neighborhood(self):
        if not hasattr(self, '_gel_barcode2_list_neighborhood'):
            self._gel_barcode2_list_neighborhood = build_barcode_neighborhoods(self.paths.gel_barcode2_list, False)
        return self._gel_barcode2_list_neighborhood

    @property
    def stable_barcode_names(self):
        """Map observed barcode pair -> stable human-readable name, per version (cached)."""
        if not hasattr(self, '_stable_barcode_names'):
            with open(self.paths.gel_barcode1_list) as f:
                rev_bc1s = [rev_comp(line.rstrip()) for line in f]
            with open(self.paths.gel_barcode2_list) as f:
                bc2s = [line.rstrip() for line in f]
            rev_bc2s = [rev_comp(bc2) for bc2 in bc2s]

            # V1, V2 names:
            v1v2_names = {}
            barcode_iter = product(rev_bc1s, rev_bc2s)
            name_iter = product(string.ascii_uppercase, repeat=4)
            for barcode, name in zip(barcode_iter, name_iter):
                v1v2_names['-'.join(barcode)] = 'bc' + ''.join(name)

            # V3 names:
            v3_names = {}
            barcode_iter = product(bc2s, rev_bc2s)
            name_iter = product(string.ascii_uppercase, repeat=4)
            for barcode, name in zip(barcode_iter, name_iter):
                v3_names['-'.join(barcode)] = 'bc' + ''.join(name)

            self._stable_barcode_names = {
                'v1' : v1v2_names,
                'v2' : v1v2_names,
                'v3': v3_names,
            }
        return self._stable_barcode_names

    def build_transcriptome(self, gzipped_genome_softmasked_fasta_filename, gzipped_transcriptome_gtf):
        """
        Build the bowtie/RSEM reference: filter the GTF to well-supported
        transcripts, annotate transcript ids with gene names, run
        rsem-prepare-reference, then record soft-masked (lowercase) regions.
        """
        import pyfasta
        index_dir = os.path.dirname(self.paths.bowtie_index)
        check_dir(index_dir)
        genome_filename = os.path.join(index_dir, '.'.join(gzipped_genome_softmasked_fasta_filename.split('.')[:-1]))
        gtf_filename = os.path.join(index_dir, gzipped_transcriptome_gtf.split('/')[-1])
        gtf_prefix = '.'.join(gtf_filename.split('.')[:-2])
        gtf_with_genenames_in_transcript_id = gtf_prefix + '.annotated.gtf'

        # Keep TSL NA transcripts only for these gene biotypes.
        accepted_gene_biotypes_for_NA_transcripts = set(["protein_coding","IG_V_gene","IG_J_gene","TR_J_gene","TR_D_gene","TR_V_gene","IG_C_gene","IG_D_gene","TR_C_gene"])
        tsl1_or_tsl2_strings = ['transcript_support_level "1"', 'transcript_support_level "1 ', 'transcript_support_level "2"', 'transcript_support_level "2 ']
        tsl_NA = 'transcript_support_level "NA'

        print_to_stderr('Filtering GTF')
        output_gtf = open(gtf_with_genenames_in_transcript_id, 'w')
        for line in subprocess.Popen(["gzip", "--stdout", "-d", gzipped_transcriptome_gtf], stdout=subprocess.PIPE).stdout:
            if 'transcript_id' not in line:
                continue
            line_valid_for_output = False
            # Renamed loop variable: 'string' shadowed the imported module.
            for tsl_string in tsl1_or_tsl2_strings:
                if tsl_string in line:
                    line_valid_for_output = True
                    break
            if tsl_NA in line:
                gene_biotype = re.search(r'gene_biotype \"(.*?)\";', line)
                if gene_biotype and gene_biotype.group(1) in accepted_gene_biotypes_for_NA_transcripts:
                    line_valid_for_output = True
            if line_valid_for_output:
                # Append "|<gene_name>" to the transcript_id attribute.
                gene_name = re.search(r'gene_name \"(.*?)\";', line)
                if gene_name:
                    gene_name = gene_name.group(1)
                    out_line = re.sub(r'(?<=transcript_id ")(.*?)(?=";)', r'\1|'+gene_name, line)
                    output_gtf.write(out_line)
        output_gtf.close()

        print_to_stderr('Gunzipping Genome')
        p_gzip = subprocess.Popen(["gzip", "-dfc", gzipped_genome_softmasked_fasta_filename], stdout=open(genome_filename, 'wb'))
        if p_gzip.wait() != 0:
            # BUGFIX: this error previously claimed rsem-prepare-reference
            # failed, but it is the genome gunzip step that failed.
            raise Exception(" Error in gunzipping genome ")

        p_rsem = subprocess.Popen([self.paths.rsem_prepare_reference, '--bowtie', '--bowtie-path', self.paths.bowtie_dir,
            '--gtf', gtf_with_genenames_in_transcript_id,
            '--polyA', '--polyA-length', '5', genome_filename, self.paths.bowtie_index])
        if p_rsem.wait() != 0:
            raise Exception(" Error in rsem-prepare reference ")

        print_to_stderr('Finding soft masked regions in transcriptome')
        transcripts_fasta = pyfasta.Fasta(self.paths.bowtie_index + '.transcripts.fa')
        soft_mask = {}
        for tx, seq in transcripts_fasta.items():
            seq = str(seq)
            # Lowercase stretches mark repeat/soft-masked regions.
            soft_mask[tx] = set((m.start(), m.end()) for m in re.finditer(r'[atcgn]+', seq))
        # NOTE(review): pickle in text mode ('w') only works on Python 2;
        # Python 3 requires 'wb' here — confirm target interpreter.
        with open(self.paths.bowtie_index + '.soft_masked_regions.pickle', 'w') as out:
            pickle.dump(soft_mask, out)
class IndropsLibrary():
    def __init__(self, name='', project=None, version=''):
        """
        One inDrops library within a project.

        Creates the library's working directories ('filtered_parts',
        'quant_dir') immediately, and precomputes the paths of the summary
        and output files used by later pipeline steps.
        """
        self.project = project
        self.name = name
        self.parts = []  # filtering/demultiplexing parts feeding this library
        self.version = version  # chemistry version: 'v1', 'v2' or 'v3'

        self.paths = {}
        for lib_dir in ['filtered_parts', 'quant_dir']:
            dir_path = os.path.join(self.project.project_dir, self.name, lib_dir)
            check_dir(dir_path)
            self.paths[lib_dir] = dir_path
        # Re-wrap the dict as attribute-style access, mirroring IndropsProject.paths.
        self.paths = type('Paths_anonymous_object',(object,),self.paths)()

        self.paths.abundant_barcodes_names_filename = os.path.join(self.project.project_dir, self.name, 'abundant_barcodes.pickle')
        self.paths.filtering_statistics_filename = os.path.join(self.project.project_dir, self.name, self.name+'.filtering_stats.csv')
        self.paths.barcode_abundance_histogram_filename = os.path.join(self.project.project_dir, self.name, self.name+'.barcode_abundance.png')
        self.paths.missing_quants_filename = os.path.join(self.project.project_dir, self.name, self.name+'.missing_barcodes.pickle')
@property
def barcode_counts(self):
if not hasattr(self, '_barcode_counts'):
self._barcode_counts = defaultdict(int)
for part in self.parts:
for k, v in part.part_barcode_counts.items():
self._barcode_counts[k] += v
return self._barcode_counts
@property
def abundant_barcodes(self):
if not hasattr(self, '_abundant_barcodes'):
with open(self.paths.abundant_barcodes_names_filename) as f:
self._abundant_barcodes = pickle.load(f)
return self._abundant_barcodes
def sorted_barcode_names(self, min_reads=0):
return [name for bc,(name,abun) in sorted(self.abundant_barcodes.items(), key=lambda i:-i[1][1]) if abun>min_reads]
    def identify_abundant_barcodes(self, make_histogram=True, absolute_min_reads=250):
        """
        Identify which barcodes are above the absolute minimal abundance,
        and make a histogram summarizing the barcode distribution.

        Side effects: writes the abundant-barcode pickle, a per-part
        filtering-statistics CSV, and (optionally) a barcode-abundance PNG.
        """
        keep_barcodes = []
        for k, v in self.barcode_counts.items():
            if v > absolute_min_reads:
                keep_barcodes.append(k)

        abundant_barcodes = {}
        print_to_stderr(" %d barcodes above absolute minimum threshold" % len(keep_barcodes))
        for bc in keep_barcodes:
            # barcode -> (stable human-readable name, total read count)
            abundant_barcodes[bc] = (self.project.stable_barcode_names[self.version][bc], self.barcode_counts[bc])

        self._abundant_barcodes = abundant_barcodes
        # NOTE(review): pickle written in text mode ('w'); Python 3 requires
        # 'wb' — confirm the target interpreter before changing.
        with open(self.paths.abundant_barcodes_names_filename, 'w') as f:
            pickle.dump(abundant_barcodes, f)

        # Create table about the filtering process
        with open(self.paths.filtering_statistics_filename, 'w') as filtering_stats:
            header = ['Run', 'Part', 'Input Reads', 'Valid Structure', 'Surviving Trimmomatic', 'Surviving polyA trim and complexity filter']
            # Column sets differ by chemistry version.
            if self.version == 'v1' or self.version == 'v2':
                structure_parts = ['W1_in_R2', 'empty_read', 'No_W1', 'No_polyT', 'BC1', 'BC2', 'Umi_error']
                header += ['W1 in R2', 'empty read', 'No W1 in R1', 'No polyT', 'BC1', 'BC2', 'UMI_contains_N']
            elif self.version == 'v3':
                structure_parts = ['Invalid_BC1', 'Invalid_BC2', 'UMI_contains_N']
                header += ['Invalid BC1', 'Invalid BC2', 'UMI_contains_N']
            trimmomatic_parts = ['dropped']
            header += ['Dropped by Trimmomatic']
            complexity_filter_parts = ['rejected_because_too_short', 'rejected_because_complexity_too_low']
            header += ['Too short after polyA trim', 'Read complexity too low']
            filtering_stats.write(','.join(header)+'\n')
            # One CSV row per filtering part, pulled from its YAML metrics.
            for part in self.parts:
                with open(part.filtering_metrics_filename) as f:
                    part_stats = yaml.load(f)
                line = [part.run_name, part.part_name, part_stats['read_structure']['Total'], part_stats['read_structure']['Valid'], part_stats['trimmomatic']['output'], part_stats['complexity_filter']['output']]
                line += [part_stats['read_structure'][k] for k in structure_parts]
                line += [part_stats['trimmomatic'][k] for k in trimmomatic_parts]
                line += [part_stats['complexity_filter'][k] for k in complexity_filter_parts]
                line = [str(l) for l in line]
                filtering_stats.write(','.join(line)+'\n')

        print_to_stderr("Created Library filtering summary:")
        print_to_stderr(" " + self.paths.filtering_statistics_filename)

        # Make the histogram figure
        if not make_histogram:
            return
        count_freq = defaultdict(int)
        for bc, count in self.barcode_counts.items():
            count_freq[count] += 1
        # NOTE(review): on Python 3, dict views should be wrapped in
        # list(...) before np.array — this works as written only on Python 2.
        x = np.array(count_freq.keys())
        y = np.array(count_freq.values())
        w = x*y

        # need to use non-interactive Agg backend (no display available)
        import matplotlib
        matplotlib.use('Agg')
        from matplotlib import pyplot as plt
        ax = plt.subplot(111)
        # Weight each count bin by total reads so the y-axis shows reads, not barcodes.
        ax.hist(x, bins=np.logspace(0, 6, 50), weights=w)
        ax.set_xscale('log')
        ax.set_xlabel('Reads per barcode')
        ax.set_ylabel('#reads coming from bin')
        plt.savefig(self.paths.barcode_abundance_histogram_filename)
        print_to_stderr("Created Barcode Abundance Histogram at:")
        print_to_stderr(" " + self.paths.barcode_abundance_histogram_filename)
def sort_reads_by_barcode(self, index=0):
self.parts[index].sort_reads_by_barcode(self.abundant_barcodes)
def get_reads_for_barcode(self, barcode, run_filter=[]):
for part in self.parts:
if (not run_filter) or (part.run_name in run_filter):
for line in part.get_reads_for_barcode(barcode):
yield line
def quantify_expression(self, analysis_prefix='', min_reads=750, min_counts=0, total_workers=1, worker_index=0, no_bam=False, run_filter=[]):
if analysis_prefix:
analysis_prefix += '.'
sorted_barcode_names = self.sorted_barcode_names(min_reads=min_reads)
# Identify which barcodes belong to this worker
barcodes_for_this_worker = []
i = worker_index
while i < len(sorted_barcode_names):
barcodes_for_this_worker.append(sorted_barcode_names[i])
i += total_workers
counts_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.counts.tsv' % (analysis_prefix, worker_index, total_workers))
ambig_counts_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.ambig.counts.tsv' % (analysis_prefix, worker_index, total_workers))
ambig_partners_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.ambig.partners' % (analysis_prefix, worker_index, total_workers))
metrics_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.metrics.tsv' % (analysis_prefix, worker_index, total_workers))
ignored_for_output_filename = counts_output_filename+'.ignored'
merged_bam_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.bam'% (analysis_prefix, worker_index, total_workers))
merged_bam_index_filename = merged_bam_filename + '.bai'
get_barcode_genomic_bam_filename = lambda bc: os.path.join(self.paths.quant_dir, '%s%s.genomic.sorted.bam' % (analysis_prefix, bc))
# If we wanted BAM output, and the merge BAM and merged BAM index are present, then we are done
if (not no_bam) and (os.path.isfile(merged_bam_filename) and os.path.isfile(merged_bam_index_filename)):
print_to_stderr('Indexed, merged BAM file detected for this worker. Done.')
return
# Otherwise, we have to check what we need to quantify
"""
Function to determine which barcodes this quantification worker might have already quantified.
This tries to handle interruption during any step of the process.
The worker is assigned some list of barcodes L. For every barcode:
- It could have been quantified
- but have less than min_counts ---> so it got written to `ignored` file.
- and quantification succeeded, meaning
1. there is a line (ending in \n) in the `metrics` file.
2. there is a line (ending in \n) in the `quantification` file.
3. there (could) be a line (ending in \n) in the `ambiguous quantification` file.
4. there (could) be a line (ending in \n) in the `ambiguous quantification partners` file.
[If any line doesn't end in \n, then likely the output of that line was interrupted!]
5. (If BAM output is desired) There should be a sorted genomic BAM
6. (If BAM output is desired) There should be a sorted genomic BAM index
"""
succesfully_previously_quantified = set()
previously_ignored = set()
header_written = False
if os.path.isfile(counts_output_filename) and os.path.isfile(metrics_output_filename):
# Load in list of ignored barcodes
if os.path.isfile(ignored_for_output_filename):
with open(ignored_for_output_filename, 'r') as f:
previously_ignored = set([line.rstrip().split('\t')[0] for line in f])
# Load the metrics data into memory
# (It should be fairly small, this is fast and safe)
existing_metrics_data = {}
with open(metrics_output_filename, 'r') as f:
existing_metrics_data = dict((line.partition('\t')[0], line) for line in f if line[-1]=='\n')
# Quantification data could be large, read it line by line and output it back for barcodes that have a matching metrics line.
with open(counts_output_filename, 'r') as in_counts, \
open(counts_output_filename+'.tmp', 'w') as tmp_counts, \
open(metrics_output_filename+'.tmp', 'w') as tmp_metrics:
for line in in_counts:
# The first worker is reponsible for written the header.
# Make sure we carry that over
if (not header_written) and (worker_index==0):
tmp_counts.write(line)
tmp_metrics.write(existing_metrics_data['Barcode'])
header_written = True
continue
# This line has incomplete output, skip it.
# (This can only happen with the last line)
if line[-1] != '\n':
continue
barcode = line.partition('\t')[0]
# Skip barcode if we don't have existing metrics data
if barcode not in existing_metrics_data:
continue
# Check if we BAM required BAM files exist
barcode_genomic_bam_filename = get_barcode_genomic_bam_filename(barcode)
bam_files_required_and_present = no_bam or (os.path.isfile(barcode_genomic_bam_filename) and os.path.isfile(barcode_genomic_bam_filename+'.bai'))
if not bam_files_required_and_present:
continue
# This passed all the required checks, write the line to the temporary output files
tmp_counts.write(line)
tmp_metrics.write(existing_metrics_data[barcode])
succesfully_previously_quantified.add(barcode)
shutil.move(counts_output_filename+'.tmp', counts_output_filename)
shutil.move(metrics_output_filename+'.tmp', metrics_output_filename)
# For any 'already quantified' barcode, make sure we also copy over the ambiguity data
with open(ambig_counts_output_filename, 'r') as in_f, \
open(ambig_counts_output_filename+'.tmp', 'w') as tmp_f:
f_first_line = (worker_index == 0)
for line in in_f:
if f_first_line:
tmp_f.write(line)
f_first_line = False
continue
if (line.partition('\t')[0] in succesfully_previously_quantified) and (line[-1]=='\n'):
tmp_f.write(line)
shutil.move(ambig_counts_output_filename+'.tmp', ambig_counts_output_filename)
with open(ambig_partners_output_filename, 'r') as in_f, \
open(ambig_partners_output_filename+'.tmp', 'w') as tmp_f:
for line in in_f:
if (line.partition('\t')[0] in succesfully_previously_quantified) and (line[-1]=='\n'):
tmp_f.write(line)
shutil.move(ambig_partners_output_filename+'.tmp', ambig_partners_output_filename)
barcodes_to_quantify = [bc for bc in barcodes_for_this_worker if (bc not in succesfully_previously_quantified and bc not in previously_ignored)]
print_to_stderr("""[%s] This worker assigned %d out of %d total barcodes.""" % (self.name, len(barcodes_for_this_worker), len(sorted_barcode_names)))
if len(barcodes_for_this_worker)-len(barcodes_to_quantify) > 0:
print_to_stderr(""" %d previously quantified, %d previously ignored, %d left for this run.""" % (len(succesfully_previously_quantified), len(previously_ignored), len(barcodes_to_quantify)))
print_to_stderr(('{0:<14.12}'.format('Prefix') if analysis_prefix else '') + '{0:<14.12}{1:<9}'.format("Library", "Barcode"), False)
print_to_stderr("{0:<8s}{1:<8s}{2:<10s}".format("Reads", "Counts", "Ambigs"))
for barcode in barcodes_to_quantify:
self.quantify_expression_for_barcode(barcode,
counts_output_filename, metrics_output_filename,
ambig_counts_output_filename, ambig_partners_output_filename,
no_bam=no_bam, write_header=(not header_written) and (worker_index==0), analysis_prefix=analysis_prefix,
min_counts = min_counts, run_filter=run_filter)
header_written = True
print_to_stderr("Per barcode quantification completed.")
if no_bam:
return
#Gather list of barcodes with output from the metrics file
genomic_bams = []
with open(metrics_output_filename, 'r') as f:
for line in f:
bc = line.partition('\t')[0]
if bc == 'Barcode': #This is the line in the header
continue
genomic_bams.append(get_barcode_genomic_bam_filename(bc))
print_to_stderr("Merging BAM output.")
try:
subprocess.check_output([self.project.paths.samtools, 'merge', '-f', merged_bam_filename]+genomic_bams, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError, err:
print_to_stderr(" CMD: %s" % str(err.cmd)[:400])
print_to_stderr(" stdout/stderr:")
print_to_stderr(err.output)
raise Exception(" === Error in samtools merge === ")
print_to_stderr("Indexing merged BAM output.")
try:
subprocess.check_output([self.project.paths.samtools, 'index', merged_bam_filename], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError, err:
print_to_stderr(" CMD: %s" % str(err.cmd)[:400])
print_to_stderr(" stdout/stderr:")
print_to_stderr(err.output)
raise Exception(" === Error in samtools index === ")
print(genomic_bams)
for filename in genomic_bams:
os.remove(filename)
os.remove(filename + '.bai')
def quantify_expression_for_barcode(self, barcode, counts_output_filename, metrics_output_filename,
ambig_counts_output_filename, ambig_partners_output_filename,
min_counts=0, analysis_prefix='', no_bam=False, write_header=False, run_filter=[]):
print_to_stderr(('{0:<14.12}'.format(analysis_prefix) if analysis_prefix else '') + '{0:<14.12}{1:<9}'.format(self.name, barcode), False)
unaligned_reads_output = os.path.join(self.paths.quant_dir, '%s%s.unaligned.fastq' % (analysis_prefix,barcode))
aligned_bam = os.path.join(self.paths.quant_dir, '%s%s.aligned.bam' % (analysis_prefix,barcode))
# Bowtie command
bowtie_cmd = [self.project.paths.bowtie, self.project.paths.bowtie_index, '-q', '-',
'-p', '1', '-a', '--best', '--strata', '--chunkmbs', '1000', '--norc', '--sam',
'-shmem', #should sometimes reduce memory usage...?
'-m', str(self.project.parameters['bowtie_arguments']['m']),
'-n', str(self.project.parameters['bowtie_arguments']['n']),
'-l', str(self.project.parameters['bowtie_arguments']['l']),
'-e', str(self.project.parameters['bowtie_arguments']['e']),
]
if self.project.parameters['output_arguments']['output_unaligned_reads_to_other_fastq']:
bowtie_cmd += ['--un', unaligned_reads_output]
# Quantification command
script_dir = os.path.dirname(os.path.realpath(__file__))
quant_cmd = [self.project.paths.python, self.project.paths.quantify_umifm_from_alignments_py,
'-m', str(self.project.parameters['umi_quantification_arguments']['m']),
'-u', str(self.project.parameters['umi_quantification_arguments']['u']),
'-d', str(self.project.parameters['umi_quantification_arguments']['d']),
'--min_non_polyA', str(self.project.parameters['umi_quantification_arguments']['min_non_polyA']),
'--library', str(self.name),
'--barcode', str(barcode),
'--counts', counts_output_filename,
'--metrics', metrics_output_filename,
'--ambigs', ambig_counts_output_filename,
'--ambig-partners', ambig_partners_output_filename,
'--min-counts', str(min_counts),
]
if not no_bam:
quant_cmd += ['--bam', aligned_bam]
if write_header:
quant_cmd += ['--write-header']
if self.project.parameters['umi_quantification_arguments']['split-ambigs']:
quant_cmd.append('--split-ambig')
if self.project.parameters['output_arguments']['filter_alignments_to_softmasked_regions']:
quant_cmd += ['--soft-masked-regions', self.project.paths.bowtie_index + '.soft_masked_regions.pickle']
# Spawn processes
p1 = subprocess.Popen(bowtie_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p2 = subprocess.Popen(quant_cmd, stdin=p1.stdout, stderr=subprocess.PIPE)
for line in self.get_reads_for_barcode(barcode, run_filter=run_filter):
p1.stdin.write(line)
p1.stdin.close()
if p1.wait() != 0:
print_to_stderr('\n')
print_to_stderr(p1.stderr.read())
raise Exception('\n === Error on bowtie ===')
if p2.wait() != 0:
print_to_stderr(p2.stderr.read())
raise Exception('\n === Error on Quantification Script ===')
print_to_stderr(p2.stderr.read(), False)
if no_bam:
# We are done here
return False
if not os.path.isfile(aligned_bam):
raise Exception("\n === No aligned bam was output for barcode %s ===" % barcode)
genomic_bam = os.path.join(self.paths.quant_dir, '%s%s.genomic.bam' % (analysis_prefix,barcode))
sorted_bam = os.path.join(self.paths.quant_dir, '%s%s.genomic.sorted.bam' % (analysis_prefix,barcode))
try:
subprocess.check_output([self.project.paths.rsem_tbam2gbam, self.project.paths.bowtie_index, aligned_bam, genomic_bam], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError, err:
print_to_stderr(" CMD: %s" % str(err.cmd)[:100])
print_to_stderr(" stdout/stderr:")
print_to_stderr(err.output)
raise Exception(" === Error in rsem-tbam2gbam === ")
try:
subprocess.check_output([self.project.paths.samtools, 'sort', '-o', sorted_bam, genomic_bam], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError, err:
print_to_stderr(" CMD: %s" % str(err.cmd)[:100])
print_to_stderr(" stdout/stderr:")
print_to_stderr(err.output)
raise Exception(" === Error in samtools sort === ")
try:
subprocess.check_output([self.project.paths.samtools, 'index', sorted_bam], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError, err:
print_to_stderr(" CMD: %s" % str(err.cmd)[:100])
print_to_stderr(" stdout/stderr:")
print_to_stderr(err.output)
raise Exception(" === Error in samtools index === ")
os.remove(aligned_bam)
os.remove(genomic_bam)
return True
def aggregate_counts(self, analysis_prefix='', process_ambiguity_data=False):
if analysis_prefix:
analysis_prefix += '.'
quant_output_files = [fn[len(analysis_prefix):].split('.')[0] for fn in os.listdir(self.paths.quant_dir) if ('worker' in fn and fn[:len(analysis_prefix)]==analysis_prefix)]
worker_names = [w[6:] for w in quant_output_files]
worker_indices = set(int(w.split('_')[0]) for w in worker_names)
total_workers = set(int(w.split('_')[1]) for w in worker_names)
if len(total_workers) > 1:
raise Exception("""Quantification for library %s, prefix '%s' was run with different numbers of total_workers.""" % (self.name, analysis_prefix))
total_workers = list(total_workers)[0]
missing_workers = []
for i in range(total_workers):
if i not in worker_indices:
missing_workers.append(i)
if missing_workers:
missing_workers = ','.join([str(i) for i in sorted(missing_workers)])
raise Exception("""Output from workers %s (total %d) is missing. """ % (missing_workers, total_workers))
aggregated_counts_filename = os.path.join(self.project.project_dir, self.name, self.name+analysis_prefix+'.counts.tsv')
aggregated_quant_metrics_filename = os.path.join(self.project.project_dir, self.name, self.name+analysis_prefix+'.quant_metrics.tsv')
aggregated_ignored_filename = os.path.join(self.project.project_dir, self.name, self.name+analysis_prefix+'.ignored_barcodes.txt')
aggregated_bam_output = os.path.join(self.project.project_dir, self.name, self.name+analysis_prefix+'.bam')
aggregated_ambig_counts_filename = os.path.join(self.project.project_dir, self.name, self.name+analysis_prefix+'.ambig_counts.tsv')
aggregated_ambig_partners_filename = os.path.join(self.project.project_dir, self.name, self.name+analysis_prefix+'.ambig_partners.tsv')
agg_counts = open(aggregated_counts_filename, mode='w')
agg_metrics = open(aggregated_quant_metrics_filename, mode='w')
agg_ignored = open(aggregated_ignored_filename, mode='w')
if process_ambiguity_data:
agg_ambigs = open(aggregated_ambig_counts_filename, mode='w')
agg_ambig_partners = open(aggregated_ambig_partners_filename, mode='w')
end_of_counts_header = 0
end_of_metrics_header = 0
end_of_ambigs_header = 0
print_to_stderr(' Concatenating output from all workers.')
for worker_index in range(total_workers):
counts_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.counts.tsv' % (analysis_prefix, worker_index, total_workers))
ambig_counts_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.ambig.counts.tsv' % (analysis_prefix, worker_index, total_workers))
ambig_partners_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.ambig.partners' % (analysis_prefix, worker_index, total_workers))
metrics_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.metrics.tsv' % (analysis_prefix, worker_index, total_workers))
ignored_for_output_filename = counts_output_filename+'.ignored'
# Counts
with open(counts_output_filename, 'r') as f:
shutil.copyfileobj(f, agg_counts)
# Metrics
with open(metrics_output_filename, 'r') as f:
shutil.copyfileobj(f, agg_metrics)
# Ignored
if os.path.isfile(counts_output_filename+'.ignored'):
with open(counts_output_filename+'.ignored', 'r') as f:
shutil.copyfileobj(f, agg_ignored)
if process_ambiguity_data:
with open(ambig_counts_output_filename, 'r') as f:
shutil.copyfileobj(f, agg_ambigs)
with open(ambig_partners_output_filename, 'r') as f:
shutil.copyfileobj(f, agg_ambig_partners)
print_to_stderr(' GZIPping concatenated output.')
agg_counts.close()
subprocess.Popen(['gzip', '-f', aggregated_counts_filename]).wait()
agg_metrics.close()
subprocess.Popen(['gzip', '-f', aggregated_quant_metrics_filename]).wait()
print_to_stderr('Aggregation completed in %s.gz' % aggregated_counts_filename)
if process_ambiguity_data:
agg_ambigs.close()
subprocess.Popen(['gzip', '-f', aggregated_ambig_counts_filename]).wait()
agg_ambig_partners.close()
subprocess.Popen(['gzip', '-f', aggregated_ambig_partners_filename]).wait()
target_bams = [os.path.join(self.paths.quant_dir, '%sworker%d_%d.bam'% (analysis_prefix, worker_index, total_workers)) for worker_index in range(total_workers)]
target_bams = [t for t in target_bams if os.path.isfile(t)]
if target_bams:
print_to_stderr(' Merging BAM files.')
p1 = subprocess.Popen([self.project.paths.samtools, 'merge', '-f', aggregated_bam_output]+target_bams, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
if p1.wait() == 0:
print_to_stderr(' Indexing merged BAM file.')
p2 = subprocess.Popen([self.project.paths.samtools, 'index', aggregated_bam_output], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
if p2.wait() == 0:
for filename in target_bams:
os.remove(filename)
os.remove(filename + '.bai')
else:
print_to_stderr(" === Error in samtools index ===")
print_to_stderr(p2.stderr.read())
else:
print_to_stderr(" === Error in samtools merge ===")
print_to_stderr(p1.stderr.read())
# print_to_stderr('Deleting per-worker counts files.')
# for worker_index in range(total_workers):
# counts_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.counts.tsv' % (analysis_prefix, worker_index, total_workers))
# os.remove(counts_output_filename)
# ambig_counts_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.ambig.counts.tsv' % (analysis_prefix, worker_index, total_workers))
# os.remove(ambig_counts_output_filename)
# ambig_partners_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.ambig.partners' % (analysis_prefix, worker_index, total_workers))
# os.remove(ambig_partners_output_filename)
# metrics_output_filename = os.path.join(self.paths.quant_dir, '%sworker%d_%d.metrics.tsv' % (analysis_prefix, worker_index, total_workers))
# os.remove(metrics_output_filename)
# ignored_for_output_filename = counts_output_filename+'.ignored'
# os.remove(ignored_for_output_filename)
class LibrarySequencingPart():
    """One sequencing-run part of a library: a filtered FastQ plus derived files.

    Wraps the filtered FastQ for a (run, library, part) triple together with the
    files derived from it: a per-barcode read-count pickle, a filtering metrics
    YAML, and a barcode-sorted gzipped FastQ with a byte-offset index.
    """

    def __init__(self, filtered_fastq_filename=None, project=None, run_name='', library_name='', part_name=''):
        self.project = project
        self.run_name = run_name
        self.part_name = part_name
        self.library_name = library_name

        # All derived filenames hang off the filtered FastQ path.
        # NOTE(review): filtering_metrics_filename comes out as '<stem>metrics.yaml'
        # with no dot before 'metrics' — preserved as-is since other code may look
        # this name up.
        self.filtered_fastq_filename = filtered_fastq_filename
        self.barcode_counts_pickle_filename = filtered_fastq_filename + '.counts.pickle'
        self.filtering_metrics_filename = '.'.join(filtered_fastq_filename.split('.')[:-1]) + 'metrics.yaml'
        self.sorted_gzipped_fastq_filename = filtered_fastq_filename + '.sorted.fastq.gz'
        self.sorted_gzipped_fastq_index_filename = filtered_fastq_filename + '.sorted.fastq.gz.index.pickle'

    @property
    def is_filtered(self):
        # True when both filtering outputs exist on disk (cached after first check).
        if not hasattr(self, '_is_filtered'):
            self._is_filtered = os.path.exists(self.filtered_fastq_filename) and os.path.exists(self.barcode_counts_pickle_filename)
        return self._is_filtered

    @property
    def is_sorted(self):
        # True when the barcode-sorted FastQ and its index exist (cached).
        if not hasattr(self, '_is_sorted'):
            self._is_sorted = os.path.exists(self.sorted_gzipped_fastq_filename) and os.path.exists(self.sorted_gzipped_fastq_index_filename)
        return self._is_sorted

    @property
    def part_barcode_counts(self):
        # Lazily-loaded {barcode: read_count} mapping for this part.
        if not hasattr(self, '_part_barcode_counts'):
            with open(self.barcode_counts_pickle_filename, 'rb') as f:  # binary mode: pickle data
                self._part_barcode_counts = pickle.load(f)
        return self._part_barcode_counts

    @property
    def sorted_index(self):
        # Lazily-loaded index: {barcode: (orig_bc, start, end, length, reads)}.
        if not hasattr(self, '_sorted_index'):
            with open(self.sorted_gzipped_fastq_index_filename, 'rb') as f:  # binary mode: pickle data
                self._sorted_index = pickle.load(f)
        return self._sorted_index

    def contains_library_in_query(self, query_libraries):
        """Return True if this part belongs to one of the queried libraries."""
        return self.library_name in query_libraries

    def sort_reads_by_barcode(self, abundant_barcodes=None):
        """Rewrite the filtered FastQ grouped by barcode into one gzipped file.

        Each abundant barcode's reads are gzip-compressed into an in-memory
        buffer; the buffers are then concatenated (abundant barcodes in
        decreasing abundance, ignored reads last) into
        `sorted_gzipped_fastq_filename`. An index pickle maps each (renamed)
        barcode to its byte range within that file.
        """
        if abundant_barcodes is None:  # avoid a shared mutable default argument
            abundant_barcodes = {}
        # Barcodes in decreasing abundance, restricted to those seen in this part.
        sorted_barcodes = [j for j,v in sorted(abundant_barcodes.items(), key=lambda i:-i[1][1]) if j in self.part_barcode_counts]

        # One in-memory gzip stream per barcode (plus one for ignored reads).
        barcode_buffers = {}
        barcode_gzippers = {}
        for bc in sorted_barcodes + ['ignored']:
            barcode_buffers[bc] = BytesIO()
            barcode_gzippers[bc] = gzip.GzipFile(fileobj=barcode_buffers[bc], mode='wb')

        total_processed_reads = 0
        total_ignored_reads = 0
        bcs_with_data = set()

        total_reads = sum(self.part_barcode_counts.values())
        print_to_stderr('Sorting %d reads from %d barcodes above absolute minimum threshold.' % (total_reads, len(abundant_barcodes)))
        with open(self.filtered_fastq_filename, 'r') as input_fastq:
            for name, seq, qual in from_fastq(input_fastq):
                total_processed_reads += 1
                bc = name.split(':')[0]  # the filtering step stored the barcode as the read-name prefix

                if total_processed_reads%1000000 == 0:
                    print_to_stderr('Read in %.02f percent of all reads (%d)' % (100.*total_processed_reads/total_reads, total_processed_reads))

                if bc in abundant_barcodes:
                    barcode_gzippers[bc].write(to_fastq(name, seq, qual))
                    bcs_with_data.add(bc)
                else:
                    total_ignored_reads += 1
                    barcode_gzippers['ignored'].write(to_fastq(name, seq, qual))
                    bcs_with_data.add('ignored')

        sorted_output_index = {}
        with open(self.sorted_gzipped_fastq_filename, 'wb') as sorted_output:
            for original_bc in sorted_barcodes + ['ignored']:
                if original_bc != 'ignored':
                    new_bc_name = abundant_barcodes[original_bc][0]
                    barcode_reads_count = self.part_barcode_counts[original_bc]
                else:
                    new_bc_name = 'ignored'
                    barcode_reads_count = total_ignored_reads

                start_pos = sorted_output.tell()
                barcode_gzippers[original_bc].close()  # flush the gzip trailer before copying
                if original_bc in bcs_with_data:
                    barcode_buffers[original_bc].seek(0)
                    shutil.copyfileobj(barcode_buffers[original_bc], sorted_output)
                barcode_buffers[original_bc].close()
                end_pos = sorted_output.tell()

                if end_pos > start_pos:
                    sorted_output_index[new_bc_name] = (original_bc, start_pos, end_pos, end_pos-start_pos, barcode_reads_count)

        with open(self.sorted_gzipped_fastq_index_filename, 'wb') as f:  # binary mode: pickle data
            pickle.dump(sorted_output_index, f)

    def get_reads_for_barcode(self, barcode):
        """Yield raw FastQ lines for one barcode from the sorted gzipped file.

        Yields nothing when the barcode is absent from the index.
        """
        if barcode not in self.sorted_index:
            # Plain return, not `raise StopIteration`: under PEP 479 (Python 3.7+)
            # an explicit StopIteration inside a generator becomes a RuntimeError.
            return
        original_barcode, start_byte_offset, end_byte_offset, byte_length, barcode_reads = self.sorted_index[barcode]
        with open(self.sorted_gzipped_fastq_filename, 'rb') as sorted_output:
            sorted_output.seek(start_byte_offset)
            byte_buffer = BytesIO(sorted_output.read(byte_length))
        # Iterate to EOF; the previous `while True: yield next(...)` relied on
        # StopIteration escaping the generator (also broken by PEP 479).
        ungzipper = gzip.GzipFile(fileobj=byte_buffer, mode='rb')
        for line in ungzipper:
            yield line

    @contextmanager
    def trimmomatic_and_low_complexity_filter_process(self):
        """
        We start 3 processes that are connected with Unix pipes.

        Process 1 - Trimmomatic. Doesn't support stdin/stdout, so we instead use named pipes (FIFOs). It reads from FIFO1, and writes to FIFO2. 
        Process 2 - In line complexity filter, a python script. It reads from FIFO2 (Trimmomatic output) and writes to the output file.
        Process 3 - Indexer that counts the number of reads for every barcode. This reads from stdin, writes the reads to stdout and writes the index as a pickle to stderr.

        Yields a writable file handle (FIFO1); the caller streams raw FastQ into
        it. On exit, metrics from all three stages are written to
        `filtering_metrics_filename` as YAML.
        """
        filtered_dir = os.path.dirname(self.filtered_fastq_filename) #We will use the same directory for creating temporary FIFOs, assuming we have write access.
        self.filtering_statistics_counter = defaultdict(int)
        with FIFO(dir=filtered_dir) as fifo2, open(self.filtered_fastq_filename, 'w') as filtered_fastq_file, open(self.filtered_fastq_filename+'.counts.pickle', 'w') as filtered_index_file:
            low_complexity_filter_cmd = [self.project.paths.python, self.project.paths.trim_polyA_and_filter_low_complexity_reads_py,
                '-input', fifo2.filename,
                '--min-post-trim-length', self.project.parameters['trimmomatic_arguments']['MINLEN'],
                '--max-low-complexity-fraction', str(self.project.parameters['low_complexity_filter_arguments']['max_low_complexity_fraction']),
                ]
            counter_cmd = [self.project.paths.python, self.project.paths.count_barcode_distribution_py]

            p2 = subprocess.Popen(low_complexity_filter_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            p3 = subprocess.Popen(counter_cmd, stdin=p2.stdout, stdout=filtered_fastq_file, stderr=filtered_index_file)

            with FIFO(dir=filtered_dir) as fifo1:
                trimmomatic_cmd = [self.project.paths.java, '-Xmx500m', '-jar', self.project.paths.trimmomatic_jar,
                        'SE', '-threads', "1", '-phred33', fifo1.filename, fifo2.filename]
                for arg in self.project.parameters['trimmomatic_arguments']['argument_order']:
                    val = self.project.parameters['trimmomatic_arguments'][arg]
                    trimmomatic_cmd.append('%s:%s' % (arg, val))

                p1 = subprocess.Popen(trimmomatic_cmd, stderr=subprocess.PIPE)

                fifo1_filehandle = open(fifo1.filename, 'w')
                yield fifo1_filehandle
                fifo1_filehandle.close()

                trimmomatic_stderr = p1.stderr.read().splitlines()
                if trimmomatic_stderr[2] != 'TrimmomaticSE: Completed successfully':
                    # Fixed: this message previously interpolated an undefined
                    # variable (`filtered_filename`), masking the real failure
                    # with a NameError.
                    raise Exception('Trimmomatic did not complete succesfully on %s' % self.filtered_fastq_filename)
                trimmomatic_metrics = trimmomatic_stderr[1].split()
                # ['Input', 'Reads:', #READS, 'Surviving:', #SURVIVING, (%SURVIVING), 'Dropped:', #DROPPED, (%DROPPED)]
                trimmomatic_metrics = {'input' : trimmomatic_metrics[2], 'output': trimmomatic_metrics[4], 'dropped': trimmomatic_metrics[7]}
                p1.wait()

            # The complexity filter emits its metrics as a pickle on stderr.
            complexity_filter_metrics = pickle.load(p2.stderr)
            p2.wait()
            p3.wait()

        filtering_metrics = {
            'read_structure' : dict(self.filtering_statistics_counter),
            'trimmomatic' : trimmomatic_metrics,
            'complexity_filter': complexity_filter_metrics,
        }
        with open(self.filtering_metrics_filename, 'w') as f:
            yaml.dump(dict(filtering_metrics), f, default_flow_style=False)
class V1V2Filtering(LibrarySequencingPart):
    """Read filtering for inDrops v1/v2 libraries.

    R1 ('metaread') carries barcode/UMI metadata; R2 ('bioread') carries the
    biological sequence. Filtering rewrites the pair as a single FastQ whose
    read names encode the barcode and UMI.
    """

    def __init__(self, bioread_filename=None, metaread_filename=None, *args, **kwargs):
        self.bioread_filename = bioread_filename
        self.metaread_filename = metaread_filename
        LibrarySequencingPart.__init__(self, *args, **kwargs)

    def filter_and_count_reads(self):
        """
        Input the two raw FastQ files
        Output:
            - A single fastQ file that uses the read name to store the barcoding information
            - A pickle of the number of reads originating from each barcode 
        """
        # Relevant paths
        r1_filename, r2_filename = self.metaread_filename, self.bioread_filename

        # Get barcode neighborhoods (maps each barcode or 1-mismatch neighbor to its canonical form)
        bc1s = self.project.gel_barcode1_revcomp_list_neighborhood
        bc2s = self.project.gel_barcode2_revcomp_list_neighborhood

        # This starts a Trimmomatic process, a low complexity filter process, and will
        # upon closing, start the barcode distribution counting process.

        last_ping = time.time()
        ping_every_n_reads = 1000000
        ping_header = "{0:>12}{1:>16}{2:>12}{3:>10}{4:>10}{5:>10}{6:>10}{7:>10}{8:>10}{9:>10}"
        ping_header = ping_header.format("Total Reads", "", "Valid Reads", "W1 in R2", "Empty", "No W1", "No polyT", "No BC1", "No BC2", "No UMI")
        # Fixed: the template and key list below previously used 'Umi_error',
        # while _process_reads increments 'UMI_error' — so the 'No UMI' column
        # always displayed 0.
        ping_template = "{total:12d} {rate:5.1f} sec/M {Valid:12.1%}{W1_in_R2:10.1%}{empty_read:10.1%}{No_W1:10.1%}{No_polyT:10.1%}{BC1:10.1%}{BC2:10.1%}{UMI_error:10.1%}"
        def print_ping_to_log(last_ping):
            # Log cumulative filtering statistics as fractions of total reads.
            sec_per_mil = (time.time()-last_ping)/(ping_every_n_reads/10**6) if last_ping else 0.0
            total = self.filtering_statistics_counter['Total']
            if total > 0:
                ping_format_data = {k: float(self.filtering_statistics_counter[k])/total for k in ['Valid', 'W1_in_R2', 'empty_read', 'No_W1', 'No_polyT', 'BC1', 'BC2', 'UMI_error']}
                print_to_stderr(ping_template.format(total=total, rate=sec_per_mil, **ping_format_data))

        with self.trimmomatic_and_low_complexity_filter_process() as trim_process:
            #Iterate over the weaved reads
            for r_name, r1_seq, r1_qual, r2_seq, r2_qual in self._weave_fastqs(r1_filename, r2_filename):
                    
                # Check if they should be kept
                keep, result = self._process_reads(r1_seq, r2_seq, valid_bc1s=bc1s, valid_bc2s=bc2s)

                # Write the reads worth keeping
                if keep:
                    bc, umi = result
                    trim_process.write(to_fastq_lines(bc, umi, r2_seq, r2_qual, r_name))
                    self.filtering_statistics_counter['Valid'] += 1
                else:
                    self.filtering_statistics_counter[result] += 1

                # Track speed per M reads
                self.filtering_statistics_counter['Total'] += 1
                if self.filtering_statistics_counter['Total']%(10*ping_every_n_reads) == 1:
                    print_to_stderr(ping_header)
                if self.filtering_statistics_counter['Total']%ping_every_n_reads == 0:
                    print_ping_to_log(last_ping)
                    last_ping = time.time()

            print_ping_to_log(False)
        print_to_stderr(self.filtering_statistics_counter)

    def _weave_fastqs(self, r1_fastq, r2_fastq):
        """
        Merge 2 FastQ files by returning paired reads for each.
        Returns only R1_seq, R2_seq and R2_qual.
        """
        is_gz_compressed = False
        is_bz_compressed = False
        if r1_fastq.split('.')[-1] == 'gz' and r2_fastq.split('.')[-1] == 'gz':
            is_gz_compressed = True
        #Added bz2 support VS
        if r1_fastq.split('.')[-1] == 'bz2' and r2_fastq.split('.')[-1] == 'bz2':
            is_bz_compressed = True
            
        # Decompress Gzips using subprocesses because python gzip is incredibly slow.
        if is_gz_compressed:    
            r1_gunzip = subprocess.Popen("gzip --stdout -d %s" % (r1_fastq), shell=True, stdout=subprocess.PIPE)
            r1_stream = r1_gunzip.stdout
            r2_gunzip = subprocess.Popen("gzip --stdout -d %s" % (r2_fastq), shell=True, stdout=subprocess.PIPE)
            r2_stream = r2_gunzip.stdout
        elif is_bz_compressed:
            r1_bunzip = subprocess.Popen("bzcat %s" % (r1_fastq), shell=True, stdout=subprocess.PIPE)
            r1_stream = r1_bunzip.stdout
            r2_bunzip = subprocess.Popen("bzcat %s" % (r2_fastq), shell=True, stdout=subprocess.PIPE)
            r2_stream = r2_bunzip.stdout
        else:
            r1_stream = open(r1_fastq, 'r')
            r2_stream = open(r2_fastq, 'r')

        while True:
            try:
                #Read 4 lines from each FastQ
                name = next(r1_stream).rstrip()[1:].split()[0] #Read name
                r1_seq = next(r1_stream).rstrip() #Read seq
                next(r1_stream) #+ line
                r1_qual = next(r1_stream).rstrip() #Read qual
                
                next(r2_stream) #Read name
                r2_seq = next(r2_stream).rstrip() #Read seq
                next(r2_stream) #+ line
                r2_qual = next(r2_stream).rstrip() #Read qual
            except StopIteration:
                # End of input. Explicit break: letting StopIteration escape a
                # generator is a RuntimeError under PEP 479 (Python 3.7+), and it
                # also lets us reach the close() calls below at EOF.
                break

            # changed to allow for empty reads (caused by adapter trimming)
            if name:
                yield name, r1_seq, r1_qual, r2_seq, r2_qual
            else:
            # if not r1_seq or not r2_seq:
                break

        r1_stream.close()
        r2_stream.close()

    def _process_reads(self, name, read, valid_bc1s={}, valid_bc2s={}):
        """
        Returns either:
            True, (barcode, umi)
                (if read passes filter)
            False, name of filter that failed
                (for stats collection)
        
        R1 anatomy: BBBBBBBB[BBB]WWWWWWWWWWWWWWWWWWWWWWCCCCCCCCUUUUUUTTTTTTTTTT______________
            B = Barcode1, can be 8, 9, 10 or 11 bases long.
            W = 'W1' sequence, specified below
            C = Barcode2, always 8 bases
            U = UMI, always 6 bases
            T = Beginning of polyT tail.
            _ = Either sequencing survives across the polyT tail, or signal starts dropping off
                (and start being anything, likely with poor quality)

        Note: `name` is the R1 (metadata) sequence; `read` is the R2 (biological) sequence.
        """

        minimal_polyT_len_on_R1 = 7
        hamming_threshold_for_W1_matching = 3

        w1 = "GAGTGATTGCTTGTGACGCCTT"
        rev_w1 = "AAGGCGTCACAAGCAATCACTC" #Hard-code so we don't recompute on every one of millions of calls
        # If R2 contains rev_W1, this is almost certainly empty library
        if rev_w1 in read:
            return False, 'W1_in_R2'

        # # With reads sufficiently long, we will often see a PolyA sequence in R2. 
        # if polyA in read:
        #     return False, 'PolyA_in_R2'

        # Check for polyT signal at 3' end.
        # 44 is the length of BC1+W1+BC2+UMI, given the longest PolyT
        #BC1: 8-11 bases
        #W1 : 22 bases
        #BC2: 8 bases
        #UMI: 6 bases

        # check for empty reads (due to adapter trimming)
        if not read:
            return False, 'empty_read'
        
        #Check for W1 adapter
        #Allow for up to hamming_threshold errors
        if w1 in name:
            w1_pos = name.find(w1)
            if not 7 < w1_pos < 12:
                return False, 'No_W1'
        else:
            #Try to find W1 adapter at start positions 8-11
            #by checking hamming distance to W1.
            for w1_pos in range(8, 12):
                if string_hamming_distance(w1, name[w1_pos:w1_pos+22]) <= hamming_threshold_for_W1_matching:
                    break
            else:
                return False, 'No_W1'
        
        bc2_pos=w1_pos+22
        umi_pos=bc2_pos+8
        polyTpos=umi_pos+6
        # Require a near-perfect polyT stretch right after the UMI.
        expected_poly_t = name[polyTpos:polyTpos+minimal_polyT_len_on_R1]
        if string_hamming_distance(expected_poly_t, 'T'*minimal_polyT_len_on_R1) > 3:
            return False, 'No_polyT'
            
        bc1 = str(name[:w1_pos])
        bc2 = str(name[bc2_pos:umi_pos])
        umi = str(name[umi_pos:umi_pos+6])
        
        #Validate barcode (and try to correct when there is no ambiguity)
        if valid_bc1s and valid_bc2s:
            # Check if BC1 and BC2 can be mapped to expected barcodes
            if bc1 in valid_bc1s:
                # BC1 might be a neighboring BC, rather than a valid BC itself. 
                bc1 = valid_bc1s[bc1]
            else:
                return False, 'BC1'
            if bc2 in valid_bc2s:
                bc2 = valid_bc2s[bc2]
            else:
                return False, 'BC2'
            if 'N' in umi:
                return False, 'UMI_error'
        bc = '%s-%s'%(bc1, bc2)
        return True, (bc, umi)
class V3Demultiplexer():
    """Demultiplexing + filtering for inDrops v3 runs.

    A v3 run produces 4 parallel FastQs per part (R1 biological read, R2 gel
    barcode 1, R3 library index, R4 barcode2+UMI+polyA). This class routes each
    read to its library's LibrarySequencingPart filtering pipeline.
    """

    def __init__(self, library_indices, project=None, part_filename="", input_filename="", run_name="", part_name=""):
        self.input_filename = input_filename
        self.project = project
        self.run_name = run_name
        self.part_name = part_name
        # One LibrarySequencingPart per library index found in this run part.
        self.libraries = {}
        for lib in library_indices:
            lib_index = lib['library_index']
            lib_name = lib['library_name']
            library_part_filename = part_filename.format(library_name=lib_name, library_index=lib_index)
            self.libraries[lib_index] = LibrarySequencingPart(filtered_fastq_filename=library_part_filename, project=project, run_name=run_name, library_name=lib_name, part_name=part_name)

    def _weave_fastqs(self, fastqs):
        """Yield (read_name, [seqs], [quals]) across the 4 parallel FastQ files."""
        last_extension = [fn.split('.')[-1] for fn in fastqs]
        if all(ext == 'gz' for ext in last_extension):
            processes = [subprocess.Popen("gzip --stdout -d %s" % (fn), shell=True, stdout=subprocess.PIPE) for fn in fastqs]
            streams = [r.stdout for r in processes]
        elif all(ext == 'bz2' for ext in last_extension):
            processes = [subprocess.Popen("bzcat %s" % (fn), shell=True, stdout=subprocess.PIPE) for fn in fastqs]
            streams = [r.stdout for r in processes]
        elif all(ext == 'fastq' for ext in last_extension):
            streams = [open(fn, 'r') for fn in fastqs]
        else:
            # Fixed: this previously did `raise("ERROR: ...")`, which raises a
            # TypeError (strings are not exceptions) and hides the message.
            raise Exception("ERROR: Different files are compressed differently. Check input.")

        while True:
            try:
                names = [next(s)[:-1].split()[0] for s in streams]
                seqs = [next(s)[:-1] for s in streams]
                blanks = [next(s)[:-1] for s in streams]  # the '+' separator lines; consumed and discarded
                quals = [next(s)[:-1] for s in streams]
            except StopIteration:
                # EOF. Explicit break: letting StopIteration escape a generator is
                # a RuntimeError under PEP 479 (Python 3.7+).
                break

            assert all(name==names[0] for name in names)
            yield names[0], seqs, quals

        for s in streams:
            s.close()

    def _process_reads(self, name, seqs, quals, valid_bc1s={}, valid_bc2s={}, valid_libs={}):
        """
        Returns either:
            True, lib_index, (barcode, umi)
                (if read passes filter)
            False, lib_index, name of filter that failed
                (for stats collection)

        seqs = [R1 (bio read), R2 (barcode1), R3 (library index), R4 (barcode2 + UMI + polyA)].
        """
        r1, r2, r3, r4 = seqs
        if r3 in valid_libs:
            lib_index = valid_libs[r3]
        else:
            return False, r3, 'Invalid_library_index'

        if r2 in valid_bc1s:
            bc1 = valid_bc1s[r2]
        else:
            return False, lib_index, 'Invalid_BC1'

        # R4 layout: 8 bases of barcode2, then 6 bases of UMI, then polyA tail.
        orig_bc2 = r4[:8]
        umi = r4[8:8+6]
        polyA = r4[8+6:]

        if orig_bc2 in valid_bc2s:
            bc2 = valid_bc2s[orig_bc2]
        else:
            return False, lib_index, 'Invalid_BC2'

        if 'N' in umi:
            return False, lib_index, 'UMI_contains_N'

        final_bc = '%s-%s' % (bc1, bc2)
        return True, lib_index, (final_bc, umi)

    def filter_and_count_reads(self):
        """Demultiplex the 4 raw FastQs, filter each read, and route it to its library."""
        # Prepare error corrected index sets: each 1-mismatch neighbor maps back to
        # its library index, unless the neighbor is ambiguous between indices.
        self.sequence_to_index_mapping = {}
        libs = self.libraries.keys()
        self.sequence_to_index_mapping = dict(zip(libs, libs))
        index_neighborhoods = [set(seq_neighborhood(lib, 1)) for lib in libs]
        for lib, clibs in zip(libs, index_neighborhoods):
            # Quick check that error-correction maps to a single index
            for clib in clibs:
                if sum(clib in hood for hood in index_neighborhoods)==1:
                    self.sequence_to_index_mapping[clib] = lib

        # Prepare error corrected barcode sets
        error_corrected_barcodes = self.project.gel_barcode2_list_neighborhood
        error_corrected_rev_compl_barcodes = self.project.gel_barcode2_revcomp_list_neighborhood

        # Open up our context managers
        manager_order = [] #It's imperative to exit managers the opposite order than we open them!
        trim_processes = {}
        trim_processes_managers = {}

        for lib in self.libraries.keys():
            manager_order.append(lib)
            trim_processes_managers[lib] = self.libraries[lib].trimmomatic_and_low_complexity_filter_process()
            trim_processes[lib] = trim_processes_managers[lib].__enter__()

        overall_filtering_statistics = defaultdict(int)

        # Paths for the 4 expected FastQs
        input_fastqs = []
        for r in ['R1', 'R2', 'R3', 'R4']:
            input_fastqs.append(self.input_filename.format(read=r))

        last_ping = time.time()
        ping_every_n_reads = 1000000
        ping_header = "{0:>12}{1:>16}{2:>12}{3:>10}{4:>10}{5:>10}{6:>10} |" + ''.join("{%d:>12.10}"%i for i in range(7,7+len(manager_order)))
        ping_header = ping_header.format("Total Reads", "", "Valid Reads", "No index", "No BC1", "No BC2", "No UMI", *[self.libraries[k].library_name for k in manager_order])
        ping_template = "{total:12d} {rate:5.1f} sec/M {Valid:12.1%}{Invalid_library_index:10.1%}{Invalid_BC1:10.1%}{Invalid_BC2:10.1%}{UMI_contains_N:10.1%} |{"+":>12.1%}{".join(manager_order)+":>12.1%}"

        def print_ping_to_log(last_ping):
            # Log cumulative filtering statistics as fractions of total reads.
            sec_per_mil = (time.time() - last_ping)/(float(ping_every_n_reads)/10**6) if last_ping else 0
            total = overall_filtering_statistics['Total']
            if total == 0:
                return  # guard against division by zero when no reads were seen
            ping_format_data = {k: float(overall_filtering_statistics[k])/total for k in ['Valid', 'Invalid_library_index', 'Invalid_BC1', 'Invalid_BC2', 'UMI_contains_N']}
            if overall_filtering_statistics['Valid'] > 0:
                ping_format_data.update({k: float(self.libraries[k].filtering_statistics_counter['Valid'])/overall_filtering_statistics['Valid'] for k in manager_order})
            print_to_stderr(ping_template.format(total=total, rate=sec_per_mil, **ping_format_data))

        print_to_stderr('Filtering %s, file %s' % (self.run_name, self.input_filename))
        for r_name, seqs, quals in self._weave_fastqs(input_fastqs):
            # Python 3 compatibility in mind!
            seqs = [s.decode('utf-8') for s in seqs]

            keep, lib_index, result = self._process_reads(r_name, seqs, quals,
                                                error_corrected_barcodes, error_corrected_rev_compl_barcodes,
                                                self.sequence_to_index_mapping)

            if keep:
                bc, umi = result
                bio_read = seqs[0]
                bio_qual = quals[0]
                trim_processes[lib_index].write(to_fastq_lines(bc, umi, bio_read, bio_qual, r_name[1:]))
                self.libraries[lib_index].filtering_statistics_counter['Valid'] += 1
                self.libraries[lib_index].filtering_statistics_counter['Total'] += 1
                overall_filtering_statistics['Valid'] += 1

            else:
                # An invalid library index can't be attributed to any library.
                if result != 'Invalid_library_index':
                    self.libraries[lib_index].filtering_statistics_counter[result] += 1
                    self.libraries[lib_index].filtering_statistics_counter['Total'] += 1
                overall_filtering_statistics[result] += 1

            # Track speed per M reads
            overall_filtering_statistics['Total'] += 1
            if overall_filtering_statistics['Total']%(ping_every_n_reads*10)==1:
                print_to_stderr(ping_header)
            if overall_filtering_statistics['Total']%ping_every_n_reads == 0:
                print_ping_to_log(last_ping)
                last_ping = time.time()
        print_ping_to_log(False)

        # Close up the context managers
        for lib in manager_order[::-1]:
            trim_processes_managers[lib].__exit__(None, None, None)

    def contains_library_in_query(self, query_libraries):
        """Return True if any of this part's libraries matches the query."""
        for lib in self.libraries.values():
            if lib.contains_library_in_query(query_libraries):
                return True
        return False
if __name__=="__main__":
    import sys, argparse
    # Command-line front-end for the indrops pipeline: one positional
    # `command` selects the pipeline stage; worker flags shard the work.
    parser = argparse.ArgumentParser()
    parser.add_argument('project', type=argparse.FileType('r'), help='Project YAML File.')
    parser.add_argument('-l', '--libraries', type=str, help='[all] Library name(s) to work on. If blank, will iterate over all libraries in project.', nargs='?', default='')
    parser.add_argument('-r', '--runs', type=str, help='[all] Run name(s) to work on. If blank, will iterate over all runs in project.', nargs='?', default='')
    parser.add_argument('command', type=str, choices=['info', 'filter', 'identify_abundant_barcodes', 'sort', 'quantify', 'aggregate', 'build_index', 'get_reads'])
    parser.add_argument('--total-workers', type=int, help='[all] Total workers that are working together. This takes precedence over barcodes-per-worker.', default=1)
    parser.add_argument('--worker-index', type=int, help='[all] Index of current worker (the first worker should have index 0).', default=0)
    parser.add_argument('--min-reads', type=int, help='[quantify] Minimun number of reads for barcode to be processed', nargs='?', default=750)
    parser.add_argument('--min-counts', type=int, help='[aggregate] Minimun number of UMIFM counts for barcode to be aggregated', nargs='?', default=0)
    parser.add_argument('--analysis-prefix', type=str, help='[quantify/aggregate/convert_bam/merge_bam] Prefix for analysis files.', nargs='?', default='')
    parser.add_argument('--no-bam', help='[quantify] Do not output alignments to bam file.', action='store_true')
    parser.add_argument('--genome-fasta-gz', help='[build_index] Path to gzipped soft-masked genomic FASTA file.')
    parser.add_argument('--ensembl-gtf-gz', help='[build_index] Path to gzipped ENSEMBL GTF file. ')
    parser.add_argument('--override-yaml', help="[all] Dictionnary to update project YAML with.. [You don't need this.]", nargs='?', default='')
    args = parser.parse_args()
    project = IndropsProject(args.project)
    # Optionally patch the project YAML in-memory (paths/parameters) and drop
    # cached properties so the overrides take effect on next access.
    if args.override_yaml:
        # NOTE(review): eval() on a CLI string — only safe for trusted input.
        override = eval(args.override_yaml)
        if 'paths' in override:
            project.yaml['paths'].update(override['paths'])
        if 'parameters' in override:
            for k,v in override['parameters'].items():
                project.yaml['parameters'][k].update(v)
        if hasattr(project, '_paths'):
            del project._paths
        if hasattr(project, '_parameters'):
            del project._parameters
    # Resolve the libraries to operate on (default: all in the project).
    target_libraries = []
    if args.libraries:
        for lib in args.libraries.split(','):
            assert lib in project.libraries
            if lib not in target_libraries:
                target_libraries.append(lib)
    else:
        target_libraries = project.libraries.keys()
    lib_query = set(target_libraries)
    # Resolve the runs to operate on (default: all in the project).
    target_runs = []
    if args.runs:
        for run in args.runs.split(','):
            assert run in project.runs
            target_runs.append(run)
    else:
        target_runs = project.runs.keys()
    # (library, part-index) pairs restricted to the selected runs.
    target_library_parts = []
    for lib in target_libraries:
        for pi, part in enumerate(project.libraries[lib].parts):
            if part.run_name in target_runs:
                target_library_parts.append((lib, pi))
    if args.command == 'info':
        print_to_stderr('Project Name: ' + project.name)
        target_run_parts = []
        for run in target_runs:
            target_run_parts += [part for part in project.runs[run] if part.contains_library_in_query(lib_query)]
        print_to_stderr('Total library parts in search query: ' + str(len(target_run_parts)))
    elif args.command == 'filter':
        # Each worker processes a disjoint slice of the run parts.
        target_run_parts = []
        for run in target_runs:
            target_run_parts += [part for part in project.runs[run] if part.contains_library_in_query(lib_query)]
        for part in worker_filter(target_run_parts, args.worker_index, args.total_workers):
            print_to_stderr('Filtering run "%s", part "%s"' % (part.run_name, part.part_name))
            part.filter_and_count_reads()
    elif args.command == 'identify_abundant_barcodes':
        for library in worker_filter(target_libraries, args.worker_index, args.total_workers):
            project.libraries[library].identify_abundant_barcodes()
    elif args.command == 'sort':
        for library, part_index in worker_filter(target_library_parts, args.worker_index, args.total_workers):
            print_to_stderr('Sorting %s, part "%s"' % (library, project.libraries[library].parts[part_index].filtered_fastq_filename))
            project.libraries[library].sort_reads_by_barcode(index=part_index)
    elif args.command == 'quantify':
        for library in target_libraries:
            project.libraries[library].quantify_expression(worker_index=args.worker_index, total_workers=args.total_workers,
                min_reads=args.min_reads, min_counts=args.min_counts,
                analysis_prefix=args.analysis_prefix,
                no_bam=args.no_bam, run_filter=target_runs)
            # Drop cached per-part sorted indices between libraries.
            for part in project.libraries[library].parts:
                if hasattr(part, '_sorted_index'):
                    del part._sorted_index
    elif args.command == 'aggregate':
        for library in worker_filter(target_libraries, args.worker_index, args.total_workers):
            project.libraries[library].aggregate_counts(analysis_prefix=args.analysis_prefix)
    elif args.command == 'build_index':
        project.build_transcriptome(args.genome_fasta_gz, args.ensembl_gtf_gz)
    elif args.command == 'get_reads':
        for library in target_libraries:
            sorted_barcode_names = project.libraries[library].sorted_barcode_names(min_reads=args.min_reads)
            for bc in sorted_barcode_names:
                for line in project.libraries[library].get_reads_for_barcode(bc, run_filter=target_runs):
                    sys.stdout.write(line)
            # Drop cached per-part sorted indices between libraries.
            for part in project.libraries[library].parts:
                if hasattr(part, '_sorted_index'):
                    del part._sorted_index
|
{"/count_barcode_distribution.py": ["/indrops.py"], "/trim_polyA_and_filter_low_complexity_reads.py": ["/indrops.py"]}
|
10,853
|
brianjohnhaas/indrops
|
refs/heads/master
|
/annotate_mouse_transcriptome.py
|
import re
in_genes="Mus_musculus.GRCm38.84.with_tid.gtf"
out_genes="Mus_musculus.GRCm38.84.annotated.gtf"
# Biotypes whose transcripts are kept even when ENSEMBL assigns them no
# transcript_support_level (TSL "NA").
accepted_gene_biotypes_for_NA_transcripts = set(["IG_V_gene","IG_J_gene","protein_coding","TR_J_gene","TR_D_gene","TR_V_gene","IG_C_gene","IG_D_gene","TR_C_gene"])
with open(in_genes, 'r') as in_f, open(out_genes, 'w') as out_f:
    for line in in_f:
        # First GTF column is the chromosome name. (Currently unused.)
        chr_name = line.rstrip().split('\t')[0]
        # Check the transcript_support level
        # This should be faster than a regex
        # We need to support the case where see these two types of annotations:
        # transcript_support_level "1"
        # transcript_support_level "1 (assigned to previous version X)"
        # transcript_support_level "2" <- Clear example of a gene like this is NKX6.1
        # transcript_support_level "2 (assigned to previous version X)"
        line_valid_for_output = False
        if 'transcript_support_level "1"' in line or 'transcript_support_level "1 ' in line or 'transcript_support_level "2"' in line or 'transcript_support_level "2 ' in line:
            line_valid_for_output = True
        elif 'transcript_support_level "NA' in line:
            # Transcript Support Level Not Analysed. Pseudogenes, single exon transcripts, HLA, T-cell receptor and Ig transcripts are not analysed and therefore not given any of the TSL categories.
            # Keep only a few ones annotated as "IG_V_gene","IG_J_gene","protein_coding","TR_J_gene","TR_D_gene","TR_V_gene","IG_C_gene","IG_D_gene","TR_C_gene"
            gene_biotype = re.search(r'gene_biotype \"(.*?)\";', line)
            if gene_biotype and gene_biotype.group(1) in accepted_gene_biotypes_for_NA_transcripts:
                line_valid_for_output = True
        if line_valid_for_output:
            # Append "|<gene_name>" to the transcript_id so downstream tools
            # can recover the gene symbol directly from the transcript id.
            gene_name = re.search(r'gene_name \"(.*?)\";', line)
            if gene_name:
                gene_name = gene_name.group(1)
                out_line = re.sub(r'(?<=transcript_id ")(.*?)(?=";)', r'\1|'+gene_name, line)
                out_f.write(out_line)
|
{"/count_barcode_distribution.py": ["/indrops.py"], "/trim_polyA_and_filter_low_complexity_reads.py": ["/indrops.py"]}
|
10,854
|
brianjohnhaas/indrops
|
refs/heads/master
|
/trim_polyA_and_filter_low_complexity_reads.py
|
import re
try:
import cPickle as pickle
except:
import pickle
from indrops import from_fastq, to_fastq
def low_complexity_filter(args):
    """Trim poly-A tails from reads and drop short or low-complexity reads.

    Reads FASTQ records from args.input, trims each read's 3' poly-A run
    (keeping up to `keep_polyA_length` trailing As), then:
      - rejects reads shorter than args.min_post_trim_length after trimming,
      - rejects reads whose fraction of single-base runs (>4 identical bases)
        exceeds args.max_low_complexity_fraction.
    Kept reads are written to args.output; rejects optionally to
    args.rejected; summary counts optionally pickled to args.metrics.
    """
    total_reads = 0
    kept_reads = 0
    rejected_because_complexity_too_low = 0
    rejected_because_too_short = 0
    keep_polyA_length = 4
    # Runs of 5+ of any single base count as low-complexity sequence.
    single_base_runs_regex = '|'.join(['%s{%d,}'%(b, keep_polyA_length+1) for b in 'ATCG'])
    for name, seq, qual in from_fastq(args.input):
        total_reads += 1
        keep_read = True
        # Identify length of the trailing poly-A run.
        polyA_length = 0
        for s in seq[::-1]:
            if s != 'A':
                break
            polyA_length += 1
        read_length = len(seq)
        # Trim the poly-A tail but keep up to keep_polyA_length trailing As.
        # BUGFIX: the original computed min(polyA_length + keep_polyA_length, 0),
        # which is always 0 for non-negative lengths — so no read was ever trimmed.
        trim_at_position = read_length - max(polyA_length - keep_polyA_length, 0)
        if trim_at_position < args.min_post_trim_length:
            keep_read = False
            rejected_because_too_short += 1
        else:
            new_seq = seq[:trim_at_position]
            new_qual = qual[:trim_at_position]
            low_complexity_bases = sum([m.end()-m.start() for m in re.finditer(single_base_runs_regex, new_seq)])
            low_complexity_fraction = float(low_complexity_bases)/len(new_seq)
            if low_complexity_fraction > args.max_low_complexity_fraction:
                keep_read = False
                rejected_because_complexity_too_low += 1
        if keep_read:
            output_lines = to_fastq(name, new_seq, new_qual)
            args.output.write(output_lines)
            kept_reads += 1
        elif args.rejected:
            # Rejected reads are written untrimmed.
            args.rejected.write(to_fastq(name, seq, qual))
    if args.metrics:
        pickle.dump({'input': total_reads, 'output': kept_reads, 'rejected_because_complexity_too_low': rejected_because_complexity_too_low, 'rejected_because_too_short': rejected_because_too_short}, args.metrics)
    sys.stderr.write('Kept %d out of %d.\n' % (kept_reads, total_reads))
if __name__=="__main__":
import sys, argparse
parser = argparse.ArgumentParser()
parser.add_argument('-input', type=argparse.FileType('r'), nargs='?', default=sys.stdin)
parser.add_argument('-output', type=argparse.FileType('w'), nargs='?', default=sys.stdout)
parser.add_argument('-rejected', type=argparse.FileType('w'), nargs='?', default=False)
parser.add_argument('-metrics', type=argparse.FileType('w'), nargs='?', default=sys.stderr)
parser.add_argument('--max-low-complexity-fraction', type=float, nargs='?', default=1.0)
parser.add_argument('--min-post-trim-length', type=int, nargs='?', default=20)
args = parser.parse_args()
low_complexity_filter(args)
|
{"/count_barcode_distribution.py": ["/indrops.py"], "/trim_polyA_and_filter_low_complexity_reads.py": ["/indrops.py"]}
|
10,870
|
dremdem/test_field
|
refs/heads/master
|
/field/models.py
|
from django.db import models
from django.db.models.expressions import RawSQL
# Create your models here.
class DManagerWithCount(models.Manager):
    # Manager that annotates each Division with `division_amount`: the number
    # of employees across all of the division's departments.
    def get_queryset(self):
        # Correlated raw subquery: counts employees joined through departments
        # belonging to the outer query's division row.
        return super().get_queryset().annotate(division_amount=RawSQL("""
            select count(*) from field_department fd, field_employee fe
            where fd.id = fe.department_id and fd.division_id = field_division.id
            """, []))
class Division(models.Model):
    # Top-level org unit; departments hang off it via Department.division.
    name = models.CharField(max_length=200)
    no = models.CharField(max_length=5)
    # Default manager plus one that annotates per-division employee counts.
    object = models.Manager()
    obj_with_count = DManagerWithCount()
class Department(models.Model):
    # A department belongs to exactly one Division.
    name = models.CharField(max_length=200)
    no = models.CharField(max_length=5)
    division = models.ForeignKey(Division, on_delete=models.CASCADE, related_name='departments')
class Employee(models.Model):
    # An employee belongs to exactly one Department.
    name = models.CharField(max_length=200)
    last_name = models.CharField(max_length=200)
    age = models.IntegerField()
    department = models.ForeignKey(Department, on_delete=models.CASCADE, related_name='employees')
|
{"/field/views.py": ["/field/models.py"]}
|
10,871
|
dremdem/test_field
|
refs/heads/master
|
/field/views.py
|
from django.shortcuts import render
from .models import Division
# Create your views here.
def index(request):
    """Render the division list, each annotated with its employee count."""
    divisions = Division.obj_with_count.all()
    context = {'division': divisions}
    return render(request, 'field/index.html', context=context)
|
{"/field/views.py": ["/field/models.py"]}
|
10,876
|
TheZorcerer/path.py
|
refs/heads/main
|
/data.py
|
import json
class Data(object):
    """Loads gunsmith weapon/attachment JSON and supports name/id lookup."""

    def __init__(self):
        with open("data/gunsmith-weapons.json") as weapons_file:
            self.weapons_data = json.load(weapons_file)
        with open("data/gunsmith-attachments.json") as attachments_file:
            self.attachments_data = json.load(attachments_file)
        # (display name, id) pairs, and a path lookup keyed by id.
        self.weapons = [(w['name'], w['id']) for w in self.weapons_data]
        self.path_data = {entry['id']: entry['path'] for entry in self.weapons_data}

    def search(self, name):
        """Case-insensitive lookup by display name or id; None if absent."""
        target = name.lower()
        for title, weapon_id in self.weapons:
            if target in (title.lower(), weapon_id.lower()):
                return title, self.path_data[weapon_id]
        return None
|
{"/command_handler.py": ["/data.py", "/bot_utils.py"], "/path.py": ["/command_handler.py", "/bot_utils.py"]}
|
10,877
|
TheZorcerer/path.py
|
refs/heads/main
|
/init_bot.py
|
# Bootstrap config: capture the bot token and start with empty guild prefs.
with open("token.txt", "w") as token_file:
    token_file.write(input("your token? "))
with open("guild_prefs.json", "w") as prefs_file:
    prefs_file.write("{}")
|
{"/command_handler.py": ["/data.py", "/bot_utils.py"], "/path.py": ["/command_handler.py", "/bot_utils.py"]}
|
10,878
|
TheZorcerer/path.py
|
refs/heads/main
|
/command_handler.py
|
import discord
import data
import bot_utils
class handler():
    """Routes incoming Discord messages to the bot's commands."""

    def __init__(self, client):
        self.client = client
        self.gun_data = data.Data()

    def handle(self, message):
        """Entry point: dispatch if the message starts with the guild's prefix."""
        prefix = self.client.guild_preferences[str(message.guild.id)]["prefix"]
        if(message.content[:len(prefix)] == prefix):
            return self.handler(message, prefix)

    def handler(self, message, prefix):
        """Pick the command module from the first word after the prefix."""
        data = message.content.split(" ")
        # NOTE(review): the hard slice end of 7 caps the module-name length —
        # confirm this is intended for multi-character prefixes.
        module = data[0][len(prefix):7].lower()
        if(module == "server"):
            return self.server(message)
        elif(module == "gun"):
            return self.gun(message)
        return None

    def server(self, message):
        """Owner-only admin commands: change prefix, allow-list a channel."""
        data = message.content.split(" ")
        if(str(message.author.id) == "358869991459782666"):
            if(data[1] == "prefix"):
                self.client.guild_preferences[str(message.guild.id)]["prefix"] = data[2]
                return "I have changed the prefix to " + data[2]
            elif(data[1] == "allow"):
                self.client.guild_preferences[str(message.guild.id)]["allowed_channels"].append(str(message.channel.id))
                return "added channel with id" + str(message.channel.id)
        else:
            return "You aint tazer"

    def gun(self, message):
        """Look up a weapon by name/id and return an embed with its stats."""
        if(str(message.channel.id) in self.client.guild_preferences[str(message.guild.id)]["allowed_channels"]):
            data = message.content.split(" ")
            if(data[1].lower() == "help"):
                try:
                    if(data[2]):
                        return bot_utils.weapon_help(self.gun_data.weapons, data[2])
                except IndexError:
                    pass
                return bot_utils.weapon_help(self.gun_data.weapons, "1")
            weapon_data = self.gun_data.search(data[1])
            # BUGFIX: check for a failed lookup BEFORE unpacking; the original
            # unpacked first, raising TypeError on unknown weapon names.
            if(weapon_data == None):
                return None
            name, weapon_data = weapon_data[0], weapon_data[1]
            # Each stat may be absent from the data; fall back gracefully.
            try:
                firerate = str(weapon_data['fr']) + " rpm"
            except KeyError:
                firerate = "Not Available"
            try:
                reld = str(weapon_data["reload"]) + "s"
            except KeyError:
                reld = "Not Available"
            try:
                ads = str(weapon_data["ads"]) + " ms"
            except KeyError:
                ads = "Not Available"
            mag = weapon_data["mag"]
            max_ammo = weapon_data["maxAmmo"]
            embed = bot_utils.build_gun_embed(name, mag, max_ammo, reld, firerate, ads)
            return embed
        else:
            return "Bot is not allowed in this channel"

    def gulag(self, message):
        pass
|
{"/command_handler.py": ["/data.py", "/bot_utils.py"], "/path.py": ["/command_handler.py", "/bot_utils.py"]}
|
10,879
|
TheZorcerer/path.py
|
refs/heads/main
|
/bot_utils.py
|
import discord
import json
def check_guilds(client):
    """Load guild preferences, register any new guilds, persist, and attach.

    Returns the preferences dict (keyed by str(guild.id)) and also stores it
    on client.guild_preferences.
    """
    with open("guild_prefs.json") as f:
        guild_preferences = json.load(f)
    for guild in client.guilds:
        if str(guild.id) not in guild_preferences:
            print("new guild with ID", guild.id)
            # BUGFIX: key by str(guild.id) — JSON keys are strings and every
            # other lookup uses str ids; the original inserted int keys, so
            # new guilds were invisible to lookups until a restart.
            guild_preferences[str(guild.id)] = {"allowed_channels": [], "on": True, "prefix": "="}
    client.guild_preferences = guild_preferences
    with open("guild_prefs.json", "w+") as f:
        json.dump(guild_preferences, f)
    return guild_preferences
def save_preferences(client):
    """Persist client.guild_preferences to guild_prefs.json."""
    with open("guild_prefs.json", "w") as out:
        json.dump(client.guild_preferences, out)
    print("saved it!")
def build_gun_embed(name, mag, maxammo, reld, firerate, ads):
    """Build a Discord embed summarising one weapon's stats."""
    embed = discord.Embed(title=name, description="Stats on the "+name+" courtesy path.exe and PatchyTheDog.", color=0x00ff00)
    embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/781241634822029312/781799759576170516/gath.png")
    # Field order matters for display; keep it stable.
    stats = (
        ("Magazine Capacity", mag),
        ("Reserve Ammo", maxammo),
        ("Reload Time", reld),
        ("Firerate", firerate),
        ("ADS Time", ads),
    )
    for label, value in stats:
        embed.add_field(name=label, value=value, inline=False)
    embed.set_footer(text="Made by TaZeR/zahran#5909")
    return embed
def weapon_help(weapons, page):
    """Return an embed listing one 10-weapon page, or an error string.

    `page` is 1-based and may arrive as a string from the command parser.
    """
    try:
        page = int(page)
    except ValueError:
        return "Not a valid page"
    # Valid pages are 1..ceil(len(weapons)/10). The original test
    # (page*10 > len) wrongly rejected valid last partial pages and
    # accepted page 0 / negatives.
    if page < 1 or (page - 1) * 10 >= len(weapons):
        return "Not a valid page"
    embed = discord.Embed(title="All Weapons",description="The list of all weapons and the associated id's. You can use the id to search for them by =gun <id>",color=0x00ff00)
    # BUGFIX: the original upper bound of page*10+1 could index one element
    # past the end of `weapons`, raising IndexError on exact multiples of 10.
    for n in range((page - 1) * 10, min(page * 10, len(weapons))):
        embed.add_field(name = str(n+1)+". "+ weapons[n][0], value = "ID: "+weapons[n][1],inline=False)
    embed.set_footer(text = "Use =gun help <page> for the next set of weapons")
    return embed
|
{"/command_handler.py": ["/data.py", "/bot_utils.py"], "/path.py": ["/command_handler.py", "/bot_utils.py"]}
|
10,880
|
TheZorcerer/path.py
|
refs/heads/main
|
/path.py
|
import json
import discord
import command_handler
import bot_utils
import signal
import sys
client = discord.Client()
handler = command_handler.handler(client)
def on_exit(signal, frame):
    # SIGINT handler: flush guild preferences to disk before exiting.
    bot_utils.save_preferences(client)
    print("closing!")
    sys.exit(0)
@client.event
async def on_ready():
    print("logged in as"+str(client))
    await client.change_presence(activity=discord.Game(name='=gun help'))
    # Load (and create entries for) per-guild preferences on startup.
    guild_preferences = bot_utils.check_guilds(client)
    client.guild_preferences = guild_preferences
@client.event
async def on_message(message):
    # Delegate to the command handler; plain strings are sent as-is,
    # anything else is assumed to be a discord.Embed.
    reply = handler.handle(message)
    if(reply != None):
        if(type(reply) == type("never gonna give you up")):
            await message.channel.send(reply)
        else:
            await message.channel.send(embed=reply)
with open("token.txt") as token:
    signal.signal(signal.SIGINT, on_exit)
    client.run(token.read())
|
{"/command_handler.py": ["/data.py", "/bot_utils.py"], "/path.py": ["/command_handler.py", "/bot_utils.py"]}
|
10,881
|
callidus/playbot
|
refs/heads/master
|
/playbot/plugins/CvH/__init__.py
|
import App # noqa
|
{"/playbot/plugins/fortune/fortune.py": ["/playbot/plugins/fortune/__init__.py"], "/playbot/plugins/fortune/build_db.py": ["/playbot/plugins/fortune/__init__.py"]}
|
10,882
|
callidus/playbot
|
refs/heads/master
|
/playbot/main.py
|
from __future__ import absolute_import
from playbot import bot
from playbot.plugins import card
from playbot.plugins import control
from playbot.plugins import CvH
from playbot.plugins import dice
from playbot.plugins import fortune
from playbot.plugins import say
from playbot.plugins import link_peek
import logging
name = "PlayBot"
server = "irc.afternet.org"
chans = ["""#testroom""", ]
port = 6697
def setup_logging():
    # Root logger at INFO for the whole bot.
    logging.basicConfig(level=logging.INFO)
def main():
    """Connect the IRC bot and register all command plugins."""
    setup_logging()
    b = bot.PlayBot(chans, name, None, server, port)
    b.register_command("disconnect", control.Disconnect())
    b.register_command("die", control.Die())
    cvh = CvH.App.CvH()
    cvh.setup('./cvh.db')
    b.register_command("cvh", cvh)
    # Two Fortune instances backed by different databases.
    ftn = fortune.fortune.Fortune('./fortune.db')
    b.register_command('fortune', ftn)
    why = fortune.fortune.Fortune("./bofh.db")
    b.register_command('why', why)
    roll = dice.Dice()
    b.register_command('roll', roll)
    sayer = say.Say()
    b.register_command('say', sayer)
    #cardGame = card.Card(b)
    #b.register_command('card', cardGame)
    b.register_listner(link_peek.peek)
    b.start()
if __name__ == "__main__":
    main()
|
{"/playbot/plugins/fortune/fortune.py": ["/playbot/plugins/fortune/__init__.py"], "/playbot/plugins/fortune/build_db.py": ["/playbot/plugins/fortune/__init__.py"]}
|
10,883
|
callidus/playbot
|
refs/heads/master
|
/playbot/plugins/card.py
|
# flake8: noqa
""" broken for now
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import random
import logging
class Card:
def __init__(self,bot):
self.log = logging.getLogger(__name__)
self.currHands = {}
self.dealer = bot.nickname
self.handInProgress = False
@staticmethod
def getCardUnicode(card):
uc = [[u"\U0001F0A1",u"\U0001F0A2",u"\U0001F0A3",u"\U0001F0A4",u"\U0001F0A5",u"\U0001F0A6",u"\U0001F0A7",u"\U0001F0A8",u"\U0001F0A9",u"\U0001F0AA",u"\U0001F0AB",u"\U0001F0AC",u"\U0001F0AD",u"\U0001F0AE"],
[u"\U0001F0D1",u"\U0001F0D2",u"\U0001F0D3",u"\U0001F0D4",u"\U0001F0D5",u"\U0001F0D6",u"\U0001F0D7",u"\U0001F0D8",u"\U0001F0D9",u"\U0001F0DA",u"\U0001F0DB",u"\U0001F0DC",u"\U0001F0DD",u"\U0001F0DE"],
[u"\U0001F0B1",u"\U0001F0B2",u"\U0001F0B3",u"\U0001F0B4",u"\U0001F0B5",u"\U0001F0B6",u"\U0001F0B7",u"\U0001F0B8",u"\U0001F0B9",u"\U0001F0BA",u"\U0001F0BB",u"\U0001F0BC",u"\U0001F0BD",u"\U0001F0BE"],
[u"\U0001F0C1",u"\U0001F0C2",u"\U0001F0C3",u"\U0001F0C4",u"\U0001F0C5",u"\U0001F0C6",u"\U0001F0C7",u"\U0001F0C8",u"\U0001F0C9",u"\U0001F0CA",u"\U0001F0CB",u"\U0001F0CC",u"\U0001F0CD",u"\U0001F0CE"],
[u"\U0001F0A0",u"\U0001F0BF",u"\U0001F0CF",u"\U0001F0DF"]]
if card:
return uc[card[0]][card[1]]
return uc[4][0]
@staticmethod
def getCardAscii(card):
s = [u"\u2660",u"\u2663",u"\u2665",u"\u2666"] # SCHD
v = ["A","2","3","4","5","6","7","8","9","10","J","Q","K"]
if card:
if card[0] < 2:
return u"\u00031,0[" + s[card[0]] + v[card[1]] + u"]\u000F"
else:
return u"\u00034,0[" + s[card[0]] + v[card[1]] + u"]\u000F"
return "[#]"
@staticmethod
def getHand(h):
return "".join(map(Card.getCardUnicode,h)) + " " + "".join(map(Card.getCardAscii,h))
def newGame(self):
self.deck = []
self.hands = {}
self.nicks = {}
self.nicks[self.dealer] = "call"
self.handInProgress = True
for i in range(4):
for d in range(13):
self.deck.append([i,d])
random.shuffle(self.deck)
self.hands[self.dealer] = [self.deck.pop(), self.deck.pop()]
def blackjackHandValue(self, hand):
x = 0
a = 0
for c in hand:
if c[1] == 0:
x += 1
else:
a += 1
x += min(c[1],10)
if x <= 11 and a > 0:
return x+10
return x
def __call__(self, bot, e, cmd, *arg):
if arg[0] == "new":
self.newGame()
bot.do_send(e.target, "New Game")
bot.do_send(e.target, "Dealer: " + Card.getHand([self.hands[self.dealer][0],False]))
return
nick = re.sub("!.*","",e.source)
if self.handInProgress is not True:
return bot.do_send(nick, "Game not in progress")
if arg[0] == "deal":
if nick in self.hands:
return bot.do_send(nick, "Already dealt in.")
h = [self.deck.pop(), self.deck.pop()]
x = self.blackjackHandValue(h)
self.hands[nick] = h
self.nicks[nick] = "dealt"
bot.do_send(nick, "Hand: %s = %d" % (Card.getHand(h), x))
return
if nick not in self.hands:
return bot.do_send(nick, "Not Dealt In")
if self.nicks[nick] != "dealt":
return bot.do_send(nick, "Already Called")
if arg[0] == "hit":
self.hands[self.dealer].append(self.deck.pop())
h = self.hands[self.dealer]
x = self.blackjackHandValue(h)
if x > 21:
self.nicks[nick] = "bust"
return bot.do_send(nick, "BUST!")
bot.do_send(nick, "Hand: %s = %d" % (Card.getHand(h), x))
return
if arg[0] == "call":
self.nicks[nick] = "call"
for p in self.nicks:
if self.nicks[p] == "dealt":
return
result = []
winner = ["Error",0]
for p in self.nicks:
v = self.blackjackHandValue(self.hands[p])
if winner[1] < v:
winner = [p, v]
result.append("%s: %s = %d" % (p, Card.getHand(self.hands[p]), v))
bot.do_send(e.target, "\t".join(result))
bot.do_send(e.target, "%s is the winner with %d!" % (winner[0], winner[1]))
return
bot.do_send(e.target, "Don't know that command")
#
# Quick Test
def test_all():
class Bot:
def __init__(self):
self.x = 0
self.nickname = "PlayBot"
def do_send(self, tar, msg):
print("(%s) %s" % (tar,msg))
class E:
def __init__(self, source):
self.target = 0
self.source = source
b = Bot()
c = Card(b)
c.__call__(b, E("jo"), "card", "new")
c.__call__(b, E("jo"), "card", "deal")
c.__call__(b, E("jo"), "card", "hit")
c.__call__(b, E("mi"), "card", "deal")
c.__call__(b, E("slo"), "card", "deal")
c.__call__(b, E("slo"), "card", "hit")
c.__call__(b, E("slo"), "card", "hit")
c.__call__(b, E("slo"), "card", "hit")
c.__call__(b, E("slo"), "card", "hit")
c.__call__(b, E("mi"), "card", "call")
c.__call__(b, E("jo"), "card", "call")
if __name__ == "__main__":
test_all()
"""
|
{"/playbot/plugins/fortune/fortune.py": ["/playbot/plugins/fortune/__init__.py"], "/playbot/plugins/fortune/build_db.py": ["/playbot/plugins/fortune/__init__.py"]}
|
10,884
|
callidus/playbot
|
refs/heads/master
|
/playbot/plugins/fortune/fortune.py
|
from __future__ import absolute_import
from playbot.plugins.fortune import data_source
import logging
import os
import random
class Fortune(object):
    """IRC bot command: reply with a random fortune from a sqlite-backed DB."""
    def __init__(self, db, prefix=None):
        # db: path to the sqlite file; created with an empty schema if missing.
        # prefix: optional text prepended to every fortune sent.
        self.data = data_source.DataSource()
        if os.path.isfile(db):
            self.data.open_db(db)
        else:
            self.data.build_db(db)
        # NOTE(review): sqlite row ids normally start at 1, but randint(0,
        # maxIdx) can draw 0 — confirm get_fortune(0) cannot be selected,
        # or that a row with id 0 exists.
        self.maxIdx = self.data.get_count()-1
        self.prefix = prefix
        self.log = logging.getLogger(__name__)
        self.log.info("Fortune loaded db: %s with %i entries.",
                      db, self.maxIdx)
    def __call__(self, bot, e, cmd, *args):
        # Pick a random stored fortune and send it to the message's target.
        idx = random.randint(0, self.maxIdx)
        msg = self.data.get_fortune(idx)
        if self.prefix is not None:
            bot.do_send(e.target, self.prefix + " " + msg)
        else:
            bot.do_send(e.target, msg)
|
{"/playbot/plugins/fortune/fortune.py": ["/playbot/plugins/fortune/__init__.py"], "/playbot/plugins/fortune/build_db.py": ["/playbot/plugins/fortune/__init__.py"]}
|
10,885
|
callidus/playbot
|
refs/heads/master
|
/playbot/plugins/fortune/__init__.py
|
import fortune # noqa
|
{"/playbot/plugins/fortune/fortune.py": ["/playbot/plugins/fortune/__init__.py"], "/playbot/plugins/fortune/build_db.py": ["/playbot/plugins/fortune/__init__.py"]}
|
10,886
|
callidus/playbot
|
refs/heads/master
|
/playbot/plugins/fortune/build_db.py
|
from __future__ import absolute_import
from __future__ import print_function
import os
import re
import sys
from playbot.plugins.fortune import data_source
# Populate a fortune database from a newline-delimited text file:
#   argv[1] = sqlite db path (created if missing), argv[2] = source text file.
db = data_source.DataSource()
if os.path.isfile(sys.argv[1]):
    db.open_db(sys.argv[1])
else:
    db.build_db(sys.argv[1])
with open(sys.argv[2], 'r') as f:
    data = f.read()
items = data.split("\n")
for key, item in enumerate(items):
    if len(item) != 0:
        # Collapse internal whitespace runs to single spaces.
        item = item.replace("\n", " ")
        item = re.sub("[ \t]+", " ", item)
        print(key, item)
        try:
            # BUGFIX: `unicode` only exists on Python 2; on Python 3 the
            # str from the file is already text.
            try:
                text = unicode(item, 'utf-8')  # noqa: F821 (Python 2 path)
            except NameError:
                text = item
            db.add_fortune(text)
            print("... OK")
        except Exception as e:
            print("... Fail", e)
|
{"/playbot/plugins/fortune/fortune.py": ["/playbot/plugins/fortune/__init__.py"], "/playbot/plugins/fortune/build_db.py": ["/playbot/plugins/fortune/__init__.py"]}
|
10,887
|
callidus/playbot
|
refs/heads/master
|
/playbot/plugins/fortune/data_source.py
|
from __future__ import absolute_import
import sqlite3 as dbapi
class DataSource(object):
    """sqlite3-backed store of fortune strings (single `fortune` table)."""

    def __init__(self):
        # No connection until open_db()/build_db() is called.
        self.conn = None

    def __del__(self):
        if self.conn:
            self.conn.close()

    def open_db(self, name):
        """Open an existing database file."""
        self.conn = dbapi.connect(name)

    def build_db(self, name):
        """Create a new database file and its schema."""
        self.conn = dbapi.connect(name)
        try:
            cur = self.conn.cursor()
            cur.execute('CREATE TABLE fortune('
                        'id INTEGER PRIMARY KEY ASC, data TEXT)')
            self.conn.commit()
        except Exception:
            self.conn.rollback()
            raise

    def get_count(self):
        """Return the number of stored fortunes."""
        cur = self.conn.cursor()
        cur.execute('SELECT Count(*) FROM fortune')
        return cur.fetchone()[0]

    def add_fortune(self, data):
        """Insert a fortune; return its new row id."""
        cur = self.conn.cursor()
        try:
            cur.execute('INSERT INTO fortune (data) VALUES (?)', (data,))
            new_id = cur.lastrowid
            self.conn.commit()
        except Exception:
            self.conn.rollback()
            raise
        return new_id

    def del_fortune(self, itemId):
        """Delete the fortune with the given id."""
        cur = self.conn.cursor()
        try:
            cur.execute('DELETE FROM fortune WHERE id=?', (itemId,))
            self.conn.commit()
        except Exception:
            self.conn.rollback()
            raise

    def get_fortunes(self):
        """Return all (id, data) rows."""
        cur = self.conn.cursor()
        cur.execute('SELECT id, data FROM fortune')
        return cur.fetchall()

    def get_fortune(self, id):
        """Return the text of the fortune with the given id."""
        cur = self.conn.cursor()
        cur.execute('SELECT data FROM fortune WHERE id=?', (id,))
        return cur.fetchone()[0]
|
{"/playbot/plugins/fortune/fortune.py": ["/playbot/plugins/fortune/__init__.py"], "/playbot/plugins/fortune/build_db.py": ["/playbot/plugins/fortune/__init__.py"]}
|
10,888
|
callidus/playbot
|
refs/heads/master
|
/playbot/plugins/CvH/AddWhiteData.py
|
import os
import sys
import DataSource
from future import print_function # noqa
db = DataSource.DataSource()
if os.path.isfile("./cvh.db"):
db.openDB("./cvh.db")
else:
db.buildDB("./cvh.db")
with open(sys.argv[1], 'r') as f:
data = f.read()
items = data.split("<>")
for key, item in enumerate(items):
try:
db.addWhiteCard(key, item)
print("{0} {1} ... OK".format(key, item))
except Exception:
print("{0} {1} ... FAIL".format(key, item))
|
{"/playbot/plugins/fortune/fortune.py": ["/playbot/plugins/fortune/__init__.py"], "/playbot/plugins/fortune/build_db.py": ["/playbot/plugins/fortune/__init__.py"]}
|
10,889
|
callidus/playbot
|
refs/heads/master
|
/playbot/plugins/control.py
|
class Disconnect(object):
    """Bot command: drop the connection."""

    def __call__(self, bot, e, cmd, *arg):
        # Delegate straight to the bot.
        bot.disconnect()
class Die(object):
    """Bot command: shut the bot down entirely."""

    def __call__(self, bot, e, cmd, *arg):
        # Delegate straight to the bot.
        bot.die()
|
{"/playbot/plugins/fortune/fortune.py": ["/playbot/plugins/fortune/__init__.py"], "/playbot/plugins/fortune/build_db.py": ["/playbot/plugins/fortune/__init__.py"]}
|
10,890
|
callidus/playbot
|
refs/heads/master
|
/playbot/plugins/say.py
|
import logging
class Say(object):
    """Bot command: echo its arguments back to the channel."""

    def __init__(self):
        self.log = logging.getLogger(__name__)

    def __call__(self, bot, e, cmd, *arg):
        # With no arguments, prompt the user instead of sending nothing.
        msg = " ".join(arg) if arg else "say what?"
        self.log.info("Saying: '%s'", msg)
        bot.do_send(e.target, msg)
|
{"/playbot/plugins/fortune/fortune.py": ["/playbot/plugins/fortune/__init__.py"], "/playbot/plugins/fortune/build_db.py": ["/playbot/plugins/fortune/__init__.py"]}
|
10,891
|
callidus/playbot
|
refs/heads/master
|
/playbot/plugins/CvH/App.py
|
import os
import random
import DataSource
class Phrase(object):
    """A black card plus the white-card answers slotted into it."""

    def __init__(self, data):
        # data is a (title, text, slots) row from the black-card table.
        self.title = data[0]
        self.text = data[1]
        self.numSlots = int(data[2])
        self.slots = []

    def fillSlot(self, data):
        # data is a (title, text) row from the white-card table; keep the text.
        self.slots.append(data[1])

    def __str__(self):
        # Cards without blanks get their answer appended instead of inlined.
        if not self.numSlots:
            return self.text + " ... " + self.slots[0]
        rendered = self.text
        for answer in self.slots:
            rendered = rendered.replace("__________", answer, 1)
        return rendered
class CvH(object):
    """Bot command: build a random Cards-vs-Humanity phrase from the card DB."""

    def __init__(self):
        self.dataSource = DataSource.DataSource()

    def setup(self, path):
        """Open (or create) the card database and cache the card id lists."""
        if os.path.isfile(path):
            self.dataSource.openDB(path)
        else:
            self.dataSource.buildDB(path)
        self.blacks = self.dataSource.getBlackCards()
        self.whites = self.dataSource.getWhiteCards()

    def __call__(self, bot, e, cmd, *args):
        # BUGFIX: randint(0, len(xs)) is inclusive on both ends and could
        # index one past the end of the list; randrange excludes the stop.
        idx = random.randrange(len(self.blacks))
        phrase = Phrase(self.dataSource.getBlackCard(self.blacks[idx][0]))
        # Cards with no blanks still get one appended answer.
        for _ in range(0, max(phrase.numSlots, 1)):
            idx = random.randrange(len(self.whites))
            phrase.fillSlot(self.dataSource.getWhiteCard(self.whites[idx][0]))
        bot.do_send(e.target, str(phrase))
|
{"/playbot/plugins/fortune/fortune.py": ["/playbot/plugins/fortune/__init__.py"], "/playbot/plugins/fortune/build_db.py": ["/playbot/plugins/fortune/__init__.py"]}
|
10,892
|
callidus/playbot
|
refs/heads/master
|
/playbot/plugins/CvH/DataSource.py
|
import sqlite3 as dbapi
class DataSource(object):
    """sqlite3 wrapper for the CvH card database (white and black tables)."""

    def __init__(self):
        # No connection until openDB()/buildDB() is called.
        self.conn = None

    def __del__(self):
        if self.conn:
            self.conn.close()

    def openDB(self, name):
        """open an existing database."""
        self.conn = dbapi.connect(name)

    def buildDB(self, name):
        """build a new database to use."""
        self.conn = dbapi.connect(name)
        try:
            cur = self.conn.cursor()
            cur.execute('CREATE TABLE white('
                        'id INTEGER PRIMARY KEY ASC, '
                        'title TEXT, '
                        'data TEXT)')
            cur.execute('CREATE TABLE black('
                        'id INTEGER PRIMARY KEY ASC, '
                        'title TEXT, '
                        'slots INTEGER, '
                        'data TEXT)')
            self.conn.commit()
        except Exception:
            self.conn.rollback()
            raise

    def addWhiteCard(self, title, data):
        """Insert a white card; return its new row id."""
        cur = self.conn.cursor()
        try:
            cur.execute('INSERT INTO white (title, data) VALUES (?, ?)',
                        (title, data))
            new_id = cur.lastrowid
            self.conn.commit()
        except Exception:
            self.conn.rollback()
            raise
        return new_id

    def addBlackCard(self, title, slots, data):
        """Insert a black card; return its new row id."""
        cur = self.conn.cursor()
        try:
            cur.execute('INSERT INTO black (title, slots, data) VALUES (?, ?, ?)',
                        (title, slots, data))
            new_id = cur.lastrowid
            self.conn.commit()
        except Exception:
            self.conn.rollback()
            raise
        return new_id

    def delCard(self, white, itemId):
        """Delete a card by id from the white (white truthy) or black table."""
        cur = self.conn.cursor()
        sql = 'DELETE FROM white WHERE id=? ' if white else 'DELETE FROM black WHERE id=? '
        try:
            cur.execute(sql, (itemId,))
            self.conn.commit()
        except Exception:
            self.conn.rollback()
            raise

    def getBlackCards(self):
        """Return all (id, title) rows from the black table."""
        cur = self.conn.cursor()
        cur.execute('SELECT id, title FROM black')
        return cur.fetchall()

    def getWhiteCards(self):
        """Return all (id, title) rows from the white table."""
        cur = self.conn.cursor()
        cur.execute('SELECT id, title FROM white')
        return cur.fetchall()

    def getBlackCard(self, id):
        """Return (title, data, slots) for one black card."""
        cur = self.conn.cursor()
        cur.execute('SELECT title, data, slots FROM black WHERE id=?', (id,))
        return cur.fetchone()

    def getWhiteCard(self, id):
        """Return (title, data) for one white card."""
        cur = self.conn.cursor()
        cur.execute('SELECT title, data FROM white WHERE id=?', (id,))
        return cur.fetchone()
|
{"/playbot/plugins/fortune/fortune.py": ["/playbot/plugins/fortune/__init__.py"], "/playbot/plugins/fortune/build_db.py": ["/playbot/plugins/fortune/__init__.py"]}
|
10,893
|
callidus/playbot
|
refs/heads/master
|
/playbot/plugins/dice.py
|
import logging
import random
class Dice:
    """Bot command: roll dice specs like '2d6-2' and report the results."""

    def __init__(self):
        self.log = logging.getLogger(__name__)

    def __call__(self, bot, e, cmd, *arg):
        if not arg:
            msg = "roll some dice, e.g. 'roll 2d6-2'"
        else:
            # Spec is '<count>d<sides>' with an optional '+N' / '-N' modifier.
            count, sides = arg[0].lower().split('d')
            modifier = 0
            if '-' in sides:
                sides, raw = sides.split('-')
                modifier = -int(raw)
            elif '+' in sides:
                sides, raw = sides.split('+')
                modifier = int(raw)
            rolls = [random.randint(1, int(sides)) for _ in range(0, int(count))]
            rolls.sort()
            msg = "%s = %i [%s]" % (
                arg[0],
                sum(rolls) + modifier,
                " ".join([str(r) for r in rolls]))
        bot.do_send(e.target, msg)
|
{"/playbot/plugins/fortune/fortune.py": ["/playbot/plugins/fortune/__init__.py"], "/playbot/plugins/fortune/build_db.py": ["/playbot/plugins/fortune/__init__.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.