text
string
size
int64
token_count
int64
"""uuid ids Revision ID: b6a452c73bc3 Revises: 6df0d5aac594 Create Date: 2017-12-06 20:57:39.660665 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'b6a452c73bc3' down_revision = '6df0d5aac594' branch_labels = None depends_on = None def upgrade(): op.create_table('reminders_copy', sa.Column('id', sa.String(), nullable=False), sa.Column('user_id', sa.Text(), nullable=True), sa.Column('message', sa.Text(), nullable=True), sa.Column('notify_at', sa.Integer(), nullable=True), sa.Column('status', sa.String(), server_default='pending', nullable=True), sa.PrimaryKeyConstraint('id') ) op.execute('''\ insert into reminders_copy (id, user_id, message, notify_at, status) select "some-id-" || id, user_id, message, notify_at, status from reminders ''') op.execute('drop table reminders') op.execute('alter table reminders_copy rename to reminders') def downgrade(): # ### commands auto generated by Alembic - please adjust! ### pass # ### end Alembic commands ###
1,124
412
# vim: ts=2 sw=2 noexpandtab
from __future__ import unicode_literals

import requests

from .AbstractModule import AbstractModule


class RedditModule(AbstractModule):
    """Credential-check module that verifies a username/password pair
    against Reddit's login API."""

    def try_login(self, uname, password):
        """Attempt to log in to Reddit with the given credentials.

        :param uname: Reddit username.
        :param password: password to test.
        :returns: True when the response does not report a wrong password
            (i.e. the credentials appear valid), False otherwise.
        """
        payload = {
            'user': uname,
            'passwd': password,
            'api_type': 'json'}
        headers = {
            # Bug fix: use this instance's user agent instead of
            # instantiating a throwaway AbstractModule just to read it.
            'user-agent': self.user_agent(),
            'Content-Type': 'application/x-www-form-urlencoded'}
        # Security fix: credentials must never travel over plain HTTP
        # (reddit also redirects http->https, which would drop the POST body).
        login_resp = requests.post(
            'https://www.reddit.com/api/login/',
            data=payload,
            headers=headers)
        # The API embeds the error text in the body; absence of the
        # "wrong password" marker is treated as success.
        return 'wrong password' not in login_resp.text


reddit = RedditModule()
575
208
from __future__ import unicode_literals

# Package version marker, CalVer style (YYYY.MM.DD).
__version__ = '2020.03.06'
68
30
"""Table based reclassify triggered by probability threshold.""" import argparse import os import logging import hashlib from ecoshard import geoprocessing from ecoshard import taskgraph import pandas import numpy from osgeo import gdal gdal.SetCacheMax(2**27) logging.basicConfig( level=logging.DEBUG, format=( '%(asctime)s (%(relativeCreated)d) %(levelname)s %(name)s' ' [%(funcName)s:%(lineno)d] %(message)s')) logging.getLogger('ecoshard.taskgraph').setLevel(logging.WARN) LOGGER = logging.getLogger(__name__) ALIGNED_DIR = 'reclass_aligned_dir' os.makedirs(ALIGNED_DIR, exist_ok=True) def main(): """Entry point.""" parser = argparse.ArgumentParser( description='reclassify raster to table based on probability threshold') parser.add_argument( '--base_raster_path', type=str, required=True, help='path to integer raster') parser.add_argument( '--threshold_raster_path', type=str, required=True, help='path to threshold raster') parser.add_argument( '--threshold_value', type=float, required=True, help=( 'floating point value, if threshold raster is greater than this ' 'value, reclassify based on > column of table.value, else use the ' '<= value flip lulc pixel')) parser.add_argument( '--reclassify_table_path', type=str, required=True, help=( 'path to csv table with columns')) parser.add_argument( '--csv_table_fields', type=str, nargs=3, required=True, help=( 'column names for (1) base raster value, (2) value to flip to if <= ' 'threshold, and (3) value to flip to if > threshold')) parser.add_argument( '--target_raster_path', type=str, help='desired target raster') args = parser.parse_args() print(args.csv_table_fields) df = pandas.read_csv(args.reclassify_table_path) value_map = { int(base_lucode): (float(leq_target), float(gt_target)) for (base_lucode, leq_target, gt_target) in zip( df[args.csv_table_fields[0]], df[args.csv_table_fields[1]], df[args.csv_table_fields[2]]) } print(value_map) threshold_nodata = geoprocessing.get_raster_info( 
args.threshold_raster_path)['nodata'][0] def _reclass_op(base_array, threshold_array): result = base_array.copy() if threshold_nodata is not None: valid_mask = ~numpy.isclose(threshold_array, threshold_nodata) else: valid_mask = numpy.ones(base_array.shape, dtype=bool) for base_code, (leq_target, gt_target) in value_map.items(): leq_mask = ( base_array == base_code) * ( threshold_array <= args.threshold_value) result[leq_mask & valid_mask] = leq_target gt_mask = ( base_array == base_code) * ( threshold_array > args.threshold_value) result[gt_mask & valid_mask] = gt_target return result base_raster_info = geoprocessing.get_raster_info(args.base_raster_path) base_raster_path_list = [ args.base_raster_path, args.threshold_raster_path] path_hash = hashlib.sha256() path_hash.update(','.join([ os.path.basename(path) for path in base_raster_path_list + [ args.target_raster_path]]).encode( 'utf-8')) workspace_dir = os.path.join(ALIGNED_DIR, path_hash.hexdigest()[:5]) os.makedirs(workspace_dir, exist_ok=True) aligned_raster_path_list = [ os.path.join(workspace_dir, os.path.basename(path)) for path in base_raster_path_list] LOGGER.info(f'aligning {base_raster_path_list}') task_graph = taskgraph.TaskGraph(workspace_dir, -1) task_graph.add_task( func=geoprocessing.align_and_resize_raster_stack, args=( base_raster_path_list, aligned_raster_path_list, ['near']*2, base_raster_info['pixel_size'], 'union'), kwargs={ 'target_projection_wkt': base_raster_info['projection_wkt'] }) task_graph.close() task_graph.join() LOGGER.info(f'reclassifying to {args.target_raster_path}') geoprocessing.raster_calculator( [(path, 1) for path in aligned_raster_path_list], _reclass_op, args.target_raster_path, base_raster_info['datatype'], base_raster_info['nodata'][0]) if __name__ == '__main__': main()
4,469
1,479
"""Test suite for the `ke` (kleenexp) pattern language.

Exercises API parity with the stdlib `re` module, compilation of kleenexp
syntax, flags, capturing, macros, and newline semantics. Tests decorated
with ``@midas.test`` are gold-file tests driven by external fixture lines.
"""
import pytest
import midas
import ke
import re


def assert_pattern(pattern, matches, not_matches=()):
    """Assert that *pattern* matches every string in *matches* and none in
    *not_matches*; a str pattern is compiled with ke first."""
    if isinstance(pattern, str):
        pattern = ke.compile(pattern)
    for m in matches:
        assert pattern.match(m), (pattern, m)
    for m in not_matches:
        assert not pattern.match(m), (pattern, m)


def test_assumptions():
    # Sanity-check assumptions about stdlib re that later tests rely on.
    assert_pattern(re.compile(r".*a.c"), ["abc", "ab abc"], ["", "ac", "\nabc"])
    assert_pattern(re.compile(r"(?:.|[\r\n])*a.c"), ["\nabc", "\n\n\nabc"], ["ab"])


def test_api_equivalence():
    # ke should expose at least everything re.__all__ exposes.
    ke_dir = set(x for x in dir(ke) if not x.startswith("_"))
    re_dir = set(re.__all__)
    re_dir.difference_update(["template"])  # experimental, not documented
    assert re_dir
    assert re_dir - ke_dir == set()


@midas.test(format="lines")
def test_compile_gold(ke_pattern):
    # Gold test: translate each fixture line and compare against stored output.
    return ke.re(ke_pattern)


# Fixture text for findall gold tests below.
JABBERWOCKY = """\
'Twas brillig, and the slithy toves
Did gyre and gimble in the wabe:
All mimsy were the borogoves,
And the mome raths outgrabe.

"Beware the Jabberwock, my son!
The jaws that bite, the claws that catch!
Beware the Jubjub bird, and shun
The frumious Bandersnatch!"

He took his vorpal sword in hand:
Long time the manxome foe he sought,
So rested he by the Tumtum tree,
And stood a while in thought.

And, as in uffish thought he stood,
The Jabberwock, with eyes of flame,
Came whiffling through the tulgey wood,
And burbled as it came!

One two! One two! And through and through
The vorpal blade went snicker-snack!
He left it dead, and with its head
He went galumphing back.

"And hast thou slain the Jabberwock?
Come to my arms, my beamish boy!
Oh frabjous day! Callooh! Callay!"
He chortled in his joy.

'Twas brillig, and the slithy toves
Did gyre and gimble in the wabe:
All mimsy were the borogoves,
And the mome raths outgrabe.
"""


@midas.test(format="lines")
def test_jabberwocky(ke_pattern):
    return ke.findall(ke_pattern, JABBERWOCKY, ke.MULTILINE | ke.DOTALL | ke.IGNORECASE)


def test_re():
    assert ke.compile("\t\r\n").match("\t\r\n")


def test_compile():
    assert ke.compile('["a"]').search("bab")
    assert not ke.compile('["c"]').search("bab")
    assert ke.compile('["a"]').search("bab")


def test_compile_bytes():
    assert ke.compile(b'["a"]').search(b"bab")
    assert not ke.compile(b'["c"]').search(b"bab")


def test_flags():
    assert ke.compile("a", ke.I).match("A")
    assert ke.compile("a[#el]", ke.M).search("a\nb")
    assert not ke.compile("a[#el]").search("a\nb")
    for flag in "AIUMS":
        k = getattr(ke, flag)
        r = getattr(re, flag)
        if r != re.ASCII:
            r |= re.UNICODE
        assert ke.compile("a", k).flags == r
    assert ke.compile(b"a", ke.LOCALE).flags == re.LOCALE
    assert ke.compile("a", ke.DEBUG).flags == re.DEBUG | re.UNICODE
    # X should have no effect
    assert ke.compile("a", ke.X).flags == re.UNICODE
    assert ke.re("[c #wb [1-3 #d] #wb]", ke.X) == ke.re("[c #wb [1-3 #d] #wb]")


def test_search():
    assert ke.search('["a"]', "xabc")
    assert not ke.search('["a"]', "Abc")
    assert ke.search('["a"]', "xabc", flags=ke.I)


def test_match():
    assert ke.match('["a"]', "abc")
    assert not ke.match('["a"]', "bac")
    assert ke.match('[capture "a"][capture:g "b"]', "abc").group(1) == "a"
    assert ke.match('[capture "a"][capture:g "b"]', "abc").group("g") == "b"
    assert not ke.match("a", "Ac")
    assert ke.match("a", "Ac", ke.I)


def test_fullmatch():
    assert ke.fullmatch('["a"]', "a")
    assert not ke.fullmatch('["a"]', "abc")


def test_split():
    assert ke.split("[#d]", "a1a2a3a4a5", maxsplit=3) == ["a", "a", "a", "a4a5"]


def test_findall():
    assert ke.findall("a", "xabca") == re.findall("a", "xabca")
    assert ke.findall("aaa", "aaaabaaab") == re.findall("aaa", "aaaabaaab")
    assert ke.findall("aaa", "aaAabaaab", ke.I) == re.findall("aaa", "aaAabaaab", re.I)


def test_finditer():
    assert [m.groups() for m in ke.finditer("[c #letter]", "xa.ca")] == [
        m.groups() for m in re.finditer(r"(\w)", "xa.ca")
    ]


def test_sub():
    assert (
        ke.sub(
            "Hi [capture 1+ #letter], what's up?",
            r"\1! \1!",
            "Hi Bobby, what's up? Hi Martin, what's up?",
        )
        == "Bobby! Bobby! Martin! Martin!"
    )
    assert ke.sub("[1+ #d]", "###", "123-45-6789", count=2) == "###-###-6789"
    assert (
        ke.sub("[c 1+ #d]", lambda m: m.group(1)[::-1], "123-45-6789")
        == "321-54-9876"
    )
    # No capture group in the pattern -> group(1) must raise.
    with pytest.raises(IndexError):
        ke.sub("[1+ #d]", lambda m: m.group(1)[::-1], "123-45-6789")


def test_subn():
    assert ke.subn("[1+ #d]", "###", "123-45-6789", count=2) == ("###-###-6789", 2)


@midas.test(format="lines")
def test_escape(line):
    k = ke.escape(line)
    assert ke.match(k, line)
    assert ke.re(k) == re.escape(line)
    return k


def test_multiple():
    assert ke.match('[1+ "a"]', "a")
    assert ke.match('[1+ "a"]', "aa")
    assert not ke.match('[1+ "a"]', "")
    assert ke.match('[2-3 "a"][#end_line]', "aa")
    assert ke.match('[2-3 "a"][#end_line]', "aaa")
    assert not ke.match('[2-3 "a"][#end_line]', "a")
    assert not ke.match('[2-3 "a"][#end_line]', "aaaa")
    assert ke.match('[0-1 "a"][#end_line]', "")
    assert ke.match('[0-1 "a"][#end_line]', "a")
    assert not ke.match('[0-1 "a"][#end_line]', "aa")
    assert ke.match('[2 "a"][#end_line]', "aa")
    assert not ke.match('[2 "a"][#end_line]', "a")
    assert not ke.match('[2 "a"][#end_line]', "aaa")
    assert ke.match('[2-3 ["hi" | "bye"]][#end_line]', "byebye")
    assert ke.match('[2-3 ["hi" | "bye"]][#end_line]', "hibye")
    assert ke.match('[2-3 ["hi" | "bye"]][#end_line]', "hihibye")
    assert not ke.match('[2-3 ["hi" | "bye"]][#end_line]', "hihihihi")
    assert not ke.match('[2-3 ["hi" | "bye"]][#end_line]', "hi")


def test_capture():
    assert ke.compile('[capture 3-5 "a"]').match("aaa").group(1) == "aaa"
    assert ke.compile('[capture 3-5 "a"]').match("aaaaaaaaa").group(1) == "aaaaa"
    assert not ke.compile('[capture 3-5 "a"]').match("aa")
    assert not ke.compile('[capture 3-5 "a"][#end_line]').match("aaaaaa")
    assert not ke.compile('[[capture 3-5 "a"] #end_line]').match("aaaaaa")
    assert ke.match('[capture 3-5 "a"]', "aaaaaaaaa").group(1) == "aaaaa"
    with pytest.raises(re.error):
        ke.re("[capture 3-5]")
    with pytest.raises(re.error):
        ke.re("[capture 3-5 []]")
    with pytest.raises(re.error):
        ke.re('[capture 0 "a"]')


def test_named_capture():
    assert ke.compile('[capture:a 3-5 "a"]').match("aaa").group("a") == "aaa"


def test_comments():
    assert ke.re("[comment]") == ke.re("[]")
    assert ke.re('[comment "a"]') == ke.re("[]")
    assert ke.re("[comment #token]") == ke.re("[]")
    assert ke.re("[comment not #token]") == ke.re("[]")
    with pytest.raises(re.error):
        ke.re("[0-1 comment #token]")
    assert ke.re('["a" [comment "a"] "b"]') == ke.re("ab")
    assert ke.re('[comment [["a"]]]') == ke.re("[]")


def test_range_macros():
    assert ke.re("[#a..z]") == "[a-z]"
    assert ke.re('[#a..c | "g" | #q..t]') == "[a-cgq-t]"
    assert ke.re('[#a..c | "-"]') == r"[\-a-c]"
    assert ke.match('[#a..c | "-"]', "a")
    assert ke.match('[#a..c | "-"]', "b")
    assert ke.match('[#a..c | "-"]', "c")
    assert ke.match('[#a..c | "-"]', "-")
    assert not ke.match('[#a..c | "-"]', "d")
    with pytest.raises(re.error):
        ke.re("[#..]")
    with pytest.raises(re.error):
        ke.re("[#a..]")
    with pytest.raises(re.error):
        ke.re("[#a..a]")
    with pytest.raises(re.error):
        ke.re("[#a..em..z]")
    with pytest.raises(re.error):
        ke.re("[#!../ #:..@ #....]")


def test_not():
    assert ke.compile('[not "a"]').match("b")
    assert ke.compile('[not "a"]').match("A")
    assert not ke.compile('[not "a"]').match("a")
    assert not ke.compile('[not not "a"]').match("b")
    assert not ke.compile('[not not "a"]').match("A")
    assert ke.compile('[not not "a"]').match("a")
    assert ke.compile('[not ["a" | "b"]]').match("c")
    assert not ke.compile('[not ["a" | "b"]]').match("a")
    assert not ke.compile('[not ["a" | "b"]]').match("b")
    assert ke.compile('[not "a"]').match("0")
    assert ke.compile('[not ["a" | #d]]').match("b")
    assert not ke.compile('[not ["a" | #d]]').match("0")
    assert not ke.compile('[not ["a" | #d]]').match("a")
    assert not ke.compile('[not ["a" | #d]]').match("9")
    assert ke.compile("[not #a..f]").match("g")
    assert ke.compile("[not #a..f]").match("A")
    assert not ke.compile("[not #a..f]").match("a")
    assert not ke.compile("[not #a..f]").match("c")
    assert not ke.compile("[not #a..f]").match("f")
    with pytest.raises(re.error):
        ke.compile('[not "ab"]')
    with pytest.raises(re.error):
        ke.compile("[not]")


def test_real():
    print(ke.re("[#ss #real #es]"))
    r = ke.compile("[#ss #real #es]")
    assert r.match("0")
    assert r.match("0.0")
    assert r.match("-0.0")
    assert r.match("1234.56")
    assert r.match("-0.0")
    assert not r.match("0.")
    assert not r.match(".0")
    assert not r.match("-0.")
    assert not r.match("-.0")


def test_float():
    print(ke.re("[#ss #float #es]"))
    f = ke.compile("[#ss #float #es]")
    assert f.match("0.0")
    assert f.match("-0.0")
    assert f.match("0.0e1")
    assert f.match("-0.0e1")
    assert f.match("0.0e-1")
    assert f.match("0.0E1")
    assert f.match("0.")
    assert f.match("0.e1")
    assert f.match(".0")
    assert f.match(".0e1")
    assert f.match("0e1")
    assert not f.match("0")
    assert not f.match(".")
    assert not f.match(".e1")
    assert not f.match("0.0e")
    assert f.match("1024.12e3")
    assert f.match("-1024.12e-3")
    assert f.match("-.12e3")
    assert f.match("-1024.12E-3")


def test_hex():
    h = ke.compile("[#ss #hexn #es]")
    assert h.match("0")
    assert h.match("9")
    assert h.match("a")
    assert h.match("f")
    assert h.match("A")
    assert h.match("1234567890abcdef")
    assert h.match("09af09AF")
    assert not h.match("-1")
    assert not h.match("g")


def test_token():
    h = ke.compile("[#ss #token #es]")
    assert h.match("a")
    assert h.match("abc")
    assert h.match("a1")
    assert h.match("A")
    assert h.match("AbC19")
    assert h.match("_")
    assert h.match("_a")
    assert h.match("_1")
    assert h.match("a_b_c")
    assert not h.match("1")
    assert not h.match("1_")
    assert not h.match("9_")
    assert not h.match("1234")
    assert not h.match("!")
    assert not h.match("a!")
    assert not h.match("#x")
    assert not h.match("x ")
    assert not h.match("x y")


def test_c0_c1():
    assert ke.compile("a[#c0]z").match("a16z").groups()[0] == "16"
    assert ke.compile("a[#c0]z").search("http://abc.xyz/").groups()[0] == "bc.xy"
    assert ke.compile("a[#c0]z").search("azure").groups()[0] == ""
    assert ke.compile("a[#c1]z").match("a16z").groups()[0] == "16"
    assert ke.compile("a[#c1]z").search("http://abc.xyz/").groups()[0] == "bc.xy"
    assert not ke.compile("a[#c1]z").search("azure")


def test_escapes():
    assert ke.compile(
        "[#dq #q #t #lb #rb #vertical_tab #formfeed #bell #backspace #el]"
    ).match(""""'\t[]\v\f\a\b""")


def test_define_macros():
    expected = "Yo dawg, I heard you like Yo dawg, I heard you like this, so I put some of this in your regex so you can recurse while you recurse, so I put some Yo dawg, I heard you like this, so I put some of this in your regex so you can recurse while you recurse in your Yo dawg, I heard you like this, so I put some of this in your regex so you can recurse while you recurse so you can recurse while you recurse".replace(
        ",", re.escape(",")
    )  # `,` in CPython, `\,` in PyPy
    assert (
        ke.re(
            """[#recursive_dawg][
#yo=["Yo dawg, I heard you like "]
#so_i_put=[", so I put some "]
#in_your=[" in your "]
#so_you_can=[" so you can "]
#while_you=[" while you "]
#dawg=[#yo "this" #so_i_put "of this" #in_your "regex" #so_you_can "recurse" #while_you "recurse"]
#recursive_dawg=[#yo #dawg #so_i_put #dawg #in_your #dawg #so_you_can "recurse" #while_you "recurse"]
]"""
        )
        == expected
    )


def test_newlines():
    assert_pattern(
        "[#sl]a[0+ #any]x[0+ #any]b[#el]", ["axb", "azzzxzzzb"], ["a\nx\nb", "\naxb"]
    )
    assert_pattern(
        ke.compile("[#sl]a[0+ #any]x[0+ #any]b[#el]", ke.DOTALL), ["a\nx\nb"], ["\naxb"]
    )
    assert_pattern(
        ke.compile("[#sl]a[0+ #aaa]x[0+ #aaa]b[#el]"), ["a\nx\nb"], ["\naxb"]
    )
    assert_pattern(
        ke.compile(
            "[0+ #any][#sl]a[0+ #any]x[0+ #any]b[#el]", ke.DOTALL | ke.MULTILINE
        ),
        ["\naxb", "ax\naxb", "\naxb\n", "ax\nazzzxzzzb"],
    )
    assert_pattern(
        ke.compile("[0+ #any][#sl]a[0+ #any]x[0+ #any]b[#el]", ke.MULTILINE),
        [],
        ["\naxb", "ax\naxb", "\naxb\n", "ax\nazzzxzzzb"],
    )
    assert_pattern(
        ke.compile("[0+ #aaa][#sl]a[0+ #aaa]x[0+ #aaa]b[#el]", ke.MULTILINE),
        ["\naxb", "ax\naxb", "\naxb\n", "ax\nazzzxzzzb"],
    )
    assert (
        ke.re("[#start_string][#newline][#end_string]")
        == r"\A(?:[\n\r\u2028\u2029]|\r\n)\Z"
    )
    assert_pattern(
        ke.compile("[#start_string][#newline][#end_string]"),
        ["\r", "\n", "\u2028", "\u2029", "\r\n"],
        ["\n\n", "\n\r", "\r\r"],
    )
    assert_pattern(
        ke.compile("[#start_string][#newline_character][#end_string]"),
        ["\r", "\n", "\u2028", "\u2029"],
        ["\r\n", "\n\n", "\n\r", "\r\r"],
    )
    assert_pattern(ke.compile("a[#not_newline]c"), ["abc"], ["a\rc", "a\nc", "abbc"])


def test_js():
    # Named-group syntax differs between Python and JavaScript output.
    assert ke.re("[capture:hi 'hi']") == "(?P<hi>hi)"
    assert ke.re("[capture:hi 'hi']", syntax="javascript") == "(?<hi>hi)"
13,887
5,830
# Convenience entry points for analyzing Android artifacts (APK/DEX/ODEX)
# through a shared androguard Session, plus decompiler selection.
from future import standard_library
standard_library.install_aliases()

from androguard import session
# NOTE(review): the star imports below presumably provide `androconf` and the
# Decompiler* classes used throughout this module — verify against androguard.
from androguard.core.bytecodes.dvm import *
from androguard.decompiler.decompiler import *
from androguard.core.androconf import CONF


def init_print_colors():
    # Route androguard's printing through IPython with terminal colors.
    from IPython.utils import coloransi, io
    androconf.default_colors(coloransi.TermColors)
    CONF["PRINT_FCT"] = io.stdout.write


def get_default_session():
    """
    Return the default Session from the configuration
    or create a new one, if the session is None.
    """
    if CONF["SESSION"] is None:
        CONF["SESSION"] = session.Session()
    return CONF["SESSION"]


def AnalyzeAPK(filename, session=None):
    """
    Analyze an android application and setup all stuff for a more quickly
    analysis !

    :param session: A session (default None)
    :param filename: the filename of the android application or a buffer which
                     represents the application
    :type filename: string

    :rtype: return the :class:`APK`, :class:`DalvikVMFormat`, and
            :class:`VMAnalysis` objects
    """
    androconf.debug("AnalyzeAPK")
    if not session:
        session = get_default_session()
    with open(filename, "rb") as fd:
        data = fd.read()
    session.add(filename, data)
    return session.get_objects_apk(filename)


def AnalyzeDex(filename, session=None):
    """
    Analyze an android dex file and setup all stuff for a more quickly
    analysis !

    :param session: A session (Default None)
    :param filename: the filename of the android dex file or a buffer which
                     represents the dex file
    :type filename: string

    :rtype: return the :class:`DalvikVMFormat`, and :class:`VMAnalysis`
            objects
    """
    androconf.debug("AnalyzeDex")
    if not session:
        session = get_default_session()
    with open(filename, "rb") as fd:
        data = fd.read()
    return session.addDEX(filename, data)


def AnalyzeODex(filename, session=None):
    """
    Analyze an android odex file and setup all stuff for a more quickly
    analysis !

    :param filename: the filename of the android dex file or a buffer which
                     represents the dex file
    :type filename: string
    :param session: The Androguard Session to add the ODex to (default: None)

    :rtype: return the :class:`DalvikOdexVMFormat`, and :class:`VMAnalysis`
            objects
    """
    androconf.debug("AnalyzeODex")
    if not session:
        session = get_default_session()
    with open(filename, "rb") as fd:
        data = fd.read()
    return session.addDEY(filename, data)


def RunDecompiler(d, dx, decompiler):
    """
    Run the decompiler on a specific analysis

    :param d: the DalvikVMFormat object
    :type d: :class:`DalvikVMFormat` object
    :param dx: the analysis of the format
    :type dx: :class:`VMAnalysis` object
    :param decompiler: the type of decompiler to use ("dad", "dex2jad", "ded")
    :type decompiler: string
    """
    if decompiler is not None:
        androconf.debug("Decompiler ...")
        decompiler = decompiler.lower()
        # Dispatch on the requested backend; external-tool backends read
        # their binary paths from androguard's CONF.
        if decompiler == "dex2jad":
            d.set_decompiler(DecompilerDex2Jad(
                d,
                androconf.CONF["PATH_DEX2JAR"],
                androconf.CONF["BIN_DEX2JAR"],
                androconf.CONF["PATH_JAD"],
                androconf.CONF["BIN_JAD"],
                androconf.CONF["TMP_DIRECTORY"]))
        elif decompiler == "dex2fernflower":
            d.set_decompiler(DecompilerDex2Fernflower(
                d,
                androconf.CONF["PATH_DEX2JAR"],
                androconf.CONF["BIN_DEX2JAR"],
                androconf.CONF["PATH_FERNFLOWER"],
                androconf.CONF["BIN_FERNFLOWER"],
                androconf.CONF["OPTIONS_FERNFLOWER"],
                androconf.CONF["TMP_DIRECTORY"]))
        elif decompiler == "ded":
            d.set_decompiler(DecompilerDed(
                d,
                androconf.CONF["PATH_DED"],
                androconf.CONF["BIN_DED"],
                androconf.CONF["TMP_DIRECTORY"]))
        else:
            # Default: the built-in "dad" decompiler needs no external tools.
            d.set_decompiler(DecompilerDAD(d, dx))
4,173
1,260
# -*- coding: utf-8 -*- # this file is released under public domain and you can use without limitations # ------------------------------------------------------------------------- # This is a sample controller # - index is the default action of any application # - user is required for authentication and authorization # - download is for downloading files uploaded in the db (does streaming) # ------------------------------------------------------------------------- from collections import OrderedDict import json import datetime from gluon.tools import prettydate db.logging.created_on.represent = db.application.created_on.represent = \ lambda v, r: XML('<span title="%s">%s</span>' % (prettydate(getattr(r, 'created_on', None)), getattr(r, 'created_on', None))) db.application.modified_on.represent = lambda v, r: XML('<span title="%s">%s</span>' % (prettydate( getattr(r, 'modified_on', None)), getattr(r, 'modified_on', None))) _role_to_permission = dict(app_managers="manage", contributors="contribute", admins="administrate", trainers="train", masters=None) # todo observers=observe def test_email(): response.view = os.path.join("templates", "email.html") return dict(summary="test", first_name="test", message="test", action_url="test", call_to_action="test") def _disable_rbac_fields(*args): def _decorator(func): """when displaying a grid with something joined, sometimes the auth tables joined are not necessary""" def inner(): # http://stackoverflow.com/questions/19673284/how-do-i-get-list-of-field-objects-in-a-table-in-web2py for rbac in args: for field in rbac: field.readable = False field.writable = False return func return inner() return _decorator def index(): # web2py performs inner joins automatically and transparently when the query links two or more tables response.title = "Insight PCMH" response.subtitle = " - Patient-Centered Medical Home Transformation for Practices" redirect(URL('init', 'default', 'dash')) @_disable_rbac_fields(db.auth_membership, 
db.auth_permission, db.auth_group) @auth.requires_login() def dash(): # web2py performs inner joins automatically and transparently when the query links two or more tables response.title = "PCMH Dashboard" return dict() @auth.requires(URL.verify(request, hmac_key=MY_KEY, salt=session.MY_SALT, hash_vars=["revoke_participant", "permission", "row_id"])) def revoke_user(): e = request.get_vars["revoke_participant"] p = request.get_vars["permission"] r = request.get_vars["row_id"] del request.get_vars["row_id"] del request.get_vars["revoke_participant"] del request.get_vars["permission"] del request.get_vars["_signature"] logger.warn("Revoking, user id, permission, row, self-group:\n%s, %s, %s, %s" % (e, p, r, auth.id_group("user_%s" % e))) auth.del_permission(auth.id_group("user_%s" % e), p, "application", r) # 0 means user_1 session.flash = "Revoked user from application ID%s" % r redirect(URL("dash.html", args=request.args, vars=request.get_vars)) @auth.requires(URL.verify(request, hmac_key=MY_KEY, salt=session.MY_SALT, hash_vars=["assign_participant", "permission", "row_id"])) def assign_user(): e = request.get_vars["assign_participant"] p = request.get_vars["permission"] r = request.get_vars["row_id"] del request.get_vars["row_id"] del request.get_vars["assign_participant"] del request.get_vars["permission"] del request.get_vars["_signature"] auth.add_permission(auth.id_group("user_%s" % e), p, "application", r) # 0 means user_1 #if not auth.has_membership(role="contributors", user_id=e): # auth.add_membership(role="contributors", user_id=e) session.flash = "Assigned user to application ID%s" % r redirect(URL("dash.html", args=request.args, vars=request.get_vars)) def _assigned_column(row): row.id = getattr(row, 'id', None) or getattr(row.application, 'id', None) # can be from join or regular query assert row.id, "expected row.id" participators = PARTICIPATORS(row) # # print participators participator_widgets = [] for participator in participators: base = dict( 
c_id=participator.auth_user.id, c_fn=participator.auth_user.first_name.capitalize(), c_ln=participator.auth_user.last_name.capitalize(), c_email=participator.auth_user.email, ) base.update( c_posessive="%s's" % base["c_fn"], c_name="%s %s (%s)" % (base['c_fn'], base['c_ln'], base['c_id']), c_name_html="<a href='mailto:%s'>%s %s</a> <span class='text-muted'>(%s)</span>" % (base['c_email'], base['c_fn'], base['c_ln'], base['c_id']), c_acronym="%s%s%s" % (participator.auth_user.first_name.capitalize()[0], participator.auth_user.last_name.capitalize()[0], participator.auth_user.id), ) if auth.has_membership(user_id=participator.auth_user.id, role="trainers") and \ participator.auth_permission.name == "train": participator_widgets.append(dict(color="danger", title="trainer", permission="train", c_title=base['c_name']+" (Trainer)", c_title_html=base['c_name_html'] + " <span class='text-danger'>(Trainer)</span>", **base)) elif auth.has_membership(user_id=participator.auth_user.id, role="app_managers") and \ participator.auth_permission.name == "manage": participator_widgets.append(dict(color="warning", title="app_manager", permission="manage", c_title=base['c_name']+" (App Manager)", c_title_html=base['c_name_html'] + " <span class='text-warning'>(App Manager)</span>", **base)) # it is possible to be two roles elif participator.auth_permission.name == "contribute": is_contrib = auth.has_membership(user_id=participator.auth_user.id, role="contributors") fake_title = "Contributor" if is_contrib else "Visitor" fake_color = "success" if is_contrib else "primary" participator_widgets.append(dict(color=fake_color, title="contributor", permission="contribute", c_title=base['c_name'] + " (%s)" % fake_title, c_title_html=base['c_name_html'] + " <span class='text-%s'>(%s)</span>" % (fake_color, fake_title), **base)) for participator_widget in participator_widgets: revoke_id = "revoke_participant_%s_%s_%s" % (participator_widget["title"], participator_widget["c_id"], row.id) 
widget_script = """ {select}<script> $(document).ready(function(){{ $("#{revoke_id}").val('').multiselect({{ nonSelectedText: '{c_acronym}', onChange: function(option, checked, select) {{ window.location = '{url}'; }}, buttonClass: 'btn btn-sm btn-{color}', disableIfEmpty: true, enableHTML: true}}); }}) </script>""" # option widget_a = XML("<button class='btn btn-sm btn-{color}' " "title=\"{c_name_html}\">{c_acronym}</button>".format( color=participator_widget["color"], c_acronym=participator_widget["c_acronym"], c_name_html=participator_widget["c_title_html"], # todo change to popover )) widget_b = XML(widget_script.format( c_acronym=participator_widget["c_acronym"], revoke_id=revoke_id, color=participator_widget["color"], url=URL("revoke_user.html", vars=dict( revoke_participant=participator_widget["c_id"], permission=participator_widget["permission"], row_id=row.id, **request.get_vars), hmac_key=MY_KEY, salt=session.MY_SALT, hash_vars=["revoke_participant", "permission", "row_id"] ), select=SELECT(OPTGROUP(OPTION("Revoke %s access to this application" % participator_widget['c_posessive']), _label=participator_widget['c_title_html'] + ":"), _id=revoke_id, _class="assigned", ) )) if IS_ADMIN or IS_MASTER: widget = widget_b # only admins can revoke else: widget = widget_a #assert(widget, "expected a widget!") participator_widget.update(dict( widget=widget )) colleagues = db((db.auth_group.id == db.auth_membership.group_id) & (db.auth_group.role.belongs("trainers", "app_managers", "contributors")) & (db.auth_user.id == db.auth_membership.user_id)).select(orderby=db.auth_user.first_name|db.auth_group.role) employee_options = [] contributor_options = [] assign_urls = {} title = e_title = e_title_html = permission = None for colleague in colleagues: e_id = colleague.auth_user.id e_fn = colleague.auth_user.first_name.capitalize() e_ln = colleague.auth_user.last_name.capitalize() e_email = colleague.auth_user.email e_name = "%s %s (%s)" % (e_fn, e_ln, e_id) e_name_html = 
"%s %s <span class='text-muted'>(%s)</span>" % (e_fn, e_ln, e_id) if colleague.auth_group.role == "trainers": title = "trainer" permission = "train" e_title = e_name + " (trainer)" e_title_html = e_name_html + " <span class='text-danger'>(Trainer)</span>" if colleague.auth_group.role == "app_managers": title = "app_manager" permission = "manage" e_title = e_name + " (App Mananger)" e_title_html = e_name_html + " <span class='text-warning'>(App Manager)</span>" if colleague.auth_group.role == "contributors": title = "contributor" permission = "contribute" e_title = e_name + " (Contributor)" e_title_html = e_name_html + " <span class='text-success'>(Contributor)</span>" assign_id = "%s_%s" % (title, e_id) assign_urls[assign_id] = URL("assign_user.html", vars=dict( assign_participant=e_id, permission=permission, row_id=row.id, **request.get_vars), hmac_key=MY_KEY, salt=session.MY_SALT, hash_vars=["assign_participant", "permission", "row_id"] ) # if not already in revoke buttons if not "%s_%s" % (title, e_id) in map(lambda w: "%s_%s" % (w["title"], w["c_id"]), participator_widgets): _bucket = employee_options if colleague.auth_group.role == "contributors": _bucket = contributor_options _bucket.append(OPTION(e_title_html, _value=assign_id)) colleagues_and_participants = set(map(lambda e: e.auth_user.id, colleagues) + map(lambda e: e.auth_user.id, participators)) # a_week_ago = request.now - datetime.timedelta(days=7) recent = db(db.auth_user.created_on > 0).select(orderby=~db.auth_user.modified_on, limitby=(0, 50)) recent.exclude(lambda r: r.id in colleagues_and_participants or r.is_insight) recent_options = [] for new in recent: n_id = new.id n_fn = new.first_name.capitalize() n_ln = new.last_name.capitalize() n_email = new.email n_name = "%s %s (%s)" % (n_fn, n_ln, n_id) n_name_html = "%s %s <span class='text-muted'>(%s) (%s)</span>" % (n_fn, n_ln, n_id, n_email) assign_id = "new_%s" % n_id assign_urls[assign_id] = URL("assign_user.html", vars=dict( 
assign_participant=n_id, permission="contribute", row_id=row.id, **request.get_vars), hmac_key=MY_KEY, salt=session.MY_SALT, hash_vars=["assign_participant", "permission", "row_id"] ) recent_options.append(OPTION(n_name_html, _value=assign_id)) employee_optgroup = OPTGROUP(*employee_options, _label="Assign an employee:") contributor_optgroup = OPTGROUP(*contributor_options, _label="Assign a contributor:") recent_optgroup = OPTGROUP(*recent_options, _label="Assign a recent registrant:") assign_optgroups = [] enable_participant_assign_select = False if IS_ADMIN or IS_MASTER: if recent_options: assign_optgroups.append(recent_optgroup) if contributor_options: # no need to show opt-group if there are no options available assign_optgroups.append(contributor_optgroup) if employee_options: assign_optgroups.append(employee_optgroup) enable_participant_assign_select = True all_widgets = map(lambda e: e['widget'], participator_widgets) if enable_participant_assign_select: all_widgets.append(XML("""{select}<script> $(document).ready(function(){{ urls_{row_id} = {urls}; $("#assign_participant_{row_id}").val('').multiselect({{ nonSelectedText: '<span class="glyphicon glyphicon-plus"></span>', maxHeight: 200, enableFiltering: true, enableCaseInsensitiveFiltering: true, onChange: function(option, checked, select) {{ window.location = urls_{row_id}[$(option).val()]; }}, buttonClass: 'btn btn-sm btn-info', disableIfEmpty: true, enableHTML: true}}); }}) </script>""".format( # option #http://bit.ly/2g9PyCl select remove default choice, then style with multiselect select=SELECT( *reversed(assign_optgroups), _id="assign_participant_%s" % row.id, _class="assign_participants" ), row_id=row.id, urls=json.dumps(assign_urls) ))) # , _multiple="multiple") # only one choice container = DIV(*all_widgets, _style="display:flex") return container @auth.requires(IS_ADMIN or IS_MASTER, requires_login=True) def certify(): for field in db.application: field.readable = False field.writable = False 
db.application.practice_photo.readable = True db.application.practice_photo.writable = True db.application.certified_on.writable = True db.application.certified_on.readable = True db.application.practice_name.readable = True def onvalidation(form): form.vars.status = "Certified" form.vars.progress = 1.0 form = SQLFORM.grid(db.application, onvalidation=onvalidation, create=False, deletable=False) return dict(form=form) # @_disable_rbac_fields # @auth.requires_signature() def load_apps_grid(): # db.application.modified_by.readable = True # db.application.created_by.readable = True # db.application.created_on.readable = True # db.application.modified_on.readable = True db.application.modified_by.readable = True db.application.modified_on.readable = True db.application.created_on.readable = True db.application.owner_id.readable = True def _progress(v, r): percent = "%0.0f%%" % (v*100) active = " active" if v < .25: color = "danger" elif v < .75: color = "warning" elif v < .99: color = "success" else: color = "info" if 0: # enable/disable spin for 100% active = "" return SPAN( percent, DIV( SPAN(percent, _class="sr-only"), DIV(_class="progress-bar progress-bar-striped progress-bar-%s%s" % (color, active), _role="progressbar", _style="width: %s;" % percent), _class="progress" ) ) db.application.progress.represent = _progress db.application.website.represent = lambda v, r: A("Visit", _href=v) if v else "N/A" def _user(v, r): u = db(db.auth_user.id == v).select().last() if not u: return SPAN("User Deleted", _class="text-danger") return A("%s %s" % (u.first_name.capitalize(), u.last_name.capitalize()), _href="mailto:%s" % u.email) db.application.owner_id.represent = _user links = [dict(header='', # header is col title body=lambda row: A(SPAN(_class="glyphicon glyphicon-play"), _class="btn btn-sm btn-default", _title="Start Application Dashboard", _style="background-color:#BA55D3;", _href=URL("2014", 'index.html', args=[0], # todo- set 2014/2017 standards here table may or 
may not be joined vars=dict(app_id=getattr(row, "application", row).id))))] if not IS_STAFF: # add contributor db.application.status.writable = False if IS_MASTER or IS_ADMIN or IS_TRAINER: db.application.owner_id.writable = True if IS_MASTER or IS_ADMIN: # remove not after testing non-master mode my_apps_grid = db(db.application.id > 0) else: my_group_id = auth.id_group("user_%s" % auth.user.id) my_apps_distinct = db((db.application.id == db.auth_permission.record_id) & # same application id will show up # twice because multiple permissions of same user can be set for the same application (ie # when you see HD1 XXX HD1 in master mode *WARNING* (db.auth_permission.name.belongs(["manage", "contribute", "administrate", "train"])) & (db.auth_permission.group_id == my_group_id)).select(groupby=db.application.id) # distinct gives near "ON" operational error, just use groupby http://bit.ly/2h0Ou3Z # groupby http://bit.ly/2h0Ou3Z logger.info(my_apps_distinct) my_apps_grid = db.application.id.belongs(map(lambda r: r.application.id, my_apps_distinct)) # will have to # double query because grid does not have distinct and groupby disables CUD links.append(dict( header="Participants", # can use SPAN body=_assigned_column )) app_grid = SQLFORM.grid(my_apps_grid, onvalidation=_app_onvalidation, showbuttontext=False, orderby=db.application.certified_on | ~db.application.id, # ~db.auth_user.is_insight | ~db.auth_user.id, maxtextlength=50, oncreate=_app_oncreate, create=IS_ADMIN or IS_MASTER or IS_CONTRIB or IS_TRAINER, formname="load_apps_grid", links=links, deletable=IS_ADMIN or IS_MASTER, editable=IS_ADMIN or IS_MASTER or IS_TRAINER, # groupby=db.application.id, # groupby by itself behaves like distinct http://bit.ly/2h0Ou3Z field_id=db.application.id, links_placement='left', buttons_placement='left') return dict(app_grid=app_grid) @auth.requires(URL.verify(request, hmac_key=MY_KEY, salt=session.MY_SALT, hash_vars=["group", "action", "user_id"])) @auth.requires(IS_ADMIN or 
IS_MASTER or IS_HIMEL) def add_remove_user(): u = request.get_vars["user_id"] a = request.get_vars["action"] g = request.get_vars["group"] del request.get_vars["group"] del request.get_vars["action"] del request.get_vars["user_id"] del request.get_vars["_signature"] user = db(db.auth_user.id == u).select().last() assert user, "expected user! (user deleted?)" assert g in _without_keys(_role_to_permission, "masters"), "Invalid role (web parameter tampering?)" # not needed because of auth.signature if a == "+": auth.add_membership(role=g, user_id=u) word = "added" direction = "to" else: print g print auth.id_group("user_%s" % u) auth.del_membership(role=g, user_id=u) db((db.auth_permission.group_id == auth.id_group("user_%s" % u)) & (db.auth_permission.name == _role_to_permission[g])).delete() word = "removed" direction = "from" session.flash = '%s was %s %s the group "%s"' % (user.first_name.capitalize(), word, direction, g.replace("_"," ")) if user.email in MASTER_EMAILS: session.flash = "Don't even think about it! ;)" redirect(URL("dash.html", args=request.args, vars=request.get_vars)) def _employee_group_links(row): _p = SPAN(_class="glyphicon glyphicon-plus") # plus or minus _m = SPAN(_class="glyphicon glyphicon-minus") if row.is_insight: groups = [("admins", "info"), ("app_managers", "warning"), ("trainers", "danger")] else: groups = [("contributors", "success")] all_links = [] for each in groups: group = each[0] group_name = group.capitalize().replace("_", " ") color = each[1] action = "-" if auth.has_membership(role=group, user_id=row.id) else "+" sign = _m if action == "-" else _p btn_color = "btn-secondary" confirm_msg = "" if action != "+": confirm_msg = " All assignments for this user will be lost!" 
btn_color = "btn-%s" % color label = XML("%s %s" % (sign, group_name[:-1] if not group == "contributors" else "Contributor (Write Access)") ) # take out the s at the end all_links.append(A(label, _onclick="if(!confirm('Are you sure?%s')){event.preventDefault()}" % confirm_msg, # http://bit.ly/2hM7Cbk _class="btn btn-sm %s" % btn_color, _href=URL("add_remove_user.html", vars=dict( user_id=row.id, group=group, action=action, **request.get_vars), hmac_key=MY_KEY, salt=session.MY_SALT, hash_vars=["user_id", "group", "action"] ), _title=(("Remove %s from " if action == "-" else "Add %s to ") % row.first_name.capitalize()) + group_name ) ) return DIV(*all_links, _style="display:flex") def load_logs_grid(): db.logging.owner_id.default=auth.user.id db.logging.owner_id.writable=False if IS_GRID: db.logging.created_on.readable = True db.logging.created_by.readable=False db.application.id.represent = lambda v, r: "%s (%s)" % (r.application.practice_name, r.application.id) db.logging.application.readable = True db.logging.application.writable = True if IS_MASTER or IS_ADMIN: db.logging.owner_id.readable = True query = db(db.logging.id > 0) else: db.logging.owner_id.readable = False query = db((db.logging.owner_id == db.auth_user.id) & (db.auth_user.id == auth.user.id)) logs_grid = SQLFORM.grid( query, formname="load_logs_grid", maxtextlength=50, showbuttontext=False, deletable=False if not IS_MASTER else True, # editable=False if not IS_MASTER else True, # fields=[db.logging.id, db.logging.application, db.logging.difficulty, db.logging.description, # db.logging.people_involved, db.logging.created_by, db.logging.created_on], # links=links, # onupdate=_user_onupdate, # oncreate=_user_oncreate, orderby=~db.logging.id, # show latest first field_id=db.logging.id, # user_signature=False, # this is handled by the controller links_placement='left') return dict(logs_grid=logs_grid) # @_disable_rbac_fields # @auth.requires_signature() def load_users_grid(): links = [] links.append(dict( 
header="Set roles", # can use SPAN body=_employee_group_links )) users_grid = SQLFORM.grid(db.auth_user, formname="load_users_grid", showbuttontext=False, maxtextlength=50, links=links, onupdate=_user_onupdate, oncreate=_user_oncreate, orderby=~db.auth_user.is_insight | ~db.auth_user.id, # show insight first field_id=db.auth_user.id, deletable=IS_ADMIN or IS_MASTER or IS_HIMEL, editable=IS_ADMIN or IS_MASTER or IS_HIMEL, # user_signature=False, # this is handled by the controller links_placement='left') return dict(users_grid=users_grid) def user(): """ exposes: http://..../[app]/default/user/login http://..../[app]/default/user/logout http://..../[app]/default/user/register http://..../[app]/default/user/profile http://..../[app]/default/user/retrieve_password http://..../[app]/default/user/change_password http://..../[app]/default/user/bulk_register use @auth.requires_login() @auth.requires_membership('group name') @auth.requires_permission('read','table name',record_id) to decorate functions that need access control also notice there is http://..../[app]/appadmin/manage/auth to allow administrator to manage users """ response.title = (request.args(0) or request.function).capitalize().replace("_", " ") return dict(form=auth()) #auth.requires(not auth.has_membership(user_id=getattr(auth.user, "id", None), role="trainers") and # not auth.has_membership(user_id=getattr(auth.user, "id", None), role="admins") # , requires_login=True) def _app_onvalidation(form): if not IS_STAFF: form.vars.owner_id = auth.user.id if form.vars.application_size == "Corporate": corporate_apps = db(db.application.authorized_representative == form.vars.authorized_representative).select() if form.vars.largest_practice: for app in corporate_apps: if app.largest_practice: form.errors.largest_practice = "Largest practice under the authorized representative already exists!" 
def _user_onupdate(form):
    """Grid ``onupdate`` hook for auth_user rows.

    Reconciles role memberships and the matching permission rows with the
    (possibly just changed) ``is_insight`` flag: non-insight users lose every
    staff role, insight employees lose the plain "contributors" role.
    """
    # todo revoke all permissions
    id = form.vars.id
    self_group = auth.id_group("user_%s" % id)  # the user's personal group
    # auth.del_permission(auth.id_group("user_%s" % e), p, "application", r) # 0 means user_1
    if not form.vars.is_insight:
        # Strip every staff role (everything in _role_to_permission except
        # "contributors") plus the corresponding auth_permission rows.
        groups = _without_keys(_role_to_permission, "contributors")
        for role in groups:
            permission = groups[role]
            auth.del_membership(role=role, user_id=id)
            if permission:
                db((db.auth_permission.group_id == self_group) &
                   (db.auth_permission.name == permission)).delete()
    else:
        # Insight employee: must not remain a plain contributor.
        auth.del_membership(role="contributors", user_id=id)
        db((db.auth_permission.group_id == self_group) &
           (db.auth_permission.name == "contribute")).delete()


def _user_oncreate(form):
    """Grid ``oncreate`` hook for auth_user rows.

    Creates the per-user group ("user_<id>"), adds the user to it, and makes
    non-insight registrants contributors by default.
    """
    id = form.vars.id
    self_group = "user_%s" % id
    auth.add_group(self_group, description="Group for user %s. Created in admin panel" % self_group)
    auth.add_membership(role=self_group, user_id=id)
    if not form.vars.is_insight:  # don't make insight employee contributor
        auth.add_membership(role="contributors", user_id=id)  # auto add to contributors if made from grid, not register


def _app_oncreate(form):
    """Grid ``oncreate`` hook for application rows.

    Grants the creator (and, when different, the application owner) the
    permissions they need on the new application, and gives every admin
    "administrate" on it.
    """
    app_id = form.vars.id
    if not IS_STAFF:
        auth.add_permission(0, "contribute", 'application', app_id)  # 0 means user_1
        # auth.add_membership(role="contributor", user_id=auth.user_id)
    if IS_TRAINER:
        auth.add_permission(0, "train", 'application', app_id)  # 0 means user_1
    # add primary contact
    if not auth.user.id == form.vars.owner_id:  # make primary contact a contributor
        app_owner = db(db.auth_user.id == form.vars.owner_id).select().last()
        if app_owner and not app_owner.is_insight:  # make sure owner is contributor, i.e. registrant
            self_group = auth.id_group("user_%s" % app_owner.id)
            if self_group:
                auth.add_membership(role="contributors", user_id=app_owner.id)
                auth.add_permission(self_group, "contribute", 'application', app_id)
    # Every member of the "admins" group can administrate the new application.
    admins = db((db.auth_group.id == db.auth_membership.group_id) &
                (db.auth_group.role == "admins") &
                (db.auth_user.id == db.auth_membership.user_id)
                ).select()
    for admin in admins:
        auth.add_permission(auth.id_group("user_%s" % admin.auth_user.id), "administrate", 'application', app_id)  # 0 means user_1
    # auth.add_permission(auth.id_group("masters"), "administrate", 'application', app_id)
    # app_url = URL('application', vars=dict(app_id=form.vars.id), hmac_key=MY_KEY)
    # redirect(URL(0, "index.html", vars=dict(app_id=form.vars.id)))  # redir user to his app


@cache.action()
def download():
    """
    allows downloading of uploaded files
    http://..../[app]/default/download/[filename]
    """
    return response.download(request, db)


def call():
    """
    exposes services. for example:
    http://..../[app]/default/call/jsonrpc
    decorate with @services.jsonrpc the functions to expose
    supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv
    """
    return service()
31,263
9,601
# Domain constants for the PBN (Polska Bibliografia Naukowa — the Polish
# national scholarly-publication database) integration and for point/slot
# calculations.

from collections import OrderedDict
from enum import Enum

# Contribution-type ("typ ogólny") codes attached to a publication's people.
TO_AUTOR = 0                 # author
TO_REDAKTOR = 1              # editor
TO_INNY = 2                  # other
TO_TLUMACZ = 3               # translator
TO_KOMENTATOR = 4            # commentator
TO_RECENZENT = 5             # reviewer
TO_OPRACOWAL = 6             # "compiled by"
TO_REDAKTOR_TLUMACZENIA = 7  # translation editor

# Mapping of local contribution codes to the role names used by the PBN API.
# Codes absent here (e.g. TO_INNY, TO_RECENZENT) have no PBN counterpart.
TYP_OGOLNY_DO_PBN = {
    TO_AUTOR: "AUTHOR",
    TO_REDAKTOR: "EDITOR",
    TO_TLUMACZ: "TRANSLATOR",
    TO_REDAKTOR_TLUMACZENIA: "TRANSLATION_EDITOR",
}

# Name of the "data entry" auth group ("wprowadzanie danych").
GR_WPROWADZANIE_DANYCH = "wprowadzanie danych"

# Publication-character codes used by slot calculations.
CHARAKTER_SLOTY_KSIAZKA = 1   # book
CHARAKTER_SLOTY_ROZDZIAL = 2  # chapter
CHARAKTER_SLOTY_REFERAT = 3   # conference paper

# PBN record-kind codes.
RODZAJ_PBN_ARTYKUL = 1        # article
RODZAJ_PBN_ROZDZIAL = 2       # chapter
RODZAJ_PBN_KSIAZKA = 3        # book
RODZAJ_PBN_POSTEPOWANIE = 4   # "postępowanie" — presumably a degree procedure; confirm at call sites

# General publication-character codes.
CHARAKTER_OGOLNY_ARTYKUL = "art"
CHARAKTER_OGOLNY_ROZDZIAL = "roz"
CHARAKTER_OGOLNY_KSIAZKA = "ksi"
CHARAKTER_OGOLNY_INNE = "xxx"


class DZIEDZINA(Enum):
    """Scientific domains ("dziedziny") of the Polish classification."""

    NAUKI_HUMANISTYCZNE = 1
    NAUKI_INZ_TECH = 2
    NAUKI_MEDYCZNE = 3
    NAUKI_ROLNICZE = 4
    NAUKI_SPOLECZNE = 5
    NAUKI_SCISLE = 6
    NAUKI_TEOLOGICZNE = 7
    NAUKI_SZTUKA = 8


# Domains with "higher scoring" ("wyższa punktacja") — presumably selecting a
# higher point tariff; confirm against the scoring code that consumes this.
WYZSZA_PUNKTACJA = [
    DZIEDZINA.NAUKI_SPOLECZNE,
    DZIEDZINA.NAUKI_HUMANISTYCZNE,
    DZIEDZINA.NAUKI_TEOLOGICZNE,
]

# Human-readable (Polish) labels for each domain, in display order.
DZIEDZINY = OrderedDict()
DZIEDZINY[DZIEDZINA.NAUKI_HUMANISTYCZNE] = "Nauki humanistyczne"
DZIEDZINY[DZIEDZINA.NAUKI_INZ_TECH] = "Nauki inżynieryjno-techniczne"
DZIEDZINY[DZIEDZINA.NAUKI_MEDYCZNE] = "Nauki medyczne i o zdrowiu"
DZIEDZINY[DZIEDZINA.NAUKI_ROLNICZE] = "Nauki rolnicze"
DZIEDZINY[DZIEDZINA.NAUKI_SPOLECZNE] = "Nauki społeczne"
DZIEDZINY[DZIEDZINA.NAUKI_SCISLE] = "Nauki ścisłe i przyrodnicze"
DZIEDZINY[DZIEDZINA.NAUKI_TEOLOGICZNE] = "Nauki teologiczne"
DZIEDZINY[DZIEDZINA.NAUKI_SZTUKA] = "Sztuka"


class TRYB_KALKULACJI(Enum):
    """Calculation mode for monograph scoring."""

    AUTORSTWO_MONOGRAFII = 1   # authorship of a monograph
    REDAKCJA_MONOGRAFI = 2     # editing of a monograph
    ROZDZIAL_W_MONOGRAFI = 3   # chapter in a monograph


class TRYB_DOSTEPU(Enum):
    """Access mode (literal translations of the Polish names)."""

    NIEJAWNY = 0       # "niejawny" = non-public
    TYLKO_W_SIECI = 1  # "tylko w sieci" = only on the network
    JAWNY = 2          # "jawny" = public


# Strategy identifiers — literally "until January use the previous [year],
# then the current one" and "largest record".
DO_STYCZNIA_POPRZEDNI_POTEM_OBECNY = "jan_prev_then_current"
NAJWIEKSZY_REKORD = "max_rec"

# Expected identifier-string lengths.
PBN_UID_LEN = 24
ORCID_LEN = 19

# URL templates for deep links into the PBN web UI.
LINK_PBN_DO_AUTORA = "{pbn_api_root}/core/#/person/view/{pbn_uid_id}/current"
LINK_PBN_DO_WYDAWCY = "{pbn_api_root}/core/#/publisher/view/{pbn_uid_id}/current"
LINK_PBN_DO_ZRODLA = "{pbn_api_root}/core/#/journal/view/{pbn_uid_id}/current"
LINK_PBN_DO_PUBLIKACJI = "{pbn_api_root}/core/#/publication/view/{pbn_uid_id}/current"

# Years covered by the PBN integration.
PBN_LATA = [2017, 2018, 2019, 2020, 2021, 2022]

# Minimum year from which points are counted for PBN works, and the minimum
# integration year in general.
PBN_MIN_ROK = PBN_LATA[0]

# Maximum year for procedures exporting to PBN, counting points/slots, and tests.
PBN_MAX_ROK = PBN_LATA[-1]

# Journal quartile choices as (value, label) pairs; "brak" = none.
KWARTYLE = [(None, "brak"), (1, "Q1"), (2, "Q2"), (3, "Q3"), (4, "Q4")]
2,582
1,528
import csv
from datetime import datetime, timedelta
import random
import string
import os
from os.path import expanduser
from pathlib import Path

from monitor.dbt_runner import DbtRunner
import click

# NOTE(review): this module-level list is shadowed by a more complete local
# ``any_type_columns`` inside generate_any_type_anomalies_training_and_validation_files
# and appears otherwise unused here.
any_type_columns = ['date', 'null_count', 'null_percent']

FILE_DIR = os.path.dirname(__file__)


def generate_date_range(base_date, numdays=30):
    """Return ``numdays`` datetimes counting back one day at a time from *base_date*."""
    return [base_date - timedelta(days=x) for x in range(0, numdays)]


def write_rows_to_csv(csv_path, rows, header):
    """Write *rows* (list of dicts) to *csv_path* with the given *header* columns."""
    # Creates the csv file directories if needed.
    directory_path = Path(csv_path).parent.resolve()
    Path(directory_path).mkdir(parents=True, exist_ok=True)

    with open(csv_path, 'w') as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=header)
        writer.writeheader()
        writer.writerows(rows)


def generate_rows(rows_count_per_day, dates, get_row_callback):
    """Build ``rows_count_per_day`` rows per date via ``get_row_callback(date, i, rows_count_per_day)``."""
    rows = []
    for date in dates:
        for i in range(0, rows_count_per_day):
            row = get_row_callback(date, i, rows_count_per_day)
            rows.append(row)
    return rows


def generate_string_anomalies_training_and_validation_files(rows_count_per_day=100):
    """Generate training (normal) and validation (anomalous) string-column CSV seeds.

    The validation rows deliberately use wider length ranges and higher
    missing-value rates so the anomaly tests have something to detect.
    """
    def get_training_row(date, row_index, rows_count):
        return {'date': date.strftime('%Y-%m-%d %H:%M:%S'),
                'min_length': ''.join(random.choices(string.ascii_lowercase, k=random.randint(5, 10))),
                'max_length': ''.join(random.choices(string.ascii_lowercase, k=random.randint(5, 10))),
                'average_length': ''.join(random.choices(string.ascii_lowercase, k=5)),
                'missing_count': '' if row_index < (3 / 100 * rows_count) else
                ''.join(random.choices(string.ascii_lowercase, k=5)),
                'missing_percent': '' if random.randint(1, rows_count) <= (20 / 100 * rows_count) else
                ''.join(random.choices(string.ascii_lowercase, k=5))}

    def get_validation_row(date, row_index, rows_count):
        return {'date': date.strftime('%Y-%m-%d %H:%M:%S'),
                'min_length': ''.join(random.choices(string.ascii_lowercase, k=random.randint(1, 10))),
                'max_length': ''.join(random.choices(string.ascii_lowercase, k=random.randint(5, 15))),
                'average_length': ''.join(random.choices(string.ascii_lowercase, k=random.randint(5, 8))),
                'missing_count': '' if row_index < (20 / 100 * rows_count) else
                ''.join(random.choices(string.ascii_lowercase, k=5)),
                'missing_percent': '' if random.randint(1, rows_count) <= (60 / 100 * rows_count) else
                ''.join(random.choices(string.ascii_lowercase, k=5))}

    string_columns = ['date', 'min_length', 'max_length', 'average_length', 'missing_count', 'missing_percent']

    # Training window ends two days ago; validation is yesterday only.
    dates = generate_date_range(base_date=datetime.today() - timedelta(days=2), numdays=30)
    training_rows = generate_rows(rows_count_per_day, dates, get_training_row)
    write_rows_to_csv(os.path.join(FILE_DIR, 'data', 'training', 'string_column_anomalies_training.csv'),
                      training_rows, string_columns)

    validation_date = datetime.today() - timedelta(days=1)
    validation_rows = generate_rows(rows_count_per_day, [validation_date], get_validation_row)
    write_rows_to_csv(os.path.join(FILE_DIR, 'data', 'validation', 'string_column_anomalies_validation.csv'),
                      validation_rows, string_columns)


def generate_numeric_anomalies_training_and_validation_files(rows_count_per_day=200):
    """Generate training (normal) and validation (anomalous) numeric-column CSV seeds."""
    def get_training_row(date, row_index, rows_count):
        return {'date': date.strftime('%Y-%m-%d %H:%M:%S'),
                'min': random.randint(100, 200),
                'max': random.randint(100, 200),
                'zero_count': 0 if row_index < (3 / 100 * rows_count) else random.randint(100, 200),
                'zero_percent': 0 if random.randint(1, rows_count) <= (20 / 100 * rows_count) else
                random.randint(100, 200),
                'average': random.randint(99, 101),
                'standard_deviation': random.randint(99, 101),
                'variance': random.randint(99, 101)}

    def get_validation_row(date, row_index, rows_count):
        # Shift the index so the zero_count condition triggers for more rows.
        row_index += -(rows_count / 2)
        return {'date': date.strftime('%Y-%m-%d %H:%M:%S'),
                'min': random.randint(10, 200),
                'max': random.randint(100, 300),
                'zero_count': 0 if row_index < (80 / 100 * rows_count) else random.randint(100, 200),
                'zero_percent': 0 if random.randint(1, rows_count) <= (60 / 100 * rows_count) else
                random.randint(100, 200),
                'average': random.randint(101, 110),
                'standard_deviation': random.randint(80, 120),
                'variance': random.randint(80, 120)}

    numeric_columns = ['date', 'min', 'max', 'zero_count', 'zero_percent', 'average', 'standard_deviation', 'variance']

    dates = generate_date_range(base_date=datetime.today() - timedelta(days=2), numdays=30)
    training_rows = generate_rows(rows_count_per_day, dates, get_training_row)
    write_rows_to_csv(os.path.join(FILE_DIR, 'data', 'training', 'numeric_column_anomalies_training.csv'),
                      training_rows, numeric_columns)

    validation_date = datetime.today() - timedelta(days=1)
    validation_rows = generate_rows(rows_count_per_day, [validation_date], get_validation_row)
    write_rows_to_csv(os.path.join(FILE_DIR, 'data', 'validation', 'numeric_column_anomalies_validation.csv'),
                      validation_rows, numeric_columns)


def generate_any_type_anomalies_training_and_validation_files(rows_count_per_day=300):
    """Generate training/validation CSV seeds exercising null anomalies for several value types."""
    def get_training_row(date, row_index, rows_count):
        return {'date': date.strftime('%Y-%m-%d %H:%M:%S'),
                'null_count_str': None if row_index < (3 / 100 * rows_count) else
                ''.join(random.choices(string.ascii_lowercase, k=5)),
                'null_percent_str': None if random.randint(1, rows_count) <= (20 / 100 * rows_count) else
                ''.join(random.choices(string.ascii_lowercase, k=5)),
                'null_count_float': None if row_index < (3 / 100 * rows_count) else random.uniform(1.2, 8.9),
                'null_percent_float': None if random.randint(1, rows_count) <= (20 / 100 * rows_count) else
                random.uniform(1.2, 8.9),
                'null_count_int': None if row_index < (3 / 100 * rows_count) else random.randint(100, 200),
                'null_percent_int': None if random.randint(1, rows_count) <= (20 / 100 * rows_count) else
                random.randint(100, 200),
                'null_count_bool': None if row_index < (3 / 100 * rows_count) else bool(random.getrandbits(1)),
                'null_percent_bool': None if random.randint(1, rows_count) <= (20 / 100 * rows_count) else
                bool(random.getrandbits(1))}

    def get_validation_row(date, row_index, rows_count):
        # Same shape as training, but with much higher null rates (80% / 60%).
        return {'date': date.strftime('%Y-%m-%d %H:%M:%S'),
                'null_count_str': None if row_index < (80 / 100 * rows_count) else
                ''.join(random.choices(string.ascii_lowercase, k=5)),
                'null_percent_str': None if random.randint(1, rows_count) <= (60 / 100 * rows_count) else
                ''.join(random.choices(string.ascii_lowercase, k=5)),
                'null_count_float': None if row_index < (80 / 100 * rows_count) else random.uniform(1.2, 8.9),
                'null_percent_float': None if random.randint(1, rows_count) <= (60 / 100 * rows_count) else
                random.uniform(1.2, 8.9),
                'null_count_int': None if row_index < (80 / 100 * rows_count) else random.randint(100, 200),
                'null_percent_int': None if random.randint(1, rows_count) <= (60 / 100 * rows_count) else
                random.randint(100, 200),
                'null_count_bool': None if row_index < (80 / 100 * rows_count) else bool(random.getrandbits(1)),
                'null_percent_bool': None if random.randint(1, rows_count) <= (60 / 100 * rows_count) else
                bool(random.getrandbits(1))}

    any_type_columns = ['date', 'null_count_str', 'null_percent_str', 'null_count_float', 'null_percent_float',
                        'null_count_int', 'null_percent_int', 'null_count_bool', 'null_percent_bool']

    dates = generate_date_range(base_date=datetime.today() - timedelta(days=2), numdays=30)
    training_rows = generate_rows(rows_count_per_day, dates, get_training_row)
    write_rows_to_csv(os.path.join(FILE_DIR, 'data', 'training', 'any_type_column_anomalies_training.csv'),
                      training_rows, any_type_columns)

    validation_date = datetime.today() - timedelta(days=1)
    validation_rows = generate_rows(rows_count_per_day, [validation_date], get_validation_row)
    write_rows_to_csv(os.path.join(FILE_DIR, 'data', 'validation', 'any_type_column_anomalies_validation.csv'),
                      validation_rows, any_type_columns)


def generate_fake_data():
    """Regenerate all training/validation CSV seed files."""
    print('Generating fake data!')
    generate_string_anomalies_training_and_validation_files()
    generate_numeric_anomalies_training_and_validation_files()
    generate_any_type_anomalies_training_and_validation_files()


def e2e_tests(target, test_types):
    """Run the end-to-end dbt test flow against *target* for the given *test_types*.

    Returns a 7-element list of result lists in the order expected by
    print_tests_results.  NOTE(review): the 'no_timestamp' results are printed
    but not included in the returned list — confirm whether that is intended.
    """
    table_test_results = []
    string_column_anomalies_test_results = []
    numeric_column_anomalies_test_results = []
    any_type_column_anomalies_test_results = []
    schema_changes_test_results = []
    regular_test_results = []
    artifacts_results = []

    dbt_runner = DbtRunner(project_dir=FILE_DIR, profiles_dir=os.path.join(expanduser('~'), '.dbt'), target=target)

    # Clear any results left over from a previous run.
    clear_test_logs = dbt_runner.run_operation(macro_name='clear_tests')
    for clear_test_log in clear_test_logs:
        print(clear_test_log)

    # Seed the "normal" training data and build the models from scratch.
    dbt_runner.seed(select='training')
    dbt_runner.run(full_refresh=True)

    if 'table' in test_types:
        dbt_runner.test(select='tag:table_anomalies')
        table_test_results = dbt_runner.run_operation(macro_name='validate_table_anomalies')
        print_test_result_list(table_test_results)
        # If only table tests were selected no need to continue to the rest of the flow
        if len(test_types) == 1:
            return [table_test_results, string_column_anomalies_test_results, numeric_column_anomalies_test_results,
                    any_type_column_anomalies_test_results, schema_changes_test_results, regular_test_results,
                    artifacts_results]

    # Creates row_count metrics for anomalies detection.
    if 'no_timestamp' in test_types:
        current_time = datetime.now()
        # Run operation returns the operation value as a list of strings.
        # So we convert the days_back value into int.
        days_back_project_var = int(dbt_runner.run_operation(macro_name="return_config_var",
                                                             macro_args={"var_name": "days_back"})[0])
        # No need to create todays metric because the validation run does it.
        for run_index in range(1, days_back_project_var):
            custom_run_time = (current_time - timedelta(run_index)).isoformat()
            dbt_runner.test(select='tag:no_timestamp', vars={"custom_run_started_at": custom_run_time})

    # Seed the anomalous validation data and rebuild.
    dbt_runner.seed(select='validation')
    if 'schema' in test_types:
        # We need to upload the schema changes dataset before at least one dbt run, as dbt run takes a snapshot of the
        # normal schema
        dbt_runner.seed(select='schema_changes_data')
    dbt_runner.run()

    if 'debug' in test_types:
        # Debug mode: run only the debug-tagged tests and bail out.
        dbt_runner.test(select='tag:debug')
        return [table_test_results, string_column_anomalies_test_results, numeric_column_anomalies_test_results,
                any_type_column_anomalies_test_results, schema_changes_test_results, regular_test_results,
                artifacts_results]

    if 'no_timestamp' in test_types:
        dbt_runner.test(select='tag:no_timestamp')
        no_timestamp_test_results = dbt_runner.run_operation(macro_name='validate_no_timestamp_anomalies')
        print_test_result_list(no_timestamp_test_results)

    if 'column' in test_types:
        dbt_runner.test(select='tag:string_column_anomalies')
        string_column_anomalies_test_results = dbt_runner.run_operation(macro_name='validate_string_column_anomalies')
        print_test_result_list(string_column_anomalies_test_results)

        dbt_runner.test(select='tag:numeric_column_anomalies')
        numeric_column_anomalies_test_results = dbt_runner.run_operation(macro_name='validate_numeric_column_anomalies')
        print_test_result_list(numeric_column_anomalies_test_results)

        dbt_runner.test(select='tag:all_any_type_columns_anomalies')
        any_type_column_anomalies_test_results = dbt_runner.run_operation(macro_name=
                                                                          'validate_any_type_column_anomalies')
        print_test_result_list(any_type_column_anomalies_test_results)

    if 'schema' in test_types:
        # Apply the schema changes, rebuild, then check they were detected.
        schema_changes_logs = dbt_runner.run_operation(macro_name='do_schema_changes')
        for schema_changes_log in schema_changes_logs:
            print(schema_changes_log)
        dbt_runner.run()
        dbt_runner.test(select='tag:schema_changes')
        schema_changes_test_results = dbt_runner.run_operation(macro_name='validate_schema_changes')
        print_test_result_list(schema_changes_test_results)

    if 'regular' in test_types:
        dbt_runner.test(select='test_type:singular tag:regular_tests')
        regular_test_results = dbt_runner.run_operation(macro_name='validate_regular_tests')
        print_test_result_list(regular_test_results)

    if 'artifacts' in test_types:
        artifacts_results = dbt_runner.run_operation(macro_name='validate_dbt_artifacts')
        print_test_result_list(artifacts_results)

    return [table_test_results, string_column_anomalies_test_results, numeric_column_anomalies_test_results,
            any_type_column_anomalies_test_results, schema_changes_test_results, regular_test_results,
            artifacts_results]


def print_test_result_list(test_results):
    """Print each result in *test_results* on its own line."""
    for test_result in test_results:
        print(test_result)


def print_tests_results(table_test_results, string_column_anomalies_test_results,
                        numeric_column_anomalies_test_results, any_type_column_anomalies_test_results,
                        schema_changes_test_results, regular_test_results, artifacts_results):
    """Print all seven result groups with section headers."""
    print('\nTable test results')
    print_test_result_list(table_test_results)

    print('\nString columns test results')
    print_test_result_list(string_column_anomalies_test_results)

    print('\nNumeric columns test results')
    print_test_result_list(numeric_column_anomalies_test_results)

    print('\nAny type columns test results')
    print_test_result_list(any_type_column_anomalies_test_results)

    print('\nSchema changes test results')
    print_test_result_list(schema_changes_test_results)

    print('\nRegular test results')
    print_test_result_list(regular_test_results)

    print('\ndbt artifacts results')
    print_test_result_list(artifacts_results)


@click.command()
@click.option(
    '--target', '-t',
    type=str,
    default='all',
    help="snowflake / bigquery / redshift / all (default = all)"
)
@click.option(
    '--e2e-type', '-e',
    type=str,
    default='all',
    help="table / column / schema / regular / artifacts / no_timestamp / debug / all (default = all)"
)
@click.option(
    '--generate-data', '-g',
    type=bool,
    default=True,
    help="Set to true if you want to re-generate fake data (default = True)"
)
def main(target, e2e_type, generate_data):
    """CLI entry point: optionally regenerate fake data, then run e2e tests per target."""
    if generate_data:
        generate_fake_data()

    if target == 'all':
        e2e_targets = ['snowflake', 'bigquery', 'redshift']
    else:
        e2e_targets = [target]

    # Note: 'all' intentionally excludes 'no_timestamp' and 'debug'.
    if e2e_type == 'all':
        e2e_types = ['table', 'column', 'schema', 'regular', 'artifacts']
    else:
        e2e_types = [e2e_type]

    all_results = {}
    for e2e_target in e2e_targets:
        print(f'Starting {e2e_target} tests\n')
        e2e_test_results = e2e_tests(e2e_target, e2e_types)
        print(f'\n{e2e_target} results')
        all_results[e2e_target] = e2e_test_results

    for e2e_target, e2e_test_results in all_results.items():
        print(f'\n{e2e_target} results')
        print_tests_results(*e2e_test_results)


if __name__ == '__main__':
    main()
16,604
5,544
from typing import List, T def get_first_item(items: List[T]) -> T: return next(iter(items), None)
105
38
import requests import logging import math import xbmcaddon from xbmcgui import ListItem from xbmcplugin import addDirectoryItem, endOfDirectory from resources.lib.constants.url import BASE_URL, SEARCH_PATH from resources.lib.router_factory import get_router_instance from resources.lib.routes.episodelist import episode_list ADDON = xbmcaddon.Addon() logger = logging.getLogger(ADDON.getAddonInfo('id')) def generate_routes(plugin): plugin.add_route(anime_search, "/search") return plugin def anime_search(): plugin = get_router_instance() search_value = plugin.args["name"][0] if "name" in plugin.args else "" page = plugin.args["page"][0] if "page" in plugin.args else "1" params = { "name": search_value, "limit": 10, "page": int(page) } res = requests.get(BASE_URL + SEARCH_PATH, params=params) json_data = res.json() for anime in json_data['data']['list']: li = ListItem(anime["animeName"]) li.setArt({"icon": anime["backgroundSrc"]}) li.setInfo(type="video", infoLabels={"plot": anime["animeSynopsis"]}) addDirectoryItem( plugin.handle, plugin.url_for( episode_list, id=str(anime["animeID"]), listId=str(anime["animeListID"]), episode_count=str(anime["animeEpisode"]) ), li, True ) are_pages_remaining = math.ceil(float(json_data["data"]["count"]) / float(params.get("limit"))) > int(page) if (are_pages_remaining): next_page_params = { "page": page, "name": search_value } next_page_params.update({ "page": str(int(params.get("page")) + 1) }) addDirectoryItem( plugin.handle, plugin.url_for( anime_search, **next_page_params ), ListItem('Next Page'), True ) endOfDirectory(plugin.handle)
1,956
615
from setuptools import setup, Extension setup(name='arm_now', version='1.2', author='@chaign_c', url='https://github.com/nongiach/arm_now', packages=['arm_now'], py_modules=['arm_now'], entry_points = { 'console_scripts': [ 'arm_now = arm_now:main', ], }, install_requires=[ 'exall', 'requests', 'docopt', 'pySmartDL', 'python-magic' ], keywords = ['emulator', 'arm', 'mips', 'powerpc', 'x86', 'qemu'] )
605
187
from astropy.table import Table from collections import OrderedDict import numpy as np from .spectrum import Spectrum1D from copy import deepcopy from scipy import signal def read_expres(fname, full_output=False, as_arrays=False, as_order_dict=False, as_raw_table=False): if full_output: raise NotImplementedError("For now use as_raw_table=True") tab = Table.read(fname, hdu=1) if as_raw_table: return tab orders = tab["order"] cols = ["wavelength", "spectrum", "uncertainty", "continuum", "offset","offset_uncertainty","n_pixels","reduced_chi", "continuum_mask","pixel_mask","tellurics", "bary_wavelength"] Nord = len(orders) if as_order_dict: alloutput = OrderedDict() elif as_arrays: alloutput = [[], [], []] else: alloutput = [] meta = {"file":fname} for iord in range(Nord): meta["order"] = orders[iord] wave = tab["wavelength"][iord] flux = tab["spectrum"][iord] errs = tab["uncertainty"][iord] if as_arrays: alloutput[0].append(wave) alloutput[1].append(flux) alloutput[2].append(errs) else: spec = Spectrum1D(wave, flux, errs**-2, metadata=meta) if as_order_dict: alloutput[orders[iord]] = spec else: alloutput.append(spec) if as_arrays: all_output[0] = np.array(all_output[0]) all_output[1] = np.array(all_output[1]) all_output[2] = np.array(all_output[2]) return alloutput def rebin_spec(spec, n_rebin): """ Sum n_rebin pixels together """ n_new = len(spec.dispersion) // n_rebin n_orig = n_new * n_rebin wave = spec.dispersion[0:n_orig].reshape((-1,n_rebin)) flux = spec.flux[0:n_orig].reshape((-1,n_rebin)) errs = (spec.ivar[0:n_orig]**-0.5).reshape((-1,n_rebin)) wave = np.mean(wave, axis=1) flux = np.sum(flux, axis=1) errs = np.sqrt(np.sum(errs**2, axis=1)) return Spectrum1D(wave, flux, errs**-2, spec.metadata)
2,095
752
# -*- coding: utf-8 -*- """ This code evaluates the outputs from calibrated BusSim @author: geomlk """ import numpy as np import matplotlib.pyplot as plt import pickle import os os.chdir("/Users/minhkieu/Documents/Github/dust/Projects/ABM_DA/bussim/") ''' Step 1: Load calibration results ''' def load_calibrated_params_IncreaseRate(IncreaseRate): name0 = ['./Calibration/BusSim_Model2_calibration_IncreaseRate_',str(IncreaseRate),'.pkl'] str1 = ''.join(name0) with open(str1, 'rb') as f: model_params, best_mean_model2,Sol_archived_mean,Sol_archived_std,PI_archived = pickle.load(f) name0 = ['./Calibration/BusSim_Model1_calibration_IncreaseRate_',str(IncreaseRate),'.pkl'] str1 = ''.join(name0) with open(str1, 'rb') as f: model_params, best_mean_model1,Sol_archived_mean,Sol_archived_std,PI_archived = pickle.load(f) return best_mean_model1,best_mean_model2 def load_calibrated_params_maxDemand(maxDemand): name0 = ['./Calibration/BusSim_Model2_calibration_static_maxDemand_',str(maxDemand),'.pkl'] str1 = ''.join(name0) with open(str1, 'rb') as f: model_params, best_mean_model2,Sol_archived_mean,Sol_archived_std,PI_archived = pickle.load(f) name0 = ['./Calibration/BusSim_Model1_calibration_static_maxDemand_',str(maxDemand),'.pkl'] str1 = ''.join(name0) with open(str1, 'rb') as f: model_params, best_mean_model1,Sol_archived_mean,Sol_archived_std,PI_archived = pickle.load(f) return best_mean_model1,best_mean_model2 ''' Step 2: Load synthetic real-time data ''' def load_actual_params_IncreaseRate(IncreaseRate): #load up a model from a Pickle name0 = ['./Data/Realtime_data_IncreaseRate_',str(IncreaseRate),'.pkl'] str1 = ''.join(name0) with open(str1, 'rb') as f: model_params,t,x,GroundTruth = pickle.load(f) return model_params,t,x def load_actual_params_maxDemand(maxDemand): #load up a model from a Pickle name0 = ['./Data/Realtime_data_static_maxDemand_',str(maxDemand),'.pkl'] str1 = ''.join(name0) with open(str1, 'rb') as f: model_params,t,x,GroundTruth = pickle.load(f) return 
model_params,t,x #define RMSE function def rmse(yhat,y): return np.sqrt(np.square(np.subtract(yhat, y).mean())) ''' Step 3: Evaluation of calibrated models when the arrival rate is changing by 1 to 20% ''' def IncreaseRate_analysis(): Results = [0,0] do_plot=True for IncreaseRate in range(1,20,2): #load real-time data model_params, t,x = load_actual_params_IncreaseRate(IncreaseRate) #load calibrated parameters best_mean_model1,best_mean_model2 = load_calibrated_params_IncreaseRate(IncreaseRate) #load the BusSim-static model from BusSim_stochastic import Model as Model2 ArrivalRate = best_mean_model2[0:(model_params['NumberOfStop'])] DepartureRate = best_mean_model2[model_params['NumberOfStop']:(2*model_params['NumberOfStop'])] TrafficSpeed = best_mean_model2[-2] #load model model = Model2(model_params, TrafficSpeed,ArrivalRate,DepartureRate) for time_step in range(int(model.EndTime / model.dt)): model.step() x2 = np.array([bus.trajectory for bus in model.buses]).T t2 = np.arange(0, model.EndTime, model.dt) x2[x2 <= 0 ] = np.nan x2[x2 >= (model.NumberOfStop * model.LengthBetweenStop)] = np.nan #load the BusSim-stochastic model from BusSim_deterministic import Model as Model1 ArrivalRate = best_mean_model1[0:(model_params['NumberOfStop'])] DepartureRate = best_mean_model1[model_params['NumberOfStop']:(2*model_params['NumberOfStop'])] TrafficSpeed = best_mean_model1[-2] #load model model = Model1(model_params, TrafficSpeed,ArrivalRate,DepartureRate) for time_step in range(int(model.EndTime / model.dt)): model.step() x3 = np.array([bus.trajectory for bus in model.buses]).T t3 = np.arange(0, model.EndTime, model.dt) x3[x3 <= 0 ] = np.nan x3[x3 >= (model.NumberOfStop * model.LengthBetweenStop)] = np.nan #plot individual run (if needed) if do_plot: plt.figure(3, figsize=(16 / 2, 9 / 2)) plt.clf() plt.plot(t, x, linewidth=1,color='black',linestyle = '-') plt.plot(t2, x2, linewidth=1.5,linestyle = ':',color='r') plt.ylabel('Distance (m)') plt.xlabel('Time (s)') plt.plot(t3, 
x3, linewidth=1.5,linestyle = '--',color='b') plt.plot([], [], linewidth=1.5,linestyle = ':',color='r',label='BusSim-stochastic') plt.plot([], [], linewidth=1.5,linestyle = '--',color='b',label='BusSim-deterministic') plt.plot([], [], linewidth=1,color='black',linestyle = '-',label='Real-time') plt.legend() plt.show() name0 = ['./Figures/Fig_calibration_IncreaseRate_',str(IncreaseRate),'.pdf'] str1 = ''.join(name0) plt.savefig(str1, dpi=200,bbox_inches='tight') #calculate RMSE x3[np.isnan(x3)]=0 x2[np.isnan(x2)]=0 x[np.isnan(x)]=0 RMSE1 = rmse(x3,x) RMSE2 = rmse(x2,x) Results = np.vstack((Results,[RMSE1,RMSE2])) #plot the evaluation results do_plot_results=True if do_plot_results: plt.figure(3, figsize=(16 / 2, 9 / 2)) plt.clf() plt.plot(np.arange(1,20,2),Results[1:,1],linewidth=1.5,linestyle = '--',color='b',label='BusSim-deterministic') plt.plot(np.arange(1,20,2),Results[1:,0],linewidth=1.5,linestyle = ':',color='r',label='BusSim-stochastic') plt.ylabel('RMSE (m)') plt.xlabel(r'$\xi$ (%)') plt.legend() plt.show() plt.savefig('./Figures/Fig_calibration_results_IncreaseRate.pdf', dpi=200,bbox_inches='tight') return Results ''' Step 3: Evaluation of the case when the maxDemand increases from 0.5 to 4.5 ''' def maxDemand_analysis(): Results = [0,0] do_plot=False for maxDemand in range(1,10,2): maxDemand=maxDemand/2 model_params, t,x = load_actual_params_maxDemand(maxDemand) best_mean_model1,best_mean_model2 = load_calibrated_params_maxDemand(maxDemand) from BusSim_stochastic import Model as Model2 ArrivalRate = best_mean_model2[0:(model_params['NumberOfStop'])] DepartureRate = best_mean_model2[model_params['NumberOfStop']:(2*model_params['NumberOfStop'])] TrafficSpeed = best_mean_model2[-1] #load model model = Model2(model_params, TrafficSpeed,ArrivalRate,DepartureRate) for time_step in range(int(model.EndTime / model.dt)): model.step() x2 = np.array([bus.trajectory for bus in model.buses]).T t2 = np.arange(0, model.EndTime, model.dt) x2[x2 <= 0 ] = np.nan x2[x2 >= 
(model.NumberOfStop * model.LengthBetweenStop)] = np.nan from BusSim_deterministic import Model as Model1 ArrivalRate = best_mean_model1[0:(model_params['NumberOfStop'])] DepartureRate = best_mean_model1[model_params['NumberOfStop']:(2*model_params['NumberOfStop'])] TrafficSpeed = best_mean_model1[-1] #load model model = Model1(model_params, TrafficSpeed,ArrivalRate,DepartureRate) for time_step in range(int(model.EndTime / model.dt)): model.step() x3 = np.array([bus.trajectory for bus in model.buses]).T t3 = np.arange(0, model.EndTime, model.dt) x3[x3 <= 0 ] = np.nan x3[x3 >= (model.NumberOfStop * model.LengthBetweenStop)] = np.nan #plot individual plots if it's needed if do_plot: plt.figure(3, figsize=(16 / 2, 9 / 2)) plt.clf() plt.plot(t2, x2, linewidth=1,linestyle = ':',color='r',label='BusSim-stochastic') plt.plot(t, x, linewidth=1,color='black',linestyle = '-',label='Real-time') plt.ylabel('Distance (m)') plt.xlabel('Time (s)') plt.plot(t3, x3, linewidth=.5,linestyle = '--',color='b',label='BusSim-deterministic') plt.legend() plt.show() name0 = ['./Figures/Fig_calibration_maxDemand_',str(maxDemand),'.pdf'] str1 = ''.join(name0) plt.savefig(str1, dpi=200,bbox_inches='tight') #calculate RMSE for each run x3[np.isnan(x3)]=0 x2[np.isnan(x2)]=0 x[np.isnan(x)]=0 RMSE1 = rmse(x3,x) RMSE2 = rmse(x2,x) Results = np.vstack((Results,[RMSE1,RMSE2])) #plot the evaluation results do_plot_results=True if do_plot_results: plt.figure(3, figsize=(16 / 2, 9 / 2)) plt.clf() plt.plot(np.arange(1,10,2),Results[1:,0],linewidth=1.5,linestyle = '--',color='b',label='BusSim-deterministic') plt.plot(np.arange(1,10,2),Results[1:,1],linewidth=1.5,linestyle = ':',color='r',label='BusSim-stochastic') plt.ylabel('RMSE (m)') plt.xlabel(r'$maxDemand$ (passenger/min)') plt.xticks(np.arange(1,10,2), (np.arange(1,10,2)/2)) plt.legend() plt.show() plt.savefig('./Figures/Fig_calibration_results_maxDemand.pdf', dpi=200,bbox_inches='tight') return Results if __name__ == '__main__': #main code, just 
call the evaluation codes Results = IncreaseRate_analysis() #Results = maxDemand_analysis()
9,664
3,544
# -*- coding: utf-8 -*- ''' Created on 15 feb. 2015 @author: mohamed seghilani ''' import opencavity import webbrowser import platform #if __name__ == '__main__': def launch(): help_path=opencavity.__file__ if platform.system()=='Windows': separator='\\' else: separator='/' count=1 while (not help_path.endswith(separator)) and count<50: help_path=help_path[:-1] count=count+1 #prevent unfinit loop if path is empty help_path2='Docs/_build/html/index.html' help_path=help_path+help_path2 webbrowser.open(help_path)
596
216
from .base_thermal import BaseThermal from .isothermal import Isothermal from .lumped import Lumped from .x_full import OneDimensionalX from . import pouch_cell
161
54
''' http://insight.bitpay.com/ ''' import logging import requests import time from lib import config, exceptions, util bitcoin_rpc_session = None def check(): return True def searchrawtransactions(address): unconfirmed = util.unconfirmed_transactions(address) rawtransactions = util.rpc('searchrawtransactions', [address, 1, 0, 9999999]) confirmed = [tx for tx in rawtransactions if tx['confirmations'] > 0] return unconfirmed + confirmed
463
150
#!/usr/bin/env python # -*- coding: utf-8 -*- import requests from bs4 import BeautifulSoup def getHotProducts(url): if "adafruit" in url: r = requests.get(url) featuredProducts = [] if r.status_code == 200: body = r.text bPage = BeautifulSoup(body, "html.parser", from_encoding="iso-8859-8") #products = bPage.select("#featured-products_block_center") products = bPage.find_all("div", {"class":"row product-listing"}) for product in products: nameOfProduct = product.find_all("div", {"class": "product-listing-text-wrapper"})[0].find("h1").find("a").get("data-name") urlOfProduct = "https://www.adafruit.com" + product.find_all("div", {"class": "product-listing-text-wrapper"})[0].find("h1").find("a").get("href") descriptionOfProduct = product.find_all("div", {"class": "product-description clearfix hidden-sm hidden-xs hidden-lg"})[0].getText()[1:] priceOfProduct = product.find_all("div", {"class": "product-info clearfix row"})[0].\ find_all("div", {"class":"price-stock col-lg-8 col-md-6 col-sm-6 col-xs-12"})[0].\ find_all("span", {"class":"normal-price"})[0].getText() stockProduct = product.find_all("div", {"class": "product-info clearfix row"})[0]. \ find_all("div", {"class": "price-stock col-lg-8 col-md-6 col-sm-6 col-xs-12"})[0]. 
\ find_all("div", {"class": "stock"})[0].getText().replace("\n", "") featuredProducts.append([nameOfProduct.replace(u'\xa0', u' ').replace(u'\u200b', ''), urlOfProduct.replace(u'\xa0', u' ').replace(u'\u200b', ''), descriptionOfProduct.replace(u'\xa0', u' ').replace(u'\u200b', ''), priceOfProduct.replace(u'\xa0', u' ').replace(u'\u200b', ''), stockProduct.replace(u'\xa0', u' ').replace(u'\u200b', '')]) return featuredProducts else: return None elif "sparkfun" in url: r = requests.get(url) featuredProducts = [] if r.status_code == 200: body = r.text bPage = BeautifulSoup(body, "html.parser", from_encoding="iso-8859-8") products = bPage.find_all("div", {"class": "tile product-tile has_addl_actions grid "}) for product in products: main = product.find_all("div", {"class":"main"})[0] rawNameAndUrl = main.find_all("h3")[0].find("a") urlOfProduct = rawNameAndUrl.get("href").strip() nameOfProduct = rawNameAndUrl.find_all("span")[0].getText().strip() descriptionOfProduct = main.find_all("p", {"class": "description"})[0].getText().strip().replace(u'\n', u'').replace(u'\r', u'') stockProduct = main.find_all("span", {"class" : "bubbles"})[0].find("a").find_all("span")[0].find_all("span")[0].getText().strip() priceOfProduct = '$' + product.find_all("div", {"itemprop":"offers"})[0].find("div").find("span").find("span", {"itemprop": "price"}).getText().strip() if not nameOfProduct == "": featuredProducts.append([nameOfProduct.replace(u'\xa0', u' ').replace(u'\u200b', '').replace( u'\u2026', '').replace(u'\xae', '').replace(u'\u2013', '').replace(u'\u2019', '').replace(u'\ufffd', '').replace(u'\xb5', '').replace(u'\u201d', ''), urlOfProduct.replace(u'\xa0', u' ').replace(u'\u200b', '').replace( u'\u2026', '').replace(u'\xae', '').replace(u'\u2013', '').replace(u'\u2019', '').replace(u'\ufffd', '').replace(u'\xb5', '').replace(u'\u201d', ''), descriptionOfProduct.replace(u'\xa0', u' ').replace(u'\u200b', '').replace( u'\u2026', '').replace(u'\xae', '').replace(u'\u2013', 
'').replace(u'\u2019', '').replace(u'\ufffd', '').replace(u'\xb5', '').replace(u'\u201d', ''), priceOfProduct.replace(u'\xa0', u' ').replace(u'\u200b', '').replace( u'\u2026', '').replace(u'\xae', '').replace(u'\u2013', '').replace(u'\u2019', '').replace(u'\ufffd', '').replace(u'\xb5', '').replace(u'\u201d', ''), stockProduct.replace(u'\xa0', u' ').replace(u'\u200b', '').replace( u'\u2026', '').replace(u'\xae', '').replace(u'\u2013', '').replace(u'\u2019', '').replace(u'\ufffd', '').replace(u'\xb5', '').replace(u'\u201d', '')]) return featuredProducts else: return None def exportToCSV(data): import csv file = open("out.csv", 'w') outFile = csv.writer(file) for row in data: print list(row) outFile.writerow(list(row)) if __name__ == "__main__": products = getHotProducts("https://www.sparkfun.com/categories/top?per_page=400") exportToCSV(products)
4,876
1,692
from django.urls import path from . import views app_name = 'merchant' urlpatterns = [ path('', views.merchant, name='merchant'), path('makanan', views.makanan, name='makanan'), path('makanan/search_makanan', views.search_makanan, name='search_makanan'), path('search_merchant', views.search_merchant, name='search_merchant'), path('makanan/<int:pk>', views.detail_makanan, name='detail'), path('show_merchant', views.display_merchant, name='showMerchant'), path('show_makanan', views.display_makanan, name='showMakanan'), ]
569
219
"""Worker application. It calls an external slow task and send its output, line by line, as "log" events through SocketIO. The web page will then print the lines. """ # Disable the warning because eventlet must patch the standard library as soon # as possible. from communication import (CELERY, get_socketio) # pylint: disable=wrong-import-order import socket from datetime import datetime from subprocess import PIPE, Popen SOCKETIO = get_socketio() def announce(): """Tell this worker is up and running.""" hostname = socket.gethostname() time = datetime.now().strftime('%H:%M:%S') msg = '{} Worker {} is up.'.format(time, hostname) SOCKETIO.emit('log', {'data': msg}) announce() @CELERY.task def add_task(name): """Run the slow task as a subprocess and send results to the web site.""" args = './slow_task.sh', str(name) with Popen(args, stdout=PIPE, universal_newlines=True) as proc: for line in proc.stdout: SOCKETIO.emit('log', {'data': line.rstrip()})
1,049
326
#! /usr/bin/env python2.7 # -*- coding: latin-1 -*- from database import User from database import ApiKey from flask_principal import Permission from flask_principal import RoleNeed admin_permission = Permission(RoleNeed('admin')) def map_api_key_to_user(key): """ Take an input of an API key and return a User instance """ key = key.strip() user = None # Attempt to get a user instance from an apikey row row = ApiKey.filter_by(api_key=key).first() if row: user = row.user return user
536
175
""" Queries in this module have a shared 'after' query, which lets Graphql paginate """ from gql import gql # type: ignore[import] query_contributed_to: gql = gql( r"""query ($after: String) { viewer { repositoriesContributedTo(first: 100, after: $after, contributionTypes: [COMMIT, ISSUE, PULL_REQUEST, REPOSITORY]) { totalCount nodes { name licenseInfo { name } description stargazers { totalCount } primaryLanguage { name } url updatedAt } pageInfo { endCursor hasNextPage } } } } """ ) query_owned: gql = gql( r"""query ($after: String) { viewer { repositories( first: 100, after: $after, orderBy: { field: UPDATED_AT, direction: DESC } ) { nodes { name licenseInfo { name } description stargazers { totalCount } primaryLanguage { name } url updatedAt } pageInfo { endCursor hasNextPage } } } }""" )
1,173
362
""" ``GribFile`` class that implements a GRIB file that closes itself and its messages when it is no longer needed. Author: Daniel Lee, DWD, 2014 """ import gribapi from gribmessage import GribMessage class GribFile(file): """ A GRIB file handle meant for use in a context manager. Individual messages can be accessed using the ``next`` method. Of course, it is also possible to iterate over each message in the file:: >>> with GribFile(filename) as grib: ... # Print number of messages in file ... len(grib) ... # Open all messages in file ... for msg in grib: ... print(msg["shortName"]) ... len(grib.open_messages) >>> # When the file is closed, any open messages are closed >>> len(grib.open_messages) """ def __enter__(self): return self def __exit__(self, type, value, traceback): """Close all open messages, release GRIB file handle and close file.""" while self.open_messages: self.open_messages.pop().close() self.file_handle.close() def close(self): """Possibility to manually close file.""" self.__exit__(None, None, None) def __len__(self): """Return total messages in GRIB file.""" return gribapi.grib_count_in_file(self.file_handle) def __iter__(self): return self def next(self): try: return GribMessage(self) except IOError: raise StopIteration() def __init__(self, filename, mode="r"): """Open file and receive GRIB file handle.""" #: File handle for working with actual file on disc #: The class holds the file it works with because the GRIB API's #: typechecking does not allow using inherited classes. self.file_handle = open(filename, mode) #: Number of message in GRIB file currently being read self.message = 0 #: Open messages self.open_messages = []
2,032
568
# Generated by Django 2.1.4 on 2019-02-21 03:11 from django.db import migrations provinces = ( ('Hà Nội', 'Thành Phố'), ('Hà Giang', 'Tỉnh'), ('Cao Bằng', 'Tỉnh'), ('Bắc Kạn', 'Tỉnh'), ('Tuyên Quang', 'Tỉnh'), ('Lào Cai', 'Tỉnh'), ('Điện Biên', 'Tỉnh'), ('Lai Châu', 'Tỉnh'), ('Sơn La', 'Tỉnh'), ('Yên Bái', 'Tỉnh'), ('Hòa Bình', 'Tỉnh'), ('Thái Nguyên', 'Tỉnh'), ('Lạng Sơn', 'Tỉnh'), ('Quảng Ninh', 'Tỉnh'), ('Bắc Giang', 'Tỉnh'), ('Phú Thọ', 'Tỉnh'), ('Vĩnh Phúc', 'Tỉnh'), ('Bắc Ninh', 'Tỉnh'), ('Hải Dương', 'Tỉnh'), ('Hải Phòng', 'Thành Phố'), ('Hưng Yên', 'Tỉnh'), ('Thái Bình', 'Tỉnh'), ('Hà Nam', 'Tỉnh'), ('Nam Định', 'Tỉnh'), ('Ninh Bình', 'Tỉnh'), ('Thanh Hóa', 'Tỉnh'), ('Nghệ An', 'Tỉnh'), ('Hà Tĩnh', 'Tỉnh'), ('Quảng Bình', 'Tỉnh'), ('Quảng Trị', 'Tỉnh'), ('Thừa Thiên Huế', 'Tỉnh'), ('Đà Nẵng', 'Thành Phố'), ('Quảng Nam', 'Tỉnh'), ('Quảng Ngãi', 'Tỉnh'), ('Bình Định', 'Tỉnh'), ('Phú Yên', 'Tỉnh'), ('Khánh Hòa', 'Tỉnh'), ('Ninh Thuận', 'Tỉnh'), ('Bình Thuận', 'Tỉnh'), ('Kon Tum', 'Tỉnh'), ('Gia Lai', 'Tỉnh'), ('Đắk Lắk', 'Tỉnh'), ('Đắk Nông', 'Tỉnh'), ('Lâm Đồng', 'Tỉnh'), ('Bình Phước', 'Tỉnh'), ('Tây Ninh', 'Tỉnh'), ('Bình Dương', 'Tỉnh'), ('Đồng Nai', 'Tỉnh'), ('Bà Rịa - Vũng Tàu', 'Tỉnh'), ('Hồ Chí Minh', 'Thành Phố'), ('Long An', 'Tỉnh'), ('Tiền Giang', 'Tỉnh'), ('Bến Tre', 'Tỉnh'), ('Trà Vinh', 'Tỉnh'), ('Vĩnh Long', 'Tỉnh'), ('Đồng Tháp', 'Tỉnh'), ('An Giang', 'Tỉnh'), ('Kiên Giang', 'Tỉnh'), ('Cần Thơ', 'Thành Phố'), ('Hậu Giang', 'Tỉnh'), ('Sóc Trăng', 'Tỉnh'), ('Bạc Liêu', 'Tỉnh'), ('Cà Mau', 'Tỉnh') ) def forwards_func(apps, schema_editor): Province = apps.get_model("vnprovinces", "Province") db_alias = schema_editor.connection.alias Province.objects.using(db_alias).bulk_create([ Province(name=name, type=type) for name, type in provinces ]) def reverse_func(apps, schema_editor): Province = apps.get_model("vnprovinces", "Province") db_alias = schema_editor.connection.alias for name, type in provinces: 
Province.objects.using(db_alias).filter(name=name, type=type).delete() class Migration(migrations.Migration): dependencies = [ ('vnprovinces', '0001_initial'), ] operations = [ migrations.RunPython(forwards_func, reverse_func), ]
2,525
1,391
cube = lambda x: pow(x,3) def fibonacci(n): l=list() if n==0: l=[] elif n==1: l=[0] else: l=[0,1] for i in range(2,n): num=l[i-1]+l[i-2] l.append(num) return 1
239
105
import os os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" import io import tensorflow as tf import matplotlib.pyplot as plt import numpy as np import tensorflow_datasets as tfds from tensorflow import keras from tensorflow.keras import layers # Make sure we don't get any GPU errors physical_devices = tf.config.list_physical_devices("GPU") tf.config.experimental.set_memory_growth(physical_devices[0], True) writer = tf.summary.create_file_writer("logs/graph_vis") @tf.function def my_func(x, y): return tf.nn.relu(tf.matmul(x, y)) x = tf.random.uniform((3, 3)) y = tf.random.uniform((3, 3)) tf.summary.trace_on(graph=True, profiler=True) out = my_func(x, y) with writer.as_default(): tf.summary.trace_export( name="function_trace", step=0, profiler_outdir="logs\\graph_vis\\" )
804
297
from fabric.api import * env.user = 'ptigas' env.hosts = ['hocus.io'] def deploy(): with cd('repos/hocus'): run('git pull') with cd('repos/hocus/webapp'): run('composer install') with cd('repos/hocus'): run('rsync -rv --exclude=vendor webapp/* ~/Sites/hocus.io/') run('cp -r ~/Sites/hocus.io/settings.prod.php ~/Sites/hocus.io/settings.php')
361
159
from src.helpers.staticHelper import StaticHelper from src.routes.interfaces.iEntityIndexerLogic import IEntityIndexerLogic from src.logics.interfaces.iIndexingAlgorithm import IIndexingAlgorithm from src.logics.interfaces.iEntityData import IEntityData class EntityIndexerLogic(IEntityIndexerLogic): def __init__(self, indexingAlgorithm, entityData): self.indexingAlgorithm = indexingAlgorithm StaticHelper.isInterfaceResloved(self.indexingAlgorithm, IIndexingAlgorithm) self.entityData = entityData StaticHelper.isInterfaceResloved(self.entityData, IEntityData) def createIndex(self, data): self.indexingAlgorithm.createIndex({}, 1, 1, '', '') #self.entityData.insertEntityNode() #self.entityData.setData(data) #dataNodeId = self.entityData.insertEntityNode() #print(rootId) #self.indexingAlgorithm.createIndex(data, rootId, dataNode.GetNodeId(), '', '')
949
284
from server.connection import Base from sqlalchemy import Column, String from sqlalchemy.dialects.postgresql import UUID class Hostel(Base): __tablename__ = "hostel" id = Column(UUID(as_uuid=True), primary_key=True) name = Column(String(255), nullable=False) description = Column(String(255), nullable=False) def __repr__(self): return self.name
377
125
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 1999-2017 Alibaba Group Holding Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ...expr.expressions import Column, CollectionExpr def change_input(expr, src_input, new_input, get_field, dag): for path in expr.all_path(src_input, strict=True): cols = [it for it in path if isinstance(it, Column)] assert len(cols) <= 1 collection_len = len([it for it in path if isinstance(it, CollectionExpr)]) if isinstance(expr, CollectionExpr): assert collection_len == 2 else: assert collection_len == 1 if len(cols) == 1: col = cols[0] col_name = col.source_name or col.name field = get_field(new_input, col_name) if col.is_renamed(): field = field.rename(col.name) else: field = field.copy() path[-3].substitute(col, field, dag=dag) else: path[-2].substitute(src_input, new_input, dag=dag) def copy_sequence(sequence, collection, dag=None): copied = sequence.copy() if dag: dag.add_node(copied) is_copied = set() for path in sequence.all_path(collection, strict=True): curr = copied for seq in path[1:-1]: if id(seq) in is_copied: continue is_copied.add(id(seq)) copied_seq = seq.copy() curr.substitute(seq, copied_seq, dag=dag) curr = copied_seq return copied
2,047
620
try: from setuptools import setup except ImportError: from distutils.core import setup setup( name='arrow', version='0.4.4', description='Better dates and times for Python', url='http://crsmithdev.com/arrow', author='Chris Smith', author_email="crsmithdev@gmail.com", license='Apache 2.0', packages=['arrow'], zip_safe=False, install_requires=[ 'python-dateutil' ], test_suite="tests", classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Topic :: Software Development :: Libraries :: Python Modules' ] )
892
276
from typing import List from decimal import Decimal from yayFinPy.stock import Stock import pandas as pd def test_constructor(): try: stock = Stock("AAPL") assert(stock != None) return 1 except Exception as e: print("Test Failed: test_constructor: ", e) return 0 def test_constructor_failure(): try: stock = Stock("INVALID") except: return 1 print("Test Failed: test_constructor_failure") return 0 def test_stock_attributes(): try: stock = Stock("AAPL") assert(stock != None) assert(type(stock.bid) == Decimal) assert(type(stock.ask) == Decimal) assert(type(stock.bid_size) == Decimal) assert(type(stock.ask_size) == Decimal) assert(type(stock.name) == str) assert(type(stock.pe_ratio) == Decimal) assert(type(stock.peg_ratio) == Decimal) assert(type(stock.market_cap) == Decimal) assert(stock.name == "Apple Inc.") return 1 except Exception as e: print("Test Failed: test_stock_attributes", e) return 0 def test_stock_splits(): try: stock = Stock("AAPL") splits = stock.splits assert(type(splits) == type(pd.Series(dtype='float64'))) return 1 except Exception as e: print("Test Failed: test_stock_splits", e) return 0 def test_stock_dividends(): try: stock = Stock("AAPL") dividends = stock.dividends assert(type(dividends) == type(pd.Series(dtype='float64'))) return 1 except Exception as e: print("Test Failed: test_stock_dividends", e) return 0 def test_stock_news(): try: stock = Stock("AAPL") news = stock.related_news() assert(type(news) == list) if len(news) > 0: assert(type(news[0]) == str) assert(len(news) <= 20) return 1 except Exception as e: print("Test Failed: test_stock_news", e) return 0 def test_stock_tweets(): try: stock = Stock("AAPL") tweets = stock.tweets("invalid", "invalid", "", "") return 0 except Exception as e: #test expected to fail return 1 def test_stock_sentiments(): try: stock = Stock("AAPL") sentiment_score = stock.sentiment() return 0 except Exception as e: return 1 #test expected to fail return 0 def test_stock_returns(): try: stock = Stock("AAPL") 
returns_val = stock.returns() assert(type(returns_val) == Decimal) return 1 except Exception as e: print("Test Failed: test_stock_returns", e) return 0 def test_stock_companyData(): try: stock = Stock("AAPL") companyData = stock.company_data assert(type(str(companyData)) == str) return 1 except Exception as e: print("Test Failed: test_stock_companyData", e) return 0 if __name__ == '__main__': success = [] success.append(test_constructor()) success.append(test_constructor_failure()) success.append(test_stock_attributes()) success.append(test_stock_splits()) success.append(test_stock_dividends()) success.append(test_stock_returns()) success.append(test_stock_news()) success.append(test_stock_tweets()) success.append(test_stock_sentiments()) success.append(test_stock_companyData()) print("Stock Test Done: (%d/%d) Successful"%(sum(success), len(success)))
3,061
1,184
#! /usr/bin/env python import os import rospy import rospkg from readbag import restore from qt_gui.plugin import Plugin from python_qt_binding.QtCore import Qt from python_qt_binding import loadUi from python_qt_binding.QtGui import QFileDialog, QGraphicsView, QIcon, QWidget from PyQt4 import QtGui, QtCore from example_ui import * from TheTeleop2 import * class MyPlugin(Plugin): def pr(self, anda): arg = input("inserte algo") print arg def getTab(self): arg = self._widget.comboBox.currentText () print str(arg) def getBags(self, bag_path): list_bags = os.listdir(bag_path) list_of_bag = [] for t in list_bags: if ".bag" in t: t = t.split('.') list_of_bag.append(t[0]) return list_of_bag def __init__(self, context): super(MyPlugin, self).__init__(context) # Give QObjects reasonable names self.setObjectName('MyPlugin') # Process standalone plugin command-line arguments from argparse import ArgumentParser parser = ArgumentParser() # Add argument(s) to the parser. parser.add_argument("-q", "--quiet", action="store_true", dest="quiet", help="Put plugin in silent mode") args, unknowns = parser.parse_known_args(context.argv()) if not args.quiet: print 'arguments: ', args print 'unknowns: ', unknowns # Create QWidget #print "i am alive" #self._widget = QWidget() self._widget = Form1() # Get path to UI file which should be in the "resource" folder of this package #ui_file = os.path.join(rospkg.RosPack().get_path('rqt_the_teleop'), 'resource', 'MyPlugin.ui') # Extend the widget with all attributes and children from UI file #loadUi(ui_file, self._widget) # Give QObjects reasonable names #self._widget.setObjectName('MyPluginUi') # Show _widget.windowTitle on left-top of each plugin (when # it's set in _widget). This is useful when you open multiple # plugins at once. Also if you open multiple instances of your # plugin at once, these lines add number to make it easy to # tell from pane to pane. 
#if context.serial_number() > 1: # self._widget.setWindowTitle(self._widget.windowTitle() + (' (%d)' % context.serial_number())) # Add widget to the user interface context.add_widget(self._widget) #test #tree_widget ROBOT self._widget.elementsTable.setEditTriggers(self._widget.elementsTable.NoEditTriggers) header_robot = self._widget.elementsTable.header() header_robot.setResizeMode(QHeaderView.ResizeToContents) header_robot.setContextMenuPolicy(Qt.CustomContextMenu) def shutdown_plugin(self): # TODO unregister all publishers here pass def save_settings(self, plugin_settings, instance_settings): # TODO save intrinsic configuration, usually using: # instance_settings.set_value(k, v) pass def restore_settings(self, plugin_settings, instance_settings): # TODO restore intrinsic configuration, usually using: # v = instance_settings.value(k) pass #def trigger_configuration(self): # Comment in to signal that the plugin has a way to configure # This will enable a setting button (gear icon) in each dock widget title bar # Usually used to open a modal configuration dialog
3,184
1,082
# Packaging script for the Flask-SimpleSQLA extension.
#
# Prefer setuptools so that ``install_requires`` is honoured --
# ``distutils.core.setup`` silently ignores that keyword.  Fall back to
# distutils so the script still runs where setuptools is unavailable.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

setup(
    name='Flask-SimpleSQLA',
    version='1.0',
    url='http://github.com/blaxpirit/flask-simplesqla',
    license='BSD',
    author="Oleh Prypin",
    author_email='blaxpirit@gmail.com',
    description="Extension providing basic support of SQLAlchemy in Flask applications",
    # Bug fix: this module has no docstring, so ``__doc__`` was None here;
    # fall back to the short description instead of an empty long one.
    long_description=__doc__ or "Extension providing basic support of SQLAlchemy in Flask applications",
    packages=['flask_simplesqla'],
    platforms='any',
    install_requires=[
        'Flask>=0.8',
        'SQLAlchemy',
    ],
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
955
285
# --- Day 20: Jurassic Jigsaw --- # The high-speed train leaves the forest and quickly carries you south. You can even see a desert in the distance! # Since you have some spare time, you might as well see if there was anything interesting in the image the Mythical # Information Bureau satellite captured. # # After decoding the satellite messages, you discover that the data actually contains many small images created by # the satellite's camera array. The camera array consists of many cameras; rather than produce a single square image, # they produce many smaller square image tiles that need to be reassembled back into a single image. # # Each camera in the camera array returns a single monochrome image tile with a random unique ID number. The tiles # (your puzzle input) arrived in a random order. # # Worse yet, the camera array appears to be malfunctioning: each image tile has been rotated and flipped to a random # orientation. Your first task is to reassemble the original image by orienting the tiles so they fit together. # # To show how the tiles should be reassembled, each tile's image data includes a border that should line up exactly with # its adjacent tiles. All tiles have this border, and the border lines up exactly when the tiles are both oriented # correctly. Tiles at the edge of the image also have this border, but the outermost edges won't line up with any other tiles. # # For example, suppose you have the following nine tiles: # # Tile 2311: # ..##.#..#. # ##..#..... # #...##..#. # ####.#...# # ##.##.###. # ##...#.### # .#.#.#..## # ..#....#.. # ###...#.#. # ..###..### # # Tile 1951: # #.##...##. # #.####...# # .....#..## # #...###### # .##.#....# # .###.##### # ###.##.##. # .###....#. # ..#.#..#.# # #...##.#.. # # Tile 1171: # ####...##. # #..##.#..# # ##.#..#.#. # .###.####. # ..###.#### # .##....##. # .#...####. # #.##.####. # ####..#... # .....##... # # Tile 1427: # ###.##.#.. # .#..#.##.. # .#.##.#..# # #.#.#.##.# # ....#...## # ...##..##. 
# ...#.##### # .#.####.#. # ..#..###.# # ..##.#..#. # # Tile 1489: # ##.#.#.... # ..##...#.. # .##..##... # ..#...#... # #####...#. # #..#.#.#.# # ...#.#.#.. # ##.#...##. # ..##.##.## # ###.##.#.. # # Tile 2473: # #....####. # #..#.##... # #.##..#... # ######.#.# # .#...#.#.# # .######### # .###.#..#. # ########.# # ##...##.#. # ..###.#.#. # # Tile 2971: # ..#.#....# # #...###... # #.#.###... # ##.##..#.. # .#####..## # .#..####.# # #..#.#..#. # ..####.### # ..#.#.###. # ...#.#.#.# # # Tile 2729: # ...#.#.#.# # ####.#.... # ..#.#..... # ....#..#.# # .##..##.#. # .#.####... # ####.#.#.. # ##.####... # ##..#.##.. # #.##...##. # # Tile 3079: # #.#.#####. # .#..###### # ..#....... # ######.... # ####.#..#. # .#...#.##. # #.#####.## # ..#.###... # ..#....... # ..#.###... # By rotating, flipping, and rearranging them, you can find a square arrangement that causes all adjacent borders to line up: # # #...##.#.. ..###..### #.#.#####. # ..#.#..#.# ###...#.#. .#..###### # .###....#. ..#....#.. ..#....... # ###.##.##. .#.#.#..## ######.... # .###.##### ##...#.### ####.#..#. # .##.#....# ##.##.###. .#...#.##. # #...###### ####.#...# #.#####.## # .....#..## #...##..#. ..#.###... # #.####...# ##..#..... ..#....... # #.##...##. ..##.#..#. ..#.###... # # #.##...##. ..##.#..#. ..#.###... # ##..#.##.. ..#..###.# ##.##....# # ##.####... .#.####.#. ..#.###..# # ####.#.#.. ...#.##### ###.#..### # .#.####... ...##..##. .######.## # .##..##.#. ....#...## #.#.#.#... # ....#..#.# #.#.#.##.# #.###.###. # ..#.#..... .#.##.#..# #.###.##.. # ####.#.... .#..#.##.. .######... # ...#.#.#.# ###.##.#.. .##...#### # # ...#.#.#.# ###.##.#.. .##...#### # ..#.#.###. ..##.##.## #..#.##..# # ..####.### ##.#...##. .#.#..#.## # #..#.#..#. ...#.#.#.. .####.###. # .#..####.# #..#.#.#.# ####.###.. # .#####..## #####...#. .##....##. # ##.##..#.. ..#...#... .####...#. # #.#.###... .##..##... .####.##.# # #...###... ..##...#.. ...#..#### # ..#.#....# ##.#.#.... ...##..... 
# For reference, the IDs of the above tiles are:
#
# 1951 2311 3079
# 2729 1427 2473
# 2971 1489 1171
# To check that you've assembled the image correctly, multiply the IDs of the four corner tiles together. If you do
# this with the assembled tiles from the example above, you get 1951 * 3079 * 2971 * 1171 = 20899048083289.
#
# Assemble the tiles into an image. What do you get if you multiply together the IDs of the four corner tiles?

import re


def reverse_string(string):
    """Return *string* reversed.

    Bug fix: the original assigned the result of ``list.reverse()`` (which
    is None) back to the list and then crashed iterating it.
    """
    return string[::-1]


def get_left(image):
    """Left border of *image* (first char of each row), top to bottom."""
    return [line[0] for line in image]


def get_right(image):
    """Right border of *image* (last char of each row), top to bottom."""
    return [line[-1] for line in image]


def get_bottom(image):
    """Bottom border of *image* (chars of the last row), left to right."""
    return list(image[-1])


def get_top(image):
    """Top border of *image* (chars of the first row), left to right."""
    return list(image[0])


def no_empty_list(list_in):
    """Remove every '' element from *list_in* in place and return it."""
    while True:
        try:
            list_in.remove('')
        except ValueError:
            return list_in


def no_newline_list(list_in):
    """Drop the newline separators produced by re.split('(\\n)', ...).

    Popping index 1, then 2, then 3, ... skips over the kept lines while
    removing each separator that follows; IndexError marks the end.
    Mutates *list_in* in place and returns it.
    """
    i = 0
    iterate = len(list_in)
    while i < iterate:
        try:
            list_in.pop(1 + i)
            i += 1
        except IndexError:
            break
    return list_in


def match_bottoms_and_tops(obj_list):
    """Placeholder -- never implemented."""
    return None


def match_left_and_right(obj_list):
    """Placeholder -- never implemented."""
    return None


class Tile:
    """One image tile: its id plus all four borders.

    Naming note: the ``*_flipped`` / ``*_reversed`` attributes hold the
    borders in their original reading order (they are copied *before* the
    in-place reversals below), while the base attributes end up reversed.
    The names are historical and kept for compatibility.
    """

    def __init__(self, id, image):
        self.id = id
        self.left_side = get_left(image)
        self.right_side = get_right(image)
        self.bottom_side = get_bottom(image)
        self.top_side = get_top(image)
        # Copies taken before reversal -> original orientation.
        self.left_side_flipped = [e for e in self.left_side]
        self.right_side_flipped = [e for e in self.right_side]
        self.bottom_side_flipped = [e for e in self.bottom_side]
        self.top_side_reversed = [e for e in self.top_side]
        self.left_side.reverse()
        self.right_side.reverse()
        self.bottom_side.reverse()
        self.top_side.reverse()
        # Number of borders matching a border of some other tile; corner
        # tiles end with exactly 2.
        self.side_match_count = 0

    def add_match(self):
        self.side_match_count += 1


def main():
    """Parse input.txt into Tiles and report tiles with exactly 2 matches.

    Wrapped in a function + __main__ guard so importing this module (e.g.
    for testing the helpers above) no longer performs file I/O.
    """
    data = open("input.txt", "r").read()
    data_str = data
    lst = re.split('Tile ', data_str)
    lst = no_empty_list(lst)
    lst_of_lst = list()
    # (renamed from ``str`` -- don't shadow the builtin)
    for tile_text in lst:
        tile_text = re.split('(\\n)', tile_text)
        tile_text = no_newline_list(tile_text)
        tile_text = no_empty_list(tile_text)
        lst_of_lst.append(tile_text)
        print(tile_text)
    obj_lst = list()
    for lst in lst_of_lst:
        obj_lst.append(Tile(lst[0], lst[1:]))
    print(obj_lst)
    # All that you need to solve this problem now is to find whichever Tile
    # only has matches on 2 sides (corner tiles), assuming no full jigsaw
    # assembly is required.
    i = 0
    lits_len = len(obj_lst)
    while i < lits_len - 1:
        temp_r = obj_lst[i].right_side
        temp_r_r = obj_lst[i].right_side_flipped
        temp_l = obj_lst[i].left_side
        temp_l_r = obj_lst[i].left_side_flipped
        temp_t = obj_lst[i].top_side
        temp_t_r = obj_lst[i].top_side_reversed
        temp_b = obj_lst[i].bottom_side
        temp_b_r = obj_lst[i].bottom_side_flipped
        base_lst = [temp_r, temp_l, temp_r_r, temp_l_r, temp_t, temp_b, temp_t_r, temp_b_r]
        for obj in obj_lst[i+1:]:
            comp_lst = [obj.right_side, obj.left_side, obj.top_side, obj.bottom_side]
            for side in comp_lst:
                if side in base_lst:
                    obj_lst[i].add_match()
                    obj.add_match()
            # A tile can share at most 4 borders; stop early once exceeded.
            if obj_lst[i].side_match_count > 3:
                break
        i += 1
    for item in obj_lst:
        if item.side_match_count == 2:
            print(item.id)
    print(obj_lst)


if __name__ == '__main__':
    main()
7,879
3,262
import logging
import os
import pickle
from typing import NamedTuple

import gym
from gym import Wrapper, GoalEnv
from gym.wrappers import FlattenObservation, TimeLimit, TransformReward, FilterObservation
from runstats import Statistics
import torch

from envs.gym_mujoco.custom_wrappers import DropGoalEnvsAbsoluteLocation

# Keep torch from oversubscribing CPU cores; set before the torch-using
# imports below.
torch.set_num_threads(2)
torch.set_num_interop_threads(2)

from stable_baselines3 import SAC
import numpy as np
from stable_baselines3.common.buffers import ReplayBuffer
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.monitor import Monitor

from solvability import ForHER

import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
plt.ion()

from envs.custom_envs import DADSEnv, make_point2d_dads_env, make_fetch_reach_env, \
    make_fetch_push_env, make_point_mass_env, make_ant_dads_env
from scipy.stats import multivariate_normal as mvn


def l2(target, sources: np.ndarray):
    # Euclidean distance from `target` to each row of `sources` (or to a
    # single vector when `sources` is 1-D).
    return np.linalg.norm(np.subtract(target, sources), axis=sources.ndim - 1)


class MutualInfoStrategy:
    """Interface for skill sampling / mutual-information reward strategies."""

    def __init__(self, skill_dim: int):
        self._skill_dim = skill_dim

    def sample_skill(self, samples=None):
        raise NotImplementedError

    def get_mutual_info(self, goal_delta: np.ndarray, skill: np.ndarray) -> float:
        # Intrinsic reward: log p(g'|z,g) - log p(g'|g).
        log = dict()
        log["p(g'|z,g)"] = self._mi_numerator(delta=goal_delta, skill=skill)
        log["p(g'|g)"] = self._mi_denominator(delta=goal_delta)
        mutual_info = log["p(g'|z,g)"] - log["p(g'|g)"]
        # Very negative values usually indicate a scaling problem; log them.
        if mutual_info < -10:
            logging.warning(str((mutual_info, log["p(g'|z,g)"], log["p(g'|g)"])))
        return mutual_info

    def choose_skill(self, desired_delta: np.ndarray) -> np.ndarray:
        raise NotImplementedError

    def _mi_numerator(self, delta: np.ndarray, skill: np.ndarray) -> float:
        raise NotImplementedError

    def _mi_denominator(self, delta: np.ndarray) -> float:
        raise NotImplementedError


class DotProductStrategy(MutualInfoStrategy):
    """Strategy with standard-normal skills and squared-distance log-densities."""

    def sample_skill(self, samples=None):
        size = (samples, self._skill_dim) if samples else self._skill_dim
        return np.random.normal(size=size)

    def _mi_numerator(self, delta: np.ndarray, skill: np.ndarray) -> float:
        diff = delta - skill
        return -0.5 * (diff @ diff)

    def _mi_denominator(self, delta: np.ndarray) -> float:
        return -0.25*(delta @ delta) - np.log(np.sqrt(2**len(delta)))

    def choose_skill(self, desired_delta: np.ndarray) -> np.ndarray:
        # Monte-Carlo argmin: pick the closest of 1000 sampled skills.
        skills = self.sample_skill(samples=1000)
        diffs = l2(desired_delta, skills)
        return skills[diffs.argmin()]


class MVNStrategy(MutualInfoStrategy):
    """Multivariate-normal model of skills z and goal deltas g'."""

    def __init__(self, skill_dim: int):
        super().__init__(skill_dim)
        self.cov = cov = {
            "z": np.eye(self._skill_dim),
            "g'|z,g": 0.1 * np.eye(self._skill_dim)
        }
        # Integration of two gaussians <-> convolution <-> sum of two gaussian RVs.
        cov["g'|g"] = cov["z"] + cov["g'|z,g"]

    def sample_skill(self, samples=1):
        return mvn.rvs(size=samples, cov=self.cov["z"])

    def choose_skill(self, desired_delta: np.ndarray) -> np.ndarray:
        # Monte-Carlo argmin over sampled skills.
        skills = self.sample_skill(samples=1000)
        diffs = l2(desired_delta, skills)
        return skills[diffs.argmin()]

    def _mi_numerator(self, delta: np.ndarray, skill: np.ndarray) -> float:
        return mvn.logpdf(x=delta, mean=skill, cov=self.cov["g'|z,g"])

    def _mi_denominator(self, delta: np.ndarray) -> float:
        return mvn.logpdf(x=delta, cov=self.cov["g'|g"])


class SkillWrapper(Wrapper):
    """Turns a GoalEnv into a flat, skill-conditioned env with DADS rewards."""

    def __init__(self, env: GoalEnv, skill_reset_steps: int):
        super().__init__(env)
        self._skill_reset_steps = skill_reset_steps
        self._skill_dim = env.observation_space["desired_goal"].shape[0]
        obs_dim = self.env.observation_space["observation"].shape[0]
        # Flat observation = raw observation concatenated with the skill.
        self.observation_space = gym.spaces.Box(-np.inf, np.inf, shape=(obs_dim + self._skill_dim, ))
        self.strategy = MVNStrategy(skill_dim=self._skill_dim)
        self._cur_skill = self.strategy.sample_skill()
        self._last_dict_obs = None
        # Running mean/std per goal dimension, used to normalize deltas.
        self._goal_deltas_stats = [Statistics([1e-6]) for _ in range(self._skill_dim)]

    def _normalize(self, delta):
        # NOTE(review): written for a single 1-D delta; a 2-D batch would be
        # zipped row-wise against per-dimension stats -- confirm callers.
        μs = [s.mean() for s in self._goal_deltas_stats]
        σs = [s.stddev() for s in self._goal_deltas_stats]
        return np.asarray([(d-μ)/σ for (d, μ, σ) in zip(delta, μs, σs)])

    def step(self, action):
        dict_obs, _, done, info = self.env.step(action)
        reward = self._reward(dict_obs=dict_obs)
        self._last_dict_obs = dict_obs
        # Resample the skill on average every `_skill_reset_steps` steps.
        if np.random.random() < 1/self._skill_reset_steps:
            self._cur_skill = self.strategy.sample_skill()
        flat_obs_w_skill = self._add_skill(observation=dict_obs["observation"])
        return flat_obs_w_skill, reward, done, info

    def _reward(self, dict_obs: np.ndarray) -> float:
        # Reward is the mutual information of the normalized one-step change
        # in the goal-space error with the current skill.
        last_diff = self._last_dict_obs["achieved_goal"] - self._last_dict_obs["desired_goal"]
        cur_diff = dict_obs["achieved_goal"] - dict_obs["desired_goal"]
        goal_delta = (cur_diff - last_diff)[:self._skill_dim]
        for s, d in zip(self._goal_deltas_stats, goal_delta):
            s.push(d)
        return self.strategy.get_mutual_info(goal_delta=self._normalize(goal_delta),
                                             skill=self._cur_skill)

    def reset(self, **kwargs):
        self._cur_skill = self.strategy.sample_skill()
        self._last_dict_obs = self.env.reset(**kwargs)
        return self._add_skill(observation=self._last_dict_obs["observation"])

    def _add_skill(self, observation: np.ndarray) -> np.ndarray:
        return np.concatenate((observation, self._cur_skill))

    def set_sac(self, sac):
        # Inject the trained SAC model used by predict().
        self._sac = sac

    def predict(self, dict_obs: dict, deterministic=True):
        # Choose the skill whose expected delta best matches the remaining
        # goal error, then query the SAC policy on obs+skill.
        delta = (dict_obs["desired_goal"] - dict_obs["achieved_goal"])[:self._skill_dim]
        skill = self.strategy.choose_skill(desired_delta=self._normalize(delta))
        flat_obs_w_skill = np.concatenate((dict_obs["observation"], skill))
        return self._sac.predict(observation=flat_obs_w_skill, deterministic=deterministic)

    def save(self, fname: str):
        # Persist the normalization statistics next to the model file.
        with open(fname + "-stats.pkl", "wb") as file:
            pickle.dump(self._goal_deltas_stats, file)

    def load(self, fname: str):
        with open(fname + "-stats.pkl", "rb") as file:
            self._goal_deltas_stats = pickle.load(file)

    def relabel(self, observations, actions, next_observations, rewards, dones):
        # Hindsight-style relabeling: resample skills and recompute rewards.
        assert observations.ndim == 2, observations.ndim
        assert observations.shape == next_observations.shape, (observations.shape, next_observations.shape)
        # NOTE(review): `[:self._skill_dim]` slices *rows* of this 2-D batch,
        # not goal dimensions, and `_normalize` expects 1-D input -- verify
        # against achieved_goal_from_state's return shape.
        deltas = self.env.achieved_goal_from_state(next_observations - observations)[:self._skill_dim]
        deltas = self._normalize(deltas)
        new_skills = self.strategy.sample_skill(len(observations))
        mi = self.strategy.get_mutual_info
        rewards = np.asarray([mi(goal_delta=d, skill=s) for d, s in zip(deltas, new_skills)])
        new_obs, new_next_obs = observations.copy(), next_observations.copy()
        # NOTE(review): this writes the new skills into the *input* arrays
        # while the untouched copies are returned -- likely the arguments
        # should be new_obs / new_next_obs. Confirm before relying on it.
        set_skills(observations, new_skills)
        set_skills(next_observations, new_skills)
        return new_obs, new_next_obs, actions, rewards, dones


def eval_dict_env(dict_env: GoalEnv, model, ep_len: int):
    # Render rollouts forever, picking actions from `model` on dict obs.
    while True:
        dict_obs = dict_env.reset()
        for _ in range(ep_len):
            dict_env.render("human")
            action, _ = model.predict(dict_obs, deterministic=True)
            dict_obs, *_ = dict_env.step(action)


def as_dict_env(env):
    # Wrap a flat env into the goal-dict interface used above.
    return ForHER(env)


def set_skills(obs: np.ndarray, skills: np.ndarray) -> None:
    # Overwrite the trailing skill slots of each row in place.
    idx = skills.shape[1]
    obs[:, -idx:] = skills


class AddExpCallback(BaseCallback):
    """SB3 callback that augments the replay buffer with relabeled samples."""

    def __init__(self, num_added_samples: int, verbose: int = 0):
        super().__init__(verbose)
        self.num_added_samples = num_added_samples

    def _on_step(self) -> bool:
        buffer: ReplayBuffer = self.model.replay_buffer
        can_sample = buffer.size() > 0
        if not can_sample:
            return True
        samples = buffer.sample(self.num_added_samples)
        wrapper: SkillWrapper = self.training_env.envs[0]
        new_samples = wrapper.relabel(**{k: v.cpu().numpy() for k, v in samples._asdict().items()})
        buffer.extend(*new_samples)
        return True


envs_fns = dict(
    point2d=make_point2d_dads_env,
    reach=make_fetch_reach_env,
    push=make_fetch_push_env,
    pointmass=make_point_mass_env,
    ant=make_ant_dads_env
)


class Conf(NamedTuple):
    # Per-environment training configuration.
    ep_len: int
    num_episodes: int
    lr: float = 3e-4
    first_n_goal_dims: int = None
    reward_scaling: float = 1.0


def show(model, env, conf: Conf):
    # Render greedy rollouts forever on a flat-observation env.
    while True:
        d_obs = env.reset()
        for _ in range(conf.ep_len):
            env.render("human")
            action, _ = model.predict(d_obs, deterministic=True)
            d_obs, *_ = env.step(action)


def train(model: SAC, conf: Conf, save_fname: str, added_trans=0):
    # Train and persist `model`; optionally inject relabeled transitions.
    kwargs = dict()
    if added_trans > 0:
        kwargs["callback"] = AddExpCallback(num_added_samples=added_trans)
    model.learn(total_timesteps=conf.ep_len * conf.num_episodes, log_interval=10, **kwargs)
    model.save(save_fname)


CONFS = dict(
    point2d=Conf(ep_len=30, num_episodes=50, lr=0.01),
    reach=Conf(ep_len=50, num_episodes=50, lr=0.001),
    push=Conf(ep_len=50, num_episodes=2000, first_n_goal_dims=2),
    pointmass=Conf(ep_len=150, num_episodes=100, lr=0.001, reward_scaling=1/100),
    ant=Conf(ep_len=400, num_episodes=250, reward_scaling=1/500)
)


def main():
    # Build the env stack, load or train the SAC model, then visualize.
    as_gdads = True
    name = "pointmass"
    drop_abs_position = True

    dads_env_fn = envs_fns[name]
    conf: Conf = CONFS[name]

    dict_env = as_dict_env(dads_env_fn())
    dict_env = TimeLimit(dict_env, max_episode_steps=conf.ep_len)
    if drop_abs_position:
        dict_env = DropGoalEnvsAbsoluteLocation(dict_env)
    if as_gdads:
        flat_env = SkillWrapper(env=dict_env, skill_reset_steps=conf.ep_len // 2)
    else:
        flat_obs_content = ["observation", "desired_goal", "achieved_goal"]
        if drop_abs_position:
            flat_obs_content.remove("achieved_goal")  # Because always 0 vector
        flat_env = FlattenObservation(FilterObservation(dict_env, filter_keys=flat_obs_content))
    flat_env = TransformReward(flat_env, f=lambda r: r*conf.reward_scaling)
    flat_env = Monitor(flat_env)

    filename = f"modelsCommandSkills/{name}-gdads{as_gdads}"
    if os.path.exists(filename + ".zip"):
        # Resume from disk; also restore normalization stats for gdads.
        sac = SAC.load(filename, env=flat_env)
        if as_gdads:
            flat_env.load(filename)
    else:
        sac = SAC("MlpPolicy", env=flat_env, verbose=1, learning_rate=conf.lr,
                  tensorboard_log=f"(unknown)-tb", buffer_size=10000)
        train(model=sac, conf=conf, save_fname=filename)
        if as_gdads:
            flat_env.save(filename)

    if as_gdads:
        flat_env.set_sac(sac)
        eval_dict_env(dict_env=dict_env, model=flat_env, ep_len=conf.ep_len)
    show(model=sac, env=flat_env, conf=conf)


if __name__ == '__main__':
    main()
11,248
4,064
from typing import List, Optional, TypeVar

import torch
import torch.nn as nn

from torecsys.inputs.base import BaseInput


class ImageInput(BaseInput):
    """
    Base Input class for image, which embed image by a stack of convolution neural network (CNN)
    and fully-connect layer.
    """
    # Type alias used by external annotations.
    ImageInputs = TypeVar('ImageInput')

    def __init__(self,
                 embed_size: int,
                 in_channels: int,
                 layers_size: List[int],
                 kernels_size: List[int],
                 strides: List[int],
                 paddings: List[int],
                 pooling: Optional[str] = 'avg_pooling',
                 use_batchnorm: Optional[bool] = True,
                 dropout_p: Optional[float] = 0.0,
                 activation: Optional[nn.Module] = nn.ReLU()):
        """
        Initialize ImageInput.

        Args:
            embed_size (int): Size of embedding tensor
            in_channels (int): Number of channel of inputs
            layers_size (List[int]): Layers size of CNN
            kernels_size (List[int]): Kernels size of CNN
            strides (List[int]): Strides of CNN
            paddings (List[int]): Paddings of CNN
            pooling (str, optional): Method of pooling layer
                Defaults to avg_pooling
            use_batchnorm (bool, optional): Whether batch normalization is applied or not after Conv2d
                Defaults to True
            dropout_p (float, optional): Probability of Dropout2d
                Defaults to 0.0
            activation (torch.nn.modules.activation, optional): Activation function of Conv2d
                Defaults to nn.ReLU()

        Raises:
            ValueError: when pooling is not in ["max_pooling", "avg_pooling"]
        """
        super().__init__()
        self.length = embed_size

        self.model = nn.Sequential()
        # Prepend the input channel count so consecutive pairs give each
        # Conv2d its (in_channels, out_channels).
        layers_size = [in_channels] + layers_size
        iterations = enumerate(zip(layers_size[:-1], layers_size[1:], kernels_size, strides, paddings))
        for i, (in_c, out_c, k, s, p) in iterations:
            conv2d_i = nn.Conv2d(in_c, out_c, kernel_size=k, stride=s, padding=p)
            self.model.add_module(f'conv2d_{i}', conv2d_i)
            if use_batchnorm:
                self.model.add_module(f'batchnorm2d_{i}', nn.BatchNorm2d(out_c))
            self.model.add_module(f'dropout2d_{i}', nn.Dropout2d(p=dropout_p))
            # NOTE(review): the same activation module *instance* is shared by
            # every layer -- fine for stateless activations such as ReLU.
            self.model.add_module(f'activation_{i}', activation)

        # Global pooling collapses each final feature map to a single value.
        if pooling == 'max_pooling':
            pooling_layer = nn.AdaptiveMaxPool2d(output_size=(1, 1,))
        elif pooling == 'avg_pooling':
            pooling_layer = nn.AdaptiveAvgPool2d(output_size=(1, 1,))
        else:
            raise ValueError('pooling must be in ["max_pooling", "avg_pooling"].')
        self.model.add_module('pooling', pooling_layer)

        # Project the last channel count down to the embedding size.
        self.fc = nn.Linear(layers_size[-1], embed_size)

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """
        Forward calculation of ImageInput

        Args:
            inputs (torch.tensor), shape = (B, C, H_{i}, W_{i}), data_type = torch.float: tensor of images

        Returns:
            torch.tensor, shape = (B, 1, E): output of ImageInput
        """
        # output's shape of convolution model = (B, C_{last}, 1, 1)
        outputs = self.model(inputs.rename(None))
        outputs.names = ('B', 'C', 'H', 'W',)

        # output's shape of fully-connect layers = (B, E)
        # NOTE(review): squeeze() drops *all* singleton dims, so a batch of
        # size 1 would also lose its batch dimension here -- confirm callers
        # never pass B == 1.
        outputs = self.fc(outputs.rename(None).squeeze())

        # unsqueeze the outputs in dim = 1 and set names to the tensor,
        outputs = outputs.unsqueeze(1)
        outputs.names = ('B', 'N', 'E',)

        return outputs
3,727
1,176
# This file need to be send to the cluster via .addPyFile to handle the pickle problem
# This is outside the optimus folder on purpose because it cause problem importing optimus when using de udf.
# This can not import any optimus file unless it's imported via addPyFile
import datetime
import math
import os
import re
from ast import literal_eval

import fastnumbers
import pandas as pd
import pendulum
from dask import distributed
from dask.dataframe.core import DataFrame as DaskDataFrame
from pyspark.ml.linalg import VectorUDT
from pyspark.sql import functions as F, DataFrame as SparkDataFrame
from pyspark.sql.types import ArrayType, StringType, IntegerType, FloatType, DoubleType, BooleanType, StructType, \
    LongType, DateType, ByteType, ShortType, TimestampType, BinaryType, NullType

# This function return True or False if a string can be converted to any datatype.
from optimus.helpers.constants import ProfilerDataTypes
from optimus.helpers.raiseit import RaiseIt


def str_to_date(_value, date_format=None):
    # True when pendulum can parse `_value` as a date in any recognized
    # format. NOTE(review): `date_format` is accepted but unused here.
    try:
        # date_format = "DD/MM/YYYY"
        pendulum.parse(_value, strict=False)
        return True
    except:
        return False


def str_to_date_format(_value, date_format):
    # True when `_value` matches the given pendulum format string.
    # Check this https://stackoverflow.com/questions/17134716/convert-dataframe-column-type-from-string-to-datetime-dd-mm-yyyy-format
    try:
        pendulum.from_format(_value, date_format)
        return True
    except:
        return False


def str_to_null(_value):
    # True when the string is the literal "null" (case-insensitive).
    _value = _value.lower()
    if _value == "null":
        return True
    else:
        return False


def is_null(_value):
    # True for NaN/None/NaT as judged by pandas.
    if pd.isnull(_value):
        return True
    else:
        return False


def str_to_data_type(_value, _dtypes):
    """
    Check if value can be parsed to a tuple or and list.
    Because Spark can handle tuples we will try to transform tuples to arrays
    :param _value:
    :return:
    """
    # return True if isinstance(_value, str) else False
    # NOTE(review): returns None (falsy) rather than False when literal_eval
    # succeeds but the result is not of `_dtypes`.
    try:
        if isinstance(literal_eval((_value.encode('ascii', 'ignore')).decode("utf-8")), _dtypes):
            return True
    except (ValueError, SyntaxError, AttributeError):
        return False


def str_to_array(_value):
    # Array detection disabled on purpose; see commented implementation.
    return False
    # return str_to_data_type(_value, (list, tuple))


def str_to_object(_value):
    # Object (dict/set) detection disabled on purpose.
    return False
    # return str_to_data_type(_value, (dict, set))


regex_int = r"^\d+$"  # For cudf 0.14
regex_int = r"^\d+$"  # For cudf 0.14 -- NOTE(review): duplicate assignment
regex_decimal = r"^\d+\.\d$"

regex_boolean = r"\btrue\b|\bfalse\b"
regex_boolean_compiled = re.compile(regex_boolean)


def str_to_boolean(value, compile=False):
    # True when `value` contains the word "true" or "false".
    return str_to(value, regex_boolean, regex_boolean_compiled, compile)


regex_gender = r"\bmale\b|\bfemale\b"
regex_gender_compiled = re.compile(regex_gender)


def str_to_gender(value, compile=False):
    # True when `value` contains the word "male" or "female".
    return str_to(value, regex_gender, regex_gender_compiled, compile)


regex_url = "(http|https|ftp|s3):\/\/.?[a-zA-Z]*.\w*.[a-zA-Z0-9]*\/?[a-zA-z_-]*.?[a-zA-Z]*\/?"
regex_url_compiled = re.compile(regex_url, re.IGNORECASE) def str_to_url(value, compile=False): return str_to(value, regex_url, regex_url_compiled, compile) regex_ip = r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}" regex_ip_compiled = re.compile(regex_ip, re.IGNORECASE) def str_to_ip(value, compile=False): return str_to(value, regex_ip, regex_ip_compiled, compile) # regex_email = r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)" # This do not work in CUDF/RE2 regex_email = r"^[^@]+@[^@]+\.[a-zA-Z]{2,}$" regex_email_compiled = re.compile(regex_email, re.IGNORECASE) def str_to_email(value, compile=False): return str_to(value, regex_email, regex_email_compiled, compile) # Reference https://www.regular-expressions.info/creditcard.html # https://codereview.stackexchange.com/questions/74797/credit-card-checking regex_credit_card = (r'(4(?:\d{12}|\d{15})' # Visa r'|5[1-5]\d{14}' # Mastercard r'|6011\d{12}' # Discover (incomplete?) r'|7\d{15}' # What's this? r'|3[47]\d{13}' # American Express r')$') regex_credit_card_compiled = re.compile(regex_credit_card) def str_to_credit_card(value, compile=False): return str_to(value, regex_credit_card, regex_credit_card_compiled, compile) regex_zip_code = r"^(\d{5})([- ])?(\d{4})?$" regex_zip_code_compiled = re.compile(regex_zip_code, re.IGNORECASE) def str_to_zip_code(value, compile=False): return str_to(value, regex_zip_code, regex_zip_code_compiled, compile) regex_missing = r" " regex_missing_compiled = re.compile(regex_missing, re.IGNORECASE) def str_to_missing(value, compile=False): return str_to(value, regex_missing, regex_missing_compiled, compile) regex_social_security_number = "^([1-9])(?!\1{2}-\1{2}-\1{4})[1-9]{2}-[1-9]{2}-[1-9]{4}" regex_social_security_number_compiled = re.compile(regex_social_security_number, re.IGNORECASE) def str_to_social_security_number(value, compile=False): return str_to(value, regex_social_security_number, regex_social_security_number_compiled, compile) regex_http_code = "/^[1-5][0-9][0-9]$/" 
regex_http_code_compiled = re.compile(regex_http_code, re.IGNORECASE) def str_to_http_code(value, compile=False): return str_to(value, regex_http_code, regex_http_code_compiled, compile) # Reference https://stackoverflow.com/questions/8634139/phone-validation-regex regex_phone_number = "/\(?([0-9]{3})\)?([ .-]?)([0-9]{3})\2([0-9]{4})/" regex_phone_number_compiled = re.compile(regex_phone_number, re.IGNORECASE) def str_to_phone_number(value, compile=False): return str_to(value, regex_phone_number, regex_phone_number_compiled, compile) # States US_STATES_NAMES = ["alabama", "alaska", "american samoa", "arizona", "arkansas", "california", "colorado", "connecticut", "delaware", "district of columbia", "federated states of micronesia", "florida", "georgia", "guam", "hawaii", "idaho", "illinois", "indiana", "iowa", "kansas", "kentucky", "louisiana", "maine", "marshall islands", "maryland", "massachusetts", "michigan", "minnesota", "mississippi", "missouri", "montana", "nebraska", "nevada", "new hampshire", "new jersey", "new mexico", "new york", "north carolina", "north dakota", "northern mariana islands", "ohio", "oklahoma", "oregon", "palau", "pennsylvania", "puerto rico", "rhode island", "south carolina", "south dakota", "tennessee", "texas", "utah", "vermont", "virgin islands", "virginia", "washington", "west virginia", "wisconsin", "wyoming" ] US_STATES_CODE = [ "al", "ak", "as", "az", "ar", "ca", "co", "ct", "de", "dc", "fm", "fl", "ga", "gu", "hi", "id", "il", "in", "ia", "ks", "ky", "la", "me", "mh", "md", "ma", "mi", "mn", "ms", "mo", "mt", "ne", "nv", "nh", "nj", "nm", "ny", "nc", "nd", "mp", "oh", "ok", "or", "pw", "pa", "pr", "ri", "sc", "sd", "tn", "tx", "ut", "vt", "vi", "va", "wa", "wv", "wi", "wy" ] def str_to(value, regex, compiled_regex, compile=False): if value is None: result = False else: if compile is True: regex = compiled_regex else: regex = regex result = bool(re.match(regex, value)) return result def str_to_int(_value): return True if 
fastnumbers.isint(_value) else False def str_to_decimal(_value): return True if fastnumbers.isfloat(_value) else False def str_to_str(_value): return True if isinstance(_value, str) else False currencies = {"$": "dollar", "¢": "cent", "£": "point", "€": "euro", "¥": "yen", "₹": "indian rupee", "₽": "ruble", "元": "yuan", "¤": "currency", "₠": "euro-currency", "₡": "colon", "₢": "cruzeiro", "₣": "french franc", "₤": "lira", "₥": "mill", "₦": "naira", "₧": "peseta", "₨": "rupee", "₩": "won", "₪": "new shequel", "₫": "dong", "₭": "kip", "₮": "tugrik", "₯": "drachma", "₰": "german penny", "₱": "peso", "₲": "guarani", "₳": "austral", "₴": "hryvnia", "₵": "cedi", "₶": "livre tournois", "₸": "tenge", "₺": "turkish lira", "₼": "manat", "৲": "bengali rupee mark", "৳": "bengali rupee sign", "૱": "gujarati rupee sign", "௹": "tamil rupee sign", "฿": "thai currency bath", "៛": "khmer currency reil", "㍐": "square yuan", "円": "yen character", "圆": "yen/yuan character variant one", "圎": "yen/yuan character variant two", "圓": "yuan character, in hong kong and taiwan", "圜": "yen/yuan character variant three", "원": "won character", "﷼": "rial sign", "$": "fullwidth dollar sign", "¢": "fullwidth cent sign", "£": "fullwidth pound sign", "¥": "fullwidth yen sign", "₩": "fullwidth won sign"} regex_currencies = "|".join(list(currencies.keys())) regex_currencies_compiled = re.compile(regex_currencies) def str_to_currency(value, compile=False): return str_to(value, regex_boolean, regex_boolean_compiled, compile) def parse_spark_class_dtypes(value): """ Get a pyspark data class from a string data type representation. 
for example 'StringType()' from 'string' :param value: :return: """ if not isinstance(value, list): value = [value] try: data_type = [SPARK_DTYPES_DICT_OBJECTS[SPARK_SHORT_DTYPES[v]] for v in value] except (KeyError, TypeError): data_type = value if isinstance(data_type, list) and len(data_type) == 1: result = data_type[0] else: result = data_type return result class Infer(object): """ This functions return True or False if match and specific dataType """ DTYPE_FUNC = {"string": str_to_str, "boolean": str_to_boolean, "date": str_to_date, "array": str_to_array, "object": str_to_object, "ip": str_to_ip, "url": str_to_url, "email": str_to_email, "gender": str_to_gender, "credit_card_number": str_to_credit_card, "zip_code": str_to_zip_code, "int": str_to_int, "decimal": str_to_decimal, ProfilerDataTypes.PHONE_NUMBER.value: str_to_phone_number, ProfilerDataTypes.SOCIAL_SECURITY_NUMBER.value: str_to_social_security_number, ProfilerDataTypes.HTTP_CODE.value: str_to_http_code, } @staticmethod def mismatch(value: tuple, dtypes: dict): """ Count the dataType that match, do not match, nulls and missing. For example if we have an string column we also need to pass the column type we want to match. Like credit card or postal code. 
:param value: tuple(Column/Row, value) :param dtypes: dict {col_name:(dataType, mismatch)} :return: """ col_name, value = value _data_type = "" dtype = dtypes[col_name] if Infer.DTYPE_FUNC[dtype](value) is True: _data_type = dtype else: if is_null(value) is True: _data_type = "null" elif str_to_missing(value) is True: _data_type = "missing" else: _data_type = "mismatch" result = (col_name, _data_type), 1 return result @staticmethod def to_spark(value): """ Infer a Spark data type from a value :param value: value to be inferred :return: Spark data type """ result = None if value is None: result = "null" elif is_bool(value): result = "bool" elif fastnumbers.isint(value): result = "int" elif fastnumbers.isfloat(value): result = "float" elif is_list(value): result = ArrayType(Infer.to_spark(value[0])) elif is_datetime(value): result = "datetime" elif is_date(value): result = "date" elif is_binary(value): result = "binary" elif is_str(value): if str_to_boolean(value): result = "bool" elif str_to_date(value): result = "string" # date elif str_to_array(value): result = "string" # array else: result = "string" return parse_spark_class_dtypes(result) @staticmethod def parse(col_and_value, infer: bool = False, dtypes=None, str_funcs=None, int_funcs=None, full=True): """ :param col_and_value: Column and value tuple :param infer: If 'True' try to infer in all the dataTypes available. See int_func and str_funcs :param dtypes: :param str_funcs: Custom string function to infer. :param int_funcs: Custom numeric functions to infer. 
{col_name: regular_expression} :param full: True return a tuple with (col_name, dtype), count or False return dtype :return: """ col_name, value = col_and_value # Try to order the functions from less to more computational expensive if int_funcs is None: int_funcs = [(str_to_credit_card, "credit_card_number"), (str_to_zip_code, "zip_code")] if str_funcs is None: str_funcs = [ (str_to_missing, "missing"), (str_to_boolean, "boolean"), (str_to_date, "date"), (str_to_array, "array"), (str_to_object, "object"), (str_to_ip, "ip"), (str_to_url, "url"), (str_to_email, "email"), (str_to_gender, "gender"), (str_to_null, "null") ] # Check 'string' for Spark, 'object' for Dask if (dtypes[col_name] == "object" or dtypes[col_name] == "string") and infer is True: if isinstance(value, bool): _data_type = "boolean" elif fastnumbers.isint(value): # Check if value is integer _data_type = "int" for func in int_funcs: if func[0](value) is True: _data_type = func[1] break elif value != value: _data_type = "null" elif fastnumbers.isfloat(value): _data_type = "decimal" elif isinstance(value, str): _data_type = "string" for func in str_funcs: if func[0](value) is True: _data_type = func[1] break else: _data_type = dtypes[col_name] if is_null(value) is True: _data_type = "null" elif str_to_missing(value) is True: _data_type = "missing" else: if dtypes[col_name].startswith("array"): _data_type = "array" else: _data_type = dtypes[col_name] # print(_data_type) result = (col_name, _data_type), 1 if full: return result else: return _data_type @staticmethod def parse_pandas(value, date_format="DD/MM/YYYY"): # int_funcs = [(str_to_credit_card, "credit_card_number"), (str_to_zip_code, "zip_code")] str_funcs = [ (str_to_missing, "missing"), (str_to_boolean, "boolean"), (str_to_array, "array"), (str_to_object, "object"), (str_to_ip, "ip"), (str_to_url, "url"), (str_to_email, "email"), (str_to_gender, "gender"), (str_to_null, "null")] if isinstance(value, list): _data_type = "array" elif 
pd.isnull(value): _data_type = "null" elif isinstance(value, bool): _data_type = "boolean" elif profiler_dtype_func("int", True)( value): # We first check if a number can be parsed as a credit card or zip code _data_type = "int" for func in int_funcs: if func[0](str(value)) is True: _data_type = func[1] # Seems like float can be parsed as dates elif profiler_dtype_func("decimal", True)(value): _data_type = "decimal" elif str_to_date(value): _data_type = "date" else: _data_type = "string" for func in str_funcs: if func[0](str(value)) is True: _data_type = func[1] return _data_type def profiler_dtype_func(dtype, null=False): """ Return a function that check if a value match a datatype :param dtype: :param null: :return: """ def _float(value): if null is True: return fastnumbers.isfloat(value, allow_nan=True) is True and fastnumbers.isint(value) is False else: return fastnumbers.isfloat(value) is True and fastnumbers.isint(value) is False or value != value def _int(value): if null is True: return fastnumbers.isint(value) else: return fastnumbers.isint(value) or value != value if dtype == ProfilerDataTypes.INT.value: return _int elif dtype == ProfilerDataTypes.DECIMAL.value: return _float elif dtype == ProfilerDataTypes.STRING.value: return is_str elif dtype == ProfilerDataTypes.BOOLEAN.value: return str_to_boolean elif dtype == ProfilerDataTypes.DATE.value: return str_to_object elif dtype == ProfilerDataTypes.ARRAY.value: return is_str elif dtype == ProfilerDataTypes.OBJECT.value: return str_to_object elif dtype == ProfilerDataTypes.GENDER.value: return str_to_gender elif dtype == ProfilerDataTypes.IP.value: return str_to_ip elif dtype == ProfilerDataTypes.URL.value: return str_to_url elif dtype == ProfilerDataTypes.EMAIL.value: return str_to_email elif dtype == ProfilerDataTypes.CREDIT_CARD_NUMBER.value: return str_to_credit_card elif dtype == ProfilerDataTypes.ZIP_CODE.value: return str_to_zip_code elif dtype == ProfilerDataTypes.MISSING.value: return is_str else: 
RaiseIt.value_error(dtype, ProfilerDataTypes.list()) def is_nan(value): """ Check if a value is nan :param value: :return: """ result = False if is_str(value): if value.lower() == "nan": result = True elif is_numeric(value): if math.isnan(value): result = True return result def is_none(value): """ Check if a value is none :param value: :return: """ result = False if is_str(value): if value.lower() == "none": result = True elif value is None: result = True return result def is_same_class(class1, class2): """ Check if 2 class are the same :param class1: :param class2: :return: """ return class1 == class2 def is_(value, type_): """ Check if a value is instance of a class :param value: :param type_: :return: """ return isinstance(value, type_) def is_type(type1, type2): """ Check if a value is a specific class :param type1: :param type2: :return: """ return type1 == type2 def is_function(value): """ Check if a param is a function :param value: object to check for :return: """ return hasattr(value, '__call__') def is_list(value): """ Check if an object is a list :param value: :return: """ return isinstance(value, list) def is_list_empty(value): """ Check is a list is empty :param value: :return: """ return len(value) == 0 def is_dict(value): """ Check if an object is a list :param value: :return: """ return isinstance(value, dict) def is_tuple(value): """ Check if an object is a tuple :param value: :return: """ return isinstance(value, tuple) def is_column(value): """ Check if a object is a column :return: """ return isinstance(value, F.Column) def is_list_of_str(value): """ Check if an object is a list of strings :param value: :return: """ return bool(value) and isinstance(value, list) and all(isinstance(elem, str) for elem in value) def is_list_of_int(value): """ Check if an object is a list of integers :param value: :return: """ return bool(value) and isinstance(value, list) and all(isinstance(elem, int) for elem in value) def is_list_of_float(value): """ Check if an 
object is a list of floats :param value: :return: """ return bool(value) and isinstance(value, list) and all(isinstance(elem, float) for elem in value) def is_list_of_str_or_int(value): """ Check if an object is a string or an integer :param value: :return: """ return bool(value) and isinstance(value, list) and all(isinstance(elem, (int, str)) for elem in value) def is_list_of_str_or_num(value): """ Check if an object is string, integer or float :param value: :return: """ return bool(value) and isinstance(value, list) and all(isinstance(elem, (str, int, float)) for elem in value) def is_list_of_spark_dataframes(value): """ Check if an object is a Spark DataFrame :param value: :return: """ return bool(value) and isinstance(value, list) and all(isinstance(elem, SparkDataFrame) for elem in value) def is_list_of_dask_dataframes(value): """ Check if an object is a Spark DataFrame :param value: :return: """ return isinstance(value, list) and all(isinstance(elem, DaskDataFrame) for elem in value) def is_filepath(file_path): """ Check if a value ia a valid file path :param file_path: :return: """ # the file is there if os.path.exists(file_path): return True # the file does not exists but write privileges are given elif os.access(os.path.dirname(file_path), os.W_OK): return True # can not write there else: return False def is_ip(value): """ Check if a value is valid ip :param value: :return: """ parts = value.split(".") if len(parts) != 4: return False for item in parts: if not 0 <= int(item) <= 255: return False return True def is_list_of_strings(value): """ Check if all elements in a list are strings :param value: :return: """ return bool(value) and isinstance(value, list) and all(isinstance(elem, str) for elem in value) def is_list_of_numeric(value): """ Check if all elements in a list are int or float :param value: :return: """ return bool(value) and isinstance(value, list) and all(isinstance(elem, (int, float)) for elem in value) def is_list_of_list(value): """ Check 
if all elements in a list are tuples :param value: :return: """ return bool(value) and isinstance(value, list) and all(isinstance(elem, list) for elem in value) def is_list_of_tuples(value): """ Check if all elements in a list are tuples :param value: :return: """ return bool(value) and isinstance(value, list) and all(isinstance(elem, tuple) for elem in value) def is_list_of_one_element(value): """ Check if a var is a single element :param value: :return: """ if is_list(value): return len(value) == 1 def is_dict_of_one_element(value): """ Check if a var is a single element :param value: :return: """ if is_dict(value): return len(value) == 1 def is_one_element(value): """ Check if a var is a single element :param value: :return: """ return isinstance(value, (str, int, float, bool)) def is_num_or_str(value): """ Check if a var is numeric(int, float) or string :param value: :return: """ return isinstance(value, (int, float, str)) def is_str_or_int(value): """ Check if a var is a single element :param value: :return: """ return isinstance(value, (str, int)) def is_numeric(value): """ Check if a var is a single element :param value: :return: """ return isinstance(value, (int, float)) def is_str(value): """ Check if an object is a string :param value: :return: """ # Seems 20% faster than return isinstance(value, str) # return True if type("str") == "str" else False def is_object(value): """ Check if an object is a string :param value: :return: """ return isinstance(value, str) def is_list_of_futures(value): """ Check if an object is a list of strings :param value: :return: """ return bool(value) and isinstance(value, list) and all( isinstance(elem, distributed.client.Future) for elem in value) def is_future(value): """ Check if an object is a list of strings :param value: :return: """ return isinstance(value, distributed.client.Future) def is_int(value): """ Check if an object is an integer :param value: :return: """ return isinstance(value, int) def is_url(value): regex 
= re.compile( r'^(?:http|ftp|hdfs)s?://' # http:// or https:// r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain... r'localhost|' # localhost... r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip r'(?::\d+)?' # optional port r'(?:/?|[/?]\S+)$', re.IGNORECASE) return re.match(regex, value) def is_float(value): """ Check if an object is an integer :param value: :return: """ return isinstance(value, float) def is_bool(value): return isinstance(value, bool) def is_datetime(value): """ Check if an object is a datetime :param value: :return: """ return isinstance(value, datetime.datetime) def is_binary(value): """ Check if an object is a bytearray :param value: :return: """ return isinstance(value, bytearray) def is_date(value): """ Check if an object is a date :param value: :return: """ return isinstance(value, datetime.date) PYTHON_SHORT_TYPES = {"string": "string", "str": "string", "integer": "int", "int": "int", "float": "float", "double": "double", "bool": "boolean", "boolean": "boolean", "array": "array", "null": "null" } PYTHON_TYPES = {"string": str, "int": int, "float": float, "boolean": bool} PYSPARK_NUMERIC_TYPES = ["byte", "short", "big", "int", "double", "float"] PYSPARK_NOT_ARRAY_TYPES = ["byte", "short", "big", "int", "double", "float", "string", "date", "bool"] PYSPARK_STRING_TYPES = ["str"] PYSPARK_ARRAY_TYPES = ["array"] SPARK_SHORT_DTYPES = {"string": "string", "str": "string", "integer": "int", "int": "int", "bigint": "bigint", "big": "bigint", "long": "bigint", "float": "float", "double": "double", "bool": "boolean", "boolean": "boolean", "struct": "struct", "array": "array", "date": "date", "datetime": "datetime", "byte": "byte", "short": "short", "binary": "binary", "null": "null", "vector": "vector", "timestamp": "datetime" } SPARK_DTYPES_DICT = {"string": StringType, "int": IntegerType, "float": FloatType, "double": DoubleType, "boolean": BooleanType, "struct": StructType, "array": ArrayType, "bigint": 
LongType, "date": DateType, "byte": ByteType, "short": ShortType, "datetime": TimestampType, "binary": BinaryType, "null": NullType, "vector": VectorUDT } SPARK_DTYPES_DICT_OBJECTS = \ {"string": StringType(), "int": IntegerType(), "float": FloatType(), "double": DoubleType(), "boolean": BooleanType(), "struct": StructType(), "array": ArrayType(StringType()), "bigint": LongType(), "date": DateType(), "byte": ByteType(), "short": ShortType(), "datetime": TimestampType(), "binary": BinaryType(), "null": NullType() } PROFILER_COLUMN_TYPES = {"categorical", "numeric", "date", "null", "array", "binary"} PYTHON_TO_PROFILER = {"string": "categorical", "boolean": "categorical", "int": "numeric", "float": "numeric", "decimal": "numeric", "date": "date", "array": "array", "binaty": "binary", "null": "null"} SPARK_DTYPES_TO_PROFILER = {"int": ["smallint", "tinyint", "bigint", "int"], "decimal": ["float", "double"], "string": "string", "date": {"date", "timestamp"}, "boolean": "boolean", "binary": "binary", "array": "array", "object": "object", "null": "null", "missing": "missing"}
31,259
10,147
import codecs
import numpy
import glob
import re
from sklearn.metrics import f1_score


def read_passages(filename, is_labeled):
    """Read a passage file into parallel lists of clause and label sequences.

    Passages are separated by blank lines; each non-blank line is either
    'clause<TAB>label' (when is_labeled is True) or a bare clause.

    :param filename: path to the (UTF-8) passage file
    :param is_labeled: whether lines carry a tab-separated label
    :return: (list of clause lists, list of label lists — label lists are
             empty when is_labeled is False)
    """
    str_seqs = []
    str_seq = []
    label_seqs = []
    label_seq = []
    for line in codecs.open(filename, "r", "utf-8"):
        lnstrp = line.strip()
        if lnstrp == "":
            # Blank line terminates the current passage.
            if len(str_seq) != 0:
                str_seqs.append(str_seq)
                str_seq = []
                label_seqs.append(label_seq)
                label_seq = []
        else:
            if is_labeled:
                clause, label = lnstrp.split("\t")
                label_seq.append(label.strip())
            else:
                clause = lnstrp
            str_seq.append(clause)
    # Flush the last passage if the file does not end with a blank line.
    if len(str_seq) != 0:
        str_seqs.append(str_seq)
        label_seqs.append(label_seq)
    return str_seqs, label_seqs


def from_BIO_ind(BIO_pred, BIO_target, indices):
    """Map BIO-tag indices back to compact indices over the original labels.

    :param BIO_pred: predicted BIO label indices
    :param BIO_target: gold BIO label indices
    :param indices: dict {BIO_label: BIO_index}
    :return: (pred, target) re-indexed so that B_x and I_x share one index
    """
    # Make a mapping between the indices of BIO_labels and temporary
    # original label indices.
    table = {}
    original_labels = []
    for BIO_label, BIO_index in indices.items():
        if BIO_label[:2] == "I_" or BIO_label[:2] == "B_":
            label = BIO_label[2:]
        else:
            label = BIO_label
        if label in original_labels:
            table[BIO_index] = original_labels.index(label)
        else:
            table[BIO_index] = len(original_labels)
            original_labels.append(label)
    original_pred = [table[label] for label in BIO_pred]
    original_target = [table[label] for label in BIO_target]
    return original_pred, original_target


def to_BIO(label_seqs):
    """Convert plain label sequences to BIO encoding.

    'none' plays the role of O and is left unchanged; any other label
    becomes 'B_<label>' at the start of a run and 'I_<label>' inside one.
    """
    new_label_seqs = []
    for label_para in label_seqs:
        new_label_para = []
        prev = ""
        for label in label_para:
            if label != "none":
                new_label = ("I_" + label) if label == prev else ("B_" + label)
            else:
                new_label = label  # "none"
            prev = label
            new_label_para.append(new_label)
        new_label_seqs.append(new_label_para)
    return new_label_seqs


def from_BIO(label_seqs):
    """Strip the B_/I_ prefixes added by to_BIO."""
    new_label_seqs = []
    for label_para in label_seqs:
        new_label_para = []
        for label in label_para:
            if label[:2] == "I_" or label[:2] == "B_":
                new_label_para.append(label[2:])
            else:
                new_label_para.append(label)
        new_label_seqs.append(new_label_para)
    return new_label_seqs


def clean_url(word):
    """Clean specific data format from social media: replace URLs (and the
    'exlink' placeholder) with the '<URL>' token."""
    word = re.sub(r'https? : \/\/.*[\r\n]*', '<URL>', word)
    word = re.sub(r'exlink', '<URL>', word)
    return word


def clean_num(word):
    """Replace purely numeric tokens with '@'; leave mixed alphanumerics alone."""
    # Check if the word contains a digit and no letters.
    if any(char.isdigit() for char in word):
        try:
            # Narrowed from a bare 'except': float() only raises ValueError here.
            float(word.replace(',', ''))
            return '@'
        except ValueError:
            if not any(char.isalpha() for char in word):
                return '@'
    return word


def clean_words(str_seqs):
    """Apply clean_url and clean_num to every token of every clause."""
    processed_seqs = []
    for str_seq in str_seqs:
        processed_clauses = []
        for clause in str_seq:
            filtered = [clean_num(clean_url(word)) for word in clause.split()]
            processed_clauses.append(" ".join(filtered))
        processed_seqs.append(processed_clauses)
    return processed_seqs


def test_f1(test_file, pred_label_seqs):
    """Compute weighted F1 of predicted label sequences against a labeled file."""
    def linearize(labels):
        # Flatten a list of per-paper label lists into one flat list.
        return [label for paper in labels for label in paper]

    # Fix: this previously called the undefined name 'read_passages_original',
    # which raised NameError; read_passages (defined above) is the reader.
    _, label_seqs = read_passages(test_file, True)
    true_label = linearize(label_seqs)
    pred_label = linearize(pred_label_seqs)
    f1 = f1_score(true_label, pred_label, average="weighted")
    print("F1 score:", f1)
    return f1


def evaluate(y, pred):
    """Return (accuracy, weighted F-score, per-label F-scores) for two label lists."""
    accuracy = float(sum([c == p for c, p in zip(y, pred)])) / len(pred)
    num_gold = {}
    num_pred = {}
    num_correct = {}
    for c, p in zip(y, pred):
        num_gold[c] = num_gold.get(c, 0) + 1
        num_pred[p] = num_pred.get(p, 0) + 1
        if c == p:
            num_correct[c] = num_correct.get(c, 0) + 1
    fscores = {}
    for p in num_pred:
        precision = float(num_correct[p]) / num_pred[p] if p in num_correct else 0.0
        recall = float(num_correct[p]) / num_gold[p] if p in num_correct else 0.0
        fscores[p] = 2 * precision * recall / (precision + recall) if precision != 0 and recall != 0 else 0.0
    weighted_fscore = sum([fscores[p] * num_gold[p] if p in num_gold else 0.0
                           for p in fscores]) / sum(num_gold.values())
    return accuracy, weighted_fscore, fscores


def make_folds(train_X, train_Y, num_folds):
    """Split (train_X, train_Y) into num_folds cross-validation folds.

    Remainder rows that do not divide evenly across folds are dropped.

    :return: list of ((train_X_fold, train_Y_fold), (val_X, val_Y)) tuples
    """
    num_points = train_X.shape[0]
    rem = num_points % num_folds
    print(train_X.shape, train_Y.shape)
    # Removed the unused 'fol_len = num_points / num_folds' (float division).
    X_folds = numpy.split(train_X, num_folds) if rem == 0 else numpy.split(train_X[:-rem], num_folds)
    Y_folds = numpy.split(train_Y, num_folds) if rem == 0 else numpy.split(train_Y[:-rem], num_folds)
    cv_folds = []
    for i in range(num_folds):
        train_folds_X = [X_folds[j] for j in range(num_folds) if i != j]
        train_folds_Y = [Y_folds[j] for j in range(num_folds) if i != j]
        cv_folds.append((
            (numpy.concatenate(train_folds_X), numpy.concatenate(train_folds_Y)),
            (X_folds[i], Y_folds[i])))
    return cv_folds


def arg2param(args):
    """Convert an argparse namespace into a plain dict with numeric fields coerced."""
    params = vars(args)
    for name in ("lr", "embedding_dropout", "high_dense_dropout",
                 "attention_dropout", "lstm_dropout", "validation_split"):
        params[name] = float(params[name])
    for name in ("hard_k", "word_proj_dim", "lstm_dim", "att_proj_dim",
                 "rec_hid_dim", "epoch", "maxseqlen", "maxclauselen",
                 "batch_size"):
        params[name] = int(params[name])
    return params
6,853
2,320
import json from glob import glob data = [] for fname in ['critic-0.jsonlines']+glob('*new*.jsonlines'): print fname with open(fname) as f: for line in f: data.append(json.loads(line)) new_data = [] for i in data: if i['imdb_mid'] != -1: new_data.append(i) with open('compact.json','w') as f: json.dump(new_data, f)
364
137
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Exercise 11.5 from Kane 1985."""
from __future__ import division
from sympy import expand, solve, symbols, trigsimp
from sympy import sin, tan, pi
from sympy.physics.mechanics import Point, ReferenceFrame, RigidBody
from sympy.physics.mechanics import dot, dynamicsymbols, inertia, msprint
from util import generalized_active_forces, generalized_inertia_forces
from util import partial_velocities, subs

# Symbols: gravity, disc mass, contact-force components, disc radius, time.
g, m, Px, Py, Pz, R, t = symbols('g m Px Py Pz R t')
# Generalized coordinates q1..q5 and their time derivatives.
q1, q2, q3, q4, q5 = q = dynamicsymbols('q1:6')
qd = dynamicsymbols('q1:6', level=1)
# Generalized speeds u1..u5.
u1, u2, u3, u4, u5 = u = dynamicsymbols('u1:6')

# reference frames
A = ReferenceFrame('A')
B_prime = A.orientnew('B_prime', 'Axis', [q1, A.z])
B = B_prime.orientnew('B', 'Axis', [pi/2 - q2, B_prime.x])
C = B.orientnew('C', 'Axis', [q3, B.z])

# points, velocities
pO = Point('O')
pO.set_vel(A, 0)
# R is the point in plane H that comes into contact with disk C.
pR = pO.locatenew('R', q4*A.x + q5*A.y)
pR.set_vel(A, pR.pos_from(pO).dt(A))
pR.set_vel(B, 0)
# C^ is the point in disk C that comes into contact with plane H.
pC_hat = pR.locatenew('C^', 0)
pC_hat.set_vel(C, 0)
# C* is the point at the center of disk C.
pC_star = pC_hat.locatenew('C*', R*B.y)
pC_star.set_vel(C, 0)
pC_star.set_vel(B, 0)

# calculate velocities in A
pC_star.v2pt_theory(pR, A, B)
pC_hat.v2pt_theory(pC_star, A, C)

# kinematic differential equations
kde = [x - y for x, y in zip(
    [dot(C.ang_vel_in(A), basis) for basis in B] + qd[3:], u)]
kde_map = solve(kde, qd)
# include second derivatives in kde map
for k, v in kde_map.items():
    kde_map[k.diff(t)] = v.diff(t)

# Velocity (rolling) constraints at the contact point, solved for u4, u5.
# NOTE(review): Python 2 idiom — on Python 3 'map' returns an iterator;
# confirm util.subs accepts it (or wrap in list()).
vc = map(lambda x: dot(pC_hat.vel(A), x), [A.x, A.y])
vc_map = solve(subs(vc, kde_map), [u4, u5])

# define disc rigidbody
I_C = inertia(C, m*R**2/4, m*R**2/4, m*R**2/2)
rbC = RigidBody('rbC', pC_star, C, m, (I_C, pC_star))

# forces
R_C_hat = Px*A.x + Py*A.y + Pz*A.z
R_C_star = -m*g*A.z
forces = [(pC_hat, R_C_hat), (pC_star, R_C_star)]

# partial velocities
bodies = [rbC]
# NOTE(review): 'zip(*forces)[0]' subscripts a zip object — valid on
# Python 2 only; Python 3 would need list(zip(*forces))[0].
system = ([i.masscenter for i in bodies] + [i.frame for i in bodies] +
          list(zip(*forces)[0]))
partials = partial_velocities(system, [u1, u2, u3], A, kde_map, vc_map)

# generalized active forces and generalized inertia forces
Fr, _ = generalized_active_forces(partials, forces)
Fr_star, _ = generalized_inertia_forces(partials, bodies, kde_map, vc_map)

# dynamical equations: Fr + Fr* = 0, solved for the derivatives of u1..u3
dyn_eq = subs([x + y for x, y in zip(Fr, Fr_star)], kde_map)
u1d, u2d, u3d = ud = [x.diff(t) for x in [u1, u2, u3]]
dyn_eq_map = solve(dyn_eq, ud)
for x in ud:
    print('{0} = {1}'.format(msprint(x), msprint(trigsimp(dyn_eq_map[x]))))

# Expected results (Kane 1985) used to cross-check the derivation.
u1d_expected = (u2**2*tan(q2) - 6*u2*u3 - 4*g*sin(q2)/R)/5
u2d_expected = 2*u3*u1 - u1*u2*tan(q2)
u3d_expected = 2*u1*u2/3
assert trigsimp(expand(dyn_eq_map[u1d] - u1d_expected)) == 0
assert trigsimp(expand(dyn_eq_map[u2d] - u2d_expected)) == 0
assert trigsimp(expand(dyn_eq_map[u3d] - u3d_expected)) == 0
2,965
1,365
import hashlib
import os
import logging
import posixpath

from seaserv import seafile_api

from seahub.utils import normalize_file_path, check_filename_with_rename
from seahub.tags.models import FileUUIDMap

logger = logging.getLogger(__name__)


def create_user_draft_repo(username, org_id=-1):
    """Create the per-user 'Drafts' library and return its repo id."""
    repo_name = 'Drafts'
    if org_id and org_id > 0:
        repo_id = seafile_api.create_org_repo(repo_name, '', username, org_id)
    else:
        repo_id = seafile_api.create_repo(repo_name, '', username)
    return repo_id


def get_draft_file_name(repo_id, file_path):
    """Build a '<name>(draft)<ext>' filename, made unique inside /Drafts."""
    file_path = normalize_file_path(file_path)
    file_name, file_ext = os.path.splitext(os.path.basename(file_path))
    draft_file_name = "%s%s%s" % (file_name, '(draft)', file_ext)
    # Renames on collision so the draft filename is unique under /Drafts.
    draft_file_name = check_filename_with_rename(repo_id, '/Drafts', draft_file_name)
    return draft_file_name


def is_draft_file(repo_id, file_path):
    """Return True if *file_path* in *repo_id* is itself a draft file."""
    is_draft = False
    file_path = normalize_file_path(file_path)

    from .models import Draft
    try:
        draft = Draft.objects.filter(origin_repo_id=repo_id, draft_file_path=file_path)
        if draft:
            is_draft = True
    except Draft.DoesNotExist:
        # NOTE(review): QuerySet.filter() does not raise DoesNotExist, so this
        # branch looks unreachable — confirm before relying on it.
        pass

    return is_draft


def has_draft_file(repo_id, file_path):
    """Return True if a draft exists (and its file is still present) for *file_path*."""
    has_draft = False
    file_path = normalize_file_path(file_path)
    parent_path = os.path.dirname(file_path)
    filename = os.path.basename(file_path)
    file_uuid = FileUUIDMap.objects.get_fileuuidmap_by_path(
        repo_id, parent_path, filename, is_dir=False)

    from .models import Draft
    if file_uuid:
        try:
            d = Draft.objects.filter(origin_file_uuid=file_uuid.uuid)
            if d:
                d = d[0]
                # The Draft row may outlive the actual file; check it still exists.
                file_id = seafile_api.get_file_id_by_path(repo_id, d.draft_file_path)
                if file_id:
                    has_draft = True
                else:
                    # NOTE(review): bare 'Draft.DoesNotExist' is a no-op expression
                    # statement (nothing is raised) — likely meant 'raise' or 'pass'.
                    Draft.DoesNotExist
        except Draft.DoesNotExist:
            pass

    return has_draft


def get_file_draft(repo_id, file_path, is_draft=False, has_draft=False):
    """Return a dict describing the draft linked to *file_path*.

    Keys: 'draft_id', 'draft_file_path', 'draft_origin_file_path' — left as
    None/'' when no draft information is found.
    """
    draft = {}
    draft['draft_id'] = None
    draft['draft_file_path'] = ''
    draft['draft_origin_file_path'] = ''

    from .models import Draft
    if is_draft:
        # file_path IS the draft: resolve its origin file via the UUID map.
        d = Draft.objects.filter(origin_repo_id=repo_id, draft_file_path=file_path)
        if d:
            d = d[0]
            uuid = FileUUIDMap.objects.get_fileuuidmap_by_uuid(d.origin_file_uuid)
            file_path = posixpath.join(uuid.parent_path, uuid.filename)

            draft['draft_id'] = d.id
            draft['draft_file_path'] = d.draft_file_path
            draft['draft_origin_file_path'] = file_path
        else:
            # NOTE(review): no-op expression statement (see has_draft_file).
            Draft.DoesNotExist

    if has_draft:
        # file_path is the origin: look up the draft made from it.
        file_path = normalize_file_path(file_path)
        parent_path = os.path.dirname(file_path)
        filename = os.path.basename(file_path)
        file_uuid = FileUUIDMap.objects.get_fileuuidmap_by_path(
            repo_id, parent_path, filename, is_dir=False)

        d = Draft.objects.filter(origin_file_uuid=file_uuid.uuid)
        if d:
            d = d[0]
            draft['draft_id'] = d.id
            draft['draft_file_path'] = d.draft_file_path
        else:
            # NOTE(review): no-op expression statement (see has_draft_file).
            Draft.DoesNotExist

    return draft


def send_draft_publish_msg(draft, username, path):
    """Send a draft-publish event message to seafevents (best effort, logged on failure)."""
    repo_id = draft.origin_repo_id
    old_path = draft.draft_file_path
    msg = '%s\t%s\t%s\t%s\t%s\t%s' % ("publish", "draft", repo_id, username, path, old_path)

    try:
        seafile_api.publish_event('seahub.draft', msg)
    except Exception as e:
        logger.error("Error when sending draft publish message: %s" % str(e))
3,706
1,313
def get_itinerary(flights, starting_point, current_itinerary): if not flights: return current_itinerary + [starting_point] updated_itinerary = None for index, (city_1, city_2) in enumerate(flights): if starting_point == city_1: child_itinerary = get_itinerary( flights[:index] + flights[index + 1:], city_2, current_itinerary + [city_1]) if child_itinerary: if not updated_itinerary or "".join(child_itinerary) < "".join(updated_itinerary): updated_itinerary = child_itinerary return updated_itinerary size = int(input()) array_input = [] for x in range(size): array_input.append(tuple(input().split())) g = get_itinerary(array_input,'MSC',[]) print(" ".join(g))
781
258
import threading import time def _stamp(): return int(time.time()*1000) _base = int(time.mktime(time.strptime('2020-1-1', '%Y-%m-%d'))*1000) class Snowflake(): def __init__(self, machine: int = 0): self.machine = machine & 0b11111 self._mutex = threading.Lock() self._lstTime = _stamp() self._flow = 0 def next(self) -> int: with self._mutex: if self._flow == 256 and self._lstTime == _stamp(): time.sleep(0.002) if self._lstTime < _stamp(): self._lstTime = _stamp() self._flow = 0 t = self._lstTime - _base # t = _stamp() - _base # this will cause bug id = 0 id |= (t<<13) id |= (self.machine << 8) id |= self._flow self._flow += 1 return id def getFlow(id): return id & ((1<<8)-1) def getMachineId(id): return (id>>8) & ((1<<5)-1) def getTime(id): return (id>>13) / 1000 + _base def makeId(timestamp = 0, machine = 0, flow = 0): """ using unix style timestamp, not python timestamp """ timestamp -= _base return (timestamp<<13) | (machine << 8) | flow
1,233
443
# geneexpr.py - simple gene expression example # Richard M. Murray, 11 Aug 2018 # # This example shows how to use the txtl library to create a model for # a simple gene expression construct. This model is constructed to # demonstrate the ability to mimic the MATLAB TX-TL modeling toolbox # approach as well as a few simple variants that are enabled in # the python version. import txtl import txtl.bioscrape # Set up the standard TXTL tubes tube1 = txtl.extract('BL21_DE3') tube2 = txtl.buffer('stdbuffer') # Now set up a tube that will contain our DNA tube3 = txtl.newtube('geneexpr') # Define a DNA strand using strings (ala MATLAB) gene1 = txtl.assemble_dna(prom='ptet(50)', utr5='BCD2(20)', cds='tetR(1200)') txtl.add_dna(mixture=tube3, dna=gene1, conc=1, type='plasmid') # # Assemble a DNA strand using objects (semi-pythonic) # # Note: these constructs would normally live inside of a model # library, but this shows how to extend functionality by creating # constructs inline. # Create individual DNA components based on standard types ptet = txtl.RepressedPromoter('ptet', 'tetR', dimer=True) bcd2 = txtl.ConstitutiveRBS('BCD2', Ribosome_Binding_F=10) degfp = txtl.ProteinCDS('deGFP', maturation_time=30*txtl.minutes) lva = txtl.DegradationTag('lva', 'clpXP') # Assemble a gene using objects instead of strings gene2 = txtl.assemble_dna(ptet, bcd2, degfp, lva) txtl.add_dna(tube3, gene2, 1, 'plasmid') # Mix the contents of the individual tubes well1 = txtl.combine_tubes([tube1, tube2, tube3]) # Run a simulation #! TODO: implement simdata = txtl.bioscrape.runsim(well1, 8 * txtl.hours) # plot the result #! TODO: implement txtl.bioscrape.plot(simdata, well1, ['Protein_deGFP', 'Protein_tetR']) # Create an SBML file containing the model txtl.write_sbml(well1, 'geneexpr.xml') # print out a basic report about the content of the well well1.print_report()
1,878
699
from expression_evaluator.token import * class InOperator(BasicOperator): symbols = ['in'] priority = PriorityLevel.String @classmethod def _function(cls, a, b): return a in b
201
59
import re
import numpy as np

# Matches an SRT timestamp such as '01:02:03,456' (raw string — the previous
# non-raw '\d' escapes trigger invalid-escape warnings on modern Python).
_TS_PATTERN = r'\d\d:\d\d:\d\d,\d*'


def timestamp_to_num(ts):
    """Parse 'HH:MM:SS,mmm' into a numpy array [hours, minutes, seconds, millis]."""
    return np.array([int(part) for part in re.split(r'[:,]', ts)])


def main(filename, delta, output, direction):
    """Shift every subtitle timestamp in *filename* by *delta* and write *output*.

    :param filename: input .srt file path
    :param delta: offset formatted as 'HH:MM:SS,mmm'
    :param output: output file path
    :param direction: 'B' shifts backwards (earlier); anything else forward
    """
    buff = []
    with open(filename, 'r') as f:
        contents = f.readlines()
    for line in contents:
        ts = parse_timestamp(line)
        if not ts:
            # Not a timestamp line (cue index, subtitle text, blank): copy as is.
            buff.append(line)
        else:
            new_ts = update_timestamps(ts, delta, direction)
            buff.append(create_ts_line(timestamp_to_string(new_ts)))
    # Renamed from 'file', which shadowed the builtin.
    with open(output, 'w') as out_f:
        out_f.writelines(buff)


def create_ts_line(ts):
    """Format a [begin, end] pair of timestamp strings as an SRT cue line."""
    return ts[0] + ' --> ' + ts[1] + '\n'


def timestamp_to_string(ts):
    """Render [begin, end] numeric timestamps back to 'HH:MM:SS,mmm' strings."""
    begin, end = ts
    return ['%02d:%02d:%02d,%03d' % (begin[0], begin[1], begin[2], begin[3]),
            '%02d:%02d:%02d,%03d' % (end[0], end[1], end[2], end[3])]


def update_timestamps(lst, delta, direction):
    """Shift a [begin, end] pair of timestamp strings by *delta*.

    :return: [[h, m, s, ms], [h, m, s, ms]] for the new begin/end times
    """
    begin_ms = convert_to_ms(timestamp_to_num(lst[0]))
    end_ms = convert_to_ms(timestamp_to_num(lst[1]))
    delta_ms = convert_to_ms(timestamp_to_num(delta))
    if direction == 'B':
        delta_ms = -delta_ms
    return [convert_to_ts(begin_ms + delta_ms),
            convert_to_ts(end_ms + delta_ms)]


def convert_to_ts(millis):
    """Split a millisecond count into [hours, minutes, seconds, millis]."""
    hours = int(millis // 3.6e6)
    millis %= 3.6e6
    minutes = int(millis // 60000)
    millis %= 60000
    seconds = int(millis // 1000)
    millis = int(millis % 1000)
    return [hours, minutes, seconds, millis]


def convert_to_ms(timestamp):
    """Collapse [hours, minutes, seconds, millis] into total milliseconds."""
    return (timestamp[3]
            + timestamp[2] * 1000
            + timestamp[1] * 1000 * 60
            + timestamp[0] * 1000 * 3600)


def parse_timestamp(txt):
    """Return all 'HH:MM:SS,mmm' timestamps found in *txt* (list, may be empty)."""
    return re.findall(_TS_PATTERN, txt)
2,632
990
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User


class SignUpForm(UserCreationForm):
    """User registration form with mandatory name and e-mail fields."""

    first_name = forms.CharField(max_length=50, required=True)
    last_name = forms.CharField(max_length=50, required=True)
    email = forms.EmailField(max_length=255, required=True)

    def __init__(self, *args, **kwargs):
        super(SignUpForm, self).__init__(*args, **kwargs)
        # Tag every widget with the 'input' CSS class in one pass instead of
        # six hand-written assignments.
        for field_name in ('username', 'email', 'password1', 'password2',
                           'first_name', 'last_name'):
            self.fields[field_name].widget.attrs['class'] = 'input'

    class Meta:
        model = User
        fields = ['username', 'first_name', 'last_name', 'email',
                  'password1', 'password2']
962
299
#
# COPYRIGHT (C) 2002-2011 Rajgopal Srinivasan and modified by changjin.hong@gmail.com
#
"""
.. module:: preprocess_clinvar
    :platform: Unix, Windows, MacOSX
    :synopsis: Transparent opening of compressed and uncompressed files

.. moduleauthor:: ; changjin.hong@gmail.com

NOTE: this module is Python 2 code (print statements, ``fp.next()``,
``dict.itervalues()``); do not run it under Python 3 without porting.
"""
import os, dill, re, argparse
from gcn.lib.databases.snpdb import Clinvar
from collections import namedtuple
from gcn.lib.databases.ready2upload import lib_hgmd
from gcn.lib.databases.refgene import Refgene
from gcn.etc import fileconfig
from gcn.lib.utils.lib_utils import py_struct, month_to_num, msgout, open2, runcmd
from gcn.lib.io import anyopen
import gcn.lib.io.vcf as vcf


def store_variant_citations(variant_citation_fn):
    """Read the ClinVar variant-citation file and map AlleleID -> VariationID.

    The file is first reduced to its three leading columns with an external
    ``cut | sort | uniq`` pipeline (written to a temporary file), then parsed.
    """
    print 'storing variant citations ...'
    # linked_ids = py_struct(allele_id=[],
    #                        variation_id=[],
    #                        rs=[])
    linked_ids = {}
    tmp_fn = '%s.tmp'%variant_citation_fn
    cmd = "cut -f1,2,3 %s | sort -r -k1,1 -k2,2n -k3,3n | uniq > %s" % (variant_citation_fn,tmp_fn)
    runcmd(cmd)
    fp = anyopen.openfile(tmp_fn,'rt')
    # First line is the header; strip trailing newline and any leading '#'.
    head = fp.next()[:-1]
    if head.startswith('#'):
        head = head[1:]
    ntuple = namedtuple('ntuple', head.split('\t'))
    for i in fp:
        rec = i[:-1]
        linked_id = ntuple._make(rec.split('\t'))
        linked_ids[linked_id.AlleleID] = linked_id.VariationID
    fp.close()
    os.unlink(tmp_fn)
    print 'Done.'
    return linked_ids


def store_variant_summary(variant_summary_fn, linked_ids):
    '''
    objective: parse variant summary file from Clinvar and generate a
    dictionary keyed by RCV accession holding:
      refSeq tx (REFTX)
      HGVSc, aa change in hgvs (HGVSp)
      last evaluate date (DATE)
      REV status
    Only GRCh37 rows with a non-empty RCVaccession are kept.
    :return: dict mapping RCV accession -> py_struct of annotation fields
    '''
    print 'storing variant summary ...'
    fp = anyopen.openfile(variant_summary_fn, 'rt')
    # Locate columns by header name so column order changes do not break us.
    head_col = fp.next().split('\t')
    alleleid_i = head_col.index('#AlleleID')
    name_i = head_col.index('Name')
    rcvacc_i = head_col.index('RCVaccession')
    date_i = head_col.index('LastEvaluated')
    assembly_i = head_col.index('Assembly')
    review_i = head_col.index('ReviewStatus')
    # rsid_i = 9
    vars_to_summuary = {}
    for i in fp:
        itms = i.split('\t')
        if itms[assembly_i]=='GRCh37':
            if not itms[rcvacc_i].strip():
                continue
            allele_id = itms[alleleid_i]
            variation_id = None
            if allele_id in linked_ids:
                variation_id = linked_ids[allele_id]
            rcv_ids = itms[rcvacc_i].split(';')
            # if itms[rsid_i] == '-1':
            #     rcv_ids = itms[rcvacc_i].split(';')
            # else:
            #     rcv_ids = ['rs%s'%itms[rsid_i]]
            for rcv_id in rcv_ids:
                if rcv_id not in vars_to_summuary:
                    vars_to_summuary[rcv_id] = py_struct(
                        name=None,
                        REFTX=None,
                        HGVSc=None,
                        HGVSp=None,
                        DATE=None,
                        REV=None,
                        CLNMETHOD=None,
                        allele_id=None,
                        variation_id=None)
                vars_to_summuary[rcv_id].allele_id = allele_id
                if variation_id:
                    vars_to_summuary[rcv_id].variation_id = variation_id
                # Name looks like 'NM_x(GENE):c.___ (p.___)'; capture tx, cDNA
                # and protein changes.
                mObj = re.search(r'(.+)\([\w]+\):c\.(.+)\s+\(p\.(.+)\)', itms[name_i])
                if mObj:
                    vars_to_summuary[rcv_id].REFTX = mObj.group(1)
                    vars_to_summuary[rcv_id].HGVSc = 'c.%s'%mObj.group(2)
                    vars_to_summuary[rcv_id].HGVSp = 'p.%s'%mObj.group(3)
                else:
                    #to handle a case where there is no aa hgvs
                    mObj = re.search(r'(.+)\([\w]+\):c\.(.+)', itms[name_i])
                    if mObj:
                        vars_to_summuary[rcv_id].REFTX = mObj.group(1)
                        vars_to_summuary[rcv_id].HGVSc = 'c.%s' % mObj.group(2)
                if itms[date_i] != '-':
                    # Dates arrive as 'Mon DD,YYYY'; normalize to YYYY-MM-DD,
                    # falling back to month '01' if the month is unrecognized.
                    [mon_date,year] = itms[date_i].split(',')
                    mon2, date2 = mon_date.split()
                    mon2 = month_to_num(mon2)
                    if mon2:
                        month2 = mon2
                    else:
                        month2 = '01'
                    vars_to_summuary[rcv_id].DATE = '%s-%s-%s'%(year.strip(),month2,date2.strip())
                vars_to_summuary[rcv_id].REV = itms[review_i].replace(' ','_')
    fp.close()
    print 'Done.'
    return vars_to_summuary


def store_submission_summary_fn2(submit_fn):
    """Parse the submission summary file into VariationID -> collection methods.

    The header row is detected by its last column name rather than position
    because the file begins with free-form comment lines.
    """
    print 'storing submission summary file ...'
    fp= anyopen.openfile(submit_fn, 'rt')
    submissions = {}
    read_heads = False
    for rec in fp:
        rec = rec[:-1]
        # print 'rec:',rec
        if not read_heads and rec.endswith('SubmittedGeneSymbol'):
            rec = rec[1:]
            heads = rec.split('\t')
            # print 'heads:',heads
            subm = namedtuple('subm', heads)
            read_heads = True
        elif read_heads:
            subm_rec = subm._make(rec.split('\t'))
            if subm_rec.VariationID not in submissions:
                submissions[subm_rec.VariationID] = py_struct(collection_methods=[])
            for cmethod in subm_rec.CollectionMethod.split(';'):
                submissions[subm_rec.VariationID].collection_methods.append(cmethod.replace(' ','_'))
    fp.close()
    print 'Done.'
    return submissions


def append_annotation_to_vcf(vcf_fn, vars_to_summuary, submissions, out_vcf):
    """Annotate a ClinVar VCF with REFTX/HGVSc/HGVSp/SPLOC/DATE/REV/CLNMETHOD.

    Matches each record against *vars_to_summuary* first by record id, then by
    its CLNACC accessions (version suffix stripped).
    """
    print 'appending annotation to clinvar VCF file ...'
    v = vcf.VCFParser(vcf_fn)
    ostream = open2(out_vcf, 'w')
    v.add_meta_info("REFTX", "1", "String", "RefSeq Transcript Name")
    v.add_meta_info("HGVSc", "1", "String", "HGVSc change in HGVS nomenclature")
    v.add_meta_info("HGVSp", "1", "String", "AA change in HGVS nomenclature")
    v.add_meta_info("SPLOC", "1", "Integer", "Distance from the predicted splice site")
    v.add_meta_info("DATE", "1", "String", "Last evaluated date")
    v.add_meta_info("REV", "1", "String", "Review status")
    v.add_meta_info("CLNMETHOD", "1", "String", "Collection methods")
    v.writeheader(ostream)
    for rec in v:
        v.parseinfo(rec)
        found = False
        for j, rcv_ids in enumerate([rec.id,rec.info.CLNACC]):
            if not found:
                for rcv_id in rcv_ids:
                    if j == 1:
                        #strip off version
                        rcv_id = rcv_id.split('.')[0]
                    if rcv_id in vars_to_summuary:
                        rec.info.REFTX = vars_to_summuary[rcv_id].REFTX
                        if vars_to_summuary[rcv_id].HGVSc:
                            rec.info.HGVSc = vars_to_summuary[rcv_id].HGVSc
                            # Flag near-splice-site variants (|offset| < 3).
                            mObj = re.search(r'c\.(.*)([\+\-]\d+)\D+', rec.info.HGVSc)
                            if mObj:
                                SPLOC = mObj.group(2)
                                if abs(int(SPLOC)) < 3:
                                    rec.info.SPLOC = SPLOC
                        if vars_to_summuary[rcv_id].HGVSp:
                            rec.info.HGVSp = vars_to_summuary[rcv_id].HGVSp
                        if vars_to_summuary[rcv_id].DATE:
                            rec.info.DATE = vars_to_summuary[rcv_id].DATE
                        if vars_to_summuary[rcv_id].REV:
                            rec.info.REV = vars_to_summuary[rcv_id].REV
                        if vars_to_summuary[rcv_id].variation_id in submissions:
                            cmethods = list(set(submissions[vars_to_summuary[rcv_id].variation_id].collection_methods))
                            rec.info.CLNMETHOD = '|'.join(cmethods)
                        found = True
                        break
        # Undo ClinVar's comma escaping in disease names.
        for j,clndbn in enumerate(rec.info.CLNDBN):
            rec.info.CLNDBN[j] = clndbn.replace('\\x2c_', ',').replace('\\x2c', ',')
        v.write(ostream, rec)
    ostream.close()
    v.stream.close()
    print 'Done.'


def append_annotation_to_vcf2(vcf_fn, vars_to_summuary, submissions, out_vcf):
    """Variant of append_annotation_to_vcf that deduplicates CLNACC accessions
    before matching and rewrites rec.info.CLNACC with the unique list."""
    print 'appending annotation to clinvar VCF file ...'
    v = vcf.VCFParser(vcf_fn)
    ostream = open2(out_vcf, 'w')
    v.add_meta_info("REFTX", "1", "String", "RefSeq Transcript Name")
    v.add_meta_info("HGVSc", "1", "String", "HGVSc change in HGVS nomenclature")
    v.add_meta_info("HGVSp", "1", "String", "AA change in HGVS nomenclature")
    v.add_meta_info("SPLOC", "1", "Integer", "Distance from the predicted splice site")
    v.add_meta_info("DATE", "1", "String", "Last evaluated date")
    v.add_meta_info("REV", "1", "String", "Review status")
    v.add_meta_info("CLNMETHOD", "1", "String", "Collection methods")
    v.writeheader(ostream)
    for rec in v:
        v.parseinfo(rec)
        # clnacc = re.split('[|,]', rec.info.CLNACC)
        # rec.info.CLNACC = '|'.join(list(set(clnacc)))
        uniq_rcv_ids = []
        for rcv_id_str in rec.info.CLNACC:
            for rcv_id in rcv_id_str.split('|'):
                if rcv_id in uniq_rcv_ids:
                    continue
                uniq_rcv_ids.append(rcv_id)
        # print 'rec.info.CLNACC:',rec.info.CLNACC #cj_debug
        for rcv_id in uniq_rcv_ids:
            rcv_id = rcv_id.split('.')[0]
            if rcv_id in vars_to_summuary:
                rec.info.REFTX = vars_to_summuary[rcv_id].REFTX
                if vars_to_summuary[rcv_id].HGVSc:
                    rec.info.HGVSc = vars_to_summuary[rcv_id].HGVSc
                    mObj = re.search(r'c\.(.*)([\+\-]\d+)\D+', rec.info.HGVSc)
                    if mObj:
                        SPLOC = mObj.group(2)
                        if abs(int(SPLOC)) < 3:
                            rec.info.SPLOC = SPLOC
                if vars_to_summuary[rcv_id].HGVSp:
                    rec.info.HGVSp = vars_to_summuary[rcv_id].HGVSp
                if vars_to_summuary[rcv_id].DATE:
                    rec.info.DATE = vars_to_summuary[rcv_id].DATE
                if vars_to_summuary[rcv_id].REV:
                    rec.info.REV = vars_to_summuary[rcv_id].REV
                if vars_to_summuary[rcv_id].variation_id in submissions:
                    cmethods = list(set(submissions[vars_to_summuary[rcv_id].variation_id].collection_methods))
                    # print 'cmethods:',cmethods #cj_debug
                    rec.info.CLNMETHOD = '|'.join(cmethods)
                # NOTE(review): `found` is assigned here but never read in
                # this function (unlike append_annotation_to_vcf).
                found = True
                break
        rec.info.CLNACC = uniq_rcv_ids
        for j,clndbn in enumerate(rec.info.CLNDBN):
            rec.info.CLNDBN[j] = clndbn.replace('\\x2c_', ',').replace('\\x2c', ',')
        v.write(ostream, rec)
    ostream.close()
    v.stream.close()
    print 'Done.'


def pathogenic_per_gene(cds_len_per_gene,hgmd_on=False):
    """Accumulate per-gene pathogenicity profiles normalized by CDS length.

    Returns a dict gene -> [[benign lof,nsm], [vus ...], [pathogenic ...], cds_len].
    """
    clnStat = Clinvar()
    vartypes = clnStat.count_lofs()
    # search for hgmd
    if hgmd_on:
        hgmdStat = lib_hgmd.HgmdRegion()
        vartypes = hgmdStat.count_lofs(vartypes = vartypes)
    # count vevent per gene
    known_patho_profs = {}
    for csigs in vartypes.itervalues():
        gene = csigs[-1]
        if gene not in known_patho_profs:
            cds_len = 480*1.25 #average CDS length in one transcript = 480
            if gene in cds_len_per_gene:
                cds_len = cds_len_per_gene[gene]
            # (benign, vus, pathogenic) x (lof,nsm)
            known_patho_profs[gene] = [[0, 0], [0, 0], [0, 0], cds_len]
        # NOTE(review): if *gene* was seen in an earlier iteration, `cds_len`
        # below is the value left over from the previous gene, not this
        # gene's stored length (known_patho_profs[gene][3]) -- confirm intent.
        for i, csig in enumerate(csigs[:-1]):
            for j, vtype in enumerate(csig):
                known_patho_profs[gene][i][j] += 100.*vtype/cds_len
    return known_patho_profs


def known_pathov_stats(reuse=True, has_hgmd_license=False):
    """
    to retrieve variant types (LOF, missense, etc) from known pathogenic
    mutation database (clinvar or HGMD); results are cached with dill under
    fileconfig.FILECONFIG['PATHOG_PROF'] and reloaded when *reuse* is True.
    :return: dict as produced by pathogenic_per_gene
    """
    pathog_prof_pyv = fileconfig.FILECONFIG['PATHOG_PROF']
    if reuse and os.path.exists(pathog_prof_pyv):
        msg = 'loading some statistics on known pathogenic variants (%s) ...' % pathog_prof_pyv
        msgout('notice', msg)
        fp = open(pathog_prof_pyv, 'rb')
        pathov_prof_gene = dill.load(fp)
        fp.close()
    else:
        refgene = Refgene()
        cds_len_per_gene = refgene.get_cds_len_per_gene()
        pathov_prof_gene = pathogenic_per_gene(cds_len_per_gene, hgmd_on=has_hgmd_license)
        fpw = open(pathog_prof_pyv, 'wb')
        dill.dump(pathov_prof_gene, fpw)
        fpw.close()
    #TODO: use SVM to infer optimal variables to classify benign vs. pathogenic
    return pathov_prof_gene


def main():
    """Command-line entry point: annotate a ClinVar VCF from the side files."""
    desc = 'add RefSeq Transcript name, cDNA change, AA change in HGVS nomenclature, and review/date into Clinvar VCF file.'
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('-i', '--input', dest='vcf_fn', type=str, required=True, help='input clinvar VCF')
    parser.add_argument('-c', '--citation', dest='citation_fn', type=str, required=True, help='input clinvar citation')  # Clinvar db provides a separate tab delimited file
    parser.add_argument('-s', '--summary', dest='summary_fn', type=str, required=True, help='input clinvar summary')  #Clinvar db provides a separate tab delimited file
    parser.add_argument('-S', '--submit', dest='submit_fn', type=str, required=True, help='input clinvar submit')  #Clinvar db provides a separate tab delimited file
    parser.add_argument('-o', '--output', dest='out_fn', type=str, required=True, help='output clinvar VCF')
    args = parser.parse_args()

    linked_ids = store_variant_citations(args.citation_fn)
    dAnnotation = store_variant_summary(args.summary_fn, linked_ids)
    dSubmission = store_submission_summary_fn2(args.submit_fn)
    append_annotation_to_vcf2(args.vcf_fn, dAnnotation, dSubmission, args.out_fn)


if __name__ == "__main__":
    main()

# class Clinvar(snpdb.SNPDB):
#
#     def __init__(self):
#
#         #open db connection
#         name = 'CLINVARDB'
#         super(Clinvar, self).__init__(name)
#         if name in dbconfig.DBCONFIG:
#             self.load(name=name)
#         elif os.path.exists(name):
#             self.load(db=name)
#         else:
#             raise ValueError('No such database %s' % name)
11,991
5,575
# Copyright (c) 2019, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: MIT
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/MIT
import os
import json
import time

import torch
import numpy as np

from dist_train.utils.shared_optim import SharedAdam as Adam
from dist_train.workers.base import EpisodicOffPolicyManager, OffPolicyManager, OnPolicyManager, PPOManager


class EpisodicOffPolicy(EpisodicOffPolicyManager):
    """Episodic off-policy worker: plays whole episodes into a replay buffer."""

    def rollout_wrapper(self, c_ep_counter):
        """Play one episode, push its transitions to the replay buffer, log it."""
        st = time.time()
        self.agent_model.play_episode()
        # Add episode for training.
        self.replay_buffer.add_episode(self.agent_model.transitions_for_buffer(training=True))
        dur = time.time() - st
        # Calculate losses to allow dense logging
        episode_stats = self.agent_model.episode_summary()
        self._log_rollout(c_ep_counter, dur, episode_stats)

    def _log_rollout(self, c_ep_counter, dur, episode_stats):
        """Increment the step counters, place the log in the epoch buffer,
        and give a quick rollout print."""
        c_ep_counter += 1
        self.time_keeper['n_rounds'] += 1
        n_steps = int(self.agent_model.train_steps.data.item()) + int(c_ep_counter.item())
        # Timestamp without the decimal point so keys sort lexicographically.
        timestamp = ''.join('{:017.4f}'.format(time.time()).split('.'))
        log = {'{:d}.{}'.format(n_steps, timestamp): [str(sl) for sl in episode_stats]}
        self.epoch_buffer.append(log)
        dense_save = False  # (int(self.time_keeper['n_rounds']) % self.settings.ep_save) == 0 and self.rank == 0
        log_str = '{:10d} - {} {:6d} Dur = {:6.2f}, Steps = {:3d} {} {}'.format(
            n_steps,
            '*' if dense_save else ' ',
            int(self.time_keeper['n_rounds']),
            dur,
            int(self.agent_model.n_steps),
            '!!!' if int(self.agent_model.was_success) else ' ',
            '*' if dense_save else ' '
        )
        self.logger.info(log_str)

    def eval_wrapper(self):
        """Run evaluation episodes; return (per-episode stats, dumped episodes)."""
        stats = []
        episodes = {}
        for evi in range(self.config.get('eval_iters', 10)):
            self.agent_model.play_episode(do_eval=self.config.get('greedy_eval', True))
            ep_stats = [float(x) for x in self.agent_model.episode_summary()]
            stats.append(ep_stats)
            dump_ep = []
            for t in self.agent_model.curr_ep:
                dump_t = {k: np.array(v.detach()).tolist() for k, v in t.items()}
                dump_ep.append(dump_t)
            episodes[evi] = dump_ep
        return stats, episodes


class OffPolicy(OffPolicyManager):
    """Step-based off-policy worker: collects a fixed number of transitions."""

    def env_transitions_wrapper(self, c_step_counter, num_transitions):
        """Collect *num_transitions* environment steps and buffer them."""
        # Collect transitions and update counter
        self.agent_model.collect_transitions(num_transitions, skip_im_rew=True)
        c_step_counter += num_transitions
        # Add episode for training
        self.replay_buffer.add_episode(self.agent_model.transitions_for_buffer(training=True))

    def eval_wrapper(self):
        """Run evaluation episodes; tensors are moved to CPU before dumping."""
        stats = []
        episodes = {}
        for evi in range(self.config.get('eval_iters', 10)):
            self.agent_model.play_episode(do_eval=self.config.get('greedy_eval', True))
            ep_stats = [float(x) for x in self.agent_model.episode_summary()]
            stats.append(ep_stats)
            dump_ep = []
            for t in self.agent_model.curr_ep:
                dump_t = {k: np.array(v.cpu().detach()).tolist() for k, v in t.items()}
                dump_ep.append(dump_t)
            episodes[evi] = dump_ep
        return stats, episodes


class HierarchicalEpisodicOffPolicy(EpisodicOffPolicy):
    """Episodic off-policy worker with a separately-optimized low-level policy."""

    def __init__(self, rank, config, settings):
        super().__init__(rank, config, settings)
        self.optim_lo_path = os.path.join(self.exp_dir, 'optim_lo.pth.tar')
        self.optim_lo = Adam(self.agent_model._lo_parameters, lr=config['learning_rate'])
        if os.path.isfile(self.optim_lo_path):
            self.optim_lo.load_state_dict(torch.load(self.optim_lo_path))

    def checkpoint(self):
        """Checkpoint the shared state, then the low-level optimizer."""
        super().checkpoint()
        # Bug fix: save the optimizer's state dict, not the optimizer object.
        # __init__ restores with self.optim_lo.load_state_dict(torch.load(...)),
        # which expects a state dict and would fail on a pickled optimizer.
        torch.save(self.optim_lo.state_dict(), self.optim_lo_path)

    def rollout_wrapper(self, c_ep_counter):
        """Play and relabel one episode, buffer it, and log the rollout."""
        st = time.time()
        self.agent_model.play_episode(optim_lo=self.optim_lo)
        self.agent_model.relabel_episode()
        # Add episode for training.
        self.replay_buffer.add_episode(self.agent_model.transitions_for_buffer(training=True))
        dur = time.time() - st
        # Calculate losses to allow dense logging
        episode_stats = self.agent_model.episode_summary()
        self._log_rollout(c_ep_counter, dur, episode_stats)


class OnPolicy(OnPolicyManager):
    """On-policy worker: plays an episode and returns the condensed loss."""

    def rollout_wrapper(self, c_ep_counter):
        """Play one evaluation-mode episode, then compute the training loss."""
        st = time.time()
        self.agent_model.eval()
        self.agent_model.play_episode()
        self.agent_model.train()
        loss = self.condense_loss(self.agent_model())
        dur = time.time() - st
        # Calculate losses to allow dense logging
        episode_stats = self.agent_model.episode_summary()
        self._log_rollout(c_ep_counter, dur, episode_stats)
        return loss

    def _log_rollout(self, c_ep_counter, dur, episode_stats):
        """Optionally dump the episode, then append stats to this rank's history file."""
        c_ep_counter += 1
        n_steps = int(self.agent_model.train_steps.data.item()) + int(c_ep_counter.item())
        timestamp = ''.join('{:017.4f}'.format(time.time()).split('.'))
        dense_save = False  # (int(self.time_keeper['n_rounds']) % self.settings.ep_save) == 0 and self.rank == 0
        # The burden to save falls to us
        if dense_save:
            dstr = '{:010d}.{}'.format(n_steps, timestamp)
            config_path = self.settings.config_path
            exp_name = config_path.split('/')[-1][:-5]
            exp_dir = os.path.join(self.settings.log_dir, exp_name)
            c_path = os.path.join(exp_dir, dstr + '.json')
            dump_ep = []
            for t in self.agent_model.curr_ep:
                dump_t = {k: np.array(v.detach()).tolist() for k, v in t.items()}
                dump_ep.append(dump_t)
            with open(c_path, 'wt') as f:
                json.dump(dump_ep, f)
            self.time_keeper['ep_save'] = int(self.time_keeper['n_rounds'])
        # Increment the steps counters and log the results.
        self.time_keeper['n_rounds'] += 1
        hist_name = 'hist_{}.json'.format(self.rank)
        with open(os.path.join(self.exp_dir, hist_name), 'a') as save_file:
            log = {'{:d}.{}'.format(n_steps, timestamp): [str(sl) for sl in episode_stats]}
            save_file.write(json.dumps(log))
            # (removed redundant save_file.close(); the with-block closes it)
        log_str = '{:10d} - {} {:6d} Dur = {:6.2f}, Steps = {:3d} {} {}'.format(
            n_steps,
            '*' if dense_save else ' ',
            int(self.time_keeper['n_rounds']),
            dur,
            int(self.agent_model.n_steps),
            '!!!' if int(self.agent_model.was_success) else ' ',
            '*' if dense_save else ' '
        )
        self.logger.info(log_str)

    def eval_wrapper(self):
        """Run evaluation episodes; return (per-episode stats, dumped episodes)."""
        stats = []
        episodes = {}
        for evi in range(self.config.get('eval_iters', 10)):
            self.agent_model.play_episode(do_eval=bool(self.config.get('greedy_eval', True)))
            ep_stats = [float(x) for x in self.agent_model.episode_summary()]
            stats.append(ep_stats)
            dump_ep = []
            for t in self.agent_model.curr_ep:
                dump_t = {k: np.array(v.detach()).tolist() for k, v in t.items()}
                dump_ep.append(dump_t)
            episodes[evi] = dump_ep
        return stats, episodes


class PPO(PPOManager, OnPolicy):
    """PPO worker: rolls out to a fixed horizon instead of episode boundaries."""

    def rollout_wrapper(self, c_ep_counter):
        st = time.time()
        self.agent_model.reach_horizon()
        dur = time.time() - st
        # Calculate losses to allow dense logging
        episode_stats = self.agent_model.episode_summary()
        self._log_rollout(c_ep_counter, dur, episode_stats)


class HierarchicalPPO(PPO):
    """PPO worker with a separately-optimized low-level policy."""

    def __init__(self, rank, config, settings):
        super().__init__(rank, config, settings)
        self.optim_lo_path = os.path.join(self.exp_dir, 'optim_lo.pth.tar')
        self.optim_lo = Adam(self.agent_model._lo_parameters, lr=config['learning_rate'])
        if os.path.isfile(self.optim_lo_path):
            self.optim_lo.load_state_dict(torch.load(self.optim_lo_path))

    def checkpoint(self):
        """Checkpoint the shared state, then the low-level optimizer."""
        super().checkpoint()
        # Bug fix: save the state dict to match load_state_dict() in __init__
        # (the original pickled the whole optimizer object).
        torch.save(self.optim_lo.state_dict(), self.optim_lo_path)

    def rollout_wrapper(self, c_ep_counter):
        st = time.time()
        self.agent_model.reach_horizon(optim_lo=self.optim_lo)
        dur = time.time() - st
        # Calculate losses to allow dense logging
        episode_stats = self.agent_model.episode_summary()
        self._log_rollout(c_ep_counter, dur, episode_stats)
8,820
2,933
# see https://www.codewars.com/kata/5a908da30025e995880000e3/solutions/python


def is_prime(number):
    """Trial-division primality test for number >= 2."""
    return all(number % i != 0 for i in range(2, int(number ** .5) + 1))


def prime_series(number):
    """Concatenate the decimal digits of every prime below *number*.

    Uses str.join instead of repeated string concatenation (which is
    quadratic), and a named helper instead of an assigned lambda.
    """
    return ''.join(str(n) for n in range(2, number) if is_prime(n))


def solve(a, b):
    """Return *b* digits of the concatenated-primes string starting at index *a*.

    round((a + b) * 2.12) empirically over-estimates the sieve bound so the
    digit string is always long enough to cover the requested slice.
    """
    c = prime_series(round((a + b) * 2.12))
    return c[a:a + b]


if __name__ == '__main__':
    # Kata test harness; guarded so importing this module has no side effects
    # and does not require the TestFunction package.
    from TestFunction import Test
    Test = Test(None)
    Test.assert_equals(solve(2, 2), '57')
    Test.assert_equals(solve(10, 3), '192')
    Test.assert_equals(solve(20, 9), '414347535')
    Test.assert_equals(solve(30, 12), '616771737983')
    Test.assert_equals(solve(40, 8), '83899710')
    Test.assert_equals(solve(50, 6), '031071')
    Test.assert_equals(solve(10000, 5), '02192')
    Test.assert_equals(solve(20000, 5), '09334')
779
376
# Benchmark driver: times the hackysack microbenchmark for each concurrency
# library and writes raw timings to <lib>.results files.
# NOTE: this is Python 2 code (print statements).
from pygooglechart import Chart
from pygooglechart import SimpleLineChart
from pygooglechart import Axis

# stackless is only present on a Stackless Python interpreter; fall back to
# skipping its benchmark elsewhere.
try:
    import stackless
except ImportError:
    stackless = None

import timeit
import gc

results = dict()
# Message counts 10000..90000 (drop the leading 0).
RANGE = range(0, 100000, 10000)[1:]
if stackless:
    libs = ["stacklessb", "fibrab", "kamaeliab"]
else:
    libs = ["fibrab", "kamaeliab"]

for lib in libs:
    print lib
    for i in RANGE:
        print i
        # Import happens in the timer's setup so it is excluded from the
        # measured statement.
        t = timeit.Timer(setup="import %s.hackysack"%lib,
                         stmt="%s.hackysack.runit(%d, 1000, dbg=0)"%(lib, i))
        v = t.timeit(1)
        # Collect between runs so one library's garbage doesn't skew the next.
        gc.collect()
        results.setdefault(lib, []).append(v)

# Persist raw timings; repr() keeps full float precision.
for lib, v in results.items():
    open("%s.results"%lib, "w").write(repr(v))
707
277
import numpy as np
from scipy.linalg import eigh

import voice_activity_detector
import features_extraction
import statistics
import utils


def get_sigma(ubm, space_dimension):
    """Flatten the UBM covariances (column-major) into one long vector and
    tile it *space_dimension* times column-wise.

    :param ubm: trained UBM exposing a 2-D ``covariances`` attribute
    :param space_dimension: total-variability space dimension
    :return: array of shape (n_components * n_features, space_dimension)
    """
    sigma = np.zeros(shape=(len(ubm.covariances) * len(ubm.covariances[0])))
    k = 0
    for i in range(len(ubm.covariances[0])):
        for j in range(len(ubm.covariances)):
            sigma[k] = ubm.covariances[j][i]
            k += 1
    repeat_sigma = np.repeat(np.transpose(sigma)[:, np.newaxis],
                             space_dimension, axis=1)
    return repeat_sigma


def save_i_vector_model(path, i_vector, speaker, components_number):
    """Persist a speaker's i-vector model with np.save.

    Bug fix: the original called ``f.close`` without parentheses, so the file
    handle was never actually closed; a ``with`` block guarantees it.
    """
    with open(path + "/ivectors/" + speaker + "_ivector_model_" +
              str(components_number) + ".txt", "wb") as f:
        np.save(f, i_vector)


def load_i_vector_model(path, speaker, components_number):
    """Load a speaker's i-vector model saved by save_i_vector_model."""
    with open(path + "/ivectors/" + speaker + "_ivector_model_" +
              str(components_number) + ".txt", "rb") as f:
        i_vector = np.load(f)
    return i_vector


def save_i_vectors(path, i_vectors, speaker, components_number):
    """Persist the per-utterance i-vectors of one speaker."""
    with open(path + "/ivectors/" + speaker + "_ivector_" +
              str(components_number) + ".txt", "wb") as f:
        np.save(f, i_vectors)


def extract_i_vector_from_signal(ubm, utterance_path, t_matrix, space_dimension,
                                 mfcc_number, frame_duration, step_duration,
                                 sigma):
    """Extract the i-vector of one utterance.

    Runs VAD, extracts MFCCs, computes Baum-Welch statistics against the UBM,
    and projects the centered first-order statistics through the
    total-variability matrix *t_matrix*.
    """
    t_matrix_divides_sigma = np.divide(t_matrix, sigma)
    t_matrix_divides_sigma_transpose = np.transpose(t_matrix_divides_sigma)
    identity_matrix = np.eye(space_dimension, dtype=float)

    vad_object = voice_activity_detector.Vad(utterance_path, 2)
    signal_samples, sample_rate = vad_object.get_speech_signal()
    del vad_object

    mfcc = features_extraction.FeaturesExtraction(mfcc_number, True,
                                                  frame_duration, step_duration)
    features = mfcc.extract_mfcc_from_signal(signal_samples, sample_rate)
    log_likelihood = statistics.log_likelihood_computation(features, ubm)
    n, f, s = statistics.statistics_computation(log_likelihood, features)

    # first order statistics are centered by the mean vector
    f = np.subtract(f, np.multiply(np.transpose(
        np.repeat(n[:, np.newaxis], np.shape(ubm.means)[1], axis=1)),
        np.transpose(ubm.means)))

    # i-vector computation: i = (I + T' Sigma^-1 N T)^-1 T' Sigma^-1 f
    i1 = np.matmul(np.transpose(
        np.multiply(t_matrix_divides_sigma, np.repeat(
            np.transpose(np.repeat(n, np.shape(features)[1]))[:, np.newaxis],
            space_dimension, axis=1))), t_matrix)
    i2 = np.matmul(np.linalg.pinv(np.add(identity_matrix, i1)),
                   t_matrix_divides_sigma_transpose)
    # Stack the centered statistics column-by-column into one long vector.
    i3 = []
    for i in range(np.shape(f)[1]):
        if i == 0:
            i3 = np.transpose(f)[i]
        else:
            i3 = np.concatenate((i3, np.transpose(f)[i]), axis=0)
    i_vector = np.matmul(i2, i3)
    return i_vector


def extract_i_vectors(path, ubm, train_paths, t_matrix, space_dimension,
                      mfcc_number, frame_duration, step_duration,
                      components_number):
    """Extract and save the i-vectors for every speaker in *train_paths*."""
    sigma = get_sigma(ubm, space_dimension)
    speakers_list = train_paths.keys()
    ivectors = {}
    for speaker in speakers_list:
        ivector_per_file = []
        for file in range(len(train_paths[speaker])):
            ivector_per_file.append(
                extract_i_vector_from_signal(ubm, train_paths[speaker][file],
                                             t_matrix, space_dimension,
                                             mfcc_number, frame_duration,
                                             step_duration, sigma))
        i_vectors = np.transpose(np.dstack(ivector_per_file)[0])
        # ivectors[speaker] = i_vectors
        save_i_vectors(path, i_vectors, speaker, components_number)


def LDA_projection_matrix(ivectors):
    """Compute the LDA projection matrix from per-speaker i-vector lists.

    :param ivectors: list (one entry per speaker) of 2-D i-vector arrays
    :return: (projection_matrix, projected i-vectors)
    """
    # LDA projection matrix
    ivector_list = ivectors
    cat_list = utils.concatenate_ivectors(ivector_list)
    projection_matrix = np.identity(len(ivector_list[0][0]))
    num_eigen_vectors = len(ivector_list)
    sw = np.zeros(np.shape(projection_matrix))
    sb = np.zeros(np.shape(projection_matrix))
    wbar = np.mean(cat_list, axis=0)
    # Accumulate within-class (sw) and between-class (sb) scatter.
    for lists in ivector_list:
        ws = lists
        wsbar = np.mean(ws, axis=0)
        ws_sub = np.reshape(np.subtract(wsbar, wbar), (np.shape(wbar)[0], 1))
        ws_mul = np.matmul(ws_sub, np.transpose(ws_sub))
        sb = np.add(sb, ws_mul)
        ws_cov = np.cov(np.transpose(ws), bias=True)
        sw = np.add(sw, ws_cov)
    eigvals, eigvecs = eigh(sb, sw, eigvals_only=False)
    # NOTE(review): this zips eigenvalues with *rows* of eigvecs, while
    # scipy.linalg.eigh returns eigenvectors as columns; also, sorted() will
    # compare the array elements if two eigenvalues tie -- confirm intent.
    zipped_eig = zip(eigvals, eigvecs)
    sorted_zipped_eig = sorted(zipped_eig, reverse=True)
    sortedd = [element for _, element in sorted_zipped_eig]
    a_matrix = []
    for i in range(num_eigen_vectors):
        a_matrix.append(sortedd[i])
    a_matrix = np.dstack(a_matrix)
    a_matrix = np.rollaxis(a_matrix[0], -1)
    # L2-normalize each projection direction.
    a_matrix = np.divide(a_matrix, np.repeat(
        np.linalg.norm(a_matrix, axis=1)[:, np.newaxis],
        len(a_matrix[0]), axis=1))
    ivectors_fin = np.matmul(a_matrix, np.transpose(cat_list))
    projection_matrix = np.matmul(a_matrix, projection_matrix)
    return projection_matrix, ivectors_fin


def WCCN_projection_matrix(lda_projection_matrix, ivectors, utterances):
    """Apply within-class covariance normalization on top of the LDA matrix.

    :param lda_projection_matrix: matrix returned by LDA_projection_matrix
    :param ivectors: LDA-projected i-vectors (dimensions x total utterances)
    :param utterances: dict speaker -> number of utterances (defines how the
        columns of *ivectors* are split per speaker)
    """
    num_eigen_vectors = len(ivectors)
    alpha = 0.9  # shrinkage toward the identity for numerical stability
    ivv = []
    index = 0
    utt_keys = utterances.keys()
    start = 0
    final = 0
    # Slice the projected i-vectors back into per-speaker blocks.
    for i in utt_keys:
        final += utterances[i]
        iv = np.zeros((num_eigen_vectors, utterances[i]))
        for j in range(num_eigen_vectors):
            iv[j] = ivectors[j][start:final]
        ivv.append(iv)
        index += 1
        start += utterances[i]
    w_ = np.zeros((len(lda_projection_matrix), len(lda_projection_matrix)))
    for i in range(len(ivv)):
        w_ = np.add(w_, np.cov(ivv[i], bias=True))
    w_ = np.divide(w_, np.full((np.shape(w_)[0], np.shape(w_)[1]),
                               num_eigen_vectors))
    w_ = np.add(
        np.multiply(np.full((np.shape(w_)[0], np.shape(w_)[1]), 1 - alpha), w_),
        np.multiply(np.full((np.shape(w_)[0], np.shape(w_)[1]), alpha),
                    np.identity(np.shape(w_)[0])))
    b_matrix = np.linalg.cholesky(
        np.linalg.pinv(w_))  # nearestPD(np.linalg.pinv(w_)))
    wccn_projection_matrix = np.matmul(b_matrix, lda_projection_matrix)
    return wccn_projection_matrix


def load_projection_matrix(path, components_number):
    """Load a projection matrix saved by save_projection_matrix."""
    with open(path + "/models/projection_matrix_" + str(components_number) +
              ".txt", "rb") as f:
        p_matrix = np.load(f)
    return p_matrix


def save_projection_matrix(path, components_number, p_matrix):
    """Persist a projection matrix (bug fix: file is now actually closed)."""
    with open(path + "/models/projection_matrix_" + str(components_number) +
              ".txt", "wb") as f:
        np.save(f, p_matrix)
7,445
2,548
# Handles file uploads in Python Tornado: http://tornadoweb.org/
import tornado.web
import logging
import os
import uuid


def uuid_naming_strategy(original_name):
    "File naming strategy that ignores original name and returns an UUID"
    return str(uuid.uuid4())


class UploadHandler(tornado.web.RequestHandler):
    "Handle file uploads."

    def initialize(self, upload_path, naming_strategy):
        """Initialize with given upload path and naming strategy.

        :keyword upload_path: The upload path.
        :type upload_path: str
        :keyword naming_strategy: File naming strategy; defaults to
            uuid_naming_strategy when None.
        :type naming_strategy: (str) -> str function
        """
        self.upload_path = upload_path
        if naming_strategy is None:
            naming_strategy = uuid_naming_strategy
        self.naming_strategy = naming_strategy

    def post(self):
        """Save the first uploaded 'filearg' file under a generated name."""
        fileinfo = self.request.files['filearg'][0]
        filename = self.naming_strategy(fileinfo['filename'])
        try:
            # Bug fix: fileinfo['body'] is bytes, so the file must be opened
            # in binary mode; text mode raises TypeError on Python 3 and
            # could corrupt binary uploads on Python 2/Windows.
            with open(os.path.join(self.upload_path, filename), 'wb') as fh:
                fh.write(fileinfo['body'])
            logging.info("%s uploaded %s, saved as %s",
                         str(self.request.remote_ip),
                         str(fileinfo['filename']),
                         filename)
        except IOError as e:
            logging.error("Failed to write file due to IOError %s", str(e))
1,412
401
# Program 15d: Plotting a Newton fractal.
# See Figure 15.7.
from PIL import Image

width = height = 512
image = Image.new('RGB', (width, height))
xmin, xmax = -1.5, 1.5
ymin, ymax = -1.5, 1.5

max_iter = 20
h = 1e-6    # Step size
eps = 1e-3  # Maximum error


def f(z):
    """Complex function whose roots the Newton iteration converges to."""
    return z**3 - 1.0


# Constant complex step used for the numerical derivative.
step = complex(h, h)

# Draw the fractal: color each pixel by the number of Newton iterations
# needed to converge from the corresponding point of the complex plane.
for row in range(height):
    zy = row * (ymax - ymin) / (height - 1) + ymin
    for col in range(width):
        zx = col * (xmax - xmin) / (width - 1) + xmin
        z = complex(zx, zy)
        for i in range(max_iter):
            # Complex numerical derivative.
            dz = (f(z + step) - f(z)) / step
            # Newton iteration.
            z_next = z - f(z) / dz
            # Stop when close enough to any root.
            if abs(z_next - z) < eps:
                break
            z = z_next
        image.putpixel((col, row), (i % 4 * 64, i % 8 * 32, i % 16 * 16))

image.save('Newton_Fractal.png', 'PNG')
image.show()
947
394
import time
import tweepy as twitter
import os

# Current hour on a 12-hour dial (0 o'clock becomes 12).
superhour = time.localtime().tm_hour
hour = superhour % 12
if hour == 0:
    hour = 12

# Build the Portuguese "tears on the dial" sentence. The four original
# branches collapse into three independent choices:
#   - plural suffix: empty only at 1 o'clock;
#   - Azores clause: "12 lágrimas" at 1 o'clock, otherwise one tear less;
#   - trailing period: only before noon.
template = "Tenho %d lágrima%s no canto do mostrador, %s nos Açores%s"
plural = "" if hour == 1 else "s"
azores = "12 lágrimas" if hour == 1 else "menos uma lágrima"
ending = "" if superhour >= 12 else "."
sentence = template % (hour, plural, azores, ending)

CONSUMER_KEY = os.getenv('CONSUMER_KEY')
CONSUMER_SECRET = os.getenv('CONSUMER_SECRET')
ACCESS_TOKEN = os.getenv('ACCESS_TOKEN')
ACCESS_TOKEN_SECRET = os.getenv('ACCESS_TOKEN_SECRET')

auth = twitter.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = twitter.API(auth)
api.update_status(status=sentence)
870
371
# -*- coding: utf-8 -*- from .CacheDefaults import CacheDefaults from ..caches.AbstractCacheParameters import AbstractCacheParameters class CacheParameters(AbstractCacheParameters): """Decorator parameters.""" _defaults = CacheDefaults()
244
67
#! /usr/bin/env python
# Single-client TCP server that receives Caesar-ciphered messages and, when
# asked, decodes them with a key typed by the operator.
# import thread
import threading
import os.path
import random
import hashlib
import socket
import time
import os
import copy
import socket

letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'

host = ''      # bind on all interfaces
port = 9093
addr = (host, port)

serv_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serv_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serv_socket.bind(addr)
serv_socket.listen(1)

tam_mensagem = ""

print('Aguardando cliente...')
con, cliente = serv_socket.accept()
print('Na espera de mensagem')

# Bug fix: the original loop tested ``pega_mensagem != 0``, but recv() returns
# bytes and yields b'' (never 0) when the client disconnects, so the loop
# could never terminate cleanly; break explicitly on an empty read instead.
while True:
    pega_mensagem = con.recv(1024)
    if not pega_mensagem:
        # Cliente encerrou a conexao.
        break
    alfabeto_normal = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    print("Chegou a mensagem " + pega_mensagem.decode('utf-8'))
    pega_mensagem = pega_mensagem.decode('utf-8')
    # se encontrar o caractere retorna seu indice
    if (pega_mensagem.find('!', 0, len(pega_mensagem)) != -1):
        tam_mensagem = len(pega_mensagem)/2 + 1
    # Codificar aqui o hacker de Cesar
    if (pega_mensagem.find(')', 0, len(pega_mensagem)) != -1):
        tmp = pega_mensagem.split(')')
        pega_mensagem = tmp[0]
        decipher_text = ''
        print(pega_mensagem)
        chave = input("Digite a chave de quebra da mensagem")
        decodedMessage = []
        # Shift each letter back by the key, wrapping around the alphabet.
        for letter in pega_mensagem:
            indexLetterInAlfabet = alfabeto_normal.find(letter)
            letterDecoded = indexLetterInAlfabet - int(chave)
            if letterDecoded < 0:
                print(letterDecoded, 'foi menor que 0')
                letterDecoded += 26
            print(indexLetterInAlfabet, letterDecoded)
            decodedMessage.append(alfabeto_normal[letterDecoded])
        output = ''.join(decodedMessage)
        print(output)

# Codificar aqui a decifragem da mensagem
con.close()
serv_socket.close()
1,816
678
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'OhsomeToolsDialogUI.ui' # # Created by: PyQt5 UI code generator 5.15.4 # # WARNING: Any manual changes made to this file will be lost when pyuic5 is # run again. Do not edit this file unless you know what you are doing. from PyQt5 import QtCore, QtGui, QtWidgets class Ui_OhsomeToolsDialogBase(object): def setupUi(self, OhsomeToolsDialogBase): OhsomeToolsDialogBase.setObjectName("OhsomeToolsDialogBase") OhsomeToolsDialogBase.resize(462, 1416) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( OhsomeToolsDialogBase.sizePolicy().hasHeightForWidth() ) OhsomeToolsDialogBase.setSizePolicy(sizePolicy) OhsomeToolsDialogBase.setToolTipDuration(-1) OhsomeToolsDialogBase.setSizeGripEnabled(True) self.verticalLayout_5 = QtWidgets.QVBoxLayout(OhsomeToolsDialogBase) self.verticalLayout_5.setSizeConstraint( QtWidgets.QLayout.SetMinAndMaxSize ) self.verticalLayout_5.setObjectName("verticalLayout_5") self.resources_group = QtWidgets.QGroupBox(OhsomeToolsDialogBase) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.resources_group.sizePolicy().hasHeightForWidth() ) self.resources_group.setSizePolicy(sizePolicy) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.resources_group.setFont(font) self.resources_group.setAlignment(QtCore.Qt.AlignCenter) self.resources_group.setFlat(False) self.resources_group.setObjectName("resources_group") self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.resources_group) self.horizontalLayout_3.setObjectName("horizontalLayout_3") self.api_label = QtWidgets.QLabel(self.resources_group) font = QtGui.QFont() font.setBold(False) font.setWeight(50) 
self.api_label.setFont(font) self.api_label.setAlignment(QtCore.Qt.AlignCenter) self.api_label.setOpenExternalLinks(True) self.api_label.setObjectName("api_label") self.horizontalLayout_3.addWidget(self.api_label) self.ohsome_label = QtWidgets.QLabel(self.resources_group) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.ohsome_label.setFont(font) self.ohsome_label.setAlignment(QtCore.Qt.AlignCenter) self.ohsome_label.setOpenExternalLinks(True) self.ohsome_label.setObjectName("ohsome_label") self.horizontalLayout_3.addWidget(self.ohsome_label) self.oqt_label = QtWidgets.QLabel(self.resources_group) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.oqt_label.setFont(font) self.oqt_label.setAlignment(QtCore.Qt.AlignCenter) self.oqt_label.setOpenExternalLinks(True) self.oqt_label.setObjectName("oqt_label") self.horizontalLayout_3.addWidget(self.oqt_label) self.verticalLayout_5.addWidget(self.resources_group) self.groupBox_4 = QtWidgets.QGroupBox(OhsomeToolsDialogBase) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.groupBox_4.sizePolicy().hasHeightForWidth() ) self.groupBox_4.setSizePolicy(sizePolicy) font = QtGui.QFont() font.setPointSize(11) self.groupBox_4.setFont(font) self.groupBox_4.setAlignment(QtCore.Qt.AlignCenter) self.groupBox_4.setFlat(True) self.groupBox_4.setObjectName("groupBox_4") self.gridLayout_2 = QtWidgets.QGridLayout(self.groupBox_4) self.gridLayout_2.setObjectName("gridLayout_2") self.frame_2 = QtWidgets.QFrame(self.groupBox_4) self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel) self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised) self.frame_2.setObjectName("frame_2") self.gridLayout_4 = QtWidgets.QGridLayout(self.frame_2) self.gridLayout_4.setObjectName("gridLayout_4") self.provider_combo = QtWidgets.QComboBox(self.frame_2) sizePolicy = QtWidgets.QSizePolicy( 
QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.provider_combo.sizePolicy().hasHeightForWidth() ) self.provider_combo.setSizePolicy(sizePolicy) self.provider_combo.setMinimumSize(QtCore.QSize(150, 25)) self.provider_combo.setObjectName("provider_combo") self.gridLayout_4.addWidget(self.provider_combo, 0, 1, 1, 1) self.label_15 = QtWidgets.QLabel(self.frame_2) self.label_15.setObjectName("label_15") self.gridLayout_4.addWidget(self.label_15, 0, 0, 1, 1) self.provider_refresh = QtWidgets.QPushButton(self.frame_2) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.provider_refresh.sizePolicy().hasHeightForWidth() ) self.provider_refresh.setSizePolicy(sizePolicy) self.provider_refresh.setText("") icon = QtGui.QIcon() icon.addPixmap( QtGui.QPixmap(":/plugins/ohsomeTools/img/icon_refresh.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off, ) self.provider_refresh.setIcon(icon) self.provider_refresh.setObjectName("provider_refresh") self.gridLayout_4.addWidget(self.provider_refresh, 0, 2, 1, 1) self.provider_config = QtWidgets.QPushButton(self.frame_2) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.provider_config.sizePolicy().hasHeightForWidth() ) self.provider_config.setSizePolicy(sizePolicy) self.provider_config.setText("") icon1 = QtGui.QIcon() icon1.addPixmap( QtGui.QPixmap(":/plugins/ohsomeTools/img/icon_settings.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off, ) self.provider_config.setIcon(icon1) self.provider_config.setObjectName("provider_config") self.gridLayout_4.addWidget(self.provider_config, 0, 3, 1, 1) self.gridLayout_2.addWidget(self.frame_2, 0, 0, 1, 1) 
self.frame_3 = QtWidgets.QFrame(self.groupBox_4) self.frame_3.setFrameShape(QtWidgets.QFrame.StyledPanel) self.frame_3.setFrameShadow(QtWidgets.QFrame.Raised) self.frame_3.setObjectName("frame_3") self.horizontalLayout_7 = QtWidgets.QHBoxLayout(self.frame_3) self.horizontalLayout_7.setObjectName("horizontalLayout_7") self.ohsome_spec_selection_combo = QtWidgets.QComboBox(self.frame_3) self.ohsome_spec_selection_combo.setObjectName( "ohsome_spec_selection_combo" ) self.horizontalLayout_7.addWidget(self.ohsome_spec_selection_combo) self.ohsome_spec_preference_combo = QtWidgets.QComboBox(self.frame_3) self.ohsome_spec_preference_combo.setObjectName( "ohsome_spec_preference_combo" ) self.horizontalLayout_7.addWidget(self.ohsome_spec_preference_combo) self.ohsome_spec_preference_specification = QtWidgets.QComboBox( self.frame_3 ) self.ohsome_spec_preference_specification.setObjectName( "ohsome_spec_preference_specification" ) self.horizontalLayout_7.addWidget( self.ohsome_spec_preference_specification ) self.gridLayout_2.addWidget(self.frame_3, 2, 0, 1, 1) self.verticalLayout_5.addWidget(self.groupBox_4) self.request_types_widget = QtWidgets.QTabWidget(OhsomeToolsDialogBase) self.request_types_widget.setEnabled(True) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.request_types_widget.sizePolicy().hasHeightForWidth() ) self.request_types_widget.setSizePolicy(sizePolicy) self.request_types_widget.setMaximumSize( QtCore.QSize(16777215, 16777215) ) self.request_types_widget.setUsesScrollButtons(True) self.request_types_widget.setObjectName("request_types_widget") self.centroid_tab = QtWidgets.QWidget() sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( 
self.centroid_tab.sizePolicy().hasHeightForWidth() ) self.centroid_tab.setSizePolicy(sizePolicy) self.centroid_tab.setObjectName("centroid_tab") self.gridLayout_8 = QtWidgets.QGridLayout(self.centroid_tab) self.gridLayout_8.setObjectName("gridLayout_8") self.widget = QtWidgets.QWidget(self.centroid_tab) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.widget.sizePolicy().hasHeightForWidth() ) self.widget.setSizePolicy(sizePolicy) self.widget.setMinimumSize(QtCore.QSize(0, 0)) self.widget.setMaximumSize(QtCore.QSize(16777215, 16777215)) self.widget.setObjectName("widget") self.gridLayout = QtWidgets.QGridLayout(self.widget) self.gridLayout.setObjectName("gridLayout") self.centroid_radius_input = QtWidgets.QSpinBox(self.widget) self.centroid_radius_input.setAccessibleDescription("") self.centroid_radius_input.setMinimum(1) self.centroid_radius_input.setMaximum(999999999) self.centroid_radius_input.setProperty("value", 1000) self.centroid_radius_input.setObjectName("centroid_radius_input") self.gridLayout.addWidget(self.centroid_radius_input, 0, 1, 1, 1) self.ohsome_centroid_location_list = QtWidgets.QListWidget(self.widget) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.ohsome_centroid_location_list.sizePolicy().hasHeightForWidth() ) self.ohsome_centroid_location_list.setSizePolicy(sizePolicy) self.ohsome_centroid_location_list.setMinimumSize(QtCore.QSize(0, 0)) self.ohsome_centroid_location_list.setMaximumSize( QtCore.QSize(16777215, 16777215) ) self.ohsome_centroid_location_list.setFrameShadow( QtWidgets.QFrame.Sunken ) self.ohsome_centroid_location_list.setSelectionMode( QtWidgets.QAbstractItemView.MultiSelection ) 
self.ohsome_centroid_location_list.setResizeMode( QtWidgets.QListView.Fixed ) self.ohsome_centroid_location_list.setObjectName( "ohsome_centroid_location_list" ) self.gridLayout.addWidget( self.ohsome_centroid_location_list, 1, 1, 1, 1 ) self.widget_7 = QtWidgets.QWidget(self.widget) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.widget_7.sizePolicy().hasHeightForWidth() ) self.widget_7.setSizePolicy(sizePolicy) self.widget_7.setObjectName("widget_7") self.gridLayout_7 = QtWidgets.QGridLayout(self.widget_7) self.gridLayout_7.setObjectName("gridLayout_7") self.centroid_list_point_add = QtWidgets.QPushButton(self.widget_7) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.centroid_list_point_add.sizePolicy().hasHeightForWidth() ) self.centroid_list_point_add.setSizePolicy(sizePolicy) self.centroid_list_point_add.setText("") icon2 = QtGui.QIcon() icon2.addPixmap( QtGui.QPixmap(":/plugins/ohsomeTools/img/icon_add.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off, ) self.centroid_list_point_add.setIcon(icon2) self.centroid_list_point_add.setObjectName("centroid_list_point_add") self.gridLayout_7.addWidget(self.centroid_list_point_add, 0, 0, 1, 1) self.centroid_list_point_clear = QtWidgets.QPushButton(self.widget_7) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.centroid_list_point_clear.sizePolicy().hasHeightForWidth() ) self.centroid_list_point_clear.setSizePolicy(sizePolicy) self.centroid_list_point_clear.setText("") icon3 = QtGui.QIcon() icon3.addPixmap( QtGui.QPixmap(":/plugins/ohsomeTools/img/icon_clear.png"), QtGui.QIcon.Normal, 
QtGui.QIcon.Off, ) self.centroid_list_point_clear.setIcon(icon3) self.centroid_list_point_clear.setObjectName( "centroid_list_point_clear" ) self.gridLayout_7.addWidget(self.centroid_list_point_clear, 1, 0, 1, 1) self.gridLayout.addWidget(self.widget_7, 1, 0, 1, 1) self.centroid_radius_label = QtWidgets.QLabel(self.widget) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Maximum ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.centroid_radius_label.sizePolicy().hasHeightForWidth() ) self.centroid_radius_label.setSizePolicy(sizePolicy) self.centroid_radius_label.setObjectName("centroid_radius_label") self.gridLayout.addWidget(self.centroid_radius_label, 0, 0, 1, 1) self.gridLayout_8.addWidget(self.widget, 0, 0, 1, 1) self.request_types_widget.addTab(self.centroid_tab, "") self.point_layer_tab = QtWidgets.QWidget() self.point_layer_tab.setEnabled(True) self.point_layer_tab.setObjectName("point_layer_tab") self.gridLayout_12 = QtWidgets.QGridLayout(self.point_layer_tab) self.gridLayout_12.setObjectName("gridLayout_12") self.widget_6 = QtWidgets.QWidget(self.point_layer_tab) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.widget_6.sizePolicy().hasHeightForWidth() ) self.widget_6.setSizePolicy(sizePolicy) self.widget_6.setAutoFillBackground(False) self.widget_6.setObjectName("widget_6") self.gridLayout_5 = QtWidgets.QGridLayout(self.widget_6) self.gridLayout_5.setObjectName("gridLayout_5") self.point_layer_list = QtWidgets.QListWidget(self.widget_6) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.point_layer_list.sizePolicy().hasHeightForWidth() ) 
self.point_layer_list.setSizePolicy(sizePolicy) self.point_layer_list.setMinimumSize(QtCore.QSize(0, 0)) self.point_layer_list.setMaximumSize(QtCore.QSize(16777215, 16777215)) self.point_layer_list.setFrameShadow(QtWidgets.QFrame.Sunken) self.point_layer_list.setSelectionMode( QtWidgets.QAbstractItemView.MultiSelection ) self.point_layer_list.setResizeMode(QtWidgets.QListView.Fixed) self.point_layer_list.setObjectName("point_layer_list") self.gridLayout_5.addWidget(self.point_layer_list, 0, 1, 1, 1) self.widget_9 = QtWidgets.QWidget(self.widget_6) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.widget_9.sizePolicy().hasHeightForWidth() ) self.widget_9.setSizePolicy(sizePolicy) self.widget_9.setObjectName("widget_9") self.gridLayout_11 = QtWidgets.QGridLayout(self.widget_9) self.gridLayout_11.setObjectName("gridLayout_11") self.point_layer_list_remove = QtWidgets.QPushButton(self.widget_9) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.point_layer_list_remove.sizePolicy().hasHeightForWidth() ) self.point_layer_list_remove.setSizePolicy(sizePolicy) self.point_layer_list_remove.setText("") self.point_layer_list_remove.setIcon(icon3) self.point_layer_list_remove.setObjectName("point_layer_list_remove") self.gridLayout_11.addWidget(self.point_layer_list_remove, 1, 0, 1, 1) self.point_layer_list_add = QtWidgets.QPushButton(self.widget_9) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.point_layer_list_add.sizePolicy().hasHeightForWidth() ) self.point_layer_list_add.setSizePolicy(sizePolicy) self.point_layer_list_add.setText("") 
self.point_layer_list_add.setIcon(icon2) self.point_layer_list_add.setObjectName("point_layer_list_add") self.gridLayout_11.addWidget(self.point_layer_list_add, 0, 0, 1, 1) self.gridLayout_5.addWidget(self.widget_9, 0, 0, 1, 1) self.gridLayout_12.addWidget(self.widget_6, 2, 0, 1, 4) self.point_layer_radius_input = QtWidgets.QSpinBox(self.point_layer_tab) self.point_layer_radius_input.setAccessibleDescription("") self.point_layer_radius_input.setMinimum(1) self.point_layer_radius_input.setMaximum(999999999) self.point_layer_radius_input.setProperty("value", 1000) self.point_layer_radius_input.setObjectName("point_layer_radius_input") self.gridLayout_12.addWidget(self.point_layer_radius_input, 0, 1, 2, 1) self.point_layer_radius_label = QtWidgets.QLabel(self.point_layer_tab) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Maximum ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.point_layer_radius_label.sizePolicy().hasHeightForWidth() ) self.point_layer_radius_label.setSizePolicy(sizePolicy) self.point_layer_radius_label.setObjectName("point_layer_radius_label") self.gridLayout_12.addWidget(self.point_layer_radius_label, 0, 0, 2, 1) self.point_layer_input = gui.QgsMapLayerComboBox(self.point_layer_tab) self.point_layer_input.setObjectName("point_layer_input") self.gridLayout_12.addWidget(self.point_layer_input, 0, 2, 2, 2) self.request_types_widget.addTab(self.point_layer_tab, "") self.polygon_layer_tab = QtWidgets.QWidget() self.polygon_layer_tab.setEnabled(True) self.polygon_layer_tab.setObjectName("polygon_layer_tab") self.gridLayout_6 = QtWidgets.QGridLayout(self.polygon_layer_tab) self.gridLayout_6.setObjectName("gridLayout_6") self.layer_input = gui.QgsMapLayerComboBox(self.polygon_layer_tab) self.layer_input.setObjectName("layer_input") self.gridLayout_6.addWidget(self.layer_input, 0, 0, 1, 1) self.widget_5 = QtWidgets.QWidget(self.polygon_layer_tab) sizePolicy = 
QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.widget_5.sizePolicy().hasHeightForWidth() ) self.widget_5.setSizePolicy(sizePolicy) self.widget_5.setAutoFillBackground(False) self.widget_5.setObjectName("widget_5") self.gridLayout_3 = QtWidgets.QGridLayout(self.widget_5) self.gridLayout_3.setObjectName("gridLayout_3") self.layer_list = QtWidgets.QListWidget(self.widget_5) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.layer_list.sizePolicy().hasHeightForWidth() ) self.layer_list.setSizePolicy(sizePolicy) self.layer_list.setMinimumSize(QtCore.QSize(0, 0)) self.layer_list.setMaximumSize(QtCore.QSize(16777215, 16777215)) self.layer_list.setFrameShadow(QtWidgets.QFrame.Sunken) self.layer_list.setSelectionMode( QtWidgets.QAbstractItemView.MultiSelection ) self.layer_list.setResizeMode(QtWidgets.QListView.Fixed) self.layer_list.setObjectName("layer_list") self.gridLayout_3.addWidget(self.layer_list, 0, 1, 1, 1) self.widget_8 = QtWidgets.QWidget(self.widget_5) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.widget_8.sizePolicy().hasHeightForWidth() ) self.widget_8.setSizePolicy(sizePolicy) self.widget_8.setObjectName("widget_8") self.gridLayout_9 = QtWidgets.QGridLayout(self.widget_8) self.gridLayout_9.setObjectName("gridLayout_9") self.layer_list_remove = QtWidgets.QPushButton(self.widget_8) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( 
self.layer_list_remove.sizePolicy().hasHeightForWidth() ) self.layer_list_remove.setSizePolicy(sizePolicy) self.layer_list_remove.setText("") self.layer_list_remove.setIcon(icon3) self.layer_list_remove.setObjectName("layer_list_remove") self.gridLayout_9.addWidget(self.layer_list_remove, 1, 0, 1, 1) self.layer_list_add = QtWidgets.QPushButton(self.widget_8) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.layer_list_add.sizePolicy().hasHeightForWidth() ) self.layer_list_add.setSizePolicy(sizePolicy) self.layer_list_add.setText("") self.layer_list_add.setIcon(icon2) self.layer_list_add.setObjectName("layer_list_add") self.gridLayout_9.addWidget(self.layer_list_add, 0, 0, 1, 1) self.gridLayout_3.addWidget(self.widget_8, 0, 0, 1, 1) self.gridLayout_6.addWidget(self.widget_5, 1, 0, 1, 1) self.request_types_widget.addTab(self.polygon_layer_tab, "") self.verticalLayout_5.addWidget(self.request_types_widget) self.configuration_group_box = gui.QgsCollapsibleGroupBox( OhsomeToolsDialogBase ) self.configuration_group_box.setEnabled(True) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.configuration_group_box.sizePolicy().hasHeightForWidth() ) self.configuration_group_box.setSizePolicy(sizePolicy) self.configuration_group_box.setMaximumSize(QtCore.QSize(16777215, 28)) self.configuration_group_box.setAutoFillBackground(False) self.configuration_group_box.setFlat(True) self.configuration_group_box.setCheckable(False) self.configuration_group_box.setChecked(False) self.configuration_group_box.setCollapsed(True) self.configuration_group_box.setScrollOnExpand(False) self.configuration_group_box.setSaveCollapsedState(False) 
self.configuration_group_box.setObjectName("configuration_group_box") self.verticalLayout_4 = QtWidgets.QVBoxLayout( self.configuration_group_box ) self.verticalLayout_4.setObjectName("verticalLayout_4") self.general_options_group = gui.QgsCollapsibleGroupBox( self.configuration_group_box ) self.general_options_group.setCollapsed(True) self.general_options_group.setObjectName("general_options_group") self.verticalLayout_3 = QtWidgets.QVBoxLayout( self.general_options_group ) self.verticalLayout_3.setObjectName("verticalLayout_3") self.frame_6 = QtWidgets.QFrame(self.general_options_group) self.frame_6.setFrameShape(QtWidgets.QFrame.StyledPanel) self.frame_6.setFrameShadow(QtWidgets.QFrame.Raised) self.frame_6.setObjectName("frame_6") self.horizontalLayout_12 = QtWidgets.QHBoxLayout(self.frame_6) self.horizontalLayout_12.setObjectName("horizontalLayout_12") self.check_activate_temporal = QtWidgets.QCheckBox(self.frame_6) self.check_activate_temporal.setChecked(True) self.check_activate_temporal.setObjectName("check_activate_temporal") self.horizontalLayout_12.addWidget(self.check_activate_temporal) self.check_show_metadata = QtWidgets.QCheckBox(self.frame_6) self.check_show_metadata.setEnabled(True) self.check_show_metadata.setChecked(False) self.check_show_metadata.setObjectName("check_show_metadata") self.horizontalLayout_12.addWidget(self.check_show_metadata) self.verticalLayout_3.addWidget(self.frame_6) self.frame_8 = QtWidgets.QFrame(self.general_options_group) self.frame_8.setFrameShape(QtWidgets.QFrame.StyledPanel) self.frame_8.setFrameShadow(QtWidgets.QFrame.Raised) self.frame_8.setObjectName("frame_8") self.horizontalLayout_13 = QtWidgets.QHBoxLayout(self.frame_8) self.horizontalLayout_13.setObjectName("horizontalLayout_13") self.check_merge_geometries = QtWidgets.QCheckBox(self.frame_8) self.check_merge_geometries.setEnabled(True) self.check_merge_geometries.setChecked(True) self.check_merge_geometries.setObjectName("check_merge_geometries") 
self.horizontalLayout_13.addWidget(self.check_merge_geometries) self.verticalLayout_3.addWidget(self.frame_8) self.frame_5 = QtWidgets.QFrame(self.general_options_group) self.frame_5.setFrameShape(QtWidgets.QFrame.StyledPanel) self.frame_5.setFrameShadow(QtWidgets.QFrame.Raised) self.frame_5.setObjectName("frame_5") self.horizontalLayout_10 = QtWidgets.QHBoxLayout(self.frame_5) self.horizontalLayout_10.setObjectName("horizontalLayout_10") self.timeout_label = QtWidgets.QLabel(self.frame_5) self.timeout_label.setObjectName("timeout_label") self.horizontalLayout_10.addWidget(self.timeout_label) self.timeout_input = QtWidgets.QSpinBox(self.frame_5) self.timeout_input.setAccessibleDescription("") self.timeout_input.setMinimum(0) self.timeout_input.setMaximum(9999999) self.timeout_input.setProperty("value", 0) self.timeout_input.setObjectName("timeout_input") self.horizontalLayout_10.addWidget(self.timeout_input) self.verticalLayout_3.addWidget(self.frame_5) self.verticalLayout_4.addWidget(self.general_options_group) self.intervals_group = gui.QgsCollapsibleGroupBox( self.configuration_group_box ) self.intervals_group.setFlat(True) self.intervals_group.setCollapsed(True) self.intervals_group.setSaveCollapsedState(False) self.intervals_group.setObjectName("intervals_group") self.gridLayout_10 = QtWidgets.QGridLayout(self.intervals_group) self.gridLayout_10.setObjectName("gridLayout_10") self.date_start = QtWidgets.QDateEdit(self.intervals_group) self.date_start.setDateTime( QtCore.QDateTime(QtCore.QDate(2016, 1, 1), QtCore.QTime(0, 0, 0)) ) self.date_start.setMinimumDateTime( QtCore.QDateTime(QtCore.QDate(2007, 10, 8), QtCore.QTime(0, 0, 0)) ) self.date_start.setMaximumDate(QtCore.QDate(2050, 12, 31)) self.date_start.setMinimumDate(QtCore.QDate(2007, 10, 8)) self.date_start.setTimeSpec(QtCore.Qt.LocalTime) self.date_start.setObjectName("date_start") self.gridLayout_10.addWidget(self.date_start, 5, 1, 1, 1) self.interval_months = QtWidgets.QSpinBox(self.intervals_group) 
self.interval_months.setMaximum(12) self.interval_months.setObjectName("interval_months") self.gridLayout_10.addWidget(self.interval_months, 3, 2, 1, 1) self.label_months = QtWidgets.QLabel(self.intervals_group) self.label_months.setToolTip("") self.label_months.setObjectName("label_months") self.gridLayout_10.addWidget(self.label_months, 2, 2, 1, 1) self.label_years = QtWidgets.QLabel(self.intervals_group) self.label_years.setToolTip("") self.label_years.setObjectName("label_years") self.gridLayout_10.addWidget(self.label_years, 2, 1, 1, 1) self.interval_days = QtWidgets.QSpinBox(self.intervals_group) self.interval_days.setMaximum(31) self.interval_days.setProperty("value", 1) self.interval_days.setObjectName("interval_days") self.gridLayout_10.addWidget(self.interval_days, 3, 4, 1, 1) self.date_end_label = QtWidgets.QLabel(self.intervals_group) self.date_end_label.setObjectName("date_end_label") self.gridLayout_10.addWidget(self.date_end_label, 4, 4, 1, 1) self.date_start_label = QtWidgets.QLabel(self.intervals_group) self.date_start_label.setObjectName("date_start_label") self.gridLayout_10.addWidget(self.date_start_label, 4, 1, 1, 1) self.label_days = QtWidgets.QLabel(self.intervals_group) self.label_days.setToolTip("") self.label_days.setObjectName("label_days") self.gridLayout_10.addWidget(self.label_days, 2, 4, 1, 1) self.date_end = QtWidgets.QDateEdit(self.intervals_group) self.date_end.setDateTime( QtCore.QDateTime(QtCore.QDate(2017, 1, 1), QtCore.QTime(0, 0, 0)) ) self.date_end.setMinimumDateTime( QtCore.QDateTime(QtCore.QDate(2007, 10, 9), QtCore.QTime(0, 0, 0)) ) self.date_end.setMinimumDate(QtCore.QDate(2007, 10, 9)) self.date_end.setObjectName("date_end") self.gridLayout_10.addWidget(self.date_end, 5, 4, 1, 1) self.interval_years = QtWidgets.QSpinBox(self.intervals_group) self.interval_years.setAccessibleDescription("") self.interval_years.setMaximum(99) self.interval_years.setProperty("value", 0) self.interval_years.setObjectName("interval_years") 
self.gridLayout_10.addWidget(self.interval_years, 3, 1, 1, 1) self.verticalLayout_4.addWidget(self.intervals_group) self.property_groups_box = gui.QgsCollapsibleGroupBox( self.configuration_group_box ) self.property_groups_box.setFlat(True) self.property_groups_box.setCollapsed(True) self.property_groups_box.setObjectName("property_groups_box") self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.property_groups_box) self.verticalLayout_6.setObjectName("verticalLayout_6") self.frame_7 = QtWidgets.QFrame(self.property_groups_box) self.frame_7.setFrameShape(QtWidgets.QFrame.StyledPanel) self.frame_7.setFrameShadow(QtWidgets.QFrame.Raised) self.frame_7.setObjectName("frame_7") self.horizontalLayout = QtWidgets.QHBoxLayout(self.frame_7) self.horizontalLayout.setObjectName("horizontalLayout") self.check_clip_geometry = QtWidgets.QCheckBox(self.frame_7) self.check_clip_geometry.setEnabled(True) self.check_clip_geometry.setChecked(True) self.check_clip_geometry.setObjectName("check_clip_geometry") self.horizontalLayout.addWidget(self.check_clip_geometry) self.check_keep_geometryless = QtWidgets.QCheckBox(self.frame_7) self.check_keep_geometryless.setEnabled(True) self.check_keep_geometryless.setChecked(False) self.check_keep_geometryless.setObjectName("check_keep_geometryless") self.horizontalLayout.addWidget(self.check_keep_geometryless) self.verticalLayout_6.addWidget(self.frame_7) self.property_groups_groupbox = QtWidgets.QGroupBox( self.property_groups_box ) self.property_groups_groupbox.setFlat(False) self.property_groups_groupbox.setCheckable(False) self.property_groups_groupbox.setObjectName("property_groups_groupbox") self.verticalLayout_7 = QtWidgets.QVBoxLayout( self.property_groups_groupbox ) self.verticalLayout_7.setObjectName("verticalLayout_7") self.frame_4 = QtWidgets.QFrame(self.property_groups_groupbox) self.frame_4.setFrameShape(QtWidgets.QFrame.StyledPanel) self.frame_4.setFrameShadow(QtWidgets.QFrame.Raised) self.frame_4.setObjectName("frame_4") 
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.frame_4) self.horizontalLayout_2.setObjectName("horizontalLayout_2") self.property_groups_check_metadata = QtWidgets.QCheckBox(self.frame_4) self.property_groups_check_metadata.setObjectName( "property_groups_check_metadata" ) self.horizontalLayout_2.addWidget(self.property_groups_check_metadata) self.property_groups_check_tags = QtWidgets.QCheckBox(self.frame_4) self.property_groups_check_tags.setChecked(True) self.property_groups_check_tags.setObjectName( "property_groups_check_tags" ) self.horizontalLayout_2.addWidget(self.property_groups_check_tags) self.verticalLayout_7.addWidget(self.frame_4) self.verticalLayout_6.addWidget(self.property_groups_groupbox) self.verticalLayout_4.addWidget(self.property_groups_box) self.data_aggregation_group = gui.QgsCollapsibleGroupBox( self.configuration_group_box ) self.data_aggregation_group.setCollapsed(True) self.data_aggregation_group.setObjectName("data_aggregation_group") self.verticalLayout_9 = QtWidgets.QVBoxLayout( self.data_aggregation_group ) self.verticalLayout_9.setObjectName("verticalLayout_9") self.frame = QtWidgets.QFrame(self.data_aggregation_group) self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel) self.frame.setFrameShadow(QtWidgets.QFrame.Raised) self.frame.setObjectName("frame") self.horizontalLayout_9 = QtWidgets.QHBoxLayout(self.frame) self.horizontalLayout_9.setObjectName("horizontalLayout_9") self.label = QtWidgets.QLabel(self.frame) self.label.setObjectName("label") self.horizontalLayout_9.addWidget(self.label) self.data_aggregation_format = QtWidgets.QComboBox(self.frame) self.data_aggregation_format.setObjectName("data_aggregation_format") self.horizontalLayout_9.addWidget(self.data_aggregation_format) self.verticalLayout_9.addWidget(self.frame) self.group_by_frame = QtWidgets.QFrame(self.data_aggregation_group) self.group_by_frame.setFrameShape(QtWidgets.QFrame.StyledPanel) self.group_by_frame.setFrameShadow(QtWidgets.QFrame.Raised) 
self.group_by_frame.setObjectName("group_by_frame") self.verticalLayout = QtWidgets.QVBoxLayout(self.group_by_frame) self.verticalLayout.setObjectName("verticalLayout") self.filter_frame_2 = QtWidgets.QFrame(self.group_by_frame) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Maximum ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.filter_frame_2.sizePolicy().hasHeightForWidth() ) self.filter_frame_2.setSizePolicy(sizePolicy) self.filter_frame_2.setToolTip("") self.filter_frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel) self.filter_frame_2.setFrameShadow(QtWidgets.QFrame.Raised) self.filter_frame_2.setObjectName("filter_frame_2") self.horizontalLayout_11 = QtWidgets.QHBoxLayout(self.filter_frame_2) self.horizontalLayout_11.setObjectName("horizontalLayout_11") self.group_by_keys_label = QtWidgets.QLabel(self.filter_frame_2) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Maximum ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.group_by_keys_label.sizePolicy().hasHeightForWidth() ) self.group_by_keys_label.setSizePolicy(sizePolicy) self.group_by_keys_label.setObjectName("group_by_keys_label") self.horizontalLayout_11.addWidget(self.group_by_keys_label) self.group_by_key_line_edit = QtWidgets.QLineEdit(self.filter_frame_2) self.group_by_key_line_edit.setText("") self.group_by_key_line_edit.setEchoMode(QtWidgets.QLineEdit.Normal) self.group_by_key_line_edit.setObjectName("group_by_key_line_edit") self.horizontalLayout_11.addWidget(self.group_by_key_line_edit) self.verticalLayout.addWidget(self.filter_frame_2) self.filter_frame_3 = QtWidgets.QFrame(self.group_by_frame) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Maximum ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( 
self.filter_frame_3.sizePolicy().hasHeightForWidth() ) self.filter_frame_3.setSizePolicy(sizePolicy) self.filter_frame_3.setToolTip("") self.filter_frame_3.setFrameShape(QtWidgets.QFrame.StyledPanel) self.filter_frame_3.setFrameShadow(QtWidgets.QFrame.Raised) self.filter_frame_3.setObjectName("filter_frame_3") self.horizontalLayout_14 = QtWidgets.QHBoxLayout(self.filter_frame_3) self.horizontalLayout_14.setObjectName("horizontalLayout_14") self.group_by_values_label = QtWidgets.QLabel(self.filter_frame_3) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Maximum ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.group_by_values_label.sizePolicy().hasHeightForWidth() ) self.group_by_values_label.setSizePolicy(sizePolicy) self.group_by_values_label.setObjectName("group_by_values_label") self.horizontalLayout_14.addWidget(self.group_by_values_label) self.group_by_values_line_edit = QtWidgets.QLineEdit( self.filter_frame_3 ) self.group_by_values_line_edit.setObjectName( "group_by_values_line_edit" ) self.horizontalLayout_14.addWidget(self.group_by_values_line_edit) self.verticalLayout.addWidget(self.filter_frame_3) self.filter_frame_4 = QtWidgets.QFrame(self.group_by_frame) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Maximum ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.filter_frame_4.sizePolicy().hasHeightForWidth() ) self.filter_frame_4.setSizePolicy(sizePolicy) self.filter_frame_4.setToolTip("") self.filter_frame_4.setFrameShape(QtWidgets.QFrame.StyledPanel) self.filter_frame_4.setFrameShadow(QtWidgets.QFrame.Raised) self.filter_frame_4.setObjectName("filter_frame_4") self.horizontalLayout_15 = QtWidgets.QHBoxLayout(self.filter_frame_4) self.horizontalLayout_15.setObjectName("horizontalLayout_15") self.filter2_input_label = QtWidgets.QLabel(self.filter_frame_4) sizePolicy 
= QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Maximum ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.filter2_input_label.sizePolicy().hasHeightForWidth() ) self.filter2_input_label.setSizePolicy(sizePolicy) self.filter2_input_label.setObjectName("filter2_input_label") self.horizontalLayout_15.addWidget(self.filter2_input_label) self.filter2_input = QtWidgets.QLineEdit(self.filter_frame_4) self.filter2_input.setAutoFillBackground(False) self.filter2_input.setStyleSheet("") self.filter2_input.setObjectName("filter2_input") self.horizontalLayout_15.addWidget(self.filter2_input) self.verticalLayout.addWidget(self.filter_frame_4) self.verticalLayout_9.addWidget(self.group_by_frame) self.verticalLayout_4.addWidget(self.data_aggregation_group) self.verticalLayout_5.addWidget(self.configuration_group_box) self.filter_frame = QtWidgets.QFrame(OhsomeToolsDialogBase) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Maximum ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.filter_frame.sizePolicy().hasHeightForWidth() ) self.filter_frame.setSizePolicy(sizePolicy) self.filter_frame.setFrameShape(QtWidgets.QFrame.StyledPanel) self.filter_frame.setFrameShadow(QtWidgets.QFrame.Raised) self.filter_frame.setObjectName("filter_frame") self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.filter_frame) self.horizontalLayout_6.setObjectName("horizontalLayout_6") self.frame_9 = QtWidgets.QFrame(self.filter_frame) self.frame_9.setMinimumSize(QtCore.QSize(0, 0)) self.frame_9.setMaximumSize(QtCore.QSize(90, 16777215)) self.frame_9.setFrameShape(QtWidgets.QFrame.StyledPanel) self.frame_9.setFrameShadow(QtWidgets.QFrame.Raised) self.frame_9.setObjectName("frame_9") self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.frame_9) self.horizontalLayout_4.setObjectName("horizontalLayout_4") self.filter_label = 
QtWidgets.QLabel(self.frame_9) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Maximum ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.filter_label.sizePolicy().hasHeightForWidth() ) self.filter_label.setSizePolicy(sizePolicy) self.filter_label.setObjectName("filter_label") self.horizontalLayout_4.addWidget(self.filter_label) self.filter_help = QtWidgets.QPushButton(self.frame_9) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.filter_help.sizePolicy().hasHeightForWidth() ) self.filter_help.setSizePolicy(sizePolicy) self.filter_help.setText("") icon4 = QtGui.QIcon() icon4.addPixmap( QtGui.QPixmap(":/plugins/ohsomeTools/img/icon_help.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off, ) self.filter_help.setIcon(icon4) self.filter_help.setObjectName("filter_help") self.horizontalLayout_4.addWidget(self.filter_help) self.horizontalLayout_6.addWidget(self.frame_9) self.filter_input = QtWidgets.QPlainTextEdit(self.filter_frame) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Maximum, ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.filter_input.sizePolicy().hasHeightForWidth() ) self.filter_input.setSizePolicy(sizePolicy) self.filter_input.setMaximumSize(QtCore.QSize(360, 101)) self.filter_input.setToolTipDuration(-1) self.filter_input.setPlainText("") self.filter_input.setObjectName("filter_input") self.horizontalLayout_6.addWidget(self.filter_input) self.verticalLayout_5.addWidget(self.filter_frame) self.ohsome_log_group = gui.QgsCollapsibleGroupBox( OhsomeToolsDialogBase ) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) 
sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.ohsome_log_group.sizePolicy().hasHeightForWidth() ) self.ohsome_log_group.setSizePolicy(sizePolicy) self.ohsome_log_group.setMinimumSize(QtCore.QSize(0, 0)) self.ohsome_log_group.setMaximumSize(QtCore.QSize(16777215, 28)) self.ohsome_log_group.setFlat(True) self.ohsome_log_group.setCollapsed(True) self.ohsome_log_group.setSaveCollapsedState(False) self.ohsome_log_group.setObjectName("ohsome_log_group") self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.ohsome_log_group) self.verticalLayout_2.setSizeConstraint( QtWidgets.QLayout.SetDefaultConstraint ) self.verticalLayout_2.setObjectName("verticalLayout_2") self.debug_text = QtWidgets.QTextBrowser(self.ohsome_log_group) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.debug_text.sizePolicy().hasHeightForWidth() ) self.debug_text.setSizePolicy(sizePolicy) self.debug_text.setMinimumSize(QtCore.QSize(0, 80)) self.debug_text.setMaximumSize(QtCore.QSize(16777215, 80)) self.debug_text.setAutoFormatting(QtWidgets.QTextEdit.AutoBulletList) self.debug_text.setTabStopWidth(80) self.debug_text.setOpenExternalLinks(True) self.debug_text.setObjectName("debug_text") self.verticalLayout_2.addWidget(self.debug_text) self.verticalLayout_5.addWidget(self.ohsome_log_group) self.control_widget = QtWidgets.QWidget(OhsomeToolsDialogBase) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.control_widget.sizePolicy().hasHeightForWidth() ) self.control_widget.setSizePolicy(sizePolicy) self.control_widget.setObjectName("control_widget") self.horizontalLayout_8 = QtWidgets.QHBoxLayout(self.control_widget) self.horizontalLayout_8.setObjectName("horizontalLayout_8") 
# NOTE: auto-generated by pyuic5 from a Qt Designer .ui file — edit the .ui
# source, not this code. The following statements are the tail of setupUi():
# they build the bottom control row (Help / About / OK-Cancel) and wire the
# dialog's accept/reject signals.
self.help_button = QtWidgets.QPushButton(self.control_widget)
sizePolicy = QtWidgets.QSizePolicy(
    QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
    self.help_button.sizePolicy().hasHeightForWidth()
)
self.help_button.setSizePolicy(sizePolicy)
# icon4 (the help icon) was created earlier in setupUi and is reused here.
self.help_button.setIcon(icon4)
self.help_button.setObjectName("help_button")
self.horizontalLayout_8.addWidget(self.help_button)
self.about_button = QtWidgets.QPushButton(self.control_widget)
icon5 = QtGui.QIcon()
icon5.addPixmap(
    QtGui.QPixmap(":/plugins/ohsomeTools/img/icon_about.png"),
    QtGui.QIcon.Normal,
    QtGui.QIcon.Off,
)
self.about_button.setIcon(icon5)
self.about_button.setObjectName("about_button")
self.horizontalLayout_8.addWidget(self.about_button)
self.global_buttons = QtWidgets.QDialogButtonBox(self.control_widget)
self.global_buttons.setEnabled(True)
self.global_buttons.setOrientation(QtCore.Qt.Horizontal)
self.global_buttons.setStandardButtons(
    QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok
)
self.global_buttons.setObjectName("global_buttons")
self.horizontalLayout_8.addWidget(self.global_buttons)
self.verticalLayout_5.addWidget(self.control_widget)
# Fix the z-order of the top-level groups.
self.resources_group.raise_()
self.control_widget.raise_()
self.ohsome_log_group.raise_()
self.request_types_widget.raise_()
self.configuration_group_box.raise_()
self.groupBox_4.raise_()
self.filter_frame.raise_()

self.retranslateUi(OhsomeToolsDialogBase)
self.request_types_widget.setCurrentIndex(0)
self.global_buttons.accepted.connect(OhsomeToolsDialogBase.accept)
self.global_buttons.rejected.connect(OhsomeToolsDialogBase.reject)
QtCore.QMetaObject.connectSlotsByName(OhsomeToolsDialogBase)

def retranslateUi(self, OhsomeToolsDialogBase):
    """Assign all translatable UI strings (titles, labels, tooltips,
    placeholders) via Qt's translation system.

    Generated by pyuic5; called once at the end of setupUi() and again by
    Qt whenever the application language changes.
    """
    _translate = QtCore.QCoreApplication.translate
    OhsomeToolsDialogBase.setWindowTitle(
        _translate("OhsomeToolsDialogBase", "ohsomeTools")
    )
    self.resources_group.setTitle(
        _translate("OhsomeToolsDialogBase", "Resources")
    )
    self.api_label.setText(
        _translate(
            "OhsomeToolsDialogBase",
            '<html><head/><body><p><a href="https://docs.ohsome.org/ohsome-api/stable/"><span style=" font-size:12pt; font-weight:600; text-decoration: underline; color:#2eb8e6;">API DOCS</span></a></p></body></html>',
        )
    )
    self.ohsome_label.setText(
        _translate(
            "OhsomeToolsDialogBase",
            '<html><head/><body><p><a href="https://heigit.org/big-spatial-data-analytics-en/ohsome/"><span style=" font-size:14pt; font-weight:600; text-decoration: underline; color:#2eb8e6;">OHSOME</span></a></p></body></html>',
        )
    )
    self.oqt_label.setText(
        _translate(
            "OhsomeToolsDialogBase",
            '<html><head/><body><p><a href="https://oqt.ohsome.org"><span style=" font-size:12pt; font-weight:600; text-decoration: underline; color:#2eb8e6;">OQT</span></a></p></body></html>',
        )
    )
    self.groupBox_4.setTitle(
        _translate("OhsomeToolsDialogBase", "Endpoint Selections")
    )
    self.label_15.setText(_translate("OhsomeToolsDialogBase", "Provider"))
    self.provider_refresh.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            "Refresh the provider list. Needed after a provider was added or deleted.",
        )
    )
    self.provider_config.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            "Shortcut to Web ► ohsomeTools ► Provider Settings",
        )
    )
    self.ohsome_spec_selection_combo.setToolTip(
        _translate("OhsomeToolsDialogBase", "ohsome API Endpoint")
    )
    self.ohsome_spec_preference_combo.setToolTip(
        _translate("OhsomeToolsDialogBase", "Request preference")
    )
    self.ohsome_spec_preference_specification.setToolTip(
        _translate("OhsomeToolsDialogBase", "Request preference")
    )
    self.centroid_radius_input.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            '<html><head/><body><p>Add a centroid radius <span style=" text-decoration: underline;">in meters</span> per centroid. Default is <span style=" text-decoration: underline;">10 meters</span>.</p></body></html>',
        )
    )
    self.ohsome_centroid_location_list.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            "Select centroids from the map with the given radius!",
        )
    )
    self.centroid_list_point_add.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            "<html><head/><body><p>Add centroids interactively from the map canvas.</p><p>Double-click will terminate centroid selection.</p></body></html>",
        )
    )
    self.centroid_list_point_clear.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            "<html><head/><body><p>If centroids are selected in the list, only these will be deleted. Else all centroids will be deleted.</p></body></html>",
        )
    )
    self.centroid_radius_label.setText(
        _translate("OhsomeToolsDialogBase", "Radius")
    )
    self.request_types_widget.setTabText(
        self.request_types_widget.indexOf(self.centroid_tab),
        _translate("OhsomeToolsDialogBase", "Centroid"),
    )
    self.point_layer_list.setToolTip(
        _translate(
            "OhsomeToolsDialogBase", "Select layers from your layer list!"
        )
    )
    self.point_layer_list_remove.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            "<html><head/><body><p>If layers are selected in the list, only these will be deleted.</p></body></html>",
        )
    )
    self.point_layer_list_add.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            "<html><head/><body><p>Add layers interactively from your layer list.</p></body></html>",
        )
    )
    self.point_layer_radius_input.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            '<html><head/><body><p>Add a centroid radius <span style=" text-decoration: underline;">in meters</span> per centroid. Default is <span style=" text-decoration: underline;">10 meters</span>.</p></body></html>',
        )
    )
    self.point_layer_radius_label.setText(
        _translate("OhsomeToolsDialogBase", "Radius")
    )
    self.point_layer_input.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            "<html><head/><body><p>Select Polygon / MultiPolygon layers to query with.</p></body></html>",
        )
    )
    self.request_types_widget.setTabText(
        self.request_types_widget.indexOf(self.point_layer_tab),
        _translate("OhsomeToolsDialogBase", "Point Layer"),
    )
    self.layer_input.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            "<html><head/><body><p>Select Polygon / MultiPolygon layers to query with.</p></body></html>",
        )
    )
    self.layer_list.setToolTip(
        _translate(
            "OhsomeToolsDialogBase", "Select layers from your layer list!"
        )
    )
    self.layer_list_remove.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            "<html><head/><body><p>If layers are selected in the list, only these will be deleted.</p></body></html>",
        )
    )
    self.layer_list_add.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            "<html><head/><body><p>Add layers interactively from your layer list.</p></body></html>",
        )
    )
    self.request_types_widget.setTabText(
        self.request_types_widget.indexOf(self.polygon_layer_tab),
        _translate("OhsomeToolsDialogBase", "Polygon layer"),
    )
    self.configuration_group_box.setTitle(
        _translate("OhsomeToolsDialogBase", "Configuration")
    )
    self.general_options_group.setTitle(
        _translate("OhsomeToolsDialogBase", "General")
    )
    self.check_activate_temporal.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            "<html><head/><body><p>Automatically enable the temporal feature for new layers where applicable.</p><p><br/></p><p>This is only applied to responses that contain geometries and in that manner only on those geometry layers it makes sense for.</p></body></html>",
        )
    )
    self.check_activate_temporal.setText(
        _translate("OhsomeToolsDialogBase", "Qgis temporal feature")
    )
    # NOTE(review): "Depending on the request of the request" in the string
    # below reads like a typo ("size of the request"?) — fix in the .ui file.
    self.check_show_metadata.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            "<html><head/><body><p>Include metadata into the query response. Depending on the request of the request this can increase the response data size significantly.</p></body></html>",
        )
    )
    self.check_show_metadata.setText(
        _translate("OhsomeToolsDialogBase", "Show metadata")
    )
    self.check_merge_geometries.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            '<html><head/><body><p>Check this to <span style=" text-decoration: underline;">automatically merge compatible geometry types</span>. </p><p><span style=" text-decoration: underline;">It is recommended to keep this checked.</span></p><p>The benefit is that the amount of written layers will be massively reduced.</p><p>The reason is that results may contain single and multi-geometries at once (Polygon, MultiPolygon etc.) and without combining them one layer per geometry type will be written, resulting in an increased number of layers. </p><p><br/></p></body></html>',
        )
    )
    self.check_merge_geometries.setText(
        _translate("OhsomeToolsDialogBase", "Harmonize geometries")
    )
    self.timeout_label.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            '<html><head/><body><p>Apply a request <span style=" text-decoration: underline;">timeout in seconds</span> after that the ohsome API should terminate the request.</p><p>For the public ohsome API instance the maximum value is 100. For private instances there is no maximum.</p></body></html>',
        )
    )
    self.timeout_label.setText(
        _translate("OhsomeToolsDialogBase", "Timeout")
    )
    self.timeout_input.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            '<html><head/><body><p>Apply a request <span style=" text-decoration: underline;">timeout in seconds</span> after that the ohsome API should terminate the request. If the value 0 is set (default), the server side timeout is used automatically, which can be up to 600 seconds.</p><p>Depending on the size of the query, too low entries will yield API errors.</p><p>This setting is only used for the public ohsome API and not for private ones.</p></body></html>',
        )
    )
    self.intervals_group.setTitle(
        _translate("OhsomeToolsDialogBase", "Intervals")
    )
    # NOTE(review): "Oktober" (German spelling) appears in the two date
    # tooltips below — should be "October"; fix in the .ui file.
    self.date_start.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            '<html><head/><body><p>Enter your start date. </p><p><br/></p><p>All <span style=" text-decoration: underline;">dates from the </span><span style=" font-weight:600; text-decoration: underline;">8th</span><span style=" text-decoration: underline;"> of Oktober 2007</span> are valid.</p></body></html>',
        )
    )
    self.date_start.setDisplayFormat(
        _translate("OhsomeToolsDialogBase", "dd-MM-yyyy")
    )
    self.interval_months.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            "<html><head/><body><p>Run the query every x months. Adjust the time frame accordingly.</p><p><br/></p><p>Only available for the data aggregation endpoints.</p></body></html>",
        )
    )
    self.label_months.setText(_translate("OhsomeToolsDialogBase", "Months"))
    self.label_years.setText(_translate("OhsomeToolsDialogBase", "Years"))
    self.interval_days.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            "<html><head/><body><p>Run the query every x days. Adjust the time frame accordingly.</p><p><br/></p><p>Only available for the data aggregation endpoints.</p></body></html>",
        )
    )
    self.date_end_label.setText(
        _translate("OhsomeToolsDialogBase", "Date End")
    )
    self.date_start_label.setText(
        _translate("OhsomeToolsDialogBase", "Date Start")
    )
    self.label_days.setText(_translate("OhsomeToolsDialogBase", "Days"))
    self.date_end.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            '<html><head/><body><p>Enter your end date. </p><p><br/></p><p>All <span style=" text-decoration: underline;">dates from the </span><span style=" font-weight:600; text-decoration: underline;">9th</span><span style=" text-decoration: underline;"> of Oktober 2007</span> are valid.</p></body></html>',
        )
    )
    self.date_end.setDisplayFormat(
        _translate("OhsomeToolsDialogBase", "dd-MM-yyyy")
    )
    self.interval_years.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            '<html><head/><body><p>Run the query every <span style=" text-decoration: underline;">x</span> years. Adjust the time frame accordingly.</p><p>Only available for the data aggregation endpoints.</p></body></html>',
        )
    )
    self.property_groups_box.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            "<html><head/><body><p>List of possible property-groups added to each OSM-element.</p><p>Only available for data extraction.</p></body></html>",
        )
    )
    self.property_groups_box.setTitle(
        _translate("OhsomeToolsDialogBase", "Data extraction")
    )
    # NOTE(review): "Ony available" in the string below is a typo for
    # "Only available" — fix in the .ui file.
    self.check_clip_geometry.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            '<html><head/><body><p>Specify whether the returned geometries of the features should be clipped to the query’s spatial boundary.</p><p><span style=" text-decoration: underline;">Ony available for the data extraction endpoints</span>.</p></body></html>',
        )
    )
    self.check_clip_geometry.setText(
        _translate("OhsomeToolsDialogBase", "Clip Geometry")
    )
    # NOTE(review): "wan't" and "geometry less" in the string below are
    # typos for "want" / "geometry-less" — fix in the .ui file.
    self.check_keep_geometryless.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            "<html><head/><body><p>Some results don't contain geometries but metadata.</p><p>Decide if you wan't to keep them or only return ones with geometries.</p><p>If checked, the geometry less features will be stored separately.</p></body></html>",
        )
    )
    self.check_keep_geometryless.setText(
        _translate("OhsomeToolsDialogBase", "Keep without geometry")
    )
    self.property_groups_groupbox.setTitle(
        _translate("OhsomeToolsDialogBase", "Property Groups")
    )
    self.property_groups_check_metadata.setText(
        _translate("OhsomeToolsDialogBase", "Metadata")
    )
    self.property_groups_check_tags.setText(
        _translate("OhsomeToolsDialogBase", "Tags")
    )
    self.data_aggregation_group.setTitle(
        _translate("OhsomeToolsDialogBase", "Data aggregation")
    )
    self.label.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            "<html><head/><body><p>Output format geojson (for /groupBy/boundary resources only), or json.</p></body></html>",
        )
    )
    self.label.setText(_translate("OhsomeToolsDialogBase", "Format"))
    self.data_aggregation_format.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            "<html><head/><body><p>Output format geojson (for /groupBy/boundary resources only), or json.</p></body></html>",
        )
    )
    self.group_by_keys_label.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            "<html><head/><body><p>OSM key e.g.: 'highway’, 'building’; no default value (one groupByKey parameter must be defined).</p><p><span style=\" text-decoration: underline;\">Only for `groupBy/tag` endpoints.</span></p></body></html>",
        )
    )
    self.group_by_keys_label.setText(
        _translate("OhsomeToolsDialogBase", "groupByKey")
    )
    self.group_by_key_line_edit.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            "<html><head/><body><p>OSM key e.g.: 'highway’, 'building’; no default value (one groupByKey parameter must be defined).</p><p><span style=\" text-decoration: underline;\">Only for `groupBy/tag` and `groupBy/key` endpoints.</span></p></body></html>",
        )
    )
    self.group_by_key_line_edit.setPlaceholderText(
        _translate("OhsomeToolsDialogBase", "building")
    )
    self.group_by_values_label.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            "<html><head/><body><p>OSM value(s) e.g.: 'primary’, 'residential’; default: no value. Can be left empty.</p><p><span style=\" text-decoration: underline;\">Only for `groupBy/tag` endpoints.</span></p></body></html>",
        )
    )
    self.group_by_values_label.setText(
        _translate("OhsomeToolsDialogBase", "groupByValues")
    )
    self.group_by_values_line_edit.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            "<html><head/><body><p>OSM value(s) e.g.: 'primary’, 'residential’; default: no value. Can be left empty.</p><p><span style=\" text-decoration: underline;\">Only for `groupBy/tag` endpoints.</span></p></body></html>",
        )
    )
    self.filter2_input_label.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            '<html><head/><body><p>Combines several attributive filters, e.g. OSM type, the geometry (simple feature) type, as well as the OSM tag; no default value</p><p><span style=" text-decoration: underline;">Only for `ratio` endpoints.</span></p></body></html>',
        )
    )
    self.filter2_input_label.setText(
        _translate("OhsomeToolsDialogBase", "Filter 2")
    )
    self.filter2_input.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            '<html><head/><body><p>Combines several attributive filters, e.g. OSM type, the geometry (simple feature) type, as well as the OSM tag; no default value</p><p><span style=" text-decoration: underline;">Only for `ratio` endpoints.</span></p></body></html>',
        )
    )
    self.filter_label.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            '<html><head/><body><p>Combines several attributive filters, e.g. OSM type, the geometry (simple feature) type, as well as the OSM tag. </p><p>For more information see: <a href="https://docs.ohsome.org/ohsome-api/v1/filter.html"><span style=" text-decoration: underline; color:#2eb8e6;">https://docs.ohsome.org/ohsome-api/v1/filter.html</span></a></p></body></html>',
        )
    )
    self.filter_label.setText(
        _translate("OhsomeToolsDialogBase", "Filter ")
    )
    self.filter_help.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            "Filter specific help page. Scroll to bottom to see examples.",
        )
    )
    self.filter_input.setToolTip(
        _translate(
            "OhsomeToolsDialogBase",
            '<html><head/><body><p>Combines several attributive filters, e.g. OSM type, the geometry (simple feature) type, as well as the OSM tag. </p><p>For more information see: <a href="https://docs.ohsome.org/ohsome-api/v1/filter.html"><span style=" text-decoration: underline; color:#2eb8e6;">https://docs.ohsome.org/ohsome-api/v1/filter.html</span></a></p></body></html>',
        )
    )
    self.filter_input.setPlaceholderText(
        _translate(
            "OhsomeToolsDialogBase",
            "building=* or (type:way and highway=residential)",
        )
    )
    self.ohsome_log_group.setTitle(
        _translate("OhsomeToolsDialogBase", "Log")
    )
    self.debug_text.setPlaceholderText(
        _translate(
            "OhsomeToolsDialogBase",
            "Queries and errors will be printed here.",
        )
    )
    self.help_button.setText(_translate("OhsomeToolsDialogBase", " Help"))
    self.about_button.setText(_translate("OhsomeToolsDialogBase", "About"))


# Deferred imports emitted by pyuic5 for custom widgets (qgis.gui classes)
# and the compiled Qt resource file referenced by the ":/plugins/..." paths.
from qgis import gui
from . import resources_rc
71,934
22,373
"@generated by @aspect_rules_js//npm/private:npm_translate_lock.bzl from pnpm lock file @aspect_rules_swc@aspect_rules_swc//swc:pnpm-lock.yaml" load("@aspect_rules_js//npm:npm_import.bzl", "npm_import") def npm_repositories(): "Generated npm_import repository rules corresponding to npm packages in @aspect_rules_swc@aspect_rules_swc//swc:pnpm-lock.yaml" npm_import( name = "swc_cli__at_nodelib_fs.scandir__2.1.5", root_package = "swc", link_workspace = "aspect_rules_swc", link_packages = {}, package = "@nodelib/fs.scandir", version = "2.1.5", integrity = "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", deps = { "@nodelib/fs.stat": "2.0.5", "run-parallel": "1.2.0", }, transitive_closure = { "@nodelib/fs.scandir": ["2.1.5"], "@nodelib/fs.stat": ["2.0.5"], "run-parallel": ["1.2.0"], "queue-microtask": ["1.2.3"], }, ) npm_import( name = "swc_cli__at_nodelib_fs.stat__2.0.5", root_package = "swc", link_workspace = "aspect_rules_swc", link_packages = {}, package = "@nodelib/fs.stat", version = "2.0.5", integrity = "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", transitive_closure = { "@nodelib/fs.stat": ["2.0.5"], }, ) npm_import( name = "swc_cli__at_nodelib_fs.walk__1.2.8", root_package = "swc", link_workspace = "aspect_rules_swc", link_packages = {}, package = "@nodelib/fs.walk", version = "1.2.8", integrity = "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", deps = { "@nodelib/fs.scandir": "2.1.5", "fastq": "1.13.0", }, transitive_closure = { "@nodelib/fs.walk": ["1.2.8"], "@nodelib/fs.scandir": ["2.1.5"], "fastq": ["1.13.0"], "reusify": ["1.0.4"], "@nodelib/fs.stat": ["2.0.5"], "run-parallel": ["1.2.0"], "queue-microtask": ["1.2.3"], }, ) npm_import( name = "swc_cli__at_swc_cli__0.1.57", root_package = "swc", link_workspace = "aspect_rules_swc", link_packages = { "swc": ["@swc/cli"], }, package = "@swc/cli", version = 
"0.1.57", integrity = "sha512-HxM8TqYHhAg+zp7+RdTU69bnkl4MWdt1ygyp6BDIPjTiaJVH6Dizn2ezbgDS8mnFZI1FyhKvxU/bbaUs8XhzQg==", deps = { "commander": "7.2.0", "fast-glob": "3.2.11", "slash": "3.0.0", "source-map": "0.7.3", }, transitive_closure = { "@swc/cli": ["0.1.57"], "commander": ["7.2.0"], "fast-glob": ["3.2.11"], "slash": ["3.0.0"], "source-map": ["0.7.3"], "@nodelib/fs.stat": ["2.0.5"], "@nodelib/fs.walk": ["1.2.8"], "glob-parent": ["5.1.2"], "merge2": ["1.4.1"], "micromatch": ["4.0.5"], "braces": ["3.0.2"], "picomatch": ["2.3.1"], "fill-range": ["7.0.1"], "to-regex-range": ["5.0.1"], "is-number": ["7.0.0"], "is-glob": ["4.0.3"], "is-extglob": ["2.1.1"], "@nodelib/fs.scandir": ["2.1.5"], "fastq": ["1.13.0"], "reusify": ["1.0.4"], "run-parallel": ["1.2.0"], "queue-microtask": ["1.2.3"], }, ) npm_import( name = "swc_cli__braces__3.0.2", root_package = "swc", link_workspace = "aspect_rules_swc", link_packages = {}, package = "braces", version = "3.0.2", integrity = "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", deps = { "fill-range": "7.0.1", }, transitive_closure = { "braces": ["3.0.2"], "fill-range": ["7.0.1"], "to-regex-range": ["5.0.1"], "is-number": ["7.0.0"], }, ) npm_import( name = "swc_cli__commander__7.2.0", root_package = "swc", link_workspace = "aspect_rules_swc", link_packages = {}, package = "commander", version = "7.2.0", integrity = "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", transitive_closure = { "commander": ["7.2.0"], }, ) npm_import( name = "swc_cli__fast-glob__3.2.11", root_package = "swc", link_workspace = "aspect_rules_swc", link_packages = {}, package = "fast-glob", version = "3.2.11", integrity = "sha512-xrO3+1bxSo3ZVHAnqzyuewYT6aMFHRAd4Kcs92MAonjwQZLsK9d0SF1IyQ3k5PoirxTW0Oe/RqFgMQ6TcNE5Ew==", deps = { "@nodelib/fs.stat": "2.0.5", "@nodelib/fs.walk": "1.2.8", "glob-parent": "5.1.2", "merge2": "1.4.1", "micromatch": "4.0.5", }, 
transitive_closure = { "fast-glob": ["3.2.11"], "@nodelib/fs.stat": ["2.0.5"], "@nodelib/fs.walk": ["1.2.8"], "glob-parent": ["5.1.2"], "merge2": ["1.4.1"], "micromatch": ["4.0.5"], "braces": ["3.0.2"], "picomatch": ["2.3.1"], "fill-range": ["7.0.1"], "to-regex-range": ["5.0.1"], "is-number": ["7.0.0"], "is-glob": ["4.0.3"], "is-extglob": ["2.1.1"], "@nodelib/fs.scandir": ["2.1.5"], "fastq": ["1.13.0"], "reusify": ["1.0.4"], "run-parallel": ["1.2.0"], "queue-microtask": ["1.2.3"], }, ) npm_import( name = "swc_cli__fastq__1.13.0", root_package = "swc", link_workspace = "aspect_rules_swc", link_packages = {}, package = "fastq", version = "1.13.0", integrity = "sha512-YpkpUnK8od0o1hmeSc7UUs/eB/vIPWJYjKck2QKIzAf71Vm1AAQ3EbuZB3g2JIy+pg+ERD0vqI79KyZiB2e2Nw==", deps = { "reusify": "1.0.4", }, transitive_closure = { "fastq": ["1.13.0"], "reusify": ["1.0.4"], }, ) npm_import( name = "swc_cli__fill-range__7.0.1", root_package = "swc", link_workspace = "aspect_rules_swc", link_packages = {}, package = "fill-range", version = "7.0.1", integrity = "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", deps = { "to-regex-range": "5.0.1", }, transitive_closure = { "fill-range": ["7.0.1"], "to-regex-range": ["5.0.1"], "is-number": ["7.0.0"], }, ) npm_import( name = "swc_cli__glob-parent__5.1.2", root_package = "swc", link_workspace = "aspect_rules_swc", link_packages = {}, package = "glob-parent", version = "5.1.2", integrity = "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", deps = { "is-glob": "4.0.3", }, transitive_closure = { "glob-parent": ["5.1.2"], "is-glob": ["4.0.3"], "is-extglob": ["2.1.1"], }, ) npm_import( name = "swc_cli__is-extglob__2.1.1", root_package = "swc", link_workspace = "aspect_rules_swc", link_packages = {}, package = "is-extglob", version = "2.1.1", integrity = "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", transitive_closure = { "is-extglob": ["2.1.1"], }, ) npm_import( 
name = "swc_cli__is-glob__4.0.3", root_package = "swc", link_workspace = "aspect_rules_swc", link_packages = {}, package = "is-glob", version = "4.0.3", integrity = "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", deps = { "is-extglob": "2.1.1", }, transitive_closure = { "is-glob": ["4.0.3"], "is-extglob": ["2.1.1"], }, ) npm_import( name = "swc_cli__is-number__7.0.0", root_package = "swc", link_workspace = "aspect_rules_swc", link_packages = {}, package = "is-number", version = "7.0.0", integrity = "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", transitive_closure = { "is-number": ["7.0.0"], }, ) npm_import( name = "swc_cli__merge2__1.4.1", root_package = "swc", link_workspace = "aspect_rules_swc", link_packages = {}, package = "merge2", version = "1.4.1", integrity = "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", transitive_closure = { "merge2": ["1.4.1"], }, ) npm_import( name = "swc_cli__micromatch__4.0.5", root_package = "swc", link_workspace = "aspect_rules_swc", link_packages = {}, package = "micromatch", version = "4.0.5", integrity = "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", deps = { "braces": "3.0.2", "picomatch": "2.3.1", }, transitive_closure = { "micromatch": ["4.0.5"], "braces": ["3.0.2"], "picomatch": ["2.3.1"], "fill-range": ["7.0.1"], "to-regex-range": ["5.0.1"], "is-number": ["7.0.0"], }, ) npm_import( name = "swc_cli__picomatch__2.3.1", root_package = "swc", link_workspace = "aspect_rules_swc", link_packages = {}, package = "picomatch", version = "2.3.1", integrity = "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", transitive_closure = { "picomatch": ["2.3.1"], }, ) npm_import( name = "swc_cli__queue-microtask__1.2.3", root_package = "swc", link_workspace = "aspect_rules_swc", link_packages 
= {}, package = "queue-microtask", version = "1.2.3", integrity = "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", transitive_closure = { "queue-microtask": ["1.2.3"], }, ) npm_import( name = "swc_cli__reusify__1.0.4", root_package = "swc", link_workspace = "aspect_rules_swc", link_packages = {}, package = "reusify", version = "1.0.4", integrity = "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", transitive_closure = { "reusify": ["1.0.4"], }, ) npm_import( name = "swc_cli__run-parallel__1.2.0", root_package = "swc", link_workspace = "aspect_rules_swc", link_packages = {}, package = "run-parallel", version = "1.2.0", integrity = "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", deps = { "queue-microtask": "1.2.3", }, transitive_closure = { "run-parallel": ["1.2.0"], "queue-microtask": ["1.2.3"], }, ) npm_import( name = "swc_cli__slash__3.0.0", root_package = "swc", link_workspace = "aspect_rules_swc", link_packages = {}, package = "slash", version = "3.0.0", integrity = "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", transitive_closure = { "slash": ["3.0.0"], }, ) npm_import( name = "swc_cli__source-map__0.7.3", root_package = "swc", link_workspace = "aspect_rules_swc", link_packages = {}, package = "source-map", version = "0.7.3", integrity = "sha512-CkCj6giN3S+n9qrYiBTX5gystlENnRW5jZeNLHpe6aue+SrHcG5VYwujhW9s4dY31mEGsxBDrHR6oI69fTXsaQ==", transitive_closure = { "source-map": ["0.7.3"], }, ) npm_import( name = "swc_cli__to-regex-range__5.0.1", root_package = "swc", link_workspace = "aspect_rules_swc", link_packages = {}, package = "to-regex-range", version = "5.0.1", integrity = "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", deps = { "is-number": "7.0.0", }, transitive_closure = { "to-regex-range": ["5.0.1"], 
"is-number": ["7.0.0"], }, )
13,282
5,737
""" Neural network modules for WaveNet References : https://arxiv.org/pdf/1609.03499.pdf https://github.com/ibab/tensorflow-wavenet https://qiita.com/MasaEguchi/items/cd5f7e9735a120f27e2a https://github.com/musyoku/wavenet/issues/4 """ import torch import numpy as np from utils.exceptions import InputSizeError class DilatedCausalConv1d(torch.nn.Module): """Dilated Causal Convolution for WaveNet""" def __init__(self, channels, dilation=1): super(DilatedCausalConv1d, self).__init__() self.conv = torch.nn.Conv1d(channels, channels, kernel_size=2, stride=1, # Fixed for WaveNet dilation=dilation, padding=0, # Fixed for WaveNet dilation bias=False) # Fixed for WaveNet but not sure def init_weights_for_test(self): for m in self.modules(): if isinstance(m, torch.nn.Conv1d): m.weight.data.fill_(1) def forward(self, x): output = self.conv(x) return output class CausalConv1d(torch.nn.Module): """Causal Convolution for WaveNet""" def __init__(self, in_channels, out_channels): super(CausalConv1d, self).__init__() # padding=1 for same size(length) between input and output for causal convolution self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size=2, stride=1, padding=1, bias=False) # Fixed for WaveNet but not sure def init_weights_for_test(self): for m in self.modules(): if isinstance(m, torch.nn.Conv1d): m.weight.data.fill_(1) def forward(self, x): output = self.conv(x) # remove last value for causal convolution return output[:, :, :-1] class ResidualBlock(torch.nn.Module): def __init__(self, res_channels, skip_channels, dilation): """ Residual block :param res_channels: number of residual channel for input, output :param skip_channels: number of skip channel for output :param dilation: """ super(ResidualBlock, self).__init__() self.dilated = DilatedCausalConv1d(res_channels, dilation=dilation) self.conv_res = torch.nn.Conv1d(res_channels, res_channels, 1) self.conv_skip = torch.nn.Conv1d(res_channels, skip_channels, 1) self.gate_tanh = torch.nn.Tanh() 
self.gate_sigmoid = torch.nn.Sigmoid() def forward(self, x, skip_size): """ :param x: :param skip_size: The last output size for loss and prediction :return: """ output = self.dilated(x) # PixelCNN gate gated_tanh = self.gate_tanh(output) gated_sigmoid = self.gate_sigmoid(output) gated = gated_tanh * gated_sigmoid # Residual network output = self.conv_res(gated) input_cut = x[:, :, -output.size(2):] output += input_cut # Skip connection skip = self.conv_skip(gated) skip = skip[:, :, -skip_size:] return output, skip class ResidualStack(torch.nn.Module): def __init__(self, layer_size, stack_size, res_channels, skip_channels): """ Stack residual blocks by layer and stack size :param layer_size: integer, 10 = layer[dilation=1, dilation=2, 4, 8, 16, 32, 64, 128, 256, 512] :param stack_size: integer, 5 = stack[layer1, layer2, layer3, layer4, layer5] :param res_channels: number of residual channel for input, output :param skip_channels: number of skip channel for output :return: """ super(ResidualStack, self).__init__() self.layer_size = layer_size self.stack_size = stack_size self.res_blocks = self.stack_res_block(res_channels, skip_channels) @staticmethod def _residual_block(res_channels, skip_channels, dilation): block = ResidualBlock(res_channels, skip_channels, dilation) if torch.cuda.device_count() > 1: block = torch.nn.DataParallel(block) if torch.cuda.is_available(): block.cuda() return block def build_dilations(self): dilations = [] # 5 = stack[layer1, layer2, layer3, layer4, layer5] for s in range(0, self.stack_size): # 10 = layer[dilation=1, dilation=2, 4, 8, 16, 32, 64, 128, 256, 512] for l in range(0, self.layer_size): dilations.append(2 ** l) return dilations def stack_res_block(self, res_channels, skip_channels): """ Prepare dilated convolution blocks by layer and stack size :return: """ res_blocks = [] dilations = self.build_dilations() for dilation in dilations: block = self._residual_block(res_channels, skip_channels, dilation) res_blocks.append(block) 
return res_blocks def forward(self, x, skip_size): """ :param x: :param skip_size: The last output size for loss and prediction :return: """ output = x skip_connections = [] for res_block in self.res_blocks: # output is the next input output, skip = res_block(output, skip_size) skip_connections.append(skip) return torch.stack(skip_connections) class DensNet(torch.nn.Module): def __init__(self, channels): """ The last network of WaveNet :param channels: number of channels for input and output :return: """ super(DensNet, self).__init__() self.conv1 = torch.nn.Conv1d(channels, channels, 1) self.conv2 = torch.nn.Conv1d(channels, channels, 1) self.relu = torch.nn.ReLU() self.softmax = torch.nn.Softmax(dim=1) def forward(self, x): output = self.relu(x) output = self.conv1(output) output = self.relu(output) output = self.conv2(output) output = self.softmax(output) return output class WaveNet(torch.nn.Module): def __init__(self, layer_size, stack_size, in_channels, res_channels): """ Stack residual blocks by layer and stack size :param layer_size: integer, 10 = layer[dilation=1, dilation=2, 4, 8, 16, 32, 64, 128, 256, 512] :param stack_size: integer, 5 = stack[layer1, layer2, layer3, layer4, layer5] :param in_channels: number of channels for input data. 
skip channel is same as input channel :param res_channels: number of residual channel for input, output :return: """ super(WaveNet, self).__init__() self.receptive_fields = self.calc_receptive_fields(layer_size, stack_size) self.causal = CausalConv1d(in_channels, res_channels) self.res_stack = ResidualStack(layer_size, stack_size, res_channels, in_channels) self.densnet = DensNet(in_channels) @staticmethod def calc_receptive_fields(layer_size, stack_size): layers = [2 ** i for i in range(0, layer_size)] * stack_size num_receptive_fields = np.sum(layers) return int(num_receptive_fields) def calc_output_size(self, x): output_size = int(x.size(2)) - self.receptive_fields self.check_input_size(x, output_size) return output_size def check_input_size(self, x, output_size): if output_size < 1: raise InputSizeError(int(x.size(2)), self.receptive_fields, output_size) def forward(self, x): """ The size of timestep(3rd dimention) has to be bigger than receptive fields :param x: Tensor[batch, timestep, channels] :return: Tensor[batch, timestep, channels] """ #output = x.transpose(1, 2) #output_size = self.calc_output_size(output) #output = self.causal(output) output_size = self.calc_output_size(x) output = self.causal(x) skip_connections = self.res_stack(output, output_size) output = torch.sum(skip_connections, dim=0) output = self.densnet(output) return output.transpose(1, 2).contiguous()
8,294
2,783
import requests
import yaml
from fair_test import FairTest, FairTestEvaluation


class MetricTest(FairTest):
    """FAIR F2 test: can a machine retrieve *structured* metadata for a resource?

    Strategy (most to least strict): content-negotiated RDF/JSON-LD parsed by
    ``retrieve_metadata``, then plain JSON, then YAML.
    """
    metric_path = 'f2-structured-metadata'
    applies_to_principle = 'F2'
    title = 'Metadata is structured'
    description = """Tests whether a machine is able to find structured metadata.
This could be (for example) RDFa, embedded json, json-ld, or content-negotiated structured metadata such as RDF Turtle.
This assessment will try to extract metadata from the resource URI:

- Search for structured metadata at the resource URI.
- Use HTTP requests with content-negotiation (RDF, JSON-LD, JSON, YAML),
- Extract metadata from the HTML landing page using extruct"""
    topics = ['metadata']
    author = 'https://orcid.org/0000-0002-1501-1082'
    metric_version = '0.1.0'
    # Known subjects and their expected score, used as regression fixtures.
    test_test={
        'https://doi.org/10.1594/PANGAEA.908011': 1,
        'https://w3id.org/ejp-rd/fairdatapoints/wp13/dataset/c5414323-eab1-483f-a883-77951f246972': 1,
        'https://doi.org/10.1186/2041-1480-5-14': 1,
        'https://www.kaggle.com/allen-institute-for-ai/CORD-19-research-challenge': 1,
        'https://doi.org/10.5281/zenodo.5541440': 1,
        'https://doi.org/10.34894/DR3I2A': 1,
        'https://doi.org/10.1045/november2015-vandesompel': 1,
        'https://doi.org/10.1016/j.jbi.2008.03.004': 1,
        'https://doi.org/10.1038/sdata.2016.18': 1,
        'https://doi.org/10.1016/J.JBI.2019.103292': 1,
        'https://w3id.org/AmIFAIR': 1,
        'https://purl.uniprot.org/uniprot/P51587': 1,
        'https://w3id.org/FAIR_Evaluator/evaluations/6259.json': 1,
        'http://example.com': 0,
        # 'https://w3id.org/FAIR_Tests/tests/gen2_structured_metadata': 0,
        # FAIRsharing not consistent, most of the time give 1, but sometimes fails (their server timeout)
        # 'https://doi.org/10.25504/FAIRsharing.jptb1m': 1,
        # 'https://www.proteinatlas.org/ENSG00000084110-HAL': 1,
        # 'https://data.rivm.nl/meta/srv/eng/rdf.metadata.get?uuid=1c0fcd57-1102-4620-9cfa-441e93ea5604&approved=true': 1,
    }

    def evaluate(self, eval: FairTestEvaluation):
        """Run the F2 check on ``eval.subject`` and return the evaluation response."""
        eval.info('Checking if machine readable data (e.g. RDF, JSON-LD) can be retrieved using content-negotiation at ' + eval.subject)
        g = eval.retrieve_metadata(eval.subject)

        # `retrieve_metadata` may return an RDF graph, a list/dict of JSON-LD
        # objects, or nothing at all.  BUG FIX: guard against None before
        # calling len() on it -- a None result previously raised TypeError.
        if g is not None and not isinstance(g, (list, dict)) and len(g) > 1:
            eval.success(f'Successfully found and parsed RDF metadata. It contains {str(len(g))} triples')
        elif isinstance(g, (list, dict)) and len(g) > 1:
            eval.success(f'Successfully found and parsed structured metadata. It contains {str(len(g))} objects')
        else:
            # eval.failure(f"No RDF metadata found at the subject URL {eval.subject}")
            eval.warn('No RDF metadata found, checking for JSON')
            try:
                r_json = requests.get(eval.subject, headers={'accept': 'application/json'})
                metadata = r_json.json()
                eval.data['metadata_json'] = metadata
                eval.success('Successfully found and parsed JSON metadata')
            # BUG FIX: narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            except Exception:
                eval.warn('No JSON metadata found, checking for YAML')
                try:
                    r_yaml = requests.get(eval.subject, headers={'accept': 'text/yaml'})
                    metadata = yaml.load(str(r_yaml.text), Loader=yaml.FullLoader)
                    eval.data['metadata_yaml'] = metadata
                    eval.success('Successfully found and parsed YAML metadata')
                except Exception:
                    eval.failure('No YAML metadata found')
        return eval.response()
3,643
1,341
import asyncio

from virtool_workflow_runtime._redis import connect, VIRTOOL_JOBS_CHANNEL, job_id_queue
from virtool_workflow_runtime.runtime import execute_from_redis

JOB_IDs = [str(n) for n in range(3)]


async def assert_correct_job_ids():
    """Consume ids from the queue and check they arrive in publication order."""
    incoming = job_id_queue()
    for expected in JOB_IDs:
        received = await incoming.__anext__()
        assert received == expected


async def publish_job_ids():
    """Publish every test job id on the jobs channel."""
    async with connect() as redis:
        for job_id in JOB_IDs:
            await redis.publish(VIRTOOL_JOBS_CHANNEL, job_id)


async def run_workflows_from_redis(test_workflow):
    """Drive one workflow execution per published id and check each result."""
    executions = execute_from_redis(workflow=test_workflow)
    for _ in range(len(JOB_IDs)):
        outcome = await executions.__anext__()
        for key in ("start", "clean", "1", "2"):
            assert outcome[key]


async def test_job_id_queue():
    """Publishing and consuming concurrently yields the ids in order."""
    await asyncio.gather(assert_correct_job_ids(), publish_job_ids())


async def test_execute_from_redis(test_workflow):
    """Workflows execute once per id published while the runner listens."""
    await asyncio.gather(run_workflows_from_redis(test_workflow), publish_job_ids())
1,017
365
import abc

from probnum.filtsmooth.statespace.transition import Transition

__all__ = ["DiscreteModel"]


class DiscreteModel(Transition):
    """
    Transition models for discretely indexed processes.

    Transformations of the form

    .. math:: x_{t + \\Delta t} \\sim p(x_{t + \\Delta t} | x_t) .

    As such, compatible with Bayesian filtering and smoothing algorithms.

    See Also
    --------
    :class:`ContinuousModel`
        Transition models for continuously indexed processes.
    :class:`BayesFiltSmooth`
        Bayesian filtering and smoothing algorithms.
    """

    @abc.abstractmethod
    def transition_realization(self, real, start, stop, **kwargs):
        """Propagate a realization ``real`` of the state from ``start`` to ``stop``."""
        raise NotImplementedError

    @abc.abstractmethod
    def transition_rv(self, rv, start, stop, **kwargs):
        """Propagate a random variable ``rv`` from ``start`` to ``stop``."""
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def dimension(self):
        """Dimension of the state space this model transitions in."""
        raise NotImplementedError
928
273
'''This plots the bubble results
'''
from pickle import load
from numpy import array, atleast_2d, hstack, where, zeros
from matplotlib.pyplot import close, subplots
from examples.temp_bubbles.common import DataObject
from seaborn import heatmap

# Sweep dimensions: i indexes the single-household density exponent,
# j indexes the bubbled density exponent (see heatmap axis labels below).
no_i_vals = 3
no_j_vals = 3

# Peak infectious prevalence and end-of-run ("January") recovered prevalence
# for each of the four bubbling policies, per (i, j) sweep cell.
peaks_1 = zeros((no_i_vals,no_j_vals))
peaks_2 = zeros((no_i_vals,no_j_vals))
peaks_3 = zeros((no_i_vals,no_j_vals))
peaks_4 = zeros((no_i_vals,no_j_vals))
jan_antiprev_1 = zeros((no_i_vals,no_j_vals))
jan_antiprev_2 = zeros((no_i_vals,no_j_vals))
jan_antiprev_3 = zeros((no_i_vals,no_j_vals))
jan_antiprev_4 = zeros((no_i_vals,no_j_vals))

for i in range(no_i_vals):
    filename_stem = 'outputs/temp_bubbles/sweep_results_' + str(i)
    # Baseline (no-bubbling) epidemic for this i value.
    with open(filename_stem + '.pkl', 'rb') as f:
        unmerged_population,baseline_H, baseline_time, baseline_S, baseline_E, baseline_I, baseline_R = load(f)
    ave_hh_size = unmerged_population.ave_hh_size
    fig, ax = subplots(1, 1, sharex=True)
    print(baseline_time.shape)
    print(baseline_E.shape)
    ax.plot(baseline_time, baseline_E, label='E')
    ax.plot(baseline_time, baseline_I, label='I')
    ax.plot(baseline_time, baseline_R, label='R')
    ax.legend(ncol=1, bbox_to_anchor=(1,0.50))
    fig.savefig('sweep_baseline_epidemic' + str(i) +'.png', bbox_inches='tight', dpi=300)
    close()
    for j in range(no_j_vals):
        filename = filename_stem + str(j)
        with open(filename + '.pkl', 'rb') as f:
            merged_population2, merged_population3, merged_output = load(f)
        fig, ax = subplots(1, 1, sharex=True)
        lgd=['No bubbling','Policy 1', 'Policy 2', 'Policy 3', 'Policy 4',]
        # Policy 1 merges three households (hence the 1/3 weighting and the
        # three state columns 2/6/10 for I, 3/7/11 for R); policies 2-4 merge
        # two households (1/2 weighting, columns 2/6 and 3/7).
        # Per-capita prevalences are obtained by dividing out ave_hh_size.
        merge_I_1 = (1/3) * merged_output.H_merge_1.T.dot(
            merged_population3.states[:, 2] +
            merged_population3.states[:, 6] +
            merged_population3.states[:, 10])/ave_hh_size
        postmerge_I_1 = merged_output.H_postmerge_1.T.dot(unmerged_population.states[:, 2])/ave_hh_size
        peaks_1[i,j] = max(hstack((merge_I_1, postmerge_I_1)))
        merge_R_1 = (1/3) * merged_output.H_merge_1.T.dot(
            merged_population3.states[:, 3] +
            merged_population3.states[:, 7] +
            merged_population3.states[:,11])/ave_hh_size
        postmerge_R_1 = merged_output.H_postmerge_1.T.dot(unmerged_population.states[:, 3])/ave_hh_size
        jan_antiprev_1[i,j] = postmerge_R_1[-1]
        merge_I_2 = (1/2) * merged_output.H_merge_2.T.dot(
            merged_population2.states[:, 2] +
            merged_population2.states[:, 6])/ave_hh_size
        postmerge_I_2 = merged_output.H_postmerge_2.T.dot(unmerged_population.states[:, 2])/ave_hh_size
        peaks_2[i,j] = max(hstack((merge_I_2, postmerge_I_2)))
        merge_R_2 = (1/2) * merged_output.H_merge_2.T.dot(
            merged_population2.states[:, 3] +
            merged_population2.states[:, 7])/ave_hh_size
        postmerge_R_2 = merged_output.H_postmerge_2.T.dot(unmerged_population.states[:, 3])/ave_hh_size
        jan_antiprev_2[i,j] = postmerge_R_2[-1]
        merge_I_3 = (1/2) * merged_output.H_merge_3.T.dot(
            merged_population2.states[:, 2] +
            merged_population2.states[:, 6])/ave_hh_size
        postmerge_I_3 = merged_output.H_postmerge_3.T.dot(unmerged_population.states[:, 2])/ave_hh_size
        peaks_3[i,j] = max(hstack((merge_I_3, postmerge_I_3)))
        merge_R_3 = (1/2) * merged_output.H_merge_3.T.dot(
            merged_population2.states[:, 3] +
            merged_population2.states[:, 7])/ave_hh_size
        postmerge_R_3 = merged_output.H_postmerge_3.T.dot(unmerged_population.states[:, 3])/ave_hh_size
        jan_antiprev_3[i,j] = postmerge_R_3[-1]
        merge_I_4 = (1/2) * merged_output.H_merge_4.T.dot(
            merged_population2.states[:, 2] +
            merged_population2.states[:, 6])/ave_hh_size
        postmerge_I_4 = merged_output.H_postmerge_4.T.dot(unmerged_population.states[:, 2])/ave_hh_size
        peaks_4[i,j] = max(hstack((merge_I_4, postmerge_I_4)))
        merge_R_4 = (1/2) * merged_output.H_merge_4.T.dot(
            merged_population2.states[:, 3] +
            merged_population2.states[:, 7])/ave_hh_size
        postmerge_R_4 = merged_output.H_postmerge_4.T.dot(unmerged_population.states[:, 3])/ave_hh_size
        jan_antiprev_4[i,j] = postmerge_R_4[-1]
        # Recovered-prevalence comparison plot (baseline vs the four policies),
        # scaled to percentages.
        ax.plot(baseline_time, 100*baseline_R, label=lgd[0])
        ax.plot(hstack((merged_output.t_merge_1,merged_output.t_postmerge_1)),100*hstack((merge_R_1,postmerge_R_1)), label=lgd[1])
        ax.plot(hstack((merged_output.t_merge_2,merged_output.t_postmerge_2)),100*hstack((merge_R_2,postmerge_R_2)), label=lgd[2])
        ax.plot(hstack((merged_output.t_merge_3,merged_output.t_postmerge_3)),100*hstack((merge_R_3,postmerge_R_3)), label=lgd[3])
        ax.plot(hstack((merged_output.t_merge_4,merged_output.t_postmerge_4)),100*hstack((merge_R_4,postmerge_R_4)), label=lgd[4])
        ax.set_xlabel('Time in days')
        ax.set_ylabel('Percentage recovered')
        ax.set_xlim([340,395])
        # ax.set_ylim([5.5,9.0])
        ax.legend(ncol=1, bbox_to_anchor=(1,0.50))
        fig.savefig('R_by_strategy_sweep' + str(i) + str(j) + '.png', bbox_inches='tight', dpi=300)
        close()
        # Infectious-prevalence comparison plot for the same sweep cell.
        fig, ax = subplots(1, 1, sharex=True)
        ax.plot(baseline_time, 100*baseline_I, label=lgd[0])
        ax.plot(hstack((merged_output.t_merge_1,merged_output.t_postmerge_1)),100*hstack((merge_I_1,postmerge_I_1)), label=lgd[1])
        ax.plot(hstack((merged_output.t_merge_2,merged_output.t_postmerge_2)),100*hstack((merge_I_2,postmerge_I_2)), label=lgd[2])
        ax.plot(hstack((merged_output.t_merge_3,merged_output.t_postmerge_3)),100*hstack((merge_I_3,postmerge_I_3)), label=lgd[3])
        ax.plot(hstack((merged_output.t_merge_4,merged_output.t_postmerge_4)),100*hstack((merge_I_4,postmerge_I_4)), label=lgd[4])
        ax.set_xlabel('Time in days')
        ax.set_ylabel('Percentage infectious')
        ax.set_xlim([340,395])
        # ax.set_ylim([0,1])
        ax.legend(ncol=1, bbox_to_anchor=(1,0.50))
        fig.savefig('I_by_strategy_sweep' + str(i) + str(j) + '.png', bbox_inches='tight', dpi=300)
        close()

pol_label = []
for pol in range(4):
    pol_label.append('Policy'+str(pol+1))

# Shared colour scale across all four peak heatmaps so they are comparable.
peaks_min = array([peaks_1.min(),peaks_2.min(),peaks_3.min(),peaks_4.min()]).min()
peaks_max = array([peaks_1.max(),peaks_2.max(),peaks_3.max(),peaks_4.max()]).max()

fig, ax = subplots(1,1,sharex=True)
heatmap(peaks_1,square=True, vmin=peaks_min, vmax=peaks_max)
ax.set_title(pol_label[0])
ax.set_ylabel('Single household density exponent')
ax.set_xlabel('Bubbled density exponent')
fig.savefig('peaks_1.png',bbox_inches='tight', dpi=300)
close()

fig, ax = subplots(1,1,sharex=True)
heatmap(peaks_2,square=True, vmin=peaks_min, vmax=peaks_max)
ax.set_title(pol_label[1])
ax.set_ylabel('Single household density exponent')
ax.set_xlabel('Bubbled density exponent')
fig.savefig('peaks_2.png',bbox_inches='tight', dpi=300)
close()

fig, ax = subplots(1,1,sharex=True)
heatmap(peaks_3,square=True, vmin=peaks_min, vmax=peaks_max)
ax.set_title(pol_label[2])
ax.set_ylabel('Single household density exponent')
ax.set_xlabel('Bubbled density exponent')
fig.savefig('peaks_3.png',bbox_inches='tight', dpi=300)
close()

fig, ax = subplots(1,1,sharex=True)
heatmap(peaks_4,square=True, vmin=peaks_min, vmax=peaks_max)
ax.set_title(pol_label[3])
ax.set_ylabel('Single household density exponent')
ax.set_xlabel('Bubbled density exponent')
fig.savefig('peaks_4.png',bbox_inches='tight', dpi=300)
close()

# Shared colour scale across the four end-of-run recovered heatmaps.
antiprev_min = array([jan_antiprev_1.min(),jan_antiprev_2.min(),jan_antiprev_3.min(),jan_antiprev_4.min()]).min()
antiprev_max = array([jan_antiprev_1.max(),jan_antiprev_2.max(),jan_antiprev_3.max(),jan_antiprev_4.max()]).max()

fig, ax = subplots(1,1,sharex=True)
heatmap(jan_antiprev_1,square=True, vmin=antiprev_min, vmax=antiprev_max)
ax.set_title(pol_label[0])
ax.set_ylabel('Single household density exponent')
ax.set_xlabel('Bubbled density exponent')
fig.savefig('jan_antiprev_1.png',bbox_inches='tight', dpi=300)
close()

fig, ax = subplots(1,1,sharex=True)
heatmap(jan_antiprev_2,square=True, vmin=antiprev_min, vmax=antiprev_max)
ax.set_title(pol_label[1])
ax.set_ylabel('Single household density exponent')
ax.set_xlabel('Bubbled density exponent')
fig.savefig('jan_antiprev_2.png',bbox_inches='tight', dpi=300)
close()

fig, ax = subplots(1,1,sharex=True)
heatmap(jan_antiprev_3,square=True, vmin=antiprev_min, vmax=antiprev_max)
ax.set_title(pol_label[2])
ax.set_ylabel('Single household density exponent')
ax.set_xlabel('Bubbled density exponent')
fig.savefig('jan_antiprev_3.png',bbox_inches='tight', dpi=300)
close()

fig, ax = subplots(1,1,sharex=True)
heatmap(jan_antiprev_4,square=True, vmin=antiprev_min, vmax=antiprev_max)
ax.set_title(pol_label[3])
ax.set_ylabel('Single household density exponent')
ax.set_xlabel('Bubbled density exponent')
fig.savefig('jan_antiprev_4.png',bbox_inches='tight', dpi=300)
close()
8,934
3,841
#!/usr/bin/env python3
""" Sample script to extract and set level thumbnails. """
import argparse
import io
import os
import sys

from dustmaker import DFReader, DFWriter
from dustmaker.cmd.common import (
    run_utility,
    CliUtility,
)
from dustmaker.variable import VariableBool


class Thumbnail(CliUtility):
    """CLI utility for adjusting level thumbnails"""

    def setup_parser(self, parser: argparse.ArgumentParser) -> None:
        """Read CLI arguments"""
        parser.description = "extract or update a level thumbnail"
        parser.add_argument("level")
        parser.add_argument("image")
        parser.add_argument(
            "--force",
            action="store_const",
            const=True,
            default=False,
            required=False,
            help="allow overwrite of existing image",
        )
        parser.add_argument(
            "--update",
            action="store_const",
            const=True,
            default=False,
            required=False,
            help="read in the image and update the level thumbnail",
        )
        parser.add_argument(
            "--auto-convert",
            action="store_const",
            const=True,
            default=False,
            required=False,
            help="automatically convert to PNG format (implies --update)",
        )
        parser.add_argument(
            "--auto-scale",
            action="store_const",
            const=True,
            default=False,
            required=False,
            help="automaticaly scale image to expected 382 x 182 size (implies --auto-scale)"
            if False
            else "automaticaly scale image to expected 382 x 182 size (implies --auto-convert)",
        )

    def main(self, args) -> int:
        """thumbnail CLI entrypoint

        Extract mode (default): write the level's stored screenshot to `image`.
        Update mode (--update / --auto-convert / --auto-scale): replace the
        level's screenshot with the contents of `image` and rewrite the level
        file in place.
        """
        # The scale/convert/update flags imply each other in a chain.
        if args.auto_scale:
            args.auto_convert = True
        if args.auto_convert:
            args.update = True

        with DFReader(open(args.level, "rb")) as reader:
            level, region_offsets = reader.read_level_ex()
            region_data = b""
            if args.update:
                # Only needed when rewriting the level: keep the raw region
                # bytes so they can be written back untouched.
                region_data = reader.read_bytes(region_offsets[-1])

        if not args.update:
            # Extract mode: dump the stored screenshot bytes to disk.
            if not args.force and os.path.exists(args.image):
                print("path already exists, use --force to ignore")
                return 1
            with open(args.image, "wb") as fout:
                fout.write(level.sshot)
            return 0

        if args.auto_convert:
            try:
                # pylint: disable=import-outside-toplevel
                from PIL import Image  # type: ignore
            except ImportError:
                print(
                    "failed to import PIL, cannot convert image (try `pip install pillow`)"
                )
                return 1

            with Image.open(args.image) as im:
                if args.auto_scale:
                    # 382 x 182 is the expected in-game thumbnail size.
                    im = im.resize((382, 182))
                with io.BytesIO() as io_out:
                    im.save(io_out, format="PNG")
                    level.sshot = io_out.getvalue()
        else:
            # Trust the caller that the file is already valid PNG data.
            with open(args.image, "rb") as fimg:
                level.sshot = fimg.read()

        level.variables["icon_taken"] = VariableBool(True)

        with DFWriter(open(args.level, "wb")) as writer:
            writer.write_level_ex(level, region_offsets, region_data)
        return 0


if __name__ == "__main__":
    sys.exit(run_utility(Thumbnail))
3,363
928
from common.pages import Page
import allure
from common.pages import is_displayed, is_enabled, is_disabled
from delayed_assert import expect, assert_expectations
from allure import severity, severity_level


@severity(severity_level.CRITICAL)
@allure.title("Check presented elements on main page")
def test_home_page(browser, config_host):
    """Freshly loaded main page shows the sidebar chrome and no app content."""
    page = Page(browser, config_host)
    page.load()
    sidebar_checks = [
        ("sidebar", "Sidebar not found"),
        ("sidebar_header", "Header on sidebar not found"),
        ("sidebar_history_button", "History button on sidebar not found"),
        ("sidebar_scripts_list", "Scripts list not found"),
        ("sidebar_search_button", "Search button not found"),
        ("sidebar_header_link", "Header link not found"),
    ]
    for attr, message in sidebar_checks:
        expect(is_displayed(getattr(page, attr)), message)
    expect(not is_displayed(page.main_app_content), "App content is displayed")
    assert_expectations()


@severity(severity_level.CRITICAL)
@allure.title("Check appeared app content on random script click")
def test_app_content(browser, config_host):
    """Clicking each script link reveals the app content panel and controls."""
    page = Page(browser, config_host)
    for script_link in page.all_script_links:
        script_link.click()
        displayed_checks = [
            ("sidebar", "Sidebar not found"),
            ("sidebar_header", "Header on sidebar not found"),
            ("sidebar_history_button", "History button on sidebar not found"),
            ("sidebar_scripts_list", "Scripts list not found"),
            ("sidebar_search_button", "Search button not found"),
            ("sidebar_header_link", "Header link not found"),
            ("main_app_content", "App content not found"),
            ("script_header", "Script header not found"),
            ("actions_panel", "Action panel not found"),
            ("button_execute", "Execute button not found"),
        ]
        for attr, message in displayed_checks:
            expect(is_displayed(getattr(page, attr)), message)
        expect(is_enabled(page.button_execute), "Execute button not enabled")
        expect(is_displayed(page.button_stop), "Stop button not found")
        expect(is_disabled(page.button_stop), "Stop button not disabled")
        expect(not is_displayed(page.log), "Log panel is displayed before script run")
        expect(not is_displayed(page.users_input), "Input field is displayed before script run")
    assert_expectations()
2,591
785
from werkzeug.security import check_password_hash, generate_password_hash

from Database import DBController


class UserManager:
    """Creates and looks up rows in the `User` table for a single username."""

    def __init__(self, username):
        self.username = username

    def createUser(self, password):
        """Insert a new user row, storing only a salted hash of *password*.

        Returns whatever the committing DB call returns.
        """
        db = DBController()
        cur = db.cursor()
        # Never store the plain-text password; hash it first.
        args = (self.username, generate_password_hash(password))
        sql = ("INSERT INTO User (username, password) VALUES (%s, %s)")
        cur.execute(sql, args)
        return db.commit(True)

    def exists(self):
        """Return True if a row with this username already exists."""
        db = DBController()
        cur = db.cursor()
        # BUG FIX: the query parameters must be a sequence. `(self.username)`
        # is just a parenthesised string, not a 1-tuple -- the trailing comma
        # is required for correct DB-API parameter binding.
        cur.execute("SELECT * FROM User WHERE username=%s", (self.username,))
        result = cur.fetchone()
        # `is not None` instead of `!= None` for identity comparison.
        return result is not None
714
191
import xarray as xray
import numpy as np
import pandas as pd
from .ploteos import correct_mon
from .ploteos_e import correct_e


class cpp_e(object):
    """Daily bias correction (scaling and empirical quantile mapping) of
    gridded model data against station observations.

    `datos` is expected to be a dict-like with keys:
      - 'coords': DataFrame of station metadata ('nombre', 'lat', 'lon')
      - 'do': DataFrame of daily station observations, one column per station
      - 'dm': model data for the calibration (historical) period (xarray, lat/lon dims)
      - 'da': model data for the application period (xarray, lat/lon dims)
    (Inferred from how the keys are used below -- confirm against callers.)
    """

    def __init__(self,datos):
        self.data=datos

    def day_scaling(self,acu=False,f_cal=None,f_apl=None):
        # APPLY DAILY SCALING ---------------------------
        # acu=True treats the variable as accumulated (e.g. precipitation):
        # multiplicative monthly factors; otherwise additive factors.
        # f_cal/f_apl are (start, end) date pairs for the calibration and
        # application windows.
        print('realizando Scaling diario')
        cnj_do=pd.DataFrame()
        cnj_dm=pd.DataFrame()
        cnj_da=pd.DataFrame()
        cnj_cor=pd.DataFrame()
        for name in self.data['coords']['nombre']:
            print(name)
            coords=self.data['coords']
            data=self.data['do'][name]
            # Mask out whole months in which any daily value is missing.
            filtro_mes=data.resample('MS').agg(pd.Series.sum,skipna=False)
            data=data.where(~filtro_mes.reindex(data.index,method='ffill').isna())
            lat=coords.loc[coords['nombre']==name]['lat'].values[0]
            lon=coords.loc[coords['nombre']==name]['lon'].values[0]
            do=data.to_xarray()
            do=do.rename({'index':'time'})
            # Interpolate the gridded model to the station location.
            dm=self.data['dm'].interp(lat=lat,lon=lon)
            dm=dm.drop(['lat','lon'])
            da=self.data['da'].interp(lat=lat,lon=lon)
            da=da.drop(['lat','lon'])
            if f_cal==None or f_apl==None:
                pass
            else:
                do=do.loc[f_cal[0]:f_cal[1]]
                dm=dm.loc[f_cal[0]:f_cal[1]]
                da=da.loc[f_apl[0]:f_apl[1]]
            do['time']=dm['time']
            # -888 acts as a missing-value sentinel in the observations.
            dm=dm.where(do>-888)
            if acu==True:
                # Multiplicative correction; clamp the divisor away from zero.
                divisor=dm.resample(time='MS').sum('time',skipna=False).groupby('time.month').mean('time')
                divisor=divisor.where(~(divisor<0.01),0.01)
                fc=(do.resample(time='MS').sum('time',skipna=False).groupby('time.month').mean('time')/
                    divisor)
                cor=(da.groupby('time.month')*fc)
            else:
                # Additive correction of monthly mean bias.
                fc=(do.resample(time='MS').mean('time',skipna=False).groupby('time.month').mean('time')-
                    dm.resample(time='MS').mean('time',skipna=False).groupby('time.month').mean('time'))
                cor=(da.groupby('time.month')+fc)
            cor=cor.drop(['month'])
            cnj_do.loc[:,name]=do.to_dataframe(name=name).iloc[:,0]
            cnj_dm.loc[:,name]=dm.to_dataframe(name=name).iloc[:,0]
            cnj_da.loc[:,name]=da.to_dataframe(name=name).iloc[:,0]
            cnj_cor.loc[:,name]=cor.to_dataframe(name=name).iloc[:,0]
        print('Terminado Scaling diario')
        return correct_e(cnj_cor,cnj_da,cnj_dm,cnj_do,acu=acu)

    def day_eqm(self,acu=False,f_cal=None,f_apl=None):
        self.acumulado=acu
        # APPLY DAILY EQM (empirical quantile mapping) ------------------------------
        print('realizando EQM diario')
        cnj_do=pd.DataFrame()
        cnj_dm=pd.DataFrame()
        cnj_da=pd.DataFrame()
        cnj_cor=pd.DataFrame()
        for name in self.data['coords']['nombre']:
            print(name)
            coords=self.data['coords']
            data=self.data['do'][name]
            # Mask out whole months in which any daily value is missing.
            filtro_mes=data.resample('MS').agg(pd.Series.sum,skipna=False)
            data=data.where(~filtro_mes.reindex(data.index,method='ffill').isna())
            lat=coords.loc[coords['nombre']==name]['lat'].values[0]
            lon=coords.loc[coords['nombre']==name]['lon'].values[0]
            do=data.to_xarray()
            do=do.rename({'index':'time'})
            dm=self.data['dm'].interp(lat=lat,lon=lon)
            dm=dm.drop(['lat','lon'])
            da=self.data['da'].interp(lat=lat,lon=lon)
            da=da.drop(['lat','lon'])
            if f_cal==None or f_apl==None:
                pass
            else:
                do=do.loc[f_cal[0]:f_cal[1]]
                dm=dm.loc[f_cal[0]:f_cal[1]]
                da=da.loc[f_apl[0]:f_apl[1]]
            do['time']=dm['time']
            dm=dm.where(do>-888)
            quantiles=np.arange(0.01,1,0.01)
            meses=[1,2,3,4,5,6,7,8,9,10,11,12]
            # The mapping is fitted and applied month by month.
            for mon in meses:
                do2=do.loc[do['time.month']==mon]
                dm2=dm.loc[dm['time.month']==mon]
                da2=da.loc[da['time.month']==mon]
                if acu==True:
                    # For accumulated variables, only wet values enter the
                    # quantile estimation.
                    do_f=do2.where(do2>=0.001)
                    dm_f=dm2.where(dm2>=0.001)
                    da_f=da2.where(da2>=0.001)
                else:
                    do_f=do2
                    dm_f=dm2
                    da_f=da2
                datos_his=dm_f
                datos_pro=da_f
                datos_obs_q=do_f.quantile(quantiles,dim='time')
                datos_his_q=dm_f.quantile(quantiles,dim='time')
                datos_pro_q=da_f.quantile(quantiles,dim='time')
                # Map each historical-model quantile bin onto the matching
                # observed quantile value.
                for quan in quantiles:
                    if quan==0.01:
                        datos_his_cor=datos_his.where(datos_his>datos_his_q.sel(quantile=0.02,method='nearest'),
                                                      datos_obs_q.sel(quantile=0.01,method='nearest'))
                    elif quan==0.99:
                        datos_his_cor=datos_his_cor.where(~(datos_his>=datos_his_q.sel(quantile=0.99,method='nearest')),
                                                          datos_obs_q.sel(quantile=0.99,method='nearest'))
                    else:
                        datos_his_cor=datos_his_cor.where(~((datos_his>=datos_his_q.sel(quantile=quan,method='nearest'))&
                                                            (datos_his<datos_his_q.sel(quantile=quan+0.01,method='nearest'))),
                                                          datos_obs_q.sel(quantile=quan,method='nearest'))
                deltas=datos_his_cor.quantile(quantiles,dim='time')-datos_his.quantile(quantiles,dim='time')
                # ADD THE QUANTILE DELTAS TO THE PROJECTED DATA.
                for quan in quantiles:
                    if quan==0.01:
                        datos_pro_cor=datos_pro.where(datos_pro>datos_pro_q.sel(quantile=0.02,method='nearest'),
                                                      datos_pro+deltas.sel(quantile=0.01,method='nearest'))
                    elif quan==0.99:
                        datos_pro_cor=datos_pro_cor.where(~(datos_pro>=datos_pro_q.sel(quantile=0.99,method='nearest')),
                                                          datos_pro+deltas.sel(quantile=0.99,method='nearest'))
                    else:
                        datos_pro_cor=datos_pro_cor.where(~((datos_pro>=datos_pro_q.sel(quantile=quan,method='nearest'))&
                                                            (datos_pro<datos_pro_q.sel(quantile=quan+0.01,method='nearest'))),
                                                          datos_pro+deltas.sel(quantile=quan,method='nearest'))
                if mon==1:
                    datos_his_cor2=datos_his_cor
                    datos_pro_cor2=datos_pro_cor
                else:
                    datos_his_cor2=xray.concat([datos_his_cor2,datos_his_cor],dim='time')
                    datos_pro_cor2=xray.concat([datos_pro_cor2,datos_pro_cor],dim='time')
            # NOTE(review): the next line assigns datos_his_cor2 from
            # datos_pro_cor2 -- this looks like a copy/paste typo (probably
            # meant datos_his_cor2.sortby(...)); datos_his_cor2 is unused
            # afterwards, so behavior is unaffected, but confirm intent.
            datos_his_cor2=datos_pro_cor2.sortby('time',ascending=True)
            datos_pro_cor2=datos_pro_cor2.sortby('time',ascending=True)
            if acu==True:
                #cor=da.where(~(da<0.1),0)
                #cor=cor.where(~(cor>=0.1),datos_pro_cor2)
                # For accumulated variables: fill dry days back in from the raw
                # projection and clip negatives to zero.
                cor=datos_pro_cor2
                cor=cor.where(cor.notnull(),da)
                cor=cor.where(~(cor<0),0)
            else:
                cor=datos_pro_cor2
            cnj_do.loc[:,name]=do.to_dataframe(name=name).iloc[:,0]
            cnj_dm.loc[:,name]=dm.to_dataframe(name=name).iloc[:,0]
            cnj_da.loc[:,name]=da.to_dataframe(name=name).iloc[:,0]
            cnj_cor.loc[:,name]=cor.to_dataframe(name=name).iloc[:,0]
        # NOTE: warnings come from ocean points with no observed data.
        # NOTE(review): this message says "realizando" (running) but the method
        # is finishing here -- likely meant 'Terminado EQM diario'.
        print('realizando EQM diario')
        return correct_e(cnj_cor,cnj_da,cnj_dm,cnj_do,acu=acu)
7,977
2,826
# Leaderboard rendering: an HTML table summarising tracked experiments.
import time

from kts.ui.components import HTMLRepr, Column, Field, Title, ThumbnailField, Raw
from kts.util.formatting import format_value


def format_experiment_date(date):
    """Format an experiment timestamp for display.

    Timestamps less than 24 hours old are rendered as a relative
    duration ("<delta> ago"); older ones as an absolute time.
    """
    delta = time.time() - date
    if delta < 60 * 60 * 24:  # younger than one day
        return format_value(delta, time=True) + ' ago'
    else:
        return format_value(date, time=True)


class Leaderboard(HTMLRepr):
    """HTML leaderboard of experiments (id, score, model, feature count, ...).

    Needs refactoring, very sketchy.
    """
    def __init__(self, experiments):
        self.experiments = experiments
        # Column widths (in em); names align 1:1 with the widths list.
        self.col_widths = [1, 6, 5, 12, 6, 8, 8]
        self.col_names = ['#', 'id', 'score', 'model', '# features', "date", "took"]
        # One pre-formatted tuple per experiment, same order as the columns.
        self.data = [
            (
                i,
                e.id,
                format_value(e.score),
                e.model_class,
                e.n_features,
                format_experiment_date(e.date),
                format_value(e.took, time=True)
            )
            for i, e in enumerate(experiments)
        ]

    def head_style(self, i):
        # Style kwargs for a header cell that is `i` em wide.
        return dict(bg=False, accent=False, bold=False, style=f"padding: 0px 5px; margin: 0px; width: {i}em; border: 0px;")

    def cell_style(self, i):
        # Style kwargs for a body cell that is `i` em wide.
        return dict(bg=False, style=f"padding: 0px 5px; margin: 0px; width: {i}em; border: 0px;")

    def concat(self, row):
        # Join cells into one HTML string; cells may be components or raw strings.
        return ' '.join(cell.html if not isinstance(cell, str) else cell for cell in row)

    @property
    def html(self):
        # Header row.
        head_cells = [Field(self.col_names[0], **self.head_style(self.col_widths[0]))]
        for i in range(1, len(self.col_widths)):
            head_cells.append(Field(self.col_names[i], **self.head_style(self.col_widths[i])))
        # Body cells, then each row wrapped in the experiment's own
        # collapsible widget so it can expand to show details.
        rows = [[Field(self.data[i][j], **self.cell_style(self.col_widths[j]))
                 for j in range(len(self.data[0]))
                ] for i in range(len(self.data))]
        rows = [Raw(e.html_collapsible(ThumbnailField(self.concat(rows[i]), css_id=-1, first=False), border=True))
                for i, e in enumerate(self.experiments)]
        res = Column([Title('leaderboard'),
                      Field(self.concat(head_cells), bg=False, bold=False, style="padding-bottom: 0px; margin: 0px 2px 0px 2px;")]
                     + rows)
        return res.html
2,170
755
from mmcv.utils import Registry, build_from_cfg

# Registry collecting all Transformer implementations.
TRANSFORMER = Registry('Transformer')


def build_transformer(cfg, default_args=None):
    """Instantiate a registered Transformer from a config dict."""
    transformer = build_from_cfg(cfg, TRANSFORMER, default_args)
    return transformer
229
76
# ETL DAG: verify the raw store-transactions file exists, clean it, load it
# into MySQL, generate the daily profit reports, and archive yesterday's
# report files with a date suffix.
from airflow import DAG
from datetime import datetime, timedelta
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from datacleaner import data_cleaner
from airflow.operators.mysql_operator import MySqlOperator
from airflow.operators.email_operator import EmailOperator

# Yesterday's date, used to timestamp the archived report files.
yesterday_date = datetime.strftime(datetime.now() - timedelta(1), '%Y-%m-%d')

# Default arguments applied to every task in the DAG.
default_args = {
    "owner": "Nilesh Varshney",
    "start_date": datetime(2021, 3, 21),
    "retries": 1,
    # BUG FIX: Airflow's option is "retry_delay"; the previous key
    # "retries_delay" was not a recognised argument and was silently ignored,
    # so retries fired with no delay.
    "retry_delay": timedelta(seconds=10),
}

dag = DAG(
    'etl_store_dag',
    default_args=default_args,
    schedule_interval='@daily',
    template_searchpath=['/usr/local/airflow/sql_files'],
    catchup=False,
)

# ========================================#
#             Task section                #
# ========================================#

# Task 1: check the source file exists (shasum fails if it is missing).
check_source_file = BashOperator(
    task_id='check_source_file',
    bash_command='shasum ~/store_files_airflow/raw_store_transactions.csv',
    retries=2,
    retry_delay=timedelta(seconds=15),
    dag=dag,
)

# Task 2: clean the input data file.
data_cleaning = PythonOperator(
    task_id='clean_raw_csv',
    python_callable=data_cleaner,
    dag=dag,
)

# Task 3: create the MySQL table.
create_table = MySqlOperator(
    task_id='create_mysql_table',
    mysql_conn_id="mysql_conn",
    sql="create_table.sql",
    dag=dag,
)

# Task 4: populate the MySQL table.
populate_table = MySqlOperator(
    task_id='populate_table',
    mysql_conn_id="mysql_conn",
    sql="load_data.sql",
    dag=dag,
)

# Task 5: generate the aggregated daily-profit report.
output_report_generation = MySqlOperator(
    task_id='output_report_generation',
    mysql_conn_id="mysql_conn",
    sql="daily_store_profit.sql",
    dag=dag,
)

# Task 6: rename the existing location-wise report, if it exists
# (`cat` fails fast when the file is missing, skipping the `mv`).
rename_existing_report_01 = BashOperator(
    task_id='rename_existing_report_01',
    bash_command='cat ~/store_files_airflow/location_wise_daily_profit.csv && mv ~/store_files_airflow/location_wise_daily_profit.csv ~/store_files_airflow/location_wise_daily_profit_%s.csv' % yesterday_date,
    dag=dag,
)

# Task 7: rename the existing store-wise report, if it exists.
rename_existing_report_02 = BashOperator(
    task_id='rename_existing_report_02',
    bash_command='cat ~/store_files_airflow/store_wise_daily_profit.csv && mv ~/store_files_airflow/store_wise_daily_profit.csv ~/store_files_airflow/store_wise_daily_profit_%s.csv' % yesterday_date,
    dag=dag,
)

check_source_file >> data_cleaning >> create_table >> populate_table >> output_report_generation >> [rename_existing_report_01, rename_existing_report_02]
2,726
948
""" Decoding tools and algorithms. """ from .searchlight import SearchLight from .space_net import SpaceNetClassifier, SpaceNetRegressor from .decoder import Decoder, DecoderRegressor __all__ = ['SearchLight', 'SpaceNetClassifier', 'SpaceNetRegressor', 'Decoder', 'DecoderRegressor']
299
91
from acq4.devices.PressureControl import PressureControl


class MockPressureControl(PressureControl):
    """In-memory stand-in for a real pressure controller (no hardware I/O)."""

    def _setPressure(self, p):
        # Just remember the requested pressure.
        self.pressure = p

    def getPressure(self):
        # Default to 10 until a pressure has been set.
        try:
            return self.pressure
        except AttributeError:
            return 10

    def _setSource(self, source):
        # Just remember the requested source.
        self.source = source

    def getSource(self):
        # Fall back to the first configured source until one is set.
        try:
            return self.source
        except AttributeError:
            return self.sources[0]
379
122
# Width constants: SIZEOF_BYTE is the number of bits per byte; the
# SIZEOF_INT_* values are the sizes, in bytes, of 16- and 32-bit integers.
SIZEOF_BYTE = 8
SIZEOF_INT_16 = 2  # 16 // SIZEOF_BYTE
SIZEOF_INT_32 = 4  # 32 // SIZEOF_BYTE
85
50
#!/usr/bin/python3
"""
Sprinkles - Pibow

This program lights up and turns off random LEDS using the colors of the
Pibow Zero Candy case

....................

Functions:
- sprinkles: Lights up and turns off random LEDs

....................

Author: Paul Ryan

This program was written on a Raspberry Pi using the Geany IDE.
"""
########################################################################
#                          Import modules                              #
########################################################################

import time

import unicornhat

from bfp_unicornhat import print_header
from bfp_unicornhat import stop
from bfp_unicornhat import get_random_color
from bfp_unicornhat import light_up_random_led
from bfp_unicornhat import random_x_coordinate
from bfp_unicornhat import random_y_coordinate


########################################################################
#                            Functions                                 #
########################################################################


def sprinkles():
    """
    Lights up and turns off random LEDs for 15 seconds.
    """
    # BUG FIX: removed the call to time.clock() — it was removed in
    # Python 3.8 (raising AttributeError) and its return value was never
    # used; time.time() alone drives the elapsed-time loop.
    start_time = time.time()
    seconds_elapsed = 0

    while seconds_elapsed < 15:
        seconds_elapsed = time.time() - start_time

        # Turn on a random LED with a random case color
        red, green, blue = get_random_color()
        light_up_random_led(red, green, blue)

        # Turn OFF a random LED
        unicornhat.set_pixel(random_x_coordinate(), random_y_coordinate(), 0, 0, 0)
        unicornhat.show()
        time.sleep(0.01)


if __name__ == '__main__':
    try:
        # STEP01: Print header
        print_header()
        # STEP02: Print instructions in white text
        print("\033[1;37;40mPress Ctrl-C to stop the program.")
        # STEP03:
        sprinkles()
        # STEP04: Exit the program.
        stop()
    except KeyboardInterrupt:
        stop()
1,976
536
import requests
import logging

from utils.authentification import get_headers

logger = logging.getLogger(__name__)


def _to_choices(items):
    """Map Kayrros API objects to dropdown choices ({'value', 'label'})."""
    return [{"value": item["id"], "label": item["name"]} for item in items]


def do(payload, config, plugin_config, inputs):
    """Populate the dynamic dropdown named in ``payload['parameterName']``.

    Supports ``collection_id`` (lists all collections) and ``dataset_id``
    (lists the datasets of the selected collection).  Returns
    ``{"choices": [...]}`` with an empty list on HTTP failure; returns
    ``None`` for any other parameter name (unchanged behavior).
    """
    username = config.get("username", "")
    password = config.get("password", "")

    if payload.get('parameterName') == "collection_id":
        # Request the connections
        LIST_COLLECTIONS = "https://platform.api.kayrros.com/v1/processing/collection/list"
        response = requests.get(LIST_COLLECTIONS, headers=get_headers(username, password))

        # Build choices
        choices = []
        if response.status_code == 200:
            choices = _to_choices(response.json())
        else:
            # BUG FIX: logger.exception is only meaningful inside an except
            # block (it attaches the active traceback); here there is none,
            # so it logged "NoneType: None".  Use logger.error instead.
            logger.error("Collection could not be retrieved")
        return {"choices": choices}

    if payload.get("parameterName") == "dataset_id":
        GET_DATASETS = "https://platform.api.kayrros.com/v1/processing/collection/datasets"
        PARAMS = {"collection_id": config["collection_id"]}
        response = requests.post(GET_DATASETS, data=PARAMS, headers=get_headers(username, password))

        # Build choices
        choices = []
        if response.status_code == 200:
            choices = _to_choices(response.json())
        else:
            logger.error("Dataset could not be retrieved")
        return {"choices": choices}
1,520
431
# titlescreen.py
# Title-screen logo bitmap for a upygame-based game.
import upygame

# Packed pixel data for the 70x16 logo surface below.  Each of the 16
# continuation lines is one row of 35 bytes; presumably one 4-bit palette
# index per pixel (35 bytes * 2 nibbles = 70 pixels) — TODO confirm against
# upygame.surface.Surface's expected buffer format.
logoPixels = b'\
\x99\x99\x99\x98\x80\x00\x00\x99\x99\x88\x80\x00\x99\x99\x98\x88\x80\x09\x99\x98\x88\x80\x09\x99\x88\x88\x00\x99\x99\x88\x80\x09\x99\x88\x09\
\x01\x11\x11\x19\x88\x00\x00\x91\x11\x98\x88\x99\x11\x11\x19\x98\x88\x99\x11\x99\x88\x88\x91\x11\x98\x88\x89\x11\x19\x98\x88\x91\x18\x88\x99\
\x09\x18\x81\x11\x88\x80\x09\x11\x11\x19\x89\x11\x11\x11\x11\x19\x89\x91\x11\x19\x98\x89\x11\x11\x19\x88\x91\x11\x11\x99\x89\x11\x98\x89\x99\
\x00\x90\x09\x11\x98\x88\x91\x11\x99\x17\x89\x19\x99\x19\x99\x19\x89\x11\x99\x11\x98\x91\x11\x91\x11\x98\x91\x19\x91\x19\x89\x19\x88\x99\x19\
\x00\x00\x09\x91\x19\x89\x11\x98\x88\xc9\x79\x98\x89\x19\x88\x99\x89\x19\x88\x91\x98\x91\x19\x89\x11\x98\x91\x99\x89\x99\x89\x19\x89\x91\x19\
\x00\x00\x00\x91\x19\x89\x11\x98\x88\xf7\x4c\x74\x79\x19\x88\x88\x89\x19\x88\x91\x98\x91\x99\x89\x91\x98\x91\x98\x88\x80\x09\x19\x89\x11\x98\
\x00\x00\x00\x09\x19\x89\x11\x98\x94\x7f\x4f\x4c\x49\x19\x88\xc4\xc9\x19\x99\x91\x98\x91\x89\x49\x81\x98\x91\x98\x4c\x44\xc9\x19\x89\x19\x88\
\x00\x00\x00\x09\x19\x89\x11\x11\x19\x14\x17\x77\xc9\x19\x88\x47\x49\x11\x11\x19\x87\x91\x88\x98\x81\x98\x91\x98\x47\x44\x49\x19\x99\x18\x80\
\x00\x09\x88\x89\x19\x89\x11\x11\x1f\x1c\x44\xcc\x49\x19\x88\x44\x49\x11\x99\x98\x84\x91\x11\x11\x11\x98\x91\x98\x44\x44\x49\x11\x11\x18\x00\
\x00\x99\x88\x89\x19\x89\x11\x98\x99\x77\xc7\x47\x49\x19\x88\x74\x79\x19\x88\x8c\x47\x91\x94\xc4\x91\x98\x91\x98\xc4\x44\x79\x19\x98\x19\x80\
\x09\x11\x88\x89\x19\x89\x11\x98\x88\x8f\x98\x88\x09\x19\x88\x80\x09\x19\x88\x80\x00\x91\x98\x80\x91\x98\x91\x98\x88\x00\x09\x19\x89\x19\x88\
\x91\x19\x88\x89\x19\x89\x11\x98\x88\x79\x98\x88\x09\x19\x88\x80\x09\x19\x88\x80\x00\x91\x98\x80\x91\x98\x91\x98\x88\x00\x09\x19\x89\x11\x98\
\x91\x98\x88\x91\x19\x89\x11\x98\x8c\x91\x98\x88\x09\x19\x88\x80\x09\x19\x88\x80\x00\x91\x98\x80\x91\x98\x91\x98\x88\x89\x89\x19\x88\x91\x19\
\x91\x19\x99\x11\x98\x80\x91\x19\x99\x11\x98\x88\x09\x19\x88\x80\x09\x11\x98\x80\x00\x91\x98\x80\x91\x98\x91\x19\x99\x99\x89\x19\x88\x89\x19\
\x09\x11\x11\x19\x88\x00\x09\x11\x11\x19\x88\x00\x91\x11\x98\x80\x09\x11\x19\x88\x00\x91\x19\x89\x11\x98\x09\x11\x11\x19\x88\x91\x98\x80\x99\
\x00\x99\x99\x98\x80\x00\x00\x99\x99\x98\x80\x09\x99\x99\x99\x88\x89\x99\x99\x98\x80\x99\x99\x89\x99\x98\x00\x99\x99\x98\x88\x09\x99\x88\x09\
'

# 70x16 surface built from the packed pixel buffer above.
logo = upygame.surface.Surface(70, 16, logoPixels);
2,402
2,335
# Scatter plot of the first two columns of `data`, with the model's
# predictions overlaid as a thick black line.
# NOTE(review): relies on `data`, `X` and `y_pred` being defined by earlier
# cells — presumably the penguins dataset and a fitted regressor; confirm.
ax = data.plot.scatter(x=data.columns[0], y=data.columns[1])
ax.plot(X["Flipper Length (mm)"], y_pred, color="black", linewidth=4)
# Assign to `_` only to suppress the notebook's echo of the return value.
_ = ax.set_title("Can I predict penguins' body mass")
185
75
import re
import subprocess
from typing import Sequence, Text

from luh3417.luhfs import LocalLocation, Location, SshLocation
from luh3417.luhssh import SshManager
from luh3417.utils import LuhError


def rsync_files(source: Location, target: Location, delete: bool = False):
    """
    Run rsync from `source` to `target`.

    Returns a (returncode, stderr_bytes) tuple so the caller can decide
    whether rsync is available / succeeded.
    """
    args = [
        "rsync",
        "-rz",
        "--exclude=.git",
        "--exclude=.idea",
        "--exclude=*.swp",
        "--exclude=*.un~",
    ]

    if delete:
        args.append("--delete")

    args += [source.rsync_path(True), target.rsync_path(True)]

    cp = subprocess.run(args, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE)
    return cp.returncode, cp.stderr


def sync_files(source: Location, target: Location, delete: bool = False):
    """
    Copy files from one location to another, preferring rsync and falling
    back to a tar pipeline when rsync is not installed.

    Raises LuhError on any rsync failure other than "command not found".
    """
    target.ensure_exists_as_dir()

    rc, stderr = rsync_files(source, target, delete)

    if rc:
        cmd_not_found = re.search("command not found", str(stderr))

        if not cmd_not_found:
            raise LuhError(f"Error while copying files: {stderr}")

        # rsync is missing on one side: fall back to the tar-based copy.
        copy_files_with_delete(source, target, delete)


def _build_args(location: Location, args: Sequence[Text]) -> Sequence[Text]:
    """
    Build args to run a command either locally or wrapped in SSH,
    depending on the location type.
    """
    if isinstance(location, LocalLocation):
        return args
    elif isinstance(location, SshLocation):
        return SshManager.instance(location.user, location.host, location.port).get_args(args)
    # Previously this fell through and returned None, producing an obscure
    # TypeError from subprocess; fail with a clear error instead.
    raise LuhError(f"Unsupported location type: {type(location).__name__}")


def _run_maintenance_mode(remote: Location, action: Text):
    """
    Run `wp maintenance-mode <action>` at the remote location.

    Raises LuhError when wp exits with a non-zero status.
    """
    remote_args = _build_args(
        remote,
        # BUG FIX: wp expects a single "--path=<dir>" token; previously
        # "--path=" and the directory were passed as two separate arguments,
        # so wp saw an empty path plus a stray positional argument.
        ["wp", "maintenance-mode", action, f"--path={remote.path}", "--quiet"],
    )
    remote_p = subprocess.Popen(
        remote_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    remote_p.wait()

    if remote_p.returncode:
        raise LuhError(
            f'Error while {action} maintenance mode at "{remote}": '
            f"{remote_p.stderr.read(1000)}"
        )


def activate_maintenance_mode(remote: Location):
    """Put the remote WordPress into maintenance mode."""
    _run_maintenance_mode(remote, "activate")


def deactivate_maintenance_mode(remote: Location):
    """Take the remote WordPress out of maintenance mode."""
    _run_maintenance_mode(remote, "deactivate")


def copy_files(source: Location, target: Location, excludes, exclude_tag_alls):
    """
    Copies files from the remote location to the local locations. Files are
    serialized and pipelined through tar, maybe locally, maybe through SSH
    depending on the locations.
    """
    source_tar_command = ["tar", "-C", source.path]

    if excludes:
        for exclude in excludes:
            source_tar_command.append("--exclude")
            source_tar_command.append(exclude)

    if exclude_tag_alls:
        for exclude_tag_all in exclude_tag_alls:
            source_tar_command.append("--exclude-tag-all")
            source_tar_command.append(exclude_tag_all)

    source_tar_command.extend(["-c", "."])

    source_args = _build_args(source, source_tar_command)
    target_args_1 = _build_args(target, ["mkdir", "-p", target.path])
    target_args_2 = _build_args(target, ["tar", "-C", target.path, "-x"])

    # Make sure the target directory exists before un-tarring into it.
    cp = subprocess.run(target_args_1, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE)

    if cp.returncode:
        raise LuhError(f'Error while creating target dir "{target}": {cp.stderr}')

    # tar on the source side streams into tar on the target side.
    source_p = subprocess.Popen(
        source_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    target_p = subprocess.Popen(
        target_args_2,
        stdin=source_p.stdout,
        stderr=subprocess.PIPE,
        stdout=subprocess.DEVNULL,
    )

    source_p.wait()
    target_p.wait()

    if source_p.returncode:
        raise LuhError(
            f'Error while reading files from "{source}": {source_p.stderr.read(1000)}'
        )

    if target_p.returncode:
        raise LuhError(f'Error writing files to "{target}": {target_p.stderr.read(1000)}')


def copy_files_with_delete(source: Location, target: Location, delete: bool = False):
    """tar-based copy; optionally clears the target directory first."""
    if delete:
        target.delete_dir_content()

    target.ensure_exists_as_dir()
    copy_files(source, target, None, None)
4,387
1,439
'''
Given a string which consists of lowercase or uppercase letters, find the
length of the longest palindromes that can be built with those letters.

This is case sensitive, for example "Aa" is not considered a palindrome here.

Note:
Assume the length of given string will not exceed 1,010.

Example:

Input:
"abccccdd"

Output:
7

Explanation:
One longest palindrome that can be built is "dccaccd", whose length is 7.
'''


class Solution(object):
    def longestPalindrome(self, s):
        """
        :type s: str
        :rtype: int
        """
        # Character frequency table.
        counts = {}
        for ch in s:
            counts[ch] = counts.get(ch, 0) + 1

        # Every pair of identical characters can be mirrored around the
        # centre; at most one leftover character may sit in the middle.
        length = 0
        odd_seen = False
        for n in counts.values():
            length += n - (n % 2)
            if n % 2:
                odd_seen = True

        return length + 1 if odd_seen else length
988
298
# Generated by Django 3.0 on 2020-05-22 16:58 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('content', '0024_auto_20200430_1225'), ] operations = [ migrations.AlterField( model_name='song', name='last_changed', field=models.DateTimeField(auto_now=True, null=True), ), migrations.AlterField( model_name='song', name='uploaded_date', field=models.DateTimeField(auto_now_add=True, null=True), ), ]
580
195
# Prefer mysql-connector-python; fall back to MySQLdb when it is absent.
try:
    import mysql.connector as mysql
# BUG FIX: the previous bare `except:` swallowed every exception (including
# KeyboardInterrupt/SystemExit); only a failed import should trigger the
# fallback driver.
except ImportError:
    import MySQLdb as mysql


def config_db():
    """Return a new connection to the local ``coin`` database.

    NOTE(review): credentials are hard-coded (root, empty password) —
    acceptable only for local development.
    """
    return mysql.connect(user='root', password='', host='localhost', database='coin')
263
62
"""Hand-run test suite for the tinyflow autodiff engine.

Each ``test_*`` function builds a small computation graph, evaluates it
(and usually its gradients) with ``ad.Executor`` and checks the results
against hand-computed NumPy values.  The GPU-backed tests further down
print their results instead of asserting.  Tests are invoked manually at
module import time — see the call list at the bottom of the file.
"""
from pycode.tinyflow import autodiff as ad
import numpy as np
from pycode.tinyflow import ndarray
from pycode.tinyflow import TrainExecute
from pycode.tinyflow import train


def test_identity():
    # y = x2: value and gradient of the identity.
    x2 = ad.Variable(name="x2")
    y = x2

    grad_x2, = ad.gradients(y, [x2])

    executor = ad.Executor([y, grad_x2])
    x2_val = 2 * np.ones(3)
    y_val, grad_x2_val = executor.run(feed_dict={x2: x2_val})

    assert isinstance(y, ad.Node)
    assert np.array_equal(y_val, x2_val)
    assert np.array_equal(grad_x2_val, np.ones_like(x2_val))


def test_add_by_const():
    # y = 5 + x2: constant addition.
    x2 = ad.Variable(name="x2")
    y = 5 + x2

    grad_x2, = ad.gradients(y, [x2])

    executor = ad.Executor([y, grad_x2])
    x2_val = 2 * np.ones(3)
    y_val, grad_x2_val = executor.run(feed_dict={x2: x2_val})

    assert isinstance(y, ad.Node)
    assert np.array_equal(y_val, x2_val + 5)
    assert np.array_equal(grad_x2_val, np.ones_like(x2_val))


def test_mul_by_const():
    # y = 5 * x2: constant multiplication.
    x2 = ad.Variable(name="x2")
    y = 5 * x2

    grad_x2, = ad.gradients(y, [x2])

    executor = ad.Executor([y, grad_x2])
    x2_val = 2 * np.ones(3)
    y_val, grad_x2_val = executor.run(feed_dict={x2: x2_val})

    assert isinstance(y, ad.Node)
    assert np.array_equal(y_val, x2_val * 5)
    assert np.array_equal(grad_x2_val, np.ones_like(x2_val) * 5)


def test_add_two_vars():
    # y = x2 + x3: gradient of addition w.r.t. both operands.
    x2 = ad.Variable(name="x2")
    x3 = ad.Variable(name="x3")
    y = x2 + x3

    grad_x2, grad_x3 = ad.gradients(y, [x2, x3])

    executor = ad.Executor([y, grad_x2, grad_x3])
    x2_val = 2 * np.ones(3)
    x3_val = 3 * np.ones(3)
    y_val, grad_x2_val, grad_x3_val = executor.run(feed_dict={x2: x2_val, x3: x3_val})

    assert isinstance(y, ad.Node)
    assert np.array_equal(y_val, x2_val + x3_val)
    assert np.array_equal(grad_x2_val, np.ones_like(x2_val))
    assert np.array_equal(grad_x3_val, np.ones_like(x3_val))


def test_mul_two_vars():
    # y = x2 * x3: product rule.
    x2 = ad.Variable(name="x2")
    x3 = ad.Variable(name="x3")
    y = x2 * x3

    grad_x2, grad_x3 = ad.gradients(y, [x2, x3])

    executor = ad.Executor([y, grad_x2, grad_x3])
    x2_val = 2 * np.ones(3)
    x3_val = 3 * np.ones(3)
    y_val, grad_x2_val, grad_x3_val = executor.run(feed_dict={x2: x2_val, x3: x3_val})

    assert isinstance(y, ad.Node)
    assert np.array_equal(y_val, x2_val * x3_val)
    assert np.array_equal(grad_x2_val, x3_val)
    assert np.array_equal(grad_x3_val, x2_val)


def test_add_mul_mix_1():
    # y = x1 + x2 * x3 * x1: mixed add/mul expression.
    x1 = ad.Variable(name="x1")
    x2 = ad.Variable(name="x2")
    x3 = ad.Variable(name="x3")
    y = x1 + x2 * x3 * x1

    grad_x1, grad_x2, grad_x3 = ad.gradients(y, [x1, x2, x3])

    executor = ad.Executor([y, grad_x1, grad_x2, grad_x3])
    x1_val = 1 * np.ones(3)
    x2_val = 2 * np.ones(3)
    x3_val = 3 * np.ones(3)
    y_val, grad_x1_val, grad_x2_val, grad_x3_val = executor.run(feed_dict={x1: x1_val, x2: x2_val, x3: x3_val})

    assert isinstance(y, ad.Node)
    assert np.array_equal(y_val, x1_val + x2_val * x3_val)
    assert np.array_equal(grad_x1_val, np.ones_like(x1_val) + x2_val * x3_val)
    assert np.array_equal(grad_x2_val, x3_val * x1_val)
    assert np.array_equal(grad_x3_val, x2_val * x1_val)


def test_add_mul_mix_2():
    # y = x1 + x2 * x3 * x4: three-way product term.
    x1 = ad.Variable(name="x1")
    x2 = ad.Variable(name="x2")
    x3 = ad.Variable(name="x3")
    x4 = ad.Variable(name="x4")
    y = x1 + x2 * x3 * x4

    grad_x1, grad_x2, grad_x3, grad_x4 = ad.gradients(y, [x1, x2, x3, x4])

    executor = ad.Executor([y, grad_x1, grad_x2, grad_x3, grad_x4])
    x1_val = 1 * np.ones(3)
    x2_val = 2 * np.ones(3)
    x3_val = 3 * np.ones(3)
    x4_val = 4 * np.ones(3)
    y_val, grad_x1_val, grad_x2_val, grad_x3_val, grad_x4_val = executor.run(
        feed_dict={x1: x1_val, x2: x2_val, x3: x3_val, x4: x4_val}
    )

    assert isinstance(y, ad.Node)
    assert np.array_equal(y_val, x1_val + x2_val * x3_val * x4_val)
    assert np.array_equal(grad_x1_val, np.ones_like(x1_val))
    assert np.array_equal(grad_x2_val, x3_val * x4_val)
    assert np.array_equal(grad_x3_val, x2_val * x4_val)
    assert np.array_equal(grad_x4_val, x2_val * x3_val)


def test_add_mul_mix_3():
    # y = z*z + x3 with z = x2*x2 + x2 + x3 + 3: nested expression.
    x2 = ad.Variable(name="x2")
    x3 = ad.Variable(name="x3")
    z = x2 * x2 + x2 + x3 + 3
    y = z * z + x3

    grad_x2, grad_x3 = ad.gradients(y, [x2, x3])

    executor = ad.Executor([y, grad_x2, grad_x3])
    x2_val = 2 * np.ones(3)
    x3_val = 3 * np.ones(3)
    y_val, grad_x2_val, grad_x3_val = executor.run(feed_dict={x2: x2_val, x3: x3_val})

    z_val = x2_val * x2_val + x2_val + x3_val + 3
    expected_yval = z_val * z_val + x3_val
    expected_grad_x2_val = 2 * (x2_val * x2_val + x2_val + x3_val + 3) * (2 * x2_val + 1)
    expected_grad_x3_val = 2 * (x2_val * x2_val + x2_val + x3_val + 3) + 1
    assert isinstance(y, ad.Node)
    assert np.array_equal(y_val, expected_yval)
    assert np.array_equal(grad_x2_val, expected_grad_x2_val)
    assert np.array_equal(grad_x3_val, expected_grad_x3_val)


def test_grad_of_grad():
    # Second-order gradients of y = x2*x2 + x2*x3.
    x2 = ad.Variable(name="x2")
    x3 = ad.Variable(name="x3")
    y = x2 * x2 + x2 * x3

    grad_x2, grad_x3 = ad.gradients(y, [x2, x3])
    grad_x2_x2, grad_x2_x3 = ad.gradients(grad_x2, [x2, x3])

    executor = ad.Executor([y, grad_x2, grad_x3, grad_x2_x2, grad_x2_x3])
    x2_val = 2 * np.ones(3)
    x3_val = 3 * np.ones(3)
    y_val, grad_x2_val, grad_x3_val, grad_x2_x2_val, grad_x2_x3_val = executor.run(
        feed_dict={x2: x2_val, x3: x3_val}
    )

    expected_yval = x2_val * x2_val + x2_val * x3_val
    expected_grad_x2_val = 2 * x2_val + x3_val
    expected_grad_x3_val = x2_val
    expected_grad_x2_x2_val = 2 * np.ones_like(x2_val)
    expected_grad_x2_x3_val = 1 * np.ones_like(x2_val)

    assert isinstance(y, ad.Node)
    assert np.array_equal(y_val, expected_yval)
    assert np.array_equal(grad_x2_val, expected_grad_x2_val)
    assert np.array_equal(grad_x3_val, expected_grad_x3_val)
    assert np.array_equal(grad_x2_x2_val, expected_grad_x2_x2_val)
    assert np.array_equal(grad_x2_x3_val, expected_grad_x2_x3_val)


def test_matmul_two_vars():
    # Matrix product y = x2 @ x3 and its gradients.
    x2 = ad.Variable(name="x2")
    x3 = ad.Variable(name="x3")
    y = ad.matmul_op(x2, x3)

    grad_x2, grad_x3 = ad.gradients(y, [x2, x3])

    executor = ad.Executor([y, grad_x2, grad_x3])
    x2_val = np.array([[1, 2], [3, 4], [5, 6]])  # 3x2
    x3_val = np.array([[7, 8, 9], [10, 11, 12]])  # 2x3

    y_val, grad_x2_val, grad_x3_val = executor.run(feed_dict={x2: x2_val, x3: x3_val})

    expected_yval = np.matmul(x2_val, x3_val)
    expected_grad_x2_val = np.matmul(np.ones_like(expected_yval), np.transpose(x3_val))
    expected_grad_x3_val = np.matmul(np.transpose(x2_val), np.ones_like(expected_yval))

    assert isinstance(y, ad.Node)
    assert np.array_equal(y_val, expected_yval)
    assert np.array_equal(grad_x2_val, expected_grad_x2_val)
    assert np.array_equal(grad_x3_val, expected_grad_x3_val)


def test_exp():
    # Smoke test for exp_op; prints rather than asserts.
    # NOTE(review): the executor is built with only [x4] but run() is
    # unpacked into two values, and x1_grad (a node) is rebound to the
    # result — looks broken; listed under "not implement yet" below.
    x1 = ad.Variable("x1")
    x2 = ad.exp_op(x1)
    x3 = x2 + 1
    x4 = x2 * x3
    x1_grad, = ad.gradients(x4, [x1])

    executor = ad.Executor([x4])
    x1_val = 1
    x4_val, x1_grad = executor.run(feed_dict={x1: x1_val})
    print(x4_val)
    print(x1_grad)


def test_exp_grad():
    # Gradient of exp: d(exp x)/dx = exp x; prints rather than asserts.
    x = ad.Variable("x")
    y = ad.exp_op(x)
    x_grad, = ad.gradients(y, [x])
    executor = ad.Executor([y, x_grad])
    x_val = 1
    y_val, x_grad_val = executor.run(feed_dict={x: x_val})
    print(y_val)
    print(x_grad_val)


def test_lr():
    # Linear regression y = relu(X@W + b) trained by hand-rolled gradient
    # descent on the GPU; prints W and b after each step.
    W = ad.Variable(name="W")
    b = ad.Variable(name="b")
    X = ad.Variable(name="X")
    y_ = ad.Variable(name="y_")

    ctx = ndarray.gpu(0)

    # ini
    x_val = np.linspace(0, 1, 100).reshape((100, 1))
    y_val = x_val + 0.5
    W_val = np.array([[0.1]])
    b_val = np.array([0.1])
    x_val = ndarray.array(x_val, ctx)
    W_val = ndarray.array(W_val, ctx)
    b_val = ndarray.array(b_val, ctx)
    y_val = ndarray.array(y_val, ctx)

    z = ad.matmul_op(X, W)
    # z.shape = (100,1)
    # b.shape = (1,1)
    y = z + ad.broadcastto_op(b, z)
    # y = (100,1)
    y = ad.fullyactivation_forward_op(y, "NCHW", "relu")
    # Mean squared error written as (y - y_)^T (y - y_) / 100.
    loss = ad.matmul_op(y + (-1) * y_, y + (-1) * y_, trans_A=True) * (1 / 100)
    # loss = ad.softmaxcrossentropy_op(y, y_)

    grad_W, grad_b = ad.gradients(loss, [W, b])
    executor = ad.Executor([loss, grad_W, grad_b], ctx)

    aph = 1e-6  # learning rate
    for i in range(100):
        loss_val, grad_W_val, grad_b_val = executor.run(feed_dict={X: x_val, b: b_val, W: W_val, y_: y_val})

        # Manual SGD step: pull to host, update, push back to the GPU.
        grad_W_val = grad_W_val.asnumpy()
        W_val = W_val.asnumpy()
        W_val = W_val - aph * grad_W_val
        W_val = ndarray.array(W_val, ctx)

        grad_b_val = grad_b_val.asnumpy()
        b_val = b_val.asnumpy()
        b_val = b_val - aph * grad_b_val
        b_val = ndarray.array(b_val, ctx)

        print(W_val.asnumpy(), b_val.asnumpy())

    # executor = ad.Executor([y])
    # res = executor.run(feed_dict={X: x_val,b: b_val,W: W_val})
    # print('y_true'+str(y_val))
    # print('y_pred'+str(res))


def test_convolution_1d_forward_op():
    # 1-D conv -> pool -> relu -> flatten -> fully-connected pipeline,
    # with a few manual gradient-descent steps on the filter.
    inputs = ad.Variable("inputs")
    filters = ad.Variable("filters")
    y_ = ad.Variable(name="y_")

    # ini
    ctx = ndarray.gpu(0)
    x_val = np.linspace(0, 100, 100).reshape((5, 1, 20))
    filters_val = np.ones((1, 1, 20)) * 0.001
    y_val = np.zeros((5, 1))
    x_val = ndarray.array(x_val, ctx)
    filters_val = ndarray.array(filters_val, ctx)
    y_val = ndarray.array(y_val, ctx)

    outputs = ad.convolution_1d_forward_op(inputs, filters, "NCHW", "VALID", 1)
    outputs_pool = ad.pooling_1d_forward_op(outputs, "NCHW", "max", 0, 1, 1)
    outputs_relu = ad.activation_forward_op(outputs_pool, "NCHW", "relu")
    outputs_f = ad.flatten_op(outputs_relu)
    outputs_fu = ad.fullyactivation_forward_op(outputs_f, "NCHW", "relu")
    loss = ad.matmul_op(outputs_fu, outputs_fu, trans_A=True) * (1 / 5)

    grad_inputs, grad_f = ad.gradients(loss, [inputs, filters])
    executor = ad.Executor([loss, grad_f], ctx=ctx)

    aph = 1.0e-6
    for i in range(10):
        loss_val, filters_grad_val = executor.run(feed_dict={inputs: x_val, filters: filters_val})
        filters_val = filters_val.asnumpy()
        filters_grad_val = filters_grad_val.asnumpy()
        filters_val = filters_val - aph * filters_grad_val
        filters_val = ndarray.array(filters_val, ctx)
        print("loss_val:", loss_val.asnumpy())
        print("filters_val:", filters_val.asnumpy())


def test_convolution_2d_forward_op():
    # Same pipeline as the 1-D test, with 2-D conv and pooling.
    inputs = ad.Variable("inputs")
    filters = ad.Variable("filters")
    y_ = ad.Variable(name="y_")

    # ini
    ctx = ndarray.gpu(0)
    x_val = np.linspace(0, 100, 80).reshape((5, 1, 4, 4))
    filters_val = np.ones((1, 1, 3, 3)) * 0.001
    y_val = np.zeros((5, 1))
    x_val = ndarray.array(x_val, ctx)
    filters_val = ndarray.array(filters_val, ctx)
    y_val = ndarray.array(y_val, ctx)

    outputs = ad.convolution_2d_forward_op(inputs, filters, "NCHW", "VALID", 1, 1)
    outputs_pool = ad.pooling_2d_forward_op(outputs, "NCHW", "max", 0, 0, 1, 1, 2, 2)
    outputs_relu = ad.activation_forward_op(outputs_pool, "NCHW", "relu")
    outputs_f = ad.flatten_op(outputs_relu)
    loss = ad.matmul_op(outputs_f, outputs_f, trans_A=True) * (1 / 5)

    grad_inputs, grad_f = ad.gradients(loss, [inputs, filters])
    executor = ad.Executor([loss, grad_f], ctx=ctx)

    aph = 1.0e-6
    for i in range(20):
        loss_val, filters_grad_val = executor.run(feed_dict={inputs: x_val, filters: filters_val})
        filters_val = filters_val.asnumpy()
        filters_grad_val = filters_grad_val.asnumpy()
        filters_val = filters_val - aph * filters_grad_val
        filters_val = ndarray.array(filters_val, ctx)
        print("loss_val:", loss_val.asnumpy())
        print("filters_val:", filters_val.asnumpy())


def test_convolution_3d_forward_op():
    # 3-D conv pipeline including a dropout layer.
    inputs = ad.Variable("inputs")
    filters = ad.Variable("filters")
    y_ = ad.Variable(name="y_")

    # ini
    ctx = ndarray.gpu(0)
    x_val = np.linspace(0, 100, 135).reshape((5, 1, 3, 3, 3))
    filters_val = np.ones((1, 1, 2, 2, 2)) * 0.001
    y_val = np.zeros((5, 1))
    x_val = ndarray.array(x_val, ctx)
    filters_val = ndarray.array(filters_val, ctx)
    y_val = ndarray.array(y_val, ctx)

    outputs = ad.convolution_3d_forward_op(inputs, filters, "NCHW", "VALID", 1, 1, 1)
    outputs_pool = ad.pooling_3d_forward_op(outputs, "NCHW", "max", 0, 0, 0, 1, 1, 1, 2, 2, 2)
    outputs_relu = ad.activation_forward_op(outputs_pool, "NCHW", "relu")
    outputs_dro = ad.dropout_forward_op(outputs_relu, "NCHW", 0.5, 0)
    outputs_f = ad.flatten_op(outputs_dro)
    loss = ad.matmul_op(outputs_f, outputs_f, trans_A=True) * (1 / 5)

    grad_inputs, grad_f = ad.gradients(loss, [inputs, filters])
    executor = ad.Executor([loss, grad_f], ctx=ctx)

    aph = 1.0e-6
    for i in range(20):
        loss_val, filters_grad_val = executor.run(feed_dict={inputs: x_val, filters: filters_val})
        filters_val = filters_val.asnumpy()
        filters_grad_val = filters_grad_val.asnumpy()
        filters_val = filters_val - aph * filters_grad_val
        filters_val = ndarray.array(filters_val, ctx)
        print("loss_val:", loss_val.asnumpy())
        print("filters_val:", filters_val.asnumpy())


def test_sigmoid_conv_1d():
    # Forward-only 2-D conv + relu (despite the "1d" name); prints result.
    inputs = ad.Variable("inputs")
    filters = ad.Variable("filters")
    y_ = ad.Variable(name="y_")

    # ini
    ctx = ndarray.gpu(0)
    x_val = np.linspace(0, 100, 80).reshape((5, 1, 4, 4))
    filters_val = np.ones((1, 1, 3, 3)) * 0.001
    y_val = np.zeros((5, 1))
    x_val = ndarray.array(x_val, ctx)
    filters_val = ndarray.array(filters_val, ctx)
    y_val = ndarray.array(y_val, ctx)

    outputs = ad.convolution_2d_forward_op(inputs, filters, "NCHW", "VALID", 1, 1)
    # outputs_pool = ad.pooling_2d_forward_op(outputs, "NCHW", "max", 0, 0, 1, 1, 2, 2)
    outputs_relu = ad.activation_forward_op(outputs, "NCHW", "relu")
    executor = ad.Executor([outputs_relu], ctx=ctx)
    loss_val = executor.run(feed_dict={inputs: x_val, filters: filters_val})
    print("loss_val:", loss_val[0].asnumpy())


def test_full_forward_op():
    # Conv pipeline ending in a fully-connected layer; prints the
    # filter gradient only.
    inputs = ad.Variable("inputs")
    filters = ad.Variable("filters")
    y_ = ad.Variable(name="y_")

    # ini
    ctx = ndarray.gpu(0)
    x_val = np.linspace(0, 100, 100).reshape((5, 1, 20))
    filters_val = np.ones((1, 1, 20)) * 0.001
    y_val = np.zeros((5, 1))
    x_val = ndarray.array(x_val, ctx)
    filters_val = ndarray.array(filters_val, ctx)
    y_val = ndarray.array(y_val, ctx)

    outputs = ad.convolution_1d_forward_op(inputs, filters, "NCHW", "VALID", 1)
    outputs_pool = ad.pooling_1d_forward_op(outputs, "NCHW", "max", 0, 1, 1)
    outputs_relu = ad.activation_forward_op(outputs_pool, "NCHW", "relu")
    outputs_f = ad.flatten_op(outputs_relu)
    output = ad.fullyactivation_forward_op(outputs_f, "NCHW", "relu")
    loss = ad.matmul_op(output, output, trans_A=True) * (1 / 5)

    grad_f = ad.gradients(loss, [filters])  # ad.gradients returns a list
    executor = ad.Executor([grad_f[0]], ctx=ctx)
    g_val = executor.run(feed_dict={inputs: x_val, filters: filters_val})  # run returns a list
    print("g_val:", g_val[0].asnumpy())


def test_exp_log_reverse_pow():
    # Elementwise pow (alternatives exp/log/reverse left commented out).
    inputs = ad.Variable("inputs")
    filters = ad.Variable("filters")
    y_ = ad.Variable(name="y_")

    # ini
    ctx = ndarray.gpu(0)
    x_val = np.linspace(0, 100, 80).reshape((5, 1, 4, 4))
    filters_val = np.ones((1, 1, 3, 3)) * 0.001
    y_val = np.zeros((5, 1))
    x_val = ndarray.array(x_val, ctx)
    filters_val = ndarray.array(filters_val, ctx)
    y_val = ndarray.array(y_val, ctx)

    # outputs = ad.exp_op(inputs)
    # outputs = ad.log_op(inputs)
    # outputs = ad.reverse_op(inputs)
    outputs = ad.pow_op(inputs, 2)
    grad_out = ad.gradients(outputs, [inputs])
    executor = ad.Executor([outputs, grad_out[0]], ctx=ctx)
    result = executor.run(feed_dict={inputs: filters_val})
    print(result[0].asnumpy())
    print(result[1].asnumpy())


def test_reduce_sum():
    # reduce_sum along axis 1 followed by a square; prints all results.
    inputs = ad.Variable("inputs")
    ctx = ndarray.gpu(0)
    shape = (3, 2, 3)
    x = np.random.uniform(0, 20, shape).astype(np.float32)
    arr_x = ndarray.array(x, ctx=ctx)
    outputs = ad.reduce_sum_op(inputs, 1)
    f_out = ad.pow_op(outputs, 2)
    grad_out = ad.gradients(f_out, [inputs])
    executor = ad.Executor([outputs, f_out, grad_out[0]], ctx=ctx)
    result = executor.run(feed_dict={inputs: arr_x})
    print(arr_x.asnumpy())
    print(result[0].asnumpy())
    print(result[1].asnumpy())
    print(result[2].asnumpy())


def test_reduce_mean():
    # reduce_mean along axis 1 followed by a square; prints all results.
    inputs = ad.Variable("inputs")
    ctx = ndarray.gpu(0)
    shape = (2, 2, 3)
    x = np.random.uniform(0, 20, shape).astype(np.float32)
    arr_x = ndarray.array(x, ctx=ctx)
    outputs = ad.reduce_mean_op(inputs, 1)
    f_out = ad.pow_op(outputs, 2)
    grad_out = ad.gradients(f_out, [inputs])
    executor = ad.Executor([outputs, f_out, grad_out[0]], ctx=ctx)
    result = executor.run(feed_dict={inputs: arr_x})
    print(arr_x.asnumpy())
    print(result[0].asnumpy())
    print(result[1].asnumpy())
    print(result[2].asnumpy())


def test_l1_l2_cross_loss():
    # L1 loss (cross-entropy variant left commented); prints loss and grads.
    inputs = ad.Variable("inputs")
    filters = ad.Variable("filters")
    y_ = ad.Variable(name="y_")

    # ini
    ctx = ndarray.gpu(0)
    x_val = np.ones((5, 2)) * 0.5
    filters_val = np.ones((2, 2, 10)) * 0.001
    y_val = np.ones((5, 2))
    x_val = ndarray.array(x_val, ctx)
    filters_val = ndarray.array(filters_val, ctx)
    y_val = ndarray.array(y_val, ctx)

    # loss = ad.crossEntropy_op(inputs, y_)
    loss = ad.l1loss_op(inputs, y_)
    grad_f = ad.gradients(loss, [inputs, y_])  # ad.gradients returns a list
    executor = ad.Executor([loss, grad_f[0], grad_f[1]], ctx=ctx)
    g_val = executor.run(feed_dict={inputs: x_val, y_: y_val})  # run returns a list
    print("g_val:", g_val[0].asnumpy())
    print("g_val:", g_val[1].asnumpy())
    print("g_val:", g_val[2].asnumpy())


def test_l1_l2_regular():
    # L1 regularisation term minimised with the Adam trainer; prints result.
    inputs2 = ad.Variable("X2")
    y_ = ad.Placeholder("y_")

    # ini
    x_val = np.ones((10, 2)) * 4
    y_val = np.ones((5, 2)) * 0.1

    a = ad.l1regular_op(inputs2)

    aph = 0.001
    t = train.Adam_minimize(a, aph)
    t.init_Variable({inputs2: x_val})
    valid_y_predicted = t.run_get_nodelist_once({y_: y_val}, [a])[a].asnumpy()
    print(valid_y_predicted)


def test_concat():
    # Concat of a placeholder and a variable fed into a softmax
    # cross-entropy loss, one Adam step; prints the concat result.
    inputs1 = ad.Placeholder("X1")
    inputs2 = ad.Variable("X2")
    y_ = ad.Placeholder("y_")

    # ini
    ctx = ndarray.gpu(0)
    x_val1 = np.linspace(2, 20, 10).reshape((5, 2)) * 0.5
    x_val2 = np.ones((5, 3)) * 0.1
    y_val = np.ones((5, 5)) * 0.1

    a = ad.concat_forward_op(inputs1, inputs2)
    loss = ad.softmaxcrossentropy_op(a, y_)
    aph = 0.001
    t = train.Adam_minimize(loss, aph)
    t.init_Variable({inputs2: x_val2})
    valid_y_predicted = t.run_get_nodelist_once({inputs1: x_val1, y_: y_val}, [a])[a].asnumpy()
    print(valid_y_predicted)


# Manual invocation at import time: uncomment the tests to run.
test_concat()
#test_lr()
# test_identity()
# test_add_by_const()
# test_mul_by_const()
# test_add_two_vars()
# test_mul_two_vars()
# test_add_mul_mix_1()
# test_add_mul_mix_2()
# test_add_mul_mix_3()
# test_grad_of_grad()
# test_matmul_two_vars()
#test_full_forward_op()
#test_sigmoid_conv_1d()
# =============not implement yet====================
# test_exp()
# test_exp_grad()
#test_full_forward_op()
#test_full_forward_op()
#test_exp_log_reverse_pow()
#test_reduce_sum()
#test_reduce_mean()
# test_convolution_1d_forward_op()
# test_convolution_2d_forward_op()
# test_convolution_3d_forward_op()
# test_l1_l2_cross_loss()
# test_l1_l2_regular()
# test_concat()
19,334
8,725
# Metadata module to save metadata as dictionary, save trial metadata as yaml and export metadata as csv
import yaml
import datetime
import pandas as pd
from pathlib import Path


class Metadata:
    """Accumulates session- and trial-level metadata and persists it.

    The caller sets the public attributes (subject, trial, reward, ...)
    and calls append() once per trial to record them as one "row" in
    ``self.dictionary`` (one list per column).  save() merges the
    in-memory dictionary into the YAML history file; export() converts
    that YAML file into a dated CSV.
    """

    def __init__(self):
        # Resolve the YAML history file relative to the working directory.
        base_path = Path().parent
        self.metadata_dir = (base_path / "RPi4Toolbox/GUI/Toolbox/metadata.yaml").resolve()

        # Session-level metadata (set once per session by the caller).
        self.subject = ''
        self.experimenter = ''
        self.date = ''
        self.session = 0
        self.condition = ''

        # Trial-level metadata (updated by the caller before each append()).
        self.trial = 0
        self.repetition = 0
        self.start_habituation = ''
        self.start_stimulus = ''
        self.reactiontime_keypeck = ''
        self.optimal_stimulus = ''
        self.key_choice = ''
        self.reward = 0

        # Initialize dictionary from existing metadata or create new.
        try:
            # if metadata exists, read its keys to initialize the dictionary
            with open(self.metadata_dir, 'r') as yamlfile:
                metadata = yaml.safe_load(yamlfile)
        except IOError:
            # no metadata file yet
            metadata = None

        if metadata:
            # BUG FIX: the previous dict.fromkeys(metadata.keys(), []) bound
            # ONE shared list object to every key, so append() interleaved
            # all columns into the same list.  Build a fresh list per key.
            # (The truthiness check also guards against an empty YAML file,
            # where safe_load returns None.)
            self.dictionary = {key: [] for key in metadata.keys()}
        else:
            # if no usable metadata exists initialize new empty dictionary
            self.dictionary = {'subject':[],'experimenter':[],'date':[],'condition':[],'session':[],'trial':[],'repetition':[],
                               'start_habituation':[],'start_stimulus':[],'reactiontime_keypeck':[],
                               'optimal_stimulus':[],'key_choice':[],'reward':[],'col1':[]}

    def append(self):
        """Record the current attribute values as one new row (one trial)."""
        # update dictionary with session related metadata
        self.dictionary['subject'].append(self.subject)
        self.dictionary['experimenter'].append(self.experimenter)
        self.dictionary['date'].append(self.date)
        self.dictionary['condition'].append(self.condition)
        self.dictionary['session'].append(self.session)

        # update dictionary with trial related metadata
        self.dictionary['trial'].append(self.trial)
        self.dictionary['repetition'].append(self.repetition)
        self.dictionary['start_habituation'].append(self.start_habituation)
        self.dictionary['start_stimulus'].append(self.start_stimulus)
        self.dictionary['reactiontime_keypeck'].append(self.reactiontime_keypeck)
        self.dictionary['optimal_stimulus'].append(self.optimal_stimulus)
        self.dictionary['key_choice'].append(self.key_choice)
        self.dictionary['reward'].append(self.reward)

    def save(self):
        """Write the accumulated dictionary to the YAML history file.

        NOTE(review): dict.update() replaces each existing key's value
        wholesale rather than extending it, so this overwrites any prior
        columns with the same names -- confirm that is the intent.
        """
        # SAVE TO YAML at the end of session
        try:
            # if a metadata file exists, merge our columns into it
            with open(self.metadata_dir, 'r') as yamlfile:
                metadata = yaml.safe_load(yamlfile)
            metadata.update(self.dictionary)
            with open(self.metadata_dir, 'w') as file:
                yaml.safe_dump(metadata, file, sort_keys=False)
        except IOError:
            # if no metadata exists, create a new file
            # (safe_dump for consistency with the branch above; the
            # dictionary only holds plain str/int values)
            with open(self.metadata_dir, 'w') as file:
                yaml.safe_dump(self.dictionary, file, sort_keys=False)

    def export():
        """
        This function exports the metadata.yaml file to a standard metadata.csv
        and cleans the metadata.yaml history after moving it to backup.

        NOTE(review): declared without `self`, so it can only be called as
        ``Metadata.export()`` on the class, not on an instance -- confirm
        intent.  It also resolves a different path ("../RPi4Toolbox/...")
        than __init__ does -- verify both point at the same file.
        """
        ## EXPORT METADATA
        base_path = Path().parent
        file_path = (base_path / "../RPi4Toolbox/GUI/Toolbox/metadata.yaml").resolve()
        with open(file_path, 'r') as yamlfile:
            data = yaml.safe_load(yamlfile)
        metadata = pd.DataFrame.from_dict(data, orient='index')
        metadata = metadata.transpose()
        # e.g. ".../metadata_2024-01-31.csv" (strips the ".yaml" suffix)
        filename = str(file_path)[0:-5]+'_' + datetime.datetime.now().strftime('%Y-%m-%d') + '.csv'
        metadata.to_csv(filename, index = False, header=True, encoding='utf-8')

        # move metadata csv and yaml file to sciebo backup
        # erase yaml file to keep it slim
3,780
1,098
# NOTE(review): Python 2 script (cPickle import, TF1 contrib); despite the
# method name "trainval_model" this wrapper only runs evaluation of a
# pretrained SymNet GCZSL model.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from utils.base_solver import BaseSolver

import os, logging, importlib, re, copy, random, tqdm, argparse
import os.path as osp
import cPickle as pickle
from pprint import pprint
from datetime import datetime
import numpy as np
from collections import defaultdict

from torch.utils.data import DataLoader
import tensorflow as tf
import tensorflow.contrib.slim as slim
import torch

from utils import config as cfg
from utils import dataset, utils
from utils.evaluator import GCZSL_Evaluator

from run_symnet_gczsl import make_parser
from run_symnet_gczsl import SolverWrapper as GCZSLSolverWrapper


def main():
    """Build train/val datasets and their DataLoaders.

    NOTE(review): ``CompositionDatasetActivations``, ``name``, ``root``,
    ``phase`` and ``feat_file`` are neither defined nor imported in this
    file, so this function raises NameError as written; the loaders it
    builds are also never returned or used.  Needs wiring before use.
    """
    train_dataset = CompositionDatasetActivations(name, root, phase, feat_file)
    # test_dataset = CompositionDatasetActivations(name, root, "test", feat_file,with_image=True)
    val_dataset = CompositionDatasetActivations(name, root, "val", feat_file)
    # /content/SymNet/data/ut-zap50k-natural/features.t7
    # test_loader_notransform = DataLoader(test_dataset, batch_size =64)
    train_loader = DataLoader(train_dataset, batch_size =2048 ,shuffle=False)
    # val_loader = DataLoader(val_dataset, batch_size =50,shuffle = False)
    # full-batch validation: one batch containing the whole val set
    val_loader = DataLoader(val_dataset, batch_size =len(val_dataset),shuffle = False)

################################################################################

class SolverWrapper(GCZSLSolverWrapper):
    """Evaluation-only solver: restores a snapshot and scores the test set."""

    def __init__(self, network, test_dataloader, args):
        # NOTE(review): local `logger` is assigned but unused; the log call
        # below fetches a fresh logger instead.
        logger = self.logger("init")

        self.network = network
        self.test_dataloader = test_dataloader
        self.args = args

        # Checkpoint path is derived from the run name and epoch number.
        self.trained_weight = os.path.join(cfg.WEIGHT_ROOT_DIR, args.name, "snapshot_epoch_%d.ckpt"%args.epoch)
        self.logger("init").info("pretrained model <= "+self.trained_weight)

    def construct_graph(self, sess):
        """Build the network inside the session's graph; return the score op.

        build_network() returns a triple of which only the middle element
        (the score op, a dict keyed by score name -- see trainval_model)
        is kept.
        """
        logger = self.logger('construct_graph')

        with sess.graph.as_default():
            # Seed TF for reproducibility when the config requests it.
            if cfg.RANDOM_SEED is not None:
                tf.set_random_seed(cfg.RANDOM_SEED)

            _, score_op, _ = self.network.build_network()

        return score_op

    def trainval_model(self, sess, max_epoch):
        """Run one evaluation pass over self.test_dataloader.

        Despite the name, no training happens here: the graph is built,
        weights are initialized/restored (via the inherited initialize()
        -- presumably from self.trained_weight; confirm in the base
        class), predictions are collected per score key, and the
        inherited test() computes the report.  `max_epoch` is unused.
        """
        logger = self.logger('train_model')
        logger.info('Begin training')

        score_op = self.construct_graph(sess)
        #for x in tf.global_variables():
        #    print(x.name)
        self.initialize(sess)
        # Freeze the graph so no ops are accidentally added during the loop.
        sess.graph.finalize()

        evaluator = GCZSL_Evaluator(self.test_dataloader.dataset)

        ############################## test czsl ################################

        # Ground-truth labels and per-key predictions accumulated per batch.
        all_attr_lab = []
        all_obj_lab = []
        all_pred = defaultdict(list)

        for image_ind, batch in tqdm.tqdm(enumerate(self.test_dataloader), total=len(self.test_dataloader), postfix='test'):
            predictions = self.network.test_step(sess, batch, score_op)
            # ordereddict of [score_pair, score_a, score_o]

            # batch[1]/batch[2] are numpy attr/object label arrays; the
            # evaluator expects torch tensors.
            attr_truth, obj_truth = torch.from_numpy(batch[1]), torch.from_numpy(batch[2])
            all_attr_lab.append(attr_truth)
            all_obj_lab.append(obj_truth)

            for key in score_op.keys():
                all_pred[key].append(predictions[key][0])

        # One evaluation report per score key; only the last `report` is kept.
        for key,value in all_pred.items():
            logger.info(key)
            report = self.test(self.args.epoch, evaluator, value, all_attr_lab, all_obj_lab)

        logger.info('Finished.')


if __name__=="__main__":
    main()
3,504
1,124
from django.core.management.base import BaseCommand
from django.db import transaction

from jobya.companies.models import Company
from jobya.companies.tests.factories import CompanyFactory


class Command(BaseCommand):
    """Seed the database with factory-generated Company rows."""

    help = "Set up company data"

    def add_arguments(self, parser):
        # Positional argument: how many companies to create
        # (only the first value is consumed by handle()).
        parser.add_argument(
            "total",
            nargs="+",
            type=int,
            help="Indicates the number of companies to be created",
        )
        # Optional flag: wipe existing company rows before seeding.
        parser.add_argument(
            "--delete",
            action="store_true",
            help="Delete old companies data before creating",
        )

    @transaction.atomic
    def handle(self, *args, **options):
        # The whole run is one transaction: the optional wipe and the
        # seeding either both commit or both roll back.
        count = options.get("total")[0]

        if options["delete"]:
            self.delete_old_data()

        self.stdout.write("Creating new companies...")
        for _ in range(count):
            CompanyFactory()
        self.stdout.write("Created successfully!")

    def delete_old_data(self):
        # Remove every existing Company row before reseeding.
        self.stdout.write("Deleting old companies data...")
        for model in [Company]:
            model.objects.all().delete()
        self.stdout.write("Deleted successfully!")
1,190
317
# # BugZilla query page scanner to work with ancient # Debian Stable bugzilla installationss # # This includes three test sites # site contains one bug entry # all_bugs contains all Openmoko bugs as of \today # no_bug is a query which showed no bug # from HTMLParser import HTMLParser class BugQueryExtractor(HTMLParser): STATE_NONE = 0 STATE_FOUND_TR = 1 STATE_FOUND_NUMBER = 2 STATE_FOUND_PRIO = 3 STATE_FOUND_PRIO2 = 4 STATE_FOUND_NAME = 5 STATE_FOUND_PLATFORM = 6 STATE_FOUND_STATUS = 7 STATE_FOUND_WHATEVER = 8 # I don't know this field STATE_FOUND_DESCRIPTION =9 def __init__(self): HTMLParser.__init__(self) self.state = self.STATE_NONE self.bug = None self.bugs = [] def handle_starttag(self, tag, attr): if self.state == self.STATE_NONE and tag.lower() == "tr": # check for bz_normal and bz_P2 as indicator in buglist.cgi # use 'all' and 'map' on python2.5 if len(attr) == 1 and attr[0][0] == 'class' and \ ('bz_normal' in attr[0][1] or 'bz_blocker' in attr[0][1] or 'bz_enhancement' in attr[0][1] or 'bz_major' in attr[0][1] or 'bz_minor' in attr[0][1] or 'bz_trivial' in attr[0][1] or 'bz_critical' in attr[0][1] or 'bz_wishlist' in attr[0][1]) \ and 'bz_P' in attr[0][1]: print "Found tr %s %s" % (tag, attr) self.state = self.STATE_FOUND_TR elif self.state == self.STATE_FOUND_TR and tag.lower() == "td": self.state += 1 def handle_endtag(self, tag): if tag.lower() == "tr": print "Going back" if self.state != self.STATE_NONE: self.bugs.append( (self.bug,self.status) ) self.state = self.STATE_NONE self.bug = None if self.state > 1 and tag.lower() == "td": print "Next TD" self.state += 1 def handle_data(self,data): data = data.strip() # skip garbage if len(data) == 0: return if self.state == self.STATE_FOUND_NUMBER: """ #1995 in bugs.oe.org has [SEC] additionally to the number and we want to ignore it """ print "Bug Number '%s'" % data.strip() if self.bug: print "Ignoring bug data" return self.bug = data elif self.state == self.STATE_FOUND_STATUS: print "Status Name '%s'" % 
data.strip() self.status = data def result(self): print "Found bugs" return self.bugs # bugs_openmoko = """<!-- 1.0@bugzilla.org --> <!-- 1.0@bugzilla.org --> <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> <title>Bug List</title> <link href="/style/style.css" rel="stylesheet" type="text/css" /> <link href="/bugzilla/css/buglist.css" rel="stylesheet" type="text/css"> </head> <body bgcolor="#FFFFFF" onload=""> <!-- 1.0@bugzilla.org --> <div id="header"> <a href="http://bugzilla.openmoko.org/cgi-bin/bugzilla/" id="site_logo"><img src="/style/images/openmoko_logo.png" alt="openmoko.org" /></a> <div id="main_navigation"> <ul> <li><a href="http://www.openmoko.org/" class="nav_home"><span>Home</span></a></li> <li><a href="http://wiki.openmoko.org/" class="nav_wiki"><span>Wiki</span></a></li> <li><a href="http://bugzilla.openmoko.org/" class="nav_bugzilla selected"><span>Bugzilla</span></a></li> <li><a href="http://planet.openmoko.org/" class="nav_planet"><span>Planet</span></a></li> <li><a href="http://projects.openmoko.org/" class="nav_projects"><span>Projects</span></a></li> <li><a href="http://lists.openmoko.org/" class="nav_lists"><span>Lists</span></a></li> </ul> </div> </div> <div class="page_title"> <strong>Bug List</strong> </div> <div class="container"> <div align="center"> <b>Fri Mar 16 20:51:52 CET 2007</b><br> <a href="quips.cgi"><i>It was a time of great struggle and heroic deeds </i></a> </div> <hr> 282 bugs found. 
<!-- 1.0@bugzilla.org --> <table class="bz_buglist" cellspacing="0" cellpadding="4" width="100%"> <colgroup> <col class="bz_id_column"> <col class="bz_severity_column"> <col class="bz_priority_column"> <col class="bz_platform_column"> <col class="bz_owner_column"> <col class="bz_status_column"> <col class="bz_resolution_column"> <col class="bz_summary_column"> </colgroup> <tr align="left"> <th colspan="1"> <a href="buglist.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=&amp;order=bugs.bug_id">ID</a> </th> <th colspan="1"> <a href="buglist.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=&amp;order=bugs.bug_severity,bugs.bug_id">Sev</a> </th><th colspan="1"> <a 
href="buglist.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=&amp;order=bugs.priority,bugs.bug_id">Pri</a> </th><th colspan="1"> <a href="buglist.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=&amp;order=bugs.rep_platform,bugs.bug_id">Plt</a> </th><th colspan="1"> <a 
href="buglist.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=&amp;order=map_assigned_to.login_name,bugs.bug_id">Owner</a> </th><th colspan="1"> <a href="buglist.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=&amp;order=bugs.bug_status,bugs.bug_id">State</a> </th><th colspan="1"> <a 
href="buglist.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=&amp;order=bugs.resolution,bugs.bug_id">Result</a> </th><th colspan="1"> <a href="buglist.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=&amp;order=bugs.short_desc,bugs.bug_id">Summary</a> </th> </tr> <tr class="bz_critical bz_P2 "> <td> <a href="show_bug.cgi?id=1">1</a> </td> <td><nobr>cri</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>CLOS</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>kernel is running way too slow </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=2">2</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> 
<td><nobr>FIXE</nobr> </td> <td>SD card driver unstable </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=3">3</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>sean_chiang@fic.com.tw</nobr> </td> <td><nobr>CLOS</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>Debug Board trying to control GSM_EN / FA_19 </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=4">4</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>random crashes of gsmd </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=5">5</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>call progress information is lacking </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=6">6</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>sean_chiang@fic.com.tw</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>GSM_EN should be called nGSM_EN </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=7">7</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>CLOS</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>PMU RTC driver date/time conversion is erroneous </td> </tr> <tr class="bz_critical bz_P5 "> <td> <a href="show_bug.cgi?id=8">8</a> </td> <td><nobr>cri</nobr> </td> <td><nobr>P5</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>SD/MMC: Card sometimes not detected </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=9">9</a> </td> 
<td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>ASSI</nobr> </td> <td><nobr></nobr> </td> <td>Boot speed too low (kernel part) </td> </tr> <tr class="bz_enhancement bz_P2 "> <td> <a href="show_bug.cgi?id=10">10</a> </td> <td><nobr>enh</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>CLOS</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>u-boot support for usb-serial lacking </td> </tr> <tr class="bz_blocker bz_P2 "> <td> <a href="show_bug.cgi?id=11">11</a> </td> <td><nobr>blo</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>ken_zhao@fic-sh.com.cn</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>u-boot lacks USB DFU support </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=12">12</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>gordon_hsu@fic-sh.com.cn</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Boot speed too low (bootloader part) </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=13">13</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>teddy@fic-sh.com.cn</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>power button should not immediately react </td> </tr> <tr class="bz_minor bz_P2 "> <td> <a href="show_bug.cgi?id=14">14</a> </td> <td><nobr>min</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>werner@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>bootloader should display startup image before booting th... 
</td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=15">15</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>kernel oops when unloading g_ether </td> </tr> <tr class="bz_blocker bz_P2 "> <td> <a href="show_bug.cgi?id=16">16</a> </td> <td><nobr>blo</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>sean_chiang@fic.com.tw</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>bluetooth pullup / pulldown resistors </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=17">17</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>sean_chiang@fic.com.tw</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>microSD socket still has mechanical contact problems </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=18">18</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>OE build of u_boot with CVSDATE 20061030 uses latest git ... 
</td> </tr> <tr class="bz_minor bz_P2 "> <td> <a href="show_bug.cgi?id=19">19</a> </td> <td><nobr>min</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>teddy@fic-sh.com.cn</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>&quot;reboot&quot; doesn't work </td> </tr> <tr class="bz_critical bz_P2 "> <td> <a href="show_bug.cgi?id=20">20</a> </td> <td><nobr>cri</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>connection status </td> </tr> <tr class="bz_blocker bz_P3 "> <td> <a href="show_bug.cgi?id=21">21</a> </td> <td><nobr>blo</nobr> </td> <td><nobr>P3</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>sean_chiang@fic.com.tw</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>sms function missing </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=22">22</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>outgoing call generates 'segmentation fault' when the pee... 
</td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=23">23</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>dtmf support not available now </td> </tr> <tr class="bz_wishlist bz_P2 "> <td> <a href="show_bug.cgi?id=24">24</a> </td> <td><nobr>wis</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>libgsmd/misc.h: lgsm_get_signal_quality() </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=25">25</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>davewu01@seed.net.tw</nobr> </td> <td><nobr>ASSI</nobr> </td> <td><nobr></nobr> </td> <td>GtkSpinBox unfinished </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=26">26</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>ken_zhao@fic-sh.com.cn</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Pixmap Engine and Shadows </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=27">27</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>ken_zhao@fic-sh.com.cn</nobr> </td> <td><nobr>ASSI</nobr> </td> <td><nobr></nobr> </td> <td>Labels on GtkButton don't appear centered </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=28">28</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>ken_zhao@fic-sh.com.cn</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>GtkComboBox styling woes </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=29">29</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> 
<td><nobr>ken_zhao@fic-sh.com.cn</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>GtkProgressBar styling woes </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=30">30</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>REOP</nobr> </td> <td><nobr></nobr> </td> <td>Touchscreen emits bogus events under X </td> </tr> <tr class="bz_critical bz_P2 "> <td> <a href="show_bug.cgi?id=31">31</a> </td> <td><nobr>cri</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Display colors are slightly off </td> </tr> <tr class="bz_enhancement bz_P2 "> <td> <a href="show_bug.cgi?id=32">32</a> </td> <td><nobr>enh</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Common function for loading GdkPixbuf </td> </tr> <tr class="bz_blocker bz_P2 "> <td> <a href="show_bug.cgi?id=33">33</a> </td> <td><nobr>blo</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>incoming call status report causes gsmd to crash. </td> </tr> <tr class="bz_blocker bz_P2 "> <td> <a href="show_bug.cgi?id=34">34</a> </td> <td><nobr>blo</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>WORK</nobr> </td> <td>Need to decide if lgsm_handle is still valid. 
</td> </tr> <tr class="bz_enhancement bz_P5 "> <td> <a href="show_bug.cgi?id=35">35</a> </td> <td><nobr>enh</nobr> </td> <td><nobr>P5</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>WONT</nobr> </td> <td>Support debug board from u-boot </td> </tr> <tr class="bz_blocker bz_P2 "> <td> <a href="show_bug.cgi?id=36">36</a> </td> <td><nobr>blo</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>Implement s3c2410 udc (usb device controller) driver in u... </td> </tr> <tr class="bz_blocker bz_P2 "> <td> <a href="show_bug.cgi?id=37">37</a> </td> <td><nobr>blo</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>DUPL</nobr> </td> <td>Implement USB Device Firmware Upgrade (DFU) </td> </tr> <tr class="bz_enhancement bz_P2 "> <td> <a href="show_bug.cgi?id=38">38</a> </td> <td><nobr>enh</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>DUPL</nobr> </td> <td>implement USB serial emulation in u-boot </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=39">39</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>gordon_hsu@fic-sh.com.cn</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>Move LCM initialization into u-boot (currently in kernel ... 
</td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=40">40</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>werner@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>DUPL</nobr> </td> <td>test + debug display of image on LCM in u-boot </td> </tr> <tr class="bz_enhancement bz_P2 "> <td> <a href="show_bug.cgi?id=41">41</a> </td> <td><nobr>enh</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>ken_zhao@fic-sh.com.cn</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>evaluate sapwood theme engine </td> </tr> <tr class="bz_blocker bz_P3 "> <td> <a href="show_bug.cgi?id=42">42</a> </td> <td><nobr>blo</nobr> </td> <td><nobr>P3</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>dynamic mtd partition table cration </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=43">43</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>StatusBar (Footer) API </td> </tr> <tr class="bz_wishlist bz_P2 "> <td> <a href="show_bug.cgi?id=44">44</a> </td> <td><nobr>wis</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>InputMethod API </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=45">45</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Automatic opening input methods </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=46">46</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> 
<td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>266MHz initialization of GTA01Bv2 </td> </tr> <tr class="bz_minor bz_P2 "> <td> <a href="show_bug.cgi?id=47">47</a> </td> <td><nobr>min</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>ken_zhao@fic-sh.com.cn</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>DUPL</nobr> </td> <td>Evaluate sapwood theming engine </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=48">48</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>DUPL</nobr> </td> <td>Only power up the phone in case power button was pressed ... </td> </tr> <tr class="bz_minor bz_P2 "> <td> <a href="show_bug.cgi?id=49">49</a> </td> <td><nobr>min</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Implement touchscreen &amp; click daemon </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=50">50</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Sound Event API </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=51">51</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Preferences API </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=52">52</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>cj_steven@fic-sh.com.cn</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>Single Instance Startup </td> </tr> <tr class="bz_normal 
bz_P2 "> <td> <a href="show_bug.cgi?id=53">53</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>tonyguan@fic-sh.com.cn</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>DTMF tones during call </td> </tr> <tr class="bz_blocker bz_P1 "> <td> <a href="show_bug.cgi?id=54">54</a> </td> <td><nobr>blo</nobr> </td> <td><nobr>P1</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>tonyguan@fic-sh.com.cn</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>PIN Entry </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=55">55</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>tonyguan@fic-sh.com.cn</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>Don't pop up the dialer interface initially </td> </tr> <tr class="bz_blocker bz_P4 "> <td> <a href="show_bug.cgi?id=56">56</a> </td> <td><nobr>blo</nobr> </td> <td><nobr>P4</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>tonyguan@fic-sh.com.cn</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>Integrate with contacts database </td> </tr> <tr class="bz_minor bz_P2 "> <td> <a href="show_bug.cgi?id=57">57</a> </td> <td><nobr>min</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>tonyguan@fic-sh.com.cn</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>LATE</nobr> </td> <td>Recording Calls </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=58">58</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>API for devmand </td> </tr> <tr class="bz_enhancement bz_P2 "> <td> <a href="show_bug.cgi?id=59">59</a> </td> <td><nobr>enh</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>ken_zhao@fic-sh.com.cn</nobr> </td> <td><nobr>NEW</nobr> 
</td> <td><nobr></nobr> </td> <td>Real DPI vs. Fake DPI </td> </tr> <tr class="bz_minor bz_P2 "> <td> <a href="show_bug.cgi?id=60">60</a> </td> <td><nobr>min</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>ken_zhao@fic-sh.com.cn</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>fontconfig antialiasing </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=61">61</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>ken_zhao@fic-sh.com.cn</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Theme is very slow </td> </tr> <tr class="bz_wishlist bz_P2 "> <td> <a href="show_bug.cgi?id=62">62</a> </td> <td><nobr>wis</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>High Level Multi Layer Network Discovery API </td> </tr> <tr class="bz_enhancement bz_P2 "> <td> <a href="show_bug.cgi?id=63">63</a> </td> <td><nobr>enh</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>ASSI</nobr> </td> <td><nobr></nobr> </td> <td>matchbox-panel 1 vs. 
2 </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=64">64</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Show Cipher Status in GSM-Panel applet </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=65">65</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Visual indication for SMS overflow </td> </tr> <tr class="bz_critical bz_P2 "> <td> <a href="show_bug.cgi?id=66">66</a> </td> <td><nobr>cri</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Applet for Missed Events </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=67">67</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>WONT</nobr> </td> <td>libmokopim not necessary </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=68">68</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>SIM backend for EDS </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=69">69</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>buglog@lists.openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Speed up System Initialization </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=70">70</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> 
<td><nobr>buglog@lists.openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Minimize Services started on Bootup </td> </tr> <tr class="bz_enhancement bz_P2 "> <td> <a href="show_bug.cgi?id=71">71</a> </td> <td><nobr>enh</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>gordon_hsu@fic-sh.com.cn</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>make a short vibration pulse once u-boot is starting </td> </tr> <tr class="bz_wishlist bz_P2 "> <td> <a href="show_bug.cgi?id=72">72</a> </td> <td><nobr>wis</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>gordon_hsu@fic-sh.com.cn</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>Add on-screen boot menu </td> </tr> <tr class="bz_blocker bz_P2 "> <td> <a href="show_bug.cgi?id=73">73</a> </td> <td><nobr>blo</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>test and verify battery charger control (pcf50606) </td> </tr> <tr class="bz_blocker bz_P1 "> <td> <a href="show_bug.cgi?id=74">74</a> </td> <td><nobr>blo</nobr> </td> <td><nobr>P1</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>WONT</nobr> </td> <td>stub audio driver to power up amp and route audio through... 
</td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=75">75</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>PWM code for display brightness control </td> </tr> <tr class="bz_enhancement bz_P2 "> <td> <a href="show_bug.cgi?id=76">76</a> </td> <td><nobr>enh</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>teddy@fic-sh.com.cn</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Implement PWM control for vibrator </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=77">77</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>songcw@fic-sh.com.cn</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Finish, test and verify agpsd implementation </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=78">78</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>Implement and test ASoC platform driver </td> </tr> <tr class="bz_blocker bz_P1 "> <td> <a href="show_bug.cgi?id=79">79</a> </td> <td><nobr>blo</nobr> </td> <td><nobr>P1</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>werner@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>suspend/resume to RAM support </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=80">80</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>WONT</nobr> </td> <td>Add sysfs entry for PMU wakeup reason </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=81">81</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> 
<td><nobr>Neo</nobr> </td> <td><nobr>werner@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Decide how PMU RTC alarm interrupt is signalled to userspace </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=82">82</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>implement and test cpufreq interface to S3C2410 PLL / SLO... </td> </tr> <tr class="bz_enhancement bz_P2 "> <td> <a href="show_bug.cgi?id=83">83</a> </td> <td><nobr>enh</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>teddy@fic-sh.com.cn</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>evaluate process and I/O schedulers </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=84">84</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>enable voluntary preemption </td> </tr> <tr class="bz_minor bz_P2 "> <td> <a href="show_bug.cgi?id=85">85</a> </td> <td><nobr>min</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>test NO_IDLE_HZ / tickless idle </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=86">86</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>APM emulation for battery / charger / charging and possib... 
</td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=87">87</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>define and implement how headphone jack routing/signallin... </td> </tr> <tr class="bz_minor bz_P2 "> <td> <a href="show_bug.cgi?id=88">88</a> </td> <td><nobr>min</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>teddy@fic-sh.com.cn</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>use and test PMU watchdog driver </td> </tr> <tr class="bz_critical bz_P2 "> <td> <a href="show_bug.cgi?id=89">89</a> </td> <td><nobr>cri</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>teddy@fic-sh.com.cn</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>determine correct gamma calibration values and put them i... </td> </tr> <tr class="bz_critical bz_P1 "> <td> <a href="show_bug.cgi?id=90">90</a> </td> <td><nobr>cri</nobr> </td> <td><nobr>P1</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>GSM TS07.10 multiplex missing </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=91">91</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>DUPL</nobr> </td> <td>debug sd card timeout problems </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=92">92</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>test multiple microSD card vendors for compatibility with... 
</td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=93">93</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>test 4GB microSD card compatibility </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=94">94</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>tonyguan@fic-sh.com.cn</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>+ symbol support </td> </tr> <tr class="bz_minor bz_P2 "> <td> <a href="show_bug.cgi?id=95">95</a> </td> <td><nobr>min</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>sean_chiang@fic.com.tw</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>verify charger current and battery temperature reading co... </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=96">96</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>make sure PMU alarm (set via rtc interface) is persistent </td> </tr> <tr class="bz_blocker bz_P2 "> <td> <a href="show_bug.cgi?id=97">97</a> </td> <td><nobr>blo</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>remove static mtd partition table, use u-boot created dyn... </td> </tr> <tr class="bz_enhancement bz_P2 "> <td> <a href="show_bug.cgi?id=98">98</a> </td> <td><nobr>enh</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>how to do touch panel calibration in factory and store va... 
</td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=99">99</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>DUPL</nobr> </td> <td>Implement SMS support </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=100">100</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Implement Cell Broadcast support </td> </tr> </table> <table class="bz_buglist" cellspacing="0" cellpadding="4" width="100%"> <colgroup> <col class="bz_id_column"> <col class="bz_severity_column"> <col class="bz_priority_column"> <col class="bz_platform_column"> <col class="bz_owner_column"> <col class="bz_status_column"> <col class="bz_resolution_column"> <col class="bz_summary_column"> </colgroup> <tr align="left"> <th colspan="1"> <a href="buglist.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=&amp;order=bugs.bug_id">ID</a> </th> <th colspan="1"> <a 
href="buglist.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=&amp;order=bugs.bug_severity,bugs.bug_id">Sev</a> </th><th colspan="1"> <a href="buglist.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=&amp;order=bugs.priority,bugs.bug_id">Pri</a> </th><th colspan="1"> <a 
href="buglist.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=&amp;order=bugs.rep_platform,bugs.bug_id">Plt</a> </th><th colspan="1"> <a href="buglist.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=&amp;order=map_assigned_to.login_name,bugs.bug_id">Owner</a> </th><th colspan="1"> <a 
href="buglist.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=&amp;order=bugs.bug_status,bugs.bug_id">State</a> </th><th colspan="1"> <a href="buglist.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=&amp;order=bugs.resolution,bugs.bug_id">Result</a> </th><th colspan="1"> <a 
href="buglist.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=&amp;order=bugs.short_desc,bugs.bug_id">Summary</a> </th> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=101">101</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Implement GPRS setup/teardown support </td> </tr> <tr class="bz_critical bz_P2 "> <td> <a href="show_bug.cgi?id=102">102</a> </td> <td><nobr>cri</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>ASSI</nobr> </td> <td><nobr></nobr> </td> <td>SIM phonebook access </td> </tr> <tr class="bz_blocker bz_P1 "> <td> <a href="show_bug.cgi?id=103">103</a> </td> <td><nobr>blo</nobr> </td> <td><nobr>P1</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>power-up/power-down GSM Modem </td> </tr> <tr class="bz_critical bz_P2 "> <td> <a href="show_bug.cgi?id=104">104</a> </td> <td><nobr>cri</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>tonyguan@fic-sh.com.cn</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>LATE</nobr> </td> <td>Volume control </td> </tr> <tr class="bz_enhancement bz_P2 "> <td> <a 
href="show_bug.cgi?id=105">105</a> </td> <td><nobr>enh</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>DUPL</nobr> </td> <td>add passthrough mode </td> </tr> <tr class="bz_blocker bz_P2 "> <td> <a href="show_bug.cgi?id=106">106</a> </td> <td><nobr>blo</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>tonyguan@fic-sh.com.cn</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>LATE</nobr> </td> <td>Emergency Call Support </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=107">107</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>obtain list of operators / control operator selection </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=108">108</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>tonyguan@fic-sh.com.cn</nobr> </td> <td><nobr>REOP</nobr> </td> <td><nobr></nobr> </td> <td>allow query of manufacturer/model/revision/imei </td> </tr> <tr class="bz_enhancement bz_P2 "> <td> <a href="show_bug.cgi?id=109">109</a> </td> <td><nobr>enh</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>add dbus interface, like recent upstream gpsd </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=110">110</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>look into gps / agps integration </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=111">111</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> 
<td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>integrate agpsd in our system power management. </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=112">112</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>buglog@lists.openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>How to deliver kernel-level alarm to destination app </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=113">113</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>marcel@holtmann.org</nobr> </td> <td><nobr>ASSI</nobr> </td> <td><nobr></nobr> </td> <td>bluetooth headset support </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=114">114</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>buglog@lists.openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Who is managing wakeup times? 
</td> </tr> <tr class="bz_enhancement bz_P2 "> <td> <a href="show_bug.cgi?id=115">115</a> </td> <td><nobr>enh</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>marcel@holtmann.org</nobr> </td> <td><nobr>ASSI</nobr> </td> <td><nobr></nobr> </td> <td>A2DP / alsa integration </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=116">116</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>marcel@holtmann.org</nobr> </td> <td><nobr>ASSI</nobr> </td> <td><nobr></nobr> </td> <td>bluetooth HID support (host) </td> </tr> <tr class="bz_minor bz_P2 "> <td> <a href="show_bug.cgi?id=117">117</a> </td> <td><nobr>min</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>marcel@holtmann.org</nobr> </td> <td><nobr>ASSI</nobr> </td> <td><nobr></nobr> </td> <td>bluetooth HID support (device) </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=118">118</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>marcel@holtmann.org</nobr> </td> <td><nobr>ASSI</nobr> </td> <td><nobr></nobr> </td> <td>bluetooth networking support </td> </tr> <tr class="bz_critical bz_P3 "> <td> <a href="show_bug.cgi?id=119">119</a> </td> <td><nobr>cri</nobr> </td> <td><nobr>P3</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>merge openmoko-taskmanager into openmoko-footer </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=120">120</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>marcel@holtmann.org</nobr> </td> <td><nobr>ASSI</nobr> </td> <td><nobr></nobr> </td> <td>bluetooth OBEX </td> </tr> <tr class="bz_critical bz_P3 "> <td> <a href="show_bug.cgi?id=121">121</a> </td> <td><nobr>cri</nobr> </td> <td><nobr>P3</nobr> </td> <td><nobr>All</nobr> </td> 
<td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>merge openmoko-mainmenu into openmoko-mainmenu (panel) </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=122">122</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>rename openmoko-history to openmoko-taskmanager </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=123">123</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>DUPL</nobr> </td> <td>rename openmoko-history to openmoko-taskmanager </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=124">124</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>modem volume control on connection </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=125">125</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>davewu01@seed.net.tw</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>Finger UI is not usable on 2.8&quot; screen </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=126">126</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>sunzhiyong@fic-sh.com.cn</nobr> </td> <td><nobr>ASSI</nobr> </td> <td><nobr></nobr> </td> <td>Remove back functionality from Main Menu </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=127">127</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>davewu01@seed.net.tw</nobr> </td> <td><nobr>RESO</nobr> </td>
<td><nobr>FIXE</nobr> </td> <td>Power On / Off Images needed </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=128">128</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>cj_steven@fic-sh.com.cn</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Tap and hold on panel icon doesn't change to Today applic... </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=129">129</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>ken_zhao@fic-sh.com.cn</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Create / Find better system fonts </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=130">130</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>GTK Popup menus size incorrectly </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=131">131</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>davewu01@seed.net.tw</nobr> </td> <td><nobr>ASSI</nobr> </td> <td><nobr></nobr> </td> <td>Move Search Open / Close buttons into same location </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=132">132</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>sunzhiyong@fic-sh.com.cn</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>Task Manager is not quick to use </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=133">133</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>davewu01@seed.net.tw</nobr> </td> <td><nobr>ASSI</nobr> </td> <td><nobr></nobr> </td> <td>Designer image layouts should have both 4 corners and ful... 
</td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=134">134</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Stylus applications need close function </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=135">135</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Finger applications need close functionality </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=136">136</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>buglog@lists.openmoko.org</nobr> </td> <td><nobr>ASSI</nobr> </td> <td><nobr></nobr> </td> <td>application manager doesn't build </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=137">137</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>buglog@lists.openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>submit patch against ipkg upstream </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=138">138</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>sunzhiyong@fic-sh.com.cn</nobr> </td> <td><nobr>REOP</nobr> </td> <td><nobr></nobr> </td> <td>submit patch against matchbox-window-manager upstream </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=139">139</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>GSM API </td> </tr> <tr class="bz_enhancement bz_P2 "> <td> <a href="show_bug.cgi?id=140">140</a> </td> <td><nobr>enh</nobr> </td> 
<td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>stefan@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>add network-enabled fbgrab from openEZX to openmoko-devel... </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=141">141</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>buglog@lists.openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Need support for device under WIndows and OS X </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=142">142</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>werner@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>sjf2410-linux cleanup / help message / NAND read </td> </tr> <tr class="bz_enhancement bz_P2 "> <td> <a href="show_bug.cgi?id=143">143</a> </td> <td><nobr>enh</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>buglog@lists.openmoko.org</nobr> </td> <td><nobr>REOP</nobr> </td> <td><nobr></nobr> </td> <td>Implement NAND write/read support in OpenOCD </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=144">144</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>when phone is hard-rebooted, Xfbdev complains about /tmp/... 
</td> </tr> <tr class="bz_blocker bz_P2 "> <td> <a href="show_bug.cgi?id=145">145</a> </td> <td><nobr>blo</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>WONT</nobr> </td> <td>battery is not automatically charging </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=146">146</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>sjf2410-linux does not contain latest svn code </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=147">147</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>WONT</nobr> </td> <td>openmoko-panel-applet could not be resized </td> </tr> <tr class="bz_blocker bz_P1 "> <td> <a href="show_bug.cgi?id=148">148</a> </td> <td><nobr>blo</nobr> </td> <td><nobr>P1</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>gsmd not talking to TI modem on GTA01Bv2 </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=149">149</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>ASSI</nobr> </td> <td><nobr></nobr> </td> <td>lm4857 not i2c address compliant </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=150">150</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>graeme.gregory@wolfsonmicro...</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>INVA</nobr> </td> <td>ASoC patch doesn't compile </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=151">151</a> </td> 
<td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>cj_steven@fic-sh.com.cn</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Does mainmenu need libmatchbox or not? </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=152">152</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>cj_steven@fic-sh.com.cn</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>VFOLDERDIR is hardcoded </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=153">153</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>sunzhiyong@fic-sh.com.cn</nobr> </td> <td><nobr>ASSI</nobr> </td> <td><nobr></nobr> </td> <td>Rationale for copying GtkIconView instead of deriving? </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=154">154</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>sunzhiyong@fic-sh.com.cn</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>mainmenu crashes when clicking wheel the 2nd time </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=155">155</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>sunzhiyong@fic-sh.com.cn</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>How to get back one level if you are in a subdirectory? </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=156">156</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>sunzhiyong@fic-sh.com.cn</nobr> </td> <td><nobr>ASSI</nobr> </td> <td><nobr></nobr> </td> <td>Where is mainmenu going to look for applications? 
</td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=157">157</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>davewu01@seed.net.tw</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>DUPL</nobr> </td> <td>The sizes of each keys are too small </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=158">158</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>buglog@lists.openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>musicplayer crashes </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=159">159</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>sunzhiyong@fic-sh.com.cn</nobr> </td> <td><nobr>ASSI</nobr> </td> <td><nobr></nobr> </td> <td>display thumbnails of actual applications </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=160">160</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>sunzhiyong@fic-sh.com.cn</nobr> </td> <td><nobr>ASSI</nobr> </td> <td><nobr></nobr> </td> <td>display thumbnails in 3x3 grid </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=161">161</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>davewu01@seed.net.tw</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>Docked Keypad is too small </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=162">162</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>REMI</nobr> </td> <td>libmutil0_svn.bb setup misses libltdl creation </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=163">163</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> 
<td><nobr>PC</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Audio Profile Management </td> </tr> <tr class="bz_major bz_P1 "> <td> <a href="show_bug.cgi?id=164">164</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P1</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>werner@openmoko.org</nobr> </td> <td><nobr>ASSI</nobr> </td> <td><nobr></nobr> </td> <td>improve non-SanDisk microSD support in u-boot </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=165">165</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>buglog@lists.openmoko.org</nobr> </td> <td><nobr>ASSI</nobr> </td> <td><nobr></nobr> </td> <td>openmoko-simplemediaplayer doesn't build in OE </td> </tr> <tr class="bz_minor bz_P2 "> <td> <a href="show_bug.cgi?id=166">166</a> </td> <td><nobr>min</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>u-boot cdc_acm hot un-plug/replug hang </td> </tr> <tr class="bz_enhancement bz_P2 "> <td> <a href="show_bug.cgi?id=167">167</a> </td> <td><nobr>enh</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>stefan@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>add LCM QVGA switching support </td> </tr> <tr class="bz_minor bz_P2 "> <td> <a href="show_bug.cgi?id=168">168</a> </td> <td><nobr>min</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>buglog@lists.openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>usb0 is not automatically configured </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=169">169</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>gdb 
currently broken (gdb-6.4-r0) </td> </tr> <tr class="bz_minor bz_P2 "> <td> <a href="show_bug.cgi?id=170">170</a> </td> <td><nobr>min</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>usbtty: sometimes bogus characters arrive </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=171">171</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>agpsd source code and bitbake rules not in our svn </td> </tr> <tr class="bz_blocker bz_P1 "> <td> <a href="show_bug.cgi?id=172">172</a> </td> <td><nobr>blo</nobr> </td> <td><nobr>P1</nobr> </td> <td><nobr>Oth</nobr> </td> <td><nobr>tonyguan@fic-sh.com.cn</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>missing openmoko-dialer-window-pin.o breaks build </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=173">173</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Oth</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>no NAND partitions due to ID mismatch if using defaults </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=174">174</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Oth</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>defconfig-om-gta01 could use updating </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=175">175</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>MOKO_FINGER_WINDOW has to show_all and then hide to initi... 
</td> </tr> <tr class="bz_critical bz_P2 "> <td> <a href="show_bug.cgi?id=176">176</a> </td> <td><nobr>cri</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>libgsmd need a mechanism to avoid dead waiting. </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=177">177</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>libmokoui widget functions should return GtkWidget </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=178">178</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>werner@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>u-boot 'factory reset' option </td> </tr> <tr class="bz_minor bz_P2 "> <td> <a href="show_bug.cgi?id=179">179</a> </td> <td><nobr>min</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>werner@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>Implement u-boot power-off timer </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=180">180</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Mac</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>uboot build broken for EABI </td> </tr> <tr class="bz_wishlist bz_P2 "> <td> <a href="show_bug.cgi?id=181">181</a> </td> <td><nobr>wis</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>buglog@lists.openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Password Storage/Retrieval Application </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=182">182</a> </td> <td><nobr>nor</nobr> </td> 
<td><nobr>P2</nobr> </td> <td><nobr>Mac</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>openmoko-panel-demo-simple hardcodes -Werror </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=183">183</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Mac</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>openmoko-simple-mediaplayer missing mkinstalldirs and has... </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=184">184</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Mac</nobr> </td> <td><nobr>cj_steven@fic-sh.com.cn</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>openmoko-mainmenu should link against libmb </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=185">185</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Mac</nobr> </td> <td><nobr>thomas@openedhand.com</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>openmoko-dates lacks intltool-update.in </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=186">186</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Mac</nobr> </td> <td><nobr>thomas@openedhand.com</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>Fingerbubbles take endless amount of ram and get OOMed </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=187">187</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Mac</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>src/target/OM-2007/README doesn't mention ipkg patch </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=188">188</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Mac</nobr> 
</td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>openmoko-panel-demo fails to build </td> </tr> <tr class="bz_normal bz_P5 "> <td> <a href="show_bug.cgi?id=189">189</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P5</nobr> </td> <td><nobr>Mac</nobr> </td> <td><nobr>thomas@openedhand.com</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>openmoko-dates tries to include non-existant header </td> </tr> <tr class="bz_normal bz_P5 "> <td> <a href="show_bug.cgi?id=190">190</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P5</nobr> </td> <td><nobr>Mac</nobr> </td> <td><nobr>thomas@openedhand.com</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>No rule to build dates.desktop </td> </tr> <tr class="bz_enhancement bz_P2 "> <td> <a href="show_bug.cgi?id=191">191</a> </td> <td><nobr>enh</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>sean_chiang@fic.com.tw</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>investigate if we can set CPU voltage to 1.8V on 200MHz o... 
</td> </tr> <tr class="bz_enhancement bz_P2 "> <td> <a href="show_bug.cgi?id=192">192</a> </td> <td><nobr>enh</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Mac</nobr> </td> <td><nobr>buglog@lists.openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Graphic bootsplash during userspace sysinit </td> </tr> <tr class="bz_enhancement bz_P3 "> <td> <a href="show_bug.cgi?id=193">193</a> </td> <td><nobr>enh</nobr> </td> <td><nobr>P3</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>sean_chiang@fic.com.tw</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Information about current charging status when AC is online </td> </tr> <tr class="bz_minor bz_P2 "> <td> <a href="show_bug.cgi?id=194">194</a> </td> <td><nobr>min</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>stefan@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>s3c2410fb 8bit mode corrupt </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=195">195</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>stefan@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>passthrough mode (Directly use GSM Modem from PC </td> </tr> <tr class="bz_enhancement bz_P2 "> <td> <a href="show_bug.cgi?id=196">196</a> </td> <td><nobr>enh</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Mac</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Merge back fixes to openmoko recipes from OE </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=197">197</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Make theme suitable for qvga screens. 
</td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=198">198</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>Please enable CONFIG_TUN as a module in defconfig-om-gta01 </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=199">199</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>sean_mosko@fic.com.tw</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>We need freely licensed ringtones </td> </tr> <tr class="bz_minor bz_P2 "> <td> <a href="show_bug.cgi?id=200">200</a> </td> <td><nobr>min</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>PARALLEL_MAKE seems to not work </td> </tr> </table> <table class="bz_buglist" cellspacing="0" cellpadding="4" width="100%"> <colgroup> <col class="bz_id_column"> <col class="bz_severity_column"> <col class="bz_priority_column"> <col class="bz_platform_column"> <col class="bz_owner_column"> <col class="bz_status_column"> <col class="bz_resolution_column"> <col class="bz_summary_column"> </colgroup> <tr align="left"> <th colspan="1"> <a 
href="buglist.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=&amp;order=bugs.bug_id">ID</a> </th> <th colspan="1"> <a href="buglist.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=&amp;order=bugs.bug_severity,bugs.bug_id">Sev</a> </th><th colspan="1"> <a 
href="buglist.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=&amp;order=bugs.priority,bugs.bug_id">Pri</a> </th><th colspan="1"> <a href="buglist.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=&amp;order=bugs.rep_platform,bugs.bug_id">Plt</a> </th><th colspan="1"> <a 
href="buglist.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=&amp;order=map_assigned_to.login_name,bugs.bug_id">Owner</a> </th><th colspan="1"> <a href="buglist.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=&amp;order=bugs.bug_status,bugs.bug_id">State</a> </th><th colspan="1"> <a 
href="buglist.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=&amp;order=bugs.resolution,bugs.bug_id">Result</a> </th><th colspan="1"> <a href="buglist.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=&amp;order=bugs.short_desc,bugs.bug_id">Summary</a> </th> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=201">201</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Use TEXT_BASE 0x37f80000 in u-boot on GTA01Bv2 and higher </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=202">202</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>werner@openmoko.org</nobr> </td> 
<td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>Start using NAND hardware ECC support </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=203">203</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>fix the web site: http://openmoko.com/ </td> </tr> <tr class="bz_minor bz_P2 "> <td> <a href="show_bug.cgi?id=204">204</a> </td> <td><nobr>min</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>buglog@lists.openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Fatal error in Special:Newimages </td> </tr> <tr class="bz_minor bz_P2 "> <td> <a href="show_bug.cgi?id=205">205</a> </td> <td><nobr>min</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>add code to u-boot to query hardware revision and serial ... 
</td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=206">206</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Disallow setting of overvoltage via pcf50606 kernel driver </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=207">207</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>DFU mode should only be enabled when in &quot;911 key&quot; mode </td> </tr> <tr class="bz_minor bz_P2 "> <td> <a href="show_bug.cgi?id=208">208</a> </td> <td><nobr>min</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>u-boot DFU upload broken </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=209">209</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>u-boot DFU needs to block console access while in DFU mode </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=210">210</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Oth</nobr> </td> <td><nobr>henryk@ploetzli.ch</nobr> </td> <td><nobr>ASSI</nobr> </td> <td><nobr></nobr> </td> <td>&quot;now&quot; causes frequent rebuilds and fills disks </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=211">211</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>buglog@lists.openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>sjf2410-linux-native.bb has do_deploy in the wrong location </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a 
href="show_bug.cgi?id=212">212</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Oth</nobr> </td> <td><nobr>werner@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Charging seems completely broken </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=213">213</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Oth</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>openmoko-dates-0.1+svnnow fails certificate check </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=214">214</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>Add CVS_TARBALL_STASH for missing upstream sources </td> </tr> <tr class="bz_blocker bz_P2 "> <td> <a href="show_bug.cgi?id=215">215</a> </td> <td><nobr>blo</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>fingerwheel crashes mainmenu when touching the black part </td> </tr> <tr class="bz_blocker bz_P3 "> <td> <a href="show_bug.cgi?id=216">216</a> </td> <td><nobr>blo</nobr> </td> <td><nobr>P3</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>thomas@openedhand.com</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>DUPL</nobr> </td> <td>contacts crashes when tying to enter import widget </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=217">217</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>werner@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Implement NAND OTP area read/write as u-boot commands </td> </tr> <tr class="bz_enhancement bz_P2 "> <td> <a href="show_bug.cgi?id=218">218</a> </td> 
<td><nobr>enh</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Distinguish stylus from finger via tslib </td> </tr> <tr class="bz_blocker bz_P2 "> <td> <a href="show_bug.cgi?id=219">219</a> </td> <td><nobr>blo</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>tonyguan@fic-sh.com.cn</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>openmoko-dialer r1159 fails to compile </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=220">220</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>libgsmd_device.c is missing </td> </tr> <tr class="bz_blocker bz_P2 "> <td> <a href="show_bug.cgi?id=221">221</a> </td> <td><nobr>blo</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Mac</nobr> </td> <td><nobr>thomas@openedhand.com</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>Can't add new contacts via the gui </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=222">222</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Mac</nobr> </td> <td><nobr>thomas@openedhand.com</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>WORK</nobr> </td> <td>Can't add new events </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=223">223</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Mac</nobr> </td> <td><nobr>thomas@openedhand.com</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>weekview only displays half the week </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=224">224</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> 
<td><nobr>FIXE</nobr> </td> <td>call to uboot-mkimage requires ${STAGING_BINDIR} prefix </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=225">225</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>Fix ordering of do_deploy in uboot to be compatible with ... </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=226">226</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>buglog@lists.openmoko.org</nobr> </td> <td><nobr>REOP</nobr> </td> <td><nobr></nobr> </td> <td>dfu-util-native do_deploy tries to install from wrong sou... </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=227">227</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Add openmoko-mirrors.bbclass and enable use of it </td> </tr> <tr class="bz_blocker bz_P2 "> <td> <a href="show_bug.cgi?id=228">228</a> </td> <td><nobr>blo</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>openmoko applications(contacts, appmanager ...) easily c... </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=229">229</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>davewu01@seed.net.tw</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>outgoing call/incoming call/talking status should be more... 
</td> </tr> <tr class="bz_trivial bz_P2 "> <td> <a href="show_bug.cgi?id=230">230</a> </td> <td><nobr>tri</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>Use the toolchain speified in $CROSS_COMPILE in u-boot. </td> </tr> <tr class="bz_minor bz_P2 "> <td> <a href="show_bug.cgi?id=231">231</a> </td> <td><nobr>min</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>werner@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>switch display backlight GPIO to &quot;output, off&quot; when suspe... </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=232">232</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Oth</nobr> </td> <td><nobr>buglog@lists.openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>n-plicate buglog mails </td> </tr> <tr class="bz_critical bz_P2 "> <td> <a href="show_bug.cgi?id=233">233</a> </td> <td><nobr>cri</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>werner@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>power-off timer should be halted in DFU mode </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=234">234</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>werner@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>check for bad blocks in first _and_ second page of each b... </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=235">235</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>Deploy openocd-native, not openocd, and make openocd-nati... 
</td> </tr> <tr class="bz_critical bz_P2 "> <td> <a href="show_bug.cgi?id=236">236</a> </td> <td><nobr>cri</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Close moko_dialog_window several times, moko_stylus_demo ... </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=237">237</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>Fix remaining https urls in bitbake recipes. </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=238">238</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Mac</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>LATE</nobr> </td> <td>manual test bug </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=239">239</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>LATE</nobr> </td> <td>foo </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=240">240</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>INVA</nobr> </td> <td>broken-1.0-r0-do_fetch </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=241">241</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>CLOS</nobr> </td> <td><nobr>LATE</nobr> </td> <td>broken-1.0-r0-do_fetch </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=242">242</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> 
</td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>INVA</nobr> </td> <td>broken-1.0-r0-do_compile </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=243">243</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>INVA</nobr> </td> <td>broken-1.0-r0-do_configure </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=244">244</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>I can't build Xorg7.1 from MokoMakefile </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=245">245</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>Neo crashes when writing large amounts of data to SD </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=246">246</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>sean_chiang@fic.com.tw</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Debug board needs to be recognized by mainline linux kernel. </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=247">247</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>thomas@openedhand.com</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>openmoko-dates svn rev. 
335 does no longer build </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=248">248</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>buglog@lists.openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Buttons disappear under zoom </td> </tr> <tr class="bz_enhancement bz_P2 "> <td> <a href="show_bug.cgi?id=249">249</a> </td> <td><nobr>enh</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>add command to print gsmd version number </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=250">250</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>INVA</nobr> </td> <td>broken-1.0-r0-do_compile </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=251">251</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>INVA</nobr> </td> <td>broken-1.0-r0-do_compile </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=252">252</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>REOP</nobr> </td> <td><nobr></nobr> </td> <td>openmoko-devel-image-1.0-r0-do_rootfs </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=253">253</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Mount /tmp as tmpfs </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=254">254</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> 
<td><nobr>Oth</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>bug with &quot;patch&quot; on arklinux 2006.1?? </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=255">255</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>tony_tu@fiwin.com.tw</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>battery voltage scale is not correct </td> </tr> <tr class="bz_critical bz_P2 "> <td> <a href="show_bug.cgi?id=256">256</a> </td> <td><nobr>cri</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>sean_chiang@fic.com.tw</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>GSM Modem doesn't seem to work on some devices </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=257">257</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Oth</nobr> </td> <td><nobr>sean_chiang@fic.com.tw</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>AUX button sticking </td> </tr> <tr class="bz_major bz_P2 "> <td> <a href="show_bug.cgi?id=258">258</a> </td> <td><nobr>maj</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>All</nobr> </td> <td><nobr>cj_steven@fic-sh.com.cn</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Main Menu needs to have Single Instance functionality </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=259">259</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>stefan@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>implement 500mA charging in u-boot </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=260">260</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>stefan@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> 
<td>implement 100mA charging in Linux </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=261">261</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>stefan@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Implement 500mA charging using wall-outlet charger </td> </tr> <tr class="bz_enhancement bz_P2 "> <td> <a href="show_bug.cgi?id=262">262</a> </td> <td><nobr>enh</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>buglog@lists.openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>Indicate different charging mode in battery applet </td> </tr> <tr class="bz_blocker bz_P2 "> <td> <a href="show_bug.cgi?id=263">263</a> </td> <td><nobr>blo</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>gsmd doesn't receive AT reply from the modem properly. 
</td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=264">264</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>package libelf-0.8.6-r0: task do_populate_sysroot: failed </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=265">265</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>MokoMakefile: perl-native fix </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=266">266</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>ftdi-eeprom-native missing confuse-native dependency </td> </tr> <tr class="bz_enhancement bz_P4 "> <td> <a href="show_bug.cgi?id=267">267</a> </td> <td><nobr>enh</nobr> </td> <td><nobr>P4</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>buglog@lists.openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>internal function duplicates strstr(3) </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=268">268</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>buglog@lists.openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>openmoko-today crashes when one of the buttons is pressed </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=269">269</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>openmoko-contacts-0.1+svnnow-r3_0_200703151745-do_unpack </td> </tr> <tr class="bz_normal bz_P2 "> <td> 
<a href="show_bug.cgi?id=270">270</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>does our xserver need security updates? </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=271">271</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>laforge@openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>It would be nice if ppp was supported by kernel </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=272">272</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>openmoko-contacts-0.1+svnnow-r3_0_200703152250-do_unpack </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=273">273</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>openmoko-contacts-0.1+svnnow-r3_0_200703160254-do_unpack </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=274">274</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>openmoko-contacts-0.1+svnnow-r3_0_200703160321-do_unpack </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=275">275</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>openmoko-contacts-0.1+svnnow-r3_0_200703160350-do_unpack </td> </tr> <tr class="bz_normal bz_P3 "> <td> <a 
href="show_bug.cgi?id=276">276</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P3</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>songcw@fic-sh.com.cn</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>The open file window is too ugly </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=277">277</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>openmoko-contacts-0.1+svnnow-r3_0_200703160712-do_unpack </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=278">278</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>mickey@vanille-media.de</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>openmoko-contacts-0.1+svnnow-r3_0_200703160805-do_unpack </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=279">279</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>buglog@lists.openmoko.org</nobr> </td> <td><nobr>RESO</nobr> </td> <td><nobr>FIXE</nobr> </td> <td>Appmanager crush when install packages </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=280">280</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>songcw@fic-sh.com.cn</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>openmoko-appmanager not refresh the packages list after r... 
</td> </tr> <tr class="bz_normal bz_P3 "> <td> <a href="show_bug.cgi?id=281">281</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P3</nobr> </td> <td><nobr>PC</nobr> </td> <td><nobr>buglog@lists.openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>implicit declaration of function `strdup' </td> </tr> <tr class="bz_normal bz_P2 "> <td> <a href="show_bug.cgi?id=282">282</a> </td> <td><nobr>nor</nobr> </td> <td><nobr>P2</nobr> </td> <td><nobr>Neo</nobr> </td> <td><nobr>buglog@lists.openmoko.org</nobr> </td> <td><nobr>NEW</nobr> </td> <td><nobr></nobr> </td> <td>microSD Problem </td> </tr> </table> 282 bugs found. <br> <form method="post" action="long_list.cgi"> <input type="hidden" name="buglist" value="1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256,257,258,259,260,261,262,263,264,265,266,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282"> <input type="submit" value="Long Format"> <a href="query.cgi">Query Page</a> &nbsp;&nbsp; <a href="enter_bug.cgi">Enter New Bug</a> &nbsp;&nbsp; <a 
href="colchange.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=">Change Columns</a> &nbsp;&nbsp; <a href="query.cgi?short_desc_type=allwordssubstr&amp;short_desc=&amp;long_desc_type=allwordssubstr&amp;long_desc=&amp;bug_file_loc_type=allwordssubstr&amp;bug_file_loc=&amp;bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;bug_status=RESOLVED&amp;bug_status=VERIFIED&amp;bug_status=CLOSED&amp;emailassigned_to1=1&amp;emailtype1=substring&amp;email1=&amp;emailassigned_to2=1&amp;emailreporter2=1&amp;emailcc2=1&amp;emailtype2=substring&amp;email2=&amp;bugidtype=include&amp;bug_id=&amp;votes=&amp;changedin=&amp;chfieldfrom=&amp;chfieldto=Now&amp;chfieldvalue=&amp;field0-0-0=noop&amp;type0-0-0=noop&amp;value0-0-0=">Edit this Query</a> &nbsp;&nbsp; </form> <!-- 1.0@bugzilla.org --> </div> <div class="footer"> <div class="group">This is <b>Bugzilla</b>: the Mozilla bug system. 
For more information about what Bugzilla is and what it can do, see <a href="http://www.bugzilla.org/">bugzilla.org</a>.</div> <!-- 1.0@bugzilla.org --> <form method="get" action="show_bug.cgi"> <div class="group"> <a href="enter_bug.cgi">New</a> | <a href="query.cgi">Query</a> | <input type="submit" value="Find"> bug # <input name="id" size="6"> | <a href="reports.cgi">Reports</a> </div> <div> <a href="createaccount.cgi">New&nbsp;Account</a> | <a href="query.cgi?GoAheadAndLogIn=1">Log&nbsp;In</a> </div> </form> </div> </body> </html> """ bugfinder =BugQueryExtractor() bugfinder.feed(bugs_openmoko) print bugfinder.result() print len(bugfinder.result()) seen_numbers = {} for (number,_) in bugfinder.result(): seen_numbers[number] = "Yes" for i in range(1,283): if not seen_numbers.has_key(str(i)): print "Not seen %d" % i
140,512
73,677
import ast
import datetime
import cv2
import psutil
from utils import *


def presenter_log(message: str):
    """Log *message* tagged with the presenter process prefix "PRST"."""
    log("PRST", message)


def present_annotated_frames_from_stream(pipe_reader, pid):
    """Receive frames plus detection boxes over a pipe and display them.

    Messages are expected as ``"<frame-string>|<annotation-literal>"`` pairs
    sent by the producer process identified by *pid*; ``None`` is a keep-alive
    sentinel.  The loop ends when the pipe goes quiet and the producer is
    dead, or when the user presses 'q' in the OpenCV window.

    NOTE(review): ``np`` and ``string_to_frame`` are assumed to come from
    ``from utils import *`` — confirm against utils.py.
    """
    presenter_log("presenter presents")
    # Keep consuming while data is pending (3 s poll) or the producer lives.
    while pipe_reader.poll(3) or psutil.pid_exists(pid):
        message = pipe_reader.recv()
        if message is None:
            # Sentinel message: stop only once the producer process is gone.
            if not psutil.pid_exists(pid):
                break
            else:
                continue
        frame_string = message.split('|')[0]
        annotations = message.split('|')[1]
        gray_frame = string_to_frame(frame_string)
        blurred_frame = cv2.GaussianBlur(gray_frame, (21, 21), 0)
        # NOTE(review): this tests the length of the annotation *string*, so a
        # literal "[]" (empty list) still reads as "occupied" — confirm the
        # sender never transmits "[]" for an empty detection set.
        text = "unoccupied" if len(annotations) == 0 else "occupied"
        # loop over the contours
        for (bottom_left_corner, top_right_corner) in ast.literal_eval(annotations):
            # Mask is 1 everywhere except inside the detection box (filled 0),
            # so the box interior is replaced by its blurred version.
            blur_mask = np.ones(gray_frame.shape, dtype=np.uint8)
            cv2.rectangle(blur_mask, bottom_left_corner, top_right_corner, 0, thickness=-1)
            gray_frame = np.where(np.logical_not(blur_mask), blurred_frame, gray_frame)
            cv2.rectangle(gray_frame, bottom_left_corner, top_right_corner, (0, 255, 0), 2)
        # draw the text and timestamp on the frame
        cv2.putText(img=gray_frame, text="Room Status: {}".format(text), org=(10, 20),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5, color=(2, 2, 255), thickness=2)
        cv2.putText(img=gray_frame, text=datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                    org=(10, gray_frame.shape[0] - 10), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.35, color=(2, 2, 255), thickness=1)
        # show the frame and record if the user presses a key
        cv2.imshow("Security Feed", cv2.convertScaleAbs(gray_frame))
        key = cv2.waitKey(1) & 0xFF
        # if the `q` key is pressed, break from the loop
        if key == ord("q"):
            break
    # cleanup and close any open windows
    presenter_log("presenter finished presenting")
    cv2.destroyAllWindows()
2,188
748
import copy
import random

import numpy as np
from sklearn.svm import SVC


class RandomKLabelsets:
    """RandomKLabelsets multi-label ensemble classifier.

    Trains ``n`` base classifiers, each on a random subset ("labelset") of
    ``k`` labels encoded as a single multi-class target, then predicts each
    label by majority vote over the ensemble members whose labelset covers it.

    Reference Paper:
    Min-Ling Zhang and Zhi-Hua Zhou. A Review on Multi-Label Learning Algorithms
    """

    def __init__(self, classifier=None):
        # Lazy default: building SVC() in the signature would create one
        # shared instance at import time; ``None`` keeps the same behavior
        # (an rbf-kernel SVC) while each RandomKLabelsets gets its own.
        self.classifier = classifier if classifier is not None else SVC(kernel='rbf')

    def fit(self, X, y, k=3, n=0):
        """Fit the ensemble on features ``X`` and binary label matrix ``y``.

        k: size of each random labelset (must not exceed the label count).
        n: number of labelsets; 0 selects the default of 2 * label_count.
        Returns self.
        """
        self.m = X.shape[0]
        self.label_count = y.shape[1]
        self.k = self.chooseLabelsetsSize(k)
        self.n = self.chooseLabelsetsNum(n)
        self.k_labelsets = np.zeros((self.n, self.label_count))
        self.classifiers = []
        for i in range(self.n):
            # Each member trains a fresh copy of the prototype classifier on
            # the powerset-encoded restriction of y to one random labelset.
            classifier = copy.deepcopy(self.classifier)
            k_labelset = self.generateRandomK_labelsets()
            y_subset = self.getSubsetOfy(y, k_labelset)
            classifier.fit(X, self.transform(y_subset))
            self.classifiers.append(classifier)
            self.k_labelsets[i, :] = k_labelset
        return self

    def predict(self, X_pre):
        """Predict a binary label matrix for ``X_pre`` by majority voting."""
        result = np.zeros((X_pre.shape[0], self.label_count))
        ysubsets = [self.inverse_transform(clf.predict(X_pre))
                    for clf in self.classifiers]
        for sample in range(X_pre.shape[0]):
            for label in range(self.label_count):
                maxVotes = 0
                actualVotes = 0
                for i in range(self.n):
                    if ysubsets[i][sample, label] == 1:
                        actualVotes += 1
                    if self.k_labelsets[i, label] == 1:
                        maxVotes += 1
                # Guard: a label may appear in no labelset at all, in which
                # case nobody can vote for it (was a ZeroDivisionError).
                if maxVotes > 0 and (actualVotes / maxVotes) > 0.5:
                    result[sample][label] = 1
        return result

    def chooseLabelsetsSize(self, k):
        """Validate the labelset size ``k`` against the number of labels."""
        if k > self.label_count:
            raise ValueError('the given labelset size exceeds the number of labels')
        else:
            return k

    def chooseLabelsetsNum(self, n):
        """Resolve the labelset count, capped at C(label_count, k).

        Uses exact integer arithmetic (the running product after step ``i``
        is C(label_count, i+1), so the floor division is exact).  The old
        float version made ``fit`` crash with ``range(float)`` whenever the
        combinatorial cap was the minimum.
        """
        if n == 0:
            n = 2 * self.label_count
        mostLabelsetsNum = 1
        for i in range(self.k):
            mostLabelsetsNum = mostLabelsetsNum * (self.label_count - i) // (i + 1)
        return min(n, mostLabelsetsNum)

    def generateRandomK_labelsets(self):
        """Draw a random 0/1 indicator vector with exactly ``k`` ones."""
        labelIndexes = set()
        labelset = np.zeros(self.label_count)
        while len(labelIndexes) < self.k:
            randomIndex = random.randint(0, self.label_count - 1)
            labelIndexes.add(randomIndex)
            labelset[randomIndex] = 1
        return labelset

    def getSubsetOfy(self, y, k_labelset):
        """Restrict ``y`` to the labels selected by ``k_labelset`` (others -> 0)."""
        y_subset = np.zeros((self.m, self.label_count))
        for sample in range(self.m):
            for index in range(self.label_count):
                if y[sample, index] == 1 and k_labelset[index] == 1:
                    y_subset[sample, index] = 1
        return y_subset

    def transform(self, y_subset):
        """Encode each binary label row as a single integer (powerset encoding)."""
        result = np.zeros(y_subset.shape[0])
        for i in range(y_subset.shape[0]):
            for j in range(y_subset.shape[1]):
                result[i] += y_subset[i][j] * (2**j)
        return result

    def inverse_transform(self, y):
        """Decode powerset-encoded class numbers back into binary label rows."""
        result = np.zeros((y.shape[0], self.label_count))
        for row in range(result.shape[0]):
            number = y[row]
            for col in range(result.shape[1]):
                result[row][col] = number % 2
                number = int(number / 2)
        return result
3,412
1,079
from django.test import TestCase, Client
from django.urls import reverse


class TestViews(TestCase):
    """View-layer tests for the users app."""

    def setUp(self):
        """Create a test client and resolve the URLs exercised by the tests."""
        self.client = Client()
        self.register_url = reverse('register')
        self.profile_url = reverse('profile')

    def test_register(self):
        """A GET on the register view succeeds and renders the register template."""
        page = self.client.get(self.register_url)
        self.assertEqual(page.status_code, 200)
        self.assertTemplateUsed(page, 'users/register.html')
406
135
from django import forms

from incidences.models import Incidence


class IncidenceForm(forms.ModelForm):
    """ModelForm for creating and editing :class:`Incidence` records."""

    class Meta:
        model = Incidence
        fields = ['name', 'description', 'risk', 'causes', 'effects', 'controls']
        # Bootstrap styling: single-valued fields use form-control/form-select,
        # many-to-many style fields get multi-selects.
        widgets = {
            'name': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Incidencia'}),
            'description': forms.TextInput(attrs={'class': 'form-control'}),
            'risk': forms.Select(attrs={'class': 'form-select'}),
            'causes': forms.SelectMultiple(attrs={'class': 'form-select'}),
            'effects': forms.SelectMultiple(attrs={'class': 'form-select'}),
            'controls': forms.SelectMultiple(attrs={'class': 'form-select'}),
        }
        # NOTE(review): 'controls' has no label override here, so Django falls
        # back to the model field's verbose name — confirm that is intended.
        labels = {
            'name': '',
            'description': 'Descripción del evento',
            'risk': 'Riesgo asociado',
            'causes': 'Causas manifestadas',
            'effects': 'Consecuencias manifestadas',
        }
976
289
"""Stream MJPEG frames from an ESP32-CAM, find the largest dark contour and
display its centroid in an OpenCV window.  Press 'q' in the window to quit."""
import cv2 as cv
import numpy as np
from urllib.request import urlopen
import os
import datetime
import time
import sys

# change to your ESP32-CAM ip
url = "http://192.168.31.184:81/stream"
CAMERA_BUFFRER_SIZE = 4096
stream = urlopen(url)
bts = b''
while True:
    try:
        while True:
            # Accumulate raw bytes and slice out one JPEG per SOI/EOI marker pair.
            bts += stream.read(CAMERA_BUFFRER_SIZE)
            jpghead = bts.find(b'\xff\xd8')
            jpgend = bts.find(b'\xff\xd9')
            if jpghead > -1 and jpgend > -1:
                jpg = bts[jpghead:jpgend + 2]
                bts = bts[jpgend + 2:]
                img = cv.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv.IMREAD_UNCHANGED)
                v = cv.flip(img, 0)
                h = cv.flip(img, 1)
                p = cv.flip(img, -1)
                # Camera is mounted upside down: use the 180-degree flip.
                frame = p
                img = cv.resize(frame, (480, 320))
                # Crop to the region of interest, then rotate upright.
                img = img[0:200, 60:300]
                h, w = img.shape[:2]
                # Fix: use cv.ROTATE_90_CLOCKWISE directly; the cv.cv2 alias is
                # an artifact of old cv2 packaging and is absent in newer builds.
                img = cv.rotate(img, cv.ROTATE_90_CLOCKWISE)
                rows, cols, _ = img.shape
                gray_img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
                gray_img = cv.GaussianBlur(gray_img, (7, 7), 0)
                # Inverse threshold: dark pixels (< 70) become foreground.
                _, threshold = cv.threshold(gray_img, 70, 255, cv.THRESH_BINARY_INV)
                contours, hierarchy = cv.findContours(threshold, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
                # Keep only the largest contour.
                contours = [max(contours, key=cv.contourArea)]
                for c in contours:
                    M = cv.moments(c)
                    # NOTE(review): m00 can be 0 for a degenerate contour; the
                    # resulting ZeroDivisionError is swallowed by the outer
                    # except and simply skips the frame.
                    cX = int(M["m10"] / M["m00"])
                    cY = int(M["m01"] / M["m00"])
                    # draw the contour and center of the shape on the image
                    cv.circle(gray_img, (cX, cY), 7, (255, 255, 255), -1)
                    cv.putText(gray_img, "center", (cX - 20, cY - 20),
                               cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
                # gray_img = cv.drawContours(gray_img, contours, -1, (0,255,0), 3)
                cv.imshow('contoured', gray_img)
                k = cv.waitKey(1)
                # Fix: '==' binds tighter than '&', so the original
                # ``k & 0xFF==ord('q')`` evaluated to ``k & False`` == 0 and the
                # quit key never worked.  Mask first, then compare.
                if (k & 0xFF) == ord('q'):
                    exit()
        cv.destroyAllWindows()
    except Exception as e:
        # Best-effort: keep the stream loop alive across decode/contour errors.
        # NOTE(review): this also hides real bugs — consider logging ``e``.
        pass
2,206
848
# -*- coding: utf-8 -*-
import scrapy
from scrapy import Request


class Movie(scrapy.Item):
    """Item holding every attribute scraped from one IMDb movie page."""
    # define all items to scrape
    title = scrapy.Field()
    genres = scrapy.Field()
    when = scrapy.Field()
    director = scrapy.Field()
    stars = scrapy.Field()
    country = scrapy.Field()
    language = scrapy.Field()
    writer = scrapy.Field()
    url = scrapy.Field()
    time = scrapy.Field()


class MovieSpider(scrapy.Spider):
    """Spider that scrapes coming-soon IMDb movies, then follows one
    'more like this' link per page (depth-limited to 1)."""

    name = 'movies'  # name of the Spider
    allowed_domains = ['imdb.com']
    try:
        # links.csv - list with coming soon movies
        with open("links.csv", "rt") as file:
            # read each line from the file without the first (header) one
            start_urls = [url.strip() for url in file.readlines()][1:]
    except OSError:
        # Fix: only swallow I/O problems (e.g. a missing/unreadable
        # links.csv); the original bare ``except:`` also hid genuine
        # programming errors and even KeyboardInterrupt.
        start_urls = []

    # 'imdb.pipelines.DuplicatesPipelineItems': 300 - calls DuplicatesPipelineItems class from pipelines.py file (this will filter duplicated links in all movies)
    # 'CLOSESPIDER_PAGECOUNT': 100 - sets limit pages to 100 (delay in scrapy respone - more information in project description)
    # 'DEPTH_LIMIT': 1 - allows scrapy to go only to one next page
    custom_settings = {'ITEM_PIPELINES': {'imdb.pipelines.DuplicatesPipelineItems': 300},
                       'CLOSESPIDER_PAGECOUNT': 100,
                       'DEPTH_LIMIT': 1}

    def parse(self, response):
        """Extract a ``Movie`` item from *response*, then follow the first
        movie in the 'more like this' section."""
        # scrape all information about movie
        f = Movie()
        # get xpaths to items
        title_xpath = '//h1/text()'
        genres_xpath = '//div[@class="subtext"]/a[re:test(@href, "(genres){1}")]/text()'
        when_xpath = '//div[@class="subtext"]/a[re:test(text(), "[0-9]+\s+[A-Za-z]+\s+[0-9]+")]/text()'
        director_xpath = '//h4[re:test(text(), "(Director)")]/following-sibling::a/text()'
        stars_xpath = '//h4[text()="Stars:"]/following-sibling::a[re:test(@href, "name")]/text()'
        country_xpath = '//h4[text()="Country:"]/following-sibling::a/text()'
        language_xpath = '//h4[text()="Language:"]/following-sibling::a/text()'
        writer_xpath = '//h4[re:test(text(), "(Writer)")]/following-sibling::a[re:test(@href, "name")]/text()'
        time_xpath = '//h4[text()="Runtime:"]/following-sibling::time/text()'
        f['url'] = response.url
        f['title'] = [x.strip() for x in response.xpath(title_xpath).getall()]
        f['genres'] = response.xpath(genres_xpath).getall()
        f['when'] = [x.strip() for x in response.xpath(when_xpath).getall()]
        f['director'] = response.xpath(director_xpath).getall()
        f['stars'] = response.xpath(stars_xpath).getall()
        f['country'] = response.xpath(country_xpath).getall()
        f['language'] = response.xpath(language_xpath).getall()
        f['writer'] = response.xpath(writer_xpath).getall()
        f['time'] = response.xpath(time_xpath).getall()
        yield f
        # after scraping page of "coming soon" movie go to the first movie from "more like this" section
        # get link to movie from "more like this" section
        next_page = response.xpath('//div[re:test(@data-tconst, "tt")]/div/a/@href').extract_first()
        if next_page:
            next_page = response.urljoin(next_page)
            # go to the next page and call parse function to get all items from page
            yield scrapy.Request(url=next_page, callback=self.parse)
3,489
1,144
from fastapi import FastAPI
from vc import vc
import json
from fastapi.openapi.utils import get_openapi
from fastapi.openapi.docs import (
    get_redoc_html,
    get_swagger_ui_html,
    get_swagger_ui_oauth2_redirect_html,
)

# Vertica connection settings are read once from config.json at import time.
with open('config.json') as jf:
    d = json.load(jf)
vh = d['vertica']['host']
vpo = d['vertica']['port']
vu = d['vertica']['user']
vp = d['vertica']['password']
vd = d['vertica']['database']


class connection(vc):
    """vc Vertica client pre-configured with the credentials from config.json."""

    ci = {'host': vh, 'port': vpo, 'user': vu,
          'password': vp, 'database': vd, 'read_timeout': 100}

    def go(self, query):
        """Run *query*, fetch every row, close the connection, return the rows."""
        q = f'{query}'
        self.query(q)
        r = self.fetchall()
        self.close()
        return r

    def custom(self, query, commit):
        """Run *query*; commit before closing when *commit* is true."""
        q = f'{query}'
        self.query(q)
        r = self.fetchall()
        if commit:
            self.commit()
        self.close()
        return r


def _fetch(query):
    """Execute *query* on a fresh connection and wrap the outcome for FastAPI.

    Returns {"data": rows} on success or {"error": message} on failure.
    The exception is stringified: the original ``{"error": e}`` handed a raw
    exception object to FastAPI's JSON encoder, which is not serializable,
    so every error path produced a 500 instead of reporting the problem.
    """
    v = connection()
    try:
        r = v.go(query)
    except Exception as e:
        return {"error": str(e)}
    return {"data": r}


app = FastAPI(title="Monitoring Vertica", docs_url=None, redoc_url=None)


@app.get("/docs", include_in_schema=False)
async def custom_swagger_ui_html():
    """Serve Swagger UI from a CDN (default docs routes are disabled above)."""
    return get_swagger_ui_html(
        openapi_url=app.openapi_url,
        title=app.title + " - Swagger UI",
        oauth2_redirect_url=app.swagger_ui_oauth2_redirect_url,
        swagger_js_url="https://cdn.jsdelivr.net/npm/swagger-ui-dist@3/swagger-ui-bundle.js",
        swagger_css_url="https://cdn.jsdelivr.net/npm/swagger-ui-dist@3/swagger-ui.css",
        swagger_favicon_url="https://cdn.jsdelivr.net/npm/swagger-ui-dist@3/favicon-32x32.png",
    )


@app.get(app.swagger_ui_oauth2_redirect_url, include_in_schema=False)
async def swagger_ui_redirect():
    """OAuth2 redirect helper page required by Swagger UI."""
    return get_swagger_ui_oauth2_redirect_html()


@app.get("/redoc", include_in_schema=False)
async def redoc_html():
    """Serve ReDoc from a CDN."""
    return get_redoc_html(
        openapi_url=app.openapi_url,
        title=app.title + " - ReDoc",
        redoc_js_url="https://cdn.jsdelivr.net/npm/redoc@next/bundles/redoc.standalone.js",
        redoc_favicon_url="https://cdn.jsdelivr.net/npm/swagger-ui-dist@3/favicon-32x32.png",
    )


@app.get("/", tags=["index"])
def read_root():
    """Simple liveness check."""
    return {"Hello": "World"}


@app.get("/query/{content}", tags=["query"])
def custom_query(content: str, commit: bool = False):
    """Execute an arbitrary SQL statement taken verbatim from the URL path.

    SECURITY NOTE: this endpoint runs caller-supplied SQL with no
    validation; it must only be reachable on a trusted network.
    """
    v = connection()
    try:
        r = v.custom(content, commit)
    except Exception as e:
        return {"error": str(e)}
    return {"data": r}


@app.get("/node/status", tags=["System Health"])
def node_status():
    """UP/DOWN state of every node in the cluster."""
    return _fetch("SELECT node_name, node_state FROM nodes ORDER BY 1;")


@app.get("/epoch/status", tags=["System Health"])
def epoch_status():
    """Epoch counters, fault tolerance and WOS/ROS usage."""
    return _fetch(
        "SELECT current_epoch, ahm_epoch, last_good_epoch, "
        "designed_fault_tolerance, current_fault_tolerance, "
        "wos_used_bytes, ros_used_bytes FROM system;")


@app.get("/delete/vector/count", tags=["System Health"])
def gather_the_total_count_of_delete_vectors_for_the_system():
    """Total number of delete vectors in the system."""
    return _fetch("SELECT COUNT(*) FROM v_monitor.delete_vectors;")


@app.get("/delete/vector", tags=["System Health"])
def delete_vector():
    """Projections whose deleted rows exceed 5% of their total rows."""
    return _fetch(
        "SELECT node_name, schema_name, projection_name, total_row_count, "
        "deleted_row_count, delete_vector_count FROM storage_containers "
        "WHERE deleted_row_count > total_row_count*.05::float "
        "ORDER BY deleted_row_count desc;")


@app.get("/delete/vector/ros/containers", tags=["System Health"])
def view_the_number_of_ROS_containers_per_projection_per_node():
    """ROS container count per projection per node."""
    return _fetch(
        "SELECT node_name, projection_schema, projection_name, "
        "SUM(ros_count) AS ros_count FROM v_monitor.projection_storage "
        "GROUP BY node_name, projection_schema, projection_name "
        "ORDER BY ros_count DESC;")


@app.get("/resource/pools", tags=["Resource Usage"])
def resource_pools():
    """Memory usage and running-query count of the 'general' resource pool."""
    return _fetch(
        "SELECT sysdate AS current_time, node_name, pool_name, "
        "memory_inuse_kb, general_memory_borrowed_kb, running_query_count "
        "FROM resource_pool_status WHERE pool_name IN ('general') "
        "ORDER BY 1,2,3;")


@app.get("/query/excessive/{memory}", tags=["Resource Usage"])
def monitor_if_a_query_is_taking_excessive_memory_resource_and_causing_the_cluster_to_slow_down(memory: str):
    """Top *memory* resource acquisitions ordered by memory in use."""
    return _fetch(
        f"SELECT * FROM resource_acquisitions ORDER BY memory_inuse_kb desc limit {memory};")


@app.get("/resource/pools/queue/status", tags=["Resource Usage"])
def resource_pool_queue_status():
    """Requests currently queued for resource-pool admission."""
    return _fetch("SELECT * FROM v_monitor.resource_queues;")


@app.get("/resource/request/rejections", tags=["Resource Usage"])
def resource_request_rejections():
    """Resource requests that were rejected."""
    return _fetch("SELECT * FROM v_monitor.resource_rejections;")


@app.get("/resource/bottleneck", tags=["Resource Usage"])
def resource_bottleneck():
    """System resource usage history, most recent first."""
    return _fetch(
        "SELECT * FROM v_monitor.system_resource_usage ORDER BY end_time DESC;")


@app.get("/storage/space", tags=["Resource Usage"])
def storage_space_availability():
    """Storage usage samples, most recent first."""
    return _fetch(
        "SELECT * FROM v_monitor.storage_usage ORDER BY poll_timestamp DESC;")


@app.get("/active/sessions", tags=["Active Sessions"])
def active_sessions():
    """Currently open sessions and their running statement."""
    return _fetch(
        "SELECT user_name, session_id, current_statement, statement_start "
        "FROM v_monitor.sessions;")


@app.get("/active/sessions/close/{session_id}", tags=["Active Sessions"])
def close_the_active_sessions(session_id: str):
    """Force-close the given session."""
    return _fetch(f"SELECT close_session ('{session_id}');")


@app.get("/running/queries", tags=["Active Queries"])
def get_a_list_of_queries_executing_at_the_moment():
    """Queries that are executing right now."""
    return _fetch(
        "SELECT node_name, query, query_start, user_name, is_executing "
        "FROM v_monitor.query_profiles WHERE is_executing = 't';")


@app.get("/load/status", tags=["Active Queries"])
def check_the_loading_progress_of_active_and_historical_queries():
    """Progress of currently executing load streams."""
    return _fetch(
        "SELECT table_name, read_bytes, input_file_size_bytes, "
        "accepted_row_count, rejected_row_count, parse_complete_percent, "
        "sort_complete_percent FROM load_streams WHERE is_executing = 't' "
        "ORDER BY table_name;")


@app.get("/lock/status", tags=["Active Queries"])
def a_query_with_no_results_indicates_that_no_locks_are_in_use():
    """Locks currently held or requested; empty result means no locks."""
    return _fetch(
        'SELECT locks.lock_mode, locks.lock_scope, '
        'substr(locks.transaction_description, 1, 100) AS "left", '
        'locks.request_timestamp, locks.grant_timestamp FROM v_monitor.locks;')


@app.get("/recovery/status", tags=["Recovery"])
def node_recovery_status():
    """Per-node recovery phase and progress."""
    return _fetch(
        "SELECT node_name, recover_epoch, recovery_phase, current_completed, "
        "current_total, is_running FROM v_monitor.recovery_status ORDER BY 1;")


@app.get("/rebalance/status", tags=["Rebalance"])
def rebalance_status():
    """Node dependency map used by rebalance."""
    return _fetch("SELECT GET_NODE_DEPENDENCIES();")


@app.get("/overall/progress/rebalance/operation", tags=["Rebalance"])
def progress_of_each_currently_executing_rebalance_operation():
    """Counts of rebalance projections by method and Completed/In Progress/Queued state."""
    return _fetch(
        "SELECT rebalance_method Rebalance_method, Status, COUNT(*) AS Count "
        "FROM ( SELECT rebalance_method, CASE WHEN (separated_percent = 100 "
        "AND transferred_percent = 100) THEN 'Completed' WHEN "
        "( separated_percent <> 0 and separated_percent <> 100) OR "
        "(transferred_percent <> 0 AND transferred_percent <> 100) "
        "THEN 'In Progress' ELSE 'Queued' END AS Status "
        "FROM v_monitor.rebalance_projection_status WHERE is_latest) AS tab "
        "GROUP BY 1, 2 ORDER BY 1, 2;")


@app.get("/execution/time/{limit}", tags=["Historical Activities"])
def queries_based_on_execution_time(limit: int):
    """The *limit* slowest historical queries."""
    return _fetch(
        f"SELECT user_name, start_timestamp, request_duration_ms, "
        f"transaction_id, statement_id, substr(request, 0, 1000) as request "
        f"FROM v_monitor.query_requests WHERE transaction_id > 0 "
        f"ORDER BY request_duration_ms DESC limit {limit};")


@app.get("/memory/usage", tags=["Historical Activities"])
def memory_usage_for_a_particular_query():
    # NOTE(review): the WHERE clause compares each column to itself
    # (transaction_id = transaction_id), so it filters nothing — preserved
    # as-is; confirm the intended filter with the original author.
    return _fetch(
        "SELECT node_name, transaction_id, statement_id, user_name, "
        "start_timestamp, request_duration_ms, memory_acquired_mb, "
        "substr(request, 1, 100) AS request FROM v_monitor.query_requests "
        "WHERE transaction_id = transaction_id AND statement_id = statement_id;")


@app.get("/partitions", tags=["Object Statistics"])
def view_the_partition_count_per_node_per_projection():
    """Partition count per node per projection."""
    return _fetch(
        "SELECT node_name, projection_name, count(partition_key) "
        "FROM v_monitor.partitions GROUP BY node_name, projection_name "
        "ORDER BY node_name, projection_name;")


@app.get("/segmentation/data/skew", tags=["Object Statistics"])
def view_the_row_count_per_segmented_projection_per_node():
    """Row count per segmented projection per node (reveals data skew)."""
    return _fetch(
        "SELECT ps.node_name, ps.projection_schema, ps.projection_name, "
        "ps.row_count FROM v_monitor.projection_storage ps "
        "INNER JOIN v_catalog.projections p "
        "ON ps.projection_schema = p.projection_schema "
        "AND ps.projection_name = p.projection_name "
        "WHERE p.is_segmented "
        "ORDER BY ps.projection_schema, ps.projection_name, ps.node_name;")


@app.get("/load/streams", tags=["Performance"])
def view_the_performance_of_load_streams():
    """Timing and row counts for every load stream."""
    return _fetch(
        "SELECT schema_name, table_name, load_start, load_duration_ms, "
        "is_executing, parse_complete_percent, sort_complete_percent, "
        "accepted_row_count, rejected_row_count FROM v_monitor.load_streams;")


def custom_openapi(openapi_prefix: str = ""):
    """Build (and cache) the OpenAPI schema with a custom description/logo.

    ``openapi_prefix`` now has a default so FastAPI versions that call
    ``app.openapi()`` with no arguments keep working (backward compatible
    with callers that still pass a prefix).
    """
    if app.openapi_schema:
        return app.openapi_schema
    openapi_schema = get_openapi(
        title="Monitoring Vertica",
        version="0.0.1",
        description="Vertica api <br><br> Project launched for test the <a href='https://fastapi.tiangolo.com/' target='_blank'>FastAPI</a> <br><br> Based on: <a href='https://www.vertica.com/kb/Best-Practices-for-Monitoring-Vertica/Content/BestPractices/BestPracticesforMonitoringVertica.htm' target='_blank'>Best Practices for Monitoring Vertica</a>",
        routes=app.routes,
        openapi_prefix=openapi_prefix,
    )
    openapi_schema["info"]["x-logo"] = {
        "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/0/03/Vertica_pos_blk_rgb.svg/300px-Vertica_pos_blk_rgb.svg.png"
    }
    app.openapi_schema = openapi_schema
    return app.openapi_schema


app.openapi = custom_openapi
14,742
4,585
def add(x=input("Please enter the first number"),y=input("Please enter the second number"),z=input("Please enter the third number")): print x,'+',y,'+',z return x+y+z def main(): print"main starts..." x=10 y=20 z=30 print "x is base" print add(x,y,z) print add(x,y) print add(x) print "y is base" print add(x,y,z) print add(y,z) print add(y) print "z is base" print add(x,y,z) print add(x,z) print add(z) print"entered values" print add() main()
489
214
# coding: utf-8 from __future__ import division, print_function, unicode_literals from abc import ABCMeta class Singleton(ABCMeta): _instances = {} def __call__(cls, *args, **kwargs): if cls not in cls._instances: cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) return cls._instances[cls] class cached_property(object): def __init__(self, func): self.func = func def __get__(self, instance, cls=None): result = instance.__dict__[self.func.__name__] = self.func(instance) return result def is_digit(value): try: float(value) return True except (ValueError, TypeError): return False
713
224
from sqlalchemy import Column, Integer, String, Boolean from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship, sessionmaker from sqlalchemy import create_engine Base = declarative_base() class Knowledge(Base): __tablename__='know' IDnumber=Column(Integer,primary_key=True) name=Column(String) topic=Column(String) rating=Column(Integer) def __repr__(self): return("\n If you want to learn about {}, " "you should look at the Wikipedia article called {}. " "We gave this article a rating of {} out of 10! ").format( self.topic, self.name, self.rating) site1= Knowledge(IDnumber=1,name="sport",topic="swimming",rating=8) # print(site1.__repr__())
715
247
from serial_j import SerialJ class StrIPv4Data(SerialJ): schema = [ {'name': 'prop1', 'type': (str, 'ipv4')} ] valid_data = StrIPv4Data({'prop1': '172.16.255.1'}) print(valid_data) # >>> {"prop1": "172.16.255.1"} invalid_data = StrIPv4Data({'prop1': '172.16.256.1'}) # >>> ValueError: Property: 'prop1' with Value: '172.16.256.1' does not confirm with Type: (<class 'str'>, 'ipv4').
404
189
from __future__ import print_function
import pickle
import os.path
import sys
import hashlib
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from apiclient import errors
from googleapiclient.http import MediaIoBaseDownload, MediaFileUpload

# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/drive']

TRANSLATION_FOLDER_ID = '1Q8BO4CB6tGk-hpYsPOq_Tc6FVPYqP5JA'
#TRANSLATION_FOLDER_ID = '1W7Yxvl3WRzZ1fDbuim8EPediY0qrxWBe'


def get_files(service, folderId, files, filter_folder_name):
    """Recursively index the Drive folder *folderId* into the dict *files*.

    The tree maps sub-folder name -> nested dict, and the xlsx files of each
    folder are collected in a list under the special key '.'.  When
    *filter_folder_name* is given, only the matching sub-folder at this
    level is descended into.
    """
    page_token = None
    while True:
        try:
            param = {}
            if page_token:
                param['pageToken'] = page_token
            children = service.files().list(
                fields='files(id, name, mimeType, md5Checksum)',
                q=f"'{folderId}' in parents and trashed = false",
                **param).execute()
            for child in children['files']:
                mimeType = child['mimeType']
                if mimeType == 'application/vnd.google-apps.folder':
                    sub_folder_name = child['name']
                    print(f"searching {sub_folder_name}")
                    if filter_folder_name and sub_folder_name != filter_folder_name:
                        continue
                    files[sub_folder_name] = {}
                    # The name filter only applies at this level; recurse
                    # without it.
                    get_files(service, child['id'], files[sub_folder_name], None)
                # xlsx spreadsheets are the payload we sync
                elif mimeType == 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet':
                    present_files = files.get('.', [])
                    present_files.append(child)
                    files['.'] = present_files
                elif mimeType == 'text/plain':
                    pass  # plain-text files are deliberately ignored
                else:
                    print(f"unexpected mimeType {mimeType} found, {child['name']}",
                          file=sys.stderr)
            page_token = children.get('nextPageToken')
            if not page_token:
                break
        except errors.HttpError as error:
            print(f'An error occured: {error}')
            break


def get_creds():
    """Load cached OAuth credentials, refreshing or re-authorizing as needed."""
    creds = None
    # The file token.pickle stores the user's access and refresh tokens, and
    # is created automatically when the authorization flow completes for the
    # first time.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)
    return creds


def download_folder(drive_service, tree, folder_path):
    """Queue a download for every remote file whose md5 differs locally.

    Returns a list of (MediaIoBaseDownload, file_object) pairs.  The file
    objects are intentionally left open: they are consumed chunk by chunk
    and closed by download_drive() once each download completes.
    """
    downloaders = []
    for folder_name, contents in tree.items():
        parent_folder = os.path.normpath(os.path.join(folder_path, folder_name))
        if folder_name != '.':
            downloaders.extend(download_folder(drive_service, contents, parent_folder))
            continue
        for file in contents:
            # makedirs (was os.mkdir): also creates missing intermediate
            # directories and tolerates an already-existing one.
            os.makedirs(parent_folder, exist_ok=True)
            local_file_path = os.path.join(parent_folder, file['name'])
            if os.path.exists(local_file_path):
                with open(local_file_path, 'rb') as local_file_fd:
                    local_md5 = hashlib.md5(local_file_fd.read()).hexdigest()
                if local_md5 == file['md5Checksum']:
                    continue  # local copy already up to date
            print(f"Downloading {file['name']} at {local_file_path}")
            request = drive_service.files().get_media(fileId=file['id'])
            fd = open(local_file_path, 'wb')
            downloaders.append((MediaIoBaseDownload(fd, request), fd))
    return downloaders


def upload_folder(drive_service, tree, folder_path):
    """Upload every local file whose md5 differs from its Drive counterpart."""
    for folder_name, contents in tree.items():
        parent_folder = os.path.normpath(os.path.join(folder_path, folder_name))
        if folder_name != '.':
            upload_folder(drive_service, contents, parent_folder)
            continue
        # 'entry' (was 'file') no longer shadows the builtin; the update()
        # response was previously assigned back to it and never used.
        for entry in contents:
            local_file_path = os.path.join(parent_folder, entry['name'])
            if not os.path.exists(local_file_path):
                print(f"{local_file_path} not exist")
                continue
            with open(local_file_path, 'rb') as local_file_fd:
                local_md5 = hashlib.md5(local_file_fd.read()).hexdigest()
            if local_md5 == entry['md5Checksum']:
                continue  # remote copy already matches
            print(f"Uploading {local_file_path}")
            drive_service.files().update(
                fileId=entry['id'],
                media_body=MediaFileUpload(local_file_path)).execute()


def download_drive(local_folder, filter_folder_name=None):
    """Mirror the translation folder from Drive into *local_folder*."""
    creds = get_creds()
    drive_service = build('drive', 'v3', credentials=creds)
    root = {}
    get_files(drive_service, TRANSLATION_FOLDER_ID, root, filter_folder_name)
    downloaders = download_folder(drive_service, root, local_folder)
    # Advance at most 10 downloads per pass until all have finished.
    while downloaders:
        for item in downloaders[:10]:  # slice copy: safe to remove() below
            down, fd = item
            try:
                status, done = down.next_chunk()
            except errors.HttpError:
                print(f"Failed to downloading {fd.name}")
                raise
            if done:
                fd.close()
                downloaders.remove(item)


def upload_drive(local_folder, filter_folder_name=None):
    """Push local changes under *local_folder* back to the Drive folder."""
    creds = get_creds()
    drive_service = build('drive', 'v3', credentials=creds)
    root = {}
    get_files(drive_service, TRANSLATION_FOLDER_ID, root, filter_folder_name)
    upload_folder(drive_service, root, local_folder)


if __name__ == '__main__':
    # Robustness fix: the original raised IndexError when run with no
    # arguments; print usage instead.
    if len(sys.argv) < 2 or sys.argv[1] not in ('download', 'upload'):
        print("usage: python <script> download|upload [folder_name]",
              file=sys.stderr)
        sys.exit(1)
    local_folder = f"{os.path.pardir}{os.path.sep}Drive"
    folder_filter = sys.argv[2] if len(sys.argv) >= 3 else None
    if sys.argv[1] == 'download':
        download_drive(local_folder, folder_filter)
    else:
        upload_drive(local_folder, folder_filter)
6,465
1,904