text: string
size: int64
token_count: int64
""" Contains data ingest related functions """ import re import os.path from dateutil.parser import parse as dateparser import typing from typing import Dict import cmr from hatfieldcmr.ingest.file_type import MODISBlobType MODIS_NAME = "modis-terra" TITLE_PATTERN_STRING = r"\w+:([\w]+\.[\w]+):\w+" TITLE_PATTERN = re.compile(TITLE_PATTERN_STRING) GRANULE_TITLE_KEY = 'title' GRANULE_TIME_KEY = 'time_start' GRANULE_NAME_KEY = 'producer_granule_id' def format_object_name(meta: Dict, object_name: str) -> str: """ Parameters ---------- metas: Dict Single Granule metadata JSON response from CMR object_name: str Name of object (ex. hdf file, xml file) Returns ---------- str Object name for granule. If insufficient information is available, empty string is returned. """ default_value = "" if meta is None: return default_value folder_prefix = "" try: folder_prefix = format_object_prefix(meta) except ValueError: return '' os.makedirs(folder_prefix, exist_ok=True) return f"{folder_prefix}/{object_name}" def format_object_prefix(meta: Dict): """Helper function to generate 'folder prefix' of the bucket object """ if not ((GRANULE_TITLE_KEY in meta) and (GRANULE_TIME_KEY in meta) and (GRANULE_NAME_KEY in meta)): raise ValueError('granule does not have required keys', meta) title = meta.get(GRANULE_TITLE_KEY, "") m = TITLE_PATTERN.match(title) if m is None: raise ValueError('granule does not have well formated title', title) product_name = m.groups()[0] date_string = dateparser(meta.get("time_start")).strftime('%Y.%m.%d') folder_prefix = format_object_prefix_helper(product_name, date_string) # f"{MODIS_NAME}/{product_name}/{date_string}" return folder_prefix def format_object_prefix_helper(product_name: str, date_string: str): return f"{MODIS_NAME}/{product_name}/{date_string}" class BlobPathMetadata: def __init__(self, product_name: str, date_string: str): self.product_name = product_name self.product_name_without_version = product_name[:7].lower() self.date_string = date_string self.date = dateparser(date_string) @staticmethod def parse(prefix_or_full_name: str): parts = prefix_or_full_name.split(r'/') if (len(parts) >= 3): product_name = parts[1] date_string = parts[2] return BlobPathMetadata(product_name, date_string) return None class MODISFileNameParser: THUMBNAIL_RE = re.compile(r"BROWSE\.([\w\.]+)\.\d+\.jpg") @classmethod def identify_file_type(cls, name: str): basename = os.path.basename(name) if ('BROWSE' in basename): return MODISBlobType.THUMBNAIL elif ('.hdf.xml' in basename): return MODISBlobType.METADATA_XML elif ('.hdf_meta.json' in basename): return MODISBlobType.METADATA_JSON elif ('.hdf' in basename): return MODISBlobType.DATA_HDF elif ('.tif.aux.xml' in basename): return MODISBlobType.GEOTIFF_XML elif ('.tif' in basename): return MODISBlobType.GEOTIFF else: print(f'unknown file name {name}') return '' @classmethod def extract_blob_id(cls, name: str, file_type: MODISBlobType = None): if file_type is None: file_type = cls.identify_file_type(name) if file_type == MODISBlobType.THUMBNAIL: return cls._extract_blob_id_thumbnail(name) elif file_type == MODISBlobType.METADATA_XML: return cls._extract_basename_from_file(name, '.hdf.xml') elif file_type == MODISBlobType.METADATA_JSON: return cls._extract_basename_from_file(name, '.hdf_meta.json') elif file_type == MODISBlobType.DATA_HDF: return cls._extract_basename_from_file(name, '.hdf') elif file_type == MODISBlobType.GEOTIFF: return cls._extract_basename_from_file(name, '.tif') elif file_type == MODISBlobType.GEOTIFF_XML: return 
cls._extract_basename_from_file(name, '.tif.aux.xml') return '' @classmethod def _extract_blob_id_thumbnail(cls, name: str) -> str: basename = os.path.basename(name) m = cls.THUMBNAIL_RE.match(basename) if m is None: return '' blob_id = m.groups()[0] name_includes_dir = len(name.split(r'/')) >= 4 if (name_includes_dir): product_name_doesnt_match_blob_prefix = cls._check_thumbnail_product_inconsistency( name, blob_id) if (product_name_doesnt_match_blob_prefix): blob_id = cls._fix_thumbnail_product_name_inconsistency( name, blob_id) return blob_id @classmethod def _check_thumbnail_product_inconsistency(cls, name: str, blob_id: str): full_name_product_name, blob_id_product_name = cls._extract_product_names( name, blob_id) return full_name_product_name != blob_id_product_name @classmethod def _fix_thumbnail_product_name_inconsistency(cls, name: str, blob_id: str): full_name_product_name, blob_id_product_name = cls._extract_product_names( name, blob_id) return blob_id.replace(blob_id_product_name, full_name_product_name) @classmethod def _extract_product_names(cls, name: str, blob_id: str): product_name_with_version = name.split(r'/')[1] full_name_product_name = product_name_with_version[:7] blob_id_product_name = blob_id[:7] return full_name_product_name, blob_id_product_name @classmethod def _extract_basename_from_file(cls, name: str, extension: str) -> str: basename = os.path.basename(name).strip() extension_len = len(extension) if (len(basename) > extension_len and basename[-extension_len:] == extension): return basename[:-extension_len] return ''
6,152
2,042
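A self-contained sketch of the folder-prefix logic in the ingest module above, run against a made-up CMR granule record; the title, timestamp and granule id are hypothetical, and datetime.strptime stands in for dateutil here.

import re
from datetime import datetime

TITLE_PATTERN = re.compile(r"\w+:([\w]+\.[\w]+):\w+")

meta = {
    "title": "SC:MOD13Q1.006:2427263793",             # hypothetical granule title
    "time_start": "2021-06-10T00:00:00.000Z",          # hypothetical start time
    "producer_granule_id": "MOD13Q1.A2021161.h10v03.006.hdf",
}

product_name = TITLE_PATTERN.match(meta["title"]).groups()[0]
date_string = datetime.strptime(meta["time_start"], "%Y-%m-%dT%H:%M:%S.%fZ").strftime("%Y.%m.%d")
print(f"modis-terra/{product_name}/{date_string}/{meta['producer_granule_id']}")
# modis-terra/MOD13Q1.006/2021.06.10/MOD13Q1.A2021161.h10v03.006.hdf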
import twitter import datetime import feedparser import re import string from django.core.management.base import BaseCommand from optparse import make_option from twittersmash.models import Feed, TwitterAccount, Message import pytz from pytz import timezone central = timezone('US/Central') utc = pytz.utc # Parses the "Tweet Format" in Twitter RSS feeds twit_re = re.compile(r'^(?P<username>\S+): (?P<message>.*)$') # Parses out hashtags tag_pat = r'\#([A-Za-z0-9]+)' tag_re = re.compile(tag_pat) class Command(BaseCommand): help = "Loops through feeds and determines if messages need to be sent to any twitter accounts" option_list = BaseCommand.option_list + ( make_option('--dryrun', '-D', action='store_true', dest='dryrun', default=False, help='Go through the motions but commit nothing to Twitter'), make_option('--quiet', '-q', action='store_true', dest='quiet', default=False, help='Don\t print anything to console'), make_option('--debug', '-d', action='store_true', dest='debug', default=False, help='Return debugging information'), ) def handle(self, *args, **options): # Get list of TwitterAccounts quiet = options.get('quiet') entries_pulled = 0 accounts_skipped = 0 accounts_ready = 0 entries_tweeted = 0 feeds_pulled = 0 messages_added = 0 feeds_checked = 0 messages_sent = [] accounts = TwitterAccount.objects.all().filter(active=True) for account in accounts: api = twitter.Api(username=account.username, password=account.password) if not quiet: print "Checking %s" % (account,) feed_list = account.feeds.all() for f in feed_list: feeds_checked += 1 if not quiet: print " - %s" % (f,) # Get list of feeds whose last_update + polling_rate is less than now if f.last_checked == None or f.last_checked + \ datetime.timedelta(minutes=f.polling_rate) < datetime.datetime.now(): accounts_ready += 1 # Update timestamp f.last_checked = datetime.datetime.now() f.save() if not quiet: print " * Pulling feed" # Pull each feed d = feedparser.parse(f.url) feeds_pulled += 1 # Loop through feed d.entries.reverse() for entry in d['entries']: entries_pulled += 1 guid = entry.id tweeted = entry.updated_parsed message = entry.title # TODO: Should probably consider moving # to dateutil here tweeted_dt = datetime.datetime( tweeted[0], tweeted[1], tweeted[2], tweeted[3], tweeted[4], tweeted[5], tzinfo=None ) tweeted_dt_cst = central.localize(tweeted_dt) tweeted_dt_utc = tweeted_dt_cst.astimezone(utc) tweeted_dt = datetime.datetime( tweeted_dt_utc.utctimetuple()[0], tweeted_dt_utc.utctimetuple()[1], tweeted_dt_utc.utctimetuple()[2], tweeted_dt_utc.utctimetuple()[3], tweeted_dt_utc.utctimetuple()[4], tweeted_dt_utc.utctimetuple()[5], ) msg, created = Message.objects.get_or_create( guid=guid, twitter_account=account, defaults={ 'feed': f, 'tweeted': tweeted_dt, 'message': message, 'twitter_account': account, }) send_to_twitter = False if created: messages_added += 1 send_to_twitter, message = self.process_messages( account=account, source_feed = f, message=message, created=tweeted_dt_utc, options=options ) if send_to_twitter: try: if not options.get('dryrun'): status = api.PostUpdate(message[:139]) if not quiet: print " * Sent to Twitter: '%s' (%s)" % (message, keyword,) else: if not quiet: print " * Dry run: '%s' (%s)" % (message, keyword,) entries_tweeted += 1 msg.sent_to_twitter = True msg.save() except e: if not quiet: print " - Failed to send to twitter (%s)" % (e,) else: if not quiet: print " * Checked within the last %s minutes" % (f.polling_rate) accounts_skipped += 1 if options.get('debug'): return { 'entries_pulled': 
entries_pulled, 'accounts_skipped': accounts_skipped, 'accounts_ready': accounts_ready, 'entries_tweeted': entries_tweeted, 'feeds_pulled': feeds_pulled, 'messages_added': messages_added, 'feeds_checked': feeds_checked, } def process_messages(self, account, source_feed, message, created, options): """ This method determines whether or not a message should be sent to Twitter. If needed, filters and munging are applied as well. `account` - A Twitter account instance `message` - The text of a single tweet `created` - The date/time at which a Tweet was Tweeted `options` - A dict of options, the only values used here are 'quiet' to suppress output. """ send_to_twitter = False quiet = options.get('quiet') reply_re = re.compile(r'\@%s' % account.username) # Prepare keywords keywords = account.philter.lower().strip().split(',') keywords = map(string.strip, keywords) if keywords == ['']: keywords = [] # Prep minimum DT if account.minimum_datetime: # Stored value here is UTC min_dt = utc.localize(account.minimum_datetime) else: min_dt = None # Wasn't already in the db if min_dt and created <= min_dt: if not quiet: print " * Skipped because of time restrictions" else: # Remove userames if needed if twit_re.search(message) and not account.prepend_names: message = twit_re.search(message).groups()[1] if account.prepend_names: message = "@" + message # Check to see if this message contains any of the keywords if keywords: for keyword in keywords: if keyword in message.lower(): send_to_twitter = True break else: send_to_twitter = False # Check to see if the message was directed at this account if account.philter_replies: if reply_re.search(message): send_to_twitter = True message = reply_re.sub('', message).strip() if account.strip_tags: if not quiet: print " * Removing tags" message = tag_re.sub('', message) if account.append_tags: m = re.findall(tag_pat, message) if m: # remove each hashtag for match in m: message = tag_re.sub('', message) # clean up whitespace message = message.strip() # append each tag to message for match in m: message += " #%s" % (match,) if account.append_initials and source_feed.initials: message += " ^%s" % source_feed.initials # Clean up whitespace message = message.strip() # Remove double spaces left from replacements message = message.replace(' ', ' ') return send_to_twitter, message
9,520
2,376
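A small standalone sketch of the append_tags munging the command above applies: hashtags are stripped from the body and re-appended at the end. The sample tweet text is made up, and a single sub() call stands in for the per-match loop in the original (the result is the same).

import re

tag_pat = r'\#([A-Za-z0-9]+)'
tag_re = re.compile(tag_pat)

message = "Lunch at #sxsw was great #austin"      # made-up tweet text
tags = re.findall(tag_pat, message)                # ['sxsw', 'austin']
message = tag_re.sub('', message).strip().replace('  ', ' ')
for tag in tags:
    message += " #%s" % tag
print(message)                                     # Lunch at was great #sxsw #austin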
import os import unittest from snowflet.lib import read_sql from snowflet.lib import logging_config from snowflet.lib import extract_args from snowflet.lib import apply_kwargs from snowflet.lib import strip_table from snowflet.lib import extract_tables_from_query from snowflet.lib import add_database_id_prefix from snowflet.lib import is_table from snowflet.lib import add_table_prefix_to_sql class StringFunctions(unittest.TestCase): """ Test """ def test_strip_table(self): """ Test """ self.assertEqual( strip_table(table_name='"db"."schema"."table"'), '"db.schema.table"', "strip_table: wrong table name" ) def test_extract_tables_from_query(self): """ Test """ self.assertEqual( extract_tables_from_query(sql_query=""" select a,b,c from "db"."schema"."table" and db.schema.table not "schema"."table" """), [ '"db"."schema"."table"', 'db.schema.table' ], "does not extract the tables properly" ) class TableFunctions(unittest.TestCase): """ Test """ def test_is_table(self): self.assertTrue( is_table( word='"db"."test"."table1"' ,sql=""" select a.* from "db"."test"."table1" a left join db.test.table2 b on a.id=b.id left join db."test".table3 c on b.id = c.id """), "select: ok" ) self.assertTrue( is_table( word='"db"."test"."table4"' ,sql=""" create table "db"."test"."table4" as select a.* from "db"."test"."table1" a left join db.test.table2 b on a.id=b.id left join db."test".table3 c on b.id = c.id """), "create - select: ok" ) def test_add_table_prefix_to_sql(self): self.assertEqual( add_table_prefix_to_sql( sql=""" select a.* from "db1"."test"."table1" a left join db2.test.table2 b on a.id=b.id left join db3."test".table3 c on b.id = c.id """, prefix="CLONE_1003" ), """ select a.* from "CLONE_1003_DB1"."TEST"."TABLE1" a left join "CLONE_1003_DB2".TEST.TABLE2 b on a.id=b.id left join "CLONE_1003_DB3"."TEST".TABLE3 c on b.id = c.id """, "add_table_prefix_to_sql: ok" ) # def test_extract_tables(self): # self.assertEqual( # extract_tables(""" select a.* from "db"."test"."table1" and db.test.table2 and db."test".table3 """), # ["db.test.table1", "db.test.table2", "db.test.table3"], # "multiple tables, mix double quotes and not" # ) # self.assertEqual( # extract_tables(""" select a.* from "db"."test"."table1" and db.test.table2 and db."test".table1 """), # ["db.test.table1", "db.test.table2"], # "returned unique values" # ) class ReadSql(unittest.TestCase): """ Test """ def test_class_read_sql_file(self): """ Test """ sql = read_sql( file="tests/sql/read_sql.sql", param1="type", param2="300", param3="shipped_date", param4='trying' ) # self.assertEqual( # sql, # 'select type, shipped_date from "DB_TEST"."SCHEMA_TEST"."TABLE1" where amount > 300', # "read_sql unit test" # ) sql = read_sql( file="tests/sql/read_sql.sql" ) self.assertTrue( sql == 'select {param1}, {param3} from "DB_TEST"."SCHEMA_TEST"."TABLE1" where amount > {param2}', "read_sql file unit test no opt parameters" ) with self.assertRaises(KeyError): read_sql( file="tests/sql/read_sql.sql", database_id='something' ) def test_class_read_sql_query(self): """ Test """ sql = read_sql( query='select {param1}, {param3} from "db_test"."schema_test"."table1" where amount > {param2}', param1="type", param2="300", param3="shipped_date", param4='trying' ) self.assertEqual( sql, 'select type, shipped_date from "DB_TEST"."SCHEMA_TEST"."TABLE1" where amount > 300', "read_sql unit test" ) sql = read_sql( file="tests/sql/read_sql.sql" ) self.assertTrue( sql == 'select {param1}, {param3} from "DB_TEST"."SCHEMA_TEST"."TABLE1" where amount > {param2}', 
"read_sql query unit test no opt parameters" ) with self.assertRaises(KeyError): read_sql( file="tests/sql/read_sql.sql", database_id='something' ) class FunctionsInLib(unittest.TestCase): """ Unittest class for lib functions """ def test_extract_args_1_param(self): content = [ { "table_desc": "table1", "create_table": { "table_id": "table1", "dataset_id": "test", "file": "tests/sql/table1.sql" }, "pk": ["col1", "col2"], "mock_data": "sql/table1_mocked.sql" }, { "table_desc": "table2", "create_table": { "table_id": "table2", "dataset_id": "test", "file": "tests/sql/table2.sql" }, "pk": ["col1"], "mock_data": "sql/table1_mocked.sql" } ] self.assertEqual( extract_args(content, "pk"), [["col1", "col2"], ["col1"]], "extracted ok" ) self.assertEqual( extract_args(content, "create_table"), [ { "table_id": "table1", "dataset_id": "test", "file": "tests/sql/table1.sql" }, { "table_id": "table2", "dataset_id": "test", "file": "tests/sql/table2.sql" } ], "extracted ok" ) def test_add_database_id_prefix(self): self.yaml = { "desc": "test", "tables": [ { "table_desc": "table1", "create_table": { "table_id": "table1", "database_id": "test", }, }, { "table_desc": "table2", "create_table": { "table_id": "table2", "database_id": "test", }, } ] } add_database_id_prefix( self.yaml, prefix='1234' ) self.assertEqual( self.yaml , { "desc": "test", "tables": [ { "table_desc": "table1", "create_table": { "table_id": "table1", "database_id": "1234_test", }, }, { "table_desc": "table2", "create_table": { "table_id": "table2", "database_id": "1234_test", }, } ] }, "prefix properly added to database" ) if __name__ == "__main__": logging_config() unittest.main()
8,661
2,404
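The {param} substitution those read_sql tests exercise boils down to keyword formatting; a minimal sketch with plain str.format (not the snowflet implementation) looks like this.

query = 'select {param1}, {param3} from "DB_TEST"."SCHEMA_TEST"."TABLE1" where amount > {param2}'
print(query.format(param1="type", param2="300", param3="shipped_date"))
# select type, shipped_date from "DB_TEST"."SCHEMA_TEST"."TABLE1" where amount > 300
# Leaving a parameter out raises KeyError, which is what the tests assert above.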
# -*- coding: utf-8 -*- """ Microsoft-Windows-AssignedAccess GUID : 8530db6e-51c0-43d6-9d02-a8c2088526cd """ from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct from etl.utils import WString, CString, SystemTime, Guid from etl.dtyp import Sid from etl.parsers.etw.core import Etw, declare, guid @declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=10001, version=0) class Microsoft_Windows_AssignedAccess_10001_0(Etw): pattern = Struct( "SID" / WString ) @declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=10002, version=0) class Microsoft_Windows_AssignedAccess_10002_0(Etw): pattern = Struct( "SID" / WString ) @declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=10003, version=0) class Microsoft_Windows_AssignedAccess_10003_0(Etw): pattern = Struct( "SID" / WString ) @declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=10004, version=0) class Microsoft_Windows_AssignedAccess_10004_0(Etw): pattern = Struct( "AppID" / WString ) @declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=10010, version=0) class Microsoft_Windows_AssignedAccess_10010_0(Etw): pattern = Struct( "ErrorCode" / Int32ul ) @declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=10020, version=0) class Microsoft_Windows_AssignedAccess_10020_0(Etw): pattern = Struct( "ErrorCode" / Int32ul ) @declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=20000, version=0) class Microsoft_Windows_AssignedAccess_20000_0(Etw): pattern = Struct( "SID" / WString, "UserName" / WString, "AppID" / WString, "AppName" / WString ) @declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=30000, version=0) class Microsoft_Windows_AssignedAccess_30000_0(Etw): pattern = Struct( "File" / CString, "LineNumber" / Int32ul, "ErrorCode" / Int32ul, "ErrorCodeExpanded" / Int32sl ) @declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=31000, version=0) class Microsoft_Windows_AssignedAccess_31000_0(Etw): pattern = Struct( "ErrorCode" / Int32sl ) @declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=31001, version=0) class Microsoft_Windows_AssignedAccess_31001_0(Etw): pattern = Struct( "ErrorCode" / Int32sl ) @declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=31002, version=0) class Microsoft_Windows_AssignedAccess_31002_0(Etw): pattern = Struct( "Custom" / WString, "ErrorCode" / Int32ul ) @declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=32000, version=0) class Microsoft_Windows_AssignedAccess_32000_0(Etw): pattern = Struct( "Custom" / WString, "ErrorCode" / Int32ul ) @declare(guid=guid("8530db6e-51c0-43d6-9d02-a8c2088526cd"), event_id=33000, version=0) class Microsoft_Windows_AssignedAccess_33000_0(Etw): pattern = Struct( "Custom" / WString, "ErrorCode" / Int32ul )
3,207
1,587
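A hedged sketch of how one of these construct patterns consumes raw event payload bytes, using only construct built-ins rather than the etl.utils wrappers; the payload bytes are hypothetical.

from construct import Int32ul, Struct

pattern = Struct("ErrorCode" / Int32ul)
event_payload = b"\x05\x00\x00\x00"       # hypothetical 4-byte little-endian payload
parsed = pattern.parse(event_payload)
print(parsed.ErrorCode)                    # 5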
from capture_image import CaptureImage

if __name__ == '__main__':
    """
    This can be directly used from CLI e.g.:
        source /home/pi/.smartcambuddy_venv/bin/activate
        python smarcambuddy/take_a_photo.py
    """
    CaptureImage.trigger()
249
88
from oyster.conf import settings

CELERY_IMPORTS = ['oyster.tasks'] + list(settings.CELERY_TASK_MODULES)
105
45
#!/usr/bin/env python # -*- coding: utf-8 -*- import sonnet as snt import tensorflow as tf from .drop_mask import make_drop_mask1 from .promotion_mask import make_promotion_mask from ..boolean_board.black import select_black_fu_board, select_non_black_board from ..boolean_board.empty import select_empty_board from ..direction import Direction from ..piece import Piece __author__ = 'Yasuhiro' __date__ = '2018/2/22' class BlackFuFileLayer(snt.AbstractModule): def __init__(self, data_format, name='black_fu_file'): super().__init__(name=name) self.data_format = data_format def _build(self, board): fu_board = select_black_fu_board(board=board) axis = -1 if self.data_format == 'NCHW' else -2 flag = tf.reduce_any(fu_board, axis=axis, keep_dims=True) flag = tf.logical_not(flag) repeat_count = [1, 1, 1, 1] repeat_count[axis] = 9 available_map = tf.tile(flag, repeat_count) return available_map class BlackFuDropLayer(snt.AbstractModule): def __init__(self, data_format, name='black_fu_drop'): super().__init__(name=name) self.data_format = data_format def _build(self, board, black_hand, available_square): fu_available_file = BlackFuFileLayer( data_format=self.data_format )(board) fu_available_area = make_drop_mask1(data_format=self.data_format) empty_square = select_empty_board(board=board) available = tf.logical_and( # FUを置ける筋、2~9段 tf.logical_and(fu_available_file, fu_available_area), tf.logical_and( # 空いているマス empty_square, # 持ち駒があるかどうか tf.reshape( tf.greater_equal(black_hand[:, Piece.BLACK_FU], 1), [-1, 1, 1, 1] ) ) ) # 王手の時に有効かどうか available = tf.logical_and(available, available_square) return available class BlackFuMoveLayer(snt.AbstractModule): def __init__(self, data_format, name='black_fu_move'): super().__init__(name=name) self.data_format = data_format def _build(self, board, fu_effect): non_black_mask = select_non_black_board(board=board) movable_effect = tf.logical_and(fu_effect[Direction.UP], non_black_mask) available_mask = make_drop_mask1(data_format=self.data_format) non_promoting_effect = { Direction.UP: tf.logical_and(movable_effect, available_mask) } promotion_mask = make_promotion_mask( direction=Direction.UP, data_format=self.data_format, step_size=1 ) promoting_effect = { Direction.UP: tf.logical_and(movable_effect, promotion_mask) } return non_promoting_effect, promoting_effect
2,891
958
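A NumPy sketch of the per-file mask idea in BlackFuFileLayer above: a file (column) accepts a pawn drop only if it holds no unpromoted black pawn. The real layer does the same with tf.reduce_any and tf.tile over an NCHW/NHWC batch; the 9x9 [rank, file] board here is a hypothetical stand-in.

import numpy as np

fu_board = np.zeros((9, 9), dtype=bool)        # hypothetical plane of black pawns
fu_board[6, 2] = True                           # a pawn already sits on file index 2
has_fu = fu_board.any(axis=0, keepdims=True)    # (1, 9): does each file hold a pawn?
available = np.tile(~has_fu, (9, 1))            # broadcast back to the full 9x9 board
print(available[:, 2].any())                    # False: file 2 cannot take another pawn drop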
from connect.devops_testing.bdd.fixtures import use_connect_request_dispatcher, use_connect_request_builder
from connect.devops_testing.request import Builder, Dispatcher


def test_should_successfully_initialize_request_builder_in_behave_context(behave_context):
    use_connect_request_builder(behave_context)

    assert isinstance(behave_context.builder, Builder)


def test_should_successfully_initialize_request_dispatcher_in_behave_context(behave_context):
    use_connect_request_dispatcher(behave_context, use_specs=False)

    assert isinstance(behave_context.connect, Dispatcher)
    assert behave_context.request == {}
631
192
from django.apps import AppConfig


class KwueConfig(AppConfig):
    name = 'kwue'
83
29
import sublime, sublime_plugin import os, traceback from ...libs import util from ...libs import FlowCLI class JavascriptEnhancementsRefactorConvertToArrowFunctionCommand(sublime_plugin.TextCommand): def run(self, edit, **args): view = self.view selection = view.sel()[0] flow_cli = FlowCLI(view) result = flow_cli.ast() if result[0]: body = result[1]["body"] items = util.nested_lookup("type", ["FunctionExpression"], body) for item in items: region = sublime.Region(int(item["range"][0]), int(item["range"][1])) if region.contains(selection): text = view.substr(region) if not text.startswith("function"): return index_begin_parameter = 8 text = text[index_begin_parameter:].lstrip() while text[0] != "(" and len(text) > 0: text = text[1:].lstrip() block_statement_region = sublime.Region(int(item["body"]["range"][0]), int(item["body"]["range"][1])) block_statement = view.substr(block_statement_region) index = text.index(block_statement) while text[index - 1] == " " and index - 1 >= 0: text = text[0:index - 1] + text[index:] index = index - 1 text = text[0:index] + " => " + text[index:] view.replace(edit, region, text) break else: sublime.error_message("Cannot convert the function. Some problems occured.") def is_enabled(self, **args) : view = self.view if not util.selection_in_js_scope(view) : return False selection = view.sel()[0] scope = view.scope_name(selection.begin()).strip() if "meta.block.js" in scope: region_scope = util.get_region_scope_last_match(view, scope, selection, "meta.block.js") else: region_scope = util.get_region_scope_last_match(view, scope, selection, "meta.group.braces.curly.js") if not region_scope: return False return True def is_visible(self, **args) : view = self.view if not util.selection_in_js_scope(view) : return False selection = view.sel()[0] scope = view.scope_name(selection.begin()).strip() if "meta.block.js" in scope: region_scope = util.get_region_scope_last_match(view, scope, selection, "meta.block.js") else: region_scope = util.get_region_scope_last_match(view, scope, selection, "meta.group.braces.curly.js") if not region_scope: return False return True
2,492
792
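A plain-string sketch of the rewrite the command above performs: drop the leading "function" keyword and splice " => " in before the body block. The function text and body are hard-coded here, whereas the plugin takes them from the Flow AST ranges.

text = "function (a, b) { return a + b; }"    # hypothetical FunctionExpression source
body = "{ return a + b; }"                    # in the plugin this comes from the AST body range

params = text[len("function"):].lstrip()      # "(a, b) { return a + b; }"
idx = params.index(body)
arrow = params[:idx].rstrip() + " => " + params[idx:]
print(arrow)                                  # (a, b) => { return a + b; }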
import os import sys import base64 import fnmatch from kconfiglib import Kconfig, expr_value, Symbol, Choice, MENU, COMMENT, BOOL, STRING, INT, HEX from java.awt import BorderLayout, Dimension, FlowLayout from java.awt.event import ActionListener, MouseEvent from javax.swing import BorderFactory, BoxLayout, ImageIcon, JButton, JCheckBox, JFileChooser, JFrame, JLabel, JPanel, JRadioButton, JScrollPane, JSplitPane, JTextArea, JTextField, JTree from javax.swing.event import ChangeEvent, DocumentListener, TreeExpansionListener, TreeSelectionListener, CellEditorListener from javax.swing.tree import DefaultTreeModel, DefaultMutableTreeNode, DefaultTreeCellRenderer, TreeCellEditor, TreePath from events import addActionListener # For icons in code from org.python.core.util import StringUtil if 'knodeinfo' in sys.modules: del sys.modules["knodeinfo"] from knodeinfo import getNodeInfoString, getNodeName, setKConfig class PrintLogger(): def info(self, log_string): print(log_string) log = PrintLogger() # If True, use GIF image data embedded in this file instead of separate GIF # files. See _load_images(). _USE_EMBEDDED_IMAGES = True def _load_images(): # Loads GIF images, creating the global _*_img ImageIcon variables. # Base64-encoded images embedded in this script are used if # _USE_EMBEDDED_IMAGES is True, and separate image files in the same # directory as the script otherwise. # # Using a global variable indirectly prevents the image from being # garbage-collected. Passing an image to a Tkinter function isn't enough to # keep it alive. def load_image(name, data): var_name = "_{}_img".format(name) if _USE_EMBEDDED_IMAGES: globals()[var_name] = ImageIcon(StringUtil.toBytes(base64.b64decode(data))) else: globals()[var_name] = ImageIcon( file=os.path.join(os.path.dirname(__file__), name + ".gif")) # Note: Base64 data can be put on the clipboard with # $ base64 -w0 foo.gif | xclip load_image("icon", "R0lGODlhMAAwAPEDAAAAAADQAO7u7v///yH5BAUKAAMALAAAAAAwADAAAAL/nI+gy+2Pokyv2jazuZxryQjiSJZmyXxHeLbumH6sEATvW8OLNtf5bfLZRLFITzgEipDJ4mYxYv6A0ubuqYhWk66tVTE4enHer7jcKvt0LLUw6P45lvEprT6c0+v7OBuqhYdHohcoqIbSAHc4ljhDwrh1UlgSydRCWWlp5wiYZvmSuSh4IzrqV6p4cwhkCsmY+nhK6uJ6t1mrOhuJqfu6+WYiCiwl7HtLjNSZZZis/MeM7NY3TaRKS40ooDeoiVqIultsrav92bi9c3a5KkkOsOJZpSS99m4k/0zPng4Gks9JSbB+8DIcoQfnjwpZCHv5W+ip4aQrKrB0uOikYhiMCBw1/uPoQUMBADs=") load_image("n_bool", "R0lGODdhEAAQAPAAAAgICP///ywAAAAAEAAQAAACIISPacHtvp5kcb5qG85hZ2+BkyiRF8BBaEqtrKkqslEAADs=") load_image("y_bool", "R0lGODdhEAAQAPEAAAgICADQAP///wAAACwAAAAAEAAQAAACMoSPacLtvlh4YrIYsst2cV19AvaVF9CUXBNJJoum7ymrsKuCnhiupIWjSSjAFuWhSCIKADs=") load_image("n_tri", "R0lGODlhEAAQAPD/AAEBAf///yH5BAUKAAIALAAAAAAQABAAAAInlI+pBrAKQnCPSUlXvFhznlkfeGwjKZhnJ65h6nrfi6h0st2QXikFADs=") load_image("m_tri", "R0lGODlhEAAQAPEDAAEBAeQMuv///wAAACH5BAUKAAMALAAAAAAQABAAAAI5nI+pBrAWAhPCjYhiAJQCnWmdoElHGVBoiK5M21ofXFpXRIrgiecqxkuNciZIhNOZFRNI24PhfEoLADs=") load_image("y_tri", "R0lGODlhEAAQAPEDAAICAgDQAP///wAAACH5BAUKAAMALAAAAAAQABAAAAI0nI+pBrAYBhDCRRUypfmergmgZ4xjMpmaw2zmxk7cCB+pWiVqp4MzDwn9FhGZ5WFjIZeGAgA7") load_image("m_my", "R0lGODlhEAAQAPEDAAAAAOQMuv///wAAACH5BAUKAAMALAAAAAAQABAAAAI5nIGpxiAPI2ghxFinq/ZygQhc94zgZopmOLYf67anGr+oZdp02emfV5n9MEHN5QhqICETxkABbQ4KADs=") load_image("y_my", "R0lGODlhEAAQAPH/AAAAAADQAAPRA////yH5BAUKAAQALAAAAAAQABAAAAM+SArcrhCMSSuIM9Q8rxxBWIXawIBkmWonupLd565Um9G1PIs59fKmzw8WnAlusBYR2SEIN6DmAmqBLBxYSAIAOw==") load_image("n_locked", "R0lGODlhEAAQAPABAAAAAP///yH5BAUKAAEALAAAAAAQABAAAAIgjB8AyKwN04pu0vMutpqqz4Hih4ydlnUpyl2r23pxUAAAOw==") load_image("m_locked", 
"R0lGODlhEAAQAPD/AAAAAOQMuiH5BAUKAAIALAAAAAAQABAAAAIylC8AyKwN04ohnGcqqlZmfXDWI26iInZoyiore05walolV39ftxsYHgL9QBBMBGFEFAAAOw==") load_image("y_locked", "R0lGODlhEAAQAPD/AAAAAADQACH5BAUKAAIALAAAAAAQABAAAAIylC8AyKzNgnlCtoDTwvZwrHydIYpQmR3KWq4uK74IOnp0HQPmnD3cOVlUIAgKsShkFAAAOw==") load_image("not_selected", "R0lGODlhEAAQAPD/AAAAAP///yH5BAUKAAIALAAAAAAQABAAAAIrlA2px6IBw2IpWglOvTYhzmUbGD3kNZ5QqrKn2YrqigCxZoMelU6No9gdCgA7") load_image("selected", "R0lGODlhEAAQAPD/AAAAAP///yH5BAUKAAIALAAAAAAQABAAAAIzlA2px6IBw2IpWglOvTah/kTZhimASJomiqonlLov1qptHTsgKSEzh9H8QI0QzNPwmRoFADs=") load_image("edit", "R0lGODlhEAAQAPIFAAAAAKOLAMuuEPvXCvrxvgAAAAAAAAAAACH5BAUKAAUALAAAAAAQABAAAANCWLqw/gqMBp8cszJxcwVC2FEOEIAi5kVBi3IqWZhuCGMyfdpj2e4pnK+WAshmvxeAcETWlsxPkkBtsqBMa8TIBSQAADs=") class NodeType(): """Used to determine what GUI control to use in the visual tree.""" _unknown = 0 _radio = 1 _bool = 2 _tri = 3 _text = 4 _menu = 5 _comment = 6 nodeType = _unknown def __init__(self, t): self.nodeType = t def isType(self, t_list): return self.nodeType in t_list def getType(self): return self.nodeType class TreeNodeData(object): """These are the data objects that goes into the tree data model.""" def __init__ (self, node, tree): """Create a TreeNodeData object Parameters ---------- node : Kconfig.MenuNode The Kconfiglib node object that this tree node visualizes. tree : KConfigTree The tree this node object belongs to. Needed for sending events to the tree. """ self.knode = node self.tree = tree self.expanded = False def getNodeType(self): """Returns the node type""" item = self.knode.item if item == MENU: return NodeType(NodeType._menu) if item == COMMENT: return NodeType(NodeType._comment) if not item.orig_type: return NodeType(NodeType._unknown) if item.orig_type in (STRING, INT, HEX): return NodeType(NodeType._text) # BOOL or TRISTATE if isinstance(item, Symbol) and item.choice: # Choice symbol in y-mode choice return NodeType(NodeType._radio) if len(item.assignable) <= 1: # Pinned to a single value if isinstance(item, Choice): return NodeType(NodeType._menu) if item.type == BOOL: return NodeType(NodeType._bool) if item.assignable == (1, 2): return NodeType(NodeType._tri) return NodeType(NodeType._tri) def getText(self): """Return the text to display on the tree node""" if self.knode and self.knode.prompt: return self.knode.prompt[0] return getNodeName(self.knode).strip() def getValue(self): """Returns a string-type value, used for STRING, INT, HEX node types.""" if self.knode.item == MENU or self.knode.item == COMMENT: return None return self.knode.item.str_value def getTriValue(self): """Returns a boolean or tristate value. A bool checkbox has the values 0 and 2, while a tristate has 0, 1 and 2. 0 = False/N, 1 = Module/M, 2 = True/Y""" if self.knode.item == MENU or self.knode.item == COMMENT: return None # log.info(self.getText(), str(self.knode.item.tri_value))) return self.knode.item.tri_value def setValue(self, val): """Set a string value. Can be a text string, or an integer (or hex) encoded as a string.""" # log.info("TreeNodeData.setValue " + self.getText() + " " + str(val) + " was " + self.getValue()) self.knode.item.set_value(val) self.tree.updateTree() def setTriValue(self, n): """Set a tristate or bool value. 
0 = False/N, 1 = Module/M, 2 = True/Y""" # log.info("TreeNodeData.setTriValue", self.getText(), n) self.knode.item.set_value(n) self.tree.updateTree() def getVisible(self): """Return the visibility state of the node.""" return TreeNodeData.isVisible(self.knode) @staticmethod def isVisible(node): """Return the visibility state of the node passed as an argument.""" return node.prompt and expr_value(node.prompt[1]) and not \ (node.item == MENU and not expr_value(node.visibility)) def isExpanded(self): return self.expanded def setExpanded(self, expanded): self.expanded = expanded def search(self, searchString, invisibleMatch): """Search all text related to this node for searchString. If it matches, it will tag the node as a search match. If invisibleMatch = False and the node is not visible, the search match will be False. The search match result (bool) is returned.""" if self.getVisible() > 0 or invisibleMatch: infoText = self.getText() searchString = "*" + searchString + "*" self.searchMatch = fnmatch.fnmatch(infoText.lower(), searchString.lower()) else: self.searchMatch = False return self.searchMatch def setSearchMatch(self, match): """Tags the node with a search match""" self.searchMatch = match def isSearchMatch(self): return self.searchMatch def toString(self): return self.getText() + " = " + str(self.getValue()) class TristateCheckBox(JCheckBox): """Custom tristate checkbox implementation.""" serialVersionUID = 1 triState = 0 _load_images() selected = _y_tri_img unselected = _n_tri_img halfselected = _m_tri_img def __init__(self, eventHandler = None): """Creates a TristateCheckBox object Arguments --------- eventHandler : ActionListener If supplied, the event handler will be called when the tristate checkbox state changes. """ JCheckBox.__init__(self) if eventHandler: addActionListener(self, eventHandler) addActionListener(self, self.actionPerformed) def paint(self, g): """Called when the tree needs to paint the checkbox icon.""" if self.triState == 2: self.setIcon(self.selected) elif self.triState == 1: self.setIcon(self.halfselected) else: self.setIcon(self.unselected) JCheckBox.paint(self, g) def getTriState(self): """Return the tristate value (0, 1 or 2).""" return self.triState def setTriState(self, tri): """Set tristate value (0, 1 or 2).""" self.triState = tri def actionPerformed(self, e): """Increments the checkbox value when clicked""" # log.info("actionPerformed()") tcb = e.getSource() newVal = (tcb.getTriState() + 1) % 3 tcb.setTriState(newVal) class CustomCellRenderer(DefaultTreeCellRenderer): """Renders the various tree controls (checkbox, tristate checkbox, string values etc.)""" def __init__(self): DefaultTreeCellRenderer.__init__(self) flowLayout = FlowLayout(FlowLayout.LEFT, 0, 0) self.cbPanel = JPanel(flowLayout) self.cb = JCheckBox() self.cb.setBackground(None) self.cbPanel.add(self.cb) self.cbLabel = JLabel() self.cbPanel.add(self.cbLabel) self.tcbPanel = JPanel(flowLayout) self.tcb = TristateCheckBox() self.tcb.setBackground(None) self.tcbPanel.add(self.tcb) self.tcbLabel = JLabel() self.tcbPanel.add(self.tcbLabel) self.rbPanel = JPanel(flowLayout) self.rb = JRadioButton() self.rb.setBackground(None) self.rbPanel.add(self.rb) self.rbLabel = JLabel() self.rbPanel.add(self.rbLabel) def getTreeCellRendererComponent(self, tree, value, selected, expanded, leaf, row, hasFocus): """Return a swing control appropriate for the node type of the supplied value""" if isinstance(value, DefaultMutableTreeNode): nodeData = value.getUserObject() if isinstance(nodeData, 
TreeNodeData): t = nodeData.getNodeType() isEnabled = nodeData.getVisible() > 0 # Boolean checkbox if t.isType([NodeType._bool]): self.cbLabel.setText(nodeData.getText()) self.cb.setEnabled(isEnabled) self.cbLabel.setEnabled(isEnabled) if nodeData.getTriValue() == 0: self.cb.setSelected(False) else: self.cb.setSelected(True) control = self.cbPanel # Tristate chekcbox elif t.isType([NodeType._tri]): control = self.tcbPanel self.tcbLabel.setText(nodeData.getText()) self.tcb.setEnabled(isEnabled) self.tcbLabel.setEnabled(isEnabled) self.tcb.setTriState(nodeData.getTriValue()) # Radio button elif t.isType([NodeType._radio]): self.rbLabel.setText(nodeData.getText()) self.rb.setEnabled(isEnabled) self.rbLabel.setEnabled(isEnabled) if nodeData.getTriValue() == 0: self.rb.setSelected(False) else: self.rb.setSelected(True) control = self.rbPanel # Text field elif t.isType([NodeType._text]): control = DefaultTreeCellRenderer.getTreeCellRendererComponent(self, tree, value, selected, expanded, leaf, row, hasFocus) control.setText(nodeData.getText() + ": " + str(nodeData.getValue())) # Default tree cell (a node with an icon and a label) else: control = DefaultTreeCellRenderer.getTreeCellRendererComponent(self, tree, value, selected, expanded, leaf, row, hasFocus) control.setText(nodeData.getText()) self.setColors(control, nodeData, selected) # Background color for the tree item # log.info("getTreeCellRendererComponent", t.getType(), isEnabled, "'" + nodeData.getText() + "'") control.setEnabled(isEnabled) return control # log.info("Warning: getTreeCellRendererComponent() fallthrough", nodeData) return DefaultTreeCellRenderer.getTreeCellRendererComponent(self, tree, value, selected, expanded, leaf, row, hasFocus) def setColors(self, control, data, selected): """Set background color fot the tree item.""" if selected: control.setForeground(self.getTextSelectionColor()) control.setBackground(self.getBackgroundSelectionColor()) else: control.setForeground(self.getTextNonSelectionColor()) control.setBackground(self.getBackgroundNonSelectionColor()) class CustomCellEditor(TreeCellEditor, ActionListener): """Renders the various tree edit controls (checkbox, tristate checkbox, text box etc.)""" def __init__(self, tree): TreeCellEditor.__init__(self) self.editor = None self.tree = tree flowLayout = FlowLayout(FlowLayout.LEFT, 0, 0) self.cbPanel = JPanel(flowLayout) self.cb = JCheckBox(actionPerformed = self.checked) self.cbPanel.add(self.cb) self.cbLabel = JLabel() self.cbPanel.add(self.cbLabel) self.tcbPanel = JPanel(flowLayout) self.tcb = TristateCheckBox(self.checked) self.tcbPanel.add(self.tcb) self.tcbLabel = JLabel() self.tcbPanel.add(self.tcbLabel) self.rbPanel = JPanel(flowLayout) self.rb = JRadioButton(actionPerformed = self.checked) self.rbPanel.add(self.rb) self.rbLabel = JLabel() self.rbPanel.add(self.rbLabel) self.tfPanel = JPanel(flowLayout) self.tfLabel = JLabel() self.tfPanel.add(self.tfLabel) self.tf = JTextField() self.tf.setColumns(12) self.tf.addActionListener(self) self.tfPanel.add(self.tf) def addCellEditorListener(self, l): """Register for edit events""" self.listener = l def isCellEditable(self, event): if event != None and isinstance(event.getSource(), JTree) and isinstance(event, MouseEvent): tree = event.getSource() path = tree.getPathForLocation(event.getX(), event.getY()) userData = path.getLastPathComponent().getUserObject() if isinstance(userData, TreeNodeData) and (not userData.getNodeType().isType([NodeType._comment, NodeType._menu])) and (userData.getVisible() > 0): return True 
return False def shouldSelectCell(self, event): # log.info("shouldSelectCell") return True def cancelCellEditing(self): # log.info("Cancel editing, please!") # super(CustomCellEditor, self).cancelCellEditing() pass def stopCellEditing(self): # log.info("stopCellEditing") if self.nodeData.getNodeType().isType([NodeType._text]): # log.info("stopCellEditing for sure!") self.nodeData.setValue(str(self.tf.getText())) return True def getTreeCellEditorComponent(self, tree, value, selected, expanded, leaf, row): """Return a swing edit control appropriate for the node type of the supplied value""" self.nodeData = self.getNodeUserData(value) if self.nodeData: text = self.nodeData.getText() t = self.nodeData.getNodeType() # Boolean checkbox if t.isType([NodeType._bool]): self.editor = self.cbPanel self.cbLabel.setText(text) if self.nodeData.getTriValue() > 0: self.cb.setSelected(True) else: self.cb.setSelected(False) # Tristate checkbox elif t.isType([NodeType._tri]): # log.info("getTreeCellEditorComponent tristate") self.editor = self.tcbPanel self.tcbLabel.setText(text) self.tcb.setTriState(self.nodeData.getTriValue()) # Radio button elif t.isType([NodeType._radio]): self.editor = self.rbPanel self.rbLabel.setText(text) if self.nodeData.getTriValue() > 0: self.rb.setSelected(True) else: self.rb.setSelected(False) # Text field elif t.isType([NodeType._text]): self.editor = self.tfPanel self.tfLabel.setText(str(self.nodeData.getText()) + ":") self.tf.setText(str(self.nodeData.getValue())) else: self.editor = self.tcb self.editor.setText(text) return self.editor def getNodeUserData(self, value): """Gets the TreeNodeData from the tree node""" if isinstance(value, DefaultMutableTreeNode): nodeData = value.getUserObject() if isinstance(nodeData, TreeNodeData): return nodeData return None def getCellEditorValue(self): newNode = TreeNodeData(self.nodeData.knode, self.tree) if isinstance(self.editor, JTextField): newNode.setValue(str(self.editor.getText())) return newNode def checked(self, e): """Updates the node data when a checkbox has been clicked""" control = e.getSource() if isinstance(control, TristateCheckBox): # log.info("tristate checked") self.nodeData.setTriValue(control.getTriState()) else: # log.info("checkbox checked") if control.isSelected(): self.nodeData.setValue(2) else: self.nodeData.setValue(0) def actionPerformed(self, event): """ ENTER pressed in text field, stop editing.""" tf = event.getSource() self.listener.editingStopped(ChangeEvent(tf)) class KConfigTree(JTree, CellEditorListener): """Custom Swing JTree based tree that visualizes a KConfig configuration. The full KConfig menu structure is put into a shadow tree model. From the shadow model, a real model is built (updateModel), where hidden nodes are not included. This update model is what the tree uses to visualize the configuration menu. Both the shadow and the updated model has the same TreeNodeData with KConfig data. The expanded state and search result state is kept in the TreeNodeData. 
""" shadowModel = None isUpdating = False showAll = False isSearching = False def __init__(self, kconf): self.setCellRenderer(CustomCellRenderer()) self.setCellEditor(CustomCellEditor(self)) self.createKconfShadowModel(kconf) self.setModel(self.createUpdatedModel()) self.expandRow(0) self.setEditable(True) self.setRootVisible(False) self.setShowsRootHandles(True) self.setRowHeight(0) self.addTreeExpansionListener(KConfigTreeExpansionListener()) self.getCellEditor().addCellEditorListener(self) def editingCanceled(self, event): """From CellEditorListener """ # log.info("editingCanceled", self.cellEditor.getCellEditorValue()) pass def editingStopped(self, event): """From CellEditorListener.""" # log.info("editingStopped", self.cellEditor.getCellEditorValue()) self.stopEditing() def createKconfShadowModel(self, kconf): """Create the one and only shadow data model""" rootNode = DefaultMutableTreeNode(kconf.mainmenu_text) self.addNodes(rootNode, kconf.top_node.list) self.shadowModel = DefaultTreeModel(rootNode) def addNodes(self, parent, node): """Recursively traverse the KConfig structure and add to the shadow model""" while node: newUiNode = DefaultMutableTreeNode(TreeNodeData(node, self)) parent.add(newUiNode) if node.list: self.addNodes(newUiNode, node.list) node = node.next def createUpdatedModel(self): """When the user does any changes in the tree, the underlaying kconfig structure will change. Nodes may change visibility and value. The tree control cannot hide nodes, so a new datamodel must be generated that does not include invisible nodes.""" shadowTreeRoot = self.shadowModel.getRoot() rootNode = DefaultMutableTreeNode("Root") self.addVisibleNodes(rootNode, shadowTreeRoot) return DefaultTreeModel(rootNode) def addVisibleNodes(self, visibleParent, shadowParent): """Adds visible nodes from the shadow tree model to the update tree model. If there is an active search operation, only search matches will be added. 
If showAll is set, all nodes are added regardless of visibility.""" childrenEnum = shadowParent.children() while childrenEnum.hasMoreElements(): shadowChild = childrenEnum.nextElement() if shadowChild.getUserObject().getVisible() > 0 or self.showAll: if not self.isSearching or shadowChild.getUserObject().isSearchMatch(): visibleChild = DefaultMutableTreeNode(shadowChild.getUserObject()) visibleParent.add(visibleChild) if shadowChild.getChildCount() > 0: self.addVisibleNodes(visibleChild, shadowChild) def isPathEditable(self, path): comp = path.getLastPathComponent() if isinstance(comp, DefaultMutableTreeNode): nodeData = comp.getUserObject() if isinstance(nodeData, TreeNodeData): return True return False def updateTree(self): """Call to create a new updated tree model""" if not self.isUpdating: # log.info("updateTree()") self.isUpdating = True self.setModel(self.createUpdatedModel()) self.updateExpandedState(self.getModel().getRoot()) self.isUpdating = False def updateExpandedState(self, parent): """Scan through the whole tree and expand the tree node if the node data has the expanded field set to True.""" childrenEnum = parent.children() while childrenEnum.hasMoreElements(): child = childrenEnum.nextElement() if child.getUserObject().isExpanded(): self.expandPath(TreePath(child.getPath())) if child.getChildCount() > 0: self.updateExpandedState(child) def setShowAll(self, show): self.showAll = show self.updateTree() def doSearch(self, searchText): """Perform a search in the data model with the supplied text.""" if len(searchText) > 0: self.isSearching = True self.doSearchBranch(self.shadowModel.getRoot(), searchText) else: self.isSearching = False self.updateTree() def doSearchBranch(self, shadowParent, searchText): """Traverse the tree model searching for the search text""" match = False childrenEnum = shadowParent.children() while childrenEnum.hasMoreElements(): shadowChild = childrenEnum.nextElement() if shadowChild.getUserObject().search(searchText, self.showAll): match = True if shadowChild.getChildCount() > 0: if self.doSearchBranch(shadowChild, searchText): shadowChild.getUserObject().setSearchMatch(True) match = True return match class KConfigTreeExpansionListener(TreeExpansionListener): """Listener for tree expand/collapse events. Used for storing the expand state in the node data, so that a new updated tree's branches can be expanded the same way as in the old tree.""" def treeExpanded(self, e): if not e.getPath().getLastPathComponent() == e.getSource().getModel().getRoot(): e.getPath().getLastPathComponent().getUserObject().setExpanded(True) def treeCollapsed(self, e): if not e.getPath().getLastPathComponent() == e.getSource().getModel().getRoot(): e.getPath().getLastPathComponent().getUserObject().setExpanded(False) class MPConfig(TreeSelectionListener): """The MPConfig component initializes the KConfig library with the requested configuration, and buildst the GUI, consisting of a "Load" and a "Save as" buttons, a search field, "show all" checkbox, tree view and information text view.""" def __init__(self, kconfig_file = "Kconfig", config_file=".config", systemLogger = None): """[summary] Parameters ---------- kconfig_file : string (default: "Kconfig") The Kconfig configuration file config_file : string (default: ".config") The save file which will be used for loading and saving the settings systemLogger (default: None) A system logger object. If None then print statements are used for logging. 
""" global log if systemLogger: log = systemLogger # Load Kconfig configuration files self.kconfig = Kconfig(kconfig_file) setKConfig(self.kconfig) if os.path.isfile(config_file): log.info(self.kconfig.load_config(config_file)) elif os.path.isfile(".config"): log.info(self.kconfig.load_config(".config")) self.tree = KConfigTree(self.kconfig) self.tree.addTreeSelectionListener(self.treeSelectionChanged) jTreeSP = JScrollPane(self.tree) self.jta = JTextArea() self.jta.setEditable(False) jTextSP = JScrollPane(self.jta) toolPanel = JPanel() toolPanel.setLayout(BoxLayout(toolPanel, BoxLayout.X_AXIS)) toolPanel.setBorder(BorderFactory.createEmptyBorder(2, 0, 2, 0)) toolPanel.add(JLabel("Search: ")) jSearchPanel = JPanel() jSearchPanel.setLayout(BoxLayout(jSearchPanel, BoxLayout.X_AXIS)) self.jSearchField = JTextField() jSearchPanel.setBackground(self.jSearchField.getBackground()) jSearchPanel.setBorder(self.jSearchField.getBorder()) self.jSearchField.setBorder(None) self.jSearchField.getDocument().addDocumentListener(SearchListener(self.tree)) jSearchPanel.add(self.jSearchField) clearSearchButton = JButton(u'\u00d7', actionPerformed = self.clearSearch) d = clearSearchButton.getPreferredSize() clearSearchButton.setPreferredSize(Dimension(d.height, d.height)) clearSearchButton.setBackground(self.jSearchField.getBackground()) clearSearchButton.setBorder(None) clearSearchButton.setOpaque(False) clearSearchButton.setContentAreaFilled(False) clearSearchButton.setFocusPainted(False) jSearchPanel.add(clearSearchButton) toolPanel.add(jSearchPanel) self.showAllCheckBox = JCheckBox("Show all", actionPerformed = self.OnShowAllCheck) toolPanel.add(self.showAllCheckBox) splitPane = JSplitPane(JSplitPane.VERTICAL_SPLIT, jTreeSP, jTextSP) splitPane.setOneTouchExpandable(True) splitPane.setDividerLocation(300) treePanel = JPanel(BorderLayout()) treePanel.add(toolPanel, BorderLayout.NORTH) treePanel.add(splitPane, BorderLayout.CENTER) loadSavePanel = JPanel() loadSavePanel.setLayout(BoxLayout(loadSavePanel, BoxLayout.X_AXIS)) loadSavePanel.add(JButton("Load", actionPerformed=self.loadConfigDialog)) loadSavePanel.add(JButton("Save as", actionPerformed=self.writeConfigDialog)) self.rootPanel = JPanel() self.rootPanel.setLayout(BorderLayout()) self.rootPanel.add(loadSavePanel, BorderLayout.PAGE_START) self.rootPanel.add(treePanel, BorderLayout.CENTER) def clearSearch(self, event): self.jSearchField.setText("") def OnShowAllCheck(self, event): self.tree.setShowAll(self.showAllCheckBox.isSelected()) self.tree.doSearch(self.jSearchField.getText()) # Must repeat the search if one is active def treeSelectionChanged(self, event): """When the user selects a new node in the tree, show info about the selected node in the info text area below the tree.""" path = event.getNewLeadSelectionPath() if path: comp = path.getLastPathComponent() if isinstance(comp, DefaultMutableTreeNode): nodeData = comp.getUserObject() if isinstance(nodeData, TreeNodeData): self.jta.setText(getNodeInfoString(nodeData.knode)) self.jta.setCaretPosition(0) def getPane(self): """Return the panel containing all the other components that is set up in __init__().""" return self.rootPanel def writeConfig(self, fileName): """Write the current configuration to the file specified.""" self.kconfig.write_config(fileName) # Save full configuration #self.kconfig.write_min_config(fileName) # Save minimal configuration def loadConfig(self, fileName): """Load configuration settings from the file specified.""" if os.path.isfile(fileName): 
log.info(self.kconfig.load_config(fileName)) self.tree.createKconfShadowModel(self.kconfig) self.tree.updateTree() def writeConfigDialog(self, e): """Open a file dialog to save configuration""" fileChooser = JFileChooser(os.getcwd()) retval = fileChooser.showSaveDialog(None) if retval == JFileChooser.APPROVE_OPTION: f = fileChooser.getSelectedFile() self.writeConfig(f.getPath()) def loadConfigDialog(self, e): """Open a file dialog to select configuration to load""" fileChooser = JFileChooser(os.getcwd()) retval = fileChooser.showOpenDialog(None) if retval == JFileChooser.APPROVE_OPTION: f = fileChooser.getSelectedFile() log.info("Selected file: " + f.getPath()) self.loadConfig(f.getPath()) class SearchListener(DocumentListener): """Triggered when the user adds or removes characters in the search text field.""" def __init__(self, tree): self.tree = tree def changedUpdate(self, e): doc = e.getDocument() searchText = doc.getText(0, doc.getLength()) self.tree.doSearch(searchText) def insertUpdate(self, e): self.changedUpdate(e) def removeUpdate(self, e): self.changedUpdate(e) if __name__ == "__main__": # Set default .config file or load it from argv if len(sys.argv) == 2: # Specify "Kconfig" mpconfig = MPConfig(sys.argv[1]) else: # Specify "Kconfig" and ".config" mpconfig = MPConfig(sys.argv[1], sys.argv[2]) jframe = JFrame("MPLAB X Kconfig Editor") jframe.getContentPane().add(mpconfig.getPane()) jframe.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE) jframe.setSize(500, 800) jframe.setVisible(True)
33,374
10,251
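A headless sketch of the Kconfig tree walk that KConfigTree builds its shadow model from, using kconfiglib directly and no Swing; the "Kconfig" path and the output file name are assumptions.

import os
from kconfiglib import Kconfig

def walk(node, depth=0):
    # Print the prompt of every menu node, indented by menu depth.
    while node:
        if node.prompt:
            print("  " * depth + node.prompt[0])
        if node.list:
            walk(node.list, depth + 1)
        node = node.next

kconf = Kconfig("Kconfig")                  # assumed path to a Kconfig file
if os.path.isfile(".config"):               # load saved values if present
    kconf.load_config(".config")
walk(kconf.top_node.list)
kconf.write_config(".config.example")       # assumed output file name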
# coding=utf-8 # pylint: disable=missing-docstring, unused-argument import os.path import sqlite3 import tempfile import unittest import sqlalchemy.ext.declarative import sqlalchemy.orm try: # noinspection PyPackageRequirements import ujson as json except ImportError: import json import sqlalchemy_jsonfield # Path to test database db_path = os.path.join(tempfile.gettempdir(), "test.sqlite3") # Table name table_name = "create_test" # DB Base class Base = sqlalchemy.ext.declarative.declarative_base() # Model class ExampleTable(Base): __tablename__ = table_name id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True) row_name = sqlalchemy.Column(sqlalchemy.Unicode(64), unique=True) json_record = sqlalchemy.Column(sqlalchemy_jsonfield.JSONField(), nullable=False) class SQLIteTests(unittest.TestCase): def setUp(self): # type: () -> None if os.path.exists(db_path): os.remove(db_path) engine = sqlalchemy.create_engine("sqlite:///{}".format(db_path), echo=False) Base.metadata.create_all(engine) # noinspection PyPep8Naming Session = sqlalchemy.orm.sessionmaker(engine) self.session = Session() def test_create(self): # type: () -> None """Check column type""" # noinspection PyArgumentList with sqlite3.connect(database="file:{}?mode=ro".format(db_path), uri=True) as conn: conn.row_factory = sqlite3.Row c = conn.cursor() c.execute("PRAGMA TABLE_INFO({})".format(table_name)) collected = c.fetchall() result = [dict(col) for col in collected] columns = {info["name"]: info for info in result} json_record = columns["json_record"] self.assertIn( json_record["type"], ("TEXT", "JSON"), "Unexpected column type: received: {!s}, expected: TEXT|JSON".format(json_record["type"]), ) def test_operate(self): # type: () -> None """Check column data operation""" test_dict = {"key": "value"} test_list = ["item0", "item1"] # fill table with self.session.transaction: self.session.add_all( [ ExampleTable(row_name="dict_record", json_record=test_dict), ExampleTable(row_name="list_record", json_record=test_list), ] ) # Validate backward check dict_record = self.session.query(ExampleTable).filter(ExampleTable.row_name == "dict_record").first() list_record = self.session.query(ExampleTable).filter(ExampleTable.row_name == "list_record").first() self.assertEqual( dict_record.json_record, test_dict, "Dict was changed: {!r} -> {!r}".format(test_dict, dict_record.json_record), ) self.assertEqual( list_record.json_record, test_list, "List changed {!r} -> {!r}".format(test_list, list_record.json_record) ) # Low level # noinspection PyArgumentList with sqlite3.connect(database="file:{}?mode=ro".format(db_path), uri=True) as conn: c = conn.cursor() c.execute("SELECT row_name, json_record FROM {tbl}".format(tbl=table_name)) result = dict(c.fetchall()) self.assertEqual(result["dict_record"], json.dumps(test_dict)) self.assertEqual(result["list_record"], json.dumps(test_list))
3,495
1,087
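Outside the test harness, declaring and using the JSON column follows the usual SQLAlchemy pattern; a minimal sketch against a throwaway in-memory SQLite engine (the table and column names are made up).

import sqlalchemy
import sqlalchemy.ext.declarative
import sqlalchemy.orm
import sqlalchemy_jsonfield

Base = sqlalchemy.ext.declarative.declarative_base()

class Doc(Base):
    __tablename__ = "doc"                     # made-up table/column names
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    payload = sqlalchemy.Column(sqlalchemy_jsonfield.JSONField(), nullable=False)

engine = sqlalchemy.create_engine("sqlite://", echo=False)   # in-memory database
Base.metadata.create_all(engine)
session = sqlalchemy.orm.sessionmaker(engine)()
session.add(Doc(payload={"key": "value"}))
session.commit()
print(session.query(Doc).first().payload)     # {'key': 'value'}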
'''Run Example 4.8 from Aho & Ullman p. 315-316, printing the steps to stdout.
'''
from cfg import aho_ullman, core
import sys

CFG = core.ContextFreeGrammar
G = CFG('''
S -> AA | AS | b
A -> SA | AS | a
''')
w = map(core.Terminal, 'abaab')

print 'G:'
print G
print
print 'w =', ''.join(map(str, w))
print
T = aho_ullman.cocke_younger_kasami_algorithm(G, w, out=sys.stdout, check=False)
print 'T:'
print aho_ullman.parse_table_str(T)
print
parse = aho_ullman.left_parse_from_parse_table(G, w, T, check=False)
tree = aho_ullman.LeftParse(G, parse).tree()
print 'Parse tree:', tree
587
251
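For reference, a self-contained CYK sketch over the same grammar and word as the script above; this is the bare table-filling algorithm, not the cfg package's API.

productions = {
    "S": [("A", "A"), ("A", "S"), ("b",)],
    "A": [("S", "A"), ("A", "S"), ("a",)],
}
w = "abaab"
n = len(w)
# table[i][j] holds the nonterminals that derive the substring w[i:i+j+1]
table = [[set() for _ in range(n)] for _ in range(n)]
for i, ch in enumerate(w):
    for lhs, rhss in productions.items():
        if (ch,) in rhss:
            table[i][0].add(lhs)
for length in range(2, n + 1):                 # substring length
    for i in range(n - length + 1):            # substring start
        for k in range(1, length):             # split point
            for lhs, rhss in productions.items():
                for left, right in (r for r in rhss if len(r) == 2):
                    if left in table[i][k - 1] and right in table[i + k][length - k - 1]:
                        table[i][length - 1].add(lhs)
print("S" in table[0][n - 1])                  # True: 'abaab' is in L(G)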
from .convpool_op_base import ConvPoolOpBase


class PoolOp(ConvPoolOpBase):
    def __init__(
        self,
        op_type='Pool',
        pool_type=None,
        global_pooling=False,
        **kwargs
    ):
        super(PoolOp, self).__init__(op_type=op_type, **kwargs)
        self.global_pooling = global_pooling
        if pool_type is not None:
            self.type = pool_type

    def flip_operation(self):
        self.pad_r, self.pad_l = self.pad_l, self.pad_r
495
162
import pytest from django.db.models import Manager from cegs_portal.search.json_templates.v1.dna_region import dnaregions from cegs_portal.search.json_templates.v1.search_results import ( search_results as sr_json, ) from cegs_portal.search.models import DNARegion, Facet from cegs_portal.search.models.utils import ChromosomeLocation pytestmark = pytest.mark.django_db def test_search_results(regions: list[DNARegion], facets: Manager[Facet]): search_results = { "loc_search": { "location": ChromosomeLocation("chr1", 10_000, 15_000), "assembly": "GRCh37", }, "dhss": regions, "facets": facets, } assert sr_json(search_results) == { "location": { "assembly": search_results["loc_search"]["assembly"], "chromosome": search_results["loc_search"]["location"].chromo, "start": search_results["loc_search"]["location"].range.lower, "end": search_results["loc_search"]["location"].range.upper, }, "regions": dnaregions(search_results["dhss"]), "facets": [ {"name": f.name, "description": f.description, "values": [value.value for value in f.values.all()]} for f in search_results["facets"].all() ], } assert sr_json(search_results, json_format="genoverse") == { "location": { "assembly": search_results["loc_search"]["assembly"], "chromosome": search_results["loc_search"]["location"].chromo, "start": search_results["loc_search"]["location"].range.lower, "end": search_results["loc_search"]["location"].range.upper, }, "regions": dnaregions(search_results["dhss"], json_format="genoverse"), "facets": [ {"name": f.name, "description": f.description, "values": [value.value for value in f.values.all()]} for f in search_results["facets"].all() ], }
1,958
613
# # For licensing see accompanying LICENSE file. # Copyright (C) 2021 Apple Inc. All Rights Reserved. # """Utility functions to tag tensors with metadata. The metadata remains with the tensor under torch operations that don't change the values, e.g. .clone(), .contiguous(), .permute(), etc. """ import collections import copy from typing import Any from typing import Optional import numpy as np import torch QuantizeAffineParams2 = collections.namedtuple( "QuantizeAffineParams", ["scale", "zero_point", "num_bits"] ) class _SpecialTensor(torch.Tensor): """This class denotes special tensors. It isn't intended to be used directly, but serves as a helper for tagging tensors with metadata. It subclasses torch.Tensor so that isinstance(t, torch.Tensor) returns True for special tensors. It forbids some of the methods of torch.Tensor, and overrides a few methods used to create other tensors, to ensure the result is still special. """ _metadata = None def __getattribute__(self, attr: str) -> Any: # Disallow new_zeros, new_ones, new_full, etc. if "new_" in attr: raise AttributeError( "Invalid attr {!r} for special tensors".format(attr) ) return super().__getattribute__(attr) def detach(self) -> "_SpecialTensor": ret = super().detach() ret.__class__ = _SpecialTensor ret._metadata = self._metadata return ret @property def data(self) -> "_SpecialTensor": ret = super().data ret.__class__ = _SpecialTensor ret._metadata = self._metadata return ret def clone(self) -> "_SpecialTensor": ret = super().clone() ret.__class__ = _SpecialTensor ret._metadata = self._metadata return ret def cuda( self, device: Optional[torch.device] = None, non_blocking: bool = False ) -> "_SpecialTensor": ret = super().cuda() ret.__class__ = _SpecialTensor ret._metadata = self._metadata return ret def contiguous(self) -> "_SpecialTensor": ret = super().contiguous() ret.__class__ = _SpecialTensor ret._metadata = self._metadata return ret def view(self, *args, **kwargs) -> "_SpecialTensor": ret = super().view(*args, **kwargs) ret.__class__ = _SpecialTensor ret._metadata = self._metadata return ret def permute(self, *args, **kwargs) -> "_SpecialTensor": ret = super().permute(*args, **kwargs) ret.__class__ = _SpecialTensor ret._metadata = self._metadata return ret def __getitem__(self, *args, **kwargs) -> "_SpecialTensor": ret = super().__getitem__(*args, **kwargs) ret.__class__ = _SpecialTensor ret._metadata = self._metadata return ret def __copy__(self) -> "_SpecialTensor": ret = copy.copy(super()) ret.__class__ = _SpecialTensor ret._metadata = self._metadata return ret def _check_type(tensor: torch.Tensor) -> None: given_type = type(tensor) if not issubclass(given_type, torch.Tensor): raise TypeError("invalid type {!r}".format(given_type)) def tag_with_metadata(tensor: torch.Tensor, metadata: Any) -> None: """Tag a metadata to a tensor.""" _check_type(tensor) tensor.__class__ = _SpecialTensor tensor._metadata = metadata RepresentibleByQuantizeAffine = collections.namedtuple( "RepresentibleByQuantizeAffine", ["quant_params"] ) def mark_quantize_affine( tensor: torch.Tensor, scale: float, zero_point: int, dtype: np.dtype = np.uint8, ) -> None: """Mark a tensor as quantized with affine. See //xnorai/training/pytorch/extensions/functions:quantize_affine for more info on this method of quantization. The tensor itself can be a floating point Tensor. However, its values must be representible with @scale and @zero_point. This function, for performance reasons, does not validiate if the tensor is really quantizable as it claims to be. 
Arguments: tensor (torch.Tensor): The tensor to be marked as affine-quantizable Tensor. scale (float): the scale (from quantization parameters). zero_point (int): The zero_point (from quantization parameters). dtype (numpy.dtype): Type of tensor when quantized (this is usually numpy.uint8, which is used for Q8). A ValueError will be thrown if the input dtype is not one of the following: {numpy.uint8, numpy.int32}. """ allowed_dtypes = [np.uint8, np.int32] if dtype not in allowed_dtypes: raise ValueError( "Provided dtype ({}) is not supported. Please use: {}".format( dtype, allowed_dtypes ) ) quant_params = QuantizeAffineParams2(scale, zero_point, dtype) tag_with_metadata(tensor, RepresentibleByQuantizeAffine(quant_params))
5,002
1,451
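The docstring above says the tensor's values must already be representable with the given scale and zero_point, but the function deliberately does not check it. A minimal numpy-only sketch (not part of the module; parameters invented) of what that precondition means for uint8 affine quantization:

# Minimal numpy sketch (not from the original module) of the "representable"
# precondition: every value must equal scale * (q - zero_point) for some
# integer q in the quantized dtype's range (uint8 here).
import numpy as np

scale, zero_point = 0.05, 128                                  # example parameters
q = np.random.randint(0, 256, size=(4, 4), dtype=np.uint8)     # raw uint8 codes
x = scale * (q.astype(np.float32) - zero_point)                # dequantized floats

# Recover the codes and confirm the round trip is exact, which is the
# (unchecked) assumption mark_quantize_affine documents above.
q_back = np.round(x / scale + zero_point).astype(np.uint8)
assert np.array_equal(q, q_back)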
# Rohan E., Lukeš V. # Modeling large-deforming fluid-saturated porous media using # an Eulerian incremental formulation. # Advances in Engineering Software, 113:84-95, 2017, # https://doi.org/10.1016/j.advengsoft.2016.11.003 # # Run simulation: # # ./simple.py example_largedef_porodyn-1/porodynhe_example2d.py # # The results are stored in `example_largedef_porodyn-1/results`. # import numpy as nm from porodyn_engine import incremental_algorithm,\ fc_fce, mat_fce, def_problem import os.path as osp wdir = osp.dirname(__file__) def define(): params = { 'mesh_file': 'rect_16x16.vtk', 'mat_store_elem': 75, # element for which material data are stored 'u_store_node': 272, # node for which displacement is stored 'p_store_node': 144, # node for which pressure is stored 'dim': 2, # problem dimension 'dt': 0.01, # time step 't_end': 2.0, # end time 'force': 4e6, # applied force 'save_step': True, # save results in each time step? 'init_mode': False, # calculate initial state? } material_params = { 'param': { 'B': nm.eye(params['dim']), 'g': 9.81, # gravitational acceleration }, 'solid': { 'Phi': 0.58, # volume fraction 'lam': 8.4e6, # Lame coefficient 'mu': 5.6e6, # Lame coefficient 'rho': 2700, # density }, 'fluid': { 'kappa': 1e-1, # permeability parameter 'beta': 0.8, # permeability parameter 'rho': 1000, # density 'Kf': 2.2e10, # bulk modulus }, } regions = { 'Omega': 'all', 'Left': ('vertices in (x < 0.001)', 'facet'), 'Right': ('vertices in (x > 9.999)', 'facet'), 'Bottom': ('vertices in (y < 0.001)', 'facet'), 'Top_r': ('vertices in (y > 9.999) & (x > 4.999)', 'facet'), 'Top_l': ('vertices in (y > 9.999) & (x < 5.001)', 'facet'), 'ForceRegion': ('copy r.Top_r', 'facet'), } ebcs = { 'Fixed_Left_u': ('Left', {'u.0': 0.0}), 'Fixed_Right_u': ('Right', {'u.0': 0.0}), 'Fixed_Bottom_u': ('Bottom', {'u.1': 0.0}), 'Fixed_Top_p': ('Top_l', {'p.0': 0.0}), } ############################################### options = { 'output_dir': osp.join(wdir, 'results'), 'parametric_hook': 'incremental_algorithm', } filename_mesh = params['mesh_file'] materials, functions, fields, variables, equations, solvers = \ def_problem(params['dt'], params['force']) return locals()
2,705
999
""" Pluggable Django email backend for capturing outbound mail for QA/review purposes. """ __version__ = "1.0" __author__ = "Scot Hacker" __email__ = "shacker@birdhouse.org" __url__ = "https://github.com/shacker/django-mailcheck" __license__ = "BSD License"
260
94
# -*- coding: utf-8 -*- """ Created on Wed Aug 28 13:41:03 2019 @author: bwc """ import numpy as np def bin_dat(dat,bin_width=0.001,user_roi=[],isBinAligned=False,isDensity=False): user_roi = np.asarray(user_roi) roi_supp = (user_roi.size == 2) # Get roi if isBinAligned and roi_supp: lower = np.floor(np.min(user_roi)/bin_width)*bin_width upper = np.ceil(np.max(user_roi)/bin_width)*bin_width roi = np.array([lower, upper]) elif isBinAligned and (not roi_supp): lower = np.floor(np.min(dat)/bin_width)*bin_width upper = np.ceil(np.max(dat)/bin_width)*bin_width roi = np.array([lower, upper]) elif (not isBinAligned) and roi_supp: roi = user_roi else: # (not isBinAligned) and (not roi_supp): roi = np.array([np.min(dat), np.max(dat)]) num_bins = int(np.rint((roi[1]/bin_width-roi[0]/bin_width))) histo = np.histogram(dat,range=(roi[0], roi[1]),bins=num_bins,density=isDensity) xs = (histo[1][1:]+histo[1][0:-1])/2 ys = histo[0] return (xs,ys) def edges_to_centers(*edges): """ Convert bin edges to bin centers Parameters ---------- *edges : bin edges Returns ------- centers : list of bin centers """ centers = [] for es in edges: centers.append((es[0:-1]+es[1:])/2) return centers def corrhist(epos): dat = epos['tof'] roi = [0, 5000] delta = 1 # dat = epos['m2q'] # roi = [0, 100] # delta = .1 # # MF = np.mean(epos['tof']/np.sqrt(epos['m2q'])) # dat = np.sqrt(epos['m2q'])*MF # roi = [0, np.sqrt(250)*MF] # delta = .001*MF ## N = int(np.ceil((roi[1]-roi[0])/delta)) corrhist = np.zeros([N,N], dtype=int) multi_idxs = np.where(epos['ipp']>1)[0] for multi_idx in multi_idxs: n_hits = epos['ipp'][multi_idx] cluster = dat[multi_idx:multi_idx+n_hits] idx1 = -1 idx2 = -1 for i in range(n_hits): for j in range(i+1,n_hits): idx1 = int(np.floor(cluster[i]/delta)) idx2 = int(np.floor(cluster[j]/delta)) if idx1 < N and idx2 < N: corrhist[idx1,idx2] += 1 return corrhist+corrhist.T-np.diag(np.diag(corrhist)) def dummy(): # Voltage and bowl correct ToF data from voltage_and_bowl import do_voltage_and_bowl p_volt = np.array([]) p_bowl = np.array([]) tof_corr, p_volt, p_bowl = do_voltage_and_bowl(epos,p_volt,p_bowl) epos_vb = epos.copy() epos_vb['tof'] = tof_corr.copy() import voltage_and_bowl tof_vcorr = voltage_and_bowl.mod_full_voltage_correction(p_volt,epos['tof'],epos['v_dc']) epos_v = epos.copy() epos_v['tof'] = tof_vcorr.copy() tof_bcorr = voltage_and_bowl.mod_geometric_bowl_correction(p_bowl,epos['tof'],epos['x_det'],epos['y_det']) epos_b = epos.copy() epos_b['tof'] = tof_bcorr.copy() ROI = [0, None] ch = histogram_functions.corrhist(epos) fig1 = plt.figure(num=1) plt.clf() plt.imshow(np.log2(1+ch)) plt.title('raw') fig1.gca().set_xlim(ROI[0],ROI[1]) fig1.gca().set_ylim(ROI[0],ROI[1]) ch = histogram_functions.corrhist(epos_v) fig2 = plt.figure(num=2) plt.clf() plt.imshow(np.log2(1+ch)) plt.title('volt') fig2.gca().set_xlim(ROI[0],ROI[1]) fig2.gca().set_ylim(ROI[0],ROI[1]) ch = histogram_functions.corrhist(epos_b) fig3 = plt.figure(num=3) plt.clf() plt.imshow(np.log2(1+ch)) plt.title('bowl') fig3.gca().set_xlim(ROI[0],ROI[1]) fig3.gca().set_ylim(ROI[0],ROI[1]) ch = histogram_functions.corrhist(epos_vb) fig4 = plt.figure(num=4) plt.clf() plt.imshow(np.log10(1+ch)) plt.title('v+b') # fig4.gca().set_xlim(ROI[0],ROI[1]) # fig4.gca().set_ylim(ROI[0],ROI[1]) idxs = np.where(epos['ipp'] == 2)[0] fig5 = plt.figure(num=5) plt.clf() dts = np.abs(tof_corr[idxs]-tof_corr[idxs+1]) plt.hist(dts,bins=np.arange(0,2000,.5),label='deltaT') plt.hist(tof_corr[np.r_[idxs,idxs+1]],bins=np.arange(0,2000,.5),label='since t0') fig66 = 
plt.figure(num=66) plt.clf() dts = np.abs(tof_corr[idxs]-tof_corr[idxs+1]) # sus = np.sqrt(tof_corr[idxs]**2+tof_corr[idxs+1]**2) # sus = np.fmax(tof_corr[idxs],tof_corr[idxs+1]) sus = (tof_corr[idxs]+tof_corr[idxs+1])/np.sqrt(2) plt.plot(sus,dts,'.',ms=1,alpha=1) # fig66.gca().axis('equal') fig66.gca().set_xlim(0,7000) fig66.gca().set_ylim(-100, 800) return
4,923
2,209
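The bin-aligned branch of bin_dat floors and ceils the data range to multiples of bin_width before histogramming. A stand-alone numpy sketch of that same computation (illustration only, not a call into the file above; the data is synthetic):

# Stand-alone numpy sketch mirroring bin_dat's bin-aligned branch: align the
# histogram range to multiples of bin_width, then return bin centers and counts.
import numpy as np

dat = np.random.normal(loc=10.0, scale=0.5, size=10_000)
bin_width = 0.001

lower = np.floor(dat.min() / bin_width) * bin_width      # bin-aligned roi
upper = np.ceil(dat.max() / bin_width) * bin_width
num_bins = int(np.rint(upper / bin_width - lower / bin_width))

ys, edges = np.histogram(dat, range=(lower, upper), bins=num_bins)
xs = (edges[1:] + edges[:-1]) / 2                        # bin centers
print(xs.size, ys.sum())                                 # num_bins, 10000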
import inspect # http://docs.python.org/2/library/inspect.html from pprint import pprint from bage_utils.dict_util import DictUtil # @UnusedImport class InspectUtil(object): @staticmethod def summary(): frame = inspect.stack()[1] d = {'file': frame[1], 'line': frame[2], 'function': frame[3], 'code': frame[4]} return d @staticmethod def all(): frame = inspect.stack()[1] d = {} for key in dir(frame[0]): d[key] = getattr(frame[0], key) return DictUtil.sort_by_key(d) @staticmethod def locals(): frame = inspect.stack()[1] d = {} for key in frame[0].f_locals: d[key] = frame[0].f_locals[key] return DictUtil.sort_by_key(d) @staticmethod def globals(): frame = inspect.stack()[1] d = {} for key in frame[0].f_globals: d[key] = frame[0].f_globals[key] return DictUtil.sort_by_key(d) def __test(): pprint(InspectUtil.summary()) pprint(InspectUtil.locals()) if __name__ == '__main__': pprint(InspectUtil.summary()) # __test()
1,138
397
import struct class SeriesDefinition(object): def __init__(self, seriesname, replicacount, generation, autotrim, recordsize, options, tombstonedon): self._seriesname = seriesname self._replicacount = replicacount self._generation = generation self._autotrim = autotrim self._recordsize = recordsize self._options = options self._tombstonedon = tombstonedon self._intp = None @property def seriesname(self): return self._seriesname @seriesname.setter def seriesname(self, value): self._seriesname = value self._intp = None @property def replicacount(self): return self._replicacount @replicacount.setter def replicacount(self, value): self._replicacount = value self._intp = None @property def generation(self): return self._generation @generation.setter def generation(self, value): self._generation = int(value) self._intp = None @property def recordsize(self): return self._recordsize @recordsize.setter def recordsize(self, value): self._recordsize = int(value) self._intp = None @property def options(self): return self._options @options.setter def options(self, value): self._options = value self._intp = None @property def autotrim(self): return self._autotrim @autotrim.setter def autotrim(self, value): self._autotrim = value self._intp = None @property def tombstonedon(self): return self._tombstonedon @tombstonedon.setter def tombstonedon(self, value): self._tombstonedon = long(value) self._intp = None def __precompileINTP(self): a = struct.pack('>iiqqqh', self._replicacount, self._recordsize, self._generation, self._autotrim, self._tombstonedon, len(self._options)) b = self._options + struct.pack('>h', len(self._seriesname)) + self._seriesname self._intp = str(a+b) def toINTP(self): if self._intp == None: self.__precompileINTP() return self._intp def _lengthInBytes(self): return len(self.toINTP()) @staticmethod def fromINTP(dat): repc, recs, genr, autr, tombs, lenopt = struct.unpack('>iiqqqh', dat[:34]) options = dat[34:34+lenopt] lennam, = struct.unpack('>h', dat[34+lenopt:34+lenopt+2]) nam = dat[34+lenopt+2:34+lenopt+2+lennam] if len(nam) != lennam: raise Exception return SeriesDefinition(nam, repc, genr, autr, recs, options, tombs)
2,812
879
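toINTP/fromINTP use a fixed 34-byte big-endian header ('>iiqqqh') followed by the options bytes and a length-prefixed series name. A Python 3 bytes sketch of that wire layout (the class above is Python 2 and builds the record by string concatenation; the field values here are made up):

# Python 3 sketch of the INTP wire layout: 34-byte big-endian header, then the
# options bytes, then a length-prefixed series name.
import struct

name, options = b"cpu.load", b""
header = struct.pack(">iiqqqh",
                     3,             # replicacount
                     64,            # recordsize
                     1,             # generation
                     0,             # autotrim
                     0,             # tombstonedon
                     len(options))
payload = header + options + struct.pack(">h", len(name)) + name

# Decode it back the same way fromINTP does.
repc, recs, genr, autr, tomb, lenopt = struct.unpack(">iiqqqh", payload[:34])
opts = payload[34:34 + lenopt]
(lennam,) = struct.unpack(">h", payload[34 + lenopt:34 + lenopt + 2])
decoded_name = payload[36 + lenopt:36 + lenopt + lennam]
assert decoded_name == name and repc == 3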
import os import random import time import xlwt from selenium.webdriver.common.action_chains import ActionChains from selenium.webdriver.support.ui import WebDriverWait from front_login import * from readConfig import ReadConfig from db import DbOperate from selenium.webdriver.chrome.options import Options from mysqldb import connect chrome_options = Options() chrome_options.add_argument('--headless') driver = webdriver.Chrome(chrome_options=chrome_options) # driver = webdriver.Chrome() driver.maximize_window() driver.get(ReadConfig().get_root_url()) driver.get(ReadConfig().get_root_url()) class Common(object): def __init__(self): self.driver = driver # Excel writing self.row = 0 self.workbook = xlwt.Workbook(encoding='utf-8') self.booksheet = self.workbook.add_sheet('Sheet1') self.timetemp = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) # number used in the saved Excel file name # quantity for each case self.number = 1 self.report_path = ReadConfig().save_report() self.windows = None self.screen_path = ReadConfig().save_screen() # increase the number of cases def number_add(self): if self.number > 1: for i in range(self.number): self.driver.find_element_by_xpath("//a[@class='add']").click() else: self.driver.find_element_by_xpath("//a[@class='add']").click() # decrease the number of cases to 1 def number_minus(self): while self.number > 1: self.driver.find_element_by_xpath("//a[@class='jian']").click() # save to the database def save_to_mysql(self, parm): code = 0 if isinstance(parm, list): parm.append(code) else: parm = list(parm) parm.append(code) res_code = connect(parm) print("storage status", res_code) # execute order placement def execute_function(self, callback): try: eval("self.{}()".format(callback)) except Exception as e: print("error message:", e) self.write_error_log(callback) time.sleep(0.5) self.write_error_log(str(e)) def write_error_log(self, info): error_log_path = os.path.join(self.report_path, "error_log_{}.log".format(time.strftime("%Y-%m-%d", time.localtime()))) with open(error_log_path, "a", encoding="utf-8") as f: f.write("{}: ".format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) + info + "\n") # process the price string def process_price(self, price): if "¥" in price: price = price.replace("¥", '') return price # close windows def closed_windows(self, num): self.windows = self.driver.window_handles for n in range(num + 1, len(self.windows)): self.driver.switch_to.window(self.windows[n]) self.driver.close() self.windows = self.driver.window_handles self.driver.switch_to.window(self.windows[num]) # store information def excel_number(self, infos): # get the case name and case number if infos: n = 0 for info in infos: self.booksheet.write(self.row, n, info) self.booksheet.col(n).width = 300 * 28 n += 1 path = os.path.join(self.report_path, "report_{}.xls".format(self.timetemp)) self.workbook.save(path) # take a screenshot of the window def qr_shotscreen(self, windows_handle, name): current_window = self.driver.current_window_handle if current_window != windows_handle: self.driver.switch_to.window(windows_handle) path = self.screen_path self.driver.save_screenshot(path + self.timetemp + name + ".png") print("screenshot saved") self.driver.switch_to.window(current_window) else: path = self.screen_path self.driver.save_screenshot(path + self.timetemp +name + ".png") print("screenshot saved")
3,914
1,330
# dashboard_generator.py import os.path # helps to save in a different folder import pandas as pd import itertools import locale # from https://stackoverflow.com/Questions/320929/currency-formatting-in-python from os import listdir from os.path import isfile, join #for chart generation import matplotlib import matplotlib.pyplot as plt import matplotlib.ticker as ticker # FILES PATH save_path = 'C:/Users/Owner/Desktop/NYU-MBA/Programming/Files/monthly-sales/data' # INTRODUCTION print("Select one month to report") print("---------------------------------------------------------------------") # LISTING FILES (sorted and in a proper list) onlyfiles = [f for f in listdir(save_path) if isfile(join(save_path, f))] #https://stackoverflow.com/questions/3207219/how-do-i-list-all-files-of-a-directory onlyfiles.sort() print(*onlyfiles, sep = "\n") #https://www.geeksforgeeks.org/print-lists-in-python-4-different-ways/ print("---------------------------------------------------------------------") # REPORT SELECTION selected_year = input("Please input a year (Example 2018 -- for Year): ") selected_month = input("Please input a month (Example 01 -- for January): ") # FILE SELECTED file_name = "sales-" + selected_year + selected_month + ".csv" # OPENING SPECIFIC FILE find_file = os.path.join(save_path, file_name) #find the file while not os.path.exists(find_file): #correct if does not exist print("---------------------------------------------------------------------") print("\n") print("The file selected do not exist. Please try again") print("\n") print("---------------------------------------------------------------------") exit() stats = pd.read_csv(find_file) # PERFORMING THE SUM total_sales = stats["sales price"].sum() # FORMATTING TOTAL SALES locale.setlocale( locale.LC_ALL, '' ) total_sales_format = locale.currency(total_sales, grouping= True) print("---------------------------------------------------------------------") # SALES REPORT DATE if selected_month == "01": month_name = "JANUARY" if selected_month == "02": month_name = "FEBRUARY" if selected_month == "03": month_name = "MARCH" if selected_month == "04": month_name = "APRIL" if selected_month == "05": month_name = "MAY" if selected_month == "06": month_name = "JUNE" if selected_month == "07": month_name = "JULY" if selected_month == "08": month_name = "AUGUST" if selected_month == "09": month_name = "SEPTEMBER" if selected_month == "10": month_name = "OCTOBER" if selected_month == "11": month_name = "NOVEMBER" if selected_month == "12": month_name = "DECEMBER" print("SALES REPORT " + "(" + month_name + " " + selected_year + ")") # PRINTING TOTAL SALES print("TOTAL SALES: " + (total_sales_format)) print("\n") # TOP SELLING PRODUCTS product_totals = stats.groupby(["product"]).sum() product_totals = product_totals.sort_values("sales price", ascending=False) top_sellers = [] rank = 1 for i, row in product_totals.iterrows(): d = {"rank": rank, "name": row.name, "monthly_sales": row["sales price"]} top_sellers.append(d) rank = rank + 1 def to_usd(my_price): return "${0:,.2f}".format(my_price) print("TOP SELLING PRODUCTS:") for d in top_sellers: locale.setlocale( locale.LC_ALL, '' ) print(" " + str(d["rank"]) + ") " + d["name"] + ": " + to_usd(d["monthly_sales"])) print("\n") print("---------------------------------------------------------------------") print("\n") print("GENERATING BAR CHART...") print("\n") print("---------------------------------------------------------------------") ### PRINT BAR CHART # first two lines are the list 
comprehensions that turn the list of dictionaries into two lists x = [p["name"] for p in top_sellers] ## VERY IMPORTANT y = [p["monthly_sales"] for p in top_sellers] ## VERY IMPORTANT #sorting in the correct order x.reverse() y.reverse() # break charts into two fig, ax = plt.subplots() # enables us to further customize the figure and/or the axes #formatting chart usd_formatter = ticker.FormatStrFormatter('$%1.0f') ax.xaxis.set_major_formatter(usd_formatter) # CHART GENERATION plt.barh(x, y) plt.title("TOP-SELLING PRODUCTS " + "(" + month_name + " " + selected_year + ")") # AXIS TITLES plt.xlabel('Sales (USD)') # AXIS TITLES plt.ylabel("Product") # AXIS TITLES # formatting numbers for i, v in enumerate(y): ax.text(v, i, usd_formatter(v), color='black', fontweight='bold') #https://matplotlib.org/users/colors.html #https://matplotlib.org/3.1.0/gallery/pyplots/text_commands.html#sphx-glr-gallery-pyplots-text-commands-py plt.tight_layout() # ensures all areas of the chart are visible by default (fixes labels getting cut off) plt.show() exit() ## FULL SOLUTION PROVIDED BY THE PROFESSOR # # this section needs to come before the chart construction # fig, ax = plt.subplots() # enables us to further customize the figure and/or the axes # usd_formatter = ticker.FormatStrFormatter('$%1.0f') # ax.xaxis.set_major_formatter(usd_formatter) # # # chart construction # plt.barh(sorted_products, sorted_sales) # plt.title(chart_title) # plt.ylabel("Product") # plt.xlabel("Monthly Sales (USD)") # # plt.tight_layout() # ensures all areas of the chart are visible by default (fixes labels getting cut off) # plt.show()
5,367
1,847
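The top-sellers block is a groupby-sum-sort over the monthly CSV. A self-contained pandas sketch of the same aggregation on a tiny inline frame (column names follow the script; the data is invented):

# Self-contained pandas sketch of the top-sellers aggregation, using an inline
# frame instead of the monthly CSV.
import pandas as pd

stats = pd.DataFrame({
    "product": ["Chocolate", "Coffee", "Chocolate", "Tea"],
    "sales price": [12.50, 30.00, 7.50, 5.00],
})

product_totals = (stats.groupby("product")["sales price"].sum()
                        .sort_values(ascending=False))

def to_usd(my_price):
    return "${0:,.2f}".format(my_price)

for rank, (name, monthly_sales) in enumerate(product_totals.items(), start=1):
    print(f"{rank}) {name}: {to_usd(monthly_sales)}")
# 1) Coffee: $30.00
# 2) Chocolate: $20.00
# 3) Tea: $5.00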
from urllib.parse import urljoin from scrapy import Request from product_spider.items import RawData from product_spider.utils.functions import strip from product_spider.utils.spider_mixin import BaseSpider class MedicalIsotopesSpider(BaseSpider): name = "medicalisotopes" base_url = "https://www.medicalisotopes.com/" start_urls = ['https://www.medicalisotopes.com/productsbycategories.php', ] def parse(self, response): a_nodes = response.xpath('//div[contains(@class, "main-content")]//a') for a in a_nodes: parent = a.xpath('./text()').get() url = a.xpath('./@href').get() yield Request(urljoin(self.base_url, url), callback=self.parse_list, meta={'parent': parent}) def parse_list(self, response): rel_urls = response.xpath('//td[2]/a/@href').getall() parent = response.meta.get('parent') for rel_url in rel_urls: yield Request(urljoin(self.base_url, rel_url), callback=self.parse_detail, meta={'parent': parent}) next_page = response.xpath('//a[@class="c-page"]/following-sibling::a[text()!="NEXT"]/@href').get() if next_page: yield Request(urljoin(self.base_url, next_page), callback=self.parse_list, meta={'parent': parent}) def parse_detail(self, response): tmp = '//td[contains(text(), {!r})]/following-sibling::td//text()' package = strip(response.xpath('normalize-space(//td/table//td[1]/text())').get()) d = { 'brand': 'medicalisotopes', 'parent': response.meta.get('parent'), 'cat_no': strip(response.xpath(tmp.format("Catalog Number:")).get()), 'en_name': strip(response.xpath('//th[contains(text(), "Product:")]/following-sibling::th/text()').get()), 'cas': strip(response.xpath(tmp.format("CAS Number:")).get()), 'mf': strip(''.join(response.xpath(tmp.format("Formula:")).getall())), 'mw': strip(response.xpath(tmp.format("Molecular Weight:")).get()), 'info3': package and package.rstrip('\xa0='), 'info4': strip(response.xpath('//td/table//td[2]/text()').get()), 'prd_url': response.url, } yield RawData(**d)
2,235
718
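parse_detail builds all of its field XPaths from a single template with str.format; {!r} drops each label into the expression as a quoted literal. A pure-string sketch of the strings it produces (no scrapy needed):

# Pure-string sketch of the XPath template trick used in parse_detail.
tmp = '//td[contains(text(), {!r})]/following-sibling::td//text()'

for label in ("Catalog Number:", "CAS Number:", "Molecular Weight:"):
    print(tmp.format(label))
# //td[contains(text(), 'Catalog Number:')]/following-sibling::td//text()
# //td[contains(text(), 'CAS Number:')]/following-sibling::td//text()
# //td[contains(text(), 'Molecular Weight:')]/following-sibling::td//text()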
import yaml def read_config(path): with open(path, 'r') as f: conf = yaml.safe_load(f) return conf
117
45
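A short usage sketch for read_config: write a small YAML file and load it back (assumes PyYAML is installed; the file name and keys are made up, and the function is redefined here so the snippet runs on its own):

# Usage sketch for read_config with an invented config file.
import yaml

def read_config(path):
    with open(path, "r") as f:
        return yaml.safe_load(f)

sample = {"model": {"lr": 0.001, "epochs": 10}, "data_dir": "data/"}
with open("config.yml", "w") as f:
    yaml.safe_dump(sample, f)

conf = read_config("config.yml")
print(conf["model"]["lr"])   # 0.001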
#!/usr/bin/python import os lines = [line for line in open("hehe.txt")] for line in lines: i = 0 for c in line: if (c != '_' and not (c >= '0' and c <= '9')): break i+=1 cmd = "mv " + line[0:i].strip() + line[i+5:].strip() + " lab2_" + line[0:i].strip() + line[i+5:].strip() print cmd os.system(cmd) continue index = line.find("_lab2_") num = line[0 : index + 1] value = line[index + 6 : ] nn = "lab2_" + num + value cmd = "mv 3_" + line.strip() + " " + nn #print cmd os.system(cmd)
511
236
#!/usr/bin/enb python import string, re import Handler ####################### # Define some regular expressions inside a quoted string # then turn the string into the actual data structure. # (I found it was easiest to understand when done this way.) definitions = r""" # These are the atomic symbols Daylight allows outside of []s # See "atom_class" for names like "a" and "A" raw_atom Cl|Br|[cnospBCNOFPSI*] # For atoms inside of []s open_bracket \[ close_bracket \] # See "element_modifiers" for the patterns for element names # charges, chiralities, H count, etc. # [235U] weight \d+ # [#6] atomic_number #\d+ # [!C] atom_not ! # & is highest (an "and") # , is next (an "or") # ; is lowest (an "and") # [n&H] [n,H] [c,h;H1] atom_binary [&,;] # C.C dot \. # - single bond (aliphatic) # / directional single bond "up" # \ directional single bond "down" # /? directional bond "up or unspecified" # \? directional bond "down or unspecified" # = double bond # # triple bond # : aromatic bond # ~ any bond (wildcard) # @ any ring bond bond [/\\]\??|[=#:~@-] # *!:* -- not aromatic bond_not ! # *@;!:* -- same as !: bond_binary [&;,] # (C).(C) open_zero \( # C(C) open_branch \( # [$(*C);$(*CC)] open_recursive_smarts \$\( # special cased because it closes open_zero, open_branch, and # recursive_smarts close_parens \) # Ring closures, 1, %5 %99 (and even %00 for what it's worth) closure \d|%\d\d? """ ####################### # Turn the above string into key/value pairs where the # values are the compiled regular expressions. info = {} for line in string.split(definitions, "\n"): line = string.strip(line) if not line or line[:1] == "#": continue name, pattern = string.split(line) info[name] = re.compile(pattern) del line, name, pattern info["atom_class"] = re.compile(r""" (?P<raw_aromatic>a)| # Not really sure what these mean (?P<raw_b_unknown>b)| (?P<raw_f_unknown>f)| (?P<raw_h_unknown>h)| (?P<raw_i_unknown>i)| (?P<raw_r_unknown>r)| (?P<raw_aliphatic>A)| (?P<raw_R_unknown>R) """, re.X) # 'H' is used for the hydrogen count, so those searches require a # special recursive SMARTS definition. Eg, for deuterium or tritium # [$([2H]),$([3H])] # This is implemented as a special-case hack. Note: if there's # an error in the parse string in this section then the error # location will point to the start of this term, not at the # character that really caused the error. Can be fixed with an # 'error_' like I did for the SMILES -- not needed for now. XXX hydrogen_term_fields = [ "open_recursive_smarts", "open_bracket", "weight", "element", "positive_count", "positive_symbols", "negative_count", "negative_symbols", "close_bracket", "close_recursive_smarts", ] info["hydrogen_term"] = re.compile(r""" (?P<open_recursive_smarts>\$\() (?P<open_bracket>\[) (?P<weight>\d+)? # optional molecular weight [2H] (?P<element>H) # Must be a hydrogen ( # optional charge (?P<positive_count>\+\d+)| # +3 (?P<positive_symbols>\++)| # ++ (?P<negative_count>\-\d+)| # -2 (?P<negative_symbols>\-+)| # --- )? (?P<close_bracket>\]) (?P<close_recursive_smarts>\)) """, re.X) element_symbols_pattern = \ r"C[laroudsemf]?|Os?|N[eaibdpos]?|S[icernbmg]?|P[drmtboau]?|" \ r"H[eofgas]|c|n|o|s|p|A[lrsgutcm]|B[eraik]?|Dy|E[urs]|F[erm]?|" \ r"G[aed]|I[nr]?|Kr?|L[iaur]|M[gnodt]|R[buhenaf]|T[icebmalh]|" \ r"U|V|W|Xe|Yb?|Z[nr]|\*" info["element_modifier"] = re.compile(r""" (?P<element> # This does *not* contain H. Hydrogen searches must be done # with a special recursive SMARTS. On the other hand, it does # include the lower case aromatic names. 
""" + element_symbols_pattern + r""" )| (?P<aromatic>a)| # aromatic (?P<aliphatic>A)| # Aliphatic (?P<degree>D\d+)| # Degree<n> (?P<total_hcount>H\d*)| # total Hydrogen count<n> (defaults to 1) (?P<imp_hcount>h\d*)| # implicit hydrogen count<n> (defaults to 1) (?P<ring_membership>R\d*)| # in <n> Rings (no n means any rings) (?P<ring_size>r\d*)| # in a ring of size <n> (no n means any rings) (?P<valence>v\d+)| # total bond order of <n> (?P<connectivity>X\d+)| # <n> total connections (?P<positive_count>\+\d+)| # +2 +3 (?P<positive_symbols>\++)| # + ++ +++ (?P<negative_count>\-\d+)| # -1 -4 (?P<negative_symbols>\-+)| # -- - ------- # XXX What about chiral_count? (?P<chiral_named> # The optional '?' means "or unspecified" @TH[12]\??| # @TH1 @TH2? @AL[12]\??| # @AL2? @SP[123]\??| # @SP3 @SP1? @TB(1[0-9]?|20?|[3-9])\??| # @TH{1 through 20} @OH(1[0-9]?|2[0-9]?|30?|[4-9])\?? # @OH{1 through 30} )| (?P<chiral_symbols>@@?\??) # @ (anticlockwise) or @@ (clockwise) """, re.X) # The ')' closes three different open parens. This maps from the # previous open state to the appropriate close state. close_parens_states = { "open_branch": "close_branch", "open_recursive_smarts": "close_recursive_smarts", "open_zero": "close_zero", } #### Some helpful definitions to reduce clutter and complication # Possible transitions from the start node. Also visited after # a '.' disconnect or in a recursive SMARTS. expecting_start = ("raw_atom", "atom_class", "open_bracket", "open_zero") # Looking for node definition, like "C" or "a" or "[" expecting_atom = ("raw_atom", "atom_class", "open_bracket") # Inside of []s: 235U, #6, R, $([2H]), $(*=C), ! expecting_element_start = ("weight", "atomic_number", "element_modifier", "hydrogen_term", "open_recursive_smarts", "atom_not") # the ';' in [n;H1] or the ']' at the end expecting_element_end = ("atom_binary", "close_bracket") # All bonds start with a '!' or one of the bond symbols expecting_bond_start = ("bond", "bond_not") expecting_raw_term = expecting_atom + expecting_bond_start + \ ("close_parens", "open_branch", "dot", "closure") expecting_modifier = ("element_modifier", "open_recursive_smarts") table = { "start": expecting_start, # (C).(R).[U].([$(*)]) "open_zero": ("raw_atom", "atom_class", "open_bracket"), # as well as (CC(C)) "close_zero": ("dot", "close_parens"), # A raw term are the things like 'C', '[U]', '%10', '.', '(', '!#' "raw_atom": expecting_raw_term, # An atom_class is a non-specific atom term, like 'A' or 'r' "atom_class": expecting_raw_term, # the []s "open_bracket": expecting_element_start, "close_bracket": expecting_raw_term, # Yes, '[!!!!C]' is legal, according to the docs, but it isn't # supported by the parser, unless you optimze it. "atom_not": expecting_element_start, "atom_binary": expecting_element_start, # "14N", "14a", ... 
# Note that weight can only be set once so it isn't a modifier # Also, "14#6" isn't legal (tested against the toolkit) "weight": expecting_modifier, # "#6R2" or "#8," or "#7]" # The atomic_number can only be set once so it isn't a modifier "atomic_number": expecting_modifier + expecting_element_end, # All of these are type of modifiers "element_modifier": expecting_modifier + expecting_element_end, "hydrogen_term": expecting_modifier + expecting_element_end, "close_recursive_smarts": expecting_modifier + expecting_element_end, # This it the recursive part -- goes back to the beginning "open_recursive_smarts": expecting_start, # C=C, C1CCC=1, C~-C, C=(C)C, C=,-C "bond": expecting_atom + ("closure", "bond", "open_branch", "bond_binary"), # C!!=C "bond_not": expecting_bond_start, # C=,-C "bond_binary": expecting_bond_start, "closure": expecting_raw_term, "close_branch": expecting_raw_term, "open_branch": expecting_atom + expecting_bond_start + ("dot",), # After a "." we can start all over again "dot": expecting_start, } def tokenize(s, handler = Handler.TokenHandler()): expected = table["start"] parens_stack = [] n = len(s) i = 0 handler.begin() while i < n: for state in expected: m = info[state].match(s, i) if m: break else: handler.error("Unknown character", i, s[i:]) return if close_parens_states.has_key(state): parens_stack.append(state) elif state == "close_parens": try: state = close_parens_states[parens_stack.pop()] except IndexError: # Too many close parens handler.error("Too many ')'", i, s[i:]) return d = m.groupdict() if d and state == "hydrogen_term": # Special case the hydrogen term for field in hydrogen_term_fields: if d[field] is not None: handler.add_token(field, i, d[field]) #print " --> New state:", state else: name = state if d: # There should only be one match for name, v in d.items(): if v is not None: break handler.add_token(name, i, m.group(0)) expected = table[state] i = m.end(0) handler.end()
9,819
3,678
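The quoted definitions block is turned into a name-to-compiled-regex table by the loop that follows it. A Python 3 sketch of that parsing step (the original uses the Python 2 string module) on two of the rules, matched against a small SMARTS fragment:

# Python 3 sketch: parse two rules from a definitions-style block into a
# name -> compiled-regex dict, then match a SMARTS fragment.
import re

definitions = r"""
raw_atom    Cl|Br|[cnospBCNOFPSI*]
closure     \d|%\d\d?
"""

info = {}
for line in definitions.split("\n"):
    line = line.strip()
    if not line or line.startswith("#"):
        continue
    name, pattern = line.split()
    info[name] = re.compile(pattern)

print(info["raw_atom"].findall("c1ccccc1Br"))   # ['c', 'c', 'c', 'c', 'c', 'c', 'Br']
print(info["closure"].findall("C1CC%12C"))      # ['1', '%12']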
# This is default settings for VisARTM for local usage import os BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) DATA_DIR = os.path.join(BASE_DIR, "data") SECRET_KEY = 'yj_fhwf$-8ws1%a_vl5c0lf($#ke@c3+lu3l-f733k(j-!q*57' DEBUG = True ALLOWED_HOSTS = ["127.0.0.1"] THREADING = True REGISTRATION_CLOSED = False DEFAULT_FROM_EMAIL = 'visartm@yandex.ru' SERVER_EMAIL = 'visartm@yandex.ru' EMAIL_HOST = 'smtp.yandex.ru' EMAIL_HOST_USER = 'visartm@yandex.ru' EMAIL_HOST_PASSWORD = '' EMAIL_PORT = 587 EMAIL_USE_TLS = True INSTALLED_APPS = [ 'test_without_migrations', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'datasets', 'visual', 'models', 'assessment', 'research', 'tools', 'accounts' ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'visartm.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ os.path.join(BASE_DIR, 'templates'), ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'visartm.wsgi.application' DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'visartm.sqlite', } } AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend'] SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies' AUTH_PASSWORD_VALIDATORS = [ { 'NAME': ( 'django.contrib.auth.password_validation.' 'UserAttributeSimilarityValidator'), }, { 'NAME': ( 'django.contrib.auth.password_validation.' 'MinimumLengthValidator'), }, { 'NAME': ( 'django.contrib.auth.password_validation.' 'CommonPasswordValidator'), }, { 'NAME': ( 'django.contrib.auth.password_validation.' 'NumericPasswordValidator'), }, ] LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = False STATIC_URL = '/static/' STATICFILES_DIRS = ( os.path.join(BASE_DIR, "static"), )
2,915
1,029
"""pyvizio constants.""" DEVICE_CLASS_SPEAKER = "speaker" DEVICE_CLASS_TV = "tv" DEVICE_CLASS_CRAVE360 = "crave360" DEFAULT_DEVICE_ID = "pyvizio" DEFAULT_DEVICE_CLASS = DEVICE_CLASS_TV DEFAULT_DEVICE_NAME = "Python Vizio" DEFAULT_PORTS = [7345, 9000] DEFAULT_TIMEOUT = 5 MAX_VOLUME = {DEVICE_CLASS_TV: 100, DEVICE_CLASS_SPEAKER: 31, DEVICE_CLASS_CRAVE360: 100} # Current Input when app is active INPUT_APPS = ["SMARTCAST", "CAST"] # App name returned when it is not in app dictionary UNKNOWN_APP = "_UNKNOWN_APP" NO_APP_RUNNING = "_NO_APP_RUNNING" SMARTCAST_HOME = "SmartCast Home" APP_CAST = "Cast" # NAME_SPACE values that appear to be equivalent EQUIVALENT_NAME_SPACES = (2, 4) APP_HOME = { "name": SMARTCAST_HOME, "country": ["*"], "config": [ { "NAME_SPACE": 4, "APP_ID": "1", "MESSAGE": "http://127.0.0.1:12345/scfs/sctv/main.html", } ], } # No longer needed but kept around in case the external source for APPS is unavailable APPS = [ { "name": "Prime Video", "country": ["*"], "id": ["33"], "config": [ { "APP_ID": "4", "NAME_SPACE": 4, "MESSAGE": "https://atv-ext.amazon.com/blast-app-hosting/html5/index.html?deviceTypeID=A3OI4IHTNZQWDD", }, {"NAME_SPACE": 2, "APP_ID": "4", "MESSAGE": "None"}, ], }, { "name": "CBS All Access", "country": ["usa"], "id": ["9"], "config": [{"NAME_SPACE": 2, "APP_ID": "37", "MESSAGE": "None"}], }, { "name": "CBS News", "country": ["usa", "can"], "id": ["56"], "config": [{"NAME_SPACE": 2, "APP_ID": "42", "MESSAGE": "None"}], }, { "name": "Crackle", "country": ["usa"], "id": ["8"], "config": [{"NAME_SPACE": 2, "APP_ID": "5", "MESSAGE": "None"}], }, { "name": "Curiosity Stream", "country": ["usa", "can"], "id": ["37"], "config": [{"NAME_SPACE": 2, "APP_ID": "12", "MESSAGE": "None"}], }, { "name": "Fandango Now", "country": ["usa"], "id": ["24"], "config": [{"NAME_SPACE": 2, "APP_ID": "7", "MESSAGE": "None"}], }, { "name": "FilmRise", "country": ["usa"], "id": ["47"], "config": [{"NAME_SPACE": 2, "APP_ID": "24", "MESSAGE": "None"}], }, { "name": "Flixfling", "country": ["*"], "id": ["49"], "config": [{"NAME_SPACE": 2, "APP_ID": "36", "MESSAGE": "None"}], }, { "name": "Haystack TV", "country": ["usa", "can"], "id": ["35"], "config": [ { "NAME_SPACE": 0, "APP_ID": "898AF734", "MESSAGE": '{"CAST_NAMESPACE":"urn:x-cast:com.google.cast.media","CAST_MESSAGE":{"type":"LOAD","media":{},"autoplay":true,"currentTime":0,"customData":{"platform":"sctv"}}}', } ], }, { "name": "Hulu", "country": ["usa"], "id": ["19"], "config": [ { "APP_ID": "3", "NAME_SPACE": 4, "MESSAGE": "https://viziosmartcast.app.hulu.com/livingroom/viziosmartcast/1/index.html#initialize", }, {"NAME_SPACE": 2, "APP_ID": "3", "MESSAGE": "None"}, ], }, { "name": "iHeartRadio", "country": ["usa"], "id": ["11"], "config": [{"NAME_SPACE": 2, "APP_ID": "6", "MESSAGE": "None"}], }, { "name": "NBC", "country": ["usa"], "id": ["43"], "config": [{"NAME_SPACE": 2, "APP_ID": "10", "MESSAGE": "None"}], }, { "name": "Netflix", "country": ["*"], "id": ["34"], "config": [{"NAME_SPACE": 3, "APP_ID": "1", "MESSAGE": "None"}], }, { "name": "Plex", "country": ["usa", "can"], "id": ["40"], "config": [ { "APP_ID": "9", "NAME_SPACE": 4, "MESSAGE": "https://plex.tv/web/tv/vizio-smartcast", }, {"NAME_SPACE": 2, "APP_ID": "9", "MESSAGE": "None"}, ], }, { "name": "Pluto TV", "country": ["usa"], "id": ["12"], "config": [ {"APP_ID": "65", "NAME_SPACE": 4, "MESSAGE": "https://smartcast.pluto.tv"}, { "NAME_SPACE": 0, "APP_ID": "E6F74C01", "MESSAGE": 
'{"CAST_NAMESPACE":"urn:x-cast:tv.pluto","CAST_MESSAGE":{"command":"initializePlayback","channel":"","episode":"","time":0}}', }, ], }, { "name": "RedBox", "country": ["usa"], "id": ["55"], "config": [{"NAME_SPACE": 2, "APP_ID": "41", "MESSAGE": "None"}], }, { "name": "TasteIt", "country": ["*"], "id": ["52"], "config": [{"NAME_SPACE": 2, "APP_ID": "26", "MESSAGE": "None"}], }, { "name": "Toon Goggles", "country": ["usa", "can"], "id": ["46"], "config": [{"NAME_SPACE": 2, "APP_ID": "21", "MESSAGE": "None"}], }, { "name": "Vudu", "country": ["usa"], "id": ["6"], "config": [ { "APP_ID": "31", "NAME_SPACE": 4, "MESSAGE": "https://my.vudu.com/castReceiver/index.html?launch-source=app-icon", } ], }, { "name": "XUMO", "country": ["usa"], "id": ["27"], "config": [ { "NAME_SPACE": 0, "APP_ID": "36E1EA1F", "MESSAGE": '{"CAST_NAMESPACE":"urn:x-cast:com.google.cast.media","CAST_MESSAGE":{"type":"LOAD","media":{},"autoplay":true,"currentTime":0,"customData":{}}}', } ], }, { "name": "YouTubeTV", "country": ["usa", "mexico"], "id": ["45"], "config": [{"NAME_SPACE": 5, "APP_ID": "3", "MESSAGE": "None"}], }, { "name": "YouTube", "country": ["*"], "id": ["44"], "config": [{"NAME_SPACE": 5, "APP_ID": "1", "MESSAGE": "None"}], }, { "name": "Baeble", "country": ["usa"], "id": ["39"], "config": [{"NAME_SPACE": 2, "APP_ID": "11", "MESSAGE": "None"}], }, { "name": "DAZN", "country": ["usa", "can"], "id": ["57"], "config": [{"NAME_SPACE": 2, "APP_ID": "34", "MESSAGE": "None"}], }, { "name": "FitFusion by Jillian Michaels", "country": ["usa", "can"], "id": ["54"], "config": [{"NAME_SPACE": 2, "APP_ID": "39", "MESSAGE": "None"}], }, { "name": "Newsy", "country": ["usa", "can"], "id": ["38"], "config": [{"NAME_SPACE": 2, "APP_ID": "15", "MESSAGE": "None"}], }, { "name": "Cocoro TV", "country": ["usa", "can"], "id": ["63"], "config": [{"NAME_SPACE": 2, "APP_ID": "55", "MESSAGE": "None"}], }, { "name": "ConTV", "country": ["usa", "can"], "id": ["41"], "config": [{"NAME_SPACE": 2, "APP_ID": "18", "MESSAGE": "None"}], }, { "name": "Dove Channel", "country": ["usa", "can"], "id": ["42"], "config": [{"NAME_SPACE": 2, "APP_ID": "16", "MESSAGE": "None"}], }, { "name": "Love Destination", "country": ["*"], "id": ["64"], "config": [{"NAME_SPACE": 2, "APP_ID": "57", "MESSAGE": "None"}], }, { "name": "WatchFree", "country": ["usa"], "id": ["48"], "config": [{"NAME_SPACE": 2, "APP_ID": "22", "MESSAGE": "None"}], }, { "name": "AsianCrush", "country": ["usa", "can"], "id": ["50"], "config": [ { "NAME_SPACE": 2, "APP_ID": "27", "MESSAGE": "https://html5.asiancrush.com/?ua=viziosmartcast", } ], }, { "name": "Disney+", "country": ["usa"], "id": ["51"], "config": [ { "NAME_SPACE": 4, "APP_ID": "75", "MESSAGE": "https://cd-dmgz.bamgrid.com/bbd/vizio_tv/index.html", } ], }, ]
8,402
3,233
# # covid19.py # owid/latest/covid # from owid.catalog.meta import License, Source import datetime as dt import pandas as pd from owid.catalog import Dataset, Table from etl.helpers import downloaded MEGAFILE_URL = "https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv" def run(dest_dir: str) -> None: d = create_dataset(dest_dir) with downloaded(MEGAFILE_URL) as filename: df = pd.read_csv(filename) df["date"] = pd.to_datetime(df.date) for col in ["iso_code", "continent", "location"]: df[col] = df[col].astype("category") df.set_index(["iso_code", "date"], inplace=True) t = Table(df) t.metadata.short_name = "covid" d.add(t) def create_dataset(dest_dir: str) -> Dataset: d = Dataset.create_empty(dest_dir) d.metadata.short_name = "covid19" d.metadata.namespace = "owid" d.metadata.sources = [ Source( name="Multiple sources via Our World In Data", description="Our complete COVID-19 dataset maintained by Our World in Data. We will update it daily throughout the duration of the COVID-19 pandemic.", url="https://github.com/owid/covid-19-data/tree/master/public/data", source_data_url="https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/covid-19-data.csv", owid_data_url="https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/covid-19-data.csv", date_accessed=str(dt.date.today()), publication_date=str(dt.date.today()), publication_year=dt.date.today().year, ) ] d.metadata.licenses = [ License( name="Other (Attribution)", url="https://github.com/owid/covid-19-data/tree/master/public/data#license", ) ] d.save() return d
1,858
640
import unittest import interop from SUASSystem import InteropClientConverter from SUASSystem import Location class SDATestCase(unittest.TestCase): def setUp(self): self.interop_client = InteropClientConverter() def test_submit_target(self): compiled_target_info = { "latitude" : 38, "longitude" : -77, "orientation" : "s", "shape" : "circle", "background_color" : "white", "alphanumeric" : "ABC", "alphanumeric_color" : "black", } target_image_relative_path = "tests/images/target.PNG" target_id = self.interop_client.post_standard_target(compiled_target_info, target_image_relative_path) def test_submit_position(self): """ Test POST of position data """ self.interop_client.post_telemetry(Location(38, 76, 100), 350.0) def test_get_obstacles(self): """ Test GET of obstacles """ self.interop_client.get_obstacles() def test_get_active_mission(self): """ Test GET of active mission """ self.interop_client.get_active_mission()
1,175
362
import os import json #import fire from collections import defaultdict from pprint import pprint from itertools import product from .dataset import Dataset class DocRED(Dataset): def __init__(self, path): super(DocRED, self).__init__(name='DocRED') self.path = path self._init() self.train_data = None self.val_data = None self.test_data = None def _init(self): self.rel_info = json.load(open(os.path.join(self.path, 'rel_info.json'))) self.rel2id = {v: i for i, v in enumerate(self.rel_info.keys())} self.train_path = os.path.join(self.path, 'train_annotated.json') self.train_dist_path = os.path.join(self.path, 'train_distant.json') self.dev_path = os.path.join(self.path, 'dev.json') self.test_path = os.path.join(self.path, 'test.json') def _read_instances(self, path, labels=False): with open(path, 'rt') as in_file: data = json.load(in_file) output = [] for i, instance in enumerate(data): text = "" sentences_lenghts = [] l = 0 for sent in instance['sents']: sentences_lenghts.append(l) l += len(sent) text += " " + " ".join(sent) entities = [] ent2id = defaultdict(list) for i, ent in enumerate(instance['vertexSet']): idx = f"#{i}" for elem in ent: entities.append( (idx, elem['name'], sentences_lenghts[elem['sent_id']] + elem['pos'][0], sentences_lenghts[elem['sent_id']] + elem['pos'][1], elem['type']) ) ent2id[f"{elem['sent_id']}#{i}"].append(len(entities) - 1) if labels: relation_facts = [] for label in instance['labels']: heads, tails = [], [] for evidence in label['evidence']: for h in ent2id.get(f"{evidence}#{label['h']}", []): heads.append(h) for t in ent2id.get(f"{evidence}#{label['t']}", []): tails.append(t) for head, tail in product(heads, tails): relation_facts.append( (self.rel2id[label['r']], head, tail) ) text = self.tokenizer.encode(text) output.append( (text, entities) if not labels else (text, entities, relation_facts) ) return output def get_train(self): if not self.train_data: self.train_data = self._read_instances(self.train_path, labels=True) return self.train_data def get_val(self): if not self.val_data: self.val_data = self._read_instances(self.dev_path, labels=True) return self.val_data def get_test(self): if not self.test_data: self.test_data = self._read_instances(self.test_path, labels=False) return self.test_data def test(): dataset = DocRED('data/DocRED') for instance in dataset.get_train(): pprint(instance) break if __name__ == "__main__": #fire.Fire(test) test()
3,244
993
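_read_instances converts per-sentence mention positions into offsets in the concatenated document by accumulating sentence lengths. A pure-Python worked example of that flattening (the sentences and the mention are invented):

# Sketch of the offset flattening done in _read_instances: cumulative sentence
# lengths turn (sent_id, pos) mentions into positions in the whole document.
sents = [["Alice", "works", "at", "Acme", "."],
         ["She", "lives", "in", "Paris", "."]]

sentence_offsets, total = [], 0
for sent in sents:
    sentence_offsets.append(total)
    total += len(sent)                       # offsets: [0, 5]

mention = {"sent_id": 1, "pos": [3, 4], "name": "Paris"}
start = sentence_offsets[mention["sent_id"]] + mention["pos"][0]
end = sentence_offsets[mention["sent_id"]] + mention["pos"][1]

doc_tokens = [tok for sent in sents for tok in sent]
print(start, end, doc_tokens[start:end])     # 8 9 ['Paris']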
import os class Account: """Ethereum account""" ACCOUNTS_STORAGE = {} def __init__(self): self.address = "0x" + os.urandom(20).hex() self.ACCOUNTS_STORAGE[self.address] = self def __str__(self): return f'Account {self.address}' __repr__ = __str__
295
110
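The address is nothing more than 20 random bytes, hex-encoded and prefixed with 0x, giving a 42-character string. A two-line sketch of the format:

# Sketch of the address format used above: 20 random bytes, hex-encoded,
# prefixed with "0x" (42 characters in total).
import os

address = "0x" + os.urandom(20).hex()
print(address, len(address))   # e.g. 0x3f9a... 42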
from abc import ABC, abstractmethod from typing import Dict, Hashable, Tuple import torch import torch.nn as nn import swyft import swyft.utils from swyft.networks.channelized import ResidualNetWithChannel from swyft.networks.standardization import ( OnlineDictStandardizingLayer, OnlineStandardizingLayer, ) from swyft.types import Array, MarginalIndex, ObsShapeType class HeadTailClassifier(ABC): """Abstract class which ensures that child classifier networks will function with swyft""" @abstractmethod def head(self, observation: Dict[Hashable, torch.Tensor]) -> torch.Tensor: """convert the observation into a tensor of features Args: observation: observation type Returns: a tensor of features which can be utilized by tail """ pass @abstractmethod def tail(self, features: torch.Tensor, parameters: torch.Tensor) -> torch.Tensor: """finish the forward pass using features computed by head Args: features: output of head parameters: the parameters normally given to forward pass Returns: the same output as `forward(observation, parameters)` """ pass class ObservationTransform(nn.Module): def __init__( self, observation_key: Hashable, observation_shapes: ObsShapeType, online_z_score: bool, ) -> None: super().__init__() self.observation_key = observation_key self.observation_shapes = observation_shapes self.flatten = nn.Flatten() if online_z_score: self.online_z_score = OnlineDictStandardizingLayer(self.observation_shapes) else: self.online_z_score = nn.Identity() def forward(self, observation: Dict[Hashable, torch.Tensor]) -> torch.Tensor: z_scored_observation = self.online_z_score(observation) return self.flatten(z_scored_observation[self.observation_key]) # B, O @property def n_features(self) -> int: with torch.no_grad(): fabricated_observation = { key: torch.rand(2, *shape) for key, shape in self.observation_shapes.items() } _, n_features = self.forward(fabricated_observation).shape return n_features class ParameterTransform(nn.Module): def __init__( self, n_parameters: int, marginal_indices: MarginalIndex, online_z_score: bool ) -> None: super().__init__() self.register_buffer( "marginal_indices", torch.tensor(swyft.utils.tupleize_marginal_indices(marginal_indices)), ) self.n_parameters = torch.Size([n_parameters]) if online_z_score: self.online_z_score = OnlineStandardizingLayer(self.n_parameters) else: self.online_z_score = nn.Identity() def forward(self, parameters: torch.Tensor) -> torch.Tensor: parameters = self.online_z_score(parameters) return self.get_marginal_block(parameters, self.marginal_indices) # B, M, P @property def marginal_block_shape(self) -> Tuple[int, int]: return self.get_marginal_block_shape(self.marginal_indices) @staticmethod def is_marginal_block_possible(marginal_indices: MarginalIndex) -> bool: marginal_indices = swyft.utils.tupleize_marginal_indices(marginal_indices) return [len(marginal_indices[0]) == len(mi) for mi in marginal_indices] @classmethod def get_marginal_block_shape( cls, marginal_indices: MarginalIndex ) -> Tuple[int, int]: marginal_indices = swyft.utils.tupleize_marginal_indices(marginal_indices) assert cls.is_marginal_block_possible( marginal_indices ), f"Each tuple in {marginal_indices} must have the same length." 
return len(marginal_indices), len(marginal_indices[0]) @classmethod def get_marginal_block( cls, parameters: Array, marginal_indices: MarginalIndex ) -> torch.Tensor: depth = swyft.utils.depth(marginal_indices) tuple_marginal_indices = swyft.utils.tupleize_marginal_indices(marginal_indices) assert cls.is_marginal_block_possible( tuple_marginal_indices ), f"Each tuple in {tuple_marginal_indices} must have the same length." if depth in [0, 1, 2]: return torch.stack( [parameters[..., mi] for mi in tuple_marginal_indices], dim=1 ) else: raise ValueError( f"{marginal_indices} must be of the form (a) 2, (b) [2, 3], (c) [2, [1, 3]], or (d) [[0, 1], [1, 2]]." ) class MarginalClassifier(nn.Module): def __init__( self, n_marginals: int, n_combined_features: int, hidden_features: int, num_blocks: int, dropout_probability: float = 0.0, use_batch_norm: bool = True, ) -> None: super().__init__() self.n_marginals = n_marginals self.n_combined_features = n_combined_features self.net = ResidualNetWithChannel( channels=self.n_marginals, in_features=self.n_combined_features, out_features=1, hidden_features=hidden_features, num_blocks=num_blocks, dropout_probability=dropout_probability, use_batch_norm=use_batch_norm, ) def forward( self, features: torch.Tensor, marginal_block: torch.Tensor ) -> torch.Tensor: fb = features.unsqueeze(1).expand(-1, self.n_marginals, -1) # B, M, O combined = torch.cat([fb, marginal_block], dim=2) # B, M, O + P return self.net(combined).squeeze(-1) # B, M class Network(nn.Module, HeadTailClassifier): def __init__( self, observation_transform: nn.Module, parameter_transform: nn.Module, marginal_classifier: nn.Module, ) -> None: super().__init__() self.observation_transform = observation_transform self.parameter_transform = parameter_transform self.marginal_classifier = marginal_classifier def forward( self, observation: Dict[Hashable, torch.Tensor], parameters: torch.Tensor ) -> torch.Tensor: features = self.observation_transform(observation) # B, O marginal_block = self.parameter_transform(parameters) # B, M, P return self.marginal_classifier(features, marginal_block) # B, M def head(self, observation: Dict[Hashable, torch.Tensor]) -> torch.Tensor: return self.observation_transform(observation) # B, O def tail(self, features: torch.Tensor, parameters: torch.Tensor) -> torch.Tensor: marginal_block = self.parameter_transform(parameters) # B, M, P return self.marginal_classifier(features, marginal_block) # B, M def get_marginal_classifier( observation_key: Hashable, marginal_indices: MarginalIndex, observation_shapes: ObsShapeType, n_parameters: int, hidden_features: int, num_blocks: int, observation_online_z_score: bool = True, parameter_online_z_score: bool = True, ) -> nn.Module: observation_transform = ObservationTransform( observation_key, observation_shapes, online_z_score=observation_online_z_score ) n_observation_features = observation_transform.n_features parameter_transform = ParameterTransform( n_parameters, marginal_indices, online_z_score=parameter_online_z_score ) n_marginals, n_block_parameters = parameter_transform.marginal_block_shape marginal_classifier = MarginalClassifier( n_marginals, n_observation_features + n_block_parameters, hidden_features=hidden_features, num_blocks=num_blocks, ) return Network( observation_transform, parameter_transform, marginal_classifier, ) if __name__ == "__main__": pass
7,955
2,492
from output.models.nist_data.list_pkg.date.schema_instance.nistschema_sv_iv_list_date_min_length_1_xsd.nistschema_sv_iv_list_date_min_length_1 import NistschemaSvIvListDateMinLength1 __all__ = [ "NistschemaSvIvListDateMinLength1", ]
238
97
from collections import defaultdict from enum import auto from typing import Iterable, List, Optional, TYPE_CHECKING, Union from mstrio import config from mstrio.api import contacts from mstrio.distribution_services.contact_group import ContactGroup from mstrio.distribution_services.device import Device from mstrio.utils.entity import auto_match_args_entity, DeleteMixin, EntityBase from mstrio.utils.enum_helper import AutoName from mstrio.utils.helper import ( camel_to_snake, delete_none_values, Dictable, fetch_objects, get_objects_id ) from mstrio.users_and_groups.user import User if TYPE_CHECKING: from mstrio.connection import Connection class ContactDeliveryType(AutoName): EMAIL = auto() FILE = auto() PRINTER = auto() FTP = auto() MOBILE_ANDROID = auto() MOBILE_IPHONE = auto() MOBILE_IPAD = auto() UNSUPPORTED = auto() class ContactAddress(Dictable): """Representation of contact address object Attributes: id: id of contact address, optional name: contact address' name physical_address: physical address of contact delivery_type: object of type ContactDeliveryType is_default: specifies if address is default, optional, default value: False device: instance of Device or string (containing device's id), if device is a string, connection is required connection: instance of Connection, optional, is required if device is string """ _FROM_DICT_MAP = {'delivery_type': ContactDeliveryType, 'device': Device.from_dict} def __init__(self, name: str, physical_address: str, delivery_type: Union[ContactDeliveryType, str], device: Union['Device', str], id: Optional[str] = None, is_default: bool = False, connection: Optional['Connection'] = None): self.id = id self.name = name self.physical_address = physical_address self.is_default = is_default self.delivery_type = delivery_type if isinstance( delivery_type, ContactDeliveryType) else ContactDeliveryType(delivery_type) if isinstance(device, Device): self.device = device else: if not connection: raise ValueError('Argument: connection is required if device is a string') self.device = Device(connection, id=device) def __repr__(self) -> str: param_dict = auto_match_args_entity(self.__init__, self, exclude=['self'], include_defaults=False) params = [ f"{param}={self.delivery_type}" if param == 'delivery_type' else f'{param}={repr(value)}' for param, value in param_dict.items() ] formatted_params = ', '.join(params) return f'ContactAddress({formatted_params})' def to_dict(self, camel_case=True) -> dict: result = { 'name': self.name, 'id': self.id, 'physicalAddress': self.physical_address, 'deliveryType': self.delivery_type.value, 'deviceId': self.device.id, 'deviceName': self.device.name, 'isDefault': self.is_default } return result if camel_case else camel_to_snake(result) @classmethod def from_dict(cls, source, connection, to_snake_case=True) -> 'ContactAddress': source = source.copy() device_id = source.pop('deviceId') device_name = source.pop('deviceName') source['device'] = {'id': device_id, 'name': device_name} return super().from_dict(source, connection, to_snake_case) def list_contacts(connection: 'Connection', to_dictionary: bool = False, limit: Optional[int] = None, **filters) -> Union[List['Contact'], List[dict]]: """Get all contacts as list of Contact objects or dictionaries. Optionally filter the contacts by specifying filters. Args: connection: MicroStrategy connection object to_dictionary: If True returns a list of contact dicts, otherwise returns a list of contact objects limit: limit the number of elements returned. 
If `None` (default), all objects are returned. **filters: Available filter parameters: ['id', 'name', 'description', 'enabled'] """ return Contact._list_contacts( connection=connection, to_dictionary=to_dictionary, limit=limit, **filters ) class Contact(EntityBase, DeleteMixin): """Object representation of Microstrategy Contact object Attributes: name: contact's name id: contact's id description: contact's description enabled: specifies if a contact is enabled linked_user: user linked to contact, instance of User contact_addresses: list of contact's addresses, instances of ContactAddress memberships: list of Contact Groups that the contact belongs to connection: instance of Connection class, represents connection to MicroStrategy Intelligence Server """ _FROM_DICT_MAP = { **EntityBase._FROM_DICT_MAP, 'linked_user': User.from_dict, 'contact_addresses': [ContactAddress.from_dict], 'memberships': [ContactGroup.from_dict], } _API_GETTERS = { ('id', 'name', 'description', 'enabled', 'linked_user', 'memberships', 'contact_addresses'): contacts.get_contact } _API_DELETE = staticmethod(contacts.delete_contact) _API_PATCH = { ('name', 'description', 'enabled', 'linked_user', 'contact_addresses', 'memberships'): (contacts.update_contact, 'put') } _PATCH_PATH_TYPES = { 'name': str, 'description': str, 'enabled': bool, 'linked_user': dict, 'contact_addresses': list, 'memberships': list } def __init__(self, connection: 'Connection', id: Optional[str] = None, name: Optional[str] = None): """Initialize Contact object by passing id or name. When `id` is provided, name is omitted. Args: connection: MicroStrategy connection object id: ID of Contact name: name of Contact """ if id is None and name is None: raise ValueError("Please specify either 'id' or 'name' parameter in the constructor.") if id is None: result = self._list_contacts( connection=connection, name=name, to_dictionary=True ) if result: object_data = result[0] object_data['connection'] = connection self._init_variables(**object_data) else: raise ValueError(f"There is no Contact named: '{name}'") else: super().__init__(connection, id) def _init_variables(self, **kwargs) -> None: super()._init_variables(**kwargs) self.description = kwargs.get('description') self.enabled = kwargs.get('enabled') linked_user = kwargs.get("linked_user") self.linked_user = User.from_dict(linked_user, self.connection) if linked_user else None addresses = kwargs.get('contact_addresses') self.contact_addresses = [ ContactAddress.from_dict(address, self.connection) for address in addresses ] if addresses else None memberships = kwargs.get('memberships') self.memberships = [ ContactGroup.from_dict(m, self.connection) for m in memberships ] if memberships else None @classmethod def create(cls, connection: 'Connection', name: str, linked_user: Union['User', str], contact_addresses: Iterable[Union['ContactAddress', dict]], description: Optional[str] = None, enabled: bool = True) -> 'Contact': """Create a new contact. 
Args: connection: MicroStrategy connection object returned by `connection.Connection()` name: contact name linked_user: user linked to contact contact_addresses: list of contact addresses description: description of contact enabled: specifies if contact should be enabled Returns: Contact object """ body = { 'name': name, 'description': description, 'enabled': enabled, 'linkedUser': { 'id': get_objects_id(linked_user, User) }, 'contactAddresses': [ address.to_dict() if isinstance(address, ContactAddress) else address for address in contact_addresses ], } body = delete_none_values(body) response = contacts.create_contact(connection, body).json() if config.verbose: print( f"Successfully created contact named: '{name}' with ID: '{response['id']}'" ) return cls.from_dict(source=response, connection=connection) def alter(self, name: Optional[str] = None, description: Optional[str] = None, enabled: Optional[bool] = None, linked_user: Optional[Union['User', str]] = None, contact_addresses: Optional[Iterable[Union['ContactAddress', dict]]] = None): """Update properties of a contact Args: name: name of a contact description: description of a contact enabled: specifies if a contact is enabled linked_user: an object of class User linked to the contact contact_addresses: list of contact addresses """ linked_user = {'id': get_objects_id(linked_user, User)} if linked_user else None func = self.alter args = func.__code__.co_varnames[:func.__code__.co_argcount] defaults = func.__defaults__ # type: ignore defaults_dict = dict(zip(args[-len(defaults):], defaults)) if defaults else {} local = locals() properties = defaultdict(dict) for property_key in defaults_dict: if local[property_key] is not None: properties[property_key] = local[property_key] self._alter_properties(**properties) @classmethod def _list_contacts(cls, connection: 'Connection', to_dictionary: bool = False, limit: Optional[int] = None, **filters ) -> Union[List['Contact'], List[dict]]: """Get all contacts as list of Contact objects or dictionaries. Optionally filter the contacts by specifying filters. Args: connection: MicroStrategy connection object to_dictionary: If True returns a list of contact dicts, otherwise returns a list of contact objects limit: limit the number of elements returned. If `None` (default), all objects are returned. **filters: Available filter parameters: ['id', 'name', 'description', 'enabled'] """ objects = fetch_objects( connection=connection, api=contacts.get_contacts, limit=limit, filters=filters, dict_unpack_value='contacts' ) if to_dictionary: return objects return [ cls.from_dict(source=obj, connection=connection) for obj in objects ] def add_to_contact_group(self, contact_group: Union['ContactGroup', str]): """Add to ContactGroup Args: contact_group: contact group to which add this contact """ if isinstance(contact_group, str): contact_group = ContactGroup(self.connection, id=contact_group) contact_group.add_members([self]) self.fetch() def remove_from_contact_group(self, contact_group: Union['ContactGroup', str]): """Remove from ContactGroup Args: contact_group: contact group from which to remove this contact """ if isinstance(contact_group, str): contact_group = ContactGroup(self.connection, id=contact_group) contact_group.remove_members([self]) self.fetch()
12,386
3,220
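The module above only defines the Contact API without showing it in use; the following is a minimal, untested usage sketch. It assumes a reachable MicroStrategy Intelligence Server, that the module is importable as in released mstrio-py, and that every ALL-CAPS value is a hypothetical placeholder.

from mstrio.connection import Connection
# Assumes the module above is available under its usual mstrio path.
from mstrio.distribution_services.contact import Contact, ContactAddress, list_contacts

# Hypothetical connection details; replace with a real environment.
conn = Connection(base_url="https://env.example.com/MicroStrategyLibrary/api",
                  username="admin", password="secret")

# List existing contacts, optionally filtering by name.
for contact in list_contacts(conn, name="John Doe"):
    print(contact.id, contact.name)

# Create a contact with one email address (device and user ids are placeholders).
address = ContactAddress(name="work email", physical_address="jdoe@example.com",
                         delivery_type="email", device="DEVICE_ID", connection=conn)
new_contact = Contact.create(conn, name="John Doe", linked_user="USER_ID",
                             contact_addresses=[address])
new_contact.add_to_contact_group("CONTACT_GROUP_ID")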
# Iterator built from a generator function
def fib():
    prev, curr = 0, 1
    while True:
        yield curr
        prev, curr = curr, prev + curr


f = fib()

# Walk our iterator by calling next(). Inside a for loop, iter(f) would be called automatically.
print(0, end=' ')
for n in range(16):
    print(next(f), end=' ')
330
114
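As a side note to the generator entry above, the same traversal can be written with itertools.islice instead of repeated next() calls; this is only an illustrative sketch using the standard library.

from itertools import islice

def fib():
    prev, curr = 0, 1
    while True:
        yield curr
        prev, curr = curr, prev + curr

# Take the first 16 Fibonacci numbers without calling next() by hand.
print(list(islice(fib(), 16)))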
#!/usr/bin/env python
import os
import sys
import platform
from setuptools import setup, Extension

if platform.system() != 'Windows' and platform.python_implementation() == "CPython":
    ext_modules = [Extension('sevent/cbuffer', sources=['sevent/cbuffer.c'])]
else:
    ext_modules = []

if os.path.exists("README.md"):
    if sys.version_info[0] >= 3:
        with open("README.md", encoding="utf-8") as fp:
            long_description = fp.read()
    else:
        with open("README.md") as fp:
            long_description = fp.read()
else:
    long_description = ''

setup(
    name='sevent',
    version='0.4.6',
    packages=['sevent', 'sevent.impl', 'sevent.coroutines', 'sevent.helpers'],
    ext_modules=ext_modules,
    package_data={
        '': ['README.md'],
    },
    install_requires=[
        'dnslib>=0.9.7',
        'greenlet>=0.4.2',
    ],
    author='snower',
    author_email='sujian199@gmail.com',
    url='https://github.com/snower/sevent',
    license='MIT',
    description='lightweight event loop',
    long_description=long_description,
    long_description_content_type="text/markdown",
)
1,166
413
""" mbed CMSIS-DAP debugger Copyright (c) 2006-2013 ARM Limited Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from cortex_m import CortexM class LPC1768(CortexM): def __init__(self, transport): CortexM.__init__(self, transport) self.auto_increment_page_size = 0x1000 def reset(self): # halt processor self.halt() # not remap 0x0000-0x0020 to anything but the flash self.writeMemory(0x400FC040, 1) CortexM.reset(self) def resetStopOnReset(self): # halt processor self.halt() # not remap 0x0000-0x0020 to anything but the flash self.writeMemory(0x400FC040, 1) CortexM.resetStopOnReset(self)
1,229
405
import os import warnings warnings.simplefilter("ignore") import csv import numpy import hyperopt from hyperopt import Trials,tpe,hp,fmin from keras.utils import to_categorical import pickle from loadConfiguration import Configuration from objectCreation import createImagedObjects from trainBCNN import runOptimisingTrial,runTrial from createModelPerformancePlots import createAccuracyPlots,createConfusionMatricies from getObjectHierarchyLabels import getObjectHierarchyLabels def main(): #Below various configurations are loaded. inputConfiguration=Configuration(os.getcwd()+"/configurations/inputConfiguration.txt","=") trainSingleModel=inputConfiguration.getConfigurationValue("trainSingleModel","bool") filePrefix=inputConfiguration.getConfigurationValue("filePrefix","raw") allowedFileSuffixes=inputConfiguration.getConfigurationValue("useFileSuffix","raw") allowedFileSuffixes=[allowedFileSuffixes] if(type(allowedFileSuffixes)==str) else allowedFileSuffixes desiredImageSize=inputConfiguration.getConfigurationValue("desiredImageSize","int") dataFolder=os.getcwd()+inputConfiguration.getConfigurationValue("dataFolder","raw") contigiousEqualAreaRejectionThreshold=inputConfiguration.getConfigurationValue("contigiousEqualAreaRejectionThreshold","int") objectLeafLabelTotalQuantity=inputConfiguration.getConfigurationValue("objectLeafLabelTotalQuantity","int") transformedObjectImageRemovalChance=inputConfiguration.getConfigurationValue("transformedObjectImageRemovalChance","float") objectTypeLabels=inputConfiguration.getConfigurationValue("allowedObjectType","raw") #A map between object types and their corresponding label lists is created. objectTypeLabelDictionary={i[0]:tuple(i[1:]) for i in objectTypeLabels} objectTypePossibleLabelSets,objectHierarchyDepth=getObjectHierarchyLabels(list(objectTypeLabelDictionary.values())) #The loaded input configuration is printed. 
print("Input configuration loaded:") print( "Will train a single model" if(trainSingleModel) else " Will optimise hyperparameters") print(" Will use the following file suffixes:") for currentSuffix in allowedFileSuffixes: print(" "+currentSuffix) print(" Image size: "+str(desiredImageSize)+" pixels") print(" Main image folder: "+dataFolder) print(" Contigious colour area rejection threshold: "+("Disabled" if(contigiousEqualAreaRejectionThreshold is None) else str(contigiousEqualAreaRejectionThreshold))) print(" Minimum objects per object category to load/create: "+str(objectLeafLabelTotalQuantity)) print(" Chance of a individual image being removed from an augmented image: "+str(transformedObjectImageRemovalChance)) print(" Labels at each level in the object type heirarchy:") for i in range(0,objectHierarchyDepth): print(" Level "+str(i)+":") for j in objectTypePossibleLabelSets[i]: print(" "+j) trainingConfiguration=Configuration(os.getcwd()+"/configurations/trainingConfiguration.txt","=") batchSize=trainingConfiguration.getConfigurationValue("batchSize","int") epochNumber=trainingConfiguration.getConfigurationValue("epochNumber","int") trainingLossWeight=trainingConfiguration.getConfigurationValue("trainingLossWeight","float") validationFraction=trainingConfiguration.getConfigurationValue("validationFraction","float") outputFilePrefix=trainingConfiguration.getConfigurationValue("outputFilePrefix","raw") hyperparameterOptimisationMaximumEvaluations=trainingConfiguration.getConfigurationValue("hyperparameterOptimisationMaximumEvaluations","int") dropoutFraction=trainingConfiguration.getConfigurationValue("dropoutFraction","float") convolutionLayersPerBlock=trainingConfiguration.getConfigurationValue("convolutionLayersPerBlock","int") extraFirstBlock=trainingConfiguration.getConfigurationValue("extraFirstBlock","bool") initalLayerFilterCount=trainingConfiguration.getConfigurationValue("initalLayerFilterCount","int") filterCountBlockMultiplicativeFactor=trainingConfiguration.getConfigurationValue("filterCountBlockMultiplicativeFactor","float") initalLayerKernalSize=trainingConfiguration.getConfigurationValue("initalLayerKernalSize","int") kernalSizeBlockMultiplicitiveFactor=trainingConfiguration.getConfigurationValue("kernalSizeBlockMultiplicitiveFactor","float") learningRate=trainingConfiguration.getConfigurationValue("learningRate","float") gpuQuantity=trainingConfiguration.getConfigurationValue("gpuQuantity","int") earlyStoppingMinDelta=trainingConfiguration.getConfigurationValue("earlyStoppingMinDelta","float") earlyStoppingPatience=trainingConfiguration.getConfigurationValue("earlyStoppingPatience","int") #the loaded training configuration is printed. 
print("\n") print("Training configuration loaded:") print(" Batch size: "+str(batchSize)) print(" Epochs trained per level in hierarchy: "+str(epochNumber)) print(" Current hierarchy level training loss weight: "+str(trainingLossWeight)) print(" Validation object fraction: "+str(validationFraction)) print(" Output file prefix: "+outputFilePrefix) if(trainSingleModel): print(" The following parameters will be used for training the model:") print(" dropoutFraction: "+str(dropoutFraction)) print(" convolutionLayersPerBlock: "+str(convolutionLayersPerBlock)) print(" extraFirstBlock: "+str(extraFirstBlock)) print(" initalLayerFilterCount: "+str(initalLayerFilterCount)) print(" filterCountBlockMultiplicativeFactor: "+str(filterCountBlockMultiplicativeFactor)) print(" initalLayerKernalSize: "+str(initalLayerKernalSize)) print(" kernalSizeBlockMultiplicitiveFactor: "+str(kernalSizeBlockMultiplicitiveFactor)) print(" learningRate: "+str(learningRate)) else: print(" Maximum number of hyperparameter optimisation evaluations: "+str(hyperparameterOptimisationMaximumEvaluations)) print(" Number of GPUs to use for training: "+str(gpuQuantity)) print(" Early stopping minimum loss delta: "+str(earlyStoppingMinDelta)) print(" Early stopping patience: "+str(earlyStoppingPatience)) hyperparameterLimitsConfiguration=Configuration(os.getcwd()+"/configurations/hyperparameterLimitsConfiguration.txt","=") minimumDropoutFraction=hyperparameterLimitsConfiguration.getConfigurationValue("minimumDropoutFraction","float") maximumDropoutFraction=hyperparameterLimitsConfiguration.getConfigurationValue("maximumDropoutFraction","float") possibleConvolutionLayersPerBlock=hyperparameterLimitsConfiguration.getConfigurationValue("possibleConvolutionLayersPerBlock","int") possibleInitalLayerFilterCount=hyperparameterLimitsConfiguration.getConfigurationValue("possibleInitalLayerFilterCount","int") possibleFilterCountBlockMultiplicativeFactor=hyperparameterLimitsConfiguration.getConfigurationValue("possibleFilterCountBlockMultiplicativeFactor","float") possibleInitalLayerKernalSize=hyperparameterLimitsConfiguration.getConfigurationValue("possibleInitalLayerKernalSize","int") possibleKernalSizeBlockMultiplicitiveFactor=hyperparameterLimitsConfiguration.getConfigurationValue("possibleKernalSizeBlockMultiplicitiveFactor","float") minimumLearningRate=hyperparameterLimitsConfiguration.getConfigurationValue("minimumLearningRate","float") maximumLearningRate=hyperparameterLimitsConfiguration.getConfigurationValue("maximumLearningRate","float") if(not trainSingleModel): print("\n") print(" Hyperparameters will be optimised through the following ranges:") print(" dropoutFraction: "+str(minimumDropoutFraction)+"-"+str(maximumDropoutFraction)) print(" convolutionLayersPerBlock: "+str(possibleConvolutionLayersPerBlock)) print(" extraFirstBlock: True or False") print(" initalLayerFilterCount: "+str(possibleInitalLayerFilterCount)) print(" filterCountBlockMultiplicativeFactor: "+str(possibleFilterCountBlockMultiplicativeFactor)) print(" initalLayerKernalSize: "+str(possibleInitalLayerKernalSize)) print(" kernalSizeBlockMultiplicitiveFactor: "+str(possibleKernalSizeBlockMultiplicitiveFactor)) print(" learningRate: "+str(minimumLearningRate)+"-"+str(maximumLearningRate)) trainObjects,validationObjects=createImagedObjects(dataFolder,objectTypeLabelDictionary,desiredImageSize,contigiousEqualAreaRejectionThreshold, filePrefix,allowedFileSuffixes,validationFraction,objectLeafLabelTotalQuantity,transformedObjectImageRemovalChance) #Data from the 
loaded/created ImagedObjects is turned into a format that can be used in the neural network. numpy.random.shuffle(trainObjects) numpy.random.shuffle(validationObjects) trainObjectImageList=[currentObject.imageData for currentObject in trainObjects] trainObjectIntegerLabelList=[[None for j in range(0,len(trainObjects))]for i in range(0,objectHierarchyDepth)] validationObjectImageList=[currentObject.imageData for currentObject in validationObjects] validationObjectIntegerLabelList=[[None for j in range(0,len(validationObjects))]for i in range(0,objectHierarchyDepth)] #Creates a list that contains the labels for each object represented as integers instead of strings. for i in range(0,objectHierarchyDepth): trainObjectIntegerLabelList[i]=[(objectTypePossibleLabelSets[i]).index(currentObject.label[i]) for currentObject in trainObjects] validationObjectIntegerLabelList[i]=[(objectTypePossibleLabelSets[i]).index(currentObject.label[i]) for currentObject in validationObjects] #The labels are one-hot encoded for each level in the object heirarchy. trainLabels=[to_categorical(trainObjectIntegerLabelList[i],len(objectTypePossibleLabelSets[i])) for i in range(0,objectHierarchyDepth)] validationLabels=[to_categorical(validationObjectIntegerLabelList[i],len(objectTypePossibleLabelSets[i])) for i in range(0,objectHierarchyDepth)] #The above data is put into a form that can be used by the model. xTrain=numpy.zeros(shape=(len(trainObjectImageList),)+trainObjectImageList[0].shape) xValidation=numpy.zeros(shape=(len(validationObjectImageList),)+validationObjectImageList[0].shape) for currentIndex,currentImageData in enumerate(trainObjectImageList): xTrain[currentIndex,:,:,:]=currentImageData for currentIndex,currentImageData in enumerate(validationObjectImageList): xValidation[currentIndex,:,:,:]=currentImageData #Each output of the model is accociated with a set of labels. outputLayerNames=["out"+str(i+1) for i in range(0,objectHierarchyDepth)] #Each output layer is labeled sequentially from the output closest to the input layer. yTrain=dict(zip(outputLayerNames,trainLabels)) yValidation=dict(zip(outputLayerNames,validationLabels)) nonOptimisingModelParameters=validationObjectImageList[0].shape,outputLayerNames,objectTypePossibleLabelSets,gpuQuantity nonOptimisingTrainParameters=xTrain,xValidation,yTrain,yValidation,batchSize,epochNumber,trainingLossWeight,earlyStoppingMinDelta,earlyStoppingPatience nonOptimisingF1Parameters=validationObjectIntegerLabelList,objectHierarchyDepth if(trainSingleModel): #For training a single model with specified hyperparameters. modelHyperparameters=[dropoutFraction,convolutionLayersPerBlock,extraFirstBlock,initalLayerFilterCount,filterCountBlockMultiplicativeFactor,initalLayerKernalSize,kernalSizeBlockMultiplicitiveFactor,learningRate] result=runTrial(modelHyperparameters,nonOptimisingModelParameters,nonOptimisingTrainParameters,nonOptimisingF1Parameters) outputModelFileName=outputFilePrefix+"TrainedModel.h5" outputModelHistoryFileName=outputFilePrefix+"TrainingHistory.npy" print("\n") print("Saving model file at location "+os.getcwd()+"/"+outputModelFileName) print("Saving model training history file at location "+os.getcwd()+"/"+outputModelHistoryFileName) result[0].save(outputModelFileName) #The model is saved. numpy.save(outputModelHistoryFileName,result[1]) #The training history is saved. 
outputConfusionMatriciesFilePath=outputFilePrefix+"ConfusionMatricies.png" print("\n") print("Creating accuracy plots, will be saved in folder "+os.getcwd()+" as .png files with the prefix "+outputFilePrefix+"AccuracyPlot_") createAccuracyPlots(result[1],outputLayerNames,outputFilePrefix) print("Creating confusion matricies, plot will be saved at location: "+os.getcwd()+"/"+outputConfusionMatriciesFilePath) createConfusionMatricies(model=result[0],testObjects=validationObjects,testObjectImageList=validationObjectImageList, imageSaveFilePath=outputConfusionMatriciesFilePath,objectHierarchyLabels=objectTypePossibleLabelSets) else: #For hyperparameter optimisation rotLambda=lambda parameters:runOptimisingTrial(parameters,nonOptimisingModelParameters,nonOptimisingTrainParameters,nonOptimisingF1Parameters) space=[hp.uniform("dropoutFraction",minimumDropoutFraction,maximumDropoutFraction), hp.choice("convolutionLayersPerBlock",possibleConvolutionLayersPerBlock), hp.choice("extraFirstBlock",[True,False]), hp.choice("initalLayerFilterCount",possibleInitalLayerFilterCount), hp.choice("filterCountBlockMultiplicativeFactor",possibleFilterCountBlockMultiplicativeFactor), hp.choice("initalLayerKernalSize",possibleInitalLayerKernalSize), hp.choice("kernalSizeBlockMultiplicitiveFactor",possibleKernalSizeBlockMultiplicitiveFactor), hp.uniform("learningRate",minimumLearningRate,maximumLearningRate)] trials=Trials() bestResults=fmin(rotLambda,space=space,algo=tpe.suggest,max_evals=hyperparameterOptimisationMaximumEvaluations,trials=trials) optimisedHyperparameters=hyperopt.space_eval(space,bestResults) print("\n") print("Optimised hyperparameters: ",optimisedHyperparameters) outputOptimisedHyperparameterFileName=outputFilePrefix+"OptimisedHyperparameters.txt" print("Saving optimised hyperparmeters at location: "+os.getcwd()+"/"+outputOptimisedHyperparameterFileName) outputOptimisedHyperparameterFile=open(outputOptimisedHyperparameterFileName,"w") outputOptimisedHyperparameterFileWriter=csv.writer(outputOptimisedHyperparameterFile,delimiter="=") outputOptimisedHyperparameterFileWriter.writerow(["dropoutFraction",optimisedHyperparameters[0]]) outputOptimisedHyperparameterFileWriter.writerow(["convolutionLayersPerBlock",optimisedHyperparameters[1]]) outputOptimisedHyperparameterFileWriter.writerow(["extraFirstBlock",optimisedHyperparameters[2]]) outputOptimisedHyperparameterFileWriter.writerow(["initalLayerFilterCount",optimisedHyperparameters[3]]) outputOptimisedHyperparameterFileWriter.writerow(["filterCountBlockMultiplicativeFactor",optimisedHyperparameters[4]]) outputOptimisedHyperparameterFileWriter.writerow(["initalLayerKernalSize",optimisedHyperparameters[5]]) outputOptimisedHyperparameterFileWriter.writerow(["kernalSizeBlockMultiplicitiveFactor",optimisedHyperparameters[6]]) outputOptimisedHyperparameterFileWriter.writerow(["learningRate",optimisedHyperparameters[7]]) outputOptimisedHyperparameterFile.close() print("\n") outputTrialsFileName=outputFilePrefix+"TrainingTrials.p" print("Saving trials pickle file at location "+os.getcwd()+"/"+outputTrialsFileName) pickle.dump(trials,open(outputTrialsFileName,"wb")) main()
16,444
4,526
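The training script above interleaves configuration loading with the hyperopt search, which makes the search pattern hard to see. Below is a stripped-down sketch of the same hp.uniform/hp.choice plus fmin pattern, with a toy objective standing in for runOptimisingTrial; the parameter names and ranges are illustrative only.

from hyperopt import Trials, fmin, hp, tpe

# Toy objective standing in for runOptimisingTrial: hyperopt minimises the returned value.
def objective(params):
    dropout, layers_per_block, learning_rate = params
    return (dropout - 0.3) ** 2 + (learning_rate - 1e-3) ** 2 + 0.01 * layers_per_block

space = [
    hp.uniform("dropoutFraction", 0.0, 0.6),
    hp.choice("convolutionLayersPerBlock", [1, 2, 3]),
    hp.uniform("learningRate", 1e-4, 1e-2),
]

trials = Trials()
best = fmin(objective, space=space, algo=tpe.suggest, max_evals=25, trials=trials)
print(best)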
from torch import cat, cos, float64, sin, stack, tensor
from torch.nn import Module, Parameter

from core.dynamics import RoboticDynamics


class CartPole(RoboticDynamics, Module):
    def __init__(self, m_c, m_p, l, g=9.81):
        RoboticDynamics.__init__(self, 2, 1)
        Module.__init__(self)
        self.params = Parameter(tensor([m_c, m_p, l, g], dtype=float64))

    def D(self, q):
        m_c, m_p, l, _ = self.params
        _, theta = q
        return stack(
            (stack([m_c + m_p, m_p * l * cos(theta)]),
             stack([m_p * l * cos(theta), m_p * (l ** 2)])))

    def C(self, q, q_dot):
        _, m_p, l, _ = self.params
        z = tensor(0, dtype=float64)
        _, theta = q
        _, theta_dot = q_dot
        return stack((stack([z, -m_p * l * theta_dot * sin(theta)]),
                      stack([z, z])))

    def U(self, q):
        _, m_p, l, g = self.params
        _, theta = q
        return m_p * g * l * cos(theta)

    def G(self, q):
        _, m_p, l, g = self.params
        _, theta = q
        z = tensor(0, dtype=float64)
        return stack([z, -m_p * g * l * sin(theta)])

    def B(self, q):
        return tensor([[1], [0]], dtype=float64)
1,202
461
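A short sketch of how the CartPole dynamics above might be queried. The mass and length values are arbitrary, and only the matrix-valued methods defined in the class itself are called; the RoboticDynamics base class is not exercised here.

from torch import tensor, float64

# Arbitrary cart mass, pole mass and pole length.
cp = CartPole(m_c=1.0, m_p=0.2, l=0.5)

q = tensor([0.0, 0.1], dtype=float64)       # cart position, pole angle
q_dot = tensor([0.0, 0.0], dtype=float64)   # corresponding velocities

print(cp.D(q))         # 2x2 inertia matrix
print(cp.C(q, q_dot))  # Coriolis terms
print(cp.G(q))         # gravity vector
print(cp.B(q))         # input matrix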
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

#######################################################################
# This script imports your Last.fm listening history                  #
# inside a MySQL or Sqlite database.                                  #
#                                                                     #
# Copyright (c) 2015-2020, Nicolas Meier                              #
#######################################################################

import json
import logging
import sys

from lfmconf.lfmconf import get_lastfm_conf
from lfmdb import lfmdb
from stats.stats import LastfmStats, recent_tracks, \
    retrieve_total_json_tracks_from_db
from queries.inserts import get_query_insert_json_track

logging.basicConfig(
    level=logging.INFO,
    format=f'%(asctime)s %(levelname)s %(message)s'
)

conf = get_lastfm_conf()
user = conf['lastfm']['service']['username']
api_key = conf['lastfm']['service']['apiKey']

lastfm_stats = LastfmStats.get_lastfm_stats(user, api_key)
total_pages = lastfm_stats.nb_delta_pages()
total_plays_in_db = lastfm_stats.nb_json_tracks_in_db

logging.info('Nb page to get: %d' % total_pages)

if total_pages == 0:
    logging.info('Nothing to update!')
    sys.exit(1)

all_pages = []
for page_num in range(total_pages, 0, -1):
    logging.info('Page %d of %d' % (page_num, total_pages))
    page = recent_tracks(user, api_key, page_num)
    while page.get('recenttracks') is None:
        logging.info('has no tracks. Retrying!')
        page = recent_tracks(user, api_key, page_num)
    all_pages.append(page)

# Iterate through all pages
num_pages = len(all_pages)
for page_num, page in enumerate(all_pages):
    logging.info('Page %d of %d' % (page_num + 1, num_pages))

    tracks = page['recenttracks']['track']

    # Remove the "nowplaying" track if found.
    if tracks[0].get('@attr'):
        if tracks[0]['@attr']['nowplaying'] == 'true':
            tracks.pop(0)

    # Get only the missing tracks.
    if page_num == 0:
        logging.info('First page')
        nb_plays = lastfm_stats.nb_plays_for_first_page()
        tracks = tracks[0: nb_plays]
        logging.info('Getting %d plays' % nb_plays)

    # On each page, iterate through all tracks
    num_tracks = len(tracks)
    json_tracks = []
    for track_num, track in enumerate(reversed(tracks)):
        logging.info('Track %d of %d' % (track_num + 1, num_tracks))
        json_tracks.append(json.dumps(track))

    try:
        lfmdb.insert_many(get_query_insert_json_track(), json_tracks)
    except Exception:
        sys.exit(1)

logging.info('Done! %d rows in table json_track.'
             % retrieve_total_json_tracks_from_db())
2,674
883
#!/usr/bin/env python3 """ data file read in data """ from typing import Tuple, Any import pandas as pd import tensorflow as tf from loguru import logger from utils import file_path_relative import numpy as np from transformers import DistilBertTokenizer NUM_ROWS_TRAIN: int = 15000 TEST_RATIO: float = 0.2 def _run_encode(texts: np.array, tokenizer: Any, maxlen: int = 512): """ Encoder for encoding the text into sequence of integers for transformer Input """ logger.info('encode') encodings = tokenizer( texts.tolist(), return_token_type_ids=False, padding='max_length', truncation=True, max_length=maxlen ) return np.array(encodings['input_ids']) def read_data_attention(strategy: tf.distribute.TPUStrategy, max_len: int, ) -> Tuple[np.array, np.array, np.array, np.array, tf.data.Dataset, tf.data.Dataset, tf.data.Dataset, int]: """ read data from attention models """ logger.info('reading data for attention models') # batch with number of tpu's batch_size = 16 * strategy.num_replicas_in_sync auto = tf.data.experimental.AUTOTUNE # First load the tokenizer tokenizer = DistilBertTokenizer.from_pretrained( 'distilbert-base-multilingual-cased') train = pd.read_csv(file_path_relative('jigsaw-toxic-comment-train.csv')) valid = pd.read_csv(file_path_relative('validation.csv')) test = pd.read_csv(file_path_relative('test.csv')) x_train = _run_encode(train['comment_text'].astype(str), tokenizer, maxlen=max_len) x_valid = _run_encode(valid['comment_text'].astype(str), tokenizer, maxlen=max_len) x_test = _run_encode(test['content'].astype( str), tokenizer, maxlen=max_len) y_train = train['toxic'].values y_valid = valid['toxic'].values train_dataset = ( tf.data.Dataset .from_tensor_slices((x_train, y_train)) .repeat() .shuffle(2048) .batch(batch_size) .prefetch(auto) ) valid_dataset = ( tf.data.Dataset .from_tensor_slices((x_valid, y_valid)) .batch(batch_size) .cache() .prefetch(auto) ) test_dataset = ( tf.data.Dataset .from_tensor_slices(x_test) .batch(batch_size) ) # return all datasets return x_train, x_valid, y_train, y_valid, train_dataset, valid_dataset, \ test_dataset, batch_size if __name__ == '__main__': raise RuntimeError('cannot run data attention on its own')
2,617
877
# Kratos imports
import KratosMultiphysics
import KratosMultiphysics.KratosUnittest as UnitTest
from KratosMultiphysics.WindEngineeringApplication.test_suite import SuiteFlags, TestSuite
import run_cpp_tests

# STL imports
import pathlib


class TestLoader(UnitTest.TestLoader):
    @property
    def suiteClass(self):
        return TestSuite


def AssembleTestSuites(enable_mpi=False):
    """ Populates the test suites to run.

    Populates the test suites to run. At least, it should populate the suites:
    "small", "nightly" and "all"

    Return
    ------
    suites: A dictionary of suites
        The set of suites with its test_cases added.
    """
    static_suites = UnitTest.KratosSuites

    # Test cases will be organized into lists first, then loaded into their
    # corresponding suites all at once
    local_cases = {}
    for key in static_suites.keys():
        local_cases[key] = []

    # Glob all test cases in this application
    this_directory = pathlib.Path(__file__).absolute().parent
    test_loader = TestLoader()
    all_tests = test_loader.discover(this_directory)

    # Sort globbed test cases into lists based on their suite flags
    # flags correspond to entries in KratosUnittest.TestSuites
    # (small, nightly, all, validation)
    #
    # Cases with the 'mpi' flag are added to mpi suites as well as their corresponding normal suites.
    # Cases with the 'mpi_only' flag are not added to normal suites.
    for test_case in all_tests:
        suite_flags = set(test_case.suite_flags)

        # Check whether the test case has a flag for mpi
        mpi = SuiteFlags.MPI in suite_flags
        mpi_only = SuiteFlags.MPI_ONLY in suite_flags

        # Don't add the test if it's mpi-exclusive and mpi is not enabled
        if (not enable_mpi) and mpi_only:
            continue

        # Remove mpi flags
        if mpi:
            suite_flags.remove(SuiteFlags.MPI)
        if mpi_only:
            suite_flags.remove(SuiteFlags.MPI_ONLY)

        # Add case to the corresponding suites
        for suite_flag in suite_flags:
            local_cases[suite_flag.name.lower()].append(test_case)
            if mpi or mpi_only:
                local_cases["mpi_" + suite_flag.name.lower()].append(test_case)

        # Put test in 'all' if it isn't already there
        if not (SuiteFlags.ALL in suite_flags):
            if not mpi_only:
                local_cases["all"].append(test_case)
            if mpi or mpi_only:
                local_cases["mpi_all"].append(test_case)

    # Load all sorted cases into the global suites
    for suite_name, test_cases in local_cases.items():
        static_suites[suite_name].addTests(test_cases)

    return static_suites


def Run(enable_mpi=False):
    UnitTest.runTests(AssembleTestSuites(enable_mpi=enable_mpi))


if __name__ == "__main__":
    Run(enable_mpi=False)
2,875
896
import re import uuid from copy import deepcopy from datetime import datetime from lxml import etree from lxml.html import xhtml_to_html from geoalchemy import WKTSpatialElement from geolucidate.functions import _cleanup, _convert from geolucidate.parser import parser_re from cadorsfeed import db from cadorsfeed.models import DailyReport, CadorsReport, ReportCategory from cadorsfeed.models import Aircraft, NarrativePart, Location, LocationRef from cadorsfeed.cadorslib.xpath_functions import extensions from cadorsfeed.cadorslib.narrative import process_narrative, normalize_ns from cadorsfeed.cadorslib.locations import LocationStore from cadorsfeed.aerodb import aerodromes_re, lookup NSMAP = {'h': 'http://www.w3.org/1999/xhtml', 'pyf': 'urn:uuid:fb23f64b-3c54-4009-b64d-cc411bd446dd', 'a': 'http://www.w3.org/2005/Atom', 'geo': 'http://www.w3.org/2003/01/geo/wgs84_pos#', 'aero':'urn:uuid:1469bf5a-50a9-4c9b-813c-af19f9d6824d'} def make_datetime(date, time): if time is None: time = "0000 Z" return datetime.strptime(date + " " + time, "%Y-%m-%d %H%M Z") def clean_html(tree): mytree = deepcopy(tree) for elem in mytree.iter(): for attr, val in elem.attrib.iteritems(): if attr.startswith('{'): del elem.attrib[attr] xhtml_to_html(mytree) return etree.tostring(normalize_ns(mytree), method="html", encoding=unicode) def format_parsed_report(parsed_report): report = CadorsReport.query.get( parsed_report['cadors_number']) or CadorsReport(uuid=uuid.uuid4()) parsed_report['timestamp'] = make_datetime(parsed_report['date'], parsed_report['time']) del parsed_report['date'] del parsed_report['time'] primary_locations = set() other_locations = set() if parsed_report['tclid'] != '': #try to do a db lookup data = lookup(parsed_report['tclid']) if data is not None: primary_locations.add(data) if parsed_report['location'] != '': location = parsed_report['location'] #Apply geolucidate and the aerodromes RE match = aerodromes_re.get_icao_re.search(location) if match: data = lookup(match.group()) primary_locations.add(data) match = parser_re.search(location) if match: (latitude, longitude) = _convert(*_cleanup(match.groupdict())) location = make_location(latitude, longitude) location.name = match.group() primary_locations.add(location) for narrative_part in parsed_report['narrative']: narrative_tree = process_narrative(narrative_part['narrative_text']) narrative_part['narrative_html'] = clean_html(narrative_tree) narrative_part['narrative_xml'] = etree.tostring(narrative_tree, method="xml", encoding=unicode) #do the location extraction here #parse out geolinks elements = narrative_tree.xpath( "//*[@class='geolink' and @geo:lat and @geo:long]", namespaces=NSMAP) for element in elements: longitude = element.attrib[ '{http://www.w3.org/2003/01/geo/wgs84_pos#}long'] latitude = element.attrib[ '{http://www.w3.org/2003/01/geo/wgs84_pos#}lat'] name = element.attrib['title'] location = make_location(latitude, longitude) location.name = name other_locations.add(location) #parse out aerodrome links elements = narrative_tree.xpath( "//*[@class='aerolink' and @aero:code]", namespaces=NSMAP) for element in elements: code = element.attrib[ '{urn:uuid:1469bf5a-50a9-4c9b-813c-af19f9d6824d}code'] other_locations.add(lookup(code)) for aircraft_part in parsed_report['aircraft']: if aircraft_part['flight_number'] is not None: match = re.match("([A-Z]{2,4})([0-9]{1,4})M?", aircraft_part['flight_number']) if match: aircraft_part['flight_number_operator'] = match.group(1) aircraft_part['flight_number_flight'] = match.group(2) 
report.categories = [] report.aircraft = [] report.narrative_parts = [] report.locations = [] for category in parsed_report['categories']: report.categories.append(ReportCategory(text=category)) del parsed_report['categories'] for aircraft_part in parsed_report['aircraft']: report.aircraft.append(Aircraft(**aircraft_part)) del parsed_report['aircraft'] for narrative_part in parsed_report['narrative']: report.narrative_parts.append(NarrativePart(**narrative_part)) del parsed_report['narrative'] for location in primary_locations: locref = LocationRef(report=report, location=location, primary=True) db.session.add(locref) other_locations -= primary_locations for location in other_locations: locref = LocationRef(report=report, location=location, primary=False) db.session.add(locref) for key, value in parsed_report.iteritems(): setattr(report, key, value) return report def make_location(latitude, longitude): wkt = "POINT(%s %s)" % (longitude, latitude) point = WKTSpatialElement(wkt) location = Location(location=point) return location
5,739
1,800
from sstcam_sandbox import get_checs
from TargetCalibSB.pedestal import PedestalTargetCalib
from TargetCalibSB import get_cell_ids_for_waveform
from CHECLabPy.core.io import TIOReader
from tqdm import tqdm
from glob import glob


def process(path):
    pedestal_path = path.replace("_r0.tio", "_ped.tcal")

    reader = TIOReader(path)
    pedestal = PedestalTargetCalib(
        reader.n_pixels, reader.n_samples - 32, reader.n_cells
    )

    desc = "Generating pedestal"
    for wfs in tqdm(reader, total=reader.n_events, desc=desc):
        if wfs.missing_packets:
            continue
        cells = get_cell_ids_for_waveform(wfs.first_cell_id, reader.n_samples, reader.n_cells)
        wfs = wfs[:, 32:]
        wfs.first_cell_id = cells[32]
        pedestal.add_to_pedestal(wfs, wfs.first_cell_id)
    pedestal.save_tcal(pedestal_path)


def main():
    input_paths = glob(get_checs("d181203_erlangen/pedestal/*.tio"))
    for path in input_paths:
        process(path)


if __name__ == '__main__':
    main()
1,016
393
class Solution:
    def search(self, nums: List[int], target: int) -> int:
        # if len(nums) == 1:
        #     return 1 if nums[0] == target else 0
        tmp = []
        i, j = 0, len(nums) - 1
        while i <= j:
            m = (i + j) // 2
            if nums[m] == target:
                tmp.append(m)
                if i < len(nums) and nums[i] == target:
                    tmp.append(i)
                i += 1
            elif nums[m] > target:
                j = m - 1
                if j >= 0 and nums[j] == target:
                    tmp.append(j)
            else:
                i = m + 1
                if i < len(nums) and nums[i] == target:
                    tmp.append(i)
        return len(set(tmp))
740
245
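The solution above counts how many times target occurs in a sorted list by collecting indices around the binary-search midpoint. For comparison, a more direct sketch of the same count using the standard bisect module is shown below.

from bisect import bisect_left, bisect_right
from typing import List

def count_occurrences(nums: List[int], target: int) -> int:
    # In a sorted list the occurrences of target form one contiguous run,
    # so the count is the distance between the left and right insertion points.
    return bisect_right(nums, target) - bisect_left(nums, target)

print(count_occurrences([1, 2, 2, 2, 3], 2))  # 3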
import pandas as pd
from typing import List, NamedTuple

from .timeseries import agg_by_category_by_date
from primitive_interfaces.base import PrimitiveBase


class AggregateByDateTimeCategory(PrimitiveBase[pd.DataFrame, List[str]]):

    __author__ = 'distil'

    def __init__(self):
        pass

    def get_params(self) -> dict:
        return {}

    def set_params(self, params: dict) -> None:
        self.params = params

    def get_call_metadata(self) -> {}:
        return {}

    def fit(self):
        pass

    def produce(self, inputs: pd.DataFrame, values: List[str] = [], groupby: List[str] = [],
                datetime=None, intervals=None, aggregation=None):
        return agg_by_category_by_date(inputs, datetime, values, groupby,
                                       interval=intervals, agg=aggregation)
821
247
class DataSufficiencyException(Exception):
    pass


class ModelFitException(Exception):
    pass


class ModelPredictException(Exception):
    pass
150
42
# 804. Unique Morse Code Words class Solution: def __init__(self): self.morse_code = [".-", "-...", "-.-.", "-..", ".", "..-.", "--.", "....", "..", ".---", "-.-", ".-..", "--", "-.", "---", ".--.", "--.-", ".-.", "...", "-", "..-", "...-", ".--", "-..-", "-.--", "--.."] self.alphabets = "abcdefghijklmnopqrstuvwxyz" self.alpha_morse = dict(zip(self.alphabets, self.morse_code)) def uniqueMorseRepresentations(self, words): """ :type words: List[str] :rtype: int """ word_set = [] for word in words: s = "" for ch in word: s += self.alpha_morse[ch] word_set.append(s) return len(list(set(word_set))) """ https://leetcode.com/problems/unique-morse-code-words/discuss/120675/\ Easy-and-Concise-Solution-C++JavaPython def uniqueMorseRepresentations(self, words): d = [".-", "-...", "-.-.", "-..", ".", "..-.", "--.", "....", "..", ".---", "-.-", ".-..", "--", "-.", "---", ".--.", "--.-", ".-.", "...", "-", "..-", "...-", ".--", "-..-", "-.--", "--.."] return len({''.join(d[ord(i) - ord('a')] for i in w) for w in words}) """ # 771. Jewels and Stones, 98.33% # https://leetcode.com/problems/jewels-and-stones/description/ def numJewelsInStones(self, J, S): """ :type J: str :type S: str :rtype: int """ count = 0 for jewel in J: for stone in S: if jewel == stone: count += 1 return count """ https://leetcode.com/problems/jewels-and-stones/discuss/113553/\ Easy-and-Concise-Solution-using-hash-set-C++JavaPython def numJewelsInStones(self, J, S): setJ = set(J) return sum(s in setJ for s in S) """ # 806. Number of Lines To Write String # https://leetcode.com/problems/number-of-lines-to-write-string/ def numberOfLines(self, widths, S): """ :type widths: List[int] :type S: str :rtype: List[int] """ lines = 1 line_width = 0 for ch in S: index = ord(ch) - ord('a') if line_width + widths[index] <= 100: line_width += widths[index] else: lines += 1 line_width = widths[index] return [lines, line_width] """ https://leetcode.com/problems/number-of-lines-to-write-string/discuss/\ 120666/Easy-Solution-6-lines-C++JavaPython def numberOfcurs(self, widths, S): res, cur = 1, 0 for i in S: width = widths[ord(i) - ord('a')] res += 1 if cur + width > 100 else 0 cur = width if cur + width > 100 else cur + width return [res, cur] """
2,723
1,036
import networkx as nx import matplotlib.pyplot as plt import matplotlib.patches as mpatches import numpy as np import logging from pygna import output from pygna.utils import YamlConfig import pandas as pd import random import string import seaborn as sns import pygna.output as output class BlockModel(object): def __init__(self, block_model_matrix, n_nodes: int = 10, nodes_percentage: list = None): """ This class implements a block model reading and elaboration methods :param block_model_matrix: the matrix to be used as block model :param n_nodes: the number of nodes :param nodes_percentage: the percentage of nodes to use for the calculations, passed through a list for example [0.5, 0.5] """ self.n_nodes = n_nodes self.nodes = ["N" + str(i) for i in range(n_nodes)] self.n_clusters = block_model_matrix.shape[0] self.graph = nx.Graph() self.bm = block_model_matrix self.nodes_in_block = False self.nodes_percentage = nodes_percentage self.cluster_dict = {} def set_nodes(self, nodes_names: list) -> None: """ Set the nodes name of the block model :param nodes_names: the names list Example _______ >>> p = 0.5 >>> n_nodes = 1000 >>> matrix = np.array([[1, 2], [3, 4]]) >>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p]) >>> nodes = list("A", "B", "C") >>> bm.set_nodes(nodes) """ self.nodes = nodes_names self.n_nodes = len(nodes_names) def set_bm(self, block_model_matrix: pd.DataFrame) -> None: """ Change block model matrix used in the class :param block_model_matrix: the block model matrix Example _______ >>> p = 0.5 >>> n_nodes = 1000 >>> matrix = np.array([[1, 2], [3, 4]]) >>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p]) >>> bmm = pd.DataFrame(mydata_matrix) >>> bm.set_bm(bmm) """ if block_model_matrix.shape[0] == self.n_clusters: self.bm = block_model_matrix else: logging.error("the block model is supposed to have %d clusters" % (self.n_clusters)) def set_nodes_in_block_percentage(self, nodes_percentage: list) -> None: """ Pass the percentage of nodes in each block as a list, for example [0.5, 0.5] :param nodes_percentage: percentage of the nodes Example _______ >>> p = 0.5 >>> n_nodes = 1000 >>> matrix = np.array([[1, 2], [3, 4]]) >>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p]) >>> bm.set_nodes_in_block_percentage([0.5, 0.5]) """ self.nodes_percentage = nodes_percentage def set_nodes_in_block(self, nodes_in_block: int) -> None: """ Set the nodes number in the block model :param nodes_in_block: the number of nodes in the block list Example _______ >>> p = 0.5 >>> n_nodes = 1000 >>> matrix = np.array([[1, 2], [3, 4]]) >>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p]) >>> bm.set_nodes_in_block(1000) """ self.nodes_in_block = nodes_in_block def create_graph(self) -> None: """ Create a graph from the parameters passed in the constructor of the class Example _______ >>> bm = BlockModel(np.array(config["BlockModel"]["matrix"]), n_nodes=config["BlockModel"]["n_nodes"], nodes_percentage=config["BlockModel"]["nodes_percentage"]) >>> bm.create_graph() """ reject = True logging.info('Reject=' + str(reject)) while reject: graph = generate_graph_from_sm(self.n_nodes, self.bm, self.nodes_in_block, self.nodes, self.nodes_percentage) LCC = max(nx.connected_components(graph), key=len) reject = (len(LCC) != self.n_nodes) logging.info('Reject=' + str(reject)) logging.info('Nodes: %d, in LCC: %d' % (self.n_nodes, len(LCC))) self.graph = graph def plot_graph(self, output_folder: str) -> None: """ Plot the block model 
graph :param output_folder: the folder where to save the result Example _______ >>> p = 0.5 >>> n_nodes = 1000 >>> matrix = np.array([[1, 2], [3, 4]]) >>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p]) >>> bm.plot_graph("block_model_path.pdf") """ plot_bm_graph(self.graph, self.bm, output_folder=output_folder) def write_network(self, output_file: str) -> None: """ Save the network on a given file :param output_file: the output path where to save the results Example _______ >>> p = 0.5 >>> n_nodes = 1000 >>> matrix = np.array([[1, 2], [3, 4]]) >>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p]) >>> bm.write_network("network.tsv") """ self.network_file = output_file logging.info("Network written on %s" % (output_file)) if output_file.endswith(".tsv"): nx.write_edgelist(self.graph, output_file, data=False, delimiter="\t") else: logging.error("output file format unknown") def write_cluster_genelist(self, output_file: str) -> None: """ Save the gene list to a GMT file :param output_file: the output path where to save the results Example _______ >>> p = 0.5 >>> n_nodes = 1000 >>> matrix = np.array([[1, 2], [3, 4]]) >>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p]) >>> bm.write_cluster_genelist("genes.gmt") """ self.genelist_file = output_file clusters = nx.get_node_attributes(self.graph, "cluster") for i in set(clusters.values()): c = "cluster_" + str(i) self.cluster_dict[c] = {} self.cluster_dict[c]["descriptor"] = "cluster" self.cluster_dict[c]["genes"] = [str(j) for j in clusters.keys() if clusters[j] == i] if output_file.endswith(".gmt"): output.print_GMT(self.cluster_dict, self.genelist_file) else: logging.error("output file format unknown") def generate_graph_from_sm(n_nodes: int, block_model: pd.DataFrame, nodes_in_block: list = False, node_names: list = None, nodes_percentage: list = None) -> nx.Graph: """ This function creates a graph with n_nodes number of vertices and a matrix block_model that describes the intra e inter-block connectivity. 
The nodes_in_block is parameter, list, to control the number of nodes in each cluster :param n_nodes: the number of nodes in the block model :param block_model: the block model to elaborate :param nodes_in_block: the list of nodes in the block model :param node_names: the list of names in the block model :param nodes_percentage: the percentage of nodes to use for the calculations, passed through a list for example [0.5, 0.5] Example _______ >>> bm = pd.DataFrame(mydata_matrix) >>> nodes = list("A","B","C") >>> graph = generate_graph_from_sm(n_nodes, bm, nodes_in_block, nodes, nodes_percentage) """ if not node_names: node_names = range(n_nodes) edges = [] G = nx.Graph() if nodes_percentage: cluster = np.random.choice(block_model.shape[0], size=n_nodes, p=nodes_percentage) np.random.shuffle(cluster) elif nodes_in_block: list_temp = [nodes_in_block[i] * [i] for i in range(len(nodes_in_block))] cluster = np.array([val for sublist in list_temp for val in sublist]) np.random.shuffle(cluster) else: # cluster is an array of random numbers corresponding to the cluster of each node cluster = np.random.randint(block_model.shape[0], size=n_nodes) for i in range(n_nodes): G.add_node(node_names[i], cluster=cluster[i]) for i in range(n_nodes): for j in range(i + 1, n_nodes): if np.random.rand() < block_model[cluster[i], cluster[j]]: edges.append((node_names[i], node_names[j])) G.add_edges_from(edges) return G def plot_bm_graph(graph: nx.Graph, block_model: pd.DataFrame, output_folder: str = None) -> None: """ Save the graph on a file :param graph: the graph with name of the nodes :param block_model: the block model :param output_folder: the folder where to save the file Example _______ >>> bm = pd.DataFrame(mydata_matrix) >>> graph = nx.complete_graph(100) >>> plot_bm_graph(graph, bm, output_folder="./results/") """ nodes = graph.nodes() colors = ['#b15928', '#1f78b4', '#6a3d9a', '#33a02c', '#ff7f00'] cluster = nx.get_node_attributes(graph, 'cluster') labels = [colors[cluster[n]] for n in nodes] layout = nx.spring_layout(graph) plt.figure(figsize=(13.5, 5)) plt.subplot(1, 3, 1) nx.draw(graph, nodelist=nodes, pos=layout, node_color='#636363', node_size=50, edge_color='#bdbdbd') plt.title("Observed network") plt.subplot(1, 3, 2) plt.imshow(block_model, cmap='OrRd', interpolation='nearest') plt.title("Stochastic block matrix") plt.subplot(1, 3, 3) legend = [] for ix, c in enumerate(colors): legend.append(mpatches.Patch(color=c, label='C%d' % ix)) nx.draw(graph, nodelist=nodes, pos=layout, node_color=labels, node_size=50, edge_color='#bdbdbd') plt.legend(handles=legend, ncol=len(colors), mode="expand", borderaxespad=0) plt.title("SB clustering") plt.savefig(output_folder + 'block_model.pdf', bbox_inches='tight') def generate_sbm_network(input_file: "yaml configuration file") -> None: """ This function generates a simulated network, using the block model matrix given as input and saves both the network and the cluster nodes. All parameters must be specified in a yaml file. 
This function allows to create network and geneset for any type of SBM """ ym = YamlConfig() config = ym.load_config(input_file) print(config) bm = BlockModel(np.array(config["BlockModel"]["matrix"]), n_nodes=config["BlockModel"]["n_nodes"], nodes_percentage=config["BlockModel"]["nodes_percentage"]) outpath = config["Simulations"]["output_folder"] suffix = config["Simulations"]["suffix"] for i in range(config["Simulations"]["n_simulated"]): bm.create_graph() bm.write_network(outpath + suffix + "_s_" + str(i) + "_network.tsv") bm.write_cluster_genelist(outpath + suffix + "_s_" + str(i) + "_genes.gmt") # bm.plot_graph(outpath+suffix+"_s_"+str(i)) def generate_sbm2_network(output_folder: 'folder where the simulations are saved', prefix: 'prefix for the simulations' = 'sbm', n_nodes: 'nodes in the network' = 1000, theta0: 'probability of connection in the cluster' = '0.9,0.7,0.5,0.2', percentage: 'percentage of nodes in cluster 0, use ratio 0.1 = 10 percent' = '0.1', density: 'multiplicative parameter used to define network density' = '0.06,0.1,0.2', n_simulations: 'number of simulated networks for each configuration' = 3 ): """ This function generates the simulated networks and genesets using the stochastic block model with 2 BLOCKS as described in the paper. The output names are going to be prefix_t_<theta0>_p_<percentage>_d_<density>_s_<n_simulation>_network.tsv or _genes.gmt One connected cluster while the rest of the network has the same probability of connection. SBM = d *[theta0, 1-theta0 1-theta0, 1-theta0] The simulator checks for connectedness of the generated network, if the generated net is not connected, a new simulation is generated. """ teta_ii = [float(i) for i in theta0.replace(' ', '').split(',')] percentages = [float(i) for i in percentage.replace(' ', '').split(',')] density = [float(i) for i in density.replace(' ', '').split(',')] n_simulated = int(n_simulations) n_nodes = int(n_nodes) for p in percentages: for t in teta_ii: for d in density: matrix = np.array([[d * t, d * (1 - t)], [d * (1 - t), d * (1 - t)]]) bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p]) for i in range(n_simulated): name = output_folder + prefix + "_t_" + str(t) + "_p_" + str(p) + "_d_" + str(d) + "_s_" + str(i) bm.create_graph() bm.write_network(name + "_network.tsv") bm.write_cluster_genelist(name + "_genes.gmt") def write_network(network, output_file): network_file= output_file logging.info("Network written on %s" %(output_file)) if output_file.endswith(".tsv"): nx.write_edgelist(network, output_file, data=False, delimiter="\t") else: logging.error("output file format unknown") def get_mix_genesets(gmt_diz, tups = [('positive_0', 'positive_1'), ('positive_2', 'positive_3'), ('null_4', 'null_5'), ('null_6', 'null_7')], perc = [4,6,10,12,88,90,94,96]): diz = {} for t in tups: a = gmt_diz[t[0]]['genes'] b = gmt_diz[t[1]]['genes'] for p in perc: name = t[0]+'_'+str(int(p))+'_'+t[1]+'_'+str(int(100-p)) aa = np.random.choice(a, int(len(a)/100*p), replace = False) bb = np.random.choice(b, int(len(a)/100*int(100-p)), replace = False) tot = [] for i in aa: tot.append(i) for i in bb: tot.append(i) diz[name]=tot return(diz) ######################################################################### ####### COMMAND LINE FUNCTIONS ########################################## ######################################################################### def generate_gna_sbm( output_tsv: 'output_network', output_gmt: 'output geneset filename, this contains only the blocks', output_gmt2: 'mixture 
output geneset filename, this contains the mixture blocks'=None, N:'number of nodes in the network' = 1000, block_size:'size of the first 8 blocks' = 50, d:'baseline probability of connection, p0 in the paper' = 0.06, fc_cis:'positive within-block scaling factor for the probability of connection, Mii = fc_cis * d (alpha parameter in the paper)' = 2., fc_trans:'positive between-block scaling factor for the probability of connection, (beta parameter in the paper)' = .5, pi : 'percentage of block-i nodes for the genesets made of block-i and block-j. Use symmetrical values (5,95),use string comma separated' = '4,6,10,12,88,90,94,96', descriptor='crosstalk_sbm', sbm_matrix_figure: 'shows the blockmodel matrix' = None): """ This function generates benchmark network and geneset to test the crosstalk between two blocks. This function generates 4 blocks with d*fold change probability and other 4 blocks with d probability. The crosstalk is set both between the the first 4 blocks and the others. Make sure that 8*cluster_size < N """ clusters = 8 lc = N - (block_size*clusters) if lc < 1: logging.error('nodes are less than cluster groups') d =float(d) sizes = clusters*[block_size] sizes.append(lc) print(sizes) probs = d*np.ones((9,9)) #pp = np.tril(d/100*(1+np.random.randn(ncluster+1,ncluster+1))) A = fc_cis*d B = d + fc_trans*(d*(fc_cis-1)) probs[0,1] = B probs[2,3] = B probs[1,0] = B probs[3,2] = B probs[4,5] = B probs[6,7] = B probs[5,4] = B probs[7,6] = B probs[0,0] = A probs[1,1] = A probs[2,2] = A probs[3,3] = A if type(sbm_matrix_figure)==str: f,ax = plt.subplots(1) sns.heatmap(probs, ax = ax, cmap = 'YlOrRd', annot=True) f.savefig(sbm_matrix_figure) ncycle = 0 k = 0 while (k<N): g = nx.stochastic_block_model(sizes, probs) g = max(nx.connected_component_subgraphs(g), key=len) k = len(g) ncycle +=1 if ncycle > 20: logging.error('density is too low') H = nx.relabel_nodes(g, lambda x:'n'+str(x)) gmt_diz = {} nodes = list(H.nodes) for p,l in enumerate(H.graph['partition'][:-1]): if p<4: name = 'positive_'+str(p) else: name = 'null_'+str(p) ll = [nodes[i] for i in l] gmt_diz[name]={} gmt_diz[name]['genes']=ll gmt_diz[name]['descriptor']=descriptor if type(output_gmt2)==str: perc = [float(i) for i in pi.split(',')] logging.info('Generating mixes with perc = %s') gmt_diz2={} mix_dix = get_mix_genesets(gmt_diz, perc = perc) for name,i in mix_dix.items(): gmt_diz2[name]={} gmt_diz2[name]['genes']=i gmt_diz2[name]['descriptor']=descriptor output.print_GMT(gmt_diz2, output_gmt2) write_network(H, output_tsv) output.print_GMT(gmt_diz, output_gmt) print('Generated'+output_tsv) def generate_gnt_sbm( output_tsv: 'output network filename', output_gmt: 'output geneset filename, this contains only the blocks', N:'number of nodes in the network' = 1000, block_size: 'size of the first 6 blocks'= 50, d: 'baseline probability of connection, p0 in the paper' = 0.06, fold_change:'positive within-block scaling factor for the probability of connection, Mii = fold_change * d (alpha parameter in the paper)' = 2., descriptor:'descriptor for the gmt file'='mixed_sbm'): """ This function generates 3 blocks with d*fold_change probability and other 3 blocks with d probability. 
Make sure that 6*cluster_size < N """ lc = N - (block_size*6) if lc < 1: logging.error('nodes are less than cluster groups') d =float(d) sizes = 6*[block_size] sizes.append(lc) print(sizes) probs = d*np.ones((7,7)) #pp = np.tril(d/100*(1+np.random.randn(ncluster+1,ncluster+1))) probs[0,0]=fold_change*d probs[1,1]=fold_change*d probs[2,2]=fold_change*d ncycle = 0 k = 0 while (k<N): g = nx.stochastic_block_model(sizes, probs) g = max(nx.connected_component_subgraphs(g), key=len) k = len(g) ncycle +=1 if ncycle > 20: logging.error('density is too low') H = nx.relabel_nodes(g, lambda x:'n'+str(x)) gmt_diz = {} nodes = list(H.nodes) for p,l in enumerate(H.graph['partition'][:-1]): if p<3: name = 'positive_'+str(p) else: name = 'null_'+str(p) ll = [nodes[i] for i in l] gmt_diz[name]={} gmt_diz[name]['genes']=ll gmt_diz[name]['descriptor']=descriptor write_network(H, output_tsv) output.print_GMT(gmt_diz, output_gmt)
19,779
6,634
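A compact sketch of driving the BlockModel class above directly, without going through the YAML configuration path. The 2x2 probability matrix, node counts and output file names are made up for illustration and mirror the docstring examples in the module.

import numpy as np

# Two blocks: dense within block 0, sparse elsewhere (values are illustrative).
matrix = np.array([[0.12, 0.03],
                   [0.03, 0.03]])

bm = BlockModel(matrix, n_nodes=500, nodes_percentage=[0.1, 0.9])
bm.create_graph()                                   # resamples until the graph is connected
bm.write_network("sbm_example_network.tsv")         # tab-separated edge list
bm.write_cluster_genelist("sbm_example_genes.gmt")  # block membership as GMT genesets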
from django.apps import AppConfig


class PrintshopsConfig(AppConfig):
    name = 'printshops'

    def ready(self):
        """Register our signals."""
        import printshops.signals
184
54
def grade(key, submission):
    if submission.lower() == 'sea' or submission.lower() == 'the sea':
        return True, "Yes! Miles learns Russian so he came up with the words that visually look same in both English and Russian."
    else:
        return False, "Nyet!"
275
81
import pandas as pd people_dict = { "weight": pd.Series([145, 182, 191], index=["joan", "bob", "mike"]), "birthyear": pd.Series([2002, 2000, 1999], index=["bob", "joan", "mike"], name="year"), "children": pd.Series([1, 2], index=["mike", "bob"]), "hobby": pd.Series(["Rock Climbing", "Scuba Diving", "Sailing"], index=["joan", "bob", "mike"]), } people = pd.DataFrame(people_dict) print(people)
443
189
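Since the Series in people_dict carry different index labels, the DataFrame is built on the union of those labels. A brief, hypothetical illustration of that alignment behaviour:

import pandas as pd

weight = pd.Series([145, 182], index=["joan", "bob"])
children = pd.Series([2], index=["bob"])
df = pd.DataFrame({"weight": weight, "children": children})
print(df)          # the "joan" row gets NaN for children, since that label is missing from the Series
print(df.dtypes)   # the children column is promoted to float64 to hold the NaN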
# # Copyright (C) 2014 Dominik Oepen # # This file is part of virtualsmartcard. # # virtualsmartcard is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) any # later version. # # virtualsmartcard is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # virtualsmartcard. If not, see <http://www.gnu.org/licenses/>. # import unittest from virtualsmartcard.SmartcardSAM import * class TestSmartcardSAM(unittest.TestCase): def setUp(self): self.password = "DUMMYKEYDUMMYKEY" self.myCard = SAM("1234", "1234567890") self.secEnv = Security_Environment(None, self.myCard) # TODO: Set CRTs self.secEnv.ht.algorithm = "SHA" self.secEnv.ct.algorithm = "AES-CBC" def test_incorrect_pin(self): with self.assertRaises(SwError): self.myCard.verify(0x00, 0x00, "5678") def test_counter_decrement(self): ctr1 = self.myCard.counter try: self.myCard.verify(0x00, 0x00, "3456") except SwError as e: pass self.assertEquals(self.myCard.counter, ctr1 - 1) def test_internal_authenticate(self): sw, challenge = self.myCard.get_challenge(0x00, 0x00, "") blocklen = vsCrypto.get_cipher_blocklen("DES3-ECB") padded = vsCrypto.append_padding(blocklen, challenge) sw, result_data = self.myCard.internal_authenticate(0x00, 0x00, padded) self.assertEquals(sw, SW["NORMAL"]) def test_external_authenticate(self): sw, challenge = self.myCard.get_challenge(0x00, 0x00, "") blocklen = vsCrypto.get_cipher_blocklen("DES3-ECB") padded = vsCrypto.append_padding(blocklen, challenge) sw, result_data = self.myCard.internal_authenticate(0x00, 0x00, padded) sw, result_data = self.myCard.external_authenticate(0x00, 0x00, result_data) self.assertEquals(sw, SW["NORMAL"]) def test_security_environment(self): hash = self.secEnv.hash(0x90, 0x80, self.password) # The API should be changed so that the hash function returns SW_NORMAL self.secEnv.ct.key = hash[:16] crypted = self.secEnv.encipher(0x00, 0x00, self.password) # The API should be changed so that encipher() returns SW_NORMAL plain = self.secEnv.decipher(0x00, 0x00, crypted) # The API should be changed so that decipher() returns SW_NORMAL # self.assertEqual(plain, self.password) # secEnv.decipher doesn't strip padding. Should it? # should this really be secEnv.ct? probably rather secEnv.dst self.secEnv.ct.algorithm = "RSA" self.secEnv.dst.keylength = 1024 sw, pk = self.secEnv.generate_public_key_pair(0x00, 0x00, "") self.assertEquals(sw, SW["NORMAL"]) if __name__ == "__main__": unittest.main() # CF = CryptoflexSE(None) # print CF.generate_public_key_pair(0x00, 0x80, "\x01\x00\x01\x00") # print MyCard._get_referenced_key(0x01)
3,440
1,210
############################################################### # _ _ _ _ _ # | |__ (_) ___ _ __ __ _ _ __| |_(_) ___| | ___ # | '_ \| |/ _ \| '_ \ / _` | '__| __| |/ __| |/ _ \ # | |_) | | (_) | |_) | (_| | | | |_| | (__| | __/ # |_.__/|_|\___/| .__/ \__,_|_| \__|_|\___|_|\___| # |_| # ############################################################### # # $ python3 runTableCases.py [CASES.CSV] [TEMPLATE.IN] -run # # Where: # - [CASES.CSV] path to csv file with the list of # parameters and the corresponding tags # - [TEMPLATE.IN] input file template for PFLOTRAN and # the corresponding tags # - [shouldRunPFLOTRAN = "-run"] # ############################################################### import numpy as np import matplotlib.pyplot as plt from pandas import read_csv from os import system import sys ## Global variables ColumnLenght = 50.0 ConcentrationAtInlet = 1.66E-16 ## Non-dimensional numbers def DaII(K,A,U,L=ColumnLenght): return (L*L*K)/(A*U) def Peclet(A,L=ColumnLenght): return L/A def plotResults(U,pH,IS,PV,kATT,kDET,dAq,dIm,alpha): FILE = current_folder+"/pflotran-obs-0.tec" textBoxpH = "pH = {:n}".format(pH)\ + "\nIS = {:n}".format(IS) textBoxKin = \ "$k_{\\rm att}$"+" = {:.4f}".format(kATT) + " h$^{-1}$" +"\n" + \ "$k_{\\rm det}$"+" = {:.4f}".format(kDET) + " h$^{-1}$" +"\n" + \ "$\lambda_{\\rm aq}$"+" = {:.4f}".format(dAq)+ " h$^{-1}$" +"\n" + \ "$\lambda_{\\rm im}$"+" = {:.4f}".format(dIm)+ " h$^{-1}$" +"\n" + \ "$\\alpha_{\\rm L}$"+" = {:.4f}".format(alpha)+ " cm " textBoxDimensionless = "Damköhler(II) = $\\dfrac{\\rm reaction}{\\rm dispersion}$"+"\n" +\ "Da$^{\\rm att}$"+" = {:.1E}".format(DaII(kATT,alpha,U)) +"\n" +\ "Da$^{\\rm det}$"+" = {:.1E}".format(DaII(kDET,alpha,U)) +"\n" +\ "Da$^{\\rm λaq}$"+" = {:.1E}".format(DaII(dAq, alpha,U)) +"\n" +\ "Da$^{\\rm λim}$"+" = {:.1E}".format(DaII(dIm, alpha,U)) +"\n\n" +\ "Péclet = $\\dfrac{\\rm advection}{\\rm dispersion}$"+"\n" +\ "P$_{\\rm é}$"+" = {:.1E}".format(Peclet(alpha)) system("./miscellaneous/PFT2CSV.sh " + FILE) #system("rm " + current_folder +"/*.out") ObservationPoint = np.loadtxt(FILE,delimiter=",",skiprows=1) Cnorm = ObservationPoint[:,1]/ConcentrationAtInlet TimeInPoreVolumes = ObservationPoint[:,0] * U*24./(ColumnLenght) Legend=["$\\dfrac{[V_{(aq)}]}{[V_{(aq)}]_0}$"] plt.figure(figsize=(10,4),facecolor="white") ## Plot log-scale ax1 = plt.subplot(1,2,1) ax1.plot(TimeInPoreVolumes,Cnorm,c="purple",lw=3) ax1.set_yscale("symlog",\ linthresh=1.0E-6,subs=[1,2,3,4,5,6,7,8,9]) ax1.set_ylim([-1.0E-7,1.15]) ax1.set_xlim([0,10]) ax1.set_xlabel("Pore Volume [$-$]",fontsize="large") ax1.axvline(x=PV,ls="dotted",c="gray",lw=1) ax1.axhspan(ymin=-1.0E-7,ymax=1.0E-6,facecolor="pink",alpha=0.2) ## Rate values ax1.text(9.5,5.0E-5,textBoxKin,\ bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.5),\ horizontalalignment='right') ## Case pH/IS ax1.text(9.0,1.0E-1,textBoxpH,\ bbox=dict(boxstyle='round', facecolor='purple', alpha=0.15),\ horizontalalignment='right') ## Plot linear-scale ax2 = plt.subplot(1,2,2) ax2.plot(TimeInPoreVolumes,Cnorm,c="purple",lw=3,label=Legend[0]) ax2.set_ylim([-1.0E-2,1.02]) ax2.set_xlim([0,10]) ax2.set_xlabel("Pore Volume [$-$]",fontsize="large") ax2.axvline(x=PV,ls="dotted",c="gray",lw=1) ax2.legend(fontsize="large",loc="upper right") ## Péclet and Damköhler numbers ax2.text(9.5,0.1,textBoxDimensionless,\ bbox=dict(boxstyle='round', facecolor='purple', alpha=0.15),\ horizontalalignment='right') plt.tight_layout() FIGPATH = current_folder + "/" + "CASE_" + current_folder[7:10] + ".png" #plt.show() 
plt.savefig(FIGPATH,transparent=False) ## Tags dictionary for variables in input file tagsReplaceable = { "Porosity" : "<porosity>", "DarcyVel" : "<darcyVel>", # q = u*porosity "CleanTime" : "<elutionTime>", # t @ C0 = 0 "FinalTime" : "<endTime>", # @ 10 pore volumes "AttachRate": "<katt>", "DetachRate": "<kdet>", "DecayAq" : "<decayAq>", "DecayIm" : "<decayIm>", "LongDisp" : "<longDisp>" } ## Tags dictionary for other parameters tagsAccesory = { "FlowVel" : "poreWaterVel", "PoreVol" : "poreVolume", "pH" : "pH", "IonicStr" : "IS" } ## Path to PFLOTRAN executable PFLOTRAN_path = "$PFLOTRAN_DIR/src/pflotran/pflotran " ## Table with the set of parameters try: parameters_file = str(sys.argv[1]) except IndexError: sys.exit("Parameters file not defined :(") setParameters = read_csv(parameters_file) total_rows = setParameters.shape[0] ## Template for the PFLOTRAN input file try: template_file = str(sys.argv[2]) except IndexError: sys.exit("Template file not found :(") ## Run cases? try: shouldRunPFLOTRAN = "-run" in str(sys.argv[3]) except IndexError: shouldRunPFLOTRAN = False ## Delete previous cases system("rm -rf CASE*") ## Row in the set of parameters table = case to be run for i in range(total_rows): #for i in range(1): ## Create a folder for the case current_folder = "./CASE_" + "{0:03}".format(i+1) system("mkdir " + current_folder) ## Copy template input file to folder system("cp " + template_file + " " + current_folder+"/pflotran.in") current_file = current_folder + "/pflotran.in" ## Replace tags for values in case for current_tag in tagsReplaceable: COMM = "sed -i 's/" + tagsReplaceable[current_tag] + "/"\ +'{:.3E}'.format(setParameters.loc[i,tagsReplaceable[current_tag]])\ + "/g' " + current_file system(COMM) ## Run PFLOTRAN in that case if shouldRunPFLOTRAN: #print(PFLOTRAN_path + "-pflotranin " + current_file) system(PFLOTRAN_path + "-pflotranin " + current_file) #system("python3 ./miscellaneous/organizeResults.py " + current_folder + "/pflotran-obs-0.tec -clean") current_U = setParameters.loc[i,tagsAccesory["FlowVel"]] current_pH = setParameters.loc[i,tagsAccesory["pH"]] current_IS = setParameters.loc[i,tagsAccesory["IonicStr"]] current_PV = setParameters.loc[i,tagsAccesory["PoreVol"]] #Porosity = setParameters.loc[i,tagsReplaceable["Porosity"]] #input("Press Enter to continue...") plotResults(current_U,current_pH,current_IS,current_PV,\ setParameters.loc[i,tagsReplaceable["AttachRate"]],\ setParameters.loc[i,tagsReplaceable["DetachRate"]],\ setParameters.loc[i,tagsReplaceable["DecayAq"]],\ setParameters.loc[i,tagsReplaceable["DecayIm"]],\ setParameters.loc[i,tagsReplaceable["LongDisp"]]) #input("Press Enter to continue...") system("rm -r pictures ; mkdir pictures") system("cp CASE**/*.png ./pictures/")
6,762
2,826
#!/usr/bin/python3 # # This software is covered by The Unlicense license # import os, pymongo, sys def print_mongo(): myclient = pymongo.MongoClient("mongodb://localhost:27017/") mydb = myclient["cpu_temperature"] mycol = mydb["temps"] #print(myclient.list_database_names()) for x in mycol.find(): print(x) myclient.close() def main(): print_mongo() if __name__ == '__main__': try: main() except KeyboardInterrupt: print('Interrupted') try: sys.exit(0) except SystemExit: os._exit(0)
593
205
# -*- coding: utf-8 -*- def test_all_contains_only_valid_names(): import pycamunda.decisionreqdef for name in pycamunda.decisionreqdef.__all__: getattr(pycamunda.decisionreqdef, name)
203
77
import time import torch from torch import nn from torch.nn import functional as F #import spconv import torchplus from torchplus.nn import Empty, GroupNorm, Sequential from torchplus.ops.array_ops import gather_nd, scatter_nd from torchplus.tools import change_default_args import sys if '/opt/ros/kinetic/lib/python2.7/dist-packages' in sys.path: sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages') class fusion(nn.Module): def __init__(self): super(fusion, self).__init__() self._total_time = 0.0 self._total_count = 0 self.name = 'fusion_layer' self.corner_points_feature = Sequential( nn.Conv2d(24,48,1), nn.ReLU(), nn.Conv2d(48,96,1), nn.ReLU(), nn.Conv2d(96,96,1), nn.ReLU(), nn.Conv2d(96,4,1), ) self.fuse_2d_3d = Sequential( nn.Conv2d(4,18,1), nn.ReLU(), nn.Conv2d(18,36,1), nn.ReLU(), nn.Conv2d(36,36,1), nn.ReLU(), nn.Conv2d(36,1,1), ) self.maxpool = Sequential( nn.MaxPool2d([200,1],1), ) def forward(self,input_1,tensor_index): torch.cuda.synchronize() t1 = time.time() flag = -1 if tensor_index[0,0] == -1: #tensor_index[0,0]=0 out_1 = torch.zeros(1,200,107136,dtype = input_1.dtype,device = input_1.device) out_1[:,:,:] = -9999999 flag = 0 else: x = self.fuse_2d_3d(input_1) #input例:[1, 4, 1, 193283],4 channel,1*193283 out_1 = torch.zeros(1,200,107136,dtype = input_1.dtype,device = input_1.device) out_1[:,:,:] = -9999999 out_1[:,tensor_index[:,0],tensor_index[:,1]] = x[0,:,0,:] flag = 1 x = self.maxpool(out_1) #x, _ = torch.max(out_1,1) x = x.squeeze().reshape(1,-1,1) torch.cuda.synchronize() self._total_time += time.time() - t1 self._total_count += 1 #batch size = 1 #print("avg fusion time:", self._total_time/self._total_count*1000) return x, flag
2,185
892
import string from typing import List, Dict # inject code here # def _mean_in_window(lines, i) -> float: start = max(i - 5, 0) finish = min(i + 5, len(lines) - 1) sm, count = 0, 0 for n in range(start, finish): sm += len(lines[n]) - 1 # minus one-char prefix count += 1 return sm / max(count, 1) def _last_char(line: str) -> str: return ' ' if len(line) < 1 else line[-1] def _last_char_features(l_char: str) -> Dict[str, object]: res = { 'isalpha': l_char.isalpha(), 'isdigit': l_char.isdigit(), 'islower': l_char.islower(), 'punct': l_char if l_char in string.punctuation else ' ', } return res def _first_chars(line: str) -> str: if len(line) < 1: chars = ' ' elif len(line) < 2: chars = line[0] else: chars = line[:2] res = [] for c in chars: if c.isdigit(): res.append('0') elif c.isalpha(): res.append('a' if c.islower() else 'A') else: res.append(c) return ''.join(res) def _line_to_features(line: str, i: int, lines: List[str], annotated: bool) -> Dict[str, object]: features = {} this_len = len(line) mean_len = _mean_in_window(lines, i) if i > 0: prev_len = len(lines[i-1]) - (1 if annotated else 0) l_char = _last_char(lines[i-1]) else: prev_len = 0 l_char = ' ' features.update( { 'this_len': this_len, 'mean_len': mean_len, 'prev_len': prev_len, 'first_chars': _first_chars(line), }) features.update(_last_char_features(l_char)) return features def _featurize_text_with_annotation(text: str) -> (List[object], List[bool]): lines = text.strip().splitlines() x, y = [], [] for i, line in enumerate(lines): y.append(line[0] == '+') # True, if line should be glued with previous line = line[1:] x.append(_line_to_features(line, i, lines, True)) return x, y _HYPHEN_CHARS = { '\u002D', # HYPHEN-MINUS '\u00AD', # SOFT HYPHEN '\u2010', # HYPHEN '\u2011', # NON-BREAKING HYPHEN } def _preprocess_pdf(text: str, clf, v) -> str: lines = [s.strip() for s in text.strip().splitlines()] x = [] for i, line in enumerate(lines): x.append(_line_to_features(line, i, lines, False)) if not x: return '' x_features = v.transform(x) y_pred = clf.predict(x_features) corrected_acc = [] for i, line in enumerate(lines): line = line.strip() if i == 0 or not y_pred[i]: corrected_acc.append(line) else: prev_line = corrected_acc[-1] if prev_line != '' and prev_line[-1] in _HYPHEN_CHARS: corrected_acc[-1] = prev_line[:-1] else: corrected_acc[-1] += ' ' corrected_acc[-1] += line corrected = '\n'.join(corrected_acc) return corrected
3,001
1,098
from django.conf.urls import include from django.urls import path from django.contrib import admin from users.views import FacebookLogin import django_js_reverse.views from rest_framework.routers import DefaultRouter from common.routes import routes as common_routes router = DefaultRouter() routes = common_routes for route in routes: router.register(route['regex'], route['viewset'], basename=route['basename']) urlpatterns = [ path("", include("common.urls"), name="common"), path("assignments/", include("assignments.urls"), name='assignments'), path('api-auth/', include('rest_framework.urls')), path('rest-auth/', include('rest_auth.urls')), path('rest-auth/registration/', include('rest_auth.registration.urls')), path('rest-auth/facebook/', FacebookLogin.as_view(), name='fb_login'), path("admin/", admin.site.urls, name="admin"), path("jsreverse/", django_js_reverse.views.urls_js, name="js_reverse"), path("api/", include(router.urls), name="api"), path("api/assignments/", include("assignments.api.assignment.urls")), path("api/grade-assignment/", include("assignments.api.graded-assignment.urls")), path("api/", include("users.urls"), name="user"), ]
1,241
368
import numpy as np from path import Path import random import pickle import torch import os import cv2 def load_as_float(path): """Loads image""" im = cv2.imread(path) im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB).astype(np.float32) return im class SequenceFolder(torch.utils.data.Dataset): """Creates a pickle file for ScanNet scene loading, and corresponding dataloader""" def __init__(self, root, ttype, seed=None, seq_length=3, seq_gap=20, transform=None): np.random.seed(seed) random.seed(seed) self.root = Path(root) scene_list_path = ttype self.scene_list_path = scene_list_path[:-4] fold_root = 'scans_test_sample' if 'test' in ttype else 'scannet_nas' #fold_root = 'scannet_nas' scenes = [self.root/fold_root/folder[:-1] for folder in open(scene_list_path)] self.ttype = ttype self.scenes = sorted(scenes) self.seq_gap = seq_gap self.seq_length = seq_length self.transform = transform file_pickle = self.scene_list_path+ '_len_'+str(self.seq_length)+ '_gap_'+str(self.seq_gap)+'.pickle' if os.path.exists(file_pickle): with open(file_pickle, 'rb') as handle: sequence_set = pickle.load(handle) self.samples = sequence_set else: self.crawl_folders() def crawl_folders(self): sequence_set = [] isc = 0 cnt = 0 for scene in self.scenes: #print(isc, len(self.scenes)) isc += 1 frames = os.listdir(os.path.join(scene, "color")) frames = [int(os.path.splitext(frame)[0]) for frame in frames] frames = sorted(frames) intrinsics = np.genfromtxt(os.path.join(scene, "intrinsic", "intrinsic_depth.txt")).astype(np.float32).reshape((4, 4))[:3,:3] # The index from scannet nas is already sampled if len(frames) < (self.seq_gap // 20) * self.seq_length: continue cnt += len(frames) end_idx = len(frames) * 20 path_split = scene.split('/') for i in range(len(frames)): idx = frames[i] img = os.path.join(scene, "color", "%04d.jpg" % idx) if 'test' in self.ttype: depth = os.path.join(scene, "depth", "%04d.png" % idx) # do not require normal when test normal = "" else: depth = os.path.join(scene, "depth", "%04d.npy" % idx) normal = os.path.join(scene, "normal", "%04d_normal.npy" % idx) pose_tgt = np.loadtxt(os.path.join(scene, "pose", "%04d.txt" % idx)) do_nan_tgt = False nan_pose_tgt = np.sum(np.isnan(pose_tgt) | np.isinf(pose_tgt)) if nan_pose_tgt>0: do_nan_tgt = True sample = {'intrinsics': intrinsics, 'tgt': img, 'tgt_depth': depth, 'tgt_normal': normal, 'ref_depths': [], 'ref_imgs': [], 'ref_poses': [], 'path': []} sample['path'] = os.path.join(scene , img[:-4]) if idx < self.seq_gap: shifts = list(range(idx,idx+(self.seq_length-1)*self.seq_gap+1,self.seq_gap)) shifts.remove(idx) #.pop(i) elif idx >= end_idx - self.seq_gap: shifts = list(range(idx,end_idx,self.seq_gap)) shifts = list(range(idx-(self.seq_length-1)*self.seq_gap,idx+1,self.seq_gap)) shifts.remove(idx) else: if self.seq_length%2 == 1: demi_length = self.seq_length//2 if (idx>=demi_length*self.seq_gap) and (idx<end_idx- demi_length*self.seq_gap): shifts = list(range(idx- (demi_length)*self.seq_gap, idx+(demi_length)*self.seq_gap+1,self.seq_gap)) elif idx<demi_length*self.seq_gap: diff_demi = (demi_length-idx//self.seq_gap) shifts = list(range(idx- (demi_length-diff_demi)*self.seq_gap, idx+(demi_length+diff_demi)*self.seq_gap+1,self.seq_gap)) elif idx>=end_idx- demi_length*self.seq_gap: diff_demi = (demi_length-(end_idx-idx-1)//self.seq_gap) shifts = list(range(idx- (demi_length+diff_demi)*self.seq_gap, idx+(demi_length-diff_demi)*self.seq_gap+1,self.seq_gap)) else: print('Error') shifts.remove(idx) else: #2 scenarios demi_length = self.seq_length//2 if (idx >= 
demi_length*self.seq_gap) and (idx < end_idx- demi_length*self.seq_gap): shifts = list(range(idx - demi_length*self.seq_gap, idx + (demi_length-1)*self.seq_gap+1, self.seq_gap)) elif idx < demi_length*self.seq_gap: diff_demi = (demi_length-idx//self.seq_gap) shifts = list(range(idx- (demi_length-diff_demi)*self.seq_gap, idx+(demi_length+diff_demi-1)*self.seq_gap+1,self.seq_gap)) elif idx>=end_idx- demi_length*self.seq_gap: diff_demi = (demi_length-(end_idx-idx-1)//self.seq_gap) shifts = list(range(idx- (demi_length+diff_demi-1)*self.seq_gap, idx+(demi_length-diff_demi)*self.seq_gap+1,self.seq_gap)) else: print('Error') shifts.remove(idx) do_nan = False try: for j in shifts: pose_src = np.loadtxt(os.path.join(scene, "pose", "%04d.txt" % j)) pose_rel = np.linalg.inv(pose_src) @ pose_tgt pose = pose_rel[:3,:].reshape((1,3,4)).astype(np.float32) sample['ref_poses'].append(pose) sample['ref_imgs'].append(os.path.join(scene, "color", "%04d.jpg" % j)) if 'test' in self.ttype: sample['ref_depths'].append(os.path.join(scene, "depth", "%04d.png" % j)) else: sample['ref_depths'].append(os.path.join(scene, "depth", "%04d.npy" % j)) nan_pose = np.sum(np.isnan(pose)) + np.sum(np.isinf(pose)) if nan_pose>0: do_nan = True if not do_nan_tgt and not do_nan: sequence_set.append(sample) except: continue file_pickle = self.scene_list_path+ '_len_'+str(self.seq_length)+ '_gap_'+str(self.seq_gap)+'.pickle' with open(file_pickle, 'wb') as handle: pickle.dump(sequence_set, handle, protocol=pickle.HIGHEST_PROTOCOL) self.samples = sequence_set def __getitem__(self, index): sample = self.samples[index] tgt_img = load_as_float(sample['tgt']) if 'test' in self.ttype: tgt_depth = cv2.imread(sample['tgt_depth'],-1).astype(np.float32) / 1000.0 tgt_normal = np.tile(np.expand_dims(np.ones_like(tgt_depth), -1), (1,1,3)) else: tgt_depth = np.load(sample['tgt_depth']).astype(np.float32) / 1000.0 tgt_normal = np.load(sample['tgt_normal']).astype(np.float32) tgt_normal = 1.0 - tgt_normal * 2.0 # [-1, 1] tgt_normal[:,:,2] = np.abs(tgt_normal[:,:,2]) * -1.0 ref_poses = sample['ref_poses'] ref_imgs = [load_as_float(ref_img) for ref_img in sample['ref_imgs']] if 'test' in self.ttype: ref_depths = [cv2.imread(depth_img,-1).astype(np.float32)/1000.0 for depth_img in sample['ref_depths']] else: ref_depths = [np.load(depth_img).astype(np.float32)/1000.0 for depth_img in sample['ref_depths']] if self.transform is not None: imgs, depths, normals, intrinsics = self.transform([tgt_img] + ref_imgs, [tgt_depth] + ref_depths, [tgt_normal], np.copy(sample['intrinsics'])) tgt_img = imgs[0] tgt_depth = depths[0] tgt_normal = normals[0] ref_imgs = imgs[1:] ref_depths = depths[1:] else: intrinsics = np.copy(sample['intrinsics']) intrinsics_inv = np.linalg.inv(intrinsics) return tgt_img, ref_imgs, tgt_normal, ref_poses, intrinsics, intrinsics_inv, tgt_depth, ref_depths def __len__(self): return len(self.samples)
7,302
3,559
import Queue import select import socket from conf import ADDRESS, BACKLOG, SIZE server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.setblocking(0) print 'starting up on %s port %s' % ADDRESS server.bind(ADDRESS) server.listen(BACKLOG) inputs = [server] outputs = [] message_queues = {} while inputs: readable, writable, exceptional = select.select(inputs, outputs, inputs) for s in readable: if s is server: connection, client_address = s.accept() print 'new connection from', client_address connection.setblocking(0) inputs.append(connection) message_queues[connection] = Queue.Queue() else: data = s.recv(SIZE) if data: print 'received from %s' % str(s.getpeername()) message_queues[s].put(data) if s not in outputs: outputs.append(s) else: print 'closing socket after reading no data' inputs.remove(s) s.close() del message_queues[s] for s in writable: try: next_msg = message_queues[s].get_nowait() print 'sending to %s' % str(s.getpeername()) s.send(next_msg) except Queue.Empty: print 'output queue for', s.getpeername(), 'is empty' outputs.remove(s) for s in exceptional: print 'handling exceptional condition for', s.getpeername() inputs.remove(s) if s in outputs: outputs.remove(s) s.close() del message_queues[s]
1,632
479
import re import copy from collections import defaultdict from string import Template # initialize the dictionary for the methods with checked exceptions such as {fake method: real method} method_dict_checked = {'deleteRecord' : 'delete', \ 'editText' : 'setText_new', \ 'insertData' : 'insert_new', \ 'setLayout' : 'setContentView_new', \ 'findViewId' : 'findViewById_new', \ 'changeTextColor' : 'setTextColor_new', \ 'getCursorString' : 'getString', \ 'queryData' : 'query_new', \ 'updateRecord' : 'update', \ 'drawTxt' : 'drawText_new'} # initialize the dictionary for the methods with unchecked exceptions such as {fake method: real method} method_dict_unchecked = {'deleteRecord' : 'delete', \ 'editText' : 'setText', \ 'insertData' : 'insert', \ 'setLayout' : 'setContentView', \ 'findViewId' : 'findViewById', \ 'changeTextColor' : 'setTextColor', \ 'getCursorString' : 'getString', \ 'queryData' : 'query', \ 'updateRecord' : 'update', \ 'drawTxt' : 'drawText'} # answer_block is a dict of user's answers, # i.e. answer_block = {'answer_1' : fake_answer} # survey type refers to the different surveys # (methods with checked exceptions Vs. methods with unchecked exceptions--documented and undocumented) def glue_answer(filepath, answers, survey_type, email): method_dict = set_dict(survey_type) # open the file filein = open(filepath) # read it src = Template(filein.read()) result = src.substitute(answers) with open('static/%s-NoteEditor.java' % (email), 'w') as f: f.write("%s" % result) # dictionary for answers with real Android's API methods real_answers = bind_method(answers, method_dict) #do the substitution result = src.substitute(real_answers) return result # Bind the answers' methods to the real Android's API methods # answers is a dict, i.e. answers = {'answer_1' : fake_answer} # This function returns a dict of answers with real Android's # API methods, i.e. 
real_answers = {'answer_1' : real_answer} def bind_method(answers, method_dict): real_answers = {} a_keys = list(answers.keys()) m_keys = list(method_dict.keys()) # for each user answer for k, l in enumerate(a_keys): # get the value of the answer an = answers.get(a_keys[k]) # for each fake method for m, n in enumerate(m_keys): # search for fake method in the answer fake = m_keys[m] if (re.search(fake, an)): #print ("find fake :" + fake) # get real method real = method_dict.get(fake) if (a_keys[k] not in list(real_answers.keys())): real_answers[a_keys[k]] = re.sub(fake+'\(', real+'(', an) break # check if finally there exists fake method in user's answer for d, f in enumerate(a_keys): if (a_keys[d] not in list(real_answers.keys())): real_answers[a_keys[d]] = answers.get(a_keys[d]) return real_answers def replace_methods(compiler_output, survey_type): method_dict = set_dict(survey_type) for fake, real in method_dict.items(): #compiler_output = compiler_output.replace(fake, real) compiler_output = re.sub(real, fake, compiler_output) if re.search("\bsetTextColor\b\(\bcolors\b\)", compiler_output): compiler_output = re.sub("\bsetTextColor\b\(\bcolors\b\)", "changeTextColor(colors)", replace_output) # check for line numbers #comp_output = remove_line_numbers(compiler_output) return compiler_output # dict depending on the survey type def set_dict(survey_type): if (survey_type == 'unchecked'): return method_dict_unchecked elif (survey_type == 'checked'): return method_dict_checked # replace line numbers with spaces def remove_line_numbers(output): out = '' #.java:118 print ("Here is the output.") print (output) #if re.seach('.java:/d+', output): # print ("OKK") out = re.sub(':[0-9]+', '', output) return out # vim: tabstop=8 noexpandtab shiftwidth=8 softtabstop=0
3,813
1,346
# a = 1 # b = 1 # while (not ((a==0) and (b==0))): # a, b = map(int, input().split()) # print(a+b) while True: a, b = map(int, input().split()) if a == 0 and b == 0: break print(a+b)
272
107
from builtins import str from .helpers import run import logging import subprocess import functools import types logger = logging.getLogger("commander") def maestro(scriptId): """Run a Keyboard Maestro script by ID (more robust) or name.""" run( """osascript -e 'tell application "Keyboard Maestro Engine" to """ """do script "%s"'\n""" % scriptId )
381
113
from planning_system.db.schema.views import _get_set_cols def definition(session): """ Return UI view. Complex view, which requires a dynamic pivot. """ pvt_list = _get_set_cols(session) sql = f""" SELECT costc, summary_code, summary, section, supersection, summary_order, sec_order, super_order, level, {pvt_list} FROM (SELECT costc, summary_code, summary, section, supersection, summary_order, sec_order, super_order, level, CAST(f_Set.acad_year as CHAR(4)) + ' ' + f_set.set_cat_id as finance_summary, amount as amount FROM [v_mri_finance_grouped_subtotal] f INNER JOIN f_set ON f_set.set_id = f.set_id) p PIVOT (SUM(amount) FOR finance_summary in ({pvt_list})) as pvt """ return sql
764
259
from typing import List class Solution: def minNumberOperations(self, target: List[int]) -> int: num_ops = target[0] for i in range(1, len(target)): diff = target[i]-target[i-1] if diff > 0: num_ops += diff return num_ops
267
84
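The solution above relies on the observation that each rise from target[i-1] to target[i] needs that many new range-increment operations, while falls come for free. A small worked check on a hypothetical target:

target = [3, 1, 5, 4, 2]
ops = target[0]                        # 3 operations just to lift the first element from 0
for prev, cur in zip(target, target[1:]):
    ops += max(cur - prev, 0)          # only rises cost extra; falls reuse operations already counted
print(ops)                             # 7 = 3 + (5 - 1)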
import torch import torchvision.models as models ''' Description: convert torch module to JIT TracedModule. 功能说明: 将torch 模型转化为 JIT TracedModule。 ''' def TracedModelFactory(file_name, traced_model): traced_model.save(file_name) traced_model = torch.jit.load(file_name) print("filename : ", file_name) print(traced_model.graph) if __name__ == "__main__": dummy_input = torch.randn(1, 3, 224, 224) # dummy_input is customized by user model = models.resnet18(pretrained=True) # model is customized by user model = model.cpu().eval() traced_model = torch.jit.trace(model, dummy_input) model_name = 'model_name' # model_name is customized by user TracedModelFactory(model_name + '.pth', traced_model)
756
283
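As a follow-up sketch (the file name and input shape are just the placeholders from the script above), the saved TorchScript artifact can be reloaded and run without the original model definition:

import torch

loaded = torch.jit.load("model_name.pth")     # path written by TracedModelFactory above
loaded.eval()
with torch.no_grad():
    out = loaded(torch.randn(1, 3, 224, 224))
print(out.shape)                               # torch.Size([1, 1000]) for the resnet18 example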
# cppsimdata.py # written by Michael H. Perrott # with minor modifications by Doug Pastorello to work with both Python 2.7 and Python 3.4 # available at www.cppsim.com as part of the CppSim package # Copyright (c) 2013-2017 by Michael H. Perrott # This file is disributed under the MIT license (see Copying file) import ctypes as ct import numpy as np import sys import os import platform import subprocess as sp import contextlib from scipy.signal import lfilter,welch class CPPSIM_STORAGE_INFO(ct.Structure): _fields_ = [ ('filename',ct.c_char_p), ('num_sigs',ct.c_int), ('num_samples',ct.c_int) ] class CppSimData(object): def __init__(self, filename=None): if filename != None: self.storage_info = CPPSIM_STORAGE_INFO(filename.encode('UTF-8'),0,0) else: self.storage_info = CPPSIM_STORAGE_INFO('None'.encode('UTF-8'),0,0) self.err_msg = ct.create_string_buffer(1000) self.cur_sig_name = ct.create_string_buffer(1000) if sys.platform == 'darwin': home_dir = os.getenv("HOME") arch_val = platform.architecture()[0] cppsimdata_lib_file = home_dir + '/CppSim/CppSimShared/Python/macosx/cppsimdata_lib.so' elif sys.platform == 'win32': cppsimsharedhome = os.getenv("CPPSIMSHAREDHOME") if sys.maxsize == 2147483647: cppsimdata_lib_file = cppsimsharedhome + '/Python/win32/cppsimdata_lib.dll' else: cppsimdata_lib_file = cppsimsharedhome + '/Python/win64/cppsimdata_lib.dll' else: cppsimsharedhome = os.getenv("CPPSIMSHAREDHOME") arch_val = platform.architecture()[0] if arch_val == '64bit': cppsimdata_lib_file = cppsimsharedhome + '/Python/glnxa64/cppsimdata_lib.so' else: cppsimdata_lib_file = cppsimsharedhome + '/Python/glnx86/cppsimdata_lib.so' self.cppsimdata_lib = ct.CDLL(cppsimdata_lib_file) self.cppsimdata_lib.loadsig.argtypes = [ct.POINTER(CPPSIM_STORAGE_INFO), ct.c_char_p] self.cppsimdata_lib.lssig.argtypes = [ct.POINTER(CPPSIM_STORAGE_INFO), ct.c_char_p, ct.c_char_p] self.cppsimdata_lib.evalsig.argtypes = [ct.POINTER(CPPSIM_STORAGE_INFO), ct.c_char_p, ct.POINTER(ct.c_double), ct.c_char_p] self.cppsimdata_lib.initialize() if filename != None: error_flag = self.cppsimdata_lib.loadsig(ct.byref(self.storage_info),self.err_msg) if error_flag == 1: print(self.err_msg.value.decode('UTF-8')) sys.exit() def __repr__(self): return "File: '%s', num_samples = %d, num_sigs = %d"%(self.storage_info.filename, self.storage_info.num_samples, self.storage_info.num_sigs) def loadsig(self,filename): self.storage_info.filename = filename error_flag = self.cppsimdata_lib.loadsig(ct.byref(self.storage_info),self.err_msg) if error_flag == 1: print(self.err_msg.value.decode('UTF-8')) sys.exit() def get_num_samples(self): return self.storage_info.num_samples def get_num_sigs(self): return self.storage_info.num_sigs def get_filename(self): return self.storage_info.filename def lssig(self,print_str_flag=None): sig_list = [] self.cppsimdata_lib.reset_cur_sig_count() for i in range(self.storage_info.num_sigs): error_flag = self.cppsimdata_lib.lssig(ct.byref(self.storage_info),self.cur_sig_name, self.err_msg) if error_flag == 1: print(self.err_msg.value.decode('UTF-8')) sys.exit() if print_str_flag == 'print': print('%d: %s' % (i,self.cur_sig_name.value.decode('UTF-8'))) sig_list.append(self.cur_sig_name.value.decode('UTF-8')) return sig_list def evalsig(self,sig_name): # If the signal name is a string, convert it to a byte array for the interface if (type(sig_name) is str): sig_name = str.encode(sig_name) sig_data = np.zeros(self.storage_info.num_samples) error_flag = self.cppsimdata_lib.evalsig(ct.byref(self.storage_info), sig_name, 
sig_data.ctypes.data_as(ct.POINTER(ct.c_double)),self.err_msg) if error_flag == 1: print(self.err_msg.value.decode('UTF-8')) sys.exit() return sig_data def cppsim_unbuffer_for_print(status, stream='stdout'): newline_chars = ['\r', '\n', '\r\n'] stream = getattr(status, stream) with contextlib.closing(stream): while True: out = [] last = stream.read(1) if last == '' and status.poll() is not None: break while last not in newline_chars: if last == '' and status.poll() is not None: break out.append(last) last = stream.read(1) out = ''.join(out) yield out def cppsim(sim_file="test.par"): if sim_file.find('.par') < 0: sim_file = sim_file + '.par' cppsim_home = os.getenv('CppSimHome') if cppsim_home == None: cppsim_home = os.getenv('CPPSIMHOME') if cppsim_home == None: home = os.getenv('HOME') if sys.platform == 'win32': default_cppsim_home = "%s\\CppSim" % (home) else: default_cppsim_home = "%s/CppSim" % (home) if os.path.isdir(default_cppsim_home): cppsim_home = default_cppsim_home else: print('Error running cppsim from Python: environment variable') print(' CPPSIMHOME is undefined') cppsimshared_home = os.getenv('CppSimSharedHome') if cppsimshared_home == None: cppsimshared_home = os.getenv('CPPSIMSHAREDHOME') if cppsimshared_home == None: if sys.platform == 'win32': default_cppsimshared_home = "%s\\CppSimShared" % (cppsim_home) else: default_cppsimshared_home = "%s/CppSimShared" % (cppsim_home) if os.path.isdir(default_cppsimshared_home): cppsimshared_home = default_cppsimshared_home else: print('Error running cppsim: environment variable') print(' CPPSIMSHAREDHOME is undefined') # print('cppsimhome: %s' % cppsim_home) # print('cppsimsharedhome: %s' % cppsimshared_home) cur_dir = os.getcwd() if sys.platform == 'win32': i = cur_dir.lower().find('\\simruns\\') else: i = cur_dir.lower().find('/simruns/') if i < 0: print('Error running cppsim: you need to run this Python script') print(' in a directory of form:') if sys.platform == 'win32': print(' .....\\SimRuns\\Library_name\\Module_name') else: print(' ...../SimRuns/Library_name/Module_name') print(' -> in this case, you ran in directory:') print(' %s' % cur_dir) sys.exit() library_cell = cur_dir[i+9:1000] if sys.platform == 'win32': i = library_cell.find('\\') else: i = library_cell.find('/') if i < 0: print('Error running cppsim: you need to run this Python script') print(' in a directory of form:') print(' ...../SimRuns/Library_name/Module_name') print(' -> in this case, you ran in directory:') print(' %s' % cur_dir) sys.exit() library_name = library_cell[0:i] cell_name = library_cell[i+1:1000] print("Running CppSim on module '%s' (Lib:'%s'):" % (cell_name, library_name)) print("\n... netlisting ...\n") if sys.platform == 'win32': rp_base = '%s/Sue2/bin/win32/sue_cppsim_netlister' % (cppsimshared_home) else: rp_base = '%s/Sue2/bin/sue_cppsim_netlister' % (cppsimshared_home) rp_arg1 = cell_name rp_arg2 = '%s/Sue2/sue.lib' % (cppsim_home) rp_arg3 = '%s/Netlist/netlist.cppsim' % (cppsim_home) rp = [rp_base, rp_arg1, rp_arg2, rp_arg3] status = sp.Popen(rp, stdout=sp.PIPE, stderr=sp.STDOUT, universal_newlines=True) for line in cppsim_unbuffer_for_print(status): print(line) if status.returncode != 0: print('************** ERROR: exited CppSim run prematurely! ****************') sys.exit() print('\n... 
running net2code ...\n') if sys.platform == 'win32': rp_base = '%s/bin/win32/net2code' % (cppsimshared_home) else: rp_base = '%s/bin/net2code' % (cppsimshared_home) rp_arg1 = '-cpp' rp_arg2 = sim_file rp = [rp_base, rp_arg1, rp_arg2] status = sp.Popen(rp, stdout=sp.PIPE, stderr=sp.STDOUT, universal_newlines=True) for line in cppsim_unbuffer_for_print(status): print(line) if status.returncode != 0: print('************** ERROR: exited CppSim run prematurely! ****************') sys.exit() print('... compiling ...\n') if sys.platform == 'win32': rp_base = '%s/msys/bin/make' % (cppsimshared_home) else: rp_base = 'make' rp = [rp_base] status = sp.Popen(rp, stdout=sp.PIPE, stderr=sp.STDOUT, universal_newlines=True) for line in cppsim_unbuffer_for_print(status): print(line) if status.returncode != 0: print('************** ERROR: exited CppSim run prematurely! ****************') sys.exit() # calculate phase noise: returns frequency (Hz) and specral density (dBc/Hz) def calc_pll_phasenoise(noiseout,Ts): num_segments = 20; window_length = np.floor(noiseout.size/num_segments) Kv = 1.0 phase = lfilter([Ts*2*np.pi*Kv],[1,-1],noiseout-np.mean(noiseout)) # calculate L(f) f, Pxx = welch(phase,1/Ts,'hanning',2**16, None, None, 'constant', False, 'density',-1) # In Matlab: # [Pxx,f] = pwelch(phase,window_length,[],[],1/Ts,'twosided'); # [Pxx,f] = psd(sqrt(Ts)*phase,2^16,1/Ts,2^16,'mean'); Pxx_db = 10.0*np.log10(Pxx) return f, Pxx_db
9,739
3,586
class ResProcessingError(Exception): """The base class for exceptions that occur during resonator processing.""" pass class ResMinIsLeftMost(ResProcessingError): """Raised when the RWHM definition detects that the resonator's minimum is the left-most point""" pass class ResMinIsRightMost(ResProcessingError): """Raised when the RWHM definition detects that the resonator's minimum is the right-most point""" pass class FailedResFit(ResProcessingError): """Raised when the curve fit has a runtime error and the resonator fit fails to converge""" pass # Lambda processing class LambdaProcessingError(Exception): """The base class for exceptions that occur during lambda curve fitting and processing.""" pass class NoDataForCurveFit(LambdaProcessingError): """Empty lists, [], were returned for currentuA and/or freqGHz needed for lambda fitting""" pass class NotEnoughDataForCurveFit(LambdaProcessingError): """Curve fit for lambda fitting has more free parameters than data points""" pass class OptimalParametersNotFoundForCurveFit(LambdaProcessingError): """Optimal parameters not found: number of calls to function has reached maxfev""" pass
1,217
338
import logging from datetime import datetime from typing import List from notify.backends import BackendFactory from notify.commands import Command from notify.config import Config, Stack from notify.notifications import Factory, Notification from notify.strategies import StrategyFactory class MaintainConfig: def __init__(self, stack: Stack, success_template: Notification, failure_template: Notification, logger: logging.Logger): self.__success_template = success_template self.__failure_template = failure_template self.__logger = logger self.__configuration_stack = stack self.__configuration_stack.on_pop += self.__apply_on_pop self.__apply_config(self.__configuration_stack.current) def __apply_config(self, cfg: Config): self.notifications_backend_handler(cfg.notifications_backend.name, *cfg.notifications_backend.args) self.success_title_handler(cfg.success_title) self.success_message_handler(cfg.success_message) self.success_icon_handler(cfg.success_icon) self.success_sound_handler(cfg.success_sound) self.failure_title_handler(cfg.failure_title) self.failure_message_handler(cfg.failure_message) self.failure_icon_handler(cfg.failure_icon) self.failure_sound_handler(cfg.failure_sound) self.command_complete_timeout_handler(*cfg.notifications_strategy.args) self.logging_name_handler(cfg.logger_name) self.logging_level_handler(cfg.logger_level) def __apply_on_pop(self): self.__apply_config(self.__configuration_stack.current) def notifications_backend_handler(self, name: str, *args): selected_backend = self.__configuration_stack.notifications_backend.with_name(name, *args) self.__configuration_stack.notifications_backend = selected_backend def command_complete_timeout_handler(self, t: str): selected_strategy = self.__configuration_stack.notifications_strategy.with_args(int(t)) self.__configuration_stack.notifications_strategy = selected_strategy def success_title_handler(self, title: str): self.__success_template = self.__success_template.with_title(title) self.__configuration_stack.success_title = self.__success_template.title def success_message_handler(self, message: str): self.__success_template = self.__success_template.with_message(message) self.__configuration_stack.success_message = self.__success_template.message def success_icon_handler(self, icon: str): self.__success_template = self.__success_template.with_icon(icon if icon != "" else None) self.__configuration_stack.success_icon = self.__success_template.icon def success_sound_handler(self, sound: str): self.__success_template = self.__success_template.with_sound(sound if sound != "" else None) self.__configuration_stack.success_sound = self.__success_template.sound def failure_title_handler(self, title: str): self.__failure_template = self.__failure_template.with_title(title) self.__configuration_stack.failure_title = self.__failure_template.title def failure_message_handler(self, message: str): self.__failure_template = self.__failure_template.with_message(message) self.__configuration_stack.failure_message = self.__failure_template.message def failure_icon_handler(self, icon: str): self.__failure_template = self.__failure_template.with_icon(icon if icon != "" else None) self.__configuration_stack.failure_icon = self.__failure_template.icon def failure_sound_handler(self, sound: str): self.__failure_template = self.__failure_template.with_sound(sound if sound != "" else None) self.__configuration_stack.failure_sound = self.__failure_template.sound def logging_name_handler(self, new_name: str): self.__logger.name = new_name 
self.__configuration_stack.logger_name = self.__logger.name def logging_level_handler(self, new_level: str): self.__logger.setLevel(new_level) self.__configuration_stack.logger_level = self.__logger.level class Notify: def __init__(self, stack: Stack, notification_factory: Factory, backend_factory: BackendFactory): self.__stack = stack self.__notification_factory = notification_factory self.__backend_factory = backend_factory self.__commands: list = [] def notify(self, message: str, title: str): n = self.__notification_factory.create(message=message, title=title, success=True) self.__backend_factory.create(self.__stack.notifications_backend).notify(n) class NotifyCommandComplete: def __init__(self, stack: Stack, strategy_factory: StrategyFactory, notification_factory: Factory, backend_factory: BackendFactory): self.__stack = stack self.__strategy_factory = strategy_factory self.__notification_factory = notification_factory self.__backend_factory = backend_factory self.__commands: List[Command] = [] def before_command(self, command_line: str): self.__stack.push() self.__commands.append(Command(datetime.now(), command_line)) def after_command(self, exit_code: str): exit_code = int(exit_code) if len(self.__commands) == 0: raise RuntimeError("after_command without a command") cmd = self.__commands.pop() complete_cmd = cmd.complete(exit_code, datetime.now()) if self.__strategy_factory.create(self.__stack.current.notifications_strategy).should_notify(complete_cmd): n = self.__notification_factory.from_command(complete_cmd) self.__backend_factory.create(self.__stack.current.notifications_backend).notify(n) self.__stack.pop()
5,977
1,604
technology = { 'kb': ''' Oculus(rift) HTC(vive) VR(Zuck, rift) VR(Gabe, vive) (Oculus(O) & HTC(H)) ==> Dominates(H, O) (VR(V)) ==> Technology(V) ''', 'queries':''' VR(x) Dominates(x, y) ''', } Examples = { 'technology': technology, }
254
126
class Resolver: def __init__(self): self.resolvers = [] def addResolver(self,res,priority): self.resolvers.append(dict(resolver=res,priority=priority)) self.resolvers.sort(key=lambda x: x["priority"]) def resolve(self,name): for r in [x["resolver"] for x in self.resolvers[::-1]]: success,result = r(name) if success: return result class GlobalVarResolver: def __init__(self,globs): self.globs = globs def __call__(self,name): if name in self.globs: return True, self.globs[name] elif name in dir(self.globs["__builtins__"]): return True, getattr(self.globs["__builtins__"],name) return False, None class FunctionalMapping: def __init__(self,get,set): self.get = get self.set = set def __getitem__(self,k): return self.get(k) def __setitem__(self,k,v): self.set(k,v)
817
330
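Resolver.resolve walks the registered callables from highest to lowest priority and returns the first result whose success flag is True. A hypothetical usage sketch, assuming the Resolver and GlobalVarResolver classes above are in scope (the lambda resolver is invented for illustration):

r = Resolver()
r.addResolver(GlobalVarResolver(globals()), priority=1)            # fall back to globals and builtins
r.addResolver(lambda name: (name == "answer", 42), priority=10)    # consulted first

print(r.resolve("answer"))   # 42, served by the high-priority lambda
print(r.resolve("len"))      # the len builtin, served by GlobalVarResolver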
import logging import multiprocessing import os import time from functools import partial from multiprocessing import Process, Queue, Pool from typing import Iterable import pandas as pd import pyarrow as pa from feast.feature_set import FeatureSet from feast.type_map import convert_dict_to_proto_values from feast.types.FeatureRow_pb2 import FeatureRow from kafka import KafkaProducer from tqdm import tqdm from feast.constants import DATETIME_COLUMN _logger = logging.getLogger(__name__) GRPC_CONNECTION_TIMEOUT_DEFAULT = 3 # type: int GRPC_CONNECTION_TIMEOUT_APPLY = 300 # type: int FEAST_SERVING_URL_ENV_KEY = "FEAST_SERVING_URL" # type: str FEAST_CORE_URL_ENV_KEY = "FEAST_CORE_URL" # type: str BATCH_FEATURE_REQUEST_WAIT_TIME_SECONDS = 300 CPU_COUNT = os.cpu_count() # type: int KAFKA_CHUNK_PRODUCTION_TIMEOUT = 120 # type: int def _kafka_feature_row_producer( feature_row_queue: Queue, row_count: int, brokers, topic, ctx: dict, pbar: tqdm ): """ Pushes Feature Rows to Kafka. Reads rows from a queue. Function will run until total row_count is reached. Args: feature_row_queue: Queue containing feature rows. row_count: Total row count to process brokers: Broker to push to topic: Topic to push to ctx: Context dict used to communicate with primary process pbar: Progress bar object """ # Callback for failed production to Kafka def on_error(e): # Save last exception ctx["last_exception"] = e # Increment error count if "error_count" in ctx: ctx["error_count"] += 1 else: ctx["error_count"] = 1 # Callback for succeeded production to Kafka def on_success(meta): pbar.update() producer = KafkaProducer(bootstrap_servers=brokers) processed_rows = 0 # Loop through feature rows until all rows are processed while processed_rows < row_count: # Wait if queue is empty if feature_row_queue.empty(): time.sleep(1) producer.flush(timeout=KAFKA_CHUNK_PRODUCTION_TIMEOUT) else: while not feature_row_queue.empty(): row = feature_row_queue.get() if row is not None: # Push row to Kafka producer.send(topic, row.SerializeToString()).add_callback( on_success ).add_errback(on_error) processed_rows += 1 # Force an occasional flush if processed_rows % 10000 == 0: producer.flush(timeout=KAFKA_CHUNK_PRODUCTION_TIMEOUT) del row pbar.refresh() # Ensure that all rows are pushed producer.flush(timeout=KAFKA_CHUNK_PRODUCTION_TIMEOUT) # Using progress bar as counter is much faster than incrementing dict ctx["success_count"] = pbar.n pbar.close() def _encode_pa_chunks( tbl: pa.lib.Table, fs: FeatureSet, max_workers: int, df_datetime_dtype: pd.DataFrame.dtypes, chunk_size: int = 5000, ) -> Iterable[FeatureRow]: """ Generator function to encode rows in PyArrow table to FeatureRows by breaking up the table into batches. Each batch will have its rows spread accross a pool of workers to be transformed into FeatureRow objects. Args: tbl: PyArrow table to be processed. fs: FeatureSet describing PyArrow table. max_workers: Maximum number of workers. df_datetime_dtype: Pandas dtype of datetime column. chunk_size: Maximum size of each chunk when PyArrow table is batched. Returns: Iterable FeatureRow object. 
""" pool = Pool(max_workers) # Create a partial function with static non-iterable arguments func = partial( convert_dict_to_proto_values, df_datetime_dtype=df_datetime_dtype, feature_set=fs, ) for batch in tbl.to_batches(max_chunksize=chunk_size): m_df = batch.to_pandas() results = pool.map_async(func, m_df.to_dict("records")) yield from results.get() pool.close() pool.join() return def ingest_table_to_kafka( feature_set: FeatureSet, table: pa.lib.Table, max_workers: int, chunk_size: int = 5000, disable_pbar: bool = False, timeout: int = None, ) -> None: """ Ingest a PyArrow Table to a Kafka topic based for a Feature Set Args: feature_set: FeatureSet describing PyArrow table. table: PyArrow table to be processed. max_workers: Maximum number of workers. chunk_size: Maximum size of each chunk when PyArrow table is batched. disable_pbar: Flag to indicate if tqdm progress bar should be disabled. timeout: Maximum time before method times out """ pbar = tqdm(unit="rows", total=table.num_rows, disable=disable_pbar) # Use a small DataFrame to validate feature set schema ref_df = table.to_batches(max_chunksize=100)[0].to_pandas() df_datetime_dtype = ref_df[DATETIME_COLUMN].dtype # Validate feature set schema _validate_dataframe(ref_df, feature_set) # Create queue through which encoding and production will coordinate row_queue = Queue() # Create a context object to send and receive information across processes ctx = multiprocessing.Manager().dict( {"success_count": 0, "error_count": 0, "last_exception": ""} ) # Create producer to push feature rows to Kafka ingestion_process = Process( target=_kafka_feature_row_producer, args=( row_queue, table.num_rows, feature_set.get_kafka_source_brokers(), feature_set.get_kafka_source_topic(), ctx, pbar, ), ) try: # Start ingestion process print( f"\n(ingest table to kafka) Ingestion started for {feature_set.name}:{feature_set.version}" ) ingestion_process.start() # Iterate over chunks in the table and return feature rows for row in _encode_pa_chunks( tbl=table, fs=feature_set, max_workers=max_workers, chunk_size=chunk_size, df_datetime_dtype=df_datetime_dtype, ): # Push rows onto a queue for the production process to pick up row_queue.put(row) while row_queue.qsize() > chunk_size: time.sleep(0.1) row_queue.put(None) except Exception as ex: _logger.error(f"Exception occurred: {ex}") finally: # Wait for the Kafka production to complete ingestion_process.join(timeout=timeout) failed_message = ( "" if ctx["error_count"] == 0 else f"\nFail: {ctx['error_count']}/{table.num_rows}" ) last_exception_message = ( "" if ctx["last_exception"] == "" else f"\nLast exception:\n{ctx['last_exception']}" ) print( f"\nIngestion statistics:" f"\nSuccess: {ctx['success_count']}/{table.num_rows}" f"{failed_message}" f"{last_exception_message}" ) def _validate_dataframe(dataframe: pd.DataFrame, feature_set: FeatureSet): """ Validates a Pandas dataframe based on a feature set Args: dataframe: Pandas dataframe feature_set: Feature Set instance """ if "datetime" not in dataframe.columns: raise ValueError( f'Dataframe does not contain entity "datetime" in columns {dataframe.columns}' ) for entity in feature_set.entities: if entity.name not in dataframe.columns: raise ValueError( f"Dataframe does not contain entity {entity.name} in columns {dataframe.columns}" ) for feature in feature_set.features: if feature.name not in dataframe.columns: raise ValueError( f"Dataframe does not contain feature {feature.name} in columns {dataframe.columns}" )
8,113
2,426
import collections class Solution: def findTheDifference(self, s, t): """ :type s: str :type t: str :rtype: str """ scount, tcount = collections.Counter(s), collections.Counter(t) for ch in tcount: if tcount[ch] > scount[ch]: return ch s = "abcd" t = "abcde" p = Solution() print(p.findTheDifference(s, t))
389
127
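The Counter comparison above runs in O(n) time and O(n) space. As an aside, the extra character can also be found in O(1) space by XOR-ing the code points of both strings; the sketch below shows that alternative technique, which is not what the snippet above does:

from functools import reduce
from operator import xor

def find_the_difference_xor(s: str, t: str) -> str:
    # every character that appears in both strings cancels out; only the extra one survives
    return chr(reduce(xor, map(ord, s + t), 0))

print(find_the_difference_xor("abcd", "abcde"))  # 'e'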
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities __all__ = ['BackendAddressPoolAddressArgs', 'BackendAddressPoolAddress'] @pulumi.input_type class BackendAddressPoolAddressArgs: def __init__(__self__, *, backend_address_pool_id: pulumi.Input[str], ip_address: pulumi.Input[str], virtual_network_id: pulumi.Input[str], name: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a BackendAddressPoolAddress resource. :param pulumi.Input[str] backend_address_pool_id: The ID of the Backend Address Pool. Changing this forces a new Backend Address Pool Address to be created. :param pulumi.Input[str] ip_address: The Static IP Address which should be allocated to this Backend Address Pool. :param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network within which the Backend Address Pool should exist. :param pulumi.Input[str] name: The name which should be used for this Backend Address Pool Address. Changing this forces a new Backend Address Pool Address to be created. """ pulumi.set(__self__, "backend_address_pool_id", backend_address_pool_id) pulumi.set(__self__, "ip_address", ip_address) pulumi.set(__self__, "virtual_network_id", virtual_network_id) if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter(name="backendAddressPoolId") def backend_address_pool_id(self) -> pulumi.Input[str]: """ The ID of the Backend Address Pool. Changing this forces a new Backend Address Pool Address to be created. """ return pulumi.get(self, "backend_address_pool_id") @backend_address_pool_id.setter def backend_address_pool_id(self, value: pulumi.Input[str]): pulumi.set(self, "backend_address_pool_id", value) @property @pulumi.getter(name="ipAddress") def ip_address(self) -> pulumi.Input[str]: """ The Static IP Address which should be allocated to this Backend Address Pool. """ return pulumi.get(self, "ip_address") @ip_address.setter def ip_address(self, value: pulumi.Input[str]): pulumi.set(self, "ip_address", value) @property @pulumi.getter(name="virtualNetworkId") def virtual_network_id(self) -> pulumi.Input[str]: """ The ID of the Virtual Network within which the Backend Address Pool should exist. """ return pulumi.get(self, "virtual_network_id") @virtual_network_id.setter def virtual_network_id(self, value: pulumi.Input[str]): pulumi.set(self, "virtual_network_id", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name which should be used for this Backend Address Pool Address. Changing this forces a new Backend Address Pool Address to be created. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @pulumi.input_type class _BackendAddressPoolAddressState: def __init__(__self__, *, backend_address_pool_id: Optional[pulumi.Input[str]] = None, ip_address: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, virtual_network_id: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering BackendAddressPoolAddress resources. :param pulumi.Input[str] backend_address_pool_id: The ID of the Backend Address Pool. Changing this forces a new Backend Address Pool Address to be created. 
:param pulumi.Input[str] ip_address: The Static IP Address which should be allocated to this Backend Address Pool. :param pulumi.Input[str] name: The name which should be used for this Backend Address Pool Address. Changing this forces a new Backend Address Pool Address to be created. :param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network within which the Backend Address Pool should exist. """ if backend_address_pool_id is not None: pulumi.set(__self__, "backend_address_pool_id", backend_address_pool_id) if ip_address is not None: pulumi.set(__self__, "ip_address", ip_address) if name is not None: pulumi.set(__self__, "name", name) if virtual_network_id is not None: pulumi.set(__self__, "virtual_network_id", virtual_network_id) @property @pulumi.getter(name="backendAddressPoolId") def backend_address_pool_id(self) -> Optional[pulumi.Input[str]]: """ The ID of the Backend Address Pool. Changing this forces a new Backend Address Pool Address to be created. """ return pulumi.get(self, "backend_address_pool_id") @backend_address_pool_id.setter def backend_address_pool_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "backend_address_pool_id", value) @property @pulumi.getter(name="ipAddress") def ip_address(self) -> Optional[pulumi.Input[str]]: """ The Static IP Address which should be allocated to this Backend Address Pool. """ return pulumi.get(self, "ip_address") @ip_address.setter def ip_address(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ip_address", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name which should be used for this Backend Address Pool Address. Changing this forces a new Backend Address Pool Address to be created. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="virtualNetworkId") def virtual_network_id(self) -> Optional[pulumi.Input[str]]: """ The ID of the Virtual Network within which the Backend Address Pool should exist. """ return pulumi.get(self, "virtual_network_id") @virtual_network_id.setter def virtual_network_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "virtual_network_id", value) class BackendAddressPoolAddress(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, backend_address_pool_id: Optional[pulumi.Input[str]] = None, ip_address: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, virtual_network_id: Optional[pulumi.Input[str]] = None, __props__=None): """ Manages a Backend Address within a Backend Address Pool. > **Note:** Backend Addresses can only be added to a `Standard` SKU Load Balancer. ## Example Usage ```python import pulumi import pulumi_azure as azure example_virtual_network = azure.network.get_virtual_network(name="example-network", resource_group_name="example-resources") example_lb = azure.lb.get_lb(name="example-lb", resource_group_name="example-resources") example_backend_address_pool = azure.lb.get_backend_address_pool(name="first", loadbalancer_id=example_lb.id) example_backend_address_pool_address = azure.lb.BackendAddressPoolAddress("exampleBackendAddressPoolAddress", backend_address_pool_id=example_backend_address_pool.id, virtual_network_id=example_virtual_network.id, ip_address="10.0.0.1") ``` ## Import Backend Address Pool Addresses can be imported using the `resource id`, e.g. 
```sh $ pulumi import azure:lb/backendAddressPoolAddress:BackendAddressPoolAddress example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/loadBalancers/loadBalancer1/backendAddressPools/backendAddressPool1/addresses/address1 ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] backend_address_pool_id: The ID of the Backend Address Pool. Changing this forces a new Backend Address Pool Address to be created. :param pulumi.Input[str] ip_address: The Static IP Address which should be allocated to this Backend Address Pool. :param pulumi.Input[str] name: The name which should be used for this Backend Address Pool Address. Changing this forces a new Backend Address Pool Address to be created. :param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network within which the Backend Address Pool should exist. """ ... @overload def __init__(__self__, resource_name: str, args: BackendAddressPoolAddressArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Manages a Backend Address within a Backend Address Pool. > **Note:** Backend Addresses can only be added to a `Standard` SKU Load Balancer. ## Example Usage ```python import pulumi import pulumi_azure as azure example_virtual_network = azure.network.get_virtual_network(name="example-network", resource_group_name="example-resources") example_lb = azure.lb.get_lb(name="example-lb", resource_group_name="example-resources") example_backend_address_pool = azure.lb.get_backend_address_pool(name="first", loadbalancer_id=example_lb.id) example_backend_address_pool_address = azure.lb.BackendAddressPoolAddress("exampleBackendAddressPoolAddress", backend_address_pool_id=example_backend_address_pool.id, virtual_network_id=example_virtual_network.id, ip_address="10.0.0.1") ``` ## Import Backend Address Pool Addresses can be imported using the `resource id`, e.g. ```sh $ pulumi import azure:lb/backendAddressPoolAddress:BackendAddressPoolAddress example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/loadBalancers/loadBalancer1/backendAddressPools/backendAddressPool1/addresses/address1 ``` :param str resource_name: The name of the resource. :param BackendAddressPoolAddressArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(BackendAddressPoolAddressArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, backend_address_pool_id: Optional[pulumi.Input[str]] = None, ip_address: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, virtual_network_id: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = BackendAddressPoolAddressArgs.__new__(BackendAddressPoolAddressArgs) if backend_address_pool_id is None and not opts.urn: raise TypeError("Missing required property 'backend_address_pool_id'") __props__.__dict__["backend_address_pool_id"] = backend_address_pool_id if ip_address is None and not opts.urn: raise TypeError("Missing required property 'ip_address'") __props__.__dict__["ip_address"] = ip_address __props__.__dict__["name"] = name if virtual_network_id is None and not opts.urn: raise TypeError("Missing required property 'virtual_network_id'") __props__.__dict__["virtual_network_id"] = virtual_network_id super(BackendAddressPoolAddress, __self__).__init__( 'azure:lb/backendAddressPoolAddress:BackendAddressPoolAddress', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, backend_address_pool_id: Optional[pulumi.Input[str]] = None, ip_address: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, virtual_network_id: Optional[pulumi.Input[str]] = None) -> 'BackendAddressPoolAddress': """ Get an existing BackendAddressPoolAddress resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] backend_address_pool_id: The ID of the Backend Address Pool. Changing this forces a new Backend Address Pool Address to be created. :param pulumi.Input[str] ip_address: The Static IP Address which should be allocated to this Backend Address Pool. :param pulumi.Input[str] name: The name which should be used for this Backend Address Pool Address. Changing this forces a new Backend Address Pool Address to be created. :param pulumi.Input[str] virtual_network_id: The ID of the Virtual Network within which the Backend Address Pool should exist. 
""" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _BackendAddressPoolAddressState.__new__(_BackendAddressPoolAddressState) __props__.__dict__["backend_address_pool_id"] = backend_address_pool_id __props__.__dict__["ip_address"] = ip_address __props__.__dict__["name"] = name __props__.__dict__["virtual_network_id"] = virtual_network_id return BackendAddressPoolAddress(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="backendAddressPoolId") def backend_address_pool_id(self) -> pulumi.Output[str]: """ The ID of the Backend Address Pool. Changing this forces a new Backend Address Pool Address to be created. """ return pulumi.get(self, "backend_address_pool_id") @property @pulumi.getter(name="ipAddress") def ip_address(self) -> pulumi.Output[str]: """ The Static IP Address which should be allocated to this Backend Address Pool. """ return pulumi.get(self, "ip_address") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ The name which should be used for this Backend Address Pool Address. Changing this forces a new Backend Address Pool Address to be created. """ return pulumi.get(self, "name") @property @pulumi.getter(name="virtualNetworkId") def virtual_network_id(self) -> pulumi.Output[str]: """ The ID of the Virtual Network within which the Backend Address Pool should exist. """ return pulumi.get(self, "virtual_network_id")
16,580
4,672
# -*- coding: utf-8 -*- import pickle import os # third-party imports import jsonpickle class Submission: def __init__(self): # Source is either Tumblr or Reddit self.source = u'' self.title = u'' self.author = u'' self.subreddit = u'' self.subredditTitle = u'' self.body = u'' self.bodyUrl = u'' self.postUrl = u'' def getXML(self): baseString = (u'\t<source>' + self.source + u'</source>\n' + u'\t<title>' + self.title + u'</title>\n' + u'\t<author>' + self.author + u'</author>\n' + u'\t<subreddit>' + self.subreddit + u'</subreddit>\n' + u'\t<subredditTitle>' + self.subredditTitle + u'</subredditTitle>\n' + u'\t<body>' + self.body + u'</body>\n' + u'\t<bodyUrl>' + self.bodyUrl + u'</bodyUrl>\n' + u'\t<postUrl>' + self.postUrl + u'</postUrl>\n') return str(baseString) def getHtml(self): baseString = (u'\t<p>' + self.source + u'</p>\n' + u'\t<h2>' + self.title + u'</h2>\n' + u'\t<h3>' + self.author + u'</h3>\n' + u'\t<h4>' + self.subreddit + u'</h4>\n' + u'\t<h4>' + self.subredditTitle + u'</h4>\n' + u'\t<p>' + self.body + u'</p>\n' # + u'\t<p>' + self.bodyUrl + u'</p>\n' + u'\t<a href=' + self.postUrl + u'/>Link</a><br /><br />\n') return baseString def getJson(self): jsonpickle.set_preferred_backend('json') jsonpickle.set_encoder_options('json', ensure_ascii=False, indent=4, separators=(',', ': ')) return jsonpickle.encode(self) def getAsList(self): return [self.source, self.title, self.author, self.subreddit, self.subredditTitle, self.body, self.bodyUrl, self.postUrl] def initFromDict(self, dictEntry): self.source = dictEntry['source'] self.title = dictEntry['title'] self.author = dictEntry['author'] self.subreddit = dictEntry['subreddit'] self.subredditTitle = dictEntry['subredditTitle'] self.body = dictEntry['body'] self.bodyUrl = dictEntry['bodyUrl'] self.postUrl = dictEntry['postUrl'] def getAsList_generator(submissions): for submission in submissions: yield submission.getAsList() def writeOutSubmissionsAsJson(redditList, file): file.write('{\n'.encode('utf8')) for submission in redditList: outputString = submission.getJson() + u',\n' file.write(outputString.encode('utf8')) file.write('}'.encode('utf8')) def saveSubmissionsAsJson(submissions, fileName): outputFile = open(fileName, 'wb') writeOutSubmissionsAsJson(submissions, outputFile) outputFile.close() def writeOutSubmissionsAsHtml(redditList, file): submissionsStr = "" for submission in redditList: submissionsStr += submission.getHtml() + u'\n' htmlStructure = u"""<!doctype html> <html lang="en"> <head> <meta charset="utf-8"> <title>Reddit Saved Comments</title> </head> <body> {0} </body> </html> """.format(submissionsStr) file.write(htmlStructure.encode('utf8')) def saveSubmissionsAsHtml(submissions, fileName): outputFile = open(fileName, 'wb') writeOutSubmissionsAsHtml(submissions, outputFile) outputFile.close() def writeOutSubmissionsAsXML(redditList, file): for submission in redditList: outputString = u'<submission>\n' + submission.getXML() + u'</submission>\n' file.write(outputString.encode('utf8')) def saveSubmissionsAsXML(submissions, fileName): outputFile = open(fileName, 'wb') writeOutSubmissionsAsXML(submissions, outputFile) outputFile.close() def writeCacheSubmissions(submissions, cacheFileName): cacheFile = open(cacheFileName, 'wb') pickle.dump(submissions, cacheFile) def readCacheSubmissions(cacheFileName): if os.path.exists(cacheFileName): cacheFile = open(cacheFileName, 'rb') submissions = pickle.load(cacheFile) return submissions else: return []
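A small usage sketch for the Submission helpers above; the field values and output file names are made-up examples.

example = Submission()
example.source = u'reddit'
example.title = u'Example post'
example.author = u'example_user'
example.subreddit = u'example'
example.subredditTitle = u'Example Subreddit'
example.body = u'Body text of the saved post.'
example.postUrl = u'https://example.com/post/1'

print(example.getJson())
saveSubmissionsAsHtml([example], 'saved.html')
saveSubmissionsAsXML([example], 'saved.xml')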
4,136
1,406
#!/usr/bin/env python3.7 from decimal import Decimal from collections import namedtuple EventPrizeLevel = namedtuple( "EventPrizeLevel", ["packs", "gems", "gold"], defaults=[0, 0, 0], )
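A quick usage sketch showing how the field defaults fill in unspecified values; the prize amounts are invented.

first_place = EventPrizeLevel(packs=6, gems=1000)
consolation = EventPrizeLevel(gold=500)

print(first_place)  # EventPrizeLevel(packs=6, gems=1000, gold=0)
print(consolation)  # EventPrizeLevel(packs=0, gems=0, gold=500)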
191
67
""" Escreva um programa que faça o computador 'Pensar' em um número inteiro entre 0 e 5 e peça para o usuário tentar descobrir qual foi o número escolhido pelo computador. """ from random import randint numero_gerado_aleatoriamente = randint(0,5) numero_digitado_pelo_usuario = int(input('Adivinhe qual número estou pensando, uma dica: é entre 0 e 5! ')) if numero_digitado_pelo_usuario == numero_gerado_aleatoriamente: print(f'VOCÊ ACERTOU! O número que estava pensando era mesmo o {numero_gerado_aleatoriamente}!') else: print(f'Você errou! O número que pensei era {numero_gerado_aleatoriamente}')
621
224
import torch.nn as nn import torch.nn.functional as F from torch_geometric.nn import GCNConv, GATConv class GCN(nn.Module): def __init__(self, input_dim, hidden_dim, output_dim, dropout=0.5): super(GCN, self).__init__() self.dropout = dropout self.conv1 = GCNConv(input_dim, hidden_dim) self.conv2 = GCNConv(hidden_dim, output_dim) def forward(self, x, edge_index): x = self.conv1(x, edge_index) x = F.relu(x) x = F.dropout(x, p=self.dropout, training=self.training) x = self.conv2(x, edge_index) return F.log_softmax(x, dim=1) class GAT(nn.Module): def __init__(self, input_dim, hidden_dim, output_dim, heads_1=8, heads_2=1, att_dropout=0.6, input_dropout=0.6): super(GAT, self).__init__() self.att_dropout = att_dropout self.input_dropout = input_dropout self.conv1 = GATConv(in_channels=input_dim, out_channels=hidden_dim // heads_1, heads=heads_1, concat=True, dropout=att_dropout) self.conv2 = GATConv(in_channels=hidden_dim, out_channels=output_dim, heads=heads_2, concat=False, dropout=att_dropout) def forward(self, x, edge_index): x = F.dropout(x, p=self.input_dropout, training=self.training) x = self.conv1(x, edge_index) x = F.elu(x) x = F.dropout(x, p=self.input_dropout, training=self.training) x = self.conv2(x, edge_index) return F.log_softmax(x, dim=1)
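A minimal smoke-test sketch for the GCN model above on a tiny random graph; the feature dimension, hidden size, number of classes, and edge list are arbitrary choices.

import torch

# Tiny random graph: 4 nodes with 16-dim features and a few directed edges.
x = torch.randn(4, 16)
edge_index = torch.tensor([[0, 1, 2, 3],
                           [1, 0, 3, 2]], dtype=torch.long)

model = GCN(input_dim=16, hidden_dim=32, output_dim=3)
model.eval()
with torch.no_grad():
    log_probs = model(x, edge_index)
print(log_probs.shape)  # torch.Size([4, 3])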
1,699
594
#! /usr/bin/env python3 # -*- coding: utf-8 -*- # File : scene_graph.py # Author : Jiayuan Mao # Email : maojiayuan@gmail.com # Date : 07/19/2018 # # This file is part of NSCL-PyTorch. # Distributed under terms of the MIT license. """ Scene Graph generation. """ import os import torch import torch.nn as nn import jactorch import jactorch.nn as jacnn from . import functional DEBUG = bool(int(os.getenv('DEBUG_SCENE_GRAPH', 0))) __all__ = ['SceneGraph'] class SceneGraph(nn.Module): def __init__(self, feature_dim, output_dims, downsample_rate): super().__init__() self.pool_size = 7 self.feature_dim = feature_dim self.output_dims = output_dims self.downsample_rate = downsample_rate self.object_roi_pool = jacnn.PrRoIPool2D(self.pool_size, self.pool_size, 1.0 / downsample_rate) self.context_roi_pool = jacnn.PrRoIPool2D(self.pool_size, self.pool_size, 1.0 / downsample_rate) self.relation_roi_pool = jacnn.PrRoIPool2D(self.pool_size, self.pool_size, 1.0 / downsample_rate) if not DEBUG: self.context_feature_extract = nn.Conv2d(feature_dim, feature_dim, 1) self.relation_feature_extract = nn.Conv2d(feature_dim, feature_dim // 2 * 3, 1) self.object_feature_fuse = nn.Conv2d(feature_dim * 2, output_dims[1], 1) self.relation_feature_fuse = nn.Conv2d(feature_dim // 2 * 3 + output_dims[1] * 2, output_dims[2], 1) self.object_feature_fc = nn.Sequential(nn.ReLU(True), nn.Linear(output_dims[1] * self.pool_size ** 2, output_dims[1])) self.relation_feature_fc = nn.Sequential(nn.ReLU(True), nn.Linear(output_dims[2] * self.pool_size ** 2, output_dims[2])) self.reset_parameters() else: def gen_replicate(n): def rep(x): return torch.cat([x for _ in range(n)], dim=1) return rep self.pool_size = 32 self.object_roi_pool = jacnn.PrRoIPool2D(32, 32, 1.0 / downsample_rate) self.context_roi_pool = jacnn.PrRoIPool2D(32, 32, 1.0 / downsample_rate) self.relation_roi_pool = jacnn.PrRoIPool2D(32, 32, 1.0 / downsample_rate) self.context_feature_extract = gen_replicate(2) self.relation_feature_extract = gen_replicate(3) self.object_feature_fuse = jacnn.Identity() self.relation_feature_fuse = jacnn.Identity() def reset_parameters(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight.data) m.bias.data.zero_() elif isinstance(m, nn.Linear): nn.init.kaiming_normal_(m.weight.data) m.bias.data.zero_() def forward(self, input, objects, objects_length): """qian: to thoroughly understand the meanings of object_features, context_features, relation_features, i mean, the semantic meaning, i'd better go back to the paper itself.""" object_features = input # qian: (32, 256, 16, 24) context_features = self.context_feature_extract(input) # qian: (32, 256, 16, 24) relation_features = self.relation_feature_extract(input) # qian: (32, 384, 16, 24) outputs = list() objects_index = 0 for i in range(input.size(0)): """qian: iterate through every instance in the input batch.""" box = objects[objects_index:objects_index + objects_length[i].item()] # qian: (3, 4) [3 objects, 4 for bb]. 
objects_index += objects_length[i].item() with torch.no_grad(): batch_ind = i + torch.zeros(box.size(0), 1, dtype=box.dtype, device=box.device) # generate a "full-image" bounding box image_h, image_w = input.size(2) * self.downsample_rate, input.size(3) * self.downsample_rate image_box = torch.cat([ torch.zeros(box.size(0), 1, dtype=box.dtype, device=box.device), torch.zeros(box.size(0), 1, dtype=box.dtype, device=box.device), image_w + torch.zeros(box.size(0), 1, dtype=box.dtype, device=box.device), image_h + torch.zeros(box.size(0), 1, dtype=box.dtype, device=box.device) ], dim=-1) # qian: this box contains the entire image. # meshgrid to obtain the subject and object bounding boxes """qian: i don't perfectly understand the meaning of meshgrid, but the idea is to obtain all the combinations of multiple bounding boxes (here is 2).""" sub_id, obj_id = jactorch.meshgrid(torch.arange(box.size(0), dtype=torch.int64, device=box.device), dim=0) sub_id, obj_id = sub_id.contiguous().view(-1), obj_id.contiguous().view(-1) sub_box, obj_box = jactorch.meshgrid(box, dim=0) sub_box = sub_box.contiguous().view(box.size(0) ** 2, 4) obj_box = obj_box.contiguous().view(box.size(0) ** 2, 4) # union box """qian: union_box (9, 4), including all 9 possible bounding box pairs' union. The union means the set union operation.""" union_box = functional.generate_union_box(sub_box, obj_box) rel_batch_ind = i + torch.zeros(union_box.size(0), 1, dtype=box.dtype, device=box.device) # intersection maps # qian: (3, 1, 7, 7). crop the object ROI. box_context_imap = functional.generate_intersection_map(box, image_box, self.pool_size) # qian: (9, 1, 7, 7). crop ordered object ROI in each pair. sub_union_imap = functional.generate_intersection_map(sub_box, union_box, self.pool_size) # qian: (9, 1, 7, 7). crop ordered object ROI in each pair. obj_union_imap = functional.generate_intersection_map(obj_box, union_box, self.pool_size) this_context_features = self.context_roi_pool(context_features, torch.cat([batch_ind, image_box], dim=-1)) x, y = this_context_features.chunk(2, dim=1) this_object_features = self.object_feature_fuse(torch.cat([ self.object_roi_pool(object_features, torch.cat([batch_ind, box], dim=-1)), x, y * box_context_imap ], dim=1)) this_relation_features = self.relation_roi_pool(relation_features, torch.cat([rel_batch_ind, union_box], dim=-1)) x, y, z = this_relation_features.chunk(3, dim=1) this_relation_features = self.relation_feature_fuse(torch.cat([ this_object_features[sub_id], this_object_features[obj_id], x, y * sub_union_imap, z * obj_union_imap ], dim=1)) if DEBUG: outputs.append([ None, this_object_features, this_relation_features ]) else: outputs.append([ None, self._norm(self.object_feature_fc(this_object_features.view(box.size(0), -1))), self._norm( self.relation_feature_fc(this_relation_features.view(box.size(0) * box.size(0), -1)).view( box.size(0), box.size(0), -1)) ]) return outputs def _norm(self, x): return x / x.norm(2, dim=-1, keepdim=True)
7,740
2,553
class Room (object): def __init__(self, name, xl, yl, layout): self.name = str(name) self.xl = int(xl) self.yl = int(yl) self.layout = layout def load_room_file(file): roomfile = open(file, "r") roomlist = [] linelist = [] for line in roomfile: linelist.append(line) while linelist[0] != "STOP": temproomformat = [] for line in range(0, int(linelist[1])): temproomformat.append([]) for tile in range(0, int(linelist[2])): temproomformat[-1].append(linelist[3+line][tile]) roomlist.append(Room(linelist[0], int(linelist[1]), int(linelist[2]), temproomformat)) for x in range(4+int(linelist[2])): del(linelist[0]) return roomlist
789
284
import re from abc import ABCMeta from dateutil import parser class BaseData(metaclass=ABCMeta): def __init__(self): self._lastmod = None self._loc = None @property def lastmod(self): return self._lastmod @lastmod.setter def lastmod(self, value): self._lastmod = parser.isoparse(value) if value is not None else None @property def loc(self): return self._loc @loc.setter def loc(self, value): value = str(value) if not re.match('http[s]?://', value): raise ValueError("{} does not match a url".format(value)) self._loc = value
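A short sketch of how the validating properties above behave; the subclass name, URL, and timestamp are placeholders.

class PageData(BaseData):
    pass


page = PageData()
page.loc = "https://example.com/index.html"
page.lastmod = "2021-06-01T12:00:00+00:00"
print(page.loc, page.lastmod)

try:
    page.loc = "ftp://example.com/file"  # rejected: not an http(s) URL
except ValueError as err:
    print(err)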
645
198
import unittest import networkx as nx from core.placement.spsolver import DPShortestPathSolver class TestShorthestPathSolverMethods(unittest.TestCase): def setUp(self): self.g1 = nx.read_weighted_edgelist('tests/test-graph_1.txt', create_using=nx.MultiDiGraph, nodetype=int) def test_shortest_path(self): u = 0 v = 3 k = 2 weight_shortest_path = 9 (weight, path) = DPShortestPathSolver.shortest_path(self.g1, u, v, k) self.assertEqual(weight, weight_shortest_path) self.assertEqual(path, [0, 2, 3]) if __name__ == '__main__': unittest.main()
630
232
""" # INTEGER BREAK Given a positive integer n, break it into the sum of at least two positive integers and maximize the product of those integers. Return the maximum product you can get. Example 1: Input: 2 Output: 1 Explanation: 2 = 1 + 1, 1 × 1 = 1. Example 2: Input: 10 Output: 36 Explanation: 10 = 3 + 3 + 4, 3 × 3 × 4 = 36. Note: You may assume that n is not less than 2 and not larger than 58. """ class Solution: def integerBreak(self, n: int) -> int: if n == 0: return 0 if n == 1: return 1 if n == 2: return 1 if n == 3: return 2 # 0 to 3 are special cases beacuse they will produce a result less than their value. We can't use that lesser value in the other calculations known = {0: 0, 1: 1, 2: 2, 3: 3} return self.breakDown(n, known) def breakDown(self, n, known): if n in known: return known[n] else: maximum = 0 for x in range(1, n // 2 + 1): p1 = self.breakDown(x, known) p2 = self.breakDown(n - x, known) maximum = max(maximum, p1 * p2) known[n] = maximum return known[n]
1,285
437
#!/usr/bin/env python from distutils.core import setup SHORT_DESCR = "CAmera MOtion COMPensation using image stiching techniques to generate stabilized videos" try: LONG_DESCR = open('README.rst').read() except IOError: LONG_DESCR = SHORT_DESCR setup( name='camocomp', version='0.1', author='Adrien Gaidon', author_email='easy_to_guess@googleme.com', keywords='camera motion compensation, video stabilization, stitching, opencv, hugin', packages=['camocomp'], url='http://pypi.python.org/pypi/camocomp/', license='New BSD License', description=SHORT_DESCR, long_description=LONG_DESCR, platforms=["Linux"], requires=['numpy', 'ffmpeg', 'cv2', 'hsi'], scripts=['scripts/camocomp_video'], classifiers=[ 'Intended Audience :: Science/Research', 'Intended Audience :: Developers', 'License :: OSI Approved', 'Programming Language :: Python', 'Topic :: Software Development', 'Topic :: Scientific/Engineering', 'Operating System :: POSIX :: Linux', 'Operating System :: Unix', ] )
1,112
368
from flask import Flask, render_template

app = Flask(__name__)


@app.route('/')
def index():
    li = [1, 2, 3, 4, 5, 6, 7]
    return render_template('filter.html', li=li)


@app.template_filter('li_rv2')  # Register the filter, method 2: decorator
def li_reverse(li):
    res = list(li)
    res.reverse()
    return res

# app.add_template_filter(li_reverse, 'li_rv')  # Register the filter, method 1: explicit call

if __name__ == "__main__":
    app.run(debug=True)
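A small sketch of exercising the registered filter without a real template file; the inline template string stands in for the (not shown) filter.html.

from flask import render_template_string

with app.app_context():
    print(render_template_string("{{ li | li_rv2 }}", li=[1, 2, 3]))  # [3, 2, 1]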
408
187
"""""" import matplotlib as mpl __all__ = ["set"] def set(tick_scale=1, rc=dict()): """ Control plot style and scaling using seaborn and the matplotlib rcParams interface. :param tick_scale: A scaler number controling the spacing on tick marks, defaults to 1. :type tick_scale: float :param rc: Additional settings to pass to rcParams. :type rc: dict """ rc_log_defaults = { 'xtick.major.size': 10. * tick_scale, 'xtick.minor.size': 6. * tick_scale, 'ytick.major.size': 10. * tick_scale, 'ytick.minor.size': 6. * tick_scale, 'xtick.color': '0.0', 'ytick.color': '0.0', 'axes.linewidth': 1.75, 'mathtext.default': 'regular' } mpl.rcParams.update(dict(rc_log_defaults, **rc))
801
294
# Method 1: function returning a lambda closure
def mul1(a1):
    return lambda b1: b1 * a1

myresult = mul1(3)
print(myresult(7))  # 21

# Method 2: lambda with a default arg returning a lambda
mul = lambda a=3: (lambda b: a * b)
myres = mul()
print(myres)     # the inner lambda object
print(myres(7))  # 21
169
85
from enum import Enum, auto from typing import NamedTuple, Optional class Parameter(NamedTuple): class Kind(Enum): ARG = auto() VARARG = auto() KWARG = auto() name: str annotation: Optional[str] kind: Kind def __eq__(self, other: "Parameter") -> bool: if not isinstance(other, self.__class__): return False return self.name == other.name
414
125
# -*- coding: utf-8 -*- import numpy as np from odbAccess import * from abaqusConstants import * filename = 'Job-4e-SS-Pulse' """ LOAD DATA =============================================================================== """ results = np.load(filename + '.npz') vonMisesMax = results['vonMisesMax'].transpose() vonMisesMin = results['vonMisesMin'].transpose() vonMisesStatic = results['vonMisesStatic'].transpose() nodeNum = results['nodeNum'].transpose() nodeCoord = results['nodeCoord'] # Sort nodeCoord on nodal values nodeCoord = nodeCoord[nodeCoord[:,0].argsort()] # Calculate Mean and Amplitude vonMisesAmp = (vonMisesMax - vonMisesMin)/2 vonMisesMean = (vonMisesMax + vonMisesMin)/2 """ LOAD ODB =============================================================================== """ odb = openOdb(filename+'.odb',readOnly=False) # Get Instance allInstances = (odb.rootAssembly.instances.keys()) odbInstance = odb.rootAssembly.instances[allInstances[-1]] """ FORMAT AND SAVE DATA TO ODB =============================================================================== """ vMNodes = np.ascontiguousarray(nodeNum, dtype=np.int32) vMMax = np.ascontiguousarray(np.reshape(vonMisesMax,(-1,1)), dtype=np.float32) vMMin = np.ascontiguousarray(np.reshape(vonMisesMin,(-1,1)), dtype=np.float32) vMStatic = np.ascontiguousarray(np.reshape(vonMisesStatic,(-1,1)), dtype=np.float32) vMMean = np.ascontiguousarray(np.reshape(vonMisesMean,(-1,1)), dtype=np.float32) vMAmp = np.ascontiguousarray(np.reshape(vonMisesAmp,(-1,1)), dtype=np.float32) newFieldOutputMax = odb.steps['Step-6-Response'].frames[-1].FieldOutput(name = 'vMMax', description = 'Max Signed von Mises', type = SCALAR) newFieldOutputMax.addData(position=NODAL, instance = odbInstance, labels = vMNodes, data = vMMax.tolist()) newFieldOutputMin = odb.steps['Step-6-Response'].frames[-1].FieldOutput(name = 'vMMin', description = 'Min Signed von Mises', type = SCALAR) newFieldOutputMin.addData(position=NODAL, instance = odbInstance, labels = vMNodes, data = vMMin.tolist()) newFieldOutputMStatic = odb.steps['Step-6-Response'].frames[-1].FieldOutput(name = 'vMStatic', description = 'Static Signed von Mises', type = SCALAR) newFieldOutputMStatic.addData(position=NODAL, instance = odbInstance, labels = vMNodes, data = vMStatic.tolist()) newFieldOutputMean = odb.steps['Step-6-Response'].frames[-1].FieldOutput(name = 'vMMean', description = 'Signed von Mises Mean', type = SCALAR) newFieldOutputMean.addData(position=NODAL, instance = odbInstance, labels = vMNodes, data = vMMean.tolist()) newFieldOutputAmp = odb.steps['Step-6-Response'].frames[-1].FieldOutput(name = 'vMAmp', description = 'Signed von Mises Amplitude', type = SCALAR) newFieldOutputAmp.addData(position=NODAL, instance = odbInstance, labels = vMNodes, data = vMAmp.tolist()) """ SAVE AND CLOSE =============================================================================== """ odb.save() odb.close()
2,961
1,020
""" SSH reimplementation in Python, made by Unazed Spectaculum under the MIT license """ import socket import struct class SSH(object): """ Abstracted interface for secure-shell protocol with underlying TCP structure """ def __init__(self, host_ip, hostname, host_port=22, version="SSH-2.0"): self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.socket.bind((host_ip, host_port)) self.version = version self.hostname = hostname self.qualified_name = "%s-%s\r\n" % (version, hostname) def listen(self, backlog=1): self.socket.listen(backlog) def accept(self): while 1: client, info = self.socket.accept() print("{*} %s connected." % info[0]) yield (client, info) print("{*} %s disconnected." % info[0]) client.close() def handle_connections(self): for client, info in self.accept(): version_info = client.recv(128) print("{*} Version Information: %s" % repr(version_info)) if not version_info.startswith(self.version): print("{*} Client has incompatible versions.") continue client.send(self.qualified_name) pkt_len, pdn_len, payload, _ = self.binary_packet_parse(client) data = self.kexinit_packet_parse(payload, client) @staticmethod def kexinit_packet_parse(payload, sock): SSH_MSG_KEXINIT = payload[0] COOKIE = payload[1:17] PAYLOAD = payload[17:] KEX_ALGORITHMS_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0] KEX_ALGORITHMS = PAYLOAD[4:4+KEX_ALGORITHMS_LENGTH] PAYLOAD = PAYLOAD[4+KEX_ALGORITHMS_LENGTH:] SERVER_HOST_KEY_ALGORITHMS_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0] SERVER_HOST_KEY_ALGORITHMS = PAYLOAD[4:4+SERVER_HOST_KEY_ALGORITHMS_LENGTH].split(',') PAYLOAD = PAYLOAD[4+SERVER_HOST_KEY_ALGORITHMS_LENGTH:] ENCRYPTION_ALGORITHMS_CLIENT_TO_SERVER_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0] ENCRYPTION_ALGORITHMS_CLIENT_TO_SERVER = PAYLOAD[4:4+ENCRYPTION_ALGORITHMS_CLIENT_TO_SERVER_LENGTH].split(',') PAYLOAD = PAYLOAD[4+ENCRYPTION_ALGORITHMS_CLIENT_TO_SERVER_LENGTH:] ENCRYPTION_ALGORITHMS_SERVER_TO_CLIENT_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0] ENCRYPTION_ALGORITHMS_SERVER_TO_CLIENT = PAYLOAD[4:4+ENCRYPTION_ALGORITHMS_SERVER_TO_CLIENT_LENGTH].split(',') PAYLOAD = PAYLOAD[4+ENCRYPTION_ALGORITHMS_SERVER_TO_CLIENT_LENGTH:] MAC_ALGORITHMS_CLIENT_TO_SERVER_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0] MAC_ALGORITHMS_CLIENT_TO_SERVER = PAYLOAD[4:4+MAC_ALGORITHMS_CLIENT_TO_SERVER_LENGTH].split(',') PAYLOAD = PAYLOAD[4+MAC_ALGORITHMS_CLIENT_TO_SERVER_LENGTH:] MAC_ALGORITHMS_SERVER_TO_CLIENT_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0] MAC_ALGORITHMS_SERVER_TO_CLIENT = PAYLOAD[4:4+MAC_ALGORITHMS_SERVER_TO_CLIENT_LENGTH].split(',') PAYLOAD = PAYLOAD[4+MAC_ALGORITHMS_SERVER_TO_CLIENT_LENGTH:] COMPRESSION_ALGORITHMS_CLIENT_TO_SERVER_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0] COMPRESSION_ALGORITHMS_CLIENT_TO_SERVER = PAYLOAD[4:4+COMPRESSION_ALGORITHMS_CLIENT_TO_SERVER_LENGTH].split(',') PAYLOAD = PAYLOAD[4+COMPRESSION_ALGORITHMS_CLIENT_TO_SERVER_LENGTH:] COMPRESSION_ALGORITHMS_SERVER_TO_CLIENT_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0] COMPRESSION_ALGORITHMS_SERVER_TO_CLIENT = PAYLOAD[4:4+COMPRESSION_ALGORITHMS_SERVER_TO_CLIENT_LENGTH].split(',') PAYLOAD = PAYLOAD[4+COMPRESSION_ALGORITHMS_SERVER_TO_CLIENT_LENGTH:] LANGUAGES_CLIENT_TO_SERVER_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0] LANGUAGES_CLIENT_TO_SERVER = PAYLOAD[4:4+LANGUAGES_CLIENT_TO_SERVER_LENGTH].split(',') PAYLOAD = PAYLOAD[4+LANGUAGES_CLIENT_TO_SERVER_LENGTH:] LANGUAGES_SERVER_TO_CLIENT_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0] LANGUAGES_SERVER_TO_CLIENT = 
PAYLOAD[4:4+LANGUAGES_SERVER_TO_CLIENT_LENGTH].split(',') PAYLOAD = PAYLOAD[4+LANGUAGES_SERVER_TO_CLIENT_LENGTH:] FIRST_KEX_PACKET_FOLLOWS = bool(PAYLOAD[0]) PAYLOAD = PAYLOAD[1:] RESERVED = struct.unpack("!l", PAYLOAD) print("{*} SSH_MSG_KEXINIT = %r" % SSH_MSG_KEXINIT) print("{*} Cookie = %r" % COOKIE) print("{*} KEX_ALGORITHMS = %s" % KEX_ALGORITHMS) print("{*} SERVER_HOST_KEY_ALGORITHMS = %s" % SERVER_HOST_KEY_ALGORITHMS) print("{*} ENCRYPTION_ALGORITHMS_CLIENT_TO_SERVER = %s" % ENCRYPTION_ALGORITHMS_CLIENT_TO_SERVER) print("{*} ENCRYPTION_ALGORITHMS_SERVER_TO_CLIENT = %s" % ENCRYPTION_ALGORITHMS_SERVER_TO_CLIENT) print("{*} MAC_ALGORITHMS_CLIENT_TO_SERVER = %s" % MAC_ALGORITHMS_CLIENT_TO_SERVER) print("{*} MAC_ALGORITHMS_SERVER_TO_CLIENT = %s" % MAC_ALGORITHMS_SERVER_TO_CLIENT) print("{*} COMPRESSION_ALGORITHMS_CLIENT_TO_SERVER = %s" % COMPRESSION_ALGORITHMS_CLIENT_TO_SERVER) print("{*} COMPRESSION_ALGORITHMS_SERVER_TO_CLIENT = %s" % COMPRESSION_ALGORITHMS_SERVER_TO_CLIENT) print("{*} LANGUAGES_CLIENT_TO_SERVER = %s" % LANGUAGES_CLIENT_TO_SERVER) print("{*} LANGUAGES_SERVER_TO_CLIENT = %s" % LANGUAGES_SERVER_TO_CLIENT) print("{*} FIRST_KEX_PACKETS_FOLLOWS = %r" % FIRST_KEX_PACKET_FOLLOWS) print("{*} RESERVED = %r" % RESERVED) if FIRST_KEX_PACKET_FOLLOWS: print("{*} Data = %r" % sock.recv(350000)) return ( SSH_MSG_KEXINIT, COOKIE, KEX_ALGORITHMS, SERVER_HOST_KEY_ALGORITHMS, ENCRYPTION_ALGORITHMS_CLIENT_TO_SERVER, ENCRYPTION_ALGORITHMS_SERVER_TO_CLIENT, MAC_ALGORITHMS_CLIENT_TO_SERVER, MAC_ALGORITHMS_SERVER_TO_CLIENT, COMPRESSION_ALGORITHMS_CLIENT_TO_SERVER, COMPRESSION_ALGORITHMS_SERVER_TO_CLIENT, LANGUAGES_CLIENT_TO_SERVER, LANGUAGES_SERVER_TO_CLIENT, FIRST_KEX_PACKET_FOLLOWS, RESERVED # for error checking ) @staticmethod def namelist_create(lists): pass @staticmethod def binary_packet_create(data): PACKET_LENGTH = struct.pack("!l", len(data)) print("{*} PACKET_LENGTH = %r" % PACKET_LENGTH) @staticmethod def binary_packet_parse(sock): PACKET_LENGTH = struct.unpack("!l", sock.recv(4))[0] PADDING_LENGTH = struct.unpack("!b", sock.recv(1))[0] PAYLOAD = sock.recv(PACKET_LENGTH-PADDING_LENGTH-1) RANDOM_PADDING = sock.recv(PADDING_LENGTH+1) print("{*} Packet length = %s" % PACKET_LENGTH) print("{*} Pading length = %s" % PADDING_LENGTH) print("{*} Padding = %r" % RANDOM_PADDING) return (PACKET_LENGTH, PADDING_LENGTH, PAYLOAD, RANDOM_PADDING) def close(self): self.socket.close()
7,014
2,783
import random


def find_spelling(n):
    """
    Finds d, r s.t. n-1 = 2^r * d
    """
    r = 0
    d = n - 1
    # divmod used for large numbers
    quotient, remainder = divmod(d, 2)
    # while we can still divide 2's into n-1...
    while remainder != 1:
        r += 1
        d = quotient  # previous quotient before we overwrite it
        quotient, remainder = divmod(d, 2)
    return r, d


def probably_prime(n, k=10):
    """
    Miller-Rabin primality test
    Input: n > 3
    k: accuracy of test
    Output: True if n is "probably prime", False if it is composite

    From pseudocode at https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
    """
    if n == 2:
        return True
    if n % 2 == 0:
        return False

    r, d = find_spelling(n)

    for check in range(k):
        a = random.randint(2, n - 1)
        x = pow(a, d, n)  # a^d % n

        if x == 1 or x == n - 1:
            continue

        for i in range(r):
            x = pow(x, 2, n)
            if x == n - 1:
                break
        else:
            return False

    return True
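A few quick sanity checks for the functions above.

print(find_spelling(221))           # (2, 55), since 220 = 2**2 * 55
print(probably_prime(97))           # True: 97 is prime
print(probably_prime(221))          # False with overwhelming probability: 221 = 13 * 17
print(probably_prime(2 ** 61 - 1))  # True: a large Mersenne prime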
1,140
438
#MARTY I2C PI #SCRIPT BASED ON MATS WORK #SCRIPT PUSHED INSIDE inmoovCustom : https://github.com/MyRobotLab/inmoov/tree/master/InmoovScript raspi = Runtime.createAndStart("RasPi","RasPi") adaFruit16c = Runtime.createAndStart("AdaFruit16C","Adafruit16CServoDriver") adaFruit16c.setController("RasPi","1","0x40") # # This part is common for both devices and creates two servo instances # on port 3 and 8 on the Adafruit16CServoDriver # Change the names of the servos and the pin numbers to your usage cuisseDroite = Runtime.createAndStart("cuisseDroite", "Servo") genouDroite = Runtime.createAndStart("genouDroite", "Servo") chevilleDroite = Runtime.createAndStart("chevilleDroite", "Servo") cuisseGauche = Runtime.createAndStart("cuisseGauche", "Servo") genouGauche = Runtime.createAndStart("genouGauche", "Servo") chevilleGauche = Runtime.createAndStart("chevilleGauche", "Servo") eyes = Runtime.createAndStart("eyes", "Servo") armLeft = Runtime.createAndStart("armLeft", "Servo") armRight = Runtime.createAndStart("armRight", "Servo") sleep(1) ledBlue=14 ledRed=13 ledGreen=12 vitesse=80 cuisseDroiteRest=90 genouDroiteRest=90 chevilleDroiteRest=80 cuisseGaucheRest=97 genouGaucheRest=95 chevilleGaucheRest=90 armLeftRest=90 armRightRest=120 eyesRest=90 cuisseDroite.setRest(cuisseDroiteRest) genouDroite.setRest(genouDroiteRest) chevilleDroite.setRest(chevilleDroiteRest) cuisseGauche.setRest(cuisseGaucheRest) genouGauche.setRest(genouGaucheRest) chevilleGauche.setRest(chevilleGaucheRest) eyes.setRest(eyesRest) eyes.map(0,180,66,100) armLeft.setRest(armLeftRest) armRight.setRest(armRightRest) cuisseDroite.attach(adaFruit16c,0) genouDroite.attach(adaFruit16c,1) chevilleDroite.attach(adaFruit16c,2) cuisseGauche.attach(adaFruit16c,4) genouGauche.attach(adaFruit16c,5) chevilleGauche.attach(adaFruit16c,15) eyes.attach(adaFruit16c,8) armLeft.attach(adaFruit16c,9) armRight.attach(adaFruit16c,10) eyes.setVelocity(-1) armLeft.setVelocity(-1) armRight.setVelocity(-1) cuisseDroite.rest() genouDroite.rest() chevilleDroite.rest() cuisseGauche.rest() genouGauche.rest() chevilleGauche.rest() eyes.rest() armLeft.rest() armRight.rest() sleep(2) cuisseDroite.detach() genouDroite.detach() chevilleDroite.detach() cuisseGauche.detach() genouGauche.detach() chevilleGauche.detach() armLeft.detach() armRight.detach() def walk(step): talkBlocking("D'accord, c'est parti !") start(step) talk("Je m'aichauffe") cuisseDroite.attach() genouDroite.attach() chevilleDroite.attach() cuisseGauche.attach() genouGauche.attach() chevilleGauche.attach() genouGauche.attach() armLeft.attach() armRight.attach() cuisseDroite.setVelocity(vitesse) genouDroite.setVelocity(vitesse) chevilleDroite.setVelocity(vitesse) cuisseGauche.setVelocity(vitesse) genouGauche.setVelocity(vitesse) chevilleGauche.setVelocity(vitesse) for i in range(1,step) : armLeft.moveTo(50) armRight.moveTo(50) chevilleDroite.moveTo(chevilleDroiteRest+20) chevilleGauche.moveTo(chevilleGaucheRest+30) sleep(0.8) cuisseGauche.moveTo(cuisseDroiteRest+40) cuisseDroite.moveTo(chevilleDroiteRest-40) sleep(0.8) chevilleDroite.moveTo(chevilleDroiteRest-30) chevilleGauche.moveTo(chevilleGaucheRest-20) sleep(0.8) cuisseGauche.moveTo(cuisseGaucheRest) cuisseDroite.moveTo(chevilleDroiteRest) armLeft.moveTo(90) armRight.moveTo(90) sleep(0.8) cuisseDroite.detach() genouDroite.detach() chevilleDroite.detach() cuisseGauche.detach() genouGauche.detach() chevilleGauche.detach() eyes.detach() def start(step): sleep(5) armLeft.attach() armRight.attach() armLeft.attach() eyes.attach() eyes.moveTo(180) 
armRight.moveTo(0) sleep(2) eyes.moveTo(0) armRight.moveTo(120) sleep(1) eyes.moveTo(180) sleep(0) eyes.moveTo(180) sleep(2) eyes.moveTo(0) armRight.moveTo(armRightRest) adaFruit16c.setPinValue(7,0) adaFruit16c.setPinValue(ledGreen,0) adaFruit16c.setPinValue(ledRed,0) adaFruit16c.setPinValue(ledBlue,0) def red(): adaFruit16c.setPinValue(7,0) adaFruit16c.setPinValue(ledGreen,1) adaFruit16c.setPinValue(ledRed,0) adaFruit16c.setPinValue(ledBlue,1) def blue(): adaFruit16c.setPinValue(7,0) adaFruit16c.setPinValue(ledGreen,1) adaFruit16c.setPinValue(ledRed,1) adaFruit16c.setPinValue(ledBlue,0) def green(): adaFruit16c.setPinValue(7,0) adaFruit16c.setPinValue(ledGreen,0) adaFruit16c.setPinValue(ledRed,1) adaFruit16c.setPinValue(ledBlue,1) def noLed(): adaFruit16c.setPinValue(ledGreen,0) adaFruit16c.setPinValue(ledRed,0) adaFruit16c.setPinValue(ledBlue,0) adaFruit16c.setPinValue(7,1) red() sleep(1) green() sleep(1) blue() sleep(1) noLed() led = Runtime.start("led","Clock") led.setInterval(100) global i i=0 def ledFunc(timedata): global i if i==0: red() i=1 else: noLed() i=0 led.setInterval(random.randint(10,100)) led.addListener("pulse", python.name, "ledFunc")
4,859
2,303
from sklearn import tree, svm from sklearn.neural_network import MLPClassifier from sklearn.multiclass import OneVsRestClassifier from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier, VotingClassifier from sklearn.linear_model import LogisticRegression, RidgeClassifier from sklearn.naive_bayes import GaussianNB import matplotlib.pyplot as plt import numpy as np from generate_dataset import generate_dataset, preparing_dataset from storeExperimentalInformations import store_experimental_informations, prepare_legends import baseGraph import ape_tabular import warnings import pickle #from keras.models import Sequential #from keras.layers import Dense if __name__ == "__main__": # Filter the warning from matplotlib warnings.filterwarnings("ignore") # Datasets used for the experiments dataset_names = ["generate_circles", "generate_moons", "blood", "diabete", "generate_blobs"]# "compas", "adult", "titanic" # array of the models used for the experiments models = [GradientBoostingClassifier(n_estimators=20, learning_rate=1.0), RandomForestClassifier(n_estimators=20), #MLPClassifier(random_state=1, activation="logistic"), VotingClassifier(estimators=[('lr', LogisticRegression()), ('gnb', GaussianNB()), ('rc', LogisticRegression())], voting="soft"), MLPClassifier(random_state=1), RidgeClassifier()]#, #LogisticRegression(), #tree.DecisionTreeClassifier(), #Sequential(), #models=[RidgeClassifier(), MLPClassifier(random_state=1)] # Number of instances explained by each model on each dataset max_instance_to_explain = 10 # Print explanation result illustrative_example = False """ All the variable necessaries for generating the graph results """ # Store results inside graph if set to True graph = True verbose = False growing_sphere = False if growing_sphere: label_graph = "growing spheres " growing_method = "GS" else: label_graph = "" growing_method = "GF" # Threshold for explanation method precision threshold_interpretability = 0.99 linear_separability_index = 1 interpretability_name = ['ls', 'ls regression', 'ls raw data', 'ls extend'] #interpretability_name = ['ls log reg', 'ls raw data'] # Initialize all the variable needed to store the result in graph for dataset_name in dataset_names: if graph: experimental_informations = store_experimental_informations(len(models), len(interpretability_name), interpretability_name, len(models)) models_name = [] # Store dataset inside x and y (x data and y labels), with aditional information x, y, class_names, regression, multiclass, continuous_features, categorical_features, \ categorical_values, categorical_names, transformations = generate_dataset(dataset_name) for nb_model, model in enumerate(models): model_name = type(model).__name__ if "MLP" in model_name and nb_model <=2 : model_name += "logistic" if growing_sphere: filename = "./results/"+dataset_name+"/"+model_name+"/growing_spheres/"+str(threshold_interpretability)+"/sup_mat_" filename_all = "./results/"+dataset_name+"/growing_spheres/"+str(threshold_interpretability)+"/sup_mat_" else: filename="./results/"+dataset_name+"/"+model_name+"/"+str(threshold_interpretability)+"/sup_mat_" filename_all="./results/"+dataset_name+"/"+str(threshold_interpretability)+"/sup_mat_" if graph: experimental_informations.initialize_per_models(filename) models_name.append(model_name) # Split the dataset inside train and test set (50% each set) dataset, black_box, x_train, x_test, y_train, y_test = preparing_dataset(x, y, dataset_name, model) print("###", model_name, "training on", dataset_name, 
"dataset.") if 'Sequential' in model_name: # Train a neural network classifier with 2 relu and a sigmoid activation function black_box.add(Dense(12, input_dim=len(x_train[0]), activation='relu')) black_box.add(Dense(8, activation='relu')) black_box.add(Dense(1, activation='sigmoid')) black_box.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) black_box.fit(x_train, y_train, epochs=50, batch_size=10) def predict(x): if x.shape[0] > 1: return np.asarray([prediction[0] for prediction in black_box.predict_classes(x)]) return black_box.predict_classes(x)[0] def score(x, y): return sum(predict(x) == y)/len(y) else: black_box = black_box.fit(x_train, y_train) predict = black_box.predict score = black_box.score print('### Accuracy:', score(x_test, y_test)) cnt = 0 explainer = ape_tabular.ApeTabularExplainer(x_train, class_names, predict, black_box.predict_proba, continuous_features=continuous_features, categorical_features=categorical_features, categorical_values=categorical_values, feature_names=dataset.feature_names, categorical_names=categorical_names, verbose=verbose, threshold_precision=threshold_interpretability, linear_separability_index=linear_separability_index, transformations=transformations) for instance_to_explain in x_test: if cnt == max_instance_to_explain: break print("### Instance number:", cnt + 1, "over", max_instance_to_explain) print("### Models ", nb_model + 1, "over", len(models)) print("instance to explain:", instance_to_explain) try: precision, coverage, f2 = explainer.explain_instance(instance_to_explain, growing_method=growing_method, local_surrogate_experiment=True) print("precision", precision) print("coverage", coverage) print("f2", f2) if graph: experimental_informations.store_experiments_information_instance(precision, 'precision.csv', coverage, 'coverage.csv', f2, 'f2.csv') cnt += 1 except Exception as inst: print(inst) if graph: experimental_informations.store_experiments_information(max_instance_to_explain, nb_model, 'precision.csv', 'coverage.csv', 'f2.csv', filename_all=filename_all)
7,214
1,934
import pandas as pd import numpy as np import data_inputs, evaluate_EWRs #-------------------------------------------------------------------------------------------------- def sum_events(events): '''returns a sum of events''' return int(round(events.sum(), 0)) def get_frequency(events): '''Returns the frequency of years they occur in''' if events.count() == 0: result = 0 else: result = (int(events.sum())/int(events.count()))*100 return int(round(result, 0)) def get_average(input_events): '''Returns overall average length of events''' events = input_events.dropna() if len(events) == 0: result = 0 else: result = round(sum(events)/len(events),1) return result def get_event_length(input_events, num_events): events = input_events.dropna() if num_events == 0: EL = 0 else: EL = round(sum(events)/num_events,1) return EL def count_exceedence(input_events, EWR_info): events = input_events.copy(deep=True) if EWR_info['max_inter-event'] == None: return 'N/A' else: masking = events.isna() events[masking] = '' total = 0 for year in events.index: if list(events[year]) != '': count = len(events[year]) total = total + count return int(total) def initialise_summary_df_columns(input_dict): '''Ingest a dictionary of ewr yearly results and a list of statistical tests to perform initialises a dataframe with these as a multilevel heading and returns this''' analysis = data_inputs.analysis() column_list = [] list_of_arrays = [] for scenario, scenario_results in input_dict.items(): for sub_col in analysis: column_list = tuple((scenario, sub_col)) list_of_arrays.append(column_list) array_of_arrays =tuple(list_of_arrays) multi_col_df = pd.MultiIndex.from_tuples(array_of_arrays, names = ['scenario', 'type']) return multi_col_df def initialise_summary_df_rows(input_dict): '''Ingests a dictionary of ewr yearly results pulls the location information and the assocaited ewrs at each location, saves these as respective indexes and return the multi-level index''' index_1 = list() index_2 = list() index_3 = list() combined_index = list() # Get unique col list: for scenario, scenario_results in input_dict.items(): for site, site_results in scenario_results.items(): for PU in site_results: site_list = [] for col in site_results[PU]: if '_' in col: all_parts = col.split('_') remove_end = all_parts[:-1] if len(remove_end) > 1: EWR_code = '_'.join(remove_end) else: EWR_code = remove_end[0] else: EWR_code = col if EWR_code in site_list: continue else: site_list.append(EWR_code) add_index = tuple((site, PU, EWR_code)) if add_index not in combined_index: combined_index.append(add_index) unique_index = tuple(combined_index) multi_index = pd.MultiIndex.from_tuples(unique_index, names = ['gauge', 'planning unit', 'EWR']) return multi_index def allocate(df, add_this, idx, site, PU, EWR, scenario, category): '''Save element to a location in the dataframe''' df.loc[idx[[site], [PU], [EWR]], idx[scenario, category]] = add_this return df def summarise(input_dict): '''Ingests a dictionary with ewr pass/fails summarises these results and returns a single summary dataframe''' PU_items = data_inputs.get_planning_unit_info() EWR_table, see_notes_ewrs, undefined_ewrs, noThresh_df, no_duration, DSF_ewrs = data_inputs.get_EWR_table() # Initialise dataframe with multi level column heading and multi-index: multi_col_df = initialise_summary_df_columns(input_dict) index = initialise_summary_df_rows(input_dict) df = pd.DataFrame(index = index, columns=multi_col_df) # Run the analysis and add the results to the dataframe created above: for scenario, 
scenario_results in input_dict.items(): for site, site_results in scenario_results.items(): for PU in site_results: for col in site_results[PU]: all_parts = col.split('_') remove_end = all_parts[:-1] if len(remove_end) > 1: EWR = '_'.join(remove_end) else: EWR = remove_end[0] idx = pd.IndexSlice if ('_eventYears' in col): S = sum_events(site_results[PU][col]) df = allocate(df, S, idx, site, PU, EWR, scenario, 'Event years') F = get_frequency(site_results[PU][col]) df = allocate(df, F, idx, site, PU, EWR, scenario, 'Frequency') PU_num = PU_items['PlanningUnitID'].loc[PU_items[PU_items['PlanningUnitName'] == PU].index[0]] EWR_info = evaluate_EWRs.get_EWRs(PU_num, site, EWR, EWR_table, None, ['TF']) TF = EWR_info['frequency'] df = allocate(df, TF, idx, site, PU, EWR, scenario, 'Target frequency') elif ('_numAchieved' in col): S = sum_events(site_results[PU][col]) df = allocate(df, S, idx, site, PU, EWR, scenario, 'Achievement count') ME = get_average(site_results[PU][col]) df = allocate(df, ME, idx, site, PU, EWR, scenario, 'Achievements per year') elif ('_numEvents' in col): S = sum_events(site_results[PU][col]) df = allocate(df, S, idx, site, PU, EWR, scenario, 'Event count') ME = get_average(site_results[PU][col]) df = allocate(df, ME, idx, site, PU, EWR, scenario, 'Events per year') elif ('_eventLength' in col): EL = get_event_length(site_results[PU][col], S) df = allocate(df, EL, idx, site, PU, EWR, scenario, 'Event length') elif ('_totalEventDays' in col): AD = get_average(site_results[PU][col]) df = allocate(df, AD, idx, site, PU, EWR, scenario, 'Threshold days') elif ('daysBetweenEvents' in col): PU_num = PU_items['PlanningUnitID'].loc[PU_items[PU_items['PlanningUnitName'] == PU].index[0]] EWR_info = evaluate_EWRs.get_EWRs(PU_num, site, EWR, EWR_table, None, ['MIE']) DB = count_exceedence(site_results[PU][col], EWR_info) df = allocate(df, DB, idx, site, PU, EWR, scenario, 'Inter-event exceedence count') # Also save the max inter-event period to the data summary for reference EWR_info = evaluate_EWRs.get_EWRs(PU_num, site, EWR, EWR_table, None, ['MIE']) MIE = EWR_info['max_inter-event'] df = allocate(df, MIE, idx, site, PU, EWR, scenario, 'Max inter event period (years)') elif ('_missingDays' in col): MD = sum_events(site_results[PU][col]) df = allocate(df, MD, idx, site, PU, EWR, scenario, 'No data days') elif ('_totalPossibleDays' in col): TD = sum_events(site_results[PU][col]) df = allocate(df, TD, idx, site, PU, EWR, scenario, 'Total days') return df
8,099
2,365
import pandas as pd

# Placeholder data so the snippet runs end to end; substitute your own DataFrame.
df = pd.DataFrame({'name': ['a', 'b'], 'value': [1, 2]})

writer = pd.ExcelWriter("data.xlsx", engine='xlsxwriter')
df.to_excel(writer, sheet_name='Sheet1', index=False)

# Get the xlsxwriter workbook and worksheet objects.
workbook = writer.book
worksheet = writer.sheets['Sheet1']
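# Example xlsxwriter formatting once the handles are available; the bold header
# row and the 20-character column width are arbitrary choices, not requirements.
header_format = workbook.add_format({'bold': True})
worksheet.set_row(0, None, header_format)
worksheet.set_column(0, 1, 20)

# Close the writer to save data.xlsx to disk.
writer.close()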
247
81
from setuptools import setup with open('README.rst') as f: readme = f.read() setup( name="dem", version="0.0.8", author="Ian Macaulay, Jeremy Opalach", author_email="ismacaul@gmail.com", url="http://www.github.com/nitehawck/dem", description="An agnostic library/package manager for setting up a development project environment", long_description=readme, license="MIT License", classifiers=[ 'Development Status :: 3 - Alpha', #'Development Status :: 4 - Beta', #'Development Status :: 5 - Production / Stable', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX :: Linux', 'Operating System :: MacOS :: MacOS X', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Topic :: Software Development :: Build Tools', ], packages=['dem', 'dem.dependency', 'dem.project'], install_requires=[ 'virtualenv', 'PyYaml', 'wget', 'gitpython' ], tests_require=[ 'pyfakefs', 'mock' ], entry_points={ 'console_scripts': [ 'dem = dem.__main__:main' ] }, )
1,564
466
# django imports from django.contrib import admin # lfs imports from lfs.core.models import Action from lfs.core.models import ActionGroup from lfs.core.models import Shop from lfs.core.models import Country admin.site.register(Shop) admin.site.register(Action) admin.site.register(ActionGroup) admin.site.register(Country)
326
99