hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: float64
max_line_length: int64
alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64
qsc_code_num_chars_quality_signal: float64
qsc_code_mean_word_length_quality_signal: float64
qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64
qsc_code_frac_chars_top_3grams_quality_signal: float64
qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64
qsc_code_frac_chars_dupe_6grams_quality_signal: float64
qsc_code_frac_chars_dupe_7grams_quality_signal: float64
qsc_code_frac_chars_dupe_8grams_quality_signal: float64
qsc_code_frac_chars_dupe_9grams_quality_signal: float64
qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64
qsc_code_frac_chars_digital_quality_signal: float64
qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64
qsc_code_num_lines_quality_signal: float64
qsc_code_num_chars_line_max_quality_signal: float64
qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64
qsc_code_frac_chars_comments_quality_signal: float64
qsc_code_cate_xml_start_quality_signal: float64
qsc_code_frac_lines_dupe_lines_quality_signal: float64
qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64
qsc_code_frac_chars_string_length_quality_signal: float64
qsc_code_frac_chars_long_word_length_quality_signal: float64
qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64
qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64
qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64
qsc_codepython_frac_lines_import_quality_signal: float64
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64
qsc_code_num_chars: int64
qsc_code_mean_word_length: int64
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64
qsc_code_frac_chars_top_3grams: int64
qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64
qsc_code_frac_chars_dupe_6grams: int64
qsc_code_frac_chars_dupe_7grams: int64
qsc_code_frac_chars_dupe_8grams: int64
qsc_code_frac_chars_dupe_9grams: int64
qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64
qsc_code_frac_chars_digital: int64
qsc_code_frac_chars_whitespace: int64
qsc_code_size_file_byte: int64
qsc_code_num_lines: int64
qsc_code_num_chars_line_max: int64
qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64
qsc_code_frac_chars_comments: int64
qsc_code_cate_xml_start: int64
qsc_code_frac_lines_dupe_lines: int64
qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64
qsc_code_frac_chars_string_length: int64
qsc_code_frac_chars_long_word_length: int64
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64
qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64
qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64
qsc_codepython_frac_lines_func_ratio: int64
qsc_codepython_cate_var_zero: int64
qsc_codepython_frac_lines_pass: int64
qsc_codepython_frac_lines_import: int64
qsc_codepython_frac_lines_simplefunc: int64
qsc_codepython_score_lines_no_logic: int64
qsc_codepython_frac_lines_print: int64
effective: string
hits: int64
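The listing above is a flattened two-column schema (field name, dtype) for a per-file code-quality table; the rows that follow repeat these 113 fields in order. A minimal sketch of how such a table could be inspected, assuming the dump is available as a Parquet file; the filename code_quality.parquet is hypothetical, not from the source:

import pandas as pd

# Hypothetical filename; the source does not name the underlying file.
df = pd.read_parquet("code_quality.parquet")

# Column names and dtypes should line up with the schema listed above.
print(df.dtypes[["hexsha", "size", "lang", "avg_line_length"]])

# Example query: files whose duplicated-10-gram character fraction exceeds 10%.
heavy_dupes = df[df["qsc_code_frac_chars_dupe_10grams_quality_signal"] > 0.10]
print(len(heavy_dupes), "files with heavy 10-gram duplication")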
02818a426b0092542bb1f21edb0022a84299a37f
1,006
py
Python
src/masonite/middleware/route/IpMiddleware.py
cercos/masonite
f7f220efa7fae833683e9f07ce13c3795a87d3b8
[ "MIT" ]
35
2018-01-08T01:20:16.000Z
2018-02-06T02:37:14.000Z
src/masonite/middleware/route/IpMiddleware.py
cercos/masonite
f7f220efa7fae833683e9f07ce13c3795a87d3b8
[ "MIT" ]
55
2018-01-03T02:42:03.000Z
2018-02-06T13:35:54.000Z
src/masonite/middleware/route/IpMiddleware.py
cercos/masonite
f7f220efa7fae833683e9f07ce13c3795a87d3b8
[ "MIT" ]
4
2018-01-08T13:13:14.000Z
2018-01-12T19:35:32.000Z
import ipaddress from .. import Middleware from ...request import Request class IpMiddleware(Middleware): # order of resolution of headers used to fetch request ip headers = [ "HTTP_CLIENT_IP", "HTTP_X_FORWARDED_FOR", "HTTP_X_FORWARDED", "HTTP_X_CLUSTER_CLIENT_IP", "HTTP_FORWARDED_FOR", "HTTP_FORWARDED", "REMOTE_ADDR", ] def get_ip(self, request: Request): for header in self.headers: for raw_ip in request.environ.get(header, "").split(","): try: ip = ipaddress.ip_address(raw_ip.strip()) except ValueError: continue if not ip.is_private and not ip.is_reserved: return str(ip) return request.environ.get("REMOTE_ADDR") def before(self, request, response): request._ip = self.get_ip(request) return request def after(self, request, response): return request
27.944444
69
0.588469
115
1,006
4.93913
0.417391
0.026408
0.042254
0
0
0
0
0
0
0
0
0
0.323062
1,006
35
70
28.742857
0.834068
0.054672
0
0.071429
0
0
0.135933
0.02529
0
0
0
0
0
1
0.107143
false
0
0.107143
0.035714
0.428571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
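The 113 values above (hexsha through hits) form one record in schema order, one value per line; the dump renders integers with thousands separators (e.g. "1,006" for 1006). A hypothetical illustration, not from the source, of rebuilding a labeled record by zipping field names with row values:

# First five of the 113 schema fields, paired with the first record's values.
FIELDS = ["hexsha", "size", "ext", "lang", "max_stars_repo_path"]
row = ["02818a426b0092542bb1f21edb0022a84299a37f", 1006, "py", "Python",
       "src/masonite/middleware/route/IpMiddleware.py"]
record = dict(zip(FIELDS, row))
print(record["lang"])  # -> Python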
02847aefe433ca823a6e702818cc773a47afa9fa
2,165
py
Python
tests/cli/test_config.py
joe-antognini/zoia
64d0d413ce53a4f66c4829446567e20549829f2c
[ "MIT" ]
null
null
null
tests/cli/test_config.py
joe-antognini/zoia
64d0d413ce53a4f66c4829446567e20549829f2c
[ "MIT" ]
null
null
null
tests/cli/test_config.py
joe-antognini/zoia
64d0d413ce53a4f66c4829446567e20549829f2c
[ "MIT" ]
null
null
null
import os import tempfile import unittest import unittest.mock from click.testing import CliRunner from ..context import zoia import zoia.cli import zoia.cli.config class TestConfigValidator(unittest.TestCase): def test__config_validator_good_obj(self): good_obj = { 'library_root': '/tmp/foo', 'db_root': '/tmp/bar', 'backend': 'json', } self.assertIsNone(zoia.cli.config._config_validator(good_obj)) def test__config_validator_bad_obj(self): bad_obj = { 'db_root': '/tmp/bar', 'backend': 'json', } with self.assertRaises(zoia.parse.yaml.ZoiaYamlValidationError): zoia.cli.config._config_validator(bad_obj) bad_obj = { 'library_root': '/tmp/foo', 'db_root': '/tmp/bar', 'backend': 'foo', } with self.assertRaises(zoia.parse.yaml.ZoiaYamlValidationError): zoia.cli.config._config_validator(bad_obj) class TestConfig(unittest.TestCase): @unittest.mock.patch( 'zoia.cli.config.zoia.backend.config.get_config_filepath' ) @unittest.mock.patch('zoia.cli.config.zoia.parse.yaml.edit_until_valid') def test_config(self, mock_edit, mock_get_config_filepath): with tempfile.TemporaryDirectory() as tmpdir: config_filename = os.path.join(tmpdir, 'config.yaml') mock_get_config_filepath.return_value = config_filename config = zoia.backend.config.ZoiaConfig( library_root='/tmp/foo', db_root='/tmp/bar', backend='sqlite' ) zoia.backend.config.save_config(config, config_filename) config_dict = config.to_dict() config_dict['backend'] = 'json' mock_edit.return_value = config_dict runner = CliRunner() result = runner.invoke(zoia.cli.zoia, ['config']) self.assertEqual(result.exit_code, 0) new_config = zoia.backend.config.load_config(config_filename) self.assertEqual( new_config.backend, zoia.backend.config.ZoiaBackend.JSON )
30.928571
77
0.628637
243
2,165
5.366255
0.27572
0.042945
0.059816
0.03681
0.319785
0.298313
0.277607
0.22546
0.22546
0.197853
0
0.000625
0.260508
2,165
69
78
31.376812
0.813866
0
0
0.245283
0
0
0.124711
0.047575
0
0
0
0
0.09434
1
0.056604
false
0
0.150943
0
0.245283
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
02865306a72e1b14739e214754905438560e555e
11,229
py
Python
malbot/game_server/info_panel.py
Malbryn/MalBot
c0490c7524506a187fd591ffa978649e7a5e034c
[ "MIT" ]
null
null
null
malbot/game_server/info_panel.py
Malbryn/MalBot
c0490c7524506a187fd591ffa978649e7a5e034c
[ "MIT" ]
null
null
null
malbot/game_server/info_panel.py
Malbryn/MalBot
c0490c7524506a187fd591ffa978649e7a5e034c
[ "MIT" ]
null
null
null
import asyncio import os import discord import valve.source.a2s import time from datetime import datetime from discord import Client from discord.ext.commands import Context from malbot.database.database import Database from malbot.game_server.player import Player from malbot.game_server.rcon import RCON class InfoPanel: def __init__(self, database: Database, rcon_client: RCON): self.database = database self.rcon_client = rcon_client self.game = '<unknown>' self.name = '<unknown>' self.address = '<unknown>' self.password = '<unknown>' self.modset = '<unknown>' self.current_player_count = 999 self.max_player_count = 999 self.player_list = '' self.timestamp = '' self.guild_id = None self.channel_id = None self.message_id = None self.embed = discord.Embed( title='Server Info', colour=0x4C91E3 ) self.refresh_rate = 120 # Seconds self.cancel_task = False async def fetch_data(self) -> None: print('Fetching data from database...') try: await self.database.connect() data = await self.database.query('SELECT * FROM game_server FETCH FIRST ROW ONLY') print(data) if not data: raise Exception("The 'game_server' table doesn't contain any data") self.guild_id = int(data[1]) self.channel_id = int(data[2]) self.message_id = int(data[3]) self.address = data[4] self.password = data[5] self.modset = data[6] self.game = data[7] print('Server info panel reattached using ID of {}'.format(self.message_id)) await self.__build_embed() except Exception as e: print('Unable to fetch data from database: ', e) finally: await self.database.disconnect() async def create_embed(self, context: Context, game: str, address: str, password: str, modset: str) -> None: if self.message_id: await context.channel.send('Info panel already exist, ' 'please delete the old one first using `/delete_server_info_panel` command', delete_after=5.0) return None self.game = game self.address = address self.password = password self.modset = modset try: await self.__build_embed(context=context) message = await context.channel.send(embed=self.embed) self.message_id = int(message.id) self.guild_id = int(context.guild.id) self.channel_id = int(context.channel.id) await self.database.connect() await self.database.query( """ INSERT INTO game_server (guild_id, channel_id, message_id, address, password, modset, game) VALUES (%s, %s, %s, %s, %s, %s, %s) """, (self.guild_id, self.channel_id, self.message_id, address, password, modset, game) ) print('Info panel created') await context.channel.send('Info panel created', delete_after=5.0) except Exception as e: print('Creating server info embed failed: ', e) await context.channel.send(f'Creating server info embed failed: {e}', delete_after=5.0) async def delete_embed(self, context: Context) -> None: if not self.message_id: print('Info panel does not exist') await context.channel.send('Info panel does not exist', delete_after=5.0) return None try: message = await context.channel.fetch_message(self.message_id) await message.delete() await self.database.connect() await self.database.query( """ DELETE FROM game_server WHERE message_id=%s """, [self.message_id] ) print('Info panel deleted') await context.channel.send('Info panel deleted', delete_after=5.0) except Exception as e: print('Deleting info panel failed: ', e) await context.channel.send(f'Deleting info panel failed: {e}', delete_after=5.0) return None self.embed = discord.Embed( title='Server Info', colour=0x4C91E3 ) self.message_id = '' self.player_list = '' await context.channel.send('Deleted info panel', delete_after=5.0) async def start_monitoring(self, client: Client) -> None: print('Starting game server monitoring...') self.cancel_task = False await client.wait_until_ready() while not (client.is_closed() or self.cancel_task): await self.refresh(client=client) await asyncio.sleep(self.refresh_rate) print('Game server monitoring stopped') async def stop_monitoring(self) -> None: print('Stopping game server monitoring...') self.cancel_task = True async def refresh(self, **kwargs) -> None: context = kwargs.get('context', None) client = kwargs.get('client', None) if context: print('Refreshing server info panel...') if not self.message_id: print('Info panel does not exist') if context: await context.channel.send('Info panel does not exist', delete_after=5.0) return None await self.__build_embed() try: channel = context.channel if context else client.get_channel(self.channel_id) if not channel: raise Exception('Channel is not found') message = await channel.fetch_message(self.message_id) if not message: raise Exception('Message is not found') await message.edit(embed=self.embed) except Exception as e: print('Failed to fetch channel/message: ', e) return None async def __build_embed(self, context: Context = None) -> None: print('Building embed for the server info panel...') self.embed = discord.Embed( title='Server Info', colour=0x4C91E3 ) await self.__init_details(context=context) await self.__init_modset(context=context) await self.__init_player_count(context=context) await self.__init_player_list(context=context) await self.__init_footer(context=context) print('Finished building embed') async def __init_details(self, context: Context) -> None: try: with valve.source.a2s.ServerQuerier((os.environ['RCON_IP'], int(os.environ['QUERY_PORT']))) as server: self.name = server.info().values['server_name'] self.embed.add_field( name='Details', value='```\nGame: {}\n\nServer name: {}\nAddress: {}\nPassword: {}```'.format( self.game, self.name, self.address, self.password ), inline=False ) except Exception as e: print('Creating Details field failed: ', e) self.embed.add_field( name='Details', value='```\nGame: {}\n\nSERVER OFFLINE```'.format( self.game ), inline=False ) if context: await context.channel.send('Creating Details field failed: ', e) async def __init_player_count(self, context: Context) -> None: try: with valve.source.a2s.ServerQuerier((os.environ['RCON_IP'], int(os.environ['QUERY_PORT']))) as server: self.max_player_count = server.info().values['max_players'] self.current_player_count = server.info().values['player_count'] self.embed.add_field( name='Player count', value='```\n{}/{}```'.format(self.current_player_count, self.max_player_count), inline=False ) except Exception as e: print('Creating Player count field failed: ', e) if context: await context.channel.send('Creating Player field count failed: ', e) async def __init_modset(self, context: Context) -> None: try: self.embed.add_field( name='Modset', value='{}'.format(self.modset), inline=False ) except Exception as e: print('Creating Modset field failed: ', e) if context: await context.channel.send('Creating Modset field failed: ', e) async def __init_player_list(self, context: Context) -> None: try: self.player_list = '' with valve.source.a2s.ServerQuerier((os.environ['RCON_IP'], int(os.environ['QUERY_PORT']))) as server: all_players = server.players().values['players'] if os.environ['PARSE_PLAYERLIST'] == "1": players = await self.rcon_client.get_players() else: players = [] for index, current_player in enumerate(all_players): current_player = Player( rcon_id=current_player.values.get('index'), name=current_player.values.get('name'), ping=current_player.values.get('ping') or 0 ) players.append(current_player) for i in range(len(players), 0, -1): players[i-1].duration = \ time.strftime('%H:%M:%S', time.gmtime(all_players[len(players) - i].values['duration'])) info = '{:>2} | {:>20} | {:>3} ms | {:>8}\n'.format( players[i-1].rcon_id, players[i-1].name, players[i-1].ping, players[i-1].duration ) self.player_list += info self.embed.add_field( name='Player list', value='```\nID | Name | Ping | Duration' '\n---------------------------------------------\n{}```'.format(self.player_list), inline=False ) except Exception as e: print('Creating Player list field failed: ', e) if context: await context.channel.send('Creating Player list field failed: ', e) async def __init_footer(self, context: Context) -> None: try: self.timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S') self.embed.set_footer( text='Last updated: {}'.format(self.timestamp), icon_url='https://probot.media/tUE1WGMdwV.png' ) except Exception as e: print('Creating Timestamp failed: ', e) if context: await context.channel.send('Creating Timestamp field failed: ', e)
34.657407
115
0.552498
1,246
11,229
4.851525
0.158909
0.02531
0.047146
0.053267
0.431266
0.379983
0.253102
0.218693
0.189578
0.14574
0
0.008871
0.33743
11,229
323
116
34.764706
0.803629
0.000623
0
0.305785
0
0
0.165354
0.007416
0
0
0.002197
0
0
1
0.004132
false
0.028926
0.045455
0
0.07438
0.090909
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0289749a67711720a4ab20cdcca527bfc17cff8b
5,113
py
Python
poem/Poem/sync/poem-syncservtype.py
kevangel79/poem-2
75cda3cdd302df9c85b963bd91b7ce7182dfa220
[ "Apache-2.0" ]
null
null
null
poem/Poem/sync/poem-syncservtype.py
kevangel79/poem-2
75cda3cdd302df9c85b963bd91b7ce7182dfa220
[ "Apache-2.0" ]
null
null
null
poem/Poem/sync/poem-syncservtype.py
kevangel79/poem-2
75cda3cdd302df9c85b963bd91b7ce7182dfa220
[ "Apache-2.0" ]
null
null
null
import os import django os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Poem.settings') django.setup() import logging import os import requests from django.conf import settings from Poem.poem import models from Poem.tenants.models import Tenant from xml.etree import ElementTree from configparser import ConfigParser from tenant_schemas.utils import schema_context, get_public_schema_name logging.basicConfig( format='%(filename)s[%(process)s]: %(levelname)s %(message)s', level=logging.INFO ) logger = logging.getLogger("POEM") def tenant_servtype_data(tenant): config = ConfigParser() config.read(settings.CONFIG_FILE) HTTPAUTH = config.getboolean('SYNC_' + tenant.upper(), 'useplainhttpauth') HTTPUSER = config.get('SYNC_' + tenant.upper(), 'httpuser') HTTPPASS = config.get('SYNC_' + tenant.upper(), 'httppass') SERVICETYPE_URL = config.get('SYNC_' + tenant.upper(), 'servicetype') return {'HTTPAUTH': HTTPAUTH, 'HTTPUSER': HTTPUSER, 'HTTPPASS': HTTPPASS, 'SERVICETYPE_URL': SERVICETYPE_URL} def main(): """Parses service flavours list from GOCDB""" schemas = list(Tenant.objects.all().values_list('schema_name', flat=True)) schemas.remove(get_public_schema_name()) for schema in schemas: with schema_context(schema): tenant = Tenant.objects.get(schema_name=schema) data = tenant_servtype_data(tenant.name) fos = [] try: for fp in [settings.HOST_CERT, settings.HOST_KEY]: if not os.path.exists(fp): raise IOError("invalid path %s" % fp) else: fos.append(open(fp)) except IOError as e: logger.error(e) raise SystemExit(1) for fo in fos: fo.close() url = data['SERVICETYPE_URL'] try: if data['HTTPAUTH']: req = requests.get( url, auth=(data['HTTPUSER'], data['HTTPPASS']) ) else: if url.startswith('https'): req = requests.get( url, cert=(settings.HOST_CERT, settings.HOST_KEY), timeout=60 ) else: req = requests.get(url) ret = req.content except Exception as e: print("%s: Error service flavours feed - %s" % ( schema.upper(), repr(e))) logger.error("%s: Error service flavours feed - %s" % ( schema.upper(), repr(e))) continue try: root = ElementTree.XML(ret) except Exception as e: logger.error("%s: Error parsing service flavours - %s" % ( schema.upper(), e)) continue elements = root.findall("SERVICE_TYPE") if not elements: logger.error( "%s: Error parsing service flavours" % schema.upper() ) continue feed_list = [] for element in elements: element_list = {} if list(element): for child_element in list(element): element_list[str(child_element.tag).lower()] = \ child_element.text feed_list.append(element_list) sfindb = set( [ ( sf.name, sf.description ) for sf in models.ServiceFlavour.objects.all() ] ) sfs = set( [ ( feed['service_type_name'], feed['service_type_desc'] ) for feed in feed_list ] ) if sfindb != sfs: for s in sfs.difference(sfindb): try: service_flavour, created = \ models.ServiceFlavour.objects.get_or_create( name=s[0] ) if not created: service_flavour.description = s[1] service_flavour.save() except Exception as e: logger.error( "%s: database operations failed - %s" % (schema.upper(), e)) logger.info( "%s: Added/updated %d service flavours" % (schema.upper(), len(sfs.difference(sfindb)))) else: logger.info( "%s: Service Flavours database is up to date" % schema.upper() ) main()
31.95625
78
0.469001
461
5,113
5.093275
0.308026
0.044719
0.025554
0.024276
0.151193
0.115843
0.089438
0.035775
0.035775
0.035775
0
0.00174
0.437903
5,113
159
79
32.157233
0.81524
0.007628
0
0.186047
0
0
0.112273
0.009471
0
0
0
0
0
1
0.015504
false
0.023256
0.085271
0
0.108527
0.007752
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0289a9d69224789bad1986304776552f56743a67
2,974
py
Python
photocrawl/utils.py
fsoubelet/PhotoCrawl
229e830a51e783a3118ca05cf35bcdaa5123f1b8
[ "MIT" ]
null
null
null
photocrawl/utils.py
fsoubelet/PhotoCrawl
229e830a51e783a3118ca05cf35bcdaa5123f1b8
[ "MIT" ]
22
2020-09-21T04:54:19.000Z
2022-02-03T12:27:13.000Z
photocrawl/utils.py
fsoubelet/PhotoCrawl
229e830a51e783a3118ca05cf35bcdaa5123f1b8
[ "MIT" ]
null
null
null
""" Created on 2019.08.15 :author: Felix Soubelet Some utilities for main functionality. """ import pathlib import sys import time from contextlib import contextmanager from typing import Callable, Iterator from loguru import logger @contextmanager def timeit(function: Callable) -> Iterator[None]: """ Returns the time elapsed when executing code in the context via `function`. Original code from @jaimecp89 Args: function: any callable taking one argument. Was conceived with a lambda in mind. Returns: The elapsed time as an argument for the provided function. Usage: with timeit(lambda spanned: logger.debug(f'Did some stuff in {spanned} seconds')): some_stuff() some_other_stuff() """ start_time = time.time() try: yield finally: time_used = time.time() - start_time function(time_used) def figure_focal_range(focal_length: float) -> str: """ Categorize the focal length value in different ranges. This is better for plotting the number of shots per focal length (focal range). To be applied as a lambda on a column of your DataFrame. Args: focal_length: integer or float value of the focal length used for a shot. Returns: A String for each value, corresponding to the focal range, """ if focal_length <= 0: logger.error("Focal length should never be a negative value") raise ValueError("Invalid focal length value (< 0)") elif focal_length < 16: return "1-15mm" elif 16 <= focal_length < 23: return "16-23mm" elif 23 <= focal_length < 70: return "24-70mm" elif 70 <= focal_length < 200: return "70-200mm" elif 200 <= focal_length < 400: return "200-400mm" else: return "400mm+" def set_logger_level(log_level: str = "info") -> None: """ Sets the logger level to the one provided at the commandline. Default loguru handler will have DEBUG level and ID 0. We need to first remove this default handler and add ours with the wanted level. Args: log_level: string, the default logging level to print out. Returns: Nothing, acts in place. """ logger.remove(0) logger.add(sys.stderr, level=log_level.upper()) def setup_output_directory(directory_name: str) -> pathlib.Path: """ Create an output directory with the provided name. Args: directory_name: A string with the name to give to the output directory. Returns: A `pathlib.Path` object of this directory. """ directory = pathlib.Path(directory_name) if not directory.is_dir(): logger.info(f"Creating output directory {directory.absolute()}") directory.mkdir() else: logger.warning( f"Output directory {directory} already present. " "This may lead to unexpected behaviour." ) return directory
27.794393
90
0.6577
394
2,974
4.893401
0.428934
0.07417
0.037344
0
0
0
0
0
0
0
0
0.02741
0.263954
2,974
106
91
28.056604
0.853358
0.452925
0
0.045455
0
0
0.175463
0.015079
0
0
0
0
0
1
0.090909
false
0
0.136364
0
0.386364
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
028ab6e725f8e451380722f8838d8a4617c77f41
2,697
py
Python
tenant_workspace/templatetags/qt_063_tag.py
smegurus/smegurus-django
053973b5ff0b997c52bfaca8daf8e07db64a877c
[ "BSD-4-Clause" ]
1
2020-07-16T10:58:23.000Z
2020-07-16T10:58:23.000Z
tenant_workspace/templatetags/qt_063_tag.py
smegurus/smegurus-django
053973b5ff0b997c52bfaca8daf8e07db64a877c
[ "BSD-4-Clause" ]
13
2018-11-30T02:29:39.000Z
2022-03-11T23:35:49.000Z
tenant_workspace/templatetags/qt_063_tag.py
smegurus/smegurus-django
053973b5ff0b997c52bfaca8daf8e07db64a877c
[ "BSD-4-Clause" ]
null
null
null
# -*- coding: utf-8 -*- from django import template from django.db.models import Q from django.core.urlresolvers import reverse from django.contrib.auth.models import User from django.utils.translation import ugettext_lazy as _ from django.shortcuts import get_object_or_404 from foundation_tenant.utils import int_or_none from foundation_tenant.models.base.naicsoption import NAICSOption from foundation_tenant.models.base.imageupload import ImageUpload from foundation_tenant.models.bizmula.question import Question from foundation_tenant.models.bizmula.questionanswer import QuestionAnswer from smegurus import constants register = template.Library() @register.inclusion_tag('templatetags/question/template_063.html') def render_question_type_063(workspace, module, node, question, answer): """ Dependency: - Q99 | Total Sales Volume - Q100 """ # For this particular document and module, find the previous questions. q1_qid = int_or_none(question.dependency['q1_qid']) q2_qid = int_or_none(question.dependency['q2_qid']) q3_qid = int_or_none(question.dependency['q3_qid']) sales_volume = QuestionAnswer.objects.get( question_id=q1_qid, workspace=workspace ) cogs_volume = QuestionAnswer.objects.get( question_id=q2_qid, workspace=workspace ) total_sales = QuestionAnswer.objects.get( question_id=q3_qid, workspace=workspace ) # Pre-process values. sales_volume_yr1_total = 0 if sales_volume.content['yr1_total'] == None else sales_volume.content['yr1_total'] sales_volume_yr2_total = 0 if sales_volume.content['yr2_total'] == None else sales_volume.content['yr2_total'] sales_volume_yr3_total = 0 if sales_volume.content['yr3_total'] == None else sales_volume.content['yr3_total'] # Calculate Total COGS volume. total_cogs_volume = { 'yr1': cogs_volume.content['total_cogs_yr1'] * sales_volume_yr1_total, 'yr2': cogs_volume.content['total_cogs_yr2'] * sales_volume_yr2_total, 'yr3': cogs_volume.content['total_cogs_yr3'] * sales_volume_yr3_total } # Calculate gross profit. gross_profit = { 'yr1': total_sales.content['yr1_total'] - total_cogs_volume['yr1'], 'yr2': total_sales.content['yr2_total'] - total_cogs_volume['yr2'], 'yr3': total_sales.content['yr3_total'] - total_cogs_volume['yr3'] } # Save the answer content. answer.content = gross_profit answer.save() # Return result. return { 'workspace': workspace, 'module': module, 'node': node, 'question': question, 'answer': answer, 'picked': answer.content, }
35.96
114
0.718205
341
2,697
5.407625
0.266862
0.083514
0.058568
0.056399
0.331887
0.184924
0
0
0
0
0
0.024457
0.181313
2,697
74
115
36.445946
0.810688
0.093066
0
0.056604
0
0
0.101821
0.016142
0
0
0
0
0
1
0.018868
false
0
0.226415
0
0.264151
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
028b79055be281928b07338d85f39c55a31765e5
3,054
py
Python
CSC276/book/ch08-OOP-code/ABA_OOP_A.py
XiangHuang-LMC/ijava-binder
4aac9e28ec62d386c908c87b927e1007834ff58a
[ "Unlicense" ]
4
2020-02-17T22:51:44.000Z
2020-04-14T21:32:13.000Z
CSC276/book/ch08-OOP-code/ABA_OOP_A.py
XiangHuang-LMC/ijava-binder
4aac9e28ec62d386c908c87b927e1007834ff58a
[ "Unlicense" ]
null
null
null
CSC276/book/ch08-OOP-code/ABA_OOP_A.py
XiangHuang-LMC/ijava-binder
4aac9e28ec62d386c908c87b927e1007834ff58a
[ "Unlicense" ]
null
null
null
#ABA_OPP_A.py: very simple object-oriented design example. # Validation of name and phone number, in non-persistent storage. import re #regular expression library def addressBook(): aba = ABA_OOP_A() aba.go() class ABA_OOP_A: def __init__(self): self.book = {} def go(self): name = self.getValidName() while name != "exit": phone = self.getValidPhone(name) email = self.getTextLine("Enter email address for " + name + ": ") if name not in self.book: self.book[name] = (phone, email) name = self.getValidName() self.displayBook() #pre: prompt contains a message (typically instructions) to be displayed to user. #post: returns value entered by user as a string. def getTextLine(self, prompt): return input(prompt) #pre: Need to obtain a contact name from the user. #post: A valid contact name is returned. def getValidName(self): #A valid contact name may contain only spaces, uppercase letters, and lowercase letters. pattern = "^[ A-Za-z]+$" result = None #Continue asking for a contact name until valid data is entered. while result == None: name = self.getTextLine("Enter contact name ('exit' to quit): ") #Remove leading and trailing whitespace. name = name.strip() if len(name) == 0: #contact name must contain at least one letter. result = None errorMsg = "A contact name must contain at least one uppercase or lowercase letters." else: #Determine if the entered name is valid. result = re.match(pattern, name) errorMsg = "A contact name must contain only uppercase and lowercase letters and spaces." if result == None: print(errorMsg) return name #pre: Need to obtain a phone number from the user. #post: A valid phone number is returned. def getValidPhone(self, name): #A valid phone number must contain one or more digits. pattern = "^[0-9]+$" result = None #Continue asking for a phone number until valid data is entered. while result == None: phone = self.getTextLine("Enter phone number for " + name + ": ") #Remove leading and trailing whitespace. phone = phone.strip() #Determine if the entered name is valid. result = re.match(pattern, phone) if result == None: print("A phone number must contain one or more digits.") return phone def displayBook(self): print() print("TEST: Display contents of address book") print("TEST: The address book contains the following contacts") sortedNames = sorted(self.book.keys()) for name in sortedNames: print(name, self.book[name])
38.658228
106
0.583824
364
3,054
4.870879
0.324176
0.049633
0.027073
0.037225
0.313593
0.257191
0.179357
0.14326
0.058658
0.058658
0
0.001478
0.335298
3,054
78
107
39.153846
0.871921
0.300917
0
0.18
0
0
0.195684
0
0
0
0
0
0
1
0.14
false
0
0.02
0.02
0.24
0.12
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
65f18d0f9afbfb6aa4b0dbb6e48f3ad29a1c0ea1
3,035
py
Python
backtesting.py
bibofeng/CNN-TA-1
6096bf023235cedc02dd63065b56e0d456daf547
[ "MIT" ]
null
null
null
backtesting.py
bibofeng/CNN-TA-1
6096bf023235cedc02dd63065b56e0d456daf547
[ "MIT" ]
null
null
null
backtesting.py
bibofeng/CNN-TA-1
6096bf023235cedc02dd63065b56e0d456daf547
[ "MIT" ]
null
null
null
import os import pandas as pd from keras.models import load_model import talib as ta # TA-Lib for calculation of indicators from keras.preprocessing.image import ImageDataGenerator import numpy as np def add_indicators(data): data["RSI"] = ta.RSI(data["Close"]) data["EMA"] = ta.EMA(data["Close"]) data["WMA"] = ta.WMA(data["Close"]) data["ROC"] = ta.ROC(data["Close"]) data["TEMA"] = ta.TEMA(data["Close"]) data["CMO"] = ta.CMO(data["Close"]) data["SAR"] = ta.SAR(data["High"], data["Low"]) data["WILLR"] = ta.WILLR(data["High"], data["Low"], data["Close"], timeperiod=15) data["CCI"] = ta.CCI(data["High"], data["Low"], data["Close"], timeperiod=15) data["PPO"] = ta.PPO(data["Close"], fastperiod=6, slowperiod=15) data["MACD"] = ta.MACD(data["Close"], fastperiod=6, slowperiod=15)[0] a = ta.WMA(data["Close"], timeperiod=15 // 2) b = data["WMA"] data["HMA"] = ta.WMA(2 * a - b, timeperiod=int(15 ** (0.5))) data["ADX"] = ta.ADX(data["High"], data["Low"], data["Close"], timeperiod=15) data.dropna(inplace=True) rootpath = os.path.dirname(__file__) ##C:\dev\bbhub\CNN-TA-1\MODEL\type_1\phase_1 fp1 = "C:\\dev\\bbhub\\CNN-TA-1\\MODEL\\type_1\\phase_1\\saved_model.pb" fp2 = "C:\\dev\\bbhub\\CNN-TA-1\\MODEL\\type_1\\phase_2\\saved_model.pb" fp3 = "C:\\dev\\bbhub\\CNN-TA-1\\MODEL\\type_1\\phase_3\\saved_model.pb" # fp1 = rootpath+'/MODEL/type_1/phase_1/saved_model.pb' # fp2 = rootpath+'/MODEL/type_1/phase_2/saved_model.pb' # fp3 = rootpath+'/MODEL/type_1/phase_3/saved_model.pb' model_1 = load_model(fp1) model_2 = load_model(fp2) model_3 = load_model(fp3) root = "C:/temp/CNNTA/Data/AAPL/" # root data path data = pd.read_csv(rootpath + "/Data/AAPL.csv") print("loaded CSV\n") data.dropna(inplace=True) # Drop missing entries data.set_index("Date", inplace=True) # Set Date as the index column # Use Adj Close instead of Close data.drop(labels=["Close"], axis=1, inplace=True) data.rename(columns={"Adj Close": "Close"}, inplace=True) add_indicators(data) # Add indicators to the dataframe image_generator = ImageDataGenerator(rescale=1 / 255) testing_dataset = image_generator.flow_from_directory( directory="c:/temp/CNNTA/AAPL/Images/", target_size=(15, 15), batch_size=32, color_mode="grayscale", ) result_1 = model_1.predict(testing_dataset) result_2 = model_2.predict(testing_dataset) result_3 = model_3.predict(testing_dataset) max_index_1 = np.argmax(result_1, axis=1) max_index_2 = np.argmax(result_2, axis=1) max_index_3 = np.argmax(result_3, axis=1) temp = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # padding 14 days max_index_1 = np.insert(max_index_1, 0, temp) max_index_2 = np.insert(max_index_2, 0, temp) max_index_3 = np.insert(max_index_3, 0, temp) # add 'result' array as new column in DataFrame data["model_1"] = max_index_1.tolist() data["model_2"] = max_index_2.tolist() data["model_3"] = max_index_3.tolist() # { 'Buy': 0, 'Hold': 1, 'Sell': 2} data.to_csv("Data/AAPL/test.csv") # Save Data as CSV
33.722222
85
0.681054
511
3,035
3.882583
0.254403
0.013105
0.018145
0.022177
0.250504
0.228831
0.196573
0.196573
0.175907
0.068548
0
0.042045
0.130148
3,035
89
86
34.101124
0.70947
0.158484
0
0.033898
0
0.050847
0.186367
0.095351
0
0
0
0
0
1
0.016949
false
0
0.101695
0
0.118644
0.016949
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
65f585e006a09d2e189ce95df42d743448d6b822
7,299
py
Python
snakepit/my_robot.py
pbabics/robot-snake-pycon2018
18b2a1b6c1eac10d5f80cce030ea88523b6e7faf
[ "MIT" ]
null
null
null
snakepit/my_robot.py
pbabics/robot-snake-pycon2018
18b2a1b6c1eac10d5f80cce030ea88523b6e7faf
[ "MIT" ]
null
null
null
snakepit/my_robot.py
pbabics/robot-snake-pycon2018
18b2a1b6c1eac10d5f80cce030ea88523b6e7faf
[ "MIT" ]
null
null
null
from snakepit.robot_snake import RobotSnake import random import traceback import queue import sys POSITION_DIRECTION = [RobotSnake.DOWN, RobotSnake.RIGHT, RobotSnake.UP, RobotSnake.LEFT] # COS POSITION_CHANGE_X = [0, 1, 0, -1] # SIN POSITION_CHANGE_Y = [1, 0, -1, 0] # COS class TailChasingRobotSnake(RobotSnake): def __init__(self, *args): super().__init__(*args) self._planned_path = None self._target = None def scan_map(self): snakes = {} numbers = [] obstacles = [] for y in range(self.world.SIZE_Y): for x in range(self.world.SIZE_X): char, color = self.world[y][x] if char in {self.CH_TAIL, self.CH_BODY, self.CH_HEAD}: snake = snakes.get(color, [(y, x), 0]) snake[1] += 1 if char == self.CH_HEAD: snake[0] = (y, x) snakes[color] = snake elif char in {self.CH_STONE, self.CH_DEAD_HEAD, self.CH_DEAD_BODY, self.CH_DEAD_TAIL}: obstacles.append((y, x)) elif '0' <= char <= '9': # number numbers.append((y ,x, ord(char) - 48)) sorted(numbers, key = lambda t: t[2]) return snakes, numbers, obstacles @staticmethod def _manhattan_dist(start_x, start_y, end_x, end_y): return abs(start_x - end_x) + abs(start_y - end_y) def find_path(self, start_x, start_y, end_x, end_y): checked = set() q = queue.PriorityQueue() q.put((0, (start_y, start_x), [])) while not q.empty(): e = q.get() last_y, last_x = e[1] for i in range(4): next_x = last_x + POSITION_CHANGE_X[i] next_y = last_y + POSITION_CHANGE_Y[i] next_path = e[2] + [i] if not (0 <= next_y < self.world.SIZE_Y and 0 <= next_x < self.world.SIZE_X): continue if (next_y, next_x) in checked: continue checked.add((next_y, next_x)) if next_x == end_x and next_y == end_y: return next_path if self.world[next_y][next_x][0] != self.CH_VOID: continue q.put((self._manhattan_dist(next_x, next_y, end_x, end_y) + len(next_path), (next_y, next_x), next_path)) return None def _safe_next_direction(self, position_x, position_y, snakes): best_direction = None best_heuristics = None for i in range(4): next_x = position_x + POSITION_CHANGE_X[i] next_y = position_y + POSITION_CHANGE_Y[i] if not (0 <= next_y < self.world.SIZE_Y and 0 <= next_x < self.world.SIZE_X): continue if self.world[next_y][next_x][0] != self.CH_VOID: continue if not self._is_there_a_god(next_x, next_y, 8): continue min_snake_dist = 0 mean_snake_dist = 0 for snake in snakes.values(): snake_x, snake_y = snake[0] if snake_x == position_x and snake_y == position_y: continue snake_dist = self._manhattan_dist(next_x, next_y, snake_x, snake_y) min_snake_dist = min(min_snake_dist, snake_dist) mean_snake_dist += snake_dist mean_snake_dist = (mean_snake_dist / len(snakes) - 1) if snakes else 0 heuristics = 1 + min_snake_dist * 8 + mean_snake_dist * 2 if best_heuristics is None or best_heuristics < heuristics or (best_heuristics == heuristics and random.random() > 0.5): best_direction = i best_heuristics = heuristics return best_direction def _is_there_a_god(self, start_x, start_y, length): checked = set() q = queue.Queue() q.put(((start_y, start_x), [])) while not q.empty(): e = q.get() last_y, last_x = e[0] for i in range(4): next_x = last_x + POSITION_CHANGE_X[i] next_y = last_y + POSITION_CHANGE_Y[i] next_path = e[1] + [i] if not (0 <= next_y < self.world.SIZE_Y and 0 <= next_x < self.world.SIZE_X): continue if (next_y, next_x) in checked: continue checked.add((next_y, next_x)) if len(e[1]) >= length: return True if self.world[next_y][next_x][0] != self.CH_VOID and not '0' <= self.world[next_y][next_x][0] <= '9': continue q.put(((next_y, next_x), next_path)) return False def _harakriki_path(self, start_x, start_y): checked = set() q = queue.PriorityQueue() q.put((0, (start_y, start_x), [])) while not q.empty(): e = q.get() last_y, last_x = e[1] for i in range(4): next_x = last_x + POSITION_CHANGE_X[i] next_y = last_y + POSITION_CHANGE_Y[i] next_path = e[2] + [i] if (0 <= next_y < self.world.SIZE_Y and 0 <= next_x < self.world.SIZE_X): return next_path if (next_y, next_x) in checked: continue checked.add((next_y, next_x)) if next_x == end_x and next_y == end_y: return next_path if self.world[next_y][next_x][0] in {self.CH_STONE, self.CH_DEAD_HEAD, self.CH_DEAD_BODY, self.CH_DEAD_TAIL}: return next_path if self.world[next_y][next_x][0] in {self.CH_HEAD, self.CH_BODY, self.CH_TAIL}: if self.world[next_y][next_x][1] == self.color: return next_path else: continue q.put((self._manhattan_dist(next_x, next_y, end_x, end_y) + len(next_path), (next_y, next_x), next_path)) return None def _check_path(self, start_x, start_y): next_x, next_y = start_x, start_y for move in self._planned_path: if move is None: return False next_x = next_x + POSITION_CHANGE_X[move] next_y = next_y + POSITION_CHANGE_Y[move] if self.world[next_y][next_x][0] != self.CH_VOID and not '0' <= self.world[next_y][next_x][0] <= '9': return False return True def next_direction(self, initial=False): world = self.world try: snakes, numbers, obstacles = self.scan_map() (my_y, my_x), _ = snakes[self.color] nearest = None nearest_dist = None #print('I am at', my_x, my_y) for number in numbers: dist = self._manhattan_dist(my_x, my_y, number[1], number[0]) if dist == 0: continue #print('Number .. ', number[1], number[0], 'number: ', number[2], 'dist', dist) if nearest is None or (nearest_dist + nearest[2] / 2) > (dist + number[2] / 2): nearest = number nearest_dist = dist if nearest is not None: pass #print('Nearest .. ', nearest[1], nearest[0], 'number: ', nearest[2], 'dist', nearest_dist) if not self._is_there_a_god(my_x, my_y, 5): self._planned_path = self._harakriki_path(my_x, my_y) print('Going harakiri :(') self._target = None elif nearest is not None: if self._planned_path is not None and self._planned_path and self._target: if not self._check_path(my_x, my_y): self._planned_path = None if self._target is not None: target_dist = self._manhattan_dist(my_x, my_y, self._target[1], self._target[0]) print('Checking against nearest, target: ', target_dist, 'nearest: ', nearest_dist) if target_dist > nearest_dist: self._planned_path = None if self._planned_path is None or not self._planned_path or not self._target: self._planned_path = self.find_path(my_x, my_y, nearest[1], nearest[0]) self._target = nearest if self._planned_path is None or not self._planned_path: self._planned_path = [self._safe_next_direction(my_x, my_y, snakes)] if self._planned_path and self._planned_path[0] is not None: self.current_direction = POSITION_DIRECTION[self._planned_path[0]] self._planned_path = self._planned_path[1:] if not self._planned_path: self._target = None self.changed_direction = True return self.current_direction except KeyboardInterrupt: sys.exit(1) except: traceback.print_exc()
31.597403
123
0.667078
1,224
7,299
3.68219
0.105392
0.038828
0.063235
0.039938
0.488573
0.431995
0.396716
0.351453
0.330819
0.330819
0
0.014488
0.205645
7,299
230
124
31.734783
0.762849
0.029319
0
0.398936
0
0
0.009326
0
0
0
0
0
0
1
0.047872
false
0.005319
0.026596
0.005319
0.164894
0.015957
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
65f61b45a738a071d063e90b433846c325439236
1,334
py
Python
screen_brightness.py
ell-hol/brightness_controller
2700d81c7a86bf3386a221df615551bfe247c6e5
[ "Apache-2.0" ]
2
2020-08-14T16:46:34.000Z
2020-08-24T08:47:16.000Z
screen_brightness.py
ell-hol/brightness_controller
2700d81c7a86bf3386a221df615551bfe247c6e5
[ "Apache-2.0" ]
null
null
null
screen_brightness.py
ell-hol/brightness_controller
2700d81c7a86bf3386a221df615551bfe247c6e5
[ "Apache-2.0" ]
2
2021-01-06T09:23:55.000Z
2021-04-18T20:06:12.000Z
#! /usr/bin/python3 #Version 1 #https://github.com/momen84 import subprocess from tkinter import * def get_display_name(): display=subprocess.check_output('xrandr | grep -w connected',stderr=subprocess.STDOUT,shell=True) return display.split()[0].decode('utf-8') def apply(scale_value): subprocess.call(['xrandr' ,'--output' ,'{}'.format(get_display_name()), '--brightness', '{}'.format(scale_value)]) def get_scale_value(event): frame.focus_set() apply(scale.get()/100) def get_current_brightness(event): current=subprocess.check_output('xrandr --verbose | grep -m 1 -i brightness',stderr=subprocess.STDOUT,shell=True) current=current.decode('utf-8').strip().split(':')[1].strip() return float(current) def center(root): h=root.winfo_screenheight() w=root.winfo_screenwidth() root.geometry("+%d+%d" % (w/2, h/2)) root = Tk() center(root) root.title("Screen Brightness") scalevar = IntVar() frame=Frame(root,width=50, height=50) scale = Scale(frame, from_=0, to=100, variable=scalevar, orient="horizontal") button=Button(frame,text='Apply',width=50) root.update_idletasks() scalevar.set(get_current_brightness(None)*100) root.update() scale.pack(side="top", fill="x", expand=True) button.pack(side="bottom", fill="x", expand=True) button.bind("<Button-1>",get_scale_value) frame.pack() root.mainloop()
24.254545
116
0.725637
192
1,334
4.932292
0.442708
0.042239
0.029567
0.057022
0.10982
0
0
0
0
0
0
0.02297
0.086207
1,334
55
117
24.254545
0.753897
0.03973
0
0
0
0
0.132916
0
0
0
0
0
0
1
0.151515
false
0
0.060606
0
0.272727
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
65fab1e4f190cac3112b444ae1204685901b870e
362
py
Python
gpu_utils/_scripts/tmux_gpu_info.py
SliMM/gpu-utils
b0a6d608620ec580bcd3b618e0591714998da8fb
[ "MIT" ]
10
2019-03-07T17:26:35.000Z
2022-02-19T10:49:34.000Z
gpu_utils/_scripts/tmux_gpu_info.py
SliMM/gpu-utils
b0a6d608620ec580bcd3b618e0591714998da8fb
[ "MIT" ]
2
2019-03-07T15:42:08.000Z
2021-07-05T18:49:18.000Z
gpu_utils/_scripts/tmux_gpu_info.py
SliMM/gpu-utils
b0a6d608620ec580bcd3b618e0591714998da8fb
[ "MIT" ]
1
2021-02-22T01:33:19.000Z
2021-02-22T01:33:19.000Z
# I use this to show the utilisation of each GPU in the status bar in tmux # e.g. with this line in ~/.tmux.conf: # set -g status-right '#[fg=yellow]#(tmux_gpu_info.py)' from .. import get_gpus def main(): gpus = get_gpus() # list of util_used for each GPU print([round(gpu.util_used, 2) for gpu in gpus]) if __name__ == "__main__": main()
21.294118
74
0.657459
64
362
3.5
0.625
0.0625
0
0
0
0
0
0
0
0
0
0.003521
0.21547
362
16
75
22.625
0.785211
0.530387
0
0
0
0
0.04908
0
0
0
0
0
0
1
0.166667
false
0
0.166667
0
0.333333
0.166667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
65fce86715fc05d1fc30296058cf5a3f40d3d4b7
336
py
Python
views/home.py
mikeckennedy/fastapi-twitch-examples
d2baafb51569fb9fe90c029238c2d7b02d61627a
[ "MIT" ]
3
2020-10-28T21:53:36.000Z
2020-12-19T09:49:33.000Z
views/home.py
imbi7py/fastapi-twitch-examples
d2baafb51569fb9fe90c029238c2d7b02d61627a
[ "MIT" ]
null
null
null
views/home.py
imbi7py/fastapi-twitch-examples
d2baafb51569fb9fe90c029238c2d7b02d61627a
[ "MIT" ]
1
2020-12-19T09:49:30.000Z
2020-12-19T09:49:30.000Z
import datetime import fastapi router = fastapi.APIRouter() @router.get('/') def index(): return dict(msg="Hello world", time=datetime.datetime.now().isoformat()) @router.get('/home') def home(): msg = 'This is not the home you\'re looking for' return fastapi.Response(msg, status_code=302, headers={'location': '/'})
19.764706
76
0.678571
45
336
5.044444
0.688889
0.079295
0
0
0
0
0
0
0
0
0
0.01049
0.14881
336
16
77
21
0.783217
0
0
0
0
0
0.151786
0
0
0
0
0
0
1
0.2
false
0
0.2
0.1
0.6
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
65fd048bfad2318777d09ee1fca24bfa9023c451
420
py
Python
notas.py
BrayanTorres2/Curso_Refuerso_Algoritmos_y_programacion
56a6a67b5289f5e3e26c1aae6ea2c41c7f115fff
[ "MIT" ]
1
2021-08-17T20:49:22.000Z
2021-08-17T20:49:22.000Z
notas.py
BrayanTorres2/Curso_Refuerso_Algoritmos_y_programacion
56a6a67b5289f5e3e26c1aae6ea2c41c7f115fff
[ "MIT" ]
null
null
null
notas.py
BrayanTorres2/Curso_Refuerso_Algoritmos_y_programacion
56a6a67b5289f5e3e26c1aae6ea2c41c7f115fff
[ "MIT" ]
1
2021-11-19T02:28:38.000Z
2021-11-19T02:28:38.000Z
''' Entradas parcial1--->float parcial2--->float parcial3--->float examenfinal--->float trabajofinal---float Salidas notafinal--->nota-->float ''' parcial1=float(input("")) parcial2=float(input("")) parcial3=float(input("")) examenfinal=float(input("")) trabajofinal=float(input("")) nota= ((parcial1+parcial2+parcial3)/3)*0.55+(examenfinal*0.30)+(trabajofinal*0.15) print("su nota final es: "+str(nota))
24.705882
83
0.685714
50
420
5.76
0.42
0.173611
0
0
0
0
0
0
0
0
0
0.049608
0.088095
420
17
84
24.705882
0.70235
0.328571
0
0
0
0
0.069498
0
0
0
0
0
0
1
0
false
0
0
0
0
0.142857
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a018c60307b1b6fe0cfceda628c8601a24a3599
5,090
py
Python
main.py
onidzelskyi/dating_grabber
08f27efe6e0a75e76a7b50f7f126fb01de0f5345
[ "MIT" ]
null
null
null
main.py
onidzelskyi/dating_grabber
08f27efe6e0a75e76a7b50f7f126fb01de0f5345
[ "MIT" ]
null
null
null
main.py
onidzelskyi/dating_grabber
08f27efe6e0a75e76a7b50f7f126fb01de0f5345
[ "MIT" ]
null
null
null
"""Main script for questonare grabbing.""" from selenium import webdriver from selenium.webdriver.remote.remote_connection import LOGGER from pyvirtualdisplay import Display from scrapy import Selector from sqlalchemy import and_ from sqlalchemy.orm import sessionmaker from sqlalchemy.exc import IntegrityError import time import logging from dating_grabber.models import logger, Question, User, Image, Choice, config, engine timeout = config.getint('general', 'timeout') question_url = 'http://www.mamba.ru/ru/questions/list.phtml?from_item=10' xpath_comments = '//ul[@class="answers-container"]/li[@class="comment"]' xpath_next_question = '//div[@class="question_module__next"]' LOGGER.setLevel(logging.WARNING) # Run without GUI display = Display(visible=0, size=(800, 800)) display.start() driver = webdriver.Firefox() connection = engine.connect() Session = sessionmaker() session = Session(bind=connection) def _add_user(user_name: str, user_age: str, user_avatar: str) -> User: """Add user to the system. @:arg user_name - user' name, a string. @:arg user_age - user' age, a string. @:arg user_avatar - user' photo profile, a string. @:return User as object.""" # Find out if user already exists in DB user = session.query(User).filter(and_(User.name == user_name, User.age == user_age)).one_or_none() # User not found in DB. Add new user if not user: user = User(user_name, user_age) # user_avatar can be empty or None. Add image to user only if user_avatar exists. if user_avatar: image = Image(user_avatar) user.images.append(image) return user def main(): """Main routine.""" driver.get(question_url) _exit = False i = 1 while not _exit: # Extract relevant information from page sel = Selector(text=driver.page_source) xpath_prefix = '//*[@id="QuestionCarousel"]/div[1]' xpath_suffix_a = 'ul/li[{}]/div/div/div/ul/li/div/div/div'.format(i) xpath_suffix_b = 'ul/li[{}]/div/div[2]/div/span/img/@src'.format(i) xpath_suffix_c = 'div[1]/ul/li[{}]/div/div/div/ul/li/div/div/div/div/span[1]/span[2]/text()'.format(i) user_question = sel.xpath('{}/div[1]/{}/h1/text()'.format(xpath_prefix, xpath_suffix_a)).extract_first() question_likes = sel.xpath('{}/{}'.format(xpath_prefix, xpath_suffix_c)).extract_first() user_name = sel.xpath('{}/div[1]/{}/p/span[1]/text()'.format(xpath_prefix, xpath_suffix_a)).extract_first() user_age = sel.xpath('{}/div[1]/{}/p/span[2]/text()'.format(xpath_prefix, xpath_suffix_a)).extract_first() user_avatar = sel.xpath('{}/{}'.format(xpath_prefix, xpath_suffix_b)).extract_first() # Add user user = _add_user(user_name, user_age, user_avatar) # Find out if user already has a question question = session.query(Question).filter(User.id == Question.user_id).filter( Question.text == user_question).one_or_none() if not question: question = Question(user_question, question_likes) user.questions.append(question) else: question.likes = question_likes logger.debug('Question: {} {}'.format(user, question)) # Add items in table for inserting session.add(user) try: session.commit() except IntegrityError as err: logger.error(err) session.roolback() # Gathering choices comments = driver.find_elements_by_xpath(xpath_comments) for comment in comments: choice_text = comment.find_element_by_class_name('comment__data').text choice_likes = comment.find_elements_by_class_name('icon-counter')[1].text user_info = comment.find_elements_by_class_name('comment__data')[1].text.split(', ') user_name = user_info[0] user_age = user_info[1] if len(user_info) == 2 else None user_avatar = 
comment.find_element_by_xpath('//img[@class="avatar"]').get_attribute('src') # Add user user = _add_user(user_name, user_age, user_avatar) # Find out if user already has a question choice = session.query(Choice).filter(User.id == Choice.user_id).filter( Choice.text == choice_text).one_or_none() if not choice: choice = Choice(choice_text, choice_likes) user.choices.append(choice) question.choices.append(choice) else: choice.likes = choice_likes logger.debug('Choice: {} {}'.format(user, choice)) session.add(user) try: session.commit() except IntegrityError as err: logger.error(err) session.roolback() # Next question driver.find_element_by_xpath(xpath_next_question).click() time.sleep(timeout) # Time delay to prevent banning from remote host. i = 2 if __name__ == '__main__': main()
35.84507
115
0.642633
663
5,090
4.728507
0.254902
0.025518
0.026794
0.015949
0.257735
0.225837
0.195853
0.163636
0.163636
0.149282
0
0.00692
0.233399
5,090
141
116
36.099291
0.796515
0.127308
0
0.181818
0
0.011364
0.121646
0.085493
0
0
0
0
0
1
0.022727
false
0
0.113636
0
0.147727
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a031be5dbcf86185d64ca19c77daa520fc53ec8
1,444
py
Python
tools/locate-py-files.py
BearerPipelineTest/client-2
2fc881cd72f79a44258f61f1c75c452ee1ff463f
[ "MIT" ]
null
null
null
tools/locate-py-files.py
BearerPipelineTest/client-2
2fc881cd72f79a44258f61f1c75c452ee1ff463f
[ "MIT" ]
null
null
null
tools/locate-py-files.py
BearerPipelineTest/client-2
2fc881cd72f79a44258f61f1c75c452ee1ff463f
[ "MIT" ]
null
null
null
import os import pathlib CONFIG = { "include": [ "functional_tests", "standalone_tests", "tests", "tools", "wandb", ], "exclude": [ os.path.join("wandb", "proto"), os.path.join("wandb", "sweeps"), os.path.join("wandb", "vendor"), os.path.join("tests", "logs"), ], "exclude_unrooted": [ os.path.join("wandb", "run-"), os.path.join("wandb", "offline-run-"), ], } def locate_py_files(root_path: pathlib.Path): """ Recursively search for Python files in the given root directory. """ include = {root_path / dir_path for dir_path in CONFIG["include"]} exclude = {root_path / dir_path for dir_path in CONFIG["exclude"]} exclude_unrooted = CONFIG["exclude_unrooted"] for path in map(str, root_path.rglob("*.py")): if ( any( path.startswith(str(root_path / dir_path)) for dir_path in map(pathlib.Path.absolute, include) ) and all( not path.startswith(str(root_path / dir_path)) for dir_path in map(pathlib.Path.absolute, exclude) ) and all(dir_path not in path for dir_path in exclude_unrooted) ): print(path) if __name__ == "__main__": repo_root_path = pathlib.Path.absolute(pathlib.Path(__file__).parent.parent) locate_py_files(repo_root_path)
28.313725
80
0.572715
173
1,444
4.537572
0.289017
0.089172
0.076433
0.095541
0.272611
0.252229
0.252229
0.252229
0.252229
0.168153
0
0
0.293629
1,444
50
81
28.88
0.769608
0.044321
0
0.073171
0
0
0.136364
0
0
0
0
0
0
1
0.02439
false
0
0.04878
0
0.073171
0.02439
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a03abf839f480263bad7c9287c041d58d794e25
4,630
py
Python
player.py
Salaander/poker-player-relaxed-lobster
a64c26ca87aa33867cd255c2b23f088fab6f40a9
[ "MIT" ]
null
null
null
player.py
Salaander/poker-player-relaxed-lobster
a64c26ca87aa33867cd255c2b23f088fab6f40a9
[ "MIT" ]
null
null
null
player.py
Salaander/poker-player-relaxed-lobster
a64c26ca87aa33867cd255c2b23f088fab6f40a9
[ "MIT" ]
null
null
null
import urllib2 import json import traceback class Player: VERSION = "Lobster eats horses" def __init__(self): self.raise_amount = 0 self.config = {} self.config["force_all_in"] = False self.config["basic_raise_amount"] = 0 try: if False: self.config_url = "http://salaander.hu/lean.json" response = urllib2.urlopen(self.config_url, timeout=1) content = str(response.read()) self.config = json.loads(content) except Exception as e: print(e) def betRequest(self, g): try: if self.config["force_all_in"]: return 5000 if self.config["basic_raise_amount"]: assert isinstance(self.config["basic_raise_amount"], int) if self.config["basic_raise_amount"] >= 0: self.raise_amount = int(self.config["basic_raise_amount"]) in_action = g["players"][g["in_action"]] if len(g["community_cards"]) == 0: self.pre_flop(g) elif len(g["community_cards"]) == 3: self.flop(g) elif len(g["community_cards"]) == 4: self.turn(g) elif len(g["community_cards"]) == 5: self.river(g) result = int(g["current_buy_in"] - in_action["bet"] + self.raise_amount) if result < 0: result = 0 return result except Exception as ex: print(ex) traceback.print_exc() return 1200 def action_maching_card(self,g): in_action = g["players"][g["in_action"]] for card in in_action["hole_cards"]: if self.check_matching_cards(card,g["community_cards"]) > 0 and self.value_cards(card) > 8: self.raise_amount += 100 def pre_flop(self, g): in_action = g["players"][g["in_action"]] strength = self.strength(in_action["hole_cards"]) if strength < 24 and self.am_i_dealer(): return 0 if strength <= 12: return 0 if 12 < strength < 19: if g["pot"] > 200: self.raise_amount = -100 else: self.raise_amount = 0 stack_defense = 0.1 if g["current_buy_in"] > int(in_action["stack"] * stack_defense): return 0 if strength >= 19: self.raise_amount += 100 if g["pot"] > 500: self.raise_amount = 0 if strength >= 24: self.raise_amount += 300 def am_i_dealer(self, g): return g["in_action"] == g["dealer"] def flop(self,g): self.raise_amount = 0 self.action_maching_card(g) def turn(self,g): self.raise_amount = 0 self.action_maching_card(g) def river(self,g): self.raise_amount = 0 self.action_maching_card(g) def _call(self, g): in_action = g["players"][g["in_action"]] return int(g["current_buy_in"] - in_action["bet"]) def _raise(self, g, amount): assert isinstance(amount, int) in_action = g["players"][g["in_action"]] return int(g["current_buy_in"] - in_action["bet"] + amount) def strength(self, cards): card1 = int(self.value_cards(cards[0])) card2 = int(self.value_cards(cards[1])) card1s = cards[0]["suit"] card2s = cards[1]["suit"] result = card1 + card2 #/ (2 * 14)) * 10 # flush if card1s == card2s: result = int(result*1.15) # sorra if abs(card1 - card2) <= 2: result = int(result*1.1) # pair if card1 == card2: result = int(result*1.45) return result def value_cards(self, card): rank = card["rank"] switcher = { "J": 11, "Q": 12, "K": 13, "A": 14, } return int(switcher.get(rank, rank)) def check_matching_cards(self, card, community_cards): match_count = 0 for comm_card in community_cards: if (self.value_cards(card) == self.value_cards(comm_card)): match_count += 1 return match_count # return len([]) def check_flush(self, cards, community_cards): suits = [i["suit"] for i in cards + community_cards] for i in set(suits): if suits.count(i) >= 5: return True return False def showdown(self, game_state): pass
28.757764
103
0.52203
568
4,630
4.072183
0.223592
0.080847
0.077821
0.041505
0.313878
0.230004
0.183744
0.149589
0.137916
0.109814
0
0.034645
0.357883
4,630
160
104
28.9375
0.743357
0.010367
0
0.186992
0
0
0.097881
0
0
0
0
0
0.01626
1
0.121951
false
0.00813
0.02439
0.00813
0.276423
0.02439
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a04347a1fdbc8c4463a91291b7146a61b0fa267
801
py
Python
Voice broadcast number/baidu_aip_voice.py
16647615268/python
933644a49dfac2c614e243e3db378441cb6a6dd7
[ "Apache-2.0" ]
3
2019-10-05T07:26:53.000Z
2019-10-10T08:08:17.000Z
Voice broadcast number/baidu_aip_voice.py
yangli-os/python
ddf5636fb522c0b28f93866f0f65a38fa007a79b
[ "Apache-2.0" ]
null
null
null
Voice broadcast number/baidu_aip_voice.py
yangli-os/python
ddf5636fb522c0b28f93866f0f65a38fa007a79b
[ "Apache-2.0" ]
2
2020-06-18T09:54:02.000Z
2021-01-24T03:48:19.000Z
# -*- coding: utf-8 -*-
from aip import AipSpeech

"""Your APPID / API key / secret key"""
APP_ID = '17084747'
API_KEY = 'DBz2N6iV9tCrCGVeoBYWNBAa'
SECRET_KEY = 'QwmbnPwyEugy7RZ10xuANl82WSiAMzuB'

sourse = "12321.42"
numbers_list = ['零', '一', '二', '三', '四', '五', '六', '七', '八', '九']
units_list = ["拾", "佰", "仟", "万", "亿"]
others_list = ["已收到", "点"]

client = AipSpeech(APP_ID, API_KEY, SECRET_KEY)


def create_sound_basic(basic):
    result = client.synthesis(basic, 'zh', 1, {'vol': 5, 'per': 0})
    # On success the synthesis call returns the audio as raw bytes;
    # on failure it returns a dict (see the error codes below).
    if not isinstance(result, dict):
        with open(basic + '.wav', 'wb') as f:
            f.write(result)


# create the basic sound files
for numbers in numbers_list:
    create_sound_basic(numbers)
for numbers in units_list:
    create_sound_basic(numbers)
for numbers in others_list:
    create_sound_basic(numbers)
27.62069
66
0.674157
117
801
4.444444
0.623932
0.084615
0.123077
0.115385
0.201923
0.15
0.15
0.15
0
0
0
0.039823
0.153558
801
29
67
27.62069
0.727139
0.083645
0
0.15
0
0
0.148096
0.078984
0
0
0
0
0
1
0.05
false
0
0.05
0
0.1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a061ad3864279150ed8402085551d95ac6cca8d
1,232
py
Python
boot/scripts/logger.py
maxmlr/rpi-photobooth
bd479e321320d433bbc33c678b3e0a97640b6cb5
[ "MIT" ]
1
2021-11-08T20:46:00.000Z
2021-11-08T20:46:00.000Z
boot/scripts/logger.py
maxmlr/rpi-photobooth
bd479e321320d433bbc33c678b3e0a97640b6cb5
[ "MIT" ]
72
2020-01-23T16:56:30.000Z
2021-11-14T23:25:15.000Z
boot/scripts/logger.py
maxmlr/rpi-photobooth
bd479e321320d433bbc33c678b3e0a97640b6cb5
[ "MIT" ]
null
null
null
from os import environ
import logging


class Logger():
    def __init__(self, name, level=logging.INFO):
        self.level = level
        self.log = logging.getLogger(name)
        self.log.setLevel(level if level else environ.get("LOGLEVEL", logging.INFO))

    def addConsoleHandler(self, level=None, formatted=True):
        console_handler = logging.StreamHandler()
        console_handler.setLevel(level if level else self.level)
        if formatted:
            formatter = logging.Formatter(
                "%(asctime)s - %(filename)s:%(lineno)-4s - [ %(levelname)8s ] --- %(message)s"
            )
            console_handler.setFormatter(formatter)
        self.log.addHandler(console_handler)

    def addFileHandler(self, filename, level=None):
        file_handler = logging.FileHandler(filename)
        file_handler.setLevel(level if level else self.level)
        formatter = logging.Formatter(
            "%(asctime)s - %(filename)s:%(lineno)-4s - [ %(levelname)8s ] --- %(message)s"
        )
        file_handler.setFormatter(formatter)
        self.log.addHandler(file_handler)

    def getLogger(self):
        return self.log

    def getRootLogger(self):
        return logging.getLogger()
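A minimal usage sketch, assuming only the Logger class above (the logger name and file path are hypothetical):

log = Logger("photobooth", level=logging.DEBUG)
log.addConsoleHandler()                    # formatted output to stderr
log.addFileHandler("/tmp/photobooth.log")  # same format, persisted to disk
log.getLogger().info("booth ready")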
34.222222
94
0.632305
133
1,232
5.766917
0.323308
0.045632
0.05867
0.078227
0.432855
0.401565
0.284224
0.284224
0.179922
0.179922
0
0.004343
0.252435
1,232
35
95
35.2
0.828447
0
0
0.142857
0
0.071429
0.12987
0.040584
0
0
0
0
0
1
0.178571
false
0
0.071429
0.071429
0.357143
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a08893f19c7f634e91dee1a11fb5091ca317c98
618
py
Python
Bot/extensions/events/start.py
ChrissisCodeXD/Hikari-TestProject
236c8fc9081172d9edff6d629e5d11c5abe64205
[ "MIT" ]
null
null
null
Bot/extensions/events/start.py
ChrissisCodeXD/Hikari-TestProject
236c8fc9081172d9edff6d629e5d11c5abe64205
[ "MIT" ]
null
null
null
Bot/extensions/events/start.py
ChrissisCodeXD/Hikari-TestProject
236c8fc9081172d9edff6d629e5d11c5abe64205
[ "MIT" ]
null
null
null
from imports import *
from Bot.logger.main_loggers import Logger

log = logging.getLogger(__name__)
Log = Logger()

start_plugin = lightbulb.Plugin("start_event_plugin")


@start_plugin.listener(lightbulb.LightbulbStartedEvent)
async def on_start(event: lightbulb.LightbulbStartedEvent):
    print("Bot ready")
    print(
        f"Invite URL: https://discord.com/api/oauth2/authorize?client_id={event.app.application.id}&permissions=8&scope=bot%20applications.commands")
    # await Log.send_on_start(event.app)


def load(bot):
    bot.add_plugin(start_plugin)


def unload(bot):
    bot.remove_plugin(start_plugin)
25.75
149
0.765372
83
618
5.493976
0.554217
0.096491
0.111842
0
0
0
0
0
0
0
0
0.007339
0.118123
618
23
150
26.869565
0.829358
0.055016
0
0
0
0.071429
0.281787
0
0
0
0
0
0
1
0.142857
false
0
0.142857
0
0.285714
0.142857
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a08ff6a859da4c5169c0aad47f670df14b30a63
3,618
py
Python
lingvo/tools/bpe_word_tokenizer.py
muntasir2000/lingvo
1555299b817288b5a6637ded416dbbdc9b00036d
[ "Apache-2.0" ]
null
null
null
lingvo/tools/bpe_word_tokenizer.py
muntasir2000/lingvo
1555299b817288b5a6637ded416dbbdc9b00036d
[ "Apache-2.0" ]
null
null
null
lingvo/tools/bpe_word_tokenizer.py
muntasir2000/lingvo
1555299b817288b5a6637ded416dbbdc9b00036d
[ "Apache-2.0" ]
null
null
null
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates the words_to_ids file from a BPE encoded corpus and BPE vocab file.

Extracts all the words in the corpus with their corresponding list of ids.
Each subword in the vocab file is mapped to their line number as its id.
The lines of the output file are like:
  ...
  TAKE 43,7,50,14
  THAT 16,35
  THE 26
  THEIR 16,4,9,56
  ...
Which is compatible with the BPE tokenizer op in core/tokenizer.py.

Typical workflow:
  subword-nmt learn-bpe train_file code_file
  subword-nmt apply-bpe code_file train_file train_bpe_file
  subword-nmt get-vocab train_bpe_file vocab_file
  bpe_word_tokenizer train_bpe_file vocab_file words_to_ids_file
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import six
from six.moves import map
import tensorflow as tf

tf.flags.DEFINE_string('encoded_filepath', '',
                       'Path to the BPE encoded corpus file.')
tf.flags.DEFINE_string('vocab_filepath', '', 'Path to the BPE vocab file.')
tf.flags.DEFINE_string('output_filepath', '',
                       'The output filepath (word_to_ids).')

FLAGS = tf.flags.FLAGS


def _GetVocabulary(vocab_filepath):
  """Maps the first word in each line of the given file to its line number."""
  vocab = {}
  with open(vocab_filepath, 'r') as vocab_file:
    for i, line in enumerate(vocab_file):
      word = line.strip('\r\n ').split(' ')[0]
      if word:
        vocab[word] = i
  return vocab


def _ExtractTokenization(encoded_filepath, vocab):
  """Maps the words in the encoded file to their list of token ids.

  Reads all the subwords in encoded file. Concatenates them while they have
  @@ as their last two characters. The last token of a word is the subword
  without @@. Maps the full word to the list of corresponding vocab ids of
  the subwords from the vocab dictionary.

  Args:
    encoded_filepath: String, filepath of the BPE encoded file.
    vocab: Dictionary of subwords (string) to token ids (int).

  Returns:
    Dictionary of words (string) to list of token ids (list of int).
  """
  word_tokenization = {}
  with open(encoded_filepath, 'r') as encoded_file:
    for line in encoded_file:
      full_word = ''
      ids = []
      for word in line.strip('\r\n ').split(' '):
        ids.append(vocab[word])
        if word[-2:] == '@@':
          full_word += word[:-2]
        else:
          full_word += word
          word_tokenization[full_word] = ids
          full_word = ''
          ids = []
  return word_tokenization


def main(_):
  vocab = _GetVocabulary(FLAGS.vocab_filepath)
  word_tokenization = _ExtractTokenization(FLAGS.encoded_filepath, vocab)
  with open(FLAGS.output_filepath, 'w') as output:
    for word, ids in six.iteritems(word_tokenization):
      output.write(word + ' ')
      output.write(','.join(map(str, ids)))
      output.write('\r\n')


if __name__ == '__main__':
  tf.app.run(main)
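A minimal end-to-end sketch of the two helpers above; the file contents are hypothetical, not from the original repo:

import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.vocab', delete=False) as vf:
    vf.write('TH@@ 10\nE 7\nAT 5\n')        # one "subword count" pair per line
with tempfile.NamedTemporaryFile('w', suffix='.bpe', delete=False) as ef:
    ef.write('TH@@ E TH@@ AT\n')            # a BPE-encoded corpus line

vocab = _GetVocabulary(vf.name)             # {'TH@@': 0, 'E': 1, 'AT': 2}
print(_ExtractTokenization(ef.name, vocab)) # {'THE': [0, 1], 'THAT': [0, 2]}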
33.192661
80
0.689608
530
3,618
4.564151
0.333962
0.026044
0.014882
0.023563
0.081025
0
0
0
0
0
0
0.011065
0.200663
3,618
108
81
33.5
0.82538
0.531233
0
0.086957
0
0
0.105231
0
0
0
0
0
0
1
0.065217
false
0
0.130435
0
0.23913
0.021739
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a09ef69b2de70a557dfd34d3d09ebf6f67b1e23
1,580
py
Python
python01/PythonThreadLocal.py
zhayangtao/HelloPython
e0e8b450afba1382f56411344ad54ef9910a5004
[ "Apache-2.0" ]
null
null
null
python01/PythonThreadLocal.py
zhayangtao/HelloPython
e0e8b450afba1382f56411344ad54ef9910a5004
[ "Apache-2.0" ]
1
2017-09-01T03:59:11.000Z
2017-09-01T03:59:11.000Z
python01/PythonThreadLocal.py
zhayangtao/HelloPython
e0e8b450afba1382f56411344ad54ef9910a5004
[ "Apache-2.0" ]
null
null
null
import threading

# Global ThreadLocal object
local_school = threading.local()


def process_student():
    std = local_school.student
    print('Hello, %s (in %s)' % (std, threading.current_thread().name))


def process_thread(name):
    local_school.student = name
    process_student()


t1 = threading.Thread(target=process_thread, args=('Alice',), name='Thread-A')
t2 = threading.Thread(target=process_thread, args=('Blice',), name='Thread-B')
t1.start()
t2.start()
t1.join()
t2.join()

# Distributed processes
# By exposing a Queue over the network through the managers module, processes
# on other machines can access the Queue as well.
# First the server process: it starts the Queue, registers it on the network,
# and then writes tasks into it.
import random, time, queue
from multiprocessing.managers import BaseManager

# queue for sending tasks
task_queue = queue.Queue()
# queue for receiving results
result_queue = queue.Queue()


# QueueManager derived from BaseManager
class QueueManager(BaseManager):
    pass


def return_task_queue():
    global task_queue
    return task_queue


def return_result_queue():
    global result_queue
    return result_queue


# register both Queues on the network
QueueManager.register('get_task_queue', callable=return_task_queue)
QueueManager.register('get_result_queue', callable=return_result_queue)
# bind to port 5000, set the auth key to 'abc'
manager = QueueManager(address=('', 5000), authkey=b'abc')
# start the manager
manager.start()
# obtain the network-accessible Queue objects
task = manager.get_task_queue()
result = manager.get_result_queue()
# add tasks
for i in range(10):
    n = random.randint(0, 1000)
    print('Put task %d' % n)
    task.put(n)
print('Try get results')
for i in range(10):
    r = result.get(timeout=10)
    print('Result: %s' % r)
# shut down
manager.shutdown()
print('master exit.')
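The file above only implements the server side. A minimal worker-side sketch, assuming the same port and auth key (class and variable names here are hypothetical): it connects to the manager, pulls tasks, and pushes results back.

from multiprocessing.managers import BaseManager
import queue


class WorkerQueueManager(BaseManager):
    pass


# register only the names; the callables live on the server
WorkerQueueManager.register('get_task_queue')
WorkerQueueManager.register('get_result_queue')

m = WorkerQueueManager(address=('127.0.0.1', 5000), authkey=b'abc')
m.connect()
task = m.get_task_queue()
result = m.get_result_queue()
while True:
    try:
        n = task.get(timeout=1)
    except queue.Empty:
        break
    result.put('%d * %d = %d' % (n, n, n * n))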
21.351351
78
0.73481
204
1,580
5.539216
0.411765
0.055752
0.039823
0.049558
0.090265
0.067257
0
0
0
0
0
0.018328
0.136709
1,580
73
79
21.643836
0.810117
0.149367
0
0.047619
0
0
0.093233
0
0
0
0
0
0
1
0.095238
false
0.02381
0.071429
0
0.238095
0.119048
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a0addb9b2b0dc7613dc773ea01f89d791ef0752
15,527
py
Python
src/eicu_eda.py
hlzhou/peer-score
bad2996b233cb118302d7453f6bb356e72912176
[ "MIT" ]
5
2020-06-04T08:57:03.000Z
2021-09-15T14:24:15.000Z
src/eicu_eda.py
hlzhou/peer-score
bad2996b233cb118302d7453f6bb356e72912176
[ "MIT" ]
1
2020-08-17T10:06:05.000Z
2020-08-17T10:06:05.000Z
src/eicu_eda.py
hlzhou/peer-score
bad2996b233cb118302d7453f6bb356e72912176
[ "MIT" ]
3
2020-06-04T08:58:27.000Z
2021-07-11T08:25:00.000Z
import pandas as pd

name_map = {
    'age': 'Age',
    'gender': 'Gender',
    'cancer': 'Malignancy',
    # comorbidities
    'liver_disease': 'Liver disease',
    'chf': 'Congestive heart failure',
    'renal_failure': 'Renal failure',
    'pleural_effusion': 'Pleural effusion',
    'orientation': 'Orientation',
    # physical
    'temperature': 'Temperature (\degree C)',
    'heart_rate': 'Heart rate (beats per minute)',
    'respiratory_rate': 'Respiratory rate (breaths per minute)',
    'bp_systolic': 'Systolic blood pressure (mmHg)',
    'bp_diastolic': 'Diastolic blood pressure (mmHg)',
    'bp_mean_arterial': 'Mean arterial pressure (mmHg)',
    'gcs': 'Glasgow Coma Scale',
    'rbcs': 'Red blood cells (millions/$\mu$L)',  # is this low?
    'wbc': 'White blood cells (thousands/$\mu$L)',
    'platelets': 'Platelets (thousands/$\mu$L)',
    'hct': 'Hematocrit (%)',  # is this low?
    'rdw': 'Red blood cell dist. width (%)',
    'mcv': 'Mean corpuscular volume (fL)',
    'mch': 'Mean corpuscular hemoglobin/ MCH (pg)',
    'mchc': 'MCH concentration (g/dL)',
    'neutrophils': 'Neutrophils (%)',  # is this higher than normal?
    'lymphocytes': 'Lymphocytes (%)',  # is this lower than normal?
    'monocytes': 'Monocytes (%)',
    'eosinophils': 'Eosinophils (%)',
    'basophils': 'Basophils (%)',
    'bands': 'Band cells (%)',  # is this low?
    'sodium': 'Sodium (mmol/L)',
    'potassium': 'Potassium (mmol/L)',
    'chloride': 'Chloride (mmol/L)',
    'bicarbonate': 'Bicarbonate (mmol/L)',
    'bun': 'Blood urea nitrogen (mg/dL)',
    'creatinine': 'Creatinine (mg/dL)',
    'glucose': 'Glucose (mg/dL)',
    'ast': 'Aspartate aminotransferase (units/L)',
    'alt': 'Alanine aminotransferase (units/L)',
    'alkaline_phosphatase': 'Alkaline phosphatase (units/L)',
    'crp': 'C-reactive protein (mg/L)',
    'direct_bilirubin': 'Direct bilirubin (mg/L)',
    'total_bilirubin': 'Total bilirubin (mg/L)',
    'total_protein': 'Total protein (g/dL)',
    'calcium': 'Calcium (mg/dL)',
    'albumin': 'Albumin (g/dL)',
    'troponin': 'Troponin (ng/mL)',
    'pt': 'Prothrombin time (sec)',
    'ptt': 'Partial thromboplastin time (sec)',
    'ph': 'pH',
    'pao2': 'Partial pressure of oxygen (mmHg)',
    'sao2': 'Arterial oxygen saturation (mmHg)',
    'deceased_indicator': 'Deceased',
    'vasopressor_indicator': 'Vasopressors administered',
    'ventilator_indicator': 'Ventilator used'
}


def get_mean_std_str(df, col, return_range=False, tabs=False):
    dcol = df[col]
    s = col
    if len(col) < 8:
        s += '\t'
    if len(col) < 16:
        s += '\t'
    if len(col) < 24:
        s += '\t'
    if return_range:
        dlower = dcol.quantile(0.25)  # change to iqr
        dupper = dcol.quantile(0.75)
        if col == 'ph':
            s += '\t{0:.2f} ({1:.2f}-{2:.2f})'.format(dcol.median(), dlower, dupper)
        else:
            s += '\t{0:.1f} ({1:.1f}-{2:.1f})'.format(dcol.median(), dlower, dupper)
        # s += '\t{0:.1f} ({1:.1f})'.format(dcol.median(), dupper - dlower)
    else:
        s += '\t{0:.1f} ({1:.1f})'.format(dcol.mean(), dcol.std())
    if not tabs:
        s = s.replace('\t', '').replace(col, '')
    return s


def get_binary_var_str(df, col, missingness=False, tabs=False):
    if col is not None:
        dcol = df[col]
    else:
        dcol = df
    ct = int(dcol.sum())
    prop = ct / float(len(df))
    missing = dcol.isna().sum() / float(len(df))
    s = ''
    if col is not None:
        s += col
        if len(col) < 8:
            s += '\t'
        if len(col) < 16:
            s += '\t'
        if len(col) < 24:
            s += '\t'
    s += '\t{0:d} ({1:.1%})'.format(ct, prop)
    if missingness:
        s += '\t(missingness: {0:.1%})'.format(missing)
    if not tabs:
        s = s.replace('\t', '')
        if col is not None:
            s = s.replace(col, '')
    return s


def make_table1(df, df_name):
    global name_map
    N = len(df)
    indent = '\hspace{5mm}'
    names = ['']
    vals = ['(n = {})'.format(N)]

    def add_pair(key, val):
        names.append(key)
        vals.append(val)

    def add_header(header):
        names.append('\\textbf{' + header + '}')
        vals.append('')

    ## Demographics
    add_header('Demographics')

    # age
    age = df['age']
    r1 = len(df[(df['age'] < 30)])
    r2 = len(df[(df['age'] >= 30) & (df['age'] < 40)])
    r3 = len(df[(df['age'] >= 40) & (df['age'] < 50)])
    r4 = len(df[(df['age'] >= 50) & (df['age'] < 60)])
    r5 = len(df[(df['age'] >= 60)])
    add_pair('Age, years', get_mean_std_str(df, 'age', return_range=True))
    add_pair('Age range, years', '')
    add_pair(indent + '$<$ 30', '{0:d} ({1:.1%})'.format(r1, r1 / float(N)))
    add_pair(indent + '30-39', '{0:d} ({1:.1%})'.format(r2, r2 / float(N)))
    add_pair(indent + '40-49', '{0:d} ({1:.1%})'.format(r3, r3 / float(N)))
    add_pair(indent + '50-59', '{0:d} ({1:.1%})'.format(r4, r4 / float(N)))
    add_pair(indent + '$\\leq$ 60', '{0:d} ({1:.1%})'.format(r5, r5 / float(N)))

    # gender
    gender = df['gender']
    g1 = sum(gender == 'Male') + sum(gender == 'gender:m')
    g2 = sum(gender == 'Female') + sum(gender == 'gender:f')
    add_pair('Gender', '')
    add_pair(indent + 'Male', '{0:d} ({1:.1%})'.format(g1, g1 / float(N)))
    add_pair(indent + 'Female', '{0:d} ({1:.1%})'.format(g2, g2 / float(N)))

    ## Comorbidities
    add_pair('', '')
    add_header('Comorbidities')
    cvars = [
        'pleural_effusion'
    ]
    for col in cvars:
        val = get_binary_var_str(df, col)
        add_pair(name_map[col], val)

    ## Physicals
    add_pair('', '')
    add_header('Physical exam findings')

    # orientation
    orientation = df['orientation']
    o1 = (orientation >= 4).astype(int)
    o2 = (orientation < 4).astype(int)
    add_pair('Orientation', '')
    add_pair(indent + 'oriented', get_binary_var_str(o1, None))
    add_pair(indent + 'confused', get_binary_var_str(o2, None))

    # other physical measurements
    pvars = [
        'temperature', 'heart_rate', 'respiratory_rate',
        'bp_systolic', 'bp_diastolic', 'bp_mean_arterial', 'gcs'
    ]
    for col in pvars:
        val = get_mean_std_str(df, col, return_range=True)
        add_pair(name_map[col], val)

    ## Laboratory findings
    add_pair('', '')
    add_header('Laboratory findings')
    hema_vars = [
        'rbcs', 'wbc', 'platelets', 'hct', 'rdw', 'mcv', 'mch', 'mchc',
        'neutrophils', 'lymphocytes', 'monocytes', 'eosinophils',
        'basophils', 'bands',
    ]
    chem_vars = [
        'sodium', 'potassium', 'chloride', 'bicarbonate', 'bun',
        'creatinine', 'glucose', 'ast', 'alt', 'alkaline_phosphatase',
        'crp', 'direct_bilirubin', 'total_bilirubin', 'total_protein',
        'calcium', 'albumin', 'troponin'
    ]
    coag_vars = ['pt', 'ptt']
    bgas_vars = ['ph', 'pao2', 'sao2']
    add_pair('Hemotology', '')
    for col in hema_vars:
        try:
            add_pair(indent + name_map[col], get_mean_std_str(df, col, return_range=True))
        except Exception as e:
            print(e)
            import pdb; pdb.set_trace()
    add_pair('Chemistry', '')
    for col in chem_vars:
        add_pair(indent + name_map[col], get_mean_std_str(df, col, return_range=True))
    add_pair('Coagulation', '')
    for col in coag_vars:
        add_pair(indent + name_map[col], get_mean_std_str(df, col, return_range=True))
    add_pair('Blood gas', '')
    for col in bgas_vars:
        add_pair(indent + name_map[col], get_mean_std_str(df, col, return_range=True))

    ## Outcomes
    add_pair('', '')
    add_header('Outcomes')
    out_vars = [
        'deceased_indicator', 'vasopressor_indicator', 'ventilator_indicator'
    ]
    for col in out_vars:
        add_pair(name_map[col], get_binary_var_str(df, col))

    table_df = pd.DataFrame({'Variable': names, df_name: vals})
    return table_df


def print_table1(df, df_name):
    print('==================== TABLE FOR {} ================='.format(df_name))
    N = len(df)
    print('Patients (n = {})\n'.format(N))

    age = df['age']
    print('Age missingness*:\t{0:d} ({1:.1%})'.format(age.isna().sum(),
                                                      round(age.isna().sum() / float(N), 3)))
    print('Age, years\t\t{0:.1f} ({1:.1f})'.format(age.mean(), age.std()))
    print('Age range, years')
    r = len(df[(df['age'] < 30)])
    print('\t<30\t\t{0:d} ({1:.1%})'.format(r, r / float(N)))
    r = len(df[(df['age'] >= 30) & (df['age'] < 40)])
    print('\t30-39\t\t{0:d} ({1:.1%})'.format(r, r / float(N)))
    r = len(df[(df['age'] >= 40) & (df['age'] < 50)])
    print('\t40-49\t\t{0:d} ({1:.1%})'.format(r, r / float(N)))
    r = len(df[(df['age'] >= 50) & (df['age'] < 60)])
    print('\t50-59\t\t{0:d} ({1:.1%})'.format(r, r / float(N)))
    r = len(df[(df['age'] >= 60)])
    print('\t>= 60\t\t{0:d} ({1:.1%})'.format(r, r / float(N)))

    gender = df['gender']
    print('\nGender missingness*: \t{0:d} ({1:.1%})'.format(gender.isna().sum(),
                                                            round(gender.isna().sum() / float(N), 3)))
    print('Gender')
    g = sum(gender == 'Male')
    print('\tMale\t\t{0:d} ({1:.1%})'.format(g, g / float(N)))
    g = sum(gender == 'Female')
    print('\tFemale\t\t{0:d} ({1:.1%})'.format(g, g / float(N)))

    print('\nLab values')
    print('(compare w/ washington state)')
    lvars = [  # compare w/ washington state
        'wbc', 'lymphocytes', 'hemoglobin', 'platelets', 'sodium',
        'creatinine', 'total_bilirubin', 'alkaline_phosphatase',
        'ast', 'alt', 'troponin'
    ]
    for col in lvars:
        print(get_mean_std_str(df, col))

    print('\n(remaining lab values)')
    lvars = [
        'bun', 'temperature', 'rbcs', 'hct', 'rdw', 'mcv', 'mch', 'mchc',
        'neutrophils', 'monocytes', 'eosinophils', 'basophils', 'ph',
        'glucose', 'pao2', 'fio2', 'crp', 'direct_bilirubin',
        'total_protein', 'albumin', 'ferritin', 'pt', 'ptt', 'fibrinogen',
        'bands', 'bicarbonate', 'calcium', 'chloride', 'potassium',
        'heart_rate', 'sao2', 'gcs', 'respiratory_rate', 'bp_systolic',
        'bp_diastolic', 'bp_mean_arterial',
    ]
    for col in lvars:
        print(get_mean_std_str(df, col, return_range=True))

    print('\nComorbidities')
    cvars = [
        'smoking', 'cancer', 'liver_disease', 'chf', 'renal_failure',
        'pleural_effusion'
    ]
    for col in cvars:
        print(get_binary_var_str(df, col))

    print('\nOrientation')
    print(get_mean_std_str(df, 'orientation'))

    print('\nOutcomes')
    outvars = [
        'censor_or_deceased_days', 'censor_or_vasopressor_days',
        'censor_or_ventilator_days', 'deceased_indicator',
        'vasopressor_indicator', 'ventilator_indicator'
    ]
    for col in outvars:
        if 'indicator' in col:
            print(get_binary_var_str(df, col))
        else:
            print(get_mean_std_str(df, col))

    import pdb; pdb.set_trace()
    # # df[['gender']]
    # # labs = ['wbc', ]
    # # tab = df[]
    # # print('Values are n (%) or mean (SD) unless otherwise specified.')


"""Full list of variables:
'rbcs', 'wbc', 'platelets', 'hemoglobin', 'hct', 'rdw', 'mcv', 'mch', 'mchc',
'neutrophils', 'lymphocytes', 'monocytes', 'eosinophils', 'basophils', 'bun',
'temperature', 'ph', 'sodium', 'glucose', 'pao2', 'fio2', 'ldh', 'crp',
'direct_bilirubin', 'total_bilirubin', 'total_protein', 'albumin', 'ferritin',
'pt', 'ptt', 'fibrinogen', 'ast', 'alt', 'creatinine', 'troponin',
'alkaline_phosphatase', 'bands', 'bicarbonate', 'calcium', 'chloride',
'potassium', 'gender', 'age', 'ethnicity', 'observation_offset', 'heart_rate',
'sao2', 'gcs', 'respiratory_rate', 'bp_systolic', 'bp_diastolic',
'bp_mean_arterial', 'smoking', 'cancer', 'liver_disease', 'chf',
'renal_failure', 'pleural_effusion', 'orientation',
'censor_or_deceased_days', 'deceased_indicator',
'censor_or_vasopressor_days', 'vasopressor_indicator',
'censor_or_ventilator_days', 'ventilator_indicator'
"""

eicu_template = '../data/eicu/eicu_cleaned/eicu_anypna_{}_days_post_inicu.csv'
mimic_template = '../data/mimic/mimic_cleaned/cleaner_mimic_anypna_timeline_flfv_{}_days_post_inicu.csv'

# make anypna table
table_cols = [
    # (eicu_template.format(0), 'eICU Day 0'),
    # (eicu_template.format(1), 'eICU Day 1'),
    (eicu_template.format(2), 'eICU Day 2'),
    # (mimic_template.format(0), 'MIMIC Day 0'),
    # (mimic_template.format(1), 'MIMIC Day 1'),
    (mimic_template.format(2), 'MIMIC Day 2'),
]
tte_names = ['Time to death (days)',
             'Time to administering vasopressors (days)',
             'Time to ventilation (days)']
median_ttes = {
    'eICU Day 0': ['58.7', 'NA', '21.0'],
    'eICU Day 1': ['57.7', 'NA', '14.5'],
    'eICU Day 2': ['56.7', 'NA', 'NA'],
    'MIMIC Day 0': ['NA', '12.7', '1.78'],
    'MIMIC Day 1': ['NA', '24.3', '3.71'],
    'MIMIC Day 2': ['NA', '28.6', '3.87'],
}

# iterate through columns and format rows of latex table
table_df = None
filter_neg_outcomes = True
missingness_df = None
for fpath, name in table_cols:
    if 'mimic' in name.lower():
        df = pd.read_csv(fpath, sep='|')
        if filter_neg_outcomes:
            df = df[df['censor_or_deceased_days'] > 0]
            df = df[df['age'] >= 18]
    else:
        df = pd.read_csv(fpath)
        if filter_neg_outcomes:
            df = df[df['censor_or_deceased_days'] > 0]

    # make missingness df
    vars1 = ['age', 'gender', 'pleural_effusion', 'orientation']
    pvars = [
        'temperature', 'heart_rate', 'respiratory_rate',
        'bp_systolic', 'bp_diastolic', 'bp_mean_arterial', 'gcs'
    ]
    hema_vars = [
        'rbcs', 'wbc', 'platelets', 'hct', 'rdw', 'mcv', 'mch', 'mchc',
        'neutrophils', 'lymphocytes', 'monocytes', 'eosinophils',
        'basophils', 'bands',
    ]
    chem_vars = [
        'sodium', 'potassium', 'chloride', 'bicarbonate', 'bun',
        'creatinine', 'glucose', 'ast', 'alt', 'alkaline_phosphatase',
        'crp', 'direct_bilirubin', 'total_bilirubin', 'total_protein',
        'calcium', 'albumin', 'troponin'
    ]
    coag_vars = ['pt', 'ptt']
    bgas_vars = ['ph', 'pao2', 'sao2']
    out_vars = [
        'deceased_indicator', 'vasopressor_indicator', 'ventilator_indicator'
    ]
    all_vars = vars1 + pvars + hema_vars + chem_vars + coag_vars + bgas_vars + out_vars
    if missingness_df is None:
        print(name)
        miss_frac = (df[all_vars].isna().sum() / len(df)).tolist()
        miss_cts = (df[all_vars].isna().sum()).tolist()
        miss_summ = list(['{} ({})'.format(round(f, 3), round(c, 3))
                          for (f, c) in zip(miss_frac, miss_cts)])
        missingness_df = {'Variable': [name_map[v] for v in all_vars]}
        missingness_df[name + ' (n = {})'.format(len(df))] = miss_summ
    else:
        print(name)
        miss_frac = (df[all_vars].isna().sum() / len(df)).tolist()
        miss_cts = (df[all_vars].isna().sum()).tolist()
        miss_summ = list(['{} ({})'.format(round(f, 3), round(c, 3))
                          for (f, c) in zip(miss_frac, miss_cts)])
        missingness_df[name + ' (n = {})'.format(len(df))] = miss_summ
    missingness_df = pd.DataFrame(missingness_df)

    # make table 1
    table = make_table1(df, name)
    if table_df is None:
        table_df = table
    else:
        # iteratively add column to table
        assert(table_df['Variable'].tolist() == table['Variable'].tolist())
        table = table.drop('Variable', axis=1)
        table_df = pd.concat((table_df, table), axis=1)

# add median time to event values
tte_rows = {'Variable': tte_names}
for col in table_df.columns:
    table_df[col] = table_df[col].str.replace('%', '\\%')  # escape %
    if col in median_ttes:
        tte_rows[col] = tte_rows.get(col, []) + median_ttes[col]
tte_df = pd.DataFrame(tte_rows)
table_df = table_df.append(tte_df)

print('======================= table 1 ===============================')
print(table_df.to_latex(index=False, escape=False))
import pdb; pdb.set_trace()

print('======================= missingness ===============================')
print(missingness_df.to_latex(index=False))
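A toy check of the formatter defined above (the data is hypothetical):

toy = pd.DataFrame({'age': [25, 40, 55, 70]})
print(get_mean_std_str(toy, 'age'))                     # mean (SD), e.g. "47.5 (19.4)"
print(get_mean_std_str(toy, 'age', return_range=True))  # median (IQR), e.g. "47.5 (36.2-58.8)"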
33.827887
121
0.591937
2,083
15,527
4.256361
0.174748
0.022896
0.005752
0.00767
0.411911
0.345477
0.313332
0.29145
0.255245
0.219829
0
0.021277
0.191795
15,527
458
122
33.901747
0.685234
0.053198
0
0.308333
0
0
0.3437
0.033207
0
0
0
0
0.002778
1
0.016667
false
0
0.011111
0
0.036111
0.094444
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a0b9078462a8fd43934f3c8fbd022675baf1cb9
269
py
Python
codeforces/math数学/1100/296A相邻相异.py
yofn/pyacm
e573f8fdeea77513711f00c42f128795cbba65a6
[ "Apache-2.0" ]
null
null
null
codeforces/math数学/1100/296A相邻相异.py
yofn/pyacm
e573f8fdeea77513711f00c42f128795cbba65a6
[ "Apache-2.0" ]
null
null
null
codeforces/math数学/1100/296A相邻相异.py
yofn/pyacm
e573f8fdeea77513711f00c42f128795cbba65a6
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3
# https://codeforces.com/problemset/problem/296/A


def f(l):
    n = len(l)
    cl = [0] * 1001
    for i in l:
        cl[i] += 1
    return max(cl) <= (n + 1) // 2


q = int(input())
l = list(map(int, input().split()))
print('YES' if f(l) else 'NO')
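The test max(cl) <= (n+1)//2 is the pigeonhole condition: the list can be rearranged with no two equal neighbors exactly when the most frequent value fits into every other slot. A quick sanity check, reusing f from above:

assert f([1, 2, 1, 2, 3]) is True  # most frequent value appears 2 <= (5+1)//2 = 3 times
assert f([7, 7, 7, 1]) is False    # 7 appears 3 > (4+1)//2 = 2 times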
17.933333
49
0.539033
49
269
2.959184
0.755102
0.027586
0
0
0
0
0
0
0
0
0
0.057971
0.230483
269
14
50
19.214286
0.642512
0.256506
0
0
0
0
0.025253
0
0
0
0
0
0
1
0.111111
false
0
0
0
0.222222
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a0d30c14655403a903bf17b692e7c7c2d25497c
992
py
Python
backend/farm/models/reference_parcel_relation.py
szkkteam/agrosys
a390332202f7200632d2ff3816e1b0f3cc76f586
[ "MIT" ]
null
null
null
backend/farm/models/reference_parcel_relation.py
szkkteam/agrosys
a390332202f7200632d2ff3816e1b0f3cc76f586
[ "MIT" ]
null
null
null
backend/farm/models/reference_parcel_relation.py
szkkteam/agrosys
a390332202f7200632d2ff3816e1b0f3cc76f586
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Common Python library imports

# Pip package imports
import sqlalchemy as sa
from sqlalchemy.orm import backref

# Internal package imports
from backend.database import (
    Column,
    BaseModel,
    TimestampMixin,
    String,
    Float,
    BigInteger,
    foreign_key,
    relationship,
)


class ReferenceParcelRelation(BaseModel, TimestampMixin):
    """Join table between reference parcel blocks and parcels"""
    block_id = foreign_key('ReferenceParcel', fk_col='parcel_id', primary_key=True,
                           onupdate="CASCADE", ondelete="CASCADE")
    parcel_id = foreign_key('ReferenceParcel', fk_col='parcel_id', primary_key=True,
                            onupdate="CASCADE")

    __repr_props__ = ('block_id', 'parcel_id')

    def __init__(self, block=None, parcel=None, **kwargs):
        print("Adding relation - Block: %s Parcel: %s" % (block, parcel))
        super().__init__(**kwargs)
        if parcel:
            self.parcel = parcel
        if block:
            self.block = block
27.555556
123
0.673387
115
992
5.582609
0.547826
0.049844
0.037383
0.084112
0.214953
0.214953
0.214953
0.214953
0.214953
0.214953
0
0.001274
0.208669
992
35
124
28.342857
0.816561
0.15121
0
0
0
0
0.149038
0
0
0
0
0
0
1
0.043478
false
0
0.130435
0
0.347826
0.043478
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a0fc227bdb50b7e2cb15c395d27d990bc802b85
7,362
py
Python
gui/widgets.py
DigitalOzUT/esdc-ce
e0d918994204f3ca69f363c71941c7a1bb123109
[ "Apache-2.0" ]
null
null
null
gui/widgets.py
DigitalOzUT/esdc-ce
e0d918994204f3ca69f363c71941c7a1bb123109
[ "Apache-2.0" ]
null
null
null
gui/widgets.py
DigitalOzUT/esdc-ce
e0d918994204f3ca69f363c71941c7a1bb123109
[ "Apache-2.0" ]
null
null
null
import json

import phonenumbers
from django import forms
from django.forms import widgets
from django.utils import six
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from frozendict import frozendict
from taggit.forms import TagWidget as _TagWidget

from api.utils.encoders import JSONEncoder
from gui.models import UserProfile

__all__ = (
    'NumberInput',
    'EmailInput',
    'URLInput',
    'TelInput',
    'TelPrefixInput',
    'ByteSizeInput',
    'ArrayWidget',
    'ArrayAreaWidget',
    'DictWidget',
    'TagWidget',
)

HTML5_ATTRS = frozendict({'autocorrect': 'off', 'autocapitalize': 'off', 'spellcheck': 'false'})


def edit_string_for_items(array, escape_space=True, escape_comma=True, sort=False):
    """Like taggit.utils.edit_string_for_tags, but with list/tuple as input and without sorting"""
    items = []
    for i in array:
        if not isinstance(i, six.string_types):
            i = str(i)
        # fixed: the two escape checks were separate if statements, so an item
        # containing both a space and a comma was appended twice
        if (escape_space and ' ' in i) or (escape_comma and ',' in i):
            items.append('"%s"' % i)
        else:
            items.append(i)
    if sort:
        items.sort()
    return ','.join(items)


# noinspection PyAbstractClass
class _DefaultAttrsWidget(widgets.Widget):
    default_attrs = None
    default_class = None

    def __init__(self, attrs=None):
        if self.default_attrs:
            # dict() converts default_attrs from frozendict to regular dict
            defaults = dict(self.default_attrs)
            if attrs:
                defaults.update(attrs)
        else:
            defaults = attrs
        super(_DefaultAttrsWidget, self).__init__(attrs=defaults)
        if self.default_class:
            self.attrs['class'] = (self.default_class + ' ' + self.attrs.get('class', '')).rstrip()


class ArrayWidget(_DefaultAttrsWidget, widgets.TextInput):
    tag_choices = None

    def __init__(self, *args, **kwargs):
        self.tags = kwargs.pop('tags', False)
        self.escape_space = kwargs.pop('escape_space', True)
        self.escape_comma = kwargs.pop('escape_comma', True)
        super(ArrayWidget, self).__init__(*args, **kwargs)

    def build_attrs(self, *args, **kwargs):
        if self.tag_choices:
            tags = json.dumps(self.tag_choices, indent=None, cls=JSONEncoder)
            kwargs['data-tags-choices'] = mark_safe(conditional_escape(tags))
        return super(ArrayWidget, self).build_attrs(*args, **kwargs)

    def render(self, name, value, attrs=None):
        if value is not None and not isinstance(value, six.string_types):
            value = edit_string_for_items(value, escape_space=self.escape_space,
                                          escape_comma=self.escape_comma, sort=self.tags)
        return super(ArrayWidget, self).render(name, value, attrs=attrs)


class ArrayAreaWidget(_DefaultAttrsWidget, widgets.Textarea):
    default_attrs = frozendict({'rows': 3, 'cols': 40})
    default_class = 'input-array'

    def render(self, name, value, attrs=None):
        if value is not None and not isinstance(value, six.string_types):
            value = json.dumps(value, indent=4, ensure_ascii=False)
        return super(ArrayAreaWidget, self).render(name, value, attrs=attrs)


class DictWidget(_DefaultAttrsWidget, widgets.Textarea):
    default_attrs = frozendict({'rows': 4, 'cols': 40})
    default_class = 'input-mdata'

    def render(self, name, value, attrs=None):
        if value is not None and not isinstance(value, six.string_types):
            value = json.dumps(value, indent=4, ensure_ascii=False)
        return super(DictWidget, self).render(name, value, attrs=attrs)


class NumberInput(_DefaultAttrsWidget, widgets.Input):
    """HTML5 input type for numbers."""
    input_type = 'number'
    default_attrs = HTML5_ATTRS


class EmailInput(_DefaultAttrsWidget, widgets.Input):
    """HTML5 input type for email address."""
    input_type = 'email'
    default_attrs = HTML5_ATTRS


class URLInput(_DefaultAttrsWidget, widgets.URLInput):
    """HTML5 input type for URL address."""
    input_type = 'url'
    default_attrs = HTML5_ATTRS


class TelInput(_DefaultAttrsWidget, widgets.Input):
    """HTML5 input type for telephone numbers."""
    input_type = 'tel'
    default_attrs = HTML5_ATTRS


class ByteSizeInput(_DefaultAttrsWidget, widgets.TextInput):
    """Text input for byte sizes with an optional unit suffix."""
    default_attrs = frozendict({'pattern': '[0-9\.]+[BKMGTPEbkmgtpe]?'})


# noinspection PyAbstractClass
class TelPrefixSelect(widgets.Select):
    """A drop-down menu with international phone prefixes."""

    # noinspection PyUnusedLocal
    def __init__(self, attrs=None, choices=()):
        super(TelPrefixSelect, self).__init__(attrs=attrs, choices=UserProfile.PHONE_PREFIXES)

    def build_attrs(self, extra_attrs=None, **kwargs):
        attrs = super(TelPrefixSelect, self).build_attrs(extra_attrs=extra_attrs, **kwargs)
        attrs['class'] = 'input-select2'
        attrs.pop('maxlength', None)
        return attrs


# noinspection PyAbstractClass
class TelPrefixInput(widgets.MultiWidget):
    """
    A Widget that splits phone number input into:
        - a country select box for phone prefix
        - an input for local phone number
    """
    erase_on_empty_input = False

    # noinspection PyUnusedLocal
    def __init__(self, attrs=None, initial=None):
        if attrs:
            self.erase_on_empty_input = attrs.pop('erase_on_empty_input', False)
        multi_widgets = [TelPrefixSelect(attrs=attrs), TelInput(attrs=attrs)]
        super(TelPrefixInput, self).__init__(multi_widgets, attrs=attrs)

    def decompress(self, value):
        if value:
            # noinspection PyBroadException
            try:
                num = phonenumbers.parse(value)
            except Exception:
                return value.split(' ', 1)
            else:
                return ['+' + str(num.country_code), str(num.national_number)]
        return [None, None]

    def value_from_datadict(self, data, files, name):
        values = super(TelPrefixInput, self).value_from_datadict(data, files, name)
        if self.erase_on_empty_input and not values[1]:
            return ''
        else:
            return '%s %s' % tuple(values)


def clean_international_phonenumber(value):
    """Validate phone number taken from TelPrefixInput and return in format suitable for our DB."""
    invalid_number_message = _(u'The phone number entered is not valid.')

    try:
        num = phonenumbers.parse(value)
        if not phonenumbers.is_valid_number(num):
            raise forms.ValidationError(invalid_number_message)
    except phonenumbers.NumberParseException:
        raise forms.ValidationError(invalid_number_message)

    return phonenumbers.format_number(num, phonenumbers.PhoneNumberFormat.E164)


# noinspection PyAbstractClass
class TagWidget(_TagWidget):
    tag_choices = None

    def build_attrs(self, *args, **kwargs):
        if self.tag_choices:
            tags = json.dumps(self.tag_choices, indent=None, cls=JSONEncoder)
            kwargs['data-tags-choices'] = mark_safe(conditional_escape(tags))
        return super(TagWidget, self).build_attrs(*args, **kwargs)
31.596567
112
0.666938
851
7,362
5.586369
0.223267
0.027766
0.017669
0.01788
0.374211
0.279344
0.254733
0.155448
0.140303
0.140303
0
0.004573
0.227656
7,362
232
113
31.732759
0.831516
0.105814
0
0.251748
0
0
0.064887
0.003881
0
0
0
0
0
1
0.097902
false
0
0.083916
0
0.482517
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a110ac6e61962f9be254a3f95a40bf886e05acb
1,787
py
Python
indiepaper/controllers/indieauth.py
cleverdevil/indiepaper
1f27dc1b8a73ff0c8d3c759d3b25595da9c73d5e
[ "MIT" ]
44
2018-06-27T23:19:25.000Z
2021-04-06T03:38:11.000Z
indiepaper/controllers/indieauth.py
cleverdevil/indiepaper
1f27dc1b8a73ff0c8d3c759d3b25595da9c73d5e
[ "MIT" ]
13
2018-07-07T12:47:15.000Z
2020-03-18T20:45:22.000Z
indiepaper/controllers/indieauth.py
cleverdevil/indiepaper
1f27dc1b8a73ff0c8d3c759d3b25595da9c73d5e
[ "MIT" ]
null
null
null
from uuid import uuid4
from urllib.parse import urlencode
from http import cookies

from pecan import expose, redirect, request

from indiepaper import indieauth


class IndieAuthController(object):

    @expose()
    def index(self, me='', app=False):
        if not me:
            return 'Must specify a `me` parameter'

        session = request.environ['beaker.session']
        session['me'] = me
        session['state'] = str(uuid4())
        if app:
            session['app'] = True
        session.save()

        indieauth.request_authorization(me, session['state'])

    @expose()
    def callback(self, code=None, state=None):
        session = request.environ['beaker.session']
        try:
            assert session['state'] == state
        except AssertionError:
            return 'Error: state mismatch'

        result = indieauth.request_token(session['me'], code)
        if not result:
            return 'Error: no token returned from token endpoint'

        target = 'https://www.indiepaper.io/indieauth.html?success=true'
        if session.get('app', False) == True:
            target += '&app=true'

        c = cookies.SimpleCookie()
        c['indiepaper-me'] = session['me']
        c['indiepaper-me']['domain'] = '.indiepaper.io'
        c['indiepaper-me']['path'] = '/'
        c['indiepaper-token'] = result['token']
        c['indiepaper-token']['domain'] = '.indiepaper.io'
        c['indiepaper-token']['path'] = '/'
        c['indiepaper-endpoint'] = result['micropub']
        c['indiepaper-endpoint']['domain'] = '.indiepaper.io'
        c['indiepaper-endpoint']['path'] = '/'

        headers = [
            ('Set-Cookie', morsel.OutputString())
            for morsel in c.values()
        ]

        redirect(target, headers=headers)
27.921875
72
0.579743
189
1,787
5.470899
0.386243
0.095745
0.037718
0.055126
0.149903
0
0
0
0
0
0
0.00154
0.273083
1,787
63
73
28.365079
0.794457
0
0
0.088889
0
0
0.253497
0
0
0
0
0
0.044444
1
0.044444
false
0
0.111111
0
0.244444
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a12534d75f997f74c0f0b7af553bab830e20701
1,097
py
Python
quant/get_big_order.py
yunfeiz/py_learnt
d4134d8e9f1caed2db2848f19474c15c1b36557e
[ "Apache-2.0" ]
null
null
null
quant/get_big_order.py
yunfeiz/py_learnt
d4134d8e9f1caed2db2848f19474c15c1b36557e
[ "Apache-2.0" ]
null
null
null
quant/get_big_order.py
yunfeiz/py_learnt
d4134d8e9f1caed2db2848f19474c15c1b36557e
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
"""
This script parses stock info.
"""
import pandas as pd
import tushare as ts
import numpy as np
import matplotlib.pyplot as plt
from myutils import *
import sys, getopt, argparse
import datetime as dt


def usage():
    print(sys.argv[0] + ' -i stock list file')
    print(sys.argv[0] + ' -h #get help info')
    print(sys.argv[0] + ' -t show data today')
    print(sys.argv[0] + ' -v threshold, for example 800')
    print(sys.argv[0] + ' -a check all list')
    print(sys.argv[0] + ' -d set date')


if __name__ == '__main__':
    opts, args = getopt.getopt(sys.argv[1:], "t:i:o:v:d:s:a",
                               ["help", "input=", "output="])
    stock_list = 'stock.txt'
    date = '2017-05-19'
    volume = 400
    for op, value in opts:
        if op == '-i':
            stock_list = value
        elif op == '-t':
            date = n_days_ago(float(value))
        elif op == '-d':
            date = value
        elif op == '-v':
            volume = float(value)
        elif op == '-a':
            stock_list = 'all.txt'
        elif op == '-h':
            usage()
            sys.exit()
        else:
            usage()
            sys.exit()
    STOCK_LIST = getStockList(stock_list)
    analyze_big_order(STOCK_LIST, date, volume)
21.096154
89
0.626253
179
1,097
3.73743
0.458101
0.073244
0.107623
0.116592
0
0
0
0
0
0
0
0.026018
0.194166
1,097
51
90
21.509804
0.730769
0.055606
0
0.105263
0
0
0.186952
0
0
0
0
0
0
1
0.026316
false
0
0.184211
0
0.210526
0.157895
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a12e92330b0bcafe0537427e32e4ce6570ef51f
4,755
py
Python
ConvexHull/ConvexHull.py
pnu-oslab-org/Algorithms
2188f395eea4816a6423094703196349d08b7fe9
[ "MIT" ]
null
null
null
ConvexHull/ConvexHull.py
pnu-oslab-org/Algorithms
2188f395eea4816a6423094703196349d08b7fe9
[ "MIT" ]
null
null
null
ConvexHull/ConvexHull.py
pnu-oslab-org/Algorithms
2188f395eea4816a6423094703196349d08b7fe9
[ "MIT" ]
null
null
null
# Convex hull of a random set of points:

import numpy as np
import matplotlib.pyplot as plt
# from scipy.spatial import ConvexHull

FILE_NAME = "layer1"


def get_degree_all(x_arr, y_arr):
    return np.arctan2(y_arr, x_arr) * 180 / np.pi


def ccw(p1, p2, p3):
    x = [p1[0], p2[0], p3[0]]
    y = [p1[1], p2[1], p3[1]]
    S = ((x[1] - x[0]) * (y[2] - y[0])) - ((y[1] - y[0]) * (x[2] - x[0]))
    if S > 0:
        return 1   # counter clock-wise
    elif S < 0:
        return -1  # clock-wise
    else:
        return 0   # collinear


def MyConvexHull(_points):
    convex_points = np.copy(_points)
    y_min_idx = 0
    for idx in range(1, np.alen(convex_points)):
        if convex_points[idx, 1] < convex_points[y_min_idx, 1]:
            y_min_idx = idx
        elif convex_points[idx, 1] == convex_points[y_min_idx, 1]:
            # fixed: break ties by the smaller x coordinate
            # (the original compared convex_points[idx, 0] with itself)
            if convex_points[idx, 0] < convex_points[y_min_idx, 0]:
                y_min_idx = idx
    for idx in range(np.alen(convex_points)):
        if idx != y_min_idx:
            convex_points[idx, 0] -= convex_points[y_min_idx, 0]
            convex_points[idx, 1] -= convex_points[y_min_idx, 1]
    convex_points[y_min_idx] = np.array([0, 0])
    degrees = get_degree_all(convex_points[:, 0], convex_points[:, 1])
    for idx, degree in enumerate(degrees[1:]):
        degrees[idx + 1] = degree
    point_dict = {}
    for idx in range(1, np.alen(convex_points)):
        if degrees[idx] in point_dict.keys():
            x1, y1 = convex_points[idx]
            x2, y2 = point_dict[degrees[idx]][1]
            if x1 ** 2 + y1 ** 2 > x2 ** 2 + y2 ** 2:
                point_dict[degrees[idx]] = (idx, convex_points[idx])
        else:
            point_dict[degrees[idx]] = (idx, np.copy(convex_points[idx]))
    point_list = [[degrees[0], 0, np.copy(convex_points[0])]]
    for degree in point_dict:
        point_list.append([degree, point_dict[degree][0], point_dict[degree][1]])
    point_list.sort()
    sorted_list = []
    for idx in range(np.alen(point_list)):
        sorted_list.append([point_list[idx][1], point_list[idx][2]])
    stack = [sorted_list[0], sorted_list[1]]
    idx = 2
    while idx < np.alen(sorted_list):
        if ccw(stack[-2][1], stack[-1][1], sorted_list[idx][1]) > 0:
            stack.append(sorted_list[idx])
            idx += 1
        else:
            stack.pop()
    result = np.array([], dtype=int)
    for idx in range(len(stack) - 1):
        result = np.append(result, np.array([stack[idx][0], stack[idx + 1][0]]))
    result = np.append(result, np.array([stack[len(stack) - 1][0], stack[0][0]]))
    result = result.reshape(int(np.alen(result) / 2), 2)
    return result


idx_list = list()
points = np.array([], dtype=int)
inp_file = open("{}.txt".format(FILE_NAME), "r")
N = int(inp_file.readline())
for i in range(N):
    line = inp_file.readline()
    points = np.append(points, np.array([int(line.split()[0]), int(line.split()[1])]))
    idx_list.append(i)
inp_file.close()
points = points.reshape(int(np.alen(points) / 2), 2)

out_file = open("{}_out.txt".format(FILE_NAME), "w")
while True:
    if np.alen(points) < 3:
        break
    # hull = ConvexHull(points)
    hull = MyConvexHull(points)
    plt.plot(points[:, 0], points[:, 1], 'o')
    last_simplex = None
    point_set = set()
    location_set = set()
    # hull = hull.simplices
    for simplex in hull:  # left: src node index, right: tgt node index
        plt.plot(points[simplex, 0], points[simplex, 1], 'r--', alpha=0.6)
        point_set.add(simplex[0])
        point_set.add(simplex[1])
    remove_set = set()
    new_list = []
    for simplex in hull:
        new_list.append(simplex[0])
        for idx in range(np.alen(points)):
            if idx != simplex[0] and idx != simplex[1]:
                if ccw(points[simplex[0]], points[idx], points[simplex[1]]) == 0:
                    remove_set.add(idx)
                    new_list.append(idx)
    i = new_list.index(min(new_list))
    for _ in range(len(new_list)):
        out_file.write("{} ".format(idx_list[new_list[(i % len(new_list))]]))
        i += 1
    out_file.write("\n")
    next_point = np.array([], dtype=int)
    import pprint
    for i in range(np.alen(points)):
        if np.array_equal(points[i], np.array([0, 0])):
            next_point = np.append(next_point, np.array(points[i]))
        elif not (i in point_set) and not (i in remove_set):
            next_point = np.append(next_point, np.array(points[i]))
        else:
            idx_list[i] = -1
    next_point = next_point.reshape(int(np.alen(next_point) / 2), 2)
    points = np.copy(next_point)
    new_list = []
    for value in idx_list:
        if value != -1:
            new_list.append(value)
    idx_list = new_list
plt.show()
out_file.close()
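A quick cross-check of MyConvexHull against scipy may be useful; this sketch assumes scipy is installed and an older NumPy that still provides np.alen (the point set is arbitrary):

from scipy.spatial import ConvexHull

pts = np.array([[0, 0], [4, 0], [4, 3], [0, 3], [2, 1]])
print(MyConvexHull(pts))          # edge list as pairs of point indices
print(ConvexHull(pts).simplices)  # scipy's hull edges for comparison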
31.490066
86
0.581493
735
4,755
3.6
0.163265
0.095238
0.02381
0.029478
0.21353
0.181784
0.128118
0.09675
0.09675
0.09675
0
0.033428
0.257624
4,755
150
87
31.7
0.716147
0.040799
0
0.119658
0
0
0.00725
0
0
0
0
0
0
1
0.025641
false
0
0.025641
0.008547
0.094017
0.008547
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a13b4138dcbf110b5ce3b8aeea2a5eb173b7f39
1,266
py
Python
api/tests/integration/tests/rendering/render_attached_sgroups.py
f1nzer/Indigo
59efbd0be0b42f449f706c3a3c8d094e483e5ef4
[ "Apache-2.0" ]
null
null
null
api/tests/integration/tests/rendering/render_attached_sgroups.py
f1nzer/Indigo
59efbd0be0b42f449f706c3a3c8d094e483e5ef4
[ "Apache-2.0" ]
null
null
null
api/tests/integration/tests/rendering/render_attached_sgroups.py
f1nzer/Indigo
59efbd0be0b42f449f706c3a3c8d094e483e5ef4
[ "Apache-2.0" ]
null
null
null
import errno
import os
import sys

sys.path.append("../../common")

from env_indigo import *
from rendering import *

indigo = Indigo()
renderer = IndigoRenderer(indigo)

if not os.path.exists(joinPathPy("out", __file__)):
    try:
        os.makedirs(joinPathPy("out", __file__))
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise


def testRenderAttachedSGroups(smiles):
    mol = indigo.loadMolecule(smiles)
    mol.layout()
    for atom in mol.iterateAtoms():
        mol.addDataSGroup([atom.index()], [], "some", str(atom.index() + 1))
    indigo.setOption("render-data-sgroup-color", 0.8, 0.2, 0.8)
    indigo.setOption("render-output-format", "png")
    renderer.renderToFile(
        mol, joinPathPy("out/mol-with-indices.png", __file__)
    )
    print(checkImageSimilarity("mol-with-indices.png"))
    indigo.setOption("render-output-format", "svg")
    renderer.renderToFile(
        mol, joinPathPy("out/mol-with-indices.svg", __file__)
    )
    print(checkImageSimilarity("mol-with-indices.svg"))


testRenderAttachedSGroups("N1C=CC=CC1C{-}c1ncccc{+n}1")
# testRenderAttachedSGroups("C{-}c1ccccc{+n}1")
print("Done")

if isIronPython():
    renderer.Dispose()
    indigo.Dispose()
28.133333
77
0.650079
145
1,266
5.558621
0.475862
0.064516
0.069479
0.066998
0.312655
0.230769
0.124069
0.124069
0
0
0
0.012783
0.196682
1,266
44
78
28.772727
0.779744
0.035545
0
0.057143
0
0
0.178723
0.083404
0
0
0
0
0
1
0.028571
false
0
0.142857
0
0.171429
0.085714
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a140bd3a13abf85f1c9881f4327db224e01f00f
3,706
py
Python
tools/c7n_gcp/c7n_gcp/actions/notify.py
chris-angeli-rft/cloud-custodian
5ff331b114a591dbaf6d672e30ceefb7ae64a5dd
[ "Apache-2.0" ]
2
2020-10-20T11:05:54.000Z
2021-05-09T15:24:01.000Z
tools/c7n_gcp/c7n_gcp/actions/notify.py
chris-angeli-rft/cloud-custodian
5ff331b114a591dbaf6d672e30ceefb7ae64a5dd
[ "Apache-2.0" ]
2
2018-01-20T01:36:39.000Z
2021-02-01T15:35:33.000Z
tools/c7n_gcp/c7n_gcp/actions/notify.py
chris-angeli-rft/cloud-custodian
5ff331b114a591dbaf6d672e30ceefb7ae64a5dd
[ "Apache-2.0" ]
3
2017-09-21T13:36:46.000Z
2021-09-20T16:38:29.000Z
# Copyright 2019 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from c7n.actions import BaseNotify
from c7n import utils
from c7n.resolver import ValuesFrom

from c7n_gcp.provider import resources as gcp_resources


class Notify(BaseNotify):
    """
    :example:

    .. code-block:: yaml

          policies:
            - name: bad-instance-get
              resource: gcp.instance
              filters:
               - Name: bad-instance
              actions:
               - type: notify
                 to:
                  - email@address
                 # which template for the email should we use
                 template: policy-template
                 transport:
                   type: pubsub
                   topic: projects/yourproject/topics/yourtopic
    """

    batch_size = 1000

    schema = {
        'type': 'object',
        'additionalProperties': False,  # fixed typo: was 'addtionalProperties'
        'anyOf': [
            {'required': ['type', 'transport', 'to']},
            {'required': ['type', 'transport', 'to_from']}],
        'properties': {
            'type': {'enum': ['notify']},
            'to': {'type': 'array', 'items': {'type': 'string'}},
            'owner_absent_contact': {'type': 'array', 'items': {'type': 'string'}},
            'to_from': ValuesFrom.schema,
            'cc': {'type': 'array', 'items': {'type': 'string'}},
            'cc_from': ValuesFrom.schema,
            'cc_manager': {'type': 'boolean'},
            'from': {'type': 'string'},
            'subject': {'type': 'string'},
            'template': {'type': 'string'},
            'transport': {
                'oneOf': [
                    {'type': 'object',
                     'required': ['type', 'topic'],
                     'properties': {
                         'topic': {'type': 'string'},
                         'type': {'enum': ['pubsub']},
                     }}],
            },
        }
    }
    schema_alias = True

    def process(self, resources, event=None):
        session = utils.local_session(self.manager.session_factory)
        client = session.client('pubsub', 'v1', 'projects.topics')
        project = session.get_default_project()
        message = {
            'event': event,
            'account_id': project,
            'account': project,
            'region': 'all',
            'policy': self.manager.data
        }
        message['action'] = self.expand_variables(message)
        for batch in utils.chunks(resources, self.batch_size):
            message['resources'] = batch
            self.publish_message(message, client)

    # Methods to handle GCP Pub Sub topic publishing
    def publish_message(self, message, client):
        """Publish message to a GCP pub/sub topic"""
        return client.execute_command('publish', {
            'topic': self.data['transport']['topic'],
            'body': {
                'messages': {
                    'data': self.pack(message)
                }
            }
        })

    @classmethod
    def register_resource(cls, registry, resource_class):
        resource_class.action_registry.register('notify', Notify)


gcp_resources.subscribe(Notify.register_resource)
32.79646
83
0.537777
361
3,706
5.448753
0.459834
0.035587
0.021352
0.027453
0.036604
0
0
0
0
0
0
0.00688
0.333243
3,706
112
84
33.089286
0.789154
0.302752
0
0.031746
0
0
0.19759
0
0
0
0
0
0
1
0.047619
false
0
0.063492
0
0.190476
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a1888106bb32c6a843846b74b5a3f72b1b6ef3a
3,864
py
Python
src/globus_cli/principal_resolver.py
sirosen/globus-cli
5ff39edd31e58a75cfaa5a3f41f860dbfcb52a09
[ "Apache-2.0" ]
null
null
null
src/globus_cli/principal_resolver.py
sirosen/globus-cli
5ff39edd31e58a75cfaa5a3f41f860dbfcb52a09
[ "Apache-2.0" ]
null
null
null
src/globus_cli/principal_resolver.py
sirosen/globus-cli
5ff39edd31e58a75cfaa5a3f41f860dbfcb52a09
[ "Apache-2.0" ]
null
null
null
""" A wrapper which nicely integrates SDK IdentityMaps into the CLI. This was originally implemented in gcs-cli and ported into here. A "resolver" is defined with a field it uses in each item from the response data as the full principal value. This is the "key". Typical usage should be to define a resolver in each command which needs resolution, to provide the key to use. Definition may have to happen outside of a command to accommodate field lists which are defined as constants, and this is supported. A resolver provides a field handler for principal URNs + a pagination callback to get all principals from each page of results added to the ID map. It can also be initialized to take identity IDs (not in Principal URN format). This lets us get some of the benefit of the IdentityMap bulk calls without forcing the whole paginated call to be walked at once. This also lets us keep the resolution work only in the text-mode printed output (and not applied on JSON output). """ from globus_sdk import IdentityMap from globus_cli.services.auth import get_auth_client IDENTITY_URN_PREFIX = "urn:globus:auth:identity:" class InvalidPrincipalError(ValueError): def __init__(self, value): self.value = value class PrincipalResolver: """ Everything is done lazily via properties so that nothing happens during start up. Pass ``PrincipalResolver.field`` as a field key for output printing Pass ``PrincipalResolver.page_callback`` as a page callback for output printing. Usage: >>> PrincipalResolver("urnfield") creates a resolver which pulls principal URNs from the field named "urnfield" >>> PrincipalResolver("idfield", use_urns=False) creates a resolver which pulls Identity IDs from the field named "idfield" """ def __init__(self, key, use_urns=True): self.key = key self.use_urns = use_urns self._idmap = None @property def idmap(self): if not self._idmap: self._idmap = IdentityMap(get_auth_client()) return self._idmap def _raw_id_from_object(self, obj): """ returns a pair, (original, value) can raise InvalidPrincipalError if the input is malformed """ value = obj[self.key] # if not using URNs, the "raw ID" is just the value and it "always works" if not self.use_urns: return (value, value) # otherwise, check # if it doesn't have the URN prefix, it is not a valid URN, so this lookup # failed -- the result is (False, ...) to indicate failure if not value.startswith(IDENTITY_URN_PREFIX): raise InvalidPrincipalError(value) # if it has the right prefix, left-strip the prefix as the new value, return the # original and the success indicator return (value, value[len(IDENTITY_URN_PREFIX) :]) def field(self, data): try: original, value = self._raw_id_from_object(data) except InvalidPrincipalError as err: return err.value # try to do the lookup and get the "username" property # but default to the original value if this doesn't resolve return self.idmap.get(value, {}).get("username", original) # TODO: In gcs-cli, page_callback is suported by the pretty printer. In globus-cli, # we should attach this to the PagingWrapper. The purpose is to get the map # populated on a per-page basis. def page_callback(self, data_page): for item in data_page: try: _original, value = self._raw_id_from_object(item) except InvalidPrincipalError: continue self.idmap.add(value) default_principal_resolver = PrincipalResolver("principal") default_identity_id_resolver = PrincipalResolver("identity_id", use_urns=False)
35.449541
88
0.695393
545
3,864
4.831193
0.365138
0.015951
0.01937
0.017091
0.046335
0.026586
0.026586
0.026586
0
0
0
0
0.241201
3,864
108
89
35.777778
0.898022
0.565476
0
0.052632
0
0
0.033481
0.015793
0
0
0
0.009259
0
1
0.157895
false
0
0.052632
0
0.394737
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a18ac141561dcbf69250c34c50d5274972ecdaa
10,945
py
Python
dataanalysis/window.py
Anders-Holst/Bonsai
841aa4e12c8bea8945396bd232c2006260127507
[ "MIT" ]
null
null
null
dataanalysis/window.py
Anders-Holst/Bonsai
841aa4e12c8bea8945396bd232c2006260127507
[ "MIT" ]
null
null
null
dataanalysis/window.py
Anders-Holst/Bonsai
841aa4e12c8bea8945396bd232c2006260127507
[ "MIT" ]
null
null
null
""" Copyright (C) 2018-2021 RISE Research Institute of Sweden AB File: window.py Author: anders.holst@ri.se """ # %matplotlib notebook import matplotlib.pyplot as plt import matplotlib as mpl mpl.interactive(True) mpl.rcParams['toolbar']='None' class Wind(): def __init__(self, name, width, height): self.width = width self.height = height self.fig = plt.figure(name, (width/100.0, height/100.0)) self.trans = self.fig.transFigure.inverted() self.bpfid = -1 self.brfid = -1 self.whfid = -1 self.mfid = -1 self.kfid = -1 self.buttons = {} self.scrolls = {} self.keys = {} self.drags = {} self.objs = [] self.objtps = [] self.bpressed = False def add_line(self, x1, y1, x2, y2, wdt, col): fr = self.trans.transform((x1, y1)) to = self.trans.transform((x2, y2)) obj = plt.Line2D((fr[0], to[0]), (fr[1], to[1]), linewidth=wdt*0.75, color=col) self.fig.add_artist(obj) return obj def add_rect(self, x1, y1, wdt, hgt, brd, fg, bg): fr = self.trans.transform((x1, y1)) to = self.trans.transform((x1+wdt, y1+hgt)) if bg: obj = plt.Rectangle(fr, to[0]-fr[0], to[1]-fr[1], linewidth=brd*0.75 or 0, edgecolor=fg or (0,0,0,0), facecolor=bg) else: obj = plt.Rectangle(fr, to[0]-fr[0], to[1]-fr[1], linewidth=brd*0.75 or 0, edgecolor=fg or (0,0,0,0), fill=False) self.fig.add_artist(obj) return obj def add_ellipse(self, x1, y1, wdt, hgt, brd, fg, bg, angle = 0): fr = self.trans.transform((x1, y1)) to = self.trans.transform((x1+wdt, y1+hgt)) if bg: obj = mpl.patches.Ellipse(((fr[0]+to[0])*0.5, (fr[1]+to[1])*0.5), to[0]-fr[0], to[1]-fr[1], linewidth=brd*0.75 or 0, edgecolor=fg or (0,0,0,0), facecolor=bg, angle = angle) else: obj = mpl.patches.Ellipse(fr, to[0]-fr[0], to[1]-fr[1], linewidth=brd*0.75 or 0, edgecolor=fg or (0,0,0,0), fill=False, angle = angle) self.fig.add_artist(obj) return obj def add_polygon(self, xylst, brd, fg, bg): xytr = list(map(self.trans.transform, xylst)) if bg: obj = plt.Polygon(xytr, linewidth=brd*0.75 or 0, edgecolor=fg or (0,0,0,0), facecolor=bg) else: obj = plt.Polygon(xytr, linewidth=brd*0.75 or 0, edgecolor=fg or (0,0,0,0), fill=False) self.fig.add_artist(obj) return obj def add_text(self, x1, y1, txt, fontsz = None, align = None): fr = self.trans.transform((x1, y1)) obj = plt.Text(fr[0], fr[1], txt, fontsize=fontsz or 18, ha=align or 'left') self.fig.add_artist(obj) return obj def locate_object(self, event, objs): for obj in objs: if obj.contains(event)[0]: return obj def locate_object_type(self, event, types): for obj in self.fig.artists: if True in types or type(obj) in types: if obj.contains(event)[0]: return obj def button_press_callback(self, event): obj = self.locate_object(event, self.objs) if obj is False: obj = self.locate_object_type(event, self.objtps) func = False if obj is not False: if (event.guiEvent.num, event.guiEvent.state, obj) in self.buttons: func = self.buttons[(event.guiEvent.num, event.guiEvent.state, obj)] elif (event.guiEvent.num, -1, obj) in self.buttons: func = self.buttons[(event.guiEvent.num, -1, obj)] elif (event.guiEvent.num, event.guiEvent.state, True) in self.buttons: func = self.buttons[(event.guiEvent.num, event.guiEvent.state, True)] elif (event.guiEvent.num, -1, True) in self.buttons: func = self.buttons[(event.guiEvent.num, -1, True)] if func is not False: if self.bpressed is False: if type(func)==tuple: self.bpressed = (False, func[1], func[2] if len(func)==3 else False, obj) func = func[0] func(self, event, obj) else: if (event.guiEvent.num, event.guiEvent.state) in self.buttons: func = self.buttons[(event.guiEvent.num, event.guiEvent.state)] elif 
(event.guiEvent.num, -1) in self.buttons: func = self.buttons[(event.guiEvent.num, -1)] if func is not False: if self.bpressed is False: if type(func)==tuple: self.bpressed = (False, func[1], func[2] if len(func)==3 else False, False) func = func[0] func(self, event) # kolla om ev över object i objs # kolla om bunden: (ev, mod, obj), (ev, -1, obj), (ev, mod, T), (ev, -1, T), (ev, mod), (ev, -1) # om release-cb, spara undan (vad göra om redan finns? ignorera press?) # kör press-cb def button_release_callback(self, event): if self.bpressed is not False: func = self.bpressed[1] obj = self.bpressed[3] self.bpressed = False if obj is False: func(self, event) else: func(self, event, obj) def button_motion_callback(self, event): if self.bpressed is not False: func = self.bpressed[2] obj = self.bpressed[3] if obj is False: func(self, event) else: func(self, event, obj) def bind_button(self, button, func, obj = False): # func called as func(win, ev) or func(win, ev, obj) if object is not False if (self.bpfid == -1): self.bpfid = self.fig.canvas.mpl_connect('button_press_event', self.button_press_callback) if (self.brfid == -1): self.brfid = self.fig.canvas.mpl_connect('button_release_event', self.button_release_callback) if (self.mfid == -1 and type(func)==tuple and len(func)==3): self.mfid = self.fig.canvas.mpl_connect('motion_notify_event', self.button_motion_callback) but = button if type(button)==tuple else (button, -1) if obj is not False: but = but + (obj,) if obj is True or type(obj)==type: if not obj in self.objtps: self.objtps.append(obj) elif not obj in self.objs: self.objs.append(obj) self.buttons[but] = func def key_press_callback(self, event): obj = self.locate_object(event, self.objs) if obj is False: obj = self.locate_object_type(event, self.objtps) func = False if obj is not False: if (event.guiEvent.keysym, event.guiEvent.state, obj) in self.keys: func = self.keys[(event.guiEvent.keysum, event.guiEvent.state, obj)] elif (event.guiEvent.keysym, -1, obj) in self.keys: func = self.keys[(event.guiEvent.keysym, -1, obj)] elif (event.guiEvent.keysym, event.guiEvent.state, True) in self.keys: func = self.keys[(event.guiEvent.keysym, event.guiEvent.state, True)] elif (event.guiEvent.keysym, -1, True) in self.keys: func = self.keys[(event.guiEvent.keysym, -1, True)] if func is not False: func(self, event, obj) else: if (event.guiEvent.keysym, event.guiEvent.state) in self.keys: func = self.keys[(event.guiEvent.keysym, event.guiEvent.state)] elif (event.guiEvent.keysym, -1) in self.keys: func = self.keys[(event.guiEvent.keysym, -1)] if func is not False: func(self, event) # kolla om ev över object i objs # kolla om bunden: (ev, mod, obj), (ev, -1, obj), (ev, mod, T), (ev, -1, T), (ev, mod), (ev, -1) # kör press-cb def bind_key(self, keysym, func, obj = False): # func called as func(win, ev) or func(win, ev, obj) if object is not False if (self.kfid == -1): self.kfid = self.fig.canvas.mpl_connect('key_press_event', self.key_press_callback) key = keysym if type(keysym)==tuple else (keysym, -1) if obj is not False: key = key + (obj,) if obj is True or type(obj)==type: if not obj in self.objtps: self.objtps.append(obj) elif not obj in self.objs: self.objs.append(obj) self.keys[key] = func def scroll_callback(self, event): obj = self.locate_object(event, self.objs) if obj is False: obj = self.locate_object_type(event, self.objtps) func = False if obj is not False: if (event.button, event.guiEvent.state, obj) in self.scrolls: func = self.scrolls[(event.button, event.guiEvent.state, obj)] elif 
(event.button, -1, obj) in self.scrolls: func = self.scrolls[(event.button, -1, obj)] elif (event.button, event.guiEvent.state, True) in self.scrolls: func = self.scrolls[(event.button, event.guiEvent.state, True)] elif (event.button, -1, True) in self.scrolls: func = self.scrolls[(event.button, -1, True)] elif (True, -1, obj) in self.scrolls: func = self.scrolls[(True, -1, obj)] elif (True, -1, True) in self.scrolls: func = self.scrolls[(True, -1, True)] if func is not False: func(self, event, obj) else: if (event.button, event.guiEvent.state) in self.scrolls: func = self.keys[(event.button, event.guiEvent.state)] elif (event.button, -1) in self.keys: func = self.keys[(event.button, -1)] elif (True, -1) in self.scrolls: func = self.scrolls[(True, -1)] if func is not False: func(self, event) # kolla om ev över object i objs # kolla om bunden: (ev, mod, obj), (ev, -1, obj), (ev, mod, T), (ev, -1, T), (ev, mod), (ev, -1) # kör press-cb def bind_scroll(self, dir, func, obj = False): # dir can be "up", "down", "left", "right", or True # func called as func(win, ev) or func(win, ev, obj) if object is not False if (self.whfid == -1): self.whfid = self.fig.canvas.mpl_connect('scroll_event', self.scroll_callback) dd = dir if type(dir)==tuple else (dir, -1) if obj is not False: dir = dir + (obj,) if obj is True or type(obj)==type: if not obj in self.objtps: self.objtps.append(obj) elif not obj in self.objs: self.objs.append(obj) self.keys[dd] = func
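A minimal usage sketch for the Wind class above (not part of window.py). It assumes an interactive Tk backend, since the handlers read Tk-specific guiEvent fields such as num and state; all names below are illustrative.

# Hypothetical usage sketch for Wind; assumes a Tk matplotlib backend.
from window import Wind

win = Wind("demo", 400, 300)                  # a 400 x 300 pixel figure
box = win.add_rect(50, 50, 100, 80, 2, 'black', 'lightblue')
win.add_text(60, 150, "click the box")

def on_click(w, ev, obj):
    # repaint the rectangle on each left click
    obj.set_facecolor('salmon')
    w.fig.canvas.draw_idle()

win.bind_button(1, on_click, box)             # Tk button 1 = left mouse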
43.090551
184
0.551211
1,535
10,945
3.890554
0.109446
0.091427
0.054253
0.016075
0.737107
0.695747
0.648192
0.603818
0.551072
0.51708
0
0.024846
0.316035
10,945
253
185
43.26087
0.772909
0.08095
0
0.451923
0
0
0.009866
0
0
0
0
0
0
1
0.076923
false
0
0.009615
0
0.125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a1a13216caf5e75646057305926cf09df44d4d2
2,503
py
Python
pkgs/bokeh-0.11.1-py27_0/lib/python2.7/site-packages/bokeh/models/layouts.py
wangyum/anaconda
6e5a0dbead3327661d73a61e85414cf92aa52be6
[ "Apache-2.0", "BSD-3-Clause" ]
null
null
null
pkgs/bokeh-0.11.1-py27_0/lib/python2.7/site-packages/bokeh/models/layouts.py
wangyum/anaconda
6e5a0dbead3327661d73a61e85414cf92aa52be6
[ "Apache-2.0", "BSD-3-Clause" ]
null
null
null
pkgs/bokeh-0.11.1-py27_0/lib/python2.7/site-packages/bokeh/models/layouts.py
wangyum/anaconda
6e5a0dbead3327661d73a61e85414cf92aa52be6
[ "Apache-2.0", "BSD-3-Clause" ]
null
null
null
""" Various kinds of layout components. """ from __future__ import absolute_import import logging logger = logging.getLogger(__name__) from ..core import validation from ..core.validation.warnings import EMPTY_LAYOUT, BOTH_CHILD_AND_ROOT from ..core.properties import abstract from ..core.properties import Int, Instance, List from .component import Component @abstract class Layout(Component): """ An abstract base class for layout components. ``Layout`` is not generally useful to instantiate on its own. """ width = Int(help=""" An optional width for the component (in pixels). """) height = Int(help=""" An optional height for the component (in pixels). """) @abstract class BaseBox(Layout): """ Abstract base class for HBox and VBox. Do not use directly. """ def __init__(self, *args, **kwargs): if len(args) > 0 and "children" in kwargs: raise ValueError("'children' keyword cannot be used with positional arguments") elif len(args) > 0: kwargs["children"] = list(args) super(BaseBox, self).__init__(**kwargs) @validation.warning(EMPTY_LAYOUT) def _check_empty_layout(self): from itertools import chain if not list(chain(self.children)): return str(self) @validation.warning(BOTH_CHILD_AND_ROOT) def _check_child_is_also_root(self): problems = [] for c in self.children: if c.document is not None and c in c.document.roots: problems.append(str(c)) if problems: return ", ".join(problems) else: return None children = List(Instance(Component), help=""" The list of children, which can be other components including layouts, widgets and plots. """) class HBox(BaseBox): """ Lay out child components in a single horizontal row. Children can be specified as positional arguments, as a single argument that is a sequence, or using the ``children`` keyword argument. """ class VBox(BaseBox): """ Lay out child components in a single vertical row. Children can be specified as positional arguments, as a single argument that is a sequence, or using the ``children`` keyword argument. """ # parent class only, you need to set the fields you want class VBoxForm(VBox): """ Basically, a VBox, where all components (generally form stuff) is wrapped in a <form> tag - important for bootstrap css """
28.443182
91
0.6664
324
2,503
5.046296
0.391975
0.019572
0.014679
0.019572
0.209174
0.18104
0.18104
0.18104
0.13578
0.13578
0
0.001053
0.240911
2,503
87
92
28.770115
0.859474
0.304435
0
0.108696
0
0
0.179673
0
0
0
0
0
0
1
0.065217
false
0
0.173913
0
0.478261
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a1fbdfe8d21910a1423347f9c1cef5c7530cf66
4,186
py
Python
audioproc/util.py
penrin/audioproc
ac3df5015d87f2a1e2a7a86ac7f5b75ae8314c03
[ "MIT" ]
2
2018-09-18T08:55:26.000Z
2020-01-24T04:31:25.000Z
audioproc/util.py
penrin/audioproc
ac3df5015d87f2a1e2a7a86ac7f5b75ae8314c03
[ "MIT" ]
null
null
null
audioproc/util.py
penrin/audioproc
ac3df5015d87f2a1e2a7a86ac7f5b75ae8314c03
[ "MIT" ]
null
null
null
# coding: utf-8

import sys
import time
from math import ceil
import threading


def progressbar(percent, end=1, bar_length=40, slug='#', space='-'):
    percent = percent / end  # float
    slugs = slug * int(round(percent * bar_length))
    spaces = space * (bar_length - len(slugs))
    bar = slugs + spaces
    sys.stdout.write("\r[{bar}] {percent:.1f}% ".format(
        bar=bar, percent=percent * 100.))
    sys.stdout.flush()
    if percent == 1:
        print()


class ProgressBar():

    def __init__(self, bar_length=40, slug='#', space='-', countdown=True):
        self.bar_length = bar_length
        self.slug = slug
        self.space = space
        self.countdown = countdown
        self.start_time = None
        self.start_percent = 0

    def bar(self, percent, end=1, tail=''):
        percent = percent / end
        if self.countdown:
            if percent < 1:
                progress = percent - self.start_percent
                if self.start_time is None:
                    self.start_time = time.perf_counter()
                    self.start_percent = percent
                    remain = 'Remain --:--:--'
                elif progress == 0:
                    remain = 'Remain --:--:--'
                else:
                    # estimate remaining time from the progress rate so far
                    elapsed_time = time.perf_counter() - self.start_time
                    remain_t = ceil((elapsed_time / progress) * (1 - percent))
                    h = remain_t // 3600
                    m = remain_t % 3600 // 60
                    s = remain_t % 60
                    remain = 'Remain %02d:%02d:%02d' % (h, m, s)
            else:
                elapsed_time = ceil(time.perf_counter() - self.start_time)
                h = elapsed_time // 3600
                m = elapsed_time % 3600 // 60
                s = elapsed_time % 60
                remain = 'Elapsed %02d:%02d:%02d' % (h, m, s)
        else:
            remain = ''
        len_slugs = int(percent * self.bar_length)
        slugs = self.slug * len_slugs
        spaces = self.space * (self.bar_length - len_slugs)
        txt = '\r[{bar}] {percent:.1%} {remain} {tail}'.format(
            bar=(slugs + spaces), percent=percent, remain=remain, tail=tail)
        if percent == 1:
            txt += '\n'
            self.start_time = None
        sys.stdout.write(txt)
        sys.stdout.flush()


class ProgressBar2(ProgressBar):

    def __init__(self, end, bar_length=40, slug='#', space='-', countdown=True):
        super().__init__(bar_length, slug, space, countdown)
        self.counter = 0
        self.end = end
        self.bar()

    def bar(self, tail=''):
        super().bar(self.counter, end=self.end, tail=tail)
        self.counter += 1


class Propeller:

    def __init__(self, charlist=None, sleep=0.1):
        # fall back to the default spinner characters when none are given
        self.charlist = charlist if charlist is not None else ['|', '/', '-', '\\']
        self.sleep = sleep
        self.working = True

    def progress(self):
        N = len(self.charlist)
        i = 0
        sys.stdout.write(' ')
        while self.working:
            sys.stdout.write('\b' + self.charlist[i])
            sys.stdout.flush()
            time.sleep(self.sleep)
            i = (i + 1) % N
        sys.stdout.write('\b' + 'done')
        sys.stdout.flush()

    def start(self):
        self.working = True
        self.thread = threading.Thread(target=self.progress)
        self.thread.start()

    def end(self):
        self.working = False
        self.thread.join()


def id(x):  # return the memory block address of the (numpy) array
    return x.__array_interface__['data'][0]


if __name__ == '__main__':

    # progress bar demo
    N = 100
    pg = ProgressBar2(N)
    for n in range(N):
        time.sleep(0.02)
        pg.bar()

    # propeller demo
    print()
    print('Propeller...', end='')
    p = Propeller()
    p.start()
    time.sleep(3)
    p.end()
27.181818
80
0.497611
460
4,186
4.378261
0.202174
0.044687
0.038729
0.022344
0.192651
0.12711
0.087388
0.038729
0
0
0
0.027916
0.375299
4,186
153
81
27.359477
0.742256
0.016722
0
0.2
0
0
0.044526
0
0
0
0
0
0
1
0.090909
false
0
0.036364
0.009091
0.163636
0.027273
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a245ad941d89cf0fc99cf5e10852740cea18cb7
1,999
py
Python
src/nanoemoji/write_font2png.py
rsheeter/nanoemoji
d65a6271541304db60e65accd6e81f7f5a20a381
[ "Apache-2.0" ]
3
2020-04-05T08:39:30.000Z
2020-04-05T12:51:18.000Z
src/nanoemoji/write_font2png.py
rsheeter/nanoemoji
d65a6271541304db60e65accd6e81f7f5a20a381
[ "Apache-2.0" ]
2
2020-04-16T18:50:33.000Z
2020-04-17T18:19:37.000Z
src/nanoemoji/write_font2png.py
rsheeter/nanoemoji
d65a6271541304db60e65accd6e81f7f5a20a381
[ "Apache-2.0" ]
1
2020-04-16T18:46:50.000Z
2020-04-16T18:46:50.000Z
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Generate a png using Skia."""


from absl import app
from absl import flags
from absl import logging
from nanoemoji import codepoints
from nanoemoji import util
import os
import shutil
import subprocess
import sys


FLAGS = flags.FLAGS


flags.DEFINE_integer("height", None, "png height, pixels.")
flags.DEFINE_integer("width", None, "png width, pixels.")
flags.DEFINE_string("output_file", None, "Output filename.")


def main(argv):
    src_svg = util.only(argv, lambda a: a.endswith(".svg"))
    font_file = util.only(argv, lambda a: a.endswith(".ttf"))

    text = "".join(
        chr(cp) for cp in codepoints.from_filename(os.path.basename(src_svg))
    )
    logging.info("%s %s", src_svg, text)

    colr_test_cmd = [
        "colr_test",
        "--font",
        font_file,
        "--output",
        FLAGS.output_file,
        "--text",
        text,
    ]
    if FLAGS.height is not None:
        colr_test_cmd.extend(("--height", str(FLAGS.height)))
    if FLAGS.width is not None:
        colr_test_cmd.extend(("--width", str(FLAGS.width)))

    if not shutil.which(colr_test_cmd[0]):
        sys.exit(
            f"{colr_test_cmd[0]} binary (https://github.com/rsheeter/skia_colr/tree/colr_test) must be on PATH"
        )

    logging.info(" ".join(colr_test_cmd))
    subprocess.run(colr_test_cmd, check=True)
    logging.info("Wrote %s", FLAGS.output_file)


if __name__ == "__main__":
    app.run(main)
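The script is a thin CLI around the external colr_test renderer; a hypothetical invocation (file names illustrative, flag names from the DEFINE_* calls above) would be:

# python write_font2png.py --width 128 --height 128 \
#     --output_file emoji_u1f600.png emoji_u1f600.svg Font.ttf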
29.397059
111
0.68034
292
1,999
4.530822
0.441781
0.054422
0.058201
0.024187
0.081633
0.081633
0.081633
0
0
0
0
0.006258
0.2006
1,999
67
112
29.835821
0.821652
0.288144
0
0
0
0.02381
0.174377
0
0
0
0
0
0
1
0.02381
false
0
0.214286
0
0.238095
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a264f5dbd9a612aaac81b724c92b68c8610cf0c
5,663
py
Python
cincan/command_log.py
cincanproject/cincan-command
b8cde81931b1c8583ac7daa1327520fb9f06856e
[ "MIT" ]
1
2022-03-11T02:37:42.000Z
2022-03-11T02:37:42.000Z
cincan/command_log.py
cincanproject/cincan-command
b8cde81931b1c8583ac7daa1327520fb9f06856e
[ "MIT" ]
null
null
null
cincan/command_log.py
cincanproject/cincan-command
b8cde81931b1c8583ac7daa1327520fb9f06856e
[ "MIT" ]
null
null
null
import hashlib
import json
import pathlib
import string
import uuid
import os
import getpass
from datetime import datetime
from typing import Optional, List, Dict, Any, Iterable, Callable

JSON_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'


def quote_args(args: Iterable[str]) -> List[str]:
    """Quote the arguments which contain whitespace"""
    r = []
    for arg in args:
        if any(map(lambda c: c in string.whitespace, arg)):
            r.append(f'"{arg}"')
        else:
            r.append(arg)
    return r


def read_with_hash(read_more: Callable, write_to: Optional[Callable] = None) -> str:
    """Read data from stream, calculate hash, optionally write the data to stream"""
    md = hashlib.sha256()
    chunk = read_more(2048)
    while chunk:
        md.update(chunk)
        if write_to:
            write_to(chunk)
        chunk = read_more(2048)
    return md.hexdigest()


class FileLog:
    """Command log entry for a file"""

    def __init__(self, path: pathlib.Path, digest: str, timestamp: Optional[datetime] = None):
        self.path = path
        self.digest = digest
        self.timestamp = timestamp

    def to_json(self) -> Dict[str, Any]:
        js = {
            'path': self.path.as_posix()
        }
        if self.digest:
            js['sha256'] = self.digest
        if self.timestamp:
            js['timestamp'] = self.timestamp.strftime(JSON_TIME_FORMAT)
        return js

    @classmethod
    def from_json(cls, js: Dict[str, Any]) -> 'FileLog':
        log = FileLog(pathlib.Path(js['path']), js.get('sha256', ''))
        if 'timestamp' in js:
            log.timestamp = datetime.strptime(js['timestamp'], JSON_TIME_FORMAT)
        return log

    def __repr__(self) -> str:
        return json.dumps(self.to_json(), indent=4)


class CommandLog:
    """Command log entry"""

    def __init__(self, command: List[str], timestamp: Optional[datetime] = None):
        self.command = command
        # a default of datetime.now() in the signature would be evaluated only
        # once, at definition time, so resolve the default per call instead
        self.timestamp = timestamp if timestamp is not None else datetime.now()
        self.exit_code = 0
        self.stdin: Optional[bytes] = b''
        self.stdout: Optional[bytes] = b''
        self.stderr: Optional[bytes] = b''
        self.in_files: List[FileLog] = []
        self.out_files: List[FileLog] = []

    def command_string(self) -> str:
        return " ".join(quote_args(self.command))

    def to_json(self) -> Dict[str, Any]:
        js = {
            'command': self.command,
            'timestamp': self.timestamp.strftime(JSON_TIME_FORMAT),
            'exit_code': self.exit_code,
        }
        if len(self.in_files) > 0:
            js['input'] = [f.to_json() for f in self.in_files]
        if len(self.out_files) > 0:
            js['output'] = [f.to_json() for f in self.out_files]
        return js

    @classmethod
    def from_json(cls, js: Dict[str, Any]) -> 'CommandLog':
        log = CommandLog(js['command'], datetime.strptime(js['timestamp'], JSON_TIME_FORMAT))
        if 'input' in js:
            log.in_files = [FileLog.from_json(fs) for fs in js['input']]
        if 'output' in js:
            log.out_files = [FileLog.from_json(fs) for fs in js['output']]
        return log

    def __repr__(self) -> str:
        return json.dumps(self.to_json(), indent=4)


class CommandLogBase:
    """Command log reader/writer base class"""

    def __init__(self, log_directory: Optional[pathlib.Path] = None):
        self.file_name_format = '%Y-%m-%d-%H-%M-%S-%f'
        # if ~/.cincan already contains a uid file, reuse it; don't create a new folder
        uid_path = pathlib.Path.home() / '.cincan' / 'uid.txt'
        if os.path.isfile(uid_path):
            with open(uid_path, "r") as f:
                self.directoryname = f.read()
        else:
            # create a uuid and the folder structure around it
            self.directoryname = str(uuid.uuid1())
            uid_path.parent.mkdir(parents=True, exist_ok=True)
            with open(uid_path, "w") as uid_file:
                uid_file.write(self.directoryname)
        self.log_directory = log_directory or \
            (pathlib.Path.home() / '.cincan' / 'shared' / self.directoryname / 'logs')
        self.log_directory.mkdir(parents=True, exist_ok=True)


class CommandLogWriter(CommandLogBase):
    """Command log writer"""

    def __init__(self, log_directory: Optional[pathlib.Path] = None):
        super().__init__(log_directory)

    def write(self, log: CommandLog):
        log_file = self.__log_file()
        while log_file.exists():
            log_file = self.__log_file()
        with log_file.open("w") as f:
            json.dump(log.to_json(), f)

    def __log_file(self) -> pathlib.Path:
        return self.log_directory / datetime.now().strftime(self.file_name_format)


class CommandLogIndex(CommandLogBase):
    """Command log index for reading command log"""

    def __init__(self, log_directory: Optional[pathlib.Path] = None):
        super().__init__(log_directory)
        log_root = self.log_directory.parent or self.log_directory
        # read log from all users
        self.array = self.__read_log(log_root)

    def list_entries(self, reverse: bool = False) -> Iterable[CommandLog]:
        return sorted(self.array, key=lambda e: e.timestamp, reverse=reverse)

    def __read_log(self, directory: pathlib.Path) -> List[CommandLog]:
        log_l = []
        for file in directory.iterdir():
            if file.is_dir():
                # recursively descend into sub-directories
                log_l.extend(self.__read_log(file))
            else:
                with file.open('r') as f:
                    js = json.load(f)
                    log_l.append(CommandLog.from_json(js))
        return log_l


class CommandRunner:

    def run(self, args: List[str]) -> CommandLog:
        raise NotImplementedError()
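A hypothetical round-trip sketch for the log classes above (not part of command_log.py); the file name and digest below are placeholders:

import pathlib
from cincan.command_log import CommandLog, FileLog

log = CommandLog(["tool", "--input", "my file.txt"])
log.in_files.append(FileLog(pathlib.Path("my file.txt"), "0" * 64))  # placeholder digest

print(log.command_string())   # arguments containing whitespace get quoted
js = log.to_json()
assert CommandLog.from_json(js).command == log.command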
33.708333
117
0.605686
734
5,663
4.495913
0.231608
0.025455
0.043636
0.025455
0.254242
0.243333
0.235152
0.173333
0.13697
0.104242
0
0.005526
0.265054
5,663
167
118
33.91018
0.787362
0.073106
0
0.224
0
0
0.048369
0
0
0
0
0
0
1
0.152
false
0.008
0.072
0.04
0.368
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a26a78aca788f06dad1ff0afc3ba57148736d10
601
py
Python
scripts/create_predicate_list.py
heidonomm/mhopRL
93db4dda4646412f1783a6e05f9b779005539a2f
[ "CECILL-B" ]
null
null
null
scripts/create_predicate_list.py
heidonomm/mhopRL
93db4dda4646412f1783a6e05f9b779005539a2f
[ "CECILL-B" ]
null
null
null
scripts/create_predicate_list.py
heidonomm/mhopRL
93db4dda4646412f1783a6e05f9b779005539a2f
[ "CECILL-B" ]
null
null
null
import json

file_to_read_from = "toy_data/verb_only/constrained_training_data.txt"
file_to_write_to = "toy_data/verb_only/constrained_predicate_list.txt"

all_preds = set()

with open(file_to_read_from, 'r') as in_file, open(file_to_write_to, "w") as out_file:
    for sample in in_file:
        row = json.loads(sample)
        for pred in row['fact1_pred']:
            all_preds.add(pred)
        for pred in row['fact2_pred']:
            all_preds.add(pred)

    preds_sorted = list(all_preds)
    preds_sorted.sort()
    for pred in preds_sorted:
        out_file.write(f"{pred.strip()}\n")
31.631579
86
0.682196
97
601
3.886598
0.402062
0.06366
0.071618
0.074271
0.238727
0
0
0
0
0
0
0.004184
0.204659
601
18
87
33.388889
0.784519
0.234609
0
0.153846
0
0
0.083151
0
0
0
0
0
0
1
0
false
0
0.076923
0
0.076923
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a28435672b0a18582a635521b01aabb3d461e98
22,104
py
Python
edb/lang/graphql/translator.py
1st1/edgedb
3e234aede215d4fc517be9397a25bb16e5f1ace3
[ "Apache-2.0" ]
2
2019-12-09T12:52:58.000Z
2020-02-20T15:20:22.000Z
edb/lang/graphql/translator.py
1st1/edgedb
3e234aede215d4fc517be9397a25bb16e5f1ace3
[ "Apache-2.0" ]
null
null
null
edb/lang/graphql/translator.py
1st1/edgedb
3e234aede215d4fc517be9397a25bb16e5f1ace3
[ "Apache-2.0" ]
null
null
null
# # This source file is part of the EdgeDB open source project. # # Copyright 2016-present MagicStack Inc. and the EdgeDB authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import namedtuple from graphql import graphql as gql_proc, GraphQLString, GraphQLID import json import re from edb.lang import edgeql from edb.lang.common import ast from edb.lang.edgeql import ast as qlast from edb.lang.graphql import ast as gqlast, parser as gqlparser from edb.lang.schema import error as s_error from . import types as gt from .errors import GraphQLValidationError, GraphQLCoreError class GraphQLTranslatorContext: def __init__(self, *, schema, gqlcore, variables, operation_name, query): self.schema = schema self.variables = variables self.operation_name = operation_name self.fragments = {} self.validated_fragments = {} self.vars = {} self.fields = [] self.path = [] self.filter = None self.include_base = [False] self.gql_schema = gt.Schema(gqlcore) self.gqlcore_schema = gqlcore._gql_schema self.query = query Step = namedtuple('Step', ['name', 'type']) Field = namedtuple('Field', ['name', 'value']) class GraphQLTranslator(ast.NodeVisitor): def get_type(self, name, *, context=None): # the type may be from the EdgeDB schema or some special # GraphQL type/adapter assert isinstance(name, str) try: return self._context.gql_schema.get(name) except s_error.SchemaError: if context: raise GraphQLValidationError( f"{name!r} does not exist in the schema", context=context) raise def get_field_type(self, base, name, *, args=None, context=None): try: target = base.get_field_type(name, args) except s_error.SchemaError: if not context: raise target = None if target is None: if context: raise GraphQLValidationError( f"field {name!r} is " + f"invalid for {base.short_name}", context=context) return target def visit_Document(self, node): # we need to index all of the fragments before we process operations self._context.fragments = { f.name: f for f in node.definitions if isinstance(f, gqlast.FragmentDefinition) } gqlresult = gql_proc( self._context.gqlcore_schema, self._context.query, variable_values={ name[1:]: val for name, val in self._context.variables.items() }, operation_name=self._context.operation_name, ) if gqlresult.errors: for err in gqlresult.errors: raise GraphQLCoreError( err.message, line=err.locations[0].line, col=err.locations[0].column, ) translated = dict( d for d in self.visit(node.definitions) if d is not None) eql = next(v for v in translated.values()) for el in eql[0].result.elements: # swap in the json bits if (isinstance(el.compexpr, qlast.TypeCast) and el.compexpr.type.maintype.name == 'json'): name = el.expr.steps[0].ptr.name el.compexpr.expr.value = json.dumps( gqlresult.data[name], indent=4) return translated def visit_FragmentDefinition(self, node): # fragments are already processed, no need to do anything here return None def visit_OperationDefinition(self, node): # create a dict of variables that will be marked as # critical or not self._context.vars = { name: [val, False] for name, 
val in self._context.variables.items()} opname = None if (self._context.operation_name and node.name != self._context.operation_name): return None if node.type is None or node.type == 'query': stmt = self._visit_query(node) if node.name: opname = f'query {node.name}' elif node.type == 'mutation': stmt = self._visit_mutation(node) if node.name: opname = f'mutation {node.name}' else: raise ValueError(f'unsupported definition type: {node.type!r}') # produce the list of variables critical to the shape # of the query critvars = [(name, val) for name, (val, crit) in self._context.vars.items() if crit] critvars.sort() return (opname, (stmt, critvars)) def _visit_query(self, node): # populate input variables with defaults, where applicable if node.variables: self.visit(node.variables) # base Query needs to be configured specially base = self._context.gql_schema.get('Query') # special treatment of the selection_set, different from inner # recursion query = qlast.SelectQuery( result=qlast.Shape( expr=qlast.Path( steps=[qlast.ObjectRef(name='Query', module='graphql')] ), elements=[] ), ) self._context.fields.append({}) self._context.path.append([Step(None, base)]) query.result.elements = self.visit(node.selection_set) self._context.fields.pop() self._context.path.pop() return query def _visit_mutation(self, node): raise NotImplementedError def _should_include(self, directives): for directive in directives: if directive.name in ('include', 'skip'): cond = [a.value for a in directive.arguments if a.name == 'if'][0] if isinstance(cond, gqlast.Variable): var = self._context.vars[cond.value] cond = var[0] var[1] = True # mark the variable as critical else: cond = cond.value if not isinstance(cond, bool): raise GraphQLValidationError( f"'if' argument of {directive.name} " + "directive must be a Boolean", context=directive.context) if directive.name == 'include' and cond is False: return False elif directive.name == 'skip' and cond is True: return False return True def visit_VariableDefinition(self, node): variables = self._context.vars if not variables.get(node.name): if node.value is None: variables[node.name] = [None, False] else: variables[node.name] = [node.value.topython(), False] def visit_SelectionSet(self, node): elements = [] for sel in node.selections: if not self._should_include(sel.directives): continue spec = self.visit(sel) if spec is not None: elements.append(spec) elements = self.combine_field_results(elements) return elements def _is_duplicate_field(self, node): # if this field is a duplicate, that is not identical to the # original, throw an exception name = node.alias or node.name dup = self._context.fields[-1].get(name) if dup: return True else: self._context.fields[-1][name] = node return False # XXX: this might need to be trimmed def _is_top_level_field(self, node, fail=None): top = False path = self._context.path[-1] # there is different handling of top-level, built-in and inner # fields top = (len(self._context.path) == 1 and len(path) == 1 and path[0].name is None) prevt = path[-1].type target = self.get_field_type( prevt, node.name, args={ arg.name: self._get_field_arg_value(arg) for arg in node.arguments }, context=node.context) path.append(Step(name=node.name, type=target)) if not top and fail: raise GraphQLValidationError( f"field {node.name!r} can only appear at the top-level Query", context=node.context) return top def _get_field_arg_value(self, arg): if isinstance(arg.value, gqlast.Variable): return self._context.vars[arg.value.value] elif isinstance(arg.value, 
gqlast.InputObjectLiteral): # this value only matters for introspection, but # introspection can never have an InputObjectLiteral return {} else: return arg.value.topython() def _get_parent_and_current_type(self): path = self._context.path[-1] cur = path[-1].type if len(path) > 1: par = path[-2].type else: par = self._context.path[-2][-1].type return par, cur def _prepare_field(self, node): path = self._context.path[-1] include_base = self._context.include_base[-1] is_top = self._is_top_level_field(node) spath = self._context.path[-1] prevt, target = self._get_parent_and_current_type() # insert normal or specialized link steps = [] if include_base: base = spath[0].type steps.append(qlast.ObjectRef( module=base.module, name=base.short_name)) steps.append(qlast.Ptr( ptr=qlast.ObjectRef( name=node.name ) )) return is_top, path, prevt, target, steps def visit_Field(self, node): if self._is_duplicate_field(node): return is_top, path, prevt, target, steps = \ self._prepare_field(node) json_mode = False # determine if there needs to be extra subqueries if not prevt.dummy and target.dummy: json_mode = True # this is a special introspection type eql, shape, filterable = target.get_template() spec = qlast.ShapeElement( expr=qlast.Path( steps=[qlast.Ptr( ptr=qlast.ObjectRef( name=node.alias or node.name ) )] ), compexpr=eql, ) elif prevt.is_field_shadowed(node.name): if prevt.has_native_field(node.name) and not node.alias: spec = filterable = shape = qlast.ShapeElement( expr=qlast.Path(steps=steps), ) else: prefix = qlast.Path(steps=self.get_path_prefix(-1)) eql, shape, filterable = prevt.get_field_template( node.name, parent=prefix, has_shape=bool(node.selection_set) ) spec = qlast.ShapeElement( expr=qlast.Path( steps=[qlast.Ptr( ptr=qlast.ObjectRef( # this is already a sub-query name=node.alias or node.name ) )] ), compexpr=eql ) else: # if the parent is NOT a shadowed type, we need an explicit SELECT eql, shape, filterable = target.get_template() spec = qlast.ShapeElement( expr=qlast.Path( steps=[qlast.Ptr( ptr=qlast.ObjectRef( # this is already a sub-query name=node.alias or node.name ) )] ), compexpr=eql ) if node.selection_set is not None: if json_mode: pass else: # a single recursion target, so we can process # selection set now self._context.fields.append({}) vals = self.visit(node.selection_set) self._context.fields.pop() if shape: shape.elements = vals if filterable: where, orderby, offset, limit = \ self._visit_arguments(node.arguments) filterable.where = where filterable.orderby = orderby filterable.offset = offset filterable.limit = limit path.pop() return spec def visit_InlineFragment(self, node): self._validate_fragment_type(node, node) result = self.visit(node.selection_set) if node.on is not None: self._context.path.pop() return result def visit_FragmentSpread(self, node): frag = self._context.fragments[node.name] self._validate_fragment_type(frag, node) # in case of secondary type, recurse into a copy to avoid # memoized results selection_set = frag.selection_set result = self.visit(selection_set) self._context.path.pop() return result def _validate_fragment_type(self, frag, spread): is_specialized = False base_type = None # validate the fragment type w.r.t. 
the base if frag.on is None: return # validate the base if it's nested if len(self._context.path) > 0: path = self._context.path[-1] base_type = path[-1].type frag_type = self.get_type(frag.on) if base_type.issubclass(frag_type): # legal hierarchy, no change pass elif frag_type.issubclass(base_type): # specialized link, but still legal is_specialized = True else: raise GraphQLValidationError( f"{base_type.short_name} and {frag_type.short_name} " + "are not related", context=spread.context) self._context.path.append([Step(frag.on, frag_type)]) self._context.include_base.append(is_specialized) def _visit_arguments(self, arguments): where = offset = limit = None orderby = [] for arg in arguments: if arg.name == 'filter': where = self.visit(arg.value) elif arg.name == 'order': orderby = self.visit_order(arg.value) return where, orderby, offset, limit def get_path_prefix(self, end_trim=None): # flatten the path path = [step for psteps in self._context.path for step in psteps] # find the first shadowed root for i, step in enumerate(path): base = step.type if base.shadow: break # trim the rest of the path path = path[i + 1:end_trim] prefix = [ qlast.ObjectRef(module=base.module, name=base.short_name) ] prefix.extend( qlast.Ptr(ptr=qlast.ObjectRef(name=step.name)) for step in path ) return prefix def visit_ListLiteral(self, node): return qlast.Array(elements=self.visit(node.value)) def visit_InputObjectLiteral(self, node): # this represents some expression to be used in filter result = [] for field in node.value: result.append(self.visit(field)) return self._join_expressions(result) def visit_ObjectField(self, node): fname = node.name # handle boolean ops if fname == 'and': return self._visit_list_of_inputs(node.value.value, ast.ops.AND) elif fname == 'or': return self._visit_list_of_inputs(node.value.value, ast.ops.OR) elif fname == 'not': return qlast.UnaryOp(op=ast.ops.NOT, operand=self.visit(node.value)) # handle various scalar ops op = gt.GQL_TO_OPS_MAP.get(fname) if op: value = self.visit(node.value) return qlast.BinOp(left=self._context.filter, op=op, right=value) # we're at the beginning of a scalar op _, target = self._get_parent_and_current_type() name = self.get_path_prefix() name.append(qlast.Ptr(ptr=qlast.ObjectRef(name=fname))) name = qlast.Path(steps=name) # potentially need to cast the 'name' side into a <str>, so as # to be compatible with the 'value' typename = target.get_field_type(fname).short_name if (typename != 'str' and gt.EDB_TO_GQL_SCALARS_MAP[typename] in {GraphQLString, GraphQLID}): name = qlast.TypeCast( expr=name, type=qlast.TypeName(maintype=qlast.ObjectRef(name='str')), ) self._context.filter = name return self.visit(node.value) def visit_order(self, node): # if there is no specific ordering, then order by id if not node.value: return [qlast.SortExpr( path=qlast.Path( steps=[qlast.Ptr(ptr=qlast.ObjectRef(name='id'))], partial=True, ), direction=qlast.SortAsc, )] # Ordering is handled by specifying a list of special Ordering objects. # Validation is already handled by this point. 
orderby = [] for enum in node.value: name, direction, nulls = self._visit_order_item(enum) orderby.append(qlast.SortExpr( path=qlast.Path( steps=[qlast.Ptr(ptr=qlast.ObjectRef(name=name))], partial=True, ), direction=direction, nones_order=nulls, )) return orderby def _visit_order_item(self, node): name = node.name direction = nulls = None for part in node.value.value: if part.name == 'dir': direction = part.value.value if part.name == 'nulls': nulls = part.value.value # direction is a required field, so we can rely on it having # one of two values if direction == 'ASC': direction = qlast.SortAsc # nulls are optional, but are 'SMALLEST' by default if nulls == 'BIGGEST': nulls = qlast.NonesLast else: nulls = qlast.NonesFirst else: # DESC direction = qlast.SortDesc # nulls are optional, but are 'SMALLEST' by default if nulls == 'BIGGEST': nulls = qlast.NonesFirst else: nulls = qlast.NonesLast return name, direction, nulls def visit_Variable(self, node): return qlast.Parameter(name=node.value[1:]) def visit_Literal(self, node): return qlast.Constant(value=node.value) def _visit_list_of_inputs(self, inputs, op): result = [self.visit(node) for node in inputs] return self._join_expressions(result, op) def _join_expressions(self, exprs, op=ast.ops.AND): if not exprs: return None elif len(exprs) == 1: return exprs[0] result = qlast.BinOp( left=exprs[0], op=op, right=exprs[1] ) for expr in exprs[2:]: result = qlast.BinOp( left=result, op=op, right=expr ) return result def combine_field_results(self, results, *, flatten=True): if flatten: flattened = [] for res in results: if isinstance(res, Field): flattened.append(res) elif ast.is_container(res): flattened.extend(res) else: flattened.append(res) return flattened else: return results def translate(schema, graphql, *, variables=None, operation_name=None): if variables is None: variables = {} # HACK query = re.sub(r'@edgedb\(.*?\)', '', graphql) schema2 = gt.GQLCoreSchema(schema) parser = gqlparser.GraphQLParser() gqltree = parser.parse(graphql) context = GraphQLTranslatorContext( schema=schema, gqlcore=schema2, query=query, variables=variables, operation_name=operation_name) edge_forest_map = GraphQLTranslator(context=context).visit(gqltree) code = [] for name, (tree, critvars) in sorted(edge_forest_map.items()): if name: code.append(f'# {name}') if critvars: crit = [f'{vname}={val!r}' for vname, val in critvars] code.append(f'# critical variables: {", ".join(crit)}') code += [edgeql.generate_source(tree), ';'] return '\n'.join(code)
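A hypothetical sketch of the module's entry point (not part of translator.py). It assumes `schema` is an already-loaded EdgeDB schema object, and the GraphQL text and variable are purely illustrative; note that variable names keep their leading '$' in the `variables` dict, since the translator strips it before handing values to the GraphQL core:

from edb.lang.graphql.translator import translate

eql = translate(
    schema,
    '''
    query getUsers($name: String) {
        User(filter: {name: {eq: $name}}) {
            name
        }
    }
    ''',
    variables={'$name': 'alice'},
    operation_name='getUsers',
)
print(eql)  # generated EdgeQL source, with critical variables noted in comments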
32.892857
79
0.548408
2,462
22,104
4.818846
0.177092
0.037087
0.017701
0.010789
0.169083
0.12264
0.107384
0.084794
0.076028
0.060182
0
0.003348
0.364821
22,104
671
80
32.941878
0.841667
0.120385
0
0.240246
0
0
0.029011
0.002271
0
0
0
0
0.002053
1
0.067762
false
0.004107
0.022587
0.008214
0.186858
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a2cc27481a689894fe33cbf5033fb30bae53046
28,555
py
Python
pykob/config.py
MorseKOB/pykob-4
bf86917e4e06ce9590f414ace0eacbde08416137
[ "MIT" ]
3
2020-06-29T19:59:39.000Z
2021-02-08T19:56:32.000Z
pykob/config.py
MorseKOB/PyKOB
bf86917e4e06ce9590f414ace0eacbde08416137
[ "MIT" ]
197
2020-04-30T08:08:52.000Z
2021-03-22T19:10:20.000Z
pykob/config.py
MorseKOB/pykob-4
bf86917e4e06ce9590f414ace0eacbde08416137
[ "MIT" ]
2
2021-04-17T01:05:24.000Z
2021-11-03T16:43:53.000Z
""" MIT License Copyright (c) 2020 PyKOB - MorseKOB in Python Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ """config module Reads configuration information for `per-machine` and `per-user` values. An example of a `per-machine` value is the KOB serial/com port (PORT). An example of a `per-user` value is the code speed (WPM). Configuration/preference values are read/written to: Windows: User: [user]\AppData\Roaming\pykob\config-[user].ini Machine: \ProgramData\pykob\config_app.ini Mac: User: ~/.pykob/config-[user].ini Machine: ~/.pykob/config_app.ini Linux: User: ~/.pykob/config-[user].ini Machine: ~/.pykob/config_app.ini The files are INI format with the values in a section named "PYKOB". """ import argparse import configparser import distutils import getpass import os import platform import pykob import socket import sys from distutils.util import strtobool from enum import Enum, IntEnum, unique from pykob import log @unique class Spacing(IntEnum): none = 0 char = 1 word = 2 @unique class CodeType(IntEnum): american = 1 international = 2 @unique class InterfaceType(IntEnum): key_sounder = 1 loop = 2 keyer = 3 # Application name __APP_NAME = "pykob" # INI Section __CONFIG_SECTION = "PYKOB" # System/Machine INI file Parameters/Keys __SERIAL_PORT_KEY = "PORT" # User INI file Parameters/Keys __AUTO_CONNECT_KEY = "AUTO_CONNECT" __CODE_TYPE_KEY = "CODE_TYPE" __INTERFACE_TYPE_KEY = "INTERFACE_TYPE" __INVERT_KEY_INPUT_KEY = "KEY_INPUT_INVERT" __LOCAL_KEY = "LOCAL" __MIN_CHAR_SPEED_KEY = "CHAR_SPEED_MIN" __REMOTE_KEY = "REMOTE" __SERVER_URL_KEY = "SERVER_URL" __SOUND_KEY = "SOUND" __SOUNDER_KEY = "SOUNDER" __SPACING_KEY = "SPACING" __STATION_KEY = "STATION" __TEXT_SPEED_KEY = "TEXT_SPEED" __WIRE_KEY = "WIRE" # Paths and Configurations app_config_dir = None app_config_file_path = None app_config = None user_config_dir = None user_config_file_path = None user_config = None # System information hostname = None os_name = None platform_name = None pyaudio_version = None pyserial_version = None python_version = None pykob_version = None system_name = None system_version = None user_home = None user_name = None # Machine/System Settings serial_port = None # User Settings auto_connect = False code_type = CodeType.american interface_type = InterfaceType.loop invert_key_input = False local = True remote = True server_url = None sound = True sounder = False spacing = Spacing.none station = None wire = 0 min_char_speed = 18 text_speed = 18 def onOffFromBool(b): """Return 'ON' if `b` is `True` and 'OFF' if `b` is `False` Parameters ---------- b : 
boolean The value to evaluate Return ------ 'ON' for `True`, 'OFF' for `False` """ #print(b) r = "ON" if b else "OFF" return r def noneOrValueFromStr(s): """Return `None` if `s` is '' and the string value otherwise Parameters ---------- s : str The string value to evaluate Return ------ `None` or the string value """ r = None if not s or not s.strip() or s.upper() == 'NONE' else s return r def create_config_files_if_needed(): global app_config_dir global app_config_file_path global user_config_dir global user_config_file_path # Create the files if they don't exist if not os.path.isfile(user_config_file_path): # need to create user_config_dir = os.path.split(user_config_file_path)[0] if not os.path.isdir(user_config_dir): os.makedirs(user_config_dir) f = open(user_config_file_path, 'w') f.close() if not os.path.isfile(app_config_file_path): # need to create app_config_dir = os.path.split(app_config_file_path)[0] if not os.path.isdir(app_config_dir): os.makedirs(app_config_dir) f = open(app_config_file_path, 'w') f.close() def set_auto_connect(s): """Sets the Auto Connect to wire enable state When set to `True` via a value of "TRUE"/"ON"/"YES" the application should automatically connect to the configured wire. Note that this is a 'suggestion'. It isn't used by the base pykob modules. It should be used by applications (like MKOB) to initiate a connection to the configured wire. Parameters ---------- s : str The enable/disable state to set as a string. Values of `YES`|`ON`|`TRUE` will enable auto-connect. Values of `NO`|`OFF`|`FALSE` will disable auto-connect. """ global auto_connect try: auto_connect = strtobool(str(s)) user_config.set(__CONFIG_SECTION, __AUTO_CONNECT_KEY, onOffFromBool(auto_connect)) except ValueError as ex: log.err("Auto Connect value '{}' is not a valid boolean value. Not setting value.".format(ex.args[0])) raise def set_code_type(s): """Sets the Code Type (for American or International) Parameters ---------- s : str The value `A|AMERICAN` will set the code type to 'American'. The value `I|INTERNATIONAL` will set the code type to 'International'. """ global code_type s = s.upper() if s=="A" or s=="AMERICAN": code_type = CodeType.american elif s=="I" or s=="INTERNATIONAL": code_type = CodeType.international else: msg = "TYPE value '{}' is not a valid `Code Type` value of 'AMERICAN' or 'INTERNATIONAL'.".format(s) log.err(msg) raise ValueError(msg) user_config.set(__CONFIG_SECTION, __CODE_TYPE_KEY, code_type.name.upper()) def set_interface_type(s): """Sets the Interface Type (for Key-Sounder, Loop or Keyer) Parameters ---------- s : str The value `KS|KEY_SOUNDER` will set the interface type to 'InterfaceType.key_sounder'. The value `L|LOOP` will set the interface type to 'InterfaceType.loop'. The value `K|KEYER` will set the interface type to 'InterfaceType.keyer'. """ global interface_type s = s.upper() if s=="KS" or s=="KEY_SOUNDER": interface_type = InterfaceType.key_sounder elif s=="L" or s=="LOOP": interface_type = InterfaceType.loop elif s=="K" or s=="KEYER": interface_type = InterfaceType.keyer else: msg = "TYPE value '{}' is not a valid `Interface Type` value of 'KEY_SOUNDER', 'LOOP' or 'KEYER'.".format(s) log.err(msg) raise ValueError(msg) user_config.set(__CONFIG_SECTION, __INTERFACE_TYPE_KEY, interface_type.name.upper()) def set_invert_key_input(b): """ Enable/disable key input signal (DSR) invert. When key-invert is enabled, the key input (DSR on the serial interface) is inverted (because the RS-232 logic is inverted). 
This is primarily used when the input is from a modem (in dial-up connection). Parameters ---------- b : string 'true/false' The enable/disable state to set as a string. Values of `YES`|`ON`|`TRUE` will enable key invert. Values of `NO`|`OFF`|`FALSE` will disable key invert. """ global invert_key_input try: invert_key_input = strtobool(str(b)) user_config.set(__CONFIG_SECTION, __INVERT_KEY_INPUT_KEY, onOffFromBool(invert_key_input)) except ValueError as ex: log.err("INVERT KEY INPUT value '{}' is not a valid boolean value. Not setting value.".format(ex.args[0])) raise def set_local(l): """Enable/disable local copy When local copy is enabled, the local sound/sounder configuration is used to locally sound the content being sent to the wire. Parameters ---------- l : str The enable/disable state to set as a string. Values of `YES`|`ON`|`TRUE` will enable local copy. Values of `NO`|`OFF`|`FALSE` will disable local copy. """ global local try: local = strtobool(str(l)) user_config.set(__CONFIG_SECTION, __LOCAL_KEY, onOffFromBool(local)) except ValueError as ex: log.err("LOCAL value '{}' is not a valid boolean value. Not setting value.".format(ex.args[0])) raise def set_remote(r): """Enable/disable remote send When remote send is enabled, the content will be sent to the wire configured. Parameters ---------- r : str The enable/disable state to set as a string. Values of `YES`|`ON`|`TRUE` will enable remote send. Values of `NO`|`OFF`|`FALSE` will disable remote send. """ global remote try: remote = strtobool(str(r)) user_config.set(__CONFIG_SECTION, __REMOTE_KEY, onOffFromBool(remote)) except ValueError as ex: log.err("REMOTE value '{}' is not a valid boolean value. Not setting value.".format(ex.args[0])) raise def set_min_char_speed(s): """Sets the minimum character speed in words per minute A difference between character speed (in WPM) and text speed (in WPM) is used to calulate a Farnsworth timing value. This is the minimum character speed. If the text speed is higher, then the character speed will be bumped up to the text speed. Parameters ---------- s : str The speed in words-per-minute as an interger string value """ global min_char_speed try: _speed = int(s) min_char_speed = _speed user_config.set(__CONFIG_SECTION, __MIN_CHAR_SPEED_KEY, str(min_char_speed)) except ValueError as ex: log.err("CHARS value '{}' is not a valid integer value. Not setting CWPM value.".format(ex.args[0])) raise def set_serial_port(p): """Sets the name/path of the serial/tty port to use for a key+sounder/loop interface Parameters ---------- p : str The 'COM' port for Windows, the 'tty' device path for Mac and Linux """ global serial_port serial_port = noneOrValueFromStr(p) app_config.set(__CONFIG_SECTION, __SERIAL_PORT_KEY, serial_port) def set_server_url(s): """Sets the KOB Server URL to connect to for wires Parameters ---------- s : str The KOB Server URL or None. Also set to None if the value is 'DEFAULT'. """ global server_url server_url = noneOrValueFromStr(s) if server_url and server_url.upper() == 'DEFAULT': server_url = None user_config.set(__CONFIG_SECTION, __SERVER_URL_KEY, server_url) def set_sound(s): """Sets the Sound/Audio enable state When set to `True` via a value of "TRUE"/"ON"/"YES" the computer audio will be used to produce sounder output. Parameters ---------- s : str The enable/disable state to set as a string. Values of `YES`|`ON`|`TRUE` will enable sound. Values of `NO`|`OFF`|`FALSE` will disable sound. 
""" global sound try: sound = strtobool(str(s)) user_config.set(__CONFIG_SECTION, __SOUND_KEY, onOffFromBool(sound)) except ValueError as ex: log.err("SOUND value '{}' is not a valid boolean value. Not setting value.".format(ex.args[0])) raise def set_sounder(s): """Sets the Sounder enable state When set to `True` via a value of "TRUE"/"ON"/"YES" the sounder will be driven if the `port` value is configured. Parameters ---------- s : str The enable/disable state to set as a string. Values of `YES`|`ON`|`TRUE` will enable sounder output. Values of `NO`|`OFF`|`FALSE` will disable sounder output. """ global sounder try: sounder = strtobool(str(s)) user_config.set(__CONFIG_SECTION, __SOUNDER_KEY, onOffFromBool(sounder)) except ValueError as ex: log.err("SOUNDER value '{}' is not a valid boolean value. Not setting value.".format(ex.args[0])) raise def set_spacing(s): """Sets the Spacing (for Farnsworth timing) to None (disabled) `Spacing.none`, Character `Spacing.char` or Word `Spacing.word` When set to `Spacing.none` Farnsworth spacing will not be added. When set to `Spacing.char` Farnsworth spacing will be added between characters. When set to `Spacing.word` Farnsworth spacing will be added between words. Parameters ---------- s : str The value `N|NONE` will set the spacing to `Spacing.none` (disabled). The value `C|CHAR` will set the spacing to `Spacing.char`. The value `W|WORD` will set the spacing to `Spacing.word`. """ global spacing s = s.upper() if s=="N" or s=="NONE": spacing = Spacing.none elif s=="C" or s=="CHAR" or s=="CHARACTER": spacing = Spacing.char elif s=="W" or s=="WORD": spacing = Spacing.word else: msg = "SPACING value '{}' is not a valid `Spacing` value of 'NONE', 'CHAR' or 'WORD'.".format(s) log.err(msg) raise ValueError(msg) user_config.set(__CONFIG_SECTION, __SPACING_KEY, spacing.name.upper()) def set_station(s): """Sets the Station ID to use when connecting to a wire Parameters ---------- s : str The Station ID """ global station station = noneOrValueFromStr(s) user_config.set(__CONFIG_SECTION, __STATION_KEY, station) def set_wire(w: str): """Sets the wire to connect to Parameters ---------- w : str The Wire number """ global wire try: _wire = int(w) wire = _wire user_config.set(__CONFIG_SECTION, __WIRE_KEY, str(wire)) except ValueError as ex: log.err("Wire number value '{}' is not a valid integer value.".format(ex.args[0])) raise def set_text_speed(s): """Sets the Text (code) speed in words per minute Parameters ---------- s : str The text speed in words-per-minute as an interger string value """ global text_speed try: _speed = int(s) text_speed = _speed user_config.set(__CONFIG_SECTION, __TEXT_SPEED_KEY, str(text_speed)) except ValueError as ex: log.err("Text speed value '{}' is not a valid integer value.".format(ex.args[0])) raise def print_info(): """Print system and PyKOB configuration information """ print_system_info() print_config() def print_system_info(): """Print system information """ print("User:", user_name) print("User Home Path:", user_home) print("User Configuration File:", user_config_file_path) print("App Configuration File", app_config_file_path) print("OS:", os_name) print("System:", system_name) print("Version:", system_version) print("Platform:", platform_name) print("PyKOB:", pykob_version) print("Python:", python_version) print("PyAudio:", pyaudio_version) print("PySerial:", pyserial_version) print("Host:", hostname) def print_config(): """Print the PyKOB configuration """ url = noneOrValueFromStr(server_url) url = url if url else '' 
print("======================================") print("Serial serial_port: '{}'".format(serial_port)) print("--------------------------------------") print("Auto Connect to Wire:", onOffFromBool(auto_connect)) print("Code type:", code_type.name.upper()) print("Interface type:", interface_type.name.upper()) print("Invert key input:", onOffFromBool(invert_key_input)) print("Local copy:", onOffFromBool(local)) print("Remote send:", onOffFromBool(remote)) print("KOB Server URL:", url) print("Sound:", onOffFromBool(sound)) print("Sounder:", onOffFromBool(sounder)) print("Spacing:", spacing.name.upper()) print("Station: '{}'".format(noneOrValueFromStr(station))) print("Wire:", wire) print("Character speed", min_char_speed) print("Words per min speed:", text_speed) def save_config(): """Save (write) the configuration values out to the user and system/machine config files. """ create_config_files_if_needed() with open(user_config_file_path, 'w') as configfile: user_config.write(configfile, space_around_delimiters=False) with open(app_config_file_path, 'w') as configfile: app_config.write(configfile, space_around_delimiters=False) def read_config(): """Read the configuration values from the user and machine config files. """ global hostname global platform_name global os_name global pykob_version global python_version global pyaudio_version global pyserial_version global system_name global system_version global app_config global app_config_file_path global user_config global user_config_file_path global user_home global user_name # global serial_port # global auto_connect global code_type global interface_type global invert_key_input global local global min_char_speed global remote global server_url global sound global sounder global spacing global station global wire global text_speed # Get the system data try: user_name = getpass.getuser() user_home = os.path.expanduser('~') os_name = os.name system_name = platform.system() system_version = platform.release() platform_name = sys.platform pykob_version = pykob.VERSION python_version = "{}.{}.{}".format(sys.version_info.major, sys.version_info.minor, sys.version_info.micro) try: import pyaudio pyaudio_version = pyaudio.__version__ # NOTE: Using '__" property - not recommended, but only way to get version except: pyaudio_version = "PyAudio is not installed or the version information is not available (check installation)" try: import serial pyserial_version = serial.VERSION except: pyserial_version = "PySerial is not installed or the version information is not available (check installation)" hostname = socket.gethostname() # User configuration file name userConfigFileName = "config-{}.ini".format(user_name) app_configFileName = "config_app.ini" # Create the user and application configuration paths if system_name == "Windows": user_config_file_path = os.path.join(os.environ["LOCALAPPDATA"], os.path.normcase(os.path.join(__APP_NAME, userConfigFileName))) app_config_file_path = os.path.join(os.environ["ProgramData"], os.path.normcase(os.path.join(__APP_NAME, app_configFileName))) elif system_name == "Linux" or system_name == "Darwin": # Linux or Mac user_config_file_path = os.path.join(user_home, os.path.normcase(os.path.join(".{}".format(__APP_NAME), userConfigFileName))) app_config_file_path = os.path.join(user_home, os.path.normcase(os.path.join(".{}".format(__APP_NAME), app_configFileName))) else: log.err("Unknown System name") exit except KeyError as ex: log.err("Key '{}' not found in environment.".format(ex.args[0])) exit 
create_config_files_if_needed() user_config_defaults = {\ __AUTO_CONNECT_KEY:"OFF", \ __CODE_TYPE_KEY:"AMERICAN", \ __INTERFACE_TYPE_KEY:"LOOP", \ __INVERT_KEY_INPUT_KEY:"OFF", \ __LOCAL_KEY:"ON", \ __MIN_CHAR_SPEED_KEY:"18", \ __REMOTE_KEY:"ON", \ __SERVER_URL_KEY:"NONE", \ __SOUND_KEY:"ON", \ __SOUNDER_KEY:"OFF", \ __SPACING_KEY:"NONE", \ __STATION_KEY:"", \ __WIRE_KEY:"", \ __TEXT_SPEED_KEY:"18"} app_config_defaults = {"PORT":""} user_config = configparser.ConfigParser(defaults=user_config_defaults, allow_no_value=True, default_section=__CONFIG_SECTION) app_config = configparser.ConfigParser(defaults=app_config_defaults, allow_no_value=True, default_section=__CONFIG_SECTION) user_config.read(user_config_file_path) app_config.read(app_config_file_path) try: # Get the System (App) config values serial_port = app_config.get(__CONFIG_SECTION, __SERIAL_PORT_KEY) # If there isn't a PORT value set PORT to None if not serial_port: serial_port = None # Get the User config values __option = "Auto Connect to Wire" __key = __AUTO_CONNECT_KEY auto_connect = user_config.getboolean(__CONFIG_SECTION, __key) __option = "Code type" __key = __CODE_TYPE_KEY _code_type = (user_config.get(__CONFIG_SECTION, __key)).upper() if _code_type == "AMERICAN": code_type = CodeType.american elif _code_type == "INTERNATIONAL": code_type = CodeType.international else: raise ValueError(_code_type) __option = "Interface type" __key = __INTERFACE_TYPE_KEY _interface_type = (user_config.get(__CONFIG_SECTION, __key)).upper() if _interface_type == "KEY_SOUNDER": interface_type = InterfaceType.key_sounder elif _interface_type == "LOOP": interface_type = InterfaceType.loop elif _interface_type == "KEYER": interface_type = InterfaceType.keyer else: raise ValueError(_interface_type) __option = "Invert key input" __key = __INVERT_KEY_INPUT_KEY invert_key_input = user_config.getboolean(__CONFIG_SECTION, __key) __option = "Local copy" __key = __LOCAL_KEY local = user_config.getboolean(__CONFIG_SECTION, __key) __option = "Minimum character speed" __key = __MIN_CHAR_SPEED_KEY min_char_speed = user_config.getint(__CONFIG_SECTION, __key) __option = "Remote send" __key = __REMOTE_KEY remote = user_config.getboolean(__CONFIG_SECTION, __key) __option = "Text speed" __key = __TEXT_SPEED_KEY text_speed = user_config.getint(__CONFIG_SECTION, __key) __option = "Server URL" __key = __SERVER_URL_KEY _server_url = user_config.get(__CONFIG_SECTION, __key) if (not _server_url) or (_server_url.upper() != "NONE"): server_url = _server_url __option = "Sound" __key = __SOUND_KEY sound = user_config.getboolean(__CONFIG_SECTION, __key) __option = "Sounder" __key = __SOUNDER_KEY sounder = user_config.getboolean(__CONFIG_SECTION, __key) __option = "Spacing" __key = __SPACING_KEY _spacing = (user_config.get(__CONFIG_SECTION, __key)).upper() if _spacing == "NONE": spacing = Spacing.none elif _spacing == "CHAR": spacing = Spacing.char elif _spacing == "WORD": spacing = Spacing.word else: raise ValueError(_spacing) __option = "Station" __key = __STATION_KEY _station = user_config.get(__CONFIG_SECTION, __key) if (not _station) or (_station.upper() != "NONE"): station = _station __option = "Wire" __key = __WIRE_KEY _wire = user_config.get(__CONFIG_SECTION, __key) if (_wire) or (_wire.upper() != "NONE"): try: wire = int(_wire) except ValueError as ex: # log.err("Wire number value '{}' is not a valid integer value.".format(_wire)) wire = 1 except KeyError as ex: log.err("Key '{}' not found in configuration file.".format(ex.args[0])) raise except ValueError as ex: 
log.err("{} option value '{}' is not a valid value. INI file key: {}.".format(__option, ex.args[0], __key)) raise # ### Mainline read_config() auto_connect_override = argparse.ArgumentParser(add_help=False) auto_connect_override.add_argument("-C", "--autoconnect", default="ON" if auto_connect else "OFF", choices=["ON", "On", "on", "YES", "Yes", "yes", "OFF", "Off", "off", "NO", "No", "no"], \ help="'ON' or 'OFF' to indicate whether an application should automatically connect to a configured wire.", \ metavar="auto-connect", dest="auto_connect") code_type_override = argparse.ArgumentParser(add_help=False) code_type_override.add_argument("-T", "--type", default=code_type.name.upper(), \ help="The code type (AMERICAN|INTERNATIONAL) to use.", metavar="code-type", dest="code_type") interface_type_override = argparse.ArgumentParser(add_help=False) interface_type_override.add_argument("-I", "--interface", default=interface_type.name.upper(), \ help="The interface type (KEY_SOUNDER|LOOP|KEYER) to use.", metavar="interface-type", dest="interface_type") invert_key_input_override = argparse.ArgumentParser(add_help=False) invert_key_input_override.add_argument("-M", "--iki", default=invert_key_input, \ help="Enable/disable inverting the key input signal (used for dial-up/modem connections).", metavar="invert-key-input", dest="invert_key_input") local_override = argparse.ArgumentParser(add_help=False) local_override.add_argument("-L", "--local", default=local, \ help="Enable/disable local copy of transmitted code.", metavar="local-copy", dest="local") min_char_speed_override = argparse.ArgumentParser(add_help=False) min_char_speed_override.add_argument("-c", "--charspeed", default=min_char_speed, type=int, \ help="The minimum character speed to use in words per minute (used for Farnsworth timing).", \ metavar="wpm", dest="min_char_speed") remote_override = argparse.ArgumentParser(add_help=False) remote_override.add_argument("-R", "--remote", default=remote, \ help="Enable/disable transmission over the internet on the specified wire.", \ metavar="remote-send", dest="remote") server_url_override = argparse.ArgumentParser(add_help=False) server_url_override.add_argument("-U", "--url", default=server_url, \ help="The KOB Server URL to use (or 'NONE' to use the default).", metavar="url", dest="server_url") serial_port_override = argparse.ArgumentParser(add_help=False) serial_port_override.add_argument("-p", "--port", default=serial_port, \ help="The name of the serial port to use (or 'NONE').", metavar="portname", dest="serial_port") sound_override = argparse.ArgumentParser(add_help=False) sound_override.add_argument("-a", "--sound", default="ON" if sound else "OFF", choices=["ON", "On", "on", "YES", "Yes", "yes", "OFF", "Off", "off", "NO", "No", "no"], \ help="'ON' or 'OFF' to indicate whether computer audio should be used to simulate a sounder.", \ metavar="sound", dest="sound") sounder_override = argparse.ArgumentParser(add_help=False) sounder_override.add_argument("-A", "--sounder", default="ON" if sounder else "OFF", choices=["ON", "On", "on", "YES", "Yes", "yes", "OFF", "Off", "off", "NO", "No", "no"], \ help="'ON' or 'OFF' to indicate whether to use sounder if `port` is configured.", \ metavar="sounder", dest="sounder") spacing_override = argparse.ArgumentParser(add_help=False) spacing_override.add_argument("-s", "--spacing", default=spacing.name.upper(), \ help="The spacing (NONE|CHAR|WORD) to use.", metavar="spacing", dest="spacing") station_override = argparse.ArgumentParser(add_help=False) 
station_override.add_argument("-S", "--station", default=station, \ help="The Station ID to use (or 'NONE').", metavar="station", dest="station") text_speed_override = argparse.ArgumentParser(add_help=False) text_speed_override.add_argument("-t", "--textspeed", default=text_speed, type=int, \ help="The Morse text speed in words per minute.", metavar="wpm", dest="text_speed") wire_override = argparse.ArgumentParser(add_help=False) wire_override.add_argument("-W", "--wire", default=wire, \ help="The Wire to use (or 'NONE').", metavar="wire", dest="wire")
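# --- Added usage sketch (not part of the original module) ---
# The override parsers above are all built with add_help=False so that an
# application can compose them via argparse's `parents=` mechanism. A minimal
# sketch, assuming this module is importable as `pykob.config` and that a
# hypothetical application wants the port, sound, and text-speed options:
import argparse
from pykob import config

arg_parser = argparse.ArgumentParser(
    description="Example app",  # hypothetical application name
    parents=[
        config.serial_port_override,
        config.sound_override,
        config.text_speed_override,
    ],
)
args = arg_parser.parse_args()
print(args.serial_port, args.sound, args.text_speed)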
34.528416
144
0.670075
3,835
28,555
4.749674
0.102477
0.028548
0.016909
0.018117
0.390393
0.306506
0.229317
0.170629
0.133956
0.12616
0
0.001743
0.216424
28,555
826
145
34.570218
0.812335
0.239398
0
0.261663
0
0.004057
0.173361
0.006074
0
0
0
0
0
1
0.046653
false
0.004057
0.028398
0
0.10142
0.070994
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a2d6e07e5f7df04f83ffe26d83f0a0d68c1aa34
8,568
py
Python
big_query.py
EnigmaOsiris/bigQuery
6ef891a93e7c7afd436952d9668c5d815223f152
[ "MIT" ]
null
null
null
big_query.py
EnigmaOsiris/bigQuery
6ef891a93e7c7afd436952d9668c5d815223f152
[ "MIT" ]
null
null
null
big_query.py
EnigmaOsiris/bigQuery
6ef891a93e7c7afd436952d9668c5d815223f152
[ "MIT" ]
null
null
null
import uuid import time from datetime import datetime from google.cloud import bigquery import google.cloud.bigquery.job from google.cloud.bigquery.job import DestinationFormat from google.cloud import storage import os import json class BigQuery(object): """ Attributes: client (google.cloud.bigquery.Client): The BigQuery connection project (str): The Google Cloud project verbose (bool): The verbosity flag """ client = None verbose = False project = None credentials = None name = None # dataset = None # storageClient = None def __init__(self, cred_file, project, verbose=True, name='Process'): """ Create a BigQuery connection Args: project (str): The Google Cloud project to connect to Creates a google.cloud.bigquery.Client and stores it on self.client. """ self.ticks = time.time() self.verbose = verbose self.project = project self.credentials = cred_file self.name = name if self.verbose: print('initializing BigQuery client...') self.client = bigquery.Client.from_service_account_json( self.credentials, project=self.project) def run_query(self, query, use_legacy_sql=False, query_id=None, use_query_cache=True, destination_dataset=None, destination_table=None, truncate=False): """ Run a query Args: query (str): The query to be run use_legacy_sql (bool): Is the input query written using legacy sql query_id (str): The unique ID for the query (a fresh UUID is generated when None) use_query_cache (bool): Should the query use cached data when available destination_dataset (str): The dataset the destination_table should be saved to. destination_table (str): If the data should be saved to a flat table, name it here. truncate (bool): Should the destination_table be truncated before writing new rows. Returns: google.cloud.bigquery.query.QueryResults: The unprocessed results of the query """ if query_id is None: # generate here rather than as a default argument, which would be evaluated only once at definition time query_id = str(uuid.uuid4()) job = self.client.run_async_query(query_id, query) job.use_query_cache = use_query_cache job.use_legacy_sql = use_legacy_sql if destination_table is not None: # NOTE: This is always set to true now, why would we ever not want large results, can be parameterized job.allow_large_results = True # NOTE: This saved to the class before, is that still needed? self.dataset = self.client.dataset(destination_dataset) job.destination = self.dataset.table(destination_table) # NOTE: This might need to be parameterized job.create_disposition = ( google.cloud.bigquery.job.CreateDisposition.CREATE_IF_NEEDED) if truncate: job.write_disposition = ( google.cloud.bigquery.job.WriteDisposition.WRITE_TRUNCATE) else: job.write_disposition = ( google.cloud.bigquery.job.WriteDisposition.WRITE_APPEND) if self.verbose: print("Starting Query") job.begin() self.wait_for_job(job) results = job.query_results() return results def wait_for_job(self, job): """ Blocking poll for query status """ if self.verbose: print("Waiting for results...") while True: job.reload() # Retry the job every second until it finishes. Throw an exception if it fails.
if job.state == 'DONE': if job.error_result: raise RuntimeError(job.errors) return time.sleep(1) def process_results(self, results, max_results=1000, fetch_all=True): """ Process the results from a QueryResults object Allows the results of a QueryResults object to be looped through, returning the data Args: results (google.cloud.bigquery.query.QueryResults): The results of the query max_results (int): The maximum number of results to return on each page fetch_all (bool): Should all of the results across the pages be returned Returns: data (list): a list of the returned results cols (list): the column names from the result schema tot (int): The number of rows processed """ # NOTE: The page token in this api version is a key generated by Google, so atm, the user cannot request a specific page of results page_token = None cols = [] data = [] time_init = None time_finish = None # print("=> Starting fetching next page : %s at %s" % (self.name, time.ctime(time.time()))) print("=> Starting fetching next page : %s at %s" % (self.name, datetime.now())) time_init = datetime.now() rows = results.fetch_data( max_results=max_results, page_token=page_token) print(rows) time_finish = datetime.now() delay_time = time_finish - time_init duration_in_s = delay_time.total_seconds() days = divmod(duration_in_s, 86400) hours = divmod(days[1], 3600) minutes = divmod(hours[1], 60) seconds = divmod(minutes[1], 1) print("=> Time Elapsed: %s Minutes %s Seconds" % (minutes[0], seconds[0])) while fetch_all: if self.verbose: print("=> Starting fetching next page : %s at %s" % (self.name, time.ctime(time.time()))) print(rows) row_dat = list(rows) for row in row_dat: data.append(row) # TODO: This makes a list of lists, where each inner list is a page of results... stop that! if rows.next_page_token is None: # Stop looping on the last page of results break rows = results.fetch_data( max_results=max_results, page_token=rows.next_page_token) delay_time = time_finish - time_init duration_in_s = delay_time.total_seconds() days = divmod(duration_in_s, 86400) hours = divmod(days[1], 3600) minutes = divmod(hours[1], 60) seconds = divmod(minutes[1], 1) print("=> Time Elapsed: %s Minutes %s Seconds" % (minutes[0], seconds[0])) schema = results.schema for col in schema: cols.append(col.name) tot = results.total_rows if self.verbose: print("Processed " + str(tot) + " results") return data, cols, tot def export_table(self, project, dataset, table_id, bucket, filename, format="JSON"): """ Saves a table to GCS Save the data from Google BigQuery to Google Cloud Storage (GCS) Args: project (str): The GCP Project name dataset (str): The name of the dataset containing the table in question table_id (str): The table name bucket (str): The GCS bucket location (i.e. folder) filename (str): The desired filename of the output files format (str): The format of the resulting file Returns: job_id (str): The ID of the job result (str): The final state of the job """ destination_url = "gs://{}/{}".format(bucket, filename) job_id = str(uuid.uuid4()) table_ref = self.client.dataset( dataset, project=project).table(table_id) if self.verbose: print("Starting load of {} to {} as {}".format( table_id, destination_url, job_id)) extract_job = self.client.extract_table_to_storage( job_id, table_ref, destination_url) extract_job.destination_format = "NEWLINE_DELIMITED_JSON" extract_job._build_resource() extract_job.begin() result = extract_job.result().state if self.verbose: print("Job {} is finished with a status of {}".format( job_id, result)) return job_id, result if __name__ == '__main__': print("hello")
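# --- Added usage sketch (not part of the original file) ---
# How the wrapper above might be driven end to end. The credentials file,
# project name, and SQL below are placeholders, and the class targets the
# legacy google-cloud-bigquery API (run_async_query / fetch_data), so treat
# this as illustrative rather than as a current-API example.
def example_usage():
    bq = BigQuery('service-account.json', 'my-project', name='Example')  # hypothetical values
    results = bq.run_query("SELECT 1 AS x")
    data, cols, total = bq.process_results(results, max_results=100, fetch_all=True)
    print(cols, total)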
39.123288
139
0.578315
1,011
8,568
4.765579
0.238378
0.031963
0.035492
0.026152
0.206725
0.186384
0.157327
0.157327
0.157327
0.13159
0
0.006906
0.34092
8,568
218
140
39.302752
0.84629
0.292834
0
0.182482
0
0
0.067748
0.003964
0
0
0
0.004587
0
1
0.036496
false
0
0.065693
0
0.175182
0.094891
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a3006539f6664524954b2a453831dc51fd37e12
955
py
Python
setup.py
brettlangdon/virtualmod
c3b5e64431a0ebb1759c2123698f542df3e765e3
[ "MIT" ]
1
2020-05-03T14:58:16.000Z
2020-05-03T14:58:16.000Z
setup.py
brettlangdon/virtualmod
c3b5e64431a0ebb1759c2123698f542df3e765e3
[ "MIT" ]
null
null
null
setup.py
brettlangdon/virtualmod
c3b5e64431a0ebb1759c2123698f542df3e765e3
[ "MIT" ]
null
null
null
""" virtualmod ========== """ from setuptools import setup def get_long_description(): with open('README.md') as f: rv = f.read() return rv setup( name='virtualmod', version='1.0.0', url='https://github.com/brettlangdon/virtualmod', license='MIT', author='Brett Langdon', author_email='me@brett.is', description='Python package for creating and importing virtual modules.', long_description=get_long_description(), long_description_content_type='text/markdown', py_modules=['virtualmod'], zip_safe=False, include_package_data=True, platforms='any', install_requires=[], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3', 'Programming Language :: Python', ] )
25.131579
77
0.638743
102
955
5.843137
0.745098
0.100671
0.060403
0
0
0
0
0
0
0
0
0.006702
0.218848
955
37
78
25.810811
0.792225
0.02199
0
0
0
0
0.419006
0
0
0
0
0
0
1
0.034483
false
0
0.068966
0
0.137931
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a3050f74ef9cdd8771747e7703b9bcca9f3a6f8
15,242
py
Python
model/world.py
mjsottile/pastoralscape
e2ec11d0931b10824edcdef8bd4aa7beb4dee024
[ "MIT" ]
null
null
null
model/world.py
mjsottile/pastoralscape
e2ec11d0931b10824edcdef8bd4aa7beb4dee024
[ "MIT" ]
null
null
null
model/world.py
mjsottile/pastoralscape
e2ec11d0931b10824edcdef8bd4aa7beb4dee024
[ "MIT" ]
null
null
null
########################################################################### # MIT License # # Copyright (c) 2020 Matthew Sottile # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. ########################################################################### import sys import math import geopy.distance import numpy as np import model.agents as A # {{{ path class Path: """ A path defines an ordered sequence of cell IDs that represent a path that originates and terminates at a specific cell ID. """ def __init__(self, waypoints): self.waypoints = waypoints def nextstep(self, stepid): """ Given a step in the list of waypoints, return the step number for the next step and the cellID of that step. We assume the path is a cycle, so when we hit the end of the path assume that we've returned to the beginning. """ if stepid == len(self.waypoints)-1: return (0, self.waypoints[0]) return (stepid+1, self.waypoints[stepid+1]) # }}} # {{{ grid space class GridSpace: """ A grid space in the world has some vegetation state as well as presence of water. """ # {{{ constructor def __init__(self, params): # NDVI ranges from -1.0 to 1.0 self.mean_ndvi = 0.0 self.mean_ndvi_alltime = 0.0 self.params = params self.location = None self.has_water = False self.veg_capacity = None # }}} # {{{ forage def forage(self, num_animals, dt): # units of livestock.eat : m^2 # area of cell : 1km^2 = 1000*1000 m^2 cell_area = 1e6 food_required = num_animals * self.params['livestock']['eat'] * dt frac_avail = min((cell_area * self.veg_capacity)/food_required, 1.0) food_obtained = food_required * frac_avail return food_obtained # }}} # {{{ step def step(self, time): pass # }}} # }}} # {{{ Village class Village(GridSpace): """ A village is a grid space that holds a set of individuals that do not move. A village also has a set of paths associated with it. Individuals who call the village home will follow one of these paths when they decide to move. """ # {{{ __init__ constructor def __init__(self, params): super().__init__(params) self.fixed_individuals = [] self.paths = [] # }}} # {{{ add_path def add_path(self, path): """ Add a path defining a sequence of waypoints to this village. """ self.paths.append(path) # }}} # {{{ get_path def get_path(self): """ Select a path at random. If no paths present, return None. 
""" if len(self.paths) < 1: return None path_num = np.random.randint(len(self.paths)) return self.paths[path_num] # }}} # {{{ add_individual def add_individual(self, i): """ Add an individual agent to the set of agents that are fixed at this cell. Example: heads of household. """ self.fixed_individuals.append(i) # }}} def popsize(self): return len(self.fixed_individuals) def get_individual(self, i): return self.fixed_individuals[i] # }}} # {{{ World class World: # {{{ __init__ constructor def __init__(self, model_state, w, h, d): """ create a 2d world of width w and height h kilometers. d is a default cell object constructor. this may be a lambda like: d = lambda x : GridSpace(0.0, 100.0, {'eat':1.0, 'constant_growth'=0.0, 'precip_growth'=0.1}) indexing of world cells is (lat,lon) """ self.width = w self.height = h self.model_state = model_state # GIS data store self.gis = model_state.gis # grid cell is a pair: # - a list of agents residing there # - a grid cell object self.grid = np.empty((h, w), dtype=object) for i in range(h): for j in range(w): self.grid[i, j] = ([], d((i, j))) self.grid[i, j][1].location = (i, j) # diseases are propagated by the world, so we must track them self.diseases = {} # GIS related state variables self.id_to_index = {} self.mean_fci = None self.neighbor_cache = {} # extra fields to help with efficient nearest cell lookup self.first_dim_lat = False self.lat_boundaries = None self.lon_boundaries = None self.live_cells = None # }}} # {{{ nearest_cell def nearest_cell(self, latlon): """ Find the nearest cell to a given latitude and longitude by bisection search. Two assumptions: - Latitude and longitude increase as their respective cell indices increase. (e.g., cell[i,j] < cell[i,j] w.r.t. lat or lon). - For a given column or row of cells, either the latitude or longitude remains fixed. This restricts us to grids aligned with the latitude/longitude lines. Bisection search for an m by n grid will require O(log n) + O(log m) time, versus the O(n * m) time required for brute force search through all cells. This results in a substantial speedup. """ (lat, lon) = latlon # first find lat cell coordinate n = len(self.lat_boundaries) lo = 0 hi = n-1 lat_idx = n//2 while hi-lo > 1: if lat < self.lat_boundaries[lat_idx]: hi = lat_idx else: lo = lat_idx lat_idx = (hi-lo)//2 + lo # then find lon cell coordinate n = len(self.lon_boundaries) lo = 0 hi = n-1 lon_idx = n//2 while hi-lo > 1: if lon < self.lon_boundaries[lon_idx]: hi = lon_idx else: lo = lon_idx lon_idx = (hi-lo)//2 + lo if self.first_dim_lat: return (lat_idx, lon_idx) else: return (lon_idx, lat_idx) # }}} # {{{ add_disease def add_disease(self, disease): """ Add a disease to the set that the world steps. """ self.diseases[disease.name] = disease # }}} # {{{ set_cell def set_cell(self, position, cell_obj): """ Set the cell object for a grid space to the given object. The agent set at the cell is reset to empty. The location attribute of the cell object is set to the position. """ self.grid[position] = ([], cell_obj) self.grid[position][1].location = position # }}} # {{{ move def move(self, agent, position): """ Move the agent a from its current location to the coordinate (i,j). """ # where is agent now? 
old_position = agent.location # remove agent from current location resident set (residents, _) = self.grid[old_position] residents.remove(agent) if len(residents) == 0: self.live_cells.remove(old_position) # add agent to new location resident set (residents, _) = self.grid[position] residents.append(agent) if position not in self.live_cells: self.live_cells.add(position) # set agent location to new coordinates agent.location = position # }}} # {{{ neighborhood def neighborhood(self, position, r): """ Find coordinates for all cells around position that are within the radius r. Return a list of (coordinate, distance) pairs. """ if position not in self.neighbor_cache: neighbors = [] for i in np.arange(max(0, position[0]-r), min(self.height, position[0]+r)): for j in np.arange(max(0, position[1]-r), min(self.width, position[1]+r)): # ignore (i,j) == pos if (i != position[0]) or (j != position[1]): d = math.hypot(position[0]-i, position[1]-j) #d = U.dist(pos, (i,j)) if d <= r: neighbors.append(((int(i), int(j)), d)) self.neighbor_cache[position] = neighbors else: neighbors = self.neighbor_cache[position] return neighbors # }}} # {{{ place_agent def place_agent(self, agent, position): """ Place an agent at a position. This is only used during initialization and does not update the live cell set. """ agent.location = position (residents, _) = self.grid[position] residents.append(agent) # }}} # {{{ update_vegetation def update_vegetation(self, params, date): # load monthly GIS data gis_data = self.gis.get_date(date.year, date.month) # map GIS data onto cells, calculate mean NDVI for this time period over # the world. self.world_mean_ndvi = 0.0 for cell_id in gis_data: row = gis_data[cell_id] self.grid[self.id_to_index[cell_id]][1].mean_ndvi = row['mean_ndvi'] self.world_mean_ndvi += row['mean_ndvi'] self.grid[self.id_to_index[cell_id]][1].mean_precip = row['mean_precip'] self.world_mean_ndvi = self.world_mean_ndvi / (self.width * self.height) # get FCI for current month and update veg_capacity fci_data = self.gis.get_fci_month(date.year, date.month) if fci_data is not None: for i in np.arange(self.height): for j in np.arange(self.width): cellobj = self.grid[i, j][1] cellobj.veg_capacity = fci_data[cellobj.cell_id] / self.gis.grid_fci_averages[cellobj.cell_id] # }}} # {{{ update_gis def update_gis(self, params, date): """ Update all GIS-driven state for the given date and parameter set. """ self.update_vegetation(params, date) # }}} def step(self, params, time): """ Handler for the worldstep event type. This causes all cells that contain agents to step forward with respect to foraging and disease propagation. """ # calculate the time since the last step occurred dt = time.current_step_duration() if dt==0: print(f'Already stepped! {time.current_time}') exit() return # if the live cell set has not yet been created, populate it via a traversal # of the world. after this initial creation it is maintained by the move() # function. # TODO: possibly handle this in place_agent if we know that agents are ONLY # ever placed in the grid by either place_agent or move calls. 
if self.live_cells is None: self.live_cells = set([]) for i in np.arange(self.height): for j in np.arange(self.width): (agents, _) = self.grid[i, j] if len(agents) > 0: self.live_cells.add((i, j)) for (i, j) in self.live_cells: # get the agents in each cell and the cell object (agents, cell_obj) = self.grid[i, j] # collect all of the herds colocated here herds = [] for agent in agents: if isinstance(agent, A.Herdsman): herds.append(agent.herd) # if we have at least one herd here, eat and propagate diseases if len(herds) > 0: n_animals = sum([herd.size() for herd in herds]) # make sure we don't just have empty herds if n_animals > 0: # foraging food_available = cell_obj.forage(n_animals, dt) for herd in herds: # distribute food proportionally frac_food = (herd.size()/n_animals) * food_available herd.feed(frac_food, dt) # disease spread for d in self.diseases: self.diseases[d].step(herds, time) # Record the time now to calculate the duration of time until the # next step time.last_timestep = time.current_time ### NOTE: this is equivalent to the code above except in the order by which ### cells with agents are visited due to the order that the set iterator ### traverses the set of live cells. Keep this in mind when comparing the ### two. def old_step(self, params, time): dt = time.current_step_duration() if dt==0: print(f'Already stepped! {time.current_time}') exit() return # iterate over all grid cells for i in np.arange(self.height): for j in np.arange(self.width): # get the agents in each cell and the cell object (agents, cell_obj) = self.grid[i, j] # collect all of the herds colocated here herds = [] for agent in agents: if isinstance(agent, A.Herdsman): herds.append(agent.herd) # if we have at least one herd here, eat and propagate diseases if len(herds) > 0: # feeding n_animals = sum([herd.size() for herd in herds]) # make sure we don't just have empty herds if n_animals > 0: food_available = cell_obj.forage(n_animals, dt) for herd in herds: # distribute food proportionally frac_food = (herd.size()/n_animals) * food_available herd.feed(frac_food, dt) # disease spread for d in self.diseases: self.diseases[d].step(herds, time) time.last_timestep = time.current_time # }}}
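# --- Added illustration (not part of the original module) ---
# A minimal standalone sketch of the bisection idea that World.nearest_cell
# applies independently to the latitude and longitude boundary arrays; the
# boundary values below are hypothetical and assumed sorted ascending.
import numpy as np

def bisect_boundaries(boundaries, value):
    """Return the index of the boundary interval containing value in O(log n)."""
    lo, hi = 0, len(boundaries) - 1
    idx = len(boundaries) // 2
    while hi - lo > 1:
        if value < boundaries[idx]:
            hi = idx
        else:
            lo = idx
        idx = (hi - lo) // 2 + lo
    return idx

lat_boundaries = np.linspace(0.0, 10.0, 11)  # hypothetical cell boundaries
print(bisect_boundaries(lat_boundaries, 3.7))  # -> 3, i.e., the [3.0, 4.0) interval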
35.200924
114
0.563574
1,977
15,242
4.246839
0.2261
0.003573
0.012387
0.007146
0.231896
0.204383
0.173059
0.162339
0.15829
0.15829
0
0.008128
0.338079
15,242
432
115
35.282407
0.824066
0.381446
0
0.331606
0
0
0.013049
0
0
0
0
0.002315
0
1
0.11399
false
0.005181
0.025907
0.010363
0.222798
0.010363
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a32bd2e273f336799741d7d7bf3557755f80170
3,069
py
Python
data-hub-api/apps/companieshouse/sources/matcher.py
uktrade/data-hub-api-old
5ecf093d88692870982a638ced45de6a82d55672
[ "MIT" ]
null
null
null
data-hub-api/apps/companieshouse/sources/matcher.py
uktrade/data-hub-api-old
5ecf093d88692870982a638ced45de6a82d55672
[ "MIT" ]
18
2016-04-04T12:42:45.000Z
2016-09-01T07:21:05.000Z
data-hub-api/apps/companieshouse/sources/matcher.py
uktrade/data-hub-api-old
5ecf093d88692870982a638ced45de6a82d55672
[ "MIT" ]
1
2016-06-01T15:45:21.000Z
2016-06-01T15:45:21.000Z
from django.utils.module_loading import import_string from django.utils.functional import cached_property from django.conf import settings from collections import namedtuple from .similarity import SimilarityCalculator FindingResult = namedtuple( 'FindingResult', ['company_number', 'name', 'postcode', 'proximity', 'raw'] ) class BaseMatcher(object): """ Base Matcher class to be subclassed to add actual logic. e.g. matcher = MyMatcher(name, postcode) best_match = matcher.find() # returns the best match, an instance of FindingResult matcher.findings # if you want the full list considered internally for debug purposes """ def __init__(self, name, postcode): super(BaseMatcher, self).__init__() self.name = name self.postcode = postcode self.findings = None def _get_similarity_proximity(self, other_name, other_postcode): similarity_calc = SimilarityCalculator() similarity_calc.analyse_names(self.name, other_name) similarity_calc.analyse_postcodes(self.postcode, other_postcode) return similarity_calc.get_proximity() def _choose_best_finding(self): assert self.findings is not None if not self.findings: return None return max(self.findings, key=lambda x: x.proximity) def _get_ch_postcode(self, ch_data): for prop in ['registered_office_address', 'address']: if prop in ch_data: return ch_data.get(prop, {}).get('postal_code') return None def _build_findings(self): """ To be overridden when subclassing. """ pass def find(self): """ Builds the findings and returns the best match which is an instance of FindingResult. """ self._build_findings() return self._choose_best_finding() class MatcherHelper(object): """ Helper that can be used to get the Companies House matches. """ @cached_property def matcher_classes(self): _matcher_classes = [] for matcher_path in settings.MATCHER_CLASSES: _matcher_classes.append(import_string(matcher_path)) return _matcher_classes def find_match(self, company_name, company_postcode): """ Returns the first match (not the best one as that can be computationally expensive) if found or None otherwise. The match is of type `FindingResult`. It goes through the list of MATCHER_CLASSES in order and uses the matcher classes one by one until one acceptable match is found. The acceptance level can be set using the setting `MATCHER_ACCEPTANCE_PROXIMITY`. """ for matcher_class in self.matcher_classes: matcher = matcher_class(company_name, company_postcode) best_match = matcher.find() if best_match and best_match.proximity >= settings.MATCHER_ACCEPTANCE_PROXIMITY: return best_match return None matcher_helper = MatcherHelper()
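# --- Added sketch (not part of the original module) ---
# BaseMatcher leaves _build_findings to subclasses. A minimal concrete matcher
# over an in-memory candidate list, purely for illustration (the candidate
# records below are hypothetical):
class ListMatcher(BaseMatcher):
    CANDIDATES = [
        ('00000001', 'ACME LTD', 'SW1A 1AA'),
        ('00000002', 'ACME TRADING LTD', 'SW1A 2BB'),
    ]

    def _build_findings(self):
        self.findings = []
        for number, name, postcode in self.CANDIDATES:
            proximity = self._get_similarity_proximity(name, postcode)
            self.findings.append(
                FindingResult(number, name, postcode, proximity, None))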
33
111
0.676442
368
3,069
5.434783
0.345109
0.056
0.015
0.024
0.028
0
0
0
0
0
0
0
0.252851
3,069
92
112
33.358696
0.87222
0.270446
0
0.06
0
0
0.044955
0.011956
0
0
0
0
0.02
1
0.16
false
0.02
0.12
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a32d2b8b29bbdd6dc04d32db29e512a25461d5b
375
py
Python
Python/[7 kyu] sliding puzzle verification.py
KonstantinosAng/CodeWars
9ec9da9ed95b47b9656a5ecf77f486230fd15e3a
[ "MIT" ]
null
null
null
Python/[7 kyu] sliding puzzle verification.py
KonstantinosAng/CodeWars
9ec9da9ed95b47b9656a5ecf77f486230fd15e3a
[ "MIT" ]
null
null
null
Python/[7 kyu] sliding puzzle verification.py
KonstantinosAng/CodeWars
9ec9da9ed95b47b9656a5ecf77f486230fd15e3a
[ "MIT" ]
null
null
null
# see https://www.codewars.com/kata/5e28b3ff0acfbb001f348ccc/solutions/python from TestFunction import Test def is_solved(board): array = [] for row in board: for col in row: array.append(col) return array == [x for x in range(len(board)*len(board))] Test(True).assert_result(is_solved([[0,1],[2,3]])) Test(False).assert_result(is_solved([[1,0],[3,2]]))
26.785714
77
0.690667
60
375
4.233333
0.583333
0.094488
0.110236
0.15748
0
0
0
0
0
0
0
0.058824
0.138667
375
14
78
26.785714
0.727554
0.2
0
0
0
0
0
0
0
0
0
0
0.222222
1
0.111111
false
0
0.111111
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a3328a42245519fb9c9d550950b1f37a248bf94
664
py
Python
main.py
Sengolda/multi-dicts
fee5d038af93a9e59d84621f136723c0095a9905
[ "BSD-2-Clause" ]
null
null
null
main.py
Sengolda/multi-dicts
fee5d038af93a9e59d84621f136723c0095a9905
[ "BSD-2-Clause" ]
null
null
null
main.py
Sengolda/multi-dicts
fee5d038af93a9e59d84621f136723c0095a9905
[ "BSD-2-Clause" ]
null
null
null
class MultiDicts(list): def __init__(self, per_dict: int): self.per_dict = per_dict super().__init__() def append(self, __object) -> None: if not isinstance(__object, dict): raise RuntimeError("Objects appended to a MultiDicts list must be a dict.") return super().append(__object) def sort_dicts(self): # For each dict that has reached per_dict keys, move one key/value pair out into a freshly appended dict (one move per dict per call) for d in self: if len(d.keys()) >= self.per_dict: for k, elem in d.items(): _key = k del d[_key] new = dict() new[k] = elem self.append(new) break
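# --- Added usage sketch (not part of the original file) ---
# append() enforces that only dicts are stored; sort_dicts() moves one
# key/value pair out of each dict that has reached per_dict keys into a
# freshly appended dict.
md = MultiDicts(per_dict=2)
md.append({'a': 1, 'b': 2})
md.sort_dicts()
print(md)  # [{'b': 2}, {'a': 1}] -- 'a' was split out into a new dict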
31.619048
86
0.489458
77
664
3.948052
0.519481
0.092105
0.108553
0
0
0
0
0
0
0
0
0
0.412651
664
20
87
33.2
0.779487
0
0
0
0
0
0.078313
0
0
0
0
0
0
1
0.166667
false
0
0
0
0.277778
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a336fe69ab1c2a185a2811406696a821b1c5698
7,071
py
Python
src/data_conversion.py
DIAGNijmegen/adhesion_detection
21a9c810a4dee3c640d31f30ee5fdff1bbce9146
[ "Apache-2.0" ]
2
2021-10-08T13:14:49.000Z
2022-03-18T17:53:45.000Z
src/data_conversion.py
DIAGNijmegen/adhesion_detection
21a9c810a4dee3c640d31f30ee5fdff1bbce9146
[ "Apache-2.0" ]
6
2021-10-12T20:55:53.000Z
2021-10-12T21:03:45.000Z
src/data_conversion.py
DIAGNijmegen/adhesion_detection
21a9c810a4dee3c640d31f30ee5fdff1bbce9146
[ "Apache-2.0" ]
null
null
null
#!/usr/local/bin/python3 import sys import random import argparse from pathlib import Path import numpy as np from skimage import io import SimpleITK as sitk from cinemri.utils import get_patients def convert_2d_image_file_to_pseudo_3d(input_file_path, spacing=[999, 1, 1], is_seg=False): """Reads an image (must be .npy or a format recognized by skimage) and converts it into a series of niftis. The input image should be grayscale. Parameters ---------- input_file_path : Path A path to image to convert spacing : list, default=[999, 1, 1] is_seg : bool, default=False Indicates if the specified image is a segmentation mask Returns ------- SimpleITK Image An image converted to pseudo 2d format suitable for nnU-Net """ img = np.load(input_file_path) if input_file_path.suffix == ".npy" else io.imread(input_file_path) return convert_2d_image_to_pseudo_3d(img, spacing, is_seg) def convert_2d_image_to_pseudo_3d(image, spacing=[999, 1, 1], is_seg=False): """ Taken from https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/utilities/file_conversions.py and slightly modified Converts an image into a series of niftis. The image should be grayscale !!!2D images are often natural images which do not have a voxel spacing that could be used for resampling. These images must be resampled by you prior to converting them to nifti!!! Datasets converted with this utility can only be used with the 2d U-Net configuration of nnU-Net Segmentations will be converted to np.uint32! Parameters ---------- image : ndarray An image to convert spacing : list, default=[999, 1, 1] is_seg : bool, default=False Indicates if the specified image is a segmentation mask Returns ------- SimpleITK Image An image converted to pseudo 2d format suitable for nnU-Net """ assert len(image.shape) == 2, 'images should be grayscale' image = image[None] # add dimension if is_seg: image = image.astype(np.uint32) itk_image = sitk.GetImageFromArray(image) itk_image.SetSpacing(spacing[::-1]) return itk_image def subset_to_diag_nnunet(patients, segmentation_path, target_path, images_folder="images", masks_folder="masks", is_train=True): """Saves images and masks for training or test subset of patients Parameters ---------- patients : list of Patients A list of patients in the subset segmentation_path : Path A path to the segmentation dataset target_path : Path A path to save the subset images_folder : str, default="images" An images folder name masks_folder : str, default="masks" A masks folder name is_train : bool, default=True A boolean flag indicating if it is a training subset """ # Create folders of the subset target_path.mkdir(exist_ok=True) train_path_images = target_path / images_folder train_path_images.mkdir(exist_ok=True) train_path_masks = target_path / masks_folder train_path_masks.mkdir(exist_ok=True) # Extract and save files related to the specified patients list for patient in patients: for slice in patient.cinemri_slices: extension = ".mha" if is_train else ".nii.gz" image_id = slice.full_id if is_train else (slice.full_id + "_0000") image_stem = train_path_images / image_id slice_image_path = slice.build_path(segmentation_path / images_folder, extension=".npy") img_pseudo_3d = convert_2d_image_file_to_pseudo_3d(slice_image_path) sitk.WriteImage(img_pseudo_3d, str(image_stem) + extension) mask_stem = train_path_masks / slice.full_id slice_mask_path = slice.build_path(segmentation_path / masks_folder, extension=".npy") mask_pseudo_3d = convert_2d_image_file_to_pseudo_3d(slice_mask_path, is_seg=True) sitk.WriteImage(mask_pseudo_3d,
str(mask_stem) + extension) def convert_to_diag_nnunet(segmentation_path, target_path, train_folder="train", images_folder="images", masks_folder="masks"): """Converts the segmentation data subset to a diag nnU-Net input format This format is expected by the prepare method of a diag nnU-Net that converts it to the nnU-Net input format Parameters ---------- segmentation_path : Path A path to a segmentation subset of cine-MRI data target_path : Path A destination path to save converted files images_folder : str, default="images" A name of a folder that contains scans inside the archive masks_folder : str, default="masks" A name of a folder that contains masks inside the archive train_folder : str, default="train" A name of a folder with training data """ # Make directories to save converted images target_path.mkdir(exist_ok=True) patients = get_patients(segmentation_path / images_folder, slice_extension=".npy") # Convert training data subset_to_diag_nnunet(patients, segmentation_path, target_path / train_folder, images_folder, masks_folder) def to_diag_nnunet(argv): """A command line wrapper of convert_to_diag_nnunet Parameters ---------- argv : list of str """ parser = argparse.ArgumentParser() parser.add_argument('segmentation_path', type=str, help="path to a segmentation subset of cine-MRI data") parser.add_argument('target_path', type=str, help="a destination path to save converted files") parser.add_argument('--images', type=str, default="images", help="a folder inside the archive, which contains scans") parser.add_argument('--masks', type=str, default="masks", help="a folder inside the archive, which contains masks") parser.add_argument('--train', type=str, default="train", help="a name of a folder with training data") args = parser.parse_args(argv) segmentation_path = Path(args.segmentation_path) target_path = Path(args.target_path) images_folder = args.images masks_folder = args.masks train_folder = args.train convert_to_diag_nnunet(segmentation_path, target_path, train_folder, images_folder, masks_folder) if __name__ == '__main__': np.random.seed(99) random.seed(99) # Very first argument determines action actions = { "to_diag_nnunet": to_diag_nnunet } try: action = actions[sys.argv[1]] except (IndexError, KeyError): print('Usage: data_conversion ' + '/'.join(actions.keys()) + ' ...') else: action(sys.argv[2:])
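# --- Added illustration (not part of the original script, using the module's imports above) ---
# The pseudo-3D conversion on its own: a 2D array gains a leading singleton
# axis and a large z-spacing so nnU-Net treats it as 2D data. The array here
# is synthetic.
example_img = np.random.rand(64, 64).astype(np.float32)
example_itk = convert_2d_image_to_pseudo_3d(example_img, spacing=[999, 1, 1])
print(example_itk.GetSize())     # (64, 64, 1) -- x, y, z in SimpleITK order
print(example_itk.GetSpacing())  # (1.0, 1.0, 999.0)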
35.00495
123
0.657757
938
7,071
4.760128
0.230277
0.029115
0.021501
0.029115
0.362374
0.343113
0.250168
0.206271
0.174916
0.140426
0
0.010547
0.262481
7,071
201
124
35.179104
0.845638
0.373356
0
0.154762
0
0
0.102955
0
0
0
0
0
0.011905
1
0.059524
false
0
0.095238
0
0.178571
0.011905
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a34d6b8e1e6bcba3c0ecf9d4504a85ed39f6569
1,013
py
Python
cogs/other/msgs.py
Iapetus-11/Villager-Bot-OLD
f220b76d2052810deb87fbacbe5400f8d2f94dd2
[ "MIT" ]
2
2021-06-03T02:31:32.000Z
2021-06-03T04:05:10.000Z
cogs/other/msgs.py
Iapetus-11/Villager-Bot-OLD
f220b76d2052810deb87fbacbe5400f8d2f94dd2
[ "MIT" ]
null
null
null
cogs/other/msgs.py
Iapetus-11/Villager-Bot-OLD
f220b76d2052810deb87fbacbe5400f8d2f94dd2
[ "MIT" ]
null
null
null
from discord.ext import commands import discord from random import choice class Msgs(commands.Cog): def __init__(self, bot): self.bot = bot self.db = self.bot.get_cog("Database") self.g = self.bot.get_cog("Global") @commands.Cog.listener() async def on_message(self, message): self.g.msg_count += 1 await self.db.incrementVaultMax(message.author.id) # Only reply handling past this point if message.author.bot: return if "emerald" in message.content.lower() or "villager bot" in message.content.lower() or self.bot.user.mentioned_in(message): if message.guild is None or await self.db.getDoReplies(message.guild.id): try: await message.channel.send(choice(["hrmm", "hmm", "hrmmm", "hrghhmmm", "hrhhmmmmmmmmm", "hrmmmmmm", "hrmmmmmmmmmm", "hrmmmmm"])) except discord.errors.Forbidden: pass def setup(bot): bot.add_cog(Msgs(bot))
32.677419
148
0.624877
128
1,013
4.867188
0.523438
0.05618
0.032103
0.041734
0.073836
0
0
0
0
0
0
0.00133
0.257651
1,013
30
149
33.766667
0.827128
0.036525
0
0
0
0
0.095483
0
0
0
0
0
0
1
0.090909
false
0.045455
0.136364
0
0.318182
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a3700fabdd0687815cd55f69dfb8b0e7524f63a
1,962
py
Python
src/component.py
jordanrburger/keboola.app-hightouch
840cd513c463719f5caf2e01b8cd8a98ddacd7a1
[ "MIT" ]
1
2022-03-09T22:00:52.000Z
2022-03-09T22:00:52.000Z
src/component.py
jordanrburger/keboola.app-hightouch
840cd513c463719f5caf2e01b8cd8a98ddacd7a1
[ "MIT" ]
null
null
null
src/component.py
jordanrburger/keboola.app-hightouch
840cd513c463719f5caf2e01b8cd8a98ddacd7a1
[ "MIT" ]
null
null
null
""" Template Component main class. """ import logging from keboola.component.base import ComponentBase from keboola.component.exceptions import UserException from hightouch import hightouchClient # configuration variables KEY_API_TOKEN = '#api_token' KEY_ENDPOINT = 'endpoint' KEY_SYNC_ID = 'sync_id' # list of mandatory parameters => if some is missing, # component will fail with readable message on initialization. REQUIRED_PARAMETERS = [KEY_API_TOKEN, KEY_ENDPOINT, KEY_SYNC_ID] REQUIRED_IMAGE_PARS = [] class Component(ComponentBase): """ Extends base class for general Python components. Initializes the CommonInterface and performs configuration validation. For easier debugging the data folder is picked up by default from `../data` path, relative to working directory. If `debug` parameter is present in the `config.json`, the default logger is set to verbose DEBUG mode. """ def __init__(self): super().__init__() def run(self): """ Main execution code """ # ####### EXAMPLE TO REMOVE # check for missing configuration parameters self.validate_configuration_parameters(REQUIRED_PARAMETERS) self.validate_image_parameters(REQUIRED_IMAGE_PARS) params = self.configuration.parameters endpoint = params.get(KEY_ENDPOINT) client = hightouchClient(params.get(KEY_API_TOKEN)) if endpoint == "Run Sync": sync_id = params.get(KEY_SYNC_ID) response = client.run_sync(sync_id) logging.logger.info(response) if __name__ == "__main__": try: comp = Component() # this triggers the run method by default and is controlled by the configuration.action parameter comp.execute_action() except UserException as exc: logging.exception(exc) exit(1) except Exception as exc: logging.exception(exc) exit(2)
30.184615
110
0.689093
230
1,962
5.673913
0.46087
0.027586
0.025287
0.029119
0.042912
0.042912
0
0
0
0
0
0.001332
0.234964
1,962
64
111
30.65625
0.868088
0.349134
0
0.0625
0
0
0.034687
0
0
0
0
0
0
1
0.0625
false
0
0.125
0
0.21875
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a380506151d373d6c2fd9da3ee6f8eee93ad47d
13,962
py
Python
magmap/cv/chunking.py
kaparna126/magellanmapper
6a50e82b3bcdbbb4706f749f366b055f0c6f13f2
[ "BSD-3-Clause" ]
null
null
null
magmap/cv/chunking.py
kaparna126/magellanmapper
6a50e82b3bcdbbb4706f749f366b055f0c6f13f2
[ "BSD-3-Clause" ]
null
null
null
magmap/cv/chunking.py
kaparna126/magellanmapper
6a50e82b3bcdbbb4706f749f366b055f0c6f13f2
[ "BSD-3-Clause" ]
null
null
null
# Chunking image stacks # Author: David Young, 2017, 2020 """Divides a region into smaller chunks and reassembles it.""" import multiprocessing as mp import numpy as np from magmap.settings import config from magmap.cv import detector from magmap.io import libmag #: int: Factor to multiply by scaling for maximum number of pixels per # sub ROI for overlap. OVERLAP_FACTOR = 5 def set_mp_start_method(val=None): """Set the multiprocessing start method. If the start method has already been applied, will skip. Args: val (str): Start method to set; defaults to None to use the default for the platform. If the given method is not available for the platform, the default method will be used instead. Returns: str: The applied start method. """ if val is None: val = config.roi_profile["mp_start"] avail_start_methods = mp.get_all_start_methods() if val not in avail_start_methods: val = avail_start_methods[0] try: mp.set_start_method(val) print("set multiprocessing start method to", val) except RuntimeError: print("multiprocessing start method already set to {}, will skip" .format(mp.get_start_method(False))) return val def is_fork(): """Check if the multiprocessing start method is set to "fork". Returns: bool: True if the start method is "fork", False if otherwise. """ return mp.get_start_method(False) == "fork" def get_mp_pool(): """Get a multiprocessing ``Pool`` object, configured based on ``config`` settings. Returns: :obj:`multiprocessing.Pool`: Pool object with number of processes and max tasks per process determined by command-line and the main (first) ROI profile settings. """ prof = config.get_roi_profile(0) max_tasks = None if not prof else prof["mp_max_tasks"] print("Setting up multiprocessing pool with {} processes (None uses all " "available)\nand max tasks of {} before replacing processes (None " "does not replace processes)".format(config.cpus, max_tasks)) return mp.Pool(processes=config.cpus, maxtasksperchild=max_tasks) def calc_overlap(): """Calculate overlap based on scaling factor and :const:``OVERLAP_FACTOR``. Returns: Overlap as an array in the same shape and order as in :attr:``detector.resolutions``. """ return np.ceil(np.multiply(detector.calc_scaling_factor(), OVERLAP_FACTOR)).astype(int) def _num_units(size, max_pixels): """Calculates the shape of sub-regions that would comprise a total shape of ``size`` with ``max_pixels`` per dimension. Args: size (List[int]): Shape of the entire region. max_pixels (int): Max number of pixels per dimension. Returns: :obj:`np.ndarray`: Sequence of number of sub-regions for each dimension in ``size``. """ num = np.floor_divide(size, max_pixels) num[np.remainder(size, max_pixels) > 0] += 1 return num.astype(int) def _bounds_side(size, max_pixels, overlap, coord, axis): """Calculates the boundaries of a side based on where in the ROI the current sub-ROI is. Args: size: Size in (z, y, x) order. max_pixels: Maximum pixels per dimension, in (z, y, x) order. overlap: Overlap size between sub-ROIs. coord: Coordinates of the sub-ROI, in (z, y, x) order. axis: The axis to calculate. Returns: int, int: Boundary of sides for the given ``axis`` as ``start, end``. """ pixels = max_pixels[axis] start = coord[axis] * pixels end = start + pixels if overlap is not None: end += overlap[axis] if end > size[axis]: end = size[axis] return int(start), int(end) def stack_splitter(shape, max_pixels, overlap=None): """Split a stack into multiple sub regions. Args: shape (Tuple[int]): Shape of the stack to split. max_pixels (Tuple[int]): Max pixels for each side in (z, y, x) order.
overlap (Tuple[int]): Overlap size between sub-ROIs. Defaults to None for no overlap. Return: :obj:`np.ndarray`, :obj:`np.ndarray`: Tuple of ``sub_roi_slices, sub_rois_offsets``, where ``sub_roi_slices`` is a Numpy object array where each element contains a tuple of slice objects defining the corresponding sub-region at that position, and ``sub_rois_offsets`` is a Numpy array of corresponding offsets for each sub-ROI in (z, y, x) order coordinates. """ # prepare the array containing sub ROI slices with type object so that it # can contain an arbitrary object of any size and channels, accessible by # (z, y, x) coordinates of the chunk, as well as offset for # coordinates of bottom corner for each sub ROI for transposing later num_units = _num_units(shape[:3], max_pixels) #print("num_units: {}".format(num_units)) sub_rois_slices = np.zeros(num_units, dtype=object) sub_rois_offsets = np.zeros(np.append(num_units, 3)) # fill with sub ROIs including overlap extending into next sub ROI # except for the last one in each dimension for z in range(num_units[0]): for y in range(num_units[1]): for x in range(num_units[2]): coord = (z, y, x) bounds = [_bounds_side(shape, max_pixels, overlap, coord, axis) for axis in range(3)] #print("bounds: {}".format(bounds)) sub_rois_slices[coord] = ( slice(*bounds[0]), slice(*bounds[1]), slice(*bounds[2])) sub_rois_offsets[coord] = ( bounds[0][0], bounds[1][0], bounds[2][0]) return sub_rois_slices, sub_rois_offsets def merge_split_stack(sub_rois, overlap): """Merges sub regions back into a single stack. See :func:``merge_split_stack2`` for a much faster implementation if the final output array size is known beforehand. Args: sub_rois: Array of sub regions, in (z, y, x, ...) dimensions. overlap: Overlap size between sub-ROIs. Return: The merged stack. """ size = sub_rois.shape merged = None if overlap.dtype != int: overlap = overlap.astype(int) for z in range(size[0]): merged_y = None for y in range(size[1]): merged_x = None for x in range(size[2]): coord = (z, y, x) sub_roi = sub_rois[coord] edges = list(sub_roi.shape[0:3]) # remove overlap if not at last sub_roi or row or column for n in range(len(edges)): if coord[n] != size[n] - 1: edges[n] -= overlap[n] sub_roi = sub_roi[:edges[0], :edges[1], :edges[2]] # add back the non-overlapping region to build an x-direction # array, using concatenate to avoid copying the original array if merged_x is None: merged_x = sub_roi else: merged_x = np.concatenate((merged_x, sub_roi), axis=2) # add back non-overlapping regions from each x to build xy if merged_y is None: merged_y = merged_x else: merged_y = np.concatenate((merged_y, merged_x), axis=1) # add back non-overlapping regions from xy to build xyz if merged is None: merged = merged_y else: merged = np.concatenate((merged, merged_y), axis=0) return merged def get_split_stack_total_shape(sub_rois, overlap=None): """Get the shape of a chunked stack. Useful for determining the final shape of a stack that has been chunked and potentially scaled before merging the stack into an output array of this shape. Args: sub_rois: Array of sub regions, in (z, y, x, ...) dimensions. overlap: Overlap size between sub-ROIs. Defaults to None for no overlap. Returns: The shape of the chunked stack after it would be merged.
""" size = sub_rois.shape shape_sub_roi = sub_rois[0, 0, 0].shape # for number of dimensions merged_shape = np.zeros(len(shape_sub_roi)).astype(np.int) final_shape = np.zeros(len(shape_sub_roi)).astype(np.int) edges = None for z in range(size[0]): for y in range(size[1]): for x in range(size[2]): coord = (z, y, x) sub_roi = sub_rois[coord] edges = list(sub_roi.shape[0:3]) if overlap is not None: # remove overlap if not at last sub_roi or row or column for n in range(len(edges)): if coord[n] != size[n] - 1: edges[n] -= overlap[n] #print("edges: {}".format(edges)) merged_shape[2] += edges[2] if final_shape[2] <= 0: final_shape[2] = merged_shape[2] merged_shape[1] += edges[1] if final_shape[1] <= 0: final_shape[1] = merged_shape[1] final_shape[0] += edges[0] channel_dim = 3 if len(shape_sub_roi) > channel_dim: final_shape[channel_dim] = shape_sub_roi[channel_dim] libmag.printv("final_shape: {}".format(final_shape)) return final_shape def merge_split_stack2(sub_rois, overlap, offset, output): """Merges sub regions back into a single stack, saving directly to an output variable such as a memmapped array. Args: sub_rois: Array of sub regions, in (z, y, x, ...) dimensions. overlap: Overlap size between sub-ROIs given as an int seq in z,y,x. Can be None for no overlap. offset: Axis offset for output array. output: Output array, such as a memmapped array to bypass storing the merged array in RAM. Return: The merged stack. """ size = sub_rois.shape merged_coord = np.zeros(3, dtype=np.int) sub_roi_shape = sub_rois[0, 0, 0].shape if offset > 0: # axis offset, such as skipping the time axis output = output[0] for z in range(size[0]): merged_coord[1] = 0 for y in range(size[1]): merged_coord[2] = 0 for x in range(size[2]): coord = (z, y, x) sub_roi = sub_rois[coord] edges = list(sub_roi.shape[0:3]) if overlap is not None: # remove overlap if not at last sub_roi or row or column for n in range(len(edges)): if coord[n] != size[n] - 1: edges[n] -= overlap[n] sub_roi = sub_roi[:edges[0], :edges[1], :edges[2]] output[merged_coord[0]:merged_coord[0]+edges[0], merged_coord[1]:merged_coord[1]+edges[1], merged_coord[2]:merged_coord[2]+edges[2]] = sub_roi merged_coord[2] += sub_roi_shape[2] merged_coord[2] = 0 merged_coord[1] += sub_roi_shape[1] merged_coord[1] = 0 merged_coord[0] += sub_roi_shape[0] def merge_blobs(blob_rois): """Combine all blobs into a master list so that each overlapping region will contain all blobs from all sub-ROIs that had blobs in those regions, obviating the need to pair sub-ROIs with one another. Args: blob_rois (:obj:`np.ndarray`): Blob from each sub-region defined by :meth:`stack_splitter`. Blobs are assumed to be a 2D array in the format ``[[z, y, x, ...], ...]``. Returns: :obj:`np.ndarray`: Merged blobs in 2D format of the format, ``[[z, y, x, ..., sub_roi_z, sub_roi_y, sub_roi_x], ...]``, where sub-ROI coordinates have been added as the final columns. 
""" blobs_all = [] for z in range(blob_rois.shape[0]): for y in range(blob_rois.shape[1]): for x in range(blob_rois.shape[2]): coord = (z, y, x) blobs = blob_rois[coord] #print("checking blobs in {}:\n{}".format(coord, blobs)) if blobs is None: libmag.printv("no blobs to add, skipping") else: # add temporary tag with sub-ROI coordinate extras = np.zeros((blobs.shape[0], 3), dtype=int) extras[:] = coord blobs = np.concatenate((blobs, extras), axis=1) blobs_all.append(blobs) if len(blobs_all) > 0: blobs_all = np.vstack(blobs_all) else: blobs_all = None return blobs_all def main(): """Test splitting and remerging.""" config.resolutions = [[6.6, 1.1, 1.1]] roi = np.arange(5 * 4 * 4) roi = roi.reshape((5, 4, 4)) print("roi:\n{}".format(roi)) overlap = calc_overlap() sub_roi_slices, sub_rois_offsets = stack_splitter(roi.shape, [1, 2, 2]) print("sub_rois shape: {}".format(sub_roi_slices.shape)) print("sub_rois:\n{}".format(sub_roi_slices)) print("overlap: {}".format(overlap)) print("sub_rois_offsets:\n{}".format(sub_rois_offsets)) for z in range(sub_roi_slices.shape[0]): for y in range(sub_roi_slices.shape[1]): for x in range(sub_roi_slices.shape[2]): coord = (z, y, x) sub_roi_slices[coord] = roi[sub_roi_slices[coord]] merged = merge_split_stack(sub_roi_slices, overlap) print("merged:\n{}".format(merged)) print("merged shape: {}".format(merged.shape)) print("test roi == merged: {}".format(np.all(roi == merged))) if __name__ == "__main__": print("Starting chunking...") main()
37.331551
80
0.59268
1,949
13,962
4.118009
0.156491
0.035883
0.006354
0.004984
0.233865
0.197982
0.154373
0.13531
0.126339
0.115873
0
0.013488
0.304398
13,962
373
81
37.431635
0.812912
0.384114
0
0.252688
0
0
0.057146
0.002603
0
0
0
0
0
1
0.064516
false
0
0.026882
0
0.145161
0.075269
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a3cd4655928f0760038a8c3731fb3de29d231a8
1,680
py
Python
tests/test_redshift.py
InnovativeTravel/boto3facade
9ea61fb07981939340db652b4baa5cd23d15e1f8
[ "MIT" ]
1
2016-06-20T00:59:53.000Z
2016-06-20T00:59:53.000Z
tests/test_redshift.py
InnovativeTravel/boto3facade
9ea61fb07981939340db652b4baa5cd23d15e1f8
[ "MIT" ]
null
null
null
tests/test_redshift.py
InnovativeTravel/boto3facade
9ea61fb07981939340db652b4baa5cd23d15e1f8
[ "MIT" ]
1
2022-03-20T07:46:38.000Z
2022-03-20T07:46:38.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- import pytest import uuid import boto3facade.redshift as rs import boto3facade.ec2 as ec2 from collections import namedtuple @pytest.fixture(scope='module') def temp_creds(): key_id = str(uuid.uuid4()) secret_key = str(uuid.uuid4()) token = str(uuid.uuid4()) TemporaryCredentials = namedtuple('TemporaryCredentials', 'key_id secret_key token') yield TemporaryCredentials(key_id, secret_key, token) @pytest.fixture(scope='module') def local_creds(): key_id = str(uuid.uuid4()) secret_key = str(uuid.uuid4()) LocalCredentials = namedtuple('LocalCredentials', 'key_id secret_key') yield LocalCredentials(key_id, secret_key) @pytest.fixture(scope='module') def redshift(): return rs.Redshift() def test_get_temp_copy_credentials(redshift, monkeypatch, temp_creds): monkeypatch.setattr(ec2, 'get_temporary_credentials', lambda: temp_creds) creds = redshift.get_copy_credentials() assert creds == ( "aws_access_key_id={};" "aws_secret_access_key={};" "token={}").format(temp_creds.key_id, temp_creds.secret_key, temp_creds.token) def test_get_local_copy_credentials(redshift, monkeypatch, local_creds): monkeypatch.setattr('boto3facade.ec2.get_temporary_credentials', lambda: None) monkeypatch.setattr(redshift, 'get_credentials', lambda: local_creds) creds = redshift.get_copy_credentials() assert creds == "aws_access_key_id={};aws_secret_access_key={}".format( local_creds.key_id, local_creds.secret_key)
32.307692
75
0.691667
200
1,680
5.52
0.24
0.04529
0.054348
0.050725
0.423913
0.311594
0.240942
0.240942
0.240942
0.240942
0
0.009637
0.197024
1,680
51
76
32.941176
0.808747
0.025
0
0.210526
0
0
0.167482
0.095966
0
0
0
0
0.052632
1
0.131579
false
0
0.131579
0.026316
0.289474
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a3d1f47eec662cdcf16da3319a5effc2c4acae6
5,510
py
Python
graphrole/features/prune.py
bingrao/GraphRole
3439cd000e256e7b84159e7083debe7873fca61e
[ "MIT" ]
60
2019-03-07T15:15:49.000Z
2022-03-25T19:21:18.000Z
graphrole/features/prune.py
bingrao/GraphRole
3439cd000e256e7b84159e7083debe7873fca61e
[ "MIT" ]
5
2020-08-24T08:51:40.000Z
2021-12-08T06:50:47.000Z
graphrole/features/prune.py
bingrao/GraphRole
3439cd000e256e7b84159e7083debe7873fca61e
[ "MIT" ]
17
2019-08-02T04:15:50.000Z
2022-03-16T21:32:23.000Z
import itertools as it
from typing import Dict, Iterator, List, Set, TypeVar

import numpy as np
from scipy.spatial.distance import pdist

from graphrole.graph.graph import AdjacencyDictGraph
from graphrole.types import DataFrameDict, DataFrameLike, VectorLike

T = TypeVar('T', int, str)


def vertical_log_binning(arr: VectorLike, frac: float = 0.5) -> VectorLike:
    """
    Reassigns values of an array into vertical logarithmic bins
    :param arr: array to be binned
    :param frac: value in (0, 1) defining fraction of values assigned to each bin
    """
    if not 0 < frac < 1:
        raise ValueError('must specify frac in interval (0, 1)')

    arr_len = len(arr)
    # plain int rather than the removed np.int alias
    binned = np.zeros(arr_len, dtype=int)

    # get sorted unique values and counts in arr
    arr_uniq, counts = np.unique(arr, return_counts=True)
    # convert to cumulative counts
    counts = np.cumsum(counts)

    # initial iteration parameters
    binned_len = 0           # length of binned portion of arr
    unbinned_len = arr_len   # length of unbinned portion of arr
    bin_min = -np.inf        # left side value of current bin (exclusive)

    for bin_val in range(arr_len):
        # bin size is fraction frac of the unbinned len (enforce at least 1)
        bin_size = max(int(frac * unbinned_len), 1)
        # get index of largest unique value from arr to be included in bin
        u_idx = np.searchsorted(counts, binned_len + bin_size)
        bin_max = arr_uniq[u_idx]
        # mark members of current bin with bin_val
        arr_idx = np.logical_and(arr > bin_min, arr <= bin_max)
        binned[arr_idx] = bin_val
        # update iteration parameters
        binned_len += sum(arr_idx)
        unbinned_len = arr_len - binned_len
        bin_min = bin_max
        # check if all values have been binned
        if unbinned_len == 0:
            break

    return binned


class FeaturePruner:
    """ Determines redundant features to be removed from future recursive aggregations """

    def __init__(
        self,
        generation_dict: Dict[int, DataFrameDict],
        feature_group_thresh: int
    ) -> None:
        """
        :param generation_dict: mapping of recursive generation number to dict
            of {features: {node: values}}
        :param feature_group_thresh: distance threshold for grouping binned
            versions of features
        """
        self._generation_dict = generation_dict
        self._feature_group_thresh = feature_group_thresh

    def prune_features(self, features: DataFrameLike) -> List[str]:
        """
        Eliminate redundant features from the current iteration by identifying
        features in connected components of a feature graph and replacing each
        component with its oldest (i.e., earliest generation) member feature
        :param features: DataFrame of features
        """
        features_to_drop = []
        groups = self._group_features(features)
        for group in groups:
            # an isolated feature should not be pruned
            if len(group) == 1:
                continue
            oldest = self._get_oldest_feature(group)
            to_drop = group - {oldest}
            features_to_drop.extend(to_drop)
        return features_to_drop

    def _group_features(self, features: DataFrameLike) -> Iterator[Set[str]]:
        """
        Group features according to connected components of the feature graph
        induced by pairwise distances below the distance threshold
        :param features: DataFrame of features
        """
        # apply binning to features
        # note that some (non-pruned) features will be rebinned each time when this class is
        # used for pruning multiple generations of features, but this slight inefficiency avoids
        # maintaining binned features in the state of the feature extraction class and is thus an
        # intentional tradeoff
        binned_features = features.apply(vertical_log_binning)
        # get condensed vector of pairwise distances measuring
        # max_i |u[i] - v[i]| for features u, v
        dists = pdist(binned_features.T, metric='chebychev')
        # construct feature graph by connecting features within
        # dist_thresh of each other and return connected components
        nodes = binned_features.columns
        all_edges = it.combinations(nodes, 2)
        edges = it.compress(all_edges, dists <= self._feature_group_thresh)
        feature_graph = AdjacencyDictGraph(edges)
        groups = feature_graph.get_connected_components()
        return groups

    def _get_oldest_feature(self, feature_names: Set[T]) -> T:
        """
        Return the feature from the set of feature names that was generated in
        the earliest generation; ties between features from the same iteration
        are broken by sorted name order
        :param feature_names: set of feature names from which to find oldest
        """
        for gen in range(len(self._generation_dict)):
            generation_features = self._generation_dict[gen].keys()
            cur_features = feature_names.intersection(generation_features)
            if cur_features:
                return self._set_getitem(cur_features)
        return self._set_getitem(feature_names)

    @staticmethod
    def _set_getitem(s: Set[T]) -> T:
        """
        Cast set to list and return first element after sorting to ensure
        deterministic, repeatable getitem functionality from set
        :param s: set
        """
        return np.partition(list(s), 0)[0]
39.357143
97
0.664428
701
5,510
5.0699
0.336662
0.023635
0.025324
0.009567
0.051773
0.017445
0
0
0
0
0
0.003979
0.270236
5,510
139
98
39.640288
0.879881
0.399274
0
0
0
0
0.015087
0
0
0
0
0
0
1
0.090909
false
0
0.090909
0
0.287879
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a413e211d97ceac12712656b1e6d40c0ec22304
10,582
py
Python
src/vnsfo/vnsfo/vnsfo_adapter.py
mcompastie/store
bc6f2b9c42f62ae32e89dc877a6eb33289c4c98b
[ "Apache-2.0" ]
2
2018-02-06T14:57:16.000Z
2018-02-06T14:58:34.000Z
src/vnsfo/vnsfo/vnsfo_adapter.py
mcompastie/store
bc6f2b9c42f62ae32e89dc877a6eb33289c4c98b
[ "Apache-2.0" ]
1
2017-08-21T10:50:49.000Z
2017-09-06T08:11:28.000Z
src/vnsfo/vnsfo/vnsfo_adapter.py
mcompastie/store
bc6f2b9c42f62ae32e89dc877a6eb33289c4c98b
[ "Apache-2.0" ]
1
2021-02-16T14:43:02.000Z
2021-02-16T14:43:02.000Z
# -*- coding: utf-8 -*-

#  Copyright (c) 2017 SHIELD, UBIWHERE
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SHIELD, UBIWHERE nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# This work has been performed in the framework of the SHIELD project,
# funded by the European Commission under Grant number 700199 through the
# Horizon 2020 program. The authors would like to acknowledge the contributions
# of their colleagues of the SHIELD partner consortium (www.shield-h2020.eu).


import logging
from abc import abstractmethod, ABCMeta

from storeutils import http_utils
from storeutils.error_utils import ExceptionMessage, IssueHandling, IssueElement


class VnsfoVnsfWrongPackageFormat(ExceptionMessage):
    """Wrong vNSFO package format."""


class VnsfoMissingVnfDescriptor(ExceptionMessage):
    """Missing vNSF Descriptor from the package."""


class VnsfoNsWrongPackageFormat(ExceptionMessage):
    """Wrong Network Descriptor package format."""


class VnsfoMissingNsDescriptor(ExceptionMessage):
    """Missing Network Service Descriptor from the package."""


class VnsfOrchestratorOnboardingIssue(ExceptionMessage):
    """vNSFO onboarding operation failed."""


class VnsfOrchestratorDeletingIssue(ExceptionMessage):
    """vNSFO deletion operation failed."""


class VnsfOrchestratorPolicyIssue(ExceptionMessage):
    """vNSFO policy operation failed."""


class VnsfOrchestratorUnreacheable(ExceptionMessage):
    """vNSFO cannot be reached."""


class VnsfInvalidFormat(ExceptionMessage):
    """vNSF descriptor has an invalid format"""


class VnsfValidationIssue(ExceptionMessage):
    """Issues occurred when validating vNSF descriptor"""


class NsInvalidFormat(ExceptionMessage):
    """NS descriptor has an invalid format"""


class NsMissingDependency(ExceptionMessage):
    """NS descriptors contains VNFDs that are not available in the store"""


class NsValidationIssue(ExceptionMessage):
    """Issues occurred when validating NS descriptor"""


class VnsfOrchestratorAdapter(object, metaclass=ABCMeta):
    """
    Define the interface for a vNSF Orchestrator.
    Each implementation must tailor its behaviour to the Orchestrator it
    interacts with.
    """

    errors = {
        'ONBOARD_VNSF': {
            'PKG_MISSING_VNFD_FOLDER': {
                IssueElement.ERROR.name: ["Missing VNF folder. Expected at '{}'"],
                IssueElement.EXCEPTION.name: VnsfoMissingVnfDescriptor(
                    'vNSF Descriptor not found where manifest.yaml places it')
            },
            'PKG_MISSING_VNFD': {
                IssueElement.ERROR.name: ["Missing VNFD. Expected at '{}'"],
                IssueElement.EXCEPTION.name: VnsfoMissingVnfDescriptor(
                    'vNSF Descriptor not found where manifest.yaml places it')
            },
            'ONBOARDING_ISSUE': {
                IssueElement.ERROR.name: ['vNFSO onboarding at {}. Msg: {} | Status: {}'],
                IssueElement.EXCEPTION.name: VnsfOrchestratorOnboardingIssue(
                    'Can not onboard the package into the vNFSO')
            },
            'VNSFPKG_NOT_VNSFO': {
                IssueElement.ERROR.name: ['Package does not comply with the vNSFO format'],
                IssueElement.EXCEPTION.name: VnsfOrchestratorUnreacheable(
                    'Package does not comply with the vNSFO format')
            },
            'VNSFO_UNREACHABLE': {
                IssueElement.ERROR.name: ['Error onboarding the vNSF at {}'],
                IssueElement.EXCEPTION.name: VnsfOrchestratorUnreacheable('Can not reach the Orchestrator')
            },
            'VNFD_FORMAT_INVALID': {
                IssueElement.ERROR.name: ['Invalid format of VNFD {}'],
                IssueElement.EXCEPTION.name: VnsfInvalidFormat('Can not read vNSF descriptor')
            },
            'VALIDATION_ERROR': {
                IssueElement.ERROR.name: ['Error validating vNSF descriptor: {}'],
                IssueElement.EXCEPTION.name: VnsfValidationIssue("Error validating vNSF descriptor")
            },
        },
        'DELETE_VNSF': {
            'DELETING_ISSUE': {
                IssueElement.ERROR.name: ['vNFSO deleting at {}. Msg: {} | Status: {}'],
                IssueElement.EXCEPTION.name: VnsfOrchestratorDeletingIssue(
                    'Can not delete the vNSF package from the vNFSO')
            },
            'VNSFO_UNREACHABLE': {
                IssueElement.ERROR.name: ['Error onboarding the Network Service at {}'],
                IssueElement.EXCEPTION.name: VnsfOrchestratorUnreacheable('Can not reach the Orchestrator')
            },
        },
        'ONBOARD_NS': {
            'PKG_MISSING_NS_FOLDER': {
                IssueElement.ERROR.name: ["Missing Network Service folder. Expected at '{}'"],
                IssueElement.EXCEPTION.name: VnsfoMissingVnfDescriptor(
                    'Network Service Descriptor not found where manifest.yaml places it')
            },
            'PKG_MISSING_NSD': {
                IssueElement.ERROR.name: ["Missing NSD. Expected at '{}'"],
                IssueElement.EXCEPTION.name: VnsfoMissingNsDescriptor(
                    'Network Service Descriptor not found where manifest.yaml places it')
            },
            'ONBOARDING_ISSUE': {
                IssueElement.ERROR.name: ['NS onboarding at {}. Msg: {} | Status: {}'],
                IssueElement.EXCEPTION.name: VnsfOrchestratorOnboardingIssue(
                    'Can not onboard the package into the vNFSO')
            },
            'NSPKG_NOT_VNSFO': {
                IssueElement.ERROR.name: ['No package file provided in POST'],
                IssueElement.EXCEPTION.name: VnsfOrchestratorUnreacheable(
                    'Package does not comply with the vNSFO format')
            },
            'VNSFO_UNREACHABLE': {
                IssueElement.ERROR.name: ['Error onboarding the Network Service at {}'],
                IssueElement.EXCEPTION.name: VnsfOrchestratorUnreacheable('Can not reach the Orchestrator')
            },
            'NSD_FORMAT_INVALID': {
                IssueElement.ERROR.name: ['Invalid format of NSD {}'],
                IssueElement.EXCEPTION.name: NsInvalidFormat('Can not read NS descriptor')
            },
            'MISSING_VNSF_DEPENDENCY': {
                IssueElement.ERROR.name: ["Missing vNSF dependency '{}' in NS '{}'"],
                IssueElement.EXCEPTION.name: NsMissingDependency('NS Dependency not available')
            },
            'VALIDATION_ERROR': {
                IssueElement.ERROR.name: ['Error validating NS descriptor: {}'],
                IssueElement.EXCEPTION.name: NsValidationIssue("Error validating NS descriptor")
            },
        },
        'DELETE_NS': {
            'DELETING_ISSUE': {
                IssueElement.ERROR.name: ['NS deleting at {}. Msg: {} | Status: {}'],
                IssueElement.EXCEPTION.name: VnsfOrchestratorDeletingIssue(
                    'Can not delete the NS package from the vNFSO')
            },
            'VNSFO_UNREACHABLE': {
                IssueElement.ERROR.name: ['Error onboarding the Network Service at {}'],
                IssueElement.EXCEPTION.name: VnsfOrchestratorUnreacheable('Can not reach the Orchestrator')
            },
        },
        'POLICY': {
            'POLICY_ISSUE': {
                IssueElement.ERROR.name: ['vNFSO policy at {}. Status: {}'],
                IssueElement.EXCEPTION.name: VnsfOrchestratorPolicyIssue('Can not convey policy to the vNFSO')
            },
            'VNSFO_UNREACHABLE': {
                IssueElement.ERROR.name: ['Error conveying policy at {}'],
                IssueElement.EXCEPTION.name: VnsfOrchestratorUnreacheable('Can not reach the Orchestrator')
            }
        }
    }

    def __init__(self, protocol, server, port, api_basepath, logger=None):
        self.logger = logger or logging.getLogger(__name__)
        self.issue = IssueHandling(self.logger)

        self.basepath = http_utils.build_url(server, port, api_basepath, protocol)

        self.logger.debug('vNSF Orchestrator API at: %s', self.basepath)

    @abstractmethod
    def apply_policy(self, tenant_id, policy, data_format):
        """
        Sends a security policy to the Orchestrator.

        :param tenant_id: The tenant to apply the policy to.
        :param policy: The security policy data.
        """
        pass

    @abstractmethod
    def onboard_vnsf(self, tenant_id, vnsf_package_path, vnsfd_file, data_format, validation_data):
        """
        Onboards a vNSF with the Orchestrator.

        :param tenant_id: The tenant where to onboard the vNSF.
        :param vnsf_package_path: The file system path where the vNSF package is stored.
        :param vnsfd_file: The relative path to the VNF Descriptor within the package.

        :return: the VNF package data.
        """
        pass

    @abstractmethod
    def onboard_ns(self, tenant_id, ns_package_path, nsd_file, data_format, validation_data):
        """
        Onboards a vNSF with the Orchestrator.

        :param tenant_id: The tenant where to onboard the Network Service.
        :param ns_package_path: The file system path where the Network Service package is stored.
        :param nsd_file: The relative path to the Network Service Descriptor within the package.

        :return: the Network Service package data.
        """
        pass

    @abstractmethod
    def delete_vnsf(self, tenant_id, vnsf_id, data_format):
        """
        Deletes a vNSF in the Orchestrator.

        :return: the VNF package data.
        """
        pass

    @abstractmethod
    def delete_ns(self, tenant_id, ns_id, data_format):
        """
        Deletes a vNSF in the Orchestrator.

        :return:
        """
        pass
41.015504
118
0.63041
1,039
10,582
6.33975
0.238691
0.054198
0.06695
0.036891
0.500987
0.420373
0.387126
0.355093
0.303477
0.303477
0
0.003027
0.281894
10,582
257
119
41.175097
0.863798
0.246456
0
0.319444
0
0
0.264402
0.008752
0
0
0
0
0
1
0.041667
false
0.034722
0.027778
0
0.173611
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a453821dac727d5d1c6cc68f90e01f14ca2476f
415
py
Python
Chapter 004 Exercises/PierceAndrewM02_Ch4Ex14.py
Omtaga/IvyTechPythonVisualBasic
2fc44cc8c52476c54b138cb3ec9717e9450ea1cb
[ "MIT" ]
null
null
null
Chapter 004 Exercises/PierceAndrewM02_Ch4Ex14.py
Omtaga/IvyTechPythonVisualBasic
2fc44cc8c52476c54b138cb3ec9717e9450ea1cb
[ "MIT" ]
null
null
null
Chapter 004 Exercises/PierceAndrewM02_Ch4Ex14.py
Omtaga/IvyTechPythonVisualBasic
2fc44cc8c52476c54b138cb3ec9717e9450ea1cb
[ "MIT" ]
null
null
null
# Ivy Tech - SDEV 140 - Introduction to Software Development
# Chapter 4 Exercise 14. Write a program that uses nested loops to draw this pattern:
# *******
# ******
# *****
# ****
# ***
# **
# *
# Andrew M. Pierce
# Associate of Applied Science - Software Development
# Python 3.9.0

print()

row = 7
column = 7

for r in range(row):
    for c in range(column):
        print('*', end='')
    column -= 1
    print()
19.761905
85
0.595181
56
415
4.410714
0.785714
0.153846
0
0
0
0
0
0
0
0
0
0.037855
0.236145
415
20
86
20.75
0.741325
0.626506
0
0.25
0
0
0.006993
0
0
0
0
0
0
1
0
false
0
0
0
0
0.375
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a4a854cae4ded3b9497b9f7993efb46a78df03d
1,456
py
Python
DumpCmdDefs/DumpCmdDefs.py
dseeni/ShortcutItPy
50c3b6f5ea05a216c0bb7448e1303e0a00d91f04
[ "MIT" ]
11
2019-04-24T04:03:43.000Z
2021-03-01T17:59:16.000Z
DumpCmdDefs/DumpCmdDefs.py
dseeni/ShortcutItPy
50c3b6f5ea05a216c0bb7448e1303e0a00d91f04
[ "MIT" ]
5
2019-04-29T10:57:33.000Z
2021-04-27T14:15:38.000Z
DumpCmdDefs/DumpCmdDefs.py
dseeni/ShortcutItPy
50c3b6f5ea05a216c0bb7448e1303e0a00d91f04
[ "MIT" ]
3
2019-10-28T07:16:19.000Z
2022-01-18T08:30:04.000Z
import adsk.core, adsk.fusion, traceback

# This is based on a sample from the Autodesk knowledgebase which is under
# an unknown license. Trivial portions of it have been kept, and my contributions
# are licensed under the MIT license of the parent project.
# The sample is available at https://help.autodesk.com/view/fusion360/ENU/?guid=GUID-d2b85a7e-fd08-11e4-9e07-3417ebd3d5be
# which is titled "Write user interface to a file API sample"


def reprCmdDef(cmdDef):
    return """
id = {o.id}
name = {o.name!r}
""".format(o=cmdDef)


def run(context):
    ui = None
    try:
        app = adsk.core.Application.get()
        ui = app.userInterface

        fileDialog = ui.createFileDialog()
        fileDialog.isMultiSelectEnabled = False
        fileDialog.title = "Specify result filename"
        fileDialog.filter = 'Text files (*.txt)'
        fileDialog.filterIndex = 0
        dialogResult = fileDialog.showSave()
        if dialogResult == adsk.core.DialogResults.DialogOK:
            filename = fileDialog.filename
        else:
            return

        result = 'Command defs:'
        for cmdDef in ui.commandDefinitions:
            result += reprCmdDef(cmdDef) + '\n'

        output = open(filename, 'w')
        output.writelines(result)
        output.close()

        ui.messageBox('File written to "' + filename + '"')
    except:
        if ui:
            ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
32.355556
121
0.642857
171
1,456
5.467836
0.643275
0.025668
0
0
0
0
0
0
0
0
0
0.020221
0.252747
1,456
44
122
33.090909
0.839154
0.267857
0
0
0
0
0.121698
0
0
0
0
0
0
1
0.064516
false
0
0.032258
0.032258
0.16129
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a4c0681c545946a3ab4dd9e0d73ab253c370b2b
769
py
Python
src/greetings_utils/setup.py
Squallman/sample-python-versioning
8c85b90b9b0ec92c830ef9d793d79515e994f420
[ "MIT" ]
null
null
null
src/greetings_utils/setup.py
Squallman/sample-python-versioning
8c85b90b9b0ec92c830ef9d793d79515e994f420
[ "MIT" ]
null
null
null
src/greetings_utils/setup.py
Squallman/sample-python-versioning
8c85b90b9b0ec92c830ef9d793d79515e994f420
[ "MIT" ]
null
null
null
#!/usr/bin/env python

from setuptools import find_packages, setup

with open("README.md", "r") as fh:
    README_CONTENT = fh.read()

setup(
    name="sample-python-versioning",
    version_format='{tag}+{gitsha}',
    setup_requires=['setuptools-git-version'],
    author="Serhii Shepel",
    author_email="serhiy.shepel@gmail.com",
    description="Sample application",
    long_description=README_CONTENT,
    long_description_content_type="text/markdown",
    url="https://github.com/Squallman/sample-python-versioning",
    packages=find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
    install_requires=['python-dateutil==2.8.1', 'boto3==1.11.8'],
)
32.041667
65
0.681404
92
769
5.554348
0.663043
0.046967
0.086106
0
0
0
0
0
0
0
0
0.017002
0.158648
769
24
66
32.041667
0.772798
0.026008
0
0
0
0
0.399199
0.121495
0
0
0
0
0
1
0
false
0
0.047619
0
0.047619
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a4df6b75e6a0b8d827cc492ea7774027c83002e
721
py
Python
referenceqvm/tests/test_system.py
markf94/reference-qvm
e4ca313928f72b3d2348a3f9abfec6607944c59e
[ "Apache-2.0" ]
28
2017-09-30T22:04:51.000Z
2018-11-07T11:23:17.000Z
referenceqvm/tests/test_system.py
markf94/reference-qvm
e4ca313928f72b3d2348a3f9abfec6607944c59e
[ "Apache-2.0" ]
8
2017-09-30T21:32:33.000Z
2018-10-15T16:36:57.000Z
referenceqvm/tests/test_system.py
markf94/reference-qvm
e4ca313928f72b3d2348a3f9abfec6607944c59e
[ "Apache-2.0" ]
12
2017-10-05T16:55:55.000Z
2018-11-06T17:42:47.000Z
from pyquil.quil import Program
from pyquil.gates import *
import numpy as np

from referenceqvm.tests.test_data import data_containers as dc


def tests_against_cloud(qvm):
    # simple program
    p = Program(H(0))
    cloud_results = [[0], [0], [0], [0], [0], [0], [0], [0], [0], [0]]
    local_results = qvm.run(p, classical_addresses=[0], trials=10)
    assert len(cloud_results) == len(local_results)

    cloud_wf = dc.HADAMARD_WF
    local_wf, _ = qvm.wavefunction(p)
    assert np.allclose(cloud_wf, local_wf.amplitudes)

    # complex program
    p = Program(dc.QFT_8_INSTRUCTIONS)
    cloud_wf = dc.QFT_8_WF_PROBS
    local_wf, _ = qvm.wavefunction(p)
    assert np.allclose(cloud_wf, local_wf.amplitudes)
30.041667
70
0.687933
109
721
4.321101
0.385321
0.038217
0.050955
0.059448
0.288747
0.288747
0.288747
0.288747
0.288747
0.267516
0
0.027257
0.185853
721
23
71
31.347826
0.775128
0.041609
0
0.25
0
0
0
0
0
0
0
0
0.1875
1
0.0625
false
0
0.25
0
0.3125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a50ca3036f461668abe9e38f8cb1f9d63a74618
809
py
Python
CS305_Computer-Network/Lab4-socket/Echo-Server-Multithreading.py
Eveneko/SUSTech-Courses
0420873110e91e8d13e6e85a974f1856e01d28d6
[ "MIT" ]
4
2020-11-11T11:56:57.000Z
2021-03-11T10:05:09.000Z
CS305_Computer-Network/Lab4-socket/Echo-Server-Multithreading.py
Eveneko/SUSTech-Courses
0420873110e91e8d13e6e85a974f1856e01d28d6
[ "MIT" ]
null
null
null
CS305_Computer-Network/Lab4-socket/Echo-Server-Multithreading.py
Eveneko/SUSTech-Courses
0420873110e91e8d13e6e85a974f1856e01d28d6
[ "MIT" ]
3
2021-01-07T04:14:11.000Z
2021-04-27T13:41:36.000Z
import socket, threading


class Echo(threading.Thread):
    def __init__(self, conn, address):
        threading.Thread.__init__(self)
        self.conn = conn
        self.address = address

    def run(self):
        while True:
            data = self.conn.recv(2048)
            if data and data != b'exit\r\n':
                self.conn.send(data)
                print('{} sent: {}'.format(self.address, data))
            else:
                self.conn.close()
                return


def echo():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(('127.0.0.1', 5555))
    sock.listen(10)
    while True:
        conn, address = sock.accept()
        # start() runs the handler in its own thread; the original called
        # run() directly, which blocks the accept loop on one client and
        # defeats the multithreading this server is named for
        Echo(conn, address).start()


if __name__ == "__main__":
    try:
        echo()
    except KeyboardInterrupt:
        exit()
24.515152
63
0.542645
94
809
4.478723
0.5
0.095012
0
0
0
0
0
0
0
0
0
0.029466
0.328801
809
33
64
24.515152
0.745856
0
0
0.074074
0
0
0.044444
0
0
0
0
0
0
1
0.111111
false
0
0.037037
0
0.222222
0.037037
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a5436f1c74caf19759c1541d6de7dc97008792e
4,652
py
Python
pointnet2/kitti/utils.py
peiyunh/3dseg
59922b6d0b1e21b63958c41eb88c0fa83a943133
[ "BSD-3-Clause" ]
19
2020-03-31T12:25:37.000Z
2022-02-19T09:42:59.000Z
pointnet2/kitti/utils.py
peiyunh/3dseg
59922b6d0b1e21b63958c41eb88c0fa83a943133
[ "BSD-3-Clause" ]
5
2020-04-20T10:53:50.000Z
2022-01-17T16:02:24.000Z
pointnet2/kitti/utils.py
peiyunh/3dseg
59922b6d0b1e21b63958c41eb88c0fa83a943133
[ "BSD-3-Clause" ]
8
2020-04-17T15:20:21.000Z
2021-06-04T01:36:22.000Z
import numpy as np


def transform_se3(pts, tf):
    assert(pts.shape[1] == 3)
    pts_1 = np.concatenate((pts, np.ones((pts.shape[0], 1))), axis=1)
    pts_tf_1 = tf.dot(pts_1.T).T
    pts_tf = pts_tf_1[:, 0:3]
    return pts_tf


def read_calibration(path):
    calib = {}
    with open(path) as f:
        for line in f:
            fields = line.split()
            if len(fields) == 0:
                continue
            keyword = fields[0].strip(':')
            calib[keyword] = np.array([float(x) for x in fields[1:]]).reshape(3, -1)
    velo_to_cam_tf = np.concatenate((calib['Tr_velo_to_cam'], np.array([[0, 0, 0, 1]])), axis=0)
    return velo_to_cam_tf


def read_detection(path):
    det = []
    with open(path) as f:
        for line in f:
            elements = line.split()
            if len(elements) == 15:
                cls, trunc, occl, alpha, x1, y1, x2, y2, h, w, l, cx, cy, cz, ry = elements
                score = float('nan')
            else:
                cls, trunc, occl, alpha, x1, y1, x2, y2, h, w, l, cx, cy, cz, ry, score = elements
            if cls == 'DontCare':
                continue
            det.append([cls, float(h), float(w), float(l), float(cx),
                        float(cy), float(cz), float(ry), float(score)])
    return det


# we do per point NMS with score
# to make sure segments we produce do not overlap
def convert_dets_to_segs(pts_velo_cs, velo_to_cam_tf, dets):
    pts_cam_cs = transform_se3(pts_velo_cs[:, :3], velo_to_cam_tf)
    point_labels = np.full(len(pts_velo_cs), -1)
    point_scores = np.full(len(pts_velo_cs), -10000000.0)
    for i in range(len(dets)):
        cls, height, width, length, cx, cy, cz, ry, score = dets[i]
        ry = ry + np.pi/2
        obj_to_cam_tf = np.array([[ np.cos(ry), 0, np.sin(ry), cx],
                                  [          0, 1,          0, cy],
                                  [-np.sin(ry), 0, np.cos(ry), cz],
                                  [          0, 0,          0,  1]])
        cam_to_obj_tf = np.linalg.inv(obj_to_cam_tf)
        pts_obj_cs = transform_se3(pts_cam_cs, cam_to_obj_tf)
        test_x = np.logical_and(pts_obj_cs.T[0] >= -width/2.0, pts_obj_cs.T[0] <= width/2.0)
        test_y = np.logical_and(pts_obj_cs.T[1] >= -height, pts_obj_cs.T[1] <= 0)
        test_z = np.logical_and(pts_obj_cs.T[2] >= -length/2.0, pts_obj_cs.T[2] <= length/2.0)
        # a binary mask that indicates if points are inside the bounding box
        space_mask = np.logical_and(test_x, np.logical_and(test_y, test_z))
        # a binary mask that indicates if the bounding box has a higher score
        score_mask = score > point_scores
        #
        final_mask = np.logical_and(space_mask, score_mask)
        point_labels[final_mask] = i
        point_scores[final_mask] = score

    segs = []
    scores = []
    classes = []
    for i in range(len(dets)):
        I = np.flatnonzero(point_labels == i)
        if len(I) > 0:
            segs.append(I)
            scores.append(dets[i][-1])
            classes.append(dets[i][0])
    # return segs, scores
    return segs, scores, classes


# we do not do per point NMS with score
# we allow overlap between ground truth segments
# because we will take care of it during evaluation
def convert_gtdets_to_gtsegs(pts_velo_cs, velo_to_cam_tf, gtdets):
    pts_cam_cs = transform_se3(pts_velo_cs[:, :3], velo_to_cam_tf)
    gtsegs = []
    gtclasses = []
    for i in range(len(gtdets)):
        cls, height, width, length, cx, cy, cz, ry, _ = gtdets[i]
        ry = ry + np.pi/2
        obj_to_cam_tf = np.array([[ np.cos(ry), 0, np.sin(ry), cx],
                                  [          0, 1,          0, cy],
                                  [-np.sin(ry), 0, np.cos(ry), cz],
                                  [          0, 0,          0,  1]])
        cam_to_obj_tf = np.linalg.inv(obj_to_cam_tf)
        pts_obj_cs = transform_se3(pts_cam_cs, cam_to_obj_tf)
        test_x = np.logical_and(pts_obj_cs.T[0] >= -width/2.0, pts_obj_cs.T[0] <= width/2.0)
        test_y = np.logical_and(pts_obj_cs.T[1] >= -height, pts_obj_cs.T[1] <= 0)
        test_z = np.logical_and(pts_obj_cs.T[2] >= -length/2.0, pts_obj_cs.T[2] <= length/2.0)
        # a binary mask that indicates if points are inside the bounding box
        space_mask = np.logical_and(test_x, np.logical_and(test_y, test_z))
        #
        gtsegs.append(np.flatnonzero(space_mask))
        gtclasses.append(cls)
    return gtsegs, gtclasses
40.807018
117
0.540413
714
4,652
3.306723
0.191877
0.035578
0.047438
0.045743
0.527742
0.517577
0.457433
0.440491
0.416773
0.395595
0
0.032413
0.330181
4,652
113
118
41.168142
0.725289
0.093508
0
0.413793
0
0
0.006183
0
0
0
0
0
0.011494
1
0.057471
false
0
0.011494
0
0.126437
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a551365778f7c988e1f99fbc4cd3c4ccd23909e
3,299
py
Python
tests/compliance/test_github_compliance.py
hmrc/platsec-compliance-alerting
c12c871a475ec8c2034504799228e565626309b6
[ "Apache-2.0" ]
null
null
null
tests/compliance/test_github_compliance.py
hmrc/platsec-compliance-alerting
c12c871a475ec8c2034504799228e565626309b6
[ "Apache-2.0" ]
null
null
null
tests/compliance/test_github_compliance.py
hmrc/platsec-compliance-alerting
c12c871a475ec8c2034504799228e565626309b6
[ "Apache-2.0" ]
null
null
null
from unittest import TestCase

from tests.fixtures.github_compliance import github_report
from tests.test_types_generator import account, findings

from src.data.audit import Audit
from src.compliance.github_compliance import GithubCompliance


class TestGithubCompliance(TestCase):
    def test_repository_is_signed(self) -> None:
        self.assertTrue(
            GithubCompliance()._is_signed({"branchProtectionRules": {"nodes": [{"requiresCommitSignatures": True}]}})
        )

    def test_repository_is_not_signed(self) -> None:
        self.assertFalse(
            GithubCompliance()._is_signed({"branchProtectionRules": {"nodes": [{"requiresCommitSignatures": False}]}})
        )

    def test_repository_is_signed_with_multiple_nodes(self) -> None:
        self.assertTrue(
            GithubCompliance()._is_signed(
                {
                    "branchProtectionRules": {
                        "nodes": [
                            {"requiresCommitSignatures": False},
                            {"requiresCommitSignatures": True},
                        ]
                    }
                }
            )
        )

    def test_repository_is_signed_with_multiple_nodes_false(self) -> None:
        self.assertFalse(
            GithubCompliance()._is_signed(
                {
                    "branchProtectionRules": {
                        "nodes": [
                            {"requiresCommitSignatures": False},
                            {"requiresCommitSignatures": False},
                        ]
                    }
                }
            )
        )

    def test_repository_is_admin_permission(self) -> None:
        self.assertTrue(GithubCompliance()._is_admin_permission({"teamPermissions": "ADMIN"}))

    def test_repository_is_not_admin_permission(self) -> None:
        self.assertFalse(GithubCompliance()._is_admin_permission({"teamPermissions": "WRITE"}))

    def test_check(self) -> None:
        audit = Audit(
            type="github_admin_report.json",
            report=github_report,
        )
        notifications = GithubCompliance().analyse(audit)
        self.assertEqual(
            {
                findings(
                    account=account("Github", "<https://www.github.com/org/bad-repo-no-signing|bad-repo-no-signing>"),
                    compliance_item_type="github_repository",
                    item="bad-repo-no-signing",
                    findings={
                        "repository commit signing should be turned on",
                    },
                ),
                findings(
                    account=account("Github", "<https://www.github.com/org/bad-repo-no-admin|bad-repo-no-admin>"),
                    compliance_item_type="github_repository",
                    item="bad-repo-no-admin",
                    findings={
                        "repository should have admin permissions",
                    },
                ),
                findings(
                    account=account("Github", "<https://www.github.com/org/good-repo|good-repo>"),
                    compliance_item_type="github_repository",
                    item="good-repo",
                    findings=set(),
                ),
            },
            notifications,
        )
37.488636
118
0.528342
258
3,299
6.51938
0.248062
0.029132
0.060642
0.067776
0.653389
0.570155
0.469084
0.469084
0.387634
0.30321
0
0
0.36799
3,299
87
119
37.91954
0.806715
0
0
0.311688
0
0.025974
0.209457
0.076387
0
0
0
0
0.090909
1
0.090909
false
0
0.064935
0
0.168831
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a55adb11a28dabc65b9530f9aefde0ae2de0195
1,414
py
Python
vmad/core/tests/test_symbol.py
VMBoehm/vmad
3aeb57a43de10e146756f074cca7f77f210e3e74
[ "BSD-2-Clause" ]
null
null
null
vmad/core/tests/test_symbol.py
VMBoehm/vmad
3aeb57a43de10e146756f074cca7f77f210e3e74
[ "BSD-2-Clause" ]
null
null
null
vmad/core/tests/test_symbol.py
VMBoehm/vmad
3aeb57a43de10e146756f074cca7f77f210e3e74
[ "BSD-2-Clause" ]
null
null
null
from vmad.core.symbol import Symbol
from vmad.core.operator import operator
from vmad import Builder


def test_symbol_eval():
    with Builder() as m:
        a = m.input('a')
        m.output(
            b=a.eval(lambda a: len(a)),
            c=a.eval("len(x)"),
        )

    m.compute(['b', 'c'], init=dict(a=[1, 2, 3]))
    m.compute_with_vjp(init=dict(a=[1, 2, 3]), v=dict(_b=1.0, _c=1.0))
    m.compute_with_jvp(['b', 'c'], init=dict(a=[1, 2, 3]), v=dict(a_=1.0))


def test_symbol_add():
    with Builder() as m:
        a, b = m.input('a', 'b')
        m.output(
            c = (a + b) * b / b ** 2
        )

    c = m.compute('c', init=dict(a=2, b=3))
    assert round(c, 4) == round((2 + 3) * 3 / 3 ** 2, 4)


def test_subclass_symbol():
    class MySymbol(Symbol):
        @operator
        class __add__:
            ain = 'x', 'y'
            aout = 'z'

            def apl(self, x, y):
                return dict(z=x + y)

            def vjp(self, _z):
                return dict(_x=_z, _y=_z)

            def jvp(self, x_, y_):
                return dict(z_ = x_ + y_)

    with Builder() as m:
        a, b = m.input(MySymbol('a'), MySymbol('b'))
        c = a + b
        m.output(c=c)

    m.compute('c', init=dict(a=1, b=2))
    m.compute_with_vjp(init=dict(a=1, b=2), v=dict(_c=1.0))
    m.compute_with_jvp(['c'], init=dict(a=1, b=2), v=dict(a_=1.0, b_=1.0))
28.28
74
0.478076
237
1,414
2.708861
0.189873
0.070093
0.074766
0.093458
0.490654
0.426791
0.426791
0.305296
0
0
0
0.03903
0.329562
1,414
49
75
28.857143
0.638186
0
0
0.125
0
0
0.014862
0
0
0
0
0
0.025
1
0.15
false
0
0.075
0.075
0.35
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a56b2cd79d7b6aa0572ea8fc5e0e8cb138e713a
5,847
py
Python
packaging/pythonlib/ovirt_engine/ticket.py
emamirazavi/ovirt-engine
8e47035e8a3430550746c7d0d01242c23bacfa64
[ "Apache-2.0" ]
null
null
null
packaging/pythonlib/ovirt_engine/ticket.py
emamirazavi/ovirt-engine
8e47035e8a3430550746c7d0d01242c23bacfa64
[ "Apache-2.0" ]
null
null
null
packaging/pythonlib/ovirt_engine/ticket.py
emamirazavi/ovirt-engine
8e47035e8a3430550746c7d0d01242c23bacfa64
[ "Apache-2.0" ]
1
2021-02-24T05:36:55.000Z
2021-02-24T05:36:55.000Z
import base64
import datetime
import json
import os

from cryptography import x509
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.asymmetric import utils


class TicketEncoder():

    @staticmethod
    def _formatDate(d):
        return d.strftime("%Y%m%d%H%M%S")

    def __init__(self, cert, key, lifetime=5):
        self._lifetime = lifetime
        with open(cert, 'rb') as cert_file:
            self._x509 = x509.load_pem_x509_certificate(
                data=cert_file.read(),
                backend=default_backend(),
            )
        with open(key, 'rb') as key_file:
            self._pkey = serialization.load_pem_private_key(
                key_file.read(),
                password=None,
                backend=default_backend(),
            )

    def encode(self, data):
        d = {
            'salt': base64.b64encode(os.urandom(8)).decode('ascii'),
            'digest': 'sha1',
            'validFrom': self._formatDate(datetime.datetime.utcnow()),
            'validTo': self._formatDate(
                datetime.datetime.utcnow() + datetime.timedelta(
                    seconds=self._lifetime
                )
            ),
            'data': data
        }

        fields = []
        data_to_sign = b''
        for k, v in d.items():
            fields.append(k)
            data_to_sign += v.encode('utf-8')

        d['signedFields'] = ','.join(fields)
        signature = self._pkey.sign(
            data_to_sign,
            # TODO replace PKCS1v15 with PSS if/when we know we do not
            # need m2crypto compatibility.
            padding.PKCS1v15(),
            # TODO Replace SHA1 with SHA256 if/when we know this is safe,
            # compatibility-wise (also above).
            hashes.SHA1()
        )
        d['signature'] = base64.b64encode(signature).decode('ascii')
        d['certificate'] = self._x509.public_bytes(
            encoding=serialization.Encoding.PEM
        ).decode('ascii')

        return base64.b64encode(json.dumps(d).encode('utf-8'))


class TicketDecoder():

    _peer = None
    _ca = None

    @staticmethod
    def _parseDate(d):
        return datetime.datetime.strptime(d, '%Y%m%d%H%M%S')

    @staticmethod
    def _verifyCertificate(ca, x509cert):
        try:
            res = ca.public_key().verify(
                x509cert.signature,
                x509cert.tbs_certificate_bytes,
                padding.PKCS1v15(),
                x509cert.signature_hash_algorithm,
            )
            if res is not None:
                raise RuntimeError('Certificate validation failed')
        except InvalidSignature:
            raise ValueError('Untrusted certificate')

        if not (
            x509cert.not_valid_before.replace(tzinfo=None) <=
            datetime.datetime.utcnow() <=
            x509cert.not_valid_after.replace(tzinfo=None)
        ):
            raise ValueError('Certificate expired')

    def __init__(self, ca, eku, peer=None):
        self._eku = eku
        if peer is not None:
            self._peer = x509.load_pem_x509_certificate(
                data=peer.encode(),
                backend=default_backend(),
            )
        if ca is not None:
            with open(ca, 'rb') as ca_file:
                self._ca = x509.load_pem_x509_certificate(
                    data=ca_file.read(),
                    backend=default_backend(),
                )

    def decode(self, ticket):
        decoded = json.loads(base64.b64decode(ticket))

        if self._peer is not None:
            x509cert = self._peer
        else:
            x509cert = x509.load_pem_x509_certificate(
                data=decoded['certificate'].encode('utf8'),
                backend=default_backend(),
            )

        if self._ca is not None:
            self._verifyCertificate(self._ca, x509cert)

        if self._eku is not None:
            certekus = x509cert.extensions.get_extension_for_oid(
                x509.oid.ExtensionOID.EXTENDED_KEY_USAGE
            ).value
            if self._eku not in (eku.dotted_string for eku in certekus):
                raise ValueError('Certificate is not authorized for action')

        signedFields = [s.strip() for s in decoded['signedFields'].split(',')]
        if len(
            set(['salt', 'data']) & set(signedFields)
        ) == 0:
            raise ValueError('Invalid ticket')

        pkey = x509cert.public_key()

        if decoded['digest'] == 'sha1':
            md = hashes.SHA1()
        elif decoded['digest'] == 'sha256':
            # TODO: Not implemented yet
            md = hashes.SHA256()
        else:
            raise RuntimeError('Unknown message digest algorithm')

        hasher = hashes.Hash(md, backend=default_backend())
        for field in signedFields:
            hasher.update(decoded[field].encode('utf8'))
        digest = hasher.finalize()

        try:
            res = pkey.verify(
                base64.b64decode(decoded['signature']),
                digest,
                padding.PKCS1v15(),
                utils.Prehashed(md),
            )
            if res is not None:
                raise RuntimeError('Certificate validation failed')
        except InvalidSignature:
            raise ValueError('Invalid ticket signature')

        if not (
            self._parseDate(decoded['validFrom']) <=
            datetime.datetime.utcnow() <=
            self._parseDate(decoded['validTo'])
        ):
            raise ValueError('Ticket life time expired')

        return decoded['data']


# vim: expandtab tabstop=4 shiftwidth=4
33.033898
78
0.566444
598
5,847
5.397993
0.289298
0.012392
0.019517
0.039653
0.193309
0.129492
0.05886
0.05886
0.05886
0.05886
0
0.032522
0.332136
5,847
176
79
33.221591
0.79411
0.041389
0
0.184932
0
0
0.079493
0
0
0
0
0.005682
0
1
0.047945
false
0.006849
0.075342
0.013699
0.178082
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a5a3ca613449d86bc3d28990b0ff332dde8fc9f
431
py
Python
tests/speculos/test_pubkey_cmd.py
Cactus-15-49/nano-app
75209c738139d732867a53db0262cc9995122a38
[ "Apache-2.0" ]
3
2022-03-12T17:06:59.000Z
2022-03-14T19:53:26.000Z
tests/speculos/test_pubkey_cmd.py
Cactus-15-49/nano-app
75209c738139d732867a53db0262cc9995122a38
[ "Apache-2.0" ]
null
null
null
tests/speculos/test_pubkey_cmd.py
Cactus-15-49/nano-app
75209c738139d732867a53db0262cc9995122a38
[ "Apache-2.0" ]
null
null
null
def test_get_public_key(cmd):
    pub_key, chain_code = cmd.get_public_key(
        bip32_path="m/44'/0'/0'/0/0",
        display=False
    )  # type: bytes, bytes
    assert len(pub_key) == 65
    assert len(chain_code) == 32

    pub_key2, chain_code2 = cmd.get_public_key(
        bip32_path="m/44'/1'/0'/0/0",
        display=False
    )  # type: bytes, bytes
    assert len(pub_key2) == 65
    assert len(chain_code2) == 32
25.352941
47
0.605568
68
431
3.588235
0.367647
0.040984
0.147541
0.122951
0.557377
0.557377
0.557377
0.557377
0.336066
0.336066
0
0.086957
0.2529
431
16
48
26.9375
0.670807
0.085847
0
0.153846
0
0
0.076726
0
0
0
0
0
0.307692
1
0.076923
false
0
0
0
0.076923
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a5b1f7592ef0c852b31e90013f3c955b785f8d9
813
py
Python
pdf_appx/lp_appx.py
jpelikan71/pdf_approximation
8475084c7139a12cdef62ab9549325a937c311fd
[ "MIT" ]
null
null
null
pdf_appx/lp_appx.py
jpelikan71/pdf_approximation
8475084c7139a12cdef62ab9549325a937c311fd
[ "MIT" ]
null
null
null
pdf_appx/lp_appx.py
jpelikan71/pdf_approximation
8475084c7139a12cdef62ab9549325a937c311fd
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Wed Sep 11 15:04:27 2019 @author: s.paramonov """ import numpy as np from numpy.polynomial import Polynomial, Legendre, polynomial def approx_legendre_poly(Moments): n_moments = Moments.shape[0]-1 exp_coef = (np.zeros((1))) # For method description see, for instance: # Chapter 3 of "The Problem of Moments", James Alexander Shohat, Jacob David Tamarkin for i in range(n_moments+1): p = Legendre.basis(i).convert(window = [0.0,1.0], kind=Polynomial) q = (2*i+1)*np.sum(Moments[0:(i+1)]*p.coef) pq = (p.coef*q) exp_coef = polynomial.polyadd(exp_coef, pq) expansion = Polynomial(exp_coef) return expansion
23.911765
90
0.574416
109
813
4.211009
0.587156
0.061002
0
0
0
0
0
0
0
0
0
0.046181
0.307503
813
33
91
24.636364
0.769094
0.254613
0
0
0
0
0
0
0
0
0
0
0
1
0.083333
false
0
0.166667
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a5d151e58d02ac92f4b2b24082fcaa3344f9927
1,045
py
Python
anytime_models/examples/imagenet-msdense-ann.py
Bhaskers-Blu-Org2/petridishnn
bf800c695a7f0774106968a0fadc5150074269ad
[ "MIT" ]
121
2019-06-04T08:30:53.000Z
2021-12-17T13:27:54.000Z
anytime_models/examples/imagenet-msdense-ann.py
arita37/petridishnn
bf800c695a7f0774106968a0fadc5150074269ad
[ "MIT" ]
1
2019-11-21T04:29:09.000Z
2019-11-21T04:29:09.000Z
anytime_models/examples/imagenet-msdense-ann.py
arita37/petridishnn
bf800c695a7f0774106968a0fadc5150074269ad
[ "MIT" ]
22
2019-10-10T15:35:47.000Z
2021-09-13T12:46:09.000Z
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import argparse

import anytime_models.models.anytime_network as anytime_network
from anytime_models.models.anytime_network import AnytimeMultiScaleDenseNet
import ann_app_utils


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser = ann_app_utils.parser_add_app_arguments(parser)
    anytime_network.parser_add_msdensenet_arguments(parser)
    args = parser.parse_args()

    model_cls = AnytimeMultiScaleDenseNet

    # Fixed parameters that are only for msdense
    args.ds_name = "ilsvrc"
    # assignment intended; the original `args.num_classes == 1000` was a
    # no-op comparison that left num_classes unset
    args.num_classes = 1000
    args.growth_rate = 16
    if hasattr(args, 'msdensenet_depth') and args.msdensenet_depth is not None:
        args.stack = (args.msdensenet_depth - 3) // 5
    else:
        assert hasattr(args, 'block_config') and args.block_config is not None
    args.prediction_feature = 'msdense'
    args.num_scales = 4
    args.reduction_ratio = 0.5
    args.b_type = 'bottleneck'
    args.s_type = 'imagenet'

    ann_app_utils.train_or_test_ilsvrc(args, model_cls)
31.666667
79
0.743541
141
1,045
5.191489
0.517731
0.076503
0.045082
0.071038
0.090164
0
0
0
0
0
0
0.013809
0.168421
1,045
32
80
32.65625
0.828539
0.080383
0
0
0
0
0.069937
0
0
0
0
0
0.043478
1
0
false
0
0.173913
0
0.173913
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a5eefb86a0ebda9f3d786d511f657872e76a8cd
4,326
py
Python
src/split_dataset.py
hezbranch/time_series_prediction
9bffc3f279cbfaa3ec0acc937d15610c19e0975e
[ "MIT" ]
1
2020-09-17T20:59:46.000Z
2020-09-17T20:59:46.000Z
src/split_dataset.py
hezbranch/time_series_prediction
9bffc3f279cbfaa3ec0acc937d15610c19e0975e
[ "MIT" ]
null
null
null
src/split_dataset.py
hezbranch/time_series_prediction
9bffc3f279cbfaa3ec0acc937d15610c19e0975e
[ "MIT" ]
null
null
null
# split_dataset.py
# Input: --input: (required) a time-series file with one or more columns of
#            role 'id'
#        --data_dict: (required) data dictionary for that file
#        --test_size: (required) fractional size of the test set, expressed as
#            a number between 0 and 1
#        --output_dir: (required) directory where output files are saved
#        --group_cols: (optional) columns to group by, specified as a
#            space-separated list
#        Additionally, a seed used for randomization is hard-coded.
# Output: train.csv and test.csv, where grouping is by all specified columns,
#         or all columns of role 'id' if --group_cols is not specified.

import argparse
import json
import pandas as pd
import os
import numpy as np
import copy

from sklearn.model_selection import GroupShuffleSplit


class Splitter:
    def __init__(self, n_splits=1, size=0, random_state=0, cols_to_group=None):
        self.n_splits = n_splits
        self.size = size
        self.cols_to_group = cols_to_group
        if hasattr(random_state, 'rand'):
            self.random_state = random_state
        else:
            self.random_state = np.random.RandomState(int(random_state))

    def make_groups_from_df(self, data_df):
        grp = data_df[self.cols_to_group]
        grp = [' '.join(row) for row in grp.astype(str).values]
        return grp

    def split(self, X, y=None, groups=None):
        gss1 = GroupShuffleSplit(random_state=copy.deepcopy(self.random_state),
                                 test_size=self.size,
                                 n_splits=self.n_splits)
        for tr_inds, te_inds in gss1.split(X, y=y, groups=groups):
            yield tr_inds, te_inds

    def get_n_splits(self, X, y=None, groups=None):
        return self.n_splits


def split_dataframe_by_keys(data_df=None, size=0, random_state=0, cols_to_group=None):
    gss1 = Splitter(n_splits=1, size=size, random_state=random_state,
                    cols_to_group=cols_to_group)
    # use the data_df argument throughout; the original referenced the
    # module-level `df`, which breaks when this function is imported
    for a, b in gss1.split(data_df, groups=gss1.make_groups_from_df(data_df)):
        train_df = data_df.iloc[a].copy()
        test_df = data_df.iloc[b].copy()
    return train_df, test_df


if __name__ == '__main__':
    # Parse command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', required=True)
    parser.add_argument('--data_dict', required=True)
    parser.add_argument('--test_size', required=True, type=float)
    parser.add_argument('--output_dir', default=None)
    parser.add_argument('--train_csv_filename', default='train.csv')
    parser.add_argument('--test_csv_filename', default='test.csv')
    parser.add_argument('--output_data_dict_filename', required=False,
                        type=str, default=None)
    parser.add_argument('--group_cols', nargs='*', default=[None])
    parser.add_argument('--random_state', required=False, type=int,
                        default=20190206)
    args = parser.parse_args()

    # Import data
    df = pd.read_csv(args.input)
    data_dict = json.load(open(args.data_dict))

    # Split dataset
    if len(args.group_cols) == 0 or args.group_cols[0] is not None:
        group_cols = args.group_cols
    elif args.group_cols[0] is None:
        try:
            fields = data_dict['fields']
        except KeyError:
            fields = data_dict['schema']['fields']
        group_cols = [c['name'] for c in fields
                      if c['role'] in ('id', 'key') and c['name'] in df.columns]

    train_df, test_df = split_dataframe_by_keys(
        df, cols_to_group=group_cols, size=args.test_size,
        random_state=args.random_state)

    # Write split data frames to CSV
    fdir_train_test = args.output_dir
    if fdir_train_test is not None:
        if not os.path.exists(fdir_train_test):
            os.mkdir(fdir_train_test)
        args.train_csv_filename = os.path.join(fdir_train_test, args.train_csv_filename)
        args.test_csv_filename = os.path.join(fdir_train_test, args.test_csv_filename)
        if args.output_data_dict_filename is not None:
            args.output_data_dict_filename = os.path.join(
                fdir_train_test, args.output_data_dict_filename)

    train_df.to_csv(args.train_csv_filename, index=False)
    test_df.to_csv(args.test_csv_filename, index=False)
    if args.output_data_dict_filename is not None:
        with open(args.output_data_dict_filename, 'w') as f:
            json.dump(data_dict, f, indent=4)
41.596154
124
0.684466
647
4,326
4.319938
0.24575
0.055098
0.054741
0.047227
0.226834
0.132737
0.102683
0.089088
0.076565
0
0
0.007301
0.208507
4,326
103
125
42
0.808995
0.176838
0
0.028571
0
0
0.056449
0.007621
0
0
0
0
0
1
0.071429
false
0
0.1
0.014286
0.228571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a6051d14fd3b2b8f305807fdee8e6efc574891b
38,179
py
Python
setup/gg-upgrade-4-1-to-4-2-1.py
GluuFederation/gluu-gateway-setup
1f42a49d3fdf3dc264b4ab55bfd033f07ef6aa88
[ "Apache-2.0" ]
2
2020-09-28T12:42:54.000Z
2020-12-23T17:51:35.000Z
setup/gg-upgrade-4-1-to-4-2-1.py
GluuFederation/gluu-gateway-setup
1f42a49d3fdf3dc264b4ab55bfd033f07ef6aa88
[ "Apache-2.0" ]
15
2019-12-12T10:12:38.000Z
2020-09-25T06:27:51.000Z
setup/gg-upgrade-4-1-to-4-2-1.py
GluuFederation/gluu-gateway-setup
1f42a49d3fdf3dc264b4ab55bfd033f07ef6aa88
[ "Apache-2.0" ]
3
2019-12-26T16:34:37.000Z
2020-09-24T12:24:04.000Z
#!/usr/bin/python import subprocess import traceback import time import os import sys import socket import random import string import shutil import requests import json import getpass import urllib3 import platform import pwd import glob try: import distro except: print ('Unable to find `distro` package hence using `platform`') distro = platform class Distribution: Ubuntu = "ubuntu" Debian = "debian" CENTOS = "centos" RHEL = "red" class KongSetup(object): def __init__(self): self.host_name = '' self.ip = '' self.cert_folder = './certs' self.template_folder = './templates' self.output_folder = './output' self.system_folder = './system' self.tmp_folder = '/tmp' self.log_error = 'gluu-gateway-setup_error.log' self.log = 'gluu-gateway-setup.log' self.cmd_mkdir = '/bin/mkdir' self.openssl_command = '/usr/bin/openssl' self.cmd_chown = '/bin/chown' self.cmd_chmod = '/bin/chmod' self.cmd_ln = '/bin/ln' self.host_name = '/bin/hostname' self.cmd_touch = '/bin/touch' self.cmd_mv = '/bin/mv' self.cmd_cp = '/bin/cp' self.cmd_rm = '/bin/rm' self.cmd_node = '/usr/bin/node' self.cmd_update_rs_d = '/usr/sbin/update-rc.d' self.cmd_sh = '/bin/sh' self.cmd_bash = '/bin/bash' self.cmd_update_alternatives = 'update-alternatives' self.cmd_chkconfig = 'chkconfig' self.cmd_alternatives = 'alternatives' self.cmd_echo = '/bin/echo' self.cmd_service = 'service' self.cmd_systemctl = os.popen('which systemctl').read().strip() self.cmd_rpm = '/bin/rpm' self.cmd_echo = '/bin/echo' self.cmd_dpkg = '/usr/bin/dpkg' self.cmd_kong = '/usr/local/bin/kong' self.country_code = '' self.state = '' self.city = '' self.org_name = '' self.admin_email = '' self.kong_custom_plugins = 'gluu-oauth-auth,gluu-uma-auth,gluu-uma-pep,gluu-oauth-pep,gluu-metrics,gluu-openid-connect,gluu-opa-pep' self.kong_ssl_cert = '' self.kong_ssl_key = '' self.pg_pwd = 'admin' self.kong_admin_listen_ssl_port = '8445' self.kong_admin_listen_port = '8001' self.kong_lua_ssl_trusted_certificate = '' self.kong_lua_ssl_verify_depth = 3 self.gluu_prometheus_server_ip = '104.131.17.150' self.gluu_prometheus_server_host = 'license.gluu.org' self.gluu_customer_registration_endpoint = 'https://%s:%s' % (self.gluu_prometheus_server_host, '4040/metrics/registration') self.dist_kong_config_folder = '/etc/kong' self.dist_kong_config_file = '%s/kong.conf' % self.dist_kong_config_folder self.dist_lua_folder = '/usr/local/share/lua/5.1' self.dist_gluu_lua_folder = '%s/gluu' % self.dist_lua_folder self.dist_kong_plugins_folder = '%s/kong/plugins' % self.dist_lua_folder self.opt_folder = '/opt' self.dist_gluu_gateway_folder = '%s/gluu-gateway' % self.opt_folder self.dist_gluu_gateway_ui_folder = '%s/gluu-gateway-ui' % self.opt_folder self.dist_gluu_gateway_setup_folder = '%s/gluu-gateway-setup' % self.opt_folder self.dist_gluu_gateway_ui_assest_folder = '%s/assets' % self.dist_gluu_gateway_ui_folder self.dist_gluu_gateway_ui_config_folder = '%s/config' % self.dist_gluu_gateway_ui_folder self.dist_gluu_gateway_ui_config_file = '%s/config/local.js' % self.dist_gluu_gateway_ui_folder self.gg_plugins_folder = '%s/lib/kong/plugins' % self.dist_gluu_gateway_folder self.disable_plugin_list = ['ldap-auth', 'key-auth', 'basic-auth', 'jwt', 'oauth2', 'hmac-auth'] self.gg_comman_folder = '%s/lib/gluu' % self.dist_gluu_gateway_folder self.dist_oxd_server_folder = '%s/oxd-server' % self.opt_folder self.dist_oxd_server_config_folder = '%s/conf' % self.dist_oxd_server_folder self.dist_oxd_server_config_file = '%s/oxd-server.yml' % self.dist_oxd_server_config_folder self.gg_service = 
'gluu-gateway' self.oxd_server_service = 'oxd-server' # change this when oxd-server-4.0 is released # oxd kong Property values self.gluu_gateway_ui_port = '1338' self.gluu_gateway_ui_policy_type = 'uma_rpt_policy' self.gluu_gateway_ui_oxd_id = '' self.gluu_gateway_ui_op_host = '' self.gluu_gateway_ui_client_id = '' self.gluu_gateway_ui_client_secret = '' self.gluu_gateway_ui_oxd_web = '' self.gluu_gateway_ui_kong_admin_web_url = 'http://localhost:%s' % self.kong_admin_listen_port self.gluu_gateway_ui_oxd_version = '4.2.2' self.gg_version = '4.2.2' self.postgres_version = '10.x' # oxd licence configuration self.generate_client = True self.gluu_gateway_ui_redirect_uri = 'localhost' # JRE setup properties self.jre_version = '162' self.jre_destination_path = '/opt/jdk1.8.0_%s' % self.jre_version self.gg_dist_folder = '%s/dist' % self.dist_gluu_gateway_folder self.gg_dist_app_folder = '%s/app' % self.gg_dist_folder self.jre_home = '/opt/jre' self.jre_sh_file_name = 'jre-gluu.sh' self.is_prompt = True self.license = False self.init_parameters_from_json_argument() # OS types properties self.os_types = ['centos', 'red', 'ubuntu'] self.os_type = None self.os_version = None self.os_initdaemon = None # log-rotate kong config file self.dist_kong_log_rotate_config_path = '/etc/logrotate.d' self.kong_log_rotate_config_file = 'kong_logrotate' # PostgreSQL config file path self.dist_pg_hba_config_path = '/var/lib/pgsql/10/data' self.dist_pg_hba_config_file = '%s/pg_hba.conf' % self.dist_pg_hba_config_path # dependency zips self.gg_node_modules_folder = "%s/node_modules" % self.dist_gluu_gateway_ui_folder self.gg_bower_modules_folder = "%s/bower_components" % self.dist_gluu_gateway_ui_assest_folder self.gg_node_modules_archive = 'gg_node_modules.tar.gz' self.gg_bower_modules_archive = 'gg_bower_components.tar.gz' # third party lua library self.oxd_web_lua_file_path = '%s/third-party/oxd-web-lua/oxdweb.lua' % self.dist_gluu_gateway_folder self.json_logic_file_path = '%s/third-party/json-logic-lua/logic.lua' % self.dist_gluu_gateway_folder self.lrucache_files_path = '%s/third-party/lua-resty-lrucache/lib/resty' % self.dist_gluu_gateway_folder self.lsession_files_path = '%s/third-party/lua-resty-session/lib/resty' % self.dist_gluu_gateway_folder self.jwt_files_path = '%s/third-party/lua-resty-jwt/lib/resty/.' % self.dist_gluu_gateway_folder self.hmac_files_path = '%s/third-party/lua-resty-hmac/lib/resty/.' 
% self.dist_gluu_gateway_folder self.prometheus_file_path = '%s/third-party/nginx-lua-prometheus/prometheus.lua' % self.dist_gluu_gateway_folder # kong package file names self.ubuntu18_kong_file = "kong-2.2.1.bionic.amd64.deb" self.centos7_kong_file = "kong-2.2.1.el7.amd64.rpm" self.rhel7_kong_file = "kong-2.2.1.rhel7.amd64.rpm" # db names self.dist_konga_db_file = "%s/templates/konga.sql" % self.dist_gluu_gateway_setup_folder self.dist_kong_db_file = "%s/templates/kong.sql" % self.dist_gluu_gateway_setup_folder def init_parameters_from_json_argument(self): if len(sys.argv) > 1: self.is_prompt = False data = json.loads(sys.argv[1]) self.license = data['license'] self.ip = data['ip'] self.host_name = data['host_name'] self.country_code = data['country_code'] self.state = data['state'] self.city = data['city'] self.org_name = data['org_name'] self.admin_email = data['admin_email'] self.pg_pwd = data['pg_pwd'] self.gluu_gateway_ui_redirect_uri = data['gluu_gateway_ui_redirect_uri'] self.gluu_gateway_ui_op_host = 'https://' + data['gluu_gateway_ui_op_host'] self.gluu_gateway_ui_oxd_web = data['gluu_gateway_ui_oxd_web'] self.generate_client = data['generate_client'] if not self.generate_client: self.gluu_gateway_ui_oxd_id = data['gluu_gateway_ui_oxd_id'] self.gluu_gateway_ui_client_id = data['gluu_gateway_ui_client_id'] self.gluu_gateway_ui_client_secret = data['gluu_gateway_ui_client_secret'] def configure_postgres(self): print ("Configuring postgres...") self.log_it('Configuring postgres...') print ('Configuring postgres...') if self.os_type == Distribution.Ubuntu: self.run(['/etc/init.d/postgresql', 'start']) os.system('sudo -iu postgres /bin/bash -c "psql -c \\\"ALTER USER postgres WITH PASSWORD \'%s\';\\\""' % self.pg_pwd) os.system('sudo -iu postgres /bin/bash -c "psql -U postgres -tc \\\"SELECT 1 FROM pg_database WHERE datname = \'kong\'\\\" | grep -q 1 || psql -U postgres -c \\\"CREATE DATABASE kong;\\\""') os.system('sudo -iu postgres /bin/bash -c "psql -U postgres -tc \\\"SELECT 1 FROM pg_database WHERE datname = \'konga\'\\\" | grep -q 1 || psql -U postgres -c \\\"CREATE DATABASE konga;\\\""') os.system('sudo -iu postgres /bin/bash -c "psql konga < %s"' % self.dist_konga_db_file) os.system('sudo -iu postgres /bin/bash -c "psql kong < %s"' % self.dist_kong_db_file) if self.os_type in [Distribution.CENTOS, Distribution.RHEL] and self.os_version == '7': # Initialize PostgreSQL first time self.run([self.cmd_ln, '/usr/lib/systemd/system/postgresql-10.service', '/usr/lib/systemd/system/postgresql.service']) self.run(['/usr/pgsql-10/bin/postgresql-10-setup', 'initdb']) self.render_template_in_out(self.dist_pg_hba_config_file, self.template_folder, self.dist_pg_hba_config_path) self.run([self.cmd_systemctl, 'enable', 'postgresql']) self.run([self.cmd_systemctl, 'start', 'postgresql']) os.system('sudo -iu postgres /bin/bash -c "psql -c \\\"ALTER USER postgres WITH PASSWORD \'%s\';\\\""' % self.pg_pwd) os.system('sudo -iu postgres /bin/bash -c "psql -U postgres -tc \\\"SELECT 1 FROM pg_database WHERE datname = \'kong\'\\\" | grep -q 1 || psql -U postgres -c \\\"CREATE DATABASE kong;\\\""') os.system('sudo -iu postgres /bin/bash -c "psql -U postgres -tc \\\"SELECT 1 FROM pg_database WHERE datname = \'konga\'\\\" | grep -q 1 || psql -U postgres -c \\\"CREATE DATABASE konga;\\\""') os.system('sudo -iu postgres /bin/bash -c "psql konga < %s"' % self.dist_konga_db_file) os.system('sudo -iu postgres /bin/bash -c "psql kong < %s"' % self.dist_kong_db_file) def enable_service_at_start(self, 
serviceName, startSequence=None, stopSequence=None, action='enable'): # Enable service autoload on Gluu-Server startup if self.os_type in [Distribution.CENTOS, Distribution.RHEL]: if self.os_initdaemon == 'systemd': self.run([self.cmd_systemctl, action, serviceName]) else: self.run(["/sbin/chkconfig", serviceName, "on" if action=='enable' else 'off']) elif self.os_type+self.os_version in ('ubuntu18','debian9'): self.run([self.cmd_systemctl, action, serviceName]) elif self.os_type in [Distribution.Ubuntu, Distribution.Debian]: cmd_list = ["/usr/sbin/update-rc.d", serviceName, 'defaults'] if startSequence and stopSequence: cmd_list.append(str(startSequence)) cmd_list.append(str(stopSequence)) self.run(cmd_list) def detect_host_name(self): detected_host_name = None try: detected_host_name = socket.gethostbyaddr(socket.gethostname())[0] except: try: detected_host_name = os.popen("/bin/hostname").read().strip() except: self.log_it("No detected hostname", True) self.log_it(traceback.format_exc(), True) return detected_host_name def gen_cert(self, service_name, password, user='root', cn=None): self.log_it('Generating Certificate for %s' % service_name) key_with_password = '%s/%s.key.orig' % (self.cert_folder, service_name) key = '%s/%s.key' % (self.cert_folder, service_name) csr = '%s/%s.csr' % (self.cert_folder, service_name) public_certificate = '%s/%s.crt' % (self.cert_folder, service_name) self.run([self.openssl_command, 'genrsa', '-des3', '-out', key_with_password, '-passout', 'pass:%s' % password, '2048' ]) self.run([self.openssl_command, 'rsa', '-in', key_with_password, '-passin', 'pass:%s' % password, '-out', key ]) cert_cn = cn if cert_cn == None: cert_cn = self.host_name self.run([self.openssl_command, 'req', '-new', '-key', key, '-out', csr, '-subj', '/C=%s/ST=%s/L=%s/O=%s/CN=%s/emailAddress=%s' % ( self.country_code, self.state, self.city, self.org_name, cert_cn, self.admin_email) ]) self.run([self.openssl_command, 'x509', '-req', '-days', '365', '-in', csr, '-signkey', key, '-out', public_certificate ]) self.run([self.cmd_chown, '%s:%s' % (user, user), key_with_password]) self.run([self.cmd_chmod, '700', key_with_password]) self.run([self.cmd_chown, '%s:%s' % (user, user), key]) self.run([self.cmd_chmod, '700', key]) def get_pw(self, size=12, chars=string.ascii_uppercase + string.digits + string.ascii_lowercase): return ''.join(random.choice(chars) for _ in range(size)) def gen_kong_ssl_certificate(self): self.gen_cert('gluu-gateway', self.get_pw()) self.kong_ssl_cert = self.dist_gluu_gateway_setup_folder + '/certs/gluu-gateway.crt' self.kong_ssl_key = self.dist_gluu_gateway_setup_folder + '/certs/gluu-gateway.key' def get_ip(self): test_ip = None detected_ip = None try: test_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) detected_ip = [(test_socket.connect(('8.8.8.8', 80)), test_socket.getsockname()[0], test_socket.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1] except: self.log_it("No detected IP address", True) self.log_it(traceback.format_exc(), True) if detected_ip: test_ip = self.get_prompt("Enter IP Address", detected_ip) else: test_ip = self.get_prompt("Enter IP Address") if not self.is_ip(test_ip): test_ip = None print ('ERROR: The IP Address is invalid. 
    def get_prompt(self, prompt, default_value=None):
        try:
            if default_value:
                user_input = input("%s [%s] : " % (prompt, default_value)).strip()
                if user_input == '':
                    return default_value
                else:
                    return user_input
            else:
                _input = False
                while not _input:
                    user_input = input("%s : " % prompt).strip()
                    if user_input != '':
                        _input = True
                return user_input
        except KeyboardInterrupt:
            sys.exit()
        except:
            return None

    def install_plugins(self):
        print("Installing kong plugins...")
        self.log_it('Installing kong plugins...')
        # json-logic-lua
        self.run([self.cmd_mkdir, '-p', '%s/rucciva' % self.dist_lua_folder])
        self.run([self.cmd_cp, self.json_logic_file_path, '%s/rucciva/json_logic.lua' % self.dist_lua_folder])
        # lua-resty-lrucache
        self.run([self.cmd_cp, '-R', '%s/lrucache' % self.lrucache_files_path, '%s/resty' % self.dist_lua_folder])
        self.run([self.cmd_cp, '%s/lrucache.lua' % self.lrucache_files_path, '%s/resty' % self.dist_lua_folder])
        # lua-resty-session
        self.run([self.cmd_cp, '-R', '%s/session' % self.lsession_files_path, '%s/resty' % self.dist_lua_folder])
        self.run([self.cmd_cp, '%s/session.lua' % self.lsession_files_path, '%s/resty' % self.dist_lua_folder])
        # lua-resty-jwt
        self.run([self.cmd_cp, '-a', self.jwt_files_path, '%s/resty' % self.dist_lua_folder])
        # lua-resty-hmac
        self.run([self.cmd_cp, '-a', self.hmac_files_path, '%s/resty' % self.dist_lua_folder])
        # Prometheus
        self.run([self.cmd_cp, self.prometheus_file_path, self.dist_lua_folder])
        # gluu plugins
        for plugin in self.kong_custom_plugins.split(","):
            self.run([self.cmd_cp, '-R', "%s/%s" % (self.gg_plugins_folder, plugin), self.dist_kong_plugins_folder])
        # gluu plugins common file
        self.run([self.cmd_cp, '-R', '%s' % self.gg_comman_folder, self.dist_lua_folder])
        # oxd-web-lua
        self.run([self.cmd_cp, self.oxd_web_lua_file_path, self.dist_gluu_lua_folder])
        # Disable kong stock auth plugins
        for plugin in self.disable_plugin_list:
            self.run([self.cmd_cp, '-R', '%s/disable-plugin-handler.lua' % self.gg_comman_folder,
                      "%s/%s/handler.lua" % (self.dist_kong_plugins_folder, plugin)])
            if plugin == "ldap-auth":
                continue
            self.run([self.cmd_rm, '-rf', '%s/%s/migrations' % (self.dist_kong_plugins_folder, plugin)])
            self.run([self.cmd_rm, '-R', '%s/%s/daos.lua' % (self.dist_kong_plugins_folder, plugin)])
    def install_jre(self):
        self.log_it("Installing server JRE 1.8 %s..." % self.jre_version)
        jre_archive = 'server-jre-8u%s-linux-x64.tar.gz' % self.jre_version
        try:
            self.log_it("Extracting %s/%s into /opt" % (self.gg_dist_app_folder, jre_archive))
            self.run(['tar', '-xzf', '%s/%s' % (self.gg_dist_app_folder, jre_archive),
                      '-C', '/opt/', '--no-xattrs', '--no-same-owner', '--no-same-permissions'])
        except:
            self.log_it("Error encountered while extracting archive %s" % jre_archive)
            self.log_it(traceback.format_exc(), True)
        self.run([self.cmd_ln, '-sf', self.jre_destination_path, self.jre_home])
        self.run([self.cmd_chmod, '-R', '755', '%s/bin/' % self.jre_destination_path])
        with open('/etc/environment', 'a') as f:
            f.write('JAVA_HOME=/opt/jre\n')
        if self.os_type in [Distribution.Ubuntu, Distribution.Debian]:
            self.run([self.cmd_update_alternatives, '--install', '/usr/bin/java', 'java', '%s/bin/java' % self.jre_home, '1'])
        elif self.os_type in [Distribution.CENTOS, Distribution.RHEL]:
            self.run([self.cmd_alternatives, '--install', '/usr/bin/java', 'java', '%s/bin/java' % self.jre_home, '1'])

    def config_gluu_gateway_ui(self):
        self.log_it('Installing gluu_gateway_ui node packages...')
        print('Installing gluu_gateway_ui node packages...')
        if not os.path.exists(self.cmd_node):
            # subprocess does not expand shell backticks, so resolve the nodejs
            # path in Python rather than passing '`which nodejs`' literally
            nodejs_path = shutil.which('nodejs') or 'nodejs'
            self.run([self.cmd_ln, '-s', nodejs_path, self.cmd_node])
        try:
            self.run([self.cmd_mkdir, '-p', self.gg_node_modules_folder])
            self.log_it("Extracting %s into %s" % (self.gg_node_modules_archive, self.gg_node_modules_folder))
            self.run(['tar', '--strip', '1', '-xzf', '%s/%s' % (self.gg_dist_folder, self.gg_node_modules_archive),
                      '-C', self.gg_node_modules_folder, '--no-xattrs', '--no-same-owner', '--no-same-permissions'])
        except:
            self.log_it("Error encountered while extracting archive %s" % self.gg_node_modules_archive)
            self.log_it(traceback.format_exc(), True)
        try:
            self.run([self.cmd_mkdir, '-p', self.gg_bower_modules_folder])
            self.log_it("Extracting %s into %s" % (self.gg_bower_modules_archive, self.gg_bower_modules_folder))
            self.run(['tar', '--strip', '1', '-xzf', '%s/%s' % (self.gg_dist_folder, self.gg_bower_modules_archive),
                      '-C', self.gg_bower_modules_folder, '--no-xattrs', '--no-same-owner', '--no-same-permissions'])
        except:
            self.log_it("Error encountered while extracting archive %s" % self.gg_bower_modules_archive)
            self.log_it(traceback.format_exc(), True)
        if self.generate_client:
            msg = 'Creating OXD OP client for Gluu Gateway GUI used to call oxd-server endpoints...'
            self.log_it(msg)
            print(msg)
            oxd_registration_endpoint = self.gluu_gateway_ui_oxd_web + '/register-site'
            redirect_uri = 'https://' + self.gluu_gateway_ui_redirect_uri + ':' + self.gluu_gateway_ui_port
            payload = {
                'op_host': self.gluu_gateway_ui_op_host,
                'redirect_uris': [redirect_uri],
                'post_logout_redirect_uris': [redirect_uri],
                'scope': ['openid', 'oxd', 'permission', 'username'],
                'grant_types': ['authorization_code', 'client_credentials'],
                'client_name': 'KONGA_GG_UI_CLIENT'
            }
            oxd_registration_response = self.http_post_call(oxd_registration_endpoint, payload)
            self.gluu_gateway_ui_oxd_id = oxd_registration_response['oxd_id']
            self.gluu_gateway_ui_client_secret = oxd_registration_response['client_secret']
            self.gluu_gateway_ui_client_id = oxd_registration_response['client_id']
        # Render gluu_gateway_ui property
        self.run([self.cmd_touch, os.path.split(self.dist_gluu_gateway_ui_config_file)[-1]],
                 self.dist_gluu_gateway_ui_config_folder, os.environ.copy(), True)
        self.render_template_in_out(self.dist_gluu_gateway_ui_config_file, self.template_folder,
                                    self.dist_gluu_gateway_ui_config_folder)
        # konga db migration
        self.run(['npm', 'run', 'db-migrate', 'postgres://postgres:%s@localhost:5432/konga' % self.pg_pwd],
                 self.dist_gluu_gateway_ui_folder, os.environ.copy(), True)

    def is_ip(self, address):
        try:
            socket.inet_aton(address)
            return True
        except socket.error:
            return False

    def log_it(self, msg, error_log=False):
        if error_log:
            f = open(self.log_error, 'a')
            f.write('%s %s\n' % (time.strftime('%X %x'), msg))
            f.close()
        f = open(self.log, 'a')
        f.write('%s %s\n' % (time.strftime('%X %x'), msg))
        f.close()

    def make_boolean(self, c):
        if c in ['t', 'T', 'y', 'Y']:
            return True
        if c in ['f', 'F', 'n', 'N']:
            return False
        self.log_it("make_boolean: invalid value for true|false: " + c, True)

    def make_folders(self):
        try:
            self.run([self.cmd_mkdir, '-p', self.cert_folder])
            self.run([self.cmd_mkdir, '-p', self.output_folder])
        except:
            self.log_it("Error making folders", True)
            self.log_it(traceback.format_exc(), True)

    def prompt_for_properties(self):
        # Certificate configuration
        self.ip = self.get_ip()
        self.host_name = self.get_prompt('Enter Hostname', self.detect_host_name())
        print('The next few questions are used to generate the Kong self-signed HTTPS certificate')
        self.country_code = self.get_prompt('Enter two letter Country Code')
        self.state = self.get_prompt('Enter two letter State Code')
        self.city = self.get_prompt('Enter your city or locality')
        self.org_name = self.get_prompt('Enter Organization Name')
        self.admin_email = self.get_prompt('Enter Email Address')
        # Postgres configuration
        msg = """
    If you already have a postgres user and database in the Postgres DB,
    then enter the existing password, otherwise enter a new password: """
        print(msg)
        pg = self.get_pw()
        self.pg_pwd = getpass.getpass(prompt='Password [%s] : ' % pg) or pg
        # Ask for the OP hostname regardless of whether oxd is being installed
        self.gluu_gateway_ui_op_host = 'https://' + self.get_prompt('OP Server Host')
        # Konga configuration
        msg = """
    The next few questions are used to configure Konga.
    If you are connecting to an existing oxd server on another network,
    make sure it is reachable from this server."""
        print(msg)
        self.gluu_gateway_ui_oxd_web = self.get_prompt('Enter your existing OXD server URL')
        self.generate_client = self.make_boolean(self.get_prompt(
            "Generate client credentials to call oxd-server API's? (y - generate, n - enter existing client credentials manually)", 'y'))
        if not self.generate_client:
            self.gluu_gateway_ui_oxd_id = self.get_prompt('OXD Id')
            self.gluu_gateway_ui_client_id = self.get_prompt('Client Id')
            self.gluu_gateway_ui_client_secret = self.get_prompt('Client Secret')

    def install_config_kong(self):
        # Install Kong
        kong_package_file = ''
        install_kong_cmd = []
        if self.os_type == Distribution.Ubuntu and self.os_version == '18':
            kong_package_file = "%s/%s" % (self.gg_dist_app_folder, self.ubuntu18_kong_file)
            install_kong_cmd = [self.cmd_dpkg, '--install', kong_package_file]
        if self.os_type == Distribution.CENTOS and self.os_version == '7':
            kong_package_file = "%s/%s" % (self.gg_dist_app_folder, self.centos7_kong_file)
            install_kong_cmd = [self.cmd_rpm, '--install', '--verbose', '--hash', kong_package_file]
        if self.os_type == Distribution.RHEL and self.os_version == '7':
            kong_package_file = "%s/%s" % (self.gg_dist_app_folder, self.rhel7_kong_file)
            install_kong_cmd = [self.cmd_rpm, '--install', '--verbose', '--hash', kong_package_file]
        if not os.path.exists(kong_package_file):
            self.log_it("%s is not found" % kong_package_file)
            sys.exit(1)  # the package is missing, so installation cannot continue
        self.run(install_kong_cmd)
        if self.os_type == Distribution.Ubuntu and self.os_version in ['18']:
            self.kong_lua_ssl_trusted_certificate = "/etc/ssl/certs/ca-certificates.crt"
        if self.os_type in [Distribution.CENTOS, Distribution.RHEL]:
            self.kong_lua_ssl_trusted_certificate = "/etc/ssl/certs/ca-bundle.crt"
        self.render_template_in_out(self.dist_kong_config_file, self.template_folder, self.dist_kong_config_folder)

    def render_template_in_out(self, file_path, template_folder, output_folder):
        self.log_it("Rendering template %s" % file_path)
        fn = os.path.split(file_path)[-1]
        f = open(os.path.join(template_folder, fn))
        template_text = f.read()
        f.close()
        # old-style %-formatting against the instance dict fills '%(attr)s' placeholders
        new_file = open(os.path.join(output_folder, fn), 'w+')
        new_file.write(template_text % self.__dict__)
        new_file.close()

    def run(self, args, cwd=None, env=None, use_wait=False, shell=False):
        self.log_it('Running: %s' % ' '.join(args))
        try:
            p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, env=env, shell=shell)
            if use_wait:
                code = p.wait()
                self.log_it('Run: %s with result code: %d' % (' '.join(args), code))
            else:
                output, err = p.communicate()
                if output:
                    self.log_it(output)
                if err:
                    self.log_it(err, True)
        except:
            self.log_it("Error running command : %s" % " ".join(args), True)
            self.log_it(traceback.format_exc(), True)

    def migrate_kong(self):
        print("Migrating kong db...")
        self.log_it("Migrating kong db...")
        self.run([self.cmd_kong, "migrations", "up"])
        self.run([self.cmd_kong, "migrations", "finish"])
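    # --- Illustrative sketch (editor's addition) ---
    # render_template_in_out() above fills '%(attr)s' placeholders via old-style
    # %-formatting against self.__dict__, so each placeholder resolves to an
    # instance attribute of the same name. A minimal demonstration of that
    # mechanism with hypothetical placeholder values:
    @staticmethod
    def _render_template_demo():
        template_text = "host = %(host_name)s\nui port = %(gluu_gateway_ui_port)s\n"
        values = {'host_name': 'gw.example.org', 'gluu_gateway_ui_port': '1338'}
        return template_text % values  # -> "host = gw.example.org\nui port = 1338\n"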
    def start_gg_service(self):
        print("Starting %s..." % self.gg_service)
        self.log_it("Starting %s..." % self.gg_service)
        # the Ubuntu 18 and CentOS/RHEL branches are identical, so they are merged
        if (self.os_type == Distribution.Ubuntu and self.os_version in ['18']) \
                or self.os_type in [Distribution.CENTOS, Distribution.RHEL]:
            self.run([self.cmd_systemctl, 'stop', self.gg_service])
            self.run([self.cmd_systemctl, 'start', self.gg_service])
            self.run([self.cmd_systemctl, 'enable', self.gg_service])

    def copy_file(self, in_file, dest_folder):
        try:
            shutil.copy(in_file, dest_folder)
            self.log_it("Copied %s to %s" % (in_file, dest_folder))
        except:
            self.log_it("Error copying %s to %s" % (in_file, dest_folder), True)
            self.log_it(traceback.format_exc(), True)

    def disable_warnings(self):
        if self.os_type in [Distribution.Ubuntu, Distribution.CENTOS, Distribution.RHEL, Distribution.Debian]:
            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

    def choose_from_list(self, list_of_choices, choice_name="item", default_choice_index=0):
        return_value = None
        choice_map = {}
        chosen_index = 0
        print("\nSelect the number for the %s from the following list:" % choice_name)
        for choice in list_of_choices:
            choice_map[chosen_index] = choice
            chosen_index += 1
            print(" [%i] %s" % (chosen_index, choice))
        while not return_value:
            choice_number = self.get_prompt("Please select a number listed above", str(default_choice_index + 1))
            try:
                choice_number = int(choice_number) - 1
                if (choice_number >= 0) and (choice_number < len(list_of_choices)):
                    return_value = choice_map[choice_number]
                else:
                    print('"%i" is not a valid choice' % (choice_number + 1))
            except:
                print('Cannot convert "%s" to a number' % choice_number)
                self.log_it(traceback.format_exc(), True)
        return return_value

    def detect_os_type(self):
        try:
            p = distro.linux_distribution()
            self.os_type = p[0].split()[0].lower()
            self.os_version = p[1].split('.')[0]
        except:
            self.os_type, self.os_version = self.choose_from_list(self.os_types, "Operating System")
        self.log_it('OS Type: %s OS Version: %s' % (self.os_type, self.os_version))

    def detect_initd(self):
        self.os_initdaemon = open(os.path.join('/proc/1/status'), 'r').read().split()[1]

    def http_post_call(self, endpoint, payload):
        response = None
        try:
            response = requests.post(endpoint, data=json.dumps(payload),
                                     headers={'content-type': 'application/json'}, verify=False)
            response_json = json.loads(response.text)
            if response.ok:
                return response_json
            else:
                message = """Error: Failed Not Ok
                Endpoint: %s
                Payload %s
                Response %s
                Response_Json %s
                Please check logs.""" % (endpoint, payload, response, response_json)
                self.exit(message)
        except requests.exceptions.HTTPError as e:
            message = """Error: Failed Http Error:
            Endpoint: %s
            Payload %s
            Response %s
            Error %s
            Please check logs.""" % (endpoint, payload, response, e)
            self.exit(message)
        except requests.exceptions.ConnectionError as e:
            message = """Error: Failed to Connect:
            Endpoint: %s
            Payload %s
            Response %s
            Error %s
            Please check logs.""" % (endpoint, payload, response, e)
            self.exit(message)
        except requests.exceptions.RequestException as e:
            message = """Error: Failed Something Else:
            Endpoint %s
            Payload %s
            Response %s
            Error %s
            Please check logs.""" % (endpoint, payload, response, e)
            self.exit(message)
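    # --- Illustrative sketch (editor's addition) ---
    # How config_gluu_gateway_ui() drives http_post_call() against the oxd
    # /register-site endpoint; the URL and values below are hypothetical:
    #
    #   setup = KongSetup()
    #   response = setup.http_post_call('https://oxd.example.org:8443/register-site', {
    #       'op_host': 'https://op.example.org',
    #       'redirect_uris': ['https://gw.example.org:1338'],
    #       'client_name': 'KONGA_GG_UI_CLIENT',
    #   })
    #   # on success, response carries 'oxd_id', 'client_id' and 'client_secret'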
    def http_get_call(self, endpoint):
        response = None
        try:
            response = requests.get(endpoint, headers={'content-type': 'application/json'}, verify=False)
            response_json = json.loads(response.text)
            return response_json
        except requests.exceptions.HTTPError as e:
            message = """Error: Failed Http Error:
            Endpoint: %s
            Response %s
            Error %s
            Please check logs.""" % (endpoint, response, e)
            self.exit(message)
        except requests.exceptions.ConnectionError as e:
            message = """Error: Failed to Connect:
            Endpoint: %s
            Response %s
            Error %s
            Please check logs.""" % (endpoint, response, e)
            self.exit(message)
        except requests.exceptions.RequestException as e:
            message = """Error: Failed Something Else:
            Endpoint %s
            Response %s
            Error %s
            Please check logs.""" % (endpoint, response, e)
            self.exit(message)

    def exit(self, message):
        print(message)
        self.log_it(message, True)
        sys.exit()

    def configure_kong_rotate(self):
        self.log_it("Configuring log rotate for kong")
        self.run([self.cmd_cp, '%s/%s' % (self.template_folder, self.kong_log_rotate_config_file),
                  "%s/kong" % self.dist_kong_log_rotate_config_path])

    def check_root(self):
        try:
            user = pwd.getpwuid(os.getuid()).pw_name
            if user != "root":
                msg = "This script must be run as root. Please re-run it as the root user."
                print(msg)
                self.log_it(msg, True)
                sys.exit()
        except Exception as err:
            self.log_it("Failed to execute `pwd.getpwuid(os.getuid()).pw_name` %s " % err, True)


if __name__ == "__main__":
    kongSetup = KongSetup()
    kongSetup.check_root()
    try:
        if kongSetup.is_prompt:
            kongSetup.license = kongSetup.make_boolean(kongSetup.get_prompt(
                'Do you acknowledge that use of the Gluu Gateway is under the Apache 2.0 License? (y|N)', 'N'))
            print("")
        if kongSetup.license:
            kongSetup.make_folders()
            if kongSetup.is_prompt:
                kongSetup.prompt_for_properties()
            print("\n")
            print("-----------------------".ljust(30) + "-----------------------".rjust(35) + "\n")
            cnf = 'Host'.ljust(30) + kongSetup.host_name.rjust(35) + "\n" \
                  + 'Organization'.ljust(30) + kongSetup.org_name.rjust(35) + "\n" \
                  + 'City'.ljust(30) + kongSetup.city.rjust(35) + "\n" \
                  + 'State'.ljust(30) + kongSetup.state.rjust(35) + "\n" \
                  + 'Country'.ljust(30) + kongSetup.country_code.rjust(35) + "\n" \
                  + 'OXD Server URL'.ljust(30) + kongSetup.gluu_gateway_ui_oxd_web.rjust(35) + "\n" \
                  + 'OP Host'.ljust(30) + kongSetup.gluu_gateway_ui_op_host.rjust(35) + "\n"
            if not kongSetup.generate_client:
                cnf += 'OXD Id'.ljust(30) + kongSetup.gluu_gateway_ui_oxd_id.rjust(35) + "\n" \
                       + 'Client Id'.ljust(30) + kongSetup.gluu_gateway_ui_client_id.rjust(35) + "\n" \
                       + 'Client Secret'.ljust(30) + kongSetup.gluu_gateway_ui_client_secret.rjust(35) + "\n"
            else:
                cnf += 'Generate Client Credentials?'.ljust(30) + repr(kongSetup.generate_client).rjust(35) + "\n"
            print(cnf)
            kongSetup.log_it(cnf)
            if kongSetup.is_prompt:
                proceed = kongSetup.make_boolean(kongSetup.get_prompt('Proceed with these values (Y|n)', 'Y'))
            else:
                proceed = True
            if proceed:
                kongSetup.detect_os_type()
                kongSetup.detect_initd()
                kongSetup.disable_warnings()
                kongSetup.gen_kong_ssl_certificate()
                kongSetup.install_jre()
                kongSetup.configure_kong_rotate()
                kongSetup.configure_postgres()
                kongSetup.install_config_kong()
                kongSetup.install_plugins()
                kongSetup.migrate_kong()
                kongSetup.config_gluu_gateway_ui()
                kongSetup.start_gg_service()
                print("\n\nGluu Gateway configuration was successful: https://localhost:%s\n\n"
                      % kongSetup.gluu_gateway_ui_port)
            else:
                print("Exit")
        else:
            print("Exit")
    except:
        kongSetup.log_it("***** Error caught in main loop *****", True)
        kongSetup.log_it(traceback.format_exc(), True)
        print("Installation failed. See: \n %s \n %s \nfor more details." % (kongSetup.log, kongSetup.log_error))
46.673594
212
0.60219
4,911
38,179
4.433109
0.115659
0.043452
0.035827
0.027651
0.510679
0.425061
0.348216
0.271324
0.228791
0.193239
0
0.007744
0.269441
38,179
817
213
46.730722
0.772802
0.019828
0
0.270864
0
0.014641
0.221711
0.039903
0
0
0
0
0
1
0.048316
false
0.021962
0.02489
0.001464
0.102489
0.039531
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a61e6daf57b68c86ecf15ea45aa691e68573af2
124,628
py
Python
src/smalltrain/model/nn_model.py
Gauthams1/smalltrain
ac833d58ff2b577277079633da1b20eb50b8d332
[ "MIT" ]
32
2020-11-01T13:16:24.000Z
2022-03-12T03:09:57.000Z
src/smalltrain/model/nn_model.py
Gauthams1/smalltrain
ac833d58ff2b577277079633da1b20eb50b8d332
[ "MIT" ]
12
2020-11-02T08:48:53.000Z
2022-03-12T00:51:05.000Z
src/smalltrain/model/nn_model.py
Gauthams1/smalltrain
ac833d58ff2b577277079633da1b20eb50b8d332
[ "MIT" ]
9
2020-11-07T07:14:59.000Z
2021-11-21T04:54:59.000Z
from dateutil.parser import parse as parse_datetime
from datetime import timezone
from datetime import timedelta
from datetime import datetime
import time
import pandas as pd
import numpy as np
import math
import csv
import sys
import os
import json


class ExtendedJSONEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if hasattr(obj, 'to_dict'):
            return obj.to_dict()
        if isinstance(obj, (datetime)):
            return obj.isoformat()
        if isinstance(obj, (np.int32, np.int64)):
            return str(obj)
        if isinstance(obj, (np.float32, np.float64)):
            return str(obj)
        return json.JSONEncoder.default(self, obj)


# import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
import tensorflow_hub as hub
import tensorflow as tf_v2

import smalltrain as st
from smalltrain.data_set.ts_data_set import TSDataSet
from smalltrain.utils import hash_array
import ggutils.gif_util as gif_util
import ggutils.s3_access as s3_access

item_id_col = 1
shop_id_col = 2
year_col = 6
month_col = 7

from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops

# TODO test dual_leaky_relu
# def test_dual_leaky_relu():
#     x = tf.constant([-2.0, -1.0, 0.0, 1.0, 6.0, 8.0], dtype=tf.float32)
#     y = dual_leaky_relu(x, alpha=0.1, beta=0.9, intercept=6.0, name=None)
#     sess = tf.Session()
#     with sess.as_default():  # or `with sess:` to close on exit
#         x_eval = sess.run(x)
#         y_eval = sess.run(y)
#     print('x_eval:{}'.format(x_eval))
#     print('y_eval:{}'.format(y_eval))


class NNModel:

    MODEL_ID_DNN = 'DNN'
    MODEL_ID = MODEL_ID_DNN

    DEFAULT_WEIGHT_STDDEV = 0.1
    DEFAULT_BIAS_VALUE = 0.1

    # activation functions
    @staticmethod
    def dual_leaky_relu(features, alpha=0.1, beta=0.9, intercept=6.0, name=None):
        with ops.name_scope(name, "DualLeakyRelu6", [features, alpha, beta]) as name:
            features = ops.convert_to_tensor(features, name="features")
            if features.dtype.is_integer:
                features = math_ops.to_float(features)
            alpha = ops.convert_to_tensor(alpha, dtype=features.dtype, name="alpha")
            beta = ops.convert_to_tensor(beta, dtype=features.dtype, name="beta")
            maximum_line = math_ops.maximum(alpha * features, features)
            return math_ops.minimum(beta * features + intercept, maximum_line)

    @staticmethod
    def dual_leaky_relu6(features, alpha=0.1, beta=0.9, thres=6.0, name=None):
        with ops.name_scope(name, "DualLeakyRelu6", [features, alpha, beta]) as name:
            features = ops.convert_to_tensor(features, name="features")
            if features.dtype.is_integer:
                features = math_ops.to_float(features)
            alpha = ops.convert_to_tensor(alpha, dtype=features.dtype, name="alpha")
            beta = ops.convert_to_tensor(beta, dtype=features.dtype, name="beta")
            maximum_line = math_ops.maximum(alpha * features, features)
            return math_ops.minimum(beta * (features - thres) + thres, maximum_line)

    # default act_func
    DEFAULT_ACT_FUNC_KEY = "relu"
    AVAILAVLE_ACT_FUNC_DICT = {}
    AVAILAVLE_ACT_FUNC_DICT['relu'] = tf.nn.relu
    AVAILAVLE_ACT_FUNC_DICT['relu6'] = tf.nn.relu6
    AVAILAVLE_ACT_FUNC_DICT['dual_leaky_relu'] = dual_leaky_relu
    AVAILAVLE_ACT_FUNC_DICT['dual_leaky_relu6'] = dual_leaky_relu6

    # default batch normalization parameters
    DEFAULT_BN_DECAY = 0.90
    DEFAULT_BN_ESP = 1e-5

    DEFAULT_DECREASE_RESOLUTION_RATIO = 2  # TODO integrate this constant with ts_data_set.py

    # optimizers
    AVAILABLE_OPTIMIZER_LIST = ['AdamOptimizer', 'AdaBound']
    DEFAULT_OPTIMIZER = 'AdamOptimizer'

    def __init__(self):
        return

    # set class variables with hparams
    # About minibatch operation
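    # --- Illustrative sketch (editor's addition, mirroring the commented test above) ---
    # dual_leaky_relu6 returns the pointwise minimum of the line beta*(x - thres) + thres
    # and the leaky-ReLU term max(alpha*x, x). Worked by hand with the defaults
    # (alpha=0.1, beta=0.9, thres=6.0):
    #   x = -2.0 -> min(0.9*(-8) + 6, max(-0.2, -2.0)) = min(-1.2, -0.2) = -1.2
    #   x =  1.0 -> min(0.9*(-5) + 6, max( 0.1,  1.0)) = min( 1.5,  1.0) =  1.0
    #   x =  8.0 -> min(0.9*( 2) + 6, max( 0.8,  8.0)) = min( 7.8,  8.0) =  7.8
    # A runnable check under the TF1-compat session API, analogous to the
    # test_dual_leaky_relu sketch above:
    #   x = tf.constant([-2.0, 1.0, 8.0], dtype=tf.float32)
    #   y = NNModel.dual_leaky_relu6(x)
    #   with tf.Session() as sess:
    #       print(sess.run(y))  # expected approx [-1.2, 1.0, 7.8]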
    def set_evaluate_in_minibatch(self, hparams):
        self.evaluate_in_minibatch = False
        if hparams and 'evaluate_in_minibatch' in hparams.keys():
            try:
                print('Try to use evaluate_in_minibatch in hparams:{}'.format(hparams['evaluate_in_minibatch']))
                self.evaluate_in_minibatch = bool(hparams['evaluate_in_minibatch'])
            except AssertionError as e:
                self.evaluate_in_minibatch = False
                print('Use evaluate_in_minibatch with default value:{} because of error: {}'.format(self.evaluate_in_minibatch, e))
        else:
            print('Use evaluate_in_minibatch with default value:{}'.format(self.evaluate_in_minibatch))

    # about sub model
    def set_hparams_on_sub_model(self, hparams):
        self.sub_model_url = None
        if hparams and 'sub_model_url' in hparams.keys():
            try:
                print('Use sub_model_url in hparams:{}'.format(hparams['sub_model_url']))
                self.sub_model_url = hparams['sub_model_url']
                # TODO check model url
                # if self.sub_model_url is not None and len(self.sub_model_url) > 0:
                #     _module = hub.Module(self.sub_model_url)
                #     expected_image_height, expected_image_width = hub.get_expected_image_size(_module)
                #     print('Checking self.sub_model_url: {} with expected_image_height: {} expected_image_width: {}'.format(self.sub_model_url, expected_image_height, expected_image_width))
                #     assert expected_image_height > 0 and expected_image_width > 0
            except AssertionError as e:
                self.sub_model_url = None
                print('Use sub_model_url with default value:{} because of error: {}'.format(self.sub_model_url, e))
            except tf.errors.NotFoundError as e:
                print('Warning NotFoundError on sub_model_url {}: {}'.format(self.sub_model_url, e))
        else:
            print('Use sub_model_url with default value:{}'.format(self.sub_model_url))

        DEFAULT_SUB_MODEL_ALLOCATION = 0.0
        self.sub_model_allocation = DEFAULT_SUB_MODEL_ALLOCATION
        if hparams and 'sub_model_allocation' in hparams.keys():
            print('Use sub_model_allocation in hparams:{}'.format(hparams['sub_model_allocation']))
            self.sub_model_allocation = hparams['sub_model_allocation']
            try:
                self.sub_model_allocation = float(self.sub_model_allocation)
                assert (self.sub_model_allocation >= 0) and (self.sub_model_allocation <= 1.0)
            except AssertionError as e:
                self.sub_model_allocation = DEFAULT_SUB_MODEL_ALLOCATION
                print('Use sub_model_allocation with default value:{} because of error:{}'.format(self.sub_model_allocation, e))
        else:
            print('Use sub_model_allocation with default value:{}'.format(self.sub_model_allocation))

        self.sub_model_input_point = None
        if hparams and 'sub_model_input_point' in hparams.keys():
            print('Use sub_model_input_point in hparams:{}'.format(hparams['sub_model_input_point']))
            self.sub_model_input_point = hparams['sub_model_input_point']
        else:
            print('Use sub_model_input_point with default value:{}'.format(self.sub_model_input_point))

        self.sub_model_output_point = None
        if hparams and 'sub_model_output_point' in hparams.keys():
            print('Use sub_model_output_point in hparams:{}'.format(hparams['sub_model_output_point']))
            self.sub_model_output_point = hparams['sub_model_output_point']
        else:
            print('Use sub_model_output_point with default value:{}'.format(self.sub_model_output_point))

        if self.sub_model_url is not None and self.sub_model_allocation > 0:
            try:
                assert (len(self.sub_model_input_point) > 0)
                assert (len(self.sub_model_output_point) > 0)
            except (TypeError, AssertionError) as e:
                print('Can not use sub model because of error: {}'.format(e))
                self.sub_model_url = None
                self.sub_model_allocation = DEFAULT_SUB_MODEL_ALLOCATION
                self.sub_model_input_point = None
                self.sub_model_output_point = None
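    # --- Illustrative sketch (editor's addition) ---
    # set_hparams_on_sub_model() only activates a sub model when a URL and a
    # positive allocation are given together with both attach points. A minimal
    # hparams fragment that would pass its checks; all values are hypothetical:
    #
    #   hparams = {
    #       'sub_model_url': 'https://tfhub.dev/some/feature_vector/4',
    #       'sub_model_allocation': 0.5,        # must parse as a float in [0, 1]
    #       'sub_model_input_point': 'conv_3',  # len(...) > 0 is asserted
    #       'sub_model_output_point': 'fc_1',
    #   }
    #   model = NNModel()
    #   model.set_hparams_on_sub_model(hparams)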
    # About ONNX export
    def set_export_to_onnx(self, hparams):
        self.export_to_onnx = False
        if hparams and 'export_to_onnx' in hparams.keys():
            try:
                print('Try to use export_to_onnx in hparams:{}'.format(hparams['export_to_onnx']))
                self.export_to_onnx = bool(hparams['export_to_onnx'])
            except AssertionError as e:
                self.export_to_onnx = False
                print('Use export_to_onnx with default value:{} because of error: {}'.format(self.export_to_onnx, e))
        else:
            print('Use export_to_onnx with default value:{}'.format(self.export_to_onnx))

    # def construct_model(self, log_dir_path, model_id=None, train_data=None, debug_mode=True, prediction_mode=False, hparams=None):
    def construct_model(self, log_dir_path=None, hparams=None):
        '''
        Construct the model with the given setting parameters.
        The NN model is defined and restored after this function is called.
        :param hparams:
        :param log_dir_path:
        :return:
        '''
        PREFIX = 'construct_model'
        print('{}__init__'.format(PREFIX))

        self.set_params(hparams=hparams, log_dir_path=log_dir_path)

        # Set parameters from the dataset or not
        has_to_generate_data_set = True
        if self.hparams.get('set_model_parameter_from_dataset'):
            last_time = time.time()
            self.auto_set_model_parameter()
            message = '---------- time:{} DONE auto_set_model_parameter'.format(time.time() - last_time)
            print(message)
            has_to_generate_data_set = False

        # Generate the dataset
        has_to_generate_data_set = has_to_generate_data_set and (not self.hparams.get('prediction_mode'))
        message = 'has_to_generate_data_set: {}'.format(has_to_generate_data_set)
        print(message)
        if has_to_generate_data_set:
            self.generate_data_set()
        return self

    def construct_and_prepare_model(self, log_dir_path, model_id=None, train_data=None, debug_mode=True,
                                    prediction_mode=False, hparams=None):
        self.construct_model(log_dir_path=log_dir_path, hparams=hparams)
        self.prepare_model()
        return self

    def set_params(self, log_dir_path, model_id=None, train_data=None, debug_mode=True, prediction_mode=False, hparams=None):
        # input_past_days = 30
        # input_day_from = 60
        PREFIX = '[NNModel]'
        print('{}__init__'.format(PREFIX))
        self.debug_mode = debug_mode

        # update by hparams
        self.hparams = hparams
        self.trainable_variables = None

        self.model_type = 'CLASSIFICATION'
        if hparams and 'model_type' in hparams.keys():
            print('{}Use model_type in hparams:{}'.format(PREFIX, hparams['model_type']))
            self.model_type = hparams['model_type']
        else:
            print('{}Use model_type with default value:{}'.format(PREFIX, self.model_type))

        self.prediction_mode = prediction_mode

        # about optimizer
        self.optimizer = 'AdamOptimizer'  # default optimizer
        if hparams and 'optimizer' in hparams.keys():
            print('{}Use optimizer in hparams:{}'.format(PREFIX, hparams['optimizer']))
            self.optimizer = hparams['optimizer']
        if self.optimizer is None or self.optimizer not in NNModel.AVAILABLE_OPTIMIZER_LIST:
            self.optimizer = NNModel.DEFAULT_OPTIMIZER
            print('{}Use optimizer with default value:{}'.format(PREFIX, self.optimizer))

        self.l1_norm = 0
        # whether to add l1_norm_reg or not
        self.add_l1_norm_reg = False
        if hparams and 'add_l1_norm_reg' in hparams.keys():
            print('{}Use add_l1_norm_reg in hparams:{}'.format(PREFIX, hparams['add_l1_norm_reg']))
            self.add_l1_norm_reg = hparams['add_l1_norm_reg']
        if self.add_l1_norm_reg is None:
            self.add_l1_norm_reg = False

        # preactivation regularization
        self.preactivation_regularization_value = 0.0
        self.add_preactivation_regularization = False
        if hparams and 'add_preactivation_regularization' in hparams.keys():
            print('{}Use add_preactivation_regularization in hparams:{}'.format(PREFIX, hparams['add_preactivation_regularization']))
            self.add_preactivation_regularization = hparams['add_preactivation_regularization']
        if self.add_preactivation_regularization is None:
            self.add_preactivation_regularization = False

        self.preactivation_regularization_value_ratio = 0.0
        if hparams and 'preactivation_regularization_value_ratio' in hparams.keys():
            print('{}Use preactivation_regularization_value_ratio in hparams:{}'.format(PREFIX, hparams['preactivation_regularization_value_ratio']))
            self.preactivation_regularization_value_ratio = hparams['preactivation_regularization_value_ratio']
            try:
                self.preactivation_regularization_value_ratio = np.float32(self.preactivation_regularization_value_ratio)
            except ValueError:
                self.preactivation_regularization_value_ratio = 0.0
                print('{}Use preactivation_regularization_value_ratio with default value:{}'.format(PREFIX, self.preactivation_regularization_value_ratio))
        else:
            print('{}Use preactivation_regularization_value_ratio with default value:{}'.format(PREFIX, self.preactivation_regularization_value_ratio))

        # self.preactivation_maxout_list = [300.0, 200.0, 54.0, 18.0, 6.0, 18.0, 54.0, 200.0, 300.0, 300.0, 300.0]
        self.preactivation_maxout_list = None
        if hparams and 'preactivation_maxout_list' in hparams.keys():
            print('{}Use preactivation_maxout_list in hparams:{}'.format(PREFIX, hparams['preactivation_maxout_list']))
            self.preactivation_maxout_list = hparams['preactivation_maxout_list']
            try:
                assert len(self.preactivation_maxout_list) > 0
            except (AssertionError, TypeError):
                self.preactivation_maxout_list = None
                print('{}Use preactivation_maxout_list with default value:{}'.format(PREFIX, self.preactivation_maxout_list))
        else:
            print('{}Use preactivation_maxout_list with default value:{}'.format(PREFIX, self.preactivation_maxout_list))

        self.train_data = train_data

        # Set col_size from
        # 1. hparams.get('col_size')
        # 2. data_set.col_size
        self.col_size = hparams.get('col_size')
        if self.col_size is None:
            try:
                self.col_size = self.data_set.col_size
            except AttributeError:
                self.col_size = None

        # update by hparams
        # (For compatibility with ver0.1.1 ```input_ts_size``` and ver0.1.2 ```input_ts_width```)
        if hparams and 'input_ts_size' in hparams.keys():
            print('{}Use input_ts_size in hparams:{}'.format(PREFIX, hparams['input_ts_size']))
            self.input_ts_size = hparams['input_ts_size']
        else:
            print('{}TODO Use input_ts_size with default value'.format(PREFIX))
        self.input_ts_width = self.input_ts_size

        # (For compatibility with ver0.1.1 ```input_ts_size``` and ver0.1.2 ```input_ts_width```)
        if hparams and 'input_ts_width' in hparams.keys():
            print('{}Use input_ts_width in hparams:{}'.format(PREFIX, hparams['input_ts_width']))
            self.input_ts_width = hparams['input_ts_width']
        else:
            print('{}TODO Use input_ts_width with default value'.format(PREFIX))
        if self.input_ts_width is None:
            self.input_ts_width = self.input_ts_size
            print('{}Use input_ts_width same as input_ts_size:{}'.format(PREFIX, self.input_ts_width))

        self.input_width = self.input_ts_width

        if hparams and 'n_layer' in hparams.keys():
            print('{}Use n_layer in hparams:{}'.format(PREFIX, hparams['n_layer']))
            self.n_layer = hparams['n_layer']
        else:
            print('{}TODO Use n_layer with default value'.format(PREFIX))

        self.filter_width = 5
        if hparams and 'filter_width' in hparams.keys():
            print('{}Use filter_width in hparams:{}'.format(PREFIX, hparams['filter_width']))
            self.filter_width = hparams['filter_width']
        else:
            print('{}Use filter_width with default value:{}'.format(PREFIX, self.filter_width))

        self.cnn_channel_size = 4
        if hparams and 'cnn_channel_size' in hparams.keys():
            print('{}Use cnn_channel_size in hparams:{}'.format(PREFIX, hparams['cnn_channel_size']))
            self.cnn_channel_size = hparams['cnn_channel_size']
        else:
            print('{}TODO Use cnn_channel_size with default value'.format(PREFIX))

        self.cnn_channel_size_list = None
        if hparams and 'cnn_channel_size_list' in hparams.keys():
            print('{}Use cnn_channel_size_list in hparams:{}'.format(PREFIX, hparams['cnn_channel_size_list']))
            self.cnn_channel_size_list = hparams['cnn_channel_size_list']
        else:
            print('{}Use cnn_channel_size_list with default value:{}'.format(PREFIX, self.cnn_channel_size_list))

        self.pool_size_list = None
        if hparams and 'pool_size_list' in hparams.keys():
            print('{}Use pool_size_list in hparams:{}'.format(PREFIX, hparams['pool_size_list']))
            self.pool_size_list = hparams['pool_size_list']
        if self.pool_size_list is None:
            self.pool_size_list = np.ones([self.n_layer], dtype="int32")
            self.pool_size_list[0:1] = 2
            print('{}Use pool_size_list with default value:{}'.format(PREFIX, self.pool_size_list))

        self.act_func_list = None
        if hparams and 'act_func_list' in hparams.keys():
            print('{}Use act_func_list in hparams:{}'.format(PREFIX, hparams['act_func_list']))
            self.act_func_list = hparams['act_func_list']
        if self.act_func_list is None:
            self.act_func_list = np.repeat(NNModel.DEFAULT_ACT_FUNC_KEY, [self.n_layer - 1])
            print('{}Use act_func_list with default value:{}'.format(PREFIX, self.act_func_list))
        self.act_func_ref_list = self.set_act_func_ref_list(self.act_func_list, self.n_layer)
        print('{}act_func_ref_list is set :{}'.format(PREFIX, self.act_func_ref_list))

        # About minibatch operation
        self.set_evaluate_in_minibatch(hparams)
        # About sub model
        self.set_hparams_on_sub_model(hparams)
        # About ONNX export
        self.set_export_to_onnx(hparams)
        self.test_only_mode = False
        if hparams and 'test_only_mode' in hparams.keys():
            print('{}Use test_only_mode in hparams:{}'.format(PREFIX, hparams['test_only_mode']))
            self.test_only_mode = hparams['test_only_mode']
        else:
            print('{}TODO Use test_only_mode with default value:{}'.format(PREFIX, self.test_only_mode))

        # whether it has ResNet or not
        self.has_res_net = False
        if hparams and 'has_res_net' in hparams.keys():
            print('{}Use has_res_net in hparams:{}'.format(PREFIX, hparams['has_res_net']))
            self.has_res_net = hparams['has_res_net']
        else:
            print('{}Use has_res_net with default value:{}'.format(PREFIX, self.has_res_net))

        # about batch normalization
        self.has_batch_norm = True
        if hparams and 'has_batch_norm' in hparams.keys():
            print('{}Use has_batch_norm in hparams:{}'.format(PREFIX, hparams['has_batch_norm']))
            self.has_batch_norm = hparams['has_batch_norm']
        else:
            print('{}TODO Use has_batch_norm with default value:{}'.format(PREFIX, self.has_batch_norm))
        if self.has_batch_norm:
            self.bn_decay = NNModel.DEFAULT_BN_DECAY
            if hparams and 'bn_decay' in hparams.keys():
                print('{}Use bn_decay in hparams:{}'.format(PREFIX, hparams['bn_decay']))
                self.bn_decay = hparams['bn_decay']
            else:
                print('{}TODO Use bn_decay with default value:{}'.format(PREFIX, self.bn_decay))
            self.bn_eps = NNModel.DEFAULT_BN_ESP
            if hparams and 'bn_eps' in hparams.keys():
                print('{}Use bn_eps in hparams:{}'.format(PREFIX, hparams['bn_eps']))
                self.bn_eps = hparams['bn_eps']
            else:
                print('{}TODO Use bn_eps with default value:{}'.format(PREFIX, self.bn_eps))

        self.annotation_col_names = None
        if hparams and 'annotation_col_names' in hparams.keys():
            print('{}Use annotation_col_names in hparams:{}'.format(PREFIX, hparams['annotation_col_names']))
            self.annotation_col_names = hparams['annotation_col_names']
        self.annotation_col_size = 0
        if self.annotation_col_names is not None:
            self.annotation_col_size = len(self.annotation_col_names)

        # about mask_rate
        self.mask_rate = None
        if hparams and 'mask_rate' in hparams.keys():
            print('{}Use mask_rate in hparams:{}'.format(PREFIX, hparams['mask_rate']))
            self.mask_rate = hparams['mask_rate']
        if self.mask_rate is not None:
            try:
                self.mask_rate = float(self.mask_rate)
            except ValueError:
                print('{}mask_rate is not float type. Reset with None'.format(PREFIX))
                self.mask_rate = None

        # output_data_names
        self.output_data_names = None  # default when the key is absent from hparams
        if hparams and 'output_data_names' in hparams.keys():
            print('{}Use output_data_names in hparams:{}'.format(PREFIX, hparams['output_data_names']))
            self.output_data_names = hparams['output_data_names']
        if self.output_data_names is not None:
            try:
                if not isinstance(self.output_data_names, list):
                    raise ValueError
                print('output_data_names size:{}'.format(len(self.output_data_names)))
            except ValueError:
                print('{}output_data_names is not list type. Reset with None'.format(PREFIX))
                self.output_data_names = None
        self.restore_var_name_list = None
        if hparams and 'restore_var_name_list' in hparams.keys():
            print('{}Use restore_var_name_list in hparams:{}'.format(PREFIX, hparams['restore_var_name_list']))
            self.restore_var_name_list = hparams['restore_var_name_list']
        self.untrainable_var_name_list = None
        if hparams and 'untrainable_var_name_list' in hparams.keys():
            print('{}Use untrainable_var_name_list in hparams:{}'.format(PREFIX, hparams['untrainable_var_name_list']))
            self.untrainable_var_name_list = hparams['untrainable_var_name_list']

        # plot settings
        self.plot_x_label = None
        if hparams and 'plot_x_label' in hparams.keys():
            print('{}Use plot_x_label in hparams:{}'.format(PREFIX, hparams['plot_x_label']))
            self.plot_x_label = hparams['plot_x_label']
        self.plot_y_label = None
        if hparams and 'plot_y_label' in hparams.keys():
            print('{}Use plot_y_label in hparams:{}'.format(PREFIX, hparams['plot_y_label']))
            self.plot_y_label = hparams['plot_y_label']
        self.plot_x_data_name_in_annotation = None
        if hparams and 'plot_x_data_name_in_annotation' in hparams.keys():
            print('{}Use plot_x_data_name_in_annotation in hparams:{}'.format(PREFIX, hparams['plot_x_data_name_in_annotation']))
            self.plot_x_data_name_in_annotation = hparams['plot_x_data_name_in_annotation']
        self.plot_group_data_name_in_annotation = None
        if hparams and 'plot_group_data_name_in_annotation' in hparams.keys():
            print('{}Use plot_group_data_name_in_annotation in hparams:{}'.format(PREFIX, hparams['plot_group_data_name_in_annotation']))
            self.plot_group_data_name_in_annotation = hparams['plot_group_data_name_in_annotation']
        self.plot_x_range = None
        if hparams and 'plot_x_range' in hparams.keys():
            print('{}Use plot_x_range in hparams:{}'.format(PREFIX, hparams['plot_x_range']))
            self.plot_x_range = hparams['plot_x_range']
        self.plot_y_range = None
        if hparams and 'plot_y_range' in hparams.keys():
            print('{}Use plot_y_range in hparams:{}'.format(PREFIX, hparams['plot_y_range']))
            self.plot_y_range = hparams['plot_y_range']
        self.plot_title = None
        if hparams and 'plot_title' in hparams.keys():
            print('{}Use plot_title in hparams:{}'.format(PREFIX, hparams['plot_title']))
            self.plot_title = hparams['plot_title']
        self.plot_errors = None
        if hparams and 'plot_errors' in hparams.keys():
            print('{}Use plot_errors in hparams:{}'.format(PREFIX, hparams['plot_errors']))
            self.plot_errors = hparams['plot_errors']
        self.plot_animation = False
        if hparams and 'plot_animation' in hparams.keys():
            print('{}Use plot_animation in hparams:{}'.format(PREFIX, hparams['plot_animation']))
            self.plot_animation = hparams['plot_animation']
            if self.plot_animation is None:
                self.plot_animation = False
                print('{}Use plot_animation with default value:{}'.format(PREFIX, self.plot_animation))

        self.calc_cc_errors = False
        if hparams and 'calc_cc_errors' in hparams.keys():
            print('{}Use calc_cc_errors in hparams:{}'.format(PREFIX, hparams['calc_cc_errors']))
            self.calc_cc_errors = hparams['calc_cc_errors']
            if self.calc_cc_errors is None:
                self.calc_cc_errors = False
                print('{}Use calc_cc_errors with default value:{}'.format(PREFIX, self.calc_cc_errors))

        self.op_errors = None
        if hparams and 'op_errors' in hparams.keys():
            print('{}Use op_errors in hparams:{}'.format(PREFIX, hparams['op_errors']))
            self.op_errors = hparams['op_errors']

        # rank_boundary_list
        self.rank_boundary_list = None
        if hparams and 'rank_boundary_list' in hparams.keys():
            print('{}Use rank_boundary_list in hparams:{}'.format(PREFIX, hparams['rank_boundary_list']))
            self.rank_boundary_list = hparams['rank_boundary_list']
        if self.rank_boundary_list is not None:
            # check the members of rank_boundary_list
            len_of_rank_boundary_list = len(self.rank_boundary_list)
            if len_of_rank_boundary_list < 1:
                self.rank_boundary_list = None
            else:
                for rank_boundary in self.rank_boundary_list:
                    try:
                        assert len(rank_boundary) > 1
                        lower = rank_boundary[0]
                        upper = rank_boundary[1]
                        print('{}rank_boundary lower:{}, upper:{}'.format(PREFIX, lower, upper))
                    except Exception as e:
                        print('{}No rank_boundary_list is set because of error {} on invalid parameter:{}'.format(PREFIX, e, rank_boundary))
        else:
            print('{}No rank_boundary_list is set'.format(PREFIX))

        # cloud settings
        self.cloud_root = None
        if hparams and 'cloud_root' in hparams.keys():
            print('{}Use cloud_root in hparams:{}'.format(PREFIX, hparams['cloud_root']))
            self.cloud_root = hparams['cloud_root']
        self.prioritize_cloud = False
        if hparams and 'prioritize_cloud' in hparams.keys():
            print('{}Use prioritize_cloud in hparams:{}'.format(PREFIX, hparams['prioritize_cloud']))
            self.prioritize_cloud = hparams['prioritize_cloud']
            if self.prioritize_cloud is None:
                self.prioritize_cloud = False
                print('{}Use prioritize_cloud with default value:{}'.format(PREFIX, self.prioritize_cloud))

        # local setting
        self.save_root_dir = '/var/tensorflow/tsp/'
        if hparams and 'save_root_dir' in hparams.keys():
            print('{}Use save_root_dir in hparams:{}'.format(PREFIX, hparams['save_root_dir']))
            self.save_root_dir = hparams['save_root_dir']
        else:
            print('{}TODO Use save_root_dir with default value'.format(PREFIX))

        # check init model
        self.sess = tf.InteractiveSession()
        self.init_model_path = None
        if hparams and 'init_model_path' in hparams.keys():
            print('{}Use init_model_path in hparams:{}'.format(PREFIX, hparams['init_model_path']))
            self.init_model_path = hparams['init_model_path']

        # set output_classes in CLASSIFICATION model
        self.output_classes = None
        if hparams and 'output_classes' in hparams.keys():
            print('{}Use output_classes in hparams:{}'.format(PREFIX, hparams['output_classes']))
            self.output_classes = hparams['output_classes']
        # if output_classes is not set in a CLASSIFICATION model, try to read it from init_model_path
        if self.init_model_path is not None and self.model_type == 'CLASSIFICATION':
            self.output_classes = self.get_output_classes_from_model(self.init_model_path)
            hparams['output_classes'] = self.output_classes

        self.log_dir_path = log_dir_path

    def prepare_model(self):
        last_time = time.time()
        self.result_sum = []
        self.sess = tf.InteractiveSession()
        self.define_model()
        print('---------- time:{} DONE define_model'.format(time.time() - last_time))
        last_time = time.time()
        self.saver = tf.train.Saver(var_list=None, max_to_keep=None)
        self.global_iter = 0
        self.sess.run(tf.global_variables_initializer())
        self.restore_model()
        print('---------- time:{} DONE init model'.format(time.time() - last_time))
        last_time = time.time()

    def auto_set_model_parameter(self):
        print('TODO auto_set_model_parameter')
        self.can_not_generate_input_output_data = None
        self.generate_data_set()
        self.input_width = self.data_set.input_ts_width
        self.col_size = self.data_set.col_size
        self.output_classes = self.data_set.output_classes
        # info_dim_size_list = []
        print('DONE auto_set_model_parameter')
        return True

    def generate_data_set(self):
        self.data_set = TSDataSet(debug_mode=self.debug_mode, prediction_mode=self.prediction_mode, hparams=self.hparams)
        self.data_set.generate_input_output_data()
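    # --- Illustrative sketch (editor's addition) ---
    # Typical driver code for the flow above (set_params -> dataset -> prepare);
    # every hyperparameter value here is hypothetical:
    #
    #   hparams = {
    #       'model_type': 'REGRESSION',
    #       'input_ts_width': 30,
    #       'n_layer': 4,
    #       'optimizer': 'AdamOptimizer',
    #       'save_root_dir': '/var/tensorflow/tsp/',
    #   }
    #   model = NNModel().construct_and_prepare_model(
    #       log_dir_path='/var/tensorflow/tsp/logs', hparams=hparams)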
    def restore_model(self):
        '''
        Class method to restore the model.
        (No args need to be set; hparams is used to restore the model.)
        :return:
        '''
        # restore model
        if self.init_model_path is not None:
            print('[restore_model]restore model from {}'.format(self.init_model_path))
            has_restored = self.restore(self.init_model_path, self.restore_var_name_list)
            print('[restore_model]has_restored:', has_restored)
            # if it has not been restored, then the model will be initialized with Prob dist.
        else:
            print('[restore_model]init_model_path is empty. No need to restore')

        # Set optimizer again when trainable_variables is changed
        if self.untrainable_var_name_list is not None:
            self.trainable_variables = self.remove_trainable(self.untrainable_var_name_list)
            self.set_optimizer()

    def restore(self, init_model_path, var_name_list=None):
        from smalltrain.model.operation import is_s3_path, download_to_local, upload_to_cloud
        if init_model_path is None or len(init_model_path) < 1 or os.path.isfile(init_model_path):
            print('[restore]init_model_path is empty. No need to restore')
            return False
        if var_name_list is not None:
            trainable_variables = self.get_trainable_variables()
            var_name_list_to_check = [
                name if (len(name.split(':')) > 1 and name.split(':')[1] == '0') else '{}:0'.format(name)
                for name in var_name_list]
            var_to_restore = [var for var in trainable_variables if (var.name in var_name_list_to_check)]
            print('var_name_list:{}, var_to_load:{}'.format(var_name_list, var_to_restore))
        else:
            var_to_restore = None
        self.saver = tf.train.Saver(var_list=var_to_restore, max_to_keep=None)
        # Initialize all variables
        print('[restore]Initialize all variables')
        self.sess.run(tf.global_variables_initializer())
        # Restore by saver
        print('[restore]Restore from init_model_path:{}'.format(init_model_path))
        local_init_model_path = init_model_path
        if self.prioritize_cloud:
            # download from S3 if the "init_model_path" is an S3 path
            if is_s3_path(init_model_path):
                _paths, _global_iter_got_from_path = get_tf_model_file_paths(init_model_path)
                for _path in _paths:
                    local_init_model_path = download_to_local(path=_path, work_dir_path='/var/tmp/tsp/')
                local_init_model_path = local_init_model_path.split('.ckpt')[0] + '.ckpt'
                if _global_iter_got_from_path is not None:
                    local_init_model_path = local_init_model_path + '-' + str(_global_iter_got_from_path)
            else:
                print('[restore]Restore from local:{}'.format(init_model_path))
        print('[restore]Restore from local_init_model_path:{}'.format(local_init_model_path))
        if local_init_model_path is None or len(local_init_model_path) < 1 or os.path.isfile(local_init_model_path):
            print('[restore]local_init_model_path is empty. Can not restore')
            return False
        self.saver.restore(self.sess, local_init_model_path)
        print('[restore]Set var_name_list untrainable')
        # Reset saver in order to save all variables
        self.saver = tf.train.Saver(var_list=None, max_to_keep=None)
        return True

    def remove_trainable(self, var_name_list, current_trainable_variables=None):
        if current_trainable_variables is None:
            current_trainable_variables = self.get_trainable_variables()
        print('[remove_trainable]remove from current_trainable_variables: {}'.format(current_trainable_variables))
        var_name_list_to_check = [
            name if (len(name.split(':')) > 1 and name.split(':')[1] == '0') else '{}:0'.format(name)
            for name in var_name_list]
        print('[remove_trainable]remove var_name_list_to_check: {}'.format(var_name_list_to_check))
        trainable_variables = [var for var in current_trainable_variables if (var.name not in var_name_list_to_check)]
        print('[remove_trainable]trainable_variables: {}'.format(trainable_variables))
        return trainable_variables

    def get_trainable_variables(self):
        all_collection_keys = tf.get_default_graph().get_all_collection_keys()
        # print('all_collection_keys:{}'.format(all_collection_keys))
        trainable_variables = tf.get_default_graph().get_collection_ref(tf.GraphKeys.TRAINABLE_VARIABLES)
        # print('trainable_variables:{}'.format(trainable_variables))
        return trainable_variables

    def get_output_classes_from_model(self, init_model_path):
        from smalltrain.model.operation import is_s3_path, download_to_local, upload_to_cloud
        print('[get_output_classes_from_model]Restore from init_model_path:{}'.format(init_model_path))
        local_init_model_path = init_model_path
        if self.prioritize_cloud:
            # download from S3 if the "init_model_path" is an S3 path
            if is_s3_path(init_model_path):
                _paths, _global_iter_got_from_path = get_tf_model_file_paths(init_model_path)
                for _path in _paths:
                    local_init_model_path = download_to_local(path=_path, work_dir_path='/var/tmp/tsp/')
                local_init_model_path = local_init_model_path.split('.ckpt')[0] + '.ckpt'
                if _global_iter_got_from_path is not None:
                    local_init_model_path = local_init_model_path + '-' + str(_global_iter_got_from_path)
            else:
                print('[get_output_classes_from_model]Check local:{}'.format(init_model_path))
        print('[get_output_classes_from_model]Check local_init_model_path:{}'.format(local_init_model_path))
        if local_init_model_path is None or len(local_init_model_path) < 1 or os.path.isfile(local_init_model_path):
            print('[get_output_classes_from_model]local_init_model_path is empty. output_classes set None')
            self.output_classes = None
            return None
        meta_file_path = '{}.meta'.format(local_init_model_path)
        _saver = tf.train.import_meta_graph(meta_file_path)
        _saver.restore(self.sess, local_init_model_path)
        # get output_classes from the shape of the last-layer bias b_fc2
        _variables = tf.get_default_graph().get_collection_ref(tf.GraphKeys.VARIABLES)
        print(_variables)
        bias_before_output_layer_name = 'model/fc/b_fc2/b_fc2:0'
        b_fc2 = tf.get_default_graph().get_tensor_by_name(bias_before_output_layer_name)
        # Reset the graph to restore after model construction
        tf.reset_default_graph()
        self.output_classes = int(b_fc2.shape[0])  # have to cast from string to integer
        return self.output_classes

    def reload_setting(self, setting_file_path=None):
        # usage: reload_hyper_param = nn_model_ins.reload_setting()
        try:
            if setting_file_path is None:
                setting_file_path = self.hparams['setting_file_path']
            assert os.path.isfile(setting_file_path)
            reload_hparams_ins = st.Hyperparameters(hparams=None, setting_file_path=setting_file_path)
            reload_hyper_param = reload_hparams_ins.get_as_dict()
            return reload_hyper_param
        except AssertionError as e:
            print('Could not reload setting with error:{}'.format(e))
            return None

    def read_learning_rate_from_setting_file(self, setting_file_path=None):
        # usage: nn_model_ins.read_learning_rate_from_setting_file()
        # TODO make it possible to set update_learning_rate_frequency with a hyper parameter
        DEFAULT_UPDATE_LEARNING_RATE_FREQUENCY = 100
        update_learning_rate_frequency = DEFAULT_UPDATE_LEARNING_RATE_FREQUENCY
        is_iter_to_update_learning_rate = (
            self.global_iter % update_learning_rate_frequency == (update_learning_rate_frequency - 1))
        # print('self.global_iter:{}, is_iter_to_update_learning_rate:{}'.format(self.global_iter, is_iter_to_update_learning_rate))
        if not is_iter_to_update_learning_rate:
            return None
        reload_hyper_param = self.reload_setting(setting_file_path)
        try:
            new_learning_rate = float(reload_hyper_param['learning_rate'])
            assert isinstance(new_learning_rate, float)
            print('new_learning_rate:{}'.format(new_learning_rate))
            return new_learning_rate
        except (AssertionError, TypeError) as e:  # TypeError covers reload_setting() returning None
            print('Could not update learning_rate with error:{}'.format(e))
            return None
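    # --- Illustrative sketch (editor's addition) ---
    # read_learning_rate_from_setting_file() enables learning-rate hot-reload:
    # every 100 global iterations it re-reads the setting file, so editing the
    # value while training runs takes effect at the next check. A hypothetical
    # setting-file fragment (assuming a JSON-style setting file):
    #
    #   {
    #       "learning_rate": 1e-4
    #   }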
    def train(self, iter_to=10000, learning_rate=1e-4, batch_size=128, dropout_ratio=0.5,
              l1_norm_reg_ratio=0.0, save_file_path=None, report_dir_path=None):
        from smalltrain.model.operation import is_s3_path, download_to_local, upload_to_cloud
        last_time = time.time()
        print('train with iter_to:{}, batch_size:{}, dropout_ratio:{}'.format(iter_to, batch_size, dropout_ratio))
        # TODO
        train_index = 0
        # input_data = self.data_set.input_data
        # output_data = self.data_set.output_data
        # train_index_list = self.data_set.train_index_list
        # test_index_list = self.data_set.test_index_list
        # test_size = 31 + 30  # 2015/9, 10
        # test_size = int(len(output_data) * 0.1)

        # setup each test data
        # _input_data = self.data_set.input_data
        test_data = self.data_set.get_test_input_data()
        if (self.mask_rate is not None) and self.mask_rate > 0:
            # masked_test_data = self.data_set.masked_input_data[test_index_list].astype(np.float32)
            masked_test_data = self.data_set.get_masked_test_input_data()
        # test_values = np.asarray(output_data[test_index_list], dtype=np.float32)
        test_values = self.data_set.get_test_output_data()
        if self.model_type == 'CLASSIFICATION':
            test_values_laveled = np.argmax(test_values, axis=1)
        elif self.model_type == 'REGRESSION':
            test_values = test_values.reshape(-1)
        # TODO
        # print('test_index_list:{}'.format(test_index_list))
        print('test_data.shape:{}'.format(test_data.shape))
        print('test_values.shape:{}'.format(test_values.shape))
        print('self.prediction_mode:{}'.format(self.prediction_mode))
        print('---------- time:{}'.format(time.time() - last_time))
        last_time = time.time()
        assert (test_data.shape[0] > 0)

        test_data_id_set = None
        if self.data_set.data_id_set is not None:
            test_data_id_set = self.data_set.get_test_data_id_set()
            print('test_data_id_set.shape:{}'.format(test_data_id_set.shape))

        test_annotation_data = None
        if self.data_set.annotation_data is not None:
            # test_annotation_data = self.data_set.annotation_data[test_index_list]
            test_annotation_data = self.data_set.get_test_annotation_data()
            print('test_annotation_data.shape:{}'.format(test_annotation_data.shape))

        # setup each train data set
        train_data_set = self.data_set
        # remove test data from train data
        # train_data_set.input_data = input_data[:-test_size].astype(np.float32)
        # train_data_set.output_data = output_data[:-test_size].astype(np.float32)
        # print('train_data_set.input_data.shape:{}'.format(train_data_set.input_data.shape))
        print('train_data_set.input_data.shape:{}'.format(self.data_set.get_train_input_data_shape()))

        # plot input and output data
        if self.model_type == 'CLASSIFICATION':
            _output_data = test_values_laveled
        else:
            _output_data = test_values
        print('test_input_data:{}'.format(test_data[:15, -1, 0]))
        print('test_output_data:{}'.format(test_values[:3]))
        print('---------- time:{}'.format(time.time() - last_time))
        last_time = time.time()
        plot_data(input_data=test_data,
                  output_data=test_values_laveled if self.model_type == 'CLASSIFICATION' else test_values,
                  y_max=None, series_range=None, report_dir_path=report_dir_path)
        print('---------- time:{} DONE plot_data'.format(time.time() - last_time))
        last_time = time.time()

        if self.debug_mode:
            if (not self.prediction_mode) and (not self.test_only_mode):
                index_to_export = 0
                self.data_set.export_data(data_kind='train_data', index=index_to_export, report_dir_path=report_dir_path)
                index_to_export = -1
                self.data_set.export_data(data_kind='train_data', index=index_to_export, report_dir_path=report_dir_path)
                index_to_export = 0
                self.data_set.export_data(data_kind='test_data', index=index_to_export, report_dir_path=report_dir_path)
                index_to_export = -1
                self.data_set.export_data(data_kind='test_data', index=index_to_export, report_dir_path=report_dir_path)

            # save all_variables names
            all_variables = [var.name for var in tf.get_default_graph().get_collection_ref('variables')]
            _report_path = os.path.join(report_dir_path, 'all_variables_names.csv')
            f = open(_report_path, 'w')
            for name in all_variables:
                f.write('{}\n'.format(name))
            f.close()
            print('---------- time:{} DONE save all_variables names'.format(time.time() - last_time))
            last_time = time.time()

            # save trainable_variables names
            trainable_variables_names = [var.name for var in self.get_trainable_variables()]
            _report_path = os.path.join(report_dir_path, 'trainable_variables_names.csv')
            f = open(_report_path, 'w')
            for name in trainable_variables_names:
                f.write('{}\n'.format(name))
            f.close()
            if self.cloud_root:
                upload_to_cloud(_report_path, self.cloud_root, self.save_root_dir)
            print('---------- time:{} DONE upload_to_cloud'.format(time.time() - last_time))
            last_time = time.time()

        # if self.prediction_mode:
        #     # TODO
        #     return

        errors_history = None

        for i in range(iter_to):
            if (not self.test_only_mode) and (not self.prediction_mode):
                input_batch, output_batch = train_data_set.next_batch(batch_size)
                # print('i:{}'.format(i))
                if self.global_iter == 0:
                    print('====================')
                    print('step %d, start training' % (self.global_iter))
                    print('input_batch.dtype:{}'.format(input_batch.dtype))
                    print('output_batch.dtype:{}'.format(output_batch.dtype))
                    print('input_batch.shape:{}'.format(input_batch.shape))
                    print('output_batch.shape:{}'.format(output_batch.shape))
                # train
                self.train_step.run(
                    feed_dict={self.x: input_batch, self.y_: output_batch,
                               self.keep_prob: (1 - dropout_ratio), self.learning_rate: learning_rate,
                               self.l1_norm_reg_ratio: l1_norm_reg_ratio, self.is_train: True})
                summary, train_total_loss = self.sess.run(
                    [self.merged, self.total_loss],
                    feed_dict={self.x: input_batch, self.y_: output_batch,
                               self.keep_prob: (1 - dropout_ratio), self.learning_rate: learning_rate,
                               self.l1_norm_reg_ratio: l1_norm_reg_ratio, self.is_train: True})
                if self.global_iter % 100 == 99:
                    # train_accuracy = accuracy.test(feed_dict={
                    # train_total_loss = self.total_loss.test(feed_dict={
                    #     self.x: input_batch, self.y_: output_batch, self.keep_prob: 1.0, self.learning_rate: learning_rate})
                    print('========================================')
                    print('step %d, training loss %g' % (self.global_iter, train_total_loss))
                    print('========================================')
                    self.train_writer.add_summary(summary, self.global_iter)
                    # print('min and max of normed train date_block_num:{}, {}'.format(min(input_batch[:, 0, 0]), max(input_batch[:, 0, 0])))

            # _test_and_report = (self.test_only_mode or self.global_iter == 9 or self.global_iter % 100 == 99)
            _test_and_report = (self.test_only_mode or self.prediction_mode
                                or self.global_iter == 9 or self.global_iter % 100 == 99)
            # _test_and_report = (self.test_only_mode or self.global_iter % 10 == 9)
            if _test_and_report:
                # calc error
                if self.model_type == 'REGRESSION':
                    y_estimated = self.y.eval(feed_dict={
                        self.x: test_data, self.y_: test_values, self.keep_prob: 1.0,
                        self.learning_rate: learning_rate, self.l1_norm_reg_ratio: l1_norm_reg_ratio,
                        self.is_train: False})
                    y_label_estimated = None
                    if self.mask_rate is not None and self.mask_rate > 0:
                        y_estimated_masked = self.y.eval(feed_dict={
                            self.x: masked_test_data, self.y_: test_values, self.keep_prob: 1.0,
                            self.learning_rate: learning_rate, self.l1_norm_reg_ratio: l1_norm_reg_ratio,
                            self.is_train: False})
                        y_label_estimated_masked = None
                else:
                    # run y_label for the predicted labels and y for the raw estimates
                    y_label_estimated, y_estimated = self.sess.run(
                        [self.y_label, self.y],
                        feed_dict={self.x: test_data, self.y_: test_values, self.keep_prob: 1.0,
                                   self.learning_rate: learning_rate, self.l1_norm_reg_ratio: l1_norm_reg_ratio,
                                   self.is_train: False})
                summary, test_total_loss = self.sess.run(
                    [self.merged, self.total_loss],
                    feed_dict={self.x: test_data, self.y_: test_values, self.keep_prob: 1.0,
                               self.learning_rate: learning_rate, self.l1_norm_reg_ratio: l1_norm_reg_ratio,
                               self.is_train: False})
                root_mean_squared_error = None
                mean_absolute_error = None
                if self.model_type == 'REGRESSION':
                    root_mean_squared_error, mean_absolute_error = self.sess.run(
                        [self.root_mean_squared_error, self.mean_absolute_error],
                        feed_dict={self.x: test_data, self.y_: test_values, self.keep_prob: 1.0,
                                   self.learning_rate: learning_rate, self.l1_norm_reg_ratio: l1_norm_reg_ratio,
                                   self.is_train: False})
                    if self.mask_rate is not None and self.mask_rate > 0:
                        root_mean_squared_error_masked, mean_absolute_error_masked = self.sess.run(
                            [self.root_mean_squared_error, self.mean_absolute_error],
                            feed_dict={self.x: masked_test_data, self.y_: test_values, self.keep_prob: 1.0,
                                       self.learning_rate: learning_rate, self.l1_norm_reg_ratio: l1_norm_reg_ratio,
                                       self.is_train: False})
                print('========================================')
                print('step:{}, testing root_mean_squared_error:{}, mean_absolute_error:{}'.format(
                    self.global_iter, root_mean_squared_error, mean_absolute_error))
                print('========================================')
                assert (root_mean_squared_error is not None)
                new_errors = pd.DataFrame([[self.global_iter, root_mean_squared_error, mean_absolute_error]],
                                          columns=(['global_iter', 'root_mean_squared_error', 'mean_absolute_error']))
                errors_history = pd.concat([errors_history, new_errors], ignore_index=True) \
                    if errors_history is not None else new_errors
                min_rmse_index = errors_history['root_mean_squared_error'].idxmin()
                min_root_mean_squared_error = errors_history.iloc[min_rmse_index]['root_mean_squared_error']
                min_global_iter = errors_history.iloc[min_rmse_index]['global_iter']
                at_min_mean_absolute_error = errors_history.iloc[min_rmse_index]['mean_absolute_error']
                print('min_global_iter:{}, min of root_mean_squared_error:{}, with mean_absolute_error:{}'.format(
                    min_global_iter, min_root_mean_squared_error, at_min_mean_absolute_error))
                if report_dir_path:
                    _report_path = os.path.join(report_dir_path, 'errors_history.csv')
                    errors_history.to_csv(_report_path, index=False)
                    if self.cloud_root:
                        upload_to_cloud(_report_path, self.cloud_root, self.save_root_dir)
                # log_scalar(writer=self.test_writer, tag='rmse', value=rmse, step=self.global_iter)

                if report_dir_path:
                    error_to_plot = None
                    error_name = None
                    if self.plot_errors is not None:
                        # TODO plot more than a single error
                        for plot_error in self.plot_errors:
                            calc_range = [0, 9.0] if len(plot_error.split('DROP')) > 1 else None
                            if plot_error == 'accuracy':
                                error_to_plot = calc_accuracy_with_drop(test_values, y_estimated,
                                                                        rank_boundary_list=self.rank_boundary_list)
                                naive_error = None
                            else:
                                error_to_plot = calc_error_with_drop(plot_error, test_values, y_estimated,
                                                                     calc_range=calc_range)
                                naive_error = calc_error_with_drop(plot_error, test_values[:-1], y_estimated[1:],
                                                                   calc_range=calc_range)
                            error_name = 'error({})'.format(plot_error)
                            # report naive error TODO standardize
                            print('{}, error:{}, naive_error:{}'.format(error_name, error_to_plot, naive_error))

                    _offset_column_index = train_data_set.offset_column_index
                    # print('_offset_column_index:{}'.format(_offset_column_index))
                    if _offset_column_index > 0:
                        offset_values = test_data[:, 0, _offset_column_index]
                        offset_values = np.reshape(offset_values, (-1))
                        offset_value_unique_list = np.unique(offset_values)
                    else:
                        # offset_values = train_data_set.input_output_ts_offset
                        offset_value_unique_list = [train_data_set.input_output_ts_offset]
                    for _offset in offset_value_unique_list:
                        # print('_offset:{}'.format(_offset))
                        # print('offset_values:{}'.format(offset_values))
                        # print('len of offset_values:{}'.format(len(offset_values)))
                        if _offset_column_index > 0:
                            all_index_to_plot = [i for i, x in enumerate(offset_values) if math.fabs(x - _offset) < 1e-3]
                        else:
                            all_index_to_plot = list(range(len(test_data)))

                        # calc cc errors
                        input_target_value_column_index = 0  # TODO Enable to set with hyper param
                        cc_error = None
                        if self.calc_cc_errors and self.op_errors is not None:
                            true_y_to_plot_cc = _output_data[all_index_to_plot]
                            estimated_y_to_plot_cc = test_data[all_index_to_plot, -1, input_target_value_column_index]
                            for op_error in self.op_errors:
                                calc_range = [0, 9.0] if len(op_error.split('DROP')) > 1 else None
                                if op_error != 'accuracy':
                                    cc_error = calc_error_with_drop(op_error, true_y_to_plot_cc,
                                                                    estimated_y_to_plot_cc, calc_range=calc_range)
                                    cc_error_name = 'cc error({})'.format(op_error)
                                    print('_offset:{}, error_name:{}, error_to_plot:{}, cc_error_name:{}, cc_error:{}'.format(
                                        _offset, error_name, error_to_plot, cc_error_name, cc_error))
                            x_to_plot_cc = list(range(len(estimated_y_to_plot_cc)))
                            _group_value = None
                            _plot_iter = None
                            title = 'Plot Ground truth and CC\nwith input-output offset:{} for group:{}'.format(
                                _offset, _group_value) if self.plot_title is None \
                                else self.plot_title.format(_offset, _group_value)
                            _report_path = plot_estmated_true(x=x_to_plot_cc, estimated_y=estimated_y_to_plot_cc,
                                                              estimated_label=None, model_type=self.model_type,
                                                              true_y=true_y_to_plot_cc, y_max=None, series_range=None,
                                                              error=cc_error, error_name=cc_error_name,
                                                              report_dir_path=report_dir_path,
                                                              xlabel=self.plot_x_label, ylabel=self.plot_y_label,
                                                              title=title,
                                                              postfix='o{}_{}_cc'.format(_offset, _group_value),
                                                              iter=_plot_iter,
                                                              x_range=self.plot_x_range, y_range=self.plot_y_range)
                            if self.cloud_root:
                                upload_to_cloud(_report_path, self.cloud_root, self.save_root_dir)
                            self.calc_cc_errors = False  # TODO

                        # plot in group
                        index_to_plot_group_dict = {'all': all_index_to_plot}
                        if self.plot_group_data_name_in_annotation is not None:
                            index_to_plot_group_dict = {}
                            _group_values = test_annotation_data[:, 2 + self.annotation_col_names.index(
                                self.plot_group_data_name_in_annotation)]
                            _group_unique_values = list(set(_group_values))
                            for group_value in _group_unique_values:
                                # print('group_value:{}'.format(group_value))
                                index_to_plot = [i for i, x in enumerate(_group_values)
                                                 if (x == group_value and i in all_index_to_plot)]
                                # print('index_to_plot:{}'.format(index_to_plot))
                                # print('test_annotation_data:{}'.format(test_annotation_data[index_to_plot]))
                                index_to_plot_group_dict[group_value] = index_to_plot

                        report_plot_file_list = []
                        for group_value, index_to_plot in index_to_plot_group_dict.items():
                            # print('_offset:{}, index_to_plot[:5]:{}'.format(_offset, index_to_plot[:5]))
                            estimated_y_to_plot = y_estimated[index_to_plot]
                            estimated_label_to_plot = y_label_estimated[index_to_plot] if y_label_estimated is not None else None
                            if self.mask_rate is not None and self.mask_rate > 0:
                                estimated_y_to_plot_masked = y_estimated_masked[index_to_plot]
                                estimated_label_to_plot_masked = y_label_estimated_masked[index_to_plot] \
                                    if y_label_estimated_masked is not None else None
                            true_y_to_plot = _output_data[index_to_plot]
                            data_id_set_to_plot = None
                            if test_data_id_set is not None:
                                data_id_set_to_plot = test_data_id_set[index_to_plot]
                            elif test_annotation_data is not None:
                                data_id_set_to_plot = test_annotation_data[index_to_plot, 0]
                            test_annotation_data_dt_to_export = None
                            if test_annotation_data is not None:
                                test_annotation_data_dt_to_export = test_annotation_data[index_to_plot]
                            # print('len(estimated_y_to_plot):{}'.format(len(estimated_y_to_plot)))
                            x_to_plot = list(range(len(estimated_y_to_plot)))
                            if test_annotation_data is not None and self.plot_x_data_name_in_annotation is not None:
                                # print('self.plot_x_data_name_in_annotation:{}'.format(self.plot_x_data_name_in_annotation))
                                # print('self.annotation_col_names.index(self.plot_x_data_name_in_annotation):{}'.format(self.annotation_col_names.index(self.plot_x_data_name_in_annotation)))
                                # TODO control the x-axis display
                                x_to_plot = 1 + _offset - test_annotation_data_dt_to_export[
                                    :, 2 + self.annotation_col_names.index(self.plot_x_data_name_in_annotation)]
                                # print('len(x_to_plot):{}'.format(len(x_to_plot)))
                                # print('x_to_plot:{}'.format(x_to_plot))

                            # if self.test_only_mode:
                            if False:
                                for k, v in test_index_dict.items():
                                    postfix = k[:-11]
                                    series_range = (v[0], v[1])
                                    title = 'Plot Estimated\nwith input-output offset:{}'.format(_offset) \
                                        if self.plot_title is None else self.plot_title.format(_offset)
                                    _report_path = plot_estmated_true(estimated_y=estimated_y_to_plot,
                                                                      estimated_label=estimated_label_to_plot,
                                                                      model_type=self.model_type, true_y=None,
                                                                      y_max=None, series_range=series_range,
                                                                      error=error_to_plot, error_name=error_name,
                                                                      report_dir_path=report_dir_path,
                                                                      xlabel=self.plot_x_label, ylabel=self.plot_y_label,
                                                                      title=title,
                                                                      postfix='{}_o{}_{}'.format(postfix, _offset, group_value))
                                    if self.cloud_root:
                                        upload_to_cloud(_report_path, self.cloud_root, self.save_root_dir)
                            else:
                                title = 'Plot Ground truth and Estimated\nwith input-output offset:{} for group:{}'.format(
                                    _offset, group_value) if self.plot_title is None \
                                    else self.plot_title.format(_offset, group_value)
                                plot_iter = None if self.test_only_mode or self.prediction_mode else self.global_iter
                                true_y_to_plot = None if self.prediction_mode else true_y_to_plot
                                error_to_plot = None if self.prediction_mode else error_to_plot
                                error_name = None if self.prediction_mode else error_name
                                report_plot_file_path = plot_estmated_true(x=x_to_plot, estimated_y=estimated_y_to_plot,
                                                                           estimated_label=estimated_label_to_plot,
                                                                           model_type=self.model_type,
                                                                           true_y=true_y_to_plot, y_max=None,
                                                                           series_range=None,
                                                                           error=error_to_plot, error_name=error_name,
                                                                           report_dir_path=report_dir_path,
                                                                           xlabel=self.plot_x_label, ylabel=self.plot_y_label,
                                                                           title=title,
                                                                           postfix='o{}_{}'.format(_offset, group_value),
                                                                           iter=plot_iter,
                                                                           x_range=self.plot_x_range, y_range=self.plot_y_range)
                                if report_plot_file_path:
                                    if self.cloud_root:
                                        upload_to_cloud(report_plot_file_path, self.cloud_root, self.save_root_dir)
                                    report_plot_file_list.append(report_plot_file_path)
                                if self.mask_rate is not None and self.mask_rate > 0:
                                    _report_path = plot_estmated_true(x=x_to_plot, estimated_y=estimated_y_to_plot_masked,
                                                                      estimated_label=estimated_label_to_plot_masked,
                                                                      model_type=self.model_type,
                                                                      true_y=true_y_to_plot, y_max=None, series_range=None,
                                                                      error=error_to_plot, error_name=error_name,
                                                                      report_dir_path=report_dir_path,
                                                                      xlabel=self.plot_x_label, ylabel=self.plot_y_label,
                                                                      title=title,
                                                                      postfix='o{}_{}_masked'.format(_offset, group_value),
                                                                      iter=plot_iter,
                                                                      x_range=self.plot_x_range, y_range=self.plot_y_range)
                                    if self.cloud_root:
                                        upload_to_cloud(_report_path, self.cloud_root, self.save_root_dir)

                                # detail plot
                                self.detail_plot = False  # TODO
                                self.detail_plot_size = 24 * 10
                                self.detail_plot_size = 318
                                if self.detail_plot:
                                    x_to_plot_detail = x_to_plot[:self.detail_plot_size] if x_to_plot is not None else None
                                    true_y_to_plot_detail = true_y_to_plot[:self.detail_plot_size] \
                                        if true_y_to_plot is not None else None
                                    _report_path = plot_estmated_true(x=x_to_plot_detail,
                                                                      estimated_y=estimated_y_to_plot[:self.detail_plot_size],
                                                                      estimated_label=estimated_label_to_plot[:self.detail_plot_size]
                                                                      if estimated_label_to_plot is not None else None,
                                                                      model_type=self.model_type,
                                                                      true_y=true_y_to_plot_detail, y_max=None,
                                                                      series_range=None,
                                                                      error=error_to_plot, error_name=error_name,
                                                                      report_dir_path=report_dir_path,
                                                                      xlabel=self.plot_x_label, ylabel=self.plot_y_label,
                                                                      title=title,
                                                                      postfix='l{}_o{}_{}'.format(self.detail_plot_size, _offset, group_value),
                                                                      iter=plot_iter,
                                                                      x_range=self.plot_x_range, y_range=self.plot_y_range)
                                    if self.cloud_root:
                                        upload_to_cloud(_report_path, self.cloud_root, self.save_root_dir)
                                    if self.mask_rate is not None and self.mask_rate > 0:
                                        _report_path = plot_estmated_true(x=x_to_plot_detail,
                                                                          estimated_y=estimated_y_to_plot_masked[:self.detail_plot_size],
                                                                          estimated_label=estimated_label_to_plot_masked[:self.detail_plot_size]
                                                                          if estimated_label_to_plot_masked is not None else None,
                                                                          model_type=self.model_type,
                                                                          true_y=true_y_to_plot_detail, y_max=None,
                                                                          series_range=None,
                                                                          error=error_to_plot, error_name=error_name,
                                                                          report_dir_path=report_dir_path,
                                                                          xlabel=self.plot_x_label, ylabel=self.plot_y_label,
                                                                          title=title,
                                                                          postfix='l{}_o{}_{}_masked'.format(self.detail_plot_size, _offset, group_value),
                                                                          iter=plot_iter,
                                                                          x_range=self.plot_x_range, y_range=self.plot_y_range)
                                        if self.cloud_root:
                                            upload_to_cloud(_report_path, self.cloud_root, self.save_root_dir)
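                                # Note on the error-name convention used above: any error name
                                # containing 'DROP' (e.g. 'RMSE_DROP', a hypothetical value) is
                                # evaluated with calc_range=[0, 9.0], i.e. only on samples whose
                                # true value lies in that range; for example:
                                #     calc_error_with_drop('RMSE_DROP', [1.0, 12.0], [1.5, 0.0], calc_range=[0, 9.0])
                                # drops the 12.0 sample before computing the RMSE.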
                                self.export_prediction = True
                                if self.export_prediction:
                                    _size = len(estimated_y_to_plot)
                                    df_prediction_cols = ['DateTime', 'Estimated', 'MaskedEstimated', 'True']
                                    if self.annotation_col_names is not None:
                                        df_prediction_cols.extend(self.annotation_col_names)
                                    df_prediction = pd.DataFrame(np.zeros([_size, 4 + self.annotation_col_size]),
                                                                 columns=df_prediction_cols)
                                    df_prediction['DateTime'] = data_id_set_to_plot
                                    df_prediction['Estimated'] = estimated_y_to_plot
                                    if self.mask_rate is not None and self.mask_rate > 0:
                                        df_prediction['MaskedEstimated'] = estimated_y_to_plot_masked
                                    df_prediction['True'] = true_y_to_plot
                                    if test_annotation_data is not None:
                                        for i, col in enumerate(self.annotation_col_names):
                                            df_prediction[col] = test_annotation_data_dt_to_export[:, 2 + i]
                                    if plot_iter is None:
                                        output_file_name = 'prediction_o{}_{}.csv'.format(_offset, group_value)
                                    else:
                                        output_file_name = 'prediction_e{}_o{}_{}.csv'.format(self.global_iter, _offset, group_value)
                                    output_file_path = os.path.join(report_dir_path, output_file_name)
                                    df_prediction.to_csv(output_file_path, index=False)
                                    if self.cloud_root:
                                        upload_to_cloud(output_file_path, self.cloud_root, self.save_root_dir)

                        if self.plot_animation:
                            if len(report_plot_file_list) > 0:
                                report_plot_file_list.sort()
                                gif_report_file_path = report_plot_file_list[0] + '.gif'
                                gif_util.generate_gif_animation(src_file_path_list=report_plot_file_list,
                                                                dst_file_path=gif_report_file_path)
                                # TODO
                                if self.cloud_root:
                                    upload_to_cloud(_report_path, self.cloud_root, self.save_root_dir)
                            else:
                                print('No report_plot_file_list to plot_animation')

                # if self.global_iter % 1000 == 999:
                print('test cross entropy %g' % test_total_loss)
                self.test_writer.add_summary(summary, self.global_iter)
                if save_file_path and not (self.test_only_mode or self.prediction_mode):
                    print('save model to save_file_path:{}'.format(save_file_path))
                    self.saver.save(self.sess, save_file_path, global_step=self.global_iter)
                    if self.cloud_root:
                        _paths, _global_iter_got_from_path = get_tf_model_file_paths(save_file_path, self.global_iter)
                        for _path in _paths:
                            upload_to_cloud(_path, self.cloud_root, self.save_root_dir)
                if self.test_only_mode or self.prediction_mode:
                    print('DONE test_only_mode or self.prediction_mode')
                    return
            self.global_iter += 1

    # get default act_func
    def get_act_func(self, act_func_str):
        try:
            ret_func = NNModel.AVAILAVLE_ACT_FUNC_DICT[act_func_str]
        except KeyError:
            ret_func = NNModel.AVAILAVLE_ACT_FUNC_DICT[NNModel.DEFAULT_ACT_FUNC_KEY]
        return ret_func

    def set_act_func_ref_list(self, act_func_list, n_layer):
        # Set default
        act_func_ref_list = np.repeat(self.get_act_func(NNModel.DEFAULT_ACT_FUNC_KEY), [n_layer])
        # Overwrite
        for index, act_func_str in enumerate(act_func_list):
            act_func_ref_list[index] = self.get_act_func(act_func_str)
        # No act func at last layer
        act_func_ref_list[-1] = None
        return act_func_ref_list
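    # Illustrative note (values hypothetical): if DEFAULT_ACT_FUNC_KEY maps to
    # tf.nn.relu, then set_act_func_ref_list(['tanh'], n_layer=3) yields
    # [tanh, relu, None] -- unknown keys fall back to the default activation
    # and the last layer is always left linear.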
    def define_model(self):
        self.is_train = tf.placeholder(dtype=tf.bool, name='is_train')
        with tf.name_scope('model/'):
            self.keep_prob = tf.placeholder(tf.float32)
            tf.summary.scalar('dropout_keep_probability', self.keep_prob)
            self.l1_norm_reg_ratio = tf.placeholder(tf.float32)
            tf.summary.scalar('l1_norm_reg_ratio', self.l1_norm_reg_ratio)

        output_middle_layer = self.define_dnn_model(n_layer=self.n_layer)

        with tf.name_scope('model/'):
            self.y = output_middle_layer
            print('y.shape:', self.y.shape)
            print('self.model_type :', self.model_type)
            if self.model_type == 'REGRESSION':
                self.y = tf.reshape(self.y, [-1])
                print('y reshaped to :', self.y.shape)
            else:
                self.y_label = tf.cast(tf.argmax(self.y, 1), dtype=tf.int32)
                self.y_softmax = tf.nn.softmax(self.y)
            self.learning_rate = tf.placeholder(tf.float32, shape=[])

        with tf.name_scope('precisions/'):
            with tf.name_scope('l1_norm_reg_loss'):
                self.l1_norm_reg_loss = 0.0
                if self.add_l1_norm_reg:
                    self.l1_norm_reg_loss = self.l1_norm_reg_ratio * self.l1_norm
                    tf.summary.scalar('l1_norm_reg_loss', self.l1_norm_reg_loss)
            with tf.name_scope('preactivation_regularization_loss'):
                self.preactivation_regularization_loss = 0.0
                if self.add_preactivation_regularization:
                    self.preactivation_regularization_loss = \
                        self.preactivation_regularization_value_ratio * self.preactivation_regularization_value
                    tf.summary.scalar('preactivation_regularization_loss', self.preactivation_regularization_loss)
            if self.model_type == 'REGRESSION':
                with tf.name_scope('mean_squared_error'):
                    self.mean_squared_error = tf.reduce_mean(tf.square(self.y_ - self.y))
                    tf.summary.scalar('mean_squared_error', self.mean_squared_error)
                with tf.name_scope('mean_absolute_error'):
                    self.mean_absolute_error = tf.reduce_mean(tf.abs(self.y_ - self.y))
                    tf.summary.scalar('mean_absolute_error', self.mean_absolute_error)
                with tf.name_scope('root_mean_squared_error'):
                    self.root_mean_squared_error = tf.sqrt(self.mean_squared_error)
                    tf.summary.scalar('root_mean_squared_error', self.root_mean_squared_error)
                with tf.name_scope('total_loss'):
                    self.total_loss = self.root_mean_squared_error
                    self.total_loss = self.total_loss + self.l1_norm_reg_loss + self.preactivation_regularization_loss
                    tf.summary.scalar('total_loss', self.total_loss)
            else:
                with tf.name_scope('cross_entropy'):
                    self.cross_entropy = tf.reduce_mean(
                        tf.nn.softmax_cross_entropy_with_logits(labels=self.y_, logits=self.y))
                    tf.summary.scalar('cross_entropy', self.cross_entropy)
                with tf.name_scope('total_loss'):
                    self.total_loss = self.cross_entropy
                    self.total_loss = self.total_loss + self.l1_norm_reg_loss + self.preactivation_regularization_loss
                    tf.summary.scalar('total_loss', self.total_loss)

        self.set_optimizer()

        if self.model_type == 'CLASSIFICATION':
            # print('DEBUG self.y.shape:{}, self.y_.shape:{}'.format(self.y.shape, self.y_.shape))
            self.correct_prediction = tf.equal(tf.argmax(self.y, 1), tf.argmax(self.y_, 1))
            self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
            tf.summary.scalar('accuracy', self.accuracy)

        # Merge all the summaries and write them out to
        # /tmp/tensorflow/mnist/logs/mnist_with_summaries (by default)
        self.merged = tf.summary.merge_all()
        self.train_writer = tf.summary.FileWriter(self.log_dir_path + '/train', self.sess.graph)
        self.test_writer = tf.summary.FileWriter(self.log_dir_path + '/test')
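    # The total loss assembled above is, written out (symbols as in the code):
    #     total_loss = base_loss
    #                  + l1_norm_reg_ratio * l1_norm
    #                  + preactivation_regularization_value_ratio * preactivation_regularization_value
    # where base_loss is the RMSE for REGRESSION and the softmax cross entropy otherwise.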
    def set_optimizer(self):
        if self.optimizer is None:
            raise ValueError('Error. self.optimizer is None')
        if self.optimizer == 'AdamOptimizer':
            optimizer_minimize_op = tf.train.AdamOptimizer(self.learning_rate).minimize(
                self.total_loss, var_list=self.trainable_variables)
        elif self.optimizer == 'AdaBound':
            from smalltrain.optimizer.AdaBound import AdaBoundOptimizer
            optimizer_minimize_op = AdaBoundOptimizer(learning_rate=self.learning_rate, final_lr=0.01,
                                                      beta1=0.9, beta2=0.999, amsbound=False).minimize(
                self.total_loss, var_list=self.trainable_variables)
        print('{} will minimize var_list:{}'.format(self.optimizer, self.trainable_variables))
        _train_step = optimizer_minimize_op
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.train_step = tf.group([_train_step, update_ops])

    def define_dnn_model(self, n_layer=4):
        input_width = self.input_width
        col_size = self.col_size
        with tf.name_scope('model/'):
            self.x = tf.placeholder(tf.float32, shape=[None, input_width, col_size])
            self.y_ = tf.placeholder(tf.float32, shape=[None])
            # x_t = tf.transpose(self.x, perm=[0, 2, 3, 1])
            # print('x_t.shape:', x_t.shape)
            x_r = tf.reshape(self.x, [-1, col_size * input_width])
            print('x_r.shape:', x_r.shape)
            x_in_layer = x_r

        nn_layer_names = ['nn_layer_{}'.format(i) for i in range(n_layer)]
        # n layers
        layer_dims = np.ones(n_layer + 1) * 500
        layer_dims[0] = col_size * input_width
        if self.model_type == 'REGRESSION':
            layer_dims[-1] = 1
        else:
            layer_dims[-1] = self.output_classes
        for l in range(n_layer):
            nn_layer_name = nn_layer_names[l]
            input_dim = layer_dims[l]
            output_dim = layer_dims[l + 1]
            with tf.variable_scope(nn_layer_name):
                print('########## {} ########## input_dim:{}, output:{}, input x_in_layer shape:{}'.format(
                    nn_layer_name, input_dim, output_dim, x_in_layer.shape))
                # is_last_layer = (self.model_type == 'CLASSIFICATION') and ((l + 1) == n_layer)
                is_last_layer = ((l + 1) == n_layer)
                x_in_layer = self.nn_layer(x_in_layer, input_dim=input_dim, output_dim=output_dim,
                                           layer_name=nn_layer_name, actf=tf.nn.relu,
                                           last_layer=is_last_layer, has_batch_norm=self.has_batch_norm)
        return x_in_layer
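    # For example (hypothetical shapes): with input_width=16, col_size=8,
    # n_layer=4 and model_type='REGRESSION', layer_dims above becomes
    # [128, 500, 500, 500, 1], i.e. the flattened input feeds three 500-unit
    # hidden layers followed by a single regression output.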
    def nn_layer(self, x, input_dim, output_dim, layer_name, actf=tf.nn.relu, last_layer=True, has_batch_norm=True):
        """Reusable code for making a simple neural net layer.
        It does a matrix multiply, bias add, and then uses ReLU to nonlinearize.
        It also sets up name scoping so that the resultant graph is easy to read,
        and adds a number of summary ops.
        """
        input_dim = int(input_dim)
        output_dim = int(output_dim)
        # Adding a name scope ensures logical grouping of the layers in the graph.
        with tf.name_scope('model/' + layer_name + '/'):
            # This Variable will hold the state of the weights for the layer
            with tf.name_scope('weights'):
                weights = self.weight_variable([input_dim, output_dim], name='weights')
                self.op_add_l1_norm(weights)
                self.variable_summaries(weights)
            if last_layer or (not has_batch_norm):
                with tf.name_scope('biases'):
                    biases = self.bias_variable([output_dim], name='biases')
                    self.variable_summaries(biases)
            with tf.name_scope('Wx_plus_b'):
                if last_layer or (not has_batch_norm):
                    preactivate = tf.matmul(x, weights) + biases
                else:
                    preactivate = tf.matmul(x, weights)
                # tf.summary.histogram('pre_activations', preactivate)
            # tf.summary.histogram('activations', activations)
            if last_layer:
                activations = actf(preactivate, name='activation')
                return activations
            else:
                if has_batch_norm:
                    preactivate = self.batch_norm(preactivate)
                    # preactivate = tf.layers.batch_normalization(preactivate, training=self.is_train)
                activations = actf(preactivate, name='activation')
                dropped = tf.nn.dropout(activations, self.keep_prob)
                return dropped

    def cnn_layer(self, x, layer_num, layer_name, conv_in_channels, conv_out_channels,
                  filter_width=3, pool=2, actf=tf.nn.relu, has_dropout=True, has_batch_norm=True, block_name=None):
        _name = 'model/'
        if block_name:
            _name += block_name + '/'
        _name += layer_name + '/'
        with tf.name_scope(_name):
            with tf.name_scope('W_conv'):
                W_conv = self.weight_variable([1, filter_width, conv_in_channels, conv_out_channels], name="W_conv")
                self.op_add_l1_norm(W_conv)
                self.variable_summaries(W_conv)
            if not has_batch_norm:
                with tf.name_scope('b_conv'):
                    b_conv = self.bias_variable([conv_out_channels], name="b_conv")
                    self.variable_summaries(b_conv)
            print('########## {} ########## input x shape:{} ########## W_conv:{} ########## pool:{} ########## has_batch_norm:{} ########## actf:{} ##########'.format(
                _name, x.shape, W_conv.shape, pool, has_batch_norm, actf))
            h_conv = conv1d(x, W_conv)
            if has_batch_norm:
                h_conv = self.batch_norm(h_conv)
            else:
                h_conv = tf.add(h_conv, b_conv)
            h_pool = max_pool_1x2(h_conv, pool)
            if has_dropout:
                conv_out = tf.nn.dropout(h_pool, self.keep_prob)
            else:
                conv_out = h_pool
            if actf is not None:
                if self.add_preactivation_regularization:
                    self.op_add_preactivation_regularization(
                        preactivation=conv_out,
                        preactivation_maxout=self.preactivation_maxout_list[layer_num])
                with tf.name_scope('actf'):
                    conv_out = actf(conv_out, name='actf')
            return conv_out
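    # Shape convention used by cnn_layer (relying on the module-level conv1d
    # helper defined further below): the time series is laid out as a 4-D
    # tensor [batch, 1, input_width, channels], so a [1, filter_width, in, out]
    # kernel passed to tf.nn.conv2d acts as a 1-D convolution along the time
    # axis; e.g. (hypothetical shapes) [None, 1, 64, 8] -> [None, 1, 64, 16]
    # before pooling.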
    def define_1d_cnn_model(self, n_layer=5, has_res_net=False, has_non_cnn_net=False):
        input_width = self.input_width
        col_size = self.col_size
        if has_non_cnn_net:
            ts_col_index = [0, 3, 4, 6, 7]
            non_ts_col_index = [1, 2, 5]
            # date_block_num,item_id,shop_id,item_cnt_month_normed,item_price,item_category_id,year,month
            ts_col_size = len(ts_col_index)
        else:
            ts_col_index = range(col_size)
            ts_col_size = col_size

        with tf.name_scope('model/'):
            # cnn_input_channels = ts_col_size
            # cnn_input_channels = col_size
            self.x = tf.placeholder(tf.float32, shape=[None, input_width, col_size])
            if self.model_type == 'REGRESSION':
                self.y_ = tf.placeholder(tf.float32, shape=[None])
            else:
                self.y_ = tf.placeholder(tf.float32, shape=[None, self.output_classes])
            # x_3d = tf.reshape(self.x, [-1, input_width, col_size, 1])
            # print('x_3d:', x_3d.shape)  # (?, 37, 10, 10, 1)

            self.cnn_layer_names = ['cnn_layer_{}'.format(i) for i in range(n_layer)]
            first_conv_in = self.col_size
            if self.cnn_channel_size_list is None:
                later_conv_in = self.cnn_channel_size
                conv_in = np.ones([n_layer + 1], dtype="int32") * later_conv_in
                conv_in[0] = first_conv_in
                # conv_in[1] = first_conv_in  # ResNet ver 1.0 (~2019/01/08 16:01)
                conv_out_size = conv_in[-1]  # ResNet ver 2.0 (2019/01/08 16:58~)
            else:
                print('n_layer:{}'.format(n_layer))
                conv_in = np.hstack((first_conv_in, np.asarray(self.cnn_channel_size_list, dtype='int32')))
                print('conv_in:{}'.format(conv_in))
                conv_out_size = conv_in[-1]
            conved_width = self.input_width
            for pool in self.pool_size_list:
                conved_width /= pool
            conved_width = int(conved_width)

            # Input Layer
            # x = tf.transpose(self.x, perm=[0, 2, 1])
            x = self.x
            # print('x shape before delete_non_ts_col_tensor:', x.shape)
            if has_non_cnn_net:
                delete_non_ts_col_tensor = tf.cast([[[1, 0, 0, 1, 1, 0, 1, 1]]], tf.float32)
                delete_ts_col_tensor = tf.cast([[[0, 1, 1, 0, 0, 1, 0, 0]]], tf.float32)
                x = tf.multiply(x, delete_non_ts_col_tensor)
                non_ts_x = tf.multiply(x, delete_ts_col_tensor)
                # non_ts_x = tf.reshape(non_ts_x, [-1, col_size])
                non_ts_x = tf.reshape(non_ts_x, [-1, col_size * input_width])
                print('x shape after delete_non_ts_col_tensor:', x.shape)
                print('non_ts_x shape after delete ts_col_tensor:', non_ts_x.shape)
            x = tf.reshape(x, [-1, 1, input_width, first_conv_in])

            if self.has_res_net:
                n_cnn_layers = len(self.cnn_layer_names)
                print('n_cnn_layers:{}'.format(n_cnn_layers))
                N_CNN_LAYERS_IN_RES_BLOCK = 2  # TODO
                n_res_block = int(n_cnn_layers / N_CNN_LAYERS_IN_RES_BLOCK)
                print('n_res_block:{}'.format(n_res_block))
                l = 0
                # add cnn_layer without residual
                n_cnn_layers_without_res_net = int(n_cnn_layers - n_res_block * N_CNN_LAYERS_IN_RES_BLOCK)
                print('n_cnn_layers_without_res_net:{}'.format(n_cnn_layers_without_res_net))
                if n_cnn_layers_without_res_net >= 1:
                    print('add cnn_layer {} without residual'.format(n_cnn_layers_without_res_net))
                    while l < n_cnn_layers_without_res_net:
                        x = self.cnn_layer(x, layer_num=l, layer_name=self.cnn_layer_names[l],
                                           conv_in_channels=conv_in[l], conv_out_channels=conv_in[l + 1],
                                           filter_width=self.filter_width, pool=self.pool_size_list[l],
                                           actf=self.act_func_ref_list[l], has_batch_norm=self.has_batch_norm)
                        l += 1
                # add res_block
                while l < n_cnn_layers:
                    res_block_index = int(l / N_CNN_LAYERS_IN_RES_BLOCK)
                    block_name = 'res_block_{}'.format(res_block_index)
                    with tf.name_scope('model/' + block_name + '/'):
                        with tf.name_scope('x_id'):
                            x_id = tf.identity(x, name='x_id')
                            self.variable_summaries(x_id)
                    # path 1. residual
                    # layer_name = 'residual'
                    # with tf.name_scope('model/' + layer_name + '/'):
                    #     res_in = tf.identity(x, name='identity_' + x.name)
                    # path 1. cnn_layers
                    for i in range(N_CNN_LAYERS_IN_RES_BLOCK):
                        is_last_layer_in_res_block = (i == N_CNN_LAYERS_IN_RES_BLOCK - 1)
                        x = self.cnn_layer(x, layer_num=l + i, layer_name=self.cnn_layer_names[l + i],
                                           block_name=block_name,
                                           conv_in_channels=conv_in[l + i], conv_out_channels=conv_in[l + i + 1],
                                           filter_width=self.filter_width, pool=self.pool_size_list[l + i],
                                           actf=(None if is_last_layer_in_res_block else self.act_func_ref_list[l]),
                                           has_batch_norm=self.has_batch_norm)
                    # path 1. residual
                    layer_name = 'residual'
                    with tf.name_scope('model/' + block_name + '/' + layer_name + '/'):
                        # res_in = tf.identity(x_id, name='identity_' + x.name)
                        # output residual net(short_cut)
                        conv_in_channels = conv_in[l]
                        conv_out_channels = conv_in[l + N_CNN_LAYERS_IN_RES_BLOCK]
                        if pow(conv_in_channels - conv_out_channels, 2) < 1e-3:
                            # res_out = tf.identity(x_id, name='res_out')
                            with tf.name_scope('res_out'):
                                # res_out = x_id
                                res_out = tf.identity(x_id, name='res_out')
                        else:
                            pool_res = np.asarray(self.pool_size_list[l:l + N_CNN_LAYERS_IN_RES_BLOCK]).prod()
                            pooled_input_1 = tf.nn.avg_pool(x_id, ksize=[1, 1, pool_res, 1],
                                                            strides=[1, 1, pool_res, 1], padding='VALID')
                            print('pooled_input_1 shape:{}'.format(pooled_input_1.shape))
                            # Zero-padding
                            padded_input = tf.pad(pooled_input_1,
                                                  [[0, 0], [0, 0], [0, 0], [0, (conv_out_channels - conv_in_channels)]])
                            print('padded_input shape:{}'.format(padded_input.shape))
                            with tf.name_scope('res_out'):
                                res_out = tf.identity(padded_input, name='res_out')
                        x_add_res = tf.add(x, res_out, name='x_add_res')
                        # add_preactivation_regularization
                        if self.add_preactivation_regularization:
                            self.op_add_preactivation_regularization(
                                preactivation=x_add_res,
                                preactivation_maxout=self.preactivation_maxout_list[l + N_CNN_LAYERS_IN_RES_BLOCK])
                        # activation
                        with tf.name_scope('actf_after_add'):
                            actf = self.act_func_ref_list[l]
                            actf_after_add = actf(x_add_res, name='actf_after_add')
                        print('########## layer_name:{} ########## input x shape:{} ########## conv_in_channels:{} ########## conv_out_channels:{} ########## res_out:{} ########## with activation'.format(
                            layer_name, x.shape, conv_in_channels, conv_out_channels, res_out.shape))
                    l += N_CNN_LAYERS_IN_RES_BLOCK
                    x = actf_after_add
            else:
                for l, cnn_layer_name in enumerate(self.cnn_layer_names):
                    x = self.cnn_layer(x, layer_num=l, layer_name=cnn_layer_name,
                                       conv_in_channels=conv_in[l], conv_out_channels=conv_in[l + 1],
                                       filter_width=self.filter_width, pool=self.pool_size_list[l],
                                       # actf=tf.nn.relu,
                                       actf=self.act_func_ref_list[l],
                                       has_batch_norm=self.has_batch_norm)

            layer_name = 'fc'
            with tf.name_scope('model/' + layer_name + '/'):
                # fc
                self.fc_output_dim = 500
                conv_out_flat_nodes = conved_width * conv_out_size
                with tf.name_scope('conv_out_flat'):
                    conv_out_flat = tf.reshape(x, [-1, conv_out_flat_nodes], name='conv_out_flat')
                    self.variable_summaries(conv_out_flat)
                ## Readout Layer
                if self.model_type == 'REGRESSION':
                    y_out_dim = 1
                else:
                    y_out_dim = self.output_classes
                with tf.name_scope('W_fc2'):
                    W_fc2 = self.weight_variable([conv_out_flat_nodes, y_out_dim], name='W_fc2')
                    self.op_add_l1_norm(W_fc2)
                    self.variable_summaries(W_fc2)
                with tf.name_scope('b_fc2'):
                    b_fc2 = self.bias_variable([y_out_dim], name='b_fc2')
                    self.variable_summaries(b_fc2)
                if has_non_cnn_net:
                    non_ts_x_1_in_dim = col_size * input_width
                    non_ts_x_1_out_dim = 500
                    non_ts_x = self.nn_layer(non_ts_x, input_dim=non_ts_x_1_in_dim, output_dim=non_ts_x_1_out_dim,
                                             layer_name='non_ts_x_1', actf=tf.nn.relu,
                                             last_layer=False, has_batch_norm=self.has_batch_norm)
                    non_ts_x = self.nn_layer(non_ts_x, input_dim=non_ts_x_1_out_dim, output_dim=conv_out_flat_nodes,
                                             layer_name='non_ts_x_2', actf=tf.nn.relu,
                                             last_layer=False, has_batch_norm=self.has_batch_norm)
                    conv_out_flat = tf.add(conv_out_flat, non_ts_x)
                output_middle_layer = tf.matmul(conv_out_flat, W_fc2) + b_fc2
                print('########## {} ########## input x shape:{} ########## W_fc2:{} ########## b_fc2:{} ##########'.format(
                    layer_name, x.shape, W_fc2.shape, b_fc2.shape))
        return output_middle_layer
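    # Shortcut matching in the residual block above, as a small sketch
    # (shapes hypothetical): when a block maps 16 -> 32 channels with a total
    # pool factor of 4, the identity path is average-pooled by 4 along the
    # time axis and then zero-padded with 32 - 16 = 16 extra channels via
    # tf.pad(..., [[0, 0], [0, 0], [0, 0], [0, 16]]) before the add.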
    def op_add_l1_norm(self, w_var):
        if self.add_l1_norm_reg:
            self.l1_norm = self.l1_norm + tf.reduce_sum(tf.abs(w_var))

    def op_add_preactivation_regularization(self, preactivation, preactivation_maxout=100.0):
        self.add_preactivation_regularization = True
        if self.add_preactivation_regularization:
            self.preactivation_regularization_value = self.preactivation_regularization_value + tf.maximum(
                tf.reduce_max(preactivation) - preactivation_maxout, 0.0)

    def define_cc_model(self):
        input_width = self.input_width
        col_size = self.col_size
        with tf.name_scope('model/'):
            self.x = tf.placeholder(tf.float32, shape=[None, input_width, col_size])
            if self.model_type == 'REGRESSION':
                self.y_ = tf.placeholder(tf.float32, shape=[None])
            else:
                self.y_ = tf.placeholder(tf.float32, shape=[None, self.output_classes])
            # return Carbon Copy
            initial = tf.constant(0.0, shape=[1])
            dummy_v = tf.Variable(initial, name='dummy_v')
            x_flat = tf.reshape(self.x[:, -1, 0], [-1, 1])
            x = x_flat + 1e-8 * dummy_v
        return x

    def batch_norm(self, x, momentum=None, eps=None, decay=None):
        # For compatibility with 0.1.X (`decay`) and 0.2.X (`momentum`)
        if momentum is None:
            momentum = decay
        bn = None
        tf_major_version = int(tf_v2.__version__.split('.')[0])
        if tf_major_version >= 2 and tf_major_version < 3:
            bn = self.batch_norm_v020(x, momentum=momentum, eps=eps)
        elif tf_major_version >= 1 and tf_major_version < 2:
            bn = self.batch_norm_v013(x, decay=momentum, eps=eps)
        print('batch_norm with tf version: {}'.format(tf_major_version))
        return bn

    def batch_norm_v020(self, x, momentum=None, eps=None):
        momentum = momentum or self.bn_decay  # 1st, set class field value
        momentum = momentum or NNModel.DEFAULT_BN_DECAY  # 2nd, set default value
        eps = eps or self.bn_eps  # 1st, set class field value
        eps = eps or NNModel.DEFAULT_BN_ESP  # 2nd, set default value
        # Use TensorFlow 1.X compatible API
        tf.disable_v2_behavior()
        return tf.layers.batch_normalization(x, momentum=momentum, epsilon=eps, training=self.is_train)
        # TODO use TensorFlow 2.X API
        # tf.keras.layers.BatchNormalization._USE_V2_BEHAVIOR = False
        # return tf.keras.layers.BatchNormalization(momentum=momentum, epsilon=eps,
        #                                           beta_initializer="zeros",
        #                                           gamma_initializer="ones"
        #                                           )(inputs=x, training=self.is_train)

    def batch_norm_v013(self, x, decay=None, eps=None):
        if decay is None:
            decay = self.bn_decay  # 1st, set class field value
        if decay is None:
            decay = NNModel.DEFAULT_BN_DECAY  # 2nd, set default value
        if eps is None:
            eps = self.bn_eps  # 1st, set class field value
        if eps is None:
            eps = NNModel.DEFAULT_BN_ESP  # 2nd, set default value
        shape = x.get_shape().as_list()
        out_dim = shape[-1]
        print('shape:{}, out_dim:{}'.format(shape, out_dim))
        assert len(shape) in [2, 4]
        with tf.variable_scope('bn'):
            beta = tf.Variable(tf.zeros([out_dim]), name='beta', trainable=True)
            gamma = tf.Variable(tf.ones([out_dim]), name='gamma', trainable=True)
            if len(shape) == 2:
                moments_shape = [0]
            elif len(shape) == 4:
                moments_shape = [0, 1, 2]
            batch_mean, batch_var = tf.nn.moments(x, moments_shape, name='moments')
            ema = tf.train.ExponentialMovingAverage(decay=decay)

            def mean_var_with_update():
                ema_apply_op = ema.apply([batch_mean, batch_var])
                with tf.control_dependencies([ema_apply_op]):
                    return tf.identity(batch_mean), tf.identity(batch_var)

            mean, var = tf.cond(self.is_train, mean_var_with_update,
                                lambda: (ema.average(batch_mean), ema.average(batch_var)))
            bn = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
        return bn

    def weight_variable(self, shape, name, stddev=0.1):
        initial = tf.truncated_normal(shape, stddev=stddev)
        return tf.Variable(initial, name=name)
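    # define_cc_model above builds a persistence ("carbon copy") baseline: the
    # prediction is simply the last observed value of the target channel,
    # y = x[:, -1, 0]. The 1e-8 * dummy_v term only exists so the graph has at
    # least one trainable variable; numerically the output is the identity,
    # e.g. (hypothetical value) an input window ending in 3.2 predicts 3.2.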
    def bias_variable(self, shape, name, value=0.1):
        initial = tf.constant(value, shape=shape)
        return tf.Variable(initial, name=name)

    def variable_summaries(self, var):
        """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
        with tf.name_scope('summaries'):
            mean = tf.reduce_mean(var)
            tf.summary.scalar('mean', mean)
            with tf.name_scope('stddev'):
                stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
            tf.summary.scalar('stddev', stddev)
            tf.summary.scalar('max', tf.reduce_max(var))
            tf.summary.scalar('min', tf.reduce_min(var))
            tf.summary.histogram('histogram', var)

    def add_summarize_layer_op(self, layer):
        layer_name = layer.name
        if self.summarize_layer_name_list is None:
            print('No need to add_summarize_layer_op')
            return
        print('layer_name:{}'.format(layer_name))
        for _summarize_layer_name in self.summarize_layer_name_list:
            if _summarize_layer_name.find(layer_name) >= 0 or layer_name.find(_summarize_layer_name) >= 0:
                _min_value = tf.reduce_min(layer)
                _max_value = tf.reduce_max(layer)
                _shape = tf.shape(layer)
                _mean = tf.reduce_mean(layer)
                _stddev = tf.sqrt(tf.reduce_mean(tf.square(layer - _mean)))
                _raw_value = tf.reshape(layer, [_shape[0], -1])
                _parameters = tf.shape(_raw_value)[1]
                _raw_value_0dec_rounded = tf.cast(tf.round(_raw_value), tf.int32)
                _raw_value_1dec_rounded = tf.cast(tf.round(_raw_value * 10) * 0.1, tf.float32)
                _raw_value_2dec_rounded = tf.cast(tf.round(_raw_value * 100) * 0.01, tf.float32)
                summarize_layer_op_dict = {
                    'layer_name': str(_summarize_layer_name),
                    'shape': _shape,
                    'parameters': _parameters,
                    'min_value': _min_value,
                    'max_value': _max_value,
                    'mean': _mean,
                    'stddev': _stddev,
                    '_raw_value': _raw_value,
                    'mean_value_0dec_rounded': tf.reduce_mean(_raw_value_0dec_rounded),
                    'mean_value_1dec_rounded': tf.reduce_mean(_raw_value_1dec_rounded),
                    'mean_value_2dec_rounded': tf.reduce_mean(_raw_value_2dec_rounded),
                    '_raw_value_0dec_rounded': _raw_value_0dec_rounded,
                    '_raw_value_1dec_rounded': _raw_value_1dec_rounded,
                    '_raw_value_2dec_rounded': _raw_value_2dec_rounded,
                }
                try:
                    self.summarize_layer_op_obj_list.append(summarize_layer_op_dict)
                except AttributeError:
                    self.summarize_layer_op_obj_list = [summarize_layer_op_dict]
        return

    def summarize_layer(self, feed_dict, name='', export_as_json=True, report_dir_path='report/'):
        # usage
        # self.summarize_layer(feed_dict={self.x: test_data, self.y_: test_values,
        #                                 self.keep_prob: 1.0,
        #                                 self.learning_rate: learning_rate,
        #                                 self.l1_norm_reg_ratio: l1_norm_reg_ratio,
        #                                 self.is_train: False})
        # TODO naming summarize operation in the case of minibatch evaluation
        try:
            if self.summarize_layer_op_obj_list is None:
                print('No need to summarize_layer')
                return
        except AttributeError as e:
            print('No operation to summarize.')
            return
        print('TODO summarize_layer')
        if self.debug_mode:
            print('len of self.summarize_layer_op_obj_list:{}'.format(len(self.summarize_layer_op_obj_list)))
        # run each summary operation and put the result into a temporary dict
        summarize_layer_json = {'summary_layers': []}
        for summarize_layer_op_dict in self.summarize_layer_op_obj_list:
            layer_name = summarize_layer_op_dict['layer_name']
            print('layer_name:{}'.format(layer_name))
            summarize_each_layer = {'layer_name': layer_name}
            variations = {}
            for k, _op in summarize_layer_op_dict.items():
                try:
                    op_result = self.sess.run(_op, feed_dict=feed_dict)
                    if str(k) == '_raw_value':
                        raw_value = op_result
                        if self.debug_mode:
                            raw_value_0 = raw_value[0].reshape((-1))
                            variations_with_no_round = {}
                            variations_with_no_round['raw_value_0'] = raw_value_0[:5]
                            variations_with_no_round['len of raw_value_0'] = len(raw_value_0)
                            variations_with_no_round['raw_value.shape'] = raw_value.shape
                            variations_with_no_round['raw_value_head'] = raw_value[:5, :5]
                            hash_value_0 = hash_array.float_v_to_hash(raw_value_0, round_dec=16)
                            variations_with_no_round['hash_value_0'] = hash_value_0
                        print('TODO float_v_to_hash with raw_value with shape:{}'.format(raw_value.shape))
                        hash_value_list = [hash_array.float_v_to_hash(v.reshape((-1)), round_dec=16) for v in raw_value]
                        if self.debug_mode:
                            variations_with_no_round['hash_value_list_head'] = hash_value_list[:5]
                            variations['no_round'] = variations_with_no_round
                        variations['num_raw_values'] = len(np.unique(hash_value_list))
                        summarize_each_layer['variations'] = variations
                    elif str(k).find('_raw_value') >= 0 and str(k).find('_rounded') > 0:
                        raw_value = op_result
                        round_dec = int(k[k.find('dec') - 1])
                        variations_with_round_dec = {}  # variation information for each rounding decimal
                        if self.debug_mode:
                            raw_value_0 = raw_value[0].reshape((-1))
                            hash_value_0 = hash_array.float_v_to_hash(raw_value_0, round_dec)
                            variations_with_round_dec['hash_value_{}dec_0'.format(round_dec)] = hash_value_0
                        variations['round_dec_{}'.format(round_dec)] = variations_with_round_dec
                        print('TODO float_v_to_hash with raw_value rounded {}'.format(round_dec))
                        hash_value_list = [hash_array.float_v_to_hash(v.reshape((-1)), round_dec) for v in raw_value]
                        if self.debug_mode:
                            variations_with_round_dec['hash_value_list_{}dec_head'.format(round_dec)] = hash_value_list[:5]
                        variations['num_{}dec_rounded_values'.format(round_dec)] = len(np.unique(hash_value_list))
                        summarize_each_layer['variations'] = variations
                    elif str(k) not in ['layer_name']:
                        _summary = op_result
                        summarize_each_layer[k] = _summary
                except ValueError as e:
                    print('Could not summarize layers because of error:{}'.format(e))
            summarize_layer_json['summary_layers'].append(summarize_each_layer)
        if export_as_json:
            _export_path = os.path.join(report_dir_path, 'summary_layers_{}.json'.format(self.global_iter))
            with open(_export_path, 'w') as f:
                json.dump(summarize_layer_json, f, ensure_ascii=False, cls=ExtendedJSONEncoder,
                          indent=4, separators=(',', ': '))
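    # The hash trick above (hash_array.float_v_to_hash, assumed to hash a
    # float vector after rounding to round_dec decimals) counts how many
    # *distinct* per-sample outputs a layer produces: if, say, 1000 test
    # samples yield only 3 unique hashes at 2 decimals, the layer has
    # effectively collapsed to a few output patterns.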
    def connect_sub_model(self, layer):
        # Add sub model
        if self.sub_model_url is None or self.sub_model_allocation < 1e-4:
            return layer

        # Define sub model
        def build_sub_model(x):
            module = hub.Module(self.sub_model_url, tags={"train"})
            height, width = hub.get_expected_image_size(module)
            x = tf.image.resize_images(x, (height, width))
            print('build_sub_model x after augment_data shape', x.shape)
            return module(x)

        if layer.name in [self.sub_model_input_point, '{}:0'.format(self.sub_model_input_point)]:
            with tf.name_scope('model/'):
                # In case no allocation to sub model
                if self.sub_model_allocation == 0.0:
                    self.sub_model_output = tf.multiply(layer, tf.constant(0, dtype=layer.dtype))
                else:
                    self.sub_model_output = build_sub_model(layer)
                # In case no allocation to main model
                if self.sub_model_allocation == 1.0:
                    layer = tf.multiply(layer, tf.constant(0, dtype=layer.dtype))
                print('sub_model_output shape', self.sub_model_output.shape)
        elif layer.name in [self.sub_model_output_point, '{}:0'.format(self.sub_model_output_point)]:
            with tf.name_scope('model/'):
                self.sub_model_output = tf.multiply(self.sub_model_output,
                                                    tf.constant(self.sub_model_allocation,
                                                                dtype=self.sub_model_output.dtype),
                                                    name='sub_model_output')
            main_model_output_name_scope = get_name_scope_of_tensor(layer)
            with tf.name_scope('{}/'.format(main_model_output_name_scope)):
                layer = tf.multiply(layer, tf.constant((1.0 - self.sub_model_allocation), dtype=layer.dtype),
                                    name='main_model_output')
                layer = tf.add(layer, self.sub_model_output, name='add_sub_model_output')
        return layer


## Convolution and Pooling

# 1d cnn
def conv1d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
    # or return tf.nn.conv1d(x, W, stride=1, padding='SAME')


def max_pool_1x2(x, pool=2):
    # pool only for time-series direction
    return tf.nn.max_pool(x, ksize=[1, 1, pool, 1], strides=[1, 1, pool, 1], padding='SAME')


# 3d cnn
def conv3d(x, W):
    return tf.nn.conv3d(x, W, strides=[1, 1, 1, 1, 1], padding='SAME')


# Pooling: max pooling over 2x2 blocks
def max_pool_pxpxp(x, pool=2):
    return tf.nn.max_pool3d(x, ksize=[1, pool, pool, pool, 1], strides=[1, pool, pool, pool, 1], padding='SAME')


def get_name_scope_of_tensor(t):
    _name = t.name
    return _name[:find_all(_name, '/')[-1]]


def find_all(target_str, str_to_find):
    return [i for i in range(len(target_str)) if target_str.startswith(str_to_find, i)]


def log_scalar(writer, tag, value, step):
    """Log a scalar variable.

    Parameters
    ----------
    tag : basestring
        Name of the scalar
    value : float
        Value of the scalar
    step : int
        training iteration
    """
    summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
    writer.add_summary(summary, step)


import matplotlib.pyplot as plt


def plot_estmated_true(x, estimated_y, iter=None, estimated_label=None, model_type='CLASSIFICATION',
                       true_y=None, y_max=None, series_range=None, error=None, error_name='error',
                       target_data_set_name=None, report_dir_path='report/',
                       xlabel='time series', ylabel='Label', title=None, postfix='',
                       x_range=None, y_range=None, debug_mode=False):
    # Nothing to plot
    if len(x) < 2 or len(estimated_y) < 2:
        return
    if series_range:
        if true_y is not None:
            true_y = true_y[series_range[0]:series_range[1]]
        estimated_y = estimated_y[series_range[0]:series_range[1]]
        if estimated_label is not None:
            estimated_label = estimated_label[series_range[0]:series_range[1]]
    # Correct detections (true positive), misses (false negative) and false detections (false positive)
    plt.clf()
    # print('true_y.max():{}'.format(true_y.max()))
    # print('true_y.min():{}'.format(true_y.min()))
    # print('estimated_y.max():{}'.format(estimated_y.max()))
    # print('estimated_y.min():{}'.format(estimated_y.min()))
    if y_max is None:
        y_max = estimated_y.max()
        y_min = estimated_y.min()
        if true_y is not None:
            y_max = max(y_max, true_y.max())
            y_min = min(y_min, true_y.min())
        if estimated_label is not None:
            # print('estimated_label.max():{}'.format(estimated_label.max()))
            # print('estimated_label.min():{}'.format(estimated_label.min()))
            y_max = max(y_max, estimated_label.max())
            y_min = y_max * -0.05
        else:
            y_min = y_min - math.fabs(y_max * 0.05)
    x = list(range(len(estimated_y))) if x is None else x
    if x_range is None:
        x_range = [min(x), max(x)]
    plt.xlim(x_range[0], x_range[1])
    if debug_mode:
        print('y_min:{}, y_max:{}, estimated_y:{}, true_y:{}'.format(y_min, y_max, estimated_y, true_y))
    # delete points that are out of y_range
    if y_range is not None:
        if true_y is not None:
            true_y = [y if y >= y_range[0] and y <= y_range[1] else np.nan for y in true_y]
    if y_range is None:
        y_range = [y_min, y_max * 1.35]
    plt.ylim(y_range[0], y_range[1])
    y_max = y_range[1]
    if true_y is not None:
        if model_type in ['CLASSIFICATION_ONOFF']:
            plt.fill_between(x, true_y, 0, where=0 < true_y, color='#00a000', label="True positive")
        elif model_type in ['REGRESSION', 'CLASSIFICATION']:
            plt.plot(x, true_y, color='#00a000', label="True")
    if model_type == 'CLASSIFICATION_ONOFF':
        estimated_y_mean = [estimated_y[i][1] for i in x]
        if true_y is not None:
            plt.fill_between(x, true_y, estimated_y_mean, where=true_y < estimated_y_mean,
                             color='#e0a000', label="False positive")
            plt.fill_between(x, true_y, estimated_y_mean, where=estimated_y_mean < true_y,
                             color='#ff5000', label="False negative")
        else:
            plt.fill_between(x, estimated_y_mean, 0, color='#e06060', label="Positive")
    elif model_type == 'CLASSIFICATION':
        plt.plot(x, estimated_label, color='#e06060', label="Estimated")
    elif model_type == 'REGRESSION':
        plt.plot(x, estimated_y, color='#e06060', label="Estimated")
    plt.legend()
    if error:
        plt.text(x_range[0] + 2, y_max * 0.8, '{} :{:.4f}'.format(error_name, error))
    # if true_y is not None:
    #     _error = np.sqrt(np.mean((true_y - estimated_y)**2))
    #     plt.text(x_range[0]+2, y_max * 0.75, '{} in this plot:{:.4f}'.format(error_name, _error))
    if target_data_set_name:
        plt.text(x_range[0] + 2, y_max * 0.8, 'Data set {}'.format(target_data_set_name))
    if title is None:
        title = 'Plot Estimated' if true_y is None else 'Plot Ground truth and Estimated'
    plt.title(title)
    if xlabel is not None:
        plt.xlabel(xlabel)
    if ylabel is not None:
        plt.ylabel(ylabel)
    if iter is None:
        _report_path = report_dir_path + 'test_plot##POSTFIX##.png'
    else:
        _report_path = report_dir_path + 'test_plot_e{}##POSTFIX##.png'.format(iter)
    _report_path = _report_path.replace('##POSTFIX##', '_{}'.format(postfix))
    plt.savefig(_report_path)
    plt.close()
    return _report_path


def scatter_plot_estmated_true(i, true_y, estimated_y, rmse, report_dir_path='report/',
                               plot_dense_area=True, rmse_latest_min=None, rmse_latest_max=None):
    plt.clf()
    xy_min = min(min(true_y), min(estimated_y))
    xy_max = max(max(true_y), max(estimated_y))
    xy_width = xy_max - xy_min
    plt.xlim(0, xy_max)
    plt.ylim(0, xy_max)
    plt.scatter(true_y, estimated_y)
    plt.plot([0, xy_max], [0, xy_max], color="red", linewidth=2, linestyle="dashed")
    # plt.legend()
    true_y_mean = true_y.mean()
    plt.text(xy_width * 0.1, xy_max - xy_width * 0.1, 'RMSE :{:.4f}, AVE(GT) :{:.4f}'.format(rmse, true_y_mean))
    if rmse_latest_min:
        plt.text(xy_width * 0.1, xy_max - xy_width * 0.15, 'Min of latest RMSE :{:.4f}'.format(rmse_latest_min))
    if rmse_latest_max:
        plt.text(xy_width * 0.1, xy_max - xy_width * 0.2, 'Max of latest RMSE :{:.4f}'.format(rmse_latest_max))
    plt.title('Scatter plot Ground truth vs Estimated')
    plt.xlabel('Ground truth')
    plt.ylabel('Estimated')
    plt.savefig(report_dir_path + 'test_scatter_plot_e{}.png'.format(i))
    plt.close()

    if plot_dense_area:
        # zoom into the dense area (mean +/- 2 std of the ground truth)
        true_y_std = true_y.std()
        true_y_5p = true_y_mean - 2 * true_y_std
        true_y_95p = true_y_mean + 2 * true_y_std
        plt.clf()
        xy_min = true_y_5p
        xy_max = true_y_95p
        xy_width = xy_max - xy_min
        plt.xlim(0, xy_max)
        plt.ylim(0, xy_max)
        plt.scatter(true_y, estimated_y)
        plt.plot([0, xy_max], [0, xy_max], color="red", linewidth=2, linestyle="dashed")
        plt.text(xy_width * 0.1, xy_max - xy_width * 0.1, 'RMSE :{:.4f}, AVE(GT) :{:.4f}'.format(rmse, true_y_mean))
        if rmse_latest_min:
            plt.text(xy_width * 0.1, xy_max - xy_width * 0.15, 'Min of latest RMSE :{:.4f}'.format(rmse_latest_min))
        if rmse_latest_max:
            plt.text(xy_width * 0.1, xy_max - xy_width * 0.2, 'Max of latest RMSE :{:.4f}'.format(rmse_latest_max))
        plt.title('Scatter plot Ground truth vs Estimated')
        plt.xlabel('Ground truth')
        plt.ylabel('Estimated')
        plt.savefig(report_dir_path + 'test_scatter_plot_dense_e{}.png'.format(i))
        plt.close()


# def plot_estmated_true(i, estimated_y, estimated_label, true_y=None, y_max=None, series_range=None, rmse=None,
#                        target_data_set_name=None, report_dir_path='report/', xlabel='time series', ylabel='Label',
#                        title=None, postfix=''):
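# Quick illustration of the helpers above (values hypothetical):
#     find_all('model/fc/W_fc2:0', '/')  ->  [5, 8]
#     get_name_scope_of_tensor(t)        ->  'model/fc'   (for t.name == 'model/fc/W_fc2:0')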
def plot_data(input_data, output_data, y_max=None, series_range=None, report_dir_path='report/',
              xlabel='time series', ylabel_input_data='value', ylabel_output_data='Label (0 or 1)',
              title=None, postfix=''):
    ts_axis = 0  # time-series axis
    ts_history_axis = 1  # axis of the history within each time-series window
    channel_axis = 2  # channel axis
    plt.clf()
    if y_max is None:
        y_max = max(input_data.max(), output_data.max())
    x = list(range(input_data.shape[ts_axis]))
    # plt.ylim(y_max * -0.05, y_max * 1.35)
    # draw on figure 1
    plt.figure(1)
    # Divide vertically into 2 and horizontally into 1, and draw on the 1st division
    plt.subplot(211)
    plt.plot(x, output_data, label="output_data")
    plt.legend()
    # if target_data_set_name:
    #     plt.text(2, y_max * 1.2, 'Data set {}'.format(target_data_set_name))
    if title is None:
        title = 'Output data(Upper part) and Input data(Lower part)'
    plt.title(title)
    plt.ylabel(ylabel_output_data)
    # draw on the 2nd division
    plt.subplot(212)
    ts_history = 0
    for channel_index in range(input_data.shape[channel_axis]):
        plt.plot(x, input_data[:, ts_history, channel_index], label="channel_index:{}".format(channel_index))
    plt.legend()
    # if target_data_set_name:
    #     plt.text(2, y_max * 1.2, 'Data set {}'.format(target_data_set_name))
    plt.xlabel(xlabel)
    plt.ylabel(ylabel_input_data)
    # show figure 1
    # plt.show()
    _report_path = report_dir_path + 'test_plot_##POSTFIX##.png'
    _report_path = _report_path.replace('##POSTFIX##', '_{}'.format(postfix))
    plt.savefig(_report_path)
    plt.close()


def rmse_by_day(target_df, sum_unit='day', datetime_col_name='DateTime'):
    datetime_list = target_df[datetime_col_name]
    if sum_unit == 'day':
        dt_group_series = [datetime(dt.year, dt.month, dt.day, 0, 0, 0) for dt in datetime_list]
    work_df = target_df.copy()
    work_df['DateTimeGroup'] = dt_group_series
    work_df = work_df.groupby(by='DateTimeGroup').mean()
    work_df['DateTime'] = work_df.index  # restore the DateTime column from the group index
    rmse = np.sqrt(np.mean((work_df['True'] - work_df['Estimated']) ** 2))
    return rmse, work_df


def calc_error_with_drop(error_str, true_list, estimated_list, calc_range=None):
    if 'MAE' in error_str.split('_'):
        ret_error = calc_mean_absolute_error_with_drop(true_list, estimated_list, calc_range)
    elif 'RMSE' in error_str.split('_'):
        ret_error = calc_rmse_with_drop(true_list, estimated_list, calc_range)
    else:
        raise ValueError('error_str:{} contains no error definition'.format(error_str))
    return ret_error


def in_the_rank(rank_boundary, v, lower_equals=True):
    if lower_equals:
        return (rank_boundary[0] <= v and rank_boundary[1] > v)
    else:
        return (rank_boundary[0] < v and rank_boundary[1] >= v)


def calc_mean_absolute_error_with_drop(t, e, calc_range=None):
    if calc_range is None:
        index_to_calc = list(range(len(t)))
    else:
        index_to_calc = [i for i, x in enumerate(t) if x >= calc_range[0] and x <= calc_range[1]]
    return np.asarray([math.fabs(t[i] - e[i]) for i in index_to_calc]).mean()


def calc_rmse_with_drop(t, e, calc_range=None):
    if calc_range is None:
        index_to_calc = list(range(len(t)))
    else:
        index_to_calc = [i for i, x in enumerate(t) if x >= calc_range[0] and x <= calc_range[1]]
    return math.sqrt(np.asarray([math.pow(t[i] - e[i], 2) for i in index_to_calc]).mean())


def calc_accuracy_with_drop(t, e, calc_range=None, rank_boundary_list=None):
    if rank_boundary_list is None:
        return None
    if calc_range is None:
        index_to_calc = list(range(len(t)))
    else:
        index_to_calc = [i for i, x in enumerate(t) if x >= calc_range[0] and x <= calc_range[1]]
    # remove non-rank index
    remove_non_rank_index = index_to_calc
    for rank_boundary in rank_boundary_list:
        remove_non_rank_index = np.intersect1d(
            remove_non_rank_index,
            [i for i, x in enumerate(t) if x < rank_boundary[0] or x > rank_boundary[1]])
    # print('before remove non-rank index:{}'.format(len(index_to_calc)))
    index_to_calc = [i for i in index_to_calc if i not in remove_non_rank_index]
    all = len(index_to_calc)
    # print('all:{}'.format(all))
    tp = 0.0
    tn = 0.0
    fp = 0.0
    fn = 0.0
    for rank_boundary in rank_boundary_list:
        _tp = np.asarray([1.0 for i in index_to_calc
                          if (in_the_rank(rank_boundary, t[i]) and in_the_rank(rank_boundary, e[i]))]).sum()
        _fp = np.asarray([1.0 for i in index_to_calc
                          if (not in_the_rank(rank_boundary, t[i]) and in_the_rank(rank_boundary, e[i]))]).sum()
        _tn = np.asarray([1.0 for i in index_to_calc
                          if (not in_the_rank(rank_boundary, t[i]) and (not in_the_rank(rank_boundary, e[i])))]).sum()
        _fn = np.asarray([1.0 for i in index_to_calc
                          if (in_the_rank(rank_boundary, t[i]) and (not in_the_rank(rank_boundary, e[i])))]).sum()
        print('rank_boundary:{}, _tp:{}, _fp:{}, _tn:{}, _fn:{}'.format(rank_boundary, _tp, _fp, _tn, _fn))
        tp += _tp
        fp += _fp
        tn += _tn
        fn += _fn
    print('all:{}, tp:{}, fp:{}, tn:{}, fn:{}'.format(all, tp, fp, tn, fn))
    # note: despite the name, this returns the recall tp / (tp + fn) over the ranked samples
    return (tp / (tp + fn))


def get_tf_model_file_paths(tf_model_path, global_iter=None):
    if global_iter is None:
        try:
            print('global_iter is not given. try to get global_iter from tf_model_path:{}'.format(tf_model_path))
            global_iter = tf_model_path.split('.ckpt-')[1]
        except Exception as e:
            print('Can not set global_iter with tf_model_path:{} with Exception:{}'.format(tf_model_path, e))
            global_iter = None
    tf_model_path_with_iter = '{}.ckpt'.format(tf_model_path.split('.ckpt')[0])
    if global_iter is not None:
        tf_model_path_with_iter = '{}-{}'.format(tf_model_path_with_iter, global_iter)
    postfix_list = ['data-00000-of-00001', 'index', 'meta']
    return ['{}.{}'.format(tf_model_path_with_iter, postfix) for postfix in postfix_list], global_iter
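# Minimal usage sketch for the helpers above (all values hypothetical):
if __name__ == '__main__':
    _true = [1.0, 2.0, 12.0]
    _estimated = [1.1, 1.8, 0.0]
    # The 12.0 sample lies outside calc_range and is dropped before the RMSE.
    print(calc_rmse_with_drop(_true, _estimated, calc_range=[0, 9.0]))
    # Expands 'model.ckpt-99' into the three files a TF1 Saver writes
    # (.data-00000-of-00001, .index, .meta), plus the recovered global_iter.
    print(get_tf_model_file_paths('model.ckpt-99'))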
# core/src/autogluon/core/searcher/bayesopt/models/meanstd_acqfunc_impl.py
# (zhiqiangdon/autogluon, Apache-2.0)
import numpy as np
from typing import Dict, Optional
import logging

from .meanstd_acqfunc import MeanStdAcquisitionFunction, AcquisitionWithMultiModelCurrentBest, HeadWithGradient
from scipy.stats import norm

from ..tuning_algorithms.base_classes import OutputSurrogateModel, SurrogateModel
from ..utils.density import get_quantiles

logger = logging.getLogger(__name__)

MIN_COST = 1e-12  # For numerical stability when dividing EI / cost
MIN_STD_CONSTRAINT = 1e-12  # For numerical stability when computing the constraint probability in CEI


def _extract_active_and_secondary_metric(model_output_names, active_metric):
    """
    Returns the active metric and the secondary metric (such as the cost or constraint metric)
    from model_output_names.
    """
    assert len(model_output_names) == 2, f"The model should consist of exactly 2 outputs, " \
                                         f"while the current outputs are {model_output_names}"
    assert active_metric in model_output_names, f"{active_metric} is not a valid metric. " \
                                                f"The metric name must match one of the following metrics " \
                                                f"in the model output: {model_output_names}"
    if model_output_names[0] == active_metric:
        secondary_metric = model_output_names[1]
    else:
        secondary_metric = model_output_names[0]
    logger.info(f"There are two metrics in the output: {model_output_names}. "
                f"The metric to optimize was set to '{active_metric}'. "
                f"The secondary metric is assumed to be '{secondary_metric}'")
    return active_metric, secondary_metric


class EIAcquisitionFunction(MeanStdAcquisitionFunction):
    """
    Minus expected improvement acquisition function
    (minus because the convention is to always minimize acquisition functions)
    """
    def __init__(self, model: OutputSurrogateModel, active_metric: str = None, jitter: float = 0.01):
        assert isinstance(model, SurrogateModel)
        super().__init__(model, active_metric)
        self.jitter = jitter

    def _head_needs_current_best(self) -> bool:
        return True

    def _compute_heads(self, output_to_mean_std: Dict[str, Dict[str, np.ndarray]],
                       current_best: Optional[np.ndarray]) -> np.ndarray:
        means, stds = self._extract_active_metric_stats(output_to_mean_std)
        assert current_best is not None
        # phi, Phi is PDF and CDF of Gaussian
        phi, Phi, u = get_quantiles(self.jitter, current_best, means, stds)
        return (-stds) * (u * Phi + phi)

    def _compute_head_and_gradient(self, output_to_mean_std: Dict[str, Dict[str, np.ndarray]],
                                   current_best: Optional[np.ndarray]) -> HeadWithGradient:
        mean, std = self._extract_active_metric_stats(output_to_mean_std)
        assert current_best is not None
        # phi, Phi is PDF and CDF of Gaussian
        phi, Phi, u = get_quantiles(self.jitter, current_best, mean, std)
        f_acqu = std * (u * Phi + phi)
        return HeadWithGradient(
            hvals=-f_acqu,
            dh_dmean={self.active_metric: Phi},
            dh_dstd={self.active_metric: -phi})
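# For reference (assuming the conventional definition used by get_quantiles,
# i.e. u = (best - mean - jitter) / std, phi = pdf(u), Phi = cdf(u)):
#     EI(x) = std * (u * Phi + phi)
# so the head returned above is -EI(x), matching the minimization convention.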
self._extract_active_metric_stats(output_to_mean_std) ones_like_mean = np.ones_like(mean) ones_like_std = np.ones_like(std) return HeadWithGradient( hvals=mean - std * self.kappa, dh_dmean={self.active_metric: ones_like_mean}, dh_dstd={self.active_metric: (-self.kappa) * ones_like_std}) class EIpuAcquisitionFunction(MeanStdAcquisitionFunction): """ Minus cost-aware expected improvement acquisition function. (minus because the convention is to always minimize the acquisition function) This is defined as EIpu(x) = EI(x) / cost(x), where cost(x) is the predictive mean of the cost model at x. Note: two metrics are expected in the model output: the main objective and the cost. The main objective needs to be indicated as active_metric when initializing EIpuAcquisitionFunction. The cost is automatically assumed to be the other metric. """ def __init__(self, model: OutputSurrogateModel, active_metric: str, jitter: float = 0.01): super(EIpuAcquisitionFunction, self).__init__(model, active_metric) self.jitter = jitter self.active_metric, self.cost_metric = _extract_active_and_secondary_metric( self.model_output_names, active_metric) def _head_needs_current_best(self) -> bool: return True def _compute_heads( self, output_to_mean_std: Dict[str, Dict[str, np.ndarray]], current_best: Optional[np.ndarray]) -> np.ndarray: """ Returns minus the cost-aware expected improvement. """ means, stds = self._extract_active_metric_stats(output_to_mean_std) assert current_best is not None pred_costs = self._extract_positive_cost(output_to_mean_std) # phi, Phi is PDF and CDF of Gaussian phi, Phi, u = get_quantiles(self.jitter, current_best, means, stds) f_ei = stds * (u * Phi + phi) f_acqu = f_ei / pred_costs return - f_acqu def _compute_head_and_gradient( self, output_to_mean_std: Dict[str, Dict[str, np.ndarray]], current_best: Optional[np.ndarray]) -> HeadWithGradient: """ Returns minus cost-aware expected improvement and, for each output model, the gradients with respect to the mean and standard deviation of that model. """ mean, std = self._extract_active_metric_stats(output_to_mean_std) assert current_best is not None pred_cost = self._extract_positive_cost(output_to_mean_std) # phi, Phi is PDF and CDF of Gaussian phi, Phi, u = get_quantiles(self.jitter, current_best, mean, std) f_ei = std * (u * Phi + phi) f_acqu = f_ei / pred_cost dh_dmean_active = Phi / pred_cost dh_dstd_active = - phi / pred_cost dh_dmean_cost = f_ei / (pred_cost ** 2) # We flip the sign twice: once because of the derivative of 1 / x # and once because the head is actually - f_ei dh_dstd_cost = np.zeros_like(dh_dstd_active) # EIpu does not depend on the standard deviation of cost return HeadWithGradient( hvals=-f_acqu, dh_dmean={self.active_metric: dh_dmean_active, self.cost_metric: dh_dmean_cost}, dh_dstd={self.active_metric: dh_dstd_active, self.cost_metric: dh_dstd_cost} ) def _extract_positive_cost(self, output_to_mean_std_best): pred_cost = output_to_mean_std_best[self.cost_metric]['mean'] if any(pred_cost) < 0.0: logger.warning(f'The model for {self.cost_metric} predicted some negative cost. ' f'Capping the minimum cost at {MIN_COST}.') pred_cost = np.maximum(pred_cost, MIN_COST) # ensure that the predicted cost/run-time is positive return pred_cost class CEIAcquisitionFunction(AcquisitionWithMultiModelCurrentBest): """ Minus constrained expected improvement acquisition function. 
(minus because the convention is to always minimize the acquisition function) This is defined as CEI(x) = EI(x) * P(c(x) <= 0), where EI is the standard expected improvement with respect to the current *feasible best*, and P(c(x) <= 0) is the probability that the hyperparameter configuration x satisfies the constraint modeled by c(x). If there are no feasible hyperparameters yet, the current feasible best is undefined. Thus, CEI is reduced to the P(c(x) <= 0) term until a feasible configuration is found. Two metrics are expected in the model output: the main objective and the constraint metric. The main objective needs to be indicated as active_metric when initializing CEIAcquisitionFunction. The constraint is automatically assumed to be the other metric. References on CEI: Gardner et al., Bayesian Optimization with Inequality Constraints. In ICML, 2014. Gelbart et al., Bayesian Optimization with Unknown Constraints. In UAI, 2014. """ def __init__(self, model: OutputSurrogateModel, active_metric: str, jitter: float = 0.01): super(CEIAcquisitionFunction, self).__init__(model, active_metric) self.jitter = jitter self._feasible_best_list = None self.active_metric, self.constraint_metric = _extract_active_and_secondary_metric( self.model_output_names, active_metric) def _head_needs_current_best(self) -> bool: return True def _compute_heads(self, output_to_mean_std: Dict[str, Dict[str, np.ndarray]], current_best: Optional[np.ndarray]) -> np.ndarray: """ Returns minus the constrained expected improvement (- CEI). """ assert current_best is not None means, stds = self._extract_active_metric_stats(output_to_mean_std) means_constr = output_to_mean_std[self.constraint_metric]['mean'] stds_constr = output_to_mean_std[self.constraint_metric]['std'] # Compute the probability of satisfying the constraint P(c(x) <= 0) constr_probs = norm.cdf(- means_constr / (stds_constr + MIN_STD_CONSTRAINT)) # If for some fantasies there are not feasible candidates, there is also no current_best (i.e., a nan). # The acquisition function is replaced by only the P(c(x) <= 0) term when no feasible best exist. feas_idx = ~np.isnan(current_best).flatten() num_fantasies = current_best.size means = means.reshape((-1, num_fantasies)) stds = stds.reshape((-1, 1)) current_best = current_best.reshape((1, num_fantasies)) constr_probs = constr_probs.reshape((-1, num_fantasies)) # phi, Phi is PDF and CDF of Gaussian phi, Phi, u = get_quantiles(self.jitter, current_best, means, stds) f_ei = stds * (u * Phi + phi) # CEI(x) = EI(x) * P(c(x) <= 0) if feasible best exists, CEI(x) = P(c(x) <= 0) otherwise f_acqu = np.where(feas_idx, f_ei * constr_probs, constr_probs) return - f_acqu def _compute_head_and_gradient(self, output_to_mean_std: Dict[str, Dict[str, np.ndarray]], current_best: Optional[np.ndarray]) -> HeadWithGradient: """ Returns minus cost-aware expected improvement (- CEI) and, for each output model, the gradients with respect to the mean and standard deviation of that model. """ assert current_best is not None mean, std = self._extract_active_metric_stats(output_to_mean_std) mean_constr = output_to_mean_std[self.constraint_metric]['mean'] std_constr = output_to_mean_std[self.constraint_metric]['std'] # Compute the probability of satisfying the constraint P(c(x) <= 0) std_constr = std_constr + MIN_STD_CONSTRAINT z = - mean_constr / std_constr constr_prob = norm.cdf(z) # Useful variables for computing the head gradients mean_over_squared_std_constr = mean_constr / std_constr ** 2 inverse_std_constr = 1. 
/ std_constr phi_constr = norm.pdf(z) # If for some fantasies there are not feasible candidates, there is also no current_best (i.e., a nan). # The acquisition function is replaced by only the P(c(x) <= 0) term when no feasible best exist. feas_idx = ~np.isnan(current_best) phi, Phi, u = get_quantiles(self.jitter, current_best, mean, std) # phi, Phi is PDF and CDF of Gaussian f_ei = std * (u * Phi + phi) f_acqu = np.where(feas_idx, f_ei * constr_prob, constr_prob) # CEI(x) = EI(x) * P(c(x) <= 0) if feasible best # exists, CEI(x) = P(c(x) <= 0) otherwise dh_dmean_constraint_feas = f_ei * inverse_std_constr * phi_constr dh_dstd_constraint_feas = - f_ei * mean_over_squared_std_constr * phi_constr dh_dmean_active_feas = Phi * constr_prob dh_dstd_active_feas = - phi * constr_prob dh_dmean_constraint_infeas = inverse_std_constr * phi_constr dh_dstd_constraint_infeas = - mean_over_squared_std_constr * phi_constr dh_dmean_active_infeas = np.zeros_like(phi_constr) dh_dstd_active_infeas = np.zeros_like(phi_constr) dh_dmean_active = np.where(feas_idx, dh_dmean_active_feas, dh_dmean_active_infeas) dh_dstd_active = np.where(feas_idx, dh_dstd_active_feas, dh_dstd_active_infeas) dh_dmean_constraint = np.where(feas_idx, dh_dmean_constraint_feas, dh_dmean_constraint_infeas) dh_dstd_constraint = np.where(feas_idx, dh_dstd_constraint_feas, dh_dstd_constraint_infeas) return HeadWithGradient( hvals=-f_acqu, dh_dmean={self.active_metric: dh_dmean_active, self.constraint_metric: dh_dmean_constraint}, dh_dstd={self.active_metric: dh_dstd_active, self.constraint_metric: dh_dstd_constraint} ) def _get_current_best_for_active_metric(self): """ Returns a list of current best, one per MCMC sample (if head requires the current best). Filters out the infeasible candidates when computing the current best. This is needed as the constrained expected improvement uses the current best over the *feasible* hyperparameter configurations (i.e., satisfying the constraint) and not over all hyperparameter configurations. The assumption is that a hyperparameter configuration is feasible iff the predictive mean of the constraint model is <= 0 at that configuration. """ if self._head_needs_current_best(): if self._feasible_best_list is not None: feasible_best_list = self._feasible_best_list else: models_to_means = self._map_models_to_candidate_predictive_means() assert len(models_to_means) == 2, 'The model should consist of exactly 2 outputs.' all_means_active = models_to_means[self.active_metric] all_means_constraint = models_to_means[self.constraint_metric] assert len(all_means_constraint) == len(all_means_constraint), \ 'All models must have the same number of MCMC samples.' feasible_best_list = [] # Loop over MCMC samples (if any) for means_active, means_constraint in zip(all_means_active, all_means_constraint): assert means_active.shape == means_constraint.shape, \ 'The predictive means from each model must have the same shape.' # Remove all infeasible candidates (i.e., where means_constraint is >= 0) means_active[means_constraint >= 0] = np.nan # Compute the current *feasible* best (separately for every fantasy) min_across_observations = np.nanmin(means_active, axis=0) feasible_best_list.append(min_across_observations) self._feasible_best_list = feasible_best_list else: feasible_best_list = [None] * self.num_mcmc_samples return feasible_best_list
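# --- Illustrative sketch (not part of the original file) ---
# A minimal, self-contained version of the EI head computed above, assuming
# that `get_quantiles(jitter, best, mean, std)` follows the common convention
# u = (best - jitter - mean) / std; `best` is the incumbent value, and the
# head here is +EI (the classes above return its negation for minimization).
import numpy as np
from scipy.stats import norm

def expected_improvement(mu, sigma, best, jitter=0.01):
    sigma = np.maximum(sigma, 1e-12)        # same spirit as MIN_COST / MIN_STD_CONSTRAINT above
    u = (best - jitter - mu) / sigma        # standardized improvement
    return sigma * (u * norm.cdf(u) + norm.pdf(u))   # std * (u * Phi + phi), as in _compute_heads

# e.g. expected_improvement(np.array([0.2]), np.array([0.1]), best=0.25)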
51.367089
119
0.678228
2,164
16,232
4.812847
0.133549
0.044935
0.027652
0.034566
0.551992
0.495343
0.468555
0.465002
0.421988
0.392031
0
0.004654
0.245441
16,232
315
120
51.530159
0.845689
0.261397
0
0.348958
0
0
0.060663
0.001804
0
0
0
0
0.067708
1
0.098958
false
0
0.036458
0.020833
0.234375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a634a81d1315b02024e5a058541a80c84797654
14,949
py
Python
tests/funcat/test_ema_trends.py
pchaos/funcat2
ff554cc134906a5a182fc31774488d62a839b314
[ "Apache-2.0" ]
10
2021-05-06T01:17:28.000Z
2022-01-09T16:56:51.000Z
tests/funcat/test_ema_trends.py
pchaos/funcat2
ff554cc134906a5a182fc31774488d62a839b314
[ "Apache-2.0" ]
null
null
null
tests/funcat/test_ema_trends.py
pchaos/funcat2
ff554cc134906a5a182fc31774488d62a839b314
[ "Apache-2.0" ]
4
2021-05-26T14:25:01.000Z
2022-01-09T16:56:44.000Z
# -*- coding: utf-8 -*-
import unittest
from matplotlib import rcParams
import matplotlib.pyplot as plt
import numpy as np
import os
from functools import lru_cache
import json

from funcat import *
from funcat.api import *
from funcat.helper import selectV
from funcat.utils import FuncatTestCase

__updated__ = "2021-08-12"


def condition_ema(n: int = 13):
    return CLOSE >= EMA(CLOSE, n)


def condition_ema_ema(n: int = 13, m: int = 55):
    return (CLOSE > EMA(CLOSE, n)) & (CLOSE > EMA(CLOSE, m))


def condition_ema_ema2(n: int = 13, m: int = 55):
    return (CLOSE > EMA(CLOSE, n)) & (EMA(CLOSE, m) > REF(EMA(CLOSE, m), n))


def condition_kama_ema(n: int = 10, m: int = 21):
    return (CLOSE > KAMA(CLOSE, n)) & (EMA(CLOSE, m) > REF(EMA(CLOSE, m), n))


def condition_kama_ema2(n: int = 10, m: float = 0.1):
    kman = KAMA(CLOSE, n)
    amastd = STD(kman, 20)
    return (CLOSE > kman) & (CLOSE > kman + m * amastd)


def condition_llv(n: int = 20):
    def atr(time_period: int):
        import talib
        atr = talib.ATR(HIGH.series, LOW.series, CLOSE.series, timeperiod=time_period)
        return atr
    return CLOSE >= (LLV(LOW, n) + 2 * atr(n))


class Test_ema_trend(FuncatTestCase):

    @classmethod
    def loadFromFile(cls):
        filename = "../datas/etf.txt"
        currDir = os.path.join(os.path.abspath(os.path.dirname(__file__)), ".")
        fullname = os.path.join(f"{currDir}", filename)
        print(fullname)
        if os.path.exists(fullname):
            with open(fullname, "r") as f:
                cls.codes = f.readlines()
            # print(cls.codes[:10])
            for i, item in enumerate(cls.codes):
                cls.codes[i] = f"{item[:6]}.etf"
                if cls.codes[i].startswith("000"):
                    # replace index codes with the 588000 ETF
                    cls.codes[i] = "588000.etf"
            if cls.codes[0].startswith("代码"):
                # drop the header row ("代码" means "code")
                print(f"del 代码")
                del cls.codes[0]
            for i in reversed(range(len(cls.codes))):
                # remove blank lines
                if len(cls.codes[i].strip()) != 10:
                    del cls.codes[i]
        return cls.codes

    @classmethod
    def setUpClass(cls) -> None:
        super(Test_ema_trend, cls).setUpClass()
        cls.codes = ['510500', '159915', '510300', "512400", "512800", "512760", "515050"]
        for i, item in enumerate(cls.codes):
            cls.codes[i] = f"{item[:6]}.etf"
        print(os.environ['CONDA_DEFAULT_ENV'])

    def sort_arr(self, arr: np.array, sort=''):
        result = []
        for i, item in enumerate(arr):
            try:
                result.append((item['date'], item['code'], item['cname']))
            except Exception as e:
                print(f"{item} computation error!")
        # print(f"percent:{result}")
        # dtype = [(('date', int), ('code', 'U'), ('cname', 'U'))]
        dtype = [('date', int), ('code', (np.str_, 10)), ('cname', (np.str_, 10))]
        arr_sorted = np.array(result, dtype=dtype)
        return np.sort(arr_sorted, order='code')

    def show_last(self, arr: np.array, last_n=-1):
        from funcat import get_start_date, get_current_date, get_current_security
        from funcat.context import ExecutionContext
        current_date = get_current_date()
        start_date = current_date - 10000
        trading_dates = ExecutionContext.get_data_backend().get_trading_dates(
            start=start_date, end=current_date)
        lastday = trading_dates[last_n]
        result = []
        for i, item in enumerate(arr):
            if item['date'] == lastday:
                result.append(i)
        if arr.shape[0] > 0:
            return self.sort_arr(arr[result])
        else:
            return np.array([])

    def test_condition_ema(self):
        data = selectV(condition_ema, start_date=20181228, end_date=20190104,
                       order_book_id_list=self.codes)
        print(f"condition_ema results:{data}")

    def test_condition_ema_2(self):
        data = selectV(condition_ema, start_date=20210101, end_date=20210704,
                       order_book_id_list=self.codes)
        print(f"condition_ema results:{data}")

    def test_condition_ema_ema(self):
        data = selectV(condition_ema_ema, start_date=20181001, end_date=20190104,
                       order_book_id_list=self.codes)
        print(f"condition_ema_ema results:{data}")

    def test_condition_ema_ema2(self):
        data = selectV(condition_ema_ema, start_date=20210101, end_date=20210704,
                       order_book_id_list=self.codes)
        print(f"condition_ema_ema results:{data}")
        print(f"last day status:{self.show_last(data)}")

    def select_conditions(self, codes, last_n=-1, func=condition_ema_ema2):
        data = selectV(func, start_date=20210101, end_date=20310704,
                       order_book_id_list=codes)
        print(f"condition_ema_ema results {len(data)}:{data}")
        print(f"total:{len(codes)} codes")
        if last_n != 0:
            print(f"last day status {self.show_last(data, last_n).shape[0]} :"
                  f"{self.show_last(data, last_n)}")
        return data

    def test_condition_ema_ema3(self):
        # read the ETF code list from a local file
        codes = self.loadFromFile()
        self.select_conditions(codes)

    def test_condition_ema_ema3_2(self):
        # read the ETF code list from a local file
        codes = self.loadFromFile()
        self.select_conditions(codes)
        self.select_conditions(codes, last_n=-2)

    def test_condition_ema_ema3_3(self):
        """Close above the 13-day EMA with the 55-day EMA rising."""
        # read the ETF code list from a local file
        codes = self.loadFromFile()
        self.select_conditions(codes)
        data = self.select_conditions(codes)
        n = 10
        for i in range(n):
            x = self.show_last(data, -i - 1)
            print(x)
            filename = f"/tmp/outfile{i}.txt"
            np.savetxt(filename, x, fmt='%s')
            print(f"save to {filename}")

    def test_condition_ema_ema4(self):
        codes = ["501078.etf"]
        # codes = ["588000.etf"]
        self.select_conditions(codes)

    def test_condition_ema_ema5(self):
        # read the ETF code list from a local file
        codes = self.loadFromFile()
        data = self.select_conditions(codes)
        lastdata = self.show_last(data)
        lastcodes = []
        for i, item in enumerate(lastdata):
            lastcodes.append(item['code'])
        n = 13
        result = []
        if len(lastcodes) > 0:
            for i, item in enumerate(lastcodes):
                S(item)
                try:
                    c = CLOSE / REF(CLOSE, n)
                    result.append([item, np.round(c.value, 3)])
                except Exception as e:
                    print(f"{item} computation error!")
            print(f"percent:{result}")
            result = np.array(result)
            print(f"percent {result.shape}:{np.array(result)}")

    def test_condition_kama_ema(self):
        # read the ETF code list from a local file
        codes = self.loadFromFile()
        data = self.select_conditions(codes, func=condition_kama_ema)
        lastdata = self.show_last(data)
        lastcodes = []
        for i, item in enumerate(lastdata):
            lastcodes.append(item['code'])
        n = 13
        result = []
        if len(lastcodes) > 0:
            for i, item in enumerate(lastcodes):
                S(item)
                try:
                    c = CLOSE / REF(CLOSE, n)
                    result.append([item, np.round(c.value, 3)])
                except Exception as e:
                    print(f"{item} computation error!")
            print(f"percent:{result}")
            result = np.array(result)
            print(f"percent {result.shape}:{np.array(result)}")

    def show_result(self, codes, n, topn=5):
        result = []
        if len(codes) > 0:
            for i, item in enumerate(codes):
                S(item)
                try:
                    c = CLOSE / REF(CLOSE, n)
                    result.append((item, np.round(100 * c.value, 2)))
                except Exception as e:
                    print(f"{item} computation error!")
            # print(f"percent:{result}")
            dtype = [('code', 'S10'), ('percent', float)]
            result_np = np.array(result, dtype=dtype)
            # print(f"percent numpy: {result_np.shape}:{result_np}")
            sorted_result = np.sort(result_np, order='percent')
            print(f"{n} day percent ordered %: {sorted_result.shape}:{sorted_result}")
            # sorted_result.tofile('/tmp/kama.csv', sep=',')
            jsfile = f"/tmp/kama{n}.json"
            # row count (the first row is skipped below)
            row_count = sorted_result.shape[0]
            row_count = row_count - 1
            npMatrix = sorted_result.transpose()
            # convert the numpy array to a list, then collect the top entries
            matrix = npMatrix.tolist()
            result = {}
            for index, item in enumerate(matrix):
                if index + topn > row_count:
                    result[item[0].decode()] = item[1]
            return {f"{n} day (CLOSE/REF(CLOSE, {n}) percent %)": result}

    def dict_to_json(self, value):
        # Parsing JSON can fail, so exceptions are handled below
        # (e.g. when the input string does not validate as JSON).
        try:
            # Load JSON data from a string into a Python object
            if isinstance(value, str):
                o_json = json.loads(value)
            elif isinstance(value, dict):
                o_json = value
            elif isinstance(value, list):
                o_json = value
            # Convert the Python object back to a pretty-printed JSON string.
            # `indent` sets the indentation width; `sort_keys=False` preserves
            # the input key order instead of sorting.
            f_json = json.dumps(o_json, indent=2, sort_keys=False, ensure_ascii=False)
            # print(f_json)
            return f_json
        except Exception as ex:
            # repr prints more detail about the exception, handy when debugging
            print(repr(ex))
            return json.dumps({})

    def test_condition_kama_ema2(self):
        """kman = 10-day Kaufman adaptive moving average (KAMA);
        close > kman and close > kman + 0.1 * STD(kman, 20)
        """
        # read the ETF code list from a local file
        codes = self.loadFromFile()
        data = self.select_conditions(codes, func=condition_kama_ema2)
        lastdata = self.show_last(data)
        lastcodes = []
        for i, item in enumerate(lastdata):
            lastcodes.append(item['code'])
        # ratio versus n days ago
        n = 10
        j1 = self.show_result(lastcodes, n)
        n = 5
        j2 = self.show_result(lastcodes, n)
        # print(j1, j2)
        print(self.dict_to_json([j1, j2]))
        print(f"{len(lastdata)}/{len(codes)},{lastdata}")
        if len(lastdata) > 0:
            # the report header below is kept in Chinese; it restates the
            # docstring condition and lists the candidate ETFs ("备选etf")
            with open(f"/tmp/kama_ema_{lastdata[0]['date']}.txt", 'w') as f:
                f.write(f"{lastdata[0]['date']}\n" +
                        f"""kman=10日卡夫曼自适应均线,\n close > kman 并且 close > kman+0.1×STD(kman, 20)\n""" +
                        f"备选etf:\n{codes}\n" +
                        f"{str(self.dict_to_json([j1, j2]))}\n{len(lastdata)}/{len(codes)},{lastdata}\n")
        # print(self.dict_to_json(list(enumerate(lastdata))))

    def test_condition_kama_ema3(self):
        """kman = 10-day Kaufman adaptive moving average (KAMA);
        close > kman and close > kman + 0.1 * STD(kman, 20)
        """
        # read the ETF code list from a local file
        codes = self.loadFromFile()
        data = self.select_conditions(codes, func=condition_kama_ema2)
        lastdata = self.show_last(data)
        lastcodes = []
        for i, item in enumerate(lastdata):
            lastcodes.append(item['code'])
        # ratio versus n days ago
        nlist = [5, 10, 20]
        jlist = []
        for n in nlist:
            j1 = self.show_result(lastcodes, n)
            jlist.append(j1)
        # print(j1, j2)
        print(self.dict_to_json(jlist))
        print(f"{len(lastdata)}/{len(codes)},{lastdata}")
        # count how many times each code appears
        codes_count = {}
        for item_dict in jlist:
            print(item_dict)
            for item in item_dict.values():
                for key in item.keys():
                    # append the Chinese security name
                    key = f"{key} {symbol(key)}"
                    codes_count[key] = codes_count.get(key, 0) + 1
        # "排名靠前出现的次数" = "number of top-ranked appearances"
        codes_count = {"排名靠前出现的次数": codes_count}
        if len(lastdata) > 0:
            with open(f"/tmp/kama_ema_{lastdata[0]['date']}.txt", 'w') as f:
                f.write(f"{lastdata[0]['date']}\n" +
                        f"""kman=10日卡夫曼自适应均线,\n close > kman 并且 close > kman+0.1×STD(kman, 20)\n""" +
                        f"标的etf:\n{codes}\n" +
                        f"{str(self.dict_to_json(jlist))}\n" +
                        f"{self.dict_to_json(codes_count)}\n" +
                        f"{len(lastdata)}/{len(codes)},{lastdata}\n")

    def test_condition_kama_ema_llv(self):
        """kman = 10-day Kaufman adaptive moving average (KAMA);
        close > kman and close > kman + 0.1 * STD(kman, 20)
        """
        # read the ETF code list from a local file
        codes = self.loadFromFile()

        def f():
            return condition_kama_ema2() & condition_llv(20)

        data = self.select_conditions(codes, func=f)
        lastdata = self.show_last(data)
        lastcodes = []
        for i, item in enumerate(lastdata):
            lastcodes.append(item['code'])
        # ratio versus n days ago
        nlist = [5, 10, 20]
        jlist = []
        for n in nlist:
            j1 = self.show_result(lastcodes, n)
            jlist.append(j1)
        # print(j1, j2)
        print(self.dict_to_json(jlist))
        print(f"{len(lastdata)}/{len(codes)},{lastdata}")
        # count how many times each code appears
        codes_count = {}
        for item_dict in jlist:
            print(item_dict)
            for item in item_dict.values():
                for key in item.keys():
                    codes_count[key] = codes_count.get(key, 0) + 1
        codes_count = {"排名靠前出现的次数": codes_count}
        if len(lastdata) > 0:
            with open(f"/tmp/kama_ema_{lastdata[0]['date']}.txt", 'w') as f:
                f.write(f"{lastdata[0]['date']}\n" +
                        f"""kman=10日卡夫曼自适应均线,\n close > kman 并且 close > kman+0.1×STD(kman, 20)\n""" +
                        f"标的etf:\n{codes}\n" +
                        f"{str(self.dict_to_json(jlist))}\n" +
                        f"{self.dict_to_json(codes_count)}\n" +
                        f"{len(lastdata)}/{len(codes)},{lastdata}\n")


if __name__ == '__main__':
    FuncatTestCase.main()
37.002475
153
0.544719
1,853
14,949
4.261198
0.165138
0.019757
0.024696
0.015198
0.54344
0.510638
0.492021
0.466565
0.439083
0.410841
0
0.032255
0.328049
14,949
403
154
37.094293
0.753211
0.104355
0
0.509934
0
0.016556
0.133027
0.058034
0
0
0
0
0
1
0.092715
false
0
0.046358
0.016556
0.192053
0.102649
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a63f146b7427d332ad4e59baaa8226a43db9bf3
4,933
py
Python
api/anime/k1080.py
wind2sing/Anime-API
a640c9e6a3798f5f0f77576b6c09fbd0a2dd1f6d
[ "MIT" ]
null
null
null
api/anime/k1080.py
wind2sing/Anime-API
a640c9e6a3798f5f0f77576b6c09fbd0a2dd1f6d
[ "MIT" ]
null
null
null
api/anime/k1080.py
wind2sing/Anime-API
a640c9e6a3798f5f0f77576b6c09fbd0a2dd1f6d
[ "MIT" ]
null
null
null
import json
import re
from base64 import b64decode
from urllib.parse import unquote

from api.core.anime import *
from api.utils.logger import logger


class K1080(AnimeSearcher):

    async def search(self, keyword: str):
        html = await self.fetch_html(keyword, 1)
        for item in self.parse_anime_metas(html):
            yield item
        pages = self.parse_last_page_index(html)
        if pages > 1:
            tasks = [self.parse_one_page(keyword, p) for p in range(2, pages + 1)]
            async for item in self.as_iter_completed(tasks):
                yield item

    async def fetch_html(self, keyword: str, page: int):
        url = f"https://www.k1080.net/vodsearch/{keyword}----------{page}---.html"
        resp = await self.get(url)
        if not resp or resp.status != 200:
            return ""
        html = await resp.text()
        if "请输入验证码" in html:  # "please enter the verification code"
            logger.error("We are blocked by K1080P, need to enter the verification code.")
            return ""
        return html

    def parse_last_page_index(self, html: str) -> int:
        max_page_url = self.xpath(html, "//a[text()='尾页']/@href")  # '尾页' = "last page"
        if not max_page_url:
            return 1
        # the last-page link looks like /vodsearch/xxxxx----------4---.html
        max_page = re.search(r"--(\d+?)--", max_page_url[0]).group(1)
        return int(max_page)

    def parse_anime_metas(self, html: str):
        ret = []
        meta_list = self.xpath(html, "//ul[@id='searchList']/li")
        for item in meta_list:
            meta = AnimeMeta()
            meta.cover_url = item.xpath("div/a/@data-original")[0]
            meta.title = item.xpath("div/a/@title")[0]
            meta.detail_url = item.xpath("div/a[1]/@href")[0]  # /voddetail/414362.html
            desc = item.xpath("div[@class='detail']/p[4]/text()")[0]
            meta.desc = desc if desc else "无简介"  # "no description"
            meta.category = item.xpath("//p[3]/text()")[0]
            ret.append(meta)
        return ret

    async def parse_one_page(self, keyword: str, page: int):
        html = await self.fetch_html(keyword, page)
        return self.parse_anime_metas(html)


class K1080DetailParser(AnimeDetailParser):

    async def parse(self, detail_url: str):
        detail = AnimeDetail()
        domain = "https://www.k1080.net"
        resp = await self.get(domain + detail_url)
        if not resp or resp.status != 200:
            return detail
        html = await resp.text()
        detail.cover_url = self.xpath(html, "//img[@class='lazyload']/@src")[0]
        detail.title = self.xpath(html, "//h1[@class='title']/text()")[0]
        detail.desc = self.xpath(html, "//a[@href='#desc']/parent::p/text()")[1]
        detail.category = self.xpath(html, "//p[@class='data'][1]/a/text()")[0]
        # the page region containing the playlists
        playlist_blocks = self.xpath(html, "//div[@class='tab-content myui-panel_bd']/div")
        playlist_names = self.xpath(html, "//a[@data-toggle='tab']/text()")
        for idx, block in enumerate(playlist_blocks):
            if playlist_names[idx] in ["超清备用", "Y播"]:
                # m3u8 stream hidden via image steganography; too much trouble, skip it
                continue
            playlist = AnimePlayList()
            playlist.name = playlist_names[idx]
            for anime_block in block.xpath('ul/li'):
                anime = Anime()
                anime.name = anime_block.xpath("a/text()")[0]
                anime.raw_url = anime_block.xpath("a/@href")[0]
                playlist.append(anime)
            if not playlist.is_empty():
                detail.append_playlist(playlist)
        return detail


class K1080UrlParser(AnimeUrlParser):

    async def parse(self, raw_url: str):
        domain = "https://www.k1080.net"
        resp = await self.get(domain + raw_url)
        if not resp or resp.status != 200:
            return ""
        html = await resp.text()
        player_data = re.search(r"player_data=({.+?\})", html).group(1)
        player_data = json.loads(player_data)
        video_url = unquote(b64decode(player_data.get("url")).decode("utf8"))
        if video_url.endswith(".mp4") or video_url.endswith(".m3u8"):
            return video_url
        if "v.qq.com" in video_url:
            return await self.parse_qq_video(video_url)
        # this host needs one more redirect
        if "app.yiranleng.top" in video_url:
            resp = await self.get(video_url, allow_redirects=True)
            if not resp or resp.status != 200:
                # the CDN origin fetch failed and returned 564
                return ""
            return resp.url.human_repr()
        return video_url

    async def parse_qq_video(self, url: str):
        api = f"https://jx.k1080.net/analysis.php?v={url}"
        headers = {"Referer": "https://www.k1080.net/"}
        resp = await self.get(api, headers=headers)
        if not resp or resp.status != 200:
            return ""
        html = await resp.text()
        url = re.search(r'var url\s*=\s*"(https?://.+?)";', html)
        url = url.group(1) if url else ""
        return url
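# --- Illustrative sketch (not part of the original file) ---
# The URL-recovery step in K1080UrlParser.parse, in isolation: the page embeds
# player_data as JSON whose "url" field is a base64-encoded, percent-encoded
# URL. `sample_html` below is a made-up stand-in for the fetched player page.
import json
import re
from base64 import b64decode, b64encode
from urllib.parse import quote, unquote

encoded = b64encode(quote("https://example.com/video.m3u8").encode()).decode()
sample_html = 'var player_data={"url":"%s"}' % encoded

player_data = json.loads(re.search(r"player_data=({.+?\})", sample_html).group(1))
video_url = unquote(b64decode(player_data["url"]).decode("utf8"))
assert video_url == "https://example.com/video.m3u8"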
39.782258
104
0.577539
655
4,933
4.242748
0.262595
0.025909
0.037424
0.028787
0.180641
0.13746
0.116589
0.107953
0.096438
0.084563
0
0.027466
0.276708
4,933
123
105
40.105691
0.751401
0.024934
0
0.209524
0
0
0.147824
0.058713
0
0
0
0
0
1
0.019048
false
0
0.057143
0
0.266667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a661dc3c2dd55dc27b6c5f8c0a8fde9aff163f1
4,886
py
Python
navi-model-server/navi/test.py
sanfengliao/DeepNavi
dc405ac0010075c2eea63083528db7cb765ad161
[ "Apache-2.0" ]
null
null
null
navi-model-server/navi/test.py
sanfengliao/DeepNavi
dc405ac0010075c2eea63083528db7cb765ad161
[ "Apache-2.0" ]
null
null
null
navi-model-server/navi/test.py
sanfengliao/DeepNavi
dc405ac0010075c2eea63083528db7cb765ad161
[ "Apache-2.0" ]
null
null
null
import os

from .datasets import *
from .models import *
from .losses import *
import torchvision.transforms as transforms
from sklearn.preprocessing import StandardScaler


def train_deepnavi():
    # ### Configuration
    pass


def test_deepnavi(model_path, device_index=0):
    ######################################
    ### Configuration
    ######################################
    test_model_path = model_path
    tag = test_model_path.split("/")[-2]
    dataset_root = './dataset/office/'
    images_dir = './dataset/office/sensor_data/images/'
    mag_file = './dataset/office/sensor_data/geomagnetism/geomagnetism.csv'
    test_file = './dataset/office/validation/validation_data.csv'
    img_seq_len = 1
    img_seq_stride = 1
    mag_seq_len = 16
    mag_seq_stride = 1
    os.environ["CUDA_VISIBLE_DEVICES"] = str(device_index)
    device = torch.device('cuda')

    ######################################
    ### Data Preprocess
    ######################################
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]
    )
    mag_scaler = StandardScaler()

    ######################################
    ### Dataset
    ######################################
    print("Dataset Constructing...")
    val_loc_dataset = LocDataset(
        dataroot='/home/huangjianjun/dataset/deepnavi/office/validation/',
        filename='validation_data.csv', stride=1)
    mag_dataset = MagDataset(file=mag_file, scaler=mag_scaler)
    img_dataset = ImageSeriesFolder(
        data_dir=images_dir,
        transform=transforms.Compose([
            transforms.Resize(256),      # resize the input PIL image to the given size
            transforms.CenterCrop(224),  # crop the central region of the PIL image
            transforms.ToTensor(),       # convert a PIL image or numpy.ndarray to a Tensor
            normalize                    # normalize the tensor image with mean and std
        ]))
    val_dataset = RootBranchesDataset(
        root=val_loc_dataset,
        branches=[img_dataset, mag_dataset],
        branches_len=[img_seq_len, mag_seq_len],
        branches_stride=[img_seq_stride, mag_seq_stride])
    test_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=1,
        shuffle=False,
    )
    print("Dataset Constructed")

    ######################################
    ### Model
    ######################################
    image_encoder = ImageEncoder(models.resnet18(pretrained=True))
    mag_encoder = MagEncoder(3 * mag_seq_len, 256, 4)
    fusion = Fusion(input_size=[256, 256], output_size=1024)
    decoder = Decoder(input_size=1024, output_size=[3, 4])
    model = MainModel([image_encoder, mag_encoder], fusion, decoder)
    # load model weights
    model_weights = torch.load(model_path)
    model.load_state_dict(model_weights["state_dict"])
    model = model.to(device)

    ######################################
    ### Test
    ######################################
    result_output_dir = os.path.join("./output/", tag)
    if not os.path.exists(result_output_dir):
        os.makedirs(result_output_dir)
    losses = AverageMeter()
    trans_losses = AverageMeterRecording()
    rotation_losses = AverageMeter()
    rotation_errors = AverageMeterRecording()
    trans_output_recoord = Recorder()
    rot_output_record = Recorder()
    model.eval()
    with torch.no_grad():
        for targets, imgs, mags in tqdm(test_loader):
            batch_size = mags.size(0)
            imgs, mags = imgs.to(device), mags.to(device)
            targets = targets.to(device)
            mags = mags.view(batch_size, 1, -1)
            trans_output, rotation_output = model([imgs, mags])
            trans_loss = pose_loss(trans_output, targets[:, 0:3])
            rotation_loss = pose_loss(rotation_output, targets[:, 3:])
            rot_err = rotation_error(rotation_output, targets[:, 3:])
            trans_losses.update(trans_loss.item(), batch_size)
            rotation_losses.update(rotation_loss.item(), batch_size)
            rotation_errors.update(rot_err.item(), batch_size)
            trans_output_recoord.add(trans_output.data, batched=True)
            rot_output_record.add(rotation_output.data, batched=True)
    trans_losses.to_file(os.path.join(result_output_dir, "trans_error.csv"))
    rotation_errors.to_file(os.path.join(result_output_dir, "rotation_error.csv"))
    trans_output_recoord.to_file(os.path.join(result_output_dir, "trans_estimation.csv"))
    rot_output_record.to_file(os.path.join(result_output_dir, "rot_estimation.csv"))
    test_trans_loss = trans_losses.avg
    test_rotation_loss = rotation_losses.avg
    test_rotation_err = rotation_errors.avg
    print(
        '[Test] '
        'Trans Loss {:.3f}; '
        'Rotation Loss {:.3f}; '
        'Rotation Error {:.3f}; '
        ''.format(test_trans_loss, test_rotation_loss, test_rotation_err))


if __name__ == '__main__':
    test_deepnavi('./model_weights/sc/sc.pth.tar')
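# --- Illustrative sketch (not part of the original file) ---
# The test loop above relies on meters with an update(value, n) method that
# keeps a running weighted average; the project's own AverageMeter lives in a
# module starred-imported above and may differ, but the usual pattern is:
class AverageMeterSketch:
    def __init__(self):
        self.sum, self.count, self.avg = 0.0, 0, 0.0

    def update(self, val, n=1):
        # accumulate val weighted by the batch size n
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

# m = AverageMeterSketch(); m.update(2.0, n=3); m.update(4.0, n=1); m.avg == 2.5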
36.192593
141
0.619321
543
4,886
5.276243
0.287293
0.029319
0.036649
0.016754
0.064223
0.046771
0.046771
0.046771
0.025131
0
0
0.018377
0.198117
4,886
135
142
36.192593
0.712864
0.034998
0
0
0
0
0.11534
0.051984
0
0
0
0
0
1
0.020833
false
0.010417
0.0625
0
0.083333
0.03125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a68a12d7a6b5c0bf26d53f1dfb138d13e53dc20
14,415
py
Python
resources/warframestats.py
typlosion14/WarframeBot
bd2422162023b0ee2905296f49f500308aae5497
[ "MIT" ]
null
null
null
resources/warframestats.py
typlosion14/WarframeBot
bd2422162023b0ee2905296f49f500308aae5497
[ "MIT" ]
null
null
null
resources/warframestats.py
typlosion14/WarframeBot
bd2422162023b0ee2905296f49f500308aae5497
[ "MIT" ]
null
null
null
import discord
import json
from datetime import *

from CobraLib import importjson
from StorageConfig import Traduction, ConvertPart


def baro(args, client):
    platform = '_'.join(args[1:]).lower()
    if platform == 'switch' or platform == 'nintendo':
        platform = 'swi'
    if platform == 'xbox':
        platform = "xb1"
    if platform == '':
        platform = 'pc'
    if platform not in ("pc", "ps4", "xb1", "swi"):
        return discord.Embed(title="Baro Ki'Teer",
                             description=Traduction.platformfail(platform),
                             color=0xFF0000)
    else:
        dicbaro = json.loads(importjson('https://api.warframestat.us/' + platform + '/voidTrader'))
        embed = discord.Embed(title="Baro Ki'Teer " + str(' '.join(args[1:]).lower()),
                              description=" ".join(args), color=0x29DDB1)
        embed.set_thumbnail(
            url='http://content.warframe.com/MobileExport/Lotus/Interface/Icons/Player/BaroKiteerAvatar.png')
        embed.set_author(name=client.user.name, icon_url=client.user.avatar_url,
                         url='https://discordbots.org/bot/591950764289818634')
        embed.add_field(name='Location', value=dicbaro['location'])
        if bool(dicbaro['active']):
            return Traduction.baro_depart(dicbaro, embed)
        else:
            return Traduction.baro_arrive(dicbaro, embed)


def nightwave(args):
    platform = '_'.join(args[1:]).lower()
    if platform == 'switch' or platform == 'nintendo':
        platform = 'swi'
    elif platform == 'xbox':
        platform = "xb1"
    elif platform == '':
        platform = 'pc'
    elif platform not in ("pc", "ps4", "xb1", "swi"):
        return Traduction.platformfail(platform)
    nightdic = json.loads(importjson("https://api.warframestat.us/" + platform + "/nightwave"))
    embed = discord.Embed(title="Nightwave", description=" ".join(args), color=0x8D32A8)
    if bool(nightdic['active']):
        return Traduction.Nightwave(nightdic, embed)
    else:
        embed = discord.Embed(title="Nightwave",
                              description='Nora doesn\'t have any mission for you.',
                              color=0xFF0000)
        embed.set_thumbnail(
            url='https://vignette.wikia.nocookie.net/warframe/images/9/95/Nora_Night_transmission.png/revision/latest?cb=20190301081607')
        embed.set_author(name="Nora",
                         icon_url="https://vignette.wikia.nocookie.net/warframe/images/9/95/Nora_Night_transmission.png/revision/latest?cb=20190301081607")
        return embed


def fissures(args, client):
    try:
        platform = args[1]
    except IndexError:
        platform = ""
    try:
        relicTier = args[2].capitalize()
    except IndexError:
        relicTier = ""
    if platform == 'switch' or platform == 'nintendo':
        platform = 'swi'
    elif platform == 'xbox':
        platform = "xb1"
    elif platform == '':
        platform = 'pc'
    elif platform not in ("pc", "ps4", "xb1", "swi"):
        return discord.Embed(title='Fissure', description=Traduction.platformfail(platform),
                             color=0xFF0000)
    data = json.loads(importjson("https://api.warframestat.us/" + platform + "/fissures"))
    embed = discord.Embed(title="Fissure", description=" ".join(args), color=0xDCF3FF)
    embed.set_thumbnail(
        url='https://vignette.wikia.nocookie.net/warframe/images/9/9c/LuminousIconLarge.png/revision/latest?cb=20160717170505&path-prefix=fr')
    embed.set_author(name=client.user.name, icon_url=client.user.avatar_url,
                     url='https://discordbots.org/bot/591950764289818634')
    for place in data:
        embed = Traduction.fissure(place, embed, relicTier)
        if len(embed.fields) >= 24:
            embed.set_footer(text=Traduction.MoreIG())
            return embed
    return embed


def cetus(args, client, world='Cetus'):
    platform = '_'.join(args[1:]).lower()
    if platform == 'switch' or platform == 'nintendo':
        platform = 'swi'
    elif platform == 'xbox':
        platform = "xb1"
    elif platform == '':
        platform = 'pc'
    elif platform not in ("pc", "ps4", "xb1", "swi"):
        return discord.Embed(title=world, description=Traduction.platformfail(platform),
                             color=0xFF0000)
    if world == 'Cetus':
        data = json.loads(importjson('https://api.warframestat.us/' + platform + '/cetusCycle'))
        isDay = data['state']
        timeLeft = data['timeLeft']
        embed = discord.Embed(title="Cetus", description=" ".join(args), color=0xD8A24A)
        embed.set_thumbnail(
            url='https://vignette.wikia.nocookie.net/warframe/images/3/32/OstronSyndicateFlag.png/revision/latest?cb=20171021133528&path-prefix=fr')
        embed.set_author(name=client.user.name, icon_url=client.user.avatar_url,
                         url='https://discordbots.org/bot/591950764289818634')
        embed.add_field(name=Traduction.Day(isDay), value=Traduction.timeleft() + timeLeft)
        return embed
    if world == 'Earth':
        data = json.loads(importjson('https://api.warframestat.us/' + platform + '/earthCycle'))
        isDay = data['state']
        timeLeft = data['timeLeft']
        embed = discord.Embed(title="Earth", description=" ".join(args), color=0x3565A9)
        embed.set_thumbnail(
            url='https://vignette.wikia.nocookie.net/warframe/images/1/1e/Earth.png/revision/latest/scale-to-width-down/350?cb=20161016212227')
        embed.set_author(name=client.user.name, icon_url=client.user.avatar_url,
                         url='https://discordbots.org/bot/591950764289818634')
        embed.add_field(name=Traduction.Day(isDay), value=Traduction.timeleft() + timeLeft)
        return embed


def sortie(args):
    platform = '_'.join(args[1:]).lower()
    if platform == 'switch' or platform == 'nintendo':
        platform = 'swi'
    elif platform == 'xbox':
        platform = "xb1"
    elif platform == '':
        platform = 'pc'
    elif platform not in ("pc", "ps4", "xb1", "swi"):
        return discord.Embed(title='Sortie', description=Traduction.platformfail(platform),
                             color=0xFF0000)
    embed = discord.Embed(title="Sortie", description=" ".join(args), color=0xFFD700)
    embed.set_thumbnail(
        url='https://vignette.wikia.nocookie.net/warframe/images/1/15/Sortie_b.png/revision/latest?cb=20151217134250')
    data = json.loads(importjson('https://api.warframestat.us/' + platform + '/sortie'))
    missionList = data['variants']
    for i in range(len(missionList)):
        dictemp = missionList[i]
        missionType = dictemp['missionType']
        modifiername = dictemp['modifier']
        modifierdesc = dictemp['modifierDescription'].replace('. ', '.\n')
        node = dictemp['node']
        embed.add_field(name="Mission " + str(i + 1), value=node + ' ' + missionType)
        embed.add_field(name=modifiername, value=modifierdesc)
    return embed


def typeofrelic(rarete, item):
    if "axi" in item.lower():
        if rarete == "Intact":
            return 'https://vignette.wikia.nocookie.net/warframe/images/0/0e/VoidProjectionsGoldD.png/revision/latest?cb=20160711164509&path-prefix=fr'
        if rarete == "Exceptional":
            return 'https://vignette.wikia.nocookie.net/warframe/images/3/3c/VoidProjectionsIronA.png/revision/latest?cb=20160903181326&path-prefix=fr'
        if rarete == "Flawless":
            return 'https://vignette.wikia.nocookie.net/warframe/images/4/4e/VoidProjectionsIronB.png/revision/latest?cb=20160903181334&path-prefix=fr'
        if rarete == "Radiant":
            return 'https://vignette.wikia.nocookie.net/warframe/images/1/1a/VoidProjectionsIronC.png/revision/latest?cb=20160903181342&path-prefix=fr'
        else:
            return 'https://vignette.wikia.nocookie.net/warframe/images/0/0e/VoidProjectionsGoldD.png/revision/latest?cb=20160711164509&path-prefix=fr'
    if "meso" in item.lower():
        return 'https://vignette.wikia.nocookie.net/warframe/images/1/12/VoidProjectionsBronzeD.png/revision/latest/scale-to-width-down/199?cb=20160711164431&path-prefix=fr'
    if "neo" in item.lower():
        return 'https://vignette.wikia.nocookie.net/warframe/images/c/c5/VoidProjectionsSilverD.png/revision/latest/scale-to-width-down/199?cb=20160711164523&path-prefix=fr'
    else:
        return 'https://vignette.wikia.nocookie.net/warframe/images/a/ae/VoidProjectionsIronD.png/revision/latest/scale-to-width-down/199?cb=20160711164451&path-prefix=fr'


def relicSearch(args, client):
    # !relic
    # Content: https://drops.warframestat.us/data/relics/Meso/R1.json
    try:
        relicTier = args[1].capitalize()
        relicName = args[2].capitalize()
    except IndexError:
        return
    try:
        if args[3] == "1":
            rarete = "Exceptional"
        elif args[3] == "2":
            rarete = "Flawless"
        elif args[3] == "3":
            rarete = "Radiant"
        else:
            rarete = "Intact"
    except IndexError:  # narrowed from a bare except; only args[3] may be missing
        rarete = "Intact"
    embed = discord.Embed(title=" Relic " + relicTier + ' ' + relicName,
                          description=rarete, color=0x514430)
    embed.set_author(name=client.user.name, icon_url=client.user.avatar_url,
                     url='https://discordbots.org/bot/591950764289818634')
    embed.set_thumbnail(url=typeofrelic(rarete, relicTier))
    try:
        jsonall = json.loads(
            importjson("https://drops.warframestat.us/data/relics/" + relicTier + "/" + relicName + ".json"))
    except json.decoder.JSONDecodeError:
        embed.add_field(name=Traduction.bug(relicTier + " " + relicName),
                        value=Traduction.bug(relicTier + " " + relicName))
        return embed
    itemList = sorted(jsonall['rewards'][rarete], key=lambda k: k['chance'], reverse=True)
    for item in itemList:
        name = item['itemName']
        chance = item['chance']
        embed.add_field(name="Drop rate: " + str(chance) + "%", value=name, inline=True)
    return embed


def searchRelic(args, client):
    # relicsearch/searchrelic
    # Where to get: https://api.warframestat.us/drops/search/MeSo r1
    try:
        relic = args[1] + " " + args[2]
    except IndexError:
        embed = discord.Embed(title=" Relic " + args[1], description=" ".join(args), color=0x514430)
        embed.set_author(name=client.user.name, icon_url=client.user.avatar_url,
                         url='https://discordbots.org/bot/591950764289818634')
        embed.set_thumbnail(url=typeofrelic(None, args[1]))
        # TODO: show !helpW relicSearch
        embed.add_field(name=Traduction.bug(args[1]), value=Traduction.bug(args[1]))
        return embed
    embed = discord.Embed(title=" Relic " + relic, description=" ".join(args), color=0x514430)
    embed.set_author(name=client.user.name, icon_url=client.user.avatar_url,
                     url='https://discordbots.org/bot/591950764289818634')
    embed.set_thumbnail(url=typeofrelic(None, relic))
    try:
        jsonall = json.loads(
            importjson("https://api.warframestat.us/drops/search/" + relic))
    except json.decoder.JSONDecodeError:
        embed.add_field(name=Traduction.bug(relic), value=Traduction.bug(relic))
        return embed
    jsonSorted = sorted(jsonall, key=lambda k: k['chance'], reverse=True)
    for item in jsonSorted:
        place = item['place']
        chance = item['chance']
        if "Relic" not in place:
            embed.add_field(name="Drop rate: " + str(chance) + "%", value=place, inline=True)
    if len(embed.fields) == 0:
        embed.add_field(name="Relic is vaulted", value="Relic is vaulted, so you can only buy it.")
    return embed


def getimage(item):
    try:
        imageNameJs = json.loads(importjson("https://api.warframestat.us/items/search/" + item))
    except json.decoder.JSONDecodeError:
        return ''
    try:
        imageName = imageNameJs[0]['imageName']
    except IndexError:
        return ''
    return "https://raw.githubusercontent.com/wfcd/warframe-items/development/data/img/" + imageName


def cleanItem(args):
    r = ""
    for arg in args:
        r += ConvertPart(arg) + " "
    return r.replace("blueprint", "BP").replace("prime", "P.")[:-1].capitalize()


def search(args, client):
    itemS = cleanItem(args[1:])
    embed = discord.Embed(title=itemS, description=" ".join(args), color=0x514430)
    embed.set_author(name=client.user.name, icon_url=client.user.avatar_url,
                     url='https://discordbots.org/bot/591950764289818634')
    embed.set_thumbnail(url=getimage(itemS.lower()))
    try:
        jsonall = json.loads(
            importjson("https://api.warframestat.us/drops/search/" + itemS))
    except json.decoder.JSONDecodeError:
        embed.add_field(name=Traduction.bug(itemS), value=Traduction.bug(itemS))
        return embed
    jsonSorted = sorted(jsonall, key=lambda k: k['chance'], reverse=True)
    for item in jsonSorted:
        place = item['place']
        chance = item['chance']
        if itemS.lower() in item['item'].lower():
            embed.add_field(name="Drop rate: " + str(chance) + "%", value=place, inline=True)
    return embed


def Invasions(client):
    embed = discord.Embed(title="Invasion", description="!invasion", color=0x514430)
    embed.set_author(name=client.user.name, icon_url=client.user.avatar_url,
                     url='https://discordbots.org/bot/591950764289818634')
    embed.set_thumbnail(
        url="https://vignette.wikia.nocookie.net/warframe/images/2/26/InvasionSplash.png/revision/latest?cb=20140421123118")
    jsonall = json.loads(importjson("https://api.warframestat.us/pc/invasions"))
    for invas in jsonall:
        if "-" not in invas['eta']:
            value = invas['attackerReward']['asString'] + ' vs ' + invas['defenderReward']['asString'] if \
                invas['attackerReward']['asString'] != "" else invas['defenderReward']['asString']
            embed.add_field(name=invas['defendingFaction'] + ' vs ' + invas['attackingFaction'], value=value)
            embed.add_field(name="Time before end", value=invas['eta'])
    return embed
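# --- Illustrative sketch (not part of the original file) ---
# The same platform-alias normalization is copied into baro/nightwave/
# fissures/cetus/sortie above; a hypothetical shared helper could replace
# all six copies (returns None for an unknown platform):
def normalize_platform(args):
    platform = '_'.join(args[1:]).lower()
    aliases = {'switch': 'swi', 'nintendo': 'swi', 'xbox': 'xb1', '': 'pc'}
    platform = aliases.get(platform, platform)
    return platform if platform in ("pc", "ps4", "xb1", "swi") else None

# e.g. normalize_platform(['!baro', 'switch']) == 'swi'
#      normalize_platform(['!baro']) == 'pc'
#      normalize_platform(['!baro', 'dreamcast']) is None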
47.574257
174
0.628096
1,617
14,415
5.557823
0.170068
0.019584
0.030266
0.028374
0.602314
0.567931
0.512629
0.504395
0.477022
0.430733
0
0.049138
0.227749
14,415
302
175
47.731788
0.758175
0.012973
0
0.481481
0
0.074074
0.276672
0
0
0
0.009196
0.003311
0
1
0.044444
false
0
0.059259
0
0.233333
0.003704
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a68f7c2f0d4b35ca7ebcf85db3c6fcee5c038bc
4,789
py
Python
agents/combined_agent.py
eostendarp/chess-ai
de4bae2378d798dd11b90cb0485b85980ba85ed9
[ "MIT" ]
1
2021-12-01T08:44:53.000Z
2021-12-01T08:44:53.000Z
agents/combined_agent.py
eostendarp/chess-ai
de4bae2378d798dd11b90cb0485b85980ba85ed9
[ "MIT" ]
null
null
null
agents/combined_agent.py
eostendarp/chess-ai
de4bae2378d798dd11b90cb0485b85980ba85ed9
[ "MIT" ]
3
2019-12-19T18:06:20.000Z
2020-01-17T20:04:02.000Z
from agents.base_agent import BaseAgent
from random import shuffle
from chess import *
from utils.heuristics import mvvlva, get_possible_moves
import copy
from utils.history_utils import *
import os


class CombinedAgent(BaseAgent):
    """
    Agent with proper move ordering (PV line + history heuristic).

    Constructor parameters:
    :param color: Boolean for White (True) or Black (False)
    :param heuristic: function used to score the board
    :param maximum_depth: maximum depth the agent will search
    :param load_hh: whether History Heuristic tables are loaded from disk
    """
    def __init__(self, color, heuristic, maximum_depth, load_hh=False):
        super().__init__(color)
        self.name = 'combined'
        self.heuristic = heuristic
        self.maximum_depth = maximum_depth
        self.history = self.init_history(load_hh=load_hh)
        self.pv_line = []

    def init_history(self, load_hh):
        if load_hh:
            table = read_in_history_table(os.getcwd() + "/data/history_table.json")
        else:
            pieces = [PAWN, KNIGHT, BISHOP, ROOK, QUEEN, KING]
            values = {}
            for i in range(64):
                values[i] = 0
            table = {True: {}, False: {}}
            for p in pieces:
                table[True][p] = copy.copy(values)
                table[False][p] = copy.copy(values)
        return table

    def get_move(self, board):
        """
        Top level function for alpha_beta
        :param board: Board object
        :return: a Move object to be used in chess_game.py
        """
        current_depth = 0
        # possible_moves = [move for move in board.legal_moves]
        # shuffle(possible_moves)
        possible_moves = get_possible_moves(board, True, self.pv_line, current_depth,
                                            history=self.history)
        best_move = None
        best_score = float('-inf')
        score_array = [best_score]
        for move in possible_moves:
            board.push_uci(move.uci())
            if board.is_checkmate() and board.turn != self.color:
                return move
            score = self.alpha_beta(board, self.heuristic, float('-inf'), float('inf'),
                                    False, self.maximum_depth - 1, score_array)
            board.pop()
            if score > best_score:
                best_score = score
                best_move = move
        # print("AlphaBeta:", best_score)
        # self.pv_line.reverse()
        print(self.pv_line)
        print("Combined: ", best_move)
        return best_move

    def alpha_beta(self, board, heuristic, alpha, beta, max_turn, depth, best):
        """
        Same as Alpha Beta from PV Agent
        :param board: chess board
        :param heuristic: heuristic function
        :param alpha: alpha value
        :param beta: beta value
        :param max_turn: True if it is the maximizing player's turn
        :param depth: remaining search depth
        :param best: single-element list holding the best score seen so far
        :return: best score for this subtree
        """
        original_best = best[0]
        if depth == 0 or board.is_game_over():
            curr_score = heuristic(board, self.color, max_turn)
            if curr_score > best[0]:
                self.pv_line.clear()
                best.clear()
                best.append(curr_score)
                return curr_score
            else:
                return curr_score
        possible_moves = get_possible_moves(board, max_turn, self.pv_line,
                                            self.maximum_depth - depth, history=self.history)
        best_score = float('-inf') if max_turn else float('inf')
        for move in possible_moves:
            board.push_uci(move.uci())
            score = self.alpha_beta(board, heuristic, alpha, beta, not max_turn, depth - 1, best)
            if original_best != best[0]:
                original_best = best[0]
                self.pv_line.insert(0, board.pop())
            else:
                board.pop()
            if max_turn and score > best_score:
                best_score = score
                if best_score >= beta:
                    if not board.is_capture(move):
                        piece = board.piece_at(move.from_square)
                        self.history[max_turn][piece.piece_type][move.to_square] += pow(2, depth)
                    return best_score
                alpha = max(alpha, best_score)
            if not max_turn and score < best_score:
                best_score = score
                if best_score <= alpha:
                    if not board.is_capture(move):
                        piece = board.piece_at(move.from_square)
                        self.history[max_turn][piece.piece_type][move.to_square] += pow(2, depth)
                    return best_score
                beta = min(beta, best_score)
        return best_score
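# --- Illustrative sketch (not part of the original file) ---
# The history-heuristic bookkeeping used in alpha_beta above, in isolation:
# on a beta/alpha cutoff caused by a quiet (non-capture) move, the
# (side, piece, to-square) cell is bumped by 2**depth so cutoffs found with
# more remaining depth weigh more in later move ordering. The piece constant
# 1 below stands in for PAWN.
history = {side: {1: {sq: 0 for sq in range(64)}} for side in (True, False)}

def record_cutoff(history, max_turn, piece_type, to_square, depth):
    history[max_turn][piece_type][to_square] += 2 ** depth

record_cutoff(history, True, 1, 28, depth=3)
print(history[True][1][28])  # 8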
35.213235
124
0.565254
577
4,789
4.4974
0.22877
0.062428
0.026975
0.020809
0.250482
0.200385
0.163391
0.163391
0.163391
0.163391
0
0.004486
0.348298
4,789
135
125
35.474074
0.826979
0.163291
0
0.290698
0
0
0.015605
0.006242
0
0
0
0
0
1
0.046512
false
0
0.081395
0
0.232558
0.023256
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a6bfe436ceb510a4ef2341c436663fb13a30fc3
1,647
py
Python
numba_dppy/tests/kernel_tests/test_caching.py
vlad-perevezentsev/numba-dppy
9c8dabf929368db96c3a2abf42072178b6cd9634
[ "Apache-2.0" ]
null
null
null
numba_dppy/tests/kernel_tests/test_caching.py
vlad-perevezentsev/numba-dppy
9c8dabf929368db96c3a2abf42072178b6cd9634
[ "Apache-2.0" ]
null
null
null
numba_dppy/tests/kernel_tests/test_caching.py
vlad-perevezentsev/numba-dppy
9c8dabf929368db96c3a2abf42072178b6cd9634
[ "Apache-2.0" ]
null
null
null
# Copyright 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys

import numpy as np
import numba_dppy as dppy
import pytest
import dpctl

from numba_dppy.tests.skip_tests import skip_test

list_of_filter_strs = [
    "opencl:gpu:0",
    "level0:gpu:0",
    "opencl:cpu:0",
]


@pytest.fixture(params=list_of_filter_strs)
def filter_str(request):
    return request.param


def data_parallel_sum(a, b, c):
    i = dppy.get_global_id(0)
    c[i] = a[i] + b[i]


def test_caching_kernel(filter_str):
    if skip_test(filter_str):
        pytest.skip()
    global_size = 10
    N = global_size
    a = np.array(np.random.random(N), dtype=np.float32)
    b = np.array(np.random.random(N), dtype=np.float32)
    c = np.ones_like(a)
    with dpctl.device_context(filter_str) as gpu_queue:
        func = dppy.kernel(data_parallel_sum)
        caching_kernel = func[global_size, dppy.DEFAULT_LOCAL_SIZE].specialize(a, b, c)
        for i in range(10):
            cached_kernel = func[global_size, dppy.DEFAULT_LOCAL_SIZE].specialize(
                a, b, c
            )
            assert caching_kernel == cached_kernel
28.396552
87
0.700668
255
1,647
4.376471
0.470588
0.053763
0.008065
0.028674
0.159498
0.159498
0.159498
0.159498
0.159498
0.094982
0
0.016067
0.206436
1,647
57
88
28.894737
0.837796
0.336976
0
0
0
0
0.033395
0
0
0
0
0
0.030303
1
0.090909
false
0
0.181818
0.030303
0.30303
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a6d4a86bd5ee2425a821798decbd47a07511f82
4,987
py
Python
datasets/quantization.py
jac99/Egonn
075e00368a1676df741a35f42f6f38497da9d58f
[ "MIT" ]
9
2021-10-31T07:11:58.000Z
2022-03-29T14:06:49.000Z
datasets/quantization.py
jac99/Egonn
075e00368a1676df741a35f42f6f38497da9d58f
[ "MIT" ]
null
null
null
datasets/quantization.py
jac99/Egonn
075e00368a1676df741a35f42f6f38497da9d58f
[ "MIT" ]
3
2021-11-12T17:42:41.000Z
2022-03-11T00:41:47.000Z
import numpy as np
from typing import List
from abc import ABC, abstractmethod

import torch
import MinkowskiEngine as ME


class Quantizer(ABC):
    @abstractmethod
    def __call__(self, pc):
        pass

    @abstractmethod
    def dequantize(self, coords):
        pass

    @abstractmethod
    def keypoint_position(self, supervoxel_centers, stride, kp_offset):
        pass


class PolarQuantizer(Quantizer):
    def __init__(self, quant_step: List[float]):
        assert len(quant_step) == 3, '3 quantization steps expected: for sector (in degrees), ring and z-coordinate (in meters)'
        # (the original assigned self.quant_step twice; once is enough)
        self.quant_step = torch.tensor(quant_step, dtype=torch.float)
        self.theta_range = int(360. // self.quant_step[0])

    def __call__(self, pc):
        # Convert to polar coordinates and quantize with a different step size for each coordinate
        # pc: (N, 3) point cloud with Cartesian coordinates (X, Y, Z)
        assert pc.shape[1] == 3
        # theta is an angle in degrees in 0..360 range
        theta = 180. + torch.atan2(pc[:, 1], pc[:, 0]) * 180. / np.pi
        # dist is the distance from the coordinate origin
        dist = torch.sqrt(pc[:, 0] ** 2 + pc[:, 1] ** 2)
        z = pc[:, 2]
        polar_pc = torch.stack([theta, dist, z], dim=1)
        # Scale each coordinate so that after quantization with step 1. we get
        # the required quantization step in each dim
        polar_pc = polar_pc / self.quant_step
        quantized_polar_pc, ndx = ME.utils.sparse_quantize(polar_pc, quantization_size=1., return_index=True)
        # Return quantized coordinates and indices of selected elements
        return quantized_polar_pc, ndx

    def to_cartesian(self, pc):
        # Convert to radians in -180..180 range
        theta = np.pi * (pc[:, 0] - 180.) / 180.
        x = torch.cos(theta) * pc[:, 1]
        y = torch.sin(theta) * pc[:, 1]
        z = pc[:, 2]
        cartesian_pc = torch.stack([x, y, z], dim=1)
        return cartesian_pc

    def dequantize(self, coords):
        # Dequantize coords and convert to Cartesian as an (N, 3) tensor of floats
        pc = (0.5 + coords) * self.quant_step.to(coords.device)
        return self.to_cartesian(pc)

    def keypoint_position(self, supervoxel_centres, stride, kp_offset):
        # Add the voxel center position (0.5 * self.voxel_size) to the offset from
        # the supervoxel centre value (in -1..1 range, converted to absolute values):
        # self.voxel_size + features * super_voxel_size / 2
        device = supervoxel_centres.device
        supervoxel_centres = (supervoxel_centres + 0.5) * self.quant_step.to(device)
        supervoxel_size = torch.tensor(stride, dtype=torch.float, device=device) * \
            self.quant_step.to(device)
        # kp_pos = supervoxel_centres
        kp_pos = supervoxel_centres + kp_offset * supervoxel_size / 2.
        kp_pos = self.to_cartesian(kp_pos)
        return kp_pos


class CartesianQuantizer(Quantizer):
    def __init__(self, quant_step: float):
        self.quant_step = quant_step

    def __call__(self, pc):
        # Quantize Cartesian coordinates with the same step size for each coordinate
        # pc: (N, 3) point cloud with Cartesian coordinates (X, Y, Z)
        assert pc.shape[1] == 3
        quantized_pc, ndx = ME.utils.sparse_quantize(pc, quantization_size=self.quant_step, return_index=True)
        # Return quantized coordinates and indices of selected elements
        return quantized_pc, ndx

    def dequantize(self, coords):
        # Dequantize coords and return as an (N, 3) tensor of floats,
        # using the coordinates of the voxel center
        pc = (0.5 + coords) * self.quant_step
        return pc

    def keypoint_position(self, supervoxel_centers, stride, kp_offset):
        # Add the voxel center position (0.5 * self.voxel_size) to the offset from
        # the supervoxel centre value (in -1..1 range, converted to absolute values):
        # self.voxel_size + features * super_voxel_size / 2
        supervoxel_centres = (supervoxel_centers + 0.5) * self.quant_step
        supervoxel_size = torch.tensor(stride, dtype=torch.float, device=supervoxel_centres.device) * self.quant_step
        if kp_offset is not None:
            kp_pos = supervoxel_centres + kp_offset * supervoxel_size / 2.
        else:
            kp_pos = supervoxel_centres
        return kp_pos


if __name__ == "__main__":
    n = 1000
    cart = torch.rand((n, 3), dtype=torch.float)
    cart[:, 0] = cart[:, 0] * 200. - 100.
    cart[:, 1] = cart[:, 1] * 200. - 100.
    cart[:, 2] = cart[:, 2] * 30. - 10.

    quantizer = PolarQuantizer([0.5, 0.3, 0.2])
    polar_quant, ndx = quantizer(cart)
    back2cart = quantizer.dequantize(polar_quant)
    cart_filtered = cart[ndx]
    dist = torch.norm(back2cart - cart_filtered, dim=1)
    print(f'Residual error - min: {torch.min(dist):0.5f} max: {torch.max(dist):0.5f} mean: {torch.mean(dist):0.5f}')
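# --- Illustrative sketch (not part of the original file) ---
# The dequantization convention used above for one coordinate, assuming (as
# ME.utils.sparse_quantize does) that quantization floors the scaled value to
# an integer voxel index: index k with step q maps back to the voxel *center*
# (k + 0.5) * q, so the per-axis round-trip error is at most q / 2.
q = 0.3                  # quantization step for one coordinate
x = 7.41
k = int(x // q)          # quantized voxel index (floor)
x_back = (0.5 + k) * q   # dequantize to the voxel center
assert abs(x - x_back) <= q / 2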
41.558333
128
0.650291
688
4,987
4.546512
0.21657
0.05179
0.058184
0.028133
0.480499
0.45876
0.400256
0.358696
0.358696
0.299872
0
0.030287
0.245238
4,987
119
129
41.907563
0.800744
0.242631
0
0.307692
0
0.012821
0.054061
0.017843
0
0
0
0
0.038462
1
0.153846
false
0.038462
0.064103
0
0.346154
0.012821
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a6f0184c4d65965cb2f85e4f4dcdf6dd9435d33
35,621
py
Python
sumoslack/api.py
soagarwal07/sumologic-slack
f2c87f9c3a72ed1c090c3fa10a1cf70373e19003
[ "Apache-2.0" ]
null
null
null
sumoslack/api.py
soagarwal07/sumologic-slack
f2c87f9c3a72ed1c090c3fa10a1cf70373e19003
[ "Apache-2.0" ]
null
null
null
sumoslack/api.py
soagarwal07/sumologic-slack
f2c87f9c3a72ed1c090c3fa10a1cf70373e19003
[ "Apache-2.0" ]
null
null
null
import time
import sys

sys.path.insert(0, '/opt')  # layer packages are in the opt directory

from slackclient import SlackClient
from sumoappclient.common.utils import get_current_timestamp
from sumoappclient.sumoclient.base import BaseAPI
from sumoappclient.sumoclient.factory import OutputHandlerFactory
from sumoappclient.sumoclient.httputils import ClientMixin


class SlackAPI(BaseAPI):
    MOVING_WINDOW_DELTA = 1

    def __init__(self, kvstore, config, team_name):
        super(SlackAPI, self).__init__(kvstore, config)
        self.team_name = team_name
        # Set Slack configuration and create a Slack client
        self.api_config = self.config['Slack']
        self.token = self.config['Slack']['TOKEN']
        self.slackClient = SlackClient(self.token)

    def get_window(self, last_time_epoch):
        start_time_epoch = last_time_epoch + self.MOVING_WINDOW_DELTA
        end_time_epoch = get_current_timestamp() - self.collection_config['END_TIME_EPOCH_OFFSET_SECONDS']
        while end_time_epoch < start_time_epoch:
            # initially last_time_epoch equals the current timestamp, so the end time can fall before the start time
            end_time_epoch = get_current_timestamp()
        return start_time_epoch, end_time_epoch


class FetchCursorBasedData(SlackAPI):

    @staticmethod
    def _next_cursor_is_present(result):
        """Determine if the response contains 'next_cursor' and 'next_cursor' is not empty.

        Returns:
            A boolean value.
        """
        present = (
            "response_metadata" in result
            and "next_cursor" in result["response_metadata"]
            and result["response_metadata"]["next_cursor"] != ""
        )
        return present

    def fetch(self):
        next_request = True
        method, args = self.build_fetch_params()
        retry_counter = 0
        page_counter = 0
        record_counter = 0
        output_handler = OutputHandlerFactory.get_handler(self.collection_config['OUTPUT_HANDLER'], config=self.config)
        try:
            while next_request:
                send_success = retry_request = has_next_cursor = False
                result = self.slackClient.api_call(method, self.collection_config['TIMEOUT'], **args)
                fetch_success = result["ok"]
                if fetch_success:
                    data_to_be_sent = self.transform_data(result)
                    send_success = output_handler.send(data_to_be_sent, **self.build_send_params())
                    if send_success:
                        page_counter += 1
                        record_counter += len(data_to_be_sent)
                        has_next_cursor = self._next_cursor_is_present(result)
                        if has_next_cursor:
                            args["cursor"] = result["response_metadata"]["next_cursor"]
                            self.save_state(args["cursor"], data_to_be_sent)
                        else:
                            self.save_state(None, data_to_be_sent)
                    else:
                        self.save_state(args["cursor"], [])
                else:
                    if "error" in result and result["error"].startswith("invalid_cursor"):
                        self.save_state(None, [])
                    elif "Retry-After" in result["headers"]:
                        # The `Retry-After` header tells how long to wait before retrying
                        delay = int(result["headers"]["Retry-After"])
                        self.log.warning("Rate limited. Retrying in %s seconds", str(delay))
                        time.sleep(delay)
                        # increment the retry counter
                        retry_counter += 1
                        # retry at most MAX_RETRY times
                        if retry_counter <= self.collection_config["MAX_RETRY"]:
                            self.log.debug("Retrying the method %s for %s", method, args["cursor"])
                            retry_request = True
                        else:
                            retry_request = False
                    else:
                        self.log.warning("Failed to fetch LogType %s, Cursor %s, Error %s",
                                         method, args["cursor"], result["error"])
                if retry_request:
                    next_request = True
                else:
                    next_request = fetch_success and send_success and has_next_cursor and self.is_time_remaining()
        finally:
            output_handler.close()
            self.log.info("Completed LogType %s, Pages: %s, Records %s", method, page_counter, record_counter)


class FetchPaginatedDataBasedOnLatestAndOldestTimeStamp(SlackAPI):

    def fetch(self):
        output_handler = OutputHandlerFactory.get_handler(self.collection_config['OUTPUT_HANDLER'], config=self.config)
        method, args = self.build_fetch_params()
        current_state = self.get_state()
        next_request = True
        page_counter = 0
        record_counter = 0
        try:
            while next_request:
                send_success = has_more_data = False
                result = self.slackClient.api_call(method, self.collection_config['TIMEOUT'], **args)
                fetch_success = result["ok"]
                if fetch_success:
                    data_to_be_sent = self.transform_data(result)
                    if len(data_to_be_sent) > 0:
                        send_success = output_handler.send(data_to_be_sent, **self.build_send_params())
                        if send_success:
                            page_counter += 1
                            record_counter += len(data_to_be_sent)
                            last_record_fetched_timestamp = data_to_be_sent[-1]["ts"]
                            self.log.debug("Successfully sent LogType %s, oldest %s, latest %s, number of records %s",
                                           method, args["oldest"], args["latest"], len(data_to_be_sent))
                            if "has_more" in result and result["has_more"]:
                                has_more_data = True
                                args["latest"] = float(last_record_fetched_timestamp) - 0.00001
                                self.save_state({"fetch_oldest": current_state["fetch_oldest"],
                                                 "fetch_latest": current_state["fetch_latest"],
                                                 "last_record_fetched_timestamp": last_record_fetched_timestamp})
                            else:
                                self.log.debug("moving time window for LogType %s, %s, oldest %s, latest %s",
                                               method, self.channel_name, args["oldest"], args["latest"])
                                self.save_state({"fetch_oldest": current_state["fetch_latest"],
                                                 "fetch_latest": None,
                                                 "last_record_fetched_timestamp": None})
                        else:
                            self.log.warning("Failed to send LogType %s, %s, oldest %s, latest %s",
                                             method, self.channel_name, args["oldest"], args["latest"])
                    else:
                        self.log.debug("No Result found for %s, Oldest %s, Latest %s",
                                       self.channel_name, args["oldest"], args["latest"])
                        self.save_state({"fetch_oldest": current_state["fetch_oldest"],
                                         "fetch_latest": None,
                                         "last_record_fetched_timestamp": None})
                else:
                    self.log.warning("Failed to fetch LogType %s, %s, oldest %s, latest %s, error %s",
                                     method, self.channel_name, args["oldest"], args["latest"], result["error"])
                next_request = fetch_success and send_success and has_more_data and self.is_time_remaining()
        except Exception as exc:
            self.log.error("Error Occurred while fetching LogType %s, %s, Error %s", method, self.channel_name, exc)
        finally:
            output_handler.close()
            self.log.info("Completed LogType %s, %s, Pages: %s, Records %s",
                          method, self.channel_name, page_counter, record_counter)


class FetchPaginatedDataBasedOnPageNumber(SlackAPI):

    def fetch(self):
        method, args = self.build_fetch_params()
        output_handler = OutputHandlerFactory.get_handler(self.collection_config['OUTPUT_HANDLER'], config=self.config)
        try:
            result = self.slackClient.api_call(method, self.collection_config['TIMEOUT'], **args)
            fetch_success = result["ok"]
            if fetch_success:
                data_to_be_sent = self.transform_data(result)
                if len(data_to_be_sent) > 0:
                    send_success = output_handler.send(data_to_be_sent, **self.build_send_params())
                    if send_success:
                        self.save_state(result["paging"])
                        self.log.debug("Sent successfully for LogType %s, Page %s, Before %s, Records %s",
                                       method, self.page, args["before"], len(data_to_be_sent))
                    else:
                        self.log.warning("Send failed for LogType %s, Page %s, Before %s",
                                         method, self.page, args["before"])
                else:
                    self.save_state(result["paging"])
                    self.log.debug("No Result fetched for LogType %s, Page %s, Before %s",
                                   method, self.page, args["before"])
            else:
                self.log.warning("Fetch failed for LogType %s, Page %s, Before %s, Error %s",
                                 method, self.page, args["before"], result["error"])
        except Exception as exc:
            self.log.error("Error Occurred while fetching LogType %s, Page %s, Before %s, Error %s",
                           method, self.page, args["before"], exc)
        self.log.info("Completed LogType %s, Page %s, Before %s", method, self.page, args["before"])


class FetchAuditData(FetchCursorBasedData):

    def fetch(self):
        output_handler = OutputHandlerFactory.get_handler(self.collection_config['OUTPUT_HANDLER'], config=self.config)
        url, args = self.build_fetch_params()
        current_state = self.get_state()
        log_type = self.get_key()
        next_request = True
        page_counter = 0
        record_counter = 0
        sess = ClientMixin.get_new_session()
        try:
            while next_request:
                send_success = has_more_data = False
                status, result = ClientMixin.make_request(
                    url, method="get", session=sess, logger=self.log,
                    TIMEOUT=self.collection_config['TIMEOUT'],
                    MAX_RETRY=self.collection_config['MAX_RETRY'],
                    BACKOFF_FACTOR=self.collection_config['BACKOFF_FACTOR'],
                    params=args, headers={"Authorization": "Bearer " + self.token})
                fetch_success = status and "entries" in result
                if fetch_success:
                    data_to_be_sent = self.transform_data(result)
                    if len(data_to_be_sent) > 0:
                        send_success = output_handler.send(data_to_be_sent, **self.build_send_params())
                        if send_success:
                            page_counter += 1
                            record_counter += len(data_to_be_sent)
                            last_record_fetched_timestamp = data_to_be_sent[-1]["date_create"]
                            self.log.debug("Successfully sent LogType %s, oldest %s, latest %s, number of records %s",
                                           log_type, args["latest"], args["oldest"], len(data_to_be_sent))
                            args["latest"] = float(last_record_fetched_timestamp) - 0.00001
                            if self._next_cursor_is_present(result):
                                has_more_data = True
                                args["latest"] = float(last_record_fetched_timestamp) - 0.00001
                                self.save_state({"fetch_oldest": current_state["fetch_oldest"],
                                                 "fetch_latest": current_state["fetch_latest"],
                                                 "last_record_fetched_timestamp": last_record_fetched_timestamp})
                            else:
                                self.log.debug("moving time window for LogType %s, oldest %s, latest %s",
                                               self.get_key(), args["oldest"], args["latest"])
                                self.save_state({"fetch_oldest": current_state["fetch_latest"],
                                                 "fetch_latest": None,
                                                 "last_record_fetched_timestamp": None})
                        else:
                            self.log.warning("Failed to send LogType %s, oldest %s, latest %s",
                                             log_type, args["oldest"], args["latest"])
                    else:
                        self.log.debug("No Result found for %s, Oldest %s, Latest %s",
                                       log_type, args["oldest"], args["latest"])
                        self.save_state({"fetch_oldest": current_state["fetch_oldest"],
                                         "fetch_latest": None,
                                         "last_record_fetched_timestamp": None})
                else:
                    self.log.warning("Failed to fetch LogType %s, oldest %s, latest %s, error %s",
                                     log_type, args["oldest"], args["latest"], result["error"])
                next_request = fetch_success and send_success and has_more_data and self.is_time_remaining()
        except Exception as exc:
            self.log.error("Error Occurred while fetching LogType %s, Error %s", log_type, exc)
        finally:
            output_handler.close()
            sess.close()
            self.log.info("Completed LogType %s, Pages: %s, Records %s", log_type, page_counter, record_counter)


class UsersDataAPI(FetchCursorBasedData):

    def __init__(self, kvstore, config, team_name, data_refresh_time):
        super(UsersDataAPI, self).__init__(kvstore, config, team_name)
        self.data_refresh_time = data_refresh_time

    def get_key(self):
        return "Users"

    def save_state(self, cursor, users):
        self.kvstore.set(self.get_key(), cursor)
        if len(users) > 0:
            for user_data in users:
                self.kvstore.set(user_data["id"], {"updated": user_data["updated"],
                                                   "lastSent": get_current_timestamp(),
                                                   "user_name": user_data["name"]})

    def get_state(self):
        key = self.get_key()
        if not self.kvstore.has_key(key):
            return None
        cursor = self.kvstore.get(key)
        return cursor

    def build_fetch_params(self):
        return "users.list", {"include_locale": True, "limit": 200, "cursor": self.get_state()}

    def build_send_params(self):
        return {
            "endpoint_key": "HTTP_LOGS_ENDPOINT"
        }

    def transform_data(self, content):
        transformed_users = []
        if content is not None and "members" in content:
            for user_data in content["members"]:
                transformed_user_data = self._transform_user_data(user_data)
                if transformed_user_data is not None:
                    transformed_users.append(transformed_user_data)
        return transformed_users

    def _transform_user_data(self, user_data):
        user_id = user_data["id"]
        email = "-"
        if "profile" in user_data and "email" in user_data["profile"]:
            email = user_data["profile"]["email"]
        # check if the data is present in the key-value store and send only if there is any change in user data
        last_updated = None
        last_sent = None
        if self.kvstore.has_key(user_id):
            user = self.kvstore.get(user_id)
            last_updated = user["updated"]
            last_sent = user["lastSent"]
        # Send user data every 24 hours, and sooner if it was updated in the meantime
        if last_updated == user_data["updated"] and get_current_timestamp() - last_sent < self.data_refresh_time:
            self.log.debug("user already present")
        else:
            transformed_user_data = {"id": user_data.get("id"), "name": user_data.get("name"),
                                     "deleted": user_data.get("deleted", False),
                                     "real_name": user_data.get("real_name", "-"),
                                     "tz": user_data.get("tz", "-"),
                                     "tz_label": user_data.get("tz_label", "-"),
                                     "is_admin": user_data.get("is_admin", False),
                                     "is_owner": user_data.get("is_owner", False),
                                     "is_primary_owner": user_data.get("is_primary_owner", False),
                                     "is_restricted": user_data.get("is_restricted", False),
                                     "is_ultra_restricted": user_data.get("is_ultra_restricted", False),
                                     "is_bot": user_data.get("is_bot", False),
                                     "is_app_user": user_data.get("is_app_user", False),
                                     "updated": user_data.get("updated"),
                                     "has_2fa": user_data.get("has_2fa", False),
                                     "teamName": self.team_name, "email": email,
                                     "billable": self._billing_info(user_id),
                                     "logType": "UserLog"}
            return transformed_user_data
        return None

    def _billing_info(self, user_id):
        data = self.slackClient.api_call("team.billableInfo", user=user_id)
        if data is not None and "billable_info" in data and user_id in data["billable_info"]:
            billing = data["billable_info"][user_id]
            return billing["billing_active"]
        return False


class ChannelsDataAPI(FetchCursorBasedData):
    frequent = "frequent_"
    in_frequent = "in_frequent_"

    def __init__(self, kvstore, config, team_name, channel_page_number, infrequent_channel_threshold,
                 frequent_channels_to_be_sent, infrequent_channels_to_be_sent, enable_infrequent_channels):
        super(ChannelsDataAPI, self).__init__(kvstore, config, team_name)
        self.channel_page_number = channel_page_number
        # the max difference between the current timestamp and the last oldest fetched timestamp
        # before a channel is marked as infrequent
        self.infrequent_channel_threshold = infrequent_channel_threshold
        self.infrequent_channels_to_be_sent = infrequent_channels_to_be_sent
        self.frequent_channels_to_be_sent = frequent_channels_to_be_sent
        self.enable_infrequent_channels = enable_infrequent_channels

    def get_key(self):
        return "Channels_"

    # State is saved in chunks of channels, with keys like Channels_1, Channels_2, Channels_3, ...
    # Done to solve the issue "Item size has exceeded the maximum allowed size"
    def save_state(self, cursor, data):
        # Get frequent channels current page
        frequent_channel_page_number = self.kvstore.get("frequent_channel_page_number")
        frequent_channel_page_number = 1 if frequent_channel_page_number is None else frequent_channel_page_number
        frequent_channels = self.kvstore.get(self.get_key() + self.frequent + str(frequent_channel_page_number))
        frequent_channels = [] if frequent_channels is None or frequent_channels["ids"] is None \
            else frequent_channels["ids"]

        # Get in-frequent channels current page
        in_frequent_channel_page_number = self.kvstore.get("in_frequent_channel_page_number")
        in_frequent_channel_page_number = 1 if in_frequent_channel_page_number is None \
            else in_frequent_channel_page_number
        infrequent_channels = self.kvstore.get(self.get_key() + self.in_frequent + str(in_frequent_channel_page_number))
        infrequent_channels = [] if infrequent_channels is None or infrequent_channels["ids"] is None \
            else infrequent_channels["ids"]

        # Update the frequent and infrequent lists as per the threshold provided by the user
        if data is not None:
            for channel in data:
                channel_id = channel["channel_id"]
                channel_name = channel["channel_name"]
                messages_details = self.kvstore.get(channel_id)
                if self.enable_infrequent_channels \
                        and messages_details is not None \
                        and "fetch_oldest" in messages_details \
                        and get_current_timestamp() - messages_details.get("fetch_oldest") > \
                        self.infrequent_channel_threshold:
                    infrequent_channels.append(channel_id + "#" + channel_name)
                else:
                    frequent_channels.append(channel_id + "#" + channel_name)

        # segregate the lists into chunks of the user-provided size and save them in the database
        self.put_channels_data(frequent_channels, frequent_channel_page_number, self.frequent,
                               cursor, self.frequent_channels_to_be_sent)
        self.put_channels_data(infrequent_channels, in_frequent_channel_page_number, self.in_frequent,
                               cursor, self.infrequent_channels_to_be_sent)

    def get_state(self):
        key = self.get_key() + str(self.channel_page_number)
        if not self.kvstore.has_key(key):
            return None
        obj = self.kvstore.get(key)
        return obj

    def build_fetch_params(self):
        cursor = None
        self.channel_page_number = self.frequent + str(self.kvstore.get("frequent_channel_page_number"))
        obj = self.get_state()
        if obj is not None and "cursor" in obj:
            cursor = obj["cursor"]
        return "conversations.list", {"types": "public_channel", "limit": 200, "cursor": cursor,
                                      "exclude_archived": True}

    def build_send_params(self):
        return {
            "endpoint_key": "HTTP_LOGS_ENDPOINT"
        }

    def transform_data(self, content):
        channel_details = []
        if content is not None and "channels" in content:
            for channel in content["channels"]:
                if channel is not None:
                    channel_details.append(
                        {"channel_id": channel["id"], "channel_name": channel["name"],
                         "members": channel["num_members"], "logType": "ChannelDetail",
                         "teamName": self.team_name})
        return channel_details

    def put_channels_data(self, channels, number, key, cursor, channels_to_be_sent):
        ids = self.batchsize_chunking(channels, channels_to_be_sent)
        for channels in ids:
            obj = {"ids": channels, "last_fetched": get_current_timestamp(), "cursor": cursor}
            self.kvstore.set(self.get_key() + key + str(number), obj)
            self.kvstore.set(key + "channel_page_number", number)
            number = number + 1

    def batchsize_chunking(cls, iterable, size=1):
        l = len(iterable)
        for idx in range(0, l, size):
            data = iterable[idx:min(idx + size, l)]
            yield data


class ChannelsMessagesAPI(FetchPaginatedDataBasedOnLatestAndOldestTimeStamp):

    def __init__(self, kvstore, config, channel_id, channel_name, team_name):
        super(ChannelsMessagesAPI, self).__init__(kvstore, config, team_name)
        self.channel_id = channel_id
        self.channel_name = channel_name

    def get_key(self):
        return self.channel_id

    def save_state(self, state):
        self.kvstore.set(self.get_key(), state)

    def get_state(self):
        key = self.get_key()
        if not self.kvstore.has_key(key):
            self.save_state({"fetch_oldest": self.DEFAULT_START_TIME_EPOCH, "fetch_latest": None,
                             "last_record_fetched_timestamp": None})
        obj = self.kvstore.get(key)
        return obj

    def build_fetch_params(self):
        state = self.get_state()
        latest = None
        if "fetch_latest" not in state or ("fetch_latest" in state and state["fetch_latest"] is None):
            oldest, latest = self.get_window(state["fetch_oldest"])
            self.save_state({"fetch_oldest": oldest, "fetch_latest": latest,
                             "last_record_fetched_timestamp": None})
        else:
            oldest = state["fetch_oldest"]
            # to be sure every record has been fetched in case of a previous failure
            if "fetch_latest" in state and state["fetch_latest"] is not None:
                latest = state["fetch_latest"]
            if "last_record_fetched_timestamp" in state and state["last_record_fetched_timestamp"] is not None:
                latest = state["last_record_fetched_timestamp"]
        return "conversations.history", {"channel": self.get_key(), "inclusive": True, "latest": latest,
                                         "oldest": oldest, "limit": 500}

    def build_send_params(self):
        return {
            "endpoint_key": "HTTP_LOGS_ENDPOINT"
        }

    def transform_data(self, content):
        if "messages" in content and len(content["messages"]) > 0:
            messages = content["messages"]
            modified_messages = []
            for data in messages:
                # If the message has replies, call conversations.replies and append all replies to the messages
                if "reply_count" in data and data["reply_count"] >= 1:
                    modified_messages.extend(self.get_replies(data))
                else:
                    modified_messages.append(self.transform_message(data))
            return modified_messages
        return []

    def get_replies(self, data):
        replies = []
        response = self.slackClient.api_call("conversations.replies", channel=self.channel_id,
                                             ts=data["ts"], limit=500)
        replies.extend(self.transform_replies(response))
        while "has_more" in response and response["has_more"]:
            response = self.slackClient.api_call("conversations.replies", channel=self.channel_id,
                                                 ts=data["ts"], limit=500,
                                                 cursor=response["response_metadata"]["next_cursor"])
            replies.extend(self.transform_replies(response))
        return replies

    def transform_replies(self, response):
        replies = []
        if "messages" in response:
            for message in response["messages"]:
                replies.append(self.transform_message(message))
        return replies

    def transform_message(self, data):
        if "files" in data:
            files = []
            for file_data in data["files"]:
                modified_file_data = {"name": file_data["name"], "fileType": file_data["filetype"],
                                      "fileSize": file_data.get("size", 0),
                                      "urlPrivate": file_data.get("url_private", ""),
                                      "urlPrivateDownload": file_data.get("url_private_download", ""),
                                      "permalink": file_data.get("permalink", "")}
                files.append(modified_file_data)
            data["files"] = files
        if "attachments" in data:
            attachments = []
            for attachment_data in data["attachments"]:
                modified_attachment_data = {"id": attachment_data["id"],
                                            "text": attachment_data.get("text", ""),
                                            "author_name": attachment_data.get("author_name", ""),
                                            "author_link": attachment_data.get("author_link", ""),
                                            "pretext": attachment_data.get("pretext", ""),
                                            "fallback": attachment_data.get("fallback", "")}
                attachments.append(modified_attachment_data)
            data["attachments"] = attachments
        if "user" in data and self.kvstore.has_key(data["user"]):
            data["userName"] = self.kvstore.get(data["user"])["user_name"]
        data["channelId"] = self.channel_id
        data["channelName"] = self.channel_name
        data["teamName"] = self.team_name
        data["logType"] = "ConversationLog"
        if "is_starred" in data:
            data.pop("is_starred")
        if "pinned_to" in data:
            data.pop("pinned_to")
        if "reactions" in data:
            data.pop("reactions")
        return data


class AccessLogsAPI(FetchPaginatedDataBasedOnPageNumber):

    def __init__(self, kvstore, config, page, team_name):
        super(AccessLogsAPI, self).__init__(kvstore, config, team_name)
        self.page = page

    def get_key(self):
        return "AccessLogs"

    def save_state(self, state):
        if "pages" in state:
            if self.page == 1:
                self.kvstore.set("Access_logs_max_page", state["pages"] + 1)
            else:
                self.kvstore.set(self.get_key(), state)

    def get_state(self):
        key = self.get_key()
        if not self.kvstore.has_key(key):
            oldest, latest = self.get_window(get_current_timestamp())
            self.save_state({"fetch_before": latest})
        obj = self.kvstore.get(key)
        if self.kvstore.get("Access_logs_Previous_before_time", 0) == 0:
            self.kvstore.set("Access_logs_Previous_before_time", self.DEFAULT_START_TIME_EPOCH)
        return obj

    def build_fetch_params(self):
        state = self.get_state()
        fetch_before = None
        if "fetch_before" in state:
            fetch_before = state["fetch_before"]
        return "team.accessLogs", {"count": 1000, "before": fetch_before, "page": self.page}

    def build_send_params(self):
        return {
            "endpoint_key": "HTTP_LOGS_ENDPOINT"
        }

    def transform_data(self, content):
        data = []
        if content is not None and "logins" in content:
            logs = content["logins"]
            for log in logs:
                log["teamName"] = self.team_name
                log["logType"] = "AccessLog"
                if "date_last" in log and log["date_last"] > self.kvstore.get("Access_logs_Previous_before_time", 0):
                    data.append(log)
        return data


class AuditLogsAPI(FetchAuditData):

    def __init__(self, kvstore, config, url, team_name, workspaceauditactions, userauditactions,
                 channelauditactions, fileauditactions, appauditactions, otherauditactions):
        super(AuditLogsAPI, self).__init__(kvstore, config, team_name)
        self.url = url + "logs"
        self.WorkspaceAuditActions = workspaceauditactions
        self.UserAuditActions = userauditactions
        self.ChannelAuditActions = channelauditactions
        self.FileAuditActions = fileauditactions
        self.AppAuditActions = appauditactions
        self.OtherAuditActions = otherauditactions
        if "ExcludeAuditLog" in self.api_config and self.api_config["ExcludeAuditLog"] is not None:
            self.excludeList = self.api_config["ExcludeAuditLog"]

    def get_key(self):
        return "AuditLogs"

    def save_state(self, state):
        self.kvstore.set(self.get_key(), state)

    def get_state(self):
        key = self.get_key()
        if not self.kvstore.has_key(key):
            self.save_state({"fetch_oldest": self.DEFAULT_START_TIME_EPOCH, "fetch_latest": None,
                             "last_record_fetched_timestamp": None})
        obj = self.kvstore.get(key)
        return obj

    def build_fetch_params(self):
        state = self.get_state()
        latest = None
        if "fetch_latest" not in state or ("fetch_latest" in state and state["fetch_latest"] is None):
            oldest, latest = self.get_window(state["fetch_oldest"])
            self.save_state({"fetch_oldest": oldest, "fetch_latest": latest,
                             "last_record_fetched_timestamp": None})
        else:
            oldest = state["fetch_oldest"]
            # to be sure every record has been fetched in case of a previous failure
            if "fetch_latest" in state and state["fetch_latest"] is not None:
                latest = state["fetch_latest"]
            if "last_record_fetched_timestamp" in state and state["last_record_fetched_timestamp"] is not None:
                latest = state["last_record_fetched_timestamp"]
        return self.url, {"latest": latest, "oldest": oldest, "inclusive": True, "limit": 9999}

    def build_send_params(self):
        return {
            "endpoint_key": "HTTP_LOGS_ENDPOINT"
        }

    def transform_data(self, content):
        data_to_be_sent = []
        if content is not None and "entries" in content:
            entries = content["entries"]
            for entry in entries:
                action = entry["action"]
                if hasattr(self, "WorkspaceAuditActions") and action in self.WorkspaceAuditActions:
                    entry["logType"] = "WorkspaceAuditLog"
                elif hasattr(self, "UserAuditActions") and action in self.UserAuditActions:
                    entry["logType"] = "UserAuditLog"
                elif hasattr(self, "ChannelAuditActions") and action in self.ChannelAuditActions:
                    entry["logType"] = "ChannelAuditLog"
                elif hasattr(self, "FileAuditActions") and action in self.FileAuditActions:
                    entry["logType"] = "FileAuditLog"
                elif hasattr(self, "AppAuditActions") and action in self.AppAuditActions:
                    entry["logType"] = "AppAuditLog"
                elif hasattr(self, "OtherAuditActions") and action in self.OtherAuditActions:
                    entry["logType"] = "OtherAuditLogs"
                # flatten the entity-level hierarchy
                if "entity" in entry and "type" in entry["entity"]:
                    entity = entry["entity"]
                    entity_type = entity["type"]
                    if entity_type in entity:
                        data = entity[entity_type]
                        entry["entity"] = data
                if hasattr(self, "excludeList") and action in self.excludeList:
                    self.log.debug("Audit Log Entry Skipped for Action - " + action)
                else:
                    data_to_be_sent.append(entry)
        return data_to_be_sent
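The fetch() loops above all follow Slack's cursor-pagination convention. A minimal sketch of that pattern, separate from the original file (the `fetch_page` callable is hypothetical and stands in for `slackClient.api_call`):

def paginate(fetch_page, args):
    """Follow Slack-style response_metadata.next_cursor until it is empty."""
    while True:
        result = fetch_page(**args)
        yield from result.get("members", [])
        cursor = result.get("response_metadata", {}).get("next_cursor", "")
        if not cursor:
            break
        args["cursor"] = cursor

The real collectors add the pieces this sketch omits: rate-limit backoff via the Retry-After header, a retry cap (MAX_RETRY), and persisting the cursor to the key-value store so an interrupted run can resume.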
49.54242
120
0.564527
3,799
35,621
5.035272
0.097921
0.021277
0.014219
0.015056
0.508913
0.441006
0.392598
0.361911
0.345026
0.321815
0
0.003501
0.342523
35,621
718
121
49.611421
0.813279
0.038489
0
0.4
0
0.001681
0.151435
0.022227
0
0
0
0
0
1
0.082353
false
0
0.011765
0.018487
0.178151
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a70eecc6a37034c9b752fb6448604398081dfbe
8,250
py
Python
networks/DEPICT.py
blafabregue/TimeSeriesDeepClustering
85f4ab2fd45bda3296c6b0861ee11e6c7a77c594
[ "Apache-2.0" ]
6
2021-03-05T18:48:22.000Z
2022-03-23T08:42:16.000Z
networks/DEPICT.py
blafabregue/TimeSeriesDeepClustering
85f4ab2fd45bda3296c6b0861ee11e6c7a77c594
[ "Apache-2.0" ]
null
null
null
networks/DEPICT.py
blafabregue/TimeSeriesDeepClustering
85f4ab2fd45bda3296c6b0861ee11e6c7a77c594
[ "Apache-2.0" ]
4
2021-06-29T00:53:14.000Z
2022-03-10T11:55:32.000Z
""" Based on Theano implementation https://github.com/herandy/DEPICT and article : Dizaji, K. G., Herandi, A., & Huang, H. (2017). Deep clustering via joint convolutional autoencoder embedding and relative entropy minimization Author: Baptiste Lafabregue 2019.25.04 """ import numpy as np from sklearn import metrics from sklearn.cluster import KMeans import tensorflow as tf from tensorflow.keras.models import Model from networks.trainer import Trainer class DEPICT(Trainer): def __init__(self, dataset_name, classifier_name, encoder_model, keep_both_losses=True, gamma=0.1, n_clusters=10, alpha=1.0, batch_size=10, tol=1e-3, update_interval=5, pred_normalizition_flag=True, optimizer=None): super(DEPICT, self).__init__(dataset_name, classifier_name, encoder_model, batch_size, n_clusters, optimizer) self.keep_both_losses = keep_both_losses self.gamma = gamma self.alpha = alpha self.tol = tol self.update_interval = update_interval self.pred_normalizition_flag = pred_normalizition_flag self.clust_model = None self.clust_loss = None def initialize_model(self, x, y, ae_weights=None): """ Initialize the model for training :param ae_weights: arguments to let the encoder load its weights, None to pre-train the encoder """ if ae_weights is not None: self.encoder_model.load_weights(ae_weights) print('Pretrained AE weights are loaded successfully.') self.pretrain_model = False else: self.pretrain_model = True if self.optimizer is None: self.optimizer = tf.keras.optimizers.Adam() clustering_layer = tf.keras.layers.Dense(self.n_clusters, name='clustering')(self.encoder.output) clustering_layer = tf.keras.layers.Softmax()(clustering_layer) self.clust_model = Model(inputs=self.encoder.input, outputs=clustering_layer) self.clust_loss = tf.keras.losses.CategoricalCrossentropy() def load_weights(self, weights_path): """ Load weights of IDEC model :param weights_path: path to load weights from """ self.clust_model.load_weights(weights_path + '.tf') def save_weights(self, weights_path): """ Save weights of IDEC model :param weights_path: path to save weights to """ self.clust_model.save_weights(weights_path + '.tf') def predict_clusters(self, x, seeds=None): """ Predict cluster labels using the output of clustering layer :param x: the data to evaluate :param seeds: seeds to initialize the K-Means if needed :return: the predicted cluster labels """ q = self.clust_model.predict(x, verbose=0) y_pred = q.argmax(1) return y_pred, np.transpose(self.clust_model.get_layer(name='clustering').get_weights()[0]) def _run_training(self, x, y, x_test, y_test, nb_steps, seeds, verbose, log_writer, dist_matrix=None): if seeds is not None: seeds_enc = self.extract_features(seeds) kmeans = KMeans(n_clusters=self.n_clusters, n_init=20, init=seeds_enc) else: kmeans = KMeans(n_clusters=self.n_clusters, n_init=20) x_pred = self.extract_features(x) y_pred = kmeans.fit_predict(x_pred) centroids = kmeans.cluster_centers_.T centroids = centroids / np.sqrt(np.diag(np.matmul(centroids.T, centroids))) self.clust_model.get_layer(name='clustering').set_weights([centroids, np.zeros((self.n_clusters,))]) if y is not None: ari = np.round(metrics.adjusted_rand_score(y, y_pred), 5) if verbose: print('ari kmeans: ', str(ari)) self.log_stats(x, y, x_test, y_test, [0, 0, 0], 0, log_writer, 'init') i = 0 # Number of performed optimization steps epoch = 0 # Number of performed epochs # evaluate the clustering performance target_pred = self.clust_model.predict(x, verbose=0) if self.pred_normalizition_flag: cluster_frequency = np.sum(target_pred, axis=0) # 
avoid unbalanced assignment target_pred = target_pred ** 2 / cluster_frequency # y_prob = y_prob / np.sqrt(cluster_frequency) target_pred = np.transpose(target_pred.T / np.sum(target_pred, axis=1)) target_pred_last = target_pred # define the train function train_enc_loss = tf.keras.metrics.Mean(name='encoder train_loss') clust_enc_loss = tf.keras.metrics.Mean(name='clustering train_loss') depict_enc_loss = tf.keras.metrics.Mean(name='DEPICT train_loss') @tf.function def train_step(x_batch, target_batch): with tf.GradientTape() as tape: encoder_loss = self.encoder_model.loss.compute_loss(x_batch, training=True) encoding_x = self.clust_model(x_batch, training=True) depict_loss = self.clust_loss(target_batch, encoding_x) loss = (1 - self.gamma) * encoder_loss + self.gamma * depict_loss gradients = tape.gradient(loss, self.encoder_model.get_trainable_variables() + self.clust_model.trainable_variables) self.optimizer.apply_gradients( zip(gradients, self.encoder_model.get_trainable_variables() + self.clust_model.trainable_variables)) train_enc_loss(encoder_loss) clust_enc_loss(loss) depict_enc_loss(loss) if verbose: print('start training') # idec training while i < nb_steps: train_enc_loss.reset_states() clust_enc_loss.reset_states() depict_enc_loss.reset_states() # shuffle the train set # computes P each update_interval epoch if epoch % self.update_interval == 0: # evaluate the clustering performance target_pred = self.clust_model.predict(x, verbose=0) if self.pred_normalizition_flag: cluster_frequency = np.sum(target_pred, axis=0) # avoid unbalanced assignment target_pred = target_pred ** 2 / cluster_frequency # y_prob = y_prob / np.sqrt(cluster_frequency) target_pred = np.transpose(target_pred.T / np.sum(target_pred, axis=1)) delta_label = np.sum(target_pred != target_pred_last).astype(np.float32) / target_pred.shape[0] target_pred_last = target_pred # check stop criterion if epoch > 0 and delta_label < self.tol: if verbose: print('delta_label ', delta_label, '< tol ', self.tol) print('Reached tolerance threshold. Stopping training.') self.log_stats(x, y, x_test, y_test, [0, 0, 0], epoch, log_writer, 'reached_stop_criterion') break train_ds = tf.data.Dataset.from_tensor_slices((x, target_pred)) \ .shuffle(x.shape[0], reshuffle_each_iteration=True) \ .batch(self.batch_size).as_numpy_iterator() for x_batch, target_batch in train_ds: train_step(x_batch, target_batch) i += 1 if i >= nb_steps: break if verbose: template = 'Epoch {}, Loss: {}' print(template.format(epoch + 1, depict_enc_loss.result())) epoch += 1 y_pred = self.log_stats(x, y, x_test, y_test, [depict_enc_loss.result(), clust_enc_loss.result(), train_enc_loss.result()], epoch, log_writer, 'train') return epoch
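The balanced target distribution that `_run_training` computes twice can be isolated for inspection. A minimal NumPy sketch, not part of the original file, assuming q is an (n_samples, n_clusters) matrix of soft cluster assignments:

import numpy as np

def depict_target(q):
    """Sharpen soft assignments and penalize large clusters, as in _run_training:
    p_ik = (q_ik^2 / f_k) / sum_k' (q_ik'^2 / f_k'), with f_k = sum_i q_ik."""
    f = q.sum(axis=0)                      # cluster frequencies
    p = q ** 2 / f                         # sharpen and re-balance
    return p / p.sum(axis=1, keepdims=True)

q = np.random.dirichlet(np.ones(10), size=5)   # 5 samples, 10 clusters
p = depict_target(q)
assert np.allclose(p.sum(axis=1), 1.0)         # each row is a valid distribution

Squaring q pushes each sample toward its dominant cluster, while dividing by the cluster frequency f discourages the degenerate solution where one cluster absorbs everything.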
41.666667
122
0.598788
991
8,250
4.744702
0.236125
0.046789
0.035729
0.015951
0.301361
0.26946
0.240111
0.199915
0.199915
0.177797
0
0.010264
0.31503
8,250
197
123
41.878173
0.821801
0.138788
0
0.151515
0
0
0.040029
0.003168
0
0
0
0
0
1
0.05303
false
0
0.045455
0
0.121212
0.045455
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a726a13c2d0e8f4612fa24cadc034a901e4f639
16,718
py
Python
multicell/analysis_gamma.py
mattsmart/celltypes
21d7fa18535033fc23be380dcee3660a814bb722
[ "BSD-3-Clause" ]
null
null
null
multicell/analysis_gamma.py
mattsmart/celltypes
21d7fa18535033fc23be380dcee3660a814bb722
[ "BSD-3-Clause" ]
null
null
null
multicell/analysis_gamma.py
mattsmart/celltypes
21d7fa18535033fc23be380dcee3660a814bb722
[ "BSD-3-Clause" ]
null
null
null
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle

from multicell.multicell_class import Multicell
from multicell.multicell_replot import replot_scatter_dots
from multicell.graph_helper import state_load
from singlecell.singlecell_simsetup import singlecell_simsetup
from singlecell.singlecell_linalg import sorted_eig
from utils.file_io import RUNS_FOLDER, INPUT_FOLDER


def scan_plaquette_gamma_dynamics(J, W, state, coordnum=8, verbose=False, use_01=False):
    critgamma = None

    def get_state_send(state_send):
        if use_01:
            state_send = (state_send + np.ones_like(state_send)) / 2.0
        return state_send

    for gamma in np.linspace(0.001, 0.8, 10000):
        Js_internal = np.dot(J, state)
        # conditional 01 state send
        state_send = get_state_send(state)
        h_field_nbr = gamma * coordnum * np.dot(W, state_send)
        updated_state = np.sign(Js_internal + h_field_nbr)
        if np.array_equal(updated_state, state):
            if verbose:
                print(gamma, True)
        else:
            if critgamma is None:
                critgamma = gamma
            if verbose:
                print(gamma, False)
    return critgamma


def descend_to_fp(multicell):
    """
    Helper function for gamma scan functions
    - scan_gamma_bifurcation_candidates()
    - manyruns_gamma_bifurcation_candidates()
    """
    multicell.dynamics_full(
        flag_visualize=False,
        flag_datastore=False,
        flag_savestates=False,
        end_at_fp=True,
        verbose=False)
    current_step = multicell.current_step
    fp = multicell.graph_state_arr[:, current_step]
    return fp


def check_still_fp(test_fp, J_multicell):
    """
    Helper function for gamma scan functions
    - scan_gamma_bifurcation_candidates()
    - manyruns_gamma_bifurcation_candidates()
    """
    A = test_fp
    B = np.sign(np.dot(J_multicell, test_fp))  # TODO if any sgn(0), then what?
    return np.array_equal(A, B)


def scan_gamma_bifurcation_candidates(
        multicell_kwargs, simsetup_base, anchored=True, verbose=True,
        dg=1e-1, gmin=0.0, gmax=20.0,
        save_states_all=False, save_states_shift=True):
    """
    For a fixed initial condition and fixed multicell parameters, slowly vary gamma.
    Find {gamma*}, the observed points where the fixed point has changed.

    Note that a shifting fixed point is not by itself a symptom of a bifurcation: consider a
    pitchfork bifurcation, where the two fixed points continue to shift after the (singular)
    bifurcation.
    TODO: Consider a continuous dynamical system. When a static FP suddenly starts shifting in an
     almost continuous fashion, that is a signature of a bifurcation (e.g. transcritical or
     pitchfork). What, if any, is the discrete (discrete time AND discrete state) analog of this?

    Args:
        multicell_kwargs: kwargs to form Multicell, which is recreated for each gamma during the scan
        simsetup_base: simsetup dict template storing J, W, singlecell parameters
        anchored: if True, use a fixed initial condition for each gradient descent;
            else use the previous fixed point as the initial condition
        save_states_all: save the fixed point for each gamma as 'X_all_g%.5f.npz'
        save_states_shift: save the fixed point for each 'fp shift' gamma as 'X_shift_g%.5f.npz'
    Returns:
        list: the sequence of points {gamma*_n} where bifurcations have occurred
    Notes:
        - naively, one can do full gradient descent to reach each fixed point
        - once a fixed point is found, there is a faster way to check that it remains a fixed
          point: simply check that s* = sgn(J_multicell s*) -- useful in the "not anchored" case
        - if this vector condition holds then the FP is unchanged; when it breaks there is a
          bifurcation point (which is recorded) and the new FP should be found via descent
    SPEEDUP CHANGES:
        - only one Multicell instantiation before the loop (not during)
        - REQUIRES no varying of simsetup (J, W, N) in the loop
        - will need to refactor if this changes
    """
    # build gamma_space
    gamma_space = np.arange(gmin, gmax, dg)
    num_gamma = len(gamma_space)
    bifurcation_candidate_sequence = []
    if save_states_all:
        print('Warning: save_states_all is inefficient, use inferred gamma step size to recreate '
              'distribution of fixed points (i.e. fill in gaps between FP shifts)')

    # construct multicell_base from kwargs
    save_init = True
    multicell_base = Multicell(simsetup_base, verbose=False, **multicell_kwargs)
    if save_init:
        init_cond = multicell_base.graph_state_arr[:, 0]
        fpath = multicell_base.io_dict['statesdir'] + os.sep + 'X_init.npz'
        np.savez_compressed(fpath, init_cond, fmt='%d')

    # prep: perform gradient descent on the init cond to get our (potentially anchored) fixed point
    init_fp = descend_to_fp(multicell_base)
    prev_fp = np.copy(init_fp)  # used for iterative comparisons

    # speedup: reuse one Multicell instance and change its attributes on the fly
    multicell_local = multicell_base

    for i, gamma in enumerate(gamma_space):
        if i % 200 == 0:
            print("Checking %d/%d (gamma=%.4f)" % (i, num_gamma, gamma))
        multicell_kwargs_local = multicell_kwargs.copy()
        multicell_kwargs_local['gamma'] = gamma

        # 1) Re-build Multicell for gamma
        J_multicell = multicell_base.build_J_multicell(gamma=gamma, plot=False)
        multicell_local.gamma = gamma
        multicell_local.matrix_J_multicell = J_multicell

        # 2) gradient descent to fixed point
        if anchored:
            multicell_local.simulation_reset(provided_init_state=init_fp)
            step_fp = descend_to_fp(multicell_local)
            fp_unchanged = np.array_equal(step_fp, prev_fp)
            prev_fp = step_fp
        else:
            fp_unchanged = check_still_fp(prev_fp, J_multicell)
            if not fp_unchanged:
                multicell_local.simulation_reset(provided_init_state=prev_fp)
                prev_fp = descend_to_fp(multicell_local)

        if save_states_all:
            glabel = 'all_g%.5f' % gamma
            fpath = multicell_local.io_dict['statesdir'] + os.sep + 'X_%s.npz' % glabel
            np.savez_compressed(fpath, prev_fp, fmt='%d')

        # 3) report a bifurcation whenever the fixed point moves
        if not fp_unchanged:
            if verbose:
                print('fixed point shift at gamma=%.5f' % gamma)
            if save_states_shift:
                glabel = 'fpshift_g%.5f' % gamma
                fpath = multicell_local.io_dict['statesdir'] + os.sep + 'X_%s.npz' % glabel
                np.savez_compressed(fpath, prev_fp, fmt='%d')
            bifurcation_candidate_sequence.append(gamma)

    # save primary data from the gamma scan loop
    fpath_x = multicell_local.io_dict['datadir'] + os.sep + 'bifurcation_candidates.txt'
    fpath_gamma = multicell_local.io_dict['datadir'] + os.sep + 'gamma_space.txt'
    np.savetxt(fpath_x, bifurcation_candidate_sequence, '%.5f')
    np.savetxt(fpath_gamma, gamma_space, '%.5f')

    return bifurcation_candidate_sequence, gamma_space, multicell_base


def plot_bifurcation_candidates_curve(bifurcation_candidates, gamma_space, outdir, show=False):
    # plot type A
    x = np.arange(len(bifurcation_candidates))
    y = np.array(bifurcation_candidates)
    plt.scatter(x, y, marker='x')
    plt.xlabel(r'$n$')
    plt.ylabel(r'${\gamma}^*_n$')
    plt.savefig(outdir + os.sep + 'bifurc_A.jpg')
    if show:
        plt.show()

    # plot type B
    x = np.arange(len(bifurcation_candidates))
    y_construct = np.zeros(len(gamma_space))
    k = 0
    g0 = 0.0
    total_bifurcation_candidates = len(bifurcation_candidates)
    for i, gamma in enumerate(gamma_space):
        if bifurcation_candidates[k] > gamma:
            y_construct[i] = g0
        else:
            g0 = bifurcation_candidates[k]
            if k < total_bifurcation_candidates - 1:
                k += 1
            y_construct[i] = g0
    plt.plot(gamma_space, y_construct, '--', c='k')
    plt.plot(gamma_space, gamma_space, '-.', c='k', alpha=0.5)
    plt.scatter(bifurcation_candidates, bifurcation_candidates, marker='o')
    plt.xlabel(r'$\gamma$')
    plt.ylabel(r'${\gamma}^*_n$ Transitions (n=%d)' % len(bifurcation_candidates))
    plt.savefig(outdir + os.sep + 'bifurc_B.jpg')
    if show:
        plt.show()


# TODO check that not symmetrizing loaded W (in simsetup)
if __name__ == '__main__':
    force_symmetry_W = True
    destabilize_celltypes_gamma = True
    flag_plot_multicell_evals = False
    flag_bifurcation_sequence = True
    main_seed = 0  # np.random.randint(1e6)

    curated = True
    random_mem = False  # TODO incorporate seed in random XI in simsetup/curated
    random_W = False  # TODO incorporate seed in random W in simsetup/curated

    # W_override_path = None
    # W_id = 'W_9_W15maze'
    W_id = 'W_9_maze'
    W_override_path = INPUT_FOLDER + os.sep + 'manual_WJ' + os.sep + 'simsetup_%s.txt' % W_id

    simsetup = singlecell_simsetup(
        unfolding=True, random_mem=random_mem, random_W=random_W, curated=curated, housekeeping=0)
    if W_override_path is not None:
        print('Note: in main, overriding W from file...')
        explicit_W = np.loadtxt(W_override_path, delimiter=',')
        simsetup['FIELD_SEND'] = explicit_W
    print("simsetup checks:")
    print("\tsimsetup['N'],", simsetup['N'])
    print("\tsimsetup['P'],", simsetup['P'])

    if force_symmetry_W:
        W = simsetup['FIELD_SEND']
        # V1: take simple sym
        # simsetup['FIELD_SEND'] = (W + W.T) / 2
        # V2: take upper triangular part
        Wdiag = np.diag(np.diag(W))
        Wut = np.triu(W, 1)
        simsetup['FIELD_SEND'] = Wut + Wut.T + Wdiag
        # V3: take lower triangular part
        # Wdiag = np.diag(np.diag(W))
        # Wut = np.tril(W, -1)
        # simsetup['FIELD_SEND'] = Wut + Wut.T + Wdiag
        # Save symmetrized W
        np.savetxt('Wsym.txt', simsetup['FIELD_SEND'], '%.4f', delimiter=',')
        print(simsetup['FIELD_SEND'])

    if destabilize_celltypes_gamma:
        coordnum = 8  # number of neighbours that signals are received from
        W = simsetup['FIELD_SEND']
        J = simsetup['J']
        celltypes = [simsetup['XI'][:, a] for a in range(simsetup['P'])]
        print('Scanning for monotype destabilizing gamma (for coordination number %d)' % coordnum)
        for idx, celltype in enumerate(celltypes):
            critgamma = scan_plaquette_gamma_dynamics(J, W, celltype, coordnum=coordnum, verbose=False)
            print(idx, simsetup['CELLTYPE_LABELS'][idx], critgamma)
        print('Scanning for inverted monotype destabilizing gamma (for coordination number %d)' % coordnum)
        for idx, celltype in enumerate(celltypes):
            inverted_celltype = -1 * celltype
            critgamma = scan_plaquette_gamma_dynamics(J, W, inverted_celltype, coordnum=coordnum, verbose=False)
            print(idx, 'flip of celltype:', simsetup['CELLTYPE_LABELS'][idx], critgamma)
        print('Scanning for spurious monotype destabilizing gamma (for coordination number %d)' % coordnum)
        Splus = np.sign(celltypes[0] + celltypes[1] + celltypes[2])
        Sminus = -1 * Splus
        print('S+:', Splus)
        critgamma = scan_plaquette_gamma_dynamics(J, W, Splus, coordnum=coordnum, verbose=False)
        print(0, 'spurious S+', critgamma)
        critgamma = scan_plaquette_gamma_dynamics(J, W, Sminus, coordnum=coordnum, verbose=False)
        print(1, 'spurious S-', critgamma)

    if flag_plot_multicell_evals:
        # TODO implement or take from ipynb
        J_multicell = 1
        evals, evecs = sorted_eig(J_multicell)

    if flag_bifurcation_sequence:
        # 1) choose BASE simsetup (core singlecell params J, W)
        simsetup_base = simsetup

        # 2) choose BASE Multicell class parameters
        sidelength = 10
        num_cells = sidelength ** 2
        autocrine = False
        graph_style = 'lattice_square'
        assert graph_style == 'lattice_square' and not autocrine
        search_radius = 1
        init_style = 'dual'
        graph_kwargs = {'search_radius': search_radius,
                        'periodic': True,
                        'initialization_style': init_style}

        load_manual_init = False
        init_state_path = None
        if load_manual_init:
            init_style = 'manual'
            print('Note: in main, loading init graph state from file...')
            init_state_path = INPUT_FOLDER + os.sep + 'manual_graphstate' + os.sep + 'X_8.txt'

        # specify gamma scan parameters
        dgS = '5e-3'  # '5e-4'
        gminS = '0'
        gmaxS = '0.15'  # '1.0' or '4.0'
        dg, gmin, gmax = float(dgS), float(gminS), float(gmaxS)
        anchored = False
        save_all = False
        save_shifts = True
        plot_all_tissues = True

        # seed = 0
        for seed in range(1):
            print('WORKING ON seed:', seed)
            # create run basedir label based on the specified parameters
            run_subdir = 'gscan_anchor%d_gLow%s_gHigh%s_gStep%s_%s_R%d_init_%s_s%d_M%d' % \
                         (int(anchored), gminS, gmaxS, dgS, W_id, search_radius, init_style, seed, num_cells)
            if save_all or save_shifts:
                run_basedir_path = RUNS_FOLDER + os.sep + 'multicell_manyruns'
            else:
                run_basedir_path = RUNS_FOLDER + os.sep + 'explore' + os.sep + 'bifurcation'
            assert not os.path.exists(run_basedir_path + os.sep + run_subdir)

            multicell_kwargs_base = {
                'seed': seed,
                'run_basedir': run_basedir_path,
                'run_subdir': run_subdir,
                'beta': np.Inf,
                'total_steps': 500,
                'num_cells': num_cells,
                'flag_blockparallel': False,
                'graph_style': graph_style,
                'graph_kwargs': graph_kwargs,
                'autocrine': autocrine,
                'gamma': 0.0,
                'exosome_string': 'no_exo_field',
                'exosome_remove_ratio': 0.0,
                'kappa': 0.0,
                'field_applied': None,
                'flag_housekeeping': False,
                'flag_state_int': True,
                'plot_period': 1,
                'init_state_path': init_state_path,
            }

            bifurcation_candidates, gamma_space, multicell = scan_gamma_bifurcation_candidates(
                multicell_kwargs_base, simsetup_base,
                anchored=anchored, verbose=True,
                dg=dg, gmin=gmin, gmax=gmax,
                save_states_all=save_all, save_states_shift=save_shifts)

            outdir = multicell.io_dict['datadir']
            plot_bifurcation_candidates_curve(bifurcation_candidates, gamma_space, outdir, show=False)

            # plot each bifurcation candidate as fancy tissue
            if plot_all_tissues:
                print(multicell.io_dict.keys())
                plotlattice_dir = multicell.io_dict['plotlatticedir']
                states_dir = multicell.io_dict['statesdir']

                # plot initial state first
                init_state = state_load(
                    states_dir + os.sep + 'X_init' + '.npz',
                    cells_as_cols=True, num_genes=multicell.num_genes, num_cells=num_cells, txt=False)
                replot_scatter_dots(
                    init_state, sidelength, plotlattice_dir + os.sep + 'X_init',
                    fmod='', state_int=False, cmap=None, title=None,
                    ext=['.jpg', '.svg'], rasterized=True)

                for idx, gval in enumerate(bifurcation_candidates):
                    print('Plotting fpshift tissue #%d, gamma=%.5f...' % (idx, gval))
                    fname = 'X_fpshift_g%.5f' % gval
                    lattice_state_path = states_dir + os.sep + fname + '.npz'
                    lattice_state = state_load(
                        lattice_state_path,
                        cells_as_cols=True, num_genes=multicell.num_genes, num_cells=num_cells, txt=False)
                    print(lattice_state.shape)
                    outpath = plotlattice_dir + os.sep + fname
                    replot_scatter_dots(
                        lattice_state, sidelength, outpath,
                        fmod='', state_int=False, cmap=None, title=None,
                        ext=['.jpg', '.svg'], rasterized=True)
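The fixed-point shortcut used by the unanchored scan (checking s* = sgn(J s*) instead of re-running the full descent) can be illustrated on a toy Hopfield network. A minimal sketch, separate from the original file, assuming a single Hebbian-stored pattern:

import numpy as np

xi = np.array([1., -1., 1., 1., -1.])      # one stored pattern
J = np.outer(xi, xi) / len(xi)             # Hebbian coupling
np.fill_diagonal(J, 0.)

def check_still_fp(s, J):
    return np.array_equal(s, np.sign(J @ s))

print(check_still_fp(xi, J))                            # True: the pattern is a fixed point
print(check_still_fp(np.sign(np.random.randn(5)), J))   # usually False for a random state

This is exactly why the scan only pays for a full gradient descent when the cheap vector condition breaks: as long as the condition holds, the previously found fixed point is still valid for the new gamma.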
42.757033
112
0.627168
2,099
16,718
4.77132
0.21677
0.050325
0.01358
0.012981
0.247928
0.225162
0.183924
0.132202
0.103844
0.097654
0
0.009404
0.281254
16,718
390
113
42.866667
0.824068
0.200024
0
0.166052
0
0
0.119742
0.00653
0
0
0
0.007692
0.00738
1
0.02214
false
0
0.0369
0
0.077491
0.084871
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a7472c7924a7b3659157f418c24eb6167f24a8a
1,425
py
Python
test/commonse/test_tube.py
akey7/WISDEM
29c5abbdfdc8800cfd18237e8e0edf0d10c7e02c
[ "Apache-2.0" ]
1
2020-03-06T13:45:31.000Z
2020-03-06T13:45:31.000Z
test/commonse/test_tube.py
RFeil/WISDEM
f58ec725a4a7f959b6a97222d5e709e67d68379a
[ "Apache-2.0" ]
17
2019-09-13T22:21:15.000Z
2019-10-25T20:04:26.000Z
test/commonse/test_tube.py
akey7/WISDEM
29c5abbdfdc8800cfd18237e8e0edf0d10c7e02c
[ "Apache-2.0" ]
null
null
null
import numpy as np
import numpy.testing as npt
import unittest

from commonse.tube import Tube, CylindricalShellProperties

npts = 100


class TestTube(unittest.TestCase):
    def setUp(self):
        self.params = {}
        self.unknowns = {}
        self.resid = None

        self.params['d'] = 2*5.0*np.ones(npts)
        self.params['t'] = 1.0*np.ones(npts-1)

        self.mytube = CylindricalShellProperties(npts)

    def testTubeProperties(self):
        T = Tube(2*5.0, 1.0)

        self.assertAlmostEqual(T.Area, np.pi*9.0)
        self.assertAlmostEqual(T.Jxx, np.pi*369.0/4.0)
        self.assertAlmostEqual(T.Jyy, np.pi*369.0/4.0)
        self.assertAlmostEqual(T.J0, np.pi*369.0/2.0)
        self.assertAlmostEqual(T.S, np.pi*369.0/4.0/5.0)
        self.assertAlmostEqual(T.C, np.pi*369.0/2.0/5.0)

    def testOutputsIncremental(self):
        self.mytube.solve_nonlinear(self.params, self.unknowns, self.resid)

        npt.assert_almost_equal(self.unknowns['Az'], np.pi*9.0)
        npt.assert_almost_equal(self.unknowns['Ixx'], np.pi*369.0/4.0)
        npt.assert_almost_equal(self.unknowns['Iyy'], np.pi*369.0/4.0)
        npt.assert_almost_equal(self.unknowns['Jz'], np.pi*369.0/2.0)


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(TestTube))
    return suite


if __name__ == '__main__':
    unittest.TextTestRunner().run(suite())
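The expected values in testTubeProperties follow from the standard annulus section formulas. A short check, not part of the original test file, for the tube used above (outer diameter Do = 10, wall thickness t = 1, hence inner diameter Di = 8):

import numpy as np

Do, t = 10.0, 1.0
Di = Do - 2 * t
Area = np.pi / 4 * (Do**2 - Di**2)   # pi/4 * (100 - 64)     = 9*pi
Ixx = np.pi / 64 * (Do**4 - Di**4)   # pi/64 * (10000 - 4096) = 369*pi/4
J0 = 2 * Ixx                         # polar moment           = 369*pi/2
S = Ixx / (Do / 2)                   # section modulus        = 369*pi/20
print(Area / np.pi, Ixx / np.pi, J0 / np.pi, S / np.pi)  # 9.0 92.25 184.5 18.45

These match the asserted constants: Area = 9*pi, Jxx = Jyy = 369*pi/4, J0 = 369*pi/2, and S = (369*pi/4)/5.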
31.666667
75
0.637895
209
1,425
4.267943
0.301435
0.044843
0.06278
0.071749
0.353139
0.353139
0.202915
0.165919
0.165919
0.09417
0
0.06328
0.212632
1,425
44
76
32.386364
0.731729
0
0
0
0
0
0.014035
0
0
0
0
0
0.30303
1
0.121212
false
0
0.121212
0
0.30303
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a758dd66da3dd133fbac842a197fc9a2d383ce4
10,677
py
Python
main.py
manulera/ShareYourCloning_backend
6a33b57888102e475558bacfc13c38881c36a865
[ "MIT" ]
null
null
null
main.py
manulera/ShareYourCloning_backend
6a33b57888102e475558bacfc13c38881c36a865
[ "MIT" ]
17
2022-01-13T15:17:07.000Z
2022-03-31T16:45:03.000Z
main.py
manulera/ShareYourCloning_backend
6a33b57888102e475558bacfc13c38881c36a865
[ "MIT" ]
null
null
null
from fastapi import FastAPI, UploadFile, File, Query, HTTPException, Request from pydna.dseqrecord import Dseqrecord from pydantic import conlist, create_model from pydna.parsers import parse as pydna_parse from Bio.SeqIO import read as seqio_read from pydna.genbank import Genbank from dna_functions import assembly_list_is_valid, get_assembly_list_from_sticky_ligation_source, get_pcr_products_list, get_restriction_enzyme_products_list, \ format_sequence_genbank, get_sticky_ligation_products_list, perform_assembly, \ read_dsrecord_from_json, read_primer_from_json from pydantic_models import PCRSource, PrimerAnnealingSettings, PrimerModel, SequenceEntity, SequenceFileFormat, \ GenbankIdSource, RestrictionEnzymeDigestionSource, StickyLigationSource,\ UploadedFileSource from fastapi.middleware.cors import CORSMiddleware from urllib.error import HTTPError, URLError from fastapi.responses import HTMLResponse # Instance of the API object app = FastAPI() # Allow CORS # TODO put a wildcard on the shareyourcloning.netlify to # allow for the draft websites to also work in netlify. # TODO make this conditional to dev / prod using settings origins = ["http://localhost:3000", "https://shareyourcloning.netlify.app"] app.add_middleware( CORSMiddleware, allow_origins=origins, allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) # TODO limit the maximum size of submitted files @app.get('/') async def greeting(request: Request): html_content = f""" <html> <head> <title>Welcome to ShareYourCloning API</title> </head> <body> <h1>Welcome to ShareYourCloning API</h1> <p>You can access the endpoints documentation <a href="{request.url._url}docs">here</a></p> </body> </html> """ return HTMLResponse(content=html_content, status_code=200) @ app.post('/read_from_file', response_model=create_model( 'UploadedFileResponse', sources=(list[UploadedFileSource], ...), sequences=(list[SequenceEntity], ...) )) async def read_from_file(file: UploadFile = File(...), file_format: SequenceFileFormat = Query( None, description='Format of the sequence file. 
\
 Unless specified, it will be guessed \
 from the extension'),
        index_in_file: int = Query(None, description='The index of the sequence in the file for multi-sequence files')
):
    """Return a JSON sequence from a sequence file."""
    if file_format is None:
        extension_dict = {
            'gbk': 'genbank',
            'gb': 'genbank',
            'dna': 'snapgene',
            'fasta': 'fasta'
        }
        extension = file.filename.split('.')[-1]
        if extension not in extension_dict:
            raise HTTPException(
                422, 'We could not guess the format of the file from its extension. \
Please provide file_format as a query parameter.')

        # We guess the file type from the extension
        file_format = SequenceFileFormat(extension_dict[extension])

    if file_format in ['fasta', 'genbank']:
        # Read the whole file into a string
        file_content = (await file.read()).decode()
        dseqs = pydna_parse(file_content)
        if len(dseqs) == 0:
            raise HTTPException(
                422, 'The pydna parser cannot process this file.')
    elif file_format == 'snapgene':
        try:
            seq = seqio_read(file.file, file_format)
        except ValueError:
            raise HTTPException(
                422, 'The Biopython snapgene reader cannot process this file.')
        iscircular = 'topology' in seq.annotations.keys() and seq.annotations['topology'] == 'circular'
        dseqs = [Dseqrecord(seq, circular=iscircular)]

    # The common part
    parent_source = UploadedFileSource(file_format=file_format, file_name=file.filename)
    out_sources = list()
    for i in range(len(dseqs)):
        new_source = parent_source.copy()
        new_source.index_in_file = i
        out_sources.append(new_source)

    out_sequences = [format_sequence_genbank(s) for s in dseqs]

    return {'sequences': out_sequences, 'sources': out_sources}

# TODO: a bit inconsistent that here you don't put {source: {...}} in the request, but
# directly the object.


@app.post('/genbank_id', response_model=create_model(
    'GenbankIdResponse',
    sources=(list[GenbankIdSource], ...),
    sequences=(list[SequenceEntity], ...)
))
async def get_from_genbank_id(source: GenbankIdSource):
    gb = Genbank("example@gmail.com")
    try:
        seq = Dseqrecord(gb.nucleotide(source.genbank_id))
    except HTTPError as exception:
        if exception.code == 500:
            raise HTTPException(
                503, f'GenBank returned: {exception} - GenBank might be down')
        elif exception.code == 400:
            raise HTTPException(
                404, f'GenBank returned: {exception} - Likely you inserted a wrong GenBank id')
    except URLError as exception:
        raise HTTPException(504, f'Unable to connect to GenBank: {exception}')

    output_sequence = format_sequence_genbank(seq)

    return {'sequences': [output_sequence], 'sources': [source]}


@app.post('/restriction', response_model=create_model(
    'RestrictionEnzymeDigestionResponse',
    sources=(list[RestrictionEnzymeDigestionSource], ...),
    sequences=(list[SequenceEntity], ...)
))
async def restriction(source: RestrictionEnzymeDigestionSource,
                      sequences: conlist(SequenceEntity, min_items=1, max_items=1)):
    dseq = read_dsrecord_from_json(sequences[0])
    # TODO: return an error if the id of the sequence does not correspond
    # TODO: issue a warning if the number of enzymes is > 2 or if one does not cut

    # If the request provides fragment_boundaries, the program should return only one output.
    output_is_known = False
    if len(source.fragment_boundaries) > 0:
        if len(source.fragment_boundaries) != 2 or len(source.restriction_enzymes) != 2:
            raise HTTPException(
                400, 'If `fragment_boundaries` are provided, the length of `fragment_boundaries` and `restriction_enzymes` must be 2.')
        output_is_known = True

    fragments, out_sources = get_restriction_enzyme_products_list(dseq, source)
    out_sequences = [format_sequence_genbank(seq) for seq in fragments]

    if len(out_sequences) == 0:
        raise HTTPException(
            400, 'The enzymes do not cut.')

    # If the user has provided boundaries, we verify that they are correct, and return only those as the output
    if output_is_known:
        for i, out_source in enumerate(out_sources):
            if out_source == source:
                return {'sequences': [out_sequences[i]], 'sources': [out_source]}
        # If we don't find it, there was a mistake
        raise HTTPException(
            400, 'The fragment boundaries / enzymes provided do not correspond to the ones predicted.')

    return {'sources': out_sources, 'sequences': out_sequences}


@app.post('/sticky_ligation', response_model=create_model(
    'StickyLigationResponse',
    sources=(list[StickyLigationSource], ...),
    sequences=(list[SequenceEntity], ...)
))
async def sticky_ligation(source: StickyLigationSource,
                          sequences: conlist(SequenceEntity, min_items=1)):
    dseqs = [read_dsrecord_from_json(seq) for seq in sequences]
    if len(source.fragments_inverted) > 0:
        # TODO: error if the list has a different order or the ids are wrong
        # TODO: check the input for unique ids
        assembly_list = get_assembly_list_from_sticky_ligation_source(dseqs, source)
        if not assembly_list_is_valid(assembly_list, source.circularised):
            raise HTTPException(
                400, 'Fragments are not compatible for sticky ligation')
        output_sequence = format_sequence_genbank(perform_assembly(assembly_list, source.circularised))
        return {'sequences': [output_sequence], 'sources': [source]}

    products_dseq, out_sources = get_sticky_ligation_products_list(dseqs)
    if len(products_dseq) == 0:
        raise HTTPException(
            400, 'No combination of these fragments is compatible for sticky ligation')

    out_sequences = [format_sequence_genbank(seq) for seq in products_dseq]

    return {'sequences': out_sequences, 'sources': out_sources}


@app.post('/pcr', response_model=create_model(
    'PCRResponse',
    sources=(list[PCRSource], ...),
    sequences=(list[SequenceEntity], ...)
))
async def pcr(source: PCRSource,
              sequences: conlist(SequenceEntity, min_items=1, max_items=1),
              primers: conlist(PrimerModel, min_items=1, max_items=2)):
    dseq = read_dsrecord_from_json(sequences[0])
    primers_pydna = [read_primer_from_json(p) for p in primers]

    # If the footprints are set, the output should be known
    output_is_known = len(source.primer_footprints) > 0 or len(source.primers) > 0 or len(source.fragment_boundaries) > 0

    # Here we set the annealing settings to match the input. If we send the exact annealing,
    # there is no point in sending these settings as well.
    if output_is_known:
        source.primer_annealing_settings = PrimerAnnealingSettings(
            minimum_annealing=min(source.primer_footprints)
        )

    # TODO: return an error if the id of the sequence does not correspond.
    # TODO: return an error if the ids are not present in the list.

    # Error if no pair is generated
    products, out_sources = get_pcr_products_list(dseq, source, primers_pydna)
    if len(products) == 0:
        raise HTTPException(
            400, 'No pair of annealing primers was found.'
            + ('' if output_is_known else ' Try changing the annealing settings.')
        )

    out_sequences = [format_sequence_genbank(seq) for seq in products]

    # If the user has provided boundaries, we verify that they are correct.
    if output_is_known:
        for i, out_source in enumerate(out_sources):
            if out_source == source:
                return {'sequences': [out_sequences[i]], 'sources': [out_source]}
        # If we don't find it, there was a mistake
        raise HTTPException(
            400, 'The annealing positions of the primers seem to be wrong.')

    return {'sources': out_sources, 'sequences': out_sequences}
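A minimal sketch of how a client might call the /restriction endpoint defined above. The host/port, the `requests` dependency, and the exact payload fields are assumptions inferred from the endpoint signature and models, not a documented contract:

# Hypothetical client call for /restriction; host, port, and payload shape are assumptions.
import requests

json_sequence = {...}  # a SequenceEntity previously returned by e.g. /read_from_file (shape abbreviated)
payload = {
    'source': {'restriction_enzymes': ['EcoRI'], 'fragment_boundaries': []},
    'sequences': [json_sequence],
}
resp = requests.post('http://localhost:8000/restriction', json=payload)
resp.raise_for_status()
print(len(resp.json()['sequences']), 'fragments predicted')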
40.908046
159
0.666105
1,266
10,677
5.451817
0.238547
0.033903
0.021298
0.017386
0.27166
0.186323
0.161982
0.115329
0.115329
0.095045
0
0.009544
0.244357
10,677
260
160
41.065385
0.845935
0.12822
0
0.234043
0
0.010638
0.157364
0.017449
0
0
0
0.007692
0
1
0
false
0
0.058511
0
0.106383
0.010638
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a75b25e831903f5bd3d57469d58a9db1e5c338b
2,656
py
Python
mquad/mquad_utils.py
maurerv/MQuad
4cc22a7923b204ae0b253865b8a07e3c7341b7c1
[ "Apache-2.0" ]
3
2021-05-03T02:14:13.000Z
2022-03-08T22:38:15.000Z
mquad/mquad_utils.py
maurerv/MQuad
4cc22a7923b204ae0b253865b8a07e3c7341b7c1
[ "Apache-2.0" ]
3
2021-04-04T19:53:08.000Z
2022-02-22T06:59:25.000Z
mquad/mquad_utils.py
maurerv/MQuad
4cc22a7923b204ae0b253865b8a07e3c7341b7c1
[ "Apache-2.0" ]
4
2021-01-18T05:24:15.000Z
2022-01-25T14:53:34.000Z
# import stuff
import numpy as np
import pandas as pd
from kneed import KneeLocator


def confusionMatrix(predicted_clone, real_label):
    conf_df = pd.DataFrame(data={'vireo': predicted_clone, 'real_label': real_label})
    confusion_matrix = pd.crosstab(conf_df['vireo'], conf_df['real_label'],
                                   rownames=['Predicted'], colnames=['Actual'])

    # of those cases predicted to belong to class c, which fraction truly belongs to class c?
    precision = np.mean(confusion_matrix.max(axis=1)/confusion_matrix.sum(axis=1))
    # proportion of cases correctly identified as belonging to class c among all cases that truly belong to class c
    recall = np.mean(confusion_matrix.max(axis=0)/confusion_matrix.sum(axis=0))

    print('Precision = ' + str(precision))
    print('Recall = ' + str(recall))

    return confusion_matrix


def plot_confusionMatrix(mat, ax, cmap='Blues'):
    width, height = np.array(mat).shape
    text_colors = ['black', 'white']

    # row-normalize the matrix so cell colors reflect per-class fractions
    norm_conf = []
    for i in np.array(mat):
        tmp_arr = []
        a = sum(i, 0)
        for j in i:
            tmp_arr.append(float(j)/float(a))
        norm_conf.append(tmp_arr)

    res = ax.imshow(np.array(norm_conf), cmap=cmap, interpolation='nearest')

    for x in range(width):
        for y in range(height):
            ax.annotate(str(np.array(mat)[x][y]), xy=(y, x),
                        horizontalalignment='center',
                        verticalalignment='center',
                        color=text_colors[int(norm_conf[x][y] > 0.5)])
    return res


def alleleFreqMatrix(AD, DP, fillna=True):
    # takes sparse AD and DP, returns a dense AF matrix for plotting
    AD_df = pd.DataFrame(AD.todense())
    DP_df = pd.DataFrame(DP.todense())
    AF_df = AD_df/DP_df
    if fillna:
        AF_df = AF_df.fillna(0)
    return AF_df


def findKnee(BIC, sens=3):
    # Wrapper function for the knee-point locator given a series of deltaBIC

    # Remove negative BICs first
    BIC = BIC[BIC > 0]

    # Remove outliers (above Q3 + 1.5 IQR)
    q1 = np.percentile(BIC, 25)
    q3 = np.percentile(BIC, 75)
    iqr = q3 - q1
    t = q3 + 1.5*iqr  # threshold to determine outliers
    # if t is too small (i.e. < 10k), set t to 10k
    if t < 10000:
        t = 10000
    filtered_BIC = BIC[BIC <= t]

    y = np.sort(filtered_BIC.astype(float))
    x = np.linspace(0, 1, len(filtered_BIC)+1)[1:]

    kl = KneeLocator(x, y, curve="convex", direction="increasing", S=sens)

    return x, y, kl.knee, kl.knee_y


if __name__ == '__main__':
    test = pd.read_csv('test/BIC_params.csv')
    findKnee(test.deltaBIC)
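A minimal usage sketch for findKnee above; the deltaBIC values here are synthetic stand-ins for real MQuad output:

# Usage sketch for findKnee; the deltaBIC values are synthetic.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
deltaBIC = pd.Series(np.concatenate([rng.uniform(0, 50, 450),      # uninformative variants
                                     rng.uniform(200, 800, 50)]))  # informative variants
x, y, knee_x, knee_y = findKnee(deltaBIC)
# kl.knee can be None if no knee is detected, so print rather than format
print('knee at x =', knee_x, '-> deltaBIC threshold ~', knee_y)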
30.181818
120
0.635166
393
2,656
4.170483
0.402036
0.054912
0.019524
0.028066
0.034167
0.034167
0
0
0
0
0
0.021256
0.238328
2,656
88
121
30.181818
0.788927
0.192771
0
0
0
0
0.067136
0
0
0
0
0
0
1
0.076923
false
0
0.057692
0
0.211538
0.038462
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a77997eb1f678497954d3bcac1625ff7acd6ab3
11,886
py
Python
vivarium_bioscrape/processes/bioscrape.py
BuildACell/vivarium-bioscrape
3ed35babb1cdf018cc71841e3bff928ba384632d
[ "MIT" ]
1
2020-09-17T20:28:11.000Z
2020-09-17T20:28:11.000Z
vivarium_bioscrape/processes/bioscrape.py
BuildACell/VivariumSBML
3ed35babb1cdf018cc71841e3bff928ba384632d
[ "MIT" ]
1
2021-06-19T23:06:54.000Z
2021-06-19T23:06:54.000Z
vivarium_bioscrape/processes/bioscrape.py
BuildACell/VivariumSBML
3ed35babb1cdf018cc71841e3bff928ba384632d
[ "MIT" ]
null
null
null
'''
Execute by running: ``python -m bioscrape.processes.bioscrape.py``

TODO: Replace the bioscrape code to implement your own process.
'''

import os

import numpy as np

from vivarium.core.process import Process
from vivarium.core.composition import (
    simulate_process,
    PROCESS_OUT_DIR,
)
from vivarium.plots.simulation_output import plot_simulation_output

from bioscrape.types import Model, Volume
from bioscrape.simulator import DeterministicSimulator, ModelCSimInterface, VolumeSSASimulator, SafeModelCSimInterface
from bioscrape.lineage import LineageCSimInterface, LineageModel, LineageSSASimulator, LineageVolumeCellState

NAME = 'bioscrape'


class Bioscrape(Process):
    '''
    This process provides a wrapper around a bioscrape model, interface, and simulator.
    It allows for stochastic or deterministic simulation, variable volume, and generates
    ports to access all bioscrape species and rate parameters.
    '''

    # give the process a name, so that it can register in the process_repository
    name = NAME

    # declare default parameters as class variables
    defaults = {
        'internal_dt': 0.01,
        'stochastic': False,
        'initial_volume': 1.0,
        'safe_mode': False,
        'lineage': False,
    }

    def __init__(self, parameters=None):
        super(Bioscrape, self).__init__(parameters)

        # get the parameters out of self.parameters if available, or use defaults
        if 'sbml_file' not in self.parameters and 'bioscrape_model' not in self.parameters:
            raise ValueError("Bioscrape Process requires either an sbml_file or bioscrape_model parameter.")
        elif 'sbml_file' not in self.parameters and isinstance(self.parameters['bioscrape_model'], Model):
            # use the bioscrape Model that was passed in directly
            self.sbml_file = None
            self.model = self.parameters['bioscrape_model']
        elif isinstance(self.parameters['sbml_file'], str) and 'bioscrape_model' not in self.parameters:
            # load the sbml file to create the model
            self.sbml_file = self.parameters['sbml_file']
            if self.parameters["lineage"]:
                self.model = LineageModel(sbml_filename=self.sbml_file)
            else:
                self.model = Model(sbml_filename=self.sbml_file, sbml_warnings=False)
        elif isinstance(self.parameters['sbml_file'], str) and isinstance(self.parameters['bioscrape_model'], Model):
            raise ValueError("Bioscrape received both an sbml_file and a bioscrape_model. "
                             "Please use one or the other.")
        else:
            raise ValueError(f"Bioscrape did not receive a valid bioscrape_model "
                             f"(received: {self.parameters['bioscrape_model']}) or a "
                             f"valid sbml_file (received: {self.parameters['sbml_file']}).")

        self.internal_dt = self.parameters['internal_dt']
        self.stochastic = self.parameters['stochastic']

        # Toggle using the Lineage Model
        if self.parameters["lineage"]:
            if not self.stochastic:
                raise ValueError("Bioscrape lineage is only available with stochastic = True")
            self.simulator = LineageSSASimulator()
            self.interface = LineageCSimInterface(self.model)
            self.volume = LineageVolumeCellState()  # create an internal bioscrape Volume
        # Otherwise use normal bioscrape models
        else:
            # create the interface
            if self.parameters["safe_mode"]:
                self.interface = SafeModelCSimInterface(self.model, max_species_count=10**8)
            else:
                self.interface = ModelCSimInterface(self.model)

            # Stochastic
            if self.stochastic:
                self.simulator = VolumeSSASimulator()
            # Not stochastic
            elif not self.stochastic:
                self.interface.py_prep_deterministic_simulation()
                # create a Simulator
                self.simulator = DeterministicSimulator()

            self.volume = Volume()  # create an internal bioscrape Volume

        # Set dt
        self.interface.py_set_dt(self.internal_dt)

        # Set the volume
        self.volume.py_set_volume(self.parameters['initial_volume'])

    def get_species_names(self):
        # gets the names of the species in a bioscrape model
        model_species = self.model.get_species_dictionary()
        return list(model_species.keys())

    def get_state(self, array):
        # gets the state of a bioscrape simulation
        mapping = self.model.get_species2index()
        return {species: array[index] for species, index in mapping.items()}

    def initial_state(self, config=None):
        # gets the current (or initial) state of a bioscrape simulation
        if config is None:
            config = {}
        self.model.set_species(config)
        state = self.model.get_species_array()
        return {'species': self.get_state(state)}

    def ports_schema(self):
        '''
        ports_schema returns a dictionary that declares how each state will behave.
        Each key can be assigned settings for the schema_keys declared in Store:

        * `_default`
        * `_updater`
        * `_divider`
        * `_value`
        * `_properties`
        * `_emit`
        * `_serializer`
        '''
        # Different divide settings between stochastic and deterministic CRNs
        if self.stochastic:
            divider = "binomial"
        else:
            divider = "set"  # division does not change concentrations

        return {
            'species': {
                species: {
                    '_default': 0.0,
                    '_updater': 'accumulate',
                    '_emit': True,
                    '_divider': divider}
                for species in self.model.get_species()},
            'delta_species': {
                species: {
                    '_default': 0.0,
                    '_updater': 'set',
                    '_emit': False}
                for species in self.model.get_species()},
            'rates': {
                p: {
                    '_default': self.model.get_parameter_dictionary()[p],
                    '_updater': 'set',
                } for p in self.model.get_param_list()},
            'globals': {
                'volume': {
                    '_default': self.parameters['initial_volume'],
                    '_updater': 'accumulate',
                    '_emit': True}
            }
        }

    def next_update(self, timestep, states):
        if 'species' in states:
            self.model.set_species(states['species'])
            self.interface.py_set_initial_state(self.model.get_species_array())

        if 'rates' in states:
            self.model.set_params(states['rates'])

        # Set the volume if needed. Note: the volume lives under the 'globals' port,
        # so the guard checks states['globals'] (the original checked 'volume' at the
        # top level, which never matched).
        if 'globals' in states and 'volume' in states['globals']:
            self.volume.py_set_volume(states['globals']['volume'])

        # create the interface
        timepoints = np.arange(0, timestep, self.internal_dt)

        if self.parameters["lineage"]:
            output = self.simulator.py_SimulateSingleCell(timepoints, Model=self.model,
                                                          interface=self.interface, v=self.volume)
        elif self.stochastic:
            output = self.simulator.py_volume_simulate(self.interface, self.volume, timepoints)
        else:
            output = self.simulator.py_simulate(self.interface, timepoints)

        result = output.py_get_result()[-1]
        result_state = self.get_state(result)
        delta = get_delta(states['species'], result_state)
        rates = self.model.get_parameter_dictionary()

        # If the simulation is a volume simulation, return the change in volume
        if getattr(output, "py_get_volume", None) is not None:
            Vi = output.py_get_volume()[0]
            Vf = output.py_get_volume()[-1]
            deltaV = Vf - Vi
        else:
            deltaV = 0

        return {
            'species': delta,
            'delta_species': delta,
            'rates': rates,
            'globals': {'volume': deltaV}}

    def get_model(self):
        return self.model

    def get_model_species(self):
        return self.model.get_species_dictionary()

    def get_model_species_ids(self):
        return list(self.model.get_species_dictionary().keys())

    def get_volume(self):
        return self.volume.py_get_volume()


def get_delta(before, after):
    # assuming before and after have the same keys
    return {key: after[key] - before_value
            for key, before_value in before.items()}


def run_bioscrape_process():
    # create a bioscrape process
    initial_parameters = {
        'sbml_file': 'Notebooks/model1.xml'}
    bioscrape_process = Bioscrape(initial_parameters)

    # run the simulation
    sim_settings = {
        'total_time': 10,
        'initial_state': bioscrape_process.initial_state()}
    output = simulate_process(bioscrape_process, sim_settings)

    # return the data from the simulation
    return output


def test_bioscrape_instantiation():
    # Deterministic case
    bioscrape_process = Bioscrape({'sbml_file': 'Notebooks/model1.xml'})
    assert isinstance(bioscrape_process.model, Model)
    assert isinstance(bioscrape_process.interface, ModelCSimInterface)
    assert bioscrape_process.stochastic == False
    assert isinstance(bioscrape_process.simulator, DeterministicSimulator)

    # Stochastic case
    bioscrape_process = Bioscrape({'sbml_file': 'Notebooks/model1.xml', "stochastic": True})
    assert isinstance(bioscrape_process.model, Model)
    assert isinstance(bioscrape_process.interface, ModelCSimInterface)
    assert bioscrape_process.stochastic == True
    assert isinstance(bioscrape_process.simulator, VolumeSSASimulator)

    # Custom model case
    M = Model(species=["S"])
    bioscrape_process2 = Bioscrape({'bioscrape_model': M})
    assert M == bioscrape_process2.model
    assert bioscrape_process2.sbml_file is None


def test_next_update():
    initial_parameters = {
        'sbml_file': 'Notebooks/model1.xml'}
    bioscrape_process = Bioscrape(initial_parameters)
    initial_state = bioscrape_process.initial_state()
    output = bioscrape_process.next_update(1.0, initial_state)
    assert "species" in output
    assert "delta_species" in output
    assert all([output["species"][s] == output["delta_species"][s] for s in output["species"]])
    assert "rates" in output

    # set all the rates to 0
    state = {
        "rates": {p: 0 for p in output["rates"]},
        "species": bioscrape_process.initial_state()["species"]
    }
    output2 = bioscrape_process.next_update(1.0, state)
    # nothing should change in the simulation
    assert all([output2["species"][s] == output2["delta_species"][s] == 0 for s in output2["species"]])


def test_bioscrape_process():
    '''Test that the process runs correctly.

    This will be executed by pytest.
    '''
    output = run_bioscrape_process()
    # DNA concentration should be constant
    assert all([v == output["species"]["dna_G"][0] for v in output["species"]["dna_G"]])
    # RNA concentration should be increasing
    assert all([output["species"]["rna_T"][i] < output["species"]["rna_T"][i+1]
                for i in range(len(output["species"]["rna_T"])-1)])
    # Protein concentration should be increasing
    assert all([output["species"]["protein_X"][i] < output["species"]["protein_X"][i+1]
                for i in range(len(output["species"]["protein_X"])-1)])


def main():
    '''Simulate the process and plot results.'''
    # make an output directory to save plots
    out_dir = os.path.join(PROCESS_OUT_DIR, NAME)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    output = run_bioscrape_process()

    # plot the simulation output
    plot_settings = {}
    plot_simulation_output(output, plot_settings, out_dir)


if __name__ == '__main__':
    main()
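A small sketch of driving one update step of the Bioscrape process by hand, mirroring the tests above; the SBML path is the same assumption the tests make:

# Sketch: drive one update step of the Bioscrape process manually.
process = Bioscrape({'sbml_file': 'Notebooks/model1.xml'})
state = process.initial_state()
update = process.next_update(1.0, state)
# 'species' holds deltas (that port's updater is 'accumulate'),
# 'delta_species' carries the same values under a 'set' updater.
print(sorted(update.keys()))  # ['delta_species', 'globals', 'rates', 'species']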
37.14375
144
0.636968
1,339
11,886
5.48245
0.189694
0.054488
0.017981
0.018117
0.256232
0.18853
0.158017
0.10816
0.082278
0.059392
0
0.004924
0.265354
11,886
319
145
37.260188
0.835776
0.164059
0
0.191176
0
0
0.132029
0.006965
0
0
0
0.003135
0.088235
1
0.078431
false
0
0.039216
0.02451
0.186275
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a77dc3ad74814a553f3c883a2fdba3de71ca441
2,748
py
Python
api/tests/opentrons/protocol_engine/execution/test_hardware_event_forwarder.py
anuwrag/opentrons
28c8d76a19e367c6bd38f5290faaa32abf378715
[ "Apache-2.0" ]
2
2015-11-10T17:49:51.000Z
2016-01-15T04:43:37.000Z
api/tests/opentrons/protocol_engine/execution/test_hardware_event_forwarder.py
anuwrag/opentrons
28c8d76a19e367c6bd38f5290faaa32abf378715
[ "Apache-2.0" ]
null
null
null
api/tests/opentrons/protocol_engine/execution/test_hardware_event_forwarder.py
anuwrag/opentrons
28c8d76a19e367c6bd38f5290faaa32abf378715
[ "Apache-2.0" ]
null
null
null
"""Tests for hardware_event_forwarder.""" from typing import cast import pytest from anyio import to_thread from decoy import Decoy, matchers from opentrons.hardware_control import HardwareControlAPI from opentrons.hardware_control.types import ( DoorStateNotification, DoorState, HardwareEventHandler, ) from opentrons.protocol_engine.actions import ActionDispatcher, HardwareEventAction from opentrons.protocol_engine.execution.hardware_event_forwarder import ( HardwareEventForwarder, ) @pytest.fixture def hardware_control_api( decoy: Decoy, ) -> HardwareControlAPI: """Return a mock in the shape of a HardwareControlAPI.""" return decoy.mock(cls=HardwareControlAPI) @pytest.fixture def action_dispatcher(decoy: Decoy) -> ActionDispatcher: """Return a mock in the shape of an ActionDispatcher.""" return decoy.mock(cls=ActionDispatcher) @pytest.fixture async def subject( hardware_control_api: HardwareControlAPI, action_dispatcher: ActionDispatcher ) -> HardwareEventForwarder: """Return a HardwareEventForwarder with mocked dependencies. Async because HardwareEventForwarder's initializer requires a running event loop. """ return HardwareEventForwarder( hardware_api=hardware_control_api, action_dispatcher=action_dispatcher ) async def test_event_forwarding( decoy: Decoy, subject: HardwareEventForwarder, hardware_control_api: HardwareControlAPI, action_dispatcher: ActionDispatcher, ) -> None: """It should forward events that come from a different thread.""" handler_captor = matchers.Captor() unsubscribe_callback = decoy.mock() decoy.when(hardware_control_api.register_callback(handler_captor)).then_return( unsubscribe_callback ) subject.start() captured_handler = cast(HardwareEventHandler, handler_captor.value) input_event = DoorStateNotification(new_state=DoorState.OPEN, blocking=True) expected_action_to_forward = HardwareEventAction(input_event) await to_thread.run_sync(captured_handler, input_event) decoy.verify(action_dispatcher.dispatch(expected_action_to_forward)) async def test_one_subscribe_one_unsubscribe( decoy: Decoy, hardware_control_api: HardwareControlAPI, subject: HardwareEventForwarder, ) -> None: """Multiple start()s and stop()s should be collapsed.""" unsubscribe = decoy.mock() wrong_unsubscribe = decoy.mock() decoy.when(hardware_control_api.register_callback(matchers.Anything())).then_return( unsubscribe, wrong_unsubscribe ) subject.start() subject.start() subject.stop_soon() subject.stop_soon() decoy.verify(unsubscribe(), times=1) decoy.verify(wrong_unsubscribe(), times=0)
29.234043
88
0.767103
297
2,748
6.882155
0.333333
0.066047
0.061644
0.052838
0.139922
0.139922
0.139922
0.050881
0.050881
0
0
0.000858
0.152111
2,748
93
89
29.548387
0.876395
0.050218
0
0.274194
0
0
0
0
0
0
0
0
0
1
0.032258
false
0
0.129032
0
0.209677
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a795005eb522c95aeddb9f41a19181d2837f12c
918
py
Python
tests/rules/test_rm_dir.py
eoinjordan/thefeck
e04f50409ba3069ec6a9f7c0aab39ca835a41b68
[ "MIT" ]
null
null
null
tests/rules/test_rm_dir.py
eoinjordan/thefeck
e04f50409ba3069ec6a9f7c0aab39ca835a41b68
[ "MIT" ]
null
null
null
tests/rules/test_rm_dir.py
eoinjordan/thefeck
e04f50409ba3069ec6a9f7c0aab39ca835a41b68
[ "MIT" ]
null
null
null
import pytest
from thefeck.rules.rm_dir import match, get_new_command
from thefeck.types import Command


@pytest.mark.parametrize('command', [
    Command('rm bar', 'rm: bar: is a directory'),
    Command('rm bar', 'rm: bar: Is a directory'),
    Command('hdfs dfs -rm bar', 'rm: `bar`: Is a directory'),
    Command('./bin/hdfs dfs -rm bar', 'rm: `bar`: Is a directory'),
])
def test_match(command):
    assert match(command)


@pytest.mark.parametrize('command', [
    Command('rm bar', ''),
    Command('hdfs dfs -rm bar', ''),
    Command('./bin/hdfs dfs -rm bar', ''),
    Command('', ''),
])
def test_not_match(command):
    assert not match(command)


@pytest.mark.parametrize('command, new_command', [
    (Command('rm bar', ''), 'rm -rf bar'),
    (Command('hdfs dfs -rm bar', ''), 'hdfs dfs -rm -r bar'),
])
def test_get_new_command(command, new_command):
    assert get_new_command(command) == new_command
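For reference, a rule shaped like the one these tests exercise could look as follows. This is a sketch consistent with the assertions above, not necessarily the project's actual rm_dir implementation:

# Sketch of a rule satisfying the tests above (not the project's actual source).
import re

def match(command):
    return 'is a directory' in command.output.lower() and 'rm' in command.script

def get_new_command(command):
    arguments = '-rf'
    if 'hdfs' in command.script:
        arguments = '-r'
    return re.sub(r'\brm (.*)', 'rm ' + arguments + r' \1', command.script)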
28.6875
67
0.643791
130
918
4.438462
0.207692
0.112652
0.093588
0.103986
0.712305
0.663778
0.367418
0.367418
0.213172
0
0
0
0.172113
918
31
68
29.612903
0.759211
0
0
0.2
0
0
0.299564
0
0
0
0
0
0.12
1
0.12
false
0
0.12
0
0.24
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a7982fc813241f29db3cb0417d30ba88386f7dd
1,793
py
Python
tests/test_io/test_io_write_module.py
marcoalsina/araucaria
78039106ae27d3fdef9265503c33f33992199d8e
[ "BSD-2-Clause" ]
8
2021-07-11T22:54:21.000Z
2022-02-16T20:22:25.000Z
tests/test_io/test_io_write_module.py
marcoalsina/araucaria
78039106ae27d3fdef9265503c33f33992199d8e
[ "BSD-2-Clause" ]
null
null
null
tests/test_io/test_io_write_module.py
marcoalsina/araucaria
78039106ae27d3fdef9265503c33f33992199d8e
[ "BSD-2-Clause" ]
null
null
null
import shutil, tempfile
from os import path
import importlib.resources as pkg_resources
import unittest

from numpy import allclose

from araucaria import testdata
from araucaria.io import read_xmu, write_xmu


class TestWriteFunctions(unittest.TestCase):
    def setUp(self):
        # create a temp directory
        self.temp_dir = tempfile.mkdtemp()
        self.temp_fname = 'demo.xmu'
        self.temp_fpath = path.join(self.temp_dir, self.temp_fname)

    def tearDown(self):
        # remove the directory after the test
        shutil.rmtree(self.temp_dir)

    def test_write_xmu(self):
        # test TypeError exception
        invalid_group = ''
        self.assertRaises(TypeError, write_xmu, *(self.temp_fpath, invalid_group))

        # locate the test file (renamed from 'path' so it does not shadow os.path)
        with pkg_resources.path(testdata, 'xmu_testfile.xmu') as fpath:
            file_path = fpath

        # testing multiple scans
        for scan in ['mu', 'fluo']:
            group_original = read_xmu(file_path, scan=scan)

            # write file in temp folder
            write_xmu(self.temp_fpath, group_original, replace=True)
            if scan == 'fluo':
                self.assertRaises(IOError, write_xmu, *(self.temp_fpath, group_original))

            # reading written file
            group_read = read_xmu(self.temp_fpath, scan=scan)

            # asserting scans
            self.assertTrue(allclose(getattr(group_original, scan), getattr(group_read, scan)))
            self.assertTrue(allclose(getattr(group_original, 'mu_ref'), getattr(group_read, 'mu_ref')))


if __name__ == '__main__':
    unittest.main()
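Outside the test harness, the round-trip the test verifies looks like this sketch; the file names are placeholders:

# Round-trip sketch for read_xmu/write_xmu; 'scan.xmu' and 'copy.xmu' are placeholders.
from araucaria.io import read_xmu, write_xmu

group = read_xmu('scan.xmu', scan='mu')      # read the 'mu' scan into a group
write_xmu('copy.xmu', group, replace=True)   # write it back out, overwriting if present
roundtrip = read_xmu('copy.xmu', scan='mu')  # reading the copy reproduces the scan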
36.591837
90
0.601785
202
1,793
5.123762
0.356436
0.077295
0.05314
0.077295
0.16715
0.14686
0.0657
0
0
0
0
0
0.318461
1,793
49
91
36.591837
0.846972
0.116007
0
0
0
0
0.035317
0
0
0
0
0
0.129032
1
0.096774
false
0
0.225806
0
0.354839
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a7b17c3ad6407653b44e5e838e265d381f792c7
7,622
py
Python
scripts/sources/s_checklist_scenariobased_step08.py
dpopadic/arpmRes
ddcc4de713b46e3e9dcb77cc08c502ce4df54f76
[ "MIT" ]
6
2021-04-10T13:24:30.000Z
2022-03-26T08:20:42.000Z
scripts/sources/s_checklist_scenariobased_step08.py
dpopadic/arpmRes
ddcc4de713b46e3e9dcb77cc08c502ce4df54f76
[ "MIT" ]
null
null
null
scripts/sources/s_checklist_scenariobased_step08.py
dpopadic/arpmRes
ddcc4de713b46e3e9dcb77cc08c502ce4df54f76
[ "MIT" ]
6
2019-08-13T22:02:17.000Z
2022-02-09T17:49:12.000Z
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.4'
#     jupytext_version: 1.2.0
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # s_checklist_scenariobased_step08 [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_checklist_scenariobased_step08&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ex-vue-8).

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from arpym.statistics import meancov_sp
from arpym.estimation import fit_lfm_lasso
# -

# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step08-parameters)

# +
# indicates which projection to continue from
# True: use copula-marginal projections
# False: use historical projections
copula_marginal = True

# parameter for lasso minimization
if copula_marginal:
    lam = 98000
else:
    lam = 15000
# -

# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step08-implementation-step00): Load data

# +
path = '../../../databases/temporary-databases/'

# Risk drivers identification
db_riskdrivers_series = pd.read_csv(path+'db_riskdrivers_series.csv', index_col=0)
x = db_riskdrivers_series.values
riskdriver_names = np.array(db_riskdrivers_series.columns)

db_riskdrivers_tools = pd.read_csv(path+'db_riskdrivers_tools.csv')
d_ = int(db_riskdrivers_tools['d_'][0])
n_stocks = int(db_riskdrivers_tools['n_stocks'][0])
t_now = np.datetime64(db_riskdrivers_tools.t_now[0], 'D')

# Pricing
db_holdings = pd.read_csv(path+'db_holdings.csv')

if copula_marginal:
    # Projection
    db_projection_tools = pd.read_csv(path+'db_projection_tools.csv')
    j_ = int(db_projection_tools['j_'][0])
    t_hor = np.datetime64(db_projection_tools['t_hor'][0], 'D')
    m_ = np.busday_count(t_now, t_hor)

    db_projection_riskdrivers = pd.read_csv(path+'db_projection_riskdrivers.csv')
    x_proj = db_projection_riskdrivers.values.reshape(j_, m_+1, d_)

    db_scenprob = pd.read_csv(path+'db_scenario_probs.csv')
    p = db_scenprob['p'].values

    # Aggregation
    db_exante_perf = pd.read_csv(path+'db_exante_perf.csv')
    y_h = db_exante_perf.values.squeeze()

    # Ex-ante evaluation
    db_quantile_and_satis = pd.read_csv(path+'db_quantile_and_satis.csv')
    c_es = db_quantile_and_satis['c_es'][0]
    es_yh = db_quantile_and_satis['es_yh'][0]
    neg_var_yh = db_quantile_and_satis['neg_var_yh'][0]
else:
    # Projection
    db_projection_tools = pd.read_csv(path+'db_projection_bootstrap_tools.csv')
    j_ = int(db_projection_tools['j_'][0])
    t_hor = np.datetime64(db_projection_tools['t_hor'][0], 'D')
    m_ = np.busday_count(t_now, t_hor)

    db_projection_riskdrivers = pd.read_csv(path+'db_projection_bootstrap_riskdrivers.csv')
    x_proj = db_projection_riskdrivers.values.reshape(j_, m_+1, d_)

    db_scenprob = pd.read_csv(path+'db_scenario_probs_bootstrap.csv')
    p = db_scenprob['p'].values

    # Aggregation
    db_exante_perf = pd.read_csv(path+'db_exante_perf_historical.csv')
    y_h = db_exante_perf.values.squeeze()

    # Ex-ante evaluation
    db_quantile_and_satis = pd.read_csv(path+'db_quantile_and_satis_historical.csv')
    c_es = db_quantile_and_satis['c_es'][0]
    es_yh = db_quantile_and_satis['es_yh'][0]
    neg_var_yh = db_quantile_and_satis['neg_var_yh'][0]
# -

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step08-implementation-step01): Ex-ante attribution: performance

# +
# risk factors: risk driver increments
z = x_proj[:, -1, :] - x[-1, :]

# estimate exposures, intercept and residuals
alpha, beta, _, u = fit_lfm_lasso(y_h, z, p, lam)
u = u.squeeze()
alpha = alpha[0]

# select data for relevant risk factors only
ind_relevant_risk_factors = np.where(beta != 0)[0]
beta = beta[ind_relevant_risk_factors]
z = z[:, ind_relevant_risk_factors]
# number of relevant risk factors
k_ = beta.shape[0]

# joint distribution of residual and risk factors
f_uz = (np.c_[u, z], p)

risk_factors = riskdriver_names[ind_relevant_risk_factors]
print('Number of relevant risk factors: ' + str(k_))

# create output dictionary
output = {'k_': k_,        # number of relevant risk factors
          'alpha': alpha,  # shift term
          'beta': beta,    # exposures
          'f_UZ': f_uz     # joint distribution of residual and risk factors
          }
# -

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step08-implementation-step02): Ex-ante attribution: risk

# +
# map residuals to the 0-th factor
z_0 = (alpha + u)
# exposure to the residual
beta_0 = 1

# update exposures
beta_new = np.append(beta_0, beta)
k_new = beta_new.shape[0]
# update risk factors
z_new = np.c_[z_0, z]

# sort the scenarios of the risk factors and probabilities
# according to the order induced by the ex-ante performance scenarios
sort_yh = np.argsort(y_h, axis=0)
p_sort = p[sort_yh]
z_new_sort = z_new[sort_yh, :]

# marginal contributions to the negative expected shortfall satisfaction measure
# calculate weights
j_c = np.min(np.where(np.cumsum(p_sort) >= 1-c_es))
w = np.zeros((j_))
for j in range(j_c):
    w[j] = 1/(1-c_es)*p_sort[j]
w[j_c] = 1 - np.sum(w)
# calculate contributions
es_contrib = beta_new * (w.T @ z_new_sort)

# print percentage contributions
pc_es_contrib = es_contrib/np.sum(es_yh)
print('Percentage contributions to negative expected shortfall')
print('-'*55)
for k in range(1, k_+1):
    print('{:31}'.format(risk_factors[k-1])+':',
          '{: 7.2%}'.format(pc_es_contrib[k]))
print('{:31}'.format('residual')+':',
      '{: 7.2%}'.format(pc_es_contrib[0]))
print('')

# marginal contributions to the variance satisfaction measure
# find covariance
_, cov_z_new = meancov_sp(z_new, p)
# calculate contributions
var_contrib = -beta_new * (cov_z_new @ beta_new.T)

# print percentage contributions
pc_var_contrib = var_contrib/neg_var_yh
print('Percentage contributions to variance satisfaction measure')
print('-'*57)
for k in range(1, k_+1):
    print('{:31}'.format(risk_factors[k-1])+':',
          '{: 7.2%}'.format(pc_var_contrib[k]))
print('{:31}'.format('residual')+':',
      '{: 7.2%}'.format(pc_var_contrib[0]))

# update output dictionary
output['-ES_k'] = es_contrib
output['-V_k'] = var_contrib
# -

# ## Plots

# +
plt.style.use('arpm')

fig, (ax1, ax2) = plt.subplots(1, 2,
                               figsize=(1280.0/72.0, 720.0/72.0), dpi=72.0)

# expected shortfall
heights = np.flip(np.append(es_yh,
                            np.append(es_contrib[1:], es_contrib[0])))
heights_r = heights*1e-6
lbls = np.flip(np.append('total',
                         np.append(risk_factors, 'residual')))
colors = ['C5'] + ['C0']*k_ + ['C2']
ax1.barh(range(k_new+1), heights_r, tick_label=lbls, color=colors)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
ax1.set_ylabel('Risk driver increments', fontsize=17)
ax1.set_xlabel('-ES (million USD)', fontsize=17)
ax1.set_title('Risk attribution: expected shortfall',
              fontsize=20, fontweight='bold')

# variance
heights = np.flip(np.append(neg_var_yh,
                            np.append(var_contrib[1:], var_contrib[0])))
colors = ['C5'] + ['C0']*k_ + ['C2']
ax2.barh(range(k_new+1), heights, color=colors)
plt.yticks([])
ax2.set_xlabel('-Variance', fontsize=17)
ax2.set_ylabel('')
ax2.set_title('Risk attribution: variance', fontsize=20, fontweight='bold')
plt.tight_layout()
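The weight construction in Step 2 above is the standard scenario-probability expected-shortfall weighting: the tail scenarios (in the order induced by ex-ante performance) get weight p_j/(1-c_es), and the boundary scenario gets the remainder so the weights sum to one. A tiny standalone numeric sketch with toy probabilities:

# Standalone numeric sketch of the ES weight construction used in Step 2.
import numpy as np

p_sort = np.array([0.2, 0.2, 0.2, 0.2, 0.2])  # sorted scenario probabilities (toy)
c_es = 0.7                                     # ES confidence level
j_c = np.min(np.where(np.cumsum(p_sort) >= 1 - c_es))
w = np.zeros(len(p_sort))
for j in range(j_c):
    w[j] = 1 / (1 - c_es) * p_sort[j]
w[j_c] = 1 - np.sum(w)  # weights sum to one over the tail
print(w)  # approximately [0.667 0.333 0. 0. 0.]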
32.995671
235
0.708082
1,166
7,622
4.363636
0.238422
0.036753
0.022995
0.033215
0.413325
0.365173
0.340998
0.315841
0.308569
0.308569
0
0.026259
0.145631
7,622
230
236
33.13913
0.755068
0.300577
0
0.256
0
0
0.160823
0.067454
0
0
0
0
0
1
0
false
0
0.04
0
0.04
0.08
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a7d82fb547d061dbf9485d1ec44469a075ac741
8,097
py
Python
Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/build_gallery.py
ruoclio/awesome-DeepLearning
67fcc15d6a2651d7f38de80670f0e5658f4f8827
[ "Apache-2.0" ]
null
null
null
Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/build_gallery.py
ruoclio/awesome-DeepLearning
67fcc15d6a2651d7f38de80670f0e5658f4f8827
[ "Apache-2.0" ]
null
null
null
Paddle_Industry_Practice_Sample_Library/Electromobile_In_Elevator_Detection/code/build_gallery/build_gallery.py
ruoclio/awesome-DeepLearning
67fcc15d6a2651d7f38de80670f0e5658f4f8827
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys

__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(__dir__, '../')))

import cv2
import faiss
import numpy as np
from tqdm import tqdm
import pickle

from predict_rec import RecPredictor

from utils import logger
from utils import config


def split_datafile(data_file, image_root, delimiter="\t"):
    '''
    data_file: image path and info, which can be split by the delimiter
    image_root: image path root
    delimiter: delimiter
    '''
    gallery_images = []
    gallery_docs = []
    with open(data_file, 'r', encoding='utf-8') as f:
        lines = f.readlines()
        for _, ori_line in enumerate(lines):
            line = ori_line.strip().split(delimiter)
            text_num = len(line)
            assert text_num >= 2, f"line({ori_line}) must be split into at least 2 parts, but got {text_num}"
            image_file = os.path.join(image_root, line[0])

            gallery_images.append(image_file)
            gallery_docs.append(ori_line.strip())

    return gallery_images, gallery_docs


class GalleryBuilder(object):
    def __init__(self, config):
        self.config = config
        self.rec_predictor = RecPredictor(config)
        assert 'IndexProcess' in config.keys(), "Index config not found ... "
        self.build(config['IndexProcess'])

    def build(self, config):
        '''
        build index from scratch
        '''
        operation_method = config.get("index_operation", "new").lower()

        gallery_images, gallery_docs = split_datafile(
            config['data_file'], config['image_root'], config['delimiter'])

        # when removing data from the index, there is no need to extract features
        if operation_method != "remove":
            gallery_features = self._extract_features(gallery_images, config)
        assert operation_method in [
            "new", "remove", "append"
        ], "Only append, remove and new operations are supported"

        # vector.index: faiss index file
        # id_map.pkl: use this file to map ids to image_docs
        if operation_method in ["remove", "append"]:
            # if remove or append, vector.index and id_map.pkl must exist.
            # Note: the original asserts used os.path.join, which is always truthy;
            # os.path.exists is what is actually meant here.
            assert os.path.exists(
                os.path.join(config["index_dir"], "vector.index")
            ), "The vector.index does not exist in {} when 'index_operation' is not None".format(
                config["index_dir"])
            assert os.path.exists(
                os.path.join(config["index_dir"], "id_map.pkl")
            ), "The id_map.pkl does not exist in {} when 'index_operation' is not None".format(
                config["index_dir"])
            index = faiss.read_index(
                os.path.join(config["index_dir"], "vector.index"))
            with open(os.path.join(config["index_dir"], "id_map.pkl"), 'rb') as fd:
                ids = pickle.load(fd)
            assert index.ntotal == len(ids.keys(
            )), "data number in index is not equal to data number in id_map"
        else:
            if not os.path.exists(config["index_dir"]):
                os.makedirs(config["index_dir"], exist_ok=True)
            index_method = config.get("index_method", "HNSW32")

            # if IVF method, calculate the ivf number automatically
            if index_method == "IVF":
                index_method = index_method + str(
                    min(int(len(gallery_images) // 8), 65536)) + ",Flat"

            # for a binary index, add B at the head of index_method
            if config["dist_type"] == "hamming":
                index_method = "B" + index_method

            # dist_type
            dist_type = faiss.METRIC_INNER_PRODUCT if config[
                "dist_type"] == "IP" else faiss.METRIC_L2

            # build index
            if config["dist_type"] == "hamming":
                index = faiss.index_binary_factory(config["embedding_size"],
                                                   index_method)
            else:
                index = faiss.index_factory(config["embedding_size"],
                                            index_method, dist_type)
                index = faiss.IndexIDMap2(index)
            ids = {}

        if config["index_method"] == "HNSW32":
            logger.warning(
                "The HNSW32 method does not support the 'remove' operation")

        if operation_method != "remove":
            # calculate ids for the new data
            start_id = max(ids.keys()) + 1 if ids else 0
            ids_now = (
                np.arange(0, len(gallery_images)) + start_id).astype(np.int64)

            # only train when creating a new index file
            if operation_method == "new":
                if config["dist_type"] == "hamming":
                    index.add(gallery_features)
                else:
                    index.train(gallery_features)

            if not config["dist_type"] == "hamming":
                index.add_with_ids(gallery_features, ids_now)

            for i, d in zip(list(ids_now), gallery_docs):
                ids[i] = d
        else:
            if config["index_method"] == "HNSW32":
                raise RuntimeError(
                    "The index_method: HNSW32 does not support the 'remove' operation"
                )
            # remove ids from id_map, remove index data from the faiss index
            remove_ids = list(
                filter(lambda k: ids.get(k) in gallery_docs, ids.keys()))
            remove_ids = np.asarray(remove_ids)
            index.remove_ids(remove_ids)
            for k in remove_ids:
                del ids[k]

        # store the faiss index file and the id_map file
        if config["dist_type"] == "hamming":
            faiss.write_index_binary(
                index, os.path.join(config["index_dir"], "vector.index"))
        else:
            faiss.write_index(
                index, os.path.join(config["index_dir"], "vector.index"))

        with open(os.path.join(config["index_dir"], "id_map.pkl"), 'wb') as fd:
            pickle.dump(ids, fd)

    def _extract_features(self, gallery_images, config):
        # extract gallery features
        if config["dist_type"] == "hamming":
            gallery_features = np.zeros(
                [len(gallery_images), config['embedding_size'] // 8],
                dtype=np.uint8)
        else:
            gallery_features = np.zeros(
                [len(gallery_images), config['embedding_size']],
                dtype=np.float32)

        # construct batches of images and do inference
        batch_size = config.get("batch_size", 32)
        batch_img = []
        for i, image_file in enumerate(tqdm(gallery_images)):
            img = cv2.imread(image_file)
            if img is None:
                logger.error("img empty, please check {}".format(image_file))
                exit()
            img = img[:, :, ::-1]
            batch_img.append(img)

            if (i + 1) % batch_size == 0:
                rec_feat = self.rec_predictor.predict(batch_img)
                gallery_features[i - batch_size + 1:i + 1, :] = rec_feat
                batch_img = []

        if len(batch_img) > 0:
            rec_feat = self.rec_predictor.predict(batch_img)
            gallery_features[-len(batch_img):, :] = rec_feat
            batch_img = []

        return gallery_features


def main(config):
    GalleryBuilder(config)
    return


if __name__ == "__main__":
    args = config.parse_args()
    config = config.get_config(args.config, overrides=args.override, show=True)
    main(config)
37.836449
112
0.578733
985
8,097
4.574619
0.257868
0.01731
0.034177
0.024856
0.214603
0.180426
0.13826
0.135597
0.12783
0.111851
0
0.008851
0.31629
8,097
213
113
38.014085
0.805094
0.153267
0
0.194444
0
0
0.151914
0
0
0
0
0
0.041667
1
0.034722
false
0
0.069444
0
0.131944
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a7d9b10e32a514f5564266c1d9bee2a581008ce
2,533
py
Python
4 term/MNA/Lab 4/Lab4.py
mrojaczy/Labs
21cd2ad3ddf8fa3b64cf253d147a4a04ad0667ab
[ "Apache-2.0" ]
1
2020-03-15T17:11:23.000Z
2020-03-15T17:11:23.000Z
4 term/MNA/Lab 4/Lab4.py
Asphobel/Labs
ee827143b32b691dd7736ba4888a4a9625b4694a
[ "Apache-2.0" ]
null
null
null
4 term/MNA/Lab 4/Lab4.py
Asphobel/Labs
ee827143b32b691dd7736ba4888a4a9625b4694a
[ "Apache-2.0" ]
null
null
null
import numpy
import sympy

print("System of nonlinear equations \n")

m = 0.3
a = 0.5
EPS = 10.0 ** -5

# Original equations in the form f(x, y) = 0
(x, y) = sympy.symbols("x y")
eq1 = sympy.tan(x * y + m) - x
eq2 = a * (x ** 2) + 2 * (y ** 2) - 1
print(eq1, "= 0")
print(eq2, "= 0")
print()


# Substitution into the original equations to evaluate f(x, y)
def val1(x, y):
    return numpy.tan(x * y + m) - x


def val2(x, y):
    return a * (x ** 2) + 2 * (y ** 2) - 1


# x and y expressed from the original equations: x = g(x, y), y = h(x, y)
def eqx(x, y):
    return numpy.tan(x * y + m)


def eqy(x, y):
    return numpy.sqrt((1 - a * (x ** 2)) / 2)


# Jacobian matrix
def W(x, y):
    return numpy.array([
        [(1 + numpy.tan(x * y + m) ** 2) * y - 1, (1 + numpy.tan(x * y + m) ** 2) * x],
        [2 * a * x, 4 * y]
    ])


# Plots of the original equations
plots = sympy.plot_implicit(sympy.Eq(eq1, 0), (x, -2, 2), (y, -2, 2), line_color="blue", show=False)
plots.extend(sympy.plot_implicit(sympy.Eq(eq2, 0), (x, -2, 2), (y, -2, 2), line_color="red", show=False))
# plots.show()

iters = 0


def SimpleSolve(x0, y0):
    global iters
    iters = 0
    (x, y) = (x0, y0)
    while True:
        iters += 1
        oldx = x
        oldy = y
        x = eqx(x, y)
        y = eqy(x, y)
        if not (numpy.isfinite(x) and numpy.isfinite(y)):
            raise RuntimeError("Sequence {x} is divergent")
        if max(abs(x - oldx), abs(y - oldy)) < EPS:
            return x, y


def NewtonSolve(x0, y0):
    global iters
    iters = 0
    (x, y) = (x0, y0)
    while True:
        iters += 1
        w = W(x, y)
        f = numpy.array([[val1(x, y)], [val2(x, y)]])
        deltas = numpy.linalg.solve(w, -f)
        x += deltas[0][0]
        y += deltas[1][0]
        if not (numpy.isfinite(x) and numpy.isfinite(y)):
            raise RuntimeError("Sequence {x} is divergent")
        if max(abs(deltas)) < EPS:
            return x, y


x0 = 1.0
y0 = 0.5
print("Initial guess =", (x0, y0))
print()


def run(method):
    global iters
    try:
        (x, y) = method(x0, y0)
        print("(x, y) = ({:.4f}, {:.4f})".format(x, y))
        print("    via {} method (with {} iterations)".format(method.__name__, iters))
    except Exception as ex:
        print("ERROR: {} - in {} method (with {} iterations)".format(ex, method.__name__, iters))
    print()


run(SimpleSolve)
run(NewtonSolve)
plots.show()
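For reference, each step inside NewtonSolve solves the linear system W(x_k, y_k) Δ = -F(x_k, y_k) and updates (x, y) by Δ. A one-iteration sketch by hand, reusing W, val1, val2 and the initial guess from the script above:

# One Newton step by hand, using the W and val1/val2 defined above.
w0 = W(x0, y0)
f0 = numpy.array([[val1(x0, y0)], [val2(x0, y0)]])
delta = numpy.linalg.solve(w0, -f0)
print("first Newton step:", x0 + delta[0][0], y0 + delta[1][0])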
23.027273
106
0.506119
384
2,533
3.307292
0.270833
0.044094
0.019685
0.023622
0.324409
0.280315
0.280315
0.248819
0.218898
0.193701
0
0.044957
0.315041
2,533
109
107
23.238532
0.687032
0.09317
0
0.287671
0
0
0.099954
0
0
0
0
0
0
1
0.109589
false
0
0.027397
0.068493
0.232877
0.136986
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a7e5d7d31b8e717048becad533f288df8dd5a1f
2,529
py
Python
google/cloud/bigquery_migration_v2/types/__init__.py
renovate-bot/python-bigquery-migration
e95f0fcad0f6c3366ade1637b51295a89d2bc1b2
[ "Apache-2.0" ]
1
2021-12-08T10:51:47.000Z
2021-12-08T10:51:47.000Z
google/cloud/bigquery_migration_v2/types/__init__.py
renovate-bot/python-bigquery-migration
e95f0fcad0f6c3366ade1637b51295a89d2bc1b2
[ "Apache-2.0" ]
20
2021-09-28T17:00:02.000Z
2022-03-31T18:48:42.000Z
google/cloud/bigquery_migration_v2/types/__init__.py
renovate-bot/python-bigquery-migration
e95f0fcad0f6c3366ade1637b51295a89d2bc1b2
[ "Apache-2.0" ]
4
2021-09-27T18:27:49.000Z
2022-01-29T08:07:41.000Z
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .migration_entities import MigrationSubtask, MigrationTask, MigrationWorkflow
from .migration_error_details import ErrorDetail, ErrorLocation, ResourceErrorDetail
from .migration_metrics import Point, TimeInterval, TimeSeries, TypedValue
from .migration_service import (
    CreateMigrationWorkflowRequest,
    DeleteMigrationWorkflowRequest,
    GetMigrationSubtaskRequest,
    GetMigrationWorkflowRequest,
    ListMigrationSubtasksRequest,
    ListMigrationSubtasksResponse,
    ListMigrationWorkflowsRequest,
    ListMigrationWorkflowsResponse,
    StartMigrationWorkflowRequest,
)
from .translation_config import (
    AzureSynapseDialect,
    BigQueryDialect,
    Dialect,
    HiveQLDialect,
    NameMappingKey,
    NameMappingValue,
    NetezzaDialect,
    ObjectNameMapping,
    ObjectNameMappingList,
    OracleDialect,
    RedshiftDialect,
    SnowflakeDialect,
    SourceEnv,
    SparkSQLDialect,
    TeradataDialect,
    TranslationConfigDetails,
    VerticaDialect,
)

__all__ = (
    "MigrationSubtask",
    "MigrationTask",
    "MigrationWorkflow",
    "ErrorDetail",
    "ErrorLocation",
    "ResourceErrorDetail",
    "Point",
    "TimeInterval",
    "TimeSeries",
    "TypedValue",
    "CreateMigrationWorkflowRequest",
    "DeleteMigrationWorkflowRequest",
    "GetMigrationSubtaskRequest",
    "GetMigrationWorkflowRequest",
    "ListMigrationSubtasksRequest",
    "ListMigrationSubtasksResponse",
    "ListMigrationWorkflowsRequest",
    "ListMigrationWorkflowsResponse",
    "StartMigrationWorkflowRequest",
    "AzureSynapseDialect",
    "BigQueryDialect",
    "Dialect",
    "HiveQLDialect",
    "NameMappingKey",
    "NameMappingValue",
    "NetezzaDialect",
    "ObjectNameMapping",
    "ObjectNameMappingList",
    "OracleDialect",
    "RedshiftDialect",
    "SnowflakeDialect",
    "SourceEnv",
    "SparkSQLDialect",
    "TeradataDialect",
    "TranslationConfigDetails",
    "VerticaDialect",
)
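These re-exports let callers import request and message types directly from the types package; a one-liner sketch (the `parent` field value is a placeholder, and constructing the proto-plus message with keyword arguments is an assumption):

# Sketch: consuming the re-exported types; the parent path is a placeholder.
from google.cloud.bigquery_migration_v2.types import (
    CreateMigrationWorkflowRequest,
    MigrationWorkflow,
)

request = CreateMigrationWorkflowRequest(parent="projects/my-project/locations/us")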
28.738636
84
0.743377
187
2,529
10
0.604278
0.032086
0.013904
0.017112
0.550802
0.550802
0.550802
0.550802
0.550802
0.274866
0
0.004327
0.177541
2,529
87
85
29.068966
0.894712
0.22499
0
0
0
0
0.329733
0.155864
0
0
0
0
0
1
0
false
0
0.070423
0
0.070423
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a806036db3987296a33c1f5a9fb6085d33841b6
2,975
py
Python
manifest_server/iiif/v2/manifests/sequence.py
bodleian/iiif_manifest_server
551b201b9588f7a3ae3656492c925eeb57dc69de
[ "MIT" ]
8
2019-11-27T12:46:16.000Z
2021-04-14T05:57:05.000Z
manifest_server/iiif/v2/manifests/sequence.py
bodleian/iiif_manifest_server
551b201b9588f7a3ae3656492c925eeb57dc69de
[ "MIT" ]
1
2021-06-02T00:46:38.000Z
2021-06-02T00:46:38.000Z
manifest_server/iiif/v2/manifests/sequence.py
bodleian/iiif_manifest_server
551b201b9588f7a3ae3656492c925eeb57dc69de
[ "MIT" ]
null
null
null
import logging
from typing import Dict, Optional

import pysolr
import serpy

from manifest_server.helpers.fields import StaticField
from manifest_server.helpers.identifiers import get_identifier, IIIF_V2_CONTEXT
from manifest_server.helpers.serializers import ContextDictSerializer
from manifest_server.helpers.solr import SolrManager, SolrResult
from manifest_server.helpers.solr_connection import SolrConnection
from manifest_server.iiif.v2 import Canvas

log = logging.getLogger(__name__)


def create_v2_sequence(request, sequence_id: str, config: Dict) -> Optional[Dict]:
    record: pysolr.Results = SolrConnection.search("*:*", fq=["type:object", f"id:{sequence_id}"], rows=1)

    if record.hits == 0:
        return None

    object_record = record.docs[0]
    sequence: Sequence = Sequence(object_record,
                                  context={"request": request, "config": config, "direct_request": True})

    return sequence.data


class Sequence(ContextDictSerializer):
    ctx = serpy.MethodField(
        label="@context"
    )
    sid = serpy.MethodField(
        label="@id"
    )
    stype = StaticField(
        label="@type",
        value="sc:Sequence"
    )
    label = StaticField(
        value="Default"
    )
    canvases = serpy.MethodField()

    def get_ctx(self, obj: SolrResult) -> Optional[str]:  # pylint: disable-msg=unused-argument
        direct_request: bool = self.context.get('direct_request')
        return IIIF_V2_CONTEXT if direct_request else None

    def get_sid(self, obj: Dict) -> str:
        req = self.context.get('request')
        cfg = self.context.get('config')
        obj_id = obj.get('id')
        sequence_tmpl = cfg['templates']['sequence_id_tmpl']

        return get_identifier(req, obj_id, sequence_tmpl)

    def get_canvases(self, obj: SolrResult) -> Optional[Dict]:
        req = self.context.get('request')
        cfg = self.context.get('config')
        obj_id = obj.get('id')

        # Check whether the canvases have annotations. We don't actually
        # need to retrieve them, just get the number of hits.
        has_annotations_res = SolrConnection.search(
            "*:*", fq=["type:annotationpage", f"object_id:{obj_id}"], rows=0
        )
        has_annotations = has_annotations_res.hits > 0

        manager: SolrManager = SolrManager(SolrConnection)
        fq = ["type:surface", f"object_id:{obj_id}"]
        sort = "sort_i asc"
        fl = ["*,[child parentFilter=type:surface childFilter=type:image]"]
        rows: int = 100
        manager.search("*:*", fq=fq, fl=fl, sort=sort, rows=rows)

        if manager.hits == 0:
            return None

        return Canvas(manager.results,
                      context={'request': req, 'config': cfg, 'has_annotations': has_annotations},
                      many=True).data
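The label= trick above (emitting JSON-LD keys like @context, @id, and @type) is plain serpy; a self-contained sketch of the pattern with illustrative names:

# Self-contained sketch of the serpy label= pattern used above.
import serpy

class Thing(serpy.DictSerializer):
    tid = serpy.StrField(attr='id', label='@id')
    ttype = serpy.MethodField(label='@type')

    def get_ttype(self, obj):
        return 'sc:Thing'

print(Thing({'id': 'https://example.org/1'}).data)
# {'@id': 'https://example.org/1', '@type': 'sc:Thing'}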
34.593023
106
0.618151
334
2,975
5.359281
0.326347
0.040223
0.060335
0.069832
0.115084
0.067039
0.067039
0.067039
0.067039
0.067039
0
0.00601
0.272941
2,975
85
107
35
0.821544
0.04874
0
0.123077
0
0
0.116419
0.016985
0
0
0
0
0
1
0.061538
false
0
0.153846
0
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a8065a1dca29e911ece58f6ae5d6fbf064b95b0
30,611
py
Python
src/UQpy/Surrogates/Kriging/Kriging.py
marrov/UQpy
b04a267b3080e3d4d38e876547ba0d3b979734f3
[ "MIT" ]
132
2018-03-13T13:56:33.000Z
2022-03-21T13:59:17.000Z
src/UQpy/Surrogates/Kriging/Kriging.py
marrov/UQpy
b04a267b3080e3d4d38e876547ba0d3b979734f3
[ "MIT" ]
140
2018-05-21T13:40:01.000Z
2022-03-29T14:18:01.000Z
src/UQpy/Surrogates/Kriging/Kriging.py
marrov/UQpy
b04a267b3080e3d4d38e876547ba0d3b979734f3
[ "MIT" ]
61
2018-05-02T13:40:05.000Z
2022-03-06T11:31:21.000Z
# UQpy is distributed under the MIT license.
#
# Copyright (C) 2018 -- Michael D. Shields
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

"""This module contains functionality for all the surrogate methods supported in UQpy.

The module currently contains the following classes:

- ``SROM``: Class to estimate a discrete approximation for a continuous random variable using a Stochastic Reduced
  Order Model.
- ``Kriging``: Class to generate an approximate surrogate model using Kriging.
- ``PCE``: Class to generate an approximate surrogate model using Polynomial Chaos Expansion.
"""

import numpy as np
import scipy.stats as stats
from UQpy.Distributions import Normal, Uniform, DistributionContinuous1D, JointInd
import scipy.integrate as integrate
import scipy.special as special
import itertools, math
import warnings
warnings.filterwarnings("ignore")


########################################################################################################################
########################################################################################################################
#                                        Kriging Interpolation (Kriging)                                               #
########################################################################################################################
########################################################################################################################

class Kriging:
    """
    Kriging generates a Gaussian process regression-based surrogate model to predict the model output at new sample
    points.

    **Inputs:**

    * **reg_model** (`str` or `function`):
        `reg_model` specifies and evaluates the basis functions and their coefficients, which define the trend of
        the model.

        Built-in options (string input): 'Constant', 'Linear', 'Quadratic'

        The user may also pass a callable function as defined in `User-Defined Regression Model` above.

    * **corr_model** (`str` or `function`):
        `corr_model` specifies and evaluates the correlation function.

        Built-in options (string input): 'Exponential', 'Gaussian', 'Linear', 'Spherical', 'Cubic', 'Spline'

        The user may also pass a callable function as defined in `User-Defined Correlation` above.

    * **corr_model_params** (`ndarray` or `list of floats`):
        List or array of initial values for the correlation model hyperparameters/scale parameters.

    * **bounds** (`list` of `float`):
        Bounds on the hyperparameters used to solve the optimization problem that estimates the maximum likelihood
        estimator. This should be a closed bound.

        Default: [0.001, 10**7] for each hyperparameter.

    * **op** (`boolean`):
        Indicator of whether to solve the MLE problem. If 'True', corr_model_params will be used as the initial
        solution for the optimization problem. Otherwise, corr_model_params will be used directly as the
        hyperparameters.

        Default: True.

    * **nopt** (`int`):
        Number of times the MLE optimization problem is to be solved with a random starting point.

        Default: 1.

    * **verbose** (`Boolean`):
        A boolean declaring whether to write text to the terminal.

        Default value: False

    **Attributes:**

    * **beta** (`ndarray`):
        Regression coefficients.

    * **err_var** (`ndarray`):
        Variance of the Gaussian random process.

    * **C_inv** (`ndarray`):
        Inverse Cholesky decomposition of the correlation matrix.

    **Methods:**

    """

    def __init__(self, reg_model='Linear', corr_model='Exponential', bounds=None, op=True, nopt=1, normalize=True,
                 verbose=False, corr_model_params=None, optimizer=None, random_state=None, **kwargs_optimizer):

        self.reg_model = reg_model
        self.corr_model = corr_model
        self.corr_model_params = np.array(corr_model_params)
        self.bounds = bounds
        self.optimizer = optimizer
        self.nopt = nopt
        self.op = op
        self.normalize = normalize
        self.verbose = verbose
        self.random_state = random_state
        self.kwargs_optimizer = kwargs_optimizer

        # Variables used outside __init__
        self.samples = None
        self.values = None
        self.sample_mean, self.sample_std = None, None
        self.value_mean, self.value_std = None, None
        self.rmodel, self.cmodel = None, None
        self.beta, self.gamma, self.err_var = None, None, None
        self.F_dash, self.C_inv, self.G = None, None, None
        self.F, self.R = None, None

        # Initialize and run preliminary error checks.
        if self.reg_model is None:
            raise NotImplementedError("UQpy: Regression model is not defined.")

        if self.corr_model is None:
            raise NotImplementedError("UQpy: Correlation model is not defined.")

        if self.corr_model_params is None:
            raise NotImplementedError("UQpy: corr_model_params is not defined.")

        if self.bounds is None:
            self.bounds = [[0.001, 10 ** 7]] * self.corr_model_params.shape[0]

        if self.optimizer is None:
            from scipy.optimize import fmin_l_bfgs_b
            self.optimizer = fmin_l_bfgs_b
            self.kwargs_optimizer = {'bounds': self.bounds}
        elif not callable(self.optimizer):
            raise TypeError('UQpy: Input optimizer should be None (set to scipy.optimize.minimize) or a callable.')

        if type(self.reg_model).__name__ == 'function':
            self.rmodel = 'User defined'
        elif self.reg_model in ['Constant', 'Linear', 'Quadratic']:
            self.rmodel = self.reg_model
            self.reg_model = self._regress()
        else:
            raise NotImplementedError("UQpy: Doesn't recognize the Regression model.")

        if type(self.corr_model).__name__ == 'function':
            self.cmodel = 'User defined'
        elif self.corr_model in ['Exponential', 'Gaussian', 'Linear', 'Spherical', 'Cubic', 'Spline', 'Other']:
            self.cmodel = self.corr_model
            self.corr_model: callable = self._corr()
        else:
            raise NotImplementedError("UQpy: Doesn't recognize the Correlation model.")

        if isinstance(self.random_state, int):
            self.random_state = np.random.RandomState(self.random_state)
        elif not isinstance(self.random_state, (type(None), np.random.RandomState)):
            raise TypeError('UQpy: random_state must be None, an int or an np.random.RandomState object.')

    def fit(self, samples, values, nopt=None, corr_model_params=None):
        """
        Fit the surrogate model using the training samples and the corresponding model values.

        The user can run this method multiple times after initiating the ``Kriging`` class object.
        This method updates the samples and parameters of the ``Kriging`` object.
This method uses `corr_model_params` from previous run as the starting point for MLE problem unless user provides a new starting point. **Inputs:** * **samples** (`ndarray`): `ndarray` containing the training points. * **values** (`ndarray`): `ndarray` containing the model evaluations at the training points. **Output/Return:** The ``fit`` method has no returns, although it creates the `beta`, `err_var` and `C_inv` attributes of the ``Kriging`` class. """ from scipy.linalg import cholesky if self.verbose: print('UQpy: Running Kriging.fit') def log_likelihood(p0, cm, s, f, y): # Return the log-likelihood function and it's gradient. Gradient is calculate using Central Difference m = s.shape[0] n = s.shape[1] r__, dr_ = cm(x=s, s=s, params=p0, dt=True) try: cc = cholesky(r__ + 2 ** (-52) * np.eye(m), lower=True) except np.linalg.LinAlgError: return np.inf, np.zeros(n) # Product of diagonal terms is negligible sometimes, even when cc exists. if np.prod(np.diagonal(cc)) == 0: return np.inf, np.zeros(n) cc_inv = np.linalg.inv(cc) r_inv = np.matmul(cc_inv.T, cc_inv) f__ = cc_inv.dot(f) y__ = cc_inv.dot(y) q__, g__ = np.linalg.qr(f__) # Eq: 3.11, DACE # Check if F is a full rank matrix if np.linalg.matrix_rank(g__) != min(np.size(f__, 0), np.size(f__, 1)): raise NotImplementedError("Chosen regression functions are not sufficiently linearly independent") # Design parameters beta_ = np.linalg.solve(g__, np.matmul(np.transpose(q__), y__)) # Computing the process variance (Eq: 3.13, DACE) sigma_ = np.zeros(y.shape[1]) ll = 0 for out_dim in range(y.shape[1]): sigma_[out_dim] = (1 / m) * (np.linalg.norm(y__[:, out_dim] - np.matmul(f__, beta_[:, out_dim])) ** 2) # Objective function:= log(det(sigma**2 * R)) + constant ll = ll + (np.log(np.linalg.det(sigma_[out_dim] * r__)) + m * (np.log(2 * np.pi) + 1)) / 2 # Gradient of loglikelihood # Reference: C. E. Rasmussen & C. K. I. Williams, Gaussian Processes for Machine Learning, the MIT Press, # 2006, ISBN 026218253X. 
(Page 114, Eq.(5.9)) residual = y - np.matmul(f, beta_) gamma = np.matmul(r_inv, residual) grad_mle = np.zeros(n) for in_dim in range(n): r_inv_derivative = np.matmul(r_inv, np.matmul(dr_[:, :, in_dim], r_inv)) tmp = np.matmul(residual.T, np.matmul(r_inv_derivative, residual)) for out_dim in range(y.shape[1]): alpha = gamma / sigma_[out_dim] tmp1 = np.matmul(alpha, alpha.T) - r_inv / sigma_[out_dim] cov_der = sigma_[out_dim] * dr_[:, :, in_dim] + tmp * r__ / m grad_mle[in_dim] = grad_mle[in_dim] - 0.5 * np.trace(np.matmul(tmp1, cov_der)) return ll, grad_mle if nopt is not None: self.nopt = nopt if corr_model_params is not None: self.corr_model_params = corr_model_params self.samples = np.array(samples) # Number of samples and dimensions of samples and values nsamples, input_dim = self.samples.shape output_dim = int(np.size(values) / nsamples) self.values = np.array(values).reshape(nsamples, output_dim) # Normalizing the data if self.normalize: self.sample_mean, self.sample_std = np.mean(self.samples, 0), np.std(self.samples, 0) self.value_mean, self.value_std = np.mean(self.values, 0), np.std(self.values, 0) s_ = (self.samples - self.sample_mean) / self.sample_std y_ = (self.values - self.value_mean) / self.value_std else: s_ = self.samples y_ = self.values self.F, jf_ = self.reg_model(s_) # Maximum Likelihood Estimation : Solving optimization problem to calculate hyperparameters if self.op: starting_point = self.corr_model_params minimizer, fun_value = np.zeros([self.nopt, input_dim]), np.zeros([self.nopt, 1]) for i__ in range(self.nopt): p_ = self.optimizer(log_likelihood, starting_point, args=(self.corr_model, s_, self.F, y_), **self.kwargs_optimizer) minimizer[i__, :] = p_[0] fun_value[i__, 0] = p_[1] # Generating new starting points using log-uniform distribution if i__ != self.nopt - 1: starting_point = stats.reciprocal.rvs([j[0] for j in self.bounds], [j[1] for j in self.bounds], 1, random_state=self.random_state) if min(fun_value) == np.inf: raise NotImplementedError("Maximum likelihood estimator failed: Choose different starting point or " "increase nopt") t = np.argmin(fun_value) self.corr_model_params = minimizer[t, :] # Updated Correlation matrix corresponding to MLE estimates of hyperparameters self.R = self.corr_model(x=s_, s=s_, params=self.corr_model_params) # Compute the regression coefficient (solving this linear equation: F * beta = Y) c = np.linalg.cholesky(self.R) # Eq: 3.8, DACE c_inv = np.linalg.inv(c) f_dash = np.linalg.solve(c, self.F) y_dash = np.linalg.solve(c, y_) q_, g_ = np.linalg.qr(f_dash) # Eq: 3.11, DACE # Check if F is a full rank matrix if np.linalg.matrix_rank(g_) != min(np.size(self.F, 0), np.size(self.F, 1)): raise NotImplementedError("Chosen regression functions are not sufficiently linearly independent") # Design parameters (beta: regression coefficient) self.beta = np.linalg.solve(g_, np.matmul(np.transpose(q_), y_dash)) # Design parameter (R * gamma = Y - F * beta = residual) self.gamma = np.linalg.solve(c.T, (y_dash - np.matmul(f_dash, self.beta))) # Computing the process variance (Eq: 3.13, DACE) self.err_var = np.zeros(output_dim) for i in range(output_dim): self.err_var[i] = (1 / nsamples) * (np.linalg.norm(y_dash[:, i] - np.matmul(f_dash, self.beta[:, i])) ** 2) self.F_dash, self.C_inv, self.G = f_dash, c_inv, g_ if self.verbose: print('UQpy: Kriging fit complete.') def predict(self, x, return_std=False): """ Predict the model response at new points. This method evaluates the regression and correlation model at new sample points. 
Then, it predicts the function value and standard deviation. **Inputs:** * **x** (`list` or `numpy array`): Points at which to predict the model response. * **return_std** (`Boolean`): Indicator to estimate standard deviation. **Outputs:** * **f_x** (`numpy array`): Predicted values at the new points. * **std_f_x** (`numpy array`): Standard deviation of predicted values at the new points. """ x_ = np.atleast_2d(x) if self.normalize: x_ = (x_ - self.sample_mean) / self.sample_std s_ = (self.samples - self.sample_mean) / self.sample_std else: s_ = self.samples fx, jf = self.reg_model(x_) rx = self.corr_model(x=x_, s=s_, params=self.corr_model_params) y = np.einsum('ij,jk->ik', fx, self.beta) + np.einsum('ij,jk->ik', rx, self.gamma) if self.normalize: y = self.value_mean + y * self.value_std if x_.shape[1] == 1: y = y.flatten() if return_std: r_dash = np.matmul(self.C_inv, rx.T) u = np.matmul(self.F_dash.T, r_dash) - fx.T norm1 = np.linalg.norm(r_dash, 2, 0) norm2 = np.linalg.norm(np.linalg.solve(self.G, u), 2, 0) mse = self.err_var * np.atleast_2d(1 + norm2 - norm1).T if self.normalize: mse = self.value_std * np.sqrt(mse) if x_.shape[1] == 1: mse = mse.flatten() return y, mse else: return y def jacobian(self, x): """ Predict the gradient of the model at new points. This method evaluates the regression and correlation model at new sample point. Then, it predicts the gradient using the regression coefficients and the training data. **Input:** * **x** (`list` or `numpy array`): Points at which to evaluate the gradient. **Output:** * **grad_x** (`list` or `numpy array`): Gradient of the surrogate model evaluated at the new points. """ x_ = np.atleast_2d(x) if self.normalize: x_ = (x_ - self.sample_mean) / self.sample_std s_ = (self.samples - self.sample_mean) / self.sample_std else: s_ = self.samples fx, jf = self.reg_model(x_) rx, drdx = self.corr_model(x=x_, s=s_, params=self.corr_model_params, dx=True) y_grad = np.einsum('ikj,jm->ik', jf, self.beta) + np.einsum('ijk,jm->ki', drdx.T, self.gamma) if self.normalize: y_grad = y_grad * self.value_std / self.sample_std if x_.shape[1] == 1: y_grad = y_grad.flatten() return y_grad # Defining Regression model (Linear) def _regress(self): if self.reg_model == 'Constant': def r(s): s = np.atleast_2d(s) fx = np.ones([np.size(s, 0), 1]) jf = np.zeros([np.size(s, 0), np.size(s, 1), 1]) return fx, jf elif self.reg_model == 'Linear': def r(s): s = np.atleast_2d(s) fx = np.concatenate((np.ones([np.size(s, 0), 1]), s), 1) jf_b = np.zeros([np.size(s, 0), np.size(s, 1), np.size(s, 1)]) np.einsum('jii->ji', jf_b)[:] = 1 jf = np.concatenate((np.zeros([np.size(s, 0), np.size(s, 1), 1]), jf_b), 2) return fx, jf else: def r(s): s = np.atleast_2d(s) fx = np.zeros([np.size(s, 0), int((np.size(s, 1) + 1) * (np.size(s, 1) + 2) / 2)]) jf = np.zeros( [np.size(s, 0), np.size(s, 1), int((np.size(s, 1) + 1) * (np.size(s, 1) + 2) / 2)]) for i in range(np.size(s, 0)): temp = np.hstack((1, s[i, :])) for j in range(np.size(s, 1)): temp = np.hstack((temp, s[i, j] * s[i, j::])) fx[i, :] = temp # definie H matrix h_ = 0 for j in range(np.size(s, 1)): tmp_ = s[i, j] * np.eye(np.size(s, 1)) t1 = np.zeros([np.size(s, 1), np.size(s, 1)]) t1[j, :] = s[i, :] tmp = tmp_ + t1 if j == 0: h_ = tmp[:, j::] else: h_ = np.hstack((h_, tmp[:, j::])) jf[i, :, :] = np.hstack((np.zeros([np.size(s, 1), 1]), np.eye(np.size(s, 1)), h_)) return fx, jf return r # Defining Correlation model (Gaussian Process) def _corr(self): def check_samples_and_return_stack(x, s): x_, s_ = np.atleast_2d(x), 
np.atleast_2d(s) # Create stack matrix, where each block is x_i with all s stack = np.tile(np.swapaxes(np.atleast_3d(x_), 1, 2), (1, np.size(s_, 0), 1)) - np.tile(s_, ( np.size(x_, 0), 1, 1)) return stack def derivatives(x_, s_, params): stack = check_samples_and_return_stack(x_, s_) # Taking stack and creating array of all thetaj*dij after_parameters = params * abs(stack) # Create matrix of all ones to compare comp_ones = np.ones((np.size(x_, 0), np.size(s_, 0), np.size(s_, 1))) # zeta_matrix has all values min{1,theta*dij} zeta_matrix_ = np.minimum(after_parameters, comp_ones) # Copy zeta_matrix to another matrix that will used to find where derivative should be zero indices = zeta_matrix_.copy() # If value of min{1,theta*dij} is 1, the derivative should be 0. # So, replace all values of 1 with 0, then perform the .astype(bool).astype(int) # operation like in the linear example, so you end up with an array of 1's where # the derivative should be caluclated and 0 where it should be zero indices[indices == 1] = 0 # Create matrix of all |dij| (where non zero) to be used in calculation of dR/dtheta dtheta_derivs_ = indices.astype(bool).astype(int) * abs(stack) # Same as above, but for matrix of all thetaj where non-zero dx_derivs_ = indices.astype(bool).astype(int) * params * np.sign(stack) return zeta_matrix_, dtheta_derivs_, dx_derivs_ if self.corr_model == 'Exponential': def c(x, s, params, dt=False, dx=False): stack = check_samples_and_return_stack(x, s) rx = np.exp(np.sum(-params * abs(stack), axis=2)) if dt: drdt = - abs(stack) * np.transpose(np.tile(rx, (np.size(x, 1), 1, 1)), (1, 2, 0)) return rx, drdt if dx: drdx = - params * np.sign(stack) * np.transpose(np.tile(rx, (np.size(x, 1), 1, 1)), (1, 2, 0)) return rx, drdx return rx elif self.corr_model == 'Gaussian': def c(x, s, params, dt=False, dx=False): stack = check_samples_and_return_stack(x, s) rx = np.exp(np.sum(-params * (stack ** 2), axis=2)) if dt: drdt = -(stack ** 2) * np.transpose(np.tile(rx, (np.size(x, 1), 1, 1)), (1, 2, 0)) return rx, drdt if dx: drdx = - 2 * params * stack * np.transpose(np.tile(rx, (np.size(x, 1), 1, 1)), (1, 2, 0)) return rx, drdx return rx elif self.corr_model == 'Linear': def c(x, s, params, dt=False, dx=False): stack = check_samples_and_return_stack(x, s) # Taking stack and turning each d value into 1-theta*dij after_parameters = 1 - params * abs(stack) # Define matrix of zeros to compare against (not necessary to be defined separately, # but the line is bulky if this isn't defined first, and it is used more than once) comp_zero = np.zeros((np.size(x, 0), np.size(s, 0), np.size(s, 1))) # Compute matrix of max{0,1-theta*d} max_matrix = np.maximum(after_parameters, comp_zero) rx = np.prod(max_matrix, 2) # Create matrix that has 1s where max_matrix is nonzero # -Essentially, this acts as a way to store the indices of where the values are nonzero ones_and_zeros = max_matrix.astype(bool).astype(int) # Set initial derivatives as if all were positive first_dtheta = -abs(stack) first_dx = np.negative(params) * np.sign(stack) # Multiply derivs by ones_and_zeros...this will set the values where the # derivative should be zero to zero, and keep all other values the same drdt = np.multiply(first_dtheta, ones_and_zeros) drdx = np.multiply(first_dx, ones_and_zeros) if dt: # Loop over parameters, shifting max_matrix and multiplying over derivative matrix with each iter for i in range(len(params) - 1): drdt = drdt * np.roll(max_matrix, i + 1, axis=2) return rx, drdt if dx: # Loop over parameters, shifting 
max_matrix and multiplying over derivative matrix with each iter for i in range(len(params) - 1): drdx = drdx * np.roll(max_matrix, i + 1, axis=2) return rx, drdx return rx elif self.corr_model == 'Spherical': def c(x, s, params, dt=False, dx=False): zeta_matrix, dtheta_derivs, dx_derivs = derivatives(x_=x, s_=s, params=params) # Initial matrices containing derivates for all values in array. Note since # dtheta_s and dx_s already accounted for where derivative should be zero, all # that must be done is multiplying the |dij| or thetaj matrix on top of a # matrix of derivates w.r.t zeta (in this case, dzeta = -1.5+1.5zeta**2) drdt = (-1.5 + 1.5 * zeta_matrix ** 2) * dtheta_derivs drdx = (-1.5 + 1.5 * zeta_matrix ** 2) * dx_derivs # Also, create matrix for values of equation, 1 - 1.5zeta + 0.5zeta**3, for loop zeta_function = 1 - 1.5 * zeta_matrix + 0.5 * zeta_matrix ** 3 rx = np.prod(zeta_function, 2) if dt: # Same as previous example, loop over zeta matrix by shifting index for i in range(len(params) - 1): drdt = drdt * np.roll(zeta_function, i + 1, axis=2) return rx, drdt if dx: # Same as previous example, loop over zeta matrix by shifting index for i in range(len(params) - 1): drdx = drdx * np.roll(zeta_function, i + 1, axis=2) return rx, drdx return rx elif self.corr_model == 'Cubic': def c(x, s, params, dt=False, dx=False): zeta_matrix, dtheta_derivs, dx_derivs = derivatives(x_=x, s_=s, params=params) # Initial matrices containing derivates for all values in array. Note since # dtheta_s and dx_s already accounted for where derivative should be zero, all # that must be done is multiplying the |dij| or thetaj matrix on top of a # matrix of derivates w.r.t zeta (in this case, dzeta = -6zeta+6zeta**2) drdt = (-6 * zeta_matrix + 6 * zeta_matrix ** 2) * dtheta_derivs drdx = (-6 * zeta_matrix + 6 * zeta_matrix ** 2) * dx_derivs # Also, create matrix for values of equation, 1 - 3zeta**2 + 2zeta**3, for loop zeta_function_cubic = 1 - 3 * zeta_matrix ** 2 + 2 * zeta_matrix ** 3 rx = np.prod(zeta_function_cubic, 2) if dt: # Same as previous example, loop over zeta matrix by shifting index for i in range(len(params) - 1): drdt = drdt * np.roll(zeta_function_cubic, i + 1, axis=2) return rx, drdt if dx: # Same as previous example, loop over zeta matrix by shifting index for i in range(len(params) - 1): drdx = drdx * np.roll(zeta_function_cubic, i + 1, axis=2) return rx, drdx return rx else: def c(x, s, params, dt=False, dx=False): # x_, s_ = np.atleast_2d(x_), np.atleast_2d(s_) # # Create stack matrix, where each block is x_i with all s # stack = np.tile(np.swapaxes(np.atleast_3d(x_), 1, 2), (1, np.size(s_, 0), 1)) - np.tile(s_, ( # np.size(x_, 0), # 1, 1)) stack = check_samples_and_return_stack(x, s) # In this case, the zeta value is just abs(stack)*parameters, no comparison zeta_matrix = abs(stack) * params # So, dtheta and dx are just |dj| and theta*sgn(dj), respectively dtheta_derivs = abs(stack) # dx_derivs = np.ones((np.size(x,0),np.size(s,0),np.size(s,1)))*parameters dx_derivs = np.sign(stack) * params # Initialize empty sigma and dsigma matrices sigma = np.ones((zeta_matrix.shape[0], zeta_matrix.shape[1], zeta_matrix.shape[2])) dsigma = np.ones((zeta_matrix.shape[0], zeta_matrix.shape[1], zeta_matrix.shape[2])) # Loop over cases to create zeta_matrix and subsequent dR matrices for i in range(zeta_matrix.shape[0]): for j in range(zeta_matrix.shape[1]): for k in range(zeta_matrix.shape[2]): y = zeta_matrix[i, j, k] if 0 <= y <= 0.2: sigma[i, j, k] = 1 - 15 * y ** 2 + 30 * y ** 3 dsigma[i, j, k] 
= -30 * y + 90 * y ** 2 elif 0.2 < y < 1.0: sigma[i, j, k] = 1.25 * (1 - y) ** 3 dsigma[i, j, k] = 3.75 * (1 - y) ** 2 * -1 elif y >= 1: sigma[i, j, k] = 0 dsigma[i, j, k] = 0 rx = np.prod(sigma, 2) if dt: # Initialize derivative matrices incorporating chain rule drdt = dsigma * dtheta_derivs # Loop over to create proper matrices for i in range(len(params) - 1): drdt = drdt * np.roll(sigma, i + 1, axis=2) return rx, drdt if dx: # Initialize derivative matrices incorporating chain rule drdx = dsigma * dx_derivs # Loop over to create proper matrices for i in range(len(params) - 1): drdx = drdx * np.roll(sigma, i + 1, axis=2) return rx, drdx return rx return c
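For reference, the correlation models above all reduce a pairwise-difference "stack" array of shape (n_x, n_s, dim) over its last axis. A minimal standalone sketch of the Gaussian case follows; the helper name and test values are illustrative, not UQpy's API.

import numpy as np

def gaussian_corr(x, s, params):
    x, s = np.atleast_2d(x), np.atleast_2d(s)
    # stack[i, j, k] = x[i, k] - s[j, k], i.e. every x-point minus every s-point
    stack = x[:, None, :] - s[None, :, :]
    return np.exp(np.sum(-params * stack ** 2, axis=2))

x = np.array([[0.0], [0.5]])
s = np.array([[0.0], [1.0]])
print(gaussian_corr(x, s, params=np.array([1.0])))
# approximately [[1.0, 0.3679], [0.7788, 0.7788]]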
48.206299
120
0.55137
4,019
30,611
4.076885
0.149291
0.016845
0.013671
0.009277
0.399634
0.347391
0.300031
0.288251
0.253036
0.240281
0
0.017062
0.329849
30,611
635
121
48.206299
0.781661
0.321061
0
0.282913
0
0
0.044662
0.002281
0
0
0
0
0
1
0.05042
false
0
0.02521
0
0.165266
0.005602
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a807e7ff236eb62741b891f895c51bb1e1e6b60
26,509
py
Python
src/models/model_utils.py
vbelissen/cslr_limsi
87e99e6827e03439ed6b12c5d8361c0d2939cf08
[ "MIT" ]
4
2020-12-16T12:55:31.000Z
2022-02-14T07:54:22.000Z
src/models/model_utils.py
vbelissen/cslr_limsi
87e99e6827e03439ed6b12c5d8361c0d2939cf08
[ "MIT" ]
null
null
null
src/models/model_utils.py
vbelissen/cslr_limsi
87e99e6827e03439ed6b12c5d8361c0d2939cf08
[ "MIT" ]
2
2021-01-13T20:41:38.000Z
2021-03-03T06:16:05.000Z
import sys import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' import tensorflow as tf v0 = tf.__version__[0] if v0 == '2': # For tensorflow 2, keras is included in tf import tensorflow.keras.backend as K from tensorflow.keras import optimizers from tensorflow.keras.callbacks import TensorBoard from tensorflow.keras.layers import LSTM, Dense, TimeDistributed, Bidirectional, Input, Dense, Conv1D, Dropout, GlobalAveragePooling1D, multiply, Flatten, concatenate from tensorflow.python.keras.layers.core import * from tensorflow.keras.models import * from tensorflow.keras.utils import to_categorical, plot_model from tensorflow.keras.applications.resnet50 import ResNet50 from tensorflow.keras.applications.vgg16 import VGG16 from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2 from tensorflow.keras.preprocessing.image import load_img, img_to_array from tensorflow.keras.applications.resnet50 import preprocess_input as preprocess_input_ResNet50 from tensorflow.keras.applications.vgg16 import preprocess_input as preprocess_input_VGG16 from tensorflow.keras.applications.mobilenet import preprocess_input as preprocess_input_MobileNet elif v0 == '1': #For tensorflow 1.2.0 import keras.backend as K from keras import optimizers from keras.callbacks import TensorBoard from keras.layers import LSTM, Dense, TimeDistributed, Bidirectional, Input, Dense, Conv1D, Dropout, GlobalAveragePooling1D, merge, Flatten from keras.layers.core import * from keras.models import * from keras.utils import to_categorical, plot_model from keras.applications.resnet50 import ResNet50 from keras.applications.vgg16 import VGG16 from keras.applications.mobilenet import MobileNet from keras.preprocessing.image import load_img, img_to_array from keras.applications.resnet50 import preprocess_input as preprocess_input_ResNet50 from keras.applications.vgg16 import preprocess_input as preprocess_input_VGG16 from keras.applications.mobilenet import preprocess_input as preprocess_input_MobileNet else: sys.exit('Tensorflow version should be 1.X or 2.X') def recallK(y_true, y_pred): # works with non binary data as well as binary y_true_class = K.argmax(y_true, axis=-1) y_pred_class = K.argmax(y_pred, axis=-1) TP = K.cast(K.equal(y_true_class, y_pred_class), dtype='int32') nonzero_true = K.cast(K.not_equal(y_true_class, 0), dtype='int32') return K.sum(TP*nonzero_true)/K.maximum(K.sum(nonzero_true),1) def precisionK(y_true, y_pred): # works with non binary data as well as binary y_true_class = K.argmax(y_true, axis=-1) y_pred_class = K.argmax(y_pred, axis=-1) TP = K.cast(K.equal(y_true_class, y_pred_class), dtype='int32') nonzero_pred = K.cast(K.not_equal(y_pred_class, 0), dtype='int32') return K.sum(TP*nonzero_pred)/K.maximum(K.sum(nonzero_pred),1) def f1K(y_true, y_pred): # works with non binary data as well as binary precision = precisionK(y_true, y_pred) recall = recallK(y_true, y_pred) return 2*((precision*recall)/(precision+recall+K.epsilon())) def recallKbinary(y_true, y_pred): # binary only y_true_class = K.argmax(y_true, axis=-1) y_pred_class = K.argmax(y_pred, axis=-1) TP = y_true_class * y_pred_class FP = (1-y_true_class) * y_pred_class FN = y_true_class * (1-y_pred_class) return K.sum(TP)/K.maximum(K.sum(TP+FN),1) def precisionKbinary(y_true, y_pred): # binary only y_true_class = K.argmax(y_true, axis=-1) y_pred_class = K.argmax(y_pred, axis=-1) TP = y_true_class * y_pred_class FP = (1-y_true_class) * y_pred_class FN = y_true_class * (1-y_pred_class) return K.sum(TP)/K.maximum(K.sum(TP+FP),1) def f1Kbinary(y_true, 
y_pred): # binary only precision = precisionKbinary(y_true, y_pred) recall = recallKbinary(y_true, y_pred) return 2*((precision*recall)/(precision+recall+K.epsilon())) def accYanovichK(y_true, y_pred): """ Not exactly Yanovich acc but close """ y_true_class = K.argmax(y_true, axis=-1) y_pred_class = K.argmax(y_pred, axis=-1) ignore_mask = K.cast(K.not_equal(y_true_class, 0), 'int32') matches = K.cast(K.equal(y_true_class, y_pred_class), 'int32') * ignore_mask accuracy = K.sum(matches) / K.maximum(K.sum(ignore_mask), 1) return accuracy def attention_timewise(inputs, time_steps, single=False, attention_layer_descriptor=''): """ Timewise attention block (Keras API). Applies this block to inputs (inputs.shape = (batch_size, time_steps, input_dim)). Inputs: inputs (Keras layer) time_steps single (bool): if True, attention is shared across features attention_layer_descriptor (string): describes where attention is applied Output: A Keras layer """ input_dim = int(inputs.shape[-1]) a = Permute((2, 1))(inputs) a = Reshape((input_dim, time_steps))(a) # this line is not useful. It's just to know which dimension is what. a = Dense(time_steps, activation='softmax')(a) if single: a = Lambda(lambda x: K.mean(x, axis=1), name='dim_reduction_'+attention_layer_descriptor)(a) a = RepeatVector(input_dim)(a) a_probs = Permute((2, 1), name='attention_vec_'+attention_layer_descriptor)(a) if v0 == '2': output_attention_mul = multiply([inputs, a_probs], name='attention_mul_timewise_'+attention_layer_descriptor) else: output_attention_mul = merge([inputs, a_probs], name='attention_mul_timewise_'+attention_layer_descriptor, mode='mul') return output_attention_mul def attention_featurewise(inputs, single=False, attention_layer_descriptor=''): """ Featurewise attention block (Keras API). Applies this block to inputs (inputs.shape = (batch_size, time_steps, input_dim)). Inputs: inputs (Keras layer) single (bool): if True, attention is shared across timesteps attention_layer_descriptor (string): describes where attention is applied Output: A Keras layer """ input_dim = int(inputs.shape[-1]) a = Dense(input_dim, activation='softmax')(inputs) if single: a = Lambda(lambda x: K.mean(x, axis=1), name='dim_reduction_'+attention_layer_descriptor)(a) a = RepeatVector(input_dim)(a) if v0 == '2': output_attention_mul = multiply([inputs, a], name='attention_mul_featurewise_'+attention_layer_descriptor) else: output_attention_mul = merge([inputs, a], name='attention_mul_featurewise_'+attention_layer_descriptor, mode='mul') return output_attention_mul def get_model(output_names, output_classes, output_weights=[], conv=True, conv_filt=200, conv_ker=3, conv_strides=1, rnn_number=2, rnn_type='lstm', rnn_hidden_units=55, dropout=0, att_in_rnn=False, att_in_rnn_single = False, att_in_rnn_type='timewise', att_out_rnn=False, att_out_rnn_single=False, att_out_rnn_type='timewise', rnn_return_sequences=True, classif_local=True, mlp_layers_number=0, mlp_layers_size=30, optimizer='rms', metrics=['acc'], learning_rate=0.005, time_steps=100, features_number=420, features_type='features', img_height=224, img_width=224, cnnType='resnet', cnnFirstTrainedLayer=165, cnnReduceDim=0, print_summary=True): """ Keras recurrent neural network model builder. It can include a convolutional layer, attention on the input, several RNN layers, attention on RNN output, additional dense layers. 
Inputs: output_names: list of outputs (strings) output_classes: list of number of classes of each output type output_weights: list of weights for each_output conv (bool): if True, applies convolution on input conv_filt: number of convolution filters conv_ker: size of convolution kernel conv_strides: size of convolution strides rnn_number: number of recurrent layers rnn_type: type of recurrent layers (string) rnn_hidden_units: number of hidden units dropout: how much dropout (0 to 1) att_in_rnn: if True, applies attention layer before recurrent layers att_in_rnn_single: single (shared) attention layer or not att_in_rnn_type (string): timewise or featurewise attention layer att_out_rnn: if True, applies attention layer after recurrent layers att_out_rnn_single: single (shared) attention layer or not att_out_rnn_type (string): timewise or featurewise attention layer rnn_return_sequences: if False, only last timestep of recurrent layers is returned classif_local (bool): whether classification is for each timestep (local) of globally for the sequence mlp_layers_number: number of additional dense layers mlp_layers_size: size of additional dense layers optimizer: gradient optimizer type (string) learning_rate: learning rate (float) time_steps: length of sequences (int) features_number: number of features (int) features_type: 'features' (1D vector of features), 'frames' (for a CNN processing) or 'both' img_height and img_width: size of CNN input cnnType: 'resnet', 'vgg' or 'mobilenet' cnnFirstTrainedLayer: index of first trainable layer in CNN (int) cnnReduceDim: if greater than 0, size of CNN flattened output is reduced to cnnReduceDim print_summary (bool) Output: A Keras model """ # input if features_type == 'features' or features_type == 'both': main_input_features = Input(shape=(time_steps, features_number)) input_transfo_features = main_input_features if features_type == 'frames' or features_type == 'both': main_input_frames = Input(shape=(time_steps, img_height, img_width, 3)) input_transfo_frames = main_input_frames if features_type != 'features' and features_type != 'frames' and features_type != 'both': sys.exit('Invalid features type') if features_type != 'frames': # convolution input if conv: input_transfo_features = Conv1D(filters=conv_filt, kernel_size=conv_ker, strides=conv_strides, padding='same', activation='relu')(input_transfo_features) # attention before RNNs if att_in_rnn: if att_in_rnn_type == 'timewise': input_transfo_features = attention_timewise(input_transfo_features, time_steps=time_steps, single=att_in_rnn_single, attention_layer_descriptor='before_rnn') elif att_in_rnn_type == 'featurewise': input_transfo_features = attention_featurewise(input_transfo_features, single=att_in_rnn_single, attention_layer_descriptor='before_rnn') else: sys.exit('Invalid attention type') if features_type != 'features': if cnnType=='resnet': cnnBackbone = ResNet50(include_top=False, weights="imagenet", pooling='max', input_shape=(img_height,img_width,3)) elif cnnType=='vgg': cnnBackbone = VGG16(include_top=False, weights="imagenet", pooling='max', input_shape=(img_height,img_width,3)) elif cnnType=='mobilenet': cnnBackbone = MobileNet(include_top=False, weights="imagenet", pooling='max', input_shape=(img_height,img_width,3)) else: sys.exit('Invalid CNN network model') input_transfo_frames = TimeDistributed(cnnBackbone)(input_transfo_frames) if cnnReduceDim > 0: input_transfo_frames = TimeDistributed(Dense(cnnReduceDim, activation='relu'))(input_transfo_frames) #denseResnet = [200] #for i in 
range(len(denseResnet)): # input_transfo = TimeDistributed(Dense(denseResnet[i], activation='relu'))(input_transfo) #input_transfo = TimeDistributed(Flatten())(input_transfo) if cnnFirstTrainedLayer > 0: for layer in cnnBackbone.layers[:cnnFirstTrainedLayer]: layer.trainable = False for layer in cnnBackbone.layers[cnnFirstTrainedLayer:]: layer.trainable = True #for i, layer in enumerate(cnnBackbone.layers): # print(i, layer.name, layer.trainable) if features_type == 'features': input_transfo = input_transfo_features elif features_type == 'frames': input_transfo = input_transfo_frames elif features_type == 'both': if v0 == '2': input_transfo = concatenate([input_transfo_features, input_transfo_frames], name='merge_features_frames') else: input_transfo = merge([input_transfo_features, input_transfo_frames], name='merge_features_frames', mode='concat') else: sys.exit('Invalid features type') # recurrent layers if rnn_number == 1: if rnn_type == 'lstm': rnn_n = Bidirectional(LSTM(rnn_hidden_units, return_sequences=rnn_return_sequences, dropout=dropout, recurrent_dropout=dropout))(input_transfo) else: sys.exit('Invalid RNN type') elif rnn_number == 2: if rnn_type == 'lstm': rnn_1 = Bidirectional(LSTM(rnn_hidden_units, return_sequences=True, dropout=dropout, recurrent_dropout=dropout))(input_transfo) rnn_n = Bidirectional(LSTM(rnn_hidden_units, return_sequences=rnn_return_sequences, dropout=dropout, recurrent_dropout=dropout))(rnn_1) else: sys.exit('Invalid RNN type') elif rnn_number >= 3: if rnn_type == 'lstm': rnn_i = Bidirectional(LSTM(rnn_hidden_units, return_sequences=True, dropout=dropout, recurrent_dropout=dropout))(input_transfo) else: sys.exit('Invalid RNN type') for i_rnn in range(1, rnn_number - 1): if rnn_type == 'lstm': rnn_i = Bidirectional(LSTM(rnn_hidden_units, return_sequences=True, dropout=dropout, recurrent_dropout=dropout))(rnn_i) else: sys.exit('Invalid RNN type') if rnn_type == 'lstm': rnn_n = Bidirectional(LSTM(rnn_hidden_units, return_sequences=rnn_return_sequences, dropout=dropout, recurrent_dropout=dropout))(rnn_i) else: sys.exit('Invalid RNN type') else: sys.exit('Invalid RNN number') # attention after RNNs if att_out_rnn: if rnn_return_sequences: if att_out_rnn_type == 'timewise': rnn_n = attention_timewise(rnn_n, time_steps=time_steps, single=att_out_rnn_single, attention_layer_descriptor='after_rnn') elif att_out_rnn_type == 'featurewise': rnn_n = attention_featurewise(rnn_n, single=att_out_rnn_single, attention_layer_descriptor='after_rnn') else: sys.exit('Invalid attention type') elif att_out_rnn_type == 'featurewise': rnn_n = attention_featurewise(rnn_n, single=att_out_rnn_single, attention_layer_descriptor='after_rnn') elif att_out_rnn_type == 'timewise': sys.exit('Cannot apply attention after RNN when RNN does not return sequences') else: sys.exit('Invalid attention type') # Final layers output_intermed = rnn_n output_number = len(output_names) output_intermed_list = [] output_list = [] for i_output in range(output_number): output_intermed_list.append(output_intermed) if rnn_return_sequences and not classif_local: for i_output in range(output_number): output_intermed_list[i_output] = GlobalAveragePooling1D()(output_intermed_list[i_output]) if rnn_return_sequences and classif_local: if mlp_layers_number > 0: for i_add in range(mlp_layers_number): for i_output in range(output_number): output_intermed_list[i_output] = TimeDistributed(Dense(mlp_layers_size, activation='relu'))(output_intermed_list[i_output]) if dropout > 0: for i_output in range(output_number): 
output_intermed_list[i_output] = TimeDistributed(Dropout(dropout))(output_intermed_list[i_output]) for i_output in range(output_number): output_list.append(TimeDistributed(Dense(output_classes[i_output], activation='softmax', name='output_' + output_names[i_output]))(output_intermed_list[i_output])) else: if mlp_layers_number > 0: for i_add in range(mlp_layers_number): for i_output in range(output_number): output_intermed_list[i_output] = Dense(mlp_layers_size, activation='relu')(output_intermed_list[i_output]) if dropout > 0: for i_output in range(output_number): output_intermed_list[i_output] = Dropout(dropout)(output_intermed_list[i_output]) for i_output in range(output_number): output_list.append(Dense(output_classes[i_output], activation='softmax', name='output_' + output_names[i_output])(output_intermed_list[i_output])) # Create model if features_type == 'features': model = Model(inputs=main_input_features, outputs=output_list) elif features_type == 'frames': model = Model(inputs=main_input_frames, outputs=output_list) elif features_type == 'both': model = Model(inputs=[main_input_features, main_input_frames], outputs=output_list) else: sys.exit('Invalid features type') # framewise weights: if classif_local: weight_mode_sequence = 'temporal' else: weight_mode_sequence = None if optimizer == 'sgd': opt = optimizers.SGD(lr=learning_rate, decay=1e-6, momentum=0.9, nesterov=True) elif optimizer == 'rms': opt = optimizers.RMSprop(lr=learning_rate, rho=0.9, epsilon=None, decay=0.0) elif optimizer == 'ada': opt = optimizers.Adagrad(lr=learning_rate, epsilon=None, decay=0.0) else: sys.exit('Invalid gradient optimizer') if output_weights == []: model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=metrics, sample_weight_mode=weight_mode_sequence) else: model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=metrics, loss_weights=output_weights, sample_weight_mode=weight_mode_sequence) if print_summary: model.summary() return model def model_predictions(model, features, features_type, seq_length, categories_per_output, img_width=224, img_height=224, cnnType='resnet', batch_size=0): ''' Used to make predictions, especially useful when input is mixed with both preprocessed features and frames Inputs: model: a Keras model features : [X_features, X_frames] X_features: a numpy array [1, total_time_steps, features_number] X_frames: a list of frame paths features_type: 'features', 'frames' or 'both' seq_length categories_per_output: a list of number of categories for each output batch_size: if 0, predictions are sequence per sequence if >0, predictions are run by batches Outputs: predictions ''' N_outputs = len(categories_per_output) if features_type == 'frames': total_length_round = (len(features[1])//seq_length)*seq_length elif features_type == 'features' or features_type == 'both': total_length_round = (features[0].shape[1]//seq_length)*seq_length feature_number = features[0].shape[2] else: sys.exit('Wrong features type') if features_type == 'features' or features_type == 'both': X_features = features[0][:,:total_length_round,:].reshape(-1, seq_length, feature_number) #batch_size_time = np.min([batch_size*seq_length, total_length_round]) if batch_size == 0: if features_type == 'frames' or features_type == 'both': X_frames = np.zeros((1, total_length_round, img_width, img_height, 3)) for iFrame in range(total_length_round): if cnnType=='resnet': X_frames[0, iFrame, :, :, :] = preprocess_input_ResNet50(img_to_array(load_img(features[1][iFrame], target_size=(img_width, 
img_height)))) elif cnnType=='vgg': X_frames[0, iFrame, :, :, :] = preprocess_input_VGG16(img_to_array(load_img(features[1][iFrame], target_size=(img_width, img_height)))) elif cnnType=='mobilenet': X_frames[0, iFrame, :, :, :] = preprocess_input_MobileNet(img_to_array(load_img(features[1][iFrame], target_size=(img_width, img_height)))) else: sys.exit('Invalid CNN network model') X_frames = X_frames.reshape(-1, seq_length, img_width, img_height, 3) if features_type == 'features': output = model.predict(X_features) elif features_type == 'frames': output = model.predict(X_frames) else: #features_type == 'both': output = model.predict([X_features, X_frames]) else: if N_outputs > 1: output = [np.zeros((1, total_length_round, categories_per_output[i])) for i in range(N_outputs)] else: output = np.zeros((1, total_length_round, categories_per_output[0])) N_full_batches = total_length_round//(batch_size*seq_length) remainding_length = total_length_round - N_full_batches*batch_size*seq_length # Full batches: for i_batch in range(N_full_batches): i_seq_start = i_batch*batch_size i_seq_end = (i_batch+1)*batch_size i_frame_start = i_seq_start*seq_length i_frame_end = i_seq_end*seq_length if features_type == 'frames' or features_type == 'both': X_frames_batch = np.zeros((1, batch_size*seq_length, img_width, img_height, 3)) for iFrame in range(i_frame_start, i_frame_end): if cnnType=='resnet': X_frames_batch[0, iFrame-i_frame_start, :, :, :] = preprocess_input_ResNet50(img_to_array(load_img(features[1][iFrame], target_size=(img_width, img_height)))) elif cnnType=='vgg': X_frames_batch[0, iFrame-i_frame_start, :, :, :] = preprocess_input_VGG16(img_to_array(load_img(features[1][iFrame], target_size=(img_width, img_height)))) elif cnnType=='mobilenet': X_frames_batch[0, iFrame-i_frame_start, :, :, :] = preprocess_input_MobileNet(img_to_array(load_img(features[1][iFrame], target_size=(img_width, img_height)))) else: sys.exit('Invalid CNN network model') X_frames_batch = X_frames_batch.reshape(-1, seq_length, img_width, img_height, 3) if features_type == 'features': pred = model.predict(X_features[i_seq_start:i_seq_end, :, :]) elif features_type == 'frames': pred = model.predict(X_frames_batch) else:#features_type == 'both': pred = model.predict([X_features[i_seq_start:i_seq_end, :, :], X_frames_batch]) if N_outputs > 1: for i_out in range(N_outputs): output[i_out][0,i_frame_start:i_frame_end,:] = pred[i_out].reshape(1, -1, categories_per_output[i_out]) else: output[0,i_frame_start:i_frame_end,:] = pred.reshape(1, -1, categories_per_output[0]) # Last (incomplete) batch: i_seq_start = N_full_batches*batch_size i_seq_end = total_length_round//seq_length i_frame_start = i_seq_start*seq_length i_frame_end = i_seq_end*seq_length if features_type == 'frames' or features_type == 'both': X_frames_batch = np.zeros((1, remainding_length, img_width, img_height, 3)) for iFrame in range(i_frame_start, i_frame_end): if cnnType=='resnet': X_frames_batch[0, iFrame-i_frame_start, :, :, :] = preprocess_input_ResNet50(img_to_array(load_img(features[1][iFrame], target_size=(img_width, img_height)))) elif cnnType=='vgg': X_frames_batch[0, iFrame-i_frame_start, :, :, :] = preprocess_input_VGG16(img_to_array(load_img(features[1][iFrame], target_size=(img_width, img_height)))) elif cnnType=='mobilenet': X_frames_batch[0, iFrame-i_frame_start, :, :, :] = preprocess_input_MobileNet(img_to_array(load_img(features[1][iFrame], target_size=(img_width, img_height)))) else: sys.exit('Invalid CNN network model') X_frames_batch = 
X_frames_batch.reshape(-1, seq_length, img_width, img_height, 3) if features_type == 'features': pred = model.predict(X_features[i_seq_start:, :, :]) elif features_type == 'frames': pred = model.predict(X_frames_batch) else:#features_type == 'both': pred = model.predict([X_features[i_seq_start:, :, :], X_frames_batch]) if N_outputs > 1: for i_out in range(N_outputs): output[i_out][0,i_frame_start:i_frame_end,:] = pred[i_out].reshape(1, -1, categories_per_output[i_out]) else: output[0,i_frame_start:i_frame_end,:] = pred.reshape(1, -1, categories_per_output[0]) if N_outputs > 1: for i_out in range(N_outputs): output[i_out] = output[i_out].reshape(-1, seq_length, categories_per_output[i_out]) else: output = output.reshape(-1, seq_length, categories_per_output[0]) return output
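For reference, the masked recall computed by recallK above can be reproduced outside the Keras backend. A minimal numpy sketch (class 0 is treated as the background/blank class and excluded from the denominator):

import numpy as np

def recall_np(y_true, y_pred):
    t = y_true.argmax(-1)
    p = y_pred.argmax(-1)
    nonzero = t != 0
    return ((t == p) & nonzero).sum() / max(nonzero.sum(), 1)

y_true = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])   # classes 1, 0, 2
y_pred = np.array([[0.1, 0.8, 0.1], [0.9, 0.05, 0.05], [0.5, 0.4, 0.1]])
print(recall_np(y_true, y_pred))  # 0.5: one of the two non-background labels matched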
48.640367
183
0.655966
3,406
26,509
4.815913
0.097769
0.031458
0.012071
0.017558
0.649637
0.592087
0.541364
0.524172
0.492593
0.464
0
0.013043
0.248029
26,509
544
184
48.729779
0.809822
0.157758
0
0.436693
0
0
0.060701
0.008632
0
0
0
0
0
1
0.028424
false
0
0.080103
0
0.136951
0.005168
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a808d33f099768973646ffb752ae5d9366a6b4a
3,109
py
Python
benchmarks/benchmark.py
kbrose/PyAthena
bafad4999af601e4d94cfdbbabca87c06413db65
[ "MIT" ]
null
null
null
benchmarks/benchmark.py
kbrose/PyAthena
bafad4999af601e4d94cfdbbabca87c06413db65
[ "MIT" ]
null
null
null
benchmarks/benchmark.py
kbrose/PyAthena
bafad4999af601e4d94cfdbbabca87c06413db65
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import sys
import time

from pyathena import connect
from pyathenajdbc import connect as jdbc_connect
from pyathena.pandas_cursor import PandasCursor

LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.StreamHandler(sys.stdout))
LOGGER.setLevel(logging.INFO)

S3_STAGING_DIR = 's3://YOUR_BUCKET/path/to/'
REGION_NAME = 'us-west-2'
COUNT = 5

SMALL_RESULT_SET_QUERY = """
SELECT * FROM pypi_downloads_20180915
WHERE file.project = 'pyhive'
"""
MEDIUM_RESULT_SET_QUERY = """
SELECT * FROM pypi_downloads_20180915
WHERE file.project = 'requests'
"""
LARGE_RESULT_SET_QUERY = """
SELECT * FROM pypi_downloads_20180915
WHERE file.project = 'pip'
"""


def run_pyathen_pandas_cursor(query):
    LOGGER.info('PyAthena PandasCursor =========================')
    cursor = connect(s3_staging_dir=S3_STAGING_DIR,
                     region_name=REGION_NAME,
                     cursor_class=PandasCursor).cursor()
    avgs = []
    for i in range(0, COUNT):
        start = time.time()
        df = cursor.execute(query).as_pandas()
        end = time.time()
        elapsed = end - start
        LOGGER.info('loop:{0}\tcount:{1}\telapsed:{2}'.format(i, df.shape[0], elapsed))
        avgs.append(elapsed)
    avg = sum(avgs) / COUNT
    LOGGER.info('Avg: {0}'.format(avg))
    LOGGER.info('===============================================')


def run_pyathena_cursor(query):
    LOGGER.info('PyAthena Cursor ===============================')
    cursor = connect(s3_staging_dir=S3_STAGING_DIR,
                     region_name=REGION_NAME).cursor()
    avgs = []
    for i in range(0, COUNT):
        start = time.time()
        result = cursor.execute(query).fetchall()
        end = time.time()
        elapsed = end - start
        LOGGER.info('loop:{0}\tcount:{1}\telapsed:{2}'.format(i, len(result), elapsed))
        avgs.append(elapsed)
    avg = sum(avgs) / COUNT
    LOGGER.info('Avg: {0}'.format(avg))
    LOGGER.info('===============================================')


def run_pyathenajdbc_cursor(query):
    LOGGER.info('PyAthenaJDBC Cursor ===========================')
    cursor = jdbc_connect(s3_staging_dir=S3_STAGING_DIR,
                          region_name=REGION_NAME).cursor()
    avgs = []
    for i in range(0, COUNT):
        start = time.time()
        cursor.execute(query)
        result = cursor.fetchall()
        end = time.time()
        elapsed = end - start
        LOGGER.info('loop:{0}\tcount:{1}\telapsed:{2}'.format(i, len(result), elapsed))
        avgs.append(elapsed)
    avg = sum(avgs) / COUNT
    LOGGER.info('Avg: {0}'.format(avg))
    LOGGER.info('===============================================')


def main():
    for query in [SMALL_RESULT_SET_QUERY, MEDIUM_RESULT_SET_QUERY, LARGE_RESULT_SET_QUERY]:
        LOGGER.info(query)
        run_pyathenajdbc_cursor(query)
        LOGGER.info('')
        run_pyathena_cursor(query)
        LOGGER.info('')
        run_pyathen_pandas_cursor(query)
        LOGGER.info('')


if __name__ == '__main__':
    main()
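Each runner above inlines the same time-and-average loop. For illustration only (not part of the original script), the pattern factors into a small helper:

import time

def time_average(fn, count=5):
    elapsed = []
    for _ in range(count):
        start = time.time()
        fn()                                  # the operation being benchmarked
        elapsed.append(time.time() - start)
    return sum(elapsed) / count

print(time_average(lambda: sum(range(1_000_000))))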
30.782178
87
0.5899
362
3,109
4.853591
0.234807
0.091064
0.047809
0.071713
0.655094
0.645987
0.571998
0.52988
0.52988
0.52988
0
0.021294
0.214538
3,109
100
88
31.09
0.698198
0.013831
0
0.5
0
0
0.212141
0.135117
0
0
0
0
0
1
0.047619
false
0
0.071429
0
0.119048
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a81c503a9fafa48f926a7f5f32fd6d83b22f602
659
py
Python
abc145/abc145_c.py
Vermee81/practice-coding-contests
78aada60fa75f208ee0eef337b33b27b1c260d18
[ "MIT" ]
null
null
null
abc145/abc145_c.py
Vermee81/practice-coding-contests
78aada60fa75f208ee0eef337b33b27b1c260d18
[ "MIT" ]
null
null
null
abc145/abc145_c.py
Vermee81/practice-coding-contests
78aada60fa75f208ee0eef337b33b27b1c260d18
[ "MIT" ]
null
null
null
# https://atcoder.jp/contests/abc145/tasks/abc145_c
from itertools import permutations
from math import sqrt

N = int(input())
towns = []
for _ in range(N):
    x, y = [int(i) for i in input().split()]
    towns.append((x, y))

towns_distance = [[0 for _ in range(N)] for _ in range(N)]
for i in range(N):
    for j in range(N):
        if i == j:
            continue
        towns_distance[i][j] = sqrt((towns[i][0] - towns[j][0]) ** 2 + (towns[i][1] - towns[j][1]) ** 2)

patterns = list(permutations([i for i in range(N)]))
total = 0
for m in patterns:
    for i in range(N - 1):
        total += towns_distance[m[i]][m[i + 1]]
print(round(total / len(patterns), 10))
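The brute force over all N! permutations can be avoided: each ordered pair (i, j) is adjacent in exactly (N-1)! of the N! permutations, so the expected path length is the sum of all ordered pairwise distances divided by N. A minimal O(N^2) alternative, assuming the same input format as above:

from math import dist  # Python 3.8+

n = int(input())
pts = [tuple(map(int, input().split())) for _ in range(n)]
# d(p, p) = 0, so summing over all ordered pairs needs no i != j guard
total = sum(dist(p, q) for p in pts for q in pts)
print(total / n)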
31.380952
100
0.603945
116
659
3.37069
0.344828
0.12532
0.143223
0.084399
0.148338
0
0
0
0
0
0
0.034549
0.209408
659
20
101
32.95
0.715931
0.074355
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.105263
0
0.105263
0.052632
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a85b961fca159a16bd7b508e3f04a5f010bbd41
3,349
py
Python
tests/common/data_access/bucket_dao_test.py
pombredanne/forseti-security
68a9a88243460065e00b6c131b3d9abd0331fb37
[ "Apache-2.0" ]
1
2018-03-26T08:15:21.000Z
2018-03-26T08:15:21.000Z
tests/common/data_access/bucket_dao_test.py
pombredanne/forseti-security
68a9a88243460065e00b6c131b3d9abd0331fb37
[ "Apache-2.0" ]
null
null
null
tests/common/data_access/bucket_dao_test.py
pombredanne/forseti-security
68a9a88243460065e00b6c131b3d9abd0331fb37
[ "Apache-2.0" ]
null
null
null
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests the BucketDao."""

import json

from tests.unittest_utils import ForsetiTestCase
import mock
import unittest

from MySQLdb import DataError

from google.cloud.security.common.data_access import _db_connector
from google.cloud.security.common.data_access import errors
from google.cloud.security.common.data_access import bucket_dao
from google.cloud.security.common.data_access.sql_queries import select_data


class BucketDaoTest(ForsetiTestCase):
    """Tests for the BucketDao."""

    FAKE_PROJECT_NUMBERS = ['11111']

    @mock.patch.object(_db_connector.DbConnector, '__init__', autospec=True)
    def setUp(self, mock_db_connector):
        mock_db_connector.return_value = None
        self.bucket_dao = bucket_dao.BucketDao()
        self.fetch_mock = mock.MagicMock()
        self.bucket_dao.execute_sql_with_fetch = self.fetch_mock
        self.resource_name = 'buckets_acl'
        self.fake_timestamp = '12345'

    def test_get_buckets_by_project_number(self):
        """Test get_buckets_by_project_number()."""
        fake_query = select_data.BUCKETS_BY_PROJECT_ID.format(
            self.fake_timestamp, self.FAKE_PROJECT_NUMBERS[0])
        self.bucket_dao.get_buckets_by_project_number(
            self.resource_name,
            self.fake_timestamp,
            self.FAKE_PROJECT_NUMBERS[0])
        self.fetch_mock.assert_called_once_with(
            self.resource_name, fake_query, (self.FAKE_PROJECT_NUMBERS[0],))

    def test_get_project_numbers_raises_error(self):
        """Test get_project_numbers() raises a MySQLError."""
        self.fetch_mock.side_effect = (
            errors.MySQLError(self.resource_name, mock.MagicMock()))
        with self.assertRaises(errors.MySQLError):
            self.bucket_dao.get_buckets_by_project_number(
                self.resource_name,
                self.fake_timestamp,
                self.FAKE_PROJECT_NUMBERS[0])

    def test_get_buckets_acls(self):
        """Test get_buckets_acls()."""
        fake_query_acls = select_data.BUCKET_ACLS.format(self.fake_timestamp)
        self.bucket_dao.get_buckets_acls(
            self.resource_name, self.fake_timestamp)
        self.fetch_mock.assert_called_once_with(
            self.resource_name, fake_query_acls, None)

    def test_get_raw_buckets(self):
        """Test get_raw_buckets()."""
        fake_return = [{'bucket_id': 'bucketid', 'acl': {"foo": 1}}]
        self.fetch_mock.return_value = fake_return
        fake_query = select_data.RAW_BUCKETS.format(self.fake_timestamp)
        actual = self.bucket_dao.get_raw_buckets(self.fake_timestamp)
        self.assertEquals(fake_return, actual)


if __name__ == '__main__':
    unittest.main()
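The tests above swap execute_sql_with_fetch for a MagicMock so no database is needed. The core stub-and-assert pattern in isolation, using only the standard library's unittest.mock:

from unittest import mock

dao = mock.MagicMock()
dao.execute_sql_with_fetch.return_value = [{'bucket_id': 'b1'}]  # canned result
rows = dao.execute_sql_with_fetch('buckets_acl', 'SELECT 1', None)
dao.execute_sql_with_fetch.assert_called_once_with('buckets_acl', 'SELECT 1', None)
print(rows)  # [{'bucket_id': 'b1'}]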
35.62766
76
0.709167
437
3,349
5.12357
0.324943
0.042876
0.060741
0.056275
0.337204
0.292988
0.265297
0.23314
0.15364
0.135775
0
0.008614
0.202747
3,349
93
77
36.010753
0.829963
0.229322
0
0.283019
0
0
0.023641
0
0
0
0
0
0.075472
1
0.09434
false
0
0.169811
0
0.301887
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a868e03e70c8daef33e280e306ba17fb8100a4c
5,572
py
Python
tests/exp_whenInterestingPatten/PatternEventNotification/analytics.py
wangzhezhe/observerchain
faa8fb9d845a2720704538f01e1e7597083d4510
[ "MIT" ]
null
null
null
tests/exp_whenInterestingPatten/PatternEventNotification/analytics.py
wangzhezhe/observerchain
faa8fb9d845a2720704538f01e1e7597083d4510
[ "MIT" ]
null
null
null
tests/exp_whenInterestingPatten/PatternEventNotification/analytics.py
wangzhezhe/observerchain
faa8fb9d845a2720704538f01e1e7597083d4510
[ "MIT" ]
null
null
null
from mpi4py import MPI
import numpy as np
import dspaceswrapper.dataspaces as dataspaces
import ctypes
import os
import time
import math
import timeit
import sys

sys.path.append('/project1/parashar-001/zw241/software/eventDrivenWorkflow/src/publishclient/pythonclient')
import pubsub as pubsubclient

comm = MPI.COMM_WORLD
rank = comm.Get_rank()


def sendEventToPubSub(ts):
    addrList = pubsubclient.getServerAddr()
    print(addrList)
    addr = addrList[0]
    eventList = ["dataPattern_1"]
    # this should be deleted
    clientId = "test" + "_" + str(ts)
    metainfo = "GRID[<-1,-1>:<-1,-1>]%TS[" + str(ts) + "]"
    matchtype = "NAME"
    pubsubclient.publishEventList(addr, eventList, clientId, metainfo, matchtype)


# input: the coordinates of a point; returns the index of its grid cell in the flattened array
def getIndex(px, py, pz):
    # TODO should handle all boundary cases
    # only for the lower case
    r = 15
    gridnum = 15
    deltar = 1.0 * r / gridnum
    if (px < 0 or py < 0 or pz < 0 or px > gridnum * deltar or py > gridnum * deltar or pz > gridnum * deltar):
        # print "out of the box"
        # print [px, py, pz]
        return -1
    gnumx = math.floor((px - 0) / deltar)
    gnumy = math.floor((py - 0) / deltar)
    gnumz = math.floor((pz - 0) / deltar)
    index = int(gnumz * gridnum * gridnum + gnumy * gridnum + gnumx)
    return index


# NOTE: `iteration` and `ifFirstHappen` are not defined in this scope; this helper appears unused.
def checkAndPublishEvent(gridDataArray_p1, gridDataArray_p2):
    ifTargetEventHappen = True
    massOriginInterest = [6, 0, 6]
    targetValue = 7.5
    massR = 4
    # put the analysis into the simulation part
    for i in range(massOriginInterest[0], massOriginInterest[0] + massR):
        for j in range(massOriginInterest[1], massOriginInterest[1] + massR):
            for k in range(massOriginInterest[2], massOriginInterest[2] + massR):
                # print "index i j k (%d %d %d)" % (i, j, k)
                # print nparray[i][j][k]
                index = getIndex(i, j, k)
                if (gridDataArray_p1[index] != targetValue):
                    ifTargetEventHappen = False
                    break
    if (ifTargetEventHappen == True):
        print(iteration)
        # send publish event
        detecttime = timeit.default_timer()
        print(detecttime)
        print("publish to pub/sub broker")
        # sendEventToPubSub(iteration)
        ifFirstHappen = True
    return


initp = 1.5
targetValue = 7.5


def checkDataPattern(gridDataArray_p1, gridDataArray_p2):
    coord1 = []
    coord2 = []
    # get the index of the red block in data 1
    # print("calculate coord1")
    break_flag = False
    for x in range(15):
        if (break_flag == True):
            break
        for y in range(15):
            if (break_flag == True):
                break
            for z in range(15):
                index = getIndex(x, y, z)
                if (gridDataArray_p1[index] == targetValue):
                    coord1 = [x, y, z]
                    break_flag = True
                    # print(coord1)
                    break
    # get the index of the red block in data 2
    # print("calculate coord2")
    break_flag = False
    for x in range(15):
        if (break_flag == True):
            break
        for y in range(15):
            if (break_flag == True):
                break
            for z in range(15):
                index = getIndex(x, y, z)
                if (gridDataArray_p2[index] == targetValue):
                    coord2 = [x, y, z]
                    break_flag = True
                    # print(coord2)
                    break
    distance = pow((coord2[0] - coord1[0]), 2) + pow((coord2[1] - coord1[1]), 2) + pow((coord2[2] - coord1[2]), 2)
    # print(distance)
    if (distance > 140 and distance < 150):
        return True
    else:
        return False


def checkDataPatternCenter(gridDataArray_p1):
    massOriginInterest = [7, 7, 7]
    targetValue = 7.5
    index = getIndex(massOriginInterest[0], massOriginInterest[1], massOriginInterest[2])
    if (gridDataArray_p1[index] == targetValue):
        return True
    else:
        return False


def checkIteration(vs):
    # copy all conf.* files to the current dir
    serverdir = "/home1/zw241/dataspaces/tests/C"
    confpath = serverdir + "/conf*"
    copyCommand = "cp " + confpath + " ."
    os.system(copyCommand)
    # number of clients at clients end to join server
    var_name = "ex1_sample_data"
    lock_name = "my_test_lock"
    # if (len(sys.argv) != 2):
    #     print("./analytics <version>")
    #     exit(0)
    # vs = int(sys.argv[1])
    appid = vs
    ds = dataspaces.dataspaceClient(appid, comm)
    vsLb = vs - 9
    startanay = timeit.default_timer()
    # ds.dspaces_init(comm, num_peers, appid)
    for version in range(vsLb, vs + 1):
        lb = [0]
        ub = [3374]
        # print("get version")
        # print(version)
        # ds.lock_on_read(lock_name)
        getdata_p1 = ds.get(var_name, version, lb, ub)
        # ds.unlock_on_read(lock_name)
        patternHeppen = checkDataPatternCenter(getdata_p1)
        if (patternHeppen == True):
            print("------------------")
            print("patternHeppen at ts %d" % (version))
            print("------------------")
            print("send data pattern and ts to operator")
            sendEventToPubSub(vs)
            break
    ds.dspaces_wrapper_finalize()
    endanay = timeit.default_timer()
    print("time span")
    print(endanay - startanay)


if (len(sys.argv) != 2):
    print("./analytics <version>")
    exit(0)
vs = int(sys.argv[1])
checkIteration(vs)
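A quick sanity check of the flattened-grid indexing used by getIndex (with r = gridnum = 15 the cell size deltar is 1.0, so cell coordinates are just the floor of the point coordinates); values below are illustrative:

gridnum = 15

def flat_index(px, py, pz, deltar=1.0):
    gx, gy, gz = int(px // deltar), int(py // deltar), int(pz // deltar)
    return gz * gridnum * gridnum + gy * gridnum + gx

assert flat_index(0, 0, 0) == 0
assert flat_index(7, 7, 7) == 7 * 225 + 7 * 15 + 7  # == 1687, the "center" cell probed above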
26.407583
107
0.585607
678
5,572
4.749263
0.29941
0.021739
0.006522
0.013665
0.186957
0.16087
0.143478
0.130435
0.130435
0.130435
0
0.031266
0.29397
5,572
211
108
26.407583
0.787239
0.155599
0
0.289063
0
0
0.075524
0.030809
0.007813
0
0
0.004739
0
1
0.046875
false
0
0.078125
0
0.179688
0.085938
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a86c72cf48c01e953158c374d43d02b0cc788a7
1,176
py
Python
bindings/python3/pyftvp/pyftvp.py
svaiter/ftvp
4b993fb9e61c6e33a6fb08afa3a8262d8add2bd3
[ "BSD-3-Clause" ]
2
2021-12-15T01:33:36.000Z
2021-12-31T00:40:31.000Z
bindings/python3/pyftvp/pyftvp.py
svaiter/ftvp
4b993fb9e61c6e33a6fb08afa3a8262d8add2bd3
[ "BSD-3-Clause" ]
null
null
null
bindings/python3/pyftvp/pyftvp.py
svaiter/ftvp
4b993fb9e61c6e33a6fb08afa3a8262d8add2bd3
[ "BSD-3-Clause" ]
1
2021-05-26T09:13:17.000Z
2021-05-26T09:13:17.000Z
import numpy as np

from . import _ftvp as ftvp
from . import _ftvp_color as ftvp_color
from .base import OptMethod, UpdateStrategy


def prox_tv(u, la, epsilon=0.0, iters=5000, block_size=16, steps=3,
            gapiter=1, gap_factor=0.25, rmse_factor=0.1,
            opt_method=OptMethod.SPLIT_NEWTON, color=False,
            upd_strategy=UpdateStrategy.UPDATE_VARYING):
    if u.ndim == 1:
        raise Exception()
    elif u.ndim == 2:
        ures = u[:, :, np.newaxis]
    else:
        ures = u
    if color:
        ures = np.swapaxes(ures, 0, 2)
    ures = ures.astype(np.double).copy(order='C')
    if rmse_factor is not None:
        gap_factor_new = rmse_factor * rmse_factor
    else:
        gap_factor_new = gap_factor
    if color:
        res = ftvp_color._prox_tv_color(ures, la, epsilon=epsilon, iters=iters,
                                        block_size=block_size, steps=steps,
                                        gapiter=gapiter, gap_factor=gap_factor_new,
                                        opt_method=opt_method)
        ures = np.swapaxes(ures, 0, 2)
    else:
        res = ftvp._prox_tv(ures, la, epsilon=epsilon, iters=iters,
                            block_size=block_size, steps=steps,
                            gapiter=gapiter, gap_factor=gap_factor_new,
                            opt_method=opt_method, upd_strategy=upd_strategy.value)
    return ures, res
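A hypothetical usage sketch of the wrapper above. It assumes the package is installed with its compiled extensions and that prox_tv is importable at package level (adjust the import to your install); the input and la value are stand-ins:

import numpy as np
from pyftvp import prox_tv  # assumed package-level export

rng = np.random.default_rng(0)
noisy = rng.standard_normal((64, 64))    # stand-in for a noisy grayscale image
denoised, info = prox_tv(noisy, la=0.5)  # la is the TV regularization weight
print(denoised.shape)                    # (64, 64, 1): a channel axis is added for 2-D input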
40.551724
213
0.698129
183
1,176
4.256831
0.333333
0.092426
0.061617
0.046213
0.336329
0.336329
0.284981
0.284981
0.284981
0.284981
0
0.022175
0.194728
1,176
28
214
42
0.800422
0
0
0.291667
0
0
0.00085
0
0
0
0
0
0
1
0.041667
false
0
0.166667
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a870a89d93c916f60df3dd28454ab6a034c9b7c
1,845
py
Python
examples/delete_objects_sample.py
JMD110/huaweicloud-sdk-python-obs
db38a0bbd86b4cd08e77c93c97c961366eebc0b0
[ "Apache-2.0" ]
1
2022-03-07T06:11:10.000Z
2022-03-07T06:11:10.000Z
examples/delete_objects_sample.py
JMD110/huaweicloud-sdk-python-obs
db38a0bbd86b4cd08e77c93c97c961366eebc0b0
[ "Apache-2.0" ]
null
null
null
examples/delete_objects_sample.py
JMD110/huaweicloud-sdk-python-obs
db38a0bbd86b4cd08e77c93c97c961366eebc0b0
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Copyright 2019 Huawei Technologies Co.,Ltd.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at
#       http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.

'''
This sample demonstrates how to delete objects under a specified bucket from OBS
using the OBS SDK for Python.
'''

AK = '*** Provide your Access Key ***'
SK = '*** Provide your Secret Key ***'
server = 'https://your-endpoint'
bucketName = 'my-obs-bucket-demo'

from obs import *

# Constructs an OBS client instance with your account for accessing OBS
obsClient = ObsClient(access_key_id=AK, secret_access_key=SK, server=server)

# Create bucket
print('Create a new bucket for demo\n')
obsClient.createBucket(bucketName)

# Batch put objects into the bucket
content = 'Thank you for using Object Storage Service'
keyPrefix = 'MyObjectKey'
keys = []
for i in range(100):
    key = keyPrefix + str(i)
    obsClient.putContent(bucketName, key, content)
    print('Succeed to put object ' + str(key))
    keys.append(Object(key=key))
print('\n')

# Delete all objects uploaded recently under the bucket
print('\nDeleting all objects\n')
resp = obsClient.deleteObjects(bucketName, DeleteObjectsRequest(False, keys))
print('Delete results:')
if resp.body.deleted:
    for delete in resp.body.deleted:
        print('\t' + str(delete))
if resp.body.error:
    for err in resp.body.error:
        print('\t' + str(err))
31.810345
81
0.728455
270
1,845
4.962963
0.522222
0.044776
0.019403
0.023881
0
0
0
0
0
0
0
0.007797
0.165854
1,845
57
82
32.368421
0.862898
0.477507
0
0
0
0
0.267306
0
0
0
0
0
0
1
0
false
0
0.038462
0
0.038462
0.269231
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5a8949b79d0dbf9f1b8b719f4955bad7243b8ddb
2,413
py
Python
src/pacman/s02_barycorr.py
sebastian-zieba/PACMAN
2eb1e4b450c97dc28d5a05b3ebddd80706cfca79
[ "MIT" ]
1
2022-03-23T10:26:33.000Z
2022-03-23T10:26:33.000Z
src/pacman/s02_barycorr.py
sebastian-zieba/PACMAN
2eb1e4b450c97dc28d5a05b3ebddd80706cfca79
[ "MIT" ]
null
null
null
src/pacman/s02_barycorr.py
sebastian-zieba/PACMAN
2eb1e4b450c97dc28d5a05b3ebddd80706cfca79
[ "MIT" ]
1
2022-03-29T13:37:31.000Z
2022-03-29T13:37:31.000Z
import os

import numpy as np
from astropy.io import ascii
from astropy.table import Column
from tqdm import tqdm

from .lib import suntimecorr
from .lib import util
from .lib import manageevent as me


def run02(eventlabel, workdir, meta=None):
    """Performs the barycentric correction of the observation times.

    - Performs the barycentric correction based on the t_mjd in filelist.txt.
    - Adds another column to filelist.txt called t_bjd.
    - Plots will be saved in ./run/run_2021-01-01_12-34-56_eventname/ancil/horizons

    Parameters
    ----------
    eventlabel : str
        The label given to the event in the run script. Will determine the name
        of the run directory.
    workdir : str
        The name of the work directory.
    meta
        The name of the metadata file.

    Returns
    -------
    meta
        Meta object with all the metadata stored in s01.

    Notes
    -----
    History:
        Written by Sebastian Zieba, December 2021
    """
    print('Starting s02')

    if meta == None:
        meta = me.loadevent(workdir + '/WFC3_' + eventlabel + "_Meta_Save")

    # read in filelist
    filelist_path = meta.workdir + '/filelist.txt'
    if os.path.exists(filelist_path):
        filelist = ascii.read(filelist_path)

    ivisit = filelist['ivisit']
    t_mjd = filelist['t_mjd']
    t_bjd = np.zeros(len(t_mjd))

    # load in more information into meta
    meta = util.ancil(meta)

    # Converting mjd to bjd
    for i in tqdm(range(max(ivisit) + 1), desc='Converting MJD to BJD', ascii=True):
        iivisit = ivisit == i
        t_jd = t_mjd[iivisit] + 2400000.5  # converts time to BJD_TDB; see Eastman et al. 2010 equation 4
        t_jd = t_jd + (32.184) / (24.0 * 60.0 * 60.0)
        t_bjd[iivisit] = t_jd + (suntimecorr.suntimecorr(meta, t_jd, meta.coordtable[i], verbose=False)) / (
                60.0 * 60.0 * 24.0)

    print('Writing t_bjd into filelist.txt')
    if not any(np.array(filelist.keys()) == 't_bjd'):
        filelist.add_column(Column(data=t_bjd, name='t_bjd'))
        ascii.write(filelist, filelist_path, format='rst', overwrite=True)
    else:
        filelist.replace_column(name='t_bjd', col=Column(data=t_bjd, name='t_bjd'))
        ascii.write(filelist, filelist_path, format='rst', overwrite=True)

    # Save results
    print('Saving Metadata')
    me.saveevent(meta, meta.workdir + '/WFC3_' + meta.eventlabel + "_Meta_Save", save=[])

    print('Finished s02 \n')

    return meta
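The per-visit loop above performs a fixed time-scale arithmetic before the light-travel correction: JD = MJD + 2400000.5, plus the constant 32.184 s offset between TAI and TT expressed in days. A worked example with an illustrative timestamp:

t_mjd = 59200.0              # an MJD-format timestamp (illustrative)
t_jd = t_mjd + 2400000.5     # MJD -> JD offset
t_jd += 32.184 / 86400.0     # fixed TAI -> TT offset of 32.184 s, in days
print(t_jd)                  # 2459200.5003725, before the suntimecorr light-travel term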
30.544304
108
0.665562
355
2,413
4.419718
0.414085
0.025494
0.024857
0.022945
0.094328
0.094328
0.094328
0.094328
0.094328
0.094328
0
0.034519
0.219644
2,413
78
109
30.935897
0.798725
0.362619
0
0.055556
0
0
0.110971
0
0
0
0
0
0
1
0.027778
false
0
0.222222
0
0.277778
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ce46b64822f845b3c9b5a986b966ec6f1c8310e9
8,531
py
Python
utils.py
gsoykan/DeepPanel
3f69471a019c4bdd5f77dd4eca0d040651af2a94
[ "Apache-2.0" ]
63
2020-10-19T23:55:21.000Z
2022-03-24T07:23:49.000Z
utils.py
gsoykan/DeepPanel
3f69471a019c4bdd5f77dd4eca0d040651af2a94
[ "Apache-2.0" ]
1
2022-02-25T06:28:39.000Z
2022-02-25T06:28:39.000Z
utils.py
gsoykan/DeepPanel
3f69471a019c4bdd5f77dd4eca0d040651af2a94
[ "Apache-2.0" ]
6
2021-01-26T11:36:23.000Z
2022-03-10T11:28:46.000Z
import os

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from IPython.display import clear_output
from PIL import Image

IMAGE_SIZE = 224
BACKGROUND_LABEL = 0
BORDER_LABEL = 1
CONTENT_LABEL = 2


def display(display_list):
    clear_output(wait=True)
    plt.figure(figsize=(15, 15))
    title = ['Input Image', 'True Mask', 'Predicted Mask']
    for i in range(len(display_list)):
        plt.subplot(1, len(display_list), i + 1)
        plt.title(title[i])
        plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i]))
        plt.axis('off')
    plt.show()


def show_predictions_compared_to_real_data(images, true_masks, predictions):
    for image_index in range(len(predictions)):
        image = images[image_index]
        true_mask = true_masks[image_index]
        labeled_prediction = predictions[image_index]
        display([image, true_mask, labeled_prediction])


def parse_image(img_path):
    image = tf.io.read_file(img_path)
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.convert_image_dtype(image, tf.uint8)
    mask_path = tf.strings.regex_replace(img_path, "raw", "segmentation_mask")
    mask_path = tf.strings.regex_replace(mask_path, "jpg", "png")
    mask = tf.io.read_file(mask_path)
    mask = tf.image.decode_png(mask, channels=1)
    # Transform mask colors into labels.
    # White pixels (255) are assigned to the background label.
    mask = tf.where(mask == 255, np.dtype('uint8').type(BACKGROUND_LABEL), mask)
    # Dark values are also assigned to the background label.
    mask = tf.where(mask == 29, np.dtype('uint8').type(BACKGROUND_LABEL), mask)
    # Intermediate values act as the border.
    mask = tf.where(mask == 76, np.dtype('uint8').type(BORDER_LABEL), mask)
    mask = tf.where(mask == 134, np.dtype('uint8').type(BORDER_LABEL), mask)
    # Brighter values act as the content.
    mask = tf.where(mask == 149, np.dtype('uint8').type(CONTENT_LABEL), mask)
    return {'image': image, 'segmentation_mask': mask}


def load_images_from_folder(folder, shuffle=True):
    files = tf.data.Dataset.list_files(folder + "raw/*.jpg", shuffle=shuffle)
    return files.map(parse_image)


def load_data_set():
    return {
        # Even though shuffling is recommended, we don't shuffle the test dataset so that we
        # can easily interpret each prediction: its order is the index of the corresponding
        # test image inside the test folder.
        'test': load_images_from_folder("./dataset/test/", shuffle=False),
        'train': load_images_from_folder("./dataset/training/")
    }


def normalize(input_image, input_mask):
    input_image = tf.cast(input_image, tf.float32) / 255.0
    return input_image, input_mask


def predicted_pixel_to_class(x):
    return np.argmax(x)


# TODO: This makes us go really slow when checking accuracy.
# Review how we can make this faster in the future using arrays.
def map_prediction_to_mask(predicted_image):
    predicted_mask = list()
    for x in predicted_image:
        predicted_mask_per_x = []
        for y in x:
            predicted_mask_per_x.append(predicted_pixel_to_class(y))
        predicted_mask.append(predicted_mask_per_x)
    return np.array(predicted_mask)


def tf_count(t, val):
    elements_equal_to_value = tf.equal(t, val)
    as_ints = tf.cast(elements_equal_to_value, tf.int32)
    count = tf.reduce_sum(as_ints)
    return count


@tf.function
def load_image_train(datapoint):
    input_image = tf.image.resize_with_pad(datapoint['image'],
                                           target_height=IMAGE_SIZE, target_width=IMAGE_SIZE)
    input_mask = tf.image.resize_with_pad(datapoint['segmentation_mask'],
                                          target_height=IMAGE_SIZE, target_width=IMAGE_SIZE)
    # Random horizontal flip as a simple data augmentation step.
    if tf.random.uniform(()) > 0.5:
        input_image = tf.image.flip_left_right(input_image)
        input_mask = tf.image.flip_left_right(input_mask)
    input_image, input_mask = normalize(input_image, input_mask)
    # Compute per-pixel weights that balance the three classes regardless of how
    # often each label appears in this particular mask.
    number_of_pixels_per_image = IMAGE_SIZE * IMAGE_SIZE
    percentage_of_background_labels = tf_count(input_mask, BACKGROUND_LABEL) / number_of_pixels_per_image
    percentage_of_content_labels = tf_count(input_mask, CONTENT_LABEL) / number_of_pixels_per_image
    percentage_of_border_labels = tf_count(input_mask, BORDER_LABEL) / number_of_pixels_per_image
    background_weight = tf.cast(0.33 / percentage_of_background_labels, tf.float32)
    content_weight = tf.cast(0.34 / percentage_of_content_labels, tf.float32)
    border_weight = tf.cast(0.33 / percentage_of_border_labels, tf.float32)
    weights = tf.where(input_mask == BACKGROUND_LABEL, background_weight, input_mask)
    weights = tf.where(input_mask == BORDER_LABEL, border_weight, weights)
    weights = tf.where(input_mask == CONTENT_LABEL, content_weight, weights)
    return input_image, input_mask, weights


def load_image_test(datapoint):
    input_image = tf.image.resize_with_pad(datapoint['image'],
                                           target_height=IMAGE_SIZE, target_width=IMAGE_SIZE)
    input_mask = tf.image.resize_with_pad(datapoint['segmentation_mask'],
                                          target_height=IMAGE_SIZE, target_width=IMAGE_SIZE)
    input_image, input_mask = normalize(input_image, input_mask)
    return input_image, input_mask


def files_in_folder(folder):
    return sorted([f for f in os.listdir(folder) if os.path.isfile(os.path.join(folder, f))])


def count_files_in_folder(folder):
    return len(files_in_folder(folder))


# TODO: This is really slow. Should we avoid index access and use iterators instead?
def compare_accuracy_per_label(true_mask, predicted_mask):
    total_background_labels = 0
    total_border_labels = 0
    total_content_labels = 0
    properly_predicted_background_pixels = 0
    properly_predicted_border_pixels = 0
    properly_predicted_content_pixels = 0
    for x in range(IMAGE_SIZE):
        for y in range(IMAGE_SIZE):
            prediction_label_per_pixel = int(predicted_mask[x][y])
            mask_label_per_pixel = int(true_mask[x][y])
            correct_prediction = prediction_label_per_pixel == mask_label_per_pixel
            if mask_label_per_pixel == BACKGROUND_LABEL:
                total_background_labels += 1
                if correct_prediction:
                    properly_predicted_background_pixels += 1
            if mask_label_per_pixel == BORDER_LABEL:
                total_border_labels += 1
                if correct_prediction:
                    properly_predicted_border_pixels += 1
            if mask_label_per_pixel == CONTENT_LABEL:
                total_content_labels += 1
                if correct_prediction:
                    properly_predicted_content_pixels += 1
    background_accuracy = properly_predicted_background_pixels / total_background_labels
    border_accuracy = properly_predicted_border_pixels / total_border_labels
    content_accuracy = properly_predicted_content_pixels / total_content_labels
    return background_accuracy, border_accuracy, content_accuracy


def compare_accuracy(true_masks, predictions):
    background_acc = 0.0
    border_acc = 0.0
    content_acc = 0.0
    for index in range(len(predictions)):
        print(f" - Checking accuracy for image with index {index}")
        partial_back_acc, partial_border_acc, partial_content_acc = compare_accuracy_per_label(
            true_masks[index], predictions[index])
        background_acc += partial_back_acc
        border_acc += partial_border_acc
        content_acc += partial_content_acc
    pred_num = len(predictions)
    return background_acc / pred_num, border_acc / pred_num, content_acc / pred_num


def label_to_rgb(labeled_pixel):
    if labeled_pixel == BACKGROUND_LABEL:
        return 0
    if labeled_pixel == CONTENT_LABEL:
        return 127
    if labeled_pixel == BORDER_LABEL:
        return 255


def labeled_prediction_to_image(predicted_result):
    color_matrix = np.vectorize(label_to_rgb)(predicted_result)
    return Image.fromarray(np.uint8(color_matrix))
40.051643
114
0.70742
1,181
8,531
4.801863
0.185436
0.030153
0.023805
0.030153
0.384941
0.276847
0.243167
0.166814
0.153059
0.136484
0
0.013561
0.204783
8,531
212
115
40.240566
0.822376
0.080999
0
0.197452
0
0
0.037557
0
0
0
0
0.004717
0
1
0.11465
false
0
0.038217
0.025478
0.261147
0.006369
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0