Dataset columns:
  code: string, 22 to 1.05M characters
  apis: list, 1 to 3.31k entries
  extract_api: string, 75 to 3.25M characters
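Each row that follows pairs a source file (the `code` column, shown here with its newlines collapsed) with the fully qualified names of the APIs it calls (`apis`) and a repr-encoded list of per-call records (`extract_api`): character offsets into the source, the resolved qualified name, the local call expression and its arguments, and the import statement that binds the name. Below is a minimal sketch of decoding one row, assuming each row is available as a plain mapping with these three columns; the helper name and the interpretation of fields beyond the first two positions are illustrative assumptions, not part of the dataset specification.

import ast

def decode_extract_api(row):
    """Yield (qualified_name, span) for every call recorded in a row.

    Assumes `row` is a mapping with the three columns listed above; the
    entry layout follows the examples shown in this file.
    """
    for entry in ast.literal_eval(row["extract_api"]):
        span, qualified_name = entry[0], entry[1]
        # `span` holds character offsets into the original (multi-line)
        # source text of row["code"]; the remaining fields carry the call
        # arguments and the import that resolves the name.
        yield qualified_name, span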
from telegram import CallbackQuery
from telegram.error import BadRequest

import utils.utils as utl
import commands.keyboards as kb


def foo_command(query: CallbackQuery):
    try:
        query.edit_message_text(
            utl.prep_for_md("This is *foo*", ignore=['*']),
            reply_markup=kb.main_menu,
            parse_mode='MarkdownV2')
    except BadRequest as e:
        print("Bad request")
        print(e)


def bar_command(query: CallbackQuery):
    try:
        query.edit_message_text("This is bar", reply_markup=kb.main_menu)
    except BadRequest as e:
        print("Bad request")
        print(e)
[ "utils.utils.prep_for_md" ]
[((227, 273), 'utils.utils.prep_for_md', 'utl.prep_for_md', (['"""This is *foo*"""'], {'ignore': "['*']"}), "('This is *foo*', ignore=['*'])\n", (242, 273), True, 'import utils.utils as utl\n')]
import sys
sys.path.append("..")
import os
here = os.path.dirname(os.path.realpath(__file__))

import pickle
import tempfile

import numpy as np
import pyrfr.regression

data_set_prefix = '%(here)s/../test_data_sets/diabetes_' % {"here": here}

features = np.loadtxt(data_set_prefix + 'features.csv', delimiter=",")
responses = np.loadtxt(data_set_prefix + 'responses.csv', delimiter=",")

data = pyrfr.regression.default_data_container(10)
data.import_csv_files(data_set_prefix + 'features.csv', data_set_prefix + 'responses.csv')

# create an instance of a regression forest using binary splits and the RSS loss
the_forest = pyrfr.regression.binary_rss_forest()

# reset to reseed the rng for the next fit
rng = pyrfr.regression.default_random_engine(42)

# create an instance of a regression forest using binary splits and the RSS loss
the_forest = pyrfr.regression.binary_rss_forest()
the_forest.options.num_trees = 16

# the forest's parameters
the_forest.options.compute_oob_error = True
the_forest.options.do_bootstrapping = True  # default: false
the_forest.options.num_data_points_per_tree = (data.num_data_points() // 4) * 3  # means same number as data points
the_forest.options.tree_opts.max_features = data.num_features() // 2  # 0 would mean all the features
the_forest.options.tree_opts.min_samples_to_split = 0  # 0 means split until pure
the_forest.options.tree_opts.min_samples_in_leaf = 0  # 0 means no restriction
the_forest.options.tree_opts.max_depth = 1024  # 0 means no restriction
the_forest.options.tree_opts.epsilon_purity = 1e-8  # when checking for purity, the data points can differ by this epsilon

the_forest.fit(data, rng)

predictions_1 = [the_forest.predict(f.tolist()) for f in features]

with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as f:
    fname = f.name
    pickle.dump(the_forest, f)

with open(fname, 'r+b') as fh:
    a_second_forest = pickle.load(fh)
os.remove(fname)

predictions_2 = [a_second_forest.predict(f.tolist()) for f in features]

if np.allclose(predictions_1, predictions_2):
    print("successfully pickled/unpickled the forest")
else:
    print("something went wrong")
[ "sys.path.append", "tempfile.NamedTemporaryFile", "os.remove", "pickle.dump", "os.path.realpath", "numpy.allclose", "pickle.load", "numpy.loadtxt" ]
[((11, 32), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (26, 32), False, 'import sys\n'), ((258, 317), 'numpy.loadtxt', 'np.loadtxt', (["(data_set_prefix + 'features.csv')"], {'delimiter': '""","""'}), "(data_set_prefix + 'features.csv', delimiter=',')\n", (268, 317), True, 'import numpy as np\n'), ((329, 389), 'numpy.loadtxt', 'np.loadtxt', (["(data_set_prefix + 'responses.csv')"], {'delimiter': '""","""'}), "(data_set_prefix + 'responses.csv', delimiter=',')\n", (339, 389), True, 'import numpy as np\n'), ((1884, 1900), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (1893, 1900), False, 'import os\n'), ((1982, 2023), 'numpy.allclose', 'np.allclose', (['predictions_1', 'predictions_2'], {}), '(predictions_1, predictions_2)\n', (1993, 2023), True, 'import numpy as np\n'), ((66, 92), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (82, 92), False, 'import os\n'), ((1712, 1765), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w+b"""', 'delete': '(False)'}), "(mode='w+b', delete=False)\n", (1739, 1765), False, 'import tempfile\n'), ((1789, 1815), 'pickle.dump', 'pickle.dump', (['the_forest', 'f'], {}), '(the_forest, f)\n', (1800, 1815), False, 'import pickle\n'), ((1868, 1883), 'pickle.load', 'pickle.load', (['fh'], {}), '(fh)\n', (1879, 1883), False, 'import pickle\n')]
import yappsrt
import spitfire.compiler.parser

# SpitfireScanner uses the order of the match, not the length of the match to
# determine what token to return. I'm not sure how fragile this is long-term,
# but it seems to have been the right solution for a number of small problems
# along the way.

_restrict_cache = {}


class SpitfireScanner(spitfire.compiler.parser._SpitfireParserScanner):

    def token(self, i, restrict=0):
        """Get the i'th token, and if i is one past the end, then scan for
        another token; restrict is a list of tokens that are allowed, or 0
        for any token."""
        if i == len(self.tokens):
            self.scan(restrict)
        if i < len(self.tokens):
            # Make sure the restriction is more restricted
            restriction = self.restrictions[i]
            if restrict and restriction:
                if not restriction.issuperset(restrict):
                    raise NotImplementedError(
                        "Unimplemented: restriction set changed",
                        restrict, self.restrictions[i])
                return self.tokens[i]
            elif not restrict and not restriction:
                return self.tokens[i]
        raise yappsrt.NoMoreTokens(
            i, len(self.tokens), self.tokens[i], restrict,
            self.restrictions[i], self.tokens)

    def scan(self, restrict):
        """Should scan another token and add it to the list, self.tokens,
        and add the restriction to self.restrictions"""
        # Cache the list of patterns we check to avoid unnecessary iteration
        restrict = frozenset(restrict)
        try:
            patterns = _restrict_cache[restrict]
        except KeyError:
            patterns = [pair for pair in self.patterns
                        if not restrict or pair[0] in restrict]
            _restrict_cache[restrict] = patterns

        _input, _pos = self.input, self.pos
        for best_pat, regexp in patterns:
            m = regexp.match(_input, _pos)
            if m:
                tname = m.group(0)
                best_match = len(tname)
                # msolo: use the first match, not the 'best'
                break
        else:
            # If we didn't find anything, raise an error
            msg = "Bad Token"
            if restrict:
                msg = "Trying to find one of " + ', '.join(restrict)
            raise yappsrt.SyntaxError(self.pos, msg)

        # Create a token with this data
        end = _pos + best_match
        token = (_pos, end, best_pat, tname)
        self.pos = end
        # Only add this token if it's not in the list
        # (to prevent looping)
        if not self.tokens or token != self.tokens[-1]:
            self.tokens.append(token)
            self.restrictions.append(restrict)
        return
[ "yappsrt.SyntaxError" ]
[((2096, 2130), 'yappsrt.SyntaxError', 'yappsrt.SyntaxError', (['self.pos', 'msg'], {}), '(self.pos, msg)\n', (2115, 2130), False, 'import yappsrt\n')]
# Specialization: Google IT Automation with Python # Course 02: Using Python to Interact with the Operating System # Week 2 Module Part 3 - Practice Quiz # Student: <NAME> # Learning Platform: Coursera.org # Scripting examples encountered during the Module Part 3 Practice Quiz: # 01. We're working with a list of flowers and some information about each one. # The create_file function writes this information to a CSV file. The contents_of_file # function reads this file into records and returns the information in a nicely formatted # block. Fill in the gaps of the contents_of_file function to turn the data in the CSV # file into a dictionary using DictReader. # import os # import csv # # # Create a file with data in it # def create_file(filename): # with open(filename, "w") as file: # file.write("name,color,type\n") # file.write("carnation,pink,annual\n") # file.write("daffodil,yellow,perennial\n") # file.write("iris,blue,perennial\n") # file.write("poinsettia,red,perennial\n") # file.write("sunflower,yellow,annual\n") # # # Read the file contents and format the information about each row # def contents_of_file(filename): # return_string = "" # # # Call the function to create the file # create_file(filename) # # # Open the file # ___ # # Read the rows of the file into a dictionary # ___ # # Process each item of the dictionary # for ___: # return_string += "a {} {} is {}\n".format(row["color"], row["name"], row["type"]) # return return_string # # #Call the function # print(contents_of_file("flowers.csv")) import os import csv # Create a file with data in it def create_file(filename): with open(filename, "w") as file: file.write("name,color,type\n") file.write("carnation,pink,annual\n") file.write("daffodil,yellow,perennial\n") file.write("iris,blue,perennial\n") file.write("poinsettia,red,perennial\n") file.write("sunflower,yellow,annual\n") # Read the file contents and format the information about each row def contents_of_file(filename): return_string = "" # Call the function to create the file create_file(filename) # Open the file with open(filename, "r") as csv_file: csv_reader = csv.DictReader(csv_file) # Read the rows of the file into a dictionary for row in csv_reader: # Process each item of the dictionary return_string += "a {} {} is {}\n".format(row["color"], row["name"], row["type"]) return return_string #Call the function print(contents_of_file("flowers.csv")) # 02. Using the CSV file of flowers again, fill in the gaps of the contents_of_file # function to process the data without turning it into a dictionary. How do you skip # over the header record with the field names? 
# import os # import csv # # # Create a file with data in it # def create_file(filename): # with open(filename, "w") as file: # file.write("name,color,type\n") # file.write("carnation,pink,annual\n") # file.write("daffodil,yellow,perennial\n") # file.write("iris,blue,perennial\n") # file.write("poinsettia,red,perennial\n") # file.write("sunflower,yellow,annual\n") # # # Read the file contents and format the information about each row # def contents_of_file(filename): # return_string = "" # # # Call the function to create the file # create_file(filename) # # # Open the file # ___ # # Read the rows of the file # rows = ___ # # Process each row # for row in rows: # ___ = row # # Format the return string for data rows only # # return_string += "a {} {} is {}\n".format(___) # return return_string # # #Call the function # print(contents_of_file("flowers.csv")) import os import csv # Create a file with data in it def create_file(filename): with open(filename, "w") as file: file.write("name,color,type\n") file.write("carnation,pink,annual\n") file.write("daffodil,yellow,perennial\n") file.write("iris,blue,perennial\n") file.write("poinsettia,red,perennial\n") file.write("sunflower,yellow,annual\n") # Read the file contents and format the information about each row def contents_of_file(filename): return_string = "" # Call the function to create the file create_file(filename) # Open the file with open(filename, "r") as csv_file: reader = csv.reader(csv_file) # Read the rows of the file rows = reader # Process each row for row in rows: name, color, typ = row # Format the return string for data rows only if name != "name": return_string += "a {} {} is {}\n".format(color, name, typ) return return_string #Call the function print(contents_of_file("flowers.csv"))
[ "csv.DictReader", "csv.reader" ]
[((2299, 2323), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (2313, 2323), False, 'import csv\n'), ((4483, 4503), 'csv.reader', 'csv.reader', (['csv_file'], {}), '(csv_file)\n', (4493, 4503), False, 'import csv\n')]
from django.db import models


class Genre(models.Model):
    name = models.CharField(max_length=16, null=True)

    def __str__(self):
        return self.name


class Director(models.Model):
    name = models.CharField(max_length=64, null=True)

    def __str__(self):
        return self.name


class Country(models.Model):
    name = models.CharField(max_length=64, null=True)

    class Meta:
        verbose_name_plural = 'countries'

    def __str__(self):
        return self.name


class Movie(models.Model):
    # String fields
    title = models.CharField(max_length=64, null=True)
    imdb_link = models.URLField(null=True)

    # Number field
    imdb_rating = models.DecimalField(max_digits=2, decimal_places=1, null=True)

    # Boolean field
    parents_guide = models.BooleanField(default=False, null=True)

    # Date field
    release_date = models.DateField(null=True)

    # Relationship fields
    director = models.ForeignKey(Director, null=True, on_delete=models.CASCADE)
    country = models.ForeignKey(Country, null=True, on_delete=models.CASCADE)
    genre = models.ManyToManyField(Genre)

    def __str__(self):
        return self.title
[ "django.db.models.URLField", "django.db.models.ManyToManyField", "django.db.models.CharField", "django.db.models.ForeignKey", "django.db.models.BooleanField", "django.db.models.DecimalField", "django.db.models.DateField" ]
[((69, 111), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)', 'null': '(True)'}), '(max_length=16, null=True)\n', (85, 111), False, 'from django.db import models\n'), ((204, 246), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'null': '(True)'}), '(max_length=64, null=True)\n', (220, 246), False, 'from django.db import models\n'), ((338, 380), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'null': '(True)'}), '(max_length=64, null=True)\n', (354, 380), False, 'from django.db import models\n'), ((550, 592), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'null': '(True)'}), '(max_length=64, null=True)\n', (566, 592), False, 'from django.db import models\n'), ((609, 635), 'django.db.models.URLField', 'models.URLField', ([], {'null': '(True)'}), '(null=True)\n', (624, 635), False, 'from django.db import models\n'), ((673, 735), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(2)', 'decimal_places': '(1)', 'null': '(True)'}), '(max_digits=2, decimal_places=1, null=True)\n', (692, 735), False, 'from django.db import models\n'), ((776, 821), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'null': '(True)'}), '(default=False, null=True)\n', (795, 821), False, 'from django.db import models\n'), ((858, 885), 'django.db.models.DateField', 'models.DateField', ([], {'null': '(True)'}), '(null=True)\n', (874, 885), False, 'from django.db import models\n'), ((927, 991), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Director'], {'null': '(True)', 'on_delete': 'models.CASCADE'}), '(Director, null=True, on_delete=models.CASCADE)\n', (944, 991), False, 'from django.db import models\n'), ((1006, 1069), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Country'], {'null': '(True)', 'on_delete': 'models.CASCADE'}), '(Country, null=True, on_delete=models.CASCADE)\n', (1023, 1069), False, 'from django.db import models\n'), ((1082, 1111), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Genre'], {}), '(Genre)\n', (1104, 1111), False, 'from django.db import models\n')]
"""Real-time forecasting `*Model`s to predict demand for tactical purposes. Real-time `*Model`s take order counts of all time steps in the training data and make a prediction for only one time step on the day to be predicted (i.e., the one starting at `predict_at`). Thus, the training time series have a `frequency` of the number of weekdays, `7`, times the number of time steps on a day. For example, for 60-minute time steps, the `frequency` becomes `7 * 12` (= operating hours from 11 am to 11 pm), which is `84`. Real-time `*Model`s train the forecasting `methods` on a seasonally decomposed time series internally. """ # noqa:RST215 import datetime as dt import pandas as pd from urban_meal_delivery import db from urban_meal_delivery.forecasts import methods from urban_meal_delivery.forecasts.models import base class RealtimeARIMAModel(base.ForecastingModelABC): """The ARIMA model applied on a real-time time series.""" name = 'rtarima' def predict( self, pixel: db.Pixel, predict_at: dt.datetime, train_horizon: int, ) -> pd.DataFrame: """Predict demand for a time step. Args: pixel: pixel in which the prediction is made predict_at: time step (i.e., "start_at") to make the prediction for train_horizon: weeks of historic data used to predict `predict_at` Returns: actual order counts (i.e., the "actual" column), point forecasts (i.e., the "prediction" column), and confidence intervals (i.e, the four "low/high/80/95" columns); contains one row for the `predict_at` time step # noqa:DAR401 RuntimeError """ # Generate the historic (and real-time) order time series. training_ts, frequency, actuals_ts = self._order_history.make_realtime_ts( pixel_id=pixel.id, predict_at=predict_at, train_horizon=train_horizon, ) # Decompose the `training_ts` to make predictions for the seasonal # component and the seasonally adjusted observations separately. decomposed_training_ts = methods.decomposition.stl( time_series=training_ts, frequency=frequency, # "Periodic" `ns` parameter => same seasonal component value # for observations of the same lag. ns=999, ) # Make predictions for the seasonal component by linear extrapolation. seasonal_predictions = methods.extrapolate_season.predict( training_ts=decomposed_training_ts['seasonal'], forecast_interval=pd.DatetimeIndex(actuals_ts.index), frequency=frequency, ) # Make predictions with the ARIMA model on the seasonally adjusted time series. seasonally_adjusted_predictions = methods.arima.predict( training_ts=( decomposed_training_ts['trend'] + decomposed_training_ts['residual'] ), forecast_interval=pd.DatetimeIndex(actuals_ts.index), # Because the seasonality was taken out before, # the `training_ts` has, by definition, a `frequency` of `1`. frequency=1, seasonal_fit=False, ) # The overall `predictions` are the sum of the separate predictions above. # As the linear extrapolation of the seasonal component has no # confidence interval, we put the one from the ARIMA model around # the extrapolated seasonal component. 
predictions = pd.DataFrame( data={ 'actual': actuals_ts, 'prediction': ( seasonal_predictions['prediction'] # noqa:WPS204 + seasonally_adjusted_predictions['prediction'] ), 'low80': ( seasonal_predictions['prediction'] + seasonally_adjusted_predictions['low80'] ), 'high80': ( seasonal_predictions['prediction'] + seasonally_adjusted_predictions['high80'] ), 'low95': ( seasonal_predictions['prediction'] + seasonally_adjusted_predictions['low95'] ), 'high95': ( seasonal_predictions['prediction'] + seasonally_adjusted_predictions['high95'] ), }, index=actuals_ts.index, ) # Sanity checks. if len(predictions) != 1: # pragma: no cover raise RuntimeError('real-time models should predict exactly one time step') if predictions.isnull().sum().any(): # pragma: no cover raise RuntimeError('missing predictions in rtarima model') if predict_at not in predictions.index: # pragma: no cover raise RuntimeError('missing prediction for `predict_at`') return predictions
[ "pandas.DataFrame", "urban_meal_delivery.forecasts.methods.decomposition.stl", "pandas.DatetimeIndex" ]
[((2118, 2197), 'urban_meal_delivery.forecasts.methods.decomposition.stl', 'methods.decomposition.stl', ([], {'time_series': 'training_ts', 'frequency': 'frequency', 'ns': '(999)'}), '(time_series=training_ts, frequency=frequency, ns=999)\n', (2143, 2197), False, 'from urban_meal_delivery.forecasts import methods\n'), ((3527, 4078), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'actual': actuals_ts, 'prediction': seasonal_predictions['prediction'] +\n seasonally_adjusted_predictions['prediction'], 'low80': \n seasonal_predictions['prediction'] + seasonally_adjusted_predictions[\n 'low80'], 'high80': seasonal_predictions['prediction'] +\n seasonally_adjusted_predictions['high80'], 'low95': \n seasonal_predictions['prediction'] + seasonally_adjusted_predictions[\n 'low95'], 'high95': seasonal_predictions['prediction'] +\n seasonally_adjusted_predictions['high95']}", 'index': 'actuals_ts.index'}), "(data={'actual': actuals_ts, 'prediction': seasonal_predictions\n ['prediction'] + seasonally_adjusted_predictions['prediction'], 'low80':\n seasonal_predictions['prediction'] + seasonally_adjusted_predictions[\n 'low80'], 'high80': seasonal_predictions['prediction'] +\n seasonally_adjusted_predictions['high80'], 'low95': \n seasonal_predictions['prediction'] + seasonally_adjusted_predictions[\n 'low95'], 'high95': seasonal_predictions['prediction'] +\n seasonally_adjusted_predictions['high95']}, index=actuals_ts.index)\n", (3539, 4078), True, 'import pandas as pd\n'), ((2603, 2637), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['actuals_ts.index'], {}), '(actuals_ts.index)\n', (2619, 2637), True, 'import pandas as pd\n'), ((2992, 3026), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['actuals_ts.index'], {}), '(actuals_ts.index)\n', (3008, 3026), True, 'import pandas as pd\n')]
#!/usr/bin/env python import django from django.core import management from prometheus_client import start_http_server, Gauge from croniter import croniter from datetime import datetime import time import sys import signal import os import re import gc # # Run django management command on a continuous loop # delaying "--delay <seconds>" between each invocation, # and gracefully exiting on termination signal # def main(): signals = [signal.SIGHUP, signal.SIGINT, signal.SIGQUIT, signal.SIGTERM, signal.SIGWINCH] finish_signal = None loop_delay = 15 cron_spec = None command = None options = [] our_arg = True def report(message, error=False): print("management command daemon: {}: {}".format( command if command else sys.argv[1:], message), file=sys.stderr if error else sys.stdout) def abort(reason): report(reason, error=True) sys.exit(-1) def finish_on_signal(): report("exit on signal ({})".format(finish_signal)) sys.exit(0) def handler(signum, frame): nonlocal finish_signal if signum in signals: finish_signal = signum else: report("signal {}".format(signum), error=True) def pause(lastrun_utc): delay = 0 if cron_spec: c = croniter(cron_spec, datetime.utcfromtimestamp(lastrun_utc + 1)) delay = int(c.get_next() - lastrun_utc) else: delay = loop_delay - int(time.time() - lastrun_utc) if delay > 0 and not finish_signal: gc.collect() time.sleep(delay) # prepare to exit gracefully for signum in signals: signal.signal(signum, handler) # prepare metrics management_daemon_command_start = Gauge( 'management_daemon_command_start', 'Management Command start time', ['job', 'instance']) management_daemon_command_finish = Gauge( 'management_daemon_command_finish', 'Management Command finish time', ['job', 'instance']) management_daemon_command_duration = Gauge( 'management_daemon_command_duration', 'Management Command curation', ['job', 'instance']) management_daemon_command_exit = Gauge( 'management_daemon_command_exit', 'Management Command return value', ['job', 'instance']) # parse our args from command's for arg in sys.argv[1:]: if our_arg: if not loop_delay: if not re.match('^[0-9]+$', arg): abort('invalid loop delay') loop_delay = int(arg) elif cron_spec is not None and len(cron_spec) == 0: if not croniter.is_valid(arg): abort("invalid cron specification") cron_spec = arg elif arg == '--delay': loop_delay = None elif arg == '--cron': cron_spec = "" elif arg == '--': our_arg = False else: command = arg our_arg = False elif not command: command = arg our_arg = False else: options.append(arg) if command is None: abort('missing command') if not loop_delay: abort('missing delay') # open metrics exporter endpoint start_http_server(9100) if cron_spec: if len(cron_spec) == 0: abort('missing cron specification') # initial pause pause(time.time()) release_id = os.getenv('RELEASE_ID', None) if not release_id: m = re.match(r'(.+?)-daemon-.+$', os.getenv('HOSTNAME', '')) release_id = m.group(1) if m else 'default' # initialize django os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project.settings') django.setup() # run provided management command in a loop while True: if finish_signal: finish_on_signal() start = time.time() rv = -1 try: rv = management.call_command(command, *options) except SystemExit as ex: rv = int(str(ex)) except Exception as ex: rv = -1 report("exception: {}".format(ex), error=True) finish = time.time() duration = finish - start management_daemon_command_exit.labels( command, release_id).set(rv if rv and isinstance(rv, int) else 0) management_daemon_command_start.labels( command, 
release_id).set(start) management_daemon_command_finish.labels( command, release_id).set(finish) management_daemon_command_duration.labels( command, release_id).set(duration) pause(start) if __name__ == '__main__': main()
[ "prometheus_client.start_http_server", "os.environ.setdefault", "django.setup", "re.match", "time.time", "time.sleep", "gc.collect", "django.core.management.call_command", "datetime.datetime.utcfromtimestamp", "signal.signal", "prometheus_client.Gauge", "os.getenv", "sys.exit", "croniter.croniter.is_valid" ]
[((1803, 1902), 'prometheus_client.Gauge', 'Gauge', (['"""management_daemon_command_start"""', '"""Management Command start time"""', "['job', 'instance']"], {}), "('management_daemon_command_start', 'Management Command start time', [\n 'job', 'instance'])\n", (1808, 1902), False, 'from prometheus_client import start_http_server, Gauge\n'), ((1962, 2062), 'prometheus_client.Gauge', 'Gauge', (['"""management_daemon_command_finish"""', '"""Management Command finish time"""', "['job', 'instance']"], {}), "('management_daemon_command_finish', 'Management Command finish time',\n ['job', 'instance'])\n", (1967, 2062), False, 'from prometheus_client import start_http_server, Gauge\n'), ((2125, 2224), 'prometheus_client.Gauge', 'Gauge', (['"""management_daemon_command_duration"""', '"""Management Command curation"""', "['job', 'instance']"], {}), "('management_daemon_command_duration', 'Management Command curation',\n ['job', 'instance'])\n", (2130, 2224), False, 'from prometheus_client import start_http_server, Gauge\n'), ((2283, 2382), 'prometheus_client.Gauge', 'Gauge', (['"""management_daemon_command_exit"""', '"""Management Command return value"""', "['job', 'instance']"], {}), "('management_daemon_command_exit', 'Management Command return value',\n ['job', 'instance'])\n", (2288, 2382), False, 'from prometheus_client import start_http_server, Gauge\n'), ((3415, 3438), 'prometheus_client.start_http_server', 'start_http_server', (['(9100)'], {}), '(9100)\n', (3432, 3438), False, 'from prometheus_client import start_http_server, Gauge\n'), ((3608, 3637), 'os.getenv', 'os.getenv', (['"""RELEASE_ID"""', 'None'], {}), "('RELEASE_ID', None)\n", (3617, 3637), False, 'import os\n'), ((3811, 3878), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""project.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'project.settings')\n", (3832, 3878), False, 'import os\n'), ((3883, 3897), 'django.setup', 'django.setup', ([], {}), '()\n', (3895, 3897), False, 'import django\n'), ((938, 950), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (946, 950), False, 'import sys\n'), ((1048, 1059), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1056, 1059), False, 'import sys\n'), ((1711, 1741), 'signal.signal', 'signal.signal', (['signum', 'handler'], {}), '(signum, handler)\n', (1724, 1741), False, 'import signal\n'), ((4037, 4048), 'time.time', 'time.time', ([], {}), '()\n', (4046, 4048), False, 'import time\n'), ((4330, 4341), 'time.time', 'time.time', ([], {}), '()\n', (4339, 4341), False, 'import time\n'), ((1599, 1611), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1609, 1611), False, 'import gc\n'), ((1624, 1641), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (1634, 1641), False, 'import time\n'), ((3577, 3588), 'time.time', 'time.time', ([], {}), '()\n', (3586, 3588), False, 'import time\n'), ((3703, 3728), 'os.getenv', 'os.getenv', (['"""HOSTNAME"""', '""""""'], {}), "('HOSTNAME', '')\n", (3712, 3728), False, 'import os\n'), ((4095, 4137), 'django.core.management.call_command', 'management.call_command', (['command', '*options'], {}), '(command, *options)\n', (4118, 4137), False, 'from django.core import management\n'), ((1368, 1410), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['(lastrun_utc + 1)'], {}), '(lastrun_utc + 1)\n', (1393, 1410), False, 'from datetime import datetime\n'), ((2544, 2569), 're.match', 're.match', (['"""^[0-9]+$"""', 'arg'], {}), "('^[0-9]+$', arg)\n", (2552, 2569), False, 'import re\n'), ((1515, 1526), 
'time.time', 'time.time', ([], {}), '()\n', (1524, 1526), False, 'import time\n'), ((2745, 2767), 'croniter.croniter.is_valid', 'croniter.is_valid', (['arg'], {}), '(arg)\n', (2762, 2767), False, 'from croniter import croniter\n')]
#!/usr/bin/python3

def validate_user(name, minlen):
    assert type(name) == str, "username must be a string"
    if minlen < 1:
        raise ValueError("minlen must be at least 1")
    if len(name) < minlen:
        return False
    if not name.isalnum():
        return False
    return True


# LAB #1
my_list = [27, 5, 9, 6, 8]


def RemoveValue(myVal):
    if myVal not in my_list:
        raise ValueError("Value must be in the given list")
    else:
        my_list.remove(myVal)
    return my_list


print("1", RemoveValue(27))
# print("2", RemoveValue(27))


# 2
my_word_list = ['east', 'after', 'up', 'over', 'inside']


def OrganizeList(myList):
    for item in myList:
        assert type(item) == str, "Word list must be a list of strings"
    myList.sort()
    return myList


my_new_list = [6, 3, 8, "12", 42]
# print(OrganizeList(my_new_list))
# without assert
# TypeError: '<' not supported between instances of 'str' and 'int'


# 3
import random

participants = ['Jack', 'Jill', 'NotLarry', 'Tom']


# Revised Guess() function
def Guess(participants):
    my_participant_dict = {}
    for participant in participants:
        my_participant_dict[participant] = random.randint(1, 9)
    try:
        if my_participant_dict['Larry'] == 9:
            return True
        else:
            return False
    except KeyError:
        return None


print(Guess(participants))
[ "random.randint" ]
[((1167, 1187), 'random.randint', 'random.randint', (['(1)', '(9)'], {}), '(1, 9)\n', (1181, 1187), False, 'import random\n')]
import random

from .adjectives import ADJECTIVES
from .nouns import NOUNS, ANIMALS, FLOWERS
from .verbs import VERBS
from .adverbs import ADVERBS

DICTIONARY = {
    'adjective': ADJECTIVES,
    'noun': NOUNS,
    'verb': VERBS,
    'adverb': ADVERBS,
    'number': list(map(str, range(10, 99)))
}


class HRID:
    def __init__(self, delimeter='-', hridfmt=('adjective', 'noun', 'verb', 'adverb')):
        self.delimeter = delimeter
        self.phrasefmt = list()
        for element in hridfmt:
            self.phrasefmt.append(DICTIONARY.get(element, element))

    def generate(self):
        phrases = list()
        for element in self.phrasefmt:
            if isinstance(element, (str)):
                phrases.append(element)
            if isinstance(element, (list)):
                phrases.append(random.choice(element))
        return self.delimeter.join(phrases)
[ "random.choice" ]
[((815, 837), 'random.choice', 'random.choice', (['element'], {}), '(element)\n', (828, 837), False, 'import random\n')]
import numpy as np
import os
import torch
import copy
from math import cos, sqrt, pi


def dct(x, y, v, u, n):
    # Normalisation
    def alpha(a):
        if a == 0:
            return sqrt(1.0 / n)
        else:
            return sqrt(2.0 / n)
    return alpha(u) * alpha(v) * cos(((2 * x + 1) * (u * pi)) / (2 * n)) * cos(((2 * y + 1) * (v * pi)) / (2 * n))


def generate_2d_dct_basis(root_path, image_height, sub_dim=75):
    path = "{}/attacked_images/GeoDA/2d_dct_basis_height_{}_subdim_{}.npy".format(root_path, image_height, sub_dim)
    os.makedirs(os.path.dirname(path), exist_ok=True)
    if os.path.exists(path):
        return np.load(path)
    n = image_height  # Assume square image, so we don't have different xres and yres
    # We can get different frequencies by setting u and v
    # Here, we have a max u and v to loop over and display
    # Feel free to adjust
    maxU = sub_dim
    maxV = sub_dim
    dct_basis = []
    for u in range(0, maxU):
        for v in range(0, maxV):
            basisImg = np.zeros((n, n))
            for y in range(0, n):
                for x in range(0, n):
                    basisImg[y, x] = dct(x, y, v, u, max(n, maxV))
            dct_basis.append(basisImg)
    dct_basis = np.mat(np.reshape(dct_basis, (maxV * maxU, n * n))).transpose()
    np.save(path, dct_basis)
    return dct_basis


def clip_image_values(x, minv, maxv):
    if not isinstance(minv, torch.Tensor):
        return torch.clamp(x, min=minv, max=maxv)
    return torch.min(torch.max(x, minv), maxv)


def valid_bounds(img, delta=255):
    im = copy.deepcopy(np.asarray(img))
    im = im.astype(np.int)

    # General valid bounds [0, 255]
    valid_lb = np.zeros_like(im)
    valid_ub = np.full_like(im, 255)

    # Compute the bounds
    lb = im - delta
    ub = im + delta

    # Validate that the bounds are in [0, 255]
    lb = np.maximum(valid_lb, np.minimum(lb, im))
    ub = np.minimum(valid_ub, np.maximum(ub, im))

    # Change types to uint8
    lb = lb.astype(np.uint8)
    ub = ub.astype(np.uint8)

    return lb, ub


def inv_tf(x, mean, std):
    for i in range(len(mean)):
        x[i] = np.multiply(x[i], std[i], dtype=np.float32)
        x[i] = np.add(x[i], mean[i], dtype=np.float32)
    x = np.swapaxes(x, 0, 2)
    x = np.swapaxes(x, 0, 1)
    return x


def inv_tf_pert(r):
    pert = np.sum(np.absolute(r), axis=0)
    pert[pert != 0] = 1
    return pert


def get_label(x):
    s = x.split(' ')
    label = ''
    for l in range(1, len(s)):
        label += s[l] + ' '
    return label


def nnz_pixels(arr):
    return np.count_nonzero(np.sum(np.absolute(arr), axis=0))
[ "numpy.absolute", "numpy.load", "numpy.maximum", "numpy.full_like", "numpy.zeros_like", "numpy.multiply", "os.path.dirname", "os.path.exists", "numpy.swapaxes", "math.cos", "numpy.reshape", "numpy.add", "numpy.save", "numpy.minimum", "math.sqrt", "numpy.asarray", "torch.clamp", "torch.max", "numpy.zeros" ]
[((606, 626), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (620, 626), False, 'import os\n'), ((1305, 1329), 'numpy.save', 'np.save', (['path', 'dct_basis'], {}), '(path, dct_basis)\n', (1312, 1329), True, 'import numpy as np\n'), ((1685, 1702), 'numpy.zeros_like', 'np.zeros_like', (['im'], {}), '(im)\n', (1698, 1702), True, 'import numpy as np\n'), ((1718, 1739), 'numpy.full_like', 'np.full_like', (['im', '(255)'], {}), '(im, 255)\n', (1730, 1739), True, 'import numpy as np\n'), ((2244, 2264), 'numpy.swapaxes', 'np.swapaxes', (['x', '(0)', '(2)'], {}), '(x, 0, 2)\n', (2255, 2264), True, 'import numpy as np\n'), ((2273, 2293), 'numpy.swapaxes', 'np.swapaxes', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (2284, 2293), True, 'import numpy as np\n'), ((324, 361), 'math.cos', 'cos', (['((2 * y + 1) * (v * pi) / (2 * n))'], {}), '((2 * y + 1) * (v * pi) / (2 * n))\n', (327, 361), False, 'from math import cos, sqrt, pi\n'), ((562, 583), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (577, 583), False, 'import os\n'), ((643, 656), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (650, 656), True, 'import numpy as np\n'), ((1449, 1483), 'torch.clamp', 'torch.clamp', (['x'], {'min': 'minv', 'max': 'maxv'}), '(x, min=minv, max=maxv)\n', (1460, 1483), False, 'import torch\n'), ((1503, 1521), 'torch.max', 'torch.max', (['x', 'minv'], {}), '(x, minv)\n', (1512, 1521), False, 'import torch\n'), ((1589, 1604), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (1599, 1604), True, 'import numpy as np\n'), ((1884, 1902), 'numpy.minimum', 'np.minimum', (['lb', 'im'], {}), '(lb, im)\n', (1894, 1902), True, 'import numpy as np\n'), ((1934, 1952), 'numpy.maximum', 'np.maximum', (['ub', 'im'], {}), '(ub, im)\n', (1944, 1952), True, 'import numpy as np\n'), ((2136, 2179), 'numpy.multiply', 'np.multiply', (['x[i]', 'std[i]'], {'dtype': 'np.float32'}), '(x[i], std[i], dtype=np.float32)\n', (2147, 2179), True, 'import numpy as np\n'), ((2195, 2234), 'numpy.add', 'np.add', (['x[i]', 'mean[i]'], {'dtype': 'np.float32'}), '(x[i], mean[i], dtype=np.float32)\n', (2201, 2234), True, 'import numpy as np\n'), ((2349, 2363), 'numpy.absolute', 'np.absolute', (['r'], {}), '(r)\n', (2360, 2363), True, 'import numpy as np\n'), ((187, 200), 'math.sqrt', 'sqrt', (['(1.0 / n)'], {}), '(1.0 / n)\n', (191, 200), False, 'from math import cos, sqrt, pi\n'), ((234, 247), 'math.sqrt', 'sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (238, 247), False, 'from math import cos, sqrt, pi\n'), ((282, 319), 'math.cos', 'cos', (['((2 * x + 1) * (u * pi) / (2 * n))'], {}), '((2 * x + 1) * (u * pi) / (2 * n))\n', (285, 319), False, 'from math import cos, sqrt, pi\n'), ((1030, 1046), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (1038, 1046), True, 'import numpy as np\n'), ((2605, 2621), 'numpy.absolute', 'np.absolute', (['arr'], {}), '(arr)\n', (2616, 2621), True, 'import numpy as np\n'), ((1248, 1291), 'numpy.reshape', 'np.reshape', (['dct_basis', '(maxV * maxU, n * n)'], {}), '(dct_basis, (maxV * maxU, n * n))\n', (1258, 1291), True, 'import numpy as np\n')]
import math from typing import Any, Dict, Tuple import attr from attr import attrib, attrs import numpy as np from nlisim.cell import CellData, CellFields, CellList from nlisim.coordinates import Point, Voxel from nlisim.grid import RectangularGrid from nlisim.modules.phagocyte import ( PhagocyteCellData, PhagocyteModel, PhagocyteModuleState, PhagocyteStatus, ) from nlisim.random import rg from nlisim.state import State from nlisim.util import TissueType, activation_function class PneumocyteCellData(PhagocyteCellData): PNEUMOCYTE_FIELDS: CellFields = [ ('status', np.uint8), ('status_iteration', np.uint), ('tnfa', bool), ] dtype = np.dtype( CellData.FIELDS + PhagocyteCellData.PHAGOCYTE_FIELDS + PNEUMOCYTE_FIELDS, align=True ) # type: ignore @classmethod def create_cell_tuple( cls, **kwargs, ) -> Tuple: initializer = { 'status': kwargs.get('status', PhagocyteStatus.RESTING), 'status_iteration': kwargs.get('status_iteration', 0), 'tnfa': kwargs.get('tnfa', False), } # ensure that these come in the correct order return PhagocyteCellData.create_cell_tuple(**kwargs) + tuple( [initializer[key] for key, *_ in PneumocyteCellData.PNEUMOCYTE_FIELDS] ) @attrs(kw_only=True, frozen=True, repr=False) class PneumocyteCellList(CellList): CellDataClass = PneumocyteCellData def cell_list_factory(self: 'PneumocyteState') -> PneumocyteCellList: return PneumocyteCellList(grid=self.global_state.grid) @attrs(kw_only=True) class PneumocyteState(PhagocyteModuleState): cells: PneumocyteCellList = attrib(default=attr.Factory(cell_list_factory, takes_self=True)) max_conidia: int # units: conidia time_to_rest: float # units: hours iter_to_rest: int # units: steps time_to_change_state: float # units: hours iter_to_change_state: int # units: steps # p_il6_qtty: float # units: mol * cell^-1 * h^-1 # p_il8_qtty: float # units: mol * cell^-1 * h^-1 p_tnf_qtty: float # units: atto-mol * cell^-1 * h^-1 pr_p_int: float # units: probability pr_p_int_param: float class Pneumocyte(PhagocyteModel): name = 'pneumocyte' StateClass = PneumocyteState def initialize(self, state: State): pneumocyte: PneumocyteState = state.pneumocyte voxel_volume: float = state.voxel_volume time_step_size: float = self.time_step lung_tissue: np.ndarray = state.lung_tissue pneumocyte.max_conidia = self.config.getint('max_conidia') # units: conidia pneumocyte.time_to_rest = self.config.getint('time_to_rest') # units: hours pneumocyte.time_to_change_state = self.config.getint('time_to_change_state') # units: hours pneumocyte.p_tnf_qtty = self.config.getfloat( 'p_tnf_qtty' ) # units: atto-mol * cell^-1 * h^-1 pneumocyte.pr_p_int_param = self.config.getfloat('pr_p_int_param') # computed values pneumocyte.iter_to_rest = int( pneumocyte.time_to_rest * (60 / self.time_step) ) # units: hours * (min/hour) / (min/step) = step pneumocyte.iter_to_change_state = int( pneumocyte.time_to_change_state * (60 / self.time_step) ) # units: hours * (min/hour) / (min/step) = step pneumocyte.pr_p_int = -math.expm1( -time_step_size / 60 / (voxel_volume * pneumocyte.pr_p_int_param) ) # units: probability # initialize cells, placing one per epithelial voxel dz_field: np.ndarray = state.grid.delta(axis=0) dy_field: np.ndarray = state.grid.delta(axis=1) dx_field: np.ndarray = state.grid.delta(axis=2) epithelial_voxels = list(zip(*np.where(lung_tissue == TissueType.EPITHELIUM))) rg.shuffle(epithelial_voxels) for vox_z, vox_y, vox_x in epithelial_voxels[: self.config.getint('count')]: # the x,y,z coordinates are in the centers of the grids z = state.grid.z[vox_z] y = 
state.grid.y[vox_y] x = state.grid.x[vox_x] dz = dz_field[vox_z, vox_y, vox_x] dy = dy_field[vox_z, vox_y, vox_x] dx = dx_field[vox_z, vox_y, vox_x] pneumocyte.cells.append( PneumocyteCellData.create_cell( point=Point( x=x + rg.uniform(-dx / 2, dx / 2), y=y + rg.uniform(-dy / 2, dy / 2), z=z + rg.uniform(-dz / 2, dz / 2), ) ) ) return state def single_step_probabilistic_drift( self, state: State, cell: PhagocyteCellData, voxel: Voxel ) -> Point: # pneumocytes do not move pass def advance(self, state: State, previous_time: float): """Advance the state by a single time step.""" from nlisim.modules.afumigatus import ( AfumigatusCellData, AfumigatusCellStatus, AfumigatusState, ) # from nlisim.modules.il6 import IL6State # from nlisim.modules.il8 import IL8State from nlisim.modules.tnfa import TNFaState pneumocyte: PneumocyteState = state.pneumocyte afumigatus: AfumigatusState = state.afumigatus # il6: IL6State = getattr(state, 'il6', None) # il8: IL8State = getattr(state, 'il8', None) tnfa: TNFaState = state.tnfa grid: RectangularGrid = state.grid voxel_volume: float = state.voxel_volume for pneumocyte_cell_index in pneumocyte.cells.alive(): pneumocyte_cell = pneumocyte.cells[pneumocyte_cell_index] pneumocyte_cell_voxel: Voxel = grid.get_voxel(pneumocyte_cell['point']) # self update if pneumocyte_cell['status'] == PhagocyteStatus.ACTIVE: if pneumocyte_cell['status_iteration'] >= pneumocyte.iter_to_rest: pneumocyte_cell['status_iteration'] = 0 pneumocyte_cell['status'] = PhagocyteStatus.RESTING pneumocyte_cell['tnfa'] = False else: pneumocyte_cell['status_iteration'] += 1 elif pneumocyte_cell['status'] == PhagocyteStatus.ACTIVATING: if pneumocyte_cell['status_iteration'] >= pneumocyte.iter_to_change_state: pneumocyte_cell['status_iteration'] = 0 pneumocyte_cell['status'] = PhagocyteStatus.ACTIVE else: pneumocyte_cell['status_iteration'] += 1 # ----------- interactions # interact with fungus if pneumocyte_cell['status'] not in { PhagocyteStatus.APOPTOTIC, PhagocyteStatus.NECROTIC, PhagocyteStatus.DEAD, }: local_aspergillus = afumigatus.cells.get_cells_in_voxel(pneumocyte_cell_voxel) for aspergillus_index in local_aspergillus: aspergillus_cell: AfumigatusCellData = afumigatus.cells[aspergillus_index] # skip resting conidia if aspergillus_cell['status'] == AfumigatusCellStatus.RESTING_CONIDIA: continue if pneumocyte_cell['status'] != PhagocyteStatus.ACTIVE: if rg.uniform() < pneumocyte.pr_p_int: pneumocyte_cell['status'] = PhagocyteStatus.ACTIVATING else: # TODO: I don't get this, looks like it zeros out the iteration # when activating pneumocyte_cell['status_iteration'] = 0 # # secrete IL6 # if il6 is not None and pneumocyte_cell['status'] == PhagocyteStatus.ACTIVE: # il6.grid[tuple(pneumocyte_cell_voxel)] += pneumocyte.p_il6_qtty # # # secrete IL8 # if il8 is not None and pneumocyte_cell['tnfa']: # il8.grid[tuple(pneumocyte_cell_voxel)] += pneumocyte.p_il8_qtty # interact with TNFa if pneumocyte_cell['status'] == PhagocyteStatus.ACTIVE: if ( activation_function( x=tnfa.grid[tuple(pneumocyte_cell_voxel)], k_d=tnfa.k_d, h=self.time_step / 60, # units: (min/step) / (min/hour) volume=voxel_volume, b=1, ) < rg.uniform() ): pneumocyte_cell['status_iteration'] = 0 pneumocyte_cell['tnfa'] = True # secrete TNFa tnfa.grid[tuple(pneumocyte_cell_voxel)] += pneumocyte.p_tnf_qtty return state def summary_stats(self, state: State) -> Dict[str, Any]: pneumocyte: PneumocyteState = state.pneumocyte live_pneumocytes = pneumocyte.cells.alive() max_index = max(map(int, PhagocyteStatus)) status_counts = np.bincount( 
np.fromiter( ( pneumocyte.cells[pneumocyte_cell_index]['status'] for pneumocyte_cell_index in live_pneumocytes ), dtype=np.uint8, ), minlength=max_index + 1, ) tnfa_active = int( np.sum( np.fromiter( ( pneumocyte.cells[pneumocyte_cell_index]['tnfa'] for pneumocyte_cell_index in live_pneumocytes ), dtype=bool, ) ) ) return { 'count': len(pneumocyte.cells.alive()), 'inactive': int(status_counts[PhagocyteStatus.INACTIVE]), 'inactivating': int(status_counts[PhagocyteStatus.INACTIVATING]), 'resting': int(status_counts[PhagocyteStatus.RESTING]), 'activating': int(status_counts[PhagocyteStatus.ACTIVATING]), 'active': int(status_counts[PhagocyteStatus.ACTIVE]), 'apoptotic': int(status_counts[PhagocyteStatus.APOPTOTIC]), 'necrotic': int(status_counts[PhagocyteStatus.NECROTIC]), 'interacting': int(status_counts[PhagocyteStatus.INTERACTING]), 'TNFa active': tnfa_active, } def visualization_data(self, state: State): return 'cells', state.pneumocyte.cells
[ "math.expm1", "attr.attrs", "attr.Factory", "numpy.dtype", "numpy.where", "numpy.fromiter", "nlisim.modules.phagocyte.PhagocyteCellData.create_cell_tuple", "nlisim.random.rg.shuffle", "nlisim.random.rg.uniform" ]
[((1350, 1394), 'attr.attrs', 'attrs', ([], {'kw_only': '(True)', 'frozen': '(True)', 'repr': '(False)'}), '(kw_only=True, frozen=True, repr=False)\n', (1355, 1394), False, 'from attr import attrib, attrs\n'), ((1604, 1623), 'attr.attrs', 'attrs', ([], {'kw_only': '(True)'}), '(kw_only=True)\n', (1609, 1623), False, 'from attr import attrib, attrs\n'), ((695, 793), 'numpy.dtype', 'np.dtype', (['(CellData.FIELDS + PhagocyteCellData.PHAGOCYTE_FIELDS + PNEUMOCYTE_FIELDS)'], {'align': '(True)'}), '(CellData.FIELDS + PhagocyteCellData.PHAGOCYTE_FIELDS +\n PNEUMOCYTE_FIELDS, align=True)\n', (703, 793), True, 'import numpy as np\n'), ((3858, 3887), 'nlisim.random.rg.shuffle', 'rg.shuffle', (['epithelial_voxels'], {}), '(epithelial_voxels)\n', (3868, 3887), False, 'from nlisim.random import rg\n'), ((1199, 1244), 'nlisim.modules.phagocyte.PhagocyteCellData.create_cell_tuple', 'PhagocyteCellData.create_cell_tuple', ([], {}), '(**kwargs)\n', (1234, 1244), False, 'from nlisim.modules.phagocyte import PhagocyteCellData, PhagocyteModel, PhagocyteModuleState, PhagocyteStatus\n'), ((1716, 1764), 'attr.Factory', 'attr.Factory', (['cell_list_factory'], {'takes_self': '(True)'}), '(cell_list_factory, takes_self=True)\n', (1728, 1764), False, 'import attr\n'), ((3411, 3488), 'math.expm1', 'math.expm1', (['(-time_step_size / 60 / (voxel_volume * pneumocyte.pr_p_int_param))'], {}), '(-time_step_size / 60 / (voxel_volume * pneumocyte.pr_p_int_param))\n', (3421, 3488), False, 'import math\n'), ((9171, 9301), 'numpy.fromiter', 'np.fromiter', (["(pneumocyte.cells[pneumocyte_cell_index]['status'] for\n pneumocyte_cell_index in live_pneumocytes)"], {'dtype': 'np.uint8'}), "((pneumocyte.cells[pneumocyte_cell_index]['status'] for\n pneumocyte_cell_index in live_pneumocytes), dtype=np.uint8)\n", (9182, 9301), True, 'import numpy as np\n'), ((9515, 9639), 'numpy.fromiter', 'np.fromiter', (["(pneumocyte.cells[pneumocyte_cell_index]['tnfa'] for pneumocyte_cell_index in\n live_pneumocytes)"], {'dtype': 'bool'}), "((pneumocyte.cells[pneumocyte_cell_index]['tnfa'] for\n pneumocyte_cell_index in live_pneumocytes), dtype=bool)\n", (9526, 9639), True, 'import numpy as np\n'), ((3801, 3847), 'numpy.where', 'np.where', (['(lung_tissue == TissueType.EPITHELIUM)'], {}), '(lung_tissue == TissueType.EPITHELIUM)\n', (3809, 3847), True, 'import numpy as np\n'), ((8623, 8635), 'nlisim.random.rg.uniform', 'rg.uniform', ([], {}), '()\n', (8633, 8635), False, 'from nlisim.random import rg\n'), ((7428, 7440), 'nlisim.random.rg.uniform', 'rg.uniform', ([], {}), '()\n', (7438, 7440), False, 'from nlisim.random import rg\n'), ((4438, 4465), 'nlisim.random.rg.uniform', 'rg.uniform', (['(-dx / 2)', '(dx / 2)'], {}), '(-dx / 2, dx / 2)\n', (4448, 4465), False, 'from nlisim.random import rg\n'), ((4497, 4524), 'nlisim.random.rg.uniform', 'rg.uniform', (['(-dy / 2)', '(dy / 2)'], {}), '(-dy / 2, dy / 2)\n', (4507, 4524), False, 'from nlisim.random import rg\n'), ((4556, 4583), 'nlisim.random.rg.uniform', 'rg.uniform', (['(-dz / 2)', '(dz / 2)'], {}), '(-dz / 2, dz / 2)\n', (4566, 4583), False, 'from nlisim.random import rg\n')]
from numpy import array, arange, zeros, unique, searchsorted, full, nan from numpy.linalg import norm # type: ignore from pyNastran.utils.numpy_utils import integer_types from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default from pyNastran.bdf.field_writer_16 import print_card_16 from pyNastran.bdf.bdf_interface.assign_type import ( integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank) from pyNastran.bdf.cards.elements.bars import BAROR from pyNastran.bdf.field_writer_8 import set_string8_blank_if_default from pyNastran.dev.bdf_vectorized.cards.elements.element import Element class CBAR(Element): """ +-------+-----+-----+-----+-----+-----+-----+-----+------+ | CBAR | EID | PID | GA | GB | X1 | X2 | X3 | OFFT | +-------+-----+-----+-----+-----+-----+-----+-----+------+ | | PA | PB | W1A | W2A | W3A | W1B | W2B | W3B | +-------+-----+-----+-----+-----+-----+-----+-----+------+ or +-------+-----+-----+-----+-----+-----+-----+-----+------+ | CBAR | EID | PID | GA | GB | G0 | | | OFFT | +-------+-----+-----+-----+-----+-----+-----+-----+------+ | | PA | PB | W1A | W2A | W3A | W1B | W2B | W3B | +-------+-----+-----+-----+-----+-----+-----+-----+------+ +-------+-------+-----+-------+-------+--------+-------+-------+-------+ | CBAR | 2 | 39 | 7 | 6 | 105 | | | GGG | +-------+-------+-----+-------+-------+--------+-------+-------+-------+ | | | 513 | 0.0+0 | 0.0+0 | -9. | 0.0+0 | 0.0+0 | -9. | +-------+-------+-----+-------+-------+--------+-------+-------+-------+ """ type = 'CBAR' def __init__(self, model): """ Defines the CBAR object. Parameters ---------- model : BDF the BDF object """ Element.__init__(self, model) def allocate(self, card_count): ncards = card_count[self.type] self.n = ncards if self.n: assert isinstance(ncards, int), ncards float_fmt = self.model.float_fmt #: Element ID self.element_id = zeros(ncards, 'int32') #: Property ID self.property_id = zeros(ncards, 'int32') self.node_ids = zeros((ncards, 2), 'int32') self.is_g0 = zeros(ncards, 'bool') self.g0 = full(ncards, nan, 'int32') self.x = full((ncards, 3), nan, float_fmt) self.offt = full(ncards, nan, '|U3') self.pin_flags = zeros((ncards, 2), 'int32') self.wa = zeros((ncards, 3), float_fmt) self.wb = zeros((ncards, 3), float_fmt) def add_card(self, card, comment=''): i = self.i if 0 and self.model.cbaror.n > 0: cbaror = self.model.cbaror pid_default = cbaror.property_id is_g0_default = cbaror.is_g0 x1_default = cbaror.x[0] x2_default = cbaror.x[1] x3_default = cbaror.x[2] g0_default = cbaror.g0 offt_default = cbaror.offt else: pid_default = None is_g0_default = None x1_default = 0.0 x2_default = 0.0 x3_default = 0.0 g0_default = None offt_default = 'GGG' eid = integer(card, 1, 'element_id') self.element_id[i] = eid if pid_default is not None: self.property_id[i] = integer_or_blank(card, 2, 'property_id', pid_default) else: self.property_id[i] = integer_or_blank(card, 2, 'property_id', eid) self.node_ids[i] = [integer(card, 3, 'GA'), integer(card, 4, 'GB')] #--------------------------------------------------------- # x / g0 if g0_default is not None: field5 = integer_double_or_blank(card, 5, 'g0_x1', g0_default) else: field5 = integer_double_or_blank(card, 5, 'g0_x1', x1_default) if isinstance(field5, integer_types): self.is_g0[i] = True self.g0[i] = field5 elif isinstance(field5, float): self.is_g0[i] = False x = array([field5, double_or_blank(card, 6, 'x2', x2_default), double_or_blank(card, 7, 'x3', x3_default)], dtype='float64') self.x[i, :] = x if norm(x) == 0.0: msg 
= 'G0 vector defining plane 1 is not defined on CBAR %s.\n' % eid msg += 'G0 = %s\n' % field5 msg += 'X = %s\n' % x msg += '%s' % card raise RuntimeError(msg) else: msg = ('field5 on CBAR (G0/X1) is the wrong type...id=%s field5=%s ' 'type=%s' % (self.eid, field5, type(field5))) raise RuntimeError(msg) #--------------------------------------------------------- # offt # bit doesn't exist on the CBAR offt = string_or_blank(card, 8, 'offt', offt_default) msg = 'invalid offt parameter of CBEAM...offt=%s' % offt assert offt[0] in ['G', 'B', 'O', 'E'], msg assert offt[1] in ['G', 'B', 'O', 'E'], msg assert offt[2] in ['G', 'B', 'O', 'E'], msg self.offt[i] = offt self.pin_flags[i, :] = [integer_or_blank(card, 9, 'pa', 0), integer_or_blank(card, 10, 'pb', 0)] self.wa[i, :] = [double_or_blank(card, 11, 'w1a', 0.0), double_or_blank(card, 12, 'w2a', 0.0), double_or_blank(card, 13, 'w3a', 0.0),] self.wb[i, :] = [double_or_blank(card, 14, 'w1b', 0.0), double_or_blank(card, 15, 'w2b', 0.0), double_or_blank(card, 16, 'w3b', 0.0),] assert len(card) <= 17, 'len(CBAR card) = %i\ncard=%s' % (len(card), card) self.i += 1 def build(self): if self.n: i = self.element_id.argsort() self.element_id = self.element_id[i] self.property_id = self.property_id[i] self.node_ids = self.node_ids[i, :] self.is_g0 = self.is_g0[i] self.g0 = self.g0[i] self.x = self.x[i, :] self.offt = self.offt[i] self.pin_flags = self.pin_flags[i, :] self.wa = self.wa[i, :] self.wb = self.wb[i, :] unique_eids = unique(self.element_id) if len(unique_eids) != len(self.element_id): raise RuntimeError('There are duplicate CBAR IDs...') self._cards = [] else: self.element_id = array([], dtype='int32') self.property_id = array([], dtype='int32') def update(self, maps): """ maps = { 'node_id' : nid_map, 'property' : pid_map, } """ if self.n: eid_map = maps['element'] nid_map = maps['node'] pid_map = maps['property'] for i, (eid, pid, nids) in enumerate(zip(self.element_id, self.property_id, self.node_ids)): self.element_id[i] = eid_map[eid] self.property_id[i] = pid_map[pid] self.node_ids[i, 0] = nid_map[nids[0]] self.node_ids[i, 1] = nid_map[nids[1]] #========================================================================= def get_mass_by_element_id(self, grid_cid0=None, total=False): """ mass = rho * A * L + nsm """ if self.n == 0: return 0.0 return [0.0] if grid_cid0 is None: grid_cid0 = self.model.grid.get_position_by_node_index() p1 = grid_cid0[self.node_ids[:, 0]] p2 = grid_cid0[self.node_ids[:, 1]] L = p2 - p1 i = self.model.properties_bar.get_index(self.property_id) A = self.model.properties_bar.get_Area[i] material_id = self.model.properties_bar.material_id[i] rho, E, J = self.model.Materials.get_rho_E_J(material_id) rho = self.model.Materials.get_rho(self.mid) E = self.model.Materials.get_E(self.mid) J = self.model.Materials.get_J(self.mid) mass = norm(L, axis=1) * A * rho + self.nsm if total: return mass.sum() else: return mass #========================================================================= def write_card(self, bdf_file, size=8, element_ids=None): if self.n: if element_ids is None: i = arange(self.n) else: i = searchsorted(self.element_id, self.element_id) for (eid, pid, n, is_g0, g0, x, offt, pin, wa, wb) in zip( self.element_id[i], self.property_id[i], self.node_ids[i], self.is_g0[i], self.g0[i], self.x[i], self.offt[i], self.pin_flags[i], self.wa[i], self.wb[i]): pa = set_blank_if_default(pin[0], 0) pb = set_blank_if_default(pin[1], 0) w1a = set_blank_if_default(wa[0], 0.0) w2a = set_blank_if_default(wa[1], 0.0) w3a = 
set_blank_if_default(wa[2], 0.0) w1b = set_blank_if_default(wb[0], 0.0) w2b = set_blank_if_default(wb[1], 0.0) w3b = set_blank_if_default(wb[2], 0.0) x1 = g0 if is_g0 else x[0] x2 = 0 if is_g0 else x[1] x3 = 0 if is_g0 else x[2] offt = set_string8_blank_if_default(offt, 'GGG') card = ['CBAR', eid, pid, n[0], n[1], x1, x2, x3, offt, pa, pb, w1a, w2a, w3a, w1b, w2b, w3b] if size == 8: bdf_file.write(print_card_8(card)) else: bdf_file.write(print_card_16(card)) def slice_by_index(self, i): i = self._validate_slice(i) obj = CBAR(self.model) obj.n = len(i) #obj._cards = self._cards[i] #obj._comments = obj._comments[i] #obj.comments = obj.comments[i] obj.element_id = self.element_id[i] obj.property_id = self.property_id[i] obj.node_ids = self.node_ids[i, :] obj.is_g0 = self.is_g0[i] obj.g0 = self.g0[i] obj.x = self.x[i, :] obj.offt = self.offt[i] obj.pin_flags = self.pin_flags[i] obj.wa = self.wa[i] obj.wb = self.wb[i] return obj #def get_stiffness_matrix(self, model, node_ids, index0s, fnorm=1.0): #return K, dofs, n_ijv
[ "numpy.full", "pyNastran.bdf.field_writer_8.set_blank_if_default", "pyNastran.bdf.bdf_interface.assign_type.string_or_blank", "pyNastran.bdf.bdf_interface.assign_type.integer", "pyNastran.bdf.field_writer_8.print_card_8", "numpy.unique", "numpy.zeros", "numpy.searchsorted", "pyNastran.bdf.bdf_interface.assign_type.integer_or_blank", "numpy.array", "numpy.arange", "pyNastran.bdf.field_writer_8.set_string8_blank_if_default", "numpy.linalg.norm", "pyNastran.dev.bdf_vectorized.cards.elements.element.Element.__init__", "pyNastran.bdf.bdf_interface.assign_type.integer_double_or_blank", "pyNastran.bdf.bdf_interface.assign_type.double_or_blank", "pyNastran.bdf.field_writer_16.print_card_16" ]
[((1911, 1940), 'pyNastran.dev.bdf_vectorized.cards.elements.element.Element.__init__', 'Element.__init__', (['self', 'model'], {}), '(self, model)\n', (1927, 1940), False, 'from pyNastran.dev.bdf_vectorized.cards.elements.element import Element\n'), ((3391, 3421), 'pyNastran.bdf.bdf_interface.assign_type.integer', 'integer', (['card', '(1)', '"""element_id"""'], {}), "(card, 1, 'element_id')\n", (3398, 3421), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((5068, 5114), 'pyNastran.bdf.bdf_interface.assign_type.string_or_blank', 'string_or_blank', (['card', '(8)', '"""offt"""', 'offt_default'], {}), "(card, 8, 'offt', offt_default)\n", (5083, 5114), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((2212, 2234), 'numpy.zeros', 'zeros', (['ncards', '"""int32"""'], {}), "(ncards, 'int32')\n", (2217, 2234), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((2293, 2315), 'numpy.zeros', 'zeros', (['ncards', '"""int32"""'], {}), "(ncards, 'int32')\n", (2298, 2315), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((2344, 2371), 'numpy.zeros', 'zeros', (['(ncards, 2)', '"""int32"""'], {}), "((ncards, 2), 'int32')\n", (2349, 2371), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((2397, 2418), 'numpy.zeros', 'zeros', (['ncards', '"""bool"""'], {}), "(ncards, 'bool')\n", (2402, 2418), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((2441, 2467), 'numpy.full', 'full', (['ncards', 'nan', '"""int32"""'], {}), "(ncards, nan, 'int32')\n", (2445, 2467), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((2489, 2522), 'numpy.full', 'full', (['(ncards, 3)', 'nan', 'float_fmt'], {}), '((ncards, 3), nan, float_fmt)\n', (2493, 2522), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((2547, 2571), 'numpy.full', 'full', (['ncards', 'nan', '"""|U3"""'], {}), "(ncards, nan, '|U3')\n", (2551, 2571), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((2601, 2628), 'numpy.zeros', 'zeros', (['(ncards, 2)', '"""int32"""'], {}), "((ncards, 2), 'int32')\n", (2606, 2628), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((2651, 2680), 'numpy.zeros', 'zeros', (['(ncards, 3)', 'float_fmt'], {}), '((ncards, 3), float_fmt)\n', (2656, 2680), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((2703, 2732), 'numpy.zeros', 'zeros', (['(ncards, 3)', 'float_fmt'], {}), '((ncards, 3), float_fmt)\n', (2708, 2732), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((3525, 3578), 'pyNastran.bdf.bdf_interface.assign_type.integer_or_blank', 'integer_or_blank', (['card', '(2)', '"""property_id"""', 'pid_default'], {}), "(card, 2, 'property_id', pid_default)\n", (3541, 3578), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((3627, 3672), 'pyNastran.bdf.bdf_interface.assign_type.integer_or_blank', 'integer_or_blank', (['card', '(2)', '"""property_id"""', 'eid'], {}), "(card, 2, 'property_id', eid)\n", (3643, 3672), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, 
integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((3701, 3723), 'pyNastran.bdf.bdf_interface.assign_type.integer', 'integer', (['card', '(3)', '"""GA"""'], {}), "(card, 3, 'GA')\n", (3708, 3723), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((3753, 3775), 'pyNastran.bdf.bdf_interface.assign_type.integer', 'integer', (['card', '(4)', '"""GB"""'], {}), "(card, 4, 'GB')\n", (3760, 3775), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((3918, 3971), 'pyNastran.bdf.bdf_interface.assign_type.integer_double_or_blank', 'integer_double_or_blank', (['card', '(5)', '"""g0_x1"""', 'g0_default'], {}), "(card, 5, 'g0_x1', g0_default)\n", (3941, 3971), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((4007, 4060), 'pyNastran.bdf.bdf_interface.assign_type.integer_double_or_blank', 'integer_double_or_blank', (['card', '(5)', '"""g0_x1"""', 'x1_default'], {}), "(card, 5, 'g0_x1', x1_default)\n", (4030, 4060), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((5398, 5432), 'pyNastran.bdf.bdf_interface.assign_type.integer_or_blank', 'integer_or_blank', (['card', '(9)', '"""pa"""', '(0)'], {}), "(card, 9, 'pa', 0)\n", (5414, 5432), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((5466, 5501), 'pyNastran.bdf.bdf_interface.assign_type.integer_or_blank', 'integer_or_blank', (['card', '(10)', '"""pb"""', '(0)'], {}), "(card, 10, 'pb', 0)\n", (5482, 5501), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((5529, 5566), 'pyNastran.bdf.bdf_interface.assign_type.double_or_blank', 'double_or_blank', (['card', '(11)', '"""w1a"""', '(0.0)'], {}), "(card, 11, 'w1a', 0.0)\n", (5544, 5566), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((5593, 5630), 'pyNastran.bdf.bdf_interface.assign_type.double_or_blank', 'double_or_blank', (['card', '(12)', '"""w2a"""', '(0.0)'], {}), "(card, 12, 'w2a', 0.0)\n", (5608, 5630), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((5657, 5694), 'pyNastran.bdf.bdf_interface.assign_type.double_or_blank', 'double_or_blank', (['card', '(13)', '"""w3a"""', '(0.0)'], {}), "(card, 13, 'w3a', 0.0)\n", (5672, 5694), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((5723, 5760), 'pyNastran.bdf.bdf_interface.assign_type.double_or_blank', 'double_or_blank', (['card', '(14)', '"""w1b"""', '(0.0)'], {}), "(card, 14, 'w1b', 0.0)\n", (5738, 5760), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((5787, 5824), 'pyNastran.bdf.bdf_interface.assign_type.double_or_blank', 'double_or_blank', (['card', '(15)', '"""w2b"""', '(0.0)'], {}), "(card, 15, 'w2b', 0.0)\n", (5802, 5824), 
False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((5851, 5888), 'pyNastran.bdf.bdf_interface.assign_type.double_or_blank', 'double_or_blank', (['card', '(16)', '"""w3b"""', '(0.0)'], {}), "(card, 16, 'w3b', 0.0)\n", (5866, 5888), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((6521, 6544), 'numpy.unique', 'unique', (['self.element_id'], {}), '(self.element_id)\n', (6527, 6544), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((6745, 6769), 'numpy.array', 'array', (['[]'], {'dtype': '"""int32"""'}), "([], dtype='int32')\n", (6750, 6769), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((6801, 6825), 'numpy.array', 'array', (['[]'], {'dtype': '"""int32"""'}), "([], dtype='int32')\n", (6806, 6825), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((8705, 8719), 'numpy.arange', 'arange', (['self.n'], {}), '(self.n)\n', (8711, 8719), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((8758, 8804), 'numpy.searchsorted', 'searchsorted', (['self.element_id', 'self.element_id'], {}), '(self.element_id, self.element_id)\n', (8770, 8804), False, 'from numpy import array, arange, zeros, unique, searchsorted, full, nan\n'), ((9118, 9149), 'pyNastran.bdf.field_writer_8.set_blank_if_default', 'set_blank_if_default', (['pin[0]', '(0)'], {}), '(pin[0], 0)\n', (9138, 9149), False, 'from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default\n'), ((9171, 9202), 'pyNastran.bdf.field_writer_8.set_blank_if_default', 'set_blank_if_default', (['pin[1]', '(0)'], {}), '(pin[1], 0)\n', (9191, 9202), False, 'from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default\n'), ((9226, 9258), 'pyNastran.bdf.field_writer_8.set_blank_if_default', 'set_blank_if_default', (['wa[0]', '(0.0)'], {}), '(wa[0], 0.0)\n', (9246, 9258), False, 'from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default\n'), ((9281, 9313), 'pyNastran.bdf.field_writer_8.set_blank_if_default', 'set_blank_if_default', (['wa[1]', '(0.0)'], {}), '(wa[1], 0.0)\n', (9301, 9313), False, 'from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default\n'), ((9336, 9368), 'pyNastran.bdf.field_writer_8.set_blank_if_default', 'set_blank_if_default', (['wa[2]', '(0.0)'], {}), '(wa[2], 0.0)\n', (9356, 9368), False, 'from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default\n'), ((9391, 9423), 'pyNastran.bdf.field_writer_8.set_blank_if_default', 'set_blank_if_default', (['wb[0]', '(0.0)'], {}), '(wb[0], 0.0)\n', (9411, 9423), False, 'from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default\n'), ((9446, 9478), 'pyNastran.bdf.field_writer_8.set_blank_if_default', 'set_blank_if_default', (['wb[1]', '(0.0)'], {}), '(wb[1], 0.0)\n', (9466, 9478), False, 'from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default\n'), ((9501, 9533), 'pyNastran.bdf.field_writer_8.set_blank_if_default', 'set_blank_if_default', (['wb[2]', '(0.0)'], {}), '(wb[2], 0.0)\n', (9521, 9533), False, 'from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default\n'), ((9685, 9726), 'pyNastran.bdf.field_writer_8.set_string8_blank_if_default', 'set_string8_blank_if_default', (['offt', '"""GGG"""'], {}), "(offt, 'GGG')\n", (9713, 
9726), False, 'from pyNastran.bdf.field_writer_8 import set_string8_blank_if_default\n'), ((4474, 4481), 'numpy.linalg.norm', 'norm', (['x'], {}), '(x)\n', (4478, 4481), False, 'from numpy.linalg import norm\n'), ((8365, 8380), 'numpy.linalg.norm', 'norm', (['L'], {'axis': '(1)'}), '(L, axis=1)\n', (8369, 8380), False, 'from numpy.linalg import norm\n'), ((4301, 4343), 'pyNastran.bdf.bdf_interface.assign_type.double_or_blank', 'double_or_blank', (['card', '(6)', '"""x2"""', 'x2_default'], {}), "(card, 6, 'x2', x2_default)\n", (4316, 4343), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((4368, 4410), 'pyNastran.bdf.bdf_interface.assign_type.double_or_blank', 'double_or_blank', (['card', '(7)', '"""x3"""', 'x3_default'], {}), "(card, 7, 'x3', x3_default)\n", (4383, 4410), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank, double_or_blank, integer_double_or_blank, string_or_blank\n'), ((9926, 9944), 'pyNastran.bdf.field_writer_8.print_card_8', 'print_card_8', (['card'], {}), '(card)\n', (9938, 9944), False, 'from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default\n'), ((10003, 10022), 'pyNastran.bdf.field_writer_16.print_card_16', 'print_card_16', (['card'], {}), '(card)\n', (10016, 10022), False, 'from pyNastran.bdf.field_writer_16 import print_card_16\n')]
from zen.isIterable import isIterable

def sortBy(*args,**keywords):
    sorted=[]
    sel=[]
    if len(args)==1:
        if isIterable(args[0]) and len(args[0])>1:
            sorted=list(args[0][-1])
            sel=list(args[0][0])
            inputType=type(args[0][0]).__name__
        else:
            return
    elif len(args)>1:
        sorted=list(args[-1])
        sel=list(args[0])
        inputType=type(args[0]).__name__
    l=sorted[:]
    for s in l:
        if s not in sel:
            sorted.remove(s)
    unsorted=sel[0:0]
    for s in sel:
        if s not in sorted:
            unsorted=unsorted+[s]
    if inputType=='str' or inputType=='unicode':
        unsorted=''.join(unsorted)
        sorted=''.join(sorted)
    elif inputType=='dict':
        sorted=list(sorted)
        unsorted=list(unsorted)
    else:
        exec('sorted='+inputType+'(sorted)')
        exec('unsorted='+inputType+'(unsorted)')
    return(sorted+unsorted)
[ "zen.isIterable.isIterable" ]
[((115, 134), 'zen.isIterable.isIterable', 'isIterable', (['args[0]'], {}), '(args[0])\n', (125, 134), False, 'from zen.isIterable import isIterable\n')]
from django.contrib.auth import get_user_model
from django.db import models

User = get_user_model()

ACCOUNT_TYPES = (
    (1, 'Admin'),
    (2, 'Teacher'),
    (3, 'Student')
)


class Account(models.Model):
    class Meta:
        verbose_name = 'Account'
        verbose_name_plural = 'Accounts'

    user = models.OneToOneField(to=User, on_delete=models.CASCADE)
    account_type = models.PositiveSmallIntegerField(choices=ACCOUNT_TYPES)
    first_name = models.CharField(max_length=50)
    last_name = models.CharField(max_length=50)
    middle_name = models.CharField(max_length=50)
    birth_date = models.DateField()
    school_name = models.CharField(max_length=100)

    def __str__(self):
        return self.first_name + self.last_name
[ "django.db.models.OneToOneField", "django.db.models.CharField", "django.contrib.auth.get_user_model", "django.db.models.PositiveSmallIntegerField", "django.db.models.DateField" ]
[((84, 100), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (98, 100), False, 'from django.contrib.auth import get_user_model\n'), ((312, 367), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'to': 'User', 'on_delete': 'models.CASCADE'}), '(to=User, on_delete=models.CASCADE)\n', (332, 367), False, 'from django.db import models\n'), ((387, 442), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'choices': 'ACCOUNT_TYPES'}), '(choices=ACCOUNT_TYPES)\n', (419, 442), False, 'from django.db import models\n'), ((460, 491), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (476, 491), False, 'from django.db import models\n'), ((508, 539), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (524, 539), False, 'from django.db import models\n'), ((558, 589), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (574, 589), False, 'from django.db import models\n'), ((607, 625), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (623, 625), False, 'from django.db import models\n'), ((644, 676), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (660, 676), False, 'from django.db import models\n')]
#!/usr/bin/env python3
#coding: utf-8
### 1st line allows to execute this script by typing only its name in terminal, with no need to precede it with the python command
### 2nd line declaring source code charset should be not necessary but for exemple pydoc request it

__doc__ = "This is the program centerpiece,but need to be imported by other modules to be used"#information describing the purpose of this module
__status__ = "Development"#should be one of 'Prototype' 'Development' 'Production' 'Deprecated' 'Release'
__version__ = "3.0.0"# version number,date or about last modification made compared to the previous version
__license__ = "public domain"# ref to an official existing License
__date__ = "2017"#started creation date / year month day
__author__ = "N-zo <EMAIL>"#the creator origin of this prog,
__maintainer__ = "Nzo"#person who curently makes improvements, replacing the author
__credits__ = []#passed mainteners and any other helpers
__contact__ = "<EMAIL>"# current contact adress for more info about this file

### import the required modules
from commonz.convert import text

CONTROL_CHAR= '\x07\x0b\x09\x0a\x0d\x7f\x08\x1b\x0c\x1b'
SYSTEM_CHAR="/\:*<>|.~"
QUOTE_CHAR="'"
DOUBLE_QUOTE_CHAR='"'
SUBSTITUTE_SPACE='_'
SUBSTITUTE_DOT='-'

### Misc
#import random
#import string # chain of char manipulation
#import collections # provide alternatives specialized datatypes (dict, list, set, and tupl) ex: deque>list-like container with fast appends and pops on either end
#from collections import deque # storage de queue de données
#import urllib # open a URL the same way you open a local file

### asynchron
#import asyncio #provides the basic infrastructure for writing asynchronous programs.
#import threading # constructs higher-level threading interfaces
#import queue #when information must be exchanged safely between multiple threads.
#import signal #Set handlers for asynchronous events
#import select # is low level module,Users are encouraged to use the selectors module instead
#import selectors # built upon the select module,allows high-level I/O multiplexing
#import asyncio# built upon selectors
#import multiprocessing


class Main():
    """main software application as object"""

    def __init__(self):
        """initialization of the application"""
        pass

    def get_correct_name(self,old_name,old_ext_names,cfg,sp_char):
        #print(base_name,ext_names,max_ext)
        uncode=cfg["uncode"]
        ascii_only=cfg["ascii"]
        spaces=cfg["spaces"]
        case=cfg["case"]
        strip_list=cfg["strip"]
        merge_list=cfg["merge"]
        eraze_list=cfg["eraze"]
        convert_tabl=cfg["conv"]
        max_dots=cfg["ext"]
        convert_ext_tabl=cfg["conv_ext"]

        names_list= self.undots(old_name,old_ext_names,max_dots)
        #print(names_list)
        names_qantum=len(names_list)
        for name_index in range(names_qantum) :
            name=None
            new_name=names_list[name_index]
            if uncode :
                new_name=self.uncode(new_name)
            while not new_name==name :
                name=new_name
                new_name=self.normalize(new_name,sp_char)
                if ascii_only :
                    new_name=self.ascii(new_name)
                if not spaces :
                    new_name=self.unspaces(new_name)
                if case!=0 :
                    new_name=self.set_case(new_name,case)
                if strip_list!=() :
                    new_name=self.strip(new_name,strip_list)
                if merge_list!=() :
                    new_name=self.merge(new_name,merge_list)
                if eraze_list!=() :
                    new_name=self.eraze(new_name,eraze_list)
                if convert_tabl!={} :
                    new_name=self.convert(new_name,convert_tabl)
            names_list[name_index]=new_name

        if convert_ext_tabl and names_qantum>1 :
            for ext_index in range(1,names_qantum-1) :
                ext_name=names_list[ext_index]
                new_ext_name=self.convert(ext_name,convert_ext_tabl)
                names_list[ext_index]=new_ext_name

        return names_list

    def undots(self,base_name,ext_names,max_ext):
        """allow a certain number of .ext and merge the others"""
        #print(base_name,ext_names,max_ext)
        ext_quantum=len(ext_names)
        if ext_quantum==0 :
            return [base_name]
        elif max_ext==-1 :
            return [base_name]+ext_names
        elif max_ext==0 :
            return [SUBSTITUTE_DOT.join([base_name]+ext_names)]
        else :
            if ext_quantum>max_ext :
                base_name=SUBSTITUTE_DOT.join([base_name]+ext_names[:0-max_ext])
                ext_names=ext_names[0-max_ext:]
                return [base_name]+ext_names
            else :
                return [base_name]+ext_names

    def uncode(self,name):
        """decode URL special characters"""
        return text.decode_url(name)

    def ascii(self,name):
        """Replace special accents characters by their closest ASCII equivalents"""
        return text.to_ascii(name)

    def unspaces(self,name):
        """replace blank space by _"""
        name=name.replace(' ',SUBSTITUTE_SPACE)
        return name

    def set_case(self,name,change):
        """switch caracteres case (1=upper,-1=lower,0=no change)"""
        if change>0 :
            new_name=name.upper()
        elif change<0 :
            new_name=name.lower()
        else :
            new_name=name
        return new_name

    def strip(self,name,strip_list):
        """delete the specified characters if name beginning or ending with"""
        new_name=name
        while True :
            for s in strip_list :
                new_name=new_name.strip(s)
            if name==new_name :
                break
            else :
                name=new_name
        return new_name

    def merge(self,name,merge_list):
        """merge the specified characters"""
        new_name=name
        while True :
            for s in merge_list:
                new_name=new_name.replace(s+s,s)
            if name==new_name :
                break
            else :
                name=new_name
        return new_name

    def eraze(self,name,eraze_list):
        """remove the specified characters"""
        new_name=name
        while True :
            for s in eraze_list :
                new_name=new_name.replace(s,'')
            if name==new_name :
                break
            else :
                name=new_name
        return new_name

    def convert(self,name,convert_tabl):
        """convert the specific characters by other characters"""
        new_name=name
        while True :
            for n in convert_tabl :
                for o in convert_tabl[n] :
                    new_name=new_name.replace(o,n)
            if name==new_name :
                break
            else :
                name=new_name
        return new_name

    def normalize(self,name,sp_char):
        """substitute special system characters"""
        bad_chars=CONTROL_CHAR+SYSTEM_CHAR+QUOTE_CHAR+DOUBLE_QUOTE_CHAR
        for c in bad_chars :
            name=name.replace(c,sp_char)
        #print(name)
        return name
[ "commonz.convert.text.decode_url", "commonz.convert.text.to_ascii" ]
[((4380, 4401), 'commonz.convert.text.decode_url', 'text.decode_url', (['name'], {}), '(name)\n', (4395, 4401), False, 'from commonz.convert import text\n'), ((4516, 4535), 'commonz.convert.text.to_ascii', 'text.to_ascii', (['name'], {}), '(name)\n', (4529, 4535), False, 'from commonz.convert import text\n')]
import re
import math


class Exemple:
    def __init__(self):
        self.inputs = []
        self.outputs = []


class Dataset:
    def __init__(self, filename):
        try:
            file = open(filename, "r")
        except:
            raise ValueError("Cannot open dataset")
        else:
            line = file.readline()
            nbrs = [int(x) for x in line.split()]  # Parse read line in number list
            self.nb_exemple = int(nbrs[0])  # Get nb exemples, inputs and outputs
            self.nb_input = int(nbrs[1])
            self.nb_output = int(nbrs[2])
            self.exemples = []
            for i in range(0, self.nb_exemple):  # Read each exemples
                inputline = file.readline()
                outputline = file.readline()
                self.loadData(inputline, outputline)
            file.close()

    # Create an new exemples with 2 readed lines
    # (1 for inputs and 1 for expected outputs)
    def loadData(self, inputline, outputline):
        ex = Exemple()
        inputs = [float(x) for x in inputline.split()]
        outputs = [float(x) for x in outputline.split()]
        ex.inputs.extend(inputs)
        ex.outputs.extend(outputs)
        self.exemples.append(ex)

    def computeMean(self, index):
        sum = 0
        for i in range(0, len(self.exemples)):
            sum += self.exemples[i].inputs[index]
        return sum / len(self.exemples)

    def computeStandardDev(self, index, mean):
        sum = 0
        for i in range(0, len(self.exemples)):
            sum += math.pow(self.exemples[i].inputs[index] - mean, 2)
        return math.sqrt(sum / len(self.exemples))

    def normalize(self):
        for i in range(0, len(self.exemples[0].inputs)):
            mean = self.computeMean(i)
            s = self.computeStandardDev(i, mean)
            for j in range(0, len(self.exemples)):
                self.exemples[j].inputs[i] = (self.exemples[j].inputs[i] - mean) / s
[ "math.pow" ]
[((1546, 1596), 'math.pow', 'math.pow', (['(self.exemples[i].inputs[index] - mean)', '(2)'], {}), '(self.exemples[i].inputs[index] - mean, 2)\n', (1554, 1596), False, 'import math\n')]
""" Environments and wrappers for Sonic training. """ import gym import numpy as np import gzip import retro import os from baselines.common.atari_wrappers import WarpFrame, FrameStack # from retro_contest.local import make import logging import retro_contest import pandas as pd train_states = pd.read_csv('../data/sonic_env/sonic-train.csv') validation_states = pd.read_csv('../data/sonic_env/sonic-validation.csv') logger = logging.getLogger(__name__) def make(game, state, discrete_actions=False, bk2dir=None, max_episode_steps=4000): """Make the competition environment.""" print('game:', game, 'state:', state) use_restricted_actions = retro.ACTIONS_FILTERED if discrete_actions: use_restricted_actions = retro.ACTIONS_DISCRETE try: env = retro.make(game, state, scenario='contest', use_restricted_actions=use_restricted_actions) except Exception: env = retro.make(game, state, use_restricted_actions=use_restricted_actions) if bk2dir: env.auto_record(bk2dir) env = retro_contest.StochasticFrameSkip(env, n=4, stickprob=0.25) env = gym.wrappers.TimeLimit(env, max_episode_steps=max_episode_steps) return env def make_env(stack=True, scale_rew=True): """ Create an environment with some standard wrappers. """ start_state = train_states.sample().iloc[0] env = make(game=start_state.game, state=start_state.state, max_episode_steps=600) env = SonicDiscretizer(env) # env = AllowBacktracking(env) env = RandomGameReset(env) env = EpisodeInfo(env) if scale_rew: env = RewardScaler(env) env = WarpFrame(env) return env class SonicDiscretizer(gym.ActionWrapper): """ Wrap a gym-retro environment and make it use discrete actions for the Sonic game. """ def __init__(self, env): super(SonicDiscretizer, self).__init__(env) buttons = ["B", "A", "MODE", "START", "UP", "DOWN", "LEFT", "RIGHT", "C", "Y", "X", "Z"] actions = [['LEFT'], ['RIGHT'], ['LEFT', 'DOWN'], ['RIGHT', 'DOWN'], ['DOWN'], ['DOWN', 'B'], ['B']] self._actions = [] for action in actions: arr = np.array([False] * 12) for button in action: arr[buttons.index(button)] = True self._actions.append(arr) self.action_space = gym.spaces.Discrete(len(self._actions)) def action(self, a): # pylint: disable=W0221 return self._actions[a].copy() class RewardScaler(gym.RewardWrapper): """ Bring rewards to a reasonable scale for PPO. This is incredibly important and effects performance drastically. """ def reward(self, reward): return reward * 0.01 class AllowBacktracking(gym.Wrapper): """ Use deltas in max(X) as the reward, rather than deltas in X. This way, agents are not discouraged too heavily from exploring backwards if there is no way to advance head-on in the level. 
""" def __init__(self, env): super(AllowBacktracking, self).__init__(env) self._cur_x = 0 self._max_x = 0 def reset(self, **kwargs): # pylint: disable=E0202 self._cur_x = 0 self._max_x = 0 return self.env.reset(**kwargs) def step(self, action): # pylint: disable=E0202 obs, rew, done, info = self.env.step(action) self._cur_x += rew rew = max(0, self._cur_x - self._max_x) self._max_x = max(self._max_x, self._cur_x) return obs, rew, done, info class RandomGameReset(gym.Wrapper): def __init__(self, env, state=None): """Reset game to a random level.""" super().__init__(env) self.state = state def step(self, action): return self.env.step(action) def reset(self): # Reset to a random level (but don't change the game) try: game = self.env.unwrapped.gamename except AttributeError: logger.warning('no game name') pass else: game_path = retro.get_game_path(game) # pick a random state that's in the same game game_states = train_states[train_states.game == game] # if self.state: # game_states = game_states[game_states.state.str.contains(self.state)] # Load choice = game_states.sample().iloc[0] state = choice.state + '.state' logger.info('reseting env %s to %s %s', self.unwrapped.rank, game, state) with gzip.open(os.path.join(game_path, state), 'rb') as fh: self.env.unwrapped.initial_state = fh.read() return self.env.reset() class EpisodeInfo(gym.Wrapper): """ Add information about episode end and total final reward """ def __init__(self, env): super(EpisodeInfo, self).__init__(env) self._ep_len = 0 self._ep_rew_total = 0 def reset(self, **kwargs): # pylint: disable=E0202 self._ep_len = 0 self._ep_rew_total = 0 return self.env.reset(**kwargs) def step(self, action): # pylint: disable=E0202 obs, rew, done, info = self.env.step(action) self._ep_len += 1 self._ep_rew_total += rew if done: if "episode" not in info: info = {"episode": {"l": self._ep_len, "r": self._ep_rew_total}} elif isinstance(info, dict): if "l" not in info["episode"]: info["episode"]["l"] = self._ep_len if "r" not in info["episode"]: info["episode"]["r"] = self._ep_rew_total return obs, rew, done, info
[ "pandas.read_csv", "baselines.common.atari_wrappers.WarpFrame", "retro.make", "retro.get_game_path", "numpy.array", "retro_contest.StochasticFrameSkip", "gym.wrappers.TimeLimit", "os.path.join", "logging.getLogger" ]
[((297, 345), 'pandas.read_csv', 'pd.read_csv', (['"""../data/sonic_env/sonic-train.csv"""'], {}), "('../data/sonic_env/sonic-train.csv')\n", (308, 345), True, 'import pandas as pd\n'), ((366, 419), 'pandas.read_csv', 'pd.read_csv', (['"""../data/sonic_env/sonic-validation.csv"""'], {}), "('../data/sonic_env/sonic-validation.csv')\n", (377, 419), True, 'import pandas as pd\n'), ((430, 457), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (447, 457), False, 'import logging\n'), ((1041, 1100), 'retro_contest.StochasticFrameSkip', 'retro_contest.StochasticFrameSkip', (['env'], {'n': '(4)', 'stickprob': '(0.25)'}), '(env, n=4, stickprob=0.25)\n', (1074, 1100), False, 'import retro_contest\n'), ((1111, 1175), 'gym.wrappers.TimeLimit', 'gym.wrappers.TimeLimit', (['env'], {'max_episode_steps': 'max_episode_steps'}), '(env, max_episode_steps=max_episode_steps)\n', (1133, 1175), False, 'import gym\n'), ((1625, 1639), 'baselines.common.atari_wrappers.WarpFrame', 'WarpFrame', (['env'], {}), '(env)\n', (1634, 1639), False, 'from baselines.common.atari_wrappers import WarpFrame, FrameStack\n'), ((786, 881), 'retro.make', 'retro.make', (['game', 'state'], {'scenario': '"""contest"""', 'use_restricted_actions': 'use_restricted_actions'}), "(game, state, scenario='contest', use_restricted_actions=\n use_restricted_actions)\n", (796, 881), False, 'import retro\n'), ((913, 983), 'retro.make', 'retro.make', (['game', 'state'], {'use_restricted_actions': 'use_restricted_actions'}), '(game, state, use_restricted_actions=use_restricted_actions)\n', (923, 983), False, 'import retro\n'), ((2189, 2211), 'numpy.array', 'np.array', (['([False] * 12)'], {}), '([False] * 12)\n', (2197, 2211), True, 'import numpy as np\n'), ((4057, 4082), 'retro.get_game_path', 'retro.get_game_path', (['game'], {}), '(game)\n', (4076, 4082), False, 'import retro\n'), ((4552, 4582), 'os.path.join', 'os.path.join', (['game_path', 'state'], {}), '(game_path, state)\n', (4564, 4582), False, 'import os\n')]
# This file mainly exists to allow python setup.py test to work.
import os, sys

from django.test.utils import get_runner
from django.conf import settings

os.environ["DJANGO_SETTINGS_MODULE"] = "test_project.settings"
test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, test_dir)


def runtests():
    try:
        from django import setup
        setup()
    except ImportError:
        pass
    test_runner = get_runner(settings)
    failures = test_runner().run_tests([])
    sys.exit(failures)


if __name__ == "__main__":
    runtests()
[ "django.setup", "os.path.realpath", "sys.path.insert", "django.test.utils.get_runner", "sys.exit" ]
[((292, 320), 'sys.path.insert', 'sys.path.insert', (['(0)', 'test_dir'], {}), '(0, test_dir)\n', (307, 320), False, 'import os, sys\n'), ((454, 474), 'django.test.utils.get_runner', 'get_runner', (['settings'], {}), '(settings)\n', (464, 474), False, 'from django.test.utils import get_runner\n'), ((522, 540), 'sys.exit', 'sys.exit', (['failures'], {}), '(failures)\n', (530, 540), False, 'import os, sys\n'), ((263, 289), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (279, 289), False, 'import os, sys\n'), ((390, 397), 'django.setup', 'setup', ([], {}), '()\n', (395, 397), False, 'from django import setup\n')]
from mstrio.utils.helper import response_handler def get_object_info(connection, id, type, error_msg=None): """Get information for a specific object in a specific project; if you do not specify a project ID, you get information for the object in all projects. You identify the object with the object ID and object type. You specify the object type as a query parameter; possible values for object type are provided in EnumDSSXMLObjectTypes. Args: connection(object): MicroStrategy connection object returned by `connection.Connection()`. id (str): Object ID type (int): One of EnumDSSXMLObjectTypes. Ex. 34 (User or usergroup), 44 (Security Role), 32 (Project), 8 (Folder), 36 (type of I-Server configuration) error_msg (string, optional): Custom Error Message for Error Handling Returns: HTTP response object returned by the MicroStrategy REST server. """ headers = {} if type == "project": headers = {'X-MSTR-ProjectID': None} response = connection.session.get(url=connection.base_url + '/api/objects/' + id, headers=headers, params={'type': type}) if not response.ok: if error_msg is None: error_msg = "Error getting information for the specific object." response_handler(response, error_msg, whitelist=[('ERR001', 500)]) return response def delete_object(connection, id, type, error_msg=None): """Get information for a specific object in a specific project; if you do not specify a project ID, you get information for the object in all projects. You identify the object with the object ID and object type. You specify the object type as a query parameter; possible values for object type are provided in EnumDSSXMLObjectTypes. Args: connection(object): MicroStrategy connection object returned by `connection.Connection()`. id (str): Object ID type (int): One of EnumDSSXMLObjectTypes. Ex. 34 (User or usergroup), 44 (Security Role), 32 (Project), 8 (Folder), 36 (type of I-Server configuration) error_msg (string, optional): Custom Error Message for Error Handling Returns: HTTP response object returned by the MicroStrategy REST server. """ headers = {} if type == 32: headers = {'X-MSTR-ProjectID': None} response = connection.session.delete(url=connection.base_url + '/api/objects/' + id, headers=headers, params={'type': type}) if not response.ok: if error_msg is None: error_msg = "Error deleting object {}".format(id) response_handler(response, error_msg) return response def update_object(connection, id, body, type, error_msg=None, verbose=True): """Get information for a specific object in a specific project; if you do not specify a project ID, you get information for the object in all projects. You identify the object with the object ID and object type. You specify the object type as a query parameter; possible values for object type are provided in EnumDSSXMLObjectTypes. Args: connection(object): MicroStrategy connection object returned by `connection.Connection()`. id (str): Object ID body: (object): body of the response type (int): One of EnumDSSXMLObjectTypes. Ex. 34 (User or usergroup), 44 (Security Role), 32 (Project), 8 (Folder), 36 (type of I-Server configuration) error_msg (string, optional): Custom Error Message for Error Handling Returns: HTTP response object returned by the MicroStrategy REST server. 
""" headers = {} if type == 32: headers = {'X-MSTR-ProjectID': str(id)} response = connection.session.put(url=connection.base_url + '/api/objects/' + id, headers=headers, params={'type': type}, json=body) if not response.ok: if error_msg is None: error_msg = "Error updating object {}".format(id) response_handler(response, error_msg, verbose=verbose) return response def copy_object(connection, id, name, folder_id, type, error_msg=None): """Create a copy of a specific object. You identify the object with the object ID and object type. You obtain the authorization token needed to execute the request using POST /auth/login; you obtain the project ID using GET /projects. You pass the authorization token and the project ID in the request header. You specify the object ID in the path of the request and object type as a query parameter; possible values for object type are provided in EnumDSSXMLObjectTypes. You specify the name and location (folder ID) of the new object in the body of the request. If you do not specify a new name, a default name is generated, such as 'Old Name (1)'. If you do not specify a folder ID, the object is saved in the same folder as the source object. Args: connection(object): MicroStrategy connection object returned by `connection.Connection()`. id (str): Object ID type (int): One of EnumDSSXMLObjectTypes. Ex. 34 (User or usergroup), 44 (Security Role), 32 (Project), 8 (Folder), 36 (type of I-Server configuration) error_msg (string, optional): Custom Error Message for Error Handling Returns: HTTP response object returned by the MicroStrategy REST server. """ connection._validate_project_selected() body = { "name": name, "folderId": folder_id } response = connection.session.post(url=connection.base_url + '/api/objects/' + id + '/copy', params={'type': type}, json=body) if not response.ok: if error_msg is None: error_msg = "Error creating a copy of object {}".format(id) response_handler(response, error_msg) return response def get_vldb_settings(connection, id, type, project_id=None, error_msg=None): """Get vldb settings for an object. Args: connection(object): MicroStrategy connection object returned by `connection.Connection()`. id (str): Object ID type (int): DssXmlTypeReportDefinition(3) for Dataset and DssXmlTypeDocumentDefinition(55) for document/dossier project_id: project ID error_msg (string, optional): Custom Error Message for Error Handling Returns: HTTP response object returned by the MicroStrategy REST server """ headers = {} if project_id: headers = {'X-MSTR-ProjectID': project_id} else: connection._validate_project_selected() headers = {'X-MSTR-ProjectID': connection.project_id} response = connection.session.get(url=f"{connection.base_url}/api/objects/{id}/vldb/propertySets", params={'type': type}, headers=headers) if not response.ok: if error_msg is None: error_msg = "Error getting VLDB settings for object '{}'".format(id) response_handler(response, error_msg) return response def delete_vldb_settings(connection, id, type, project_id=None, error_msg=None): """Delete all customized vldb settings in one object, this operation will reset all vldb settings to default. Args: connection(object): MicroStrategy connection object returned by `connection.Connection()`. 
id (str): Object ID type (int): DssXmlTypeReportDefinition(3) for Dataset and DssXmlTypeDocumentDefinition(55) for document/dossier project_id: project ID error_msg (string, optional): Custom Error Message for Error Handling Returns: HTTP response object returned by the MicroStrategy REST server """ headers = {} if project_id: headers = {'X-MSTR-ProjectID': project_id} else: connection._validate_project_selected() headers = {'X-MSTR-ProjectID': connection.project_id} response = connection.session.delete(url=f"{connection.base_url}/api/objects/{id}/vldb/propertySets", params={'type': type}, headers=headers) if not response.ok: if error_msg is None: error_msg = "Error resetting all custom vldb settings to default for object '{}'".format(id) response_handler(response, error_msg) return response def set_vldb_settings(connection, id, type, name, body, project_id=None, error_msg=None): """Set vldb settings for one property set in one object. Args: connection(object): MicroStrategy connection object returned by `connection.Connection()`. id (str): Object ID type (int): DssXmlTypeReportDefinition(3) for Dataset and DssXmlTypeDocumentDefinition(55) for document/dossier name: property set name project_id: project ID body: [{"name": "string", "value": {}}] error_msg (string, optional): Custom Error Message for Error Handling Returns: HTTP response object returned by the MicroStrategy REST server """ headers = {} if project_id: headers = {'X-MSTR-ProjectID': project_id} else: connection._validate_project_selected() headers = {'X-MSTR-ProjectID': connection.project_id} response = connection.session.put(url=f"{connection.base_url}/api/objects/{id}/vldb/propertySets/{name}", params={'type': type}, headers=headers, json=body) if not response.ok: if error_msg is None: error_msg = "Error setting vldb settings for object '{}'".format(id) response_handler(response, error_msg) return response def create_search_objects_instance(connection, name=None, pattern=4, domain=2, root=None, object_type=None, error_msg=None): """Create a search instance. Args: connection(object): MicroStrategy connection object returned by `connection.Connection()`. name: expression used with the pattern to do the search pattern: specifies the nature of the search. Possible values are defined in the EnumDSSXMLSearchTypes javadoc domain: search domain. specifies the domain/scope of the search. Possible values are defined in the EnumDSSXMLSearchDomain javadoc root: folder ID of the root in which the search is done object_type: specifies the type of objects to be searched. Possible values are defined in the EnumDSSObjectType javadoc error_msg (string, optional): Custom Error Message for Error Handling Returns: HTTP response returned by the MicroStrategy REST server """ connection._validate_project_selected() response = connection.session.post(url=f"{connection.base_url}/api/objects", headers={'X-MSTR-ProjectID': connection.project_id}, params={'name': name, 'pattern': pattern, 'domain': domain, 'root': root, 'type': object_type}) if not response.ok: if error_msg is None: error_msg = "Error getting objects." response_handler(response, error_msg) return response def get_objects(connection, search_id, offset=0, limit=-1, get_tree=False, error_msg=None): """Get list of objects from metadata. Args: connection(object): MicroStrategy connection object returned by `connection.Connection()`. search_id: ID for the results of a previous search stored in I-Server memory offset: starting point within the collection of returned results. 
Used to control paging behavior. limit: maximum number of items returned for a single request. Used to control paging behavior get_tree: specifies that the search results should be displayed in a tree structure instead of a list. The ancestors of the searched objects are the nodes and the searched objects are the leaves of the tree. error_msg (string, optional): Custom Error Message for Error Handling Returns: HTTP response returned by the MicroStrategy REST server """ connection._validate_project_selected response = connection.session.get(url=f"{connection.base_url}/api/objects", headers={'X-MSTR-ProjectID': connection.project_id}, params={'searchId': search_id, 'offset': offset, 'limit': limit, 'getTree': get_tree}) if not response.ok: if error_msg is None: error_msg = "Error getting objects." response_handler(response, error_msg) return response def get_objects_async(future_session, connection, search_id, offset=0, limit=-1, get_tree=False, error_msg=None): """Get list of objects from metadata asynchronously. Args: connection(object): MicroStrategy connection object returned by `connection.Connection()`. search_id: ID for the results of a previous search stored in I-Server memory offset: starting point within the collection of returned results. Used to control paging behavior. limit: maximum number of items returned for a single request. Used to control paging behavior. get_tree: specifies that the search results should be displayed in a tree structure instead of a list. The ancestors of the searched objects are the nodes and the searched objects are the leaves of the tree. Returns: HTTP response returned by the MicroStrategy REST server """ connection._validate_project_selected() url = connection.base_url + '/api/objects' headers = {'X-MSTR-ProjectID': connection.project_id} params = {'searchId': search_id, 'offset': offset, 'limit': limit, 'getTree': get_tree} future = future_session.get(url=url, headers=headers, params=params) return future
[ "mstrio.utils.helper.response_handler" ]
[((1403, 1469), 'mstrio.utils.helper.response_handler', 'response_handler', (['response', 'error_msg'], {'whitelist': "[('ERR001', 500)]"}), "(response, error_msg, whitelist=[('ERR001', 500)])\n", (1419, 1469), False, 'from mstrio.utils.helper import response_handler\n'), ((2829, 2866), 'mstrio.utils.helper.response_handler', 'response_handler', (['response', 'error_msg'], {}), '(response, error_msg)\n', (2845, 2866), False, 'from mstrio.utils.helper import response_handler\n'), ((4326, 4380), 'mstrio.utils.helper.response_handler', 'response_handler', (['response', 'error_msg'], {'verbose': 'verbose'}), '(response, error_msg, verbose=verbose)\n', (4342, 4380), False, 'from mstrio.utils.helper import response_handler\n'), ((6231, 6268), 'mstrio.utils.helper.response_handler', 'response_handler', (['response', 'error_msg'], {}), '(response, error_msg)\n', (6247, 6268), False, 'from mstrio.utils.helper import response_handler\n'), ((7463, 7500), 'mstrio.utils.helper.response_handler', 'response_handler', (['response', 'error_msg'], {}), '(response, error_msg)\n', (7479, 7500), False, 'from mstrio.utils.helper import response_handler\n'), ((8809, 8846), 'mstrio.utils.helper.response_handler', 'response_handler', (['response', 'error_msg'], {}), '(response, error_msg)\n', (8825, 8846), False, 'from mstrio.utils.helper import response_handler\n'), ((10226, 10263), 'mstrio.utils.helper.response_handler', 'response_handler', (['response', 'error_msg'], {}), '(response, error_msg)\n', (10242, 10263), False, 'from mstrio.utils.helper import response_handler\n'), ((11930, 11967), 'mstrio.utils.helper.response_handler', 'response_handler', (['response', 'error_msg'], {}), '(response, error_msg)\n', (11946, 11967), False, 'from mstrio.utils.helper import response_handler\n'), ((13579, 13616), 'mstrio.utils.helper.response_handler', 'response_handler', (['response', 'error_msg'], {}), '(response, error_msg)\n', (13595, 13616), False, 'from mstrio.utils.helper import response_handler\n')]
# tf2.0目标检测之csv 2 Tfrecord from __future__ import division from __future__ import print_function from __future__ import absolute_import import tensorflow as tf import numpy as np import random import cv2 from tqdm import tqdm import datetime import os import time from detection.models.detectors import faster_rcnn from bjod_data import ZiptrainDataset, Zipvaluedata os.environ['CUDA_VISIBLE_DEVICES'] = '0,1' random.seed(234) def save_images(image, boxes, filen, label_pre, pth=''): image = image.numpy() image = image.astype(np.uint8) if image.shape[0] == 1: image = np.squeeze(image, axis=0) cv2.cvtColor(image, cv2.COLOR_RGB2BGR, image) n = boxes.shape[0] if not n: print("no instances to display ") for i in range(n): color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) if not np.any(boxes[i]): continue x1, y1, x2, y2, _ = boxes[i] y1, x1, y2, x2 = int(y1), int(x1), int(y2), int(x2) cv2.rectangle(image, (x1, y1), (x2, y2), color, 2, 8, 0) cv2.putText(image, str(label_pre[i]), (int((x1 + x2) / 2), int((y1 + y2) / 2)), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 1) filen = filen[:-4] + '.jpg' cv2.imwrite(os.path.join(pth, filen), image) def voc_ap(rec, prec, use_07_metric=False): """Compute VOC AP given precision and recall. If use_07_metric is true, uses the VOC 07 11-point method (default:False). """ if use_07_metric: # 11 point metric ap = 0. for t in np.arange(0., 1.1, 0.1): if np.sum(rec >= t) == 0: p = 0 else: p = np.max(prec[rec >= t]) ap = ap + p / 11. else: # correct AP calculation # first append sentinel values at the end mrec = np.concatenate(([0.], rec, [1.])) mpre = np.concatenate(([0.], prec, [0.])) # compute the precision envelope for i in range(mpre.size - 1, 0, -1): mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) # to calculate area under PR curve, look for points # where X axis (recall) changes value i = np.where(mrec[1:] != mrec[:-1])[0] # and sum (\Delta recall) * prec ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) return ap def eval_modle(gt_dict, pre__dict, npos, categaries): result = {} for key in range(categaries): b1 = pre__dict[str(key + 1)] if not b1: continue image_ids = [tt[0] for tt in b1] confidence = np.array([tt[5] for tt in b1]) BB = np.array([tt[1:5] for tt in b1]) sorted_ind = np.argsort(-confidence) BB = BB[sorted_ind, :] image_ids = [image_ids[x] for x in sorted_ind] nd = len(image_ids) tp = np.zeros(nd) fp = np.zeros(nd) for d in range(nd): R = gt_dict[str(key + 1)][str(image_ids[d])] # ann bb = BB[d, :].astype(float) ovmax = -np.inf # 负数最大值 BBGT = R['bbox'].astype(float) if BBGT.size > 0: # compute overlaps # intersection ixmin = np.maximum(BBGT[:, 0], bb[0]) iymin = np.maximum(BBGT[:, 1], bb[1]) ixmax = np.minimum(BBGT[:, 2], bb[2]) iymax = np.minimum(BBGT[:, 3], bb[3]) iw = np.maximum(ixmax - ixmin + 1., 0.) ih = np.maximum(iymax - iymin + 1., 0.) inters = iw * ih # union uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) + (BBGT[:, 2] - BBGT[:, 0] + 1.) * (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters) overlaps = inters / uni ovmax = np.max(overlaps) # 最大重叠 jmax = np.argmax(overlaps) # 最大重合率对应的gt # 计算tp 和 fp个数 if ovmax > 0.5: if not R['det'][jmax]: tp[d] = 1. R['det'][jmax] = 1 # 标记为已检测 else: fp[d] = 1. else: fp[d] = 1. 
# compute precision recall fp = np.cumsum(fp) # np.cumsum() 按位累加 tp = np.cumsum(tp) rec = tp / np.maximum(float(npos[str(key + 1)]), np.finfo(np.float64).eps) prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps) ap = voc_ap(rec, prec) print('key+1: ', str(key + 1)) print(ap) result[str(key + 1)] = ap return result class DistTrainer: def __init__(self, dis_strategy, ori_model, categaries, nu_devices, maxap=0.0, epoch=[0, 200], trian_dir=''): self.dist_strategy = dis_strategy self.model = ori_model self.num_devices = nu_devices self.trian_dir = trian_dir self.epochs = epoch self.maxap = maxap self.total_categaries = categaries self.optimizer = tf.keras.optimizers.SGD(1e-4, momentum=0.9, nesterov=True) # @tf.function def train_step(self, batch_imgs, batch_metas, batch_bboxes, batch_labels): with tf.GradientTape() as tape: rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss = \ self.model((batch_imgs, batch_metas, batch_bboxes, batch_labels), training=True) loss_value = rpn_class_loss + rpn_bbox_loss + rcnn_class_loss + rcnn_bbox_loss grads = tape.gradient(loss_value, self.model.trainable_variables) self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables)) return loss_value, rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss def dist_train_step(self, batch_imgs, batch_metas, batch_bboxes, batch_labels): per_loss_value, per_rpn_class_loss, per_rpn_bbox_loss, per_rcnn_class_loss, per_rcnn_bbox_loss = self.dist_strategy.run( self.train_step, args=(batch_imgs, batch_metas, batch_bboxes, batch_labels)) loss_value = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_loss_value, axis=None) rpn_class_loss = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_rpn_class_loss, axis=None) rpn_bbox_loss = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_rpn_bbox_loss, axis=None) rcnn_class_loss = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_rcnn_class_loss, axis=None) rcnn_bbox_loss = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_rcnn_bbox_loss, axis=None) return loss_value, rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss # @tf.function def test_step(self, batch_imgs, batch_metas, batch_bboxes, batch_labels): rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss = \ self.model((batch_imgs, batch_metas, batch_bboxes, batch_labels), training=True) loss_value = rpn_class_loss + rpn_bbox_loss + rcnn_class_loss + rcnn_bbox_loss return loss_value, rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss def dist_test_step(self, batch_imgs, batch_metas, batch_bboxes, batch_labels): per_loss_value, per_rpn_class_loss, per_rpn_bbox_loss, per_rcnn_class_loss, per_rcnn_bbox_loss = self.dist_strategy.run( self.test_step, args=(batch_imgs, batch_metas, batch_bboxes, batch_labels)) loss_value = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_loss_value, axis=None) rpn_class_loss = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_rpn_class_loss, axis=None) rpn_bbox_loss = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_rpn_bbox_loss, axis=None) rcnn_class_loss = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_rcnn_class_loss, axis=None) rcnn_bbox_loss = self.dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_rcnn_bbox_loss, axis=None) return loss_value, rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss def eval_step(self, val_dts): gt_bbox_dict = {str(key + 1): {} for key in range(self.total_categaries)} pre_bbox_dict = {str(key + 1): [] for key in 
range(self.total_categaries)} # number of true positive npos = {str(key + 1): 0 for key in range(self.total_categaries)} img_ids = 10000 for val_imgs, val_metas, val_bboxes, val_labels, val_file in tqdm(val_dts): # if random.randint(1, 100) > 11: # continue val_labels = tf.squeeze(val_labels, axis=0).numpy() val_bboxes = tf.squeeze(val_bboxes, 0).numpy().astype(np.int) val_imgs = tf.squeeze(tf.cast(val_imgs, tf.float32), axis=0) val_metas = tf.squeeze(tf.cast(val_metas, tf.float32), axis=0) val_predict_bboxes = [] for key in range(self.total_categaries): tmp_box = [val_bboxes[indcc] for indcc, cc in enumerate(val_labels) if cc == key + 1] det = [False] * len(tmp_box) gt_bbox_dict[str(key + 1)][str(img_ids)] = {'bbox': np.array(tmp_box), 'det': det} npos[str(key + 1)] += len(tmp_box) proposals = self.model.simple_test_rpn(val_imgs, val_metas) res = self.model.simple_test_bboxes(val_imgs, val_metas, proposals) for pos in range(res['class_ids'].shape[0]): label_id = int(res['class_ids'][pos]) y1, x1, y2, x2 = [int(num) for num in list(res['rois'][pos])] tmp_list2 = [img_ids, x1, y1, x2, y2, float(res['scores'][pos])] val_predict_bboxes.append([x1, y1, x2, y2, float(res['scores'][pos])]) pre_bbox_dict[str(label_id)].append(tmp_list2) img_ids += 1 return gt_bbox_dict, pre_bbox_dict, npos def rd_save_images(self, val_dts, img_save_path): for val_imgs, val_metas, _, _, val_file in tqdm(val_dts): if random.randint(1, 100) > 10: continue val_file = val_file.numpy()[0].decode('utf-8') val_imgs = tf.squeeze(tf.cast(val_imgs, tf.float32), axis=0) val_metas = tf.squeeze(tf.cast(val_metas, tf.float32), axis=0) val_predict_bboxes = [] proposals = self.model.simple_test_rpn(val_imgs, val_metas) res = self.model.simple_test_bboxes(val_imgs, val_metas, proposals) for pos in range(res['class_ids'].shape[0]): y1, x1, y2, x2 = [int(num) for num in list(res['rois'][pos])] val_predict_bboxes.append([x1, y1, x2, y2, float(res['scores'][pos])]) save_images(val_imgs, np.array(val_predict_bboxes), val_file, res['class_ids'], img_save_path) def train(self, train_ds, val_ds): # train model train_dts = self.dist_strategy.experimental_distribute_dataset(train_ds) val_dts = self.dist_strategy.experimental_distribute_dataset(val_ds) log_dir = self.trian_dir + 'log_dir/' + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") file_writer = tf.summary.create_file_writer(log_dir) index_step = 0 time_start = time.time() input_flag = False for epoch in range(self.epochs[0], self.epochs[1]): loss_history = np.zeros(5) for (step, inputs) in enumerate(train_dts): batch_imgs, batch_metas, batch_bboxes, batch_labels, filen = inputs labels_tmp = tf.cast(tf.fill([1,1000], -1), tf.int32) if self.num_devices > 1: for per_tensor in batch_labels.values: if tf.equal(per_tensor, labels_tmp).numpy().all(): input_flag = True print("skip this batch") break else: pass if input_flag: input_flag = False continue else: if tf.equal(batch_labels, labels_tmp).numpy().all(): continue loss_value, rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss \ = self.dist_train_step(batch_imgs, batch_metas, batch_bboxes, batch_labels) loss_history[0] += loss_value loss_history[1] += rpn_class_loss loss_history[2] += rpn_bbox_loss loss_history[3] += rcnn_class_loss loss_history[4] += rcnn_bbox_loss if step % 10 == 0: if step: loss_history = loss_history / 10 print('time:', round(time.time() - time_start, 2), 'epoch:', epoch, ', step:', step, ', loss:', loss_history) time_start = time.time() with file_writer.as_default(): tf.summary.scalar('total_loss', 
loss_history[0], step=index_step) tf.summary.scalar('rpn_class_loss', loss_history[1], step=index_step) tf.summary.scalar('rpn_bbox_loss', loss_history[2], step=index_step) tf.summary.scalar('rcnn_class_loss', loss_history[3], step=index_step) tf.summary.scalar('rcnn_bbox_loss', loss_history[4], step=index_step) file_writer.flush() index_step += 1 loss_history = np.zeros(5) else: print('epoch:', epoch, ', step:', step, ', loss:', loss_history) if step % 2000 == 0: weights_dir = self.trian_dir + 'weights/epoch_' + str(epoch) + '_loss_' sum_loss = 0 for (val_step, inputs_val) in tqdm(enumerate(val_dts)): batch_imgs, batch_metas, batch_bboxes, batch_labels, filen = inputs_val labels_tmp = tf.cast(tf.fill([1, 1000], -1), tf.int32) if self.num_devices > 1: for per_tensor in batch_labels.values: if tf.equal(per_tensor, labels_tmp).numpy().all(): input_flag = True print("skip this batch") break else: pass if input_flag: input_flag = False continue else: if tf.equal(batch_labels, labels_tmp).numpy().all(): continue loss_value, rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss \ = self.dist_test_step(batch_imgs, batch_metas, batch_bboxes, batch_labels) sum_loss += loss_value print('sum_loss: ', sum_loss) if sum_loss > self.maxap: self.maxap = sum_loss self.model.save_weights(weights_dir + str(tf.round(sum_loss, 2).numpy()) + '.h5') if __name__ == '__main__': PER_GPU_BATCHSIZE = 1 dist_strategy = tf.distribute.MirroredStrategy( cross_device_ops=tf.distribute.HierarchicalCopyAllReduce() ) num_devices = dist_strategy.num_replicas_in_sync print('Number of devices: {}'.format(num_devices)) GLOBAL_BATCHSIZE = num_devices * PER_GPU_BATCHSIZE with dist_strategy.scope(): if os.name == 'nt': tf_record_path = 'D:/datasets/bjod/' train_dir = './train_dir/' else: tf_record_path = '../../../../../datasets/bjod/' train_dir = './train_dir/' crop_size = [992, 992, 3] train_datasets = ZiptrainDataset(tf_record_path, 1, 96, crop_size=crop_size, roi_path='D:/datasets/bjod/roi_test/').prepare(True) val_train = Zipvaluedata(tf_record_path, crop_size=crop_size).prepare('train_api_97.record') val_test = Zipvaluedata(tf_record_path, crop_size=crop_size).prepare('val_api_19.record') one_imgs, one_metas, one_bboxes, one_labels, _ = next(iter(val_train)) one_imgs = tf.expand_dims(tf.cast(one_imgs[0], tf.float32), axis=0) one_metas = tf.expand_dims(tf.cast(one_metas[0], tf.float32), axis=0) model = faster_rcnn.FasterRCNN(num_classes=2) _ = model((one_imgs, one_metas), training=False) model_ori = faster_rcnn.FasterRCNN(num_classes=81) _ = model_ori((one_imgs, one_metas), training=False) model_ori.load_weights('./weights/faster_rcnn_resnet101_fpn_coco2017_map35.h5', by_name=True) model.backbone.set_weights(model_ori.backbone.get_weights()) model.neck.set_weights(model_ori.neck.get_weights()) model.rpn_head.set_weights(model_ori.rpn_head.get_weights()) model.roi_align.set_weights(model_ori.roi_align.get_weights()) # print(cc) model.summary() def __init__(self, dis_strategy, ori_model, categaries, nu_devices, maxap=0.0, epoch=[0, 200], trian_dir=''): self.dist_strategy = dis_strategy self.model = ori_model self.num_devices = nu_devices self.trian_dir = trian_dir self.epochs = epoch self.maxap = maxap self.total_categaries = categaries self.optimizer = tf.keras.optimizers.SGD(1e-4, momentum=0.9, nesterov=True) trainer = DistTrainer(dis_strategy=dist_strategy, ori_model=model, categaries=2, nu_devices=1, maxap=0.0, epoch=[0, 200], trian_dir=train_dir ) trainer.train(train_datasets, val_test)
[ "numpy.sum", "numpy.maximum", "numpy.argmax", "tensorflow.keras.optimizers.SGD", "bjod_data.ZiptrainDataset", "numpy.argsort", "bjod_data.Zipvaluedata", "numpy.arange", "cv2.rectangle", "os.path.join", "random.randint", "cv2.cvtColor", "numpy.cumsum", "tensorflow.cast", "numpy.max", "random.seed", "numpy.finfo", "tensorflow.squeeze", "tensorflow.distribute.HierarchicalCopyAllReduce", "datetime.datetime.now", "tensorflow.equal", "tqdm.tqdm", "numpy.minimum", "tensorflow.summary.scalar", "tensorflow.round", "numpy.squeeze", "numpy.concatenate", "detection.models.detectors.faster_rcnn.FasterRCNN", "numpy.zeros", "time.time", "numpy.any", "tensorflow.fill", "numpy.where", "numpy.array", "tensorflow.summary.create_file_writer", "tensorflow.GradientTape" ]
[((413, 429), 'random.seed', 'random.seed', (['(234)'], {}), '(234)\n', (424, 429), False, 'import random\n'), ((624, 669), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2BGR', 'image'], {}), '(image, cv2.COLOR_RGB2BGR, image)\n', (636, 669), False, 'import cv2\n'), ((594, 619), 'numpy.squeeze', 'np.squeeze', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (604, 619), True, 'import numpy as np\n'), ((1020, 1076), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x1, y1)', '(x2, y2)', 'color', '(2)', '(8)', '(0)'], {}), '(image, (x1, y1), (x2, y2), color, 2, 8, 0)\n', (1033, 1076), False, 'import cv2\n'), ((1272, 1296), 'os.path.join', 'os.path.join', (['pth', 'filen'], {}), '(pth, filen)\n', (1284, 1296), False, 'import os\n'), ((1569, 1593), 'numpy.arange', 'np.arange', (['(0.0)', '(1.1)', '(0.1)'], {}), '(0.0, 1.1, 0.1)\n', (1578, 1593), True, 'import numpy as np\n'), ((1853, 1888), 'numpy.concatenate', 'np.concatenate', (['([0.0], rec, [1.0])'], {}), '(([0.0], rec, [1.0]))\n', (1867, 1888), True, 'import numpy as np\n'), ((1902, 1938), 'numpy.concatenate', 'np.concatenate', (['([0.0], prec, [0.0])'], {}), '(([0.0], prec, [0.0]))\n', (1916, 1938), True, 'import numpy as np\n'), ((2293, 2338), 'numpy.sum', 'np.sum', (['((mrec[i + 1] - mrec[i]) * mpre[i + 1])'], {}), '((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n', (2299, 2338), True, 'import numpy as np\n'), ((2598, 2628), 'numpy.array', 'np.array', (['[tt[5] for tt in b1]'], {}), '([tt[5] for tt in b1])\n', (2606, 2628), True, 'import numpy as np\n'), ((2642, 2674), 'numpy.array', 'np.array', (['[tt[1:5] for tt in b1]'], {}), '([tt[1:5] for tt in b1])\n', (2650, 2674), True, 'import numpy as np\n'), ((2696, 2719), 'numpy.argsort', 'np.argsort', (['(-confidence)'], {}), '(-confidence)\n', (2706, 2719), True, 'import numpy as np\n'), ((2848, 2860), 'numpy.zeros', 'np.zeros', (['nd'], {}), '(nd)\n', (2856, 2860), True, 'import numpy as np\n'), ((2874, 2886), 'numpy.zeros', 'np.zeros', (['nd'], {}), '(nd)\n', (2882, 2886), True, 'import numpy as np\n'), ((4236, 4249), 'numpy.cumsum', 'np.cumsum', (['fp'], {}), '(fp)\n', (4245, 4249), True, 'import numpy as np\n'), ((4283, 4296), 'numpy.cumsum', 'np.cumsum', (['tp'], {}), '(tp)\n', (4292, 4296), True, 'import numpy as np\n'), ((4990, 5050), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', (['(0.0001)'], {'momentum': '(0.9)', 'nesterov': '(True)'}), '(0.0001, momentum=0.9, nesterov=True)\n', (5013, 5050), True, 'import tensorflow as tf\n'), ((8437, 8450), 'tqdm.tqdm', 'tqdm', (['val_dts'], {}), '(val_dts)\n', (8441, 8450), False, 'from tqdm import tqdm\n'), ((9951, 9964), 'tqdm.tqdm', 'tqdm', (['val_dts'], {}), '(val_dts)\n', (9955, 9964), False, 'from tqdm import tqdm\n'), ((11099, 11137), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['log_dir'], {}), '(log_dir)\n', (11128, 11137), True, 'import tensorflow as tf\n'), ((11182, 11193), 'time.time', 'time.time', ([], {}), '()\n', (11191, 11193), False, 'import time\n'), ((16604, 16641), 'detection.models.detectors.faster_rcnn.FasterRCNN', 'faster_rcnn.FasterRCNN', ([], {'num_classes': '(2)'}), '(num_classes=2)\n', (16626, 16641), False, 'from detection.models.detectors import faster_rcnn\n'), ((16720, 16758), 'detection.models.detectors.faster_rcnn.FasterRCNN', 'faster_rcnn.FasterRCNN', ([], {'num_classes': '(81)'}), '(num_classes=81)\n', (16742, 16758), False, 'from detection.models.detectors import faster_rcnn\n'), ((789, 811), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 
255)\n', (803, 811), False, 'import random\n'), ((813, 835), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (827, 835), False, 'import random\n'), ((837, 859), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (851, 859), False, 'import random\n'), ((876, 892), 'numpy.any', 'np.any', (['boxes[i]'], {}), '(boxes[i])\n', (882, 892), True, 'import numpy as np\n'), ((2051, 2083), 'numpy.maximum', 'np.maximum', (['mpre[i - 1]', 'mpre[i]'], {}), '(mpre[i - 1], mpre[i])\n', (2061, 2083), True, 'import numpy as np\n'), ((2203, 2234), 'numpy.where', 'np.where', (['(mrec[1:] != mrec[:-1])'], {}), '(mrec[1:] != mrec[:-1])\n', (2211, 2234), True, 'import numpy as np\n'), ((5161, 5178), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (5176, 5178), True, 'import tensorflow as tf\n'), ((11308, 11319), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (11316, 11319), True, 'import numpy as np\n'), ((15467, 15508), 'tensorflow.distribute.HierarchicalCopyAllReduce', 'tf.distribute.HierarchicalCopyAllReduce', ([], {}), '()\n', (15506, 15508), True, 'import tensorflow as tf\n'), ((16468, 16500), 'tensorflow.cast', 'tf.cast', (['one_imgs[0]', 'tf.float32'], {}), '(one_imgs[0], tf.float32)\n', (16475, 16500), True, 'import tensorflow as tf\n'), ((16545, 16578), 'tensorflow.cast', 'tf.cast', (['one_metas[0]', 'tf.float32'], {}), '(one_metas[0], tf.float32)\n', (16552, 16578), True, 'import tensorflow as tf\n'), ((17688, 17748), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', (['(0.0001)'], {'momentum': '(0.9)', 'nesterov': '(True)'}), '(0.0001, momentum=0.9, nesterov=True)\n', (17711, 17748), True, 'import tensorflow as tf\n'), ((1609, 1625), 'numpy.sum', 'np.sum', (['(rec >= t)'], {}), '(rec >= t)\n', (1615, 1625), True, 'import numpy as np\n'), ((1692, 1714), 'numpy.max', 'np.max', (['prec[rec >= t]'], {}), '(prec[rec >= t])\n', (1698, 1714), True, 'import numpy as np\n'), ((3220, 3249), 'numpy.maximum', 'np.maximum', (['BBGT[:, 0]', 'bb[0]'], {}), '(BBGT[:, 0], bb[0])\n', (3230, 3249), True, 'import numpy as np\n'), ((3274, 3303), 'numpy.maximum', 'np.maximum', (['BBGT[:, 1]', 'bb[1]'], {}), '(BBGT[:, 1], bb[1])\n', (3284, 3303), True, 'import numpy as np\n'), ((3328, 3357), 'numpy.minimum', 'np.minimum', (['BBGT[:, 2]', 'bb[2]'], {}), '(BBGT[:, 2], bb[2])\n', (3338, 3357), True, 'import numpy as np\n'), ((3382, 3411), 'numpy.minimum', 'np.minimum', (['BBGT[:, 3]', 'bb[3]'], {}), '(BBGT[:, 3], bb[3])\n', (3392, 3411), True, 'import numpy as np\n'), ((3433, 3469), 'numpy.maximum', 'np.maximum', (['(ixmax - ixmin + 1.0)', '(0.0)'], {}), '(ixmax - ixmin + 1.0, 0.0)\n', (3443, 3469), True, 'import numpy as np\n'), ((3489, 3525), 'numpy.maximum', 'np.maximum', (['(iymax - iymin + 1.0)', '(0.0)'], {}), '(iymax - iymin + 1.0, 0.0)\n', (3499, 3525), True, 'import numpy as np\n'), ((3835, 3851), 'numpy.max', 'np.max', (['overlaps'], {}), '(overlaps)\n', (3841, 3851), True, 'import numpy as np\n'), ((3883, 3902), 'numpy.argmax', 'np.argmax', (['overlaps'], {}), '(overlaps)\n', (3892, 3902), True, 'import numpy as np\n'), ((8697, 8726), 'tensorflow.cast', 'tf.cast', (['val_imgs', 'tf.float32'], {}), '(val_imgs, tf.float32)\n', (8704, 8726), True, 'import tensorflow as tf\n'), ((8771, 8801), 'tensorflow.cast', 'tf.cast', (['val_metas', 'tf.float32'], {}), '(val_metas, tf.float32)\n', (8778, 8801), True, 'import tensorflow as tf\n'), ((9981, 10003), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (9995, 10003), 
False, 'import random\n'), ((10128, 10157), 'tensorflow.cast', 'tf.cast', (['val_imgs', 'tf.float32'], {}), '(val_imgs, tf.float32)\n', (10135, 10157), True, 'import tensorflow as tf\n'), ((10202, 10232), 'tensorflow.cast', 'tf.cast', (['val_metas', 'tf.float32'], {}), '(val_metas, tf.float32)\n', (10209, 10232), True, 'import tensorflow as tf\n'), ((10686, 10714), 'numpy.array', 'np.array', (['val_predict_bboxes'], {}), '(val_predict_bboxes)\n', (10694, 10714), True, 'import numpy as np\n'), ((16000, 16103), 'bjod_data.ZiptrainDataset', 'ZiptrainDataset', (['tf_record_path', '(1)', '(96)'], {'crop_size': 'crop_size', 'roi_path': '"""D:/datasets/bjod/roi_test/"""'}), "(tf_record_path, 1, 96, crop_size=crop_size, roi_path=\n 'D:/datasets/bjod/roi_test/')\n", (16015, 16103), False, 'from bjod_data import ZiptrainDataset, Zipvaluedata\n'), ((16175, 16224), 'bjod_data.Zipvaluedata', 'Zipvaluedata', (['tf_record_path'], {'crop_size': 'crop_size'}), '(tf_record_path, crop_size=crop_size)\n', (16187, 16224), False, 'from bjod_data import ZiptrainDataset, Zipvaluedata\n'), ((16275, 16324), 'bjod_data.Zipvaluedata', 'Zipvaluedata', (['tf_record_path'], {'crop_size': 'crop_size'}), '(tf_record_path, crop_size=crop_size)\n', (16287, 16324), False, 'from bjod_data import ZiptrainDataset, Zipvaluedata\n'), ((4354, 4374), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (4362, 4374), True, 'import numpy as np\n'), ((4420, 4440), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (4428, 4440), True, 'import numpy as np\n'), ((8550, 8580), 'tensorflow.squeeze', 'tf.squeeze', (['val_labels'], {'axis': '(0)'}), '(val_labels, axis=0)\n', (8560, 8580), True, 'import tensorflow as tf\n'), ((9116, 9133), 'numpy.array', 'np.array', (['tmp_box'], {}), '(tmp_box)\n', (9124, 9133), True, 'import numpy as np\n'), ((11027, 11050), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11048, 11050), False, 'import datetime\n'), ((11497, 11519), 'tensorflow.fill', 'tf.fill', (['[1, 1000]', '(-1)'], {}), '([1, 1000], -1)\n', (11504, 11519), True, 'import tensorflow as tf\n'), ((12898, 12909), 'time.time', 'time.time', ([], {}), '()\n', (12907, 12909), False, 'import time\n'), ((13574, 13585), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (13582, 13585), True, 'import numpy as np\n'), ((8614, 8639), 'tensorflow.squeeze', 'tf.squeeze', (['val_bboxes', '(0)'], {}), '(val_bboxes, 0)\n', (8624, 8639), True, 'import tensorflow as tf\n'), ((12993, 13058), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""total_loss"""', 'loss_history[0]'], {'step': 'index_step'}), "('total_loss', loss_history[0], step=index_step)\n", (13010, 13058), True, 'import tensorflow as tf\n'), ((13087, 13156), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""rpn_class_loss"""', 'loss_history[1]'], {'step': 'index_step'}), "('rpn_class_loss', loss_history[1], step=index_step)\n", (13104, 13156), True, 'import tensorflow as tf\n'), ((13185, 13253), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""rpn_bbox_loss"""', 'loss_history[2]'], {'step': 'index_step'}), "('rpn_bbox_loss', loss_history[2], step=index_step)\n", (13202, 13253), True, 'import tensorflow as tf\n'), ((13282, 13352), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""rcnn_class_loss"""', 'loss_history[3]'], {'step': 'index_step'}), "('rcnn_class_loss', loss_history[3], step=index_step)\n", (13299, 13352), True, 'import tensorflow as tf\n'), ((13381, 13450), 'tensorflow.summary.scalar', 'tf.summary.scalar', 
(['"""rcnn_bbox_loss"""', 'loss_history[4]'], {'step': 'index_step'}), "('rcnn_bbox_loss', loss_history[4], step=index_step)\n", (13398, 13450), True, 'import tensorflow as tf\n'), ((14080, 14102), 'tensorflow.fill', 'tf.fill', (['[1, 1000]', '(-1)'], {}), '([1, 1000], -1)\n', (14087, 14102), True, 'import tensorflow as tf\n'), ((12057, 12091), 'tensorflow.equal', 'tf.equal', (['batch_labels', 'labels_tmp'], {}), '(batch_labels, labels_tmp)\n', (12065, 12091), True, 'import tensorflow as tf\n'), ((12742, 12753), 'time.time', 'time.time', ([], {}), '()\n', (12751, 12753), False, 'import time\n'), ((11657, 11689), 'tensorflow.equal', 'tf.equal', (['per_tensor', 'labels_tmp'], {}), '(per_tensor, labels_tmp)\n', (11665, 11689), True, 'import tensorflow as tf\n'), ((14745, 14779), 'tensorflow.equal', 'tf.equal', (['batch_labels', 'labels_tmp'], {}), '(batch_labels, labels_tmp)\n', (14753, 14779), True, 'import tensorflow as tf\n'), ((14265, 14297), 'tensorflow.equal', 'tf.equal', (['per_tensor', 'labels_tmp'], {}), '(per_tensor, labels_tmp)\n', (14273, 14297), True, 'import tensorflow as tf\n'), ((15295, 15316), 'tensorflow.round', 'tf.round', (['sum_loss', '(2)'], {}), '(sum_loss, 2)\n', (15303, 15316), True, 'import tensorflow as tf\n')]
import os
import argparse
import json
import sys
import re
import requests
from json import JSONEncoder
from bs4 import BeautifulSoup


class Song:
    def __init__(self, title: str, artist: str, album: str, release: str, lyrics: str, url: str):
        self.title = title
        self.artist = artist
        self.album = album
        self.release = release
        self.lyrics = lyrics
        self.url = url


class SongEncoder(JSONEncoder):
    def default(self, o):
        return o.__dict__


class SearchResult:
    def __init__(self, result):
        result = result['result']
        self.link = result['url'].encode('ascii', 'ignore').decode("utf-8")
        self.title = result['title'].encode('ascii', 'ignore').decode("utf-8")
        self.artist = result['primary_artist']['name'].encode('ascii', 'ignore').decode("utf-8")

    def __str__(self):
        return f'{self.title} by {self.artist}'


def eprint(*args, **kwargs):
    """ Print the given message to stderr """
    print(*args, file=sys.stderr, **kwargs)


def search(term: str) -> str:
    """ Search for a term """
    original_term = term
    term = re.sub('[^a-zA-Z0-9 ]+', '', term).strip()
    term = re.sub(' ', '+', term)
    search_page = requests.get(f'https://genius.com/api/search/song?page=1&q={term}')
    if search_page.status_code != 200:
        eprint(f'Status code {search_page.status_code} for search term "{original_term}" indicates failure')
        return None
    parsed_page = json.loads(search_page.text)
    search_results = parsed_page['response']['sections'][0]['hits']
    results = [SearchResult(result) for result in search_results]
    if len(results) == 0:
        eprint(f'No songs found for query {original_term}')
        sys.exit(1)
    if len(results) == 1:
        print(f'Only result found is {results[0]}')
        return results[0].link
    for num in range(1, min(16, len(results) + 1)):
        print(f'{num}. {results[num - 1]}')
    result = results[int(input('Select a number: ')) - 1]
    return result.link


def download_url(url: str):
    """ Retrieve the page contents and parse out the lyrics from a given url """
    if not url.startswith('https://genius.com/'):
        eprint(f'URL "{url}" does not appear to be a valid genius lyrics url')
        return None
    result = requests.get(url)
    if result.status_code != 200:
        eprint(f'Status code {result.status_code} for url "{url}" indicates failure')
        return None
    parsed_page = BeautifulSoup(result.text.replace(u"\u2018", "'").replace(u"\u2019", "'"), 'html.parser')
    song_lyrics = parsed_page.find_all('div', attrs={'class': 'lyrics'})[0].text.strip()
    song_data = json.loads([line for line in result.text.split('\n') if 'TRACKING_DATA' in line][0].split('=')[1].strip(' ;'))
    song_artist = song_data['Primary Artist'].encode('ascii', 'ignore').decode("utf-8")
    song_title = song_data['Title'].encode('ascii', 'ignore').decode("utf-8")
    song_album = (song_data['Primary Album'] if song_data['Primary Album'] is not None else 'Unknown Album').encode('ascii', 'ignore').decode("utf-8")
    song_release = song_data['Release Date'].encode('ascii', 'ignore').decode("utf-8")
    song = Song(title=song_title, artist=song_artist, album=song_album, lyrics=song_lyrics, url=url, release=song_release)
    return song


def save_to_file(song: Song):
    filename = './lyrics/genius_'
    for c in song.title.lower():
        if c.isalpha() or c.isdigit():
            filename = filename + c
        if c == ' ':
            filename = filename + '-'
    filename = filename + '_'
    for c in song.artist.lower():
        if c.isalpha() or c.isdigit():
            filename = filename + c
        if c == ' ':
            filename = filename + '-'
    filename = filename + '.json'
    if not os.path.isdir('./lyrics'):
        os.mkdir('./lyrics')
    f = open(filename, 'w')
    json.dump(song, f, indent=4, cls=SongEncoder)
    f.close()
    print('Lyrics saved to ' + filename)


def main():
    parser = argparse.ArgumentParser(description='Scraper for lyrics from genius.com')
    parser.add_argument('term', metavar='TERM', help='Term to search for', nargs='+')
    parser.add_argument('--no-save', help='Whether or not to save the data to a file', action='store_false')
    args = parser.parse_args()
    if args.term is not None:
        term = ' '.join(args.term)
        if term.startswith('https://genius.com/'):
            song = download_url(term)
        else:
            song = download_url(search(term))
        if args.no_save:
            save_to_file(song)
        else:
            print('Title: ' + song.title)
            print('Artist: ' + song.artist)
            print('Album: ' + song.album + '\n')
            print(song.lyrics)
    else:
        eprint('No URL given, doing nothing')


if __name__ == '__main__':
    main()
[ "json.dump", "os.mkdir", "argparse.ArgumentParser", "json.loads", "os.path.isdir", "requests.get", "re.sub", "sys.exit" ]
[((1200, 1222), 're.sub', 're.sub', (['""" """', '"""+"""', 'term'], {}), "(' ', '+', term)\n", (1206, 1222), False, 'import re\n'), ((1241, 1308), 'requests.get', 'requests.get', (['f"""https://genius.com/api/search/song?page=1&q={term}"""'], {}), "(f'https://genius.com/api/search/song?page=1&q={term}')\n", (1253, 1308), False, 'import requests\n'), ((1495, 1523), 'json.loads', 'json.loads', (['search_page.text'], {}), '(search_page.text)\n', (1505, 1523), False, 'import json\n'), ((2325, 2342), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2337, 2342), False, 'import requests\n'), ((3914, 3959), 'json.dump', 'json.dump', (['song', 'f'], {'indent': '(4)', 'cls': 'SongEncoder'}), '(song, f, indent=4, cls=SongEncoder)\n', (3923, 3959), False, 'import json\n'), ((4042, 4115), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Scraper for lyrics from genius.com"""'}), "(description='Scraper for lyrics from genius.com')\n", (4065, 4115), False, 'import argparse\n'), ((1752, 1763), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1760, 1763), False, 'import sys\n'), ((3826, 3851), 'os.path.isdir', 'os.path.isdir', (['"""./lyrics"""'], {}), "('./lyrics')\n", (3839, 3851), False, 'import os\n'), ((3861, 3881), 'os.mkdir', 'os.mkdir', (['"""./lyrics"""'], {}), "('./lyrics')\n", (3869, 3881), False, 'import os\n'), ((1146, 1180), 're.sub', 're.sub', (['"""[^a-zA-Z0-9 ]+"""', '""""""', 'term'], {}), "('[^a-zA-Z0-9 ]+', '', term)\n", (1152, 1180), False, 'import re\n')]
################################################################################
#
# Copyright (c) 2019, the Perspective Authors.
#
# This file is part of the Perspective library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from pytest import raises
from perspective import PerspectiveError, PerspectiveViewer,\
    PerspectiveWidget, Plugin


class TestPlugin:

    def test_plugin_widget_load_grid(self):
        data = {"a": [1, 2, 3], "b": ["a", "b", "c"]}
        widget = PerspectiveWidget(data, plugin=Plugin.GRID)
        assert widget.plugin == "datagrid"

    def test_plugin_widget_load(self):
        data = {"a": [1, 2, 3], "b": ["a", "b", "c"]}
        widget = PerspectiveWidget(data, plugin=Plugin.XBAR)
        assert widget.plugin == "X Bar"

    def test_plugin_widget_setattr(self):
        data = {"a": [1, 2, 3], "b": ["a", "b", "c"]}
        widget = PerspectiveWidget(data)
        widget.plugin = Plugin.XBAR
        assert widget.plugin == "X Bar"

    def test_plugin_widget_load_invalid(self):
        data = {"a": [1, 2, 3], "b": ["a", "b", "c"]}
        with raises(PerspectiveError):
            PerspectiveWidget(data, plugin="?")

    def test_plugin_widget_setattr_invalid(self):
        data = {"a": [1, 2, 3], "b": ["a", "b", "c"]}
        widget = PerspectiveWidget(data)
        with raises(PerspectiveError):
            widget.plugin = "?"

    def test_plugin_widget_init_all(self):
        data = {"a": [1, 2, 3], "b": ["a", "b", "c"]}
        for plugin in Plugin:
            widget = PerspectiveWidget(data, plugin=plugin)
            assert widget.plugin == plugin.value

    def test_plugin_widget_set_all(self):
        data = {"a": [1, 2, 3], "b": ["a", "b", "c"]}
        widget = PerspectiveWidget(data)
        for plugin in Plugin:
            widget.plugin = plugin
            assert widget.plugin == plugin.value

    def test_plugin_viewer_load(self):
        viewer = PerspectiveViewer(plugin=Plugin.XBAR)
        assert viewer.plugin == "X Bar"

    def test_plugin_viewer_setattr(self):
        viewer = PerspectiveViewer()
        viewer.plugin = Plugin.XBAR
        assert viewer.plugin == "X Bar"

    def test_plugin_viewer_init_all(self):
        for plugin in Plugin:
            viewer = PerspectiveViewer(plugin=plugin)
            assert viewer.plugin == plugin.value

    def test_plugin_viewer_set_all(self):
        viewer = PerspectiveViewer()
        for plugin in Plugin:
            viewer.plugin = plugin
            assert viewer.plugin == plugin.value
[ "pytest.raises", "perspective.PerspectiveViewer", "perspective.PerspectiveWidget" ]
[((566, 609), 'perspective.PerspectiveWidget', 'PerspectiveWidget', (['data'], {'plugin': 'Plugin.GRID'}), '(data, plugin=Plugin.GRID)\n', (583, 609), False, 'from perspective import PerspectiveError, PerspectiveViewer, PerspectiveWidget, Plugin\n'), ((764, 807), 'perspective.PerspectiveWidget', 'PerspectiveWidget', (['data'], {'plugin': 'Plugin.XBAR'}), '(data, plugin=Plugin.XBAR)\n', (781, 807), False, 'from perspective import PerspectiveError, PerspectiveViewer, PerspectiveWidget, Plugin\n'), ((962, 985), 'perspective.PerspectiveWidget', 'PerspectiveWidget', (['data'], {}), '(data)\n', (979, 985), False, 'from perspective import PerspectiveError, PerspectiveViewer, PerspectiveWidget, Plugin\n'), ((1373, 1396), 'perspective.PerspectiveWidget', 'PerspectiveWidget', (['data'], {}), '(data)\n', (1390, 1396), False, 'from perspective import PerspectiveError, PerspectiveViewer, PerspectiveWidget, Plugin\n'), ((1820, 1843), 'perspective.PerspectiveWidget', 'PerspectiveWidget', (['data'], {}), '(data)\n', (1837, 1843), False, 'from perspective import PerspectiveError, PerspectiveViewer, PerspectiveWidget, Plugin\n'), ((2015, 2052), 'perspective.PerspectiveViewer', 'PerspectiveViewer', ([], {'plugin': 'Plugin.XBAR'}), '(plugin=Plugin.XBAR)\n', (2032, 2052), False, 'from perspective import PerspectiveError, PerspectiveViewer, PerspectiveWidget, Plugin\n'), ((2153, 2172), 'perspective.PerspectiveViewer', 'PerspectiveViewer', ([], {}), '()\n', (2170, 2172), False, 'from perspective import PerspectiveError, PerspectiveViewer, PerspectiveWidget, Plugin\n'), ((2486, 2505), 'perspective.PerspectiveViewer', 'PerspectiveViewer', ([], {}), '()\n', (2503, 2505), False, 'from perspective import PerspectiveError, PerspectiveViewer, PerspectiveWidget, Plugin\n'), ((1177, 1201), 'pytest.raises', 'raises', (['PerspectiveError'], {}), '(PerspectiveError)\n', (1183, 1201), False, 'from pytest import raises\n'), ((1215, 1250), 'perspective.PerspectiveWidget', 'PerspectiveWidget', (['data'], {'plugin': '"""?"""'}), "(data, plugin='?')\n", (1232, 1250), False, 'from perspective import PerspectiveError, PerspectiveViewer, PerspectiveWidget, Plugin\n'), ((1411, 1435), 'pytest.raises', 'raises', (['PerspectiveError'], {}), '(PerspectiveError)\n', (1417, 1435), False, 'from pytest import raises\n'), ((1618, 1656), 'perspective.PerspectiveWidget', 'PerspectiveWidget', (['data'], {'plugin': 'plugin'}), '(data, plugin=plugin)\n', (1635, 1656), False, 'from perspective import PerspectiveError, PerspectiveViewer, PerspectiveWidget, Plugin\n'), ((2344, 2376), 'perspective.PerspectiveViewer', 'PerspectiveViewer', ([], {'plugin': 'plugin'}), '(plugin=plugin)\n', (2361, 2376), False, 'from perspective import PerspectiveError, PerspectiveViewer, PerspectiveWidget, Plugin\n')]
import numpy as np

from liegroups.numpy import _base
from liegroups.numpy.so2 import SO2


class SE2(_base.SpecialEuclideanBase):
    """Homogeneous transformation matrix in :math:`SE(2)` using active (alibi) transformations.

    .. math::
        SE(2) &= \\left\\{ \\mathbf{T}=
            \\begin{bmatrix} \\mathbf{C} & \\mathbf{r} \\\\ \\mathbf{0}^T & 1 \\end{bmatrix}
            \\in \\mathbb{R}^{3 \\times 3} ~\\middle|~
            \\mathbf{C} \\in SO(2), \\mathbf{r} \\in \\mathbb{R}^2 \\right\\} \\\\
        \\mathfrak{se}(2) &= \\left\\{ \\boldsymbol{\\Xi} =
            \\boldsymbol{\\xi}^\\wedge \\in \\mathbb{R}^{3 \\times 3} ~\\middle|~
            \\boldsymbol{\\xi}=
            \\begin{bmatrix} \\boldsymbol{\\rho} \\\\ \\phi \\end{bmatrix}
            \\in \\mathbb{R}^3, \\boldsymbol{\\rho} \\in \\mathbb{R}^2, \\phi \\in \\mathbb{R} \\right\\}

    :cvar ~liegroups.SE2.dim: Dimension of the rotation matrix.
    :cvar ~liegroups.SE2.dof: Underlying degrees of freedom (i.e., dimension of the tangent space).
    :ivar rot: Storage for the rotation matrix :math:`\\mathbf{C}`.
    :ivar trans: Storage for the translation vector :math:`\\mathbf{r}`.
    """
    dim = 3
    """Dimension of the transformation matrix."""

    dof = 3
    """Underlying degrees of freedom (i.e., dimension of the tangent space)."""

    RotationType = SO2

    def adjoint(self):
        """Adjoint matrix of the transformation.

        .. math::
            \\text{Ad}(\\mathbf{T}) =
            \\begin{bmatrix} \\mathbf{C} & 1^\\wedge \\mathbf{r} \\\\ \\mathbf{0}^T & 1 \\end{bmatrix}
            \\in \\mathbb{R}^{3 \\times 3}
        """
        rot_part = self.rot.as_matrix()
        trans_part = np.array([self.trans[1], -self.trans[0]]).reshape((2, 1))
        return np.vstack([np.hstack([rot_part, trans_part]),
                          [0, 0, 1]])

    @classmethod
    def exp(cls, xi):
        """Exponential map for :math:`SE(2)`, which computes a transformation from a tangent vector:

        .. math::
            \\mathbf{T}(\\boldsymbol{\\xi}) =
            \\exp(\\boldsymbol{\\xi}^\\wedge) =
            \\begin{bmatrix} \\exp(\\phi ^\\wedge) & \\mathbf{J} \\boldsymbol{\\rho} \\\\ \\mathbf{0} ^ T & 1 \\end{bmatrix}

        This is the inverse operation to :meth:`~liegroups.SE2.log`.
        """
        if len(xi) != cls.dof:
            raise ValueError("xi must have length {}".format(cls.dof))

        rho = xi[0:2]
        phi = xi[2]
        return cls(cls.RotationType.exp(phi),
                   cls.RotationType.left_jacobian(phi).dot(rho))

    @classmethod
    def inv_left_jacobian(cls, xi):
        """:math:`SE(2)` inverse left Jacobian.

        .. math::
            \\mathcal{J}^{-1}(\\boldsymbol{\\xi})
        """
        raise NotImplementedError

    @classmethod
    def left_jacobian(cls, xi):
        """:math:`SE(2)` left Jacobian.

        .. math::
            \\mathcal{J}(\\boldsymbol{\\xi})
        """
        raise NotImplementedError

    def log(self):
        """Logarithmic map for :math:`SE(2)`, which computes a tangent vector from a transformation:

        .. math::
            \\boldsymbol{\\xi}(\\mathbf{T}) =
            \\ln(\\mathbf{T})^\\vee =
            \\begin{bmatrix} \\mathbf{J} ^ {-1} \\mathbf{r} \\\\ \\ln(\\boldsymbol{C}) ^\\vee \\end{bmatrix}

        This is the inverse operation to :meth:`~liegroups.SE2.log`.
        """
        phi = self.rot.log()
        rho = self.RotationType.inv_left_jacobian(phi).dot(self.trans)
        return np.hstack([rho, phi])

    @classmethod
    def odot(cls, p, directional=False):
        """:math:`SE(2)` odot operator as defined by Barfoot.

        This is the Jacobian of a vector

        .. math::
            \\mathbf{p} =
            \\begin{bmatrix} sx \\\\ sy \\\\ sz \\\\ s \\end{bmatrix} =
            \\begin{bmatrix} \\boldsymbol{\\epsilon} \\\\ \\eta \\end{bmatrix}

        with respect to a perturbation in the underlying parameters of :math:`\\mathbf{T}`.

        If :math:`\\mathbf{p}` is given in Euclidean coordinates and directional=False,
        the missing scale value :math:`\\eta` is assumed to be 1 and the Jacobian is 2x3.
        If directional=True, :math:`\\eta` is assumed to be 0:

        .. math::
            \\mathbf{p}^\\odot =
            \\begin{bmatrix} \\eta \\mathbf{1} & 1^\\wedge \\boldsymbol{\\epsilon} \\end{bmatrix}

        If :math:`\\mathbf{p}` is given in Homogeneous coordinates, the Jacobian is 3x3:

        .. math::
            \\mathbf{p}^\\odot =
            \\begin{bmatrix} \\eta \\mathbf{1} & 1^\\wedge \\boldsymbol{\\epsilon} \\\\ \\mathbf{0}^T & 0 \\end{bmatrix}
        """
        p = np.atleast_2d(p)
        result = np.zeros([p.shape[0], p.shape[1], cls.dof])

        if p.shape[1] == cls.dim - 1:
            # Assume scale parameter is 1 unless p is a direction
            # vector, in which case the scale is 0
            if not directional:
                result[:, 0:2, 0:2] = np.eye(2)

            result[:, 0:2, 2] = cls.RotationType.wedge(1).dot(p.T).T

        elif p.shape[1] == cls.dim:
            result[:, 0:2, 0:2] = p[:, 2] * np.eye(2)
            result[:, 0:2, 2] = cls.RotationType.wedge(1).dot(p[:, 0:2].T).T

        else:
            raise ValueError("p must have shape ({},), ({},), (N,{}) or (N,{})".format(
                cls.dim - 1, cls.dim, cls.dim - 1, cls.dim))

        return np.squeeze(result)

    @classmethod
    def vee(cls, Xi):
        """:math:`SE(2)` vee operator as defined by Barfoot.

        .. math::
            \\boldsymbol{\\xi} = \\boldsymbol{\\Xi} ^\\vee

        This is the inverse operation to :meth:`~liegroups.SE2.wedge`.
        """
        if Xi.ndim < 3:
            Xi = np.expand_dims(Xi, axis=0)

        if Xi.shape[1:3] != (cls.dof, cls.dof):
            raise ValueError("Xi must have shape ({},{}) or (N,{},{})".format(
                cls.dof, cls.dof, cls.dof, cls.dof))

        xi = np.empty([Xi.shape[0], cls.dof])
        xi[:, 0:2] = Xi[:, 0:2, 2]
        xi[:, 2] = cls.RotationType.vee(Xi[:, 0:2, 0:2])

        return np.squeeze(xi)

    @classmethod
    def wedge(cls, xi):
        """:math:`SE(2)` wedge operator as defined by Barfoot.

        .. math::
            \\boldsymbol{\\Xi} =
            \\boldsymbol{\\xi} ^\\wedge =
            \\begin{bmatrix} \\phi ^\\wedge & \\boldsymbol{\\rho} \\\\ \\mathbf{0} ^ T & 0 \\end{bmatrix}

        This is the inverse operation to :meth:`~liegroups.SE2.vee`.
        """
        xi = np.atleast_2d(xi)
        if xi.shape[1] != cls.dof:
            raise ValueError(
                "xi must have shape ({},) or (N,{})".format(cls.dof, cls.dof))

        Xi = np.zeros([xi.shape[0], cls.dof, cls.dof])
        Xi[:, 0:2, 0:2] = cls.RotationType.wedge(xi[:, 2])
        Xi[:, 0:2, 2] = xi[:, 0:2]

        return np.squeeze(Xi)
[ "numpy.empty", "numpy.zeros", "numpy.expand_dims", "numpy.hstack", "numpy.array", "numpy.squeeze", "numpy.eye", "numpy.atleast_2d" ]
[((3705, 3726), 'numpy.hstack', 'np.hstack', (['[rho, phi]'], {}), '([rho, phi])\n', (3714, 3726), True, 'import numpy as np\n'), ((4973, 4989), 'numpy.atleast_2d', 'np.atleast_2d', (['p'], {}), '(p)\n', (4986, 4989), True, 'import numpy as np\n'), ((5007, 5050), 'numpy.zeros', 'np.zeros', (['[p.shape[0], p.shape[1], cls.dof]'], {}), '([p.shape[0], p.shape[1], cls.dof])\n', (5015, 5050), True, 'import numpy as np\n'), ((5705, 5723), 'numpy.squeeze', 'np.squeeze', (['result'], {}), '(result)\n', (5715, 5723), True, 'import numpy as np\n'), ((6250, 6282), 'numpy.empty', 'np.empty', (['[Xi.shape[0], cls.dof]'], {}), '([Xi.shape[0], cls.dof])\n', (6258, 6282), True, 'import numpy as np\n'), ((6390, 6404), 'numpy.squeeze', 'np.squeeze', (['xi'], {}), '(xi)\n', (6400, 6404), True, 'import numpy as np\n'), ((6849, 6866), 'numpy.atleast_2d', 'np.atleast_2d', (['xi'], {}), '(xi)\n', (6862, 6866), True, 'import numpy as np\n'), ((7025, 7066), 'numpy.zeros', 'np.zeros', (['[xi.shape[0], cls.dof, cls.dof]'], {}), '([xi.shape[0], cls.dof, cls.dof])\n', (7033, 7066), True, 'import numpy as np\n'), ((7177, 7191), 'numpy.squeeze', 'np.squeeze', (['Xi'], {}), '(Xi)\n', (7187, 7191), True, 'import numpy as np\n'), ((6028, 6054), 'numpy.expand_dims', 'np.expand_dims', (['Xi'], {'axis': '(0)'}), '(Xi, axis=0)\n', (6042, 6054), True, 'import numpy as np\n'), ((1795, 1836), 'numpy.array', 'np.array', (['[self.trans[1], -self.trans[0]]'], {}), '([self.trans[1], -self.trans[0]])\n', (1803, 1836), True, 'import numpy as np\n'), ((1879, 1912), 'numpy.hstack', 'np.hstack', (['[rot_part, trans_part]'], {}), '([rot_part, trans_part])\n', (1888, 1912), True, 'import numpy as np\n'), ((5277, 5286), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (5283, 5286), True, 'import numpy as np\n'), ((5438, 5447), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (5444, 5447), True, 'import numpy as np\n')]
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from flask import Blueprint, redirect, url_for, session, jsonify, request

from result_linker.models import db
from result_linker.logger import logger
from result_linker.models.share import Share

home_blueprint = Blueprint("home", __name__)


@home_blueprint.route("/")
def index(issue_id=0):
    """Serve the ReactJS-based index.html"""
    from flask import render_template
    # logger.info("Its coming here")
    from flask import request
    logger.info(request.headers.get('User-Agent'))
    from flask_login import current_user
    if current_user.is_authenticated:
        logger.info("Authenticated!!")
    else:
        logger.info("Not Authenticated!!")
    logger.debug(session)
    return render_template("index.html")


@home_blueprint.route("/issue/<issue_id>")
def issues(issue_id):
    """Serve the ReactJS-based index.html"""
    from flask import render_template
    share = Share.query.filter_by(issue=issue_id).first()
    if share:
        return redirect(url_for("share.check_share_link", token=share.link))
    session["issue_id"] = issue_id
    return redirect(url_for("home.index"))


@home_blueprint.route("/clear")
def clear(issue_id=0):
    """Serve the ReactJS-based index.html"""
    from flask import render_template
    return jsonify({"success": True})


# @home_blueprint.route("/get_issue")
# def get_issue(issue_id):
#     """Serve the ReactJS-based index.html"""
#     issue_id = session.get("issue_id", 0)
#     success_flag = False
#     if issue_id:
#         success_flag = True
#     return jsonify({"success": success_flag, "issue_id": issue_id})
[ "flask.Blueprint", "flask.request.headers.get", "result_linker.logger.logger.info", "flask.jsonify", "flask.url_for", "flask.render_template", "result_linker.models.share.Share.query.filter_by", "result_linker.logger.logger.debug" ]
[((258, 285), 'flask.Blueprint', 'Blueprint', (['"""home"""', '__name__'], {}), "('home', __name__)\n", (267, 285), False, 'from flask import Blueprint, redirect, url_for, session, jsonify, request\n'), ((713, 734), 'result_linker.logger.logger.debug', 'logger.debug', (['session'], {}), '(session)\n', (725, 734), False, 'from result_linker.logger import logger\n'), ((746, 775), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (761, 775), False, 'from flask import render_template\n'), ((1305, 1331), 'flask.jsonify', 'jsonify', (["{'success': True}"], {}), "({'success': True})\n", (1312, 1331), False, 'from flask import Blueprint, redirect, url_for, session, jsonify, request\n'), ((503, 536), 'flask.request.headers.get', 'request.headers.get', (['"""User-Agent"""'], {}), "('User-Agent')\n", (522, 536), False, 'from flask import request\n'), ((625, 655), 'result_linker.logger.logger.info', 'logger.info', (['"""Authenticated!!"""'], {}), "('Authenticated!!')\n", (636, 655), False, 'from result_linker.logger import logger\n'), ((674, 708), 'result_linker.logger.logger.info', 'logger.info', (['"""Not Authenticated!!"""'], {}), "('Not Authenticated!!')\n", (685, 708), False, 'from result_linker.logger import logger\n'), ((1131, 1152), 'flask.url_for', 'url_for', (['"""home.index"""'], {}), "('home.index')\n", (1138, 1152), False, 'from flask import Blueprint, redirect, url_for, session, jsonify, request\n'), ((938, 975), 'result_linker.models.share.Share.query.filter_by', 'Share.query.filter_by', ([], {'issue': 'issue_id'}), '(issue=issue_id)\n', (959, 975), False, 'from result_linker.models.share import Share\n'), ((1022, 1073), 'flask.url_for', 'url_for', (['"""share.check_share_link"""'], {'token': 'share.link'}), "('share.check_share_link', token=share.link)\n", (1029, 1073), False, 'from flask import Blueprint, redirect, url_for, session, jsonify, request\n')]
#!/usr/bin/python
"""
Tool to analyze some datalogger raw data
"""
from __future__ import print_function
import os
import sys
import argparse
import json

parser = argparse.ArgumentParser(description="Tool to analyze some datalogger raw data")
parser.add_argument("-i", "--input-file", help="file to read from", required=True)
options = parser.parse_args("-i /var/rrd/snmp/raw/ifTable_2017-11-15.csv".split())

if not os.path.isfile(options.input_file):
    print("file %s does not exist" % options.input_file)
    sys.exit(1)

data = {}
meta = {}
meta["delimiter"] = "\t"
meta["index_keynames"] = ("hostname", "ifDescr")
meta["ts_keyname"] = "ts"
meta["interval"] = 300
headers = None
with open(options.input_file, "rt") as infile:
    for line in infile.read().split("\n"):
        if line == "" or line == "\n":
            continue
        if headers is None:
            headers = line.split(meta["delimiter"])
            meta["headers"] = headers
            data["length"] = len(headers)
            for header in headers:
                data[header] = {
                    "isnumeric": True,
                    "interval": 0
                }
            assert meta["ts_keyname"] in headers
            assert all((index_key in headers for index_key in meta["index_keynames"]))
        else:
            columns = line.split(meta["delimiter"])
            assert len(columns) == data["length"]
            for index, column in enumerate(columns):
                data[headers[index]]["isnumeric"] = all((data[headers[index]]["isnumeric"], column.isnumeric()))
            print(line)
meta["value_keynames"] = dict([(header, "asis") for header in headers if data[header]["isnumeric"] == True])
meta["blacklist"] = [header for header in headers if (data[header]["isnumeric"] == False) and (header not in meta["index_keynames"]) and (header != meta["ts_keyname"])]
print(json.dumps(meta, indent=4, sort_keys=True))
[ "os.path.isfile", "argparse.ArgumentParser", "sys.exit", "json.dumps" ]
[((164, 243), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Tool to analyze some datalogger raw data"""'}), "(description='Tool to analyze some datalogger raw data')\n", (187, 243), False, 'import argparse\n'), ((417, 451), 'os.path.isfile', 'os.path.isfile', (['options.input_file'], {}), '(options.input_file)\n', (431, 451), False, 'import os\n'), ((514, 525), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (522, 525), False, 'import sys\n'), ((1881, 1923), 'json.dumps', 'json.dumps', (['meta'], {'indent': '(4)', 'sort_keys': '(True)'}), '(meta, indent=4, sort_keys=True)\n', (1891, 1923), False, 'import json\n')]
# -*- encoding: utf-8 -*-
'''
@Author  :  lance
@Email   :  <EMAIL>
'''
import tensorflow as tf

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)

import keras
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from keras.layers import Flatten, Dense, regularizers, GlobalAveragePooling2D, Dropout
from keras.optimizers import SGD, Adagrad, Adam
from keras import backend as K, Model
import os

from model_cx.load_data import load_data


def densenet(classes, epochs, steps_per_epoch, validation_steps, input_shape):
    # load the data
    train_batches, valid_batches = load_data(input_shape)

    input_shape += (3,)
    # DenseNet121, DenseNet169, DenseNet201
    temp_model = keras.applications.densenet.DenseNet121(include_top=False, weights='imagenet', input_shape=input_shape)
    x = temp_model.output
    x = GlobalAveragePooling2D()(x)  # GlobalAveragePooling2D turns an MxNxC tensor into a 1xC tensor, where C is the number of channels
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.2)(x)
    if classes == 1:
        print("sigmoid")
        predictions = Dense(classes, activation='sigmoid')(x)
    else:
        print("softmax")
        predictions = Dense(classes, activation='softmax')(x)
    model = Model(inputs=temp_model.input, outputs=predictions)

    if classes == 1:
        print("binary classification")
        model.compile(optimizer=Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08),
                      loss='binary_crossentropy', metrics=['accuracy'])
    else:
        print("multi-class classification")
        model.compile(optimizer=Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08),
                      loss='categorical_crossentropy', metrics=['accuracy'])
    # model_cx.summary()
    print('the number of layers in this model_cx:' + str(len(model.layers)))

    # save the model weights
    out_dir = "../weights/"
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    filepath = "../weights/densenet_{epoch:04d}.h5"
    # save a checkpoint every time the monitored metric improves during training
    checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=0, save_best_only=False, mode='max')
    # learning-rate adjustment
    lr_reduce = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, verbose=1, min_lr=0.000005, mode="min")
    # early stopping
    earlystopping = EarlyStopping(monitor='val_loss', patience=15, verbose=1, mode='min')
    # save the training history
    log_dir = "../logs/"
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    logfile = "../logs/densenet.csv"
    log = keras.callbacks.CSVLogger(logfile, separator=',', append=False)
    loggraph = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)

    callbacks_list = [checkpoint, lr_reduce, log]

    # train
    model.fit_generator(train_batches, steps_per_epoch=steps_per_epoch,
                        validation_data=valid_batches, validation_steps=validation_steps,
                        epochs=epochs, verbose=2,
                        callbacks=callbacks_list, workers=16, max_queue_size=20)


if __name__ == "__main__":
    densenet(1, 200, 210, 15, (224, 224))
    # densenet121 batch=16 acc:93.3    densenet(3,50,20,5,(224,224)) SGD
    # factor:0.5 pat:10
[ "keras.Model", "keras.callbacks.CSVLogger", "os.makedirs", "model_cx.load_data.load_data", "keras.callbacks.ModelCheckpoint", "keras.layers.Dropout", "tensorflow.Session", "os.path.exists", "keras.optimizers.Adam", "keras.layers.GlobalAveragePooling2D", "tensorflow.ConfigProto", "keras.applications.densenet.DenseNet121", "keras.callbacks.EarlyStopping", "keras.callbacks.TensorBoard", "keras.layers.Dense", "keras.callbacks.ReduceLROnPlateau" ]
[((116, 132), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (130, 132), True, 'import tensorflow as tf\n'), ((184, 209), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (194, 209), True, 'import tensorflow as tf\n'), ((658, 680), 'model_cx.load_data.load_data', 'load_data', (['input_shape'], {}), '(input_shape)\n', (667, 680), False, 'from model_cx.load_data import load_data\n'), ((767, 875), 'keras.applications.densenet.DenseNet121', 'keras.applications.densenet.DenseNet121', ([], {'include_top': '(False)', 'weights': '"""imagenet"""', 'input_shape': 'input_shape'}), "(include_top=False, weights=\n 'imagenet', input_shape=input_shape)\n", (806, 875), False, 'import keras\n'), ((1277, 1328), 'keras.Model', 'Model', ([], {'inputs': 'temp_model.input', 'outputs': 'predictions'}), '(inputs=temp_model.input, outputs=predictions)\n', (1282, 1328), False, 'from keras import backend as K, Model\n'), ((2047, 2141), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filepath'], {'monitor': '"""val_acc"""', 'verbose': '(0)', 'save_best_only': '(False)', 'mode': '"""max"""'}), "(filepath, monitor='val_acc', verbose=0, save_best_only=\n False, mode='max')\n", (2062, 2141), False, 'from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((2200, 2303), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_loss"""', 'factor': '(0.5)', 'patience': '(10)', 'verbose': '(1)', 'min_lr': '(5e-06)', 'mode': '"""min"""'}), "(monitor='val_loss', factor=0.5, patience=10, verbose=1,\n min_lr=5e-06, mode='min')\n", (2217, 2303), False, 'from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((2369, 2438), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(15)', 'verbose': '(1)', 'mode': '"""min"""'}), "(monitor='val_loss', patience=15, verbose=1, mode='min')\n", (2382, 2438), False, 'from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((2590, 2653), 'keras.callbacks.CSVLogger', 'keras.callbacks.CSVLogger', (['logfile'], {'separator': '""","""', 'append': '(False)'}), "(logfile, separator=',', append=False)\n", (2615, 2653), False, 'import keras\n'), ((2668, 2773), 'keras.callbacks.TensorBoard', 'keras.callbacks.TensorBoard', ([], {'log_dir': '"""./logs"""', 'histogram_freq': '(0)', 'write_graph': '(True)', 'write_images': '(True)'}), "(log_dir='./logs', histogram_freq=0, write_graph\n =True, write_images=True)\n", (2695, 2773), False, 'import keras\n'), ((907, 931), 'keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (929, 931), False, 'from keras.layers import Flatten, Dense, regularizers, GlobalAveragePooling2D, Dropout\n'), ((998, 1028), 'keras.layers.Dense', 'Dense', (['(1024)'], {'activation': '"""relu"""'}), "(1024, activation='relu')\n", (1003, 1028), False, 'from keras.layers import Flatten, Dense, regularizers, GlobalAveragePooling2D, Dropout\n'), ((1039, 1051), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1046, 1051), False, 'from keras.layers import Flatten, Dense, regularizers, GlobalAveragePooling2D, Dropout\n'), ((1885, 1908), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (1899, 1908), False, 'import os\n'), ((1919, 1939), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (1930, 1939), False, 'import os\n'), ((2490, 2513), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (2504, 
2513), False, 'import os\n'), ((2524, 2544), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (2535, 2544), False, 'import os\n'), ((1124, 1160), 'keras.layers.Dense', 'Dense', (['classes'], {'activation': '"""sigmoid"""'}), "(classes, activation='sigmoid')\n", (1129, 1160), False, 'from keras.layers import Flatten, Dense, regularizers, GlobalAveragePooling2D, Dropout\n'), ((1224, 1260), 'keras.layers.Dense', 'Dense', (['classes'], {'activation': '"""softmax"""'}), "(classes, activation='softmax')\n", (1229, 1260), False, 'from keras.layers import Flatten, Dense, regularizers, GlobalAveragePooling2D, Dropout\n'), ((1411, 1467), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0001)', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'epsilon': '(1e-08)'}), '(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n', (1415, 1467), False, 'from keras.optimizers import SGD, Adagrad, Adam\n'), ((1608, 1664), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0001)', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'epsilon': '(1e-08)'}), '(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n', (1612, 1664), False, 'from keras.optimizers import SGD, Adagrad, Adam\n')]
# Generated by Django 2.2.3 on 2019-07-22 14:47

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('djedi', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='node',
            name='is_published',
            field=models.BooleanField(blank=True, default=False),
        ),
    ]
[ "django.db.models.BooleanField" ]
[((327, 373), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'blank': '(True)', 'default': '(False)'}), '(blank=True, default=False)\n', (346, 373), False, 'from django.db import migrations, models\n')]
# -*- coding: utf-8 -*-

from copy import deepcopy

import pytest

from schematics.models import Model
from schematics.types import *
from schematics.types.compound import *
from schematics.exceptions import *
from schematics.undefined import Undefined


@pytest.mark.parametrize('init', (True, False))
def test_import_data(init):

    class M(Model):
        a, b, c, d = IntType(), IntType(), IntType(), IntType()

    m = M({
        'a': 1,
        'b': None,
        'c': 3
    }, init=init)

    m.import_data({
        'a': None,
        'b': 2
    })

    if init:
        assert m._data == {'a': None, 'b': 2, 'c': 3, 'd': None}
    else:
        assert m._data == {'a': None, 'b': 2, 'c': 3}


@pytest.mark.parametrize('init', (True, False))
def test_import_data_with_error(init):

    class M(Model):
        a, b, c, d = IntType(), IntType(), IntType(required=True), IntType()

    m = M({
        'a': 1,
        'b': None,
        'c': 3
    }, init=init)

    with pytest.raises(DataError):
        m.import_data({
            'a': None,
            'b': 2,
            'c': None,
        })

    if init:
        assert m._data == {'a': 1, 'b': None, 'c': 3, 'd': None}
    else:
        assert m._data == {'a': 1, 'b': None, 'c': 3}


@pytest.mark.parametrize('preconvert_source, populate_source', [
    (False, None),
    (True,  True),
    (True,  False)])
@pytest.mark.parametrize('recursive, populate_target, init_to_none, populated_result', [
    (False, True,  True,  True),
    (False, False, False, False),
    (True,  True,  True,  True),
    (True,  False, True,  True),
    (True,  False, False, False)])
def test_complex_import_data(recursive, preconvert_source, populate_source, populate_target,
                             init_to_none, populated_result):

    class M(Model):
        intfield = IntType(max_value=2)
        matrixfield = ListType(ListType(IntType))
        dictfield = DictType(IntType)
        modelfield = ModelType('M')

    origdict = {
        'intfield': '1',
        'dictfield': dict(a=1, b=2),
        'modelfield': {
            'intfield': '2',
            'matrixfield': [[0, 0, 0], [1, 1, 1], [2, 2, 2]],
            'dictfield': dict(a=11, b=22),
            'modelfield': {
                'intfield': '3',
                'dictfield': dict(a=111, b=222)}}}

    m = M(origdict, init=populate_target)

    sourcedict = {
        'intfield': '101',
        'dictfield': dict(c=3),
        'modelfield': {
            'matrixfield': [[9]],
            'modelfield': {
                'intfield': '103',
                'dictfield': dict(c=33)}}}

    sourcedata = deepcopy(sourcedict)
    if preconvert_source:
        sourcedata = M(sourcedata, init=populate_source)

    m.import_data(sourcedata, recursive=recursive, init_values=init_to_none)

    assert id(m) != id(sourcedata)

    if preconvert_source and populate_source:
        assert m == M(sourcedict, init=True)
    elif recursive:
        assert m == M({
            'intfield': '101',
            'dictfield': dict(c=3),
            'modelfield': {
                'intfield': '2',
                'matrixfield': [[9]],
                'dictfield': dict(a=11, b=22),
                'modelfield': {
                    'intfield': '103',
                    'dictfield': dict(c=33)}}},
            init=populated_result)
    else:
        assert m == M(sourcedict, init=populated_result)
[ "pytest.raises", "pytest.mark.parametrize", "copy.deepcopy" ]
[((256, 302), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""init"""', '(True, False)'], {}), "('init', (True, False))\n", (279, 302), False, 'import pytest\n'), ((705, 751), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""init"""', '(True, False)'], {}), "('init', (True, False))\n", (728, 751), False, 'import pytest\n'), ((1253, 1365), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""preconvert_source, populate_source"""', '[(False, None), (True, True), (True, False)]'], {}), "('preconvert_source, populate_source', [(False, None\n ), (True, True), (True, False)])\n", (1276, 1365), False, 'import pytest\n'), ((1474, 1715), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""recursive, populate_target, init_to_none, populated_result"""', '[(False, True, True, True), (False, False, False, False), (True, True, True,\n True), (True, False, True, True), (True, False, False, False)]'], {}), "(\n 'recursive, populate_target, init_to_none, populated_result', [(False, \n True, True, True), (False, False, False, False), (True, True, True, \n True), (True, False, True, True), (True, False, False, False)])\n", (1497, 1715), False, 'import pytest\n'), ((2932, 2952), 'copy.deepcopy', 'deepcopy', (['sourcedict'], {}), '(sourcedict)\n', (2940, 2952), False, 'from copy import deepcopy\n'), ((980, 1004), 'pytest.raises', 'pytest.raises', (['DataError'], {}), '(DataError)\n', (993, 1004), False, 'import pytest\n')]
import torch.nn as nn
import torch.functional as F


class ShiftedReLU(nn.Module):
    def __init__(self, offset=1):
        super().__init__()
        self.offset = offset

    def forward(self, x):
        return F.relu(x + self.offset)


class ShiftedSoftplus(nn.Module):
    def __init__(self, offset=1):
        super().__init__()
        self.offset = offset

    def forward(self, x):
        return F.softplus(x - self.offset)
[ "torch.functional.softplus", "torch.functional.relu" ]
[((216, 239), 'torch.functional.relu', 'F.relu', (['(x + self.offset)'], {}), '(x + self.offset)\n', (222, 239), True, 'import torch.functional as F\n'), ((409, 436), 'torch.functional.softplus', 'F.softplus', (['(x - self.offset)'], {}), '(x - self.offset)\n', (419, 436), True, 'import torch.functional as F\n')]
import pickle

import numpy as np

from neupy import algorithms
from neupy.exceptions import NotTrained

from algorithms.memory.data import zero, one, half_one, half_zero
from base import BaseTestCase
from helpers import vectors_for_testing


zero_hint = np.array([[0, 1, 0, 0]])
one_hint = np.array([[1, 0, 0, 0]])


class BAMTestCase(BaseTestCase):
    def setUp(self):
        super(BAMTestCase, self).setUp()
        self.data = np.concatenate([zero, one], axis=0)
        self.hints = np.concatenate([zero_hint, one_hint], axis=0)

    def test_bam_exceptions(self):
        with self.assertRaises(NotTrained):
            dbnet = algorithms.DiscreteBAM()
            dbnet.predict(np.array([0, 1]))

        with self.assertRaises(NotTrained):
            dbnet = algorithms.DiscreteBAM()
            dbnet.predict_input(np.array([0, 1]))

        with self.assertRaises(ValueError):
            dbnet = algorithms.DiscreteBAM()
            dbnet.weight = np.array([[0, 1], [1, 0]])
            dbnet.train(np.array([0, 1, 1]), np.array([0, 1]))

    def test_bam_X_validation(self):
        dbnet = algorithms.DiscreteBAM()
        dbnet.weight = np.array([[0, 1], [1, 0]])

        with self.assertRaises(ValueError):
            # Invalid discrete input values
            dbnet.train(np.array([-1, 1]), np.array([0, 1]))

        with self.assertRaises(ValueError):
            dbnet.train(np.array([0, 1]), np.array([-1, 1]))

        with self.assertRaises(ValueError):
            dbnet.energy(np.array([-1, 1]), np.array([0, 1]))

        with self.assertRaises(ValueError):
            dbnet.energy(np.array([0, 1]), np.array([-1, 1]))

        with self.assertRaises(ValueError):
            dbnet.predict(np.array([-1, 1]))

    def test_discrete_bam_storage(self):
        network = algorithms.DiscreteBAM(mode='sync')
        network.train(self.data, self.hints)

        stored_network = pickle.dumps(network)
        loaded_network = pickle.loads(stored_network)

        network_prediction = network.predict(self.data)
        loaded_network_prediction = loaded_network.predict(self.data)

        np.testing.assert_array_almost_equal(
            loaded_network_prediction[0], network_prediction[0])
        np.testing.assert_array_almost_equal(
            loaded_network_prediction[1], network_prediction[1])

    def test_discrete_bam_sync(self):
        bamnet = algorithms.DiscreteBAM(mode='sync')
        bamnet.train(self.data, self.hints)

        data_before = self.data.copy()
        hints_before = self.hints.copy()

        np.testing.assert_array_almost_equal(
            bamnet.predict(half_zero)[1], zero_hint)
        np.testing.assert_array_almost_equal(
            bamnet.predict_output(half_one)[1], one_hint)
        np.testing.assert_array_almost_equal(
            bamnet.predict_input(zero_hint)[0], zero)
        np.testing.assert_array_almost_equal(
            bamnet.predict_input(one_hint)[0], one)

        # Test 1d input array prediction
        np.testing.assert_array_almost_equal(
            bamnet.predict_input(one_hint.ravel())[0], one)

        # Test 1d output array input prediction
        np.testing.assert_array_almost_equal(
            bamnet.predict_output(half_one.ravel())[1], one_hint)

        # Test multiple input values prediction
        input_matrix = np.vstack([one, zero])
        output_matrix = np.vstack([one_hint, zero_hint])
        output_matrix_before = output_matrix.copy()
        input_matrix_before = input_matrix.copy()

        np.testing.assert_array_almost_equal(
            bamnet.predict_input(output_matrix)[0], input_matrix)
        np.testing.assert_array_almost_equal(
            bamnet.predict(input_matrix)[1], output_matrix)

        np.testing.assert_array_equal(self.data, data_before)
        np.testing.assert_array_equal(self.hints, hints_before)
        np.testing.assert_array_equal(output_matrix, output_matrix_before)
        np.testing.assert_array_equal(input_matrix, input_matrix_before)

    def test_discrete_bam_async(self):
        bamnet = algorithms.DiscreteBAM(mode='async', n_times=400)

        data_before = self.data.copy()
        hints_before = self.hints.copy()

        bamnet.train(self.data, self.hints)

        input_matrix = np.vstack([one, zero])
        output_matrix = np.vstack([one_hint, zero_hint])
        output_matrix_before = output_matrix.copy()
        input_matrix_before = input_matrix.copy()

        np.testing.assert_array_almost_equal(
            bamnet.predict_input(output_matrix)[0], input_matrix)
        np.testing.assert_array_almost_equal(
            bamnet.predict_output(input_matrix)[1], output_matrix)

        np.testing.assert_array_equal(self.data, data_before)
        np.testing.assert_array_equal(self.hints, hints_before)
        np.testing.assert_array_equal(output_matrix, output_matrix_before)
        np.testing.assert_array_equal(input_matrix, input_matrix_before)

    def test_bam_argument_in_predict_method(self):
        dbnet = algorithms.DiscreteBAM(mode='async', n_times=1)
        dbnet.train(self.data, self.hints)

        self.assertTrue(np.any(one != dbnet.predict_output(half_one)[0]))
        np.testing.assert_array_almost_equal(
            one, dbnet.predict_output(half_one, n_times=100)[0])

    def test_bam_energy_function(self):
        input_vector = np.array([[1, 0, 0, 1, 1, 0, 0]])
        output_vector = np.array([[1, 0]])
        dbnet = algorithms.DiscreteBAM()
        dbnet.train(input_vector, output_vector)

        self.assertEqual(-7, dbnet.energy(input_vector, output_vector))
        self.assertEqual(0, dbnet.energy(
            np.array([[0, 0, 0, 0, 0, 0, 0]]),
            np.array([[0, 0]])
        ))
        self.assertEqual(-7, dbnet.energy(
            np.array([[0, 1, 1, 0, 0, 1, 1]]),
            np.array([[0, 1]])
        ))

        # Test 1d array
        self.assertEqual(-7, dbnet.energy(
            np.array([0, 1, 1, 0, 0, 1, 1]),
            np.array([0, 1])
        ))

        # Test multiple input values energy calculation
        np.testing.assert_array_almost_equal(
            np.array([-7, 0]),
            dbnet.energy(
                np.array([
                    [0, 1, 1, 0, 0, 1, 1],
                    [0, 0, 0, 0, 0, 0, 0],
                ]),
                np.array([
                    [0, 1],
                    [0, 0],
                ])
            )
        )

    def test_bam_train_different_inputs(self):
        self.assertInvalidVectorTrain(
            algorithms.DiscreteBAM(),
            np.array([1, 0, 0, 1]),
            np.array([1, 0]),
            is_feature1d=False)

    def test_bam_predict_different_inputs(self):
        bamnet = algorithms.DiscreteBAM()

        data = np.array([[1, 0, 0, 1]])
        target = np.array([[1, 0]])

        bamnet.train(data, target)

        test_vectors = vectors_for_testing(
            data.reshape(data.size), is_feature1d=False)

        for test_vector in test_vectors:
            np.testing.assert_array_almost_equal(
                bamnet.predict(test_vector)[1], target)
[ "pickle.loads", "algorithms.memory.data.half_one.ravel", "numpy.testing.assert_array_equal", "numpy.array", "neupy.algorithms.DiscreteBAM", "numpy.vstack", "numpy.testing.assert_array_almost_equal", "numpy.concatenate", "pickle.dumps" ]
[((255, 279), 'numpy.array', 'np.array', (['[[0, 1, 0, 0]]'], {}), '([[0, 1, 0, 0]])\n', (263, 279), True, 'import numpy as np\n'), ((291, 315), 'numpy.array', 'np.array', (['[[1, 0, 0, 0]]'], {}), '([[1, 0, 0, 0]])\n', (299, 315), True, 'import numpy as np\n'), ((433, 468), 'numpy.concatenate', 'np.concatenate', (['[zero, one]'], {'axis': '(0)'}), '([zero, one], axis=0)\n', (447, 468), True, 'import numpy as np\n'), ((490, 535), 'numpy.concatenate', 'np.concatenate', (['[zero_hint, one_hint]'], {'axis': '(0)'}), '([zero_hint, one_hint], axis=0)\n', (504, 535), True, 'import numpy as np\n'), ((1106, 1130), 'neupy.algorithms.DiscreteBAM', 'algorithms.DiscreteBAM', ([], {}), '()\n', (1128, 1130), False, 'from neupy import algorithms\n'), ((1154, 1180), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (1162, 1180), True, 'import numpy as np\n'), ((1801, 1836), 'neupy.algorithms.DiscreteBAM', 'algorithms.DiscreteBAM', ([], {'mode': '"""sync"""'}), "(mode='sync')\n", (1823, 1836), False, 'from neupy import algorithms\n'), ((1908, 1929), 'pickle.dumps', 'pickle.dumps', (['network'], {}), '(network)\n', (1920, 1929), False, 'import pickle\n'), ((1955, 1983), 'pickle.loads', 'pickle.loads', (['stored_network'], {}), '(stored_network)\n', (1967, 1983), False, 'import pickle\n'), ((2120, 2213), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['loaded_network_prediction[0]', 'network_prediction[0]'], {}), '(loaded_network_prediction[0],\n network_prediction[0])\n', (2156, 2213), True, 'import numpy as np\n'), ((2232, 2325), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['loaded_network_prediction[1]', 'network_prediction[1]'], {}), '(loaded_network_prediction[1],\n network_prediction[1])\n', (2268, 2325), True, 'import numpy as np\n'), ((2391, 2426), 'neupy.algorithms.DiscreteBAM', 'algorithms.DiscreteBAM', ([], {'mode': '"""sync"""'}), "(mode='sync')\n", (2413, 2426), False, 'from neupy import algorithms\n'), ((3461, 3483), 'numpy.vstack', 'np.vstack', (['[one, zero]'], {}), '([one, zero])\n', (3470, 3483), True, 'import numpy as np\n'), ((3508, 3540), 'numpy.vstack', 'np.vstack', (['[one_hint, zero_hint]'], {}), '([one_hint, zero_hint])\n', (3517, 3540), True, 'import numpy as np\n'), ((3913, 3966), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['self.data', 'data_before'], {}), '(self.data, data_before)\n', (3942, 3966), True, 'import numpy as np\n'), ((3975, 4030), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['self.hints', 'hints_before'], {}), '(self.hints, hints_before)\n', (4004, 4030), True, 'import numpy as np\n'), ((4039, 4105), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['output_matrix', 'output_matrix_before'], {}), '(output_matrix, output_matrix_before)\n', (4068, 4105), True, 'import numpy as np\n'), ((4114, 4178), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['input_matrix', 'input_matrix_before'], {}), '(input_matrix, input_matrix_before)\n', (4143, 4178), True, 'import numpy as np\n'), ((4236, 4285), 'neupy.algorithms.DiscreteBAM', 'algorithms.DiscreteBAM', ([], {'mode': '"""async"""', 'n_times': '(400)'}), "(mode='async', n_times=400)\n", (4258, 4285), False, 'from neupy import algorithms\n'), ((4434, 4456), 'numpy.vstack', 'np.vstack', (['[one, zero]'], {}), '([one, zero])\n', (4443, 4456), True, 'import numpy as np\n'), ((4481, 4513), 'numpy.vstack', 'np.vstack', (['[one_hint, 
zero_hint]'], {}), '([one_hint, zero_hint])\n', (4490, 4513), True, 'import numpy as np\n'), ((4893, 4946), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['self.data', 'data_before'], {}), '(self.data, data_before)\n', (4922, 4946), True, 'import numpy as np\n'), ((4955, 5010), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['self.hints', 'hints_before'], {}), '(self.hints, hints_before)\n', (4984, 5010), True, 'import numpy as np\n'), ((5019, 5085), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['output_matrix', 'output_matrix_before'], {}), '(output_matrix, output_matrix_before)\n', (5048, 5085), True, 'import numpy as np\n'), ((5094, 5158), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['input_matrix', 'input_matrix_before'], {}), '(input_matrix, input_matrix_before)\n', (5123, 5158), True, 'import numpy as np\n'), ((5227, 5274), 'neupy.algorithms.DiscreteBAM', 'algorithms.DiscreteBAM', ([], {'mode': '"""async"""', 'n_times': '(1)'}), "(mode='async', n_times=1)\n", (5249, 5274), False, 'from neupy import algorithms\n'), ((5568, 5601), 'numpy.array', 'np.array', (['[[1, 0, 0, 1, 1, 0, 0]]'], {}), '([[1, 0, 0, 1, 1, 0, 0]])\n', (5576, 5601), True, 'import numpy as np\n'), ((5626, 5644), 'numpy.array', 'np.array', (['[[1, 0]]'], {}), '([[1, 0]])\n', (5634, 5644), True, 'import numpy as np\n'), ((5661, 5685), 'neupy.algorithms.DiscreteBAM', 'algorithms.DiscreteBAM', ([], {}), '()\n', (5683, 5685), False, 'from neupy import algorithms\n'), ((6933, 6957), 'neupy.algorithms.DiscreteBAM', 'algorithms.DiscreteBAM', ([], {}), '()\n', (6955, 6957), False, 'from neupy import algorithms\n'), ((6974, 6998), 'numpy.array', 'np.array', (['[[1, 0, 0, 1]]'], {}), '([[1, 0, 0, 1]])\n', (6982, 6998), True, 'import numpy as np\n'), ((7016, 7034), 'numpy.array', 'np.array', (['[[1, 0]]'], {}), '([[1, 0]])\n', (7024, 7034), True, 'import numpy as np\n'), ((636, 660), 'neupy.algorithms.DiscreteBAM', 'algorithms.DiscreteBAM', ([], {}), '()\n', (658, 660), False, 'from neupy import algorithms\n'), ((770, 794), 'neupy.algorithms.DiscreteBAM', 'algorithms.DiscreteBAM', ([], {}), '()\n', (792, 794), False, 'from neupy import algorithms\n'), ((910, 934), 'neupy.algorithms.DiscreteBAM', 'algorithms.DiscreteBAM', ([], {}), '()\n', (932, 934), False, 'from neupy import algorithms\n'), ((962, 988), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (970, 988), True, 'import numpy as np\n'), ((6339, 6356), 'numpy.array', 'np.array', (['[-7, 0]'], {}), '([-7, 0])\n', (6347, 6356), True, 'import numpy as np\n'), ((6742, 6766), 'neupy.algorithms.DiscreteBAM', 'algorithms.DiscreteBAM', ([], {}), '()\n', (6764, 6766), False, 'from neupy import algorithms\n'), ((6780, 6802), 'numpy.array', 'np.array', (['[1, 0, 0, 1]'], {}), '([1, 0, 0, 1])\n', (6788, 6802), True, 'import numpy as np\n'), ((6816, 6832), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (6824, 6832), True, 'import numpy as np\n'), ((687, 703), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (695, 703), True, 'import numpy as np\n'), ((827, 843), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (835, 843), True, 'import numpy as np\n'), ((1013, 1032), 'numpy.array', 'np.array', (['[0, 1, 1]'], {}), '([0, 1, 1])\n', (1021, 1032), True, 'import numpy as np\n'), ((1034, 1050), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1042, 1050), True, 'import numpy as np\n'), ((1294, 1311), 'numpy.array', 
'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (1302, 1311), True, 'import numpy as np\n'), ((1313, 1329), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1321, 1329), True, 'import numpy as np\n'), ((1400, 1416), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1408, 1416), True, 'import numpy as np\n'), ((1418, 1435), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (1426, 1435), True, 'import numpy as np\n'), ((1507, 1524), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (1515, 1524), True, 'import numpy as np\n'), ((1526, 1542), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1534, 1542), True, 'import numpy as np\n'), ((1614, 1630), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1622, 1630), True, 'import numpy as np\n'), ((1632, 1649), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (1640, 1649), True, 'import numpy as np\n'), ((1722, 1739), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (1730, 1739), True, 'import numpy as np\n'), ((5862, 5895), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0, 0, 0]])\n', (5870, 5895), True, 'import numpy as np\n'), ((5909, 5927), 'numpy.array', 'np.array', (['[[0, 0]]'], {}), '([[0, 0]])\n', (5917, 5927), True, 'import numpy as np\n'), ((5994, 6027), 'numpy.array', 'np.array', (['[[0, 1, 1, 0, 0, 1, 1]]'], {}), '([[0, 1, 1, 0, 0, 1, 1]])\n', (6002, 6027), True, 'import numpy as np\n'), ((6041, 6059), 'numpy.array', 'np.array', (['[[0, 1]]'], {}), '([[0, 1]])\n', (6049, 6059), True, 'import numpy as np\n'), ((6151, 6182), 'numpy.array', 'np.array', (['[0, 1, 1, 0, 0, 1, 1]'], {}), '([0, 1, 1, 0, 0, 1, 1])\n', (6159, 6182), True, 'import numpy as np\n'), ((6196, 6212), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (6204, 6212), True, 'import numpy as np\n'), ((6400, 6456), 'numpy.array', 'np.array', (['[[0, 1, 1, 0, 0, 1, 1], [0, 0, 0, 0, 0, 0, 0]]'], {}), '([[0, 1, 1, 0, 0, 1, 1], [0, 0, 0, 0, 0, 0, 0]])\n', (6408, 6456), True, 'import numpy as np\n'), ((6533, 6559), 'numpy.array', 'np.array', (['[[0, 1], [0, 0]]'], {}), '([[0, 1], [0, 0]])\n', (6541, 6559), True, 'import numpy as np\n'), ((3336, 3352), 'algorithms.memory.data.half_one.ravel', 'half_one.ravel', ([], {}), '()\n', (3350, 3352), False, 'from algorithms.memory.data import zero, one, half_one, half_zero\n')]
from django import template

from page.models import ExternalAccount

register = template.Library()

...

# ExternalAccount snippets
@register.inclusion_tag('tags/external_account.html', takes_context=True)
def external_accounts(context):
    return {
        'external_accounts': ExternalAccount.objects.all(),
        'request': context['request'],
    }
[ "django.template.Library", "page.models.ExternalAccount.objects.all" ]
[((80, 98), 'django.template.Library', 'template.Library', ([], {}), '()\n', (96, 98), False, 'from django import template\n'), ((280, 309), 'page.models.ExternalAccount.objects.all', 'ExternalAccount.objects.all', ([], {}), '()\n', (307, 309), False, 'from page.models import ExternalAccount\n')]
from json import JSONDecodeError
import unittest
from StatusChangedQueueTrigger import extract_properties, get_source_dest_env_vars, is_require_data_copy


class TestPropertiesExtraction(unittest.TestCase):
    def test_extract_prop_valid_body_return_all_values(self):
        msg = "{ \"data\": { \"request_id\":\"123\",\"status\":\"456\" , \"type\":\"789\", \"workspace_id\":\"ws1\" }}"
        req_prop = extract_properties(msg)
        self.assertEqual(req_prop.request_id, "123")
        self.assertEqual(req_prop.status, "456")
        self.assertEqual(req_prop.type, "789")
        self.assertEqual(req_prop.workspace_id, "ws1")

    def test_extract_prop_missing_arg_throws(self):
        msg = "{ \"data\": { \"status\":\"456\" , \"type\":\"789\", \"workspace_id\":\"ws1\" }}"
        self.assertRaises(Exception, extract_properties, msg)
        msg = "{ \"data\": { \"request_id\":\"123\", \"type\":\"789\", \"workspace_id\":\"ws1\" }}"
        self.assertRaises(Exception, extract_properties, msg)
        msg = "{ \"data\": { \"request_id\":\"123\",\"status\":\"456\" , \"workspace_id\":\"ws1\" }}"
        self.assertRaises(Exception, extract_properties, msg)
        msg = "{ \"data\": { \"request_id\":\"123\",\"status\":\"456\" , \"type\":\"789\" }}"
        self.assertRaises(Exception, extract_properties, msg)

    def test_extract_prop_invalid_json_throws(self):
        msg = "Hi"
        self.assertRaises(JSONDecodeError, extract_properties, msg)


class TestDataCopyProperties(unittest.TestCase):
    def test_only_specific_status_are_triggering_copy(self):
        self.assertEqual(is_require_data_copy("Mitzi"), False)
        self.assertEqual(is_require_data_copy(""), False)
        self.assertEqual(is_require_data_copy("submit"), False)
        # Testing all values that should return true
        self.assertEqual(is_require_data_copy("submITted"), True)
        self.assertEqual(is_require_data_copy("submitted"), True)
        self.assertEqual(is_require_data_copy("approved"), True)
        self.assertEqual(is_require_data_copy("REJected"), True)
        self.assertEqual(is_require_data_copy("blocked"), True)

    def test_wrong_status_raises_when_getting_storage_account_properties(self):
        self.assertRaises(Exception, get_source_dest_env_vars, "Miaow", "import")

    def test_wrong_type_raises_when_getting_storage_account_properties(self):
        self.assertRaises(Exception, get_source_dest_env_vars, "accepted", "somethingelse")
[ "StatusChangedQueueTrigger.is_require_data_copy", "StatusChangedQueueTrigger.extract_properties" ]
[((410, 433), 'StatusChangedQueueTrigger.extract_properties', 'extract_properties', (['msg'], {}), '(msg)\n', (428, 433), False, 'from StatusChangedQueueTrigger import extract_properties, get_source_dest_env_vars, is_require_data_copy\n'), ((1618, 1647), 'StatusChangedQueueTrigger.is_require_data_copy', 'is_require_data_copy', (['"""Mitzi"""'], {}), "('Mitzi')\n", (1638, 1647), False, 'from StatusChangedQueueTrigger import extract_properties, get_source_dest_env_vars, is_require_data_copy\n'), ((1681, 1705), 'StatusChangedQueueTrigger.is_require_data_copy', 'is_require_data_copy', (['""""""'], {}), "('')\n", (1701, 1705), False, 'from StatusChangedQueueTrigger import extract_properties, get_source_dest_env_vars, is_require_data_copy\n'), ((1739, 1769), 'StatusChangedQueueTrigger.is_require_data_copy', 'is_require_data_copy', (['"""submit"""'], {}), "('submit')\n", (1759, 1769), False, 'from StatusChangedQueueTrigger import extract_properties, get_source_dest_env_vars, is_require_data_copy\n'), ((1857, 1890), 'StatusChangedQueueTrigger.is_require_data_copy', 'is_require_data_copy', (['"""submITted"""'], {}), "('submITted')\n", (1877, 1890), False, 'from StatusChangedQueueTrigger import extract_properties, get_source_dest_env_vars, is_require_data_copy\n'), ((1923, 1956), 'StatusChangedQueueTrigger.is_require_data_copy', 'is_require_data_copy', (['"""submitted"""'], {}), "('submitted')\n", (1943, 1956), False, 'from StatusChangedQueueTrigger import extract_properties, get_source_dest_env_vars, is_require_data_copy\n'), ((1989, 2021), 'StatusChangedQueueTrigger.is_require_data_copy', 'is_require_data_copy', (['"""approved"""'], {}), "('approved')\n", (2009, 2021), False, 'from StatusChangedQueueTrigger import extract_properties, get_source_dest_env_vars, is_require_data_copy\n'), ((2054, 2086), 'StatusChangedQueueTrigger.is_require_data_copy', 'is_require_data_copy', (['"""REJected"""'], {}), "('REJected')\n", (2074, 2086), False, 'from StatusChangedQueueTrigger import extract_properties, get_source_dest_env_vars, is_require_data_copy\n'), ((2119, 2150), 'StatusChangedQueueTrigger.is_require_data_copy', 'is_require_data_copy', (['"""blocked"""'], {}), "('blocked')\n", (2139, 2150), False, 'from StatusChangedQueueTrigger import extract_properties, get_source_dest_env_vars, is_require_data_copy\n')]
#!/usr/bin/env python3

import urllib.request

import pandas as pd

import rcf

if __name__ == "__main__":
    data_filename = 'nyc_taxi.csv'
    data_source = 'https://raw.githubusercontent.com/numenta/NAB/master/data/realKnownCause/nyc_taxi.csv'
    urllib.request.urlretrieve(data_source, data_filename)

    taxi_data = pd.read_csv(data_filename, delimiter=',')
    taxi_input = taxi_data.value.to_numpy().reshape(-1,1)

    anomaly_scores = rcf.rcf(taxi_input)
    print(anomaly_scores[5600:5620])
[ "pandas.read_csv", "rcf.rcf" ]
[((322, 363), 'pandas.read_csv', 'pd.read_csv', (['data_filename'], {'delimiter': '""","""'}), "(data_filename, delimiter=',')\n", (333, 363), True, 'import pandas as pd\n'), ((443, 462), 'rcf.rcf', 'rcf.rcf', (['taxi_input'], {}), '(taxi_input)\n', (450, 462), False, 'import rcf\n')]
import importlib
import inspect
import logging
import os
from collections import OrderedDict
from types import FunctionType
from typing import Any, Callable, Dict, List, Optional, Type, Union

from rap.common.channel import UserChannel
from rap.common.exceptions import FuncNotFoundError, RegisteredError
from rap.common.types import is_json_type
from rap.common.utils import constant
from rap.server.model import Request

logger: logging.Logger = logging.getLogger(__name__)


class FuncModel(object):
    def __init__(
        self,
        group: str,
        func_type: str,
        func: Callable,
        is_private: bool,
        doc: Optional[str] = None,
        func_name: Optional[str] = None,
    ) -> None:
        self.func_sig = inspect.signature(func)
        self.group: str = group
        self.func_type: str = func_type
        self.func: Callable = func
        self.is_gen_func: bool = inspect.isgenerator(func) or inspect.isasyncgenfunction(func)
        self.is_private: bool = is_private
        self.doc: str = doc or func.__doc__ or ""
        self.func_name: str = func_name or func.__name__
        self.return_type: Type = self.func_sig.return_annotation
        self.arg_list: List[str] = []
        self.kwarg_dict: OrderedDict = OrderedDict()

        if self.func_type == constant.CHANNEL_TYPE and self.is_gen_func:
            raise RegisteredError("Is not a legal function. is channel or gen func?")

        for name, parameter in self.func_sig.parameters.items():
            if parameter.default is parameter.empty:
                self.arg_list.append(name)
            else:
                self.kwarg_dict[name] = parameter.default

    def to_dict(self) -> Dict[str, Any]:
        return {
            "group": self.group,
            "func_type": self.func_type,
            "is_gen_func": self.is_gen_func,
            "is_private": self.is_private,
            "doc": self.doc,
            "func_name": self.func_name,
        }


class RegistryManager(object):
    """server func manager"""

    def __init__(self) -> None:
        self._cwd: str = os.getcwd()
        self.func_dict: Dict[str, FuncModel] = dict()

        self.register(self._load, "load", group="registry", is_private=True)
        self.register(self._reload, "reload", group="registry", is_private=True)
        self.register(self.get_register_func_list, "list", group="registry", is_private=True)

    @staticmethod
    def gen_key(group: str, name: str, type_: str) -> str:
        """gen func key"""
        return f"{type_}:{group}:{name}"

    @staticmethod
    def _get_func_type(func: Callable) -> str:
        """get func type, normal or channel"""
        sig: "inspect.Signature" = inspect.signature(func)
        func_arg_parameter: List[inspect.Parameter] = [i for i in sig.parameters.values() if i.default == i.empty]
        func_type: str = constant.NORMAL_TYPE
        try:
            if len(func_arg_parameter) == 1 and issubclass(func_arg_parameter[0].annotation, UserChannel):
                func_type = constant.CHANNEL_TYPE
        except TypeError:
            # ignore error TypeError: issubclass() arg 1 must be a class
            pass
        return func_type

    def get_func_model(self, request: Request, func_type: str) -> FuncModel:
        func_key: str = self.gen_key(request.group, request.func_name, func_type)
        if func_key not in self.func_dict:
            raise FuncNotFoundError(extra_msg=f"name: {request.func_name}")
        func_model: FuncModel = self.func_dict[func_key]
        return func_model

    def register(
        self,
        func: Callable,
        name: Optional[str] = None,
        group: Optional[str] = None,
        is_private: bool = False,
        doc: Optional[str] = None,
    ) -> None:
        """
        register func to manager
        :param func: Function that need to be registered
        :param name: If the function name is not specified, the system will obtain its own name according to the
            function, otherwise it will be replaced by the specified function name
        :param group: Specify the correlation_id to which the function to be registered belongs.
            The same function can be registered to different groups.
            The root correlation_id is generally used for system components, and there are restrictions when calling.
        :param is_private: If the function is private, it will be restricted to call and cannot be overloaded
        :param doc: func doc, if not set, auto use python func doc
        """
        if inspect.isfunction(func) or inspect.ismethod(func):
            name = name if name else func.__name__
        else:
            raise RegisteredError("func must be func or method")

        sig: "inspect.Signature" = inspect.signature(func)
        func_type: str = self._get_func_type(func)

        if func_type == constant.NORMAL_TYPE:
            # check func param&return value type hint
            if sig.return_annotation is sig.empty:
                raise RegisteredError(f"{func.__name__} must use TypeHints")
            if not is_json_type(sig.return_annotation):
                raise RegisteredError(f"{func.__name__} return type:{sig.return_annotation} is not json type")
            for param in sig.parameters.values():
                if param.annotation is sig.empty:
                    raise RegisteredError(f"{func.__name__} param:{param.name} must use TypeHints")
                if not is_json_type(param.annotation):
                    raise RegisteredError(
                        f"{func.__name__} param:{param.name} type:{param.annotation} is not json type"
                    )

        if group is None:
            group = constant.DEFAULT_GROUP
        func_key: str = self.gen_key(group, name, func_type)
        if func_key in self.func_dict:
            raise RegisteredError(f"`{func_key}` Already register")
        self.func_dict[func_key] = FuncModel(
            group=group, func_type=func_type, func_name=name, func=func, is_private=is_private, doc=doc
        )
        logger.debug(f"register `{func_key}` success")

    @staticmethod
    def _load_func(path: str, func_str: str) -> FunctionType:
        """Dynamic loading function
        :param path: func file path
        :param func_str: func name
        """
        reload_module = importlib.import_module(path)
        func = getattr(reload_module, func_str)
        if not hasattr(func, "__call__"):
            raise RegisteredError(f"{func_str} is not a callable object")
        return func

    def _load(
        self,
        path: str,
        func_str: str,
        name: Optional[str] = None,
        group: Optional[str] = None,
        is_private: bool = False,
        doc: Optional[str] = None,
    ) -> str:
        """load func to registry"""
        try:
            func = self._load_func(path, func_str)
            if not name:
                name = func.__name__
            if group is None:
                group = constant.DEFAULT_GROUP
            func_type: str = self._get_func_type(func)
            func_key: str = self.gen_key(group, name, func_type)
            if func_key in self.func_dict:
                raise RegisteredError(f"`{func_key}` already exists")
            self.register(func, name, group, is_private, doc)
            return f"load {func_str} from {path} success"
        except Exception as e:
            raise RegisteredError(f"load {func_str} from {path} fail, {str(e)}")

    def _reload(
        self,
        path: str,
        func_str: str,
        name: Optional[str] = None,
        group: Optional[str] = None,
        doc: Optional[str] = None,
    ) -> str:
        """reload func by registry"""
        try:
            func = self._load_func(path, func_str)
            if not name:
                name = func.__name__
            if group is None:
                group = constant.DEFAULT_GROUP
            func_type: str = self._get_func_type(func)
            func_key: str = self.gen_key(group, name, func_type)
            if func_key not in self.func_dict:
                raise RegisteredError(f"`{func_key}` not exists")
            func_model: FuncModel = self.func_dict[func_key]
            if func_model.is_private:
                raise RegisteredError(f"{func_key} reload fail, private func can not reload")
            self.func_dict[func_key] = FuncModel(
                group=group, func_type=func_type, func_name=name, func=func, is_private=func_model.is_private, doc=doc
            )
            return f"reload {func_str} from {path} success"
        except Exception as e:
            raise RegisteredError(f"reload {func_str} from {path} fail, {str(e)}")

    def get_register_func_list(self) -> List[Dict[str, Union[str, bool]]]:
        """get func info which in registry"""
        register_list: List[Dict[str, Union[str, bool]]] = []
        for key, value in self.func_dict.items():
            module = inspect.getmodule(value.func)
            if not module:
                continue
            func_info_dict: Dict[str, Any] = value.to_dict()
            func_info_dict.update({"module_name": module.__name__, "module_file": module.__file__})
            register_list.append(func_info_dict)
        return register_list

    def __contains__(self, key: str) -> bool:
        return key in self.func_dict

    def __getitem__(self, key: str) -> FuncModel:
        return self.func_dict[key]
[ "inspect.ismethod", "rap.common.exceptions.RegisteredError", "importlib.import_module", "os.getcwd", "inspect.isasyncgenfunction", "rap.common.exceptions.FuncNotFoundError", "inspect.getmodule", "inspect.signature", "inspect.isgenerator", "inspect.isfunction", "collections.OrderedDict", "logging.getLogger", "rap.common.types.is_json_type" ]
[((448, 475), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (465, 475), False, 'import logging\n'), ((744, 767), 'inspect.signature', 'inspect.signature', (['func'], {}), '(func)\n', (761, 767), False, 'import inspect\n'), ((1262, 1275), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1273, 1275), False, 'from collections import OrderedDict\n'), ((2096, 2107), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2105, 2107), False, 'import os\n'), ((2709, 2732), 'inspect.signature', 'inspect.signature', (['func'], {}), '(func)\n', (2726, 2732), False, 'import inspect\n'), ((4805, 4828), 'inspect.signature', 'inspect.signature', (['func'], {}), '(func)\n', (4822, 4828), False, 'import inspect\n'), ((6378, 6407), 'importlib.import_module', 'importlib.import_module', (['path'], {}), '(path)\n', (6401, 6407), False, 'import importlib\n'), ((908, 933), 'inspect.isgenerator', 'inspect.isgenerator', (['func'], {}), '(func)\n', (927, 933), False, 'import inspect\n'), ((937, 969), 'inspect.isasyncgenfunction', 'inspect.isasyncgenfunction', (['func'], {}), '(func)\n', (963, 969), False, 'import inspect\n'), ((1368, 1435), 'rap.common.exceptions.RegisteredError', 'RegisteredError', (['"""Is not a legal function. is channel or gen func?"""'], {}), "('Is not a legal function. is channel or gen func?')\n", (1383, 1435), False, 'from rap.common.exceptions import FuncNotFoundError, RegisteredError\n'), ((3427, 3484), 'rap.common.exceptions.FuncNotFoundError', 'FuncNotFoundError', ([], {'extra_msg': 'f"""name: {request.func_name}"""'}), "(extra_msg=f'name: {request.func_name}')\n", (3444, 3484), False, 'from rap.common.exceptions import FuncNotFoundError, RegisteredError\n'), ((4587, 4611), 'inspect.isfunction', 'inspect.isfunction', (['func'], {}), '(func)\n', (4605, 4611), False, 'import inspect\n'), ((4615, 4637), 'inspect.ismethod', 'inspect.ismethod', (['func'], {}), '(func)\n', (4631, 4637), False, 'import inspect\n'), ((4722, 4768), 'rap.common.exceptions.RegisteredError', 'RegisteredError', (['"""func must be func or method"""'], {}), "('func must be func or method')\n", (4737, 4768), False, 'from rap.common.exceptions import FuncNotFoundError, RegisteredError\n'), ((5889, 5938), 'rap.common.exceptions.RegisteredError', 'RegisteredError', (['f"""`{func_key}` Already register"""'], {}), "(f'`{func_key}` Already register')\n", (5904, 5938), False, 'from rap.common.exceptions import FuncNotFoundError, RegisteredError\n'), ((6516, 6571), 'rap.common.exceptions.RegisteredError', 'RegisteredError', (['f"""{func_str} is not a callable object"""'], {}), "(f'{func_str} is not a callable object')\n", (6531, 6571), False, 'from rap.common.exceptions import FuncNotFoundError, RegisteredError\n'), ((9002, 9031), 'inspect.getmodule', 'inspect.getmodule', (['value.func'], {}), '(value.func)\n', (9019, 9031), False, 'import inspect\n'), ((5055, 5109), 'rap.common.exceptions.RegisteredError', 'RegisteredError', (['f"""{func.__name__} must use TypeHints"""'], {}), "(f'{func.__name__} must use TypeHints')\n", (5070, 5109), False, 'from rap.common.exceptions import FuncNotFoundError, RegisteredError\n'), ((5129, 5164), 'rap.common.types.is_json_type', 'is_json_type', (['sig.return_annotation'], {}), '(sig.return_annotation)\n', (5141, 5164), False, 'from rap.common.types import is_json_type\n'), ((5188, 5281), 'rap.common.exceptions.RegisteredError', 'RegisteredError', (['f"""{func.__name__} return type:{sig.return_annotation} is not json type"""'], {}), "(\n f'{func.__name__} 
return type:{sig.return_annotation} is not json type')\n", (5203, 5281), False, 'from rap.common.exceptions import FuncNotFoundError, RegisteredError\n'), ((7245, 7292), 'rap.common.exceptions.RegisteredError', 'RegisteredError', (['f"""`{func_key}` already exists"""'], {}), "(f'`{func_key}` already exists')\n", (7260, 7292), False, 'from rap.common.exceptions import FuncNotFoundError, RegisteredError\n'), ((8152, 8195), 'rap.common.exceptions.RegisteredError', 'RegisteredError', (['f"""`{func_key}` not exists"""'], {}), "(f'`{func_key}` not exists')\n", (8167, 8195), False, 'from rap.common.exceptions import FuncNotFoundError, RegisteredError\n'), ((8318, 8389), 'rap.common.exceptions.RegisteredError', 'RegisteredError', (['f"""{func_key} reload fail, private func can not reload"""'], {}), "(f'{func_key} reload fail, private func can not reload')\n", (8333, 8389), False, 'from rap.common.exceptions import FuncNotFoundError, RegisteredError\n'), ((5403, 5476), 'rap.common.exceptions.RegisteredError', 'RegisteredError', (['f"""{func.__name__} param:{param.name} must use TypeHints"""'], {}), "(f'{func.__name__} param:{param.name} must use TypeHints')\n", (5418, 5476), False, 'from rap.common.exceptions import FuncNotFoundError, RegisteredError\n'), ((5500, 5530), 'rap.common.types.is_json_type', 'is_json_type', (['param.annotation'], {}), '(param.annotation)\n', (5512, 5530), False, 'from rap.common.types import is_json_type\n'), ((5558, 5663), 'rap.common.exceptions.RegisteredError', 'RegisteredError', (['f"""{func.__name__} param:{param.name} type:{param.annotation} is not json type"""'], {}), "(\n f'{func.__name__} param:{param.name} type:{param.annotation} is not json type'\n )\n", (5573, 5663), False, 'from rap.common.exceptions import FuncNotFoundError, RegisteredError\n')]
"""Implementation of magic functions that control various automatic behaviors. """ #----------------------------------------------------------------------------- # Copyright (c) 2012 The IPython Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Our own packages from IPython.core.magic import Bunch, Magics, magics_class, line_magic from IPython.testing.skipdoctest import skip_doctest from logging import error #----------------------------------------------------------------------------- # Magic implementation classes #----------------------------------------------------------------------------- @magics_class class AutoMagics(Magics): """Magics that control various autoX behaviors.""" def __init__(self, shell): super(AutoMagics, self).__init__(shell) # namespace for holding state we may need self._magic_state = Bunch() @line_magic def automagic(self, parameter_s=''): """Make magic functions callable without having to type the initial %. Without arguments toggles on/off (when off, you must call it as %automagic, of course). With arguments it sets the value, and you can use any of (case insensitive): - on, 1, True: to activate - off, 0, False: to deactivate. Note that magic functions have lowest priority, so if there's a variable whose name collides with that of a magic fn, automagic won't work for that function (you get the variable instead). However, if you delete the variable (del var), the previously shadowed magic function becomes visible to automagic again.""" arg = parameter_s.lower() mman = self.shell.magics_manager if arg in ('on', '1', 'true'): val = True elif arg in ('off', '0', 'false'): val = False else: val = not mman.auto_magic mman.auto_magic = val print('\n' + self.shell.magics_manager.auto_status()) @skip_doctest @line_magic def autocall(self, parameter_s=''): """Make functions callable without having to type parentheses. Usage: %autocall [mode] The mode can be one of: 0->Off, 1->Smart, 2->Full. If not given, the value is toggled on and off (remembering the previous state). In more detail, these values mean: 0 -> fully disabled 1 -> active, but do not apply if there are no arguments on the line. In this mode, you get:: In [1]: callable Out[1]: <built-in function callable> In [2]: callable 'hello' ------> callable('hello') Out[2]: False 2 -> Active always. 
Even if no arguments are present, the callable object is called:: In [2]: float ------> float() Out[2]: 0.0 Note that even with autocall off, you can still use '/' at the start of a line to treat the first argument on the command line as a function and add parentheses to it:: In [8]: /str 43 ------> str(43) Out[8]: '43' # all-random (note for auto-testing) """ if parameter_s: arg = int(parameter_s) else: arg = 'toggle' if not arg in (0, 1, 2, 'toggle'): error('Valid modes: (0->Off, 1->Smart, 2->Full') return if arg in (0, 1, 2): self.shell.autocall = arg else: # toggle if self.shell.autocall: self._magic_state.autocall_save = self.shell.autocall self.shell.autocall = 0 else: try: self.shell.autocall = self._magic_state.autocall_save except AttributeError: self.shell.autocall = self._magic_state.autocall_save = 1 print("Automatic calling is:",['OFF','Smart','Full'][self.shell.autocall])
[ "logging.error", "IPython.core.magic.Bunch" ]
[((1220, 1227), 'IPython.core.magic.Bunch', 'Bunch', ([], {}), '()\n', (1225, 1227), False, 'from IPython.core.magic import Bunch, Magics, magics_class, line_magic\n'), ((3696, 3744), 'logging.error', 'error', (['"""Valid modes: (0->Off, 1->Smart, 2->Full"""'], {}), "('Valid modes: (0->Off, 1->Smart, 2->Full')\n", (3701, 3744), False, 'from logging import error\n')]
import pandas as pd
import numpy as np


class DataFixer:
    def __init__(self):
        pass

    def get_fix(self, data):
        self.data = data
        self.data = pd.get_dummies(self.data)
        return self.data
[ "pandas.get_dummies" ]
[((192, 217), 'pandas.get_dummies', 'pd.get_dummies', (['self.data'], {}), '(self.data)\n', (206, 217), True, 'import pandas as pd\n')]
# Python Standard Library Imports
import base64
import hashlib
import hmac
import json

# HTK Imports
from htk.utils import htk_setting
from htk.utils.general import resolve_method_dynamically


def validate_webhook_request(request):
    """Validates a 321Forms webhook request

    Returns a JSON request body if it is valid
    Otherwise, returns None
    """
    webhook_data = json.loads(request.body)
    company_id = webhook_data.get('company', {}).get('id')

    headers = request.META
    expected_signature = headers.get('HTTP_X_ONBOARDING_SIGNATURE', '')

    hash_key_retriever = resolve_method_dynamically(htk_setting('HTK_321FORMS_WEBHOOK_HASH_KEY_RETRIEVER'))
    hash_key = hash_key_retriever(company_id)

    signature = base64.b64encode(
        hmac.new(
            bytes(hash_key),
            request.body,
            digestmod=hashlib.sha1
        ).digest()
    )

    is_valid = signature == expected_signature
    if is_valid:
        webhook_data = webhook_data
    else:
        webhook_data = None
    return webhook_data


def handle_webhook_request(webhook_data):
    topic = webhook_data.get('topic', None)
    event_handlers = htk_setting('HTK_321FORMS_WEBHOOK_EVENT_HANDLERS')
    event_handler_method = event_handlers.get(topic)
    event_handler = resolve_method_dynamically(event_handler_method) if event_handler_method else None
    if event_handler:
        event_handler(webhook_data)
    else:
        pass
[ "json.loads", "htk.utils.htk_setting", "htk.utils.general.resolve_method_dynamically" ]
[((381, 405), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (391, 405), False, 'import json\n'), ((1162, 1212), 'htk.utils.htk_setting', 'htk_setting', (['"""HTK_321FORMS_WEBHOOK_EVENT_HANDLERS"""'], {}), "('HTK_321FORMS_WEBHOOK_EVENT_HANDLERS')\n", (1173, 1212), False, 'from htk.utils import htk_setting\n'), ((618, 672), 'htk.utils.htk_setting', 'htk_setting', (['"""HTK_321FORMS_WEBHOOK_HASH_KEY_RETRIEVER"""'], {}), "('HTK_321FORMS_WEBHOOK_HASH_KEY_RETRIEVER')\n", (629, 672), False, 'from htk.utils import htk_setting\n'), ((1287, 1335), 'htk.utils.general.resolve_method_dynamically', 'resolve_method_dynamically', (['event_handler_method'], {}), '(event_handler_method)\n', (1313, 1335), False, 'from htk.utils.general import resolve_method_dynamically\n')]
import socket
import os
from playsound import playsound
from pydub import AudioSegment


def sendToClient(msg):
    msg = msg.decode('utf-8')
    lang = msg[:3]  # ITA or ENG
    msg = msg[3:]  # actual message
    words = msg.split(" ")
    if len(words) > 18:
        sentences = []
        sentence = ""
        for i in range(len(words)):
            sentence += words[i] + " "
            if i%12 == 0 and i != 0:
                sentences.append(sentence)
                sentence = ""
            elif i == len(words)-1:
                sentences.append(sentence)
        with open('harvard_sentences.txt','w') as f:
            first = True
            for i, sentence in enumerate(sentences, start=1):
                if first:
                    f.write("first line\n1. "+str(sentence)+"\n")
                    first = False
                else:
                    f.write(f"{i}. {str(sentence)}\n")
        num_sentences = len(sentences)
    else:
        with open('harvard_sentences.txt','w') as f:
            f.write("first line\n1. "+str(msg)+"\n")
        num_sentences = 1

    os.system('python synthesize.py '+lang)

    sounds = 0
    for i in range(0, num_sentences):
        sounds += AudioSegment.from_wav(f"samples/{i+1}.wav")

    # increase volume by 10dB
    sounds += 10

    sounds.export("backup/final.wav", format="wav")
    f.close()

    with open('backup/final.wav', 'rb') as f:
        audiob = f.read()
    clientsocket.send(audiob)
    clientsocket.close()
    f.close()


if __name__ == '__main__':
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(("0.0.0.0", 1234))
    s.listen(5)

    while True:
        print("Waiting for connection...")
        clientsocket, address = s.accept()
        print(f"Connection from {address} has been established")
        msg = clientsocket.recv(2048)
        print(msg)
        sendToClient(msg)
[ "socket.socket", "os.system", "pydub.AudioSegment.from_wav" ]
[((1119, 1160), 'os.system', 'os.system', (["('python synthesize.py ' + lang)"], {}), "('python synthesize.py ' + lang)\n", (1128, 1160), False, 'import os\n'), ((1570, 1619), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (1583, 1619), False, 'import socket\n'), ((1230, 1275), 'pydub.AudioSegment.from_wav', 'AudioSegment.from_wav', (['f"""samples/{i + 1}.wav"""'], {}), "(f'samples/{i + 1}.wav')\n", (1251, 1275), False, 'from pydub import AudioSegment\n')]
import data.tools.maths as m
import pygame, numpy


class MousePicker:
    current_ray = None
    RAY_RANGE = 600.0
    RECURSION_COUNT = 200

    def __init__(self, camera, projection_matrix, display, terrain):
        self.camera = camera
        self.projection_matrix = projection_matrix
        self.display = display
        self.terrain = terrain
        self.view_matrix = m.Maths().create_view_matrix(camera)
        self.current_terrain_point = None
        self.count = 0

    def get_current_ray(self):
        return self.current_ray

    def update(self):
        self.view_matrix = m.Maths().create_view_matrix(self.camera)
        self.current_ray = self.calculate_mouse_ray()

    def calculate_mouse_ray(self):
        mouse_x, mouse_y = float(pygame.mouse.get_pos()[0]), float(pygame.mouse.get_pos()[1])
        normalized_device_coordinates = self.get_normalized_device_coordinates(mouse_x, mouse_y)
        clip_coordinates = (normalized_device_coordinates[0], normalized_device_coordinates[1], -1.0, 1.0)
        eye_coordinates = self.to_eye_coordinates(clip_coordinates)
        world_ray = self.to_world_coordinates(eye_coordinates)
        return world_ray

    def to_world_coordinates(self, eye_coordinates):
        inverted_view_matrix = numpy.linalg.inv(self.view_matrix)
        ray_world_coordinates = numpy.dot(inverted_view_matrix, eye_coordinates)
        mouse_ray = (-ray_world_coordinates[0], ray_world_coordinates[1], -ray_world_coordinates[2])
        return mouse_ray

    def to_eye_coordinates(self, clip_coordinates):
        inverted_projection_matrix = numpy.linalg.inv(self.projection_matrix)
        eye_coordinates = numpy.dot(inverted_projection_matrix, clip_coordinates)
        return eye_coordinates[0], eye_coordinates[1], -1.0, 0.0

    def get_normalized_device_coordinates(self, mouse_x, mouse_y):
        x = (2.0 * mouse_x) / self.display.get_width() - 1.0
        y = (2.0 * mouse_y) / self.display.get_height() - 1.0
        return (x, y)

    def intersect_with_y(self):
        a = self.camera.position[0]
        b = self.camera.position[1]
        c = self.camera.position[2]
        alpha = self.current_ray[0]
        beta = self.current_ray[1]
        gamma = self.current_ray[2]
        x = a - (alpha * b) / beta
        if self.terrain.height is not None:
            y = self.terrain.height
        else:
            y = 0.0
        z = c - (gamma * b) / beta
        return (x, y, z)
[ "numpy.dot", "data.tools.maths.Maths", "numpy.linalg.inv", "pygame.mouse.get_pos" ]
[((1302, 1336), 'numpy.linalg.inv', 'numpy.linalg.inv', (['self.view_matrix'], {}), '(self.view_matrix)\n', (1318, 1336), False, 'import pygame, numpy\n'), ((1370, 1418), 'numpy.dot', 'numpy.dot', (['inverted_view_matrix', 'eye_coordinates'], {}), '(inverted_view_matrix, eye_coordinates)\n', (1379, 1418), False, 'import pygame, numpy\n'), ((1640, 1680), 'numpy.linalg.inv', 'numpy.linalg.inv', (['self.projection_matrix'], {}), '(self.projection_matrix)\n', (1656, 1680), False, 'import pygame, numpy\n'), ((1708, 1763), 'numpy.dot', 'numpy.dot', (['inverted_projection_matrix', 'clip_coordinates'], {}), '(inverted_projection_matrix, clip_coordinates)\n', (1717, 1763), False, 'import pygame, numpy\n'), ((395, 404), 'data.tools.maths.Maths', 'm.Maths', ([], {}), '()\n', (402, 404), True, 'import data.tools.maths as m\n'), ((619, 628), 'data.tools.maths.Maths', 'm.Maths', ([], {}), '()\n', (626, 628), True, 'import data.tools.maths as m\n'), ((788, 810), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (808, 810), False, 'import pygame, numpy\n'), ((822, 844), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (842, 844), False, 'import pygame, numpy\n')]
# Author: <NAME> <<EMAIL>>
from math import log
from pathlib import Path

from ._utils import download


MINIMAL_ENTRY = {
    'FREQcount': 1,
    'CDcount': 1,
    'Lg10WF': log(2, 10),  # log10(FREQcount + 1)
    'Lg10CD': log(2, 10),
}
TOTAL_COUNT = 51e6


def read_subtlex(lower=False):
    """Read the SUBTLEXus data

    Parameters
    ----------
    lower : bool
        Use lower case keys (default is upper case).

    Notes
    -----
    http://www.ugent.be/pp/experimentele-psychologie/en/research/documents/subtlexus

    Columns:
    Word, FREQcount, CDcount, FREQlow, Cdlow, SUBTLWF, Lg10WF, SUBTLCD, Lg10CD
    """
    path = download('https://www.ugent.be/pp/experimentele-psychologie/en/research/documents/subtlexus/subtlexus2.zip/at_download/file',
                    'SUBTLEXus74286wordstextversion.txt', unzip=True)
    out = {}
    str_trans = str.lower if lower else str.upper
    with path.open() as fid:
        columns = fid.readline().split()
        i_key = columns.index('Word')
        columns.pop(i_key)
        for line in fid:
            items = line.split()
            key = str_trans(items.pop(i_key))
            if key in out:
                raise RuntimeError(f"Duplicate key: {key}")
            out[key] = dict(zip(columns, map(float, items)))
    return out


def read_subtlex_pos():
    """Read SUBTLEXus with part-of-speech tags"""
    path = download('http://crr.ugent.be/papers/SUBTLEX-US_frequency_list_with_PoS_information_final_text_version.zip',
                    'SUBTLEX-US-PoS.txt', unzip=True)
    with path.open() as fid:
        keys = next(fid).split()
        i_word = keys.index('Word')
        i_class = keys.index('All_PoS_SUBTLEX')
        i_freq = keys.index('All_freqs_SUBTLEX')
        d = {}
        for line in fid:
            line = line.split()
            d[line[i_word]] = {k: int(v) for k, v in zip(line[i_class].split('.'), line[i_freq].split('.')) if v != '#N/A'}
    return d
[ "math.log" ]
[((175, 185), 'math.log', 'log', (['(2)', '(10)'], {}), '(2, 10)\n', (178, 185), False, 'from math import log\n'), ((225, 235), 'math.log', 'log', (['(2)', '(10)'], {}), '(2, 10)\n', (228, 235), False, 'from math import log\n')]
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 1 12:21:00 2020

@author: cbri3325
"""

#%% Import functions

import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import pandas as pd
import datetime
import os
import glob
import shutil
import xlsxwriter
import time
from scipy.stats.stats import pearsonr


def createList(r1, r2):
    return [item for item in range(r1, r2+1)]

#%% Set Working directory

data_supradir = 'path to directory containing analysis results xlsx files' #Set working directory

users_path = [ f.path for f in os.scandir(data_supradir) if f.is_dir() ] #Create a list of the paths to the users directories
users_name = [ f.name for f in os.scandir(data_supradir) if f.is_dir() ] #Create a lisdt of users names

n_users = len(users_name) #Total number of subjects

print(users_path)

subj_list = ['AB001', 'DA003', 'FF004', 'GF002', 'GW005', 'KF006']

storeResults = {subj: pd.DataFrame(columns=['User', 'Repeats', 'Method', 'BTV Volume [mm3]', 'CTRL VOI Mean intensity [SUV]']) for subj in subj_list}

for current in users_name:

    user_dir = data_supradir+current
    user_name = current

    #Set paths to subfolders
    MI_dir = user_dir +'/MI method'
    CS_dir = user_dir +'/CS method'

    MI_results_df = pd.read_excel(MI_dir+'/Results_MI.xlsx', sheet_name=None)
    CS_results_df = pd.read_excel(CS_dir+'/Results_CS.xlsx', sheet_name=None)

    n_repeats = len(MI_results_df)
    n_subjs = len(MI_results_df.get('Repeat1')['Subject_ID'])
    # subj_list = MI_results_df.get('Repeat1')['Subject_ID'].tolist()

    MI_results_df = pd.read_excel(MI_dir+'/Results_MI.xlsx', sheet_name=None, index_col='Subject_ID')
    CS_results_df = pd.read_excel(CS_dir+'/Results_CS.xlsx', sheet_name=None, index_col='Subject_ID')

    #Create empty dataframes to populate as going through the loop
    # storeResults = {subj: pd.DataFrame(columns=['User', 'Repeats', 'Method', 'BTV Volume [mm3]', 'CTRL VOI Mean intensity [SUV]']) for subj in subj_list}

    for repeat in range(1, n_repeats+1):
        # Repeats_ns = Repeats_ns.append([repeat])

        MI_BTV_CTRL = MI_results_df.get('Repeat'+str(repeat)).loc[:, ['BTV Volume [mm3]','CTRL VOI Mean intensity [SUV]']]
        CS_BTV_CTRL = CS_results_df.get('Repeat'+str(repeat)).loc[:, ['BTV Volume [mm3]','CTRL VOI Mean intensity [SUV]']]

        method1, method2 = 'MI', 'CS'

        for subj in subj_list:
            storeResults[subj] = storeResults[subj].append({'User': current, 'Repeats': repeat, 'Method': method1, 'BTV Volume [mm3]': float(MI_BTV_CTRL.loc[subj, 'BTV Volume [mm3]']), 'CTRL VOI Mean intensity [SUV]': float(MI_BTV_CTRL.loc[subj, 'CTRL VOI Mean intensity [SUV]']) }, ignore_index=True)
            storeResults[subj] = storeResults[subj].append({'User': current, 'Repeats': repeat, 'Method': method2, 'BTV Volume [mm3]': float(CS_BTV_CTRL.loc[subj, 'BTV Volume [mm3]']), 'CTRL VOI Mean intensity [SUV]': float(CS_BTV_CTRL.loc[subj, 'CTRL VOI Mean intensity [SUV]']) }, ignore_index=True)

# new_df = df.groupby(['User', 'Method']).['BTV Volume [mm3]'].agg({'mean','std'})
# df = df.merge(new_df, left_on=['User', 'Method'], right_index=True)

writer = pd.ExcelWriter(data_supradir +'OverallResults.xlsx', engine='xlsxwriter')

for name, df in storeResults.items():

    # Calculate CoV values for BTV and CTRL for each group of User, Method for each Subject
    BTV_df = df.groupby(['User', 'Method'])['BTV Volume [mm3]'].agg({'mean','std'})
    CTRL_df = df.groupby(['User', 'Method'])['CTRL VOI Mean intensity [SUV]'].agg({'mean','std'})

    BTV_df['BTV_CoV'] = BTV_df['std']/BTV_df['mean']
    CTRL_df['CTRL_CoV'] = CTRL_df['std']/CTRL_df['mean']

    df = df.merge(BTV_df['BTV_CoV'], left_on=['User', 'Method'], right_index=True)
    df = df.merge(CTRL_df['CTRL_CoV'], left_on=['User', 'Method'], right_index=True)

    df.to_excel(writer, sheet_name=name, index=False)

writer.save()
[ "pandas.DataFrame", "pandas.ExcelWriter", "os.scandir", "pandas.read_excel" ]
[((3920, 3994), 'pandas.ExcelWriter', 'pd.ExcelWriter', (["(data_supradir + 'OverallResults.xlsx')"], {'engine': '"""xlsxwriter"""'}), "(data_supradir + 'OverallResults.xlsx', engine='xlsxwriter')\n", (3934, 3994), True, 'import pandas as pd\n'), ((965, 1073), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['User', 'Repeats', 'Method', 'BTV Volume [mm3]',\n 'CTRL VOI Mean intensity [SUV]']"}), "(columns=['User', 'Repeats', 'Method', 'BTV Volume [mm3]',\n 'CTRL VOI Mean intensity [SUV]'])\n", (977, 1073), True, 'import pandas as pd\n'), ((1327, 1386), 'pandas.read_excel', 'pd.read_excel', (["(MI_dir + '/Results_MI.xlsx')"], {'sheet_name': 'None'}), "(MI_dir + '/Results_MI.xlsx', sheet_name=None)\n", (1340, 1386), True, 'import pandas as pd\n'), ((1406, 1465), 'pandas.read_excel', 'pd.read_excel', (["(CS_dir + '/Results_CS.xlsx')"], {'sheet_name': 'None'}), "(CS_dir + '/Results_CS.xlsx', sheet_name=None)\n", (1419, 1465), True, 'import pandas as pd\n'), ((1673, 1761), 'pandas.read_excel', 'pd.read_excel', (["(MI_dir + '/Results_MI.xlsx')"], {'sheet_name': 'None', 'index_col': '"""Subject_ID"""'}), "(MI_dir + '/Results_MI.xlsx', sheet_name=None, index_col=\n 'Subject_ID')\n", (1686, 1761), True, 'import pandas as pd\n'), ((1776, 1864), 'pandas.read_excel', 'pd.read_excel', (["(CS_dir + '/Results_CS.xlsx')"], {'sheet_name': 'None', 'index_col': '"""Subject_ID"""'}), "(CS_dir + '/Results_CS.xlsx', sheet_name=None, index_col=\n 'Subject_ID')\n", (1789, 1864), True, 'import pandas as pd\n'), ((596, 621), 'os.scandir', 'os.scandir', (['data_supradir'], {}), '(data_supradir)\n', (606, 621), False, 'import os\n'), ((723, 748), 'os.scandir', 'os.scandir', (['data_supradir'], {}), '(data_supradir)\n', (733, 748), False, 'import os\n')]
import sys, os
import time

import tensorflow as tf
import functools

from absl import app

from plan import *
from snappy_io import unsnappy

NUM_CLASSES = 4672
BOARD_SHAPE = (20, 8, 8)
BOARD_FLOATS = 1280

AUTOTUNE = tf.data.AUTOTUNE

FEATURES = {
    'board': tf.io.FixedLenFeature(BOARD_SHAPE, tf.float32),
    'label': tf.io.FixedLenFeature([], tf.int64)
}

FEATURES2 = {
    'board': tf.io.FixedLenFeature(BOARD_SHAPE, tf.float32),
    'label': tf.io.FixedLenFeature([], tf.int64),
    'legal_moves': tf.io.VarLenFeature(tf.int64)
}


def legal_moves_mask(legal_moves_sparse):
    dense = tf.sparse.to_dense(legal_moves_sparse, default_value=-1)
    hot = tf.one_hot(dense, on_value=1.0, off_value=0.0, depth=NUM_CLASSES)
    hot2 = tf.math.reduce_sum(hot, axis=[-2])
    return hot2


def _extract(blob):
    t = tf.io.parse_example(blob, features=FEATURES)
    return t['board'], t['label']


def _extract2(blob):
    t = tf.io.parse_example(blob, features=FEATURES2)
    return ({'board': t['board'],
             'legal_moves': t['legal_moves'],
             #'legal_moves_mask': legal_moves_mask(t['legal_moves']) # very slow - try to do in model and maybe runs on gpu?
             },
            t['label'])


# def gen_snappy(fn):
#     for ex in unsnappy(fn):
#         board = tf.convert_to_tensor(ex.features.feature['board'].float_list.value,
#                                      dtype=tf.float32)
#         board = tf.reshape(board, BOARD_SHAPE)
#         action = tf.convert_to_tensor(ex.features.feature['label'].int64_list.value[0],
#                                       dtype=tf.int64)
#         yield (board, action)


def create_input_generator_rio(dplan, fns, is_train=True, verbose=True, do_repeat=True, return_legal_moves=False):
    if type(fns) == type(""):
        fns = [fns]

    if verbose:
        print(f'Open {fns}')

    datasets = []
    for fn in fns:
        assert os.path.isfile(fn), fn
        assert fn.endswith('.recordio')

    ds = tf.data.TFRecordDataset(fns, 'ZLIB', num_parallel_reads=len(fns))

    if is_train:
        ds = ds.shuffle(dplan.shuffle)
    if do_repeat:
        ds = ds.repeat()

    if dplan.get('swap_batch_map_order', False):
        ds = ds.batch(dplan.batch,
                      num_parallel_calls=AUTOTUNE,
                      deterministic=False)
        ds = ds.map(_extract2 if return_legal_moves else _extract,
                    num_parallel_calls=AUTOTUNE)
    else:
        ds = ds.batch(dplan.batch,
                      num_parallel_calls=AUTOTUNE,
                      deterministic=False)  # performance
        ds = ds.map(_extract2 if return_legal_moves else _extract)

    ds = ds.prefetch(dplan.prefetch)
    return ds


def create_input_generator(dplan, fns, is_train=True, verbose=True, do_repeat=True, return_legal_moves=False):
    if type(fns) == type(""):
        fns = [fns]

    if fns[0].endswith('.recordio'):
        return create_input_generator_rio(dplan, fns, is_train, verbose, do_repeat, return_legal_moves)

    assert False, 'obsolete code path'

    # if verbose:
    #     print(f'Open {fns}')
    # datasets = []
    # for fn in fns:
    #     assert os.path.isfile(fn), fn
    #     assert fn.endswith('.snappy')
    #     gen1 = functools.partial(gen_snappy, fn)
    #     ds = tf.data.Dataset.from_generator(gen1,
    #                                         output_types=('float32', 'int64'),
    #                                         output_shapes=(BOARD_SHAPE, []))
    #     if do_repeat:
    #         ds = ds.repeat()
    #     datasets.append(ds)
    #     del ds
    # ds = tf.data.experimental.sample_from_datasets(
    #     datasets,
    #     weights=None  # Uniform
    # )
    # if is_train:
    #     ds = ds.shuffle(dplan.shuffle)
    #     ds = ds.repeat()
    # ds = ds.batch(dplan.batch,
    #               num_parallel_calls=AUTOTUNE,
    #               deterministic=False)  # performance
    # ds = ds.prefetch(dplan.prefetch)
    # return ds


def main(argv):
    plan = load_plan('v0.toml')
    print(next(iter(create_input_generator(plan.data, 'mega-v2-9.snappy'))))


if __name__ == '__main__':
    app.run(main)
[ "tensorflow.one_hot", "tensorflow.io.VarLenFeature", "tensorflow.sparse.to_dense", "os.path.isfile", "absl.app.run", "tensorflow.io.parse_example", "tensorflow.io.FixedLenFeature", "tensorflow.math.reduce_sum" ]
[((260, 306), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['BOARD_SHAPE', 'tf.float32'], {}), '(BOARD_SHAPE, tf.float32)\n', (281, 306), True, 'import tensorflow as tf\n'), ((319, 354), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (340, 354), True, 'import tensorflow as tf\n'), ((383, 429), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['BOARD_SHAPE', 'tf.float32'], {}), '(BOARD_SHAPE, tf.float32)\n', (404, 429), True, 'import tensorflow as tf\n'), ((442, 477), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (463, 477), True, 'import tensorflow as tf\n'), ((496, 525), 'tensorflow.io.VarLenFeature', 'tf.io.VarLenFeature', (['tf.int64'], {}), '(tf.int64)\n', (515, 525), True, 'import tensorflow as tf\n'), ((582, 638), 'tensorflow.sparse.to_dense', 'tf.sparse.to_dense', (['legal_moves_sparse'], {'default_value': '(-1)'}), '(legal_moves_sparse, default_value=-1)\n', (600, 638), True, 'import tensorflow as tf\n'), ((647, 712), 'tensorflow.one_hot', 'tf.one_hot', (['dense'], {'on_value': '(1.0)', 'off_value': '(0.0)', 'depth': 'NUM_CLASSES'}), '(dense, on_value=1.0, off_value=0.0, depth=NUM_CLASSES)\n', (657, 712), True, 'import tensorflow as tf\n'), ((722, 756), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['hot'], {'axis': '[-2]'}), '(hot, axis=[-2])\n', (740, 756), True, 'import tensorflow as tf\n'), ((799, 843), 'tensorflow.io.parse_example', 'tf.io.parse_example', (['blob'], {'features': 'FEATURES'}), '(blob, features=FEATURES)\n', (818, 843), True, 'import tensorflow as tf\n'), ((905, 950), 'tensorflow.io.parse_example', 'tf.io.parse_example', (['blob'], {'features': 'FEATURES2'}), '(blob, features=FEATURES2)\n', (924, 950), True, 'import tensorflow as tf\n'), ((3900, 3913), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (3907, 3913), False, 'from absl import app\n'), ((1828, 1846), 'os.path.isfile', 'os.path.isfile', (['fn'], {}), '(fn)\n', (1842, 1846), False, 'import sys, os\n')]
from dataclasses import dataclass, field
from typing import List
from xsdata.models.datatype import XmlDateTime

__NAMESPACE__ = "http://xstest-tns/schema11_F4_3_16_v01"


@dataclass
class Root:
    class Meta:
        name = "root"
        namespace = "http://xstest-tns/schema11_F4_3_16_v01"

    el_dtime_type: List[XmlDateTime] = field(
        default_factory=list,
        metadata={
            "name": "elDTimeType",
            "type": "Element",
            "namespace": "",
        }
    )
    el_dtime_etprohibited: List[XmlDateTime] = field(
        default_factory=list,
        metadata={
            "name": "elDTimeETProhibited",
            "type": "Element",
            "namespace": "",
            "min_occurs": 1,
            "explicit_timezone": "prohibited",
        }
    )
    el_dtime_etrequired: List[XmlDateTime] = field(
        default_factory=list,
        metadata={
            "name": "elDTimeETRequired",
            "type": "Element",
            "namespace": "",
            "min_occurs": 1,
            "explicit_timezone": "required",
        }
    )
    el_dtime_etoptional: List[XmlDateTime] = field(
        default_factory=list,
        metadata={
            "name": "elDTimeETOptional",
            "type": "Element",
            "namespace": "",
            "min_occurs": 1,
            "explicit_timezone": "optional",
        }
    )
[ "dataclasses.field" ]
[((334, 435), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'name': 'elDTimeType', 'type': 'Element', 'namespace': ''}"}), "(default_factory=list, metadata={'name': 'elDTimeType', 'type':\n 'Element', 'namespace': ''})\n", (339, 435), False, 'from dataclasses import dataclass, field\n'), ((548, 713), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'name': 'elDTimeETProhibited', 'type': 'Element', 'namespace': '',\n 'min_occurs': 1, 'explicit_timezone': 'prohibited'}"}), "(default_factory=list, metadata={'name': 'elDTimeETProhibited', 'type':\n 'Element', 'namespace': '', 'min_occurs': 1, 'explicit_timezone':\n 'prohibited'})\n", (553, 713), False, 'from dataclasses import dataclass, field\n'), ((844, 1005), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'name': 'elDTimeETRequired', 'type': 'Element', 'namespace': '',\n 'min_occurs': 1, 'explicit_timezone': 'required'}"}), "(default_factory=list, metadata={'name': 'elDTimeETRequired', 'type':\n 'Element', 'namespace': '', 'min_occurs': 1, 'explicit_timezone':\n 'required'})\n", (849, 1005), False, 'from dataclasses import dataclass, field\n'), ((1136, 1297), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'name': 'elDTimeETOptional', 'type': 'Element', 'namespace': '',\n 'min_occurs': 1, 'explicit_timezone': 'optional'}"}), "(default_factory=list, metadata={'name': 'elDTimeETOptional', 'type':\n 'Element', 'namespace': '', 'min_occurs': 1, 'explicit_timezone':\n 'optional'})\n", (1141, 1297), False, 'from dataclasses import dataclass, field\n')]
from django.conf import settings
from django.shortcuts import render

from djpagan.czech.forms import ReimbursementForm


def reimbursement(request):
    form = ReimbursementForm()
    return render(
        request,
        'czech/reimbursement/form.html',
        {'form': form,}
    )
[ "djpagan.czech.forms.ReimbursementForm", "django.shortcuts.render" ]
[((162, 181), 'djpagan.czech.forms.ReimbursementForm', 'ReimbursementForm', ([], {}), '()\n', (179, 181), False, 'from djpagan.czech.forms import ReimbursementForm\n'), ((194, 258), 'django.shortcuts.render', 'render', (['request', '"""czech/reimbursement/form.html"""', "{'form': form}"], {}), "(request, 'czech/reimbursement/form.html', {'form': form})\n", (200, 258), False, 'from django.shortcuts import render\n')]
import os
import subprocess
from subprocess import check_output

import cv2
import numpy as np


class VideoCaptureYUV:
    def __init__(self, filename, size):
        self.height, self.width = size
        self.frame_len = int(self.width * self.height * 3 / 2)
        self.f = open(filename, 'rb')
        self.shape = (int(self.height*1.5), self.width)

    def quantize(self,y_comp,bits,bdepth=8):
        y_comp=np.uint8(np.rint(y_comp*((pow(2,bits)-1)/(pow(2,bdepth)-1))))
        return y_comp

    def quantize_inverse(self,y_comp,bits,bdepth=8):
        y_comp=np.uint8(np.rint(y_comp*((pow(2,bdepth)-1)/(pow(2,bits)-1))))
        return y_comp

    def adjust_luminance(self,y_comp,step):
        y_comp=np.clip(y_comp+step,a_min = 2, a_max = 255)
        return y_comp

    def read_raw(self):
        try:
            raw = self.f.read(self.frame_len)
            yuv = np.frombuffer(raw, dtype=np.uint8)
            yuv = yuv.reshape(self.shape)
        except Exception as e:
            print(str(e))
            return False, None
        return True, yuv

    def read(self,lum_step=0):
        ret, yuv = self.read_raw()
        if not ret:
            return ret, yuv
        y=yuv[:1080,:]
        uv=yuv[1080:,:]
        y=self.quantize(y,6,8)
        uv=self.quantize(uv,5,8)
        y=self.quantize_inverse(y,6,8)
        uv=self.quantize_inverse(uv,5,8)
        yuv=np.concatenate((y,uv),axis=0)
        yuv_mod = yuv.reshape(self.frame_len,)
        bgr = cv2.cvtColor(yuv, cv2.COLOR_YUV2RGB_I420)
        rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
        # gray=gray = cv2.cvtColor(yuv, cv2.COLOR_BGR2GRAY)
        return ret, rgb


def step1():
    # step 1
    inputfilepath="./mp4"
    outputfilepath="./yuv"
    files=os.listdir(inputfilepath)
    for name in files:
        cmd="ffmpeg -i {0} -c:v rawvideo -pix_fmt yuv420p {1}".format(inputfilepath+"/"+name,outputfilepath+"/"+name[:-3]+"yuv")
        check_output(cmd, shell=True).decode()
        print(cmd)
        #os.remove(inputfilepath+"/"+name)
        print("shukar hai!")
    print("Step 1 completed")
    for name in files:
        os.remove(inputfilepath+"/"+name)


def step2():
    # step 2
    path="./yuv"
    files=os.listdir(path)
    for name in files:
        filename=path+'\\'+name
        print(filename)
        for lum_step in range(0,1):
            size = (1080, 1920)
            cap = VideoCaptureYUV(filename, size)
            fourcc=cv2.VideoWriter_fourcc(*'MP4V')
            fourcc=0x7634706d
            fps=int(name[-6:-4])
            out=cv2.VideoWriter('./mp4/{0}_6b.mp4'.format(name[:-4]),fourcc,fps,(1920,1080))
            while 1:
                ret, frame = cap.read(lum_step)
                out.write(frame)
                if ret:
                    pass
                else:
                    break
    print("step 2 completed")


def step3():
    #step3
    path = "./yuv"
    files = os.listdir(path)
    for name in files:
        filename=path+'\\'+name
        os.remove(filename)
    print("step3 completed")


def step4():
    #step 4
    inputfilepath="./mp4"
    outputfilepath="./png"
    files=os.listdir(inputfilepath)
    for name in files:
        try:
            cmd="ffmpeg -i {0} -vf fps=0.2 {1}".format(inputfilepath+"/"+name,outputfilepath+"/%06d_"+name[:4]+".png")
            check_output(cmd, shell=True).decode()
        except:
            pass
    print("step 3 completed")


if __name__ == "__main__":
    step1()
    step2()
    step3()
    step4()
[ "os.remove", "cv2.VideoWriter_fourcc", "cv2.cvtColor", "numpy.frombuffer", "subprocess.check_output", "numpy.clip", "os.listdir", "numpy.concatenate" ]
[((1505, 1530), 'os.listdir', 'os.listdir', (['inputfilepath'], {}), '(inputfilepath)\n', (1515, 1530), False, 'import os\n'), ((1916, 1932), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1926, 1932), False, 'import os\n'), ((2454, 2470), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2464, 2470), False, 'import os\n'), ((2644, 2669), 'os.listdir', 'os.listdir', (['inputfilepath'], {}), '(inputfilepath)\n', (2654, 2669), False, 'import os\n'), ((644, 686), 'numpy.clip', 'np.clip', (['(y_comp + step)'], {'a_min': '(2)', 'a_max': '(255)'}), '(y_comp + step, a_min=2, a_max=255)\n', (651, 686), True, 'import numpy as np\n'), ((1189, 1220), 'numpy.concatenate', 'np.concatenate', (['(y, uv)'], {'axis': '(0)'}), '((y, uv), axis=0)\n', (1203, 1220), True, 'import numpy as np\n'), ((1268, 1309), 'cv2.cvtColor', 'cv2.cvtColor', (['yuv', 'cv2.COLOR_YUV2RGB_I420'], {}), '(yuv, cv2.COLOR_YUV2RGB_I420)\n', (1280, 1309), False, 'import cv2\n'), ((1318, 1354), 'cv2.cvtColor', 'cv2.cvtColor', (['bgr', 'cv2.COLOR_BGR2RGB'], {}), '(bgr, cv2.COLOR_BGR2RGB)\n', (1330, 1354), False, 'import cv2\n'), ((1837, 1874), 'os.remove', 'os.remove', (["(inputfilepath + '/' + name)"], {}), "(inputfilepath + '/' + name)\n", (1846, 1874), False, 'import os\n'), ((2519, 2538), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (2528, 2538), False, 'import os\n'), ((786, 820), 'numpy.frombuffer', 'np.frombuffer', (['raw'], {'dtype': 'np.uint8'}), '(raw, dtype=np.uint8)\n', (799, 820), True, 'import numpy as np\n'), ((2101, 2132), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MP4V'"], {}), "(*'MP4V')\n", (2123, 2132), False, 'import cv2\n'), ((1677, 1706), 'subprocess.check_output', 'check_output', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (1689, 1706), False, 'from subprocess import check_output\n'), ((2811, 2840), 'subprocess.check_output', 'check_output', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (2823, 2840), False, 'from subprocess import check_output\n')]
"""Example of using the 'shred' transformation. You will need a copy of 'zfs.owl' and specify its location at 'CHISEL_EXAMPLES_ZFS_OWL'. """ import os from deriva.core import DerivaServer from deriva.chisel import Model from deriva.chisel import shred __dry_run__ = os.getenv('CHISEL_EXAMPLE_DRY_RUN', True) __host__ = os.getenv('CHISEL_EXAMPLES_HOSTNAME', 'localhost') __catalog_id__ = os.getenv('CHISEL_EXAMPLES_CATALOG', '1') zfs_filename = os.getenv('CHISEL_EXAMPLES_ZFS_OWL') if not zfs_filename: print("ERROR: env var 'CHISEL_EXAMPLES_ZFS_OWL' not defined") exit(1) server = DerivaServer('https', __host__) catalog = server.connect_ermrest(__catalog_id__) model = Model.from_catalog(catalog) # SPARQL expression to extract the id (i.e., short identifier) and name (i.e., preferred readable name) from the graph sparql_class_and_props = """ SELECT DISTINCT ?id (?label AS ?name) WHERE { ?s oboInOwl:id ?id . ?s rdfs:label ?label . }""" # Create a new relation computed from the shredded graph with model.begin(dry_run=__dry_run__) as session: session.create_table_as( 'vocab', 'zebrafish_stage_terms', shred(zfs_filename, sparql_class_and_props) )
[ "deriva.chisel.shred", "deriva.chisel.Model.from_catalog", "deriva.core.DerivaServer", "os.getenv" ]
[((268, 309), 'os.getenv', 'os.getenv', (['"""CHISEL_EXAMPLE_DRY_RUN"""', '(True)'], {}), "('CHISEL_EXAMPLE_DRY_RUN', True)\n", (277, 309), False, 'import os\n'), ((321, 371), 'os.getenv', 'os.getenv', (['"""CHISEL_EXAMPLES_HOSTNAME"""', '"""localhost"""'], {}), "('CHISEL_EXAMPLES_HOSTNAME', 'localhost')\n", (330, 371), False, 'import os\n'), ((389, 430), 'os.getenv', 'os.getenv', (['"""CHISEL_EXAMPLES_CATALOG"""', '"""1"""'], {}), "('CHISEL_EXAMPLES_CATALOG', '1')\n", (398, 430), False, 'import os\n'), ((447, 483), 'os.getenv', 'os.getenv', (['"""CHISEL_EXAMPLES_ZFS_OWL"""'], {}), "('CHISEL_EXAMPLES_ZFS_OWL')\n", (456, 483), False, 'import os\n'), ((593, 624), 'deriva.core.DerivaServer', 'DerivaServer', (['"""https"""', '__host__'], {}), "('https', __host__)\n", (605, 624), False, 'from deriva.core import DerivaServer\n'), ((682, 709), 'deriva.chisel.Model.from_catalog', 'Model.from_catalog', (['catalog'], {}), '(catalog)\n', (700, 709), False, 'from deriva.chisel import Model\n'), ((1145, 1188), 'deriva.chisel.shred', 'shred', (['zfs_filename', 'sparql_class_and_props'], {}), '(zfs_filename, sparql_class_and_props)\n', (1150, 1188), False, 'from deriva.chisel import shred\n')]
import pandas as pd with open('input.txt') as fh: lines = fh.readlines() class Board: def __init__(self, lines): self.values = [ (int(val), r, c) for r, row in enumerate(lines) for c, val in enumerate(row.split()) ] self.board = pd.DataFrame([[0] * 5] * 5) self.last_num = 0 def update(self, number): for val, r, c in self.values: if val == number: self.board.iloc[r, c] = 1 self.last_num = number self.values = [ x for x in self.values if x[0] != number ] @property def score(self): return self.last_num * sum([x[0] for x in self.values]) @property def winner(self): return any([ self.board.all().any(), self.board.all(axis=1).any() ]) sequence = list(map(int, lines[0].split(','))) boards = [ Board(lines[offs:offs + 5]) for offs in range(2, len(lines) + 1, 6) ] # part 1 def find_first_winner(sequence, boards): for num in sequence: for i, b in enumerate(boards): b.update(num) if b.winner: return(i + 1, b.score) print('first winner:', find_first_winner(sequence, boards)) # part 2 def find_last_winner(sequence, boards): for num in sequence: for i, b in enumerate(boards): if not b.winner: b.update(num) if b.winner: if all([board.winner for board in boards]): return (i + 1, b.score) print('last winner:', find_last_winner(sequence, boards))
[ "pandas.DataFrame" ]
[((301, 328), 'pandas.DataFrame', 'pd.DataFrame', (['([[0] * 5] * 5)'], {}), '([[0] * 5] * 5)\n', (313, 328), True, 'import pandas as pd\n')]
# -*- coding: utf-8 -*- """ wakatime.main ~~~~~~~~~~~~~ Module entry point. :copyright: (c) 2013 <NAME>. :license: BSD, see LICENSE for more details. """ from __future__ import print_function import logging import os import sys import time import traceback pwd = os.path.dirname(os.path.abspath(__file__)) sys.path.insert(0, os.path.dirname(pwd)) sys.path.insert(0, os.path.join(pwd, 'packages')) from .__about__ import __version__ from .api import send_heartbeats from .arguments import parse_arguments from .compat import u, json from .constants import SUCCESS, UNKNOWN_ERROR, HEARTBEATS_PER_REQUEST from .logger import setup_logging log = logging.getLogger('WakaTime') from .heartbeat import Heartbeat from .offlinequeue import Queue def execute(argv=None): if argv: sys.argv = ['wakatime'] + argv args, configs = parse_arguments() setup_logging(args, __version__) try: heartbeats = [] hb = Heartbeat(vars(args), args, configs) if hb: heartbeats.append(hb) else: log.debug(hb.skip) if args.extra_heartbeats: try: for extra_data in json.loads(sys.stdin.readline()): hb = Heartbeat(extra_data, args, configs) if hb: heartbeats.append(hb) else: log.debug(hb.skip) except json.JSONDecodeError as ex: log.warning(u('Malformed extra heartbeats json: {msg}').format( msg=u(ex), )) retval = SUCCESS while heartbeats: retval = send_heartbeats(heartbeats[:HEARTBEATS_PER_REQUEST], args, configs) heartbeats = heartbeats[HEARTBEATS_PER_REQUEST:] if retval != SUCCESS: break if heartbeats: Queue(args, configs).push_many(heartbeats) if retval == SUCCESS: queue = Queue(args, configs) for offline_heartbeats in queue.pop_many(args.sync_offline_activity): time.sleep(1) retval = send_heartbeats(offline_heartbeats, args, configs) if retval != SUCCESS: break return retval except: log.traceback(logging.ERROR) print(traceback.format_exc()) return UNKNOWN_ERROR
[ "os.path.abspath", "os.path.dirname", "time.sleep", "traceback.format_exc", "os.path.join", "sys.stdin.readline", "logging.getLogger" ]
[((669, 698), 'logging.getLogger', 'logging.getLogger', (['"""WakaTime"""'], {}), "('WakaTime')\n", (686, 698), False, 'import logging\n'), ((304, 329), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (319, 329), False, 'import os\n'), ((350, 370), 'os.path.dirname', 'os.path.dirname', (['pwd'], {}), '(pwd)\n', (365, 370), False, 'import os\n'), ((391, 420), 'os.path.join', 'os.path.join', (['pwd', '"""packages"""'], {}), "(pwd, 'packages')\n", (403, 420), False, 'import os\n'), ((2107, 2120), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2117, 2120), False, 'import time\n'), ((2348, 2370), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2368, 2370), False, 'import traceback\n'), ((1196, 1216), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (1214, 1216), False, 'import sys\n')]
import subprocess


def displayConnection():
    lisData = []
    lisData2 = []
    command = 'nmcli connection show > /tmp/listConnection '
    try:
        subprocess.run(command, check=True, shell=True)
    except subprocess.CalledProcessError:
        print("Error while fetching connections")
    with open('/tmp/listConnection') as f:
        lineList = f.readlines()
        lineList.pop(0)
        for i in lineList:
            i = i[::-1]
            i = i[56:]
            i = i[::-1]
            lisData.append(i)
        for i in lisData:
            i = i.replace('\n', '')
            i = i.replace(' ', '\\ ')
            i = i.replace('\\', ',')
            i = i.replace(', ,', '')
            i = i.replace('\n', '')
            i = i.replace(',', '\\')
            i = i.replace(' \\', '')
            lisData2.append(i)
    return lisData2
[ "subprocess.run" ]
[((160, 207), 'subprocess.run', 'subprocess.run', (['command'], {'check': '(True)', 'shell': '(True)'}), '(command, check=True, shell=True)\n', (174, 207), False, 'import subprocess\n')]
from setuptools import setup from torch.utils.cpp_extension import BuildExtension, CUDAExtension # [!!!] be sure to use different file names for cpp and cu files # because `setuptools` does not see the filename extension setup( name='PMTS_cuda', ext_modules=[ CUDAExtension('PMTS_cuda', [ 'PMTS_cuda.cpp', 'PMTS_cuda_kernels.cu', ]) ], cmdclass={ 'build_ext': BuildExtension })
[ "torch.utils.cpp_extension.CUDAExtension" ]
[((278, 347), 'torch.utils.cpp_extension.CUDAExtension', 'CUDAExtension', (['"""PMTS_cuda"""', "['PMTS_cuda.cpp', 'PMTS_cuda_kernels.cu']"], {}), "('PMTS_cuda', ['PMTS_cuda.cpp', 'PMTS_cuda_kernels.cu'])\n", (291, 347), False, 'from torch.utils.cpp_extension import BuildExtension, CUDAExtension\n')]
# VXT # Developed by <NAME> # # MIT License # Copyright (c) 2021 <NAME> # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # from locale import getdefaultlocale from typing import Optional from vxt.misc.track_fmt import TrackFmt from vxt.speech2text.engine import Speech2TextEngine from vxt.speech2text.bing import BingSpeech2TextEngine from vxt.speech2text.google import GoogleSpeech2TextEngine from vxt.speech2text.google_cloud import GoogleCloudSpeech2TextEngine from vxt.speech2text.houndify import HoundifySpeech2TextEngine from vxt.speech2text.ibm import IbmSpeech2TextEngine from vxt.speech2text.sphinx import SphinxSpeech2TextEngine class Config(object): """App configuration""" def __init__(self) -> None: super().__init__() self.__engine: Speech2TextEngine = GoogleSpeech2TextEngine(None) locale = getdefaultlocale()[0] if locale: self.__language: str = locale else: self.__language: str = "en_US" self.__min_silence_len: int = 500 self.__silence_threshold: int = -16 self.__keep_silence: int = 500 self.__output_fmt: TrackFmt = TrackFmt("%t-%s.64") self.__output_dir: Optional[str] = None @property def engine(self) -> Speech2TextEngine: return self.__engine @property def language(self) -> str: return self.__language @language.setter def language(self, language: str) -> None: self.__language = language @property def min_silence_len(self) -> int: return self.__min_silence_len @min_silence_len.setter def min_silence_len(self, len: int) -> None: self.__min_silence_len = len @property def silence_threshold(self) -> int: return self.__silence_threshold @silence_threshold.setter def silence_threshold(self, t: int) -> None: if t > 0: raise InvalidConfigError("Silence threshold should be a negative number") self.__silence_threshold = t @property def keep_silence(self) -> int: return self.__keep_silence @keep_silence.setter def keep_silence(self, how_much: int) -> None: if how_much < 0: raise InvalidConfigError( "Keep silence should be a positive integer bigger than or equal to 0" ) self.__keep_silence = how_much @property def output_fmt(self) -> TrackFmt: return self.__output_fmt @output_fmt.setter def output_fmt(self, fmt: str) -> None: try: self.__output_fmt = TrackFmt(fmt) except Exception: raise InvalidConfigError("Invalid fmt syntax") @property def output_dir(self) -> str: return self.__output_dir @output_dir.setter def output_dir(self, d: str) -> None: self.__output_dir = d # -- speech 2 text setters def use_bing_speech2text(self, api_key: str) -> None: self.__engine = BingSpeech2TextEngine(api_key) def 
use_google_speech2text(self, api_key: Optional[str]) -> None: self.__engine = GoogleSpeech2TextEngine(api_key) def use_google_cloud_speech2text(self, credentials: Optional[str]) -> None: self.__engine = GoogleCloudSpeech2TextEngine(credentials) def use_houndify_speech2text(self, client_id: str, client_key: str) -> None: self.__engine = HoundifySpeech2TextEngine(client_id, client_key) def use_ibm_speech2text(self, username: str, password: str) -> None: self.__engine = IbmSpeech2TextEngine(username, password) def use_sphinx_speech2text( self, keyword_entries: Optional[str], grammar_file: Optional[str] ) -> None: self.__engine = SphinxSpeech2TextEngine(keyword_entries, grammar_file) class InvalidConfigError(Exception): """ Indicates an invalid configuration """ def __init__(self, message: str): self.message = message def __str__(self): return repr(self.message) def __repr__(self): return str(self.message)
[ "vxt.speech2text.houndify.HoundifySpeech2TextEngine", "locale.getdefaultlocale", "vxt.speech2text.bing.BingSpeech2TextEngine", "vxt.speech2text.sphinx.SphinxSpeech2TextEngine", "vxt.speech2text.google.GoogleSpeech2TextEngine", "vxt.speech2text.google_cloud.GoogleCloudSpeech2TextEngine", "vxt.misc.track_fmt.TrackFmt", "vxt.speech2text.ibm.IbmSpeech2TextEngine" ]
[((1797, 1826), 'vxt.speech2text.google.GoogleSpeech2TextEngine', 'GoogleSpeech2TextEngine', (['None'], {}), '(None)\n', (1820, 1826), False, 'from vxt.speech2text.google import GoogleSpeech2TextEngine\n'), ((2147, 2167), 'vxt.misc.track_fmt.TrackFmt', 'TrackFmt', (['"""%t-%s.64"""'], {}), "('%t-%s.64')\n", (2155, 2167), False, 'from vxt.misc.track_fmt import TrackFmt\n'), ((3960, 3990), 'vxt.speech2text.bing.BingSpeech2TextEngine', 'BingSpeech2TextEngine', (['api_key'], {}), '(api_key)\n', (3981, 3990), False, 'from vxt.speech2text.bing import BingSpeech2TextEngine\n'), ((4086, 4118), 'vxt.speech2text.google.GoogleSpeech2TextEngine', 'GoogleSpeech2TextEngine', (['api_key'], {}), '(api_key)\n', (4109, 4118), False, 'from vxt.speech2text.google import GoogleSpeech2TextEngine\n'), ((4224, 4265), 'vxt.speech2text.google_cloud.GoogleCloudSpeech2TextEngine', 'GoogleCloudSpeech2TextEngine', (['credentials'], {}), '(credentials)\n', (4252, 4265), False, 'from vxt.speech2text.google_cloud import GoogleCloudSpeech2TextEngine\n'), ((4372, 4420), 'vxt.speech2text.houndify.HoundifySpeech2TextEngine', 'HoundifySpeech2TextEngine', (['client_id', 'client_key'], {}), '(client_id, client_key)\n', (4397, 4420), False, 'from vxt.speech2text.houndify import HoundifySpeech2TextEngine\n'), ((4519, 4559), 'vxt.speech2text.ibm.IbmSpeech2TextEngine', 'IbmSpeech2TextEngine', (['username', 'password'], {}), '(username, password)\n', (4539, 4559), False, 'from vxt.speech2text.ibm import IbmSpeech2TextEngine\n'), ((4706, 4760), 'vxt.speech2text.sphinx.SphinxSpeech2TextEngine', 'SphinxSpeech2TextEngine', (['keyword_entries', 'grammar_file'], {}), '(keyword_entries, grammar_file)\n', (4729, 4760), False, 'from vxt.speech2text.sphinx import SphinxSpeech2TextEngine\n'), ((1844, 1862), 'locale.getdefaultlocale', 'getdefaultlocale', ([], {}), '()\n', (1860, 1862), False, 'from locale import getdefaultlocale\n'), ((3569, 3582), 'vxt.misc.track_fmt.TrackFmt', 'TrackFmt', (['fmt'], {}), '(fmt)\n', (3577, 3582), False, 'from vxt.misc.track_fmt import TrackFmt\n')]
from unittest import TestCase import json import responses import re from seed_services_client.message_sender \ import MessageSenderApiClient class TestMessageSenderClient(TestCase): def setUp(self): self.api = MessageSenderApiClient( "NO", "http://ms.example.org/api/v1") @responses.activate def test_create_inbound(self): # Catch all requests responses.add( responses.POST, re.compile(r'.*'), json={'test': 'response'}, status=200) inbound_payload = { 'from_addr': '+1234' } response = self.api.create_inbound(inbound_payload) # Check self.assertEqual(response, {'test': 'response'}) self.assertEqual(len(responses.calls), 1) request = responses.calls[0].request self.assertEqual(request.method, 'POST') self.assertEqual(request.url, "http://ms.example.org/api/v1/inbound/") self.assertEqual(json.loads(request.body), inbound_payload) @responses.activate def test_create_outbound(self): # Setup outbound_payload = { "to_addr": "+27123", "content": "my outbound message", "metadata": {} } response = { 'attempts': 0, 'updated_at': '2016-08-18T11:32:17.750207Z', 'content': outbound_payload["content"], 'created_at': '2016-08-18T11:32:17.750236Z', 'vumi_message_id': '075a32da-e1e4-4424-be46-1d09b71056fd', 'to_addr': outbound_payload["to_addr"], 'metadata': outbound_payload["metadata"], 'id': 'c99bd21e-6b9d-48ba-9f07-1e8e406737fe', 'delivered': False, 'version': 1, 'url': 'http://ms.example.org/api/v1/outbound/c99bd21e-6b9d-48ba-9f07-1e8e406737fe/' # noqa } responses.add( responses.POST, "http://ms.example.org/api/v1/outbound/", json=response, status=200, content_type='application/json', ) # Execute result = self.api.create_outbound(outbound_payload) # Check self.assertEqual(result["id"], "c99bd21e-6b9d-48ba-9f07-1e8e406737fe") self.assertEqual(result["content"], outbound_payload["content"]) self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.url, "http://ms.example.org/api/v1/outbound/") @responses.activate def test_get_outbounds_single_page(self): outbounds = { "next": None, "previous": None, "results": [ {'to_addr': 'addr1', 'content': 'content1'}, {'to_addr': 'addr2', 'content': 'content2'}, ] } responses.add( responses.GET, "http://ms.example.org/api/v1/outbound/", json=outbounds, status=200, content_type='application/json', ) # Execute result = self.api.get_outbounds() # Check self.assertEqual(list(result["results"]), [ {'to_addr': 'addr1', 'content': 'content1'}, {'to_addr': 'addr2', 'content': 'content2'}]) self.assertEqual(len(responses.calls), 1) self.assertEqual( responses.calls[0].request.url, "http://ms.example.org/api/v1/outbound/" ) @responses.activate def test_get_outbounds_mulitple_pages(self): outbounds = { "next": "http://ms.example.org/api/v1/outbound/?cursor=1", "previous": None, "results": [ {'to_addr': 'addr1', 'content': 'content1'}, {'to_addr': 'addr2', 'content': 'content2'}, ] } responses.add( responses.GET, "http://ms.example.org/api/v1/outbound/", json=outbounds, status=200, content_type='application/json', match_querystring=True ) outbounds = { "next": None, "previous": "http://ms.example.org/api/v1/outbound/?cursor=0", "results": [ {'to_addr': 'addr3', 'content': 'content3'}, ] } responses.add( responses.GET, "http://ms.example.org/api/v1/outbound/?cursor=1", json=outbounds, status=200, content_type='application/json', match_querystring=True ) # Execute result = self.api.get_outbounds() # Check self.assertEqual(list(result["results"]), [ {'to_addr': 'addr1', 'content': 'content1'}, {'to_addr': 'addr2', 'content': 'content2'}, {'to_addr': 
'addr3', 'content': 'content3'}]) self.assertEqual(len(responses.calls), 2) self.assertEqual( responses.calls[0].request.url, "http://ms.example.org/api/v1/outbound/" ) self.assertEqual( responses.calls[1].request.url, "http://ms.example.org/api/v1/outbound/?cursor=1" ) @responses.activate def test_get_inbounds_single_page(self): inbounds = { "next": None, "previous": None, "results": [ {'from_addr': '+1234', 'content': 'content1'}, {'from_addr': '+1234', 'content': 'content2'}, ] } # Catch all requests responses.add( responses.GET, "http://ms.example.org/api/v1/inbound/", json=inbounds, status=200) # Execute response = self.api.get_inbounds({'from_addr': '+1234'}) # Check self.assertEqual(list(response["results"]), [ {'from_addr': '+1234', 'content': 'content1'}, {'from_addr': '+1234', 'content': 'content2'}]) self.assertEqual(len(responses.calls), 1) self.assertEqual(responses.calls[0].request.method, 'GET') self.assertEqual( responses.calls[0].request.url, "http://ms.example.org/api/v1/inbound/?from_addr=%2B1234" ) @responses.activate def test_get_inbounds_mulitple_pages(self): inbounds = { "next": "http://ms.example.org/api/v1/inbound/?from_addr=%2B1234" "&cursor=1", "previous": None, "results": [ {'from_addr': '+1234', 'content': 'content1'}, {'from_addr': '+1234', 'content': 'content2'}, ] } # Catch all requests responses.add( responses.GET, "http://ms.example.org/api/v1/inbound/?from_addr=%2B1234", json=inbounds, status=200, match_querystring=True) inbounds = { "next": None, "previous": "http://ms.example.org/api/v1/inbound/?" "from_addr=%2B1234&cursor=1", "results": [ {'from_addr': '+1234', 'content': 'content3'}, ] } responses.add( responses.GET, "http://ms.example.org/api/v1/inbound/?from_addr=%2B1234&cursor=1", json=inbounds, status=200, match_querystring=True) # Execute response = self.api.get_inbounds({'from_addr': '+1234'}) # Check self.assertEqual(list(response["results"]), [ {'from_addr': '+1234', 'content': 'content1'}, {'from_addr': '+1234', 'content': 'content2'}, {'from_addr': '+1234', 'content': 'content3'}]) self.assertEqual(len(responses.calls), 2) self.assertEqual(responses.calls[0].request.method, 'GET') self.assertEqual( responses.calls[0].request.url, "http://ms.example.org/api/v1/inbound/?from_addr=%2B1234" ) self.assertEqual(responses.calls[1].request.method, 'GET') self.assertEqual( responses.calls[1].request.url, "http://ms.example.org/api/v1/inbound/?from_addr=%2B1234&cursor=1" )
[ "seed_services_client.message_sender.MessageSenderApiClient", "responses.add", "json.loads", "re.compile" ]
[((231, 291), 'seed_services_client.message_sender.MessageSenderApiClient', 'MessageSenderApiClient', (['"""NO"""', '"""http://ms.example.org/api/v1"""'], {}), "('NO', 'http://ms.example.org/api/v1')\n", (253, 291), False, 'from seed_services_client.message_sender import MessageSenderApiClient\n'), ((1865, 2000), 'responses.add', 'responses.add', (['responses.POST', '"""http://ms.example.org/api/v1/outbound/"""'], {'json': 'response', 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.POST, 'http://ms.example.org/api/v1/outbound/',\n json=response, status=200, content_type='application/json')\n", (1878, 2000), False, 'import responses\n'), ((2805, 2941), 'responses.add', 'responses.add', (['responses.GET', '"""http://ms.example.org/api/v1/outbound/"""'], {'json': 'outbounds', 'status': '(200)', 'content_type': '"""application/json"""'}), "(responses.GET, 'http://ms.example.org/api/v1/outbound/', json\n =outbounds, status=200, content_type='application/json')\n", (2818, 2941), False, 'import responses\n'), ((3799, 3963), 'responses.add', 'responses.add', (['responses.GET', '"""http://ms.example.org/api/v1/outbound/"""'], {'json': 'outbounds', 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET, 'http://ms.example.org/api/v1/outbound/', json\n =outbounds, status=200, content_type='application/json',\n match_querystring=True)\n", (3812, 3963), False, 'import responses\n'), ((4255, 4427), 'responses.add', 'responses.add', (['responses.GET', '"""http://ms.example.org/api/v1/outbound/?cursor=1"""'], {'json': 'outbounds', 'status': '(200)', 'content_type': '"""application/json"""', 'match_querystring': '(True)'}), "(responses.GET,\n 'http://ms.example.org/api/v1/outbound/?cursor=1', json=outbounds,\n status=200, content_type='application/json', match_querystring=True)\n", (4268, 4427), False, 'import responses\n'), ((5463, 5564), 'responses.add', 'responses.add', (['responses.GET', '"""http://ms.example.org/api/v1/inbound/"""'], {'json': 'inbounds', 'status': '(200)'}), "(responses.GET, 'http://ms.example.org/api/v1/inbound/', json=\n inbounds, status=200)\n", (5476, 5564), False, 'import responses\n'), ((6573, 6720), 'responses.add', 'responses.add', (['responses.GET', '"""http://ms.example.org/api/v1/inbound/?from_addr=%2B1234"""'], {'json': 'inbounds', 'status': '(200)', 'match_querystring': '(True)'}), "(responses.GET,\n 'http://ms.example.org/api/v1/inbound/?from_addr=%2B1234', json=\n inbounds, status=200, match_querystring=True)\n", (6586, 6720), False, 'import responses\n'), ((7036, 7191), 'responses.add', 'responses.add', (['responses.GET', '"""http://ms.example.org/api/v1/inbound/?from_addr=%2B1234&cursor=1"""'], {'json': 'inbounds', 'status': '(200)', 'match_querystring': '(True)'}), "(responses.GET,\n 'http://ms.example.org/api/v1/inbound/?from_addr=%2B1234&cursor=1',\n json=inbounds, status=200, match_querystring=True)\n", (7049, 7191), False, 'import responses\n'), ((445, 461), 're.compile', 're.compile', (['""".*"""'], {}), "('.*')\n", (455, 461), False, 'import re\n'), ((970, 994), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (980, 994), False, 'import json\n')]
from cogdl import experiment from cogdl.utils import build_args_from_dict DATASET_REGISTRY = {} def default_parameter(): args = { "hidden_size": 128, "seed": [0, 1, 2], "lr": 0.025, "walk_length": 80, "walk_num": 40, "batch_size": 1000, "hop": 2, "negative": 5, "epochs": 1, } return build_args_from_dict(args) def register_func(name): def register_func_name(func): DATASET_REGISTRY[name] = func return func return register_func_name @register_func("gtn-dblp") def dblp_config(args): return args @register_func("gtn-acm") def acm_config(args): return args @register_func("gtn-imdb") def imdb_config(args): return args def run(dataset_name): args = default_parameter() args = DATASET_REGISTRY[dataset_name](args).__dict__ results = experiment(task="multiplex_node_classification", dataset=dataset_name, model="hin2vec", **args) return results if __name__ == "__main__": datasets = ["gtn-dblp", "gtn-acm", "gtn-imdb"] for x in datasets: run(x)
[ "cogdl.utils.build_args_from_dict", "cogdl.experiment" ]
[((371, 397), 'cogdl.utils.build_args_from_dict', 'build_args_from_dict', (['args'], {}), '(args)\n', (391, 397), False, 'from cogdl.utils import build_args_from_dict\n'), ((877, 976), 'cogdl.experiment', 'experiment', ([], {'task': '"""multiplex_node_classification"""', 'dataset': 'dataset_name', 'model': '"""hin2vec"""'}), "(task='multiplex_node_classification', dataset=dataset_name,\n model='hin2vec', **args)\n", (887, 976), False, 'from cogdl import experiment\n')]
import json import re from typing import Any __all__ = [ 'dict_get_value', 'dict_has_keys', 'try_parse_json', ] def dict_has_keys(data: dict, *keys) -> bool: for key in keys: if not isinstance(data, dict): return False if key not in data: return False data = data[key] return True def dict_get_value(data: dict, *keys, default: Any = None) -> Any: if len(keys) == 1: keys = re.split(r'[./]', keys[0]) for key in keys: try: data = data[key] except (TypeError, IndexError, KeyError): return default return data def try_parse_json(data, **default_keys) -> dict: if isinstance(data, dict): return data try: return json.loads(data) except (TypeError, json.JSONDecodeError) as _: return default_keys
[ "re.split", "json.loads" ]
[((462, 487), 're.split', 're.split', (['"""[./]"""', 'keys[0]'], {}), "('[./]', keys[0])\n", (470, 487), False, 'import re\n'), ((775, 791), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (785, 791), False, 'import json\n')]

# coding: utf-8

# In[1]:

import pandas as pd
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')


# In[2]:


data = pd.read_csv("../build/nis_array_.log",delimiter="\n")
t = [7.8 for i in range(498)]
ts = np.arange(0,498,1)


# In[3]:


plt.plot(ts, t, label='first plot')
plt.plot(ts, data, label='second plot')
plt.legend()


# If the curve is way under, we're overestimating the uncertainty in the system; if half of the curve is over, we're underestimating the uncertainty
[ "pandas.read_csv", "numpy.arange", "matplotlib.pyplot.plot" ]
[((179, 233), 'pandas.read_csv', 'pd.read_csv', (['"""../build/nis_array_.log"""'], {'delimiter': '"""\n"""'}), "('../build/nis_array_.log', delimiter='\\n')\n", (190, 233), True, 'import pandas as pd\n'), ((268, 288), 'numpy.arange', 'np.arange', (['(0)', '(498)', '(1)'], {}), '(0, 498, 1)\n', (277, 288), True, 'import numpy as np\n'), ((300, 335), 'matplotlib.pyplot.plot', 'plt.plot', (['ts', 't'], {'label': '"""first plot"""'}), "(ts, t, label='first plot')\n", (308, 335), True, 'import matplotlib.pyplot as plt\n'), ((336, 375), 'matplotlib.pyplot.plot', 'plt.plot', (['ts', 'data'], {'label': '"""second plot"""'}), "(ts, data, label='second plot')\n", (344, 375), True, 'import matplotlib.pyplot as plt\n')]
# -*- coding: utf-8 -*- import unittest from dynamic_url import Url class TestParams(unittest.TestCase): array_test = ["Origin"] dict_test = {"Origin": "test"} dict_test2 = {"Not_Origin": "test"} dict_test3 = {"Origin": "https://www.google.com/"} dict_test4 = {"Origin": ""} def test_get_url_two_params(self): self.assertEqual( Url().get_url("hola", "db"), "", "Error in params -> two strings" ) self.assertEqual( Url().get_url("", ""), "", "Error in params -> two empty strings" ) def test_get_url_array_params(self): self.assertEqual( Url().get_url(self.array_test, "db"), "", "Error in params with array", ) def test_get_url_dict_params(self): self.assertEqual( Url().get_url(self.dict_test, "db"), "", "Error in params with array", ) self.assertEqual( Url().get_url(self.dict_test2, db={"db": ""}), "", "Error in params with array", ) self.assertEqual( Url().get_url(self.dict_test3, "db"), "", "Error in params with array", ) self.assertEqual( Url().get_url(self.dict_test4, "db"), "", "Error in params with array", ) if __name__ == "__main__": unittest.main()
[ "unittest.main", "dynamic_url.Url" ]
[((1409, 1424), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1422, 1424), False, 'import unittest\n'), ((375, 380), 'dynamic_url.Url', 'Url', ([], {}), '()\n', (378, 380), False, 'from dynamic_url import Url\n'), ((489, 494), 'dynamic_url.Url', 'Url', ([], {}), '()\n', (492, 494), False, 'from dynamic_url import Url\n'), ((645, 650), 'dynamic_url.Url', 'Url', ([], {}), '()\n', (648, 650), False, 'from dynamic_url import Url\n'), ((830, 835), 'dynamic_url.Url', 'Url', ([], {}), '()\n', (833, 835), False, 'from dynamic_url import Url\n'), ((973, 978), 'dynamic_url.Url', 'Url', ([], {}), '()\n', (976, 978), False, 'from dynamic_url import Url\n'), ((1126, 1131), 'dynamic_url.Url', 'Url', ([], {}), '()\n', (1129, 1131), False, 'from dynamic_url import Url\n'), ((1270, 1275), 'dynamic_url.Url', 'Url', ([], {}), '()\n', (1273, 1275), False, 'from dynamic_url import Url\n')]
"""Utilities for Inverted Pendulum.""" import torch from torch.distributions import MultivariateNormal from rllib.model import AbstractModel from rllib.reward.utilities import tolerance from rllib.util.neural_networks.utilities import to_torch class PendulumSparseReward(AbstractModel): """Reward for Inverted Pendulum.""" def __init__(self, action_cost=0): super().__init__(dim_state=(2,), dim_action=(1,), model_kind="rewards") self.action_cost = action_cost self.reward_offset = 0 def forward(self, state, action, next_state): """See `abstract_reward.forward'.""" state, action = to_torch(state), to_torch(action) cos_angle = torch.cos(state[..., 0]) velocity = state[..., 1] angle_tolerance = tolerance(cos_angle, lower=0.95, upper=1.0, margin=0.1) velocity_tolerance = tolerance(velocity, lower=-0.5, upper=0.5, margin=0.5) state_cost = angle_tolerance * velocity_tolerance action_tolerance = tolerance(action[..., 0], lower=-0.1, upper=0.1, margin=0.1) action_cost = self.action_cost * (action_tolerance - 1) cost = state_cost + action_cost return cost.unsqueeze(-1), torch.zeros(1) class PendulumDenseReward(AbstractModel): """Reward for Inverted Pendulum.""" def __init__(self, action_cost=0.0): super().__init__(dim_state=(2,), dim_action=(1,), model_kind="rewards") self.action_cost = action_cost self.reward_offset = 0 def forward(self, state, action, next_state): """See `abstract_reward.forward'.""" state, action = to_torch(state), to_torch(action) cos_angle = 1 - torch.cos(state[..., 0]) state_cost = cos_angle ** 2 action_cost = self.action_cost * (action ** 2).sum(-1) return -(action_cost + state_cost), torch.tensor(0.0) class PendulumModel(AbstractModel): """Pendulum Model. Torch implementation of a pendulum model using euler forwards integration. """ def __init__( self, mass, length, friction, step_size=1 / 80, noise: MultivariateNormal = None ): super().__init__(dim_state=(2,), dim_action=(1,)) self.mass = mass self.length = length self.friction = friction self.step_size = step_size self.noise = noise def forward(self, state, action): """Get next-state distribution.""" # Physical dynamics action = action.clamp(-1.0, 1.0) mass = self.mass gravity = 9.81 length = self.length friction = self.friction inertia = mass * length ** 2 dt = self.step_size angle, angular_velocity = torch.split(state, 1, dim=-1) for _ in range(1): x_ddot = ( (gravity / length) * torch.sin(angle) + action * (1 / inertia) - (friction / inertia) * angular_velocity ) angle = angle + dt * angular_velocity angular_velocity = angular_velocity + dt * x_ddot next_state = torch.cat((angle, angular_velocity), dim=-1) if self.noise is None: return next_state, torch.zeros(1) else: return next_state + self.noise.mean, self.noise.covariance_matrix
[ "torch.split", "torch.cat", "torch.sin", "rllib.util.neural_networks.utilities.to_torch", "torch.cos", "torch.zeros", "rllib.reward.utilities.tolerance", "torch.tensor" ]
[((695, 719), 'torch.cos', 'torch.cos', (['state[..., 0]'], {}), '(state[..., 0])\n', (704, 719), False, 'import torch\n'), ((780, 835), 'rllib.reward.utilities.tolerance', 'tolerance', (['cos_angle'], {'lower': '(0.95)', 'upper': '(1.0)', 'margin': '(0.1)'}), '(cos_angle, lower=0.95, upper=1.0, margin=0.1)\n', (789, 835), False, 'from rllib.reward.utilities import tolerance\n'), ((865, 919), 'rllib.reward.utilities.tolerance', 'tolerance', (['velocity'], {'lower': '(-0.5)', 'upper': '(0.5)', 'margin': '(0.5)'}), '(velocity, lower=-0.5, upper=0.5, margin=0.5)\n', (874, 919), False, 'from rllib.reward.utilities import tolerance\n'), ((1006, 1066), 'rllib.reward.utilities.tolerance', 'tolerance', (['action[..., 0]'], {'lower': '(-0.1)', 'upper': '(0.1)', 'margin': '(0.1)'}), '(action[..., 0], lower=-0.1, upper=0.1, margin=0.1)\n', (1015, 1066), False, 'from rllib.reward.utilities import tolerance\n'), ((2697, 2726), 'torch.split', 'torch.split', (['state', '(1)'], {'dim': '(-1)'}), '(state, 1, dim=-1)\n', (2708, 2726), False, 'import torch\n'), ((3079, 3123), 'torch.cat', 'torch.cat', (['(angle, angular_velocity)'], {'dim': '(-1)'}), '((angle, angular_velocity), dim=-1)\n', (3088, 3123), False, 'import torch\n'), ((640, 655), 'rllib.util.neural_networks.utilities.to_torch', 'to_torch', (['state'], {}), '(state)\n', (648, 655), False, 'from rllib.util.neural_networks.utilities import to_torch\n'), ((657, 673), 'rllib.util.neural_networks.utilities.to_torch', 'to_torch', (['action'], {}), '(action)\n', (665, 673), False, 'from rllib.util.neural_networks.utilities import to_torch\n'), ((1208, 1222), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (1219, 1222), False, 'import torch\n'), ((1619, 1634), 'rllib.util.neural_networks.utilities.to_torch', 'to_torch', (['state'], {}), '(state)\n', (1627, 1634), False, 'from rllib.util.neural_networks.utilities import to_torch\n'), ((1636, 1652), 'rllib.util.neural_networks.utilities.to_torch', 'to_torch', (['action'], {}), '(action)\n', (1644, 1652), False, 'from rllib.util.neural_networks.utilities import to_torch\n'), ((1678, 1702), 'torch.cos', 'torch.cos', (['state[..., 0]'], {}), '(state[..., 0])\n', (1687, 1702), False, 'import torch\n'), ((1847, 1864), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (1859, 1864), False, 'import torch\n'), ((3187, 3201), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (3198, 3201), False, 'import torch\n'), ((2814, 2830), 'torch.sin', 'torch.sin', (['angle'], {}), '(angle)\n', (2823, 2830), False, 'import torch\n')]
import logging from qfieldcloud.authentication.models import AuthToken from qfieldcloud.core import querysets_utils from qfieldcloud.core.models import ( Organization, OrganizationMember, Project, ProjectCollaborator, ProjectQueryset, Team, TeamMember, User, ) from rest_framework.test import APITestCase logging.disable(logging.CRITICAL) class QfcTestCase(APITestCase): def setUp(self): # user1 owns p1 and p2 # user1 owns o1 # user1 collaborates on p7 self.user1 = User.objects.create_user(username="user1", password="<PASSWORD>") self.token1 = AuthToken.objects.get_or_create(user=self.user1)[0] # user2 owns p3 and p4 # user2 admins o1 self.user2 = User.objects.create_user(username="user2", password="<PASSWORD>") self.token2 = AuthToken.objects.get_or_create(user=self.user2)[0] # user2 owns p7 and p8 # user2 is member of o1 self.user3 = User.objects.create_user(username="user3", password="<PASSWORD>") self.token3 = AuthToken.objects.get_or_create(user=self.user3)[0] # organization1 owns p4 and p5 self.organization1 = Organization.objects.create( username="organization1", password="<PASSWORD>", user_type=2, organization_owner=self.user1, ) self.membership1 = OrganizationMember.objects.create( organization=self.organization1, member=self.user2, role=OrganizationMember.Roles.ADMIN, ) self.membership2 = OrganizationMember.objects.create( organization=self.organization1, member=self.user3, role=OrganizationMember.Roles.MEMBER, ) self.team1 = Team.objects.create( username="team1", password="<PASSWORD>", user_type=User.TYPE_TEAM, team_organization=self.organization1, ) self.teammembership1 = TeamMember.objects.create( team=self.team1, member=self.user3, ) self.project1 = Project.objects.create( name="project1", is_public=False, owner=self.user1 ) self.project2 = Project.objects.create( name="project2", is_public=True, owner=self.user1 ) self.project3 = Project.objects.create( name="project3", is_public=False, owner=self.user2 ) self.project4 = Project.objects.create( name="project4", is_public=True, owner=self.user2 ) self.project5 = Project.objects.create( name="project5", is_public=False, owner=self.organization1 ) self.project6 = Project.objects.create( name="project6", is_public=True, owner=self.organization1 ) self.project7 = Project.objects.create( name="project7", is_public=False, owner=self.user3 ) self.project8 = Project.objects.create( name="project8", is_public=True, owner=self.user3 ) self.project9 = Project.objects.create( name="project9", is_public=False, owner=self.organization1 ) self.collaborator1 = ProjectCollaborator.objects.create( project=self.project7, collaborator=self.user1, role=ProjectCollaborator.Roles.REPORTER, ) self.collaborator2 = ProjectCollaborator.objects.create( project=self.project9, collaborator=self.team1, role=ProjectCollaborator.Roles.EDITOR, ) def test_get_users(self): # should get all the available users queryset = querysets_utils.get_users("") self.assertEqual(len(queryset), 5) self.assertTrue(self.user1 in queryset) self.assertTrue(self.user2 in queryset) self.assertTrue(self.user3 in queryset) self.assertTrue(self.organization1.user_ptr in queryset) self.assertTrue(self.team1.user_ptr in queryset) # should get all the available users queryset = querysets_utils.get_users("user3") self.assertEqual(len(queryset), 1) self.assertTrue(self.user3 in queryset) # should get only the users that are not an organization queryset = querysets_utils.get_users("", exclude_organizations=True) self.assertEqual(len(queryset), 4) self.assertTrue(self.user1 in queryset) 
self.assertTrue(self.user2 in queryset) self.assertTrue(self.user3 in queryset) self.assertTrue(self.team1.user_ptr in queryset) # should get only the users that are not a team queryset = querysets_utils.get_users("", exclude_teams=True) self.assertEqual(len(queryset), 4) self.assertTrue(self.user1 in queryset) self.assertTrue(self.user2 in queryset) self.assertTrue(self.user3 in queryset) self.assertTrue(self.organization1.user_ptr in queryset) # should get all the users, that are not members or owners of an organization queryset = querysets_utils.get_users("", organization=self.organization1) self.assertEqual(len(queryset), 1) # should get all the users, that are not members or owner of a project queryset = querysets_utils.get_users("", project=self.project1) self.assertEqual(len(queryset), 3) self.assertTrue(self.user2 in queryset) self.assertTrue(self.user3 in queryset) self.assertTrue(self.organization1.user_ptr in queryset) # should get all the users, that are not members or owner of a project queryset = querysets_utils.get_users("", project=self.project5) self.assertEqual(len(queryset), 4) self.assertTrue(self.user1 in queryset) self.assertTrue(self.user2 in queryset) self.assertTrue(self.user3 in queryset) self.assertTrue(self.team1.user_ptr in queryset) # should get all the users, that are not members or owner of a project and are not an organization queryset = querysets_utils.get_users( "", project=self.project1, exclude_organizations=True ) self.assertEqual(len(queryset), 2) self.assertTrue(self.user2 in queryset) self.assertTrue(self.user3 in queryset) def test_projects_roles_and_role_origins(self): """ Checks user_role and user_role_origin are correctly defined """ def p(proj, user): return Project.objects.for_user(user).get(pk=proj.pk) # fmt: off self.assertEqual(p(self.project1, self.user1).user_role, ProjectCollaborator.Roles.ADMIN) self.assertEqual(p(self.project1, self.user1).user_role_origin, ProjectQueryset.RoleOrigins.PROJECTOWNER.value) self.assertEqual(p(self.project2, self.user1).user_role, ProjectCollaborator.Roles.ADMIN) self.assertEqual(p(self.project2, self.user1).user_role_origin, ProjectQueryset.RoleOrigins.PROJECTOWNER.value) with self.assertRaises(Project.DoesNotExist): p(self.project3, self.user1) self.assertEqual(p(self.project4, self.user1).user_role, ProjectCollaborator.Roles.READER) self.assertEqual(p(self.project4, self.user1).user_role_origin, ProjectQueryset.RoleOrigins.PUBLIC.value) self.assertEqual(p(self.project5, self.user1).user_role, ProjectCollaborator.Roles.ADMIN) self.assertEqual(p(self.project5, self.user1).user_role_origin, ProjectQueryset.RoleOrigins.ORGANIZATIONOWNER.value) self.assertEqual(p(self.project6, self.user1).user_role, ProjectCollaborator.Roles.ADMIN) self.assertEqual(p(self.project6, self.user1).user_role_origin, ProjectQueryset.RoleOrigins.ORGANIZATIONOWNER.value) self.assertEqual(p(self.project7, self.user1).user_role, ProjectCollaborator.Roles.REPORTER) self.assertEqual(p(self.project7, self.user1).user_role_origin, ProjectQueryset.RoleOrigins.COLLABORATOR.value) self.assertEqual(p(self.project8, self.user1).user_role, ProjectCollaborator.Roles.READER) self.assertEqual(p(self.project8, self.user1).user_role_origin, ProjectQueryset.RoleOrigins.PUBLIC.value) self.assertEqual(p(self.project9, self.user1).user_role, ProjectCollaborator.Roles.ADMIN) self.assertEqual(p(self.project9, self.user1).user_role_origin, ProjectQueryset.RoleOrigins.ORGANIZATIONOWNER.value) with self.assertRaises(Project.DoesNotExist): p(self.project1, 
self.user2) self.assertEqual(p(self.project2, self.user2).user_role, ProjectCollaborator.Roles.READER) self.assertEqual(p(self.project2, self.user2).user_role_origin, ProjectQueryset.RoleOrigins.PUBLIC.value) self.assertEqual(p(self.project3, self.user2).user_role, ProjectCollaborator.Roles.ADMIN) self.assertEqual(p(self.project3, self.user2).user_role_origin, ProjectQueryset.RoleOrigins.PROJECTOWNER.value) self.assertEqual(p(self.project4, self.user2).user_role, ProjectCollaborator.Roles.ADMIN) self.assertEqual(p(self.project4, self.user2).user_role_origin, ProjectQueryset.RoleOrigins.PROJECTOWNER.value) self.assertEqual(p(self.project5, self.user2).user_role, ProjectCollaborator.Roles.ADMIN) self.assertEqual(p(self.project5, self.user2).user_role_origin, ProjectQueryset.RoleOrigins.ORGANIZATIONADMIN.value) self.assertEqual(p(self.project6, self.user2).user_role, ProjectCollaborator.Roles.ADMIN) self.assertEqual(p(self.project6, self.user2).user_role_origin, ProjectQueryset.RoleOrigins.ORGANIZATIONADMIN.value) with self.assertRaises(Project.DoesNotExist): p(self.project7, self.user2) self.assertEqual(p(self.project8, self.user2).user_role, ProjectCollaborator.Roles.READER) self.assertEqual(p(self.project8, self.user2).user_role_origin, ProjectQueryset.RoleOrigins.PUBLIC.value) self.assertEqual(p(self.project9, self.user2).user_role, ProjectCollaborator.Roles.ADMIN) self.assertEqual(p(self.project9, self.user2).user_role_origin, ProjectQueryset.RoleOrigins.ORGANIZATIONADMIN.value) with self.assertRaises(Project.DoesNotExist): p(self.project1, self.user3) self.assertEqual(p(self.project2, self.user3).user_role, ProjectCollaborator.Roles.READER) self.assertEqual(p(self.project2, self.user3).user_role_origin, ProjectQueryset.RoleOrigins.PUBLIC.value) with self.assertRaises(Project.DoesNotExist): p(self.project3, self.user3) self.assertEqual(p(self.project4, self.user3).user_role, ProjectCollaborator.Roles.READER) self.assertEqual(p(self.project4, self.user3).user_role_origin, ProjectQueryset.RoleOrigins.PUBLIC.value) with self.assertRaises(Project.DoesNotExist): p(self.project5, self.user3) self.assertEqual(p(self.project6, self.user3).user_role, ProjectCollaborator.Roles.READER) self.assertEqual(p(self.project6, self.user3).user_role_origin, ProjectQueryset.RoleOrigins.PUBLIC.value) self.assertEqual(p(self.project7, self.user3).user_role, ProjectCollaborator.Roles.ADMIN) self.assertEqual(p(self.project7, self.user3).user_role_origin, ProjectQueryset.RoleOrigins.PROJECTOWNER.value) self.assertEqual(p(self.project8, self.user3).user_role, ProjectCollaborator.Roles.ADMIN) self.assertEqual(p(self.project8, self.user3).user_role_origin, ProjectQueryset.RoleOrigins.PROJECTOWNER.value) self.assertEqual(p(self.project9, self.user3).user_role, ProjectCollaborator.Roles.EDITOR) self.assertEqual(p(self.project9, self.user3).user_role_origin, ProjectQueryset.RoleOrigins.TEAMMEMBER.value) # fmt: on def test_user_roles_and_role_origins(self): """ Checks project_role and project_role_origin are correctly defined """ def p(proj, user): return User.objects.for_project(proj).get(pk=user.pk) # fmt: off self.assertEqual(p(self.project1, self.user1).project_role, ProjectCollaborator.Roles.ADMIN) self.assertEqual(p(self.project1, self.user1).project_role_origin, ProjectQueryset.RoleOrigins.PROJECTOWNER.value) self.assertEqual(p(self.project2, self.user1).project_role, ProjectCollaborator.Roles.ADMIN) self.assertEqual(p(self.project2, self.user1).project_role_origin, ProjectQueryset.RoleOrigins.PROJECTOWNER.value) 
with self.assertRaises(User.DoesNotExist): p(self.project3, self.user1) with self.assertRaises(User.DoesNotExist): p(self.project4, self.user1) self.assertEqual(p(self.project5, self.user1).project_role, ProjectCollaborator.Roles.ADMIN) self.assertEqual(p(self.project5, self.user1).project_role_origin, ProjectQueryset.RoleOrigins.ORGANIZATIONOWNER.value) self.assertEqual(p(self.project6, self.user1).project_role, ProjectCollaborator.Roles.ADMIN) self.assertEqual(p(self.project6, self.user1).project_role_origin, ProjectQueryset.RoleOrigins.ORGANIZATIONOWNER.value) self.assertEqual(p(self.project7, self.user1).project_role, ProjectCollaborator.Roles.REPORTER) self.assertEqual(p(self.project7, self.user1).project_role_origin, ProjectQueryset.RoleOrigins.COLLABORATOR.value) with self.assertRaises(User.DoesNotExist): p(self.project8, self.user1) self.assertEqual(p(self.project9, self.user1).project_role, ProjectCollaborator.Roles.ADMIN) self.assertEqual(p(self.project9, self.user1).project_role_origin, ProjectQueryset.RoleOrigins.ORGANIZATIONOWNER.value) with self.assertRaises(User.DoesNotExist): p(self.project1, self.user2) with self.assertRaises(User.DoesNotExist): p(self.project2, self.user2) self.assertEqual(p(self.project3, self.user2).project_role, ProjectCollaborator.Roles.ADMIN) self.assertEqual(p(self.project3, self.user2).project_role_origin, ProjectQueryset.RoleOrigins.PROJECTOWNER.value) self.assertEqual(p(self.project4, self.user2).project_role, ProjectCollaborator.Roles.ADMIN) self.assertEqual(p(self.project4, self.user2).project_role_origin, ProjectQueryset.RoleOrigins.PROJECTOWNER.value) self.assertEqual(p(self.project5, self.user2).project_role, ProjectCollaborator.Roles.ADMIN) self.assertEqual(p(self.project5, self.user2).project_role_origin, ProjectQueryset.RoleOrigins.ORGANIZATIONADMIN.value) self.assertEqual(p(self.project6, self.user2).project_role, ProjectCollaborator.Roles.ADMIN) self.assertEqual(p(self.project6, self.user2).project_role_origin, ProjectQueryset.RoleOrigins.ORGANIZATIONADMIN.value) with self.assertRaises(User.DoesNotExist): p(self.project7, self.user2) with self.assertRaises(User.DoesNotExist): p(self.project8, self.user2) self.assertEqual(p(self.project9, self.user2).project_role, ProjectCollaborator.Roles.ADMIN) self.assertEqual(p(self.project9, self.user2).project_role_origin, ProjectQueryset.RoleOrigins.ORGANIZATIONADMIN.value) with self.assertRaises(User.DoesNotExist): p(self.project1, self.user3) with self.assertRaises(User.DoesNotExist): p(self.project2, self.user3) with self.assertRaises(User.DoesNotExist): p(self.project3, self.user3) with self.assertRaises(User.DoesNotExist): p(self.project4, self.user3) with self.assertRaises(User.DoesNotExist): p(self.project5, self.user3) with self.assertRaises(User.DoesNotExist): p(self.project6, self.user3) self.assertEqual(p(self.project7, self.user3).project_role, ProjectCollaborator.Roles.ADMIN) self.assertEqual(p(self.project7, self.user3).project_role_origin, ProjectQueryset.RoleOrigins.PROJECTOWNER.value) self.assertEqual(p(self.project8, self.user3).project_role, ProjectCollaborator.Roles.ADMIN) self.assertEqual(p(self.project8, self.user3).project_role_origin, ProjectQueryset.RoleOrigins.PROJECTOWNER.value) self.assertEqual(p(self.project9, self.user3).project_role, ProjectCollaborator.Roles.EDITOR) self.assertEqual(p(self.project9, self.user3).project_role_origin, ProjectQueryset.RoleOrigins.TEAMMEMBER.value) # fmt: on
[ "qfieldcloud.core.models.Project.objects.for_user", "qfieldcloud.core.models.Team.objects.create", "qfieldcloud.core.models.Organization.objects.create", "qfieldcloud.core.models.OrganizationMember.objects.create", "qfieldcloud.core.querysets_utils.get_users", "qfieldcloud.authentication.models.AuthToken.objects.get_or_create", "qfieldcloud.core.models.TeamMember.objects.create", "logging.disable", "qfieldcloud.core.models.ProjectCollaborator.objects.create", "qfieldcloud.core.models.User.objects.for_project", "qfieldcloud.core.models.Project.objects.create", "qfieldcloud.core.models.User.objects.create_user" ]
[((339, 372), 'logging.disable', 'logging.disable', (['logging.CRITICAL'], {}), '(logging.CRITICAL)\n', (354, 372), False, 'import logging\n'), ((539, 604), 'qfieldcloud.core.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""user1"""', 'password': '"""<PASSWORD>"""'}), "(username='user1', password='<PASSWORD>')\n", (563, 604), False, 'from qfieldcloud.core.models import Organization, OrganizationMember, Project, ProjectCollaborator, ProjectQueryset, Team, TeamMember, User\n'), ((758, 823), 'qfieldcloud.core.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""user2"""', 'password': '"""<PASSWORD>"""'}), "(username='user2', password='<PASSWORD>')\n", (782, 823), False, 'from qfieldcloud.core.models import Organization, OrganizationMember, Project, ProjectCollaborator, ProjectQueryset, Team, TeamMember, User\n'), ((983, 1048), 'qfieldcloud.core.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""user3"""', 'password': '"""<PASSWORD>"""'}), "(username='user3', password='<PASSWORD>')\n", (1007, 1048), False, 'from qfieldcloud.core.models import Organization, OrganizationMember, Project, ProjectCollaborator, ProjectQueryset, Team, TeamMember, User\n'), ((1192, 1316), 'qfieldcloud.core.models.Organization.objects.create', 'Organization.objects.create', ([], {'username': '"""organization1"""', 'password': '"""<PASSWORD>"""', 'user_type': '(2)', 'organization_owner': 'self.user1'}), "(username='organization1', password='<PASSWORD>',\n user_type=2, organization_owner=self.user1)\n", (1219, 1316), False, 'from qfieldcloud.core.models import Organization, OrganizationMember, Project, ProjectCollaborator, ProjectQueryset, Team, TeamMember, User\n'), ((1400, 1527), 'qfieldcloud.core.models.OrganizationMember.objects.create', 'OrganizationMember.objects.create', ([], {'organization': 'self.organization1', 'member': 'self.user2', 'role': 'OrganizationMember.Roles.ADMIN'}), '(organization=self.organization1, member=\n self.user2, role=OrganizationMember.Roles.ADMIN)\n', (1433, 1527), False, 'from qfieldcloud.core.models import Organization, OrganizationMember, Project, ProjectCollaborator, ProjectQueryset, Team, TeamMember, User\n'), ((1598, 1726), 'qfieldcloud.core.models.OrganizationMember.objects.create', 'OrganizationMember.objects.create', ([], {'organization': 'self.organization1', 'member': 'self.user3', 'role': 'OrganizationMember.Roles.MEMBER'}), '(organization=self.organization1, member=\n self.user3, role=OrganizationMember.Roles.MEMBER)\n', (1631, 1726), False, 'from qfieldcloud.core.models import Organization, OrganizationMember, Project, ProjectCollaborator, ProjectQueryset, Team, TeamMember, User\n'), ((1791, 1920), 'qfieldcloud.core.models.Team.objects.create', 'Team.objects.create', ([], {'username': '"""team1"""', 'password': '"""<PASSWORD>"""', 'user_type': 'User.TYPE_TEAM', 'team_organization': 'self.organization1'}), "(username='team1', password='<PASSWORD>', user_type=User\n .TYPE_TEAM, team_organization=self.organization1)\n", (1810, 1920), False, 'from qfieldcloud.core.models import Organization, OrganizationMember, Project, ProjectCollaborator, ProjectQueryset, Team, TeamMember, User\n'), ((2007, 2068), 'qfieldcloud.core.models.TeamMember.objects.create', 'TeamMember.objects.create', ([], {'team': 'self.team1', 'member': 'self.user3'}), '(team=self.team1, member=self.user3)\n', (2032, 2068), False, 'from qfieldcloud.core.models import Organization, OrganizationMember, Project, 
ProjectCollaborator, ProjectQueryset, Team, TeamMember, User\n'), ((2129, 2203), 'qfieldcloud.core.models.Project.objects.create', 'Project.objects.create', ([], {'name': '"""project1"""', 'is_public': '(False)', 'owner': 'self.user1'}), "(name='project1', is_public=False, owner=self.user1)\n", (2151, 2203), False, 'from qfieldcloud.core.models import Organization, OrganizationMember, Project, ProjectCollaborator, ProjectQueryset, Team, TeamMember, User\n'), ((2251, 2324), 'qfieldcloud.core.models.Project.objects.create', 'Project.objects.create', ([], {'name': '"""project2"""', 'is_public': '(True)', 'owner': 'self.user1'}), "(name='project2', is_public=True, owner=self.user1)\n", (2273, 2324), False, 'from qfieldcloud.core.models import Organization, OrganizationMember, Project, ProjectCollaborator, ProjectQueryset, Team, TeamMember, User\n'), ((2372, 2446), 'qfieldcloud.core.models.Project.objects.create', 'Project.objects.create', ([], {'name': '"""project3"""', 'is_public': '(False)', 'owner': 'self.user2'}), "(name='project3', is_public=False, owner=self.user2)\n", (2394, 2446), False, 'from qfieldcloud.core.models import Organization, OrganizationMember, Project, ProjectCollaborator, ProjectQueryset, Team, TeamMember, User\n'), ((2494, 2567), 'qfieldcloud.core.models.Project.objects.create', 'Project.objects.create', ([], {'name': '"""project4"""', 'is_public': '(True)', 'owner': 'self.user2'}), "(name='project4', is_public=True, owner=self.user2)\n", (2516, 2567), False, 'from qfieldcloud.core.models import Organization, OrganizationMember, Project, ProjectCollaborator, ProjectQueryset, Team, TeamMember, User\n'), ((2615, 2702), 'qfieldcloud.core.models.Project.objects.create', 'Project.objects.create', ([], {'name': '"""project5"""', 'is_public': '(False)', 'owner': 'self.organization1'}), "(name='project5', is_public=False, owner=self.\n organization1)\n", (2637, 2702), False, 'from qfieldcloud.core.models import Organization, OrganizationMember, Project, ProjectCollaborator, ProjectQueryset, Team, TeamMember, User\n'), ((2745, 2831), 'qfieldcloud.core.models.Project.objects.create', 'Project.objects.create', ([], {'name': '"""project6"""', 'is_public': '(True)', 'owner': 'self.organization1'}), "(name='project6', is_public=True, owner=self.\n organization1)\n", (2767, 2831), False, 'from qfieldcloud.core.models import Organization, OrganizationMember, Project, ProjectCollaborator, ProjectQueryset, Team, TeamMember, User\n'), ((2874, 2948), 'qfieldcloud.core.models.Project.objects.create', 'Project.objects.create', ([], {'name': '"""project7"""', 'is_public': '(False)', 'owner': 'self.user3'}), "(name='project7', is_public=False, owner=self.user3)\n", (2896, 2948), False, 'from qfieldcloud.core.models import Organization, OrganizationMember, Project, ProjectCollaborator, ProjectQueryset, Team, TeamMember, User\n'), ((2996, 3069), 'qfieldcloud.core.models.Project.objects.create', 'Project.objects.create', ([], {'name': '"""project8"""', 'is_public': '(True)', 'owner': 'self.user3'}), "(name='project8', is_public=True, owner=self.user3)\n", (3018, 3069), False, 'from qfieldcloud.core.models import Organization, OrganizationMember, Project, ProjectCollaborator, ProjectQueryset, Team, TeamMember, User\n'), ((3117, 3204), 'qfieldcloud.core.models.Project.objects.create', 'Project.objects.create', ([], {'name': '"""project9"""', 'is_public': '(False)', 'owner': 'self.organization1'}), "(name='project9', is_public=False, owner=self.\n organization1)\n", (3139, 3204), False, 'from 
qfieldcloud.core.models import Organization, OrganizationMember, Project, ProjectCollaborator, ProjectQueryset, Team, TeamMember, User\n'), ((3252, 3380), 'qfieldcloud.core.models.ProjectCollaborator.objects.create', 'ProjectCollaborator.objects.create', ([], {'project': 'self.project7', 'collaborator': 'self.user1', 'role': 'ProjectCollaborator.Roles.REPORTER'}), '(project=self.project7, collaborator=self\n .user1, role=ProjectCollaborator.Roles.REPORTER)\n', (3286, 3380), False, 'from qfieldcloud.core.models import Organization, OrganizationMember, Project, ProjectCollaborator, ProjectQueryset, Team, TeamMember, User\n'), ((3453, 3579), 'qfieldcloud.core.models.ProjectCollaborator.objects.create', 'ProjectCollaborator.objects.create', ([], {'project': 'self.project9', 'collaborator': 'self.team1', 'role': 'ProjectCollaborator.Roles.EDITOR'}), '(project=self.project9, collaborator=self\n .team1, role=ProjectCollaborator.Roles.EDITOR)\n', (3487, 3579), False, 'from qfieldcloud.core.models import Organization, OrganizationMember, Project, ProjectCollaborator, ProjectQueryset, Team, TeamMember, User\n'), ((3717, 3746), 'qfieldcloud.core.querysets_utils.get_users', 'querysets_utils.get_users', (['""""""'], {}), "('')\n", (3742, 3746), False, 'from qfieldcloud.core import querysets_utils\n'), ((4121, 4155), 'qfieldcloud.core.querysets_utils.get_users', 'querysets_utils.get_users', (['"""user3"""'], {}), "('user3')\n", (4146, 4155), False, 'from qfieldcloud.core import querysets_utils\n'), ((4332, 4389), 'qfieldcloud.core.querysets_utils.get_users', 'querysets_utils.get_users', (['""""""'], {'exclude_organizations': '(True)'}), "('', exclude_organizations=True)\n", (4357, 4389), False, 'from qfieldcloud.core import querysets_utils\n'), ((4710, 4759), 'qfieldcloud.core.querysets_utils.get_users', 'querysets_utils.get_users', (['""""""'], {'exclude_teams': '(True)'}), "('', exclude_teams=True)\n", (4735, 4759), False, 'from qfieldcloud.core import querysets_utils\n'), ((5118, 5180), 'qfieldcloud.core.querysets_utils.get_users', 'querysets_utils.get_users', (['""""""'], {'organization': 'self.organization1'}), "('', organization=self.organization1)\n", (5143, 5180), False, 'from qfieldcloud.core import querysets_utils\n'), ((5323, 5375), 'qfieldcloud.core.querysets_utils.get_users', 'querysets_utils.get_users', (['""""""'], {'project': 'self.project1'}), "('', project=self.project1)\n", (5348, 5375), False, 'from qfieldcloud.core import querysets_utils\n'), ((5679, 5731), 'qfieldcloud.core.querysets_utils.get_users', 'querysets_utils.get_users', (['""""""'], {'project': 'self.project5'}), "('', project=self.project5)\n", (5704, 5731), False, 'from qfieldcloud.core import querysets_utils\n'), ((6103, 6188), 'qfieldcloud.core.querysets_utils.get_users', 'querysets_utils.get_users', (['""""""'], {'project': 'self.project1', 'exclude_organizations': '(True)'}), "('', project=self.project1, exclude_organizations=True\n )\n", (6128, 6188), False, 'from qfieldcloud.core import querysets_utils\n'), ((627, 675), 'qfieldcloud.authentication.models.AuthToken.objects.get_or_create', 'AuthToken.objects.get_or_create', ([], {'user': 'self.user1'}), '(user=self.user1)\n', (658, 675), False, 'from qfieldcloud.authentication.models import AuthToken\n'), ((846, 894), 'qfieldcloud.authentication.models.AuthToken.objects.get_or_create', 'AuthToken.objects.get_or_create', ([], {'user': 'self.user2'}), '(user=self.user2)\n', (877, 894), False, 'from qfieldcloud.authentication.models import AuthToken\n'), ((1071, 1119), 
'qfieldcloud.authentication.models.AuthToken.objects.get_or_create', 'AuthToken.objects.get_or_create', ([], {'user': 'self.user3'}), '(user=self.user3)\n', (1102, 1119), False, 'from qfieldcloud.authentication.models import AuthToken\n'), ((6537, 6567), 'qfieldcloud.core.models.Project.objects.for_user', 'Project.objects.for_user', (['user'], {}), '(user)\n', (6561, 6567), False, 'from qfieldcloud.core.models import Organization, OrganizationMember, Project, ProjectCollaborator, ProjectQueryset, Team, TeamMember, User\n'), ((11963, 11993), 'qfieldcloud.core.models.User.objects.for_project', 'User.objects.for_project', (['proj'], {}), '(proj)\n', (11987, 11993), False, 'from qfieldcloud.core.models import Organization, OrganizationMember, Project, ProjectCollaborator, ProjectQueryset, Team, TeamMember, User\n')]
""" Evaluate the model using Eigen split of KITTI dataset - prepare gt depth running the script https://github.com/nianticlabs/monodepth2/blob/master/export_gt_depth.py """ import argparse import os import cv2 import numpy as np import tensorflow as tf from tqdm import tqdm from eval_utils import compute_errors, compute_scale_and_shift from network import Pydnet os.environ["CUDA_VISIBLE_DEVICES"] = "-1" class KITTILoader(object): def __init__(self, params): self.params = params self.height = params["height"] self.width = params["width"] self.data_list_file = params["data_list_file"] self.data_path = params["data_path"] self.num_workers = 4 self.data_list = np.loadtxt(self.data_list_file, dtype=bytes).astype(np.str) self.default_img_shape = None def read_and_decode(self, filename_queue): """Read jpeg file from file system""" img0_name = tf.strings.join([self.data_path, "/", filename_queue, ".jpg"]) img0 = tf.image.decode_jpeg(tf.io.read_file(img0_name), channels=3) img0 = tf.cast(img0, tf.float32) return img0 def preprocess(self, filename_queue): """Prepare single image at testing time""" img0 = self.read_and_decode(filename_queue) img0 = tf.image.resize_images(img0, [self.height, self.width], tf.image.ResizeMethod.AREA) img0.set_shape([self.height, self.width, 3]) img0 = img0 / 255.0 return img0 def create_iterator(self, num_parallel_calls=4): """Create iterator""" data_list = tf.convert_to_tensor(self.data_list, dtype=tf.string) dataset = tf.data.Dataset.from_tensor_slices(data_list) dataset = dataset.map(self.preprocess, num_parallel_calls=num_parallel_calls) dataset = dataset.batch(1) dataset = dataset.repeat() iterator = dataset.make_initializable_iterator() return iterator def read_test_files(test_file) -> list: """Read test files from txt file""" assert os.path.exists(test_file) with open(test_file, "r") as f: lines = f.readlines() lines = [l.strip() for l in lines] return lines def run_inference(opts): """Run the model on KITTI""" network_params = {"height": 320, "width": 640, "is_training": False} dataset_params = { "height": 320, "width": 640, "data_path": opts.data_path, "data_list_file": opts.data_list_file, } dataset = KITTILoader(dataset_params) iterator = dataset.create_iterator() batch_img = iterator.get_next() network = Pydnet(network_params) predicted_idepth = network.forward(batch_img) predicted_idepth = tf.nn.relu(predicted_idepth) # restore graph saver = tf.train.Saver() sess = tf.Session() sess.run(tf.compat.v1.global_variables_initializer()) sess.run(iterator.initializer) saver.restore(sess, opts.ckpt) os.makedirs(opts.dest, exist_ok=True) test_images = read_test_files(opts.data_list_file) num_images = len(test_images) with tqdm(total=num_images) as pbar: for i in range(num_images): idepth = sess.run(predicted_idepth) idepth = np.squeeze(idepth) min_idepth = idepth.min() max_idepth = idepth.max() norm_idepth = (idepth - min_idepth) / (max_idepth - min_idepth) norm_idepth *= 255.0 target_path = os.path.join(opts.data_path, f"{test_images[i]}.jpg") target = cv2.imread(target_path) h, w = target.shape[:2] norm_idepth = cv2.resize(norm_idepth, (w, h)) img_path = os.path.join(opts.dest, f"{str(i).zfill(4)}.png") cv2.imwrite(img_path, (norm_idepth * 256.0).astype(np.uint16)) pbar.update(1) print("Inference done!") def eval(opts): """Compute error metrics.""" errors = [] test_images = read_test_files(opts.data_list_file) print("=> loading gt data") gt_depths = np.load(opts.gt_path, fix_imports=True, encoding="latin1", allow_pickle=True)[ "data" ] print("=> starting 
evaluation") with tqdm(total=len(test_images)) as pbar: for i in range(len(test_images)): target = gt_depths[i] pred_path = os.path.join(opts.dest, f"{str(i).zfill(4)}.png") prediction_idepth = cv2.imread(pred_path, -1) / 256.0 mask = (target > 1e-3) & (target < opts.max_depth) target_idepth = np.zeros_like(target) target_idepth[mask == 1] = 1.0 / target[mask == 1] scale, shift = compute_scale_and_shift(prediction_idepth, target_idepth, mask) prediction_idepth_aligned = scale * prediction_idepth + shift disparity_cap = 1.0 / opts.max_depth prediction_idepth_aligned[prediction_idepth_aligned < disparity_cap] = disparity_cap prediciton_depth_aligned = 1.0 / prediction_idepth_aligned prediciton_depth_aligned = prediciton_depth_aligned[mask == 1] target = target[mask == 1] errors.append(compute_errors(target, prediciton_depth_aligned)) pbar.update(1) mean_errors = np.array(errors).mean(0) labels = ["abs_rel", "sq_rel", "rmse", "rmse_log", "a1", "a2", "a3"] for i in range(len(labels)): print(f"{labels[i]}:{mean_errors[i]}") print("Evaluation done!") if __name__ == "__main__": parser = argparse.ArgumentParser(description="Evaluate depth network on KITTI") parser.add_argument("--ckpt", type=str, help="path to checkpoint", required=True) parser.add_argument("--data_path", type=str, help="path to kitti", required=True) parser.add_argument("--gt_path", type=str, help="path to gt_depths.npz", required=True) parser.add_argument( "--data_list_file", type=str, help="path to data list", default="test_kitti.txt" ) parser.add_argument("--dest", type=str, help="prediction folder", default="kitti") parser.add_argument("--max_depth", type=float, help="maximum depth value", default=80.0) opts = parser.parse_args() run_inference(opts) eval(opts)
[ "numpy.load", "argparse.ArgumentParser", "os.path.join", "eval_utils.compute_scale_and_shift", "eval_utils.compute_errors", "tensorflow.compat.v1.global_variables_initializer", "tensorflow.nn.relu", "numpy.zeros_like", "os.path.exists", "tensorflow.cast", "numpy.loadtxt", "tensorflow.io.read_file", "cv2.resize", "tensorflow.image.resize_images", "tqdm.tqdm", "tensorflow.train.Saver", "tensorflow.Session", "network.Pydnet", "numpy.squeeze", "os.makedirs", "tensorflow.convert_to_tensor", "tensorflow.data.Dataset.from_tensor_slices", "cv2.imread", "tensorflow.strings.join", "numpy.array" ]
[((2040, 2065), 'os.path.exists', 'os.path.exists', (['test_file'], {}), '(test_file)\n', (2054, 2065), False, 'import os\n'), ((2614, 2636), 'network.Pydnet', 'Pydnet', (['network_params'], {}), '(network_params)\n', (2620, 2636), False, 'from network import Pydnet\n'), ((2710, 2738), 'tensorflow.nn.relu', 'tf.nn.relu', (['predicted_idepth'], {}), '(predicted_idepth)\n', (2720, 2738), True, 'import tensorflow as tf\n'), ((2772, 2788), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2786, 2788), True, 'import tensorflow as tf\n'), ((2800, 2812), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2810, 2812), True, 'import tensorflow as tf\n'), ((2946, 2983), 'os.makedirs', 'os.makedirs', (['opts.dest'], {'exist_ok': '(True)'}), '(opts.dest, exist_ok=True)\n', (2957, 2983), False, 'import os\n'), ((5467, 5537), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluate depth network on KITTI"""'}), "(description='Evaluate depth network on KITTI')\n", (5490, 5537), False, 'import argparse\n'), ((942, 1004), 'tensorflow.strings.join', 'tf.strings.join', (["[self.data_path, '/', filename_queue, '.jpg']"], {}), "([self.data_path, '/', filename_queue, '.jpg'])\n", (957, 1004), True, 'import tensorflow as tf\n'), ((1096, 1121), 'tensorflow.cast', 'tf.cast', (['img0', 'tf.float32'], {}), '(img0, tf.float32)\n', (1103, 1121), True, 'import tensorflow as tf\n'), ((1303, 1391), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['img0', '[self.height, self.width]', 'tf.image.ResizeMethod.AREA'], {}), '(img0, [self.height, self.width], tf.image.\n ResizeMethod.AREA)\n', (1325, 1391), True, 'import tensorflow as tf\n'), ((1592, 1645), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['self.data_list'], {'dtype': 'tf.string'}), '(self.data_list, dtype=tf.string)\n', (1612, 1645), True, 'import tensorflow as tf\n'), ((1664, 1709), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['data_list'], {}), '(data_list)\n', (1698, 1709), True, 'import tensorflow as tf\n'), ((2826, 2869), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (2867, 2869), True, 'import tensorflow as tf\n'), ((3082, 3104), 'tqdm.tqdm', 'tqdm', ([], {'total': 'num_images'}), '(total=num_images)\n', (3086, 3104), False, 'from tqdm import tqdm\n'), ((4018, 4095), 'numpy.load', 'np.load', (['opts.gt_path'], {'fix_imports': '(True)', 'encoding': '"""latin1"""', 'allow_pickle': '(True)'}), "(opts.gt_path, fix_imports=True, encoding='latin1', allow_pickle=True)\n", (4025, 4095), True, 'import numpy as np\n'), ((1041, 1067), 'tensorflow.io.read_file', 'tf.io.read_file', (['img0_name'], {}), '(img0_name)\n', (1056, 1067), True, 'import tensorflow as tf\n'), ((3219, 3237), 'numpy.squeeze', 'np.squeeze', (['idepth'], {}), '(idepth)\n', (3229, 3237), True, 'import numpy as np\n'), ((3450, 3503), 'os.path.join', 'os.path.join', (['opts.data_path', 'f"""{test_images[i]}.jpg"""'], {}), "(opts.data_path, f'{test_images[i]}.jpg')\n", (3462, 3503), False, 'import os\n'), ((3525, 3548), 'cv2.imread', 'cv2.imread', (['target_path'], {}), '(target_path)\n', (3535, 3548), False, 'import cv2\n'), ((3611, 3642), 'cv2.resize', 'cv2.resize', (['norm_idepth', '(w, h)'], {}), '(norm_idepth, (w, h))\n', (3621, 3642), False, 'import cv2\n'), ((4510, 4531), 'numpy.zeros_like', 'np.zeros_like', (['target'], {}), '(target)\n', (4523, 4531), True, 'import numpy as np\n'), ((4622, 4685), 
'eval_utils.compute_scale_and_shift', 'compute_scale_and_shift', (['prediction_idepth', 'target_idepth', 'mask'], {}), '(prediction_idepth, target_idepth, mask)\n', (4645, 4685), False, 'from eval_utils import compute_errors, compute_scale_and_shift\n'), ((5216, 5232), 'numpy.array', 'np.array', (['errors'], {}), '(errors)\n', (5224, 5232), True, 'import numpy as np\n'), ((730, 774), 'numpy.loadtxt', 'np.loadtxt', (['self.data_list_file'], {'dtype': 'bytes'}), '(self.data_list_file, dtype=bytes)\n', (740, 774), True, 'import numpy as np\n'), ((4383, 4408), 'cv2.imread', 'cv2.imread', (['pred_path', '(-1)'], {}), '(pred_path, -1)\n', (4393, 4408), False, 'import cv2\n'), ((5119, 5167), 'eval_utils.compute_errors', 'compute_errors', (['target', 'prediciton_depth_aligned'], {}), '(target, prediciton_depth_aligned)\n', (5133, 5167), False, 'from eval_utils import compute_errors, compute_scale_and_shift\n')]
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from common.base_model_init import BaseModelInitializer

import os
import argparse


class ModelInitializer(BaseModelInitializer):
    """Model initializer for minigo"""

    def __init__(self, args, custom_args=[], platform_util=None):
        super(ModelInitializer, self).__init__(args, custom_args, platform_util)

        arg_parser = argparse.ArgumentParser(description='Parse additional args')
        arg_parser.add_argument(
            "--quantization", help="quantization flag",
            dest="quantization", default="False")
        arg_parser.add_argument(
            "--large-scale", help="train on large scale",
            dest="large_scale", default="False")
        arg_parser.add_argument(
            "--num-train-nodes", help="number of train nodes",
            dest="num_train_nodes", default=0, type=int)
        arg_parser.add_argument(
            "--num-eval-nodes", help="number of evaluation nodes",
            dest="num_eval_nodes", default=0, type=int)
        arg_parser.add_argument(
            "--multi-node", help="train on large scale",
            dest="multi_node", default="False")
        self.additional_args, unknown_args = arg_parser.parse_known_args(custom_args)

        if self.additional_args.large_scale == "True" and self.additional_args.multi_node == "True":
            # multi-node training mode with large scale
            self.cmd = "./run_mn.sh "
            self.cmd += " {0}".format(self.additional_args.num_train_nodes)
            self.cmd += " {0}".format(self.additional_args.num_eval_nodes)
            self.cmd += " {0}".format(self.additional_args.quantization)
        elif self.additional_args.large_scale == "False" and self.additional_args.multi_node == "True":
            # multi-node training mode
            self.cmd = "./run_mn.sh "
            self.cmd += " {0}".format(self.additional_args.num_train_nodes)
            self.cmd += " {0}".format(self.additional_args.quantization)
        else:
            # single-node training mode
            self.cmd = "./run.sh "
            self.cmd += " {0}".format(self.additional_args.quantization)

    def run(self):
        org_path = os.getcwd()
        os.chdir(self.args.model_source_dir)
        self.run_command(self.cmd)
        os.chdir(org_path)
[ "os.getcwd", "argparse.ArgumentParser", "os.chdir" ]
[((1108, 1168), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Parse additional args"""'}), "(description='Parse additional args')\n", (1131, 1168), False, 'import argparse\n'), ((2971, 2982), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2980, 2982), False, 'import os\n'), ((2992, 3028), 'os.chdir', 'os.chdir', (['self.args.model_source_dir'], {}), '(self.args.model_source_dir)\n', (3000, 3028), False, 'import os\n'), ((3074, 3092), 'os.chdir', 'os.chdir', (['org_path'], {}), '(org_path)\n', (3082, 3092), False, 'import os\n')]
""" ============== Edge operators ============== Edge operators are used in image processing within edge detection algorithms. They are discrete differentiation operators, computing an approximation of the gradient of the image intensity function. """ import numpy as np import matplotlib.pyplot as plt from skimage.data import camera from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, \ scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h image = camera() edge_roberts = roberts(image) edge_sobel = sobel(image) fig, ax = plt.subplots(ncols=2, sharex=True, sharey=True, figsize=(8, 4)) ax[0].imshow(edge_roberts, cmap=plt.cm.gray) ax[0].set_title('Roberts Edge Detection') ax[1].imshow(edge_sobel, cmap=plt.cm.gray) ax[1].set_title('Sobel Edge Detection') for a in ax: a.axis('off') plt.tight_layout() plt.show() ###################################################################### # Different operators compute different finite-difference approximations of # the gradient. For example, the Scharr filter results in a less rotational # variance than the Sobel filter that is in turn better than the Prewitt # filter [1]_ [2]_ [3]_. The difference between the Prewitt and Sobel filters # and the Scharr filter is illustrated below with an image that is the # discretization of a rotation- invariant continuous function. The # discrepancy between the Prewitt and Sobel filters, and the Scharr filter is # stronger for regions of the image where the direction of the gradient is # close to diagonal, and for regions with high spatial frequencies. For the # example image the differences between the filter results are very small and # the filter results are visually almost indistinguishable. # # .. [1] https://en.wikipedia.org/wiki/Sobel_operator#Alternative_operators # # .. [2] <NAME>, <NAME>, and <NAME>. Principles of filter design. # In Handbook of Computer Vision and Applications. Academic Press, # 1999. # # .. [3] https://en.wikipedia.org/wiki/Prewitt_operator x, y = np.ogrid[:100, :100] # Rotation-invariant image with different spatial frequencies img = np.exp(1j * np.hypot(x, y) ** 1.3 / 20.).real edge_sobel = sobel(img) edge_scharr = scharr(img) edge_prewitt = prewitt(img) diff_scharr_prewitt = edge_scharr - edge_prewitt diff_scharr_sobel = edge_scharr - edge_sobel max_diff = np.max(np.maximum(diff_scharr_prewitt, diff_scharr_sobel)) fig, axes = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(8, 8)) ax = axes.ravel() ax[0].imshow(img, cmap=plt.cm.gray) ax[0].set_title('Original image') ax[1].imshow(edge_scharr, cmap=plt.cm.gray) ax[1].set_title('Scharr Edge Detection') ax[2].imshow(diff_scharr_prewitt, cmap=plt.cm.gray, vmax=max_diff) ax[2].set_title('Scharr - Prewitt') ax[3].imshow(diff_scharr_sobel, cmap=plt.cm.gray, vmax=max_diff) ax[3].set_title('Scharr - Sobel') for a in ax: a.axis('off') plt.tight_layout() plt.show() ###################################################################### # As in the previous example, here we illustrate the rotational invariance of # the filters. The top row shows a rotationally invariant image along with the # angle of its analytical gradient. The other two rows contain the difference # between the different gradient approximations (Sobel, Prewitt, Scharr & # Farid) and analytical gradient. # # The Farid & Simoncelli derivative filters [4]_, [5]_ are the most # rotationally invariant, but require a 5x5 kernel, which is computationally # more intensive than a 3x3 kernel. # # .. [4] <NAME>. 
and <NAME>., "Differentiation of discrete # multidimensional signals", IEEE Transactions on Image Processing 13(4): # 496-508, 2004. :DOI:`10.1109/TIP.2004.823819` # # .. [5] Wikipedia, "Farid and Simoncelli Derivatives." Available at: # <https://en.wikipedia.org/wiki/Image_derivatives#Farid_and_Simoncelli_Derivatives> x, y = np.mgrid[-10:10:255j, -10:10:255j] img = np.sin(x ** 2 + y ** 2) imgx = 2 * x * np.cos(x ** 2 + y ** 2) imgy = 2 * y * np.cos(x ** 2 + y ** 2) def angle(dx, dy): return np.mod(np.arctan2(dy, dx), np.pi) true_angle = angle(imgx, imgy) angle_farid = angle(farid_h(img), farid_v(img)) angle_sobel = angle(sobel_h(img), sobel_v(img)) angle_scharr = angle(scharr_h(img), scharr_v(img)) angle_prewitt = angle(prewitt_h(img), prewitt_v(img)) def diff_angle(angle_1, angle_2): return np.minimum(np.pi - np.abs(angle_1 - angle_2), np.abs(angle_1 - angle_2)) diff_farid = diff_angle(true_angle, angle_farid) diff_sobel = diff_angle(true_angle, angle_sobel) diff_scharr = diff_angle(true_angle, angle_scharr) diff_prewitt = diff_angle(true_angle, angle_prewitt) fig, axes = plt.subplots(nrows=3, ncols=2, sharex=True, sharey=True, figsize=(8, 8)) ax = axes.ravel() ax[0].imshow(img, cmap=plt.cm.gray) ax[0].set_title('Original image') ax[1].imshow(true_angle, cmap=plt.cm.hsv) ax[1].set_title('Analytical gradient angle') ax[2].imshow(diff_sobel, cmap=plt.cm.inferno, vmin=0, vmax=0.02) ax[2].set_title('Sobel error') ax[3].imshow(diff_prewitt, cmap=plt.cm.inferno, vmin=0, vmax=0.02) ax[3].set_title('Prewitt error') ax[4].imshow(diff_scharr, cmap=plt.cm.inferno, vmin=0, vmax=0.02) ax[4].set_title('Scharr error') cax = ax[5].imshow(diff_farid, cmap=plt.cm.inferno, vmin=0, vmax=0.02) ax[5].set_title('Farid error') fig.subplots_adjust(right=0.8) cbar_ax = fig.add_axes([0.90, 0.10, 0.02, 0.50]) fig.colorbar(cax, cax=cbar_ax, ticks=[0, 0.01, 0.02]) for a in ax: a.axis('off') plt.show()
[ "numpy.maximum", "numpy.arctan2", "numpy.abs", "skimage.filters.farid_v", "skimage.filters.scharr_h", "skimage.filters.sobel_v", "skimage.filters.sobel", "numpy.sin", "matplotlib.pyplot.tight_layout", "skimage.filters.farid_h", "skimage.filters.scharr", "skimage.filters.prewitt_h", "skimage.filters.sobel_h", "skimage.filters.prewitt_v", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "numpy.hypot", "numpy.cos", "skimage.data.camera", "skimage.filters.roberts", "skimage.filters.scharr_v", "skimage.filters.prewitt" ]
[((491, 499), 'skimage.data.camera', 'camera', ([], {}), '()\n', (497, 499), False, 'from skimage.data import camera\n'), ((515, 529), 'skimage.filters.roberts', 'roberts', (['image'], {}), '(image)\n', (522, 529), False, 'from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((543, 555), 'skimage.filters.sobel', 'sobel', (['image'], {}), '(image)\n', (548, 555), False, 'from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((567, 630), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(2)', 'sharex': '(True)', 'sharey': '(True)', 'figsize': '(8, 4)'}), '(ncols=2, sharex=True, sharey=True, figsize=(8, 4))\n', (579, 630), True, 'import matplotlib.pyplot as plt\n'), ((859, 877), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (875, 877), True, 'import matplotlib.pyplot as plt\n'), ((878, 888), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (886, 888), True, 'import matplotlib.pyplot as plt\n'), ((2219, 2229), 'skimage.filters.sobel', 'sobel', (['img'], {}), '(img)\n', (2224, 2229), False, 'from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((2244, 2255), 'skimage.filters.scharr', 'scharr', (['img'], {}), '(img)\n', (2250, 2255), False, 'from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((2271, 2283), 'skimage.filters.prewitt', 'prewitt', (['img'], {}), '(img)\n', (2278, 2283), False, 'from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((2462, 2534), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)', 'sharex': '(True)', 'sharey': '(True)', 'figsize': '(8, 8)'}), '(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(8, 8))\n', (2474, 2534), True, 'import matplotlib.pyplot as plt\n'), ((2972, 2990), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2988, 2990), True, 'import matplotlib.pyplot as plt\n'), ((2991, 3001), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2999, 3001), True, 'import matplotlib.pyplot as plt\n'), ((4012, 4035), 'numpy.sin', 'np.sin', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (4018, 4035), True, 'import numpy as np\n'), ((4775, 4847), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(3)', 'ncols': '(2)', 'sharex': '(True)', 'sharey': '(True)', 'figsize': '(8, 8)'}), '(nrows=3, ncols=2, sharex=True, sharey=True, figsize=(8, 8))\n', (4787, 4847), True, 'import matplotlib.pyplot as plt\n'), ((5618, 5628), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5626, 5628), True, 'import matplotlib.pyplot as plt\n'), ((2397, 2447), 'numpy.maximum', 'np.maximum', (['diff_scharr_prewitt', 'diff_scharr_sobel'], {}), '(diff_scharr_prewitt, diff_scharr_sobel)\n', (2407, 2447), True, 'import numpy as np\n'), ((4052, 4075), 'numpy.cos', 'np.cos', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (4058, 4075), True, 'import numpy as np\n'), ((4091, 4114), 'numpy.cos', 'np.cos', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (4097, 4114), True, 'import numpy as np\n'), ((4235, 4247), 'skimage.filters.farid_h', 'farid_h', (['img'], {}), '(img)\n', (4242, 4247), False, 'from skimage.filters import 
roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((4249, 4261), 'skimage.filters.farid_v', 'farid_v', (['img'], {}), '(img)\n', (4256, 4261), False, 'from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((4283, 4295), 'skimage.filters.sobel_h', 'sobel_h', (['img'], {}), '(img)\n', (4290, 4295), False, 'from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((4297, 4309), 'skimage.filters.sobel_v', 'sobel_v', (['img'], {}), '(img)\n', (4304, 4309), False, 'from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((4332, 4345), 'skimage.filters.scharr_h', 'scharr_h', (['img'], {}), '(img)\n', (4340, 4345), False, 'from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((4347, 4360), 'skimage.filters.scharr_v', 'scharr_v', (['img'], {}), '(img)\n', (4355, 4360), False, 'from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((4384, 4398), 'skimage.filters.prewitt_h', 'prewitt_h', (['img'], {}), '(img)\n', (4393, 4398), False, 'from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((4400, 4414), 'skimage.filters.prewitt_v', 'prewitt_v', (['img'], {}), '(img)\n', (4409, 4414), False, 'from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h\n'), ((4154, 4172), 'numpy.arctan2', 'np.arctan2', (['dy', 'dx'], {}), '(dy, dx)\n', (4164, 4172), True, 'import numpy as np\n'), ((4531, 4556), 'numpy.abs', 'np.abs', (['(angle_1 - angle_2)'], {}), '(angle_1 - angle_2)\n', (4537, 4556), True, 'import numpy as np\n'), ((4482, 4507), 'numpy.abs', 'np.abs', (['(angle_1 - angle_2)'], {}), '(angle_1 - angle_2)\n', (4488, 4507), True, 'import numpy as np\n'), ((2171, 2185), 'numpy.hypot', 'np.hypot', (['x', 'y'], {}), '(x, y)\n', (2179, 2185), True, 'import numpy as np\n')]
# -*- encoding: utf-8 -*- """ Copyright (c) 2019 - present AppSeed.us """ import time from flask.globals import request from app.home import blueprint from flask import render_template, redirect, url_for from flask_login import login_required, current_user from app import login_manager from jinja2 import TemplateNotFound from flask import jsonify import matplotlib.pyplot as plt import nltk import pandas as pd import praw import squarify from flask import Flask, render_template from nltk.corpus import stopwords from nltk.sentiment.vader import SentimentIntensityAnalyzer from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer import os from app.settings import APP_STATIC from data import * from app.base.models import User, Picks from app import db nltk.download('stopwords') set(stopwords.words('english')) @blueprint.route('/index') #@login_required def index1(): return render_template('core/reddit-index.html') @blueprint.route('/index1') #@login_required def index(): # db.drop_all() # db.create_all() #found=Picks.query.all() arr=[] for i in Picks.query.all(): print(i.__dict__) temp = i #temp.time = int(time.mktime(temp.time.timetuple())) * 1000 del temp._sa_instance_state arr.append(temp.__dict__) return render_template('index.html', time=12345, df=arr) @blueprint.route('/reddit-index') def my_form(): return render_template('core/reddit-index.html') @blueprint.route('/reddit-index', methods=['POST']) def my_form_input(): input = { 'subs': request.form['subs'] if request.form['subs'] else ['wallstreetbets'], 'post_flairs': request.form['post_flairs'] if request.form['post_flairs'] else {'Daily Discussion', 'Weekend Discussion', 'Discussion'}, 'goodAuth': request.form['goodAuth'] if request.form['goodAuth'] else{'AutoModerator'}, 'uniqueCmt': request.form['uniqueCmt'] if request.form['uniqueCmt'] else True, 'ignoreAuthP': request.form['ignoreAuthP'] if request.form['ignoreAuthP'] else {'example'}, 'ignoreAuthC': request.form['ignoreAuthC'] if request.form['ignoreAuthC'] else {'example,'}, 'upvoteRatio': request.form['upvoteRatio'] if request.form['upvoteRatio'] else 0.70, 'ups': request.form['ups'] if request.form['ups'] else 20, 'limit': request.form['limit'] if request.form['limit'] else 500, 'upvotes': request.form['upvotes'] if request.form['upvotes'] else 2, 'picks': request.form['picks'] if request.form['picks'] else 10, 'picks_ayz': request.form['picks_ayz'] if request.form['picks_ayz'] else 5, } print("input is", input) return render_template('core/reddit-index.html') @ blueprint.route('/data', methods=['POST', 'GET']) def my_form_post(): import time start_time = time.time() ctime = time.ctime() print('time is', time.ctime()) reddit = praw.Reddit(user_agent="Comment Extraction", client_id="ZM9jcd0nyXvtlA", client_secret="<KEY>", username="", password="") '''############################################################################''' # set the program parameters subs = ['wallstreetbets'] # sub-reddit to search # posts flairs to search || None flair is automatically considered post_flairs = {'Daily Discussion', 'Weekend Discussion', 'Discussion'} # authors whom comments are allowed more than once goodAuth = {'AutoModerator'} uniqueCmt = True # allow one comment per author per symbol ignoreAuthP = {'example'} # authors to ignore for posts ignoreAuthC = {'example'} # authors to ignore for comment upvoteRatio = 0.70 # upvote ratio for post to be considered, 0.70 = 70% ups = 20 # define # of upvotes, post is considered if upvotes exceed this # limit = 5 # define the limit, 
comments 'replace more' limit upvotes = 2 # define # of upvotes, comment is considered if upvotes exceed this # picks = 10 # define # of picks here, prints as "Top ## picks are:" picks_ayz = 5 # define # of picks for sentiment analysis '''############################################################################''' posts, count, c_analyzed, tickers, titles, a_comments = 0, 0, 0, {}, [], {} cmt_auth = {} num = 0 comm = 0 for sub in subs: subreddit = reddit.subreddit(sub) hot_python = subreddit.hot() # sorting posts by hot # Extracting comments, symbols from subreddit print("running", str(hot_python)) for submission in hot_python: flair = submission.link_flair_text author = submission.author.name # custom write func file = open(os.path.join(APP_STATIC, "output/sample.py"), "w", encoding='utf-8') hotlist = [i for i in hot_python] file.write("start time was %s num is %d and hotlist is %s " % (str(time.ctime()), num, str(hotlist))) print('num is', num) file.close() num += 1 # checking: post upvote ratio # of upvotes, post flair, and author if submission.upvote_ratio >= upvoteRatio and submission.ups > ups and (flair in post_flairs or flair is None) and author not in ignoreAuthP: submission.comment_sort = 'new' comments = submission.comments titles.append(submission.title) posts += 1 try: submission.comments.replace_more(limit=limit) for comment in comments: file = open(os.path.join( APP_STATIC, "output/sample.py"), "a", encoding='utf-8') file.write("comnum is %d and comm is %s " % (comm, str(comment))) file.close() comm += 1 #print("comnum is", comm) # try except for deleted account? try: auth = comment.author.name except: pass c_analyzed += 1 # checking: comment upvotes and author if comment.score > upvotes and auth not in ignoreAuthC: split = comment.body.split(" ") for word in split: word = word.replace("$", "") # upper = ticker, length of ticker <= 5, excluded words, if word.isupper() and len(word) <= 5 and word not in blacklist and word in us: # unique comments, try/except for key errors if uniqueCmt and auth not in goodAuth: try: if auth in cmt_auth[word]: break except: pass # counting tickers if word in tickers: tickers[word] += 1 a_comments[word].append(comment.body) cmt_auth[word].append(auth) count += 1 else: tickers[word] = 1 cmt_auth[word] = [auth] a_comments[word] = [comment.body] count += 1 except Exception as e: print(e) # sorts the dictionary symbols = dict( sorted(tickers.items(), key=lambda item: item[1], reverse=True)) top_picks = list(symbols.keys())[0:picks] time = (time.time() - start_time) # print top picks print("It took {t:.2f} seconds to analyze {c} comments in {p} posts in {s} subreddits.\n".format( t=time, c=c_analyzed, p=posts, s=len(subs))) print("Posts analyzed saved in titles") # for i in titles: print(i) # prints the title of the posts analyzed print(f"\n{picks} most mentioned picks: ") times = [] top = [] for i in top_picks: print(f"{i}: {symbols[i]}") times.append(symbols[i]) top.append(f"{i}: {symbols[i]}") # Applying Sentiment Analysis scores, s = {}, {} vader = SentimentIntensityAnalyzer() # adding custom words from data.py vader.lexicon.update(new_words) picks_sentiment = list(symbols.keys())[0:picks_ayz] for symbol in picks_sentiment: stock_comments = a_comments[symbol] for cmnt in stock_comments: score = vader.polarity_scores(cmnt) if symbol in s: s[symbol][cmnt] = score else: s[symbol] = {cmnt: score} if symbol in scores: for key, _ in score.items(): scores[symbol][key] += score[key] else: scores[symbol] = score # calculating avg. 
for key in score: scores[symbol][key] = scores[symbol][key] / symbols[symbol] scores[symbol][key] = "{pol:.3f}".format(pol=scores[symbol][key]) picksdb = Picks(pick=scores) timesdb = Picks(pick=[times, top, top_picks]) # print(picks) db.session.add(picksdb) db.session.add(timesdb) db.session.commit() # printing sentiment analysis print(f"\nSentiment analysis of top {picks_ayz} picks:") df = pd.DataFrame(scores) df.index = ['Bearish', 'Neutral', 'Bullish', 'Total/Compound'] df = df.T print(df) # Date Visualization # most mentioned picks squarify.plot(sizes=times, label=top, alpha=.7) plt.axis('off') plt.title(f"{picks} most mentioned picks") # plt.show() # Sentiment analysis df = df.astype(float) colors = ['red', 'springgreen', 'forestgreen', 'coral'] df.plot(kind='bar', color=colors, title=f"Sentiment analysis of top {picks_ayz} picks:") # plt.show() print('done') file = open(os.path.join(APP_STATIC, "output/final_output.py"), "w", encoding='utf-8') file.write("start time was %s /n/n top picks are %s and df is %s" % (str(ctime), str(top_picks), str(df))) print('num is', num) file.close() return render_template('core/reddit-data.html', result='done', final=df, t=ctime, c=c_analyzed, p=posts, s=len(subs)) @ blueprint.route('/visualize', methods=['POST', 'GET']) def visualize(): return render_template('core/reddit-data.html', result='done', final='ok') @ blueprint.route('/status_bar', methods=['POST', 'GET']) def status_bar(): file = open(os.path.join(APP_STATIC, "output/sample.py"), "r") stat = file.read() file.close() admin = User(username='admin', email='<EMAIL>', password='<PASSWORD>') db.session.add(admin) print(User.query.all()) return render_template('core/reddit-data.html', final=stat, result='read complete') @ blueprint.route('/output', methods=['POST', 'GET']) def output(): file = open(os.path.join(APP_STATIC, 'output/final_output.py'), "r") stat = file.read() print("stat is %s" % stat) file.close() return render_template('core/reddit-output.html', arg=stat) @ blueprint.route('/test', methods=['POST', 'GET']) def test(): picks = Picks(pick='hoho', bearish='whooter', bullish='what') db.session.add(picks) db.session.commit() return jsonify({'result': 'ohk'}) @ blueprint.route('/test2', methods=['POST', 'GET']) def test2(): hoho = 'hoho' found=Picks.query.filter_by(pick='hoho').first() print((Picks.query.filter_by(pick='hoho').first())) return 'ohkk' @ blueprint.route('/core/settings', methods=['GET']) def settingsGet(): return render_template('core/settings.html',delete_db=delete_db, create_db=create_db) @ blueprint.route('/core/settings', methods=['POST']) def settings(): query = request.form['query'] found = Picks.query.filter_by(id=query).first() print(found) return render_template('core/settings.html', found=found, delete_db=delete_db, create_db=create_db) def delete_db(): #db.drop_all() return 'DB deleted' def create_db(): db.create_all() return 'All DB created' @ blueprint.route('/core/<template>') def route_core_template(template): try: if not template.endswith('.html'): core='core/' template += '.html' template=core+template return render_template(template) except TemplateNotFound: return render_template('page-404.html'), 404 except: return render_template('page-500.html'), 500 @ blueprint.route('/<template>') def route_template(template): try: if not template.endswith('.html'): template += '.html' return render_template(template) except TemplateNotFound: return render_template('page-404.html'), 404 except: return render_template('page-500.html'), 500
[ "matplotlib.pyplot.title", "time.ctime", "flask.jsonify", "nltk.download", "os.path.join", "pandas.DataFrame", "squarify.plot", "app.base.models.Picks.query.all", "app.base.models.Picks", "app.home.blueprint.route", "app.db.session.commit", "flask.render_template", "app.base.models.Picks.query.filter_by", "app.db.create_all", "app.base.models.User", "app.base.models.User.query.all", "nltk.corpus.stopwords.words", "matplotlib.pyplot.axis", "time.time", "vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer", "praw.Reddit", "app.db.session.add" ]
[((771, 797), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (784, 797), False, 'import nltk\n'), ((833, 858), 'app.home.blueprint.route', 'blueprint.route', (['"""/index"""'], {}), "('/index')\n", (848, 858), False, 'from app.home import blueprint\n'), ((945, 971), 'app.home.blueprint.route', 'blueprint.route', (['"""/index1"""'], {}), "('/index1')\n", (960, 971), False, 'from app.home import blueprint\n'), ((1370, 1402), 'app.home.blueprint.route', 'blueprint.route', (['"""/reddit-index"""'], {}), "('/reddit-index')\n", (1385, 1402), False, 'from app.home import blueprint\n'), ((1475, 1525), 'app.home.blueprint.route', 'blueprint.route', (['"""/reddit-index"""'], {'methods': "['POST']"}), "('/reddit-index', methods=['POST'])\n", (1490, 1525), False, 'from app.home import blueprint\n'), ((2739, 2788), 'app.home.blueprint.route', 'blueprint.route', (['"""/data"""'], {'methods': "['POST', 'GET']"}), "('/data', methods=['POST', 'GET'])\n", (2754, 2788), False, 'from app.home import blueprint\n'), ((10911, 10965), 'app.home.blueprint.route', 'blueprint.route', (['"""/visualize"""'], {'methods': "['POST', 'GET']"}), "('/visualize', methods=['POST', 'GET'])\n", (10926, 10965), False, 'from app.home import blueprint\n'), ((11066, 11121), 'app.home.blueprint.route', 'blueprint.route', (['"""/status_bar"""'], {'methods': "['POST', 'GET']"}), "('/status_bar', methods=['POST', 'GET'])\n", (11081, 11121), False, 'from app.home import blueprint\n'), ((11468, 11519), 'app.home.blueprint.route', 'blueprint.route', (['"""/output"""'], {'methods': "['POST', 'GET']"}), "('/output', methods=['POST', 'GET'])\n", (11483, 11519), False, 'from app.home import blueprint\n'), ((11746, 11795), 'app.home.blueprint.route', 'blueprint.route', (['"""/test"""'], {'methods': "['POST', 'GET']"}), "('/test', methods=['POST', 'GET'])\n", (11761, 11795), False, 'from app.home import blueprint\n'), ((11966, 12016), 'app.home.blueprint.route', 'blueprint.route', (['"""/test2"""'], {'methods': "['POST', 'GET']"}), "('/test2', methods=['POST', 'GET'])\n", (11981, 12016), False, 'from app.home import blueprint\n'), ((12178, 12228), 'app.home.blueprint.route', 'blueprint.route', (['"""/core/settings"""'], {'methods': "['GET']"}), "('/core/settings', methods=['GET'])\n", (12193, 12228), False, 'from app.home import blueprint\n'), ((12341, 12392), 'app.home.blueprint.route', 'blueprint.route', (['"""/core/settings"""'], {'methods': "['POST']"}), "('/core/settings', methods=['POST'])\n", (12356, 12392), False, 'from app.home import blueprint\n'), ((12747, 12782), 'app.home.blueprint.route', 'blueprint.route', (['"""/core/<template>"""'], {}), "('/core/<template>')\n", (12762, 12782), False, 'from app.home import blueprint\n'), ((13153, 13183), 'app.home.blueprint.route', 'blueprint.route', (['"""/<template>"""'], {}), "('/<template>')\n", (13168, 13183), False, 'from app.home import blueprint\n'), ((802, 828), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (817, 828), False, 'from nltk.corpus import stopwords\n'), ((901, 942), 'flask.render_template', 'render_template', (['"""core/reddit-index.html"""'], {}), "('core/reddit-index.html')\n", (916, 942), False, 'from flask import Flask, render_template\n'), ((1097, 1114), 'app.base.models.Picks.query.all', 'Picks.query.all', ([], {}), '()\n', (1112, 1114), False, 'from app.base.models import User, Picks\n'), ((1317, 1366), 'flask.render_template', 'render_template', (['"""index.html"""'], {'time': '(12345)', 
'df': 'arr'}), "('index.html', time=12345, df=arr)\n", (1332, 1366), False, 'from flask import Flask, render_template\n'), ((1429, 1470), 'flask.render_template', 'render_template', (['"""core/reddit-index.html"""'], {}), "('core/reddit-index.html')\n", (1444, 1470), False, 'from flask import Flask, render_template\n'), ((2693, 2734), 'flask.render_template', 'render_template', (['"""core/reddit-index.html"""'], {}), "('core/reddit-index.html')\n", (2708, 2734), False, 'from flask import Flask, render_template\n'), ((2842, 2853), 'time.time', 'time.time', ([], {}), '()\n', (2851, 2853), False, 'import time\n'), ((2866, 2878), 'time.ctime', 'time.ctime', ([], {}), '()\n', (2876, 2878), False, 'import time\n'), ((2928, 3053), 'praw.Reddit', 'praw.Reddit', ([], {'user_agent': '"""Comment Extraction"""', 'client_id': '"""ZM9jcd0nyXvtlA"""', 'client_secret': '"""<KEY>"""', 'username': '""""""', 'password': '""""""'}), "(user_agent='Comment Extraction', client_id='ZM9jcd0nyXvtlA',\n client_secret='<KEY>', username='', password='')\n", (2939, 3053), False, 'import praw\n'), ((8819, 8847), 'vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer', 'SentimentIntensityAnalyzer', ([], {}), '()\n', (8845, 8847), False, 'from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n'), ((9680, 9698), 'app.base.models.Picks', 'Picks', ([], {'pick': 'scores'}), '(pick=scores)\n', (9685, 9698), False, 'from app.base.models import User, Picks\n'), ((9713, 9748), 'app.base.models.Picks', 'Picks', ([], {'pick': '[times, top, top_picks]'}), '(pick=[times, top, top_picks])\n', (9718, 9748), False, 'from app.base.models import User, Picks\n'), ((9772, 9795), 'app.db.session.add', 'db.session.add', (['picksdb'], {}), '(picksdb)\n', (9786, 9795), False, 'from app import db\n'), ((9800, 9823), 'app.db.session.add', 'db.session.add', (['timesdb'], {}), '(timesdb)\n', (9814, 9823), False, 'from app import db\n'), ((9828, 9847), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (9845, 9847), False, 'from app import db\n'), ((9952, 9972), 'pandas.DataFrame', 'pd.DataFrame', (['scores'], {}), '(scores)\n', (9964, 9972), True, 'import pandas as pd\n'), ((10125, 10173), 'squarify.plot', 'squarify.plot', ([], {'sizes': 'times', 'label': 'top', 'alpha': '(0.7)'}), '(sizes=times, label=top, alpha=0.7)\n', (10138, 10173), False, 'import squarify\n'), ((10177, 10192), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (10185, 10192), True, 'import matplotlib.pyplot as plt\n'), ((10197, 10239), 'matplotlib.pyplot.title', 'plt.title', (['f"""{picks} most mentioned picks"""'], {}), "(f'{picks} most mentioned picks')\n", (10206, 10239), True, 'import matplotlib.pyplot as plt\n'), ((10994, 11061), 'flask.render_template', 'render_template', (['"""core/reddit-data.html"""'], {'result': '"""done"""', 'final': '"""ok"""'}), "('core/reddit-data.html', result='done', final='ok')\n", (11009, 11061), False, 'from flask import Flask, render_template\n'), ((11259, 11321), 'app.base.models.User', 'User', ([], {'username': '"""admin"""', 'email': '"""<EMAIL>"""', 'password': '"""<PASSWORD>"""'}), "(username='admin', email='<EMAIL>', password='<PASSWORD>')\n", (11263, 11321), False, 'from app.base.models import User, Picks\n'), ((11326, 11347), 'app.db.session.add', 'db.session.add', (['admin'], {}), '(admin)\n', (11340, 11347), False, 'from app import db\n'), ((11387, 11463), 'flask.render_template', 'render_template', (['"""core/reddit-data.html"""'], {'final': 'stat', 'result': '"""read 
complete"""'}), "('core/reddit-data.html', final=stat, result='read complete')\n", (11402, 11463), False, 'from flask import Flask, render_template\n'), ((11689, 11741), 'flask.render_template', 'render_template', (['"""core/reddit-output.html"""'], {'arg': 'stat'}), "('core/reddit-output.html', arg=stat)\n", (11704, 11741), False, 'from flask import Flask, render_template\n'), ((11820, 11873), 'app.base.models.Picks', 'Picks', ([], {'pick': '"""hoho"""', 'bearish': '"""whooter"""', 'bullish': '"""what"""'}), "(pick='hoho', bearish='whooter', bullish='what')\n", (11825, 11873), False, 'from app.base.models import User, Picks\n'), ((11878, 11899), 'app.db.session.add', 'db.session.add', (['picks'], {}), '(picks)\n', (11892, 11899), False, 'from app import db\n'), ((11904, 11923), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (11921, 11923), False, 'from app import db\n'), ((11935, 11961), 'flask.jsonify', 'jsonify', (["{'result': 'ohk'}"], {}), "({'result': 'ohk'})\n", (11942, 11961), False, 'from flask import jsonify\n'), ((12259, 12338), 'flask.render_template', 'render_template', (['"""core/settings.html"""'], {'delete_db': 'delete_db', 'create_db': 'create_db'}), "('core/settings.html', delete_db=delete_db, create_db=create_db)\n", (12274, 12338), False, 'from flask import Flask, render_template\n'), ((12523, 12619), 'flask.render_template', 'render_template', (['"""core/settings.html"""'], {'found': 'found', 'delete_db': 'delete_db', 'create_db': 'create_db'}), "('core/settings.html', found=found, delete_db=delete_db,\n create_db=create_db)\n", (12538, 12619), False, 'from flask import Flask, render_template\n'), ((12697, 12712), 'app.db.create_all', 'db.create_all', ([], {}), '()\n', (12710, 12712), False, 'from app import db\n'), ((2900, 2912), 'time.ctime', 'time.ctime', ([], {}), '()\n', (2910, 2912), False, 'import time\n'), ((8216, 8227), 'time.time', 'time.time', ([], {}), '()\n', (8225, 8227), False, 'import time\n'), ((10525, 10575), 'os.path.join', 'os.path.join', (['APP_STATIC', '"""output/final_output.py"""'], {}), "(APP_STATIC, 'output/final_output.py')\n", (10537, 10575), False, 'import os\n'), ((11156, 11200), 'os.path.join', 'os.path.join', (['APP_STATIC', '"""output/sample.py"""'], {}), "(APP_STATIC, 'output/sample.py')\n", (11168, 11200), False, 'import os\n'), ((11358, 11374), 'app.base.models.User.query.all', 'User.query.all', ([], {}), '()\n', (11372, 11374), False, 'from app.base.models import User, Picks\n'), ((11550, 11600), 'os.path.join', 'os.path.join', (['APP_STATIC', '"""output/final_output.py"""'], {}), "(APP_STATIC, 'output/final_output.py')\n", (11562, 11600), False, 'import os\n'), ((12977, 13002), 'flask.render_template', 'render_template', (['template'], {}), '(template)\n', (12992, 13002), False, 'from flask import Flask, render_template\n'), ((13313, 13338), 'flask.render_template', 'render_template', (['template'], {}), '(template)\n', (13328, 13338), False, 'from flask import Flask, render_template\n'), ((12058, 12092), 'app.base.models.Picks.query.filter_by', 'Picks.query.filter_by', ([], {'pick': '"""hoho"""'}), "(pick='hoho')\n", (12079, 12092), False, 'from app.base.models import User, Picks\n'), ((12455, 12486), 'app.base.models.Picks.query.filter_by', 'Picks.query.filter_by', ([], {'id': 'query'}), '(id=query)\n', (12476, 12486), False, 'from app.base.models import User, Picks\n'), ((4858, 4902), 'os.path.join', 'os.path.join', (['APP_STATIC', '"""output/sample.py"""'], {}), "(APP_STATIC, 'output/sample.py')\n", (4870, 
4902), False, 'import os\n'), ((12112, 12146), 'app.base.models.Picks.query.filter_by', 'Picks.query.filter_by', ([], {'pick': '"""hoho"""'}), "(pick='hoho')\n", (12133, 12146), False, 'from app.base.models import User, Picks\n'), ((13047, 13079), 'flask.render_template', 'render_template', (['"""page-404.html"""'], {}), "('page-404.html')\n", (13062, 13079), False, 'from flask import Flask, render_template\n'), ((13112, 13144), 'flask.render_template', 'render_template', (['"""page-500.html"""'], {}), "('page-500.html')\n", (13127, 13144), False, 'from flask import Flask, render_template\n'), ((13383, 13415), 'flask.render_template', 'render_template', (['"""page-404.html"""'], {}), "('page-404.html')\n", (13398, 13415), False, 'from flask import Flask, render_template\n'), ((13448, 13480), 'flask.render_template', 'render_template', (['"""page-500.html"""'], {}), "('page-500.html')\n", (13463, 13480), False, 'from flask import Flask, render_template\n'), ((5099, 5111), 'time.ctime', 'time.ctime', ([], {}), '()\n', (5109, 5111), False, 'import time\n'), ((5785, 5829), 'os.path.join', 'os.path.join', (['APP_STATIC', '"""output/sample.py"""'], {}), "(APP_STATIC, 'output/sample.py')\n", (5797, 5829), False, 'import os\n')]
import asyncio

from kubernetes_asyncio import client, config
from kubernetes_asyncio.stream import WsApiClient


async def main():
    # Configs can be set in Configuration class directly or using helper
    # utility. If no argument provided, the config will be loaded from
    # default location.
    config.load_kube_config()

    v1 = client.CoreV1Api()
    print("Try to find a pod with busybox (name busybox*) ...")
    ret = await v1.list_pod_for_all_namespaces()
    for i in ret.items:
        if i.metadata.name.startswith('busybox'):
            pod = i.metadata.name
            namespace = i.metadata.namespace
            print('Busy box', pod, 'namespace', namespace)
            break
    else:
        print('Busybox not found !')
        return

    v1_ws = client.CoreV1Api(api_client=WsApiClient())

    exec_command = [
        '/bin/sh',
        '-c',
        'echo This message goes to stderr >&2; echo This message goes to stdout']

    resp = v1_ws.connect_get_namespaced_pod_exec(pod, namespace,
                                                 command=exec_command,
                                                 stderr=True, stdin=False,
                                                 stdout=True, tty=False)

    ret = await resp
    print("Response: ", ret)


if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
    loop.close()
[ "kubernetes_asyncio.config.load_kube_config", "kubernetes_asyncio.stream.WsApiClient", "asyncio.get_event_loop", "kubernetes_asyncio.client.CoreV1Api" ]
[((304, 329), 'kubernetes_asyncio.config.load_kube_config', 'config.load_kube_config', ([], {}), '()\n', (327, 329), False, 'from kubernetes_asyncio import client, config\n'), ((340, 358), 'kubernetes_asyncio.client.CoreV1Api', 'client.CoreV1Api', ([], {}), '()\n', (356, 358), False, 'from kubernetes_asyncio import client, config\n'), ((1336, 1360), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1358, 1360), False, 'import asyncio\n'), ((807, 820), 'kubernetes_asyncio.stream.WsApiClient', 'WsApiClient', ([], {}), '()\n', (818, 820), False, 'from kubernetes_asyncio.stream import WsApiClient\n')]
# self play
print("loading...")
import numpy as np
import tensorflow as tf
tf.config.threading.set_inter_op_parallelism_threads(1)
tf.config.threading.set_intra_op_parallelism_threads(1)
import MCTS
import sys
if(len(sys.argv)<=1):
    print("Error! No argument given! Quitting.")
    quit()
alwaysNew=False
fpu,fpu1=1.3,1.0
t0,t1=5,5
if(alwaysNew):#use newest engine to generate
    MCTS.loadEngine(2,"./RNG64.tf")
    MCTS.setFPU(fpu,fpu1)
    MCTS.timeReset()
    MCTS.selfPlay(t0,MCTS.evaluatePositionA2,"./games/dat_train"+sys.argv[1]+".npz",40)
    #vldn time is 2 as the argument is the total time from calling time reset
    MCTS.selfPlay(t1,MCTS.evaluatePositionA2,"./games/dat_vlidn"+sys.argv[1]+".npz",10)
else:#use best engine by test to generate
    MCTS.loadEngine(1,"./weights/RNG64.tf")
    MCTS.setFPU(fpu,fpu1)
    MCTS.timeReset()
    MCTS.selfPlay(t0,MCTS.evaluatePositionA,"./games/dat_train"+sys.argv[1]+".npz",40)
    #vldn time is 2 as the argument is the total time from calling time reset
    MCTS.selfPlay(t1,MCTS.evaluatePositionA,"./games/dat_vlidn"+sys.argv[1]+".npz",10)


# npz_t=np.load("dat_train.npz")
# x_tr=npz_t['arr_0']
# y_tr=npz_t['arr_1']
# for i in range(len(y_tr)):
#     MCTS.showHeatMap(x_tr[i],y_tr[i])
[ "MCTS.selfPlay", "tensorflow.config.threading.set_intra_op_parallelism_threads", "tensorflow.config.threading.set_inter_op_parallelism_threads", "MCTS.setFPU", "MCTS.loadEngine", "MCTS.timeReset" ]
[((75, 130), 'tensorflow.config.threading.set_inter_op_parallelism_threads', 'tf.config.threading.set_inter_op_parallelism_threads', (['(1)'], {}), '(1)\n', (127, 130), True, 'import tensorflow as tf\n'), ((131, 186), 'tensorflow.config.threading.set_intra_op_parallelism_threads', 'tf.config.threading.set_intra_op_parallelism_threads', (['(1)'], {}), '(1)\n', (183, 186), True, 'import tensorflow as tf\n'), ((385, 417), 'MCTS.loadEngine', 'MCTS.loadEngine', (['(2)', '"""./RNG64.tf"""'], {}), "(2, './RNG64.tf')\n", (400, 417), False, 'import MCTS\n'), ((421, 443), 'MCTS.setFPU', 'MCTS.setFPU', (['fpu', 'fpu1'], {}), '(fpu, fpu1)\n', (432, 443), False, 'import MCTS\n'), ((447, 463), 'MCTS.timeReset', 'MCTS.timeReset', ([], {}), '()\n', (461, 463), False, 'import MCTS\n'), ((468, 563), 'MCTS.selfPlay', 'MCTS.selfPlay', (['t0', 'MCTS.evaluatePositionA2', "('./games/dat_train' + sys.argv[1] + '.npz')", '(40)'], {}), "(t0, MCTS.evaluatePositionA2, './games/dat_train' + sys.argv[1\n ] + '.npz', 40)\n", (481, 563), False, 'import MCTS\n'), ((634, 729), 'MCTS.selfPlay', 'MCTS.selfPlay', (['t1', 'MCTS.evaluatePositionA2', "('./games/dat_vlidn' + sys.argv[1] + '.npz')", '(10)'], {}), "(t1, MCTS.evaluatePositionA2, './games/dat_vlidn' + sys.argv[1\n ] + '.npz', 10)\n", (647, 729), False, 'import MCTS\n'), ((764, 804), 'MCTS.loadEngine', 'MCTS.loadEngine', (['(1)', '"""./weights/RNG64.tf"""'], {}), "(1, './weights/RNG64.tf')\n", (779, 804), False, 'import MCTS\n'), ((808, 830), 'MCTS.setFPU', 'MCTS.setFPU', (['fpu', 'fpu1'], {}), '(fpu, fpu1)\n', (819, 830), False, 'import MCTS\n'), ((834, 850), 'MCTS.timeReset', 'MCTS.timeReset', ([], {}), '()\n', (848, 850), False, 'import MCTS\n'), ((855, 948), 'MCTS.selfPlay', 'MCTS.selfPlay', (['t0', 'MCTS.evaluatePositionA', "('./games/dat_train' + sys.argv[1] + '.npz')", '(40)'], {}), "(t0, MCTS.evaluatePositionA, './games/dat_train' + sys.argv[1] +\n '.npz', 40)\n", (868, 948), False, 'import MCTS\n'), ((1020, 1113), 'MCTS.selfPlay', 'MCTS.selfPlay', (['t1', 'MCTS.evaluatePositionA', "('./games/dat_vlidn' + sys.argv[1] + '.npz')", '(10)'], {}), "(t1, MCTS.evaluatePositionA, './games/dat_vlidn' + sys.argv[1] +\n '.npz', 10)\n", (1033, 1113), False, 'import MCTS\n')]
import tensorflow as tf from keras.models import Model from deephar.layers import * from deephar.utils import * def conv_block(inp, kernel_size, filters, last_act=True): filters1, filters2, filters3 = filters x = conv_bn_act(inp, filters1, (1, 1)) x = conv_bn_act(x, filters2, kernel_size) x = conv_bn(x, filters3, (1, 1)) shortcut = conv_bn(inp, filters3, (1, 1)) x = add([x, shortcut]) if last_act: x = Activation('relu')(x) return x def identity_block(inp, kernel_size, filters, last_act=True): filters1, filters2, filters3 = filters x = conv_bn_act(inp, filters1, (1, 1)) x = conv_bn_act(x, filters2, kernel_size) x = conv_bn(x, filters3, (1, 1)) x = add([x, inp]) if last_act: x = Activation('relu')(x) return x def stem_inception_v4(x, image_div=8): """Entry-flow network (stem) *based* on Inception_v4.""" assert image_div in [4, 8, 16, 32], \ 'Invalid image_div ({}).'.format(image_div) x = conv_bn_act(x, 32, (3, 3), strides=(2, 2)) x = conv_bn_act(x, 32, (3, 3)) if image_div is 32: x = MaxPooling2D((2, 2))(x) x = conv_bn_act(x, 64, (3, 3)) a = conv_bn_act(x, 96, (3, 3), strides=(2, 2)) b = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x) x = concatenate([a, b]) a = conv_bn_act(x, 64, (1, 1)) a = conv(a, 96, (3, 3)) b = conv_bn_act(x, 64, (1, 1)) b = conv_bn_act(b, 64, (5, 1)) b = conv_bn_act(b, 64, (1, 5)) b = conv(b, 96, (3, 3)) x = concatenate([a, b]) x = BatchNormalization(axis=-1, scale=False)(x) if image_div is not 4: a = act_conv_bn(x, 192, (3, 3), strides=(2, 2)) b = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x) x = concatenate([a, b]) if image_div in [16, 32]: a = act_conv_bn(x, 192, (3, 3), strides=(2, 2)) b = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x) x = concatenate([a, b]) if image_div is 4: x = residual(x, int_size=112, out_size=2*192+64, convtype='normal', name='residual0') else: x = residual(x, int_size=144, out_size=3*192, convtype='normal', name='residual0') return x def stem_residual_eccv(x, image_div=8): """Entry-flow network (stem) *based* on ResNet ('residual' option).""" assert image_div in [4, 8, 16, 32], \ 'Invalid image_div ({}).'.format(image_div) x = conv_bn_act(x, 64, (7, 7), strides=(2, 2), padding='same') a = conv_bn_act(x, 128, (3, 3), padding='same') b = conv_bn_act(x, 128, (1, 1), padding='same') x = add([a, b]) x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x) x = residual(x, int_size=128, out_size=256, convtype='normal', name='rn0') x = residual(x, int_size=128, out_size=256, convtype='normal', name='rn1') if image_div is 4: x = residual(x, out_size=256, convtype='normal', name='rn3') else: x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x) x = residual(x, int_size=192, out_size=384, convtype='normal', name='rn3') x = residual(x, int_size=192, out_size=384, convtype='normal', name='rn4') if image_div in [16, 32]: x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x) x = residual(x, int_size=256, out_size=512, convtype='normal', name='rn5') x = residual(x, int_size=256, out_size=512, convtype='normal', name='rn6') if image_div is 32: x = MaxPooling2D((2, 2), strides=(2, 2), padding='same')(x) return x def reception_block(x, num_levels, kernel_size, int_size=None, convtype='depthwise', name=None): def hourglass(x, n): up1 = residual(x, kernel_size=kernel_size, int_size=int_size, convtype=convtype) low = MaxPooling2D((2, 2))(x) if n == num_levels: low = act_conv_bn(low, int(K.int_shape(x)[-1] / 2), (1, 1)) low = residual(low, kernel_size=kernel_size, int_size=int_size, convtype=convtype) if n > 2: low = 
hourglass(low, n-1) else: low = residual(low, kernel_size=kernel_size, int_size=int_size, convtype=convtype) if n == num_levels: low = residual(low, kernel_size=kernel_size, out_size=K.int_shape(x)[-1], int_size=int_size, convtype=convtype) else: low = residual(low, kernel_size=kernel_size, int_size=int_size, convtype=convtype) up2 = UpSampling2D((2, 2))(low) x = add([up1, up2]) return x x = hourglass(x, num_levels) return x def build_keypoints_regressor(input_shape, dim, num_maps, sam_model, prob_model, name=None, verbose=0): assert num_maps >= 1, \ 'The number of maps should be at least 1 (%d given)' % num_maps inputs = [] inputs3d = [] p_concat = [] v_concat = [] # Auxiliary functions v_tile = Lambda(lambda x: K.tile(x, (1, 1, dim))) # This depends on TensorFlow because keras does not implement divide. tf_div = Lambda(lambda x: tf.divide(x[0], x[1])) for i in range(num_maps): h = Input(shape=input_shape) inputs.append(h) h_s = act_channel_softmax(h) p = sam_model(h_s) v = prob_model(h_s) if dim == 3: d = Input(shape=input_shape) inputs3d.append(d) d_s = Activation('sigmoid')(d) dm = multiply([d_s, h_s]) z = Lambda(lambda x: K.sum(x, axis=(1, 2)))(dm) z = Lambda(lambda x: K.expand_dims(x, axis=-1))(z) p = concatenate([p, z]) if num_maps > 1: t = v_tile(v) p = multiply([p, v_tile(v)]) p_concat.append(p) v_concat.append(v) if num_maps > 1: p = add(p_concat) v_sum = add(v_concat) p = tf_div([p, v_tile(v_sum)]) v = maximum(v_concat) else: p = p_concat[0] v = v_concat[0] model = Model(inputs+inputs3d, [p, v], name=name) if verbose: model.summary() return model def build_context_aggregation(num_joints, num_context, alpha, num_frames=1, name=None): inp = Input(shape=(num_joints * num_context, 1)) d = Dense(num_joints, use_bias=False) x = Lambda(lambda x: K.squeeze(x, axis=-1))(inp) x = d(x) x = Lambda(lambda x: K.expand_dims(x, axis=-1))(x) w = d.get_weights() w[0].fill(0) for j in range(num_joints): start = j*num_context w[0][j * num_context : (j + 1) * num_context, j] = 1. d.set_weights(w) d.trainable = False ctx_sum = Model(inputs=inp, outputs=x) ctx_sum.trainable = False if num_frames > 1: ctx_sum = TimeDistributed(ctx_sum, input_shape=(num_frames,) + K.int_shape(inp)[1:]) # Define auxiliary layers. mul_alpha = Lambda(lambda x: alpha * x) mul_1alpha = Lambda(lambda x: (1 - alpha) * x) # This depends on TensorFlow because keras does not implement divide. 
tf_div = Lambda(lambda x: tf.divide(x[0], x[1])) if num_frames == 1: # Define inputs ys = Input(shape=(num_joints, 2)) yc = Input(shape=(num_joints * num_context, 2)) pc = Input(shape=(num_joints * num_context, 1)) # Split contextual predictions in x and y and do computations separately xi = Lambda(lambda x: x[:,:, 0:1])(yc) yi = Lambda(lambda x: x[:,:, 1:2])(yc) else: ys = Input(shape=(num_frames, num_joints, 2)) yc = Input(shape=(num_frames, num_joints * num_context, 2)) pc = Input(shape=(num_frames, num_joints * num_context, 1)) # Split contextual predictions in x and y and do computations separately xi = Lambda(lambda x: x[:,:,:, 0:1])(yc) yi = Lambda(lambda x: x[:,:,:, 1:2])(yc) pxi = multiply([xi, pc]) pyi = multiply([yi, pc]) pc_sum = ctx_sum(pc) pxi_sum = ctx_sum(pxi) pyi_sum = ctx_sum(pyi) pc_div = Lambda(lambda x: x / num_context)(pc_sum) pxi_div = tf_div([pxi_sum, pc_sum]) pyi_div = tf_div([pyi_sum, pc_sum]) yc_div = concatenate([pxi_div, pyi_div]) ys_alpha = mul_alpha(ys) yc_div_1alpha = mul_1alpha(yc_div) y = add([ys_alpha, yc_div_1alpha]) model = Model(inputs=[ys, yc, pc], outputs=y, name=name) model.trainable = False return model def build_softargmax_1d(input_shape, name=None): if name is None: name_sm = None else: name_sm = name + '_softmax' inp = Input(shape=input_shape) x = act_depth_softmax(inp, name=name_sm) x = lin_interpolation_1d(x) model = Model(inputs=inp, outputs=x, name=name) model.trainable = False return model def build_softargmax_2d(input_shape, rho=0., name=None): if name is None: name_sm = None else: name_sm = name + '_softmax' inp = Input(shape=input_shape) x = act_channel_softmax(inp, name=name_sm) if rho > 0: x = kl_divergence_regularizer(x, rho=rho) x_x = lin_interpolation_2d(x, axis=0) x_y = lin_interpolation_2d(x, axis=1) x = concatenate([x_x, x_y]) model = Model(inputs=inp, outputs=x, name=name) model.trainable = False return model def build_joints_probability(input_shape, name=None, verbose=0): inp = Input(shape=input_shape) x = inp x = AveragePooling2D((2, 2), strides=(1, 1))(x) x = Lambda(lambda x: 4*x)(x) x = GlobalMaxPooling2D()(x) x = Lambda(lambda x: K.expand_dims(x, axis=-1))(x) model = Model(inputs=inp, outputs=x, name=name) if verbose: model.summary() return model
[ "tensorflow.divide", "keras.models.Model" ]
[((6223, 6266), 'keras.models.Model', 'Model', (['(inputs + inputs3d)', '[p, v]'], {'name': 'name'}), '(inputs + inputs3d, [p, v], name=name)\n', (6228, 6266), False, 'from keras.models import Model\n'), ((6865, 6893), 'keras.models.Model', 'Model', ([], {'inputs': 'inp', 'outputs': 'x'}), '(inputs=inp, outputs=x)\n', (6870, 6893), False, 'from keras.models import Model\n'), ((8511, 8559), 'keras.models.Model', 'Model', ([], {'inputs': '[ys, yc, pc]', 'outputs': 'y', 'name': 'name'}), '(inputs=[ys, yc, pc], outputs=y, name=name)\n', (8516, 8559), False, 'from keras.models import Model\n'), ((8875, 8914), 'keras.models.Model', 'Model', ([], {'inputs': 'inp', 'outputs': 'x', 'name': 'name'}), '(inputs=inp, outputs=x, name=name)\n', (8880, 8914), False, 'from keras.models import Model\n'), ((9390, 9429), 'keras.models.Model', 'Model', ([], {'inputs': 'inp', 'outputs': 'x', 'name': 'name'}), '(inputs=inp, outputs=x, name=name)\n', (9395, 9429), False, 'from keras.models import Model\n'), ((9778, 9817), 'keras.models.Model', 'Model', ([], {'inputs': 'inp', 'outputs': 'x', 'name': 'name'}), '(inputs=inp, outputs=x, name=name)\n', (9783, 9817), False, 'from keras.models import Model\n'), ((5315, 5336), 'tensorflow.divide', 'tf.divide', (['x[0]', 'x[1]'], {}), '(x[0], x[1])\n', (5324, 5336), True, 'import tensorflow as tf\n'), ((7288, 7309), 'tensorflow.divide', 'tf.divide', (['x[0]', 'x[1]'], {}), '(x[0], x[1])\n', (7297, 7309), True, 'import tensorflow as tf\n')]
""" MIT License Copyright (c) 2021 <NAME> <<EMAIL>> This module belongs to https://github.com/arthur-bryan/puppeteer: A implementation of a botnet using Python on server (C&C) side and C on the puppets side. This module contains the class that represents the Database, with the responsible methods for create/update the database based on the server requests """ import sqlite3 from config import to_red class Database: """ Creates database (if not exists), then connect to it, defines a cursor and then creates the puppets table if not exists """ def __init__(self, filename): """ Args: filename (str): the path for the database """ try: self.conn = sqlite3.connect(filename, check_same_thread=False) self.conn.row_factory = lambda cursor, row: row[0] except sqlite3.Error as error: print(to_red(f"\n[ DATABASE ERROR ] {error} {filename}\n")) else: self.cursor = self.conn.cursor() self.create_table() def create_table(self): """ Creates (if not exists) the table to store the puppets infos """ try: self.cursor.execute( """ CREATE TABLE IF NOT EXISTS puppets ( ID INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, IP_ADDRESS VARCHAR(15), IS_CONNECTED INTEGER, AUTORUN_ENABLED INTEGER, OP_SYSTEM VARCHAR(15), ARCHITECTURE VARCHAR(15), KERNEL_RELEASE VARCHAR(30), HOSTNAME VARCHAR(20), USERNAME VARCHAR(20), LAST_CONNECTION DATE NOT NULL, HASH TEXT NOT NULL UNIQUE ); """) except sqlite3.Error as error: print(to_red(f"\n[ DATABASE ERROR ] {error}\n")) else: self.conn.commit() def add_puppet(self, puppet): """ Inserts the puppet information to the table As some puppet attributes are received as C string (terminated by '\x00'), these strings must be sliced to prevent sqlite store it as BLOB binary data Args: puppet (:obj: 'Puppet'): puppet to be added to database """ try: self.cursor.execute( """ INSERT INTO puppets ( IP_ADDRESS, IS_CONNECTED, AUTORUN_ENABLED, OP_SYSTEM, ARCHITECTURE, KERNEL_RELEASE, HOSTNAME, USERNAME, LAST_CONNECTION, HASH ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) """, (puppet.ip_address, puppet.is_connected, puppet.autorun_is_enabled, puppet.op_system[:-1], puppet.architecture[:-1], puppet.kernel_release[:-1], puppet.hostname[:-1], puppet.username[:-1], puppet.last_connection, puppet.id_hash) ) except ValueError as error: print(to_red(f"\n[ DATABASE ERROR ] {error} inserting\n")) else: self.conn.commit() def update_all_puppet_info(self, puppet): """ Update all information of the puppet Args: puppet (:obj: 'Puppet'): puppet with new values to replace the old ones """ try: self.cursor.execute( """ UPDATE puppets SET IP_ADDRESS = ?, IS_CONNECTED = ?, AUTORUN_ENABLED = ?, OP_SYSTEM = ?, ARCHITECTURE = ?, KERNEL_RELEASE = ?, HOSTNAME = ?, USERNAME = ?, LAST_CONNECTION = ?, HASH = ? WHERE HASH = ? """, (puppet.ip_address, puppet.is_connected, puppet.autorun_is_enabled, puppet.op_system[:-1], puppet.architecture[:-1], puppet.kernel_release[:-1], puppet.hostname[:-1], puppet.username[:-1], puppet.last_connection, puppet.id_hash, puppet.id_hash) ) except sqlite3.Error as error: print(to_red(f"\n[ DATABASE ERROR ] {error} updating\n")) else: self.conn.commit() def update_puppet_status(self, puppet, new_status): """ Updates a connection status of the puppet Args: puppet (:obj: 'Puppet'): puppet to be updated new_status (int): the new value for the status """ try: self.cursor.execute( """ UPDATE puppets SET IS_CONNECTED = ? WHERE HASH = ? 
""", (new_status, puppet.id_hash) ) except sqlite3.Error as error: print(to_red(f"\n[ DATABASE ERROR ] {error} update status\n")) else: self.conn.commit() def get_all_puppets(self): """ Fetches all puppets on database and returns them in a list of tuples Returns: puppets (:obj: 'list' of :obj: 'tuples'): list of tuples with puppet information (one tuple per puppet) """ try: puppets = self.cursor.execute("SELECT * FROM puppets;").fetchall() return puppets except sqlite3.Error as error: print(f"\n [ DATABASE ERROR ] {error}\n") def get_connected_puppets(self): """ Fetches all puppets on database that are currently connected to the server and returns them in a list of tuples Returns: puppets (:obj: 'list' of :obj: 'tuples'): list of tuples with the connected puppets information (one tuple per puppet) """ try: connected_puppets = self.cursor.execute( "SELECT * FROM puppets WHERE IS_CONNECTED = 1;" ).fetchall() return connected_puppets except sqlite3.Error as error: print(to_red(f"\n [ DATABASE ERROR ] {error}\n")) def get_puppets_hashes(self): """ Fetches and returns the hashes of all puppets on database Returns: puppets_hashes (:obj: 'tuple'): tuple containing the hashes of the puppets in database """ try: puppets_hashes = self.cursor.execute( "SELECT HASH FROM puppets;" ).fetchall() return puppets_hashes except sqlite3.Error as error: print(to_red(f"\n [ DATABASE ERROR ] {error}\n")) def disconnect_puppets_on_exit(self): """ Updates a connection status of all puppets to 0 when the server stops """ try: self.cursor.execute( """ UPDATE puppets SET IS_CONNECTED = 0 """ ) except sqlite3.Error as error: print(to_red(f"\n[ DATABASE ERROR ] {error}\n")) else: self.conn.commit()
[ "sqlite3.connect", "config.to_red" ]
[((763, 813), 'sqlite3.connect', 'sqlite3.connect', (['filename'], {'check_same_thread': '(False)'}), '(filename, check_same_thread=False)\n', (778, 813), False, 'import sqlite3\n'), ((934, 988), 'config.to_red', 'to_red', (['f"""\n[ DATABASE ERROR ] {error} {filename}\n"""'], {}), '(f"""\n[ DATABASE ERROR ] {error} {filename}\n""")\n', (940, 988), False, 'from config import to_red\n'), ((1917, 1960), 'config.to_red', 'to_red', (['f"""\n[ DATABASE ERROR ] {error}\n"""'], {}), '(f"""\n[ DATABASE ERROR ] {error}\n""")\n', (1923, 1960), False, 'from config import to_red\n'), ((3406, 3459), 'config.to_red', 'to_red', (['f"""\n[ DATABASE ERROR ] {error} inserting\n"""'], {}), '(f"""\n[ DATABASE ERROR ] {error} inserting\n""")\n', (3412, 3459), False, 'from config import to_red\n'), ((4792, 4844), 'config.to_red', 'to_red', (['f"""\n[ DATABASE ERROR ] {error} updating\n"""'], {}), '(f"""\n[ DATABASE ERROR ] {error} updating\n""")\n', (4798, 4844), False, 'from config import to_red\n'), ((5473, 5530), 'config.to_red', 'to_red', (['f"""\n[ DATABASE ERROR ] {error} update status\n"""'], {}), '(f"""\n[ DATABASE ERROR ] {error} update status\n""")\n', (5479, 5530), False, 'from config import to_red\n'), ((6690, 6734), 'config.to_red', 'to_red', (['f"""\n [ DATABASE ERROR ] {error}\n"""'], {}), '(f"""\n [ DATABASE ERROR ] {error}\n""")\n', (6696, 6734), False, 'from config import to_red\n'), ((7224, 7268), 'config.to_red', 'to_red', (['f"""\n [ DATABASE ERROR ] {error}\n"""'], {}), '(f"""\n [ DATABASE ERROR ] {error}\n""")\n', (7230, 7268), False, 'from config import to_red\n'), ((7643, 7686), 'config.to_red', 'to_red', (['f"""\n[ DATABASE ERROR ] {error}\n"""'], {}), '(f"""\n[ DATABASE ERROR ] {error}\n""")\n', (7649, 7686), False, 'from config import to_red\n')]
# -*- coding: utf-8 -*-
from flask import url_for
from flask_testing import TestCase

import thermos
from thermos.models import User, Bookmark


class ThermosTestCase(TestCase):

    def create_app(self):
        return thermos.create_app('test')

    def setUp(self):
        self.db = thermos.db
        self.db.create_all()
        self.client = self.app.test_client()
        u = User(username='test', email='<EMAIL>', password='<PASSWORD>')
        bm = Bookmark(user=u, url='http://www.example.com', tags='one,two,three')
        self.db.session.add(u)
        self.db.session.add(bm)
        self.db.session.commit()
        self.client.post(url_for('auth.login'),
                         data=dict(username='test', password='<PASSWORD>'))

    def tearDown(self):
        thermos.db.session.remove()
        thermos.db.drop_all()

    def test_delete_all_tags(self):
        response = self.client.post(
            url_for('bookmarks.edit_bookmark', bookmark_id=1),
            data=dict(
                url='http://test.example.com',
                tags=''
            ),
            follow_redirects=True
        )
        assert response.status_code == 200
        bm = Bookmark.query.first()
        assert not bm._tags
[ "thermos.db.session.remove", "thermos.models.User", "thermos.create_app", "thermos.db.drop_all", "flask.url_for", "thermos.models.Bookmark", "thermos.models.Bookmark.query.first" ]
[((224, 250), 'thermos.create_app', 'thermos.create_app', (['"""test"""'], {}), "('test')\n", (242, 250), False, 'import thermos\n'), ((405, 466), 'thermos.models.User', 'User', ([], {'username': '"""test"""', 'email': '"""<EMAIL>"""', 'password': '"""<PASSWORD>"""'}), "(username='test', email='<EMAIL>', password='<PASSWORD>')\n", (409, 466), False, 'from thermos.models import User, Bookmark\n'), ((480, 548), 'thermos.models.Bookmark', 'Bookmark', ([], {'user': 'u', 'url': '"""http://www.example.com"""', 'tags': '"""one,two,three"""'}), "(user=u, url='http://www.example.com', tags='one,two,three')\n", (488, 548), False, 'from thermos.models import User, Bookmark\n'), ((834, 861), 'thermos.db.session.remove', 'thermos.db.session.remove', ([], {}), '()\n', (859, 861), False, 'import thermos\n'), ((870, 891), 'thermos.db.drop_all', 'thermos.db.drop_all', ([], {}), '()\n', (889, 891), False, 'import thermos\n'), ((1263, 1285), 'thermos.models.Bookmark.query.first', 'Bookmark.query.first', ([], {}), '()\n', (1283, 1285), False, 'from thermos.models import User, Bookmark\n'), ((701, 722), 'flask.url_for', 'url_for', (['"""auth.login"""'], {}), "('auth.login')\n", (708, 722), False, 'from flask import url_for\n'), ((986, 1035), 'flask.url_for', 'url_for', (['"""bookmarks.edit_bookmark"""'], {'bookmark_id': '(1)'}), "('bookmarks.edit_bookmark', bookmark_id=1)\n", (993, 1035), False, 'from flask import url_for\n')]
# Copyright 2012 OpenStack Foundation # Copyright 2013 Nebula Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Compute v2 Security Group action implementations""" import logging import six from cliff import command from cliff import lister from cliff import show from novaclient.v1_1 import security_group_rules from openstackclient.common import parseractions from openstackclient.common import utils def _xform_security_group_rule(sgroup): info = {} info.update(sgroup) info.update( {'port_range': "%u:%u" % ( info.pop('from_port'), info.pop('to_port'), )} ) info['ip_range'] = info['ip_range']['cidr'] if info['ip_protocol'] == 'icmp': info['port_range'] = '' return info class CreateSecurityGroup(show.ShowOne): """Create a new security group""" log = logging.getLogger(__name__ + ".CreateSecurityGroup") def get_parser(self, prog_name): parser = super(CreateSecurityGroup, self).get_parser(prog_name) parser.add_argument( "name", metavar="<name>", help="New security group name", ) parser.add_argument( "--description", metavar="<description>", help="Security group description", ) return parser def take_action(self, parsed_args): self.log.debug("take_action(%s)" % parsed_args) compute_client = self.app.client_manager.compute data = compute_client.security_groups.create( parsed_args.name, parsed_args.description, ) info = {} info.update(data._info) return zip(*sorted(six.iteritems(info))) class DeleteSecurityGroup(command.Command): """Delete a security group""" log = logging.getLogger(__name__ + '.DeleteSecurityGroup') def get_parser(self, prog_name): parser = super(DeleteSecurityGroup, self).get_parser(prog_name) parser.add_argument( 'group', metavar='<group>', help='Name or ID of security group to delete', ) return parser def take_action(self, parsed_args): self.log.debug('take_action(%s)' % parsed_args) compute_client = self.app.client_manager.compute data = utils.find_resource( compute_client.security_groups, parsed_args.group, ) compute_client.security_groups.delete(data.id) return class ListSecurityGroup(lister.Lister): """List all security groups""" log = logging.getLogger(__name__ + ".ListSecurityGroup") def get_parser(self, prog_name): parser = super(ListSecurityGroup, self).get_parser(prog_name) parser.add_argument( '--all-projects', action='store_true', default=False, help='Display information from all projects (admin only)', ) return parser def take_action(self, parsed_args): def _get_project(project_id): try: return getattr(project_hash[project_id], 'name', project_id) except KeyError: return project_id self.log.debug("take_action(%s)" % parsed_args) compute_client = self.app.client_manager.compute columns = ( "ID", "Name", "Description", ) column_headers = columns if parsed_args.all_projects: # TODO(dtroyer): Translate Project_ID to Project (name) columns = columns + ('Tenant ID',) column_headers = column_headers + ('Project',) search = {'all_tenants': parsed_args.all_projects} data = compute_client.security_groups.list(search_opts=search) projects = 
self.app.client_manager.identity.projects.list() project_hash = {} for project in projects: project_hash[project.id] = project return (column_headers, (utils.get_item_properties( s, columns, formatters={'Tenant ID': _get_project}, ) for s in data)) class SetSecurityGroup(show.ShowOne): """Set security group properties""" log = logging.getLogger(__name__ + '.SetSecurityGroup') def get_parser(self, prog_name): parser = super(SetSecurityGroup, self).get_parser(prog_name) parser.add_argument( 'group', metavar='<group>', help='Name or ID of security group to change', ) parser.add_argument( '--name', metavar='<new-name>', help='New security group name', ) parser.add_argument( "--description", metavar="<description>", help="New security group name", ) return parser def take_action(self, parsed_args): self.log.debug('take_action(%s)' % parsed_args) compute_client = self.app.client_manager.compute data = utils.find_resource( compute_client.security_groups, parsed_args.group, ) if parsed_args.name: data.name = parsed_args.name if parsed_args.description: data.description = parsed_args.description info = {} info.update(compute_client.security_groups.update( data, data.name, data.description, )._info) if info: return zip(*sorted(six.iteritems(info))) else: return ({}, {}) class ShowSecurityGroup(show.ShowOne): """Show a specific security group""" log = logging.getLogger(__name__ + '.ShowSecurityGroup') def get_parser(self, prog_name): parser = super(ShowSecurityGroup, self).get_parser(prog_name) parser.add_argument( 'group', metavar='<group>', help='Name or ID of security group to change', ) return parser def take_action(self, parsed_args): self.log.debug('take_action(%s)' % parsed_args) compute_client = self.app.client_manager.compute info = {} info.update(utils.find_resource( compute_client.security_groups, parsed_args.group, )._info) rules = [] for r in info['rules']: rules.append(utils.format_dict(_xform_security_group_rule(r))) # Format rules into a list of strings info.update( {'rules': rules} ) # Map 'tenant_id' column to 'project_id' info.update( {'project_id': info.pop('tenant_id')} ) return zip(*sorted(six.iteritems(info))) class CreateSecurityGroupRule(show.ShowOne): """Create a new security group rule""" log = logging.getLogger(__name__ + ".CreateSecurityGroupRule") def get_parser(self, prog_name): parser = super(CreateSecurityGroupRule, self).get_parser(prog_name) parser.add_argument( 'group', metavar='<group>', help='Create rule in this security group', ) parser.add_argument( "--proto", metavar="<proto>", default="tcp", help="IP protocol (icmp, tcp, udp; default: tcp)", ) parser.add_argument( "--src-ip", metavar="<ip-address>", default="0.0.0.0/0", help="Source IP (may use CIDR notation; default: 0.0.0.0/0)", ) parser.add_argument( "--dst-port", metavar="<port-range>", action=parseractions.RangeAction, help="Destination port, may be a range: 137:139 (default: 0; " "only required for proto tcp and udp)", ) return parser def take_action(self, parsed_args): self.log.debug("take_action(%s)" % parsed_args) compute_client = self.app.client_manager.compute group = utils.find_resource( compute_client.security_groups, parsed_args.group, ) from_port, to_port = parsed_args.dst_port data = compute_client.security_group_rules.create( group.id, parsed_args.proto, from_port, to_port, parsed_args.src_ip, ) info = _xform_security_group_rule(data._info) return zip(*sorted(six.iteritems(info))) class DeleteSecurityGroupRule(command.Command): """Delete a security group rule""" log = 
logging.getLogger(__name__ + '.DeleteSecurityGroupRule') def get_parser(self, prog_name): parser = super(DeleteSecurityGroupRule, self).get_parser(prog_name) parser.add_argument( 'group', metavar='<group>', help='Create rule in this security group', ) parser.add_argument( "--proto", metavar="<proto>", default="tcp", help="IP protocol (icmp, tcp, udp; default: tcp)", ) parser.add_argument( "--src-ip", metavar="<ip-address>", default="0.0.0.0/0", help="Source IP (may use CIDR notation; default: 0.0.0.0/0)", ) parser.add_argument( "--dst-port", metavar="<port-range>", action=parseractions.RangeAction, help="Destination port, may be a range: 137:139 (default: 0; " "only required for proto tcp and udp)", ) return parser def take_action(self, parsed_args): self.log.debug('take_action(%s)' % parsed_args) compute_client = self.app.client_manager.compute group = utils.find_resource( compute_client.security_groups, parsed_args.group, ) from_port, to_port = parsed_args.dst_port # sigh...delete by ID? compute_client.security_group_rules.delete( group.id, parsed_args.proto, from_port, to_port, parsed_args.src_ip, ) return class ListSecurityGroupRule(lister.Lister): """List all security group rules""" log = logging.getLogger(__name__ + ".ListSecurityGroupRule") def get_parser(self, prog_name): parser = super(ListSecurityGroupRule, self).get_parser(prog_name) parser.add_argument( 'group', metavar='<group>', help='Create rule in this security group', ) return parser def take_action(self, parsed_args): self.log.debug("take_action(%s)" % parsed_args) compute_client = self.app.client_manager.compute group = utils.find_resource( compute_client.security_groups, parsed_args.group, ) # Argh, the rules are not Resources... rules = [] for rule in group.rules: rules.append(security_group_rules.SecurityGroupRule( compute_client.security_group_rules, _xform_security_group_rule(rule), )) columns = column_headers = ( "ID", "IP Protocol", "IP Range", "Port Range", ) return (column_headers, (utils.get_item_properties( s, columns, ) for s in rules))
[ "six.iteritems", "openstackclient.common.utils.get_item_properties", "openstackclient.common.utils.find_resource", "logging.getLogger" ]
[((1378, 1430), 'logging.getLogger', 'logging.getLogger', (["(__name__ + '.CreateSecurityGroup')"], {}), "(__name__ + '.CreateSecurityGroup')\n", (1395, 1430), False, 'import logging\n'), ((2326, 2378), 'logging.getLogger', 'logging.getLogger', (["(__name__ + '.DeleteSecurityGroup')"], {}), "(__name__ + '.DeleteSecurityGroup')\n", (2343, 2378), False, 'import logging\n'), ((3095, 3145), 'logging.getLogger', 'logging.getLogger', (["(__name__ + '.ListSecurityGroup')"], {}), "(__name__ + '.ListSecurityGroup')\n", (3112, 3145), False, 'import logging\n'), ((4766, 4815), 'logging.getLogger', 'logging.getLogger', (["(__name__ + '.SetSecurityGroup')"], {}), "(__name__ + '.SetSecurityGroup')\n", (4783, 4815), False, 'import logging\n'), ((6193, 6243), 'logging.getLogger', 'logging.getLogger', (["(__name__ + '.ShowSecurityGroup')"], {}), "(__name__ + '.ShowSecurityGroup')\n", (6210, 6243), False, 'import logging\n'), ((7344, 7400), 'logging.getLogger', 'logging.getLogger', (["(__name__ + '.CreateSecurityGroupRule')"], {}), "(__name__ + '.CreateSecurityGroupRule')\n", (7361, 7400), False, 'import logging\n'), ((9080, 9136), 'logging.getLogger', 'logging.getLogger', (["(__name__ + '.DeleteSecurityGroupRule')"], {}), "(__name__ + '.DeleteSecurityGroupRule')\n", (9097, 9136), False, 'import logging\n'), ((10748, 10802), 'logging.getLogger', 'logging.getLogger', (["(__name__ + '.ListSecurityGroupRule')"], {}), "(__name__ + '.ListSecurityGroupRule')\n", (10765, 10802), False, 'import logging\n'), ((2831, 2901), 'openstackclient.common.utils.find_resource', 'utils.find_resource', (['compute_client.security_groups', 'parsed_args.group'], {}), '(compute_client.security_groups, parsed_args.group)\n', (2850, 2901), False, 'from openstackclient.common import utils\n'), ((5553, 5623), 'openstackclient.common.utils.find_resource', 'utils.find_resource', (['compute_client.security_groups', 'parsed_args.group'], {}), '(compute_client.security_groups, parsed_args.group)\n', (5572, 5623), False, 'from openstackclient.common import utils\n'), ((8522, 8592), 'openstackclient.common.utils.find_resource', 'utils.find_resource', (['compute_client.security_groups', 'parsed_args.group'], {}), '(compute_client.security_groups, parsed_args.group)\n', (8541, 8592), False, 'from openstackclient.common import utils\n'), ((10258, 10328), 'openstackclient.common.utils.find_resource', 'utils.find_resource', (['compute_client.security_groups', 'parsed_args.group'], {}), '(compute_client.security_groups, parsed_args.group)\n', (10277, 10328), False, 'from openstackclient.common import utils\n'), ((11254, 11324), 'openstackclient.common.utils.find_resource', 'utils.find_resource', (['compute_client.security_groups', 'parsed_args.group'], {}), '(compute_client.security_groups, parsed_args.group)\n', (11273, 11324), False, 'from openstackclient.common import utils\n'), ((4522, 4599), 'openstackclient.common.utils.get_item_properties', 'utils.get_item_properties', (['s', 'columns'], {'formatters': "{'Tenant ID': _get_project}"}), "(s, columns, formatters={'Tenant ID': _get_project})\n", (4547, 4599), False, 'from openstackclient.common import utils\n'), ((6717, 6787), 'openstackclient.common.utils.find_resource', 'utils.find_resource', (['compute_client.security_groups', 'parsed_args.group'], {}), '(compute_client.security_groups, parsed_args.group)\n', (6736, 6787), False, 'from openstackclient.common import utils\n'), ((11835, 11872), 'openstackclient.common.utils.get_item_properties', 'utils.get_item_properties', (['s', 'columns'], 
{}), '(s, columns)\n', (11860, 11872), False, 'from openstackclient.common import utils\n'), ((2213, 2232), 'six.iteritems', 'six.iteritems', (['info'], {}), '(info)\n', (2226, 2232), False, 'import six\n'), ((7221, 7240), 'six.iteritems', 'six.iteritems', (['info'], {}), '(info)\n', (7234, 7240), False, 'import six\n'), ((8958, 8977), 'six.iteritems', 'six.iteritems', (['info'], {}), '(info)\n', (8971, 8977), False, 'import six\n'), ((6036, 6055), 'six.iteritems', 'six.iteritems', (['info'], {}), '(info)\n', (6049, 6055), False, 'import six\n')]
from tensorflow.keras import backend as K


def _get_accuracy(y_true, y_pred, mask, sparse_target=False):
    y_pred = K.argmax(y_pred, -1)
    if sparse_target:
        y_true = K.cast(y_true[:, :, 0], K.dtype(y_pred))
    else:
        y_true = K.argmax(y_true, -1)
    judge = K.cast(K.equal(y_pred, y_true), K.floatx())
    if mask is None:
        return K.mean(judge)
    else:
        mask = K.cast(mask, K.floatx())
        return K.sum(judge * mask) / K.sum(mask)


def crf_viterbi_accuracy(y_true, y_pred):
    '''Use the Viterbi algorithm to get the best path, and compute its accuracy.
    `y_pred` must be an output from CRF.'''
    crf, idx = y_pred._keras_history[:2]
    X = crf._inbound_nodes[idx].input_tensors[0]
    mask = crf._inbound_nodes[idx].input_masks[0]
    y_pred = crf.viterbi_decoding(X, mask)
    return _get_accuracy(y_true, y_pred, mask, crf.sparse_target)


def crf_marginal_accuracy(y_true, y_pred):
    '''Use time-wise marginal argmax as prediction.
    `y_pred` must be an output from CRF with `learn_mode="marginal"`.'''
    crf, idx = y_pred._keras_history[:2]
    X = crf._inbound_nodes[idx].input_tensors[0]
    mask = crf._inbound_nodes[idx].input_masks[0]
    y_pred = crf.get_marginal_prob(X, mask)
    return _get_accuracy(y_true, y_pred, mask, crf.sparse_target)


def crf_accuracy(y_true, y_pred):
    '''Get the default accuracy based on the CRF `test_mode`.'''
    crf, idx = y_pred._keras_history[:2]
    if crf.test_mode == 'viterbi':
        return crf_viterbi_accuracy(y_true, y_pred)
    else:
        return crf_marginal_accuracy(y_true, y_pred)
[ "tensorflow.keras.backend.sum", "tensorflow.keras.backend.dtype", "tensorflow.keras.backend.argmax", "tensorflow.keras.backend.floatx", "tensorflow.keras.backend.mean", "tensorflow.keras.backend.equal" ]
[((119, 139), 'tensorflow.keras.backend.argmax', 'K.argmax', (['y_pred', '(-1)'], {}), '(y_pred, -1)\n', (127, 139), True, 'from tensorflow.keras import backend as K\n'), ((247, 267), 'tensorflow.keras.backend.argmax', 'K.argmax', (['y_true', '(-1)'], {}), '(y_true, -1)\n', (255, 267), True, 'from tensorflow.keras import backend as K\n'), ((287, 310), 'tensorflow.keras.backend.equal', 'K.equal', (['y_pred', 'y_true'], {}), '(y_pred, y_true)\n', (294, 310), True, 'from tensorflow.keras import backend as K\n'), ((312, 322), 'tensorflow.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (320, 322), True, 'from tensorflow.keras import backend as K\n'), ((360, 373), 'tensorflow.keras.backend.mean', 'K.mean', (['judge'], {}), '(judge)\n', (366, 373), True, 'from tensorflow.keras import backend as K\n'), ((203, 218), 'tensorflow.keras.backend.dtype', 'K.dtype', (['y_pred'], {}), '(y_pred)\n', (210, 218), True, 'from tensorflow.keras import backend as K\n'), ((412, 422), 'tensorflow.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (420, 422), True, 'from tensorflow.keras import backend as K\n'), ((439, 458), 'tensorflow.keras.backend.sum', 'K.sum', (['(judge * mask)'], {}), '(judge * mask)\n', (444, 458), True, 'from tensorflow.keras import backend as K\n'), ((461, 472), 'tensorflow.keras.backend.sum', 'K.sum', (['mask'], {}), '(mask)\n', (466, 472), True, 'from tensorflow.keras import backend as K\n')]
import pandas as pd import os import time import numpy as np from deriveSummaryDUC import read_simMats, cluster_mat, oracle_per_cluster import pickle from collections import defaultdict from utils import offset_str2list, offset_decreaseSentOffset, insert_string def find_abstractive_target(predictions_topic_cluster, alignments, topic): cluster_spans = list(predictions_topic_cluster['docSpanText'].values) alignments_cluster = alignments[(alignments['topic']==topic) & (alignments['docSpanText'].isin(cluster_spans))] aligned_summ_span_cands = list(alignments_cluster['summarySpanText'].drop_duplicates().values) summ_span_cands_score = [] for summ_span in aligned_summ_span_cands: alignments_cluster_summ_span = alignments_cluster[alignments_cluster['summarySpanText'] == summ_span] summ_span_cands_score.append(alignments_cluster_summ_span['pred_prob'].sum()) return aligned_summ_span_cands[np.argmax(summ_span_cands_score)] def add_OIE_special_tok(docSpanOffsets, docSentCharIdx, sent): # document_tmp = document[:] span_offsets = offset_str2list(docSpanOffsets) offsets = offset_decreaseSentOffset(docSentCharIdx, span_offsets) # assume we have max 2 parts for offset in offsets[::-1]: # [::-1] start from the end so the remain offsets won't be shifted sent = insert_string(sent, offset[1], ' > ') sent = insert_string(sent, offset[0], ' < ') return sent ################################## ###### main ############## ################################## if __name__ == "__main__": MAX_SENT = 100 DATASETS = ['DUC2004']#['TAC2008','TAC2009','TAC2010'] SET_TYPE = 'test' CLUSTERING = True SUMM_LEN = 100 MAX_CLUSTERS = 10 DUC2004_Benchmark = True FULL_SENT = False if FULL_SENT: full_sent_flag = '_full_sent' else: full_sent_flag = '' sys_model = 'roberta' model_name = 'greedyMaxRouge' sys_checkpoint = 'checkpoint-1200' # 'checkpoint-180'#'checkpoint-540'#'checkpoint-1020'#'checkpoint-540'#'checkpoint-600' #'checkpoint-1140'#'checkpoint-240'#'checkpoint-180' # 'checkpoint-1080' sys_folder = 'OIE_TAC2008_TAC2009_2010_highlighter_CDLM_greedyMaxRouge_no_alignment_filter_negative_over_sample_positive_span_classifier_head_fixed' ##DUC2004 if DUC2004_Benchmark: sys_checkpoint = 'checkpoint-1500' # 'checkpoint-180'#'checkpoint-540'#'checkpoint-1020'#'checkpoint-540'#'checkpoint-600' #'checkpoint-1140'#'checkpoint-240'#'checkpoint-180' # 'checkpoint-1080' sys_folder = 'OIE_DUC2003_highlighter_CDLM_greedyMaxRouge_no_alignment_filter_negative_over_sample_positive_span_classifier_head_fixed_finetuned_TAC8910' empty = 0 analysis_list = [] fusion_text = [] fusion_target = [] cluster_metadata = [] ##full full_fixed = 'fixed' if DATASETS[0] == 'TAC2011': full_fixed = 'full' if DUC2004_Benchmark: if DATASETS[0] == 'DUC2004': metadata = pd.read_csv( './OIE_highlights/{}_{}_CDLM_allAlignments_{}_truncated_metadata.csv'.format( '_'.join(DATASETS), SET_TYPE, full_fixed)) else: metadata = pd.read_csv( './OIE_highlights/{}_{}_CDLM_greedyMaxRouge_no_alignment_{}_truncated_metadata.csv'.format( '_'.join(DATASETS), SET_TYPE, full_fixed)) else: metadata = pd.read_csv( './OIE_highlights/{}_{}_CDLM_allAlignments_{}_truncated_metadata.csv'.format( '_'.join(DATASETS), SET_TYPE,full_fixed)) predictions = pd.read_csv( './models/{}/{}/{}_{}_results_None.csv'.format(sys_folder, sys_checkpoint, SET_TYPE, '_'.join(DATASETS))) assert (len(predictions) == len(metadata)) metadata.insert(2, "prediction", predictions['prediction']) predictions = metadata for SET in DATASETS: alignments = pd.read_csv( './dev{}_checkpoint-2000_negative.csv'.format(SET)) 
sys_summary_path = './{}_system_summaries/{}/{}_'.format(SET, sys_folder, sys_checkpoint) + time.strftime( "%Y%m%d-%H%M%S") + '/' data_path = './data/{}/'.format(SET) gold_summary_path = data_path + 'summaries/' for topic in os.listdir(data_path): print(topic) if topic == 'summaries': continue if SET.startswith('TAC'): topic = topic[:-3] + topic[-2:] summary = '' predictions_topic = predictions[predictions['topic'] == topic] if DUC2004_Benchmark: predictions_topic = predictions_topic[predictions_topic['prediction'] >= 0.4] else: predictions_topic = predictions_topic[predictions_topic['prediction'] >= 0.04] predictions_topic = predictions_topic.sort_values(by=['prediction'], ascending=False) if len(predictions_topic) == 0: empty += 1 continue if CLUSTERING: simMat = read_simMats(topic, predictions_topic, SET) cluster_mat(simMat, predictions_topic['simMat_idx'].values, predictions_topic) oracle_per_cluster(SET, gold_summary_path, topic, predictions_topic, MAX_CLUSTERS) allowed_clusters = list( predictions_topic.sort_values(by=['cluster_size', 'inFile_sentIdx'], ascending=[False, True])[ 'cluster_idx'].drop_duplicates(keep="first").values)[:MAX_CLUSTERS] selected_spans = [] summary = ' ' for allowed_cluster_idx in allowed_clusters: predictions_topic_cluster = predictions_topic[ predictions_topic['cluster_idx'] == allowed_cluster_idx] predictions_topic_cluster = predictions_topic_cluster.sort_values(by=['prediction'], ascending=False) if len(predictions_topic_cluster) > 0: if FULL_SENT: predictions_topic_cluster['docSentText_special_tokens'] = predictions_topic_cluster.apply(lambda x: add_OIE_special_tok(x['docSpanOffsets'], x['docSentCharIdx'], x['docSentText']), axis=1) fusion_text.append( '<s> ' + ' </s> <s> '.join( list(predictions_topic_cluster['docSentText_special_tokens'].values)) + ' </s>') else: fusion_text.append( '<s> ' + ' </s> <s> '.join(list(predictions_topic_cluster['docSpanText'].values)) + ' </s>') fusion_target.append(find_abstractive_target(predictions_topic_cluster, alignments, topic)) cluster_metadata.append([topic, list(predictions_topic_cluster.index)]) assert (predictions['docSpanText'].values[predictions_topic_cluster.index[0]] == predictions_topic_cluster['docSpanText'].values[0]) if DUC2004_Benchmark: out_dir = 'fusion_data/DUC2004{}/{}/'.format(full_sent_flag,model_name) else: out_dir = 'fusion_data/TAC2011{}/'.format(model_name) if not os.path.exists(out_dir): os.makedirs(out_dir) cluster_metadata_df = pd.DataFrame(cluster_metadata, columns=['topic', 'cluster_indexes']) cluster_metadata_df.to_csv('{}/cluster_metadata_{}.csv'.format(out_dir,'_'.join(DATASETS))) if SET_TYPE == 'dev': SET_TYPE = 'val' with open('{}/{}.source'.format(out_dir, SET_TYPE), 'w') as f: f.write('\n'.join(fusion_text).replace('...', ' ')) with open('{}/{}.target'.format(out_dir, SET_TYPE), 'w') as f: f.write('\n'.join(fusion_target).replace('...', ' '))
[ "pandas.DataFrame", "utils.offset_decreaseSentOffset", "os.makedirs", "numpy.argmax", "deriveSummaryDUC.cluster_mat", "deriveSummaryDUC.oracle_per_cluster", "os.path.exists", "time.strftime", "utils.insert_string", "deriveSummaryDUC.read_simMats", "utils.offset_str2list", "os.listdir" ]
[((1114, 1145), 'utils.offset_str2list', 'offset_str2list', (['docSpanOffsets'], {}), '(docSpanOffsets)\n', (1129, 1145), False, 'from utils import offset_str2list, offset_decreaseSentOffset, insert_string\n'), ((1161, 1216), 'utils.offset_decreaseSentOffset', 'offset_decreaseSentOffset', (['docSentCharIdx', 'span_offsets'], {}), '(docSentCharIdx, span_offsets)\n', (1186, 1216), False, 'from utils import offset_str2list, offset_decreaseSentOffset, insert_string\n'), ((7917, 7985), 'pandas.DataFrame', 'pd.DataFrame', (['cluster_metadata'], {'columns': "['topic', 'cluster_indexes']"}), "(cluster_metadata, columns=['topic', 'cluster_indexes'])\n", (7929, 7985), True, 'import pandas as pd\n'), ((958, 990), 'numpy.argmax', 'np.argmax', (['summ_span_cands_score'], {}), '(summ_span_cands_score)\n', (967, 990), True, 'import numpy as np\n'), ((1373, 1410), 'utils.insert_string', 'insert_string', (['sent', 'offset[1]', '""" > """'], {}), "(sent, offset[1], ' > ')\n", (1386, 1410), False, 'from utils import offset_str2list, offset_decreaseSentOffset, insert_string\n'), ((1427, 1464), 'utils.insert_string', 'insert_string', (['sent', 'offset[0]', '""" < """'], {}), "(sent, offset[0], ' < ')\n", (1440, 1464), False, 'from utils import offset_str2list, offset_decreaseSentOffset, insert_string\n'), ((4657, 4678), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (4667, 4678), False, 'import os\n'), ((7829, 7852), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (7843, 7852), False, 'import os\n'), ((7863, 7883), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (7874, 7883), False, 'import os\n'), ((4468, 4498), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (4481, 4498), False, 'import time\n'), ((5474, 5517), 'deriveSummaryDUC.read_simMats', 'read_simMats', (['topic', 'predictions_topic', 'SET'], {}), '(topic, predictions_topic, SET)\n', (5486, 5517), False, 'from deriveSummaryDUC import read_simMats, cluster_mat, oracle_per_cluster\n'), ((5535, 5613), 'deriveSummaryDUC.cluster_mat', 'cluster_mat', (['simMat', "predictions_topic['simMat_idx'].values", 'predictions_topic'], {}), "(simMat, predictions_topic['simMat_idx'].values, predictions_topic)\n", (5546, 5613), False, 'from deriveSummaryDUC import read_simMats, cluster_mat, oracle_per_cluster\n'), ((5633, 5719), 'deriveSummaryDUC.oracle_per_cluster', 'oracle_per_cluster', (['SET', 'gold_summary_path', 'topic', 'predictions_topic', 'MAX_CLUSTERS'], {}), '(SET, gold_summary_path, topic, predictions_topic,\n MAX_CLUSTERS)\n', (5651, 5719), False, 'from deriveSummaryDUC import read_simMats, cluster_mat, oracle_per_cluster\n')]
import json
from collections import Counter


class Encoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, set):
            return list(obj)
        if isinstance(obj, type({}.items())):
            return {str(k): v for k, v in obj}
        return json.JSONEncoder.default(self, obj)


def is_probably_equal(arg1: object, arg2: object) -> bool:
    if isinstance(arg1, dict):
        arg1 = {str(k): v for k, v in arg1.items()}
    if isinstance(arg2, dict):
        arg2 = {str(k): v for k, v in arg2.items()}
    c_a = Counter(arg1) if isinstance(arg1, str) else \
        Counter(str(json.dumps(arg1, cls=Encoder)))
    c_b = Counter(arg2) if isinstance(arg2, str) else \
        Counter(str(json.dumps(arg2, cls=Encoder)))
    return c_a == c_b


def parse_babylonian_data(fp):
    item_sep = '################################'
    atts_sep = '================================'
    with open(fp, encoding='utf-8') as fh:
        s = fh.read()
    items = [itm for itm in s.split(item_sep) if itm.strip()]
    for itm in items:
        atts_str, grammar_str = itm.split(atts_sep)
        yield parse_bab_data_atts(atts_str), grammar_str


def parse_bab_data_atts(atts_str):
    lines = atts_str.split('\n')
    atts = {line.split(':', 1)[0]: line.split(':', 1)[1].strip() for line in lines if line}
    for key in atts:
        if key in ['outcomes', 'inputs']:
            atts[key] = atts[key].strip().split(';')
        elif key in ['loads']:
            atts[key] = parse_bool(atts[key].strip())
        else:
            atts[key] = atts[key].strip()
    return atts


def parse_bool(s):
    if s.lower() == 'true':
        return True
    elif s.lower() == 'false':
        return False
    else:
        raise ValueError(f'Incorrect boolean value: {s}')


class Underscore:
    pass


class FakeToken:
    def __init__(self, orth, lemma, pos):
        self.orth = orth
        self.lemma = lemma
        self.pos = pos
        self._ = Underscore()


class FakeDocument:
    def __init__(self, tokens):
        self.tokens = tokens
        self.pos_ = 0

    def __iter__(self):
        return iter(self.tokens)

    def __len__(self):
        return len(self.tokens)

    def __getitem__(self, item):
        if isinstance(item, int):
            return self.tokens[item]
        else:
            return FakeDocument([t for t in self.tokens[item]])
[ "collections.Counter", "json.JSONEncoder.default", "json.dumps" ]
[((277, 312), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (301, 312), False, 'import json\n'), ((550, 563), 'collections.Counter', 'Counter', (['arg1'], {}), '(arg1)\n', (557, 563), False, 'from collections import Counter\n'), ((658, 671), 'collections.Counter', 'Counter', (['arg2'], {}), '(arg2)\n', (665, 671), False, 'from collections import Counter\n'), ((616, 645), 'json.dumps', 'json.dumps', (['arg1'], {'cls': 'Encoder'}), '(arg1, cls=Encoder)\n', (626, 645), False, 'import json\n'), ((724, 753), 'json.dumps', 'json.dumps', (['arg2'], {'cls': 'Encoder'}), '(arg2, cls=Encoder)\n', (734, 753), False, 'import json\n')]
# Generated by Django 3.0.3 on 2020-02-27 08:03

from django.db import migrations, models
import sortedm2m.fields


class Migration(migrations.Migration):

    dependencies = [
        ('orchestrator', '0003_v022_1'),
    ]

    operations = [
        migrations.AlterField(
            model_name='filetrigger',
            name='bot',
            field=models.ForeignKey(help_text='Select the bot for this trigger.', null=True, on_delete=models.deletion.PROTECT, to='orchestrator.Bot'),
        ),
        migrations.AlterField(
            model_name='scheduletrigger',
            name='bot',
            field=models.ForeignKey(help_text='Select the bot for this trigger.', null=True, on_delete=models.deletion.PROTECT, to='orchestrator.Bot'),
        ),
        migrations.AlterField(
            model_name='emailimaptrigger',
            name='bot',
            field=models.ForeignKey(help_text='Select the bot for this trigger.', null=True, on_delete=models.deletion.PROTECT, to='orchestrator.Bot'),
        ),
        migrations.AlterField(
            model_name='emailoutlooktrigger',
            name='bot',
            field=models.ForeignKey(help_text='Select the bot for this trigger.', null=True, on_delete=models.deletion.PROTECT, to='orchestrator.Bot'),
        ),
        migrations.AlterField(
            model_name='apitrigger',
            name='bot',
            field=models.ForeignKey(help_text='Select the bot for this trigger.', null=True, on_delete=models.deletion.PROTECT, to='orchestrator.Bot'),
        ),
        migrations.AddField(
            model_name='apitrigger',
            name='bots',
            field=sortedm2m.fields.SortedManyToManyField(help_text='Select the bots for this trigger.', related_name='api_trigger_bot', to='orchestrator.Bot'),
        ),
        migrations.AddField(
            model_name='emailimaptrigger',
            name='bots',
            field=sortedm2m.fields.SortedManyToManyField(help_text='Select the bots for this trigger.', related_name='email_imap_trigger_bot', to='orchestrator.Bot'),
        ),
        migrations.AddField(
            model_name='emailoutlooktrigger',
            name='bots',
            field=sortedm2m.fields.SortedManyToManyField(help_text='Select the bots for this trigger.', related_name='email_outlook_trigger_bot', to='orchestrator.Bot'),
        ),
        migrations.AddField(
            model_name='filetrigger',
            name='bots',
            field=sortedm2m.fields.SortedManyToManyField(help_text='Select the bots for this trigger.', related_name='file_trigger_bot', to='orchestrator.Bot'),
        ),
        migrations.AddField(
            model_name='scheduletrigger',
            name='bots',
            field=sortedm2m.fields.SortedManyToManyField(help_text='Select the bots for this trigger.', related_name='schedule_trigger_bot', to='orchestrator.Bot'),
        ),
    ]
[ "django.db.models.ForeignKey" ]
[((355, 491), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'help_text': '"""Select the bot for this trigger."""', 'null': '(True)', 'on_delete': 'models.deletion.PROTECT', 'to': '"""orchestrator.Bot"""'}), "(help_text='Select the bot for this trigger.', null=True,\n on_delete=models.deletion.PROTECT, to='orchestrator.Bot')\n", (372, 491), False, 'from django.db import migrations, models\n'), ((615, 751), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'help_text': '"""Select the bot for this trigger."""', 'null': '(True)', 'on_delete': 'models.deletion.PROTECT', 'to': '"""orchestrator.Bot"""'}), "(help_text='Select the bot for this trigger.', null=True,\n on_delete=models.deletion.PROTECT, to='orchestrator.Bot')\n", (632, 751), False, 'from django.db import migrations, models\n'), ((876, 1012), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'help_text': '"""Select the bot for this trigger."""', 'null': '(True)', 'on_delete': 'models.deletion.PROTECT', 'to': '"""orchestrator.Bot"""'}), "(help_text='Select the bot for this trigger.', null=True,\n on_delete=models.deletion.PROTECT, to='orchestrator.Bot')\n", (893, 1012), False, 'from django.db import migrations, models\n'), ((1140, 1276), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'help_text': '"""Select the bot for this trigger."""', 'null': '(True)', 'on_delete': 'models.deletion.PROTECT', 'to': '"""orchestrator.Bot"""'}), "(help_text='Select the bot for this trigger.', null=True,\n on_delete=models.deletion.PROTECT, to='orchestrator.Bot')\n", (1157, 1276), False, 'from django.db import migrations, models\n'), ((1395, 1531), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'help_text': '"""Select the bot for this trigger."""', 'null': '(True)', 'on_delete': 'models.deletion.PROTECT', 'to': '"""orchestrator.Bot"""'}), "(help_text='Select the bot for this trigger.', null=True,\n on_delete=models.deletion.PROTECT, to='orchestrator.Bot')\n", (1412, 1531), False, 'from django.db import migrations, models\n')]
import pandas as pd import re from nltk import word_tokenize def get_list_small_words(list_of_titles, word_size): # Create a list of acronyms by selecting all words that are `word_size` or less letters word_list = [] for row in list_of_titles: [word_list.append(x) for x in row if len(x) < word_size] return list(set(word_list)) def load_dict(filename): # Read file to dictionary return pd.read_csv(filename, encoding='latin-1')[['keyword','type']].set_index('keyword')['type'].to_dict() def split_to_subject_degree(list_of_titles, word_list): # Iterate through all the degree titles. Process each word: # 1. If the word = 'in', then add it as a `degree_row`, remove that word from the # `subject_row`, and stop processing word # 2. If the word is in the acronym list or the manual dictionary, then add it as a # `degree_row`, remove that word from the `subject_row`, and stop processing word # 3. If the word is not 1 or 2, stop processing the word # Load these dictionaries from the `configuration_files` folder degree_type_word_dict = load_dict('functions/configuration_files/degree_type_word_dict.csv') degree_type_phrase_dict = load_dict('functions/configuration_files/degree_type_phrase_dict.csv') degree_name_list = [] subject_name_list = [] for row in list_of_titles: degree_row = [] subject_row = row for token in row: if token == 'in': degree_row.append(token) subject_row = subject_row[1:] break elif token in list(degree_type_word_dict.keys()) + word_list: degree_row.append(token) subject_row = subject_row[1:] else: break degree_name_list.append(' '.join(degree_row)) subject_name_list.append(' '.join(subject_row)) return degree_name_list, subject_name_list def tag_with_degree_category(list_of_degrees, list_of_subjects): # This function takes the list of degrees and tags each degree with one or more degree categories last_dict = { 'immersive':'bootcamp', 'certificate':'bootcamp', 'bootcamp':'bootcamp', 'boot camp':'bootcamp', 'license':'license', 'licensure':'license', 'certification':'certificate', 'certificate':'certificate', } degree_category_list = [] # Load these dictionaries from the `configuration_files` folder degree_type_word_dict = load_dict('functions/configuration_files/degree_type_word_dict.csv') degree_type_phrase_dict = load_dict('functions/configuration_files/degree_type_phrase_dict.csv') # Iterate through each degree for index, row in enumerate(list_of_degrees): degree_category = [] found_key=0 # First, use the `degree_type_word_dict` dictionary to assign a degree category for key in filter(lambda x: str(degree_type_word_dict[x])!='nan', degree_type_word_dict): if key in row.split(): degree_category.append(degree_type_word_dict[key]) found_key=1 if found_key==0: # If degree category is still empty, # use the `degree_type_phrase_dict` dictionary to assign a degree category for phrase in degree_type_phrase_dict: if re.match(phrase,row): degree_category.append(degree_type_phrase_dict[phrase]) found_key=1 if found_key==0: # If degree category is still empty, # use the `last_dict` dictionary and match on the subject instead of the degree # to assign a degree category for key in last_dict: if key in list_of_subjects[index]: degree_category.append(last_dict[key]) degree_category_list.append(list(set([x.strip() for x in degree_category if str(x)!='nan' and str(x)!= ' ']))) return degree_category_list def find_best_degree_category(list_of_degree_categories): # This function takes a list of degree categories and returns the highest ranked one # The `degree_category_ranking` list shows ranking of 
the categories # Each row will be assigned only 1 degree category in the end degree_category_ranking = ['minor', 'all but dissertation', 'juris doctor', 'doctorate', 'associates', 'some education', 'masters', 'bachelors', 'license', 'hs diploma', 'vocational', 'certificate'] final_degree_category_list = [] for row in list_of_degree_categories: if len(row) > 1: for job in degree_category_ranking: if job in row: final_degree_category_list.append(job) break elif len(row) == 1: final_degree_category_list.append(row[0]) else: final_degree_category_list.append('unknown') return final_degree_category_list def process_edu_titles(list_of_titles): # Remove anything outside of words and numbers list_of_titles = [re.sub('[^A-Za-z0-9\s]+', '', row.lower()) for row in list_of_titles] # Tokenize all the words list_of_titles = [word_tokenize(row) for row in list_of_titles] # Find all the acronyms in the education titles acronym_list = get_list_small_words(list_of_titles, 5) # Split the overall title into subject and degree degree_name_list, subject_name_list = split_to_subject_degree(list_of_titles, acronym_list) # Find the degree categories for each degree degree_category_list = tag_with_degree_category(degree_name_list, subject_name_list) # Condense the degree categories into one degree final_degree_category_list = find_best_degree_category(degree_category_list) # Return these lists return subject_name_list, degree_name_list, final_degree_category_list
[ "re.match", "pandas.read_csv", "nltk.word_tokenize" ]
[((5398, 5416), 'nltk.word_tokenize', 'word_tokenize', (['row'], {}), '(row)\n', (5411, 5416), False, 'from nltk import word_tokenize\n'), ((3391, 3412), 're.match', 're.match', (['phrase', 'row'], {}), '(phrase, row)\n', (3399, 3412), False, 'import re\n'), ((424, 465), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'encoding': '"""latin-1"""'}), "(filename, encoding='latin-1')\n", (435, 465), True, 'import pandas as pd\n')]
import os
import tarfile
import zipfile
from contextlib import contextmanager

from poetry.masonry import api
from poetry.utils.helpers import temporary_directory


@contextmanager
def cwd(directory):
    prev = os.getcwd()
    os.chdir(str(directory))
    try:
        yield
    finally:
        os.chdir(prev)


fixtures = os.path.join(os.path.dirname(__file__), "builders", "fixtures")


def test_get_requires_for_build_wheel():
    expected = ["cleo>=0.6.0,<0.7.0", "cachy[msgpack]>=0.2.0,<0.3.0"]
    with cwd(os.path.join(fixtures, "complete")):
        assert api.get_requires_for_build_wheel() == expected


def test_get_requires_for_build_sdist():
    expected = ["cleo>=0.6.0,<0.7.0", "cachy[msgpack]>=0.2.0,<0.3.0"]
    with cwd(os.path.join(fixtures, "complete")):
        assert api.get_requires_for_build_sdist() == expected


def test_build_wheel():
    with temporary_directory() as tmp_dir, cwd(os.path.join(fixtures, "complete")):
        filename = api.build_wheel(tmp_dir)
        with zipfile.ZipFile(str(os.path.join(tmp_dir, filename))) as zip:
            namelist = zip.namelist()
            assert "my_package-1.2.3.dist-info/entry_points.txt" in namelist
            assert "my_package-1.2.3.dist-info/WHEEL" in namelist
            assert "my_package-1.2.3.dist-info/METADATA" in namelist


def test_build_sdist():
    with temporary_directory() as tmp_dir, cwd(os.path.join(fixtures, "complete")):
        filename = api.build_sdist(tmp_dir)
        with tarfile.open(str(os.path.join(tmp_dir, filename))) as tar:
            namelist = tar.getnames()
            assert "my-package-1.2.3/LICENSE" in namelist
[ "poetry.masonry.api.get_requires_for_build_sdist", "poetry.utils.helpers.temporary_directory", "os.getcwd", "poetry.masonry.api.build_sdist", "os.path.dirname", "poetry.masonry.api.build_wheel", "poetry.masonry.api.get_requires_for_build_wheel", "os.path.join", "os.chdir" ]
[((213, 224), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (222, 224), False, 'import os\n'), ((339, 364), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (354, 364), False, 'import os\n'), ((298, 312), 'os.chdir', 'os.chdir', (['prev'], {}), '(prev)\n', (306, 312), False, 'import os\n'), ((861, 882), 'poetry.utils.helpers.temporary_directory', 'temporary_directory', ([], {}), '()\n', (880, 882), False, 'from poetry.utils.helpers import temporary_directory\n'), ((955, 979), 'poetry.masonry.api.build_wheel', 'api.build_wheel', (['tmp_dir'], {}), '(tmp_dir)\n', (970, 979), False, 'from poetry.masonry import api\n'), ((1342, 1363), 'poetry.utils.helpers.temporary_directory', 'temporary_directory', ([], {}), '()\n', (1361, 1363), False, 'from poetry.utils.helpers import temporary_directory\n'), ((1436, 1460), 'poetry.masonry.api.build_sdist', 'api.build_sdist', (['tmp_dir'], {}), '(tmp_dir)\n', (1451, 1460), False, 'from poetry.masonry import api\n'), ((516, 550), 'os.path.join', 'os.path.join', (['fixtures', '"""complete"""'], {}), "(fixtures, 'complete')\n", (528, 550), False, 'import os\n'), ((561, 595), 'poetry.masonry.api.get_requires_for_build_wheel', 'api.get_requires_for_build_wheel', ([], {}), '()\n', (593, 595), False, 'from poetry.masonry import api\n'), ((734, 768), 'os.path.join', 'os.path.join', (['fixtures', '"""complete"""'], {}), "(fixtures, 'complete')\n", (746, 768), False, 'import os\n'), ((779, 813), 'poetry.masonry.api.get_requires_for_build_sdist', 'api.get_requires_for_build_sdist', ([], {}), '()\n', (811, 813), False, 'from poetry.masonry import api\n'), ((899, 933), 'os.path.join', 'os.path.join', (['fixtures', '"""complete"""'], {}), "(fixtures, 'complete')\n", (911, 933), False, 'import os\n'), ((1380, 1414), 'os.path.join', 'os.path.join', (['fixtures', '"""complete"""'], {}), "(fixtures, 'complete')\n", (1392, 1414), False, 'import os\n'), ((1014, 1045), 'os.path.join', 'os.path.join', (['tmp_dir', 'filename'], {}), '(tmp_dir, filename)\n', (1026, 1045), False, 'import os\n'), ((1492, 1523), 'os.path.join', 'os.path.join', (['tmp_dir', 'filename'], {}), '(tmp_dir, filename)\n', (1504, 1523), False, 'import os\n')]
# Copyright (C) 2011 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of the contributors
#   may be used to endorse or promote products derived from this software
#   without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import traceback

enabled = False


def not_impl(self, name=None):
    method = name or traceback.extract_stack()[-2][2]
    raise NotImplementedError("{}.{} not implemented".format(self.__class__.__name__, method))


class NullConnection:

    def write_line(self, line, split=True):
        pass

    def write_status(self, code, message):
        pass

    def write_text_end(self):
        pass

    def write_text(self, lines):
        pass

    def __getattr__(self, name):
        not_impl(self, name)
[ "traceback.extract_stack" ]
[((1627, 1652), 'traceback.extract_stack', 'traceback.extract_stack', ([], {}), '()\n', (1650, 1652), False, 'import traceback\n')]
import heterocl as hcl
import numpy as np


def top_syr2k(M=20, N=30, alpha=1.5, beta=1.2, dtype=hcl.Int(), target=None):

    hcl.init(dtype)
    A = hcl.placeholder((N, M), "A")
    B = hcl.placeholder((N, M), "B")
    C = hcl.placeholder((N, N), "C")

    def kernel_syr2k(A, B, C):
        # Irregulax axis access
        with hcl.Stage("loop_1"):
            with hcl.for_(0, N, name="i") as i:
                with hcl.for_(0, i + 1, name="j") as j:
                    C[i][j] *= beta
                with hcl.for_(0, M, name="k") as k:
                    with hcl.for_(0, i + 1, name="j") as j:
                        C[i][j] += A[j][k] * alpha * B[i][k] + B[j][k] * alpha * A[i][k]

    s = hcl.create_schedule([A, B, C], kernel_syr2k)

    #### Apply customizations ####

    #### Apply customizations ####

    return hcl.build(s, target=target)


def syr2k_golden(alpha, beta, M, N, A, B, C, DATA_TYPE):

    dtype = NDATA_TYPE_DICT[DATA_TYPE.lower()]

    for i in range(N):
        for j in range(i + 1):
            C[i][j] *= beta
        for k in range(M):
            for j in range(i + 1):
                C[i][j] += A[j][k] * alpha * B[i][k] + B[j][k] * alpha * A[i][k]
[ "heterocl.Stage", "heterocl.for_", "heterocl.placeholder", "heterocl.build", "heterocl.create_schedule", "heterocl.init", "heterocl.Int" ]
[((97, 106), 'heterocl.Int', 'hcl.Int', ([], {}), '()\n', (104, 106), True, 'import heterocl as hcl\n'), ((127, 142), 'heterocl.init', 'hcl.init', (['dtype'], {}), '(dtype)\n', (135, 142), True, 'import heterocl as hcl\n'), ((151, 179), 'heterocl.placeholder', 'hcl.placeholder', (['(N, M)', '"""A"""'], {}), "((N, M), 'A')\n", (166, 179), True, 'import heterocl as hcl\n'), ((188, 216), 'heterocl.placeholder', 'hcl.placeholder', (['(N, M)', '"""B"""'], {}), "((N, M), 'B')\n", (203, 216), True, 'import heterocl as hcl\n'), ((225, 253), 'heterocl.placeholder', 'hcl.placeholder', (['(N, N)', '"""C"""'], {}), "((N, N), 'C')\n", (240, 253), True, 'import heterocl as hcl\n'), ((712, 756), 'heterocl.create_schedule', 'hcl.create_schedule', (['[A, B, C]', 'kernel_syr2k'], {}), '([A, B, C], kernel_syr2k)\n', (731, 756), True, 'import heterocl as hcl\n'), ((842, 869), 'heterocl.build', 'hcl.build', (['s'], {'target': 'target'}), '(s, target=target)\n', (851, 869), True, 'import heterocl as hcl\n'), ((332, 351), 'heterocl.Stage', 'hcl.Stage', (['"""loop_1"""'], {}), "('loop_1')\n", (341, 351), True, 'import heterocl as hcl\n'), ((370, 394), 'heterocl.for_', 'hcl.for_', (['(0)', 'N'], {'name': '"""i"""'}), "(0, N, name='i')\n", (378, 394), True, 'import heterocl as hcl\n'), ((422, 450), 'heterocl.for_', 'hcl.for_', (['(0)', '(i + 1)'], {'name': '"""j"""'}), "(0, i + 1, name='j')\n", (430, 450), True, 'import heterocl as hcl\n'), ((514, 538), 'heterocl.for_', 'hcl.for_', (['(0)', 'M'], {'name': '"""k"""'}), "(0, M, name='k')\n", (522, 538), True, 'import heterocl as hcl\n'), ((570, 598), 'heterocl.for_', 'hcl.for_', (['(0)', '(i + 1)'], {'name': '"""j"""'}), "(0, i + 1, name='j')\n", (578, 598), True, 'import heterocl as hcl\n')]
import os

'''
def list_files(startpath):
    my_file = open("file.txt","w+")
    for root, dirs, files in os.walk(startpath):
        level = root.replace(startpath, '').count(os.sep)
        indent = '-' * 4 * (level)
        print('{}{}/'.format(indent, os.path.basename(root)))
        subindent = ' ' * 4 * (level + 1)
        for f in files:
            print('{}{}'.format(subindent, f))
            my_file.write('{}{}'.format(subindent, f))
    my_file.close()

list_files("Unity Source")
'''

from pathlib import Path


class DisplayablePath(object):
    display_filename_prefix_middle = "├──"
    display_filename_prefix_last = "└──"
    display_parent_prefix_middle = "    "
    display_parent_prefix_last = "│   "

    def __init__(self, path, parent_path, is_last):
        self.path = Path(str(path))
        self.parent = parent_path
        self.is_last = is_last
        if self.parent:
            self.depth = self.parent.depth + 1
        else:
            self.depth = 0

    @property
    def displayname(self):
        if self.path.is_dir():
            return self.path.name + '/'
        return self.path.name

    @classmethod
    def make_tree(cls, root, parent=None, is_last=False, criteria=None):
        root = Path(str(root))
        criteria = criteria or cls._default_criteria

        displayable_root = cls(root, parent, is_last)
        yield displayable_root

        children = sorted(list(path for path in root.iterdir() if criteria(path)),
                          key=lambda s: str(s).lower())
        count = 1
        for path in children:
            is_last = count == len(children)
            if path.is_dir():
                yield from cls.make_tree(path,
                                         parent=displayable_root,
                                         is_last=is_last,
                                         criteria=criteria)
            else:
                yield cls(path, displayable_root, is_last)
            count += 1

    @classmethod
    def _default_criteria(cls, path):
        return True

    @property
    def displayname(self):
        if self.path.is_dir():
            return self.path.name + '/'
        return self.path.name

    def displayable(self):
        if self.parent is None:
            return self.displayname

        _filename_prefix = (self.display_filename_prefix_last
                            if self.is_last
                            else self.display_filename_prefix_middle)

        parts = ['{!s} {!s}'.format(_filename_prefix, self.displayname)]

        parent = self.parent
        while parent and parent.parent is not None:
            parts.append(self.display_parent_prefix_middle
                         if parent.is_last
                         else self.display_parent_prefix_last)
            parent = parent.parent

        return ''.join(reversed(parts))


paths = DisplayablePath.make_tree(Path("Unity Source"))
myfile = open("file.txt","w+")
for path in paths:
    print(path.displayable())
    myfile.write(path.displayable())
myfile.close()
[ "pathlib.Path" ]
[((3011, 3031), 'pathlib.Path', 'Path', (['"""Unity Source"""'], {}), "('Unity Source')\n", (3015, 3031), False, 'from pathlib import Path\n')]
import logging import timeit import numpy as np import pandas as pd from tqdm import tqdm from unified_model import UnifiedModel from unified_model.utils import truncate_middle, ITEM_COLUMN, SCORE_COLUMN log = logging.getLogger(__name__) UNKNOWN_ITEM = '<UNK>' # https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval) # http://scikit-learn.org/stable/modules/model_evaluation.html def f1_score(precision, recall): return 2 * precision * recall / (precision + recall) def evaluate_classifier(unified_model, test_data: list, target_predictions: list, k: list = None, per_label=False): # TODO multithreaded evaluation k = [1, 5] if k is None else [k] if isinstance(k, int) else k # set default value for k k.sort() pred_labels, pred_scores, in_top_k, avg_pred_time = _process_predictions(unified_model, test_data, target_predictions, k) scored_labels = _score_labels(target_predictions, k, pred_labels, in_top_k) metrics = _calculate_metrics(scored_labels, k) metrics['avg_prediction_time'] = avg_pred_time if per_label: return metrics, scored_labels else: return metrics def _calculate_metrics(scored_labels, k): metrics = {} for i in k: i = str(i) try: metrics['micro_precision@k' + i] = scored_labels['true_positives@k' + i].sum() / scored_labels[ 'predicted_count@k' + i].sum() except ZeroDivisionError: metrics['micro_precision@k' + i] = 0 metrics['micro_recall@k' + i] = scored_labels['true_positives@k' + i].sum() / scored_labels['count'].sum() try: metrics['micro_f1@k' + i] = f1_score(metrics['micro_precision@k' + i], metrics['micro_recall@k' + i]) except ZeroDivisionError: metrics['micro_f1@k' + i] = 0 metrics['macro_precision@k' + i] = scored_labels['precision@k' + i].mean() metrics['macro_recall@k' + i] = scored_labels['recall@k' + i].mean() metrics['macro_f1@k' + i] = scored_labels['f1@k' + i].mean() return metrics def _score_labels(target_predictions, k, pred_labels, in_top_k): unique_labels = list(set(target_predictions)) target_predictions = np.array(target_predictions) # convert true predictions to no array columns = ['count'] # tp + fn for i in k: i = str(i) columns.append('predicted_count@k' + i) # tp + fp columns.append('true_positives@k' + i) columns.append('precision@k' + i) columns.append('recall@k' + i) columns.append('f1@k' + i) df = pd.DataFrame(0, columns=columns, index=unique_labels) for label in unique_labels: df['count'][label] = np.sum(target_predictions == label) for i in k: df['predicted_count@k' + str(i)][label] = np.sum(pred_labels[:, :i].flatten() == label) df['true_positives@k' + str(i)][label] = np.sum(in_top_k[i][target_predictions == label]) for i in k: i = str(i) df['precision@k' + i] = df['true_positives@k' + i] / df['predicted_count@k' + i] df['recall@k' + i] = df['true_positives@k' + i] / df['count'] df['f1@k' + i] = f1_score(df['precision@k' + i], df['recall@k' + i]) df = df.fillna(0) return df.sort_values(by='count', ascending=False) def _fill_missing_predictions(df: pd.DataFrame, max_k: int) -> pd.DataFrame: for i in range(max_k - df.shape[0]): df = df.append({ITEM_COLUMN: UNKNOWN_ITEM, SCORE_COLUMN: 0}, ignore_index=True) return df def _process_predictions(unified_model, test_data, target_predictions, k): # allow target_predictions to also contain a list of true labels per prediction target_predictions = np.array(target_predictions) # convert true predictions to no array start_time = timeit.default_timer() predictions = [] for data in tqdm(test_data, desc="Calculating metrics..."): try: prediction_result = unified_model.predict(data, limit=np.amax(k)) 
if prediction_result.shape[0] < np.amax(k): log.warning("Model returned " + str(prediction_result.shape[0]) + " predictions, " + str(np.amax(k)) + " were expected.") log.debug("Model data: " + str(data)) prediction_result = _fill_missing_predictions(prediction_result, np.amax(k)) if prediction_result is None: log.warning("Model returned no prediction (None).") log.debug("Model data: " + str(data)) # add empty predictions prediction_result = _fill_missing_predictions(pd.DataFrame(columns=[ITEM_COLUMN, SCORE_COLUMN]), np.amax(k)) except Exception as ex: log.warning("Exception during prediction: " + str(ex)) log.debug("Model data: " + str(data)) prediction_result = _fill_missing_predictions(pd.DataFrame(columns=[ITEM_COLUMN, SCORE_COLUMN]), np.amax(k)) predictions.append(prediction_result) avg_pred_time = ((timeit.default_timer() - start_time) / len(test_data) * 1000) pred_labels = np.array([prediction[ITEM_COLUMN].tolist() for prediction in predictions]) pred_scores = np.array([prediction[SCORE_COLUMN].tolist() for prediction in predictions]) in_top_k = {} for i in k: in_top_k[i] = np.array( [true_label in k_predictions[:i] for true_label, k_predictions in zip(target_predictions, pred_labels)]) return pred_labels, pred_scores, in_top_k, avg_pred_time def compare_models(unified_models: list, data_list: list, target_predictions: list, styled=True, **kwargs) -> pd.DataFrame: """ Compare evaluation metrics for the given list of models. # Arguments data_list (list): List of data items used for the evaluations. target_predictions (list): List of true predictions for test data. styled (boolean): If 'True', a styled DataFrame will be returned (with coloring, etc.) **kwargs: Provide additional keyword-based parameters. # Returns DataFrame that summarizes the metrics of all of the given models. """ model_names = [] metrics_per_model = [] for model in unified_models: print("Calculating metrics for " + str(model)) model_names.append(truncate_middle(str(model), 40)) metrics_per_model.append(model.evaluate(data_list, target_predictions, **kwargs)) ## compare evaluation df, also use color to show best and worst values # add random baseline and combined score df = pd.DataFrame(metrics_per_model, index=model_names) # https://pandas.pydata.org/pandas-docs/stable/style.html if styled: # return df.style.bar(color='#f0fbff') return df.style.background_gradient(cmap='BuGn', low=0.1, high=0.8, axis=0) else: return df def test_unified_model(model_instance: UnifiedModel, data=None, conda_environment=False): """ Helps to test whether your model instance can be successfully loaded in another python environment. This method saves the model instance, loads the model file in another python process, and (optionally) calls `predict()` with the provided test data. # Arguments model_instance (UnifiedModel): Unified model instance. data (string or bytes): Input data to test the model (optional). conda_environment (bool): If `True`, a clean conda environment will be created for the test (optional). 
""" import sys import os import tempfile import subprocess import shutil log.info("Starting model test.") temp_test_folder = tempfile.mkdtemp() saved_model_path = model_instance.save(os.path.join(temp_test_folder, "test_model")) python_runtime = sys.executable CONDA_ENV = "model-test-env" if conda_environment: log.info("Creating clean conda environment.") try: log.info(subprocess.check_output("conda create -n " + CONDA_ENV + " python=3.6 cython -y", stderr=subprocess.STDOUT, shell=True).decode("utf-8")) log.info("Installing unified model.") log.info( subprocess.check_output("/opt/conda/envs/" + CONDA_ENV + "/bin/pip install --upgrade unified-model", stderr=subprocess.STDOUT, shell=True).decode("utf-8")) python_runtime = "/opt/conda/envs/" + CONDA_ENV + "/bin/python" except subprocess.CalledProcessError as e: log.info("Failed to create conda environment: \n" + e.output.decode("utf-8")) test_command = python_runtime + " " + saved_model_path + ' predict' if data: test_command += ' --input-data "' + str(data) + '"' log.info("Executing " + test_command) try: log.info(subprocess.check_output(test_command, stderr=subprocess.STDOUT, shell=True).decode("utf-8")) log.info("Finished model test successfully!") except subprocess.CalledProcessError as e: log.info("Test failed: \n" + e.output.decode("utf-8")) shutil.rmtree(temp_test_folder) if conda_environment: log.info("Removing conda environment.") subprocess.call("conda remove --name " + CONDA_ENV + " --all -y", shell=True)
[ "pandas.DataFrame", "tqdm.tqdm", "numpy.sum", "timeit.default_timer", "subprocess.check_output", "numpy.amax", "tempfile.mkdtemp", "numpy.array", "subprocess.call", "shutil.rmtree", "os.path.join", "logging.getLogger" ]
[((213, 240), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (230, 240), False, 'import logging\n'), ((2368, 2396), 'numpy.array', 'np.array', (['target_predictions'], {}), '(target_predictions)\n', (2376, 2396), True, 'import numpy as np\n'), ((2740, 2793), 'pandas.DataFrame', 'pd.DataFrame', (['(0)'], {'columns': 'columns', 'index': 'unique_labels'}), '(0, columns=columns, index=unique_labels)\n', (2752, 2793), True, 'import pandas as pd\n'), ((3895, 3923), 'numpy.array', 'np.array', (['target_predictions'], {}), '(target_predictions)\n', (3903, 3923), True, 'import numpy as np\n'), ((3981, 4003), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (4001, 4003), False, 'import timeit\n'), ((4042, 4088), 'tqdm.tqdm', 'tqdm', (['test_data'], {'desc': '"""Calculating metrics..."""'}), "(test_data, desc='Calculating metrics...')\n", (4046, 4088), False, 'from tqdm import tqdm\n'), ((6824, 6874), 'pandas.DataFrame', 'pd.DataFrame', (['metrics_per_model'], {'index': 'model_names'}), '(metrics_per_model, index=model_names)\n', (6836, 6874), True, 'import pandas as pd\n'), ((7898, 7916), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (7914, 7916), False, 'import tempfile\n'), ((9473, 9504), 'shutil.rmtree', 'shutil.rmtree', (['temp_test_folder'], {}), '(temp_test_folder)\n', (9486, 9504), False, 'import shutil\n'), ((2855, 2890), 'numpy.sum', 'np.sum', (['(target_predictions == label)'], {}), '(target_predictions == label)\n', (2861, 2890), True, 'import numpy as np\n'), ((7960, 8004), 'os.path.join', 'os.path.join', (['temp_test_folder', '"""test_model"""'], {}), "(temp_test_folder, 'test_model')\n", (7972, 8004), False, 'import os\n'), ((9588, 9665), 'subprocess.call', 'subprocess.call', (["('conda remove --name ' + CONDA_ENV + ' --all -y')"], {'shell': '(True)'}), "('conda remove --name ' + CONDA_ENV + ' --all -y', shell=True)\n", (9603, 9665), False, 'import subprocess\n'), ((3064, 3112), 'numpy.sum', 'np.sum', (['in_top_k[i][target_predictions == label]'], {}), '(in_top_k[i][target_predictions == label])\n', (3070, 3112), True, 'import numpy as np\n'), ((4226, 4236), 'numpy.amax', 'np.amax', (['k'], {}), '(k)\n', (4233, 4236), True, 'import numpy as np\n'), ((5282, 5304), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (5302, 5304), False, 'import timeit\n'), ((4170, 4180), 'numpy.amax', 'np.amax', (['k'], {}), '(k)\n', (4177, 4180), True, 'import numpy as np\n'), ((4539, 4549), 'numpy.amax', 'np.amax', (['k'], {}), '(k)\n', (4546, 4549), True, 'import numpy as np\n'), ((4817, 4866), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': '[ITEM_COLUMN, SCORE_COLUMN]'}), '(columns=[ITEM_COLUMN, SCORE_COLUMN])\n', (4829, 4866), True, 'import pandas as pd\n'), ((4930, 4940), 'numpy.amax', 'np.amax', (['k'], {}), '(k)\n', (4937, 4940), True, 'import numpy as np\n'), ((5149, 5198), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': '[ITEM_COLUMN, SCORE_COLUMN]'}), '(columns=[ITEM_COLUMN, SCORE_COLUMN])\n', (5161, 5198), True, 'import pandas as pd\n'), ((5200, 5210), 'numpy.amax', 'np.amax', (['k'], {}), '(k)\n', (5207, 5210), True, 'import numpy as np\n'), ((9211, 9286), 'subprocess.check_output', 'subprocess.check_output', (['test_command'], {'stderr': 'subprocess.STDOUT', 'shell': '(True)'}), '(test_command, stderr=subprocess.STDOUT, shell=True)\n', (9234, 9286), False, 'import subprocess\n'), ((8191, 8314), 'subprocess.check_output', 'subprocess.check_output', (["('conda create -n ' + CONDA_ENV + ' python=3.6 cython 
-y')"], {'stderr': 'subprocess.STDOUT', 'shell': '(True)'}), "('conda create -n ' + CONDA_ENV +\n ' python=3.6 cython -y', stderr=subprocess.STDOUT, shell=True)\n", (8214, 8314), False, 'import subprocess\n'), ((8461, 8607), 'subprocess.check_output', 'subprocess.check_output', (["('/opt/conda/envs/' + CONDA_ENV + '/bin/pip install --upgrade unified-model')"], {'stderr': 'subprocess.STDOUT', 'shell': '(True)'}), "('/opt/conda/envs/' + CONDA_ENV +\n '/bin/pip install --upgrade unified-model', stderr=subprocess.STDOUT,\n shell=True)\n", (8484, 8607), False, 'import subprocess\n'), ((4371, 4381), 'numpy.amax', 'np.amax', (['k'], {}), '(k)\n', (4378, 4381), True, 'import numpy as np\n')]
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2017-01-05 09:51
from __future__ import unicode_literals

import datetime
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('website', '0013_auto_20170105_1113'),
    ]

    operations = [
        migrations.AlterField(
            model_name='transaction',
            name='datetime',
            field=models.DateTimeField(auto_created=True, default=datetime.datetime(2017, 1, 5, 11, 51, 51, 548614)),
        ),
        migrations.AlterField(
            model_name='uniqueidentifier',
            name='is_active',
            field=models.BooleanField(default=True),
        ),
    ]
[ "django.db.models.BooleanField", "datetime.datetime" ]
[((656, 689), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (675, 689), False, 'from django.db import migrations, models\n'), ((471, 520), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(1)', '(5)', '(11)', '(51)', '(51)', '(548614)'], {}), '(2017, 1, 5, 11, 51, 51, 548614)\n', (488, 520), False, 'import datetime\n')]
# coding: utf-8 """ Cisco Intersight OpenAPI specification. The Cisco Intersight OpenAPI specification. OpenAPI spec version: 1.0.9-1461 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems import re class StorageVirtualDrive(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'account_moid': 'str', 'create_time': 'datetime', 'domain_group_moid': 'str', 'mod_time': 'datetime', 'moid': 'str', 'object_type': 'str', 'owners': 'list[str]', 'shared_scope': 'str', 'tags': 'list[MoTag]', 'version_context': 'MoVersionContext', 'ancestors': 'list[MoBaseMoRef]', 'parent': 'MoBaseMoRef', 'permission_resources': 'list[MoBaseMoRef]', 'device_mo_id': 'str', 'dn': 'str', 'rn': 'str', 'model': 'str', 'revision': 'str', 'serial': 'str', 'vendor': 'str', 'access_policy': 'str', 'actual_write_cache_policy': 'str', 'available_size': 'str', 'block_size': 'str', 'bootable': 'str', 'config_state': 'str', 'configured_write_cache_policy': 'str', 'connection_protocol': 'str', 'drive_cache': 'str', 'drive_security': 'str', 'drive_state': 'str', 'io_policy': 'str', 'name': 'str', 'num_blocks': 'str', 'oper_state': 'str', 'operability': 'str', 'physical_block_size': 'str', 'presence': 'str', 'read_policy': 'str', 'security_flags': 'str', 'size': 'str', 'strip_size': 'str', 'type': 'str', 'uuid': 'str', 'vendor_uuid': 'str', 'virtual_drive_id': 'str', 'physical_disk_usages': 'list[StoragePhysicalDiskUsageRef]', 'registered_device': 'AssetDeviceRegistrationRef', 'storage_controller': 'StorageControllerRef', 'vd_member_eps': 'list[StorageVdMemberEpRef]', 'virtual_drive_extension': 'StorageVirtualDriveExtensionRef' } attribute_map = { 'account_moid': 'AccountMoid', 'create_time': 'CreateTime', 'domain_group_moid': 'DomainGroupMoid', 'mod_time': 'ModTime', 'moid': 'Moid', 'object_type': 'ObjectType', 'owners': 'Owners', 'shared_scope': 'SharedScope', 'tags': 'Tags', 'version_context': 'VersionContext', 'ancestors': 'Ancestors', 'parent': 'Parent', 'permission_resources': 'PermissionResources', 'device_mo_id': 'DeviceMoId', 'dn': 'Dn', 'rn': 'Rn', 'model': 'Model', 'revision': 'Revision', 'serial': 'Serial', 'vendor': 'Vendor', 'access_policy': 'AccessPolicy', 'actual_write_cache_policy': 'ActualWriteCachePolicy', 'available_size': 'AvailableSize', 'block_size': 'BlockSize', 'bootable': 'Bootable', 'config_state': 'ConfigState', 'configured_write_cache_policy': 'ConfiguredWriteCachePolicy', 'connection_protocol': 'ConnectionProtocol', 'drive_cache': 'DriveCache', 'drive_security': 'DriveSecurity', 'drive_state': 'DriveState', 'io_policy': 'IoPolicy', 'name': 'Name', 'num_blocks': 'NumBlocks', 'oper_state': 'OperState', 'operability': 'Operability', 'physical_block_size': 'PhysicalBlockSize', 'presence': 'Presence', 'read_policy': 'ReadPolicy', 'security_flags': 'SecurityFlags', 'size': 'Size', 'strip_size': 'StripSize', 'type': 'Type', 'uuid': 'Uuid', 'vendor_uuid': 'VendorUuid', 'virtual_drive_id': 'VirtualDriveId', 'physical_disk_usages': 'PhysicalDiskUsages', 'registered_device': 'RegisteredDevice', 'storage_controller': 'StorageController', 'vd_member_eps': 'VdMemberEps', 'virtual_drive_extension': 'VirtualDriveExtension' } def __init__(self, account_moid=None, create_time=None, 
domain_group_moid=None, mod_time=None, moid=None, object_type=None, owners=None, shared_scope=None, tags=None, version_context=None, ancestors=None, parent=None, permission_resources=None, device_mo_id=None, dn=None, rn=None, model=None, revision=None, serial=None, vendor=None, access_policy=None, actual_write_cache_policy=None, available_size=None, block_size=None, bootable=None, config_state=None, configured_write_cache_policy=None, connection_protocol=None, drive_cache=None, drive_security=None, drive_state=None, io_policy=None, name=None, num_blocks=None, oper_state=None, operability=None, physical_block_size=None, presence=None, read_policy=None, security_flags=None, size=None, strip_size=None, type=None, uuid=None, vendor_uuid=None, virtual_drive_id=None, physical_disk_usages=None, registered_device=None, storage_controller=None, vd_member_eps=None, virtual_drive_extension=None): """ StorageVirtualDrive - a model defined in Swagger """ self._account_moid = None self._create_time = None self._domain_group_moid = None self._mod_time = None self._moid = None self._object_type = None self._owners = None self._shared_scope = None self._tags = None self._version_context = None self._ancestors = None self._parent = None self._permission_resources = None self._device_mo_id = None self._dn = None self._rn = None self._model = None self._revision = None self._serial = None self._vendor = None self._access_policy = None self._actual_write_cache_policy = None self._available_size = None self._block_size = None self._bootable = None self._config_state = None self._configured_write_cache_policy = None self._connection_protocol = None self._drive_cache = None self._drive_security = None self._drive_state = None self._io_policy = None self._name = None self._num_blocks = None self._oper_state = None self._operability = None self._physical_block_size = None self._presence = None self._read_policy = None self._security_flags = None self._size = None self._strip_size = None self._type = None self._uuid = None self._vendor_uuid = None self._virtual_drive_id = None self._physical_disk_usages = None self._registered_device = None self._storage_controller = None self._vd_member_eps = None self._virtual_drive_extension = None if account_moid is not None: self.account_moid = account_moid if create_time is not None: self.create_time = create_time if domain_group_moid is not None: self.domain_group_moid = domain_group_moid if mod_time is not None: self.mod_time = mod_time if moid is not None: self.moid = moid if object_type is not None: self.object_type = object_type if owners is not None: self.owners = owners if shared_scope is not None: self.shared_scope = shared_scope if tags is not None: self.tags = tags if version_context is not None: self.version_context = version_context if ancestors is not None: self.ancestors = ancestors if parent is not None: self.parent = parent if permission_resources is not None: self.permission_resources = permission_resources if device_mo_id is not None: self.device_mo_id = device_mo_id if dn is not None: self.dn = dn if rn is not None: self.rn = rn if model is not None: self.model = model if revision is not None: self.revision = revision if serial is not None: self.serial = serial if vendor is not None: self.vendor = vendor if access_policy is not None: self.access_policy = access_policy if actual_write_cache_policy is not None: self.actual_write_cache_policy = actual_write_cache_policy if available_size is not None: self.available_size = available_size if block_size is not 
None: self.block_size = block_size if bootable is not None: self.bootable = bootable if config_state is not None: self.config_state = config_state if configured_write_cache_policy is not None: self.configured_write_cache_policy = configured_write_cache_policy if connection_protocol is not None: self.connection_protocol = connection_protocol if drive_cache is not None: self.drive_cache = drive_cache if drive_security is not None: self.drive_security = drive_security if drive_state is not None: self.drive_state = drive_state if io_policy is not None: self.io_policy = io_policy if name is not None: self.name = name if num_blocks is not None: self.num_blocks = num_blocks if oper_state is not None: self.oper_state = oper_state if operability is not None: self.operability = operability if physical_block_size is not None: self.physical_block_size = physical_block_size if presence is not None: self.presence = presence if read_policy is not None: self.read_policy = read_policy if security_flags is not None: self.security_flags = security_flags if size is not None: self.size = size if strip_size is not None: self.strip_size = strip_size if type is not None: self.type = type if uuid is not None: self.uuid = uuid if vendor_uuid is not None: self.vendor_uuid = vendor_uuid if virtual_drive_id is not None: self.virtual_drive_id = virtual_drive_id if physical_disk_usages is not None: self.physical_disk_usages = physical_disk_usages if registered_device is not None: self.registered_device = registered_device if storage_controller is not None: self.storage_controller = storage_controller if vd_member_eps is not None: self.vd_member_eps = vd_member_eps if virtual_drive_extension is not None: self.virtual_drive_extension = virtual_drive_extension @property def account_moid(self): """ Gets the account_moid of this StorageVirtualDrive. The Account ID for this managed object. :return: The account_moid of this StorageVirtualDrive. :rtype: str """ return self._account_moid @account_moid.setter def account_moid(self, account_moid): """ Sets the account_moid of this StorageVirtualDrive. The Account ID for this managed object. :param account_moid: The account_moid of this StorageVirtualDrive. :type: str """ self._account_moid = account_moid @property def create_time(self): """ Gets the create_time of this StorageVirtualDrive. The time when this managed object was created. :return: The create_time of this StorageVirtualDrive. :rtype: datetime """ return self._create_time @create_time.setter def create_time(self, create_time): """ Sets the create_time of this StorageVirtualDrive. The time when this managed object was created. :param create_time: The create_time of this StorageVirtualDrive. :type: datetime """ self._create_time = create_time @property def domain_group_moid(self): """ Gets the domain_group_moid of this StorageVirtualDrive. The DomainGroup ID for this managed object. :return: The domain_group_moid of this StorageVirtualDrive. :rtype: str """ return self._domain_group_moid @domain_group_moid.setter def domain_group_moid(self, domain_group_moid): """ Sets the domain_group_moid of this StorageVirtualDrive. The DomainGroup ID for this managed object. :param domain_group_moid: The domain_group_moid of this StorageVirtualDrive. :type: str """ self._domain_group_moid = domain_group_moid @property def mod_time(self): """ Gets the mod_time of this StorageVirtualDrive. The time when this managed object was last modified. :return: The mod_time of this StorageVirtualDrive. 
:rtype: datetime """ return self._mod_time @mod_time.setter def mod_time(self, mod_time): """ Sets the mod_time of this StorageVirtualDrive. The time when this managed object was last modified. :param mod_time: The mod_time of this StorageVirtualDrive. :type: datetime """ self._mod_time = mod_time @property def moid(self): """ Gets the moid of this StorageVirtualDrive. The unique identifier of this Managed Object instance. :return: The moid of this StorageVirtualDrive. :rtype: str """ return self._moid @moid.setter def moid(self, moid): """ Sets the moid of this StorageVirtualDrive. The unique identifier of this Managed Object instance. :param moid: The moid of this StorageVirtualDrive. :type: str """ self._moid = moid @property def object_type(self): """ Gets the object_type of this StorageVirtualDrive. The fully-qualified type of this managed object, i.e. the class name. This property is optional. The ObjectType is implied from the URL path. If specified, the value of objectType must match the class name specified in the URL path. :return: The object_type of this StorageVirtualDrive. :rtype: str """ return self._object_type @object_type.setter def object_type(self, object_type): """ Sets the object_type of this StorageVirtualDrive. The fully-qualified type of this managed object, i.e. the class name. This property is optional. The ObjectType is implied from the URL path. If specified, the value of objectType must match the class name specified in the URL path. :param object_type: The object_type of this StorageVirtualDrive. :type: str """ self._object_type = object_type @property def owners(self): """ Gets the owners of this StorageVirtualDrive. The array of owners which represent effective ownership of this object. :return: The owners of this StorageVirtualDrive. :rtype: list[str] """ return self._owners @owners.setter def owners(self, owners): """ Sets the owners of this StorageVirtualDrive. The array of owners which represent effective ownership of this object. :param owners: The owners of this StorageVirtualDrive. :type: list[str] """ self._owners = owners @property def shared_scope(self): """ Gets the shared_scope of this StorageVirtualDrive. Intersight provides pre-built workflows, tasks and policies to end users through global catalogs. Objects that are made available through global catalogs are said to have a 'shared' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs. :return: The shared_scope of this StorageVirtualDrive. :rtype: str """ return self._shared_scope @shared_scope.setter def shared_scope(self, shared_scope): """ Sets the shared_scope of this StorageVirtualDrive. Intersight provides pre-built workflows, tasks and policies to end users through global catalogs. Objects that are made available through global catalogs are said to have a 'shared' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs. :param shared_scope: The shared_scope of this StorageVirtualDrive. :type: str """ self._shared_scope = shared_scope @property def tags(self): """ Gets the tags of this StorageVirtualDrive. The array of tags, which allow to add key, value meta-data to managed objects. 
:return: The tags of this StorageVirtualDrive. :rtype: list[MoTag] """ return self._tags @tags.setter def tags(self, tags): """ Sets the tags of this StorageVirtualDrive. The array of tags, which allow to add key, value meta-data to managed objects. :param tags: The tags of this StorageVirtualDrive. :type: list[MoTag] """ self._tags = tags @property def version_context(self): """ Gets the version_context of this StorageVirtualDrive. The versioning info for this managed object. :return: The version_context of this StorageVirtualDrive. :rtype: MoVersionContext """ return self._version_context @version_context.setter def version_context(self, version_context): """ Sets the version_context of this StorageVirtualDrive. The versioning info for this managed object. :param version_context: The version_context of this StorageVirtualDrive. :type: MoVersionContext """ self._version_context = version_context @property def ancestors(self): """ Gets the ancestors of this StorageVirtualDrive. The array containing the MO references of the ancestors in the object containment hierarchy. :return: The ancestors of this StorageVirtualDrive. :rtype: list[MoBaseMoRef] """ return self._ancestors @ancestors.setter def ancestors(self, ancestors): """ Sets the ancestors of this StorageVirtualDrive. The array containing the MO references of the ancestors in the object containment hierarchy. :param ancestors: The ancestors of this StorageVirtualDrive. :type: list[MoBaseMoRef] """ self._ancestors = ancestors @property def parent(self): """ Gets the parent of this StorageVirtualDrive. The direct ancestor of this managed object in the containment hierarchy. :return: The parent of this StorageVirtualDrive. :rtype: MoBaseMoRef """ return self._parent @parent.setter def parent(self, parent): """ Sets the parent of this StorageVirtualDrive. The direct ancestor of this managed object in the containment hierarchy. :param parent: The parent of this StorageVirtualDrive. :type: MoBaseMoRef """ self._parent = parent @property def permission_resources(self): """ Gets the permission_resources of this StorageVirtualDrive. A slice of all permission resources (organizations) associated with this object. Permission ties resources and its associated roles/privileges. These resources which can be specified in a permission is PermissionResource. Currently only organizations can be specified in permission. All logical and physical resources part of an organization will have organization in PermissionResources field. If DeviceRegistration contains another DeviceRegistration and if parent is in org1 and child is part of org2, then child objects will have PermissionResources as org1 and org2. Parent Objects will have PermissionResources as org1. All profiles/policies created with in an organization will have the organization as PermissionResources. :return: The permission_resources of this StorageVirtualDrive. :rtype: list[MoBaseMoRef] """ return self._permission_resources @permission_resources.setter def permission_resources(self, permission_resources): """ Sets the permission_resources of this StorageVirtualDrive. A slice of all permission resources (organizations) associated with this object. Permission ties resources and its associated roles/privileges. These resources which can be specified in a permission is PermissionResource. Currently only organizations can be specified in permission. All logical and physical resources part of an organization will have organization in PermissionResources field. 
If DeviceRegistration contains another DeviceRegistration and if parent is in org1 and child is part of org2, then child objects will have PermissionResources as org1 and org2. Parent Objects will have PermissionResources as org1. All profiles/policies created with in an organization will have the organization as PermissionResources. :param permission_resources: The permission_resources of this StorageVirtualDrive. :type: list[MoBaseMoRef] """ self._permission_resources = permission_resources @property def device_mo_id(self): """ Gets the device_mo_id of this StorageVirtualDrive. :return: The device_mo_id of this StorageVirtualDrive. :rtype: str """ return self._device_mo_id @device_mo_id.setter def device_mo_id(self, device_mo_id): """ Sets the device_mo_id of this StorageVirtualDrive. :param device_mo_id: The device_mo_id of this StorageVirtualDrive. :type: str """ self._device_mo_id = device_mo_id @property def dn(self): """ Gets the dn of this StorageVirtualDrive. The Distinguished Name unambiguously identifies an object in the system. :return: The dn of this StorageVirtualDrive. :rtype: str """ return self._dn @dn.setter def dn(self, dn): """ Sets the dn of this StorageVirtualDrive. The Distinguished Name unambiguously identifies an object in the system. :param dn: The dn of this StorageVirtualDrive. :type: str """ self._dn = dn @property def rn(self): """ Gets the rn of this StorageVirtualDrive. The Relative Name uniquely identifies an object within a given context. :return: The rn of this StorageVirtualDrive. :rtype: str """ return self._rn @rn.setter def rn(self, rn): """ Sets the rn of this StorageVirtualDrive. The Relative Name uniquely identifies an object within a given context. :param rn: The rn of this StorageVirtualDrive. :type: str """ self._rn = rn @property def model(self): """ Gets the model of this StorageVirtualDrive. This field identifies the model of the given component. :return: The model of this StorageVirtualDrive. :rtype: str """ return self._model @model.setter def model(self, model): """ Sets the model of this StorageVirtualDrive. This field identifies the model of the given component. :param model: The model of this StorageVirtualDrive. :type: str """ self._model = model @property def revision(self): """ Gets the revision of this StorageVirtualDrive. :return: The revision of this StorageVirtualDrive. :rtype: str """ return self._revision @revision.setter def revision(self, revision): """ Sets the revision of this StorageVirtualDrive. :param revision: The revision of this StorageVirtualDrive. :type: str """ self._revision = revision @property def serial(self): """ Gets the serial of this StorageVirtualDrive. This field identifies the serial of the given component. :return: The serial of this StorageVirtualDrive. :rtype: str """ return self._serial @serial.setter def serial(self, serial): """ Sets the serial of this StorageVirtualDrive. This field identifies the serial of the given component. :param serial: The serial of this StorageVirtualDrive. :type: str """ self._serial = serial @property def vendor(self): """ Gets the vendor of this StorageVirtualDrive. This field identifies the vendor of the given component. :return: The vendor of this StorageVirtualDrive. :rtype: str """ return self._vendor @vendor.setter def vendor(self, vendor): """ Sets the vendor of this StorageVirtualDrive. This field identifies the vendor of the given component. :param vendor: The vendor of this StorageVirtualDrive. 
:type: str """ self._vendor = vendor @property def access_policy(self): """ Gets the access_policy of this StorageVirtualDrive. :return: The access_policy of this StorageVirtualDrive. :rtype: str """ return self._access_policy @access_policy.setter def access_policy(self, access_policy): """ Sets the access_policy of this StorageVirtualDrive. :param access_policy: The access_policy of this StorageVirtualDrive. :type: str """ self._access_policy = access_policy @property def actual_write_cache_policy(self): """ Gets the actual_write_cache_policy of this StorageVirtualDrive. :return: The actual_write_cache_policy of this StorageVirtualDrive. :rtype: str """ return self._actual_write_cache_policy @actual_write_cache_policy.setter def actual_write_cache_policy(self, actual_write_cache_policy): """ Sets the actual_write_cache_policy of this StorageVirtualDrive. :param actual_write_cache_policy: The actual_write_cache_policy of this StorageVirtualDrive. :type: str """ self._actual_write_cache_policy = actual_write_cache_policy @property def available_size(self): """ Gets the available_size of this StorageVirtualDrive. :return: The available_size of this StorageVirtualDrive. :rtype: str """ return self._available_size @available_size.setter def available_size(self, available_size): """ Sets the available_size of this StorageVirtualDrive. :param available_size: The available_size of this StorageVirtualDrive. :type: str """ self._available_size = available_size @property def block_size(self): """ Gets the block_size of this StorageVirtualDrive. :return: The block_size of this StorageVirtualDrive. :rtype: str """ return self._block_size @block_size.setter def block_size(self, block_size): """ Sets the block_size of this StorageVirtualDrive. :param block_size: The block_size of this StorageVirtualDrive. :type: str """ self._block_size = block_size @property def bootable(self): """ Gets the bootable of this StorageVirtualDrive. :return: The bootable of this StorageVirtualDrive. :rtype: str """ return self._bootable @bootable.setter def bootable(self, bootable): """ Sets the bootable of this StorageVirtualDrive. :param bootable: The bootable of this StorageVirtualDrive. :type: str """ self._bootable = bootable @property def config_state(self): """ Gets the config_state of this StorageVirtualDrive. :return: The config_state of this StorageVirtualDrive. :rtype: str """ return self._config_state @config_state.setter def config_state(self, config_state): """ Sets the config_state of this StorageVirtualDrive. :param config_state: The config_state of this StorageVirtualDrive. :type: str """ self._config_state = config_state @property def configured_write_cache_policy(self): """ Gets the configured_write_cache_policy of this StorageVirtualDrive. :return: The configured_write_cache_policy of this StorageVirtualDrive. :rtype: str """ return self._configured_write_cache_policy @configured_write_cache_policy.setter def configured_write_cache_policy(self, configured_write_cache_policy): """ Sets the configured_write_cache_policy of this StorageVirtualDrive. :param configured_write_cache_policy: The configured_write_cache_policy of this StorageVirtualDrive. :type: str """ self._configured_write_cache_policy = configured_write_cache_policy @property def connection_protocol(self): """ Gets the connection_protocol of this StorageVirtualDrive. :return: The connection_protocol of this StorageVirtualDrive. 
:rtype: str """ return self._connection_protocol @connection_protocol.setter def connection_protocol(self, connection_protocol): """ Sets the connection_protocol of this StorageVirtualDrive. :param connection_protocol: The connection_protocol of this StorageVirtualDrive. :type: str """ self._connection_protocol = connection_protocol @property def drive_cache(self): """ Gets the drive_cache of this StorageVirtualDrive. :return: The drive_cache of this StorageVirtualDrive. :rtype: str """ return self._drive_cache @drive_cache.setter def drive_cache(self, drive_cache): """ Sets the drive_cache of this StorageVirtualDrive. :param drive_cache: The drive_cache of this StorageVirtualDrive. :type: str """ self._drive_cache = drive_cache @property def drive_security(self): """ Gets the drive_security of this StorageVirtualDrive. :return: The drive_security of this StorageVirtualDrive. :rtype: str """ return self._drive_security @drive_security.setter def drive_security(self, drive_security): """ Sets the drive_security of this StorageVirtualDrive. :param drive_security: The drive_security of this StorageVirtualDrive. :type: str """ self._drive_security = drive_security @property def drive_state(self): """ Gets the drive_state of this StorageVirtualDrive. It shows the Virtual drive state. :return: The drive_state of this StorageVirtualDrive. :rtype: str """ return self._drive_state @drive_state.setter def drive_state(self, drive_state): """ Sets the drive_state of this StorageVirtualDrive. It shows the Virtual drive state. :param drive_state: The drive_state of this StorageVirtualDrive. :type: str """ self._drive_state = drive_state @property def io_policy(self): """ Gets the io_policy of this StorageVirtualDrive. :return: The io_policy of this StorageVirtualDrive. :rtype: str """ return self._io_policy @io_policy.setter def io_policy(self, io_policy): """ Sets the io_policy of this StorageVirtualDrive. :param io_policy: The io_policy of this StorageVirtualDrive. :type: str """ self._io_policy = io_policy @property def name(self): """ Gets the name of this StorageVirtualDrive. :return: The name of this StorageVirtualDrive. :rtype: str """ return self._name @name.setter def name(self, name): """ Sets the name of this StorageVirtualDrive. :param name: The name of this StorageVirtualDrive. :type: str """ self._name = name @property def num_blocks(self): """ Gets the num_blocks of this StorageVirtualDrive. :return: The num_blocks of this StorageVirtualDrive. :rtype: str """ return self._num_blocks @num_blocks.setter def num_blocks(self, num_blocks): """ Sets the num_blocks of this StorageVirtualDrive. :param num_blocks: The num_blocks of this StorageVirtualDrive. :type: str """ self._num_blocks = num_blocks @property def oper_state(self): """ Gets the oper_state of this StorageVirtualDrive. It shows the current operational state of Virtual drive. :return: The oper_state of this StorageVirtualDrive. :rtype: str """ return self._oper_state @oper_state.setter def oper_state(self, oper_state): """ Sets the oper_state of this StorageVirtualDrive. It shows the current operational state of Virtual drive. :param oper_state: The oper_state of this StorageVirtualDrive. :type: str """ self._oper_state = oper_state @property def operability(self): """ Gets the operability of this StorageVirtualDrive. :return: The operability of this StorageVirtualDrive. :rtype: str """ return self._operability @operability.setter def operability(self, operability): """ Sets the operability of this StorageVirtualDrive. 
:param operability: The operability of this StorageVirtualDrive. :type: str """ self._operability = operability @property def physical_block_size(self): """ Gets the physical_block_size of this StorageVirtualDrive. :return: The physical_block_size of this StorageVirtualDrive. :rtype: str """ return self._physical_block_size @physical_block_size.setter def physical_block_size(self, physical_block_size): """ Sets the physical_block_size of this StorageVirtualDrive. :param physical_block_size: The physical_block_size of this StorageVirtualDrive. :type: str """ self._physical_block_size = physical_block_size @property def presence(self): """ Gets the presence of this StorageVirtualDrive. :return: The presence of this StorageVirtualDrive. :rtype: str """ return self._presence @presence.setter def presence(self, presence): """ Sets the presence of this StorageVirtualDrive. :param presence: The presence of this StorageVirtualDrive. :type: str """ self._presence = presence @property def read_policy(self): """ Gets the read_policy of this StorageVirtualDrive. :return: The read_policy of this StorageVirtualDrive. :rtype: str """ return self._read_policy @read_policy.setter def read_policy(self, read_policy): """ Sets the read_policy of this StorageVirtualDrive. :param read_policy: The read_policy of this StorageVirtualDrive. :type: str """ self._read_policy = read_policy @property def security_flags(self): """ Gets the security_flags of this StorageVirtualDrive. :return: The security_flags of this StorageVirtualDrive. :rtype: str """ return self._security_flags @security_flags.setter def security_flags(self, security_flags): """ Sets the security_flags of this StorageVirtualDrive. :param security_flags: The security_flags of this StorageVirtualDrive. :type: str """ self._security_flags = security_flags @property def size(self): """ Gets the size of this StorageVirtualDrive. :return: The size of this StorageVirtualDrive. :rtype: str """ return self._size @size.setter def size(self, size): """ Sets the size of this StorageVirtualDrive. :param size: The size of this StorageVirtualDrive. :type: str """ self._size = size @property def strip_size(self): """ Gets the strip_size of this StorageVirtualDrive. The strip size is the portion of a stripe that resides on a single drive in the drive group, this is measured in KB. :return: The strip_size of this StorageVirtualDrive. :rtype: str """ return self._strip_size @strip_size.setter def strip_size(self, strip_size): """ Sets the strip_size of this StorageVirtualDrive. The strip size is the portion of a stripe that resides on a single drive in the drive group, this is measured in KB. :param strip_size: The strip_size of this StorageVirtualDrive. :type: str """ self._strip_size = strip_size @property def type(self): """ Gets the type of this StorageVirtualDrive. :return: The type of this StorageVirtualDrive. :rtype: str """ return self._type @type.setter def type(self, type): """ Sets the type of this StorageVirtualDrive. :param type: The type of this StorageVirtualDrive. :type: str """ self._type = type @property def uuid(self): """ Gets the uuid of this StorageVirtualDrive. :return: The uuid of this StorageVirtualDrive. :rtype: str """ return self._uuid @uuid.setter def uuid(self, uuid): """ Sets the uuid of this StorageVirtualDrive. :param uuid: The uuid of this StorageVirtualDrive. :type: str """ self._uuid = uuid @property def vendor_uuid(self): """ Gets the vendor_uuid of this StorageVirtualDrive. :return: The vendor_uuid of this StorageVirtualDrive. 
:rtype: str """ return self._vendor_uuid @vendor_uuid.setter def vendor_uuid(self, vendor_uuid): """ Sets the vendor_uuid of this StorageVirtualDrive. :param vendor_uuid: The vendor_uuid of this StorageVirtualDrive. :type: str """ self._vendor_uuid = vendor_uuid @property def virtual_drive_id(self): """ Gets the virtual_drive_id of this StorageVirtualDrive. :return: The virtual_drive_id of this StorageVirtualDrive. :rtype: str """ return self._virtual_drive_id @virtual_drive_id.setter def virtual_drive_id(self, virtual_drive_id): """ Sets the virtual_drive_id of this StorageVirtualDrive. :param virtual_drive_id: The virtual_drive_id of this StorageVirtualDrive. :type: str """ self._virtual_drive_id = virtual_drive_id @property def physical_disk_usages(self): """ Gets the physical_disk_usages of this StorageVirtualDrive. :return: The physical_disk_usages of this StorageVirtualDrive. :rtype: list[StoragePhysicalDiskUsageRef] """ return self._physical_disk_usages @physical_disk_usages.setter def physical_disk_usages(self, physical_disk_usages): """ Sets the physical_disk_usages of this StorageVirtualDrive. :param physical_disk_usages: The physical_disk_usages of this StorageVirtualDrive. :type: list[StoragePhysicalDiskUsageRef] """ self._physical_disk_usages = physical_disk_usages @property def registered_device(self): """ Gets the registered_device of this StorageVirtualDrive. The Device to which this Managed Object is associated. :return: The registered_device of this StorageVirtualDrive. :rtype: AssetDeviceRegistrationRef """ return self._registered_device @registered_device.setter def registered_device(self, registered_device): """ Sets the registered_device of this StorageVirtualDrive. The Device to which this Managed Object is associated. :param registered_device: The registered_device of this StorageVirtualDrive. :type: AssetDeviceRegistrationRef """ self._registered_device = registered_device @property def storage_controller(self): """ Gets the storage_controller of this StorageVirtualDrive. A collection of references to the [storage.Controller](mo://storage.Controller) Managed Object. When this managed object is deleted, the referenced [storage.Controller](mo://storage.Controller) MO unsets its reference to this deleted MO. :return: The storage_controller of this StorageVirtualDrive. :rtype: StorageControllerRef """ return self._storage_controller @storage_controller.setter def storage_controller(self, storage_controller): """ Sets the storage_controller of this StorageVirtualDrive. A collection of references to the [storage.Controller](mo://storage.Controller) Managed Object. When this managed object is deleted, the referenced [storage.Controller](mo://storage.Controller) MO unsets its reference to this deleted MO. :param storage_controller: The storage_controller of this StorageVirtualDrive. :type: StorageControllerRef """ self._storage_controller = storage_controller @property def vd_member_eps(self): """ Gets the vd_member_eps of this StorageVirtualDrive. It is a reference to LocalDisk to build up a VirtualDrive. :return: The vd_member_eps of this StorageVirtualDrive. :rtype: list[StorageVdMemberEpRef] """ return self._vd_member_eps @vd_member_eps.setter def vd_member_eps(self, vd_member_eps): """ Sets the vd_member_eps of this StorageVirtualDrive. It is a reference to LocalDisk to build up a VirtualDrive. :param vd_member_eps: The vd_member_eps of this StorageVirtualDrive. 
:type: list[StorageVdMemberEpRef] """ self._vd_member_eps = vd_member_eps @property def virtual_drive_extension(self): """ Gets the virtual_drive_extension of this StorageVirtualDrive. A collection of references to the [storage.VirtualDriveExtension](mo://storage.VirtualDriveExtension) Managed Object. When this managed object is deleted, the referenced [storage.VirtualDriveExtension](mo://storage.VirtualDriveExtension) MO unsets its reference to this deleted MO. :return: The virtual_drive_extension of this StorageVirtualDrive. :rtype: StorageVirtualDriveExtensionRef """ return self._virtual_drive_extension @virtual_drive_extension.setter def virtual_drive_extension(self, virtual_drive_extension): """ Sets the virtual_drive_extension of this StorageVirtualDrive. A collection of references to the [storage.VirtualDriveExtension](mo://storage.VirtualDriveExtension) Managed Object. When this managed object is deleted, the referenced [storage.VirtualDriveExtension](mo://storage.VirtualDriveExtension) MO unsets its reference to this deleted MO. :param virtual_drive_extension: The virtual_drive_extension of this StorageVirtualDrive. :type: StorageVirtualDriveExtensionRef """ self._virtual_drive_extension = virtual_drive_extension def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ if not isinstance(other, StorageVirtualDrive): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
[ "six.iteritems" ]
[((45213, 45242), 'six.iteritems', 'iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (45222, 45242), False, 'from six import iteritems\n')]
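The to_dict() in the record above walks swagger_types via six.iteritems and recursively serializes nested objects; the following self-contained sketch shows the same serialization pattern with a simplified stand-in class, not the real generated SDK model.

from six import iteritems

class _DemoModel(object):
    # Simplified stand-in for a swagger-generated model: attribute name -> declared type.
    swagger_types = {"virtual_drive_id": "str", "physical_disk_usages": "list[str]"}

    def __init__(self):
        self.virtual_drive_id = "0"
        self.physical_disk_usages = ["disk-1", "disk-2"]

    def to_dict(self):
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            # Nested generated objects expose their own to_dict(); plain values pass through.
            result[attr] = value.to_dict() if hasattr(value, "to_dict") else value
        return result

print(_DemoModel().to_dict())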
from typing import Optional, List from data.downloads import Download from data.packages import Package from data.release_history import ReleaseHistory from data.users import User class PackageService: @classmethod def package_count(cls): return Package.objects().count() @classmethod def release_count(cls): return ReleaseHistory.objects().count() @classmethod def user_count(cls): return User.objects().count() @classmethod def download_count(cls): return Download.objects().count() @classmethod def find_package_by_name(cls, name): package = Package.objects(name=name).first() return package @classmethod def latest_release(cls, package: Package) -> Optional[ReleaseHistory]: release = ReleaseHistory \ .objects(package_id=package.id) \ .order_by('-created') \ .first() return release @classmethod def find_maintainers(cls, package: Package) -> List[User]: users = User.objects(id__in=package.maintainers) return list(users) @classmethod def popular_packages(cls, limit: int) -> List[Package]: packages = Package.objects()\ .order_by('-total_downloads')\ .limit(limit) return list(packages)
[ "data.packages.Package.objects", "data.users.User.objects", "data.downloads.Download.objects", "data.release_history.ReleaseHistory.objects" ]
[((1042, 1082), 'data.users.User.objects', 'User.objects', ([], {'id__in': 'package.maintainers'}), '(id__in=package.maintainers)\n', (1054, 1082), False, 'from data.users import User\n'), ((265, 282), 'data.packages.Package.objects', 'Package.objects', ([], {}), '()\n', (280, 282), False, 'from data.packages import Package\n'), ((352, 376), 'data.release_history.ReleaseHistory.objects', 'ReleaseHistory.objects', ([], {}), '()\n', (374, 376), False, 'from data.release_history import ReleaseHistory\n'), ((443, 457), 'data.users.User.objects', 'User.objects', ([], {}), '()\n', (455, 457), False, 'from data.users import User\n'), ((528, 546), 'data.downloads.Download.objects', 'Download.objects', ([], {}), '()\n', (544, 546), False, 'from data.downloads import Download\n'), ((632, 658), 'data.packages.Package.objects', 'Package.objects', ([], {'name': 'name'}), '(name=name)\n', (647, 658), False, 'from data.packages import Package\n'), ((801, 846), 'data.release_history.ReleaseHistory.objects', 'ReleaseHistory.objects', ([], {'package_id': 'package.id'}), '(package_id=package.id)\n', (823, 846), False, 'from data.release_history import ReleaseHistory\n'), ((1207, 1224), 'data.packages.Package.objects', 'Package.objects', ([], {}), '()\n', (1222, 1224), False, 'from data.packages import Package\n')]
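A short usage sketch for the service above; the import path is an assumption, and a MongoEngine connection is assumed to have been established elsewhere in the application.

from services.package_service import PackageService  # assumed module path

print("packages:", PackageService.package_count())
pkg = PackageService.find_package_by_name("flask")      # any package name
if pkg is not None:
    latest = PackageService.latest_release(pkg)
    maintainers = PackageService.find_maintainers(pkg)
    print(pkg.name, latest, len(maintainers))
for p in PackageService.popular_packages(limit=5):
    print(p.name, p.total_downloads)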
#!/usr/bin/python3 """Various utils to check the integrity of the movesGraph""" import argparse import datetime import os.path import logging LOG_FILENAME = '/tmp/yoga.log' logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG) import moves import strengthaerobics import stretches def sloppyRun(func, *args, **kwargs): """Runs a function, catching all exceptions and writing them to a log file.""" try: return func(*args, **kwargs) except: logging.exception(func.__name__ + str(args) + str(kwargs)) def log_isinstance(ob, t, context=None, level = logging.ERROR): if not isinstance(ob, t): logging.log(level, repr(ob) + " is not " + repr(t) + (" :" + str(context) if context is not None else "")) return False return True def generateAllMoves(d = 1, a = 0, s = 0): movesGraph = moves.generateMoves(d) sloppyRun(stretches.defineStretches, movesGraph, difficulty=d) sloppyRun(moves.linkMain, movesGraph, difficulty=d) if a: sloppyRun(strengthaerobics.linkAerobics, movesGraph, d, a) if s: sloppyRun(strengthaerobics.linkStrength, movesGraph, d, s) if a*s: sloppyRun(strengthaerobics.linkStrengthAerobics, movesGraph, d, s, a) sloppyRun(moves.unlinkWarmup, movesGraph, [], d) sloppyRun(moves.linkHarder, movesGraph, d) if s: sloppyRun(strengthaerobics.linkStrengthHarder, movesGraph, d, s) sloppyRun(moves.linkEnding, movesGraph) sloppyRun(stretches.linkCooldown, movesGraph) if s: sloppyRun(strengthaerobics.linkStrengthCooldown, movesGraph, d, s) if a: sloppyRun(strengthaerobics.linkAerobicsCooldown, movesGraph, d, a) sloppyRun(moves.linkSavasana, movesGraph, difficulty = d) print("%d moves discovered" % len(movesGraph)) return movesGraph def checkChildType(move): if len(move.nextMove) == 0: logging.error(str(move) + " has no children") for m in move: log_isinstance(m, moves.Move, context=move) def checkGraph(movesGraph): for i in movesGraph.values(): if isinstance(i, tuple): for j in i: if log_isinstance(j, moves.Move): checkChildType(j) elif log_isinstance(i, moves.Move): checkChildType(i) def checkConnected(movesGraph): allmoves = set() linkedmoves = set() for i in movesGraph.values(): if isinstance(i, tuple): for j in i: allmoves.add(j) linkedmoves.update(j) else: allmoves.add(i) linkedmoves.update(i) return allmoves.difference(linkedmoves) def checkLog(filename): if os.path.isfile(filename): print("Error file exists:", filename) if __name__== "__main__": parser = argparse.ArgumentParser() parser.add_argument("-a", "--aerobics", dest="a", help="Insert aerobics moves", action='count', default=0) parser.add_argument("-s", "--strength", dest="s", help="Insert strength moves", action='count', default=0) parser.add_argument("-d", "--difficulty", dest="d", help="Difficulty: larger number=harder", default=1, type=int, choices=[-1,0,1,2]) args = parser.parse_args() logging.info("Running with settings: " + str(vars(args))) logging.info("Run time: " + datetime.datetime.now().strftime('%a, %d %b %Y %H:%M:%S')) print("Generating moves graph") movesGraph = generateAllMoves(**vars(args)) print("Checking graph") checkGraph(movesGraph) m = checkConnected(movesGraph) if m: logging.debug("There is no way to get to the following moves:\n " + "\n ".join(repr(i) for i in sorted(m))) checkLog(LOG_FILENAME)
[ "moves.generateMoves", "datetime.datetime.now", "argparse.ArgumentParser", "logging.basicConfig" ]
[((175, 238), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'LOG_FILENAME', 'level': 'logging.DEBUG'}), '(filename=LOG_FILENAME, level=logging.DEBUG)\n', (194, 238), False, 'import logging\n'), ((852, 874), 'moves.generateMoves', 'moves.generateMoves', (['d'], {}), '(d)\n', (871, 874), False, 'import moves\n'), ((2760, 2785), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2783, 2785), False, 'import argparse\n'), ((3271, 3294), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3292, 3294), False, 'import datetime\n')]
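The checker above can also be driven programmatically rather than via its CLI; a sketch, assuming the script is saved as check_moves.py (placeholder name) next to the moves, strengthaerobics and stretches modules it imports.

import check_moves  # placeholder module name for the script above

graph = check_moves.generateAllMoves(d=2, a=1, s=1)
check_moves.checkGraph(graph)                      # logs childless moves / bad child types
unreachable = check_moves.checkConnected(graph)
if unreachable:
    print("no path to:", sorted(repr(m) for m in unreachable))
check_moves.checkLog(check_moves.LOG_FILENAME)     # warns if /tmp/yoga.log already exists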
#!/usr/bin/env import datetime import starter import pytest import pathlib import sys @pytest.mark.skipif(pathlib.Path(sys.prefix) != pathlib.Path(r"C:\ProgramData\Anaconda3\envs\starter"), reason="Test only in native enironment") def test_date(): starter_date = datetime.datetime.strptime(starter.__date__, "%Y-%m-%d") today = datetime.datetime.today() assert starter_date.year == today.year, "date.year in `__init__.py` is not current" assert starter_date.month == today.month, "date.month in `__init__.py` is not current" assert starter_date.day == today.day, "date.day in `__init__.py` is not current" def test_author(): assert starter.__author__ assert isinstance(starter.__author__, str) def test_author_email(): assert starter.__author_email__ assert isinstance(starter.__author_email__, str) assert "@" in starter.__author_email__
[ "datetime.datetime.strptime", "datetime.datetime.today", "pathlib.Path" ]
[((271, 327), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['starter.__date__', '"""%Y-%m-%d"""'], {}), "(starter.__date__, '%Y-%m-%d')\n", (297, 327), False, 'import datetime\n'), ((340, 365), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (363, 365), False, 'import datetime\n'), ((110, 134), 'pathlib.Path', 'pathlib.Path', (['sys.prefix'], {}), '(sys.prefix)\n', (122, 134), False, 'import pathlib\n'), ((138, 195), 'pathlib.Path', 'pathlib.Path', (['"""C:\\\\ProgramData\\\\Anaconda3\\\\envs\\\\starter"""'], {}), "('C:\\\\ProgramData\\\\Anaconda3\\\\envs\\\\starter')\n", (150, 195), False, 'import pathlib\n')]
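These tests read module-level metadata from the package's __init__.py; a hypothetical example of what that file would need to contain (placeholder values only, and __date__ must match the current day for test_date's year/month/day checks to pass).

# starter/__init__.py -- hypothetical contents, values are placeholders
__author__ = "Jane Developer"
__author_email__ = "jane.developer@example.com"
__date__ = "2024-01-01"   # refreshed on release day so test_date passes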
import pytvmaze from pynab import log import pynab.ids TVMAZE_SEARCH_URL = ' http://api.tvmaze.com/search/shows' NAME = 'TVMAZE' def search(data): """ Search TVMaze for Show Info. :param data: show data :return: show details """ year = data.get('year') country = data.get('country') clean_name = pynab.ids.clean_name(data.get('name')) log.debug('tvmaze: attempting to find "{}" online'.format(clean_name)) # code contributed by srob650 (https://github.com/srob650) showname = '' if year: showname = clean_name[:-5] if country: showname = clean_name.split(country)[0].strip() if not year or country: showname = clean_name maze_show = None tvm = pytvmaze.TVMaze() try: maze_show = tvm.get_show(show_name=showname, show_year=year, show_country=country) except Exception as e: log.debug('tvmaze: exception: {}'.format(e)) if maze_show: log.debug('tvmaze: returning show - {} with id - {}'.format(maze_show.name, maze_show.id)) return maze_show.id else: log.debug('tvmaze: No show found') return None
[ "pynab.log.debug", "pytvmaze.TVMaze" ]
[((747, 764), 'pytvmaze.TVMaze', 'pytvmaze.TVMaze', ([], {}), '()\n', (762, 764), False, 'import pytvmaze\n'), ((1110, 1144), 'pynab.log.debug', 'log.debug', (['"""tvmaze: No show found"""'], {}), "('tvmaze: No show found')\n", (1119, 1144), False, 'from pynab import log\n')]
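A usage sketch for the lookup above; the module name is a placeholder, the example values are illustrative, and the dict keys ('name', 'year', 'country') are the ones the function reads via data.get().

import tvmaze_lookup  # placeholder name for the module above

show_id = tvmaze_lookup.search({"name": "Stranger Things 2016", "year": 2016, "country": None})
if show_id is not None:
    print("TVMaze show id:", show_id)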
# -*- coding:utf-8 -*- __author__ = 'yangjian' """ """ import pandas as pd from deeptables.models import DeepTable from deeptables.models.hyper_dt import HyperDT, tiny_dt_space from hypernets.core.callbacks import SummaryCallback, FileStorageLoggingCallback from hypernets.core.searcher import OptimizeDirection from hypernets.searchers import RandomSearcher from sklearn.datasets import load_boston from sklearn.model_selection import train_test_split from .. import homedir class Test_HyperDT_Regression(): def test_boston(self): print("Loading datasets...") boston_dataset = load_boston() df_train = pd.DataFrame(boston_dataset.data) df_train.columns = boston_dataset.feature_names self.y = pd.Series(boston_dataset.target) self.X = df_train self.X_train, \ self.X_test, \ self.y_train, \ self.y_test = train_test_split(self.X, self.y, test_size=0.2, random_state=42) rs = RandomSearcher(tiny_dt_space, optimize_direction=OptimizeDirection.Maximize, ) hdt = HyperDT(rs, callbacks=[SummaryCallback(), FileStorageLoggingCallback(rs, output_dir=f'{homedir}/hyn_logs')], reward_metric='RootMeanSquaredError', dnn_params={ 'hidden_units': ((256, 0, False), (256, 0, False)), 'dnn_activation': 'relu', }, ) hdt.search(self.X_train, self.y_train, self.X_test, self.y_test, max_trials=3) best_trial = hdt.get_best_trial() estimator = hdt.final_train(best_trial.space_sample, self.X, self.y) score = estimator.predict(self.X_test) result = estimator.evaluate(self.X_test, self.y_test) assert result assert isinstance(estimator.model, DeepTable)
[ "pandas.DataFrame", "hypernets.searchers.RandomSearcher", "hypernets.core.callbacks.SummaryCallback", "sklearn.model_selection.train_test_split", "hypernets.core.callbacks.FileStorageLoggingCallback", "sklearn.datasets.load_boston", "pandas.Series" ]
[((603, 616), 'sklearn.datasets.load_boston', 'load_boston', ([], {}), '()\n', (614, 616), False, 'from sklearn.datasets import load_boston\n'), ((637, 670), 'pandas.DataFrame', 'pd.DataFrame', (['boston_dataset.data'], {}), '(boston_dataset.data)\n', (649, 670), True, 'import pandas as pd\n'), ((744, 776), 'pandas.Series', 'pd.Series', (['boston_dataset.target'], {}), '(boston_dataset.target)\n', (753, 776), True, 'import pandas as pd\n'), ((897, 961), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.X', 'self.y'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(self.X, self.y, test_size=0.2, random_state=42)\n', (913, 961), False, 'from sklearn.model_selection import train_test_split\n'), ((976, 1052), 'hypernets.searchers.RandomSearcher', 'RandomSearcher', (['tiny_dt_space'], {'optimize_direction': 'OptimizeDirection.Maximize'}), '(tiny_dt_space, optimize_direction=OptimizeDirection.Maximize)\n', (990, 1052), False, 'from hypernets.searchers import RandomSearcher\n'), ((1114, 1131), 'hypernets.core.callbacks.SummaryCallback', 'SummaryCallback', ([], {}), '()\n', (1129, 1131), False, 'from hypernets.core.callbacks import SummaryCallback, FileStorageLoggingCallback\n'), ((1133, 1197), 'hypernets.core.callbacks.FileStorageLoggingCallback', 'FileStorageLoggingCallback', (['rs'], {'output_dir': 'f"""{homedir}/hyn_logs"""'}), "(rs, output_dir=f'{homedir}/hyn_logs')\n", (1159, 1197), False, 'from hypernets.core.callbacks import SummaryCallback, FileStorageLoggingCallback\n')]
import torch import torch.nn as nn from utils import v_wrap, set_init, push_and_pull, record import torch.nn.functional as F import torch.multiprocessing as mp from shared_adam import SharedAdam import gym import os import argparse import matplotlib.pyplot as plt from simulations.cartpole_sim import Simulation os.environ["OMP_NUM_THREADS"] = "1" UPDATE_GLOBAL_ITER = 40 GAMMA = 0.9 MAX_EP = 3000 parser = argparse.ArgumentParser() parser.add_argument('--test', action='store_true', help='run testing') parser.add_argument('--model_path', type=str, default='models/model_discrete.pth', help='path to the model') args = parser.parse_args() class DiscreteNet(nn.Module): def __init__(self, s_dim, a_dim): super(DiscreteNet, self).__init__() self.s_dim = s_dim self.a_dim = a_dim self.pi1 = nn.Linear(s_dim, 200) self.pi2 = nn.Linear(200, a_dim) self.v1 = nn.Linear(s_dim, 100) self.v2 = nn.Linear(100, 1) set_init([self.pi1, self.pi2, self.v1, self.v2]) self.distribution = torch.distributions.Categorical def forward(self, x): pi1 = F.relu6(self.pi1(x)) logits = self.pi2(pi1) v1 = F.relu6(self.v1(x)) values = self.v2(v1) return logits, values def choose_action(self, s): self.eval() logits, _ = self.forward(s) prob = F.softmax(logits, dim=1).data m = self.distribution(prob) return m.sample().numpy()[0] def loss_func(self, s, a, v_t): self.train() logits, values = self.forward(s) td = v_t - values c_loss = td.pow(2) probs = F.softmax(logits, dim=1) m = self.distribution(probs) exp_v = m.log_prob(a) * td.detach().squeeze() a_loss = -exp_v total_loss = (c_loss + a_loss).mean() return total_loss class Worker(mp.Process): def __init__(self, gnet, opt, global_ep, global_ep_r, res_queue, name): super(Worker, self).__init__() self.name = 'w%i' % name self.g_ep, self.g_ep_r, self.res_queue = global_ep, global_ep_r, res_queue self.gnet, self.opt = gnet, opt self.env = Simulation() self.lnet = DiscreteNet(self.env.state_space, self.env.action_space) # local network def run(self): total_step = 1 while self.g_ep.value < MAX_EP: s = self.env.reset_env() buffer_s, buffer_a, buffer_r = [], [], [] ep_r = 0. 
while True: if self.name == 'w0': # self.env.show() pass a = self.lnet.choose_action(v_wrap(s[None, :])) s_, r, done, _ = self.env.move(a) if done: r = -1 ep_r += r buffer_a.append(a) buffer_s.append(s) buffer_r.append(r) if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net # sync push_and_pull(self.opt, self.lnet, self.gnet, done, s_, buffer_s, buffer_a, buffer_r, GAMMA) buffer_s, buffer_a, buffer_r = [], [], [] if done: # done and print information record(self.g_ep, self.g_ep_r, ep_r, self.res_queue, self.name) break s = s_ total_step += 1 self.res_queue.put(None) def run_test (gnet, opt): env = Simulation() lnet = gnet s = env.reset_env() # Reset the env buffer_s, buffer_a, buffer_r = [], [], [] ep_r = 0 total_step = 1 while True: env.show() a = lnet.choose_action(v_wrap(s[None, :])) # Choose next action to perform, left or right by what magnitude s_, r, done, _ = env.move(a) # Perform the action and record the state and rewards # Also take the boolean of whether the sim is done ep_r += r buffer_a.append(a) # Buffer for action buffer_s.append(s) # Buffer for state buffer_r.append(r) # Buffer for rewards if total_step % UPDATE_GLOBAL_ITER == 0 or done: # TODO: Test if we really need the feedback training, maybe can remove this push_and_pull(opt, lnet, gnet, done, s_, buffer_s, buffer_a, buffer_r, GAMMA) buffer_s, buffer_a, buffer_r = [], [], [] if done: print (total_step) s = env.reset_env() # Reset the env total_step = 0 s = s_ # Set current state to the new state caused by action total_step += 1 if __name__ == "__main__": sim = Simulation() gnet = DiscreteNet(sim.state_space, sim.action_space) # global network if args.test: gnet.load_state_dict(torch.load(args.model_path)) # Load the previously trained network gnet.share_memory() # share the global parameters in multiprocessing opt = SharedAdam(gnet.parameters(), lr=0.0001) # global optimizer global_ep, global_ep_r, res_queue = mp.Value('i', 0), mp.Value('d', 0.), mp.Queue() if args.test: run_test(gnet, opt) else: # parallel training workers = [Worker(gnet, opt, global_ep, global_ep_r, res_queue, i) for i in range(mp.cpu_count())] [w.start() for w in workers] res = [] # record episode reward to plot while True: r = res_queue.get() if r is not None: res.append(r) else: break [w.join() for w in workers] print ("Saving model...") torch.save(gnet.state_dict(), args.model_path) plt.plot(res) plt.ylabel('Moving average ep reward') plt.xlabel('Step') plt.show()
[ "matplotlib.pyplot.show", "argparse.ArgumentParser", "matplotlib.pyplot.plot", "utils.push_and_pull", "torch.load", "utils.set_init", "torch.multiprocessing.cpu_count", "utils.v_wrap", "torch.nn.functional.softmax", "utils.record", "simulations.cartpole_sim.Simulation", "torch.multiprocessing.Queue", "torch.nn.Linear", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "torch.multiprocessing.Value" ]
[((410, 435), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (433, 435), False, 'import argparse\n'), ((3501, 3513), 'simulations.cartpole_sim.Simulation', 'Simulation', ([], {}), '()\n', (3511, 3513), False, 'from simulations.cartpole_sim import Simulation\n'), ((4694, 4706), 'simulations.cartpole_sim.Simulation', 'Simulation', ([], {}), '()\n', (4704, 4706), False, 'from simulations.cartpole_sim import Simulation\n'), ((830, 851), 'torch.nn.Linear', 'nn.Linear', (['s_dim', '(200)'], {}), '(s_dim, 200)\n', (839, 851), True, 'import torch.nn as nn\n'), ((871, 892), 'torch.nn.Linear', 'nn.Linear', (['(200)', 'a_dim'], {}), '(200, a_dim)\n', (880, 892), True, 'import torch.nn as nn\n'), ((911, 932), 'torch.nn.Linear', 'nn.Linear', (['s_dim', '(100)'], {}), '(s_dim, 100)\n', (920, 932), True, 'import torch.nn as nn\n'), ((951, 968), 'torch.nn.Linear', 'nn.Linear', (['(100)', '(1)'], {}), '(100, 1)\n', (960, 968), True, 'import torch.nn as nn\n'), ((977, 1025), 'utils.set_init', 'set_init', (['[self.pi1, self.pi2, self.v1, self.v2]'], {}), '([self.pi1, self.pi2, self.v1, self.v2])\n', (985, 1025), False, 'from utils import v_wrap, set_init, push_and_pull, record\n'), ((1655, 1679), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (1664, 1679), True, 'import torch.nn.functional as F\n'), ((2185, 2197), 'simulations.cartpole_sim.Simulation', 'Simulation', ([], {}), '()\n', (2195, 2197), False, 'from simulations.cartpole_sim import Simulation\n'), ((5105, 5121), 'torch.multiprocessing.Value', 'mp.Value', (['"""i"""', '(0)'], {}), "('i', 0)\n", (5113, 5121), True, 'import torch.multiprocessing as mp\n'), ((5123, 5141), 'torch.multiprocessing.Value', 'mp.Value', (['"""d"""', '(0.0)'], {}), "('d', 0.0)\n", (5131, 5141), True, 'import torch.multiprocessing as mp\n'), ((5142, 5152), 'torch.multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (5150, 5152), True, 'import torch.multiprocessing as mp\n'), ((5738, 5751), 'matplotlib.pyplot.plot', 'plt.plot', (['res'], {}), '(res)\n', (5746, 5751), True, 'import matplotlib.pyplot as plt\n'), ((5760, 5798), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Moving average ep reward"""'], {}), "('Moving average ep reward')\n", (5770, 5798), True, 'import matplotlib.pyplot as plt\n'), ((5807, 5825), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Step"""'], {}), "('Step')\n", (5817, 5825), True, 'import matplotlib.pyplot as plt\n'), ((5834, 5844), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5842, 5844), True, 'import matplotlib.pyplot as plt\n'), ((1375, 1399), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (1384, 1399), True, 'import torch.nn.functional as F\n'), ((3721, 3739), 'utils.v_wrap', 'v_wrap', (['s[None, :]'], {}), '(s[None, :])\n', (3727, 3739), False, 'from utils import v_wrap, set_init, push_and_pull, record\n'), ((4274, 4351), 'utils.push_and_pull', 'push_and_pull', (['opt', 'lnet', 'gnet', 'done', 's_', 'buffer_s', 'buffer_a', 'buffer_r', 'GAMMA'], {}), '(opt, lnet, gnet, done, s_, buffer_s, buffer_a, buffer_r, GAMMA)\n', (4287, 4351), False, 'from utils import v_wrap, set_init, push_and_pull, record\n'), ((4837, 4864), 'torch.load', 'torch.load', (['args.model_path'], {}), '(args.model_path)\n', (4847, 4864), False, 'import torch\n'), ((2656, 2674), 'utils.v_wrap', 'v_wrap', (['s[None, :]'], {}), '(s[None, :])\n', (2662, 2674), False, 'from utils import v_wrap, set_init, push_and_pull, record\n'), ((3043, 3139), 
'utils.push_and_pull', 'push_and_pull', (['self.opt', 'self.lnet', 'self.gnet', 'done', 's_', 'buffer_s', 'buffer_a', 'buffer_r', 'GAMMA'], {}), '(self.opt, self.lnet, self.gnet, done, s_, buffer_s, buffer_a,\n buffer_r, GAMMA)\n', (3056, 3139), False, 'from utils import v_wrap, set_init, push_and_pull, record\n'), ((5329, 5343), 'torch.multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (5341, 5343), True, 'import torch.multiprocessing as mp\n'), ((3282, 3345), 'utils.record', 'record', (['self.g_ep', 'self.g_ep_r', 'ep_r', 'self.res_queue', 'self.name'], {}), '(self.g_ep, self.g_ep_r, ep_r, self.res_queue, self.name)\n', (3288, 3345), False, 'from utils import v_wrap, set_init, push_and_pull, record\n')]
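The worker hands its buffers to utils.push_and_pull, which is not included in this record; as an illustration of the n-step discounted-return bookkeeping such a helper typically performs with buffer_r and GAMMA, here is a standalone sketch (an assumption about what push_and_pull does, not its actual code).

GAMMA = 0.9

def discounted_targets(buffer_r, bootstrap_value):
    # Work backwards through the rewards: v_t = r_t + GAMMA * v_{t+1},
    # seeded with 0 when the episode ended, or V(s') from the critic otherwise.
    targets = []
    running = bootstrap_value
    for r in reversed(buffer_r):
        running = r + GAMMA * running
        targets.append(running)
    targets.reverse()
    return targets

print(discounted_targets([1.0, 1.0, -1.0], bootstrap_value=0.0))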
import os from pathlib import Path from argparse import ArgumentParser import duplicates as du _parser = ArgumentParser( 'Utility for parsing a directory, and finding duplicates.' ) _parser.add_argument( '-d', '--directory', help='Directory to scan recursively. ' 'Default is current directory', type=str, required=True, default=[os.getcwd()], nargs=1, ) _parser.add_argument( '-l', '--memory_limit', help='Memory limit for the parser. In bytes. Default is 2147483648 (2GB).', type=int, required=False, default=[2147483648], ) _parser.add_argument( '--dashboard', help='Creates a dashboard of the data. ' 'Specify a file name to which HTML will be exported.', type=str, required=False, nargs=1, ) _args = _parser.parse_args() _directory = _args.directory[0] _limit = _args.memory_limit[0] if _args.dashboard: _generate_dashboard = True _dashboard_output_file = Path(_args.dashboard[0]).resolve() else: _generate_dashboard = False _dashboard_output_file = None if __name__ == "__main__": parser = du.DuplicateParser(_directory, _limit) ext = du.Extractor(parser) duplicates = ext.get_duplicates() ext.clean_and_overwrite_dataframe() df = ext.get_df() if _generate_dashboard: dashboard = du.Dashboard(df) dashboard.generate(output_file=_dashboard_output_file, data=duplicates) print('duplicates: ', duplicates)
[ "duplicates.DuplicateParser", "argparse.ArgumentParser", "duplicates.Dashboard", "os.getcwd", "duplicates.Extractor", "pathlib.Path" ]
[((108, 182), 'argparse.ArgumentParser', 'ArgumentParser', (['"""Utility for parsing a directory, and finding duplicates."""'], {}), "('Utility for parsing a directory, and finding duplicates.')\n", (122, 182), False, 'from argparse import ArgumentParser\n'), ((1082, 1120), 'duplicates.DuplicateParser', 'du.DuplicateParser', (['_directory', '_limit'], {}), '(_directory, _limit)\n', (1100, 1120), True, 'import duplicates as du\n'), ((1132, 1152), 'duplicates.Extractor', 'du.Extractor', (['parser'], {}), '(parser)\n', (1144, 1152), True, 'import duplicates as du\n'), ((1303, 1319), 'duplicates.Dashboard', 'du.Dashboard', (['df'], {}), '(df)\n', (1315, 1319), True, 'import duplicates as du\n'), ((359, 370), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (368, 370), False, 'import os\n'), ((934, 958), 'pathlib.Path', 'Path', (['_args.dashboard[0]'], {}), '(_args.dashboard[0])\n', (938, 958), False, 'from pathlib import Path\n')]
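Besides the CLI defined above (-d/--directory, -l/--memory_limit, --dashboard), the same pipeline can be driven programmatically; a sketch assuming the duplicates package is importable.

from pathlib import Path
import duplicates as du

parser = du.DuplicateParser("/home/user/Downloads", 2 * 1024 ** 3)   # 2 GB memory limit
ext = du.Extractor(parser)
dups = ext.get_duplicates()
ext.clean_and_overwrite_dataframe()
du.Dashboard(ext.get_df()).generate(output_file=Path("report.html").resolve(), data=dups)
print("duplicates:", dups)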
from sys import stderr import pandas as pd import pytest import spacy from atap_widgets.conversation import Conversation # Workaround for spacy models being difficult to install # via pip try: nlp = spacy.load("en_core_web_sm") except OSError: print( "Downloading language model for spaCy\n" "(don't worry, this will only happen once)", file=stderr, ) from spacy.cli import download download("en_core_web_sm") @pytest.fixture(scope="session") def sherlock_holmes_five_sentences(): return """To <NAME> she is always the woman. I have seldom heard him mention her under any other name. In his eyes she eclipses and predominates the whole of her sex. It was not that he felt any emotion akin to love for <NAME>. All emotions, and that one particularly, were abhorrent to his cold, precise but admirably balanced mind. """ @pytest.fixture(scope="session") def basic_spacy_nlp(): return spacy.load("en_core_web_sm") @pytest.fixture def sherlock_holmes_doc(sherlock_holmes_five_sentences, basic_spacy_nlp): return basic_spacy_nlp(sherlock_holmes_five_sentences) @pytest.fixture def sherlock_holmes_dummy_df(sherlock_holmes_doc): """ DataFrame, one row per sentence from the Sherlock Holmes example """ df = pd.DataFrame( { "text": [str(sentence) for sentence in sherlock_holmes_doc.sents], "speaker": list("ABABA"), } ) return df @pytest.fixture def sherlock_holmes_dummy_conversation(sherlock_holmes_dummy_df): """ Treat each sentence from the Sherlock Holmes example as a turn in a conversation, for checking contingency counts etc. """ return Conversation(sherlock_holmes_dummy_df) @pytest.fixture def sortable_text_df(): df = pd.DataFrame( { "text": ["The pen is red", "My pen is green", "Your pen is blue"], "text_id": [1, 2, 3], } ) return df
[ "pandas.DataFrame", "spacy.cli.download", "atap_widgets.conversation.Conversation", "pytest.fixture", "spacy.load" ]
[((463, 494), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (477, 494), False, 'import pytest\n'), ((890, 921), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (904, 921), False, 'import pytest\n'), ((208, 236), 'spacy.load', 'spacy.load', (['"""en_core_web_sm"""'], {}), "('en_core_web_sm')\n", (218, 236), False, 'import spacy\n'), ((956, 984), 'spacy.load', 'spacy.load', (['"""en_core_web_sm"""'], {}), "('en_core_web_sm')\n", (966, 984), False, 'import spacy\n'), ((1708, 1746), 'atap_widgets.conversation.Conversation', 'Conversation', (['sherlock_holmes_dummy_df'], {}), '(sherlock_holmes_dummy_df)\n', (1720, 1746), False, 'from atap_widgets.conversation import Conversation\n'), ((1798, 1905), 'pandas.DataFrame', 'pd.DataFrame', (["{'text': ['The pen is red', 'My pen is green', 'Your pen is blue'],\n 'text_id': [1, 2, 3]}"], {}), "({'text': ['The pen is red', 'My pen is green',\n 'Your pen is blue'], 'text_id': [1, 2, 3]})\n", (1810, 1905), True, 'import pandas as pd\n'), ((433, 459), 'spacy.cli.download', 'download', (['"""en_core_web_sm"""'], {}), "('en_core_web_sm')\n", (441, 459), False, 'from spacy.cli import download\n')]
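A sketch of tests that could consume these fixtures (pytest injects them by name); the sentence count and speaker pattern are the same assumptions the fixtures above already encode.

def test_doc_has_five_sentences(sherlock_holmes_doc):
    assert len(list(sherlock_holmes_doc.sents)) == 5

def test_speakers_alternate(sherlock_holmes_dummy_df):
    assert list(sherlock_holmes_dummy_df["speaker"]) == ["A", "B", "A", "B", "A"]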
""" This module contains the python script that defines the configuration of the mouse for my Qtile configuration. For more information check: github.com/pablocorbalann/dotfiles/tree/main/qtile """ # Imports (just qtile) from libqtile.config import Drag, Click from libqtile.command import lazy from settings.keys import mod # We can define the mouse as a list of actions, formed by # mods, buttons and results. # TODO: implement a more consistent mouse functionallity level mouse = [ Drag( [mod], "Button1", lazy.window.set_position_floating(), start=lazy.window.get_position() ), Drag( [mod], "Button3", lazy.window.set_size_floating(), start=lazy.window.get_size() ), Click([mod], "Button2", lazy.window.bring_to_front()) ]
[ "libqtile.command.lazy.window.set_size_floating", "libqtile.command.lazy.window.get_size", "libqtile.command.lazy.window.get_position", "libqtile.command.lazy.window.bring_to_front", "libqtile.command.lazy.window.set_position_floating" ]
[((542, 577), 'libqtile.command.lazy.window.set_position_floating', 'lazy.window.set_position_floating', ([], {}), '()\n', (575, 577), False, 'from libqtile.command import lazy\n'), ((679, 710), 'libqtile.command.lazy.window.set_size_floating', 'lazy.window.set_size_floating', ([], {}), '()\n', (708, 710), False, 'from libqtile.command import lazy\n'), ((784, 812), 'libqtile.command.lazy.window.bring_to_front', 'lazy.window.bring_to_front', ([], {}), '()\n', (810, 812), False, 'from libqtile.command import lazy\n'), ((593, 619), 'libqtile.command.lazy.window.get_position', 'lazy.window.get_position', ([], {}), '()\n', (617, 619), False, 'from libqtile.command import lazy\n'), ((726, 748), 'libqtile.command.lazy.window.get_size', 'lazy.window.get_size', ([], {}), '()\n', (746, 748), False, 'from libqtile.command import lazy\n')]
#----------------------------------------------------------------------------- # Copyright (c) 2021, PyInstaller Development Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # # The full license is in the file COPYING.txt, distributed with this software. # # SPDX-License-Identifier: Apache-2.0 #----------------------------------------------------------------------------- import sys import os import inspect _orig_inspect_getsourcefile = inspect.getsourcefile # Provide custom implementation of inspect.getsourcefile() for frozen # applications that properly resolves relative filenames obtained from # object (e.g., inspect stack-frames). See #5963. def _pyi_getsourcefile(object): filename = inspect.getfile(object) if not os.path.isabs(filename): # Check if given filename matches the basename of __main__'s # __file__ main_file = sys.modules['__main__'].__file__ if filename == os.path.basename(main_file): return main_file # If filename ends with .py suffix and does not correspond to # frozen entry-point script, convert it to corresponding .pyc # in sys._MEIPASS if filename.endswith('.py'): filename = os.path.normpath( os.path.join(sys._MEIPASS, filename + 'c')) # Ensure the relative path did not try to jump out of # sys._MEIPASS, just in case... if filename.startswith(sys._MEIPASS): return filename elif filename.startswith(sys._MEIPASS) and filename.endswith('.pyc'): # If filename is already PyInstaller-compatible, prevent any # further processing (i.e., with original implementation) return filename # Use original implementation as a fallback return _orig_inspect_getsourcefile(object) inspect.getsourcefile = _pyi_getsourcefile
[ "inspect.getfile", "os.path.isabs", "os.path.join", "os.path.basename" ]
[((798, 821), 'inspect.getfile', 'inspect.getfile', (['object'], {}), '(object)\n', (813, 821), False, 'import inspect\n'), ((833, 856), 'os.path.isabs', 'os.path.isabs', (['filename'], {}), '(filename)\n', (846, 856), False, 'import os\n'), ((1022, 1049), 'os.path.basename', 'os.path.basename', (['main_file'], {}), '(main_file)\n', (1038, 1049), False, 'import os\n'), ((1341, 1383), 'os.path.join', 'os.path.join', (['sys._MEIPASS', "(filename + 'c')"], {}), "(sys._MEIPASS, filename + 'c')\n", (1353, 1383), False, 'import os\n')]
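A quick check of the patched function: in a frozen app, getsourcefile() should now resolve relative frame filenames to absolute .pyc paths under sys._MEIPASS, while unfrozen runs fall through to the original implementation; a minimal probe sketch.

import inspect

def probe():
    pass

# Prints an absolute path under sys._MEIPASS when frozen, the normal .py path otherwise.
print(inspect.getsourcefile(probe))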
#!/usr/bin/env python from collections import defaultdict import pandas as pd import numpy as np import sys import pprint from Bio import SearchIO import argparse def hmmer_to_df(hmmTbl, only_top_hit=False): """ Takes a table from HMMER 3 and converts it to a Pandas Dataframe Adapted from https://stackoverflow.com/a/62021471 """ attribs = [ 'id', 'evalue'] # can add in more columns hits = defaultdict(list) prev_hit_id = None ## open hmmTbl and extract hits with open(hmmTbl) as handle: for queryresult in SearchIO.parse(handle, 'hmmer3-tab'): for hit in queryresult.hits: # Only record the top hit if only_top_hit and hit.id == prev_hit_id: continue for attrib in attribs: hits[attrib].append(getattr(hit, attrib)) hits['KO'].append(queryresult.id) prev_hit_id = hit.id return pd.DataFrame.from_dict(hits) def main(): parser = argparse.ArgumentParser() parser.add_argument('brite', type=argparse.FileType('r'), help="The brite hierachy level file.") parser.add_argument('hmm_tbls', nargs='+', help='A list of tables from HMMER 3.') parser.add_argument('--consistent-pathways', action='store_true', help='Outputs all the pathways consistently across each output file even if they do not exist at that level.') parser.add_argument('--outprefix', help="The samplename prefix") args = parser.parse_args() levels = ["Level1", "Level2", "Level3"] # load brite database brite_df = pd.read_csv(args.brite, sep='\t') # Loop over the HMMER tables counts_df = [] for hmm_tbl in args.hmm_tbls: hmmer_df = hmmer_to_df( hmm_tbl, only_top_hit=False ) # Select ids for rows with minimum e value idx_evalue_min = hmmer_df.groupby('id')['evalue'].idxmin() # Filter hmmer dataframe with these indexes hmmer_min_e_df = hmmer_df.loc[idx_evalue_min] brite_filtered = brite_df[brite_df['KO'].isin(hmmer_min_e_df.KO)] for level in levels: my_counts_df = brite_filtered[level].value_counts().rename_axis('pathway').reset_index(name='counts') my_counts_df['level'] = level my_counts_df['hmm_tbl'] = hmm_tbl # Store in single dataframe counts_df = my_counts_df if len(counts_df) == 0 else pd.concat( [counts_df, my_counts_df ], ignore_index=True) # Output the counts into text files for level in levels: output_filepath = f"{level}.{args.outprefix}.counts.tsv" print(f"Writing to file {output_filepath}") with open(output_filepath, 'w') as f: # Get pathways for this level so that we can have consistency in the output files even when the counts are zero df_for_pathways = counts_df if args.consistent_pathways else counts_df[ counts_df.level == level ] pathways_for_level = sorted(df_for_pathways.pathway.unique()) headers = ["Pathway"] + args.hmm_tbls f.write( "\t".join(headers) ) f.write( "\n" ) for pathway in pathways_for_level: f.write(f"{pathway}") for hmm_tbl in args.hmm_tbls: filtered = counts_df[ (counts_df.pathway == pathway) & (counts_df.level == level) & (counts_df.hmm_tbl == hmm_tbl) ] count = filtered.counts.sum() f.write( f"\t{count}" ) f.write( "\n" ) if __name__ == "__main__": main()
[ "pandas.DataFrame.from_dict", "argparse.ArgumentParser", "pandas.read_csv", "collections.defaultdict", "Bio.SearchIO.parse", "pandas.concat", "argparse.FileType" ]
[((403, 420), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (414, 420), False, 'from collections import defaultdict\n'), ((837, 865), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['hits'], {}), '(hits)\n', (859, 865), True, 'import pandas as pd\n'), ((890, 915), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (913, 915), False, 'import argparse\n'), ((1454, 1487), 'pandas.read_csv', 'pd.read_csv', (['args.brite'], {'sep': '"""\t"""'}), "(args.brite, sep='\\t')\n", (1465, 1487), True, 'import pandas as pd\n'), ((525, 561), 'Bio.SearchIO.parse', 'SearchIO.parse', (['handle', '"""hmmer3-tab"""'], {}), "(handle, 'hmmer3-tab')\n", (539, 561), False, 'from Bio import SearchIO\n'), ((957, 979), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (974, 979), False, 'import argparse\n'), ((2180, 2235), 'pandas.concat', 'pd.concat', (['[counts_df, my_counts_df]'], {'ignore_index': '(True)'}), '([counts_df, my_counts_df], ignore_index=True)\n', (2189, 2235), True, 'import pandas as pd\n')]
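The parsing helper above can also be used on its own, independent of the CLI (which takes a positional brite table plus one or more HMMER tables, with --outprefix and optional --consistent-pathways); a sketch, assuming the script is importable under a placeholder name and a HMMER3 tabular output file exists.

from ko_counts import hmmer_to_df  # placeholder module name for the script above

hits = hmmer_to_df("sample1.hmm.tbl", only_top_hit=True)   # columns: id, evalue, KO
print(hits.groupby("KO").size().sort_values(ascending=False).head())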
from luminaire.model.base_model import BaseModel, BaseModelHyperParams from luminaire.exploration.data_exploration import DataExploration class WindowDensityHyperParams(BaseModelHyperParams): """ Hyperparameter class for Luminaire Window density model. :param str freq: The frequency of the time-series. Luminaire supports default configuration for 'S', T, '15T', 'H', 'D'. Any other frequency type should be specified as 'custom' and configuration should be set manually. :param float max_missing_train_prop: Maximum proportion of missing observation allowed in the training data. :param bool is_log_transformed: A flag to specify whether to take a log transform of the input data. If the data contain negatives, is_log_transformed is ignored even though it is set to True. :param str baseline_type: A string flag to specify whether to take set a baseline as the previous sub-window from the training data for scoring or to aggregate the overall window as a baseline. Possible values: - "last_window" - "aggregated" :param str detection_method: A string that select between two window testing method. Possible values: - "kldiv" (KL-divergence). This is recommended to be set for high frequency time series such as 'S', 'T' etc. - "sign_test" (Wilcoxon sign rank test). This is recommended to be set for low frequency time series such as 'H', 'D' etc. :param int min_window_length: Minimum size of the scoring window / a stable training sub-window length. .. Note :: This is not the minimum size of the whole training window which is the combination of stable sub-windows. :param int max_window_length: Maximum size of the scoring window / a stable training sub-window length. .. Note :: This is not the maximum size of the whole training window which is the combination of stable sub-windows. :param int window_length: Size of the scoring window / a stable training sub-window length. .. Note :: This is not the size of the whole training window which is the combination of stable sub-windows. :param str detrend_method: A string that select between two stationarizing method. Possible values: - "ma" (moving average based) - "diff" (differencing based). """ def __init__(self, freq=None, max_missing_train_prop=0.1, is_log_transformed=False, baseline_type="aggregated", detection_method=None, min_window_length=None, max_window_length=None, window_length=None, detrend_method='modeling' ): super(WindowDensityHyperParams, self).__init__( model_name="WindowDensityModel", freq=freq, max_missing_train_prop=max_missing_train_prop, is_log_transformed=is_log_transformed, baseline_type=baseline_type, detection_method=detection_method, min_window_length=min_window_length, max_window_length=max_window_length, window_length=window_length, detrend_method=detrend_method ) class WindowDensityModel(BaseModel): """ This model detects anomalous windows using KL divergence (for high frequency data) and Wilcoxon sign rank test (for low frequency data). This default monitoring frequency is set to pandas time frequency type 'T'. :param dict hyper_params: Hyper parameters for Luminaire window density model. See :class:`luminaire.model.window_density.WindowDensityHyperParams` for detailed information. 
:return: Anomaly probability for the execution window and other related model outputs :rtype: list[dict] """ __version__ = "0.1" def __init__(self, hyper_params: WindowDensityHyperParams().params or None, **kwargs): # Specifying the minimum and maximum number of training windows self.min_num_train_windows = 5 self.max_num_train_windows = 10000 self.hyper_params = hyper_params self.sig_level = 0.001 super(WindowDensityModel, self).__init__(**hyper_params, **kwargs) def _volume_shift_detection(self, mean_list=None, sd_list=None, probability_threshold=0.5): """ This function detects any significant shift in the training data volume using a Bayesian change point detection technique. :param list mean_list: The list of means from each training sub-window. :param list sd_list: The list of standard deviations from each training sub-window. :param float probability_threshold: Threshold for the probability value to be flagged as a change point. :return: Indices with significant vdata volume shift. :rtype: int """ import numpy as np from bayesian_changepoint_detection import offline_changepoint_detection as offcd from functools import partial # Volume shift detection over the means of the training window q, p, pcp = offcd.offline_changepoint_detection( data=np.array(mean_list), prior_func=partial(offcd.const_prior, l=(len(mean_list) + 1)), observation_log_likelihood_function=offcd.gaussian_obs_log_likelihood, truncate=-10) mask_mean = np.append(0, np.exp(pcp).sum(0)) > probability_threshold # Volume shift detection over the standard deviations of the training window change_points = np.array(mask_mean).nonzero() last_mean_cp = change_points[0][-1] if len(change_points[0]) > 0 else [] q, p, pcp = offcd.offline_changepoint_detection( data=np.array(sd_list), prior_func=partial(offcd.const_prior, l=(len(sd_list) + 1)), observation_log_likelihood_function=offcd.gaussian_obs_log_likelihood, truncate=-10) mask_sd = np.append(0, np.exp(pcp).sum(0)) > probability_threshold change_points = np.array(mask_sd).nonzero() last_sd_cp = change_points[0][-1] if len(change_points[0]) > 0 else [] # Change point is the maximum obtained from mean list and the standard deviation list cdate = max(last_mean_cp, last_sd_cp) return cdate def _distance_function(self, data=None, called_for=None, baseline=None): """ This function finds the distance of the given data from the baseline using KL divergence. :param list data: The list containing the scoring window (for scoring) / training sub-window (for training). :param str distance_method: The method to be used to calculate the distance between two datasets. :param str called_for: A flag to specify whether this function is called for training or scoring. :param list baseline: A list containing the base line to be compared with the given data. :return: KL divergence between two time windows. 
:rtype: float """ import numpy as np import scipy.stats as stats float_min = 1e-50 float_max = 1e50 # If called for training, Kl divergence is performed over each pair of consecutive windows to create # the past anomaly scores if called_for == "training": distance = [] for i in range(0, len(data) - 1): q = stats.kde.gaussian_kde(data[i]) p = stats.kde.gaussian_kde(data[i + 1]) ts_min = min(np.min(data[i]), np.min(data[i + 1])) ts_max = max(np.max(data[i]), np.max(data[i + 1])) density_domain = np.linspace(ts_min, ts_max, 1000) q = q(density_domain) p = p(density_domain) # approximating the zero probability regions to avoid divide by zero issue in KL divergence q[q == 0] = min(np.array(q)[np.array(q) > 0]) p[p == 0] = min(np.array(p)[np.array(p) > 0]) q = np.clip(q, float_min, float_max) p = np.clip(p, float_min, float_max) distance.append(stats.entropy(pk=p, qk=q)) # If called for scoring, Kl divergence is performed between the scoring window and the baseline elif called_for == "scoring": q = stats.kde.gaussian_kde(baseline) p = stats.kde.gaussian_kde(data) ts_min = min(np.min(baseline), np.min(data)) ts_max = max(np.max(baseline), np.max(data)) density_domain = np.linspace(ts_min, ts_max, 1000) q = q(density_domain) p = p(density_domain) q[q == 0] = min(np.array(q)[np.array(q) > 0]) p[p == 0] = min(np.array(p)[np.array(p) > 0]) q = np.clip(q, float_min, float_max) p = np.clip(p, float_min, float_max) distance = stats.entropy(pk=p, qk=q) return distance def _training_data_truncation(self, sliced_training_data=None): """ This function performs the truncation of the training data using the _volume_shift_detection function. :param list sliced_training_data: The list containing the training data. :return: Sliced training sample based on the most recent change point :rtype: list """ import numpy as np # Change point detection is performed over the means and standard deviations of the sub windows window_means = [] window_sds = [] for ts in sliced_training_data: window_means.append(np.mean(ts)) window_sds.append(np.std(ts)) change_point = self._volume_shift_detection(mean_list=window_means, sd_list=window_sds) # Truncating the training data based on the last change point if change_point: sliced_training_data_truncated = sliced_training_data[change_point:] return sliced_training_data_truncated else: return sliced_training_data def _call_training(self, df=None, window_length=None, imputed_metric=None, detrend_method=None, detection_method=None, freq=None, **kwargs): """ This function generates the baseline and training metrics to be used for scoring. :param pandas.DataFrame df: Input training data frame. :param int window_length: The length of a training sub-window. :param str imputed_metric: Column storing the time series values. :param str detrend_method: Detrend method "modeling" or "diff" for nonstationarity. :param str detection_method: Detection method "kldiv" or "sign_test". :param str freq: Data frequency. :return: Returns past anomaly scores based on training data, baseline and other related metrics. 
:rtype: tuple(list, float, float, float, int, list, luminaire.model, float, dict, list) """ import pandas as pd past_anomaly_scores = dict() gamma_alpha = dict() gama_loc = dict() gamma_beta = dict() detrend_order = dict() baseline = dict() agg_data_model = dict() agg_data = dict() past_model = kwargs.get('past_model') training_start = df.first_valid_index() training_end = df.last_valid_index() current_training_end = training_end while (training_end - current_training_end) < pd.Timedelta('1D'): df_current = df[df.index <= current_training_end] past_anomaly_scores_current, gamma_alpha_current, gama_loc_current, gamma_beta_current, \ detrend_order_current, baseline_current, agg_data_model_current, \ agg_data_current = self._anomalous_region_detection(input_df=df_current, window_length=window_length, value_column=imputed_metric, called_for="training", detrend_method=detrend_method, past_model=past_model, detection_method=detection_method) past_anomaly_scores.update({str(current_training_end.time().strftime('%H:%M:%S')): past_anomaly_scores_current}) gamma_alpha.update({str(current_training_end.time().strftime('%H:%M:%S')): float(gamma_alpha_current) if gamma_alpha_current else None}) gama_loc.update({str(current_training_end.time().strftime('%H:%M:%S')): float(gama_loc_current) if gama_loc_current else None}) gamma_beta.update({str(current_training_end.time().strftime('%H:%M:%S')): float(gamma_beta_current) if gamma_beta_current else None}) detrend_order.update({str(current_training_end.time().strftime('%H:%M:%S')): detrend_order_current}) baseline.update({str(current_training_end.time().strftime('%H:%M:%S')): baseline_current}) agg_data_model.update({str(current_training_end.time().strftime('%H:%M:%S')): agg_data_model_current}) agg_data.update({str(current_training_end.time().strftime('%H:%M:%S')): agg_data_current}) if isinstance(freq, str): freq = pd.Timedelta('1' + freq) current_training_end = current_training_end - min(pd.Timedelta('30T'), freq * 10) return past_anomaly_scores, gamma_alpha, gama_loc, gamma_beta, \ detrend_order, baseline, agg_data_model, agg_data, training_start, training_end def _get_model(self, input_df=None, window_length=None, value_column=None, detrend_method=None, baseline_type=None, detection_method=None, past_model=None): """ This function runs the training process given the input parameters. :param pandas.DataFrame input_df: Input data containing the training and the scoring data. :param int window_length: The length of a training sub-window / scoring window. :param str value_column: Column containing the values. :param str detrend_method: Selects between "modeling" or "diff" detrend method. :param str baseline_type: Selects between "aggregated" or "last_window" baseline. :param str detection_method: Selects between "kldiv" or "sign_test" distance method. :param luminaire.model.window_density.WindowDensityModel past_model: luminaire.model to append model metadata from past :return: Returns past anomaly scores based on training data, baseline and other related metrics. 
:rtype: tuple(list, float, float, float, int, list, luminaire.model, float) """ import numpy as np import pandas as pd from itertools import chain import scipy.stats as st model_history_truncation_prop = 0.25 # This is the proportion of history to truncate from both sides # everytime we store the past anomaly scores de_obj = DataExploration() sliced_training_data, agg_datetime = de_obj._partition(input_df, window_length, value_column) # performing the stationarity test sliced_training_data_cleaned, detrend_order, agg_data_model, agg_data = de_obj._detrender( training_data_sliced=sliced_training_data, significance_level=0.05, detrend_method=detrend_method, agg_datetime=agg_datetime, past_model=past_model) # Obtain the past anomaly scores and the anomaly means and standard deviation if the detection method # is KL divergence if detection_method == "kldiv": past_anomaly_scores = np.array(self._distance_function(data=sliced_training_data_cleaned, called_for="training")) if past_model: model_timestamps = list(past_model._params['PastAnomalyScores'].keys()) training_end = input_df.index[-1] current_min_timedelta = pd.Timedelta('10D') for timestamp in model_timestamps: current_datetime = pd.Timestamp(str(training_end.date()) + ' ' + timestamp) temp_timedelta = training_end - current_datetime temp_timedelta = pd.Timedelta('1D') + temp_timedelta if temp_timedelta < pd.Timedelta( 0) else temp_timedelta if temp_timedelta < current_min_timedelta: opt_timestamp = timestamp current_min_timedelta = temp_timedelta past_anomaly_scores = np.concatenate([past_model._params['PastAnomalyScores'][opt_timestamp][ int(len(past_anomaly_scores) * model_history_truncation_prop): -int(len(past_anomaly_scores) * model_history_truncation_prop)] , past_anomaly_scores]) if len(past_anomaly_scores) < 100: alpha = [] loc = [] beta = [] for i in range(10): boot_scores = np.random.choice(past_anomaly_scores.tolist(), size=100, replace=True) alpha_i, loc_i, beta_i = st.gamma.fit(boot_scores) alpha.append(alpha_i) loc.append(loc_i) beta.append(beta_i) gamma_alpha = np.mean(alpha) gamma_loc = np.mean(loc) gamma_beta = np.mean(beta) else: gamma_alpha, gamma_loc, gamma_beta = st.gamma.fit(past_anomaly_scores) else: past_anomaly_scores, gamma_alpha, gamma_loc, gamma_beta = None, None, None, None # If aggregated baseline type is specified, we take the whole training window as a baseline, else we # take the last training sub window from the sliced training data if baseline_type == "aggregated": sliced_training_data_cleaned = self._training_data_truncation( sliced_training_data=sliced_training_data_cleaned) if detection_method == "kldiv": baseline = list(chain.from_iterable(sliced_training_data_cleaned)) elif detection_method == "sign_test": baseline = sliced_training_data_cleaned elif baseline_type == "last_window": baseline = sliced_training_data_cleaned[-1] return past_anomaly_scores, gamma_alpha, gamma_loc, gamma_beta, detrend_order, \ baseline, agg_data_model, agg_data def train(self, data, **kwargs): """ Input time series for training. :param pandas.DataFrame data: Input time series. :return: Trained model with the training timestamp and a success flag :rtype: tuple(bool, str, python model object) >>> data raw interpolated index 2017-10-02 00:00:00 118870 118870 2017-10-02 01:00:00 121914 121914 2017-10-02 02:00:00 116097 116097 2017-10-02 03:00:00 94511 94511 2017-10-02 04:00:00 68330 68330 ... ... ... 
2018-10-10 19:00:00 219908 219908 2018-10-10 20:00:00 219149 219149 2018-10-10 21:00:00 207232 207232 2018-10-10 22:00:00 198741 198741 2018-10-10 23:00:00 213751 213751 >>> hyper_params = WindowDensityHyperParams(freq='H').params >>> wdm_obj = WindowDensityModel(hyper_params=hyper_params) >>> success, model = wdm_obj.train(data) >>> success, model (True, "2018-10-10 23:00:00", <luminaire.model.window_density.WindowDensityModel object at 0x7fd7c5a34e80>) """ import numpy as np import pandas as pd freq = pd.Timedelta(self._params['freq']) if self._params['freq'] not in ['S', 'T', '15T', 'H', 'D'] \ else self._params['freq'] if freq in ['S', 'T', '15T', 'H', 'D']: window_length = self._params['window_length'] else: min_window_length = self._params['min_window_length'] max_window_length = self._params['max_window_length'] window_length = self._params['window_length'] if not min_window_length or not max_window_length or not window_length: raise ValueError( 'Training window length with min and max should be specified in case frequency not in the ' 'specified list') is_log_transformed = self._params['is_log_transformed'] detrend_method = self._params['detrend_method'] target_metric = 'raw' imputed_metric = 'interpolated' if not self._params['detection_method']: if freq in ['S', 'T', '15T']: detection_method = 'kldiv' elif freq in ['H', 'D']: detection_method = 'sign_test' else: detection_method = 'sign_test' if freq > np.timedelta64(30, 'm') else 'kldiv' else: detection_method = self._params['detection_method'] if len(data) == 0: model = {'ErrorMessage': 'DataFrame length is 0'} success = False return success, WindowDensityModel(**model) # Shift the interpolated value by +1 and get the log. This handles values with 0. if is_log_transformed: neg_flag = True if not data[data[target_metric] < 0].empty else False data[imputed_metric] = data[imputed_metric] if neg_flag else np.log(data[imputed_metric] + 1) past_anomaly_scores, anomaly_scores_gamma_alpha, anomaly_scores_gamma_loc, anomaly_scores_gamma_beta, \ detrend_order, baseline, agg_data_model, agg_data, \ training_start, training_end = self._call_training(df=data, window_length=window_length, imputed_metric=imputed_metric, detrend_method=detrend_method, detection_method=detection_method, freq=freq, **kwargs) success = True self.hyper_params['is_log_transformed'] = is_log_transformed self.hyper_params['detection_method'] = detection_method model = {'TrainingStartDate': str(training_start), 'PastAnomalyScores': past_anomaly_scores, 'AnomalyScoresGammaAlpha': anomaly_scores_gamma_alpha, 'AnomalyScoresGammaLoc': anomaly_scores_gamma_loc, 'AnomalyScoresGammaBeta': anomaly_scores_gamma_beta, 'NonStationarityOrder': detrend_order, 'Baseline': baseline, 'AggregatedDataModel': agg_data_model, 'AggregatedData': agg_data } return success, str(training_end), WindowDensityModel(hyper_params=self.hyper_params, **model) def _call_scoring(self, df=None, target_metric=None, anomaly_scores_gamma_alpha=None, anomaly_scores_gamma_loc=None, anomaly_scores_gamma_beta=None, baseline=None, detrend_order=None, detrend_method=None, agg_data_model=None, detection_method=None, attributes=None, agg_data=None): """ This function generates the anomaly flag and and probability for the scoring window. :param pandas.DataFrame df: Input training data frame. :param str target_metric: Column storing the time series values. :param float anomaly_scores_gamma_alpha: Gamma fit alpha parameter. :param float anomaly_scores_gamma_loc: Gamma fit location parameter. 
:param float anomaly_scores_gamma_beta: Gamma fit beta parameter. :param list baseline: A list storing a baseline window used to score the scoring window. :param int detrend_order: The order of detrending based on MA or differencing method. :param str detrend_method: Selects between "modeling" or "diff" detrend method. :param luminaire.model.lad_structural.LADStructuralModel agg_data_model: Prediction model for aggregated data. :param str detection_method: Selects between "kldiv" or "sign_test" distance method. :param attributes: Model attributes. :param agg_data: Aggregated Data per day. :return: Returns the anomaly flag with the corresponding anomaly probability. :rtype: tuple(bool, float, dict) """ is_anomaly, prob_of_anomaly = self._anomalous_region_detection(input_df=df, value_column=target_metric, called_for="scoring", anomaly_scores_gamma_alpha=anomaly_scores_gamma_alpha, anomaly_scores_gamma_loc=anomaly_scores_gamma_loc, anomaly_scores_gamma_beta=anomaly_scores_gamma_beta, baseline=baseline, detrend_order=detrend_order, detrend_method=detrend_method, agg_data_model=agg_data_model, detection_method=detection_method, agg_data=agg_data) return is_anomaly, prob_of_anomaly, attributes def _get_result(self, input_df=None, detrend_order=None, agg_data_model=None, value_column=None, detrend_method=None, baseline_type=None, detection_method=None, baseline=None, anomaly_scores_gamma_alpha=None, anomaly_scores_gamma_loc=None, anomaly_scores_gamma_beta=None, agg_data=None): """ The function scores the scoring window for anomalies based on the training metrics and the baseline :param pandas.DataFrame input_df: Input data containing the training and the scoring data. :param int detrend_order: The non-negative order of detrending based on Modeling or differencing method. When the detrend_order > 0, corresponding detrending need to be performed using the method specified in the model config. :param luminaire.model.lad_structural.LADStructuralModel agg_data_model: Prediction model for aggregated data. :param str value_column: Column containing the values. :param str detrend_method: Selects between "modeling" or "diff" detrend method. :param str baseline_type: Selects between "aggregated" or "last_window" baseline. :param str detection_method: Selects between "kldiv" or "sign_test" distance method. :param list baseline: A list storing a baseline window used to score the scoring window. :param float anomaly_scores_gamma_alpha: Gamma fit alpha parameter. :param float anomaly_scores_gamma_loc: Gamma fit location parameter. :param float anomaly_scores_gamma_beta: Gamma fit beta parameter. :param agg_data: Aggregated Data per day. :return: Returns the anomaly flag with the corresponding anomaly probability. 
:rtype: tuple(bool, float) """ import numpy as np import pandas as pd import copy import scipy.stats as st from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler from sklearn.covariance import EmpiricalCovariance, MinCovDet import collections import operator is_anomaly = False execution_data = input_df[value_column] adjusted_execution_data = [] prob_of_anomaly = [] len_req_agg_data_model = 42 # Setting a hard threshold to have predictions from aggregated data # for stationarity adjustment if detrend_method == 'diff': # Obtain the execution data and perform the necessary differencing execution_data = list(execution_data) adjusted_execution_data = np.diff(execution_data, detrend_order).tolist() if detrend_order > 0 \ else execution_data elif detrend_method == 'modeling': idx = input_df.index.normalize() dates_freq_dist = dict(collections.Counter(idx)) scoring_datetime = str(max(dates_freq_dist.items(), key=operator.itemgetter(1))[0]) execution_data_avg = np.mean(execution_data) # If detrending is needed, we scale the scoring data accordingly using the agg_dat_model forecast if detrend_order > 0: snapshot_len_max = min(len(agg_data), len_req_agg_data_model) agg_data_trunc = np.array(agg_data)[:, 1][-snapshot_len_max:] data_adjust_forecast = [] try: # Setting the data adjustment window of the original data using the predictions and the CILower and # CIUpper keeping the prediction uncertainty of the agg_model in mind if agg_data_model and len(agg_data) > len_req_agg_data_model: score = agg_data_model.score(execution_data_avg, scoring_datetime) data_adjust_forecast.append(score['Prediction']) data_adjust_forecast.append(score['CILower']) data_adjust_forecast.append(score['CIUpper']) else: data_adjust_forecast.append(np.median(agg_data_trunc)) data_adjust_forecast.append(np.percentile(agg_data_trunc, 5)) # setting a 2-sigma limit data_adjust_forecast.append(np.percentile(agg_data_trunc, 95)) # setting a 2-sigma limit except: # If the scoring for the agg_data_model fails for some reason, we use the latest agg_data for the # detrending adjustment data_adjust_forecast.append(np.median(agg_data_trunc)) data_adjust_forecast.append(np.percentile(agg_data_trunc, 5)) # setting a 2-sigma limit data_adjust_forecast.append(np.percentile(agg_data_trunc, 95)) # setting a 2-sigma limit for i in range(3): if data_adjust_forecast[i] != 0: adjusted_execution_data.append((execution_data / data_adjust_forecast[i]).tolist()) else: adjusted_execution_data = list(execution_data) # Kl divergence based anomaly detection if detection_method == "kldiv": if detrend_order > 0: prob_of_anomaly = [] for i in range(3): current_anomaly_score = self._distance_function(data=adjusted_execution_data[i], called_for="scoring", baseline=baseline) prob_of_anomaly.append(st.gamma.cdf(current_anomaly_score, anomaly_scores_gamma_alpha, anomaly_scores_gamma_loc, anomaly_scores_gamma_beta)) prob_of_anomaly = np.min(prob_of_anomaly) else: current_anomaly_score = self._distance_function(data=adjusted_execution_data, called_for="scoring", baseline=baseline) prob_of_anomaly = st.gamma.cdf(current_anomaly_score, anomaly_scores_gamma_alpha, anomaly_scores_gamma_loc, anomaly_scores_gamma_beta) if 1 - prob_of_anomaly < self.sig_level: is_anomaly = True # Sign test based anomaly detection elif detection_method == "sign_test": # If last window is the baseline, we perform the Wilcoxon sign rank test for means and levene # test for variance to detect anomalies if baseline_type == "last_window": test_stat_wilcoxon, 
pvalue_wilcoxon = st.wilcoxon(execution_data, baseline) test_stat_levene, pvalue_levene = st.levene(execution_data, baseline) if pvalue_wilcoxon < self.sig_level or pvalue_levene < self.sig_level: is_anomaly = True prob_of_anomaly = 1 - min(pvalue_wilcoxon, pvalue_levene) # If aggregated is the baseline, we perform the Wilcoxon sign rank test for means and gamma distribution # based test for the past standard deviations to detect anomalies elif baseline_type == "aggregated": baseline_sds = np.array(baseline).std(1).tolist() if detrend_order == 0: # crearing a 2d list to make it easy to loop through in the following for loop adjusted_execution_data = [adjusted_execution_data] for current_adjusted_data in adjusted_execution_data: baseline_execution_data = copy.copy(baseline) baseline_execution_data.append(current_adjusted_data) pca = PCA() scores = pca.fit_transform(StandardScaler().fit_transform(baseline_execution_data)) robust_cov = MinCovDet().fit(scores[:, :3]) mahalanobis_distance = robust_cov.mahalanobis(scores[:, :3]) # getting the top 3 dimensions pvalue_mahalanobis = 1 - st.chi2.cdf(mahalanobis_distance[-1], np.array(baseline_execution_data).shape[1]) gamma_alpha, gamma_loc, gamma_beta = st.gamma.fit(baseline_sds) pvalue_gamma = 1 - st.gamma.cdf(np.std(current_adjusted_data), gamma_alpha, gamma_loc, gamma_beta) if pvalue_mahalanobis < self.sig_level or pvalue_gamma < self.sig_level: is_anomaly = True prob_of_anomaly.append(1 - min(pvalue_mahalanobis, pvalue_gamma)) prob_of_anomaly = np.min(prob_of_anomaly) return is_anomaly, prob_of_anomaly def score(self, data, **kwargs): """ Function scores input series for anomalies :param pandas.DataFrame data: Input time series to score :return: Output dictionary with scoring summary. :rtype: dict >>> data raw interpolated index 2018-10-11 00:00:00 204800 204800 2018-10-11 01:00:00 222218 222218 2018-10-11 02:00:00 218903 218903 2018-10-11 03:00:00 190639 190639 2018-10-11 04:00:00 148214 148214 2018-10-11 05:00:00 106358 106358 2018-10-11 06:00:00 70081 70081 2018-10-11 07:00:00 47748 47748 2018-10-11 08:00:00 36837 36837 2018-10-11 09:00:00 33023 33023 2018-10-11 10:00:00 44432 44432 2018-10-11 11:00:00 72773 72773 2018-10-11 12:00:00 115180 115180 2018-10-11 13:00:00 157568 157568 2018-10-11 14:00:00 180174 180174 2018-10-11 15:00:00 190048 190048 2018-10-11 16:00:00 188391 188391 2018-10-11 17:00:00 189233 189233 2018-10-11 18:00:00 191703 191703 2018-10-11 19:00:00 189848 189848 2018-10-11 20:00:00 192685 192685 2018-10-11 21:00:00 196743 196743 2018-10-11 22:00:00 193016 193016 2018-10-11 23:00:00 196441 196441 >>> model <luminaire.model.window_density.WindowDensityModel object at 0x7fcaab72fdd8> >>> model.score(data) {'Success': True, 'ConfLevel': 99.9, 'IsAnomaly': False, 'AnomalyProbability': 0.6963188902776808} """ import numpy as np import pandas as pd is_log_transformed = self._params['is_log_transformed'] detrend_method = self._params['detrend_method'] target_metric = 'raw' imputed_metric = 'interpolated' detection_method = self._params['detection_method'] # We want to make sure the time series does not contain any negatives in case of log transformation if is_log_transformed: neg_flag = True if not data[data[target_metric] < 0].empty else False data[imputed_metric] = data[imputed_metric] if neg_flag else np.log(data[imputed_metric] + 1) model_timestamps = list(self._params['AnomalyScoresGammaAlpha'].keys()) scoring_start = data.index[0] current_min_timedelta = pd.Timedelta('10D') for timestamp in model_timestamps: current_datetime 
= pd.Timestamp(str(scoring_start.date()) + ' ' + timestamp) temp_timedelta = scoring_start - current_datetime temp_timedelta = pd.Timedelta('1D') + temp_timedelta if temp_timedelta < pd.Timedelta(0) else temp_timedelta if temp_timedelta < current_min_timedelta: opt_timestamp = timestamp current_min_timedelta = temp_timedelta anomaly_scores_gamma_alpha = self._params['AnomalyScoresGammaAlpha'][opt_timestamp] anomaly_scores_gamma_loc = self._params['AnomalyScoresGammaLoc'][opt_timestamp] anomaly_scores_gamma_beta = self._params['AnomalyScoresGammaBeta'][opt_timestamp] baseline = self._params['Baseline'][opt_timestamp] detrend_order = self._params['NonStationarityOrder'][opt_timestamp] agg_data_model = self._params['AggregatedDataModel'][opt_timestamp] agg_data = self._params['AggregatedData'][opt_timestamp] is_anomaly, prob_of_anomaly, attributes = self._call_scoring(df=data, target_metric=target_metric, anomaly_scores_gamma_alpha=anomaly_scores_gamma_alpha, anomaly_scores_gamma_loc=anomaly_scores_gamma_loc, anomaly_scores_gamma_beta=anomaly_scores_gamma_beta, baseline=baseline, detrend_order=detrend_order, detrend_method=detrend_method, agg_data_model=agg_data_model, detection_method=detection_method, agg_data=agg_data) result = {'Success': True, 'ConfLevel': float(1.0 - self.sig_level) * 100, 'IsAnomaly': is_anomaly, 'AnomalyProbability': float(prob_of_anomaly), } return result, data.reset_index().values.tolist() def _anomalous_region_detection(self, input_df=None, window_length=None, value_column=None, called_for=None, anomaly_scores_gamma_alpha=None, anomaly_scores_gamma_loc=None, anomaly_scores_gamma_beta=None, detrend_order=None, baseline=None, detrend_method=None, agg_data_model=None, past_model=None, detection_method=None, agg_data=None): """ This function detects anomaly given a training and a scoring window. :param pandas.DataFrame input_df: Input data containing the training and the scoring data. :param int window_length: The length of a training sub-window / scoring window. :param str value_column: A string identifying the value column from the input dataframe :param str called_for: A flag to specify whether this function is called for training or scoring. :param float anomaly_scores_gamma_alpha: Gamma fit alpha parameter. :param float anomaly_scores_gamma_loc: Gamma fit location parameter. :param float anomaly_scores_gamma_beta: Gamma fit beta parameter. :param int detrend_order: Number of differencing for the scoring data. Only required if called for scoring. :param list baseline: The baseline for the scoring. only required if called for scoring. :param str detrend_method: Selects between "modeling" or "diff" detrend method. :param luminaire.model.lad_structural.LADStructuralModel agg_data_model: Prediction model for aggregated data. :param luminaire.model.window_density.WindowDensityModel past_model: Past stored window density model. :param str detection_method: Selects between "kldiv" or "sign_test" distance method. :param agg_data: Aggregated Data per day. :return: Anomaly flag with the corresponding probability of anomaly. 
:rtype: tuple(bool, float) """ baseline_type = self._params['baseline_type'] input_df.fillna(0, inplace=True) # The function can be called for either training or scoring if called_for == "training": return self._get_model(input_df=input_df, window_length=window_length, value_column=value_column, detrend_method=detrend_method, baseline_type=baseline_type, detection_method=detection_method, past_model=past_model) elif called_for == "scoring": return self._get_result(input_df=input_df, detrend_order=detrend_order, agg_data_model=agg_data_model, value_column=value_column, detrend_method=detrend_method, baseline_type=baseline_type, detection_method=detection_method, baseline=baseline, anomaly_scores_gamma_alpha=anomaly_scores_gamma_alpha, anomaly_scores_gamma_loc=anomaly_scores_gamma_loc, anomaly_scores_gamma_beta=anomaly_scores_gamma_beta, agg_data=agg_data)
[ "sklearn.preprocessing.StandardScaler", "numpy.clip", "scipy.stats.levene", "numpy.mean", "numpy.exp", "numpy.std", "luminaire.exploration.data_exploration.DataExploration", "numpy.max", "numpy.linspace", "pandas.Timedelta", "collections.Counter", "scipy.stats.kde.gaussian_kde", "scipy.stats.gamma.cdf", "numpy.median", "numpy.percentile", "numpy.min", "scipy.stats.wilcoxon", "sklearn.covariance.MinCovDet", "numpy.log", "scipy.stats.entropy", "copy.copy", "numpy.diff", "numpy.array", "numpy.timedelta64", "sklearn.decomposition.PCA", "scipy.stats.gamma.fit", "operator.itemgetter", "itertools.chain.from_iterable" ]
[((15154, 15171), 'luminaire.exploration.data_exploration.DataExploration', 'DataExploration', ([], {}), '()\n', (15169, 15171), False, 'from luminaire.exploration.data_exploration import DataExploration\n'), ((37362, 37381), 'pandas.Timedelta', 'pd.Timedelta', (['"""10D"""'], {}), "('10D')\n", (37374, 37381), True, 'import pandas as pd\n'), ((11479, 11497), 'pandas.Timedelta', 'pd.Timedelta', (['"""1D"""'], {}), "('1D')\n", (11491, 11497), True, 'import pandas as pd\n'), ((20150, 20184), 'pandas.Timedelta', 'pd.Timedelta', (["self._params['freq']"], {}), "(self._params['freq'])\n", (20162, 20184), True, 'import pandas as pd\n'), ((5213, 5232), 'numpy.array', 'np.array', (['mean_list'], {}), '(mean_list)\n', (5221, 5232), True, 'import numpy as np\n'), ((5606, 5625), 'numpy.array', 'np.array', (['mask_mean'], {}), '(mask_mean)\n', (5614, 5625), True, 'import numpy as np\n'), ((5792, 5809), 'numpy.array', 'np.array', (['sd_list'], {}), '(sd_list)\n', (5800, 5809), True, 'import numpy as np\n'), ((6094, 6111), 'numpy.array', 'np.array', (['mask_sd'], {}), '(mask_sd)\n', (6102, 6111), True, 'import numpy as np\n'), ((7458, 7489), 'scipy.stats.kde.gaussian_kde', 'stats.kde.gaussian_kde', (['data[i]'], {}), '(data[i])\n', (7480, 7489), True, 'import scipy.stats as stats\n'), ((7510, 7545), 'scipy.stats.kde.gaussian_kde', 'stats.kde.gaussian_kde', (['data[i + 1]'], {}), '(data[i + 1])\n', (7532, 7545), True, 'import scipy.stats as stats\n'), ((7715, 7748), 'numpy.linspace', 'np.linspace', (['ts_min', 'ts_max', '(1000)'], {}), '(ts_min, ts_max, 1000)\n', (7726, 7748), True, 'import numpy as np\n'), ((8079, 8111), 'numpy.clip', 'np.clip', (['q', 'float_min', 'float_max'], {}), '(q, float_min, float_max)\n', (8086, 8111), True, 'import numpy as np\n'), ((8132, 8164), 'numpy.clip', 'np.clip', (['p', 'float_min', 'float_max'], {}), '(p, float_min, float_max)\n', (8139, 8164), True, 'import numpy as np\n'), ((8384, 8416), 'scipy.stats.kde.gaussian_kde', 'stats.kde.gaussian_kde', (['baseline'], {}), '(baseline)\n', (8406, 8416), True, 'import scipy.stats as stats\n'), ((8433, 8461), 'scipy.stats.kde.gaussian_kde', 'stats.kde.gaussian_kde', (['data'], {}), '(data)\n', (8455, 8461), True, 'import scipy.stats as stats\n'), ((8607, 8640), 'numpy.linspace', 'np.linspace', (['ts_min', 'ts_max', '(1000)'], {}), '(ts_min, ts_max, 1000)\n', (8618, 8640), True, 'import numpy as np\n'), ((8843, 8875), 'numpy.clip', 'np.clip', (['q', 'float_min', 'float_max'], {}), '(q, float_min, float_max)\n', (8850, 8875), True, 'import numpy as np\n'), ((8892, 8924), 'numpy.clip', 'np.clip', (['p', 'float_min', 'float_max'], {}), '(p, float_min, float_max)\n', (8899, 8924), True, 'import numpy as np\n'), ((8949, 8974), 'scipy.stats.entropy', 'stats.entropy', ([], {'pk': 'p', 'qk': 'q'}), '(pk=p, qk=q)\n', (8962, 8974), True, 'import scipy.stats as stats\n'), ((9639, 9650), 'numpy.mean', 'np.mean', (['ts'], {}), '(ts)\n', (9646, 9650), True, 'import numpy as np\n'), ((9682, 9692), 'numpy.std', 'np.std', (['ts'], {}), '(ts)\n', (9688, 9692), True, 'import numpy as np\n'), ((13438, 13462), 'pandas.Timedelta', 'pd.Timedelta', (["('1' + freq)"], {}), "('1' + freq)\n", (13450, 13462), True, 'import pandas as pd\n'), ((16203, 16222), 'pandas.Timedelta', 'pd.Timedelta', (['"""10D"""'], {}), "('10D')\n", (16215, 16222), True, 'import pandas as pd\n'), ((17684, 17698), 'numpy.mean', 'np.mean', (['alpha'], {}), '(alpha)\n', (17691, 17698), True, 'import numpy as np\n'), ((17727, 17739), 'numpy.mean', 'np.mean', (['loc'], {}), '(loc)\n', 
(17734, 17739), True, 'import numpy as np\n'), ((17769, 17782), 'numpy.mean', 'np.mean', (['beta'], {}), '(beta)\n', (17776, 17782), True, 'import numpy as np\n'), ((17854, 17887), 'scipy.stats.gamma.fit', 'st.gamma.fit', (['past_anomaly_scores'], {}), '(past_anomaly_scores)\n', (17866, 17887), True, 'import scipy.stats as st\n'), ((21911, 21943), 'numpy.log', 'np.log', (['(data[imputed_metric] + 1)'], {}), '(data[imputed_metric] + 1)\n', (21917, 21943), True, 'import numpy as np\n'), ((29115, 29138), 'numpy.mean', 'np.mean', (['execution_data'], {}), '(execution_data)\n', (29122, 29138), True, 'import numpy as np\n'), ((31863, 31886), 'numpy.min', 'np.min', (['prob_of_anomaly'], {}), '(prob_of_anomaly)\n', (31869, 31886), True, 'import numpy as np\n'), ((32138, 32258), 'scipy.stats.gamma.cdf', 'st.gamma.cdf', (['current_anomaly_score', 'anomaly_scores_gamma_alpha', 'anomaly_scores_gamma_loc', 'anomaly_scores_gamma_beta'], {}), '(current_anomaly_score, anomaly_scores_gamma_alpha,\n anomaly_scores_gamma_loc, anomaly_scores_gamma_beta)\n', (32150, 32258), True, 'import scipy.stats as st\n'), ((37178, 37210), 'numpy.log', 'np.log', (['(data[imputed_metric] + 1)'], {}), '(data[imputed_metric] + 1)\n', (37184, 37210), True, 'import numpy as np\n'), ((7576, 7591), 'numpy.min', 'np.min', (['data[i]'], {}), '(data[i])\n', (7582, 7591), True, 'import numpy as np\n'), ((7593, 7612), 'numpy.min', 'np.min', (['data[i + 1]'], {}), '(data[i + 1])\n', (7599, 7612), True, 'import numpy as np\n'), ((7643, 7658), 'numpy.max', 'np.max', (['data[i]'], {}), '(data[i])\n', (7649, 7658), True, 'import numpy as np\n'), ((7660, 7679), 'numpy.max', 'np.max', (['data[i + 1]'], {}), '(data[i + 1])\n', (7666, 7679), True, 'import numpy as np\n'), ((8198, 8223), 'scipy.stats.entropy', 'stats.entropy', ([], {'pk': 'p', 'qk': 'q'}), '(pk=p, qk=q)\n', (8211, 8223), True, 'import scipy.stats as stats\n'), ((8488, 8504), 'numpy.min', 'np.min', (['baseline'], {}), '(baseline)\n', (8494, 8504), True, 'import numpy as np\n'), ((8506, 8518), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (8512, 8518), True, 'import numpy as np\n'), ((8545, 8561), 'numpy.max', 'np.max', (['baseline'], {}), '(baseline)\n', (8551, 8561), True, 'import numpy as np\n'), ((8563, 8575), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (8569, 8575), True, 'import numpy as np\n'), ((13525, 13544), 'pandas.Timedelta', 'pd.Timedelta', (['"""30T"""'], {}), "('30T')\n", (13537, 13544), True, 'import pandas as pd\n'), ((17508, 17533), 'scipy.stats.gamma.fit', 'st.gamma.fit', (['boot_scores'], {}), '(boot_scores)\n', (17520, 17533), True, 'import scipy.stats as st\n'), ((18439, 18488), 'itertools.chain.from_iterable', 'chain.from_iterable', (['sliced_training_data_cleaned'], {}), '(sliced_training_data_cleaned)\n', (18458, 18488), False, 'from itertools import chain\n'), ((28960, 28984), 'collections.Counter', 'collections.Counter', (['idx'], {}), '(idx)\n', (28979, 28984), False, 'import collections\n'), ((32739, 32776), 'scipy.stats.wilcoxon', 'st.wilcoxon', (['execution_data', 'baseline'], {}), '(execution_data, baseline)\n', (32750, 32776), True, 'import scipy.stats as st\n'), ((32827, 32862), 'scipy.stats.levene', 'st.levene', (['execution_data', 'baseline'], {}), '(execution_data, baseline)\n', (32836, 32862), True, 'import scipy.stats as st\n'), ((37661, 37676), 'pandas.Timedelta', 'pd.Timedelta', (['(0)'], {}), '(0)\n', (37673, 37676), True, 'import pandas as pd\n'), ((37605, 37623), 'pandas.Timedelta', 'pd.Timedelta', (['"""1D"""'], {}), 
"('1D')\n", (37617, 37623), True, 'import pandas as pd\n'), ((5452, 5463), 'numpy.exp', 'np.exp', (['pcp'], {}), '(pcp)\n', (5458, 5463), True, 'import numpy as np\n'), ((6025, 6036), 'numpy.exp', 'np.exp', (['pcp'], {}), '(pcp)\n', (6031, 6036), True, 'import numpy as np\n'), ((7966, 7977), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (7974, 7977), True, 'import numpy as np\n'), ((8028, 8039), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (8036, 8039), True, 'import numpy as np\n'), ((8738, 8749), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (8746, 8749), True, 'import numpy as np\n'), ((8796, 8807), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (8804, 8807), True, 'import numpy as np\n'), ((28730, 28768), 'numpy.diff', 'np.diff', (['execution_data', 'detrend_order'], {}), '(execution_data, detrend_order)\n', (28737, 28768), True, 'import numpy as np\n'), ((31655, 31775), 'scipy.stats.gamma.cdf', 'st.gamma.cdf', (['current_anomaly_score', 'anomaly_scores_gamma_alpha', 'anomaly_scores_gamma_loc', 'anomaly_scores_gamma_beta'], {}), '(current_anomaly_score, anomaly_scores_gamma_alpha,\n anomaly_scores_gamma_loc, anomaly_scores_gamma_beta)\n', (31667, 31775), True, 'import scipy.stats as st\n'), ((34753, 34776), 'numpy.min', 'np.min', (['prob_of_anomaly'], {}), '(prob_of_anomaly)\n', (34759, 34776), True, 'import numpy as np\n'), ((7978, 7989), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (7986, 7989), True, 'import numpy as np\n'), ((8040, 8051), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (8048, 8051), True, 'import numpy as np\n'), ((8750, 8761), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (8758, 8761), True, 'import numpy as np\n'), ((8808, 8819), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (8816, 8819), True, 'import numpy as np\n'), ((16532, 16547), 'pandas.Timedelta', 'pd.Timedelta', (['(0)'], {}), '(0)\n', (16544, 16547), True, 'import pandas as pd\n'), ((16476, 16494), 'pandas.Timedelta', 'pd.Timedelta', (['"""1D"""'], {}), "('1D')\n", (16488, 16494), True, 'import pandas as pd\n'), ((21345, 21368), 'numpy.timedelta64', 'np.timedelta64', (['(30)', '"""m"""'], {}), "(30, 'm')\n", (21359, 21368), True, 'import numpy as np\n'), ((29394, 29412), 'numpy.array', 'np.array', (['agg_data'], {}), '(agg_data)\n', (29402, 29412), True, 'import numpy as np\n'), ((33697, 33716), 'copy.copy', 'copy.copy', (['baseline'], {}), '(baseline)\n', (33706, 33716), False, 'import copy\n'), ((33817, 33822), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (33820, 33822), False, 'from sklearn.decomposition import PCA\n'), ((34352, 34378), 'scipy.stats.gamma.fit', 'st.gamma.fit', (['baseline_sds'], {}), '(baseline_sds)\n', (34364, 34378), True, 'import scipy.stats as st\n'), ((29054, 29076), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (29073, 29076), False, 'import operator\n'), ((30176, 30201), 'numpy.median', 'np.median', (['agg_data_trunc'], {}), '(agg_data_trunc)\n', (30185, 30201), True, 'import numpy as np\n'), ((30255, 30287), 'numpy.percentile', 'np.percentile', (['agg_data_trunc', '(5)'], {}), '(agg_data_trunc, 5)\n', (30268, 30287), True, 'import numpy as np\n'), ((30373, 30406), 'numpy.percentile', 'np.percentile', (['agg_data_trunc', '(95)'], {}), '(agg_data_trunc, 95)\n', (30386, 30406), True, 'import numpy as np\n'), ((30674, 30699), 'numpy.median', 'np.median', (['agg_data_trunc'], {}), '(agg_data_trunc)\n', (30683, 30699), True, 'import numpy as np\n'), ((30749, 30781), 'numpy.percentile', 'np.percentile', (['agg_data_trunc', 
'(5)'], {}), '(agg_data_trunc, 5)\n', (30762, 30781), True, 'import numpy as np\n'), ((30863, 30896), 'numpy.percentile', 'np.percentile', (['agg_data_trunc', '(95)'], {}), '(agg_data_trunc, 95)\n', (30876, 30896), True, 'import numpy as np\n'), ((33960, 33971), 'sklearn.covariance.MinCovDet', 'MinCovDet', ([], {}), '()\n', (33969, 33971), False, 'from sklearn.covariance import EmpiricalCovariance, MinCovDet\n'), ((34431, 34460), 'numpy.std', 'np.std', (['current_adjusted_data'], {}), '(current_adjusted_data)\n', (34437, 34460), True, 'import numpy as np\n'), ((33336, 33354), 'numpy.array', 'np.array', (['baseline'], {}), '(baseline)\n', (33344, 33354), True, 'import numpy as np\n'), ((33870, 33886), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (33884, 33886), False, 'from sklearn.preprocessing import StandardScaler\n'), ((34250, 34283), 'numpy.array', 'np.array', (['baseline_execution_data'], {}), '(baseline_execution_data)\n', (34258, 34283), True, 'import numpy as np\n')]
import sys import time import scipy as sp from scipy import stats import h5py from ldpred import LDpred_inf from ldpred import util from ldpred import ld from ldpred import reporting from ldpred import coord_genotypes def get_LDpred_sample_size(n,ns,verbose): if n is None: #If coefficient of variation is very small, then use one N nevertheless. n_cv = sp.std(ns)/sp.mean(ns) if n_cv<0.01: ldpred_n = sp.mean(ns) if verbose: print ("Sample size does not vary much (CV=%0.4f). Using a fixed sample size of %0.2f"%(n_cv,ldpred_n)) else: if verbose: print ("Using varying sample sizes") print ("Sample size ranges between %d and %d"%(min(ns),max(ns))) print ("Average sample size is %0.2f "%(sp.mean(ns))) ldpred_inf_n = sp.mean(ns) ldpred_n = None else: ldpred_n = float(n) if verbose: print ("Using the given fixed sample size of %d"%(n)) ldpred_inf_n = float(n) return ldpred_n,ldpred_inf_n def prepare_constants(ldpred_n,ns,m,p,h2,sampl_var_shrink_factor): Mp = m * p hdmp = (h2 / Mp) const_dict = {'Mp':Mp, 'hdmp':hdmp} rv_scalars = sp.zeros(m) if ldpred_n is not None: hdmpn = hdmp + 1.0 / ldpred_n hdmp_hdmpn = (hdmp / hdmpn) c_const = (p / sp.sqrt(hdmpn)) d_const = (1.0 - p) / (sp.sqrt(1.0 / ldpred_n)) const_dict['n']=ldpred_n const_dict['hdmpn']=hdmpn const_dict['hdmp_hdmpn']=hdmp_hdmpn const_dict['c_const']=c_const const_dict['d_const']=d_const rv_scalars[:]=sampl_var_shrink_factor* sp.sqrt((hdmp_hdmpn) * (1.0 / ldpred_n)) else: snp_dict = {} for i in range(m): ni = ns[i] hdmpn_i = hdmp + 1.0 / ni hdmp_hdmpn_i = (hdmp / hdmpn_i) c_const_i = (p / sp.sqrt(hdmpn_i)) d_const_i = (1.0 - p) / (sp.sqrt(1.0 / ni)) snp_dict[i]={'n':ni, 'hdmpn':hdmpn_i, 'hdmp_hdmpn':hdmp_hdmpn_i, 'c_const':c_const_i, 'd_const':d_const_i} rv_scalars[i]=sampl_var_shrink_factor* sp.sqrt((hdmp_hdmpn_i) * (1.0 / ni)) const_dict['snp_dict']=snp_dict const_dict['rv_scalars']=rv_scalars return const_dict def get_constants(snp_i,const_dict): if 'snp_dict' in const_dict: return const_dict['snp_dict'][snp_i] else: return const_dict def ldpred_gibbs(beta_hats, genotypes=None, start_betas=None, h2=None, n=None, ns= None, ld_radius=100, num_iter=60, burn_in=10, p=None, zero_jump_prob=0.01, sampl_var_shrink_factor=0.9, tight_sampling=False,ld_dict=None, reference_ld_mats=None, ld_boundaries=None, snp_lrld=None, verbose=False, print_progress=True): """ LDpred (Gibbs Sampler) """ # Set random seed to stabilize results sp.random.seed(42) t0 = time.time() m = len(beta_hats) ldpred_n, ldpred_inf_n = get_LDpred_sample_size(n,ns,verbose) # If no starting values for effects were given, then use the infinitesimal model starting values. if start_betas is None and verbose: print('Initializing LDpred effects with posterior mean LDpred-inf effects.') print('Calculating LDpred-inf effects.') start_betas = LDpred_inf.ldpred_inf(beta_hats, genotypes=genotypes, reference_ld_mats=reference_ld_mats, h2=h2, n=ldpred_inf_n, ld_window_size=2 * ld_radius, verbose=False) curr_betas = sp.copy(start_betas) assert len(curr_betas)==m,'Betas returned by LDpred_inf do not have the same length as expected.' curr_post_means = sp.zeros(m) avg_betas = sp.zeros(m) # Iterating over effect estimates in sequential order iter_order = sp.arange(m) # Setting up the marginal Bayes shrink const_dict = prepare_constants(ldpred_n,ns,m,p,h2,sampl_var_shrink_factor) for k in range(num_iter): # Big iteration h2_est = max(0.00001, sp.sum(curr_betas ** 2)) if tight_sampling: # Force an alpha shrink if estimates are way off compared to heritability estimates. 
#(May improve MCMC convergence.) alpha = min(1.0 - zero_jump_prob, 1.0 / h2_est, (h2 + 1.0 / sp.sqrt(ldpred_n)) / h2_est) else: alpha = 1.0 - zero_jump_prob rand_ps = sp.random.random(m) rand_norms = stats.norm.rvs(0.0, 1, size=m)*const_dict['rv_scalars'] for i, snp_i in enumerate(iter_order): if ld_boundaries is None: start_i = max(0, snp_i - ld_radius) focal_i = min(ld_radius, snp_i) stop_i = min(m, snp_i + ld_radius + 1) else: start_i = ld_boundaries[snp_i][0] stop_i = ld_boundaries[snp_i][1] focal_i = snp_i - start_i if snp_lrld is not None: if snp_lrld[snp_i]: continue #Figure out what sample size and constants to use cd = get_constants(snp_i,const_dict) # Local LD matrix D_i = ld_dict[snp_i] # Local (most recently updated) effect estimates local_betas = curr_betas[start_i: stop_i] # Calculate the local posterior mean, used when sampling. local_betas[focal_i] = 0.0 res_beta_hat_i = beta_hats[snp_i] - sp.dot(D_i , local_betas) b2 = res_beta_hat_i ** 2 d_const_b2_exp = cd['d_const'] * sp.exp(-b2 * cd['n'] / 2.0) if sp.isreal(d_const_b2_exp): numerator = cd['c_const'] * sp.exp(-b2 / (2.0 * cd['hdmpn'])) if sp.isreal(numerator): if numerator == 0.0: postp = 0.0 else: postp = numerator / (numerator + d_const_b2_exp) assert sp.isreal(postp), 'The posterior mean is not a real number? Possibly due to problems with summary stats, LD estimates, or parameter settings.' else: postp = 0.0 else: postp = 1.0 curr_post_means[snp_i] = cd['hdmp_hdmpn'] * postp * res_beta_hat_i if rand_ps[i] < postp * alpha: # Sample from the posterior Gaussian dist. proposed_beta = rand_norms[snp_i] + cd['hdmp_hdmpn'] * res_beta_hat_i else: # Sample 0 proposed_beta = 0.0 curr_betas[snp_i] = proposed_beta # UPDATE BETA if verbose and print_progress: sys.stdout.write('\r%0.2f%%' % (100.0 * (min(1, float(k + 1) / num_iter)))) sys.stdout.flush() if k >= burn_in: avg_betas += curr_post_means # Averaging over the posterior means instead of samples. if verbose and print_progress: sys.stdout.write('\r%0.2f%%\n' % (100.0)) sys.stdout.flush() avg_betas = avg_betas / float(num_iter - burn_in) t1 = time.time() t = (t1 - t0) if verbose: print('Took %d minutes and %0.2f seconds' % (t / 60, t % 60)) return {'betas':avg_betas, 'inf_betas':start_betas} def ldpred_genomewide(data_file=None, ld_radius=None, ld_dict=None, out_file_prefix=None, summary_dict=None, ps=None,n=None, h2=None, use_gw_h2=False, sampl_var_shrink_factor=1, incl_long_range_ld=False, num_iter=None, verbose=False, zero_jump_prob=0.01, burn_in=5): """ Calculate LDpred for a genome """ print('Applying LDpred with LD radius: %d' % ld_radius) df = h5py.File(data_file, 'r') has_phenotypes = False if 'y' in df: y = df['y'][...] # Phenotype num_individs = len(y) risk_scores_pval_derived = sp.zeros(num_individs) has_phenotypes = True ld_scores_dict = ld_dict['ld_scores_dict'] chrom_ld_dict = ld_dict['chrom_ld_dict'] chrom_ref_ld_mats = ld_dict['chrom_ref_ld_mats'] cord_data_g = df['cord_data'] mean_n = coord_genotypes.get_mean_sample_size(n, cord_data_g) #Calculating genome-wide heritability using LD score regression, and partition heritability by chromsomes herit_dict = ld.get_chromosome_herits(cord_data_g, ld_scores_dict, mean_n, h2=h2, use_gw_h2=use_gw_h2, debug=verbose, summary_dict=summary_dict) if herit_dict['gw_h2_ld_score_est']>ld_radius/10.0: print ('\033[93m Warning: LD radius seems small in comparison to the average LD score. ' 'Please consider a larger one, or a smaller number of SNPs used in the analysis. 
\033[0m') LDpred_inf_chrom_dict = {} print('Calculating LDpred-inf weights') for chrom_str in util.chromosomes_list: if chrom_str in cord_data_g: if verbose: print('Calculating SNP weights for Chromosome %s' % ((chrom_str.split('_'))[1])) g = cord_data_g[chrom_str] # Filter monomorphic SNPs snp_stds = g['snp_stds_ref'][...] snp_stds = snp_stds.flatten() ok_snps_filter = snp_stds > 0 pval_derived_betas = g['betas'][...] pval_derived_betas = pval_derived_betas[ok_snps_filter] h2_chrom = herit_dict[chrom_str]['h2'] start_betas = LDpred_inf.ldpred_inf(pval_derived_betas, genotypes=None, reference_ld_mats=chrom_ref_ld_mats[chrom_str], h2=h2_chrom, n=mean_n, ld_window_size=2 * ld_radius, verbose=verbose) LDpred_inf_chrom_dict[chrom_str] = start_betas if not incl_long_range_ld: lrld_dict = util.load_lrld_dict() num_snps_in_lrld = 0 results_dict = {} convergence_report = {} for p in ps: convergence_report[p] = False print('Starting LDpred gibbs with f=%0.4f' % p) p_str = '%0.4f' % p results_dict[p_str] = {} if out_file_prefix: # Preparing output files raw_effect_sizes = [] ldpred_effect_sizes = [] ldpred_inf_effect_sizes = [] out_sids = [] chromosomes = [] out_positions = [] out_nts = [] chrom_i = 0 num_chrom = len(cord_data_g.keys()) for chrom_str in util.chromosomes_list: chrom_i+=1 if chrom_str in cord_data_g: g = cord_data_g[chrom_str] if out_file_prefix: positions = g['positions'][...] sids = (g['sids'][...]).astype(util.sids_u_dtype) log_odds = g['log_odds'][...] nts = (g['nts'][...]).astype(util.nts_u_dtype) chromosomes.extend([chrom_str] * len(positions)) out_positions.extend(positions) out_sids.extend(sids) raw_effect_sizes.extend(log_odds) out_nts.extend(nts) pval_derived_betas = g['betas'][...] ns = g['ns'][...] h2_chrom = herit_dict[chrom_str]['h2'] snp_lrld = None if not incl_long_range_ld: snp_lrld = util.get_snp_lrld_status(chrom_i, positions, lrld_dict) num_snps_in_lrld +=sp.sum(snp_lrld) ld_boundaries = None if 'chrom_ld_boundaries' in ld_dict: ld_boundaries = ld_dict['chrom_ld_boundaries'][chrom_str] if verbose: print('Calculating SNP weights for Chromosome %s' % ((chrom_str.split('_'))[1])) res_dict = ldpred_gibbs(pval_derived_betas,h2=h2_chrom, n=n, ns=ns, p=p, ld_radius=ld_radius, verbose=verbose, num_iter=num_iter, burn_in=burn_in, ld_dict=chrom_ld_dict[chrom_str], start_betas=LDpred_inf_chrom_dict[chrom_str], ld_boundaries=ld_boundaries, zero_jump_prob=zero_jump_prob,sampl_var_shrink_factor=sampl_var_shrink_factor, snp_lrld=snp_lrld, print_progress=False) updated_betas = res_dict['betas'] updated_inf_betas = res_dict['inf_betas'] sum_sqr_effects = sp.sum(updated_betas ** 2) if sum_sqr_effects > herit_dict['gw_h2_ld_score_est']: print('Sum of squared updated effects estimates seems too large: %0.4f'% sum_sqr_effects) print('This suggests that the Gibbs sampler did not convergence.') convergence_report[p] = True snp_stds = g['snp_stds_ref'][...] snp_stds = snp_stds.flatten() updated_betas = updated_betas / snp_stds updated_inf_betas = updated_inf_betas / snp_stds ldpred_effect_sizes.extend(updated_betas) ldpred_inf_effect_sizes.extend(updated_inf_betas) if not verbose: sys.stdout.write('\r%0.2f%%' % (100.0 * (min(1, float(chrom_i) / num_chrom)))) sys.stdout.flush() else: if has_phenotypes: if 'raw_snps_val' in g: raw_snps = g['raw_snps_val'][...] else: raw_snps = g['raw_snps_ref'][...] 
prs = sp.dot(updated_betas, raw_snps) risk_scores_pval_derived += prs corr = sp.corrcoef(y, prs)[0, 1] r2 = corr ** 2 print('The R2 prediction accuracy of PRS using %s was: %0.4f' % (chrom_str, r2)) if not incl_long_range_ld: summary_dict[1.3]={'name':'SNPs in long-range LD regions','value':'%d'%num_snps_in_lrld} if not verbose: sys.stdout.write('\r%0.2f%%\n' % (100.0)) sys.stdout.flush() if verbose and has_phenotypes: num_indivs = len(y) results_dict[p_str]['y'] = y results_dict[p_str]['risk_scores_pd'] = risk_scores_pval_derived print('Prediction accuracy was assessed using %d individuals.' % (num_indivs)) corr = sp.corrcoef(y, risk_scores_pval_derived)[0, 1] r2 = corr ** 2 results_dict[p_str]['r2_pd'] = r2 print('The R2 prediction accuracy (observed scale) for the whole genome was: %0.4f (%0.6f)' % (r2, ((1 - r2) ** 2) / num_indivs)) if corr < 0: risk_scores_pval_derived = -1 * risk_scores_pval_derived auc = util.calc_auc(y, risk_scores_pval_derived) print('AUC for the whole genome was: %0.4f' % auc) # Now calibration denominator = sp.dot(risk_scores_pval_derived.T, risk_scores_pval_derived) y_norm = (y - sp.mean(y)) / sp.std(y) numerator = sp.dot(risk_scores_pval_derived.T, y_norm) regression_slope = (numerator / denominator) # [0][0] print('The slope for predictions with P-value derived effects is: %0.4f' % regression_slope) results_dict[p_str]['slope_pd'] = regression_slope weights_out_file = '%s_LDpred_p%0.4e.txt' % (out_file_prefix, p) with open(weights_out_file, 'w') as f: f.write('chrom pos sid nt1 nt2 raw_beta ldpred_beta\n') for chrom, pos, sid, nt, raw_beta, ldpred_beta in zip(chromosomes, out_positions, out_sids, out_nts, raw_effect_sizes, ldpred_effect_sizes): nt1, nt2 = nt[0], nt[1] f.write('%s %d %s %s %s %0.4e %0.4e\n' % (chrom, pos, sid, nt1, nt2, raw_beta, ldpred_beta)) weights_out_file = '%s_LDpred-inf.txt' % (out_file_prefix) with open(weights_out_file, 'w') as f: f.write('chrom pos sid nt1 nt2 raw_beta ldpred_inf_beta \n') for chrom, pos, sid, nt, raw_beta, ldpred_inf_beta in zip(chromosomes, out_positions, out_sids, out_nts, raw_effect_sizes, ldpred_inf_effect_sizes): nt1, nt2 = nt[0], nt[1] f.write('%s %d %s %s %s %0.4e %0.4e\n' % (chrom, pos, sid, nt1, nt2, raw_beta, ldpred_inf_beta)) summary_dict[2.0]={'name':'Gibbs sampler fractions used','value':str(ps)} ['Yes' if convergence_report[p] else 'No' for p in ps] summary_dict[2.1]={'name':'Number of burn-iterations used','value':'%i'%burn_in} summary_dict[2.2]={'name':'Number of iterations used','value':'%i'%num_iter} summary_dict[2.3]={'name':'Convergence issues (for each fraction)','value':str(['Yes' if convergence_report[p] else 'No' for p in ps])} def main(p_dict): #Check parameters summary_dict = {} summary_dict[0]={'name':'Coordinated data filename','value':p_dict['cf']} summary_dict[0.1]={'name':'SNP weights output file (prefix)', 'value':p_dict['out']} summary_dict[0.2]={'name':'LD data filename (prefix)', 'value':p_dict['ldf']} summary_dict[1.01]={'name':'LD radius used','value':str(p_dict['ldr'])} t0 = time.time() summary_dict[1]={'name':'dash', 'value':'LD information'} ld_dict = ld.get_ld_dict_using_p_dict(p_dict, summary_dict) t1 = time.time() t = (t1 - t0) summary_dict[1.2]={'name':'Running time for calculating LD information:','value':'%d min and %0.2f secs'% (t / 60, t % 60)} t0 = time.time() summary_dict[1.9]={'name':'dash', 'value':'LDpred Gibbs sampler'} ldpred_genomewide(data_file=p_dict['cf'], out_file_prefix=p_dict['out'], ps=p_dict['f'], ld_radius=p_dict['ldr'], ld_dict=ld_dict, n=p_dict['N'], 
num_iter=p_dict['n_iter'], burn_in=p_dict['n_burn_in'], h2=p_dict['h2'], use_gw_h2=p_dict['use_gw_h2'], incl_long_range_ld=p_dict['incl_long_range_ld'], sampl_var_shrink_factor=1, verbose=p_dict['debug'], summary_dict=summary_dict) t1 = time.time() t = (t1 - t0) summary_dict[3]={'name':'Running time for Gibbs sampler(s):','value':'%d min and %0.2f secs'% (t / 60, t % 60)} reporting.print_summary(summary_dict, 'Summary of LDpred Gibbs')
[ "sys.stdout.write", "scipy.isreal", "ldpred.coord_genotypes.get_mean_sample_size", "scipy.sum", "scipy.stats.norm.rvs", "ldpred.reporting.print_summary", "ldpred.util.load_lrld_dict", "sys.stdout.flush", "ldpred.LDpred_inf.ldpred_inf", "scipy.exp", "scipy.zeros", "ldpred.util.get_snp_lrld_status", "ldpred.ld.get_chromosome_herits", "scipy.copy", "h5py.File", "scipy.arange", "scipy.mean", "scipy.random.seed", "scipy.sqrt", "ldpred.util.calc_auc", "ldpred.ld.get_ld_dict_using_p_dict", "scipy.std", "scipy.random.random", "time.time", "scipy.dot", "scipy.corrcoef" ]
[((1271, 1282), 'scipy.zeros', 'sp.zeros', (['m'], {}), '(m)\n', (1279, 1282), True, 'import scipy as sp\n'), ((3053, 3071), 'scipy.random.seed', 'sp.random.seed', (['(42)'], {}), '(42)\n', (3067, 3071), True, 'import scipy as sp\n'), ((3083, 3094), 'time.time', 'time.time', ([], {}), '()\n', (3092, 3094), False, 'import time\n'), ((3709, 3729), 'scipy.copy', 'sp.copy', (['start_betas'], {}), '(start_betas)\n', (3716, 3729), True, 'import scipy as sp\n'), ((3854, 3865), 'scipy.zeros', 'sp.zeros', (['m'], {}), '(m)\n', (3862, 3865), True, 'import scipy as sp\n'), ((3882, 3893), 'scipy.zeros', 'sp.zeros', (['m'], {}), '(m)\n', (3890, 3893), True, 'import scipy as sp\n'), ((3970, 3982), 'scipy.arange', 'sp.arange', (['m'], {}), '(m)\n', (3979, 3982), True, 'import scipy as sp\n'), ((7311, 7322), 'time.time', 'time.time', ([], {}), '()\n', (7320, 7322), False, 'import time\n'), ((7967, 7992), 'h5py.File', 'h5py.File', (['data_file', '"""r"""'], {}), "(data_file, 'r')\n", (7976, 7992), False, 'import h5py\n'), ((8397, 8449), 'ldpred.coord_genotypes.get_mean_sample_size', 'coord_genotypes.get_mean_sample_size', (['n', 'cord_data_g'], {}), '(n, cord_data_g)\n', (8433, 8449), False, 'from ldpred import coord_genotypes\n'), ((8578, 8713), 'ldpred.ld.get_chromosome_herits', 'ld.get_chromosome_herits', (['cord_data_g', 'ld_scores_dict', 'mean_n'], {'h2': 'h2', 'use_gw_h2': 'use_gw_h2', 'debug': 'verbose', 'summary_dict': 'summary_dict'}), '(cord_data_g, ld_scores_dict, mean_n, h2=h2,\n use_gw_h2=use_gw_h2, debug=verbose, summary_dict=summary_dict)\n', (8602, 8713), False, 'from ldpred import ld\n'), ((17817, 17828), 'time.time', 'time.time', ([], {}), '()\n', (17826, 17828), False, 'import time\n'), ((17905, 17954), 'ldpred.ld.get_ld_dict_using_p_dict', 'ld.get_ld_dict_using_p_dict', (['p_dict', 'summary_dict'], {}), '(p_dict, summary_dict)\n', (17932, 17954), False, 'from ldpred import ld\n'), ((17964, 17975), 'time.time', 'time.time', ([], {}), '()\n', (17973, 17975), False, 'import time\n'), ((18131, 18142), 'time.time', 'time.time', ([], {}), '()\n', (18140, 18142), False, 'import time\n'), ((18672, 18683), 'time.time', 'time.time', ([], {}), '()\n', (18681, 18683), False, 'import time\n'), ((18822, 18886), 'ldpred.reporting.print_summary', 'reporting.print_summary', (['summary_dict', '"""Summary of LDpred Gibbs"""'], {}), "(summary_dict, 'Summary of LDpred Gibbs')\n", (18845, 18886), False, 'from ldpred import reporting\n'), ((868, 879), 'scipy.mean', 'sp.mean', (['ns'], {}), '(ns)\n', (875, 879), True, 'import scipy as sp\n'), ((3489, 3656), 'ldpred.LDpred_inf.ldpred_inf', 'LDpred_inf.ldpred_inf', (['beta_hats'], {'genotypes': 'genotypes', 'reference_ld_mats': 'reference_ld_mats', 'h2': 'h2', 'n': 'ldpred_inf_n', 'ld_window_size': '(2 * ld_radius)', 'verbose': '(False)'}), '(beta_hats, genotypes=genotypes, reference_ld_mats=\n reference_ld_mats, h2=h2, n=ldpred_inf_n, ld_window_size=2 * ld_radius,\n verbose=False)\n', (3510, 3656), False, 'from ldpred import LDpred_inf\n'), ((4560, 4579), 'scipy.random.random', 'sp.random.random', (['m'], {}), '(m)\n', (4576, 4579), True, 'import scipy as sp\n'), ((7178, 7217), 'sys.stdout.write', 'sys.stdout.write', (["('\\r%0.2f%%\\n' % 100.0)"], {}), "('\\r%0.2f%%\\n' % 100.0)\n", (7194, 7217), False, 'import sys\n'), ((7228, 7246), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (7244, 7246), False, 'import sys\n'), ((8141, 8163), 'scipy.zeros', 'sp.zeros', (['num_individs'], {}), '(num_individs)\n', (8149, 8163), True, 'import scipy as sp\n'), 
((10081, 10102), 'ldpred.util.load_lrld_dict', 'util.load_lrld_dict', ([], {}), '()\n', (10100, 10102), False, 'from ldpred import util\n'), ((378, 388), 'scipy.std', 'sp.std', (['ns'], {}), '(ns)\n', (384, 388), True, 'import scipy as sp\n'), ((389, 400), 'scipy.mean', 'sp.mean', (['ns'], {}), '(ns)\n', (396, 400), True, 'import scipy as sp\n'), ((446, 457), 'scipy.mean', 'sp.mean', (['ns'], {}), '(ns)\n', (453, 457), True, 'import scipy as sp\n'), ((1409, 1423), 'scipy.sqrt', 'sp.sqrt', (['hdmpn'], {}), '(hdmpn)\n', (1416, 1423), True, 'import scipy as sp\n'), ((1456, 1479), 'scipy.sqrt', 'sp.sqrt', (['(1.0 / ldpred_n)'], {}), '(1.0 / ldpred_n)\n', (1463, 1479), True, 'import scipy as sp\n'), ((1715, 1753), 'scipy.sqrt', 'sp.sqrt', (['(hdmp_hdmpn * (1.0 / ldpred_n))'], {}), '(hdmp_hdmpn * (1.0 / ldpred_n))\n', (1722, 1753), True, 'import scipy as sp\n'), ((4189, 4212), 'scipy.sum', 'sp.sum', (['(curr_betas ** 2)'], {}), '(curr_betas ** 2)\n', (4195, 4212), True, 'import scipy as sp\n'), ((4610, 4640), 'scipy.stats.norm.rvs', 'stats.norm.rvs', (['(0.0)', '(1)'], {'size': 'm'}), '(0.0, 1, size=m)\n', (4624, 4640), False, 'from scipy import stats\n'), ((5828, 5853), 'scipy.isreal', 'sp.isreal', (['d_const_b2_exp'], {}), '(d_const_b2_exp)\n', (5837, 5853), True, 'import scipy as sp\n'), ((6991, 7009), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (7007, 7009), False, 'import sys\n'), ((9746, 9930), 'ldpred.LDpred_inf.ldpred_inf', 'LDpred_inf.ldpred_inf', (['pval_derived_betas'], {'genotypes': 'None', 'reference_ld_mats': 'chrom_ref_ld_mats[chrom_str]', 'h2': 'h2_chrom', 'n': 'mean_n', 'ld_window_size': '(2 * ld_radius)', 'verbose': 'verbose'}), '(pval_derived_betas, genotypes=None, reference_ld_mats\n =chrom_ref_ld_mats[chrom_str], h2=h2_chrom, n=mean_n, ld_window_size=2 *\n ld_radius, verbose=verbose)\n', (9767, 9930), False, 'from ldpred import LDpred_inf\n'), ((14539, 14578), 'sys.stdout.write', 'sys.stdout.write', (["('\\r%0.2f%%\\n' % 100.0)"], {}), "('\\r%0.2f%%\\n' % 100.0)\n", (14555, 14578), False, 'import sys\n'), ((14593, 14611), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (14609, 14611), False, 'import sys\n'), ((15300, 15342), 'ldpred.util.calc_auc', 'util.calc_auc', (['y', 'risk_scores_pval_derived'], {}), '(y, risk_scores_pval_derived)\n', (15313, 15342), False, 'from ldpred import util\n'), ((15498, 15558), 'scipy.dot', 'sp.dot', (['risk_scores_pval_derived.T', 'risk_scores_pval_derived'], {}), '(risk_scores_pval_derived.T, risk_scores_pval_derived)\n', (15504, 15558), True, 'import scipy as sp\n'), ((15633, 15675), 'scipy.dot', 'sp.dot', (['risk_scores_pval_derived.T', 'y_norm'], {}), '(risk_scores_pval_derived.T, y_norm)\n', (15639, 15675), True, 'import scipy as sp\n'), ((1949, 1965), 'scipy.sqrt', 'sp.sqrt', (['hdmpn_i'], {}), '(hdmpn_i)\n', (1956, 1965), True, 'import scipy as sp\n'), ((2004, 2021), 'scipy.sqrt', 'sp.sqrt', (['(1.0 / ni)'], {}), '(1.0 / ni)\n', (2011, 2021), True, 'import scipy as sp\n'), ((2277, 2311), 'scipy.sqrt', 'sp.sqrt', (['(hdmp_hdmpn_i * (1.0 / ni))'], {}), '(hdmp_hdmpn_i * (1.0 / ni))\n', (2284, 2311), True, 'import scipy as sp\n'), ((5675, 5699), 'scipy.dot', 'sp.dot', (['D_i', 'local_betas'], {}), '(D_i, local_betas)\n', (5681, 5699), True, 'import scipy as sp\n'), ((5785, 5812), 'scipy.exp', 'sp.exp', (["(-b2 * cd['n'] / 2.0)"], {}), "(-b2 * cd['n'] / 2.0)\n", (5791, 5812), True, 'import scipy as sp\n'), ((5952, 5972), 'scipy.isreal', 'sp.isreal', (['numerator'], {}), '(numerator)\n', (5961, 5972), True, 'import 
scipy as sp\n'), ((12884, 12910), 'scipy.sum', 'sp.sum', (['(updated_betas ** 2)'], {}), '(updated_betas ** 2)\n', (12890, 12910), True, 'import scipy as sp\n'), ((14916, 14956), 'scipy.corrcoef', 'sp.corrcoef', (['y', 'risk_scores_pval_derived'], {}), '(y, risk_scores_pval_derived)\n', (14927, 14956), True, 'import scipy as sp\n'), ((15599, 15608), 'scipy.std', 'sp.std', (['y'], {}), '(y)\n', (15605, 15608), True, 'import scipy as sp\n'), ((5899, 5932), 'scipy.exp', 'sp.exp', (["(-b2 / (2.0 * cd['hdmpn']))"], {}), "(-b2 / (2.0 * cd['hdmpn']))\n", (5905, 5932), True, 'import scipy as sp\n'), ((11737, 11792), 'ldpred.util.get_snp_lrld_status', 'util.get_snp_lrld_status', (['chrom_i', 'positions', 'lrld_dict'], {}), '(chrom_i, positions, lrld_dict)\n', (11761, 11792), False, 'from ldpred import util\n'), ((11838, 11854), 'scipy.sum', 'sp.sum', (['snp_lrld'], {}), '(snp_lrld)\n', (11844, 11854), True, 'import scipy as sp\n'), ((13755, 13773), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (13771, 13773), False, 'import sys\n'), ((15585, 15595), 'scipy.mean', 'sp.mean', (['y'], {}), '(y)\n', (15592, 15595), True, 'import scipy as sp\n'), ((831, 842), 'scipy.mean', 'sp.mean', (['ns'], {}), '(ns)\n', (838, 842), True, 'import scipy as sp\n'), ((6181, 6197), 'scipy.isreal', 'sp.isreal', (['postp'], {}), '(postp)\n', (6190, 6197), True, 'import scipy as sp\n'), ((14068, 14099), 'scipy.dot', 'sp.dot', (['updated_betas', 'raw_snps'], {}), '(updated_betas, raw_snps)\n', (14074, 14099), True, 'import scipy as sp\n'), ((4457, 4474), 'scipy.sqrt', 'sp.sqrt', (['ldpred_n'], {}), '(ldpred_n)\n', (4464, 4474), True, 'import scipy as sp\n'), ((14187, 14206), 'scipy.corrcoef', 'sp.corrcoef', (['y', 'prs'], {}), '(y, prs)\n', (14198, 14206), True, 'import scipy as sp\n')]
from textwrap import dedent from sml_test.cli import cli def assert_result(result, ok=0, fail=0, err=0, exit_code=0, contains=""): assert f"OK={ok}, FAIL={fail}, ERR={err}" in result.output assert result.exit_code == exit_code assert contains in result.output def test_ok_and_err(sml_test_file, cli_runner): sml_test_file.write_text( dedent( """ val test_1 = 1 = 1 val test_2 = 1 = 2 """ ) ) result = cli_runner.invoke(cli) assert_result(result, ok=1, fail=1, exit_code=1) def test_err(sml_test_file, cli_runner): sml_test_file.write_text( dedent( """ test_1 = 2 = 2 """ ) ) result = cli_runner.invoke(cli) # Two errors: mismatch type and unbound variable assert_result(result, err=2, exit_code=1) def test_ok(sml_test_file, cli_runner): sml_test_file.write_text( dedent( """ val test_1 = 1 = 1 val test_2 = 2 = 2 """ ) ) result = cli_runner.invoke(cli) assert_result(result, ok=2) def test_no_tests(sml_test_file, cli_runner): sml_test_file.write_text( dedent( """ val foo = 2 = 2 """ ) ) result = cli_runner.invoke(cli) assert_result(result) def test_verbose(sml_test_file, cli_runner): sml_test_file.write_text( dedent( """ val test_1 = 1 = 1 """ ) ) error_message = "val test_1 = true : bool" result = cli_runner.invoke(cli, ["-v"]) assert_result(result, ok=1, exit_code=0, contains=error_message) def test_unknown_symbol_in_impl(sml_test_file, sml_impl_file, cli_runner): sml_impl_file.write_text( dedent( """ fun sum_pair_list(xs : (int * int) list) = sum_list(firsts xs) + sum_list(seconds xs) """ ) ) sml_test_file.write_text( dedent( """ use "sample.sml"; val test_1 = sum_pair_list([(1, 2), (3, 4)]) = 10 """ ) ) result = cli_runner.invoke(cli) assert_result(result, err=4, exit_code=1) def test_type_mismatch_in_impl(sml_test_file, sml_impl_file, cli_runner): sml_impl_file.write_text( dedent( """ fun list_product(xs : int list) = if null xs then 1 else hd xs * list_product(tl xs) fun countdown(x : int) = if x = 0 then [] else x :: countdown(x - 1) fun factorial(x : int) = list_product countdown x """ ) ) sml_test_file.write_text( dedent( """ use "sample.sml"; val test9_2 = factorial 4 = 24 """ ) ) result = cli_runner.invoke(cli) assert_result(result, err=1, exit_code=1) def test_runtime_exception(sml_test_file, sml_impl_file, cli_runner): sml_impl_file.write_text( dedent( """ fun max1(xs : int list) = if null xs then NONE else let val tl_ans = max1(tl xs) in if isSome tl_ans andalso valOf tl_ans > hd xs then tl_ans else SOME (hd xs) end """ ) ) sml_test_file.write_text( dedent( """ use "sample.sml"; val test3 = valOf(max1 []); """ ) ) result = cli_runner.invoke(cli) assert_result(result, err=1, exit_code=1) def test_usage_fail(sml_test_file, cli_runner): sml_test_file.write_text('use "foo_bar.sml";') error_message = "use failed: 'foo_bar.sml'" result = cli_runner.invoke(cli) assert_result(result, err=1, exit_code=1, contains=error_message)
[ "textwrap.dedent" ]
[((363, 470), 'textwrap.dedent', 'dedent', (['"""\n val test_1 = 1 = 1\n val test_2 = 1 = 2\n """'], {}), '(\n """\n val test_1 = 1 = 1\n val test_2 = 1 = 2\n """\n )\n', (369, 470), False, 'from textwrap import dedent\n'), ((659, 717), 'textwrap.dedent', 'dedent', (['"""\n test_1 = 2 = 2\n """'], {}), '("""\n test_1 = 2 = 2\n """)\n', (665, 717), False, 'from textwrap import dedent\n'), ((961, 1068), 'textwrap.dedent', 'dedent', (['"""\n val test_1 = 1 = 1\n val test_2 = 2 = 2\n """'], {}), '(\n """\n val test_1 = 1 = 1\n val test_2 = 2 = 2\n """\n )\n', (967, 1068), False, 'from textwrap import dedent\n'), ((1241, 1300), 'textwrap.dedent', 'dedent', (['"""\n val foo = 2 = 2\n """'], {}), '("""\n val foo = 2 = 2\n """)\n', (1247, 1300), False, 'from textwrap import dedent\n'), ((1476, 1538), 'textwrap.dedent', 'dedent', (['"""\n val test_1 = 1 = 1\n """'], {}), '("""\n val test_1 = 1 = 1\n """)\n', (1482, 1538), False, 'from textwrap import dedent\n'), ((1842, 2001), 'textwrap.dedent', 'dedent', (['"""\n fun sum_pair_list(xs : (int * int) list) =\n sum_list(firsts xs) + sum_list(seconds xs)\n """'], {}), '(\n """\n fun sum_pair_list(xs : (int * int) list) =\n sum_list(firsts xs) + sum_list(seconds xs)\n """\n )\n', (1848, 2001), False, 'from textwrap import dedent\n'), ((2058, 2195), 'textwrap.dedent', 'dedent', (['"""\n use "sample.sml";\n val test_1 = sum_pair_list([(1, 2), (3, 4)]) = 10\n """'], {}), '(\n """\n use "sample.sml";\n val test_1 = sum_pair_list([(1, 2), (3, 4)]) = 10\n """\n )\n', (2064, 2195), False, 'from textwrap import dedent\n'), ((2410, 2829), 'textwrap.dedent', 'dedent', (['"""\n fun list_product(xs : int list) =\n if null xs\n then 1\n else hd xs * list_product(tl xs)\n\n fun countdown(x : int) =\n if x = 0\n then []\n else x :: countdown(x - 1)\n\n fun factorial(x : int) =\n list_product countdown x\n """'], {}), '(\n """\n fun list_product(xs : int list) =\n if null xs\n then 1\n else hd xs * list_product(tl xs)\n\n fun countdown(x : int) =\n if x = 0\n then []\n else x :: countdown(x - 1)\n\n fun factorial(x : int) =\n list_product countdown x\n """\n )\n', (2416, 2829), False, 'from textwrap import dedent\n'), ((2886, 3004), 'textwrap.dedent', 'dedent', (['"""\n use "sample.sml";\n val test9_2 = factorial 4 = 24\n """'], {}), '(\n """\n use "sample.sml";\n val test9_2 = factorial 4 = 24\n """\n )\n', (2892, 3004), False, 'from textwrap import dedent\n'), ((3215, 3592), 'textwrap.dedent', 'dedent', (['"""\n fun max1(xs : int list) =\n if null xs\n then NONE\n else\n let val tl_ans = max1(tl xs)\n in if isSome tl_ans andalso valOf tl_ans > hd xs\n then tl_ans\n else SOME (hd xs)\n end\n """'], {}), '(\n """\n fun max1(xs : int list) =\n if null xs\n then NONE\n else\n let val tl_ans = max1(tl xs)\n in if isSome tl_ans andalso valOf tl_ans > hd xs\n then tl_ans\n else SOME (hd xs)\n end\n """\n )\n', (3221, 3592), False, 'from textwrap import dedent\n'), ((3649, 3764), 'textwrap.dedent', 'dedent', (['"""\n use "sample.sml";\n val test3 = valOf(max1 []);\n """'], {}), '(\n """\n use "sample.sml";\n val test3 = valOf(max1 []);\n """\n )\n', (3655, 3764), False, 'from textwrap import dedent\n')]
from unittest import TestCase from unittest.mock import patch import numpy as np from exceptions import NoUriProviden from main import calculate_average_face_encoding from main import obtain_image_face_encodings from main import parallelize_face_encodings class TryTesting(TestCase): def test_obtain_image_face_encodings_empty_uri(self): with self.assertRaises(NoUriProviden): obtain_image_face_encodings("") def test_obtain_image_face_encodings_uri_not_found(self): with self.assertRaises(FileNotFoundError): obtain_image_face_encodings("uri/that/doesnt/exists") @patch("main.IMAGE_DIRECTORY", "/path/that/doesnt/exist") def test_parallelize_face_encodings_directory_not_found(self): # this is not checked on the actual code but the actual # face_recognition library is raising the exception with self.assertRaises(FileNotFoundError): parallelize_face_encodings() @patch("main.obtain_image_face_encodings", return_value=[]) @patch("os.listdir", return_value=[]) def test_parallelize_face_encodings_empty_directory_encoding_not_called( self, listdir_mock, obtain_mock ): self.assertFalse(obtain_mock.called) def test_calculate_average_face_encoding_with_empty_encodings(self): self.assertIsNone(calculate_average_face_encoding([])) @patch("main.np.savetxt") def test_calculate_average_face_encoding_ensure_file_creation_called( self, mocked_np ): calculate_average_face_encoding(np.ndarray([1])) self.assertTrue(mocked_np.called)
[ "main.obtain_image_face_encodings", "main.parallelize_face_encodings", "unittest.mock.patch", "main.calculate_average_face_encoding", "numpy.ndarray" ]
[((622, 678), 'unittest.mock.patch', 'patch', (['"""main.IMAGE_DIRECTORY"""', '"""/path/that/doesnt/exist"""'], {}), "('main.IMAGE_DIRECTORY', '/path/that/doesnt/exist')\n", (627, 678), False, 'from unittest.mock import patch\n'), ((968, 1026), 'unittest.mock.patch', 'patch', (['"""main.obtain_image_face_encodings"""'], {'return_value': '[]'}), "('main.obtain_image_face_encodings', return_value=[])\n", (973, 1026), False, 'from unittest.mock import patch\n'), ((1032, 1068), 'unittest.mock.patch', 'patch', (['"""os.listdir"""'], {'return_value': '[]'}), "('os.listdir', return_value=[])\n", (1037, 1068), False, 'from unittest.mock import patch\n'), ((1381, 1405), 'unittest.mock.patch', 'patch', (['"""main.np.savetxt"""'], {}), "('main.np.savetxt')\n", (1386, 1405), False, 'from unittest.mock import patch\n'), ((404, 435), 'main.obtain_image_face_encodings', 'obtain_image_face_encodings', (['""""""'], {}), "('')\n", (431, 435), False, 'from main import obtain_image_face_encodings\n'), ((562, 615), 'main.obtain_image_face_encodings', 'obtain_image_face_encodings', (['"""uri/that/doesnt/exists"""'], {}), "('uri/that/doesnt/exists')\n", (589, 615), False, 'from main import obtain_image_face_encodings\n'), ((933, 961), 'main.parallelize_face_encodings', 'parallelize_face_encodings', ([], {}), '()\n', (959, 961), False, 'from main import parallelize_face_encodings\n'), ((1338, 1373), 'main.calculate_average_face_encoding', 'calculate_average_face_encoding', (['[]'], {}), '([])\n', (1369, 1373), False, 'from main import calculate_average_face_encoding\n'), ((1551, 1566), 'numpy.ndarray', 'np.ndarray', (['[1]'], {}), '([1])\n', (1561, 1566), True, 'import numpy as np\n')]
# GPLv3 License # # Copyright (C) 2020 Ubisoft # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. """ Define and register encodable/decodable message types """ from mixer import codec from mixer.broadcaster.common import MessageType from mixer.blender_client import messages message_types = { MessageType.TRANSFORM: messages.TransformMessage, MessageType.LIGHT: messages.LightMessage, } def register(): codec.register_message_types(message_types) def unregister(): codec.unregister_message_types(message_types)
[ "mixer.codec.register_message_types", "mixer.codec.unregister_message_types" ]
[((1005, 1048), 'mixer.codec.register_message_types', 'codec.register_message_types', (['message_types'], {}), '(message_types)\n', (1033, 1048), False, 'from mixer import codec\n'), ((1073, 1118), 'mixer.codec.unregister_message_types', 'codec.unregister_message_types', (['message_types'], {}), '(message_types)\n', (1103, 1118), False, 'from mixer import codec\n')]
import numpy as np import cv2 import matplotlib.pyplot as plt from davg.lanefinding.Prediction import Prediction def plot_line(img, x, y, color=(255,255,0), thickness=2): ''' Takes an image and two arrays of x and y points similar to matplotlib and writes the lines onto the image. If the points are floats, they are rounded and converted to ints to satisfy opencv. ''' points = np.rint(np.vstack([x,y]).T).astype(int) #print(points) cv2.polylines(img, [points], False, color, thickness) def demonstrate_weighted_average_and_prediction(): # Create a blank array to be used as an image test_img = np.zeros((128, 128, 3), dtype='uint8') # Define common y-points y = np.array([0,31,63,95,127]) # Define an array of x-point arrays #recent_x = np.array([[40,40,40,40,40]]) #recent_x = np.array([[40,40,40,40,40], [30,35,37,39,40]]) #recent_x = np.array([[40,40,40,40,40], [30,35,37,39,40], [20,30,35,38,40], [10,25,32,37,40]]) #recent_x = np.array([[40,40,40,40,40], [30,35,37,39,40], [20,30,35,38,40], [10,25,32,37,40], [20,30,35,38,40]]) recent_x = np.array([[40,40,40,40,40], [30,35,37,39,40], [20,30,35,38,40], [10,25,32,37,40], [0,20,29,36,40]]) print ("recent_x", recent_x) # Calculate the softmax weighted averages for the x-points averages = Prediction.find_weighted_averages(recent_x, window=3) print("weighted averages", averages) # Calculate the differences between the each consecutive set of x-points recent_xdiff = np.diff(recent_x, axis=0) print ("recent_xdiff", recent_xdiff) if len(recent_xdiff) != 0: # Calculate the non-weighted average of the differences for a baseline recent_xdiff_avg = np.average(recent_xdiff, axis=0) print ("recent_xdiff_avg", recent_xdiff_avg) # Calculate the softmax weighted averages for the differences in the x-points xdiff_weighted_averages = Prediction.find_weighted_averages(recent_xdiff, window=2) print("xdiff_weighted_averages[-1]:", xdiff_weighted_averages[-1]) # Predict the next line location by applying the last weighted diff to the last x-points #predicted_x = np.add(xdiff_weighted_averages[-1], recent_x[-1]) predicted_x = Prediction.predict_next_values(recent_x, window=2) print("predicted:", predicted_x) # Plot the various lines for i in range(len(recent_x)): # Plot a red line for the weighted moving averages plot_line(test_img, averages[i], y, thickness=1, color=(200,0,0)) # Plot a yellow line for the current points plot_line(test_img, recent_x[i], y, thickness=1) # Plot a green line for the predicted next line based on weighted averages of the diffs plot_line(test_img, predicted_x, y, thickness=1, color=(0,200,0)) plt.imshow(test_img) plt.show() # UNCOMMENT TO RUN demonstrate_weighted_average_and_prediction()
[ "matplotlib.pyplot.show", "cv2.polylines", "numpy.average", "davg.lanefinding.Prediction.Prediction.predict_next_values", "matplotlib.pyplot.imshow", "numpy.zeros", "davg.lanefinding.Prediction.Prediction.find_weighted_averages", "numpy.diff", "numpy.array", "numpy.vstack" ]
[((472, 525), 'cv2.polylines', 'cv2.polylines', (['img', '[points]', '(False)', 'color', 'thickness'], {}), '(img, [points], False, color, thickness)\n', (485, 525), False, 'import cv2\n'), ((644, 682), 'numpy.zeros', 'np.zeros', (['(128, 128, 3)'], {'dtype': '"""uint8"""'}), "((128, 128, 3), dtype='uint8')\n", (652, 682), True, 'import numpy as np\n'), ((721, 751), 'numpy.array', 'np.array', (['[0, 31, 63, 95, 127]'], {}), '([0, 31, 63, 95, 127])\n', (729, 751), True, 'import numpy as np\n'), ((1128, 1251), 'numpy.array', 'np.array', (['[[40, 40, 40, 40, 40], [30, 35, 37, 39, 40], [20, 30, 35, 38, 40], [10, 25,\n 32, 37, 40], [0, 20, 29, 36, 40]]'], {}), '([[40, 40, 40, 40, 40], [30, 35, 37, 39, 40], [20, 30, 35, 38, 40],\n [10, 25, 32, 37, 40], [0, 20, 29, 36, 40]])\n', (1136, 1251), True, 'import numpy as np\n'), ((1340, 1393), 'davg.lanefinding.Prediction.Prediction.find_weighted_averages', 'Prediction.find_weighted_averages', (['recent_x'], {'window': '(3)'}), '(recent_x, window=3)\n', (1373, 1393), False, 'from davg.lanefinding.Prediction import Prediction\n'), ((1532, 1557), 'numpy.diff', 'np.diff', (['recent_x'], {'axis': '(0)'}), '(recent_x, axis=0)\n', (1539, 1557), True, 'import numpy as np\n'), ((2258, 2308), 'davg.lanefinding.Prediction.Prediction.predict_next_values', 'Prediction.predict_next_values', (['recent_x'], {'window': '(2)'}), '(recent_x, window=2)\n', (2288, 2308), False, 'from davg.lanefinding.Prediction import Prediction\n'), ((2822, 2842), 'matplotlib.pyplot.imshow', 'plt.imshow', (['test_img'], {}), '(test_img)\n', (2832, 2842), True, 'import matplotlib.pyplot as plt\n'), ((2847, 2857), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2855, 2857), True, 'import matplotlib.pyplot as plt\n'), ((1737, 1769), 'numpy.average', 'np.average', (['recent_xdiff'], {'axis': '(0)'}), '(recent_xdiff, axis=0)\n', (1747, 1769), True, 'import numpy as np\n'), ((1944, 2001), 'davg.lanefinding.Prediction.Prediction.find_weighted_averages', 'Prediction.find_weighted_averages', (['recent_xdiff'], {'window': '(2)'}), '(recent_xdiff, window=2)\n', (1977, 2001), False, 'from davg.lanefinding.Prediction import Prediction\n'), ((417, 434), 'numpy.vstack', 'np.vstack', (['[x, y]'], {}), '([x, y])\n', (426, 434), True, 'import numpy as np\n')]
import os import json import sys import argparse from pathlib import Path import pandas as pd from tqdm import tqdm DESCRIPTION = """ Build a csv file containing necessary information of a COCO dataset that is compatible with this package. """ def get_bbox(bbox): """Get bbox of type (xmin, ymin, xmax, ymax) from a bbox of type (x, y, w, h)""" xmin, ymin, w, h = bbox xmin = round(xmin) ymin = round(ymin) xmax = round(xmin + w) - 1 ymax = round(ymin + h) - 1 return [xmin, ymin, xmax, ymax] def process_df(df_images, df_objects): if df_objects is None: df_merge = df_images[["id", "file_name", "width", "height"]] df_merge = df_merge.set_index("id") else: # Merge df = pd.merge(df_objects, df_images, left_on="image_id", right_on="id") df = df[["image_id", "bbox", "category_id", "file_name", "height", "width"]] # Convert bboxes to integers df["bbox"] = df["bbox"].apply(get_bbox) # Merge all objects within each image def transform(sub_df): image_id, file_name, height, width = sub_df.iloc[0][ ["image_id", "file_name", "height", "width"]] category_ids = sub_df["category_id"].tolist() category_ids = ",".join(map(str, category_ids)) bboxes = sub_df["bbox"].tolist() bboxes = sum(bboxes, []) bboxes = ",".join(map(str, bboxes)) return pd.Series({ "image_id": image_id, "img_name": file_name, "width": width, "height": height, "bboxes": bboxes, "labels": category_ids }) df_merge = df.groupby("image_id").apply(transform) assert len(df_merge) == df_objects["image_id"].nunique() return df_merge def main(args): # Read annotation file print("Reading annotation file...") with open(args.ann_path) as fin: ann = json.load(fin) print(f"Number of images: {len(ann['images'])}, number of annotations: " f"{len(ann['annotations']) if 'annotations' in ann else -1}") # Convert to dataframes df_images = pd.DataFrame.from_records(ann["images"]) if "annotations" in ann: df_objects = pd.DataFrame.from_records(ann["annotations"]) assert df_objects["image_id"].isin(df_images["id"]).all() else: df_objects = None # Process dataframes print("Processing dataframes...") df = process_df(df_images, df_objects) # Parse images print("Parsing images...") ids = [] file_paths = [] no_info_ids = [] paths = list(Path(args.image_dir).glob("*.jpg")) for file_path in tqdm(paths): _, file_name = os.path.split(file_path) if not file_name.startswith("COCO"): continue name, _ = os.path.splitext(file_name) id = int(name.split("_")[-1]) if id not in df.index: no_info_ids.append(id) else: ids.append(id) file_paths.append(file_path) assert len(ids) == len(df) # make sure all images in `df` are found df = df.loc[ids] df["img_path"] = file_paths if df_objects is None: df = df[["img_path", "width", "height"]] else: df = df[["img_path", "width", "height", "bboxes", "labels"]] df.to_csv(args.save_path, index=False) print(f"There are {len(no_info_ids)} images that have no " f"information: {no_info_ids}") print("Done.") def parse_arguments(argv): parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=DESCRIPTION) parser.add_argument( '-d', '--image-dir', type=str, required=True, help='Path(s) to the image directory.') parser.add_argument( '-a', '--ann-path', type=str, required=True, help='Path to the annotation file (e.g., instances_train2014.json).') parser.add_argument( '-s', '--save-path', type=str, required=True, help='Path(s) to save the dataset information.') return parser.parse_args(argv) if __name__ == '__main__': main(parse_arguments(sys.argv[1:]))
[ "tqdm.tqdm", "json.load", "argparse.ArgumentParser", "pandas.merge", "pathlib.Path", "os.path.splitext", "pandas.DataFrame.from_records", "pandas.Series", "os.path.split" ]
[((2150, 2190), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (["ann['images']"], {}), "(ann['images'])\n", (2175, 2190), True, 'import pandas as pd\n'), ((2676, 2687), 'tqdm.tqdm', 'tqdm', (['paths'], {}), '(paths)\n', (2680, 2687), False, 'from tqdm import tqdm\n'), ((3528, 3637), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'description': 'DESCRIPTION'}), '(formatter_class=argparse.\n ArgumentDefaultsHelpFormatter, description=DESCRIPTION)\n', (3551, 3637), False, 'import argparse\n'), ((750, 816), 'pandas.merge', 'pd.merge', (['df_objects', 'df_images'], {'left_on': '"""image_id"""', 'right_on': '"""id"""'}), "(df_objects, df_images, left_on='image_id', right_on='id')\n", (758, 816), True, 'import pandas as pd\n'), ((1941, 1955), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (1950, 1955), False, 'import json\n'), ((2241, 2286), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (["ann['annotations']"], {}), "(ann['annotations'])\n", (2266, 2286), True, 'import pandas as pd\n'), ((2712, 2736), 'os.path.split', 'os.path.split', (['file_path'], {}), '(file_path)\n', (2725, 2736), False, 'import os\n'), ((2822, 2849), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (2838, 2849), False, 'import os\n'), ((1480, 1616), 'pandas.Series', 'pd.Series', (["{'image_id': image_id, 'img_name': file_name, 'width': width, 'height':\n height, 'bboxes': bboxes, 'labels': category_ids}"], {}), "({'image_id': image_id, 'img_name': file_name, 'width': width,\n 'height': height, 'bboxes': bboxes, 'labels': category_ids})\n", (1489, 1616), True, 'import pandas as pd\n'), ((2618, 2638), 'pathlib.Path', 'Path', (['args.image_dir'], {}), '(args.image_dir)\n', (2622, 2638), False, 'from pathlib import Path\n')]
import pandas as pd from utils.date import to_datetime from functools import wraps nb_author = 16 class DataFrameGenertion: def __init__(self): self.columns = self._generate_columns() self.df = pd.DataFrame(columns=self.columns) self.d = None self.article = None def _generate_columns(self): columns = ["pmid", "title", "abstract", "date", "journal", "substance", "author_list", "affiliation_list"] for i in range(0, nb_author): columns.append(f'author_{i}') columns.append(f'affiliation_{i}') return columns def try_data(func): """ decorator to add before every function that get information from article : """ def inner(self,): try: return func(self) except: return None return inner @try_data def get_abstract(self): return self.article["MedlineCitation"]["Article"]["Abstract"]["AbstractText"][0] @try_data def get_affiliation_list(self): return [ele['Affiliation'] for ele in self.article["MedlineCitation"]['Article']['AuthorList'][0]['AffiliationInfo']] @try_data def get_author_list(self): return [ele['ForeName'] + " " + ele['LastName'] for ele in self.article["MedlineCitation"]['Article']['AuthorList']] @try_data def get_chemical_list(self): return ', '.join([str(ele['NameOfSubstance']) for ele in self.article["MedlineCitation"]["ChemicalList"]]) def explode_data(self, dic_key): data_l = self.d[dic_key] if data_l != None: name = dic_key.strip('list') for i in range(len(data_l)): self.d[f'{name}{i}'] = self.d[dic_key][i] if i > nb_author: break def update_df(self, article): self.d = {} self.article = article self.d['abstract'] = self.get_abstract() self.d['pmid'] = str(article["MedlineCitation"]["PMID"]) self.d['title'] = article["MedlineCitation"]['Article']['ArticleTitle'] self.d['date'] = to_datetime(article["MedlineCitation"]["DateCompleted"]) self.d['journal'] = article["MedlineCitation"]["MedlineJournalInfo"]['MedlineTA'] self.d['author_list'] = self.get_author_list() self.d['affiliation_list'] = self.get_affiliation_list() self.explode_data("affiliation_list") self.d['author_list'] = self.get_author_list() self.explode_data("author_list") self.df = self.df.append(self.d, ignore_index=True)
[ "pandas.DataFrame", "utils.date.to_datetime" ]
[((215, 249), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'self.columns'}), '(columns=self.columns)\n', (227, 249), True, 'import pandas as pd\n'), ((2144, 2200), 'utils.date.to_datetime', 'to_datetime', (["article['MedlineCitation']['DateCompleted']"], {}), "(article['MedlineCitation']['DateCompleted'])\n", (2155, 2200), False, 'from utils.date import to_datetime\n')]
import unittest from index import update_inverted_index __author__ = 'guoyong' class IndexTest(unittest.TestCase): def setUp(self): self.index = { 'python': [] } def test_update_inverted_index_empty(self): update_inverted_index(self.index, 'python', 1, 2, 3) self.assertEqual([1, 2, 3], self.index.get('python')) def test_update_inverted_index_duplicate_item(self): update_inverted_index(self.index, 'python', 1, 2, 3) update_inverted_index(self.index, 'python', 3) self.assertEqual([1, 2, 3], self.index.get('python')) def test_update_inverted_index_sorted(self): update_inverted_index(self.index, 'python', 3, 1, 2) self.assertEqual([1, 2, 3], self.index.get('python'))
[ "index.update_inverted_index" ]
[((256, 308), 'index.update_inverted_index', 'update_inverted_index', (['self.index', '"""python"""', '(1)', '(2)', '(3)'], {}), "(self.index, 'python', 1, 2, 3)\n", (277, 308), False, 'from index import update_inverted_index\n'), ((437, 489), 'index.update_inverted_index', 'update_inverted_index', (['self.index', '"""python"""', '(1)', '(2)', '(3)'], {}), "(self.index, 'python', 1, 2, 3)\n", (458, 489), False, 'from index import update_inverted_index\n'), ((498, 544), 'index.update_inverted_index', 'update_inverted_index', (['self.index', '"""python"""', '(3)'], {}), "(self.index, 'python', 3)\n", (519, 544), False, 'from index import update_inverted_index\n'), ((665, 717), 'index.update_inverted_index', 'update_inverted_index', (['self.index', '"""python"""', '(3)', '(1)', '(2)'], {}), "(self.index, 'python', 3, 1, 2)\n", (686, 717), False, 'from index import update_inverted_index\n')]